From 0497ab926d2a8181c766c3a1fe04bc4e3bfb4a82 Mon Sep 17 00:00:00 2001
From: Karol Blaszczak
Date: Tue, 14 Jan 2025 13:35:31 +0100
Subject: [PATCH] [DOCS] Refactoring OV versions in references (#28410)

setting 24.6 references to be fixed
---
 CONTRIBUTING.md | 44 ++--
 CONTRIBUTING_PR.md | 40 ++--
 docs/articles_en/about-openvino.rst | 2 +-
 .../about-openvino/contributing.rst | 14 +-
 .../contributing/code-contribution-guide.rst | 42 ++--
 .../performance-benchmarks-faq.rst | 4 +-
 .../about-openvino/release-notes-openvino.rst | 2 +-
 .../images/openvino-overview-diagram.jpg | 4 +-
 .../documentation/openvino-extensibility.rst | 2 +-
 .../frontend-extensions.rst | 2 +-
 .../openvino-plugin-library.rst | 2 +-
 .../get-started/install-openvino.rst | 2 +-
 .../llm_inference_guide/genai-guide.rst | 2 +-
 .../llm_inference_guide/ov-tokenizers.rst | 2 +-
 .../openvino-samples/bert-benchmark.rst | 2 +-
 .../openvino-samples/get-started-demos.rst | 6 +-
 .../openvino-samples/hello-classification.rst | 6 +-
 .../hello-nv12-input-classification.rst | 4 +-
 .../openvino-samples/hello-query-device.rst | 4 +-
 .../openvino-samples/hello-reshape-ssd.rst | 4 +-
 .../image-classification-async.rst | 4 +-
 .../openvino-samples/model-creation.rst | 4 +-
 .../openvino-samples/sync-benchmark.rst | 4 +-
 .../openvino-samples/throughput-benchmark.rst | 4 +-
 .../openvino-workflow/deployment-locally.rst | 4 +-
 .../local-distribution-libraries.rst | 4 +-
 ...al-binary-size-conditional-compilation.rst | 6 +-
 .../cpu-device.rst | 6 +-
 .../gpu-device.rst | 6 +-
 .../remote-tensor-api-npu-plugin.rst | 2 +-
 .../running-inference/string-tensors.rst | 4 +-
 docs/dev/build_linux.md | 4 +-
 docs/dev/build_raspbian.md | 6 +-
 .../cmake_options_for_custom_compilation.md | 2 +-
 docs/dev/index.md | 2 +-
 docs/dev/static_libaries.md | 4 +-
 ...cy-models-image-generation-with-output.rst | 126 +++++-----
 .../llm-chatbot-generate-api-with-output.rst | 74 +++---
 docs/notebooks/llm-chatbot-with-output.rst | 2 +-
 .../llm-question-answering-with-output.rst | 88 +++----
 ...tract-structure-extraction-with-output.rst | 62 ++---
 .../openvino-tokenizers-with-output.rst | 102 ++++----
 .../whisper-asr-genai-with-output.rst | 220 +++++++++---------
 ...isper-subtitles-generation-with-output.rst | 156 ++++++-------
 docs/sphinx_setup/api/nodejs_api/addon.rst | 14 +-
 .../api/nodejs_api/nodejs_api.rst | 10 +-
 .../openvino-node/enums/element.rst | 22 +-
 .../openvino-node/enums/resizeAlgorithm.rst | 6 +-
 .../interfaces/CompiledModel.rst | 26 +--
 .../openvino-node/interfaces/Core.rst | 42 ++--
 .../interfaces/CoreConstructor.rst | 4 +-
 .../openvino-node/interfaces/InferRequest.rst | 32 +--
 .../openvino-node/interfaces/InputInfo.rst | 8 +-
 .../interfaces/InputModelInfo.rst | 4 +-
 .../interfaces/InputTensorInfo.rst | 8 +-
 .../openvino-node/interfaces/Model.rst | 34 +--
 .../openvino-node/interfaces/Output.rst | 14 +-
 .../openvino-node/interfaces/OutputInfo.rst | 4 +-
 .../interfaces/OutputTensorInfo.rst | 6 +-
 .../openvino-node/interfaces/PartialShape.rst | 10 +-
 .../interfaces/PartialShapeConstructor.rst | 4 +-
 .../interfaces/PrePostProcessor.rst | 8 +-
 .../PrePostProcessorConstructor.rst | 4 +-
 .../interfaces/PreProcessSteps.rst | 4 +-
 .../openvino-node/interfaces/Tensor.rst | 12 +-
 .../interfaces/TensorConstructor.rst | 8 +-
 .../openvino-node/types/Dimension.rst | 2 +-
 .../nodejs_api/openvino-node/types/OVAny.rst | 2 +-
 .../types/SupportedTypedArray.rst | 2 +-
 .../openvino-node/types/elementTypeString.rst | 2 +-
 70 files changed, 689 insertions(+), 689 deletions(-)

diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index c30ce12665ab33..b7397bf3c58a1a 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -58,7 +58,7 @@ product better.
 ### Improve documentation

 * **OpenVINO developer documentation** is contained entirely in this repository, under the
-  [./docs/dev](https://github.com/openvinotoolkit/openvino/tree/master/docs/dev) folder.
+  [./docs/dev](https://github.com/openvinotoolkit/openvino/tree/releases/2024/6/docs/dev) folder.

 * **User documentation** is built from several sources and published at
   [docs.openvino.ai](https://docs.openvino.ai/), which is the recommended place for reading
@@ -97,7 +97,7 @@ Keep in mind that we are here to help - **do not hesitate to ask the development

 You can start with the following links:
 - [What is OpenVINO?](https://github.com/openvinotoolkit/openvino#what-is-openvino-toolkit)
-- [OpenVINO architecture](https://github.com/openvinotoolkit/openvino/blob/master/src/docs/architecture.md)
+- [OpenVINO architecture](https://github.com/openvinotoolkit/openvino/blob/releases/2024/6/src/docs/architecture.md)
 - [User documentation](https://docs.openvino.ai/)
 - [Blog post on contributing to OpenVINO](https://medium.com/openvino-toolkit/how-to-contribute-to-an-ai-open-source-project-c741f48e009e)
 - [Pick up a Good First Issue](https://github.com/orgs/openvinotoolkit/projects/3)
@@ -105,39 +105,39 @@ You can start with the following links:

 ### 2. Building the project

-In order to build the project, follow the [build instructions for your specific OS](https://github.com/openvinotoolkit/openvino/blob/master/docs/dev/build.md).
+In order to build the project, follow the [build instructions for your specific OS](https://github.com/openvinotoolkit/openvino/blob/releases/2024/6/docs/dev/build.md).

 ### 3. Familiarize yourself with the component you'll be working with

 Choose the component your Good First Issue is related to. You can run tests to make sure it works correctly.
##### APIs -- [C API](https://github.com/openvinotoolkit/openvino/tree/master/src/bindings/c) -- [Core](https://github.com/openvinotoolkit/openvino/tree/master/src/core) -- [Python API](https://github.com/openvinotoolkit/openvino/tree/master/src/bindings/python) -- [Node.js API](https://github.com/openvinotoolkit/openvino/tree/master/src/bindings/js/node) +- [C API](https://github.com/openvinotoolkit/openvino/tree/releases/2024/6/src/bindings/c) +- [Core](https://github.com/openvinotoolkit/openvino/tree/releases/2024/6/src/core) +- [Python API](https://github.com/openvinotoolkit/openvino/tree/releases/2024/6/src/bindings/python) +- [Node.js API](https://github.com/openvinotoolkit/openvino/tree/releases/2024/6/src/bindings/js/node) ##### Frontends -- [IR Frontend](https://github.com/openvinotoolkit/openvino/tree/master/src/frontends/ir) -- [ONNX Frontend](https://github.com/openvinotoolkit/openvino/tree/master/src/frontends/onnx) -- [PaddlePaddle Frontend](https://github.com/openvinotoolkit/openvino/tree/master/src/frontends/paddle) -- [PyTorch Frontend](https://github.com/openvinotoolkit/openvino/tree/master/src/frontends/pytorch) -- [TensorFlow Frontend](https://github.com/openvinotoolkit/openvino/tree/master/src/frontends/tensorflow) +- [IR Frontend](https://github.com/openvinotoolkit/openvino/tree/releases/2024/6/src/frontends/ir) +- [ONNX Frontend](https://github.com/openvinotoolkit/openvino/tree/releases/2024/6/src/frontends/onnx) +- [PaddlePaddle Frontend](https://github.com/openvinotoolkit/openvino/tree/releases/2024/6/src/frontends/paddle) +- [PyTorch Frontend](https://github.com/openvinotoolkit/openvino/tree/releases/2024/6/src/frontends/pytorch) +- [TensorFlow Frontend](https://github.com/openvinotoolkit/openvino/tree/releases/2024/6/src/frontends/tensorflow) ##### Plugins -- [Auto plugin](https://github.com/openvinotoolkit/openvino/blob/master/src/plugins/auto) -- [CPU plugin](https://github.com/openvinotoolkit/openvino/blob/master/src/plugins/intel_cpu) -- [GPU plugin](https://github.com/openvinotoolkit/openvino/blob/master/src/plugins/intel_gpu) -- [NPU plugin](https://github.com/openvinotoolkit/openvino/blob/master/src/plugins/intel_npu) -- [Hetero plugin](https://github.com/openvinotoolkit/openvino/blob/master/src/plugins/hetero) -- [Template plugin](https://github.com/openvinotoolkit/openvino/tree/master/src/plugins/template) +- [Auto plugin](https://github.com/openvinotoolkit/openvino/blob/releases/2024/6/src/plugins/auto) +- [CPU plugin](https://github.com/openvinotoolkit/openvino/blob/releases/2024/6/src/plugins/intel_cpu) +- [GPU plugin](https://github.com/openvinotoolkit/openvino/blob/releases/2024/6/src/plugins/intel_gpu) +- [NPU plugin](https://github.com/openvinotoolkit/openvino/blob/releases/2024/6/src/plugins/intel_npu) +- [Hetero plugin](https://github.com/openvinotoolkit/openvino/blob/releases/2024/6/src/plugins/hetero) +- [Template plugin](https://github.com/openvinotoolkit/openvino/tree/releases/2024/6/src/plugins/template) ##### Tools -- [Benchmark Tool](https://github.com/openvinotoolkit/openvino/tree/master/tools/benchmark_tool) -- [OpenVINO Model Converter](https://github.com/openvinotoolkit/openvino/tree/master/tools/ovc) +- [Benchmark Tool](https://github.com/openvinotoolkit/openvino/tree/releases/2024/6/tools/benchmark_tool) +- [OpenVINO Model Converter](https://github.com/openvinotoolkit/openvino/tree/releases/2024/6/tools/ovc) ##### Others -- [Documentation](https://github.com/openvinotoolkit/openvino/blob/master/CONTRIBUTING_DOCS.md) +- 
[Documentation](https://github.com/openvinotoolkit/openvino/blob/releases/2024/6/CONTRIBUTING_DOCS.md) ### 3. Start working on your Good First Issue @@ -152,7 +152,7 @@ questions in the channel dedicated to Good First Issue support. ### 4. Submit a PR with your changes -Follow our [Good Pull Request guidelines](https://github.com/openvinotoolkit/openvino/blob/master/CONTRIBUTING_PR.md). Please remember about [linking your Pull Request to the issue](https://docs.github.com/en/issues/tracking-your-work-with-issues/linking-a-pull-request-to-an-issue#manually-linking-a-pull-request-to-an-issue-using-the-pull-request-sidebar) it addresses. +Follow our [Good Pull Request guidelines](https://github.com/openvinotoolkit/openvino/blob/releases/2024/6/CONTRIBUTING_PR.md). Please remember about [linking your Pull Request to the issue](https://docs.github.com/en/issues/tracking-your-work-with-issues/linking-a-pull-request-to-an-issue#manually-linking-a-pull-request-to-an-issue-using-the-pull-request-sidebar) it addresses. ### 5. Wait for a review diff --git a/CONTRIBUTING_PR.md b/CONTRIBUTING_PR.md index a1046b455ecf7b..f9f4f7da3c446e 100644 --- a/CONTRIBUTING_PR.md +++ b/CONTRIBUTING_PR.md @@ -1,45 +1,45 @@ # How to Prepare a Good PR - OpenVINO is an open-source project and you can contribute to its code directly. - To do so, follow these guidelines for creating Pull Requests, so that your + OpenVINO is an open-source project and you can contribute to its code directly. + To do so, follow these guidelines for creating Pull Requests, so that your changes get the highest chance of being merged. ## General Rules of a Good Pull Request -* Create your own fork of the repository and use it to create PRs. +* Create your own fork of the repository and use it to create PRs. Avoid creating change branches in the main repository. -* Choose a proper branch for your work and create your own branch based on it. -* Give your branches, commits, and Pull Requests meaningful names and descriptions. - It helps to track changes later. If your changes cover a particular component, +* Choose a proper branch for your work and create your own branch based on it. +* Give your branches, commits, and Pull Requests meaningful names and descriptions. + It helps to track changes later. If your changes cover a particular component, you can indicate it in the PR name as a prefix, for example: ``[DOCS] PR name``. -* Follow the [OpenVINO code style guide](https://github.com/openvinotoolkit/openvino/blob/master/docs/dev/coding_style.md). -* Make your PRs small - each PR should address one issue. Remove all changes +* Follow the [OpenVINO code style guide](https://github.com/openvinotoolkit/openvino/blob/releases/2024/6/docs/dev/coding_style.md). +* Make your PRs small - each PR should address one issue. Remove all changes unrelated to the PR. * [Link your Pull Request to an issue](https://docs.github.com/en/issues/tracking-your-work-with-issues/linking-a-pull-request-to-an-issue#manually-linking-a-pull-request-to-an-issue-using-the-pull-request-sidebar) if it addresses one. * Document your contribution! If your changes may impact how the user works with - OpenVINO, provide the information in proper articles. You can do it yourself, + OpenVINO, provide the information in proper articles. You can do it yourself, or contact one of OpenVINO documentation contributors to work together on - developing the right content. + developing the right content. * For Work In Progress, or checking test results early, use a Draft PR. 
## Ensure Change Quality -Your pull request will be automatically tested by OpenVINO™'s pre-commit and marked -as "green" if it is ready for merging. If any builders fail, the status is "red," -you need to fix the issues listed in console logs. Any change to the PR branch will +Your pull request will be automatically tested by OpenVINO™'s pre-commit and marked +as "green" if it is ready for merging. If any builders fail, the status is "red," +you need to fix the issues listed in console logs. Any change to the PR branch will automatically trigger the checks, so you don't need to recreate the PR, Just wait -for the updated results. +for the updated results. Regardless of the automated tests, you should ensure the quality of your changes: * Test your changes locally: - * Make sure to double-check your code. - * Run tests locally to identify and fix potential issues (execute test binaries + * Make sure to double-check your code. + * Run tests locally to identify and fix potential issues (execute test binaries from the artifacts directory, e.g. ``/bin/intel64/Release/ieFuncTests``) -* Before creating a PR, make sure that your branch is up to date with the latest - state of the branch you want to contribute to (e.g. git fetch upstream && git +* Before creating a PR, make sure that your branch is up to date with the latest + state of the branch you want to contribute to (e.g. git fetch upstream && git merge upstream/master). @@ -47,7 +47,7 @@ Regardless of the automated tests, you should ensure the quality of your changes * The "master" branch is used for development and constitutes the base for each new release. * Each OpenVINO release has its own branch: ``releases//``. -* The final release each year is considered a Long Term Support version, +* The final release each year is considered a Long Term Support version, which means it remains active. * Contributions are accepted only by active branches, which are: * the "master" branch for future releases, @@ -57,7 +57,7 @@ Regardless of the automated tests, you should ensure the quality of your changes ## Need Additional Help? Check these Articles -* [How to create a fork](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/working-with-forks/fork-a-repo) +* [How to create a fork](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/working-with-forks/fork-a-repo) * [Install Git](https://git-scm.com/book/en/v2/Getting-Started-First-Time-Git-Setup) * If you want to add a new sample, please have a look at the Guide for contributing to C++/C/Python OV samples and add the license statement at the top of new files for diff --git a/docs/articles_en/about-openvino.rst b/docs/articles_en/about-openvino.rst index 48422d9b3a7ae9..a12939fff23ae7 100644 --- a/docs/articles_en/about-openvino.rst +++ b/docs/articles_en/about-openvino.rst @@ -32,7 +32,7 @@ To learn about the main properties of OpenVINO, see the :doc:`Key Features `__ and `core components `__. +To learn more about how OpenVINO works, read the Developer documentation on its `architecture `__ and `core components `__. OpenVINO Ecosystem ############################################################## diff --git a/docs/articles_en/about-openvino/contributing.rst b/docs/articles_en/about-openvino/contributing.rst index 91d3aa5ca90a68..13f6e29512e4df 100644 --- a/docs/articles_en/about-openvino/contributing.rst +++ b/docs/articles_en/about-openvino/contributing.rst @@ -65,7 +65,7 @@ you can always ask if you can help. 
Choose one of the issues reported in `GitHub Issue Tracker `__ and -`create a Pull Request `__ +`create a Pull Request `__ (PR) addressing it. If you find a new bug and want to fix it, you should still @@ -96,17 +96,17 @@ can see how to develop a new plugin for it in the ##################################### OpenVINO user documentation is built from several sources, mainly the files in -the `docs/articles_en `__ +the `docs/articles_en `__ folder, using `Sphinx `__ and the `reStructuredText `__ markup language. -OpenVINO `developer documentation `__ -is available only in markdown in the `docs/dev `__ +OpenVINO `developer documentation `__ +is available only in markdown in the `docs/dev `__ folder. To edit docs, consider using the Editor’s -`guide `__ +`guide `__ and contacting `documentation maintainers `__, who will help you with information architecture and formatting, as well as review, adjust, and merge the PR. @@ -155,7 +155,7 @@ you can help someone. .. note:: By contributing to the OpenVINO project, you agree that your contributions - will be licensed under `the terms of the OpenVINO repository `__. + will be licensed under `the terms of the OpenVINO repository `__. Additional Resources @@ -163,7 +163,7 @@ Additional Resources - :doc:`Code Contribution Guide <./contributing/code-contribution-guide>` - Choose a `"Good First Issue" `__. -- Learn more about `OpenVINO architecture `__. +- Learn more about `OpenVINO architecture `__. - Check out a `blog post on contributing to OpenVINO `__. - Visit `Intel DevHub Discord server `__ to join discussions and talk to OpenVINO developers. \ No newline at end of file diff --git a/docs/articles_en/about-openvino/contributing/code-contribution-guide.rst b/docs/articles_en/about-openvino/contributing/code-contribution-guide.rst index a74bb586e18130..18672cfbdfc31e 100644 --- a/docs/articles_en/about-openvino/contributing/code-contribution-guide.rst +++ b/docs/articles_en/about-openvino/contributing/code-contribution-guide.rst @@ -20,33 +20,33 @@ Remember, your questions help us keep improving OpenVINO. .. tab-item:: APIs - - `Core C++ API `__ - - `C API `__ - - `Python API `__ - - `JavaScript (Node.js) API `__ + - `Core C++ API `__ + - `C API `__ + - `Python API `__ + - `JavaScript (Node.js) API `__ .. tab-item:: Frontends - - `IR Frontend `__ - - `ONNX Frontend `__ - - `PaddlePaddle Frontend `__ - - `PyTorch Frontend `__ - - `TensorFlow Frontend `__ - - `TensorFlow Lite Frontend `__ + - `IR Frontend `__ + - `ONNX Frontend `__ + - `PaddlePaddle Frontend `__ + - `PyTorch Frontend `__ + - `TensorFlow Frontend `__ + - `TensorFlow Lite Frontend `__ .. tab-item:: Plugins - - `Auto plugin `__ - - `CPU plugin `__ - - `GPU plugin `__ - - `NPU plugin `__ - - `Hetero plugin `__ - - `Template plugin `__ + - `Auto plugin `__ + - `CPU plugin `__ + - `GPU plugin `__ + - `NPU plugin `__ + - `Hetero plugin `__ + - `Template plugin `__ .. tab-item:: Tools - - `Benchmark Tool `__ - - `Model Conversion `__ + - `Benchmark Tool `__ + - `Model Conversion `__ 2. **Assign yourself to the issue.** @@ -61,14 +61,14 @@ Remember, your questions help us keep improving OpenVINO. 3. **Build OpenVINO.** In order to build OpenVINO, follow the - `build instructions for your specific OS `__. + `build instructions for your specific OS `__. Use the local build and the information found in the issue description to develop your contribution. 4. 
**Submit a PR with your changes.** - Follow the `guidelines `__ + Follow the `guidelines `__ and do not forget to `link your Pull Request to the issue `__ it addresses. @@ -83,6 +83,6 @@ Additional Resources ##################### - Choose a `“Good First Issue” `__. -- Learn more about `OpenVINO architecture `__. +- Learn more about `OpenVINO architecture `__. - Check out a `blog post on contributing to OpenVINO `__. - Visit `Intel DevHub Discord server `__ to join discussions and talk to OpenVINO developers. \ No newline at end of file diff --git a/docs/articles_en/about-openvino/performance-benchmarks/performance-benchmarks-faq.rst b/docs/articles_en/about-openvino/performance-benchmarks/performance-benchmarks-faq.rst index 0f70c93e9c8b96..3d1b51470344cf 100644 --- a/docs/articles_en/about-openvino/performance-benchmarks/performance-benchmarks-faq.rst +++ b/docs/articles_en/about-openvino/performance-benchmarks/performance-benchmarks-faq.rst @@ -35,8 +35,8 @@ Performance Information F.A.Q. open-source tool within the Intel® Distribution of OpenVINO™ toolkit called :doc:`benchmark_app <../../learn-openvino/openvino-samples/benchmark-tool>`. - For diffusers (Stable-Diffusion) and foundational models (aka LLMs) please use the OpenVINO GenAI - opensource repo `OpenVINO GenAI tools/llm_bench `__ + For diffusers (Stable-Diffusion) and foundational models (aka LLMs) please use the OpenVINO GenAI + opensource repo `OpenVINO GenAI tools/llm_bench `__ For a simple instruction on testing performance, see the :doc:`Getting Performance Numbers Guide `. diff --git a/docs/articles_en/about-openvino/release-notes-openvino.rst b/docs/articles_en/about-openvino/release-notes-openvino.rst index 0be0981942be7a..2aa31b08347d67 100644 --- a/docs/articles_en/about-openvino/release-notes-openvino.rst +++ b/docs/articles_en/about-openvino/release-notes-openvino.rst @@ -641,7 +641,7 @@ Previous 2024 releases * New samples and pipelines are now available: * An example IterableStreamer implementation in - `multinomial_causal_lm/python sample `__ + `multinomial_causal_lm/python sample `__ * GenAI compilation is now available as part of OpenVINO via the –DOPENVINO_EXTRA_MODULES CMake option. diff --git a/docs/articles_en/assets/images/openvino-overview-diagram.jpg b/docs/articles_en/assets/images/openvino-overview-diagram.jpg index bfd3c6533446f3..982b65f5eab254 100644 --- a/docs/articles_en/assets/images/openvino-overview-diagram.jpg +++ b/docs/articles_en/assets/images/openvino-overview-diagram.jpg @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:739d604dc4b8bae082e9c70e24328bcf9c30fa3fe5b1f884b9bd129509302b4e -size 1465073 +oid sha256:bf3adb1b6fafa18ecf6c5cf2944e687695953605de7e7e4e4315d108fbfb608e +size 124217 diff --git a/docs/articles_en/documentation/openvino-extensibility.rst b/docs/articles_en/documentation/openvino-extensibility.rst index 216135009b1806..a31716c2fd3cd6 100644 --- a/docs/articles_en/documentation/openvino-extensibility.rst +++ b/docs/articles_en/documentation/openvino-extensibility.rst @@ -78,7 +78,7 @@ Registering Extensions A custom operation class and a new mapping frontend extension class object should be registered to be usable in OpenVINO runtime. .. note:: - This documentation is derived from the `Template extension `__, which demonstrates the details of extension development. It is based on minimalistic ``Identity`` operation that is a placeholder for your real custom operation. Review the complete, fully compilable code to see how it works. 
+ This documentation is derived from the `Template extension `__, which demonstrates the details of extension development. It is based on minimalistic ``Identity`` operation that is a placeholder for your real custom operation. Review the complete, fully compilable code to see how it works. Use the ``ov::Core::add_extension`` method to load the extensions to the ``ov::Core`` object. This method allows loading library with extensions or extensions from the code. diff --git a/docs/articles_en/documentation/openvino-extensibility/frontend-extensions.rst b/docs/articles_en/documentation/openvino-extensibility/frontend-extensions.rst index 115f149657821c..8b5dfea1c62491 100644 --- a/docs/articles_en/documentation/openvino-extensibility/frontend-extensions.rst +++ b/docs/articles_en/documentation/openvino-extensibility/frontend-extensions.rst @@ -20,7 +20,7 @@ guide. .. note:: - This documentation is written based on the `Template extension `__, + This documentation is written based on the `Template extension `__, which demonstrates extension development details based on minimalistic ``Identity`` operation that is a placeholder for your real custom operation. You can review the complete code, which is fully compilable, to see how it works. diff --git a/docs/articles_en/documentation/openvino-extensibility/openvino-plugin-library.rst b/docs/articles_en/documentation/openvino-extensibility/openvino-plugin-library.rst index 7a82099fcede73..940314a768fd5f 100644 --- a/docs/articles_en/documentation/openvino-extensibility/openvino-plugin-library.rst +++ b/docs/articles_en/documentation/openvino-extensibility/openvino-plugin-library.rst @@ -89,7 +89,7 @@ Detailed Guides * :doc:`Quantized networks ` * :doc:`Low precision transformations ` guide * :doc:`Writing OpenVINO™ transformations ` guide -* `Integration with AUTO Plugin `__ +* `Integration with AUTO Plugin `__ API References ############## diff --git a/docs/articles_en/get-started/install-openvino.rst b/docs/articles_en/get-started/install-openvino.rst index 29547d5b0fc2e5..f49dc66d154cf2 100644 --- a/docs/articles_en/get-started/install-openvino.rst +++ b/docs/articles_en/get-started/install-openvino.rst @@ -50,7 +50,7 @@ All currently supported versions are: .. dropdown:: Building OpenVINO from Source OpenVINO Toolkit source files are available on GitHub as open source. If you want to build your own version of OpenVINO for your platform, - follow the `OpenVINO Build Instructions `__. + follow the `OpenVINO Build Instructions `__. 
diff --git a/docs/articles_en/learn-openvino/llm_inference_guide/genai-guide.rst b/docs/articles_en/learn-openvino/llm_inference_guide/genai-guide.rst index 64f42c257caca1..765b87db610ab5 100644 --- a/docs/articles_en/learn-openvino/llm_inference_guide/genai-guide.rst +++ b/docs/articles_en/learn-openvino/llm_inference_guide/genai-guide.rst @@ -918,7 +918,7 @@ Additional Resources #################### * `OpenVINO GenAI Repo `__ -* `OpenVINO GenAI Samples `__ +* `OpenVINO GenAI Samples `__ * A Jupyter notebook demonstrating `Visual-language assistant with MiniCPM-V2 and OpenVINO `__ * `OpenVINO Tokenizers `__ diff --git a/docs/articles_en/learn-openvino/llm_inference_guide/ov-tokenizers.rst b/docs/articles_en/learn-openvino/llm_inference_guide/ov-tokenizers.rst index 339d8e814ace73..398f82777eb3ad 100644 --- a/docs/articles_en/learn-openvino/llm_inference_guide/ov-tokenizers.rst +++ b/docs/articles_en/learn-openvino/llm_inference_guide/ov-tokenizers.rst @@ -336,7 +336,7 @@ Additional Resources * `OpenVINO Tokenizers repo `__ * `OpenVINO Tokenizers Notebook `__ -* `Text generation C++ samples that support most popular models like LLaMA 3 `__ +* `Text generation C++ samples that support most popular models like LLaMA 3 `__ * `OpenVINO GenAI Repo `__ diff --git a/docs/articles_en/learn-openvino/openvino-samples/bert-benchmark.rst b/docs/articles_en/learn-openvino/openvino-samples/bert-benchmark.rst index 92f6a410219f43..b9e9794bdac474 100644 --- a/docs/articles_en/learn-openvino/openvino-samples/bert-benchmark.rst +++ b/docs/articles_en/learn-openvino/openvino-samples/bert-benchmark.rst @@ -65,4 +65,4 @@ Additional Resources - :doc:`Get Started with Samples ` - :doc:`Using OpenVINO Samples <../openvino-samples>` - :doc:`Convert a Model <../../documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api>` -- `Bert Benchmark Python Sample on Github `__ +- `Bert Benchmark Python Sample on Github `__ diff --git a/docs/articles_en/learn-openvino/openvino-samples/get-started-demos.rst b/docs/articles_en/learn-openvino/openvino-samples/get-started-demos.rst index 4d7d94efddb898..3c726113b859ae 100644 --- a/docs/articles_en/learn-openvino/openvino-samples/get-started-demos.rst +++ b/docs/articles_en/learn-openvino/openvino-samples/get-started-demos.rst @@ -12,7 +12,7 @@ To use OpenVINO samples, install OpenVINO using one of the following distributio * Archive files (recommended) - :doc:`Linux <../../get-started/install-openvino/install-openvino-archive-linux>` | :doc:`Windows <../../get-started/install-openvino/install-openvino-archive-windows>` | :doc:`macOS <../../get-started/install-openvino/install-openvino-archive-macos>` * :doc:`APT <../../get-started/install-openvino/install-openvino-apt>` or :doc:`YUM <../../get-started/install-openvino/install-openvino-yum>` for Linux * :doc:`Docker image <../../get-started/install-openvino/install-openvino-docker-linux>` -* `Build from source `__ +* `Build from source `__ If you install OpenVINO Runtime via archive files, sample applications are created in the following directories: @@ -43,7 +43,7 @@ Select a sample you want to use from the :doc:`OpenVINO Samples <../openvino-sam Some samples may also require `OpenCV `__ to run properly. Make sure to install it for use with vision-oriented samples. -Instructions below show how to build sample applications with CMake. If you are interested in building them from source, check the `build instructions on GitHub `__ . +Instructions below show how to build sample applications with CMake. 
If you are interested in building them from source, check the `build instructions on GitHub `__ . .. tab-set:: @@ -188,7 +188,7 @@ Instructions below show how to build sample applications with CMake. If you are .. note:: - For building samples from the open-source version of OpenVINO toolkit, see the `build instructions on GitHub `__ . + For building samples from the open-source version of OpenVINO toolkit, see the `build instructions on GitHub `__ . To build the C or C++ sample applications for macOS, go to the ``/samples/c`` or ``/samples/cpp`` directory, respectively, and run the ``build_samples.sh`` script: diff --git a/docs/articles_en/learn-openvino/openvino-samples/hello-classification.rst b/docs/articles_en/learn-openvino/openvino-samples/hello-classification.rst index f8222e495c7387..cc73259ff1bf3f 100644 --- a/docs/articles_en/learn-openvino/openvino-samples/hello-classification.rst +++ b/docs/articles_en/learn-openvino/openvino-samples/hello-classification.rst @@ -259,6 +259,6 @@ Additional Resources - :doc:`Using OpenVINO Samples <../openvino-samples>` - :doc:`Convert a Model <../../documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api>` - `OpenVINO Runtime C API `__ -- `Hello Classification Python Sample on Github `__ -- `Hello Classification C++ Sample on Github `__ -- `Hello Classification C Sample on Github `__ +- `Hello Classification Python Sample on Github `__ +- `Hello Classification C++ Sample on Github `__ +- `Hello Classification C Sample on Github `__ diff --git a/docs/articles_en/learn-openvino/openvino-samples/hello-nv12-input-classification.rst b/docs/articles_en/learn-openvino/openvino-samples/hello-nv12-input-classification.rst index 19219070cbfbe2..4e166413aea78f 100644 --- a/docs/articles_en/learn-openvino/openvino-samples/hello-nv12-input-classification.rst +++ b/docs/articles_en/learn-openvino/openvino-samples/hello-nv12-input-classification.rst @@ -210,5 +210,5 @@ Additional Resources - :doc:`Using OpenVINO Samples <../openvino-samples>` - :doc:`Convert a Model <../../documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api>` - `API Reference `__ -- `Hello NV12 Input Classification C++ Sample on Github `__ -- `Hello NV12 Input Classification C Sample on Github `__ +- `Hello NV12 Input Classification C++ Sample on Github `__ +- `Hello NV12 Input Classification C Sample on Github `__ diff --git a/docs/articles_en/learn-openvino/openvino-samples/hello-query-device.rst b/docs/articles_en/learn-openvino/openvino-samples/hello-query-device.rst index 46f145a808e330..bda264c6062448 100644 --- a/docs/articles_en/learn-openvino/openvino-samples/hello-query-device.rst +++ b/docs/articles_en/learn-openvino/openvino-samples/hello-query-device.rst @@ -133,5 +133,5 @@ Additional Resources - :doc:`Integrate the OpenVINO™ Runtime with Your Application <../../openvino-workflow/running-inference/integrate-openvino-with-your-application>` - :doc:`Get Started with Samples ` - :doc:`Using OpenVINO™ Toolkit Samples <../openvino-samples>` -- `Hello Query Device Python Sample on Github `__ -- `Hello Query Device C++ Sample on Github `__ +- `Hello Query Device Python Sample on Github `__ +- `Hello Query Device C++ Sample on Github `__ diff --git a/docs/articles_en/learn-openvino/openvino-samples/hello-reshape-ssd.rst b/docs/articles_en/learn-openvino/openvino-samples/hello-reshape-ssd.rst index 23de8eb1979824..2204481ab26ce0 100644 --- a/docs/articles_en/learn-openvino/openvino-samples/hello-reshape-ssd.rst +++ 
b/docs/articles_en/learn-openvino/openvino-samples/hello-reshape-ssd.rst @@ -205,6 +205,6 @@ Additional Resources - :doc:`Get Started with Samples ` - :doc:`Using OpenVINO Samples <../openvino-samples>` - :doc:`Convert a Model <../../documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api>` -- `Hello Reshape SSD Python Sample on Github `__ -- `Hello Reshape SSD C++ Sample on Github `__ +- `Hello Reshape SSD Python Sample on Github `__ +- `Hello Reshape SSD C++ Sample on Github `__ diff --git a/docs/articles_en/learn-openvino/openvino-samples/image-classification-async.rst b/docs/articles_en/learn-openvino/openvino-samples/image-classification-async.rst index b112452e932c72..4d415f4126cbe7 100644 --- a/docs/articles_en/learn-openvino/openvino-samples/image-classification-async.rst +++ b/docs/articles_en/learn-openvino/openvino-samples/image-classification-async.rst @@ -327,5 +327,5 @@ Additional Resources - :doc:`Get Started with Samples ` - :doc:`Using OpenVINO™ Toolkit Samples <../openvino-samples>` - :doc:`Convert a Model <../../documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api>` -- `Image Classification Async Python Sample on Github `__ -- `Image Classification Async C++ Sample on Github `__ +- `Image Classification Async Python Sample on Github `__ +- `Image Classification Async C++ Sample on Github `__ diff --git a/docs/articles_en/learn-openvino/openvino-samples/model-creation.rst b/docs/articles_en/learn-openvino/openvino-samples/model-creation.rst index e0e3034c225763..e33a2f99cdc9b1 100644 --- a/docs/articles_en/learn-openvino/openvino-samples/model-creation.rst +++ b/docs/articles_en/learn-openvino/openvino-samples/model-creation.rst @@ -293,5 +293,5 @@ Additional Resources - :doc:`Get Started with Samples ` - :doc:`Using OpenVINO Samples <../openvino-samples>` - :doc:`Convert a Model <../../documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api>` -- `Model Creation Python Sample on Github `__ -- `Model Creation C++ Sample on Github `__ +- `Model Creation Python Sample on Github `__ +- `Model Creation C++ Sample on Github `__ diff --git a/docs/articles_en/learn-openvino/openvino-samples/sync-benchmark.rst b/docs/articles_en/learn-openvino/openvino-samples/sync-benchmark.rst index 245672decb7ab2..830adb12973b4b 100644 --- a/docs/articles_en/learn-openvino/openvino-samples/sync-benchmark.rst +++ b/docs/articles_en/learn-openvino/openvino-samples/sync-benchmark.rst @@ -168,5 +168,5 @@ Additional Resources - :doc:`Get Started with Samples ` - :doc:`Using OpenVINO Samples <../openvino-samples>` - :doc:`Convert a Model <../../documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api>` -- `Sync Benchmark Python Sample on Github `__ -- `Sync Benchmark C++ Sample on Github `__ +- `Sync Benchmark Python Sample on Github `__ +- `Sync Benchmark C++ Sample on Github `__ diff --git a/docs/articles_en/learn-openvino/openvino-samples/throughput-benchmark.rst b/docs/articles_en/learn-openvino/openvino-samples/throughput-benchmark.rst index e8b723afd2a480..b8dda23128aed5 100644 --- a/docs/articles_en/learn-openvino/openvino-samples/throughput-benchmark.rst +++ b/docs/articles_en/learn-openvino/openvino-samples/throughput-benchmark.rst @@ -172,5 +172,5 @@ Additional Resources - :doc:`Get Started with Samples ` - :doc:`Using OpenVINO Samples <../openvino-samples>` - :doc:`Convert a Model <../../documentation/legacy-features/transition-legacy-conversion-api/legacy-conversion-api>` 
-- `Throughput Benchmark Python Sample on Github `__ -- `Throughput Benchmark C++ Sample on Github `__ +- `Throughput Benchmark Python Sample on Github `__ +- `Throughput Benchmark C++ Sample on Github `__ diff --git a/docs/articles_en/openvino-workflow/deployment-locally.rst b/docs/articles_en/openvino-workflow/deployment-locally.rst index a8cdd8949fb318..38b1c25392ff86 100644 --- a/docs/articles_en/openvino-workflow/deployment-locally.rst +++ b/docs/articles_en/openvino-workflow/deployment-locally.rst @@ -33,7 +33,7 @@ Local Deployment Options - Grab a necessary functionality of OpenVINO together with your application, also called "local distribution": - using the :doc:`local distribution ` approach; - - using `a static version of OpenVINO Runtime linked to the final app `__. + - using `a static version of OpenVINO Runtime linked to the final app `__. The table below shows which distribution type can be used for what target operating system: @@ -52,7 +52,7 @@ The table below shows which distribution type can be used for what target operat - See https://pypi.org/project/openvino * - :doc:`Libraries for Local Distribution ` - All operating systems - * - `Build OpenVINO statically and link to the final app `__ + * - `Build OpenVINO statically and link to the final app `__ - All operating systems diff --git a/docs/articles_en/openvino-workflow/deployment-locally/local-distribution-libraries.rst b/docs/articles_en/openvino-workflow/deployment-locally/local-distribution-libraries.rst index 5e85d157c592a3..11816cf2f48523 100644 --- a/docs/articles_en/openvino-workflow/deployment-locally/local-distribution-libraries.rst +++ b/docs/articles_en/openvino-workflow/deployment-locally/local-distribution-libraries.rst @@ -16,9 +16,9 @@ what minimal set of libraries is required to deploy the application. Local distribution is also suitable for OpenVINO binaries built from source using `Build instructions `__, but this guide assumes that OpenVINO Runtime is built dynamically. -For `Static OpenVINO Runtime `__, +For `Static OpenVINO Runtime `__, select the required OpenVINO capabilities at the CMake configuration stage using -`CMake Options for Custom Compilation `__, +`CMake Options for Custom Compilation `__, then build and link the OpenVINO components to the final application. .. note:: diff --git a/docs/articles_en/openvino-workflow/deployment-locally/optimial-binary-size-conditional-compilation.rst b/docs/articles_en/openvino-workflow/deployment-locally/optimial-binary-size-conditional-compilation.rst index 0747fa87aabcd5..ac90425d6f8b8c 100644 --- a/docs/articles_en/openvino-workflow/deployment-locally/optimial-binary-size-conditional-compilation.rst +++ b/docs/articles_en/openvino-workflow/deployment-locally/optimial-binary-size-conditional-compilation.rst @@ -9,7 +9,7 @@ Conditional compilation can significantly reduce the binary size of the OpenVINO The tradeoff of conditional compilation is that the produced OpenVINO runtime can only run inference for the models and platforms which conditional compilation was applied. 
-Lean more in the `conditional_compilation_guide `__ and `Conditional_compilation_developer_guide `__ +Lean more in the `conditional_compilation_guide `__ and `Conditional_compilation_developer_guide `__ There are two steps to reduce binary size of the OpenVINO runtime library with conditional compilation: @@ -57,7 +57,7 @@ Stage 1: collecting statistics information about code usage If you want to run other application rather than benchmark_app to get statistics data, please make sure to limit inference request number and iterations to avoid too long profiling time and too large statistics data. - You can run this `script `__ to get the generated header file from csv files, and to confirm whether the statistics is correct. This step will be implicitly done in stage 2 of conditional compilation, skip it, if not needed. + You can run this `script `__ to get the generated header file from csv files, and to confirm whether the statistics is correct. This step will be implicitly done in stage 2 of conditional compilation, skip it, if not needed. .. code-block:: sh @@ -224,7 +224,7 @@ Perform the above steps for each SKUs information (CPUID, L1/L2/L3 cache size, c How to Enable Conditional Compilation on Windows ################################################ -Find detailed information in the Building OpenVINO static libraries `document `__ . +Find detailed information in the Building OpenVINO static libraries `document `__ . Stage 1: Selective build analyzed stage diff --git a/docs/articles_en/openvino-workflow/running-inference/inference-devices-and-modes/cpu-device.rst b/docs/articles_en/openvino-workflow/running-inference/inference-devices-and-modes/cpu-device.rst index f1a914e6b9dac3..48aa77f80c8784 100644 --- a/docs/articles_en/openvino-workflow/running-inference/inference-devices-and-modes/cpu-device.rst +++ b/docs/articles_en/openvino-workflow/running-inference/inference-devices-and-modes/cpu-device.rst @@ -16,8 +16,8 @@ CPU Device The CPU plugin is a part of the Intel® Distribution of OpenVINO™ toolkit. It is developed to achieve high performance inference of neural networks on Intel® x86-64 and Arm® CPUs. The newer 11th generation and later Intel® CPUs provide even further performance boost, especially with INT8 models. For an in-depth description of CPU plugin, see: -- `CPU plugin developer documentation `__. -- `OpenVINO Runtime CPU plugin source files `__. +- `CPU plugin developer documentation `__. +- `OpenVINO Runtime CPU plugin source files `__. .. note:: @@ -512,7 +512,7 @@ Additional Resources * :doc:`Inference Devices and Modes <../inference-devices-and-modes>` * :doc:`Optimization guide <../optimize-inference>` -* `CPU plugin developer documentation `__ +* `CPU plugin developer documentation `__ diff --git a/docs/articles_en/openvino-workflow/running-inference/inference-devices-and-modes/gpu-device.rst b/docs/articles_en/openvino-workflow/running-inference/inference-devices-and-modes/gpu-device.rst index b4e1c7ac15afcc..e067688b08afb4 100644 --- a/docs/articles_en/openvino-workflow/running-inference/inference-devices-and-modes/gpu-device.rst +++ b/docs/articles_en/openvino-workflow/running-inference/inference-devices-and-modes/gpu-device.rst @@ -17,8 +17,8 @@ GPU Device The GPU plugin is an OpenCL based plugin for inference of deep neural networks on Intel GPUs, both integrated and discrete ones. 
For an in-depth description of the GPU plugin, see: -- `GPU plugin developer documentation `__ -- `OpenVINO Runtime GPU plugin source files `__ +- `GPU plugin developer documentation `__ +- `OpenVINO Runtime GPU plugin source files `__ - `Start AI Development with Intel `__ The GPU plugin is a part of the Intel® Distribution of OpenVINO™ toolkit. For more information on how to configure a system to use it, see the :doc:`GPU configuration <../../../get-started/configurations/configurations-intel-gpu>`. @@ -477,6 +477,6 @@ Additional Resources * `Working with GPUs in OpenVINO™ Notebook `__ * :doc:`Inference Devices and Modes <../inference-devices-and-modes>`. * :doc:`Optimization guide <../optimize-inference>`. -* `GPU plugin developer documentation `__ +* `GPU plugin developer documentation `__ diff --git a/docs/articles_en/openvino-workflow/running-inference/inference-devices-and-modes/npu-device/remote-tensor-api-npu-plugin.rst b/docs/articles_en/openvino-workflow/running-inference/inference-devices-and-modes/npu-device/remote-tensor-api-npu-plugin.rst index c960a57124a28a..1aa24a629a13d7 100644 --- a/docs/articles_en/openvino-workflow/running-inference/inference-devices-and-modes/npu-device/remote-tensor-api-npu-plugin.rst +++ b/docs/articles_en/openvino-workflow/running-inference/inference-devices-and-modes/npu-device/remote-tensor-api-npu-plugin.rst @@ -125,7 +125,7 @@ Two types of map entries are possible: a descriptor and a container. The descriptor sets the expected structure and possible parameter values of the map. For possible low-level properties and their description, refer to the header file: -`remote_properties.hpp `__. +`remote_properties.hpp `__. Additional Resources #################### diff --git a/docs/articles_en/openvino-workflow/running-inference/string-tensors.rst b/docs/articles_en/openvino-workflow/running-inference/string-tensors.rst index 3bd8c3e04499b0..2d162cd27f9e00 100644 --- a/docs/articles_en/openvino-workflow/running-inference/string-tensors.rst +++ b/docs/articles_en/openvino-workflow/running-inference/string-tensors.rst @@ -12,7 +12,7 @@ Such a tensor is called a string tensor and can be passed as input or retrieved While this section describes basic API to handle string tensors, more practical examples that leverage both string tensors and OpenVINO tokenizer can be found in -`GenAI Samples `__. +`GenAI Samples `__. Representation @@ -203,4 +203,4 @@ Additional Resources * Use `OpenVINO tokenizers `__ to produce models that use string tensors to work with textual information as pre- and post-processing for the large language models. -* Check out `GenAI Samples `__ to see how string tensors are used in real-life applications. +* Check out `GenAI Samples `__ to see how string tensors are used in real-life applications. diff --git a/docs/dev/build_linux.md b/docs/dev/build_linux.md index 09afc6fbcfaf7d..9ea0ba1d8992bc 100644 --- a/docs/dev/build_linux.md +++ b/docs/dev/build_linux.md @@ -55,13 +55,13 @@ The software was validated on: You can use the following additional build options: -- For IA32 operation systems, use [ia32.linux.toolchain.cmake](https://github.com/openvinotoolkit/openvino/blob/master/cmake/toolchains/ia32.linux.toolchain.cmake) CMake toolchain file: +- For IA32 operation systems, use [ia32.linux.toolchain.cmake](https://github.com/openvinotoolkit/openvino/blob/releases/2024/6/cmake/toolchains/ia32.linux.toolchain.cmake) CMake toolchain file: ```sh cmake -DCMAKE_TOOLCHAIN_FILE=/cmake/toolchains/ia32.linux.toolchain.cmake .. 
``` -- OpenVINO offers several CMake options that can be used to build a custom OpenVINO runtime, which can speed up compilation. These options allow you to skip compilation of other plugins and frontends by disabling/enabling them. You can find a list of available options [here](https://github.com/openvinotoolkit/openvino/blob/master/docs/dev/cmake_options_for_custom_compilation.md) +- OpenVINO offers several CMake options that can be used to build a custom OpenVINO runtime, which can speed up compilation. These options allow you to skip compilation of other plugins and frontends by disabling/enabling them. You can find a list of available options [here](https://github.com/openvinotoolkit/openvino/blob/releases/2024/6/docs/dev/cmake_options_for_custom_compilation.md) - To build the OpenVINO Runtime Python API: 1. Enable the `-DENABLE_PYTHON=ON` option in the CMake step above (Step 4). To specify an exact Python version, use the following options (requires cmake 3.16 and higher): diff --git a/docs/dev/build_raspbian.md b/docs/dev/build_raspbian.md index d227fe6e78dd0c..3ff4c43979d4ae 100644 --- a/docs/dev/build_raspbian.md +++ b/docs/dev/build_raspbian.md @@ -1,6 +1,6 @@ # Build OpenVINO™ Runtime for Raspbian Stretch OS -> **NOTE**: Since 2023.0 release, you can compile [OpenVINO Intel CPU plugin](https://github.com/openvinotoolkit/openvino/tree/master/src/plugins/intel_cpu) on ARM platforms. +> **NOTE**: Since 2023.0 release, you can compile [OpenVINO Intel CPU plugin](https://github.com/openvinotoolkit/openvino/tree/releases/2024/6/src/plugins/intel_cpu) on ARM platforms. ## Hardware Requirements * Raspberry Pi with Raspbian Stretch OS or Raspberry Pi OS (32 or 64-bit). @@ -17,7 +17,7 @@ You can perform native compilation of the OpenVINO Runtime for Raspberry Pi, whi ``` 2. Clone the repository: ``` -git clone --recurse-submodules --single-branch --branch=master https://github.com/openvinotoolkit/openvino.git +git clone --recurse-submodules --single-branch --branch=master https://github.com/openvinotoolkit/openvino.git ``` 3. Go to the cloned `openvino` repository: @@ -33,7 +33,7 @@ git clone --recurse-submodules --single-branch --branch=master https://github.co ```bash cmake -DCMAKE_BUILD_TYPE=Release \ -DARM_COMPUTE_SCONS_JOBS=$(nproc --all) \ - .. && cmake --build . --parallel + .. && cmake --build . --parallel ``` > **NOTE**: The build command may fail due to insufficient RAM. 
To fix this issue, you can increase the swap size: diff --git a/docs/dev/cmake_options_for_custom_compilation.md b/docs/dev/cmake_options_for_custom_compilation.md index 5ace401ce091c6..2ea8ceab87585f 100644 --- a/docs/dev/cmake_options_for_custom_compilation.md +++ b/docs/dev/cmake_options_for_custom_compilation.md @@ -182,7 +182,7 @@ In this case OpenVINO CMake scripts take `TBBROOT` environment variable into acc [cpplint]:https://github.com/cpplint/cpplint [Clang format]:http://clang.llvm.org/docs/ClangFormat.html [OpenVINO Contrib]:https://github.com/openvinotoolkit/openvino_contrib -[OpenVINO thirdparty pugixml]:https://github.com/openvinotoolkit/openvino/tree/master/inference-engine/thirdparty/pugixml +[OpenVINO thirdparty pugixml]:https://github.com/openvinotoolkit/openvino/tree/releases/2024/6/inference-engine/thirdparty/pugixml [pugixml]:https://pugixml.org/ [ONNX]:https://onnx.ai/ [protobuf]:https://github.com/protocolbuffers/protobuf diff --git a/docs/dev/index.md b/docs/dev/index.md index cef96f4aa1003e..69b18d78c8ff3e 100644 --- a/docs/dev/index.md +++ b/docs/dev/index.md @@ -113,7 +113,7 @@ OpenVINO Components include: * [Plugins](../../src/plugins) - contains OpenVINO plugins which are maintained in open-source by the OpenVINO team. For more information, take a look at the [list of supported devices](https://docs.openvino.ai/2024/about-openvino/compatibility-and-support/supported-devices.html). * [Frontends](../../src/frontends) - contains available OpenVINO frontends that allow reading models from the native framework format. * [OpenVINO Model Converter (OVC)](https://docs.openvino.ai/2024/openvino-workflow/model-preparation.html) - is a cross-platform command-line tool that facilitates the transition between training and deployment environments, and adjusts deep learning models for optimal execution on end-point target devices. -* [Samples](https://github.com/openvinotoolkit/openvino/tree/master/samples) - applications in C, C++ and Python languages that show basic OpenVINO use cases. +* [Samples](https://github.com/openvinotoolkit/openvino/tree/releases/2024/6/samples) - applications in C, C++ and Python languages that show basic OpenVINO use cases. #### OpenVINO Component Structure diff --git a/docs/dev/static_libaries.md b/docs/dev/static_libaries.md index 19972a292f3b0c..73a5d2693a0a5b 100644 --- a/docs/dev/static_libaries.md +++ b/docs/dev/static_libaries.md @@ -92,7 +92,7 @@ find_package(OpenVINO REQUIRED) target_link_libraries( PRIVATE openvino::runtime) ``` -`openvino::runtime` transitively adds all other static OpenVINO libraries to a linker command. +`openvino::runtime` transitively adds all other static OpenVINO libraries to a linker command. 
### Pass libraries to linker directly @@ -121,7 +121,7 @@ The conditional compilation feature can be paired with static OpenVINO libraries ## Building with static MSVC Runtime -In order to build with static MSVC runtime, use the special [OpenVINO toolchain](https://github.com/openvinotoolkit/openvino/blob/master/cmake/toolchains/mt.runtime.win32.toolchain.cmake) file: +In order to build with static MSVC runtime, use the special [OpenVINO toolchain](https://github.com/openvinotoolkit/openvino/blob/releases/2024/6/cmake/toolchains/mt.runtime.win32.toolchain.cmake) file: ```sh cmake -DCMAKE_TOOLCHAIN_FILE=/cmake/toolchains/mt.runtime.win32.toolchain.cmake diff --git a/docs/notebooks/latent-consistency-models-image-generation-with-output.rst b/docs/notebooks/latent-consistency-models-image-generation-with-output.rst index 523afca76dd660..173e2320b9d55a 100644 --- a/docs/notebooks/latent-consistency-models-image-generation-with-output.rst +++ b/docs/notebooks/latent-consistency-models-image-generation-with-output.rst @@ -95,11 +95,11 @@ Prerequisites from pathlib import Path import requests - + utility_files = [Path("notebook_utils.py"), Path("skip_kernel_extension.py"), Path("cmd_helper.py")] - + base_utils_url = "https://raw.githubusercontent.com/openvinotoolkit/openvino_notebooks/latest/utils/" - + for utility_file in utility_files: if not utility_file.exists(): r = requests.get(base_utils_url + utility_file.name) @@ -119,7 +119,7 @@ fine-tune of `Stable-Diffusion v1-5 `__ using Latent Consistency Distillation (LCD) approach discussed above. This model is also integrated into -`Diffusers `__ library. +`Diffusers `__ library. Diffusers is the go-to library for state-of-the-art pretrained diffusion models for generating images, audio, and even 3D structures of molecules. This allows us to compare running original Stable Diffusion @@ -129,9 +129,9 @@ and distilled using LCD. The distillation approach efficiently converts a pre-trained guided diffusion model into a latent consistency model by solving an augmented PF-ODE. -For simplifying model export we will utilize Optimum Intel library. +For simplifying model export we will utilize Optimum Intel library. `Optimum Intel `__ is -the interface between the +the interface between the `Transformers `__ and `Diffusers `__ libraries and OpenVINO to accelerate end-to-end pipelines on Intel architectures. @@ -165,10 +165,10 @@ this step we will use fp16 as base model export precision. .. code:: ipython3 from cmd_helper import optimum_cli - + model_id = "SimianLuo/LCM_Dreamshaper_v7" model_path = Path(model_id.split("/")[-1] + "_ov") - + if not model_path.exists(): optimum_cli(model_id, model_path, additional_args={"weight-format": "fp16"}) @@ -227,9 +227,9 @@ inference. Select desired inference device from dropdown list bellow. .. code:: ipython3 from notebook_utils import device_widget - + device = device_widget() - + device @@ -244,7 +244,7 @@ inference. Select desired inference device from dropdown list bellow. .. code:: ipython3 from optimum.intel.openvino import OVDiffusionPipeline - + ov_pipe = OVDiffusionPipeline.from_pretrained(model_path, device=device.value) @@ -277,10 +277,10 @@ Now, let’s see model in action .. code:: ipython3 import torch - + prompt = "a beautiful pink unicorn, 8k" num_inference_steps = 4 - + images = ov_pipe( prompt=prompt, num_inference_steps=num_inference_steps, guidance_scale=8.0, height=512, width=512, generator=torch.Generator().manual_seed(1234567) ).images @@ -308,7 +308,7 @@ Nice. 
As you can see, the picture has quite a high definition 🔥. .. code:: ipython3 import gc - + del ov_pipe gc.collect(); @@ -344,11 +344,11 @@ improve model inference speed. .. code:: ipython3 from notebook_utils import quantization_widget - + skip_for_device = "GPU" in device.value to_quantize = quantization_widget(not skip_for_device) int8_model_path = model_path.parent / (model_path.name + "_int8") - + to_quantize @@ -380,36 +380,36 @@ model inputs for calibration we should customize ``CompiledModel``. .. code:: ipython3 %%skip not $to_quantize.value - + import datasets from tqdm.notebook import tqdm from transformers import set_seed from typing import Any, Dict, List import openvino as ov import numpy as np - + set_seed(1) - + class CompiledModelDecorator(ov.CompiledModel): def __init__(self, compiled_model, prob: float, data_cache: List[Any] = None): super().__init__(compiled_model) self.data_cache = data_cache if data_cache else [] self.prob = np.clip(prob, 0, 1) - + def __call__(self, *args, **kwargs): if np.random.rand() >= self.prob: self.data_cache.append(*args) return super().__call__(*args, **kwargs) - + def collect_calibration_data(lcm_pipeline, subset_size: int) -> List[Dict]: original_unet = lcm_pipeline.unet.request lcm_pipeline.unet.request = CompiledModelDecorator(original_unet, prob=0.3) - + dataset = datasets.load_dataset("google-research-datasets/conceptual_captions", split="train", trust_remote_code=True).shuffle(seed=42) lcm_pipeline.set_progress_bar_config(disable=True) safety_checker = lcm_pipeline.safety_checker lcm_pipeline.safety_checker = None - + # Run inference for data collection pbar = tqdm(total=subset_size) diff = 0 @@ -430,7 +430,7 @@ model inputs for calibration we should customize ``CompiledModel``. break pbar.update(collected_subset_size - diff) diff = collected_subset_size - + calibration_dataset = lcm_pipeline.unet.request.data_cache lcm_pipeline.set_progress_bar_config(disable=False) lcm_pipeline.unet.request = original_unet @@ -440,11 +440,11 @@ model inputs for calibration we should customize ``CompiledModel``. .. code:: ipython3 %%skip not $to_quantize.value - + import logging logging.basicConfig(level=logging.WARNING) logger = logging.getLogger(__name__) - + if not int8_model_path.exists(): subset_size = 200 ov_pipe = OVDiffusionPipeline.from_pretrained(model_path, device=device.value) @@ -472,12 +472,12 @@ Create a quantized model from the pre-trained converted OpenVINO model. .. code:: ipython3 %%skip not $to_quantize.value - + import nncf from nncf.scopes import IgnoredScope import shutil core = ov.Core() - + if not int8_model_path.exists(): unet = core.read_model(model_path / "unet/openvino_model.xml") quantized_unet = nncf.quantize( @@ -546,7 +546,7 @@ Create a quantized model from the pre-trained converted OpenVINO model. .. code:: ipython3 %%skip not $to_quantize.value - + int8_pipe = OVDiffusionPipeline.from_pretrained(int8_model_path, device=device.value) Let us check predictions with the quantized UNet using the same input @@ -555,12 +555,12 @@ data. .. code:: ipython3 %%skip not $to_quantize.value - + from IPython.display import display - + prompt = "a beautiful pink unicorn, 8k" num_inference_steps = 4 - + images = int8_pipe( prompt=prompt, num_inference_steps=num_inference_steps, @@ -569,7 +569,7 @@ data. width=512, generator=torch.Generator().manual_seed(1234567) ).images - + display(images[0]) @@ -598,9 +598,9 @@ pipelines, we use median inference time on calibration subset. .. 
code:: ipython3 %%skip not $to_quantize.value - + import time - + validation_size = 10 calibration_dataset = datasets.load_dataset("google-research-datasets/conceptual_captions", split="train", trust_remote_code=True) validation_data = [] @@ -609,7 +609,7 @@ pipelines, we use median inference time on calibration subset. break prompt = batch["caption"] validation_data.append(prompt) - + def calculate_inference_time(pipeline, calibration_dataset): inference_time = [] pipeline.set_progress_bar_config(disable=True) @@ -632,14 +632,14 @@ pipelines, we use median inference time on calibration subset. .. code:: ipython3 %%skip not $to_quantize.value - + int8_latency = calculate_inference_time(int8_pipe, validation_data) del int8_pipe gc.collect() ov_pipe = OVDiffusionPipeline.from_pretrained(model_path, device=device.value) fp_latency = calculate_inference_time(ov_pipe, validation_data) print(f"Performance speed up: {fp_latency / int8_latency:.3f}") - + del ov_pipe gc.collect(); @@ -658,11 +658,11 @@ Compare UNet file size UNET_OV_PATH = model_path / "unet/openvino_model.xml" UNET_INT8_OV_PATH = int8_model_path / "unet/openvino_model.xml" - + if UNET_INT8_OV_PATH.exists(): fp16_ir_model_size = UNET_OV_PATH.with_suffix(".bin").stat().st_size / 1024 quantized_model_size = UNET_INT8_OV_PATH.with_suffix(".bin").stat().st_size / 1024 - + print(f"FP16 model size: {fp16_ir_model_size:.2f} KB") print(f"INT8 model size: {quantized_model_size:.2f} KB") print(f"Model compression rate: {fp16_ir_model_size / quantized_model_size:.3f}") @@ -694,7 +694,7 @@ generative models as it already includes all the core functionality. ``openvino_genai.Text2ImagePipeline`` class supports inference of `Diffusers -models `__. +models `__. For pipeline initialization, we should provide directory with converted by Optimum Intel pipeline and specify inference device. Optionally, we can provide configuration for LoRA Adapters using ``adapter_config``. @@ -722,10 +722,10 @@ generation process. .. code:: ipython3 import ipywidgets as widgets - + int8_can_be_used = int8_model_path.exists() and "GPU" not in device.value use_quantized_model = widgets.Checkbox(value=int8_can_be_used, description="Use INT8 model", disabled=not int8_can_be_used) - + use_quantized_model @@ -740,9 +740,9 @@ generation process. .. code:: ipython3 import openvino_genai as ov_genai - + used_model_path = model_path if not use_quantized_model.value else int8_model_path - + pipe = ov_genai.Text2ImagePipeline(used_model_path, device.value) .. code:: ipython3 @@ -750,30 +750,30 @@ generation process. 
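    # Note on the cell below: it adapts torch's seeded RNG to OpenVINO GenAI.
    # A custom ov_genai.Generator subclass supplies the noise tensors consumed by
    # Text2ImagePipeline.generate(), so generation stays reproducible and can be
    # compared with the Optimum/Diffusers pipeline seeded with the same value earlier.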
from PIL import Image import torch import openvino as ov - - + + class Generator(ov_genai.Generator): def __init__(self, seed): ov_genai.Generator.__init__(self) self.generator = torch.Generator(device="cpu").manual_seed(seed) - + def next(self): return torch.randn(1, generator=self.generator, dtype=torch.float32).item() - + def randn_tensor(self, shape: ov.Shape): torch_tensor = torch.randn(list(shape), generator=self.generator, dtype=torch.float32) return ov.Tensor(torch_tensor.numpy()) - - + + prompt = "a beautiful pink unicorn, 8k" num_inference_steps = 4 - + random_generator = Generator(1234567) - + image_tensor = pipe.generate(prompt, width=512, height=512, num_inference_steps=4, num_images_per_prompt=1, generator=random_generator) - + image = Image.fromarray(image_tensor.data[0]) - + image @@ -793,16 +793,16 @@ Interactive demo import random import gradio as gr import numpy as np - + MAX_SEED = np.iinfo(np.int32).max - - + + def randomize_seed_fn(seed: int, randomize_seed: bool) -> int: if randomize_seed: seed = random.randint(0, MAX_SEED) return seed - - + + def generate( prompt: str, seed: int = 0, @@ -828,11 +828,11 @@ Interactive demo url="https://raw.githubusercontent.com/openvinotoolkit/openvino_notebooks/latest/notebooks/latent-consistency-models-image-generation/gradio_helper.py" ) open("gradio_helper.py", "w").write(r.text) - + from gradio_helper import make_demo_lcm - + demo = make_demo_lcm(fn=generate) - + try: demo.queue().launch(debug=False) except Exception: diff --git a/docs/notebooks/llm-chatbot-generate-api-with-output.rst b/docs/notebooks/llm-chatbot-generate-api-with-output.rst index c09b463ae985d0..ae0f3c320da485 100644 --- a/docs/notebooks/llm-chatbot-generate-api-with-output.rst +++ b/docs/notebooks/llm-chatbot-generate-api-with-output.rst @@ -39,7 +39,7 @@ The tutorial consists of the following steps: - Compress model weights to 4-bit or 8-bit data types using `NNCF `__ - Create a chat inference pipeline with `OpenVINO Generate - API `__. + API `__. - Run chat pipeline with `Gradio `__. Installation Instructions @@ -81,9 +81,9 @@ Install required dependencies .. code:: ipython3 import os - + os.environ["GIT_CLONE_PROTECTION_ACTIVE"] = "false" - + %pip install -Uq pip %pip uninstall -q -y optimum optimum-intel %pip install -q -U "openvino>=2024.3.0" openvino-tokenizers[transformers] openvino-genai @@ -103,12 +103,12 @@ Install required dependencies from pathlib import Path import requests import shutil - + # fetch model configuration - + config_shared_path = Path("../../utils/llm_config.py") config_dst_path = Path("llm_config.py") - + if not config_dst_path.exists(): if config_shared_path.exists(): try: @@ -127,7 +127,7 @@ Install required dependencies r = requests.get(url="https://raw.githubusercontent.com/openvinotoolkit/openvino_notebooks/latest/utils/llm_config.py") with open("llm_config.py", "w", encoding="utf-8") as f: f.write(r.text) - + if not Path("notebook_utils.py").exists(): r = requests.get(url="https://raw.githubusercontent.com/openvinotoolkit/openvino_notebooks/latest/utils/notebook_utils.py") open("notebook_utils.py", "w").write(r.text) @@ -238,7 +238,7 @@ Click here to see available models options .. code:: python - # login to huggingfacehub to get access to pretrained model + # login to huggingfacehub to get access to pretrained model from huggingface_hub import notebook_login, whoami @@ -270,7 +270,7 @@ Click here to see available models options .. 
code:: python - # login to huggingfacehub to get access to pretrained model + # login to huggingfacehub to get access to pretrained model from huggingface_hub import notebook_login, whoami @@ -304,7 +304,7 @@ Click here to see available models options .. code:: python - # login to huggingfacehub to get access to pretrained model + # login to huggingfacehub to get access to pretrained model from huggingface_hub import notebook_login, whoami @@ -338,7 +338,7 @@ Click here to see available models options .. code:: python - # login to huggingfacehub to get access to pretrained model + # login to huggingfacehub to get access to pretrained model from huggingface_hub import notebook_login, whoami @@ -399,7 +399,7 @@ Click here to see available models options .. code:: python - # login to huggingfacehub to get access to pretrained model + # login to huggingfacehub to get access to pretrained model from huggingface_hub import notebook_login, whoami @@ -432,7 +432,7 @@ Click here to see available models options .. code:: python - # login to huggingfacehub to get access to pretrained model + # login to huggingfacehub to get access to pretrained model from huggingface_hub import notebook_login, whoami @@ -466,7 +466,7 @@ Click here to see available models options .. code:: python - # login to huggingfacehub to get access to pretrained model + # login to huggingfacehub to get access to pretrained model from huggingface_hub import notebook_login, whoami @@ -500,7 +500,7 @@ Click here to see available models options .. code:: python - # login to huggingfacehub to get access to pretrained model + # login to huggingfacehub to get access to pretrained model from huggingface_hub import notebook_login, whoami @@ -531,7 +531,7 @@ Click here to see available models options .. code:: python - # login to huggingfacehub to get access to pretrained model + # login to huggingfacehub to get access to pretrained model from huggingface_hub import notebook_login, whoami @@ -644,9 +644,9 @@ Click here to see available models options .. code:: ipython3 from llm_config import get_llm_selection_widget - + form, lang, model_id_widget, compression_variant, use_preconverted = get_llm_selection_widget() - + form @@ -668,7 +668,7 @@ Click here to see available models options .. parsed-literal:: Selected model qwen2-0.5b-instruct with INT4 compression - + Convert model using Optimum-CLI tool ------------------------------------ @@ -676,7 +676,7 @@ Convert model using Optimum-CLI tool `Optimum Intel `__ -is the interface between the +is the interface between the `Transformers `__ and `Diffusers `__ libraries and OpenVINO to accelerate end-to-end pipelines on Intel architectures. @@ -776,28 +776,28 @@ be additionally applied during model export with INT4 precision using .. code:: ipython3 from llm_config import convert_and_compress_model - + model_dir = convert_and_compress_model(model_id, model_configuration, compression_variant.value, use_preconverted.value) .. parsed-literal:: ✅ INT4 qwen2-0.5b-instruct model already converted and can be found in qwen2/INT4_compressed_weights - + Let’s compare model size for different compression types .. code:: ipython3 from llm_config import compare_model_size - + compare_model_size(model_dir) .. parsed-literal:: Size of model with INT4 compressed weights is 358.86 MB - + Select device for inference --------------------------- @@ -807,9 +807,9 @@ Select device for inference .. 
code:: ipython3 from notebook_utils import device_widget - + device = device_widget(default="CPU", exclude=["NPU"]) - + device @@ -830,7 +830,7 @@ Instantiate pipeline with OpenVINO Generate API `OpenVINO Generate -API `__ +API `__ can be used to create pipelines to run an inference with OpenVINO Runtime. @@ -852,14 +852,14 @@ of the available generation parameters more deeply later. .. code:: ipython3 import openvino_genai as ov_genai - + print(f"Loading model from {model_dir}\n") - - + + pipe = ov_genai.LLMPipeline(str(model_dir), device.value) - + generation_config = pipe.get_generation_config() - + input_prompt = "The Sun is yellow bacause" print(f"Input text: {input_prompt}") print(pipe.generate(input_prompt, max_new_tokens=10)) @@ -868,10 +868,10 @@ of the available generation parameters more deeply later. .. parsed-literal:: Loading model from qwen2/INT4_compressed_weights - + Input text: The Sun is yellow bacause it is made of hydrogen and oxygen atoms. The - + Run Chatbot ----------- @@ -1022,11 +1022,11 @@ Click here to see detailed description of advanced options if not Path("gradio_helper_genai.py").exists(): r = requests.get(url="https://raw.githubusercontent.com/openvinotoolkit/openvino_notebooks/latest/notebooks/llm-chatbot/gradio_helper_genai.py") open("gradio_helper_genai.py", "w").write(r.text) - + from gradio_helper_genai import make_demo - + demo = make_demo(pipe, model_configuration, model_id, lang.value) - + try: demo.launch(debug=True) except Exception: diff --git a/docs/notebooks/llm-chatbot-with-output.rst b/docs/notebooks/llm-chatbot-with-output.rst index 0d214f5cccc0fc..34514278e1a342 100644 --- a/docs/notebooks/llm-chatbot-with-output.rst +++ b/docs/notebooks/llm-chatbot-with-output.rst @@ -28,7 +28,7 @@ Intel `__ library is used to convert the models to OpenVINO™ IR format and to create inference pipeline. The inference pipeline can also be created using `OpenVINO Generate -API `__, +API `__, the example of that, please, see in the notebook `LLM chatbot with OpenVINO Generate API `__ diff --git a/docs/notebooks/llm-question-answering-with-output.rst b/docs/notebooks/llm-question-answering-with-output.rst index f9c792ba1657d6..bac09f045919c3 100644 --- a/docs/notebooks/llm-question-answering-with-output.rst +++ b/docs/notebooks/llm-question-answering-with-output.rst @@ -168,7 +168,7 @@ The available options are: .. code:: python - ## login to huggingfacehub to get access to pretrained model + ## login to huggingfacehub to get access to pretrained model from huggingface_hub import notebook_login, whoami @@ -182,14 +182,14 @@ The available options are: from pathlib import Path import requests - + # Fetch `notebook_utils` module r = requests.get( url="https://raw.githubusercontent.com/openvinotoolkit/openvino_notebooks/latest/utils/notebook_utils.py", ) open("notebook_utils.py", "w").write(r.text) from notebook_utils import download_file, device_widget - + if not Path("./config.py").exists(): download_file(url="https://raw.githubusercontent.com/openvinotoolkit/openvino_notebooks/latest/notebooks/llm-question-answering/config.py") from config import SUPPORTED_LLM_MODELS @@ -198,14 +198,14 @@ The available options are: .. code:: ipython3 model_ids = list(SUPPORTED_LLM_MODELS) - + model_id = widgets.Dropdown( options=model_ids, value=model_ids[1], description="Model:", disabled=False, ) - + model_id @@ -300,7 +300,7 @@ compression. .. 
code:: ipython3 from IPython.display import display, Markdown - + prepare_int4_model = widgets.Checkbox( value=True, description="Prepare INT4 model", @@ -316,7 +316,7 @@ compression. description="Prepare FP16 model", disabled=False, ) - + display(prepare_int4_model) display(prepare_int8_model) display(prepare_fp16_model) @@ -346,17 +346,17 @@ compression. import logging import openvino as ov import nncf - + nncf.set_log_level(logging.ERROR) - + pt_model_id = model_configuration["model_id"] fp16_model_dir = Path(model_id.value) / "FP16" int8_model_dir = Path(model_id.value) / "INT8_compressed_weights" int4_model_dir = Path(model_id.value) / "INT4_compressed_weights" - + core = ov.Core() - - + + def convert_to_fp16(): if (fp16_model_dir / "openvino_model.xml").exists(): return @@ -365,8 +365,8 @@ compression. display(Markdown("**Export command:**")) display(Markdown(f"`{export_command}`")) ! $export_command - - + + def convert_to_int8(): if (int8_model_dir / "openvino_model.xml").exists(): return @@ -376,8 +376,8 @@ compression. display(Markdown("**Export command:**")) display(Markdown(f"`{export_command}`")) ! $export_command - - + + def convert_to_int4(): compression_configs = { "mistral-7b": { @@ -398,7 +398,7 @@ compression. "ratio": 0.8, }, } - + model_compression_params = compression_configs.get(model_id.value, compression_configs["default"]) if (int4_model_dir / "openvino_model.xml").exists(): return @@ -411,8 +411,8 @@ compression. display(Markdown("**Export command:**")) display(Markdown(f"`{export_command}`")) ! $export_command - - + + if prepare_fp16_model.value: convert_to_fp16() if prepare_int8_model.value: @@ -433,7 +433,7 @@ Let’s compare model size for different compression types fp16_weights = fp16_model_dir / "openvino_model.bin" int8_weights = int8_model_dir / "openvino_model.bin" int4_weights = int4_model_dir / "openvino_model.bin" - + if fp16_weights.exists(): print(f"Size of FP16 model is {fp16_weights.stat().st_size / 1024 / 1024:.2f} MB") for precision, compressed_weights in zip([8, 4], [int8_weights, int4_weights]): @@ -463,9 +463,9 @@ Select device for inference and model variant .. code:: ipython3 core = ov.Core() - + device = device_widget("CPU", exclude=["NPU"]) - + device @@ -486,14 +486,14 @@ Select device for inference and model variant available_models.append("INT8") if fp16_model_dir.exists(): available_models.append("FP16") - + model_to_run = widgets.Dropdown( options=available_models, value=available_models[0], description="Model to run:", disabled=False, ) - + model_to_run @@ -509,7 +509,7 @@ Select device for inference and model variant from transformers import AutoTokenizer from openvino_tokenizers import convert_tokenizer - + if model_to_run.value == "INT4": model_dir = int4_model_dir elif model_to_run.value == "INT8": @@ -517,7 +517,7 @@ Select device for inference and model variant else: model_dir = fp16_model_dir print(f"Loading model from {model_dir}") - + # optionally convert tokenizer if used cached model without it if not (model_dir / "openvino_tokenizer.xml").exists() or not (model_dir / "openvino_detokenizer.xml").exists(): hf_tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True) @@ -559,7 +559,7 @@ You can find more information about the most popular decoding methods in this `blog `__. To simplify user experience we will use `OpenVINO Generate -API `__. +API `__. Firstly we will create pipeline with ``LLMPipeline``. ``LLMPipeline`` is the main object used for decoding. 
You can construct it straight away from the folder with the converted model. It will automatically load the @@ -582,7 +582,7 @@ generation is finished, we will write class-iterator based on .. code:: ipython3 import openvino_genai as ov_genai - + pipe = ov_genai.LLMPipeline(model_dir.as_posix(), device.value) print(pipe.generate("The Sun is yellow bacause", temperature=1.2, top_k=4, do_sample=True, max_new_tokens=150)) @@ -690,10 +690,10 @@ when it is needed. It will help estimate performance. .. code:: ipython3 core = ov.Core() - + detokinizer_dir = Path(model_dir, "openvino_detokenizer.xml") - - + + class TextIteratorStreamer(ov_genai.StreamerBase): def __init__(self, tokenizer): super().__init__() @@ -701,17 +701,17 @@ when it is needed. It will help estimate performance. self.compiled_detokenizer = core.compile_model(detokinizer_dir.as_posix()) self.text_queue = Queue() self.stop_signal = None - + def __iter__(self): return self - + def __next__(self): value = self.text_queue.get() if value == self.stop_signal: raise StopIteration() else: return value - + def put(self, token_id): openvino_output = self.compiled_detokenizer(np.array([[token_id]], dtype=int)) text = str(openvino_output["string_output"][0]) @@ -719,7 +719,7 @@ when it is needed. It will help estimate performance. text = text.lstrip("!") text = re.sub("<.*>", "", text) self.text_queue.put(text) - + def end(self): self.text_queue.put(self.stop_signal) @@ -744,7 +744,7 @@ parameter and returns model response. ): """ Text generation function - + Parameters: user_text (str): User-provided instruction for a generation. top_p (float): Nucleus sampling. If set to < 1, only the smallest set of most probable tokens with probabilities that add up to top_p or higher are kept for a generation. @@ -756,7 +756,7 @@ parameter and returns model response. model_output (str) - model-generated text perf_text (str) - updated perf text filed content """ - + # setup config for decoding stage config = pipe.get_generation_config() config.temperature = temperature @@ -765,13 +765,13 @@ parameter and returns model response. config.top_p = top_p config.do_sample = True config.max_new_tokens = max_new_tokens - + # Start generation on a separate thread, so that we don't block the UI. The text is pulled from the streamer # in the main thread. streamer = TextIteratorStreamer(pipe.get_tokenizer()) t = Thread(target=pipe.generate, args=(user_text, config, streamer)) t.start() - + model_output = "" per_token_time = [] num_tokens = 0 @@ -803,13 +803,13 @@ elements. ): """ Helper function for performance estimation - + Parameters: current_time (float): This step time in seconds. current_perf_text (str): Current content of performance UI field. per_token_time (List[float]): history of performance from previous steps. num_tokens (int): Total number of generated tokens. 
- + Returns: update for performance text field update for a total number of tokens @@ -852,11 +852,11 @@ generation parameters: if not Path("gradio_helper.py").exists(): r = requests.get(url="https://raw.githubusercontent.com/openvinotoolkit/openvino_notebooks/latest/notebooks/llm-question-answering/gradio_helper.py") open("gradio_helper.py", "w").write(r.text) - + from gradio_helper import make_demo - + demo = make_demo(run_fn=run_generation, title=f"Question Answering with {model_id.value} and OpenVINO") - + try: demo.queue().launch(height=800) except Exception: diff --git a/docs/notebooks/nuextract-structure-extraction-with-output.rst b/docs/notebooks/nuextract-structure-extraction-with-output.rst index 8dd88ca62bd161..52bd096b67b1ac 100644 --- a/docs/notebooks/nuextract-structure-extraction-with-output.rst +++ b/docs/notebooks/nuextract-structure-extraction-with-output.rst @@ -92,17 +92,17 @@ Prerequisites from pathlib import Path import requests import shutil - + if not Path("notebook_utils.py").exists(): r = requests.get(url="https://raw.githubusercontent.com/openvinotoolkit/openvino_notebooks/latest/utils/notebook_utils.py") open("notebook_utils.py", "w").write(r.text) - + from notebook_utils import download_file - + # Fetch llm_config.py llm_config_shared_path = Path("../../utils/llm_config.py") llm_config_dst_path = Path("llm_config.py") - + if not llm_config_dst_path.exists(): if llm_config_shared_path.exists(): try: @@ -150,15 +150,15 @@ dataset for information extraction. .. code:: ipython3 from llm_config import get_llm_selection_widget - + models = { "NuExtract_tiny": {"model_id": "numind/NuExtract-tiny"}, "NuExtract": {"model_id": "numind/NuExtract"}, "NuExtract_large": {"model_id": "numind/NuExtract-large"}, } - + form, _, model_dropdown, compression_dropdown, _ = get_llm_selection_widget(languages=None, models=models, show_preconverted_checkbox=False) - + form @@ -180,7 +180,7 @@ dataset for information extraction. .. parsed-literal:: Selected model NuExtract_tiny with INT4 compression - + Download and convert model to OpenVINO IR via Optimum Intel CLI --------------------------------------------------------------- @@ -243,14 +243,14 @@ parameters. An example of this approach usage you can find in .. code:: ipython3 from llm_config import convert_and_compress_model - + model_dir = convert_and_compress_model(model_name, model_config, compression_dropdown.value, use_preconverted=False) .. parsed-literal:: ⌛ NuExtract_tiny conversion to INT4 started. It may takes some time. - + **Export command:** @@ -271,7 +271,7 @@ parameters. An example of this approach usage you can find in if sequence_length != 1: /home/ytarkan/miniconda3/envs/ov_notebooks_env/lib/python3.9/site-packages/transformers/models/qwen2/modeling_qwen2.py:110: TracerWarning: Converting a tensor to a Python boolean might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs! if seq_len > self.max_seq_len_cached: - + .. parsed-literal:: @@ -291,26 +291,26 @@ parameters. An example of this approach usage you can find in Set tokenizer padding side to left for `text-generation-with-past` task. Replacing `(?!\S)` pattern to `(?:$|[^\S])` in RegexSplit operation - + .. parsed-literal:: ✅ INT4 NuExtract_tiny model converted and can be found in NuExtract_tiny/INT4_compressed_weights - + Let’s compare model size for different compression types .. 
code:: ipython3 from llm_config import compare_model_size - + compare_model_size(model_dir) .. parsed-literal:: Size of model with INT4 compressed weights is 347.03 MB - + Select device for inference and model variant --------------------------------------------- @@ -323,9 +323,9 @@ Select device for inference and model variant .. code:: ipython3 from notebook_utils import device_widget - + device = device_widget(default="CPU", exclude=["NPU"]) - + device @@ -360,20 +360,20 @@ potentially improving accuracy for complex or ambiguous cases. import json from typing import List - - + + def prepare_input(text: str, schema: str, examples: List[str] = ["", "", ""]) -> str: schema = json.dumps(json.loads(schema), indent=4) input_llm = "<|input|>\n### Template:\n" + schema + "\n" for example in examples: if example != "": input_llm += "### Example:\n" + json.dumps(json.loads(example), indent=4) + "\n" - + input_llm += "### Text:\n" + text + "\n<|output|>\n" return input_llm To simplify user experience we will use `OpenVINO Generate -API `__. +API `__. We will create pipeline with ``LLMPipeline``. ``LLMPipeline`` is the main object used for decoding. You can construct it straight away from the folder with the converted model. It will automatically load the @@ -392,10 +392,10 @@ LLMPipeline. .. code:: ipython3 import openvino_genai as ov_genai - + pipe = ov_genai.LLMPipeline(model_dir.as_posix(), device.value) - - + + def run_structure_extraction(text: str, schema: str) -> str: input = prepare_input(text, schema) return pipe.generate(input, max_new_tokens=200) @@ -417,7 +417,7 @@ schema format: automated benchmarks. Our models are released under the Apache 2.0 license. Code: https://github.com/mistralai/mistral-src Webpage: https://mistral.ai/news/announcing-mistral-7b/""" - + schema = """{ "Model": { "Name": "", @@ -430,7 +430,7 @@ schema format: "Licence": "" } }""" - + output = run_structure_extraction(text, schema) print(output) @@ -456,8 +456,8 @@ schema format: "Licence": "Apache 2.0" } } - - + + Run interactive structure extraction demo with Gradio ----------------------------------------------------- @@ -471,11 +471,11 @@ Run interactive structure extraction demo with Gradio url="https://raw.githubusercontent.com/openvinotoolkit/openvino_notebooks/latest/notebooks/nuextract-structure-extraction/gradio_helper.py" ) open("gradio_helper.py", "w").write(r.text) - + from gradio_helper import make_demo - + demo = make_demo(fn=run_structure_extraction) - + try: demo.launch(height=800) except Exception: diff --git a/docs/notebooks/openvino-tokenizers-with-output.rst b/docs/notebooks/openvino-tokenizers-with-output.rst index 3f78b5a82ff46f..0a1180747b9660 100644 --- a/docs/notebooks/openvino-tokenizers-with-output.rst +++ b/docs/notebooks/openvino-tokenizers-with-output.rst @@ -84,7 +84,7 @@ Some tasks only need a tokenizer, like text classification, named entity recognition, question answering, and feature extraction. On the other hand, for tasks such as text generation, chat, translation, and abstractive summarization, both a tokenizer and a detokenizer are -required. +required. This is a self-contained example that relies solely on its own code. @@ -112,8 +112,8 @@ use ``pip install openvino-tokenizers[transformers]``. .. code:: ipython3 from pathlib import Path - - + + tokenizer_dir = Path("tokenizer/") model_id = "TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T" @@ -141,7 +141,7 @@ constructor. Converting Huggingface Tokenizer to OpenVINO... 
Saved OpenVINO Tokenizer: tokenizer/openvino_tokenizer.xml, tokenizer/openvino_tokenizer.bin Saved OpenVINO Detokenizer: tokenizer/openvino_detokenizer.xml, tokenizer/openvino_detokenizer.bin - + ⚠️ If you have any problems with the command above on MacOS, try to `install tbb `__. @@ -163,8 +163,8 @@ The other method is to pass HuggingFace ``hf_tokenizer`` object to from transformers import AutoTokenizer from openvino_tokenizers import convert_tokenizer - - + + hf_tokenizer = AutoTokenizer.from_pretrained(model_id) ov_tokenizer, ov_detokenizer = convert_tokenizer(hf_tokenizer, with_detokenizer=True) ov_tokenizer, ov_detokenizer @@ -202,11 +202,11 @@ from OpenVINO to reuse converted tokenizers later: .. code:: ipython3 import openvino as ov - + # This import is needed to add all tokenizer-related operations to OpenVINO import openvino_tokenizers # noqa: F401 - - + + ov.save_model(ov_tokenizer, tokenizer_dir / "openvino_tokenizer.xml") ov.save_model(ov_detokenizer, tokenizer_dir / "openvino_detokenizer.xml") @@ -225,10 +225,10 @@ tasks, but not suitable for text generation. tokenizer, detokenizer = ov.compile_model(ov_tokenizer), ov.compile_model(ov_detokenizer) test_strings = ["Test", "strings"] - + token_ids = tokenizer(test_strings)["input_ids"] print(f"Token ids: {token_ids}") - + detokenized_text = detokenizer(token_ids)["string_output"] print(f"Detokenized text: {detokenized_text}") @@ -238,7 +238,7 @@ tasks, but not suitable for text generation. Token ids: [[ 1 4321] [ 1 6031]] Detokenized text: ['Test' 'strings'] - + We can compare the result of converted (de)tokenizer with the original one: @@ -247,7 +247,7 @@ one: hf_token_ids = hf_tokenizer(test_strings).input_ids print(f"Token ids: {hf_token_ids}") - + hf_detokenized_text = hf_tokenizer.batch_decode(hf_token_ids) print(f"Detokenized text: {hf_detokenized_text}") @@ -256,7 +256,7 @@ one: Token ids: [[1, 4321], [1, 6031]] Detokenized text: [' Test', ' strings'] - + Text Generation Pipeline with OpenVINO Tokenizers ------------------------------------------------- @@ -277,15 +277,15 @@ command is commented. .. code:: ipython3 model_dir = Path(Path(model_id).name) - + if not model_dir.exists(): # Converting the original model # %pip install -U "git+https://github.com/huggingface/optimum-intel.git" "nncf>=2.8.0" onnx # %optimum-cli export openvino -m $model_id --task text-generation-with-past $model_dir - + # Load already converted model from huggingface_hub import hf_hub_download - + hf_hub_download( "chgk13/TinyLlama-1.1B-intermediate-step-1431k-3T", filename="openvino_model.xml", @@ -303,10 +303,10 @@ command is commented. from tqdm.notebook import trange from pathlib import Path from openvino_tokenizers.constants import EOS_TOKEN_ID_NAME - - + + core = ov.Core() - + ov_model = core.read_model(model_dir / "openvino_model.xml") compiled_model = core.compile_model(ov_model) infer_request = compiled_model.create_infer_request() @@ -319,40 +319,40 @@ distinct and separate state. .. 
code:: ipython3 text_input = ["Quick brown fox jumped"] - + model_input = {name.any_name: output for name, output in tokenizer(text_input).items()} - + if "position_ids" in (input.any_name for input in infer_request.model_inputs): model_input["position_ids"] = np.arange(model_input["input_ids"].shape[1], dtype=np.int64)[np.newaxis, :] - + # No beam search, set idx to 0 model_input["beam_idx"] = np.array([0], dtype=np.int32) - + # End of sentence token is that model signifies the end of text generation # Read EOS token ID from rt_info of tokenizer/detokenizer ov.Model object eos_token = ov_tokenizer.get_rt_info(EOS_TOKEN_ID_NAME).value - + tokens_result = np.array([[]], dtype=np.int64) - + # Reset KV cache inside the model before inference infer_request.reset_state() max_infer = 5 - + for _ in trange(max_infer): infer_request.start_async(model_input) infer_request.wait() - + output_tensor = infer_request.get_output_tensor() - + # Get the most probable token token_indices = np.argmax(output_tensor.data, axis=-1) output_token = token_indices[:, -1:] - + # Concatenate previous tokens result with newly generated token tokens_result = np.hstack((tokens_result, output_token)) if output_token[0, 0] == eos_token: break - + # Prepare input for the next inference iteration model_input["input_ids"] = output_token model_input["attention_mask"] = np.hstack((model_input["attention_mask"].data, [[1]])) @@ -362,8 +362,8 @@ distinct and separate state. [[model_input["position_ids"].data.shape[-1]]], ) ) - - + + text_result = detokenizer(tokens_result)["string_output"] print(f"Prompt:\n{text_input[0]}") print(f"Generated:\n{text_result[0]}") @@ -381,7 +381,7 @@ distinct and separate state. Quick brown fox jumped Generated: over the fence. - + Text Generation Pipeline with OpenVINO GenAI and OpenVINO Tokenizers -------------------------------------------------------------------- @@ -418,18 +418,18 @@ reached. Let’s build the same text generation pipeline, but with simplified Python `OpenVINO Generate -API `__. +API `__. We will use the same model and tokenizer downloaded in previous steps. .. code:: ipython3 import openvino_genai as ov_genai - + genai_tokenizer = ov_genai.Tokenizer(str(tokenizer_dir)) pipe = ov_genai.LLMPipeline(str(model_dir), genai_tokenizer, "CPU") - + result = pipe.generate(text_input[0], max_new_tokens=max_infer) - + print(f"Prompt:\n{text_input[0]}") print(f"Generated:\n{result}") @@ -440,7 +440,7 @@ We will use the same model and tokenizer downloaded in previous steps. Quick brown fox jumped Generated: over the lazy dog. - + Merge Tokenizer into a Model ---------------------------- @@ -480,7 +480,7 @@ model has only one input for text input prompt. model_id = "mrm8488/bert-tiny-finetuned-sms-spam-detection" model_dir = Path(Path(model_id).name) - + if not model_dir.exists(): %pip install -qU git+https://github.com/huggingface/optimum-intel.git "onnx<1.16.2" !optimum-cli export openvino --model $model_id --task text-classification $model_dir @@ -488,27 +488,27 @@ model has only one input for text input prompt. .. 
code:: ipython3 from openvino_tokenizers import connect_models - - + + core = ov.Core() text_input = ["Free money!!!"] - + ov_tokenizer = core.read_model(model_dir / "openvino_tokenizer.xml") ov_model = core.read_model(model_dir / "openvino_model.xml") combined_model = connect_models(ov_tokenizer, ov_model) ov.save_model(combined_model, model_dir / "combined_openvino_model.xml") - + print("Original OpenVINO model inputs:") for input in ov_model.inputs: print(input) - + print("\nCombined OpenVINO model inputs:") for input in combined_model.inputs: print(input) - + compiled_combined_model = core.compile_model(combined_model) openvino_output = compiled_combined_model(text_input) - + print(f"\nLogits: {openvino_output['logits']}") @@ -518,12 +518,12 @@ model has only one input for text input prompt. - + Combined OpenVINO model inputs: - + Logits: [[ 1.2007061 -1.469803 ]] - + Conclusion ---------- @@ -548,6 +548,6 @@ Links Types `__ - `OpenVINO.GenAI repository with the C++ example of OpenVINO Tokenizers - usage `__ + usage `__ - `HuggingFace Tokenizers Comparison Table `__ diff --git a/docs/notebooks/whisper-asr-genai-with-output.rst b/docs/notebooks/whisper-asr-genai-with-output.rst index bef25aa3eb8c6c..ca5b7ce2ea6e3e 100644 --- a/docs/notebooks/whisper-asr-genai-with-output.rst +++ b/docs/notebooks/whisper-asr-genai-with-output.rst @@ -30,7 +30,7 @@ converts the models to OpenVINO™ IR format. To simplify the user experience, we will use `OpenVINO Generate API `__ for `Whisper automatic speech recognition -scenarios `__. +scenarios `__. Installation Instructions ~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -76,8 +76,8 @@ Prerequisites .. code:: ipython3 import platform - - + + %pip install -q "torch>=2.3" "torchvision>=0.18.1" --extra-index-url https://download.pytorch.org/whl/cpu %pip install -q "transformers>=4.45" "git+https://github.com/huggingface/optimum-intel.git" --extra-index-url https://download.pytorch.org/whl/cpu %pip install -q -U "openvino>=2024.5.0" "openvino-tokenizers>=2024.5.0" "openvino-genai>=2024.5.0" @@ -90,13 +90,13 @@ Prerequisites import requests from pathlib import Path - + if not Path("notebook_utils.py").exists(): r = requests.get( url="https://raw.githubusercontent.com/openvinotoolkit/openvino_notebooks/latest/utils/notebook_utils.py", ) open("notebook_utils.py", "w").write(r.text) - + if not Path("cmd_helper.py").exists(): r = requests.get( url="https://raw.githubusercontent.com/openvinotoolkit/openvino_notebooks/latest/utils/cmd_helper.py", @@ -127,7 +127,7 @@ arbitrary length. .. code:: ipython3 import ipywidgets as widgets - + model_ids = { "Multilingual models": [ "openai/whisper-large-v3-turbo", @@ -150,14 +150,14 @@ arbitrary length. "openai/whisper-tiny.en", ], } - + model_type = widgets.Dropdown( options=model_ids.keys(), value="Multilingual models", description="Model:", disabled=False, ) - + model_type @@ -177,7 +177,7 @@ arbitrary length. description="Model:", disabled=False, ) - + model_id @@ -193,11 +193,11 @@ arbitrary length. from transformers import AutoProcessor, AutoModelForSpeechSeq2Seq, pipeline from transformers.utils import logging - + processor = AutoProcessor.from_pretrained(model_id.value) - + pt_model = AutoModelForSpeechSeq2Seq.from_pretrained(model_id.value) - + pipe_pt = pipeline( "automatic-speech-recognition", model=pt_model, @@ -217,9 +217,9 @@ The ``pipeline`` expects audio data in numpy array format. We will use .. 
code:: ipython3 from notebook_utils import download_file - + en_example_short = Path("data", "courtroom.wav") - + # a wav sample download_file( "https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/courtroom.wav", @@ -231,7 +231,7 @@ The ``pipeline`` expects audio data in numpy array format. We will use .. parsed-literal:: 'data/courtroom.wav' already exists. - + @@ -244,7 +244,7 @@ The ``pipeline`` expects audio data in numpy array format. We will use .. code:: ipython3 import librosa - + en_raw_speech, samplerate = librosa.load(str(en_example_short), sr=16000) Let’s check how to work the ``transcribe`` task. @@ -253,13 +253,13 @@ Let’s check how to work the ``transcribe`` task. import copy import IPython.display as ipd - + logging.set_verbosity_error() - + sample = copy.deepcopy(en_raw_speech) - + display(ipd.Audio(sample, rate=samplerate)) - + pt_result = pipe_pt(sample) print(f"Result: {pt_result['text']}") @@ -267,24 +267,24 @@ Let’s check how to work the ``transcribe`` task. .. raw:: html - + - + .. parsed-literal:: /home/labuser/work/notebook/whisper_new/lib/python3.10/site-packages/transformers/models/whisper/generation_whisper.py:496: FutureWarning: The input name `inputs` is deprecated. Please make sure to use `input_features` instead. warnings.warn( - + .. parsed-literal:: Result: Colonel Jessif, did you order the code rate? You don't have to answer that question. I'll answer the question. You want answers? I think I'm entitled. You want answers? I want the truth. You can't handle the truth. - + If the multilingual model was chosen, let’s see how task ``translate`` is working. We will use ``facebook/multilingual_librispeech`` @@ -297,9 +297,9 @@ be found in the `paper `__. .. code:: ipython3 import ipywidgets as widgets - + languages = {"japanese": "ja_jp", "dutch": "da_dk", "french": "fr_fr", "spanish": "ca_es", "italian": "it_it", "portuguese": "pt_br", "polish": "pl_pl"} - + SAMPLE_LANG = None if model_type.value == "Multilingual models": SAMPLE_LANG = widgets.Dropdown( @@ -308,7 +308,7 @@ be found in the `paper `__. description="Dataset language:", disabled=False, ) - + SAMPLE_LANG @@ -323,7 +323,7 @@ be found in the `paper `__. .. code:: ipython3 from datasets import load_dataset - + mls_dataset = None if model_type.value == "Multilingual models": mls_dataset = load_dataset("google/fleurs", languages[SAMPLE_LANG.value], split="test", streaming=True, trust_remote_code=True) @@ -334,10 +334,10 @@ be found in the `paper `__. if model_type.value == "Multilingual models": sample = copy.deepcopy(mls_example["audio"]) - + display(ipd.Audio(sample["array"], rate=sample["sampling_rate"])) print(f"Reference: {mls_example['raw_transcription']}") - + pt_result = pipe_pt(sample, generate_kwargs={"task": "translate"}) print(f"\nResult: {pt_result['text']}") @@ -345,20 +345,20 @@ be found in the `paper `__. .. raw:: html - + - + .. parsed-literal:: Reference: Il blog è uno strumento che si prefigge di incoraggiare la collaborazione e sviluppare l'apprendimento degli studenti ben oltre la giornata scolastica normale. - + Result: The blog is our tool that is prefilled to encourage collaboration and develop the learning of the students and to attract a normal school class. - + Convert model to OpenVINO IR via Optimum Intel CLI -------------------------------------------------- @@ -393,9 +393,9 @@ documentation `__. 
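    # Note on the cell below: it exports the selected Whisper checkpoint to OpenVINO IR
    # through the optimum-cli helper; the NNCF log level is raised to ERROR so that
    # NNCF messages do not clutter the conversion output.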
import logging import nncf from cmd_helper import optimum_cli - + nncf.set_log_level(logging.ERROR) - + model_path = Path(model_id.value.split("/")[1]) optimum_cli(model_id.value, model_path) print(f"✅ {model_id.value} model converted and can be found in {model_path}") @@ -406,7 +406,7 @@ Run inference OpenVINO model with WhisperPipeline To simplify user experience we will use `OpenVINO Generate -API `__. +API `__. Firstly we will create pipeline with ``WhisperPipeline``. You can construct it straight away from the folder with the converted model. It will automatically load the ``model``, ``tokenizer``, ``detokenizer`` @@ -415,9 +415,9 @@ and default ``generation configuration``. .. code:: ipython3 from notebook_utils import device_widget - + device = device_widget(default="CPU", exclude=["NPU"]) - + device @@ -432,7 +432,7 @@ and default ``generation configuration``. .. code:: ipython3 import openvino_genai as ov_genai - + ov_pipe = ov_genai.WhisperPipeline(str(model_path), device=device.value) Let’s run the ``transcribe`` task. We just call ``generate`` for that @@ -441,7 +441,7 @@ and put array as input. .. code:: ipython3 genai_result = ov_pipe.generate(en_raw_speech) - + display(ipd.Audio(en_raw_speech, rate=samplerate)) print(f"Result: {genai_result}") @@ -449,18 +449,18 @@ and put array as input. .. raw:: html - + - + .. parsed-literal:: Result: Colonel Jessif, did you order the code rate? You don't have to answer that question. I'll answer the question. You want answers? I think I'm entitled. You want answers? I want the truth. You can't handle the truth. - + Whisper could provide a phrase-level timestamps for audio. Let’s try this scenario, we will specify ``return_timestamps=True`` for @@ -473,7 +473,7 @@ return ``chunks``, which contain attributes: ``text``, ``start_ts`` and .. code:: ipython3 genai_result_timestamps = ov_pipe.generate(en_raw_speech, return_timestamps=True) - + for segment in genai_result_timestamps.chunks: print(f"{segment.start_ts}sec. ---> {segment.end_ts}sec.") print(f"{segment.text}\n") @@ -483,29 +483,29 @@ return ``chunks``, which contain attributes: ``text``, ``start_ts`` and 0.0sec. ---> 3.0sec. Colonel Jessif, did you order the code rate? - + 3.0sec. ---> 4.5sec. You don't have to answer that question. - + 4.5sec. ---> 6.5sec. I'll answer the question. - + 6.5sec. ---> 8.0sec. You want answers? - + 8.0sec. ---> 9.0sec. I think I'm entitled. - + 9.0sec. ---> 10.0sec. You want answers? - + 10.0sec. ---> 11.0sec. I want the truth. - + 11.0sec. ---> 13.0sec. You can't handle the truth. - - + + Let’s see how to work the ``translate`` task. It supports for multilingual models only. For that case we will specify ``language`` and @@ -527,12 +527,12 @@ format. "portuguese": "<|pt|>", "polish": "<|pl|>", } - + if model_type.value == "Multilingual models": sample = mls_example["audio"] - + genai_result_ml = ov_pipe.generate(sample["array"], max_new_tokens=100, task="translate", language=languages_genai[SAMPLE_LANG.value]) - + display(ipd.Audio(sample["array"], rate=sample["sampling_rate"])) print(f"Reference: {mls_example['raw_transcription']}") print(f"\nResult: {genai_result_ml}") @@ -541,20 +541,20 @@ format. .. raw:: html - + - + .. parsed-literal:: Reference: Il blog è uno strumento che si prefigge di incoraggiare la collaborazione e sviluppare l'apprendimento degli studenti ben oltre la giornata scolastica normale. 
- + Result: The blog is our tool that is prefilled to encourage collaboration and develop the learning of the students and to attract a normal school class. - + Compare performance PyTorch vs OpenVINO --------------------------------------- @@ -566,8 +566,8 @@ Compare performance PyTorch vs OpenVINO import time import numpy as np from tqdm.notebook import tqdm - - + + def measure_perf(pipe, n=10, model_type="ov"): timers = [] for _ in tqdm(range(n), desc="Measuring performance"): @@ -611,7 +611,7 @@ Compare performance PyTorch vs OpenVINO Mean torch openai/whisper-tiny generation time: 0.564s Mean openvino openai/whisper-tiny generation time: 0.311s Performance openai/whisper-tiny openvino speedup: 1.815 - + Quantization ------------ @@ -641,9 +641,9 @@ Please select below whether you would like to run Whisper quantization. .. code:: ipython3 from notebook_utils import quantization_widget - + to_quantize = quantization_widget() - + to_quantize @@ -659,14 +659,14 @@ Please select below whether you would like to run Whisper quantization. # Fetch `skip_kernel_extension` module import requests - + r = requests.get( url="https://raw.githubusercontent.com/openvinotoolkit/openvino_notebooks/latest/utils/skip_kernel_extension.py", ) open("skip_kernel_extension.py", "w").write(r.text) - + ov_quantized_pipe = None - + %load_ext skip_kernel_extension Let’s load converted OpenVINO model format using Optimum-Intel to easily @@ -700,9 +700,9 @@ interface for ``automatic-speech-recognition``. .. code:: ipython3 %%skip not $to_quantize.value - + from optimum.intel.openvino import OVModelForSpeechSeq2Seq - + ov_model = OVModelForSpeechSeq2Seq.from_pretrained(str(model_path), device=device.value) ov_processor = AutoProcessor.from_pretrained(str(model_path)) @@ -722,11 +722,11 @@ improves quantization quality. .. code:: ipython3 %%skip not $to_quantize.value - + from itertools import islice from optimum.intel.openvino.quantization import InferRequestWrapper - - + + def collect_calibration_dataset(ov_model: OVModelForSpeechSeq2Seq, calibration_dataset_size: int): # Overwrite model request properties, saving the original ones for restoring later encoder_calibration_data = [] @@ -735,7 +735,7 @@ improves quantization quality. ov_model.decoder_with_past.request = InferRequestWrapper(ov_model.decoder_with_past.request, decoder_calibration_data, apply_caching=True) - + pipe = pipeline( "automatic-speech-recognition", model=ov_model, @@ -750,7 +750,7 @@ improves quantization quality. finally: ov_model.encoder.request = ov_model.encoder.request.request ov_model.decoder_with_past.request = ov_model.decoder_with_past.request.request - + return encoder_calibration_data, decoder_calibration_data Quantize Whisper encoder and decoder models @@ -766,20 +766,20 @@ negligible. .. code:: ipython3 %%skip not $to_quantize.value - + import gc import shutil import nncf import openvino as ov from datasets import load_dataset from tqdm.notebook import tqdm - - - + + + CALIBRATION_DATASET_SIZE = 30 quantized_model_path = Path(f"{model_path}-quantized") - - + + def quantize(ov_model: OVModelForSpeechSeq2Seq, calibration_dataset_size: int): if not quantized_model_path.exists(): encoder_calibration_data, decoder_calibration_data = collect_calibration_dataset( @@ -798,7 +798,7 @@ negligible. del quantized_encoder del encoder_calibration_data gc.collect() - + print("Quantizing decoder with past") quantized_decoder_with_past = nncf.quantize( ov_model.decoder_with_past.model, @@ -812,7 +812,7 @@ negligible. 
del quantized_decoder_with_past del decoder_calibration_data gc.collect() - + # Copy the config file and the first-step-decoder manually shutil.copy(model_path / "config.json", quantized_model_path / "config.json") shutil.copy(model_path / "generation_config.json", quantized_model_path / "generation_config.json") @@ -830,11 +830,11 @@ negligible. shutil.copy(model_path / "normalizer.json", quantized_model_path / "normalizer.json") shutil.copy(model_path / "merges.txt", quantized_model_path / "merges.txt") shutil.copy(model_path / "added_tokens.json", quantized_model_path / "added_tokens.json") - + quantized_ov_pipe = ov_genai.WhisperPipeline(str(quantized_model_path), device=device.value) return quantized_ov_pipe - - + + ov_quantized_pipe = quantize(ov_model, CALIBRATION_DATASET_SIZE) Run quantized model inference @@ -848,12 +848,12 @@ models. .. code:: ipython3 %%skip not $to_quantize.value - + genai_result = ov_pipe.generate(en_raw_speech) quantized_genai_result = ov_quantized_pipe.generate(en_raw_speech) - + display(ipd.Audio(en_raw_speech, rate=samplerate)) - + print(f"Original : {genai_result}") print(f"Quantized: {quantized_genai_result}") @@ -861,19 +861,19 @@ models. .. raw:: html - + - + .. parsed-literal:: Original : Colonel Jessif, did you order the code rate? You don't have to answer that question. I'll answer the question. You want answers? I think I'm entitled. You want answers? I want the truth. You can't handle the truth. Quantized: Don, I'll just, if you order the code right. You don have to answer that question. I'll answer the question. You want answers. I think I'm entitled you want answer. I want the truth. You can't handle the truth. You can't handle the truth. - + Compare performance and accuracy of the original and quantized models ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -889,38 +889,38 @@ for Word Error Rate. .. code:: ipython3 %%skip not $to_quantize.value - + import time from contextlib import contextmanager from jiwer import wer, wer_standardize - - + + TEST_DATASET_SIZE = 50 - + def calculate_transcription_time_and_accuracy(ov_model, test_samples): whole_infer_times = [] - + ground_truths = [] predictions = [] for data_item in tqdm(test_samples, desc="Measuring performance and accuracy"): - + start_time = time.perf_counter() transcription = ov_model.generate(data_item["audio"]["array"]) end_time = time.perf_counter() whole_infer_times.append(end_time - start_time) - + ground_truths.append(data_item["text"]) predictions.append(transcription.texts[0]) - + word_accuracy = (1 - wer(ground_truths, predictions, reference_transform=wer_standardize, hypothesis_transform=wer_standardize)) * 100 mean_whole_infer_time = sum(whole_infer_times) return word_accuracy, mean_whole_infer_time - + test_dataset = load_dataset("openslr/librispeech_asr", "clean", split="test", streaming=True, trust_remote_code=True) test_dataset = test_dataset.shuffle(seed=42).take(TEST_DATASET_SIZE) test_samples = [sample for sample in test_dataset] - + accuracy_original, times_original = calculate_transcription_time_and_accuracy(ov_pipe, test_samples) accuracy_quantized, times_quantized = calculate_transcription_time_and_accuracy(ov_quantized_pipe, test_samples) print(f"Whole pipeline performance speedup: {times_original / times_quantized:.3f}") @@ -945,7 +945,7 @@ for Word Error Rate. Whole pipeline performance speedup: 1.350 Whisper transcription word accuracy. Original model: 82.88%. Quantized model: 84.13%. Accuracy drop: -1.25%. 
- + Interactive demo ---------------- @@ -959,19 +959,19 @@ upload button) or record using your microphone. .. code:: ipython3 import requests - + if not Path("gradio_helper.py").exists(): r = requests.get(url="https://raw.githubusercontent.com/openvinotoolkit/openvino_notebooks/latest/notebooks/whisper-asr-genai/gradio_helper.py") open("gradio_helper.py", "w").write(r.text) - + from gradio_helper import make_demo, GradioPipeline - + pipe = ov_quantized_pipe if to_quantize.value else ov_pipe - + gr_pipeline = GradioPipeline(pipe, model_id.value, quantized=to_quantize.value) - + demo = make_demo(gr_pipeline) - + try: demo.launch(debug=True) except Exception: diff --git a/docs/notebooks/whisper-subtitles-generation-with-output.rst b/docs/notebooks/whisper-subtitles-generation-with-output.rst index 02bca63007b4f5..d25c094bac5bef 100644 --- a/docs/notebooks/whisper-subtitles-generation-with-output.rst +++ b/docs/notebooks/whisper-subtitles-generation-with-output.rst @@ -81,19 +81,19 @@ Install dependencies. import platform import importlib.metadata import importlib.util - + %pip install -q "nncf>=2.14.0" %pip install -q -U "openvino>=2024.5.0" "openvino-tokenizers>=2024.5.0" "openvino-genai>=2024.5.0" %pip install -q "python-ffmpeg<=1.0.16" "ffmpeg" "moviepy" "transformers>=4.45" "git+https://github.com/huggingface/optimum-intel.git" "torch>=2.1" --extra-index-url https://download.pytorch.org/whl/cpu %pip install -q -U "yt_dlp>=2024.8.6" soundfile librosa jiwer packaging %pip install -q "gradio>=4.19" "typing_extensions>=4.9" - + if platform.system() == "Darwin": %pip install -q "numpy<2.0" - - + + from packaging import version - + if ( importlib.util.find_spec("tensorflow") is not None and version.parse(importlib.metadata.version("tensorflow")) < version.parse("2.18.0") @@ -105,13 +105,13 @@ Install dependencies. import requests from pathlib import Path - + if not Path("notebook_utils.py").exists(): r = requests.get( url="https://raw.githubusercontent.com/openvinotoolkit/openvino_notebooks/latest/utils/notebook_utils.py", ) open("notebook_utils.py", "w").write(r.text) - + if not Path("cmd_helper.py").exists(): r = requests.get( url="https://raw.githubusercontent.com/openvinotoolkit/openvino_notebooks/latest/utils/cmd_helper.py", @@ -147,7 +147,7 @@ Whisper family. .. code:: ipython3 import ipywidgets as widgets - + MODELS = [ "openai/whisper-large-v3-turbo", "openai/whisper-large-v3", @@ -158,14 +158,14 @@ Whisper family. "openai/whisper-base", "openai/whisper-tiny", ] - + model_id = widgets.Dropdown( options=list(MODELS), value="openai/whisper-tiny", description="Model:", disabled=False, ) - + model_id @@ -208,9 +208,9 @@ documentation `__. .. code:: ipython3 from cmd_helper import optimum_cli - + model_dir = model_id.value.split("/")[-1] - + if not Path(model_dir).exists(): optimum_cli(model_id.value, model_dir) @@ -244,9 +244,9 @@ select device from dropdown list for running inference using OpenVINO .. code:: ipython3 from notebook_utils import device_widget - + device = device_widget(default="CPU", exclude=["NPU"]) - + device @@ -261,7 +261,7 @@ select device from dropdown list for running inference using OpenVINO .. code:: ipython3 import openvino_genai as ov_genai - + ov_pipe = ov_genai.WhisperPipeline(str(model_dir), device=device.value) Run video transcription pipeline @@ -274,9 +274,9 @@ Now, we are ready to start transcription. Let’s load the video first. .. 
code:: ipython3 from notebook_utils import download_file - + output_file = Path("downloaded_video.mp4") - + download_file( "https://storage.openvinotoolkit.org/repositories/openvino_notebooks/data/data/video/Sheldon%20Cooper%20Jim%20Parsons%20at%20Intels%20Lab.mp4", filename=output_file.name, @@ -329,13 +329,13 @@ Select the task for the model: except ImportError: from moviepy.editor import VideoFileClip from transformers.pipelines.audio_utils import ffmpeg_read - - + + def get_audio(video_file): """ Extract audio signal from a given video file, then convert it to float, then mono-channel format and resample it to the expected sample rate - + Parameters: video_file: path to input video file Returns: @@ -372,33 +372,33 @@ return ``chunks``, which contain attributes: ``text``, ``start_ts`` and .. code:: ipython3 inputs, duration = get_audio(output_file) - + transcription = ov_pipe.generate(inputs["raw"], task=task.value, return_timestamps=True).chunks .. code:: ipython3 import math - - + + def format_timestamp(seconds: float): """ format time in srt-file expected format """ assert seconds >= 0, "non-negative timestamp expected" milliseconds = round(seconds * 1000.0) - + hours = milliseconds // 3_600_000 milliseconds -= hours * 3_600_000 - + minutes = milliseconds // 60_000 milliseconds -= minutes * 60_000 - + seconds = milliseconds // 1_000 milliseconds -= seconds * 1_000 - + return (f"{hours}:" if hours > 0 else "00:") + f"{minutes:02d}:{seconds:02d},{milliseconds:03d}" - - + + def prepare_srt(transcription, filter_duration=None): """ Format transcription into srt file format @@ -409,7 +409,7 @@ return ``chunks``, which contain attributes: ``text``, ``start_ts`` and # for the case where the model could not predict an ending timestamp, which can happen if audio is cut off in the middle of a word. if segment.end_ts == -1: timestamp[1] = filter_duration - + if filter_duration is not None and (timestamp[0] >= math.floor(filter_duration) or timestamp[1] > math.ceil(filter_duration) + 1): break segment_lines.append(str(idx + 1) + "\n") @@ -458,40 +458,40 @@ Now let us see the results. 1 00:00:00,000 --> 00:00:05,000 Oh, what's that? - + 2 00:00:05,000 --> 00:00:08,000 Oh, wow. - + 3 00:00:08,000 --> 00:00:10,000 Hello, humans. - + 4 00:00:13,000 --> 00:00:15,000 Focus on me. - + 5 00:00:15,000 --> 00:00:17,000 Focus on the guard. - + 6 00:00:17,000 --> 00:00:20,000 Don't tell anyone what you're seeing in here. - + 7 00:00:22,000 --> 00:00:24,000 Have you seen what's in there? - + 8 00:00:24,000 --> 00:00:25,000 They have intel. - + 9 00:00:25,000 --> 00:00:27,000 This is where it all changes. - - + + Quantization @@ -526,7 +526,7 @@ Please select below whether you would like to run Whisper quantization. description="Quantization", disabled=False, ) - + to_quantize @@ -542,15 +542,15 @@ Please select below whether you would like to run Whisper quantization. # Fetch `skip_kernel_extension` module import requests - + r = requests.get( url="https://raw.githubusercontent.com/openvinotoolkit/openvino_notebooks/latest/utils/skip_kernel_extension.py", ) open("skip_kernel_extension.py", "w").write(r.text) - + ov_quantized_model = None quantized_ov_pipe = None - + %load_ext skip_kernel_extension Let’s load converted OpenVINO model format using Optimum-Intel to easily @@ -584,10 +584,10 @@ interface for ``automatic-speech-recognition``. .. 
code:: ipython3 %%skip not $to_quantize.value - + from transformers import AutoProcessor from optimum.intel.openvino import OVModelForSpeechSeq2Seq - + ov_model = OVModelForSpeechSeq2Seq.from_pretrained(model_dir, device=device.value) processor = AutoProcessor.from_pretrained(model_dir) @@ -607,14 +607,14 @@ improves quantization quality. .. code:: ipython3 %%skip not $to_quantize.value - + from itertools import islice from tqdm.notebook import tqdm from datasets import load_dataset from transformers import pipeline from optimum.intel.openvino.quantization import InferRequestWrapper - - + + def collect_calibration_dataset(ov_model: OVModelForSpeechSeq2Seq, calibration_dataset_size: int): # Overwrite model request properties, saving the original ones for restoring later encoder_calibration_data = [] @@ -623,7 +623,7 @@ improves quantization quality. ov_model.decoder_with_past.request = InferRequestWrapper(ov_model.decoder_with_past.request, decoder_calibration_data, apply_caching=True) - + pipe = pipeline( "automatic-speech-recognition", model=ov_model, @@ -638,7 +638,7 @@ improves quantization quality. finally: ov_model.encoder.request = ov_model.encoder.request.request ov_model.decoder_with_past.request = ov_model.decoder_with_past.request.request - + return encoder_calibration_data, decoder_calibration_data Quantize Whisper encoder and decoder models @@ -654,17 +654,17 @@ negligible. .. code:: ipython3 %%skip not $to_quantize.value - + import gc import shutil import nncf import openvino as ov - - + + CALIBRATION_DATASET_SIZE = 30 quantized_model_path = Path(f"{model_dir}_quantized") - - + + def quantize(ov_model: OVModelForSpeechSeq2Seq, calibration_dataset_size: int): if not quantized_model_path.exists(): encoder_calibration_data, decoder_calibration_data = collect_calibration_dataset(ov_model, calibration_dataset_size) @@ -681,7 +681,7 @@ negligible. del quantized_encoder del encoder_calibration_data gc.collect() - + print("Quantizing decoder with past") quantized_decoder_with_past = nncf.quantize( ov_model.decoder_with_past.model, @@ -695,7 +695,7 @@ negligible. del quantized_decoder_with_past del decoder_calibration_data gc.collect() - + # Copy the config file and the first-step-decoder manually model_path = Path(model_dir) shutil.copy(model_path / "config.json", quantized_model_path / "config.json") @@ -714,11 +714,11 @@ negligible. shutil.copy(model_path / "normalizer.json", quantized_model_path / "normalizer.json") shutil.copy(model_path / "merges.txt", quantized_model_path / "merges.txt") shutil.copy(model_path / "added_tokens.json", quantized_model_path / "added_tokens.json") - + quantized_ov_pipe = ov_genai.WhisperPipeline(str(quantized_model_path), device=device.value) return quantized_ov_pipe - - + + quantized_ov_pipe = quantize(ov_model, CALIBRATION_DATASET_SIZE) Run quantized model inference @@ -752,16 +752,16 @@ for Word Error Rate. .. code:: ipython3 %%skip not $to_quantize.value - + import time from contextlib import contextmanager from jiwer import wer, wer_standardize - + TEST_DATASET_SIZE = 50 - + def calculate_transcription_time_and_accuracy(ov_model, test_samples): whole_infer_times = [] - + ground_truths = [] predictions = [] for data_item in tqdm(test_samples, desc="Measuring performance and accuracy"): @@ -769,19 +769,19 @@ for Word Error Rate. 
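            # time each full transcription and keep the reference/prediction pair,
            # so word accuracy (1 - WER) can be computed over the test subset below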
transcription = ov_model.generate(data_item["audio"]["array"], return_timestamps=True) end_time = time.perf_counter() whole_infer_times.append(end_time - start_time) - + ground_truths.append(data_item["text"]) predictions.append(transcription.texts[0]) - + word_accuracy = (1 - wer(ground_truths, predictions, reference_transform=wer_standardize, hypothesis_transform=wer_standardize)) * 100 mean_whole_infer_time = sum(whole_infer_times) return word_accuracy, mean_whole_infer_time - + test_dataset = load_dataset("openslr/librispeech_asr", "clean", split="validation", streaming=True, trust_remote_code=True) test_dataset = test_dataset.shuffle(seed=42).take(TEST_DATASET_SIZE) test_samples = [sample for sample in test_dataset] - + accuracy_original, times_original = calculate_transcription_time_and_accuracy(ov_pipe, test_samples) accuracy_quantized, times_quantized = calculate_transcription_time_and_accuracy(quantized_ov_pipe, test_samples) print(f"Whole pipeline performance speedup: {times_original / times_quantized:.3f}") @@ -816,35 +816,35 @@ Interactive demo .. code:: ipython3 def_config = ov_pipe.get_generation_config() - - + + def transcribe(video_path, task, use_int8): data_path = Path(video_path) inputs, duration = get_audio(data_path) m_pipe = quantized_ov_pipe if use_int8 else ov_pipe - + frame_num = len(inputs["raw"]) / 16000 if frame_num > 30: config = ov_pipe.get_generation_config() chink_num = math.ceil(frame_num / 30) config.max_length = chink_num * def_config.max_length m_pipe.set_generation_config(config) - + transcription = m_pipe.generate(inputs["raw"], task=task.lower(), return_timestamps=True).chunks srt_lines = prepare_srt(transcription, duration) with data_path.with_suffix(".srt").open("w") as f: f.writelines(srt_lines) return [str(data_path), str(data_path.with_suffix(".srt"))] - - + + if not Path("gradio_helper.py").exists(): r = requests.get(url="https://raw.githubusercontent.com/openvinotoolkit/openvino_notebooks/latest/notebooks/whisper-subtitles-generation/gradio_helper.py") open("gradio_helper.py", "w").write(r.text) - + from gradio_helper import make_demo - + demo = make_demo(fn=transcribe, quantized=ov_quantized_model is not None, sample_path=output_file) - + try: demo.launch(debug=False) except Exception: diff --git a/docs/sphinx_setup/api/nodejs_api/addon.rst b/docs/sphinx_setup/api/nodejs_api/addon.rst index 7c42824bcd88a3..f926ef49573dac 100644 --- a/docs/sphinx_setup/api/nodejs_api/addon.rst +++ b/docs/sphinx_setup/api/nodejs_api/addon.rst @@ -54,7 +54,7 @@ The **openvino-node** package exports ``addon`` which contains the following pro } * **Defined in:** - `addon.ts:669 `__ + `addon.ts:669 `__ Properties @@ -74,7 +74,7 @@ Properties - CoreConstructor: :doc:`CoreConstructor <./openvino-node/interfaces/CoreConstructor>` - **Defined in:** - `addon.ts:670 `__ + `addon.ts:670 `__ .. rubric:: PartialShape @@ -90,7 +90,7 @@ Properties - PartialShapeConstructor: :doc:`PartialShapeConstructor <./openvino-node/interfaces/PartialShapeConstructor>` - **Defined in:** - `addon.ts:672 `__ + `addon.ts:672 `__ .. rubric:: Tensor @@ -105,7 +105,7 @@ Properties - TensorConstructor: :doc:`TensorConstructor <./openvino-node/interfaces/TensorConstructor>` - **Defined in:** - `addon.ts:671 `__ + `addon.ts:671 `__ .. rubric:: element @@ -121,7 +121,7 @@ Properties - element: typeof :doc:`element <./openvino-node/enums/element>` - **Defined in:** - `addon.ts:678 `__ + `addon.ts:678 `__ .. 
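Taken together, the ``CoreConstructor``, ``Tensor`` and ``element`` entries above describe what hangs off the package's ``addon`` export. A minimal sketch of wiring them up; the ``ov`` alias, the import form, and the tensor dimensions are illustrative, not part of the API:

.. code-block:: ts

   // Sketch only: assumes the classes are reached through the `addon` export,
   // as the property list above documents; `ov` is just a local alias.
   import { addon as ov } from 'openvino-node';

   const core = new ov.Core();
   const data = new Float32Array(1 * 3 * 224 * 224);
   const tensor = new ov.Tensor(ov.element.f32, [1, 3, 224, 224], data);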
rubric:: preprocess @@ -141,7 +141,7 @@ Properties - PrePostProcessor: :doc:`PrePostProcessorConstructor <./openvino-node/interfaces/PrePostProcessorConstructor>` - **Defined in:** - `addon.ts:674 `__ + `addon.ts:674 `__ .. rubric:: saveModelSync @@ -177,5 +177,5 @@ Properties * **Returns:** void * **Defined in:** - `addon.ts:692 `__ + `addon.ts:692 `__ diff --git a/docs/sphinx_setup/api/nodejs_api/nodejs_api.rst b/docs/sphinx_setup/api/nodejs_api/nodejs_api.rst index 0ad4d1fb53d126..c9dd2f01b86a7d 100644 --- a/docs/sphinx_setup/api/nodejs_api/nodejs_api.rst +++ b/docs/sphinx_setup/api/nodejs_api/nodejs_api.rst @@ -34,7 +34,7 @@ Build From Sources For more details, refer to the `OpenVINO™ JavaScript API Developer Documentation -`__ +`__ @@ -54,7 +54,7 @@ OpenVINO 2024.4 has introduced the following methods: Additional Resources ##################### -- `OpenVINO™ Node.js Bindings Examples of Usage `__ -- `OpenVINO™ Core Components `__ -- `OpenVINO™ Python API `__ -- `OpenVINO™ Other Bindings `__ \ No newline at end of file +- `OpenVINO™ Node.js Bindings Examples of Usage `__ +- `OpenVINO™ Core Components `__ +- `OpenVINO™ Python API `__ +- `OpenVINO™ Other Bindings `__ \ No newline at end of file diff --git a/docs/sphinx_setup/api/nodejs_api/openvino-node/enums/element.rst b/docs/sphinx_setup/api/nodejs_api/openvino-node/enums/element.rst index 61197254580f81..d8f250230e5d15 100644 --- a/docs/sphinx_setup/api/nodejs_api/openvino-node/enums/element.rst +++ b/docs/sphinx_setup/api/nodejs_api/openvino-node/enums/element.rst @@ -10,7 +10,7 @@ Enumeration element f32: number * **Defined in:** - `addon.ts:658 `__ + `addon.ts:658 `__ .. rubric:: f64 @@ -22,7 +22,7 @@ Enumeration element f64: number * **Defined in:** - `addon.ts:659 `__ + `addon.ts:659 `__ .. rubric:: i16 @@ -34,7 +34,7 @@ Enumeration element i16: number * **Defined in:** - `addon.ts:655 `__ + `addon.ts:655 `__ .. rubric:: i32 @@ -46,7 +46,7 @@ Enumeration element i32: number * **Defined in:** - `addon.ts:656 `__ + `addon.ts:656 `__ .. rubric:: i64 @@ -58,7 +58,7 @@ Enumeration element i64: number * **Defined in:** - `addon.ts:657 `__ + `addon.ts:657 `__ .. rubric:: i8 @@ -70,7 +70,7 @@ Enumeration element i8: number * **Defined in:** - `addon.ts:654 `__ + `addon.ts:654 `__ .. rubric:: string @@ -82,7 +82,7 @@ Enumeration element string: string * **Defined in:** - `addon.ts:660 `__ + `addon.ts:660 `__ .. rubric:: u16 @@ -94,7 +94,7 @@ Enumeration element u16: number * **Defined in:** - `addon.ts:652 `__ + `addon.ts:652 `__ .. rubric:: u32 @@ -106,7 +106,7 @@ Enumeration element u32: number * **Defined in:** - `addon.ts:651 `__ + `addon.ts:651 `__ .. rubric:: u64 @@ -118,7 +118,7 @@ Enumeration element u64: number * **Defined in:** - `addon.ts:653 `__ + `addon.ts:653 `__ .. rubric:: u8 @@ -130,5 +130,5 @@ Enumeration element u8: number * **Defined in:** - `addon.ts:650 `__ + `addon.ts:650 `__ diff --git a/docs/sphinx_setup/api/nodejs_api/openvino-node/enums/resizeAlgorithm.rst b/docs/sphinx_setup/api/nodejs_api/openvino-node/enums/resizeAlgorithm.rst index 168c2cca846d23..d3c65076cb7c29 100644 --- a/docs/sphinx_setup/api/nodejs_api/openvino-node/enums/resizeAlgorithm.rst +++ b/docs/sphinx_setup/api/nodejs_api/openvino-node/enums/resizeAlgorithm.rst @@ -10,7 +10,7 @@ Enumeration resizeAlgorithm RESIZE_CUBIC: number - **Defined in:** - `addon.ts:663 `__ + `addon.ts:663 `__ .. rubric:: RESIZE_LINEAR @@ -22,7 +22,7 @@ Enumeration resizeAlgorithm RESIZE_LINEAR: number - **Defined in:** - `addon.ts:666 `__ + `addon.ts:666 `__ .. 
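Since ``element`` and ``resizeAlgorithm`` are plain enumerations, they are typically consumed as arguments to other calls rather than used on their own. A hedged sketch; the image dimensions are made up for illustration:

.. code-block:: ts

   import { addon as ov } from 'openvino-node';

   // u8 matches raw image bytes; f32 would suit normalized inputs.
   const image = new ov.Tensor(ov.element.u8, [1, 480, 640, 3], new Uint8Array(480 * 640 * 3));

   // resizeAlgorithm values are meant for PreProcessSteps.resize();
   // RESIZE_LINEAR is one of the three documented members.
   const algorithm = ov.preprocess.resizeAlgorithm.RESIZE_LINEAR;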
rubric:: RESIZE_NEAREST @@ -34,5 +34,5 @@ Enumeration resizeAlgorithm RESIZE_NEAREST: number - **Defined in:** - `addon.ts:664 `__ + `addon.ts:664 `__ diff --git a/docs/sphinx_setup/api/nodejs_api/openvino-node/interfaces/CompiledModel.rst b/docs/sphinx_setup/api/nodejs_api/openvino-node/interfaces/CompiledModel.rst index 71e834c1b30cee..5909ccb69685af 100644 --- a/docs/sphinx_setup/api/nodejs_api/openvino-node/interfaces/CompiledModel.rst +++ b/docs/sphinx_setup/api/nodejs_api/openvino-node/interfaces/CompiledModel.rst @@ -22,7 +22,7 @@ CompiledModel represents a model that is compiled for a specific device by apply multiple optimization transformations, then mapping to compute kernels. * **Defined in:** - `addon.ts:317 `__ + `addon.ts:317 `__ Properties @@ -40,7 +40,7 @@ Properties It gets all inputs of a compiled model. - **Defined in:** - `addon.ts:319 `__ + `addon.ts:319 `__ .. rubric:: outputs @@ -54,7 +54,7 @@ Properties It gets all outputs of a compiled model. - **Defined in:** - `addon.ts:321 `__ + `addon.ts:321 `__ Methods @@ -81,7 +81,7 @@ Methods * **Returns:** :doc:`OVAny <../types/OVAny>` * **Defined in:** - `addon.ts:327 `__ + `addon.ts:327 `__ @@ -99,7 +99,7 @@ Methods * **Returns:** :doc:`InferRequest ` - **Defined in:** - `addon.ts:332 `__ + `addon.ts:332 `__ .. rubric:: exportModelSync @@ -114,7 +114,7 @@ Methods * **Returns:** Buffer - **Defined in:** - `addon.ts:339 `__ + `addon.ts:339 `__ .. rubric:: input @@ -133,7 +133,7 @@ Methods A compiled model input. * **Defined in:** - `addon.ts:363 `__ + `addon.ts:363 `__ .. code-block:: ts @@ -153,7 +153,7 @@ Methods A compiled model input. * **Defined in:** - `addon.ts:369 `__ + `addon.ts:369 `__ .. code-block:: ts @@ -173,7 +173,7 @@ Methods A compiled model input. * **Defined in:** - `addon.ts:375 `__ + `addon.ts:375 `__ .. rubric:: output @@ -191,7 +191,7 @@ Methods A compiled model output. * **Defined in:** - `addon.ts:345 `__ + `addon.ts:345 `__ .. code-block:: ts @@ -215,7 +215,7 @@ Methods A compiled model output. * **Defined in:** - `addon.ts:351 `__ + `addon.ts:351 `__ .. code-block:: ts @@ -239,7 +239,7 @@ Methods A compiled model output. * **Defined in:** - `addon.ts:357 `__ + `addon.ts:357 `__ .. rubric:: setProperty @@ -267,5 +267,5 @@ Methods * **Returns:** void * **Defined in:** - `addon.ts:382 `__ + `addon.ts:382 `__ diff --git a/docs/sphinx_setup/api/nodejs_api/openvino-node/interfaces/Core.rst b/docs/sphinx_setup/api/nodejs_api/openvino-node/interfaces/Core.rst index 87d4068b850f78..412c8597ce897a 100644 --- a/docs/sphinx_setup/api/nodejs_api/openvino-node/interfaces/Core.rst +++ b/docs/sphinx_setup/api/nodejs_api/openvino-node/interfaces/Core.rst @@ -39,7 +39,7 @@ It is recommended to have a single Core instance per application. * **Defined in:** - `addon.ts:34 `__ + `addon.ts:34 `__ Methods @@ -63,7 +63,7 @@ Methods A path to the library with ov::Extension * **Defined in:** - `addon.ts:39 `__ + `addon.ts:39 `__ .. rubric:: compileModel @@ -103,7 +103,7 @@ Methods * **Returns:** Promise<\ :doc:`CompiledModel `\> * **Defined in:** - `addon.ts:50 `__ + `addon.ts:50 `__ .. code-block:: ts @@ -141,7 +141,7 @@ Methods * **Returns:** Promise<\ :doc:`CompiledModel `\> * **Defined in:** - `addon.ts:69 `__ + `addon.ts:69 `__ .. rubric:: compileModelSync @@ -170,7 +170,7 @@ Methods * **Returns:** :doc:`CompiledModel ` * **Defined in:** - `addon.ts:78 `__ + `addon.ts:78 `__ .. code-block:: ts @@ -195,7 +195,7 @@ Methods * **Returns:** :doc:`CompiledModel ` * **Defined in:** - `addon.ts:87 `__ + `addon.ts:87 `__ .. 
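The ``CompiledModel`` surface above is easiest to read next to the ``Core`` calls that produce it. A minimal end-to-end sketch, assuming a local IR file; ``model.xml`` is a placeholder path:

.. code-block:: ts

   import { addon as ov } from 'openvino-node';

   async function main() {
     const core = new ov.Core();
     const model = await core.readModel('model.xml');        // placeholder path
     const compiled = await core.compileModel(model, 'CPU');

     console.log(compiled.inputs.map((port) => port.anyName)); // compiled input names
     console.log(compiled.outputs.map((port) => port.shape));  // compiled output shapes

     const inferRequest = compiled.createInferRequest();       // ready for inference
   }

   main();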
rubric:: getAvailableDevices @@ -218,7 +218,7 @@ Methods ``set_property`` and so on. * **Defined in:** - `addon.ts:101 `__ + `addon.ts:101 `__ .. rubric:: getProperty @@ -240,7 +240,7 @@ Methods * **Returns:** :doc:`OVAny <../types/OVAny>` * **Defined in:** - `addon.ts:106 `__ + `addon.ts:106 `__ * @@ -263,7 +263,7 @@ Methods * **Returns:** :doc:`OVAny <../types/OVAny>` * **Defined in:** - `addon.ts:113 `__ + `addon.ts:113 `__ .. rubric:: getVersions @@ -302,7 +302,7 @@ Methods * description: string * **Defined in:** - `addon.ts:121 `__ + `addon.ts:121 `__ .. rubric:: importModel @@ -341,7 +341,7 @@ Methods * **Returns:** Promise<\ :doc:`CompiledModel `\ > * **Defined in:** - `addon.ts:137 `__ + `addon.ts:137 `__ .. rubric:: importModelSync @@ -380,7 +380,7 @@ Methods * **Returns:** :doc:`CompiledModel ` * **Defined in:** - `addon.ts:146 `__ + `addon.ts:146 `__ .. rubric:: queryModel @@ -418,7 +418,7 @@ Methods * **Returns:** [key: string]: string * **Defined in:** - `addon.ts:217 `__ + `addon.ts:217 `__ .. rubric:: readModel @@ -456,7 +456,7 @@ Methods * **Returns:** Promise<\ :doc:`Model `\ > * **Defined in:** - `addon.ts:164 `__ + `addon.ts:164 `__ .. code-block:: ts @@ -479,7 +479,7 @@ Methods * **Returns:** Promise<\ :doc:`Model `\ > * **Defined in:** - `addon.ts:172 `__ + `addon.ts:172 `__ .. code-block:: ts @@ -505,7 +505,7 @@ Methods * **Returns:** Promise<\ :doc:`Model `\ > * **Defined in:** - `addon.ts:179 `__ + `addon.ts:179 `__ .. rubric:: readModelSync @@ -533,7 +533,7 @@ Methods * **Returns:** Promise<\ :doc:`Model `\ > * **Defined in:** - `addon.ts:187 `__ + `addon.ts:187 `__ .. code-block:: ts @@ -551,7 +551,7 @@ Methods * **Returns:** :doc:`Model ` * **Defined in:** - `addon.ts:192 `__ + `addon.ts:192 `__ .. code-block:: ts @@ -570,7 +570,7 @@ Methods * **Returns:** :doc:`Model ` * **Defined in:** - `addon.ts:197 `__ + `addon.ts:197 `__ .. rubric:: setProperty @@ -598,7 +598,7 @@ Methods * **Returns:** void * **Defined in:** - `addon.ts:202 `__ + `addon.ts:202 `__ .. code-block:: ts @@ -621,5 +621,5 @@ Methods * **Returns:** :doc:`OVAny <../types/OVAny>` * **Defined in:** - `addon.ts:204 `__ + `addon.ts:204 `__ diff --git a/docs/sphinx_setup/api/nodejs_api/openvino-node/interfaces/CoreConstructor.rst b/docs/sphinx_setup/api/nodejs_api/openvino-node/interfaces/CoreConstructor.rst index fd2015b8072812..9f1b4006fda0b6 100644 --- a/docs/sphinx_setup/api/nodejs_api/openvino-node/interfaces/CoreConstructor.rst +++ b/docs/sphinx_setup/api/nodejs_api/openvino-node/interfaces/CoreConstructor.rst @@ -8,7 +8,7 @@ Interface CoreConstructor } * **Defined in:** - `addon.ts:223 `__ + `addon.ts:223 `__ Constructors @@ -26,5 +26,5 @@ Constructors * **Returns:** :doc:`Core ` * **Defined in:** - `addon.ts:224 `__ + `addon.ts:224 `__ diff --git a/docs/sphinx_setup/api/nodejs_api/openvino-node/interfaces/InferRequest.rst b/docs/sphinx_setup/api/nodejs_api/openvino-node/interfaces/InferRequest.rst index 138842736ce52c..118ae42c39dcf0 100644 --- a/docs/sphinx_setup/api/nodejs_api/openvino-node/interfaces/InferRequest.rst +++ b/docs/sphinx_setup/api/nodejs_api/openvino-node/interfaces/InferRequest.rst @@ -37,7 +37,7 @@ can be run in asynchronous or synchronous manners. * **Defined in:** - `addon.ts:468 `__ + `addon.ts:468 `__ Methods @@ -57,7 +57,7 @@ Methods * **Returns:** :doc:`CompiledModel ` * **Defined in:** - `addon.ts:508 `__ + `addon.ts:508 `__ .. rubric:: getInputTensor @@ -85,7 +85,7 @@ Methods an exception is thrown. * **Defined in:** - `addon.ts:514 `__ + `addon.ts:514 `__ .. 
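Before compiling, ``getAvailableDevices`` and ``getVersions`` are handy for checking what the host actually offers, and ``readModelSync`` is the blocking counterpart of ``readModel``. A short sketch; the model path is illustrative:

.. code-block:: ts

   import { addon as ov } from 'openvino-node';

   const core = new ov.Core();

   const devices = core.getAvailableDevices();  // e.g. ['CPU', 'GPU'] on a typical host
   console.log(devices);
   console.log(core.getVersions('CPU'));        // plugin version details for a device

   const model = core.readModelSync('model.xml');  // placeholder path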
code-block:: ts @@ -110,7 +110,7 @@ Methods idx is not found, an exception is thrown. * **Defined in:** - `addon.ts:521 `__ + `addon.ts:521 `__ .. rubric:: getOutputTensor @@ -129,7 +129,7 @@ Methods idx is not found, an exception is thrown. * **Defined in:** - `addon.ts:527 `__ + `addon.ts:527 `__ .. code-block:: ts @@ -154,7 +154,7 @@ Methods idx is not found, an exception is thrown. * **Defined in:** - `addon.ts:534 `__ + `addon.ts:534 `__ .. rubric:: getTensor @@ -177,7 +177,7 @@ Methods * **Returns:** :doc:`Tensor ` * **Defined in:** - `addon.ts:543 `__ + `addon.ts:543 `__ .. rubric:: infer @@ -202,7 +202,7 @@ Methods * **Defined in:** - `addon.ts:460 `__ + `addon.ts:460 `__ .. code-block:: ts @@ -237,7 +237,7 @@ Methods } * **Defined in:** - `addon.ts:468 `__ + `addon.ts:468 `__ .. code-block:: ts @@ -271,7 +271,7 @@ Methods } * **Defined in:** - `addon.ts:477 `__ + `addon.ts:477 `__ .. rubric:: inferAsync @@ -310,7 +310,7 @@ Methods * **Defined in:** - `addon.ts:485 `__ + `addon.ts:485 `__ .. rubric:: setInputTensor @@ -336,7 +336,7 @@ Methods * **Returns:** void * **Defined in:** - `addon.ts:532 `__ + `addon.ts:532 `__ .. code-block:: ts @@ -360,7 +360,7 @@ Methods * **Returns:** void * **Defined in:** - `addon.ts:540 `__ + `addon.ts:540 `__ .. rubric:: setOutputTensor @@ -384,7 +384,7 @@ Methods * **Returns:** void * **Defined in:** - `addon.ts:547 `__ + `addon.ts:547 `__ .. code-block:: ts @@ -407,7 +407,7 @@ Methods * **Returns:** void * **Defined in:** - `addon.ts:554 `__ + `addon.ts:554 `__ .. rubric:: setTensor @@ -435,5 +435,5 @@ Methods * **Returns:** void * **Defined in:** - `addon.ts:561 `__ + `addon.ts:561 `__ diff --git a/docs/sphinx_setup/api/nodejs_api/openvino-node/interfaces/InputInfo.rst b/docs/sphinx_setup/api/nodejs_api/openvino-node/interfaces/InputInfo.rst index 067298a9ad38ad..0dfad1216541cb 100644 --- a/docs/sphinx_setup/api/nodejs_api/openvino-node/interfaces/InputInfo.rst +++ b/docs/sphinx_setup/api/nodejs_api/openvino-node/interfaces/InputInfo.rst @@ -10,7 +10,7 @@ Interface InputInfo } * **Defined in:** - `addon.ts:611 `__ + `addon.ts:611 `__ Methods ##################### @@ -26,7 +26,7 @@ Methods * **Returns:** :doc:`InputModelInfo ` * **Defined in:** - `addon.ts:614 `__ + `addon.ts:614 `__ .. rubric:: preprocess @@ -40,7 +40,7 @@ Methods * **Returns:** :doc:`PreProcessSteps ` * **Defined in:** - `addon.ts:613 `__ + `addon.ts:613 `__ .. 
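An ``InferRequest`` can be driven either by passing inputs straight to ``infer``/``inferAsync`` or by binding tensors first and reading the outputs afterwards. A sketch of both paths, assuming a compiled model from the previous steps; the input name ``data`` and the named type exports are assumptions:

.. code-block:: ts

   // Assumption: `CompiledModel` and `Tensor` are exported as types by the package.
   import type { CompiledModel, Tensor } from 'openvino-node';

   async function run(compiled: CompiledModel, input: Tensor) {
     const request = compiled.createInferRequest();

     // Pass inputs keyed by tensor name; the result maps output names to tensors.
     const syncResults = request.infer({ data: input });

     // Same inputs, but resolved as a Promise.
     const asyncResults = await request.inferAsync({ data: input });

     // Alternatively, bind tensors explicitly and read the output port directly.
     request.setInputTensor(input);
     const output = request.getOutputTensor();

     return { syncResults, asyncResults, output };
   }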
rubric:: tensor @@ -54,5 +54,5 @@ Methods * **Returns:** :doc:`InputTensorInfo ` * **Defined in:** - `addon.ts:612 `__ + `addon.ts:612 `__ diff --git a/docs/sphinx_setup/api/nodejs_api/openvino-node/interfaces/InputModelInfo.rst b/docs/sphinx_setup/api/nodejs_api/openvino-node/interfaces/InputModelInfo.rst index fed97b555fca1c..e0237db5b58c2d 100644 --- a/docs/sphinx_setup/api/nodejs_api/openvino-node/interfaces/InputModelInfo.rst +++ b/docs/sphinx_setup/api/nodejs_api/openvino-node/interfaces/InputModelInfo.rst @@ -8,7 +8,7 @@ Interface InputModelInfo } * **Defined in:** - `addon.ts:607 `__ + `addon.ts:607 `__ Methods ##################### @@ -28,5 +28,5 @@ Methods * **Returns:** :doc:`InputModelInfo ` * **Defined in:** - `addon.ts:608 `__ + `addon.ts:608 `__ diff --git a/docs/sphinx_setup/api/nodejs_api/openvino-node/interfaces/InputTensorInfo.rst b/docs/sphinx_setup/api/nodejs_api/openvino-node/interfaces/InputTensorInfo.rst index bd476e75aa8c89..5add72745b2735 100644 --- a/docs/sphinx_setup/api/nodejs_api/openvino-node/interfaces/InputTensorInfo.rst +++ b/docs/sphinx_setup/api/nodejs_api/openvino-node/interfaces/InputTensorInfo.rst @@ -10,7 +10,7 @@ Interface InputTensorInfo } * **Defined in:** - `addon.ts:593 `__ + `addon.ts:593 `__ Methods @@ -32,7 +32,7 @@ Methods * **Returns:** :doc:`InputTensorInfo ` * **Defined in:** - `addon.ts:594 `__ + `addon.ts:594 `__ .. rubric:: setLayout @@ -50,7 +50,7 @@ Methods * **Returns:** :doc:`InputTensorInfo ` * **Defined in:** - `addon.ts:595 `__ + `addon.ts:595 `__ .. rubric:: setShape @@ -68,5 +68,5 @@ Methods * **Returns:** :doc:`InputTensorInfo ` * **Defined in:** - `addon.ts:596 `__ + `addon.ts:596 `__ diff --git a/docs/sphinx_setup/api/nodejs_api/openvino-node/interfaces/Model.rst b/docs/sphinx_setup/api/nodejs_api/openvino-node/interfaces/Model.rst index ef8bbbdb393f1d..398949351b71e7 100644 --- a/docs/sphinx_setup/api/nodejs_api/openvino-node/interfaces/Model.rst +++ b/docs/sphinx_setup/api/nodejs_api/openvino-node/interfaces/Model.rst @@ -25,7 +25,7 @@ Interface Model A user-defined model read by :ref:`Core.readModel `. * **Defined in:** - `addon.ts:230 `__ + `addon.ts:230 `__ Properties @@ -41,7 +41,7 @@ Properties inputs: Output[] - **Defined in:** - `addon.ts:305 `__ + `addon.ts:305 `__ .. rubric:: outputs @@ -53,7 +53,7 @@ Properties outputs: Output[] - **Defined in:** - `addon.ts:309 `__ + `addon.ts:309 `__ Methods @@ -73,7 +73,7 @@ Methods * **Returns:** :doc:`Model ` * **Defined in:** - `addon.ts:234 `__ + `addon.ts:234 `__ .. rubric:: getFriendlyName @@ -93,7 +93,7 @@ Methods A string with a friendly name of the model. * **Defined in:** - `addon.ts:240 `__ + `addon.ts:240 `__ .. rubric:: getName @@ -111,7 +111,7 @@ Methods A string with the name of the model. * **Defined in:** - `addon.ts:245 `__ + `addon.ts:245 `__ .. rubric:: getOutputShape @@ -127,7 +127,7 @@ Methods * **Returns:** number[] * **Defined in:** - `addon.ts:250 `__ + `addon.ts:250 `__ .. rubric:: getOutputSize @@ -143,7 +143,7 @@ Methods * **Returns:** number[] * **Defined in:** - `addon.ts:254 `__ + `addon.ts:254 `__ .. rubric:: getOutputElementType :name: getOutputElementType @@ -169,7 +169,7 @@ Methods * **Returns:** string * **Defined in:** - `addon.ts:259 `__ + `addon.ts:259 `__ .. rubric:: input @@ -186,7 +186,7 @@ Methods * **Returns:** :doc:`Output ` * **Defined in:** - `addon.ts:264 `__ + `addon.ts:264 `__ .. code-block:: ts @@ -208,7 +208,7 @@ Methods * **Returns:** :doc:`Output ` * **Defined in:** - `addon.ts:269 `__ + `addon.ts:269 `__ .. 
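The ``Model`` interface above is mostly about introspection, so a short sketch of inspecting a freshly read model may help; the path is a placeholder:

.. code-block:: ts

   import { addon as ov } from 'openvino-node';

   const core = new ov.Core();
   const model = core.readModelSync('model.xml');   // placeholder path

   console.log(model.getFriendlyName());   // human-readable name
   console.log(model.isDynamic());         // true if any input/output dimension is dynamic

   // input()/output() accept an index or a tensor name, per the overloads above.
   const firstInput = model.input(0);
   console.log(firstInput.anyName, firstInput.shape);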
code-block:: ts @@ -230,7 +230,7 @@ Methods * **Returns:** :doc:`Output ` * **Defined in:** - `addon.ts:274 `__ + `addon.ts:274 `__ .. rubric:: isDynamic @@ -246,7 +246,7 @@ Methods * **Returns:** boolean * **Defined in:** - `addon.ts:279 `__ + `addon.ts:279 `__ .. rubric:: output @@ -263,7 +263,7 @@ Methods * **Returns:** :doc:`Output ` * **Defined in:** - `addon.ts:284 `__ + `addon.ts:284 `__ .. rubric:: output @@ -287,7 +287,7 @@ Methods * **Returns:** :doc:`Output ` * **Defined in:** - `addon.ts:289 `__ + `addon.ts:289 `__ .. code-block:: ts @@ -307,7 +307,7 @@ Methods * **Returns:** :doc:`Output ` * **Defined in:** - `addon.ts:294 `__ + `addon.ts:294 `__ .. rubric:: setFriendlyName @@ -330,4 +330,4 @@ Methods * **Returns:** void * **Defined in:** - `addon.ts:301 `__ \ No newline at end of file + `addon.ts:301 `__ \ No newline at end of file diff --git a/docs/sphinx_setup/api/nodejs_api/openvino-node/interfaces/Output.rst b/docs/sphinx_setup/api/nodejs_api/openvino-node/interfaces/Output.rst index 5748e26a706ebc..4c3222a975b76d 100644 --- a/docs/sphinx_setup/api/nodejs_api/openvino-node/interfaces/Output.rst +++ b/docs/sphinx_setup/api/nodejs_api/openvino-node/interfaces/Output.rst @@ -14,7 +14,7 @@ Interface Output } * **Defined in:** - `addon.ts:584 `__ + `addon.ts:584 `__ Properties @@ -31,7 +31,7 @@ Properties anyName: string - **Defined in:** - `addon.ts:585 `__ + `addon.ts:585 `__ @@ -44,7 +44,7 @@ Properties shape: number[] - **Defined in:** - `addon.ts:586 `__ + `addon.ts:586 `__ Methods @@ -62,7 +62,7 @@ Methods * **Returns:** string * **Defined in:** - `addon.ts:588 `__ + `addon.ts:588 `__ .. rubric:: getPartialShape @@ -75,7 +75,7 @@ Methods * **Returns:** :doc:`PartialShape ` * **Defined in:** - `addon.ts:590 `__ + `addon.ts:590 `__ .. rubric:: getShape @@ -88,7 +88,7 @@ Methods * **Returns:** number[] * **Defined in:** - `addon.ts:589 `__ + `addon.ts:589 `__ .. rubric:: toString @@ -101,5 +101,5 @@ Methods * **Returns:** string * **Defined in:** - `addon.ts:587 `__ + `addon.ts:587 `__ diff --git a/docs/sphinx_setup/api/nodejs_api/openvino-node/interfaces/OutputInfo.rst b/docs/sphinx_setup/api/nodejs_api/openvino-node/interfaces/OutputInfo.rst index 4739e761778d3c..1750e18221a57c 100644 --- a/docs/sphinx_setup/api/nodejs_api/openvino-node/interfaces/OutputInfo.rst +++ b/docs/sphinx_setup/api/nodejs_api/openvino-node/interfaces/OutputInfo.rst @@ -8,7 +8,7 @@ Interface OutputInfo } * **Defined in:** - `addon.ts:617 `__ + `addon.ts:617 `__ Methods @@ -26,5 +26,5 @@ Methods * **Returns** :doc:`OutputTensorInfo ` * **Defined in:** - `addon.ts:618 `__ + `addon.ts:618 `__ diff --git a/docs/sphinx_setup/api/nodejs_api/openvino-node/interfaces/OutputTensorInfo.rst b/docs/sphinx_setup/api/nodejs_api/openvino-node/interfaces/OutputTensorInfo.rst index 9b13d3d4328684..db58a5bc284b3c 100644 --- a/docs/sphinx_setup/api/nodejs_api/openvino-node/interfaces/OutputTensorInfo.rst +++ b/docs/sphinx_setup/api/nodejs_api/openvino-node/interfaces/OutputTensorInfo.rst @@ -9,7 +9,7 @@ Interface OutputTensorInfo } * **Defined in:** - `addon.ts:599 `__ + `addon.ts:599 `__ Methods @@ -31,7 +31,7 @@ Methods * **Returns** :doc:`InputTensorInfo ` * **Defined in:** - `addon.ts:600 `__ + `addon.ts:600 `__ .. 
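``Output`` objects describe a single port rather than the whole model, which is why both a static ``shape`` and a ``getPartialShape()`` accessor are exposed. A brief sketch, again with a placeholder model path:

.. code-block:: ts

   import { addon as ov } from 'openvino-node';

   const core = new ov.Core();
   const model = core.readModelSync('model.xml');   // placeholder path
   const port = model.output(0);

   console.log(port.anyName);                       // tensor name of the port
   console.log(port.getShape());                    // static shape as number[]
   console.log(port.getPartialShape().toString());  // safe even when dimensions are dynamic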
rubric:: setLayout @@ -48,5 +48,5 @@ Methods * **Returns:** :doc:`InputTensorInfo ` * **Defined in:** - `addon.ts:601 `__ + `addon.ts:601 `__ diff --git a/docs/sphinx_setup/api/nodejs_api/openvino-node/interfaces/PartialShape.rst b/docs/sphinx_setup/api/nodejs_api/openvino-node/interfaces/PartialShape.rst index ee1c0b1300daa1..94decc5fc26394 100644 --- a/docs/sphinx_setup/api/nodejs_api/openvino-node/interfaces/PartialShape.rst +++ b/docs/sphinx_setup/api/nodejs_api/openvino-node/interfaces/PartialShape.rst @@ -11,7 +11,7 @@ Interface PartialShape } * **Defined in:** - `addon.ts:630 `__ + `addon.ts:630 `__ Methods @@ -29,7 +29,7 @@ Methods * **Returns:** :doc:`Dimension <../types/Dimension>`\[] * **Defined in:** - `addon.ts:634 `__ + `addon.ts:634 `__ .. rubric:: isDynamic @@ -43,7 +43,7 @@ Methods * **Returns:** boolean * **Defined in:** - `addon.ts:632 `__ + `addon.ts:632 `__ .. rubric:: isStatic @@ -57,7 +57,7 @@ Methods * **Returns:** boolean * **Defined in:** - `addon.ts:631 `__ + `addon.ts:631 `__ .. rubric:: toString @@ -71,5 +71,5 @@ Methods * **Returns:** string * **Defined in:** - `addon.ts:633 `__ + `addon.ts:633 `__ diff --git a/docs/sphinx_setup/api/nodejs_api/openvino-node/interfaces/PartialShapeConstructor.rst b/docs/sphinx_setup/api/nodejs_api/openvino-node/interfaces/PartialShapeConstructor.rst index 809fd34966dbb0..e3c9ab54888095 100644 --- a/docs/sphinx_setup/api/nodejs_api/openvino-node/interfaces/PartialShapeConstructor.rst +++ b/docs/sphinx_setup/api/nodejs_api/openvino-node/interfaces/PartialShapeConstructor.rst @@ -10,7 +10,7 @@ Interface PartialShapeConstructor This interface contains constructor of the :doc:`PartialShape ` class. * **Defined in:** - `addon.ts:640 `__ + `addon.ts:640 `__ Constructors @@ -41,5 +41,5 @@ Constructors * **Returns:** :doc:`PartialShape ` - **Defined in** - `addon.ts:646 `__ + `addon.ts:646 `__ diff --git a/docs/sphinx_setup/api/nodejs_api/openvino-node/interfaces/PrePostProcessor.rst b/docs/sphinx_setup/api/nodejs_api/openvino-node/interfaces/PrePostProcessor.rst index a8e8e01b294082..41935a6a743556 100644 --- a/docs/sphinx_setup/api/nodejs_api/openvino-node/interfaces/PrePostProcessor.rst +++ b/docs/sphinx_setup/api/nodejs_api/openvino-node/interfaces/PrePostProcessor.rst @@ -10,7 +10,7 @@ Interface PrePostProcessor } * **Defined in:** - `addon.ts:621 `__ + `addon.ts:621 `__ Methods @@ -28,7 +28,7 @@ Methods * **Returns:** :doc:`PrePostProcessor ` * **Defined in:** - `addon.ts:622 `__ + `addon.ts:622 `__ .. rubric:: input @@ -50,7 +50,7 @@ Methods * **Returns:** :doc:`InputInfo ` * **Defined in:** - `addon.ts:623 `__ + `addon.ts:623 `__ .. 
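``PartialShape`` is the shape form that tolerates unknown dimensions. The sketch below assumes the string-based constructor suggested by the ``PartialShapeConstructor`` entry, with ``?`` marking a dynamic dimension:

.. code-block:: ts

   import { addon as ov } from 'openvino-node';

   // Assumption: the constructor accepts a shape string; '?' marks a dynamic dimension.
   const shape = new ov.PartialShape('?, 3, 224, 224');

   console.log(shape.isStatic());       // false, the batch dimension is unknown
   console.log(shape.isDynamic());      // true
   console.log(shape.getDimensions());  // Dimension[]: numbers or [min, max] intervals
   console.log(shape.toString());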
rubric:: output @@ -71,5 +71,5 @@ Methods * **Returns:** :doc:`OutputInfo ` * **Defined in:** - `addon.ts:624 `__ + `addon.ts:624 `__ diff --git a/docs/sphinx_setup/api/nodejs_api/openvino-node/interfaces/PrePostProcessorConstructor.rst b/docs/sphinx_setup/api/nodejs_api/openvino-node/interfaces/PrePostProcessorConstructor.rst index 7276505c2f8dc8..bb1d7a58051d0e 100644 --- a/docs/sphinx_setup/api/nodejs_api/openvino-node/interfaces/PrePostProcessorConstructor.rst +++ b/docs/sphinx_setup/api/nodejs_api/openvino-node/interfaces/PrePostProcessorConstructor.rst @@ -8,7 +8,7 @@ Interface PrePostProcessorConstructor } * **Defined in:** - `addon.ts:626 `__ + `addon.ts:626 `__ Constructors @@ -30,5 +30,5 @@ Constructors * **Returns:** :doc:`PrePostProcessor ` * **Defined in:** - `addon.ts:627 `__ + `addon.ts:627 `__ diff --git a/docs/sphinx_setup/api/nodejs_api/openvino-node/interfaces/PreProcessSteps.rst b/docs/sphinx_setup/api/nodejs_api/openvino-node/interfaces/PreProcessSteps.rst index 2c232a6283ce12..be3e8edbc3363f 100644 --- a/docs/sphinx_setup/api/nodejs_api/openvino-node/interfaces/PreProcessSteps.rst +++ b/docs/sphinx_setup/api/nodejs_api/openvino-node/interfaces/PreProcessSteps.rst @@ -9,7 +9,7 @@ Interface PreProcessSteps } * **Defined in:** - `addon.ts:603 `__ + `addon.ts:603 `__ Methods @@ -31,5 +31,5 @@ Methods * **Returns:** :doc:`PreProcessSteps ` * **Defined in:** - `addon.ts:604 `__ + `addon.ts:604 `__ diff --git a/docs/sphinx_setup/api/nodejs_api/openvino-node/interfaces/Tensor.rst b/docs/sphinx_setup/api/nodejs_api/openvino-node/interfaces/Tensor.rst index 9b0e19b559cdf8..aaeaa8b5635a18 100644 --- a/docs/sphinx_setup/api/nodejs_api/openvino-node/interfaces/Tensor.rst +++ b/docs/sphinx_setup/api/nodejs_api/openvino-node/interfaces/Tensor.rst @@ -17,7 +17,7 @@ inference. There are different ways to create a tensor. You can find them in :doc:`TensorConstructor ` section. * **Defined in:** - `addon.ts:390 `__ + `addon.ts:390 `__ Properties @@ -44,7 +44,7 @@ Properties or type of array does not match the tensor. - **Defined in:** - `addon.ts:403 `__ + `addon.ts:403 `__ Methods @@ -67,7 +67,7 @@ Methods element type, e.g. ``Float32Array`` corresponds to float32. * **Defined in:** - `addon.ts:413 `__ + `addon.ts:413 `__ .. rubric:: getElementType @@ -82,7 +82,7 @@ Methods * **Returns:** :doc:`element <../enums/element>` * **Defined in:** - `addon.ts:407 `__ + `addon.ts:407 `__ .. rubric:: getShape @@ -98,7 +98,7 @@ Methods * **Returns:** number[] * **Defined in:** - `addon.ts:417 `__ + `addon.ts:417 `__ .. rubric:: getSize @@ -114,5 +114,5 @@ Methods * **Returns:** number[] * **Defined in:** - `addon.ts:421 `__ + `addon.ts:421 `__ diff --git a/docs/sphinx_setup/api/nodejs_api/openvino-node/interfaces/TensorConstructor.rst b/docs/sphinx_setup/api/nodejs_api/openvino-node/interfaces/TensorConstructor.rst index f8f054c012465e..7d8553c50ee1e3 100644 --- a/docs/sphinx_setup/api/nodejs_api/openvino-node/interfaces/TensorConstructor.rst +++ b/docs/sphinx_setup/api/nodejs_api/openvino-node/interfaces/TensorConstructor.rst @@ -16,7 +16,7 @@ the user. Any action performed on the ``TypedArray`` will be reflected in this tensor memory. * **Defined in:** - `addon.ts:433 `__ + `addon.ts:433 `__ Constructors @@ -47,7 +47,7 @@ Constructors * **Returns:** :doc:`Tensor ` * **Defined in:** - `addon.ts:440 `__ + `addon.ts:440 `__ .. code-block:: ts @@ -74,7 +74,7 @@ Constructors * **Returns:** :doc:`Tensor ` * **Defined in:** - `addon.ts:449 `__ + `addon.ts:449 `__ .. 
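The ``PrePostProcessor`` builders are meant to be chained per input and then folded into the model with ``build()``. A hedged sketch combining the documented ``tensor()``, ``preprocess()`` and ``resize()`` steps; the layout string and shape are illustrative:

.. code-block:: ts

   import { addon as ov } from 'openvino-node';

   const core = new ov.Core();
   const model = core.readModelSync('model.xml');   // placeholder path

   const ppp = new ov.preprocess.PrePostProcessor(model);
   ppp.input().tensor()
     .setElementType(ov.element.u8)
     .setLayout('NHWC')                             // illustrative user-side layout
     .setShape([1, 480, 640, 3]);
   ppp.input().preprocess()
     .resize(ov.preprocess.resizeAlgorithm.RESIZE_LINEAR);
   ppp.build();                                     // folds the steps into `model`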
code-block:: ts @@ -88,5 +88,5 @@ Constructors * **Returns:** :doc:`Tensor ` * **Defined in:** - `addon.ts:459 `__ + `addon.ts:459 `__ diff --git a/docs/sphinx_setup/api/nodejs_api/openvino-node/types/Dimension.rst b/docs/sphinx_setup/api/nodejs_api/openvino-node/types/Dimension.rst index d8993d3e39f86f..0d85873f96884e 100644 --- a/docs/sphinx_setup/api/nodejs_api/openvino-node/types/Dimension.rst +++ b/docs/sphinx_setup/api/nodejs_api/openvino-node/types/Dimension.rst @@ -6,5 +6,5 @@ Type alias Dimension Dimension: number|[number,number] * **Defined in:** - `addon.ts:582 `__ + `addon.ts:582 `__ diff --git a/docs/sphinx_setup/api/nodejs_api/openvino-node/types/OVAny.rst b/docs/sphinx_setup/api/nodejs_api/openvino-node/types/OVAny.rst index 0c194f9c703b99..1c0e2405246768 100644 --- a/docs/sphinx_setup/api/nodejs_api/openvino-node/types/OVAny.rst +++ b/docs/sphinx_setup/api/nodejs_api/openvino-node/types/OVAny.rst @@ -6,5 +6,5 @@ Type alias OVAny OVAny: string | number | boolean * **Defined in:** - `addon.ts:24 `__ + `addon.ts:24 `__ diff --git a/docs/sphinx_setup/api/nodejs_api/openvino-node/types/SupportedTypedArray.rst b/docs/sphinx_setup/api/nodejs_api/openvino-node/types/SupportedTypedArray.rst index 8645d7ac79091e..5ba28d5e5b3fad 100644 --- a/docs/sphinx_setup/api/nodejs_api/openvino-node/types/SupportedTypedArray.rst +++ b/docs/sphinx_setup/api/nodejs_api/openvino-node/types/SupportedTypedArray.rst @@ -6,5 +6,5 @@ Type alias SupportedTypedArray SupportedTypedArray: Int8Array | Uint8Array | Int16Array | Uint16Array | Int32Array | Uint32Array | Float32Array | Float64Array * **Defined in:** - `addon.ts:1 `__ + `addon.ts:1 `__ diff --git a/docs/sphinx_setup/api/nodejs_api/openvino-node/types/elementTypeString.rst b/docs/sphinx_setup/api/nodejs_api/openvino-node/types/elementTypeString.rst index 1fe9fe16c2c1ba..cdfd99c26bab51 100644 --- a/docs/sphinx_setup/api/nodejs_api/openvino-node/types/elementTypeString.rst +++ b/docs/sphinx_setup/api/nodejs_api/openvino-node/types/elementTypeString.rst @@ -6,5 +6,5 @@ Type alias elementTypeString elementTypeString: "u8" | "u32" | "u16" | "u64" | "i8" | "i64" | "i32" | "i16" | "f64" | "f32" | "string" * **Defined in:** - `addon.ts:11 `__ + `addon.ts:11 `__
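Finally, the type aliases tie the ``Tensor`` constructor together: the backing array must be one of the ``SupportedTypedArray`` variants and should match the chosen ``element`` type. A closing sketch:

.. code-block:: ts

   import { addon as ov } from 'openvino-node';

   // Float32Array pairs with element.f32; the array's memory backs the tensor.
   const data = new Float32Array(1 * 3 * 2 * 2).fill(0.5);
   const tensor = new ov.Tensor(ov.element.f32, [1, 3, 2, 2], data);

   console.log(tensor.getShape());        // [1, 3, 2, 2]
   console.log(tensor.getElementType());  // the element type the tensor was created with
   console.log(tensor.data.length);       // 12, the view shares memory with `data`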