diff --git a/cspell.json b/cspell.json deleted file mode 100644 index f59d00a6a052f6..00000000000000 --- a/cspell.json +++ /dev/null @@ -1,412 +0,0 @@ -{ - "version": "0.2", - "ignorePaths": [], - "dictionaryDefinitions": [], - "dictionaries": [], - "words": [ - "aarch64", - "acdadcfa", - "acea", - "abmrd", - "acfb", - "acosh", - "Acosh", - "adfcd", - "addcmul", - "addif", - "addmm", - "aeaa", - "agem", - "agew", - "armeabi", - "armhf", - "artefacts", - "ARTEFACTS", - "Asinh", - "asynch", - "Atanh", - "autodoc", - "Autograd", - "autoplugin", - "AUTOPLUGIN", - "autoremove", - "autosummary", - "bace", - "Backprop", - "bblayers", - "Beautif", - "Bilat", - "bindir", - "bitbake", - "BFYX", - "BFXY", - "bkgr", - "brctl", - "Bucketize", - "BUILDDIR", - "buildtools", - "buildsystems", - "BYXF", - "bvalue", - "bvlc", - "caffe", - "caffemodel", - "camvid", - "cbba", - "cbcd", - "cdad", - "cdrom", - "chrpath", - "classov", - "cldnn", - "clumber", - "codepath", - "codepaths", - "coeffs", - "concat", - "Concat", - "Conts", - "constexpr", - "consts", - "Consts", - "conv", - "Convolutional", - "CPPLINT", - "cpplint", - "crbegin", - "crend", - "ctest", - "ctput", - "CVAT", - "cython", - "dadb", - "DANDROID", - "DARM", - "Datumaro", - "datumaro", - "DBUILD", - "DCMAKE", - "ddepth", - "Depthwise", - "dearmor", - "devicesupport", - "dequantization", - "Dequantization", - "deeplabv", - "deeced", - "DENABLE", - "delif", - "denormal", - "DENORMAL", - "denormalized", - "Detectron", - "Dequantize", - "devel", - "devtoolset", - "dgpu", - "diffstat", - "dldt", - "dlstreamer", - "dkms", - "Dockerfiles", - "DOPENVINO", - "downscript", - "doxid", - "doxygen", - "Doxygen", - "doxygensnippet", - "DTHREADING", - "dpkg", - "DPYTHON", - "DSELECTIVE", - "dylib", - "DWORD", - "efficientdet", - "Efficientdet", - "Einsum", - "Elems", - "Elementwise", - "elementwise", - "Eltwise", - "endsphinxdirective", - "enumov", - "emcmake", - "emmake", - "emod", - "emom", - "emow", - "Emscripten", - "emscripten", - 
"emsdk", - "epel", - "ERRORLEVEL", - "evolutionally", - "executionpolicy", - "fafe", - "fdupes", - "flatbuffers", - "FLATBUFFERS", - "frontends", - "Frontends", - "FYXB", - "gaddb", - "GAPI", - "gapi", - "Gaussed", - "gcompoundkernel", - "gcomputation", - "GCPU", - "gcpukernel", - "Gelu", - "GELU", - "Geti", - "getitem", - "gimg", - "gitee", - "gflags", - "globbing", - "gmmlib", - "GNAs", - "gmock", - "gnueabihf", - "googlenet", - "gpgcheck", - "gpgkey", - "graphviz", - "Graphviz", - "groupov", - "gtest", - "hardtanh", - "hashfile", - "HDDL", - "HKLM", - "HOSTTOOLS", - "Hotspots", - "hotspots", - "hostnet", - "hwloc", - "hwquote", - "idbf", - "IDFT", - "iigd", - "ifdef", - "ifdown", - "ifup", - "imgproc", - "imshow", - "inet", - "INTEGRITYCHECK", - "ILSVRC", - "inferenced", - "Informations", - "insmod", - "intelocl", - "INTERPROCEDURAL", - "INSTALLDIR", - "IRDFT", - "jemalloc", - "kaldi", - "Keras", - "keypress", - "keyrings", - "Khronos", - "KROIs", - "Landm", - "landm", - "Latency", - "Lcov", - "ldconfig", - "libc", - "libopencl", - "libopencv", - "libpython", - "libtbb", - "libtbbbind", - "libtpm", - "libvirtd", - "linmac", - "Liskov", - "lowlatency", - "LTSC", - "LSTM", - "makefiles", - "malloc", - "memleaks", - "manylinux", - "maxdepth", - "miktext", - "Mish", - "mklink", - "mmap", - "mobilenet", - "Mobilenet", - "monodepth", - "mozallowfullscreen", - "msallowfullscreen", - "MSVC", - "msvc", - "Multiclass", - "muxed", - "mxnet", - "namespaceov", - "NCHW", - "ncpu", - "netdev", - "netplan", - "ngraph", - "nireq", - "NNCF", - "nncf", - "nocache", - "noglob", - "nohup", - "nlohmann", - "norestart", - "noqueue", - "nproc", - "NUMA", - "numpy", - "Numpy", - "oallowfullscreen", - "ocloc", - "OCSP", - "oneapi", - "onetbb", - "onnx", - "opencl", - "openembedded", - "openvino", - "Opset", - "opset", - "opsets", - "OVMS", - "ovms", - "ovsa", - "OVSA", - "ovsatool", - "OVTF", - "PACKAGECONFIG", - "paddlepaddle", - "parameterizable", - "partitioner", - "patchelf", - 
"passpattern", - "Pexels", - "pdmodel", - "PDPD", - "pkgdata", - "pkgs", - "pkill", - "polylines", - "postproc", - "postprocess", - "preprocess", - "Preprocess", - "protobuf", - "Protobuf", - "PROTOBUF", - "prototxt", - "PSROI", - "Pugi", - "pugixml", - "PUGIXML", - "pypi", - "PYTHONPATH", - "pzstd", - "qcow", - "qlen", - "QSPECTRE", - "Qspectre", - "quantizer", - "Rects", - "Relu", - "relu", - "rcnn", - "RCNN", - "RDFT", - "Redistributable", - "remotesigned", - "repolist", - "reproject", - "reshapable", - "Requantize", - "retval", - "RHODS", - "rmmod", - "runtool", - "scons", - "SCONS", - "segm", - "Selu", - "servercore", - "setuptools", - "setupvars", - "SETX", - "SIMD", - "Softmax", - "skylake", - "sphinxdirective", - "Strided", - "squeezenet", - "SWTPM", - "swtpm", - "TBBBIND", - "TBBROOT", - "Tensro", - "texlive", - "textrm", - "tflite", - "thirdparty", - "Thresholded", - "toctree", - "toolset", - "Torchvision", - "tpmrm", - "tpmstate", - "tput", - "Tunables", - "unet", - "Uninstallation", - "unixio", - "unsharp", - "Unsharp", - "Unsh", - "Unsqueeze", - "Usecase", - "usecases", - "USERPROFILE", - "userspace", - "VAAPI", - "valgrind", - "vcpkg", - "vcvars", - "venv", - "virbr", - "virsh", - "virt", - "virtio", - "VMHWM", - "VMRSS", - "VNNI", - "vtune", - "vtunesummary", - "vtunebottonup", - "WHOLEARCHIVE", - "WDDM", - "WORKDIR", - "WORKSIZE", - "xbyak", - "Xbyak", - "xdot", - "xvfz", - "yocto", - "yolo", - "YOLO", - "yolov", - "Yolov", - "YXFB", - "zstd" - ], - "ignoreWords": [], - "import": [] -} diff --git a/docs/articles_en/about-openvino/release-notes-openvino.rst b/docs/articles_en/about-openvino/release-notes-openvino.rst index 0134ed15215541..739c411dcbe7e5 100644 --- a/docs/articles_en/about-openvino/release-notes-openvino.rst +++ b/docs/articles_en/about-openvino/release-notes-openvino.rst @@ -16,7 +16,7 @@ OpenVINO Release Notes -2024.6 - 18 December 2024 +2025.0 - 05 February 2025 ############################# :doc:`System Requirements 
<./release-notes-openvino/system-requirements>` | :doc:`Release policy <./release-notes-openvino/release-policy>` | :doc:`Installation Guides <./../get-started/install-openvino>` @@ -26,10 +26,9 @@ OpenVINO Release Notes What's new +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ -* OpenVINO 2024.6 release includes updates for enhanced stability and improved LLM performance. -* Introduced support for Intel® Arc™ B-Series Graphics (formerly known as Battlemage). -* Implemented optimizations to improve the inference time and LLM performance on NPUs. -* Improved LLM performance with GenAI API optimizations and bug fixes. +* . +* . + @@ -39,26 +38,19 @@ OpenVINO™ Runtime CPU Device Plugin ----------------------------------------------------------------------------------------------- -* KV cache now uses asymmetric 8-bit unsigned integer (U8) as the default precision, reducing - memory stress for LLMs and increasing their performance. This option can be controlled by - model meta data. -* Quality and accuracy has been improved for selected models with several bug fixes. +* . +* . GPU Device Plugin ----------------------------------------------------------------------------------------------- -* Device memory copy optimizations have been introduced for inference with **Intel® Arc™ B-Series - Graphics** (formerly known as Battlemage). Since it does not utilize L2 cache for copying memory - between the device and host, a dedicated `copy` operation is used, if inputs or results are - not expected in the device memory. -* ChatGLM4 inference on GPU has been optimized. +* . +* . NPU Device Plugin ----------------------------------------------------------------------------------------------- -* LLM performance and inference time has been improved with memory optimizations. - - +* . @@ -98,14 +90,10 @@ Previous 2025 releases .. ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ -.. 
dropdown:: 2024.5 - 20 November 2024 +.. dropdown:: 2024.6 - 18 December 2024 :animate: fade-in-slide-down :color: secondary - **What's new** - - * More GenAI coverage and framework integrations to minimize code changes. - @@ -126,74 +114,44 @@ page. -Discontinued in 2024 +Discontinued in 2025 ----------------------------- * Runtime components: - * Intel® Gaussian & Neural Accelerator (Intel® GNA). Consider using the Neural Processing - Unit (NPU) for low-powered systems like Intel® Core™ Ultra or 14th generation and beyond. - * OpenVINO C++/C/Python 1.0 APIs (see - `2023.3 API transition guide `__ - for reference). - * All ONNX Frontend legacy API (known as ONNX_IMPORTER_API). - * ``PerfomanceMode.UNDEFINED`` property as part of the OpenVINO Python API. + * OpenVINO property Affinity API is no longer available. It has been replaced with CPU + binding configurations (``ov::hint::enable_cpu_pinning``). * Tools: - * Deployment Manager. See :doc:`installation <../get-started/install-openvino>` and - :doc:`deployment <../get-started/install-openvino>` guides for current distribution - options. - * `Accuracy Checker `__. - * `Post-Training Optimization Tool `__ - (POT). Neural Network Compression Framework (NNCF) should be used instead. - * A `Git patch `__ - for NNCF integration with `huggingface/transformers `__. - The recommended approach is to use `huggingface/optimum-intel `__ - for applying NNCF optimization on top of models from Hugging Face. - * Support for Apache MXNet, Caffe, and Kaldi model formats. Conversion to ONNX may be used - as a solution. - * The macOS x86_64 debug bins are no longer provided with the OpenVINO toolkit, starting - with OpenVINO 2024.5. - * Python 3.8 is no longer supported, starting with OpenVINO 2024.5. - - * As MxNet doesn't support Python version higher than 3.8, according to the - `MxNet PyPI project `__, - it is no longer supported by OpenVINO, either. 
- - * Discrete Keem Bay support is no longer supported, starting with OpenVINO 2024.5. - * Support for discrete devices (formerly codenamed Raptor Lake) is no longer available for - NPU. + * Intel® Streaming SIMD Extensions (Intel® SSE) are currently not enabled in the binary + package by default. They are still supported in the source code form. + * The OpenVINO™ Development Tools package (pip install openvino-dev) is no longer available + for OpenVINO releases in 2025. + * Model Optimizer is no longer available. Consider using the + :doc:`new conversion methods <../openvino-workflow/model-preparation/convert-model-to-ir>` + instead. For more details, see the + `model conversion transition guide `__. Deprecated and to be removed in the future -------------------------------------------- -* Intel® Streaming SIMD Extensions (Intel® SSE) will be supported in source code form, but not - enabled in the binary package by default, starting with OpenVINO 2025.0. * Ubuntu 20.04 support will be deprecated in future OpenVINO releases due to the end of standard support. * The openvino-nightly PyPI module will soon be discontinued. End-users should proceed with the Simple PyPI nightly repo instead. More information in `Release Policy `__. -* The OpenVINO™ Development Tools package (pip install openvino-dev) will be removed from - installation options and distribution channels beginning with OpenVINO 2025.0. -* Model Optimizer will be discontinued with OpenVINO 2025.0. Consider using the - :doc:`new conversion methods <../openvino-workflow/model-preparation/convert-model-to-ir>` - instead. For more details, see the - `model conversion transition guide `__. -* OpenVINO property Affinity API will be discontinued with OpenVINO 2025.0. - It will be replaced with CPU binding configurations (``ov::hint::enable_cpu_pinning``). - - - +* “auto shape” and “auto batch size” (reshaping a model in runtime) will be removed in the + future. 
OpenVINO's dynamic shape models are recommended instead. +* MacOS x86 is no longer recommended for use due to the discontinuation of validation. + Full support will be removed later in 2025. +* The `openvino` namespace of the OpenVINO Python API has been redesigned, removing the nested + `openvino.runtime` module. The old namespace is now considered deprecated and will be + discontinued in 2026.0. - * “auto shape” and “auto batch size” (reshaping a model in runtime) will be removed in the - future. OpenVINO's dynamic shape models are recommended instead. -* Starting with 2025.0 MacOS x86 is no longer recommended for use due to the discontinuation - of validation. Full support will be removed later in 2025. diff --git a/docs/articles_en/get-started/configurations/genai-dependencies.rst b/docs/articles_en/get-started/configurations/genai-dependencies.rst index 6eec18a74f0f05..13e28107f69d63 100644 --- a/docs/articles_en/get-started/configurations/genai-dependencies.rst +++ b/docs/articles_en/get-started/configurations/genai-dependencies.rst @@ -4,8 +4,8 @@ OpenVINO™ GenAI Dependencies OpenVINO™ GenAI depends on both `OpenVINO `__ and `OpenVINO Tokenizers `__. During OpenVINO™ GenAI installation from PyPi, the same versions of OpenVINO and OpenVINO Tokenizers -are used (e.g. ``openvino==2024.6.0`` and ``openvino-tokenizers==2024.6.0.0`` are installed for -``openvino-genai==2024.6.0``). +are used (e.g. ``openvino==2025.0.0`` and ``openvino-tokenizers==2025.0.0.0`` are installed for +``openvino-genai==2025.0.0``). 
Trying to update any of the dependency packages might result in a version incompatibility due to different Application Binary Interfaces (ABIs), which will result in errors while running diff --git a/docs/articles_en/get-started/install-openvino.rst b/docs/articles_en/get-started/install-openvino.rst index 401aa79213e6d7..387a0bf2ab37e3 100644 --- a/docs/articles_en/get-started/install-openvino.rst +++ b/docs/articles_en/get-started/install-openvino.rst @@ -1,4 +1,4 @@ -Install OpenVINO™ 2024.6 +Install OpenVINO™ 2025.0 ========================== @@ -23,10 +23,10 @@ Install OpenVINO™ 2024.6 -OpenVINO 2024.6, described here, is not a Long-Term-Support version! +OpenVINO 2025.0, described here, is not a Long-Term-Support version! All currently supported versions are: -* 2024.6 (development) +* 2025.0 (development) * 2023.3 (LTS) diff --git a/docs/articles_en/openvino-workflow-generative.rst b/docs/articles_en/openvino-workflow-generative.rst index a4fa53335988ae..14521f118f6dfc 100644 --- a/docs/articles_en/openvino-workflow-generative.rst +++ b/docs/articles_en/openvino-workflow-generative.rst @@ -40,7 +40,7 @@ options: `Check out the OpenVINO GenAI Quick-start Guide [PDF] `__ - .. tab-item:: Hugging Face integration + .. tab-item:: Optimum Intel (Hugging Face integration) | - Suggested for prototyping and, if the use case is not covered by OpenVINO GenAI, production. | - Bigger footprint and more dependencies. @@ -55,10 +55,16 @@ options: as well as conversion on the fly. For integration with the final product it may offer lower performance, though. -Note that the base version of OpenVINO may also be used to run generative AI. Although it may -offer a simpler environment, with fewer dependencies, it has significant limitations and a more -demanding implementation process. For reference, see -`the article on generative AI usage of OpenVINO 2024.6 `__. + .. 
tab-item:: Base OpenVINO (not recommended) + + Note that the base version of OpenVINO may also be used to run generative AI. Although it may + offer a simpler environment, with fewer dependencies, it has significant limitations and a more + demanding implementation process. + + To learn more, refer to the article for the 2024.6 OpenVINO version: + `Generative AI with Base OpenVINO `__ + + The advantages of using OpenVINO for generative model deployment: diff --git a/docs/articles_en/openvino-workflow/running-inference/inference-devices-and-modes/gpu-device/remote-tensor-api-gpu-plugin.rst b/docs/articles_en/openvino-workflow/running-inference/inference-devices-and-modes/gpu-device/remote-tensor-api-gpu-plugin.rst index f1018b82cf40ee..ce243dbd87f9ae 100644 --- a/docs/articles_en/openvino-workflow/running-inference/inference-devices-and-modes/gpu-device/remote-tensor-api-gpu-plugin.rst +++ b/docs/articles_en/openvino-workflow/running-inference/inference-devices-and-modes/gpu-device/remote-tensor-api-gpu-plugin.rst @@ -621,7 +621,7 @@ Two types of map entries are possible: descriptor and container. Descriptor sets the expected structure and possible parameter values of the map. For possible low-level properties and their description, refer to the header file: -`remote_properties.hpp `__. +`remote_properties.hpp `__. 
Examples ########################################################### diff --git a/docs/articles_en/openvino-workflow/running-inference/inference-devices-and-modes/query-device-properties.rst b/docs/articles_en/openvino-workflow/running-inference/inference-devices-and-modes/query-device-properties.rst index 913d0090b92a52..a704833b374f19 100644 --- a/docs/articles_en/openvino-workflow/running-inference/inference-devices-and-modes/query-device-properties.rst +++ b/docs/articles_en/openvino-workflow/running-inference/inference-devices-and-modes/query-device-properties.rst @@ -88,7 +88,7 @@ The ``ov::CompiledModel`` class is also extended to support the properties: * ``ov::CompiledModel::set_property`` For documentation about OpenVINO common device-independent properties, refer to -`properties.hpp (GitHub) `__. +`properties.hpp (GitHub) `__. Device-specific configuration keys can be found in a corresponding device folders, for example, ``openvino/runtime/intel_gpu/properties.hpp``. diff --git a/docs/dev/ov_dependencies.txt b/docs/dev/ov_dependencies.txt index cb64e4d5a6534c..71c9c906f9640d 100644 --- a/docs/dev/ov_dependencies.txt +++ b/docs/dev/ov_dependencies.txt @@ -1,6 +1,6 @@ # Copyright (C) 2024 Intel Corporation # SPDX-License-Identifier: Apache-2.0 -#This file provides a comprehensive list of all dependencies of OpenVINO 2024.6 +#This file provides a comprehensive list of all dependencies of OpenVINO 2025.0 #The file is part of the automation pipeline for posting OpenVINO IR models on the HuggingFace Hub, including OneBOM dependency checks. diff --git a/docs/sphinx_setup/index.rst b/docs/sphinx_setup/index.rst index d0da8fa4244dd6..b4e1039248f3a0 100644 --- a/docs/sphinx_setup/index.rst +++ b/docs/sphinx_setup/index.rst @@ -1,5 +1,5 @@ ============================ -OpenVINO 2024.6 +OpenVINO 2025.0 ============================ .. 
meta:: diff --git a/samples/cpp/benchmark/sync_benchmark/README.md b/samples/cpp/benchmark/sync_benchmark/README.md index b1eb079216064d..7cbc0f26624fa6 100644 --- a/samples/cpp/benchmark/sync_benchmark/README.md +++ b/samples/cpp/benchmark/sync_benchmark/README.md @@ -1,6 +1,6 @@ # Sync Benchmark C++ Sample -This sample demonstrates how to estimate performance of a model using Synchronous Inference Request API. It makes sense to use synchronous inference only in latency oriented scenarios. Models with static input shapes are supported. Unlike [demos](https://docs.openvino.ai/2024/omz_demos.html) this sample doesn't have other configurable command line arguments. Feel free to modify sample's source code to try out different options. +This sample demonstrates how to estimate performance of a model using Synchronous Inference Request API. It makes sense to use synchronous inference only in latency oriented scenarios. Models with static input shapes are supported. Unlike [demos](https://github.com/openvinotoolkit/open_model_zoo/tree/master/demos) this sample doesn't have other configurable command line arguments. Feel free to modify sample's source code to try out different options. 
For more detailed information on how this sample works, check the dedicated [article](https://docs.openvino.ai/2024/learn-openvino/openvino-samples/sync-benchmark.html) @@ -8,8 +8,8 @@ For more detailed information on how this sample works, check the dedicated [art | Options | Values | | -------------------------------| -------------------------------------------------------------------------------------------------------------------------| -| Validated Models | [yolo-v3-tf](https://docs.openvino.ai/2024/omz_models_model_yolo_v3_tf.html), | -| | [face-detection-0200](https://docs.openvino.ai/2024/omz_models_model_face_detection_0200.html) | +| Validated Models | [yolo-v3-tf](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/public/yolo-v3-tf), | +| | [face-detection-0200](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/intel/face-detection-0200) | | Model Format | OpenVINO™ toolkit Intermediate Representation | | | (\*.xml + \*.bin), ONNX (\*.onnx) | | Supported devices | [All](https://docs.openvino.ai/2024/about-openvino/compatibility-and-support/supported-devices.html) | diff --git a/samples/cpp/benchmark/throughput_benchmark/README.md b/samples/cpp/benchmark/throughput_benchmark/README.md index 43633498321c1e..bf8e7e6c8b6291 100644 --- a/samples/cpp/benchmark/throughput_benchmark/README.md +++ b/samples/cpp/benchmark/throughput_benchmark/README.md @@ -1,6 +1,6 @@ # Throughput Benchmark C++ Sample -This sample demonstrates how to estimate performance of a model using Asynchronous Inference Request API in throughput mode. Unlike [demos](https://docs.openvino.ai/2024/omz_demos.html) this sample doesn't have other configurable command line arguments. Feel free to modify sample's source code to try out different options. +This sample demonstrates how to estimate performance of a model using Asynchronous Inference Request API in throughput mode. 
Unlike [demos](https://github.com/openvinotoolkit/open_model_zoo/tree/master/demos) this sample doesn't have other configurable command line arguments. Feel free to modify sample's source code to try out different options. The reported results may deviate from what [benchmark_app](https://docs.openvino.ai/2024/learn-openvino/openvino-samples/benchmark-tool.html) reports. One example is model input precision for computer vision tasks. benchmark_app sets ``uint8``, while the sample uses default model precision which is usually ``float32``. @@ -10,8 +10,8 @@ For more detailed information on how this sample works, check the dedicated [art | Options | Values | | ----------------------------| -------------------------------------------------------------------------------------------------------------------------------| -| Validated Models | [yolo-v3-tf](https://docs.openvino.ai/2024/omz_models_model_yolo_v3_tf.html), | -| | [face-detection-](https://docs.openvino.ai/2024/omz_models_model_face_detection_0200.html) | +| Validated Models | [yolo-v3-tf](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/public/yolo-v3-tf), | +| | [face-detection-](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/intel/face-detection-0200) | | Model Format | OpenVINO™ toolkit Intermediate Representation | | | (\*.xml + \*.bin), ONNX (\*.onnx) | | Supported devices | [All](https://docs.openvino.ai/2024/about-openvino/compatibility-and-support/supported-devices.html) | diff --git a/samples/cpp/hello_reshape_ssd/README.md b/samples/cpp/hello_reshape_ssd/README.md index bc346e850cf5ba..1359b07fdf27b5 100644 --- a/samples/cpp/hello_reshape_ssd/README.md +++ b/samples/cpp/hello_reshape_ssd/README.md @@ -9,7 +9,7 @@ For more detailed information on how this sample works, check the dedicated [art | Options | Values | | ----------------------------| 
-----------------------------------------------------------------------------------------------------------------------------------------| -| Validated Models | [person-detection-retail-0013](https://docs.openvino.ai/2024/omz_models_model_person_detection_retail_0013.html) | +| Validated Models | [person-detection-retail-0013](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/intel/person-detection-retail-0013) | | Model Format | OpenVINO™ toolkit Intermediate Representation (\*.xml + \*.bin), ONNX (\*.onnx) | | Supported devices | [All](https://docs.openvino.ai/2024/about-openvino/compatibility-and-support/supported-devices.html) | | Other language realization | [Python](https://docs.openvino.ai/2024/learn-openvino/openvino-samples/hello-reshape-ssd.html) | diff --git a/samples/js/node/notebooks/hello-detection.nnb b/samples/js/node/notebooks/hello-detection.nnb index 60640b3bd042ea..e5c6f43f92a550 100644 --- a/samples/js/node/notebooks/hello-detection.nnb +++ b/samples/js/node/notebooks/hello-detection.nnb @@ -3,7 +3,7 @@ { "language": "markdown", "source": [ - "# Hello Object Detection\n\nA very basic introduction to using object detection models with OpenVINO™.\n\nThe [horizontal-text-detection-0001](https://docs.openvino.ai/2023.0/omz_models_model_horizontal_text_detection_0001.html) model from [Open Model Zoo](https://github.com/openvinotoolkit/open_model_zoo/) is used. It detects horizontal text in images and returns a blob of data in the shape of `[100, 5]`. Each detected text box is stored in the `[x_min, y_min, x_max, y_max, conf]` format, where the\n`(x_min, y_min)` are the coordinates of the top left bounding box corner, `(x_max, y_max)` are the coordinates of the bottom right bounding box corner and `conf` is the confidence for the predicted class." 
+ "# Hello Object Detection\n\nA very basic introduction to using object detection models with OpenVINO™.\n\nThe [horizontal-text-detection-0001](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/intel/horizontal-text-detection-0001) model from [Open Model Zoo](https://github.com/openvinotoolkit/open_model_zoo/) is used. It detects horizontal text in images and returns a blob of data in the shape of `[100, 5]`. Each detected text box is stored in the `[x_min, y_min, x_max, y_max, conf]` format, where the\n`(x_min, y_min)` are the coordinates of the top left bounding box corner, `(x_max, y_max)` are the coordinates of the bottom right bounding box corner and `conf` is the confidence for the predicted class." ], "outputs": [] }, diff --git a/samples/js/node/notebooks/hello-segmentation.nnb b/samples/js/node/notebooks/hello-segmentation.nnb index a7da34a2799edf..31873f1e1528df 100644 --- a/samples/js/node/notebooks/hello-segmentation.nnb +++ b/samples/js/node/notebooks/hello-segmentation.nnb @@ -3,7 +3,7 @@ { "language": "markdown", "source": [ - "# Hello Image Segmentation\n\nA very basic introduction to using segmentation models with OpenVINO™.\nIn this tutorial, a pre-trained [road-segmentation-adas-0001](https://docs.openvino.ai/2023.0/omz_models_model_road_segmentation_adas_0001.html) model from the [Open Model Zoo](https://github.com/openvinotoolkit/open_model_zoo/) is used. ADAS stands for Advanced Driver Assistance Services. The model recognizes four classes: background, road, curb and mark.\n" + "# Hello Image Segmentation\n\nA very basic introduction to using segmentation models with OpenVINO™.\nIn this tutorial, a pre-trained [road-segmentation-adas-0001](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/intel/road-segmentation-adas-0001) model from the [Open Model Zoo](https://github.com/openvinotoolkit/open_model_zoo/) is used. ADAS stands for Advanced Driver Assistance Services. 
The model recognizes four classes: background, road, curb and mark.\n" ], "outputs": [] }, diff --git a/samples/js/node/notebooks/hello-world.nnb b/samples/js/node/notebooks/hello-world.nnb index 83d4ca8bec29f5..4da8eb3b4b334c 100644 --- a/samples/js/node/notebooks/hello-world.nnb +++ b/samples/js/node/notebooks/hello-world.nnb @@ -3,7 +3,7 @@ { "language": "markdown", "source": [ - "# Hello Image Classification\n\nThis basic introduction to OpenVINO™ shows how to do inference with an image classification model.\n\n A pre-trained [MobileNetV3 model](https://docs.openvino.ai/2023.0/omz_models_model_mobilenet_v3_small_1_0_224_tf.html) from [Open Model Zoo](https://github.com/openvinotoolkit/open_model_zoo/) is used in this tutorial. For more information about how OpenVINO IR models are created, refer to the [TensorFlow to OpenVINO](../tensorflow-to-openvino/tensorflow-to-openvino.ipynb) tutorial.\n " + "# Hello Image Classification\n\nThis basic introduction to OpenVINO™ shows how to do inference with an image classification model.\n\n A pre-trained [MobileNetV3 model](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/public/mobilenet-v3-small-1.0-224-tf) from [Open Model Zoo](https://github.com/openvinotoolkit/open_model_zoo/) is used in this tutorial. For more information about how OpenVINO IR models are created, refer to the [TensorFlow to OpenVINO](../tensorflow-to-openvino/tensorflow-to-openvino.ipynb) tutorial.\n " ], "outputs": [] }, diff --git a/samples/python/benchmark/bert_benchmark/README.md b/samples/python/benchmark/bert_benchmark/README.md index 84ddcba1e598a4..2894c5f33d633b 100644 --- a/samples/python/benchmark/bert_benchmark/README.md +++ b/samples/python/benchmark/bert_benchmark/README.md @@ -1,6 +1,6 @@ # Bert Benchmark Python Sample -This sample demonstrates how to estimate performance of a Bert model using Asynchronous Inference Request API. 
Unlike [demos](https://docs.openvino.ai/2024/omz_demos.html) this sample doesn't have configurable command line arguments. Feel free to modify sample's source code to try out different options. +This sample demonstrates how to estimate performance of a Bert model using Asynchronous Inference Request API. Unlike [demos](https://github.com/openvinotoolkit/open_model_zoo/tree/master/demos) this sample doesn't have configurable command line arguments. Feel free to modify sample's source code to try out different options. For more detailed information on how this sample works, check the dedicated [article](https://docs.openvino.ai/2024/learn-openvino/openvino-samples/bert-benchmark.html) diff --git a/samples/python/benchmark/sync_benchmark/README.md b/samples/python/benchmark/sync_benchmark/README.md index 4ce1329277b5b8..c7604386625572 100644 --- a/samples/python/benchmark/sync_benchmark/README.md +++ b/samples/python/benchmark/sync_benchmark/README.md @@ -1,19 +1,19 @@ # Sync Benchmark Python Sample -This sample demonstrates how to estimate performance of a model using Synchronous Inference Request API. It makes sense to use synchronous inference only in latency oriented scenarios. Models with static input shapes are supported. Unlike [demos](https://docs.openvino.ai/2024/omz_demos.html) this sample doesn't have other configurable command line arguments. Feel free to modify sample's source code to try out different options. +This sample demonstrates how to estimate performance of a model using Synchronous Inference Request API. It makes sense to use synchronous inference only in latency oriented scenarios. Models with static input shapes are supported. Unlike [demos](https://github.com/openvinotoolkit/open_model_zoo/tree/master/demos) this sample doesn't have other configurable command line arguments. Feel free to modify sample's source code to try out different options. 
For more detailed information on how this sample works, check the dedicated [article](https://docs.openvino.ai/2024/learn-openvino/openvino-samples/sync-benchmark.html) ## Requirements -| Options | Values | -| ----------------------------| -----------------------------------------------------------------------------------------------------| -| Validated Models | [yolo-v3-tf](https://docs.openvino.ai/2024/omz_models_model_yolo_v3_tf.html), | -| | [face-detection-0200](https://docs.openvino.ai/2024/omz_models_model_face_detection_0200.html) | -| Model Format | OpenVINO™ toolkit Intermediate Representation | -| | (\*.xml + \*.bin), ONNX (\*.onnx) | -| Supported devices | [All](https://docs.openvino.ai/2024/about-openvino/compatibility-and-support/supported-devices.html) | -| Other language realization | [C++](https://docs.openvino.ai/2024/learn-openvino/openvino-samples/sync-benchmark.html) | +| Options | Values | +| ----------------------------| ----------------------------------------------------------------------------------------------------------------------| +| Validated Models | [yolo-v3-tf](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/public/yolo-v3-tf), | +| | [face-detection-0200](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/intel/face-detection-0200) | +| Model Format | OpenVINO™ toolkit Intermediate Representation | +| | (\*.xml + \*.bin), ONNX (\*.onnx) | +| Supported devices | [All](https://docs.openvino.ai/2024/about-openvino/compatibility-and-support/supported-devices.html) | +| Other language realization | [C++](https://docs.openvino.ai/2024/learn-openvino/openvino-samples/sync-benchmark.html) | The following Python API is used in the application: diff --git a/samples/python/benchmark/throughput_benchmark/README.md b/samples/python/benchmark/throughput_benchmark/README.md index 1ff02319ade062..5214c1190bb5e9 100644 --- a/samples/python/benchmark/throughput_benchmark/README.md +++ 
b/samples/python/benchmark/throughput_benchmark/README.md @@ -1,6 +1,6 @@ # Throughput Benchmark Python Sample -This sample demonstrates how to estimate performance of a model using Asynchronous Inference Request API in throughput mode. Unlike [demos](https://docs.openvino.ai/2024/omz_demos.html) this sample doesn't have other configurable command line arguments. Feel free to modify sample's source code to try out different options. +This sample demonstrates how to estimate performance of a model using Asynchronous Inference Request API in throughput mode. Unlike [demos](https://github.com/openvinotoolkit/open_model_zoo/tree/master/demos) this sample doesn't have other configurable command line arguments. Feel free to modify sample's source code to try out different options. The reported results may deviate from what [benchmark_app](https://docs.openvino.ai/2024/learn-openvino/openvino-samples/benchmark-tool.html) reports. One example is model input precision for computer vision tasks. benchmark_app sets uint8, while the sample uses default model precision which is usually float32. 
@@ -8,14 +8,14 @@ For more detailed information on how this sample works, check the dedicated [art ## Requirements -| Options | Values | -| -------------------------------| -----------------------------------------------------------------------------------------------------| -| Validated Models | [yolo-v3-tf](https://docs.openvino.ai/2024/omz_models_model_yolo_v3_tf.html) | -| | [face-detection-0200](https://docs.openvino.ai/2024/omz_models_model_face_detection_0200.html) | -| Model Format | OpenVINO™ toolkit Intermediate Representation | -| | (\*.xml + \*.bin), ONNX (\*.onnx) | -| Supported devices | [All](https://docs.openvino.ai/2024/about-openvino/compatibility-and-support/supported-devices.html) | -| Other language realization | [C++](https://docs.openvino.ai/2024/learn-openvino/openvino-samples/sync-benchmark.html) | +| Options | Values | +| -------------------------------| -----------------------------------------------------------------------------------------------------------------------| +| Validated Models | [yolo-v3-tf](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/public/yolo-v3-tf) | +| | [face-detection-0200](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/intel/face-detection-0200) | +| Model Format | OpenVINO™ toolkit Intermediate Representation | +| | (\*.xml + \*.bin), ONNX (\*.onnx) | +| Supported devices | [All](https://docs.openvino.ai/2024/about-openvino/compatibility-and-support/supported-devices.html) | +| Other language realization | [C++](https://docs.openvino.ai/2024/learn-openvino/openvino-samples/sync-benchmark.html) | The following Python API is used in the application: