Skip to content

Commit

Permalink
Merge branch 'openvinotoolkit:master' into quantile
Browse files — browse the repository at this point in the history
  • Loading branch information
geeky33 authored Jan 28, 2025
2 parents 4b8cc1c + 848a7c3 commit 96f8166
Show file tree
Hide file tree
Showing 299 changed files with 6,317 additions and 3,277 deletions.
3 changes: 1 addition & 2 deletions .github/workflows/debian_10_arm.yml
Original file line number Diff line number Diff line change
Expand Up @@ -113,8 +113,7 @@ jobs:

CPU_Functional_Tests:
name: CPU functional tests
if: ${{ 'false' }} # Ticket: 153289
# if: fromJSON(needs.smart_ci.outputs.affected_components).CPU.test
if: fromJSON(needs.smart_ci.outputs.affected_components).CPU.test
needs: [ Docker, Build, Smart_CI ]
uses: ./.github/workflows/job_cpu_functional_tests.yml
with:
Expand Down
121 changes: 121 additions & 0 deletions .github/workflows/job_keras3_backend.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,121 @@
# Reusable workflow: runs the Keras 3 test suite against the OpenVINO backend.
# Called from per-platform pipelines (e.g. ubuntu_22.yml) via workflow_call.
name: Keras 3 OpenVINO Backend

on:
  workflow_call:
    inputs:
      runner:
        description: 'Machine on which the tests would run'
        type: string
        required: true
      container:
        description: 'JSON to be converted to the value of the "container" configuration for the job'
        type: string
        required: false
        default: '{"image": null}'
      python-version:
        description: 'Python version to setup. E.g., "3.11"'
        type: string
        required: true

permissions: read-all

env:
  PIP_CACHE_PATH_LINUX: /mount/caches/pip/linux
  PIP_CACHE_PATH_WIN: "C:\\mount\\caches\\pip\\win"

jobs:
  Keras3_OpenVINO_Backend:
    name: Keras 3 OpenVINO Backend
    timeout-minutes: 10
    defaults:
      run:
        # Windows runners get PowerShell; all others get bash.
        shell: ${{ contains(inputs.runner, 'win') && 'pwsh' || 'bash' }}
    runs-on: ${{ inputs.runner }}
    container: ${{ fromJSON(inputs.container) }}
    env:
      INSTALL_DIR: ${{ github.workspace }}/install
      INSTALL_TEST_DIR: ${{ github.workspace }}/install/tests
      INSTALL_WHEELS_DIR: ${{ github.workspace }}/install/wheels
      KERAS_REPO: ${{ github.workspace }}/keras_repo
      KERAS_HOME: ${{ github.workspace }}/keras_repo/.github/workflows/config/openvino

    steps:
      # Needed as ${{ github.workspace }} is not working correctly when using Docker
      # NOTE(review): these overrides write ".../keras" while the job-level env
      # above uses ".../keras_repo" — confirm the divergence is intentional.
      - name: Setup Variables
        run: |
          echo "INSTALL_DIR=$GITHUB_WORKSPACE/install" >> "$GITHUB_ENV"
          echo "INSTALL_TEST_DIR=$GITHUB_WORKSPACE/install/tests" >> "$GITHUB_ENV"
          echo "INSTALL_WHEELS_DIR=$GITHUB_WORKSPACE/install/wheels" >> "$GITHUB_ENV"
          echo "KERAS_REPO=$GITHUB_WORKSPACE/keras" >> "$GITHUB_ENV"
          echo "KERAS_HOME=$GITHUB_WORKSPACE/keras/.github/workflows/config/openvino" >> "$GITHUB_ENV"

      # Sparse-checkout only the two composite actions this job needs.
      - name: Fetch setup_python and install wheels actions
        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
        timeout-minutes: 15
        with:
          sparse-checkout: |
            .github/actions/setup_python/action.yml
            .github/actions/install_ov_wheels/action.yml
          sparse-checkout-cone-mode: false
          path: 'openvino'

      - name: Clone Keras 3 repository
        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
        timeout-minutes: 15
        with:
          repository: 'keras-team/keras'
          path: ${{ env.KERAS_REPO }}
          ref: 'master'

      - name: Download OpenVINO artifacts (wheels)
        uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8
        with:
          # NOTE(review): "[...]" in a glob is a character class, not
          # alternation — verify this pattern matches the intended
          # openvino_wheels*/openvino_tokenizers* artifact names.
          pattern: openvino_[wheels|openvino_tokenizers]*
          path: ${{ env.INSTALL_WHEELS_DIR }}
          merge-multiple: true

      - name: Setup Python ${{ inputs.python-version }}
        uses: ./openvino/.github/actions/setup_python
        with:
          version: ${{ inputs.python-version }}
          pip-cache-path: ${{ runner.os == 'Linux' && env.PIP_CACHE_PATH_LINUX || env.PIP_CACHE_PATH_WIN }}
          should-setup-pip-paths: ${{ runner.os != 'macOS' }}
          self-hosted-runner: ${{ runner.os != 'macOS' }}

      - name: Install OpenVINO Python wheels
        uses: ./openvino/.github/actions/install_ov_wheels
        with:
          wheels-dir-path: ${{ env.INSTALL_WHEELS_DIR }}
          wheels-to-install: 'openvino openvino_tokenizers'

      - name: Install test dependencies
        working-directory: ${{ env.KERAS_REPO }}
        run: |
          pip install -r requirements.txt --upgrade
          # make sure that no other keras is installed via pip
          pip uninstall -y keras keras-nightly
          # manually set keras
          echo "PYTHONPATH=$KERAS_REPO:$PYTHONPATH" >> "$GITHUB_ENV"

      - name: Test integrations
        working-directory: ${{ env.KERAS_REPO }}
        run: |
          python integration_tests/import_test.py --junitxml=${{ env.INSTALL_TEST_DIR }}/TEST-keras3_integration1.xml
          python integration_tests/numerical_test.py --junitxml=${{ env.INSTALL_TEST_DIR }}/TEST-keras3_integration2.xml

      - name: Test with pytest
        working-directory: ${{ env.KERAS_REPO }}
        run: |
          IGNORE_FILE="keras/src/backend/openvino/excluded_tests.txt"
          IGNORE_ARGS=$(awk '{print "--ignore=" $0}' "$IGNORE_FILE")
          pytest keras --ignore keras/src/applications $IGNORE_ARGS --junitxml=${{ env.INSTALL_TEST_DIR }}/TEST-keras3_pytest.xml

      - name: Upload Test Results
        uses: actions/upload-artifact@65c4c4a1ddee5b72f698fdd19549f0f0fb45cf08 # v4.6.0
        if: ${{ !cancelled() }}
        with:
          name: test-results-keras3-backend
          path: |
            ${{ env.INSTALL_TEST_DIR }}/TEST*.html
            ${{ env.INSTALL_TEST_DIR }}/TEST*.xml
          if-no-files-found: 'warn'
13 changes: 12 additions & 1 deletion .github/workflows/ubuntu_22.yml
Original file line number Diff line number Diff line change
Expand Up @@ -344,6 +344,16 @@ jobs:
affected-components: ${{ needs.smart_ci.outputs.affected_components }}
python-version: '3.11'

Keras3_OpenVINO_Backend:
name: Keras 3 OpenVINO Backend Tests
if: fromJSON(needs.smart_ci.outputs.affected_components).Python_API.test
needs: [ Docker, Build, Smart_CI, Openvino_tokenizers ]
uses: ./.github/workflows/job_keras3_backend.yml
with:
runner: 'aks-linux-4-cores-16gb'
container: '{"image": "${{ fromJSON(needs.docker.outputs.images).ov_test.ubuntu_22_04_x64 }}", "volumes": ["/mount:/mount"]}'
python-version: '3.11'

CPU_Functional_Tests:
name: CPU functional tests
if: fromJSON(needs.smart_ci.outputs.affected_components).CPU.test
Expand Down Expand Up @@ -558,7 +568,8 @@ jobs:
Overall_Status:
name: ci/gha_overall_status
needs: [Smart_CI, Build, Debian_Packages, Samples, Conformance, ONNX_Runtime, CXX_Unit_Tests, Python_Unit_Tests, TensorFlow_Layer_Tests, Pytorch_Layer_Tests,
CPU_Functional_Tests, TensorFlow_Models_Tests_Precommit, PyTorch_Models_Tests, JAX_Models_Tests_Precommit, NVIDIA_Plugin, Openvino_tokenizers, iGPU]
CPU_Functional_Tests, TensorFlow_Models_Tests_Precommit, PyTorch_Models_Tests, JAX_Models_Tests_Precommit, NVIDIA_Plugin, Openvino_tokenizers, iGPU,
Keras3_OpenVINO_Backend]
if: ${{ always() }}
runs-on: ubuntu-latest
steps:
Expand Down
5 changes: 3 additions & 2 deletions src/common/snippets/src/utils/debug_caps_config.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -11,10 +11,11 @@ namespace snippets {
void DebugCapsConfig::readProperties() {
auto readEnv = [](const char* envVar) {
const char* env = std::getenv(envVar);
if (env && *env)
if (env && *env) {
return env;
}

return (const char*)nullptr;
return static_cast<const char*>(nullptr);
};

const char* envVarValue = nullptr;
Expand Down
2 changes: 1 addition & 1 deletion src/core/src/any.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -22,7 +22,7 @@ namespace ov {

bool util::equal(std::type_index lhs, std::type_index rhs) {
auto result = lhs == rhs;
#if (defined(__ANDROID__) || defined(__APPLE__)) && defined(__clang__)
#if (defined(__ANDROID__) || defined(__APPLE__) || defined(__CHROMIUMOS__)) && defined(__clang__)
if (!result) {
result = std::strcmp(lhs.name(), rhs.name()) == 0;
}
Expand Down
10 changes: 7 additions & 3 deletions src/plugins/intel_cpu/src/.clang-tidy
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,6 @@
### Scopes to be enabled:
#
# cppcoreguidelines-*,
# google-*,
# readability-*,
# modernize-*,
# bugprone-*,
Expand All @@ -26,7 +25,9 @@
# -bugprone-fold-init-type
# -bugprone-implicit-widening-of-multiplication-result
# -cppcoreguidelines-narrowing-conversions
# -google-readability-braces-around-statements
# -google-default-arguments,
# -google-explicit-constructor,
# -google-readability-casting,
# -readability-implicit-bool-conversion,
# -readability-magic-numbers, cppcoreguidelines-avoid-magic-numbers
# -readability-function-cognitive-complexity. Reasonable way to enforce splitting complex code into simple functions
Expand All @@ -35,6 +36,7 @@
Checks: >
-*,
performance-*,
google-*,
modernize-pass-by-value,
cppcoreguidelines-prefer-member-initializer,
-bugprone-easily-swappable-parameters,
Expand All @@ -44,9 +46,11 @@ Checks: >
-cppcoreguidelines-narrowing-conversions,
-cppcoreguidelines-pro-bounds-pointer-arithmetic,
-google-build-using-namespace,
-google-default-arguments,
-google-explicit-constructor,
-google-readability-casting,
-google-readability-todo,
-readability-braces-around-statements,
-google-readability-braces-around-statements,
-modernize-use-trailing-return-type,
-readability-identifier-length,
-readability-implicit-bool-conversion,
Expand Down
46 changes: 26 additions & 20 deletions src/plugins/intel_cpu/src/compiled_model.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -57,8 +57,9 @@ CompiledModel::CompiledModel(const std::shared_ptr<ov::Model>& model,
m_sub_memory_manager(std::move(sub_memory_manager)) {
m_mutex = std::make_shared<std::mutex>();
const auto& core = m_plugin->get_core();
if (!core)
if (!core) {
OPENVINO_THROW("Unable to get API version. Core is unavailable");
}

IStreamsExecutor::Config executor_config;
if (m_cfg.exclusiveAsyncRequests) {
Expand All @@ -81,10 +82,12 @@ CompiledModel::CompiledModel(const std::shared_ptr<ov::Model>& model,
m_callback_executor = m_task_executor;
}

if (m_task_executor)
if (m_task_executor) {
set_task_executor(m_task_executor);
if (m_callback_executor)
}
if (m_callback_executor) {
set_callback_executor(m_callback_executor);
}

int streams = std::max(1, executor_config.get_streams());
std::vector<Task> tasks;
Expand Down Expand Up @@ -208,15 +211,17 @@ std::shared_ptr<ov::IAsyncInferRequest> CompiledModel::create_infer_request() co
}

std::shared_ptr<const ov::Model> CompiledModel::get_runtime_model() const {
if (m_graphs.empty())
if (m_graphs.empty()) {
OPENVINO_THROW("No graph was found");
}

return get_graph()._graph.dump();
}

ov::Any CompiledModel::get_property(const std::string& name) const {
if (m_graphs.empty())
if (m_graphs.empty()) {
OPENVINO_THROW("No graph was found");
}

if (name == ov::loaded_from_cache) {
return m_loaded_from_cache;
Expand Down Expand Up @@ -275,30 +280,30 @@ ov::Any CompiledModel::get_property(const std::string& name) const {
return decltype(ov::model_name)::value_type(modelName);
} else if (name == ov::optimal_number_of_infer_requests) {
const auto streams = config.streamExecutorConfig.get_streams();
return decltype(ov::optimal_number_of_infer_requests)::value_type(
return static_cast<decltype(ov::optimal_number_of_infer_requests)::value_type>(
streams > 0 ? streams : 1); // ov::optimal_number_of_infer_requests has no negative values
} else if (name == ov::num_streams) {
const auto streams = config.streamExecutorConfig.get_streams();
return decltype(ov::num_streams)::value_type(
streams); // ov::num_streams has special negative values (AUTO = -1, NUMA = -2)
} else if (name == ov::inference_num_threads) {
const auto num_threads = config.streamExecutorConfig.get_threads();
return decltype(ov::inference_num_threads)::value_type(num_threads);
return static_cast<decltype(ov::inference_num_threads)::value_type>(num_threads);
} else if (name == ov::enable_profiling.name()) {
const bool perfCount = config.collectPerfCounters;
return decltype(ov::enable_profiling)::value_type(perfCount);
return static_cast<decltype(ov::enable_profiling)::value_type>(perfCount);
} else if (name == ov::hint::inference_precision) {
return decltype(ov::hint::inference_precision)::value_type(config.inferencePrecision);
} else if (name == ov::hint::performance_mode) {
return decltype(ov::hint::performance_mode)::value_type(config.hintPerfMode);
return static_cast<decltype(ov::hint::performance_mode)::value_type>(config.hintPerfMode);
} else if (name == ov::log::level) {
return decltype(ov::log::level)::value_type(config.logLevel);
return static_cast<decltype(ov::log::level)::value_type>(config.logLevel);
} else if (name == ov::hint::enable_cpu_pinning.name()) {
const bool use_pin = config.enableCpuPinning;
return decltype(ov::hint::enable_cpu_pinning)::value_type(use_pin);
return static_cast<decltype(ov::hint::enable_cpu_pinning)::value_type>(use_pin);
} else if (name == ov::hint::enable_cpu_reservation.name()) {
const bool use_reserve = config.enableCpuReservation;
return decltype(ov::hint::enable_cpu_reservation)::value_type(use_reserve);
return static_cast<decltype(ov::hint::enable_cpu_reservation)::value_type>(use_reserve);
} else if (name == ov::hint::scheduling_core_type) {
const auto stream_mode = config.schedulingCoreType;
return stream_mode;
Expand All @@ -307,31 +312,32 @@ ov::Any CompiledModel::get_property(const std::string& name) const {
return distribution_policy;
} else if (name == ov::hint::enable_hyper_threading.name()) {
const bool use_ht = config.enableHyperThreading;
return decltype(ov::hint::enable_hyper_threading)::value_type(use_ht);
return static_cast<decltype(ov::hint::enable_hyper_threading)::value_type>(use_ht);
} else if (name == ov::hint::execution_mode) {
return config.executionMode;
} else if (name == ov::hint::num_requests) {
return decltype(ov::hint::num_requests)::value_type(config.hintNumRequests);
return static_cast<decltype(ov::hint::num_requests)::value_type>(config.hintNumRequests);
} else if (name == ov::execution_devices) {
return decltype(ov::execution_devices)::value_type{m_plugin->get_device_name()};
} else if (name == ov::intel_cpu::denormals_optimization) {
return decltype(ov::intel_cpu::denormals_optimization)::value_type(config.denormalsOptMode ==
Config::DenormalsOptMode::DO_On);
return static_cast<decltype(ov::intel_cpu::denormals_optimization)::value_type>(
config.denormalsOptMode == Config::DenormalsOptMode::DO_On);
} else if (name == ov::intel_cpu::sparse_weights_decompression_rate) {
return decltype(ov::intel_cpu::sparse_weights_decompression_rate)::value_type(
return static_cast<decltype(ov::intel_cpu::sparse_weights_decompression_rate)::value_type>(
config.fcSparseWeiDecompressionRate);
} else if (name == ov::hint::dynamic_quantization_group_size) {
return decltype(ov::hint::dynamic_quantization_group_size)::value_type(config.fcDynamicQuantizationGroupSize);
return static_cast<decltype(ov::hint::dynamic_quantization_group_size)::value_type>(
config.fcDynamicQuantizationGroupSize);
} else if (name == ov::hint::kv_cache_precision) {
return decltype(ov::hint::kv_cache_precision)::value_type(config.kvCachePrecision);
} else if (name == ov::key_cache_precision) {
return decltype(ov::key_cache_precision)::value_type(config.keyCachePrecision);
} else if (name == ov::value_cache_precision) {
return decltype(ov::value_cache_precision)::value_type(config.valueCachePrecision);
} else if (name == ov::key_cache_group_size) {
return decltype(ov::key_cache_group_size)::value_type(config.keyCacheGroupSize);
return static_cast<decltype(ov::key_cache_group_size)::value_type>(config.keyCacheGroupSize);
} else if (name == ov::value_cache_group_size) {
return decltype(ov::value_cache_group_size)::value_type(config.valueCacheGroupSize);
return static_cast<decltype(ov::value_cache_group_size)::value_type>(config.valueCacheGroupSize);
}
OPENVINO_THROW("Unsupported property: ", name);
}
Expand Down
Loading

0 comments on commit 96f8166

Please sign in to comment.