From 65b8bdf892fd26ee956c568267ce967d4f5a475f Mon Sep 17 00:00:00 2001 From: River Li Date: Tue, 5 Dec 2023 22:18:16 +0800 Subject: [PATCH 01/13] [CPU Tests] migrate sub_graph test cases - part 2 (#21379) * [CPU Tests] migrate sub_graph test case - part 2 * remove unused header files --------- Co-authored-by: Vitaliy Urusovskij --- .../src/arm/convert_group_conv.cpp | 45 ++++---- .../src/arm/convert_group_conv1d.cpp | 47 +++----- .../src/arm/convert_reduce_multi_axis.cpp | 60 ++++------ .../subgraph_tests/src/eltwise_caching.cpp | 53 ++++----- .../subgraph_tests/src/eltwise_chain.cpp | 92 +++++++-------- .../subgraph_tests/src/rotary_pos_emb.cpp | 9 +- .../subgraph_tests/src/seq_native_order.cpp | 108 +++++++++--------- .../subgraph_tests/src/static_zero_dims.cpp | 31 +++-- 8 files changed, 212 insertions(+), 233 deletions(-) diff --git a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/arm/convert_group_conv.cpp b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/arm/convert_group_conv.cpp index 48aea1512428e5..2a631c344162f2 100644 --- a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/arm/convert_group_conv.cpp +++ b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/arm/convert_group_conv.cpp @@ -2,28 +2,14 @@ // SPDX-License-Identifier: Apache-2.0 // -#include -#include -#include -#include -#include -#include -#include -#include "common_test_utils/common_utils.hpp" -#include -#include "functional_test_utils/skip_tests_config.hpp" +#include "common_test_utils/node_builders/group_convolution.hpp" +#include "shared_test_classes/base/ov_subgraph.hpp" #include "test_utils/cpu_test_utils.hpp" -#include "cpp_interfaces/interface/ie_internal_plugin_config.hpp" - -#include "test_utils/cpu_test_utils.hpp" -#include "test_utils/convolution_params.hpp" using namespace CPUTestUtils; -using namespace ov::test; -using namespace ngraph; -using namespace ngraph::helpers; -namespace CPUSubgraphTestsDefinitions { +namespace ov { +namespace test { typedef std::tuple groupConvLayerCPUTestParamsSet; @@ -60,15 +46,23 @@ class GroupConvToConvTransformationCPUTest: public testing::WithParamInterface(ngraph::element::f32, shape)); + inputParams.push_back(std::make_shared(ov::element::f32, shape)); } - conv = builder::makeGroupConvolution(inputParams[0], element::f32, kernelSize, strides, padBegin, padEnd, dilation, - paddingType, numOutChannels, numOfGroups); + conv = utils::make_group_convolution(inputParams[0], + element::f32, + kernelSize, + strides, + padBegin, + padEnd, + dilation, + paddingType, + numOutChannels, + numOfGroups); ResultVector results; - results.push_back(std::make_shared(conv)); + results.push_back(std::make_shared(conv)); - function = std::make_shared(results, inputParams, "groupConvolution"); + function = std::make_shared(results, inputParams, "groupConvolution"); } }; @@ -96,5 +90,6 @@ const auto groupConvTransformationParams = ::testing::Combine(::testing::ValuesI INSTANTIATE_TEST_SUITE_P(smoke_GroupConvToConvTransformationTest, GroupConvToConvTransformationCPUTest, groupConvTransformationParams, GroupConvToConvTransformationCPUTest::getTestCaseName); -} // namespace -} // namespace CPUSubgraphTestsDefinitions +} // namespace +} // namespace test +} // namespace ov diff --git a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/arm/convert_group_conv1d.cpp b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/arm/convert_group_conv1d.cpp index 79a21d4c8bd854..9beb3be72f5882 100644 --- 
a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/arm/convert_group_conv1d.cpp +++ b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/arm/convert_group_conv1d.cpp @@ -2,35 +2,23 @@ // SPDX-License-Identifier: Apache-2.0 // -#include -#include -#include -#include -#include -#include -#include #include "common_test_utils/common_utils.hpp" -#include -#include "functional_test_utils/skip_tests_config.hpp" +#include "common_test_utils/node_builders/convolution.hpp" +#include "common_test_utils/node_builders/group_convolution.hpp" +#include "common_test_utils/ov_tensor_utils.hpp" +#include "shared_test_classes/base/ov_subgraph.hpp" #include "test_utils/cpu_test_utils.hpp" -#include "cpp_interfaces/interface/ie_internal_plugin_config.hpp" -#include "test_utils/cpu_test_utils.hpp" -#include "test_utils/convolution_params.hpp" - -using namespace InferenceEngine; using namespace CPUTestUtils; -using namespace ov::test; -using namespace ngraph; -using namespace ngraph::helpers; -namespace CPUSubgraphTestsDefinitions { +namespace ov { +namespace test { -typedef std::tuple conv1dConvertCPUTestParamsSet; +typedef std::tuple conv1dConvertCPUTestParamsSet; -class Conv1dConvertTransformationCPUTest: public testing::WithParamInterface, - virtual public SubgraphBaseTest, public CPUTestsBase { +class Conv1dConvertTransformationCPUTest : public testing::WithParamInterface, + virtual public SubgraphBaseTest, + public CPUTestsBase { public: static std::string getTestCaseName(testing::TestParamInfo obj) { InputShape inputShapes; @@ -65,16 +53,16 @@ class Conv1dConvertTransformationCPUTest: public testing::WithParamInterface(ngraph::element::f32, shape)); + inputParams.push_back(std::make_shared(ov::element::f32, shape)); } switch (convType) { case nodeType::convolution : { - conv = builder::makeConvolution(inputParams[0], element::f32, kernelSize, strides, padBegin, padEnd, dilation, + conv = utils::make_convolution(inputParams[0], element::f32, kernelSize, strides, padBegin, padEnd, dilation, paddingType, numOutChannels); break; } case nodeType::groupConvolution : { - conv = builder::makeGroupConvolution(inputParams[0], element::f32, kernelSize, strides, padBegin, padEnd, dilation, + conv = utils::make_group_convolution(inputParams[0], element::f32, kernelSize, strides, padBegin, padEnd, dilation, paddingType, numOutChannels, numOfGroups); break; } @@ -84,9 +72,9 @@ class Conv1dConvertTransformationCPUTest: public testing::WithParamInterface(conv)); + results.push_back(std::make_shared(conv)); - function = std::make_shared(results, inputParams, "convolution"); + function = std::make_shared(results, inputParams, "convolution"); } }; @@ -132,5 +120,6 @@ const auto groupConvTransformationParams = ::testing::Combine(::testing::ValuesI INSTANTIATE_TEST_SUITE_P(smoke_GroupConvToConvTransformationTest, Conv1dConvertTransformationCPUTest, groupConvTransformationParams, Conv1dConvertTransformationCPUTest::getTestCaseName); -} // namespace -} // namespace CPUSubgraphTestsDefinitions +} // namespace +} // namespace test +} // namespace ov diff --git a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/arm/convert_reduce_multi_axis.cpp b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/arm/convert_reduce_multi_axis.cpp index 3bb5a06c7d9f2c..2b837a4fd275c4 100644 --- a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/arm/convert_reduce_multi_axis.cpp +++ b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/arm/convert_reduce_multi_axis.cpp @@ -2,35 +2,22 @@ // 
SPDX-License-Identifier: Apache-2.0 // -#include -#include -#include -#include -#include -#include -#include #include "common_test_utils/common_utils.hpp" -#include -#include "functional_test_utils/skip_tests_config.hpp" +#include "common_test_utils/node_builders/reduce.hpp" +#include "common_test_utils/ov_tensor_utils.hpp" +#include "shared_test_classes/base/ov_subgraph.hpp" #include "test_utils/cpu_test_utils.hpp" -#include "cpp_interfaces/interface/ie_internal_plugin_config.hpp" -#include "test_utils/cpu_test_utils.hpp" -#include "test_utils/convolution_params.hpp" - -using namespace InferenceEngine; using namespace CPUTestUtils; -using namespace ov::test; -using namespace ngraph; -using namespace ngraph::helpers; -namespace CPUSubgraphTestsDefinitions { +namespace ov { +namespace test { -typedef std::tuple< - std::vector, // Axis to reduce order - ngraph::helpers::ReductionType, // Reduce operation type - std::vector // Input shapes -> reduceConvertCPUTestParamsSet; +typedef std::tuple, // Axis to reduce order + ov::test::utils::ReductionType, // Reduce operation type + std::vector // Input shapes + > + reduceConvertCPUTestParamsSet; class reduceTransformationCPUTest: public testing::WithParamInterface, virtual public SubgraphBaseTest, public CPUTestsBase { @@ -38,7 +25,7 @@ class reduceTransformationCPUTest: public testing::WithParamInterface obj) { std::vector inputShapes; std::vector axes; - ReductionType reductionType; + utils::ReductionType reductionType; std::tie(axes, reductionType, inputShapes) = obj.param; std::ostringstream result; @@ -65,18 +52,18 @@ class reduceTransformationCPUTest: public testing::WithParamInterface(ngraph::element::f32, shape)); + params.push_back(std::make_shared(ov::element::f32, shape)); } std::vector shapeAxes; shapeAxes.push_back(axes.size()); - auto reductionAxesNode = std::dynamic_pointer_cast( - std::make_shared(ngraph::element::Type_t::i64, ngraph::Shape(shapeAxes), axes)); + auto reductionAxesNode = std::dynamic_pointer_cast( + std::make_shared(ov::element::Type_t::i64, ov::Shape(shapeAxes), axes)); - const auto reduce = ngraph::builder::makeReduce(params[0], reductionAxesNode, keepDims, reductionType); + const auto reduce = utils::make_reduce(params[0], reductionAxesNode, keepDims, reductionType); function = makeNgraphFunction(ElementType::f32, params, reduce, "Reduce"); } private: - ngraph::helpers::ReductionType reductionType; + utils::ReductionType reductionType; }; TEST_P(reduceTransformationCPUTest, CompareWithRefs) { @@ -88,11 +75,11 @@ namespace { std::vector> inputShapes = { {{{}, {{2, 19, 2, 9}}}} }; -const std::vector reductionTypes = { - ReductionType::Min, - ReductionType::Max, - ReductionType::Sum, - ReductionType::Prod +const std::vector reductionTypes = { + utils::ReductionType::Min, + utils::ReductionType::Max, + utils::ReductionType::Sum, + utils::ReductionType::Prod }; const std::vector> axes = { {0, 1}, @@ -114,5 +101,6 @@ const auto reduceTransformationParams = ::testing::Combine(::testing::ValuesIn(a INSTANTIATE_TEST_SUITE_P(smoke_GroupConvToConvTransformationTest, reduceTransformationCPUTest, reduceTransformationParams, reduceTransformationCPUTest::getTestCaseName); -} // namespace -} // namespace CPUSubgraphTestsDefinitions +} // namespace +} // namespace test +} // namespace ov diff --git a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/eltwise_caching.cpp b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/eltwise_caching.cpp index d5f5f3ead3cac9..487235fa8dd15b 100644 --- 
a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/eltwise_caching.cpp +++ b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/eltwise_caching.cpp @@ -32,23 +32,23 @@ // |output| // -------- -#include -#include -#include -#include -#include -#include -#include #include "common_test_utils/common_utils.hpp" -#include -#include "functional_test_utils/skip_tests_config.hpp" +#include "common_test_utils/node_builders/eltwise.hpp" +#include "common_test_utils/ov_tensor_utils.hpp" +#include "ov_models/builders.hpp" +#include "shared_test_classes/base/ov_subgraph.hpp" #include "test_utils/cpu_test_utils.hpp" +#include +#include +#include +#include + using namespace CPUTestUtils; -using ngraph::helpers::EltwiseTypes; -using namespace ov::test; -namespace CPUSubgraphTestsDefinitions { +namespace ov { +namespace test { +using namespace ov::test::utils; using InputShapesTuple = std::tuple< std::vector, // eltwise input shapes @@ -122,7 +122,7 @@ class EltwiseCacheTest : public testing::WithParamInterface& targetInputStaticShapes) override { + void generate_inputs(const std::vector& targetInputStaticShapes) override { inputs.clear(); const auto& funcInputs = function->inputs(); for (size_t i = 0; i < funcInputs.size(); ++i) { @@ -155,31 +155,31 @@ class EltwiseCacheTest : public testing::WithParamInterface> ngraphInputs; + ov::ParameterVector paramVec; + std::vector> inputNodes; for (size_t i = 0; i < inputDynamicShapes.size(); i++) { - ngraphParam.push_back(std::make_shared(inputPrecisions[i], inputDynamicShapes[i])); - ngraphInputs.push_back(ngraphParam.back()); + paramVec.push_back(std::make_shared(inputPrecisions[i], inputDynamicShapes[i])); + inputNodes.push_back(paramVec.back()); } - auto lastNode0 = ngraph::builder::makeEltwise(ngraphParam[0], ngraphParam[1], eltwiseOpTypes[0]); + auto lastNode0 = utils::makeEltwise(paramVec[0], paramVec[1], eltwiseOpTypes[0]); lastNode0->get_rt_info() = getCPUInfo(); - auto lastNode1 = ngraph::builder::makeEltwise(ngraphParam[2], ngraphParam[3], eltwiseOpTypes[1]); + auto lastNode1 = utils::makeEltwise(paramVec[2], paramVec[3], eltwiseOpTypes[1]); lastNode1->get_rt_info() = getCPUInfo(); if (withQuantization) { - lastNode0 = ngraph::builder::makeFakeQuantize(lastNode0, ::ngraph::element::Type(::ngraph::element::Type_t::f32), + lastNode0 = ngraph::builder::makeFakeQuantize(lastNode0, ov::element::Type(ov::element::Type_t::f32), 256, fqInputShapes[0]); - lastNode1 = ngraph::builder::makeFakeQuantize(lastNode1, ::ngraph::element::Type(::ngraph::element::Type_t::f32), + lastNode1 = ngraph::builder::makeFakeQuantize(lastNode1, ov::element::Type(ov::element::Type_t::f32), 256, fqInputShapes[1]); } if (needReshape) { - auto reshapeConstNode = ngraph::builder::makeConstant(::ngraph::element::Type(::ngraph::element::Type_t::i32), + auto reshapeConstNode = ngraph::builder::makeConstant(ov::element::Type(ov::element::Type_t::i32), {reshapeShape.size()}, reshapeShape); - lastNode1 = std::make_shared(lastNode1, reshapeConstNode, false); + lastNode1 = std::make_shared(lastNode1, reshapeConstNode, false); } auto concat = std::make_shared(ov::NodeVector{lastNode0, lastNode1}, 0); - function = std::make_shared(concat, ngraphParam, "eltwise_cache"); + function = std::make_shared(concat, paramVec, "eltwise_cache"); } }; @@ -1328,5 +1328,6 @@ INSTANTIATE_TEST_SUITE_P(smoke_EltwiseCache_7D_dyn, EltwiseCacheTest, ::testing::Values(cpuParams_empty)), EltwiseCacheTest::getTestCaseName); -} // namespace -} // namespace CPUSubgraphTestsDefinitions +} // namespace +} // 
namespace test +} // namespace ov \ No newline at end of file diff --git a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/eltwise_chain.cpp b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/eltwise_chain.cpp index ef20c4ffd0518d..b4bff5634bcac6 100644 --- a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/eltwise_chain.cpp +++ b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/eltwise_chain.cpp @@ -6,35 +6,36 @@ #include #include #include -#include -#include -#include + +#include "shared_test_classes/base/ov_subgraph.hpp" +#include "ov_models/builders.hpp" #include "common_test_utils/common_utils.hpp" -#include -#include "functional_test_utils/skip_tests_config.hpp" +#include "common_test_utils/ov_tensor_utils.hpp" #include "test_utils/cpu_test_utils.hpp" +#include "common_test_utils/node_builders/eltwise.hpp" using namespace CPUTestUtils; -using ngraph::helpers::EltwiseTypes; -using namespace ov::test; -namespace CPUSubgraphTestsDefinitions { -typedef std::tuple< - std::vector, // Input shapes - ngraph::helpers::InputLayerType, // Secondary input type - std::vector, // Input precisions - std::vector, // Eltwise operations - bool, // With quantization - std::string // Device name -> EltwiseChainTuple; +namespace ov { +namespace test { +using namespace ov::test::utils; + +typedef std::tuple, // Input shapes + InputLayerType, // Secondary input type + std::vector, // Input precisions + std::vector, // Eltwise operations + bool, // With quantization + std::string // Device name + > + EltwiseChainTuple; class EltwiseChainTest : public testing::WithParamInterface, virtual public SubgraphBaseTest { public: static std::string getTestCaseName(const testing::TestParamInfo &obj) { std::vector inputShapes; - ngraph::helpers::InputLayerType secondaryInputType; + InputLayerType secondaryInputType; std::vector inputPrecisions; std::vector eltwiseOpTypes; bool withQuantization; @@ -65,7 +66,7 @@ class EltwiseChainTest : public testing::WithParamInterface, return results.str(); } - void generate_inputs(const std::vector& targetInputStaticShapes) override { + void generate_inputs(const std::vector& targetInputStaticShapes) override { inputs.clear(); const auto& funcInputs = function->inputs(); for (size_t i = 0; i < funcInputs.size(); ++i) { @@ -81,7 +82,7 @@ class EltwiseChainTest : public testing::WithParamInterface, abs_threshold = 0.1f; std::vector inputShapes; - ngraph::helpers::InputLayerType secondaryInputType; + InputLayerType secondaryInputType; std::vector inputPrecisions; std::vector eltwiseOpTypes; bool withQuantization; @@ -89,27 +90,27 @@ class EltwiseChainTest : public testing::WithParamInterface, init_input_shapes(inputShapes); - ngraph::ParameterVector ngraphParam; - std::vector> ngraphInputs; + ov::ParameterVector paramVec; + std::vector> inputNodes; if (secondaryInputType == ngraph::helpers::InputLayerType::PARAMETER) { for (size_t i = 0; i < inputDynamicShapes.size(); i++) { - ngraphParam.push_back(std::make_shared(inputPrecisions[i], inputDynamicShapes[i])); - ngraphInputs.push_back(ngraphParam.back()); + paramVec.push_back(std::make_shared(inputPrecisions[i], inputDynamicShapes[i])); + inputNodes.push_back(paramVec.back()); } } else { - ngraphParam = ov::ParameterVector {std::make_shared(inputPrecisions[0], inputDynamicShapes.front())}; + paramVec = ov::ParameterVector {std::make_shared(inputPrecisions[0], inputDynamicShapes.front())}; for (size_t i = 1; i < inputPrecisions.size(); i++) { - std::vector 
ngraphInput1Data(ngraph::shape_size(targetStaticShapes[0][i])); - ngraphInputs.push_back(ngraph::builder::makeConstant(inputPrecisions[i], targetStaticShapes[0][i], - ngraphInput1Data, true)); + std::vector input1Data(ov::shape_size(targetStaticShapes[0][i])); + inputNodes.push_back( + ngraph::builder::makeConstant(inputPrecisions[i], targetStaticShapes[0][i], input1Data, true)); } } if (withQuantization) { - std::vector> eltwiseOps; - eltwiseOps.push_back(ngraph::builder::makeEltwise(ngraphParam[0], ngraphInputs[0], eltwiseOpTypes[0])); + std::vector> eltwiseOps; + eltwiseOps.push_back(makeEltwise(paramVec[0], inputNodes[0], eltwiseOpTypes[0])); for (size_t i = 1; i < eltwiseOpTypes.size() - 1; i++) { - eltwiseOps.push_back(ngraph::builder::makeEltwise(eltwiseOps[eltwiseOps.size() - 1], ngraphInputs[i], eltwiseOpTypes[i])); + eltwiseOps.push_back(makeEltwise(eltwiseOps[eltwiseOps.size() - 1], inputNodes[i], eltwiseOpTypes[i])); } std::vector constShape(targetStaticShapes[0][0].size(), 1); @@ -118,19 +119,19 @@ class EltwiseChainTest : public testing::WithParamInterface, ::ngraph::element::Type(::ngraph::element::Type_t::f32), 256, constShape); - eltwiseOps.push_back(ngraph::builder::makeEltwise(fq, ngraphInputs[eltwiseOpTypes.size() - 1], eltwiseOpTypes[eltwiseOpTypes.size() - 1])); + eltwiseOps.push_back(makeEltwise(fq, inputNodes[eltwiseOpTypes.size() - 1], eltwiseOpTypes[eltwiseOpTypes.size() - 1])); - ngraph::ResultVector results{std::make_shared(eltwiseOps[eltwiseOps.size() - 1])}; - function = std::make_shared(results, ngraphParam, "eltwise_chain_fq"); + ov::ResultVector results{std::make_shared(eltwiseOps[eltwiseOps.size() - 1])}; + function = std::make_shared(results, paramVec, "eltwise_chain_fq"); } else { - std::vector> eltwiseOps; - eltwiseOps.push_back(ngraph::builder::makeEltwise(ngraphParam[0], ngraphInputs[0], eltwiseOpTypes[0])); + std::vector> eltwiseOps; + eltwiseOps.push_back(makeEltwise(paramVec[0], inputNodes[0], eltwiseOpTypes[0])); for (size_t i = 1; i < eltwiseOpTypes.size(); i++) { - eltwiseOps.push_back(ngraph::builder::makeEltwise(eltwiseOps[eltwiseOps.size() - 1], ngraphInputs[i], eltwiseOpTypes[i])); + eltwiseOps.push_back(makeEltwise(eltwiseOps[eltwiseOps.size() - 1], inputNodes[i], eltwiseOpTypes[i])); } - ngraph::ResultVector results{std::make_shared(eltwiseOps[eltwiseOps.size() - 1])}; - function = std::make_shared(results, ngraphParam, "eltwise_chain"); + ov::ResultVector results{std::make_shared(eltwiseOps[eltwiseOps.size() - 1])}; + function = std::make_shared(results, paramVec, "eltwise_chain"); } } }; @@ -141,7 +142,7 @@ TEST_P(EltwiseChainTest, CompareWithRefs) { namespace { -std::vector> inputShapes = { +std::vector> inputShapes = { {{1, 1, 2, 3}, {1, 1, 2, 3}, {1, 1, 2, 3}, {1, 1, 2, 3}}, {{1, 48, 5, 6}, {1, 48, 1, 1}, {1, 48, 5, 6}, {1, 1, 5, 6}}, {{1, 72, 28, 28}, {1, 72, 1, 1}, {1, 72, 1, 1}, {1, 72, 1, 1}}, @@ -166,14 +167,14 @@ std::vector> eltwiseOps = { INSTANTIATE_TEST_SUITE_P(smoke_EltwiseChain, EltwiseChainTest, ::testing::Combine( ::testing::ValuesIn(static_shapes_to_test_representation(inputShapes)), - ::testing::Values(ngraph::helpers::InputLayerType::CONSTANT), + ::testing::Values(InputLayerType::CONSTANT), ::testing::ValuesIn(inputPrecisions), ::testing::ValuesIn(eltwiseOps), ::testing::Values(false), ::testing::Values(ov::test::utils::DEVICE_CPU)), EltwiseChainTest::getTestCaseName); -std::vector> inputShapesFQ = { +std::vector> inputShapesFQ = { {{1, 2, 2, 3}, {1, 2, 2, 3}, {1, 2, 2, 3}, {1, 2, 2, 3}}, {{2, 33, 5, 5}, {2, 33, 5, 
5}, {2, 33, 1, 5}, {2, 33, 5, 5}}, {{2, 33, 5, 17}, {2, 33, 5, 17}, {2, 33, 5, 17}, {2, 33, 5, 17}}, @@ -197,7 +198,7 @@ std::vector> inputPrecisionsFQ { INSTANTIATE_TEST_SUITE_P(smoke_EltwiseChainWithFQ, EltwiseChainTest, ::testing::Combine( ::testing::ValuesIn(static_shapes_to_test_representation(inputShapesFQ)), - ::testing::Values(ngraph::helpers::InputLayerType::CONSTANT), + ::testing::Values(InputLayerType::CONSTANT), ::testing::ValuesIn(inputPrecisionsFQ), ::testing::ValuesIn(eltwiseOps), ::testing::Values(true), @@ -455,12 +456,13 @@ std::vector> inputShapes_dyn = { INSTANTIATE_TEST_SUITE_P(smoke_EltwiseChain_dyn, EltwiseChainTest, ::testing::Combine( ::testing::ValuesIn(inputShapes_dyn), - ::testing::Values(ngraph::helpers::InputLayerType::PARAMETER), + ::testing::Values(InputLayerType::PARAMETER), ::testing::ValuesIn(inputPrecisions), ::testing::ValuesIn(eltwiseOps), ::testing::Values(false), ::testing::Values(ov::test::utils::DEVICE_CPU)), EltwiseChainTest::getTestCaseName); -} // namespace -} // namespace CPUSubgraphTestsDefinitions +} // namespace +} // namespace test +} // namespace ov diff --git a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/rotary_pos_emb.cpp b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/rotary_pos_emb.cpp index 28ed4371c6262d..c9b367599ab5b7 100644 --- a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/rotary_pos_emb.cpp +++ b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/rotary_pos_emb.cpp @@ -22,9 +22,11 @@ using namespace CPUTestUtils; using namespace ov::gen_pattern; -using namespace ov::test; using namespace ov; +namespace ov { +namespace test { + static ov::OutputVector makeCosSinCache(int max_position_embeddings, int rotary_ndims) { std::vector lut_sin(max_position_embeddings * rotary_ndims, 0.0f); std::vector lut_cos(max_position_embeddings * rotary_ndims, 0.0f); @@ -129,8 +131,6 @@ static std::shared_ptr buildROPE_Llama2(const int batch, return std::make_shared(ov::NodeVector{add_Add}, ov::ParameterVector{input, pos_id_end, pos_ids}); } -namespace CPULayerTestsDefinitions { - class RoPECPUTestLlama2 : public SubgraphBaseTest { public: ov::Tensor create_i32_tensor(const ov::Shape& shape, int start, int step = 1) { @@ -328,4 +328,5 @@ TEST_F(RoPECPUTestChatGLM, smoke_CompareWithRefs) { CheckNumberOfNodesWithType(compiledModel, "RoPE", 1); } -} // namespace CPULayerTestsDefinitions +} // namespace test +} // namespace ov diff --git a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/seq_native_order.cpp b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/seq_native_order.cpp index 5c8d1d8e2ddcf2..5c20ec3a6440cd 100644 --- a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/seq_native_order.cpp +++ b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/seq_native_order.cpp @@ -2,16 +2,19 @@ // SPDX-License-Identifier: Apache-2.0 // +#include "common_test_utils/node_builders/gru_cell.hpp" +#include "common_test_utils/node_builders/lstm_cell.hpp" +#include "common_test_utils/node_builders/rnn_cell.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" -#include "ov_models/builders.hpp" #include "test_utils/cpu_test_utils.hpp" #include "transformations/op_conversions/bidirectional_sequences_decomposition.hpp" #include "transformations/op_conversions/convert_sequences_to_tensor_iterator.hpp" using namespace CPUTestUtils; -using namespace ov::test; +using namespace ov::test::utils; -namespace SubgraphTestsDefinitions { +namespace ov { +namespace test { enum class SEQ_TYPE { GRU, 
@@ -34,7 +37,7 @@ using SeqParams = std::tuple; // 'sequence_lengths' input type + InputLayerType>; // 'sequence_lengths' input type class SequenceCPUTest : public testing::WithParamInterface, virtual public ov::test::SubgraphBaseTest, public CPUTestsBase { public: @@ -47,7 +50,7 @@ class SequenceCPUTest : public testing::WithParamInterface, virtual p bool linearBeforeReset; ov::op::RecurrentSequenceDirection direction; ElementType netPrecision; - ngraph::helpers::InputLayerType seqInType; + InputLayerType seqInType; std::tie(seqType, hidden_size, input_size, inShapeParams, activations, clip, linearBeforeReset, direction, netPrecision, seqInType) = obj.param; @@ -141,7 +144,7 @@ class SequenceCPUTest : public testing::WithParamInterface, virtual p weightShape.push_back(B_shape); ov::PartialShape seq_len_shape(std::vector{bounds[batch_size_pos]}); - if (seqInType == ngraph::helpers::InputLayerType::PARAMETER) { + if (seqInType == InputLayerType::PARAMETER) { inputDynamicShapes.push_back(seq_len_shape); } else { OPENVINO_ASSERT(seq_len_shape.is_static()); @@ -160,7 +163,7 @@ class SequenceCPUTest : public testing::WithParamInterface, virtual p if (seqType == SEQ_TYPE::LSTM) { currTS.emplace_back(std::vector{bs, numDirections, hidden_size}); } - if (seqInType == ngraph::helpers::InputLayerType::PARAMETER) { + if (seqInType == InputLayerType::PARAMETER) { currTS.emplace_back(std::vector{bs}); } targetStaticShapes.push_back(currTS); @@ -168,7 +171,7 @@ class SequenceCPUTest : public testing::WithParamInterface, virtual p // funciton creation std::vector types(inputDynamicShapes.size(), netPrecision); - if (seqInType == ngraph::helpers::InputLayerType::PARAMETER) { + if (seqInType == InputLayerType::PARAMETER) { types.back() = ElementType::i64; } ov::ParameterVector params; @@ -190,45 +193,45 @@ class SequenceCPUTest : public testing::WithParamInterface, virtual p std::shared_ptr seq_node; if (seqType == SEQ_TYPE::GRU) { - seq_node = ngraph::builder::makeGRU(inputs, - weightShape, - hidden_size, - activations, - {}, - {}, - clip, - linearBeforeReset, - true, - direction, - (seqInType == ngraph::helpers::InputLayerType::CONSTANT ? - ngraph::helpers::SequenceTestsMode::CONVERT_TO_TI_MAX_SEQ_LEN_CONST : - ngraph::helpers::SequenceTestsMode::PURE_SEQ_RAND_SEQ_LEN_PARAM)); + seq_node = utils::make_gru( + inputs, + weightShape, + hidden_size, + activations, + {}, + {}, + clip, + linearBeforeReset, + true, + direction, + (seqInType == InputLayerType::CONSTANT ? SequenceTestsMode::CONVERT_TO_TI_MAX_SEQ_LEN_CONST + : SequenceTestsMode::PURE_SEQ_RAND_SEQ_LEN_PARAM)); } else if (seqType == SEQ_TYPE::LSTM) { - seq_node = ngraph::builder::makeLSTM(inputs, - weightShape, - hidden_size, - activations, - {}, - {}, - clip, - true, - direction, - (seqInType == ngraph::helpers::InputLayerType::CONSTANT ? - ngraph::helpers::SequenceTestsMode::CONVERT_TO_TI_MAX_SEQ_LEN_CONST : - ngraph::helpers::SequenceTestsMode::PURE_SEQ_RAND_SEQ_LEN_PARAM)); + seq_node = utils::make_lstm( + inputs, + weightShape, + hidden_size, + activations, + {}, + {}, + clip, + true, + direction, + (seqInType == InputLayerType::CONSTANT ? SequenceTestsMode::CONVERT_TO_TI_MAX_SEQ_LEN_CONST + : SequenceTestsMode::PURE_SEQ_RAND_SEQ_LEN_PARAM)); } else if (seqType == SEQ_TYPE::RNN) { - seq_node = ngraph::builder::makeRNN(inputs, - weightShape, - hidden_size, - activations, - {}, - {}, - clip, - true, - direction, - (seqInType == ngraph::helpers::InputLayerType::CONSTANT ? 
- ngraph::helpers::SequenceTestsMode::CONVERT_TO_TI_MAX_SEQ_LEN_CONST : - ngraph::helpers::SequenceTestsMode::PURE_SEQ_RAND_SEQ_LEN_PARAM)); + seq_node = utils::make_rnn( + inputs, + weightShape, + hidden_size, + activations, + {}, + {}, + clip, + true, + direction, + (seqInType == InputLayerType::CONSTANT ? SequenceTestsMode::CONVERT_TO_TI_MAX_SEQ_LEN_CONST + : SequenceTestsMode::PURE_SEQ_RAND_SEQ_LEN_PARAM)); } else { OPENVINO_THROW("Unsupported seq type"); } @@ -254,7 +257,7 @@ class SequenceCPUTest : public testing::WithParamInterface, virtual p const size_t batchSize = targetInputStaticShapes[0][1]; const int64_t maxSeqLen = targetInputStaticShapes[0][0]; - if (seqInType == ngraph::helpers::InputLayerType::PARAMETER) { + if (seqInType == InputLayerType::PARAMETER) { const auto& funcInputs = function->inputs(); const auto& seqLenInput = inputs.find(funcInputs[seqLengthInIdx].get_node_shared_ptr()); if (seqLenInput == inputs.end()) @@ -266,7 +269,7 @@ class SequenceCPUTest : public testing::WithParamInterface, virtual p } private: - ngraph::helpers::InputLayerType seqInType; + InputLayerType seqInType; size_t seqLengthInIdx = 2; }; @@ -326,7 +329,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_SequenceCPUTest_dynamic_lstm_rnn, SequenceCPUTest ::testing::ValuesIn(linearBeforeReset), ::testing::ValuesIn(direction), ::testing::ValuesIn(netPrecisions), - ::testing::Values(ngraph::helpers::InputLayerType::PARAMETER)), + ::testing::Values(InputLayerType::PARAMETER)), SequenceCPUTest::getTestCaseName); INSTANTIATE_TEST_SUITE_P(smoke_SequenceCPUTest_dynamic_gru, SequenceCPUTest, @@ -339,7 +342,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_SequenceCPUTest_dynamic_gru, SequenceCPUTest, ::testing::ValuesIn(linearBeforeReset), ::testing::ValuesIn(direction), ::testing::ValuesIn(netPrecisions), - ::testing::Values(ngraph::helpers::InputLayerType::PARAMETER)), + ::testing::Values(InputLayerType::PARAMETER)), SequenceCPUTest::getTestCaseName); INSTANTIATE_TEST_SUITE_P(smoke_SequenceCPUTest_static_gru, SequenceCPUTest, @@ -352,7 +355,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_SequenceCPUTest_static_gru, SequenceCPUTest, ::testing::ValuesIn(linearBeforeReset), ::testing::ValuesIn(direction), ::testing::ValuesIn(netPrecisions), - ::testing::Values(ngraph::helpers::InputLayerType::CONSTANT)), + ::testing::Values(InputLayerType::CONSTANT)), SequenceCPUTest::getTestCaseName); INSTANTIATE_TEST_SUITE_P(smoke_SequenceCPUTest_static_rnn_lstm, SequenceCPUTest, @@ -365,7 +368,8 @@ INSTANTIATE_TEST_SUITE_P(smoke_SequenceCPUTest_static_rnn_lstm, SequenceCPUTest, ::testing::ValuesIn(linearBeforeReset), ::testing::ValuesIn(direction), ::testing::ValuesIn(netPrecisions), - ::testing::Values(ngraph::helpers::InputLayerType::CONSTANT)), + ::testing::Values(InputLayerType::CONSTANT)), SequenceCPUTest::getTestCaseName); -} // namespace SubgraphTestsDefinitions +} // namespace test +} // namespace ov diff --git a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/static_zero_dims.cpp b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/static_zero_dims.cpp index 1072890e51774b..4d304f5d19002d 100644 --- a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/static_zero_dims.cpp +++ b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/static_zero_dims.cpp @@ -2,14 +2,12 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "shared_test_classes/base/ov_subgraph.hpp" +#include "common_test_utils/ov_tensor_utils.hpp" #include "ov_models/builders.hpp" -#include -#include "functional_test_utils/skip_tests_config.hpp" - -using 
namespace ov::test; +#include "shared_test_classes/base/ov_subgraph.hpp" -namespace SubgraphTestsDefinitions { +namespace ov { +namespace test { class StaticZeroDims : public SubgraphBaseTest { protected: @@ -20,26 +18,26 @@ class StaticZeroDims : public SubgraphBaseTest { init_input_shapes({inputShapes}); - auto ngPrc = ngraph::element::f32; + auto ngPrc = ov::element::f32; ov::ParameterVector inputParams; for (auto&& shape : inputDynamicShapes) { inputParams.push_back(std::make_shared(ngPrc, shape)); } - auto splitAxisOp = std::make_shared(ngraph::element::i64, ngraph::Shape{}, std::vector{0}); + auto splitAxisOp = std::make_shared(ov::element::i64, ov::Shape{}, std::vector{0}); std::vector splitLenght = {1, 0, 6}; - auto splitLengthsOp = std::make_shared(ngraph::element::i32, ngraph::Shape{splitLenght.size()}, splitLenght); - auto varSplit = std::make_shared(inputParams[0], splitAxisOp, splitLengthsOp); + auto splitLengthsOp = std::make_shared(ov::element::i32, ov::Shape{splitLenght.size()}, splitLenght); + auto varSplit = std::make_shared(inputParams[0], splitAxisOp, splitLengthsOp); - auto relu1 = std::make_shared(varSplit->output(0)); + auto relu1 = std::make_shared(varSplit->output(0)); auto numInRoi = ngraph::builder::makeConstant(ngPrc, {0}, std::vector{}, false); auto expDet = std::make_shared(varSplit->output(1), numInRoi, 10); - auto relu2 = std::make_shared(expDet); + auto relu2 = std::make_shared(expDet); - auto relu3 = std::make_shared(varSplit->output(2)); + auto relu3 = std::make_shared(varSplit->output(2)); - ngraph::NodeVector results{relu1, relu2, relu3}; - function = std::make_shared(results, inputParams, "StaticZeroDims"); + ov::NodeVector results{relu1, relu2, relu3}; + function = std::make_shared(results, inputParams, "StaticZeroDims"); } void compare(const std::vector &expected, const std::vector &actual) override { @@ -59,4 +57,5 @@ TEST_F(StaticZeroDims, smoke_CompareWithRefs) { run(); } -} // namespace SubgraphTestsDefinitions +} // namespace test +} // namespace ov From bd315f4b6aa29f06cc4e2075130a417686b1eb19 Mon Sep 17 00:00:00 2001 From: River Li Date: Tue, 5 Dec 2023 22:42:47 +0800 Subject: [PATCH 02/13] [CPU Tests] migrate matmul test cases to be api 2.0 (#21332) * [CPU Tests] migrate matmul test cases to be api 2.0 * Update * Handle convert2OutputVector inplace --------- Co-authored-by: Vitaliy Urusovskij --- .../skip_tests_config.cpp | 15 +- .../src/matmul_decompress_convert.cpp | 313 +++++++++--------- .../src/matmul_quantized_subgraph.cpp | 53 +-- .../src/matmul_strided_inputs_outputs.cpp | 52 +-- .../src/matmul_weights_decompression.cpp | 57 ++-- .../functional/test_utils/cpu_test_utils.hpp | 6 +- 6 files changed, 244 insertions(+), 252 deletions(-) diff --git a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/skip_tests_config.cpp b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/skip_tests_config.cpp index 3fcbe9ab2a1ffc..6348e1afccfd40 100644 --- a/src/plugins/intel_cpu/tests/functional/shared_tests_instances/skip_tests_config.cpp +++ b/src/plugins/intel_cpu/tests/functional/shared_tests_instances/skip_tests_config.cpp @@ -3,14 +3,11 @@ // #include "functional_test_utils/skip_tests_config.hpp" - -#include +#include "openvino/runtime/system_conf.hpp" #include #include -#include "ie_parallel.hpp" - std::vector disabledTestPatterns() { std::vector retVector{ // TODO: Issue 31841 @@ -314,7 +311,7 @@ std::vector disabledTestPatterns() { 
retVector.emplace_back(R"(.*LoadNetworkCompiledKernelsCacheTest.*CanCreateCacheDirAndDumpBinariesUnicodePath.*)"); #endif - if (!InferenceEngine::with_cpu_x86_avx512_core()) { + if (!ov::with_cpu_x86_avx512_core()) { // on platforms which do not support bfloat16, we are disabling bf16 tests since there are no bf16 primitives, // tests are useless on such platforms retVector.emplace_back(R"(.*(BF|bf)16.*)"); @@ -325,7 +322,7 @@ std::vector disabledTestPatterns() { retVector.emplace_back(R"(.*Snippets.*(MatMul|Matmul).*)"); } #if defined(OPENVINO_ARCH_X86) || defined(OPENVINO_ARCH_X86_64) - if (!InferenceEngine::with_cpu_x86_avx512_core_fp16()) { + if (!ov::with_cpu_x86_avx512_core_fp16()) { // Skip fp16 tests for paltforms that don't support fp16 precision retVector.emplace_back(R"(.*INFERENCE_PRECISION_HINT=(F|f)16.*)"); } @@ -339,7 +336,7 @@ std::vector disabledTestPatterns() { R"(.*EltwiseLayerCPUTest.*IS=\(\[1\.\.10\.2\.5\.6\]_\).*eltwiseOpType=SqDiff.*_configItem=INFERENCE_PRECISION_HINT=f16.*)"); # endif // OV_CPU_ARM_ENABLE_FP16 #endif - if (!InferenceEngine::with_cpu_x86_avx512_core_vnni() && !InferenceEngine::with_cpu_x86_avx512_core_amx_int8()) { + if (!ov::with_cpu_x86_avx512_core_vnni() && !ov::with_cpu_x86_avx512_core_amx_int8()) { // MatMul in Snippets uses BRGEMM that supports i8 only on platforms with VNNI or AMX instructions retVector.emplace_back(R"(.*Snippets.*MatMulFQ.*)"); retVector.emplace_back(R"(.*Snippets.*MatMul.*Quantized.*)"); @@ -347,11 +344,11 @@ std::vector disabledTestPatterns() { retVector.emplace_back(R"(.*Snippets.*MHAINT8.*)"); retVector.emplace_back(R"(.*Snippets.*MHAQuant.*)"); } - if (!InferenceEngine::with_cpu_x86_avx512_core_amx_int8()) + if (!ov::with_cpu_x86_avx512_core_amx_int8()) // TODO: Issue 92895 // on platforms which do not support AMX, we are disabling I8 input tests retVector.emplace_back(R"(smoke_LPT/FakeQuantizeWithNotOptimalTransformation.CompareWithRefImpl.*CPU.*i8.*)"); - if (!InferenceEngine::with_cpu_x86_avx512_core_amx_bf16() && !InferenceEngine::with_cpu_x86_bfloat16()) { + if (!ov::with_cpu_x86_avx512_core_amx_bf16() && !ov::with_cpu_x86_bfloat16()) { // ignored for not supported bf16 platforms retVector.emplace_back(R"(.*smoke_Snippets_EnforcePrecision_bf16.*)"); retVector.emplace_back(R"(.*smoke_Snippets_MHAWOTransposeEnforceBF16.*)"); diff --git a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/matmul_decompress_convert.cpp b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/matmul_decompress_convert.cpp index b0edba98c1dbff..55197fa04e63ca 100644 --- a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/matmul_decompress_convert.cpp +++ b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/matmul_decompress_convert.cpp @@ -2,17 +2,15 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "test_utils/fusing_test_utils.hpp" #include "ov_models/builders.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" +#include "test_utils/fusing_test_utils.hpp" #include "transformations/rt_info/decompression.hpp" -using namespace ngraph; -using namespace InferenceEngine; using namespace CPUTestUtils; -using namespace ov::test; -namespace SubgraphTestsDefinitions { +namespace ov { +namespace test { /* This test checks MatMul weights constant folding on CPU plugin side and cover two optimizations: 1. 
Decompressing Convert FP16 -> FP32 CF (FuseFCAndConvertOnWeights in cpu graph optimizer) @@ -82,22 +80,21 @@ namespace SubgraphTestsDefinitions { -------- */ -using MatMulDecompressConvertParams = std::tuple< - std::vector, // input shapes - std::pair, // transposeA, transposeB - ElementType, // weights precision - std::map, // additional config - CPUSpecificParams ->; +using MatMulDecompressConvertParams = std::tuple, // input shapes + std::pair, // transposeA, transposeB + ElementType, // weights precision + ov::AnyMap, // additional config + CPUSpecificParams>; class MatMulDecompressConvertTest : public testing::WithParamInterface, - virtual public SubgraphBaseTest, public CPUTestsBase { + virtual public SubgraphBaseTest, + public CPUTestsBase { public: static std::string getTestCaseName(testing::TestParamInfo obj) { std::vector inputShapes; std::pair transpose; ElementType weiElemType; - std::map additionalConfig; + ov::AnyMap additionalConfig; CPUSpecificParams cpuParams; std::tie(inputShapes, transpose, weiElemType, additionalConfig, cpuParams) = obj.param; @@ -124,7 +121,7 @@ class MatMulDecompressConvertTest : public testing::WithParamInterface() << ":"; } result << ")"; @@ -134,14 +131,14 @@ class MatMulDecompressConvertTest : public testing::WithParamInterface - void transposeShape(T& shape) { + template + void transpose_shape(T& shape) { OPENVINO_ASSERT(shape.size() > 1); std::swap(*(shape.end() - 1), *(shape.end() - 2)); } - void CheckFCWeightsPrecision(ElementType expectedWeiElemType) const { - auto getExecValue = [](const ov::Node::RTMap& rtInfo, const std::string ¶mName) -> std::string { + void check_fc_weights_precision(ElementType expectedWeiElemType) const { + auto getExecValue = [](const ov::Node::RTMap& rtInfo, const std::string& paramName) -> std::string { auto it = rtInfo.find(paramName); OPENVINO_ASSERT(rtInfo.end() != it); return it->second.as(); @@ -149,10 +146,11 @@ class MatMulDecompressConvertTest : public testing::WithParamInterfaceget_ops()) { + for (const auto& fcNode : execFunction->get_ops()) { if (getExecValue(fcNode->get_rt_info(), ExecGraphInfoSerialization::LAYER_TYPE) == "FullyConnected") { - const auto &constNode = fcNode->get_input_node_shared_ptr(1); - element::Type expectedType(getExecValue(constNode->get_rt_info(), ExecGraphInfoSerialization::OUTPUT_PRECISIONS)); + const auto& constNode = fcNode->get_input_node_shared_ptr(1); + ov::element::Type expectedType( + getExecValue(constNode->get_rt_info(), ov::exec_model_info::OUTPUT_PRECISIONS)); ASSERT_EQ(expectedType, expectedWeiElemType); } } @@ -164,7 +162,7 @@ class MatMulDecompressConvertTest : public testing::WithParamInterface inputShapes; std::pair transpose; ElementType weiConstElemType; - std::map additionalConfig; + ov::AnyMap additionalConfig; CPUSpecificParams cpuParams; std::tie(inputShapes, transpose, weiConstElemType, additionalConfig, cpuParams) = this->GetParam(); @@ -175,19 +173,21 @@ class MatMulDecompressConvertTest : public testing::WithParamInterfacesecond.as() == ov::element::bf16) { convertOutType = inType = outType = netType = ElementType::bf16; weiConstElemType = (weiConstElemType != ElementType::f32) ? 
weiConstElemType : ElementType::bf16; } else { @@ -209,9 +210,10 @@ class MatMulDecompressConvertTest : public testing::WithParamInterface(inType, inShapeA)}; - std::shared_ptr inputB = builder::makeConstant(weiConstElemType, inShapeB.get_shape(), {}, true); + std::shared_ptr inputB = + ngraph::builder::makeConstant(weiConstElemType, inShapeB.get_shape(), {}, true); if (weiConstElemType == ElementType::f16) { - inputB = std::make_shared(inputB, convertOutType); + inputB = std::make_shared(inputB, convertOutType); mark_as_decompression(inputB); } expectedWeiConstElemType = weiConstElemType; @@ -221,13 +223,13 @@ class MatMulDecompressConvertTest : public testing::WithParamInterface> transposeParams = { const std::vector> inputShapes2D = { static_shapes_to_test_representation({{2, 3}, {3, 4}}), - { - {{-1, -1}, {{2, 3}, {5, 3}}}, - {{3, 4}, {{3, 4}, {3, 4}}} - }, + {{{-1, -1}, {{2, 3}, {5, 3}}}, {{3, 4}, {{3, 4}, {3, 4}}}}, }; const std::vector> inputShapes3D = { static_shapes_to_test_representation({{2, 2, 3}, {3, 4}}), static_shapes_to_test_representation({{2, 3}, {1, 3, 4}}), static_shapes_to_test_representation({{1, 2, 3}, {1, 3, 4}}), - { - {{-1, -1, -1}, {{2, 2, 3}, {3, 5, 3}}}, - {{3, 4}, {{3, 4}, {3, 4}}} - }, - { - {{-1, -1}, {{2, 3}, {5, 3}}}, - {{1, 3, 4}, {{1, 3, 4}, {1, 3, 4}}} - }, - { - {{-1, -1, -1}, {{1, 2, 3}, {1, 5, 3}}}, - {{1, 3, 4}, {{1, 3, 4}, {1, 3, 4}}} - }, + {{{-1, -1, -1}, {{2, 2, 3}, {3, 5, 3}}}, {{3, 4}, {{3, 4}, {3, 4}}}}, + {{{-1, -1}, {{2, 3}, {5, 3}}}, {{1, 3, 4}, {{1, 3, 4}, {1, 3, 4}}}}, + {{{-1, -1, -1}, {{1, 2, 3}, {1, 5, 3}}}, {{1, 3, 4}, {{1, 3, 4}, {1, 3, 4}}}}, }; -std::map emptyConfig = {/* empty config */}; +ov::AnyMap emptyConfig = {/* empty config */}; -std::vector> filterAdditionalConfig_BF16() { - std::vector> additionalConfig; - if (with_cpu_x86_avx512_core()) { - additionalConfig.push_back({{PluginConfigParams::KEY_ENFORCE_BF16, PluginConfigParams::YES}}); +std::vector filter_additional_config_bf16() { + std::vector additionalConfig; + if (ov::with_cpu_x86_avx512_core()) { + additionalConfig.push_back({{ov::hint::inference_precision(ov::element::bf16)}}); } return additionalConfig; } -std::vector filterSpecificParams(bool trySetMlas) { +std::vector filter_specific_params(bool trySetMlas) { std::vector specificParams; if (trySetMlas) { #ifdef OV_CPU_WITH_MLAS @@ -295,9 +285,9 @@ std::vector filterSpecificParams(bool trySetMlas) { } // try set onednn jit params if we can't or shouldn't use mlas if (specificParams.empty()) { - if (with_cpu_x86_avx512_core()) { + if (ov::with_cpu_x86_avx512_core()) { specificParams.push_back(CPUSpecificParams{{}, {}, {"brgemm_avx512"}, "brgemm_avx512"}); - } else if (with_cpu_x86_avx2()) { + } else if (ov::with_cpu_x86_avx2()) { specificParams.push_back(CPUSpecificParams{{}, {}, {"brgemm_avx2"}, "brgemm_avx2"}); } } @@ -305,84 +295,84 @@ std::vector filterSpecificParams(bool trySetMlas) { return specificParams; } -std::vector filterSpecificParams_BF16() { +std::vector filter_specific_params_bf16() { std::vector specificParams; specificParams.push_back(CPUSpecificParams{{}, {}, {"jit_gemm"}, "jit_gemm"}); return specificParams; } - -const auto testParams2D_FP32_smoke = ::testing::Combine( - ::testing::ValuesIn(inputShapes2D), - ::testing::ValuesIn(transposeParams), - ::testing::Values(ElementType::f32), - ::testing::Values(emptyConfig), - ::testing::ValuesIn(filterSpecificParams(true))); - -INSTANTIATE_TEST_SUITE_P(smoke_FC_2D_FP32, MatMulDecompressConvertTest, testParams2D_FP32_smoke, - 
MatMulDecompressConvertTest::getTestCaseName); - - -const auto testParams2D_FP16_smoke = ::testing::Combine( - ::testing::ValuesIn(inputShapes2D), - ::testing::ValuesIn(transposeParams), - ::testing::Values(ElementType::f16), - ::testing::Values(emptyConfig), - ::testing::ValuesIn(filterSpecificParams(false))); - -INSTANTIATE_TEST_SUITE_P(smoke_FC_2D_FP16, MatMulDecompressConvertTest, testParams2D_FP16_smoke, - MatMulDecompressConvertTest::getTestCaseName); - - -const auto testParams2D_BF16_smoke = ::testing::Combine( - ::testing::ValuesIn(inputShapes2D), - ::testing::ValuesIn(transposeParams), - ::testing::Values(ElementType::f32, ElementType::f16), - ::testing::ValuesIn(filterAdditionalConfig_BF16()), - ::testing::ValuesIn(filterSpecificParams_BF16())); - -INSTANTIATE_TEST_SUITE_P(smoke_FC_2D_BF16, MatMulDecompressConvertTest, testParams2D_BF16_smoke, - MatMulDecompressConvertTest::getTestCaseName); - - -const auto testParams3D_FP32_smoke = ::testing::Combine( - ::testing::ValuesIn(inputShapes3D), - ::testing::ValuesIn(transposeParams), - ::testing::Values(ElementType::f32), - ::testing::Values(emptyConfig), - ::testing::ValuesIn(filterSpecificParams(true))); - -INSTANTIATE_TEST_SUITE_P(smoke_FC_3D_FP32, MatMulDecompressConvertTest, testParams3D_FP32_smoke, - MatMulDecompressConvertTest::getTestCaseName); - - -const auto testParams3D_FP16_smoke = ::testing::Combine( - ::testing::ValuesIn(inputShapes3D), - ::testing::ValuesIn(transposeParams), - ::testing::Values(ElementType::f16), - ::testing::Values(emptyConfig), - ::testing::ValuesIn(filterSpecificParams(false))); - -INSTANTIATE_TEST_SUITE_P(smoke_FC_3D_FP16, MatMulDecompressConvertTest, testParams3D_FP16_smoke, - MatMulDecompressConvertTest::getTestCaseName); - - -const auto testParams3D_BF16_smoke = ::testing::Combine( - ::testing::ValuesIn(inputShapes3D), - ::testing::ValuesIn(transposeParams), - ::testing::Values(ElementType::f32, ElementType::f16), - ::testing::ValuesIn(filterAdditionalConfig_BF16()), - ::testing::ValuesIn(filterSpecificParams_BF16())); - -INSTANTIATE_TEST_SUITE_P(smoke_FC_3D_BF16, MatMulDecompressConvertTest, testParams3D_BF16_smoke, - MatMulDecompressConvertTest::getTestCaseName); - -} // namespace +const auto testParams2D_FP32_smoke = ::testing::Combine(::testing::ValuesIn(inputShapes2D), + ::testing::ValuesIn(transposeParams), + ::testing::Values(ElementType::f32), + ::testing::Values(emptyConfig), + ::testing::ValuesIn(filter_specific_params(true))); + +INSTANTIATE_TEST_SUITE_P(smoke_FC_2D_FP32, + MatMulDecompressConvertTest, + testParams2D_FP32_smoke, + MatMulDecompressConvertTest::getTestCaseName); + +const auto testParams2D_FP16_smoke = ::testing::Combine(::testing::ValuesIn(inputShapes2D), + ::testing::ValuesIn(transposeParams), + ::testing::Values(ElementType::f16), + ::testing::Values(emptyConfig), + ::testing::ValuesIn(filter_specific_params(false))); + +INSTANTIATE_TEST_SUITE_P(smoke_FC_2D_FP16, + MatMulDecompressConvertTest, + testParams2D_FP16_smoke, + MatMulDecompressConvertTest::getTestCaseName); + +const auto testParams2D_BF16_smoke = ::testing::Combine(::testing::ValuesIn(inputShapes2D), + ::testing::ValuesIn(transposeParams), + ::testing::Values(ElementType::f32, ElementType::f16), + ::testing::ValuesIn(filter_additional_config_bf16()), + ::testing::ValuesIn(filter_specific_params_bf16())); + +INSTANTIATE_TEST_SUITE_P(smoke_FC_2D_BF16, + MatMulDecompressConvertTest, + testParams2D_BF16_smoke, + MatMulDecompressConvertTest::getTestCaseName); + +const auto testParams3D_FP32_smoke = 
::testing::Combine(::testing::ValuesIn(inputShapes3D), + ::testing::ValuesIn(transposeParams), + ::testing::Values(ElementType::f32), + ::testing::Values(emptyConfig), + ::testing::ValuesIn(filter_specific_params(true))); + +INSTANTIATE_TEST_SUITE_P(smoke_FC_3D_FP32, + MatMulDecompressConvertTest, + testParams3D_FP32_smoke, + MatMulDecompressConvertTest::getTestCaseName); + +const auto testParams3D_FP16_smoke = ::testing::Combine(::testing::ValuesIn(inputShapes3D), + ::testing::ValuesIn(transposeParams), + ::testing::Values(ElementType::f16), + ::testing::Values(emptyConfig), + ::testing::ValuesIn(filter_specific_params(false))); + +INSTANTIATE_TEST_SUITE_P(smoke_FC_3D_FP16, + MatMulDecompressConvertTest, + testParams3D_FP16_smoke, + MatMulDecompressConvertTest::getTestCaseName); + +const auto testParams3D_BF16_smoke = ::testing::Combine(::testing::ValuesIn(inputShapes3D), + ::testing::ValuesIn(transposeParams), + ::testing::Values(ElementType::f32, ElementType::f16), + ::testing::ValuesIn(filter_additional_config_bf16()), + ::testing::ValuesIn(filter_specific_params_bf16())); + +INSTANTIATE_TEST_SUITE_P(smoke_FC_3D_BF16, + MatMulDecompressConvertTest, + testParams3D_BF16_smoke, + MatMulDecompressConvertTest::getTestCaseName); + +} // namespace /* In case of Convert has 2 or more consumers there is a problem with memory allocation in CPU plug-in (see Edge::init() method). Maybe we can just remove the check (edgePtr->getParent()->isConstant() && !edgePtr->getChild()->isConstant()) and everything will be OK, But this solution should be additionally checked. For now, for these cases we will not be - doing CF on the CPU side and it should be done on the ngraph side. + doing CF on the CPU side and it should be done on the graph side. * Graph before: ------------ ------------ ------------ @@ -422,13 +412,11 @@ INSTANTIATE_TEST_SUITE_P(smoke_FC_3D_BF16, MatMulDecompressConvertTest, testPara |Output| -------- */ -using MatMulDecompressConvertParams2 = std::tuple< - std::vector, // input shapes - std::pair, // transposeA, transposeB - ElementType, // weights precision - std::map, // additional config - CPUSpecificParams ->; +using MatMulDecompressConvertParams2 = std::tuple, // input shapes + std::pair, // transposeA, transposeB + ElementType, // weights precision + ov::AnyMap, // additional property + CPUSpecificParams>; class MatMulDecompressConvertTest2 : public MatMulDecompressConvertTest { protected: @@ -438,7 +426,7 @@ class MatMulDecompressConvertTest2 : public MatMulDecompressConvertTest { std::vector inputShapes; std::pair transpose; ElementType weiConstElemType; - std::map additionalConfig; + ov::AnyMap additionalConfig; CPUSpecificParams cpuParams; std::tie(inputShapes, transpose, weiConstElemType, additionalConfig, cpuParams) = this->GetParam(); @@ -450,23 +438,25 @@ class MatMulDecompressConvertTest2 : public MatMulDecompressConvertTest { bool transpB = transpose.second; fullyConnectedCount = 2; - if (transpA) transposeCount += 2; - if (!transpB) transposeCount++; + if (transpA) + transposeCount += 2; + if (!transpB) + transposeCount++; if (transpA) { - transposeShape(inputDynamicShapes[0]); + transpose_shape(inputDynamicShapes[0]); for (auto& shapes : targetStaticShapes) { - transposeShape(shapes[0]); + transpose_shape(shapes[0]); } - transposeShape(inputDynamicShapes[1]); + transpose_shape(inputDynamicShapes[1]); for (auto& shapes : targetStaticShapes) { - transposeShape(shapes[1]); + transpose_shape(shapes[1]); } } if (transpB) { - transposeShape(inputDynamicShapes[2]); + 
transpose_shape(inputDynamicShapes[2]); for (auto& shapes : targetStaticShapes) { - transposeShape(shapes[2]); + transpose_shape(shapes[2]); } } @@ -478,7 +468,8 @@ class MatMulDecompressConvertTest2 : public MatMulDecompressConvertTest { ElementType netType = ElementType::f32; ElementType convertOutType = ElementType::f32; - if (additionalConfig[PluginConfigParams::KEY_ENFORCE_BF16] == PluginConfigParams::YES) { + auto it = additionalConfig.find(ov::hint::inference_precision.name()); + if (it != additionalConfig.end() && it->second.as() == ov::element::bf16) { convertOutType = inType = outType = netType = ElementType::bf16; weiConstElemType = (weiConstElemType != ElementType::f32) ? weiConstElemType : ElementType::bf16; } else { @@ -492,12 +483,13 @@ class MatMulDecompressConvertTest2 : public MatMulDecompressConvertTest { for (auto&& shape : {inShapeFC0, inShapeFC1}) { params.push_back(std::make_shared(inType, shape)); } - std::shared_ptr inputWeights = builder::makeConstant(weiConstElemType, inShapeWeights.get_shape(), {}, true); + std::shared_ptr inputWeights = + ngraph::builder::makeConstant(weiConstElemType, inShapeWeights.get_shape(), {}, true); if (weiConstElemType == ElementType::f16) { - inputWeights = std::make_shared(inputWeights, convertOutType); + inputWeights = std::make_shared(inputWeights, convertOutType); mark_as_decompression(inputWeights); } - // In this test, convert must be folded on the ngraph side, so the constant with fp32 precision is expected + // In this test, convert must be folded on the graph side, so the constant with fp32 precision is expected expectedWeiConstElemType = ElementType::f32; auto matMul0 = std::make_shared(params[0], inputWeights, transpA, transpB); @@ -512,21 +504,24 @@ class MatMulDecompressConvertTest2 : public MatMulDecompressConvertTest { TEST_P(MatMulDecompressConvertTest2, CompareWithRefs) { SKIP_IF_CURRENT_TEST_IS_DISABLED(); run(); - CheckExecutionGraph(); + check_execution_graph(); } namespace { -const auto testParams2D_FP16_2_smoke = ::testing::Combine( - ::testing::Values(static_shapes_to_test_representation({{2, 3}, {2, 3}, {3, 4}})), - ::testing::Values(std::pair{false, true}), - ::testing::Values(ElementType::f16), - ::testing::Values(emptyConfig), - ::testing::ValuesIn(filterSpecificParams(true))); +const auto testParams2D_FP16_2_smoke = + ::testing::Combine(::testing::Values(static_shapes_to_test_representation({{2, 3}, {2, 3}, {3, 4}})), + ::testing::Values(std::pair{false, true}), + ::testing::Values(ElementType::f16), + ::testing::Values(emptyConfig), + ::testing::ValuesIn(filter_specific_params(true))); -INSTANTIATE_TEST_SUITE_P(smoke_FC_2D_FP16_2, MatMulDecompressConvertTest2, testParams2D_FP16_2_smoke, - MatMulDecompressConvertTest2::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_FC_2D_FP16_2, + MatMulDecompressConvertTest2, + testParams2D_FP16_2_smoke, + MatMulDecompressConvertTest2::getTestCaseName); -} // namespace +} // namespace -} // namespace SubgraphTestsDefinitions +} // namespace test +} // namespace ov diff --git a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/matmul_quantized_subgraph.cpp b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/matmul_quantized_subgraph.cpp index 2a04bdf843a72a..c5e2a9ec08d903 100644 --- a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/matmul_quantized_subgraph.cpp +++ b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/matmul_quantized_subgraph.cpp @@ -6,18 +6,18 @@ #include "test_utils/fusing_test_utils.hpp" #include 
"ov_models/builders.hpp" #include "common_test_utils/common_utils.hpp" +#include "shared_test_classes/base/ov_subgraph.hpp" #include #include -using namespace ngraph; -using namespace InferenceEngine; using namespace CPUTestUtils; -namespace SubgraphTestsDefinitions { +namespace ov { +namespace test { using ElementType = ov::element::Type_t; -using MatmulBrgemmInt8TestParams = std::tuplef32 // (u8/s8 + s8)->u8/s8 class MatmulBrgemmInt8Test : public testing::WithParamInterface, public CpuTestWithFusing, - virtual public LayerTestsUtils::LayerTestsCommon { + virtual public ov::test::SubgraphBaseStaticTest { public: static std::string getTestCaseName(testing::TestParamInfo obj) { - SizeVector supportedInputShapes; + ov::Shape supportedInputShapes; bool isFC; ElementType inType; ElementType outType; @@ -41,7 +41,7 @@ class MatmulBrgemmInt8Test : public testing::WithParamInterfaceGetParam(); std::tie(inFmts, outFmts, priority, selectedType) = cpuParams; - const auto ngPrec = element::f32; + const auto ngPrec = ov::element::f32; ov::ParameterVector inputParams {std::make_shared(ngPrec, ov::Shape(inShapes))}; - std::shared_ptr fq1; - std::shared_ptr matMul; - std::shared_ptr nodeBeforeConv; + std::shared_ptr fq1; + std::shared_ptr matMul; + std::shared_ptr nodeBeforeConv; selectedType = makeSelectedTypeStr(selectedType, ElementType::i8); if (inType == ElementType::u8) fq1 = ngraph::builder::makeFakeQuantize(inputParams[0], ngPrec, 256, {}, {0.0f}, {2.55f}, {0.0f}, {2.55f}); @@ -74,15 +74,15 @@ class MatmulBrgemmInt8Test : public testing::WithParamInterface{0.0f}, true); auto fq2 = ngraph::builder::makeFakeQuantize(weightsNode, ngPrec, 256, {}, {-1.28f}, {1.27f}, {-1.28f}, {1.27f}); - auto fc = std::make_shared(fq1, fq2, false, false); + auto fc = std::make_shared(fq1, fq2, false, false); fc->get_rt_info() = getCPUInfo(); fc->set_friendly_name(nameMatmul); auto biasWeightsNode = ngraph::builder::makeConstant(ngPrec, {}, std::vector{0.0f}, true); - matMul = std::make_shared(fc, biasWeightsNode); + matMul = std::make_shared(fc, biasWeightsNode); } else { auto fq2 = ngraph::builder::makeFakeQuantize(inputParams[0], ngPrec, 256, {}, {-1.28f}, {1.27f}, {-1.28f}, {1.27f}); matMul = std::make_shared(fq1, fq2, false, true); @@ -98,7 +98,7 @@ class MatmulBrgemmInt8Test : public testing::WithParamInterfacefq->matmul can cover x8*s8->x8 case auto filterWeightsShape = matMul->get_output_shape(0); - auto filterWeightsNode = ngraph::builder::makeConstant(element::f32, filterWeightsShape, std::vector{}, true); + auto filterWeightsNode = ngraph::builder::makeConstant(ov::element::f32, filterWeightsShape, std::vector{}, true); auto fq3 = ngraph::builder::makeFakeQuantize(filterWeightsNode, ngPrec, 256, {}, {-1.28f}, {1.27f}, {-1.28f}, {1.27f}); // only matmul avx2 support s8*s8 input auto matMul2 = std::make_shared(nodeBeforeConv, fq3, false, false); @@ -106,7 +106,7 @@ class MatmulBrgemmInt8Test : public testing::WithParamInterface function, const std::string& nodeName) { + void check_node(std::shared_ptr function, const std::string& nodeName) { ASSERT_NE(nullptr, function); for (const auto &node : function->get_ops()) { const auto & rtInfo = node->get_rt_info(); @@ -127,18 +127,17 @@ class MatmulBrgemmInt8Test : public testing::WithParamInterface supportedInputShapes = { +const std::vector supportedInputShapes = { {16, 32}, {17, 15}, }; @@ -148,7 +147,8 @@ const std::vectormatmulSpecificFilterParams = { {{}, {}, {"jit_gemm"}, "jit_gemm"} }; -INSTANTIATE_TEST_SUITE_P(smoke_matmulBrgemmInt8, MatmulBrgemmInt8Test, 
+INSTANTIATE_TEST_SUITE_P(smoke_matmulBrgemmInt8, + MatmulBrgemmInt8Test, ::testing::Combine(::testing::ValuesIn(supportedInputShapes), ::testing::ValuesIn({true, false}), ::testing::ValuesIn({ElementType::u8, ElementType::i8}), @@ -156,6 +156,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_matmulBrgemmInt8, MatmulBrgemmInt8Test, ::testing::ValuesIn(matmulSpecificFilterParams)), MatmulBrgemmInt8Test::getTestCaseName); -} // namespace +} // namespace -} // namespace SubgraphTestsDefinitions +} // namespace test +} // namespace ov diff --git a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/matmul_strided_inputs_outputs.cpp b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/matmul_strided_inputs_outputs.cpp index 4bf666a1600bd0..ced1ce82c8b7b6 100644 --- a/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/matmul_strided_inputs_outputs.cpp +++ b/src/plugins/intel_cpu/tests/functional/subgraph_tests/src/matmul_strided_inputs_outputs.cpp @@ -2,59 +2,62 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "test_utils/cpu_test_utils.hpp" #include "ov_models/builders.hpp" +#include "shared_test_classes/base/ov_subgraph.hpp" +#include "test_utils/cpu_test_utils.hpp" -using namespace ngraph; -using namespace InferenceEngine; using namespace CPUTestUtils; -namespace SubgraphTestsDefinitions { +namespace ov { +namespace test { -using MatmulStridedInputsOutputsTestParams = Precision; +using MatmulStridedInputsOutputsTestParams = ov::element::Type; class MatmulStridedInputsOutputsTest : public testing::WithParamInterface, public CPUTestsBase, - virtual public LayerTestsUtils::LayerTestsCommon { + virtual public SubgraphBaseStaticTest { public: static std::string getTestCaseName(testing::TestParamInfo obj) { - Precision netPrecision; + ov::element::Type netPrecision; netPrecision = obj.param; std::ostringstream result; - result << "netPRC=" << netPrecision.name() << "_"; + result << "netPRC=" << netPrecision.to_string() << "_"; return result.str(); } protected: void SetUp() override { - targetDevice = ov::test::utils::DEVICE_CPU; - Precision netPrecision; - netPrecision = this->GetParam(); - const auto ngPrec = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); + targetDevice = utils::DEVICE_CPU; + const auto ngPrec = this->GetParam(); - SizeVector splitShape{1, 2, 1, 16}; + ov::Shape splitShape{1, 2, 1, 16}; ov::ParameterVector splitInputParams {std::make_shared(ngPrec, ov::Shape(splitShape))}; auto split_axis_op = std::make_shared(ov::element::Type_t::i64, ov::Shape{}, std::vector{1}); auto split = std::make_shared(splitInputParams[0], split_axis_op, 2); std::vector concatShapes{{1, 1, 8, 8}, {1, 1, 8, 8}}; - ov::ParameterVector concatInputParams {std::make_shared(ngPrec, concatShapes[0]), - std::make_shared(ngPrec, concatShapes[1])}; - const auto concatOutputNodes = helpers::convert2OutputVector(helpers::castOps2Nodes(concatInputParams)); + ov::ParameterVector concatInputParams{std::make_shared(ngPrec, concatShapes[0]), + std::make_shared(ngPrec, concatShapes[1])}; + ov::OutputVector concatOutputNodes; + for (auto&& node : concatInputParams) { + for (auto&& param : node->outputs()) + concatOutputNodes.push_back(param); + } + const auto concat = std::make_shared(concatOutputNodes, 2); const auto matMul1 = std::make_shared(split->output(0), concat, false, false); - SizeVector matmulShape{1, 1, 16, 8}; + ov::Shape matmulShape{1, 1, 16, 8}; ov::ParameterVector matmulInputParams {std::make_shared(ngPrec, ov::Shape(matmulShape))}; const auto matMul2 = 
std::make_shared(split->output(1), matmulInputParams[0], false, false); const auto concatMatMuls = std::make_shared(ov::NodeVector{matMul1, matMul2}, 2 /* 3rd axis */); - ngraph::ParameterVector inputParams = {splitInputParams[0], concatInputParams[0], concatInputParams[1], matmulInputParams[0]}; + ov::ParameterVector inputParams = {splitInputParams[0], concatInputParams[0], concatInputParams[1], matmulInputParams[0]}; function = makeNgraphFunction(ngPrec, inputParams, concatMatMuls, "MatmulStridedInputsOutputs"); } }; @@ -84,16 +87,17 @@ class MatmulStridedInputsOutputsTest : public testing::WithParamInterface, // additional config + ov::AnyMap, // additional config fusingSpecificParams, bool>; // should use decompression implementation @@ -73,7 +71,7 @@ class MatmulWeightsDecompression : public testing::WithParamInterface additional_config; + ov::AnyMap additional_config; fusingSpecificParams fusing_params; bool should_fuse; @@ -99,7 +97,7 @@ class MatmulWeightsDecompression : public testing::WithParamInterface() << ":"; } result << ")"; result << CpuTestWithFusing::getTestCaseName(fusing_params); @@ -145,7 +143,7 @@ class MatmulWeightsDecompression : public testing::WithParamInterface(weights_precision, transformed_weights_shape, {}, true, 7); weights->set_friendly_name("Compressed_weights"); - auto weights_convert = std::make_shared(weights, decompression_precision); + auto weights_convert = std::make_shared(weights, decompression_precision); std::shared_ptr mul_parent = weights_convert; auto output_channels = *weights_shape.rbegin(); @@ -166,7 +164,7 @@ class MatmulWeightsDecompression : public testing::WithParamInterface(weights_precision, scaleshift_const_shape, {}, true, 7); - std::shared_ptr shift_convert = std::make_shared(shift_const, decompression_precision); + std::shared_ptr shift_convert = std::make_shared(shift_const, decompression_precision); if (reshape_on_decompression_constant) { auto shift_reshape_const = ov::opset10::Constant::create(ov::element::i32, {scaleshift_target_shape.size()}, scaleshift_target_shape); auto shift_reshape = std::make_shared(shift_convert, shift_reshape_const, false); @@ -234,7 +232,7 @@ class MatmulWeightsDecompression : public testing::WithParamInterface additional_config; + ov::AnyMap additional_config; fusingSpecificParams fusing_params; bool should_fuse; @@ -252,7 +250,7 @@ class MatmulWeightsDecompression : public testing::WithParamInterface(test_param); @@ -290,19 +288,19 @@ class MatmulWeightsDecompression : public testing::WithParamInterface> filterAdditionalConfigBasic() { - std::vector> additional_config = {CPUTestUtils::cpuEmptyPluginConfig}; +std::vector filter_additional_config_basic() { + std::vector additional_config = {CPUTestUtils::empty_plugin_config}; return additional_config; } -std::vector> filterAdditionalConfigAMX() { - std::vector> additional_config = {}; - if (with_cpu_x86_avx512_core_amx()) - additional_config.push_back({{PluginConfigParams::KEY_ENFORCE_BF16, PluginConfigParams::YES}}); +std::vector filter_additional_config_amx() { + std::vector additional_config = {}; + if (ov::with_cpu_x86_avx512_core_amx()) + additional_config.push_back({{ov::hint::inference_precision(ov::element::bf16)}}); return additional_config; } @@ -331,11 +329,7 @@ const std::vector input_shapes_amx = { {{{}, {{11, 339, 577}}}, {577, 335}}, {{{}, {{1, 1, 256}}}, {256, 128}, 64ul}, }; -const std::vector fusing_params { - emptyFusingSpec, - fusingBias, - fusingFakeQuantizePerTensorRelu -}; +const std::vector fusing_params{emptyFusingSpec, 
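+                                          // the three post-op patterns the decompressed-weights MatMul must keep
+                                          // fusing: no post-op, a bias Add, and a per-tensor FakeQuantize + ReLU
+                                          // chain appended by the CpuTestWithFusing machinery (a summary of the
+                                          // fixtures, not part of the original change).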
fusingBias, fusingFakeQuantizePerTensorRelu}; INSTANTIATE_TEST_SUITE_P(smoke_MatMulCompressedWeights_basic, MatmulWeightsDecompression, @@ -345,7 +339,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_MatMulCompressedWeights_basic, ::testing::Values(true), ::testing::Values(true), ::testing::Values(true), - ::testing::ValuesIn(filterAdditionalConfigBasic()), + ::testing::ValuesIn(filter_additional_config_basic()), ::testing::ValuesIn(fusing_params), ::testing::Values(true)), MatmulWeightsDecompression::getTestCaseName); @@ -358,7 +352,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_MatMulCompressedWeights_amx, ::testing::Values(true), ::testing::Values(true), ::testing::Values(true), - ::testing::ValuesIn(filterAdditionalConfigAMX()), + ::testing::ValuesIn(filter_additional_config_amx()), ::testing::ValuesIn(fusing_params), ::testing::Values(true)), MatmulWeightsDecompression::getTestCaseName); @@ -387,7 +381,7 @@ INSTANTIATE_TEST_SUITE_P(smoke_MatMulCompressedWeights_corner_cases_basic, ::testing::ValuesIn(transpose_weights), ::testing::ValuesIn(add_decompression_sub), ::testing::ValuesIn(reshape_on_decompression), - ::testing::ValuesIn(filterAdditionalConfigBasic()), + ::testing::ValuesIn(filter_additional_config_basic()), ::testing::Values(emptyFusingSpec), ::testing::Values(true)), MatmulWeightsDecompression::getTestCaseName); @@ -400,9 +394,10 @@ INSTANTIATE_TEST_SUITE_P(smoke_MatMulCompressedWeights_corner_cases_amx, ::testing::ValuesIn(transpose_weights), ::testing::ValuesIn(add_decompression_sub), ::testing::ValuesIn(reshape_on_decompression), - ::testing::ValuesIn(filterAdditionalConfigAMX()), + ::testing::ValuesIn(filter_additional_config_amx()), ::testing::Values(emptyFusingSpec), ::testing::Values(true)), MatmulWeightsDecompression::getTestCaseName); -} // namespace -} // namespace SubgraphTestsDefinitions +} // namespace +} // namespace test +} // namespace ov diff --git a/src/plugins/intel_cpu/tests/functional/test_utils/cpu_test_utils.hpp b/src/plugins/intel_cpu/tests/functional/test_utils/cpu_test_utils.hpp index 99299059e28197..a17f0e13303aa4 100644 --- a/src/plugins/intel_cpu/tests/functional/test_utils/cpu_test_utils.hpp +++ b/src/plugins/intel_cpu/tests/functional/test_utils/cpu_test_utils.hpp @@ -155,9 +155,9 @@ class CPUTestsBase { * @param lastNode The last node of the initial graph. * @return The last node of the modified graph. 
*/ - virtual std::shared_ptr modifyGraph(const ov::element::Type &ngPrc, - ov::ParameterVector ¶ms, - const std::shared_ptr &lastNode); + virtual std::shared_ptr modifyGraph(const ov::element::Type& ngPrc, + ov::ParameterVector& params, + const std::shared_ptr& lastNode); virtual bool primTypeCheck(std::string primType) const; From ea1ffbaf7f6eb9f343837ac7a6dc3e4774f4cd71 Mon Sep 17 00:00:00 2001 From: Fang Xu Date: Wed, 6 Dec 2023 02:52:15 +0800 Subject: [PATCH 03/13] [CPU] Output correct streams and threads number (#21421) --- src/plugins/intel_cpu/src/config.cpp | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/plugins/intel_cpu/src/config.cpp b/src/plugins/intel_cpu/src/config.cpp index df2304c50e8583..53b2779936b7b3 100644 --- a/src/plugins/intel_cpu/src/config.cpp +++ b/src/plugins/intel_cpu/src/config.cpp @@ -378,8 +378,6 @@ void Config::updateProperties() { _config.insert({ov::device::id.name(), device_id}); - _config.insert({ov::num_streams.name(), std::to_string(streamExecutorConfig._streams)}); - _config.insert({ov::inference_num_threads.name(), std::to_string(streamExecutorConfig._threads)}); _config.insert({ov::hint::performance_mode.name(), ov::util::to_string(hintPerfMode)}); _config.insert({ov::hint::num_requests.name(), std::to_string(hintNumRequests)}); From 052021648132eae40f2a9a4e3a87640424989501 Mon Sep 17 00:00:00 2001 From: Wanglei Shen Date: Wed, 6 Dec 2023 11:53:58 +0800 Subject: [PATCH 04/13] fix streams calculation issue for latency mode with big threads input (#21437) * fix streams calculation issue for latency mode with big threads input * update for typo * update for typo * add test cases * update for comments --- .../intel_cpu/src/cpu_streams_calculation.cpp | 113 ++++++------- .../streams_info/streams_info_table_test.cpp | 148 ++++++++++++++++-- 2 files changed, 185 insertions(+), 76 deletions(-) diff --git a/src/plugins/intel_cpu/src/cpu_streams_calculation.cpp b/src/plugins/intel_cpu/src/cpu_streams_calculation.cpp index 0d22db1546118e..ea8ba981dc1992 100644 --- a/src/plugins/intel_cpu/src/cpu_streams_calculation.cpp +++ b/src/plugins/intel_cpu/src/cpu_streams_calculation.cpp @@ -146,68 +146,62 @@ std::vector> get_streams_info_table(const int input_streams, } if (((input_streams_changed == false) && - (input_perf_hint == ov::util::to_string(ov::hint::PerformanceMode::LATENCY)) && - ((latencyThreadingMode == Config::LatencyThreadingMode::PER_PLATFORM) || (proc_type_table.size() == 1))) || + (input_perf_hint == ov::util::to_string(ov::hint::PerformanceMode::LATENCY))) || ((input_streams_changed == true) && (input_streams == 1))) { - n_streams = 1; - if ((proc_type_table.size() == 1) && (input_threads == 0) && (model_prefer_threads > 0)) { - stream_info[NUMBER_OF_STREAMS] = n_streams; - if ((model_prefer_threads == proc_type_table[0][MAIN_CORE_PROC]) && - (proc_type_table[0][MAIN_CORE_PROC] > 0)) { - stream_info[PROC_TYPE] = MAIN_CORE_PROC; - n_threads_per_stream = proc_type_table[0][MAIN_CORE_PROC] + proc_type_table[0][HYPER_THREADING_PROC]; - stream_info[THREADS_PER_STREAM] = n_threads_per_stream; - update_ids_method(proc_type_table[0]); - } else if (proc_type_table[0][MAIN_CORE_PROC] == 0) { - stream_info[PROC_TYPE] = EFFICIENT_CORE_PROC; - n_threads_per_stream = proc_type_table[0][EFFICIENT_CORE_PROC]; - stream_info[THREADS_PER_STREAM] = n_threads_per_stream; - update_ids_method(proc_type_table[0]); + if (input_threads > 0) { + n_streams = 1; + n_threads_per_stream = std::min(input_threads, proc_type_table[0][ALL_PROC]); + if (proc_type_table.size() 
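+            // n_threads_per_stream is the user request clamped to the machine total, and the
+            // stream count stays at 1. Worked example, taken from the
+            // _2sockets_48cores_latency_2 case added below: input_threads = 96 on a
+            // 2 x 24-core box gives min(96, 48) = 48, i.e. one stream spanning both sockets
+            // rather than an overcommitted thread pool.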
== 1) { + if ((n_threads_per_stream > proc_type_table[0][MAIN_CORE_PROC]) && + (proc_type_table[0][MAIN_CORE_PROC] > 0)) { + stream_info[PROC_TYPE] = ALL_PROC; + } + } + } else if (((input_streams_changed == false) && + (latencyThreadingMode == Config::LatencyThreadingMode::PER_PLATFORM)) || + (proc_type_table.size() == 1) || ((input_streams_changed == true) && (input_streams == 1))) { + n_streams = 1; + if ((proc_type_table.size() == 1) && (model_prefer_threads > 0)) { + stream_info[NUMBER_OF_STREAMS] = n_streams; + if ((model_prefer_threads == proc_type_table[0][MAIN_CORE_PROC]) && + (proc_type_table[0][MAIN_CORE_PROC] > 0)) { + stream_info[PROC_TYPE] = MAIN_CORE_PROC; + n_threads_per_stream = + proc_type_table[0][MAIN_CORE_PROC] + proc_type_table[0][HYPER_THREADING_PROC]; + stream_info[THREADS_PER_STREAM] = n_threads_per_stream; + update_ids_method(proc_type_table[0]); + } else if (proc_type_table[0][MAIN_CORE_PROC] == 0) { + stream_info[PROC_TYPE] = EFFICIENT_CORE_PROC; + n_threads_per_stream = proc_type_table[0][EFFICIENT_CORE_PROC]; + stream_info[THREADS_PER_STREAM] = n_threads_per_stream; + update_ids_method(proc_type_table[0]); + } else { + stream_info[PROC_TYPE] = ALL_PROC; + n_threads_per_stream = proc_type_table[0][ALL_PROC]; + } } else { - stream_info[PROC_TYPE] = ALL_PROC; n_threads_per_stream = proc_type_table[0][ALL_PROC]; } - } else { - n_threads_per_stream = input_threads > 0 ? std::min(input_threads, proc_type_table[0][ALL_PROC]) - : proc_type_table[0][ALL_PROC]; - if ((proc_type_table.size() == 1) && (n_threads_per_stream > proc_type_table[0][MAIN_CORE_PROC]) && - (proc_type_table[0][MAIN_CORE_PROC] > 0)) { - stream_info[PROC_TYPE] = ALL_PROC; + } else if ((input_streams_changed == false) && + (latencyThreadingMode == Config::LatencyThreadingMode::PER_SOCKET)) { + for (auto& row : proc_socket_table) { + n_threads_per_stream = std::max(n_threads_per_stream, row[ALL_PROC]); } - } - } else if ((input_streams_changed == false) && - (input_perf_hint == ov::util::to_string(ov::hint::PerformanceMode::LATENCY)) && - (latencyThreadingMode == Config::LatencyThreadingMode::PER_SOCKET)) { - for (auto& row : proc_socket_table) { - n_threads_per_stream = std::max(n_threads_per_stream, row[ALL_PROC]); - } - n_threads_per_stream = input_threads > 0 ? std::min(input_threads, n_threads_per_stream) : n_threads_per_stream; - for (auto& row : proc_socket_table) { - if (n_threads_per_stream <= row[ALL_PROC]) { - n_streams++; + for (auto& row : proc_socket_table) { + if (n_threads_per_stream <= row[ALL_PROC]) { + n_streams++; + } } - } - n_streams = input_threads > 0 ? static_cast(input_threads / n_threads_per_stream) : n_streams; - n_streams = input_infer_requests > 0 ? std::min(input_infer_requests, n_streams) : n_streams; - } else if ((input_streams_changed == false) && - (input_perf_hint == ov::util::to_string(ov::hint::PerformanceMode::LATENCY)) && - (latencyThreadingMode == Config::LatencyThreadingMode::PER_NUMA_NODE)) { - if (proc_type_table.size() == 1) { - n_streams = 1; - n_threads_per_stream = input_threads > 0 ? std::min(input_threads, proc_type_table[0][ALL_PROC]) - : proc_type_table[0][ALL_PROC]; + n_streams = input_infer_requests > 0 ? std::min(input_infer_requests, n_streams) : n_streams; } else { for (size_t i = 1; i < proc_type_table.size(); i++) { n_threads_per_stream = std::max(n_threads_per_stream, proc_type_table[i][ALL_PROC]); } - n_threads_per_stream = - input_threads > 0 ? 
std::min(input_threads, n_threads_per_stream) : n_threads_per_stream; for (size_t i = 1; i < proc_type_table.size(); i++) { if (n_threads_per_stream <= proc_type_table[i][ALL_PROC]) { n_streams++; } } - n_streams = input_threads > 0 ? static_cast(input_threads / n_threads_per_stream) : n_streams; n_streams = input_infer_requests > 0 ? std::min(input_infer_requests, n_streams) : n_streams; } } else { @@ -305,35 +299,20 @@ std::vector> get_streams_info_table(const int input_streams, } } - if (total_streams == n_streams) { + if ((total_streams == n_streams) && (input_threads == 0)) { if (proc_type_table.size() == 1) { if (proc_type_table[0][ALL_PROC] >= stream_info[THREADS_PER_STREAM]) { update_mix_stream_info(proc_type_table[0], proc_type_table); n_streams--; } } else { - for (size_t n_node = 1; (n_node < proc_type_table.size()) && (n_streams > 0); n_node++) { - if (proc_type_table[n_node][ALL_PROC] >= stream_info[THREADS_PER_STREAM]) { - update_mix_stream_info(proc_type_table[n_node], proc_type_table); + for (size_t n_node = 0; (n_node < proc_socket_table.size()) && (n_streams > 0); n_node++) { + if (proc_socket_table[n_node][ALL_PROC] >= stream_info[THREADS_PER_STREAM]) { + update_mix_stream_info(proc_socket_table[n_node], proc_type_table); n_streams--; } } } - for (size_t n_node = 0; (n_node < proc_socket_table.size()) && (n_streams > 0); n_node++) { - if (proc_socket_table[n_node][ALL_PROC] >= stream_info[THREADS_PER_STREAM]) { - update_mix_stream_info(proc_socket_table[n_node], proc_type_table); - n_streams--; - } - } - } - - if (total_streams == n_streams) { - for (size_t n_node = 0; (n_node < proc_socket_table.size()) && (n_streams > 0); n_node++) { - if (proc_socket_table[n_node][ALL_PROC] >= stream_info[THREADS_PER_STREAM]) { - update_mix_stream_info(proc_socket_table[n_node], proc_type_table); - n_streams--; - } - } } if (total_streams == n_streams) { @@ -389,7 +368,7 @@ std::vector> get_streams_info_table(const int input_streams, } } else { if (stream_info[PROC_TYPE] == ALL_PROC) { - update_mix_stream_info(proc_socket_table[0], proc_type_table); + update_mix_stream_info(proc_type_table[0], proc_type_table); } else if (stream_info[PROC_TYPE] == MAIN_CORE_PROC) { if (stream_info[THREADS_PER_STREAM] == proc_socket_table[0][MAIN_CORE_PROC]) { streams_info_table.push_back(stream_info); diff --git a/src/plugins/intel_cpu/tests/unit/streams_info/streams_info_table_test.cpp b/src/plugins/intel_cpu/tests/unit/streams_info/streams_info_table_test.cpp index 61efd3ec2e1b10..3b68ef44d693fc 100644 --- a/src/plugins/intel_cpu/tests/unit/streams_info/streams_info_table_test.cpp +++ b/src/plugins/intel_cpu/tests/unit/streams_info/streams_info_table_test.cpp @@ -222,6 +222,43 @@ StreamsCalculationTestCase _2sockets_104cores_latency_socket_7 = { {0, MAIN_CORE_PROC, 26, 2, 1}, {0, MAIN_CORE_PROC, 26, 3, 1}}, }; +StreamsCalculationTestCase _2sockets_104cores_latency_socket_8 = { + 1, + false, + 208, + 0, + 0, + "LATENCY", + ov::intel_cpu::Config::LatencyThreadingMode::PER_SOCKET, + {{208, 104, 0, 104, -1, -1}, {104, 52, 0, 52, 0, 0}, {104, 52, 0, 52, 1, 1}}, + {{1, ALL_PROC, 208, -1, -1}, + {0, MAIN_CORE_PROC, 52, 0, 0}, + {0, MAIN_CORE_PROC, 52, 1, 1}, + {0, HYPER_THREADING_PROC, 52, 0, 0}, + {0, HYPER_THREADING_PROC, 52, 1, 1}}, +}; +StreamsCalculationTestCase _2sockets_104cores_latency_socket_9 = { + 1, + false, + 104, + 0, + 0, + "LATENCY", + ov::intel_cpu::Config::LatencyThreadingMode::PER_SOCKET, + {{208, 104, 0, 104, -1, -1}, {104, 52, 0, 52, 0, 0}, {104, 52, 0, 52, 1, 1}}, + {{1, ALL_PROC, 
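+     // expected rows use the streams_info_table layout {NUMBER_OF_STREAMS, PROC_TYPE,
+     // THREADS_PER_STREAM, STREAM_NUMA_NODE_ID, STREAM_SOCKET_ID}; -1 marks an entry that is
+     // not pinned to a single node/socket, and the 0-stream rows itemize the processors that
+     // back the ALL_PROC row they follow.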
104, -1, -1}, {0, MAIN_CORE_PROC, 52, 0, 0}, {0, MAIN_CORE_PROC, 52, 1, 1}}, +}; +StreamsCalculationTestCase _2sockets_104cores_latency_socket_10 = { + 1, + false, + 52, + 0, + 0, + "LATENCY", + ov::intel_cpu::Config::LatencyThreadingMode::PER_SOCKET, + {{208, 104, 0, 104, -1, -1}, {104, 52, 0, 52, 0, 0}, {104, 52, 0, 52, 1, 1}}, + {{1, MAIN_CORE_PROC, 52, 0, 0}}, +}; StreamsCalculationTestCase _2sockets_104cores_latency_node_1 = { 1, false, @@ -262,17 +299,17 @@ StreamsCalculationTestCase _2sockets_104cores_latency_node_3 = { {52, 26, 0, 26, 1, 0}, {52, 26, 0, 26, 2, 1}, {52, 26, 0, 26, 3, 1}}, - {{1, ALL_PROC, 52, 0, 0}, + {{1, ALL_PROC, 52, -1, 0}, {0, MAIN_CORE_PROC, 26, 0, 0}, - {0, HYPER_THREADING_PROC, 26, 0, 0}, - {1, ALL_PROC, 52, 1, 0}, {0, MAIN_CORE_PROC, 26, 1, 0}, - {0, HYPER_THREADING_PROC, 26, 1, 0}, - {1, ALL_PROC, 52, 2, 1}, + {1, ALL_PROC, 52, -1, 1}, {0, MAIN_CORE_PROC, 26, 2, 1}, - {0, HYPER_THREADING_PROC, 26, 2, 1}, - {1, ALL_PROC, 52, 3, 1}, {0, MAIN_CORE_PROC, 26, 3, 1}, + {1, ALL_PROC, 52, -1, 0}, + {0, HYPER_THREADING_PROC, 26, 0, 0}, + {0, HYPER_THREADING_PROC, 26, 1, 0}, + {1, ALL_PROC, 52, -1, 1}, + {0, HYPER_THREADING_PROC, 26, 2, 1}, {0, HYPER_THREADING_PROC, 26, 3, 1}}, }; StreamsCalculationTestCase _2sockets_104cores_latency_node_4 = { @@ -304,6 +341,43 @@ StreamsCalculationTestCase _2sockets_104cores_latency_node_5 = { {0, MAIN_CORE_PROC, 26, 2, 1}, {0, MAIN_CORE_PROC, 26, 3, 1}}, }; +StreamsCalculationTestCase _2sockets_104cores_latency_node_6 = { + 1, + false, + 104, + 0, + 0, + "LATENCY", + ov::intel_cpu::Config::LatencyThreadingMode::PER_NUMA_NODE, + {{104, 104, 0, 0, -1, -1}, {26, 26, 0, 0, 0, 0}, {26, 26, 0, 0, 1, 0}, {26, 26, 0, 0, 2, 1}, {26, 26, 0, 0, 3, 1}}, + {{1, ALL_PROC, 104, -1, -1}, + {0, MAIN_CORE_PROC, 26, 0, 0}, + {0, MAIN_CORE_PROC, 26, 1, 0}, + {0, MAIN_CORE_PROC, 26, 2, 1}, + {0, MAIN_CORE_PROC, 26, 3, 1}}, +}; +StreamsCalculationTestCase _2sockets_104cores_latency_node_7 = { + 1, + false, + 52, + 0, + 0, + "LATENCY", + ov::intel_cpu::Config::LatencyThreadingMode::PER_NUMA_NODE, + {{104, 104, 0, 0, -1, -1}, {26, 26, 0, 0, 0, 0}, {26, 26, 0, 0, 1, 0}, {26, 26, 0, 0, 2, 1}, {26, 26, 0, 0, 3, 1}}, + {{1, ALL_PROC, 52, -1, -1}, {0, MAIN_CORE_PROC, 26, 0, 0}, {0, MAIN_CORE_PROC, 26, 1, 0}}, +}; +StreamsCalculationTestCase _2sockets_104cores_latency_node_8 = { + 1, + false, + 26, + 0, + 0, + "LATENCY", + ov::intel_cpu::Config::LatencyThreadingMode::PER_NUMA_NODE, + {{104, 104, 0, 0, -1, -1}, {26, 26, 0, 0, 0, 0}, {26, 26, 0, 0, 1, 0}, {26, 26, 0, 0, 2, 1}, {26, 26, 0, 0, 3, 1}}, + {{1, MAIN_CORE_PROC, 26, 0, 0}}, +}; StreamsCalculationTestCase _2sockets_104cores_latency_1 = { 1, false, @@ -705,7 +779,17 @@ StreamsCalculationTestCase _2sockets_48cores_latency_1 = { {{48, 48, 0, 0, -1, -1}, {24, 24, 0, 0, 0, 0}, {24, 24, 0, 0, 1, 1}}, {{1, ALL_PROC, 48, -1, -1}, {0, MAIN_CORE_PROC, 24, 0, 0}, {0, MAIN_CORE_PROC, 24, 1, 1}}, }; - +StreamsCalculationTestCase _2sockets_48cores_latency_2 = { + 1, + false, + 96, + 0, + 0, + "LATENCY", + ov::intel_cpu::Config::LatencyThreadingMode::PER_SOCKET, + {{48, 48, 0, 0, -1, -1}, {24, 24, 0, 0, 0, 0}, {24, 24, 0, 0, 1, 1}}, + {{1, ALL_PROC, 48, -1, -1}, {0, MAIN_CORE_PROC, 24, 0, 0}, {0, MAIN_CORE_PROC, 24, 1, 1}}, +}; StreamsCalculationTestCase _2sockets_48cores_tput_1 = { 1, false, @@ -754,6 +838,18 @@ StreamsCalculationTestCase _2sockets_48cores_tput_4 = { {{2, MAIN_CORE_PROC, 10, 0, 0}}, }; +StreamsCalculationTestCase _2sockets_20cores_tput_1 = { + 1, + false, + 0, + 0, + 0, + "THROUGHPUT", + 
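+    // inputs: streams = 1 (not user-forced), threads / infer_requests / model_prefer_threads
+    // all 0, i.e. fully automatic THROUGHPUT selection; each idle 10-core socket is expected
+    // to split into 2 streams of 5 threads.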
ov::intel_cpu::Config::LatencyThreadingMode::PER_PLATFORM, + {{20, 20, 0, 0, -1, -1}, {10, 10, 0, 0, 0, 0}, {10, 10, 0, 0, 1, 1}}, + {{2, MAIN_CORE_PROC, 5, 0, 0}, {2, MAIN_CORE_PROC, 5, 1, 1}}, +}; + StreamsCalculationTestCase _1sockets_14cores_latency_1 = { 1, false, @@ -1747,6 +1843,30 @@ StreamsCalculationTestCase _1sockets_mock_tput_1 = { {{6, MAIN_CORE_PROC, 1, 0, 0}, {3, EFFICIENT_CORE_PROC, 2, 0, 0}, {3, HYPER_THREADING_PROC, 1, 0, 0}}, }; +StreamsCalculationTestCase _1sockets_mock_tput_2 = { + 1, + false, + 0, + 0, + 0, + "THROUGHPUT", + ov::intel_cpu::Config::LatencyThreadingMode::PER_PLATFORM, + {{27, 27, 0, 0, -1, -1}, {19, 19, 0, 0, 0, 0}, {8, 8, 0, 0, 1, 1}}, + {{4, MAIN_CORE_PROC, 4, 0, 0}, {2, MAIN_CORE_PROC, 4, 1, 1}}, +}; + +StreamsCalculationTestCase _1sockets_mock_tput_3 = { + 1, + false, + 0, + 0, + 0, + "THROUGHPUT", + ov::intel_cpu::Config::LatencyThreadingMode::PER_PLATFORM, + {{19, 19, 0, 0, -1, -1}, {11, 11, 0, 0, 0, 0}, {8, 8, 0, 0, 1, 1}}, + {{5, MAIN_CORE_PROC, 2, 0, 0}, {4, MAIN_CORE_PROC, 2, 1, 1}}, +}; + TEST_P(StreamsCalculationTests, StreamsCalculation) {} INSTANTIATE_TEST_SUITE_P(StreamsInfoTable, @@ -1762,11 +1882,17 @@ INSTANTIATE_TEST_SUITE_P(StreamsInfoTable, _2sockets_104cores_latency_socket_5, _2sockets_104cores_latency_socket_6, _2sockets_104cores_latency_socket_7, + _2sockets_104cores_latency_socket_8, + _2sockets_104cores_latency_socket_9, + _2sockets_104cores_latency_socket_10, _2sockets_104cores_latency_node_1, _2sockets_104cores_latency_node_2, _2sockets_104cores_latency_node_3, _2sockets_104cores_latency_node_4, _2sockets_104cores_latency_node_5, + _2sockets_104cores_latency_node_6, + _2sockets_104cores_latency_node_7, + _2sockets_104cores_latency_node_8, _2sockets_104cores_latency_1, _2sockets_104cores_latency_2, _2sockets_104cores_latency_3, @@ -1796,10 +1922,12 @@ INSTANTIATE_TEST_SUITE_P(StreamsInfoTable, _2sockets_104cores_tput_19, _2sockets_104cores_tput_20, _2sockets_48cores_latency_1, + _2sockets_48cores_latency_2, _2sockets_48cores_tput_1, _2sockets_48cores_tput_2, _2sockets_48cores_tput_3, _2sockets_48cores_tput_4, + _2sockets_20cores_tput_1, _1sockets_14cores_latency_1, _1sockets_14cores_latency_2, _1sockets_14cores_latency_3, @@ -1879,6 +2007,8 @@ INSTANTIATE_TEST_SUITE_P(StreamsInfoTable, _1sockets_ecores_tput_3, _1sockets_ecores_tput_4, _1sockets_ecores_tput_5, - _1sockets_mock_tput_1)); + _1sockets_mock_tput_1, + _1sockets_mock_tput_2, + _1sockets_mock_tput_3)); } // namespace \ No newline at end of file From d217847714587cd388b3910ee74be889ace410b0 Mon Sep 17 00:00:00 2001 From: Ilya Lavrenov Date: Wed, 6 Dec 2023 10:21:30 +0400 Subject: [PATCH 05/13] Allow to load extension by relative path in frontends, node factory (#21486) --- .../dev_api/openvino/core/so_extension.hpp | 14 ++++++++++++- src/inference/src/core.cpp | 20 ++----------------- 2 files changed, 15 insertions(+), 19 deletions(-) diff --git a/src/core/dev_api/openvino/core/so_extension.hpp b/src/core/dev_api/openvino/core/so_extension.hpp index 98fb1e9f67a65a..cbba0d29c68d37 100644 --- a/src/core/dev_api/openvino/core/so_extension.hpp +++ b/src/core/dev_api/openvino/core/so_extension.hpp @@ -28,8 +28,20 @@ class OPENVINO_API SOExtension : public Extension { std::shared_ptr m_so; }; +inline std::string resolve_extension_path(const std::string& path) { + std::string retvalue; + try { + const std::string absolute_path = ov::util::get_absolute_file_path(path); + retvalue = ov::util::file_exists(absolute_path) ? 
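+                   // prefer the resolved absolute form only when the file actually exists;
+                   // otherwise keep the caller's string so the OS loader can still apply its
+                   // default search paths, e.g. a plain relative name such as "libmy_ext.so"
+                   // (hypothetical) found via LD_LIBRARY_PATH.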
absolute_path : path; + } catch (const std::runtime_error&) { + retvalue = path; + } + return retvalue; +} + inline std::vector load_extensions(const std::string& path) { - auto so = ov::util::load_shared_object(path.c_str()); + const std::string resolved_path = resolve_extension_path(path); + auto so = ov::util::load_shared_object(resolved_path.c_str()); using CreateFunction = void(std::vector&); std::vector extensions; reinterpret_cast(ov::util::get_symbol(so, "create_extensions"))(extensions); diff --git a/src/inference/src/core.cpp b/src/inference/src/core.cpp index fd05fbaec54a78..9ee07f6246afe5 100644 --- a/src/inference/src/core.cpp +++ b/src/inference/src/core.cpp @@ -13,20 +13,6 @@ #include "openvino/runtime/iremote_context.hpp" #include "openvino/util/file_util.hpp" -namespace { -std::string resolve_extension_path(const std::string& path) { - std::string retvalue; - try { - const std::string absolute_path = ov::util::get_absolute_file_path(path); - retvalue = ov::util::file_exists(absolute_path) ? absolute_path : path; - } catch (const std::runtime_error&) { - retvalue = path; - } - return retvalue; -} - -} // namespace - namespace ov { std::string find_plugins_xml(const std::string& xml_file) { @@ -166,8 +152,7 @@ void Core::add_extension(const InferenceEngine::IExtensionPtr& extension) { void Core::add_extension(const std::string& library_path) { try { - const std::string path = resolve_extension_path(library_path); - add_extension(ov::detail::load_extensions(path)); + add_extension(ov::detail::load_extensions(library_path)); } catch (const std::runtime_error&) { try { // Try to load legacy extension @@ -186,8 +171,7 @@ void Core::add_extension(const std::string& library_path) { #ifdef OPENVINO_ENABLE_UNICODE_PATH_SUPPORT void Core::add_extension(const std::wstring& library_path) { try { - const std::string path = resolve_extension_path(ov::util::wstring_to_string(library_path)); - add_extension(ov::detail::load_extensions(ov::util::string_to_wstring(path))); + add_extension(ov::detail::load_extensions(library_path)); } catch (const std::runtime_error&) { try { // Try to load legacy extension From bbfe22732fb2761f02fad687bb74f2fb805dae4b Mon Sep 17 00:00:00 2001 From: Irina Efode Date: Wed, 6 Dec 2023 13:02:04 +0400 Subject: [PATCH 06/13] [CONFORMANCE] Change downgrade coefficient logic for conformance reports (#21475) * [CONFORMANCE] Change downgrade coefficient logic for conformance reports * Remove extra reports --- .../src/read_ir/read_ir.cpp | 3 --- .../summary/op_summary.hpp | 6 ++---- .../utils/stat_update_utils.py | 12 +++++++----- .../src/summary/op_summary.cpp | 17 +++-------------- 4 files changed, 12 insertions(+), 26 deletions(-) diff --git a/src/tests/functional/plugin/conformance/test_runner/op_conformance_runner/src/read_ir/read_ir.cpp b/src/tests/functional/plugin/conformance/test_runner/op_conformance_runner/src/read_ir/read_ir.cpp index ab025592455daa..a90e4393b45d5d 100644 --- a/src/tests/functional/plugin/conformance/test_runner/op_conformance_runner/src/read_ir/read_ir.cpp +++ b/src/tests/functional/plugin/conformance/test_runner/op_conformance_runner/src/read_ir/read_ir.cpp @@ -122,9 +122,6 @@ uint64_t clip(uint64_t n, uint64_t lower, uint64_t upper) { } void ReadIRTest::SetUp() { - // todo: find the optimal way to find TEST_P instances - // inference + query_model + import_export - summary.setDowngradeCoefficient(3); std::pair model_pair; std::tie(model_pair, targetDevice, configuration) = this->GetParam(); std::tie(path_to_model, path_to_ref_tensor) = 
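     // the fixed downgrade coefficient (3 = the inference, query_model and import_export
     // TEST_P instances) is no longer divided out in C++; update_passrates() in
     // stat_update_utils.py below now derives the same factor as
     // k = round(relative_all_actual / relative_all_expected).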
model_pair;
diff --git a/src/tests/test_utils/functional_test_utils/include/functional_test_utils/summary/op_summary.hpp b/src/tests/test_utils/functional_test_utils/include/functional_test_utils/summary/op_summary.hpp
index 44cf995f7184c8..f10e8532fc60d4 100644
--- a/src/tests/test_utils/functional_test_utils/include/functional_test_utils/summary/op_summary.hpp
+++ b/src/tests/test_utils/functional_test_utils/include/functional_test_utils/summary/op_summary.hpp
@@ -30,19 +30,17 @@ class OpSummary : public virtual Summary {
     static OpSummary* p_instance;
     static bool extractBody;
     std::map<ov::NodeTypeInfo, PassRate> opsStats = {};
-    unsigned short int downgrade_coefficient;
 
     std::string get_opset_number(const std::string& opset_full_name);
 
 protected:
-    OpSummary(unsigned short int downgrade_coefficient = 1);
-    static OpSummary& createInstance(unsigned short int downgrade_coefficient = 1);
+    OpSummary();
+    static OpSummary& createInstance();
     static OpSummaryDestroyer destroyer;
     friend class OpSummaryDestroyer;
 
 public:
     static OpSummary& getInstance();
-    static void setDowngradeCoefficient(unsigned short int downgrade_coefficient = 1);
 
     std::map<ov::NodeTypeInfo, PassRate> getOPsStats() {
         return opsStats;
diff --git a/src/tests/test_utils/functional_test_utils/layer_tests_summary/utils/stat_update_utils.py b/src/tests/test_utils/functional_test_utils/layer_tests_summary/utils/stat_update_utils.py
index 634eee35560b6b..454dcff7ca0920 100644
--- a/src/tests/test_utils/functional_test_utils/layer_tests_summary/utils/stat_update_utils.py
+++ b/src/tests/test_utils/functional_test_utils/layer_tests_summary/utils/stat_update_utils.py
@@ -21,7 +21,8 @@ def update_passrates(results: ET.SubElement, rel_weights={}):
         passed_tests = 0
         total_tests = 0
         rel_passed_tests = None
-        rel_all_tests = None
+        rel_all_tests_expected = None
+        rel_all_tests_actual = None
         for attrib in op.attrib:
             if attrib == "passrate" or attrib == "relative_passrate":
                 continue
@@ -34,13 +35,14 @@ def update_passrates(results: ET.SubElement, rel_weights={}):
                 continue
             elif attrib == "relative_all":
                 if op.tag in rel_weights.keys():
-                    rel_all_tests = rel_weights[op.tag]
-                else:
-                    rel_all_tests = float(op.attrib.get(attrib))
+                    rel_all_tests_expected = rel_weights[op.tag]
+                rel_all_tests_actual = float(op.attrib.get(attrib))
                 continue
             total_tests += int(float(op.attrib.get(attrib)))
         passrate = float(passed_tests * 100 / total_tests) if total_tests != 0 else 0
-        rel_passrate = float(rel_passed_tests * 100 / rel_all_tests) if rel_all_tests != None and rel_all_tests != 0 else 0
+        rel_all_tests = rel_all_tests_actual if rel_all_tests_expected is None else rel_all_tests_expected
+        k = 1 if rel_all_tests_expected is None else round(rel_all_tests_actual / rel_all_tests_expected)
+        rel_passrate = float(rel_passed_tests * 100 / (k * rel_all_tests)) if rel_all_tests != None and rel_all_tests != 0 else 0
         op.set("passrate", "%.2f"%passrate)
         if rel_all_tests != None and rel_passed_tests != None:
             op.set("relative_passrate", "%.2f"%rel_passrate)
diff --git a/src/tests/test_utils/functional_test_utils/src/summary/op_summary.cpp b/src/tests/test_utils/functional_test_utils/src/summary/op_summary.cpp
index 1e9c9684ca7d4d..f78e923c37f6e7 100644
--- a/src/tests/test_utils/functional_test_utils/src/summary/op_summary.cpp
+++ b/src/tests/test_utils/functional_test_utils/src/summary/op_summary.cpp
@@ -28,14 +28,13 @@ void OpSummaryDestroyer::initialize(OpSummary* p) {
     p_instance = p;
 }
 
-OpSummary::OpSummary(unsigned short int in_downgrade_coefficient) {
+OpSummary::OpSummary() {
     reportFilename =
ov::test::utils::OP_REPORT_FILENAME; - downgrade_coefficient = in_downgrade_coefficient; } -OpSummary& OpSummary::createInstance(unsigned short int in_downgrade_coefficient) { +OpSummary& OpSummary::createInstance() { if (!p_instance) { - p_instance = new OpSummary(in_downgrade_coefficient); + p_instance = new OpSummary(); destroyer.initialize(p_instance); } return *p_instance; @@ -45,13 +44,6 @@ OpSummary& OpSummary::getInstance() { return createInstance(); } -void OpSummary::setDowngradeCoefficient(unsigned short int in_downgrade_coefficient) { - if (p_instance && p_instance->downgrade_coefficient != in_downgrade_coefficient) { - p_instance->downgrade_coefficient = in_downgrade_coefficient; - } - auto& summary_instance = createInstance(in_downgrade_coefficient); -} - void OpSummary::updateOPsStats(const ov::NodeTypeInfo& op, const PassRate::Statuses& status, double rel_influence_coef) { @@ -313,9 +305,6 @@ void OpSummary::saveReport() { pugi::xml_node currentDeviceNode = resultsNode.append_child(summary.deviceName.c_str()); std::unordered_set opList; for (auto& it : stats) { - it.second.rel_passed /= downgrade_coefficient; - it.second.rel_all /= downgrade_coefficient; - std::string name = functional::get_node_version(it.first); opList.insert(name); pugi::xml_node entry = currentDeviceNode.append_child(name.c_str()); From 6d3fbf4237a5cbccc554968a44f087d9bf0b507f Mon Sep 17 00:00:00 2001 From: Sonder <55493212+AndSonder@users.noreply.github.com> Date: Wed, 6 Dec 2023 17:59:50 +0800 Subject: [PATCH 07/13] =?UTF-8?q?=E3=80=90Hackathon=205th=20No.113?= =?UTF-8?q?=E3=80=91Support=20paddle=202.5.1=20(#20161)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Fixed comments for linux-riscv64 GHA workflow * Try to enable PDPD tests back * support paddle 2.5 * fix op test errors * fix op test errors * recover openvino/src/core/src/op/swish.cpp * recover thirdparty open_model_zoo * update cmakelist * disable some tests * fix code style * enable paddle ci tests * disable some tests * fix np.long error * recover reverse op * update ci config * recover set_value test codes * rm linux_debian.yml * Compatible with cases where different paddle versions have different output shapes * remove set_value tests * recover save_model.py * Added ctest labels for FE tests only if FW is found * recover thirdparty * Update CMakeLists.txt Fixed creation of paddle_tests target two times * update paddle v2.5.1 proto file * recover thirdparty * fix Paddle_Reader_Tests.LoadModelMemoryToCore error * fix Paddle_Places test issue in v2.5.1 * support some tests for low version paddle * fix paddle FrontEndCutModelTest issue * fix * support all other tests for low version paddle * fix codestyle * fix codestyle * Update generate_multi_tensor_split.py * fix build error * add testUnloadLibBeforeDeletingDependentObject into paddle skip tests config * remove PROTOBUF_LITE from paddle CmakeList.txt * fix path error * add debug info * add debug codes * use make_model_path * recover * add option optimize_for * use FrontEndTestUtils in read_paddle_model_test.cpp * use FrontEndTestUtils in read_paddle_model_test.cpp * fix grid_sample error when using dynamic shape * fix error tests for 2.4 version * add paddle version judge for floor_div * fix grid_sample and add tests * fix * fix * recover * recover grid_sampler * Apply suggestions from code review * fix * Apply suggestions from code review * Apply suggestions from code review * fix build error --------- Co-authored-by: Ilya Lavrenov 
Co-authored-by: meiyang-intel --- .github/workflows/linux.yml | 1 - .../openvino/frontend/paddle/decoder.hpp | 3 + .../openvino/frontend/paddle/node_context.hpp | 4 + src/frontends/paddle/src/decoder_proto.cpp | 4 + src/frontends/paddle/src/decoder_proto.hpp | 2 + src/frontends/paddle/src/input_model.cpp | 7 + src/frontends/paddle/src/input_model.hpp | 1 + src/frontends/paddle/src/op/argmax.cpp | 14 +- .../paddle/src/op/elementwise_ops.cpp | 15 +- .../src/op/fill_constant_batch_size_like.cpp | 2 +- src/frontends/paddle/src/op/grid_sampler.cpp | 4 + src/frontends/paddle/src/op/matmul_v2.cpp | 4 +- src/frontends/paddle/src/op/p_norm.cpp | 28 +- src/frontends/paddle/src/op/reduce_ops.hpp | 6 + src/frontends/paddle/src/op/reverse.cpp | 2 +- src/frontends/paddle/src/op_table.cpp | 31 +- src/frontends/paddle/src/place.hpp | 4 + .../paddle/src/proto/framework.proto | 27 +- src/frontends/paddle/tests/CMakeLists.txt | 3 - src/frontends/paddle/tests/op_fuzzy.cpp | 52 ++- src/frontends/paddle/tests/places.cpp | 123 +++---- .../paddle/tests/read_paddle_model_test.cpp | 8 +- .../paddle/tests/skip_tests_config.cpp | 3 +- .../gen_scripts/generate_2in_2out.py | 43 ++- .../gen_scripts/generate_2in_2out_dynbatch.py | 44 ++- .../test_models/gen_scripts/generate_clip.py | 5 +- .../gen_scripts/generate_conv2d_relu.py | 21 +- .../gen_scripts/generate_conv2d_s.py | 18 +- .../gen_scripts/generate_dynamic_pool2d.py | 25 +- .../gen_scripts/generate_elementwise_ops.py | 227 +++++++----- .../gen_scripts/generate_embedding.py | 96 +++--- .../test_models/gen_scripts/generate_exp.py | 7 +- .../gen_scripts/generate_fill_constant.py | 100 ------ .../generate_fill_constant_batch_size_like.py | 42 --- .../test_models/gen_scripts/generate_full.py | 118 +++++++ ...fill_any_like.py => generate_full_like.py} | 22 +- .../test_models/gen_scripts/generate_gelu.py | 5 +- .../gen_scripts/generate_greater_equal.py | 5 +- .../gen_scripts/generate_greater_than.py | 8 +- .../gen_scripts/generate_grid_sampler.py | 2 + .../gen_scripts/generate_hard_sigmoid.py | 2 +- .../gen_scripts/generate_hard_swish.py | 5 +- .../gen_scripts/generate_index_select.py | 1 - .../gen_scripts/generate_layer_norm.py | 8 +- .../gen_scripts/generate_leaky_relu.py | 7 +- .../gen_scripts/generate_less_than.py | 23 +- .../test_models/gen_scripts/generate_log.py | 5 +- .../test_models/gen_scripts/generate_loop.py | 15 +- .../test_models/gen_scripts/generate_mul.py | 5 +- .../generate_multi_tensor_split.py | 17 +- .../gen_scripts/generate_place_test_model.py | 42 ++- .../gen_scripts/generate_pool2d.py | 326 ++++++++++-------- .../test_models/gen_scripts/generate_pow.py | 11 +- .../gen_scripts/generate_prior_box.py | 38 +- .../test_models/gen_scripts/generate_range.py | 5 +- .../gen_scripts/generate_reduce_all.py | 5 +- .../test_models/gen_scripts/generate_relu6.py | 7 +- .../gen_scripts/generate_reshape.py | 12 +- .../gen_scripts/generate_reverse.py | 61 ---- .../test_models/gen_scripts/generate_scale.py | 12 +- .../gen_scripts/generate_set_value.py | 2 +- .../gen_scripts/generate_sigmoid.py | 5 +- .../test_models/gen_scripts/generate_split.py | 18 +- .../gen_scripts/generate_squeeze.py | 6 +- .../gen_scripts/generate_strided_slice.py | 159 +++++---- .../test_models/gen_scripts/generate_swish.py | 25 +- .../test_models/gen_scripts/generate_tile.py | 7 +- .../gen_scripts/generate_unsqueeze.py | 5 +- .../test_models/gen_scripts/generate_where.py | 5 +- .../gen_scripts/generate_where_index.py | 5 +- .../tests/test_models/gen_scripts/nms.py | 35 +- 71 files changed, 
1161 insertions(+), 854 deletions(-) delete mode 100644 src/frontends/paddle/tests/test_models/gen_scripts/generate_fill_constant.py delete mode 100644 src/frontends/paddle/tests/test_models/gen_scripts/generate_fill_constant_batch_size_like.py create mode 100644 src/frontends/paddle/tests/test_models/gen_scripts/generate_full.py rename src/frontends/paddle/tests/test_models/gen_scripts/{generate_fill_any_like.py => generate_full_like.py} (63%) delete mode 100644 src/frontends/paddle/tests/test_models/gen_scripts/generate_reverse.py diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml index 567c6e9a14c038..900d29a34d7fd7 100644 --- a/.github/workflows/linux.yml +++ b/.github/workflows/linux.yml @@ -745,7 +745,6 @@ jobs: --gtest_output=xml:${INSTALL_TEST_DIR}/TEST-IRFrontend.xml - name: PaddlePaddle frontend tests - if: ${{ 'false' }} run: | source ${INSTALL_DIR}/setupvars.sh ${INSTALL_TEST_DIR}/paddle_tests --gtest_print_time=1 \ diff --git a/src/frontends/paddle/include/openvino/frontend/paddle/decoder.hpp b/src/frontends/paddle/include/openvino/frontend/paddle/decoder.hpp index 1934352e3c01b1..a687f137ca0e03 100644 --- a/src/frontends/paddle/include/openvino/frontend/paddle/decoder.hpp +++ b/src/frontends/paddle/include/openvino/frontend/paddle/decoder.hpp @@ -40,6 +40,9 @@ class DecoderBase { virtual size_t get_output_size() const = 0; virtual size_t get_output_size(const std::string& port_name) const = 0; + /// \brief Get the version + virtual int64_t get_version() const = 0; + /// \brief Get output port type /// /// Current API assumes that output port has only one output type. diff --git a/src/frontends/paddle/include/openvino/frontend/paddle/node_context.hpp b/src/frontends/paddle/include/openvino/frontend/paddle/node_context.hpp index d571af6ddddf44..dde7df0770c2ff 100644 --- a/src/frontends/paddle/include/openvino/frontend/paddle/node_context.hpp +++ b/src/frontends/paddle/include/openvino/frontend/paddle/node_context.hpp @@ -98,6 +98,10 @@ class NodeContext : public ov::frontend::NodeContext { return decoder->get_output_port_infos(port_name); } + int64_t get_version() const { + return decoder->get_version(); + } + private: ov::Any apply_additional_conversion_rules(const ov::Any& any, const std::type_info& type_info) const override { auto res = decoder->convert_attribute(any, type_info); diff --git a/src/frontends/paddle/src/decoder_proto.cpp b/src/frontends/paddle/src/decoder_proto.cpp index f286bfcf1f81fc..41da414db6a23e 100644 --- a/src/frontends/paddle/src/decoder_proto.cpp +++ b/src/frontends/paddle/src/decoder_proto.cpp @@ -75,6 +75,10 @@ ov::Any DecoderProto::get_attribute(const std::string& name) const { } } +int64_t DecoderProto::get_version() const { + return get_place()->get_version(); +} + ov::Any DecoderProto::convert_attribute(const Any& data, const std::type_info& type_info) const { if (data.is() && type_info == typeid(ov::element::Type)) { return get_ov_type(static_cast(data.as())); diff --git a/src/frontends/paddle/src/decoder_proto.hpp b/src/frontends/paddle/src/decoder_proto.hpp index 11627c6fba6ab9..cfad67bbc4ff2d 100644 --- a/src/frontends/paddle/src/decoder_proto.hpp +++ b/src/frontends/paddle/src/decoder_proto.hpp @@ -55,6 +55,8 @@ class DecoderProto : public paddle::DecoderBase { std::map map_for_each_output( const std::function(const std::string&, size_t)>& func) const; + int64_t get_version() const override; + private: std::vector<::paddle::framework::proto::OpDesc_Attr> decode_attribute_helper(const std::string& name) const; 
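     // get_version() resolves through the op's Place into the paddle InputModel, which in
     // turn reads the version record stored in the serialized ProgramDesc
     // (m_fw_ptr->version().version()); converters can then branch on the producing
     // paddle release via NodeContext::get_version().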
std::weak_ptr op_place; diff --git a/src/frontends/paddle/src/input_model.cpp b/src/frontends/paddle/src/input_model.cpp index 287fa5e54ad743..8518db040eea39 100644 --- a/src/frontends/paddle/src/input_model.cpp +++ b/src/frontends/paddle/src/input_model.cpp @@ -34,6 +34,9 @@ class InputModel::InputModelImpl { const std::shared_ptr& telemetry); std::vector get_inputs() const; std::vector get_outputs() const; + int64_t get_version() const { + return m_fw_ptr->version().version(); + } Place::Ptr get_place_by_tensor_name(const std::string& tensorName) const; void override_all_outputs(const std::vector& outputs); void override_all_inputs(const std::vector& inputs); @@ -589,6 +592,10 @@ std::vector InputModel::get_outputs() const { return _impl->get_outputs(); } +int64_t InputModel::get_version() const { + return _impl->get_version(); +} + Place::Ptr InputModel::get_place_by_tensor_name(const std::string& tensorName) const { return _impl->get_place_by_tensor_name(tensorName); } diff --git a/src/frontends/paddle/src/input_model.hpp b/src/frontends/paddle/src/input_model.hpp index 069e7a94c98b34..8607cf30134129 100644 --- a/src/frontends/paddle/src/input_model.hpp +++ b/src/frontends/paddle/src/input_model.hpp @@ -32,6 +32,7 @@ class InputModel : public ov::frontend::InputModel { ov::PartialShape get_partial_shape(const Place::Ptr& place) const override; void set_element_type(const Place::Ptr& place, const ov::element::Type&) override; void set_tensor_value(const Place::Ptr& place, const void* value) override; + int64_t get_version() const; private: friend class ov::frontend::paddle::FrontEnd; diff --git a/src/frontends/paddle/src/op/argmax.cpp b/src/frontends/paddle/src/op/argmax.cpp index 10029a385eb08d..7ec245a8c2ffbe 100644 --- a/src/frontends/paddle/src/op/argmax.cpp +++ b/src/frontends/paddle/src/op/argmax.cpp @@ -28,9 +28,17 @@ NamedOutputs argmax(const NodeContext& node) { const Output reshape_flatten = ov::opset6::Constant::create(ov::element::i64, {1}, {-1}); auto node_reshape = std::make_shared(data, reshape_flatten, true); auto node_topk = std::make_shared(node_reshape, k, axis, "max", "index", index_element_type); - return node.default_single_output_mapping( - {std::make_shared(node_topk->output(1), element::i64)}, - {"Out"}); + const auto output_info = node.get_output_port_infos("Out"); + size_t output_size = output_info[0].second.size(); + if (output_size == 0) { + auto out = std::make_shared(node_topk->output(1)); + return node.default_single_output_mapping({std::make_shared(out, element::i64)}, + {"Out"}); + } else { + return node.default_single_output_mapping( + {std::make_shared(node_topk->output(1), element::i64)}, + {"Out"}); + } } } diff --git a/src/frontends/paddle/src/op/elementwise_ops.cpp b/src/frontends/paddle/src/op/elementwise_ops.cpp index d0c3a12c48b60b..edbe9564883a99 100644 --- a/src/frontends/paddle/src/op/elementwise_ops.cpp +++ b/src/frontends/paddle/src/op/elementwise_ops.cpp @@ -42,15 +42,15 @@ NamedOutputs elementwise_pow(const NodeContext& node_context) { return elementwise_ops(node_context); } -NamedOutputs elementwise_equal(const NodeContext& node_context) { +NamedOutputs equal(const NodeContext& node_context) { return elementwise_ops(node_context); } -NamedOutputs elementwise_greater_equal(const NodeContext& node_context) { +NamedOutputs greater_equal(const NodeContext& node_context) { return elementwise_ops(node_context); } -NamedOutputs elementwise_not_equal(const NodeContext& node_context) { +NamedOutputs not_equal(const NodeContext& node_context) { 
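+    // renamed from elementwise_not_equal (likewise equal / greater_equal above) to match
+    // the paddle operator names used as keys in op_table.cpp; the conversion itself is
+    // unchanged.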
return elementwise_ops(node_context); } @@ -61,10 +61,17 @@ NamedOutputs elementwise_floordiv(const NodeContext& node_context) { if (node_context.has_attribute("axis")) { axis = node_context.get_attribute("axis"); } + + int64_t pd_version = node_context.get_version(); + + bool python_div = false; + if (pd_version >= 2005000 || pd_version == 0) { + python_div = true; + } return node_context.default_single_output_mapping( {std::make_shared(x, y, - false, + python_div, ov::op::AutoBroadcastSpec(ov::op::AutoBroadcastType::PDPD, axis))}, {"Out"}); } diff --git a/src/frontends/paddle/src/op/fill_constant_batch_size_like.cpp b/src/frontends/paddle/src/op/fill_constant_batch_size_like.cpp index 4f5c3c891c4c4f..cf23a4a2e151b6 100644 --- a/src/frontends/paddle/src/op/fill_constant_batch_size_like.cpp +++ b/src/frontends/paddle/src/op/fill_constant_batch_size_like.cpp @@ -121,4 +121,4 @@ NamedOutputs fill_constant_batch_size_like(const NodeContext& node) { } // namespace op } // namespace paddle } // namespace frontend -} // namespace ov +} // namespace ov \ No newline at end of file diff --git a/src/frontends/paddle/src/op/grid_sampler.cpp b/src/frontends/paddle/src/op/grid_sampler.cpp index 148ad60cfd7d1e..b78c3ea345ad1e 100644 --- a/src/frontends/paddle/src/op/grid_sampler.cpp +++ b/src/frontends/paddle/src/op/grid_sampler.cpp @@ -4,11 +4,15 @@ #include "default_opset.hpp" #include "openvino/frontend/paddle/node_context.hpp" +#include "openvino/op/grid_sample.hpp" namespace ov { namespace frontend { namespace paddle { namespace op { + +using namespace ov::op; + NamedOutputs grid_sampler(const NodeContext& node) { auto data = node.get_input("X"); auto grid = node.get_input("Grid"); diff --git a/src/frontends/paddle/src/op/matmul_v2.cpp b/src/frontends/paddle/src/op/matmul_v2.cpp index 12dc2eee018072..2a096569d0335b 100644 --- a/src/frontends/paddle/src/op/matmul_v2.cpp +++ b/src/frontends/paddle/src/op/matmul_v2.cpp @@ -16,7 +16,9 @@ NamedOutputs matmul_v2(const NodeContext& node) { const auto mm = std::make_shared(x, y, transpose_a, transpose_b); std::shared_ptr result = mm; - if (is_scalar(mm->get_output_partial_shape(0))) { + const auto output_info = node.get_output_port_infos("Out"); + size_t output_size = output_info[0].second.size(); + if (is_scalar(mm->get_output_partial_shape(0)) && output_size) { auto unsqueeze_scalar = default_opset::Constant::create(ov::element::i64, {}, {0}); result = std::make_shared(mm, unsqueeze_scalar); } diff --git a/src/frontends/paddle/src/op/p_norm.cpp b/src/frontends/paddle/src/op/p_norm.cpp index 3023700e5e57ee..645bd9e53c9b11 100644 --- a/src/frontends/paddle/src/op/p_norm.cpp +++ b/src/frontends/paddle/src/op/p_norm.cpp @@ -18,22 +18,20 @@ NamedOutputs p_norm(const NodeContext& node) { const auto absNode = std::make_shared(data); const auto axisNode = default_opset::Constant::create(ov::element::i32, {1}, {axis}); + std::shared_ptr p_norm_node; + const auto input_shape = data.get_partial_shape(); + if (p == std::numeric_limits::infinity()) { - return node.default_single_output_mapping( - {std::make_shared(absNode, axisNode, keepdim)}, - {"Out"}); + p_norm_node = std::make_shared(absNode, axisNode, keepdim); } else if (p == -std::numeric_limits::infinity()) { - return node.default_single_output_mapping( - {std::make_shared(absNode, axisNode, keepdim)}, - {"Out"}); + p_norm_node = std::make_shared(absNode, axisNode, keepdim); } else if (p == 0.0) { const auto input_dtype = data.get_element_type(); const auto zero = default_opset::Constant::create(input_dtype, 
{1}, {0}); const auto non_zero = std::make_shared(absNode, zero); const auto converted_non_zero = std::make_shared(non_zero, input_dtype); - const auto reduce_sum = std::make_shared(converted_non_zero, axisNode, keepdim); - const auto input_shape = data.get_partial_shape(); + p_norm_node = std::make_shared(converted_non_zero, axisNode, keepdim); // process 1-d input and keepdim=false, output shape is [1], instead of scalar. if (!keepdim) { PADDLE_OP_CHECK(node, @@ -42,19 +40,23 @@ NamedOutputs p_norm(const NodeContext& node) { const auto input_rank = input_shape.rank().get_length(); if (input_rank == 1) { const auto one = default_opset::Constant::create(ov::element::i64, {1}, {1}); - auto out = std::make_shared(reduce_sum, one, false); - return node.default_single_output_mapping({out}, {"Out"}); + p_norm_node = std::make_shared(p_norm_node, one, false); } } - return node.default_single_output_mapping({reduce_sum}, {"Out"}); } else { const auto power_factor = default_opset::Constant::create(ov::element::f32, Shape{1}, {p}); const auto powNode = std::make_shared(absNode, power_factor); const auto reduce_sum = std::make_shared(powNode, axisNode, keepdim); const auto extract_factor = default_opset::Constant::create(ov::element::f32, Shape{1}, {1.0 / p}); - return node.default_single_output_mapping({std::make_shared(reduce_sum, extract_factor)}, - {"Out"}); + p_norm_node = std::make_shared(reduce_sum, extract_factor); + } + + const auto output_info = node.get_output_port_infos("Out"); + size_t output_size = output_info[0].second.size(); + if ((axis == -1 || input_shape.size() == 1) && !keepdim && !output_size) { + p_norm_node = std::make_shared(p_norm_node); } + return node.default_single_output_mapping({p_norm_node}, {"Out"}); } } // namespace op diff --git a/src/frontends/paddle/src/op/reduce_ops.hpp b/src/frontends/paddle/src/op/reduce_ops.hpp index bc700c1ed0327e..0944d1081ff759 100644 --- a/src/frontends/paddle/src/op/reduce_ops.hpp +++ b/src/frontends/paddle/src/op/reduce_ops.hpp @@ -40,6 +40,12 @@ NamedOutputs reduce_ops(const NodeContext& node) { auto unsqueeze_scalar = default_opset::Constant::create(ov::element::i64, {}, {0}); result = std::make_shared(reduceNode, unsqueeze_scalar); } + + const auto output_info = node.get_output_port_infos("Out"); + size_t output_size = output_info[0].second.size(); + if (reduce_all && !output_size) { + result = std::make_shared(reduceNode); + } return node.default_single_output_mapping({result}, {"Out"}); } diff --git a/src/frontends/paddle/src/op/reverse.cpp b/src/frontends/paddle/src/op/reverse.cpp index 097e13b401986b..bf986055dab067 100644 --- a/src/frontends/paddle/src/op/reverse.cpp +++ b/src/frontends/paddle/src/op/reverse.cpp @@ -14,4 +14,4 @@ NamedOutputs reverse(const NodeContext& node) { } // namespace op } // namespace paddle } // namespace frontend -} // namespace ov +} // namespace ov \ No newline at end of file diff --git a/src/frontends/paddle/src/op_table.cpp b/src/frontends/paddle/src/op_table.cpp index f701065569517e..4ed557a4edd13e 100644 --- a/src/frontends/paddle/src/op_table.cpp +++ b/src/frontends/paddle/src/op_table.cpp @@ -29,25 +29,25 @@ OP_CONVERTER(dequantize_linear); OP_CONVERTER(dropout); OP_CONVERTER(elementwise_add); OP_CONVERTER(elementwise_div); -OP_CONVERTER(elementwise_equal); OP_CONVERTER(elementwise_floordiv); -OP_CONVERTER(elementwise_greater_equal); OP_CONVERTER(elementwise_max); OP_CONVERTER(elementwise_min); OP_CONVERTER(elementwise_mod); OP_CONVERTER(elementwise_mul); -OP_CONVERTER(elementwise_not_equal); 
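 // each OP_CONVERTER line forward-declares a converter of the form
 // NamedOutputs name(const NodeContext&); get_supported_ops() below maps the paddle
 // operator string onto it, so renames here must be mirrored in that table.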
OP_CONVERTER(elementwise_pow); OP_CONVERTER(elementwise_sub); +OP_CONVERTER(equal); +OP_CONVERTER(greater_equal); +OP_CONVERTER(not_equal); OP_CONVERTER(embedding); OP_CONVERTER(exp); OP_CONVERTER(expand_v2); OP_CONVERTER(flip); -OP_CONVERTER(fill_any_like); -OP_CONVERTER(fill_constant_batch_size_like); -OP_CONVERTER(fill_constant); OP_CONVERTER(flatten_contiguous_range); OP_CONVERTER(floor); +OP_CONVERTER(fill_any_like); +OP_CONVERTER(fill_constant); +OP_CONVERTER(fill_constant_batch_size_like); OP_CONVERTER(gather); OP_CONVERTER(gather_nd); OP_CONVERTER(gelu); @@ -152,22 +152,22 @@ std::map get_supported_ops() { {"depthwise_conv2d", op::conv2d}, {"depthwise_conv2d_transpose", op::conv2d_transpose}, {"dequantize_linear", op::dequantize_linear}, - {"dropout", op::dropout}, {"elementwise_add", op::elementwise_add}, {"elementwise_div", op::elementwise_div}, {"elementwise_floordiv", op::elementwise_floordiv}, - {"elementwise_max", op::elementwise_max}, - {"elementwise_min", op::elementwise_min}, {"elementwise_mod", op::elementwise_mod}, {"elementwise_mul", op::elementwise_mul}, - {"elementwise_pow", op::elementwise_pow}, + {"elementwise_max", op::elementwise_max}, + {"elementwise_min", op::elementwise_min}, {"elementwise_sub", op::elementwise_sub}, - {"equal", op::elementwise_equal}, + {"dropout", op::dropout}, + {"elementwise_pow", op::elementwise_pow}, + {"equal", op::equal}, {"exp", op::exp}, {"expand_v2", op::expand_v2}, {"fill_any_like", op::fill_any_like}, - {"fill_constant_batch_size_like", op::fill_constant_batch_size_like}, {"fill_constant", op::fill_constant}, + {"fill_constant_batch_size_like", op::fill_constant_batch_size_like}, {"flatten_contiguous_range", op::flatten_contiguous_range}, {"flip", op::flip}, {"floor", op::floor}, @@ -175,7 +175,7 @@ std::map get_supported_ops() { {"gather_nd", op::gather_nd}, {"gelu", op::gelu}, {"generate_proposals_v2", op::generate_proposals_v2}, - {"greater_equal", op::elementwise_greater_equal}, + {"greater_equal", op::greater_equal}, {"greater_than", op::greater_than}, {"grid_sampler", op::grid_sampler}, {"group_norm", op::group_norm}, @@ -202,7 +202,7 @@ std::map get_supported_ops() { {"multiclass_nms3", op::multiclass_nms}, {"nearest_interp_v2", op::nearest_interp_v2}, {"nearest_interp", op::nearest_interp_v2}, - {"not_equal", op::elementwise_not_equal}, + {"not_equal", op::not_equal}, {"one_hot_v2", op::one_hot_v2}, {"p_norm", op::p_norm}, {"pad3d", op::pad3d}, @@ -255,8 +255,7 @@ std::map get_supported_ops() { {"while", op::while_}, {"write_to_array", op::write_to_array}, {"where_index", op::where_index}, - {"yolo_box", op::yolo_box}, - {"generate_proposals_v2", op::generate_proposals_v2}}; + {"yolo_box", op::yolo_box}}; }; } // namespace paddle diff --git a/src/frontends/paddle/src/place.hpp b/src/frontends/paddle/src/place.hpp index fc2fe9eb29efe0..583d5191feab81 100644 --- a/src/frontends/paddle/src/place.hpp +++ b/src/frontends/paddle/src/place.hpp @@ -44,6 +44,10 @@ class Place : public ov::frontend::Place { return m_names; } + int64_t get_version() const { + return dynamic_cast(m_input_model).get_version(); + } + private: const ov::frontend::InputModel& m_input_model; std::vector m_names; diff --git a/src/frontends/paddle/src/proto/framework.proto b/src/frontends/paddle/src/proto/framework.proto index 22112cba29667d..cfcc10c6ee692f 100644 --- a/src/frontends/paddle/src/proto/framework.proto +++ b/src/frontends/paddle/src/proto/framework.proto @@ -41,8 +41,31 @@ enum AttrType { VAR = 13; VARS = 14; FLOAT64 = 15; + SCALAR = 16; + 
SCALARS = 17; } + +message Complex { + required double r = 1; + required double i = 2; +}; + +message Scalar { + enum Type { + BOOLEAN = 1; + LONG = 2; + FLOAT64 = 3; + COMPLEX128 = 4; + } + required Type type = 1; + + optional bool b = 2; + optional int64 i = 3; + optional double r = 4; + optional Complex c = 5; +}; + // OpDesc describes an instance of a C++ framework::OperatorBase // derived class type. message OpDesc { @@ -66,6 +89,8 @@ message OpDesc { optional string var_name = 17; repeated string vars_name = 18; optional double float64 = 19; + optional Scalar scalar = 20; + repeated Scalar scalars = 21; }; message Var { @@ -126,7 +151,7 @@ message VarType { FP16 = 4; FP32 = 5; FP64 = 6; - // Tensor is used in C++. + // phi::DenseTensor is used in C++. SIZE_T = 19; UINT8 = 20; INT8 = 21; diff --git a/src/frontends/paddle/tests/CMakeLists.txt b/src/frontends/paddle/tests/CMakeLists.txt index 6d373e67c0a663..078a133138aa45 100644 --- a/src/frontends/paddle/tests/CMakeLists.txt +++ b/src/frontends/paddle/tests/CMakeLists.txt @@ -21,9 +21,6 @@ else() set(paddlepaddle_FOUND ON) endif() -# PDPD 2.5.1 is not compatible with tests models we use -set(paddlepaddle_FOUND OFF) - if(paddlepaddle_FOUND) set(ctest_labels OV UNIT) endif() diff --git a/src/frontends/paddle/tests/op_fuzzy.cpp b/src/frontends/paddle/tests/op_fuzzy.cpp index 8487bed8827af3..d99862ceb69490 100644 --- a/src/frontends/paddle/tests/op_fuzzy.cpp +++ b/src/frontends/paddle/tests/op_fuzzy.cpp @@ -192,22 +192,21 @@ static const std::vector models{ std::string("flip_5"), std::string("flip_dynamic_1"), std::string("flip_dynamic_2"), - std::string("fill_any_like"), - std::string("fill_any_like_f16"), - std::string("fill_any_like_f32"), - std::string("fill_any_like_f64"), - std::string("fill_any_like_i16"), - std::string("fill_any_like_i32"), - std::string("fill_any_like_i64"), - std::string("fill_any_like_bool"), - std::string("fill_any_like_bool_2"), - std::string("fill_constant"), - std::string("fill_constant_batch_size_like"), - std::string("fill_constant_int32"), - std::string("fill_constant_int64"), - std::string("fill_constant_tensor"), - std::string("fill_constant_shape_tensor"), - std::string("fill_constant_shape_tensor_list"), + std::string("full_like"), + std::string("full_like_f16"), + std::string("full_like_f32"), + std::string("full_like_f64"), + std::string("full_like_i16"), + std::string("full_like_i32"), + std::string("full_like_i64"), + std::string("full_like_bool"), + std::string("full_like_bool_2"), + std::string("full"), + std::string("full_int32"), + std::string("full_int64"), + std::string("full_tensor"), + std::string("full_shape_tensor"), + std::string("full_shape_tensor_list"), std::string("flatten_contiguous_range_test1"), std::string("floor_float32"), std::string("floor_mod1"), @@ -244,7 +243,8 @@ static const std::vector models{ std::string("greater_than_int64"), std::string("grid_sampler_1"), std::string("grid_sampler_2"), - std::string("grid_sampler_dyn"), + // std::string("grid_sampler_3"), + // std::string("grid_sampler_dyn"), std::string("group_norm_1/group_norm_1.pdmodel"), std::string("group_norm_2/group_norm_2.pdmodel"), std::string("group_norm_3/group_norm_3.pdmodel"), @@ -282,15 +282,15 @@ static const std::vector models{ std::string("loop/loop.pdmodel"), std::string("loop_dyn/loop_dyn.pdmodel"), std::string("loop_dyn_x/loop_dyn_x.pdmodel"), - std::string("loop_if/loop_if.pdmodel"), - std::string("loop_if_loop/loop_if_loop.pdmodel"), - std::string("loop_if_loop_if/loop_if_loop_if.pdmodel"), - 
std::string("loop_if_loop_complex/loop_if_loop_complex.pdmodel"),
+    // std::string("loop_if/loop_if.pdmodel"),
+    // std::string("loop_if_loop/loop_if_loop.pdmodel"),
+    // std::string("loop_if_loop_if/loop_if_loop_if.pdmodel"),
+    // std::string("loop_if_loop_complex/loop_if_loop_complex.pdmodel"),
     // disabled because slice could not produce a full dynamic shape
     // std::string("loop_if_tensor_array/loop_if_tensor_array.pdmodel"),
+    // std::string("loop_x/loop_x.pdmodel"),
     std::string("loop_t/loop_t.pdmodel"),
     std::string("loop_tensor_array/loop_tensor_array.pdmodel"),
-    std::string("loop_x/loop_x.pdmodel"),
     std::string("matmul_xt/matmul_xt.pdmodel"),
     std::string("matmul_xt_yt/matmul_xt_yt.pdmodel"),
     std::string("matmul_yt/matmul_yt.pdmodel"),
@@ -446,14 +446,6 @@ static const std::vector<std::string> models{
     std::string("reshape"),
     std::string("reshape_tensor"),
     std::string("reshape_tensor_list"),
-    std::string("reverse_static_1"),
-    std::string("reverse_static_2"),
-    std::string("reverse_static_3"),
-    std::string("reverse_static_4"),
-    std::string("reverse_dynamic_1"),
-    std::string("reverse_dynamic_2"),
-    std::string("reverse_dynamic_3"),
-    std::string("reverse_dynamic_4"),
     std::string("rnn_lstm_layer_1_bidirectional/rnn_lstm_layer_1_bidirectional.pdmodel"),
     std::string("rnn_lstm_layer_1_forward/rnn_lstm_layer_1_forward.pdmodel"),
     std::string("rnn_lstm_layer_2_bidirectional/rnn_lstm_layer_2_bidirectional.pdmodel"),
diff --git a/src/frontends/paddle/tests/places.cpp b/src/frontends/paddle/tests/places.cpp
index 17f0df7e2d7aed..655d3941d69a66 100644
--- a/src/frontends/paddle/tests/places.cpp
+++ b/src/frontends/paddle/tests/places.cpp
@@ -3,6 +3,8 @@
 //
 
 #include
+#include <fstream>
+#include <string>
 #include
 
 #include "gtest/gtest.h"
@@ -10,50 +12,42 @@
 
 using namespace ov::frontend;
 
-const std::string model_file = std::string(TEST_PADDLE_MODELS_DIRNAME) + "place_test_model/place_test_model.pdmodel";
-
-/***
-model:
-
-              [input]
-                 |
-  [const]     [const]    [transpose]
-        \        |       /
-          [ RNN (LSTM) ]
-         /       |       \
- [transpose] [scale_{1,2}] [relu_{0,1,2}]
-      |          |              |
- [scale_0]   [out_{1,2}]  [scale_{3,4,5}]
-      |                         |
-   [out_1]                [out_{3,4,5}]
-
-***/
-
-std::vector<std::string> tensor_names = {
-    "x",
-    "const_1.tmp_0",
-    "const_2.tmp_0",
-    "transpose_0.tmp_0",
-    "transpose_0.tmp_1",
-    "lstm_0.tmp_0",
-    "lstm_0._generated_var_0",
-    "lstm_0.tmp_3",
-    "lstm_0.tmp_1",
-    "lstm_0.tmp_2",
-    "transpose_1.tmp_0",
-    "transpose_1.tmp_1",
-    "relu_1.tmp_0",
-    "relu_2.tmp_0",
-    "relu_3.tmp_0",
-    "save_infer_model/scale_0.tmp_0",
-    "save_infer_model/scale_1.tmp_0",
-    "save_infer_model/scale_2.tmp_0",
-    "save_infer_model/scale_3.tmp_0",
-    "save_infer_model/scale_4.tmp_0",
-    "save_infer_model/scale_5.tmp_0",
+const std::string model_file = FrontEndTestUtils::make_model_path(std::string(TEST_PADDLE_MODELS_DIRNAME) +
+                                                                  "place_test_model/place_test_model.pdmodel");
+const std::string vars_name_file =
+    FrontEndTestUtils::make_model_path(std::string(TEST_PADDLE_MODELS_DIRNAME) + "place_test_model/vars_name.txt");
+const std::string outputs_name_file =
+    FrontEndTestUtils::make_model_path(std::string(TEST_PADDLE_MODELS_DIRNAME) + "place_test_model/outputs_name.txt");
+
+class Paddle_Places : public ::testing::Test {
+protected:
+    void SetUp() override {
+        std::fstream name_file;
+        name_file.open(vars_name_file, std::ios::in);
+        if (name_file.is_open()) {
+            std::string name;
+            while (std::getline(name_file, name))
+                tensor_names.push_back(name);
+            name_file.close();
+        } else
+            FRONT_END_THROW("Cannot open " + vars_name_file);
+
+        std::fstream output_file;
+        output_file.open(outputs_name_file, std::ios::in);
+        if (output_file.is_open()) {
+            std::string name;
+            while (std::getline(output_file, name))
+                output_names.push_back(name);
+            output_file.close();
+        } else
+            FRONT_END_THROW("Cannot open " + outputs_name_file);
+    }
+
+    std::vector<std::string> tensor_names;
+    std::vector<std::string> output_names;
 };
 
-TEST(Paddle_Places, check_tensor_names) {
+TEST_F(Paddle_Places, check_tensor_names) {
     auto fem = FrontEndManager();
     FrontEnd::Ptr frontend;
     ASSERT_NO_THROW(frontend = fem.load_by_framework(PADDLE_FE));
@@ -66,7 +60,7 @@ TEST(Paddle_Places, check_tensor_names) {
     }
 }
 
-TEST(Paddle_Places, check_input_outputs) {
+TEST_F(Paddle_Places, check_input_outputs) {
     auto fem = FrontEndManager();
     FrontEnd::Ptr frontend;
     ASSERT_NO_THROW(frontend = fem.load_by_framework(PADDLE_FE));
@@ -82,13 +76,6 @@ TEST(Paddle_Places, check_input_outputs) {
     auto tensor_place = input_model->get_place_by_tensor_name("x");
     tensor_place->is_equal(inputs[0]);
 
-    std::vector<std::string> output_names = {"save_infer_model/scale_0.tmp_0",
-                                             "save_infer_model/scale_1.tmp_0",
-                                             "save_infer_model/scale_2.tmp_0",
-                                             "save_infer_model/scale_3.tmp_0",
-                                             "save_infer_model/scale_4.tmp_0",
-                                             "save_infer_model/scale_5.tmp_0"};
-
     for (const auto& name : output_names) {
         const auto output_place = input_model->get_place_by_tensor_name(name);
         auto it = std::find_if(outputs.begin(), outputs.end(), [&output_place](const Place::Ptr& place) {
@@ -99,7 +86,7 @@ TEST(Paddle_Places, check_input_outputs) {
 }
 
 // all ops present in the model have an "Out" port
-TEST(Paddle_Places, check_out_port_of_all_ops) {
+TEST_F(Paddle_Places, check_out_port_of_all_ops) {
     auto fem = FrontEndManager();
     FrontEnd::Ptr frontend;
     ASSERT_NO_THROW(frontend = fem.load_by_framework(PADDLE_FE));
@@ -121,7 +108,7 @@ TEST(Paddle_Places, check_out_port_of_all_ops) {
     }
 }
 
-TEST(Paddle_Places, check_in_out_ports_of_model_outputs) {
+TEST_F(Paddle_Places, check_in_out_ports_of_model_outputs) {
     auto fem = FrontEndManager();
     FrontEnd::Ptr frontend;
     ASSERT_NO_THROW(frontend = fem.load_by_framework(PADDLE_FE));
@@ -155,7 +142,7 @@ TEST(Paddle_Places, check_in_out_ports_of_model_outputs) {
     }
 }
 
-TEST(Paddle_Places, check_source_target_tensors_of_model_outputs) {
+TEST_F(Paddle_Places, check_source_target_tensors_of_model_outputs) {
    auto fem = FrontEndManager();
     FrontEnd::Ptr frontend;
     ASSERT_NO_THROW(frontend = fem.load_by_framework(PADDLE_FE));
@@ -189,7 +176,7 @@ TEST(Paddle_Places, check_source_target_tensors_of_model_outputs) {
     }
 }
 
-TEST(Paddle_Places, check_producing_consuming_ops_of_model_outputs) {
+TEST_F(Paddle_Places, check_producing_consuming_ops_of_model_outputs) {
     auto fem = FrontEndManager();
     FrontEnd::Ptr frontend;
     ASSERT_NO_THROW(frontend = fem.load_by_framework(PADDLE_FE));
@@ -224,7 +211,7 @@ TEST(Paddle_Places, check_producing_consuming_ops_of_model_outputs) {
 }
 
 // check data flow [ output port -> tensor -> input port ]
-TEST(Paddle_Places, check_data_flow) {
+TEST_F(Paddle_Places, check_data_flow) {
     auto fem = FrontEndManager();
     FrontEnd::Ptr frontend;
     ASSERT_NO_THROW(frontend = fem.load_by_framework(PADDLE_FE));
@@ -263,7 +250,7 @@ TEST(Paddle_Places, check_data_flow) {
 //               -> input_port_2
 //               -> input_port_N]
 // input_port, input_port_2, ...
input_port_N are equal data -TEST(Paddle_Places, check_tensor_to_multiple_ports) { +TEST_F(Paddle_Places, check_tensor_to_multiple_ports) { auto fem = FrontEndManager(); FrontEnd::Ptr frontend; ASSERT_NO_THROW(frontend = fem.load_by_framework(PADDLE_FE)); @@ -289,7 +276,7 @@ TEST(Paddle_Places, check_tensor_to_multiple_ports) { } // consuming ops should be equal for tensor place and producing output port -TEST(Paddle_Places, check_consuming_ops) { +TEST_F(Paddle_Places, check_consuming_ops) { auto fem = FrontEndManager(); FrontEnd::Ptr frontend; ASSERT_NO_THROW(frontend = fem.load_by_framework(PADDLE_FE)); @@ -330,7 +317,7 @@ TEST(Paddle_Places, check_consuming_ops) { } } -TEST(Paddle_Places, check_consuming_ops_2) { +TEST_F(Paddle_Places, check_consuming_ops_2) { auto fem = FrontEndManager(); FrontEnd::Ptr frontend; ASSERT_NO_THROW(frontend = fem.load_by_framework(PADDLE_FE)); @@ -369,7 +356,7 @@ TEST(Paddle_Places, check_consuming_ops_2) { } } -TEST(Paddle_Places, check_producing_ops) { +TEST_F(Paddle_Places, check_producing_ops) { auto fem = FrontEndManager(); FrontEnd::Ptr frontend; ASSERT_NO_THROW(frontend = fem.load_by_framework(PADDLE_FE)); @@ -391,20 +378,13 @@ TEST(Paddle_Places, check_producing_ops) { } } -TEST(Paddle_Places, check_input_output_ports_dy_idx) { +TEST_F(Paddle_Places, check_input_output_ports_dy_idx) { auto fem = FrontEndManager(); FrontEnd::Ptr frontend; ASSERT_NO_THROW(frontend = fem.load_by_framework(PADDLE_FE)); InputModel::Ptr input_model; ASSERT_NO_THROW(input_model = frontend->load(FrontEndTestUtils::make_model_path(model_file))); - std::vector output_names = {"save_infer_model/scale_0.tmp_0", - "save_infer_model/scale_1.tmp_0", - "save_infer_model/scale_2.tmp_0", - "save_infer_model/scale_3.tmp_0", - "save_infer_model/scale_4.tmp_0", - "save_infer_model/scale_5.tmp_0"}; - for (const auto& tensor_name : output_names) { auto tensor_place = input_model->get_place_by_tensor_name(tensor_name); EXPECT_NE(tensor_place, nullptr); @@ -417,20 +397,13 @@ TEST(Paddle_Places, check_input_output_ports_dy_idx) { } } -TEST(Paddle_Places, check_ops_tensors_by_idx) { +TEST_F(Paddle_Places, check_ops_tensors_by_idx) { auto fem = FrontEndManager(); FrontEnd::Ptr frontend; ASSERT_NO_THROW(frontend = fem.load_by_framework(PADDLE_FE)); InputModel::Ptr input_model; ASSERT_NO_THROW(input_model = frontend->load(FrontEndTestUtils::make_model_path(model_file))); - std::vector output_names = {"save_infer_model/scale_0.tmp_0", - "save_infer_model/scale_1.tmp_0", - "save_infer_model/scale_2.tmp_0", - "save_infer_model/scale_3.tmp_0", - "save_infer_model/scale_4.tmp_0", - "save_infer_model/scale_5.tmp_0"}; - for (const auto& tensor_name : output_names) { auto tensor_place = input_model->get_place_by_tensor_name(tensor_name); EXPECT_NE(tensor_place, nullptr); diff --git a/src/frontends/paddle/tests/read_paddle_model_test.cpp b/src/frontends/paddle/tests/read_paddle_model_test.cpp index b362566f52af11..08beac86b67f24 100644 --- a/src/frontends/paddle/tests/read_paddle_model_test.cpp +++ b/src/frontends/paddle/tests/read_paddle_model_test.cpp @@ -20,8 +20,10 @@ #include "openvino/pass/serialize.hpp" TEST(Paddle_Reader_Tests, LoadModelMemoryToCore) { - auto model = std::string(TEST_PADDLE_MODELS_DIRNAME) + "conv2d_relu/conv2d_relu.pdmodel"; - auto param = std::string(TEST_PADDLE_MODELS_DIRNAME) + "conv2d_relu/conv2d_relu.pdiparams"; + auto model = + FrontEndTestUtils::make_model_path(std::string(TEST_PADDLE_MODELS_DIRNAME) + "conv2d_relu/conv2d_relu.pdmodel"); + auto param = 
FrontEndTestUtils::make_model_path(std::string(TEST_PADDLE_MODELS_DIRNAME) + + "conv2d_relu/conv2d_relu.pdiparams"); ov::Core core; auto read_file = [&](const std::string& file_name, size_t& size) { @@ -73,7 +75,7 @@ TEST(Paddle_Reader_Tests, LoadModelMemoryToCore) { } TEST(Paddle_Reader_Tests, ImportBasicModelToCore) { - auto model = std::string(TEST_PADDLE_MODELS_DIRNAME) + "relu/relu.pdmodel"; + auto model = FrontEndTestUtils::make_model_path(std::string(TEST_PADDLE_MODELS_DIRNAME) + "relu/relu.pdmodel"); ov::Core core; auto function = core.read_model(FrontEndTestUtils::make_model_path(model)); diff --git a/src/frontends/paddle/tests/skip_tests_config.cpp b/src/frontends/paddle/tests/skip_tests_config.cpp index 144e9d001ae276..c5ca1227a9a693 100644 --- a/src/frontends/paddle/tests/skip_tests_config.cpp +++ b/src/frontends/paddle/tests/skip_tests_config.cpp @@ -11,7 +11,8 @@ std::vector disabledTestPatterns() { return { #ifdef OPENVINO_STATIC_LIBRARY // Disable tests for static libraries - ".*FrontendLibCloseTest.*" + ".*FrontendLibCloseTest.*", #endif + ".*testUnloadLibBeforeDeletingDependentObject.*", }; } diff --git a/src/frontends/paddle/tests/test_models/gen_scripts/generate_2in_2out.py b/src/frontends/paddle/tests/test_models/gen_scripts/generate_2in_2out.py index 5d6cd952189692..54911ebdc59443 100644 --- a/src/frontends/paddle/tests/test_models/gen_scripts/generate_2in_2out.py +++ b/src/frontends/paddle/tests/test_models/gen_scripts/generate_2in_2out.py @@ -14,24 +14,45 @@ inp_blob1 = np.random.randn(1, 1, 3, 3).astype(np.float32) inp_blob2 = np.random.randn(1, 2, 3, 3).astype(np.float32) -x1 = fluid.data(name='inputX1', shape=[1, 1, 3, 3], dtype='float32') -x2 = fluid.data(name='inputX2', shape=[1, 2, 3, 3], dtype='float32') +if paddle.__version__ >= '2.0.0': + x1 = paddle.static.data(name='inputX1', shape=[1, 1, 3, 3], dtype='float32') + x2 = paddle.static.data(name='inputX2', shape=[1, 2, 3, 3], dtype='float32') +else: + x1 = fluid.data(name='inputX1', shape=[1, 1, 3, 3], dtype='float32') + x2 = fluid.data(name='inputX2', shape=[1, 2, 3, 3], dtype='float32') -conv2d1 = fluid.layers.conv2d(input=x1, num_filters=1, filter_size=(1, 1), stride=(1, 1), padding=(0, 0), +if paddle.__version__ >= '2.0.0': + conv2d1 = paddle.static.nn.conv2d(input=x1, num_filters=1, filter_size=(1, 1), stride=(1, 1), padding=(0, 0), + dilation=(1, 1), groups=1, bias_attr=False, name="conv2dX1") + + conv2d2 = paddle.static.nn.conv2d(input=x2, num_filters=1, filter_size=(1, 1), stride=(1, 1), padding=(0, 0), + dilation=(1, 1), groups=1, bias_attr=False, name="conv2dX2") + + add1 = paddle.add(conv2d1, conv2d2, name="add1.tmp_0") + + relu2a = paddle.nn.functional.relu(add1, name="relu2a") + relu2b = paddle.nn.functional.relu(add1, name="relu2b") + + add2 = paddle.add(relu2a, relu2b, name="add2.tmp_0") + + relu3a = paddle.nn.functional.relu(add2, name="relu3a") + relu3b = paddle.nn.functional.relu(add2, name="relu3b") +else: + conv2d1 = fluid.layers.conv2d(input=x1, num_filters=1, filter_size=(1, 1), stride=(1, 1), padding=(0, 0), dilation=(1, 1), groups=1, bias_attr=False, name="conv2dX1") -conv2d2 = fluid.layers.conv2d(input=x2, num_filters=1, filter_size=(1, 1), stride=(1, 1), padding=(0, 0), - dilation=(1, 1), groups=1, bias_attr=False, name="conv2dX2") + conv2d2 = fluid.layers.conv2d(input=x2, num_filters=1, filter_size=(1, 1), stride=(1, 1), padding=(0, 0), + dilation=(1, 1), groups=1, bias_attr=False, name="conv2dX2") -add1 = fluid.layers.elementwise_add(conv2d1, conv2d2, name="add1") + add1 = 
fluid.layers.elementwise_add(conv2d1, conv2d2, name="add1") -relu2a = fluid.layers.relu(add1, name="relu2a") -relu2b = fluid.layers.relu(add1, name="relu2b") + relu2a = fluid.layers.relu(add1, name="relu2a") + relu2b = fluid.layers.relu(add1, name="relu2b") -add2 = fluid.layers.elementwise_add(relu2a, relu2b, name="add2") + add2 = fluid.layers.elementwise_add(relu2a, relu2b, name="add2") -relu3a = fluid.layers.relu(add2, name="relu3a") -relu3b = fluid.layers.relu(add2, name="relu3b") + relu3a = fluid.layers.relu(add2, name="relu3a") + relu3b = fluid.layers.relu(add2, name="relu3b") exe = fluid.Executor(fluid.CPUPlace()) exe.run(fluid.default_startup_program()) diff --git a/src/frontends/paddle/tests/test_models/gen_scripts/generate_2in_2out_dynbatch.py b/src/frontends/paddle/tests/test_models/gen_scripts/generate_2in_2out_dynbatch.py index d3fc8c95207d0f..b49b4ad6e5d182 100644 --- a/src/frontends/paddle/tests/test_models/gen_scripts/generate_2in_2out_dynbatch.py +++ b/src/frontends/paddle/tests/test_models/gen_scripts/generate_2in_2out_dynbatch.py @@ -13,24 +13,44 @@ inp_blob1 = np.random.randn(1, 1, 3, 3).astype(np.float32) inp_blob2 = np.random.randn(1, 2, 3, 3).astype(np.float32) -x1 = fluid.data(name='inputX1', shape=[-1, 1, 3, 3], dtype='float32') -x2 = fluid.data(name='inputX2', shape=[-1, 2, 3, 3], dtype='float32') +if paddle.__version__ >= '2.0.0': + x1 = paddle.static.data(name='inputX1', shape=[-1, 1, 3, 3], dtype='float32') + x2 = paddle.static.data(name='inputX2', shape=[-1, 2, 3, 3], dtype='float32') -conv2d1 = fluid.layers.conv2d(input=x1, num_filters=1, filter_size=(1, 1), stride=(1, 1), padding=(0, 0), - dilation=(1, 1), groups=1, bias_attr=False, name="conv2dX1") + conv2d1 = paddle.static.nn.conv2d(input=x1, num_filters=1, filter_size=(1, 1), stride=(1, 1), padding=(0, 0), + dilation=(1, 1), groups=1, bias_attr=False, name="conv2dX1") -conv2d2 = fluid.layers.conv2d(input=x2, num_filters=1, filter_size=(1, 1), stride=(1, 1), padding=(0, 0), - dilation=(1, 1), groups=1, bias_attr=False, name="conv2dX2") + conv2d2 = paddle.static.nn.conv2d(input=x2, num_filters=1, filter_size=(1, 1), stride=(1, 1), padding=(0, 0), + dilation=(1, 1), groups=1, bias_attr=False, name="conv2dX2") -add1 = fluid.layers.elementwise_add(conv2d1, conv2d2, name="add1") + add1 = paddle.add(conv2d1, conv2d2, name="add1") -relu2a = fluid.layers.relu(add1, name="relu2a") -relu2b = fluid.layers.relu(add1, name="relu2b") + relu2a = paddle.nn.functional.relu(add1, name="relu2a") + relu2b = paddle.nn.functional.relu(add1, name="relu2b") -add2 = fluid.layers.elementwise_add(relu2a, relu2b, name="add2") + add2 = paddle.add(relu2a, relu2b, name="add2") -relu3a = fluid.layers.relu(add2, name="relu3a") -relu3b = fluid.layers.relu(add2, name="relu3b") + relu3a = paddle.nn.functional.relu(add2, name="relu3a") + relu3b = paddle.nn.functional.relu(add2, name="relu3b") +else: + x1 = fluid.data(name='inputX1', shape=[-1, 1, 3, 3], dtype='float32') + x2 = fluid.data(name='inputX2', shape=[-1, 2, 3, 3], dtype='float32') + + conv2d1 = fluid.layers.conv2d(input=x1, num_filters=1, filter_size=(1, 1), stride=(1, 1), padding=(0, 0), + dilation=(1, 1), groups=1, bias_attr=False, name="conv2dX1") + + conv2d2 = fluid.layers.conv2d(input=x2, num_filters=1, filter_size=(1, 1), stride=(1, 1), padding=(0, 0), + dilation=(1, 1), groups=1, bias_attr=False, name="conv2dX2") + + add1 = fluid.layers.elementwise_add(conv2d1, conv2d2, name="add1") + + relu2a = fluid.layers.relu(add1, name="relu2a") + relu2b = fluid.layers.relu(add1, 
name="relu2b") + + add2 = fluid.layers.elementwise_add(relu2a, relu2b, name="add2") + + relu3a = fluid.layers.relu(add2, name="relu3a") + relu3b = fluid.layers.relu(add2, name="relu3b") exe = fluid.Executor(fluid.CPUPlace()) exe.run(fluid.default_startup_program()) diff --git a/src/frontends/paddle/tests/test_models/gen_scripts/generate_clip.py b/src/frontends/paddle/tests/test_models/gen_scripts/generate_clip.py index 39979ee1d0787f..5fe76bf6a0e0a8 100644 --- a/src/frontends/paddle/tests/test_models/gen_scripts/generate_clip.py +++ b/src/frontends/paddle/tests/test_models/gen_scripts/generate_clip.py @@ -14,7 +14,10 @@ def clip(name: str, x, min, max): with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): node_x = paddle.static.data(name='x', shape=x.shape, dtype='float32') - out = paddle.fluid.layers.clip(node_x, min=min, max=max) + if paddle.__version__ >= '2.0.0': + out = paddle.clip(node_x, min=min, max=max) + else: + out = paddle.fluid.layers.clip(node_x, min=min, max=max) cpu = paddle.static.cpu_places(1) exe = paddle.static.Executor(cpu[0]) diff --git a/src/frontends/paddle/tests/test_models/gen_scripts/generate_conv2d_relu.py b/src/frontends/paddle/tests/test_models/gen_scripts/generate_conv2d_relu.py index d28ed1d52fa366..ebd06b34e9002f 100644 --- a/src/frontends/paddle/tests/test_models/gen_scripts/generate_conv2d_relu.py +++ b/src/frontends/paddle/tests/test_models/gen_scripts/generate_conv2d_relu.py @@ -12,17 +12,26 @@ inp_blob = np.random.randn(1, 3, 4, 4).astype(np.float32) -x = fluid.data(name='xxx', shape=[1, 3, 4, 4], dtype='float32') -test_layer = fluid.layers.conv2d(input=x, num_filters=5, filter_size=(1, 1), stride=(1, 1), padding=(1, 1), +if paddle.__version__ >= '2.0.0': + x = paddle.static.data(name='xxx', shape=[1, 3, 4, 4], dtype='float32') + test_layer = paddle.static.nn.conv2d(input=x, num_filters=5, filter_size=(1, 1), stride=(1, 1), padding=(1, 1), + dilation=(1, 1), groups=1, bias_attr=False) +else: + x = fluid.data(name='xxx', shape=[1, 3, 4, 4], dtype='float32') + test_layer = fluid.layers.conv2d(input=x, num_filters=5, filter_size=(1, 1), stride=(1, 1), padding=(1, 1), dilation=(1, 1), groups=1, bias_attr=False) -relu = fluid.layers.relu(test_layer) +if paddle.__version__ >= '2.0.0': + relu = paddle.nn.functional.relu(test_layer) +else: + relu = fluid.layers.relu(test_layer) exe = fluid.Executor(fluid.CPUPlace()) exe.run(fluid.default_startup_program()) inp_dict = {'xxx': inp_blob} var = [relu] -res_paddle = exe.run(fluid.default_main_program(), fetch_list=var, feed=inp_dict) +res_paddle = exe.run(fluid.default_main_program(), + fetch_list=var, feed=inp_dict) -fluid.io.save_inference_model(os.path.join(sys.argv[1], "conv2d_relu"), list(inp_dict.keys()), var, exe, - model_filename="conv2d_relu.pdmodel", params_filename="conv2d_relu.pdiparams") +paddle.fluid.io.save_inference_model(os.path.join(sys.argv[1], "conv2d_relu"), list(inp_dict.keys()), var, exe, + model_filename="conv2d_relu.pdmodel", params_filename="conv2d_relu.pdiparams") diff --git a/src/frontends/paddle/tests/test_models/gen_scripts/generate_conv2d_s.py b/src/frontends/paddle/tests/test_models/gen_scripts/generate_conv2d_s.py index 2568bc6bfe217e..1463cb5a43e88a 100644 --- a/src/frontends/paddle/tests/test_models/gen_scripts/generate_conv2d_s.py +++ b/src/frontends/paddle/tests/test_models/gen_scripts/generate_conv2d_s.py @@ -11,15 +11,21 @@ inp_blob = np.random.randn(1, 3, 4, 4).astype(np.float32) -x = fluid.data(name='x', shape=[1, 3, 4, 4], dtype='float32') 
-test_layer = fluid.layers.conv2d(input=x, num_filters=5, filter_size=(1, 1), stride=(1, 1), padding=(1, 1), - dilation=(1, 1), groups=1, bias_attr=False) +if paddle.__version__ >= '2.0.0': + x = paddle.static.data(name='x', shape=[1, 3, 4, 4], dtype='float32') + test_layer = paddle.static.nn.conv2d(input=x, num_filters=5, filter_size=(1, 1), stride=(1, 1), padding=(1, 1), + dilation=(1, 1), groups=1, bias_attr=False) +else: + x = fluid.data(name='x', shape=[1, 3, 4, 4], dtype='float32') + test_layer = fluid.layers.conv2d(input=x, num_filters=5, filter_size=(1, 1), stride=(1, 1), padding=(1, 1), + dilation=(1, 1), groups=1, bias_attr=False) exe = fluid.Executor(fluid.CPUPlace()) exe.run(fluid.default_startup_program()) inp_dict = {'x': inp_blob} var = [test_layer] -res_paddle = exe.run(fluid.default_main_program(), fetch_list=var, feed=inp_dict) +res_paddle = exe.run(fluid.default_main_program(), + fetch_list=var, feed=inp_dict) -fluid.io.save_inference_model(os.path.join(sys.argv[1], "conv2d_s"), list(inp_dict.keys()), var, exe, - model_filename="conv2d.pdmodel", params_filename="conv2d.pdiparams") +paddle.fluid.io.save_inference_model(os.path.join(sys.argv[1], "conv2d_s"), list(inp_dict.keys()), var, exe, + model_filename="conv2d.pdmodel", params_filename="conv2d.pdiparams") diff --git a/src/frontends/paddle/tests/test_models/gen_scripts/generate_dynamic_pool2d.py b/src/frontends/paddle/tests/test_models/gen_scripts/generate_dynamic_pool2d.py index 515fec958ec6af..e967cd06d6d716 100644 --- a/src/frontends/paddle/tests/test_models/gen_scripts/generate_dynamic_pool2d.py +++ b/src/frontends/paddle/tests/test_models/gen_scripts/generate_dynamic_pool2d.py @@ -15,13 +15,21 @@ paddle.enable_static() inp_blob1 = np.random.randn(1, 1, 224, 224).astype(np.float32) -x1 = fluid.data(name='inputX1', shape=[1, 1, -1, -1], dtype='float32') - -adative_pool2d = paddle.fluid.layers.adaptive_pool2d( - input=x1, - pool_size=[3,3], - pool_type='avg', - require_index=False) +if paddle.__version__ >= '2.0.0': + x1 = paddle.static.data(name='inputX1', shape=[ + 1, 1, -1, -1], dtype='float32') + + adative_pool2d = paddle.nn.functional.adaptive_avg_pool2d( + x=x1, + output_size=[3, 3]) +else: + x1 = fluid.data(name='inputX1', shape=[1, 1, -1, -1], dtype='float32') + + adative_pool2d = paddle.fluid.layers.adaptive_pool2d( + input=x1, + pool_size=[3, 3], + pool_type='avg', + require_index=False) cpu = paddle.static.cpu_places(1) exe = paddle.static.Executor(cpu[0]) @@ -32,4 +40,5 @@ feed={'inputX1': inp_blob1}, fetch_list=[adative_pool2d]) -saveModel("pool2d_dyn_hw", exe, feedkeys=['inputX1'], fetchlist=adative_pool2d, inputs=[inp_blob1], outputs=outs, target_dir=sys.argv[1]) +saveModel("pool2d_dyn_hw", exe, feedkeys=['inputX1'], fetchlist=adative_pool2d, inputs=[ + inp_blob1], outputs=outs, target_dir=sys.argv[1]) diff --git a/src/frontends/paddle/tests/test_models/gen_scripts/generate_elementwise_ops.py b/src/frontends/paddle/tests/test_models/gen_scripts/generate_elementwise_ops.py index 844ff58b963413..37f556e298b74f 100644 --- a/src/frontends/paddle/tests/test_models/gen_scripts/generate_elementwise_ops.py +++ b/src/frontends/paddle/tests/test_models/gen_scripts/generate_elementwise_ops.py @@ -7,16 +7,21 @@ import numpy as np import sys from save_model import saveModel +import paddle -def elementwise_add(name : str, x, y, axis, in_dtype): +def elementwise_add(name: str, x, y, in_dtype, axis=-1): import paddle paddle.enable_static() with paddle.static.program_guard(paddle.static.Program(), 
paddle.static.Program()): node_x = paddle.static.data(name='x', shape=x.shape, dtype=in_dtype) node_y = paddle.static.data(name='y', shape=y.shape, dtype=in_dtype) - out = paddle.fluid.layers.nn.elementwise_add(node_x, node_y, axis=axis) + if paddle.__version__ >= '2.0.0': + out = paddle.add(node_x, node_y) + else: + out = paddle.fluid.layers.elementwise_add( + node_x, node_y, axis=axis) cpu = paddle.static.cpu_places(1) exe = paddle.static.Executor(cpu[0]) @@ -24,21 +29,26 @@ def elementwise_add(name : str, x, y, axis, in_dtype): # startup program will call initializer to initialize the parameters. exe.run(paddle.static.default_startup_program()) outs = exe.run( - feed={'x': x, 'y': y}, - fetch_list=[out]) - saveModel(name, exe, feedkeys=['x', 'y'], fetchlist=[out], inputs=[x, y], outputs=[outs[0]], target_dir=sys.argv[1]) + feed={'x': x, 'y': y}, + fetch_list=[out]) + saveModel(name, exe, feedkeys=['x', 'y'], fetchlist=[out], inputs=[ + x, y], outputs=[outs[0]], target_dir=sys.argv[1]) return outs[0] -def elementwise_sub(name : str, x, y, axis, in_dtype): +def elementwise_sub(name: str, x, y, in_dtype, axis=-1): import paddle paddle.enable_static() with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): node_x = paddle.static.data(name='x', shape=x.shape, dtype=in_dtype) node_y = paddle.static.data(name='y', shape=y.shape, dtype=in_dtype) - out = paddle.fluid.layers.nn.elementwise_sub(node_x, node_y, axis=axis) + if paddle.__version__ >= '2.0.0': + out = paddle.subtract(node_x, node_y) + else: + out = paddle.fluid.layers.elementwise_sub( + node_x, node_y, axis=axis) cpu = paddle.static.cpu_places(1) exe = paddle.static.Executor(cpu[0]) @@ -46,21 +56,26 @@ def elementwise_sub(name : str, x, y, axis, in_dtype): # startup program will call initializer to initialize the parameters. exe.run(paddle.static.default_startup_program()) outs = exe.run( - feed={'x': x, 'y': y}, - fetch_list=[out]) - saveModel(name, exe, feedkeys=['x', 'y'], fetchlist=[out], inputs=[x, y], outputs=[outs[0]], target_dir=sys.argv[1]) + feed={'x': x, 'y': y}, + fetch_list=[out]) + saveModel(name, exe, feedkeys=['x', 'y'], fetchlist=[out], inputs=[ + x, y], outputs=[outs[0]], target_dir=sys.argv[1]) return outs[0] -def elementwise_div(name : str, x, y, axis, in_dtype): +def elementwise_div(name: str, x, y, in_dtype, axis=-1): import paddle paddle.enable_static() with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): - node_x = paddle.static.data(name = 'x', shape = x.shape, dtype = in_dtype) - node_y = paddle.static.data(name = 'y', shape = y.shape, dtype = in_dtype) - out = paddle.fluid.layers.nn.elementwise_div(node_x, node_y, axis=axis) + node_x = paddle.static.data(name='x', shape=x.shape, dtype=in_dtype) + node_y = paddle.static.data(name='y', shape=y.shape, dtype=in_dtype) + if paddle.__version__ >= '2.0.0': + out = paddle.divide(node_x, node_y) + else: + out = paddle.fluid.layers.elementwise_div( + node_x, node_y, axis=axis) cpu = paddle.static.cpu_places(1) exe = paddle.static.Executor(cpu[0]) @@ -68,24 +83,29 @@ def elementwise_div(name : str, x, y, axis, in_dtype): # startup program will call initializer to initialize the parameters. 
exe.run(paddle.static.default_startup_program()) outs = exe.run( - feed={'x': x, 'y': y}, - fetch_list=[out]) - saveModel(name, exe, feedkeys=['x', 'y'], fetchlist=[out], inputs=[x, y], outputs=[outs[0]], target_dir=sys.argv[1]) + feed={'x': x, 'y': y}, + fetch_list=[out]) + saveModel(name, exe, feedkeys=['x', 'y'], fetchlist=[out], inputs=[ + x, y], outputs=[outs[0]], target_dir=sys.argv[1]) return outs[0] -def elementwise_mod(name : str, x, y, axis, in_dtype, is_api=False): +def elementwise_mod(name: str, x, y, in_dtype, is_api=False, axis=-1): import paddle paddle.enable_static() with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): node_x = paddle.static.data(name='x', shape=x.shape, dtype=in_dtype) node_y = paddle.static.data(name='y', shape=y.shape, dtype=in_dtype) - if is_api: + if paddle.__version__ >= '2.0.0': out = paddle.floor_mod(node_x, node_y) else: - out = paddle.fluid.layers.elementwise_mod(node_x, node_y, axis=axis) + if is_api: + out = paddle.floor_mod(node_x, node_y) + else: + out = paddle.fluid.layers.elementwise_mod( + node_x, node_y, axis=axis) cpu = paddle.static.cpu_places(1) exe = paddle.static.Executor(cpu[0]) @@ -93,21 +113,26 @@ def elementwise_mod(name : str, x, y, axis, in_dtype, is_api=False): # startup program will call initializer to initialize the parameters. exe.run(paddle.static.default_startup_program()) outs = exe.run( - feed={'x': x, 'y': y}, - fetch_list=[out]) - saveModel(name, exe, feedkeys=['x', 'y'], fetchlist=[out], inputs=[x, y], outputs=[outs[0]], target_dir=sys.argv[1]) + feed={'x': x, 'y': y}, + fetch_list=[out]) + saveModel(name, exe, feedkeys=['x', 'y'], fetchlist=[out], inputs=[ + x, y], outputs=[outs[0]], target_dir=sys.argv[1]) return outs[0] -def elementwise_mul(name : str, x, y, axis, in_dtype): +def elementwise_mul(name: str, x, y, in_dtype, axis=-1): import paddle paddle.enable_static() with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): - node_x = paddle.static.data(name = 'x', shape = x.shape, dtype = in_dtype) - node_y = paddle.static.data(name = 'y', shape = y.shape, dtype = in_dtype) - out = paddle.fluid.layers.nn.elementwise_mul(node_x, node_y, axis=axis) + node_x = paddle.static.data(name='x', shape=x.shape, dtype=in_dtype) + node_y = paddle.static.data(name='y', shape=y.shape, dtype=in_dtype) + if paddle.__version__ >= '2.0.0': + out = paddle.multiply(node_x, node_y) + else: + out = paddle.fluid.layers.elementwise_mul( + node_x, node_y, axis=axis) cpu = paddle.static.cpu_places(1) exe = paddle.static.Executor(cpu[0]) @@ -115,19 +140,21 @@ def elementwise_mul(name : str, x, y, axis, in_dtype): # startup program will call initializer to initialize the parameters. 
exe.run(paddle.static.default_startup_program()) outs = exe.run( - feed={'x': x, 'y': y}, - fetch_list=[out]) - saveModel(name, exe, feedkeys=['x', 'y'], fetchlist=[out], inputs=[x, y], outputs=[outs[0]], target_dir=sys.argv[1]) + feed={'x': x, 'y': y}, + fetch_list=[out]) + saveModel(name, exe, feedkeys=['x', 'y'], fetchlist=[out], inputs=[ + x, y], outputs=[outs[0]], target_dir=sys.argv[1]) return outs[0] -def elementwise_mul_bool(name : str, x, y, in_dtype='bool'): + +def elementwise_mul_bool(name: str, x, y, in_dtype='bool'): import paddle paddle.enable_static() with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): - node_x = paddle.static.data(name = 'x', shape = x.shape, dtype = in_dtype) - node_y = paddle.static.data(name = 'y', shape = y.shape, dtype = in_dtype) + node_x = paddle.static.data(name='x', shape=x.shape, dtype=in_dtype) + node_y = paddle.static.data(name='y', shape=y.shape, dtype=in_dtype) mul = node_x * node_y out = paddle.cast(mul, 'float32') @@ -137,21 +164,26 @@ def elementwise_mul_bool(name : str, x, y, in_dtype='bool'): # startup program will call initializer to initialize the parameters. exe.run(paddle.static.default_startup_program()) outs = exe.run( - feed={'x': x, 'y': y}, - fetch_list=[out]) - saveModel(name, exe, feedkeys=['x', 'y'], fetchlist=[out], inputs=[x, y], outputs=[outs[0]], target_dir=sys.argv[1]) + feed={'x': x, 'y': y}, + fetch_list=[out]) + saveModel(name, exe, feedkeys=['x', 'y'], fetchlist=[out], inputs=[ + x, y], outputs=[outs[0]], target_dir=sys.argv[1]) return outs[0] -def elementwise_min(name : str, x, y, axis, in_dtype): +def elementwise_min(name: str, x, y, in_dtype, axis=-1): import paddle paddle.enable_static() with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): - node_x = paddle.static.data(name = 'x', shape = x.shape, dtype = in_dtype) - node_y = paddle.static.data(name = 'y', shape = y.shape, dtype = in_dtype) - out = paddle.fluid.layers.nn.elementwise_min(node_x, node_y, axis=axis) + node_x = paddle.static.data(name='x', shape=x.shape, dtype=in_dtype) + node_y = paddle.static.data(name='y', shape=y.shape, dtype=in_dtype) + if paddle.__version__ >= '2.0.0': + out = paddle.minimum(node_x, node_y) + else: + out = paddle.fluid.layers.elementwise_min( + node_x, node_y, axis=axis) cpu = paddle.static.cpu_places(1) exe = paddle.static.Executor(cpu[0]) @@ -161,19 +193,24 @@ def elementwise_min(name : str, x, y, axis, in_dtype): outs = exe.run( feed={'x': x, 'y': y}, fetch_list=[out]) - saveModel(name, exe, feedkeys=['x', 'y'], fetchlist=[out], inputs=[x, y], outputs=[outs[0]], target_dir=sys.argv[1]) + saveModel(name, exe, feedkeys=['x', 'y'], fetchlist=[out], inputs=[ + x, y], outputs=[outs[0]], target_dir=sys.argv[1]) return outs[0] -def elementwise_max(name : str, x, y, axis, in_dtype): +def elementwise_max(name: str, x, y, in_dtype, axis=-1): import paddle paddle.enable_static() with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): - node_x = paddle.static.data(name = 'x', shape = x.shape, dtype = in_dtype) - node_y = paddle.static.data(name = 'y', shape = y.shape, dtype = in_dtype) - out = paddle.fluid.layers.nn.elementwise_max(node_x, node_y, axis=axis) + node_x = paddle.static.data(name='x', shape=x.shape, dtype=in_dtype) + node_y = paddle.static.data(name='y', shape=y.shape, dtype=in_dtype) + if paddle.__version__ >= '2.0.0': + out = paddle.maximum(node_x, node_y) + else: + out = paddle.fluid.layers.elementwise_max( + node_x, node_y, 
axis=axis) cpu = paddle.static.cpu_places(1) exe = paddle.static.Executor(cpu[0]) @@ -183,19 +220,24 @@ def elementwise_max(name : str, x, y, axis, in_dtype): outs = exe.run( feed={'x': x, 'y': y}, fetch_list=[out]) - saveModel(name, exe, feedkeys=['x', 'y'], fetchlist=[out], inputs=[x, y], outputs=[outs[0]], target_dir=sys.argv[1]) + saveModel(name, exe, feedkeys=['x', 'y'], fetchlist=[out], inputs=[ + x, y], outputs=[outs[0]], target_dir=sys.argv[1]) return outs[0] -def elementwise_pow(name : str, x, y, axis, in_dtype): +def elementwise_pow(name: str, x, y, in_dtype, axis=-1): import paddle paddle.enable_static() with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): - node_x = paddle.static.data(name = 'x', shape = x.shape, dtype = in_dtype) - node_y = paddle.static.data(name = 'y', shape = y.shape, dtype = in_dtype) - out = paddle.fluid.layers.nn.elementwise_pow(node_x, node_y, axis=axis) + node_x = paddle.static.data(name='x', shape=x.shape, dtype=in_dtype) + node_y = paddle.static.data(name='y', shape=y.shape, dtype=in_dtype) + if paddle.__version__ >= '2.0.0': + out = paddle.pow(node_x, node_y) + else: + out = paddle.fluid.layers.elementwise_pow( + node_x, node_y, axis=axis) cpu = paddle.static.cpu_places(1) exe = paddle.static.Executor(cpu[0]) @@ -205,24 +247,24 @@ def elementwise_pow(name : str, x, y, axis, in_dtype): outs = exe.run( feed={'x': x, 'y': y}, fetch_list=[out]) - saveModel(name, exe, feedkeys=['x', 'y'], fetchlist=[out], inputs=[x, y], outputs=[outs[0]], target_dir=sys.argv[1]) + saveModel(name, exe, feedkeys=['x', 'y'], fetchlist=[out], inputs=[ + x, y], outputs=[outs[0]], target_dir=sys.argv[1]) return outs[0] -def elementwise_floordiv(name : str, x, y, axis, in_dtype): +def elementwise_floordiv(name: str, x, y, in_dtype, axis=-1): import paddle paddle.enable_static() with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): - node_x = paddle.static.data(name = 'x', shape = x.shape, dtype = in_dtype) - node_y = paddle.static.data(name = 'y', shape = y.shape, dtype = in_dtype) - if paddle.__version__ == "1.8": - out = paddle.fluid.layers.nn.elementwise_floordiv(node_x, node_y, axis=axis) - else: - if axis != -1: - pass + node_x = paddle.static.data(name='x', shape=x.shape, dtype=in_dtype) + node_y = paddle.static.data(name='y', shape=y.shape, dtype=in_dtype) + if paddle.__version__ >= '2.0.0': out = paddle.floor_divide(node_x, node_y) + else: + out = paddle.fluid.layers.nn.elementwise_floordiv( + node_x, node_y, axis=axis) cpu = paddle.static.cpu_places(1) exe = paddle.static.Executor(cpu[0]) @@ -232,20 +274,21 @@ def elementwise_floordiv(name : str, x, y, axis, in_dtype): outs = exe.run( feed={'x': x, 'y': y}, fetch_list=[out]) - saveModel(name, exe, feedkeys=['x', 'y'], fetchlist=[out], inputs=[x, y], outputs=[outs[0]], target_dir=sys.argv[1]) + saveModel(name, exe, feedkeys=['x', 'y'], fetchlist=[out], inputs=[ + x, y], outputs=[outs[0]], target_dir=sys.argv[1]) return outs[0] -def elementwise_ops(name : str, data_x, data_y, axis, in_dtype): - elementwise_add("elementwise_add" + name, data_x, data_y, axis, in_dtype) - elementwise_sub("elementwise_sub" + name, data_x, data_y, axis, in_dtype) - elementwise_div("elementwise_div" + name, data_x, data_y, axis, in_dtype) - elementwise_mod("elementwise_mod" + name, data_x, data_y, axis, in_dtype) - elementwise_mul("elementwise_mul" + name, data_x, data_y, axis, in_dtype) - elementwise_min("elementwise_min" + name, data_x, data_y, axis, in_dtype) - 
elementwise_max("elementwise_max" + name, data_x, data_y, axis, in_dtype) - elementwise_pow("elementwise_pow" + name, data_x, data_y, axis, in_dtype) +def elementwise_ops(name: str, data_x, data_y, in_dtype, axis=-1): + elementwise_add("elementwise_add" + name, data_x, data_y, in_dtype, axis) + elementwise_sub("elementwise_sub" + name, data_x, data_y, in_dtype, axis) + elementwise_div("elementwise_div" + name, data_x, data_y, in_dtype, axis) + elementwise_mod("elementwise_mod" + name, data_x, data_y, in_dtype, axis) + elementwise_mul("elementwise_mul" + name, data_x, data_y, in_dtype, axis) + elementwise_min("elementwise_min" + name, data_x, data_y, in_dtype, axis) + elementwise_max("elementwise_max" + name, data_x, data_y, in_dtype, axis) + elementwise_pow("elementwise_pow" + name, data_x, data_y, in_dtype, axis) def main(): @@ -253,52 +296,56 @@ def main(): in_dtype = 'float32' data_x = np.array([2, 3, 4]).astype(in_dtype) data_y = np.array([1, 5, 2]).astype(in_dtype) - axis = -1 - elementwise_ops("1", data_x, data_y, axis, in_dtype) - elementwise_mod('floor_mod1', data_x, data_y, -1, in_dtype, True) + elementwise_ops("1", data_x, data_y, in_dtype) + elementwise_mod('floor_mod1', data_x, data_y, in_dtype, True) # data_y's shape is the continuous subsequence of data_x's shape data_x = np.random.rand(2, 5, 3, 4).astype(np.float32) data_y = (0.1 + np.random.rand(3, 4).astype(np.float32)) / 1.1 - elementwise_ops("2", data_x, data_y, axis, in_dtype) - elementwise_mod('floor_mod2', data_x, data_y, -1, in_dtype, True) + elementwise_ops("2", data_x, data_y, in_dtype) + elementwise_mod('floor_mod2', data_x, data_y, in_dtype, True) + + data_y = (0.1 + np.random.rand(4).astype(np.float32)) / 1.1 - data_y = (0.1 + np.random.rand(5).astype(np.float32)) / 1.1 - axis = 1 - elementwise_ops("3", data_x, data_y, axis, in_dtype) + if paddle.__version__ >= '2.0.0': + elementwise_ops("3", data_x, data_y, in_dtype) + else: + elementwise_ops("3", data_x, data_y, in_dtype, 1) - data_y = (0.1 + np.random.rand(2, 5, 3).astype(np.float32)) / 1.1 - axis = 0 - elementwise_ops("4", data_x, data_y, axis, in_dtype) + data_y = (0.1 + np.random.rand(5, 3, 4).astype(np.float32)) / 1.1 + if paddle.__version__ >= '2.0.0': + elementwise_ops("4", data_x, data_y, in_dtype) + else: + elementwise_ops("4", data_x, data_y, in_dtype, 0) # test for elementwise_floordiv, support int and int64 # paddle1.8 support axis = [0, x_last_dims] # paddle2.x only support axis = -1 floordiv_support_dtype = ['int64', 'int32'] data_x = np.array([-4, 0, -8]) - + data_y = np.array([3, 5, 3]) - axis = -1 for dtype in floordiv_support_dtype: - elementwise_floordiv("elementwise_floordiv_" + dtype + "_1", - data_x.astype(dtype), data_y.astype(dtype), axis, dtype) + elementwise_floordiv("elementwise_floordiv_" + dtype + "_1", + data_x.astype(dtype), data_y.astype(dtype), dtype) data_x = np.random.randint(-10, 10, [2, 5, 3, 4]) data_y = np.random.randint(1, 5, [3, 4]) for dtype in floordiv_support_dtype: - elementwise_floordiv("elementwise_floordiv_" + dtype + "_2", - data_x.astype(dtype), data_y.astype(dtype), axis, dtype) + elementwise_floordiv("elementwise_floordiv_" + dtype + "_2", + data_x.astype(dtype), data_y.astype(dtype), dtype) data_y = np.random.randint(1, 5, [5, 3, 4]) for dtype in floordiv_support_dtype: - elementwise_floordiv("elementwise_floordiv_" + dtype + "_3", - data_x.astype(dtype), data_y.astype(dtype), axis, dtype) + elementwise_floordiv("elementwise_floordiv_" + dtype + "_3", + data_x.astype(dtype), data_y.astype(dtype), dtype) # 
test for elementwise_mul with bool data type sample_arr = [True, False] - data_x = np.random.choice(sample_arr, size=(2,3,4)) - data_y = np.random.choice(sample_arr, size=(1,3,4)) + data_x = np.random.choice(sample_arr, size=(2, 3, 4)) + data_y = np.random.choice(sample_arr, size=(1, 3, 4)) elementwise_mul_bool("elementwise_mul_bool1", data_x, data_y) + if __name__ == "__main__": main() diff --git a/src/frontends/paddle/tests/test_models/gen_scripts/generate_embedding.py b/src/frontends/paddle/tests/test_models/gen_scripts/generate_embedding.py index 1808a613f5d142..576c08b2b87a1d 100644 --- a/src/frontends/paddle/tests/test_models/gen_scripts/generate_embedding.py +++ b/src/frontends/paddle/tests/test_models/gen_scripts/generate_embedding.py @@ -6,9 +6,10 @@ # for lookup_table_v2 # https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/nn/Embedding_cn.html#embedding # equal to "gather" -# +# import numpy as np import sys +import paddle from save_model import saveModel @@ -21,21 +22,22 @@ def ov_embedding(ids, vocab_embeddings, vocab_size, embedding_dim, padding_idx, from openvino import Core if vocab_embeddings is None: - # - vocab_embeddings = np.zeros((vocab_size, embedding_dim)).astype("float32") + # + vocab_embeddings = np.zeros( + (vocab_size, embedding_dim)).astype("float32") node_ids = opset8.parameter(shape=ids.shape, name='ids', dtype=ids.dtype) node_w = opset8.parameter(shape=vocab_embeddings.shape, name='w', dtype=vocab_embeddings.dtype) if padding_idx == -1: padding_idx += vocab_size - + if padding_idx is not None: ''' mask W ''' masked_embeddings = np.ones(vocab_embeddings.shape, dtype='int64') - masked_embeddings[padding_idx,:] = 0 # mask + masked_embeddings[padding_idx, :] = 0 # mask node_mask = opset8.constant(masked_embeddings, name='mask', dtype=vocab_embeddings.dtype) node_masked_w = opset8.multiply(node_w, node_mask) @@ -56,21 +58,24 @@ def ov_embedding(ids, vocab_embeddings, vocab_size, embedding_dim, padding_idx, return output -def embedding(name : str, ids, vocab_size, embedding_dim, padding_idx=None, sparse=False, vocab_embeddings=None, compare=False): + +def embedding(name: str, ids, vocab_size, embedding_dim, padding_idx=None, sparse=False, vocab_embeddings=None, compare=False): """ padding_idx (int|long|None) """ - import paddle paddle.enable_static() with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): - node_ids = paddle.static.data(name = 'Ids', shape = ids.shape, dtype = ids.dtype) + node_ids = paddle.static.data( + name='Ids', shape=ids.shape, dtype=ids.dtype) pretrained_attr = paddle.ParamAttr(name='W', - initializer=paddle.nn.initializer.Assign(vocab_embeddings), - trainable=False) if vocab_embeddings is not None else None + initializer=paddle.nn.initializer.Assign( + vocab_embeddings), + trainable=False) if vocab_embeddings is not None else None - node_embedding = paddle.nn.Embedding(num_embeddings=vocab_size, embedding_dim=embedding_dim, padding_idx=padding_idx, sparse=sparse, weight_attr=pretrained_attr, name=name) + node_embedding = paddle.nn.Embedding(num_embeddings=vocab_size, embedding_dim=embedding_dim, + padding_idx=padding_idx, sparse=sparse, weight_attr=pretrained_attr, name=name) node_out = node_embedding(node_ids) cpu = paddle.static.cpu_places(1) @@ -78,15 +83,16 @@ def embedding(name : str, ids, vocab_size, embedding_dim, padding_idx=None, spar # startup program will call initializer to initialize the parameters. 
     exe.run(paddle.static.default_startup_program())
-
+
     input_dict = {'Ids': ids}
     output_vars_list = [node_out]
     infer_results = exe.run(
-                feed=input_dict,
-                fetch_list=output_vars_list )
+        feed=input_dict,
+        fetch_list=output_vars_list)
 
-    saveModel(name, exe, feedkeys=[node_ids], fetchlist=output_vars_list, inputs=list(input_dict.values()), outputs=infer_results, target_dir=sys.argv[1], use_static_api=True)
+    saveModel(name, exe, feedkeys=[node_ids], fetchlist=output_vars_list, inputs=list(
+        input_dict.values()), outputs=infer_results, target_dir=sys.argv[1], use_static_api=True)
 
     #
     outputs = dict()
@@ -98,30 +104,29 @@ def embedding(name : str, ids, vocab_size, embedding_dim, padding_idx=None, spar
         ng_result = ov_embedding(ids, vocab_embeddings, vocab_size, embedding_dim, padding_idx, sparse)
         ng_result = list(ng_result.values())[0]
         paddle_result = list(outputs.values())[0]
-
+
         match = np.all(np.isclose(
             paddle_result, ng_result, rtol=1e-4, atol=1e-5))
         prefix_color = '\n\033[92m' if match else '\n\033[91m'
-        print(prefix_color +
-            'TestCase {} Result {} '.format(name, match) + '\033[0m\n')
+        print(prefix_color +
+              'TestCase {} Result {} '.format(name, match) + '\033[0m\n')
 
         if not match:
             np.set_printoptions(precision=2)
            np.set_printoptions(suppress=True)
-            print(prefix_color +
-                'paddle_result: {}'.format(paddle_result) + '\033[0m\n')
-            print(prefix_color +
-                'ng_result: {}'.format(ng_result) + '\033[0m\n')
+            print(prefix_color +
+                  'paddle_result: {}'.format(paddle_result) + '\033[0m\n')
+            print(prefix_color +
+                  'ng_result: {}'.format(ng_result) + '\033[0m\n')
             raise ValueError(name + ': OV result does not match paddle!')
 
     return outputs
-
+
 
 if __name__ == "__main__":
-    import paddle.compat as cpt
 
     vocab_size = 17
     embedding_dim = 31
@@ -129,42 +134,55 @@ def embedding(name : str, ids, vocab_size, embedding_dim, padding_idx=None, spar
     #
     ids = np.random.randint(0, vocab_size, 4).astype("int32")
-    embedding("embedding_0", ids, vocab_size, embedding_dim, vocab_embeddings=table, compare=False)
+    embedding("embedding_0", ids, vocab_size, embedding_dim,
+              vocab_embeddings=table, compare=False)
 
     #
     ids = np.random.randint(0, vocab_size, 4).astype("int32")
-    embedding("embedding_sparse", ids, vocab_size, embedding_dim, sparse=True, vocab_embeddings=table, compare=False)
+    embedding("embedding_sparse", ids, vocab_size, embedding_dim,
+              sparse=True, vocab_embeddings=table, compare=False)
 
     #
     # compare fail
     ids = np.random.randint(0, vocab_size, 4).astype("int32")
-    embedding("embedding_none_weight", ids, vocab_size, embedding_dim, compare=False)
+    embedding("embedding_none_weight", ids,
+              vocab_size, embedding_dim, compare=False)
 
     #
     ids = np.random.randint(0, vocab_size, 4).astype("int32")
     ids = np.squeeze(ids)
     padding_idx = np.random.choice(ids, 1)[0]
     # print('padding_idx {}, ids {}'.format(padding_idx, ids))
-    outputs = embedding("embedding_paddings", ids, vocab_size, embedding_dim, padding_idx=int(padding_idx), vocab_embeddings=table, compare=False)
+    outputs = embedding("embedding_paddings", ids, vocab_size, embedding_dim, padding_idx=int(
+        padding_idx), vocab_embeddings=table, compare=False)
     # print('outputs {}'.format(outputs))
 
     # corner case
     ids = np.random.randint(0, vocab_size, 4).astype("int32")
-    pick = np.random.choice(4, 1)[0] # pick randomly to be max vacab_size -1
-    ids[pick] = vocab_size-1
+    pick = np.random.choice(4, 1)[0]  # pick randomly to be max vocab_size - 1
+    ids[pick] = vocab_size - 1
     padding_idx = -1
     # print('padding_idx {}, ids {}'.format(padding_idx, ids))
-    outputs =
embedding("embedding_paddings_neg1", ids, vocab_size, embedding_dim, padding_idx=int(padding_idx), vocab_embeddings=table, compare=False) - # print('outputs {}'.format(outputs)) + outputs = embedding("embedding_paddings_neg1", ids, vocab_size, embedding_dim, + padding_idx=int(padding_idx), vocab_embeddings=table, compare=False) + # print('outputs {}'.format(outputs)) # - ids = np.random.randint(low=0, high=vocab_size, size=(2, 4, 5)).astype("int32") - embedding("embedding_tensorIds", ids, vocab_size, embedding_dim, vocab_embeddings=table, compare=False) - + ids = np.random.randint(low=0, high=vocab_size, + size=(2, 4, 5)).astype("int32") + embedding("embedding_tensorIds", ids, vocab_size, + embedding_dim, vocab_embeddings=table, compare=False) + # - ids = np.random.randint(low=0, high=vocab_size, size=(2, 4, 5)).astype("int32") + ids = np.random.randint(low=0, high=vocab_size, + size=(2, 4, 5)).astype("int32") flatten_idx = ids.flatten() padding_idx = np.random.choice(flatten_idx, 1)[0] # print('padding_idx {}'.format(padding_idx)) - outputs = embedding("embedding_tensorIds_paddings", ids, vocab_size, embedding_dim, padding_idx=cpt.long_type(padding_idx), vocab_embeddings=table, compare=False) - # print('outputs {}'.format(outputs)) - + + if paddle.__version__ >= '2.0.0': + outputs = embedding("embedding_tensorIds_paddings", ids, vocab_size, embedding_dim, + padding_idx=np.compat.long(padding_idx), vocab_embeddings=table, compare=False) + else: + import paddle.compat as cpt + outputs = embedding("embedding_tensorIds_paddings", ids, vocab_size, embedding_dim, + padding_idx=cpt.long_type(padding_idx), vocab_embeddings=table, compare=False) diff --git a/src/frontends/paddle/tests/test_models/gen_scripts/generate_exp.py b/src/frontends/paddle/tests/test_models/gen_scripts/generate_exp.py index 344351c9bc790f..e1bc08053b3be8 100644 --- a/src/frontends/paddle/tests/test_models/gen_scripts/generate_exp.py +++ b/src/frontends/paddle/tests/test_models/gen_scripts/generate_exp.py @@ -4,6 +4,8 @@ # # exp paddle model generator # + +import paddle import numpy as np from save_model import saveModel import sys @@ -15,7 +17,10 @@ def exp(name: str, x): with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): node_x = paddle.static.data(name='x', shape=x.shape, dtype=x.dtype) - out = paddle.fluid.layers.exp(x=node_x) + if paddle.__version__ >= '2.0.0': + out = paddle.exp(x=node_x) + else: + out = paddle.fluid.layers.exp(x=node_x) cpu = paddle.static.cpu_places(1) exe = paddle.static.Executor(cpu[0]) # startup program will call initializer to initialize the parameters. 
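
(The generator scripts below follow the same version-gating idiom used in generate_exp.py above: branch on paddle.__version__ so a single script can emit the test models through either the legacy fluid API or the Paddle 2.x API. A stripped-down sketch of the idiom, with a hypothetical make_exp helper; the check is a plain lexicographic string comparison, which is sufficient to separate 1.8 from 2.x:

    import paddle

    def make_exp(node):
        # Lexicographic version gate, as used throughout these scripts;
        # adequate for '1.8' vs '2.x', though a hypothetical '10.x'
        # would compare as smaller than '2.0.0'.
        if paddle.__version__ >= '2.0.0':
            return paddle.exp(x=node)
        return paddle.fluid.layers.exp(x=node)

Keeping both branches lets the same model set be regenerated against old and new Paddle releases without maintaining two copies of each script.)
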
diff --git a/src/frontends/paddle/tests/test_models/gen_scripts/generate_fill_constant.py b/src/frontends/paddle/tests/test_models/gen_scripts/generate_fill_constant.py deleted file mode 100644 index f1b2f250dd0345..00000000000000 --- a/src/frontends/paddle/tests/test_models/gen_scripts/generate_fill_constant.py +++ /dev/null @@ -1,100 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -# -# fill_const paddle model generator -# -import numpy as np -from save_model import saveModel -import paddle -import sys - - -def fill_constant(name : str, shape : list, dtype, value): - paddle.enable_static() - with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): - x1 = paddle.fluid.layers.fill_constant(shape=shape, value=value, dtype=dtype, name='fill_constant') - x2 = paddle.fluid.layers.fill_constant(shape=shape, value=value, dtype=dtype, name='fill_constant') - out = paddle.add(paddle.cast(x1, np.float32), paddle.cast(x2, np.float32)) - cpu = paddle.static.cpu_places(1) - exe = paddle.static.Executor(cpu[0]) - # startup program will call initializer to initialize the parameters. - exe.run(paddle.static.default_startup_program()) - - outs = exe.run( - fetch_list=[out]) - - saveModel(name, exe, feedkeys=[], fetchlist=[out], inputs=[], outputs=[outs[0]], target_dir=sys.argv[1]) - - return outs[0] - - -def fill_constant_tensor(name : str, shape : list, dtype, value): - paddle.enable_static() - with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): - node_value = paddle.static.data(name='value', shape=[1], dtype=dtype) - x1 = paddle.fluid.layers.fill_constant(shape=shape, value=node_value, dtype=dtype, name='fill_constant1') - out = paddle.cast(x1, np.float32) - cpu = paddle.static.cpu_places(1) - exe = paddle.static.Executor(cpu[0]) - # startup program will call initializer to initialize the parameters. - exe.run(paddle.static.default_startup_program()) - - outs = exe.run( - feed={"value": value}, - fetch_list=[out]) - - saveModel(name, exe, feedkeys=["value"], fetchlist=[out], inputs=[np.array([value]).astype(dtype)], outputs=[outs[0]], target_dir=sys.argv[1]) - - return outs[0] - - -def fill_constant_shape_tensor(name : str, shape, dtype, value): - paddle.enable_static() - with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): - node_shape = paddle.fluid.layers.fill_constant(shape=[2], value=shape, dtype='int32', name='shape') - x1 = paddle.fluid.layers.fill_constant(shape=node_shape, value=value, dtype=dtype, name='fill_constant') - out = paddle.cast(x1, np.float32) - cpu = paddle.static.cpu_places(1) - exe = paddle.static.Executor(cpu[0]) - # startup program will call initializer to initialize the parameters. 
- exe.run(paddle.static.default_startup_program()) - - outs = exe.run( - fetch_list=[out]) - - saveModel(name, exe, feedkeys=[], fetchlist=[out], inputs=[], outputs=[outs[0]], target_dir=sys.argv[1]) - - return outs[0] - - -def fill_constant_shape_tensor_list(name : str, shape: list, dtype, value): - paddle.enable_static() - with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): - node_shape = paddle.fluid.layers.fill_constant(shape=[1], value=shape, dtype='int32', name='shape') - x1 = paddle.fluid.layers.fill_constant(shape=[2, node_shape], value=value, dtype=dtype, name='fill_constant') - out = paddle.cast(x1, np.float32) - cpu = paddle.static.cpu_places(1) - exe = paddle.static.Executor(cpu[0]) - # startup program will call initializer to initialize the parameters. - exe.run(paddle.static.default_startup_program()) - - outs = exe.run( - fetch_list=[out]) - - saveModel(name, exe, feedkeys=[], fetchlist=[out], inputs=[], outputs=[outs[0]], target_dir=sys.argv[1]) - - return outs[0] - - -def main(): - fill_constant("fill_constant", [2, 3, 4], 'float32', 0.03) - fill_constant("fill_constant_int32", [2, 3, 4], "int32", 2) - fill_constant("fill_constant_int64", [2, 3, 4], "int64", 4) - fill_constant_tensor("fill_constant_tensor", [2, 3, 4], 'float32', 0.05) - fill_constant_shape_tensor("fill_constant_shape_tensor", 2, 'float32', 0.05) - fill_constant_shape_tensor_list("fill_constant_shape_tensor_list", 2, 'float32', 0.05) - - -if __name__ == "__main__": - main() diff --git a/src/frontends/paddle/tests/test_models/gen_scripts/generate_fill_constant_batch_size_like.py b/src/frontends/paddle/tests/test_models/gen_scripts/generate_fill_constant_batch_size_like.py deleted file mode 100644 index 7701eba8121395..00000000000000 --- a/src/frontends/paddle/tests/test_models/gen_scripts/generate_fill_constant_batch_size_like.py +++ /dev/null @@ -1,42 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -# -# fill_constant_batch_size_like paddle model generator -# -import numpy as np -from save_model import saveModel -import paddle -import sys - -data_type = 'float32' - -def fill_constant_batch_size_like(name : str, x, shape, dtype, value, input_dim_idx=0, output_dim_idx=0): - paddle.enable_static() - - with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): - like = paddle.static.data(name='x', shape=x.shape, dtype = data_type) - out = paddle.fluid.layers.fill_constant_batch_size_like(input=like, shape=shape, \ - value=value, dtype=dtype, \ - output_dim_idx=output_dim_idx, input_dim_idx=input_dim_idx) - - cpu = paddle.static.cpu_places(1) - exe = paddle.static.Executor(cpu[0]) - # startup program will call initializer to initialize the parameters. 
-    exe.run(paddle.static.default_startup_program())
-
-    outs = exe.run(
-        feed={'x': x},
-        fetch_list=[out])
-
-    saveModel(name, exe, feedkeys=['x'], fetchlist=[out], inputs=[x], outputs=[outs[0]], target_dir=sys.argv[1])
-
-    return outs[0]
-
-def main():
-    x = np.random.rand(4, 3, 2).astype(data_type)
-    fill_constant_batch_size_like("fill_constant_batch_size_like", \
-        x, [1, -1, 3], data_type, 0.03, 2, 1)
-
-if __name__ == "__main__":
-    main()
diff --git a/src/frontends/paddle/tests/test_models/gen_scripts/generate_full.py b/src/frontends/paddle/tests/test_models/gen_scripts/generate_full.py
new file mode 100644
index 00000000000000..2bbcbaf7836e12
--- /dev/null
+++ b/src/frontends/paddle/tests/test_models/gen_scripts/generate_full.py
@@ -0,0 +1,118 @@
+# Copyright (C) 2018-2023 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+#
+# full paddle model generator
+#
+import numpy as np
+from save_model import saveModel
+import paddle
+import sys
+
+
+def full(name : str, shape : list, dtype, value):
+    paddle.enable_static()
+    with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()):
+        if paddle.__version__ >= '2.0.0':
+            x1 = paddle.full(shape=shape, fill_value=value, dtype=dtype, name='fill')
+            x2 = paddle.full(shape=shape, fill_value=value, dtype=dtype, name='fill')
+        else:
+            x1 = paddle.fluid.layers.fill_constant(shape=shape, value=value, dtype=dtype, name='fill_constant')
+            x2 = paddle.fluid.layers.fill_constant(shape=shape, value=value, dtype=dtype, name='fill_constant')
+        out = paddle.add(paddle.cast(x1, np.float32), paddle.cast(x2, np.float32))
+        cpu = paddle.static.cpu_places(1)
+        exe = paddle.static.Executor(cpu[0])
+        # startup program will call initializer to initialize the parameters.
+        exe.run(paddle.static.default_startup_program())
+
+        outs = exe.run(
+            fetch_list=[out])
+
+        saveModel(name, exe, feedkeys=[], fetchlist=[out], inputs=[], outputs=[outs[0]], target_dir=sys.argv[1])
+
+    return outs[0]
+
+
+def full_tensor(name : str, shape : list, dtype, value):
+    paddle.enable_static()
+    with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()):
+        if paddle.__version__ >= '2.5.1':
+            node_value = paddle.static.data(name='value', shape=[], dtype=dtype)
+            x1 = paddle.full(shape=shape, fill_value=node_value, dtype=dtype, name='full1')
+        elif paddle.__version__ >= '2.0.0':
+            node_value = paddle.static.data(name='value', shape=[1], dtype=dtype)
+            x1 = paddle.full(shape=shape, fill_value=node_value, dtype=dtype, name='full1')
+        else:
+            node_value = paddle.static.data(name='value', shape=[1], dtype=dtype)
+            x1 = paddle.fluid.layers.fill_constant(shape=shape, value=node_value, dtype=dtype, name='fill_constant1')
+        out = paddle.cast(x1, np.float32)
+        cpu = paddle.static.cpu_places(1)
+        exe = paddle.static.Executor(cpu[0])
+        # startup program will call initializer to initialize the parameters.
+ exe.run(paddle.static.default_startup_program()) + + outs = exe.run( + feed={"value": value}, + fetch_list=[out]) + + if paddle.__version__ >= '2.5.1': + saveModel(name, exe, feedkeys=["value"], fetchlist=[out], inputs=[np.array(value).astype(dtype)], outputs=[outs[0]], target_dir=sys.argv[1]) + else: + saveModel(name, exe, feedkeys=["value"], fetchlist=[out], inputs=[np.array([value]).astype(dtype)], outputs=[outs[0]], target_dir=sys.argv[1]) + + return outs[0] + + +def full_shape_tensor(name : str, shape, dtype, value): + paddle.enable_static() + with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): + if paddle.__version__ >= '2.0.0': + node_shape = paddle.full(shape=[2], fill_value=shape, dtype='int32', name='shape') + x1 = paddle.full(shape=node_shape, fill_value=value, dtype=dtype, name='full') + else: + node_shape = paddle.fluid.layers.fill_constant(shape=[1], value=shape, dtype='int32', name='shape') + x1 = paddle.fluid.layers.fill_constant(shape=[2, node_shape], value=value, dtype=dtype, name='fill_constant') + out = paddle.cast(x1, np.float32) + cpu = paddle.static.cpu_places(1) + exe = paddle.static.Executor(cpu[0]) + # startup program will call initializer to initialize the parameters. + exe.run(paddle.static.default_startup_program()) + + outs = exe.run( + fetch_list=[out]) + + saveModel(name, exe, feedkeys=[], fetchlist=[out], inputs=[], outputs=[outs[0]], target_dir=sys.argv[1]) + + return outs[0] + + +def full_shape_tensor_list(name : str, shape: list, dtype, value): + paddle.enable_static() + with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): + node_shape = paddle.full(shape=[1], fill_value=shape, dtype='int32', name='shape') + x1 = paddle.full(shape=[2, node_shape], fill_value=value, dtype=dtype, name='full') + out = paddle.cast(x1, np.float32) + cpu = paddle.static.cpu_places(1) + exe = paddle.static.Executor(cpu[0]) + # startup program will call initializer to initialize the parameters. 
+ exe.run(paddle.static.default_startup_program()) + + outs = exe.run( + fetch_list=[out]) + + saveModel(name, exe, feedkeys=[], fetchlist=[out], inputs=[], outputs=[outs[0]], target_dir=sys.argv[1]) + + return outs[0] + + +def main(): + full("full", [2, 3, 4], 'float32', 0.03) + full("full_int32", [2, 3, 4], "int32", 2) + full("full_int64", [2, 3, 4], "int64", 4) + full_tensor("full_tensor", [2, 3, 4], 'float32', 0.05) + full_shape_tensor("full_shape_tensor", 2, 'float32', 0.05) + full_shape_tensor_list("full_shape_tensor_list", 2, 'float32', 0.05) + + +if __name__ == "__main__": + main() diff --git a/src/frontends/paddle/tests/test_models/gen_scripts/generate_fill_any_like.py b/src/frontends/paddle/tests/test_models/gen_scripts/generate_full_like.py similarity index 63% rename from src/frontends/paddle/tests/test_models/gen_scripts/generate_fill_any_like.py rename to src/frontends/paddle/tests/test_models/gen_scripts/generate_full_like.py index ab32b20a8e6898..71a2d634ba624a 100644 --- a/src/frontends/paddle/tests/test_models/gen_scripts/generate_fill_any_like.py +++ b/src/frontends/paddle/tests/test_models/gen_scripts/generate_full_like.py @@ -2,7 +2,7 @@ # SPDX-License-Identifier: Apache-2.0 # -# fill_any_like paddle model generator +# full_like paddle model generator # import numpy as np from save_model import saveModel @@ -11,7 +11,7 @@ data_type = 'float32' -def fill_any_like(name:str, x, value, dtype=None): +def full_like(name:str, x, value, dtype=None): paddle.enable_static() with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): @@ -35,18 +35,18 @@ def fill_any_like(name:str, x, value, dtype=None): def main(): x = np.random.rand(8, 24, 32).astype(data_type) - fill_any_like("fill_any_like", x, 1.2) - fill_any_like("fill_any_like_f16", x, 1.0, dtype='float16') - fill_any_like("fill_any_like_f32", x, 1.2, dtype='float32') - fill_any_like("fill_any_like_f64", x, 1.2, dtype='float64') - fill_any_like("fill_any_like_i16", x, 3, dtype='int16') - fill_any_like("fill_any_like_i32", x, 2, dtype='int32') - fill_any_like("fill_any_like_i64", x, 10, dtype='int64') - fill_any_like("fill_any_like_bool", x, True, dtype='bool') + full_like("full_like", x, 1.2) + full_like("full_like_f16", x, 1.0, dtype='float16') + full_like("full_like_f32", x, 1.2, dtype='float32') + full_like("full_like_f64", x, 1.2, dtype='float64') + full_like("full_like_i16", x, 3, dtype='int16') + full_like("full_like_i32", x, 2, dtype='int32') + full_like("full_like_i64", x, 10, dtype='int64') + full_like("full_like_bool", x, True, dtype='bool') sample_arr = [True, False] x = np.random.choice(sample_arr, size=(13,17,11)) - fill_any_like("fill_any_like_bool_2", x, False, dtype=None) + full_like("full_like_bool_2", x, False, dtype=None) if __name__ == "__main__": main() diff --git a/src/frontends/paddle/tests/test_models/gen_scripts/generate_gelu.py b/src/frontends/paddle/tests/test_models/gen_scripts/generate_gelu.py index b67959ef0ed439..a17fadc9c10bcb 100644 --- a/src/frontends/paddle/tests/test_models/gen_scripts/generate_gelu.py +++ b/src/frontends/paddle/tests/test_models/gen_scripts/generate_gelu.py @@ -16,7 +16,10 @@ def gelu(name:str, x, approximate=False): with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): data = paddle.static.data(name='x', shape=x.shape, dtype = data_type) - out = paddle.fluid.layers.gelu(data, approximate=approximate) + if paddle.__version__ >= '2.0.0': + out = paddle.nn.functional.gelu(data, approximate=approximate) + else: + out = 
paddle.fluid.layers.gelu(data, approximate=approximate) cpu = paddle.static.cpu_places(1) exe = paddle.static.Executor(cpu[0]) diff --git a/src/frontends/paddle/tests/test_models/gen_scripts/generate_greater_equal.py b/src/frontends/paddle/tests/test_models/gen_scripts/generate_greater_equal.py index c3a82914fc0427..f540c0ea8302ec 100644 --- a/src/frontends/paddle/tests/test_models/gen_scripts/generate_greater_equal.py +++ b/src/frontends/paddle/tests/test_models/gen_scripts/generate_greater_equal.py @@ -16,7 +16,10 @@ def greater_equal(name : str, x, y, data_type, cast_to_fp32=False): with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): node_x = paddle.static.data(name='input_x', shape=x.shape, dtype=data_type) node_y = paddle.static.data(name='input_y', shape=y.shape, dtype=data_type) - out = paddle.fluid.layers.greater_equal(x=node_x, y=node_y, name='greater_equal') + if paddle.__version__ >= '2.0.0': + out = paddle.greater_equal(x=node_x, y=node_y, name='greater_equal') + else: + out = paddle.fluid.layers.greater_equal(x=node_x, y=node_y, name='greater_equal') # FuzzyTest framework doesn't support boolean so cast to fp32/int32 if cast_to_fp32: diff --git a/src/frontends/paddle/tests/test_models/gen_scripts/generate_greater_than.py b/src/frontends/paddle/tests/test_models/gen_scripts/generate_greater_than.py index 045f80c341b411..7ca2b6ad95f9a5 100644 --- a/src/frontends/paddle/tests/test_models/gen_scripts/generate_greater_than.py +++ b/src/frontends/paddle/tests/test_models/gen_scripts/generate_greater_than.py @@ -15,8 +15,12 @@ def greater_than(name: str, x, y, data_type, cast_to_fp32=False): name='input_x', shape=x.shape, dtype=data_type) node_y = pdpd.static.data( name='input_y', shape=y.shape, dtype=data_type) - out = pdpd.fluid.layers.greater_than( - x=node_x, y=node_y, name='greater_than') + if pdpd.__version__ >= '2.0.0': + out = pdpd.greater_than( + x=node_x, y=node_y, name='greater_than') + else: + out = pdpd.fluid.layers.greater_than( + x=node_x, y=node_y, name='greater_than') # FuzzyTest framework doesn't support boolean so cast to fp32/int32 if cast_to_fp32: diff --git a/src/frontends/paddle/tests/test_models/gen_scripts/generate_grid_sampler.py b/src/frontends/paddle/tests/test_models/gen_scripts/generate_grid_sampler.py index 4dec54ad32c223..d3c404fb88108b 100644 --- a/src/frontends/paddle/tests/test_models/gen_scripts/generate_grid_sampler.py +++ b/src/frontends/paddle/tests/test_models/gen_scripts/generate_grid_sampler.py @@ -51,6 +51,8 @@ def main(): x = np.random.randn(2, 3, 128, 128).astype(dtype) grid = np.random.uniform(-1, 1, [2, 130, 130, 2]).astype(dtype) padding_mode = "border" + grid_sampler(name='grid_sampler_3', x=x, grid=grid, mode=mode, padding_mode=padding_mode, + align_corners=align_corners) grid_sampler(name='grid_sampler_dyn', x=x, grid=grid, mode=mode, padding_mode=padding_mode, align_corners=align_corners, is_dynamic=True) diff --git a/src/frontends/paddle/tests/test_models/gen_scripts/generate_hard_sigmoid.py b/src/frontends/paddle/tests/test_models/gen_scripts/generate_hard_sigmoid.py index 68c9179421c788..9ccdb623094107 100644 --- a/src/frontends/paddle/tests/test_models/gen_scripts/generate_hard_sigmoid.py +++ b/src/frontends/paddle/tests/test_models/gen_scripts/generate_hard_sigmoid.py @@ -15,7 +15,7 @@ def hard_sigmoid(name: str, x, slope: float = 0.2, offset: float = 0.5, data_typ with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): node_x = paddle.static.data(name='x', 
shape=x.shape, dtype = data_type) - out = paddle.fluid.layers.hard_sigmoid(node_x, slope=slope, offset=offset, name='hard_sigmoid') + out = paddle.nn.functional.hardsigmoid(node_x, slope=slope, offset=offset, name='hard_sigmoid') cpu = paddle.static.cpu_places(1) exe = paddle.static.Executor(cpu[0]) diff --git a/src/frontends/paddle/tests/test_models/gen_scripts/generate_hard_swish.py b/src/frontends/paddle/tests/test_models/gen_scripts/generate_hard_swish.py index 2fdf4a3ee29f66..c86eab1877abe3 100644 --- a/src/frontends/paddle/tests/test_models/gen_scripts/generate_hard_swish.py +++ b/src/frontends/paddle/tests/test_models/gen_scripts/generate_hard_swish.py @@ -15,7 +15,10 @@ def hard_swish(name: str, x, threshold=6.0, scale=6.0, offset=3.0, data_type='fl with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): node_x = paddle.static.data(name='x', shape=x.shape, dtype=data_type) - out = paddle.fluid.layers.hard_swish(node_x, threshold=threshold, scale=scale, offset=offset, name='hard_swish') + if paddle.__version__ >= '2.0.0': + out = paddle.nn.functional.hardswish(node_x, name='hard_swish') + else: + out = paddle.fluid.layers.hard_swish(node_x, threshold=threshold, scale=scale, offset=offset, name='hard_swish') cpu = paddle.static.cpu_places(1) exe = paddle.static.Executor(cpu[0]) diff --git a/src/frontends/paddle/tests/test_models/gen_scripts/generate_index_select.py b/src/frontends/paddle/tests/test_models/gen_scripts/generate_index_select.py index f1f2bdcd39e1f8..476471daed2070 100644 --- a/src/frontends/paddle/tests/test_models/gen_scripts/generate_index_select.py +++ b/src/frontends/paddle/tests/test_models/gen_scripts/generate_index_select.py @@ -5,7 +5,6 @@ # index_select paddle model generator # import numpy as np -from paddle.fluid import param_attr from save_model import saveModel import paddle import sys diff --git a/src/frontends/paddle/tests/test_models/gen_scripts/generate_layer_norm.py b/src/frontends/paddle/tests/test_models/gen_scripts/generate_layer_norm.py index 593f019f2fba87..25d90cae8f981b 100644 --- a/src/frontends/paddle/tests/test_models/gen_scripts/generate_layer_norm.py +++ b/src/frontends/paddle/tests/test_models/gen_scripts/generate_layer_norm.py @@ -36,8 +36,12 @@ def layer_norm(name:str, x, begin_norm_axis, scale=True, shift=True, param_attr= def main(): x = np.random.rand(8, 24, 32).astype(data_type) random_data = np.random.rand(24 * 32).astype(data_type) - attr = paddle.ParamAttr( - initializer=paddle.fluid.initializer.NumpyArrayInitializer(random_data)) + if paddle.__version__ >= '2.0.0': + attr = paddle.ParamAttr( + initializer=paddle.nn.initializer.Assign(random_data)) + else: + attr = paddle.ParamAttr( + initializer=paddle.fluid.initializer.NumpyArrayInitializer(random_data)) layer_norm("layer_norm", x, begin_norm_axis=1, param_attr=attr, bias_attr=attr) layer_norm("layer_norm_noscale", x, scale=False, begin_norm_axis=2) layer_norm("layer_norm_noshift", x, shift=False, begin_norm_axis=1) diff --git a/src/frontends/paddle/tests/test_models/gen_scripts/generate_leaky_relu.py b/src/frontends/paddle/tests/test_models/gen_scripts/generate_leaky_relu.py index 580a0a0a7acbb0..71740e7961382e 100644 --- a/src/frontends/paddle/tests/test_models/gen_scripts/generate_leaky_relu.py +++ b/src/frontends/paddle/tests/test_models/gen_scripts/generate_leaky_relu.py @@ -10,12 +10,15 @@ import sys -def leaky_relu(name: str, x, alpha: float = 0.02, data_type='float32'): +def leaky_relu(name: str, x, alpha: float = 0.01, data_type='float32'): 
paddle.enable_static() with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): node_x = paddle.static.data(name='x', shape=x.shape, dtype = data_type) - out = paddle.fluid.layers.leaky_relu(node_x, alpha=alpha, name='leaky_relu') + if paddle.__version__ >= '2.0.0': + out = paddle.nn.functional.leaky_relu(node_x, negative_slope=alpha, name='leaky_relu') + else: + out = paddle.fluid.layers.leaky_relu(node_x, alpha=alpha, name='leaky_relu') cpu = paddle.static.cpu_places(1) exe = paddle.static.Executor(cpu[0]) diff --git a/src/frontends/paddle/tests/test_models/gen_scripts/generate_less_than.py b/src/frontends/paddle/tests/test_models/gen_scripts/generate_less_than.py index 187f920df297c8..91749fffdd3f84 100644 --- a/src/frontends/paddle/tests/test_models/gen_scripts/generate_less_than.py +++ b/src/frontends/paddle/tests/test_models/gen_scripts/generate_less_than.py @@ -3,29 +3,32 @@ # import numpy as np from save_model import saveModel -import paddle as pdpd +import paddle import sys def less_than(name: str, x, y, data_type, cast_to_fp32=False): - pdpd.enable_static() + paddle.enable_static() - with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()): - node_x = pdpd.static.data( + with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): + node_x = paddle.static.data( name='input_x', shape=x.shape, dtype=data_type) - node_y = pdpd.static.data( + node_y = paddle.static.data( name='input_y', shape=y.shape, dtype=data_type) - out = pdpd.fluid.layers.less_than(x=node_x, y=node_y, name='less_than') + if paddle.__version__ >= '2.0.0': + out = paddle.less_than(x=node_x, y=node_y, name='less_than') + else: + out = paddle.fluid.layers.less_than(x=node_x, y=node_y, name='less_than') # FuzzyTest framework doesn't support boolean so cast to fp32/int32 if cast_to_fp32: data_type = "float32" - out = pdpd.cast(out, data_type) - cpu = pdpd.static.cpu_places(1) - exe = pdpd.static.Executor(cpu[0]) + out = paddle.cast(out, data_type) + cpu = paddle.static.cpu_places(1) + exe = paddle.static.Executor(cpu[0]) # startup program will call initializer to initialize the parameters. 
-        exe.run(pdpd.static.default_startup_program())
+        exe.run(paddle.static.default_startup_program())
 
         outs = exe.run(
             feed={'input_x': x, 'input_y': y},
diff --git a/src/frontends/paddle/tests/test_models/gen_scripts/generate_log.py b/src/frontends/paddle/tests/test_models/gen_scripts/generate_log.py
index 7cd9196172d551..1fd2cbde20a3f5 100644
--- a/src/frontends/paddle/tests/test_models/gen_scripts/generate_log.py
+++ b/src/frontends/paddle/tests/test_models/gen_scripts/generate_log.py
@@ -15,7 +15,10 @@ def log(name: str, x, data_type='float32'):
 
     with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()):
         node_x = paddle.static.data(name='x', shape=x.shape, dtype=data_type)
-        out = paddle.fluid.layers.log(node_x, name='log')
+        if paddle.__version__ >= '2.0.0':
+            out = paddle.log(node_x, name='log')
+        else:
+            out = paddle.fluid.layers.log(node_x, name='log')
 
         cpu = paddle.static.cpu_places(1)
         exe = paddle.static.Executor(cpu[0])
diff --git a/src/frontends/paddle/tests/test_models/gen_scripts/generate_loop.py b/src/frontends/paddle/tests/test_models/gen_scripts/generate_loop.py
index 2cb59b42e45320..72185ba530cb3d 100644
--- a/src/frontends/paddle/tests/test_models/gen_scripts/generate_loop.py
+++ b/src/frontends/paddle/tests/test_models/gen_scripts/generate_loop.py
@@ -23,7 +23,10 @@ def body(i, dummy):
     with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()):
         node_x = paddle.static.data(name='x', shape=x.shape, dtype=x.dtype)
         node_i = paddle.full(shape=[1], fill_value=0, dtype='int64', name='i')
-        node_i = paddle.fluid.layers.nn.elementwise_add(node_i, node_x)
+        if paddle.__version__ >= '2.0.0':
+            node_i = paddle.add(node_i, node_x)
+        else:
+            node_i = paddle.fluid.layers.nn.elementwise_add(node_i, node_x)
         node_ten = paddle.full(shape=[1], fill_value=10, dtype='int64', name='ten')
 
         out, dummy = paddle.static.nn.while_loop(cond, body, [node_i, node_ten], name='while_loop')
@@ -49,7 +52,10 @@ def body(i, t):
     with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()):
         node_x = paddle.static.data(name='x', shape=x.shape, dtype=x.dtype)
         node_i = paddle.full(shape=[1], fill_value=0, dtype='int64', name='i')
-        node_i = paddle.fluid.layers.nn.elementwise_add(node_i, node_x)
+        if paddle.__version__ >= '2.0.0':
+            node_i = paddle.add(node_i, node_x)
+        else:
+            node_i = paddle.fluid.layers.nn.elementwise_add(node_i, node_x)
         node_ten = paddle.full(shape=[1], fill_value=10, dtype='int64', name='ten')
 
         out, dummy = paddle.static.nn.while_loop(cond, body, [node_i, node_ten], name='while_loop')
@@ -76,7 +82,10 @@ def body(i, t):
     with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()):
         node_x = paddle.static.data(name='x', shape=x.shape, dtype=x.dtype)
         node_i = paddle.full(shape=[1], fill_value=0, dtype='int64', name='i')
-        node_i = paddle.fluid.layers.nn.elementwise_add(node_i, node_x)
+        if paddle.__version__ >= '2.0.0':
+            node_i = paddle.add(node_i, node_x)
+        else:
+            node_i = paddle.fluid.layers.nn.elementwise_add(node_i, node_x)
         node_ten = paddle.full(shape=[1], fill_value=10, dtype='int64', name='ten')
 
         out_i,out_t = paddle.static.nn.while_loop(cond, body, [node_i, node_ten], name='while_loop')
diff --git a/src/frontends/paddle/tests/test_models/gen_scripts/generate_mul.py b/src/frontends/paddle/tests/test_models/gen_scripts/generate_mul.py
index dbc8c818d2f8c4..f198178ae4a2c8 100644
--- a/src/frontends/paddle/tests/test_models/gen_scripts/generate_mul.py
+++ b/src/frontends/paddle/tests/test_models/gen_scripts/generate_mul.py
@@ -12,7 +12,10 @@ def paddle_matmul(name, x1, x2, x_transpose=False, y_transpose=False):
     with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()):
         node_x1 = paddle.static.data(name='x1', shape=x1.shape, dtype=x1.dtype)
         node_x2 = paddle.static.data(name='x2', shape=x2.shape, dtype=x2.dtype)
-        mul_node = paddle.fluid.layers.matmul(node_x1, node_x2, x_transpose, y_transpose)
+        if paddle.__version__ >= '2.0.0':
+            mul_node = paddle.matmul(node_x1, node_x2, x_transpose, y_transpose)
+        else:
+            mul_node = paddle.fluid.layers.matmul(node_x1, node_x2, x_transpose, y_transpose)
         result = paddle.static.nn.batch_norm(mul_node, use_global_stats=True)
 
         cpu = paddle.static.cpu_places(1)
diff --git a/src/frontends/paddle/tests/test_models/gen_scripts/generate_multi_tensor_split.py b/src/frontends/paddle/tests/test_models/gen_scripts/generate_multi_tensor_split.py
index 8ec25d91c40f17..b98c4a4325810e 100644
--- a/src/frontends/paddle/tests/test_models/gen_scripts/generate_multi_tensor_split.py
+++ b/src/frontends/paddle/tests/test_models/gen_scripts/generate_multi_tensor_split.py
@@ -15,12 +15,19 @@ def create_multi_output_model():
     num_splits = 20
     inp_blob_1 = np.random.randn(2, num_splits, 4, 4).astype(np.float32)
 
-    x = fluid.data(name='x', shape=[2, num_splits, 4, 4], dtype='float32')
-    test_layer = fluid.layers.split(x, num_or_sections=num_splits, dim=1)
-
+    if paddle.__version__ >= '2.0.0':
+        x = paddle.static.data(name='x', shape=[2, num_splits, 4, 4], dtype='float32')
+        test_layer = paddle.split(x, num_or_sections=num_splits, axis=1)
+    else:
+        x = fluid.data(name='x', shape=[2, num_splits, 4, 4], dtype='float32')
+        test_layer = fluid.layers.split(x, num_or_sections=num_splits, dim=1)
+
     var = []
     for i in range(num_splits//2):
-        add = fluid.layers.elementwise_add(test_layer[2*i], test_layer[2*i+1])
+        if paddle.__version__ >= '2.0.0':
+            add = paddle.add(test_layer[2*i], test_layer[2*i+1])
+        else:
+            add = fluid.layers.elementwise_add(test_layer[2*i], test_layer[2*i+1])
         var.append(add)
 
     exe = fluid.Executor(fluid.CPUPlace())
@@ -28,7 +35,7 @@ def create_multi_output_model():
     inp_dict = {'x': inp_blob_1}
     res_paddle = exe.run(fluid.default_main_program(), fetch_list=var, feed=inp_dict)
 
-    fluid.io.save_inference_model(os.path.join(sys.argv[1], "multi_tensor_split"),
+    paddle.fluid.io.save_inference_model(os.path.join(sys.argv[1], "multi_tensor_split"),
                                   list(inp_dict.keys()), var, exe,
                                   model_filename="multi_tensor_split.pdmodel",
                                   params_filename="multi_tensor_split.pdiparams")
diff --git a/src/frontends/paddle/tests/test_models/gen_scripts/generate_place_test_model.py b/src/frontends/paddle/tests/test_models/gen_scripts/generate_place_test_model.py
index da4a7f901d2aef..a7e92ca264a6e8 100644
--- a/src/frontends/paddle/tests/test_models/gen_scripts/generate_place_test_model.py
+++ b/src/frontends/paddle/tests/test_models/gen_scripts/generate_place_test_model.py
@@ -1,10 +1,11 @@
 # Copyright (C) 2018-2023 Intel Corporation
 # SPDX-License-Identifier: Apache-2.0
 
+import paddle
 import numpy as np
 from save_model import saveModel
 import sys
-
+import os
 
 def paddle_rnn_lstm(input_size, hidden_size, layers, direction):
     import paddle
@@ -34,11 +35,40 @@ def paddle_rnn_lstm(input_size, hidden_size, layers, direction):
             feed={'x': np.ones([4, 3, input_size]).astype(np.float32)},
             fetch_list=[y, h, c],
             program=main_program)
-        saveModel("place_test_model", exe, feedkeys=[data],
-                  fetchlist=[y, h, c, relu_1, relu_2, relu_3],
-
inputs=[np.ones([4, 3, input_size]).astype(np.float32)], - outputs=[outs[0], outs[1], outs[2]], target_dir=sys.argv[1], - use_static_api=True) + + if paddle.__version__ >= '2.0.0': + feed_vars = [data] + fetch_vars = [y, h, c, relu_1, relu_2, relu_3] + saveModel("place_test_model", exe, feedkeys=feed_vars, + fetchlist=fetch_vars, + inputs=[np.ones([4, 3, input_size]).astype(np.float32)], + outputs=[outs[0], outs[1], outs[2]], target_dir=sys.argv[1], + use_static_api=True) + path_prefix = os.path.join(sys.argv[1], "place_test_model", "place_test_model") + program, feed_target_names, fetch_targets = paddle.static.io.load_inference_model(path_prefix, exe) + + from paddle.fluid import core + condition = lambda v : not v.persistable and v.name != "transpose_1.tmp_1" and v.name != "transpose_0.tmp_1" + vars_ = list(filter(condition, program.list_vars())) + vars_name = [v.name for v in vars_] + vars_name_file = os.path.join(sys.argv[1], "place_test_model", "vars_name.txt") + with open(vars_name_file, 'w') as f: + for name in vars_name: + f.writelines(f"{name}\n") + + fetch_targets_name = [ft.name for ft in fetch_targets] + outputs_name_file = os.path.join(sys.argv[1], "place_test_model", "outputs_name.txt") + with open(outputs_name_file, 'w') as f: + for name in fetch_targets_name: + f.writelines(f"{name}\n") + + else: + saveModel("place_test_model", exe, feedkeys=[data], + fetchlist=[y, h, c, relu_1, relu_2, relu_3], + inputs=[np.ones([4, 3, input_size]).astype(np.float32)], + outputs=[outs[0], outs[1], outs[2]], target_dir=sys.argv[1], + use_static_api=True) + return outs[0] diff --git a/src/frontends/paddle/tests/test_models/gen_scripts/generate_pool2d.py b/src/frontends/paddle/tests/test_models/gen_scripts/generate_pool2d.py index afec1e06243691..45aa60862d1c79 100644 --- a/src/frontends/paddle/tests/test_models/gen_scripts/generate_pool2d.py +++ b/src/frontends/paddle/tests/test_models/gen_scripts/generate_pool2d.py @@ -8,70 +8,102 @@ import sys from save_model import saveModel -data_type = 'float32' +data_type = "float32" -def pool2d(name : str, x, attrs : dict): + +def pool2d(name: str, x, attrs: dict): import paddle + paddle.enable_static() with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): - node_x = paddle.static.data(name='x', shape=x.shape, dtype=data_type) - out = paddle.fluid.layers.pool2d(node_x, - pool_size=attrs['pool_size'], - pool_type=attrs['pool_type'], - pool_stride=attrs['pool_stride'], - pool_padding=attrs['pool_padding'], - global_pooling=attrs['global_pooling'], - ceil_mode=attrs['ceil_mode'], - exclusive=attrs['exclusive'], - data_format=attrs['data_format']) + node_x = paddle.static.data(name="x", shape=x.shape, dtype=data_type) + + if attrs["pool_type"] == "max": + out = paddle.nn.functional.max_pool2d( + node_x, + kernel_size=attrs["pool_size"], + stride=attrs["pool_stride"], + padding=attrs["pool_padding"], + ceil_mode=attrs["ceil_mode"], + data_format=attrs["data_format"] + ) + else: + out = paddle.nn.functional.avg_pool2d( + node_x, + kernel_size=attrs["pool_size"], + stride=attrs["pool_stride"], + padding=attrs["pool_padding"], + ceil_mode=attrs["ceil_mode"], + data_format=attrs["data_format"] + ) cpu = paddle.static.cpu_places(1) exe = paddle.static.Executor(cpu[0]) # startup program will call initializer to initialize the parameters. 
exe.run(paddle.static.default_startup_program()) - outs = exe.run( - feed={'x': x}, - fetch_list=[out]) + outs = exe.run(feed={"x": x}, fetch_list=[out]) - saveModel(name, exe, feedkeys=['x'], fetchlist=[out], inputs=[x], outputs=[outs[0]], target_dir=sys.argv[1]) + saveModel( + name, + exe, + feedkeys=["x"], + fetchlist=[out], + inputs=[x], + outputs=[outs[0]], + target_dir=sys.argv[1], + ) return outs[0] -def adaptive_pool2d(name : str, x, attrs : dict): + +def adaptive_pool2d(name: str, x, attrs: dict): import paddle + paddle.enable_static() with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): - node_x = paddle.static.data(name='x', shape=x.shape, dtype=data_type) - out = paddle.fluid.layers.adaptive_pool2d( - input=node_x, - pool_size=attrs['pool_size'], - pool_type=attrs['pool_type'], - require_index=attrs['require_index']) + node_x = paddle.static.data(name="x", shape=x.shape, dtype=data_type) + if attrs["pool_type"] == "max": + out = paddle.nn.functional.adaptive_max_pool2d( + x=node_x, + output_size=attrs["pool_size"] + ) + else: + out = paddle.nn.functional.adaptive_avg_pool2d( + x=node_x, + output_size=attrs["pool_size"] + ) cpu = paddle.static.cpu_places(1) exe = paddle.static.Executor(cpu[0]) # startup program will call initializer to initialize the parameters. exe.run(paddle.static.default_startup_program()) - outs = exe.run( - feed={'x': x}, - fetch_list=[out]) + outs = exe.run(feed={"x": x}, fetch_list=[out]) - saveModel(name, exe, feedkeys=['x'], fetchlist=[out], inputs=[x], outputs=[outs[0]], target_dir=sys.argv[1]) + saveModel( + name, + exe, + feedkeys=["x"], + fetchlist=[out], + inputs=[x], + outputs=[outs[0]], + target_dir=sys.argv[1], + ) return outs[0] + def main(): N, C, H, W = 2, 3, 4, 4 - data = np.arange(N*C*H*W).astype(data_type) + data = np.arange(N * C * H * W).astype(data_type) data_NCHW = data.reshape(N, C, H, W) data_NHWC = data.reshape(N, H, W, C) - #print(data_NCHW, data_NCHW.shape) + # print(data_NCHW, data_NCHW.shape) - pooling_types = ['max', 'avg'] + pooling_types = ["max", "avg"] # pool2d for i, pooling_type in enumerate(pooling_types): @@ -79,185 +111,193 @@ def main(): # ceil_mode = False paddle_attrs = { # input=data_NCHW, # shape: [2, 3, 8, 8] - 'pool_size' : [3,3], - 'pool_type' : pooling_type, - 'pool_stride' : [3,3], - 'pool_padding' : [2,1], # it is same as pool_padding = [2,2,1,1] - 'global_pooling' : False, - 'ceil_mode' : False, - 'exclusive' : True, - 'data_format' : "NCHW" + "pool_size": [3, 3], + "pool_type": pooling_type, + "pool_stride": [3, 3], + "pool_padding": [2, 1], # it is same as pool_padding = [2,2,1,1] + "global_pooling": False, + "ceil_mode": False, + "exclusive": True, + "data_format": "NCHW", } # shape of out_1: [2, 3, 4, 3] - pool2d(pooling_type+'Pool_test1', data_NCHW, paddle_attrs) + pool2d(pooling_type + "Pool_test1", data_NCHW, paddle_attrs) # Cecilia: there is a bug of PaddlePaddle in this case. 
# example 2: # ceil_mode = True (different from example 1) paddle_attrs = { - #input=data_NCHW, - 'pool_size':[3,3], - 'pool_type' : pooling_type, - 'pool_stride' : [3,3], - 'pool_padding':[[0,0], [0,0], [2,2], [1,1]], # it is same as pool_padding = [2,2,1,1] - 'global_pooling':False, - 'ceil_mode':True, - 'exclusive':True, - 'data_format':"NCHW" + # input=data_NCHW, + "pool_size": [3, 3], + "pool_type": pooling_type, + "pool_stride": [3, 3], + "pool_padding": [ + [0, 0], + [0, 0], + [2, 2], + [1, 1], + ], # it is same as pool_padding = [2,2,1,1] + "global_pooling": False, + "ceil_mode": True, + "exclusive": True, + "data_format": "NCHW", } # shape of out_2: [2, 3, 4, 4] which is different from out_1 - pool2d(pooling_type+'Pool_test2', data_NCHW, paddle_attrs) + pool2d(pooling_type + "Pool_test2", data_NCHW, paddle_attrs) # example 3: # pool_padding = "SAME" (different from example 1) paddle_attrs = { - #input=data_NCHW, - 'pool_size':[3,3], - 'pool_type' : pooling_type, - 'pool_stride' : [3,3], - 'pool_padding':"SAME", - 'global_pooling':False, - 'ceil_mode':False, - 'exclusive':True, - 'data_format':"NCHW" + # input=data_NCHW, + "pool_size": [3, 3], + "pool_type": pooling_type, + "pool_stride": [3, 3], + "pool_padding": "SAME", + "global_pooling": False, + "ceil_mode": False, + "exclusive": True, + "data_format": "NCHW", } # shape of out_3: [2, 3, 3, 3] which is different from out_1 - pool2d(pooling_type+'Pool_test3', data_NCHW, paddle_attrs) + pool2d(pooling_type + "Pool_test3", data_NCHW, paddle_attrs) # example 4: # pool_padding = "VALID" (different from example 1) paddle_attrs = { - #input=data_NCHW, - 'pool_size':[3,3], - 'pool_type' : pooling_type, - 'pool_stride' : [3,3], - 'pool_padding':"VALID", - 'global_pooling':False, - 'ceil_mode':False, - 'exclusive':True, - 'data_format':"NCHW" + # input=data_NCHW, + "pool_size": [3, 3], + "pool_type": pooling_type, + "pool_stride": [3, 3], + "pool_padding": "VALID", + "global_pooling": False, + "ceil_mode": False, + "exclusive": True, + "data_format": "NCHW", } # shape of out_4: [2, 3, 2, 2] which is different from out_1 - pool2d(pooling_type+'Pool_test4', data_NCHW, paddle_attrs) + pool2d(pooling_type + "Pool_test4", data_NCHW, paddle_attrs) # example 5: # global_pooling = True (different from example 1) # It will be set pool_size = [8,8] and pool_padding = [0,0] actually. 
paddle_attrs = { - #input=data_NCHW, - 'pool_size':[3,3], - 'pool_type' : pooling_type, - 'pool_stride' : [3,3], - 'pool_padding':[2,1], - 'global_pooling':True, - 'ceil_mode':False, - 'exclusive':True, - 'data_format':"NCHW" + # input=data_NCHW, + "pool_size": [3, 3], + "pool_type": pooling_type, + "pool_stride": [3, 3], + "pool_padding": [2, 1], + "global_pooling": True, + "ceil_mode": False, + "exclusive": True, + "data_format": "NCHW", } # shape of out_5: [2, 3, 1, 1] which is different from out_1 - pool2d(pooling_type+'Pool_test5', data_NCHW, paddle_attrs) + pool2d(pooling_type + "Pool_test5", data_NCHW, paddle_attrs) # example 6: # data_format = "NHWC" (different from example 1) paddle_attrs = { - #input=data_NHWC, # shape: [2, 8, 8, 3] - 'pool_size':[3,3], - 'pool_type' : pooling_type, - 'pool_stride' : [3,3], - 'pool_padding':[2,1], - 'global_pooling':False, - 'ceil_mode':False, - 'exclusive':True, - 'data_format':"NHWC" + # input=data_NHWC, # shape: [2, 8, 8, 3] + "pool_size": [3, 3], + "pool_type": pooling_type, + "pool_stride": [3, 3], + "pool_padding": [2, 1], + "global_pooling": False, + "ceil_mode": False, + "exclusive": True, + "data_format": "NHWC", } # shape of out_6: [2, 4, 3, 3] which is different from out_1 - pool2d(pooling_type+'Pool_test6', data_NHWC, paddle_attrs) + pool2d(pooling_type + "Pool_test6", data_NHWC, paddle_attrs) # example 7: # pool_size is [9, 9] paddle_attrs = { - #input=data_NCHW, - 'pool_size':[9,9], - 'pool_type' : pooling_type, - 'pool_stride' : [3,3], - 'pool_padding':[[0,0], [0,0], [2,2], [1,1]], # it is same as pool_padding = [2,2,1,1] - 'global_pooling':False, - 'ceil_mode':True, - 'exclusive':True, - 'data_format':"NCHW" + # input=data_NCHW, + "pool_size": [9, 9], + "pool_type": pooling_type, + "pool_stride": [3, 3], + "pool_padding": [ + [0, 0], + [0, 0], + [2, 2], + [1, 1], + ], # it is same as pool_padding = [2,2,1,1] + "global_pooling": False, + "ceil_mode": True, + "exclusive": True, + "data_format": "NCHW", } - pool2d(pooling_type+'Pool_test7', data_NCHW, paddle_attrs) + pool2d(pooling_type + "Pool_test7", data_NCHW, paddle_attrs) # example 8: # pool_padding size is 1 paddle_attrs = { - 'pool_size':[3,3], - 'pool_type' : pooling_type, - 'pool_stride' : [3,3], - 'pool_padding':2, - 'global_pooling':False, - 'ceil_mode':False, - 'exclusive':True, - 'data_format':"NCHW" + "pool_size": [3, 3], + "pool_type": pooling_type, + "pool_stride": [3, 3], + "pool_padding": 2, + "global_pooling": False, + "ceil_mode": False, + "exclusive": True, + "data_format": "NCHW", } - pool2d(pooling_type+'Pool_test8', data_NCHW, paddle_attrs) + pool2d(pooling_type + "Pool_test8", data_NCHW, paddle_attrs) - #input data for test9 and test10 + # input data for test9 and test10 N_data1, C_data1, H_data1, W_data1 = 2, 3, 8, 8 - data1 = np.arange(N_data1*C_data1*H_data1*W_data1).astype(data_type) + data1 = np.arange(N_data1 * C_data1 * H_data1 * W_data1).astype(data_type) data1_NCHW = data1.reshape(N_data1, C_data1, H_data1, W_data1) # example 9: # pool_padding size is 4: [pad_height_top, pad_height_bottom, pad_width_left, pad_width_right] paddle_attrs = { - 'pool_size':[3,3], - 'pool_type' : pooling_type, - 'pool_stride' : [3,3], - 'pool_padding':[2, 1, 2, 1], - 'global_pooling':False, - 'ceil_mode':False, - 'exclusive':True, - 'data_format':"NCHW" + "pool_size": [3, 3], + "pool_type": pooling_type, + "pool_stride": [3, 3], + "pool_padding": [2, 1, 2, 1], + "global_pooling": False, + "ceil_mode": False, + "exclusive": True, + "data_format": "NCHW", } - 
pool2d(pooling_type+'Pool_test9', data1_NCHW, paddle_attrs) + pool2d(pooling_type + "Pool_test9", data1_NCHW, paddle_attrs) # example 10: # input=data_NCHW and pool_padding is [[0,0], [0,0], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right]] paddle_attrs = { - 'pool_size':[3,3], - 'pool_type' : pooling_type, - 'pool_stride' : [3,3], - 'pool_padding':[[0,0], [0,0], [2, 1], [2, 1]], - 'global_pooling':False, - 'ceil_mode':False, - 'exclusive':True, - 'data_format':"NCHW" + "pool_size": [3, 3], + "pool_type": pooling_type, + "pool_stride": [3, 3], + "pool_padding": [[0, 0], [0, 0], [2, 1], [2, 1]], + "global_pooling": False, + "ceil_mode": False, + "exclusive": True, + "data_format": "NCHW", } - pool2d(pooling_type+'Pool_test10', data1_NCHW, paddle_attrs) + pool2d(pooling_type + "Pool_test10", data1_NCHW, paddle_attrs) # example 11: # input=data_NCHW and poolsize is the multiply by width & height. pool_padding is [[0,0], [0,0], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right]] paddle_attrs = { - 'pool_size': 9, - 'pool_type' : pooling_type, - 'pool_stride' : [3,3], - 'pool_padding':[[0,0], [0,0], [2, 1], [2, 1]], - 'global_pooling':False, - 'ceil_mode':False, - 'exclusive':True, - 'data_format':"NCHW" + "pool_size": 9, + "pool_type": pooling_type, + "pool_stride": [3, 3], + "pool_padding": [[0, 0], [0, 0], [2, 1], [2, 1]], + "global_pooling": False, + "ceil_mode": False, + "exclusive": True, + "data_format": "NCHW", } - pool2d(pooling_type+'Pool_test11', data1_NCHW, paddle_attrs) - + pool2d(pooling_type + "Pool_test11", data1_NCHW, paddle_attrs) # adaptive_pool2d for i, pooling_type in enumerate(pooling_types): paddle_attrs = { - 'pool_size': [3,3], - 'pool_type': pooling_type, - 'require_index': False + "pool_size": [3, 3], + "pool_type": pooling_type } - adaptive_pool2d(pooling_type+'AdaptivePool2D_test1', data_NCHW, paddle_attrs) + adaptive_pool2d(pooling_type + "AdaptivePool2D_test1", data_NCHW, paddle_attrs) if __name__ == "__main__": - main() + main() diff --git a/src/frontends/paddle/tests/test_models/gen_scripts/generate_pow.py b/src/frontends/paddle/tests/test_models/gen_scripts/generate_pow.py index c845fac78d3337..c762c8a6506f92 100644 --- a/src/frontends/paddle/tests/test_models/gen_scripts/generate_pow.py +++ b/src/frontends/paddle/tests/test_models/gen_scripts/generate_pow.py @@ -15,7 +15,11 @@ def paddle_pow(name : str, x, y, data_type): with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): node_x = paddle.static.data(name='x', shape=x.shape, dtype=data_type) - out = paddle.fluid.layers.pow(node_x, y, name='pow') + if paddle.__version__ >= '2.0.0': + y = paddle.to_tensor(y, dtype=data_type) + out = paddle.pow(node_x, y, name='pow') + else: + out = paddle.fluid.layers.pow(node_x, y, name='pow') #FuzzyTest supports int32 & float32 if data_type == "int64": out = paddle.cast(out, "float32") @@ -40,7 +44,10 @@ def paddle_pow_tensor(name : str, x, y, data_type): with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): node_x = paddle.static.data(name='x', shape=x.shape, dtype=data_type) node_y = paddle.static.data(name='y', shape=y.shape, dtype=data_type) - out = paddle.fluid.layers.pow(node_x, node_y, name='pow') + if paddle.__version__ >= '2.0.0': + out = paddle.pow(node_x, node_y, name='pow') + else: + out = paddle.fluid.layers.pow(node_x, node_y, name='pow') out = paddle.cast(out, "float32") cpu = paddle.static.cpu_places(1) diff --git 
a/src/frontends/paddle/tests/test_models/gen_scripts/generate_prior_box.py b/src/frontends/paddle/tests/test_models/gen_scripts/generate_prior_box.py index 12088009701c0f..b0f1c6b12aacbb 100644 --- a/src/frontends/paddle/tests/test_models/gen_scripts/generate_prior_box.py +++ b/src/frontends/paddle/tests/test_models/gen_scripts/generate_prior_box.py @@ -19,18 +19,32 @@ def prior_box(name: str, input_data, image_data, attrs: dict): Image = paddle.static.data( name='Image', shape=image_data.shape, dtype=image_data.dtype) - box, var = paddle.fluid.layers.prior_box(Input, - Image, - min_sizes=attrs['min_sizes'], - max_sizes=attrs['max_sizes'], - aspect_ratios=attrs['aspect_ratios'], - variance=attrs['variance'], - flip=attrs['flip'], - clip=attrs['clip'], - steps=attrs['steps'], - offset=attrs['offset'], - name=None, - min_max_aspect_ratios_order=attrs['min_max_aspect_ratios_order']) + if paddle.__version__ >= '2.0.0': + box, var = paddle.vision.ops.prior_box(Input, + Image, + min_sizes=attrs['min_sizes'], + max_sizes=attrs['max_sizes'], + aspect_ratios=attrs['aspect_ratios'], + variance=attrs['variance'], + flip=attrs['flip'], + clip=attrs['clip'], + steps=attrs['steps'], + offset=attrs['offset'], + name=None, + min_max_aspect_ratios_order=attrs['min_max_aspect_ratios_order']) + else: + box, var = paddle.fluid.layers.prior_box(Input, + Image, + min_sizes=attrs['min_sizes'], + max_sizes=attrs['max_sizes'], + aspect_ratios=attrs['aspect_ratios'], + variance=attrs['variance'], + flip=attrs['flip'], + clip=attrs['clip'], + steps=attrs['steps'], + offset=attrs['offset'], + name=None, + min_max_aspect_ratios_order=attrs['min_max_aspect_ratios_order']) cpu = paddle.static.cpu_places(1) exe = paddle.static.Executor(cpu[0]) diff --git a/src/frontends/paddle/tests/test_models/gen_scripts/generate_range.py b/src/frontends/paddle/tests/test_models/gen_scripts/generate_range.py index 0b1dde6c1fa8b2..dee96b6874fe27 100644 --- a/src/frontends/paddle/tests/test_models/gen_scripts/generate_range.py +++ b/src/frontends/paddle/tests/test_models/gen_scripts/generate_range.py @@ -16,7 +16,10 @@ def paddle_range(name : str, x, start, end, step, out_type): with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): node_x = paddle.static.data(name='x', shape=x.shape, dtype='float32') # Range op only support fill_constant input, since dynamic op is not supported in ov - out = paddle.fluid.layers.range(start, end, step, out_type) + if paddle.__version__ >= '2.0.0': + out = paddle.arange(start, end, step, out_type) + else: + out = paddle.fluid.layers.range(start, end, step, out_type) out = paddle.cast(out, np.float32) out = paddle.add(node_x, out) #out = paddle.cast(out, np.float32) diff --git a/src/frontends/paddle/tests/test_models/gen_scripts/generate_reduce_all.py b/src/frontends/paddle/tests/test_models/gen_scripts/generate_reduce_all.py index c24f9fd3eaef74..6f11265a760bfd 100644 --- a/src/frontends/paddle/tests/test_models/gen_scripts/generate_reduce_all.py +++ b/src/frontends/paddle/tests/test_models/gen_scripts/generate_reduce_all.py @@ -16,7 +16,10 @@ def reduce_all(name : str, x, axis=None, keepdim=False): with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): data_x = paddle.static.data(name='x', shape=x.shape, dtype=x.dtype) - reduced = paddle.fluid.layers.reduce_all(data_x, dim=axis, keep_dim=keepdim) + if paddle.__version__ >= '2.0.0': + reduced = paddle.all(data_x, axis=axis, keepdim=keepdim) + else: + reduced = paddle.fluid.layers.reduce_all(data_x, 
dim=axis, keep_dim=keepdim)
         out = paddle.cast(reduced, 'int32')
 
         cpu = paddle.static.cpu_places(1)
diff --git a/src/frontends/paddle/tests/test_models/gen_scripts/generate_relu6.py b/src/frontends/paddle/tests/test_models/gen_scripts/generate_relu6.py
index 2c79c0332a7698..0af6c04ea2d5af 100644
--- a/src/frontends/paddle/tests/test_models/gen_scripts/generate_relu6.py
+++ b/src/frontends/paddle/tests/test_models/gen_scripts/generate_relu6.py
@@ -15,8 +15,11 @@ def relu6(name: str, x, threshold: float = 6.0, data_type='float32'):
 
     with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()):
         node_x = paddle.static.data(name='x', shape=x.shape, dtype=data_type)
-        out = paddle.fluid.layers.relu6(node_x, threshold=threshold, name='relu6')
-
+
+        if paddle.__version__ >= '2.0.0':
+            out = paddle.nn.functional.relu6(node_x, name='relu6')
+        else:
+            out = paddle.fluid.layers.relu6(node_x, threshold=threshold, name='relu6')
         cpu = paddle.static.cpu_places(1)
         exe = paddle.static.Executor(cpu[0])
         # startup program will call initializer to initialize the parameters.
diff --git a/src/frontends/paddle/tests/test_models/gen_scripts/generate_reshape.py b/src/frontends/paddle/tests/test_models/gen_scripts/generate_reshape.py
index 289c2128a0d972..24fbc3f3af351a 100644
--- a/src/frontends/paddle/tests/test_models/gen_scripts/generate_reshape.py
+++ b/src/frontends/paddle/tests/test_models/gen_scripts/generate_reshape.py
@@ -17,7 +17,10 @@ def reshape(name : str, x, out_shape):
 
     with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()):
         node_x = paddle.static.data(name='x', shape=x.shape, dtype=data_type)
-        out = paddle.fluid.layers.reshape(x=node_x, shape=out_shape)
+        if paddle.__version__ >= '2.0.0':
+            out = paddle.reshape(x=node_x, shape=out_shape)
+        else:
+            out = paddle.fluid.layers.reshape(x=node_x, shape=out_shape)
 
         cpu = paddle.static.cpu_places(1)
         exe = paddle.static.Executor(cpu[0])
@@ -42,11 +45,14 @@ def reshape_tensor(name : str, x, out_shape, use_tensor_in_list):
         node_x = paddle.static.data(name='x', shape=x.shape, dtype=data_type)
         if use_tensor_in_list:
             out_shape[0] = paddle.assign(np.array((out_shape[0],)).astype('int32'))
-            out = paddle.fluid.layers.reshape(x=node_x, shape=out_shape)
+            if paddle.__version__ >= '2.0.0':
+                out = paddle.reshape(x=node_x, shape=out_shape)
+            else:
+                out = paddle.fluid.layers.reshape(x=node_x, shape=out_shape)
         else:
             out_shape = np.array(out_shape).astype('int32')
             node_shape = paddle.assign(out_shape)
-            out = paddle.fluid.layers.reshape(x=node_x, shape=node_shape)
+            out = paddle.reshape(x=node_x, shape=node_shape)
 
         out = paddle.pow(out, 1)
         cpu = paddle.static.cpu_places(1)
diff --git a/src/frontends/paddle/tests/test_models/gen_scripts/generate_reverse.py b/src/frontends/paddle/tests/test_models/gen_scripts/generate_reverse.py
deleted file mode 100644
index e993057df8389d..00000000000000
--- a/src/frontends/paddle/tests/test_models/gen_scripts/generate_reverse.py
+++ /dev/null
@@ -1,61 +0,0 @@
-# Copyright (C) 2018-2023 Intel Corporation
-# SPDX-License-Identifier: Apache-2.0
-
-#
-# reverse paddle model generator
-#
-import numpy as np
-from save_model import saveModel
-import sys
-import paddle
-
-def reverse(name: str, x, axis, use_static=True, dtype="float32"):
-    paddle.enable_static()
-
-    with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()):
-        if use_static:
-            node_x = paddle.static.data(name='x', shape=x.shape, dtype=dtype)
-        else:
-            node_x = paddle.fluid.data(name='x', shape=[1, 1, -1, -1], dtype=dtype)
-        out =
paddle.fluid.layers.reverse(node_x, axis) - - cpu = paddle.static.cpu_places(1) - exe = paddle.static.Executor(cpu[0]) - # startup program will call initializer to initialize the parameters. - exe.run(paddle.static.default_startup_program()) - - outs = exe.run( - feed={'x': x}, - fetch_list=[out]) - - saveModel(name, exe, feedkeys=['x'], fetchlist=[out], inputs=[x], outputs=[outs[0]], target_dir=sys.argv[1]) - - return outs[0] - -def main(): - data1 = np.array([0,2], dtype='int64') - reverse("reverse_static_1", data1, 0, True, 'int64') - - data2 = np.array([[0,1,2], [3,4,5], [6,7,8]], dtype='float32') - reverse("reverse_static_2", data2, 1, True, 'float32') - - data3 = np.array([[0,1,2], [3,4,5], [6,7,8]], dtype='float32') - reverse("reverse_static_3", data3, [0, 1], True, 'float32') - - data4 = np.array([[0,1,2], [3,4,5], [6,7,8]], dtype='int32') - reverse("reverse_static_4", data4, -1, True, 'int32') - - data5 = np.random.randn(1, 1, 32, 32).astype('int32') - reverse("reverse_dynamic_1", data5, [2], False, dtype='int32') - - data6 = np.random.randn(1, 1, 64, 64).astype('float32') - reverse("reverse_dynamic_2", data6, [3], False, dtype='float32') - - data7 = np.random.randn(1, 1, 112, 112).astype('float32') - reverse("reverse_dynamic_3", data7, [2,3], False, dtype='float32') - - data8 = np.random.randn(1, 1, 224, 224).astype('int32') - reverse("reverse_dynamic_4", data8, [-2, -1], False, dtype='int32') - -if __name__ == "__main__": - main() diff --git a/src/frontends/paddle/tests/test_models/gen_scripts/generate_scale.py b/src/frontends/paddle/tests/test_models/gen_scripts/generate_scale.py index 2cea7f87e15a12..cacd230574e1d5 100644 --- a/src/frontends/paddle/tests/test_models/gen_scripts/generate_scale.py +++ b/src/frontends/paddle/tests/test_models/gen_scripts/generate_scale.py @@ -34,12 +34,15 @@ def paddle_scale(name : str, x, scale, bias, attrs : dict, data_type): def paddle_scale_tensor(name : str, x, scale, bias, attrs : dict, data_type): - import paddle as paddle + import paddle paddle.enable_static() with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): node_x = paddle.static.data(name='x', shape=x.shape, dtype=data_type) - node_scale = paddle.static.data(name='scale', shape=[1], dtype='float32') + if paddle.__version__ >= '2.5.1': + node_scale = paddle.static.data(name='scale', shape=[], dtype='float32') + else: + node_scale = paddle.static.data(name='scale', shape=[1], dtype='float32') out = paddle.scale(x=node_x, scale=node_scale, bias=bias, bias_after_scale=attrs['bias_after_scale']) #FuzzyTest only support FP32 now, so cast result to fp32 @@ -53,7 +56,10 @@ def paddle_scale_tensor(name : str, x, scale, bias, attrs : dict, data_type): feed={'x': x, 'scale': scale}, fetch_list=[out]) - saveModel(name, exe, feedkeys=['x', 'scale'], fetchlist=[out], inputs=[x, np.array([scale]).astype('float32')], outputs=[outs[0]], target_dir=sys.argv[1]) + if paddle.__version__ >= '2.5.1': + saveModel(name, exe, feedkeys=['x', 'scale'], fetchlist=[out], inputs=[x, np.array(scale).astype('float32')], outputs=[outs[0]], target_dir=sys.argv[1]) + else: + saveModel(name, exe, feedkeys=['x', 'scale'], fetchlist=[out], inputs=[x, np.array([scale]).astype('float32')], outputs=[outs[0]], target_dir=sys.argv[1]) return outs[0] diff --git a/src/frontends/paddle/tests/test_models/gen_scripts/generate_set_value.py b/src/frontends/paddle/tests/test_models/gen_scripts/generate_set_value.py index a3266684a7dcfa..876eb03825bbf1 100644 --- 
a/src/frontends/paddle/tests/test_models/gen_scripts/generate_set_value.py +++ b/src/frontends/paddle/tests/test_models/gen_scripts/generate_set_value.py @@ -167,4 +167,4 @@ def set_value7(x, value, *slice): paddle_set_value("set_value_dynamic2", data, value, set_value7, dtype, starts, ends, steps, is_dynamic=True) if __name__ == "__main__": - main() + main() \ No newline at end of file diff --git a/src/frontends/paddle/tests/test_models/gen_scripts/generate_sigmoid.py b/src/frontends/paddle/tests/test_models/gen_scripts/generate_sigmoid.py index 7b6bc715e6a54f..09c8327f277350 100644 --- a/src/frontends/paddle/tests/test_models/gen_scripts/generate_sigmoid.py +++ b/src/frontends/paddle/tests/test_models/gen_scripts/generate_sigmoid.py @@ -15,7 +15,10 @@ def sigmoid(name: str, x, data_type): with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): node_x = paddle.static.data(name='x', shape=x.shape, dtype=data_type) - out = paddle.fluid.layers.sigmoid(node_x, name='sigmoid') + if paddle.__version__ >= '2.0.0': + out = paddle.nn.functional.sigmoid(node_x, name='sigmoid') + else: + out = paddle.fluid.layers.sigmoid(node_x, name='sigmoid') cpu = paddle.static.cpu_places(1) exe = paddle.static.Executor(cpu[0]) diff --git a/src/frontends/paddle/tests/test_models/gen_scripts/generate_split.py b/src/frontends/paddle/tests/test_models/gen_scripts/generate_split.py index 639ab5b51d14c1..d92d286ec6c57b 100644 --- a/src/frontends/paddle/tests/test_models/gen_scripts/generate_split.py +++ b/src/frontends/paddle/tests/test_models/gen_scripts/generate_split.py @@ -15,8 +15,11 @@ def split(name : str, x, attrs : dict): with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): node_x = paddle.static.data(name='x', shape=x.shape, dtype=x.dtype) - out = paddle.fluid.layers.split(node_x, num_or_sections=attrs['num_or_sections'], dim=attrs['axis']) - + + if paddle.__version__ >= '2.0.0': + out = paddle.split(node_x, num_or_sections=attrs['num_or_sections'], axis=attrs['axis']) + else: + out = paddle.fluid.layers.split(node_x, num_or_sections=attrs['num_or_sections'], dim=attrs['axis']) cpu = paddle.static.cpu_places(1) exe = paddle.static.Executor(cpu[0]) # startup program will call initializer to initialize the parameters. 
@@ -40,7 +43,10 @@ def split_dim_tensor(name : str, x, attrs : dict, dim): with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): node_x = paddle.static.data(name='x', shape=x.shape, dtype=x.dtype) dim_node = paddle.assign(dim) - out = paddle.fluid.layers.split(node_x, num_or_sections=attrs['num_or_sections'], dim=dim_node) + if paddle.__version__ >= '2.0.0': + out = paddle.split(node_x, num_or_sections=attrs['num_or_sections'], axis=dim_node) + else: + out = paddle.fluid.layers.split(node_x, num_or_sections=attrs['num_or_sections'], dim=dim_node) cpu = paddle.static.cpu_places(1) exe = paddle.static.Executor(cpu[0]) @@ -66,8 +72,10 @@ def split_test_list_tensor(name : str, x, attrs : dict): node_x = paddle.static.data(name='x', shape=x.shape, dtype=x.dtype) section = attrs['num_or_sections'] section[0] = paddle.assign(np.array((section[0],)).astype('int32')) - out = paddle.fluid.layers.split(node_x, num_or_sections=section, dim=attrs['axis']) - + if paddle.__version__ >= '2.0.0': + out = paddle.split(node_x, num_or_sections=section, axis=attrs['axis']) + else: + out = paddle.fluid.layers.split(node_x, num_or_sections=section, dim=attrs['axis']) cpu = paddle.static.cpu_places(1) exe = paddle.static.Executor(cpu[0]) # startup program will call initializer to initialize the parameters. diff --git a/src/frontends/paddle/tests/test_models/gen_scripts/generate_squeeze.py b/src/frontends/paddle/tests/test_models/gen_scripts/generate_squeeze.py index 08f99ddc3dcdd3..855646d795372d 100644 --- a/src/frontends/paddle/tests/test_models/gen_scripts/generate_squeeze.py +++ b/src/frontends/paddle/tests/test_models/gen_scripts/generate_squeeze.py @@ -16,8 +16,10 @@ def squeeze(name : str, x, axes : list): with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): node_x = paddle.static.data(name='x', shape=x.shape, dtype = data_type) - out = paddle.fluid.layers.squeeze(node_x, axes=axes, name='squeeze') - + if paddle.__version__ >= '2.0.0': + out = paddle.squeeze(node_x, axis=axes, name='squeeze') + else: + out = paddle.fluid.layers.squeeze(node_x, axes=axes, name='squeeze') cpu = paddle.static.cpu_places(1) exe = paddle.static.Executor(cpu[0]) # startup program will call initializer to initialize the parameters. 
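Note: the split and squeeze migrations above rename the legacy dim=/axes= keywords to axis= and move from paddle.fluid.layers to the top-level 2.x API; the semantics of num_or_sections are unchanged. A minimal standalone sketch of the 2.x split call (illustrative only, not part of this patch):

    import numpy as np
    import paddle

    paddle.disable_static()  # eager mode, the default in 2.x
    x = paddle.to_tensor(np.arange(12).reshape(2, 6).astype('float32'))
    # num_or_sections=3 yields three equal [2, 2] slices along axis 1;
    # a list such as [1, 2, 3] would give unequal section widths instead.
    a, b, c = paddle.split(x, num_or_sections=3, axis=1)
    print(a.shape, b.shape, c.shape)  # [2, 2] [2, 2] [2, 2]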
diff --git a/src/frontends/paddle/tests/test_models/gen_scripts/generate_strided_slice.py b/src/frontends/paddle/tests/test_models/gen_scripts/generate_strided_slice.py index b9517b6add48c4..d651da8dfb0774 100644 --- a/src/frontends/paddle/tests/test_models/gen_scripts/generate_strided_slice.py +++ b/src/frontends/paddle/tests/test_models/gen_scripts/generate_strided_slice.py @@ -11,124 +11,145 @@ def strided_slice(name: str, input_data, attrs: dict): import paddle + paddle.enable_static() with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): Input = paddle.static.data( - name='x', shape=input_data.shape, dtype=input_data.dtype) - - out = paddle.fluid.layers.strided_slice(Input, axes=attrs['axes'], - starts=attrs['starts'], - ends=attrs['ends'], - strides=attrs['strides']) + name="x", shape=input_data.shape, dtype=input_data.dtype + ) + + if paddle.__version__ >= '2.0.0': + out = paddle.strided_slice( + Input, + axes=attrs["axes"], + starts=attrs["starts"], + ends=attrs["ends"], + strides=attrs["strides"], + ) + else: + out = paddle.fluid.layers.strided_slice(Input, axes=attrs['axes'], + starts=attrs['starts'], + ends=attrs['ends'], + strides=attrs['strides']) cpu = paddle.static.cpu_places(1) exe = paddle.static.Executor(cpu[0]) # startup program will call initializer to initialize the parameters. exe.run(paddle.static.default_startup_program()) - outs = exe.run( - feed={'x': input_data}, - fetch_list=[out]) + outs = exe.run(feed={"x": input_data}, fetch_list=[out]) # Save inputs in order of OpenVINO model, to facilite Fuzzy test, # which accepts inputs and outputs in this order as well. - saveModel(name, exe, feedkeys=['x'], fetchlist=[out], - inputs=[input_data], outputs=[outs[0]], target_dir=sys.argv[1]) + saveModel( + name, + exe, + feedkeys=["x"], + fetchlist=[out], + inputs=[input_data], + outputs=[outs[0]], + target_dir=sys.argv[1], + ) return outs if __name__ == "__main__": - strided_slice_input1_1 = { - 'name': "strided_slice_input1_1", - 'axes': np.array([0]).astype('int32').tolist(), - 'starts': np.array([-4]).astype('int32').tolist(), - 'ends': np.array([-3]).astype('int32').tolist(), - 'strides': np.array([1]).astype('int32').tolist() + "name": "strided_slice_input1_1", + "axes": np.array([0]).astype("int32").tolist(), + "starts": np.array([-4]).astype("int32").tolist(), + "ends": np.array([-3]).astype("int32").tolist(), + "strides": np.array([1]).astype("int32").tolist(), } strided_slice_input1_2 = { - 'name': "strided_slice_input1_2", - 'axes': np.array([0]).astype('int32').tolist(), - 'starts': np.array([3]).astype('int32').tolist(), - 'ends': np.array([8]).astype('int32').tolist(), - 'strides': np.array([1]).astype('int32').tolist() + "name": "strided_slice_input1_2", + "axes": np.array([0]).astype("int32").tolist(), + "starts": np.array([3]).astype("int32").tolist(), + "ends": np.array([8]).astype("int32").tolist(), + "strides": np.array([1]).astype("int32").tolist(), } strided_slice_input1_3 = { - 'name': "strided_slice_input1_3", - 'axes': np.array([0]).astype('int32').tolist(), - 'starts': np.array([5]).astype('int32').tolist(), - 'ends': np.array([0]).astype('int32').tolist(), - 'strides': np.array([-1]).astype('int32').tolist() + "name": "strided_slice_input1_3", + "axes": np.array([0]).astype("int32").tolist(), + "starts": np.array([5]).astype("int32").tolist(), + "ends": np.array([0]).astype("int32").tolist(), + "strides": np.array([-1]).astype("int32").tolist(), } strided_slice_input1_4 = { - 'name': "strided_slice_input1_4", - 
'axes': np.array([0]).astype('int32').tolist(), - 'starts': np.array([-1]).astype('int32').tolist(), - 'ends': np.array([-3]).astype('int32').tolist(), - 'strides': np.array([-1]).astype('int32').tolist() + "name": "strided_slice_input1_4", + "axes": np.array([0]).astype("int32").tolist(), + "starts": np.array([-1]).astype("int32").tolist(), + "ends": np.array([-3]).astype("int32").tolist(), + "strides": np.array([-1]).astype("int32").tolist(), } strided_slice_input2_1 = { - 'name': "strided_slice_input2_1", - 'axes': np.array([0, 1, 2]).astype('int32').tolist(), - 'starts': np.array([1, 0, 0]).astype('int32').tolist(), - 'ends': np.array([2, 1, 3]).astype('int32').tolist(), - 'strides': np.array([1, 1, 1]).astype('int32').tolist() + "name": "strided_slice_input2_1", + "axes": np.array([0, 1, 2]).astype("int32").tolist(), + "starts": np.array([1, 0, 0]).astype("int32").tolist(), + "ends": np.array([2, 1, 3]).astype("int32").tolist(), + "strides": np.array([1, 1, 1]).astype("int32").tolist(), } strided_slice_input2_2 = { - 'name': "strided_slice_input2_2", - 'axes': np.array([0, 1, 2]).astype('int32').tolist(), - 'starts': np.array([1, -1, 0]).astype('int32').tolist(), - 'ends': np.array([2, -3, 3]).astype('int32').tolist(), - 'strides': np.array([1, -1, 1]).astype('int32').tolist() + "name": "strided_slice_input2_2", + "axes": np.array([0, 1, 2]).astype("int32").tolist(), + "starts": np.array([1, -1, 0]).astype("int32").tolist(), + "ends": np.array([2, -3, 3]).astype("int32").tolist(), + "strides": np.array([1, -1, 1]).astype("int32").tolist(), } strided_slice_input2_3 = { - 'name': "strided_slice_input2_3", - 'axes': np.array([0, 1, 2]).astype('int32').tolist(), - 'starts': np.array([1, 0, 0]).astype('int32').tolist(), - 'ends': np.array([2, 2, 3]).astype('int32').tolist(), - 'strides': np.array([1, 1, 1]).astype('int32').tolist() + "name": "strided_slice_input2_3", + "axes": np.array([0, 1, 2]).astype("int32").tolist(), + "starts": np.array([1, 0, 0]).astype("int32").tolist(), + "ends": np.array([2, 2, 3]).astype("int32").tolist(), + "strides": np.array([1, 1, 1]).astype("int32").tolist(), } strided_slice_input3_1 = { - 'name': "strided_slice_input3_1", - 'axes': np.array([1]).astype('int32').tolist(), - 'starts': np.array([1]).astype('int32').tolist(), - 'ends': np.array([2]).astype('int32').tolist(), - 'strides': np.array([1]).astype('int32').tolist() + "name": "strided_slice_input3_1", + "axes": np.array([1]).astype("int32").tolist(), + "starts": np.array([1]).astype("int32").tolist(), + "ends": np.array([2]).astype("int32").tolist(), + "strides": np.array([1]).astype("int32").tolist(), } strided_slice_input3_2 = { - 'name': "strided_slice_input3_2", - 'axes': np.array([1]).astype('int32').tolist(), - 'starts': np.array([-1]).astype('int32').tolist(), - 'ends': np.array([-2]).astype('int32').tolist(), - 'strides': np.array([-1]).astype('int32').tolist() + "name": "strided_slice_input3_2", + "axes": np.array([1]).astype("int32").tolist(), + "starts": np.array([-1]).astype("int32").tolist(), + "ends": np.array([-2]).astype("int32").tolist(), + "strides": np.array([-1]).astype("int32").tolist(), } - strided_slice_input1_list = [strided_slice_input1_1, - strided_slice_input1_2, strided_slice_input1_3, strided_slice_input1_4] + strided_slice_input1_list = [ + strided_slice_input1_1, + strided_slice_input1_2, + strided_slice_input1_3, + strided_slice_input1_4, + ] - strided_slice_input2_list = [strided_slice_input2_1, - strided_slice_input2_2, strided_slice_input2_3] + 
strided_slice_input2_list = [ + strided_slice_input2_1, + strided_slice_input2_2, + strided_slice_input2_3, + ] - strided_slice_input3_list = [ - strided_slice_input3_1, strided_slice_input3_2] + strided_slice_input3_list = [strided_slice_input3_1, strided_slice_input3_2] - input1 = np.random.rand(100).astype('float32') + input1 = np.random.rand(100).astype("float32") for item in strided_slice_input1_list: - pred_paddle = strided_slice(item['name'], input1, item) + pred_paddle = strided_slice(item["name"], input1, item) - input2 = np.random.rand(5, 5, 5).astype('int32') + input2 = np.random.rand(5, 5, 5).astype("int32") for item in strided_slice_input2_list: - pred_paddle = strided_slice(item['name'], input2, item) + pred_paddle = strided_slice(item["name"], input2, item) - input3 = np.random.rand(1, 100, 1).astype('float32') + input3 = np.random.rand(1, 100, 1).astype("float32") for item in strided_slice_input3_list: - pred_paddle = strided_slice(item['name'], input3, item) + pred_paddle = strided_slice(item["name"], input3, item) diff --git a/src/frontends/paddle/tests/test_models/gen_scripts/generate_swish.py b/src/frontends/paddle/tests/test_models/gen_scripts/generate_swish.py index 9d7d68ae0ec8ed..cd83bc22a16652 100644 --- a/src/frontends/paddle/tests/test_models/gen_scripts/generate_swish.py +++ b/src/frontends/paddle/tests/test_models/gen_scripts/generate_swish.py @@ -3,22 +3,25 @@ # import numpy as np from save_model import saveModel -import paddle as pdpd +import paddle import sys -def swish(name: str, x, data_type, input_beta): - pdpd.enable_static() +def swish(name: str, x, data_type): + paddle.enable_static() - with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()): - node_x = pdpd.static.data( + with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): + node_x = paddle.static.data( name='input_x', shape=x.shape, dtype=data_type) - out = pdpd.fluid.layers.swish(x=node_x, beta=input_beta, name='swish') + if paddle.__version__ >= '2.0.0': + out = paddle.nn.functional.swish(x=node_x, name='swish') + else: + out = paddle.fluid.layers.swish(x=node_x, name='swish') - cpu = pdpd.static.cpu_places(1) - exe = pdpd.static.Executor(cpu[0]) + cpu = paddle.static.cpu_places(1) + exe = paddle.static.Executor(cpu[0]) # startup program will call initializer to initialize the parameters. 
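The swish gate above is slightly more than a rename: `paddle.nn.functional.swish` in 2.0+ no longer accepts a `beta` argument (beta is fixed at 1.0), which is why the generator drops `input_beta` below and both generated models now effectively use beta = 1. A hedged sketch of the equivalence, assuming only the public swish APIs:

import paddle

def swish_compat(x):
    # 2.0+ fixes beta at 1.0; the legacy fluid layer exposed it as a parameter.
    if paddle.__version__ >= '2.0.0':
        return paddle.nn.functional.swish(x)
    return paddle.fluid.layers.swish(x, beta=1.0)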
- exe.run(pdpd.static.default_startup_program()) + exe.run(paddle.static.default_startup_program()) outs = exe.run( feed={'input_x': x}, @@ -34,11 +37,11 @@ def main(): data_type = 'float32' input_beta = 1.0 x = np.random.randn(2, 3).astype(data_type) - swish("swish_default_params", x, data_type, input_beta) + swish("swish_default_params", x, data_type) input_beta = 2.0 x = np.random.randn(2, 3).astype(data_type) - swish("swish_beta", x, data_type, input_beta) + swish("swish_beta", x, data_type) if __name__ == "__main__": diff --git a/src/frontends/paddle/tests/test_models/gen_scripts/generate_tile.py b/src/frontends/paddle/tests/test_models/gen_scripts/generate_tile.py index bf9089d5c710fb..84174906b39609 100644 --- a/src/frontends/paddle/tests/test_models/gen_scripts/generate_tile.py +++ b/src/frontends/paddle/tests/test_models/gen_scripts/generate_tile.py @@ -8,7 +8,6 @@ from save_model import saveModel import paddle import sys -from paddle.fluid.layers.tensor import fill_constant def paddle_tile(name: str, x, repeat_times, to_tensor=False, tensor_list=False): @@ -24,7 +23,11 @@ def paddle_tile(name: str, x, repeat_times, to_tensor=False, tensor_list=False): repeat_times_list = [] if tensor_list: for i in repeat_times: - temp_out = fill_constant([1], "int32", i, force_cpu=True) + if paddle.__version__ >= '2.0.0': + temp_out = paddle.full([1], i, "int32").cpu() + else: + temp_out = paddle.fluid.layers.tensor.fill_constant([1], "int32", i, force_cpu=True) + repeat_times_list.append(temp_out) else: repeat_times_list = repeat_times diff --git a/src/frontends/paddle/tests/test_models/gen_scripts/generate_unsqueeze.py b/src/frontends/paddle/tests/test_models/gen_scripts/generate_unsqueeze.py index 1260e2cb2040ca..50ef4693635514 100644 --- a/src/frontends/paddle/tests/test_models/gen_scripts/generate_unsqueeze.py +++ b/src/frontends/paddle/tests/test_models/gen_scripts/generate_unsqueeze.py @@ -16,7 +16,10 @@ def unsqueeze(name : str, x, axes : list): with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): node_x = paddle.static.data(name='x', shape=x.shape, dtype = data_type) - out = paddle.fluid.layers.unsqueeze(node_x, axes = axes, name = 'unsqueeze') + if paddle.__version__ >= '2.0.0': + out = paddle.unsqueeze(node_x, axis=axes, name='unsqueeze') + else: + out = paddle.fluid.layers.unsqueeze(node_x, axes=axes, name='unsqueeze') cpu = paddle.static.cpu_places(1) exe = paddle.static.Executor(cpu[0]) diff --git a/src/frontends/paddle/tests/test_models/gen_scripts/generate_where.py b/src/frontends/paddle/tests/test_models/gen_scripts/generate_where.py index 3f6a8d89903917..3dbde8def410d3 100644 --- a/src/frontends/paddle/tests/test_models/gen_scripts/generate_where.py +++ b/src/frontends/paddle/tests/test_models/gen_scripts/generate_where.py @@ -22,7 +22,10 @@ def where(name, test_x, test_y, test_cond): Cond_Node = paddle.static.data( name='cond', shape=test_cond.shape, dtype=test_cond.dtype) - Cond_Node_bl = paddle.fluid.layers.cast(Cond_Node, "bool") + if paddle.__version__ >= '2.0.0': + Cond_Node_bl = paddle.cast(Cond_Node, "bool") + else: + Cond_Node_bl = paddle.fluid.layers.cast(Cond_Node, "bool") out = paddle.where(Cond_Node_bl, X_Node, Y_Node) cpu = paddle.static.cpu_places(1) diff --git a/src/frontends/paddle/tests/test_models/gen_scripts/generate_where_index.py b/src/frontends/paddle/tests/test_models/gen_scripts/generate_where_index.py index 369162cbdba38e..b7c1ee8258f294 100644 --- 
a/src/frontends/paddle/tests/test_models/gen_scripts/generate_where_index.py +++ b/src/frontends/paddle/tests/test_models/gen_scripts/generate_where_index.py @@ -16,7 +16,10 @@ def where_index(name: str, x, force_boolean=False): with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()): node_x = paddle.static.data(name='x', shape=x.shape, dtype=x.dtype) if force_boolean: - node_x_bl = paddle.fluid.layers.cast(node_x, "bool") + if paddle.__version__ >= '2.0.0': + node_x_bl = paddle.cast(node_x, "bool") + else: + node_x_bl = paddle.fluid.layers.cast(node_x, "bool") out = paddle.nonzero(node_x_bl) else: out = paddle.nonzero(node_x) diff --git a/src/frontends/paddle/tests/test_models/gen_scripts/nms.py b/src/frontends/paddle/tests/test_models/gen_scripts/nms.py index d4123e239f6f3d..fc74071799814d 100644 --- a/src/frontends/paddle/tests/test_models/gen_scripts/nms.py +++ b/src/frontends/paddle/tests/test_models/gen_scripts/nms.py @@ -9,30 +9,32 @@ import sys from save_model import saveModel, exportModel, print_alike +from paddle.fluid import data_feeder + # bboxes shape (N, M, 4) if shared else (M, C, 4) # scores shape (N, C, M) if shared else (M, C) def NMS(name: str, bboxes, scores, attrs: dict, rois_num=None, verbose=False): - import paddle as pdpd + import paddle from ops import multiclass_nms as multiclass_nms from ops import matrix_nms as matrix_nms - pdpd.enable_static() + paddle.enable_static() - with pdpd.static.program_guard(pdpd.static.Program(), - pdpd.static.Program()): + with paddle.static.program_guard(paddle.static.Program(), + paddle.static.Program()): # make model with inputs of dynamic shape - node_boxes = pdpd.static.data(name='bboxes', + node_boxes = paddle.static.data(name='bboxes', shape=[-1, -1, 4], dtype=bboxes.dtype, lod_level=1) - node_scores = pdpd.static.data(name='scores', + node_scores = paddle.static.data(name='scores', shape=[-1] * len(scores.shape), dtype=scores.dtype, lod_level=1) node_rois_num = None if rois_num is not None: - node_rois_num = pdpd.static.data(name='rois_num', + node_rois_num = paddle.static.data(name='rois_num', shape=rois_num.shape, dtype=rois_num.dtype, lod_level=1) @@ -67,14 +69,14 @@ def NMS(name: str, bboxes, scores, attrs: dict, rois_num=None, verbose=False): output = [] for x in nms_outputs: if x is not None: - if x.dtype==pdpd.int32 or x.dtype==pdpd.int64: - x = pdpd.cast(x, "float32") + if x.dtype==paddle.int32 or x.dtype==paddle.int64: + x = paddle.cast(x, "float32") output.append(x) - cpu = pdpd.static.cpu_places(1) - exe = pdpd.static.Executor(cpu[0]) + cpu = paddle.static.cpu_places(1) + exe = paddle.static.Executor(cpu[0]) # startup program will call initializer to initialize the parameters. 
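The `data_feeder` import added at the top of nms.py feeds the dtype gate further below: `convert_dtype` maps a paddle dtype enum to its numpy-compatible string so the fetched LoD output can be cast back, and the version gate merely switches between the aliased import and the fully qualified `paddle.fluid` path to the same helper. A small sketch, assuming `convert_dtype` keeps its documented behaviour:

import numpy as np
import paddle
from paddle.fluid import data_feeder

# e.g. paddle.int64 -> 'int64', directly usable as a numpy dtype string
index = np.zeros(3).astype(data_feeder.convert_dtype(paddle.int64))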
- exe.run(pdpd.static.default_startup_program()) + exe.run(paddle.static.default_startup_program()) fetch_vars = [x for x in output if x is not None] feed_dict = {'bboxes': bboxes, 'scores': scores} @@ -92,9 +94,12 @@ def NMS(name: str, bboxes, scores, attrs: dict, rois_num=None, verbose=False): out = np.array(output_lod.pop(0)) nms_rois_num = np.array( output_lod.pop(0)) if output[1] is not None else None - index = np.array(output_lod.pop(0)).astype(pdpd.fluid.data_feeder.convert_dtype( - output[2].dtype)) if output[2] is not None else None - + if paddle.__version__ >= '2.0.0': + index = np.array(output_lod.pop(0)).astype(data_feeder.convert_dtype( + output[2].dtype)) if output[2] is not None else None + else: + index = np.array(output_lod.pop(0)).astype(paddle.fluid.data_feeder.convert_dtype( + output[2].dtype)) if output[2] is not None else None # Save inputs in order of OpenVINO model, to facilite Fuzzy test, # which accepts inputs and outputs in this order as well. output_np = [out, nms_rois_num, index] From e4c38e3afd519dd9e26ac06810b442102b7b215f Mon Sep 17 00:00:00 2001 From: Vitaliy Urusovskij Date: Wed, 6 Dec 2023 14:36:43 +0400 Subject: [PATCH 08/13] Port `tests/` to API2.0 (#21476) * Port properties in time and stress tests to API2.0 * Port CC tests infer_tool.py to API2.0 * Update tests/conditional_compilation/tools/infer_tool.py Co-authored-by: Ilya Lavrenov --------- Co-authored-by: Ilya Lavrenov --- .../tools/infer_tool.py | 25 +++++++++---------- .../common/ie_pipelines/pipelines.cpp | 2 +- tests/stress_tests/memcheck_tests/tests.cpp | 2 +- .../include/timetests_helper/utils.h | 8 +++--- 4 files changed, 18 insertions(+), 19 deletions(-) diff --git a/tests/conditional_compilation/tools/infer_tool.py b/tests/conditional_compilation/tools/infer_tool.py index c52f5b773ffcc3..b669b9a7349866 100644 --- a/tests/conditional_compilation/tools/infer_tool.py +++ b/tests/conditional_compilation/tools/infer_tool.py @@ -14,20 +14,20 @@ from pathlib import Path import numpy as np -from openvino.inference_engine import IECore +from openvino import Core log.basicConfig(format="[ %(levelname)s ] %(message)s", level=log.INFO, stream=sys.stdout) -def input_preparation(net): +def input_preparation(model): """ Function to prepare reproducible from run to run input data - :param net: IENetwork object + :param model: OpenVINO Model object :return: Dict where keys are layers' names and values are numpy arrays with layers' shapes """ feed_dict = {} - for layer_name, layer_data in net.input_info.items(): + for layer_name, layer_data in model.input_info.items(): feed_dict.update({layer_name: np.ones(shape=layer_data.input_data.shape)}) return feed_dict @@ -40,17 +40,16 @@ def infer(ir_path, device): :return: Dict containing out blob name and out data """ - bin_path = os.path.splitext(ir_path)[0] + '.bin' - ie = IECore() - net = ie.read_network(model=ir_path, weights=bin_path) - exec_net = ie.load_network(net, device) - res = exec_net.infer(inputs=input_preparation(net)) + core = Core() + model = core.read_model(ir_path) + compiled_model = core.compile_model(model, device) + res = compiled_model(input_preparation(model)) - del net - # It's important to delete executable network first to avoid double free in plugin offloading. + del model + # It's important to delete compiled model first to avoid double free in plugin offloading. 
# Issue relates only to hetero and Myriad plugins - del exec_net - del ie + del compiled_model + del core return res diff --git a/tests/stress_tests/common/ie_pipelines/pipelines.cpp b/tests/stress_tests/common/ie_pipelines/pipelines.cpp index 6960799a475218..deb655f4ab5d6f 100644 --- a/tests/stress_tests/common/ie_pipelines/pipelines.cpp +++ b/tests/stress_tests/common/ie_pipelines/pipelines.cpp @@ -123,7 +123,7 @@ inference_with_streams(const std::string &model, const std::string &target_devic ie_api_wrapper->read_network(model); ie_api_wrapper->load_network(target_device); try { - nireq = ie_api_wrapper->get_property(METRIC_KEY(OPTIMAL_NUMBER_OF_INFER_REQUESTS)); + nireq = ie_api_wrapper->get_property(ov::optimal_number_of_infer_requests.name()); } catch (const std::exception &ex) { log_err("Failed to query OPTIMAL_NUMBER_OF_INFER_REQUESTS"); } diff --git a/tests/stress_tests/memcheck_tests/tests.cpp b/tests/stress_tests/memcheck_tests/tests.cpp index ce72b969c4ddc9..672a9456673ef6 100644 --- a/tests/stress_tests/memcheck_tests/tests.cpp +++ b/tests/stress_tests/memcheck_tests/tests.cpp @@ -114,7 +114,7 @@ TEST_P(MemCheckTestSuite, inference_with_streams) { ie_api_wrapper->read_network(model); ie_api_wrapper->load_network(device); try { - nireq = ie_api_wrapper->get_property(METRIC_KEY(OPTIMAL_NUMBER_OF_INFER_REQUESTS)); + nireq = ie_api_wrapper->get_property(ov::optimal_number_of_infer_requests.name()); } catch (const std::exception &ex) { log_err("Failed to query OPTIMAL_NUMBER_OF_INFER_REQUESTS"); } diff --git a/tests/time_tests/include/timetests_helper/utils.h b/tests/time_tests/include/timetests_helper/utils.h index a823127d16934d..d10e6086344ed8 100644 --- a/tests/time_tests/include/timetests_helper/utils.h +++ b/tests/time_tests/include/timetests_helper/utils.h @@ -41,14 +41,14 @@ void setPerformanceConfig(InferenceEngine::Core ie, const std::string &device) { * @brief Function that enables Latency performance hint for specified device (OV API 2) */ void setPerformanceConfig(ov::Core ie, const std::string &device) { - auto supported_config_keys = ie.get_property(device, METRIC_KEY(SUPPORTED_CONFIG_KEYS)).as>(); + auto supported_config_keys = ie.get_property(device, ov::supported_properties); - if (std::find(supported_config_keys.begin(), supported_config_keys.end(), "PERFORMANCE_HINT") == + if (std::find(supported_config_keys.begin(), supported_config_keys.end(), ov::hint::performance_mode) == supported_config_keys.end()) { - std::cerr << "Device " << device << " doesn't support config key 'PERFORMANCE_HINT'!\n" + std::cerr << "Device " << device << " doesn't support " << ov::hint::performance_mode.name() << " property!\n" << "Performance config was not set."; } else - ie.set_property(device, {{CONFIG_KEY(PERFORMANCE_HINT), CONFIG_VALUE(LATENCY)}}); + ie.set_property(device, ov::hint::performance_mode(ov::hint::PerformanceMode::LATENCY)); } } From 635f5d373d1ec9786bf86959a1b781fc8629cbcc Mon Sep 17 00:00:00 2001 From: Pawel Raasz Date: Wed, 6 Dec 2023 13:27:14 +0100 Subject: [PATCH 09/13] [core] Migrate Gather operators to new API (#21390) * Migrate Gather operators to new API * Remove redundant code from reference * Use IF_TYPE_OF macro * Remove unused include * Use common utils in gather base * Fix normalize after merge issues --- .../include/openvino/op/util/gather_base.hpp | 5 +- .../include/openvino/reference/gather.hpp | 6 +- src/core/src/op/gather.cpp | 64 ++-- src/core/src/op/util/gather_base.cpp | 340 ++++++++---------- 4 files changed, 193 insertions(+), 222
deletions(-) diff --git a/src/core/include/openvino/op/util/gather_base.hpp b/src/core/include/openvino/op/util/gather_base.hpp index 2ead83fdac83d2..5a963b3a748a9f 100644 --- a/src/core/include/openvino/op/util/gather_base.hpp +++ b/src/core/include/openvino/op/util/gather_base.hpp @@ -27,10 +27,7 @@ class OPENVINO_API GatherBase : public Op { void validate_and_infer_types() override; virtual int64_t get_axis() const; - OPENVINO_SUPPRESS_DEPRECATED_START - bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override; - - OPENVINO_SUPPRESS_DEPRECATED_END + bool evaluate(TensorVector& outputs, const TensorVector& inputs) const override; bool evaluate_lower(TensorVector& outputs) const override; bool evaluate_upper(TensorVector& outputs) const override; bool evaluate_label(TensorLabelVector& output_labels) const override; diff --git a/src/core/reference/include/openvino/reference/gather.hpp b/src/core/reference/include/openvino/reference/gather.hpp index 4324e0ffc5de4a..a580c20bc35b8c 100644 --- a/src/core/reference/include/openvino/reference/gather.hpp +++ b/src/core/reference/include/openvino/reference/gather.hpp @@ -28,7 +28,6 @@ void gather(const T* const data, int64_t batch_data_mul = shape_size(span(data_shape).subspan(batch_dims)); int64_t batch_out_mul = shape_size(span(out_shape).subspan(batch_dims)); - int64_t batch_indices_mul = shape_size(span(indices_shape).subspan(batch_dims)); int64_t axis_size = data_shape[axis]; int64_t data_offset, out_offset, idx; @@ -40,7 +39,7 @@ void gather(const T* const data, data_offset = batch_data_mul * batch + inner_size * axis_size * outer_idx; out_offset = batch_out_mul * batch + indices_size * inner_size * outer_idx; for (int64_t i = 0; i < indices_size; i++) { - idx = indices[i + batch_indices_mul * batch]; + idx = indices[i + indices_size * batch]; if (idx < 0) idx += axis_size; // for out of bound values have to be filled with zeros @@ -48,9 +47,8 @@ void gather(const T* const data, continue; const auto src_begin = std::next(data, data_offset + inner_size * idx); - const auto src_end = std::next(src_begin, inner_size); const auto out_ptr = std::next(out, out_offset + inner_size * i); - std::copy(src_begin, src_end, out_ptr); + std::copy_n(src_begin, inner_size, out_ptr); } } } diff --git a/src/core/src/op/gather.cpp b/src/core/src/op/gather.cpp index 2680d161a0ad94..e53f290f1633c1 100644 --- a/src/core/src/op/gather.cpp +++ b/src/core/src/op/gather.cpp @@ -5,39 +5,37 @@ #include "openvino/op/gather.hpp" #include "itt.hpp" -#include "openvino/core/validation_util.hpp" +#include "validation_util.hpp" namespace ov { - -op::v1::Gather::Gather(const Output& params, const Output& indices, const Output& axes) +namespace op { +namespace v1 { +Gather::Gather(const Output& params, const Output& indices, const Output& axes) : GatherBase(params, indices, axes) { constructor_validate_and_infer_types(); } -int64_t op::v1::Gather::get_axis() const { - OPENVINO_SUPPRESS_DEPRECATED_START - if (!get_constant_from_source(input_value(2))) { - OPENVINO_SUPPRESS_DEPRECATED_END - return AXIS_NOT_SET_VALUE; - } - return GatherBase::get_axis(); +int64_t Gather::get_axis() const { + return ov::util::get_constant_from_source(input_value(2)) ? 
GatherBase::get_axis() : AXIS_NOT_SET_VALUE; } -std::shared_ptr op::v1::Gather::clone_with_new_inputs(const OutputVector& new_args) const { +std::shared_ptr Gather::clone_with_new_inputs(const OutputVector& new_args) const { OV_OP_SCOPE(v1_Gather_clone_with_new_inputs); check_new_args_count(this, new_args); - return std::make_shared(new_args.at(0), new_args.at(1), new_args.at(2)); + return std::make_shared(new_args.at(0), new_args.at(1), new_args.at(2)); } +} // namespace v1 -op::v7::Gather::Gather(const Output& data, - const Output& indices, - const Output& axis, - const int64_t batch_dims) +namespace v7 { +Gather::Gather(const Output& data, + const Output& indices, + const Output& axis, + const int64_t batch_dims) : GatherBase(data, indices, axis, batch_dims) { constructor_validate_and_infer_types(); } -void op::v7::Gather::validate_and_infer_types() { +void Gather::validate_and_infer_types() { OV_OP_SCOPE(v7_Gather_validate_and_infer_types); NODE_VALIDATION_CHECK(this, get_input_element_type(1).is_integral_number(), @@ -47,37 +45,39 @@ void op::v7::Gather::validate_and_infer_types() { get_input_element_type(2).is_integral_number(), "Axis element type must be of an integral number type."); - op::util::GatherBase::validate_and_infer_types(); + util::GatherBase::validate_and_infer_types(); } -int64_t op::v7::Gather::get_batch_dims() const { +int64_t Gather::get_batch_dims() const { if (m_batch_dims < 0 && get_input_partial_shape(1).rank().is_static()) return m_batch_dims + get_input_partial_shape(1).rank().get_length(); else return m_batch_dims; } -bool op::v7::Gather::visit_attributes(AttributeVisitor& visitor) { +bool Gather::visit_attributes(AttributeVisitor& visitor) { OV_OP_SCOPE(v7_Gather_visit_attributes); visitor.on_attribute("batch_dims", m_batch_dims); return true; } -std::shared_ptr op::v7::Gather::clone_with_new_inputs(const OutputVector& new_args) const { +std::shared_ptr Gather::clone_with_new_inputs(const OutputVector& new_args) const { OV_OP_SCOPE(v7_Gather_clone_with_new_inputs); check_new_args_count(this, new_args); - return std::make_shared(new_args.at(0), new_args.at(1), new_args.at(2), m_batch_dims); + return std::make_shared(new_args.at(0), new_args.at(1), new_args.at(2), m_batch_dims); } +} // namespace v7 -op::v8::Gather::Gather(const Output& data, - const Output& indices, - const Output& axis, - const int64_t batch_dims) +namespace v8 { +Gather::Gather(const Output& data, + const Output& indices, + const Output& axis, + const int64_t batch_dims) : GatherBase(data, indices, axis, batch_dims) { constructor_validate_and_infer_types(); } -void op::v8::Gather::validate_and_infer_types() { +void Gather::validate_and_infer_types() { OV_OP_SCOPE(v8_Gather_validate_and_infer_types); NODE_VALIDATION_CHECK(this, get_input_element_type(1).is_integral_number(), @@ -90,22 +90,24 @@ void op::v8::Gather::validate_and_infer_types() { op::util::GatherBase::validate_and_infer_types(); } -int64_t op::v8::Gather::get_batch_dims() const { +int64_t Gather::get_batch_dims() const { if (m_batch_dims < 0 && get_input_partial_shape(1).rank().is_static()) return m_batch_dims + get_input_partial_shape(1).rank().get_length(); else return m_batch_dims; } -bool op::v8::Gather::visit_attributes(AttributeVisitor& visitor) { +bool Gather::visit_attributes(AttributeVisitor& visitor) { OV_OP_SCOPE(v8_Gather_visit_attributes); visitor.on_attribute("batch_dims", m_batch_dims); return true; } -std::shared_ptr op::v8::Gather::clone_with_new_inputs(const OutputVector& new_args) const { +std::shared_ptr 
Gather::clone_with_new_inputs(const OutputVector& new_args) const { OV_OP_SCOPE(v8_Gather_clone_with_new_inputs); check_new_args_count(this, new_args); - return std::make_shared(new_args.at(0), new_args.at(1), new_args.at(2), m_batch_dims); + return std::make_shared(new_args.at(0), new_args.at(1), new_args.at(2), m_batch_dims); } +} // namespace v8 +} // namespace op } // namespace ov diff --git a/src/core/src/op/util/gather_base.cpp b/src/core/src/op/util/gather_base.cpp index be0d0158dce9fd..6d6f338bb50b7e 100644 --- a/src/core/src/op/util/gather_base.cpp +++ b/src/core/src/op/util/gather_base.cpp @@ -5,145 +5,40 @@ #include "openvino/op/util/gather_base.hpp" #include "bound_evaluate.hpp" +#include "element_visitor.hpp" #include "gather_shape_inference.hpp" #include "itt.hpp" -#include "ngraph/validation_util.hpp" #include "openvino/op/concat.hpp" #include "openvino/op/constant.hpp" #include "openvino/op/squeeze.hpp" #include "openvino/reference/gather.hpp" +#include "validation_util.hpp" -ov::op::util::GatherBase::GatherBase(const Output& data, - const Output& indices, - const Output& axis, - const int64_t batch_dims) - : Op({data, indices, axis}), - m_batch_dims(batch_dims) { - constructor_validate_and_infer_types(); -} - -void ov::op::util::GatherBase::validate_and_infer_types() { - OV_OP_SCOPE(util_GatherBase_validate_and_infer_types); - const auto& data_type = get_input_element_type(0); - - const auto& data_pshape = get_input_partial_shape(0); - const auto& indices_pshape = get_input_partial_shape(1); - const auto& axis_pshape = get_input_partial_shape(2); - std::vector input_shapes = {data_pshape, indices_pshape, axis_pshape}; - const auto output_shapes = shape_infer(this, input_shapes); - set_output_type(0, data_type, output_shapes[0]); -} - -int64_t ov::op::util::GatherBase::get_axis() const { - OPENVINO_SUPPRESS_DEPRECATED_START - const auto& const_op = get_constant_from_source(input_value(2)); - OPENVINO_SUPPRESS_DEPRECATED_END - OPENVINO_ASSERT(const_op, "axis value is not set"); - - int64_t axis = const_op->cast_vector()[0]; - if (axis < 0) { - const auto& data_rank = get_input_partial_shape(0).rank(); - if (data_rank.is_static()) { - axis += data_rank.get_length(); - } - } - return axis; -} - -const int64_t& ov::op::util::GatherBase::get_batch_dims() const { - return m_batch_dims; -} - -void ov::op::util::GatherBase::set_batch_dims(int64_t batch_dims) { - m_batch_dims = batch_dims; -} - -OPENVINO_SUPPRESS_DEPRECATED_START +namespace ov { +namespace op { namespace gather { namespace { -template -bool evaluate(const ngraph::HostTensorPtr& arg0, - const ngraph::HostTensorPtr& arg1, - const ngraph::HostTensorPtr& out, - int64_t axis, - int64_t batch_dims) { - using T = typename ov::element_type_traits::value_type; - ov::Shape params_shape = arg0->get_shape(); - ov::Shape indices_shape = arg1->get_shape(); - ov::Shape out_shape(params_shape.size() + indices_shape.size() - 1 - batch_dims); - int64_t i = 0; - for (; i < axis; i++) { - out_shape[i] = params_shape[i]; - } - for (int64_t j = batch_dims; j < static_cast(indices_shape.size()); i++, j++) { - out_shape[i] = indices_shape[j]; - } - for (int64_t j = axis + 1; j < static_cast(params_shape.size()); i++, j++) { - out_shape[i] = params_shape[j]; - } - - out->set_shape(out_shape); - - if (arg1->get_element_type() == ov::element::i64) { - ov::reference::gather(arg0->get_data_ptr(), - arg1->get_data_ptr(), - out->get_data_ptr(), - arg0->get_shape(), - arg1->get_shape(), - out->get_shape(), - axis, - batch_dims); - } else if 
(arg1->get_element_type() == ov::element::i32) { - ov::reference::gather(arg0->get_data_ptr(), - arg1->get_data_ptr(), - out->get_data_ptr(), - arg0->get_shape(), - arg1->get_shape(), - out->get_shape(), - axis, - batch_dims); - } else { - OPENVINO_THROW("Unexpected type ", arg1->get_element_type().c_type_string(), " for Gather evaluate method."); - } - return true; -} +Shape out_shape_infer(const Shape& data_shape, const Shape& indices_shape, int64_t axis, int64_t batch_dims) { + Shape out_shape; + out_shape.reserve(data_shape.size() + indices_shape.size() - 1 - batch_dims); + auto out_dim_inserter = std::copy_n(data_shape.begin(), axis, std::back_inserter(out_shape)); + out_dim_inserter = std::copy(indices_shape.begin() + batch_dims, indices_shape.end(), out_dim_inserter); + std::copy(std::next(data_shape.begin(), axis + 1), data_shape.end(), out_dim_inserter); -bool evaluate_gather(const ngraph::HostTensorPtr& arg0, - const ngraph::HostTensorPtr& arg1, - const ngraph::HostTensorPtr& out, - int64_t axis, - int64_t batch_dims = 0) { - bool rc = true; - - using ov::element::Type_t; - switch (out->get_element_type()) { - OPENVINO_TYPE_CASE(evaluate_gather, i32, arg0, arg1, out, axis, batch_dims); - OPENVINO_TYPE_CASE(evaluate_gather, i64, arg0, arg1, out, axis, batch_dims); - OPENVINO_TYPE_CASE(evaluate_gather, i8, arg0, arg1, out, axis, batch_dims); - OPENVINO_TYPE_CASE(evaluate_gather, u8, arg0, arg1, out, axis, batch_dims); - OPENVINO_TYPE_CASE(evaluate_gather, u32, arg0, arg1, out, axis, batch_dims); - OPENVINO_TYPE_CASE(evaluate_gather, u64, arg0, arg1, out, axis, batch_dims); - OPENVINO_TYPE_CASE(evaluate_gather, f16, arg0, arg1, out, axis, batch_dims); - OPENVINO_TYPE_CASE(evaluate_gather, f32, arg0, arg1, out, axis, batch_dims); - OPENVINO_TYPE_CASE(evaluate_gather, boolean, arg0, arg1, out, axis, batch_dims); - default: - rc = false; - break; - } - return rc; + return out_shape; } -bool cf_gather_with_subgraph(ov::OutputVector& output_values, - const ov::OutputVector& input_values, - const ov::PartialShape& gather_ps) { +bool cf_gather_with_subgraph(OutputVector& output_values, + const OutputVector& input_values, + const PartialShape& gather_ps) { if (gather_ps.is_dynamic() || input_values.size() != 3) { return false; } - const auto concat = std::dynamic_pointer_cast(input_values[0].get_node_shared_ptr()); - const auto indices = std::dynamic_pointer_cast(input_values[1].get_node_shared_ptr()); - const auto axis = std::dynamic_pointer_cast(input_values[2].get_node_shared_ptr()); + const auto concat = std::dynamic_pointer_cast(input_values[0].get_node_shared_ptr()); + const auto indices = std::dynamic_pointer_cast(input_values[1].get_node_shared_ptr()); + const auto axis = std::dynamic_pointer_cast(input_values[2].get_node_shared_ptr()); if (!concat || !indices || !axis) { return false; @@ -169,8 +64,8 @@ bool cf_gather_with_subgraph(ov::OutputVector& output_values, } const int64_t rank = concat->get_shape()[0]; - const int64_t raw_index = indices->cast_vector()[0]; - const int64_t positive_index = raw_index < 0 ? 
rank + raw_index : raw_index; + const auto raw_index = indices->cast_vector()[0]; + const auto positive_index = ov::util::normalize(raw_index, rank); OPENVINO_ASSERT(positive_index >= 0 && positive_index < rank); // gather takes exactly one element out of the Concat output @@ -179,88 +74,164 @@ bool cf_gather_with_subgraph(ov::OutputVector& output_values, auto gathered = gathered_concat_input; if (indices_shape.empty()) { // gathering a scalar - const auto axis_const = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{1}, {0}); - gathered = std::make_shared(gathered_concat_input, axis_const); + const auto axis_const = v0::Constant::create(element::i64, Shape{1}, {0}); + gathered = std::make_shared(gathered_concat_input, axis_const); } output_values[0] = gathered; return true; } + +bool have_indices_and_axis_bound_set(const util::GatherBase* const gather) { + return ov::have_node_inputs_bounds_set(gather, 1, 2); +} } // namespace + +struct Evaluate : element::NoAction { + using element::NoAction::visit; + + template > + static result_type visit(const Tensor& data, + const Tensor& indices, + Tensor& out, + const Shape& data_shape, + const Shape& indices_shape, + const Shape& out_shape, + const int64_t axis, + const int64_t batch_dims) { + using namespace ov::element; + return IF_TYPE_OF(util_GatherBase_indices_type, + OV_PP_ET_LIST(i32, i64), + EvaluateByIndicesType, + indices.get_element_type(), + data.data(), + indices, + out.data
(), + data_shape, + indices_shape, + out_shape, + axis, + batch_dims); + } + +private: + struct EvaluateByIndicesType : element::NoAction { + using element::NoAction::visit; + + template > + static result_type visit(const DT* const data, + const Tensor& indices, + DT* const output, + const Shape& data_shape, + const Shape& indices_shape, + const Shape& out_shape, + const int64_t axis, + const int64_t batch_dims) { + reference::gather(data, + indices.data(), + output, + data_shape, + indices_shape, + out_shape, + axis, + batch_dims); + return true; + } + }; +}; } // namespace gather -bool ov::op::util::GatherBase::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const { - OV_OP_SCOPE(util_GatherBase_evaluate); - OPENVINO_SUPPRESS_DEPRECATED_START - OPENVINO_ASSERT(ngraph::validate_host_tensor_vector(inputs, 3)); - OPENVINO_ASSERT(ngraph::validate_host_tensor_vector(outputs, 1)); - OPENVINO_SUPPRESS_DEPRECATED_END - int64_t axis = 0; - switch (inputs[2]->get_element_type()) { - case element::Type_t::i32: - axis = inputs[2]->get_data_ptr()[0]; - break; - case element::Type_t::i64: - axis = inputs[2]->get_data_ptr()[0]; - break; - case element::Type_t::i8: - axis = inputs[2]->get_data_ptr()[0]; - break; - case element::Type_t::i16: - axis = inputs[2]->get_data_ptr()[0]; - break; - case element::Type_t::u8: - axis = inputs[2]->get_data_ptr()[0]; - break; - case element::Type_t::u16: - axis = inputs[2]->get_data_ptr()[0]; - break; - case element::Type_t::u32: - axis = inputs[2]->get_data_ptr()[0]; - break; - case element::Type_t::u64: - axis = inputs[2]->get_data_ptr()[0]; - break; - default: - OPENVINO_THROW("axis must be of integral data type."); - } +namespace util { - if (axis < 0) { - const auto input_rank = inputs[0]->get_shape().size(); - axis += input_rank; - } +GatherBase::GatherBase(const Output& data, + const Output& indices, + const Output& axis, + const int64_t batch_dims) + : Op({data, indices, axis}), + m_batch_dims(batch_dims) { + constructor_validate_and_infer_types(); +} + +void GatherBase::validate_and_infer_types() { + OV_OP_SCOPE(util_GatherBase_validate_and_infer_types); + + const auto& data_type = get_input_element_type(0); + const auto output_shapes = shape_infer(this, ov::util::get_node_input_partial_shapes(*this)); + + set_output_type(0, data_type, output_shapes[0]); +} - int64_t batch_dims = m_batch_dims; - if (batch_dims < 0) { - const auto indices_rank = inputs[1]->get_shape().size(); - batch_dims += indices_rank; +int64_t GatherBase::get_axis() const { + const auto& const_op = ov::util::get_constant_from_source(input_value(2)); + OPENVINO_ASSERT(const_op, "axis value is not set"); + + const auto axis = const_op->cast_vector()[0]; + if (axis < 0 && get_input_partial_shape(0).rank().is_static()) { + return axis + get_input_partial_shape(0).rank().get_length(); + } else { + return axis; } +} - return gather::evaluate_gather(inputs[0], inputs[1], outputs[0], axis, batch_dims); +const int64_t& GatherBase::get_batch_dims() const { + return m_batch_dims; } -bool ov::op::util::GatherBase::evaluate_lower(ov::TensorVector& output_values) const { - if (!get_input_tensor(1).has_and_set_bound() || !get_input_tensor(2).has_and_set_bound()) - return false; - return default_lower_bound_evaluator(this, output_values); +void GatherBase::set_batch_dims(int64_t batch_dims) { + m_batch_dims = batch_dims; } -bool ov::op::util::GatherBase::evaluate_upper(ov::TensorVector& output_values) const { - if (!get_input_tensor(1).has_and_set_bound() || 
!get_input_tensor(2).has_and_set_bound()) - return false; - return default_upper_bound_evaluator(this, output_values); +bool GatherBase::evaluate(TensorVector& outputs, const TensorVector& inputs) const { + OV_OP_SCOPE(util_GatherBase_evaluate); + + OPENVINO_ASSERT(outputs.size() == 1); + OPENVINO_ASSERT(inputs.size() == 3); + + OPENVINO_ASSERT(inputs[2].get_element_type().is_integral_number(), "axis must be of integral data type."); + + const auto& data = inputs[0]; + const auto& data_shape = data.get_shape(); + const auto& indices = inputs[1]; + const auto& indices_shape = indices.get_shape(); + + const auto axis = ov::util::normalize(get_tensor_data_as(inputs[2])[0], data_shape.size()); + const auto batch_dims = ov::util::normalize(m_batch_dims, indices_shape.size()); + + const auto out_shape = gather::out_shape_infer(data_shape, indices_shape, axis, batch_dims); + auto& output = outputs[0]; + output.set_shape(out_shape); + + using namespace ov::element; + return IF_TYPE_OF(util_GatherBase_evaluate, + OV_PP_ET_LIST(boolean, f16, f32, i8, i32, i64, u8, u32, u64), + gather::Evaluate, + data.get_element_type(), + data, + indices, + output, + data_shape, + indices_shape, + out_shape, + axis, + batch_dims); } -bool ov::op::util::GatherBase::evaluate_label(TensorLabelVector& output_labels) const { - if (!get_input_tensor(1).has_and_set_bound() || !get_input_tensor(2).has_and_set_bound()) - return false; +bool GatherBase::evaluate_lower(TensorVector& output_values) const { + return gather::have_indices_and_axis_bound_set(this) && default_lower_bound_evaluator(this, output_values); +} + +bool GatherBase::evaluate_upper(TensorVector& output_values) const { + return gather::have_indices_and_axis_bound_set(this) && default_upper_bound_evaluator(this, output_values); +} + +bool GatherBase::evaluate_label(TensorLabelVector& output_labels) const { OPENVINO_SUPPRESS_DEPRECATED_START - return default_label_evaluator(this, output_labels); + return gather::have_indices_and_axis_bound_set(this) && default_label_evaluator(this, output_labels); OPENVINO_SUPPRESS_DEPRECATED_END } -bool ov::op::util::GatherBase::constant_fold(OutputVector& output_values, const OutputVector& input_values) { +bool GatherBase::constant_fold(OutputVector& output_values, const OutputVector& input_values) { // try the regular constant folding just for the Gather node if (Node::constant_fold(output_values, input_values)) { return true; @@ -268,3 +239,6 @@ bool ov::op::util::GatherBase::constant_fold(OutputVector& output_values, const return gather::cf_gather_with_subgraph(output_values, input_values, get_output_partial_shape(0)); } } +} // namespace util +} // namespace op +} // namespace ov From 1f0103699751c5494ae99cbfd356ce67897a0da0 Mon Sep 17 00:00:00 2001 From: Irina Efode Date: Wed, 6 Dec 2023 16:29:03 +0400 Subject: [PATCH 10/13] [CONFORMANCE] Fix reg exp to find models (#21496) --- .../layer_tests_summary/data/default_re_to_find_models.lst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/tests/test_utils/functional_test_utils/layer_tests_summary/data/default_re_to_find_models.lst b/src/tests/test_utils/functional_test_utils/layer_tests_summary/data/default_re_to_find_models.lst index e7c64cbe45dc75..37551cc69f5111 100644 --- a/src/tests/test_utils/functional_test_utils/layer_tests_summary/data/default_re_to_find_models.lst +++ b/src/tests/test_utils/functional_test_utils/layer_tests_summary/data/default_re_to_find_models.lst @@ -1,5 +1,5 @@ ww*_dynamic* ww*_pytorch* ww*_static* -ww42*tfhub* -# 
ww42*tflite* \ No newline at end of file +ww*_tfhub* +# ww*_tflite* \ No newline at end of file From ddc395b6cb7bccba0e18687da65bf58c059f49d8 Mon Sep 17 00:00:00 2001 From: Haiqi Pan Date: Wed, 6 Dec 2023 20:38:10 +0800 Subject: [PATCH 11/13] API 2.0 infer request io blob test case (#20336) * add failToSetUninitializedInputBlob, failToSetUninitializedOutputBlob and canInferWithGetOut * FailedAsyncInferWithNegativeTimeForWait * Revert "FailedAsyncInferWithNegativeTimeForWait" This reverts commit 3b6780102fde29c2da9b37fca732881b61552a8d. * Update src/tests/functional/plugin/shared/src/behavior/ov_infer_request/io_tensor.cpp Co-authored-by: River Li * Update src/tests/functional/plugin/shared/src/behavior/ov_infer_request/io_tensor.cpp Co-authored-by: River Li --------- Co-authored-by: River Li --- .../behavior/ov_infer_request/io_tensor.cpp | 21 +++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/src/tests/functional/plugin/shared/src/behavior/ov_infer_request/io_tensor.cpp b/src/tests/functional/plugin/shared/src/behavior/ov_infer_request/io_tensor.cpp index 6d23a1f615d95f..c205ec0c1f4e26 100644 --- a/src/tests/functional/plugin/shared/src/behavior/ov_infer_request/io_tensor.cpp +++ b/src/tests/functional/plugin/shared/src/behavior/ov_infer_request/io_tensor.cpp @@ -46,6 +46,18 @@ TEST_P(OVInferRequestIOTensorTest, failToSetNullptrForOutput) { ASSERT_THROW(req.set_tensor(output, {}), ov::Exception); } +TEST_P(OVInferRequestIOTensorTest, failToSetUninitializedInputTensor) { + ov::Tensor tensor; + OV_ASSERT_NO_THROW(req = execNet.create_infer_request()); + ASSERT_THROW(req.set_tensor(input, tensor), ov::Exception); +} + +TEST_P(OVInferRequestIOTensorTest, failToSetUninitializedOutputTensor) { + ov::Tensor tensor; + OV_ASSERT_NO_THROW(req = execNet.create_infer_request()); + ASSERT_THROW(req.set_tensor(output, tensor), ov::Exception); +} + TEST_P(OVInferRequestIOTensorTest, canSetAndGetInput) { auto tensor = utils::create_and_fill_tensor(input.get_element_type(), input.get_shape()); OV_ASSERT_NO_THROW(req.set_tensor(input, tensor)); @@ -175,6 +187,15 @@ TEST_P(OVInferRequestIOTensorTest, canInferAfterIOBlobReallocation) { OV_ASSERT_NO_THROW(req.get_tensor(output)); } +TEST_P(OVInferRequestIOTensorTest, canInferWithGetOut) { + ov::Tensor output_tensor; + OV_ASSERT_NO_THROW(output_tensor = req.get_tensor(output)); + OV_ASSERT_NO_THROW(req.infer()); + OV_ASSERT_NO_THROW(req.start_async()); + OV_ASSERT_NO_THROW(req.wait()); + OV_ASSERT_NO_THROW(req.get_tensor(output)); +} + TEST_P(OVInferRequestIOTensorTest, InferStaticNetworkSetChangedInputTensorThrow) { const ov::Shape shape1 = {1, 2, 32, 32}; const ov::Shape shape2 = {1, 2, 40, 40}; From 6391b0c9f64b7b6394adbf4c12d5ac86dbd243e1 Mon Sep 17 00:00:00 2001 From: Xuejun Zhai Date: Wed, 6 Dec 2023 20:55:44 +0800 Subject: [PATCH 12/13] [CPU tests] migrate single layer test cases to be API 2.0 - part 3 (#21445) * [CPU tests] migrate single layer test cases to be API 2.0 - part 3 Signed-off-by: Zhai, Xuejun * [CPU tests] migrate single layer test cases to be API 2.0 - part 3 Signed-off-by: Zhai, Xuejun --------- Signed-off-by: Zhai, Xuejun --- .../single_layer_tests/ctc_greedy_decoder.cpp | 25 +- .../ctc_greedy_decoder_seq_len.cpp | 24 +- .../single_layer_tests/ctc_loss.cpp | 174 ++-- .../functional/single_layer_tests/cum_sum.cpp | 239 +++-- .../custom_op_internal_dyn.cpp | 44 +- .../deformable_convolution.cpp | 845 +++++++++--------- .../single_layer_tests/depth_to_space.cpp | 295 +++--- .../single_layer_tests/detection_output.cpp | 634 
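The cases added below pin down API 2.0 infer-request behaviour: a default-constructed (uninitialized) tensor must be rejected with `ov::Exception`, and fetching the output tensor both before and after `infer()`/`start_async()` must succeed. A rough Python-API analogue of the happy path, in the `from openvino import Core` style used by infer_tool.py above (the model path and the zero-filled f32 input are assumptions, and the sketch presumes static shapes):

import numpy as np
from openvino import Core

core = Core()
compiled = core.compile_model('model.xml', 'CPU')  # hypothetical IR path
req = compiled.create_infer_request()
req.infer({0: np.zeros(compiled.input(0).shape, dtype=np.float32)})
out = req.get_tensor(compiled.output(0))  # fetch the output port's tensor after inference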
+++++++------ .../embedding_bag_offsets_sum.cpp | 171 ++-- .../embedding_bag_packed_sum.cpp | 147 ++- .../embedding_segments_sum.cpp | 169 ++-- .../extract_image_patches.cpp | 110 +-- .../functional/single_layer_tests/eye.cpp | 233 +++-- .../single_layer_tests/fake_quantize.cpp | 329 +++---- 14 files changed, 1629 insertions(+), 1810 deletions(-) diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/ctc_greedy_decoder.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/ctc_greedy_decoder.cpp index 185ca4d29cc438..c75d192c1e98e6 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/ctc_greedy_decoder.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/ctc_greedy_decoder.cpp @@ -4,21 +4,13 @@ #include -#include -#include -#include -#include -#include -#include - +#include "common_test_utils/ov_tensor_utils.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" #include "test_utils/cpu_test_utils.hpp" -using namespace InferenceEngine; using namespace CPUTestUtils; -using namespace ov::test; - -namespace CPULayerTestsDefinitions { +namespace ov { +namespace test { using CtcGreedyDecoderParams = std::tuple(params[0], params[1], mergeRepeated); - ngraph::ResultVector results{std::make_shared(ctcGreedyDecoder)}; - function = std::make_shared(results, params, "CTCGreedyDecoderCPU"); + ov::ResultVector results{std::make_shared(ctcGreedyDecoder)}; + function = std::make_shared(results, params, "CTCGreedyDecoderCPU"); }; - void generate_inputs(const std::vector& targetInputStaticShapes) override { + void generate_inputs(const std::vector& targetInputStaticShapes) override { inputs.clear(); const auto& funcInputs = function->inputs(); for (size_t i = 0; i < funcInputs.size(); ++i) { @@ -173,4 +165,5 @@ INSTANTIATE_TEST_SUITE_P(smoke_CtcGreedyDecoderCPU, CTCGreedyDecoderLayerCPUTest::getTestCaseName); } // namespace -} // namespace CPULayerTestsDefinitions +} // namespace test +} // namespace ov diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/ctc_greedy_decoder_seq_len.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/ctc_greedy_decoder_seq_len.cpp index 46757e4d63d4e4..96e6d7fea65c60 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/ctc_greedy_decoder_seq_len.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/ctc_greedy_decoder_seq_len.cpp @@ -4,20 +4,13 @@ #include -#include -#include -#include -#include -#include - +#include "common_test_utils/ov_tensor_utils.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" #include "test_utils/cpu_test_utils.hpp" -using namespace InferenceEngine; using namespace CPUTestUtils; -using namespace ov::test; - -namespace CPULayerTestsDefinitions { +namespace ov { +namespace test { using CtcGreedyDecoderSeqLenParams = std::tuple(ctcGreedyDecoderSeqLen)}; - function = std::make_shared(results, params, "CTCGreedyDecoderSeqLenCPU"); + ov::ResultVector results{std::make_shared(ctcGreedyDecoderSeqLen)}; + function = std::make_shared(results, params, "CTCGreedyDecoderSeqLenCPU"); }; - void generate_inputs(const std::vector& targetInputStaticShapes) override { + void generate_inputs(const std::vector& targetInputStaticShapes) override { inputs.clear(); const auto& funcInputs = function->inputs(); const auto& dataShape = targetInputStaticShapes[0]; @@ -207,4 +200,5 @@ INSTANTIATE_TEST_SUITE_P(smoke_CtcGreedyDecoderSeqLenCPU, CTCGreedyDecoderSeqLenLayerCPUTest::getTestCaseName); } // namespace -} // namespace CPULayerTestsDefinitions +} 
// namespace test +} // namespace ov diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/ctc_loss.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/ctc_loss.cpp index 8a310a82ec5254..014c2388fef125 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/ctc_loss.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/ctc_loss.cpp @@ -4,34 +4,29 @@ #include -#include -#include -#include -#include -#include - +#include "common_test_utils/ov_tensor_utils.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" #include "test_utils/cpu_test_utils.hpp" -using namespace InferenceEngine; using namespace CPUTestUtils; -using namespace ov::test; - -namespace CPULayerTestsDefinitions { +namespace ov { +namespace test { // N,T,C -using CTCLossShapeParams = std::pair, std::vector>>; - -using CTCLossLayerCPUTestParams = std::tuple; - -class CTCLossLayerCPUTest : public testing::WithParamInterface, virtual public SubgraphBaseTest, public CPUTestsBase { +using CTCLossShapeParams = std::pair, std::vector>>; + +using CTCLossLayerCPUTestParams = std::tuple; + +class CTCLossLayerCPUTest : public testing::WithParamInterface, + virtual public SubgraphBaseTest, + public CPUTestsBase { public: static std::string getTestCaseName(const testing::TestParamInfo& obj) { CTCLossShapeParams shapes; @@ -39,19 +34,20 @@ class CTCLossLayerCPUTest : public testing::WithParamInterface& staticShapes : shapes.second) { - for (ngraph::Shape& shape : staticShapes) { + for (std::vector& staticShapes : shapes.second) { + for (ov::Shape& shape : staticShapes) { size_t N = shape[0]; size_t T = shape[1]; size_t C = shape[2]; @@ -90,10 +87,12 @@ class CTCLossLayerCPUTest : public testing::WithParamInterface types{fPrecision, iPrecision, iPrecision, iPrecision}; + std::vector types{fPrecision, iPrecision, iPrecision, iPrecision}; std::vector partialShapes{inputDynamicShapesValues, shapeN, shapeNT, shapeN}; ov::ParameterVector params; @@ -101,23 +100,29 @@ class CTCLossLayerCPUTest : public testing::WithParamInterface(types[i], partialShapes[i]); params.push_back(param_node); } - auto bankNode = ngraph::op::Constant::create(ngraph::element::i64, ngraph::Shape{ }, {blank}); - - auto ctcLoss = std::make_shared(params[0], params[1], params[2], - params[3], bankNode, preprocessCollapseRepeated, ctcMergeRepeated, unique); - ngraph::ResultVector results{std::make_shared(ctcLoss)}; - function = std::make_shared(results, params, "CTCLossLayerCPUTest"); + auto bankNode = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{}, {blank}); + + auto ctcLoss = std::make_shared(params[0], + params[1], + params[2], + params[3], + bankNode, + preprocessCollapseRepeated, + ctcMergeRepeated, + unique); + ov::ResultVector results{std::make_shared(ctcLoss)}; + function = std::make_shared(results, params, "CTCLossLayerCPUTest"); }; - void generate_inputs(const std::vector& targetInputStaticShapes) override { + void generate_inputs(const std::vector& targetInputStaticShapes) override { inputs.clear(); const auto& funcInputs = function->inputs(); const auto& dataShape = targetInputStaticShapes[0]; const auto N = dataShape[0]; const auto T = dataShape[1]; const auto C = dataShape[2]; - ngraph::Shape shapeN{N}; - ngraph::Shape shapeNT{N, T}; + ov::Shape shapeN{N}; + ov::Shape shapeNT{N, T}; std::mt19937 gen(42); std::uniform_int_distribution dist(1, T); @@ -129,11 +134,7 @@ class CTCLossLayerCPUTest : public testing::WithParamInterface preprocessCollapseRepeated = {true, false}; - const std::vector 
ctcMergeRepeated = {true, false}; - const std::vector unique = {true, false}; - - const std::vector shapes = { - { - // dynamic undifined - { - {-1, -1, -1}, - }, - // target - { - {{3, 6, 8}, {2, 5, 6}, {5, 6, 10}} - } - }, - { - // dynamic lower/upper bound - { - {{1, 10}, {5, 10}, {6, 12}}, - }, - // target - { - {{1, 5, 6}, {10, 10, 12}, {5, 7, 8}} - } - }, - }; +const ov::element::TypeVector fPrecisions = {ov::element::f32}; + +const ov::element::TypeVector iPrecisions = {ov::element::i32, ov::element::i64}; + +const std::vector preprocessCollapseRepeated = {true, false}; +const std::vector ctcMergeRepeated = {true, false}; +const std::vector unique = {true, false}; + +const std::vector shapes = { + {// dynamic undifined + { + {-1, -1, -1}, + }, + // target + {{{3, 6, 8}, {2, 5, 6}, {5, 6, 10}}}}, + {// dynamic lower/upper bound + { + {{1, 10}, {5, 10}, {6, 12}}, + }, + // target + {{{1, 5, 6}, {10, 10, 12}, {5, 7, 8}}}}, +}; - const std::vector blanks = { - 0, 2, 5 - }; +const std::vector blanks = {0, 2, 5}; const auto basicCases = ::testing::Combine(::testing::ValuesIn(shapes), ::testing::ValuesIn(blanks), @@ -244,10 +230,8 @@ const auto basicCases = ::testing::Combine(::testing::ValuesIn(shapes), ::testing::ValuesIn(fPrecisions), ::testing::ValuesIn(iPrecisions)); -INSTANTIATE_TEST_SUITE_P(smoke_CTCLossCPU, - CTCLossLayerCPUTest, - basicCases, - CTCLossLayerCPUTest::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_CTCLossCPU, CTCLossLayerCPUTest, basicCases, CTCLossLayerCPUTest::getTestCaseName); } // namespace -} // namespace CPULayerTestsDefinitions +} // namespace test +} // namespace ov diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/cum_sum.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/cum_sum.cpp index 4f99471511f1ec..144e1485137031 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/cum_sum.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/cum_sum.cpp @@ -2,30 +2,25 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "test_utils/cpu_test_utils.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" -#include "ov_models/builders.hpp" +#include "test_utils/cpu_test_utils.hpp" -using namespace ngraph; -using namespace InferenceEngine; using namespace CPUTestUtils; -using namespace ov; -using namespace test; - -namespace CPULayerTestsDefinitions { +namespace ov { +namespace test { -using cumSumParams = std::tuple< - ngraph::element::Type, // data precision - InputShape, // input shape - std::int64_t, // axis - bool, // exclusive - bool>; // reverse +using cumSumParams = std::tuple; // reverse class CumSumLayerCPUTest : public testing::WithParamInterface, - public SubgraphBaseTest, public CPUTestsBase { + public SubgraphBaseTest, + public CPUTestsBase { public: static std::string getTestCaseName(testing::TestParamInfo obj) { - ngraph::element::Type inputPrecision; + ov::element::Type inputPrecision; InputShape shapes; std::int64_t axis; bool exclusive; @@ -61,11 +56,12 @@ class CumSumLayerCPUTest : public testing::WithParamInterface, for (auto&& shape : inputDynamicShapes) { params.push_back(std::make_shared(inType, shape)); } - auto axisNode = ngraph::opset1::Constant::create(ngraph::element::i32, ngraph::Shape{}, std::vector{axis})->output(0); + auto axisNode = + ov::op::v0::Constant::create(ov::element::i32, ov::Shape{}, std::vector{axis})->output(0); auto cumSum = std::make_shared(params[0], axisNode, exclusive, reverse); - function = std::make_shared(ngraph::NodeVector{ cumSum }, params, 
"CumSumLayerCPUTest"); - functionRefs = ngraph::clone_function(*function); + function = std::make_shared(ov::NodeVector{cumSum}, params, "CumSumLayerCPUTest"); + functionRefs = ov::clone_model(*function); } }; @@ -74,118 +70,121 @@ TEST_P(CumSumLayerCPUTest, CompareWithRefs) { CheckPluginRelatedResults(compiledModel, "CumSum"); } -const ngraph::element::TypeVector inputPrecision = { - ngraph::element::i8, - ngraph::element::bf16, - ngraph::element::f32 -}; +const std::vector inputPrecision = {ov::element::i8, ov::element::bf16, ov::element::f32}; -const std::vector axes = { 0, 1, 2, 3, 4, 5, 6 }; -const std::vector negativeAxes = { -1, -2, -3, -4, -5, -6 }; +const std::vector axes = {0, 1, 2, 3, 4, 5, 6}; +const std::vector negativeAxes = {-1, -2, -3, -4, -5, -6}; -const std::vector exclusive = { true, false }; -const std::vector reverse = { true, false }; +const std::vector exclusive = {true, false}; +const std::vector reverse = {true, false}; const std::vector inShapes = { - {{-1}, - {{16}, {18}, {12}}}, + {{-1}, {{16}, {18}, {12}}}, - {{-1, -1}, - {{9, 15}, {18, 12}, {12, 12}}}, + {{-1, -1}, {{9, 15}, {18, 12}, {12, 12}}}, - {{-1, -1, -1}, - {{16, 10, 12}, {18, 12, 10}, {12, 18, 10}}}, + {{-1, -1, -1}, {{16, 10, 12}, {18, 12, 10}, {12, 18, 10}}}, - {{-1, -1, -1, -1}, - {{18, 20, 14, 12}, {19, 20, 14, 12}, {20, 22, 23, 25}}}, + {{-1, -1, -1, -1}, {{18, 20, 14, 12}, {19, 20, 14, 12}, {20, 22, 23, 25}}}, - {{-1, -1, -1, -1, -1}, - {{2, 4, 6, 2, 4}, {3, 5, 6, 3, 5}, {1, 4, 2, 6, 8}}}, + {{-1, -1, -1, -1, -1}, {{2, 4, 6, 2, 4}, {3, 5, 6, 3, 5}, {1, 4, 2, 6, 8}}}, - {{-1, -1, -1, -1, -1, -1}, - {{2, 4, 6, 2, 4, 2}, {3, 5, 6, 3, 5, 3}, {1, 4, 2, 6, 8, 1}}}, + {{-1, -1, -1, -1, -1, -1}, {{2, 4, 6, 2, 4, 2}, {3, 5, 6, 3, 5, 3}, {1, 4, 2, 6, 8, 1}}}, - {{{-1, -1, -1, -1, -1, -1, -1}}, - {{2, 4, 6, 2, 4, 2, 4}, {3, 5, 6, 3, 5, 3, 5}, {1, 4, 2, 6, 8, 1, 4}}}, + {{{-1, -1, -1, -1, -1, -1, -1}}, {{2, 4, 6, 2, 4, 2, 4}, {3, 5, 6, 3, 5, 3, 5}, {1, 4, 2, 6, 8, 1, 4}}}, {{{2, 5}, {3, 7}, {4, 8}, {5, 7}, {2, 5}, {3, 7}, {1, 2}}, {{2, 4, 6, 5, 4, 3, 1}, {3, 5, 6, 6, 5, 3, 1}, {5, 7, 4, 6, 3, 7, 2}}}, - {{{2, 5}, -1, {4, 8}, -1, -1, {3, 7}, -1}, - {{2, 4, 6, 5, 4, 3, 1}, {3, 5, 6, 6, 5, 3, 1}, {5, 7, 4, 6, 3, 7, 2}}} -}; - -const auto testCasesAxis_0 = ::testing::Combine( - ::testing::ValuesIn(inputPrecision), - ::testing::ValuesIn(inShapes), - ::testing::Values(axes[0]), - ::testing::ValuesIn(exclusive), - ::testing::ValuesIn(reverse) -); - -const auto testCasesAxis_1 = ::testing::Combine( - ::testing::ValuesIn(inputPrecision), - ::testing::ValuesIn(std::vector(inShapes.begin() + 1, inShapes.end())), - ::testing::Values(axes[1]), - ::testing::ValuesIn(exclusive), - ::testing::ValuesIn(reverse) -); - -const auto testCasesAxis_2 = ::testing::Combine( - ::testing::ValuesIn(inputPrecision), - ::testing::ValuesIn(std::vector(inShapes.begin() + 2, inShapes.end())), - ::testing::Values(axes[2]), - ::testing::ValuesIn(exclusive), - ::testing::ValuesIn(reverse) -); - -const auto testCasesAxis_3 = ::testing::Combine( - ::testing::ValuesIn(inputPrecision), - ::testing::ValuesIn(std::vector(inShapes.begin() + 3, inShapes.end())), - ::testing::Values(axes[3]), - ::testing::ValuesIn(exclusive), - ::testing::ValuesIn(reverse) -); - -const auto testCasesAxis_4 = ::testing::Combine( - ::testing::ValuesIn(inputPrecision), - ::testing::ValuesIn(std::vector(inShapes.begin() + 4, inShapes.end())), - ::testing::Values(axes[4]), - ::testing::ValuesIn(exclusive), - ::testing::ValuesIn(reverse) -); - -const auto testCasesAxis_5 = 
::testing::Combine( - ::testing::ValuesIn(inputPrecision), - ::testing::ValuesIn(std::vector(inShapes.begin() + 5, inShapes.end())), - ::testing::Values(axes[5]), - ::testing::ValuesIn(exclusive), - ::testing::ValuesIn(reverse) -); - -const auto testCasesAxis_6 = ::testing::Combine( - ::testing::ValuesIn(inputPrecision), - ::testing::ValuesIn(std::vector(inShapes.begin() + 6, inShapes.end())), - ::testing::Values(axes[6]), - ::testing::ValuesIn(exclusive), - ::testing::ValuesIn(reverse) -); - -const auto testCasesAxis_negative = ::testing::Combine( - ::testing::ValuesIn(inputPrecision), - ::testing::ValuesIn(std::vector(inShapes.begin() + 6, inShapes.end())), - ::testing::ValuesIn(negativeAxes), - ::testing::ValuesIn(exclusive), - ::testing::ValuesIn(reverse) -); - -INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefsNumpy_axis_0, CumSumLayerCPUTest, testCasesAxis_0, CumSumLayerCPUTest::getTestCaseName); -INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefsNumpy_axis_1, CumSumLayerCPUTest, testCasesAxis_1, CumSumLayerCPUTest::getTestCaseName); -INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefsNumpy_axis_2, CumSumLayerCPUTest, testCasesAxis_2, CumSumLayerCPUTest::getTestCaseName); -INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefsNumpy_axis_3, CumSumLayerCPUTest, testCasesAxis_3, CumSumLayerCPUTest::getTestCaseName); -INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefsNumpy_axis_4, CumSumLayerCPUTest, testCasesAxis_4, CumSumLayerCPUTest::getTestCaseName); -INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefsNumpy_axis_5, CumSumLayerCPUTest, testCasesAxis_5, CumSumLayerCPUTest::getTestCaseName); -INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefsNumpy_axis_6, CumSumLayerCPUTest, testCasesAxis_6, CumSumLayerCPUTest::getTestCaseName); -INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefsNumpy_negative_axes, CumSumLayerCPUTest, testCasesAxis_negative, CumSumLayerCPUTest::getTestCaseName); - -} // namespace CPULayerTestsDefinitions + {{{2, 5}, -1, {4, 8}, -1, -1, {3, 7}, -1}, {{2, 4, 6, 5, 4, 3, 1}, {3, 5, 6, 6, 5, 3, 1}, {5, 7, 4, 6, 3, 7, 2}}}}; + +const auto testCasesAxis_0 = ::testing::Combine(::testing::ValuesIn(inputPrecision), + ::testing::ValuesIn(inShapes), + ::testing::Values(axes[0]), + ::testing::ValuesIn(exclusive), + ::testing::ValuesIn(reverse)); + +const auto testCasesAxis_1 = + ::testing::Combine(::testing::ValuesIn(inputPrecision), + ::testing::ValuesIn(std::vector(inShapes.begin() + 1, inShapes.end())), + ::testing::Values(axes[1]), + ::testing::ValuesIn(exclusive), + ::testing::ValuesIn(reverse)); + +const auto testCasesAxis_2 = + ::testing::Combine(::testing::ValuesIn(inputPrecision), + ::testing::ValuesIn(std::vector(inShapes.begin() + 2, inShapes.end())), + ::testing::Values(axes[2]), + ::testing::ValuesIn(exclusive), + ::testing::ValuesIn(reverse)); + +const auto testCasesAxis_3 = + ::testing::Combine(::testing::ValuesIn(inputPrecision), + ::testing::ValuesIn(std::vector(inShapes.begin() + 3, inShapes.end())), + ::testing::Values(axes[3]), + ::testing::ValuesIn(exclusive), + ::testing::ValuesIn(reverse)); + +const auto testCasesAxis_4 = + ::testing::Combine(::testing::ValuesIn(inputPrecision), + ::testing::ValuesIn(std::vector(inShapes.begin() + 4, inShapes.end())), + ::testing::Values(axes[4]), + ::testing::ValuesIn(exclusive), + ::testing::ValuesIn(reverse)); + +const auto testCasesAxis_5 = + ::testing::Combine(::testing::ValuesIn(inputPrecision), + ::testing::ValuesIn(std::vector(inShapes.begin() + 5, inShapes.end())), + ::testing::Values(axes[5]), + ::testing::ValuesIn(exclusive), + 
::testing::ValuesIn(reverse)); + +const auto testCasesAxis_6 = + ::testing::Combine(::testing::ValuesIn(inputPrecision), + ::testing::ValuesIn(std::vector(inShapes.begin() + 6, inShapes.end())), + ::testing::Values(axes[6]), + ::testing::ValuesIn(exclusive), + ::testing::ValuesIn(reverse)); + +const auto testCasesAxis_negative = + ::testing::Combine(::testing::ValuesIn(inputPrecision), + ::testing::ValuesIn(std::vector(inShapes.begin() + 6, inShapes.end())), + ::testing::ValuesIn(negativeAxes), + ::testing::ValuesIn(exclusive), + ::testing::ValuesIn(reverse)); + +INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefsNumpy_axis_0, + CumSumLayerCPUTest, + testCasesAxis_0, + CumSumLayerCPUTest::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefsNumpy_axis_1, + CumSumLayerCPUTest, + testCasesAxis_1, + CumSumLayerCPUTest::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefsNumpy_axis_2, + CumSumLayerCPUTest, + testCasesAxis_2, + CumSumLayerCPUTest::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefsNumpy_axis_3, + CumSumLayerCPUTest, + testCasesAxis_3, + CumSumLayerCPUTest::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefsNumpy_axis_4, + CumSumLayerCPUTest, + testCasesAxis_4, + CumSumLayerCPUTest::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefsNumpy_axis_5, + CumSumLayerCPUTest, + testCasesAxis_5, + CumSumLayerCPUTest::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefsNumpy_axis_6, + CumSumLayerCPUTest, + testCasesAxis_6, + CumSumLayerCPUTest::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefsNumpy_negative_axes, + CumSumLayerCPUTest, + testCasesAxis_negative, + CumSumLayerCPUTest::getTestCaseName); + +} // namespace test +} // namespace ov diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/custom_op_internal_dyn.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/custom_op_internal_dyn.cpp index f7fffe774d90f0..a45a23aa470618 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/custom_op_internal_dyn.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/custom_op_internal_dyn.cpp @@ -2,15 +2,12 @@ // SPDX-License-Identifier: Apache-2.0 // -#include -#include -#include -#include "ov_models/builders.hpp" -#include "ov_models/utils/ov_helpers.hpp" +#include "common_test_utils/ov_tensor_utils.hpp" +#include "openvino/op/op.hpp" +#include "shared_test_classes/base/ov_subgraph.hpp" -using namespace ov::test; - -namespace CPULayerTestsDefinitions { +namespace ov { +namespace test { /* This is a synthetic op that mimics the general behaviour of operations with internal dynamics, i.e. nodes where the the output shapes may only be defined after the actual computations. 
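A concrete example of such an operation in the OpenVINO opset is ov::op::v3::NonZero, whose
   output length depends on the input values. The run-time counterpart of this pattern lives in
   evaluate(), where the output tensor is resized once the real shape is known. A minimal sketch
   (purely illustrative; the actual CustomOp::evaluate() sits in a part of this file the hunks
   below skip over, and the "keep only positive values" behaviour is an assumption made here
   just for the example):

       bool evaluate(ov::TensorVector& outputs, const ov::TensorVector& inputs) const override {
           const float* src = inputs[0].data<float>();
           const size_t in_size = inputs[0].get_size();
           // suppose the op keeps only the positive input values: the output
           // size is known only after scanning the data
           size_t n = 0;
           for (size_t i = 0; i < in_size; ++i)
               n += src[i] > 0.f ? 1 : 0;
           outputs[0].set_shape(ov::Shape{n});  // the dynamic shape is resolved only here
           float* dst = outputs[0].data<float>();
           for (size_t i = 0, j = 0; i < in_size; ++i)
               if (src[i] > 0.f)
                   dst[j++] = src[i];
           return true;
       }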
*/ @@ -26,20 +23,19 @@ class CustomOp : public ov::op::Op { void validate_and_infer_types() override { const auto& inputs_count = input_values().size(); - OPENVINO_ASSERT(inputs_count == 1, - "Input count must be 1, Got: ", - inputs_count); + OPENVINO_ASSERT(inputs_count == 1, "Input count must be 1, Got: ", inputs_count); set_output_size(2); auto shape0 = get_input_partial_shape(0); - auto rank0 = shape0.rank(); + auto rank0 = shape0.rank(); - OPENVINO_ASSERT(rank0.compatible(3), - "The input must be 3D."); + OPENVINO_ASSERT(rank0.compatible(3), "The input must be 3D."); - //here we set undefined shapes since they can only be determined after the actual calculations + // here we set undefined shapes since they can only be determined after the actual calculations set_output_type(0, get_input_element_type(0), ov::PartialShape({ov::Dimension()})); - set_output_type(1, get_input_element_type(0), ov::PartialShape({ov::Dimension(), ov::Dimension(), ov::Dimension()})); + set_output_type(1, + get_input_element_type(0), + ov::PartialShape({ov::Dimension(), ov::Dimension(), ov::Dimension()})); } std::shared_ptr clone_with_new_inputs(const ov::OutputVector& new_args) const override { @@ -88,16 +84,16 @@ class CustomOpCPUTest : public SubgraphBaseTest { ov::ParameterVector inputParams; ov::OutputVector paramsOuts; for (auto&& shape : inputDynamicShapes) { - auto param = std::make_shared(ngraph::element::f32, shape); + auto param = std::make_shared(ov::element::f32, shape); inputParams.push_back(param); paramsOuts.push_back(param); } auto customOp = std::make_shared(paramsOuts); - auto shapeOf = std::make_shared(customOp->output(1)); + auto shapeOf = std::make_shared(customOp->output(1)); - ngraph::ResultVector results{std::make_shared(customOp->output(0)), - std::make_shared(shapeOf)}; - function = std::make_shared(results, inputParams, "customOpTest"); + ov::ResultVector results{std::make_shared(customOp->output(0)), + std::make_shared(shapeOf)}; + function = std::make_shared(results, inputParams, "customOpTest"); } void generate_inputs(const std::vector& targetInputStaticShapes) override { @@ -105,7 +101,8 @@ class CustomOpCPUTest : public SubgraphBaseTest { const auto& funcInputs = function->inputs(); for (size_t i = 0; i < funcInputs.size(); ++i) { const auto& funcInput = funcInputs[i]; - auto tensor = ov::test::utils::create_and_fill_tensor(funcInput.get_element_type(), targetInputStaticShapes[i]); + auto tensor = + ov::test::utils::create_and_fill_tensor(funcInput.get_element_type(), targetInputStaticShapes[i]); inputs.insert({funcInput.get_node_shared_ptr(), tensor}); } } @@ -126,4 +123,5 @@ class CustomOpCPUTest : public SubgraphBaseTest { TEST_F(CustomOpCPUTest, smoke_CustomOpInternalDynamismCPUTest) { run(); } -} // namespace CPULayerTestsDefinitions +} // namespace test +} // namespace ov diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/deformable_convolution.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/deformable_convolution.cpp index cf7b5d7eae4910..73b269b43569d9 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/deformable_convolution.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/deformable_convolution.cpp @@ -2,69 +2,65 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "test_utils/cpu_test_utils.hpp" -#include "ov_models/builders.hpp" -#include "ov_models/utils/ov_helpers.hpp" -#include +#include "common_test_utils/ov_tensor_utils.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" +#include 
"test_utils/cpu_test_utils.hpp" -using namespace InferenceEngine; using namespace CPUTestUtils; -using namespace ov::test; - -namespace CPULayerTestsDefinitions { -enum OffsetType {ZERO, NATURAL, REAL_POSITIVE, REAL_NEGATIVE, REAL_MISC}; - -typedef std::tuple< - bool, // with_bilinear_interpolation_pad - bool, // with_modulation - OffsetType // type of def. offsets - > DefConvSpecificParams; - -typedef std::tuple< - ngraph::op::PadType, // pad. type - std::vector, // pad. begin - std::vector, // pad. end - std::vector, // strides - std::vector // dilations - > AddSpatialParamsDyn; - -typedef std::tuple< - AddSpatialParamsDyn, - std::vector, - DefConvSpecificParams, - InferenceEngine::Precision, // Net precision - LayerTestsUtils::TargetDevice // Device name - > DefConvLayerTestParams; - -typedef std::tuple< - CPULayerTestsDefinitions::DefConvLayerTestParams, - CPUSpecificParams> DefConvLayerCPUTestParamsSet; +namespace ov { +namespace test { +enum OffsetType { ZERO, NATURAL, REAL_POSITIVE, REAL_NEGATIVE, REAL_MISC }; + +typedef std::tuple + DefConvSpecificParams; + +typedef std::tuple, // pad. begin + std::vector, // pad. end + std::vector, // strides + std::vector // dilations + > + AddSpatialParamsDyn; + +typedef std::tuple, + DefConvSpecificParams, + ov::element::Type, // Net precision + std::string // Device name + > + DefConvLayerTestParams; + +typedef std::tuple DefConvLayerCPUTestParamsSet; class DefConvLayerCPUTest : public testing::WithParamInterface, - virtual public SubgraphBaseTest, public CPUTestsBase { + virtual public SubgraphBaseTest, + public CPUTestsBase { public: OffsetType offsetType; static std::string getTestCaseName(testing::TestParamInfo obj) { - CPULayerTestsDefinitions::DefConvSpecificParams dcSpecificParams; + DefConvSpecificParams dcSpecificParams; std::vector inputShape; - InferenceEngine::Precision netPrecision; - CPULayerTestsDefinitions::DefConvLayerTestParams basicParamsSet; + ov::element::Type netPrecision; + DefConvLayerTestParams basicParamsSet; CPUSpecificParams cpuParams; std::tie(basicParamsSet, cpuParams) = obj.param; AddSpatialParamsDyn addSpParams; std::string td; std::tie(addSpParams, inputShape, dcSpecificParams, netPrecision, td) = basicParamsSet; - ngraph::op::PadType padType; - InferenceEngine::SizeVector stride, dilation; + ov::op::PadType padType; + std::vector stride, dilation; std::vector padBegin, padEnd; std::tie(padType, padBegin, padEnd, stride, dilation) = addSpParams; // gr * in_ch_per_gr / in_ch_per_gr size_t groups = inputShape[0].second[0][1] / inputShape[2].second[0][1]; // dg * ker_spat_shape[0] * ker_spat_shape[1] * 2 / (ker_spat_shape[0] * ker_spat_shape[1] * 2) - size_t deformableGroups = inputShape[1].second[0][1] / (inputShape[2].second[0][2] * inputShape[2].second[0][3] *2); + size_t deformableGroups = + inputShape[1].second[0][1] / (inputShape[2].second[0][2] * inputShape[2].second[0][3] * 2); bool withBilinearInterpolationPad, withModulation; OffsetType offType; @@ -85,7 +81,7 @@ class DefConvLayerCPUTest : public testing::WithParamInterface& targetInputStaticShapes) override { + void generate_inputs(const std::vector& targetInputStaticShapes) override { inputs.clear(); const auto& funcInputs = function->inputs(); auto inShape = targetInputStaticShapes[0]; @@ -114,9 +111,11 @@ class DefConvLayerCPUTest : public testing::WithParamInterface inputShape; - InferenceEngine::Precision netPrecision; - CPULayerTestsDefinitions::DefConvLayerTestParams basicParamsSet; + ov::element::Type netPrecision; + DefConvLayerTestParams 
basicParamsSet; CPUSpecificParams cpuParams; std::tie(basicParamsSet, cpuParams) = this->GetParam(); std::tie(inFmts, outFmts, priority, selectedType) = cpuParams; @@ -143,22 +142,21 @@ class DefConvLayerCPUTest : public testing::WithParamInterface stride, dilation; std::vector padBegin, padEnd; std::tie(padType, padBegin, padEnd, stride, dilation) = addSpParams; // gr * in_ch_per_gr / in_ch_per_gr size_t groups = inputShape[0].second[0].at(1) / inputShape[2].second[0].at(1); // dg * ker_spat_shape[0] * ker_spat_shape[1] * 2 / (ker_spat_shape[0] * ker_spat_shape[1] * 2) - size_t deformableGroups = inputShape[1].second[0].at(1) / - (inputShape[2].second[0].at(2) * inputShape[2].second[0].at(3) * 2); + size_t deformableGroups = + inputShape[1].second[0].at(1) / (inputShape[2].second[0].at(2) * inputShape[2].second[0].at(3) * 2); bool withBilinearInterpolationPad, withModulation; std::tie(withBilinearInterpolationPad, withModulation, offsetType) = dcSpecificParams; - auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); ov::ParameterVector inputParams; for (auto&& shape : inputDynamicShapes) { - inputParams.push_back(std::make_shared(ngPrc, shape)); + inputParams.push_back(std::make_shared(netPrecision, shape)); } auto data = inputParams[0]; data->set_friendly_name("a_data"); @@ -166,22 +164,40 @@ class DefConvLayerCPUTest : public testing::WithParamInterfaceset_friendly_name("b_offset_vals"); auto filter_vals = inputParams[2]; filter_vals->set_friendly_name("c_filter_vals"); - ngraph::ParameterVector parameters{data, offset_vals, filter_vals}; - std::shared_ptr deformable_conv; + ov::ParameterVector parameters{data, offset_vals, filter_vals}; + std::shared_ptr deformable_conv; if (withModulation) { auto modulation_scalars = inputParams[3]; modulation_scalars->set_friendly_name("c_modulation_scalars"); - deformable_conv = std::make_shared(data, offset_vals, filter_vals, modulation_scalars, stride, padBegin, - padEnd, dilation, padType, groups, deformableGroups, - withBilinearInterpolationPad); + deformable_conv = std::make_shared(data, + offset_vals, + filter_vals, + modulation_scalars, + stride, + padBegin, + padEnd, + dilation, + padType, + groups, + deformableGroups, + withBilinearInterpolationPad); parameters.push_back(modulation_scalars); } else { - deformable_conv = std::make_shared(data, offset_vals, filter_vals, stride, padBegin, padEnd, dilation, - padType, groups, deformableGroups, withBilinearInterpolationPad); + deformable_conv = std::make_shared(data, + offset_vals, + filter_vals, + stride, + padBegin, + padEnd, + dilation, + padType, + groups, + deformableGroups, + withBilinearInterpolationPad); } - function = makeNgraphFunction(ngPrc, parameters, deformable_conv, "deformable_convolution"); + function = makeNgraphFunction(netPrecision, parameters, deformable_conv, "deformable_convolution"); } }; @@ -209,47 +225,26 @@ std::vector filterCPUInfoForDevice(bool enforceRef = false) { return resCPUParams; } -const std::vector netPrecisions = { - InferenceEngine::Precision::FP32 -}; - -const auto defConvSpecificParams_Smoke = ::testing::Combine( - ::testing::ValuesIn(std::vector { - true, - false - }), // with_bilinear_interpolation_pad - ::testing::ValuesIn(std::vector { - true, - false - }), // with_modulation - ::testing::ValuesIn(std::vector { - OffsetType::REAL_MISC, - }) // offset type -); - -const auto defConvSpecificParams = ::testing::Combine( - ::testing::ValuesIn(std::vector { - true, - false - }), // with_bilinear_interpolation_pad - 
::testing::ValuesIn(std::vector { - true, - false - }), // with_modulation - ::testing::ValuesIn(std::vector { - OffsetType::NATURAL, - OffsetType::ZERO, - OffsetType::REAL_MISC, - OffsetType::REAL_POSITIVE, - OffsetType::REAL_NEGATIVE - }) // offset type -); - -std::vector padTypes = { - ngraph::op::PadType::EXPLICIT, - ngraph::op::PadType::VALID -}; -std::vector> getCartProduct(const std::vector> &v) { +const std::vector netPrecisions = {ov::element::f32}; + +const auto defConvSpecificParams_Smoke = + ::testing::Combine(::testing::ValuesIn(std::vector{true, false}), // with_bilinear_interpolation_pad + ::testing::ValuesIn(std::vector{true, false}), // with_modulation + ::testing::ValuesIn(std::vector{ + OffsetType::REAL_MISC, // offset type + })); + +const auto defConvSpecificParams = + ::testing::Combine(::testing::ValuesIn(std::vector{true, false}), // with_bilinear_interpolation_pad + ::testing::ValuesIn(std::vector{true, false}), // with_modulation + ::testing::ValuesIn(std::vector{OffsetType::NATURAL, // offset type + OffsetType::ZERO, + OffsetType::REAL_MISC, + OffsetType::REAL_POSITIVE, + OffsetType::REAL_NEGATIVE})); + +std::vector padTypes = {ov::op::PadType::EXPLICIT, ov::op::PadType::VALID}; +std::vector> getCartProduct(const std::vector>& v) { int outSize = 1; int n = v.size(); for (int i = 0; i < n; i++) { @@ -259,7 +254,7 @@ std::vector> getCartProduct(const std::vector cortege(n); int curResid = i, curInd = 0; - for (int j = v.size() - 1; j >= 0 ; j--) { + for (int j = v.size() - 1; j >= 0; j--) { curInd = curResid % v[j].size(); curResid = curResid / v[j].size(); cortege[j] = v[j][curInd]; @@ -268,12 +263,11 @@ std::vector> getCartProduct(const std::vector> buildStaticParams( - const std::vector> spatParams, - const std::vector> chParamsUncombined) { +std::vector> buildStaticParams(const std::vector> spatParams, + const std::vector> chParamsUncombined) { std::vector> chParams = getCartProduct(chParamsUncombined); std::vector> shapes; - for (std::vector &chPar : chParams) { + for (std::vector& chPar : chParams) { const size_t batch = spatParams[0][0]; const size_t inSpH = spatParams[1][0]; const size_t inSpW = spatParams[1][1]; @@ -287,339 +281,336 @@ std::vector> buildStaticParams( const size_t inChPerGr = chPar[2]; const size_t outChPerGr = chPar[3]; - std::vector inputShape = { - {batch, gr * inChPerGr, inSpH, inSpW}, - {batch, defGr * kerSpH * kerSpW * 2, offSpH, offSpW}, - {gr * outChPerGr, inChPerGr, kerSpH, kerSpW}, - {batch, defGr * kerSpH * kerSpW, offSpH, offSpW} - }; + std::vector inputShape = {{batch, gr * inChPerGr, inSpH, inSpW}, + {batch, defGr * kerSpH * kerSpW * 2, offSpH, offSpW}, + {gr * outChPerGr, inChPerGr, kerSpH, kerSpW}, + {batch, defGr * kerSpH * kerSpW, offSpH, offSpW}}; shapes.push_back(inputShape); } return shapes; } -const auto addSpParams = ::testing::Combine( - ::testing::ValuesIn(padTypes), // pad. type - ::testing::Values(std::vector({0, 0})), // pad. begin - ::testing::Values(std::vector({0, 0})), // pad. end - ::testing::Values(std::vector {1, 1}), // strides - ::testing::Values(std::vector {1, 1}) // dilations +const auto addSpParams = ::testing::Combine(::testing::ValuesIn(padTypes), // pad. type + ::testing::Values(std::vector({0, 0})), // pad. begin + ::testing::Values(std::vector({0, 0})), // pad. end + ::testing::Values(std::vector{1, 1}), // strides + ::testing::Values(std::vector{1, 1}) // dilations ); -const auto addSpParamsDilationUneven = ::testing::Combine( - ::testing::ValuesIn(padTypes), // pad. 
type - ::testing::Values(std::vector({0, 0})), // pad. begin - ::testing::Values(std::vector({0, 0})), // pad. end - ::testing::Values(std::vector {1, 1}), // strides - ::testing::Values(std::vector {2, 1}) // dilations -); +const auto addSpParamsDilationUneven = + ::testing::Combine(::testing::ValuesIn(padTypes), // pad. type + ::testing::Values(std::vector({0, 0})), // pad. begin + ::testing::Values(std::vector({0, 0})), // pad. end + ::testing::Values(std::vector{1, 1}), // strides + ::testing::Values(std::vector{2, 1})); // dilations const std::vector> spatParams1 = { - {1}, // batch - {34, 34}, // in. spat. shape - {32, 32}, // off. spat. shape - {3, 3} // ker. spat. shape + {1}, // batch + {34, 34}, // in. spat. shape + {32, 32}, // off. spat. shape + {3, 3} // ker. spat. shape }; const std::vector> spatParams2 = { - {1}, // batch - {3, 3}, // in. spat. shape - {2, 2}, // off. spat. shape - {2, 2} // ker. spat. shape + {1}, // batch + {3, 3}, // in. spat. shape + {2, 2}, // off. spat. shape + {2, 2} // ker. spat. shape }; const std::vector> spatParams3 = { - {1}, // batch - {5, 5}, // in. spat. shape - {4, 4}, // off. spat. shape - {2, 2} // ker. spat. shape + {1}, // batch + {5, 5}, // in. spat. shape + {4, 4}, // off. spat. shape + {2, 2} // ker. spat. shape }; const std::vector> spatParams4 = { - {1}, // batch - {3, 2}, // in. spat. shape - {2, 1}, // off. spat. shape - {2, 2} // ker. spat. shape + {1}, // batch + {3, 2}, // in. spat. shape + {2, 1}, // off. spat. shape + {2, 2} // ker. spat. shape }; const std::vector> spatParamsDilationUneven = { - {1}, // batch - {3, 2}, // in. spat. shape - {1, 1}, // off. spat. shape - {2, 2} // ker. spat. shape + {1}, // batch + {3, 2}, // in. spat. shape + {1, 1}, // off. spat. shape + {2, 2} // ker. spat. shape }; const std::vector> spatParams5_onnx2d = { - {1}, // batch - {4, 4}, // in. spat. shape - {3, 3}, // off. spat. shape - {2, 2} // ker. spat. shape + {1}, // batch + {4, 4}, // in. spat. shape + {3, 3}, // off. spat. shape + {2, 2} // ker. spat. shape }; const std::vector> channelParamsSingleGr = { - {1}, // gr. 2,4 - {1, 2}, // def. gr. 1,2 - {16, 32}, // in. ch. per gr. - {16, 32} // out. ch. per gr. + {1}, // gr. 2,4 + {1, 2}, // def. gr. 1,2 + {16, 32}, // in. ch. per gr. + {16, 32} // out. ch. per gr. }; const std::vector> channelParamsSingleGr2 = { - {1}, // gr. 2,4 - {1}, // def. gr. 1,2 - {3}, // in. ch. per gr. - {3} // out. ch. per gr. + {1}, // gr. 2,4 + {1}, // def. gr. 1,2 + {3}, // in. ch. per gr. + {3} // out. ch. per gr. }; const std::vector> channelParamsMulGr = { - {2, 4}, // gr. 2,4 - {1, 2}, // def. gr. 1,2 - {3, 7}, // in. ch. per gr. - {3, 7} // out. ch. per gr. + {2, 4}, // gr. 2,4 + {1, 2}, // def. gr. 1,2 + {3, 7}, // in. ch. per gr. + {3, 7} // out. ch. per gr. }; const std::vector> channelParams_onnx2d = { - {1}, // gr. 2,4 - {1}, // def. gr. 1,2 - {1}, // in. ch. per gr. - {1} // out. ch. per gr. + {1}, // gr. 2,4 + {1}, // def. gr. 1,2 + {1}, // in. ch. per gr. + {1} // out. ch. per gr. 
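// For reference: buildStaticParams() above expands each {gr, dg, in_ch_per_gr, out_ch_per_gr}
// cortege together with the spatial params into the four DeformableConvolution inputs. A worked
// example (derived from the formulas in buildStaticParams) using spatParams5_onnx2d with
// channelParams_onnx2d, i.e. batch 1, in. spat. {4, 4}, off. spat. {3, 3}, ker. {2, 2},
// gr = dg = in/out ch. per gr. = 1:
//   data       {batch, gr * inChPerGr, inSpH, inSpW}                -> {1, 1, 4, 4}
//   offsets    {batch, defGr * kerSpH * kerSpW * 2, offSpH, offSpW} -> {1, 8, 3, 3}
//   filter     {gr * outChPerGr, inChPerGr, kerSpH, kerSpW}         -> {1, 1, 2, 2}
//   modulation {batch, defGr * kerSpH * kerSpW, offSpH, offSpW}     -> {1, 4, 3, 3}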
}; const std::vector> dynShapeChainRef = { - { - // gr == 2, dg == 1, in_ch_per_gr == 3, out_ch_per_gr == 3 - // {{dynamic shape}, {{static shape case1}, {static shape case2}, ...} - {{-1, -1, -1, -1}, {{1, 6, 3, 2}, {1, 6, 4, 3}, {1, 6, 5, 4}, {1, 6, 3, 2}}}, // input 0 - {{-1, -1, -1, -1}, {{1, 8, 2, 1}, {1, 8, 3, 2}, {1, 8, 4, 3}, {1, 8, 2, 1}}}, // input 1 - {{6, 3, 2, 2}, {{6, 3, 2, 2}, {6, 3, 2, 2}, {6, 3, 2, 2}, {6, 3, 2, 2}}}, // input 2 - {{-1, -1, -1, -1}, {{1, 4, 2, 1}, {1, 4, 3, 2}, {1, 4, 4, 3}, {1, 4, 2, 1}}} // input 3 - }, - { - {{{1, 5}, 6, {1, 10}, {1, 8}}, {{2, 6, 3, 2}, {1, 6, 4, 3}, {3, 6, 5, 4}, {2, 6, 3, 2}}}, - {{{1, 3}, 8, {1, 10}, {1, 8}}, {{2, 8, 2, 1}, {1, 8, 3, 2}, {3, 8, 4, 3}, {2, 8, 2, 1}}}, - {{6, 3, 2, 2}, {{6, 3, 2, 2}, {6, 3, 2, 2}, {6, 3, 2, 2}, {6, 3, 2, 2}}}, - {{{1, 3}, 4, {1, 10}, {1, 8}}, {{2, 4, 2, 1}, {1, 4, 3, 2}, {3, 4, 4, 3}, {2, 4, 2, 1}}} - }, - { - {{{1, 5}, {1, 6}, {1, 10}, {1, 8}}, {{2, 6, 3, 2}, {1, 6, 4, 3}, {3, 6, 5, 4}, {2, 6, 3, 2}}}, - {{{1, 3}, {1, 8}, {1, 10}, {1, 8}}, {{2, 8, 2, 1}, {1, 8, 3, 2}, {3, 8, 4, 3}, {2, 8, 2, 1}}}, - {{6, 3, 2, 2}, {{6, 3, 2, 2}, {6, 3, 2, 2}, {6, 3, 2, 2}, {6, 3, 2, 2}}}, - {{{1, 3}, {1, 5}, {1, 10}, {1, 8}}, {{2, 4, 2, 1}, {1, 4, 3, 2}, {3, 4, 4, 3}, {2, 4, 2, 1}}} - }, + { + // gr == 2, dg == 1, in_ch_per_gr == 3, out_ch_per_gr == 3 + // {{dynamic shape}, {{static shape case1}, {static shape case2}, ...} + {{-1, -1, -1, -1}, {{1, 6, 3, 2}, {1, 6, 4, 3}, {1, 6, 5, 4}, {1, 6, 3, 2}}}, // input 0 + {{-1, -1, -1, -1}, {{1, 8, 2, 1}, {1, 8, 3, 2}, {1, 8, 4, 3}, {1, 8, 2, 1}}}, // input 1 + {{6, 3, 2, 2}, {{6, 3, 2, 2}, {6, 3, 2, 2}, {6, 3, 2, 2}, {6, 3, 2, 2}}}, // input 2 + {{-1, -1, -1, -1}, {{1, 4, 2, 1}, {1, 4, 3, 2}, {1, 4, 4, 3}, {1, 4, 2, 1}}} // input 3 + }, + {{{{1, 5}, 6, {1, 10}, {1, 8}}, {{2, 6, 3, 2}, {1, 6, 4, 3}, {3, 6, 5, 4}, {2, 6, 3, 2}}}, + {{{1, 3}, 8, {1, 10}, {1, 8}}, {{2, 8, 2, 1}, {1, 8, 3, 2}, {3, 8, 4, 3}, {2, 8, 2, 1}}}, + {{6, 3, 2, 2}, {{6, 3, 2, 2}, {6, 3, 2, 2}, {6, 3, 2, 2}, {6, 3, 2, 2}}}, + {{{1, 3}, 4, {1, 10}, {1, 8}}, {{2, 4, 2, 1}, {1, 4, 3, 2}, {3, 4, 4, 3}, {2, 4, 2, 1}}}}, + {{{{1, 5}, {1, 6}, {1, 10}, {1, 8}}, {{2, 6, 3, 2}, {1, 6, 4, 3}, {3, 6, 5, 4}, {2, 6, 3, 2}}}, + {{{1, 3}, {1, 8}, {1, 10}, {1, 8}}, {{2, 8, 2, 1}, {1, 8, 3, 2}, {3, 8, 4, 3}, {2, 8, 2, 1}}}, + {{6, 3, 2, 2}, {{6, 3, 2, 2}, {6, 3, 2, 2}, {6, 3, 2, 2}, {6, 3, 2, 2}}}, + {{{1, 3}, {1, 5}, {1, 10}, {1, 8}}, {{2, 4, 2, 1}, {1, 4, 3, 2}, {3, 4, 4, 3}, {2, 4, 2, 1}}}}, }; const std::vector> dynShapeChainJIT = { - { - // gr == 1, dg == 1, in_ch_per_gr == 16, out_ch_per_gr == 16 - // {{dynamic shape}, {{static shape case1}, {static shape case2}, ...} - {{-1, -1, -1, -1}, {{1, 16, 3, 2}, {1, 16, 4, 3}, {1, 16, 5, 4}, {1, 16, 3, 2}}}, // input 0 - {{-1, 8, -1, -1}, {{1, 8, 2, 1}, {1, 8, 3, 2}, {1, 8, 4, 3}, {1, 8, 2, 1}}}, // input 1 - {{16, 16, 2, 2}, {{16, 16, 2, 2}, {16, 16, 2, 2}, {16, 16, 2, 2}, {16, 16, 2, 2}}}, // input 2 - {{-1, 4, -1, -1}, {{1, 4, 2, 1}, {1, 4, 3, 2}, {1, 4, 4, 3}, {1, 4, 2, 1}}} // input 3 - }, - { - {{{1, 5}, 16, {1, 10}, {1, 8}}, {{1, 16, 3, 2}, {1, 16, 4, 3}, {1, 16, 5, 4}, {1, 16, 3, 2}}}, // input 0 - {{{1, 5}, 8, {1, 10}, {1, 8}}, {{1, 8, 2, 1}, {1, 8, 3, 2}, {1, 8, 4, 3}, {1, 8, 2, 1}}}, // input 1 - {{16, 16, 2, 2}, {{16, 16, 2, 2}, {16, 16, 2, 2}, {16, 16, 2, 2}, {16, 16, 2, 2}}}, // input 2 - {{{1, 5}, 4, {1, 10}, {1, 8}}, {{1, 4, 2, 1}, {1, 4, 3, 2}, {1, 4, 4, 3}, {1, 4, 2, 1}}} // input 3 - }, - { - {{{1, 5}, {1, 16}, {1, 10}, {1, 8}}, {{1, 16, 3, 2}, {1, 16, 4, 
3}, {1, 16, 5, 4}, {1, 16, 3, 2}}}, // input 0 - {{{1, 5}, {1, 8}, {1, 10}, {1, 8}}, {{1, 8, 2, 1}, {1, 8, 3, 2}, {1, 8, 4, 3}, {1, 8, 2, 1}}}, // input 1 - {{16, 16, 2, 2}, {{16, 16, 2, 2}, {16, 16, 2, 2}, {16, 16, 2, 2}, {16, 16, 2, 2}}}, // input 2 - {{{1, 5}, {1, 5}, {1, 10}, {1, 8}}, {{1, 4, 2, 1}, {1, 4, 3, 2}, {1, 4, 4, 3}, {1, 4, 2, 1}}} // input 3 - }, + { + // gr == 1, dg == 1, in_ch_per_gr == 16, out_ch_per_gr == 16 + // {{dynamic shape}, {{static shape case1}, {static shape case2}, ...} + {{-1, -1, -1, -1}, {{1, 16, 3, 2}, {1, 16, 4, 3}, {1, 16, 5, 4}, {1, 16, 3, 2}}}, // input 0 + {{-1, 8, -1, -1}, {{1, 8, 2, 1}, {1, 8, 3, 2}, {1, 8, 4, 3}, {1, 8, 2, 1}}}, // input 1 + {{16, 16, 2, 2}, {{16, 16, 2, 2}, {16, 16, 2, 2}, {16, 16, 2, 2}, {16, 16, 2, 2}}}, // input 2 + {{-1, 4, -1, -1}, {{1, 4, 2, 1}, {1, 4, 3, 2}, {1, 4, 4, 3}, {1, 4, 2, 1}}} // input 3 + }, + { + {{{1, 5}, 16, {1, 10}, {1, 8}}, {{1, 16, 3, 2}, {1, 16, 4, 3}, {1, 16, 5, 4}, {1, 16, 3, 2}}}, // input 0 + {{{1, 5}, 8, {1, 10}, {1, 8}}, {{1, 8, 2, 1}, {1, 8, 3, 2}, {1, 8, 4, 3}, {1, 8, 2, 1}}}, // input 1 + {{16, 16, 2, 2}, {{16, 16, 2, 2}, {16, 16, 2, 2}, {16, 16, 2, 2}, {16, 16, 2, 2}}}, // input 2 + {{{1, 5}, 4, {1, 10}, {1, 8}}, {{1, 4, 2, 1}, {1, 4, 3, 2}, {1, 4, 4, 3}, {1, 4, 2, 1}}} // input 3 + }, + { + {{{1, 5}, {1, 16}, {1, 10}, {1, 8}}, {{1, 16, 3, 2}, {1, 16, 4, 3}, {1, 16, 5, 4}, {1, 16, 3, 2}}}, // input 0 + {{{1, 5}, {1, 8}, {1, 10}, {1, 8}}, {{1, 8, 2, 1}, {1, 8, 3, 2}, {1, 8, 4, 3}, {1, 8, 2, 1}}}, // input 1 + {{16, 16, 2, 2}, {{16, 16, 2, 2}, {16, 16, 2, 2}, {16, 16, 2, 2}, {16, 16, 2, 2}}}, // input 2 + {{{1, 5}, {1, 5}, {1, 10}, {1, 8}}, {{1, 4, 2, 1}, {1, 4, 3, 2}, {1, 4, 4, 3}, {1, 4, 2, 1}}} // input 3 + }, }; // autopad params -const std::vector> dynShapeChainJITAutoPad = { - { - {{{1, 5}, {1, 16}, {1, 10}, {1, 10}}, {{1, 16, 3, 2}, {1, 16, 10, 10}, {1, 16, 3, 2}}}, // input 0 - {{{1, 5}, 8, {1, 10}, {1, 10}}, {{1, 8, 3, 2}, {1, 8, 10, 10}, {1, 8, 3, 2}}}, // input 1 - {{16, 16, 2, 2}, {{16, 16, 2, 2}, {16, 16, 2, 2}, {16, 16, 2, 2}}}, // input 2 - {{{1, 5}, 4, {1, 10}, {1, 10}}, {{1, 4, 3, 2}, {1, 4, 10, 10}, {1, 4, 3, 2}}} // input 3 - } -}; +const std::vector> dynShapeChainJITAutoPad = {{ + {{{1, 5}, {1, 16}, {1, 10}, {1, 10}}, {{1, 16, 3, 2}, {1, 16, 10, 10}, {1, 16, 3, 2}}}, // input 0 + {{{1, 5}, 8, {1, 10}, {1, 10}}, {{1, 8, 3, 2}, {1, 8, 10, 10}, {1, 8, 3, 2}}}, // input 1 + {{16, 16, 2, 2}, {{16, 16, 2, 2}, {16, 16, 2, 2}, {16, 16, 2, 2}}}, // input 2 + {{{1, 5}, 4, {1, 10}, {1, 10}}, {{1, 4, 3, 2}, {1, 4, 10, 10}, {1, 4, 3, 2}}} // input 3 +}}; const std::vector> autoPadSpatParams = { - {1}, // batch - {3, 2}, // in. spat. shape - {3, 2}, // off. spat. shape - {2, 2} // ker. spat. shape + {1}, // batch + {3, 2}, // in. spat. shape + {3, 2}, // off. spat. shape + {2, 2} // ker. spat. shape }; -std::vector padTypesAutoPad = { - ngraph::op::PadType::SAME_LOWER, - ngraph::op::PadType::SAME_UPPER -}; +std::vector padTypesAutoPad = {ov::op::PadType::SAME_LOWER, ov::op::PadType::SAME_UPPER}; -const auto autoPadAddSpParams = ::testing::Combine( - ::testing::ValuesIn(padTypesAutoPad), // pad. type - ::testing::Values(std::vector({0, 0})), // pad. begin - ignored - ::testing::Values(std::vector({0, 0})), // pad. end - ignored - ::testing::Values(std::vector {1, 1}), // strides - ::testing::Values(std::vector {1, 1}) // dilations -); +const auto autoPadAddSpParams = + ::testing::Combine(::testing::ValuesIn(padTypesAutoPad), // pad. type + ::testing::Values(std::vector({0, 0})), // pad. 
begin - ignored + ::testing::Values(std::vector({0, 0})), // pad. end - ignored + ::testing::Values(std::vector{1, 1}), // strides + ::testing::Values(std::vector{1, 1})); // dilations const auto params1_Smoke = ::testing::Combine( - ::testing::Combine( - addSpParams, - ::testing::ValuesIn(static_shapes_to_test_representation(buildStaticParams(spatParams1, channelParamsSingleGr))), - defConvSpecificParams_Smoke, - ::testing::ValuesIn(netPrecisions), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfoForDevice())); + ::testing::Combine(addSpParams, + ::testing::ValuesIn( + static_shapes_to_test_representation(buildStaticParams(spatParams1, channelParamsSingleGr))), + defConvSpecificParams_Smoke, + ::testing::ValuesIn(netPrecisions), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::ValuesIn(filterCPUInfoForDevice())); const auto params2_Smoke = ::testing::Combine( - ::testing::Combine( - addSpParams, - ::testing::ValuesIn(static_shapes_to_test_representation(buildStaticParams(spatParams2, channelParamsSingleGr))), - defConvSpecificParams_Smoke, - ::testing::ValuesIn(netPrecisions), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfoForDevice())); + ::testing::Combine(addSpParams, + ::testing::ValuesIn( + static_shapes_to_test_representation(buildStaticParams(spatParams2, channelParamsSingleGr))), + defConvSpecificParams_Smoke, + ::testing::ValuesIn(netPrecisions), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::ValuesIn(filterCPUInfoForDevice())); const auto params3_Smoke = ::testing::Combine( - ::testing::Combine( - addSpParams, - ::testing::ValuesIn(static_shapes_to_test_representation(buildStaticParams(spatParams3, channelParamsSingleGr))), - defConvSpecificParams_Smoke, - ::testing::ValuesIn(netPrecisions), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfoForDevice())); + ::testing::Combine(addSpParams, + ::testing::ValuesIn( + static_shapes_to_test_representation(buildStaticParams(spatParams3, channelParamsSingleGr))), + defConvSpecificParams_Smoke, + ::testing::ValuesIn(netPrecisions), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::ValuesIn(filterCPUInfoForDevice())); const auto params4_Smoke = ::testing::Combine( - ::testing::Combine( - addSpParams, - ::testing::ValuesIn(static_shapes_to_test_representation(buildStaticParams(spatParams4, channelParamsSingleGr))), - defConvSpecificParams_Smoke, - ::testing::ValuesIn(netPrecisions), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfoForDevice())); + ::testing::Combine(addSpParams, + ::testing::ValuesIn( + static_shapes_to_test_representation(buildStaticParams(spatParams4, channelParamsSingleGr))), + defConvSpecificParams_Smoke, + ::testing::ValuesIn(netPrecisions), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::ValuesIn(filterCPUInfoForDevice())); const auto params5_Smoke = ::testing::Combine( - ::testing::Combine( - addSpParams, - ::testing::ValuesIn(static_shapes_to_test_representation(buildStaticParams(spatParams4, channelParamsMulGr))), - defConvSpecificParams_Smoke, - ::testing::ValuesIn(netPrecisions), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfoForDevice(true))); -const auto params6_Smoke = ::testing::Combine( - ::testing::Combine( - addSpParams, - ::testing::ValuesIn(dynShapeChainRef), - defConvSpecificParams_Smoke, - ::testing::ValuesIn(netPrecisions), - 
::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfoForDevice(true))); -const auto params7_Smoke = ::testing::Combine( - ::testing::Combine( - addSpParams, - ::testing::ValuesIn(dynShapeChainJIT), - defConvSpecificParams_Smoke, - ::testing::ValuesIn(netPrecisions), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfoForDevice(false))); -const auto params8_Smoke = ::testing::Combine( - ::testing::Combine( - autoPadAddSpParams, - ::testing::ValuesIn(static_shapes_to_test_representation(buildStaticParams(autoPadSpatParams, channelParamsSingleGr))), - defConvSpecificParams_Smoke, - ::testing::ValuesIn(netPrecisions), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfoForDevice())); -const auto params9_Smoke = ::testing::Combine( - ::testing::Combine( - autoPadAddSpParams, - ::testing::ValuesIn(dynShapeChainJITAutoPad), - defConvSpecificParams_Smoke, - ::testing::ValuesIn(netPrecisions), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfoForDevice(false))); - -INSTANTIATE_TEST_SUITE_P(smoke_DefConvLayoutTest1, DefConvLayerCPUTest, params1_Smoke, DefConvLayerCPUTest::getTestCaseName); -INSTANTIATE_TEST_SUITE_P(smoke_DefConvLayoutTest2, DefConvLayerCPUTest, params2_Smoke, DefConvLayerCPUTest::getTestCaseName); -INSTANTIATE_TEST_SUITE_P(smoke_DefConvLayoutTest3, DefConvLayerCPUTest, params3_Smoke, DefConvLayerCPUTest::getTestCaseName); -INSTANTIATE_TEST_SUITE_P(smoke_DefConvLayoutTest4, DefConvLayerCPUTest, params4_Smoke, DefConvLayerCPUTest::getTestCaseName); -INSTANTIATE_TEST_SUITE_P(smoke_DefConvLayoutTest5, DefConvLayerCPUTest, params5_Smoke, DefConvLayerCPUTest::getTestCaseName); -INSTANTIATE_TEST_SUITE_P(smoke_DefConvLayoutTest6, DefConvLayerCPUTest, params6_Smoke, DefConvLayerCPUTest::getTestCaseName); -INSTANTIATE_TEST_SUITE_P(smoke_DefConvLayoutTest7, DefConvLayerCPUTest, params7_Smoke, DefConvLayerCPUTest::getTestCaseName); -INSTANTIATE_TEST_SUITE_P(smoke_DefConvLayoutTest8, DefConvLayerCPUTest, params8_Smoke, DefConvLayerCPUTest::getTestCaseName); -INSTANTIATE_TEST_SUITE_P(smoke_DefConvLayoutTest9, DefConvLayerCPUTest, params9_Smoke, DefConvLayerCPUTest::getTestCaseName); - -const auto params1 = ::testing::Combine( - ::testing::Combine( - addSpParams, - ::testing::ValuesIn(static_shapes_to_test_representation(buildStaticParams(spatParams1, channelParamsSingleGr2))), - defConvSpecificParams, - ::testing::ValuesIn(netPrecisions), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfoForDevice())); -const auto params2 = ::testing::Combine( - ::testing::Combine( - addSpParams, - ::testing::ValuesIn(static_shapes_to_test_representation(buildStaticParams(spatParams2, channelParamsSingleGr))), - defConvSpecificParams, - ::testing::ValuesIn(netPrecisions), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfoForDevice())); -const auto params3 = ::testing::Combine( - ::testing::Combine( - addSpParams, - ::testing::ValuesIn(static_shapes_to_test_representation(buildStaticParams(spatParams3, channelParamsSingleGr))), - defConvSpecificParams, - ::testing::ValuesIn(netPrecisions), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfoForDevice())); -const auto params4 = ::testing::Combine( - ::testing::Combine( - addSpParams, - ::testing::ValuesIn(static_shapes_to_test_representation(buildStaticParams(spatParams4, channelParamsSingleGr))), - defConvSpecificParams, - 
::testing::ValuesIn(netPrecisions), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfoForDevice())); + ::testing::Combine( + addSpParams, + ::testing::ValuesIn(static_shapes_to_test_representation(buildStaticParams(spatParams4, channelParamsMulGr))), + defConvSpecificParams_Smoke, + ::testing::ValuesIn(netPrecisions), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::ValuesIn(filterCPUInfoForDevice(true))); +const auto params6_Smoke = ::testing::Combine(::testing::Combine(addSpParams, + ::testing::ValuesIn(dynShapeChainRef), + defConvSpecificParams_Smoke, + ::testing::ValuesIn(netPrecisions), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::ValuesIn(filterCPUInfoForDevice(true))); +const auto params7_Smoke = ::testing::Combine(::testing::Combine(addSpParams, + ::testing::ValuesIn(dynShapeChainJIT), + defConvSpecificParams_Smoke, + ::testing::ValuesIn(netPrecisions), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::ValuesIn(filterCPUInfoForDevice(false))); +const auto params8_Smoke = + ::testing::Combine(::testing::Combine(autoPadAddSpParams, + ::testing::ValuesIn(static_shapes_to_test_representation( + buildStaticParams(autoPadSpatParams, channelParamsSingleGr))), + defConvSpecificParams_Smoke, + ::testing::ValuesIn(netPrecisions), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::ValuesIn(filterCPUInfoForDevice())); +const auto params9_Smoke = ::testing::Combine(::testing::Combine(autoPadAddSpParams, + ::testing::ValuesIn(dynShapeChainJITAutoPad), + defConvSpecificParams_Smoke, + ::testing::ValuesIn(netPrecisions), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::ValuesIn(filterCPUInfoForDevice(false))); + +INSTANTIATE_TEST_SUITE_P(smoke_DefConvLayoutTest1, + DefConvLayerCPUTest, + params1_Smoke, + DefConvLayerCPUTest::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_DefConvLayoutTest2, + DefConvLayerCPUTest, + params2_Smoke, + DefConvLayerCPUTest::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_DefConvLayoutTest3, + DefConvLayerCPUTest, + params3_Smoke, + DefConvLayerCPUTest::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_DefConvLayoutTest4, + DefConvLayerCPUTest, + params4_Smoke, + DefConvLayerCPUTest::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_DefConvLayoutTest5, + DefConvLayerCPUTest, + params5_Smoke, + DefConvLayerCPUTest::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_DefConvLayoutTest6, + DefConvLayerCPUTest, + params6_Smoke, + DefConvLayerCPUTest::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_DefConvLayoutTest7, + DefConvLayerCPUTest, + params7_Smoke, + DefConvLayerCPUTest::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_DefConvLayoutTest8, + DefConvLayerCPUTest, + params8_Smoke, + DefConvLayerCPUTest::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_DefConvLayoutTest9, + DefConvLayerCPUTest, + params9_Smoke, + DefConvLayerCPUTest::getTestCaseName); + +const auto params1 = ::testing::Combine(::testing::Combine(addSpParams, + ::testing::ValuesIn(static_shapes_to_test_representation( + buildStaticParams(spatParams1, channelParamsSingleGr2))), + defConvSpecificParams, + ::testing::ValuesIn(netPrecisions), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::ValuesIn(filterCPUInfoForDevice())); +const auto params2 = ::testing::Combine(::testing::Combine(addSpParams, + ::testing::ValuesIn(static_shapes_to_test_representation( + buildStaticParams(spatParams2, channelParamsSingleGr))), + defConvSpecificParams, + ::testing::ValuesIn(netPrecisions), 
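// (structure reminder for the paramsN definitions: the inner ::testing::Combine produces the
//  DefConvLayerTestParams tuple, and the outer one pairs it with CPUSpecificParams, matching
//  the DefConvLayerCPUTestParamsSet typedef above)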
+ ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::ValuesIn(filterCPUInfoForDevice())); +const auto params3 = ::testing::Combine(::testing::Combine(addSpParams, + ::testing::ValuesIn(static_shapes_to_test_representation( + buildStaticParams(spatParams3, channelParamsSingleGr))), + defConvSpecificParams, + ::testing::ValuesIn(netPrecisions), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::ValuesIn(filterCPUInfoForDevice())); +const auto params4 = ::testing::Combine(::testing::Combine(addSpParams, + ::testing::ValuesIn(static_shapes_to_test_representation( + buildStaticParams(spatParams4, channelParamsSingleGr))), + defConvSpecificParams, + ::testing::ValuesIn(netPrecisions), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::ValuesIn(filterCPUInfoForDevice())); const auto params5 = ::testing::Combine( - ::testing::Combine( - addSpParams, - ::testing::ValuesIn(static_shapes_to_test_representation(buildStaticParams(spatParams4, channelParamsMulGr))), - defConvSpecificParams, - ::testing::ValuesIn(netPrecisions), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfoForDevice(true))); -const auto params6 = ::testing::Combine( - ::testing::Combine( - addSpParams, - ::testing::ValuesIn(dynShapeChainRef), - defConvSpecificParams, - ::testing::ValuesIn(netPrecisions), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfoForDevice(true))); -const auto params7 = ::testing::Combine( - ::testing::Combine( - addSpParams, - ::testing::ValuesIn(dynShapeChainJIT), - defConvSpecificParams, - ::testing::ValuesIn(netPrecisions), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfoForDevice(false))); + ::testing::Combine( + addSpParams, + ::testing::ValuesIn(static_shapes_to_test_representation(buildStaticParams(spatParams4, channelParamsMulGr))), + defConvSpecificParams, + ::testing::ValuesIn(netPrecisions), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::ValuesIn(filterCPUInfoForDevice(true))); +const auto params6 = ::testing::Combine(::testing::Combine(addSpParams, + ::testing::ValuesIn(dynShapeChainRef), + defConvSpecificParams, + ::testing::ValuesIn(netPrecisions), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::ValuesIn(filterCPUInfoForDevice(true))); +const auto params7 = ::testing::Combine(::testing::Combine(addSpParams, + ::testing::ValuesIn(dynShapeChainJIT), + defConvSpecificParams, + ::testing::ValuesIn(netPrecisions), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::ValuesIn(filterCPUInfoForDevice(false))); // autopad cases -const auto params8 = ::testing::Combine( - ::testing::Combine( - autoPadAddSpParams, - ::testing::ValuesIn(static_shapes_to_test_representation(buildStaticParams(autoPadSpatParams, channelParamsSingleGr))), - defConvSpecificParams, - ::testing::ValuesIn(netPrecisions), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfoForDevice())); -const auto params9 = ::testing::Combine( - ::testing::Combine( - autoPadAddSpParams, - ::testing::ValuesIn(dynShapeChainJITAutoPad), - defConvSpecificParams, - ::testing::ValuesIn(netPrecisions), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfoForDevice(false))); -const auto params10 = ::testing::Combine( - ::testing::Combine( - addSpParamsDilationUneven, - ::testing::ValuesIn(static_shapes_to_test_representation(buildStaticParams(spatParamsDilationUneven, channelParamsSingleGr))), - 
defConvSpecificParams, - ::testing::ValuesIn(netPrecisions), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfoForDevice(false))); -const auto params11 = ::testing::Combine( - ::testing::Combine( - addSpParams, - ::testing::ValuesIn(static_shapes_to_test_representation(buildStaticParams(spatParams5_onnx2d, channelParams_onnx2d))), - defConvSpecificParams, - ::testing::ValuesIn(netPrecisions), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfoForDevice())); +const auto params8 = + ::testing::Combine(::testing::Combine(autoPadAddSpParams, + ::testing::ValuesIn(static_shapes_to_test_representation( + buildStaticParams(autoPadSpatParams, channelParamsSingleGr))), + defConvSpecificParams, + ::testing::ValuesIn(netPrecisions), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::ValuesIn(filterCPUInfoForDevice())); +const auto params9 = ::testing::Combine(::testing::Combine(autoPadAddSpParams, + ::testing::ValuesIn(dynShapeChainJITAutoPad), + defConvSpecificParams, + ::testing::ValuesIn(netPrecisions), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::ValuesIn(filterCPUInfoForDevice(false))); +const auto params10 = + ::testing::Combine(::testing::Combine(addSpParamsDilationUneven, + ::testing::ValuesIn(static_shapes_to_test_representation( + buildStaticParams(spatParamsDilationUneven, channelParamsSingleGr))), + defConvSpecificParams, + ::testing::ValuesIn(netPrecisions), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::ValuesIn(filterCPUInfoForDevice(false))); +const auto params11 = + ::testing::Combine(::testing::Combine(addSpParams, + ::testing::ValuesIn(static_shapes_to_test_representation( + buildStaticParams(spatParams5_onnx2d, channelParams_onnx2d))), + defConvSpecificParams, + ::testing::ValuesIn(netPrecisions), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::ValuesIn(filterCPUInfoForDevice())); INSTANTIATE_TEST_SUITE_P(DefConvLayoutTest1, DefConvLayerCPUTest, params1, DefConvLayerCPUTest::getTestCaseName); INSTANTIATE_TEST_SUITE_P(DefConvLayoutTest2, DefConvLayerCPUTest, params2, DefConvLayerCPUTest::getTestCaseName); @@ -634,37 +625,39 @@ INSTANTIATE_TEST_SUITE_P(DefConvLayoutTest10, DefConvLayerCPUTest, params10, Def INSTANTIATE_TEST_SUITE_P(DefConvLayoutTest11, DefConvLayerCPUTest, params11, DefConvLayerCPUTest::getTestCaseName); const std::vector> blockMultigroupChParam = { - {2}, // gr. - {1}, // def. gr. - {16}, // in. ch. per gr. - {16} // out. ch. per gr. + {2}, // gr. + {1}, // def. gr. + {16}, // in. ch. per gr. + {16} // out. ch. per gr. }; const std::vector> blockMultigroupSpatParam = { - {1}, // batch - {2, 2}, // in. spat. shape - {2, 2}, // off. spat. shape - {1, 1} // ker. spat. shape + {1}, // batch + {2, 2}, // in. spat. shape + {2, 2}, // off. spat. shape + {1, 1} // ker. spat. shape }; -const auto blockMultigroupAddParam = ::testing::Combine( - ::testing::Values(true), // with_bilinear_interpolation_pad - ::testing::Values(false), // with_modulation - ::testing::Values(OffsetType::ZERO) // offset type -); -const auto blockMultigroupKernelParam = ::testing::Combine( - ::testing::Values(ngraph::op::PadType::EXPLICIT), // pad. type - ::testing::Values(std::vector({0, 0})), // pad. begin - ::testing::Values(std::vector({0, 0})), // pad. 
end - ::testing::Values(std::vector {1, 1}), // strides - ::testing::Values(std::vector {1, 1}) // dilations +const auto blockMultigroupAddParam = ::testing::Combine(::testing::Values(true), // with_bilinear_interpolation_pad + ::testing::Values(false), // with_modulation + ::testing::Values(OffsetType::ZERO) // offset type ); -const auto blockMultigroupParam = ::testing::Combine( - ::testing::Combine( - blockMultigroupKernelParam, - ::testing::ValuesIn(static_shapes_to_test_representation(buildStaticParams(blockMultigroupSpatParam, blockMultigroupChParam))), - blockMultigroupAddParam, - ::testing::ValuesIn(netPrecisions), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(filterCPUInfoForDevice(true))); -INSTANTIATE_TEST_SUITE_P(blockMultigroupDefConvTest, DefConvLayerCPUTest, blockMultigroupParam, DefConvLayerCPUTest::getTestCaseName); -} // namespace -} // namespace CPULayerTestsDefinitions +const auto blockMultigroupKernelParam = + ::testing::Combine(::testing::Values(ov::op::PadType::EXPLICIT), // pad. type + ::testing::Values(std::vector({0, 0})), // pad. begin + ::testing::Values(std::vector({0, 0})), // pad. end + ::testing::Values(std::vector{1, 1}), // strides + ::testing::Values(std::vector{1, 1})); // dilations +const auto blockMultigroupParam = + ::testing::Combine(::testing::Combine(blockMultigroupKernelParam, + ::testing::ValuesIn(static_shapes_to_test_representation( + buildStaticParams(blockMultigroupSpatParam, blockMultigroupChParam))), + blockMultigroupAddParam, + ::testing::ValuesIn(netPrecisions), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::ValuesIn(filterCPUInfoForDevice(true))); +INSTANTIATE_TEST_SUITE_P(blockMultigroupDefConvTest, + DefConvLayerCPUTest, + blockMultigroupParam, + DefConvLayerCPUTest::getTestCaseName); +} // namespace +} // namespace test +} // namespace ov diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/depth_to_space.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/depth_to_space.cpp index 60a56a42b38b59..f39dd7c2f9d9d2 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/depth_to_space.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/depth_to_space.cpp @@ -3,33 +3,29 @@ // #include "shared_test_classes/single_layer/depth_to_space.hpp" + +#include "shared_test_classes/base/ov_subgraph.hpp" #include "test_utils/cpu_test_utils.hpp" #include "test_utils/filter_cpu_info.hpp" -#include "shared_test_classes/base/ov_subgraph.hpp" - -using namespace InferenceEngine; using namespace CPUTestUtils; -using namespace ngraph::opset3; -using namespace ov::test; - -namespace CPULayerTestsDefinitions { +namespace ov { +namespace test { -using DepthToSpaceLayerCPUTestParamSet = std::tuple< - InputShape, // Input shape - ElementType, // Input element type - DepthToSpace::DepthToSpaceMode, // Mode - std::size_t, // Block size - CPUSpecificParams ->; +using DepthToSpaceLayerCPUTestParamSet = std::tuple; class DepthToSpaceLayerCPUTest : public testing::WithParamInterface, - virtual public ov::test::SubgraphBaseTest, public CPUTestsBase { + virtual public ov::test::SubgraphBaseTest, + public CPUTestsBase { public: static std::string getTestCaseName(testing::TestParamInfo obj) { InputShape shapes; ElementType inType; - DepthToSpace::DepthToSpaceMode mode; + ov::op::v0::DepthToSpace::DepthToSpaceMode mode; std::size_t blockSize; CPUSpecificParams cpuParams; std::tie(shapes, inType, mode, blockSize, cpuParams) = obj.param; @@ -42,24 +38,25 @@ class 
DepthToSpaceLayerCPUTest : public testing::WithParamInterfaceGetParam(); @@ -88,127 +85,91 @@ TEST_P(DepthToSpaceLayerCPUTest, CompareWithRefs) { namespace { -const auto cpuParams_nChw16c = CPUSpecificParams {{nChw16c}, {nChw16c}, {}, {}}; -const auto cpuParams_nCdhw16c = CPUSpecificParams {{nCdhw16c}, {nCdhw16c}, {}, {}}; +const auto cpuParams_nChw16c = CPUSpecificParams{{nChw16c}, {nChw16c}, {}, {}}; +const auto cpuParams_nCdhw16c = CPUSpecificParams{{nCdhw16c}, {nCdhw16c}, {}, {}}; -const auto cpuParams_nChw8c = CPUSpecificParams {{nChw8c}, {nChw8c}, {}, {}}; -const auto cpuParams_nCdhw8c = CPUSpecificParams {{nCdhw8c}, {nCdhw8c}, {}, {}}; +const auto cpuParams_nChw8c = CPUSpecificParams{{nChw8c}, {nChw8c}, {}, {}}; +const auto cpuParams_nCdhw8c = CPUSpecificParams{{nCdhw8c}, {nCdhw8c}, {}, {}}; -const auto cpuParams_nhwc = CPUSpecificParams {{nhwc}, {nhwc}, {}, {}}; -const auto cpuParams_ndhwc = CPUSpecificParams {{ndhwc}, {ndhwc}, {}, {}}; +const auto cpuParams_nhwc = CPUSpecificParams{{nhwc}, {nhwc}, {}, {}}; +const auto cpuParams_ndhwc = CPUSpecificParams{{ndhwc}, {ndhwc}, {}, {}}; -const auto cpuParams_nchw = CPUSpecificParams {{nchw}, {nchw}, {}, {}}; -const auto cpuParams_ncdhw = CPUSpecificParams {{ncdhw}, {ncdhw}, {}, {}}; +const auto cpuParams_nchw = CPUSpecificParams{{nchw}, {nchw}, {}, {}}; +const auto cpuParams_ncdhw = CPUSpecificParams{{ncdhw}, {ncdhw}, {}, {}}; -const std::vector CPUParams4D = { - cpuParams_nhwc, - cpuParams_nchw -}; +const std::vector CPUParams4D = {cpuParams_nhwc, cpuParams_nchw}; -const std::vector CPUParamsBlocked4D = { - cpuParams_nChw16c, - cpuParams_nChw8c, - cpuParams_nhwc -}; +const std::vector CPUParamsBlocked4D = {cpuParams_nChw16c, cpuParams_nChw8c, cpuParams_nhwc}; -const std::vector CPUParams5D = { - cpuParams_ndhwc, - cpuParams_ncdhw -}; +const std::vector CPUParams5D = {cpuParams_ndhwc, cpuParams_ncdhw}; -const std::vector CPUParamsBlocked5D = { - cpuParams_nCdhw16c, - cpuParams_nCdhw8c, - cpuParams_ndhwc -}; +const std::vector CPUParamsBlocked5D = {cpuParams_nCdhw16c, cpuParams_nCdhw8c, cpuParams_ndhwc}; -const std::vector inputElementType = { - ElementType::f32, - ElementType::bf16, - ElementType::i8 -}; +const std::vector inputElementType = {ElementType::f32, ElementType::bf16, ElementType::i8}; -const std::vector depthToSpaceModes = { - DepthToSpace::DepthToSpaceMode::BLOCKS_FIRST, - DepthToSpace::DepthToSpaceMode::DEPTH_FIRST -}; +const std::vector depthToSpaceModes = { + ov::op::v0::DepthToSpace::DepthToSpaceMode::BLOCKS_FIRST, + ov::op::v0::DepthToSpace::DepthToSpaceMode::DEPTH_FIRST}; /* *========================* Static Shapes Tests *========================* */ namespace static_shapes { -const std::vector inputShapesBS2_4D = { - {1, 64, 1, 1}, - {1, 64, 1, 3}, - {1, 128, 3, 3}, - {2, 128, 1, 1}, - {1, 192, 2, 2}, - {2, 256, 2, 3}, - {1, 512, 2, 1} -}; - -const std::vector inputShapesBS3_4D = { - {1, 27, 1, 1}, - {1, 27, 2, 3}, - {1, 18, 2, 3}, - {3, 18, 1, 1}, - {2, 18, 3, 1} -}; - -INSTANTIATE_TEST_SUITE_P(smoke_CPUDepthToSpaceBS2_4D, DepthToSpaceLayerCPUTest, - testing::Combine( - testing::ValuesIn(static_shapes_to_test_representation(inputShapesBS2_4D)), - testing::ValuesIn(inputElementType), - testing::ValuesIn(depthToSpaceModes), - testing::Values(1, 2), - testing::ValuesIn(filterCPUInfoForDevice(CPUParamsBlocked4D))), +const std::vector inputShapesBS2_4D = + {{1, 64, 1, 1}, {1, 64, 1, 3}, {1, 128, 3, 3}, {2, 128, 1, 1}, {1, 192, 2, 2}, {2, 256, 2, 3}, {1, 512, 2, 1}}; + +const std::vector inputShapesBS3_4D = {{1, 27, 1, 1}, 
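// (note: DepthToSpace in 4D requires the channel count to be divisible by blockSize^2; the
//  27/18-channel shapes below pair with block size 3, i.e. C % 9 == 0, just as the 64..512
//  channel counts above satisfy C % 4 == 0 for block size 2)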
+ {1, 27, 2, 3}, + {1, 18, 2, 3}, + {3, 18, 1, 1}, + {2, 18, 3, 1}}; + +INSTANTIATE_TEST_SUITE_P(smoke_CPUDepthToSpaceBS2_4D, + DepthToSpaceLayerCPUTest, + testing::Combine(testing::ValuesIn(static_shapes_to_test_representation(inputShapesBS2_4D)), + testing::ValuesIn(inputElementType), + testing::ValuesIn(depthToSpaceModes), + testing::Values(1, 2), + testing::ValuesIn(filterCPUInfoForDevice(CPUParamsBlocked4D))), DepthToSpaceLayerCPUTest::getTestCaseName); -INSTANTIATE_TEST_SUITE_P(smoke_CPUDepthToSpaceStaticBS3_4D, DepthToSpaceLayerCPUTest, - testing::Combine( - testing::ValuesIn(static_shapes_to_test_representation(inputShapesBS3_4D)), - testing::ValuesIn(inputElementType), - testing::ValuesIn(depthToSpaceModes), - testing::Values(1, 3), - testing::ValuesIn(filterCPUInfoForDevice(CPUParams4D))), +INSTANTIATE_TEST_SUITE_P(smoke_CPUDepthToSpaceStaticBS3_4D, + DepthToSpaceLayerCPUTest, + testing::Combine(testing::ValuesIn(static_shapes_to_test_representation(inputShapesBS3_4D)), + testing::ValuesIn(inputElementType), + testing::ValuesIn(depthToSpaceModes), + testing::Values(1, 3), + testing::ValuesIn(filterCPUInfoForDevice(CPUParams4D))), DepthToSpaceLayerCPUTest::getTestCaseName); -const std::vector inputShapesBS2_5D = { - {1, 128, 1, 1, 1}, - {1, 128, 2, 1, 2}, - {1, 256, 2, 1, 3}, - {2, 256, 3, 1, 1}, - {1, 384, 1, 2, 2}, - {2, 512, 1, 2, 1} -}; - -const std::vector inputShapesBS3_5D = { - {1, 54, 1, 1, 1}, - {1, 54, 2, 1, 2}, - {3, 54, 1, 1, 1}, - {2, 54, 3, 1, 2}, - {1, 54, 3, 2, 2} -}; - -INSTANTIATE_TEST_SUITE_P(smoke_CPUDepthToSpaceStaticBS2_5D, DepthToSpaceLayerCPUTest, - testing::Combine( - testing::ValuesIn(static_shapes_to_test_representation(inputShapesBS2_5D)), - testing::ValuesIn(inputElementType), - testing::ValuesIn(depthToSpaceModes), - testing::Values(1, 2), - testing::ValuesIn(filterCPUInfoForDevice(CPUParamsBlocked5D))), +const std::vector inputShapesBS2_5D = + {{1, 128, 1, 1, 1}, {1, 128, 2, 1, 2}, {1, 256, 2, 1, 3}, {2, 256, 3, 1, 1}, {1, 384, 1, 2, 2}, {2, 512, 1, 2, 1}}; + +const std::vector inputShapesBS3_5D = {{1, 54, 1, 1, 1}, + {1, 54, 2, 1, 2}, + {3, 54, 1, 1, 1}, + {2, 54, 3, 1, 2}, + {1, 54, 3, 2, 2}}; + +INSTANTIATE_TEST_SUITE_P(smoke_CPUDepthToSpaceStaticBS2_5D, + DepthToSpaceLayerCPUTest, + testing::Combine(testing::ValuesIn(static_shapes_to_test_representation(inputShapesBS2_5D)), + testing::ValuesIn(inputElementType), + testing::ValuesIn(depthToSpaceModes), + testing::Values(1, 2), + testing::ValuesIn(filterCPUInfoForDevice(CPUParamsBlocked5D))), DepthToSpaceLayerCPUTest::getTestCaseName); -INSTANTIATE_TEST_SUITE_P(smoke_CPUDepthToSpaceStaticBS3_5D, DepthToSpaceLayerCPUTest, - testing::Combine( - testing::ValuesIn(static_shapes_to_test_representation(inputShapesBS3_5D)), - testing::ValuesIn(inputElementType), - testing::ValuesIn(depthToSpaceModes), - testing::Values(1, 3), - testing::ValuesIn(filterCPUInfoForDevice(CPUParams5D))), +INSTANTIATE_TEST_SUITE_P(smoke_CPUDepthToSpaceStaticBS3_5D, + DepthToSpaceLayerCPUTest, + testing::Combine(testing::ValuesIn(static_shapes_to_test_representation(inputShapesBS3_5D)), + testing::ValuesIn(inputElementType), + testing::ValuesIn(depthToSpaceModes), + testing::Values(1, 3), + testing::ValuesIn(filterCPUInfoForDevice(CPUParams5D))), DepthToSpaceLayerCPUTest::getTestCaseName); -} // namespace static_shapes +} // namespace static_shapes /* *========================* *==================* *========================* */ /* *========================* Dynamic Shapes Tests *========================* */ @@ -226,11 +187,8 @@ 
const std::vector inputShapes4D = { }; const std::vector inputShapes5D = { - {{-1, -1, -1, -1, -1}, // dynamic - {{2, 216, 1, 1, 1}, - {1, 216, 3, 1, 2}, - {1, 432, 2, 3, 1}, - {2, 216, 1, 1, 1}}}, // target + {{-1, -1, -1, -1, -1}, // dynamic + {{2, 216, 1, 1, 1}, {1, 216, 3, 1, 2}, {1, 432, 2, 3, 1}, {2, 216, 1, 1, 1}}}, // target {{{1, 3}, {216, 432}, {1, 4}, {1, 4}, {1, 4}}, // dynamic {{3, 216, 2, 2, 2}, {1, 432, 1, 1, 1}, {3, 216, 2, 2, 2}}}, // target @@ -244,53 +202,54 @@ const std::vector inputShapesBlocked5D = { {{1, 256, 1, 1, 1}, {1, 256, 2, 1, 3}, {3, 256, 3, 1, 2}, {1, 256, 2, 1, 3}}}, // target }; -INSTANTIATE_TEST_SUITE_P(smoke_CPUDepthToSpaceDynamic4D, DepthToSpaceLayerCPUTest, - testing::Combine( - testing::ValuesIn(inputShapes4D), - testing::ValuesIn(inputElementType), - testing::ValuesIn(depthToSpaceModes), - testing::Values(1, 2, 3), - testing::ValuesIn(filterCPUInfoForDevice(CPUParams4D))), +INSTANTIATE_TEST_SUITE_P(smoke_CPUDepthToSpaceDynamic4D, + DepthToSpaceLayerCPUTest, + testing::Combine(testing::ValuesIn(inputShapes4D), + testing::ValuesIn(inputElementType), + testing::ValuesIn(depthToSpaceModes), + testing::Values(1, 2, 3), + testing::ValuesIn(filterCPUInfoForDevice(CPUParams4D))), + DepthToSpaceLayerCPUTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_CPUDepthToSpaceDynamicBlocksFirstBlocked4D, + DepthToSpaceLayerCPUTest, + testing::Combine(testing::Values(inputShapes4D[1]), + testing::ValuesIn(inputElementType), + testing::Values(ov::op::v0::DepthToSpace::DepthToSpaceMode::BLOCKS_FIRST), + testing::Values(1, 2, 3), + testing::ValuesIn(filterCPUInfoForDevice(CPUParamsBlocked4D))), DepthToSpaceLayerCPUTest::getTestCaseName); -INSTANTIATE_TEST_SUITE_P(smoke_CPUDepthToSpaceDynamicBlocksFirstBlocked4D, DepthToSpaceLayerCPUTest, - testing::Combine( - testing::Values(inputShapes4D[1]), - testing::ValuesIn(inputElementType), - testing::Values(DepthToSpace::DepthToSpaceMode::BLOCKS_FIRST), - testing::Values(1, 2, 3), - testing::ValuesIn(filterCPUInfoForDevice(CPUParamsBlocked4D))), - DepthToSpaceLayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_CPUDepthToSpaceDynamicDepthFirstBlocked4D, DepthToSpaceLayerCPUTest, - testing::Combine( - testing::Values(inputShapes4D[1]), - testing::ValuesIn(inputElementType), - testing::Values(DepthToSpace::DepthToSpaceMode::DEPTH_FIRST), - testing::Values(1, 2), - testing::ValuesIn(filterCPUInfoForDevice(CPUParamsBlocked4D))), +INSTANTIATE_TEST_SUITE_P(smoke_CPUDepthToSpaceDynamicDepthFirstBlocked4D, + DepthToSpaceLayerCPUTest, + testing::Combine(testing::Values(inputShapes4D[1]), + testing::ValuesIn(inputElementType), + testing::Values(ov::op::v0::DepthToSpace::DepthToSpaceMode::DEPTH_FIRST), + testing::Values(1, 2), + testing::ValuesIn(filterCPUInfoForDevice(CPUParamsBlocked4D))), DepthToSpaceLayerCPUTest::getTestCaseName); -INSTANTIATE_TEST_SUITE_P(smoke_CPUDepthToSpaceDynamic5D, DepthToSpaceLayerCPUTest, - testing::Combine( - testing::ValuesIn(inputShapes5D), - testing::ValuesIn(inputElementType), - testing::ValuesIn(depthToSpaceModes), - testing::Values(1, 2, 3), - testing::ValuesIn(filterCPUInfoForDevice(CPUParams5D))), +INSTANTIATE_TEST_SUITE_P(smoke_CPUDepthToSpaceDynamic5D, + DepthToSpaceLayerCPUTest, + testing::Combine(testing::ValuesIn(inputShapes5D), + testing::ValuesIn(inputElementType), + testing::ValuesIn(depthToSpaceModes), + testing::Values(1, 2, 3), + testing::ValuesIn(filterCPUInfoForDevice(CPUParams5D))), DepthToSpaceLayerCPUTest::getTestCaseName); 
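// A minimal standalone sketch (illustrative only, not part of this patch) of the InputShape
// convention the dynamic cases above rely on: the first member of each entry is the model-side
// dynamic shape ("dynamic"), the second lists the concrete per-request shapes ("target"), each of
// which must be compatible with the declared bounds. Only the public ov::PartialShape API is
// assumed; the shape values are taken from the 5D cases above.
#include <openvino/core/partial_shape.hpp>
#include <cassert>

int main() {
    // Fully dynamic rank-5 shape, as in {-1, -1, -1, -1, -1} above.
    ov::PartialShape fully_dynamic{-1, -1, -1, -1, -1};
    // Interval-bounded shape, as in {{1, 3}, {216, 432}, {1, 4}, {1, 4}, {1, 4}} above.
    ov::PartialShape bounded{{1, 3}, {216, 432}, {1, 4}, {1, 4}, {1, 4}};
    // One of the target shapes listed for the bounded case.
    ov::PartialShape target{3, 216, 2, 2, 2};
    assert(fully_dynamic.compatible(target));  // any static rank-5 shape matches
    assert(bounded.compatible(target));        // 3 in [1,3], 216 in [216,432], 2 in [1,4]
    return 0;
}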
-INSTANTIATE_TEST_SUITE_P(smoke_CPUDepthToSpaceDynamicCPUSpecific5D, DepthToSpaceLayerCPUTest, - testing::Combine( - testing::ValuesIn(inputShapesBlocked5D), - testing::ValuesIn(inputElementType), - testing::ValuesIn(depthToSpaceModes), - testing::Values(1, 2), - testing::ValuesIn(filterCPUInfoForDevice(CPUParamsBlocked5D))), +INSTANTIATE_TEST_SUITE_P(smoke_CPUDepthToSpaceDynamicCPUSpecific5D, + DepthToSpaceLayerCPUTest, + testing::Combine(testing::ValuesIn(inputShapesBlocked5D), + testing::ValuesIn(inputElementType), + testing::ValuesIn(depthToSpaceModes), + testing::Values(1, 2), + testing::ValuesIn(filterCPUInfoForDevice(CPUParamsBlocked5D))), DepthToSpaceLayerCPUTest::getTestCaseName); } // namespace dynamic_shapes /* *========================* *==================* *========================* */ -} // namespace -} // namespace CPULayerTestsDefinitions +} // namespace +} // namespace test +} // namespace ov diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/detection_output.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/detection_output.cpp index e42f7e6ee6f31b..0fd802163263f3 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/detection_output.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/detection_output.cpp @@ -4,83 +4,89 @@ #include "shared_test_classes/single_layer/detection_output.hpp" -#include "ov_models/builders.hpp" -#include -#include "test_utils/cpu_test_utils.hpp" +#include "common_test_utils/ov_tensor_utils.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" +#include "test_utils/cpu_test_utils.hpp" -using namespace InferenceEngine; using namespace CPUTestUtils; -using namespace ov; -using namespace test; - -namespace CPULayerTestsDefinitions { - -using namespace ov::test; - -enum { - idxLocation, - idxConfidence, - idxPriors, - idxArmConfidence, - idxArmLocation, - numInputs -}; - -using ParamsWhichSizeDependsDynamic = std::tuple< - bool, // varianceEncodedInTarget - bool, // shareLocation - bool, // normalized - size_t, // inputHeight - size_t, // inputWidth - ov::test::InputShape, // "Location" input - ov::test::InputShape, // "Confidence" input - ov::test::InputShape, // "Priors" input - ov::test::InputShape, // "ArmConfidence" input - ov::test::InputShape // "ArmLocation" input - >; - -using DetectionOutputAttributes = std::tuple< - int, // numClasses - int, // backgroundLabelId - int, // topK - std::vector, // keepTopK - std::string, // codeType - float, // nmsThreshold - float, // confidenceThreshold - bool, // clip_afterNms - bool, // clip_beforeNms - bool // decreaseLabelId - >; - -using DetectionOutputParamsDynamic = std::tuple< - DetectionOutputAttributes, - ParamsWhichSizeDependsDynamic, - size_t, // Number of batch - float, // objectnessScore - bool, // replace dynamic shapes to intervals - std::string // Device name - >; +namespace ov { +namespace test { + +enum { idxLocation, idxConfidence, idxPriors, idxArmConfidence, idxArmLocation, numInputs }; + +using ParamsWhichSizeDependsDynamic = std::tuple; + +using DetectionOutputAttributes = std::tuple, // keepTopK + std::string, // codeType + float, // nmsThreshold + float, // confidenceThreshold + bool, // clip_afterNms + bool, // clip_beforeNms + bool // decreaseLabelId + >; + +using DetectionOutputParamsDynamic = std::tuple; class DetectionOutputLayerCPUTest : public testing::WithParamInterface, - virtual public SubgraphBaseTest, public CPUTestsBase { + virtual public SubgraphBaseTest, + public CPUTestsBase { public: static 
std::string getTestCaseName(const testing::TestParamInfo& obj) { DetectionOutputAttributes commonAttrs; ParamsWhichSizeDependsDynamic specificAttrs; - ngraph::op::DetectionOutputAttrs attrs; + ov::op::v0::DetectionOutput::Attributes attrs; size_t batch; bool replaceDynamicShapesToIntervals; std::string targetDevice; - std::tie(commonAttrs, specificAttrs, batch, attrs.objectness_score, replaceDynamicShapesToIntervals, targetDevice) = obj.param; - - std::tie(attrs.num_classes, attrs.background_label_id, attrs.top_k, attrs.keep_top_k, attrs.code_type, attrs.nms_threshold, attrs.confidence_threshold, - attrs.clip_after_nms, attrs.clip_before_nms, attrs.decrease_label_id) = commonAttrs; + std::tie(commonAttrs, + specificAttrs, + batch, + attrs.objectness_score, + replaceDynamicShapesToIntervals, + targetDevice) = obj.param; + + std::tie(attrs.num_classes, + attrs.background_label_id, + attrs.top_k, + attrs.keep_top_k, + attrs.code_type, + attrs.nms_threshold, + attrs.confidence_threshold, + attrs.clip_after_nms, + attrs.clip_before_nms, + attrs.decrease_label_id) = commonAttrs; const size_t numInputs = 5; std::vector inShapes(numInputs); - std::tie(attrs.variance_encoded_in_target, attrs.share_location, attrs.normalized, attrs.input_height, attrs.input_width, - inShapes[idxLocation], inShapes[idxConfidence], inShapes[idxPriors], inShapes[idxArmConfidence], inShapes[idxArmLocation]) = specificAttrs; + std::tie(attrs.variance_encoded_in_target, + attrs.share_location, + attrs.normalized, + attrs.input_height, + attrs.input_width, + inShapes[idxLocation], + inShapes[idxConfidence], + inShapes[idxPriors], + inShapes[idxArmConfidence], + inShapes[idxArmLocation]) = specificAttrs; if (inShapes[idxArmConfidence].first.rank().get_length() == 0ul) { inShapes.resize(3); @@ -109,34 +115,8 @@ class DetectionOutputLayerCPUTest : public testing::WithParamInterface& targetInputStaticShapes) override { - inputs.clear(); - const auto& funcInputs = function->inputs(); - for (auto i = 0ul; i < funcInputs.size(); ++i) { - const auto &funcInput = funcInputs[i]; - InferenceEngine::Blob::Ptr blob; - int32_t resolution = 1; - uint32_t range = 1; - if (i == 2) { - if (attrs.normalized) { - resolution = 100; - } else { - range = 10; - } - } else if (i == 1 || i == 3) { - resolution = 1000; - } else { - resolution = 10; - } - - auto tensor = ov::test::utils::create_and_fill_tensor(funcInput.get_element_type(), targetInputStaticShapes[i], range, 0, resolution); - inputs.insert({funcInput.get_node_shared_ptr(), tensor}); - } - } - - void compare( - const std::vector& expectedTensors, - const std::vector& actualTensors) override { + void compare(const std::vector& expectedTensors, + const std::vector& actualTensors) override { ASSERT_EQ(expectedTensors.size(), actualTensors.size()); for (size_t i = 0; i < expectedTensors.size(); ++i) { @@ -146,7 +126,7 @@ class DetectionOutputLayerCPUTest : public testing::WithParamInterface(); - for (size_t i = 0; i < expected.get_size(); i+=7) { + for (size_t i = 0; i < expected.get_size(); i += 7) { if (expBuf[i] == -1) break; expSize += 7; @@ -154,7 +134,7 @@ class DetectionOutputLayerCPUTest : public testing::WithParamInterface(); - for (size_t i = 0; i < actual.get_size(); i+=7) { + for (size_t i = 0; i < actual.get_size(); i += 7) { if (actBuf[i] == -1) break; actSize += 7; @@ -171,14 +151,35 @@ class DetectionOutputLayerCPUTest : public testing::WithParamInterfaceGetParam(); - - std::tie(attrs.num_classes, attrs.background_label_id, attrs.top_k, attrs.keep_top_k, attrs.code_type, 
attrs.nms_threshold, attrs.confidence_threshold, - attrs.clip_after_nms, attrs.clip_before_nms, attrs.decrease_label_id) = commonAttrs; + std::tie(commonAttrs, + specificAttrs, + batch, + attrs.objectness_score, + replaceDynamicShapesToIntervals, + targetDevice) = this->GetParam(); + + std::tie(attrs.num_classes, + attrs.background_label_id, + attrs.top_k, + attrs.keep_top_k, + attrs.code_type, + attrs.nms_threshold, + attrs.confidence_threshold, + attrs.clip_after_nms, + attrs.clip_before_nms, + attrs.decrease_label_id) = commonAttrs; inShapes.resize(numInputs); - std::tie(attrs.variance_encoded_in_target, attrs.share_location, attrs.normalized, attrs.input_height, attrs.input_width, - inShapes[idxLocation], inShapes[idxConfidence], inShapes[idxPriors], inShapes[idxArmConfidence], inShapes[idxArmLocation]) = specificAttrs; + std::tie(attrs.variance_encoded_in_target, + attrs.share_location, + attrs.normalized, + attrs.input_height, + attrs.input_width, + inShapes[idxLocation], + inShapes[idxConfidence], + inShapes[idxPriors], + inShapes[idxArmConfidence], + inShapes[idxArmLocation]) = specificAttrs; if (inShapes[idxArmConfidence].first.rank().get_length() == 0) { inShapes.resize(3); @@ -195,7 +196,7 @@ class DetectionOutputLayerCPUTest : public testing::WithParamInterface(params[0], params[1], params[2], attrs); else if (params.size() == 5) - detOut = std::make_shared(params[0], params[1], params[2], params[3], params[4], attrs); + detOut = std::make_shared(params[0], + params[1], + params[2], + params[3], + params[4], + attrs); else OPENVINO_THROW("DetectionOutput layer supports only 3 or 5 inputs"); - ngraph::ResultVector results{std::make_shared(detOut)}; - function = std::make_shared(results, params, "DetectionOutputDynamic"); + ov::ResultVector results{std::make_shared(detOut)}; + function = std::make_shared(results, params, "DetectionOutputDynamic"); } private: @@ -236,13 +242,12 @@ class DetectionOutputLayerCPUTest : public testing::WithParamInterface inShapes; }; @@ -255,7 +260,7 @@ namespace { const int numClasses = 11; const int backgroundLabelId = 0; const std::vector topK = {75}; -const std::vector> keepTopK = { {50}, {100} }; +const std::vector> keepTopK = {{50}, {100}}; const std::vector codeType = {"caffe.PriorBoxParameter.CORNER", "caffe.PriorBoxParameter.CENTER_SIZE"}; const float nmsThreshold = 0.5f; const float confidenceThreshold = 0.3f; @@ -265,290 +270,337 @@ const std::vector decreaseLabelId = {true, false}; const float objectnessScore = 0.4f; const std::vector numberBatch = {1, 2}; -const auto commonAttributes = ::testing::Combine( - ::testing::Values(numClasses), - ::testing::Values(backgroundLabelId), - ::testing::ValuesIn(topK), - ::testing::ValuesIn(keepTopK), - ::testing::ValuesIn(codeType), - ::testing::Values(nmsThreshold), - ::testing::Values(confidenceThreshold), - ::testing::ValuesIn(clipAfterNms), - ::testing::ValuesIn(clipBeforeNms), - ::testing::ValuesIn(decreaseLabelId) -); +const auto commonAttributes = ::testing::Combine(::testing::Values(numClasses), + ::testing::Values(backgroundLabelId), + ::testing::ValuesIn(topK), + ::testing::ValuesIn(keepTopK), + ::testing::ValuesIn(codeType), + ::testing::Values(nmsThreshold), + ::testing::Values(confidenceThreshold), + ::testing::ValuesIn(clipAfterNms), + ::testing::ValuesIn(clipBeforeNms), + ::testing::ValuesIn(decreaseLabelId)); /* =============== 3 inputs cases =============== */ const std::vector specificParams3InDynamic = { // dynamic input shapes - ParamsWhichSizeDependsDynamic { - true, true, true, 
1, 1, - { - // input model dynamic shapes - {ov::Dimension::dynamic(), ov::Dimension::dynamic()}, - // input tensor shapes - {{1, 60}, {1, 120}} - }, - { - // input model dynamic shapes - {ov::Dimension::dynamic(), ov::Dimension::dynamic()}, - // input tensor shapes - {{1, 165}, {1, 330}} - }, - { - // input model dynamic shapes - {ov::Dimension::dynamic(), ov::Dimension::dynamic(), ov::Dimension::dynamic()}, - // input tensor shapes - {{1, 1, 60}, {1, 1, 120}} - }, - {}, - {} - }, - ParamsWhichSizeDependsDynamic { - true, false, true, 1, 1, + ParamsWhichSizeDependsDynamic{true, + true, + true, + 1, + 1, + {// input model dynamic shapes + {ov::Dimension::dynamic(), ov::Dimension::dynamic()}, + // input tensor shapes + {{1, 60}, {1, 120}}}, + {// input model dynamic shapes + {ov::Dimension::dynamic(), ov::Dimension::dynamic()}, + // input tensor shapes + {{1, 165}, {1, 330}}}, + {// input model dynamic shapes + {ov::Dimension::dynamic(), ov::Dimension::dynamic(), ov::Dimension::dynamic()}, + // input tensor shapes + {{1, 1, 60}, {1, 1, 120}}}, + {}, + {}}, + ParamsWhichSizeDependsDynamic{ + true, + false, + true, + 1, + 1, {{ov::Dimension::dynamic(), ov::Dimension::dynamic()}, {{1, 660}, {1, 1320}}}, {{ov::Dimension::dynamic(), ov::Dimension::dynamic()}, {{1, 165}, {1, 330}}}, {{ov::Dimension::dynamic(), ov::Dimension::dynamic(), ov::Dimension::dynamic()}, {{1, 1, 60}, {1, 1, 120}}}, {}, - {} - }, - ParamsWhichSizeDependsDynamic { - false, true, true, 1, 1, + {}}, + ParamsWhichSizeDependsDynamic{ + false, + true, + true, + 1, + 1, {{ov::Dimension::dynamic(), ov::Dimension::dynamic()}, {{1, 60}, {1, 120}}}, {{ov::Dimension::dynamic(), ov::Dimension::dynamic()}, {{1, 165}, {1, 330}}}, {{ov::Dimension::dynamic(), ov::Dimension::dynamic(), ov::Dimension::dynamic()}, {{1, 2, 60}, {1, 2, 120}}}, {}, - {} - }, - ParamsWhichSizeDependsDynamic { - false, false, true, 1, 1, + {}}, + ParamsWhichSizeDependsDynamic{ + false, + false, + true, + 1, + 1, {{ov::Dimension::dynamic(), ov::Dimension::dynamic()}, {{1, 660}, {1, 1320}}}, {{ov::Dimension::dynamic(), ov::Dimension::dynamic()}, {{1, 165}, {1, 330}}}, {{ov::Dimension::dynamic(), ov::Dimension::dynamic(), ov::Dimension::dynamic()}, {{1, 2, 60}, {1, 2, 120}}}, {}, - {} - }, - ParamsWhichSizeDependsDynamic { - true, true, false, 10, 10, + {}}, + ParamsWhichSizeDependsDynamic{ + true, + true, + false, + 10, + 10, {{ov::Dimension::dynamic(), ov::Dimension::dynamic()}, {{1, 60}, {1, 120}}}, {{ov::Dimension::dynamic(), ov::Dimension::dynamic()}, {{1, 165}, {1, 330}}}, {{ov::Dimension::dynamic(), ov::Dimension::dynamic(), ov::Dimension::dynamic()}, {{1, 1, 75}, {1, 1, 150}}}, {}, - {} - }, - ParamsWhichSizeDependsDynamic { - true, false, false, 10, 10, + {}}, + ParamsWhichSizeDependsDynamic{ + true, + false, + false, + 10, + 10, {{ov::Dimension::dynamic(), ov::Dimension::dynamic()}, {{1, 660}, {1, 1320}}}, {{ov::Dimension::dynamic(), ov::Dimension::dynamic()}, {{1, 165}, {1, 330}}}, {{ov::Dimension::dynamic(), ov::Dimension::dynamic(), ov::Dimension::dynamic()}, {{1, 1, 75}, {1, 1, 150}}}, {}, - {} - }, - ParamsWhichSizeDependsDynamic { - false, true, false, 10, 10, + {}}, + ParamsWhichSizeDependsDynamic{ + false, + true, + false, + 10, + 10, {{ov::Dimension::dynamic(), ov::Dimension::dynamic()}, {{1, 60}, {1, 120}}}, {{ov::Dimension::dynamic(), ov::Dimension::dynamic()}, {{1, 165}, {1, 330}}}, {{ov::Dimension::dynamic(), ov::Dimension::dynamic(), ov::Dimension::dynamic()}, {{1, 2, 75}, {1, 2, 150}}}, {}, - {} - }, - ParamsWhichSizeDependsDynamic { - false, 
false, false, 10, 10, + {}}, + ParamsWhichSizeDependsDynamic{ + false, + false, + false, + 10, + 10, {{ov::Dimension::dynamic(), ov::Dimension::dynamic()}, {{1, 660}, {1, 1320}}}, {{ov::Dimension::dynamic(), ov::Dimension::dynamic()}, {{1, 165}, {1, 330}}}, {{ov::Dimension::dynamic(), ov::Dimension::dynamic(), ov::Dimension::dynamic()}, {{1, 2, 75}, {1, 2, 150}}}, {}, - {} - }, + {}}, }; -const auto params3InputsDynamic = ::testing::Combine( - commonAttributes, - ::testing::ValuesIn(specificParams3InDynamic), - ::testing::ValuesIn(numberBatch), - ::testing::Values(0.0f), - ::testing::Values(false, true), - ::testing::Values(ov::test::utils::DEVICE_CPU) -); - -INSTANTIATE_TEST_SUITE_P( - smoke_CPUDetectionOutputDynamic3In, - DetectionOutputLayerCPUTest, - params3InputsDynamic, - DetectionOutputLayerCPUTest::getTestCaseName); +const auto params3InputsDynamic = ::testing::Combine(commonAttributes, + ::testing::ValuesIn(specificParams3InDynamic), + ::testing::ValuesIn(numberBatch), + ::testing::Values(0.0f), + ::testing::Values(false, true), + ::testing::Values(ov::test::utils::DEVICE_CPU)); + +INSTANTIATE_TEST_SUITE_P(smoke_CPUDetectionOutputDynamic3In, + DetectionOutputLayerCPUTest, + params3InputsDynamic, + DetectionOutputLayerCPUTest::getTestCaseName); //////////////////large tensor///////////////// // There are two major implementations for the DO node: sparse and dense. -// This test config(shapes, threshold...) go to sparsity path in most machines(as long as L3 per core cache is smaller than 8M). +// This test config (shapes, thresholds, ...) takes the sparse path on most machines (as long as the L3 cache +// per core is smaller than 8 MB). const std::vector specificParams3InDynamicLargeTensor = { // dynamic input shapes - ParamsWhichSizeDependsDynamic { - true, true, true, 1, 1, - {{ov::Dimension::dynamic(), ov::Dimension::dynamic()}, {{1, 381360}, {1, 381360}}}, - {{ov::Dimension::dynamic(), ov::Dimension::dynamic()}, {{1, 1048740}, {1, 1048740}}}, - {{ov::Dimension::dynamic(), ov::Dimension::dynamic(), ov::Dimension::dynamic()}, {{1, 1, 381360}, {1, 1, 381360}}}, - {}, - {} - }, - ParamsWhichSizeDependsDynamic { - false, true, true, 1, 1, - {{ov::Dimension::dynamic(), ov::Dimension::dynamic()}, {{1, 381360}, {1, 381360}}}, - {{ov::Dimension::dynamic(), ov::Dimension::dynamic()}, {{1, 1048740}, {1, 1048740}}}, - {{ov::Dimension::dynamic(), ov::Dimension::dynamic(), ov::Dimension::dynamic()}, {{1, 1, 381360}, {1, 1, 381360}}}, - {}, - {} - }, + ParamsWhichSizeDependsDynamic{true, + true, + true, + 1, + 1, + {{ov::Dimension::dynamic(), ov::Dimension::dynamic()}, {{1, 381360}, {1, 381360}}}, + {{ov::Dimension::dynamic(), ov::Dimension::dynamic()}, {{1, 1048740}, {1, 1048740}}}, + {{ov::Dimension::dynamic(), ov::Dimension::dynamic(), ov::Dimension::dynamic()}, + {{1, 1, 381360}, {1, 1, 381360}}}, + {}, + {}}, + ParamsWhichSizeDependsDynamic{false, + true, + true, + 1, + 1, + {{ov::Dimension::dynamic(), ov::Dimension::dynamic()}, {{1, 381360}, {1, 381360}}}, + {{ov::Dimension::dynamic(), ov::Dimension::dynamic()}, {{1, 1048740}, {1, 1048740}}}, + {{ov::Dimension::dynamic(), ov::Dimension::dynamic(), ov::Dimension::dynamic()}, + {{1, 1, 381360}, {1, 1, 381360}}}, + {}, + {}}, }; const std::vector confThreshold = {0.032f, 0.88f}; -const auto commonAttributesLargeTensor = ::testing::Combine( - ::testing::Values(numClasses), - ::testing::Values(backgroundLabelId), - ::testing::ValuesIn(topK), - ::testing::ValuesIn(keepTopK), - ::testing::ValuesIn(codeType), - ::testing::Values(nmsThreshold), - 
::testing::ValuesIn(confThreshold), - ::testing::ValuesIn(clipAfterNms), - ::testing::ValuesIn(clipBeforeNms), - ::testing::Values(false) -); - -const auto params3InputsDynamicLargeTensor = ::testing::Combine( - commonAttributesLargeTensor, - ::testing::ValuesIn(specificParams3InDynamicLargeTensor), - ::testing::ValuesIn(numberBatch), - ::testing::Values(0.0f), - ::testing::Values(false, true), - ::testing::Values(ov::test::utils::DEVICE_CPU) -); -INSTANTIATE_TEST_SUITE_P( - CPUDetectionOutputDynamic3InLargeTensor, - DetectionOutputLayerCPUTest, - params3InputsDynamicLargeTensor, - DetectionOutputLayerCPUTest::getTestCaseName); +const auto commonAttributesLargeTensor = ::testing::Combine(::testing::Values(numClasses), + ::testing::Values(backgroundLabelId), + ::testing::ValuesIn(topK), + ::testing::ValuesIn(keepTopK), + ::testing::ValuesIn(codeType), + ::testing::Values(nmsThreshold), + ::testing::ValuesIn(confThreshold), + ::testing::ValuesIn(clipAfterNms), + ::testing::ValuesIn(clipBeforeNms), + ::testing::Values(false)); + +const auto params3InputsDynamicLargeTensor = + ::testing::Combine(commonAttributesLargeTensor, + ::testing::ValuesIn(specificParams3InDynamicLargeTensor), + ::testing::ValuesIn(numberBatch), + ::testing::Values(0.0f), + ::testing::Values(false, true), + ::testing::Values(ov::test::utils::DEVICE_CPU)); +INSTANTIATE_TEST_SUITE_P(CPUDetectionOutputDynamic3InLargeTensor, + DetectionOutputLayerCPUTest, + params3InputsDynamicLargeTensor, + DetectionOutputLayerCPUTest::getTestCaseName); /* =============== 5 inputs cases =============== */ const std::vector specificParams5InDynamic = { // dynamic input shapes - ParamsWhichSizeDependsDynamic { - true, true, true, 1, 1, + ParamsWhichSizeDependsDynamic{ + true, + true, + true, + 1, + 1, {{ov::Dimension::dynamic(), ov::Dimension::dynamic()}, {{1, 60}, {1, 120}}}, {{ov::Dimension::dynamic(), ov::Dimension::dynamic()}, {{1, 165}, {1, 330}}}, {{ov::Dimension::dynamic(), ov::Dimension::dynamic(), ov::Dimension::dynamic()}, {{1, 1, 60}, {1, 1, 120}}}, {{ov::Dimension::dynamic(), ov::Dimension::dynamic()}, {{1, 30}, {1, 60}}}, {{ov::Dimension::dynamic(), ov::Dimension::dynamic()}, {{1, 60}, {1, 120}}}, }, - ParamsWhichSizeDependsDynamic { - true, false, true, 1, 1, + ParamsWhichSizeDependsDynamic{ + true, + false, + true, + 1, + 1, {{ov::Dimension::dynamic(), ov::Dimension::dynamic()}, {{1, 660}, {1, 1320}}}, {{ov::Dimension::dynamic(), ov::Dimension::dynamic()}, {{1, 165}, {1, 330}}}, {{ov::Dimension::dynamic(), ov::Dimension::dynamic(), ov::Dimension::dynamic()}, {{1, 1, 60}, {1, 1, 120}}}, {{ov::Dimension::dynamic(), ov::Dimension::dynamic()}, {{1, 30}, {1, 60}}}, {{ov::Dimension::dynamic(), ov::Dimension::dynamic()}, {{1, 660}, {1, 1320}}}, }, - ParamsWhichSizeDependsDynamic { - false, true, true, 1, 1, + ParamsWhichSizeDependsDynamic{ + false, + true, + true, + 1, + 1, {{ov::Dimension::dynamic(), ov::Dimension::dynamic()}, {{1, 60}, {1, 120}}}, {{ov::Dimension::dynamic(), ov::Dimension::dynamic()}, {{1, 165}, {1, 330}}}, {{ov::Dimension::dynamic(), ov::Dimension::dynamic(), ov::Dimension::dynamic()}, {{1, 2, 60}, {1, 2, 120}}}, {{ov::Dimension::dynamic(), ov::Dimension::dynamic()}, {{1, 30}, {1, 60}}}, - {{ov::Dimension::dynamic(), ov::Dimension::dynamic()}, {{1, 60}, {1, 120}}} - }, - ParamsWhichSizeDependsDynamic { - false, false, true, 1, 1, + {{ov::Dimension::dynamic(), ov::Dimension::dynamic()}, {{1, 60}, {1, 120}}}}, + ParamsWhichSizeDependsDynamic{ + false, + false, + true, + 1, + 1, {{ov::Dimension::dynamic(), 
ov::Dimension::dynamic()}, {{1, 660}, {1, 1320}}}, {{ov::Dimension::dynamic(), ov::Dimension::dynamic()}, {{1, 165}, {1, 330}}}, {{ov::Dimension::dynamic(), ov::Dimension::dynamic(), ov::Dimension::dynamic()}, {{1, 2, 60}, {1, 2, 120}}}, {{ov::Dimension::dynamic(), ov::Dimension::dynamic()}, {{1, 30}, {1, 60}}}, - {{ov::Dimension::dynamic(), ov::Dimension::dynamic()}, {{1, 660}, {1, 1320}}} - }, - - ParamsWhichSizeDependsDynamic { - true, true, false, 10, 10, + {{ov::Dimension::dynamic(), ov::Dimension::dynamic()}, {{1, 660}, {1, 1320}}}}, + + ParamsWhichSizeDependsDynamic{ + true, + true, + false, + 10, + 10, {{ov::Dimension::dynamic(), ov::Dimension::dynamic()}, {{1, 60}, {1, 120}}}, {{ov::Dimension::dynamic(), ov::Dimension::dynamic()}, {{1, 165}, {1, 330}}}, {{ov::Dimension::dynamic(), ov::Dimension::dynamic(), ov::Dimension::dynamic()}, {{1, 1, 75}, {1, 1, 150}}}, {{ov::Dimension::dynamic(), ov::Dimension::dynamic()}, {{1, 30}, {1, 60}}}, - {{ov::Dimension::dynamic(), ov::Dimension::dynamic()}, {{1, 60}, {1, 120}}} - }, - ParamsWhichSizeDependsDynamic { - true, false, false, 10, 10, + {{ov::Dimension::dynamic(), ov::Dimension::dynamic()}, {{1, 60}, {1, 120}}}}, + ParamsWhichSizeDependsDynamic{ + true, + false, + false, + 10, + 10, {{ov::Dimension::dynamic(), ov::Dimension::dynamic()}, {{1, 660}, {1, 1320}}}, {{ov::Dimension::dynamic(), ov::Dimension::dynamic()}, {{1, 165}, {1, 330}}}, {{ov::Dimension::dynamic(), ov::Dimension::dynamic(), ov::Dimension::dynamic()}, {{1, 1, 75}, {1, 1, 150}}}, {{ov::Dimension::dynamic(), ov::Dimension::dynamic()}, {{1, 30}, {1, 60}}}, - {{ov::Dimension::dynamic(), ov::Dimension::dynamic()}, {{1, 660}, {1, 1320}}} - }, - ParamsWhichSizeDependsDynamic { - false, true, false, 10, 10, + {{ov::Dimension::dynamic(), ov::Dimension::dynamic()}, {{1, 660}, {1, 1320}}}}, + ParamsWhichSizeDependsDynamic{ + false, + true, + false, + 10, + 10, {{ov::Dimension::dynamic(), ov::Dimension::dynamic()}, {{1, 60}, {1, 120}}}, {{ov::Dimension::dynamic(), ov::Dimension::dynamic()}, {{1, 165}, {1, 330}}}, {{ov::Dimension::dynamic(), ov::Dimension::dynamic(), ov::Dimension::dynamic()}, {{1, 2, 75}, {1, 2, 150}}}, {{ov::Dimension::dynamic(), ov::Dimension::dynamic()}, {{1, 30}, {1, 60}}}, - {{ov::Dimension::dynamic(), ov::Dimension::dynamic()}, {{1, 60}, {1, 120}}} - }, - ParamsWhichSizeDependsDynamic { - false, false, false, 10, 10, + {{ov::Dimension::dynamic(), ov::Dimension::dynamic()}, {{1, 60}, {1, 120}}}}, + ParamsWhichSizeDependsDynamic{ + false, + false, + false, + 10, + 10, {{ov::Dimension::dynamic(), ov::Dimension::dynamic()}, {{1, 660}, {1, 1320}}}, {{ov::Dimension::dynamic(), ov::Dimension::dynamic()}, {{1, 165}, {1, 330}}}, {{ov::Dimension::dynamic(), ov::Dimension::dynamic(), ov::Dimension::dynamic()}, {{1, 2, 75}, {1, 2, 150}}}, {{ov::Dimension::dynamic(), ov::Dimension::dynamic()}, {{1, 30}, {1, 60}}}, - {{ov::Dimension::dynamic(), ov::Dimension::dynamic()}, {{1, 660}, {1, 1320}}} - }, + {{ov::Dimension::dynamic(), ov::Dimension::dynamic()}, {{1, 660}, {1, 1320}}}}, }; -const auto params5InputsDynamic = ::testing::Combine( - commonAttributes, - ::testing::ValuesIn(specificParams5InDynamic), - ::testing::ValuesIn(numberBatch), - ::testing::Values(objectnessScore), - ::testing::Values(false, true), - ::testing::Values(ov::test::utils::DEVICE_CPU) -); - -INSTANTIATE_TEST_SUITE_P( - smoke_CPUDetectionOutputDynamic5In, - DetectionOutputLayerCPUTest, - params5InputsDynamic, - DetectionOutputLayerCPUTest::getTestCaseName); +const auto params5InputsDynamic = 
::testing::Combine(commonAttributes, + ::testing::ValuesIn(specificParams5InDynamic), + ::testing::ValuesIn(numberBatch), + ::testing::Values(objectnessScore), + ::testing::Values(false, true), + ::testing::Values(ov::test::utils::DEVICE_CPU)); + +INSTANTIATE_TEST_SUITE_P(smoke_CPUDetectionOutputDynamic5In, + DetectionOutputLayerCPUTest, + params5InputsDynamic, + DetectionOutputLayerCPUTest::getTestCaseName); //////////////////large tensor///////////////// const std::vector specificParams5InDynamicLargeTensor = { // dynamic input shapes - ParamsWhichSizeDependsDynamic { - true, true, true, 1, 1, + ParamsWhichSizeDependsDynamic{ + true, + true, + true, + 1, + 1, {{ov::Dimension::dynamic(), ov::Dimension::dynamic()}, {{1, 381360}, {1, 381360}}}, {{ov::Dimension::dynamic(), ov::Dimension::dynamic()}, {{1, 1048740}, {1, 1048740}}}, - {{ov::Dimension::dynamic(), ov::Dimension::dynamic(), ov::Dimension::dynamic()}, {{1, 1, 381360}, {1, 1, 381360}}}, + {{ov::Dimension::dynamic(), ov::Dimension::dynamic(), ov::Dimension::dynamic()}, + {{1, 1, 381360}, {1, 1, 381360}}}, {{ov::Dimension::dynamic(), ov::Dimension::dynamic()}, {{1, 190680}, {1, 190680}}}, {{ov::Dimension::dynamic(), ov::Dimension::dynamic()}, {{1, 381360}, {1, 381360}}}, }, - ParamsWhichSizeDependsDynamic { - true, false, true, 1, 1, + ParamsWhichSizeDependsDynamic{ + true, + false, + true, + 1, + 1, {{ov::Dimension::dynamic(), ov::Dimension::dynamic()}, {{1, 4194960}, {1, 4194960}}}, {{ov::Dimension::dynamic(), ov::Dimension::dynamic()}, {{1, 1048740}, {1, 1048740}}}, - {{ov::Dimension::dynamic(), ov::Dimension::dynamic(), ov::Dimension::dynamic()}, {{1, 1, 381360}, {1, 1, 381360}}}, + {{ov::Dimension::dynamic(), ov::Dimension::dynamic(), ov::Dimension::dynamic()}, + {{1, 1, 381360}, {1, 1, 381360}}}, {{ov::Dimension::dynamic(), ov::Dimension::dynamic()}, {{1, 190680}, {1, 190680}}}, {{ov::Dimension::dynamic(), ov::Dimension::dynamic()}, {{1, 4194960}, {1, 4194960}}}, }, }; -const auto params5InputsDynamicLargeTensor = ::testing::Combine( - commonAttributesLargeTensor, - ::testing::ValuesIn(specificParams5InDynamicLargeTensor), - ::testing::ValuesIn(numberBatch), - ::testing::Values(objectnessScore), - ::testing::Values(false, true), - ::testing::Values(ov::test::utils::DEVICE_CPU) -); - -INSTANTIATE_TEST_SUITE_P( - CPUDetectionOutputDynamic5InLargeTensor, - DetectionOutputLayerCPUTest, - params5InputsDynamicLargeTensor, - DetectionOutputLayerCPUTest::getTestCaseName); +const auto params5InputsDynamicLargeTensor = + ::testing::Combine(commonAttributesLargeTensor, + ::testing::ValuesIn(specificParams5InDynamicLargeTensor), + ::testing::ValuesIn(numberBatch), + ::testing::Values(objectnessScore), + ::testing::Values(false, true), + ::testing::Values(ov::test::utils::DEVICE_CPU)); + +INSTANTIATE_TEST_SUITE_P(CPUDetectionOutputDynamic5InLargeTensor, + DetectionOutputLayerCPUTest, + params5InputsDynamicLargeTensor, + DetectionOutputLayerCPUTest::getTestCaseName); } // namespace -} // namespace CPULayerTestsDefinitions +} // namespace test +} // namespace ov diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/embedding_bag_offsets_sum.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/embedding_bag_offsets_sum.cpp index 8227678c0188f1..e84af61c1e7617 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/embedding_bag_offsets_sum.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/embedding_bag_offsets_sum.cpp @@ -2,41 +2,33 @@ // SPDX-License-Identifier: Apache-2.0 // 
-#include -#include -#include +#include "common_test_utils/node_builders/embedding_bag_offsets_sum.hpp" -#include -#include "ov_models/builders.hpp" -#include "shared_test_classes/base/layer_test_utils.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" #include "test_utils/cpu_test_utils.hpp" -using namespace InferenceEngine; using namespace CPUTestUtils; -using namespace ov::test; - -namespace CPULayerTestsDefinitions { - -typedef std::tuple< - InputShape, // input_shapes - std::vector, // indices - std::vector, // offsets - size_t, // default_index - bool, // with_weights - bool // with_def_index - > embeddingBagOffsetsSumParams; - -typedef std::tuple< - embeddingBagOffsetsSumParams, - ElementType, // embedding table - ElementType, // indices - LayerTestsUtils::TargetDevice> embeddingBagOffsetsSumLayerTestParamsSet; - -class EmbeddingBagOffsetsSumLayerCPUTest : - public testing::WithParamInterface, - virtual public SubgraphBaseTest, - public CPUTestsBase { +namespace ov { +namespace test { + +typedef std::tuple, // indices + std::vector, // offsets + size_t, // default_index + bool, // with_weights + bool // with_def_index + > + embeddingBagOffsetsSumParams; + +typedef std::tuple + embeddingBagOffsetsSumLayerTestParamsSet; + +class EmbeddingBagOffsetsSumLayerCPUTest : public testing::WithParamInterface, + virtual public SubgraphBaseTest, + public CPUTestsBase { public: static std::string getTestCaseName(const testing::TestParamInfo& obj) { embeddingBagOffsetsSumParams params; @@ -77,22 +69,22 @@ class EmbeddingBagOffsetsSumLayerCPUTest : selectedType = makeSelectedTypeStr("ref", inType); targetDevice = ov::test::utils::DEVICE_CPU; - init_input_shapes({ inputShapes }); - - auto emb_table_node = std::make_shared(inType, inputShapes.first); - ngraph::ParameterVector params = {emb_table_node}; - - auto embBag = std::dynamic_pointer_cast(ngraph::builder::makeEmbeddingBagOffsetsSum( - inType, - indPrecision, - emb_table_node, - indices, - offsets, - defaultIndex, - withWeights, - withDefIndex)); - ngraph::ResultVector results{std::make_shared(embBag)}; - function = std::make_shared(results, params, "embeddingBagOffsetsSum"); + init_input_shapes({inputShapes}); + + auto emb_table_node = std::make_shared(inType, inputShapes.first); + ov::ParameterVector params = {emb_table_node}; + + auto embBag = std::dynamic_pointer_cast( + ov::test::utils::make_embedding_bag_offsets_sum(inType, + indPrecision, + emb_table_node, + indices, + offsets, + defaultIndex, + withWeights, + withDefIndex)); + ov::ResultVector results{std::make_shared(embBag)}; + function = std::make_shared(results, params, "embeddingBagOffsetsSum"); } }; @@ -103,65 +95,50 @@ TEST_P(EmbeddingBagOffsetsSumLayerCPUTest, CompareWithRefs) { namespace { -const std::vector netPrecisions = { - ElementType::f32, - ElementType::i32, - ElementType::u8 -}; +const std::vector netPrecisions = {ElementType::f32, ElementType::i32, ElementType::u8}; -const std::vector indPrecisions = { - ElementType::i64, - ElementType::i32 -}; +const std::vector indPrecisions = {ElementType::i64, ElementType::i32}; const std::vector input_shapes = { - // dynamic input shapes - { - // input model dynamic shapes - {ov::Dimension::dynamic(), ov::Dimension::dynamic()}, - // input tensor shapes - {{5, 6}, {10, 35}} - }, - { - // input model dynamic shapes - {ov::Dimension::dynamic(), ov::Dimension::dynamic(), ov::Dimension::dynamic()}, - // input tensor shapes - {{5, 4, 16}, {10, 12, 8}} - }, - { - // input model dynamic shapes with limits - {{5, 10}, {6, 35}, {4, 
8}}, - // input tensor shapes - {{5, 6, 4}, {10, 35, 8}, {5, 6, 4}} - }, - // static shapes - {{5, 6}, {{5, 6}}}, - {{10, 35}, {{10, 35}}}, - {{5, 4, 16}, {{5, 4, 16}}}, + // dynamic input shapes + {// input model dynamic shapes + {ov::Dimension::dynamic(), ov::Dimension::dynamic()}, + // input tensor shapes + {{5, 6}, {10, 35}}}, + {// input model dynamic shapes + {ov::Dimension::dynamic(), ov::Dimension::dynamic(), ov::Dimension::dynamic()}, + // input tensor shapes + {{5, 4, 16}, {10, 12, 8}}}, + {// input model dynamic shapes with limits + {{5, 10}, {6, 35}, {4, 8}}, + // input tensor shapes + {{5, 6, 4}, {10, 35, 8}, {5, 6, 4}}}, + // static shapes + {{5, 6}, {{5, 6}}}, + {{10, 35}, {{10, 35}}}, + {{5, 4, 16}, {{5, 4, 16}}}, }; -const std::vector> indices = - {{0, 1, 2, 2, 3}, {4, 4, 3, 1, 0}, {1, 2, 1, 2, 1, 2, 1, 2, 1, 2}}; +const std::vector> indices = {{0, 1, 2, 2, 3}, {4, 4, 3, 1, 0}, {1, 2, 1, 2, 1, 2, 1, 2, 1, 2}}; const std::vector> offsets = {{0, 2}, {0, 0, 2, 2}, {2, 4}}; const std::vector default_index = {0, 4}; const std::vector with_weights = {false, true}; const std::vector with_default_index = {false, true}; -const auto embBagOffsetSumArgSet = ::testing::Combine( - ::testing::ValuesIn(input_shapes), - ::testing::ValuesIn(indices), - ::testing::ValuesIn(offsets), - ::testing::ValuesIn(default_index), - ::testing::ValuesIn(with_weights), - ::testing::ValuesIn(with_default_index) -); - -INSTANTIATE_TEST_SUITE_P(smoke, EmbeddingBagOffsetsSumLayerCPUTest, - ::testing::Combine( - embBagOffsetSumArgSet, - ::testing::ValuesIn(netPrecisions), - ::testing::ValuesIn(indPrecisions), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - EmbeddingBagOffsetsSumLayerCPUTest::getTestCaseName); +const auto embBagOffsetSumArgSet = ::testing::Combine(::testing::ValuesIn(input_shapes), + ::testing::ValuesIn(indices), + ::testing::ValuesIn(offsets), + ::testing::ValuesIn(default_index), + ::testing::ValuesIn(with_weights), + ::testing::ValuesIn(with_default_index)); + +INSTANTIATE_TEST_SUITE_P(smoke, + EmbeddingBagOffsetsSumLayerCPUTest, + ::testing::Combine(embBagOffsetSumArgSet, + ::testing::ValuesIn(netPrecisions), + ::testing::ValuesIn(indPrecisions), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + EmbeddingBagOffsetsSumLayerCPUTest::getTestCaseName); } // namespace -} // namespace CPULayerTestsDefinitions +} // namespace test +} // namespace ov diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/embedding_bag_packed_sum.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/embedding_bag_packed_sum.cpp index 17512416b655d1..6a01ce8f52dc97 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/embedding_bag_packed_sum.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/embedding_bag_packed_sum.cpp @@ -2,38 +2,30 @@ // SPDX-License-Identifier: Apache-2.0 // -#include -#include -#include +#include "common_test_utils/node_builders/embedding_bag_packed_sum.hpp" -#include -#include "ov_models/builders.hpp" -#include "shared_test_classes/base/layer_test_utils.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" #include "test_utils/cpu_test_utils.hpp" -using namespace InferenceEngine; using namespace CPUTestUtils; -using namespace ov::test; - -namespace CPULayerTestsDefinitions { - -typedef std::tuple< - InputShape, // input_shapes - std::vector>, // indices - bool // with_weights - > embeddingBagPackedSumParams; - -typedef std::tuple< - embeddingBagPackedSumParams, - ElementType, // embedding table - ElementType, // indices - 
LayerTestsUtils::TargetDevice> embeddingBagPackedSumLayerTestParamsSet; - -class EmbeddingBagPackedSumLayerCPUTest : - public testing::WithParamInterface, - virtual public SubgraphBaseTest, - public CPUTestsBase { +namespace ov { +namespace test { + +typedef std::tuple>, // indices + bool // with_weights + > + embeddingBagPackedSumParams; + +typedef std::tuple + embeddingBagPackedSumLayerTestParamsSet; + +class EmbeddingBagPackedSumLayerCPUTest : public testing::WithParamInterface, + virtual public SubgraphBaseTest, + public CPUTestsBase { public: static std::string getTestCaseName(const testing::TestParamInfo& obj) { embeddingBagPackedSumParams params; @@ -70,19 +62,15 @@ class EmbeddingBagPackedSumLayerCPUTest : selectedType = makeSelectedTypeStr("ref", inType); targetDevice = ov::test::utils::DEVICE_CPU; - init_input_shapes({ inputShapes }); + init_input_shapes({inputShapes}); - auto emb_table_node = std::make_shared(inType, inputShapes.first); - ngraph::ParameterVector params = {emb_table_node}; + auto emb_table_node = std::make_shared(inType, inputShapes.first); + ov::ParameterVector params = {emb_table_node}; - auto embBag = std::dynamic_pointer_cast(ngraph::builder::makeEmbeddingBagPackedSum( - inType, - indPrecision, - emb_table_node, - indices, - withWeights)); - ngraph::ResultVector results{std::make_shared(embBag)}; - function = std::make_shared(results, params, "embeddingBagPackedSum"); + auto embBag = std::dynamic_pointer_cast( + ov::test::utils::make_embedding_bag_packed_sum(inType, indPrecision, emb_table_node, indices, withWeights)); + ov::ResultVector results{std::make_shared(embBag)}; + function = std::make_shared(results, params, "embeddingBagPackedSum"); } }; @@ -93,59 +81,46 @@ TEST_P(EmbeddingBagPackedSumLayerCPUTest, CompareWithRefs) { namespace { -const std::vector netPrecisions = { - ElementType::f32, - ElementType::i32, - ElementType::u8 -}; +const std::vector netPrecisions = {ElementType::f32, ElementType::i32, ElementType::u8}; -const std::vector indPrecisions = { - ElementType::i64, - ElementType::i32 -}; +const std::vector indPrecisions = {ElementType::i64, ElementType::i32}; const std::vector input_shapes = { - // dynamic input shapes - { - // input model dynamic shapes - {ov::Dimension::dynamic(), ov::Dimension::dynamic()}, - // input tensor shapes - {{{5, 6}}, {10, 35}} - }, - { - // input model dynamic shapes - {ov::Dimension::dynamic(), ov::Dimension::dynamic(), ov::Dimension::dynamic()}, - // input tensor shapes - {{5, 4, 16}, {10, 12, 8}} - }, - { - // input model dynamic shapes with limits - {{5, 10}, {6, 35}, {4, 8}}, - // input tensor shapes - {{5, 6, 4}, {10, 35, 8}, {5, 6, 4}} - }, - // static shapes - {{5, 6}, {{5, 6}}}, - {{10, 35}, {{10, 35}}}, - {{5, 4, 16}, {{5, 4, 16}}}, + // dynamic input shapes + {// input model dynamic shapes + {ov::Dimension::dynamic(), ov::Dimension::dynamic()}, + // input tensor shapes + {{{5, 6}}, {10, 35}}}, + {// input model dynamic shapes + {ov::Dimension::dynamic(), ov::Dimension::dynamic(), ov::Dimension::dynamic()}, + // input tensor shapes + {{5, 4, 16}, {10, 12, 8}}}, + {// input model dynamic shapes with limits + {{5, 10}, {6, 35}, {4, 8}}, + // input tensor shapes + {{5, 6, 4}, {10, 35, 8}, {5, 6, 4}}}, + // static shapes + {{5, 6}, {{5, 6}}}, + {{10, 35}, {{10, 35}}}, + {{5, 4, 16}, {{5, 4, 16}}}, }; -const std::vector>> indices = - {{{0, 1}, {2, 2}, {3, 4}}, {{4, 4, 3}, {1, 0, 2}}, {{1, 2, 1, 2}, {1, 2, 1, 2}}}; +const std::vector>> indices = {{{0, 1}, {2, 2}, {3, 4}}, + {{4, 4, 3}, {1, 0, 2}}, + {{1, 2, 1, 
2}, {1, 2, 1, 2}}}; const std::vector with_weights = {false, true}; -const auto embBagPackedSumArgSet = ::testing::Combine( - ::testing::ValuesIn(input_shapes), - ::testing::ValuesIn(indices), - ::testing::ValuesIn(with_weights) -); - -INSTANTIATE_TEST_SUITE_P(smoke, EmbeddingBagPackedSumLayerCPUTest, - ::testing::Combine( - embBagPackedSumArgSet, - ::testing::ValuesIn(netPrecisions), - ::testing::ValuesIn(indPrecisions), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - EmbeddingBagPackedSumLayerCPUTest::getTestCaseName); +const auto embBagPackedSumArgSet = ::testing::Combine(::testing::ValuesIn(input_shapes), + ::testing::ValuesIn(indices), + ::testing::ValuesIn(with_weights)); + +INSTANTIATE_TEST_SUITE_P(smoke, + EmbeddingBagPackedSumLayerCPUTest, + ::testing::Combine(embBagPackedSumArgSet, + ::testing::ValuesIn(netPrecisions), + ::testing::ValuesIn(indPrecisions), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + EmbeddingBagPackedSumLayerCPUTest::getTestCaseName); } // namespace -} // namespace CPULayerTestsDefinitions +} // namespace test +} // namespace ov diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/embedding_segments_sum.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/embedding_segments_sum.cpp index 36bd193bbd08da..5b3cf8bcb917b3 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/embedding_segments_sum.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/embedding_segments_sum.cpp @@ -2,42 +2,34 @@ // SPDX-License-Identifier: Apache-2.0 // -#include -#include -#include +#include "common_test_utils/node_builders/embedding_segments_sum.hpp" -#include -#include "ov_models/builders.hpp" -#include "shared_test_classes/base/layer_test_utils.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" #include "test_utils/cpu_test_utils.hpp" -using namespace InferenceEngine; using namespace CPUTestUtils; -using namespace ov::test; - -namespace CPULayerTestsDefinitions { - -typedef std::tuple< - InputShape, // input_shapes - std::vector, // indices - std::vector, // segment_ids - size_t, // num_segments - size_t, // default_index - bool, // with_weights - bool // with_def_index - > embeddingSegmentsSumParams; - -typedef std::tuple< - embeddingSegmentsSumParams, - ElementType, // embedding table - ElementType, // indices - LayerTestsUtils::TargetDevice> embeddingSegmentsSumLayerTestParamsSet; - -class EmbeddingSegmentsSumLayerCPUTest : - public testing::WithParamInterface, - virtual public SubgraphBaseTest, - public CPUTestsBase { +namespace ov { +namespace test { + +typedef std::tuple, // indices + std::vector, // segment_ids + size_t, // num_segments + size_t, // default_index + bool, // with_weights + bool // with_def_index + > + embeddingSegmentsSumParams; + +typedef std::tuple + embeddingSegmentsSumLayerTestParamsSet; + +class EmbeddingSegmentsSumLayerCPUTest : public testing::WithParamInterface, + virtual public SubgraphBaseTest, + public CPUTestsBase { public: static std::string getTestCaseName(const testing::TestParamInfo& obj) { embeddingSegmentsSumParams params; @@ -53,7 +45,7 @@ class EmbeddingSegmentsSumLayerCPUTest : std::ostringstream result; result << "IS=" << inputShapes << "_"; - result << "I" << ov::test::utils::vec2str(indices) << "_"; + result << "I" << ov::test::utils::vec2str(indices) << "_"; result << "SI" << ov::test::utils::vec2str(segmentIds) << "_"; result << "NS" << numSegments << "_"; result << "DI" << defaultIndex << "_"; @@ -80,23 +72,23 @@ class EmbeddingSegmentsSumLayerCPUTest : 
selectedType = makeSelectedTypeStr("ref", inType); targetDevice = ov::test::utils::DEVICE_CPU; - init_input_shapes({ inputShapes }); - - auto emb_table_node = std::make_shared(inType, inputShapes.first); - ngraph::ParameterVector params = {emb_table_node}; - - auto embBag = std::dynamic_pointer_cast(ngraph::builder::makeEmbeddingSegmentsSum( - inType, - indPrecision, - emb_table_node, - indices, - segmentIds, - numSegments, - defaultIndex, - withWeights, - withDefIndex)); - ngraph::ResultVector results{std::make_shared(embBag)}; - function = std::make_shared(results, params, "embeddingSegmentsSum"); + init_input_shapes({inputShapes}); + + auto emb_table_node = std::make_shared(inType, inputShapes.first); + ov::ParameterVector params = {emb_table_node}; + + auto embBag = std::dynamic_pointer_cast( + ov::test::utils::make_embedding_segments_sum(inType, + indPrecision, + emb_table_node, + indices, + segmentIds, + numSegments, + defaultIndex, + withWeights, + withDefIndex)); + ov::ResultVector results{std::make_shared(embBag)}; + function = std::make_shared(results, params, "embeddingSegmentsSum"); } }; @@ -106,67 +98,52 @@ TEST_P(EmbeddingSegmentsSumLayerCPUTest, CompareWithRefs) { } namespace { -const std::vector netPrecisions = { - ElementType::f32, - ElementType::i32, - ElementType::u8 -}; +const std::vector netPrecisions = {ElementType::f32, ElementType::i32, ElementType::u8}; -const std::vector indPrecisions = { - ElementType::i64, - ElementType::i32 -}; +const std::vector indPrecisions = {ElementType::i64, ElementType::i32}; const std::vector input_shapes = { // dynamic input shapes - { - // input model dynamic shapes - {ov::Dimension::dynamic(), ov::Dimension::dynamic()}, - // input tensor shapes - {{5, 6}, {10, 35}} - }, - { - // input model dynamic shapes - {ov::Dimension::dynamic(), ov::Dimension::dynamic(), ov::Dimension::dynamic()}, - // input tensor shapes - {{5, 4, 16}, {10, 12, 8}} - }, - { - // input model dynamic shapes with limits - {{5, 10}, {6, 35}, {4, 8}}, - // input tensor shapes - {{5, 6, 4}, {10, 35, 8}, {5, 6, 4}} - }, + {// input model dynamic shapes + {ov::Dimension::dynamic(), ov::Dimension::dynamic()}, + // input tensor shapes + {{5, 6}, {10, 35}}}, + {// input model dynamic shapes + {ov::Dimension::dynamic(), ov::Dimension::dynamic(), ov::Dimension::dynamic()}, + // input tensor shapes + {{5, 4, 16}, {10, 12, 8}}}, + {// input model dynamic shapes with limits + {{5, 10}, {6, 35}, {4, 8}}, + // input tensor shapes + {{5, 6, 4}, {10, 35, 8}, {5, 6, 4}}}, // static shapes {{5, 6}, {{5, 6}}}, {{10, 35}, {{10, 35}}}, {{5, 4, 16}, {{5, 4, 16}}}, }; -const std::vector> indices = - {{0, 1, 2, 2, 3}, {4, 4, 3, 1, 2}}; +const std::vector> indices = {{0, 1, 2, 2, 3}, {4, 4, 3, 1, 2}}; const std::vector> segment_ids = {{0, 1, 2, 3, 4}, {0, 0, 2, 2, 4}}; const std::vector num_segments = {5, 7}; const std::vector default_index = {0, 4}; const std::vector with_weights = {false, true}; const std::vector with_default_index = {false, true}; -const auto embSegmentsSumArgSet = ::testing::Combine( - ::testing::ValuesIn(input_shapes), - ::testing::ValuesIn(indices), - ::testing::ValuesIn(segment_ids), - ::testing::ValuesIn(num_segments), - ::testing::ValuesIn(default_index), - ::testing::ValuesIn(with_weights), - ::testing::ValuesIn(with_default_index) -); - -INSTANTIATE_TEST_SUITE_P(smoke, EmbeddingSegmentsSumLayerCPUTest, - ::testing::Combine( - embSegmentsSumArgSet, - ::testing::ValuesIn(netPrecisions), - ::testing::ValuesIn(indPrecisions), - 
::testing::Values(ov::test::utils::DEVICE_CPU)), - EmbeddingSegmentsSumLayerCPUTest::getTestCaseName); +const auto embSegmentsSumArgSet = ::testing::Combine(::testing::ValuesIn(input_shapes), + ::testing::ValuesIn(indices), + ::testing::ValuesIn(segment_ids), + ::testing::ValuesIn(num_segments), + ::testing::ValuesIn(default_index), + ::testing::ValuesIn(with_weights), + ::testing::ValuesIn(with_default_index)); + +INSTANTIATE_TEST_SUITE_P(smoke, + EmbeddingSegmentsSumLayerCPUTest, + ::testing::Combine(embSegmentsSumArgSet, + ::testing::ValuesIn(netPrecisions), + ::testing::ValuesIn(indPrecisions), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + EmbeddingSegmentsSumLayerCPUTest::getTestCaseName); } // namespace -} // namespace CPULayerTestsDefinitions +} // namespace test +} // namespace ov diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/extract_image_patches.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/extract_image_patches.cpp index fecd504bf6ba31..17df64a4f5d31a 100755 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/extract_image_patches.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/extract_image_patches.cpp @@ -4,23 +4,20 @@ #include "shared_test_classes/base/ov_subgraph.hpp" #include "test_utils/cpu_test_utils.hpp" -#include "shared_test_classes/base/layer_test_utils.hpp" -#include "ov_models/builders.hpp" using namespace CPUTestUtils; -using namespace ov::test; - -namespace CPULayerTestsDefinitions { -using extractImagePatchesParams = typename std::tuple< - InputShape, // input shape - ElementType, // Network precision - ov::Shape, // kernel size - ov::Strides, // strides - ov::Shape, // rates - ov::op::PadType>; // pad type +namespace ov { +namespace test { +using extractImagePatchesParams = typename std::tuple; // pad type class ExtractImagePatchesLayerCPUTest : public testing::WithParamInterface, - virtual public SubgraphBaseTest, public CPUTestsBase { + virtual public SubgraphBaseTest, + public CPUTestsBase { public: static std::string getTestCaseName(testing::TestParamInfo obj) { InputShape inputShapes; @@ -32,16 +29,19 @@ class ExtractImagePatchesLayerCPUTest : public testing::WithParamInterface inputShapes = { InputShape{{}, {{2, 3, 13, 37}}}, - InputShape{ - // dynamic - {-1, -1, -1, -1}, - // static - {{2, 3, 13, 37}, {6, 4, 14, 14}, {8, 12, 15, 16}, {2, 3, 13, 37}} - }, - InputShape{ - // dynamic - {{5, 15}, {6, 17}, {10, 15}, {13, 16}}, - // static - {{5, 17, 10, 15}, {15, 10, 12, 13}, {10, 10, 15, 16}, {5, 17, 10, 15}} - }, + InputShape{// dynamic + {-1, -1, -1, -1}, + // static + {{2, 3, 13, 37}, {6, 4, 14, 14}, {8, 12, 15, 16}, {2, 3, 13, 37}}}, + InputShape{// dynamic + {{5, 15}, {6, 17}, {10, 15}, {13, 16}}, + // static + {{5, 17, 10, 15}, {15, 10, 12, 13}, {10, 10, 15, 16}, {5, 17, 10, 15}}}, }; -const std::vector inputPrecisions = { - ElementType::i8, - ElementType::bf16, - ElementType::f32 -}; +const std::vector inputPrecisions = {ElementType::i8, ElementType::bf16, ElementType::f32}; -const std::vector kSizes = { - {1, 5}, - {3, 4}, - {3, 1} -}; +const std::vector kSizes = {{1, 5}, {3, 4}, {3, 1}}; -const std::vector strides = { - {1, 2}, - {2, 2}, - {2, 1} -}; +const std::vector strides = {{1, 2}, {2, 2}, {2, 1}}; -const std::vector rates = { - {1, 3}, - {3, 3}, - {3, 1} -}; +const std::vector rates = {{1, 3}, {3, 3}, {3, 1}}; -const std::vector autoPads = { - ov::op::PadType::VALID, - ov::op::PadType::SAME_UPPER, - ov::op::PadType::SAME_LOWER -}; +const std::vector autoPads = 
{ov::op::PadType::VALID, + ov::op::PadType::SAME_UPPER, + ov::op::PadType::SAME_LOWER}; -const auto params = ::testing::Combine( - ::testing::ValuesIn(inputShapes), - ::testing::ValuesIn(inputPrecisions), - ::testing::ValuesIn(kSizes), - ::testing::ValuesIn(strides), - ::testing::ValuesIn(rates), - ::testing::ValuesIn(autoPads)); +const auto params = ::testing::Combine(::testing::ValuesIn(inputShapes), + ::testing::ValuesIn(inputPrecisions), + ::testing::ValuesIn(kSizes), + ::testing::ValuesIn(strides), + ::testing::ValuesIn(rates), + ::testing::ValuesIn(autoPads)); -INSTANTIATE_TEST_SUITE_P(smoke_ExtractImagePatches_CPU, ExtractImagePatchesLayerCPUTest, params, ExtractImagePatchesLayerCPUTest::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_ExtractImagePatches_CPU, + ExtractImagePatchesLayerCPUTest, + params, + ExtractImagePatchesLayerCPUTest::getTestCaseName); -} // namespace -} // namespace CPULayerTestsDefinitions +} // namespace +} // namespace test +} // namespace ov diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/eye.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/eye.cpp index 22b8cb7a0de857..5dc2219e382ed5 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/eye.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/eye.cpp @@ -2,40 +2,34 @@ // SPDX-License-Identifier: Apache-2.0 // -#include -#include -#include "test_utils/cpu_test_utils.hpp" -#include "ov_models/builders.hpp" +#include "common_test_utils/ov_tensor_utils.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" +#include "test_utils/cpu_test_utils.hpp" -using namespace InferenceEngine; using namespace CPUTestUtils; -using namespace ov::test; - -namespace CPULayerTestsDefinitions { +namespace ov { +namespace test { namespace { - std::vector inputShape; - std::vector outBatchShape; - int rowNum, colNum; - int shift; +std::vector inputShape; +std::vector outBatchShape; +int rowNum, colNum; +int shift; } // namespace -using EyeLayerTestParams = std::tuple< - std::vector, // eye shape - std::vector, // output batch shape - std::vector, // eye params (rows, cols, diag_shift) - ElementType, // Net precision - TargetDevice>; // Device name +using EyeLayerTestParams = std::tuple, // eye shape + std::vector, // output batch shape + std::vector, // eye params (rows, cols, diag_shift) + ElementType, // Net precision + TargetDevice>; // Device name -using EyeLayerCPUTestParamsSet = std::tuple< - CPULayerTestsDefinitions::EyeLayerTestParams, - CPUSpecificParams>; +using EyeLayerCPUTestParamsSet = std::tuple; class EyeLayerCPUTest : public testing::WithParamInterface, - virtual public SubgraphBaseTest, public CPUTestsBase { + virtual public SubgraphBaseTest, + public CPUTestsBase { public: static std::string getTestCaseName(testing::TestParamInfo obj) { - CPULayerTestsDefinitions::EyeLayerTestParams basicParamsSet; + EyeLayerTestParams basicParamsSet; CPUSpecificParams cpuParams; std::tie(basicParamsSet, cpuParams) = obj.param; std::string td; @@ -63,9 +57,10 @@ class EyeLayerCPUTest : public testing::WithParamInterfaceGetParam(); std::tie(inFmts, outFmts, priority, selectedType) = cpuParams; @@ -79,11 +74,11 @@ class EyeLayerCPUTest : public testing::WithParamInterface createFunction() { + std::shared_ptr createFunction() { ov::ParameterVector inputParams; for (auto&& shape : inputDynamicShapes) { inputParams.push_back(std::make_shared(ov::element::i32, shape)); @@ -97,13 +92,14 @@ class EyeLayerCPUTest : public 
testing::WithParamInterfaceset_friendly_name("batchShape"); - auto eyelikeBatchShape = std::make_shared(rowsPar, colsPar, diagPar, batchShapePar, ngraph::element::i32); + auto eyelikeBatchShape = + std::make_shared(rowsPar, colsPar, diagPar, batchShapePar, ov::element::i32); eyelikeBatchShape->get_rt_info() = getCPUInfo(); - return makeNgraphFunction(ngraph::element::i32, inputParams, eyelikeBatchShape, "Eye"); + return makeNgraphFunction(ov::element::i32, inputParams, eyelikeBatchShape, "Eye"); } else { - auto eyelikePure = std::make_shared(rowsPar, colsPar, diagPar, ngraph::element::i32); + auto eyelikePure = std::make_shared(rowsPar, colsPar, diagPar, ov::element::i32); eyelikePure->get_rt_info() = getCPUInfo(); - return makeNgraphFunction(ngraph::element::i32, inputParams, eyelikePure, "Eye"); + return makeNgraphFunction(ov::element::i32, inputParams, eyelikePure, "Eye"); } } @@ -115,16 +111,19 @@ class EyeLayerCPUTest : public testing::WithParamInterface(); - // Spec: batch_shape - 1D tensor with non-negative values of type T_NUM defines leading batch dimensions of output shape + int* batchShapePtr = tensor.data(); + // Spec: batch_shape - 1D tensor with non-negative values of type T_NUM defines leading batch dimensions + // of output shape EXPECT_EQ(targetInputStaticShapes[i].size(), 1); EXPECT_EQ(targetInputStaticShapes[i][0], outBatchShape.size()); for (size_t j = 0; j < targetInputStaticShapes[i][0]; j++) { batchShapePtr[j] = outBatchShape[j]; } } else { - tensor = ov::test::utils::create_and_fill_tensor(funcInput.get_element_type(), targetInputStaticShapes[i], 1, - (i == 0 ? rowNum : (i == 1 ? colNum : shift))); + tensor = ov::test::utils::create_and_fill_tensor(funcInput.get_element_type(), + targetInputStaticShapes[i], + 1, + (i == 0 ? rowNum : (i == 1 ? 
colNum : shift))); } inputs.insert({funcInput.get_node_shared_ptr(), tensor}); } @@ -138,10 +137,11 @@ TEST_P(EyeLayerCPUTest, CompareWithRefs) { namespace { -const std::vector netPrecisions = { - ElementType::f32, ElementType::bf16, ElementType::i32, - ElementType::i8, ElementType::u8 -}; +const std::vector netPrecisions = {ElementType::f32, + ElementType::bf16, + ElementType::i32, + ElementType::i8, + ElementType::u8}; const std::vector> eyePars = { // rows, cols, diag_shift {3, 3, 0}, @@ -153,70 +153,64 @@ const std::vector> eyePars = { // rows, cols, diag_shift {4, 3, -1}, {3, 4, 10}, {4, 4, -2}, - {0, 0, 0} -}; + {0, 0, 0}}; // dummy parameter to prevent empty set of test cases const std::vector> emptyBatchShape = {{0}}; -const std::vector> batchShapes1D = { - {3}, {2}, {1}, {0} -}; -const std::vector> batchShapes2D = { - {3, 2}, {2, 1}, {0, 0} -}; +const std::vector> batchShapes1D = {{3}, {2}, {1}, {0}}; +const std::vector> batchShapes2D = {{3, 2}, {2, 1}, {0, 0}}; // Ticket: 85127 // const std::vector> batchShapes3D = { // {3, 2, 1}, {1, 1, 1} // }; -INSTANTIATE_TEST_SUITE_P(smoke_Eye2D_PureScalar_Test, EyeLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - ::testing::ValuesIn(static_shapes_to_test_representation( - std::vector> {{{}, {}, {}}})), - ::testing::ValuesIn(emptyBatchShape), - ::testing::ValuesIn(eyePars), - ::testing::ValuesIn(netPrecisions), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::Values(CPUSpecificParams{{}, {}, {}, {}})), +INSTANTIATE_TEST_SUITE_P(smoke_Eye2D_PureScalar_Test, + EyeLayerCPUTest, + ::testing::Combine(::testing::Combine(::testing::ValuesIn(static_shapes_to_test_representation( + std::vector>{{{}, {}, {}}})), + ::testing::ValuesIn(emptyBatchShape), + ::testing::ValuesIn(eyePars), + ::testing::ValuesIn(netPrecisions), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::Values(CPUSpecificParams{{}, {}, {}, {}})), EyeLayerCPUTest::getTestCaseName); -INSTANTIATE_TEST_SUITE_P(smoke_Eye2D_WithNonScalar_Test, EyeLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - ::testing::ValuesIn(static_shapes_to_test_representation( - std::vector> {{{1}, {1}, {1}}})), - ::testing::ValuesIn(emptyBatchShape), - ::testing::ValuesIn(eyePars), - ::testing::ValuesIn(netPrecisions), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::Values(CPUSpecificParams{{}, {}, {}, {}})), - EyeLayerCPUTest::getTestCaseName); +INSTANTIATE_TEST_SUITE_P( + smoke_Eye2D_WithNonScalar_Test, + EyeLayerCPUTest, + ::testing::Combine(::testing::Combine(::testing::ValuesIn(static_shapes_to_test_representation( + std::vector>{{{1}, {1}, {1}}})), + ::testing::ValuesIn(emptyBatchShape), + ::testing::ValuesIn(eyePars), + ::testing::ValuesIn(netPrecisions), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::Values(CPUSpecificParams{{}, {}, {}, {}})), + EyeLayerCPUTest::getTestCaseName); -INSTANTIATE_TEST_SUITE_P(smoke_Eye_1DBatch_Test, EyeLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - ::testing::ValuesIn(static_shapes_to_test_representation( - std::vector> {{{}, {}, {}, {1}}})), - ::testing::ValuesIn(batchShapes1D), - ::testing::ValuesIn(eyePars), - ::testing::ValuesIn(netPrecisions), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::Values(CPUSpecificParams{{}, {}, {}, {}})), - EyeLayerCPUTest::getTestCaseName); +INSTANTIATE_TEST_SUITE_P( + smoke_Eye_1DBatch_Test, + EyeLayerCPUTest, + ::testing::Combine(::testing::Combine(::testing::ValuesIn(static_shapes_to_test_representation( + std::vector>{{{}, {}, {}, 
{1}}})), + ::testing::ValuesIn(batchShapes1D), + ::testing::ValuesIn(eyePars), + ::testing::ValuesIn(netPrecisions), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::Values(CPUSpecificParams{{}, {}, {}, {}})), + EyeLayerCPUTest::getTestCaseName); -INSTANTIATE_TEST_SUITE_P(smoke_Eye_2DBatch_Test, EyeLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - ::testing::ValuesIn(static_shapes_to_test_representation( - std::vector> {{{}, {}, {}, {2}}})), - ::testing::ValuesIn(batchShapes2D), - ::testing::ValuesIn(eyePars), - ::testing::ValuesIn(netPrecisions), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::Values(CPUSpecificParams{{}, {}, {}, {}})), - EyeLayerCPUTest::getTestCaseName); +INSTANTIATE_TEST_SUITE_P( + smoke_Eye_2DBatch_Test, + EyeLayerCPUTest, + ::testing::Combine(::testing::Combine(::testing::ValuesIn(static_shapes_to_test_representation( + std::vector>{{{}, {}, {}, {2}}})), + ::testing::ValuesIn(batchShapes2D), + ::testing::ValuesIn(eyePars), + ::testing::ValuesIn(netPrecisions), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::Values(CPUSpecificParams{{}, {}, {}, {}})), + EyeLayerCPUTest::getTestCaseName); // Ticket: 85127 // INSTANTIATE_TEST_SUITE_P(smoke_Eye_3DBatch_Test, EyeLayerCPUTest, @@ -232,20 +226,20 @@ INSTANTIATE_TEST_SUITE_P(smoke_Eye_2DBatch_Test, EyeLayerCPUTest, // EyeLayerCPUTest::getTestCaseName); const std::vector> dynShapes = { - { - {{-1}, {{1}, {1}}}, // input 0 - {{-1}, {{1}, {1}}}, // input 1 - {{-1}, {{1}, {1}}} // input 2 - }, + { + {{-1}, {{1}, {1}}}, // input 0 + {{-1}, {{1}, {1}}}, // input 1 + {{-1}, {{1}, {1}}} // input 2 + }, }; const std::vector> dynShapesWith2DBatches = { - { - {{-1}, {{1}, {1}, {1}}}, // input 0 - {{-1}, {{1}, {1}, {1}}}, // input 1 - {{-1}, {{1}, {1}, {1}}}, // input 2 - {{2}, {{2}, {2}, {2}}} // input 3 - }, + { + {{-1}, {{1}, {1}, {1}}}, // input 0 + {{-1}, {{1}, {1}, {1}}}, // input 1 + {{-1}, {{1}, {1}, {1}}}, // input 2 + {{2}, {{2}, {2}, {2}}} // input 3 + }, }; // Ticket: 85127 @@ -258,26 +252,24 @@ const std::vector> dynShapesWith2DBatches = { // }, // }; -INSTANTIATE_TEST_SUITE_P(smoke_Eye_Dynamic_Test, EyeLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - ::testing::ValuesIn(dynShapes), - ::testing::ValuesIn(emptyBatchShape), - ::testing::ValuesIn(eyePars), - ::testing::ValuesIn(netPrecisions), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::Values(CPUSpecificParams{{}, {}, {}, {}})), +INSTANTIATE_TEST_SUITE_P(smoke_Eye_Dynamic_Test, + EyeLayerCPUTest, + ::testing::Combine(::testing::Combine(::testing::ValuesIn(dynShapes), + ::testing::ValuesIn(emptyBatchShape), + ::testing::ValuesIn(eyePars), + ::testing::ValuesIn(netPrecisions), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::Values(CPUSpecificParams{{}, {}, {}, {}})), EyeLayerCPUTest::getTestCaseName); -INSTANTIATE_TEST_SUITE_P(smoke_Eye_With2DBatchShape_Dynamic_Test, EyeLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - ::testing::ValuesIn(dynShapesWith2DBatches), - ::testing::ValuesIn(batchShapes2D), - ::testing::ValuesIn(eyePars), - ::testing::ValuesIn(netPrecisions), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::Values(CPUSpecificParams{{}, {}, {}, {}})), +INSTANTIATE_TEST_SUITE_P(smoke_Eye_With2DBatchShape_Dynamic_Test, + EyeLayerCPUTest, + ::testing::Combine(::testing::Combine(::testing::ValuesIn(dynShapesWith2DBatches), + ::testing::ValuesIn(batchShapes2D), + ::testing::ValuesIn(eyePars), + ::testing::ValuesIn(netPrecisions), + 
::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::Values(CPUSpecificParams{{}, {}, {}, {}})), EyeLayerCPUTest::getTestCaseName); // Ticket: 85127 @@ -291,5 +283,6 @@ INSTANTIATE_TEST_SUITE_P(smoke_Eye_With2DBatchShape_Dynamic_Test, EyeLayerCPUTes // ::testing::Values(ov::test::utils::DEVICE_CPU)), // ::testing::Values(CPUSpecificParams{{}, {}, {}, {}})), // EyeLayerCPUTest::getTestCaseName); -} // namespace -} // namespace CPULayerTestsDefinitions +} // namespace +} // namespace test +} // namespace ov diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/fake_quantize.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/fake_quantize.cpp index a3d96b81acbe56..760f2bcaed1d3c 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/fake_quantize.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/fake_quantize.cpp @@ -2,47 +2,45 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "test_utils/cpu_test_utils.hpp" +#include "common_test_utils/ov_tensor_utils.hpp" #include "ov_models/builders.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" -#include +#include "test_utils/cpu_test_utils.hpp" -using namespace InferenceEngine; -using namespace ngraph; using namespace CPUTestUtils; -using namespace ov::test; - -namespace CPULayerTestsDefinitions { -using inputShapes = std::tuple>; // range input shapes +namespace ov { +namespace test { +using inputShapes = std::tuple>; // range input shapes -using fqSpecificParams = std::tuple, // output low - std::vector, // output high - size_t>; // levels +using fqSpecificParams = std::tuple, // output low + std::vector, // output high + size_t>; // levels using fqLayerTestParamsSet = std::tuple, std::vector>, // il and ih values - bool, // should be decomposed + inputShapes, // input shapes + ov::element::Type, // input type + std::pair, std::vector>, // il and ih values + bool, // should be decomposed CPUSpecificParams>; class FakeQuantizeLayerCPUTest : public testing::WithParamInterface, - virtual public SubgraphBaseTest, public CPUTestsBase { + virtual public SubgraphBaseTest, + public CPUTestsBase { public: static std::string getTestCaseName(testing::TestParamInfo obj) { fqSpecificParams fqParams; inputShapes testShapes; - Precision inPrec; + ov::element::Type inPrec; std::pair, std::vector> inputRangesValues; bool shouldBeDecomposed; CPUSpecificParams cpuParams; std::tie(fqParams, testShapes, inPrec, inputRangesValues, shouldBeDecomposed, cpuParams) = obj.param; InputShape shapes; - std::vector ranges; + std::vector ranges; std::tie(shapes, ranges) = testShapes; int64_t inDataLowBounds, inDataHighBounds; @@ -63,7 +61,7 @@ class FakeQuantizeLayerCPUTest : public testing::WithParamInterface, std::vector> inputRangesValues; bool shouldBeDecomposed; CPUSpecificParams cpuParams; @@ -96,7 +92,7 @@ class FakeQuantizeLayerCPUTest : public testing::WithParamInterface ranges; + std::vector ranges; std::tie(shapes, ranges) = testShapes; inputDynamicShapes.push_back(shapes.first); @@ -110,24 +106,23 @@ class FakeQuantizeLayerCPUTest : public testing::WithParamInterface(ngInPrec, shape)); + params.push_back(std::make_shared(inPrec, shape)); - auto il = builder::makeConstant(ngInPrec, ranges[0], rangesBounds[0], rangesBounds[0].empty()); - auto ih = builder::makeConstant(ngInPrec, ranges[1], rangesBounds[1], rangesBounds[1].empty()); - auto ol = builder::makeConstant(ngInPrec, ranges[2], rangesBounds[2], rangesBounds[2].empty()); - auto oh = builder::makeConstant(ngInPrec, ranges[3], 
rangesBounds[3], rangesBounds[3].empty()); - auto fq = std::make_shared(params[0], il, ih, ol, oh, levels); + auto il = ngraph::builder::makeConstant(inPrec, ranges[0], rangesBounds[0], rangesBounds[0].empty()); + auto ih = ngraph::builder::makeConstant(inPrec, ranges[1], rangesBounds[1], rangesBounds[1].empty()); + auto ol = ngraph::builder::makeConstant(inPrec, ranges[2], rangesBounds[2], rangesBounds[2].empty()); + auto oh = ngraph::builder::makeConstant(inPrec, ranges[3], rangesBounds[3], rangesBounds[3].empty()); + auto fq = std::make_shared(params[0], il, ih, ol, oh, levels); layerName = shouldBeDecomposed ? "" : "FakeQuantize"; if (selectedType.empty()) { - selectedType = getPrimitiveType() + "_" + inPrec.name(); + selectedType = getPrimitiveType() + "_" + inPrec.get_type_name(); } - function = makeNgraphFunction(ngInPrec, params, fq, "FakeQuantizeCPU"); + function = makeNgraphFunction(inPrec, params, fq, "FakeQuantizeCPU"); } void generate_inputs(const std::vector& targetInputStaticShapes) override { @@ -155,16 +150,13 @@ TEST_P(FakeQuantizeLayerCPUTest, CompareWithRefs) { CheckPluginRelatedResults(compiledModel, layerName); } - const std::vector levels = {16, 255, 256}; int64_t dataLowBounds{-10}, dataHighBounds{10}; -const std::vector, std::vector>> input_ranges = { - {{0.0f}, {5.f}}, - {{0.0f}, {}}, - {{-10.0f}, {-5.f}} -}; +const std::vector, std::vector>> input_ranges = {{{0.0f}, {5.f}}, + {{0.0f}, {}}, + {{-10.0f}, {-5.f}}}; const std::vector outputLow{5.0f}, outputHigh{25.0f}; @@ -177,121 +169,109 @@ const auto specificParams = ::testing::Combine(::testing::Values(dataLowBounds), namespace fqImpl { std::vector memForm4D_jit = { - CPUSpecificParams({nchw}, {nchw}, {}, {}), - CPUSpecificParams({nhwc}, {nhwc}, {}, {}), -// CPUSpecificParams({nChw16c}, {nChw16c}, {}, {}) comment out due to post ops optimizations in lpt plugin.cpp + CPUSpecificParams({nchw}, {nchw}, {}, {}), + CPUSpecificParams({nhwc}, {nhwc}, {}, {}), + // CPUSpecificParams({nChw16c}, {nChw16c}, {}, {}) comment out due to post ops optimizations in lpt + // plugin.cpp }; std::vector rangesShapes4D_jit = { - inputShapes{ - InputShape{{{4, 5, 6, 7}}, {{4, 5, 6, 7}}}, - {{1, 5, 1, 1}, {1, 5, 1, 1}, {1, 5, 1, 1}, {1, 5, 1, 1}} - }, - inputShapes{ - InputShape{{{4, 5, 6, 7}}, {{4, 5, 6, 7}}}, - {{1, 1, 1, 1}, {1, 1, 1, 1}, {1, 1, 1, 1}, {1, 1, 1, 1}} - }, - inputShapes{ - InputShape{{-1, -1, -1, -1}, {{4, 5, 6, 7}, {1, 12, 1, 1}, {4, 1, 8, 2}, {1, 16, 6, 1}, {4, 5, 6, 7}}}, - {{1, 1, 1, 1}, {1, 1, 1, 1}, {1, 1, 1, 1}, {1, 1, 1, 1}} - }, + inputShapes{InputShape{{{4, 5, 6, 7}}, {{4, 5, 6, 7}}}, {{1, 5, 1, 1}, {1, 5, 1, 1}, {1, 5, 1, 1}, {1, 5, 1, 1}}}, + inputShapes{InputShape{{{4, 5, 6, 7}}, {{4, 5, 6, 7}}}, {{1, 1, 1, 1}, {1, 1, 1, 1}, {1, 1, 1, 1}, {1, 1, 1, 1}}}, + inputShapes{InputShape{{-1, -1, -1, -1}, {{4, 5, 6, 7}, {1, 12, 1, 1}, {4, 1, 8, 2}, {1, 16, 6, 1}, {4, 5, 6, 7}}}, + {{1, 1, 1, 1}, {1, 1, 1, 1}, {1, 1, 1, 1}, {1, 1, 1, 1}}}, inputShapes{ InputShape{{-1, -1, -1, -1}, {{4, 16, 6, 7}, {1, 16, 1, 1}, {7, 16, 1, 2}, {1, 16, 6, 1}, {4, 16, 6, 7}}}, - {{1, 16, 1, 1}, {1, 16, 1, 1}, {1, 16, 1, 1}, {1, 16, 1, 1}} - }, + {{1, 16, 1, 1}, {1, 16, 1, 1}, {1, 16, 1, 1}, {1, 16, 1, 1}}}, }; #if defined(OPENVINO_ARCH_X86_64) const auto testParams4D_jit = ::testing::Combine(specificParams, ::testing::ValuesIn(rangesShapes4D_jit), - ::testing::Values(Precision::FP32), + ::testing::Values(ov::element::f32), ::testing::ValuesIn(input_ranges), ::testing::Values(false), 
::testing::ValuesIn(filterCPUSpecificParams(memForm4D_jit))); -INSTANTIATE_TEST_SUITE_P(smoke_FakeQuantizeLayerCPUTest_4D_jit, FakeQuantizeLayerCPUTest, testParams4D_jit, FakeQuantizeLayerCPUTest::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_FakeQuantizeLayerCPUTest_4D_jit, + FakeQuantizeLayerCPUTest, + testParams4D_jit, + FakeQuantizeLayerCPUTest::getTestCaseName); #endif -std::vector memForm4D_ref = { - CPUSpecificParams({nchw}, {nchw}, {"ref_FP32"}, {"ref_FP32"}) -}; +std::vector memForm4D_ref = {CPUSpecificParams({nchw}, {nchw}, {"ref_FP32"}, {"ref_FP32"})}; std::vector rangesShapes4D_ref = { - inputShapes{ - InputShape{{{4, 5, 6, 7}}, {{4, 5, 6, 7}}}, - {{4, 1, 1, 1}, {4, 1, 1, 1}, {4, 1, 1, 1}, {4, 1, 1, 1}} - }, + inputShapes{InputShape{{{4, 5, 6, 7}}, {{4, 5, 6, 7}}}, {{4, 1, 1, 1}, {4, 1, 1, 1}, {4, 1, 1, 1}, {4, 1, 1, 1}}}, inputShapes{ InputShape{{-1, -1, -1, -1}, {{4, 16, 6, 7}, {4, 1, 1, 1}, {4, 16, 1, 2}, {4, 16, 6, 1}, {4, 16, 6, 7}}}, - {{4, 1, 1, 1}, {4, 1, 1, 1}, {4, 1, 1, 1}, {4, 1, 1, 1}} - }, + {{4, 1, 1, 1}, {4, 1, 1, 1}, {4, 1, 1, 1}, {4, 1, 1, 1}}}, }; const auto testParams4D_ref = ::testing::Combine(specificParams, ::testing::ValuesIn(rangesShapes4D_ref), - ::testing::Values(Precision::FP32), + ::testing::Values(ov::element::f32), ::testing::ValuesIn(input_ranges), ::testing::Values(false), ::testing::ValuesIn(memForm4D_ref)); -INSTANTIATE_TEST_SUITE_P(smoke_FakeQuantizeLayerCPUTest_4D_ref, FakeQuantizeLayerCPUTest, testParams4D_ref, FakeQuantizeLayerCPUTest::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_FakeQuantizeLayerCPUTest_4D_ref, + FakeQuantizeLayerCPUTest, + testParams4D_ref, + FakeQuantizeLayerCPUTest::getTestCaseName); #if defined(OPENVINO_ARCH_X86_64) std::vector memForm5D_jit = { - CPUSpecificParams({ncdhw}, {ncdhw}, {}, {}), - CPUSpecificParams({ndhwc}, {ndhwc}, {}, {}), -// CPUSpecificParams({nCdhw16c}, {nCdhw16c}, {}, {}) comment out due to post ops optimizations in lpt plugin.cpp + CPUSpecificParams({ncdhw}, {ncdhw}, {}, {}), + CPUSpecificParams({ndhwc}, {ndhwc}, {}, {}), + // CPUSpecificParams({nCdhw16c}, {nCdhw16c}, {}, {}) comment out due to post ops optimizations in lpt + // plugin.cpp }; std::vector rangesShapes5D_jit = { - inputShapes{ - InputShape{{3, 4, 5, 6, 7}, {{3, 4, 5, 6, 7}}}, - {{1, 4, 1, 1, 1}, {1, 4, 1, 1, 1}, {1, 4, 1, 1, 1}, {1, 4, 1, 1, 1}} - }, - inputShapes{ - InputShape{{3, 4, 5, 6, 7}, {{3, 4, 5, 6, 7}}}, - {{1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}} - }, - inputShapes{ - InputShape{{-1, -1, -1, -1, -1}, {{3, 4, 5, 6, 7}, {1, 12, 1, 1, 1}, {4, 1, 8, 2, 7}, {3, 4, 5, 6, 7}, {1, 16, 6, 5, 1}}}, - {{1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}} - }, - inputShapes{ - InputShape{{-1, -1, -1, -1, -1}, {{4, 16, 6, 7, 8}, {1, 16, 1, 1, 1}, {7, 16, 1, 2, 5}, {4, 16, 6, 7, 8}, {1, 16, 6, 1, 7}}}, - {{1, 16, 1, 1, 1}, {1, 16, 1, 1, 1}, {1, 16, 1, 1, 1}, {1, 16, 1, 1, 1}} - }, + inputShapes{InputShape{{3, 4, 5, 6, 7}, {{3, 4, 5, 6, 7}}}, + {{1, 4, 1, 1, 1}, {1, 4, 1, 1, 1}, {1, 4, 1, 1, 1}, {1, 4, 1, 1, 1}}}, + inputShapes{InputShape{{3, 4, 5, 6, 7}, {{3, 4, 5, 6, 7}}}, + {{1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}}}, + inputShapes{InputShape{{-1, -1, -1, -1, -1}, + {{3, 4, 5, 6, 7}, {1, 12, 1, 1, 1}, {4, 1, 8, 2, 7}, {3, 4, 5, 6, 7}, {1, 16, 6, 5, 1}}}, + {{1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}, {1, 1, 1, 1, 1}}}, + inputShapes{InputShape{{-1, -1, -1, -1, -1}, + {{4, 16, 6, 7, 8}, {1, 16, 1, 1, 1}, {7, 16, 1, 2, 5}, {4, 16, 6, 7, 8}, {1, 16, 6, 1, 
7}}}, + {{1, 16, 1, 1, 1}, {1, 16, 1, 1, 1}, {1, 16, 1, 1, 1}, {1, 16, 1, 1, 1}}}, }; const auto testParams5D_jit = ::testing::Combine(specificParams, ::testing::ValuesIn(rangesShapes5D_jit), - ::testing::Values(Precision::FP32), + ::testing::Values(ov::element::f32), ::testing::ValuesIn(input_ranges), ::testing::Values(false), ::testing::ValuesIn(filterCPUSpecificParams(memForm5D_jit))); -INSTANTIATE_TEST_SUITE_P(smoke_FakeQuantizeLayerCPUTest_5D_jit, FakeQuantizeLayerCPUTest, testParams5D_jit, FakeQuantizeLayerCPUTest::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_FakeQuantizeLayerCPUTest_5D_jit, + FakeQuantizeLayerCPUTest, + testParams5D_jit, + FakeQuantizeLayerCPUTest::getTestCaseName); #endif -std::vector memForm5D_ref = { - CPUSpecificParams({ncdhw}, {ncdhw}, {"ref_FP32"}, {"ref_FP32"}) -}; +std::vector memForm5D_ref = {CPUSpecificParams({ncdhw}, {ncdhw}, {"ref_FP32"}, {"ref_FP32"})}; std::vector rangesShapes5D_ref = { - inputShapes{ - InputShape{{3, 4, 5, 6, 7}, {{3, 4, 5, 6, 7}}}, - {{3, 1, 1, 1, 1}, {3, 1, 1, 1, 1}, {3, 1, 1, 1, 1}, {3, 1, 1, 1, 1}} - }, - inputShapes{ - InputShape{{-1, -1, -1, -1, -1}, {{3, 16, 6, 7, 8}, {3, 16, 1, 1, 1}, {3, 16, 1, 2, 5}, {3, 16, 6, 1, 7}, {3, 16, 6, 7, 8}}}, - {{3, 1, 1, 1, 1}, {3, 1, 1, 1, 1}, {3, 1, 1, 1, 1}, {3, 1, 1, 1, 1}} - }, + inputShapes{InputShape{{3, 4, 5, 6, 7}, {{3, 4, 5, 6, 7}}}, + {{3, 1, 1, 1, 1}, {3, 1, 1, 1, 1}, {3, 1, 1, 1, 1}, {3, 1, 1, 1, 1}}}, + inputShapes{InputShape{{-1, -1, -1, -1, -1}, + {{3, 16, 6, 7, 8}, {3, 16, 1, 1, 1}, {3, 16, 1, 2, 5}, {3, 16, 6, 1, 7}, {3, 16, 6, 7, 8}}}, + {{3, 1, 1, 1, 1}, {3, 1, 1, 1, 1}, {3, 1, 1, 1, 1}, {3, 1, 1, 1, 1}}}, }; const auto testParams5D_ref = ::testing::Combine(specificParams, ::testing::ValuesIn(rangesShapes5D_ref), - ::testing::Values(Precision::FP32), + ::testing::Values(ov::element::f32), ::testing::ValuesIn(input_ranges), ::testing::Values(false), ::testing::ValuesIn(memForm5D_ref)); -INSTANTIATE_TEST_SUITE_P(smoke_FakeQuantizeLayerCPUTest_5D_ref, FakeQuantizeLayerCPUTest, testParams5D_ref, FakeQuantizeLayerCPUTest::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_FakeQuantizeLayerCPUTest_5D_ref, + FakeQuantizeLayerCPUTest, + testParams5D_ref, + FakeQuantizeLayerCPUTest::getTestCaseName); const auto specificParamsBin = ::testing::Combine(::testing::Values(dataLowBounds), ::testing::Values(dataHighBounds), @@ -299,107 +279,70 @@ const auto specificParamsBin = ::testing::Combine(::testing::Values(dataLowBound ::testing::Values(std::vector{1.0f}), ::testing::Values(2)); -const auto testParamsBin4D = ::testing::Combine(specificParamsBin, - ::testing::ValuesIn(rangesShapes4D_jit), - ::testing::Values(Precision::FP32), - ::testing::Values(std::pair, std::vector>{{3.0f}, {3.f}}), - ::testing::Values(false), - ::testing::Values(CPUSpecificParams())); +const auto testParamsBin4D = + ::testing::Combine(specificParamsBin, + ::testing::ValuesIn(rangesShapes4D_jit), + ::testing::Values(ov::element::f32), + ::testing::Values(std::pair, std::vector>{{3.0f}, {3.f}}), + ::testing::Values(false), + ::testing::Values(CPUSpecificParams())); -INSTANTIATE_TEST_SUITE_P(smoke_FakeQuantizeLayerCPUTest_4D_bin, FakeQuantizeLayerCPUTest, testParamsBin4D, FakeQuantizeLayerCPUTest::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_FakeQuantizeLayerCPUTest_4D_bin, + FakeQuantizeLayerCPUTest, + testParamsBin4D, + FakeQuantizeLayerCPUTest::getTestCaseName); -} // namespace fqImpl +} // namespace fqImpl namespace fqDecompos { std::vector decomposeShapes = { - inputShapes{ - InputShape{{4, 5, 6, 7}, 
{{4, 5, 6, 7}}}, - {{4, 5, 6, 7}, {4, 5, 6, 7}, {4, 5, 6, 7}, {4, 5, 6, 7}} - }, - inputShapes{ - InputShape{{4, 5, 6, 7}, {{4, 5, 6, 7}}}, - {{1, 5, 1, 1}, {1, 1, 6, 7}, {1, 1, 6, 7}, {1, 1, 6, 7}} - }, - inputShapes{ - InputShape{{4, 5, 6, 7}, {{4, 5, 6, 7}}}, - {{1, 1, 6, 7}, {1, 1, 6, 7}, {1, 1, 6, 7}, {1, 1, 6, 7}} - }, - inputShapes{ - InputShape{{4, 5, 6, 7}, {{4, 5, 6, 7}}}, - {{1, 1, 6, 7}, {1, 1, 6, 7}, {1, 1, 1, 1}, {1, 1, 1, 1}} - }, - inputShapes{ - InputShape{{4, 5, 6, 7}, {{4, 5, 6, 7}}}, - {{1, 1, 6, 1}, {1, 5, 6, 7}, {1, 1, 6, 1}, {1, 1, 6, 1}} - }, - inputShapes{ - InputShape{{4, 5, 6, 6}, {{4, 5, 6, 6}}}, - {{1, 1, 6, 6}, {1, 1, 6, 6}, {1, 5, 6, 1}, {1, 5, 1, 6}} - }, - inputShapes{ - InputShape{{4, 5, 6, 6}, {{4, 5, 6, 6}}}, - {{1, 5, 6, 1}, {1, 5, 6, 1}, {1, 5, 6, 1}, {1, 5, 1, 6}} - }, - inputShapes{ - InputShape{{3, 4, 5, 6, 7}, {{3, 4, 5, 6, 7}}}, - {{4, 5, 6, 7}, {4, 5, 6, 7}, {4, 5, 6, 7}, {4, 5, 6, 7}} - }, - inputShapes{ - InputShape{{3, 4, 5, 6, 7}, {{3, 4, 5, 6, 7}}}, - {{1, 5, 1, 1}, {1, 1, 6, 7}, {1, 1, 6, 7}, {1, 1, 6, 7}} - }, - inputShapes{ - InputShape{{3, 4, 5, 6, 7}, {{3, 4, 5, 6, 7}}}, - {{1, 1, 6, 7}, {1, 1, 6, 7}, {1, 1, 6, 7}, {1, 1, 6, 7}} - }, - inputShapes{ - InputShape{{3, 4, 5, 6, 7}, {{3, 4, 5, 6, 7}}}, - {{1, 1, 6, 7}, {1, 1, 6, 7}, {1, 1, 1, 1}, {1, 1, 1, 1}} - }, - inputShapes{ - InputShape{{3, 4, 5, 6, 7}, {{3, 4, 5, 6, 7}}}, - {{1, 1, 6, 1}, {1, 5, 6, 7}, {1, 1, 6, 1}, {1, 1, 6, 1}} - }, - inputShapes{ - InputShape{{2, 3, 4, 5, 6, 7}, {{2, 3, 4, 5, 6, 7}}}, - {{4, 5, 6, 7}, {4, 5, 6, 7}, {4, 5, 6, 7}, {4, 5, 6, 7}} - }, - inputShapes{ - InputShape{{2, 3, 4, 5, 6, 7}, {{2, 3, 4, 5, 6, 7}}}, - {{1, 5, 1, 1}, {1, 1, 6, 7}, {1, 1, 6, 7}, {1, 1, 6, 7}} - }, - inputShapes{ - InputShape{{2, 3, 4, 5, 6, 7}, {{2, 3, 4, 5, 6, 7}}}, - {{1, 1, 6, 7}, {1, 1, 6, 7}, {1, 1, 6, 7}, {1, 1, 6, 7}} - }, - inputShapes{ - InputShape{{2, 3, 4, 5, 6, 7}, {{2, 3, 4, 5, 6, 7}}}, - {{1, 1, 6, 7}, {1, 1, 6, 7}, {1, 1, 1, 1}, {1, 1, 1, 1}} - }, - inputShapes{ - InputShape{{2, 3, 4, 5, 6, 7}, {{2, 3, 4, 5, 6, 7}}}, - {{1, 1, 6, 1}, {1, 5, 6, 7}, {1, 1, 6, 1}, {1, 1, 6, 1}} - }, - inputShapes{ - InputShape{{-1, -1, -1, -1}, {{4, 5, 6, 7}, {1, 5, 6, 7}, {7, 5, 6, 7}, {4, 5, 6, 7}}}, - {{1, 1, 6, 1}, {1, 5, 6, 7}, {1, 1, 6, 1}, {1, 1, 6, 1}} - }, - inputShapes{ - InputShape{{-1, -1, -1, -1, -1}, {{8, 4, 5, 6, 7}, {1, 1, 5, 6, 7}, {1, 1, 1, 6, 7}, {8, 4, 5, 6, 7}}}, - {{1, 1, 6, 7}, {1, 1, 6, 7}, {1, 1, 1, 1}, {1, 1, 1, 1}} - }, + inputShapes{InputShape{{4, 5, 6, 7}, {{4, 5, 6, 7}}}, {{4, 5, 6, 7}, {4, 5, 6, 7}, {4, 5, 6, 7}, {4, 5, 6, 7}}}, + inputShapes{InputShape{{4, 5, 6, 7}, {{4, 5, 6, 7}}}, {{1, 5, 1, 1}, {1, 1, 6, 7}, {1, 1, 6, 7}, {1, 1, 6, 7}}}, + inputShapes{InputShape{{4, 5, 6, 7}, {{4, 5, 6, 7}}}, {{1, 1, 6, 7}, {1, 1, 6, 7}, {1, 1, 6, 7}, {1, 1, 6, 7}}}, + inputShapes{InputShape{{4, 5, 6, 7}, {{4, 5, 6, 7}}}, {{1, 1, 6, 7}, {1, 1, 6, 7}, {1, 1, 1, 1}, {1, 1, 1, 1}}}, + inputShapes{InputShape{{4, 5, 6, 7}, {{4, 5, 6, 7}}}, {{1, 1, 6, 1}, {1, 5, 6, 7}, {1, 1, 6, 1}, {1, 1, 6, 1}}}, + inputShapes{InputShape{{4, 5, 6, 6}, {{4, 5, 6, 6}}}, {{1, 1, 6, 6}, {1, 1, 6, 6}, {1, 5, 6, 1}, {1, 5, 1, 6}}}, + inputShapes{InputShape{{4, 5, 6, 6}, {{4, 5, 6, 6}}}, {{1, 5, 6, 1}, {1, 5, 6, 1}, {1, 5, 6, 1}, {1, 5, 1, 6}}}, + inputShapes{InputShape{{3, 4, 5, 6, 7}, {{3, 4, 5, 6, 7}}}, + {{4, 5, 6, 7}, {4, 5, 6, 7}, {4, 5, 6, 7}, {4, 5, 6, 7}}}, + inputShapes{InputShape{{3, 4, 5, 6, 7}, {{3, 4, 5, 6, 7}}}, + {{1, 5, 1, 1}, {1, 1, 6, 7}, {1, 1, 6, 7}, {1, 1, 6, 7}}}, + 
inputShapes{InputShape{{3, 4, 5, 6, 7}, {{3, 4, 5, 6, 7}}}, + {{1, 1, 6, 7}, {1, 1, 6, 7}, {1, 1, 6, 7}, {1, 1, 6, 7}}}, + inputShapes{InputShape{{3, 4, 5, 6, 7}, {{3, 4, 5, 6, 7}}}, + {{1, 1, 6, 7}, {1, 1, 6, 7}, {1, 1, 1, 1}, {1, 1, 1, 1}}}, + inputShapes{InputShape{{3, 4, 5, 6, 7}, {{3, 4, 5, 6, 7}}}, + {{1, 1, 6, 1}, {1, 5, 6, 7}, {1, 1, 6, 1}, {1, 1, 6, 1}}}, + inputShapes{InputShape{{2, 3, 4, 5, 6, 7}, {{2, 3, 4, 5, 6, 7}}}, + {{4, 5, 6, 7}, {4, 5, 6, 7}, {4, 5, 6, 7}, {4, 5, 6, 7}}}, + inputShapes{InputShape{{2, 3, 4, 5, 6, 7}, {{2, 3, 4, 5, 6, 7}}}, + {{1, 5, 1, 1}, {1, 1, 6, 7}, {1, 1, 6, 7}, {1, 1, 6, 7}}}, + inputShapes{InputShape{{2, 3, 4, 5, 6, 7}, {{2, 3, 4, 5, 6, 7}}}, + {{1, 1, 6, 7}, {1, 1, 6, 7}, {1, 1, 6, 7}, {1, 1, 6, 7}}}, + inputShapes{InputShape{{2, 3, 4, 5, 6, 7}, {{2, 3, 4, 5, 6, 7}}}, + {{1, 1, 6, 7}, {1, 1, 6, 7}, {1, 1, 1, 1}, {1, 1, 1, 1}}}, + inputShapes{InputShape{{2, 3, 4, 5, 6, 7}, {{2, 3, 4, 5, 6, 7}}}, + {{1, 1, 6, 1}, {1, 5, 6, 7}, {1, 1, 6, 1}, {1, 1, 6, 1}}}, + inputShapes{InputShape{{-1, -1, -1, -1}, {{4, 5, 6, 7}, {1, 5, 6, 7}, {7, 5, 6, 7}, {4, 5, 6, 7}}}, + {{1, 1, 6, 1}, {1, 5, 6, 7}, {1, 1, 6, 1}, {1, 1, 6, 1}}}, + inputShapes{InputShape{{-1, -1, -1, -1, -1}, {{8, 4, 5, 6, 7}, {1, 1, 5, 6, 7}, {1, 1, 1, 6, 7}, {8, 4, 5, 6, 7}}}, + {{1, 1, 6, 7}, {1, 1, 6, 7}, {1, 1, 1, 1}, {1, 1, 1, 1}}}, }; const auto testParams = ::testing::Combine(specificParams, ::testing::ValuesIn(decomposeShapes), - ::testing::Values(Precision::FP32), + ::testing::Values(ov::element::f32), ::testing::ValuesIn(input_ranges), ::testing::Values(true), ::testing::Values(CPUSpecificParams{})); -INSTANTIATE_TEST_SUITE_P(smoke_FakeQuantizeLayerCPUTest_Decompos, FakeQuantizeLayerCPUTest, testParams, FakeQuantizeLayerCPUTest::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_FakeQuantizeLayerCPUTest_Decompos, + FakeQuantizeLayerCPUTest, + testParams, + FakeQuantizeLayerCPUTest::getTestCaseName); -} // namespace fqDecompos +} // namespace fqDecompos -} // namespace CPULayerTestsDefinitions +} // namespace test +} // namespace ov From 275e14d94ab58e78f98e3da12491b40ae3fdf73f Mon Sep 17 00:00:00 2001 From: Xuejun Zhai Date: Wed, 6 Dec 2023 21:05:44 +0800 Subject: [PATCH 13/13] [CPU tests] migrate single layer test cases to be API 2.0 - part 5 (#21489) Signed-off-by: Zhai, Xuejun --- .../functional/single_layer_tests/select.cpp | 188 ++-- .../single_layer_tests/shape_ops.cpp | 335 ++++--- .../functional/single_layer_tests/shapeof.cpp | 118 +-- .../single_layer_tests/shuffle_channels.cpp | 193 ++-- .../functional/single_layer_tests/slice.cpp | 880 ++++++++--------- .../single_layer_tests/space_to_batch.cpp | 471 +++++---- .../single_layer_tests/space_to_depth.cpp | 262 +++-- .../functional/single_layer_tests/split.cpp | 775 +++++++-------- .../single_layer_tests/strided_slice.cpp | 757 +++++++++------ .../single_layer_tests/tensor_iterator.cpp | 147 ++- .../functional/single_layer_tests/tile.cpp | 314 +++--- .../functional/single_layer_tests/topk.cpp | 334 +++---- .../functional/single_layer_tests/unique.cpp | 289 +++--- .../single_layer_tests/variadic_split.cpp | 911 ++++++++---------- 14 files changed, 2807 insertions(+), 3167 deletions(-) diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/select.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/select.cpp index 84b6a9d19adbdb..f66768c314c36e 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/select.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/select.cpp @@ 
-2,58 +2,55 @@ // SPDX-License-Identifier: Apache-2.0 // -#include +#include "common_test_utils/ov_tensor_utils.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" #include "test_utils/fusing_test_utils.hpp" -#include "ov_models/builders.hpp" -using namespace InferenceEngine; using namespace CPUTestUtils; -using namespace ov::test; +namespace ov { +namespace test { -namespace CPULayerTestsDefinitions { - -using selectParams = std::tuple, // input shapes - ElementType, // Then/Else precision - ngraph::op::AutoBroadcastSpec, // broadcast +using selectParams = std::tuple, // input shapes + ElementType, // Then/Else precision + ov::op::AutoBroadcastSpec, // broadcast fusingSpecificParams>; class SelectLayerCPUTest : public testing::WithParamInterface, virtual public SubgraphBaseTest, public CpuTestWithFusing { public: - static std::string getTestCaseName(testing::TestParamInfo obj) { - std::vector shapes; - ElementType precision; - ngraph::op::AutoBroadcastSpec broadcast; - fusingSpecificParams fusingParams; - std::tie(shapes, precision, broadcast, fusingParams) = obj.param; - - std::ostringstream result; - result << "Condition_prc_" << ElementType::boolean << "_Then_Else_prc_" << precision << "_"; - result << "IS=("; - for (const auto& shape : shapes) { - result << shape.first << "_"; - } - result << ")_TS=("; - for (const auto& shape : shapes) { - for (const auto& item : shape.second) { - result << ov::test::utils::vec2str(item) << "_"; - } - } - result << "Broadcast=" << broadcast.m_type; - result << CpuTestWithFusing::getTestCaseName(fusingParams); - - return result.str(); - } + static std::string getTestCaseName(testing::TestParamInfo obj) { + std::vector shapes; + ElementType precision; + ov::op::AutoBroadcastSpec broadcast; + fusingSpecificParams fusingParams; + std::tie(shapes, precision, broadcast, fusingParams) = obj.param; + + std::ostringstream result; + result << "Condition_prc_" << ElementType::boolean << "_Then_Else_prc_" << precision << "_"; + result << "IS=("; + for (const auto& shape : shapes) { + result << shape.first << "_"; + } + result << ")_TS=("; + for (const auto& shape : shapes) { + for (const auto& item : shape.second) { + result << ov::test::utils::vec2str(item) << "_"; + } + } + result << "Broadcast=" << broadcast.m_type; + result << CpuTestWithFusing::getTestCaseName(fusingParams); + + return result.str(); + } protected: - void SetUp() override { + void SetUp() override { abs_threshold = 0; targetDevice = ov::test::utils::DEVICE_CPU; std::vector shapes; ElementType precision; - ngraph::op::AutoBroadcastSpec broadcast; + ov::op::AutoBroadcastSpec broadcast; fusingSpecificParams fusingParams; std::tie(shapes, precision, broadcast, fusingParams) = this->GetParam(); init_input_shapes(shapes); @@ -69,14 +66,26 @@ class SelectLayerCPUTest : public testing::WithParamInterface, auto select = std::make_shared(parameters[0], parameters[1], parameters[2], broadcast); function = makeNgraphFunction(precision, parameters, select, "Eltwise"); - } + } - void generate_inputs(const std::vector& targetInputStaticShapes) override { + void generate_inputs(const std::vector& targetInputStaticShapes) override { inputs.clear(); const auto& modelInputs = function->inputs(); - auto condTensor = ov::test::utils::create_and_fill_tensor(modelInputs[0].get_element_type(), targetInputStaticShapes[0], 3, -1, 2); - auto thenTensor = ov::test::utils::create_and_fill_tensor(modelInputs[1].get_element_type(), targetInputStaticShapes[1], 10, -10, 2); - auto elseTensor = 
ov::test::utils::create_and_fill_tensor(modelInputs[2].get_element_type(), targetInputStaticShapes[2], 10, 0, 2);
+        auto condTensor = ov::test::utils::create_and_fill_tensor(modelInputs[0].get_element_type(),
+                                                                  targetInputStaticShapes[0],
+                                                                  3,
+                                                                  -1,
+                                                                  2);
+        auto thenTensor = ov::test::utils::create_and_fill_tensor(modelInputs[1].get_element_type(),
+                                                                  targetInputStaticShapes[1],
+                                                                  10,
+                                                                  -10,
+                                                                  2);
+        auto elseTensor = ov::test::utils::create_and_fill_tensor(modelInputs[2].get_element_type(),
+                                                                  targetInputStaticShapes[2],
+                                                                  10,
+                                                                  0,
+                                                                  2);
         inputs.insert({modelInputs[0].get_node_shared_ptr(), condTensor});
         inputs.insert({modelInputs[1].get_node_shared_ptr(), thenTensor});
         inputs.insert({modelInputs[2].get_node_shared_ptr(), elseTensor});
@@ -88,12 +97,7 @@ TEST_P(SelectLayerCPUTest, CompareWithRefs) {
     CheckPluginRelatedResults(compiledModel, std::set<std::string>{"Eltwise", "Subgraph"});
 }
 
-const std::vector<ElementType> precisions = {
-    ElementType::f32,
-    ElementType::i32,
-    ElementType::bf16,
-    ElementType::i8
-};
+const std::vector<ElementType> precisions = {ElementType::f32, ElementType::i32, ElementType::bf16, ElementType::i8};
 
 const std::vector<fusingSpecificParams> fusingParamsSet{
     emptyFusingSpec,
@@ -104,106 +108,68 @@ const std::vector<fusingSpecificParams> fusingParamsSet{
 
 const std::vector<std::vector<InputShape>> inShapesDynamicNumpy = {
     {
         // Condition
-        {
-            {-1, -1, -1, -1},
-            {{5, 1, 2, 1}, {1, 1, 1, 1}, {5, 9, 8, 7}}
-        },
+        {{-1, -1, -1, -1}, {{5, 1, 2, 1}, {1, 1, 1, 1}, {5, 9, 8, 7}}},
         // Then
-        {
-            {-1, -1, -1, -1, -1},
-            {{8, 1, 9, 1, 1}, {1, 1, 1, 1, 1}, {21, 5, 9, 8, 7}}
-        },
+        {{-1, -1, -1, -1, -1}, {{8, 1, 9, 1, 1}, {1, 1, 1, 1, 1}, {21, 5, 9, 8, 7}}},
         // Else
-        {
-            {-1, -1, -1, -1},
-            {{5, 1, 2, 1}, {1, 1, 1, 1}, {1, 1, 1, 1}}
-        },
+        {{-1, -1, -1, -1}, {{5, 1, 2, 1}, {1, 1, 1, 1}, {1, 1, 1, 1}}},
     },
     {
         // Condition
-        {
-            {-1, -1},
-            {{8, 1}, {10, 5}, {8, 7}}
-        },
+        {{-1, -1}, {{8, 1}, {10, 5}, {8, 7}}},
         // Then
-        {
-            {-1, -1, -1, -1, -1},
-            {{2, 1, 1, 8, 1}, {7, 8, 3, 10, 5}, {1, 1, 1, 8, 1}}
-        },
+        {{-1, -1, -1, -1, -1}, {{2, 1, 1, 8, 1}, {7, 8, 3, 10, 5}, {1, 1, 1, 8, 1}}},
         // Else
-        {
-            {-1, -1, -1},
-            {{9, 1, 1}, {3, 10, 5}, {1, 1, 7}}
-        },
+        {{-1, -1, -1}, {{9, 1, 1}, {3, 10, 5}, {1, 1, 7}}},
     },
     {
         // Condition
-        {
-            {{2, 8}, {3, 7}, {1, 10}, {1, 6}, {1, 10}},
-            {{5, 4, 1, 1, 1}, {8, 5, 5, 5, 1}, {2, 3, 4, 5, 6}}
-        },
+        {{{2, 8}, {3, 7}, {1, 10}, {1, 6}, {1, 10}}, {{5, 4, 1, 1, 1}, {8, 5, 5, 5, 1}, {2, 3, 4, 5, 6}}},
         // Then
-        {
-            {-1, -1, -1, -1, -1},
-            {{5, 1, 8, 1, 1}, {8, 1, 1, 1, 8}, {2, 3, 4, 5, 6}}
-        },
+        {{-1, -1, -1, -1, -1}, {{5, 1, 8, 1, 1}, {8, 1, 1, 1, 8}, {2, 3, 4, 5, 6}}},
         // Else
-        {
-            {{1, 5}, {1, 11}, {5, 5}, {1, 8}},
-            {{1, 1, 5, 1}, {5, 5, 5, 8}, {3, 4, 5, 6}}
-        },
+        {{{1, 5}, {1, 11}, {5, 5}, {1, 8}}, {{1, 1, 5, 1}, {5, 5, 5, 8}, {3, 4, 5, 6}}},
     },
     {
         // Condition
-        {
-            {{1, 10}},
-            {{4}, {10}, {1}}
-        },
+        {{{1, 10}}, {{4}, {10}, {1}}},
         // Then
-        {
-            {{1, 15}, {2, 7}, {1, 6}, {5, 12}, {1, 20}},
-            {{8, 5, 6, 6, 1}, {15, 7, 6, 10, 10}, {2, 5, 4, 5, 3}}
-        },
+        {{{1, 15}, {2, 7}, {1, 6}, {5, 12}, {1, 20}}, {{8, 5, 6, 6, 1}, {15, 7, 6, 10, 10}, {2, 5, 4, 5, 3}}},
         // Else
-        {
-            {{2, 10}, {1, 16}},
-            {{6, 4}, {10, 10}, {5, 1}}
-        },
+        {{{2, 10}, {1, 16}}, {{6, 4}, {10, 10}, {5, 1}}},
     },
 };
 
 const auto numpyCases = ::testing::Combine(::testing::ValuesIn(inShapesDynamicNumpy),
                                            ::testing::ValuesIn(precisions),
-                                           ::testing::Values(ngraph::op::AutoBroadcastType::NUMPY),
+                                           ::testing::Values(ov::op::AutoBroadcastType::NUMPY),
                                            ::testing::ValuesIn(fusingParamsSet));
 
-INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefsNumpy_dynamic, SelectLayerCPUTest, numpyCases, SelectLayerCPUTest::getTestCaseName);
+INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefsNumpy_dynamic,
+                         SelectLayerCPUTest,
+                         numpyCases,
+                         SelectLayerCPUTest::getTestCaseName);
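// A brief illustration of the two broadcast modes these Select suites exercise (a sketch only;
// the node type ov::op::v1::Select matches the one built in SetUp above, and cond/then_in/else_in
// are placeholder outputs):
//
//     // NUMPY: shapes are aligned from the right and size-1 dimensions stretch, which is why the
//     // numpy cases above may mix ranks, e.g. {8, 1}, {2, 1, 1, 8, 1} and {9, 1, 1}.
//     auto sel_numpy = std::make_shared<ov::op::v1::Select>(cond, then_in, else_in,
//                                                           ov::op::AutoBroadcastType::NUMPY);
//     // NONE: no implicit broadcasting, so the inShapesDynamicNone cases below feed identical
//     // target shapes ({3, 16, 15, 5}, {1, 16, 10, 1}, {10, 16, 20, 5}) to all three inputs.
//     auto sel_none = std::make_shared<ov::op::v1::Select>(cond, then_in, else_in,
//                                                          ov::op::AutoBroadcastType::NONE);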
 
 const std::vector<std::vector<InputShape>> inShapesDynamicNone = {
     {
         // Condition
-        {
-            {{1, 10}, -1, {10, 20}, {1, 5}},
-            {{3, 16, 15, 5}, {1, 16, 10, 1}, {10, 16, 20, 5}}
-        },
+        {{{1, 10}, -1, {10, 20}, {1, 5}}, {{3, 16, 15, 5}, {1, 16, 10, 1}, {10, 16, 20, 5}}},
         // Then
-        {
-            {-1, {16, 16}, -1, -1},
-            {{3, 16, 15, 5}, {1, 16, 10, 1}, {10, 16, 20, 5}}
-        },
+        {{-1, {16, 16}, -1, -1}, {{3, 16, 15, 5}, {1, 16, 10, 1}, {10, 16, 20, 5}}},
         // Else
-        {
-            {-1, -1, -1, -1},
-            {{3, 16, 15, 5}, {1, 16, 10, 1}, {10, 16, 20, 5}}
-        },
+        {{-1, -1, -1, -1}, {{3, 16, 15, 5}, {1, 16, 10, 1}, {10, 16, 20, 5}}},
     },
 };
 
 const auto noneCases = ::testing::Combine(::testing::ValuesIn(inShapesDynamicNone),
                                           ::testing::ValuesIn(precisions),
-                                          ::testing::Values(ngraph::op::AutoBroadcastType::NONE),
+                                          ::testing::Values(ov::op::AutoBroadcastType::NONE),
                                           ::testing::ValuesIn(fusingParamsSet));
 
-INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefsNone_dynamic, SelectLayerCPUTest, noneCases, SelectLayerCPUTest::getTestCaseName);
+INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefsNone_dynamic,
+                         SelectLayerCPUTest,
+                         noneCases,
+                         SelectLayerCPUTest::getTestCaseName);
 
-} // namespace CPULayerTestsDefinitions
+}  // namespace test
+}  // namespace ov
diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/shape_ops.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/shape_ops.cpp
index c7ddbab6e5c27d..196281d1572380 100644
--- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/shape_ops.cpp
+++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/shape_ops.cpp
@@ -2,39 +2,32 @@
 // SPDX-License-Identifier: Apache-2.0
 //
 
+#include "common_test_utils/ov_tensor_utils.hpp"
+#include "ov_models/builders.hpp"
 #include "shared_test_classes/base/ov_subgraph.hpp"
 #include "test_utils/cpu_test_utils.hpp"
-#include "ov_models/builders.hpp"
-#include
 
-using namespace ngraph;
-using namespace InferenceEngine;
 using namespace CPUTestUtils;
-using namespace ov::test;
 
-namespace CPULayerTestsDefinitions {
+namespace ov {
+namespace test {
 
-enum class shapeNodeType {
-    Reshape,
-    Squeeze,
-    Unsqueeze,
-    ReshapeWithNonZero
-};
+enum class shapeNodeType { Reshape, Squeeze, Unsqueeze, ReshapeWithNonZero };
 
 inline std::ostream& operator<<(std::ostream& os, shapeNodeType type) {
     switch (type) {
-    case shapeNodeType::Reshape:
-        os << "Reshape";
-        break;
-    case shapeNodeType::Squeeze:
-        os << "Squeeze";
-        break;
-    case shapeNodeType::Unsqueeze:
-        os << "Unsqueeze";
-        break;
-    case shapeNodeType::ReshapeWithNonZero:
-        os << "ReshapeWithNonZero";
-        break;
+    case shapeNodeType::Reshape:
+        os << "Reshape";
+        break;
+    case shapeNodeType::Squeeze:
+        os << "Squeeze";
+        break;
+    case shapeNodeType::Unsqueeze:
+        os << "Unsqueeze";
+        break;
+    case shapeNodeType::ReshapeWithNonZero:
+        os << "ReshapeWithNonZero";
+        break;
     }
     return os;
 }
@@ -44,21 +37,22 @@ struct inputDescription {
     std::vector<std::vector<int>> data;
 };
 
-using shapeOpsParams = std::tuple<
-    inputDescription,                 // input shapes
-    ngraph::helpers::InputLayerType,  // second input type
-    shapeNodeType,                    // node type
-    Precision,                        // precision
-    ngraph::element::Type_t,          // second input precision
-    bool>;                            // special zero
+using shapeOpsParams = std::tuple<inputDescription,                 // input shapes
+                                  ov::test::utils::InputLayerType,  // second input type
+                                  shapeNodeType,                    // node type
+                                  ov::element::Type,                // precision
+                                  ov::element::Type_t,              // second input precision
+                                  bool>;                            // special zero
 
-class ShapeOpsCPUTest : public testing::WithParamInterface<shapeOpsParams>, virtual public SubgraphBaseTest, public CPUTestsBase {
+class ShapeOpsCPUTest : public
testing::WithParamInterface, + virtual public SubgraphBaseTest, + public CPUTestsBase { public: static std::string getTestCaseName(testing::TestParamInfo obj) { inputDescription inpDesc; - ngraph::helpers::InputLayerType secondType; + ov::test::utils::InputLayerType secondType; shapeNodeType nodeType; - Precision prc; + ov::element::Type prc; bool specialZero; element::Type_t tmpSecondInPrc; std::tie(inpDesc, secondType, nodeType, prc, tmpSecondInPrc, specialZero) = obj.param; @@ -66,7 +60,7 @@ class ShapeOpsCPUTest : public testing::WithParamInterface, virt std::ostringstream result; result << nodeType << "_"; result << "IS="; - result << ov::test::utils::partialShape2str({inpDesc.inputShape.first}) << "_"; + result << ov::test::utils::partialShape2str({inpDesc.inputShape.first}) << "_"; result << "TS=("; for (const auto& shape : inpDesc.inputShape.second) { result << ov::test::utils::vec2str(shape) << "_"; @@ -91,33 +85,38 @@ class ShapeOpsCPUTest : public testing::WithParamInterface, virt const auto& funcInput = funcInputs[i]; ov::runtime::Tensor tensor; if (i == 1) { -#define RESHAPE_TEST_CASE(INT_TYPE) \ - case ov::element::Type_t::INT_TYPE: { \ - tensor = ov::runtime::Tensor{ov::element::INT_TYPE, targetInputStaticShapes[i]}; \ - auto inputData = tensor.data::value_type>(); \ - ASSERT_TRUE(idx < data.size()); \ - for (size_t j = 0lu; j < data[idx].size(); ++j) { \ - inputData[j] = data[idx][j]; \ - } \ - break; \ - } +#define RESHAPE_TEST_CASE(INT_TYPE) \ + case ov::element::Type_t::INT_TYPE: { \ + tensor = ov::runtime::Tensor{ov::element::INT_TYPE, targetInputStaticShapes[i]}; \ + auto inputData = tensor.data::value_type>(); \ + ASSERT_TRUE(idx < data.size()); \ + for (size_t j = 0lu; j < data[idx].size(); ++j) { \ + inputData[j] = data[idx][j]; \ + } \ + break; \ + } switch (secondInPrc) { RESHAPE_TEST_CASE(i64) RESHAPE_TEST_CASE(i32) - default: - FAIL() << "We shouldn't get here."; + default: + FAIL() << "We shouldn't get here."; #undef RESHAPE_TEST_CASE } } else { if (isWithNonZero) { // fill tensor with all zero, so the NonZero op will create 0 shape as the input of reshape op - tensor = utils::create_and_fill_tensor(funcInput.get_element_type(), targetInputStaticShapes[i], 1, 0); + tensor = + utils::create_and_fill_tensor(funcInput.get_element_type(), targetInputStaticShapes[i], 1, 0); } else { if (funcInput.get_element_type().is_real()) { - tensor = utils::create_and_fill_tensor( - funcInput.get_element_type(), targetInputStaticShapes[i], 10, 0, 1000); + tensor = utils::create_and_fill_tensor(funcInput.get_element_type(), + targetInputStaticShapes[i], + 10, + 0, + 1000); } else { - tensor = utils::create_and_fill_tensor(funcInput.get_element_type(), targetInputStaticShapes[i]); + tensor = + utils::create_and_fill_tensor(funcInput.get_element_type(), targetInputStaticShapes[i]); } } } @@ -135,9 +134,9 @@ class ShapeOpsCPUTest : public testing::WithParamInterface, virt targetDevice = ov::test::utils::DEVICE_CPU; inputDescription inpDesc; - ngraph::helpers::InputLayerType secondType; + ov::test::utils::InputLayerType secondType; shapeNodeType nodeType; - Precision prc; + ov::element::Type prc; bool specialZero; std::tie(inpDesc, secondType, nodeType, prc, secondInPrc, specialZero) = this->GetParam(); @@ -147,23 +146,22 @@ class ShapeOpsCPUTest : public testing::WithParamInterface, virt // so the input of reshape is i32. 
selectedType = std::string("unknown_I32"); } else { - selectedType = std::string("unknown_") + prc.name(); + selectedType = std::string("unknown_") + prc.get_type_name(); } data = inpDesc.data; - - std::vector inputShapes = - {inpDesc.inputShape, InputShape{{static_cast(inpDesc.data[0].size())}, - std::vector(inpDesc.inputShape.second.size(), {inpDesc.data[0].size()})}}; + std::vector inputShapes = { + inpDesc.inputShape, + InputShape{{static_cast(inpDesc.data[0].size())}, + std::vector(inpDesc.inputShape.second.size(), {inpDesc.data[0].size()})}}; init_input_shapes(inputShapes); - auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(prc); - ov::ParameterVector inputs{std::make_shared(ngPrc, inputDynamicShapes.front())}; + ov::ParameterVector inputs{std::make_shared(prc, inputDynamicShapes.front())}; auto dataInput = inputs.front(); dataInput->set_friendly_name("param_1"); - std::shared_ptr secondaryInput; - if (secondType == ngraph::helpers::InputLayerType::PARAMETER) { + std::shared_ptr secondaryInput; + if (secondType == ov::test::utils::InputLayerType::PARAMETER) { auto param = std::make_shared(secondInPrc, inputDynamicShapes.back()); param->set_friendly_name("param_2"); secondaryInput = param; @@ -172,28 +170,28 @@ class ShapeOpsCPUTest : public testing::WithParamInterface, virt secondaryInput = ngraph::builder::makeConstant(secondInPrc, {inpDesc.data[0].size()}, inpDesc.data[0]); } - std::shared_ptr shapeOps; + std::shared_ptr shapeOps; switch (nodeType) { - case shapeNodeType::Reshape: { - shapeOps = std::make_shared(dataInput, secondaryInput, specialZero); - break; - } - case shapeNodeType::Squeeze: { - shapeOps = std::make_shared(dataInput, secondaryInput); - break; - } - case shapeNodeType::Unsqueeze: { - shapeOps = std::make_shared(dataInput, secondaryInput); - break; - } - case shapeNodeType::ReshapeWithNonZero: { - auto nonZero = std::make_shared(dataInput); - shapeOps = std::make_shared(nonZero, secondaryInput, specialZero); - break; - } + case shapeNodeType::Reshape: { + shapeOps = std::make_shared(dataInput, secondaryInput, specialZero); + break; + } + case shapeNodeType::Squeeze: { + shapeOps = std::make_shared(dataInput, secondaryInput); + break; + } + case shapeNodeType::Unsqueeze: { + shapeOps = std::make_shared(dataInput, secondaryInput); + break; + } + case shapeNodeType::ReshapeWithNonZero: { + auto nonZero = std::make_shared(dataInput); + shapeOps = std::make_shared(nonZero, secondaryInput, specialZero); + break; + } } - function = makeNgraphFunction(ngPrc, inputs, shapeOps, "ShapeOpsCPUTest"); + function = makeNgraphFunction(prc, inputs, shapeOps, "ShapeOpsCPUTest"); } private: @@ -211,106 +209,123 @@ TEST_P(ShapeOpsCPUTest, CompareWithRefs) { namespace reshapeTest { const std::vector secondInPrcs{ov::element::Type_t::i64, ov::element::Type_t::i32}; -inputDescription noBounds{{{-1, -1, -1, -1}, - {ngraph::Shape{2, 5, 7, 3}, ngraph::Shape{10, 6, 10, 5}, ngraph::Shape{10, 6, 10, 5}, ngraph::Shape{1, 2, 5, 5}}}, - {std::vector{1, -1, 0}, std::vector{-1, 60, 2}, std::vector{10, 30, 10}, std::vector{5, 10, -1}}}; +inputDescription noBounds{ + {{-1, -1, -1, -1}, + {ov::Shape{2, 5, 7, 3}, ov::Shape{10, 6, 10, 5}, ov::Shape{10, 6, 10, 5}, ov::Shape{1, 2, 5, 5}}}, + {std::vector{1, -1, 0}, + std::vector{-1, 60, 2}, + std::vector{10, 30, 10}, + std::vector{5, 10, -1}}}; const auto params = ::testing::Combine(::testing::Values(noBounds), - ::testing::Values(ngraph::helpers::InputLayerType::PARAMETER), + ::testing::Values(ov::test::utils::InputLayerType::PARAMETER), 
::testing::Values(shapeNodeType::Reshape), - ::testing::Values(Precision::FP32), + ::testing::Values(ov::element::f32), ::testing::ValuesIn(secondInPrcs), ::testing::Values(true)); INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_dynamic, ShapeOpsCPUTest, params, ShapeOpsCPUTest::getTestCaseName); -inputDescription noBounds_const{{{{1, 10}, {2, 6}, {1, 15}, {3, 11}}, - {ngraph::Shape{2, 5, 7, 3}, ngraph::Shape{10, 6, 10, 5}, ngraph::Shape{1, 2, 5, 5}}}, - {std::vector{2, -1, 0}}}; +inputDescription noBounds_const{ + {{{1, 10}, {2, 6}, {1, 15}, {3, 11}}, {ov::Shape{2, 5, 7, 3}, ov::Shape{10, 6, 10, 5}, ov::Shape{1, 2, 5, 5}}}, + {std::vector{2, -1, 0}}}; const auto params_const = ::testing::Combine(::testing::Values(noBounds_const), - ::testing::Values(ngraph::helpers::InputLayerType::CONSTANT), + ::testing::Values(ov::test::utils::InputLayerType::CONSTANT), ::testing::Values(shapeNodeType::Reshape), - ::testing::Values(Precision::FP32), + ::testing::Values(ov::element::f32), ::testing::ValuesIn(secondInPrcs), ::testing::Values(true)); -INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_dynamic_const, ShapeOpsCPUTest, params_const, ShapeOpsCPUTest::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_dynamic_const, + ShapeOpsCPUTest, + params_const, + ShapeOpsCPUTest::getTestCaseName); -inputDescription shape_dynBatch{{{{1, 10}, 5, 7, 3}, - {ngraph::Shape{2, 5, 7, 3}, ngraph::Shape{10, 5, 7, 3}, ngraph::Shape{1, 5, 7, 3}}}, - {std::vector{-1, 15, 7}}}; +inputDescription shape_dynBatch{ + {{{1, 10}, 5, 7, 3}, {ov::Shape{2, 5, 7, 3}, ov::Shape{10, 5, 7, 3}, ov::Shape{1, 5, 7, 3}}}, + {std::vector{-1, 15, 7}}}; const auto params_dynBatch = ::testing::Combine(::testing::Values(shape_dynBatch), - ::testing::Values(ngraph::helpers::InputLayerType::CONSTANT), + ::testing::Values(ov::test::utils::InputLayerType::CONSTANT), ::testing::Values(shapeNodeType::Reshape), - ::testing::Values(Precision::FP32), + ::testing::Values(ov::element::f32), ::testing::ValuesIn(secondInPrcs), ::testing::Values(true)); -INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_dynBatch, ShapeOpsCPUTest, params_dynBatch, ShapeOpsCPUTest::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_dynBatch, + ShapeOpsCPUTest, + params_dynBatch, + ShapeOpsCPUTest::getTestCaseName); // test cases about NonZero connect with reshape // the output shape of NonZero is {4. 
0} // the output shapes of reshapes are {1, 0 ,4} {4, 0, 1} {2, 0, 2} -inputDescription shape_NonZero{{{-1, -1, -1, -1}, - {ngraph::Shape{4, 5, 7, 3}, ngraph::Shape{6, 3, 4, 8}, ngraph::Shape{2, 2, 3, 9}}}, - {std::vector{-1, 0, 4}, std::vector{0, 0, -1}, std::vector{2, 0, 2}}}; +inputDescription shape_NonZero{ + {{-1, -1, -1, -1}, {ov::Shape{4, 5, 7, 3}, ov::Shape{6, 3, 4, 8}, ov::Shape{2, 2, 3, 9}}}, + {std::vector{-1, 0, 4}, std::vector{0, 0, -1}, std::vector{2, 0, 2}}}; const auto params_NonZero = ::testing::Combine(::testing::Values(shape_NonZero), - ::testing::Values(ngraph::helpers::InputLayerType::PARAMETER), - ::testing::Values(shapeNodeType::ReshapeWithNonZero), - ::testing::Values(Precision::FP32), - ::testing::ValuesIn(secondInPrcs), - ::testing::Values(true)); + ::testing::Values(ov::test::utils::InputLayerType::PARAMETER), + ::testing::Values(shapeNodeType::ReshapeWithNonZero), + ::testing::Values(ov::element::f32), + ::testing::ValuesIn(secondInPrcs), + ::testing::Values(true)); -INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_NonZero, ShapeOpsCPUTest, params_NonZero, ShapeOpsCPUTest::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_NonZero, + ShapeOpsCPUTest, + params_NonZero, + ShapeOpsCPUTest::getTestCaseName); // test cases about reshape with empty tensor -inputDescription shape_EmptyTensor{{{-1, 2, 2}, - {ngraph::Shape{0, 2, 2}, ngraph::Shape{2, 2, 2}}}, - {std::vector{0, 4}, std::vector{2, 4}}}; +inputDescription shape_EmptyTensor{{{-1, 2, 2}, {ov::Shape{0, 2, 2}, ov::Shape{2, 2, 2}}}, + {std::vector{0, 4}, std::vector{2, 4}}}; const auto params_EmptyTensor = ::testing::Combine(::testing::Values(shape_EmptyTensor), - ::testing::Values(ngraph::helpers::InputLayerType::PARAMETER), - ::testing::Values(shapeNodeType::Reshape), - ::testing::Values(Precision::FP32), - ::testing::ValuesIn(secondInPrcs), - ::testing::Values(false)); + ::testing::Values(ov::test::utils::InputLayerType::PARAMETER), + ::testing::Values(shapeNodeType::Reshape), + ::testing::Values(ov::element::f32), + ::testing::ValuesIn(secondInPrcs), + ::testing::Values(false)); -INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_EmptyTensor, ShapeOpsCPUTest, params_EmptyTensor, ShapeOpsCPUTest::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_EmptyTensor, + ShapeOpsCPUTest, + params_EmptyTensor, + ShapeOpsCPUTest::getTestCaseName); // test cases about NeedShapeInfer return right result -inputDescription shape_NeedShapeInfer{{{-1, -1}, - {ngraph::Shape{640, 80}, ngraph::Shape{640, 80}, ngraph::Shape{1280, 40}, ngraph::Shape{1280, 40}}}, - {std::vector{320, 160}, std::vector{640, 80}, std::vector{320, 160}, std::vector{640, 80}}}; +inputDescription shape_NeedShapeInfer{ + {{-1, -1}, {ov::Shape{640, 80}, ov::Shape{640, 80}, ov::Shape{1280, 40}, ov::Shape{1280, 40}}}, + {std::vector{320, 160}, std::vector{640, 80}, std::vector{320, 160}, std::vector{640, 80}}}; const auto params_NeedShapeInfer = ::testing::Combine(::testing::Values(shape_NeedShapeInfer), - ::testing::Values(ngraph::helpers::InputLayerType::PARAMETER), - ::testing::Values(shapeNodeType::Reshape), - ::testing::Values(Precision::FP32), - ::testing::ValuesIn(secondInPrcs), - ::testing::Values(false)); + ::testing::Values(ov::test::utils::InputLayerType::PARAMETER), + ::testing::Values(shapeNodeType::Reshape), + ::testing::Values(ov::element::f32), + ::testing::ValuesIn(secondInPrcs), + ::testing::Values(false)); -INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_NeedShapeInfer, ShapeOpsCPUTest, params_NeedShapeInfer, 
ShapeOpsCPUTest::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_NeedShapeInfer, + ShapeOpsCPUTest, + params_NeedShapeInfer, + ShapeOpsCPUTest::getTestCaseName); -} // namespace reshapeTest +} // namespace reshapeTest namespace squeezeTest { const std::vector secondInPrcs{ov::element::Type_t::i64, ov::element::Type_t::i32}; -inputDescription noBounds{{{-1, -1, -1, -1, -1, -1}, - { - ngraph::Shape{2, 5, 1, 7, 3, 1}, - ngraph::Shape{10, 1, 1, 6, 10, 5}, - ngraph::Shape{10, 6, 10, 5, 1, 1}, - ngraph::Shape{1, 1, 5, 1, 5} - }}, - {std::vector{2, 5}, std::vector{1, 2}, std::vector{4, 5}, std::vector{0, 1}}}; +inputDescription noBounds{ + {{-1, -1, -1, -1, -1, -1}, + {ov::Shape{2, 5, 1, 7, 3, 1}, + ov::Shape{10, 1, 1, 6, 10, 5}, + ov::Shape{10, 6, 10, 5, 1, 1}, + ov::Shape{1, 1, 5, 1, 5}}}, + {std::vector{2, 5}, std::vector{1, 2}, std::vector{4, 5}, std::vector{0, 1}}}; const auto params = ::testing::Combine(::testing::Values(noBounds), - ::testing::Values(ngraph::helpers::InputLayerType::PARAMETER), + ::testing::Values(ov::test::utils::InputLayerType::PARAMETER), ::testing::Values(shapeNodeType::Squeeze), - ::testing::Values(Precision::FP32), + ::testing::Values(ov::element::f32), ::testing::ValuesIn(secondInPrcs), ::testing::Values(true)); @@ -318,31 +333,35 @@ const auto params = ::testing::Combine(::testing::Values(noBounds), // enable after CPU plug-in will support dynamic rank // INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_dynamic, ShapeOpsCPUTest, params, ShapeOpsCPUTest::getTestCaseName); -inputDescription noBounds_const{{{{1, 10}, {1, 15}, {2, 6}, {1, 15}, {3, 11}, {1, 15}}, - {ngraph::Shape{2, 1, 5, 7, 3, 1}, ngraph::Shape{10, 1, 6, 10, 5, 1}, ngraph::Shape{1, 1, 2, 5, 5, 1}}}, - {std::vector{1, 5}}}; +inputDescription noBounds_const{ + {{{1, 10}, {1, 15}, {2, 6}, {1, 15}, {3, 11}, {1, 15}}, + {ov::Shape{2, 1, 5, 7, 3, 1}, ov::Shape{10, 1, 6, 10, 5, 1}, ov::Shape{1, 1, 2, 5, 5, 1}}}, + {std::vector{1, 5}}}; const auto params_const = ::testing::Combine(::testing::Values(noBounds_const), - ::testing::Values(ngraph::helpers::InputLayerType::CONSTANT), + ::testing::Values(ov::test::utils::InputLayerType::CONSTANT), ::testing::Values(shapeNodeType::Squeeze), - ::testing::Values(Precision::FP32), + ::testing::Values(ov::element::f32), ::testing::ValuesIn(secondInPrcs), ::testing::Values(true)); -INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_dynamic_const, ShapeOpsCPUTest, params_const, ShapeOpsCPUTest::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_dynamic_const, + ShapeOpsCPUTest, + params_const, + ShapeOpsCPUTest::getTestCaseName); -} // namespace squeezeTest +} // namespace squeezeTest namespace unsqueezeTest { const std::vector secondInPrcs{ov::element::Type_t::i64, ov::element::Type_t::i32}; -inputDescription noBounds{{{-1, -1, -1, -1}, - {ngraph::Shape{2, 5, 7, 3}, ngraph::Shape{10, 6, 10, 5}, ngraph::Shape{10, 6, 10, 5}, ngraph::Shape{5, 1, 5}}}, - {std::vector{2, 5}, std::vector{1, 2}, std::vector{4, 5}, std::vector{0, 1}}}; +inputDescription noBounds{ + {{-1, -1, -1, -1}, {ov::Shape{2, 5, 7, 3}, ov::Shape{10, 6, 10, 5}, ov::Shape{10, 6, 10, 5}, ov::Shape{5, 1, 5}}}, + {std::vector{2, 5}, std::vector{1, 2}, std::vector{4, 5}, std::vector{0, 1}}}; const auto params = ::testing::Combine(::testing::Values(noBounds), - ::testing::Values(ngraph::helpers::InputLayerType::PARAMETER), + ::testing::Values(ov::test::utils::InputLayerType::PARAMETER), ::testing::Values(shapeNodeType::Unsqueeze), - ::testing::Values(Precision::FP32), + 
::testing::Values(ov::element::f32), ::testing::ValuesIn(secondInPrcs), ::testing::Values(true)); @@ -350,19 +369,23 @@ const auto params = ::testing::Combine(::testing::Values(noBounds), // enable after the CPU plug-in supports dynamic rank // INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_dynamic, ShapeOpsCPUTest, params, ShapeOpsCPUTest::getTestCaseName); -inputDescription noBounds_const{{{{1, 10}, {1, 15}, {2, 20}, {3, 7}}, - {ngraph::Shape{2, 5, 7, 3}, ngraph::Shape{10, 6, 10, 5}, ngraph::Shape{1, 2, 5, 5}}}, - {std::vector{1, 3}}}; +inputDescription noBounds_const{ + {{{1, 10}, {1, 15}, {2, 20}, {3, 7}}, {ov::Shape{2, 5, 7, 3}, ov::Shape{10, 6, 10, 5}, ov::Shape{1, 2, 5, 5}}}, + {std::vector{1, 3}}}; const auto params_const = ::testing::Combine(::testing::Values(noBounds_const), - ::testing::Values(ngraph::helpers::InputLayerType::CONSTANT), + ::testing::Values(ov::test::utils::InputLayerType::CONSTANT), ::testing::Values(shapeNodeType::Unsqueeze), - ::testing::Values(Precision::FP32), + ::testing::Values(ov::element::f32), ::testing::ValuesIn(secondInPrcs), ::testing::Values(true)); -INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_dynamic_const, ShapeOpsCPUTest, params_const, ShapeOpsCPUTest::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_dynamic_const, + ShapeOpsCPUTest, + params_const, + ShapeOpsCPUTest::getTestCaseName); -} // namespace unsqueezeTest +} // namespace unsqueezeTest -} // namespace CPULayerTestsDefinitions +} // namespace test +} // namespace ov diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/shapeof.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/shapeof.cpp index 3daef205663526..8759fda78154c3 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/shapeof.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/shapeof.cpp @@ -2,34 +2,31 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "test_utils/cpu_test_utils.hpp" - #include "ov_models/builders.hpp" #include "ov_models/utils/ov_helpers.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" +#include "test_utils/cpu_test_utils.hpp" -using namespace InferenceEngine; using namespace CPUTestUtils; using InputShape = ov::test::InputShape; using ElementType = ov::element::Type_t; -namespace CPULayerTestsDefinitions { typedef std::tuple< - InputShape, - ElementType // Net precision -> ShapeOfLayerTestParams; +namespace ov { +namespace test { typedef std::tuple + ShapeOfLayerTestParams; -typedef std::tuple< - ShapeOfLayerTestParams, - CPUSpecificParams -> ShapeOfLayerCPUTestParamsSet; +typedef std::tuple ShapeOfLayerCPUTestParamsSet; class ShapeOfLayerCPUTest : public testing::WithParamInterface, - virtual public ov::test::SubgraphBaseTest, public CPUTestsBase { + virtual public ov::test::SubgraphBaseTest, + public CPUTestsBase { public: static std::string getTestCaseName(testing::TestParamInfo obj) { - CPULayerTestsDefinitions::ShapeOfLayerTestParams basicParamsSet; + ShapeOfLayerTestParams basicParamsSet; CPUSpecificParams cpuParams; std::tie(basicParamsSet, cpuParams) = obj.param; ElementType netPr; @@ -50,11 +47,12 @@ class ShapeOfLayerCPUTest : public testing::WithParamInterfaceGetParam(); std::tie(inFmts, outFmts, priority, selectedType) = cpuParams; @@ -72,7 +70,7 @@ class ShapeOfLayerCPUTest : public testing::WithParamInterface(inType, shape)); - auto shapeOf = std::make_shared(params.front(), ngraph::element::i32); + auto shapeOf = std::make_shared(params.front(), ov::element::i32); function =
makeNgraphFunction(netPrecision, params, shapeOf, "ShapeOf"); } @@ -85,69 +83,41 @@ TEST_P(ShapeOfLayerCPUTest, CompareWithRefs) { namespace { -const std::vector netPrecisions = { - ElementType::f32, - ElementType::bf16, - ElementType::i32, - ElementType::i8 -}; +const std::vector netPrecisions = {ElementType::f32, ElementType::bf16, ElementType::i32, ElementType::i8}; -std::vector inShapesDynamic3d = { - { - {-1, -1, -1}, - { - { 8, 16, 4 }, - { 8, 16, 3 }, - { 8, 16, 2 } - } - } -}; +std::vector inShapesDynamic3d = {{{-1, -1, -1}, {{8, 16, 4}, {8, 16, 3}, {8, 16, 2}}}}; std::vector inShapesDynamic4d = { - { - {-1, -1, -1, -1}, - { - { 8, 16, 3, 4 }, - { 8, 16, 3, 3 }, - { 8, 16, 3, 2 } - } - }, + {{-1, -1, -1, -1}, {{8, 16, 3, 4}, {8, 16, 3, 3}, {8, 16, 3, 2}}}, }; std::vector inShapesDynamic5d = { - { - { -1, -1, -1, -1, -1 }, - { - { 8, 16, 3, 2, 4 }, - { 8, 16, 3, 2, 3 }, - { 8, 16, 3, 2, 2 } - } - } -}; -const auto params5dDynamic = ::testing::Combine( - ::testing::Combine( - ::testing::ValuesIn(inShapesDynamic5d), - ::testing::ValuesIn(netPrecisions)), - ::testing::Values(emptyCPUSpec)); - -const auto params4dDynamic = ::testing::Combine( - ::testing::Combine( - ::testing::ValuesIn(inShapesDynamic4d), - ::testing::ValuesIn(netPrecisions)), - ::testing::Values(emptyCPUSpec)); - -const auto params3dDynamic = ::testing::Combine( - ::testing::Combine( - ::testing::ValuesIn(inShapesDynamic3d), - ::testing::ValuesIn(netPrecisions)), - ::testing::Values(emptyCPUSpec)); + {{-1, -1, -1, -1, -1}, {{8, 16, 3, 2, 4}, {8, 16, 3, 2, 3}, {8, 16, 3, 2, 2}}}}; +const auto params5dDynamic = + ::testing::Combine(::testing::Combine(::testing::ValuesIn(inShapesDynamic5d), ::testing::ValuesIn(netPrecisions)), + ::testing::Values(emptyCPUSpec)); + +const auto params4dDynamic = + ::testing::Combine(::testing::Combine(::testing::ValuesIn(inShapesDynamic4d), ::testing::ValuesIn(netPrecisions)), + ::testing::Values(emptyCPUSpec)); + +const auto params3dDynamic = + ::testing::Combine(::testing::Combine(::testing::ValuesIn(inShapesDynamic3d), ::testing::ValuesIn(netPrecisions)), + ::testing::Values(emptyCPUSpec)); // We don't check the static case because of constant folding -INSTANTIATE_TEST_SUITE_P(smoke_ShapeOf3dDynamicLayoutTest, ShapeOfLayerCPUTest, - params3dDynamic, ShapeOfLayerCPUTest::getTestCaseName); -INSTANTIATE_TEST_SUITE_P(smoke_ShapeOf4dDynamicLayoutTest, ShapeOfLayerCPUTest, - params4dDynamic, ShapeOfLayerCPUTest::getTestCaseName); -INSTANTIATE_TEST_SUITE_P(smoke_ShapeOf5dDynamicLayoutTest, ShapeOfLayerCPUTest, - params5dDynamic, ShapeOfLayerCPUTest::getTestCaseName); -} // namespace -} // namespace CPULayerTestsDefinitions +INSTANTIATE_TEST_SUITE_P(smoke_ShapeOf3dDynamicLayoutTest, + ShapeOfLayerCPUTest, + params3dDynamic, + ShapeOfLayerCPUTest::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_ShapeOf4dDynamicLayoutTest, + ShapeOfLayerCPUTest, + params4dDynamic, + ShapeOfLayerCPUTest::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_ShapeOf5dDynamicLayoutTest, + ShapeOfLayerCPUTest, + params5dDynamic, + ShapeOfLayerCPUTest::getTestCaseName); +} // namespace +} // namespace test +} // namespace ov diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/shuffle_channels.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/shuffle_channels.cpp index 385cee23a7fb1d..3518e6fc543a22 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/shuffle_channels.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/shuffle_channels.cpp @@ -3,26 +3,23 @@
// #include "shared_test_classes/single_layer/shuffle_channels.hpp" -#include "test_utils/cpu_test_utils.hpp" -#include "ov_models/builders.hpp" + #include "ov_models/utils/ov_helpers.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" +#include "test_utils/cpu_test_utils.hpp" -using namespace InferenceEngine; using namespace CPUTestUtils; -using namespace ngraph::opset3; -using namespace ov::test; +namespace ov { +namespace test { -namespace CPULayerTestsDefinitions { - -using ShuffleChannelsLayerCPUTestParamsSet = std::tuple< - InputShape, // Input shape - ElementType, // Input element type - LayerTestsDefinitions::shuffleChannelsSpecificParams, - CPUSpecificParams>; +using ShuffleChannelsLayerCPUTestParamsSet = std::tuple; class ShuffleChannelsLayerCPUTest : public testing::WithParamInterface, - virtual public SubgraphBaseTest, public CPUTestsBase { + virtual public SubgraphBaseTest, + public CPUTestsBase { public: static std::string getTestCaseName(testing::TestParamInfo obj) { InputShape shapes; @@ -141,118 +138,106 @@ std::vector filterCPUInfoForDevice5DBlock() { } /* ========== */ -const std::vector inputElementType = { - ElementType::f32, - ElementType::bf16, - ElementType::i8 -}; +const std::vector inputElementType = {ElementType::f32, ElementType::bf16, ElementType::i8}; -const auto shuffleChannelsParams4D = ::testing::Combine( - ::testing::ValuesIn(std::vector{-4, -2, 0, 1, 3}), - ::testing::ValuesIn(std::vector{1, 2, 4}) -); +const auto shuffleChannelsParams4D = ::testing::Combine(::testing::ValuesIn(std::vector{-4, -2, 0, 1, 3}), + ::testing::ValuesIn(std::vector{1, 2, 4})); -const auto shuffleChannelsParams5D = ::testing::Combine( - ::testing::ValuesIn(std::vector{-5, -3, -1, 0, 1, 3}), - ::testing::ValuesIn(std::vector{1, 2, 3}) -); +const auto shuffleChannelsParams5D = ::testing::Combine(::testing::ValuesIn(std::vector{-5, -3, -1, 0, 1, 3}), + ::testing::ValuesIn(std::vector{1, 2, 3})); -const auto shuffleChannelsParams4DBlock = ::testing::Combine( - ::testing::ValuesIn(std::vector{-4, -2, -1, 0, 2, 3}), - ::testing::ValuesIn(std::vector{1, 2, 4}) -); +const auto shuffleChannelsParams4DBlock = ::testing::Combine(::testing::ValuesIn(std::vector{-4, -2, -1, 0, 2, 3}), + ::testing::ValuesIn(std::vector{1, 2, 4})); -const auto shuffleChannelsParams5DBlock = ::testing::Combine( - ::testing::ValuesIn(std::vector{-5, -2, -1, 0, 2, 3, 4}), - ::testing::ValuesIn(std::vector{1, 2, 3}) -); +const auto shuffleChannelsParams5DBlock = + ::testing::Combine(::testing::ValuesIn(std::vector{-5, -2, -1, 0, 2, 3, 4}), + ::testing::ValuesIn(std::vector{1, 2, 3})); const std::vector inputShapesDynamic4D = { - {{-1, -1, -1, -1}, - {{8, 4, 4, 4}, {8, 16, 8, 4}, {8, 4, 4, 4}}}, + {{-1, -1, -1, -1}, {{8, 4, 4, 4}, {8, 16, 8, 4}, {8, 4, 4, 4}}}, - {{-1, 8, -1, -1}, - {{8, 8, 8, 8}, {8, 8, 4, 16}, {8, 8, 8, 8}}}, + {{-1, 8, -1, -1}, {{8, 8, 8, 8}, {8, 8, 4, 16}, {8, 8, 8, 8}}}, - {{{4, 32}, {4, 32}, {4, 32}, {4, 32}}, - {{4, 12, 8, 8}, {8, 32, 12, 4}, {4, 12, 8, 8}}}, + {{{4, 32}, {4, 32}, {4, 32}, {4, 32}}, {{4, 12, 8, 8}, {8, 32, 12, 4}, {4, 12, 8, 8}}}, }; const std::vector inputShapesDynamic5D = { - {{-1, -1, -1, -1, -1}, - {{6, 6, 6, 6, 6}, {12, 6, 12, 12, 12}, {6, 6, 6, 6, 6}}}, + {{-1, -1, -1, -1, -1}, {{6, 6, 6, 6, 6}, {12, 6, 12, 12, 12}, {6, 6, 6, 6, 6}}}, - {{-1, 18, -1, -1, -1}, - {{6, 18, 12, 6, 12}, {6, 18, 6, 6, 6}, {6, 18, 12, 6, 12}}}, + {{-1, 18, -1, -1, -1}, {{6, 18, 12, 6, 12}, {6, 18, 6, 6, 6}, {6, 18, 12, 6, 12}}}, - {{{6, 24}, {6, 24}, {6, 24}, {6, 24}, {6, 24}}, - {{24, 
12, 6, 6, 6}, {12, 24, 6, 12, 12}, {24, 12, 6, 6, 6}}}, + {{{6, 24}, {6, 24}, {6, 24}, {6, 24}, {6, 24}}, {{24, 12, 6, 6, 6}, {12, 24, 6, 12, 12}, {24, 12, 6, 6, 6}}}, }; -INSTANTIATE_TEST_SUITE_P(smoke_ShuffleChannelsStatic4D, ShuffleChannelsLayerCPUTest, - ::testing::Combine( - ::testing::ValuesIn(static_shapes_to_test_representation({{16, 24, 32, 40}})), - ::testing::ValuesIn(inputElementType), - shuffleChannelsParams4D, - ::testing::ValuesIn(filterCPUInfoForDevice4D())), - ShuffleChannelsLayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_ShuffleChannelsDynamic4D, ShuffleChannelsLayerCPUTest, - ::testing::Combine( - ::testing::ValuesIn(inputShapesDynamic4D), - ::testing::ValuesIn(inputElementType), - shuffleChannelsParams4D, - ::testing::ValuesIn(filterCPUInfoForDevice4D())), - ShuffleChannelsLayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_ShuffleChannelsStatic5D, ShuffleChannelsLayerCPUTest, - ::testing::Combine( - ::testing::ValuesIn(static_shapes_to_test_representation({{6, 24, 12, 12, 6}})), - ::testing::ValuesIn(inputElementType), - shuffleChannelsParams5D, - ::testing::ValuesIn(filterCPUInfoForDevice5D())), - ShuffleChannelsLayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_ShuffleChannelsDynamic5D, ShuffleChannelsLayerCPUTest, - ::testing::Combine( - ::testing::ValuesIn(inputShapesDynamic5D), - ::testing::ValuesIn(inputElementType), - shuffleChannelsParams5D, - ::testing::ValuesIn(filterCPUInfoForDevice5D())), - ShuffleChannelsLayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_ShuffleChannelsStatic4DBlock, ShuffleChannelsLayerCPUTest, - ::testing::Combine( - ::testing::ValuesIn(static_shapes_to_test_representation({{40, 32, 24, 16}})), - ::testing::ValuesIn(inputElementType), - shuffleChannelsParams4DBlock, - ::testing::ValuesIn(filterCPUInfoForDevice4DBlock())), +INSTANTIATE_TEST_SUITE_P( + smoke_ShuffleChannelsStatic4D, + ShuffleChannelsLayerCPUTest, + ::testing::Combine(::testing::ValuesIn(static_shapes_to_test_representation({{16, 24, 32, 40}})), + ::testing::ValuesIn(inputElementType), + shuffleChannelsParams4D, + ::testing::ValuesIn(filterCPUInfoForDevice4D())), + ShuffleChannelsLayerCPUTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_ShuffleChannelsDynamic4D, + ShuffleChannelsLayerCPUTest, + ::testing::Combine(::testing::ValuesIn(inputShapesDynamic4D), + ::testing::ValuesIn(inputElementType), + shuffleChannelsParams4D, + ::testing::ValuesIn(filterCPUInfoForDevice4D())), ShuffleChannelsLayerCPUTest::getTestCaseName); -INSTANTIATE_TEST_SUITE_P(smoke_ShuffleChannelsDynamic4DBlock, ShuffleChannelsLayerCPUTest, - ::testing::Combine( - ::testing::ValuesIn(inputShapesDynamic4D), - ::testing::ValuesIn(inputElementType), - shuffleChannelsParams4DBlock, - ::testing::ValuesIn(filterCPUInfoForDevice4DBlock())), +INSTANTIATE_TEST_SUITE_P( + smoke_ShuffleChannelsStatic5D, + ShuffleChannelsLayerCPUTest, + ::testing::Combine(::testing::ValuesIn(static_shapes_to_test_representation({{6, 24, 12, 12, 6}})), + ::testing::ValuesIn(inputElementType), + shuffleChannelsParams5D, + ::testing::ValuesIn(filterCPUInfoForDevice5D())), + ShuffleChannelsLayerCPUTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_ShuffleChannelsDynamic5D, + ShuffleChannelsLayerCPUTest, + ::testing::Combine(::testing::ValuesIn(inputShapesDynamic5D), + ::testing::ValuesIn(inputElementType), + shuffleChannelsParams5D, + ::testing::ValuesIn(filterCPUInfoForDevice5D())), ShuffleChannelsLayerCPUTest::getTestCaseName); 
-INSTANTIATE_TEST_SUITE_P(smoke_ShuffleChannelsStatic5DBlock, ShuffleChannelsLayerCPUTest, - ::testing::Combine( - ::testing::ValuesIn(static_shapes_to_test_representation({{18, 12, 18, 12, 30}})), - ::testing::ValuesIn(inputElementType), - shuffleChannelsParams5DBlock, - ::testing::ValuesIn(filterCPUInfoForDevice5DBlock())), +INSTANTIATE_TEST_SUITE_P( + smoke_ShuffleChannelsStatic4DBlock, + ShuffleChannelsLayerCPUTest, + ::testing::Combine(::testing::ValuesIn(static_shapes_to_test_representation({{40, 32, 24, 16}})), + ::testing::ValuesIn(inputElementType), + shuffleChannelsParams4DBlock, + ::testing::ValuesIn(filterCPUInfoForDevice4DBlock())), + ShuffleChannelsLayerCPUTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_ShuffleChannelsDynamic4DBlock, + ShuffleChannelsLayerCPUTest, + ::testing::Combine(::testing::ValuesIn(inputShapesDynamic4D), + ::testing::ValuesIn(inputElementType), + shuffleChannelsParams4DBlock, + ::testing::ValuesIn(filterCPUInfoForDevice4DBlock())), ShuffleChannelsLayerCPUTest::getTestCaseName); -INSTANTIATE_TEST_SUITE_P(smoke_ShuffleChannelsDynamic5DBlock, ShuffleChannelsLayerCPUTest, - ::testing::Combine( - ::testing::ValuesIn(inputShapesDynamic5D), - ::testing::ValuesIn(inputElementType), - shuffleChannelsParams5DBlock, - ::testing::ValuesIn(filterCPUInfoForDevice5DBlock())), +INSTANTIATE_TEST_SUITE_P( + smoke_ShuffleChannelsStatic5DBlock, + ShuffleChannelsLayerCPUTest, + ::testing::Combine(::testing::ValuesIn(static_shapes_to_test_representation({{18, 12, 18, 12, 30}})), + ::testing::ValuesIn(inputElementType), + shuffleChannelsParams5DBlock, + ::testing::ValuesIn(filterCPUInfoForDevice5DBlock())), + ShuffleChannelsLayerCPUTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_ShuffleChannelsDynamic5DBlock, + ShuffleChannelsLayerCPUTest, + ::testing::Combine(::testing::ValuesIn(inputShapesDynamic5D), + ::testing::ValuesIn(inputElementType), + shuffleChannelsParams5DBlock, + ::testing::ValuesIn(filterCPUInfoForDevice5DBlock())), ShuffleChannelsLayerCPUTest::getTestCaseName); -} // namespace +} // namespace -} // namespace CPULayerTestsDefinitions +} // namespace test +} // namespace ov diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/slice.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/slice.cpp index 02907dba518f40..45b68a6c8379e6 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/slice.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/slice.cpp @@ -2,38 +2,37 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "ov_models/builders.hpp" -#include "test_utils/cpu_test_utils.hpp" -#include "shared_test_classes/base/ov_subgraph.hpp" #include +#include "shared_test_classes/base/ov_subgraph.hpp" +#include "test_utils/cpu_test_utils.hpp" + using namespace CPUTestUtils; using namespace ov::test; -namespace CPULayerTestsDefinitions { - struct Slice8SpecificParams { - std::vector start; - std::vector stop; - std::vector step; - std::vector axes; + std::vector start; + std::vector stop; + std::vector step; + std::vector axes; }; -typedef std::tuple< - std::vector, // Parameters shapes - Slice8SpecificParams, // Slice-8 specific parameters - ngraph::helpers::InputLayerType, // Secondary input types - ElementType, // Network precision - CPUSpecificParams // CPU specific parameters -> Slice8LayerTestCPUParam; +typedef std::tuple, // Parameters shapes + Slice8SpecificParams, // Slice-8 specific parameters + ov::test::utils::InputLayerType, // Secondary input types + ElementType, // Network 
precision + CPUSpecificParams // CPU specific parameters + > + Slice8LayerTestCPUParam; class Slice8LayerCPUTest : public testing::WithParamInterface, - virtual public SubgraphBaseTest, public CPUTestsBase { + virtual public SubgraphBaseTest, + public CPUTestsBase { public: static std::string getTestCaseName(testing::TestParamInfo obj) { std::vector shapes; Slice8SpecificParams params; - ngraph::helpers::InputLayerType secondaryInputType; + ov::test::utils::InputLayerType secondaryInputType; ElementType netPrecision; CPUSpecificParams cpuParams; std::tie(shapes, params, secondaryInputType, netPrecision, cpuParams) = obj.param; @@ -49,10 +48,10 @@ class Slice8LayerCPUTest : public testing::WithParamInterface& targetInputStaticShapes) override { - std::vector inputValues = {sliceParams.start.data(), sliceParams.stop.data(), sliceParams.step.data(), sliceParams.axes.data()}; + void generate_inputs(const std::vector& targetInputStaticShapes) override { + std::vector inputValues = {sliceParams.start.data(), + sliceParams.stop.data(), + sliceParams.step.data(), + sliceParams.axes.data()}; inputs.clear(); const auto& funcInputs = function->inputs(); @@ -71,16 +73,20 @@ class Slice8LayerCPUTest : public testing::WithParamInterface shapes; - ngraph::helpers::InputLayerType secondaryInputType; + ov::test::utils::InputLayerType secondaryInputType; ElementType netPrecision; CPUSpecificParams cpuParams; std::tie(shapes, sliceParams, secondaryInputType, netPrecision, cpuParams) = this->GetParam(); @@ -102,42 +108,50 @@ class Slice8LayerCPUTest : public testing::WithParamInterface(netPrecision, shape)); } - std::shared_ptr sliceNode; - if (secondaryInputType == ngraph::helpers::InputLayerType::PARAMETER) { + std::shared_ptr sliceNode; + if (secondaryInputType == ov::test::utils::InputLayerType::PARAMETER) { // Slice start, stop, step, axes are parameters. - auto startNode = std::make_shared(ov::element::i64, ov::Shape{sliceParams.start.size()}); - auto stopdNode = std::make_shared(ov::element::i64, ov::Shape{sliceParams.stop.size()}); - auto stepNode = std::make_shared(ov::element::i64, ov::Shape{sliceParams.step.size()}); - - params.push_back(std::dynamic_pointer_cast(startNode)); - params.push_back(std::dynamic_pointer_cast(stopdNode)); - params.push_back(std::dynamic_pointer_cast(stepNode)); + auto startNode = + std::make_shared(ov::element::i64, ov::Shape{sliceParams.start.size()}); + auto stopdNode = + std::make_shared(ov::element::i64, ov::Shape{sliceParams.stop.size()}); + auto stepNode = + std::make_shared(ov::element::i64, ov::Shape{sliceParams.step.size()}); + + params.push_back(std::dynamic_pointer_cast(startNode)); + params.push_back(std::dynamic_pointer_cast(stopdNode)); + params.push_back(std::dynamic_pointer_cast(stepNode)); if (!sliceParams.axes.empty()) { // With axes parameter - auto axesNode = std::make_shared(ov::element::i64, ov::Shape{sliceParams.axes.size()}); - params.push_back(std::dynamic_pointer_cast(axesNode)); + auto axesNode = + std::make_shared(ov::element::i64, ov::Shape{sliceParams.axes.size()}); + params.push_back(std::dynamic_pointer_cast(axesNode)); sliceNode = std::make_shared(params[0], startNode, stopdNode, stepNode, axesNode); } else { - //without axes parameter + // without axes parameter sliceNode = std::make_shared(params[0], startNode, stopdNode, stepNode); } - } else if (secondaryInputType == ngraph::helpers::InputLayerType::CONSTANT) { + } else if (secondaryInputType == ov::test::utils::InputLayerType::CONSTANT) { // Slice start, stop, step, axes are const. 
ov::Shape constShape = {sliceParams.start.size()}; - auto beginNode = std::make_shared(ov::element::i64, constShape, sliceParams.start.data()); - auto endNode = std::make_shared(ov::element::i64, constShape, sliceParams.stop.data()); - auto strideNode = std::make_shared(ov::element::i64, constShape, sliceParams.step.data()); + auto beginNode = + std::make_shared(ov::element::i64, constShape, sliceParams.start.data()); + auto endNode = + std::make_shared(ov::element::i64, constShape, sliceParams.stop.data()); + auto strideNode = + std::make_shared(ov::element::i64, constShape, sliceParams.step.data()); if (!sliceParams.axes.empty()) { // With axes parameter - auto axesNode = std::make_shared(ov::element::i64, constShape, sliceParams.axes.data()); + auto axesNode = + std::make_shared(ov::element::i64, constShape, sliceParams.axes.data()); sliceNode = std::make_shared(params[0], beginNode, endNode, strideNode, axesNode); } else { - //without axes parameter + // without axes parameter sliceNode = std::make_shared(params[0], beginNode, endNode, strideNode); } } else { // Other input layer types are not supported. - OPENVINO_THROW("Slice8LayerCPUTest: Unsupported ngraph::helpers::InputLayerType , value: ", + OPENVINO_THROW("Slice8LayerCPUTest: Unsupported ov::test::utils::InputLayerType, value: ", secondaryInputType); } @@ -153,437 +167,378 @@ TEST_P(Slice8LayerCPUTest, CompareWithRefs) { namespace { -const std::vector inputLayerTypes = { - ngraph::helpers::InputLayerType::CONSTANT, - ngraph::helpers::InputLayerType::PARAMETER -}; - -const auto cpuParams_nChw16c = CPUSpecificParams {{nChw16c}, {nChw16c}, {}, {}}; -const auto cpuParams_nCdhw16c = CPUSpecificParams {{nCdhw16c}, {nCdhw16c}, {}, {}}; - -const auto cpuParams_nChw8c = CPUSpecificParams {{nChw8c}, {nChw8c}, {}, {}}; -const auto cpuParams_nCdhw8c = CPUSpecificParams {{nCdhw8c}, {nCdhw8c}, {}, {}}; - -const auto cpuParams_nhwc = CPUSpecificParams {{nhwc}, {nhwc}, {}, {}}; -const auto cpuParams_ndhwc = CPUSpecificParams {{ndhwc}, {ndhwc}, {}, {}}; - -const auto cpuParams_nchw = CPUSpecificParams {{nchw}, {nchw}, {}, {}}; -const auto cpuParams_ncdhw = CPUSpecificParams {{ncdhw}, {ncdhw}, {}, {}}; - -const std::vector inputPrecisions = { - ElementType::f32, - ElementType::bf16, - ElementType::i8 -}; - -const std::vector> inputShapesDynamic2D = { - { - { // Origin dynamic shape - {-1, -1}, - { // Dynamic shapes instances - {32, 20}, {16, 16}, {24, 16} - } - } - }, - { - { // Origin dynamic shape - {-1, 16}, - { // Dynamic shapes instances - {16, 16}, {20, 16}, {32, 16} - } - } - }, - { - { // Origin dynamic shape - { {16, 32}, {16, 32} }, - { // Dynamic shapes instances - {16, 32}, {32, 16}, {24, 24} - } - } - } -}; - -const std::vector paramsPlain2D = { - Slice8SpecificParams{ { 0, 10 }, { 16, 16 }, { 1, 1 }, { 0, 1 } }, - Slice8SpecificParams{ { 2, 5 }, { 16, 8 }, { 1, 1 }, { } }, - Slice8SpecificParams{ { 2, 5 }, { 16, 16 }, { 1, 2 }, { 0, 1 } }, - Slice8SpecificParams{ { 0, 0 }, { 16, 16 }, { 1, 2 }, { 1, 0} }, - Slice8SpecificParams{ { 0 }, { 16 }, { 2 }, { 0 } }, - Slice8SpecificParams{ { 0 }, { 16 }, { 1 }, { 1 } } -}; - -INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_Plain_Static_2D, Slice8LayerCPUTest, - ::testing::Combine( - ::testing::Values(static_shapes_to_test_representation({{32, 20}})), - ::testing::ValuesIn(paramsPlain2D), - ::testing::Values(ngraph::helpers::InputLayerType::CONSTANT), - ::testing::ValuesIn(inputPrecisions), - ::testing::Values(emptyCPUSpec)), - Slice8LayerCPUTest::getTestCaseName);
-INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_Plain_Dynamic_2D, Slice8LayerCPUTest, - ::testing::Combine( - ::testing::ValuesIn(inputShapesDynamic2D), - ::testing::ValuesIn(paramsPlain2D), - ::testing::Values(ngraph::helpers::InputLayerType::CONSTANT), - ::testing::ValuesIn(inputPrecisions), - ::testing::Values(emptyCPUSpec)), - Slice8LayerCPUTest::getTestCaseName); +const std::vector inputLayerTypes = {ov::test::utils::InputLayerType::CONSTANT, + ov::test::utils::InputLayerType::PARAMETER}; + +const auto cpuParams_nChw16c = CPUSpecificParams{{nChw16c}, {nChw16c}, {}, {}}; +const auto cpuParams_nCdhw16c = CPUSpecificParams{{nCdhw16c}, {nCdhw16c}, {}, {}}; + +const auto cpuParams_nChw8c = CPUSpecificParams{{nChw8c}, {nChw8c}, {}, {}}; +const auto cpuParams_nCdhw8c = CPUSpecificParams{{nCdhw8c}, {nCdhw8c}, {}, {}}; + +const auto cpuParams_nhwc = CPUSpecificParams{{nhwc}, {nhwc}, {}, {}}; +const auto cpuParams_ndhwc = CPUSpecificParams{{ndhwc}, {ndhwc}, {}, {}}; + +const auto cpuParams_nchw = CPUSpecificParams{{nchw}, {nchw}, {}, {}}; +const auto cpuParams_ncdhw = CPUSpecificParams{{ncdhw}, {ncdhw}, {}, {}}; + +const std::vector inputPrecisions = {ElementType::f32, ElementType::bf16, ElementType::i8}; + +const std::vector> inputShapesDynamic2D = {{{// Origin dynamic shape + {-1, -1}, + {// Dynamic shapes instances + {32, 20}, + {16, 16}, + {24, 16}}}}, + {{// Origin dynamic shape + {-1, 16}, + {// Dynamic shapes instances + {16, 16}, + {20, 16}, + {32, 16}}}}, + {{// Origin dynamic shape + {{16, 32}, {16, 32}}, + {// Dynamic shapes instances + {16, 32}, + {32, 16}, + {24, 24}}}}}; + +const std::vector paramsPlain2D = {Slice8SpecificParams{{0, 10}, {16, 16}, {1, 1}, {0, 1}}, + Slice8SpecificParams{{2, 5}, {16, 8}, {1, 1}, {}}, + Slice8SpecificParams{{2, 5}, {16, 16}, {1, 2}, {0, 1}}, + Slice8SpecificParams{{0, 0}, {16, 16}, {1, 2}, {1, 0}}, + Slice8SpecificParams{{0}, {16}, {2}, {0}}, + Slice8SpecificParams{{0}, {16}, {1}, {1}}}; + +INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_Plain_Static_2D, + Slice8LayerCPUTest, + ::testing::Combine(::testing::Values(static_shapes_to_test_representation({{32, 20}})), + ::testing::ValuesIn(paramsPlain2D), + ::testing::Values(ov::test::utils::InputLayerType::CONSTANT), + ::testing::ValuesIn(inputPrecisions), + ::testing::Values(emptyCPUSpec)), + Slice8LayerCPUTest::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_Plain_Dynamic_2D, + Slice8LayerCPUTest, + ::testing::Combine(::testing::ValuesIn(inputShapesDynamic2D), + ::testing::ValuesIn(paramsPlain2D), + ::testing::Values(ov::test::utils::InputLayerType::CONSTANT), + ::testing::ValuesIn(inputPrecisions), + ::testing::Values(emptyCPUSpec)), + Slice8LayerCPUTest::getTestCaseName); const std::vector testCasesCommon4D = { - Slice8SpecificParams{ { 0, 2, 5, 4 }, { 1, 4, 28, 27 }, { 1, 1, 1, 1 }, { } }, - Slice8SpecificParams{ { 0, 1, 0, 0 }, { 20, 3, 32, 1 }, { 1, 1, 1, 1 }, { 3, 1, 2, 0 } }, - Slice8SpecificParams{ { 0, 0, 10, 0 }, { 1, 3, 20, 20 }, { 1, 1, 1, 1 }, { } }, - Slice8SpecificParams{ { 0, 0, 20, 20 }, { 1, 5, 26, 25 }, { 1, 1, 2, 1 }, { 0, 1, 3, 2 } }, - Slice8SpecificParams{ { 0, 0, 0, 20 }, { 1, 2, 30, 30 }, { 1, 1, 2, 1 }, { } }, - Slice8SpecificParams{ { 0, 0, 2, 10 }, { 1, 3, 32, 20 }, { 1, 1, 1, 1 }, { 0, 1, 2, 3 } }, - Slice8SpecificParams{ { 0, 1, 0, 10 }, { 1, 5, 32, 30 }, { 1, 1, 1, 1 }, { } }, - Slice8SpecificParams{ { 0, 1, 2, 10 }, { 1, 5, 32, 18 }, { 1, 1, 1, 2 }, { 0, 1, 2, 3 } }, - Slice8SpecificParams{ { 0, 0, 2, 10 }, { 1, 8, 32, 18 }, { 1, 2, 1, 2 }, { } }, - 
Slice8SpecificParams{ { 0, 0, 10 }, { 2, 32, 18 }, { 1, 1, 1 }, { 1, 2, 3 } }, - Slice8SpecificParams{ { 0, 10 }, { 2, 32 }, { 1, 1 }, { 1, 3 } } -}; - -const std::vector> inputShapesStatic4D = { - {{ 1, 5, 32, 32 }}, {{ 2, 5, 32, 48 }} -}; - -const std::vector> inputShapesDynamic4D = { - { - { // Origin dynamic shape - {-1, -1, -1, -1}, - { // Dynamic shapes instances - { 1, 5, 32, 32 }, { 2, 5, 32, 32 }, { 1, 5, 64, 64 } - } - } - }, - { - { // Origin dynamic shape - {-1, 5, -1, -1}, - { // Dynamic shapes instances - { 1, 5, 32, 32 }, { 2, 5, 32, 32 }, { 3, 5, 32, 36 } - } - } - }, - { - { // Origin dynamic shape - {{1, 5}, 5, {32, 64}, {32, 64}}, - { // Dynamic shapes instances - { 2, 5, 32, 32 }, { 1, 5, 48, 32 }, { 5, 5, 32, 32 } - } - } - } -}; + Slice8SpecificParams{{0, 2, 5, 4}, {1, 4, 28, 27}, {1, 1, 1, 1}, {}}, + Slice8SpecificParams{{0, 1, 0, 0}, {20, 3, 32, 1}, {1, 1, 1, 1}, {3, 1, 2, 0}}, + Slice8SpecificParams{{0, 0, 10, 0}, {1, 3, 20, 20}, {1, 1, 1, 1}, {}}, + Slice8SpecificParams{{0, 0, 20, 20}, {1, 5, 26, 25}, {1, 1, 2, 1}, {0, 1, 3, 2}}, + Slice8SpecificParams{{0, 0, 0, 20}, {1, 2, 30, 30}, {1, 1, 2, 1}, {}}, + Slice8SpecificParams{{0, 0, 2, 10}, {1, 3, 32, 20}, {1, 1, 1, 1}, {0, 1, 2, 3}}, + Slice8SpecificParams{{0, 1, 0, 10}, {1, 5, 32, 30}, {1, 1, 1, 1}, {}}, + Slice8SpecificParams{{0, 1, 2, 10}, {1, 5, 32, 18}, {1, 1, 1, 2}, {0, 1, 2, 3}}, + Slice8SpecificParams{{0, 0, 2, 10}, {1, 8, 32, 18}, {1, 2, 1, 2}, {}}, + Slice8SpecificParams{{0, 0, 10}, {2, 32, 18}, {1, 1, 1}, {1, 2, 3}}, + Slice8SpecificParams{{0, 10}, {2, 32}, {1, 1}, {1, 3}}}; + +const std::vector> inputShapesStatic4D = {{{1, 5, 32, 32}}, {{2, 5, 32, 48}}}; + +const std::vector> inputShapesDynamic4D = {{{// Origin dynamic shape + {-1, -1, -1, -1}, + {// Dynamic shapes instances + {1, 5, 32, 32}, + {2, 5, 32, 32}, + {1, 5, 64, 64}}}}, + {{// Origin dynamic shape + {-1, 5, -1, -1}, + {// Dynamic shapes instances + {1, 5, 32, 32}, + {2, 5, 32, 32}, + {3, 5, 32, 36}}}}, + {{// Origin dynamic shape + {{1, 5}, 5, {32, 64}, {32, 64}}, + {// Dynamic shapes instances + {2, 5, 32, 32}, + {1, 5, 48, 32}, + {5, 5, 32, 32}}}}}; const std::vector CPUParamsCommon4D = { - cpuParams_nchw, - cpuParams_nhwc, + cpuParams_nchw, + cpuParams_nhwc, }; -INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_Common_Static_4D, Slice8LayerCPUTest, - ::testing::Combine( - ::testing::ValuesIn(static_shapes_to_test_representation(inputShapesStatic4D)), - ::testing::ValuesIn(testCasesCommon4D), - ::testing::Values(ngraph::helpers::InputLayerType::CONSTANT), - ::testing::ValuesIn(inputPrecisions), - ::testing::ValuesIn(CPUParamsCommon4D)), - Slice8LayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_Common_Dynamic_4D, Slice8LayerCPUTest, - ::testing::Combine( - ::testing::ValuesIn(inputShapesDynamic4D), - ::testing::ValuesIn(testCasesCommon4D), - ::testing::ValuesIn(inputLayerTypes), - ::testing::ValuesIn(inputPrecisions), - ::testing::ValuesIn(CPUParamsCommon4D)), - Slice8LayerCPUTest::getTestCaseName); - +INSTANTIATE_TEST_SUITE_P( + smoke_CompareWithRefs_Common_Static_4D, + Slice8LayerCPUTest, + ::testing::Combine(::testing::ValuesIn(static_shapes_to_test_representation(inputShapesStatic4D)), + ::testing::ValuesIn(testCasesCommon4D), + ::testing::Values(ov::test::utils::InputLayerType::CONSTANT), + ::testing::ValuesIn(inputPrecisions), + ::testing::ValuesIn(CPUParamsCommon4D)), + Slice8LayerCPUTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_Common_Dynamic_4D, + Slice8LayerCPUTest, + 
::testing::Combine(::testing::ValuesIn(inputShapesDynamic4D), + ::testing::ValuesIn(testCasesCommon4D), + ::testing::ValuesIn(inputLayerTypes), + ::testing::ValuesIn(inputPrecisions), + ::testing::ValuesIn(CPUParamsCommon4D)), + Slice8LayerCPUTest::getTestCaseName); const std::vector testCasesBlocked4DSubset1 = { - Slice8SpecificParams{ { 0, 0, 0, 0 }, { 1, 32, 32, 32 }, { 1, 1, 1, 1 }, { } }, - Slice8SpecificParams{ { 0, 0, 16, 0 }, { 1, 32, 32, 32 }, { 1, 1, 1, 1 }, { 0, 3, 2, 1 } }, - Slice8SpecificParams{ { 0, 0, 0 }, { 32, 32, 16 }, { 1, 1, 1 }, { } }, - Slice8SpecificParams{ { 0, 0, 0 }, { 16, 32, 32 }, { 1, 1, 1 }, { 1, 3, 2 } }, + Slice8SpecificParams{{0, 0, 0, 0}, {1, 32, 32, 32}, {1, 1, 1, 1}, {}}, + Slice8SpecificParams{{0, 0, 16, 0}, {1, 32, 32, 32}, {1, 1, 1, 1}, {0, 3, 2, 1}}, + Slice8SpecificParams{{0, 0, 0}, {32, 32, 16}, {1, 1, 1}, {}}, + Slice8SpecificParams{{0, 0, 0}, {16, 32, 32}, {1, 1, 1}, {1, 3, 2}}, }; const std::vector testCasesBlocked4DSubset2 = { - Slice8SpecificParams{ { 0, 0, 5, 4 }, { 1, 16, 28, 27 }, { 1, 1, 1, 1 }, { 0, 1, 2, 3 } }, - Slice8SpecificParams{ { 0, 16, 0, 0 }, { 1, 32, 10, 10 }, { 1, 1, 1, 1 }, { } }, - Slice8SpecificParams{ { 0, 0, 10, 0 }, { 16, 1, 20, 10 }, { 1, 1, 1, 1 }, { 1, 0, 2, 3 } }, - Slice8SpecificParams{ { 0, 0, 20, 20 }, { 1, 32, 25, 25 }, { 1, 1, 1, 1 }, { 0, 1, 3, 2 } }, - Slice8SpecificParams{ { 0, 16, 0, 20 }, { 32, 32, 1, 30 }, { 1, 1, 1, 2 }, { 2, 1, 0, 3 } }, - Slice8SpecificParams{ { 0, 16, 2, 10 }, { 1, 32, 32, 20 }, { 1, 1, 2, 1 }, { 0, 1, 2, 3 } }, - Slice8SpecificParams{ { 0, 16, 0, 0 }, { 2, 64, 32, 20 }, { 1, 1, 1, 1 }, { 0, 1, 2, 3 } }, - Slice8SpecificParams{ { 0, 32, 0, 0 }, { 2, 50, 32, 20 }, { 1, 1, 1, 1 }, { } }, - Slice8SpecificParams{ { 0, 0, 0, 0 }, { 32, 12, 2, 20 }, { 1, 1, 1, 1 }, { 0, 3, 2, 1 } }, - Slice8SpecificParams{ { 0, -16, 0, 10 }, { 2, 100, 32, 20 }, { 1, 1, 1, 1 }, { } }, - Slice8SpecificParams{ { 0, -16, 0, 0 }, { 2, -4, 32, 20 }, { 1, 1, 1, 1 }, { } }, - Slice8SpecificParams{ { 0, -32, 0, 0 }, { 2, -12, 32, 20 }, { 1, 1, 1, 1 }, { } } -}; - -const std::vector> inputShapesBlockedStatic4DSubset1 = { - {{ 1, 32, 32, 32 }}, {{ 1, 32, 32, 64 }} -}; - -const std::vector> inputShapesBlockedStatic4DSubset2 = { - {{ 1, 64, 32, 32 }}, {{ 1, 64, 32, 64 }} -}; - -const std::vector> inputShapesBlockedDynamic4DSubset1 = { - { - { // Origin dynamic shape - {-1, 32, -1, -1}, - { // Dynamic shapes instances - { 1, 32, 32, 32 }, { 2, 32, 32, 32 }, { 3, 32, 32, 48 } - } - } - }, - { - { // Origin dynamic shape - {{1, 5}, 32, {32, 64}, {32, 64}}, - { // Dynamic shapes instances - { 2, 32, 32, 32 }, { 1, 32, 48, 32 }, { 5, 32, 32, 48 } - } - } - } -}; - -const std::vector> inputShapesBlockedDynamic4DSubset2 = { - { - { // Origin dynamic shape - {-1, 64, -1, -1}, - { // Dynamic shapes instances - { 1, 64, 64, 32 }, { 2, 64, 32, 32 }, { 3, 64, 32, 48 } - } - } - }, - { - { // Origin dynamic shape - {{1, 5}, 64, {32, 64}, {32, 64}}, - { // Dynamic shapes instances - { 2, 64, 32, 32 }, { 1, 64, 48, 32 }, { 1, 64, 64, 64 } - } - } - } -}; + Slice8SpecificParams{{0, 0, 5, 4}, {1, 16, 28, 27}, {1, 1, 1, 1}, {0, 1, 2, 3}}, + Slice8SpecificParams{{0, 16, 0, 0}, {1, 32, 10, 10}, {1, 1, 1, 1}, {}}, + Slice8SpecificParams{{0, 0, 10, 0}, {16, 1, 20, 10}, {1, 1, 1, 1}, {1, 0, 2, 3}}, + Slice8SpecificParams{{0, 0, 20, 20}, {1, 32, 25, 25}, {1, 1, 1, 1}, {0, 1, 3, 2}}, + Slice8SpecificParams{{0, 16, 0, 20}, {32, 32, 1, 30}, {1, 1, 1, 2}, {2, 1, 0, 3}}, + Slice8SpecificParams{{0, 16, 2, 10}, {1, 32, 32, 20}, {1, 1, 2, 1}, {0, 1, 
2, 3}}, + Slice8SpecificParams{{0, 16, 0, 0}, {2, 64, 32, 20}, {1, 1, 1, 1}, {0, 1, 2, 3}}, + Slice8SpecificParams{{0, 32, 0, 0}, {2, 50, 32, 20}, {1, 1, 1, 1}, {}}, + Slice8SpecificParams{{0, 0, 0, 0}, {32, 12, 2, 20}, {1, 1, 1, 1}, {0, 3, 2, 1}}, + Slice8SpecificParams{{0, -16, 0, 10}, {2, 100, 32, 20}, {1, 1, 1, 1}, {}}, + Slice8SpecificParams{{0, -16, 0, 0}, {2, -4, 32, 20}, {1, 1, 1, 1}, {}}, + Slice8SpecificParams{{0, -32, 0, 0}, {2, -12, 32, 20}, {1, 1, 1, 1}, {}}}; + +const std::vector> inputShapesBlockedStatic4DSubset1 = {{{1, 32, 32, 32}}, {{1, 32, 32, 64}}}; + +const std::vector> inputShapesBlockedStatic4DSubset2 = {{{1, 64, 32, 32}}, {{1, 64, 32, 64}}}; + +const std::vector> inputShapesBlockedDynamic4DSubset1 = {{{// Origin dynamic shape + {-1, 32, -1, -1}, + {// Dynamic shapes instances + {1, 32, 32, 32}, + {2, 32, 32, 32}, + {3, 32, 32, 48}}}}, + {{// Origin dynamic shape + {{1, 5}, 32, {32, 64}, {32, 64}}, + {// Dynamic shapes instances + {2, 32, 32, 32}, + {1, 32, 48, 32}, + {5, 32, 32, 48}}}}}; + +const std::vector> inputShapesBlockedDynamic4DSubset2 = {{{// Origin dynamic shape + {-1, 64, -1, -1}, + {// Dynamic shapes instances + {1, 64, 64, 32}, + {2, 64, 32, 32}, + {3, 64, 32, 48}}}}, + {{// Origin dynamic shape + {{1, 5}, 64, {32, 64}, {32, 64}}, + {// Dynamic shapes instances + {2, 64, 32, 32}, + {1, 64, 48, 32}, + {1, 64, 64, 64}}}}}; const std::vector CPUParamsBlocked4D = { - cpuParams_nChw16c, - cpuParams_nChw8c, + cpuParams_nChw16c, + cpuParams_nChw8c, }; -INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_Common_Static_4D_Subset1, Slice8LayerCPUTest, - ::testing::Combine( - ::testing::ValuesIn(static_shapes_to_test_representation(inputShapesBlockedStatic4DSubset1)), - ::testing::ValuesIn(testCasesBlocked4DSubset1), - ::testing::Values(ngraph::helpers::InputLayerType::CONSTANT), - ::testing::ValuesIn(inputPrecisions), - ::testing::ValuesIn(CPUParamsBlocked4D)), +INSTANTIATE_TEST_SUITE_P( + smoke_CompareWithRefs_Common_Static_4D_Subset1, + Slice8LayerCPUTest, + ::testing::Combine(::testing::ValuesIn(static_shapes_to_test_representation(inputShapesBlockedStatic4DSubset1)), + ::testing::ValuesIn(testCasesBlocked4DSubset1), + ::testing::Values(ov::test::utils::InputLayerType::CONSTANT), + ::testing::ValuesIn(inputPrecisions), + ::testing::ValuesIn(CPUParamsBlocked4D)), + Slice8LayerCPUTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_Common_Dynamic_4D_Subset1, + Slice8LayerCPUTest, + ::testing::Combine(::testing::ValuesIn(inputShapesBlockedDynamic4DSubset1), + ::testing::ValuesIn(testCasesBlocked4DSubset1), + ::testing::Values(ov::test::utils::InputLayerType::CONSTANT), + ::testing::ValuesIn(inputPrecisions), + ::testing::ValuesIn(CPUParamsBlocked4D)), Slice8LayerCPUTest::getTestCaseName); -INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_Common_Dynamic_4D_Subset1, Slice8LayerCPUTest, - ::testing::Combine( - ::testing::ValuesIn(inputShapesBlockedDynamic4DSubset1), - ::testing::ValuesIn(testCasesBlocked4DSubset1), - ::testing::Values(ngraph::helpers::InputLayerType::CONSTANT), - ::testing::ValuesIn(inputPrecisions), - ::testing::ValuesIn(CPUParamsBlocked4D)), +INSTANTIATE_TEST_SUITE_P( + smoke_CompareWithRefs_Common_Static_4D_Subset2, + Slice8LayerCPUTest, + ::testing::Combine(::testing::ValuesIn(static_shapes_to_test_representation(inputShapesBlockedStatic4DSubset2)), + ::testing::ValuesIn(testCasesBlocked4DSubset2), + ::testing::Values(ov::test::utils::InputLayerType::CONSTANT), + ::testing::ValuesIn(inputPrecisions), + 
::testing::ValuesIn(CPUParamsBlocked4D)), + Slice8LayerCPUTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_Common_Dynamic_4D_Subset2, + Slice8LayerCPUTest, + ::testing::Combine(::testing::ValuesIn(inputShapesBlockedDynamic4DSubset2), + ::testing::ValuesIn(testCasesBlocked4DSubset2), + ::testing::Values(ov::test::utils::InputLayerType::CONSTANT), + ::testing::ValuesIn(inputPrecisions), + ::testing::ValuesIn(CPUParamsBlocked4D)), Slice8LayerCPUTest::getTestCaseName); -INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_Common_Static_4D_Subset2, Slice8LayerCPUTest, - ::testing::Combine( - ::testing::ValuesIn(static_shapes_to_test_representation(inputShapesBlockedStatic4DSubset2)), - ::testing::ValuesIn(testCasesBlocked4DSubset2), - ::testing::Values(ngraph::helpers::InputLayerType::CONSTANT), - ::testing::ValuesIn(inputPrecisions), - ::testing::ValuesIn(CPUParamsBlocked4D)), - Slice8LayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_Common_Dynamic_4D_Subset2, Slice8LayerCPUTest, - ::testing::Combine( - ::testing::ValuesIn(inputShapesBlockedDynamic4DSubset2), - ::testing::ValuesIn(testCasesBlocked4DSubset2), - ::testing::Values(ngraph::helpers::InputLayerType::CONSTANT), - ::testing::ValuesIn(inputPrecisions), - ::testing::ValuesIn(CPUParamsBlocked4D)), - Slice8LayerCPUTest::getTestCaseName); - - const std::vector testCasesCommon5D = { - Slice8SpecificParams{ { 0, 2, 0, 5, 4 }, { 1, 4, 5, 28, 27 }, { 1, 1, 1, 1, 1 }, { 0, 1, 2, 3, 4 } }, - Slice8SpecificParams{ { 0, 0, 10, 0, 0 }, { 1, 5, 20, 32, 20 }, { 1, 1, 1, 1, 1 }, { } }, - Slice8SpecificParams{ { 0, 1, 10, 0, 0 }, { 20, 3, 20, 32, 1 }, { 1, 1, 1, 1, 1 }, { 4, 1, 2, 3, 0 } }, - Slice8SpecificParams{ { 0, 20, 0, 0, 20 }, { 1, 30, 20, 5, 26 }, { 1, 1, 1, 2, 2 }, { 0, 3, 2, 1, 4 } }, - Slice8SpecificParams{ { 0, 0, 10, 0, 20 }, { 1, 2, 20, 30, 30 }, { 1, 1, 2, 1, 1 }, { } }, - Slice8SpecificParams{ { 0, 0, 2, 10, 0 }, { 1, 5, 10, 32, 20 }, { 1, 1, 1, 1, 1 }, { 0, 1, 2, 3, 4 } }, - Slice8SpecificParams{ { 0, 1, 0, 10, 0 }, { 1, 5, 20, 32, 32 }, { 1, 1, 1, 1, 1 }, { } }, - Slice8SpecificParams{ { 0, 0, 0, 0, 0 }, { 1, 5, 10, 16, 16 }, { 1, 1, 2, 1, 1 }, { 0, 1, 2, 3, 4 } } -}; - -const std::vector> inputShapesStatic5D = { - {{ 1, 5, 20, 32, 32 }}, {{ 2, 5, 32, 32, 32 }} -}; - -const std::vector> inputShapesDynamic5D = { - { - { // Origin dynamic shape - {-1, -1, -1, -1, -1}, - { // Dynamic shapes instances - { 1, 5, 32, 32, 32 }, { 1, 5, 32, 32, 48 }, { 1, 5, 64, 64, 64 }, { 1, 10, 32, 32, 32 } - } - } - }, - { - { // Origin dynamic shape - {-1, 5, -1, -1, -1}, - { // Dynamic shapes instances - { 1, 5, 32, 32, 48 }, { 1, 5, 32, 48, 32 }, { 1, 5, 48, 32, 32 } - } - } - }, - { - { // Origin dynamic shape - {{1, 5}, 5, {32, 64}, {32, 64}, {32, 64}}, - { // Dynamic shapes instances - { 2, 5, 32, 32, 32 }, { 1, 5, 48, 32, 32 }, { 5, 5, 32, 32, 48 } - } - } - } -}; + Slice8SpecificParams{{0, 2, 0, 5, 4}, {1, 4, 5, 28, 27}, {1, 1, 1, 1, 1}, {0, 1, 2, 3, 4}}, + Slice8SpecificParams{{0, 0, 10, 0, 0}, {1, 5, 20, 32, 20}, {1, 1, 1, 1, 1}, {}}, + Slice8SpecificParams{{0, 1, 10, 0, 0}, {20, 3, 20, 32, 1}, {1, 1, 1, 1, 1}, {4, 1, 2, 3, 0}}, + Slice8SpecificParams{{0, 20, 0, 0, 20}, {1, 30, 20, 5, 26}, {1, 1, 1, 2, 2}, {0, 3, 2, 1, 4}}, + Slice8SpecificParams{{0, 0, 10, 0, 20}, {1, 2, 20, 30, 30}, {1, 1, 2, 1, 1}, {}}, + Slice8SpecificParams{{0, 0, 2, 10, 0}, {1, 5, 10, 32, 20}, {1, 1, 1, 1, 1}, {0, 1, 2, 3, 4}}, + Slice8SpecificParams{{0, 1, 0, 10, 0}, {1, 5, 20, 32, 32}, {1, 1, 1, 1, 1}, {}}, + 
Slice8SpecificParams{{0, 0, 0, 0, 0}, {1, 5, 10, 16, 16}, {1, 1, 2, 1, 1}, {0, 1, 2, 3, 4}}}; + +const std::vector> inputShapesStatic5D = {{{1, 5, 20, 32, 32}}, {{2, 5, 32, 32, 32}}}; + +const std::vector> inputShapesDynamic5D = {{{// Origin dynamic shape + {-1, -1, -1, -1, -1}, + {// Dynamic shapes instances + {1, 5, 32, 32, 32}, + {1, 5, 32, 32, 48}, + {1, 5, 64, 64, 64}, + {1, 10, 32, 32, 32}}}}, + {{// Origin dynamic shape + {-1, 5, -1, -1, -1}, + {// Dynamic shapes instances + {1, 5, 32, 32, 48}, + {1, 5, 32, 48, 32}, + {1, 5, 48, 32, 32}}}}, + {{// Origin dynamic shape + {{1, 5}, 5, {32, 64}, {32, 64}, {32, 64}}, + {// Dynamic shapes instances + {2, 5, 32, 32, 32}, + {1, 5, 48, 32, 32}, + {5, 5, 32, 32, 48}}}}}; const std::vector CPUParamsCommon5D = { - cpuParams_ncdhw, - cpuParams_ndhwc, + cpuParams_ncdhw, + cpuParams_ndhwc, }; -INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_Common_Static_5D, Slice8LayerCPUTest, - ::testing::Combine( - ::testing::ValuesIn(static_shapes_to_test_representation(inputShapesStatic5D)), - ::testing::ValuesIn(testCasesCommon5D), - ::testing::Values(ngraph::helpers::InputLayerType::CONSTANT), - ::testing::ValuesIn(inputPrecisions), - ::testing::ValuesIn(CPUParamsCommon5D)), - Slice8LayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_Common_Dynamic_5D, Slice8LayerCPUTest, - ::testing::Combine( - ::testing::ValuesIn(inputShapesDynamic5D), - ::testing::ValuesIn(testCasesCommon5D), - ::testing::ValuesIn(inputLayerTypes), - ::testing::ValuesIn(inputPrecisions), - ::testing::ValuesIn(CPUParamsCommon5D)), +INSTANTIATE_TEST_SUITE_P( + smoke_CompareWithRefs_Common_Static_5D, + Slice8LayerCPUTest, + ::testing::Combine(::testing::ValuesIn(static_shapes_to_test_representation(inputShapesStatic5D)), + ::testing::ValuesIn(testCasesCommon5D), + ::testing::Values(ov::test::utils::InputLayerType::CONSTANT), + ::testing::ValuesIn(inputPrecisions), + ::testing::ValuesIn(CPUParamsCommon5D)), + Slice8LayerCPUTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_Common_Dynamic_5D, + Slice8LayerCPUTest, + ::testing::Combine(::testing::ValuesIn(inputShapesDynamic5D), + ::testing::ValuesIn(testCasesCommon5D), + ::testing::ValuesIn(inputLayerTypes), + ::testing::ValuesIn(inputPrecisions), + ::testing::ValuesIn(CPUParamsCommon5D)), Slice8LayerCPUTest::getTestCaseName); const std::vector testCasesBlocked5DSubset1 = { - Slice8SpecificParams{ { 0, 0, 0, 5, 4 }, { 1, 16, 5, 28, 27 }, { 1, 1, 1, 1, 1 }, { 0, 1, 2, 3, 4 } }, - Slice8SpecificParams{ { 0, 0, 10, 0, 0 }, { 1, 16, 20, 32, 20 }, { 1, 1, 1, 1, 1 }, { } }, - Slice8SpecificParams{ { 0, 0, 0, 20, 20 }, { 16, 1, 20, 26, 30 }, { 1, 1, 1, 2, 2 }, { 1, 0, 2, 4, 3 } }, - Slice8SpecificParams{ { 0, 0, 10, 0, 20 }, { 1, 16, 20, 30, 30 }, { 1, 1, 2, 1, 1 }, { } }, - Slice8SpecificParams{ { 0, 0, 2, 10, 0 }, { 1, 16, 10, 32, 20 }, { 1, 1, 1, 1, 1 }, { } }, - Slice8SpecificParams{ { 0, 0, 0, 10, 0 }, { 1, 8, 20, 32, 32 }, { 1, 1, 1, 1, 1 }, { } }, - Slice8SpecificParams{ { 0, 0, 0, 0, 0 }, { 1, 16, 10, 16, 16 }, { 1, 1, 2, 1, 1 }, { 0, 1, 2, 3, 4 } }, + Slice8SpecificParams{{0, 0, 0, 5, 4}, {1, 16, 5, 28, 27}, {1, 1, 1, 1, 1}, {0, 1, 2, 3, 4}}, + Slice8SpecificParams{{0, 0, 10, 0, 0}, {1, 16, 20, 32, 20}, {1, 1, 1, 1, 1}, {}}, + Slice8SpecificParams{{0, 0, 0, 20, 20}, {16, 1, 20, 26, 30}, {1, 1, 1, 2, 2}, {1, 0, 2, 4, 3}}, + Slice8SpecificParams{{0, 0, 10, 0, 20}, {1, 16, 20, 30, 30}, {1, 1, 2, 1, 1}, {}}, + Slice8SpecificParams{{0, 0, 2, 10, 0}, {1, 16, 10, 32, 20}, {1, 1, 1, 1, 1}, {}}, + 
Slice8SpecificParams{{0, 0, 0, 10, 0}, {1, 8, 20, 32, 32}, {1, 1, 1, 1, 1}, {}}, + Slice8SpecificParams{{0, 0, 0, 0, 0}, {1, 16, 10, 16, 16}, {1, 1, 2, 1, 1}, {0, 1, 2, 3, 4}}, }; const std::vector testCasesBlocked5DSubset2 = { - Slice8SpecificParams{ { 0, 0, 0, 5, 4 }, { 1, 16, 5, 28, 27 }, { 1, 1, 1, 1, 1 }, { 0, 1, 2, 3, 4 } }, - Slice8SpecificParams{ { 0, 0, 5, 4 }, { 16, 5, 28, 27 }, { 1, 1, 1, 1 }, { 1, 2, 3, 4 } }, - Slice8SpecificParams{ { 0, 0, 10, 0, 0 }, { 1, 16, 20, 32, 20 }, { 1, 1, 1, 1, 1 }, { 0, 1, 2, 3, 4 } }, - Slice8SpecificParams{ { 0, 0, 0, 20, 20 }, { 1, 20, 16, 30, 26 }, { 1, 1, 1, 2, 2 }, { 0, 2, 1, 3, 4 } }, - Slice8SpecificParams{ { 0, 0, 10, 0, 20 }, { 1, 16, 20, 30, 30 }, { 1, 1, 2, 1, 1 }, { 0, 1, 2, 3, 4 } }, - Slice8SpecificParams{ { 0, 0, 2, 10, 0 }, { 1, 16, 10, 32, 20 }, { 1, 1, 1, 1, 1 }, { 0, 1, 2, 3, 4 } }, - Slice8SpecificParams{ { 0, 0, 0, 10, 0 }, { 1, 8, 20, 32, 32 }, { 1, 1, 1, 1, 1 }, { 0, 1, 2, 3, 4 } }, - Slice8SpecificParams{ { 0, 0, 0, 0, 0 }, { 10, 16, 1, 16, 16 }, { 2, 1, 1, 1, 1 }, { 2, 1, 0, 3, 4 } }, - Slice8SpecificParams{ { 0, 0, 0, 0, 0 }, { 1, 25, 20, 10, 10 }, { 1, 1, 1, 1, 1 }, { } }, - Slice8SpecificParams{ { 0, 16, 0, 0, 0 }, { 1, 25, 20, 10, 10 }, { 1, 1, 1, 1, 1 }, { 0, 1, 2, 3, 4 } }, - Slice8SpecificParams{ { 0, 16, 0, 0, 0 }, { 1, 64, 20, 10, 10 }, { 1, 1, 1, 1, 1 }, { 0, 1, 2, 3, 4 } }, + Slice8SpecificParams{{0, 0, 0, 5, 4}, {1, 16, 5, 28, 27}, {1, 1, 1, 1, 1}, {0, 1, 2, 3, 4}}, + Slice8SpecificParams{{0, 0, 5, 4}, {16, 5, 28, 27}, {1, 1, 1, 1}, {1, 2, 3, 4}}, + Slice8SpecificParams{{0, 0, 10, 0, 0}, {1, 16, 20, 32, 20}, {1, 1, 1, 1, 1}, {0, 1, 2, 3, 4}}, + Slice8SpecificParams{{0, 0, 0, 20, 20}, {1, 20, 16, 30, 26}, {1, 1, 1, 2, 2}, {0, 2, 1, 3, 4}}, + Slice8SpecificParams{{0, 0, 10, 0, 20}, {1, 16, 20, 30, 30}, {1, 1, 2, 1, 1}, {0, 1, 2, 3, 4}}, + Slice8SpecificParams{{0, 0, 2, 10, 0}, {1, 16, 10, 32, 20}, {1, 1, 1, 1, 1}, {0, 1, 2, 3, 4}}, + Slice8SpecificParams{{0, 0, 0, 10, 0}, {1, 8, 20, 32, 32}, {1, 1, 1, 1, 1}, {0, 1, 2, 3, 4}}, + Slice8SpecificParams{{0, 0, 0, 0, 0}, {10, 16, 1, 16, 16}, {2, 1, 1, 1, 1}, {2, 1, 0, 3, 4}}, + Slice8SpecificParams{{0, 0, 0, 0, 0}, {1, 25, 20, 10, 10}, {1, 1, 1, 1, 1}, {}}, + Slice8SpecificParams{{0, 16, 0, 0, 0}, {1, 25, 20, 10, 10}, {1, 1, 1, 1, 1}, {0, 1, 2, 3, 4}}, + Slice8SpecificParams{{0, 16, 0, 0, 0}, {1, 64, 20, 10, 10}, {1, 1, 1, 1, 1}, {0, 1, 2, 3, 4}}, }; -const std::vector> inputShapesBlockedStatic5DSubset1 = { - {{ 1, 16, 32, 32, 32 }}, {{ 2, 16, 32, 32, 32 }}, {{ 2, 32, 32, 32, 32 }} -}; +const std::vector> inputShapesBlockedStatic5DSubset1 = {{{1, 16, 32, 32, 32}}, + {{2, 16, 32, 32, 32}}, + {{2, 32, 32, 32, 32}}}; -const std::vector> inputShapesBlockedStatic5DSubset2 = { - {{ 1, 64, 32, 32, 32 }}, {{ 2, 64, 32, 64, 32 }}, {{ 2, 64, 32, 32, 32 }} -}; +const std::vector> inputShapesBlockedStatic5DSubset2 = {{{1, 64, 32, 32, 32}}, + {{2, 64, 32, 64, 32}}, + {{2, 64, 32, 32, 32}}}; const std::vector> inputShapesBlockedDynamic5DSubset1 = { - { - { // Origin dynamic shape - {-1, 16, -1, -1, -1}, - { // Dynamic shapes instances - { 1, 16, 32, 32, 32 }, { 2, 16, 32, 32, 32 }, { 2, 16, 32, 32, 32 } - } - } - }, - { - { // Origin dynamic shape - {{1, 5}, 16, {16, 32}, {16, 32}, {16, 32}}, - { // Dynamic shapes instances - { 1, 16, 32, 32, 32 }, { 2, 16, 32, 32, 32 }, { 1, 16, 20, 32, 32 } - } - } - } -}; + {{// Origin dynamic shape + {-1, 16, -1, -1, -1}, + {// Dynamic shapes instances + {1, 16, 32, 32, 32}, + {2, 16, 32, 32, 32}, + {2, 16, 32, 32, 32}}}}, + {{// Origin dynamic 
shape + {{1, 5}, 16, {16, 32}, {16, 32}, {16, 32}}, + {// Dynamic shapes instances + {1, 16, 32, 32, 32}, + {2, 16, 32, 32, 32}, + {1, 16, 20, 32, 32}}}}}; const std::vector> inputShapesBlockedDynamic5DSubset2 = { { - { // Origin dynamic shape - {-1, 64, -1, -1, -1}, - { // Dynamic shapes instances - { 1, 64, 64, 32, 32 }, { 2, 64, 32, 32, 32 }, { 3, 64, 32, 48, 32 } - } - }, + {// Origin dynamic shape + {-1, 64, -1, -1, -1}, + {// Dynamic shapes instances + {1, 64, 64, 32, 32}, + {2, 64, 32, 32, 32}, + {3, 64, 32, 48, 32}}}, }, - { - { // Origin dynamic shape - {{1, 5}, 64, {16, 32}, {16, 32}, {16, 32}}, - { // Dynamic shapes instances - { 1, 64, 32, 32, 32 }, { 2, 64, 32, 32, 32 }, { 1, 64, 20, 32, 32 } - } - } - } -}; + {{// Origin dynamic shape + {{1, 5}, 64, {16, 32}, {16, 32}, {16, 32}}, + {// Dynamic shapes instances + {1, 64, 32, 32, 32}, + {2, 64, 32, 32, 32}, + {1, 64, 20, 32, 32}}}}}; const std::vector CPUParamsBlocked5D = { - cpuParams_nCdhw16c, - cpuParams_nCdhw8c, + cpuParams_nCdhw16c, + cpuParams_nCdhw8c, }; -INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_Common_Static_5D_Subset1, Slice8LayerCPUTest, - ::testing::Combine( - ::testing::ValuesIn(static_shapes_to_test_representation(inputShapesBlockedStatic5DSubset1)), - ::testing::ValuesIn(testCasesBlocked5DSubset1), - ::testing::Values(ngraph::helpers::InputLayerType::CONSTANT), - ::testing::ValuesIn(inputPrecisions), - ::testing::ValuesIn(CPUParamsBlocked5D)), - Slice8LayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_Common_Dynamic_5D_Subset1, Slice8LayerCPUTest, - ::testing::Combine( - ::testing::ValuesIn(inputShapesBlockedDynamic5DSubset1), - ::testing::ValuesIn(testCasesBlocked5DSubset1), - ::testing::Values(ngraph::helpers::InputLayerType::CONSTANT), - ::testing::ValuesIn(inputPrecisions), - ::testing::ValuesIn(CPUParamsBlocked5D)), - Slice8LayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_Common_Static_5D_Subset2, Slice8LayerCPUTest, - ::testing::Combine( - ::testing::ValuesIn(static_shapes_to_test_representation(inputShapesBlockedStatic4DSubset2)), - ::testing::ValuesIn(testCasesBlocked4DSubset2), - ::testing::Values(ngraph::helpers::InputLayerType::CONSTANT), - ::testing::ValuesIn(inputPrecisions), - ::testing::ValuesIn(CPUParamsBlocked4D)), +INSTANTIATE_TEST_SUITE_P( + smoke_CompareWithRefs_Common_Static_5D_Subset1, + Slice8LayerCPUTest, + ::testing::Combine(::testing::ValuesIn(static_shapes_to_test_representation(inputShapesBlockedStatic5DSubset1)), + ::testing::ValuesIn(testCasesBlocked5DSubset1), + ::testing::Values(ov::test::utils::InputLayerType::CONSTANT), + ::testing::ValuesIn(inputPrecisions), + ::testing::ValuesIn(CPUParamsBlocked5D)), + Slice8LayerCPUTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_Common_Dynamic_5D_Subset1, + Slice8LayerCPUTest, + ::testing::Combine(::testing::ValuesIn(inputShapesBlockedDynamic5DSubset1), + ::testing::ValuesIn(testCasesBlocked5DSubset1), + ::testing::Values(ov::test::utils::InputLayerType::CONSTANT), + ::testing::ValuesIn(inputPrecisions), + ::testing::ValuesIn(CPUParamsBlocked5D)), Slice8LayerCPUTest::getTestCaseName); -INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_Common_Dynamic_5D_Subset2, Slice8LayerCPUTest, - ::testing::Combine( - ::testing::ValuesIn(inputShapesBlockedDynamic5DSubset2), - ::testing::ValuesIn(testCasesBlocked5DSubset2), - ::testing::Values(ngraph::helpers::InputLayerType::CONSTANT), - ::testing::ValuesIn(inputPrecisions), - 
::testing::ValuesIn(CPUParamsBlocked5D)), +INSTANTIATE_TEST_SUITE_P( + smoke_CompareWithRefs_Common_Static_5D_Subset2, + Slice8LayerCPUTest, + ::testing::Combine(::testing::ValuesIn(static_shapes_to_test_representation(inputShapesBlockedStatic4DSubset2)), + ::testing::ValuesIn(testCasesBlocked4DSubset2), + ::testing::Values(ov::test::utils::InputLayerType::CONSTANT), + ::testing::ValuesIn(inputPrecisions), + ::testing::ValuesIn(CPUParamsBlocked4D)), + Slice8LayerCPUTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_Common_Dynamic_5D_Subset2, + Slice8LayerCPUTest, + ::testing::Combine(::testing::ValuesIn(inputShapesBlockedDynamic5DSubset2), + ::testing::ValuesIn(testCasesBlocked5DSubset2), + ::testing::Values(ov::test::utils::InputLayerType::CONSTANT), + ::testing::ValuesIn(inputPrecisions), + ::testing::ValuesIn(CPUParamsBlocked5D)), Slice8LayerCPUTest::getTestCaseName); /* Descriptors check */ @@ -597,44 +552,29 @@ TEST_P(Slice8LayerDescriptorCPUTest, DescriptorsCheck) { } const std::vector testCasesDescriptors = { - Slice8SpecificParams{ { 0, -4, 0, 0 }, { 0, 2147483647, 0, 0 }, { 1, 1, 1, 1 }, { 0, 1, 2, 3 } }, - Slice8SpecificParams{ { 0, 5, 0, 0 }, { 1, 20, 28, 27 }, { 1, 1, 1, 1 }, { 0, 1, 2, 3 } }, - Slice8SpecificParams{ { 0, 0, 0, 0 }, { 1, 2147483647, 32, 32 }, { 1, 2, 1, 1 }, { 0, 1, 2, 3 } } -}; - -const std::vector> inputShapesDescriptors = { - { - { {}, - { // Static shapes - { 1, 16, 32, 32 } - } - } - }, - { - { {}, - { // Static shapes - { 1, 17, 32, 32 } - } - } - }, - { - { // Origin dynamic shapes - {1, -1, 32, 32}, - { // Dynamic shapes instances - { 1, 16, 32, 32 }, { 1, 32, 32, 32 } - } - } - } -}; - -INSTANTIATE_TEST_SUITE_P(smoke_Slice8LayerDescriptorCPUTest, Slice8LayerDescriptorCPUTest, - ::testing::Combine( - ::testing::ValuesIn(inputShapesDescriptors), - ::testing::ValuesIn(testCasesDescriptors), - ::testing::Values(ngraph::helpers::InputLayerType::CONSTANT), - ::testing::Values(ElementType::f32), - ::testing::Values(cpuParams_nChw8c)), + Slice8SpecificParams{{0, -4, 0, 0}, {0, 2147483647, 0, 0}, {1, 1, 1, 1}, {0, 1, 2, 3}}, + Slice8SpecificParams{{0, 5, 0, 0}, {1, 20, 28, 27}, {1, 1, 1, 1}, {0, 1, 2, 3}}, + Slice8SpecificParams{{0, 0, 0, 0}, {1, 2147483647, 32, 32}, {1, 2, 1, 1}, {0, 1, 2, 3}}}; + +const std::vector> inputShapesDescriptors = {{{{}, + {// Static shapes + {1, 16, 32, 32}}}}, + {{{}, + {// Static shapes + {1, 17, 32, 32}}}}, + {{// Origin dynamic shapes + {1, -1, 32, 32}, + {// Dynamic shapes instances + {1, 16, 32, 32}, + {1, 32, 32, 32}}}}}; + +INSTANTIATE_TEST_SUITE_P(smoke_Slice8LayerDescriptorCPUTest, + Slice8LayerDescriptorCPUTest, + ::testing::Combine(::testing::ValuesIn(inputShapesDescriptors), + ::testing::ValuesIn(testCasesDescriptors), + ::testing::Values(ov::test::utils::InputLayerType::CONSTANT), + ::testing::Values(ElementType::f32), + ::testing::Values(cpuParams_nChw8c)), Slice8LayerDescriptorCPUTest::getTestCaseName); -} // namespace -} // namespace CPULayerTestsDefinitions +} // namespace \ No newline at end of file diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/space_to_batch.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/space_to_batch.cpp index 616474d3a4c299..710302cba87a54 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/space_to_batch.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/space_to_batch.cpp @@ -2,42 +2,41 @@ // SPDX-License-Identifier: Apache-2.0 // -#include -#include 
"shared_test_classes/base/ov_subgraph.hpp" +#include "common_test_utils/ov_tensor_utils.hpp" #include "ov_models/builders.hpp" +#include "shared_test_classes/base/ov_subgraph.hpp" #include "test_utils/cpu_test_utils.hpp" -using namespace InferenceEngine; using namespace CPUTestUtils; -using namespace ov::test; -namespace CPULayerTestsDefinitions { +namespace ov { +namespace test { namespace { - std::vector blockShape, padsBegin, padsEnd; - ov::Shape paramShape; +std::vector blockShape, padsBegin, padsEnd; +ov::Shape paramShape; } // namespace -using SpaceToBatchLayerTestCPUParams = std::tuple< - std::vector, // Input shapes - std::vector, // block shape - std::vector, // pads begin - std::vector, // pads end - Precision , // Network precision - CPUSpecificParams>; +using SpaceToBatchLayerTestCPUParams = std::tuple, // Input shapes + std::vector, // block shape + std::vector, // pads begin + std::vector, // pads end + ov::element::Type, // Network type + CPUSpecificParams>; class SpaceToBatchCPULayerTest : public testing::WithParamInterface, - virtual public SubgraphBaseTest, public CPUTestsBase { + virtual public SubgraphBaseTest, + public CPUTestsBase { public: - static std::string getTestCaseName(const testing::TestParamInfo &obj) { + static std::string getTestCaseName(const testing::TestParamInfo& obj) { std::vector inputShapes; - Precision netPrecision; + ov::element::Type netPrecision; CPUSpecificParams cpuParams; std::tie(inputShapes, blockShape, padsBegin, padsEnd, netPrecision, cpuParams) = obj.param; std::ostringstream result; if (inputShapes.front().first.size() != 0) { result << "IS=("; - for (const auto &shape : inputShapes) { + for (const auto& shape : inputShapes) { result << ov::test::utils::partialShape2str({shape.first}) << "_"; } result.seekp(-1, result.cur); @@ -52,7 +51,7 @@ class SpaceToBatchCPULayerTest : public testing::WithParamInterface(); + auto* dataPtr = tensor.data(); for (size_t j = 0; j < blockShape.size(); j++) { dataPtr[j] = blockShape[j]; } } else if (i == 2U) { tensor = ov::Tensor(funcInput.get_element_type(), paramShape); - auto *dataPtr = tensor.data(); + auto* dataPtr = tensor.data(); for (size_t j = 0; j < padsBegin.size(); j++) { dataPtr[j] = padsBegin[j]; } } else if (i == 3U) { tensor = ov::Tensor(funcInput.get_element_type(), paramShape); - auto *dataPtr = tensor.data(); + auto* dataPtr = tensor.data(); for (size_t j = 0; j < padsEnd.size(); j++) { dataPtr[j] = padsEnd[j]; } @@ -92,21 +95,20 @@ class SpaceToBatchCPULayerTest : public testing::WithParamInterface inputShapes; - Precision netPrecision; + ov::element::Type netPrecision; CPUSpecificParams cpuParams; std::tie(inputShapes, blockShape, padsBegin, padsEnd, netPrecision, cpuParams) = this->GetParam(); std::tie(inFmts, outFmts, priority, selectedType) = cpuParams; - auto ngPrec = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); const std::vector inputShapesVec{inputShapes}; init_input_shapes(inputShapesVec); - if (strcmp(netPrecision.name(), "U8") == 0) - selectedType = std::string("ref_any_") + "I8"; + if (strcmp(netPrecision.get_type_name().c_str(), "u8") == 0) + selectedType = std::string("ref_any_") + "i8"; else - selectedType = std::string("ref_any_") + netPrecision.name(); + selectedType = std::string("ref_any_") + netPrecision.get_type_name(); - ov::ParameterVector params{std::make_shared(ngPrec, inputDynamicShapes.front())}; + ov::ParameterVector params{std::make_shared(netPrecision, inputDynamicShapes.front())}; paramShape = {params[0]->get_partial_shape().size()}; 
std::shared_ptr<ov::Node> in2, in3, in4;
@@ -132,17 +134,15 @@ TEST_P(SpaceToBatchCPULayerTest, CompareWithRefs) {

 namespace {

-const std::vector<Precision> netPrecision = {
-        Precision::U8,
-        Precision::I8,
-        Precision::I32,
-        Precision::FP32,
-        Precision::BF16
-};
+const std::vector<ov::element::Type> netPrecision = {ov::element::u8,
+                                                     ov::element::i8,
+                                                     ov::element::i32,
+                                                     ov::element::f32,
+                                                     ov::element::bf16};

 const std::vector<std::vector<int64_t>> blockShape4D1 = {{1, 2, 1, 2}, {1, 1, 2, 2}, {1, 2, 2, 2}};
 const std::vector<std::vector<int64_t>> padsBegin4D1 = {{0, 0, 0, 1}, {0, 0, 2, 1}, {0, 0, 4, 3}};
-const std::vector<std::vector<int64_t>> padsEnd4D1   = {{0, 0, 0, 1}, {0, 0, 4, 1}, {0, 0, 2, 3}};
+const std::vector<std::vector<int64_t>> padsEnd4D1 = {{0, 0, 0, 1}, {0, 0, 4, 1}, {0, 0, 2, 3}};

 std::vector<std::vector<ov::Shape>> staticInputShapes4D1 = {
     {{1, 16, 8, 12}, {4}, {4}, {4}},
@@ -150,233 +150,212 @@ std::vector<std::vector<ov::Shape>> staticInputShapes4D1 = {
 };

 std::vector<std::vector<InputShape>> dynamicInputShapes4D1 = {
-    {
-        {{-1, -1, -1, -1}, {{1, 6, 4, 8}, {2, 4, 8, 10}, {1, 8, 4, 10}}},
-        {{4}, {{4}, {4}, {4}}},
-        {{4}, {{4}, {4}, {4}}},
-        {{4}, {{4}, {4}, {4}}}
-    },
-    {
-        {{{1, 4}, {2, 16}, 6, -1}, {{4, 8, 6, 4}, {1, 6, 6, 8}, {2, 12, 6, 4}}},
-        {{4}, {{4}, {4}, {4}}},
-        {{4}, {{4}, {4}, {4}}},
-        {{4}, {{4}, {4}, {4}}}
-    }
-};
+    {{{-1, -1, -1, -1}, {{1, 6, 4, 8}, {2, 4, 8, 10}, {1, 8, 4, 10}}},
+     {{4}, {{4}, {4}, {4}}},
+     {{4}, {{4}, {4}, {4}}},
+     {{4}, {{4}, {4}, {4}}}},
+    {{{{1, 4}, {2, 16}, 6, -1}, {{4, 8, 6, 4}, {1, 6, 6, 8}, {2, 12, 6, 4}}},
+     {{4}, {{4}, {4}, {4}}},
+     {{4}, {{4}, {4}, {4}}},
+     {{4}, {{4}, {4}, {4}}}}};

 std::vector<std::vector<InputShape>> dynamicInputShapes4D1Blocked = {
-    {
-        {{-1, 16, -1, -1}, {{1, 16, 4, 6}, {2, 16, 6, 6}, {4, 16, 4, 8}}},
-        {{4}, {{4}, {4}, {4}}},
-        {{4}, {{4}, {4}, {4}}},
-        {{4}, {{4}, {4}, {4}}}
-    }
-};
+    {{{-1, 16, -1, -1}, {{1, 16, 4, 6}, {2, 16, 6, 6}, {4, 16, 4, 8}}},
+     {{4}, {{4}, {4}, {4}}},
+     {{4}, {{4}, {4}, {4}}},
+     {{4}, {{4}, {4}, {4}}}}};

-
-const std::vector<std::vector<int64_t>> blockShape4D2 = { {1, 2, 4, 3}, {1, 4, 4, 1}};
+const std::vector<std::vector<int64_t>> blockShape4D2 = {{1, 2, 4, 3}, {1, 4, 4, 1}};
 const std::vector<std::vector<int64_t>> padsBegin4D2 = {{0, 0, 0, 0}, {0, 0, 4, 3}};
-const std::vector<std::vector<int64_t>> padsEnd4D2   = {{0, 0, 4, 0}, {0, 0, 4, 3}};
+const std::vector<std::vector<int64_t>> padsEnd4D2 = {{0, 0, 4, 0}, {0, 0, 4, 3}};

 std::vector<std::vector<ov::Shape>> staticInputShapes4D2 = {
-    {{1, 16, 12, 12}, {4}, {4}, {4}},
-    {{1, 32, 12, 15}, {4}, {4}, {4}},
+    {{1, 16, 12, 12}, {4}, {4}, {4}},
+    {{1, 32, 12, 15}, {4}, {4}, {4}},
 };

 std::vector<std::vector<InputShape>> dynamicInputShapes4D2 = {
-    {
-        {{-1, -1, -1, -1}, {{1, 4, 8, 9}, {2, 8, 12, 9}, {6, 12, 4, 12}}},
-        {{4}, {{4}, {4}, {4}}},
-        {{4}, {{4}, {4}, {4}}},
-        {{4}, {{4}, {4}, {4}}}
-    },
-    {
-        {{2, {4, 16}, -1, -1}, {{2, 8, 4, 9}, {2, 4, 8, 6}, {2, 12, 12, 3}}},
-        {{4}, {{4}, {4}, {4}}},
-        {{4}, {{4}, {4}, {4}}},
-        {{4}, {{4}, {4}, {4}}}
-    }
-};
+    {{{-1, -1, -1, -1}, {{1, 4, 8, 9}, {2, 8, 12, 9}, {6, 12, 4, 12}}},
+     {{4}, {{4}, {4}, {4}}},
+     {{4}, {{4}, {4}, {4}}},
+     {{4}, {{4}, {4}, {4}}}},
+    {{{2, {4, 16}, -1, -1}, {{2, 8, 4, 9}, {2, 4, 8, 6}, {2, 12, 12, 3}}},
+     {{4}, {{4}, {4}, {4}}},
+     {{4}, {{4}, {4}, {4}}},
+     {{4}, {{4}, {4}, {4}}}}};

 std::vector<std::vector<InputShape>> dynamicInputShapes4D2Blocked = {
-    {
-        {{-1, 16, -1, -1}, {{2, 16, 4, 15}, {2, 16, 8, 12}, {3, 16, 12, 9}}},
-        {{4}, {{4}, {4}, {4}}},
-        {{4}, {{4}, {4}, {4}}},
-        {{4}, {{4}, {4}, {4}}}
-    }
-};
-
-const std::vector<CPUSpecificParams> cpuParamsWithBlock_4D = {
-    CPUSpecificParams({nChw16c}, {nChw16c}, {}, {}),
-    CPUSpecificParams({nChw8c}, {nChw8c}, {}, {}),
-    CPUSpecificParams({nhwc}, {nhwc}, {}, {}),
-    CPUSpecificParams({nchw}, {nchw}, {}, {})
-};
-
-const std::vector<CPUSpecificParams> cpuParams_4D = {
-    CPUSpecificParams({nhwc}, {nhwc}, {}, {}),
-
CPUSpecificParams({nchw}, {nchw}, {}, {}) -}; - -const auto staticSpaceToBatchParamsSet4D1 = ::testing::Combine( - ::testing::ValuesIn(static_shapes_to_test_representation(staticInputShapes4D1)), - ::testing::ValuesIn(blockShape4D1), - ::testing::ValuesIn(padsBegin4D1), - ::testing::ValuesIn(padsEnd4D1), - ::testing::ValuesIn(netPrecision), - ::testing::ValuesIn(cpuParams_4D)); - -const auto dynamicSpaceToBatchParamsSet4D1 = ::testing::Combine( - ::testing::ValuesIn(dynamicInputShapes4D1), - ::testing::ValuesIn(blockShape4D1), - ::testing::ValuesIn(padsBegin4D1), - ::testing::ValuesIn(padsEnd4D1), - ::testing::ValuesIn(netPrecision), - ::testing::ValuesIn(cpuParams_4D)); - -const auto dynamicSpaceToBatchParamsWithBlockedSet4D1 = ::testing::Combine( - ::testing::ValuesIn(dynamicInputShapes4D1Blocked), - ::testing::ValuesIn(blockShape4D1), - ::testing::ValuesIn(padsBegin4D1), - ::testing::ValuesIn(padsEnd4D1), - ::testing::ValuesIn(netPrecision), - ::testing::ValuesIn(cpuParamsWithBlock_4D)); - -const auto staticSpaceToBatchParamsSet4D2 = ::testing::Combine( - ::testing::ValuesIn(static_shapes_to_test_representation(staticInputShapes4D2)), - ::testing::ValuesIn(blockShape4D2), - ::testing::ValuesIn(padsBegin4D2), - ::testing::ValuesIn(padsEnd4D2), - ::testing::ValuesIn(netPrecision), - ::testing::ValuesIn(cpuParams_4D)); - -const auto dynamicSpaceToBatchParamsSet4D2 = ::testing::Combine( - ::testing::ValuesIn(dynamicInputShapes4D2), - ::testing::ValuesIn(blockShape4D2), - ::testing::ValuesIn(padsBegin4D2), - ::testing::ValuesIn(padsEnd4D2), - ::testing::ValuesIn(netPrecision), - ::testing::ValuesIn(cpuParams_4D)); - -const auto dynamicSpaceToBatchParamsWithBlockedSet4D2 = ::testing::Combine( - ::testing::ValuesIn(dynamicInputShapes4D2Blocked), - ::testing::ValuesIn(blockShape4D2), - ::testing::ValuesIn(padsBegin4D2), - ::testing::ValuesIn(padsEnd4D2), - ::testing::ValuesIn(netPrecision), - ::testing::ValuesIn(cpuParamsWithBlock_4D)); - -INSTANTIATE_TEST_SUITE_P(smoke_StaticSpaceToBatchCPULayerTestCase1_4D, SpaceToBatchCPULayerTest, - staticSpaceToBatchParamsSet4D1, SpaceToBatchCPULayerTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_DynamicSpaceToBatchCPULayerTestCase1_4D, SpaceToBatchCPULayerTest, - dynamicSpaceToBatchParamsSet4D1, SpaceToBatchCPULayerTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_DynamicSpaceToBatchCPULayerTestCaseWithBlocked1_4D, SpaceToBatchCPULayerTest, - dynamicSpaceToBatchParamsWithBlockedSet4D1, SpaceToBatchCPULayerTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_StaticSpaceToBatchCPULayerTestCase2_4D, SpaceToBatchCPULayerTest, - staticSpaceToBatchParamsSet4D2, SpaceToBatchCPULayerTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_DynamicSpaceToBatchCPULayerTestCase2_4D, SpaceToBatchCPULayerTest, - dynamicSpaceToBatchParamsSet4D2, SpaceToBatchCPULayerTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_DynamicSpaceToBatchCPULayerTestCaseWithBlocked2_4D, SpaceToBatchCPULayerTest, - dynamicSpaceToBatchParamsWithBlockedSet4D2, SpaceToBatchCPULayerTest::getTestCaseName); - -std::vector> staticInputShapes4DPE = { - {{1, 2, 9, 1}, {4}, {4}, {4}} -}; -INSTANTIATE_TEST_SUITE_P(smoke_StaticSpaceToBatch_4D_parallel_block_edge, SpaceToBatchCPULayerTest, - ::testing::Combine( - ::testing::ValuesIn(static_shapes_to_test_representation(staticInputShapes4DPE)), - ::testing::Values(std::vector{1, 4, 3, 1}), - ::testing::Values(std::vector{0, 1, 2, 0}), - ::testing::Values(std::vector{0, 1, 4, 0}), - ::testing::Values(Precision::FP32), - 
::testing::Values(CPUSpecificParams{})), - SpaceToBatchCPULayerTest::getTestCaseName); + {{{-1, 16, -1, -1}, {{2, 16, 4, 15}, {2, 16, 8, 12}, {3, 16, 12, 9}}}, + {{4}, {{4}, {4}, {4}}}, + {{4}, {{4}, {4}, {4}}}, + {{4}, {{4}, {4}, {4}}}}}; + +const std::vector cpuParamsWithBlock_4D = {CPUSpecificParams({nChw16c}, {nChw16c}, {}, {}), + CPUSpecificParams({nChw8c}, {nChw8c}, {}, {}), + CPUSpecificParams({nhwc}, {nhwc}, {}, {}), + CPUSpecificParams({nchw}, {nchw}, {}, {})}; + +const std::vector cpuParams_4D = {CPUSpecificParams({nhwc}, {nhwc}, {}, {}), + CPUSpecificParams({nchw}, {nchw}, {}, {})}; + +const auto staticSpaceToBatchParamsSet4D1 = + ::testing::Combine(::testing::ValuesIn(static_shapes_to_test_representation(staticInputShapes4D1)), + ::testing::ValuesIn(blockShape4D1), + ::testing::ValuesIn(padsBegin4D1), + ::testing::ValuesIn(padsEnd4D1), + ::testing::ValuesIn(netPrecision), + ::testing::ValuesIn(cpuParams_4D)); + +const auto dynamicSpaceToBatchParamsSet4D1 = ::testing::Combine(::testing::ValuesIn(dynamicInputShapes4D1), + ::testing::ValuesIn(blockShape4D1), + ::testing::ValuesIn(padsBegin4D1), + ::testing::ValuesIn(padsEnd4D1), + ::testing::ValuesIn(netPrecision), + ::testing::ValuesIn(cpuParams_4D)); + +const auto dynamicSpaceToBatchParamsWithBlockedSet4D1 = + ::testing::Combine(::testing::ValuesIn(dynamicInputShapes4D1Blocked), + ::testing::ValuesIn(blockShape4D1), + ::testing::ValuesIn(padsBegin4D1), + ::testing::ValuesIn(padsEnd4D1), + ::testing::ValuesIn(netPrecision), + ::testing::ValuesIn(cpuParamsWithBlock_4D)); + +const auto staticSpaceToBatchParamsSet4D2 = + ::testing::Combine(::testing::ValuesIn(static_shapes_to_test_representation(staticInputShapes4D2)), + ::testing::ValuesIn(blockShape4D2), + ::testing::ValuesIn(padsBegin4D2), + ::testing::ValuesIn(padsEnd4D2), + ::testing::ValuesIn(netPrecision), + ::testing::ValuesIn(cpuParams_4D)); + +const auto dynamicSpaceToBatchParamsSet4D2 = ::testing::Combine(::testing::ValuesIn(dynamicInputShapes4D2), + ::testing::ValuesIn(blockShape4D2), + ::testing::ValuesIn(padsBegin4D2), + ::testing::ValuesIn(padsEnd4D2), + ::testing::ValuesIn(netPrecision), + ::testing::ValuesIn(cpuParams_4D)); + +const auto dynamicSpaceToBatchParamsWithBlockedSet4D2 = + ::testing::Combine(::testing::ValuesIn(dynamicInputShapes4D2Blocked), + ::testing::ValuesIn(blockShape4D2), + ::testing::ValuesIn(padsBegin4D2), + ::testing::ValuesIn(padsEnd4D2), + ::testing::ValuesIn(netPrecision), + ::testing::ValuesIn(cpuParamsWithBlock_4D)); + +INSTANTIATE_TEST_SUITE_P(smoke_StaticSpaceToBatchCPULayerTestCase1_4D, + SpaceToBatchCPULayerTest, + staticSpaceToBatchParamsSet4D1, + SpaceToBatchCPULayerTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_DynamicSpaceToBatchCPULayerTestCase1_4D, + SpaceToBatchCPULayerTest, + dynamicSpaceToBatchParamsSet4D1, + SpaceToBatchCPULayerTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_DynamicSpaceToBatchCPULayerTestCaseWithBlocked1_4D, + SpaceToBatchCPULayerTest, + dynamicSpaceToBatchParamsWithBlockedSet4D1, + SpaceToBatchCPULayerTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_StaticSpaceToBatchCPULayerTestCase2_4D, + SpaceToBatchCPULayerTest, + staticSpaceToBatchParamsSet4D2, + SpaceToBatchCPULayerTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_DynamicSpaceToBatchCPULayerTestCase2_4D, + SpaceToBatchCPULayerTest, + dynamicSpaceToBatchParamsSet4D2, + SpaceToBatchCPULayerTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_DynamicSpaceToBatchCPULayerTestCaseWithBlocked2_4D, + 
SpaceToBatchCPULayerTest, + dynamicSpaceToBatchParamsWithBlockedSet4D2, + SpaceToBatchCPULayerTest::getTestCaseName); + +std::vector> staticInputShapes4DPE = {{{1, 2, 9, 1}, {4}, {4}, {4}}}; +INSTANTIATE_TEST_SUITE_P( + smoke_StaticSpaceToBatch_4D_parallel_block_edge, + SpaceToBatchCPULayerTest, + ::testing::Combine(::testing::ValuesIn(static_shapes_to_test_representation(staticInputShapes4DPE)), + ::testing::Values(std::vector{1, 4, 3, 1}), + ::testing::Values(std::vector{0, 1, 2, 0}), + ::testing::Values(std::vector{0, 1, 4, 0}), + ::testing::Values(ov::element::f32), + ::testing::Values(CPUSpecificParams{})), + SpaceToBatchCPULayerTest::getTestCaseName); const std::vector> blockShape5D = {{1, 1, 2, 2, 1}, {1, 2, 4, 1, 3}}; const std::vector> padsBegin5D = {{0, 0, 0, 0, 0}, {0, 0, 4, 0, 0}, {0, 0, 0, 2, 3}}; -const std::vector> padsEnd5D = {{0, 0, 0, 0, 0}, {0, 0, 0, 4, 3}, {0, 0, 4, 2, 3}}; +const std::vector> padsEnd5D = {{0, 0, 0, 0, 0}, {0, 0, 0, 4, 3}, {0, 0, 4, 2, 3}}; -std::vector> staticInputShapes5D = { - {{2, 16, 4, 6, 12}, {5}, {5}, {5}}, - {{1, 32, 8, 8, 6}, {5}, {5}, {5}}, - {{1, 16, 4, 12, 12}, {5}, {5}, {5}} -}; +std::vector> staticInputShapes5D = {{{2, 16, 4, 6, 12}, {5}, {5}, {5}}, + {{1, 32, 8, 8, 6}, {5}, {5}, {5}}, + {{1, 16, 4, 12, 12}, {5}, {5}, {5}}}; std::vector> dynamicInputShapes5D = { - { - {{-1, -1, -1, -1, -1}, {{2, 2, 12, 4, 15}, {4, 4, 8, 6, 9}, {3, 6, 4, 2, 12}}}, - {{5}, {{5}, {5}, {5}}}, - {{5}, {{5}, {5}, {5}}}, - {{5}, {{5}, {5}, {5}}} - }, - { - {{{1, 10}, {2, 20}, {4, 50}, -1, -1}, {{3, 12, 8, 6, 9}, {5, 10, 4, 8, 15}, {6, 8, 20, 4, 12}}}, - {{5}, {{5}, {5}, {5}}}, - {{5}, {{5}, {5}, {5}}}, - {{5}, {{5}, {5}, {5}}} - } -}; + {{{-1, -1, -1, -1, -1}, {{2, 2, 12, 4, 15}, {4, 4, 8, 6, 9}, {3, 6, 4, 2, 12}}}, + {{5}, {{5}, {5}, {5}}}, + {{5}, {{5}, {5}, {5}}}, + {{5}, {{5}, {5}, {5}}}}, + {{{{1, 10}, {2, 20}, {4, 50}, -1, -1}, {{3, 12, 8, 6, 9}, {5, 10, 4, 8, 15}, {6, 8, 20, 4, 12}}}, + {{5}, {{5}, {5}, {5}}}, + {{5}, {{5}, {5}, {5}}}, + {{5}, {{5}, {5}, {5}}}}}; std::vector> dynamicInputShapes5DBlocked = { - { - {{-1, 16, -1, -1, -1}, {{2, 16, 4, 6, 9}, {5, 16, 16, 4, 6}, {7, 16, 8, 2, 3}}}, - {{5}, {{5}, {5}, {5}}}, - {{5}, {{5}, {5}, {5}}}, - {{5}, {{5}, {5}, {5}}} - } -}; - -const std::vector cpuParamsWithBlock_5D = { - CPUSpecificParams({nCdhw16c}, {nCdhw16c}, {}, {}), - CPUSpecificParams({nCdhw8c}, {nCdhw8c}, {}, {}), - CPUSpecificParams({ndhwc}, {ndhwc}, {}, {}), - CPUSpecificParams({ncdhw}, {ncdhw}, {}, {}) -}; - -const std::vector cpuParams_5D = { - CPUSpecificParams({ndhwc}, {ndhwc}, {}, {}), - CPUSpecificParams({ncdhw}, {ncdhw}, {}, {}) -}; - -const auto staticSpaceToBatchParamsSet5D = ::testing::Combine( - ::testing::ValuesIn(static_shapes_to_test_representation(staticInputShapes5D)), - ::testing::ValuesIn(blockShape5D), - ::testing::ValuesIn(padsBegin5D), - ::testing::ValuesIn(padsEnd5D), - ::testing::ValuesIn(netPrecision), - ::testing::ValuesIn(cpuParamsWithBlock_5D)); - -const auto dynamicSpaceToBatchParamsSet5D = ::testing::Combine( - ::testing::ValuesIn(dynamicInputShapes5D), - ::testing::ValuesIn(blockShape5D), - ::testing::ValuesIn(padsBegin5D), - ::testing::ValuesIn(padsEnd5D), - ::testing::ValuesIn(netPrecision), - ::testing::ValuesIn(cpuParams_5D)); - -const auto dynamicSpaceToBatchParamsWithBlockedSet5D = ::testing::Combine( - ::testing::ValuesIn(dynamicInputShapes5DBlocked), - ::testing::ValuesIn(blockShape5D), - ::testing::ValuesIn(padsBegin5D), - ::testing::ValuesIn(padsEnd5D), - ::testing::ValuesIn(netPrecision), - 
::testing::ValuesIn(cpuParamsWithBlock_5D)); - -INSTANTIATE_TEST_SUITE_P(smoke_StaticSpaceToBatchCPULayerTestCase_5D, SpaceToBatchCPULayerTest, - staticSpaceToBatchParamsSet5D, SpaceToBatchCPULayerTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_DynamicSpaceToBatchCPULayerTestCase_5D, SpaceToBatchCPULayerTest, - dynamicSpaceToBatchParamsSet5D, SpaceToBatchCPULayerTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_DynamicSpaceToBatchCPULayerTestCaseWithBlocked_5D, SpaceToBatchCPULayerTest, - dynamicSpaceToBatchParamsWithBlockedSet5D, SpaceToBatchCPULayerTest::getTestCaseName); - + {{{-1, 16, -1, -1, -1}, {{2, 16, 4, 6, 9}, {5, 16, 16, 4, 6}, {7, 16, 8, 2, 3}}}, + {{5}, {{5}, {5}, {5}}}, + {{5}, {{5}, {5}, {5}}}, + {{5}, {{5}, {5}, {5}}}}}; + +const std::vector cpuParamsWithBlock_5D = {CPUSpecificParams({nCdhw16c}, {nCdhw16c}, {}, {}), + CPUSpecificParams({nCdhw8c}, {nCdhw8c}, {}, {}), + CPUSpecificParams({ndhwc}, {ndhwc}, {}, {}), + CPUSpecificParams({ncdhw}, {ncdhw}, {}, {})}; + +const std::vector cpuParams_5D = {CPUSpecificParams({ndhwc}, {ndhwc}, {}, {}), + CPUSpecificParams({ncdhw}, {ncdhw}, {}, {})}; + +const auto staticSpaceToBatchParamsSet5D = + ::testing::Combine(::testing::ValuesIn(static_shapes_to_test_representation(staticInputShapes5D)), + ::testing::ValuesIn(blockShape5D), + ::testing::ValuesIn(padsBegin5D), + ::testing::ValuesIn(padsEnd5D), + ::testing::ValuesIn(netPrecision), + ::testing::ValuesIn(cpuParamsWithBlock_5D)); + +const auto dynamicSpaceToBatchParamsSet5D = ::testing::Combine(::testing::ValuesIn(dynamicInputShapes5D), + ::testing::ValuesIn(blockShape5D), + ::testing::ValuesIn(padsBegin5D), + ::testing::ValuesIn(padsEnd5D), + ::testing::ValuesIn(netPrecision), + ::testing::ValuesIn(cpuParams_5D)); + +const auto dynamicSpaceToBatchParamsWithBlockedSet5D = + ::testing::Combine(::testing::ValuesIn(dynamicInputShapes5DBlocked), + ::testing::ValuesIn(blockShape5D), + ::testing::ValuesIn(padsBegin5D), + ::testing::ValuesIn(padsEnd5D), + ::testing::ValuesIn(netPrecision), + ::testing::ValuesIn(cpuParamsWithBlock_5D)); + +INSTANTIATE_TEST_SUITE_P(smoke_StaticSpaceToBatchCPULayerTestCase_5D, + SpaceToBatchCPULayerTest, + staticSpaceToBatchParamsSet5D, + SpaceToBatchCPULayerTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_DynamicSpaceToBatchCPULayerTestCase_5D, + SpaceToBatchCPULayerTest, + dynamicSpaceToBatchParamsSet5D, + SpaceToBatchCPULayerTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_DynamicSpaceToBatchCPULayerTestCaseWithBlocked_5D, + SpaceToBatchCPULayerTest, + dynamicSpaceToBatchParamsWithBlockedSet5D, + SpaceToBatchCPULayerTest::getTestCaseName); } // namespace -} // namespace CPULayerTestsDefinitions +} // namespace test +} // namespace ov diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/space_to_depth.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/space_to_depth.cpp index 5b3b4ea1632ab9..a9923fae3d89fa 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/space_to_depth.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/space_to_depth.cpp @@ -2,34 +2,27 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "shared_test_classes/single_layer/space_to_depth.hpp" +#include "shared_test_classes/base/ov_subgraph.hpp" #include "test_utils/cpu_test_utils.hpp" #include "test_utils/filter_cpu_info.hpp" -#include "shared_test_classes/base/ov_subgraph.hpp" - -using namespace InferenceEngine; using namespace CPUTestUtils; -using namespace ngraph::opset3; using 
namespace ov::test;

-namespace CPULayerTestsDefinitions {
-
-using SpaceToDepthLayerCPUTestParamSet = std::tuple<
-        InputShape,                         // Input shape
-        ElementType,                        // Input element type
-        SpaceToDepth::SpaceToDepthMode,     // Mode
-        std::size_t,                        // Block size
-        CPUSpecificParams
->;
+using SpaceToDepthLayerCPUTestParamSet =
+    std::tuple<InputShape, ElementType, ov::op::v0::SpaceToDepth::SpaceToDepthMode, std::size_t, CPUSpecificParams>;

 class SpaceToDepthLayerCPUTest : public testing::WithParamInterface<SpaceToDepthLayerCPUTestParamSet>,
-                                 virtual public ov::test::SubgraphBaseTest, public CPUTestsBase {
+                                 virtual public ov::test::SubgraphBaseTest,
+                                 public CPUTestsBase {
 public:
     static std::string getTestCaseName(testing::TestParamInfo<SpaceToDepthLayerCPUTestParamSet> obj) {
         InputShape shapes;
         ElementType inType;
-        SpaceToDepth::SpaceToDepthMode mode;
+        ov::op::v0::SpaceToDepth::SpaceToDepthMode mode;
         std::size_t blockSize;
         CPUSpecificParams cpuParams;
         std::tie(shapes, inType, mode, blockSize, cpuParams) = obj.param;
@@ -42,25 +35,26 @@ class SpaceToDepthLayerCPUTest : public testing::WithParamInterfaceGetParam();
@@ -69,7 +63,7 @@ class SpaceToDepthLayerCPUTest : public testing::WithParamInterface(inType, shape));
     }
-    auto d2s = std::make_shared<SpaceToDepth> (params[0], mode, blockSize);
+    auto d2s = std::make_shared<ov::op::v0::SpaceToDepth>(params[0], mode, blockSize);
     function = makeNgraphFunction(inType, params, d2s, "SpaceToDepthCPU");
 }
 };
@@ -89,110 +83,89 @@ TEST_P(SpaceToDepthLayerCPUTest, CompareWithRefs) {

 namespace {

-const auto cpuParams_nChw16c = CPUSpecificParams {{nChw16c}, {nChw16c}, {}, {}};
-const auto cpuParams_nCdhw16c = CPUSpecificParams {{nCdhw16c}, {nCdhw16c}, {}, {}};
+const auto cpuParams_nChw16c = CPUSpecificParams{{nChw16c}, {nChw16c}, {}, {}};
+const auto cpuParams_nCdhw16c = CPUSpecificParams{{nCdhw16c}, {nCdhw16c}, {}, {}};

-const auto cpuParams_nChw8c = CPUSpecificParams {{nChw8c}, {nChw8c}, {}, {}};
-const auto cpuParams_nCdhw8c = CPUSpecificParams {{nCdhw8c}, {nCdhw8c}, {}, {}};
+const auto cpuParams_nChw8c = CPUSpecificParams{{nChw8c}, {nChw8c}, {}, {}};
+const auto cpuParams_nCdhw8c = CPUSpecificParams{{nCdhw8c}, {nCdhw8c}, {}, {}};

-const auto cpuParams_nhwc = CPUSpecificParams {{nhwc}, {nhwc}, {}, {}};
-const auto cpuParams_ndhwc = CPUSpecificParams {{ndhwc}, {ndhwc}, {}, {}};
+const auto cpuParams_nhwc = CPUSpecificParams{{nhwc}, {nhwc}, {}, {}};
+const auto cpuParams_ndhwc = CPUSpecificParams{{ndhwc}, {ndhwc}, {}, {}};

-const auto cpuParams_nchw = CPUSpecificParams {{nchw}, {nchw}, {}, {}};
-const auto cpuParams_ncdhw = CPUSpecificParams {{ncdhw}, {ncdhw}, {}, {}};
+const auto cpuParams_nchw = CPUSpecificParams{{nchw}, {nchw}, {}, {}};
+const auto cpuParams_ncdhw = CPUSpecificParams{{ncdhw}, {ncdhw}, {}, {}};

-const std::vector<CPUSpecificParams> CPUParams4D = {
-    cpuParams_nhwc,
-    cpuParams_nchw
-};
+const std::vector<CPUSpecificParams> CPUParams4D = {cpuParams_nhwc, cpuParams_nchw};

-const std::vector<CPUSpecificParams> CPUParamsBlocked4D = {
-    cpuParams_nChw16c,
-    cpuParams_nChw8c,
-    cpuParams_nhwc
-};
+const std::vector<CPUSpecificParams> CPUParamsBlocked4D = {cpuParams_nChw16c, cpuParams_nChw8c, cpuParams_nhwc};

-const std::vector<CPUSpecificParams> CPUParams5D = {
-    cpuParams_ndhwc,
-    cpuParams_ncdhw
-};
+const std::vector<CPUSpecificParams> CPUParams5D = {cpuParams_ndhwc, cpuParams_ncdhw};

-const std::vector<CPUSpecificParams> CPUParamsBlocked5D = {
-    cpuParams_nCdhw16c,
-    cpuParams_nCdhw8c,
-    cpuParams_ndhwc
-};
+const std::vector<CPUSpecificParams> CPUParamsBlocked5D = {cpuParams_nCdhw16c, cpuParams_nCdhw8c, cpuParams_ndhwc};

-const std::vector<ElementType> inputElementType = {
-    ElementType::f32,
-    ElementType::bf16,
-    ElementType::i8
-};
+const std::vector<ElementType> inputElementType = {ElementType::f32, ElementType::bf16, ElementType::i8};

-const std::vector<SpaceToDepth::SpaceToDepthMode> SpaceToDepthModes = {
-
SpaceToDepth::SpaceToDepthMode::BLOCKS_FIRST, - SpaceToDepth::SpaceToDepthMode::DEPTH_FIRST -}; +const std::vector SpaceToDepthModes = { + ov::op::v0::SpaceToDepth::SpaceToDepthMode::BLOCKS_FIRST, + ov::op::v0::SpaceToDepth::SpaceToDepthMode::DEPTH_FIRST}; /* *========================* Static Shapes Tests *========================* */ namespace static_shapes { -const std::vector inputShapesBS2_4D = {{1, 16, 2, 2}, {1, 16, 4, 2}, - {1, 32, 6, 8}, {2, 32, 4, 6}, - {2, 48, 4, 4}, {2, 64, 8, 2}}; +const std::vector inputShapesBS2_4D = + {{1, 16, 2, 2}, {1, 16, 4, 2}, {1, 32, 6, 8}, {2, 32, 4, 6}, {2, 48, 4, 4}, {2, 64, 8, 2}}; -const std::vector inputShapesBS3_4D = { - {1, 2, 3, 3}, {1, 3, 3, 6}, {1, 5, 6, 3}, {2, 5, 9, 3}, {3, 5, 6, 6}}; +const std::vector inputShapesBS3_4D = {{1, 2, 3, 3}, {1, 3, 3, 6}, {1, 5, 6, 3}, {2, 5, 9, 3}, {3, 5, 6, 6}}; -INSTANTIATE_TEST_SUITE_P(smoke_CPUSpaceToDepthBS2_4D, SpaceToDepthLayerCPUTest, - testing::Combine( - testing::ValuesIn(static_shapes_to_test_representation(inputShapesBS2_4D)), - testing::ValuesIn(inputElementType), - testing::ValuesIn(SpaceToDepthModes), - testing::Values(1, 2), - testing::ValuesIn(filterCPUInfoForDevice(CPUParamsBlocked4D))), +INSTANTIATE_TEST_SUITE_P(smoke_CPUSpaceToDepthBS2_4D, + SpaceToDepthLayerCPUTest, + testing::Combine(testing::ValuesIn(static_shapes_to_test_representation(inputShapesBS2_4D)), + testing::ValuesIn(inputElementType), + testing::ValuesIn(SpaceToDepthModes), + testing::Values(1, 2), + testing::ValuesIn(filterCPUInfoForDevice(CPUParamsBlocked4D))), SpaceToDepthLayerCPUTest::getTestCaseName); -INSTANTIATE_TEST_SUITE_P(smoke_CPUSpaceToDepthStaticBS3_4D, SpaceToDepthLayerCPUTest, - testing::Combine( - testing::ValuesIn(static_shapes_to_test_representation(inputShapesBS3_4D)), - testing::ValuesIn(inputElementType), - testing::ValuesIn(SpaceToDepthModes), - testing::Values(1, 3), - testing::ValuesIn(filterCPUInfoForDevice(CPUParams4D))), +INSTANTIATE_TEST_SUITE_P(smoke_CPUSpaceToDepthStaticBS3_4D, + SpaceToDepthLayerCPUTest, + testing::Combine(testing::ValuesIn(static_shapes_to_test_representation(inputShapesBS3_4D)), + testing::ValuesIn(inputElementType), + testing::ValuesIn(SpaceToDepthModes), + testing::Values(1, 3), + testing::ValuesIn(filterCPUInfoForDevice(CPUParams4D))), SpaceToDepthLayerCPUTest::getTestCaseName); -const std::vector inputShapesBS2_5D = { - {1, 16, 2, 2, 2}, {1, 16, 4, 4, 2}, {1, 32, 2, 6, 2}, {2, 32, 4, 2, 2}, {1, 48, 6, 2, 2}, {2, 64, 2, 2, 6} -}; - -const std::vector inputShapesBS3_5D = { - {1, 2, 3, 3, 3}, {1, 2, 3, 6, 9}, {1, 5, 6, 3, 3}, {2, 5, 3, 9, 3}, {3, 5, 3, 3, 6} -}; - -INSTANTIATE_TEST_SUITE_P(smoke_CPUSpaceToDepthStaticBS2_5D, SpaceToDepthLayerCPUTest, - testing::Combine( - testing::ValuesIn(static_shapes_to_test_representation(inputShapesBS2_5D)), - testing::ValuesIn(inputElementType), - testing::ValuesIn(SpaceToDepthModes), - testing::Values(1, 2), - testing::ValuesIn(filterCPUInfoForDevice(CPUParamsBlocked5D))), +const std::vector inputShapesBS2_5D = + {{1, 16, 2, 2, 2}, {1, 16, 4, 4, 2}, {1, 32, 2, 6, 2}, {2, 32, 4, 2, 2}, {1, 48, 6, 2, 2}, {2, 64, 2, 2, 6}}; + +const std::vector inputShapesBS3_5D = {{1, 2, 3, 3, 3}, + {1, 2, 3, 6, 9}, + {1, 5, 6, 3, 3}, + {2, 5, 3, 9, 3}, + {3, 5, 3, 3, 6}}; + +INSTANTIATE_TEST_SUITE_P(smoke_CPUSpaceToDepthStaticBS2_5D, + SpaceToDepthLayerCPUTest, + testing::Combine(testing::ValuesIn(static_shapes_to_test_representation(inputShapesBS2_5D)), + testing::ValuesIn(inputElementType), + testing::ValuesIn(SpaceToDepthModes), + testing::Values(1, 
2), + testing::ValuesIn(filterCPUInfoForDevice(CPUParamsBlocked5D))), SpaceToDepthLayerCPUTest::getTestCaseName); -INSTANTIATE_TEST_SUITE_P(smoke_CPUSpaceToDepthStaticBS3_5D, SpaceToDepthLayerCPUTest, - testing::Combine( - testing::ValuesIn(static_shapes_to_test_representation(inputShapesBS3_5D)), - testing::ValuesIn(inputElementType), - testing::ValuesIn(SpaceToDepthModes), - testing::Values(1, 3), - testing::ValuesIn(filterCPUInfoForDevice(CPUParams5D))), +INSTANTIATE_TEST_SUITE_P(smoke_CPUSpaceToDepthStaticBS3_5D, + SpaceToDepthLayerCPUTest, + testing::Combine(testing::ValuesIn(static_shapes_to_test_representation(inputShapesBS3_5D)), + testing::ValuesIn(inputElementType), + testing::ValuesIn(SpaceToDepthModes), + testing::Values(1, 3), + testing::ValuesIn(filterCPUInfoForDevice(CPUParams5D))), SpaceToDepthLayerCPUTest::getTestCaseName); -} // namespace static_shapes +} // namespace static_shapes /* *========================* *==================* *========================* */ - /* *========================* Dynamic Shapes Tests *========================* */ namespace dynamic_shapes { @@ -223,52 +196,51 @@ const std::vector inputShapesBlocked5D = { {{1, 32, 4, 16, 10}, {1, 32, 18, 6, 14}, {3, 32, 2, 14, 12}, {1, 32, 18, 6, 14}}}, // target }; -INSTANTIATE_TEST_SUITE_P(smoke_CPUSpaceToDepthDynamic4D, SpaceToDepthLayerCPUTest, - testing::Combine( - testing::ValuesIn(inputShapes4D), - testing::ValuesIn(inputElementType), - testing::ValuesIn(SpaceToDepthModes), - testing::Values(1, 2, 3), - testing::ValuesIn(filterCPUInfoForDevice(CPUParams4D))), +INSTANTIATE_TEST_SUITE_P(smoke_CPUSpaceToDepthDynamic4D, + SpaceToDepthLayerCPUTest, + testing::Combine(testing::ValuesIn(inputShapes4D), + testing::ValuesIn(inputElementType), + testing::ValuesIn(SpaceToDepthModes), + testing::Values(1, 2, 3), + testing::ValuesIn(filterCPUInfoForDevice(CPUParams4D))), + SpaceToDepthLayerCPUTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_CPUSpaceToDepthDynamicBlocksFirstBlocked4D, + SpaceToDepthLayerCPUTest, + testing::Combine(testing::Values(inputShapes4D[1]), + testing::ValuesIn(inputElementType), + testing::Values(ov::op::v0::SpaceToDepth::SpaceToDepthMode::BLOCKS_FIRST), + testing::Values(1, 2, 3), + testing::ValuesIn(filterCPUInfoForDevice(CPUParamsBlocked4D))), SpaceToDepthLayerCPUTest::getTestCaseName); -INSTANTIATE_TEST_SUITE_P(smoke_CPUSpaceToDepthDynamicBlocksFirstBlocked4D, SpaceToDepthLayerCPUTest, - testing::Combine( - testing::Values(inputShapes4D[1]), - testing::ValuesIn(inputElementType), - testing::Values(SpaceToDepth::SpaceToDepthMode::BLOCKS_FIRST), - testing::Values(1, 2, 3), - testing::ValuesIn(filterCPUInfoForDevice(CPUParamsBlocked4D))), - SpaceToDepthLayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_CPUSpaceToDepthDynamicDepthFirstBlocked4D, SpaceToDepthLayerCPUTest, - testing::Combine( - testing::Values(inputShapes4D[1]), - testing::ValuesIn(inputElementType), - testing::Values(SpaceToDepth::SpaceToDepthMode::DEPTH_FIRST), - testing::Values(1, 2), - testing::ValuesIn(filterCPUInfoForDevice(CPUParamsBlocked4D))), +INSTANTIATE_TEST_SUITE_P(smoke_CPUSpaceToDepthDynamicDepthFirstBlocked4D, + SpaceToDepthLayerCPUTest, + testing::Combine(testing::Values(inputShapes4D[1]), + testing::ValuesIn(inputElementType), + testing::Values(ov::op::v0::SpaceToDepth::SpaceToDepthMode::DEPTH_FIRST), + testing::Values(1, 2), + testing::ValuesIn(filterCPUInfoForDevice(CPUParamsBlocked4D))), SpaceToDepthLayerCPUTest::getTestCaseName); 
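The INSTANTIATE_TEST_SUITE_P calls above and below all follow one pattern: testing::Combine takes the cartesian product of the parameter axes (shapes, element types, modes, block sizes, CPU formats) and instantiates the fixture once per resulting tuple. A minimal self-contained sketch of that mechanism, using a hypothetical fixture and axes rather than the ones from this patch:

    #include <gtest/gtest.h>
    #include <string>
    #include <tuple>

    using DemoParams = std::tuple<int, std::string>;  // block size, mode name

    class DemoSuite : public testing::TestWithParam<DemoParams> {};

    TEST_P(DemoSuite, Runs) {
        int blockSize;
        std::string mode;
        std::tie(blockSize, mode) = GetParam();
        EXPECT_GT(blockSize, 0) << mode;
    }

    // 2 block sizes x 2 modes => 4 generated test instances.
    INSTANTIATE_TEST_SUITE_P(DemoProduct,
                             DemoSuite,
                             testing::Combine(testing::Values(1, 2),
                                              testing::Values(std::string("BLOCKS_FIRST"),
                                                              std::string("DEPTH_FIRST"))));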
-INSTANTIATE_TEST_SUITE_P(smoke_CPUSpaceToDepthDynamic5D, SpaceToDepthLayerCPUTest, - testing::Combine( - testing::ValuesIn(inputShapes5D), - testing::ValuesIn(inputElementType), - testing::ValuesIn(SpaceToDepthModes), - testing::Values(1, 2, 3), - testing::ValuesIn(filterCPUInfoForDevice(CPUParams5D))), +INSTANTIATE_TEST_SUITE_P(smoke_CPUSpaceToDepthDynamic5D, + SpaceToDepthLayerCPUTest, + testing::Combine(testing::ValuesIn(inputShapes5D), + testing::ValuesIn(inputElementType), + testing::ValuesIn(SpaceToDepthModes), + testing::Values(1, 2, 3), + testing::ValuesIn(filterCPUInfoForDevice(CPUParams5D))), SpaceToDepthLayerCPUTest::getTestCaseName); -INSTANTIATE_TEST_SUITE_P(smoke_CPUSpaceToDepthDynamicCPUSpecific5D, SpaceToDepthLayerCPUTest, - testing::Combine( - testing::ValuesIn(inputShapesBlocked5D), - testing::ValuesIn(inputElementType), - testing::ValuesIn(SpaceToDepthModes), - testing::Values(1, 2), - testing::ValuesIn(filterCPUInfoForDevice(CPUParamsBlocked5D))), +INSTANTIATE_TEST_SUITE_P(smoke_CPUSpaceToDepthDynamicCPUSpecific5D, + SpaceToDepthLayerCPUTest, + testing::Combine(testing::ValuesIn(inputShapesBlocked5D), + testing::ValuesIn(inputElementType), + testing::ValuesIn(SpaceToDepthModes), + testing::Values(1, 2), + testing::ValuesIn(filterCPUInfoForDevice(CPUParamsBlocked5D))), SpaceToDepthLayerCPUTest::getTestCaseName); -} // namespace dynamic_shapes +} // namespace dynamic_shapes /* *========================* *==================* *========================* */ -} // namespace -} // namespace CPULayerTestsDefinitions +} // namespace diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/split.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/split.cpp index 47417778ff1156..ac2b9cef88ec44 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/split.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/split.cpp @@ -3,25 +3,22 @@ // #include "shared_test_classes/base/ov_subgraph.hpp" -#include "ov_models/builders.hpp" #include "test_utils/cpu_test_utils.hpp" using namespace ov::test; using namespace CPUTestUtils; -namespace CPULayerTestsDefinitions { - -typedef std::tuple< - size_t, // Num splits - int64_t, // Axis - ElementType, // Net precision - InputShape, // Input shapes - std::vector, // Used outputs indices - CPUSpecificParams -> splitCPUTestParams; +typedef std::tuple, // Used outputs indices + CPUSpecificParams> + splitCPUTestParams; class SplitLayerCPUTest : public testing::WithParamInterface, - virtual public SubgraphBaseTest, public CPUTestsBase { + virtual public SubgraphBaseTest, + public CPUTestsBase { public: static std::string getTestCaseName(testing::TestParamInfo obj) { size_t numSplits; @@ -75,21 +72,22 @@ class SplitLayerCPUTest : public testing::WithParamInterface for (auto&& shape : inputDynamicShapes) params.push_back(std::make_shared(netPrecision, shape)); - auto split_axis_op = std::make_shared(ov::element::Type_t::i64, ov::Shape{}, std::vector{axis}); + auto split_axis_op = + std::make_shared(ov::element::Type_t::i64, ov::Shape{}, std::vector{axis}); auto split = std::make_shared(params[0], split_axis_op, numSplits); - ngraph::ResultVector results; + ov::ResultVector results; for (size_t i = 0; i < outIndices.size(); i++) { - // This WA is necessary because result nodes connected to the same output of the split node (or any node) are deduplicated - // on the CNNNetwork level. It might not be needed when the CPU plugin moves completely to nGraph. 
- // This is still a single layer test since the Relu nodes are added only as a WA. + // This WA is necessary because result nodes connected to the same output of the split node (or any node) + // are deduplicated on the CNNNetwork level. It might not be needed when the CPU plugin moves completely to + // model. This is still a single layer test since the Relu nodes are added only as a WA. - auto fakeEltwise = std::make_shared(split->output(outIndices[i])); - results.push_back(std::make_shared(fakeEltwise)); + auto fakeEltwise = std::make_shared(split->output(outIndices[i])); + results.push_back(std::make_shared(fakeEltwise)); } split->get_rt_info() = getCPUInfo(); - function = std::make_shared(results, params, "split"); + function = std::make_shared(results, params, "split"); } }; @@ -124,477 +122,360 @@ const auto blocked16_4D_ref = CPUSpecificParams{{nChw16c}, {nChw16c}, {}, "ref"} const auto blocked16_5D_ref = CPUSpecificParams{{nCdhw16c}, {nCdhw16c}, {}, "ref"}; // List of precisions natively supported by onednn. -const std::vector netPrecisions = { - ElementType::i8, - ElementType::i32, - ElementType::f32, - ElementType::bf16 -}; +const std::vector netPrecisions = {ElementType::i8, ElementType::i32, ElementType::f32, ElementType::bf16}; const std::vector> outIndices3 = {{0, 1, 2}, {0, 1, 1, 0, 2}, {0, 0, 0, 2}}; const std::vector> outIndices4 = {{0, 1, 2, 3}, {0, 1, 1, 0, 2, 3}, {0, 0, 0, 2, 3}}; const std::vector inputShapes4D_Nspc2NcspSpecial = { - { {}, {{3, 8, 11, 9}} }, - { - // dynamic - {-1, -1, -1, -1}, - // target - { - {1, 4, 5, 7}, - {3, 8, 5, 9}, - {5, 16, 1, 8} - } - }, - { - // dynamic - {{1, 5}, {1, 64}, {1, 25}, {2, 10}}, - // target - { - {2, 8, 5, 7}, - {1, 4, 10, 2}, - {3, 16, 5, 9} - } - }, - { - // dynamic - {{1, 5}, 8, 5, 7}, - // target - { - {2, 8, 5, 7}, - {1, 8, 5, 7}, - {2, 8, 5, 7}, - } - }, + {{}, {{3, 8, 11, 9}}}, + {// dynamic + {-1, -1, -1, -1}, + // target + {{1, 4, 5, 7}, {3, 8, 5, 9}, {5, 16, 1, 8}}}, + {// dynamic + {{1, 5}, {1, 64}, {1, 25}, {2, 10}}, + // target + {{2, 8, 5, 7}, {1, 4, 10, 2}, {3, 16, 5, 9}}}, + {// dynamic + {{1, 5}, 8, 5, 7}, + // target + { + {2, 8, 5, 7}, + {1, 8, 5, 7}, + {2, 8, 5, 7}, + }}, }; -INSTANTIATE_TEST_SUITE_P(smoke_Split4D_CPU_Nspc2NcspSpecial, SplitLayerCPUTest, - ::testing::Combine( - ::testing::Values(4), - ::testing::Values(1), - ::testing::ValuesIn(netPrecisions), - ::testing::ValuesIn(inputShapes4D_Nspc2NcspSpecial), - ::testing::ValuesIn(outIndices4), - ::testing::Values(perChannelsToPlanar_4D)), - SplitLayerCPUTest::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_Split4D_CPU_Nspc2NcspSpecial, + SplitLayerCPUTest, + ::testing::Combine(::testing::Values(4), + ::testing::Values(1), + ::testing::ValuesIn(netPrecisions), + ::testing::ValuesIn(inputShapes4D_Nspc2NcspSpecial), + ::testing::ValuesIn(outIndices4), + ::testing::Values(perChannelsToPlanar_4D)), + SplitLayerCPUTest::getTestCaseName); const std::vector inputShapes5D_Nspc2NcspSpecial = { - { {}, {{3, 9, 5, 9, 11}} }, - { - // dynamic - {-1, -1, -1, -1, -1}, - // target - { - {1, 12, 5, 7, 5}, - {3, 6, 8, 9, 1}, - {5, 9, 1, 8, 2} - } - }, - { - // dynamic - {{1, 5}, {1, 64}, {1, 25}, {2, 10}, {1, 64}}, - // target - { - {2, 6, 5, 7, 7}, - {1, 3, 10, 2, 11}, - {3, 9, 4, 9, 8} - } - }, + {{}, {{3, 9, 5, 9, 11}}}, + {// dynamic + {-1, -1, -1, -1, -1}, + // target + {{1, 12, 5, 7, 5}, {3, 6, 8, 9, 1}, {5, 9, 1, 8, 2}}}, + {// dynamic + {{1, 5}, {1, 64}, {1, 25}, {2, 10}, {1, 64}}, + // target + {{2, 6, 5, 7, 7}, {1, 3, 10, 2, 11}, {3, 9, 4, 9, 8}}}, }; 
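The Relu workaround described in the comment near the top of this file amounts to giving every requested Split output its own consumer in front of the Result node, so that duplicate Results are not merged away. A short sketch of that subgraph construction with plain opset ops; the shapes, output indices, and function name are illustrative, not taken from the test:

    #include <cstdint>
    #include <memory>
    #include <vector>

    #include <openvino/core/model.hpp>
    #include <openvino/op/constant.hpp>
    #include <openvino/op/parameter.hpp>
    #include <openvino/op/relu.hpp>
    #include <openvino/op/result.hpp>
    #include <openvino/op/split.hpp>

    std::shared_ptr<ov::Model> make_split_wa_model() {
        auto param = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{1, 4, 8});
        auto axis = std::make_shared<ov::op::v0::Constant>(ov::element::i64,
                                                           ov::Shape{},
                                                           std::vector<int64_t>{1});
        auto split = std::make_shared<ov::op::v1::Split>(param, axis, 2);
        ov::ResultVector results;
        // Output 0 is requested twice; the Relu in between keeps the two
        // Results distinct instead of letting them be deduplicated into one.
        for (size_t idx : {0, 0, 1}) {
            auto relu = std::make_shared<ov::op::v0::Relu>(split->output(idx));
            results.push_back(std::make_shared<ov::op::v0::Result>(relu));
        }
        return std::make_shared<ov::Model>(results, ov::ParameterVector{param}, "split_wa");
    }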
-INSTANTIATE_TEST_SUITE_P(smoke_Split5D_CPU_Nspc2NcspSpecial, SplitLayerCPUTest, - ::testing::Combine( - ::testing::Values(3), - ::testing::Values(1), - ::testing::ValuesIn(netPrecisions), - ::testing::ValuesIn(inputShapes5D_Nspc2NcspSpecial), - ::testing::ValuesIn(outIndices3), - ::testing::Values(perChannelsToPlanar_5D)), - SplitLayerCPUTest::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_Split5D_CPU_Nspc2NcspSpecial, + SplitLayerCPUTest, + ::testing::Combine(::testing::Values(3), + ::testing::Values(1), + ::testing::ValuesIn(netPrecisions), + ::testing::ValuesIn(inputShapes5D_Nspc2NcspSpecial), + ::testing::ValuesIn(outIndices3), + ::testing::Values(perChannelsToPlanar_5D)), + SplitLayerCPUTest::getTestCaseName); const std::vector inputShapes4D_planar = { - { {}, {{3, 24, 24, 9}} }, - { - // dynamic - {-1, -1, -1, -1}, - // target - { - {1, 15, 12, 9}, - {3, 1, 9, 12}, - {5, 5, 6, 6} - } - }, - { - // dynamic - {{1, 5}, {1, 64}, {1, 48}, {2, 48}}, - // target - { - {2, 5, 6, 9}, - {1, 7, 12, 6}, - {3, 11, 9, 3} - } - }, - { - // dynamic - {{1, 5}, 5, 6, 9}, - // target - { - {2, 5, 6, 9}, - {1, 5, 6, 9}, - } - }, + {{}, {{3, 24, 24, 9}}}, + {// dynamic + {-1, -1, -1, -1}, + // target + {{1, 15, 12, 9}, {3, 1, 9, 12}, {5, 5, 6, 6}}}, + {// dynamic + {{1, 5}, {1, 64}, {1, 48}, {2, 48}}, + // target + {{2, 5, 6, 9}, {1, 7, 12, 6}, {3, 11, 9, 3}}}, + {// dynamic + {{1, 5}, 5, 6, 9}, + // target + { + {2, 5, 6, 9}, + {1, 5, 6, 9}, + }}, }; -INSTANTIATE_TEST_SUITE_P(smoke_Split4D_CPU_planar, SplitLayerCPUTest, - ::testing::Combine( - ::testing::Values(3), - ::testing::Values(2, 3), - ::testing::ValuesIn(netPrecisions), - ::testing::ValuesIn(inputShapes4D_planar), - ::testing::ValuesIn(outIndices3), - ::testing::Values(planar_4D_ref, perChannels_4D)), - SplitLayerCPUTest::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_Split4D_CPU_planar, + SplitLayerCPUTest, + ::testing::Combine(::testing::Values(3), + ::testing::Values(2, 3), + ::testing::ValuesIn(netPrecisions), + ::testing::ValuesIn(inputShapes4D_planar), + ::testing::ValuesIn(outIndices3), + ::testing::Values(planar_4D_ref, perChannels_4D)), + SplitLayerCPUTest::getTestCaseName); const std::vector inputShapes4D_block = { - { {}, {{3, 16, 12, 12}} }, - { - // dynamic - {-1, 16, -1, -1}, - // target - { - {1, 16, 12, 12}, - {3, 16, 12, 12}, - {5, 16, 12, 12} - } - }, - { - // dynamic - {{1, 5}, 16, {1, 48}, {2, 24}}, - // target - { - {2, 16, 12, 12}, - {1, 16, 12, 12}, - {3, 16, 12, 12} - } - }, - { - // dynamic - {{1, 5}, 16, 12, 12}, - // target - { - {2, 16, 12, 12}, - {1, 16, 12, 12} - } - }, + {{}, {{3, 16, 12, 12}}}, + {// dynamic + {-1, 16, -1, -1}, + // target + {{1, 16, 12, 12}, {3, 16, 12, 12}, {5, 16, 12, 12}}}, + {// dynamic + {{1, 5}, 16, {1, 48}, {2, 24}}, + // target + {{2, 16, 12, 12}, {1, 16, 12, 12}, {3, 16, 12, 12}}}, + {// dynamic + {{1, 5}, 16, 12, 12}, + // target + {{2, 16, 12, 12}, {1, 16, 12, 12}}}, }; -INSTANTIATE_TEST_SUITE_P(smoke_Split4D_CPU_Block8, SplitLayerCPUTest, - ::testing::Combine( - ::testing::Values(3), - ::testing::Values(2, 3), - ::testing::ValuesIn(netPrecisions), - ::testing::ValuesIn(inputShapes4D_block), - ::testing::ValuesIn(outIndices3), - ::testing::Values(blocked8_4D_ref)), - SplitLayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Split4D_CPU_Block16, SplitLayerCPUTest, - ::testing::Combine( - ::testing::Values(4), - ::testing::Values(2, 3), - ::testing::ValuesIn(netPrecisions), - ::testing::ValuesIn(inputShapes4D_block), - ::testing::ValuesIn(outIndices4), - 
::testing::Values(blocked16_4D_ref)), - SplitLayerCPUTest::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_Split4D_CPU_Block8, + SplitLayerCPUTest, + ::testing::Combine(::testing::Values(3), + ::testing::Values(2, 3), + ::testing::ValuesIn(netPrecisions), + ::testing::ValuesIn(inputShapes4D_block), + ::testing::ValuesIn(outIndices3), + ::testing::Values(blocked8_4D_ref)), + SplitLayerCPUTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_Split4D_CPU_Block16, + SplitLayerCPUTest, + ::testing::Combine(::testing::Values(4), + ::testing::Values(2, 3), + ::testing::ValuesIn(netPrecisions), + ::testing::ValuesIn(inputShapes4D_block), + ::testing::ValuesIn(outIndices4), + ::testing::Values(blocked16_4D_ref)), + SplitLayerCPUTest::getTestCaseName); const std::vector inputShapes5D_planar = { - { {}, {{3, 5, 3, 6, 12}} }, - { - // dynamic - {-1, -1, -1, -1, -1}, - // target - { - {1, 15, 12, 3, 9}, - {3, 1, 6, 12, 3}, - {5, 5, 6, 6, 6} - } - }, - { - // dynamic - {{1, 5}, {1, 64}, {1, 48}, {2, 48}, {1, 40}}, - // target - { - {2, 5, 12, 3, 6}, - {1, 7, 12, 6, 9}, - {3, 11, 9, 3, 30} - } - }, + {{}, {{3, 5, 3, 6, 12}}}, + {// dynamic + {-1, -1, -1, -1, -1}, + // target + {{1, 15, 12, 3, 9}, {3, 1, 6, 12, 3}, {5, 5, 6, 6, 6}}}, + {// dynamic + {{1, 5}, {1, 64}, {1, 48}, {2, 48}, {1, 40}}, + // target + {{2, 5, 12, 3, 6}, {1, 7, 12, 6, 9}, {3, 11, 9, 3, 30}}}, }; -INSTANTIATE_TEST_SUITE_P(smoke_Split5D_CPU_planar, SplitLayerCPUTest, - ::testing::Combine( - ::testing::Values(3), - ::testing::Values(2, 3, 4), - ::testing::ValuesIn(netPrecisions), - ::testing::ValuesIn(inputShapes5D_planar), - ::testing::ValuesIn(outIndices3), - ::testing::Values(planar_5D_ref, perChannels_5D)), - SplitLayerCPUTest::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_Split5D_CPU_planar, + SplitLayerCPUTest, + ::testing::Combine(::testing::Values(3), + ::testing::Values(2, 3, 4), + ::testing::ValuesIn(netPrecisions), + ::testing::ValuesIn(inputShapes5D_planar), + ::testing::ValuesIn(outIndices3), + ::testing::Values(planar_5D_ref, perChannels_5D)), + SplitLayerCPUTest::getTestCaseName); const std::vector inputShapes5D_block = { - { {}, {{3, 16, 24, 12, 36}} }, - { - // dynamic - {-1, 16, -1, -1, -1}, - // target - { - {1, 16, 12, 24, 24}, - {3, 16, 12, 12, 12}, - {5, 16, 12, 12, 24} - } - }, - { - // dynamic - {{1, 5}, 16, {1, 48}, {2, 24}, {3, 64}}, - // target - { - {2, 16, 12, 12, 24}, - {1, 16, 12, 12, 24}, - {3, 16, 12, 12, 12} - } - }, + {{}, {{3, 16, 24, 12, 36}}}, + {// dynamic + {-1, 16, -1, -1, -1}, + // target + {{1, 16, 12, 24, 24}, {3, 16, 12, 12, 12}, {5, 16, 12, 12, 24}}}, + {// dynamic + {{1, 5}, 16, {1, 48}, {2, 24}, {3, 64}}, + // target + {{2, 16, 12, 12, 24}, {1, 16, 12, 12, 24}, {3, 16, 12, 12, 12}}}, }; -INSTANTIATE_TEST_SUITE_P(smoke_Split5D_CPU_Block8, SplitLayerCPUTest, - ::testing::Combine( - ::testing::Values(3), - ::testing::Values(2, 3, 4), - ::testing::ValuesIn(netPrecisions), - ::testing::ValuesIn(inputShapes5D_block), - ::testing::ValuesIn(outIndices3), - ::testing::Values(blocked8_5D_ref)), - SplitLayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Split5D_CPU_Block16, SplitLayerCPUTest, - ::testing::Combine( - ::testing::Values(4), - ::testing::Values(2, 3, 4), - ::testing::ValuesIn(netPrecisions), - ::testing::ValuesIn(inputShapes5D_block), - ::testing::ValuesIn(outIndices4), - ::testing::Values(blocked16_5D_ref)), - SplitLayerCPUTest::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_Split5D_CPU_Block8, + SplitLayerCPUTest, + ::testing::Combine(::testing::Values(3), + 
::testing::Values(2, 3, 4), + ::testing::ValuesIn(netPrecisions), + ::testing::ValuesIn(inputShapes5D_block), + ::testing::ValuesIn(outIndices3), + ::testing::Values(blocked8_5D_ref)), + SplitLayerCPUTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_Split5D_CPU_Block16, + SplitLayerCPUTest, + ::testing::Combine(::testing::Values(4), + ::testing::Values(2, 3, 4), + ::testing::ValuesIn(netPrecisions), + ::testing::ValuesIn(inputShapes5D_block), + ::testing::ValuesIn(outIndices4), + ::testing::Values(blocked16_5D_ref)), + SplitLayerCPUTest::getTestCaseName); const std::vector inputShapes3D = { - { {}, {{14, 28, 21}} }, - { - // dynamic - {-1, -1, -1}, - // target - { - {7, 21, 14}, - {21, 7, 14}, - {21, 14, 7}, - } - }, - { - // dynamic - {{1, 60}, {1, 50}, {1, 48}}, - // target - { - {14, 21, 7}, - {21, 7, 14}, - {7, 14, 21}, - } - }, + {{}, {{14, 28, 21}}}, + {// dynamic + {-1, -1, -1}, + // target + { + {7, 21, 14}, + {21, 7, 14}, + {21, 14, 7}, + }}, + {// dynamic + {{1, 60}, {1, 50}, {1, 48}}, + // target + { + {14, 21, 7}, + {21, 7, 14}, + {7, 14, 21}, + }}, }; -INSTANTIATE_TEST_SUITE_P(smoke_Split3D, SplitLayerCPUTest, - ::testing::Combine( - ::testing::Values(7), - ::testing::Values(1, 2), - ::testing::ValuesIn(netPrecisions), - ::testing::ValuesIn(inputShapes3D), - ::testing::Values(std::vector({})), - ::testing::Values(CPUSpecificParams{{}, {}, {}, "ref"})), - SplitLayerCPUTest::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_Split3D, + SplitLayerCPUTest, + ::testing::Combine(::testing::Values(7), + ::testing::Values(1, 2), + ::testing::ValuesIn(netPrecisions), + ::testing::ValuesIn(inputShapes3D), + ::testing::Values(std::vector({})), + ::testing::Values(CPUSpecificParams{{}, {}, {}, "ref"})), + SplitLayerCPUTest::getTestCaseName); const std::vector inputShapes2D = { - { {}, {{6, 12}} }, - { - // dynamic - {-1, -1}, - // target - { - {2, 8}, - {10, 4}, - {2, 6}, - } - }, - { - // dynamic - {{1, 60}, {1, 50}}, - // target - { - {2, 4}, - {4, 4}, - {6, 12}, - } - }, + {{}, {{6, 12}}}, + {// dynamic + {-1, -1}, + // target + { + {2, 8}, + {10, 4}, + {2, 6}, + }}, + {// dynamic + {{1, 60}, {1, 50}}, + // target + { + {2, 4}, + {4, 4}, + {6, 12}, + }}, }; -INSTANTIATE_TEST_SUITE_P(smoke_Split2D, SplitLayerCPUTest, - ::testing::Combine( - ::testing::Values(2), - ::testing::Values(1), - ::testing::ValuesIn(netPrecisions), - ::testing::ValuesIn(inputShapes2D), - ::testing::Values(std::vector({})), - ::testing::Values(CPUSpecificParams{{}, {}, {}, "ref"})), - SplitLayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Split1D_static, SplitLayerCPUTest, - ::testing::Combine( - ::testing::Values(5), - ::testing::Values(0), - ::testing::ValuesIn(netPrecisions), - ::testing::Values(InputShape{ {}, {{10}} }), - ::testing::Values(std::vector({})), - ::testing::Values(CPUSpecificParams{{}, {}, {}, "unknown"})), - SplitLayerCPUTest::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_Split2D, + SplitLayerCPUTest, + ::testing::Combine(::testing::Values(2), + ::testing::Values(1), + ::testing::ValuesIn(netPrecisions), + ::testing::ValuesIn(inputShapes2D), + ::testing::Values(std::vector({})), + ::testing::Values(CPUSpecificParams{{}, {}, {}, "ref"})), + SplitLayerCPUTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_Split1D_static, + SplitLayerCPUTest, + ::testing::Combine(::testing::Values(5), + ::testing::Values(0), + ::testing::ValuesIn(netPrecisions), + ::testing::Values(InputShape{{}, {{10}}}), + ::testing::Values(std::vector({})), + 
::testing::Values(CPUSpecificParams{{}, {}, {}, "unknown"})), + SplitLayerCPUTest::getTestCaseName); const std::vector inputShapes1D = { - { - // dynamic - {-1}, - // target - { - {5}, - {15}, - {10}, - } - }, - { - // dynamic - {{1, 60}}, - // target - { - {15}, - {5}, - {10}, - } - }, + {// dynamic + {-1}, + // target + { + {5}, + {15}, + {10}, + }}, + {// dynamic + {{1, 60}}, + // target + { + {15}, + {5}, + {10}, + }}, }; -INSTANTIATE_TEST_SUITE_P(smoke_Split1D, SplitLayerCPUTest, - ::testing::Combine( - ::testing::Values(5), - ::testing::Values(0), - ::testing::ValuesIn(netPrecisions), - ::testing::ValuesIn(inputShapes1D), - ::testing::Values(std::vector({})), - ::testing::Values(CPUSpecificParams{{}, {}, {}, "ref"})), - SplitLayerCPUTest::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_Split1D, + SplitLayerCPUTest, + ::testing::Combine(::testing::Values(5), + ::testing::Values(0), + ::testing::ValuesIn(netPrecisions), + ::testing::ValuesIn(inputShapes1D), + ::testing::Values(std::vector({})), + ::testing::Values(CPUSpecificParams{{}, {}, {}, "ref"})), + SplitLayerCPUTest::getTestCaseName); const std::vector inputShapes4D_dynBatch = { - { - // dynamic - {{1, 10}, 6, 6, 9}, - // target - { - {6, 6, 6, 9}, - {9, 6, 6, 9}, - } - }, + {// dynamic + {{1, 10}, 6, 6, 9}, + // target + { + {6, 6, 6, 9}, + {9, 6, 6, 9}, + }}, }; -INSTANTIATE_TEST_SUITE_P(smoke_Split4D_CPU_by_batch, SplitLayerCPUTest, - ::testing::Combine( - ::testing::Values(3), - ::testing::Values(1), - ::testing::ValuesIn(netPrecisions), - ::testing::ValuesIn(inputShapes4D_dynBatch), - ::testing::ValuesIn(outIndices3), - ::testing::Values(planar_4D_ref, perChannels_4D)), - SplitLayerCPUTest::getTestCaseName); +INSTANTIATE_TEST_SUITE_P(smoke_Split4D_CPU_by_batch, + SplitLayerCPUTest, + ::testing::Combine(::testing::Values(3), + ::testing::Values(1), + ::testing::ValuesIn(netPrecisions), + ::testing::ValuesIn(inputShapes4D_dynBatch), + ::testing::ValuesIn(outIndices3), + ::testing::Values(planar_4D_ref, perChannels_4D)), + SplitLayerCPUTest::getTestCaseName); // ============================================== inPlace cases ============================================ -const std::vector inputShapes4D_inPlace_0 = { - {{}, {{3, 24, 24, 9}}}, - {{}, {{6, 24, 24}}}, - {{}, {{9, 24}}}, - { - // dynamic - {3, -1, -1, -1, -1}, - // target - { - {3, 24, 6, 9, 4}, - {3, 12, 12, 15, 5}, - } - }, - { - // dynamic - {6, -1, -1, -1}, - // target - { - {6, 24, 6, 9}, - {6, 12, 12, 15}, - } - }, - { - // dynamic - {9, -1, -1}, - // target - { - {9, 24, 6}, - {9, 12, 12}, - } - } -}; - -INSTANTIATE_TEST_SUITE_P(smoke_Split_CPU_planar_inPlace_0, SplitLayerCPUTest, - ::testing::Combine( - ::testing::Values(3), - ::testing::Values(0), - ::testing::Values(ElementType::f32), - ::testing::ValuesIn(inputShapes4D_inPlace_0), - ::testing::Values(std::vector{}), - ::testing::Values(CPUSpecificParams{{}, {}, {}, "unknown"})), - SplitLayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Split4D_CPU_Block8inPlace_1, SplitLayerCPUTest, - ::testing::Combine( - ::testing::Values(4), - ::testing::Values(1), - ::testing::ValuesIn(netPrecisions), - ::testing::Values(InputShape{ {}, {{1, 32, 5, 6}} }, - InputShape{ {1, 32, -1, -1}, +const std::vector inputShapes4D_inPlace_0 = {{{}, {{3, 24, 24, 9}}}, + {{}, {{6, 24, 24}}}, + {{}, {{9, 24}}}, + {// dynamic + {3, -1, -1, -1, -1}, + // target { - {1, 32, 5, 6}, - {1, 32, 5, 2}, - {1, 32, 5, 8} - } }), - ::testing::ValuesIn(outIndices4), - ::testing::Values(planar_4D, blocked8_4D)), - 
SplitLayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_Split5D_CPU_Block16inPlace_1, SplitLayerCPUTest, - ::testing::Combine( - ::testing::Values(3), - ::testing::Values(1), - ::testing::ValuesIn(netPrecisions), - ::testing::Values(InputShape{ {}, {{1, 48, 5, 6, 3}} }, - InputShape{ {1, 48, -1, -1, 3}, + {3, 24, 6, 9, 4}, + {3, 12, 12, 15, 5}, + }}, + {// dynamic + {6, -1, -1, -1}, + // target { - {1, 48, 5, 6, 3}, - {1, 48, 5, 2, 3}, - {1, 48, 5, 8, 3} - } }), - ::testing::ValuesIn(outIndices3), - ::testing::Values(planar_5D, blocked16_5D)), - SplitLayerCPUTest::getTestCaseName); - -} // namespace - -} // namespace CPULayerTestsDefinitions + {6, 24, 6, 9}, + {6, 12, 12, 15}, + }}, + {// dynamic + {9, -1, -1}, + // target + { + {9, 24, 6}, + {9, 12, 12}, + }}}; + +INSTANTIATE_TEST_SUITE_P(smoke_Split_CPU_planar_inPlace_0, + SplitLayerCPUTest, + ::testing::Combine(::testing::Values(3), + ::testing::Values(0), + ::testing::Values(ElementType::f32), + ::testing::ValuesIn(inputShapes4D_inPlace_0), + ::testing::Values(std::vector{}), + ::testing::Values(CPUSpecificParams{{}, {}, {}, "unknown"})), + SplitLayerCPUTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P( + smoke_Split4D_CPU_Block8inPlace_1, + SplitLayerCPUTest, + ::testing::Combine(::testing::Values(4), + ::testing::Values(1), + ::testing::ValuesIn(netPrecisions), + ::testing::Values(InputShape{{}, {{1, 32, 5, 6}}}, + InputShape{{1, 32, -1, -1}, {{1, 32, 5, 6}, {1, 32, 5, 2}, {1, 32, 5, 8}}}), + ::testing::ValuesIn(outIndices4), + ::testing::Values(planar_4D, blocked8_4D)), + SplitLayerCPUTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P( + smoke_Split5D_CPU_Block16inPlace_1, + SplitLayerCPUTest, + ::testing::Combine(::testing::Values(3), + ::testing::Values(1), + ::testing::ValuesIn(netPrecisions), + ::testing::Values(InputShape{{}, {{1, 48, 5, 6, 3}}}, + InputShape{{1, 48, -1, -1, 3}, + {{1, 48, 5, 6, 3}, {1, 48, 5, 2, 3}, {1, 48, 5, 8, 3}}}), + ::testing::ValuesIn(outIndices3), + ::testing::Values(planar_5D, blocked16_5D)), + SplitLayerCPUTest::getTestCaseName); + +} // namespace \ No newline at end of file diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/strided_slice.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/strided_slice.cpp index 4c46b33a295155..58abdabb2c5b9d 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/strided_slice.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/strided_slice.cpp @@ -2,15 +2,11 @@ // SPDX-License-Identifier: Apache-2.0 // -#include -#include "ov_models/builders.hpp" -#include "test_utils/cpu_test_utils.hpp" +#include "common_test_utils/ov_tensor_utils.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" -#include +#include "test_utils/cpu_test_utils.hpp" -using namespace InferenceEngine; using namespace CPUTestUtils; -using namespace LayerTestsDefinitions; using namespace ov; using namespace test; @@ -27,20 +23,21 @@ struct StridedSliceParams { std::vector ellipsisAxisMask; }; -typedef std::tuple< - InputShape, // Input shapes - StridedSliceParams, - ngraph::helpers::InputLayerType, // Secondary input types - ElementType, // Element type - CPUSpecificParams> StridedSliceLayerCPUTestParamSet; +typedef std::tuple + StridedSliceLayerCPUTestParamSet; class StridedSliceLayerCPUTest : public testing::WithParamInterface, - virtual public SubgraphBaseTest, public CPUTestsBase { + virtual public SubgraphBaseTest, + public CPUTestsBase { public: static std::string getTestCaseName(testing::TestParamInfo obj) { 
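        // Note on the two secondary-input flavours exercised by this suite: with
        // utils::InputLayerType::CONSTANT the begin/end/stride vectors are baked
        // into ov::op::v0::Constant nodes when the model is built, while with
        // InputLayerType::PARAMETER they become extra i64 Parameters that
        // generate_inputs() fills before every inference. Sketch of the constant
        // flavour (mirrors the construction further down in this file):
        //
        //     auto beginNode = std::make_shared<ov::op::v0::Constant>(
        //         ov::element::i64, ov::Shape{ssParams.begin.size()}, ssParams.begin.data());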
InputShape shapes; StridedSliceParams params; - ngraph::helpers::InputLayerType secondaryInputType; + ov::test::utils::InputLayerType secondaryInputType; ElementType dataType; CPUSpecificParams cpuParams; std::tie(shapes, params, secondaryInputType, dataType, cpuParams) = obj.param; @@ -58,16 +55,19 @@ class StridedSliceLayerCPUTest : public testing::WithParamInterface& targetInputStaticShapes) override { + void generate_inputs(const std::vector& targetInputStaticShapes) override { std::vector inputValues = {ssParams.begin.data(), ssParams.end.data(), ssParams.strides.data()}; inputs.clear(); @@ -76,9 +76,13 @@ class StridedSliceLayerCPUTest : public testing::WithParamInterfaceGetParam(); @@ -108,7 +112,7 @@ class StridedSliceLayerCPUTest : public testing::WithParamInterface(dataType, shape)); } ov::NodeVector ss_inputs; - if (secondaryInputType == ngraph::helpers::InputLayerType::PARAMETER) { + if (secondaryInputType == ov::test::utils::InputLayerType::PARAMETER) { ov::Shape inShape = {ssParams.begin.size()}; auto beginNode = std::make_shared(ov::element::i64, inShape); @@ -125,9 +129,11 @@ class StridedSliceLayerCPUTest : public testing::WithParamInterface(ov::element::i64, constShape, ssParams.begin.data()); + auto beginNode = + std::make_shared(ov::element::i64, constShape, ssParams.begin.data()); auto endNode = std::make_shared(ov::element::i64, constShape, ssParams.end.data()); - auto strideNode = std::make_shared(ov::element::i64, constShape, ssParams.strides.data()); + auto strideNode = + std::make_shared(ov::element::i64, constShape, ssParams.strides.data()); ss_inputs.push_back(params[0]); ss_inputs.push_back(beginNode); @@ -157,343 +163,500 @@ TEST_P(StridedSliceLayerCPUTest, CompareWithRefs) { namespace { -const auto cpuParams_nChw16c = CPUSpecificParams {{nChw16c}, {nChw16c}, {}, {}}; -const auto cpuParams_nCdhw16c = CPUSpecificParams {{nCdhw16c}, {nCdhw16c}, {}, {}}; +const auto cpuParams_nChw16c = CPUSpecificParams{{nChw16c}, {nChw16c}, {}, {}}; +const auto cpuParams_nCdhw16c = CPUSpecificParams{{nCdhw16c}, {nCdhw16c}, {}, {}}; -const auto cpuParams_nChw8c = CPUSpecificParams {{nChw8c}, {nChw8c}, {}, {}}; -const auto cpuParams_nCdhw8c = CPUSpecificParams {{nCdhw8c}, {nCdhw8c}, {}, {}}; +const auto cpuParams_nChw8c = CPUSpecificParams{{nChw8c}, {nChw8c}, {}, {}}; +const auto cpuParams_nCdhw8c = CPUSpecificParams{{nCdhw8c}, {nCdhw8c}, {}, {}}; -const auto cpuParams_nhwc = CPUSpecificParams {{nhwc}, {nhwc}, {}, {}}; -const auto cpuParams_ndhwc = CPUSpecificParams {{ndhwc}, {ndhwc}, {}, {}}; +const auto cpuParams_nhwc = CPUSpecificParams{{nhwc}, {nhwc}, {}, {}}; +const auto cpuParams_ndhwc = CPUSpecificParams{{ndhwc}, {ndhwc}, {}, {}}; -const auto cpuParams_nchw = CPUSpecificParams {{nchw}, {nchw}, {}, {}}; -const auto cpuParams_ncdhw = CPUSpecificParams {{ncdhw}, {ncdhw}, {}, {}}; +const auto cpuParams_nchw = CPUSpecificParams{{nchw}, {nchw}, {}, {}}; +const auto cpuParams_ncdhw = CPUSpecificParams{{ncdhw}, {ncdhw}, {}, {}}; -const std::vector inputPrecisions = { - ElementType::f32, - ElementType::bf16, - ElementType::i8 -}; +const std::vector inputPrecisions = {ElementType::f32, ElementType::bf16, ElementType::i8}; -const std::vector inputLayerTypes = { - ngraph::helpers::InputLayerType::CONSTANT, - ngraph::helpers::InputLayerType::PARAMETER -}; +const std::vector inputLayerTypes = {ov::test::utils::InputLayerType::CONSTANT, + ov::test::utils::InputLayerType::PARAMETER}; const std::vector inputShapesDynamic2D = { - {{-1, -1}, - {{32, 20}, {16, 16}, {24, 16}}}, + {{-1, -1}, 
{{32, 20}, {16, 16}, {24, 16}}}, - {{-1, 16}, - {{16, 16}, {20, 16}, {32, 16}}}, + {{-1, 16}, {{16, 16}, {20, 16}, {32, 16}}}, - {{{16, 32}, {16, 32}}, - {{16, 32}, {32, 16}, {24, 24}}}, + {{{16, 32}, {16, 32}}, {{16, 32}, {32, 16}, {24, 24}}}, }; const std::vector paramsPlain2D = { - StridedSliceParams{ { 2, 5 }, { 16, 8 }, { 1, 1 }, { 0, 0 }, { 0, 0 }, { }, { }, { } }, - StridedSliceParams{ { -10, -11 }, { -2, -3 }, { 1, 1 }, { 0, 0 }, { 0, 0 }, { }, { }, { } }, - StridedSliceParams{ { -16, -17 }, { -2, -3 }, { 1, 1 }, { 0, 0 }, { 0, 0 }, { }, { }, { } }, - StridedSliceParams{ { 2, 44 }, { 55, -2 }, { 2, 3 }, { 0, 1 }, { 0, 0 }, { }, { }, { } }, - StridedSliceParams{ { 2, -7 }, { 1, -2 }, { 2, 3 }, { 1, 0 }, { 1, 0 }, { }, { }, { } }, - StridedSliceParams{ { 2 }, { 22 }, { 2 }, { 0 }, { 0 }, { }, { }, { } }, + StridedSliceParams{{2, 5}, {16, 8}, {1, 1}, {0, 0}, {0, 0}, {}, {}, {}}, + StridedSliceParams{{-10, -11}, {-2, -3}, {1, 1}, {0, 0}, {0, 0}, {}, {}, {}}, + StridedSliceParams{{-16, -17}, {-2, -3}, {1, 1}, {0, 0}, {0, 0}, {}, {}, {}}, + StridedSliceParams{{2, 44}, {55, -2}, {2, 3}, {0, 1}, {0, 0}, {}, {}, {}}, + StridedSliceParams{{2, -7}, {1, -2}, {2, 3}, {1, 0}, {1, 0}, {}, {}, {}}, + StridedSliceParams{{2}, {22}, {2}, {0}, {0}, {}, {}, {}}, }; -INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_Plain_Static_2D, StridedSliceLayerCPUTest, - ::testing::Combine( - ::testing::ValuesIn(static_shapes_to_test_representation({{32, 20}})), - ::testing::ValuesIn(paramsPlain2D), - ::testing::ValuesIn(inputLayerTypes), - ::testing::ValuesIn(inputPrecisions), - ::testing::Values(emptyCPUSpec)), +INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_Plain_Static_2D, + StridedSliceLayerCPUTest, + ::testing::Combine(::testing::ValuesIn(static_shapes_to_test_representation({{32, 20}})), + ::testing::ValuesIn(paramsPlain2D), + ::testing::ValuesIn(inputLayerTypes), + ::testing::ValuesIn(inputPrecisions), + ::testing::Values(emptyCPUSpec)), StridedSliceLayerCPUTest::getTestCaseName); -INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_Plain_Dynamic_2D, StridedSliceLayerCPUTest, - ::testing::Combine( - ::testing::ValuesIn(inputShapesDynamic2D), - ::testing::ValuesIn(paramsPlain2D), - ::testing::ValuesIn(inputLayerTypes), - ::testing::ValuesIn(inputPrecisions), - ::testing::Values(emptyCPUSpec)), +INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_Plain_Dynamic_2D, + StridedSliceLayerCPUTest, + ::testing::Combine(::testing::ValuesIn(inputShapesDynamic2D), + ::testing::ValuesIn(paramsPlain2D), + ::testing::ValuesIn(inputLayerTypes), + ::testing::ValuesIn(inputPrecisions), + ::testing::Values(emptyCPUSpec)), StridedSliceLayerCPUTest::getTestCaseName); const std::vector testCasesCommon4D = { - StridedSliceParams{ { 0, 2, 5, 4 }, { 1, 4, 28, 27 }, { 1, 1, 1, 1 }, { 0, 0, 0, 0 }, { 0, 0, 0, 0 }, { }, { }, { } }, - StridedSliceParams{ { 0, 0, 10, 0 }, { 1, 3, 20, 20 }, { 1, 1, 1, 1 }, { 0, 0, 0, 0 }, { 0, 1, 0, 0 }, { }, { }, { } }, - StridedSliceParams{ { 0, 0, 20, 20 }, { 1, 5, 25, 26 }, { 1, 1, 1, 2 }, { 0, 0, 0, 0 }, { 0, 0, 0, 0 }, { }, { }, { } }, - StridedSliceParams{ { 0, 0, 0, 20 }, { 1, 2, 30, 30 }, { 1, 1, 2, 1 }, { 0, 0, 0, 1 }, { 0, 1, 0, 1 }, { }, { }, { } }, - StridedSliceParams{ { 0, 0, 2, 10 }, { 1, 3, 32, 20 }, { 1, 1, 1, 1 }, { 0, 0, 1, 1 }, { 0, 0, 0, 0 }, { }, { }, { } }, - StridedSliceParams{ { 0, 1, 0, 10 }, { 1, 5, 32, 30 }, { 1, 1, 1, 1 }, { 0, 1, 0, 0 }, { 0, 0, 0, 0 }, { }, { }, { } }, - StridedSliceParams{ { 0, 0, 2, 10 }, { 1, 8, 32, 18 }, { 1, 2, 1, 2 }, { 0, 0, 1, 0 }, { 0, 0, 0, 1 }, { }, { }, { 
} }, - StridedSliceParams{ { 0, 0, 10 }, { 0, 32, 18 }, { 1, 1, 1 }, { 1, 1, 0 }, { 1, 1, 0 }, { }, { }, { 1, 0, 0 } }, - StridedSliceParams{ { 0, 4, 10 }, { 1, 8, 0 }, { 1, 1, 1 }, { 1, 0, 1 }, { 1, 1, 1 }, { }, { }, { 0, 0, 1 } } -}; - -const std::vector<ov::Shape> inputShapesStatic4D = { - { 1, 5, 32, 32 }, { 2, 5, 32, 48 } -}; + StridedSliceParams{{0, 2, 5, 4}, {1, 4, 28, 27}, {1, 1, 1, 1}, {0, 0, 0, 0}, {0, 0, 0, 0}, {}, {}, {}}, + StridedSliceParams{{0, 0, 10, 0}, {1, 3, 20, 20}, {1, 1, 1, 1}, {0, 0, 0, 0}, {0, 1, 0, 0}, {}, {}, {}}, + StridedSliceParams{{0, 0, 20, 20}, {1, 5, 25, 26}, {1, 1, 1, 2}, {0, 0, 0, 0}, {0, 0, 0, 0}, {}, {}, {}}, + StridedSliceParams{{0, 0, 0, 20}, {1, 2, 30, 30}, {1, 1, 2, 1}, {0, 0, 0, 1}, {0, 1, 0, 1}, {}, {}, {}}, + StridedSliceParams{{0, 0, 2, 10}, {1, 3, 32, 20}, {1, 1, 1, 1}, {0, 0, 1, 1}, {0, 0, 0, 0}, {}, {}, {}}, + StridedSliceParams{{0, 1, 0, 10}, {1, 5, 32, 30}, {1, 1, 1, 1}, {0, 1, 0, 0}, {0, 0, 0, 0}, {}, {}, {}}, + StridedSliceParams{{0, 0, 2, 10}, {1, 8, 32, 18}, {1, 2, 1, 2}, {0, 0, 1, 0}, {0, 0, 0, 1}, {}, {}, {}}, + StridedSliceParams{{0, 0, 10}, {0, 32, 18}, {1, 1, 1}, {1, 1, 0}, {1, 1, 0}, {}, {}, {1, 0, 0}}, + StridedSliceParams{{0, 4, 10}, {1, 8, 0}, {1, 1, 1}, {1, 0, 1}, {1, 1, 1}, {}, {}, {0, 0, 1}}}; + +const std::vector<ov::Shape> inputShapesStatic4D = {{1, 5, 32, 32}, {2, 5, 32, 48}}; const std::vector<InputShape> inputShapesDynamic4D = { - {{-1, -1, -1, -1}, - {{ 1, 5, 32, 32 }, { 2, 5, 32, 32 }, { 1, 5, 64, 64 }, {0, 0, 0, 0}}}, + {{-1, -1, -1, -1}, {{1, 5, 32, 32}, {2, 5, 32, 32}, {1, 5, 64, 64}, {0, 0, 0, 0}}}, - {{-1, 5, -1, -1}, - {{ 1, 5, 32, 32 }, { 2, 5, 32, 32 }, { 3, 5, 32, 36 }, {0, 5, 0, 0}}}, + {{-1, 5, -1, -1}, {{1, 5, 32, 32}, {2, 5, 32, 32}, {3, 5, 32, 36}, {0, 5, 0, 0}}}, - {{{1, 5}, 5, {32, 64}, {32, 64}}, - {{ 2, 5, 32, 32 }, { 1, 5, 48, 32 }, { 5, 5, 32, 32 }}}, + {{{1, 5}, 5, {32, 64}, {32, 64}}, {{2, 5, 32, 32}, {1, 5, 48, 32}, {5, 5, 32, 32}}}, }; const std::vector<CPUSpecificParams> CPUParamsCommon4D = { - cpuParams_nchw, - cpuParams_nhwc, + cpuParams_nchw, + cpuParams_nhwc, }; -INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_Common_Static_4D, StridedSliceLayerCPUTest, - ::testing::Combine( - ::testing::ValuesIn(static_shapes_to_test_representation(inputShapesStatic4D)), - ::testing::ValuesIn(testCasesCommon4D), - ::testing::ValuesIn(inputLayerTypes), - ::testing::ValuesIn(inputPrecisions), - ::testing::ValuesIn(CPUParamsCommon4D)), - StridedSliceLayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_Common_Dynamic_4D, StridedSliceLayerCPUTest, - ::testing::Combine( - ::testing::ValuesIn(inputShapesDynamic4D), - ::testing::ValuesIn(testCasesCommon4D), - ::testing::ValuesIn(inputLayerTypes), - ::testing::ValuesIn(inputPrecisions), - ::testing::ValuesIn(CPUParamsCommon4D)), +INSTANTIATE_TEST_SUITE_P( + smoke_CompareWithRefs_Common_Static_4D, + StridedSliceLayerCPUTest, + ::testing::Combine(::testing::ValuesIn(static_shapes_to_test_representation(inputShapesStatic4D)), + ::testing::ValuesIn(testCasesCommon4D), + ::testing::ValuesIn(inputLayerTypes), + ::testing::ValuesIn(inputPrecisions), + ::testing::ValuesIn(CPUParamsCommon4D)), + StridedSliceLayerCPUTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_Common_Dynamic_4D, + StridedSliceLayerCPUTest, + ::testing::Combine(::testing::ValuesIn(inputShapesDynamic4D), + ::testing::ValuesIn(testCasesCommon4D), + ::testing::ValuesIn(inputLayerTypes), + ::testing::ValuesIn(inputPrecisions), + ::testing::ValuesIn(CPUParamsCommon4D)),
StridedSliceLayerCPUTest::getTestCaseName); const std::vector testCasesBlocked4DSubset1 = { - StridedSliceParams{ { 0, 0, 0, 0 }, { 1, 32, 32, 32 }, { 1, 1, 1, 1 }, { 0, 0, 0, 0 }, { 0, 0, 0, 0 }, { }, { }, { } }, - StridedSliceParams{ { 0, 0, 16, 0 }, { 1, 32, 32, 32 }, { 1, 1, 1, 1 }, { 0, 0, 0, 0 }, { 0, 0, 0, 1 }, { }, { }, { } }, - StridedSliceParams{ { 0, 0, 0, 0 }, { 1, 32, 32, 16 }, { 1, 1, 1, 1 }, { 0, 0, 0, 0 }, { 0, 0, 0, 1 }, { }, { }, { } }, - StridedSliceParams{ { 0, 0, 0, 0 }, { 1, 16, 32, 32 }, { 1, 1, 1, 1 }, { 0, 0, 0, 0 }, { 0, 0, 0, 1 }, { }, { }, { } }, + StridedSliceParams{{0, 0, 0, 0}, {1, 32, 32, 32}, {1, 1, 1, 1}, {0, 0, 0, 0}, {0, 0, 0, 0}, {}, {}, {}}, + StridedSliceParams{{0, 0, 16, 0}, {1, 32, 32, 32}, {1, 1, 1, 1}, {0, 0, 0, 0}, {0, 0, 0, 1}, {}, {}, {}}, + StridedSliceParams{{0, 0, 0, 0}, {1, 32, 32, 16}, {1, 1, 1, 1}, {0, 0, 0, 0}, {0, 0, 0, 1}, {}, {}, {}}, + StridedSliceParams{{0, 0, 0, 0}, {1, 16, 32, 32}, {1, 1, 1, 1}, {0, 0, 0, 0}, {0, 0, 0, 1}, {}, {}, {}}, }; const std::vector testCasesBlocked4DSubset2 = { - StridedSliceParams{ { 0, 0, 5, 4 }, { 1, 16, 28, 27 }, { 1, 1, 1, 1 }, { 0, 0, 0, 0 }, { 0, 0, 0, 0 }, { }, { }, { } }, - StridedSliceParams{ { 0, 16, 0, 0 }, { 1, 32, 10, 10 }, { 1, 1, 1, 1 }, { 0, 0, 0, 0 }, { 0, 0, 0, 0 }, { }, { }, { } }, - StridedSliceParams{ { 0, 0, 20, 20 }, { 1, 32, 25, 25 }, { 1, 1, 1, 1 }, { 0, 1, 0, 0 }, { 0, 1, 0, 0 }, { }, { }, { } }, - StridedSliceParams{ { 0, 16, 2, 10 }, { 1, 32, 32, 20 }, { 1, 1, 2, 1 }, { 0, 0, 0, 0 }, { 0, 0, 0, 0 }, { }, { }, { } }, - StridedSliceParams{ { 0, 16, 0, 0 }, { 2, 64, 32, 20 }, { 1, 1, 1, 1 }, { 0, 0, 0, 0 }, { 0, 0, 0, 0 }, { }, { }, { } }, - StridedSliceParams{ { 0, 32, 0, 0 }, { 2, 50, 32, 20 }, { 1, 1, 1, 1 }, { 0, 0, 0, 0 }, { 0, 0, 0, 0 }, { }, { }, { } }, - StridedSliceParams{ { 0, 0, 0, 0 }, { 2, 12, 32, 20 }, { 1, 1, 1, 1 }, { 0, 0, 0, 0 }, { 0, 0, 0, 0 }, { }, { }, { } }, - StridedSliceParams{ { 0, -16, 0, 10 }, { 2, 100, 32, 20 }, { 1, 1, 1, 1 }, { 0, 0, 0, 0 }, { 0, 0, 0, 0 }, { }, { }, { } }, - StridedSliceParams{ { 0, -32, 0, 0 }, { 2, -12, 32, 20 }, { 1, 1, 1, 1 }, { 0, 0, 0, 0 }, { 0, 0, 0, 0 }, { }, { }, { } }, - StridedSliceParams{ { 0, 10 }, { 0, 20 }, { 1, 1 }, { 1, 0 }, { 1, 0 }, { }, { }, { 1, 0 } }, - StridedSliceParams{ { 0, 16, 0 }, { 2, 32, 0 }, { 1, 1, 1 }, { 1, 0, 1 }, { 1, 1, 1 }, { }, { }, { 0, 0, 1 } }, + StridedSliceParams{{0, 0, 5, 4}, {1, 16, 28, 27}, {1, 1, 1, 1}, {0, 0, 0, 0}, {0, 0, 0, 0}, {}, {}, {}}, + StridedSliceParams{{0, 16, 0, 0}, {1, 32, 10, 10}, {1, 1, 1, 1}, {0, 0, 0, 0}, {0, 0, 0, 0}, {}, {}, {}}, + StridedSliceParams{{0, 0, 20, 20}, {1, 32, 25, 25}, {1, 1, 1, 1}, {0, 1, 0, 0}, {0, 1, 0, 0}, {}, {}, {}}, + StridedSliceParams{{0, 16, 2, 10}, {1, 32, 32, 20}, {1, 1, 2, 1}, {0, 0, 0, 0}, {0, 0, 0, 0}, {}, {}, {}}, + StridedSliceParams{{0, 16, 0, 0}, {2, 64, 32, 20}, {1, 1, 1, 1}, {0, 0, 0, 0}, {0, 0, 0, 0}, {}, {}, {}}, + StridedSliceParams{{0, 32, 0, 0}, {2, 50, 32, 20}, {1, 1, 1, 1}, {0, 0, 0, 0}, {0, 0, 0, 0}, {}, {}, {}}, + StridedSliceParams{{0, 0, 0, 0}, {2, 12, 32, 20}, {1, 1, 1, 1}, {0, 0, 0, 0}, {0, 0, 0, 0}, {}, {}, {}}, + StridedSliceParams{{0, -16, 0, 10}, {2, 100, 32, 20}, {1, 1, 1, 1}, {0, 0, 0, 0}, {0, 0, 0, 0}, {}, {}, {}}, + StridedSliceParams{{0, -32, 0, 0}, {2, -12, 32, 20}, {1, 1, 1, 1}, {0, 0, 0, 0}, {0, 0, 0, 0}, {}, {}, {}}, + StridedSliceParams{{0, 10}, {0, 20}, {1, 1}, {1, 0}, {1, 0}, {}, {}, {1, 0}}, + StridedSliceParams{{0, 16, 0}, {2, 32, 0}, {1, 1, 1}, {1, 0, 1}, {1, 1, 1}, {}, {}, {0, 0, 1}}, }; -const 
std::vector<ov::Shape> inputShapesBlockedStatic4DSubset1 = { - { 1, 32, 32, 32 }, { 1, 32, 32, 64 } -}; +const std::vector<ov::Shape> inputShapesBlockedStatic4DSubset1 = {{1, 32, 32, 32}, {1, 32, 32, 64}}; -const std::vector<ov::Shape> inputShapesBlockedStatic4DSubset2 = { - { 1, 64, 32, 32 }, { 1, 64, 32, 64 } -}; +const std::vector<ov::Shape> inputShapesBlockedStatic4DSubset2 = {{1, 64, 32, 32}, {1, 64, 32, 64}}; const std::vector<InputShape> inputShapesBlockedDynamic4DSubset1 = { - {{-1, 32, -1, -1}, - {{ 1, 32, 32, 32 }, { 2, 32, 32, 32 }, { 3, 32, 32, 48 }}}, + {{-1, 32, -1, -1}, {{1, 32, 32, 32}, {2, 32, 32, 32}, {3, 32, 32, 48}}}, - {{{1, 5}, 32, {32, 64}, {32, 64}}, - {{ 2, 32, 32, 32 }, { 1, 32, 48, 32 }, { 5, 32, 32, 48 }}}, + {{{1, 5}, 32, {32, 64}, {32, 64}}, {{2, 32, 32, 32}, {1, 32, 48, 32}, {5, 32, 32, 48}}}, }; const std::vector<InputShape> inputShapesBlockedDynamic4DSubset2 = { - {{-1, 64, -1, -1}, - {{ 1, 64, 64, 32 }, { 2, 64, 32, 32 }, { 3, 64, 32, 48 }}}, + {{-1, 64, -1, -1}, {{1, 64, 64, 32}, {2, 64, 32, 32}, {3, 64, 32, 48}}}, - {{{1, 5}, 64, {32, 64}, {32, 64}}, - {{ 2, 64, 32, 32 }, { 1, 64, 48, 32 }, { 1, 64, 64, 64 }}}, + {{{1, 5}, 64, {32, 64}, {32, 64}}, {{2, 64, 32, 32}, {1, 64, 48, 32}, {1, 64, 64, 64}}}, }; const std::vector<CPUSpecificParams> CPUParamsBlocked4D = { - cpuParams_nChw16c, - cpuParams_nChw8c, + cpuParams_nChw16c, + cpuParams_nChw8c, }; -const std::vector<ngraph::helpers::InputLayerType> inputLayerTypesBlocked = { - ngraph::helpers::InputLayerType::CONSTANT, +const std::vector<ov::test::utils::InputLayerType> inputLayerTypesBlocked = { + ov::test::utils::InputLayerType::CONSTANT, }; -INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_Common_Static_4D_Subset1, StridedSliceLayerCPUTest, - ::testing::Combine( - ::testing::ValuesIn(static_shapes_to_test_representation(inputShapesBlockedStatic4DSubset1)), - ::testing::ValuesIn(testCasesBlocked4DSubset1), - ::testing::ValuesIn(inputLayerTypesBlocked), - ::testing::ValuesIn(inputPrecisions), - ::testing::ValuesIn(CPUParamsBlocked4D)), - StridedSliceLayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_Common_Dynamic_4D_Subset1, StridedSliceLayerCPUTest, - ::testing::Combine( - ::testing::ValuesIn(inputShapesBlockedDynamic4DSubset1), - ::testing::ValuesIn(testCasesBlocked4DSubset1), - ::testing::ValuesIn(inputLayerTypesBlocked), - ::testing::ValuesIn(inputPrecisions), - ::testing::ValuesIn(CPUParamsBlocked4D)), - StridedSliceLayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_Common_Static_4D_Subset2, StridedSliceLayerCPUTest, - ::testing::Combine( - ::testing::ValuesIn(static_shapes_to_test_representation(inputShapesBlockedStatic4DSubset2)), - ::testing::ValuesIn(testCasesBlocked4DSubset2), - ::testing::ValuesIn(inputLayerTypesBlocked), - ::testing::ValuesIn(inputPrecisions), - ::testing::ValuesIn(CPUParamsBlocked4D)), +INSTANTIATE_TEST_SUITE_P( + smoke_CompareWithRefs_Common_Static_4D_Subset1, + StridedSliceLayerCPUTest, + ::testing::Combine(::testing::ValuesIn(static_shapes_to_test_representation(inputShapesBlockedStatic4DSubset1)), + ::testing::ValuesIn(testCasesBlocked4DSubset1), + ::testing::ValuesIn(inputLayerTypesBlocked), + ::testing::ValuesIn(inputPrecisions), + ::testing::ValuesIn(CPUParamsBlocked4D)), + StridedSliceLayerCPUTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_Common_Dynamic_4D_Subset1, + StridedSliceLayerCPUTest, + ::testing::Combine(::testing::ValuesIn(inputShapesBlockedDynamic4DSubset1), + ::testing::ValuesIn(testCasesBlocked4DSubset1), + ::testing::ValuesIn(inputLayerTypesBlocked), + ::testing::ValuesIn(inputPrecisions), +
::testing::ValuesIn(CPUParamsBlocked4D)), StridedSliceLayerCPUTest::getTestCaseName); -INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_Common_Dynamic_4D_Subset2, StridedSliceLayerCPUTest, - ::testing::Combine( - ::testing::ValuesIn(inputShapesBlockedDynamic4DSubset2), - ::testing::ValuesIn(testCasesBlocked4DSubset2), - ::testing::ValuesIn(inputLayerTypesBlocked), - ::testing::ValuesIn(inputPrecisions), - ::testing::ValuesIn(CPUParamsBlocked4D)), +INSTANTIATE_TEST_SUITE_P( + smoke_CompareWithRefs_Common_Static_4D_Subset2, + StridedSliceLayerCPUTest, + ::testing::Combine(::testing::ValuesIn(static_shapes_to_test_representation(inputShapesBlockedStatic4DSubset2)), + ::testing::ValuesIn(testCasesBlocked4DSubset2), + ::testing::ValuesIn(inputLayerTypesBlocked), + ::testing::ValuesIn(inputPrecisions), + ::testing::ValuesIn(CPUParamsBlocked4D)), + StridedSliceLayerCPUTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_Common_Dynamic_4D_Subset2, + StridedSliceLayerCPUTest, + ::testing::Combine(::testing::ValuesIn(inputShapesBlockedDynamic4DSubset2), + ::testing::ValuesIn(testCasesBlocked4DSubset2), + ::testing::ValuesIn(inputLayerTypesBlocked), + ::testing::ValuesIn(inputPrecisions), + ::testing::ValuesIn(CPUParamsBlocked4D)), StridedSliceLayerCPUTest::getTestCaseName); const std::vector testCasesCommon5D = { - StridedSliceParams{ { 0, 2, 0, 5, 4 }, { 1, 4, 5, 28, 27 }, { 1, 1, 1, 1, 1 }, { 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 0 }, { }, { }, { } }, - StridedSliceParams{ { 0, 0, 10, 0, 0 }, { 1, 5, 20, 32, 20 }, { 1, 1, 1, 1, 1 }, { 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 0 }, { }, { }, { } }, - StridedSliceParams{ { 0, 1, 10, 0, 0 }, { 1, 3, 20, 32, 20 }, { 1, 1, 1, 1, 1 }, { 0, 1, 0, 0, 0 }, { 0, 0, 0, 0, 0 }, { }, { }, { } }, - StridedSliceParams{ { 0, 0, 0, 20, 20 }, { 1, 5, 20, 30, 26 }, { 1, 1, 1, 2, 2 }, { 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 0 }, { }, { }, { } }, - StridedSliceParams{ { 0, 0, 10, 0, 20 }, { 1, 2, 20, 30, 30 }, { 1, 1, 2, 1, 1 }, { 0, 0, 0, 0, 1 }, { 0, 1, 0, 1, 0 }, { }, { }, { } }, - StridedSliceParams{ { 0, 0, 2, 10, 0 }, { 1, 5, 10, 32, 20 }, { 1, 1, 1, 1, 1 }, { 0, 0, 1, 1, 0 }, { 0, 0, 0, 0, 1 }, { }, { }, { } }, - StridedSliceParams{ { 0, 1, 0, 10, 0 }, { 1, 5, 20, 32, 32 }, { 1, 1, 1, 1, 1 }, { 0, 1, 0, 0, 0 }, { 0, 0, 0, 0, 0 }, { }, { }, { } }, - StridedSliceParams{ { 0, 0, 0, 0, 0 }, { 1, 5, 10, 16, 16 }, { 1, 1, 2, 1, 1 }, { 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 0 }, { }, { }, { } }, + StridedSliceParams{{0, 2, 0, 5, 4}, + {1, 4, 5, 28, 27}, + {1, 1, 1, 1, 1}, + {0, 0, 0, 0, 0}, + {0, 0, 0, 0, 0}, + {}, + {}, + {}}, + StridedSliceParams{{0, 0, 10, 0, 0}, + {1, 5, 20, 32, 20}, + {1, 1, 1, 1, 1}, + {0, 0, 0, 0, 0}, + {0, 0, 0, 0, 0}, + {}, + {}, + {}}, + StridedSliceParams{{0, 1, 10, 0, 0}, + {1, 3, 20, 32, 20}, + {1, 1, 1, 1, 1}, + {0, 1, 0, 0, 0}, + {0, 0, 0, 0, 0}, + {}, + {}, + {}}, + StridedSliceParams{{0, 0, 0, 20, 20}, + {1, 5, 20, 30, 26}, + {1, 1, 1, 2, 2}, + {0, 0, 0, 0, 0}, + {0, 0, 0, 0, 0}, + {}, + {}, + {}}, + StridedSliceParams{{0, 0, 10, 0, 20}, + {1, 2, 20, 30, 30}, + {1, 1, 2, 1, 1}, + {0, 0, 0, 0, 1}, + {0, 1, 0, 1, 0}, + {}, + {}, + {}}, + StridedSliceParams{{0, 0, 2, 10, 0}, + {1, 5, 10, 32, 20}, + {1, 1, 1, 1, 1}, + {0, 0, 1, 1, 0}, + {0, 0, 0, 0, 1}, + {}, + {}, + {}}, + StridedSliceParams{{0, 1, 0, 10, 0}, + {1, 5, 20, 32, 32}, + {1, 1, 1, 1, 1}, + {0, 1, 0, 0, 0}, + {0, 0, 0, 0, 0}, + {}, + {}, + {}}, + StridedSliceParams{{0, 0, 0, 0, 0}, + {1, 5, 10, 16, 16}, + {1, 1, 2, 1, 1}, + {0, 0, 0, 0, 0}, + {0, 0, 0, 0, 0}, + {}, + {}, + {}}, }; -const 
std::vector inputShapesStatic5D = { - { 1, 5, 20, 32, 32 }, { 2, 5, 32, 32, 32 } -}; +const std::vector inputShapesStatic5D = {{1, 5, 20, 32, 32}, {2, 5, 32, 32, 32}}; const std::vector inputShapesDynamic5D = { - {{-1, -1, -1, -1, -1}, - {{ 1, 5, 32, 32, 32 }, { 1, 5, 32, 32, 48 }, { 1, 5, 64, 64, 64 }, { 1, 10, 32, 32, 32 }, {0, 0, 0, 0, 0}}}, + {{-1, -1, -1, -1, -1}, + {{1, 5, 32, 32, 32}, {1, 5, 32, 32, 48}, {1, 5, 64, 64, 64}, {1, 10, 32, 32, 32}, {0, 0, 0, 0, 0}}}, - {{-1, 5, -1, -1, -1}, - {{ 1, 5, 32, 32, 48 }, { 1, 5, 32, 48, 32 }, { 1, 5, 48, 32, 32 }, {0, 5, 0, 0, 0}}}, + {{-1, 5, -1, -1, -1}, {{1, 5, 32, 32, 48}, {1, 5, 32, 48, 32}, {1, 5, 48, 32, 32}, {0, 5, 0, 0, 0}}}, - {{{1, 5}, 5, {32, 64}, {32, 64}, {32, 64}}, - {{ 2, 5, 32, 32, 32 }, { 1, 5, 48, 32, 32 }, { 5, 5, 32, 32, 48 }}}, + {{{1, 5}, 5, {32, 64}, {32, 64}, {32, 64}}, {{2, 5, 32, 32, 32}, {1, 5, 48, 32, 32}, {5, 5, 32, 32, 48}}}, }; const std::vector CPUParamsCommon5D = { - cpuParams_ncdhw, - cpuParams_ndhwc, + cpuParams_ncdhw, + cpuParams_ndhwc, }; -INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_Common_Static_5D, StridedSliceLayerCPUTest, - ::testing::Combine( - ::testing::ValuesIn(static_shapes_to_test_representation(inputShapesStatic5D)), - ::testing::ValuesIn(testCasesCommon5D), - ::testing::ValuesIn(inputLayerTypes), - ::testing::ValuesIn(inputPrecisions), - ::testing::ValuesIn(CPUParamsCommon5D)), - StridedSliceLayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_Common_Dynamic_5D, StridedSliceLayerCPUTest, - ::testing::Combine( - ::testing::ValuesIn(inputShapesDynamic5D), - ::testing::ValuesIn(testCasesCommon5D), - ::testing::ValuesIn(inputLayerTypes), - ::testing::ValuesIn(inputPrecisions), - ::testing::ValuesIn(CPUParamsCommon5D)), +INSTANTIATE_TEST_SUITE_P( + smoke_CompareWithRefs_Common_Static_5D, + StridedSliceLayerCPUTest, + ::testing::Combine(::testing::ValuesIn(static_shapes_to_test_representation(inputShapesStatic5D)), + ::testing::ValuesIn(testCasesCommon5D), + ::testing::ValuesIn(inputLayerTypes), + ::testing::ValuesIn(inputPrecisions), + ::testing::ValuesIn(CPUParamsCommon5D)), + StridedSliceLayerCPUTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_Common_Dynamic_5D, + StridedSliceLayerCPUTest, + ::testing::Combine(::testing::ValuesIn(inputShapesDynamic5D), + ::testing::ValuesIn(testCasesCommon5D), + ::testing::ValuesIn(inputLayerTypes), + ::testing::ValuesIn(inputPrecisions), + ::testing::ValuesIn(CPUParamsCommon5D)), StridedSliceLayerCPUTest::getTestCaseName); const std::vector testCasesBlocked5DSubset1 = { - StridedSliceParams{ { 0, 0, 0, 5, 4 }, { 1, 16, 5, 28, 27 }, { 1, 1, 1, 1, 1 }, { 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 0 }, { }, { }, { } }, - StridedSliceParams{ { 0, 0, 10, 0, 0 }, { 1, 16, 20, 32, 20 }, { 1, 1, 1, 1, 1 }, { 0, 0, 0, 0, 0 }, { 0, 1, 0, 0, 0 }, { }, { }, { } }, - StridedSliceParams{ { 0, 0, 0, 20, 20 }, { 1, 16, 20, 30, 26 }, { 1, 1, 1, 2, 2 }, { 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 0 }, { }, { }, { } }, - StridedSliceParams{ { 0, 0, 10, 0, 20 }, { 1, 16, 20, 30, 30 }, { 1, 1, 2, 1, 1 }, { 0, 0, 0, 0, 1 }, { 0, 1, 0, 1, 0 }, { }, { }, { } }, - StridedSliceParams{ { 0, 0, 2, 10, 0 }, { 1, 16, 10, 32, 20 }, { 1, 1, 1, 1, 1 }, { 0, 0, 1, 1, 0 }, { 0, 0, 0, 0, 1 }, { }, { }, { } }, - StridedSliceParams{ { 0, 0, 0, 10, 0 }, { 1, 8, 20, 32, 32 }, { 1, 1, 1, 1, 1 }, { 0, 1, 0, 0, 0 }, { 0, 1, 0, 0, 0 }, { }, { }, { } }, - StridedSliceParams{ { 0, 0, 0, 0, 0 }, { 1, 16, 10, 16, 16 }, { 1, 1, 2, 1, 1 }, { 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 0 }, 
{ }, { }, { } }, + StridedSliceParams{{0, 0, 0, 5, 4}, + {1, 16, 5, 28, 27}, + {1, 1, 1, 1, 1}, + {0, 0, 0, 0, 0}, + {0, 0, 0, 0, 0}, + {}, + {}, + {}}, + StridedSliceParams{{0, 0, 10, 0, 0}, + {1, 16, 20, 32, 20}, + {1, 1, 1, 1, 1}, + {0, 0, 0, 0, 0}, + {0, 1, 0, 0, 0}, + {}, + {}, + {}}, + StridedSliceParams{{0, 0, 0, 20, 20}, + {1, 16, 20, 30, 26}, + {1, 1, 1, 2, 2}, + {0, 0, 0, 0, 0}, + {0, 0, 0, 0, 0}, + {}, + {}, + {}}, + StridedSliceParams{{0, 0, 10, 0, 20}, + {1, 16, 20, 30, 30}, + {1, 1, 2, 1, 1}, + {0, 0, 0, 0, 1}, + {0, 1, 0, 1, 0}, + {}, + {}, + {}}, + StridedSliceParams{{0, 0, 2, 10, 0}, + {1, 16, 10, 32, 20}, + {1, 1, 1, 1, 1}, + {0, 0, 1, 1, 0}, + {0, 0, 0, 0, 1}, + {}, + {}, + {}}, + StridedSliceParams{{0, 0, 0, 10, 0}, + {1, 8, 20, 32, 32}, + {1, 1, 1, 1, 1}, + {0, 1, 0, 0, 0}, + {0, 1, 0, 0, 0}, + {}, + {}, + {}}, + StridedSliceParams{{0, 0, 0, 0, 0}, + {1, 16, 10, 16, 16}, + {1, 1, 2, 1, 1}, + {0, 0, 0, 0, 0}, + {0, 0, 0, 0, 0}, + {}, + {}, + {}}, }; const std::vector testCasesBlocked5DSubset2 = { - StridedSliceParams{ { 0, 0, 0, 5, 4 }, { 1, 16, 5, 28, 27 }, { 1, 1, 1, 1, 1 }, { 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 0 }, { }, { }, { } }, - StridedSliceParams{ { 0, 0, 10, 0, 0 }, { 1, 16, 20, 32, 20 }, { 1, 1, 1, 1, 1 }, { 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 0 }, { }, { }, { } }, - StridedSliceParams{ { 0, 0, 10, 0, 0 }, { 1, 16, 20, 32, 20 }, { 1, 1, 1, 1, 1 }, { 0, 0, 0, 0, 0 }, { 0, 1, 0, 0, 0 }, { }, { }, { } }, - StridedSliceParams{ { 0, 0, 0, 20, 20 }, { 1, 16, 20, 30, 26 }, { 1, 1, 1, 2, 2 }, { 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 0 }, { }, { }, { } }, - StridedSliceParams{ { 0, 0, 10, 0, 20 }, { 1, 16, 20, 30, 30 }, { 1, 1, 2, 1, 1 }, { 0, 0, 0, 0, 1 }, { 0, 1, 0, 1, 0 }, { }, { }, { } }, - StridedSliceParams{ { 0, 0, 2, 10, 0 }, { 1, 16, 10, 32, 20 }, { 1, 1, 1, 1, 1 }, { 0, 0, 1, 1, 0 }, { 0, 0, 0, 0, 1 }, { }, { }, { } }, - StridedSliceParams{ { 0, 0, 0, 10, 0 }, { 1, 8, 20, 32, 32 }, { 1, 1, 1, 1, 1 }, { 0, 1, 0, 0, 0 }, { 0, 1, 0, 0, 0 }, { }, { }, { } }, - StridedSliceParams{ { 0, 0, 0, 0, 0 }, { 1, 16, 10, 16, 16 }, { 1, 1, 2, 1, 1 }, { 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 0 }, { }, { }, { } }, - StridedSliceParams{ { 0, 0, 0, 0, 0 }, { 1, 25, 20, 10, 10 }, { 1, 1, 1, 1, 1 }, { 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 0 }, { }, { }, { } }, - StridedSliceParams{ { 0, 16, 0, 0, 0 }, { 1, 25, 20, 10, 10 }, { 1, 1, 1, 1, 1 }, { 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 0 }, { }, { }, { } }, - StridedSliceParams{ { 0, 16, 0, 0, 0 }, { 1, 64, 20, 10, 10 }, { 1, 1, 1, 1, 1 }, { 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 0 }, { }, { }, { } }, + StridedSliceParams{{0, 0, 0, 5, 4}, + {1, 16, 5, 28, 27}, + {1, 1, 1, 1, 1}, + {0, 0, 0, 0, 0}, + {0, 0, 0, 0, 0}, + {}, + {}, + {}}, + StridedSliceParams{{0, 0, 10, 0, 0}, + {1, 16, 20, 32, 20}, + {1, 1, 1, 1, 1}, + {0, 0, 0, 0, 0}, + {0, 0, 0, 0, 0}, + {}, + {}, + {}}, + StridedSliceParams{{0, 0, 10, 0, 0}, + {1, 16, 20, 32, 20}, + {1, 1, 1, 1, 1}, + {0, 0, 0, 0, 0}, + {0, 1, 0, 0, 0}, + {}, + {}, + {}}, + StridedSliceParams{{0, 0, 0, 20, 20}, + {1, 16, 20, 30, 26}, + {1, 1, 1, 2, 2}, + {0, 0, 0, 0, 0}, + {0, 0, 0, 0, 0}, + {}, + {}, + {}}, + StridedSliceParams{{0, 0, 10, 0, 20}, + {1, 16, 20, 30, 30}, + {1, 1, 2, 1, 1}, + {0, 0, 0, 0, 1}, + {0, 1, 0, 1, 0}, + {}, + {}, + {}}, + StridedSliceParams{{0, 0, 2, 10, 0}, + {1, 16, 10, 32, 20}, + {1, 1, 1, 1, 1}, + {0, 0, 1, 1, 0}, + {0, 0, 0, 0, 1}, + {}, + {}, + {}}, + StridedSliceParams{{0, 0, 0, 10, 0}, + {1, 8, 20, 32, 32}, + {1, 1, 1, 1, 1}, + {0, 1, 0, 0, 0}, + {0, 1, 0, 0, 0}, + {}, + {}, + {}}, + StridedSliceParams{{0, 0, 0, 
0, 0}, + {1, 16, 10, 16, 16}, + {1, 1, 2, 1, 1}, + {0, 0, 0, 0, 0}, + {0, 0, 0, 0, 0}, + {}, + {}, + {}}, + StridedSliceParams{{0, 0, 0, 0, 0}, + {1, 25, 20, 10, 10}, + {1, 1, 1, 1, 1}, + {0, 0, 0, 0, 0}, + {0, 0, 0, 0, 0}, + {}, + {}, + {}}, + StridedSliceParams{{0, 16, 0, 0, 0}, + {1, 25, 20, 10, 10}, + {1, 1, 1, 1, 1}, + {0, 0, 0, 0, 0}, + {0, 0, 0, 0, 0}, + {}, + {}, + {}}, + StridedSliceParams{{0, 16, 0, 0, 0}, + {1, 64, 20, 10, 10}, + {1, 1, 1, 1, 1}, + {0, 0, 0, 0, 0}, + {0, 0, 0, 0, 0}, + {}, + {}, + {}}, }; -const std::vector inputShapesBlockedStatic5DSubset1 = { - { 1, 16, 32, 32, 32 }, { 2, 16, 32, 32, 32 }, { 2, 32, 32, 32, 32 } -}; +const std::vector inputShapesBlockedStatic5DSubset1 = {{1, 16, 32, 32, 32}, + {2, 16, 32, 32, 32}, + {2, 32, 32, 32, 32}}; -const std::vector inputShapesBlockedStatic5DSubset2 = { - { 1, 64, 32, 32, 32 }, { 2, 64, 32, 64, 32 }, { 2, 64, 32, 32, 32 } -}; +const std::vector inputShapesBlockedStatic5DSubset2 = {{1, 64, 32, 32, 32}, + {2, 64, 32, 64, 32}, + {2, 64, 32, 32, 32}}; const std::vector inputShapesBlockedDynamic5DSubset1 = { - {{-1, 16, -1, -1, -1}, - {{ 1, 16, 32, 32, 32 }, { 2, 16, 32, 32, 32 }, { 2, 16, 32, 32, 32 }}}, + {{-1, 16, -1, -1, -1}, {{1, 16, 32, 32, 32}, {2, 16, 32, 32, 32}, {2, 16, 32, 32, 32}}}, - {{{1, 5}, 16, {16, 32}, {16, 32}, {16, 32}}, - {{ 1, 16, 32, 32, 32 }, { 2, 16, 32, 32, 32 }, { 1, 16, 20, 32, 32 }}}, + {{{1, 5}, 16, {16, 32}, {16, 32}, {16, 32}}, {{1, 16, 32, 32, 32}, {2, 16, 32, 32, 32}, {1, 16, 20, 32, 32}}}, }; const std::vector inputShapesBlockedDynamic5DSubset2 = { - {{-1, 64, -1, -1, -1}, - {{ 1, 64, 64, 32, 32 }, { 2, 64, 32, 32, 32 }, { 3, 64, 32, 48, 32 }}}, + {{-1, 64, -1, -1, -1}, {{1, 64, 64, 32, 32}, {2, 64, 32, 32, 32}, {3, 64, 32, 48, 32}}}, - {{{1, 5}, 64, {16, 32}, {16, 32}, {16, 32}}, - {{ 1, 64, 32, 32, 32 }, { 2, 64, 32, 32, 32 }, { 1, 64, 20, 32, 32 }}}, + {{{1, 5}, 64, {16, 32}, {16, 32}, {16, 32}}, {{1, 64, 32, 32, 32}, {2, 64, 32, 32, 32}, {1, 64, 20, 32, 32}}}, }; const std::vector CPUParamsBlocked5D = { - cpuParams_nCdhw16c, - cpuParams_nCdhw8c, + cpuParams_nCdhw16c, + cpuParams_nCdhw8c, }; -INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_Common_Static_5D_Subset1, StridedSliceLayerCPUTest, - ::testing::Combine( - ::testing::ValuesIn(static_shapes_to_test_representation(inputShapesBlockedStatic5DSubset1)), - ::testing::ValuesIn(testCasesBlocked5DSubset1), - ::testing::ValuesIn(inputLayerTypesBlocked), - ::testing::ValuesIn(inputPrecisions), - ::testing::ValuesIn(CPUParamsBlocked5D)), +INSTANTIATE_TEST_SUITE_P( + smoke_CompareWithRefs_Common_Static_5D_Subset1, + StridedSliceLayerCPUTest, + ::testing::Combine(::testing::ValuesIn(static_shapes_to_test_representation(inputShapesBlockedStatic5DSubset1)), + ::testing::ValuesIn(testCasesBlocked5DSubset1), + ::testing::ValuesIn(inputLayerTypesBlocked), + ::testing::ValuesIn(inputPrecisions), + ::testing::ValuesIn(CPUParamsBlocked5D)), + StridedSliceLayerCPUTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_Common_Dynamic_5D_Subset1, + StridedSliceLayerCPUTest, + ::testing::Combine(::testing::ValuesIn(inputShapesBlockedDynamic5DSubset1), + ::testing::ValuesIn(testCasesBlocked5DSubset1), + ::testing::ValuesIn(inputLayerTypesBlocked), + ::testing::ValuesIn(inputPrecisions), + ::testing::ValuesIn(CPUParamsBlocked5D)), StridedSliceLayerCPUTest::getTestCaseName); -INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_Common_Dynamic_5D_Subset1, StridedSliceLayerCPUTest, - ::testing::Combine( - 
::testing::ValuesIn(inputShapesBlockedDynamic5DSubset1), - ::testing::ValuesIn(testCasesBlocked5DSubset1), - ::testing::ValuesIn(inputLayerTypesBlocked), - ::testing::ValuesIn(inputPrecisions), - ::testing::ValuesIn(CPUParamsBlocked5D)), - StridedSliceLayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_Common_Static_5D_Subset2, StridedSliceLayerCPUTest, - ::testing::Combine( - ::testing::ValuesIn(static_shapes_to_test_representation(inputShapesBlockedStatic4DSubset2)), - ::testing::ValuesIn(testCasesBlocked4DSubset2), - ::testing::ValuesIn(inputLayerTypesBlocked), - ::testing::ValuesIn(inputPrecisions), - ::testing::ValuesIn(CPUParamsBlocked4D)), - StridedSliceLayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_Common_Dynamic_5D_Subset2, StridedSliceLayerCPUTest, - ::testing::Combine( - ::testing::ValuesIn(inputShapesBlockedDynamic5DSubset2), - ::testing::ValuesIn(testCasesBlocked5DSubset2), - ::testing::ValuesIn(inputLayerTypesBlocked), - ::testing::ValuesIn(inputPrecisions), - ::testing::ValuesIn(CPUParamsBlocked5D)), +INSTANTIATE_TEST_SUITE_P( + smoke_CompareWithRefs_Common_Static_5D_Subset2, + StridedSliceLayerCPUTest, + ::testing::Combine(::testing::ValuesIn(static_shapes_to_test_representation(inputShapesBlockedStatic4DSubset2)), + ::testing::ValuesIn(testCasesBlocked4DSubset2), + ::testing::ValuesIn(inputLayerTypesBlocked), + ::testing::ValuesIn(inputPrecisions), + ::testing::ValuesIn(CPUParamsBlocked4D)), + StridedSliceLayerCPUTest::getTestCaseName); + +INSTANTIATE_TEST_SUITE_P(smoke_CompareWithRefs_Common_Dynamic_5D_Subset2, + StridedSliceLayerCPUTest, + ::testing::Combine(::testing::ValuesIn(inputShapesBlockedDynamic5DSubset2), + ::testing::ValuesIn(testCasesBlocked5DSubset2), + ::testing::ValuesIn(inputLayerTypesBlocked), + ::testing::ValuesIn(inputPrecisions), + ::testing::ValuesIn(CPUParamsBlocked5D)), StridedSliceLayerCPUTest::getTestCaseName); /* Descriptors check */ @@ -505,27 +668,39 @@ TEST_P(StridedSliceLayerDescriptorCPUTest, DescriptorsCheck) { } const std::vector testCasesDescriptors = { - StridedSliceParams{ { 0, -4, 0, 0 }, { 0, 2147483647, 0, 0 }, { 1, 1, 1, 1 }, { 1, 0, 1, 1 }, { 1, 0, 1, 1 }, { }, { }, { } }, - StridedSliceParams{ { 0, 5, 0, 0 }, { 1, 20, 28, 27 }, { 1, 1, 1, 1 }, { 0, 0, 0, 0 }, { 0, 0, 0, 0 }, { }, { }, { } }, - StridedSliceParams{ { 0, 0, 0, 0 }, { 1, 2147483647, 32, 32 }, { 1, 2, 1, 1 }, { 0, 0, 0, 0 }, { 0, 0, 0, 0 }, { }, { }, { } }, - StridedSliceParams{ { 0, 0, 0, 0 }, { 1, 2147483647, 32, 32 }, { 1, 2, 1, 1 }, { 0, 0, 0, 0 }, { 0, 0, 0, 0 }, { }, {0, 1, 0, 0 }, { } }, - StridedSliceParams{ { 0, 0, 0, 0 }, { 1, 2147483647, 32, 32 }, { 1, 2, 1, 1 }, { 0, 0, 0, 0 }, { 0, 0, 0, 0 }, {0, 0, 1, 0 }, { }, { } }, -}; - -const std::vector inputShapesDescriptors = { - {{}, {{ 1, 16, 32, 32 }}}, - {{}, {{ 1, 17, 32, 32 }}}, - {{1, -1, 32, 32}, {{ 1, 16, 32, 32 }, { 1, 32, 32, 32 }}} + StridedSliceParams{{0, -4, 0, 0}, {0, 2147483647, 0, 0}, {1, 1, 1, 1}, {1, 0, 1, 1}, {1, 0, 1, 1}, {}, {}, {}}, + StridedSliceParams{{0, 5, 0, 0}, {1, 20, 28, 27}, {1, 1, 1, 1}, {0, 0, 0, 0}, {0, 0, 0, 0}, {}, {}, {}}, + StridedSliceParams{{0, 0, 0, 0}, {1, 2147483647, 32, 32}, {1, 2, 1, 1}, {0, 0, 0, 0}, {0, 0, 0, 0}, {}, {}, {}}, + StridedSliceParams{{0, 0, 0, 0}, + {1, 2147483647, 32, 32}, + {1, 2, 1, 1}, + {0, 0, 0, 0}, + {0, 0, 0, 0}, + {}, + {0, 1, 0, 0}, + {}}, + StridedSliceParams{{0, 0, 0, 0}, + {1, 2147483647, 32, 32}, + {1, 2, 1, 1}, + {0, 0, 0, 0}, + {0, 0, 0, 0}, + {0, 0, 1, 0}, + {}, + {}}, }; 
-INSTANTIATE_TEST_SUITE_P(smoke_StridedSliceLayerDescriptorCPUTest, StridedSliceLayerDescriptorCPUTest, - ::testing::Combine( - ::testing::ValuesIn(inputShapesDescriptors), - ::testing::ValuesIn(testCasesDescriptors), - ::testing::Values(ngraph::helpers::InputLayerType::CONSTANT), - ::testing::Values(ElementType::f32), - ::testing::Values(cpuParams_nChw8c)), +const std::vector<InputShape> inputShapesDescriptors = {{{}, {{1, 16, 32, 32}}}, + {{}, {{1, 17, 32, 32}}}, + {{1, -1, 32, 32}, {{1, 16, 32, 32}, {1, 32, 32, 32}}}}; + +INSTANTIATE_TEST_SUITE_P(smoke_StridedSliceLayerDescriptorCPUTest, + StridedSliceLayerDescriptorCPUTest, + ::testing::Combine(::testing::ValuesIn(inputShapesDescriptors), + ::testing::ValuesIn(testCasesDescriptors), + ::testing::Values(ov::test::utils::InputLayerType::CONSTANT), + ::testing::Values(ElementType::f32), + ::testing::Values(cpuParams_nChw8c)), StridedSliceLayerDescriptorCPUTest::getTestCaseName); -} // namespace -} // namespace CPULayerTestsDefinitions +} // namespace +} // namespace CPULayerTestsDefinitions diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/tensor_iterator.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/tensor_iterator.cpp index 1b9e89095b0fcc..ba742ee86c819c 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/tensor_iterator.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/tensor_iterator.cpp @@ -2,29 +2,23 @@ // SPDX-License-Identifier: Apache-2.0 // -#include +#include "common_test_utils/node_builders/activation.hpp" +#include "common_test_utils/ov_tensor_utils.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" -#include "ov_models/builders.hpp" -#include -using namespace InferenceEngine; using namespace ov; using namespace test; -namespace CPULayerTestsDefinitions { - -using TensorIteratorParams = typename std::tuple< - std::vector<InputShape>, // Input shapes - ngraph::op::RecurrentSequenceDirection, // Direction - ElementType>; // element type - +using TensorIteratorParams = typename std::tuple<std::vector<InputShape>, // Input shapes + ov::op::RecurrentSequenceDirection, // Direction + ElementType>; // element type class TensorIteratorCPUTest : public testing::WithParamInterface<TensorIteratorParams>, virtual public SubgraphBaseTest { public: static std::string getTestCaseName(testing::TestParamInfo<TensorIteratorParams> obj) { std::vector<InputShape> shapes; - ngraph::op::RecurrentSequenceDirection direction; + ov::op::RecurrentSequenceDirection direction; ElementType inType; std::tie(shapes, direction, inType) = obj.param; @@ -45,7 +39,7 @@ class TensorIteratorCPUTest : public testing::WithParamInterface<TensorIteratorParams>, void SetUp() override { std::vector<InputShape> shapes; - ngraph::op::RecurrentSequenceDirection direction; + ov::op::RecurrentSequenceDirection direction; ElementType inType; std::tie(shapes, direction, inType) = this->GetParam(); @@ -53,31 +47,31 @@ class TensorIteratorCPUTest : public testing::WithParamInterface<TensorIteratorParams>, - auto tensor_iterator = std::make_shared(); + auto tensor_iterator = std::make_shared<ov::op::v0::TensorIterator>(); ov::ParameterVector params; for (auto&& shape : inputDynamicShapes) { params.push_back(std::make_shared<ov::op::v0::Parameter>(inType, shape)); } - ngraph::ParameterVector body_params; + ov::ParameterVector body_params; for (size_t i = 0; i < shapes.size(); i++) { - ngraph::PartialShape shape = shapes[i].first; + ov::PartialShape shape = shapes[i].first; shape[sequence_axis] = 1; - auto paramNode = std::make_shared<ngraph::opset1::Parameter>(inType, shape); + auto paramNode = std::make_shared<ov::op::v0::Parameter>(inType, shape); body_params.push_back(paramNode); } - auto tanh = ngraph::builder::makeActivation(body_params[0], inType, ngraph::helpers::Tanh); - auto relu =
ngraph::builder::makeActivation(body_params[1], inType, ngraph::helpers::Relu); + auto tanh = ov::test::utils::make_activation(body_params[0], inType, ov::test::utils::ActivationTypes::Tanh); + auto relu = ov::test::utils::make_activation(body_params[1], inType, ov::test::utils::ActivationTypes::Relu); + auto add = std::make_shared<ov::op::v1::Add>(tanh, relu); - auto body = std::make_shared<ngraph::Function>(ngraph::OutputVector{add}, body_params, "body"); + auto body = std::make_shared<ov::Model>(ov::OutputVector{add}, body_params, "body"); tensor_iterator->set_function(body); - if (direction == ngraph::op::RecurrentSequenceDirection::FORWARD) { + if (direction == ov::op::RecurrentSequenceDirection::FORWARD) { tensor_iterator->set_sliced_input(body_params[0], params[0], 0, 1, 1, -1, sequence_axis); tensor_iterator->set_sliced_input(body_params[1], params[1], 0, 1, 1, -1, sequence_axis); tensor_iterator->get_concatenated_slices(add, 0, 1, 1, -1, sequence_axis); - } else if (direction == ngraph::op::RecurrentSequenceDirection::REVERSE) { + } else if (direction == ov::op::RecurrentSequenceDirection::REVERSE) { tensor_iterator->set_sliced_input(body_params[0], params[0], -1, -1, 1, 0, sequence_axis); tensor_iterator->set_sliced_input(body_params[1], params[1], -1, -1, 1, 0, sequence_axis); tensor_iterator->get_concatenated_slices(add, -1, -1, 1, 0, sequence_axis); @@ -85,7 +79,7 @@ class TensorIteratorCPUTest : public testing::WithParamInterface<TensorIteratorParams>, - function = std::make_shared<ngraph::Function>(ngraph::OutputVector{tensor_iterator->output(0)}, params); + function = std::make_shared<ov::Model>(ov::OutputVector{tensor_iterator->output(0)}, params); } }; @@ -95,64 +89,51 @@ TEST_P(TensorIteratorCPUTest, CompareWithRefs) { namespace { -const std::vector<ElementType> inputPrecisions = { - ElementType::f32, - ElementType::bf16, - ElementType::i8 -}; - -std::vector<ngraph::op::RecurrentSequenceDirection> direction = {ngraph::op::RecurrentSequenceDirection::FORWARD, - ngraph::op::RecurrentSequenceDirection::REVERSE}; -std::vector<std::vector<InputShape>> inputs = { - { //first test suit - { //dynamic shape for first input - {-1, -1, -1}, - { // target static shapes - {10, 12, 10}, - {10, 8, 10}, - {1, 8, 2}, - {5, 3, 3} - } - }, - { //dynamic shape for second input - {-1, -1, -1}, - { // target static shapes - {1, 12, 1}, - {1, 8, 1}, - {5, 8, 2}, - {5, 3, 3} - } - }, - }, - - { //second test suit - { //dynamic shape for first input - {{1, 12}, 5, {1, 12}}, - { // target static shapes - {1, 5, 1}, - {5, 5, 5}, - {1, 5, 1}, - {5, 5, 5} - } - }, - { //dynamic shape for second input - {{1, 12}, 5, {1, 12}}, - { // target static shapes - {1, 5, 1}, - {1, 5, 1}, - {5, 5, 1}, - {5, 5, 5} - } - }, - } -}; - -INSTANTIATE_TEST_SUITE_P(smoke_TensorIteratorSimple, TensorIteratorCPUTest, - ::testing::Combine( - ::testing::ValuesIn(inputs), - ::testing::ValuesIn(direction), - ::testing::ValuesIn(inputPrecisions)), +const std::vector<ElementType> inputPrecisions = {ElementType::f32, ElementType::bf16, ElementType::i8}; + +std::vector<ov::op::RecurrentSequenceDirection> direction = {ov::op::RecurrentSequenceDirection::FORWARD, + ov::op::RecurrentSequenceDirection::REVERSE}; +std::vector<std::vector<InputShape>> inputs = {{ + // first test suit + {// dynamic shape for first input + {-1, -1, -1}, + {// target static shapes + {10, 12, 10}, + {10, 8, 10}, + {1, 8, 2}, + {5, 3, 3}}}, + {// dynamic shape for second input + {-1, -1, -1}, + {// target static shapes + {1, 12, 1}, + {1, 8, 1}, + {5, 8, 2}, + {5, 3, 3}}}, + }, + + { + // second test suit + {// dynamic shape for first input + {{1, 12}, 5, {1, 12}}, + {// target static shapes + {1, 5, 1}, + {5, 5, 5}, + {1, 5, 1}, + {5, 5, 5}}}, + {// dynamic shape for second input +
{{1, 12}, 5, {1, 12}}, + {// target static shapes + {1, 5, 1}, + {1, 5, 1}, + {5, 5, 1}, + {5, 5, 5}}}, + }}; + +INSTANTIATE_TEST_SUITE_P(smoke_TensorIteratorSimple, + TensorIteratorCPUTest, + ::testing::Combine(::testing::ValuesIn(inputs), + ::testing::ValuesIn(direction), + ::testing::ValuesIn(inputPrecisions)), TensorIteratorCPUTest::getTestCaseName); -} // namespace -} // namespace CPULayerTestsDefinitions +} // namespace \ No newline at end of file diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/tile.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/tile.cpp index a0a68a6f85a2e4..ea79bd633852e7 100644 --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/tile.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/tile.cpp @@ -3,27 +3,22 @@ // #include "test_utils/cpu_test_utils.hpp" -#include "ov_models/builders.hpp" +#include "common_test_utils/ov_tensor_utils.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" -#include using namespace CPUTestUtils; -namespace CPULayerTestsDefinitions { +using TileLayerTestParamsSet = typename std::tuple<std::vector<ov::test::InputShape>, // Input shapes + std::vector<int64_t>, // Repeats + ov::element::Type_t, // Network precision + bool, // Is Repeats input constant + std::string>; // Device name -using TileLayerTestParamsSet = typename std::tuple< - std::vector<ov::test::InputShape>, // Input shapes - std::vector<int64_t>, // Repeats - ov::element::Type_t, // Network precision - bool, // Is Repeats input constant - std::string>; // Device name - -typedef std::tuple< - TileLayerTestParamsSet, - CPUSpecificParams> TileLayerCPUTestParamsSet; +typedef std::tuple<TileLayerTestParamsSet, CPUSpecificParams> TileLayerCPUTestParamsSet; class TileLayerCPUTest : public testing::WithParamInterface<TileLayerCPUTestParamsSet>, - virtual public ov::test::SubgraphBaseTest, public CPUTestsBase { + virtual public ov::test::SubgraphBaseTest, + public CPUTestsBase { public: static std::string getTestCaseName(testing::TestParamInfo<TileLayerCPUTestParamsSet> obj) { TileLayerTestParamsSet basicParamsSet; @@ -48,7 +43,7 @@ class TileLayerCPUTest : public testing::WithParamInterface<TileLayerCPUTestParamsSet>, - inputDynamicShapes.push_back({ static_cast<int64_t>(repeatsData.size()) }); + inputDynamicShapes.push_back({static_cast<int64_t>(repeatsData.size())}); } } const size_t targetStaticShapeSize = inputShapes.front().second.size(); @@ -84,16 +79,18 @@ class TileLayerCPUTest : public testing::WithParamInterface<TileLayerCPUTestParamsSet>, - functionParams.push_back(std::make_shared<ov::op::v0::Parameter>(netPrecision, targetStaticShapes.front().front())); + functionParams.push_back( + std::make_shared<ov::op::v0::Parameter>(netPrecision, targetStaticShapes.front().front())); } else { functionParams.push_back(std::make_shared<ov::op::v0::Parameter>(netPrecision, inputDynamicShapes.front())); if (!isRepeatsConst) { - functionParams.push_back(std::make_shared<ov::op::v0::Parameter>(ov::element::i64, inputDynamicShapes[1])); + functionParams.push_back( + std::make_shared<ov::op::v0::Parameter>(ov::element::i64, inputDynamicShapes[1])); functionParams.back()->set_friendly_name("repeats"); } } @@ -101,15 +98,16 @@ class TileLayerCPUTest : public testing::WithParamInterface<TileLayerCPUTestParamsSet>, std::shared_ptr<ov::Node> tileNode; if (isRepeatsConst) { - tileNode = std::make_shared<ov::op::v0::Tile>(functionParams[0], - ov::op::v0::Constant::create(ov::element::i64, { repeatsData.size() }, repeatsData)); + tileNode = std::make_shared<ov::op::v0::Tile>( + functionParams[0], + ov::op::v0::Constant::create(ov::element::i64, {repeatsData.size()}, repeatsData)); } else { tileNode = std::make_shared<ov::op::v0::Tile>(functionParams[0], functionParams[1]); } function = makeNgraphFunction(netPrecision, functionParams, tileNode, "CPUTile"); } - void generate_inputs(const std::vector<ngraph::Shape>& targetInputStaticShapes) override { + void generate_inputs(const std::vector<ov::Shape>& targetInputStaticShapes) override { inputs.clear(); const auto& funcInputs = function->inputs(); for
(size_t i = 0lu; i < funcInputs.size(); i++) { @@ -123,10 +121,14 @@ class TileLayerCPUTest : public testing::WithParamInterface netPrecisions = { - ov::element::f32, - ov::element::bf16, - ov::element::i32, - ov::element::i8 -}; - -const std::vector> staticInputShapes4D = { - { - {{}, - { // Static shapes - {2, 16, 3, 4} - } - } - }, - { - {{}, - { // Static shapes - {1, 16, 1, 1} - } - } - } -}; +const std::vector netPrecisions = {ov::element::f32, + ov::element::bf16, + ov::element::i32, + ov::element::i8}; + +const std::vector> staticInputShapes4D = {{{{}, + {// Static shapes + {2, 16, 3, 4}}}}, + {{{}, + {// Static shapes + {1, 16, 1, 1}}}}}; const std::vector> dynamicInputShapes4D = { - { - { // Origin dynamic shapes - {ov::Dimension(1, 20), ov::Dimension(10, 20), ov::Dimension(1, 20), ov::Dimension(1, 20)}, - { // Dynamic shapes instances - {2, 16, 3, 4}, - {1, 16, 1, 1}, - {1, 16, 2, 3} - } - } - }, - { - { // Origin dynamic shapes - {-1, -1, -1, -1}, - { // Dynamic shapes instances - {3, 15, 5, 7}, - {4, 55, 8, 24} - } - } - } -}; - -const std::vector> staticInputShapes5D = { - { - {{}, - { // Static shapes - {2, 16, 2, 3, 4} - } - } - } -}; + {{// Origin dynamic shapes + {ov::Dimension(1, 20), ov::Dimension(10, 20), ov::Dimension(1, 20), ov::Dimension(1, 20)}, + {// Dynamic shapes instances + {2, 16, 3, 4}, + {1, 16, 1, 1}, + {1, 16, 2, 3}}}}, + {{// Origin dynamic shapes + {-1, -1, -1, -1}, + {// Dynamic shapes instances + {3, 15, 5, 7}, + {4, 55, 8, 24}}}}}; + +const std::vector> staticInputShapes5D = {{{{}, + {// Static shapes + {2, 16, 2, 3, 4}}}}}; const std::vector> dynamicInputShapes5D = { - { - { // Origin dynamic shapes - {ov::Dimension(1, 20), ov::Dimension(1, 20), ov::Dimension(1, 20), ov::Dimension(1, 20), ov::Dimension(1, 70)}, - { // Dynamic shapes instances - {2, 16, 2, 3, 4}, - {1, 16, 8, 5, 4}, - {8, 1, 2, 3, 64} - } - } - }, - { - { // Origin dynamic shapes - {-1, -1, -1, -1, -1}, - { // Dynamic shapes instances - {2, 16, 2, 3, 4}, - {1, 16, 8, 5, 4}, - {8, 1, 2, 3, 64} - } - } - } -}; - -const std::vector> repeats4D = { - {2, 3}, - {1, 2, 3}, - {1, 1, 1, 1}, - {1, 1, 2, 3}, - {1, 2, 1, 3}, - {2, 1, 1, 1}, - {2, 3, 1, 1} -}; -const std::vector> repeats5D = { - {1, 2, 3}, - {1, 1, 2, 3}, - {1, 1, 1, 2, 3}, - {1, 2, 1, 1, 3}, - {2, 1, 1, 1, 1}, - {2, 3, 1, 1, 1} -}; + {{// Origin dynamic shapes + {ov::Dimension(1, 20), ov::Dimension(1, 20), ov::Dimension(1, 20), ov::Dimension(1, 20), ov::Dimension(1, 70)}, + {// Dynamic shapes instances + {2, 16, 2, 3, 4}, + {1, 16, 8, 5, 4}, + {8, 1, 2, 3, 64}}}}, + {{// Origin dynamic shapes + {-1, -1, -1, -1, -1}, + {// Dynamic shapes instances + {2, 16, 2, 3, 4}, + {1, 16, 8, 5, 4}, + {8, 1, 2, 3, 64}}}}}; + +const std::vector> repeats4D = + {{2, 3}, {1, 2, 3}, {1, 1, 1, 1}, {1, 1, 2, 3}, {1, 2, 1, 3}, {2, 1, 1, 1}, {2, 3, 1, 1}}; +const std::vector> repeats5D = + {{1, 2, 3}, {1, 1, 2, 3}, {1, 1, 1, 2, 3}, {1, 2, 1, 1, 3}, {2, 1, 1, 1, 1}, {2, 3, 1, 1, 1}}; const std::vector CPUParams4D = { - cpuParams_nchw, - cpuParams_nChw16c, - cpuParams_nChw8c, - cpuParams_nhwc, + cpuParams_nchw, + cpuParams_nChw16c, + cpuParams_nChw8c, + cpuParams_nhwc, }; const std::vector CPUParams5D = { - cpuParams_ncdhw, - cpuParams_nCdhw16c, - cpuParams_nCdhw8c, - cpuParams_ndhwc, + cpuParams_ncdhw, + cpuParams_nCdhw16c, + cpuParams_nCdhw8c, + cpuParams_ndhwc, }; /* ============= */ /* INSTANCES */ -INSTANTIATE_TEST_CASE_P(smoke_StaticShape4D, TileLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - ::testing::ValuesIn(staticInputShapes4D), - 
::testing::ValuesIn(repeats4D), - ::testing::ValuesIn(netPrecisions), - ::testing::Values(true), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(CPUParams4D)), +INSTANTIATE_TEST_CASE_P(smoke_StaticShape4D, + TileLayerCPUTest, + ::testing::Combine(::testing::Combine(::testing::ValuesIn(staticInputShapes4D), + ::testing::ValuesIn(repeats4D), + ::testing::ValuesIn(netPrecisions), + ::testing::Values(true), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::ValuesIn(CPUParams4D)), TileLayerCPUTest::getTestCaseName); -INSTANTIATE_TEST_CASE_P(smoke_DynamicShape4D, TileLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - ::testing::ValuesIn(dynamicInputShapes4D), - ::testing::ValuesIn(repeats4D), - ::testing::ValuesIn(netPrecisions), - ::testing::Values(true, false), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::Values(CPUSpecificParams{{}, {}, {}, "ref"})), +INSTANTIATE_TEST_CASE_P(smoke_DynamicShape4D, + TileLayerCPUTest, + ::testing::Combine(::testing::Combine(::testing::ValuesIn(dynamicInputShapes4D), + ::testing::ValuesIn(repeats4D), + ::testing::ValuesIn(netPrecisions), + ::testing::Values(true, false), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::Values(CPUSpecificParams{{}, {}, {}, "ref"})), TileLayerCPUTest::getTestCaseName); -const std::vector> dynBatchInputShapes4D = { - { // Origin dynamic shapes - { - {{1, 20}, 16, 3, 4}, - { // Dynamic shapes instances - {2, 16, 3, 4}, - {1, 16, 3, 4}, - {3, 16, 3, 4} - } - } - } -}; - -INSTANTIATE_TEST_CASE_P(smoke_DynBatch4D, TileLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - ::testing::ValuesIn(dynBatchInputShapes4D), - ::testing::Values(std::vector{1, 2, 1, 3}), - ::testing::ValuesIn(netPrecisions), - ::testing::Values(true), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::Values(CPUSpecificParams{{}, {}, {}, "ref"})), +const std::vector> dynBatchInputShapes4D = {{// Origin dynamic shapes + {{{1, 20}, 16, 3, 4}, + {// Dynamic shapes instances + {2, 16, 3, 4}, + {1, 16, 3, 4}, + {3, 16, 3, 4}}}}}; + +INSTANTIATE_TEST_CASE_P(smoke_DynBatch4D, + TileLayerCPUTest, + ::testing::Combine(::testing::Combine(::testing::ValuesIn(dynBatchInputShapes4D), + ::testing::Values(std::vector{1, 2, 1, 3}), + ::testing::ValuesIn(netPrecisions), + ::testing::Values(true), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::Values(CPUSpecificParams{{}, {}, {}, "ref"})), TileLayerCPUTest::getTestCaseName); -INSTANTIATE_TEST_CASE_P(smoke_StaticShape5D, TileLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - ::testing::ValuesIn(staticInputShapes5D), - ::testing::ValuesIn(repeats5D), - ::testing::ValuesIn(netPrecisions), - ::testing::Values(true), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - ::testing::ValuesIn(CPUParams5D)), +INSTANTIATE_TEST_CASE_P(smoke_StaticShape5D, + TileLayerCPUTest, + ::testing::Combine(::testing::Combine(::testing::ValuesIn(staticInputShapes5D), + ::testing::ValuesIn(repeats5D), + ::testing::ValuesIn(netPrecisions), + ::testing::Values(true), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::ValuesIn(CPUParams5D)), TileLayerCPUTest::getTestCaseName); -INSTANTIATE_TEST_CASE_P(smoke_DynamicShape5D, TileLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - ::testing::ValuesIn(dynamicInputShapes5D), - ::testing::ValuesIn(repeats5D), - ::testing::ValuesIn(netPrecisions), - ::testing::Values(true, false), - ::testing::Values(ov::test::utils::DEVICE_CPU)), - 
::testing::Values(CPUSpecificParams{{}, {}, {}, "ref"})), +INSTANTIATE_TEST_CASE_P(smoke_DynamicShape5D, + TileLayerCPUTest, + ::testing::Combine(::testing::Combine(::testing::ValuesIn(dynamicInputShapes5D), + ::testing::ValuesIn(repeats5D), + ::testing::ValuesIn(netPrecisions), + ::testing::Values(true, false), + ::testing::Values(ov::test::utils::DEVICE_CPU)), + ::testing::Values(CPUSpecificParams{{}, {}, {}, "ref"})), TileLayerCPUTest::getTestCaseName); /* ========= */ -} // namespace - -} // namespace CPULayerTestsDefinitions +} // namespace \ No newline at end of file diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/topk.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/topk.cpp index 4d52c4f24d5dd7..afd63882920f7d 100--- --- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/topk.cpp +++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/topk.cpp @@ -2,42 +2,36 @@ // SPDX-License-Identifier: Apache-2.0 // -#include -#include "test_utils/cpu_test_utils.hpp" +#include "common_test_utils/ov_tensor_utils.hpp" #include "shared_test_classes/base/ov_subgraph.hpp" -#include "ov_models/builders.hpp" +#include "test_utils/cpu_test_utils.hpp" -using namespace InferenceEngine; using namespace CPUTestUtils; using namespace ov::test; using SortMode = ov::op::TopKMode; using SortType = ov::op::TopKSortType; -namespace CPULayerTestsDefinitions { - -typedef std::tuple< - int64_t, // keepK - int64_t, // axis - SortMode, // mode - std::tuple<SortType, bool>, // sort and stable - ElementType, // Net precision - ElementType, // Input precision - ElementType, // Output precision - InputShape // inputShape -> basicTopKParams; +typedef std::tuple<int64_t, // keepK + int64_t, // axis + SortMode, // mode + std::tuple<SortType, bool>, // sort and stable + ElementType, // Net type + ElementType, // Input type + ElementType, // Output type + InputShape // inputShape + > + basicTopKParams; -typedef std::tuple< - basicTopKParams, - CPUSpecificParams, - std::map<std::string, std::string>> TopKLayerCPUTestParamsSet; +typedef std::tuple<basicTopKParams, CPUSpecificParams, ov::AnyMap> TopKLayerCPUTestParamsSet; class TopKLayerCPUTest : public testing::WithParamInterface<TopKLayerCPUTestParamsSet>, - virtual public SubgraphBaseTest, public CPUTestsBase { + virtual public SubgraphBaseTest, + public CPUTestsBase { public: static std::string getTestCaseName(testing::TestParamInfo<TopKLayerCPUTestParamsSet> obj) { basicTopKParams basicParamsSet; CPUSpecificParams cpuParams; - std::map<std::string, std::string> additionalConfig; + ov::AnyMap additionalConfig; std::tie(basicParamsSet, cpuParams, additionalConfig) = obj.param; int64_t keepK, axis; @@ -60,7 +54,8 @@ class TopKLayerCPUTest : public testing::WithParamInterface<TopKLayerCPUTestParamsSet>, auto k = std::make_shared<ov::op::v0::Constant>(ElementType::i64, ov::Shape{}, &keepK); topk = std::dynamic_pointer_cast<ov::op::v11::TopK>( - std::make_shared<ov::op::v11::TopK>(params[0], k, axis, mode, sort, ElementType::i32, stable)); + std::make_shared<ov::op::v11::TopK>(params[0], k, axis, mode, sort, ElementType::i32, stable)); } else { auto k = std::make_shared<ov::op::v0::Parameter>(ElementType::i64, inputDynamicShapes[1]); params.push_back(k); topk = std::dynamic_pointer_cast<ov::op::v11::TopK>( - std::make_shared<ov::op::v11::TopK>(params[0], k, axis, mode, sort, ElementType::i32, stable)); + std::make_shared<ov::op::v11::TopK>(params[0], k, axis, mode, sort, ElementType::i32, stable)); } topk->get_rt_info() = getCPUInfo(); - ngraph::ResultVector results; + ov::ResultVector results; for (size_t i = 0; i < topk->get_output_size(); i++) { results.push_back(std::make_shared<ov::op::v0::Result>(topk->output(i))); } - function = std::make_shared<ngraph::Function>(results, params, "TopK"); + function = std::make_shared<ov::Model>(results, params, "TopK"); } void generate_inputs(const std::vector<ov::Shape>& targetInputStaticShapes) override { @@ -160,7 +155,7 @@ class TopKLayerCPUTest : public testing::WithParamInterface<TopKLayerCPUTestParamsSet>, std::vector<int> data(size); // For
int32, deliberately set big numbers which are not accurately representable in fp32 - int start = netPrecision == ElementType::i32 ? pow(2, 30) + 1 : - static_cast(size / 2); + int start = netPrecision == ElementType::i32 ? pow(2, 30) + 1 : -static_cast(size / 2); size_t set_size = sort == SortType::SORT_VALUES && stable ? size / 2 : size; std::iota(data.begin(), data.begin() + set_size, start); if (sort == SortType::SORT_VALUES && stable) { @@ -170,12 +165,12 @@ class TopKLayerCPUTest : public testing::WithParamInterface(tensor.data()); + auto* rawBlobDataPtr = static_cast(tensor.data()); for (size_t i = 0; i < size; ++i) { rawBlobDataPtr[i] = static_cast(data[i]); } } else { - auto *rawBlobDataPtr = static_cast(tensor.data()); + auto* rawBlobDataPtr = static_cast(tensor.data()); for (size_t i = 0; i < size; ++i) { rawBlobDataPtr[i] = static_cast(data[i]); } @@ -190,11 +185,11 @@ class TopKLayerCPUTest : public testing::WithParamInterface(tensor.data()); + auto* rawBlobDataPtr = static_cast(tensor.data()); for (size_t o = 0; o < O; o++) { for (size_t i = 0; i < I; i++) { std::vector data(A); - int start = - static_cast(A / 2); + int start = -static_cast(A / 2); std::iota(data.begin(), data.end(), start); const size_t seed = (o + 1) * (i + 1); std::mt19937 gen(seed); @@ -248,68 +243,57 @@ const std::vector netPrecisions = { ElementType::f32, }; -std::vector> additionalConfig = { - {{PluginConfigParams::KEY_ENFORCE_BF16, PluginConfigParams::NO}}, - {{PluginConfigParams::KEY_ENFORCE_BF16, PluginConfigParams::YES}} -}; +std::vector additionalConfig = {{{ov::hint::inference_precision(ov::element::f32)}}, + {{ov::hint::inference_precision(ov::element::bf16)}}}; const std::vector axes = {0, 1, 2, 3}; const std::vector k = {1, 5, 7, 18, 21}; -const std::vector modes = { - SortMode::MIN, - SortMode::MAX -}; +const std::vector modes = {SortMode::MIN, SortMode::MAX}; const std::vector> sortTypeStable = { std::tuple{SortType::SORT_VALUES, false}, std::tuple{SortType::SORT_VALUES, true}, - std::tuple{SortType::SORT_INDICES, false} -}; + std::tuple{SortType::SORT_INDICES, false}}; std::vector inputShapes = { {{}, {{21, 21, 21, 21}}}, }; std::vector inputShapesDynamic = { - {{21, {20, 25}, 21, {20, 25}}, {{21, 21, 21, 21}, {21, 22, 21, 23}}} -}; - -std::vector cpuParams = { - CPUSpecificParams({nChw16c, x}, {nChw16c, nChw16c}, {}, {}), - CPUSpecificParams({nchw, x}, {nchw, nchw}, {}, {}), - CPUSpecificParams({nhwc, x}, {nhwc, nhwc}, {}, {}) -}; - -INSTANTIATE_TEST_CASE_P(smoke_TopK, TopKLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - ::testing::ValuesIn(k), - ::testing::ValuesIn(axes), - ::testing::ValuesIn(modes), - ::testing::ValuesIn(sortTypeStable), - ::testing::ValuesIn(netPrecisions), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::ValuesIn(inputShapes)), - ::testing::ValuesIn(filterCPUSpecificParams(cpuParams)), - ::testing::ValuesIn(additionalConfig)), - TopKLayerCPUTest::getTestCaseName); - -INSTANTIATE_TEST_CASE_P(smoke_TopK_dynamic, TopKLayerCPUTest, - ::testing::Combine( - ::testing::Combine( - ::testing::Values(1), - ::testing::ValuesIn(axes), - ::testing::ValuesIn(modes), - ::testing::ValuesIn(sortTypeStable), - ::testing::ValuesIn(netPrecisions), - ::testing::Values(ElementType::undefined), - ::testing::Values(ElementType::undefined), - ::testing::ValuesIn(inputShapesDynamic)), - ::testing::ValuesIn(filterCPUSpecificParams(cpuParams)), - ::testing::ValuesIn(additionalConfig)), - TopKLayerCPUTest::getTestCaseName); + {{21, 
{20, 25}, 21, {20, 25}}, {{21, 21, 21, 21}, {21, 22, 21, 23}}}};
+
+std::vector<CPUSpecificParams> cpuParams = {CPUSpecificParams({nChw16c, x}, {nChw16c, nChw16c}, {}, {}),
+                                            CPUSpecificParams({nchw, x}, {nchw, nchw}, {}, {}),
+                                            CPUSpecificParams({nhwc, x}, {nhwc, nhwc}, {}, {})};
+
+INSTANTIATE_TEST_CASE_P(smoke_TopK,
+                        TopKLayerCPUTest,
+                        ::testing::Combine(::testing::Combine(::testing::ValuesIn(k),
+                                                              ::testing::ValuesIn(axes),
+                                                              ::testing::ValuesIn(modes),
+                                                              ::testing::ValuesIn(sortTypeStable),
+                                                              ::testing::ValuesIn(netPrecisions),
+                                                              ::testing::Values(ElementType::undefined),
+                                                              ::testing::Values(ElementType::undefined),
+                                                              ::testing::ValuesIn(inputShapes)),
+                                           ::testing::ValuesIn(filterCPUSpecificParams(cpuParams)),
+                                           ::testing::ValuesIn(additionalConfig)),
+                        TopKLayerCPUTest::getTestCaseName);
+
+INSTANTIATE_TEST_CASE_P(smoke_TopK_dynamic,
+                        TopKLayerCPUTest,
+                        ::testing::Combine(::testing::Combine(::testing::Values(1),
+                                                              ::testing::ValuesIn(axes),
+                                                              ::testing::ValuesIn(modes),
+                                                              ::testing::ValuesIn(sortTypeStable),
+                                                              ::testing::ValuesIn(netPrecisions),
+                                                              ::testing::Values(ElementType::undefined),
+                                                              ::testing::Values(ElementType::undefined),
+                                                              ::testing::ValuesIn(inputShapesDynamic)),
+                                           ::testing::ValuesIn(filterCPUSpecificParams(cpuParams)),
+                                           ::testing::ValuesIn(additionalConfig)),
+                        TopKLayerCPUTest::getTestCaseName);
 
 const std::vector<int64_t> k_int32 = {1, 5, 7, 9};
 
@@ -318,115 +302,107 @@ std::vector<InputShape> inputShapes_int32 = {
 };
 
 std::vector<InputShape> inputShapesDynamic_int32 = {
-    {{9, {5, 10}, 9, {5, 10}}, {{9, 9, 9, 9}, {9, 10, 9, 10}}}
-};
-
-INSTANTIATE_TEST_CASE_P(smoke_TopK_int32, TopKLayerCPUTest,
-        ::testing::Combine(
-            ::testing::Combine(
-                ::testing::ValuesIn(k_int32),
-                ::testing::ValuesIn(axes),
-                ::testing::ValuesIn(modes),
-                ::testing::ValuesIn(sortTypeStable),
-                ::testing::Values(ElementType::i32),
-                ::testing::Values(ElementType::undefined),
-                ::testing::Values(ElementType::undefined),
-                ::testing::ValuesIn(inputShapes_int32)),
-            ::testing::ValuesIn(filterCPUSpecificParams(cpuParams)),
-            ::testing::Values(additionalConfig[0])),
-        TopKLayerCPUTest::getTestCaseName);
-
-INSTANTIATE_TEST_CASE_P(smoke_TopK_int32_dynamic, TopKLayerCPUTest,
-        ::testing::Combine(
-            ::testing::Combine(
-                ::testing::Values(1),
-                ::testing::ValuesIn(axes),
-                ::testing::ValuesIn(modes),
-                ::testing::ValuesIn(sortTypeStable),
-                ::testing::Values(ElementType::i32),
-                ::testing::Values(ElementType::undefined),
-                ::testing::Values(ElementType::undefined),
-                ::testing::ValuesIn(inputShapesDynamic_int32)),
-            ::testing::ValuesIn(filterCPUSpecificParams(cpuParams)),
-            ::testing::Values(additionalConfig[0])),
-        TopKLayerCPUTest::getTestCaseName);
+    {{9, {5, 10}, 9, {5, 10}}, {{9, 9, 9, 9}, {9, 10, 9, 10}}}};
+
+INSTANTIATE_TEST_CASE_P(smoke_TopK_int32,
+                        TopKLayerCPUTest,
+                        ::testing::Combine(::testing::Combine(::testing::ValuesIn(k_int32),
+                                                              ::testing::ValuesIn(axes),
+                                                              ::testing::ValuesIn(modes),
+                                                              ::testing::ValuesIn(sortTypeStable),
+                                                              ::testing::Values(ElementType::i32),
+                                                              ::testing::Values(ElementType::undefined),
+                                                              ::testing::Values(ElementType::undefined),
+                                                              ::testing::ValuesIn(inputShapes_int32)),
+                                           ::testing::ValuesIn(filterCPUSpecificParams(cpuParams)),
+                                           ::testing::Values(additionalConfig[0])),
+                        TopKLayerCPUTest::getTestCaseName);
+
+INSTANTIATE_TEST_CASE_P(smoke_TopK_int32_dynamic,
+                        TopKLayerCPUTest,
+                        ::testing::Combine(::testing::Combine(::testing::Values(1),
+                                                              ::testing::ValuesIn(axes),
+                                                              ::testing::ValuesIn(modes),
+                                                              ::testing::ValuesIn(sortTypeStable),
+                                                              ::testing::Values(ElementType::i32),
+                                                              ::testing::Values(ElementType::undefined),
+                                                              ::testing::Values(ElementType::undefined),
+                                                              ::testing::ValuesIn(inputShapesDynamic_int32)),
+                                           ::testing::ValuesIn(filterCPUSpecificParams(cpuParams)),
+                                           ::testing::Values(additionalConfig[0])),
+                        TopKLayerCPUTest::getTestCaseName);
 
 std::vector<InputShape> inputShapes_bubble_BLK_on_channel_horiz = {
     {{}, {{2, 2, 2, 2}}},
 };
 
 std::vector<InputShape> inputShapesDynamic_bubble_BLK_on_channel_horiz = {
-    {{2, {2, 3}, 2, 2}, {{2, 2, 2, 2}, {2, 3, 2, 2}}}
-};
-
-INSTANTIATE_TEST_CASE_P(smoke_TopK_bubble_BLK_on_channel_horiz, TopKLayerCPUTest,
-        ::testing::Combine(
-            ::testing::Combine(
-                ::testing::Values(1),
-                ::testing::Values(1),
-                ::testing::ValuesIn(modes),
-                ::testing::ValuesIn(sortTypeStable),
-                ::testing::ValuesIn(netPrecisions),
-                ::testing::Values(ElementType::undefined),
-                ::testing::Values(ElementType::undefined),
-                ::testing::ValuesIn(inputShapes_bubble_BLK_on_channel_horiz)),
-            ::testing::Values(CPUSpecificParams({nChw16c, x}, {nChw16c, nChw16c}, {}, {})),
-            ::testing::ValuesIn(additionalConfig)),
+    {{2, {2, 3}, 2, 2}, {{2, 2, 2, 2}, {2, 3, 2, 2}}}};
+
+INSTANTIATE_TEST_CASE_P(
+    smoke_TopK_bubble_BLK_on_channel_horiz,
+    TopKLayerCPUTest,
+    ::testing::Combine(::testing::Combine(::testing::Values(1),
+                                          ::testing::Values(1),
+                                          ::testing::ValuesIn(modes),
+                                          ::testing::ValuesIn(sortTypeStable),
+                                          ::testing::ValuesIn(netPrecisions),
+                                          ::testing::Values(ElementType::undefined),
+                                          ::testing::Values(ElementType::undefined),
+                                          ::testing::ValuesIn(inputShapes_bubble_BLK_on_channel_horiz)),
+                       ::testing::Values(CPUSpecificParams({nChw16c, x}, {nChw16c, nChw16c}, {}, {})),
+                       ::testing::ValuesIn(additionalConfig)),
     TopKLayerCPUTest::getTestCaseName);
 
-INSTANTIATE_TEST_CASE_P(smoke_TopK_bubble_BLK_on_channel_horiz_dynamic, TopKLayerCPUTest,
-        ::testing::Combine(
-            ::testing::Combine(
-                ::testing::Values(1),
-                ::testing::Values(1),
-                ::testing::ValuesIn(modes),
-                ::testing::ValuesIn(sortTypeStable),
-                ::testing::ValuesIn(netPrecisions),
-                ::testing::Values(ElementType::undefined),
-                ::testing::Values(ElementType::undefined),
-                ::testing::ValuesIn(inputShapesDynamic_bubble_BLK_on_channel_horiz)),
-            ::testing::Values(CPUSpecificParams({nChw16c, x}, {nChw16c, nChw16c}, {}, {})),
-            ::testing::ValuesIn(additionalConfig)),
+INSTANTIATE_TEST_CASE_P(
+    smoke_TopK_bubble_BLK_on_channel_horiz_dynamic,
+    TopKLayerCPUTest,
+    ::testing::Combine(::testing::Combine(::testing::Values(1),
+                                          ::testing::Values(1),
+                                          ::testing::ValuesIn(modes),
+                                          ::testing::ValuesIn(sortTypeStable),
+                                          ::testing::ValuesIn(netPrecisions),
+                                          ::testing::Values(ElementType::undefined),
+                                          ::testing::Values(ElementType::undefined),
+                                          ::testing::ValuesIn(inputShapesDynamic_bubble_BLK_on_channel_horiz)),
+                       ::testing::Values(CPUSpecificParams({nChw16c, x}, {nChw16c, nChw16c}, {}, {})),
+                       ::testing::ValuesIn(additionalConfig)),
    TopKLayerCPUTest::getTestCaseName);
 
 std::vector<InputShape> inputShapes_top1 = {
     {{}, {{1, 1, 2, 1}}},
 };
 
-std::vector<InputShape> inputShapesDynamic_top1 = {
-    {{1, 1, 2, {1, 2}}, {{1, 1, 2, 1}, {1, 1, 2, 2}}}
-};
-
-INSTANTIATE_TEST_CASE_P(smoke_Top1, TopKLayerCPUTest,
-        ::testing::Combine(
-            ::testing::Combine(
-                ::testing::Values(1),
-                ::testing::Values(3),
-                ::testing::Values(SortMode::MAX),
-                ::testing::Values(std::tuple<SortType, bool>(SortType::SORT_INDICES, false)),
-                ::testing::ValuesIn(netPrecisions),
-                ::testing::Values(ElementType::undefined),
-                ::testing::Values(ElementType::undefined),
-                ::testing::ValuesIn(inputShapes_top1)),
-            ::testing::Values(CPUSpecificParams({nchw, x}, {nchw, nchw}, {}, {})),
-            ::testing::ValuesIn(additionalConfig)),
+std::vector<InputShape> inputShapesDynamic_top1 = {{{1, 1, 2, {1, 2}}, {{1, 1, 2, 1}, {1, 1, 2, 2}}}};
+
+INSTANTIATE_TEST_CASE_P(
+    smoke_Top1,
+    TopKLayerCPUTest,
+    ::testing::Combine(::testing::Combine(::testing::Values(1),
+                                          ::testing::Values(3),
+                                          ::testing::Values(SortMode::MAX),
+                                          ::testing::Values(std::tuple<SortType, bool>(SortType::SORT_INDICES, false)),
+                                          ::testing::ValuesIn(netPrecisions),
+                                          ::testing::Values(ElementType::undefined),
+                                          ::testing::Values(ElementType::undefined),
+                                          ::testing::ValuesIn(inputShapes_top1)),
+                       ::testing::Values(CPUSpecificParams({nchw, x}, {nchw, nchw}, {}, {})),
+                       ::testing::ValuesIn(additionalConfig)),
    TopKLayerCPUTest::getTestCaseName);
 
-INSTANTIATE_TEST_CASE_P(smoke_Top1_dynamic, TopKLayerCPUTest,
-        ::testing::Combine(
-            ::testing::Combine(
-                ::testing::Values(1),
-                ::testing::Values(3),
-                ::testing::Values(SortMode::MAX),
-                ::testing::Values(std::tuple<SortType, bool>(SortType::SORT_INDICES, false)),
-                ::testing::ValuesIn(netPrecisions),
-                ::testing::Values(ElementType::undefined),
-                ::testing::Values(ElementType::undefined),
-                ::testing::ValuesIn(inputShapesDynamic_top1)),
-            ::testing::Values(CPUSpecificParams({nchw, x}, {nchw, nchw}, {}, {})),
-            ::testing::ValuesIn(additionalConfig)),
+INSTANTIATE_TEST_CASE_P(
+    smoke_Top1_dynamic,
+    TopKLayerCPUTest,
+    ::testing::Combine(::testing::Combine(::testing::Values(1),
+                                          ::testing::Values(3),
+                                          ::testing::Values(SortMode::MAX),
+                                          ::testing::Values(std::tuple<SortType, bool>(SortType::SORT_INDICES, false)),
+                                          ::testing::ValuesIn(netPrecisions),
+                                          ::testing::Values(ElementType::undefined),
+                                          ::testing::Values(ElementType::undefined),
+                                          ::testing::ValuesIn(inputShapesDynamic_top1)),
+                       ::testing::Values(CPUSpecificParams({nchw, x}, {nchw, nchw}, {}, {})),
+                       ::testing::ValuesIn(additionalConfig)),
    TopKLayerCPUTest::getTestCaseName);
-} // namespace
-
-} // namespace CPULayerTestsDefinitions
+} // namespace
\ No newline at end of file
diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/unique.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/unique.cpp
index 866cdbb9a3fcd8..5b03f0573947d7 100644
--- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/unique.cpp
+++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/unique.cpp
@@ -2,27 +2,25 @@
 // SPDX-License-Identifier: Apache-2.0
 //
 
+#include "common_test_utils/ov_tensor_utils.hpp"
 #include "shared_test_classes/base/ov_subgraph.hpp"
-#include "ov_models/builders.hpp"
 #include "test_utils/cpu_test_utils.hpp"
-#include
 
 using namespace CPUTestUtils;
 using namespace ov::test;
 
-namespace CPULayerTestsDefinitions {
-
-typedef std::tuple<
-        std::vector<InputShape>,            // Input shapes
-        std::tuple<bool, int>,              // Is flattened and axis
-        bool,                               // Sorted
-        ElementType,                        // Data precision
-        CPUSpecificParams,                  // CPU specific params
-        std::map<std::string, std::string>  // Additional config
-> UniqueLayerTestCPUParams;
+typedef std::tuple<std::vector<InputShape>,  // Input shapes
+                   std::tuple<bool, int>,    // Is flattened and axis
+                   bool,                     // Sorted
+                   ElementType,              // Data precision
+                   CPUSpecificParams,        // CPU specific params
+                   ov::AnyMap                // Additional config
+                   >
+    UniqueLayerTestCPUParams;
 
 class UniqueLayerTestCPU : public testing::WithParamInterface<UniqueLayerTestCPUParams>,
-                           virtual public SubgraphBaseTest, public CPUTestsBase {
+                           virtual public SubgraphBaseTest,
+                           public CPUTestsBase {
 public:
     static std::string getTestCaseName(testing::TestParamInfo<UniqueLayerTestCPUParams> obj) {
         std::vector<InputShape> inputShapes;
@@ -30,20 +28,22 @@ class UniqueLayerTestCPU : public testing::WithParamInterface<UniqueLayerTestCPUParams
-        std::map<std::string, std::string> additionalConfig;
+        ov::AnyMap additionalConfig;
 
         std::tie(inputShapes, flatOrAxis, sorted, dataPrecision, cpuParams, additionalConfig) = obj.param;
 
         std::ostringstream result;
         result << "IS=(";
         for (size_t i = 0lu; i < inputShapes.size(); i++) {
-            result << ov::test::utils::partialShape2str({inputShapes[i].first}) << (i < inputShapes.size() - 1lu ? "_" : "");
+            result << ov::test::utils::partialShape2str({inputShapes[i].first})
+                   << (i < inputShapes.size() - 1lu ? "_" : "");
         }
         result << ")_TS=";
         for (size_t i = 0lu; i < inputShapes.front().second.size(); i++) {
             result << "{";
             for (size_t j = 0lu; j < inputShapes.size(); j++) {
-                result << ov::test::utils::vec2str(inputShapes[j].second[i]) << (j < inputShapes.size() - 1lu ? "_" : "");
+                result << ov::test::utils::vec2str(inputShapes[j].second[i])
+                       << (j < inputShapes.size() - 1lu ? "_" : "");
             }
             result << "}_";
         }
@@ -51,7 +51,8 @@ class UniqueLayerTestCPU : public testing::WithParamInterface<UniqueLayerTestCPUParams
         if (std::get<0>(flatOrAxis)) {
             result << "axis=" << std::get<1>(flatOrAxis) << "_";
         } else {
-            result << "flattened" << "_";
+            result << "flattened"
+                   << "_";
         }
         result << "sorted=" << (sorted ? "True" : "False") << "_";
         result << "dataPrc=" << dataPrecision;
@@ -59,9 +60,9 @@ class UniqueLayerTestCPU : public testing::WithParamInterface<UniqueLayerTestCPUParams
         if (funcInput.get_node()->get_friendly_name() == "data") {
-            int32_t range = std::accumulate(targetInputStaticShapes[0].begin(), targetInputStaticShapes[0].end(), 1, std::multiplies());
-            tensor = utils::create_and_fill_tensor(
-                    funcInput.get_element_type(), targetInputStaticShapes[0], range, -range / 2, 1);
+            int32_t range = std::accumulate(targetInputStaticShapes[0].begin(),
+                                            targetInputStaticShapes[0].end(),
+                                            1,
+                                            std::multiplies());
+            tensor = utils::create_and_fill_tensor(funcInput.get_element_type(),
+                                                   targetInputStaticShapes[0],
+                                                   range,
+                                                   -range / 2,
+                                                   1);
         }
         inputs.insert({funcInput.get_node_shared_ptr(), tensor});
     }
@@ -139,22 +147,15 @@ TEST_P(UniqueLayerTestCPU, CompareWithRefs) {
 
 namespace {
 
-const std::vector<ElementType> dataPrecisionSmoke = {
-    ElementType::f32,
-    ElementType::i32
-};
-const std::vector<ElementType> dataPrecisionNightly = {
-    ElementType::bf16,
-    ElementType::i8
-};
+const std::vector<ElementType> dataPrecisionSmoke = {ElementType::f32, ElementType::i32};
+const std::vector<ElementType> dataPrecisionNightly = {ElementType::bf16, ElementType::i8};
 
-std::vector<std::tuple<bool, int>> flatOrAxis { {true, 0}, {false, 0}, {false, 1}, {false, -1} };
+std::vector<std::tuple<bool, int>> flatOrAxis{{true, 0}, {false, 0}, {false, 1}, {false, -1}};
 
-std::vector<bool> sorted { true, false};
+std::vector<bool> sorted{true, false};
 
-std::vector<std::map<std::string, std::string>> additionalConfig
-    = {{{InferenceEngine::PluginConfigParams::KEY_ENFORCE_BF16, InferenceEngine::PluginConfigParams::NO}},
-       {{InferenceEngine::PluginConfigParams::KEY_ENFORCE_BF16, InferenceEngine::PluginConfigParams::YES}}};
+std::vector<ov::AnyMap> additionalConfig = {{{ov::hint::inference_precision(ov::element::f32)}},
+                                            {{ov::hint::inference_precision(ov::element::bf16)}}};
 
 std::vector<CPUSpecificParams> getCPUInfo() {
     std::vector<CPUSpecificParams> resCPUParams;
@@ -163,121 +164,121 @@ std::vector<CPUSpecificParams> getCPUInfo() {
 }
 
 std::vector<std::vector<InputShape>> statShapes1D = {
-    {{{}, {{1}}}},  // Static shapes
-    {{{}, {{5}}}},  // Static shapes
-    {{{}, {{8}}}},  // Static shapes
-    {{{}, {{16}}}}, // Static shapes
-    {{{}, {{32}}}}, // Static shapes
-    {{{}, {{64}}}}, // Static shapes
-    {{{}, {{99}}}}, // Static shapes
+    {{{}, {{1}}}},   // Static shapes
+    {{{}, {{5}}}},   // Static shapes
+    {{{}, {{8}}}},   // Static shapes
+    {{{}, {{16}}}},  // Static shapes
+    {{{}, {{32}}}},  // Static shapes
+    {{{}, {{64}}}},  // Static shapes
+    {{{}, {{99}}}},  // Static shapes
 };
 
-INSTANTIATE_TEST_SUITE_P(smoke_static_1D, UniqueLayerTestCPU,
-        ::testing::Combine(
-            ::testing::ValuesIn(statShapes1D),
-            ::testing::ValuesIn(std::vector<std::tuple<bool, int>>{{true, 0}, {false, 0}}),
-            ::testing::ValuesIn(sorted),
-            ::testing::ValuesIn(dataPrecisionSmoke),
-            ::testing::ValuesIn(getCPUInfo()),
-            ::testing::Values(additionalConfig[0])),
-        UniqueLayerTestCPU::getTestCaseName);
+INSTANTIATE_TEST_SUITE_P(smoke_static_1D,
+                         UniqueLayerTestCPU,
+                         ::testing::Combine(::testing::ValuesIn(statShapes1D),
+                                            ::testing::ValuesIn(std::vector<std::tuple<bool, int>>{{true, 0},
+                                                                                                   {false, 0}}),
+                                            ::testing::ValuesIn(sorted),
+                                            ::testing::ValuesIn(dataPrecisionSmoke),
+                                            ::testing::ValuesIn(getCPUInfo()),
+                                            ::testing::Values(additionalConfig[0])),
+                         UniqueLayerTestCPU::getTestCaseName);
 
 std::vector<std::vector<InputShape>> getStaticShapes() {
     std::vector<std::vector<InputShape>> result = {
-        { { {}, { {1, 1, 1} } } },        // Static shapes
-        { { {}, { {1, 2, 1} } } },        // Static shapes
-        { { {}, { {1, 1, 3} } } },        // Static shapes
-        { { {}, { {2, 2, 1} } } },        // Static shapes
-        { { {}, { {1, 4, 1} } } },        // Static shapes
-        { { {}, { {1, 5, 1} } } },        // Static shapes
-        { { {}, { {3, 2, 1} } } },        // Static shapes
-        { { {}, { {1, 1, 7} } } },        // Static shapes
-        { { {}, { {2, 2, 2} } } },        // Static shapes
-        { { {}, { {1, 8, 1} } } },        // Static shapes
-        { { {}, { {3, 3, 1, 1} } } },     // Static shapes
-        { { {}, { {1, 5, 2, 1} } } },     // Static shapes
-        { { {}, { {1, 1, 11} } } },       // Static shapes
-        { { {}, { {32, 35, 37} } } },     // Static shapes
-        { { {}, { {2, 3, 2} } } },        // Static shapes
-        { { {}, { {1, 1, 13} } } },       // Static shapes
-        { { {}, { {7, 1, 2} } } },        // Static shapes
-        { { {}, { {3, 5, 1} } } },        // Static shapes
-        { { {}, { {4, 2, 2} } } },        // Static shapes
-        { { {}, { {1, 17, 1} } } },       // Static shapes
-        { { {}, { {3, 2, 3, 1} } } },     // Static shapes
-        { { {}, { {8, 16, 32} } } },      // Static shapes
-        { { {}, { {37, 19, 11} } } },     // Static shapes
-        { { {}, { {1, 19, 1} } } },       // Static shapes
-        { { {}, { {2, 5, 2} } } },        // Static shapes
-        { { {}, { {1, 3, 7} } } },        // Static shapes
-        { { {}, { {11, 1, 2} } } },       // Static shapes
-        { { {}, { {1, 1, 23} } } },       // Static shapes
-        { { {}, { {4, 3, 2} } } },        // Static shapes
-        { { {}, { {5, 1, 5} } } },        // Static shapes
-        { { {}, { {100, 1, 1} } } },      // Static shapes
-        { { {}, { {5, 5, 5} } } }         // Static shapes
+        {{{}, {{1, 1, 1}}}},       // Static shapes
+        {{{}, {{1, 2, 1}}}},       // Static shapes
+        {{{}, {{1, 1, 3}}}},       // Static shapes
+        {{{}, {{2, 2, 1}}}},       // Static shapes
+        {{{}, {{1, 4, 1}}}},       // Static shapes
+        {{{}, {{1, 5, 1}}}},       // Static shapes
+        {{{}, {{3, 2, 1}}}},       // Static shapes
+        {{{}, {{1, 1, 7}}}},       // Static shapes
+        {{{}, {{2, 2, 2}}}},       // Static shapes
+        {{{}, {{1, 8, 1}}}},       // Static shapes
+        {{{}, {{3, 3, 1, 1}}}},    // Static shapes
+        {{{}, {{1, 5, 2, 1}}}},    // Static shapes
+        {{{}, {{1, 1, 11}}}},      // Static shapes
+        {{{}, {{32, 35, 37}}}},    // Static shapes
+        {{{}, {{2, 3, 2}}}},       // Static shapes
+        {{{}, {{1, 1, 13}}}},      // Static shapes
+        {{{}, {{7, 1, 2}}}},       // Static shapes
+        {{{}, {{3, 5, 1}}}},       // Static shapes
+        {{{}, {{4, 2, 2}}}},       // Static shapes
+        {{{}, {{1, 17, 1}}}},      // Static shapes
+        {{{}, {{3, 2, 3, 1}}}},    // Static shapes
+        {{{}, {{8, 16, 32}}}},     // Static shapes
+        {{{}, {{37, 19, 11}}}},    // Static shapes
+        {{{}, {{1, 19, 1}}}},      // Static shapes
+        {{{}, {{2, 5, 2}}}},       // Static shapes
+        {{{}, {{1, 3, 7}}}},       // Static shapes
+        {{{}, {{11, 1, 2}}}},      // Static shapes
+        {{{}, {{1, 1, 23}}}},      // Static shapes
+        {{{}, {{4, 3, 2}}}},       // Static shapes
+        {{{}, {{5, 1, 5}}}},       // Static shapes
+        {{{}, {{100, 1, 1}}}},     // Static shapes
+        {{{}, {{5, 5, 5}}}}        // Static shapes
     };
     return result;
 }
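The `additionalConfig` migration above replaces the legacy `KEY_ENFORCE_BF16` map entries with `ov::AnyMap` properties. A minimal sketch of how such a property map is consumed outside the test harness, assuming an arbitrary `model`; the helper name `compile_with_precision` is illustrative and not part of this patch:

    #include "openvino/runtime/core.hpp"

    // Compile on CPU with an explicit inference precision; passing
    // ov::element::bf16 mirrors the second additionalConfig entry above.
    ov::CompiledModel compile_with_precision(const std::shared_ptr<ov::Model>& model,
                                             ov::element::Type precision) {
        ov::Core core;
        ov::AnyMap config{{ov::hint::inference_precision(precision)}};
        return core.compile_model(model, "CPU", config);
    }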
 
-INSTANTIATE_TEST_SUITE_P(smoke_static, UniqueLayerTestCPU,
-        ::testing::Combine(
-            ::testing::ValuesIn(getStaticShapes()),
-            ::testing::ValuesIn(flatOrAxis),
-            ::testing::ValuesIn(sorted),
-            ::testing::ValuesIn(dataPrecisionSmoke),
-            ::testing::ValuesIn(getCPUInfo()),
-            ::testing::Values(additionalConfig[0])),
-        UniqueLayerTestCPU::getTestCaseName);
-
-INSTANTIATE_TEST_SUITE_P(nightly_static, UniqueLayerTestCPU,
-        ::testing::Combine(
-            ::testing::ValuesIn(getStaticShapes()),
-            ::testing::ValuesIn(flatOrAxis),
-            ::testing::ValuesIn(sorted),
-            ::testing::ValuesIn(dataPrecisionNightly),
-            ::testing::ValuesIn(getCPUInfo()),
-            ::testing::Values(additionalConfig[0])),
-        UniqueLayerTestCPU::getTestCaseName);
+INSTANTIATE_TEST_SUITE_P(smoke_static,
+                         UniqueLayerTestCPU,
+                         ::testing::Combine(::testing::ValuesIn(getStaticShapes()),
+                                            ::testing::ValuesIn(flatOrAxis),
+                                            ::testing::ValuesIn(sorted),
+                                            ::testing::ValuesIn(dataPrecisionSmoke),
+                                            ::testing::ValuesIn(getCPUInfo()),
+                                            ::testing::Values(additionalConfig[0])),
+                         UniqueLayerTestCPU::getTestCaseName);
+
+INSTANTIATE_TEST_SUITE_P(nightly_static,
+                         UniqueLayerTestCPU,
+                         ::testing::Combine(::testing::ValuesIn(getStaticShapes()),
+                                            ::testing::ValuesIn(flatOrAxis),
+                                            ::testing::ValuesIn(sorted),
+                                            ::testing::ValuesIn(dataPrecisionNightly),
+                                            ::testing::ValuesIn(getCPUInfo()),
+                                            ::testing::Values(additionalConfig[0])),
+                         UniqueLayerTestCPU::getTestCaseName);
 
 const std::vector<std::vector<InputShape>> dynamicInSapes = {
-    { { { ov::Dimension(1, 15), -1, -1, -1 },                             // Dynamic shape
-        { {1, 1, 1, 1}, {6, 3, 1, 2}, {4, 5, 3, 1}, {2, 7, 2, 2} } } },   // Target shapes
-    { { { -1, -1, -1, -1 },                                               // Dynamic shape
-        { {1, 2, 1, 5}, {3, 4, 2, 3}, {5, 6, 7, 1}, {7, 8, 2, 4} } } },   // Target shapes
-    { { { ov::Dimension(2, 15), -1, -1, -1 },                             // Dynamic shape
-        { {8, 3, 3, 3}, {6, 5, 2, 5}, {4, 7, 1, 11}, {2, 9, 3, 4} } } },  // Target shapes
-    { { { 3, 4, 4, 5 },                                                   // Dynamic shape
-        { {3, 4, 4, 5}, {3, 4, 4, 5}, {3, 4, 4, 5}, {3, 4, 4, 5} } } },   // Target shapes
-    { { { -1, -1, -1, -1 },                                               // Dynamic shape
-        { {1, 2, 1, 13}, {3, 4, 7, 2}, {5, 6, 3, 5}, {7, 8, 4, 4} } } },  // Target shapes
-    { { { -1, -1, -1, -1 },                                               // Dynamic shape
-        { {2, 11, 1, 17}, {4, 9, 6, 3}, {6, 7, 7, 3}, {8, 3, 2, 11} } } },// Target shapes
-    { { { 3, -1, -1, -1 },                                                // Dynamic shape
-        { {3, 2, 1, 23}, {3, 4, 3, 8}, {3, 6, 5, 5}, {3, 8, 31, 1} } } }, // Target shapes
-    { { { -1, 3, -1, -1 },                                                // Dynamic shape
-        { {8, 3, 8, 4}, {6, 3, 33, 1}, {4, 3, 8, 6}, {2, 3, 8, 8} } } }   // Target shapes
+    {{{ov::Dimension(1, 15), -1, -1, -1},                            // Dynamic shape
+      {{1, 1, 1, 1}, {6, 3, 1, 2}, {4, 5, 3, 1}, {2, 7, 2, 2}}}},    // Target shapes
+    {{{-1, -1, -1, -1},                                              // Dynamic shape
+      {{1, 2, 1, 5}, {3, 4, 2, 3}, {5, 6, 7, 1}, {7, 8, 2, 4}}}},    // Target shapes
+    {{{ov::Dimension(2, 15), -1, -1, -1},                            // Dynamic shape
+      {{8, 3, 3, 3}, {6, 5, 2, 5}, {4, 7, 1, 11}, {2, 9, 3, 4}}}},   // Target shapes
+    {{{3, 4, 4, 5},                                                  // Dynamic shape
+      {{3, 4, 4, 5}, {3, 4, 4, 5}, {3, 4, 4, 5}, {3, 4, 4, 5}}}},    // Target shapes
+    {{{-1, -1, -1, -1},                                              // Dynamic shape
+      {{1, 2, 1, 13}, {3, 4, 7, 2}, {5, 6, 3, 5}, {7, 8, 4, 4}}}},   // Target shapes
+    {{{-1, -1, -1, -1},                                              // Dynamic shape
+      {{2, 11, 1, 17}, {4, 9, 6, 3}, {6, 7, 7, 3}, {8, 3, 2, 11}}}}, // Target shapes
+    {{{3, -1, -1, -1},                                               // Dynamic shape
+      {{3, 2, 1, 23}, {3, 4, 3, 8}, {3, 6, 5, 5}, {3, 8, 31, 1}}}},  // Target shapes
+    {{{-1, 3, -1, -1},                                               // Dynamic shape
+      {{8, 3, 8, 4}, {6, 3, 33, 1}, {4, 3, 8, 6}, {2, 3, 8, 8}}}}    // Target shapes
 };
 
-INSTANTIATE_TEST_SUITE_P(smoke_dynamic, UniqueLayerTestCPU,
-        ::testing::Combine(
-            ::testing::ValuesIn(dynamicInSapes),
-            ::testing::ValuesIn(flatOrAxis),
-            ::testing::ValuesIn(sorted),
-            ::testing::ValuesIn(dataPrecisionSmoke),
-            ::testing::ValuesIn(getCPUInfo()),
-            ::testing::Values(additionalConfig[0])),
-        UniqueLayerTestCPU::getTestCaseName);
-
-INSTANTIATE_TEST_SUITE_P(nightly_dynamic, UniqueLayerTestCPU,
-        ::testing::Combine(
-            ::testing::ValuesIn(dynamicInSapes),
-            ::testing::ValuesIn(flatOrAxis),
-            ::testing::ValuesIn(sorted),
-            ::testing::ValuesIn(dataPrecisionNightly),
-            ::testing::ValuesIn(getCPUInfo()),
-            ::testing::Values(additionalConfig[0])),
+INSTANTIATE_TEST_SUITE_P(smoke_dynamic,
+                         UniqueLayerTestCPU,
+                         ::testing::Combine(::testing::ValuesIn(dynamicInSapes),
+                                            ::testing::ValuesIn(flatOrAxis),
+                                            ::testing::ValuesIn(sorted),
+                                            ::testing::ValuesIn(dataPrecisionSmoke),
+                                            ::testing::ValuesIn(getCPUInfo()),
+                                            ::testing::Values(additionalConfig[0])),
+                         UniqueLayerTestCPU::getTestCaseName);
+
+INSTANTIATE_TEST_SUITE_P(nightly_dynamic,
+                         UniqueLayerTestCPU,
+                         ::testing::Combine(::testing::ValuesIn(dynamicInSapes),
+                                            ::testing::ValuesIn(flatOrAxis),
+                                            ::testing::ValuesIn(sorted),
+                                            ::testing::ValuesIn(dataPrecisionNightly),
+                                            ::testing::ValuesIn(getCPUInfo()),
+                                            ::testing::Values(additionalConfig[0])),
                          UniqueLayerTestCPU::getTestCaseName);
-} // namespace
-} // namespace CPULayerTestsDefinitions
+} // namespace
\ No newline at end of file
diff --git a/src/plugins/intel_cpu/tests/functional/single_layer_tests/variadic_split.cpp b/src/plugins/intel_cpu/tests/functional/single_layer_tests/variadic_split.cpp
index 9ccd6bab8feeba..4bd2b50f3042a0 100644
--- a/src/plugins/intel_cpu/tests/functional/single_layer_tests/variadic_split.cpp
+++ b/src/plugins/intel_cpu/tests/functional/single_layer_tests/variadic_split.cpp
@@ -2,35 +2,31 @@
 // SPDX-License-Identifier: Apache-2.0
 //
 
-#include
-
+#include "common_test_utils/ov_tensor_utils.hpp"
 #include "shared_test_classes/base/ov_subgraph.hpp"
-#include
-#include "ov_models/builders.hpp"
 #include "test_utils/cpu_test_utils.hpp"
 
 using namespace ov::test;
 using namespace CPUTestUtils;
 
-namespace CPULayerTestsDefinitions {
 using LengthsPerInfer = std::vector<std::vector<int>>;
 
-typedef std::tuple<
-    InputShape,
-    int64_t,                          // Axis
-    LengthsPerInfer,                  // Split lengths
-    ngraph::helpers::InputLayerType,  // lengths input type
-    ElementType,                      // Net precision
-    CPUSpecificParams
-> varSplitCPUTestParams;
+typedef std::tuple<InputShape,
+                   int64_t,                          // Axis
+                   LengthsPerInfer,                  // Split lengths
+                   ov::test::utils::InputLayerType,  // lengths input type
+                   ElementType,                      // Net precision
+                   CPUSpecificParams>
+    varSplitCPUTestParams;
 
 class VariadicSplitLayerCPUTest : public testing::WithParamInterface<varSplitCPUTestParams>,
-                                  virtual public SubgraphBaseTest, public CPUTestsBase {
+                                  virtual public SubgraphBaseTest,
+                                  public CPUTestsBase {
 public:
     static std::string getTestCaseName(testing::TestParamInfo<varSplitCPUTestParams> obj) {
         InputShape shapes;
         int64_t axis;
         LengthsPerInfer splitLengths;
-        ngraph::helpers::InputLayerType lengthsType;
+        ov::test::utils::InputLayerType lengthsType;
         ElementType netPrecision;
         CPUSpecificParams cpuParams;
         std::tie(shapes, axis, splitLengths, lengthsType, netPrecision, cpuParams) = obj.param;
@@ -59,7 +55,7 @@ class VariadicSplitLayerCPUTest : public testing::WithParamInterface<varSplitCPUTestParams
-        ngraph::helpers::InputLayerType lengthsType;
+        ov::test::utils::InputLayerType lengthsType;
         std::tie(inputShapes, axis, lengthsPerInfer, lengthsType, netPrecision, cpuParams) = this->GetParam();
@@ -68,7 +64,7 @@ class VariadicSplitLayerCPUTest : public testing::WithParamInterface<varSplitCPUTestParams
         std::vector<InputShape> shapesToInit{inputShapes};
 
-        if (lengthsType == ngraph::helpers::InputLayerType::PARAMETER) {
+        if (lengthsType == ov::test::utils::InputLayerType::PARAMETER) {
             std::vector<ov::Shape> lengthsStaticShapes(inputShapes.second.size(), {lengthsPerInfer[0].size()});
             shapesToInit.emplace_back(InputShape{{static_cast<int64_t>(lengthsPerInfer[0].size())}, lengthsStaticShapes});
         }
 
@@ -77,25 +73,27 @@ class VariadicSplitLayerCPUTest : public testing::WithParamInterface<varSplitCPUTestParams
         ov::ParameterVector params{std::make_shared<ov::op::v0::Parameter>(netPrecision, inputDynamicShapes[0])};
 
         std::shared_ptr<ov::Node> splitLengthsOp;
-        if (lengthsType == ngraph::helpers::InputLayerType::PARAMETER) {
-            auto param = std::make_shared<ov::op::v0::Parameter>(ov::element::i32, ov::Shape{lengthsPerInfer[0].size()});
+        if (lengthsType == ov::test::utils::InputLayerType::PARAMETER) {
+            auto param =
+                std::make_shared<ov::op::v0::Parameter>(ov::element::i32, ov::Shape{lengthsPerInfer[0].size()});
             params.push_back(param);
             splitLengthsOp = param;
         } else {
-            splitLengthsOp = ov::opset10::Constant::create(ov::element::i32, {lengthsPerInfer[0].size()}, lengthsPerInfer[0]);
+            splitLengthsOp =
+                ov::op::v0::Constant::create(ov::element::i32, {lengthsPerInfer[0].size()}, lengthsPerInfer[0]);
         }
 
-        auto splitAxisOp = ov::opset10::Constant::create(ov::element::i64, {}, {axis});
-        auto varSplit = std::make_shared<ov::opset10::VariadicSplit>(params[0], splitAxisOp, splitLengthsOp);
+        auto splitAxisOp = ov::op::v0::Constant::create(ov::element::i64, {}, {axis});
+        auto varSplit = std::make_shared<ov::op::v1::VariadicSplit>(params[0], splitAxisOp, splitLengthsOp);
         varSplit->get_rt_info() = getCPUInfo();
 
         ov::ResultVector results;
         for (const auto& out : varSplit->outputs())
-            results.push_back(std::make_shared<ov::opset10::Result>(out));
-        function = std::make_shared<ngraph::Function>(results, params, "VariadicSplitCPU");
+            results.push_back(std::make_shared<ov::op::v0::Result>(out));
+        function = std::make_shared<ov::Model>(results, params, "VariadicSplitCPU");
     }
 
-    void generate_inputs(const std::vector<ngraph::Shape>& targetInputStaticShapes) override {
+    void generate_inputs(const std::vector<ov::Shape>& targetInputStaticShapes) override {
         inputs.clear();
         const auto& funcInputs = function->inputs();
@@ -116,6 +114,7 @@ class VariadicSplitLayerCPUTest : public testing::WithParamInterface<varSplitCPUTestParams
 
-const std::vector<ElementType> netPrecisions = {
-    ElementType::i8,
-    ElementType::i32,
-    ElementType::f32,
-    ElementType::bf16
-};
+const std::vector<ElementType> netPrecisions = {ElementType::i8, ElementType::i32, ElementType::f32, ElementType::bf16};
 
-const std::vector<ngraph::helpers::InputLayerType> lengthsTypes = {
-    ngraph::helpers::InputLayerType::CONSTANT,
-    ngraph::helpers::InputLayerType::PARAMETER
-};
+const std::vector<ov::test::utils::InputLayerType> lengthsTypes = {ov::test::utils::InputLayerType::CONSTANT,
+                                                                   ov::test::utils::InputLayerType::PARAMETER};
 
 const std::vector<InputShape> inputShapes4D_Nspc2NcspSpecial = {
-    { {}, {{3, 5, 24, 9}} },
-    {
-        // dynamic
-        {-1, -1, -1, -1},
-        // target
-        {
-            {1, 8, 5, 7},
-            {3, 9, 7, 9},
-            {5, 6, 1, 8}
-        }
-    },
-    {
-        // dynamic
-        {{1, 5}, {1, 64}, {1, 25}, {2, 10}},
-        // target
-        {
-            {2, 7, 5, 7},
-            {1, 10, 10, 2},
-            {3, 5, 6, 9}
-        }
-    },
+    {{}, {{3, 5, 24, 9}}},
+    {// dynamic
+     {-1, -1, -1, -1},
+     // target
+     {{1, 8, 5, 7}, {3, 9, 7, 9}, {5, 6, 1, 8}}},
+    {// dynamic
+     {{1, 5}, {1, 64}, {1, 25}, {2, 10}},
+     // target
+     {{2, 7, 5, 7}, {1, 10, 10, 2}, {3, 5, 6, 9}}},
 };
 
-INSTANTIATE_TEST_SUITE_P(smoke_VariadicSplit4D_CPU_Nspc2NcspSpecial, VariadicSplitLayerCPUTest,
-        ::testing::Combine(
-            ::testing::ValuesIn(inputShapes4D_Nspc2NcspSpecial),
-            ::testing::Values(1),
-            ::testing::Values(LengthsPerInfer{{1, 2, -1, 1}}),
-            ::testing::ValuesIn(lengthsTypes),
-            ::testing::ValuesIn(netPrecisions),
-            ::testing::Values(perChannelsToPlanar_4D)),
-        VariadicSplitLayerCPUTest::getTestCaseName);
+INSTANTIATE_TEST_SUITE_P(smoke_VariadicSplit4D_CPU_Nspc2NcspSpecial,
+                         VariadicSplitLayerCPUTest,
+                         ::testing::Combine(::testing::ValuesIn(inputShapes4D_Nspc2NcspSpecial),
+                                            ::testing::Values(1),
+                                            ::testing::Values(LengthsPerInfer{{1, 2, -1, 1}}),
+                                            ::testing::ValuesIn(lengthsTypes),
+                                            ::testing::ValuesIn(netPrecisions),
+                                            ::testing::Values(perChannelsToPlanar_4D)),
+                         VariadicSplitLayerCPUTest::getTestCaseName);
 
 const std::vector<InputShape> inputShapes5D_Nspc2NcspSpecial = {
-    { {}, {{3, 4, 7, 9, 3}} },
-    {
-        // dynamic
-        {-1, -1, -1, -1, -1},
-        // target
-        {
-            {1, 6, 5, 7, 5},
-            {3, 8, 6, 9, 1},
-            {5, 9, 1, 8, 2}
-        }
-    },
-    {
-        // dynamic
-        {{1, 5}, {1, 64}, {1, 25}, {2, 10}, {1, 64}},
-        // target
-        {
-            {2, 5, 5, 7, 7},
-            {1, 4, 10, 2, 11},
-            {3, 7, 5, 9, 8}
-        }
-    },
+    {{}, {{3, 4, 7, 9, 3}}},
+    {// dynamic
+     {-1, -1, -1, -1, -1},
+     // target
+     {{1, 6, 5, 7, 5}, {3, 8, 6, 9, 1}, {5, 9, 1, 8, 2}}},
+    {// dynamic
+     {{1, 5}, {1, 64}, {1, 25}, {2, 10}, {1, 64}},
+     // target
+     {{2, 5, 5, 7, 7}, {1, 4, 10, 2, 11}, {3, 7, 5, 9, 8}}},
 };
 
-INSTANTIATE_TEST_SUITE_P(smoke_VariadicSplit5D_CPU_Nspc2NcspSpecial, VariadicSplitLayerCPUTest,
-        ::testing::Combine(
-            ::testing::ValuesIn(inputShapes5D_Nspc2NcspSpecial),
-            ::testing::Values(1),
-            ::testing::Values(LengthsPerInfer{{2, 1, -1}}),
-            ::testing::ValuesIn(lengthsTypes),
-            ::testing::ValuesIn(netPrecisions),
-            ::testing::Values(perChannelsToPlanar_5D)),
-        VariadicSplitLayerCPUTest::getTestCaseName);
-
-INSTANTIATE_TEST_SUITE_P(smoke_VariadicSplit4D_CPU_planar_static, VariadicSplitLayerCPUTest,
-        ::testing::Combine(
-            ::testing::Values(InputShape{ {}, {{3, 6, 5, 6}} }),
-            ::testing::Values(2, 3),
-            ::testing::Values(LengthsPerInfer{{1, 3, -1}}),
-            ::testing::Values(ngraph::helpers::InputLayerType::CONSTANT),
-            ::testing::ValuesIn(netPrecisions),
-            ::testing::Values(planar_4D_ref, perChannels_4D)),
-        VariadicSplitLayerCPUTest::getTestCaseName);
+INSTANTIATE_TEST_SUITE_P(smoke_VariadicSplit5D_CPU_Nspc2NcspSpecial,
+                         VariadicSplitLayerCPUTest,
+                         ::testing::Combine(::testing::ValuesIn(inputShapes5D_Nspc2NcspSpecial),
+                                            ::testing::Values(1),
+                                            ::testing::Values(LengthsPerInfer{{2, 1, -1}}),
+                                            ::testing::ValuesIn(lengthsTypes),
+                                            ::testing::ValuesIn(netPrecisions),
+                                            ::testing::Values(perChannelsToPlanar_5D)),
+                         VariadicSplitLayerCPUTest::getTestCaseName);
+
+INSTANTIATE_TEST_SUITE_P(smoke_VariadicSplit4D_CPU_planar_static,
+                         VariadicSplitLayerCPUTest,
+                         ::testing::Combine(::testing::Values(InputShape{{}, {{3, 6, 5, 6}}}),
+                                            ::testing::Values(2, 3),
+                                            ::testing::Values(LengthsPerInfer{{1, 3, -1}}),
+                                            ::testing::Values(ov::test::utils::InputLayerType::CONSTANT),
+                                            ::testing::ValuesIn(netPrecisions),
+                                            ::testing::Values(planar_4D_ref, perChannels_4D)),
+                         VariadicSplitLayerCPUTest::getTestCaseName);
 
 const std::vector<InputShape> inputShapes4D_planar = {
-    {
-        // dynamic
-        {-1, -1, -1, -1},
-        // target
-        {
-            {1, 9, 8, 7},
-            {3, 8, 6, 5},
-            {5, 3, 7, 6}
-        }
-    },
-    {
-        // dynamic
-        {{1, 5}, {1, 64}, {1, 48}, {2, 48}},
-        // target
-        {
-            {2, 9, 5, 6},
-            {1, 6, 9, 8},
-            {3, 1, 6, 7}
-        }
-    },
+    {// dynamic
+     {-1, -1, -1, -1},
+     // target
+     {{1, 9, 8, 7}, {3, 8, 6, 5}, {5, 3, 7, 6}}},
+    {// dynamic
+     {{1, 5}, {1, 64}, {1, 48}, {2, 48}},
+     // target
+     {{2, 9, 5, 6}, {1, 6, 9, 8}, {3, 1, 6, 7}}},
 };
 
-INSTANTIATE_TEST_SUITE_P(smoke_VariadicSplit4D_CPU_planar, VariadicSplitLayerCPUTest,
-        ::testing::Combine(
-            ::testing::ValuesIn(inputShapes4D_planar),
-            ::testing::Values(2, 3),
-            ::testing::Values(LengthsPerInfer{{1, 3, -1}}),
-            ::testing::ValuesIn(lengthsTypes),
-            ::testing::ValuesIn(netPrecisions),
-            ::testing::Values(planar_4D_ref, perChannels_4D)),
-        VariadicSplitLayerCPUTest::getTestCaseName);
+INSTANTIATE_TEST_SUITE_P(smoke_VariadicSplit4D_CPU_planar,
+                         VariadicSplitLayerCPUTest,
+                         ::testing::Combine(::testing::ValuesIn(inputShapes4D_planar),
+                                            ::testing::Values(2, 3),
+                                            ::testing::Values(LengthsPerInfer{{1, 3, -1}}),
+                                            ::testing::ValuesIn(lengthsTypes),
+                                            ::testing::ValuesIn(netPrecisions),
+                                            ::testing::Values(planar_4D_ref, perChannels_4D)),
+                         VariadicSplitLayerCPUTest::getTestCaseName);
 
 const std::vector<InputShape> inputShapes4D_block = {
-    { {}, {{3, 16, 6, 7}} },
-    {
-        // dynamic
-        {-1, 16, -1, -1},
-        // target
-        {
-            {1, 16, 8, 7},
-            {3, 16, 7, 8},
-            {5, 16, 9, 8}
-        }
-    },
-    {
-        // dynamic
-        {{1, 5}, 16, {1, 48}, {2, 24}},
-        // target
-        {
-            {2, 16, 12, 6},
-            {1, 16, 6, 9},
-            {3, 16, 7, 6}
-        }
-    },
+    {{}, {{3, 16, 6, 7}}},
+    {// dynamic
+     {-1, 16, -1, -1},
+     // target
+     {{1, 16, 8, 7}, {3, 16, 7, 8}, {5, 16, 9, 8}}},
+    {// dynamic
+     {{1, 5}, 16, {1, 48}, {2, 24}},
+     // target
+     {{2, 16, 12, 6}, {1, 16, 6, 9}, {3, 16, 7, 6}}},
 };
 
-INSTANTIATE_TEST_SUITE_P(smoke_VariadicSplit4D_CPU_Block8, VariadicSplitLayerCPUTest,
-        ::testing::Combine(
-            ::testing::ValuesIn(inputShapes4D_block),
-            ::testing::Values(2, 3),
-            ::testing::Values(LengthsPerInfer{{2, 2, -1}}),
-            ::testing::ValuesIn(lengthsTypes),
-            ::testing::ValuesIn(netPrecisions),
-            ::testing::Values(blocked8_4D_ref)),
-        VariadicSplitLayerCPUTest::getTestCaseName);
-
-INSTANTIATE_TEST_SUITE_P(smoke_VariadicSplit4D_CPU_Block16, VariadicSplitLayerCPUTest,
-        ::testing::Combine(
-            ::testing::ValuesIn(inputShapes4D_block),
-            ::testing::Values(2, 3),
-            ::testing::Values(LengthsPerInfer{{2, 2, -1, 1}}),
-            ::testing::ValuesIn(lengthsTypes),
-            ::testing::ValuesIn(netPrecisions),
-            ::testing::Values(blocked16_4D_ref)),
-        VariadicSplitLayerCPUTest::getTestCaseName);
-
-INSTANTIATE_TEST_SUITE_P(smoke_VariadicSplit5D_CPU_planar_static, VariadicSplitLayerCPUTest,
-        ::testing::Combine(
-            ::testing::Values(InputShape{ {}, {{3, 24, 4, 5, 6}} }),
-            ::testing::Values(2, 3, 4),
-            ::testing::Values(LengthsPerInfer{{2, 1, -1}}),
-            ::testing::Values(ngraph::helpers::InputLayerType::CONSTANT),
-            ::testing::ValuesIn(netPrecisions),
-            ::testing::Values(planar_5D_ref, perChannels_5D)),
-        VariadicSplitLayerCPUTest::getTestCaseName);
+INSTANTIATE_TEST_SUITE_P(smoke_VariadicSplit4D_CPU_Block8,
+                         VariadicSplitLayerCPUTest,
+                         ::testing::Combine(::testing::ValuesIn(inputShapes4D_block),
+                                            ::testing::Values(2, 3),
+                                            ::testing::Values(LengthsPerInfer{{2, 2, -1}}),
+                                            ::testing::ValuesIn(lengthsTypes),
+                                            ::testing::ValuesIn(netPrecisions),
+                                            ::testing::Values(blocked8_4D_ref)),
+                         VariadicSplitLayerCPUTest::getTestCaseName);
+
+INSTANTIATE_TEST_SUITE_P(smoke_VariadicSplit4D_CPU_Block16,
+                         VariadicSplitLayerCPUTest,
+                         ::testing::Combine(::testing::ValuesIn(inputShapes4D_block),
+                                            ::testing::Values(2, 3),
+                                            ::testing::Values(LengthsPerInfer{{2, 2, -1, 1}}),
+                                            ::testing::ValuesIn(lengthsTypes),
+                                            ::testing::ValuesIn(netPrecisions),
+                                            ::testing::Values(blocked16_4D_ref)),
+                         VariadicSplitLayerCPUTest::getTestCaseName);
+
+INSTANTIATE_TEST_SUITE_P(smoke_VariadicSplit5D_CPU_planar_static,
+                         VariadicSplitLayerCPUTest,
+                         ::testing::Combine(::testing::Values(InputShape{{}, {{3, 24, 4, 5, 6}}}),
+                                            ::testing::Values(2, 3, 4),
+                                            ::testing::Values(LengthsPerInfer{{2, 1, -1}}),
+                                            ::testing::Values(ov::test::utils::InputLayerType::CONSTANT),
+                                            ::testing::ValuesIn(netPrecisions),
+                                            ::testing::Values(planar_5D_ref, perChannels_5D)),
+                         VariadicSplitLayerCPUTest::getTestCaseName);
 
 const std::vector<InputShape> inputShapes5D_planar = {
-    {
-        // dynamic
-        {-1, -1, -1, -1, -1},
-        // target
-        {
-            {1, 2, 4, 6, 5},
-            {3, 1, 6, 4, 5},
-            {5, 6, 5, 7, 4}
-        }
-    },
-    {
-        // dynamic
-        {{1, 5}, {1, 64}, {1, 48}, {2, 48}, {2, 40}},
-        // target
-        {
-            {2, 5, 4, 5, 6},
-            {1, 7, 5, 4, 7},
-            {3, 3, 5, 6, 4}
-        }
-    },
+    {// dynamic
+     {-1, -1, -1, -1, -1},
+     // target
+     {{1, 2, 4, 6, 5}, {3, 1, 6, 4, 5}, {5, 6, 5, 7, 4}}},
+    {// dynamic
+     {{1, 5}, {1, 64}, {1, 48}, {2, 48}, {2, 40}},
+     // target
+     {{2, 5, 4, 5, 6}, {1, 7, 5, 4, 7}, {3, 3, 5, 6, 4}}},
 };
 
-INSTANTIATE_TEST_SUITE_P(smoke_VariadicSplit5D_CPU_planar, VariadicSplitLayerCPUTest,
-        ::testing::Combine(
-            ::testing::ValuesIn(inputShapes5D_planar),
-            ::testing::Values(2, 3, 4),
-            ::testing::Values(LengthsPerInfer{{2, 1, -1}}),
-            ::testing::ValuesIn(lengthsTypes),
-            ::testing::ValuesIn(netPrecisions),
-            ::testing::Values(planar_5D_ref, perChannels_5D)),
-        VariadicSplitLayerCPUTest::getTestCaseName);
+INSTANTIATE_TEST_SUITE_P(smoke_VariadicSplit5D_CPU_planar,
+                         VariadicSplitLayerCPUTest,
+                         ::testing::Combine(::testing::ValuesIn(inputShapes5D_planar),
+                                            ::testing::Values(2, 3, 4),
+                                            ::testing::Values(LengthsPerInfer{{2, 1, -1}}),
+                                            ::testing::ValuesIn(lengthsTypes),
+                                            ::testing::ValuesIn(netPrecisions),
+                                            ::testing::Values(planar_5D_ref, perChannels_5D)),
+                         VariadicSplitLayerCPUTest::getTestCaseName);
 
 const std::vector<InputShape> inputShapes5D_block = {
-    { {}, {{3, 16, 8, 5, 6}} },
-    {
-        // dynamic
-        {-1, 16, -1, -1, -1},
-        // target
-        {
-            {1, 16, 5, 6, 7},
-            {3, 16, 24, 5, 8},
-            {5, 16, 6, 7, 5}
-        }
-    },
-    {
-        // dynamic
-        {{1, 5}, 16, {1, 48}, {2, 24}, {2, 64}},
-        // target
-        {
-            {2, 16, 7, 6, 5},
-            {1, 16, 6, 5, 7},
-            {3, 16, 5, 7, 6}
-        }
-    },
+    {{}, {{3, 16, 8, 5, 6}}},
+    {// dynamic
+     {-1, 16, -1, -1, -1},
+     // target
+     {{1, 16, 5, 6, 7}, {3, 16, 24, 5, 8}, {5, 16, 6, 7, 5}}},
+    {// dynamic
+     {{1, 5}, 16, {1, 48}, {2, 24}, {2, 64}},
+     // target
+     {{2, 16, 7, 6, 5}, {1, 16, 6, 5, 7}, {3, 16, 5, 7, 6}}},
 };
 
-INSTANTIATE_TEST_SUITE_P(smoke_VariadicSplit5D_CPU_Block8, VariadicSplitLayerCPUTest,
-        ::testing::Combine(
-            ::testing::ValuesIn(inputShapes5D_block),
-            ::testing::Values(2, 3, 4),
-            ::testing::Values(LengthsPerInfer{{1, 2, -1}}),
-            ::testing::ValuesIn(lengthsTypes),
-            ::testing::ValuesIn(netPrecisions),
-            ::testing::Values(blocked8_5D_ref)),
-        VariadicSplitLayerCPUTest::getTestCaseName);
-
-INSTANTIATE_TEST_SUITE_P(smoke_VariadicSplit5D_CPU_Block16, VariadicSplitLayerCPUTest,
-        ::testing::Combine(
-            ::testing::ValuesIn(inputShapes5D_block),
-            ::testing::Values(2, 3, 4),
-            ::testing::Values(LengthsPerInfer{{2, 1, -1, 1}}),
-            ::testing::ValuesIn(lengthsTypes),
-            ::testing::ValuesIn(netPrecisions),
-            ::testing::Values(blocked16_5D_ref)),
-        VariadicSplitLayerCPUTest::getTestCaseName);
-
-INSTANTIATE_TEST_SUITE_P(smoke_VariadicSplit3D_static, VariadicSplitLayerCPUTest,
-        ::testing::Combine(
-            ::testing::Values(InputShape{ {}, {{14, 7, 21}} }),
-            ::testing::Values(1, 2),
-            ::testing::Values(LengthsPerInfer{{2, 4, -1}}),
-            ::testing::Values(ngraph::helpers::InputLayerType::CONSTANT),
-            ::testing::ValuesIn(netPrecisions),
-            ::testing::Values(CPUSpecificParams{{}, {}, {}, "ref"})),
-        VariadicSplitLayerCPUTest::getTestCaseName);
+INSTANTIATE_TEST_SUITE_P(smoke_VariadicSplit5D_CPU_Block8,
+                         VariadicSplitLayerCPUTest,
+                         ::testing::Combine(::testing::ValuesIn(inputShapes5D_block),
+                                            ::testing::Values(2, 3, 4),
+                                            ::testing::Values(LengthsPerInfer{{1, 2, -1}}),
+                                            ::testing::ValuesIn(lengthsTypes),
+                                            ::testing::ValuesIn(netPrecisions),
+                                            ::testing::Values(blocked8_5D_ref)),
+                         VariadicSplitLayerCPUTest::getTestCaseName);
+
+INSTANTIATE_TEST_SUITE_P(smoke_VariadicSplit5D_CPU_Block16,
+                         VariadicSplitLayerCPUTest,
+                         ::testing::Combine(::testing::ValuesIn(inputShapes5D_block),
+                                            ::testing::Values(2, 3, 4),
+                                            ::testing::Values(LengthsPerInfer{{2, 1, -1, 1}}),
+                                            ::testing::ValuesIn(lengthsTypes),
+                                            ::testing::ValuesIn(netPrecisions),
+                                            ::testing::Values(blocked16_5D_ref)),
+                         VariadicSplitLayerCPUTest::getTestCaseName);
+
+INSTANTIATE_TEST_SUITE_P(smoke_VariadicSplit3D_static,
+                         VariadicSplitLayerCPUTest,
+                         ::testing::Combine(::testing::Values(InputShape{{}, {{14, 7, 21}}}),
+                                            ::testing::Values(1, 2),
+                                            ::testing::Values(LengthsPerInfer{{2, 4, -1}}),
+                                            ::testing::Values(ov::test::utils::InputLayerType::CONSTANT),
+                                            ::testing::ValuesIn(netPrecisions),
+                                            ::testing::Values(CPUSpecificParams{{}, {}, {}, "ref"})),
+                         VariadicSplitLayerCPUTest::getTestCaseName);
 
 const std::vector<InputShape> inputShapes3D = {
-    {
-        // dynamic
-        {-1, -1, -1},
-        // target
-        {
-            {7, 21, 14},
-            {21, 7, 14},
-            {21, 14, 7},
-        }
-    },
-    {
-        // dynamic
-        {{1, 60}, {1, 50}, {1, 48}},
-        // target
-        {
-            {14, 21, 7},
-            {21, 7, 14},
-            {7, 14, 21},
-        }
-    },
+    {// dynamic
+     {-1, -1, -1},
+     // target
+     {
+         {7, 21, 14},
+         {21, 7, 14},
+         {21, 14, 7},
+     }},
+    {// dynamic
+     {{1, 60}, {1, 50}, {1, 48}},
+     // target
+     {
+         {14, 21, 7},
+         {21, 7, 14},
+         {7, 14, 21},
+     }},
 };
 
-INSTANTIATE_TEST_SUITE_P(smoke_VariadicSplit3D, VariadicSplitLayerCPUTest,
-        ::testing::Combine(
-            ::testing::ValuesIn(inputShapes3D),
-            ::testing::Values(0, 1, 2),
-            ::testing::Values(LengthsPerInfer{{2, 4, -1}}),
-            ::testing::ValuesIn(lengthsTypes),
-            ::testing::ValuesIn(netPrecisions),
-            ::testing::Values(CPUSpecificParams{{}, {}, {}, "ref"})),
-        VariadicSplitLayerCPUTest::getTestCaseName);
-
-INSTANTIATE_TEST_SUITE_P(smoke_VariadicSplit2D_static, VariadicSplitLayerCPUTest,
-        ::testing::Combine(
-            ::testing::Values(InputShape{ {}, {{6, 12}} }),
-            ::testing::Values(1),
-            ::testing::Values(LengthsPerInfer{{2, -1}}),
-            ::testing::Values(ngraph::helpers::InputLayerType::CONSTANT),
-            ::testing::ValuesIn(netPrecisions),
-            ::testing::Values(CPUSpecificParams{{}, {}, {}, "ref"})),
-        VariadicSplitLayerCPUTest::getTestCaseName);
+INSTANTIATE_TEST_SUITE_P(smoke_VariadicSplit3D,
+                         VariadicSplitLayerCPUTest,
+                         ::testing::Combine(::testing::ValuesIn(inputShapes3D),
+                                            ::testing::Values(0, 1, 2),
+                                            ::testing::Values(LengthsPerInfer{{2, 4, -1}}),
+                                            ::testing::ValuesIn(lengthsTypes),
+                                            ::testing::ValuesIn(netPrecisions),
+                                            ::testing::Values(CPUSpecificParams{{}, {}, {}, "ref"})),
+                         VariadicSplitLayerCPUTest::getTestCaseName);
+
+INSTANTIATE_TEST_SUITE_P(smoke_VariadicSplit2D_static,
+                         VariadicSplitLayerCPUTest,
+                         ::testing::Combine(::testing::Values(InputShape{{}, {{6, 12}}}),
+                                            ::testing::Values(1),
+                                            ::testing::Values(LengthsPerInfer{{2, -1}}),
+                                            ::testing::Values(ov::test::utils::InputLayerType::CONSTANT),
+                                            ::testing::ValuesIn(netPrecisions),
+                                            ::testing::Values(CPUSpecificParams{{}, {}, {}, "ref"})),
+                         VariadicSplitLayerCPUTest::getTestCaseName);
 
 const std::vector<InputShape> inputShapes2D = {
-    {
-        // dynamic
-        {-1, -1},
-        // target
-        {
-            {3, 8},
-            {10, 4},
-            {3, 6},
-        }
-    },
-    {
-        // dynamic
-        {{1, 60}, {1, 50}},
-        // target
-        {
-            {3, 4},
-            {4, 4},
-            {6, 12},
-        }
-    },
+    {// dynamic
+     {-1, -1},
+     // target
+     {
+         {3, 8},
+         {10, 4},
+         {3, 6},
+     }},
+    {// dynamic
+     {{1, 60}, {1, 50}},
+     // target
+     {
+         {3, 4},
+         {4, 4},
+         {6, 12},
+     }},
 };
 
-INSTANTIATE_TEST_SUITE_P(smoke_VariadicSplit2D, VariadicSplitLayerCPUTest,
-        ::testing::Combine(
-            ::testing::ValuesIn(inputShapes2D),
-            ::testing::Values(0, 1),
-            ::testing::Values(LengthsPerInfer{{2, -1}}),
-            ::testing::ValuesIn(lengthsTypes),
-            ::testing::ValuesIn(netPrecisions),
-            ::testing::Values(CPUSpecificParams{{}, {}, {}, "ref"})),
-        VariadicSplitLayerCPUTest::getTestCaseName);
-
-INSTANTIATE_TEST_SUITE_P(smoke_VariadicSplit1D_static, VariadicSplitLayerCPUTest,
-        ::testing::Combine(
-            ::testing::Values(InputShape{ {}, {{10}} }),
-            ::testing::Values(0),
-            ::testing::Values(LengthsPerInfer{{2, 1, 1, -1}}),
-            ::testing::Values(ngraph::helpers::InputLayerType::CONSTANT),
-            ::testing::ValuesIn(netPrecisions),
-            ::testing::Values(CPUSpecificParams{{}, {}, {}, "unknown"}, CPUSpecificParams{{}, {}, {"ref"}, "ref"})),
-        VariadicSplitLayerCPUTest::getTestCaseName);
+INSTANTIATE_TEST_SUITE_P(smoke_VariadicSplit2D,
+                         VariadicSplitLayerCPUTest,
+                         ::testing::Combine(::testing::ValuesIn(inputShapes2D),
+                                            ::testing::Values(0, 1),
+                                            ::testing::Values(LengthsPerInfer{{2, -1}}),
+                                            ::testing::ValuesIn(lengthsTypes),
+                                            ::testing::ValuesIn(netPrecisions),
+                                            ::testing::Values(CPUSpecificParams{{}, {}, {}, "ref"})),
+                         VariadicSplitLayerCPUTest::getTestCaseName);
+
+INSTANTIATE_TEST_SUITE_P(smoke_VariadicSplit1D_static,
+                         VariadicSplitLayerCPUTest,
+                         ::testing::Combine(::testing::Values(InputShape{{}, {{10}}}),
+                                            ::testing::Values(0),
+                                            ::testing::Values(LengthsPerInfer{{2, 1, 1, -1}}),
+                                            ::testing::Values(ov::test::utils::InputLayerType::CONSTANT),
+                                            ::testing::ValuesIn(netPrecisions),
+                                            ::testing::Values(CPUSpecificParams{{}, {}, {}, "unknown"},
+                                                              CPUSpecificParams{{}, {}, {"ref"}, "ref"})),
+                         VariadicSplitLayerCPUTest::getTestCaseName);
 
 const std::vector<InputShape> inputShapes1D = {
-    {
-        // dynamic
-        {-1},
-        // target
-        {
-            {5},
-            {15},
-            {10},
-        }
-    },
-    {
-        // dynamic
-        {{1, 60}},
-        // target
-        {
-            {15},
-            {5},
-            {10},
-        }
-    },
+    {// dynamic
+     {-1},
+     // target
+     {
+         {5},
+         {15},
+         {10},
+     }},
+    {// dynamic
+     {{1, 60}},
+     // target
+     {
+         {15},
+         {5},
+         {10},
+     }},
 };
 
-INSTANTIATE_TEST_SUITE_P(smoke_VariadicSplit1D, VariadicSplitLayerCPUTest,
-        ::testing::Combine(
-            ::testing::ValuesIn(inputShapes1D),
-            ::testing::Values(0),
-            ::testing::Values(LengthsPerInfer{{2, 1, 1, -1}}),
-            ::testing::ValuesIn(lengthsTypes),
-            ::testing::ValuesIn(netPrecisions),
-            ::testing::Values(CPUSpecificParams{{}, {}, {}, "ref"})),
-        VariadicSplitLayerCPUTest::getTestCaseName);
-
-const std::vector<InputShape> inputShapes4D_zero_dims = {
-    {
-        // dynamic
-        {-1, -1, -1, -1},
-        // target
-        {
-            {1, 7, 7, 7},
-            {3, 7, 7, 7},
-        }
-    }
-};
-
-INSTANTIATE_TEST_SUITE_P(smoke_VariadicSplit4D_CPU_zero_dims, VariadicSplitLayerCPUTest,
-        ::testing::Combine(
-            ::testing::ValuesIn(inputShapes4D_zero_dims),
-            ::testing::Values(1, 2, 3),
-            ::testing::Values(LengthsPerInfer{{3, 4, -1}}, LengthsPerInfer{{3, -1, 4}}, LengthsPerInfer{{-1, 3, 4}}),
-            ::testing::ValuesIn(lengthsTypes),
-            ::testing::ValuesIn(netPrecisions),
-            ::testing::Values(planar_4D_ref)),
-        VariadicSplitLayerCPUTest::getTestCaseName);
-
-INSTANTIATE_TEST_SUITE_P(smoke_VariadicSplit4D_CPU_zero_dims_nspc_ncsp, VariadicSplitLayerCPUTest,
-        ::testing::Combine(
-            ::testing::ValuesIn(inputShapes4D_zero_dims),
-            ::testing::Values(1),
-            ::testing::Values(LengthsPerInfer{{3, 4, -1}}, LengthsPerInfer{{3, -1, 4}}, LengthsPerInfer{{-1, 3, 4}}),
-            ::testing::ValuesIn(lengthsTypes),
-            ::testing::ValuesIn(netPrecisions),
-            ::testing::Values(perChannelsToPlanar_4D)),
-        VariadicSplitLayerCPUTest::getTestCaseName);
-
+INSTANTIATE_TEST_SUITE_P(smoke_VariadicSplit1D,
+                         VariadicSplitLayerCPUTest,
+                         ::testing::Combine(::testing::ValuesIn(inputShapes1D),
+                                            ::testing::Values(0),
+                                            ::testing::Values(LengthsPerInfer{{2, 1, 1, -1}}),
+                                            ::testing::ValuesIn(lengthsTypes),
+                                            ::testing::ValuesIn(netPrecisions),
+                                            ::testing::Values(CPUSpecificParams{{}, {}, {}, "ref"})),
+                         VariadicSplitLayerCPUTest::getTestCaseName);
+
+const std::vector<InputShape> inputShapes4D_zero_dims = {{// dynamic
+                                                          {-1, -1, -1, -1},
+                                                          // target
+                                                          {
+                                                              {1, 7, 7, 7},
+                                                              {3, 7, 7, 7},
+                                                          }}};
+
+INSTANTIATE_TEST_SUITE_P(smoke_VariadicSplit4D_CPU_zero_dims,
+                         VariadicSplitLayerCPUTest,
+                         ::testing::Combine(::testing::ValuesIn(inputShapes4D_zero_dims),
+                                            ::testing::Values(1, 2, 3),
+                                            ::testing::Values(LengthsPerInfer{{3, 4, -1}},
+                                                              LengthsPerInfer{{3, -1, 4}},
+                                                              LengthsPerInfer{{-1, 3, 4}}),
+                                            ::testing::ValuesIn(lengthsTypes),
+                                            ::testing::ValuesIn(netPrecisions),
+                                            ::testing::Values(planar_4D_ref)),
+                         VariadicSplitLayerCPUTest::getTestCaseName);
+
+INSTANTIATE_TEST_SUITE_P(smoke_VariadicSplit4D_CPU_zero_dims_nspc_ncsp,
+                         VariadicSplitLayerCPUTest,
+                         ::testing::Combine(::testing::ValuesIn(inputShapes4D_zero_dims),
+                                            ::testing::Values(1),
+                                            ::testing::Values(LengthsPerInfer{{3, 4, -1}},
+                                                              LengthsPerInfer{{3, -1, 4}},
+                                                              LengthsPerInfer{{-1, 3, 4}}),
+                                            ::testing::ValuesIn(lengthsTypes),
+                                            ::testing::ValuesIn(netPrecisions),
+                                            ::testing::Values(perChannelsToPlanar_4D)),
+                         VariadicSplitLayerCPUTest::getTestCaseName);
 
 const std::vector<InputShape> inputShapes4D_dynamic_lengths = {
-    {
-        {1, 16, 8, 7},
-        {
-            {1, 16, 8, 7},
-            {1, 16, 8, 7},
-            {1, 16, 8, 7}
-        }
-    },
-    {
-        {-1, -1, -1, -1},
-        {
-            {1, 16, 8, 7},
-            {1, 16, 8, 7},
-            {1, 16, 8, 7}
-        }
-    },
-    {
-        {{1, 5}, -1, {1, 48}, {2, 24}},
-        {
-            {2, 16, 12, 6},
-            {1, 16, 6, 9},
-            {3, 16, 7, 6}
-        }
-    },
+    {{1, 16, 8, 7}, {{1, 16, 8, 7}, {1, 16, 8, 7}, {1, 16, 8, 7}}},
+    {{-1, -1, -1, -1}, {{1, 16, 8, 7}, {1, 16, 8, 7}, {1, 16, 8, 7}}},
+    {{{1, 5}, -1, {1, 48}, {2, 24}}, {{2, 16, 12, 6}, {1, 16, 6, 9}, {3, 16, 7, 6}}},
 };
 
 std::vector<LengthsPerInfer> lengthsPerInfer = {
@@ -601,108 +488,72 @@ std::vector<LengthsPerInfer> lengthsPerInfer = {
     LengthsPerInfer{{10, 4, 2}, {2, 4, 10}, {4, 2, 10}},
 };
 
-INSTANTIATE_TEST_SUITE_P(smoke_VariadicSplit4D_CPU_dynamic_lengths, VariadicSplitLayerCPUTest,
-        ::testing::Combine(
-            ::testing::ValuesIn(inputShapes4D_dynamic_lengths),
-            ::testing::Values(1),
-            ::testing::ValuesIn(lengthsPerInfer),
-            ::testing::Values(ngraph::helpers::InputLayerType::PARAMETER),
-            ::testing::Values(ElementType::f32),
-            ::testing::Values(planar_4D_ref)),
-        VariadicSplitLayerCPUTest::getTestCaseName);
+INSTANTIATE_TEST_SUITE_P(smoke_VariadicSplit4D_CPU_dynamic_lengths,
+                         VariadicSplitLayerCPUTest,
+                         ::testing::Combine(::testing::ValuesIn(inputShapes4D_dynamic_lengths),
+                                            ::testing::Values(1),
+                                            ::testing::ValuesIn(lengthsPerInfer),
+                                            ::testing::Values(ov::test::utils::InputLayerType::PARAMETER),
+                                            ::testing::Values(ElementType::f32),
+                                            ::testing::Values(planar_4D_ref)),
+                         VariadicSplitLayerCPUTest::getTestCaseName);
 
 // =========================================== in - place ============================================================//
-INSTANTIATE_TEST_SUITE_P(smoke_VariadicSplit_CPU_planar_inPlace_0, VariadicSplitLayerCPUTest,
-        ::testing::Combine(
-            ::testing::Values(InputShape{ {}, {{5, 6, 5, 6, 7}} },
-                              InputShape{ {}, {{5, 6, 5, 6}} },
-                              InputShape{ {}, {{5, 6, 5}} },
-                              InputShape{ {5, -1, -1, -1, -1},
-                                          {
-                                              {5, 6, 5, 6, 7},
-                                              {5, 2, 5, 2, 7},
-                                              {5, 8, 5, 8, 7}
-                                          } },
-                              InputShape{ {5, -1, -1, -1},
-                                          {
-                                              {5, 6, 5, 6},
-                                              {5, 2, 5, 2},
-                                              {5, 8, 5, 8}
-                                          } },
-                              InputShape{ {5, -1, -1},
-                                          {
-                                              {5, 6, 5},
-                                              {5, 2, 5},
-                                              {5, 8, 5}
-                                          } }),
-            ::testing::Values(0),
-            ::testing::Values(LengthsPerInfer{{1, 2, -1}}),
-            ::testing::Values(ngraph::helpers::InputLayerType::CONSTANT),
-            ::testing::Values(ElementType::f32),
-            ::testing::Values(CPUSpecificParams{{}, {}, {}, "unknown"})),
-        VariadicSplitLayerCPUTest::getTestCaseName);
-
-INSTANTIATE_TEST_SUITE_P(smoke_VariadicSplit_CPU_planar_inPlace_1, VariadicSplitLayerCPUTest,
-        ::testing::Combine(
-            ::testing::Values(InputShape{ {}, {{1, 6, 5, 6, 7}} },
-                              InputShape{ {}, {{1, 6, 5, 6}} },
-                              InputShape{ {}, {{1, 6, 5}} },
-                              InputShape{ {1, 6, -1, -1, -1},
-                                          {
-                                              {1, 6, 5, 6, 7},
-                                              {1, 6, 5, 2, 7},
-                                              {1, 6, 5, 8, 7}
-                                          } },
-                              InputShape{ {1, 6, -1, -1},
-                                          {
-                                              {1, 6, 5, 6},
-                                              {1, 6, 5, 2},
-                                              {1, 6, 5, 8}
-                                          } },
-                              InputShape{ {1, 6, -1},
-                                          {
-                                              {1, 6, 5},
-                                              {1, 6, 3},
-                                              {1, 6, 7}
-                                          } }),
-            ::testing::Values(1),
-            ::testing::Values(LengthsPerInfer{{1, 2, -1}}),
-            ::testing::Values(ngraph::helpers::InputLayerType::CONSTANT),
-            ::testing::Values(ElementType::f32),
-            ::testing::Values(CPUSpecificParams{{}, {}, {}, "unknown"})),
-        VariadicSplitLayerCPUTest::getTestCaseName);
-
-INSTANTIATE_TEST_SUITE_P(smoke_VariadicSplit4D_CPU_block8_inPlace, VariadicSplitLayerCPUTest,
-        ::testing::Combine(
-            ::testing::Values(InputShape{ {}, {{1, 32, 5, 6}} },
-                              InputShape{ {1, 32, -1, -1},
-                                          {
-                                              {1, 32, 5, 6},
-                                              {1, 32, 5, 2},
-                                              {1, 32, 5, 8}
-                                          } }),
-            ::testing::Values(1),
-            ::testing::Values(LengthsPerInfer{{8, 16, -1}}),
-            ::testing::Values(ngraph::helpers::InputLayerType::CONSTANT),
-            ::testing::Values(ElementType::f32),
-            ::testing::Values(blocked8_4D)),
-        VariadicSplitLayerCPUTest::getTestCaseName);
-
-INSTANTIATE_TEST_SUITE_P(smoke_VariadicSplit4D_CPU_block16_inPlace, VariadicSplitLayerCPUTest,
-        ::testing::Combine(
-            ::testing::Values(InputShape{ {}, {{1, 64, 5, 6}} },
-                              InputShape{ {1, 64, -1, -1},
-                                          {
-                                              {1, 64, 5, 6},
-                                              {1, 64, 5, 2},
-                                              {1, 64, 5, 8}
-                                          } }),
-            ::testing::Values(1),
-            ::testing::Values(LengthsPerInfer{{16, 32, -1}}),
-            ::testing::Values(ngraph::helpers::InputLayerType::CONSTANT),
-            ::testing::Values(ElementType::f32),
-            ::testing::Values(blocked16_4D)),
-        VariadicSplitLayerCPUTest::getTestCaseName);
-} // namespace
-
-} // namespace CPULayerTestsDefinitions
+INSTANTIATE_TEST_SUITE_P(
+    smoke_VariadicSplit_CPU_planar_inPlace_0,
+    VariadicSplitLayerCPUTest,
+    ::testing::Combine(::testing::Values(InputShape{{}, {{5, 6, 5, 6, 7}}},
+                                         InputShape{{}, {{5, 6, 5, 6}}},
+                                         InputShape{{}, {{5, 6, 5}}},
+                                         InputShape{{5, -1, -1, -1, -1},
+                                                    {{5, 6, 5, 6, 7}, {5, 2, 5, 2, 7}, {5, 8, 5, 8, 7}}},
+                                         InputShape{{5, -1, -1, -1}, {{5, 6, 5, 6}, {5, 2, 5, 2}, {5, 8, 5, 8}}},
+                                         InputShape{{5, -1, -1}, {{5, 6, 5}, {5, 2, 5}, {5, 8, 5}}}),
+                       ::testing::Values(0),
+                       ::testing::Values(LengthsPerInfer{{1, 2, -1}}),
+                       ::testing::Values(ov::test::utils::InputLayerType::CONSTANT),
+                       ::testing::Values(ElementType::f32),
+                       ::testing::Values(CPUSpecificParams{{}, {}, {}, "unknown"})),
+    VariadicSplitLayerCPUTest::getTestCaseName);
+
+INSTANTIATE_TEST_SUITE_P(
+    smoke_VariadicSplit_CPU_planar_inPlace_1,
+    VariadicSplitLayerCPUTest,
+    ::testing::Combine(::testing::Values(InputShape{{}, {{1, 6, 5, 6, 7}}},
+                                         InputShape{{}, {{1, 6, 5, 6}}},
+                                         InputShape{{}, {{1, 6, 5}}},
+                                         InputShape{{1, 6, -1, -1, -1},
+                                                    {{1, 6, 5, 6, 7}, {1, 6, 5, 2, 7}, {1, 6, 5, 8, 7}}},
+                                         InputShape{{1, 6, -1, -1}, {{1, 6, 5, 6}, {1, 6, 5, 2}, {1, 6, 5, 8}}},
+                                         InputShape{{1, 6, -1}, {{1, 6, 5}, {1, 6, 3}, {1, 6, 7}}}),
+                       ::testing::Values(1),
+                       ::testing::Values(LengthsPerInfer{{1, 2, -1}}),
+                       ::testing::Values(ov::test::utils::InputLayerType::CONSTANT),
+                       ::testing::Values(ElementType::f32),
+                       ::testing::Values(CPUSpecificParams{{}, {}, {}, "unknown"})),
+    VariadicSplitLayerCPUTest::getTestCaseName);
+
+INSTANTIATE_TEST_SUITE_P(
+    smoke_VariadicSplit4D_CPU_block8_inPlace,
+    VariadicSplitLayerCPUTest,
+    ::testing::Combine(::testing::Values(InputShape{{}, {{1, 32, 5, 6}}},
+                                         InputShape{{1, 32, -1, -1}, {{1, 32, 5, 6}, {1, 32, 5, 2}, {1, 32, 5, 8}}}),
+                       ::testing::Values(1),
+                       ::testing::Values(LengthsPerInfer{{8, 16, -1}}),
+                       ::testing::Values(ov::test::utils::InputLayerType::CONSTANT),
+                       ::testing::Values(ElementType::f32),
+                       ::testing::Values(blocked8_4D)),
+    VariadicSplitLayerCPUTest::getTestCaseName);
+
+INSTANTIATE_TEST_SUITE_P(
+    smoke_VariadicSplit4D_CPU_block16_inPlace,
+    VariadicSplitLayerCPUTest,
+    ::testing::Combine(::testing::Values(InputShape{{}, {{1, 64, 5, 6}}},
+                                         InputShape{{1, 64, -1, -1}, {{1, 64, 5, 6}, {1, 64, 5, 2}, {1, 64, 5, 8}}}),
+                       ::testing::Values(1),
+                       ::testing::Values(LengthsPerInfer{{16, 32, -1}}),
+                       ::testing::Values(ov::test::utils::InputLayerType::CONSTANT),
+                       ::testing::Values(ElementType::f32),
+                       ::testing::Values(blocked16_4D)),
+    VariadicSplitLayerCPUTest::getTestCaseName);
+} // namespace
\ No newline at end of file
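All of the shape lists in these suites use the `ov::test::InputShape` convention: the first member is a (possibly dynamic) `ov::PartialShape`, the second is the list of static shapes submitted on successive inferences. A self-contained sketch under that assumption, using the same header these tests include; the variable name `example` is illustrative only:

    #include "shared_test_classes/base/ov_subgraph.hpp"

    // One 4D input: batch and spatial dimensions are unbounded (-1), channels are
    // fixed at 3; each listed static shape is used for one inference in the test.
    ov::test::InputShape example{{-1, 3, -1, -1},
                                 {{8, 3, 8, 4}, {6, 3, 33, 1}, {4, 3, 8, 6}}};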