diff --git a/src/plugins/intel_npu/tests/functional/internal/overload/compile_and_infer.hpp b/src/plugins/intel_npu/tests/functional/internal/overload/compile_and_infer.hpp
index c4388978b730e0..e3775ab13385bc 100644
--- a/src/plugins/intel_npu/tests/functional/internal/overload/compile_and_infer.hpp
+++ b/src/plugins/intel_npu/tests/functional/internal/overload/compile_and_infer.hpp
@@ -100,11 +100,12 @@ TEST_P(OVCompileAndInferRequest, PluginWorkloadType) {
             return property == workload_type.name();
         });
 
+    OV_ASSERT_NO_THROW(execNet = core->compile_model(function, target_device, configuration));
+
+    ov::InferRequest req;
+
     if (isCommandQueueExtSupported()) {
         ASSERT_TRUE(workloadTypeSupported);
-        ov::InferRequest req;
-        OV_ASSERT_NO_THROW(execNet = core->compile_model(function, target_device, configuration));
-
         const auto properties = execNet.get_property(supported_properties.name()).as<std::vector<ov::PropertyName>>();
         ASSERT_TRUE(std::any_of(properties.begin(), properties.end(), [](const PropertyName& property) {
             return property == workload_type.name();
@@ -120,8 +121,9 @@ TEST_P(OVCompileAndInferRequest, PluginWorkloadType) {
         OV_ASSERT_NO_THROW(req.wait());
         ASSERT_TRUE(is_called);
     } else {
+        OV_ASSERT_NO_THROW(req = execNet.create_infer_request());
         ASSERT_FALSE(workloadTypeSupported);
-        OV_EXPECT_THROW_HAS_SUBSTRING(core->compile_model(function, target_device, configuration),
+        OV_EXPECT_THROW_HAS_SUBSTRING(req.infer(),
                                       ov::Exception,
                                       "WorkloadType property is not supported by the current Driver Version!");
     }
@@ -137,10 +139,11 @@ TEST_P(OVCompileAndInferRequest, CompiledModelWorkloadType) {
             return property == workload_type.name();
         });
 
+    ov::InferRequest req;
+
     if (isCommandQueueExtSupported()) {
         ASSERT_TRUE(workloadTypeSupported);
         OV_ASSERT_NO_THROW(execNet.set_property(modelConfiguration));
-        ov::InferRequest req;
         OV_ASSERT_NO_THROW(req = execNet.create_infer_request());
         bool is_called = false;
         OV_ASSERT_NO_THROW(req.set_callback([&](std::exception_ptr exception_ptr) {
@@ -151,8 +154,10 @@ TEST_P(OVCompileAndInferRequest, CompiledModelWorkloadType) {
         OV_ASSERT_NO_THROW(req.wait());
         ASSERT_TRUE(is_called);
     } else {
+        OV_ASSERT_NO_THROW(req = execNet.create_infer_request());
         ASSERT_FALSE(workloadTypeSupported);
-        OV_EXPECT_THROW_HAS_SUBSTRING(execNet.set_property(modelConfiguration),
+        OV_ASSERT_NO_THROW(execNet.set_property(modelConfiguration));
+        OV_EXPECT_THROW_HAS_SUBSTRING(req.infer(),
                                       ov::Exception,
                                       "WorkloadType property is not supported by the current Driver Version!");
     }
@@ -164,9 +169,9 @@ TEST_P(OVCompileAndInferRequest, CompiledModelWorkloadTypeDelayedExecutor) {
     ov::AnyMap modelConfiguration;
     modelConfiguration[workload_type.name()] = WorkloadType::DEFAULT;
     OV_ASSERT_NO_THROW(execNet.set_property(modelConfiguration));
+    ov::InferRequest req;
 
     if (isCommandQueueExtSupported()) {
-        ov::InferRequest req;
         OV_ASSERT_NO_THROW(req = execNet.create_infer_request());
         bool is_called = false;
         OV_ASSERT_NO_THROW(req.set_callback([&](std::exception_ptr exception_ptr) {
@@ -177,7 +182,8 @@ TEST_P(OVCompileAndInferRequest, CompiledModelWorkloadTypeDelayedExecutor) {
         OV_ASSERT_NO_THROW(req.wait());
         ASSERT_TRUE(is_called);
     } else {
-        OV_EXPECT_THROW_HAS_SUBSTRING(execNet.create_infer_request(),
+        OV_ASSERT_NO_THROW(req = execNet.create_infer_request());
+        OV_EXPECT_THROW_HAS_SUBSTRING(req.infer(),
                                       ov::Exception,
                                       "WorkloadType property is not supported by the current Driver Version!");
     }
@@ -258,12 +264,13 @@ TEST_P(OVCompileAndInferRequestTurbo, CompiledModelTurbo) {
             return property == intel_npu::turbo.name();
         });
 
+    ov::InferRequest req;
+
     if (isCommandQueueExtSupported()) {
         ASSERT_TRUE(isTurboSupported);
         OV_ASSERT_NO_THROW(execNet = core->compile_model(function, target_device, configuration));
         auto turbosetting_compiled_model = execNet.get_property(intel_npu::turbo.name());
         OV_ASSERT_NO_THROW(turbosetting_compiled_model = true);
-        ov::InferRequest req;
         OV_ASSERT_NO_THROW(req = execNet.create_infer_request());
         bool is_called = false;
         OV_ASSERT_NO_THROW(req.set_callback([&](std::exception_ptr exception_ptr) {
@@ -274,17 +281,9 @@ TEST_P(OVCompileAndInferRequestTurbo, CompiledModelTurbo) {
         OV_ASSERT_NO_THROW(req.wait());
         ASSERT_TRUE(is_called);
     } else {
-        auto cr_ex = configuration.find(intel_npu::defer_weights_load.name());
-        if (cr_ex->second.as<bool>() == false) {
-            OV_EXPECT_THROW_HAS_SUBSTRING(core->compile_model(function, target_device, configuration),
-                                          ov::Exception,
-                                          "Turbo is not supported by the current driver");
-        } else {
-            OV_ASSERT_NO_THROW(execNet = core->compile_model(function, target_device, configuration));
-            OV_EXPECT_THROW_HAS_SUBSTRING(execNet.create_infer_request(),
-                                          ov::Exception,
-                                          "Turbo is not supported by the current driver");
-        }
+        OV_ASSERT_NO_THROW(execNet = core->compile_model(function, target_device, configuration));
+        OV_ASSERT_NO_THROW(req = execNet.create_infer_request());
+        OV_EXPECT_THROW_HAS_SUBSTRING(req.infer(), ov::Exception, "Turbo is not supported by the current driver");
     }
 }
 