Changing set_tensor_data flow
pereanub committed Jan 14, 2025
1 parent 3e7d014 commit 58143ab
Showing 1 changed file with 23 additions and 20 deletions.
43 changes: 23 additions & 20 deletions in src/plugins/intel_npu/src/backend/src/zero_infer_request.cpp
@@ -226,14 +226,15 @@ void ZeroInferRequest::set_tensor_data(const std::shared_ptr<ov::ITensor>& tenso
     OV_ITT_TASK_CHAIN(ZERO_SET_TENSOR, itt::domains::LevelZeroBackend, "set_tensor", "set_tensor_data");
     auto& levelZeroTensors = isInput ? get_level_zero_input(index) : _levelZeroOutputTensors.at(index);
 
-    const auto& zeroTensor = std::dynamic_pointer_cast<ZeroTensor>(tensor);
+    bool updateCommandListArg = false;
 
-    if (zeroTensor == nullptr) {
-        OV_ITT_TASK_NEXT(ZERO_SET_TENSOR, "check_data_allocation");
-        if (memory_was_allocated_in_the_same_l0_context(_initStructs->getContext(), tensor->data())) {
-            _logger.debug("ZeroInferRequest::set_tensor_data - tensor was created in the same L0 context");
-            levelZeroTensors = tensor;
-        } else {
+    OV_ITT_TASK_NEXT(ZERO_SET_TENSOR, "check_data_allocation");
+    if (memory_was_allocated_in_the_same_l0_context(_initStructs->getContext(), tensor->data())) {
+        _logger.debug("ZeroInferRequest::set_tensor_data - tensor was created in the same L0 context");
+        levelZeroTensors = tensor;
+        updateCommandListArg = true;
+    } else {
+        if (levelZeroTensors && std::dynamic_pointer_cast<ZeroTensor>(levelZeroTensors) == nullptr) {
             _logger.debug("ZeroInferRequest::set_tensor_data - create locally L0 tensor");
             OV_ITT_TASK_NEXT(ZERO_SET_TENSOR, "allocate tensor");
 
@@ -242,20 +243,22 @@ void ZeroInferRequest::set_tensor_data(const std::shared_ptr<ov::ITensor>& tenso
                                                isInput,
                                                isInput ? *_inputAllocator : *_outputAllocator,
                                                _graph->get_batch_size());
 
+            updateCommandListArg = true;
         }
+    }
 
-        if (_pipelineIsCreated) {
-            _logger.debug("ZeroInferRequest::infer_async - update command list");
+    if (_pipelineIsCreated && updateCommandListArg) {
+        _logger.debug("ZeroInferRequest::infer_async - update command list");
 
-            OPENVINO_ASSERT(levelZeroTensors->data(), "Empty buffer");
+        OPENVINO_ASSERT(levelZeroTensors->data(), "Empty buffer");
 
-            OV_ITT_TASK_NEXT(ZERO_SET_TENSOR, "updateCommandList");
-            _pipeline->updateCommandList(isInput ? _graph->get_input_descriptors().at(index).idx
-                                                 : _graph->get_output_descriptors().at(index).idx,
-                                         levelZeroTensors->data(),
-                                         levelZeroTensors->get_byte_size());
-            _pipeline->closeCommandList();
-        }
+        OV_ITT_TASK_NEXT(ZERO_SET_TENSOR, "updateCommandList");
+        _pipeline->updateCommandList(
+            isInput ? _graph->get_input_descriptors().at(index).idx : _graph->get_output_descriptors().at(index).idx,
+            levelZeroTensors->data(),
+            levelZeroTensors->get_byte_size());
+        _pipeline->closeCommandList();
+    }
 }
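Note: with this change, set_tensor_data only patches the Level Zero command list when a pipeline already exists and the level-zero tensor binding actually changed. A minimal sketch of that gating logic, using hypothetical stand-in types rather than the real ZeroInferRequest members:

#include <cstddef>
#include <iostream>
#include <memory>

// Hypothetical stand-ins for the plugin types; only the control flow is modelled here.
struct Tensor {
    void* data = nullptr;
    size_t byte_size = 0;
};

struct Pipeline {
    void updateCommandList(size_t argIndex, void* data, size_t size) {
        std::cout << "patch argument " << argIndex << " with " << size << " bytes\n";
    }
    void closeCommandList() { std::cout << "close command list\n"; }
};

// Mirrors the reworked flow: the flag is raised only on the branches that rebind
// the level-zero tensor, and the command list is touched only if a pipeline exists.
// The real code decides whether to re-allocate by checking whether the currently
// bound tensor is a ZeroTensor; that check is simplified to a null test here.
void set_tensor_data(std::shared_ptr<Tensor>& levelZeroTensor,
                     const std::shared_ptr<Tensor>& userTensor,
                     bool sameL0Context,
                     bool pipelineIsCreated,
                     Pipeline& pipeline,
                     size_t argIndex) {
    bool updateCommandListArg = false;

    if (sameL0Context) {
        levelZeroTensor = userTensor;  // reuse the caller's buffer directly
        updateCommandListArg = true;
    } else if (levelZeroTensor == nullptr) {
        levelZeroTensor = std::make_shared<Tensor>();  // allocate a local L0 tensor instead
        updateCommandListArg = true;
    }

    if (pipelineIsCreated && updateCommandListArg) {
        pipeline.updateCommandList(argIndex, levelZeroTensor->data, levelZeroTensor->byte_size);
        pipeline.closeCommandList();
    }
}

int main() {
    Pipeline pipeline;
    std::shared_ptr<Tensor> bound;
    auto user = std::make_shared<Tensor>();
    set_tensor_data(bound, user, /*sameL0Context=*/true, /*pipelineIsCreated=*/true, pipeline, 0);
    return 0;
}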

Expand All @@ -270,15 +273,15 @@ void ZeroInferRequest::set_remote_tensor_data(const std::shared_ptr<ZeroRemoteTe
OPENVINO_THROW("Using different context for creating the tensor is not supported");
}

auto data = extract_object(tensor->get_properties(), ov::intel_npu::mem_handle);
OPENVINO_ASSERT(data, "Empty buffer");

auto& levelZeroTensors = isInput ? get_level_zero_input(index) : _levelZeroOutputTensors.at(index);
levelZeroTensors = tensor;

if (_pipelineIsCreated) {
_logger.debug("ZeroInferRequest::infer_async - update command list");

auto data = extract_object(tensor->get_properties(), ov::intel_npu::mem_handle);
OPENVINO_ASSERT(data, "Empty buffer");

OV_ITT_TASK_NEXT(ZERO_SET_REMOTE_TENSOR, "updateCommandList");
_pipeline->updateCommandList(
isInput ? _graph->get_input_descriptors().at(index).idx : _graph->get_output_descriptors().at(index).idx,
Expand Down
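Similarly, set_remote_tensor_data now reads the memory handle from the remote tensor's properties only inside the _pipelineIsCreated branch, i.e. only when the command list is actually patched. A rough sketch of that reordering, again with hypothetical stand-in types:

#include <cassert>
#include <cstddef>

// Hypothetical stand-ins; the real code reads ov::intel_npu::mem_handle from the tensor properties.
struct RemoteTensor {
    void* mem_handle = nullptr;
    size_t byte_size = 0;
};

struct Pipeline {
    void updateCommandList(size_t argIndex, void* data, size_t size) { /* patch the L0 command list */ }
    void closeCommandList() {}
};

void set_remote_tensor_data(RemoteTensor& tensor, bool pipelineIsCreated, Pipeline& pipeline, size_t argIndex) {
    // The binding always switches to the remote tensor (levelZeroTensors = tensor in the real code).
    if (pipelineIsCreated) {
        // The handle is extracted and validated only when the command list is updated.
        void* data = tensor.mem_handle;
        assert(data != nullptr && "Empty buffer");
        pipeline.updateCommandList(argIndex, data, tensor.byte_size);
        pipeline.closeCommandList();
    }
}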
