Skip to content

Commit

Permalink
[CPU] Replace custom OPENVINO_THROW macros usage in nodes implementation with THROW_CPU_NODE_ERR
Browse files Browse the repository at this point in the history
  • Loading branch information
aobolensk committed Jan 31, 2025
1 parent 90ae7ac commit cc7f956
Show file tree
Hide file tree
Showing 59 changed files with 264 additions and 348 deletions.
6 changes: 3 additions & 3 deletions src/plugins/intel_cpu/src/nodes/batch_to_space.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -258,9 +258,9 @@ void BatchToSpace::execute(const dnnl::stream& strm) {
batchToSpaceKernel<element_type_traits<ov::element::i32>::value_type>();
break;
default:
OPENVINO_THROW("BatchToSpace layer does not support precision '",
std::string(getParentEdgeAt(0)->getMemory().getDesc().getPrecision().get_type_name()),
"'");
THROW_CPU_NODE_ERR("does not support precision '",
std::string(getParentEdgeAt(0)->getMemory().getDesc().getPrecision().get_type_name()),
"'");
}
}

Expand Down
6 changes: 3 additions & 3 deletions src/plugins/intel_cpu/src/nodes/bin_conv.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -1084,7 +1084,7 @@ void BinaryConvolution::initSupportedPrimitiveDescriptors() {
void BinaryConvolution::createPrimitive() {
auto selectedPrimitiveDescriptor = getSelectedPrimitiveDescriptor();
if (!selectedPrimitiveDescriptor) {
OPENVINO_THROW("CPU binary convolution with name '", getName(), "' doesn't have primitive descriptors.");
THROW_CPU_NODE_ERR("CPU binary convolution with name '", getName(), "' doesn't have primitive descriptors.");
}

auto srcDims = getParentEdgeAt(0)->getMemory().getStaticDims();
Expand Down Expand Up @@ -1163,7 +1163,7 @@ void BinaryConvolution::createPrimitive() {
(jcp.l_pad <= jcp.ur_w) && (r_pad_no_tail <= jcp.ur_w) &&
IMPLICATION(jcp.kw > 7, (jcp.t_pad == 0 && jcp.l_pad == 0) || (jcp.stride_w == 1 && jcp.stride_h == 1));
if (!args_ok) {
OPENVINO_THROW("BinaryConvolution with name '", getName(), "' has unsupported parameters");
THROW_CPU_NODE_ERR("has unsupported parameters");
}
#if defined(OPENVINO_ARCH_X86_64)
jit_dw_conv_params jcp_dw_conv = {};
Expand Down Expand Up @@ -1416,7 +1416,7 @@ void BinaryConvolution::execute(const dnnl::stream& strm) {

auto selectedPrimitiveDescriptor = getSelectedPrimitiveDescriptor();
if (!selectedPrimitiveDescriptor) {
OPENVINO_THROW("CPU binary convolution with name '", getName(), "' doesn't have primitive descriptors.");
THROW_CPU_NODE_ERR("doesn't have primitive descriptors.");
}

auto implType = selectedPrimitiveDescriptor->getImplementationType();
Expand Down
12 changes: 5 additions & 7 deletions src/plugins/intel_cpu/src/nodes/bucketize.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -38,9 +38,7 @@ Bucketize::Bucketize(const std::shared_ptr<ov::Node>& op, const GraphContext::CP

const auto bucketsize = ov::as_type_ptr<const ov::opset3::Bucketize>(op);
if (bucketsize == nullptr) {
OPENVINO_THROW("Operation with name '",
op->get_friendly_name(),
"' is not an instance of Bucketize from opset3.");
THROW_CPU_NODE_ERR("is not an instance of Bucketize from opset3.");
}

if (getOriginalInputsNumber() != 2 || getOriginalOutputsNumber() != 1) {
Expand Down Expand Up @@ -191,16 +189,16 @@ void Bucketize::prepareParams() {
auto inputBinsMemPtr = getSrcMemoryAtPort(INPUT_BINS_PORT);
auto dstMemPtr = getDstMemoryAtPort(0);
if (!dstMemPtr || !dstMemPtr->isDefined()) {
OPENVINO_THROW("Destination memory is undefined.");
THROW_CPU_NODE_ERR("has destination memory undefined.");
}
if (!inputTensorMemPtr || !inputTensorMemPtr->isDefined()) {
OPENVINO_THROW("Input tensor is undefined.");
THROW_CPU_NODE_ERR("has input tensor undefined.");
}
if (!inputBinsMemPtr || !inputBinsMemPtr->isDefined()) {
OPENVINO_THROW("Input bins is undefined.");
THROW_CPU_NODE_ERR("has input bins undefined.");
}
if (getSelectedPrimitiveDescriptor() == nullptr) {
OPENVINO_THROW("Preferable primitive descriptor is not set.");
THROW_CPU_NODE_ERR("has preferable primitive descriptors unset.");
}

// update with_bins/num_values/num_bin_values
Expand Down
4 changes: 2 additions & 2 deletions src/plugins/intel_cpu/src/nodes/causal_mask_preprocess.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -103,7 +103,7 @@ CausalMaskPreprocess::CausalMaskPreprocess(const std::shared_ptr<ov::Node>& op,
: Node(op, context, InternalDynShapeInferFactory()) {
std::string errorMessage;
if (!isSupportedOperation(op, errorMessage)) {
OPENVINO_THROW("CPU: " + errorMessage);
OPENVINO_THROW_NOT_IMPLEMENTED(errorMessage);
}

const auto node = ov::as_type_ptr<const intel_cpu::CausalMaskPreprocessNode>(op);
Expand Down Expand Up @@ -146,7 +146,7 @@ void CausalMaskPreprocess::initSupportedPrimitiveDescriptors() {
prec = ov::element::i32;
}
} else {
OPENVINO_THROW("CPU: CausalMaskPreprocess type not supported : " + m_config.type);
THROW_CPU_NODE_ERR("type not supported : " + m_config.type);
}

std::vector<PortConfigurator> inPortConfigs;
Expand Down
5 changes: 2 additions & 3 deletions src/plugins/intel_cpu/src/nodes/color_convert.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -1078,8 +1078,7 @@ void ColorConvert::initSupportedI420Impls() {
void ColorConvert::createPrimitive() {
const NodeDesc* desc = getSelectedPrimitiveDescriptor();
if (!desc) {
OPENVINO_THROW(getTypeStr() + " node with name '" + getName() + "' ",
"no optimal primitive descriptor selected");
THROW_CPU_NODE_ERR("has no optimal primitive descriptor selected");
}

if (!_impl) {
Expand All @@ -1094,7 +1093,7 @@ void ColorConvert::createPrimitive() {

void ColorConvert::execute(const dnnl::stream& strm) {
if (!_impl) {
OPENVINO_THROW(getTypeStr() + " node with name '" + getName() + "' ", "has no any implemented converter");
THROW_CPU_NODE_ERR("has no any implemented converter");
}
_impl->execute(strm);
}
Expand Down
18 changes: 9 additions & 9 deletions src/plugins/intel_cpu/src/nodes/concat.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -63,7 +63,7 @@ Concat::Concat(const std::shared_ptr<ov::Node>& op, const GraphContext::CPtr& co
axis += inRank;
}
if (axis >= static_cast<int64_t>(inRank) || axis < 0) {
OPENVINO_THROW("Concat node with name '", getName(), "' has invalid value of axis parameter: ", axis);
THROW_CPU_NODE_ERR("has invalid value of axis parameter: ", axis);
}
this->axis = axis;
}
Expand All @@ -83,7 +83,7 @@ void Concat::getSupportedDescriptors() {
}
}
if (incorrectDims || firstParentDims.size() == 0) {
OPENVINO_THROW("Incorrect input dimensions for concat node ", getName());
THROW_CPU_NODE_ERR("has incorrect input dimensions");
}
}

Expand Down Expand Up @@ -240,7 +240,7 @@ void Concat::selectOptimalPrimitiveDescriptor() {
const auto& parent_config = parent_pdesc->getConfig();
int outputIndex = parentEdge->getInputNum();
if (outputIndex < 0 || outputIndex >= static_cast<int>(parent_config.outConfs.size())) {
OPENVINO_THROW("Cannot find index of output node");
THROW_CPU_NODE_ERR("Cannot find index of output node");
}
const auto& port_desc = parent_config.outConfs[outputIndex].getMemDesc();
for (auto& item : supportedLayouts) {
Expand All @@ -260,7 +260,7 @@ void Concat::selectOptimalPrimitiveDescriptor() {
const auto& config = prim_desc->getConfig();
int inputIndex = childEdge->getOutputNum();
if (inputIndex < 0 || inputIndex >= static_cast<int>(config.inConfs.size())) {
OPENVINO_THROW("Cannot find index of output node");
THROW_CPU_NODE_ERR("Cannot find index of output node");
}
const auto& port_desc = config.inConfs[inputIndex].getMemDesc();
for (auto& item : supportedLayouts) {
Expand Down Expand Up @@ -353,11 +353,11 @@ void Concat::prepareParams() {

const auto& dstMemPtr = getDstMemoryAtPort(0);
if (!dstMemPtr || !dstMemPtr->isDefined()) {
OPENVINO_THROW("Destination memory is undefined.");
THROW_CPU_NODE_ERR("Destination memory is undefined.");
}
auto dstMemDesc = dstMemPtr->getDescWithType<BlockedMemoryDesc>();
if (getSelectedPrimitiveDescriptor() == nullptr) {
OPENVINO_THROW("Preferable primitive descriptor is not set.");
THROW_CPU_NODE_ERR("Preferable primitive descriptor is not set.");
}

const auto& outputStrides = dstMemDesc->getStrides();
Expand Down Expand Up @@ -404,7 +404,7 @@ void Concat::prepareParams() {
const auto& srcMemPtr = getSrcMemoryAtPort(i);
if (!srcMemPtr || !srcMemPtr->isDefined()) {
auto parent = getParentEdgeAt(i)->getParent();
OPENVINO_THROW("Source memory from ", parent->getName(), " is undefined for node ", getName(), ".");
THROW_CPU_NODE_ERR("Source memory from ", parent->getName(), " is undefined.");
}

if (canExecRef) {
Expand Down Expand Up @@ -469,7 +469,7 @@ size_t Concat::inverseOrder(const VectorDims& order, size_t axis) {
void Concat::initOptimalPrimitiveDescriptor() {
auto selected_pd = getSelectedPrimitiveDescriptor();
if (selected_pd == nullptr) {
OPENVINO_THROW("Preferable primitive descriptor is not set.");
THROW_CPU_NODE_ERR("Preferable primitive descriptor is not set.");
}

if (!isInPlace()) {
Expand Down Expand Up @@ -715,7 +715,7 @@ void Concat::resolveInPlaceEdges(Edge::LOOK look) {

auto selected_pd = getSelectedPrimitiveDescriptor();
if (selected_pd == nullptr) {
OPENVINO_THROW("Preferable primitive descriptor is not set.");
THROW_CPU_NODE_ERR("Preferable primitive descriptor is not set.");
}
auto& config = selected_pd->getConfig();
size_t numberOfInputs = config.inConfs.size();
Expand Down
54 changes: 23 additions & 31 deletions src/plugins/intel_cpu/src/nodes/conv.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -346,7 +346,7 @@ ov::element::Type Convolution::fusedEltwisePrecision(const NodePtr& fusingNode)
} else if (fusingPort == 1) {
eltwisePrecision = fusingNode->getOriginalInputPrecisionAtPort(0);
} else {
OPENVINO_THROW("Cannot determine Eltwise post op precision for Convolution node with name '", getName(), "'");
THROW_CPU_NODE_ERR("Cannot determine Eltwise post op precision");
}

return eltwisePrecision;
Expand Down Expand Up @@ -420,7 +420,7 @@ void Convolution::getSupportedDescriptors() {
return;
}
if (!attrs.empty()) {
OPENVINO_THROW("attrs vector is not empty '", getName(), "'");
THROW_CPU_NODE_ERR("has not empty attrs vector");
}

attrs.reserve(2);
Expand Down Expand Up @@ -474,22 +474,20 @@ void Convolution::getSupportedDescriptors() {
}

if (static_cast<int>(getParentEdges().size()) != expectedInputEdgesNum) {
OPENVINO_THROW("Incorrect number of input edges for layer ",
getName(),
", expected: ",
expectedInputEdgesNum,
" actual: ",
getParentEdges().size());
THROW_CPU_NODE_ERR("Incorrect number of input edges, expected: ",
expectedInputEdgesNum,
" actual: ",
getParentEdges().size());
}
if (getChildEdges().empty()) {
OPENVINO_THROW("Incorrect number of output edges for layer ", getName());
THROW_CPU_NODE_ERR("Incorrect number of output edges");
}

int ndims = getInputShapeAtPort(0).getRank();

withDWConv = isFusedWith(Type::Convolution);
if (withDWConv && isDynamicNode()) {
OPENVINO_THROW("DW convolution is fused into convolution node ", getName(), " with dynamic shape.");
THROW_CPU_NODE_ERR("DW convolution is fused into the node with dynamic shape.");
}

for (size_t i = 0; i < fusedWith.size(); i++) {
Expand Down Expand Up @@ -780,11 +778,11 @@ void Convolution::setPostOps(dnnl::primitive_attr& attr,
continue;
}

OPENVINO_THROW("Fusing of ",
NameFromType(node->getType()),
" operation to ",
NameFromType(this->getType()),
" node is not implemented");
THROW_CPU_NODE_ERR("Fusing of ",
NameFromType(node->getType()),
" operation to ",
NameFromType(this->getType()),
" node is not implemented");
}

attr.set_post_ops(ops);
Expand Down Expand Up @@ -1347,25 +1345,25 @@ void Convolution::prepareParams() {
auto wghMemPtr = getSrcMemoryAtPort(1);
auto dstMemPtr = getOutputMemory();
if (!dstMemPtr || !dstMemPtr->isDefined()) {
OPENVINO_THROW("Destination memory was undefined.");
THROW_CPU_NODE_ERR("Destination memory was undefined.");
}
if (!srcMemPtr || !srcMemPtr->isDefined()) {
OPENVINO_THROW("Input memory was undefined.");
THROW_CPU_NODE_ERR("Input memory was undefined.");
}
if (!wghMemPtr || !wghMemPtr->isDefined()) {
OPENVINO_THROW("Weight memory was undefined.");
THROW_CPU_NODE_ERR("Weight memory was undefined.");
}
MemoryPtr biasMemPtr = nullptr;
if (withBiases) {
biasMemPtr = getSrcMemoryAtPort(2);
if (!biasMemPtr || !biasMemPtr->isDefined()) {
OPENVINO_THROW("Input memory is undefined.");
THROW_CPU_NODE_ERR("Input memory is undefined.");
}
}

const NodeDesc* selected_pd = getSelectedPrimitiveDescriptor();
if (selected_pd == nullptr) {
OPENVINO_THROW("Preferable primitive descriptor is not set for node ", getName(), ".");
THROW_CPU_NODE_ERR("Preferable primitive descriptor is not set for node ", getName(), ".");
}

DnnlMemoryDescCPtr inMemoryDesc = srcMemPtr->getDescWithType<DnnlMemoryDesc>();
Expand Down Expand Up @@ -1525,7 +1523,7 @@ void Convolution::prepareParams() {
execPtr = result.first;

if (!execPtr) {
OPENVINO_THROW("Primitive descriptor was not found for node ", getName(), ".");
THROW_CPU_NODE_ERR("Primitive descriptor was not found");
}

primArgs[DNNL_ARG_SRC] = srcMemPtr->getPrimitive();
Expand Down Expand Up @@ -1631,7 +1629,7 @@ void Convolution::ConvolutionSumExecutor::reorder_exec(std::unordered_map<int, d

void Convolution::execute(const dnnl::stream& strm) {
if (!execPtr) {
OPENVINO_THROW("Can't execute Convolution node with name: ", getName(), ", because executor is not compiled");
THROW_CPU_NODE_ERR("executor is not compiled");
}

execPtr->exec(primArgs, strm);
Expand All @@ -1641,10 +1639,7 @@ void Convolution::executeDynamicImpl(const dnnl::stream& strm) {
execute(strm);
if (withSumBroadcast) {
if (!subgraph) {
OPENVINO_THROW("Unexpected: Fused ops subgraph has not been created in ",
getTypeStr(),
" with name ",
getName());
THROW_CPU_NODE_ERR("Fused ops subgraph has not been created");
}
const size_t sumPortNum = getParentEdges().size() - 1;
const auto& sumInpMem = getParentEdgeAt(sumPortNum)->getMemory();
Expand Down Expand Up @@ -1723,10 +1718,7 @@ MemoryDescPtr Convolution::getSumMemDesc(const primitive_desc& primitive_desc_it
MemoryPtr Convolution::getOutputMemory() const {
if (withSumBroadcast) {
if (!subgraph) {
OPENVINO_THROW("Unexpected: Fused ops subgraph has not been created in ",
getTypeStr(),
" with name ",
getName());
THROW_CPU_NODE_ERR("Fused ops subgraph has not been created");
}
auto inp0 = subgraph->getInput(0);
return inp0->getDstMemoryAtPort(0);
Expand Down Expand Up @@ -1776,7 +1768,7 @@ void Convolution::appendZeroPointsArgs() {

void Convolution::initializeInputZeroPoints(const uint8_t* inputZpData, const size_t inputZpSize) {
if (!inputZeroPoints.empty() || !legacyInputZeroPoints.empty()) {
OPENVINO_THROW("input zero point is not empty '", getName(), "'");
THROW_CPU_NODE_ERR("input zero point is not empty");
}
if (inputZpSize) {
inputZeroPointType = zpType::PerTensor;
Expand Down
2 changes: 1 addition & 1 deletion src/plugins/intel_cpu/src/nodes/cum_sum.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -50,7 +50,7 @@ CumSum::CumSum(const std::shared_ptr<ov::Node>& op, const GraphContext::CPtr& co

const auto cumsum = ov::as_type_ptr<const ov::opset3::CumSum>(op);
if (cumsum == nullptr) {
OPENVINO_THROW("Operation with name '", op->get_friendly_name(), "' is not an instance of CumSum from opset3.");
THROW_CPU_NODE_ERR("is not an instance of CumSum from opset3.");
}

exclusive = cumsum->is_exclusive();
Expand Down
Loading

0 comments on commit cc7f956

Please sign in to comment.