Address review comments
aobolensk committed Feb 4, 2025
1 parent d785cab commit 1d3dc4b
Showing 21 changed files with 22 additions and 23 deletions.
2 changes: 1 addition & 1 deletion src/plugins/intel_cpu/src/nodes/bin_conv.cpp
@@ -1084,7 +1084,7 @@ void BinaryConvolution::initSupportedPrimitiveDescriptors() {
void BinaryConvolution::createPrimitive() {
auto selectedPrimitiveDescriptor = getSelectedPrimitiveDescriptor();
if (!selectedPrimitiveDescriptor) {
-THROW_CPU_NODE_ERR("CPU binary convolution with name '", getName(), "' doesn't have primitive descriptors.");
+THROW_CPU_NODE_ERR("doesn't have primitive descriptors.");
}

auto srcDims = getParentEdgeAt(0)->getMemory().getStaticDims();
4 changes: 2 additions & 2 deletions src/plugins/intel_cpu/src/nodes/conv.cpp
@@ -420,7 +420,7 @@ void Convolution::getSupportedDescriptors() {
return;
}
if (!attrs.empty()) {
-THROW_CPU_NODE_ERR("has not empty attrs vector");
+THROW_CPU_NODE_ERR("has a non-empty attrs vector");
}

attrs.reserve(2);
@@ -1363,7 +1363,7 @@ void Convolution::prepareParams() {

const NodeDesc* selected_pd = getSelectedPrimitiveDescriptor();
if (selected_pd == nullptr) {
-THROW_CPU_NODE_ERR("Preferable primitive descriptor is not set for node ", getName(), ".");
+THROW_CPU_NODE_ERR("Preferable primitive descriptor is not set.");
}

DnnlMemoryDescCPtr inMemoryDesc = srcMemPtr->getDescWithType<DnnlMemoryDesc>();
2 changes: 1 addition & 1 deletion src/plugins/intel_cpu/src/nodes/embedding_bag_packed.cpp
@@ -54,7 +54,7 @@ EmbeddingBagPacked::EmbeddingBagPacked(const std::shared_ptr<ov::Node>& op, cons
}
}
if (getInputShapeAtPort(INDICES_IDX).getRank() != 2ul) {
-THROW_CPU_NODE_ERR("layer has indices data with invalid rank.");
+THROW_CPU_NODE_ERR("has indices data with invalid rank.");
}
}

2 changes: 1 addition & 1 deletion src/plugins/intel_cpu/src/nodes/extract_image_patches.cpp
@@ -466,7 +466,7 @@ void ExtractImagePatches::execute(const dnnl::stream& strm) {
const auto outStrides = getChildEdgeAt(0)->getMemory().getDescWithType<BlockedMemoryDesc>()->getStrides();
execPtr->exec(src, dst, inStrides, outStrides);
} else {
-THROW_CPU_NODE_ERR("Can't execute extract image patches node. Primitive wasn't created");
+THROW_CPU_NODE_ERR("Primitive wasn't created");
}
}

2 changes: 1 addition & 1 deletion src/plugins/intel_cpu/src/nodes/grid_sample.cpp
@@ -41,7 +41,7 @@ GridSample::GridSample(const std::shared_ptr<ov::Node>& op, const GraphContext::
: Node(op, context, NgraphShapeInferFactory(op)) {
std::string errorMessage;
if (!isSupportedOperation(op, errorMessage)) {
-THROW_CPU_NODE_ERR(errorMessage);
+OPENVINO_THROW_NOT_IMPLEMENTED(errorMessage);
}

if (op->get_input_size() != 2 || op->get_output_size() != 1) {
4 changes: 2 additions & 2 deletions src/plugins/intel_cpu/src/nodes/interpolate.cpp
@@ -2465,7 +2465,7 @@ void Interpolate::prepareParams() {
std::vector<float> dataScales =
getScales(getPaddedInputShape(srcDims, interpAttrs.padBegin, interpAttrs.padEnd), dstDims);
if (!NCHWAsNHWC && (getOutputShapeAtPort(0).getRank() > 2 && (dataScales[0] != 1.f || dataScales[1] != 1.f))) {
-THROW_CPU_NODE_ERR("Interpolate layer only supports resize on spatial dimensions(depth, height and width)");
+THROW_CPU_NODE_ERR("only supports resize on spatial dimensions(depth, height and width)");
}

if (canUseAclExecutor) {
@@ -2724,7 +2724,7 @@ void Interpolate::execute(const dnnl::stream& strm) {
} else if (aclExecPtr) {
aclExecPtr->exec({srcMemPtr}, {dstMemPtr}, postOpsDataPtrs.data());
} else {
-THROW_CPU_NODE_ERR("Primitive didn't created");
+THROW_CPU_NODE_ERR("Primitive wasn't created");
}
}

2 changes: 1 addition & 1 deletion src/plugins/intel_cpu/src/nodes/inverse.cpp
@@ -19,7 +19,7 @@ Inverse::Inverse(const std::shared_ptr<ov::Node>& op, const GraphContext::CPtr&
: Node(op, context, NgraphShapeInferFactory(op)) {
std::string errorMessage;
if (!isSupportedOperation(op, errorMessage)) {
-THROW_CPU_NODE_ERR(errorMessage);
+OPENVINO_THROW_NOT_IMPLEMENTED(errorMessage);
}

auto inverse_op = as_type_ptr<op::v14::Inverse>(op);
2 changes: 1 addition & 1 deletion src/plugins/intel_cpu/src/nodes/llm_mlp.cpp
@@ -504,7 +504,7 @@ LLMMLP::LLMMLP(const std::shared_ptr<ov::Node>& op, const GraphContext::CPtr& co
std::string errorMessage;
const auto& config = context->getConfig();
if (!isSupportedOperation(op, errorMessage, config.fcDynamicQuantizationGroupSize)) {
-THROW_CPU_NODE_ERR(errorMessage);
+OPENVINO_THROW_NOT_IMPLEMENTED(errorMessage);
}
const auto node_mlp = ov::as_type_ptr<const LLMMLPNode>(op);
m_mlp_config = node_mlp->get_config();
2 changes: 1 addition & 1 deletion src/plugins/intel_cpu/src/nodes/multinomial.cpp
@@ -18,7 +18,7 @@ Multinomial::Multinomial(const std::shared_ptr<ov::Node>& op, const GraphContext
: Node(op, context, NgraphShapeInferFactory(op)) {
std::string errorMessage;
if (!isSupportedOperation(op, errorMessage)) {
-THROW_CPU_NODE_ERR(errorMessage);
+OPENVINO_THROW_NOT_IMPLEMENTED(errorMessage);
}

auto multinomial_op = as_type_ptr<op::v13::Multinomial>(op);
2 changes: 1 addition & 1 deletion src/plugins/intel_cpu/src/nodes/mvn.cpp
@@ -2319,7 +2319,7 @@ void MVN::execute(const dnnl::stream& strm) {
} else if (aclExecPtr) {
aclExecPtr->exec({srcMemPtr}, {dstMemPtr}, postOpsDataPtrs.data());
} else {
-THROW_CPU_NODE_ERR("Primitive didn't created");
+THROW_CPU_NODE_ERR("Primitive wasn't created");
}
}

2 changes: 1 addition & 1 deletion src/plugins/intel_cpu/src/nodes/ngram.cpp
@@ -109,7 +109,7 @@ void Ngram::execute(const dnnl::stream& strm) {
} else if (idcesPrecision == ov::element::i64) {
batchLenghts = computeBatchLenghts<std::int64_t>();
} else {
-THROW_CPU_NODE_ERR("Unsupported idces precision: ", idcesPrecision);
+THROW_CPU_NODE_ERR("Unsupported indices precision: ", idcesPrecision);
}

/* The following procedure applied to each batch:
2 changes: 1 addition & 1 deletion src/plugins/intel_cpu/src/nodes/paged_attn.cpp
@@ -56,7 +56,7 @@ PagedAttention::PagedAttention(const std::shared_ptr<ov::Node>& op, const GraphC
: Node(op, context, InternalDynShapeInferFactory()) {
std::string errorMessage;
if (!isSupportedOperation(op, errorMessage)) {
-THROW_CPU_NODE_ERR(errorMessage);
+OPENVINO_THROW_NOT_IMPLEMENTED(errorMessage);
}
// output score may have no child
m_hasScore = !op->get_output_target_inputs(1).empty();
2 changes: 1 addition & 1 deletion src/plugins/intel_cpu/src/nodes/pooling.cpp
@@ -507,7 +507,7 @@ void Pooling::prepareParams() {
dnnlExecPtr = result.first;

if (!dnnlExecPtr) {
-THROW_CPU_NODE_ERR("Primitive descriptor was not found for node ", getName(), ".");
+THROW_CPU_NODE_ERR("Primitive descriptor was not found.");
}

auto scratchpadMem = getScratchPadMem(dnnlExecPtr->getScratchPadDesc());
2 changes: 1 addition & 1 deletion src/plugins/intel_cpu/src/nodes/qkv_proj.cpp
@@ -348,7 +348,7 @@ QKVProjection::QKVProjection(const std::shared_ptr<ov::Node>& op, const GraphCon
}

if (!isSupportedOperation(op, errorMessage, concurrency, config.fcDynamicQuantizationGroupSize)) {
-THROW_CPU_NODE_ERR(errorMessage);
+OPENVINO_THROW_NOT_IMPLEMENTED(errorMessage);
}
const auto node = ov::as_type_ptr<const QKVProjectionNode>(op);
m_config = node->get_config();
2 changes: 1 addition & 1 deletion src/plugins/intel_cpu/src/nodes/random_uniform.cpp
@@ -41,7 +41,7 @@ RandomUniform::RandomUniform(const std::shared_ptr<ov::Node>& op, const GraphCon
: Node(op, context, NgraphShapeInferFactory(op)) {
std::string errorMessage;
if (!isSupportedOperation(op, errorMessage)) {
-THROW_CPU_NODE_ERR(errorMessage);
+OPENVINO_THROW_NOT_IMPLEMENTED(errorMessage);
}

// RandomUniform should generate new sequence each run even if all inputs are constants. So that method
1 change: 0 additions & 1 deletion src/plugins/intel_cpu/src/nodes/rdft.h
@@ -118,7 +118,6 @@ class RDFT : public Node {
bool needShapeInfer() const override;
bool needPrepareParams() const override;

-std::string errorMsgPrefix;
bool inverse;
std::vector<int> axes;
std::vector<int> signalSizes;
2 changes: 1 addition & 1 deletion src/plugins/intel_cpu/src/nodes/rms_norm.cpp
@@ -122,7 +122,7 @@ RMSNorm::RMSNorm(const std::shared_ptr<ov::Node>& op, const GraphContext::CPtr&
: Node(op, context, RMSNormShapeInferFactory(op)) {
std::string errorMessage;
if (!isSupportedOperation(op, errorMessage)) {
-THROW_CPU_NODE_ERR(errorMessage);
+OPENVINO_THROW_NOT_IMPLEMENTED(errorMessage);
}
const auto rms = ov::as_type_ptr<const ov::op::internal::RMS>(op);
m_eps = static_cast<float>(rms->get_epsilon());
2 changes: 1 addition & 1 deletion src/plugins/intel_cpu/src/nodes/rope.cpp
@@ -25,7 +25,7 @@ RoPE::RoPE(const std::shared_ptr<ov::Node>& op, const GraphContext::CPtr& contex
: Node(op, context, NgraphShapeInferFactory(op)) {
std::string errorMessage;
if (!isSupportedOperation(op, errorMessage)) {
-THROW_CPU_NODE_ERR(errorMessage);
+OPENVINO_THROW_NOT_IMPLEMENTED(errorMessage);
}

const auto node = ov::as_type_ptr<const op::internal::RoPE>(op);
2 changes: 1 addition & 1 deletion src/plugins/intel_cpu/src/nodes/scaled_attn.cpp
@@ -1084,7 +1084,7 @@ ScaledDotProductAttention::ScaledDotProductAttention(const std::shared_ptr<ov::N
: Node(op, context, SDPAShapeInferFactory(op)) {
std::string errorMessage;
if (!isSupportedOperation(op, errorMessage)) {
-THROW_CPU_NODE_ERR(errorMessage);
+OPENVINO_THROW_NOT_IMPLEMENTED(errorMessage);
}
const auto& cpuConfig = context->getConfig();
const auto& keyCachePrecision = cpuConfig.keyCachePrecision;
2 changes: 1 addition & 1 deletion src/plugins/intel_cpu/src/nodes/stft.cpp
@@ -33,7 +33,7 @@ STFT::STFT(const std::shared_ptr<ov::Node>& op, const GraphContext::CPtr& contex
: Node(op, context, NgraphShapeInferFactory(op)) {
std::string errorMessage;
if (!isSupportedOperation(op, errorMessage)) {
-THROW_CPU_NODE_ERR(errorMessage);
+OPENVINO_THROW_NOT_IMPLEMENTED(errorMessage);
}

const auto stft_op = as_type_ptr<op::v15::STFT>(op);
2 changes: 1 addition & 1 deletion src/plugins/intel_cpu/src/nodes/transpose.cpp
@@ -148,7 +148,7 @@ void Transpose::prepareParams() {
auto srcDesc = dnnl::memory::desc(dstDesc.get_dims(), dstDesc.get_data_type(), memory::format_tag::acdb);
auto result = getReorderPrim(context->getParamsCache(), getEngine(), srcDesc, dstDesc);
if (!result) {
-THROW_CPU_NODE_ERR("Reorder primitive descriptor was not found for Transpose node ", getName(), ".");
+THROW_CPU_NODE_ERR("reorder primitive descriptor was not found.");
}
prim = result;

