[CPU] Replace OPENVINO_ASSERT macros usage in nodes with CPU_NODE_ASSERT #28872

Merged: 5 commits, Feb 10, 2025
Showing changes from 3 commits
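Context for the change: OPENVINO_ASSERT throws with only the message the call site supplies, so CPU nodes had to splice getName() and getTypeStr() into their messages by hand. CPU_NODE_ASSERT is the intel_cpu plugin's node-aware wrapper around it. A minimal sketch of the assumed relationship, for illustration only (the real definition lives in the plugin headers and may differ in detail):

    // Sketch, not the actual plugin code: assumes CPU_NODE_ASSERT forwards to
    // OPENVINO_ASSERT and prepends the node's identity. Usable only inside
    // Node member functions, where getName() and getTypeStr() are in scope.
    #include "openvino/core/except.hpp"  // provides OPENVINO_ASSERT

    #define CPU_NODE_ASSERT(condition, ...) \
        OPENVINO_ASSERT(condition, getName(), " node with type ", getTypeStr(), " ", __VA_ARGS__)

Under that assumption, a failing check automatically reports which node instance it came from, which is why several hunks below (concat.cpp, gather.cpp) can also drop hand-written prefixes such as " Concat node: ", getName() from their messages.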
10 changes: 5 additions & 5 deletions src/plugins/intel_cpu/src/nodes/composite.cpp
@@ -33,7 +33,7 @@ Composite::Composite(const std::shared_ptr<ov::Node>& op, const GraphContext::CP
         OPENVINO_THROW_NOT_IMPLEMENTED(errorMessage);
     }
     const auto& subModel = ov::as_type_ptr<SubModel>(op);
-    OPENVINO_ASSERT(subModel, "Attempt to create SubGraph node from an invalid op type: ", op);
+    CPU_NODE_ASSERT(subModel, "Attempt to create SubGraph node from an invalid op type: ", op);
 
     m_body = subModel->get_function();
 }
@@ -78,27 +78,27 @@ void Composite::createPrimitive() {
 }
 
 int Composite::registerToAllocationContext(int offset, AllocationContext& context) {
-    OPENVINO_ASSERT(getOriginalInputsNumber() == m_graph.inputsNumber(),
+    CPU_NODE_ASSERT(getOriginalInputsNumber() == m_graph.inputsNumber(),
                     "Number of node inputs must be equal the number of inner graph's inputs");
 
     for (size_t i = 0; i < getOriginalInputsNumber(); i++) {
         auto parentEdge = getParentEdgeAt(i);
         auto inputEdges = m_graph.getInputNodeByIndex(i)->getChildEdgesAtPort(0);
         for (const auto& inputEdge : inputEdges) {
-            OPENVINO_ASSERT(inputEdge->getStatus() == Edge::Status::Uninitialized,
+            CPU_NODE_ASSERT(inputEdge->getStatus() == Edge::Status::Uninitialized,
                             "Expected Uninitialized state for edge: ",
                             *this);
             inputEdge->sharedMemFrom(parentEdge);
         }
     }
 
-    OPENVINO_ASSERT(getOriginalOutputsNumber() == m_graph.outputsNumber(),
+    CPU_NODE_ASSERT(getOriginalOutputsNumber() == m_graph.outputsNumber(),
                     "Number of node outputs must be equal the number of inner graph's outputs");
 
     for (size_t i = 0; i < getOriginalOutputsNumber(); i++) {
         auto childEdge = getChildEdgeAt(i);
         auto outputEdge = m_graph.getOutputNodeByIndex(i)->getParentEdgeAt(0);
-        OPENVINO_ASSERT(outputEdge->getStatus() == Edge::Status::Uninitialized,
+        CPU_NODE_ASSERT(outputEdge->getStatus() == Edge::Status::Uninitialized,
                         "Expected Uninitialized state for edge: ",
                         *outputEdge);
         outputEdge->sharedMemFrom(childEdge);
20 changes: 8 additions & 12 deletions src/plugins/intel_cpu/src/nodes/concat.cpp
@@ -725,32 +725,28 @@ void Concat::resolveInPlaceEdges(Edge::LOOK look) {
     size_t numberOfInputs = config.inConfs.size();
     size_t inplaceOutIndx = selected_pd->getConfig().inConfs[0].inPlace();
     auto baseDim = outputShapes.front().getDims()[axis];
-    OPENVINO_ASSERT(baseDim != Shape::UNDEFINED_DIM,
-                    " Concat node: ",
-                    getName(),
-                    " can't use inPlace memory with concatenation on dynamic dimension");
+    CPU_NODE_ASSERT(baseDim != Shape::UNDEFINED_DIM,
+                    "can't use inPlace memory with concatenation on dynamic dimension");
 
     auto edges = getChildEdgesAtPort(inplaceOutIndx);
     auto itr = std::find_if(edges.begin(), edges.end(), [](const EdgePtr& edge) {
         return edge->getStatus() == Edge::Status::Allocated;
     });
-    OPENVINO_ASSERT(itr != edges.end(), " Could not find allocated child edge for concat node: ", getName());
+    CPU_NODE_ASSERT(itr != edges.end(), "Could not find allocated child edge for concat node: ", getName());
 
     auto baseMemBlock = (*itr)->getMemory().getMemoryBlock();
-    OPENVINO_ASSERT(baseMemBlock != nullptr, " NULL base memory block in concat node: ", getName());
+    CPU_NODE_ASSERT(baseMemBlock != nullptr, "NULL base memory block in concat node: ", getName());
 
     ptrdiff_t offset = 0;
     for (size_t i = 0; i < numberOfInputs; ++i) {
         auto partDim = inputShapes[i].getDims()[axis];
-        OPENVINO_ASSERT(partDim != Shape::UNDEFINED_DIM,
-                        " Concat node: ",
-                        getName(),
-                        " can't use inPlace memory with concatenation on dynamic dimension");
+        CPU_NODE_ASSERT(partDim != Shape::UNDEFINED_DIM,
+                        "can't use inPlace memory with concatenation on dynamic dimension");
 
         auto parentEdge = getParentEdgeAt(i);
 
-        OPENVINO_ASSERT(parentEdge->getStatus() == Edge::Status::NotAllocated,
-                        " Unexpected inplace resolve call to an allocated edge: ",
+        CPU_NODE_ASSERT(parentEdge->getStatus() == Edge::Status::NotAllocated,
+                        "Unexpected inplace resolve call to an allocated edge: ",
                         *parentEdge);
 
         auto memDesc = selected_pd->getConfig().inConfs[i].getMemDesc();
2 changes: 1 addition & 1 deletion src/plugins/intel_cpu/src/nodes/eltwise.cpp
@@ -2192,7 +2192,7 @@ void Eltwise::appendPostOps(dnnl::post_ops& ops,
     std::vector<MemoryPtr> postOpsMemPtrs;
     appendPostOpsImpl(ops, postOpDims, postOpsMemPtrs, channelAxis);
 
-    OPENVINO_ASSERT(postOpsMemPtrs.size() <= 1, "at most 1 post ops memory args can be appended.");
+    CPU_NODE_ASSERT(postOpsMemPtrs.size() <= 1, "at most 1 post ops memory args can be appended.");
 
     if (!postOpsMemPtrs.empty()) {
         postOpsMem[DNNL_ARG_ATTR_MULTIPLE_POST_OP(ops.len() - 1) | DNNL_ARG_SRC_1] = postOpsMemPtrs[0];
20 changes: 10 additions & 10 deletions src/plugins/intel_cpu/src/nodes/fake_quantize.cpp
@@ -1217,7 +1217,7 @@ FakeQuantize::FakeQuantize(const std::shared_ptr<ov::Node>& op, const GraphConte
             if (isInputLowBroadcasted) {
                 binarizationThresholds.push_back(inputLowData[0]);
             } else {
-                OPENVINO_ASSERT(axisSize != -1);
+                CPU_NODE_ASSERT(axisSize != -1, "axisSize is not set");
                 binarizationThresholds.resize(rnd_up(axisSize, 16));
                 for (int i = 0; i < axisSize; i++) {
                     binarizationThresholds[i] = inputLowData[i];
@@ -1227,7 +1227,7 @@
             if (isOutputHighBroadcasted) {
                 binarizationOutputMask.push_back(outputHighData[0] == 1.f ? 0xffffffff : 0x00000000);
             } else {
-                OPENVINO_ASSERT(axisSize != -1);
+                CPU_NODE_ASSERT(axisSize != -1, "axisSize is not set");
                 binarizationOutputMask.resize(rnd_up(axisSize, 16));
                 for (int i = 0; i < axisSize; i++) {
                     binarizationOutputMask[i] = outputHighData[i] == 1.f ? 0xffffffff : 0x00000000;
@@ -1563,7 +1563,7 @@ void FakeQuantize::prepareParams() {
     if (isBinarization()) {
         const size_t axisSize = getParentEdgeAt(0)->getMemory().getShape().getStaticDims()[getAxis()];
         const size_t newPaddedSize = rnd_up(axisSize, 16);
-        OPENVINO_ASSERT(newPaddedSize != 0);
+        CPU_NODE_ASSERT(newPaddedSize != 0, "newPaddedSize is 0");
 
         if (internalBlobMemory.empty() || newPaddedSize != rnd_up(currentAxisSize, 16) ||
             ((isInputLowBroadcasted || isOutputHighBroadcasted) && axisSize != currentAxisSize)) {
@@ -2140,7 +2140,7 @@ void FakeQuantize::appendPostOps(dnnl::post_ops& ops,
     std::vector<MemoryPtr> postOpsMemPtrs;
    appendPostOpsImpl(ops, postOpDims, postOpsMemPtrs);
 
-    OPENVINO_ASSERT(postOpsMemPtrs.size() <= 1, "at most 1 post ops memory args can be appended.");
+    CPU_NODE_ASSERT(postOpsMemPtrs.size() <= 1, "at most 1 post ops memory args can be appended.");
 
     if (!postOpsMemPtrs.empty()) {
         postOpsMem[DNNL_ARG_ATTR_MULTIPLE_POST_OP(ops.len() - 1) | DNNL_ARG_SRC_1] = postOpsMemPtrs[0];
@@ -2187,12 +2187,12 @@ void FakeQuantize::updateOptimizedFormula(bool do_rounding) {
                               outputScale.size(),
                               outputShift.size()});
 
-    OPENVINO_ASSERT(inputScale.size() == 1 || inputScale.size() == OC);
-    OPENVINO_ASSERT(inputShift.size() == 1 || inputShift.size() == OC);
-    OPENVINO_ASSERT(cropLow.size() == 1 || cropLow.size() == OC);
-    OPENVINO_ASSERT(cropHigh.size() == 1 || cropHigh.size() == OC);
-    OPENVINO_ASSERT(outputScale.size() == 1 || outputScale.size() == OC);
-    OPENVINO_ASSERT(outputShift.size() == 1 || outputShift.size() == OC);
+    CPU_NODE_ASSERT(inputScale.size() == 1 || inputScale.size() == OC, "inputScale.size() == ", inputScale.size());
+    CPU_NODE_ASSERT(inputShift.size() == 1 || inputShift.size() == OC, "inputShift.size() == ", inputShift.size());
+    CPU_NODE_ASSERT(cropLow.size() == 1 || cropLow.size() == OC, "cropLow.size() == ", cropLow.size());
+    CPU_NODE_ASSERT(cropHigh.size() == 1 || cropHigh.size() == OC, "cropHigh.size() == ", cropHigh.size());
+    CPU_NODE_ASSERT(outputScale.size() == 1 || outputScale.size() == OC, "outputScale.size() == ", outputScale.size());
+    CPU_NODE_ASSERT(outputShift.size() == 1 || outputShift.size() == OC, "outputShift.size() == ", outputShift.size());
 
     // WA: a per-Tensor input shift may little drift away randomly
     // from it's orginal value when FQ was fused with any
4 changes: 2 additions & 2 deletions src/plugins/intel_cpu/src/nodes/fullyconnected.cpp
@@ -228,7 +228,7 @@ void FullyConnected::needPrepareParamsForTensorParallel() {
         if (dim < 0) {
             dim += dims.size();
         }
-        OPENVINO_ASSERT(static_cast<int>(dims[dim]) >= tp_cfg.w_size,
+        CPU_NODE_ASSERT(static_cast<int>(dims[dim]) >= tp_cfg.w_size,
                         getName() + " dim[" + std::to_string(dim) + "] is " + std::to_string(dims[dim]) +
                             ", which is larger than w_size " + std::to_string(tp_cfg.w_size));
         auto splited_dim_vec = split_parts(dims[dim], tp_cfg.w_size);
@@ -252,7 +252,7 @@ void FullyConnected::prepareParams() {
 void FullyConnected::initTensorParallelSync() {
     if (tp_cfg.enable_tensor_parallel) {
         tp_cfg.id = tp_cfg.sub_memory->get_memory_id(tp_cfg.w_rank);
-        OPENVINO_ASSERT(tp_cfg.id >= 0, "Tensor Parallel Config ID cannot be negative.");
+        CPU_NODE_ASSERT(tp_cfg.id >= 0, "Tensor Parallel Config ID cannot be negative.");
         tp_cfg.sub_memory->set_memory_used(tp_cfg.id, tp_cfg.w_rank);
         while (true) {
             std::lock_guard<std::mutex> lock(tp_cfg.sub_memory->_flagMutex);
12 changes: 3 additions & 9 deletions src/plugins/intel_cpu/src/nodes/gather.cpp
@@ -972,20 +972,14 @@ void Gather::resolveInPlaceEdges(Edge::LOOK look) {
     auto& config = selected_pd->getConfig();
     size_t inplaceInpIndx = selected_pd->getConfig().outConfs[outputPort].inPlace();
     const auto baseDim = inputShapes.front().getDims()[axis];
-    OPENVINO_ASSERT(baseDim != Shape::UNDEFINED_DIM,
-                    "Gather node: ",
-                    getName(),
-                    " can not use inPlace memory with splitting on dynamic dimention");
+    CPU_NODE_ASSERT(baseDim != Shape::UNDEFINED_DIM,
+                    "can not use inPlace memory with splitting on dynamic dimention");
     auto baseMemBlock = getParentEdgeAt(inplaceInpIndx)->getMemory().getMemoryBlock();
     const auto index = constIndices.front();
     const ptrdiff_t offset = index < 0 ? baseDim + index : index;
     const auto& childEdges = getChildEdgesAtPort(outputPort);
     for (auto& childEdge : childEdges) {
-        OPENVINO_ASSERT(childEdge->getStatus() == Edge::Status::NotAllocated,
-                        " Unexpected edge status in node: ",
-                        getName(),
-                        " with type ",
-                        getTypeStr());
+        CPU_NODE_ASSERT(childEdge->getStatus() == Edge::Status::NotAllocated, "Unexpected edge status");
 
         auto memBlock = std::make_shared<PartitionedMemoryBlock>(baseMemBlock, baseDim, offset);
         auto newMem = std::make_shared<Memory>(getEngine(), config.outConfs[outputPort].getMemDesc(), memBlock);
2 changes: 1 addition & 1 deletion src/plugins/intel_cpu/src/nodes/if.cpp
@@ -74,7 +74,7 @@ bool If::isSupportedOperation(const std::shared_ptr<const ov::Node>& op, std::st
 If::If(const std::shared_ptr<ov::Node>& op, const GraphContext::CPtr& context)
     : Node(op, context, InternalDynShapeInferFactory()),
       m_op(ov::as_type_ptr<ov::op::v8::If>(op)) {
-    OPENVINO_ASSERT(m_op, "'If' operation is expected");
+    CPU_NODE_ASSERT(m_op, "'If' operation is expected");
 
     std::string errorMessage;
     if (!isSupportedOperation(op, errorMessage)) {
4 changes: 2 additions & 2 deletions src/plugins/intel_cpu/src/nodes/lora.cpp
@@ -33,7 +33,7 @@ LoRA::LoRA(const std::shared_ptr<ov::Node>& op, const GraphContext::CPtr& contex
         OPENVINO_THROW_NOT_IMPLEMENTED(errorMessage);
     }
     const auto& loraModel = ov::as_type_ptr<ov::op::internal::LoraSubgraph>(op);
-    OPENVINO_ASSERT(loraModel,
+    CPU_NODE_ASSERT(loraModel,
                     "Attempt to create LoRA node from an invalid op type: ",
                     op,
                     " with name ",
@@ -96,7 +96,7 @@ int LoRA::registerToAllocationContext(int offset, AllocationContext& context) {
         auto parentEdge = getParentEdgeAt(i);
         auto inputEdges = m_graph.getInputNodeByIndex(i)->getChildEdgesAtPort(0);
         for (const auto& inputEdge : inputEdges) {
-            OPENVINO_ASSERT(inputEdge->getStatus() == Edge::Status::Uninitialized,
+            CPU_NODE_ASSERT(inputEdge->getStatus() == Edge::Status::Uninitialized,
                             "Expected Uninitialized Edge instead of: ",
                             static_cast<int>(inputEdge->getStatus()));
             inputEdge->sharedMemFrom(parentEdge);
4 changes: 2 additions & 2 deletions src/plugins/intel_cpu/src/nodes/matmul.cpp
@@ -376,7 +376,7 @@ std::pair<Shape, Shape> MatMul::makeDummyInputShapes(const Shape& in0, const Sha
         OPENVINO_THROW("Can't create dummy inputs with rank less 2");
     }
 
-    OPENVINO_ASSERT((in0.getRank() == in1.getRank()) && (in1.getRank() == out.getRank()),
+    CPU_NODE_ASSERT((in0.getRank() == in1.getRank()) && (in1.getRank() == out.getRank()),
                     "Can't create dummy inputs if argument shapes ranks are not equal");
 
     auto swapTranspDims = [&](VectorDims& in0, VectorDims& in1) {
@@ -587,7 +587,7 @@ void MatMul::prepareParams() {
     if (src0MemPtr->getDesc().getShape().hasZeroDims() && src0MemPtr->getDesc().getShape().hasZeroDims() &&
         !dstMemPtr->getDesc().getShape().hasZeroDims()) {
         // todo: obviously we need a special executor that would process fused ops providing a correct result
-        OPENVINO_ASSERT(!withBiases && fusedWith.empty(),
+        CPU_NODE_ASSERT(!withBiases && fusedWith.empty(),
                         "Matmul doesn't support a degenerate case when other ops are fused");
         // reset executor
         execPtr.reset();