[CPU] Apply 'modernize-*' clang-tidy remarks
aobolensk committed Feb 4, 2025
1 parent 3571d18 commit bb109ca
Showing 626 changed files with 2,876 additions and 4,652 deletions.
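The changes follow a handful of mechanical clang-tidy patterns: modernize-concat-nested-namespaces collapses "namespace ov { namespace intel_cpu {" into the C++17 form "namespace ov::intel_cpu {", modernize-loop-convert turns index loops into range-based for loops, modernize-use-auto prefers auto* where the pointee type is already spelled in a cast, modernize-use-bool-literals rewrites "while (1)" as "while (true)", and #define constants become typed constexpr values. Below is a minimal self-contained sketch of the post-fix idioms; the names (example::nested, kInitVal, scaleAll, zeroSubnormals) are illustrative only, not from this commit:

#include <cstddef>
#include <cstdint>
#include <vector>

namespace example::nested {  // concatenated nested namespace (C++17)

constexpr int kInitVal = -100;  // typed constexpr constant instead of #define

// Range-based loop instead of indexing (modernize-loop-convert).
inline void scaleAll(std::vector<float>& values, float factor) {
    for (float& v : values) {
        v *= factor;
    }
}

// auto* where the type is already named in the cast (modernize-use-auto).
inline void zeroSubnormals(float* data, std::size_t size) {
    auto* bits = reinterpret_cast<std::uint32_t*>(data);
    for (std::size_t i = 0; i < size; ++i) {
        if ((bits[i] & (0xFFu << 23)) == 0) {  // exponent bits all zero
            bits[i] = 0;
        }
    }
}

}  // namespace example::nested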
6 changes: 2 additions & 4 deletions src/plugins/intel_cpu/src/cache/multi_cache.cpp
@@ -4,10 +4,8 @@

 #include "multi_cache.h"

-namespace ov {
-namespace intel_cpu {
+namespace ov::intel_cpu {

 std::atomic_size_t MultiCache::_typeIdCounter{0};

-} // namespace intel_cpu
-} // namespace ov
+} // namespace ov::intel_cpu
6 changes: 2 additions & 4 deletions src/plugins/intel_cpu/src/compiled_model.cpp
@@ -32,8 +32,7 @@

 using namespace ov::threading;

-namespace ov {
-namespace intel_cpu {
+namespace ov::intel_cpu {

 struct ImmediateSerialExecutor : public ov::threading::ITaskExecutor {
     void run(ov::threading::Task task) override {
@@ -359,5 +358,4 @@ void CompiledModel::release_memory() {
     }
 }

-} // namespace intel_cpu
-} // namespace ov
+} // namespace ov::intel_cpu
6 changes: 2 additions & 4 deletions src/plugins/intel_cpu/src/config.cpp
@@ -18,8 +18,7 @@
 #include "utils/debug_capabilities.h"
 #include "utils/precision_support.h"

-namespace ov {
-namespace intel_cpu {
+namespace ov::intel_cpu {

 using namespace ov::threading;
 using namespace dnnl::impl::cpu::x64;
@@ -509,5 +508,4 @@ void Config::applyRtInfo(const std::shared_ptr<const ov::Model>& model) {
     }
 }

-} // namespace intel_cpu
-} // namespace ov
+} // namespace ov::intel_cpu
6 changes: 2 additions & 4 deletions src/plugins/intel_cpu/src/cpu_map_scheduling.cpp
@@ -9,8 +9,7 @@
 #include "openvino/runtime/system_conf.hpp"
 #include "openvino/runtime/threading/cpu_streams_info.hpp"

-namespace ov {
-namespace intel_cpu {
+namespace ov::intel_cpu {

 std::vector<std::vector<int>> apply_scheduling_core_type(ov::hint::SchedulingCoreType& input_type,
                                                          const std::vector<std::vector<int>>& proc_type_table) {
@@ -106,5 +105,4 @@ bool get_cpu_pinning(bool& input_value,
     return result_value;
 }

-} // namespace intel_cpu
-} // namespace ov
+} // namespace ov::intel_cpu
6 changes: 2 additions & 4 deletions src/plugins/intel_cpu/src/cpu_map_scheduling.hpp
@@ -15,8 +15,7 @@
 #include "openvino/runtime/properties.hpp"
 #include "openvino/runtime/threading/istreams_executor.hpp"

-namespace ov {
-namespace intel_cpu {
+namespace ov::intel_cpu {

 /**
  * @brief Limit available CPU resource in processors type table according to scheduling core type property
@@ -55,5 +54,4 @@ bool get_cpu_pinning(bool& input_value,
                      const std::vector<std::vector<int>>& proc_type_table,
                      const std::vector<std::vector<int>>& streams_info_table);

-} // namespace intel_cpu
-} // namespace ov
+} // namespace ov::intel_cpu
8 changes: 3 additions & 5 deletions src/plugins/intel_cpu/src/cpu_memory.cpp
@@ -18,8 +18,7 @@
 # include <utility>
 #endif

-namespace ov {
-namespace intel_cpu {
+namespace ov::intel_cpu {
 template <>
 DnnlMemoryDescPtr IMemory::getDescWithType<DnnlMemoryDesc, 0, 0>() const {
     return MemoryDescUtils::convertToDnnlMemoryDesc(getDescPtr());
@@ -32,7 +31,7 @@ BlockedMemoryDescPtr IMemory::getDescWithType<BlockedMemoryDesc, 0, 0>() const {

 namespace {
 inline void setSubnormalsToZero(float* data, size_t size) {
-    uint32_t* u32data = reinterpret_cast<uint32_t*>(data);
+    auto* u32data = reinterpret_cast<uint32_t*>(data);
     for (size_t i = 0; i < size; ++i) {
         if ((u32data[i] & (0xFF << 23)) == 0) {
             u32data[i] = 0;
@@ -729,5 +728,4 @@ MemoryPtr split_vertical(const dnnl::engine& eng,
     return ptr;
 }

-} // namespace intel_cpu
-} // namespace ov
+} // namespace ov::intel_cpu
6 changes: 2 additions & 4 deletions src/plugins/intel_cpu/src/cpu_shape.cpp
@@ -6,8 +6,7 @@

 #include "utils/general_utils.h"

-namespace ov {
-namespace intel_cpu {
+namespace ov::intel_cpu {

 bool Shape::isCompatible(const VectorDims& vecDims) const {
     if (getRank() != vecDims.size()) {
@@ -77,5 +76,4 @@ Shape mergeShapes(const Shape& lhs, const Shape& rhs) {
     return Shape{resultMinDims, resultMaxDims};
 }

-} // namespace intel_cpu
-} // namespace ov
+} // namespace ov::intel_cpu
18 changes: 8 additions & 10 deletions src/plugins/intel_cpu/src/cpu_streams_calculation.cpp
@@ -24,11 +24,10 @@
 using namespace ov;
 using namespace ov::threading;

-#define INIT_VAL -100
-#define TP_CPU_LIMIT 32
+constexpr int INIT_VAL = -100;
+constexpr int TP_CPU_LIMIT = 32;

-namespace ov {
-namespace intel_cpu {
+namespace ov::intel_cpu {

 void sort_table_by_numa_node_id(const int current_numa_node, std::vector<std::vector<int>>& proc_type_table) {
     if (proc_type_table.size() > 1) {
@@ -167,7 +166,7 @@ std::vector<std::vector<int>> get_streams_info_table(

     auto check_threads_per_stream = [&]() {
         int count = 0;
-        while (1) {
+        while (true) {
             for (int n_type = MAIN_CORE_PROC; n_type <= HYPER_THREADING_PROC; n_type++) {
                 count += static_cast<int>(proc_type_table[0][n_type] / n_threads_per_stream);
             }
@@ -213,9 +212,9 @@ std::vector<std::vector<int>> get_streams_info_table(
         ((input_streams_changed == true) && (input_streams == 1))) {
         n_streams = 1;
         stream_info[NUMBER_OF_STREAMS] = n_streams;
-        for (size_t n = 0; n < proc_socket_table.size(); n++) {
-            if (proc_socket_table[n][ALL_PROC] > 0) {
-                current_socket_id = proc_socket_table[n][PROC_SOCKET_ID];
+        for (auto& n : proc_socket_table) {
+            if (n[ALL_PROC] > 0) {
+                current_socket_id = n[PROC_SOCKET_ID];
                 break;
             }
         }
@@ -761,5 +760,4 @@ void get_num_streams(const int streams, const std::shared_ptr<ov::Model>& model,
     }
 }

-} // namespace intel_cpu
-} // namespace ov
+} // namespace ov::intel_cpu
6 changes: 2 additions & 4 deletions src/plugins/intel_cpu/src/cpu_streams_calculation.hpp
@@ -16,8 +16,7 @@
 #include "graph.h"
 #include "openvino/runtime/properties.hpp"

-namespace ov {
-namespace intel_cpu {
+namespace ov::intel_cpu {
 /**
  * @brief Generate streams information table according to processors type table.
  * @param[in] input_streams is the targeted number of streams set by user via ov::num_streams or the default value.
@@ -114,5 +113,4 @@
  */
 void sort_table_by_numa_node_id(const int current_numa_node, std::vector<std::vector<int>>& proc_type_table);

-} // namespace intel_cpu
-} // namespace ov
+} // namespace ov::intel_cpu
6 changes: 2 additions & 4 deletions src/plugins/intel_cpu/src/cpu_tensor.cpp
@@ -10,8 +10,7 @@
 #include "utils/debug_capabilities.h"
 #include "utils/general_utils.h"

-namespace ov {
-namespace intel_cpu {
+namespace ov::intel_cpu {

 Tensor::Tensor(MemoryPtr memptr) : m_memptr{std::move(memptr)} {
     OPENVINO_ASSERT(m_memptr != nullptr);
@@ -106,5 +105,4 @@ std::shared_ptr<ITensor> make_tensor(MemoryPtr mem) {
     return std::make_shared<Tensor>(std::move(mem));
 }

-} // namespace intel_cpu
-} // namespace ov
+} // namespace ov::intel_cpu
6 changes: 2 additions & 4 deletions src/plugins/intel_cpu/src/cpu_types.cpp
@@ -8,8 +8,7 @@

 #include "cpu_shape.h"

-namespace ov {
-namespace intel_cpu {
+namespace ov::intel_cpu {

 std::string dim2str(Dim dim) {
     return dim == Shape::UNDEFINED_DIM ? "?" : std::to_string(dim);
@@ -530,5 +529,4 @@ std::string algToString(const Algorithm alg) {
     return "Undefined";
 }

-} // namespace intel_cpu
-} // namespace ov
+} // namespace ov::intel_cpu
6 changes: 2 additions & 4 deletions src/plugins/intel_cpu/src/dnnl_extension_utils.cpp
@@ -16,8 +16,7 @@

 using namespace dnnl;

-namespace ov {
-namespace intel_cpu {
+namespace ov::intel_cpu {

 uint8_t DnnlExtensionUtils::sizeOfDataType(dnnl::memory::data_type dataType) {
     switch (dataType) {
@@ -295,5 +294,4 @@ std::string DnnlExtensionUtils::computeWeightsStringHash(const std::shared_ptr<c
     return std::to_string(desc_hash) + "_" + std::to_string(reinterpret_cast<uint64_t>(memory->getData()));
 }

-} // namespace intel_cpu
-} // namespace ov
+} // namespace ov::intel_cpu
10 changes: 4 additions & 6 deletions src/plugins/intel_cpu/src/dnnl_postops_composer.cpp
@@ -19,8 +19,7 @@
 #include "utils/cpu_utils.hpp"
 #include "utils/debug_capabilities.h"

-namespace ov {
-namespace intel_cpu {
+namespace ov::intel_cpu {

 DnnlPostOpsComposer::DnnlPostOpsComposer(const PostOps& postOps,
                                          const dnnl::engine& engine,
@@ -528,8 +527,8 @@ bool DnnlPostOpsComposer::appendScale(const std::vector<float>& scale, bool isLa
             wei_scale_values[j] *= scale[j];
         }
     } else {
-        for (size_t j = 0; j < wei_scale_values.size(); j++) {
-            wei_scale_values[j] *= scale[0];
+        for (float& wei_scale_value : wei_scale_values) {
+            wei_scale_value *= scale[0];
         }
     }

@@ -802,5 +801,4 @@ DnnlPrimitiveAttrs DnnlPostOpsComposer::compose() {
     return {attr, dnnlArgs, cpuArgs};
 }

-} // namespace intel_cpu
-} // namespace ov
+} // namespace ov::intel_cpu
10 changes: 4 additions & 6 deletions src/plugins/intel_cpu/src/dnnl_postops_composer_legacy.cpp
@@ -10,8 +10,7 @@

 #include "utils/debug_capabilities.h"

-namespace ov {
-namespace intel_cpu {
+namespace ov::intel_cpu {

 DnnlPostOpsComposerLegacy::DnnlPostOpsComposerLegacy(const dnnl::engine& engine,
                                                      dnnl::primitive_attr& attr,
@@ -170,8 +169,8 @@ bool DnnlPostOpsComposerLegacy::appendScale(const std::vector<float>& scale, boo
             wei_scale_values[j] *= scale[j];
         }
     } else {
-        for (size_t j = 0; j < wei_scale_values.size(); j++) {
-            wei_scale_values[j] *= scale[0];
+        for (float& wei_scale_value : wei_scale_values) {
+            wei_scale_value *= scale[0];
         }
     }

@@ -268,5 +267,4 @@ void DnnlPostOpsComposerLegacy::appendClip(const std::vector<float>& low, const
     }
 }

-} // namespace intel_cpu
-} // namespace ov
+} // namespace ov::intel_cpu
7 changes: 3 additions & 4 deletions src/plugins/intel_cpu/src/edge.cpp
@@ -10,8 +10,8 @@
 #include "openvino/util/pp.hpp"

 using namespace dnnl;
-namespace ov {
-namespace intel_cpu {
+
+namespace ov::intel_cpu {

 Edge::Edge(const NodePtr& parent, const NodePtr& child, int pr_port, int ch_port)
     : parent(parent),
@@ -663,5 +663,4 @@ std::ostream& operator<<(std::ostream& os, const Edge& edge) {
               << ":" << Edge::statusToString(edge.getStatus());
 }

-} // namespace intel_cpu
-} // namespace ov
+} // namespace ov::intel_cpu
@@ -9,9 +9,7 @@
 using namespace dnnl::impl::cpu::aarch64;
 using namespace Xbyak_aarch64;

-namespace ov {
-namespace intel_cpu {
-namespace aarch64 {
+namespace ov::intel_cpu::aarch64 {

 // In aarch64, conversion between f16 and i16/u16 can be done with single instruction. The supported
 // conversion precicions are f32, i32, f16, i8 (byte), u8 (byte). If we introduce an intermediate
@@ -274,6 +272,4 @@ void jit_convert_saturation_emitter::emit_isa(const std::vector<size_t>& in_idxs
     jit_convert_process<TReg>(src, dst, input_type, output_type, true);
 }

-} // namespace aarch64
-} // namespace intel_cpu
-} // namespace ov
+} // namespace ov::intel_cpu::aarch64
@@ -6,9 +6,7 @@

 #include "jit_emitter.hpp"

-namespace ov {
-namespace intel_cpu {
-namespace aarch64 {
+namespace ov::intel_cpu::aarch64 {

 class jit_convert_emitter : public jit_emitter {
 public:
@@ -89,6 +87,4 @@ class jit_convert_saturation_emitter : public jit_convert_emitter {
     void emit_isa(const std::vector<size_t>& in_idxs, const std::vector<size_t>& out_idxs) const;
 };

-} // namespace aarch64
-} // namespace intel_cpu
-} // namespace ov
+} // namespace ov::intel_cpu::aarch64
@@ -11,9 +11,7 @@
 #include "openvino/core/type/element_type.hpp"
 #include "transformations/cpu_opset/common/op/swish_cpu.hpp"

-namespace ov {
-namespace intel_cpu {
-namespace aarch64 {
+namespace ov::intel_cpu::aarch64 {

 using namespace dnnl::impl::utils;
 using namespace dnnl::impl::cpu;
@@ -2905,6 +2903,4 @@ std::set<std::vector<element::Type>> jit_tanh_emitter::get_supported_precisions(
     return {{element::f32}};
 }

-} // namespace aarch64
-} // namespace intel_cpu
-} // namespace ov
+} // namespace ov::intel_cpu::aarch64
@@ -6,9 +6,7 @@

 #include "jit_emitter.hpp"

-namespace ov {
-namespace intel_cpu {
-namespace aarch64 {
+namespace ov::intel_cpu::aarch64 {

 class jit_abs_emitter : public jit_emitter {
 public:
@@ -1194,6 +1192,4 @@ class jit_tanh_emitter : public jit_emitter {
     void emit_isa(const std::vector<size_t>& in_vec_idxs, const std::vector<size_t>& out_vec_idxs) const;
 };

-} // namespace aarch64
-} // namespace intel_cpu
-} // namespace ov
+} // namespace ov::intel_cpu::aarch64