Skip to content

Commit

Permalink
test(kernel): 测试 MatMulInteger cpu kernel
Browse files Browse the repository at this point in the history
Signed-off-by: YdrMaster <ydrml@hotmail.com>
  • Loading branch information
YdrMaster committed Dec 18, 2023
1 parent 6462a27 commit 916fd3d
Show file tree
Hide file tree
Showing 7 changed files with 110 additions and 100 deletions.
2 changes: 1 addition & 1 deletion src/04kernel/src/collectors/mat_mul_integer.cc
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,7 @@ namespace refactor::kernel {
std::vector<KernelBox> ans;
switch (_target) {
case decltype(_target)::Cpu:
if (auto ptr = MatMulIntegerCPU::build(info); ptr) {
if (auto ptr = MatMulIntegerCpu::build(info); ptr) {
ans.emplace_back(std::move(ptr));
}
break;
Expand Down
4 changes: 2 additions & 2 deletions src/04kernel/src/kernels/mat_mul_integer/cpu_kernel.cc
Original file line number Diff line number Diff line change
Expand Up @@ -2,10 +2,10 @@
#include "../mat_mul_common/cpu_template.hpp"

namespace refactor::kernel {
using K = MatMulIntegerCPU;
using K = MatMulIntegerCpu;
using DT = DataType;

K::MatMulIntegerCPU(decltype(info) info_) noexcept
K::MatMulIntegerCpu(decltype(info) info_) noexcept
: Kernel(), info(std::move(info_)) {}

auto K::build(decltype(info) info) noexcept -> KernelBox {
Expand Down
4 changes: 2 additions & 2 deletions src/04kernel/src/kernels/mat_mul_integer/cpu_kernel.hh
Original file line number Diff line number Diff line change
Expand Up @@ -6,10 +6,10 @@

namespace refactor::kernel {

struct MatMulIntegerCPU final : public Kernel {
struct MatMulIntegerCpu final : public Kernel {
MatMulIntegerInfo info;

explicit MatMulIntegerCPU(decltype(info)) noexcept;
explicit MatMulIntegerCpu(decltype(info)) noexcept;

static KernelBox build(decltype(info)) noexcept;
static size_t typeId() noexcept;
Expand Down
74 changes: 74 additions & 0 deletions src/04kernel/test/kernels/mat_mul/test_cpu.cpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,74 @@
#include "../src/kernels/mat_mul/cpu_kernel.hh"
#include <gtest/gtest.h>

using namespace refactor;
using namespace kernel;

/// @brief Runs a lowered MatMul routine on raw input buffers and compares
///        the produced output with the expected answer, element-wise.
/// @tparam T element type of all tensors (e.g. float, uint16_t).
/// @param res     runtime resources consumed by the routine.
/// @param routine the kernel routine obtained from `kernel->lower(res)`.
/// @param ans     expected output values; its size also determines the
///                output buffer size. Passed by const reference to avoid
///                copying the vector on every call.
/// @param a,b,c   input buffers (A, B and bias C; C may be empty when the
///                kernel takes no bias — presumably ignored then, the
///                routine only reads the pointers it needs).
template<class T>
static void check(
    Resources &&res,
    Routine &&routine,
    std::vector<T> const &ans,
    std::vector<T> const &a,
    std::vector<T> const &b,
    std::vector<T> const &c) {
    std::vector<T> result(ans.size());
    // inference: kernels consume type-erased pointer arrays
    // (second argument is a workspace pointer — unused here, hence nullptr).
    void const *inputs[]{a.data(), b.data(), c.data()};
    void *outputs[]{result.data()};
    routine(res, nullptr, inputs, outputs);
    // check: exact element-wise equality against the expected answer
    EXPECT_EQ(result, ans);
}

// Y = A·B + C with a scalar bias:
// A = [[1,2],[0,.5]] (batch 1), B = [[1,2],[0,.5]], C = 1 broadcast to all
// elements, giving Y = [[2,4],[1,1.25]].
TEST(kernel, MatMulCPU_WithBias) {
    // build routine
    auto A = Tensor::share(DataType::F32, Shape{1, 2, 2});
    auto B = Tensor::share(DataType::F32, Shape{2, 2});
    auto C = Tensor::share(DataType::F32, Shape{});// scalar bias (rank-0 shape)
    // NOTE(review): Y is constructed but never passed to MatMulInfo —
    // presumably it only documents the expected output shape; confirm.
    auto Y = Tensor::share(DataType::F32, Shape{2, 2});
    auto kernel = MatMulCPU::build(MatMulInfo(*A, *B, *C, false, false, 1, 1));
    ASSERT_TRUE(kernel);
    auto res = runtime::Resources();
    // inference + element-wise comparison against the hand-computed answer
    check<float>(std::move(res), kernel->lower(res).routine,
                 {2, 4, 1, 1.25},
                 {1.0, 2.0, 0.0, 0.5},
                 {1.0, 2.0, 0.0, 0.5},
                 {1.0});
}

// Integer-typed (U16) matmul without bias:
// A = [[3,2],[0,1]], B = [[1,0],[2,3]] -> Y = [[7,6],[2,3]].
TEST(kernel, MatMulCPU_UINT16NoBias) {
    // build routine
    auto A = Tensor::share(DataType::U16, Shape{2, 2});
    auto B = Tensor::share(DataType::U16, Shape{2, 2});
    // NOTE(review): Y is constructed but never passed to MatMulInfo —
    // presumably it only documents the expected output shape; confirm.
    auto Y = Tensor::share(DataType::U16, Shape{2, 2});
    // std::nullopt: no bias tensor C
    auto kernel = MatMulCPU::build(MatMulInfo(*A, *B, std::nullopt, false, false, 1, 1));
    ASSERT_TRUE(kernel);
    auto res = runtime::Resources();
    // inference + comparison; an empty vector stands in for the absent C
    check<uint16_t>(std::move(res), kernel->lower(res).routine,
                    {7, 6, 2, 3},
                    {3, 2, 0, 1},
                    {1, 0, 2, 3},
                    {});
}

// Batched matmul with broadcasting on both operands and the bias:
// A is {2,1,2,2}, B is {1,2,2,2} -> output batch dims broadcast to {2,2};
// C has shape {2,1}, i.e. the column vector [1,0]^T added to every result
// matrix. Each of the four 2x2 outputs is A[i]·B[j] + C.
TEST(kernel, MatMulCPU_Broadcast) {
    // build routine
    auto A = Tensor::share(DataType::F32, Shape{2, 1, 2, 2});
    auto B = Tensor::share(DataType::F32, Shape{1, 2, 2, 2});
    auto C = Tensor::share(DataType::F32, Shape{2, 1});
    // NOTE(review): Y is constructed but never passed to MatMulInfo —
    // presumably it only documents the expected output shape; confirm.
    auto Y = Tensor::share(DataType::F32, Shape{2, 2, 2, 2});
    auto kernel = MatMulCPU::build(MatMulInfo(*A, *B, *C, false, false, 1, 1));
    ASSERT_TRUE(kernel);
    auto res = runtime::Resources();
    // expected: four 2x2 matrices, one per broadcast batch pair (i, j)
    check<float>(std::move(res), kernel->lower(res).routine,
                 {2.0, 4.0, 0.0, 0.25,  // A0·B0 + C
                  2.0, 3.0, 0.0, 0.5,   // A0·B1 + C
                  2.0, 3.0, 0.0, 0.5,   // A1·B0 + C
                  2.0, 1.0, 0.0, 1.0},  // A1·B1 + C
                 {1.0, 2.0, 0.0, 0.5,
                  1.0, 0.0, 0.0, 1.0},
                 {1.0, 2.0, 0.0, 0.5,
                  1.0, 0.0, 0.0, 1.0},
                 {1.0, 0.0});
}
31 changes: 31 additions & 0 deletions src/04kernel/test/kernels/mat_mul_integer/test_cpu_kernel.cpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,31 @@
#include "../src/kernels/mat_mul_integer/cpu_kernel.hh"
#include <gtest/gtest.h>

using namespace refactor;
using namespace kernel;

// MatMulInteger (quantized matmul, u8 inputs -> i32 output), no zero points:
// A = [[1,2,3],[4,5,6]] (2x3), B = [[1],[2],[3]] (3x1)
// -> Y = [1+4+9, 4+10+18]^T = [14, 32].
TEST(kernel, MatMulIntegerCpu) {
    // build routine
    auto A = Tensor::share(DataType::U8, Shape{2, 3});
    auto B = Tensor::share(DataType::U8, Shape{3, 1});
    auto Y = Tensor::share(DataType::I32, Shape{2, 1});
    // info is built from the input tensors only; output dtype is fixed i32
    auto kernel = MatMulIntegerCpu::build(MatMulIntegerInfo(TensorRefs{*A, *B}));
    ASSERT_TRUE(kernel);
    auto res = runtime::Resources();
    auto routine = kernel->lower(res).routine;
    // put input data (row-major)
    std::vector<uint8_t>
        dataA{1, 2, 3, 4, 5, 6},
        dataB{1, 2, 3};
    std::vector<int32_t>
        result(Y->elementsSize()),// output buffer sized from Y's shape
        ans{14, 32};
    // inference
    {
        void const *inputs[]{dataA.data(), dataB.data()};
        void *outputs[]{result.data()};
        routine(res, nullptr, inputs, outputs);
    }
    // check
    EXPECT_EQ(result, ans);
}
95 changes: 0 additions & 95 deletions src/04kernel/test/kernels/matmul/test_matmul_cpu.cpp

This file was deleted.

0 comments on commit 916fd3d

Please sign in to comment.