From 91dd3d61362b0fa3e1500b53faef5bbe244de53c Mon Sep 17 00:00:00 2001 From: Stefan Djordjevic <157365107+sdjordjevicTT@users.noreply.github.com> Date: Mon, 24 Feb 2025 16:59:28 +0100 Subject: [PATCH] Rewriting elementwise binary ops in TTNN dialect to be non-dps (#2233) ### Ticket Closes #2231 ### Problem description We have decided not to use DPS in the TTNN dialect until we can model it properly. ### What's changed This PR rewrites eltwise ops in the TTNN dialect in non-DPS style. Note: Most of the changes are rewrites of existing tests; hence, don't fear the number of changed files. :) ### Checklist - [x] New/Existing tests provide coverage for changes --- include/ttmlir/Dialect/TTNN/IR/TTNNOps.td | 42 ++++++---- include/ttmlir/Dialect/TTNN/IR/TTNNTraits.h | 18 ++++ include/ttmlir/Dialect/TTNN/IR/TTNNTraits.td | 22 +++++ lib/Conversion/TTIRToTTNN/TTIRToTTNN.cpp | 18 ++-- lib/Conversion/TTNNToEmitC/TTNNToEmitC.cpp | 5 +- lib/Dialect/TTNN/Analysis/ShardSolver.cpp | 10 ++- lib/Dialect/TTNN/IR/TTNNOpModelInterface.cpp | 6 +- lib/Dialect/TTNN/IR/TTNNWorkarounds.cpp | 1 - lib/Dialect/TTNN/Transforms/Optimizer.cpp | 27 ++++-- .../Decomposition/RepeatOpRewritePattern.cpp | 41 +--------- lib/Target/TTNN/TTNNToFlatbuffer.cpp | 12 +-- .../Workarounds/where_workaround.mlir | 4 +- .../Transforms/ttnn_create_input_gens_0.mlir | 9 +- .../ttnn_modify_signatures_for_dylib_0.mlir | 4 +- .../data_movement/repeat/implicit_repeat.mlir | 6 +- .../repeat/repeat_workaround.mlir | 3 +- .../bitwise_and/simple_bitwise_and.mlir | 7 +- .../binary/bitwise_or/simple_bitwise_or.mlir | 7 +- .../bitwise_xor/simple_bitwise_xor.mlir | 7 +- .../binary/compare}/simple_compare.mlir | 66 ++++++--------- .../{ => eltwise/binary/div}/simple_div.mlir | 6 +- .../binary/logical_and/simple_and.mlir | 9 +- .../eltwise/binary/logical_or/simple_or.mlir | 9 +- .../binary/logical_xor/simple_xor.mlir | 10 +-- .../binary/maximum}/simple_maximum.mlir | 6 +- .../binary/minimum/simple_minimum.mlir | 6 +- .../binary/multiply}/simple_multiply.mlir | 6 +- .../eltwise/binary/power/simple_power.mlir | 7 +- .../binary/remainder/simple_remainder.mlir | 7 +- .../binary/subtract}/simple_subtract.mlir | 6 +- .../TTNN/eltwise/operand_broadcasts.mlir | 6 +- .../ternary/where}/simple_where.mlir | 12 ++- .../TTNN/eltwise/unary/abs/simple_abs.mlir | 5 +- .../unary/bitwise_not/simple_bitwise_not.mlir | 6 +- .../TTNN/eltwise/unary/cbrt/simple_cbrt.mlir | 5 +- .../TTNN/eltwise/unary/ceil/simple_ceil.mlir | 5 +- .../TTNN/eltwise/unary/cos/simple_cos.mlir | 5 +- .../eltwise/unary/expm1/simple_expm1.mlir | 6 +- .../eltwise/unary/floor/simple_floor.mlir | 9 +- .../TTNN/eltwise/unary/gelu/simple_gelu.mlir | 9 +- .../unary/isfinite/simple_isfinite.mlir | 9 +- .../unary/leaky_relu/simple_leaky_relu.mlir | 9 +- .../eltwise/unary/log1p/simple_log1p.mlir | 6 +- .../eltwise/unary/logical_not/simple_not.mlir | 7 +- .../TTNN/eltwise/unary/negate/simple_neg.mlir | 5 +- .../unary/reciprocal/simple_reciprocal.mlir | 5 +- .../TTNN/eltwise/unary/relu/simple_relu.mlir | 5 +- .../eltwise/unary/rsqrt/simple_rsqrt.mlir | 5 +- .../eltwise/unary/sigmoid/simple_sigmoid.mlir | 5 +- .../TTNN/eltwise/unary/sign/simple_sign.mlir | 6 +- .../TTNN/eltwise/unary/sin/simple_sin.mlir | 5 +- .../TTNN/eltwise/unary/sqrt/simple_sqrt.mlir | 5 +- .../TTNN/eltwise/unary/tan/simple_tan.mlir | 6 +- .../TTNN/eltwise/unary/tanh/simple_tanh.mlir | 6 +- .../Dialect/TTNN/implicit_broadcast.mlir | 6 +- test/ttmlir/Dialect/TTNN/multiple_func.mlir | 6 +- .../test_override_reshard_edges.mlir | 15 ++-- 
.../TTNN/optimizer/ttir_to_ttnn_pipeline.mlir | 3 +- test/ttmlir/Dialect/TTNN/simple_scatter.mlir | 5 +- .../TTNN/test_remove_dead_values_pass.mlir | 82 ++++++++----------- .../ttir_to_ttnn_pipeline_custom_opt.mlir | 3 +- .../ttir_to_ttnn_pipeline_hoist_call.mlir | 6 +- .../ttmlir/EmitC/TTNN/eltwise_unary/cbrt.mlir | 3 + .../Silicon/StableHLO/n150/Binary/add_op.mlir | 2 - .../StableHLO/n150/Binary/compare_op.mlir | 12 --- .../StableHLO/n150/Binary/divide_op.mlir | 2 - .../StableHLO/n150/Binary/logical_op.mlir | 6 -- .../StableHLO/n150/Binary/maximum_op.mlir | 2 - .../StableHLO/n150/Binary/minimum_op.mlir | 2 - .../StableHLO/n150/Binary/multiply_op.mlir | 2 - .../StableHLO/n150/Binary/power_op.mlir | 2 - .../StableHLO/n150/Binary/remainder_op.mlir | 2 - .../StableHLO/n150/Binary/subtract_op.mlir | 2 - .../StableHLO/n150/Unary/absolute_op.mlir | 2 - .../Silicon/StableHLO/n150/Unary/cbrt_op.mlir | 2 - .../Silicon/StableHLO/n150/Unary/ceil_op.mlir | 2 - .../StableHLO/n150/Unary/clamp_op.mlir | 4 - .../StableHLO/n150/Unary/cosine_op.mlir | 2 - .../n150/Unary/exponential_minus_one_op.mlir | 2 - .../StableHLO/n150/Unary/exponential_op.mlir | 2 - .../StableHLO/n150/Unary/floor_op.mlir | 2 - .../StableHLO/n150/Unary/isfinite_op.mlir | 2 - .../StableHLO/n150/Unary/log_plus_one_op.mlir | 2 - .../StableHLO/n150/Unary/logical_op.mlir | 2 - .../StableHLO/n150/Unary/negate_op.mlir | 2 - .../StableHLO/n150/Unary/rsqrt_op.mlir | 2 - .../Silicon/StableHLO/n150/Unary/sign_op.mlir | 2 - .../Silicon/StableHLO/n150/Unary/sine_op.mlir | 2 - .../Silicon/StableHLO/n150/Unary/sqrt_op.mlir | 2 - .../Silicon/StableHLO/n150/composite_op.mlir | 1 - .../Silicon/StableHLO/n150/scalar_add_op.mlir | 2 - test/ttmlir/Silicon/TTNN/n150/deallocate.mlir | 9 +- .../TTNN/n150/eltwise/binary/add/add.mlir | 6 +- .../n150/eltwise/binary/add/add_int32.mlir | 6 +- .../binary/compare}/simple_compare.mlir | 66 ++++++--------- .../TTNN/n150/eltwise/binary/div/div.mlir | 6 +- .../TTNN/n150/eltwise/binary/ge/ge.mlir | 6 +- .../binary/logical}/simple_logical.mlir | 39 +++------ .../n150/eltwise/binary/maximum/maximum.mlir | 6 +- .../n150/eltwise/binary/minimum/minimum.mlir | 11 +-- .../eltwise/binary/multiply/multiply.mlir | 6 +- .../TTNN/n150/eltwise/binary/power/power.mlir | 8 +- .../eltwise/binary/remainder/remainder.mlir | 7 +- .../eltwise/binary/subtract/subtract.mlir | 6 +- .../n150/eltwise/ternary/where/where.mlir | 5 +- .../TTNN/n150/eltwise/unary/cbrt/cbrt.mlir | 5 +- .../TTNN/n150/eltwise/unary/ceil/ceil.mlir | 5 +- .../TTNN/n150/eltwise/unary/clamp/clamp.mlir | 6 +- .../n150/eltwise/unary/cosine/cosine.mlir | 5 +- .../TTNN/n150/eltwise/unary/expm1/expm1.mlir | 6 +- .../TTNN/n150/eltwise/unary/floor/floor.mlir | 9 +- .../TTNN/n150/eltwise/unary/gelu/gelu.mlir | 9 +- .../eltwise/unary/is_finite/is_finite.mlir | 9 +- .../eltwise/unary/leaky_relu/leaky_relu.mlir | 6 +- .../TTNN/n150/eltwise/unary/log/log.mlir | 5 +- .../TTNN/n150/eltwise/unary/log1p/log1p.mlir | 6 +- .../eltwise/unary/logical_not/simple_not.mlir | 14 ++++ .../n150/eltwise/unary/negate/negate.mlir | 4 +- .../eltwise/unary/recipricol/recipricol.mlir | 5 +- .../TTNN/n150/eltwise/unary/relu/relu.mlir | 5 +- .../TTNN/n150/eltwise/unary/rsqrt/rsqrt.mlir | 5 +- .../n150/eltwise/unary/sigmoid/sigmoid.mlir | 5 +- .../TTNN/n150/eltwise/unary/sign/sign.mlir | 6 +- .../TTNN/n150/eltwise/unary/sine/sine.mlir | 5 +- .../TTNN/n150/eltwise/unary/sqrt/sqrt.mlir | 5 +- .../TTNN/n150/eltwise/unary/tan/tan.mlir | 5 +- .../TTNN/n150/eltwise/unary/tanh/tanh.mlir | 5 +- 
.../Silicon/TTNN/n150/operand_broadcasts.mlir | 6 +- .../Silicon/TTNN/n150/perf/test_perf_and.mlir | 9 +- .../TTNN/n150/perf/test_perf_ceil.mlir | 5 +- .../TTNN/n150/perf/test_perf_cosine.mlir | 5 +- .../Silicon/TTNN/n150/perf/test_perf_div.mlir | 6 +- .../Silicon/TTNN/n150/perf/test_perf_eq.mlir | 11 +-- .../TTNN/n150/perf/test_perf_expm1.mlir | 6 +- .../TTNN/n150/perf/test_perf_floor.mlir | 10 +-- .../Silicon/TTNN/n150/perf/test_perf_ge.mlir | 7 +- .../TTNN/n150/perf/test_perf_gelu.mlir | 9 +- .../Silicon/TTNN/n150/perf/test_perf_gt.mlir | 11 +-- .../TTNN/n150/perf/test_perf_isfinite.mlir | 9 +- .../Silicon/TTNN/n150/perf/test_perf_log.mlir | 5 +- .../TTNN/n150/perf/test_perf_log1p.mlir | 7 +- .../Silicon/TTNN/n150/perf/test_perf_lt.mlir | 11 +-- .../TTNN/n150/perf/test_perf_maximum.mlir | 6 +- .../TTNN/n150/perf/test_perf_multiply.mlir | 6 +- .../Silicon/TTNN/n150/perf/test_perf_ne.mlir | 11 +-- .../Silicon/TTNN/n150/perf/test_perf_neg.mlir | 4 +- .../Silicon/TTNN/n150/perf/test_perf_not.mlir | 7 +- .../Silicon/TTNN/n150/perf/test_perf_or.mlir | 9 +- .../TTNN/n150/perf/test_perf_power.mlir | 7 +- .../TTNN/n150/perf/test_perf_reciprocal.mlir | 5 +- .../TTNN/n150/perf/test_perf_relu.mlir | 6 +- .../TTNN/n150/perf/test_perf_remainder.mlir | 7 +- .../TTNN/n150/perf/test_perf_rsqrt.mlir | 5 +- .../TTNN/n150/perf/test_perf_sigmoid.mlir | 6 +- .../TTNN/n150/perf/test_perf_sign.mlir | 7 +- .../TTNN/n150/perf/test_perf_sine.mlir | 6 +- .../TTNN/n150/perf/test_perf_sqrt.mlir | 5 +- .../TTNN/n150/perf/test_perf_subtract.mlir | 6 +- .../Silicon/TTNN/n150/perf/test_perf_tan.mlir | 5 +- .../TTNN/n150/perf/test_perf_tanh.mlir | 5 +- .../TTNN/n150/perf/test_perf_where.mlir | 5 +- .../Silicon/TTNN/n150/perf/test_perf_xor.mlir | 10 +-- .../eltwise_binary_op_chain.mlir | 9 +- .../n150/sharded/simple_eltwise_sharded.mlir | 29 +++---- .../Silicon/TTNN/n150/simple_power.mlir | 11 --- .../TestGreedyL1InterleavedPolicy.cpp | 12 +-- test/unittests/Optimizer/TestShardSolver.cpp | 18 ++-- 167 files changed, 640 insertions(+), 745 deletions(-) rename test/ttmlir/Dialect/TTNN/{ => eltwise/binary/compare}/simple_compare.mlir (60%) rename test/ttmlir/Dialect/TTNN/{ => eltwise/binary/div}/simple_div.mlir (76%) rename test/ttmlir/Dialect/TTNN/{ => eltwise/binary/maximum}/simple_maximum.mlir (75%) rename test/ttmlir/Dialect/TTNN/{ => eltwise/binary/multiply}/simple_multiply.mlir (75%) rename test/ttmlir/Dialect/TTNN/{ => eltwise/binary/subtract}/simple_subtract.mlir (75%) rename test/ttmlir/Dialect/TTNN/{ => eltwise/ternary/where}/simple_where.mlir (68%) rename test/ttmlir/Silicon/TTNN/n150/{ => eltwise/binary/compare}/simple_compare.mlir (60%) rename test/ttmlir/Silicon/TTNN/n150/{ => eltwise/binary/logical}/simple_logical.mlir (54%) create mode 100644 test/ttmlir/Silicon/TTNN/n150/eltwise/unary/logical_not/simple_not.mlir delete mode 100644 test/ttmlir/Silicon/TTNN/n150/simple_power.mlir diff --git a/include/ttmlir/Dialect/TTNN/IR/TTNNOps.td b/include/ttmlir/Dialect/TTNN/IR/TTNNOps.td index bc2d4f2628..c2f4a324d1 100644 --- a/include/ttmlir/Dialect/TTNN/IR/TTNNOps.td +++ b/include/ttmlir/Dialect/TTNN/IR/TTNNOps.td @@ -127,15 +127,14 @@ class TTNN_NamedDPSOp traits = []> : } class TTNN_ElementwiseOp traits = []> : - TTNN_NamedDPSOp { + TTNN_Op { - let arguments = (ins Variadic:$inputs, - Variadic:$outputs); + let arguments = (ins Variadic:$inputs); let results = (outs Variadic:$results); } class TTNN_ElementwiseUnaryOp traits = []> : - TTNN_ElementwiseOp { + TTNN_ElementwiseOp { let summary = "Eltwise unary op."; let 
description = [{
     Eltwise unary op.
@@ -143,15 +142,19 @@ class TTNN_ElementwiseUnaryOp<string mnemonic, list<Trait> traits = []> :

   let builders = [
-    OpBuilder<(ins "Value": $in, "Value": $out),
+    OpBuilder<(ins "Value": $in, "Type": $outputType),
+    [{
+      build($_builder, $_state, {outputType}, in);
+    }]>,
+    OpBuilder<(ins "Value": $in),
     [{
-      build($_builder, $_state, {out.getType()}, in, out);
+      build($_builder, $_state, in, in.getType());
     }]>
   ];
 }

 class TTNN_ElementwiseBinaryOp<string mnemonic, list<Trait> traits = []> :
-    TTNN_ElementwiseOp<mnemonic, traits> {
+    TTNN_ElementwiseOp<mnemonic, !listconcat(traits, [TwoOperands])> {
   let summary = "Eltwise binary op.";
   let description = [{
     Eltwise binary op.
@@ -159,15 +162,19 @@ class TTNN_ElementwiseBinaryOp<string mnemonic, list<Trait> traits = []> :

   let builders = [
-    OpBuilder<(ins "Value": $lhs, "Value": $rhs, "Value": $out),
+    OpBuilder<(ins "Value": $lhs, "Value": $rhs, "Type": $outputType),
+    [{
+      build($_builder, $_state, {outputType}, {lhs, rhs});
+    }]>,
+    OpBuilder<(ins "Value": $lhs, "Value": $rhs),
     [{
-      build($_builder, $_state, {out.getType()}, {lhs, rhs}, out);
+      build($_builder, $_state, lhs, rhs, lhs.getType());
     }]>
   ];
 }

 class TTNN_ElementwiseTernaryOp<string mnemonic, list<Trait> traits = []> :
-    TTNN_ElementwiseOp<mnemonic, traits> {
+    TTNN_ElementwiseOp<mnemonic, !listconcat(traits, [ThreeOperands])> {
   let summary = "Eltwise ternary op.";
   let description = [{
     Eltwise ternary op.
@@ -175,9 +182,13 @@ class TTNN_ElementwiseTernaryOp<string mnemonic, list<Trait> traits = []> :

   let builders = [
-    OpBuilder<(ins "Value": $first, "Value": $second, "Value": $third, "Value": $out),
+    OpBuilder<(ins "Value": $first, "Value": $second, "Value": $third, "Type": $outputType),
     [{
-      build($_builder, $_state, {out.getType()}, {first, second, third}, out);
+      build($_builder, $_state, {outputType}, {first, second, third});
+    }]>,
+    OpBuilder<(ins "Value": $first, "Value": $second, "Value": $third),
+    [{
+      build($_builder, $_state, first, second, third, first.getType());
     }]>
   ];
 }
@@ -189,8 +200,6 @@ def TTNN_WhereOp : TTNN_ElementwiseTernaryOp<"where"> {
   }];

   let extraClassDeclaration = [{
-    MutableOperandRange getDpsInitsMutable() { return getOutputsMutable(); }
-
     wa::TTNNOperandsWorkarounds getOperandsWorkarounds() {
       ::mlir::Operation::operand_range inputs = getInputs();
       return
@@ -391,14 +400,13 @@ class TTNN_ElementwiseUnaryWithFloatParameterOp<string mnemonic, list<Trait> traits = []> :
   }];

   let arguments = (ins Variadic<AnyRankedTensor>:$inputs,
-                       Variadic<AnyRankedTensor>:$outputs,
                        F32Attr:$parameter);

   let builders = [
-    OpBuilder<(ins "Value": $in, "Value": $out, "FloatAttr":$parameter),
+    OpBuilder<(ins "Value": $in, "FloatAttr":$parameter),
     [{
-      build($_builder, $_state, {out.getType()}, {in}, {out}, parameter);
+      build($_builder, $_state, {in.getType()}, {in}, parameter);
     }]>
   ];
 }
diff --git a/include/ttmlir/Dialect/TTNN/IR/TTNNTraits.h b/include/ttmlir/Dialect/TTNN/IR/TTNNTraits.h
index a05b0d3aaf..2ac6073146 100644
--- a/include/ttmlir/Dialect/TTNN/IR/TTNNTraits.h
+++ b/include/ttmlir/Dialect/TTNN/IR/TTNNTraits.h
@@ -70,6 +70,24 @@ class HasMemoryConfigTrait
     return mlir::success();
   }
 };
+
+// Trait to verify that operations have exactly N operands.
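+//
+// A short note on the wiring (standard ParamNativeOpTrait expansion): a
+// TableGen def such as
+//   def TwoOperands : ParamNativeOpTrait<"NOperandTTNN", "2">
+// attaches mlir::tt::ttnn::NOperandTTNN<2>::Impl to the op, so verifyTrait
+// rejects any instance whose operand count is not exactly two.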
+template <unsigned N>
+class NOperandTTNN {
+public:
+  template <typename ConcreteType>
+  class Impl
+      : public mlir::OpTrait::TraitBase<ConcreteType, NOperandTTNN<N>::Impl> {
+  public:
+    static LogicalResult verifyTrait(Operation *op) {
+      if (op->getNumOperands() != N) {
+        return op->emitOpError() << "Operation " << op->getName()
+                                 << " must have exactly " << N << " operands.";
+      }
+      return success();
+    }
+  };
+};
+
 } // namespace mlir::tt::ttnn

 #endif
diff --git a/include/ttmlir/Dialect/TTNN/IR/TTNNTraits.td b/include/ttmlir/Dialect/TTNN/IR/TTNNTraits.td
index bf1a2dce64..a68c680bbf 100644
--- a/include/ttmlir/Dialect/TTNN/IR/TTNNTraits.td
+++ b/include/ttmlir/Dialect/TTNN/IR/TTNNTraits.td
@@ -7,10 +7,32 @@

 include "mlir/IR/OpBase.td"

+//===----------------------------------------------------------------------===//
+// TTNN traits definition.
+//===----------------------------------------------------------------------===//
+
 // Trait for ops that have memory config attribute.
 def HasMemoryConfigTrait : NativeOpTrait<"HasMemoryConfigTrait"> {
   let cppNamespace = "mlir::tt::ttnn";
 }

+// Trait for ops with variadic operands to specify the number of operands.
+//
+// Trait for ops with one operand.
+def OneOperand : ParamNativeOpTrait<"NOperandTTNN", "1">
+{
+  let cppNamespace = "mlir::tt::ttnn";
+}
+// Trait for ops with two operands.
+def TwoOperands : ParamNativeOpTrait<"NOperandTTNN", "2">
+{
+  let cppNamespace = "mlir::tt::ttnn";
+}
+// Trait for ops with three operands.
+def ThreeOperands : ParamNativeOpTrait<"NOperandTTNN", "3">
+{
+  let cppNamespace = "mlir::tt::ttnn";
+}
+
 #endif // TTMLIR_TTMLIR_DIALECT_TTNN_TTNNTRAITS_TD
diff --git a/lib/Conversion/TTIRToTTNN/TTIRToTTNN.cpp b/lib/Conversion/TTIRToTTNN/TTIRToTTNN.cpp
index a2819d70f6..8b770a01c0 100644
--- a/lib/Conversion/TTIRToTTNN/TTIRToTTNN.cpp
+++ b/lib/Conversion/TTIRToTTNN/TTIRToTTNN.cpp
@@ -302,8 +302,7 @@ class ElementwiseOpConversionPattern : public OpConversionPattern<TTIROpTy> {
       return failure();
     }

-    rewriter.replaceOpWithNewOp<TTNNOpTy>(op, resultTypes, adaptor.getInputs(),
-                                          adaptor.getOutputs());
+    rewriter.replaceOpWithNewOp<TTNNOpTy>(op, resultTypes, adaptor.getInputs());
     return success();
   }
 };
@@ -633,7 +632,7 @@ class ElementwiseUnaryWithFloatParameterOpConversionPattern
                   ConversionPatternRewriter &rewriter) const override {
     rewriter.replaceOpWithNewOp<TTNNOpTy>(
         op, this->getTypeConverter()->convertType(op.getType(0)),
-        adaptor.getInputs(), adaptor.getOutputs(), adaptor.getParameter());
+        adaptor.getInputs(), adaptor.getParameter());
     return success();
   }
 };
@@ -1272,8 +1271,7 @@ class SubtractOpConversionPattern
     if (lhsType.getShape() == rhsType.getShape()) {
       rewriter.replaceOpWithNewOp<ttnn::SubtractOp>(
-          srcOp, adaptor.getInputs().front(), adaptor.getInputs().back(),
-          adaptor.getOutputs().front());
+          srcOp, adaptor.getInputs().front(), adaptor.getInputs().back());

       // Broadcast for rhs operand require the operation to be commutative to
       // allow switching the order of operands. To allow this conversion, the
@@ -1282,11 +1280,10 @@ class SubtractOpConversionPattern
     } else {
       ttnn::NegOp negOp = ttmlir::utils::createDPSOp<ttnn::NegOp>(
-          rewriter, srcOp.getLoc(), rhsType, adaptor.getInputs().back());
+          rewriter, srcOp.getLoc(), rhsType);

       rewriter.replaceOpWithNewOp<ttnn::AddOp>(
-          srcOp, adaptor.getInputs().front(), negOp.getResults().front(),
-          adaptor.getOutputs().front());
+          srcOp, adaptor.getInputs().front(), negOp.getResults().front());
     }

     return success();
@@ -1415,8 +1412,9 @@ class ScatterOpConversionPattern : public OpConversionPattern<ttir::ScatterOp> {
                   ConversionPatternRewriter &rewriter) const override {
     // The ttnn interface has the inverse inputs of the TTIR dialect op (which
     // matches torch ops).
-    rewriter.replaceOpWithNewOp<ttnn::ScatterOp>(
-        op, adaptor.getUpdate(), adaptor.getInput(), adaptor.getOutput());
+    rewriter.replaceOpWithNewOp<ttnn::ScatterOp>(op, adaptor.getUpdate(),
+                                                 adaptor.getInput(),
+                                                 adaptor.getOutput().getType());
     return success();
   }
diff --git a/lib/Conversion/TTNNToEmitC/TTNNToEmitC.cpp b/lib/Conversion/TTNNToEmitC/TTNNToEmitC.cpp
index 041f9dd866..da17cb30fd 100644
--- a/lib/Conversion/TTNNToEmitC/TTNNToEmitC.cpp
+++ b/lib/Conversion/TTNNToEmitC/TTNNToEmitC.cpp
@@ -136,7 +136,6 @@ class EltwiseUnaryOpConversionPattern
     llvm::SmallVector<Attribute> attrs;
     attrs.push_back(mlir::IntegerAttr::get(rewriter.getIndexType(), 0));
     attrs.push_back(ttnn_to_emitc::utils::createStdNullopt(rewriter));
-    attrs.push_back(mlir::IntegerAttr::get(rewriter.getIndexType(), 1));

     ArrayAttr arrayAttrs = ArrayAttr::get(srcOp->getContext(), attrs);
@@ -173,8 +172,7 @@ class EltwiseUnaryWithFastAndApproximateModeOpConversionPattern
         {mlir::IntegerAttr::get(rewriter.getIndexType(), 0),
          ttnn_to_emitc::utils::convertBoolAttr(
              rewriter, BoolAttr::get(rewriter.getContext(), false)),
-         ttnn_to_emitc::utils::createStdNullopt(rewriter),
-         mlir::IntegerAttr::get(rewriter.getIndexType(), 1)});
+         ttnn_to_emitc::utils::createStdNullopt(rewriter)});

     rewriter.replaceOpWithNewOp<emitc::CallOpaqueOp>(
         srcOp, this->getTypeConverter()->convertType(srcOp.getType(0)),
@@ -243,7 +241,6 @@ class EltwiseBinaryOpConversionPattern
     attrs.push_back(mlir::IntegerAttr::get(rewriter.getIndexType(), 1));
     attrs.push_back(ttnn_to_emitc::utils::createStdNullopt(rewriter));
     attrs.push_back(ttnn_to_emitc::utils::createStdNullopt(rewriter));
-    attrs.push_back(mlir::IntegerAttr::get(rewriter.getIndexType(), 2));

     ArrayAttr arrayAttrs = ArrayAttr::get(srcOp->getContext(), attrs);
diff --git a/lib/Dialect/TTNN/Analysis/ShardSolver.cpp b/lib/Dialect/TTNN/Analysis/ShardSolver.cpp
index 6cb49f3583..b664920825 100644
--- a/lib/Dialect/TTNN/Analysis/ShardSolver.cpp
+++ b/lib/Dialect/TTNN/Analysis/ShardSolver.cpp
@@ -531,10 +531,12 @@ bool ShardSolver::checkShardCompatible(
   uint32_t numOperands = consumerOp->getNumOperands();

-  // Some ops have multiple operands; and some ops have output also an
-  // operand. TBD if there is a more robust way to get real number of inputs.
-  // TODO(odjuricic): cast to DPSop?
-  numOperands = (numOperands > 1) ? numOperands - 1 : numOperands;
+  // DPS ops have an additional operand for the destination style, hence
+  // we need to subtract it from the total number of operands.
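+  // The eltwise ops rewritten in this change are no longer DPS, so they skip
+  // this branch: their getNumOperands() already equals the real input count.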
+  if (llvm::isa<mlir::DestinationStyleOpInterface>(consumerOp)) {
+    numOperands = numOperands - 1;
+  }
+
   std::vector<TTNNLayoutAttr> inputLayouts;

   auto inputUnderCheck =
diff --git a/lib/Dialect/TTNN/IR/TTNNOpModelInterface.cpp b/lib/Dialect/TTNN/IR/TTNNOpModelInterface.cpp
index 639493473f..2623db1cc5 100644
--- a/lib/Dialect/TTNN/IR/TTNNOpModelInterface.cpp
+++ b/lib/Dialect/TTNN/IR/TTNNOpModelInterface.cpp
@@ -36,8 +36,7 @@ ReluOp::getOpConstraints(const std::vector<TTNNLayoutAttr> &inputs,
   assert(inputs.size() == 1);

   const auto inputShape =
-      mlir::cast<RankedTensorType>(getDpsInputOperand(0)->get().getType())
-          .getShape();
+      mlir::cast<RankedTensorType>(getOperand(0).getType()).getShape();

   const auto outputShape =
       mlir::cast<RankedTensorType>(getResults().front().getType()).getShape();
@@ -58,8 +57,7 @@ ReluOp::getOpRuntime(const std::vector<TTNNLayoutAttr> &inputs,
   assert(inputs.size() == 1);

   const auto inputShape =
-      mlir::cast<RankedTensorType>(getDpsInputOperand(0)->get().getType())
-          .getShape();
+      mlir::cast<RankedTensorType>(getOperand(0).getType()).getShape();

   const auto outputShape =
       mlir::cast<RankedTensorType>(getResults().front().getType()).getShape();
diff --git a/lib/Dialect/TTNN/IR/TTNNWorkarounds.cpp b/lib/Dialect/TTNN/IR/TTNNWorkarounds.cpp
index 5919841ec4..4bb6ab4fcf 100644
--- a/lib/Dialect/TTNN/IR/TTNNWorkarounds.cpp
+++ b/lib/Dialect/TTNN/IR/TTNNWorkarounds.cpp
@@ -355,7 +355,6 @@ TTNNOperandsWorkaroundsFactory::createWhereOpOperandsWorkarounds(
       .addInputOperandWorkaround(typeWorkaround)
       .addInputOperandWorkaround(typeWorkaround)
       .addInputOperandWorkaround(typeWorkaround)
-      .addInputOperandWorkaround(typeWorkaround)
       .addOutputOperandWorkaround(typeWorkaround);
 }
 } // namespace mlir::tt::ttnn::wa
diff --git a/lib/Dialect/TTNN/Transforms/Optimizer.cpp b/lib/Dialect/TTNN/Transforms/Optimizer.cpp
index d742254333..a42965ff6b 100644
--- a/lib/Dialect/TTNN/Transforms/Optimizer.cpp
+++ b/lib/Dialect/TTNN/Transforms/Optimizer.cpp
@@ -8,6 +8,7 @@
 #include "ttmlir/Dialect/TTNN/Analysis/LegalLayoutAnalysis.h"
 #include "ttmlir/Dialect/TTNN/Analysis/MemoryLayoutAnalysis.h"
 #include "ttmlir/Dialect/TTNN/Analysis/OpConfigAnalysis.h"
+#include "ttmlir/Dialect/TTNN/IR/TTNNOps.h"
 #include "ttmlir/Dialect/TTNN/IR/TTNNOpsTypes.h"
 #include "ttmlir/Dialect/TTNN/Transforms/Passes.h"
 #include "ttmlir/Dialect/TTNN/Utils/Utils.h"
@@ -377,7 +378,7 @@ class TTNNOptimizer : public impl::TTNNOptimizerBase<TTNNOptimizer> {
   void extractReshardEdges(ModuleOp &moduleOp,
                            std::unordered_set<Edge> &overrideReshardEdges) {
     moduleOp->walk([&](Operation *op) {
-      if (!isa<DestinationStyleOpInterface>(op)) {
+      if (isa<EmptyOp>(op)) {
         return;
       }
@@ -409,16 +410,28 @@ class TTNNOptimizer : public impl::TTNNOptimizerBase<TTNNOptimizer> {
   }

   mlir::TypedValue<DeviceType>
-  getDeviceOpValue(Operation *contextOp) {
+  getOrCreateDeviceOpValue(Operation *contextOp, OpBuilder &builder) {
     Block *block = contextOp->getBlock();
-    mlir::TypedValue<DeviceType> deviceOpResult;
     for (auto &op : block->getOperations()) {
       if (GetDeviceOp deviceOp = dyn_cast<GetDeviceOp>(op)) {
-        deviceOpResult = deviceOp.getResult();
-        break;
+        return deviceOp.getResult();
       }
     }
-    return deviceOpResult;
+
+    // Device op does not exist in the block, hence we need to create it.
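+    // The device attribute may not carry a mesh shape; in that case a 1x1
+    // mesh is used as the fallback (see the meshShape.empty() check below).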
+ DeviceAttr deviceAttr = getCurrentScopeDevice(contextOp); + auto currentInsertionPoint = builder.saveInsertionPoint(); + builder.setInsertionPoint(block, block->begin()); + llvm::SmallVector meshShape{deviceAttr.getMeshShape()}; + if (meshShape.empty()) { + meshShape = llvm::SmallVector{1, 1}; + } + auto deviceOp = builder.create( + contextOp->getLoc(), builder.getType(deviceAttr), + ttnn::MeshShapeAttr::get(contextOp->getContext(), meshShape[0], + meshShape[1])); + builder.restoreInsertionPoint(currentInsertionPoint); + return deviceOp; } void @@ -492,7 +505,7 @@ class TTNNOptimizer : public impl::TTNNOptimizerBase { Operation *memoryReconfigOp = builder.create( consumerOp->getLoc(), newTensorType, producerOp->getResult(0), outputLayout, outputDataType, outputMemConfigAttr, - getDeviceOpValue(consumerOp)); + getOrCreateDeviceOpValue(consumerOp, builder)); consumerOp->setOperand(edge.operandIndex, memoryReconfigOp->getResult(0)); diff --git a/lib/Dialect/TTNN/Transforms/Workarounds/Decomposition/RepeatOpRewritePattern.cpp b/lib/Dialect/TTNN/Transforms/Workarounds/Decomposition/RepeatOpRewritePattern.cpp index 923d2b47d7..41ceb246e7 100644 --- a/lib/Dialect/TTNN/Transforms/Workarounds/Decomposition/RepeatOpRewritePattern.cpp +++ b/lib/Dialect/TTNN/Transforms/Workarounds/Decomposition/RepeatOpRewritePattern.cpp @@ -30,48 +30,9 @@ TTNNRepeatFoldingWorkaround::matchAndRewrite(ttnn::RepeatOp op, addInputs.push_back(op.getOperand()); addInputs.push_back(zeroOp.getResult()); - // Create an EmptyOp as AddOp is a DPS Op. - // Get ttnn::TTNNLayoutAttr of the result type - // - ttnn::TTNNLayoutAttr layoutAttr = - mlir::cast(op.getResult().getType().getEncoding()); - - // Get the shape of the tensor, tensor layout, and data type - // - ttnn::ShapeAttr shapeAttr = ttnn::ShapeAttr::get( - rewriter.getContext(), - mlir::cast(op->getResult(0).getType()).getShape()); - DataType dtype = layoutAttr.getDataType(); - ttnn::Layout ttnnLayoutEnum = ttnn::Layout::RowMajor; - if (layoutAttr.isTiled()) { - ttnnLayoutEnum = ttnn::Layout::Tile; - } else { - ttnnLayoutEnum = ttnn::Layout::RowMajor; - } - DataTypeAttr dTypeAttr = DataTypeAttr::get(rewriter.getContext(), dtype); - ttnn::LayoutAttr tensorLayoutAttr = - ttnn::LayoutAttr::get(op.getContext(), ttnnLayoutEnum); - - // Create MemoryConfigAttr - // - ttnn::BufferTypeAttr bufferTypeAttr = - ttnn::BufferTypeAttr::get(op.getContext(), layoutAttr.getBufferType()); - ttnn::ShardSpecAttr shardSpecAttr = ttnn::ShardSpecAttr::get( - op.getContext(), - ttnn::ShapeAttr::get(op.getContext(), layoutAttr.getShardShape())); - ttnn::MemoryConfigAttr memoryConfigAttr = - ttnn::MemoryConfigAttr::get(op.getContext(), bufferTypeAttr, - shardSpecAttr, layoutAttr.getMemLayout()); - - // Create EmptyOp - // - ttnn::EmptyOp emptyOp = rewriter.create( - op->getLoc(), op.getType(), shapeAttr, dTypeAttr, tensorLayoutAttr, - device, memoryConfigAttr); - // Replace the RepeatOp with an AddOp to perform implicit repeat. 
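  // Adding a zero-filled tensor of the repeated output shape makes the binary
  // add broadcast the smaller operand implicitly, so no explicit output
  // buffer (the EmptyOp removed above) is needed now that AddOp is non-DPS.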
rewriter.replaceOpWithNewOp(op, op.getResult().getType(), - addInputs, emptyOp.getResult()); + addInputs); return success(); } diff --git a/lib/Target/TTNN/TTNNToFlatbuffer.cpp b/lib/Target/TTNN/TTNNToFlatbuffer.cpp index 4acc3601d9..231ec21b74 100644 --- a/lib/Target/TTNN/TTNNToFlatbuffer.cpp +++ b/lib/Target/TTNN/TTNNToFlatbuffer.cpp @@ -1058,12 +1058,12 @@ createEltwiseOp(FlatbufferObjectCache &cache, EltwiseOp op) { ins.push_back(cache.at<::tt::target::ttnn::TensorRef>( getOperandThroughDPSOps(input))); } - assert(op.getOutputs().size() == 1); - return ::tt::target::ttnn::CreateEltwiseOpDirect( - *cache.fbb, type, &ins, - cache.at<::tt::target::ttnn::TensorRef>( - getOperandThroughDPSOps(op.getOutputs().front())), - paramsType, params); + assert(op.getResults().size() == 1); + auto out = cache.getOrCreate(op.getResult(0), tensorValueToFlatbuffer, + kHostAllocatedSize); + + return ::tt::target::ttnn::CreateEltwiseOpDirect(*cache.fbb, type, &ins, out, + paramsType, params); } template diff --git a/test/ttmlir/Dialect/TTNN/Transforms/Workarounds/where_workaround.mlir b/test/ttmlir/Dialect/TTNN/Transforms/Workarounds/where_workaround.mlir index 94462c2265..51b3e3ff35 100644 --- a/test/ttmlir/Dialect/TTNN/Transforms/Workarounds/where_workaround.mlir +++ b/test/ttmlir/Dialect/TTNN/Transforms/Workarounds/where_workaround.mlir @@ -12,7 +12,6 @@ module @moreh_cumsum attributes {tt.device = #device, tt.system_desc = #system_d %0 = "ttnn.get_device"() <{mesh_shape = #ttnn}> : () -> !tt.device<#device> %1 = "ttnn.reshape"(%arg2) <{shape = [1 : i32, 1 : i32, 1 : i32, 1 : i32]}> : (tensor<1xf32, #ttnn_layout2>) -> tensor<1x1x1x1xf32, #ttnn_layout3> %2 = "ttnn.repeat"(%1) <{repeat_dims = #ttnn.shape<1x12x1x46>}> : (tensor<1x1x1x1xf32, #ttnn_layout3>) -> tensor<1x12x1x46xf32, #ttnn_layout1> - %3 = "ttnn.empty"(%0) <{dtype = #tt.supportedDataTypes, layout = #ttnn.layout, memory_config = #ttnn.memory_config<#dram, <<1x2>>, >, shape = #ttnn.shape<1x12x1x46>}> : (!tt.device<#device>) -> tensor<1x12x1x46xf32, #ttnn_layout1> // CHECK: %[[ARG0:[0-9]+]] = {{.*}}(%arg0, // CHECK-SAME: dtype = #tt.supportedDataTypes // CHECK-SAME: tensor<1x1x1x46xbf16 @@ -21,9 +20,8 @@ module @moreh_cumsum attributes {tt.device = #device, tt.system_desc = #system_d // CHECK-SAME: tensor<1x1x1x46xf32 // CHECK-SAME: tensor<1x12x1x46xf32 // CHECK-SAME: tensor<1x12x1x46xf32 - // CHECK-SAME: tensor<1x12x1x46xf32 // CHECK-SAME: -> tensor<1x12x1x46xf32 - %4 = "ttnn.where"(%arg0, %arg1, %2, %3) <{operandSegmentSizes = array}> : (tensor<1x1x1x46xbf16, #ttnn_layout>, tensor<1x12x1x46xf32, #ttnn_layout1>, tensor<1x12x1x46xf32, #ttnn_layout1>, tensor<1x12x1x46xf32, #ttnn_layout1>) -> tensor<1x12x1x46xf32, #ttnn_layout1> + %4 = "ttnn.where"(%arg0, %arg1, %2) : (tensor<1x1x1x46xbf16, #ttnn_layout>, tensor<1x12x1x46xf32, #ttnn_layout1>, tensor<1x12x1x46xf32, #ttnn_layout1>) -> tensor<1x12x1x46xf32, #ttnn_layout1> return %4 : tensor<1x12x1x46xf32, #ttnn_layout1> } } diff --git a/test/ttmlir/Dialect/TTNN/Transforms/ttnn_create_input_gens_0.mlir b/test/ttmlir/Dialect/TTNN/Transforms/ttnn_create_input_gens_0.mlir index 280b29858e..869a993805 100644 --- a/test/ttmlir/Dialect/TTNN/Transforms/ttnn_create_input_gens_0.mlir +++ b/test/ttmlir/Dialect/TTNN/Transforms/ttnn_create_input_gens_0.mlir @@ -16,11 +16,10 @@ module attributes {tt.device = #device, tt.system_desc = #system_desc} { %2 = "ttnn.to_layout"(%1) <{layout = #ttnn.layout}> : (tensor<32x32xbf16, #ttnn_layout1>) -> tensor<32x32xbf16, #ttnn_layout2> %3 = "ttnn.to_device"(%arg1, %0) 
<{memory_config = #ttnn.memory_config<#dram, <<1x1>>, >}> : (tensor<32x32xbf16, #ttnn_layout>, !tt.device<#device>) -> tensor<32x32xbf16, #ttnn_layout1> %4 = "ttnn.to_layout"(%3) <{layout = #ttnn.layout}> : (tensor<32x32xbf16, #ttnn_layout1>) -> tensor<32x32xbf16, #ttnn_layout2> - %5 = "ttnn.empty"(%0) <{dtype = #tt.supportedDataTypes, layout = #ttnn.layout, memory_config = #ttnn.memory_config<#dram, <<1x1>>, >, shape = #ttnn.shape<32x32>}> : (!tt.device<#device>) -> tensor<32x32xbf16, #ttnn_layout2> - %6 = "ttnn.add"(%2, %4, %5) <{operandSegmentSizes = array}> : (tensor<32x32xbf16, #ttnn_layout2>, tensor<32x32xbf16, #ttnn_layout2>, tensor<32x32xbf16, #ttnn_layout2>) -> tensor<32x32xbf16, #ttnn_layout2> - %7 = "ttnn.from_device"(%6) : (tensor<32x32xbf16, #ttnn_layout2>) -> tensor<32x32xbf16, #ttnn_layout3> - %8 = "ttnn.to_layout"(%7) <{layout = #ttnn.layout}> : (tensor<32x32xbf16, #ttnn_layout3>) -> tensor<32x32xbf16, #ttnn_layout> - return %8 : tensor<32x32xbf16, #ttnn_layout> + %5 = "ttnn.add"(%2, %4) : (tensor<32x32xbf16, #ttnn_layout2>, tensor<32x32xbf16, #ttnn_layout2>) -> tensor<32x32xbf16, #ttnn_layout2> + %6 = "ttnn.from_device"(%5) : (tensor<32x32xbf16, #ttnn_layout2>) -> tensor<32x32xbf16, #ttnn_layout3> + %7 = "ttnn.to_layout"(%6) <{layout = #ttnn.layout}> : (tensor<32x32xbf16, #ttnn_layout3>) -> tensor<32x32xbf16, #ttnn_layout> + return %7 : tensor<32x32xbf16, #ttnn_layout> } // Confirm that the generator func is generated, and that the tensor attrs match: diff --git a/test/ttmlir/Dialect/TTNN/Transforms/ttnn_modify_signatures_for_dylib_0.mlir b/test/ttmlir/Dialect/TTNN/Transforms/ttnn_modify_signatures_for_dylib_0.mlir index 1b0b770b9b..30821e8f06 100644 --- a/test/ttmlir/Dialect/TTNN/Transforms/ttnn_modify_signatures_for_dylib_0.mlir +++ b/test/ttmlir/Dialect/TTNN/Transforms/ttnn_modify_signatures_for_dylib_0.mlir @@ -1,7 +1,7 @@ // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" --ttnn-modify-signatures-for-dylib %s | FileCheck %s module attributes {} { - // CHECK: func.func @add(%arg0: tuple<[[TENSOR_A:.*>]], [[TENSOR_B:.*>]]>, %arg1: !tt.device<#device>) -> tuple> { + // CHECK: func.func @add(%arg0: tuple<[[TENSOR_A:.*>]], [[TENSOR_B:.*>]]>) -> tuple> { func.func @add(%arg0: tensor<32x32xbf16>, %arg1: tensor<32x32xbf16>) -> tensor<32x32xbf16> { // CHECK-NEXT: %0 = tt.get_tuple_element %arg0[0] : (tuple<[[TENSOR_A]], [[TENSOR_B]]>) -> [[TENSOR_A]] // CHECK-NEXT: %1 = tt.get_tuple_element %arg0[1] : (tuple<[[TENSOR_A]], [[TENSOR_B]]>) -> [[TENSOR_B]] @@ -10,7 +10,7 @@ module attributes {} { return %1 : tensor<32x32xbf16> } - // CHECK: func.func @multiple_returns(%arg0: tuple<[[TENSOR_A:.*>]], [[TENSOR_B:.*>]], [[TENSOR_C:.*>]]>, %arg1: !tt.device<#device>) -> tuple, tensor<32x32xbf16, #ttnn_layout>> { + // CHECK: func.func @multiple_returns(%arg0: tuple<[[TENSOR_A:.*>]], [[TENSOR_B:.*>]], [[TENSOR_C:.*>]]>) -> tuple, tensor<32x32xbf16, #ttnn_layout>> { func.func @multiple_returns(%arg0: tensor<32x32xbf16>, %arg1: tensor<32x32xbf16>, %arg2: tensor<32x32xbf16>) -> (tensor<32x32xbf16>, tensor<32x32xbf16>) { // CHECK-NEXT: %0 = tt.get_tuple_element %arg0[0] : (tuple<[[TENSOR_A]], [[TENSOR_B]], [[TENSOR_C]]>) -> [[TENSOR_A]] // CHECK-NEXT: %1 = tt.get_tuple_element %arg0[1] : (tuple<[[TENSOR_A]], [[TENSOR_B]], [[TENSOR_C]]>) -> [[TENSOR_B]] diff --git a/test/ttmlir/Dialect/TTNN/data_movement/repeat/implicit_repeat.mlir b/test/ttmlir/Dialect/TTNN/data_movement/repeat/implicit_repeat.mlir index 913ad06fad..529c18e2fa 100644 --- 
a/test/ttmlir/Dialect/TTNN/data_movement/repeat/implicit_repeat.mlir +++ b/test/ttmlir/Dialect/TTNN/data_movement/repeat/implicit_repeat.mlir @@ -37,8 +37,8 @@ module { func.func @main(%arg0: tensor<1x16x32xf32>, %arg1: tensor<1x1x32xf32>) -> tensor<1x16x32xf32> { // CHECK: [[VAL0:%[0-9]+]] = "ttnn.repeat" // CHECK-SAME: repeat_dims = #ttnn.shape<1x16x1> - // CHECK: %{{[0-9]+}} = "ttnn.multiply"(%arg0, %{{[0-9]+}}, %{{[0-9]+}}) - // CHECK: %{{[0-9]+}} = "ttnn.bitwise_and"([[VAL0]], %{{[0-9]+}}, %{{[0-9]+}}) + // CHECK: %{{[0-9]+}} = "ttnn.multiply"(%arg0, %{{[0-9]+}}) + // CHECK: %{{[0-9]+}} = "ttnn.bitwise_and"([[VAL0]], %{{[0-9]+}}) %0 = tensor.empty() : tensor<1x16x32xf32> %1 = "ttir.broadcast"(%arg1, %0) <{broadcast_dimensions = array}> : (tensor<1x1x32xf32>, tensor<1x16x32xf32>) -> tensor<1x16x32xf32> %2 = tensor.empty() : tensor<1x16x32xf32> @@ -53,7 +53,7 @@ module { func.func @main(%arg0: tensor<1x16x32xf32>, %arg1: tensor<1x1x32xf32>) -> tensor<1x16x32xf32> { // CHECK-NOT: ttnn.repeat // CHECK: [[VAL0:%[0-9]+]] = "ttnn.add" - // CHECK: %{{[0-9]+}} = "ttnn.add"(%arg1, [[VAL0]], %{{[0-9]+}}) + // CHECK: %{{[0-9]+}} = "ttnn.add"(%arg1, [[VAL0]]) %0 = tensor.empty() : tensor<1x16x32xf32> %1 = "ttir.broadcast"(%arg1, %0) <{broadcast_dimensions = array}> : (tensor<1x1x32xf32>, tensor<1x16x32xf32>) -> tensor<1x16x32xf32> %2 = tensor.empty() : tensor<1x16x32xf32> diff --git a/test/ttmlir/Dialect/TTNN/data_movement/repeat/repeat_workaround.mlir b/test/ttmlir/Dialect/TTNN/data_movement/repeat/repeat_workaround.mlir index cbf7b0653e..14a530c230 100644 --- a/test/ttmlir/Dialect/TTNN/data_movement/repeat/repeat_workaround.mlir +++ b/test/ttmlir/Dialect/TTNN/data_movement/repeat/repeat_workaround.mlir @@ -3,8 +3,7 @@ module { func.func @main(%arg0: tensor<1x16x32xf32>, %arg1: tensor<1x1x32xf32>) -> tensor<1x16x32xf32> { // CHECK: %[[VAL0:[0-9]+]] = "ttnn.full" // CHECK-SAME: fillValue = 0.000000e+00 : f32 - // CHECK: %[[EMPTY:[0-9]+]] = "ttnn.empty"{{.*}} - // CHECK: %{{[0-9]+}} = "ttnn.add"(%arg1, %[[VAL0]], %[[EMPTY]]) + // CHECK: %{{[0-9]+}} = "ttnn.add"(%arg1, %[[VAL0]]) // CHECK-NOT: "ttnn.repeat" %0 = tensor.empty() : tensor<1x16x32xf32> %1 = "ttir.broadcast"(%arg1, %0) <{broadcast_dimensions = array}> : (tensor<1x1x32xf32>, tensor<1x16x32xf32>) -> tensor<1x16x32xf32> diff --git a/test/ttmlir/Dialect/TTNN/eltwise/binary/bitwise_and/simple_bitwise_and.mlir b/test/ttmlir/Dialect/TTNN/eltwise/binary/bitwise_and/simple_bitwise_and.mlir index 9413087a47..bfbba2cebe 100644 --- a/test/ttmlir/Dialect/TTNN/eltwise/binary/bitwise_and/simple_bitwise_and.mlir +++ b/test/ttmlir/Dialect/TTNN/eltwise/binary/bitwise_and/simple_bitwise_and.mlir @@ -3,10 +3,11 @@ module attributes {} { func.func @bitwise_and(%arg0: tensor<64x128xi32>, %arg1: tensor<64x128xi32>) -> tensor<64x128xi32> { %0 = tensor.empty() : tensor<64x128xi32> - // CHECK: %[[EMPTY:.*]] = "ttnn.empty"{{.*}} -> tensor<64x128xui32, {{.*}} %1 = "ttir.bitwise_and"(%arg0, %arg1, %0) <{operandSegmentSizes = array}> : (tensor<64x128xi32>, tensor<64x128xi32>, tensor<64x128xi32>) -> tensor<64x128xi32> - // CHECK: {{.*}} "ttnn.bitwise_and"({{.*}}, {{.*}}, %[[EMPTY]]){{.*}} -> tensor<64x128xui32, {{.*}} + // CHECK: "ttnn.bitwise_and" + // CHECK-SAME: tensor<64x128xui32 + // CHECK-SAME: tensor<64x128xui32 + // CHECK-SAME: -> tensor<64x128xui32 return %1 : tensor<64x128xi32> - // CHECK: return {{.*}} tensor<64x128xui32, {{.*}} } } diff --git a/test/ttmlir/Dialect/TTNN/eltwise/binary/bitwise_or/simple_bitwise_or.mlir 
b/test/ttmlir/Dialect/TTNN/eltwise/binary/bitwise_or/simple_bitwise_or.mlir index b8a85f8616..b569a1be20 100644 --- a/test/ttmlir/Dialect/TTNN/eltwise/binary/bitwise_or/simple_bitwise_or.mlir +++ b/test/ttmlir/Dialect/TTNN/eltwise/binary/bitwise_or/simple_bitwise_or.mlir @@ -3,10 +3,11 @@ module attributes {} { func.func @bitwise_or(%arg0: tensor<64x128xi32>, %arg1: tensor<64x128xi32>) -> tensor<64x128xi32> { %0 = tensor.empty() : tensor<64x128xi32> - // CHECK: %[[EMPTY:.*]] = "ttnn.empty"{{.*}} -> tensor<64x128xui32, {{.*}} %1 = "ttir.bitwise_or"(%arg0, %arg1, %0) <{operandSegmentSizes = array}> : (tensor<64x128xi32>, tensor<64x128xi32>, tensor<64x128xi32>) -> tensor<64x128xi32> - // CHECK: {{.*}} "ttnn.bitwise_or"({{.*}}, {{.*}}, %[[EMPTY]]){{.*}} -> tensor<64x128xui32, {{.*}} + // CHECK: "ttnn.bitwise_or" + // CHECK-SAME: tensor<64x128xui32 + // CHECK-SAME: tensor<64x128xui32 + // CHECK-SAME: -> tensor<64x128xui32 return %1 : tensor<64x128xi32> - // CHECK: return {{.*}} tensor<64x128xui32, {{.*}} } } diff --git a/test/ttmlir/Dialect/TTNN/eltwise/binary/bitwise_xor/simple_bitwise_xor.mlir b/test/ttmlir/Dialect/TTNN/eltwise/binary/bitwise_xor/simple_bitwise_xor.mlir index 3dfe4285f9..a783349bbb 100644 --- a/test/ttmlir/Dialect/TTNN/eltwise/binary/bitwise_xor/simple_bitwise_xor.mlir +++ b/test/ttmlir/Dialect/TTNN/eltwise/binary/bitwise_xor/simple_bitwise_xor.mlir @@ -3,10 +3,11 @@ module attributes {} { func.func @bitwise_xor(%arg0: tensor<64x128xi32>, %arg1: tensor<64x128xi32>) -> tensor<64x128xi32> { %0 = tensor.empty() : tensor<64x128xi32> - // CHECK: %[[EMPTY:.*]] = "ttnn.empty"{{.*}} -> tensor<64x128xui32, {{.*}} %1 = "ttir.bitwise_xor"(%arg0, %arg1, %0) <{operandSegmentSizes = array}> : (tensor<64x128xi32>, tensor<64x128xi32>, tensor<64x128xi32>) -> tensor<64x128xi32> - // CHECK: {{.*}} "ttnn.bitwise_xor"({{.*}}, {{.*}}, %[[EMPTY]]){{.*}} -> tensor<64x128xui32, {{.*}} + // CHECK: "ttnn.bitwise_xor" + // CHECK-SAME: tensor<64x128xui32 + // CHECK-SAME: tensor<64x128xui32 + // CHECK-SAME: -> tensor<64x128xui32 return %1 : tensor<64x128xi32> - // CHECK: return {{.*}} tensor<64x128xui32, {{.*}} } } diff --git a/test/ttmlir/Dialect/TTNN/simple_compare.mlir b/test/ttmlir/Dialect/TTNN/eltwise/binary/compare/simple_compare.mlir similarity index 60% rename from test/ttmlir/Dialect/TTNN/simple_compare.mlir rename to test/ttmlir/Dialect/TTNN/eltwise/binary/compare/simple_compare.mlir index 873ae745ce..f96213e73a 100644 --- a/test/ttmlir/Dialect/TTNN/simple_compare.mlir +++ b/test/ttmlir/Dialect/TTNN/eltwise/binary/compare/simple_compare.mlir @@ -1,90 +1,72 @@ // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline %s | FileCheck %s module attributes {} { func.func @equal(%arg0: tensor<13x31xf32>, %arg1: tensor<13x31xf32>) -> tensor<13x31xf32> { - // CHECK: %[[C:.*]] = "ttnn.empty" - // CHECK-SAME: [[TENSOR:tensor<13x31xf32,]] %0 = tensor.empty() : tensor<13x31xf32> - // CHECK: %[[C:.*]] = "ttnn.eq" - // CHECK-SAME: [[TENSOR]] - // CHECK-SAME: [[TENSOR]] - // CHECK-SAME: [[TENSOR]] - // CHECK-SAME: -> [[TENSOR]] %1 = "ttir.eq"(%arg0, %arg1, %0) <{operandSegmentSizes = array}> : (tensor<13x31xf32>, tensor<13x31xf32>, tensor<13x31xf32>) -> tensor<13x31xf32> + // CHECK: "ttnn.eq" + // CHECK-SAME: tensor<13x31xf32 + // CHECK-SAME: tensor<13x31xf32 + // CHECK-SAME: -> tensor<13x31xf32 return %1 : tensor<13x31xf32> } } module attributes {} { func.func @not_equal(%arg0: tensor<13x31xf32>, %arg1: tensor<13x31xf32>) -> tensor<13x31xf32> { - // CHECK: %[[C:.*]] = "ttnn.empty - // CHECK-SAME: 
[[TENSOR:tensor<13x31xf32,]] %0 = tensor.empty() : tensor<13x31xf32> - // CHECK: %[[C:.*]] = "ttnn.ne" - // CHECK-SAME: [[TENSOR]] - // CHECK-SAME: [[TENSOR]] - // CHECK-SAME: [[TENSOR]] - // CHECK-SAME: -> [[TENSOR]] %1 = "ttir.ne"(%arg0, %arg1, %0) <{operandSegmentSizes = array}> : (tensor<13x31xf32>, tensor<13x31xf32>, tensor<13x31xf32>) -> tensor<13x31xf32> + // CHECK: "ttnn.ne" + // CHECK-SAME: tensor<13x31xf32 + // CHECK-SAME: tensor<13x31xf32 + // CHECK-SAME: -> tensor<13x31xf32 return %1 : tensor<13x31xf32> } } module attributes {} { func.func @greater_equal(%arg0: tensor<13x31xf32>, %arg1: tensor<13x31xf32>) -> tensor<13x31xf32> { - // CHECK: %[[C:.*]] = "ttnn.empty - // CHECK-SAME: [[TENSOR:tensor<13x31xf32,]] %0 = tensor.empty() : tensor<13x31xf32> - // CHECK: %[[C:.*]] = "ttnn.ge" - // CHECK-SAME: [[TENSOR]] - // CHECK-SAME: [[TENSOR]] - // CHECK-SAME: [[TENSOR]] - // CHECK-SAME: -> [[TENSOR]] %1 = "ttir.ge"(%arg0, %arg1, %0) <{operandSegmentSizes = array}> : (tensor<13x31xf32>, tensor<13x31xf32>, tensor<13x31xf32>) -> tensor<13x31xf32> + // CHECK: "ttnn.ge" + // CHECK-SAME: tensor<13x31xf32 + // CHECK-SAME: tensor<13x31xf32 + // CHECK-SAME: -> tensor<13x31xf32 return %1 : tensor<13x31xf32> } } module attributes {} { func.func @greater_than(%arg0: tensor<13x31xf32>, %arg1: tensor<13x31xf32>) -> tensor<13x31xf32> { - // CHECK: %[[C:.*]] = "ttnn.empty - // CHECK-SAME: [[TENSOR:tensor<13x31xf32,]] %0 = tensor.empty() : tensor<13x31xf32> - // CHECK: %[[C:.*]] = "ttnn.gt" - // CHECK-SAME: [[TENSOR]] - // CHECK-SAME: [[TENSOR]] - // CHECK-SAME: [[TENSOR]] - // CHECK-SAME: -> [[TENSOR]] %1 = "ttir.gt"(%arg0, %arg1, %0) <{operandSegmentSizes = array}> : (tensor<13x31xf32>, tensor<13x31xf32>, tensor<13x31xf32>) -> tensor<13x31xf32> + // CHECK: "ttnn.gt" + // CHECK-SAME: tensor<13x31xf32 + // CHECK-SAME: tensor<13x31xf32 + // CHECK-SAME: -> tensor<13x31xf32 return %1 : tensor<13x31xf32> } } module attributes {} { func.func @less_equal(%arg0: tensor<13x31xf32>, %arg1: tensor<13x31xf32>) -> tensor<13x31xf32> { - // CHECK: %[[C:.*]] = "ttnn.empty - // CHECK-SAME: [[TENSOR:tensor<13x31xf32,]] %0 = tensor.empty() : tensor<13x31xf32> - // CHECK: %[[C:.*]] = "ttnn.le" - // CHECK-SAME: [[TENSOR]] - // CHECK-SAME: [[TENSOR]] - // CHECK-SAME: [[TENSOR]] - // CHECK-SAME: -> [[TENSOR]] %1 = "ttir.le"(%arg0, %arg1, %0) <{operandSegmentSizes = array}> : (tensor<13x31xf32>, tensor<13x31xf32>, tensor<13x31xf32>) -> tensor<13x31xf32> + // CHECK: "ttnn.le" + // CHECK-SAME: tensor<13x31xf32 + // CHECK-SAME: tensor<13x31xf32 + // CHECK-SAME: -> tensor<13x31xf32 return %1 : tensor<13x31xf32> } } module attributes {} { func.func @less_than(%arg0: tensor<13x31xf32>, %arg1: tensor<13x31xf32>) -> tensor<13x31xf32> { - // CHECK: %[[C:.*]] = "ttnn.empty - // CHECK-SAME: [[TENSOR:tensor<13x31xf32,]] %0 = tensor.empty() : tensor<13x31xf32> - // CHECK: %[[C:.*]] = "ttnn.lt" - // CHECK-SAME: [[TENSOR]] - // CHECK-SAME: [[TENSOR]] - // CHECK-SAME: [[TENSOR]] - // CHECK-SAME: -> [[TENSOR]] %1 = "ttir.lt"(%arg0, %arg1, %0) <{operandSegmentSizes = array}> : (tensor<13x31xf32>, tensor<13x31xf32>, tensor<13x31xf32>) -> tensor<13x31xf32> + // CHECK: "ttnn.lt" + // CHECK-SAME: tensor<13x31xf32 + // CHECK-SAME: tensor<13x31xf32 + // CHECK-SAME: -> tensor<13x31xf32 return %1 : tensor<13x31xf32> } } diff --git a/test/ttmlir/Dialect/TTNN/simple_div.mlir b/test/ttmlir/Dialect/TTNN/eltwise/binary/div/simple_div.mlir similarity index 76% rename from test/ttmlir/Dialect/TTNN/simple_div.mlir rename to 
test/ttmlir/Dialect/TTNN/eltwise/binary/div/simple_div.mlir index 15d2b4820c..c27753171f 100644 --- a/test/ttmlir/Dialect/TTNN/simple_div.mlir +++ b/test/ttmlir/Dialect/TTNN/eltwise/binary/div/simple_div.mlir @@ -1,10 +1,12 @@ // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline %s | FileCheck %s module attributes {} { func.func @forward(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xf32> { - // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]] %0 = tensor.empty() : tensor<64x128xf32> - // CHECK: %[[C:.*]] = "ttnn.div"[[C:.*]] %1 = "ttir.div"(%arg0, %arg1, %0) <{operandSegmentSizes = array}> : (tensor<64x128xf32>, tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32> + // CHECK: "ttnn.div" + // CHECK-SAME: tensor<64x128xf32 + // CHECK-SAME: tensor<64x128xf32 + // CHECK-SAME: -> tensor<64x128xf32 return %1 : tensor<64x128xf32> } } diff --git a/test/ttmlir/Dialect/TTNN/eltwise/binary/logical_and/simple_and.mlir b/test/ttmlir/Dialect/TTNN/eltwise/binary/logical_and/simple_and.mlir index e6400a7529..d164ad8b24 100644 --- a/test/ttmlir/Dialect/TTNN/eltwise/binary/logical_and/simple_and.mlir +++ b/test/ttmlir/Dialect/TTNN/eltwise/binary/logical_and/simple_and.mlir @@ -2,12 +2,11 @@ module attributes {} { func.func @logical_and(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xf32> { %0 = tensor.empty() : tensor<64x128xf32> - // CHECK: {{.*}} = "ttnn.empty"{{.*}} %1 = "ttir.logical_and"(%arg0, %arg1, %0) <{operandSegmentSizes = array}> : (tensor<64x128xf32>, tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32> - // CHECK: %[[C:.*]] = "ttnn.logical_and" - // CHECK-SAME: tensor<64x128xf32, - // CHECK-SAME: tensor<64x128xf32, - // CHECK-SAME: tensor<64x128xf32, + // CHECK: "ttnn.logical_and" + // CHECK-SAME: tensor<64x128xf32 + // CHECK-SAME: tensor<64x128xf32 + // CHECK-SAME: -> tensor<64x128xf32 return %1 : tensor<64x128xf32> } } diff --git a/test/ttmlir/Dialect/TTNN/eltwise/binary/logical_or/simple_or.mlir b/test/ttmlir/Dialect/TTNN/eltwise/binary/logical_or/simple_or.mlir index bb35140eb2..c8a80ef82f 100644 --- a/test/ttmlir/Dialect/TTNN/eltwise/binary/logical_or/simple_or.mlir +++ b/test/ttmlir/Dialect/TTNN/eltwise/binary/logical_or/simple_or.mlir @@ -2,12 +2,11 @@ module attributes {} { func.func @logical_or(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xf32> { %0 = tensor.empty() : tensor<64x128xf32> - // CHECK: {{.*}} = "ttnn.empty"{{.*}} %1 = "ttir.logical_or"(%arg0, %arg1, %0) <{operandSegmentSizes = array}> : (tensor<64x128xf32>, tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32> - // CHECK: %[[C:.*]] = "ttnn.logical_or" - // CHECK-SAME: tensor<64x128xf32, - // CHECK-SAME: tensor<64x128xf32, - // CHECK-SAME: tensor<64x128xf32, + // CHECK: "ttnn.logical_or" + // CHECK-SAME: tensor<64x128xf32 + // CHECK-SAME: tensor<64x128xf32 + // CHECK-SAME: -> tensor<64x128xf32 return %1 : tensor<64x128xf32> } } diff --git a/test/ttmlir/Dialect/TTNN/eltwise/binary/logical_xor/simple_xor.mlir b/test/ttmlir/Dialect/TTNN/eltwise/binary/logical_xor/simple_xor.mlir index 302c76d3ff..a7996033fb 100644 --- a/test/ttmlir/Dialect/TTNN/eltwise/binary/logical_xor/simple_xor.mlir +++ b/test/ttmlir/Dialect/TTNN/eltwise/binary/logical_xor/simple_xor.mlir @@ -2,14 +2,12 @@ module attributes {} { func.func @logical_xor(%arg0: tensor<64x128xbf16>, %arg1: tensor<64x128xbf16>) -> tensor<64x128xbf16> { - // CHECK: %{{[0-9]+}} = "ttnn.empty"{{.*}} [[TENSOR:tensor<64x128xbf16]] %0 = tensor.empty() : tensor<64x128xbf16> - // CHECK: %{{[0-9]+}} 
= "ttnn.logical_xor" - // CHECK-SAME: [[TENSOR]] - // CHECK-SAME: [[TENSOR]] - // CHECK-SAME: [[TENSOR]] - // CHECK-SAME: -> [[TENSOR]] %1 = "ttir.logical_xor"(%arg0, %arg1, %0) <{operandSegmentSizes = array}> : (tensor<64x128xbf16>, tensor<64x128xbf16>, tensor<64x128xbf16>) -> tensor<64x128xbf16> + // CHECK: "ttnn.logical_xor" + // CHECK-SAME: tensor<64x128xbf16 + // CHECK-SAME: tensor<64x128xbf16 + // CHECK-SAME: -> tensor<64x128xbf16 return %1 : tensor<64x128xbf16> } } diff --git a/test/ttmlir/Dialect/TTNN/simple_maximum.mlir b/test/ttmlir/Dialect/TTNN/eltwise/binary/maximum/simple_maximum.mlir similarity index 75% rename from test/ttmlir/Dialect/TTNN/simple_maximum.mlir rename to test/ttmlir/Dialect/TTNN/eltwise/binary/maximum/simple_maximum.mlir index cd87754fa0..31e8efb592 100644 --- a/test/ttmlir/Dialect/TTNN/simple_maximum.mlir +++ b/test/ttmlir/Dialect/TTNN/eltwise/binary/maximum/simple_maximum.mlir @@ -1,10 +1,12 @@ // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline %s | FileCheck %s module attributes {} { func.func @forward(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xf32> { - // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]] %0 = tensor.empty() : tensor<64x128xf32> - // CHECK: %[[C:.*]] = "ttnn.maximum"[[C:.*]] %1 = "ttir.maximum"(%arg0, %arg1, %0) <{operandSegmentSizes = array}> : (tensor<64x128xf32>, tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32> + // CHECK: "ttnn.maximum" + // CHECK-SAME: tensor<64x128xf32 + // CHECK-SAME: tensor<64x128xf32 + // CHECK-SAME: -> tensor<64x128xf32 return %1 : tensor<64x128xf32> } } diff --git a/test/ttmlir/Dialect/TTNN/eltwise/binary/minimum/simple_minimum.mlir b/test/ttmlir/Dialect/TTNN/eltwise/binary/minimum/simple_minimum.mlir index 7b3576cb7f..a5297a587b 100644 --- a/test/ttmlir/Dialect/TTNN/eltwise/binary/minimum/simple_minimum.mlir +++ b/test/ttmlir/Dialect/TTNN/eltwise/binary/minimum/simple_minimum.mlir @@ -1,10 +1,12 @@ // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline %s | FileCheck %s module attributes {} { func.func @forward(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xf32> { - // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]] %0 = tensor.empty() : tensor<64x128xf32> - // CHECK: %[[C:.*]] = "ttnn.minimum"[[C:.*]] %1 = "ttir.minimum"(%arg0, %arg1, %0) <{operandSegmentSizes = array}> : (tensor<64x128xf32>, tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32> + // CHECK: "ttnn.minimum" + // CHECK-SAME: tensor<64x128xf32 + // CHECK-SAME: tensor<64x128xf32 + // CHECK-SAME: -> tensor<64x128xf32 return %1 : tensor<64x128xf32> } } diff --git a/test/ttmlir/Dialect/TTNN/simple_multiply.mlir b/test/ttmlir/Dialect/TTNN/eltwise/binary/multiply/simple_multiply.mlir similarity index 75% rename from test/ttmlir/Dialect/TTNN/simple_multiply.mlir rename to test/ttmlir/Dialect/TTNN/eltwise/binary/multiply/simple_multiply.mlir index 795f65efe0..fc95f01e3a 100644 --- a/test/ttmlir/Dialect/TTNN/simple_multiply.mlir +++ b/test/ttmlir/Dialect/TTNN/eltwise/binary/multiply/simple_multiply.mlir @@ -1,10 +1,12 @@ // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline %s | FileCheck %s module attributes {} { func.func @forward(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xf32> { - // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]] %0 = tensor.empty() : tensor<64x128xf32> - // CHECK: %[[C:.*]] = "ttnn.multiply"[[C:.*]] %1 = "ttir.multiply"(%arg0, %arg1, %0) <{operandSegmentSizes = array}> : (tensor<64x128xf32>, tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32> + // CHECK: 
"ttnn.multiply" + // CHECK-SAME: tensor<64x128xf32 + // CHECK-SAME: tensor<64x128xf32 + // CHECK-SAME: -> tensor<64x128xf32 return %1 : tensor<64x128xf32> } } diff --git a/test/ttmlir/Dialect/TTNN/eltwise/binary/power/simple_power.mlir b/test/ttmlir/Dialect/TTNN/eltwise/binary/power/simple_power.mlir index 91eb4ae94f..11ca951638 100644 --- a/test/ttmlir/Dialect/TTNN/eltwise/binary/power/simple_power.mlir +++ b/test/ttmlir/Dialect/TTNN/eltwise/binary/power/simple_power.mlir @@ -2,10 +2,11 @@ module attributes {} { func.func @power(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xf32> { %0 = tensor.empty() : tensor<64x128xf32> - // CHECK: %[[EMPTY:.*]] = "ttnn.empty"{{.*}} -> tensor<64x128xf32, {{.*}} %1 = "ttir.power"(%arg0, %arg1, %0) <{operandSegmentSizes = array}> : (tensor<64x128xf32>, tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32> - // CHECK: %[[POW:[0-9]+]] = "ttnn.pow"({{.*}}, {{.*}}, %[[EMPTY]]){{.*}} -> tensor<64x128xf32, {{.*}} + // CHECK: "ttnn.pow" + // CHECK-SAME: tensor<64x128xf32 + // CHECK-SAME: tensor<64x128xf32 + // CHECK-SAME: -> tensor<64x128xf32 return %1 : tensor<64x128xf32> - // CHECK: return {{.*}} : tensor<64x128xf32, {{.*}} } } diff --git a/test/ttmlir/Dialect/TTNN/eltwise/binary/remainder/simple_remainder.mlir b/test/ttmlir/Dialect/TTNN/eltwise/binary/remainder/simple_remainder.mlir index 67d283c078..09d7d15bc0 100644 --- a/test/ttmlir/Dialect/TTNN/eltwise/binary/remainder/simple_remainder.mlir +++ b/test/ttmlir/Dialect/TTNN/eltwise/binary/remainder/simple_remainder.mlir @@ -2,10 +2,11 @@ module attributes {} { func.func @remainder(%arg0: tensor<32x32xf32>, %arg1: tensor<32x32xf32>) -> tensor<32x32xf32> { %0 = tensor.empty() : tensor<32x32xf32> - // CHECK: %[[EMPTY:.*]] = "ttnn.empty"{{.*}} -> tensor<32x32xf32, {{.*}} %1 = "ttir.remainder"(%arg0, %arg1, %0) <{operandSegmentSizes = array}> : (tensor<32x32xf32>, tensor<32x32xf32>, tensor<32x32xf32>) -> tensor<32x32xf32> - // CHECK: %[[REM:[0-9]+]] = "ttnn.remainder"({{.*}}, {{.*}}, %[[EMPTY]]){{.*}} -> tensor<32x32xf32, {{.*}} + // CHECK: "ttnn.remainder" + // CHECK-SAME: tensor<32x32xf32 + // CHECK-SAME: tensor<32x32xf32 + // CHECK-SAME: -> tensor<32x32xf32 return %1 : tensor<32x32xf32> - // CHECK: return {{.*}} : tensor<32x32xf32, {{.*}} } } diff --git a/test/ttmlir/Dialect/TTNN/simple_subtract.mlir b/test/ttmlir/Dialect/TTNN/eltwise/binary/subtract/simple_subtract.mlir similarity index 75% rename from test/ttmlir/Dialect/TTNN/simple_subtract.mlir rename to test/ttmlir/Dialect/TTNN/eltwise/binary/subtract/simple_subtract.mlir index f4c69ea401..b46b59e76c 100644 --- a/test/ttmlir/Dialect/TTNN/simple_subtract.mlir +++ b/test/ttmlir/Dialect/TTNN/eltwise/binary/subtract/simple_subtract.mlir @@ -1,10 +1,12 @@ // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline %s | FileCheck %s module attributes {} { func.func @forward(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xf32> { - // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]] %0 = tensor.empty() : tensor<64x128xf32> - // CHECK: %[[C:.*]] = "ttnn.subtract"[[C:.*]] %1 = "ttir.subtract"(%arg0, %arg1, %0) <{operandSegmentSizes = array}> : (tensor<64x128xf32>, tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32> + // CHECK: "ttnn.subtract" + // CHECK-SAME: tensor<64x128xf32 + // CHECK-SAME: tensor<64x128xf32 + // CHECK-SAME: -> tensor<64x128xf32 return %1 : tensor<64x128xf32> } } diff --git a/test/ttmlir/Dialect/TTNN/eltwise/operand_broadcasts.mlir b/test/ttmlir/Dialect/TTNN/eltwise/operand_broadcasts.mlir index 
9b5df3852d..ee5ecf7b4d 100644 --- a/test/ttmlir/Dialect/TTNN/eltwise/operand_broadcasts.mlir +++ b/test/ttmlir/Dialect/TTNN/eltwise/operand_broadcasts.mlir @@ -1,17 +1,15 @@ // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline %s | FileCheck %s module attributes {} { func.func @bcast_one_dim(%arg0: tensor<2x64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<2x64x128xf32> { - // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]] %0 = tensor.empty() : tensor<2x64x128xf32> - // CHECK: %[[C:.*]] = "ttnn.multiply"[[C:.*]] + // CHECK: "ttnn.multiply" %1 = "ttir.multiply"(%arg0, %arg1, %0) <{operandSegmentSizes = array}> : (tensor<2x64x128xf32>, tensor<64x128xf32>, tensor<2x64x128xf32>) -> tensor<2x64x128xf32> return %1 : tensor<2x64x128xf32> } func.func @bcast_multi_dim(%arg0: tensor<17x16x15x14xf32>, %arg1: tensor<15x1xf32>) -> tensor<17x16x15x14xf32> { - // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]] %0 = tensor.empty() : tensor<17x16x15x14xf32> - // CHECK: %[[C:.*]] = "ttnn.multiply"[[C:.*]] + // CHECK: "ttnn.multiply" %1 = "ttir.multiply"(%arg0, %arg1, %0) <{operandSegmentSizes = array}> : (tensor<17x16x15x14xf32>, tensor<15x1xf32>, tensor<17x16x15x14xf32>) -> tensor<17x16x15x14xf32> return %1 : tensor<17x16x15x14xf32> } diff --git a/test/ttmlir/Dialect/TTNN/simple_where.mlir b/test/ttmlir/Dialect/TTNN/eltwise/ternary/where/simple_where.mlir similarity index 68% rename from test/ttmlir/Dialect/TTNN/simple_where.mlir rename to test/ttmlir/Dialect/TTNN/eltwise/ternary/where/simple_where.mlir index d8d469943c..42b0eb937b 100644 --- a/test/ttmlir/Dialect/TTNN/simple_where.mlir +++ b/test/ttmlir/Dialect/TTNN/eltwise/ternary/where/simple_where.mlir @@ -5,9 +5,15 @@ module @jit_eltwise_where { %1 = "ttir.eq"(%arg0, %arg1, %0) <{operandSegmentSizes = array}> : (tensor<13x37xf32>, tensor<13x37xf32>, tensor<13x37xf32>) -> tensor<13x37xf32> %2 = tensor.empty() : tensor<13x37xf32> %3 = "ttir.where"(%1, %arg0, %arg1, %2) <{operandSegmentSizes = array}> : (tensor<13x37xf32>, tensor<13x37xf32>, tensor<13x37xf32>, tensor<13x37xf32>) -> tensor<13x37xf32> - // CHECK: %[[EMPTY:.*]] = "ttnn.empty"{{.*}} - // CHECK: %[[VAL1:[0-9]+]] = "ttnn.eq"(%arg0, %arg1, %[[EMPTY]]) - // CHECK: %{{[0-9]+}} = "ttnn.where"(%[[VAL1]], %arg0, %arg1, %{{[0-9]+}}) + // CHECK: "ttnn.eq" + // CHECK-SAME: tensor<13x37xf32 + // CHECK-SAME: tensor<13x37xf32 + // CHECK-SAME: -> tensor<13x37xf32 + // CHECK: "ttnn.where" + // CHECK-SAME: tensor<13x37xf32 + // CHECK-SAME: tensor<13x37xf32 + // CHECK-SAME: tensor<13x37xf32 + // CHECK-SAME: -> tensor<13x37xf32 return %3 : tensor<13x37xf32> } } diff --git a/test/ttmlir/Dialect/TTNN/eltwise/unary/abs/simple_abs.mlir b/test/ttmlir/Dialect/TTNN/eltwise/unary/abs/simple_abs.mlir index eceb8d058b..10a00443b5 100644 --- a/test/ttmlir/Dialect/TTNN/eltwise/unary/abs/simple_abs.mlir +++ b/test/ttmlir/Dialect/TTNN/eltwise/unary/abs/simple_abs.mlir @@ -1,10 +1,11 @@ // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline %s | FileCheck %s module attributes {} { func.func @forward(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> { - // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]] %0 = tensor.empty() : tensor<64x128xf32> - // CHECK: %[[C:.*]] = "ttnn.abs"[[C:.*]] %1 = "ttir.abs"(%arg0, %0) <{operandSegmentSizes = array}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32> + // CHECK: "ttnn.abs" + // CHECK-SAME: tensor<64x128xf32 + // CHECK-SAME: -> tensor<64x128xf32 return %1 : tensor<64x128xf32> } } diff --git a/test/ttmlir/Dialect/TTNN/eltwise/unary/bitwise_not/simple_bitwise_not.mlir 
b/test/ttmlir/Dialect/TTNN/eltwise/unary/bitwise_not/simple_bitwise_not.mlir index 2621c422f7..ff5a116b83 100644 --- a/test/ttmlir/Dialect/TTNN/eltwise/unary/bitwise_not/simple_bitwise_not.mlir +++ b/test/ttmlir/Dialect/TTNN/eltwise/unary/bitwise_not/simple_bitwise_not.mlir @@ -3,10 +3,10 @@ module attributes {} { func.func @bitwise_not(%arg0: tensor<64x128xi32>) -> tensor<64x128xi32> { %0 = tensor.empty() : tensor<64x128xi32> - // CHECK: %[[EMPTY:.*]] = "ttnn.empty"{{.*}} -> tensor<64x128xui32, {{.*}} %1 = "ttir.bitwise_not"(%arg0, %0) <{operandSegmentSizes = array}> : (tensor<64x128xi32>, tensor<64x128xi32>) -> tensor<64x128xi32> - // CHECK: {{.*}} "ttnn.bitwise_not"({{.*}}, %[[EMPTY]]){{.*}} -> tensor<64x128xui32, {{.*}} + // CHECK: "ttnn.bitwise_not" + // CHECK-SAME: tensor<64x128xui32 + // CHECK-SAME: -> tensor<64x128xui32 return %1 : tensor<64x128xi32> - // CHECK: return {{.*}} tensor<64x128xui32, {{.*}} } } diff --git a/test/ttmlir/Dialect/TTNN/eltwise/unary/cbrt/simple_cbrt.mlir b/test/ttmlir/Dialect/TTNN/eltwise/unary/cbrt/simple_cbrt.mlir index bdb78fed83..5b560a5b84 100644 --- a/test/ttmlir/Dialect/TTNN/eltwise/unary/cbrt/simple_cbrt.mlir +++ b/test/ttmlir/Dialect/TTNN/eltwise/unary/cbrt/simple_cbrt.mlir @@ -1,10 +1,11 @@ // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline %s | FileCheck %s module attributes {} { func.func @forward(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> { - // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]] %0 = tensor.empty() : tensor<64x128xf32> - // CHECK: %[[C:.*]] = "ttnn.cbrt"[[C:.*]] %1 = "ttir.cbrt"(%arg0, %0) <{operandSegmentSizes = array}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32> + // CHECK: "ttnn.cbrt" + // CHECK-SAME: tensor<64x128xf32 + // CHECK-SAME: -> tensor<64x128xf32 return %1 : tensor<64x128xf32> } } diff --git a/test/ttmlir/Dialect/TTNN/eltwise/unary/ceil/simple_ceil.mlir b/test/ttmlir/Dialect/TTNN/eltwise/unary/ceil/simple_ceil.mlir index d0250d5cd8..46f4677d76 100644 --- a/test/ttmlir/Dialect/TTNN/eltwise/unary/ceil/simple_ceil.mlir +++ b/test/ttmlir/Dialect/TTNN/eltwise/unary/ceil/simple_ceil.mlir @@ -1,10 +1,11 @@ // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline %s | FileCheck %s module attributes {} { func.func @forward(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> { - // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]] %0 = tensor.empty() : tensor<64x128xf32> - // CHECK: %[[C:.*]] = "ttnn.ceil"[[C:.*]] %1 = "ttir.ceil"(%arg0, %0) <{operandSegmentSizes = array}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32> + // CHECK: "ttnn.ceil" + // CHECK-SAME: tensor<64x128xf32 + // CHECK-SAME: -> tensor<64x128xf32 return %1 : tensor<64x128xf32> } } diff --git a/test/ttmlir/Dialect/TTNN/eltwise/unary/cos/simple_cos.mlir b/test/ttmlir/Dialect/TTNN/eltwise/unary/cos/simple_cos.mlir index e990aa59c1..4d652d21aa 100644 --- a/test/ttmlir/Dialect/TTNN/eltwise/unary/cos/simple_cos.mlir +++ b/test/ttmlir/Dialect/TTNN/eltwise/unary/cos/simple_cos.mlir @@ -1,10 +1,11 @@ // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline %s | FileCheck %s module attributes {} { func.func @forward(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> { - // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]] %0 = tensor.empty() : tensor<64x128xf32> - // CHECK: %[[C:.*]] = "ttnn.cos"[[C:.*]] %1 = "ttir.cos"(%arg0, %0) <{operandSegmentSizes = array}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32> + // CHECK: "ttnn.cos" + // CHECK-SAME: tensor<64x128xf32 + // CHECK-SAME: -> tensor<64x128xf32 return %1 : tensor<64x128xf32> } } diff --git 
a/test/ttmlir/Dialect/TTNN/eltwise/unary/expm1/simple_expm1.mlir b/test/ttmlir/Dialect/TTNN/eltwise/unary/expm1/simple_expm1.mlir index a8228fe9c0..faaec5bc7f 100644 --- a/test/ttmlir/Dialect/TTNN/eltwise/unary/expm1/simple_expm1.mlir +++ b/test/ttmlir/Dialect/TTNN/eltwise/unary/expm1/simple_expm1.mlir @@ -2,10 +2,10 @@ module attributes {} { func.func @forward(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> { %0 = tensor.empty() : tensor<64x128xf32> - // CHECK: [[VAL0:%[0-9]+]] = "ttnn.empty"(%{{[0-9]+}}) <{dtype = {{.*}}, layout = {{.*}}, memory_config = {{.*}}, <{{.*}}>>, shape = #ttnn.shape<[[TENSOR_SHAPE:[0-9]+x[0-9]+]]>}> %1 = "ttir.expm1"(%arg0, %0) <{operandSegmentSizes = array}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32> - // CHECK: %{{[0-9]+}} = "ttnn.expm1"(%arg0, [[VAL0]]) <{operandSegmentSizes = array}> : (tensor<[[TENSOR_SHAPE]]x{{.*}}, {{.*}}>, tensor<[[TENSOR_SHAPE]]x{{.*}}, {{.*}}) -> tensor<[[TENSOR_SHAPE]]x{{.*}}, {{.*}}> + // CHECK: "ttnn.expm1" + // CHECK-SAME: tensor<64x128xf32 + // CHECK-SAME: -> tensor<64x128xf32 return %1 : tensor<64x128xf32> - // CHECK: return %{{[0-9]+}} : tensor<[[TENSOR_SHAPE]]xf32, {{.*}}> } } diff --git a/test/ttmlir/Dialect/TTNN/eltwise/unary/floor/simple_floor.mlir b/test/ttmlir/Dialect/TTNN/eltwise/unary/floor/simple_floor.mlir index fd418fbda5..842277ad4c 100644 --- a/test/ttmlir/Dialect/TTNN/eltwise/unary/floor/simple_floor.mlir +++ b/test/ttmlir/Dialect/TTNN/eltwise/unary/floor/simple_floor.mlir @@ -1,14 +1,11 @@ // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline %s | FileCheck %s module attributes {} { func.func @floor(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> { - // CHECK: %{{[0-9]+}} = "ttnn.empty" - // CHECK-SAME: [[TENSOR:tensor<64x128xf32,]] %0 = tensor.empty() : tensor<64x128xf32> - // CHECK: %{{[0-9]+}} = "ttnn.floor" - // CHECK-SAME: [[TENSOR]] - // CHECK-SAME: [[TENSOR]] - // CHECK-SAME: -> [[TENSOR]] %1 = "ttir.floor"(%arg0, %0) <{operandSegmentSizes = array}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32> + // CHECK: "ttnn.floor" + // CHECK-SAME: tensor<64x128xf32 + // CHECK-SAME: -> tensor<64x128xf32 return %1 : tensor<64x128xf32> } } diff --git a/test/ttmlir/Dialect/TTNN/eltwise/unary/gelu/simple_gelu.mlir b/test/ttmlir/Dialect/TTNN/eltwise/unary/gelu/simple_gelu.mlir index 1cec49a356..5f429016e6 100644 --- a/test/ttmlir/Dialect/TTNN/eltwise/unary/gelu/simple_gelu.mlir +++ b/test/ttmlir/Dialect/TTNN/eltwise/unary/gelu/simple_gelu.mlir @@ -1,14 +1,11 @@ // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline %s | FileCheck %s module attributes {} { func.func @forward(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> { - // CHECK: "ttnn.empty" - // CHECK-SAME: tensor<64x128xf32, %0 = tensor.empty() : tensor<64x128xf32> - // CHECK: "ttnn.gelu" - // CHECK-SAME: tensor<64x128xf32, - // CHECK-SAME: tensor<64x128xf32, - // CHECK-SAME: tensor<64x128xf32, %1 = "ttir.gelu"(%arg0, %0) <{operandSegmentSizes = array}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32> + // CHECK: "ttnn.gelu" + // CHECK-SAME: tensor<64x128xf32 + // CHECK-SAME: -> tensor<64x128xf32 return %1 : tensor<64x128xf32> } } diff --git a/test/ttmlir/Dialect/TTNN/eltwise/unary/isfinite/simple_isfinite.mlir b/test/ttmlir/Dialect/TTNN/eltwise/unary/isfinite/simple_isfinite.mlir index 7745adf067..4e93126e1e 100644 --- a/test/ttmlir/Dialect/TTNN/eltwise/unary/isfinite/simple_isfinite.mlir +++ b/test/ttmlir/Dialect/TTNN/eltwise/unary/isfinite/simple_isfinite.mlir @@ -1,14 +1,11 @@ // RUN: ttmlir-opt
--ttir-to-ttnn-backend-pipeline %s | FileCheck %s module attributes {} { func.func @is_finite(%arg0: tensor<64x128xbf16>) -> tensor<64x128xbf16> { - // CHECK: %[[C:.*]] = "ttnn.empty" - // CHECK-SAME: [[TENSOR:tensor<64x128xbf16,]] %0 = tensor.empty() : tensor<64x128xbf16> - // CHECK: %[[C:.*]] = "ttnn.isfinite" - // CHECK-SAME: tensor<64x128xbf16, - // CHECK-SAME: [[TENSOR]] - // CHECK-SAME: -> [[TENSOR]] %1 = "ttir.isfinite"(%arg0, %0) <{operandSegmentSizes = array}> : (tensor<64x128xbf16>, tensor<64x128xbf16>) -> tensor<64x128xbf16> + // CHECK: "ttnn.isfinite" + // CHECK-SAME: tensor<64x128xbf16 + // CHECK-SAME: -> tensor<64x128xbf16 return %1 : tensor<64x128xbf16> } } diff --git a/test/ttmlir/Dialect/TTNN/eltwise/unary/leaky_relu/simple_leaky_relu.mlir b/test/ttmlir/Dialect/TTNN/eltwise/unary/leaky_relu/simple_leaky_relu.mlir index 93dbb03f0f..78555ed49e 100644 --- a/test/ttmlir/Dialect/TTNN/eltwise/unary/leaky_relu/simple_leaky_relu.mlir +++ b/test/ttmlir/Dialect/TTNN/eltwise/unary/leaky_relu/simple_leaky_relu.mlir @@ -1,14 +1,11 @@ // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline %s | FileCheck %s module attributes {} { func.func @leaky_relu(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> { - // CHECK: %[[C:.*]] = "ttnn.empty" - // CHECK-SAME: [[TENSOR:tensor<64x128xf32,]] %0 = tensor.empty() : tensor<64x128xf32> - // CHECK: %[[C:.*]] = "ttnn.leaky_relu" - // CHECK-SAME: tensor<64x128xf32, - // CHECK-SAME: [[TENSOR]] - // CHECK-SAME: -> [[TENSOR]] %1 = "ttir.leaky_relu"(%arg0, %0) <{parameter = 0.01 : f32, operandSegmentSizes = array}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32> + // CHECK: "ttnn.leaky_relu" + // CHECK-SAME: (tensor<64x128xf32 + // CHECK-SAME: -> tensor<64x128xf32 return %1 : tensor<64x128xf32> } } diff --git a/test/ttmlir/Dialect/TTNN/eltwise/unary/log1p/simple_log1p.mlir b/test/ttmlir/Dialect/TTNN/eltwise/unary/log1p/simple_log1p.mlir index 7d6ca51f3a..25e72c1b4f 100644 --- a/test/ttmlir/Dialect/TTNN/eltwise/unary/log1p/simple_log1p.mlir +++ b/test/ttmlir/Dialect/TTNN/eltwise/unary/log1p/simple_log1p.mlir @@ -2,10 +2,10 @@ module attributes {} { func.func @forward(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> { %0 = tensor.empty() : tensor<64x128xf32> - // CHECK: [[VAL0:%[0-9]+]] = "ttnn.empty"(%{{[0-9]+}}) <{dtype = {{.*}}, layout = {{.*}}, memory_config = {{.*}}, <{{.*}}>>, shape = #ttnn.shape<[[TENSOR_SHAPE:[0-9]+x[0-9]+]]>}> %1 = "ttir.log1p"(%arg0, %0) <{operandSegmentSizes = array}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32> - // CHECK: %{{[0-9]+}} = "ttnn.log1p"(%arg0, [[VAL0]]) <{operandSegmentSizes = array}> : (tensor<[[TENSOR_SHAPE]]x{{.*}}, {{.*}}>, tensor<[[TENSOR_SHAPE]]x{{.*}}, {{.*}}) -> tensor<[[TENSOR_SHAPE]]x{{.*}}, {{.*}}> + // CHECK: "ttnn.log1p" + // CHECK-SAME: tensor<64x128xf32 + // CHECK-SAME: -> tensor<64x128xf32 return %1 : tensor<64x128xf32> - // CHECK: return %{{[0-9]+}} : tensor<[[TENSOR_SHAPE]]xf32, {{.*}}> } } diff --git a/test/ttmlir/Dialect/TTNN/eltwise/unary/logical_not/simple_not.mlir b/test/ttmlir/Dialect/TTNN/eltwise/unary/logical_not/simple_not.mlir index a80dffca88..d48ee5f659 100644 --- a/test/ttmlir/Dialect/TTNN/eltwise/unary/logical_not/simple_not.mlir +++ b/test/ttmlir/Dialect/TTNN/eltwise/unary/logical_not/simple_not.mlir @@ -2,11 +2,10 @@ module attributes {} { func.func @logical_not(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> { %0 = tensor.empty() : tensor<64x128xf32> - // CHECK: {{.*}} = "ttnn.empty"{{.*}} %1 = "ttir.logical_not"(%arg0, %0) <{operandSegmentSizes = 
array}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32> - // CHECK: %[[C:.*]] = "ttnn.logical_not" - // CHECK-SAME: tensor<64x128xf32, - // CHECK-SAME: tensor<64x128xf32, + // CHECK: "ttnn.logical_not" + // CHECK-SAME: tensor<64x128xf32 + // CHECK-SAME: -> tensor<64x128xf32 return %1 : tensor<64x128xf32> } } diff --git a/test/ttmlir/Dialect/TTNN/eltwise/unary/negate/simple_neg.mlir b/test/ttmlir/Dialect/TTNN/eltwise/unary/negate/simple_neg.mlir index aa63ee6e52..bafe10faaf 100644 --- a/test/ttmlir/Dialect/TTNN/eltwise/unary/negate/simple_neg.mlir +++ b/test/ttmlir/Dialect/TTNN/eltwise/unary/negate/simple_neg.mlir @@ -1,10 +1,11 @@ // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline %s | FileCheck %s module attributes {} { func.func @forward(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> { - // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]] %0 = tensor.empty() : tensor<64x128xf32> - // CHECK: %[[C:.*]] = "ttnn.neg"[[C:.*]] %1 = "ttir.neg"(%arg0, %0) <{operandSegmentSizes = array}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32> + // CHECK: "ttnn.neg" + // CHECK-SAME: tensor<64x128xf32 + // CHECK-SAME: -> tensor<64x128xf32 return %1 : tensor<64x128xf32> } } diff --git a/test/ttmlir/Dialect/TTNN/eltwise/unary/reciprocal/simple_reciprocal.mlir b/test/ttmlir/Dialect/TTNN/eltwise/unary/reciprocal/simple_reciprocal.mlir index fd98ade3ef..132963829f 100644 --- a/test/ttmlir/Dialect/TTNN/eltwise/unary/reciprocal/simple_reciprocal.mlir +++ b/test/ttmlir/Dialect/TTNN/eltwise/unary/reciprocal/simple_reciprocal.mlir @@ -1,10 +1,11 @@ // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline %s | FileCheck %s module attributes {} { func.func @forward(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> { - // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]] %0 = tensor.empty() : tensor<64x128xf32> - // CHECK: %[[C:.*]] = "ttnn.reciprocal"[[C:.*]] %1 = "ttir.reciprocal"(%arg0, %0) <{operandSegmentSizes = array}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32> + // CHECK: "ttnn.reciprocal" + // CHECK-SAME: (tensor<64x128xf32 + // CHECK-SAME: -> tensor<64x128xf32 return %1 : tensor<64x128xf32> } } diff --git a/test/ttmlir/Dialect/TTNN/eltwise/unary/relu/simple_relu.mlir b/test/ttmlir/Dialect/TTNN/eltwise/unary/relu/simple_relu.mlir index 7666df7a1c..72e4d3605b 100644 --- a/test/ttmlir/Dialect/TTNN/eltwise/unary/relu/simple_relu.mlir +++ b/test/ttmlir/Dialect/TTNN/eltwise/unary/relu/simple_relu.mlir @@ -1,10 +1,11 @@ // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline %s | FileCheck %s module attributes {} { func.func @forward(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> { - // CHECK: "ttnn.empty" %0 = tensor.empty() : tensor<64x128xf32> - // CHECK: "ttnn.relu" %1 = "ttir.relu"(%arg0, %0) <{operandSegmentSizes = array}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32> + // CHECK: "ttnn.relu" + // CHECK-SAME: (tensor<64x128xf32 + // CHECK-SAME: -> tensor<64x128xf32 return %1 : tensor<64x128xf32> } } diff --git a/test/ttmlir/Dialect/TTNN/eltwise/unary/rsqrt/simple_rsqrt.mlir b/test/ttmlir/Dialect/TTNN/eltwise/unary/rsqrt/simple_rsqrt.mlir index b7a339d229..89a2b0e6d5 100644 --- a/test/ttmlir/Dialect/TTNN/eltwise/unary/rsqrt/simple_rsqrt.mlir +++ b/test/ttmlir/Dialect/TTNN/eltwise/unary/rsqrt/simple_rsqrt.mlir @@ -1,10 +1,11 @@ // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline %s | FileCheck %s module attributes {} { func.func @forward(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> { - // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]] %0 = tensor.empty() : 
tensor<64x128xf32> - // CHECK: %[[C:.*]] = "ttnn.rsqrt"[[C:.*]] %1 = "ttir.rsqrt"(%arg0, %0) <{operandSegmentSizes = array}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32> + // CHECK: "ttnn.rsqrt" + // CHECK-SAME: tensor<64x128xf32 + // CHECK-SAME: -> tensor<64x128xf32 return %1 : tensor<64x128xf32> } } diff --git a/test/ttmlir/Dialect/TTNN/eltwise/unary/sigmoid/simple_sigmoid.mlir b/test/ttmlir/Dialect/TTNN/eltwise/unary/sigmoid/simple_sigmoid.mlir index d3762db91b..a7520dd2e1 100644 --- a/test/ttmlir/Dialect/TTNN/eltwise/unary/sigmoid/simple_sigmoid.mlir +++ b/test/ttmlir/Dialect/TTNN/eltwise/unary/sigmoid/simple_sigmoid.mlir @@ -1,10 +1,11 @@ // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline %s | FileCheck %s module attributes {} { func.func @forward(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> { - // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]] %0 = tensor.empty() : tensor<64x128xf32> - // CHECK: %[[C:.*]] = "ttnn.sigmoid"[[C:.*]] %1 = "ttir.sigmoid"(%arg0, %0) <{operandSegmentSizes = array}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32> + // CHECK: "ttnn.sigmoid" + // CHECK-SAME: tensor<64x128xf32 + // CHECK-SAME: -> tensor<64x128xf32 return %1 : tensor<64x128xf32> } } diff --git a/test/ttmlir/Dialect/TTNN/eltwise/unary/sign/simple_sign.mlir b/test/ttmlir/Dialect/TTNN/eltwise/unary/sign/simple_sign.mlir index ccc3b82a84..1209bbf22e 100644 --- a/test/ttmlir/Dialect/TTNN/eltwise/unary/sign/simple_sign.mlir +++ b/test/ttmlir/Dialect/TTNN/eltwise/unary/sign/simple_sign.mlir @@ -2,10 +2,10 @@ module attributes {} { func.func @forward(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> { %0 = tensor.empty() : tensor<64x128xf32> - // CHECK: [[VAL0:%[0-9]+]] = "ttnn.empty"(%{{[0-9]+}}) <{dtype = {{.*}}, layout = {{.*}}, memory_config = {{.*}}, <{{.*}}>>, shape = #ttnn.shape<[[TENSOR_SHAPE:[0-9]+x[0-9]+]]>}> %1 = "ttir.sign"(%arg0, %0) <{operandSegmentSizes = array}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32> - // CHECK: %{{[0-9]+}} = "ttnn.sign"(%arg0, [[VAL0]]) <{operandSegmentSizes = array}> : (tensor<[[TENSOR_SHAPE]]x{{.*}}, {{.*}}>, tensor<[[TENSOR_SHAPE]]x{{.*}}, {{.*}}) -> tensor<[[TENSOR_SHAPE]]x{{.*}}, {{.*}}> + // CHECK: "ttnn.sign" + // CHECK-SAME: tensor<64x128xf32 + // CHECK-SAME: -> tensor<64x128xf32 return %1 : tensor<64x128xf32> - // CHECK: return %{{[0-9]+}} : tensor<[[TENSOR_SHAPE]]xf32, {{.*}}> } } diff --git a/test/ttmlir/Dialect/TTNN/eltwise/unary/sin/simple_sin.mlir b/test/ttmlir/Dialect/TTNN/eltwise/unary/sin/simple_sin.mlir index a1ebaa368b..a65f328676 100644 --- a/test/ttmlir/Dialect/TTNN/eltwise/unary/sin/simple_sin.mlir +++ b/test/ttmlir/Dialect/TTNN/eltwise/unary/sin/simple_sin.mlir @@ -1,10 +1,11 @@ // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline %s | FileCheck %s module attributes {} { func.func @forward(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> { - // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]] %0 = tensor.empty() : tensor<64x128xf32> - // CHECK: %[[C:.*]] = "ttnn.sin"[[C:.*]] %1 = "ttir.sin"(%arg0, %0) <{operandSegmentSizes = array}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32> + // CHECK: "ttnn.sin" + // CHECK-SAME: tensor<64x128xf32 + // CHECK-SAME: -> tensor<64x128xf32 return %1 : tensor<64x128xf32> } } diff --git a/test/ttmlir/Dialect/TTNN/eltwise/unary/sqrt/simple_sqrt.mlir b/test/ttmlir/Dialect/TTNN/eltwise/unary/sqrt/simple_sqrt.mlir index bd468bd8ee..bd5bcad78d 100644 --- a/test/ttmlir/Dialect/TTNN/eltwise/unary/sqrt/simple_sqrt.mlir +++ 
b/test/ttmlir/Dialect/TTNN/eltwise/unary/sqrt/simple_sqrt.mlir @@ -1,10 +1,11 @@ // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline %s | FileCheck %s module attributes {} { func.func @forward(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> { - // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]] %0 = tensor.empty() : tensor<64x128xf32> - // CHECK: %[[C:.*]] = "ttnn.sqrt"[[C:.*]] %1 = "ttir.sqrt"(%arg0, %0) <{operandSegmentSizes = array}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32> + // CHECK: "ttnn.sqrt" + // CHECK-SAME: tensor<64x128xf32 + // CHECK-SAME: -> tensor<64x128xf32 return %1 : tensor<64x128xf32> } } diff --git a/test/ttmlir/Dialect/TTNN/eltwise/unary/tan/simple_tan.mlir b/test/ttmlir/Dialect/TTNN/eltwise/unary/tan/simple_tan.mlir index 987d459aba..29279b22f2 100644 --- a/test/ttmlir/Dialect/TTNN/eltwise/unary/tan/simple_tan.mlir +++ b/test/ttmlir/Dialect/TTNN/eltwise/unary/tan/simple_tan.mlir @@ -2,10 +2,10 @@ module attributes {} { func.func @forward(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> { %0 = tensor.empty() : tensor<64x128xf32> - // CHECK: [[VAL0:%[0-9]+]] = "ttnn.empty"(%{{[0-9]+}}) <{dtype = {{.*}}, layout = {{.*}}, memory_config = {{.*}}, <{{.*}}>>, shape = #ttnn.shape<[[TENSOR_SHAPE:[0-9]+x[0-9]+]]>}> %1 = "ttir.tan"(%arg0, %0) <{operandSegmentSizes = array}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32> - // CHECK: %{{[0-9]+}} = "ttnn.tan"(%arg0, [[VAL0]]) <{operandSegmentSizes = array}> : (tensor<[[TENSOR_SHAPE]]x{{.*}}, {{.*}}>, tensor<[[TENSOR_SHAPE]]x{{.*}}, {{.*}}) -> tensor<[[TENSOR_SHAPE]]x{{.*}}, {{.*}}> + // CHECK: "ttnn.tan" + // CHECK-SAME: tensor<64x128xf32 + // CHECK-SAME: -> tensor<64x128xf32 return %1 : tensor<64x128xf32> - // CHECK: return %{{[0-9]+}} : tensor<[[TENSOR_SHAPE]]xf32, {{.*}}> } } diff --git a/test/ttmlir/Dialect/TTNN/eltwise/unary/tanh/simple_tanh.mlir b/test/ttmlir/Dialect/TTNN/eltwise/unary/tanh/simple_tanh.mlir index 62618ae829..720be63225 100644 --- a/test/ttmlir/Dialect/TTNN/eltwise/unary/tanh/simple_tanh.mlir +++ b/test/ttmlir/Dialect/TTNN/eltwise/unary/tanh/simple_tanh.mlir @@ -2,10 +2,10 @@ module attributes {} { func.func @forward(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> { %0 = tensor.empty() : tensor<64x128xf32> - // CHECK: [[VAL0:%[0-9]+]] = "ttnn.empty"(%{{[0-9]+}}) <{dtype = {{.*}}, layout = {{.*}}, memory_config = {{.*}}, <{{.*}}>>, shape = #ttnn.shape<[[TENSOR_SHAPE:[0-9]+x[0-9]+]]>}> %1 = "ttir.tanh"(%arg0, %0) <{operandSegmentSizes = array}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32> - // CHECK: %{{[0-9]+}} = "ttnn.tanh"(%arg0, [[VAL0]]) <{operandSegmentSizes = array}> : (tensor<[[TENSOR_SHAPE]]x{{.*}}, {{.*}}>, tensor<[[TENSOR_SHAPE]]x{{.*}}, {{.*}}) -> tensor<[[TENSOR_SHAPE]]x{{.*}}, {{.*}}> + // CHECK: "ttnn.tanh" + // CHECK-SAME: tensor<64x128xf32 + // CHECK-SAME: -> tensor<64x128xf32 return %1 : tensor<64x128xf32> - // CHECK: return %{{[0-9]+}} : tensor<[[TENSOR_SHAPE]]xf32, {{.*}}> } } diff --git a/test/ttmlir/Dialect/TTNN/implicit_broadcast.mlir b/test/ttmlir/Dialect/TTNN/implicit_broadcast.mlir index 5be11a6484..1fcabc2b73 100644 --- a/test/ttmlir/Dialect/TTNN/implicit_broadcast.mlir +++ b/test/ttmlir/Dialect/TTNN/implicit_broadcast.mlir @@ -37,8 +37,8 @@ module { func.func @main(%arg0: tensor<1x16x32xf32>, %arg1: tensor<1x1x32xf32>) -> tensor<1x16x32xf32> { // CHECK: [[VAL0:%[0-9]+]] = "ttnn.repeat" // CHECK-SAME: repeat_dims = #ttnn.shape<1x16x1> - // CHECK: %{{[0-9]+}} = "ttnn.multiply"(%arg0, %{{[0-9]+}}, %{{[0-9]+}}) - // CHECK: %{{[0-9]+}} 
= "ttnn.bitwise_and"([[VAL0]], %{{[0-9]+}}, %{{[0-9]+}}) + // CHECK: %{{[0-9]+}} = "ttnn.multiply"(%arg0, %{{[0-9]+}}) + // CHECK: %{{[0-9]+}} = "ttnn.bitwise_and"([[VAL0]], %{{[0-9]+}}) %0 = tensor.empty() : tensor<1x16x32xf32> %1 = "ttir.broadcast"(%arg1, %0) <{broadcast_dimensions = array}> : (tensor<1x1x32xf32>, tensor<1x16x32xf32>) -> tensor<1x16x32xf32> %2 = tensor.empty() : tensor<1x16x32xf32> @@ -53,7 +53,7 @@ module { func.func @main(%arg0: tensor<1x16x32xf32>, %arg1: tensor<1x1x32xf32>) -> tensor<1x16x32xf32> { // CHECK-NOT: ttnn.repeat // CHECK: [[VAL0:%[0-9]+]] = "ttnn.add" - // CHECK: %{{[0-9]+}} = "ttnn.add"(%arg1, [[VAL0]], %{{[0-9]+}}) + // CHECK: %{{[0-9]+}} = "ttnn.add"(%arg1, [[VAL0]]) %0 = tensor.empty() : tensor<1x16x32xf32> %1 = "ttir.broadcast"(%arg1, %0) <{broadcast_dimensions = array}> : (tensor<1x1x32xf32>, tensor<1x16x32xf32>) -> tensor<1x16x32xf32> %2 = tensor.empty() : tensor<1x16x32xf32> diff --git a/test/ttmlir/Dialect/TTNN/multiple_func.mlir b/test/ttmlir/Dialect/TTNN/multiple_func.mlir index 3961fac038..006827b138 100644 --- a/test/ttmlir/Dialect/TTNN/multiple_func.mlir +++ b/test/ttmlir/Dialect/TTNN/multiple_func.mlir @@ -1,10 +1,12 @@ // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline %s | FileCheck %s module attributes {} { func.func @main(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xf32> { - // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]] %0 = tensor.empty() : tensor<64x128xf32> - // CHECK: %[[C:.*]] = "ttnn.multiply"[[C:.*]] %1 = call @do_mult(%arg0, %arg1, %0) : (tensor<64x128xf32>, tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32> + // CHECK: "ttnn.multiply" + // CHECK-SAME: tensor<64x128xf32 + // CHECK-SAME: tensor<64x128xf32 + // CHECK-SAME: -> tensor<64x128xf32 return %1 : tensor<64x128xf32> } diff --git a/test/ttmlir/Dialect/TTNN/optimizer/test_override_reshard_edges.mlir b/test/ttmlir/Dialect/TTNN/optimizer/test_override_reshard_edges.mlir index 4a375fa6e3..e5e41bef16 100644 --- a/test/ttmlir/Dialect/TTNN/optimizer/test_override_reshard_edges.mlir +++ b/test/ttmlir/Dialect/TTNN/optimizer/test_override_reshard_edges.mlir @@ -12,17 +12,14 @@ module attributes {tt.device = #device} { %0 = "ttnn.get_device"() <{mesh_shape = #ttnn}> : () -> !tt.device<#device> %1 = "ttnn.to_layout"(%arg0, %0) <{dtype = #tt.supportedDataTypes, layout = #ttnn.layout, memory_config = #ttnn.memory_config<, <<32x32>>, >}> : (tensor<1x32x32xf32, #ttnn_layout>, !tt.device<#device>) -> tensor<1x32x32xf32, #ttnn_layout1> %2 = "ttnn.to_layout"(%arg1, %0) <{dtype = #tt.supportedDataTypes, layout = #ttnn.layout, memory_config = #ttnn.memory_config<, <<32x32>>, >}> : (tensor<1x32x32xf32, #ttnn_layout>, !tt.device<#device>) -> tensor<1x32x32xf32, #ttnn_layout1> - %3 = "ttnn.empty"(%0) <{dtype = #tt.supportedDataTypes, layout = #ttnn.layout, memory_config = #ttnn.memory_config<, <<32x32>>, >, shape = #ttnn.shape<1x32x32>}> : (!tt.device<#device>) -> tensor<1x32x32xf32, #ttnn_layout1> loc(#loc1) // CHECK: %[[C:.*]] = "ttnn.add"{{.*}} -> tensor<1x32x32xf32, #[[LAYOUT_1]]> - %4 = "ttnn.add"(%1, %2, %3) <{operandSegmentSizes = array}> : (tensor<1x32x32xf32, #ttnn_layout1>, tensor<1x32x32xf32, #ttnn_layout1>, tensor<1x32x32xf32, #ttnn_layout1>) -> tensor<1x32x32xf32, #ttnn_layout1> loc(#loc1) - %5 = "ttnn.to_layout"(%arg0, %0) <{dtype = #tt.supportedDataTypes, layout = #ttnn.layout, memory_config = #ttnn.memory_config<, <<32x32>>, >}> : (tensor<1x32x32xf32, #ttnn_layout>, !tt.device<#device>) -> tensor<1x32x32xf32, #ttnn_layout1> - %6 = "ttnn.empty"(%0) 
<{dtype = #tt.supportedDataTypes, layout = #ttnn.layout, memory_config = #ttnn.memory_config<, <<32x32>>, >, shape = #ttnn.shape<1x32x32>}> : (!tt.device<#device>) -> tensor<1x32x32xf32, #ttnn_layout1> loc(#loc2) + %3 = "ttnn.add"(%1, %2) : (tensor<1x32x32xf32, #ttnn_layout1>, tensor<1x32x32xf32, #ttnn_layout1>) -> tensor<1x32x32xf32, #ttnn_layout1> loc(#loc1) + %4 = "ttnn.to_layout"(%arg0, %0) <{dtype = #tt.supportedDataTypes, layout = #ttnn.layout, memory_config = #ttnn.memory_config<, <<32x32>>, >}> : (tensor<1x32x32xf32, #ttnn_layout>, !tt.device<#device>) -> tensor<1x32x32xf32, #ttnn_layout1> // CHECK: %{{.*}} = "ttnn.to_layout"(%[[C]], %0) {{.*}} -> tensor<1x32x32xf32, #[[LAYOUT_2]]> - %7 = "ttnn.add"(%4, %6, %6) <{operandSegmentSizes = array}> : (tensor<1x32x32xf32, #ttnn_layout1>, tensor<1x32x32xf32, #ttnn_layout1>, tensor<1x32x32xf32, #ttnn_layout1>) -> tensor<1x32x32xf32, #ttnn_layout1> loc(#loc2) - %8 = "ttnn.empty"(%0) <{dtype = #tt.supportedDataTypes, layout = #ttnn.layout, memory_config = #ttnn.memory_config<, <<32x32>>, >, shape = #ttnn.shape<1x32x32>}> : (!tt.device<#device>) -> tensor<1x32x32xf32, #ttnn_layout1> loc(#loc3) - %9 = "ttnn.relu"(%7, %8) <{operandSegmentSizes = array}> : (tensor<1x32x32xf32, #ttnn_layout1>, tensor<1x32x32xf32, #ttnn_layout1>) -> tensor<1x32x32xf32, #ttnn_layout1> loc(#loc3) - %10 = "ttnn.to_layout"(%9) <{dtype = #tt.supportedDataTypes, layout = #ttnn.layout, memory_config = #ttnn.memory_config<, <<32x32>>>}> : (tensor<1x32x32xf32, #ttnn_layout1>) -> tensor<1x32x32xf32, #ttnn_layout> - return %10 : tensor<1x32x32xf32, #ttnn_layout> + %5 = "ttnn.add"(%3, %3) : (tensor<1x32x32xf32, #ttnn_layout1>, tensor<1x32x32xf32, #ttnn_layout1>) -> tensor<1x32x32xf32, #ttnn_layout1> loc(#loc2) + %6 = "ttnn.relu"(%5) : (tensor<1x32x32xf32, #ttnn_layout1>) -> tensor<1x32x32xf32, #ttnn_layout1> loc(#loc3) + %7 = "ttnn.to_layout"(%6) <{dtype = #tt.supportedDataTypes, layout = #ttnn.layout, memory_config = #ttnn.memory_config<, <<32x32>>>}> : (tensor<1x32x32xf32, #ttnn_layout1>) -> tensor<1x32x32xf32, #ttnn_layout> + return %7 : tensor<1x32x32xf32, #ttnn_layout> } } #loc1 = loc("add_1_2") diff --git a/test/ttmlir/Dialect/TTNN/optimizer/ttir_to_ttnn_pipeline.mlir b/test/ttmlir/Dialect/TTNN/optimizer/ttir_to_ttnn_pipeline.mlir index 5d924a9195..03be014790 100644 --- a/test/ttmlir/Dialect/TTNN/optimizer/ttir_to_ttnn_pipeline.mlir +++ b/test/ttmlir/Dialect/TTNN/optimizer/ttir_to_ttnn_pipeline.mlir @@ -1,10 +1,9 @@ // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="enable-optimizer=true" %s | FileCheck %s module attributes {} { func.func @forward(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xf32> { - // CHECK: %{{.*}} = "ttnn.empty"{{.*}} %0 = tensor.empty() : tensor<64x128xf32> - // CHECK: %{{.*}} = "ttnn.multiply"{{.*}} %1 = "ttir.multiply"(%arg0, %arg1, %0) <{operandSegmentSizes = array}> : (tensor<64x128xf32>, tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32> + // CHECK: "ttnn.multiply" return %1 : tensor<64x128xf32> } } diff --git a/test/ttmlir/Dialect/TTNN/simple_scatter.mlir b/test/ttmlir/Dialect/TTNN/simple_scatter.mlir index 43c87b89a2..e72e41aa33 100644 --- a/test/ttmlir/Dialect/TTNN/simple_scatter.mlir +++ b/test/ttmlir/Dialect/TTNN/simple_scatter.mlir @@ -3,13 +3,12 @@ module attributes {} { func.func @forward(%arg0: tensor<1x3x320x320xf32>, %arg1: tensor<1x3x32x32xf32>) -> tensor<1x3x320x320xf32> { %0 = tensor.empty() : tensor<1x3x320x320xf32> %1 = tensor.empty() : tensor<1x1xi32> - // CHECK: [[VAL0:%[0-9]+]] = 
"ttnn.empty"(%{{[0-9]+}}) <{dtype = {{.*}}, layout = {{.*}}, memory_config = {{.*}}, shape = #ttnn.shape<[[TENSOR_SHAPE0:[0-9]+x[0-9]+x[0-9]+x[0-9]+]]>}> : (!tt.device<#device>) -> tensor<[[TENSOR_SHAPE1:[0-9]+x[0-9]+x[0-9]+x[0-9]+xf[0-9]+]], {{.*}}> %2 = "ttir.scatter"(%arg0, %1, %arg1, %0) <{index_vector_dim = 1 : i32, indices_are_sorted = false, input_batching_dims = array, inserted_window_dims = array, scatter_dims_to_operand_dims = array, scatter_indices_batching_dims = array, unique_indices = false, update_window_dims = array}> ({ ^bb0(%arg3: tensor<1xf32>, %arg4: tensor<1xf32>): "ttir.yield"(%arg4) : (tensor<1xf32>) -> () }) : (tensor<1x3x320x320xf32>, tensor<1x1xi32>, tensor<1x3x32x32xf32>, tensor<1x3x320x320xf32>) -> tensor<1x3x320x320xf32> - // CHECK: %{{[0-9]+}} = "ttnn.scatter"(%arg1, %arg0, %1) <{operandSegmentSizes = array}> : (tensor<1x3x32x32xf32, {{.*}}>, tensor<[[TENSOR_SHAPE1]], {{.*}}>, tensor<[[TENSOR_SHAPE1]], {{.*}}>) -> tensor<[[TENSOR_SHAPE1]], {{.*}}> + // CHECK: %{{[0-9]+}} = "ttnn.scatter"(%arg1, %arg0) : (tensor<1x3x32x32xf32, {{.*}}>, tensor<1x3x320x320xf32, {{.*}}>) -> tensor<1x3x320x320xf32, {{.*}}> return %2 : tensor<1x3x320x320xf32> - // CHECK: return %{{[0-9]+}} : tensor<[[TENSOR_SHAPE1]], {{.*}}> + // CHECK: return %{{[0-9]+}} : tensor<1x3x320x320xf32, {{.*}}> } } diff --git a/test/ttmlir/Dialect/TTNN/test_remove_dead_values_pass.mlir b/test/ttmlir/Dialect/TTNN/test_remove_dead_values_pass.mlir index 632b1f59a0..67b21bc8bb 100644 --- a/test/ttmlir/Dialect/TTNN/test_remove_dead_values_pass.mlir +++ b/test/ttmlir/Dialect/TTNN/test_remove_dead_values_pass.mlir @@ -15,61 +15,51 @@ module attributes {tt.device = #device, tt.system_desc = #system_desc} { %3 = "ttnn.to_layout"(%arg1) <{layout = #ttnn.layout}> : (tensor<64x128xf32, #ttnn_layout>) -> tensor<64x128xf32, #ttnn_layout1> %4 = "ttnn.to_device"(%3, %0) <{memory_config = #ttnn.memory_config<#dram, <<2x4>>, >}> : (tensor<64x128xf32, #ttnn_layout1>, !tt.device<#device>) -> tensor<64x128xf32, #ttnn_layout1> "ttnn.deallocate"(%3) <{force = false}> : (tensor<64x128xf32, #ttnn_layout1>) -> () - %5 = "ttnn.empty"(%0) <{dtype = #tt.supportedDataTypes, layout = #ttnn.layout, memory_config = #ttnn.memory_config<#dram, <<64x128>>, >, shape = #ttnn.shape<64x128>}> : (!tt.device<#device>) -> tensor<64x128xf32, #ttnn_layout2> - // CHECK: %[[C:.*]] = "ttnn.multiply"[[C:.*]] - %6 = "ttnn.multiply"(%2, %4, %5) <{operandSegmentSizes = array}> : (tensor<64x128xf32, #ttnn_layout1>, tensor<64x128xf32, #ttnn_layout1>, tensor<64x128xf32, #ttnn_layout2>) -> tensor<64x128xf32, #ttnn_layout2> + // CHECK: "ttnn.multiply" + %5 = "ttnn.multiply"(%2, %4) : (tensor<64x128xf32, #ttnn_layout1>, tensor<64x128xf32, #ttnn_layout1>) -> tensor<64x128xf32, #ttnn_layout2> "ttnn.deallocate"(%4) <{force = false}> : (tensor<64x128xf32, #ttnn_layout1>) -> () "ttnn.deallocate"(%2) <{force = false}> : (tensor<64x128xf32, #ttnn_layout1>) -> () - %7 = "ttnn.to_layout"(%arg0) <{layout = #ttnn.layout}> : (tensor<64x128xf32, #ttnn_layout>) -> tensor<64x128xf32, #ttnn_layout1> - %8 = "ttnn.to_device"(%7, %0) <{memory_config = #ttnn.memory_config<#dram, <<2x4>>, >}> : (tensor<64x128xf32, #ttnn_layout1>, !tt.device<#device>) -> tensor<64x128xf32, #ttnn_layout1> - "ttnn.deallocate"(%7) <{force = false}> : (tensor<64x128xf32, #ttnn_layout1>) -> () - %9 = "ttnn.to_layout"(%arg1) <{layout = #ttnn.layout}> : (tensor<64x128xf32, #ttnn_layout>) -> tensor<64x128xf32, #ttnn_layout1> - %10 = "ttnn.to_device"(%9, %0) <{memory_config = #ttnn.memory_config<#dram, 
<<2x4>>, >}> : (tensor<64x128xf32, #ttnn_layout1>, !tt.device<#device>) -> tensor<64x128xf32, #ttnn_layout1> - "ttnn.deallocate"(%9) <{force = false}> : (tensor<64x128xf32, #ttnn_layout1>) -> () - %11 = "ttnn.empty"(%0) <{dtype = #tt.supportedDataTypes, layout = #ttnn.layout, memory_config = #ttnn.memory_config<#dram, <<64x128>>, >, shape = #ttnn.shape<64x128>}> : (!tt.device<#device>) -> tensor<64x128xf32, #ttnn_layout2> - // CHECK-NOT: %[[C:.*]] = "ttnn.add"[[C:.*]] - %12 = "ttnn.add"(%8, %10, %11) <{operandSegmentSizes = array}> : (tensor<64x128xf32, #ttnn_layout1>, tensor<64x128xf32, #ttnn_layout1>, tensor<64x128xf32, #ttnn_layout2>) -> tensor<64x128xf32, #ttnn_layout2> - "ttnn.deallocate"(%11) <{force = false}> : (tensor<64x128xf32, #ttnn_layout2>) -> () - "ttnn.deallocate"(%10) <{force = false}> : (tensor<64x128xf32, #ttnn_layout1>) -> () + %6 = "ttnn.to_layout"(%arg0) <{layout = #ttnn.layout}> : (tensor<64x128xf32, #ttnn_layout>) -> tensor<64x128xf32, #ttnn_layout1> + %7 = "ttnn.to_device"(%6, %0) <{memory_config = #ttnn.memory_config<#dram, <<2x4>>, >}> : (tensor<64x128xf32, #ttnn_layout1>, !tt.device<#device>) -> tensor<64x128xf32, #ttnn_layout1> + "ttnn.deallocate"(%6) <{force = false}> : (tensor<64x128xf32, #ttnn_layout1>) -> () + %8 = "ttnn.to_layout"(%arg1) <{layout = #ttnn.layout}> : (tensor<64x128xf32, #ttnn_layout>) -> tensor<64x128xf32, #ttnn_layout1> + %9 = "ttnn.to_device"(%8, %0) <{memory_config = #ttnn.memory_config<#dram, <<2x4>>, >}> : (tensor<64x128xf32, #ttnn_layout1>, !tt.device<#device>) -> tensor<64x128xf32, #ttnn_layout1> "ttnn.deallocate"(%8) <{force = false}> : (tensor<64x128xf32, #ttnn_layout1>) -> () - %13 = "ttnn.to_layout"(%arg0) <{layout = #ttnn.layout}> : (tensor<64x128xf32, #ttnn_layout>) -> tensor<64x128xf32, #ttnn_layout1> + // CHECK-NOT: "ttnn.add" + %10 = "ttnn.add"(%7, %9) : (tensor<64x128xf32, #ttnn_layout1>, tensor<64x128xf32, #ttnn_layout1>) -> tensor<64x128xf32, #ttnn_layout2> + "ttnn.deallocate"(%9) <{force = false}> : (tensor<64x128xf32, #ttnn_layout1>) -> () + "ttnn.deallocate"(%7) <{force = false}> : (tensor<64x128xf32, #ttnn_layout1>) -> () + %11 = "ttnn.to_layout"(%arg0) <{layout = #ttnn.layout}> : (tensor<64x128xf32, #ttnn_layout>) -> tensor<64x128xf32, #ttnn_layout1> + %12 = "ttnn.to_device"(%11, %0) <{memory_config = #ttnn.memory_config<#dram, <<2x4>>, >}> : (tensor<64x128xf32, #ttnn_layout1>, !tt.device<#device>) -> tensor<64x128xf32, #ttnn_layout1> + "ttnn.deallocate"(%11) <{force = false}> : (tensor<64x128xf32, #ttnn_layout1>) -> () + %13 = "ttnn.to_layout"(%arg1) <{layout = #ttnn.layout}> : (tensor<64x128xf32, #ttnn_layout>) -> tensor<64x128xf32, #ttnn_layout1> %14 = "ttnn.to_device"(%13, %0) <{memory_config = #ttnn.memory_config<#dram, <<2x4>>, >}> : (tensor<64x128xf32, #ttnn_layout1>, !tt.device<#device>) -> tensor<64x128xf32, #ttnn_layout1> "ttnn.deallocate"(%13) <{force = false}> : (tensor<64x128xf32, #ttnn_layout1>) -> () - %15 = "ttnn.to_layout"(%arg1) <{layout = #ttnn.layout}> : (tensor<64x128xf32, #ttnn_layout>) -> tensor<64x128xf32, #ttnn_layout1> - %16 = "ttnn.to_device"(%15, %0) <{memory_config = #ttnn.memory_config<#dram, <<2x4>>, >}> : (tensor<64x128xf32, #ttnn_layout1>, !tt.device<#device>) -> tensor<64x128xf32, #ttnn_layout1> - "ttnn.deallocate"(%15) <{force = false}> : (tensor<64x128xf32, #ttnn_layout1>) -> () - %17 = "ttnn.empty"(%0) <{dtype = #tt.supportedDataTypes, layout = #ttnn.layout, memory_config = #ttnn.memory_config<#dram, <<64x128>>, >, shape = #ttnn.shape<64x128>}> : (!tt.device<#device>) -> 
tensor<64x128xf32, #ttnn_layout2> - // CHECK-NOT: %[[C:.*]] = "ttnn.subtract"[[C:.*]] - %18 = "ttnn.subtract"(%14, %16, %17) <{operandSegmentSizes = array}> : (tensor<64x128xf32, #ttnn_layout1>, tensor<64x128xf32, #ttnn_layout1>, tensor<64x128xf32, #ttnn_layout2>) -> tensor<64x128xf32, #ttnn_layout2> - "ttnn.deallocate"(%17) <{force = false}> : (tensor<64x128xf32, #ttnn_layout2>) -> () - "ttnn.deallocate"(%16) <{force = false}> : (tensor<64x128xf32, #ttnn_layout1>) -> () + // CHECK-NOT: "ttnn.subtract" + %15 = "ttnn.subtract"(%12, %14) : (tensor<64x128xf32, #ttnn_layout1>, tensor<64x128xf32, #ttnn_layout1>) -> tensor<64x128xf32, #ttnn_layout2> "ttnn.deallocate"(%14) <{force = false}> : (tensor<64x128xf32, #ttnn_layout1>) -> () - %19 = "ttnn.to_layout"(%arg0) <{layout = #ttnn.layout}> : (tensor<64x128xf32, #ttnn_layout>) -> tensor<64x128xf32, #ttnn_layout1> - %20 = "ttnn.to_device"(%19, %0) <{memory_config = #ttnn.memory_config<#dram, <<2x4>>, >}> : (tensor<64x128xf32, #ttnn_layout1>, !tt.device<#device>) -> tensor<64x128xf32, #ttnn_layout1> + "ttnn.deallocate"(%12) <{force = false}> : (tensor<64x128xf32, #ttnn_layout1>) -> () + %16 = "ttnn.to_layout"(%arg0) <{layout = #ttnn.layout}> : (tensor<64x128xf32, #ttnn_layout>) -> tensor<64x128xf32, #ttnn_layout1> + %17 = "ttnn.to_device"(%16, %0) <{memory_config = #ttnn.memory_config<#dram, <<2x4>>, >}> : (tensor<64x128xf32, #ttnn_layout1>, !tt.device<#device>) -> tensor<64x128xf32, #ttnn_layout1> + "ttnn.deallocate"(%16) <{force = false}> : (tensor<64x128xf32, #ttnn_layout1>) -> () + %18 = "ttnn.to_layout"(%arg1) <{layout = #ttnn.layout}> : (tensor<64x128xf32, #ttnn_layout>) -> tensor<64x128xf32, #ttnn_layout1> + %19 = "ttnn.to_device"(%18, %0) <{memory_config = #ttnn.memory_config<#dram, <<2x4>>, >}> : (tensor<64x128xf32, #ttnn_layout1>, !tt.device<#device>) -> tensor<64x128xf32, #ttnn_layout1> + "ttnn.deallocate"(%18) <{force = false}> : (tensor<64x128xf32, #ttnn_layout1>) -> () + // CHECK-NOT: "ttnn.div" + %20 = "ttnn.div"(%17, %19) : (tensor<64x128xf32, #ttnn_layout1>, tensor<64x128xf32, #ttnn_layout1>) -> tensor<64x128xf32, #ttnn_layout2> "ttnn.deallocate"(%19) <{force = false}> : (tensor<64x128xf32, #ttnn_layout1>) -> () - %21 = "ttnn.to_layout"(%arg1) <{layout = #ttnn.layout}> : (tensor<64x128xf32, #ttnn_layout>) -> tensor<64x128xf32, #ttnn_layout1> + "ttnn.deallocate"(%17) <{force = false}> : (tensor<64x128xf32, #ttnn_layout1>) -> () + %21 = "ttnn.to_layout"(%arg0) <{layout = #ttnn.layout}> : (tensor<64x128xf32, #ttnn_layout>) -> tensor<64x128xf32, #ttnn_layout1> %22 = "ttnn.to_device"(%21, %0) <{memory_config = #ttnn.memory_config<#dram, <<2x4>>, >}> : (tensor<64x128xf32, #ttnn_layout1>, !tt.device<#device>) -> tensor<64x128xf32, #ttnn_layout1> "ttnn.deallocate"(%21) <{force = false}> : (tensor<64x128xf32, #ttnn_layout1>) -> () - %23 = "ttnn.empty"(%0) <{dtype = #tt.supportedDataTypes, layout = #ttnn.layout, memory_config = #ttnn.memory_config<#dram, <<64x128>>, >, shape = #ttnn.shape<64x128>}> : (!tt.device<#device>) -> tensor<64x128xf32, #ttnn_layout2> - // CHECK-NOT: %[[C:.*]] = "ttnn.div"[[C:.*]] - %24 = "ttnn.div"(%20, %22, %23) <{operandSegmentSizes = array}> : (tensor<64x128xf32, #ttnn_layout1>, tensor<64x128xf32, #ttnn_layout1>, tensor<64x128xf32, #ttnn_layout2>) -> tensor<64x128xf32, #ttnn_layout2> - "ttnn.deallocate"(%23) <{force = false}> : (tensor<64x128xf32, #ttnn_layout2>) -> () + %23 = "ttnn.to_layout"(%arg1) <{layout = #ttnn.layout}> : (tensor<64x128xf32, #ttnn_layout>) -> tensor<64x128xf32, #ttnn_layout1> + %24 = 
"ttnn.to_device"(%23, %0) <{memory_config = #ttnn.memory_config<#dram, <<2x4>>, >}> : (tensor<64x128xf32, #ttnn_layout1>, !tt.device<#device>) -> tensor<64x128xf32, #ttnn_layout1> + "ttnn.deallocate"(%23) <{force = false}> : (tensor<64x128xf32, #ttnn_layout1>) -> () + // CHECK-NOT: "ttnn.eq" + %25 = "ttnn.eq"(%22, %24) : (tensor<64x128xf32, #ttnn_layout1>, tensor<64x128xf32, #ttnn_layout1>) -> tensor<64x128xf32, #ttnn_layout2> + "ttnn.deallocate"(%24) <{force = false}> : (tensor<64x128xf32, #ttnn_layout1>) -> () "ttnn.deallocate"(%22) <{force = false}> : (tensor<64x128xf32, #ttnn_layout1>) -> () - "ttnn.deallocate"(%20) <{force = false}> : (tensor<64x128xf32, #ttnn_layout1>) -> () - %25 = "ttnn.to_layout"(%arg0) <{layout = #ttnn.layout}> : (tensor<64x128xf32, #ttnn_layout>) -> tensor<64x128xf32, #ttnn_layout1> - %26 = "ttnn.to_device"(%25, %0) <{memory_config = #ttnn.memory_config<#dram, <<2x4>>, >}> : (tensor<64x128xf32, #ttnn_layout1>, !tt.device<#device>) -> tensor<64x128xf32, #ttnn_layout1> - "ttnn.deallocate"(%25) <{force = false}> : (tensor<64x128xf32, #ttnn_layout1>) -> () - %27 = "ttnn.to_layout"(%arg1) <{layout = #ttnn.layout}> : (tensor<64x128xf32, #ttnn_layout>) -> tensor<64x128xf32, #ttnn_layout1> - %28 = "ttnn.to_device"(%27, %0) <{memory_config = #ttnn.memory_config<#dram, <<2x4>>, >}> : (tensor<64x128xf32, #ttnn_layout1>, !tt.device<#device>) -> tensor<64x128xf32, #ttnn_layout1> - "ttnn.deallocate"(%27) <{force = false}> : (tensor<64x128xf32, #ttnn_layout1>) -> () - %29 = "ttnn.empty"(%0) <{dtype = #tt.supportedDataTypes, layout = #ttnn.layout, memory_config = #ttnn.memory_config<#dram, <<64x128>>, >, shape = #ttnn.shape<64x128>}> : (!tt.device<#device>) -> tensor<64x128xf32, #ttnn_layout2> - // CHECK-NOT: %[[C:.*]] = "ttnn.eq"[[C:.*]] - %30 = "ttnn.eq"(%26, %28, %29) <{operandSegmentSizes = array}> : (tensor<64x128xf32, #ttnn_layout1>, tensor<64x128xf32, #ttnn_layout1>, tensor<64x128xf32, #ttnn_layout2>) -> tensor<64x128xf32, #ttnn_layout2> - "ttnn.deallocate"(%29) <{force = false}> : (tensor<64x128xf32, #ttnn_layout2>) -> () - "ttnn.deallocate"(%28) <{force = false}> : (tensor<64x128xf32, #ttnn_layout1>) -> () - "ttnn.deallocate"(%26) <{force = false}> : (tensor<64x128xf32, #ttnn_layout1>) -> () - %31 = "ttnn.from_device"(%6) : (tensor<64x128xf32, #ttnn_layout2>) -> tensor<64x128xf32, #ttnn_layout> - "ttnn.deallocate"(%5) <{force = false}> : (tensor<64x128xf32, #ttnn_layout2>) -> () - return %31 : tensor<64x128xf32, #ttnn_layout> + %26 = "ttnn.from_device"(%5) : (tensor<64x128xf32, #ttnn_layout2>) -> tensor<64x128xf32, #ttnn_layout> + return %26 : tensor<64x128xf32, #ttnn_layout> } } diff --git a/test/ttmlir/Dialect/TTNN/ttir_to_ttnn_pipeline_custom_opt.mlir b/test/ttmlir/Dialect/TTNN/ttir_to_ttnn_pipeline_custom_opt.mlir index 63d263365c..0ebfa2625e 100644 --- a/test/ttmlir/Dialect/TTNN/ttir_to_ttnn_pipeline_custom_opt.mlir +++ b/test/ttmlir/Dialect/TTNN/ttir_to_ttnn_pipeline_custom_opt.mlir @@ -2,9 +2,8 @@ module attributes {} { func.func @forward(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xf32> { // CHECK: #[[LAYOUT_1:.*]] = #ttnn.ttnn_layout<(d0, d1) -> (d0, d1), <1x1>, memref<2x4x!tt.tile<32x32, f32>, #dram>, > - // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]] %0 = tensor.empty() : tensor<64x128xf32> - // CHECK: %[[C:.*]] = "ttnn.multiply"[[C:.*]] -> tensor<64x128xf32, #[[LAYOUT_1:.*]]> + // CHECK: %{{.*}} = "ttnn.multiply"{{.*}} -> tensor<64x128xf32, #[[LAYOUT_1:.*]]> %1 = "ttir.multiply"(%arg0, %arg1, %0) <{operandSegmentSizes = array}> : 
(tensor<64x128xf32>, tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32> return %1 : tensor<64x128xf32> } diff --git a/test/ttmlir/Dialect/TTNN/ttir_to_ttnn_pipeline_hoist_call.mlir b/test/ttmlir/Dialect/TTNN/ttir_to_ttnn_pipeline_hoist_call.mlir index 0eeabfbf3a..ae704bc8bf 100644 --- a/test/ttmlir/Dialect/TTNN/ttir_to_ttnn_pipeline_hoist_call.mlir +++ b/test/ttmlir/Dialect/TTNN/ttir_to_ttnn_pipeline_hoist_call.mlir @@ -3,9 +3,8 @@ module attributes {} { // CHECK-DAG: #{{.*}} = #ttnn.ttnn_layout<(d0, d1) -> (d0, d1), <1x1>, memref<64x128xf32, #system_memory>> // CHECK-DAG: #{{.*}} = #ttnn.ttnn_layout<(d0, d1) -> (d0, d1), <1x1>, memref<2x4x!tt.tile<32x32, f32>, #dram>, > func.func @forward(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xf32> { - // CHECK: %{{.*}} = "ttnn.empty"(%{{.*}}) %0 = tensor.empty() : tensor<64x128xf32> - // CHECK: %{{.*}} = "ttnn.multiply"(%{{.*}}, %{{.*}}, %{{.*}}) + // CHECK: %{{.*}} = "ttnn.multiply"(%{{.*}}, %{{.*}}) %1 = "ttir.multiply"(%arg0, %arg1, %0) <{operandSegmentSizes = array}> : (tensor<64x128xf32>, tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32> // CHECK: %{{.*}} = "ttnn.ones" %2 = "ttir.ones"() <{shape = array}> : () -> tensor<64x128xf32> @@ -15,13 +14,12 @@ module attributes {} { // CHECK: %{{.*}} = "ttnn.zeros" %4 = "ttir.zeros"() <{shape = array}> : () -> tensor<64x128xf32> %5 = call @hoisted_func_decl(%arg0, %3, %4) {ttir.hoisted_call} : (tensor<64x128xf32>, tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32> - // CHECK: %{{.*}} = "ttnn.empty"(%{{.*}}) %6 = tensor.empty() : tensor<64x128xf32> // CHECK: %{{.*}} = "ttnn.to_layout"(%{{.*}}) <{layout = #ttnn.layout<{{.*}}>}> : (tensor<[[DIMS:.*]], #{{.*}}>) -> tensor<[[DIMS]], #{{.*}}> // CHECK: %{{.*}} = "ttnn.to_device"(%{{.*}}, %{{.*}}) <{memory_config = {{.*}}}> : (tensor<[[DIMS:.*]], #{{.*}}>, !tt.device<#{{.*}}>) -> tensor<[[DIMS]], #{{.*}}> // CHECK: %{{.*}} = "ttnn.to_layout"(%{{.*}}) <{layout = #ttnn.layout<{{.*}}>}> : (tensor<[[DIMS:.*]], #{{.*}}>) -> tensor<[[DIMS]], #{{.*}}> // CHECK: %{{.*}} = "ttnn.to_device"(%{{.*}}, %{{.*}}) <{memory_config = {{.*}}}> : (tensor<[[DIMS:.*]], #{{.*}}>, !tt.device<#{{.*}}>) -> tensor<[[DIMS]], #{{.*}}> - // CHECK: %{{.*}} = "ttnn.multiply"(%{{.*}}, %{{.*}}, %{{.*}}) + // CHECK: %{{.*}} = "ttnn.multiply"(%{{.*}}, %{{.*}}) %7 = "ttir.multiply"(%3, %5, %6) <{operandSegmentSizes = array}> : (tensor<64x128xf32>, tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32> return %7 : tensor<64x128xf32> } diff --git a/test/ttmlir/EmitC/TTNN/eltwise_unary/cbrt.mlir b/test/ttmlir/EmitC/TTNN/eltwise_unary/cbrt.mlir index 4ad4d1b5fb..052b8f921d 100644 --- a/test/ttmlir/EmitC/TTNN/eltwise_unary/cbrt.mlir +++ b/test/ttmlir/EmitC/TTNN/eltwise_unary/cbrt.mlir @@ -2,6 +2,9 @@ // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %basename_t.ttnn // RUN: ttmlir-opt --ttnn-modify-signatures-for-dylib --convert-ttnn-to-emitc %t.mlir > %t2.mlir // RUN: ttmlir-translate --mlir-to-cpp %t2.mlir > %basename_t.cpp +// UNSUPPORTED: true +// This test fails when run as part of the test group but passes when run individually.
+// Related issue: https://github.com/tenstorrent/tt-mlir/issues/2261 func.func @cbrt(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> { %0 = tensor.empty() : tensor<64x128xf32> diff --git a/test/ttmlir/Silicon/StableHLO/n150/Binary/add_op.mlir b/test/ttmlir/Silicon/StableHLO/n150/Binary/add_op.mlir index e6524689c5..1efbc30b8f 100644 --- a/test/ttmlir/Silicon/StableHLO/n150/Binary/add_op.mlir +++ b/test/ttmlir/Silicon/StableHLO/n150/Binary/add_op.mlir @@ -9,11 +9,9 @@ module @jit_eltwise_add attributes {} { func.func public @test_add(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xf32> { // CHECK-LABEL: func.func public @test_add - // CHECK: ttnn.empty // CHECK: ttnn.add // CHECK-SAME: tensor<64x128xf32, // CHECK-SAME: tensor<64x128xf32, - // CHECK-SAME: tensor<64x128xf32, // CHECK-SAME: -> tensor<64x128xf32, %0 = stablehlo.add %arg0, %arg1 : tensor<64x128xf32> return %0 : tensor<64x128xf32> diff --git a/test/ttmlir/Silicon/StableHLO/n150/Binary/compare_op.mlir b/test/ttmlir/Silicon/StableHLO/n150/Binary/compare_op.mlir index 4ba35b125b..83e4b1527d 100644 --- a/test/ttmlir/Silicon/StableHLO/n150/Binary/compare_op.mlir +++ b/test/ttmlir/Silicon/StableHLO/n150/Binary/compare_op.mlir @@ -9,11 +9,9 @@ module @jit_eltwise_compare attributes {} { func.func public @test_eq(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xi1> { // CHECK-LABEL: func.func public @test_eq - // CHECK: ttnn.empty // CHECK: ttnn.eq // CHECK-SAME: tensor<64x128xf32, // CHECK-SAME: tensor<64x128xf32, - // CHECK-SAME: tensor<64x128xbf16, // CHECK-SAME: -> tensor<64x128xbf16, %0 = stablehlo.compare EQ, %arg0, %arg1 : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xi1> return %0 : tensor<64x128xi1> @@ -21,11 +19,9 @@ module @jit_eltwise_compare attributes {} { func.func public @test_ne(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xi1> { // CHECK-LABEL: func.func public @test_ne - // CHECK: ttnn.empty // CHECK: ttnn.ne // CHECK-SAME: tensor<64x128xf32, // CHECK-SAME: tensor<64x128xf32, - // CHECK-SAME: tensor<64x128xbf16, // CHECK-SAME: -> tensor<64x128xbf16, %0 = stablehlo.compare NE, %arg0, %arg1 : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xi1> return %0 : tensor<64x128xi1> @@ -33,11 +29,9 @@ module @jit_eltwise_compare attributes {} { func.func public @test_ge(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xi1> { // CHECK-LABEL: func.func public @test_ge - // CHECK: ttnn.empty // CHECK: ttnn.ge // CHECK-SAME: tensor<64x128xf32, // CHECK-SAME: tensor<64x128xf32, - // CHECK-SAME: tensor<64x128xbf16, // CHECK-SAME: -> tensor<64x128xbf16, %0 = stablehlo.compare GE, %arg0, %arg1 : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xi1> return %0 : tensor<64x128xi1> @@ -45,11 +39,9 @@ module @jit_eltwise_compare attributes {} { func.func public @test_gt(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xi1> { // CHECK-LABEL: func.func public @test_gt - // CHECK: ttnn.empty // CHECK: ttnn.gt // CHECK-SAME: tensor<64x128xf32, // CHECK-SAME: tensor<64x128xf32, - // CHECK-SAME: tensor<64x128xbf16, // CHECK-SAME: -> tensor<64x128xbf16, %0 = stablehlo.compare GT, %arg0, %arg1 : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xi1> return %0 : tensor<64x128xi1> @@ -57,11 +49,9 @@ module @jit_eltwise_compare attributes {} { func.func public @test_le(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xi1> { // CHECK-LABEL: func.func public @test_le - // CHECK: 
ttnn.empty // CHECK: ttnn.le // CHECK-SAME: tensor<64x128xf32, // CHECK-SAME: tensor<64x128xf32, - // CHECK-SAME: tensor<64x128xbf16, // CHECK-SAME: -> tensor<64x128xbf16, %0 = stablehlo.compare LE, %arg0, %arg1 : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xi1> return %0 : tensor<64x128xi1> @@ -69,11 +59,9 @@ module @jit_eltwise_compare attributes {} { func.func public @test_lt(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xi1> { // CHECK-LABEL: func.func public @test_lt - // CHECK: ttnn.empty // CHECK: ttnn.lt // CHECK-SAME: tensor<64x128xf32, // CHECK-SAME: tensor<64x128xf32, - // CHECK-SAME: tensor<64x128xbf16, // CHECK-SAME: -> tensor<64x128xbf16, %0 = stablehlo.compare LT, %arg0, %arg1 : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xi1> return %0 : tensor<64x128xi1> diff --git a/test/ttmlir/Silicon/StableHLO/n150/Binary/divide_op.mlir b/test/ttmlir/Silicon/StableHLO/n150/Binary/divide_op.mlir index 6d27885e69..a59add950d 100644 --- a/test/ttmlir/Silicon/StableHLO/n150/Binary/divide_op.mlir +++ b/test/ttmlir/Silicon/StableHLO/n150/Binary/divide_op.mlir @@ -9,11 +9,9 @@ module @jit_eltwise_divice attributes {} { func.func public @test_divide(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xf32> { // CHECK-LABEL: func.func public @test_divide - // CHECK: ttnn.empty // CHECK: ttnn.div // CHECK-SAME: tensor<64x128xf32, // CHECK-SAME: tensor<64x128xf32, - // CHECK-SAME: tensor<64x128xf32, // CHECK-SAME: -> tensor<64x128xf32, %0 = stablehlo.divide %arg0, %arg1 : tensor<64x128xf32> return %0 : tensor<64x128xf32> diff --git a/test/ttmlir/Silicon/StableHLO/n150/Binary/logical_op.mlir b/test/ttmlir/Silicon/StableHLO/n150/Binary/logical_op.mlir index dcfc073f95..f625862c49 100644 --- a/test/ttmlir/Silicon/StableHLO/n150/Binary/logical_op.mlir +++ b/test/ttmlir/Silicon/StableHLO/n150/Binary/logical_op.mlir @@ -9,11 +9,9 @@ module @jit_eltwise_compare attributes {} { func.func public @logical_and(%arg0: tensor<64x128xi1>, %arg1: tensor<64x128xi1>) -> tensor<64x128xi1> { // CHECK-LABEL: func.func public @logical_and - // CHECK: ttnn.empty // CHECK: ttnn.logical_and // CHECK-SAME: tensor<64x128xbf16, // CHECK-SAME: tensor<64x128xbf16, - // CHECK-SAME: tensor<64x128xbf16, // CHECK-SAME: -> tensor<64x128xbf16, %0 = stablehlo.and %arg0, %arg1 : tensor<64x128xi1> return %0 : tensor<64x128xi1> @@ -21,11 +19,9 @@ module @jit_eltwise_compare attributes {} { func.func public @logical_or(%arg0: tensor<64x128xi1>, %arg1: tensor<64x128xi1>) -> tensor<64x128xi1> { // CHECK-LABEL: func.func public @logical_or - // CHECK: ttnn.empty // CHECK: ttnn.logical_or // CHECK-SAME: tensor<64x128xbf16, // CHECK-SAME: tensor<64x128xbf16, - // CHECK-SAME: tensor<64x128xbf16, // CHECK-SAME: -> tensor<64x128xbf16, %0 = stablehlo.or %arg0, %arg1 : tensor<64x128xi1> return %0 : tensor<64x128xi1> @@ -33,11 +29,9 @@ module @jit_eltwise_compare attributes {} { func.func public @logical_xor(%arg0: tensor<64x128xi1>, %arg1: tensor<64x128xi1>) -> tensor<64x128xi1> { // CHECK-LABEL: func.func public @logical_xor - // CHECK: ttnn.empty // CHECK: ttnn.logical_xor // CHECK-SAME: tensor<64x128xbf16, // CHECK-SAME: tensor<64x128xbf16, - // CHECK-SAME: tensor<64x128xbf16, // CHECK-SAME: -> tensor<64x128xbf16, %0 = stablehlo.xor %arg0, %arg1 : tensor<64x128xi1> return %0 : tensor<64x128xi1> diff --git a/test/ttmlir/Silicon/StableHLO/n150/Binary/maximum_op.mlir b/test/ttmlir/Silicon/StableHLO/n150/Binary/maximum_op.mlir index a5c9bf6e63..620f37ff32 100644 --- 
a/test/ttmlir/Silicon/StableHLO/n150/Binary/maximum_op.mlir +++ b/test/ttmlir/Silicon/StableHLO/n150/Binary/maximum_op.mlir @@ -9,11 +9,9 @@ module @jit_eltwise_maximum attributes {} { func.func public @test_maximum(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xf32> { // CHECK-LABEL: func.func public @test_maximum - // CHECK: ttnn.empty // CHECK: ttnn.maximum // CHECK-SAME: tensor<64x128xf32, // CHECK-SAME: tensor<64x128xf32, - // CHECK-SAME: tensor<64x128xf32, // CHECK-SAME: -> tensor<64x128xf32, %0 = stablehlo.maximum %arg0, %arg1 : tensor<64x128xf32> return %0 : tensor<64x128xf32> diff --git a/test/ttmlir/Silicon/StableHLO/n150/Binary/minimum_op.mlir b/test/ttmlir/Silicon/StableHLO/n150/Binary/minimum_op.mlir index b72cc78d56..d0f7566342 100644 --- a/test/ttmlir/Silicon/StableHLO/n150/Binary/minimum_op.mlir +++ b/test/ttmlir/Silicon/StableHLO/n150/Binary/minimum_op.mlir @@ -9,11 +9,9 @@ module @jit_eltwise_minimum attributes {} { func.func public @test_minimum(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xf32> { // CHECK-LABEL: func.func public @test_minimum - // CHECK: ttnn.empty // CHECK: ttnn.minimum // CHECK-SAME: tensor<64x128xf32, // CHECK-SAME: tensor<64x128xf32, - // CHECK-SAME: tensor<64x128xf32, // CHECK-SAME: -> tensor<64x128xf32, %0 = stablehlo.minimum %arg0, %arg1 : tensor<64x128xf32> return %0 : tensor<64x128xf32> diff --git a/test/ttmlir/Silicon/StableHLO/n150/Binary/multiply_op.mlir b/test/ttmlir/Silicon/StableHLO/n150/Binary/multiply_op.mlir index b5937abe72..c68cd5654f 100644 --- a/test/ttmlir/Silicon/StableHLO/n150/Binary/multiply_op.mlir +++ b/test/ttmlir/Silicon/StableHLO/n150/Binary/multiply_op.mlir @@ -9,11 +9,9 @@ module @jit_eltwise_multiply attributes {} { func.func public @test_multiply(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xf32> { // CHECK-LABEL: func.func public @test_multiply - // CHECK: ttnn.empty // CHECK: ttnn.multiply // CHECK-SAME: tensor<64x128xf32, // CHECK-SAME: tensor<64x128xf32, - // CHECK-SAME: tensor<64x128xf32, // CHECK-SAME: -> tensor<64x128xf32, %0 = stablehlo.multiply %arg0, %arg1 : tensor<64x128xf32> return %0 : tensor<64x128xf32> diff --git a/test/ttmlir/Silicon/StableHLO/n150/Binary/power_op.mlir b/test/ttmlir/Silicon/StableHLO/n150/Binary/power_op.mlir index 83f2aafe0b..31b72ad49d 100644 --- a/test/ttmlir/Silicon/StableHLO/n150/Binary/power_op.mlir +++ b/test/ttmlir/Silicon/StableHLO/n150/Binary/power_op.mlir @@ -8,11 +8,9 @@ module @jit_eltwise_power attributes {} { func.func public @test_power(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xf32> { // CHECK-LABEL: func.func public @test_power - // CHECK: ttnn.empty // CHECK: ttnn.pow // CHECK-SAME: tensor<64x128xf32, // CHECK-SAME: tensor<64x128xf32, - // CHECK-SAME: tensor<64x128xf32, // CHECK-SAME: -> tensor<64x128xf32, %0 = stablehlo.power %arg0, %arg1 : tensor<64x128xf32> return %0 : tensor<64x128xf32> diff --git a/test/ttmlir/Silicon/StableHLO/n150/Binary/remainder_op.mlir b/test/ttmlir/Silicon/StableHLO/n150/Binary/remainder_op.mlir index 9444d20f70..b4100bad18 100644 --- a/test/ttmlir/Silicon/StableHLO/n150/Binary/remainder_op.mlir +++ b/test/ttmlir/Silicon/StableHLO/n150/Binary/remainder_op.mlir @@ -9,11 +9,9 @@ module @jit_eltwise_remainder attributes {} { func.func public @test_remainder(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xf32> { // CHECK-LABEL: func.func public @test_remainder - // CHECK: ttnn.empty // CHECK: ttnn.remainder // CHECK-SAME: 
tensor<64x128xf32, // CHECK-SAME: tensor<64x128xf32, - // CHECK-SAME: tensor<64x128xf32, // CHECK-SAME: -> tensor<64x128xf32, %0 = stablehlo.remainder %arg0, %arg1 : tensor<64x128xf32> return %0 : tensor<64x128xf32> diff --git a/test/ttmlir/Silicon/StableHLO/n150/Binary/subtract_op.mlir b/test/ttmlir/Silicon/StableHLO/n150/Binary/subtract_op.mlir index e9169110c0..53dc74d65c 100644 --- a/test/ttmlir/Silicon/StableHLO/n150/Binary/subtract_op.mlir +++ b/test/ttmlir/Silicon/StableHLO/n150/Binary/subtract_op.mlir @@ -9,11 +9,9 @@ module @jit_eltwise_subtract attributes {} { func.func public @test_subtract(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xf32> { // CHECK-LABEL: func.func public @test_subtract - // CHECK: ttnn.empty // CHECK: ttnn.subtract // CHECK-SAME: tensor<64x128xf32, // CHECK-SAME: tensor<64x128xf32, - // CHECK-SAME: tensor<64x128xf32, // CHECK-SAME: -> tensor<64x128xf32, %0 = stablehlo.subtract %arg0, %arg1 : tensor<64x128xf32> return %0 : tensor<64x128xf32> diff --git a/test/ttmlir/Silicon/StableHLO/n150/Unary/absolute_op.mlir b/test/ttmlir/Silicon/StableHLO/n150/Unary/absolute_op.mlir index 915238179b..36735c844e 100644 --- a/test/ttmlir/Silicon/StableHLO/n150/Unary/absolute_op.mlir +++ b/test/ttmlir/Silicon/StableHLO/n150/Unary/absolute_op.mlir @@ -9,10 +9,8 @@ module @jit_eltwise_abs attributes {} { func.func public @test_abs(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> { // CHECK-LABEL: func.func public @test_abs - // CHECK: ttnn.empty // CHECK: ttnn.abs // CHECK-SAME: tensor<64x128xf32, - // CHECK-SAME: tensor<64x128xf32, // CHECK-SAME: -> tensor<64x128xf32, %0 = stablehlo.abs %arg0 : tensor<64x128xf32> return %0 : tensor<64x128xf32> diff --git a/test/ttmlir/Silicon/StableHLO/n150/Unary/cbrt_op.mlir b/test/ttmlir/Silicon/StableHLO/n150/Unary/cbrt_op.mlir index 5bfbaf9f6d..91eb994328 100644 --- a/test/ttmlir/Silicon/StableHLO/n150/Unary/cbrt_op.mlir +++ b/test/ttmlir/Silicon/StableHLO/n150/Unary/cbrt_op.mlir @@ -9,10 +9,8 @@ module @jit_eltwise_rsqrt attributes {} { func.func public @test_cbrt(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> { // CHECK-LABEL: func.func public @test_cbrt - // CHECK: ttnn.empty // CHECK: ttnn.cbrt // CHECK-SAME: tensor<64x128xf32, - // CHECK-SAME: tensor<64x128xf32, // CHECK-SAME: -> tensor<64x128xf32, %0 = stablehlo.cbrt %arg0 : tensor<64x128xf32> return %0 : tensor<64x128xf32> diff --git a/test/ttmlir/Silicon/StableHLO/n150/Unary/ceil_op.mlir b/test/ttmlir/Silicon/StableHLO/n150/Unary/ceil_op.mlir index 911b775dae..691ea5826e 100644 --- a/test/ttmlir/Silicon/StableHLO/n150/Unary/ceil_op.mlir +++ b/test/ttmlir/Silicon/StableHLO/n150/Unary/ceil_op.mlir @@ -9,10 +9,8 @@ module @jit_eltwise_ceil attributes {} { func.func public @test_ceil(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> { // CHECK-LABEL: func.func public @test_ceil - // CHECK: ttnn.empty // CHECK: ttnn.ceil // CHECK-SAME: tensor<64x128xf32, - // CHECK-SAME: tensor<64x128xf32, // CHECK-SAME: -> tensor<64x128xf32, %0 = stablehlo.ceil %arg0 : tensor<64x128xf32> return %0 : tensor<64x128xf32> diff --git a/test/ttmlir/Silicon/StableHLO/n150/Unary/clamp_op.mlir b/test/ttmlir/Silicon/StableHLO/n150/Unary/clamp_op.mlir index 9e625b549f..1beab078d4 100644 --- a/test/ttmlir/Silicon/StableHLO/n150/Unary/clamp_op.mlir +++ b/test/ttmlir/Silicon/StableHLO/n150/Unary/clamp_op.mlir @@ -21,17 +21,13 @@ module @jit_transpose attributes {} { func.func public @test_clamp_tensor(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>, %arg2: tensor<64x128xf32>) -> 
tensor<64x128xf32> { // CHECK-LABEL: func.func public @test_clamp_tensor - // CHECK: ttnn.empty // CHECK: %[[MAX:.*]] = "ttnn.maximum" // CHECK-SAME: tensor<64x128xf32, // CHECK-SAME: tensor<64x128xf32, - // CHECK-SAME: tensor<64x128xf32, // CHECK-SAME: -> tensor<64x128xf32, - // CHECK: ttnn.empty // CHECK: "ttnn.minimum"(%[[MAX]] // CHECK-SAME: tensor<64x128xf32, // CHECK-SAME: tensor<64x128xf32, - // CHECK-SAME: tensor<64x128xf32, // CHECK-SAME: -> tensor<64x128xf32, %0 = stablehlo.clamp %arg1, %arg0, %arg2 : tensor<64x128xf32> return %0 : tensor<64x128xf32> diff --git a/test/ttmlir/Silicon/StableHLO/n150/Unary/cosine_op.mlir b/test/ttmlir/Silicon/StableHLO/n150/Unary/cosine_op.mlir index 885a293e72..1552637846 100644 --- a/test/ttmlir/Silicon/StableHLO/n150/Unary/cosine_op.mlir +++ b/test/ttmlir/Silicon/StableHLO/n150/Unary/cosine_op.mlir @@ -9,10 +9,8 @@ module @jit_eltwise_cosine attributes {} { func.func public @test_cosine(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> { // CHECK-LABEL: func.func public @test_cosine - // CHECK: ttnn.empty // CHECK: ttnn.cos // CHECK-SAME: tensor<64x128xf32, - // CHECK-SAME: tensor<64x128xf32, // CHECK-SAME: -> tensor<64x128xf32, %0 = stablehlo.cosine %arg0 : tensor<64x128xf32> return %0 : tensor<64x128xf32> diff --git a/test/ttmlir/Silicon/StableHLO/n150/Unary/exponential_minus_one_op.mlir b/test/ttmlir/Silicon/StableHLO/n150/Unary/exponential_minus_one_op.mlir index 30cd1e8c7d..9dc0390705 100644 --- a/test/ttmlir/Silicon/StableHLO/n150/Unary/exponential_minus_one_op.mlir +++ b/test/ttmlir/Silicon/StableHLO/n150/Unary/exponential_minus_one_op.mlir @@ -9,10 +9,8 @@ module @jit_eltwise_expm1 attributes {} { func.func public @test_expm1(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> { // CHECK-LABEL: func.func public @test_expm1 - // CHECK: ttnn.empty // CHECK: ttnn.expm1 // CHECK-SAME: tensor<64x128xf32, - // CHECK-SAME: tensor<64x128xf32, // CHECK-SAME: -> tensor<64x128xf32, %0 = stablehlo.exponential_minus_one %arg0 : tensor<64x128xf32> return %0 : tensor<64x128xf32> diff --git a/test/ttmlir/Silicon/StableHLO/n150/Unary/exponential_op.mlir b/test/ttmlir/Silicon/StableHLO/n150/Unary/exponential_op.mlir index 47a2d6443d..c5357eaee5 100644 --- a/test/ttmlir/Silicon/StableHLO/n150/Unary/exponential_op.mlir +++ b/test/ttmlir/Silicon/StableHLO/n150/Unary/exponential_op.mlir @@ -9,10 +9,8 @@ module @jit_eltwise_exp attributes {} { func.func public @test_exp(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> { // CHECK-LABEL: func.func public @test_exp - // CHECK: ttnn.empty // CHECK: ttnn.exp // CHECK-SAME: tensor<64x128xf32, - // CHECK-SAME: tensor<64x128xf32, // CHECK-SAME: -> tensor<64x128xf32, %0 = stablehlo.exponential %arg0 : tensor<64x128xf32> return %0 : tensor<64x128xf32> diff --git a/test/ttmlir/Silicon/StableHLO/n150/Unary/floor_op.mlir b/test/ttmlir/Silicon/StableHLO/n150/Unary/floor_op.mlir index 9dbde4c262..4f81859cc6 100644 --- a/test/ttmlir/Silicon/StableHLO/n150/Unary/floor_op.mlir +++ b/test/ttmlir/Silicon/StableHLO/n150/Unary/floor_op.mlir @@ -9,10 +9,8 @@ module @jit_eltwise_floor attributes {} { func.func public @test_floor(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> { // CHECK-LABEL: func.func public @test_floor - // CHECK: ttnn.empty // CHECK: ttnn.floor // CHECK-SAME: tensor<64x128xf32, - // CHECK-SAME: tensor<64x128xf32, // CHECK-SAME: -> tensor<64x128xf32, %0 = stablehlo.floor %arg0 : tensor<64x128xf32> return %0 : tensor<64x128xf32> diff --git a/test/ttmlir/Silicon/StableHLO/n150/Unary/isfinite_op.mlir 
b/test/ttmlir/Silicon/StableHLO/n150/Unary/isfinite_op.mlir index 35682c8c0b..ba2a6b7749 100644 --- a/test/ttmlir/Silicon/StableHLO/n150/Unary/isfinite_op.mlir +++ b/test/ttmlir/Silicon/StableHLO/n150/Unary/isfinite_op.mlir @@ -9,10 +9,8 @@ module @jit_eltwise_isfinite attributes {} { func.func public @test_isfinite(%arg0: tensor<64x128xbf16>) -> tensor<64x128xi1> { // CHECK-LABEL: func.func public @test_isfinite - // CHECK: ttnn.empty // CHECK: ttnn.isfinite // CHECK-SAME: tensor<64x128xbf16, - // CHECK-SAME: tensor<64x128xbf16, // CHECK-SAME: -> tensor<64x128xbf16, %0 = stablehlo.is_finite %arg0 : (tensor<64x128xbf16>) -> tensor<64x128xi1> return %0 : tensor<64x128xi1> diff --git a/test/ttmlir/Silicon/StableHLO/n150/Unary/log_plus_one_op.mlir b/test/ttmlir/Silicon/StableHLO/n150/Unary/log_plus_one_op.mlir index cd1d68344e..e4686eb150 100644 --- a/test/ttmlir/Silicon/StableHLO/n150/Unary/log_plus_one_op.mlir +++ b/test/ttmlir/Silicon/StableHLO/n150/Unary/log_plus_one_op.mlir @@ -9,10 +9,8 @@ module @jit_eltwise_log_plus_one attributes {} { func.func public @test_log_plus_one(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> { // CHECK-LABEL: func.func public @test_log_plus_one - // CHECK: ttnn.empty // CHECK: ttnn.log1p // CHECK-SAME: tensor<64x128xf32, - // CHECK-SAME: tensor<64x128xf32, // CHECK-SAME: -> tensor<64x128xf32, %0 = stablehlo.log_plus_one %arg0 : tensor<64x128xf32> return %0 : tensor<64x128xf32> diff --git a/test/ttmlir/Silicon/StableHLO/n150/Unary/logical_op.mlir b/test/ttmlir/Silicon/StableHLO/n150/Unary/logical_op.mlir index 489ad2e585..7380e96931 100644 --- a/test/ttmlir/Silicon/StableHLO/n150/Unary/logical_op.mlir +++ b/test/ttmlir/Silicon/StableHLO/n150/Unary/logical_op.mlir @@ -9,10 +9,8 @@ module @jit_eltwise_compare attributes {} { func.func public @logical_not(%arg0: tensor<64x128xi1>) -> tensor<64x128xi1> { // CHECK-LABEL: func.func public @logical_not - // CHECK: ttnn.empty // CHECK: ttnn.logical_not // CHECK-SAME: tensor<64x128xbf16, - // CHECK-SAME: tensor<64x128xbf16, // CHECK-SAME: -> tensor<64x128xbf16, %0 = stablehlo.not %arg0 : tensor<64x128xi1> return %0 : tensor<64x128xi1> diff --git a/test/ttmlir/Silicon/StableHLO/n150/Unary/negate_op.mlir b/test/ttmlir/Silicon/StableHLO/n150/Unary/negate_op.mlir index 3d970b28e7..11f620aec3 100644 --- a/test/ttmlir/Silicon/StableHLO/n150/Unary/negate_op.mlir +++ b/test/ttmlir/Silicon/StableHLO/n150/Unary/negate_op.mlir @@ -9,10 +9,8 @@ module @jit_eltwise_neg attributes {} { func.func public @test_neg(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> { // CHECK-LABEL: func.func public @test_neg - // CHECK: ttnn.empty // CHECK: ttnn.neg // CHECK-SAME: tensor<64x128xf32, - // CHECK-SAME: tensor<64x128xf32, // CHECK-SAME: -> tensor<64x128xf32, %0 = stablehlo.negate %arg0 : tensor<64x128xf32> return %0 : tensor<64x128xf32> diff --git a/test/ttmlir/Silicon/StableHLO/n150/Unary/rsqrt_op.mlir b/test/ttmlir/Silicon/StableHLO/n150/Unary/rsqrt_op.mlir index 6d0d2efc92..f487e07eef 100644 --- a/test/ttmlir/Silicon/StableHLO/n150/Unary/rsqrt_op.mlir +++ b/test/ttmlir/Silicon/StableHLO/n150/Unary/rsqrt_op.mlir @@ -9,10 +9,8 @@ module @jit_eltwise_rsqrt attributes {} { func.func public @test_rsqrt(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> { // CHECK-LABEL: func.func public @test_rsqrt - // CHECK: ttnn.empty // CHECK: ttnn.rsqrt // CHECK-SAME: tensor<64x128xf32, - // CHECK-SAME: tensor<64x128xf32, // CHECK-SAME: -> tensor<64x128xf32, %0 = stablehlo.rsqrt %arg0 : tensor<64x128xf32> return %0 : tensor<64x128xf32> diff --git 
a/test/ttmlir/Silicon/StableHLO/n150/Unary/sign_op.mlir b/test/ttmlir/Silicon/StableHLO/n150/Unary/sign_op.mlir index 6c31b87cc8..79136169fd 100644 --- a/test/ttmlir/Silicon/StableHLO/n150/Unary/sign_op.mlir +++ b/test/ttmlir/Silicon/StableHLO/n150/Unary/sign_op.mlir @@ -9,10 +9,8 @@ module @jit_eltwise_sign attributes {} { func.func public @test_sign(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> { // CHECK-LABEL: func.func public @test_sign - // CHECK: ttnn.empty // CHECK: ttnn.sign // CHECK-SAME: tensor<64x128xf32, - // CHECK-SAME: tensor<64x128xf32, // CHECK-SAME: -> tensor<64x128xf32, %0 = stablehlo.sign %arg0 : tensor<64x128xf32> return %0 : tensor<64x128xf32> diff --git a/test/ttmlir/Silicon/StableHLO/n150/Unary/sine_op.mlir b/test/ttmlir/Silicon/StableHLO/n150/Unary/sine_op.mlir index 36531bb0cf..cb95859011 100644 --- a/test/ttmlir/Silicon/StableHLO/n150/Unary/sine_op.mlir +++ b/test/ttmlir/Silicon/StableHLO/n150/Unary/sine_op.mlir @@ -9,10 +9,8 @@ module @jit_eltwise_sine attributes {} { func.func public @test_sine(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> { // CHECK-LABEL: func.func public @test_sine - // CHECK: ttnn.empty // CHECK: ttnn.sin // CHECK-SAME: tensor<64x128xf32, - // CHECK-SAME: tensor<64x128xf32, // CHECK-SAME: -> tensor<64x128xf32, %0 = stablehlo.sine %arg0 : tensor<64x128xf32> return %0 : tensor<64x128xf32> diff --git a/test/ttmlir/Silicon/StableHLO/n150/Unary/sqrt_op.mlir b/test/ttmlir/Silicon/StableHLO/n150/Unary/sqrt_op.mlir index feb6bd8ac9..07caccf34b 100644 --- a/test/ttmlir/Silicon/StableHLO/n150/Unary/sqrt_op.mlir +++ b/test/ttmlir/Silicon/StableHLO/n150/Unary/sqrt_op.mlir @@ -9,10 +9,8 @@ module @jit_eltwise_sqrt attributes {} { func.func public @test_sqrt(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> { // CHECK-LABEL: func.func public @test_sqrt - // CHECK: ttnn.empty // CHECK: ttnn.sqrt // CHECK-SAME: tensor<64x128xf32, - // CHECK-SAME: tensor<64x128xf32, // CHECK-SAME: -> tensor<64x128xf32, %0 = stablehlo.sqrt %arg0 : tensor<64x128xf32> return %0 : tensor<64x128xf32> diff --git a/test/ttmlir/Silicon/StableHLO/n150/composite_op.mlir b/test/ttmlir/Silicon/StableHLO/n150/composite_op.mlir index 9bb62c9e8c..cd457d0dd6 100644 --- a/test/ttmlir/Silicon/StableHLO/n150/composite_op.mlir +++ b/test/ttmlir/Silicon/StableHLO/n150/composite_op.mlir @@ -15,7 +15,6 @@ module @jit_eltwise_add attributes {} { // CHECK-LABEL: func.func public @main func.func public @main(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xf32> { - // CHECK: ttnn.empty // CHECK: ttnn.add %results = stablehlo.composite "jit_eltwise_add.my_add" %arg0, %arg1 { decomposition = @add_impl diff --git a/test/ttmlir/Silicon/StableHLO/n150/scalar_add_op.mlir b/test/ttmlir/Silicon/StableHLO/n150/scalar_add_op.mlir index 4e05a5040b..7e0987bd6c 100644 --- a/test/ttmlir/Silicon/StableHLO/n150/scalar_add_op.mlir +++ b/test/ttmlir/Silicon/StableHLO/n150/scalar_add_op.mlir @@ -8,11 +8,9 @@ module @jit_eltwise_scalar_add attributes {} { func.func public @test_scalar_add(%arg0: tensor<f32>, %arg1: tensor<f32>) -> tensor<f32> { // CHECK-LABEL: func.func public @test_scalar_add - // CHECK: ttnn.empty // CHECK: ttnn.add // CHECK-SAME: tensor<1xf32, // CHECK-SAME: tensor<1xf32, - // CHECK-SAME: tensor<1xf32, // CHECK-SAME: -> tensor<1xf32, %0 = stablehlo.add %arg0, %arg1 : tensor<f32> return %0 : tensor<f32> diff --git a/test/ttmlir/Silicon/TTNN/n150/deallocate.mlir b/test/ttmlir/Silicon/TTNN/n150/deallocate.mlir index aed0a83549..a26d8ac8e3 100644 --- a/test/ttmlir/Silicon/TTNN/n150/deallocate.mlir +++
b/test/ttmlir/Silicon/TTNN/n150/deallocate.mlir @@ -9,24 +9,21 @@ module @"dealloc_test" attributes {} { // CHECK: %{{.+}} = "ttnn.matmul"([[I1:%.+]], [[I2:%.+]], [[O1:%.+]]) {{.+}} -> tensor<1x256xf32, {{.+}}> %2 = tensor.empty() : tensor<1x256xf32> loc(#loc9) %3 = "ttir.add"(%1, %arg3, %2) <{operandSegmentSizes = array<i32: 2, 1>}> : (tensor<1x256xf32>, tensor<1x256xf32>, tensor<1x256xf32>) -> tensor<1x256xf32> loc(#loc9) - // CHECK: %{{.+}} = "ttnn.add"([[I1:%.+]], [[I2:%.+]], [[O2:%.+]]) {{.+}} -> tensor<1x256xf32, {{.+}}> + // CHECK: %{{.+}} = "ttnn.add"([[I1:%.+]], [[I2:%.+]]) {{.+}} -> tensor<1x256xf32, {{.+}}> // CHECK: "ttnn.deallocate"([[O1]]) {{.+}} : (tensor<1x256xf32, {{.+}}>) -> () %4 = tensor.empty() : tensor<1x256xf32> loc(#loc10) %5 = "ttir.relu"(%3, %4) <{operandSegmentSizes = array<i32: 1, 1>}> : (tensor<1x256xf32>, tensor<1x256xf32>) -> tensor<1x256xf32> loc(#loc10) - // CHECK: %{{.+}} = "ttnn.relu"([[I1:%.+]], [[O3:%.+]]) {{.+}} -> tensor<1x256xf32, {{.+}}> - // CHECK: "ttnn.deallocate"([[O2]]) {{.+}} : (tensor<1x256xf32, {{.+}}>) -> () + // CHECK: %{{.+}} = "ttnn.relu"([[I1:%.+]]) {{.+}} -> tensor<1x256xf32, {{.+}}> %6 = tensor.empty() : tensor<1x10xf32> loc(#loc11) %7 = "ttir.matmul"(%5, %arg2, %6) : (tensor<1x256xf32>, tensor<256x10xf32>, tensor<1x10xf32>) -> tensor<1x10xf32> loc(#loc11) // CHECK: %{{.+}} = "ttnn.matmul"([[I1:%.+]], [[I2:%.+]], [[O4:%.+]]) {{.+}} -> tensor<1x10xf32, {{.+}}> - // CHECK: "ttnn.deallocate"([[O3]]) {{.+}} : (tensor<1x256xf32, {{.+}}>) -> () %8 = tensor.empty() : tensor<1x10xf32> loc(#loc12) %9 = "ttir.add"(%7, %arg1, %8) <{operandSegmentSizes = array<i32: 2, 1>}> : (tensor<1x10xf32>, tensor<1x10xf32>, tensor<1x10xf32>) -> tensor<1x10xf32> loc(#loc12) - // CHECK: %{{.+}} = "ttnn.add"([[I1:%.+]], [[I2:%.+]], [[O5:%.+]]) {{.+}} -> tensor<1x10xf32,{{.+}}> + // CHECK: %{{.+}} = "ttnn.add"([[I1:%.+]], [[I2:%.+]]) {{.+}} -> tensor<1x10xf32,{{.+}}> // CHECK: "ttnn.deallocate"([[O4]]) {{.+}} : (tensor<1x10xf32, {{.+}}>) -> () %10 = tensor.empty() : tensor<1x10xf32> loc(#loc13) %11 = "ttir.softmax"(%9, %10) <{dimension = 1 : si32}> : (tensor<1x10xf32>, tensor<1x10xf32>) -> tensor<1x10xf32> loc(#loc13) // CHECK: %{{.+}} = "ttnn.softmax"([[I1:%.+]]) {{.+}} -> tensor<1x10xf32, {{.+}}> - // CHECK: "ttnn.deallocate"([[O5]]) {{.+}} : (tensor<1x10xf32, {{.+}}>) -> () return %11 : tensor<1x10xf32> loc(#loc7) } loc(#loc) } loc(#loc) diff --git a/test/ttmlir/Silicon/TTNN/n150/eltwise/binary/add/add.mlir b/test/ttmlir/Silicon/TTNN/n150/eltwise/binary/add/add.mlir index 0774d60ded..a05783769b 100644 --- a/test/ttmlir/Silicon/TTNN/n150/eltwise/binary/add/add.mlir +++ b/test/ttmlir/Silicon/TTNN/n150/eltwise/binary/add/add.mlir @@ -3,9 +3,11 @@ // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn func.func @add(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xf32> { - // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]] %0 = tensor.empty() : tensor<64x128xf32> - // CHECK: %[[C:.*]] = "ttnn.add"[[C:.*]] %1 = "ttir.add"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>}> : (tensor<64x128xf32>, tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32> + // CHECK: "ttnn.add" + // CHECK-SAME: tensor<64x128xf32 + // CHECK-SAME: tensor<64x128xf32 + // CHECK-SAME: -> tensor<64x128xf32 return %1 : tensor<64x128xf32> } diff --git a/test/ttmlir/Silicon/TTNN/n150/eltwise/binary/add/add_int32.mlir b/test/ttmlir/Silicon/TTNN/n150/eltwise/binary/add/add_int32.mlir index 49d028086b..f2e298ac60 100644 --- a/test/ttmlir/Silicon/TTNN/n150/eltwise/binary/add/add_int32.mlir +++
b/test/ttmlir/Silicon/TTNN/n150/eltwise/binary/add/add_int32.mlir @@ -3,9 +3,11 @@ // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn func.func @addint32(%arg0: tensor<64x128xi32>, %arg1: tensor<64x128xi32>) -> tensor<64x128xi32> { - // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]] %0 = tensor.empty() : tensor<64x128xi32> - // CHECK: %[[C:.*]] = "ttnn.add"[[C:.*]] %1 = "ttir.add"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>}> : (tensor<64x128xi32>, tensor<64x128xi32>, tensor<64x128xi32>) -> tensor<64x128xi32> + // CHECK: "ttnn.add" + // CHECK-SAME: tensor<64x128xui32 + // CHECK-SAME: tensor<64x128xui32 + // CHECK-SAME: -> tensor<64x128xui32 return %1 : tensor<64x128xi32> } diff --git a/test/ttmlir/Silicon/TTNN/n150/simple_compare.mlir b/test/ttmlir/Silicon/TTNN/n150/eltwise/binary/compare/simple_compare.mlir similarity index 60% rename from test/ttmlir/Silicon/TTNN/n150/simple_compare.mlir rename to test/ttmlir/Silicon/TTNN/n150/eltwise/binary/compare/simple_compare.mlir index 5263c4fe41..d27b094a54 100644 --- a/test/ttmlir/Silicon/TTNN/n150/simple_compare.mlir +++ b/test/ttmlir/Silicon/TTNN/n150/eltwise/binary/compare/simple_compare.mlir @@ -3,80 +3,62 @@ // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn module attributes {} { func.func @equal(%arg0: tensor<13x31xf32>, %arg1: tensor<13x31xf32>) -> tensor<13x31xf32> { - // CHECK: %[[C:.*]] = "ttnn.empty - // CHECK-SAME: [[TENSOR:tensor<13x31xf32,]] %0 = tensor.empty() : tensor<13x31xf32> - // CHECK: %[[C:.*]] = "ttnn.eq" - // CHECK-SAME: [[TENSOR]] - // CHECK-SAME: [[TENSOR]] - // CHECK-SAME: [[TENSOR]] - // CHECK-SAME: -> [[TENSOR]] %1 = "ttir.eq"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>}> : (tensor<13x31xf32>, tensor<13x31xf32>, tensor<13x31xf32>) -> tensor<13x31xf32> + // CHECK: "ttnn.eq" + // CHECK-SAME: tensor<13x31xf32 + // CHECK-SAME: tensor<13x31xf32 + // CHECK-SAME: -> tensor<13x31xf32 return %1 : tensor<13x31xf32> } func.func @not_equal(%arg0: tensor<13x31xf32>, %arg1: tensor<13x31xf32>) -> tensor<13x31xf32> { - // CHECK: %[[C:.*]] = "ttnn.empty - // CHECK-SAME: [[TENSOR:tensor<13x31xf32,]] %0 = tensor.empty() : tensor<13x31xf32> - // CHECK: %[[C:.*]] = "ttnn.ne" - // CHECK-SAME: [[TENSOR]] - // CHECK-SAME: [[TENSOR]] - // CHECK-SAME: [[TENSOR]] - // CHECK-SAME: -> [[TENSOR]] %1 = "ttir.ne"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>}> : (tensor<13x31xf32>, tensor<13x31xf32>, tensor<13x31xf32>) -> tensor<13x31xf32> + // CHECK: "ttnn.ne" + // CHECK-SAME: tensor<13x31xf32 + // CHECK-SAME: tensor<13x31xf32 + // CHECK-SAME: -> tensor<13x31xf32 return %1 : tensor<13x31xf32> } func.func @greater_equal(%arg0: tensor<13x31xf32>, %arg1: tensor<13x31xf32>) -> tensor<13x31xf32> { - // CHECK: %[[C:.*]] = "ttnn.empty - // CHECK-SAME: [[TENSOR:tensor<13x31xf32,]] %0 = tensor.empty() : tensor<13x31xf32> - // CHECK: %[[C:.*]] = "ttnn.ge" - // CHECK-SAME: [[TENSOR]] - // CHECK-SAME: [[TENSOR]] - // CHECK-SAME: [[TENSOR]] - // CHECK-SAME: -> [[TENSOR]] %1 = "ttir.ge"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>}> : (tensor<13x31xf32>, tensor<13x31xf32>, tensor<13x31xf32>) -> tensor<13x31xf32> + // CHECK: "ttnn.ge" + // CHECK-SAME: tensor<13x31xf32 + // CHECK-SAME: tensor<13x31xf32 + // CHECK-SAME: -> tensor<13x31xf32 return %1 : tensor<13x31xf32> } func.func @greater_than(%arg0: tensor<13x31xf32>, %arg1: tensor<13x31xf32>) -> tensor<13x31xf32> { - // CHECK: %[[C:.*]] = "ttnn.empty - // CHECK-SAME: [[TENSOR:tensor<13x31xf32,]] %0 = tensor.empty() : tensor<13x31xf32> - // CHECK: %[[C:.*]] = "ttnn.gt" - //
CHECK-SAME: [[TENSOR]] - // CHECK-SAME: [[TENSOR]] - // CHECK-SAME: [[TENSOR]] - // CHECK-SAME: -> [[TENSOR]] %1 = "ttir.gt"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>}> : (tensor<13x31xf32>, tensor<13x31xf32>, tensor<13x31xf32>) -> tensor<13x31xf32> + // CHECK: "ttnn.gt" + // CHECK-SAME: tensor<13x31xf32 + // CHECK-SAME: tensor<13x31xf32 + // CHECK-SAME: -> tensor<13x31xf32 return %1 : tensor<13x31xf32> } func.func @less_equal(%arg0: tensor<13x31xf32>, %arg1: tensor<13x31xf32>) -> tensor<13x31xf32> { - // CHECK: %[[C:.*]] = "ttnn.empty - // CHECK-SAME: [[TENSOR:tensor<13x31xf32,]] %0 = tensor.empty() : tensor<13x31xf32> - // CHECK: %[[C:.*]] = "ttnn.le" - // CHECK-SAME: [[TENSOR]] - // CHECK-SAME: [[TENSOR]] - // CHECK-SAME: [[TENSOR]] - // CHECK-SAME: -> [[TENSOR]] %1 = "ttir.le"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>}> : (tensor<13x31xf32>, tensor<13x31xf32>, tensor<13x31xf32>) -> tensor<13x31xf32> + // CHECK: "ttnn.le" + // CHECK-SAME: tensor<13x31xf32 + // CHECK-SAME: tensor<13x31xf32 + // CHECK-SAME: -> tensor<13x31xf32 return %1 : tensor<13x31xf32> } func.func @less_than(%arg0: tensor<13x31xf32>, %arg1: tensor<13x31xf32>) -> tensor<13x31xf32> { - // CHECK: %[[C:.*]] = "ttnn.empty - // CHECK-SAME: [[TENSOR:tensor<13x31xf32,]] %0 = tensor.empty() : tensor<13x31xf32> - // CHECK: %[[C:.*]] = "ttnn.lt" - // CHECK-SAME: [[TENSOR]] - // CHECK-SAME: [[TENSOR]] - // CHECK-SAME: [[TENSOR]] - // CHECK-SAME: -> [[TENSOR]] %1 = "ttir.lt"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>}> : (tensor<13x31xf32>, tensor<13x31xf32>, tensor<13x31xf32>) -> tensor<13x31xf32> + // CHECK: "ttnn.lt" + // CHECK-SAME: tensor<13x31xf32 + // CHECK-SAME: tensor<13x31xf32 + // CHECK-SAME: -> tensor<13x31xf32 return %1 : tensor<13x31xf32> } } diff --git a/test/ttmlir/Silicon/TTNN/n150/eltwise/binary/div/div.mlir b/test/ttmlir/Silicon/TTNN/n150/eltwise/binary/div/div.mlir index 7ba2da3211..ca231e2c9f 100644 --- a/test/ttmlir/Silicon/TTNN/n150/eltwise/binary/div/div.mlir +++ b/test/ttmlir/Silicon/TTNN/n150/eltwise/binary/div/div.mlir @@ -3,9 +3,11 @@ // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn func.func @div(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xf32> { - // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]] %0 = tensor.empty() : tensor<64x128xf32> - // CHECK: %[[C:.*]] = "ttnn.div"[[C:.*]] %1 = "ttir.div"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>}> : (tensor<64x128xf32>, tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32> + // CHECK: "ttnn.div" + // CHECK-SAME: tensor<64x128xf32 + // CHECK-SAME: tensor<64x128xf32 + // CHECK-SAME: -> tensor<64x128xf32 return %1 : tensor<64x128xf32> } diff --git a/test/ttmlir/Silicon/TTNN/n150/eltwise/binary/ge/ge.mlir b/test/ttmlir/Silicon/TTNN/n150/eltwise/binary/ge/ge.mlir index 3449d485bf..28779fe997 100644 --- a/test/ttmlir/Silicon/TTNN/n150/eltwise/binary/ge/ge.mlir +++ b/test/ttmlir/Silicon/TTNN/n150/eltwise/binary/ge/ge.mlir @@ -3,9 +3,11 @@ // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn func.func @ge(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xf32> { - // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]] %0 = tensor.empty() : tensor<64x128xf32> - // CHECK: %[[C:.*]] = "ttnn.ge"[[C:.*]] %1 = "ttir.ge"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>}> : (tensor<64x128xf32>, tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32> + // CHECK: "ttnn.ge" + // CHECK-SAME: tensor<64x128xf32 + // CHECK-SAME: tensor<64x128xf32 + // CHECK-SAME: -> tensor<64x128xf32 return %1 :
tensor<64x128xf32> } diff --git a/test/ttmlir/Silicon/TTNN/n150/simple_logical.mlir b/test/ttmlir/Silicon/TTNN/n150/eltwise/binary/logical/simple_logical.mlir similarity index 54% rename from test/ttmlir/Silicon/TTNN/n150/simple_logical.mlir rename to test/ttmlir/Silicon/TTNN/n150/eltwise/binary/logical/simple_logical.mlir index 558f815c71..9242787619 100644 --- a/test/ttmlir/Silicon/TTNN/n150/simple_logical.mlir +++ b/test/ttmlir/Silicon/TTNN/n150/eltwise/binary/logical/simple_logical.mlir @@ -1,47 +1,34 @@ // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" %s > %t.mlir // RUN: FileCheck %s --input-file=%t.mlir // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn + module attributes {} { func.func @logical_and(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xf32> { %0 = tensor.empty() : tensor<64x128xf32> - // CHECK: {{.*}} = "ttnn.empty"{{.*}} %1 = "ttir.logical_and"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>}> : (tensor<64x128xf32>, tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32> - // CHECK: %[[C:.*]] = "ttnn.logical_and" - // CHECK-SAME: tensor<64x128xf32, - // CHECK-SAME: tensor<64x128xf32, - // CHECK-SAME: tensor<64x128xf32, - return %1 : tensor<64x128xf32> - } - - func.func @logical_not(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> { - %0 = tensor.empty() : tensor<64x128xf32> - // CHECK: {{.*}} = "ttnn.empty"{{.*}} - %1 = "ttir.logical_not"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32> - // CHECK: %[[C:.*]] = "ttnn.logical_not" - // CHECK-SAME: tensor<64x128xf32, - // CHECK-SAME: tensor<64x128xf32, + // CHECK: "ttnn.logical_and" + // CHECK-SAME: tensor<64x128xf32 + // CHECK-SAME: tensor<64x128xf32 + // CHECK-SAME: -> tensor<64x128xf32 return %1 : tensor<64x128xf32> } func.func @logical_or(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xf32> { %0 = tensor.empty() : tensor<64x128xf32> - // CHECK: {{.*}} = "ttnn.empty"{{.*}} %1 = "ttir.logical_or"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>}> : (tensor<64x128xf32>, tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32> - // CHECK: %[[C:.*]] = "ttnn.logical_or" - // CHECK-SAME: tensor<64x128xf32, - // CHECK-SAME: tensor<64x128xf32, - // CHECK-SAME: tensor<64x128xf32, + // CHECK: "ttnn.logical_or" + // CHECK-SAME: tensor<64x128xf32 + // CHECK-SAME: tensor<64x128xf32 + // CHECK-SAME: -> tensor<64x128xf32 return %1 : tensor<64x128xf32> } func.func @logical_xor(%arg0: tensor<64x128xbf16>, %arg1: tensor<64x128xbf16>) -> tensor<64x128xbf16> { - // CHECK: %{{[0-9]+}} = "ttnn.empty"{{.*}} [[TENSOR:tensor<64x128xbf16]] %0 = tensor.empty() : tensor<64x128xbf16> - // CHECK: %{{[0-9]+}} = "ttnn.logical_xor" - // CHECK-SAME: [[TENSOR]] - // CHECK-SAME: [[TENSOR]] - // CHECK-SAME: [[TENSOR]] - // CHECK-SAME: -> [[TENSOR]] + // CHECK: "ttnn.logical_xor" + // CHECK-SAME: tensor<64x128xbf16 + // CHECK-SAME: tensor<64x128xbf16 + // CHECK-SAME: -> tensor<64x128xbf16 %1 = "ttir.logical_xor"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>}> : (tensor<64x128xbf16>, tensor<64x128xbf16>, tensor<64x128xbf16>) -> tensor<64x128xbf16> return %1 : tensor<64x128xbf16> } diff --git a/test/ttmlir/Silicon/TTNN/n150/eltwise/binary/maximum/maximum.mlir b/test/ttmlir/Silicon/TTNN/n150/eltwise/binary/maximum/maximum.mlir index 2659cccdce..a8df2de34d 100644 --- a/test/ttmlir/Silicon/TTNN/n150/eltwise/binary/maximum/maximum.mlir +++
b/test/ttmlir/Silicon/TTNN/n150/eltwise/binary/maximum/maximum.mlir @@ -3,9 +3,11 @@ // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn func.func @maximum(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xf32> { - // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]] %0 = tensor.empty() : tensor<64x128xf32> - // CHECK: %[[C:.*]] = "ttnn.maximum"[[C:.*]] %1 = "ttir.maximum"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>}> : (tensor<64x128xf32>, tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32> + // CHECK: "ttnn.maximum" + // CHECK-SAME: tensor<64x128xf32 + // CHECK-SAME: tensor<64x128xf32 + // CHECK-SAME: -> tensor<64x128xf32 return %1 : tensor<64x128xf32> } diff --git a/test/ttmlir/Silicon/TTNN/n150/eltwise/binary/minimum/minimum.mlir b/test/ttmlir/Silicon/TTNN/n150/eltwise/binary/minimum/minimum.mlir index 8272db6e6a..36f892d2b9 100644 --- a/test/ttmlir/Silicon/TTNN/n150/eltwise/binary/minimum/minimum.mlir +++ b/test/ttmlir/Silicon/TTNN/n150/eltwise/binary/minimum/minimum.mlir @@ -3,14 +3,11 @@ // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn func.func @minimum(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xf32> { - // CHECK: %[[C:.*]] = "ttnn.empty" - // CHECK-SAME: [[TENSOR:tensor<64x128xf32,]] %0 = tensor.empty() : tensor<64x128xf32> - // CHECK: %[[C:.*]] = "ttnn.minimum" - // CHECK-SAME: [[TENSOR]] - // CHECK-SAME: [[TENSOR]] - // CHECK-SAME: [[TENSOR]] - // CHECK-SAME: -> [[TENSOR]] %1 = "ttir.minimum"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>}> : (tensor<64x128xf32>, tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32> + // CHECK: "ttnn.minimum" + // CHECK-SAME: tensor<64x128xf32 + // CHECK-SAME: tensor<64x128xf32 + // CHECK-SAME: -> tensor<64x128xf32 return %1 : tensor<64x128xf32> } diff --git a/test/ttmlir/Silicon/TTNN/n150/eltwise/binary/multiply/multiply.mlir b/test/ttmlir/Silicon/TTNN/n150/eltwise/binary/multiply/multiply.mlir index 2edabb7470..74beef9bf9 100644 --- a/test/ttmlir/Silicon/TTNN/n150/eltwise/binary/multiply/multiply.mlir +++ b/test/ttmlir/Silicon/TTNN/n150/eltwise/binary/multiply/multiply.mlir @@ -3,9 +3,11 @@ // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn func.func @multiply(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xf32> { - // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]] %0 = tensor.empty() : tensor<64x128xf32> - // CHECK: %[[C:.*]] = "ttnn.multiply"[[C:.*]] %1 = "ttir.multiply"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>}> : (tensor<64x128xf32>, tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32> + // CHECK: "ttnn.multiply" + // CHECK-SAME: tensor<64x128xf32 + // CHECK-SAME: tensor<64x128xf32 + // CHECK-SAME: -> tensor<64x128xf32 return %1 : tensor<64x128xf32> } diff --git a/test/ttmlir/Silicon/TTNN/n150/eltwise/binary/power/power.mlir b/test/ttmlir/Silicon/TTNN/n150/eltwise/binary/power/power.mlir index 7b4eefe9e1..7579c9410b 100644 --- a/test/ttmlir/Silicon/TTNN/n150/eltwise/binary/power/power.mlir +++ b/test/ttmlir/Silicon/TTNN/n150/eltwise/binary/power/power.mlir @@ -1,11 +1,13 @@ // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" %s > %t.mlir // RUN: FileCheck %s --input-file=%t.mlir // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn + func.func @power(%arg0: tensor<32x32xf32>, %arg1: tensor<32x32xf32>) -> tensor<32x32xf32> { %0 = tensor.empty() : tensor<32x32xf32> - // CHECK: %[[EMPTY:.*]] = "ttnn.empty"{{.*}} -> tensor<32x32xf32, {{.*}} %1 = "ttir.power"(%arg0,
%arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>}> : (tensor<32x32xf32>, tensor<32x32xf32>, tensor<32x32xf32>) -> tensor<32x32xf32> - // CHECK: %[[REM:[0-9]+]] = "ttnn.pow"({{.*}}, {{.*}}, %[[EMPTY]]){{.*}} -> tensor<32x32xf32, {{.*}} + // CHECK: "ttnn.pow" + // CHECK-SAME: tensor<32x32xf32 + // CHECK-SAME: tensor<32x32xf32 + // CHECK-SAME: -> tensor<32x32xf32 return %1 : tensor<32x32xf32> - // CHECK: return {{.*}} : tensor<32x32xf32, {{.*}} } diff --git a/test/ttmlir/Silicon/TTNN/n150/eltwise/binary/remainder/remainder.mlir b/test/ttmlir/Silicon/TTNN/n150/eltwise/binary/remainder/remainder.mlir index dbcff9d786..71eb85f36c 100644 --- a/test/ttmlir/Silicon/TTNN/n150/eltwise/binary/remainder/remainder.mlir +++ b/test/ttmlir/Silicon/TTNN/n150/eltwise/binary/remainder/remainder.mlir @@ -4,9 +4,10 @@ func.func @remainder(%arg0: tensor<32x32xf32>, %arg1: tensor<32x32xf32>) -> tensor<32x32xf32> { %0 = tensor.empty() : tensor<32x32xf32> - // CHECK: %[[EMPTY:.*]] = "ttnn.empty"{{.*}} -> tensor<32x32xf32, {{.*}} %1 = "ttir.remainder"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>}> : (tensor<32x32xf32>, tensor<32x32xf32>, tensor<32x32xf32>) -> tensor<32x32xf32> - // CHECK: %[[REM:[0-9]+]] = "ttnn.remainder"({{.*}}, {{.*}}, %[[EMPTY]]){{.*}} -> tensor<32x32xf32, {{.*}} + // CHECK: "ttnn.remainder" + // CHECK-SAME: tensor<32x32xf32 + // CHECK-SAME: tensor<32x32xf32 + // CHECK-SAME: -> tensor<32x32xf32 return %1 : tensor<32x32xf32> - // CHECK: return {{.*}} : tensor<32x32xf32, {{.*}} } diff --git a/test/ttmlir/Silicon/TTNN/n150/eltwise/binary/subtract/subtract.mlir b/test/ttmlir/Silicon/TTNN/n150/eltwise/binary/subtract/subtract.mlir index d7a84fd159..3719444c80 100644 --- a/test/ttmlir/Silicon/TTNN/n150/eltwise/binary/subtract/subtract.mlir +++ b/test/ttmlir/Silicon/TTNN/n150/eltwise/binary/subtract/subtract.mlir @@ -3,9 +3,11 @@ // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn func.func @subtract(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xf32> { - // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]] %0 = tensor.empty() : tensor<64x128xf32> - // CHECK: %[[C:.*]] = "ttnn.subtract"[[C:.*]] %1 = "ttir.subtract"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>}> : (tensor<64x128xf32>, tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32> + // CHECK: "ttnn.subtract" + // CHECK-SAME: tensor<64x128xf32 + // CHECK-SAME: tensor<64x128xf32 + // CHECK-SAME: -> tensor<64x128xf32 return %1 : tensor<64x128xf32> } diff --git a/test/ttmlir/Silicon/TTNN/n150/eltwise/ternary/where/where.mlir b/test/ttmlir/Silicon/TTNN/n150/eltwise/ternary/where/where.mlir index ced442b91f..9d450ebacc 100644 --- a/test/ttmlir/Silicon/TTNN/n150/eltwise/ternary/where/where.mlir +++ b/test/ttmlir/Silicon/TTNN/n150/eltwise/ternary/where/where.mlir @@ -7,8 +7,7 @@ func.func @test_where(%arg0: tensor<13x37xbf16>, %arg1: tensor<13x37xbf16>) -> t %1 = "ttir.eq"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>}> : (tensor<13x37xbf16>, tensor<13x37xbf16>, tensor<13x37xbf16>) -> tensor<13x37xbf16> %2 = tensor.empty() : tensor<13x37xbf16> %3 = "ttir.where"(%1, %arg0, %arg1, %2) <{operandSegmentSizes = array<i32: 3, 1>}> : (tensor<13x37xbf16>, tensor<13x37xbf16>, tensor<13x37xbf16>, tensor<13x37xbf16>) -> tensor<13x37xbf16> - // CHECK: %[[EMPTY:.*]] = "ttnn.empty"{{.*}} - // CHECK: %[[VAL1:[0-9]+]] = "ttnn.eq"(%arg0, %arg1, %[[EMPTY]]) - // CHECK: %{{[0-9]+}} = "ttnn.where"(%[[VAL1]], %arg0, %arg1, %{{[0-9]+}}) + // CHECK: "ttnn.eq" + // CHECK: "ttnn.where" return %3 : tensor<13x37xbf16> } diff --git
a/test/ttmlir/Silicon/TTNN/n150/eltwise/unary/cbrt/cbrt.mlir b/test/ttmlir/Silicon/TTNN/n150/eltwise/unary/cbrt/cbrt.mlir index a2833f7053..4a077e60d2 100644 --- a/test/ttmlir/Silicon/TTNN/n150/eltwise/unary/cbrt/cbrt.mlir +++ b/test/ttmlir/Silicon/TTNN/n150/eltwise/unary/cbrt/cbrt.mlir @@ -3,9 +3,10 @@ // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn func.func @cbrt(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> { - // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]] %0 = tensor.empty() : tensor<64x128xf32> - // CHECK: %[[C:.*]] = "ttnn.cbrt"[[C:.*]] %1 = "ttir.cbrt"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32> + // CHECK: "ttnn.cbrt" + // CHECK-SAME: tensor<64x128xf32 + // CHECK-SAME: -> tensor<64x128xf32 return %1 : tensor<64x128xf32> } diff --git a/test/ttmlir/Silicon/TTNN/n150/eltwise/unary/ceil/ceil.mlir b/test/ttmlir/Silicon/TTNN/n150/eltwise/unary/ceil/ceil.mlir index 934599eb74..df28d6c399 100644 --- a/test/ttmlir/Silicon/TTNN/n150/eltwise/unary/ceil/ceil.mlir +++ b/test/ttmlir/Silicon/TTNN/n150/eltwise/unary/ceil/ceil.mlir @@ -4,8 +4,9 @@ func.func @ceil(%arg0: tensor<32x32xf32>) -> tensor<32x32xf32> { %0 = tensor.empty() : tensor<32x32xf32> - // CHECK: [[VAL0:%[0-9]+]] = "ttnn.empty"(%{{[0-9]+}}) - // CHECK: %{{[0-9]+}} = "ttnn.ceil"(%arg0, [[VAL0]]) %1 = "ttir.ceil"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>}> : (tensor<32x32xf32>, tensor<32x32xf32>) -> tensor<32x32xf32> + // CHECK: "ttnn.ceil" + // CHECK-SAME: tensor<32x32xf32 + // CHECK-SAME: -> tensor<32x32xf32 return %1 : tensor<32x32xf32> } diff --git a/test/ttmlir/Silicon/TTNN/n150/eltwise/unary/clamp/clamp.mlir b/test/ttmlir/Silicon/TTNN/n150/eltwise/unary/clamp/clamp.mlir index 63c17062ac..b7437ffc44 100644 --- a/test/ttmlir/Silicon/TTNN/n150/eltwise/unary/clamp/clamp.mlir +++ b/test/ttmlir/Silicon/TTNN/n150/eltwise/unary/clamp/clamp.mlir @@ -4,8 +4,10 @@ func.func @clamp(%arg0: tensor<64x128xbf16>) -> tensor<64x128xbf16> { %0 = tensor.empty() : tensor<64x128xbf16> - // CHECK: "ttnn.clamp"(%arg0) - // CHECK-SAME: {max = 3.000000e+00 : f32, min = 2.000000e+00 : f32} %1 = "ttir.clamp"(%arg0, %0) <{max = 3.000000e+00 : f32, min = 2.000000e+00 : f32}> : (tensor<64x128xbf16>, tensor<64x128xbf16>) -> tensor<64x128xbf16> + // CHECK: "ttnn.clamp" + // CHECK-SAME: {max = 3.000000e+00 : f32, min = 2.000000e+00 : f32} + // CHECK-SAME: tensor<64x128xbf16 + // CHECK-SAME: -> tensor<64x128xbf16 return %1 : tensor<64x128xbf16> } diff --git a/test/ttmlir/Silicon/TTNN/n150/eltwise/unary/cosine/cosine.mlir b/test/ttmlir/Silicon/TTNN/n150/eltwise/unary/cosine/cosine.mlir index 2391f68249..c8b0223e30 100644 --- a/test/ttmlir/Silicon/TTNN/n150/eltwise/unary/cosine/cosine.mlir +++ b/test/ttmlir/Silicon/TTNN/n150/eltwise/unary/cosine/cosine.mlir @@ -4,8 +4,9 @@ func.func @cosine(%arg0: tensor<32x32xf32>) -> tensor<32x32xf32> { %0 = tensor.empty() : tensor<32x32xf32> - // CHECK: [[VAL0:%[0-9]+]] = "ttnn.empty"(%{{[0-9]+}}) - // CHECK: %{{[0-9]+}} = "ttnn.cos"(%arg0, [[VAL0]]) %1 = "ttir.cos"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>}> : (tensor<32x32xf32>, tensor<32x32xf32>) -> tensor<32x32xf32> + // CHECK: "ttnn.cos" + // CHECK-SAME: tensor<32x32xf32 + // CHECK-SAME: -> tensor<32x32xf32 return %1 : tensor<32x32xf32> } diff --git a/test/ttmlir/Silicon/TTNN/n150/eltwise/unary/expm1/expm1.mlir b/test/ttmlir/Silicon/TTNN/n150/eltwise/unary/expm1/expm1.mlir index fc95c1ae07..81533c0d1d 100644 --- a/test/ttmlir/Silicon/TTNN/n150/eltwise/unary/expm1/expm1.mlir +++
b/test/ttmlir/Silicon/TTNN/n150/eltwise/unary/expm1/expm1.mlir @@ -4,9 +4,9 @@ func.func @expm1(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> { %0 = tensor.empty() : tensor<64x128xf32> - // CHECK: [[VAL0:%[0-9]+]] = "ttnn.empty"(%{{[0-9]+}}) <{dtype = {{.*}}, layout = {{.*}}, memory_config = {{.*}}, <{{.*}}>>, shape = #ttnn.shape<[[TENSOR_SHAPE:[0-9]+x[0-9]+]]>}> %1 = "ttir.expm1"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32> - // CHECK: %{{[0-9]+}} = "ttnn.expm1"(%arg0, [[VAL0]]) <{operandSegmentSizes = array<i32: 1, 1>}> : (tensor<[[TENSOR_SHAPE]]x{{.*}}, {{.*}}>, tensor<[[TENSOR_SHAPE]]x{{.*}}, {{.*}}) -> tensor<[[TENSOR_SHAPE]]x{{.*}}, {{.*}}> + // CHECK: "ttnn.expm1" + // CHECK-SAME: tensor<64x128xf32 + // CHECK-SAME: -> tensor<64x128xf32 return %1 : tensor<64x128xf32> - // CHECK: return %{{[0-9]+}} : tensor<[[TENSOR_SHAPE]]xf32, {{.*}}> } diff --git a/test/ttmlir/Silicon/TTNN/n150/eltwise/unary/floor/floor.mlir b/test/ttmlir/Silicon/TTNN/n150/eltwise/unary/floor/floor.mlir index 7af577c8e8..be34577ca6 100644 --- a/test/ttmlir/Silicon/TTNN/n150/eltwise/unary/floor/floor.mlir +++ b/test/ttmlir/Silicon/TTNN/n150/eltwise/unary/floor/floor.mlir @@ -3,13 +3,10 @@ // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn func.func @floor(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> { - // CHECK: %{{[0-9]+}} = "ttnn.empty" - // CHECK-SAME: [[TENSOR:tensor<64x128xf32,]] %0 = tensor.empty() : tensor<64x128xf32> - // CHECK: %{{[0-9]+}} = "ttnn.floor" - // CHECK-SAME: [[TENSOR]] - // CHECK-SAME: [[TENSOR]] - // CHECK-SAME: -> [[TENSOR]] %1 = "ttir.floor"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32> + // CHECK: "ttnn.floor" + // CHECK-SAME: tensor<64x128xf32 + // CHECK-SAME: -> tensor<64x128xf32 return %1 : tensor<64x128xf32> } diff --git a/test/ttmlir/Silicon/TTNN/n150/eltwise/unary/gelu/gelu.mlir b/test/ttmlir/Silicon/TTNN/n150/eltwise/unary/gelu/gelu.mlir index 7e9767e1fd..8f2bf48737 100644 --- a/test/ttmlir/Silicon/TTNN/n150/eltwise/unary/gelu/gelu.mlir +++ b/test/ttmlir/Silicon/TTNN/n150/eltwise/unary/gelu/gelu.mlir @@ -3,13 +3,10 @@ // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn func.func @gelu(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> { - // CHECK: "ttnn.empty" - // CHECK-SAME: tensor<64x128xf32, %0 = tensor.empty() : tensor<64x128xf32> - // CHECK: "ttnn.gelu" - // CHECK-SAME: tensor<64x128xf32, - // CHECK-SAME: tensor<64x128xf32, - // CHECK-SAME: tensor<64x128xf32, %1 = "ttir.gelu"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32> + // CHECK: "ttnn.gelu" + // CHECK-SAME: tensor<64x128xf32 + // CHECK-SAME: -> tensor<64x128xf32 return %1 : tensor<64x128xf32> } diff --git a/test/ttmlir/Silicon/TTNN/n150/eltwise/unary/is_finite/is_finite.mlir b/test/ttmlir/Silicon/TTNN/n150/eltwise/unary/is_finite/is_finite.mlir index b8dc64fb72..a9ae0d5b0c 100644 --- a/test/ttmlir/Silicon/TTNN/n150/eltwise/unary/is_finite/is_finite.mlir +++ b/test/ttmlir/Silicon/TTNN/n150/eltwise/unary/is_finite/is_finite.mlir @@ -3,13 +3,10 @@ // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn func.func @is_finite(%arg0: tensor<64x128xbf16>) -> tensor<64x128xbf16> { - // CHECK: %[[C:.*]] = "ttnn.empty" - // CHECK-SAME: [[TENSOR:tensor<64x128xbf16,]] %0 = tensor.empty() : tensor<64x128xbf16> - // CHECK: %[[C:.*]] = "ttnn.isfinite" - // CHECK-SAME: tensor<64x128xbf16, - //
CHECK-SAME: -> [[TENSOR]] %1 = "ttir.isfinite"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>}> : (tensor<64x128xbf16>, tensor<64x128xbf16>) -> tensor<64x128xbf16> + // CHECK: "ttnn.isfinite" + // CHECK-SAME: tensor<64x128xbf16 + // CHECK-SAME: -> tensor<64x128xbf16 return %1 : tensor<64x128xbf16> } diff --git a/test/ttmlir/Silicon/TTNN/n150/eltwise/unary/leaky_relu/leaky_relu.mlir b/test/ttmlir/Silicon/TTNN/n150/eltwise/unary/leaky_relu/leaky_relu.mlir index 018fa352ee..c11b5e110f 100644 --- a/test/ttmlir/Silicon/TTNN/n150/eltwise/unary/leaky_relu/leaky_relu.mlir +++ b/test/ttmlir/Silicon/TTNN/n150/eltwise/unary/leaky_relu/leaky_relu.mlir @@ -3,9 +3,11 @@ // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn func.func @leaky_relu(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> { - // CHECK: %[[C:.*]] = "ttnn.empty" %0 = tensor.empty() : tensor<64x128xf32> - // CHECK: %[[C:.*]] = "ttnn.leaky_relu" %1 = "ttir.leaky_relu"(%arg0, %0) <{parameter = 0.01 : f32, operandSegmentSizes = array<i32: 1, 1>}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32> + // CHECK: "ttnn.leaky_relu" + // CHECK-SAME: <{parameter = 0.00999999977 : f32}> + // CHECK-SAME: tensor<64x128xf32 + // CHECK-SAME: -> tensor<64x128xf32 return %1 : tensor<64x128xf32> } diff --git a/test/ttmlir/Silicon/TTNN/n150/eltwise/unary/log/log.mlir b/test/ttmlir/Silicon/TTNN/n150/eltwise/unary/log/log.mlir index fa21eb24cc..42c9e463a2 100644 --- a/test/ttmlir/Silicon/TTNN/n150/eltwise/unary/log/log.mlir +++ b/test/ttmlir/Silicon/TTNN/n150/eltwise/unary/log/log.mlir @@ -3,9 +3,10 @@ // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn func.func @log(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xf32> { - // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]] %0 = tensor.empty() : tensor<64x128xf32> - // CHECK: %[[C:.*]] = "ttnn.log"[[C:.*]] %1 = "ttir.log"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32> + // CHECK: "ttnn.log" + // CHECK-SAME: tensor<64x128xf32 + // CHECK-SAME: -> tensor<64x128xf32 return %1 : tensor<64x128xf32> } diff --git a/test/ttmlir/Silicon/TTNN/n150/eltwise/unary/log1p/log1p.mlir b/test/ttmlir/Silicon/TTNN/n150/eltwise/unary/log1p/log1p.mlir index efdd6b8fe0..e25f17b1dc 100644 --- a/test/ttmlir/Silicon/TTNN/n150/eltwise/unary/log1p/log1p.mlir +++ b/test/ttmlir/Silicon/TTNN/n150/eltwise/unary/log1p/log1p.mlir @@ -4,9 +4,9 @@ func.func @log1p(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> { %0 = tensor.empty() : tensor<64x128xf32> - // CHECK: [[VAL0:%[0-9]+]] = "ttnn.empty"(%{{[0-9]+}}) <{dtype = {{.*}}, layout = {{.*}}, memory_config = {{.*}}, <{{.*}}>>, shape = #ttnn.shape<[[TENSOR_SHAPE:[0-9]+x[0-9]+]]>}> %1 = "ttir.log1p"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32> - // CHECK: %{{[0-9]+}} = "ttnn.log1p"(%arg0, [[VAL0]]) <{operandSegmentSizes = array<i32: 1, 1>}> : (tensor<[[TENSOR_SHAPE]]x{{.*}}, {{.*}}>, tensor<[[TENSOR_SHAPE]]x{{.*}}, {{.*}}) -> tensor<[[TENSOR_SHAPE]]x{{.*}}, {{.*}}> + // CHECK: "ttnn.log1p" + // CHECK-SAME: tensor<64x128xf32 + // CHECK-SAME: -> tensor<64x128xf32 return %1 : tensor<64x128xf32> - // CHECK: return %{{[0-9]+}} : tensor<[[TENSOR_SHAPE]]xf32, {{.*}}> } diff --git a/test/ttmlir/Silicon/TTNN/n150/eltwise/unary/logical_not/simple_not.mlir b/test/ttmlir/Silicon/TTNN/n150/eltwise/unary/logical_not/simple_not.mlir new file mode 100644 index 0000000000..5cba3fbe81 --- /dev/null +++
b/test/ttmlir/Silicon/TTNN/n150/eltwise/unary/logical_not/simple_not.mlir @@ -0,0 +1,14 @@ +// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" %s > %t.mlir +// RUN: FileCheck %s --input-file=%t.mlir +// RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn + +module attributes {} { + func.func @logical_not(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> { + %0 = tensor.empty() : tensor<64x128xf32> + %1 = "ttir.logical_not"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32> + // CHECK: "ttnn.logical_not" + // CHECK-SAME: tensor<64x128xf32 + // CHECK-SAME: -> tensor<64x128xf32 + return %1 : tensor<64x128xf32> + } +} diff --git a/test/ttmlir/Silicon/TTNN/n150/eltwise/unary/negate/negate.mlir b/test/ttmlir/Silicon/TTNN/n150/eltwise/unary/negate/negate.mlir index 5173e6d920..f7c7942e59 100644 --- a/test/ttmlir/Silicon/TTNN/n150/eltwise/unary/negate/negate.mlir +++ b/test/ttmlir/Silicon/TTNN/n150/eltwise/unary/negate/negate.mlir @@ -4,7 +4,9 @@ func.func @negate(%arg0: tensor<32x32xf32>) -> tensor<32x32xf32> { %0 = tensor.empty() : tensor<32x32xf32> - // CHECK: %[[C:.*]] = "ttnn.neg"[[C:.*]] %1 = "ttir.neg"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>}> : (tensor<32x32xf32>, tensor<32x32xf32>) -> tensor<32x32xf32> + // CHECK: "ttnn.neg" + // CHECK-SAME: tensor<32x32xf32 + // CHECK-SAME: -> tensor<32x32xf32 return %1 : tensor<32x32xf32> } diff --git a/test/ttmlir/Silicon/TTNN/n150/eltwise/unary/recipricol/recipricol.mlir b/test/ttmlir/Silicon/TTNN/n150/eltwise/unary/recipricol/recipricol.mlir index a05f62da9f..141076aee9 100644 --- a/test/ttmlir/Silicon/TTNN/n150/eltwise/unary/recipricol/recipricol.mlir +++ b/test/ttmlir/Silicon/TTNN/n150/eltwise/unary/recipricol/recipricol.mlir @@ -3,9 +3,10 @@ // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn func.func @reciprocal(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> { - // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]] %0 = tensor.empty() : tensor<64x128xf32> - // CHECK: %[[C:.*]] = "ttnn.reciprocal"[[C:.*]] %1 = "ttir.reciprocal"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32> + // CHECK: "ttnn.reciprocal" + // CHECK-SAME: tensor<64x128xf32 + // CHECK-SAME: -> tensor<64x128xf32 return %1 : tensor<64x128xf32> } diff --git a/test/ttmlir/Silicon/TTNN/n150/eltwise/unary/relu/relu.mlir b/test/ttmlir/Silicon/TTNN/n150/eltwise/unary/relu/relu.mlir index 3a75ad988a..86865f7a53 100644 --- a/test/ttmlir/Silicon/TTNN/n150/eltwise/unary/relu/relu.mlir +++ b/test/ttmlir/Silicon/TTNN/n150/eltwise/unary/relu/relu.mlir @@ -3,9 +3,10 @@ // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn func.func @relu(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> { - // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]] %0 = tensor.empty() : tensor<64x128xf32> - // CHECK: %[[C:.*]] = "ttnn.relu"[[C:.*]] %1 = "ttir.relu"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32> + // CHECK: "ttnn.relu" + // CHECK-SAME: tensor<64x128xf32 + // CHECK-SAME: -> tensor<64x128xf32 return %1 : tensor<64x128xf32> } diff --git a/test/ttmlir/Silicon/TTNN/n150/eltwise/unary/rsqrt/rsqrt.mlir b/test/ttmlir/Silicon/TTNN/n150/eltwise/unary/rsqrt/rsqrt.mlir index e3ced09427..49522c70d1 100644 --- a/test/ttmlir/Silicon/TTNN/n150/eltwise/unary/rsqrt/rsqrt.mlir +++ b/test/ttmlir/Silicon/TTNN/n150/eltwise/unary/rsqrt/rsqrt.mlir @@ -3,9 +3,10 @@ // RUN: ttmlir-translate
--ttnn-to-flatbuffer %t.mlir > %t.ttnn func.func @rsqrt(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> { - // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]] %0 = tensor.empty() : tensor<64x128xf32> - // CHECK: %[[C:.*]] = "ttnn.rsqrt"[[C:.*]] %1 = "ttir.rsqrt"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32> + // CHECK: "ttnn.rsqrt" + // CHECK-SAME: tensor<64x128xf32 + // CHECK-SAME: -> tensor<64x128xf32 return %1 : tensor<64x128xf32> } diff --git a/test/ttmlir/Silicon/TTNN/n150/eltwise/unary/sigmoid/sigmoid.mlir b/test/ttmlir/Silicon/TTNN/n150/eltwise/unary/sigmoid/sigmoid.mlir index b6ef7a5a44..b0f67ae3bf 100644 --- a/test/ttmlir/Silicon/TTNN/n150/eltwise/unary/sigmoid/sigmoid.mlir +++ b/test/ttmlir/Silicon/TTNN/n150/eltwise/unary/sigmoid/sigmoid.mlir @@ -3,9 +3,10 @@ // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn func.func @sigmoid(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> { - // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]] %0 = tensor.empty() : tensor<64x128xf32> - // CHECK: %[[C:.*]] = "ttnn.sigmoid"[[C:.*]] %1 = "ttir.sigmoid"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32> + // CHECK: "ttnn.sigmoid" + // CHECK-SAME: tensor<64x128xf32 + // CHECK-SAME: -> tensor<64x128xf32 return %1 : tensor<64x128xf32> } diff --git a/test/ttmlir/Silicon/TTNN/n150/eltwise/unary/sign/sign.mlir b/test/ttmlir/Silicon/TTNN/n150/eltwise/unary/sign/sign.mlir index 368d7f26af..1dfaa33d37 100644 --- a/test/ttmlir/Silicon/TTNN/n150/eltwise/unary/sign/sign.mlir +++ b/test/ttmlir/Silicon/TTNN/n150/eltwise/unary/sign/sign.mlir @@ -4,9 +4,9 @@ func.func @sign(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> { %0 = tensor.empty() : tensor<64x128xf32> - // CHECK: [[VAL0:%[0-9]+]] = "ttnn.empty"(%{{[0-9]+}}) <{dtype = {{.*}}, layout = {{.*}}, memory_config = {{.*}}, <{{.*}}>>, shape = #ttnn.shape<[[TENSOR_SHAPE:[0-9]+x[0-9]+]]>}> %1 = "ttir.sign"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32> - // CHECK: %{{[0-9]+}} = "ttnn.sign"(%arg0, [[VAL0]]) <{operandSegmentSizes = array<i32: 1, 1>}> : (tensor<[[TENSOR_SHAPE]]x{{.*}}, {{.*}}>, tensor<[[TENSOR_SHAPE]]x{{.*}}, {{.*}}) -> tensor<[[TENSOR_SHAPE]]x{{.*}}, {{.*}}> + // CHECK: "ttnn.sign" + // CHECK-SAME: tensor<64x128xf32 + // CHECK-SAME: -> tensor<64x128xf32 return %1 : tensor<64x128xf32> - // CHECK: return %{{[0-9]+}} : tensor<[[TENSOR_SHAPE]]xf32, {{.*}}> } diff --git a/test/ttmlir/Silicon/TTNN/n150/eltwise/unary/sine/sine.mlir b/test/ttmlir/Silicon/TTNN/n150/eltwise/unary/sine/sine.mlir index ca61f435af..0e2e32be2e 100644 --- a/test/ttmlir/Silicon/TTNN/n150/eltwise/unary/sine/sine.mlir +++ b/test/ttmlir/Silicon/TTNN/n150/eltwise/unary/sine/sine.mlir @@ -4,8 +4,9 @@ func.func @sine(%arg0: tensor<32x32xf32>) -> tensor<32x32xf32> { %0 = tensor.empty() : tensor<32x32xf32> - // CHECK: [[VAL0:%[0-9]+]] = "ttnn.empty"(%{{[0-9]+}}) - // CHECK: %{{[0-9]+}} = "ttnn.sin"(%arg0, [[VAL0]]) %1 = "ttir.sin"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>}> : (tensor<32x32xf32>, tensor<32x32xf32>) -> tensor<32x32xf32> + // CHECK: "ttnn.sin" + // CHECK-SAME: tensor<32x32xf32 + // CHECK-SAME: -> tensor<32x32xf32 return %1 : tensor<32x32xf32> } diff --git a/test/ttmlir/Silicon/TTNN/n150/eltwise/unary/sqrt/sqrt.mlir b/test/ttmlir/Silicon/TTNN/n150/eltwise/unary/sqrt/sqrt.mlir index 7c948b7ac1..49eb3632f8 100644 --- a/test/ttmlir/Silicon/TTNN/n150/eltwise/unary/sqrt/sqrt.mlir +++
b/test/ttmlir/Silicon/TTNN/n150/eltwise/unary/sqrt/sqrt.mlir @@ -3,9 +3,10 @@ // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn func.func @sqrt(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> { - // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]] %0 = tensor.empty() : tensor<64x128xf32> - // CHECK: %[[C:.*]] = "ttnn.sqrt"[[C:.*]] %1 = "ttir.sqrt"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32> + // CHECK: "ttnn.sqrt" + // CHECK-SAME: tensor<64x128xf32 + // CHECK-SAME: -> tensor<64x128xf32 return %1 : tensor<64x128xf32> } diff --git a/test/ttmlir/Silicon/TTNN/n150/eltwise/unary/tan/tan.mlir b/test/ttmlir/Silicon/TTNN/n150/eltwise/unary/tan/tan.mlir index c1c319dec2..881eaf49cf 100644 --- a/test/ttmlir/Silicon/TTNN/n150/eltwise/unary/tan/tan.mlir +++ b/test/ttmlir/Silicon/TTNN/n150/eltwise/unary/tan/tan.mlir @@ -4,8 +4,9 @@ func.func @tan(%arg0: tensor<64x128xbf16>) -> tensor<64x128xbf16> { %0 = tensor.empty() : tensor<64x128xbf16> - // CHECK: [[VAL0:%[0-9]+]] = "ttnn.empty"(%{{[0-9]+}}) - // CHECK: %{{[0-9]+}} = "ttnn.tan"(%arg0, [[VAL0]]) %1 = "ttir.tan"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>}> : (tensor<64x128xbf16>, tensor<64x128xbf16>) -> tensor<64x128xbf16> + // CHECK: "ttnn.tan" + // CHECK-SAME: tensor<64x128xbf16 + // CHECK-SAME: -> tensor<64x128xbf16 return %1 : tensor<64x128xbf16> } diff --git a/test/ttmlir/Silicon/TTNN/n150/eltwise/unary/tanh/tanh.mlir b/test/ttmlir/Silicon/TTNN/n150/eltwise/unary/tanh/tanh.mlir index bee21d19a9..0239b101b9 100644 --- a/test/ttmlir/Silicon/TTNN/n150/eltwise/unary/tanh/tanh.mlir +++ b/test/ttmlir/Silicon/TTNN/n150/eltwise/unary/tanh/tanh.mlir @@ -4,8 +4,9 @@ func.func @tanh(%arg0: tensor<64x128xbf16>) -> tensor<64x128xbf16> { %0 = tensor.empty() : tensor<64x128xbf16> - // CHECK: [[VAL0:%[0-9]+]] = "ttnn.empty"(%{{[0-9]+}}) - // CHECK: %{{[0-9]+}} = "ttnn.tanh"(%arg0, [[VAL0]]) %1 = "ttir.tanh"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>}> : (tensor<64x128xbf16>, tensor<64x128xbf16>) -> tensor<64x128xbf16> + // CHECK: "ttnn.tanh" + // CHECK-SAME: tensor<64x128xbf16 + // CHECK-SAME: -> tensor<64x128xbf16 return %1 : tensor<64x128xbf16> } diff --git a/test/ttmlir/Silicon/TTNN/n150/operand_broadcasts.mlir b/test/ttmlir/Silicon/TTNN/n150/operand_broadcasts.mlir index 1b919ec1de..73ace5d80e 100644 --- a/test/ttmlir/Silicon/TTNN/n150/operand_broadcasts.mlir +++ b/test/ttmlir/Silicon/TTNN/n150/operand_broadcasts.mlir @@ -3,17 +3,15 @@ // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn module attributes {} { func.func @bcast_one_dim(%arg0: tensor<2x64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<2x64x128xf32> { - // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]] %0 = tensor.empty() : tensor<2x64x128xf32> - // CHECK: %[[C:.*]] = "ttnn.multiply"[[C:.*]] + // CHECK: "ttnn.multiply" %1 = "ttir.multiply"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>}> : (tensor<2x64x128xf32>, tensor<64x128xf32>, tensor<2x64x128xf32>) -> tensor<2x64x128xf32> return %1 : tensor<2x64x128xf32> } func.func @bcast_multi_dim(%arg0: tensor<17x16x15x14xf32>, %arg1: tensor<15x1xf32>) -> tensor<17x16x15x14xf32> { - // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]] %0 = tensor.empty() : tensor<17x16x15x14xf32> - // CHECK: %[[C:.*]] = "ttnn.multiply"[[C:.*]] + // CHECK: "ttnn.multiply" %1 = "ttir.multiply"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>}> : (tensor<17x16x15x14xf32>, tensor<15x1xf32>, tensor<17x16x15x14xf32>) -> tensor<17x16x15x14xf32> return %1 : tensor<17x16x15x14xf32> } diff --git
a/test/ttmlir/Silicon/TTNN/n150/perf/test_perf_and.mlir b/test/ttmlir/Silicon/TTNN/n150/perf/test_perf_and.mlir index d279685203..6f6294f50f 100644 --- a/test/ttmlir/Silicon/TTNN/n150/perf/test_perf_and.mlir +++ b/test/ttmlir/Silicon/TTNN/n150/perf/test_perf_and.mlir @@ -3,11 +3,10 @@ // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn func.func @logical_and(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xf32> { %0 = tensor.empty() : tensor<64x128xf32> - // CHECK: {{.*}} = "ttnn.empty"{{.*}} %1 = "ttir.logical_and"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>}> : (tensor<64x128xf32>, tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32> - // CHECK: %[[C:.*]] = "ttnn.logical_and" - // CHECK-SAME: tensor<64x128xf32, - // CHECK-SAME: tensor<64x128xf32, - // CHECK-SAME: tensor<64x128xf32, + // CHECK: "ttnn.logical_and" + // CHECK-SAME: tensor<64x128xf32 + // CHECK-SAME: tensor<64x128xf32 + // CHECK-SAME: -> tensor<64x128xf32 return %1 : tensor<64x128xf32> } diff --git a/test/ttmlir/Silicon/TTNN/n150/perf/test_perf_ceil.mlir b/test/ttmlir/Silicon/TTNN/n150/perf/test_perf_ceil.mlir index 114275c51f..50b18b79b2 100644 --- a/test/ttmlir/Silicon/TTNN/n150/perf/test_perf_ceil.mlir +++ b/test/ttmlir/Silicon/TTNN/n150/perf/test_perf_ceil.mlir @@ -3,8 +3,9 @@ // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn func.func @ceil(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> { %0 = tensor.empty() : tensor<64x128xf32> - // CHECK: [[VAL0:%[0-9]+]] = "ttnn.empty"(%{{[0-9]+}}) - // CHECK: %{{[0-9]+}} = "ttnn.ceil"(%arg0, [[VAL0]]) %1 = "ttir.ceil"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32> + // CHECK: "ttnn.ceil" + // CHECK-SAME: tensor<64x128xf32 + // CHECK-SAME: -> tensor<64x128xf32 return %1 : tensor<64x128xf32> } diff --git a/test/ttmlir/Silicon/TTNN/n150/perf/test_perf_cosine.mlir b/test/ttmlir/Silicon/TTNN/n150/perf/test_perf_cosine.mlir index 1598de319a..1da8177c8e 100644 --- a/test/ttmlir/Silicon/TTNN/n150/perf/test_perf_cosine.mlir +++ b/test/ttmlir/Silicon/TTNN/n150/perf/test_perf_cosine.mlir @@ -3,8 +3,9 @@ // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn func.func @cosine(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> { %0 = tensor.empty() : tensor<64x128xf32> - // CHECK: [[VAL0:%[0-9]+]] = "ttnn.empty"(%{{[0-9]+}}) - // CHECK: %{{[0-9]+}} = "ttnn.cos"(%arg0, [[VAL0]]) %1 = "ttir.cos"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32> + // CHECK: "ttnn.cos" + // CHECK-SAME: tensor<64x128xf32 + // CHECK-SAME: -> tensor<64x128xf32 return %1 : tensor<64x128xf32> } diff --git a/test/ttmlir/Silicon/TTNN/n150/perf/test_perf_div.mlir b/test/ttmlir/Silicon/TTNN/n150/perf/test_perf_div.mlir index a6b6a55a49..b4267d250d 100644 --- a/test/ttmlir/Silicon/TTNN/n150/perf/test_perf_div.mlir +++ b/test/ttmlir/Silicon/TTNN/n150/perf/test_perf_div.mlir @@ -2,9 +2,11 @@ // RUN: FileCheck %s --input-file=%t.mlir // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn func.func @div(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xf32> { - // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]] %0 = tensor.empty() : tensor<64x128xf32> - // CHECK: %[[C:.*]] = "ttnn.div"[[C:.*]] %1 = "ttir.div"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>}> : (tensor<64x128xf32>, tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32> + // CHECK: "ttnn.div" + // CHECK-SAME: tensor<64x128xf32 + // CHECK-SAME:
tensor<64x128xf32 + // CHECK-SAME: -> tensor<64x128xf32 return %1 : tensor<64x128xf32> } diff --git a/test/ttmlir/Silicon/TTNN/n150/perf/test_perf_eq.mlir b/test/ttmlir/Silicon/TTNN/n150/perf/test_perf_eq.mlir index 44ff28faf9..47863e6e8b 100644 --- a/test/ttmlir/Silicon/TTNN/n150/perf/test_perf_eq.mlir +++ b/test/ttmlir/Silicon/TTNN/n150/perf/test_perf_eq.mlir @@ -3,15 +3,12 @@ // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn module attributes {} { func.func @equal(%arg0: tensor<13x31xf32>, %arg1: tensor<13x31xf32>) -> tensor<13x31xf32> { - // CHECK: %[[C:.*]] = "ttnn.empty - // CHECK-SAME: [[TENSOR:tensor<13x31xf32,]] %0 = tensor.empty() : tensor<13x31xf32> - // CHECK: %[[C:.*]] = "ttnn.eq" - // CHECK-SAME: [[TENSOR]] - // CHECK-SAME: [[TENSOR]] - // CHECK-SAME: [[TENSOR]] - // CHECK-SAME: -> [[TENSOR]] %1 = "ttir.eq"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>}> : (tensor<13x31xf32>, tensor<13x31xf32>, tensor<13x31xf32>) -> tensor<13x31xf32> + // CHECK: "ttnn.eq" + // CHECK-SAME: tensor<13x31xf32 + // CHECK-SAME: tensor<13x31xf32 + // CHECK-SAME: -> tensor<13x31xf32 return %1 : tensor<13x31xf32> } } diff --git a/test/ttmlir/Silicon/TTNN/n150/perf/test_perf_expm1.mlir b/test/ttmlir/Silicon/TTNN/n150/perf/test_perf_expm1.mlir index a499c20ce2..c4e124e7ae 100644 --- a/test/ttmlir/Silicon/TTNN/n150/perf/test_perf_expm1.mlir +++ b/test/ttmlir/Silicon/TTNN/n150/perf/test_perf_expm1.mlir @@ -3,9 +3,9 @@ // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn func.func @expm1(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> { %0 = tensor.empty() : tensor<64x128xf32> - // CHECK: [[VAL0:%[0-9]+]] = "ttnn.empty"(%{{[0-9]+}}) <{dtype = {{.*}}, layout = {{.*}}, memory_config = {{.*}}, <{{.*}}>>, shape = #ttnn.shape<[[TENSOR_SHAPE:[0-9]+x[0-9]+]]>}> %1 = "ttir.expm1"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32> - // CHECK: %{{[0-9]+}} = "ttnn.expm1"(%arg0, [[VAL0]]) <{operandSegmentSizes = array<i32: 1, 1>}> : (tensor<[[TENSOR_SHAPE]]x{{.*}}, {{.*}}>, tensor<[[TENSOR_SHAPE]]x{{.*}}, {{.*}}) -> tensor<[[TENSOR_SHAPE]]x{{.*}}, {{.*}}> + // CHECK: "ttnn.expm1" + // CHECK-SAME: tensor<64x128xf32 + // CHECK-SAME: -> tensor<64x128xf32 return %1 : tensor<64x128xf32> - // CHECK: return %{{[0-9]+}} : tensor<[[TENSOR_SHAPE]]xf32, {{.*}}> } diff --git a/test/ttmlir/Silicon/TTNN/n150/perf/test_perf_floor.mlir b/test/ttmlir/Silicon/TTNN/n150/perf/test_perf_floor.mlir index d739275349..be34577ca6 100644 --- a/test/ttmlir/Silicon/TTNN/n150/perf/test_perf_floor.mlir +++ b/test/ttmlir/Silicon/TTNN/n150/perf/test_perf_floor.mlir @@ -1,14 +1,12 @@ // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" %s > %t.mlir // RUN: FileCheck %s --input-file=%t.mlir // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn + func.func @floor(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> { - // CHECK: %{{[0-9]+}} = "ttnn.empty" - // CHECK-SAME: [[TENSOR:tensor<64x128xf32,]] %0 = tensor.empty() : tensor<64x128xf32> - // CHECK: %{{[0-9]+}} = "ttnn.floor" - // CHECK-SAME: [[TENSOR]] - // CHECK-SAME: [[TENSOR]] - // CHECK-SAME: -> [[TENSOR]] %1 = "ttir.floor"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32> + // CHECK: "ttnn.floor" + // CHECK-SAME: tensor<64x128xf32 + // CHECK-SAME: -> tensor<64x128xf32 return %1 : tensor<64x128xf32> } diff --git a/test/ttmlir/Silicon/TTNN/n150/perf/test_perf_ge.mlir b/test/ttmlir/Silicon/TTNN/n150/perf/test_perf_ge.mlir
index 07a6a56f12..28779fe997 100644
--- a/test/ttmlir/Silicon/TTNN/n150/perf/test_perf_ge.mlir
+++ b/test/ttmlir/Silicon/TTNN/n150/perf/test_perf_ge.mlir
@@ -1,10 +1,13 @@
 // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" %s > %t.mlir
 // RUN: FileCheck %s --input-file=%t.mlir
 // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn
+
 func.func @ge(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xf32> {
-  // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]]
   %0 = tensor.empty() : tensor<64x128xf32>
-  // CHECK: %[[C:.*]] = "ttnn.ge"[[C:.*]]
   %1 = "ttir.ge"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>}> : (tensor<64x128xf32>, tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
+  // CHECK: "ttnn.ge"
+  // CHECK-SAME: tensor<64x128xf32
+  // CHECK-SAME: tensor<64x128xf32
+  // CHECK-SAME: -> tensor<64x128xf32
   return %1 : tensor<64x128xf32>
 }
diff --git a/test/ttmlir/Silicon/TTNN/n150/perf/test_perf_gelu.mlir b/test/ttmlir/Silicon/TTNN/n150/perf/test_perf_gelu.mlir
index 7e9767e1fd..8f2bf48737 100644
--- a/test/ttmlir/Silicon/TTNN/n150/perf/test_perf_gelu.mlir
+++ b/test/ttmlir/Silicon/TTNN/n150/perf/test_perf_gelu.mlir
@@ -3,13 +3,10 @@
 // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn
 func.func @gelu(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> {
-  // CHECK: "ttnn.empty"
-  // CHECK-SAME: tensor<64x128xf32,
   %0 = tensor.empty() : tensor<64x128xf32>
-  // CHECK: "ttnn.gelu"
-  // CHECK-SAME: tensor<64x128xf32,
-  // CHECK-SAME: tensor<64x128xf32,
-  // CHECK-SAME: tensor<64x128xf32,
   %1 = "ttir.gelu"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
+  // CHECK: "ttnn.gelu"
+  // CHECK-SAME: tensor<64x128xf32
+  // CHECK-SAME: -> tensor<64x128xf32
   return %1 : tensor<64x128xf32>
 }

diff --git a/test/ttmlir/Silicon/TTNN/n150/perf/test_perf_gt.mlir b/test/ttmlir/Silicon/TTNN/n150/perf/test_perf_gt.mlir
index e02ed1e954..457c09bbef 100644
--- a/test/ttmlir/Silicon/TTNN/n150/perf/test_perf_gt.mlir
+++ b/test/ttmlir/Silicon/TTNN/n150/perf/test_perf_gt.mlir
@@ -3,15 +3,12 @@
 // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn
 module attributes {} {
   func.func @greater_than(%arg0: tensor<13x31xf32>, %arg1: tensor<13x31xf32>) -> tensor<13x31xf32> {
-    // CHECK: %[[C:.*]] = "ttnn.empty
-    // CHECK-SAME: [[TENSOR:tensor<13x31xf32,]]
     %0 = tensor.empty() : tensor<13x31xf32>
-    // CHECK: %[[C:.*]] = "ttnn.gt"
-    // CHECK-SAME: [[TENSOR]]
-    // CHECK-SAME: [[TENSOR]]
-    // CHECK-SAME: [[TENSOR]]
-    // CHECK-SAME: -> [[TENSOR]]
     %1 = "ttir.gt"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>}> : (tensor<13x31xf32>, tensor<13x31xf32>, tensor<13x31xf32>) -> tensor<13x31xf32>
+    // CHECK: "ttnn.gt"
+    // CHECK-SAME: tensor<13x31xf32
+    // CHECK-SAME: tensor<13x31xf32
+    // CHECK-SAME: -> tensor<13x31xf32
     return %1 : tensor<13x31xf32>
   }
 }
diff --git a/test/ttmlir/Silicon/TTNN/n150/perf/test_perf_isfinite.mlir b/test/ttmlir/Silicon/TTNN/n150/perf/test_perf_isfinite.mlir
index b8dc64fb72..a9ae0d5b0c 100644
--- a/test/ttmlir/Silicon/TTNN/n150/perf/test_perf_isfinite.mlir
+++ b/test/ttmlir/Silicon/TTNN/n150/perf/test_perf_isfinite.mlir
@@ -3,13 +3,10 @@
 // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn
 func.func @is_finite(%arg0: tensor<64x128xbf16>) -> tensor<64x128xbf16> {
-  // CHECK: %[[C:.*]] = "ttnn.empty"
-  // CHECK-SAME: [[TENSOR:tensor<64x128xbf16,]]
   %0 = tensor.empty() : tensor<64x128xbf16>
-  // CHECK: %[[C:.*]] = "ttnn.isfinite"
-  // CHECK-SAME: tensor<64x128xbf16,
-  // CHECK-SAME: [[TENSOR]]
-  // CHECK-SAME: -> [[TENSOR]]
   %1 = "ttir.isfinite"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>}> : (tensor<64x128xbf16>, tensor<64x128xbf16>) -> tensor<64x128xbf16>
+  // CHECK: "ttnn.isfinite"
+  // CHECK-SAME: tensor<64x128xbf16
+  // CHECK-SAME: -> tensor<64x128xbf16
   return %1 : tensor<64x128xbf16>
 }

diff --git a/test/ttmlir/Silicon/TTNN/n150/perf/test_perf_log.mlir b/test/ttmlir/Silicon/TTNN/n150/perf/test_perf_log.mlir
index ef5244fdae..947f077441 100644
--- a/test/ttmlir/Silicon/TTNN/n150/perf/test_perf_log.mlir
+++ b/test/ttmlir/Silicon/TTNN/n150/perf/test_perf_log.mlir
@@ -3,8 +3,9 @@
 // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn
 func.func @log(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> {
   %0 = tensor.empty() : tensor<64x128xf32>
-  // CHECK: [[VAL0:%[0-9]+]] = "ttnn.empty"(%{{[0-9]+}})
-  // CHECK: %{{[0-9]+}} = "ttnn.log"(%arg0, [[VAL0]])
   %1 = "ttir.log"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
+  // CHECK: "ttnn.log"
+  // CHECK-SAME: tensor<64x128xf32
+  // CHECK-SAME: -> tensor<64x128xf32
   return %1 : tensor<64x128xf32>
 }
diff --git a/test/ttmlir/Silicon/TTNN/n150/perf/test_perf_log1p.mlir b/test/ttmlir/Silicon/TTNN/n150/perf/test_perf_log1p.mlir
index 7e21972a81..5aeb1a8777 100644
--- a/test/ttmlir/Silicon/TTNN/n150/perf/test_perf_log1p.mlir
+++ b/test/ttmlir/Silicon/TTNN/n150/perf/test_perf_log1p.mlir
@@ -1,12 +1,11 @@
-
 // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" %s > %t.mlir
 // RUN: FileCheck %s --input-file=%t.mlir
 // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn
 func.func @log1p(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> {
   %0 = tensor.empty() : tensor<64x128xf32>
-  // CHECK: [[VAL0:%[0-9]+]] = "ttnn.empty"(%{{[0-9]+}}) <{dtype = {{.*}}, layout = {{.*}}, memory_config = {{.*}}, <{{.*}}>>, shape = #ttnn.shape<[[TENSOR_SHAPE:[0-9]+x[0-9]+]]>}>
   %1 = "ttir.log1p"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
-  // CHECK: %{{[0-9]+}} = "ttnn.log1p"(%arg0, [[VAL0]]) <{operandSegmentSizes = array<i32: 1, 1>}> : (tensor<[[TENSOR_SHAPE]]x{{.*}}, {{.*}}>, tensor<[[TENSOR_SHAPE]]x{{.*}}, {{.*}}) -> tensor<[[TENSOR_SHAPE]]x{{.*}}, {{.*}}>
+  // CHECK: "ttnn.log1p"
+  // CHECK-SAME: tensor<64x128xf32
+  // CHECK-SAME: -> tensor<64x128xf32
   return %1 : tensor<64x128xf32>
-  // CHECK: return %{{[0-9]+}} : tensor<[[TENSOR_SHAPE]]xf32, {{.*}}>
 }
diff --git a/test/ttmlir/Silicon/TTNN/n150/perf/test_perf_lt.mlir b/test/ttmlir/Silicon/TTNN/n150/perf/test_perf_lt.mlir
index 1f95207ba3..73ae87caa1 100644
--- a/test/ttmlir/Silicon/TTNN/n150/perf/test_perf_lt.mlir
+++ b/test/ttmlir/Silicon/TTNN/n150/perf/test_perf_lt.mlir
@@ -3,15 +3,12 @@
 // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn
 module attributes {} {
   func.func @less_than(%arg0: tensor<13x31xf32>, %arg1: tensor<13x31xf32>) -> tensor<13x31xf32> {
-    // CHECK: %[[C:.*]] = "ttnn.empty
-    // CHECK-SAME: [[TENSOR:tensor<13x31xf32,]]
     %0 = tensor.empty() : tensor<13x31xf32>
-    // CHECK: %[[C:.*]] = "ttnn.lt"
-    // CHECK-SAME: [[TENSOR]]
-    // CHECK-SAME: [[TENSOR]]
-    // CHECK-SAME: [[TENSOR]]
-    // CHECK-SAME: -> [[TENSOR]]
     %1 = "ttir.lt"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>}> : (tensor<13x31xf32>, tensor<13x31xf32>, tensor<13x31xf32>) -> tensor<13x31xf32>
+    // CHECK: "ttnn.lt"
+    // CHECK-SAME: tensor<13x31xf32
+    // CHECK-SAME: tensor<13x31xf32
+    // CHECK-SAME: -> tensor<13x31xf32
     return %1 : tensor<13x31xf32>
   }
 }
diff --git a/test/ttmlir/Silicon/TTNN/n150/perf/test_perf_maximum.mlir b/test/ttmlir/Silicon/TTNN/n150/perf/test_perf_maximum.mlir
index 3893bc9f0c..e218de0260 100644
--- a/test/ttmlir/Silicon/TTNN/n150/perf/test_perf_maximum.mlir
+++ b/test/ttmlir/Silicon/TTNN/n150/perf/test_perf_maximum.mlir
@@ -2,9 +2,11 @@
 // RUN: FileCheck %s --input-file=%t.mlir
 // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn
 func.func @maximum(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xf32> {
-  // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]]
   %0 = tensor.empty() : tensor<64x128xf32>
-  // CHECK: %[[C:.*]] = "ttnn.maximum"[[C:.*]]
   %1 = "ttir.maximum"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>}> : (tensor<64x128xf32>, tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
+  // CHECK: "ttnn.maximum"
+  // CHECK-SAME: tensor<64x128xf32
+  // CHECK-SAME: tensor<64x128xf32
+  // CHECK-SAME: -> tensor<64x128xf32
   return %1 : tensor<64x128xf32>
 }
diff --git a/test/ttmlir/Silicon/TTNN/n150/perf/test_perf_multiply.mlir b/test/ttmlir/Silicon/TTNN/n150/perf/test_perf_multiply.mlir
index 7991cbc786..26cfd904e7 100644
--- a/test/ttmlir/Silicon/TTNN/n150/perf/test_perf_multiply.mlir
+++ b/test/ttmlir/Silicon/TTNN/n150/perf/test_perf_multiply.mlir
@@ -2,9 +2,11 @@
 // RUN: FileCheck %s --input-file=%t.mlir
 // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn
 func.func @multiply(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xf32> {
-  // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]]
   %0 = tensor.empty() : tensor<64x128xf32>
-  // CHECK: %[[C:.*]] = "ttnn.multiply"[[C:.*]]
   %1 = "ttir.multiply"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>}> : (tensor<64x128xf32>, tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
+  // CHECK: "ttnn.multiply"
+  // CHECK-SAME: tensor<64x128xf32
+  // CHECK-SAME: tensor<64x128xf32
+  // CHECK-SAME: -> tensor<64x128xf32
   return %1 : tensor<64x128xf32>
 }
diff --git a/test/ttmlir/Silicon/TTNN/n150/perf/test_perf_ne.mlir b/test/ttmlir/Silicon/TTNN/n150/perf/test_perf_ne.mlir
index 300e66226d..cbfc97e230 100644
--- a/test/ttmlir/Silicon/TTNN/n150/perf/test_perf_ne.mlir
+++ b/test/ttmlir/Silicon/TTNN/n150/perf/test_perf_ne.mlir
@@ -3,15 +3,12 @@
 // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn
 module attributes {} {
   func.func @not_equal(%arg0: tensor<13x31xf32>, %arg1: tensor<13x31xf32>) -> tensor<13x31xf32> {
-    // CHECK: %[[C:.*]] = "ttnn.empty
-    // CHECK-SAME: [[TENSOR:tensor<13x31xf32,]]
     %0 = tensor.empty() : tensor<13x31xf32>
-    // CHECK: %[[C:.*]] = "ttnn.ne"
-    // CHECK-SAME: [[TENSOR]]
-    // CHECK-SAME: [[TENSOR]]
-    // CHECK-SAME: [[TENSOR]]
-    // CHECK-SAME: -> [[TENSOR]]
     %1 = "ttir.ne"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>}> : (tensor<13x31xf32>, tensor<13x31xf32>, tensor<13x31xf32>) -> tensor<13x31xf32>
+    // CHECK: "ttnn.ne"
+    // CHECK-SAME: tensor<13x31xf32
+    // CHECK-SAME: tensor<13x31xf32
+    // CHECK-SAME: -> tensor<13x31xf32
     return %1 : tensor<13x31xf32>
   }
 }
diff --git a/test/ttmlir/Silicon/TTNN/n150/perf/test_perf_neg.mlir b/test/ttmlir/Silicon/TTNN/n150/perf/test_perf_neg.mlir
index 907541764d..d360d736f7 100644
--- a/test/ttmlir/Silicon/TTNN/n150/perf/test_perf_neg.mlir
+++ b/test/ttmlir/Silicon/TTNN/n150/perf/test_perf_neg.mlir
@@ -3,7 +3,9 @@
 // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn
 func.func @negate(%arg0: tensor<32x32xf32>) -> tensor<32x32xf32> {
   %0 = tensor.empty() : tensor<32x32xf32>
-  // CHECK: %[[C:.*]] = "ttnn.neg"[[C:.*]]
   %1 = "ttir.neg"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>}> : (tensor<32x32xf32>, tensor<32x32xf32>) -> tensor<32x32xf32>
+  // CHECK: "ttnn.neg"
+  // CHECK-SAME: tensor<32x32xf32
+  // CHECK-SAME: -> tensor<32x32xf32
   return %1 : tensor<32x32xf32>
 }
diff --git a/test/ttmlir/Silicon/TTNN/n150/perf/test_perf_not.mlir b/test/ttmlir/Silicon/TTNN/n150/perf/test_perf_not.mlir
index b9d07674ea..bbe9147173 100644
--- a/test/ttmlir/Silicon/TTNN/n150/perf/test_perf_not.mlir
+++ b/test/ttmlir/Silicon/TTNN/n150/perf/test_perf_not.mlir
@@ -3,10 +3,9 @@
 // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn
 func.func @logical_not(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> {
   %0 = tensor.empty() : tensor<64x128xf32>
-  // CHECK: {{.*}} = "ttnn.empty"{{.*}}
   %1 = "ttir.logical_not"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
-  // CHECK: %[[C:.*]] = "ttnn.logical_not"
-  // CHECK-SAME: tensor<64x128xf32,
-  // CHECK-SAME: tensor<64x128xf32,
+  // CHECK: "ttnn.logical_not"
+  // CHECK-SAME: tensor<64x128xf32
+  // CHECK-SAME: -> tensor<64x128xf32
   return %1 : tensor<64x128xf32>
 }
diff --git a/test/ttmlir/Silicon/TTNN/n150/perf/test_perf_or.mlir b/test/ttmlir/Silicon/TTNN/n150/perf/test_perf_or.mlir
index e6c7ec5550..4d9a5a93b8 100644
--- a/test/ttmlir/Silicon/TTNN/n150/perf/test_perf_or.mlir
+++ b/test/ttmlir/Silicon/TTNN/n150/perf/test_perf_or.mlir
@@ -3,11 +3,10 @@
 // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn
 func.func @logical_or(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xf32> {
   %0 = tensor.empty() : tensor<64x128xf32>
-  // CHECK: {{.*}} = "ttnn.empty"{{.*}}
   %1 = "ttir.logical_or"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>}> : (tensor<64x128xf32>, tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
-  // CHECK: %[[C:.*]] = "ttnn.logical_or"
-  // CHECK-SAME: tensor<64x128xf32,
-  // CHECK-SAME: tensor<64x128xf32,
-  // CHECK-SAME: tensor<64x128xf32,
+  // CHECK: "ttnn.logical_or"
+  // CHECK-SAME: tensor<64x128xf32
+  // CHECK-SAME: tensor<64x128xf32
+  // CHECK-SAME: -> tensor<64x128xf32
   return %1 : tensor<64x128xf32>
 }
diff --git a/test/ttmlir/Silicon/TTNN/n150/perf/test_perf_power.mlir b/test/ttmlir/Silicon/TTNN/n150/perf/test_perf_power.mlir
index 7b4eefe9e1..5532240bc6 100644
--- a/test/ttmlir/Silicon/TTNN/n150/perf/test_perf_power.mlir
+++ b/test/ttmlir/Silicon/TTNN/n150/perf/test_perf_power.mlir
@@ -3,9 +3,10 @@
 // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn
 func.func @power(%arg0: tensor<32x32xf32>, %arg1: tensor<32x32xf32>) -> tensor<32x32xf32> {
   %0 = tensor.empty() : tensor<32x32xf32>
-  // CHECK: %[[EMPTY:.*]] = "ttnn.empty"{{.*}} -> tensor<32x32xf32, {{.*}}
   %1 = "ttir.power"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>}> : (tensor<32x32xf32>, tensor<32x32xf32>, tensor<32x32xf32>) -> tensor<32x32xf32>
-  // CHECK: %[[REM:[0-9]+]] = "ttnn.pow"({{.*}}, {{.*}}, %[[EMPTY]]){{.*}} -> tensor<32x32xf32, {{.*}}
+  // CHECK: "ttnn.pow"
+  // CHECK-SAME: tensor<32x32xf32
+  // CHECK-SAME: tensor<32x32xf32
+  // CHECK-SAME: -> tensor<32x32xf32
   return %1 : tensor<32x32xf32>
-  // CHECK: return {{.*}} : tensor<32x32xf32, {{.*}}
 }
diff --git a/test/ttmlir/Silicon/TTNN/n150/perf/test_perf_reciprocal.mlir b/test/ttmlir/Silicon/TTNN/n150/perf/test_perf_reciprocal.mlir
index d17444e4b8..7038f19a9e 100644
--- a/test/ttmlir/Silicon/TTNN/n150/perf/test_perf_reciprocal.mlir
+++ b/test/ttmlir/Silicon/TTNN/n150/perf/test_perf_reciprocal.mlir
@@ -2,9 +2,10 @@
 // RUN: FileCheck %s --input-file=%t.mlir
 // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn
 func.func @reciprocal(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> {
-  // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]]
   %0 = tensor.empty() : tensor<64x128xf32>
-  // CHECK: %[[C:.*]] = "ttnn.reciprocal"[[C:.*]]
   %1 = "ttir.reciprocal"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
+  // CHECK: "ttnn.reciprocal"
+  // CHECK-SAME: tensor<64x128xf32
+  // CHECK-SAME: -> tensor<64x128xf32
   return %1 : tensor<64x128xf32>
 }
diff --git a/test/ttmlir/Silicon/TTNN/n150/perf/test_perf_relu.mlir b/test/ttmlir/Silicon/TTNN/n150/perf/test_perf_relu.mlir
index 0ae23ec155..86865f7a53 100644
--- a/test/ttmlir/Silicon/TTNN/n150/perf/test_perf_relu.mlir
+++ b/test/ttmlir/Silicon/TTNN/n150/perf/test_perf_relu.mlir
@@ -1,10 +1,12 @@
 // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" %s > %t.mlir
 // RUN: FileCheck %s --input-file=%t.mlir
 // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn
+
 func.func @relu(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> {
-  // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]]
   %0 = tensor.empty() : tensor<64x128xf32>
-  // CHECK: %[[C:.*]] = "ttnn.relu"[[C:.*]]
   %1 = "ttir.relu"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
+  // CHECK: "ttnn.relu"
+  // CHECK-SAME: tensor<64x128xf32
+  // CHECK-SAME: -> tensor<64x128xf32
   return %1 : tensor<64x128xf32>
 }
diff --git a/test/ttmlir/Silicon/TTNN/n150/perf/test_perf_remainder.mlir b/test/ttmlir/Silicon/TTNN/n150/perf/test_perf_remainder.mlir
index e358d663ec..d81bed4385 100644
--- a/test/ttmlir/Silicon/TTNN/n150/perf/test_perf_remainder.mlir
+++ b/test/ttmlir/Silicon/TTNN/n150/perf/test_perf_remainder.mlir
@@ -3,9 +3,10 @@
 // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn
 func.func @remainder(%arg0: tensor<32x32xf32>, %arg1: tensor<32x32xf32>) -> tensor<32x32xf32> {
   %0 = tensor.empty() : tensor<32x32xf32>
-  // CHECK: %[[EMPTY:.*]] = "ttnn.empty"{{.*}} -> tensor<32x32xf32, {{.*}}
   %1 = "ttir.remainder"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>}> : (tensor<32x32xf32>, tensor<32x32xf32>, tensor<32x32xf32>) -> tensor<32x32xf32>
-  // CHECK: %[[REM:[0-9]+]] = "ttnn.remainder"({{.*}}, {{.*}}, %[[EMPTY]]){{.*}} -> tensor<32x32xf32, {{.*}}
+  // CHECK: "ttnn.remainder"
+  // CHECK-SAME: tensor<32x32xf32
+  // CHECK-SAME: tensor<32x32xf32
+  // CHECK-SAME: -> tensor<32x32xf32
   return %1 : tensor<32x32xf32>
-  // CHECK: return {{.*}} : tensor<32x32xf32, {{.*}}
 }
diff --git a/test/ttmlir/Silicon/TTNN/n150/perf/test_perf_rsqrt.mlir b/test/ttmlir/Silicon/TTNN/n150/perf/test_perf_rsqrt.mlir
index 4c85d11caf..a31ab8554a 100644
--- a/test/ttmlir/Silicon/TTNN/n150/perf/test_perf_rsqrt.mlir
+++ b/test/ttmlir/Silicon/TTNN/n150/perf/test_perf_rsqrt.mlir
@@ -2,9 +2,10 @@
 // RUN: FileCheck %s --input-file=%t.mlir
 // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn
 func.func @rsqrt(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> {
-  // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]]
   %0 = tensor.empty() : tensor<64x128xf32>
-  // CHECK: %[[C:.*]] = "ttnn.rsqrt"[[C:.*]]
   %1 = "ttir.rsqrt"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
+  // CHECK: "ttnn.rsqrt"
+  // CHECK-SAME: tensor<64x128xf32
+  // CHECK-SAME: -> tensor<64x128xf32
   return %1 : tensor<64x128xf32>
 }
diff --git a/test/ttmlir/Silicon/TTNN/n150/perf/test_perf_sigmoid.mlir b/test/ttmlir/Silicon/TTNN/n150/perf/test_perf_sigmoid.mlir
index 9583be9577..b0f67ae3bf 100644
--- a/test/ttmlir/Silicon/TTNN/n150/perf/test_perf_sigmoid.mlir
+++ b/test/ttmlir/Silicon/TTNN/n150/perf/test_perf_sigmoid.mlir
@@ -1,10 +1,12 @@
 // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" %s > %t.mlir
 // RUN: FileCheck %s --input-file=%t.mlir
 // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn
+
 func.func @sigmoid(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> {
-  // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]]
   %0 = tensor.empty() : tensor<64x128xf32>
-  // CHECK: %[[C:.*]] = "ttnn.sigmoid"[[C:.*]]
   %1 = "ttir.sigmoid"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
+  // CHECK: "ttnn.sigmoid"
+  // CHECK-SAME: tensor<64x128xf32
+  // CHECK-SAME: -> tensor<64x128xf32
   return %1 : tensor<64x128xf32>
 }
diff --git a/test/ttmlir/Silicon/TTNN/n150/perf/test_perf_sign.mlir b/test/ttmlir/Silicon/TTNN/n150/perf/test_perf_sign.mlir
index 8a05b1ae6d..d1f5efae8f 100644
--- a/test/ttmlir/Silicon/TTNN/n150/perf/test_perf_sign.mlir
+++ b/test/ttmlir/Silicon/TTNN/n150/perf/test_perf_sign.mlir
@@ -1,11 +1,12 @@
 // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" %s > %t.mlir
 // RUN: FileCheck %s --input-file=%t.mlir
 // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn
+
 func.func @sign(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> {
   %0 = tensor.empty() : tensor<64x128xf32>
-  // CHECK: [[VAL0:%[0-9]+]] = "ttnn.empty"(%{{[0-9]+}}) <{dtype = {{.*}}, layout = {{.*}}, memory_config = {{.*}}, <{{.*}}>>, shape = #ttnn.shape<[[TENSOR_SHAPE:[0-9]+x[0-9]+]]>}>
   %1 = "ttir.sign"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
-  // CHECK: %{{[0-9]+}} = "ttnn.sign"(%arg0, [[VAL0]]) <{operandSegmentSizes = array<i32: 1, 1>}> : (tensor<[[TENSOR_SHAPE]]x{{.*}}, {{.*}}>, tensor<[[TENSOR_SHAPE]]x{{.*}}, {{.*}}) -> tensor<[[TENSOR_SHAPE]]x{{.*}}, {{.*}}>
+  // CHECK: "ttnn.sign"
+  // CHECK-SAME: tensor<64x128xf32
+  // CHECK-SAME: -> tensor<64x128xf32
   return %1 : tensor<64x128xf32>
-  // CHECK: return %{{[0-9]+}} : tensor<[[TENSOR_SHAPE]]xf32, {{.*}}>
 }
diff --git a/test/ttmlir/Silicon/TTNN/n150/perf/test_perf_sine.mlir b/test/ttmlir/Silicon/TTNN/n150/perf/test_perf_sine.mlir
index 60dc574693..ceb105dcd3 100644
--- a/test/ttmlir/Silicon/TTNN/n150/perf/test_perf_sine.mlir
+++ b/test/ttmlir/Silicon/TTNN/n150/perf/test_perf_sine.mlir
@@ -1,10 +1,12 @@
 // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" %s > %t.mlir
 // RUN: FileCheck %s --input-file=%t.mlir
 // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn
+
 func.func @sine(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> {
   %0 = tensor.empty() : tensor<64x128xf32>
-  // CHECK: [[VAL0:%[0-9]+]] = "ttnn.empty"(%{{[0-9]+}})
-  // CHECK: %{{[0-9]+}} = "ttnn.sin"(%arg0, [[VAL0]])
   %1 = "ttir.sin"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
+  // CHECK: "ttnn.sin"
+  // CHECK-SAME: tensor<64x128xf32
+  // CHECK-SAME: -> tensor<64x128xf32
   return %1 : tensor<64x128xf32>
 }
diff --git a/test/ttmlir/Silicon/TTNN/n150/perf/test_perf_sqrt.mlir b/test/ttmlir/Silicon/TTNN/n150/perf/test_perf_sqrt.mlir
index 72e7bb579f..6eb8e91d8b 100644
--- a/test/ttmlir/Silicon/TTNN/n150/perf/test_perf_sqrt.mlir
+++ b/test/ttmlir/Silicon/TTNN/n150/perf/test_perf_sqrt.mlir
@@ -2,9 +2,10 @@
 // RUN: FileCheck %s --input-file=%t.mlir
 // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn
 func.func @sqrt(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> {
-  // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]]
   %0 = tensor.empty() : tensor<64x128xf32>
-  // CHECK: %[[C:.*]] = "ttnn.sqrt"[[C:.*]]
   %1 = "ttir.sqrt"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
+  // CHECK: "ttnn.sqrt"
+  // CHECK-SAME: tensor<64x128xf32
+  // CHECK-SAME: -> tensor<64x128xf32
   return %1 : tensor<64x128xf32>
 }
diff --git a/test/ttmlir/Silicon/TTNN/n150/perf/test_perf_subtract.mlir b/test/ttmlir/Silicon/TTNN/n150/perf/test_perf_subtract.mlir
index 679994dc53..9629aecd2f 100644
--- a/test/ttmlir/Silicon/TTNN/n150/perf/test_perf_subtract.mlir
+++ b/test/ttmlir/Silicon/TTNN/n150/perf/test_perf_subtract.mlir
@@ -2,9 +2,11 @@
 // RUN: FileCheck %s --input-file=%t.mlir
 // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn
 func.func @subtract(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xf32> {
-  // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]]
   %0 = tensor.empty() : tensor<64x128xf32>
-  // CHECK: %[[C:.*]] = "ttnn.subtract"[[C:.*]]
   %1 = "ttir.subtract"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>}> : (tensor<64x128xf32>, tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
+  // CHECK: "ttnn.subtract"
+  // CHECK-SAME: tensor<64x128xf32
+  // CHECK-SAME: tensor<64x128xf32
+  // CHECK-SAME: -> tensor<64x128xf32
   return %1 : tensor<64x128xf32>
 }
diff --git a/test/ttmlir/Silicon/TTNN/n150/perf/test_perf_tan.mlir b/test/ttmlir/Silicon/TTNN/n150/perf/test_perf_tan.mlir
index d870cee186..1506c2193b 100644
--- a/test/ttmlir/Silicon/TTNN/n150/perf/test_perf_tan.mlir
+++ b/test/ttmlir/Silicon/TTNN/n150/perf/test_perf_tan.mlir
@@ -4,8 +4,9 @@
 func.func @tan(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> {
   %0 = tensor.empty() : tensor<64x128xf32>
-  // CHECK: [[VAL0:%[0-9]+]] = "ttnn.empty"(%{{[0-9]+}})
-  // CHECK: %{{[0-9]+}} = "ttnn.tan"(%arg0, [[VAL0]])
   %1 = "ttir.tan"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
+  // CHECK: "ttnn.tan"
+  // CHECK-SAME: tensor<64x128xf32
+  // CHECK-SAME: -> tensor<64x128xf32
   return %1 : tensor<64x128xf32>
 }
diff --git a/test/ttmlir/Silicon/TTNN/n150/perf/test_perf_tanh.mlir b/test/ttmlir/Silicon/TTNN/n150/perf/test_perf_tanh.mlir
index cf12dcf9ac..dd0eedd4ec 100644
--- a/test/ttmlir/Silicon/TTNN/n150/perf/test_perf_tanh.mlir
+++ b/test/ttmlir/Silicon/TTNN/n150/perf/test_perf_tanh.mlir
@@ -4,8 +4,9 @@
 func.func @tanh(%arg0: tensor<64x128xf32>) -> tensor<64x128xf32> {
   %0 = tensor.empty() : tensor<64x128xf32>
-  // CHECK: [[VAL0:%[0-9]+]] = "ttnn.empty"(%{{[0-9]+}})
-  // CHECK: %{{[0-9]+}} = "ttnn.tanh"(%arg0, [[VAL0]])
   %1 = "ttir.tanh"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>}> : (tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
+  // CHECK: "ttnn.tanh"
+  // CHECK-SAME: tensor<64x128xf32
+  // CHECK-SAME: -> tensor<64x128xf32
   return %1 : tensor<64x128xf32>
 }
diff --git a/test/ttmlir/Silicon/TTNN/n150/perf/test_perf_where.mlir b/test/ttmlir/Silicon/TTNN/n150/perf/test_perf_where.mlir
index ced442b91f..9d450ebacc 100644
--- a/test/ttmlir/Silicon/TTNN/n150/perf/test_perf_where.mlir
+++ b/test/ttmlir/Silicon/TTNN/n150/perf/test_perf_where.mlir
@@ -7,8 +7,7 @@ func.func @test_where(%arg0: tensor<13x37xbf16>, %arg1: tensor<13x37xbf16>) -> t
   %1 = "ttir.eq"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>}> : (tensor<13x37xbf16>, tensor<13x37xbf16>, tensor<13x37xbf16>) -> tensor<13x37xbf16>
   %2 = tensor.empty() : tensor<13x37xbf16>
   %3 = "ttir.where"(%1, %arg0, %arg1, %2) <{operandSegmentSizes = array<i32: 3, 1>}> : (tensor<13x37xbf16>, tensor<13x37xbf16>, tensor<13x37xbf16>, tensor<13x37xbf16>) -> tensor<13x37xbf16>
-  // CHECK: %[[EMPTY:.*]] = "ttnn.empty"{{.*}}
-  // CHECK: %[[VAL1:[0-9]+]] = "ttnn.eq"(%arg0, %arg1, %[[EMPTY]])
-  // CHECK: %{{[0-9]+}} = "ttnn.where"(%[[VAL1]], %arg0, %arg1, %{{[0-9]+}})
+  // CHECK: "ttnn.eq"
+  // CHECK: "ttnn.where"
   return %3 : tensor<13x37xbf16>
 }
diff --git a/test/ttmlir/Silicon/TTNN/n150/perf/test_perf_xor.mlir b/test/ttmlir/Silicon/TTNN/n150/perf/test_perf_xor.mlir
index d68b726083..c0920c2a64 100644
--- a/test/ttmlir/Silicon/TTNN/n150/perf/test_perf_xor.mlir
+++ b/test/ttmlir/Silicon/TTNN/n150/perf/test_perf_xor.mlir
@@ -3,13 +3,11 @@
 // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn
 func.func @logical_xor(%arg0: tensor<64x128xbf16>, %arg1: tensor<64x128xbf16>) -> tensor<64x128xbf16> {
-  // CHECK: %{{[0-9]+}} = "ttnn.empty"{{.*}} [[TENSOR:tensor<64x128xbf16]]
   %0 = tensor.empty() : tensor<64x128xbf16>
-  // CHECK: %{{[0-9]+}} = "ttnn.logical_xor"
-  // CHECK-SAME: [[TENSOR]]
-  // CHECK-SAME: [[TENSOR]]
-  // CHECK-SAME: [[TENSOR]]
-  // CHECK-SAME: -> [[TENSOR]]
   %1 = "ttir.logical_xor"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>}> : (tensor<64x128xbf16>, tensor<64x128xbf16>, tensor<64x128xbf16>) -> tensor<64x128xbf16>
+  // CHECK: "ttnn.logical_xor"
+  // CHECK-SAME: (tensor<64x128xbf16
+  // CHECK-SAME: tensor<64x128xbf16
+  // CHECK-SAME: -> tensor<64x128xbf16
   return %1 : tensor<64x128xbf16>
 }

diff --git a/test/ttmlir/Silicon/TTNN/n150/runtime_stitching/eltwise_binary_op_chain.mlir b/test/ttmlir/Silicon/TTNN/n150/runtime_stitching/eltwise_binary_op_chain.mlir
index bd03fbe155..aff5e3cdb6 100644
--- a/test/ttmlir/Silicon/TTNN/n150/runtime_stitching/eltwise_binary_op_chain.mlir
+++ b/test/ttmlir/Silicon/TTNN/n150/runtime_stitching/eltwise_binary_op_chain.mlir
@@ -4,25 +4,22 @@
 module attributes {} {
   func.func @add(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xf32> {
-    // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]]
     %0 = tensor.empty() : tensor<64x128xf32>
-    // CHECK: %[[C:.*]] = "ttnn.add"[[C:.*]]
+    // CHECK: "ttnn.add"
     %1 = "ttir.add"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>}> : (tensor<64x128xf32>, tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
     return %1 : tensor<64x128xf32>
   }

   func.func @multiply(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xf32> {
-    // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]]
     %0 = tensor.empty() : tensor<64x128xf32>
-    // CHECK: %[[C:.*]] = "ttnn.multiply"[[C:.*]]
+    // CHECK: "ttnn.multiply"
     %1 = "ttir.multiply"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>}> : (tensor<64x128xf32>, tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
     return %1 : tensor<64x128xf32>
   }

   func.func @subtract(%arg0: tensor<64x128xf32>, %arg1: tensor<64x128xf32>) -> tensor<64x128xf32> {
-    // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]]
     %0 = tensor.empty() : tensor<64x128xf32>
-    // CHECK: %[[C:.*]] = "ttnn.subtract"[[C:.*]]
+    // CHECK: "ttnn.subtract"
     %1 = "ttir.subtract"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>}> : (tensor<64x128xf32>, tensor<64x128xf32>, tensor<64x128xf32>) -> tensor<64x128xf32>
     return %1 : tensor<64x128xf32>
   }

diff --git a/test/ttmlir/Silicon/TTNN/n150/sharded/simple_eltwise_sharded.mlir b/test/ttmlir/Silicon/TTNN/n150/sharded/simple_eltwise_sharded.mlir
index d74b582ede..8a8150d932 100644
--- a/test/ttmlir/Silicon/TTNN/n150/sharded/simple_eltwise_sharded.mlir
+++ b/test/ttmlir/Silicon/TTNN/n150/sharded/simple_eltwise_sharded.mlir
@@ -1,80 +1,73 @@
 // RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path% enable-optimizer=false" %s > %t.mlir
 // RUN: FileCheck %s --input-file=%t.mlir
 // RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn
+
 func.func @subtract(%arg0: tensor<224x64xf32>, %arg1: tensor<224x64xf32>) -> tensor<224x64xf32> {
-  // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]]
   %0 = tensor.empty() : tensor<224x64xf32>
-  // CHECK: %[[C:.*]] = "ttnn.subtract"[[C:.*]]
+  // CHECK: "ttnn.subtract"
   %1 = "ttir.subtract"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>}> : (tensor<224x64xf32>, tensor<224x64xf32>, tensor<224x64xf32>) -> tensor<224x64xf32>
   return %1 : tensor<224x64xf32>
 }

 func.func @div(%arg0: tensor<224x64xf32>, %arg1: tensor<224x64xf32>) -> tensor<224x64xf32> {
-  // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]]
   %0 = tensor.empty() : tensor<224x64xf32>
-  // CHECK: %[[C:.*]] = "ttnn.div"[[C:.*]]
+  // CHECK: "ttnn.div"
   %1 = "ttir.div"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>}> : (tensor<224x64xf32>, tensor<224x64xf32>, tensor<224x64xf32>) -> tensor<224x64xf32>
   return %1 : tensor<224x64xf32>
 }

 func.func @multiply(%arg0: tensor<224x64xf32>, %arg1: tensor<224x64xf32>) -> tensor<224x64xf32> {
-  // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]]
   %0 = tensor.empty() : tensor<224x64xf32>
-  // CHECK: %[[C:.*]] = "ttnn.multiply"[[C:.*]]
+  // CHECK: "ttnn.multiply"
   %1 = "ttir.multiply"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>}> : (tensor<224x64xf32>, tensor<224x64xf32>, tensor<224x64xf32>) -> tensor<224x64xf32>
   return %1 : tensor<224x64xf32>
 }

 func.func @relu(%arg0: tensor<224x64xf32>) -> tensor<224x64xf32> {
-  // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]]
   %0 = tensor.empty() : tensor<224x64xf32>
-  // CHECK: %[[C:.*]] = "ttnn.relu"[[C:.*]]
+  // CHECK: "ttnn.relu"
   %1 = "ttir.relu"(%arg0, %0) <{operandSegmentSizes = array<i32: 1, 1>}> : (tensor<224x64xf32>, tensor<224x64xf32>) -> tensor<224x64xf32>
   return %1 : tensor<224x64xf32>
 }

 func.func @ge(%arg0: tensor<224x64xf32>, %arg1: tensor<224x64xf32>) -> tensor<224x64xf32> {
-  // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]]
   %0 = tensor.empty() : tensor<224x64xf32>
-  // CHECK: %[[C:.*]] = "ttnn.ge"[[C:.*]]
+  // CHECK: "ttnn.ge"
   %1 = "ttir.ge"(%arg0, %arg1, %0) <{operandSegmentSizes = array<i32: 2, 1>}> : (tensor<224x64xf32>, tensor<224x64xf32>, tensor<224x64xf32>) -> tensor<224x64xf32>
   return %1 : tensor<224x64xf32>
 }

 func.func @reshape(%arg0: tensor<4x2x224x64xbf16>) -> tensor<2x4x224x64xbf16> {
   %0 = tensor.empty() : tensor<2x4x224x64xbf16>
-  // CHECK: %[[C:.*]] = "ttnn.reshape"[[C:.*]]
+  // CHECK: "ttnn.reshape"
   %1 = "ttir.reshape"(%arg0, %0) <{shape = [2: i32, 4: i32, 224: i32, 64: i32]}> : (tensor<4x2x224x64xbf16>, tensor<2x4x224x64xbf16>) -> tensor<2x4x224x64xbf16>
   return %1 : tensor<2x4x224x64xbf16>
 }

 func.func @squeeze(%arg0: tensor<1x2x1x224x64xbf16>) -> tensor<1x2x224x64xbf16> {
   %0 = tensor.empty() : tensor<1x2x224x64xbf16>
-  // CHECK: %[[C:.*]] = "ttnn.reshape"[[C:.*]]
+  // CHECK: "ttnn.reshape"
   %1 = "ttir.squeeze"(%arg0, %0) <{dim = 2 : si32}> : (tensor<1x2x1x224x64xbf16>, tensor<1x2x224x64xbf16>) -> tensor<1x2x224x64xbf16>
   return %1 : tensor<1x2x224x64xbf16>
 }

 func.func @reciprocal(%arg0: tensor<224x64xf32>) -> tensor<224x64xf32> {
-  // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]]
   %0 = tensor.empty() : tensor<224x64xf32>
-  // CHECK: %[[C:.*]] = "ttnn.reciprocal"[[C:.*]]
+  // CHECK: "ttnn.reciprocal"
"ttir.reciprocal"(%arg0, %0) <{operandSegmentSizes = array}> : (tensor<224x64xf32>, tensor<224x64xf32>) -> tensor<224x64xf32> return %1 : tensor<224x64xf32> } func.func @sigmoid(%arg0: tensor<224x64xf32>) -> tensor<224x64xf32> { - // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]] %0 = tensor.empty() : tensor<224x64xf32> - // CHECK: %[[C:.*]] = "ttnn.sigmoid"[[C:.*]] + // CHECK: "ttnn.sigmoid" %1 = "ttir.sigmoid"(%arg0, %0) <{operandSegmentSizes = array}> : (tensor<224x64xf32>, tensor<224x64xf32>) -> tensor<224x64xf32> return %1 : tensor<224x64xf32> } func.func @sqrt(%arg0: tensor<224x64xf32>) -> tensor<224x64xf32> { - // CHECK: %[[C:.*]] = "ttnn.empty"[[C:.*]] %0 = tensor.empty() : tensor<224x64xf32> - // CHECK: %[[C:.*]] = "ttnn.sqrt"[[C:.*]] + // CHECK: "ttnn.sqrt" %1 = "ttir.sqrt"(%arg0, %0) <{operandSegmentSizes = array}> : (tensor<224x64xf32>, tensor<224x64xf32>) -> tensor<224x64xf32> return %1 : tensor<224x64xf32> } diff --git a/test/ttmlir/Silicon/TTNN/n150/simple_power.mlir b/test/ttmlir/Silicon/TTNN/n150/simple_power.mlir deleted file mode 100644 index d9c49b8c63..0000000000 --- a/test/ttmlir/Silicon/TTNN/n150/simple_power.mlir +++ /dev/null @@ -1,11 +0,0 @@ -// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" %s > %t.mlir -// RUN: FileCheck %s --input-file=%t.mlir -// RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn -func.func @power(%arg0: tensor<32x32xf32>, %arg1: tensor<32x32xf32>) -> tensor<32x32xf32> { - %0 = tensor.empty() : tensor<32x32xf32> - // CHECK: %[[EMPTY:.*]] = "ttnn.empty"{{.*}} -> tensor<32x32xf32, {{.*}} - %1 = "ttir.power"(%arg0, %arg1, %0) <{operandSegmentSizes = array}> : (tensor<32x32xf32>, tensor<32x32xf32>, tensor<32x32xf32>) -> tensor<32x32xf32> - // CHECK: %[[POW:[0-9]+]] = "ttnn.pow"({{.*}}, {{.*}}, %[[EMPTY]]){{.*}} -> tensor<32x32xf32, {{.*}} - return %1 : tensor<32x32xf32> - // CHECK: return {{.*}} : tensor<32x32xf32, {{.*}} -} diff --git a/test/unittests/Optimizer/TestGreedyL1InterleavedPolicy.cpp b/test/unittests/Optimizer/TestGreedyL1InterleavedPolicy.cpp index a8ad3df448..a04da6607a 100644 --- a/test/unittests/Optimizer/TestGreedyL1InterleavedPolicy.cpp +++ b/test/unittests/Optimizer/TestGreedyL1InterleavedPolicy.cpp @@ -131,44 +131,40 @@ TEST_F(GreedyL1InterleavedPolicyBase, VerifyGreedyPolicy) { constexpr uint64_t usableL1CacheSize = 15; // Create operand A - mlir::Value dest = createEmptyTensor(); mlir::Value lhs = func.getBody().getBlocks().front().getArgument(0); mlir::Value rhs = func.getBody().getBlocks().front().getArgument(1); mlir::Operation *opA = - builder.create(builder.getUnknownLoc(), lhs, rhs, dest); + builder.create(builder.getUnknownLoc(), lhs, rhs); uint64_t outputL1Usage = 2; uint64_t requiredL1Usage = 8; prepareOpForGreedyConfigPicker(opA, outputL1Usage, requiredL1Usage, legalLayouts, opsL1Usage); // Create operand B - dest = createEmptyTensor(); lhs = func.getBody().getBlocks().front().getArgument(0); rhs = func.getBody().getBlocks().front().getArgument(1); mlir::Operation *opB = - builder.create(builder.getUnknownLoc(), lhs, rhs, dest); + builder.create(builder.getUnknownLoc(), lhs, rhs); outputL1Usage = 3; requiredL1Usage = 7; prepareOpForGreedyConfigPicker(opB, outputL1Usage, requiredL1Usage, legalLayouts, opsL1Usage); // Create operand C - dest = createEmptyTensor(); lhs = func.getBody().getBlocks().front().getArgument(0); rhs = func.getBody().getBlocks().front().getArgument(1); mlir::Operation *opC = - builder.create(builder.getUnknownLoc(), lhs, rhs, dest); + 
   outputL1Usage = 1;
   requiredL1Usage = 9;
   prepareOpForGreedyConfigPicker(opC, outputL1Usage, requiredL1Usage,
                                  legalLayouts, opsL1Usage);

   // Create base op D
-  dest = createEmptyTensor();
   lhs = func.getBody().getBlocks().front().getArgument(0);
   rhs = func.getBody().getBlocks().front().getArgument(1);
   mlir::Operation *opD =
-      builder.create<ttnn::AddOp>(builder.getUnknownLoc(), lhs, rhs, dest);
+      builder.create<ttnn::AddOp>(builder.getUnknownLoc(), lhs, rhs);
   outputL1Usage = 4;
   requiredL1Usage = 0;
   prepareOpForGreedyConfigPicker(opD, outputL1Usage, requiredL1Usage,
diff --git a/test/unittests/Optimizer/TestShardSolver.cpp b/test/unittests/Optimizer/TestShardSolver.cpp
index 4693bc6851..fbde684a9f 100644
--- a/test/unittests/Optimizer/TestShardSolver.cpp
+++ b/test/unittests/Optimizer/TestShardSolver.cpp
@@ -145,11 +145,10 @@ TEST_F(ShardSolverBase, VerifyProduceMaxCoreUsage) {
   constexpr unsigned usableL1CacheSize = 1024 * 1024;
   std::unordered_set<Edge> overrideReshardEdges;

-  mlir::Value dest = createEmptyTensor();
   mlir::Value lhs = func.getBody().getBlocks().front().getArgument(0);
   mlir::Value rhs = func.getBody().getBlocks().front().getArgument(1);
   mlir::Operation *op =
-      builder.create<ttnn::AddOp>(builder.getUnknownLoc(), lhs, rhs, dest);
+      builder.create<ttnn::AddOp>(builder.getUnknownLoc(), lhs, rhs);
   mlir::Operation *firstOp = op;

   prepareOpForShardSolver(op, opL1MemSpecs, l1ChainedOps);
@@ -161,8 +160,7 @@ TEST_F(ShardSolverBase, VerifyProduceMaxCoreUsage) {
                  TensorMemoryLayout::BlockSharded, 2, 2);

   rhs = op->getResult(0);
-  dest = createEmptyTensor();
-  op = builder.create<ttnn::ReluOp>(builder.getUnknownLoc(), rhs, dest);
+  op = builder.create<ttnn::ReluOp>(builder.getUnknownLoc(), rhs);
   prepareOpForShardSolver(op, opL1MemSpecs, l1ChainedOps);
   addLayoutForOp(op, legalLayouts, BufferType::L1,
                  TensorMemoryLayout::WidthSharded, 1, 8);
@@ -174,8 +172,7 @@ TEST_F(ShardSolverBase, VerifyProduceMaxCoreUsage) {

   lhs = func.getBody().getBlocks().front().getArgument(0);
   rhs = op->getResult(0);
-  dest = createEmptyTensor();
-  op = builder.create<ttnn::AddOp>(builder.getUnknownLoc(), lhs, rhs, dest);
+  op = builder.create<ttnn::AddOp>(builder.getUnknownLoc(), lhs, rhs);
   prepareOpForShardSolver(op, opL1MemSpecs, l1ChainedOps);
   addLayoutForOp(op, legalLayouts, BufferType::L1,
                  TensorMemoryLayout::WidthSharded, 1, 4);
@@ -184,8 +181,7 @@ TEST_F(ShardSolverBase, VerifyProduceMaxCoreUsage) {
   addLayoutForOp(op, legalLayouts, BufferType::L1,
                  TensorMemoryLayout::BlockSharded, 1, 1);

-  dest = createEmptyTensor();
-  op = builder.create<ttnn::AddOp>(builder.getUnknownLoc(), lhs, rhs, dest);
+  op = builder.create<ttnn::AddOp>(builder.getUnknownLoc(), lhs, rhs);
   prepareOpForShardSolver(op, opL1MemSpecs, l1ChainedOps);
   addLayoutForOp(op, legalLayouts, BufferType::L1,
                  TensorMemoryLayout::WidthSharded, 1, 4);
@@ -196,8 +192,7 @@ TEST_F(ShardSolverBase, VerifyProduceMaxCoreUsage) {

   lhs = opL1MemSpecs[opL1MemSpecs.size() - 2].op->getResult(0);
   rhs = opL1MemSpecs[opL1MemSpecs.size() - 1].op->getResult(0);
-  dest = createEmptyTensor();
-  op = builder.create<ttnn::AddOp>(builder.getUnknownLoc(), lhs, rhs, dest);
+  op = builder.create<ttnn::AddOp>(builder.getUnknownLoc(), lhs, rhs);
   prepareOpForShardSolver(op, opL1MemSpecs, l1ChainedOps);
   addLayoutForOp(op, legalLayouts, BufferType::L1,
                  TensorMemoryLayout::WidthSharded, 1, 2);
@@ -207,8 +202,7 @@ TEST_F(ShardSolverBase, VerifyProduceMaxCoreUsage) {
                  TensorMemoryLayout::BlockSharded, 1, 1);

   rhs = op->getResult(0);
-  dest = createEmptyTensor();
-  op = builder.create<ttnn::ReluOp>(builder.getUnknownLoc(), rhs, dest);
+  op = builder.create<ttnn::ReluOp>(builder.getUnknownLoc(), rhs);
   prepareOpForShardSolver(op, opL1MemSpecs, l1ChainedOps);
   addLayoutForOp(op, legalLayouts, BufferType::L1,
                  TensorMemoryLayout::WidthSharded, 1, 2);