[mlir][tosa] Flip accessors used to prefixed form (NFC)
Follow-up from the dialect flip; this just flips the accessors used to the prefixed form. Both forms are still generated; a before/after sketch follows the change summary below.
jpienaar committed Jul 22, 2022
1 parent 475a39f commit 13448db
Showing 13 changed files with 247 additions and 242 deletions.
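
The flip is mechanical: each call site moves from the raw snake_case accessor to the prefixed form that ODS also generates, and since both forms are still generated the two styles compile side by side during the migration. A minimal sketch of the pattern on a tosa::ApplyScaleOp, using accessor names taken from the hunks below:

```cpp
// Both accessor forms are generated while the migration is in
// progress, so each pair resolves to the same operand/attribute:
Value v1 = op.value();          // raw (snake_case) form, being phased out
Value v2 = op.getValue();       // prefixed form, used after this commit
bool dr1 = op.double_round();   // raw form of the double_round attribute
bool dr2 = op.getDoubleRound(); // prefixed form
```

Attribute accessors follow the same renaming, with snake_case segments CamelCased after the get prefix (e.g. stride_fp becomes getStrideFp, quantization_info becomes getQuantizationInfo).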
20 changes: 10 additions & 10 deletions mlir/lib/Conversion/TosaToArith/TosaToArith.cpp
@@ -28,7 +28,7 @@ class ConstOpConverter : public OpRewritePattern<tosa::ConstOp> {

LogicalResult matchAndRewrite(tosa::ConstOp op,
PatternRewriter &rewriter) const final {
- rewriter.replaceOpWithNewOp<arith::ConstantOp>(op, op.value());
+ rewriter.replaceOpWithNewOp<arith::ConstantOp>(op, op.getValue());
return success();
}
};
@@ -66,8 +66,8 @@ class ApplyScaleGenericOpConverter
LogicalResult matchAndRewrite(tosa::ApplyScaleOp op,
PatternRewriter &rewriter) const final {
Location loc = op.getLoc();
- Value value = op.value();
- Value multiplier32 = op.multiplier();
+ Value value = op.getValue();
+ Value multiplier32 = op.getMultiplier();

Type resultTy = op.getType();
Type valueTy = value.getType();
@@ -78,7 +78,7 @@ class ApplyScaleGenericOpConverter
Value one64 = getConstantValue(loc, i64Ty, 1, rewriter);
Value thirtyOne32 = getConstantValue(loc, i32Ty, 31, rewriter);

- Value shift32 = rewriter.create<arith::ExtUIOp>(loc, i32Ty, op.shift());
+ Value shift32 = rewriter.create<arith::ExtUIOp>(loc, i32Ty, op.getShift());

// Compute the multiplication in 64-bits then select the high / low parts.
Value value64 = rewriter.create<arith::ExtSIOp>(loc, i64Ty, value);
@@ -94,7 +94,7 @@ class ApplyScaleGenericOpConverter
multiply64 = rewriter.create<arith::AddIOp>(loc, multiply64, round);

// Apply double rounding if necessary.
- if (op.double_round()) {
+ if (op.getDoubleRound()) {
int64_t roundInt = 1 << 30;
Value roundUp = getConstantValue(loc, i64Ty, roundInt, rewriter);
Value roundDown = getConstantValue(loc, i64Ty, -roundInt, rewriter);
@@ -129,14 +129,14 @@ class ApplyScale32BitOpConverter : public OpRewritePattern<tosa::ApplyScaleOp> {
Type i32Ty = matchContainerType(rewriter.getI32Type(), resultTy);
Type i64Ty = matchContainerType(rewriter.getI64Type(), resultTy);

- Value value = op.value();
+ Value value = op.getValue();
if (getElementTypeOrSelf(value.getType()).getIntOrFloatBitWidth() > 32) {
return failure();
}

- Value value32 = op.value();
- Value multiplier32 = op.multiplier();
- Value shift32 = rewriter.create<arith::ExtUIOp>(loc, i32Ty, op.shift());
+ Value value32 = op.getValue();
+ Value multiplier32 = op.getMultiplier();
+ Value shift32 = rewriter.create<arith::ExtUIOp>(loc, i32Ty, op.getShift());

// Constants used during the scaling operation.
Value zero32 = getConstantValue(loc, i32Ty, 0, rewriter);
@@ -176,7 +176,7 @@ class ApplyScale32BitOpConverter : public OpRewritePattern<tosa::ApplyScaleOp> {
rewriter.create<arith::SelectOp>(loc, shiftOver32, shiftHighR, zero32);

// Conditionally perform our double round.
- if (op.double_round()) {
+ if (op.getDoubleRound()) {
Value negOne32 = getConstantValue(loc, i32Ty, -1, rewriter);
Value valuePositive = rewriter.create<arith::CmpIOp>(
loc, arith::CmpIPredicate::sge, value32, zero32);
100 changes: 50 additions & 50 deletions mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp
@@ -77,7 +77,7 @@ createLinalgBodyCalculationForElementwiseOp(Operation *op, ValueRange args,

// tosa::MulOp
if (isa<tosa::MulOp>(op) && elementTy.isa<FloatType>()) {
- if (dyn_cast<tosa::MulOp>(op).shift() != 0) {
+ if (dyn_cast<tosa::MulOp>(op).getShift() != 0) {
(void)rewriter.notifyMatchFailure(op,
"Cannot have shift value for float");
return nullptr;
@@ -137,15 +137,15 @@ createLinalgBodyCalculationForElementwiseOp(Operation *op, ValueRange args,
return rewriter.create<arith::NegFOp>(loc, resultTypes, args);

if (isa<tosa::NegateOp>(op) && elementTy.isa<IntegerType>() &&
- !cast<tosa::NegateOp>(op).quantization_info()) {
+ !cast<tosa::NegateOp>(op).getQuantizationInfo()) {
auto constant =
rewriter.create<arith::ConstantOp>(loc, IntegerAttr::get(elementTy, 0));
return rewriter.create<arith::SubIOp>(loc, resultTypes, constant, args[0]);
}

if (isa<tosa::NegateOp>(op) && elementTy.isa<IntegerType>() &&
- cast<tosa::NegateOp>(op).quantization_info()) {
- auto quantizationInfo = cast<tosa::NegateOp>(op).quantization_info();
+ cast<tosa::NegateOp>(op).getQuantizationInfo()) {
+ auto quantizationInfo = cast<tosa::NegateOp>(op).getQuantizationInfo();
int32_t inputBitWidth = elementTy.getIntOrFloatBitWidth();
int64_t inZp = quantizationInfo.value().getInputZp();
int64_t outZp = quantizationInfo.value().getOutputZp();
@@ -978,7 +978,7 @@ class ReshapeConverterCollapse : public OpConversionPattern<tosa::ReshapeOp> {
LogicalResult
matchAndRewrite(tosa::ReshapeOp reshape, OpAdaptor adaptor,
ConversionPatternRewriter &rewriter) const final {
- ShapedType operandTy = adaptor.input1().getType().cast<ShapedType>();
+ ShapedType operandTy = adaptor.getInput1().getType().cast<ShapedType>();
ShapedType resultTy = reshape.getType().template cast<ShapedType>();
bool isDynamic = !operandTy.hasStaticShape();

@@ -1021,7 +1021,7 @@ class ReshapeConverterExpand : public OpConversionPattern<tosa::ReshapeOp> {
LogicalResult
matchAndRewrite(tosa::ReshapeOp reshape, OpAdaptor adaptor,
ConversionPatternRewriter &rewriter) const final {
- ShapedType operandTy = adaptor.input1().getType().cast<ShapedType>();
+ ShapedType operandTy = adaptor.getInput1().getType().cast<ShapedType>();
ShapedType resultTy = reshape.getType().template cast<ShapedType>();
bool isDynamic = !operandTy.hasStaticShape();

@@ -1065,7 +1065,7 @@ class ReshapeConverterCollapseExpand
LogicalResult
matchAndRewrite(tosa::ReshapeOp reshape, OpAdaptor adaptor,
ConversionPatternRewriter &rewriter) const final {
- ShapedType operandTy = adaptor.input1().getType().cast<ShapedType>();
+ ShapedType operandTy = adaptor.getInput1().getType().cast<ShapedType>();
ShapedType resultTy = reshape.getType().template cast<ShapedType>();
bool isDynamic = !operandTy.hasStaticShape();

@@ -1086,7 +1086,7 @@ class ReshapeConverterCollapseExpand
reshape.getLoc(),
RankedTensorType::get(intermediateShape,
reshape.getType().getElementType()),
- adaptor.input1());
+ adaptor.getInput1());
Value expand =
rewriter.create<tosa::ReshapeOp>(reshape.getLoc(), resultTy, collapse);
rewriter.replaceOp(reshape, expand);
@@ -1102,7 +1102,7 @@ class TransposeConverter : public OpRewritePattern<tosa::TransposeOp> {
LogicalResult matchAndRewrite(tosa::TransposeOp op,
PatternRewriter &rewriter) const final {
DenseIntElementsAttr perms;
- if (!matchPattern(op.perms(), m_Constant(&perms))) {
+ if (!matchPattern(op.getPerms(), m_Constant(&perms))) {
return failure();
}

@@ -1136,7 +1136,7 @@ class TransposeConverter : public OpRewritePattern<tosa::TransposeOp> {
rewriter.getMultiDimIdentityMap(resultTy.getRank())};

rewriter.replaceOpWithNewOp<linalg::GenericOp>(
- op, resultTy, op.input1(), ValueRange{initTensor}, affineMaps,
+ op, resultTy, op.getInput1(), ValueRange{initTensor}, affineMaps,
getNParallelLoopsAttrs(resultTy.getRank()),
[&](OpBuilder &nestedBuilder, Location nestedLoc, ValueRange args) {
nestedBuilder.create<linalg::YieldOp>(loc, *args.begin());
@@ -1152,28 +1152,28 @@ class RescaleConverter : public OpRewritePattern<tosa::RescaleOp> {
LogicalResult matchAndRewrite(tosa::RescaleOp op,
PatternRewriter &rewriter) const final {
auto loc = op.getLoc();
- auto input = op.input();
- auto inputTy = op.input().getType().cast<ShapedType>();
- auto outputTy = op.output().getType().cast<ShapedType>();
+ auto input = op.getInput();
+ auto inputTy = op.getInput().getType().cast<ShapedType>();
+ auto outputTy = op.getOutput().getType().cast<ShapedType>();
unsigned rank = inputTy.getRank();

// This is an illegal configuration. terminate and log an error
- if (op.double_round() && !op.scale32())
+ if (op.getDoubleRound() && !op.getScale32())
return rewriter.notifyMatchFailure(
op, "tosa.rescale requires scale32 for double_round to be true");

auto dynamicDimsOr =
- checkHasDynamicBatchDims(rewriter, op, {input, op.output()});
+ checkHasDynamicBatchDims(rewriter, op, {input, op.getOutput()});
if (!dynamicDimsOr.has_value())
return failure();
SmallVector<Value> dynamicDims = dynamicDimsOr.value();

// The shift and multiplier values.
SmallVector<int32_t> multiplierValues;
- getValuesFromIntArrayAttribute(op.multiplier(), multiplierValues);
+ getValuesFromIntArrayAttribute(op.getMultiplier(), multiplierValues);

SmallVector<int8_t> shiftValues;
- getValuesFromIntArrayAttribute(op.shift(), shiftValues);
+ getValuesFromIntArrayAttribute(op.getShift(), shiftValues);

// If we shift by more than the bitwidth, this just sets to 0.
for (int i = 0, s = multiplierValues.size(); i < s; i++) {
Expand All @@ -1186,7 +1186,7 @@ class RescaleConverter : public OpRewritePattern<tosa::RescaleOp> {
// Double round only occurs if shift is greater than 31, check that this
// is ever true.
bool doubleRound =
- op.double_round() &&
+ op.getDoubleRound() &&
llvm::any_of(shiftValues, [](int32_t v) { return v > 31; });

SmallVector<AffineMap> indexingMaps = {
@@ -1346,7 +1346,7 @@ class ResizeConverter : public OpRewritePattern<tosa::ResizeOp> {
LogicalResult matchAndRewrite(tosa::ResizeOp op,
PatternRewriter &rewriter) const final {
Location loc = op.getLoc();
- auto input = op.input();
+ auto input = op.getInput();
auto inputTy = input.getType().cast<ShapedType>();
auto resultTy = op.getType().cast<ShapedType>();
auto resultElementTy = resultTy.getElementType();
Expand All @@ -1355,12 +1355,12 @@ class ResizeConverter : public OpRewritePattern<tosa::ResizeOp> {
auto imageW = inputTy.getShape()[2];

auto dynamicDimsOr =
- checkHasDynamicBatchDims(rewriter, op, {input, op.output()});
+ checkHasDynamicBatchDims(rewriter, op, {input, op.getOutput()});
if (!dynamicDimsOr.has_value())
return failure();
SmallVector<Value> dynamicDims = dynamicDimsOr.value();

if (op.mode() != "NEAREST_NEIGHBOR" && op.mode() != "BILINEAR")
if (op.getMode() != "NEAREST_NEIGHBOR" && op.getMode() != "BILINEAR")
return failure();

auto initTensor = rewriter.create<linalg::InitTensorOp>(
@@ -1394,19 +1394,19 @@ class ResizeConverter : public OpRewritePattern<tosa::ResizeOp> {
Value inX =
rewriter.create<arith::IndexCastOp>(loc, rewriter.getI32Type(), x);

- int32_t shift = op.shift();
+ int32_t shift = op.getShift();
bool floatingPointMode = shift == 0;

Value yStride, xStride, yOffset, xOffset;
if (floatingPointMode) {
- yStride = rewriter.create<arith::ConstantOp>(loc, op.stride_fp()[0]);
- xStride = rewriter.create<arith::ConstantOp>(loc, op.stride_fp()[1]);
- yOffset = rewriter.create<arith::ConstantOp>(loc, op.offset_fp()[0]);
- xOffset = rewriter.create<arith::ConstantOp>(loc, op.offset_fp()[1]);
+ yStride = rewriter.create<arith::ConstantOp>(loc, op.getStrideFp()[0]);
+ xStride = rewriter.create<arith::ConstantOp>(loc, op.getStrideFp()[1]);
+ yOffset = rewriter.create<arith::ConstantOp>(loc, op.getOffsetFp()[0]);
+ xOffset = rewriter.create<arith::ConstantOp>(loc, op.getOffsetFp()[1]);
} else {
SmallVector<int32_t> stride, offset;
- getValuesFromIntArrayAttribute(op.stride(), stride);
- getValuesFromIntArrayAttribute(op.offset(), offset);
+ getValuesFromIntArrayAttribute(op.getStride(), stride);
+ getValuesFromIntArrayAttribute(op.getOffset(), offset);

yStride = rewriter.create<arith::ConstantOp>(
loc, rewriter.getI32IntegerAttr(stride[0]));
@@ -1463,7 +1463,7 @@ class ResizeConverter : public OpRewritePattern<tosa::ResizeOp> {
dx = rewriter.create<arith::SubIOp>(loc, x, xTrunc);
}

if (op.mode() == "NEAREST_NEIGHBOR") {
if (op.getMode() == "NEAREST_NEIGHBOR") {
Value yPred, xPred;
// Round the index position towards the closest pixel location.
if (floatingPointMode) {
@@ -1516,7 +1516,7 @@ class ResizeConverter : public OpRewritePattern<tosa::ResizeOp> {
return success();
}

if (op.mode() == "BILINEAR") {
if (op.getMode() == "BILINEAR") {
Value y0 = iy;
Value x0 = ix;

@@ -1634,7 +1634,7 @@ class ReduceConverter : public OpRewritePattern<SrcOp> {

LogicalResult matchAndRewrite(SrcOp reduceOp,
PatternRewriter &rewriter) const final {
- return reduceMatchAndRewriteHelper(reduceOp, reduceOp.axis(), rewriter);
+ return reduceMatchAndRewriteHelper(reduceOp, reduceOp.getAxis(), rewriter);
}
};

@@ -1648,7 +1648,7 @@ struct ConcatConverter : public OpConversionPattern<tosa::ConcatOp> {
auto resultType = op.getType().dyn_cast<RankedTensorType>();

Location loc = op.getLoc();
- int axis = op.axis();
+ int axis = op.getAxis();
Value axisValue = rewriter.createOrFold<arith::ConstantOp>(
loc, rewriter.getIndexAttr(axis));
int rank = resultType.getRank();
@@ -1713,10 +1713,10 @@ class ReverseConverter : public OpRewritePattern<tosa::ReverseOp> {
LogicalResult matchAndRewrite(tosa::ReverseOp op,
PatternRewriter &rewriter) const final {
auto loc = op.getLoc();
- Value input = op.input();
+ Value input = op.getInput();
auto inputTy = input.getType().template cast<ShapedType>();
auto resultTy = op.getType().template cast<ShapedType>();
- auto axis = op.axis();
+ auto axis = op.getAxis();

SmallVector<Value> dynDims;
for (int i = 0; i < inputTy.getRank(); i++) {
@@ -1775,15 +1775,15 @@ struct TileConverter : public OpConversionPattern<tosa::TileOp> {
matchAndRewrite(tosa::TileOp op, OpAdaptor adaptor,
ConversionPatternRewriter &rewriter) const override {
auto loc = op.getLoc();
- auto input = op.input1();
+ auto input = op.getInput1();
auto inputTy = input.getType().cast<ShapedType>();
auto inputShape = inputTy.getShape();
auto resultTy = op.getType().cast<ShapedType>();
auto elementTy = inputTy.getElementType();
int64_t rank = inputTy.getRank();

SmallVector<int64_t> multiples;
- getValuesFromIntArrayAttribute(op.multiples(), multiples);
+ getValuesFromIntArrayAttribute(op.getMultiples(), multiples);

// Broadcast the newly added dimensions to their appropriate multiple.
SmallVector<int64_t, 2> genericShape;
@@ -1837,8 +1837,8 @@ class PadConverter : public OpRewritePattern<tosa::PadOp> {
LogicalResult matchAndRewrite(tosa::PadOp padOp,
PatternRewriter &rewriter) const final {
auto loc = padOp.getLoc();
- auto input = padOp.input1();
- auto padding = padOp.padding();
+ auto input = padOp.getInput1();
+ auto padding = padOp.getPadding();

ShapedType inputTy = input.getType().cast<ShapedType>();
Type elementTy = inputTy.getElementType();
@@ -1848,17 +1848,17 @@ class PadConverter : public OpRewritePattern<tosa::PadOp> {

Value padConstant;

- if (padOp.pad_const()) {
+ if (padOp.getPadConst()) {
padConstant = rewriter.createOrFold<tensor::ExtractOp>(
- loc, padOp.pad_const(), ValueRange({}));
+ loc, padOp.getPadConst(), ValueRange({}));
} else {
Attribute constantAttr;
if (elementTy.isa<FloatType>()) {
constantAttr = rewriter.getFloatAttr(elementTy, 0.0);
- } else if (elementTy.isa<IntegerType>() && !padOp.quantization_info()) {
+ } else if (elementTy.isa<IntegerType>() && !padOp.getQuantizationInfo()) {
constantAttr = rewriter.getIntegerAttr(elementTy, 0);
- } else if (elementTy.isa<IntegerType>() && padOp.quantization_info()) {
- int64_t value = padOp.quantization_info()->getInputZp();
+ } else if (elementTy.isa<IntegerType>() && padOp.getQuantizationInfo()) {
+ int64_t value = padOp.getQuantizationInfo()->getInputZp();
constantAttr = rewriter.getIntegerAttr(elementTy, value);
}
if (constantAttr)
@@ -1926,12 +1926,12 @@ class ArgMaxConverter : public OpRewritePattern<tosa::ArgMaxOp> {
LogicalResult matchAndRewrite(tosa::ArgMaxOp argmaxOp,
PatternRewriter &rewriter) const final {
auto loc = argmaxOp.getLoc();
- Value input = argmaxOp.input();
+ Value input = argmaxOp.getInput();
auto inputTy = input.getType().cast<ShapedType>();
- auto resultTy = argmaxOp.output().getType().cast<ShapedType>();
+ auto resultTy = argmaxOp.getOutput().getType().cast<ShapedType>();
auto inElementTy = inputTy.getElementType();
auto outElementTy = resultTy.getElementType();
- int axis = argmaxOp.axis();
+ int axis = argmaxOp.getAxis();
auto resultMaxTy = RankedTensorType::get(resultTy.getShape(), inElementTy);

if (!outElementTy.isa<IntegerType>())
@@ -2049,8 +2049,8 @@ class GatherConverter : public OpConversionPattern<tosa::GatherOp> {

auto resultTy = op.getType().cast<ShapedType>();

- auto dynamicDimsOr =
- checkHasDynamicBatchDims(rewriter, op, {input, indices, op.output()});
+ auto dynamicDimsOr = checkHasDynamicBatchDims(
+ rewriter, op, {input, indices, op.getOutput()});
if (!dynamicDimsOr.has_value())
return failure();
SmallVector<Value> dynamicDims = dynamicDimsOr.value();
@@ -2101,8 +2101,8 @@ class TableConverter : public OpRewritePattern<tosa::TableOp> {
LogicalResult matchAndRewrite(tosa::TableOp op,
PatternRewriter &rewriter) const final {
auto loc = op.getLoc();
- Value input = op.input();
- Value table = op.table();
+ Value input = op.getInput();
+ Value table = op.getTable();
auto inputTy = input.getType().cast<ShapedType>();
auto tableTy = table.getType().cast<ShapedType>();
auto resultTy = op.getType().cast<ShapedType>();
(Diffs for the remaining 11 changed files not shown.)
