[build] Update llvm tag to a3f2751 (#2397)
This commit updates the `llvm-project` and `mlir-hlo` submodules to
commits:

llvm-project: a3f2751
mlir-hlo: 97c7e4b4506c3a2441c923e592833f45da439009

Changes made:

- Rename `getSuccessorEntryOperands` to `getEntrySuccessorOperands`
  and remove the `operands` parameter from
  `getSuccessorRegions` (https://reviews.llvm.org/D157506); see the
  first sketch below
- Make the `TypeConverter` held by conversion patterns `const`
  (https://reviews.llvm.org/D157601); see the second sketch below
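
For downstream ops implementing `RegionBranchOpInterface`, the first change is a mechanical signature update. Below is a minimal sketch modeled on the `PrimLoopOp` and `PrimIfOp` hunks in this diff; `MyLoopOp` and its accessors (`getInitArgs`, `getBody`) are hypothetical, for illustration only:

```cpp
#include "mlir/Interfaces/ControlFlowInterfaces.h"

using namespace mlir;

// Renamed hook: getSuccessorEntryOperands -> getEntrySuccessorOperands.
OperandRange MyLoopOp::getEntrySuccessorOperands(std::optional<unsigned> index) {
  assert(index.has_value() && index.value() == 0);
  return getInitArgs(); // operands forwarded to the body region on entry
}

// The `ArrayRef<Attribute> operands` parameter is gone. Implementations that
// used it to detect constant operands (as PrimIfOp did) now query the op
// directly, e.g. via matchPattern on the relevant operand.
void MyLoopOp::getSuccessorRegions(std::optional<unsigned> index,
                                   SmallVectorImpl<RegionSuccessor> &regions) {
  if (!index.has_value()) {
    // From outside the op, control first enters the body region.
    regions.emplace_back(&getBody(), getBody().getArguments());
    return;
  }
  // From the body, control either loops back or returns to the parent op.
  regions.emplace_back(&getBody(), getBody().getArguments());
  regions.emplace_back(getResults());
}
```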
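The second change surfaces wherever a helper receives the pattern's `TypeConverter`: `getTypeConverter()` now returns a pointer-to-const, so every such helper must accept `const TypeConverter *`. A minimal sketch under that assumption (`convertOrSelf` is a hypothetical helper, not part of this commit):

```cpp
#include "mlir/Transforms/DialectConversion.h"

using namespace mlir;

// Before D157601 this would have taken a mutable `TypeConverter *`.
static Type convertOrSelf(const TypeConverter *converter, Type type) {
  // TypeConverter::convertType is a const member function, so a
  // pointer-to-const suffices for read-only uses like this one.
  Type converted = converter->convertType(type);
  return converted ? converted : type;
}
```

Since such helpers only call `const` members like `convertType` and `materializeTargetConversion`, the fix at each call site below is purely a declared-type change.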
ramiro050 authored Aug 15, 2023
1 parent 94f7593 commit 41bafe1
Showing 15 changed files with 43 additions and 48 deletions.
2 changes: 1 addition & 1 deletion externals/llvm-project
Submodule llvm-project updated 3323 files
2 changes: 1 addition & 1 deletion externals/mlir-hlo
Submodule mlir-hlo updated 34 files
+1 −0 BUILD
+2 −2 WORKSPACE
+1 −1 build_tools/llvm_version.txt
+1 −1 gml_st/IR/gml_st_ops.cc
+3 −6 lhlo/IR/lhlo_ops.cc
+3 −1 lhlo_gpu/IR/lhlo_gpu_ops.td
+0 −63 mhlo/IR/hlo_ops.cc
+0 −1 mhlo/IR/hlo_ops.td
+1 −0 mhlo/transforms/CMakeLists.txt
+5 −5 mhlo/transforms/hlo_legalize_to_stablehlo/hlo_legalize_to_stablehlo.cc
+113 −0 mhlo/transforms/legalize_dot_general_to_dot/legalize_dot_general_to_dot.cc
+6 −0 mhlo/transforms/mhlo_passes.td
+2 −0 mhlo/transforms/passes.h
+6 −6 mhlo/transforms/stablehlo_legalize_to_hlo/stablehlo_legalize_to_hlo.cc
+2 −0 stablehlo/BUILD
+1 −1 stablehlo/BUILD.bazel
+2 −2 stablehlo/WORKSPACE.bazel
+1 −1 stablehlo/build_tools/llvm_version.txt
+29 −26 stablehlo/docs/spec.md
+2 −0 stablehlo/stablehlo/conversions/tosa/transforms/Passes.h
+4 −1 stablehlo/stablehlo/conversions/tosa/transforms/Passes.td
+1 −2 stablehlo/stablehlo/dialect/ChloOps.cpp
+2 −2 stablehlo/stablehlo/dialect/StablehloOps.td
+1 −1 stablehlo/stablehlo/dialect/Version.h
+3 −3 stablehlo/stablehlo/transforms/Passes.h
+3 −3 stablehlo/stablehlo/transforms/StablehloLegalizeToVhlo.cpp
+20 −14 stablehlo/stablehlo/transforms/VhloLegalizeToStablehlo.cpp
+1 −1 stablehlo/stablehlo/transforms/VhloToVersion.cpp
+1 −1 tests/Dialect/gml_st/simplify_dead_copy.mlir
+8 −8 tests/Dialect/lhlo/ops.mlir
+0 −23 tests/Dialect/mhlo/canonicalize/convolution.mlir
+25 −0 tests/Dialect/mhlo/hlo-legalize-dot-general-to-dot.mlir
+2 −2 tests/Dialect/mhlo/hlo-legalize-to-lhlo.mlir
+2 −2 tests/Dialect/mhlo/hlo-legalize-to-memref.mlir
2 changes: 1 addition & 1 deletion include/torch-mlir/Conversion/Utils/Utils.h
@@ -76,7 +76,7 @@ SmallVector<Value> getAsConstantIndexValues(OpBuilder &b, Location loc,
// convert their elements to valid target type.
// TODO: remove this when list gets full support.
SmallVector<Value> getTypeConvertedValues(OpBuilder &b, Location loc,
-TypeConverter *converter,
+const TypeConverter *converter,
SmallVectorImpl<Value> &vs);

mlir::RankedTensorType GetTypeFromTensorShape(llvm::ArrayRef<int64_t> shape,
2 changes: 1 addition & 1 deletion include/torch-mlir/Dialect/Torch/IR/TorchOps.td
@@ -507,7 +507,7 @@ def Torch_PrimCallMethodOp : Torch_Op<"prim.CallMethod", []> {
}

def Torch_PrimLoopOp : Torch_Op<"prim.Loop", [
DeclareOpInterfaceMethods<RegionBranchOpInterface, ["getSuccessorEntryOperands"]>]> {
DeclareOpInterfaceMethods<RegionBranchOpInterface, ["getEntrySuccessorOperands"]>]> {
let summary = "TorchScript prim::Loop op";
let description = [{
This op (together with prim.Loop.condition) define a looping construct
14 changes: 7 additions & 7 deletions lib/Conversion/TorchToLinalg/DataMovement.cpp
@@ -328,7 +328,7 @@ class ConvertAtenViewOp : public OpConversionPattern<AtenViewOp> {
SmallVector<int64_t> inputShape =
makeShapeTorchCompatible(inputType.getShape());
int64_t inputRank = inputType.getRank();
-TypeConverter *typeConverter = getTypeConverter();
+const TypeConverter *typeConverter = getTypeConverter();
auto resultType =
typeConverter->convertType(op.getType()).cast<RankedTensorType>();
int64_t resultRank = resultType.getRank();
@@ -695,7 +695,7 @@ class ConvertAtenSqueezeOp : public OpConversionPattern<AtenSqueezeOp> {
Value input = adaptor.getSelf();
auto inputType = input.getType().cast<RankedTensorType>();
int64_t inputRank = inputType.getRank();
-TypeConverter *typeConverter = getTypeConverter();
+const TypeConverter *typeConverter = getTypeConverter();
auto resultType =
typeConverter->convertType(op.getType()).cast<RankedTensorType>();
int64_t resultRank = resultType.getRank();
@@ -804,7 +804,7 @@ class ConvertAtenSqueezeDimOp : public OpConversionPattern<AtenSqueezeDimOp> {
op, "unimplemented: dim(th) dimension is not expected to be dynamic");
}

-TypeConverter *typeConverter = getTypeConverter();
+const TypeConverter *typeConverter = getTypeConverter();
auto resultType =
typeConverter->convertType(op.getType()).cast<RankedTensorType>();
int64_t resultRank = resultType.getRank();
@@ -1046,7 +1046,7 @@ class ConvertAtenSliceTensorOp : public OpConversionPattern<AtenSliceTensorOp> {
return failure();

Location loc = op.getLoc();
-TypeConverter *typeConverter = getTypeConverter();
+const TypeConverter *typeConverter = getTypeConverter();

auto input = adaptor.getSelf();
RankedTensorType resultType =
@@ -1081,7 +1081,7 @@ class ConvertAtenCatOp : public OpConversionPattern<AtenCatOp> {
if (failed(verifyLinalgCompatibleTypes(op, rewriter)))
return failure();
Location loc = op.getLoc();
-TypeConverter *typeConverter = getTypeConverter();
+const TypeConverter *typeConverter = getTypeConverter();

// Collect all the tensors to be concatenated.
auto tensorList = op.getTensors();
@@ -1312,7 +1312,7 @@ class ConvertAtenSliceScatterOp
return failure();

Location loc = op.getLoc();
-TypeConverter *typeConverter = getTypeConverter();
+const TypeConverter *typeConverter = getTypeConverter();

auto input = adaptor.getSelf();

@@ -1361,7 +1361,7 @@ class ConvertAtenViewAsComplexOp
return failure();

Location loc = op.getLoc();
-TypeConverter *typeConverter = getTypeConverter();
+const TypeConverter *typeConverter = getTypeConverter();
MLIRContext *context = rewriter.getContext();

auto input = adaptor.getSelf();
9 changes: 4 additions & 5 deletions lib/Conversion/TorchToLinalg/Pooling.cpp
@@ -32,7 +32,7 @@ using namespace mlir::torch::Torch;
template <typename OpTy>
static LogicalResult
checkAndGetPoolingParameters(OpTy op, ConversionPatternRewriter &rewriter,
-TypeConverter *typeConverter, bool &ceilMode,
+const TypeConverter *typeConverter, bool &ceilMode,
SmallVectorImpl<Value> &kernelSizeIntValues,
SmallVectorImpl<int64_t> &strideInts,
SmallVectorImpl<int64_t> &paddingInts) {
@@ -72,7 +72,6 @@ checkAndGetPoolingParameters(OpTy op, ConversionPatternRewriter &rewriter,
return success();
}


// Creates a pooling operation based on the type specified by `OpTy` and
// arguments passed.
template <typename OpTy>
@@ -153,7 +152,7 @@ class ConvertAtenMaxPool2dOp : public OpConversionPattern<AtenMaxPool2dOp> {
if (failed(verifyLinalgCompatibleTypes(op, rewriter)))
return failure();

-TypeConverter *typeConverter = getTypeConverter();
+const TypeConverter *typeConverter = getTypeConverter();
Value self = adaptor.getSelf();
int64_t selfRank = self.getType().cast<RankedTensorType>().getRank();
// TODO: Add support for 3D inputs.
@@ -225,7 +224,7 @@ class ConvertAtenMaxPool2dWithIndicesOp
if (failed(verifyLinalgCompatibleTypes(op, rewriter)))
return failure();
Location loc = op->getLoc();
-TypeConverter *typeConverter = getTypeConverter();
+const TypeConverter *typeConverter = getTypeConverter();
Value self = adaptor.getSelf();
RankedTensorType selfType = self.getType().cast<RankedTensorType>();
Type elementType = selfType.getElementType();
@@ -386,7 +385,7 @@ class ConvertAtenAvgPoolOp : public OpConversionPattern<OpTy> {
return failure();

Location loc = op->getLoc();
-TypeConverter *typeConverter = this->getTypeConverter();
+const TypeConverter *typeConverter = this->getTypeConverter();
Value self = adaptor.getSelf();

Type inputElementType =
6 changes: 3 additions & 3 deletions lib/Conversion/TorchToLinalg/TensorConstructors.cpp
@@ -106,7 +106,7 @@ class ConvertConstantTensorAllocOp : public OpConversionPattern<OpTy> {
}

Location loc = op.getLoc();
-TypeConverter *typeConverter = this->getTypeConverter();
+const TypeConverter *typeConverter = this->getTypeConverter();
SmallVector<Value> resultSizeTorchInt, resultSize, resultSizeIndex;
if (!getListConstructElements(op.getSize(), resultSizeTorchInt)) {
return rewriter.notifyMatchFailure(
@@ -211,7 +211,7 @@ class ConvertAtenEmptyMemoryFormatOp
}

Location loc = op.getLoc();
-TypeConverter *typeConverter = this->getTypeConverter();
+const TypeConverter *typeConverter = this->getTypeConverter();
SmallVector<Value> resultSizeTorchInt, resultSize, resultSizeIndex;
if (!getListConstructElements(op.getSize(), resultSizeTorchInt)) {
return rewriter.notifyMatchFailure(
@@ -282,7 +282,7 @@ class ConvertAtenArangeStartStepOp
}

Location loc = op.getLoc();
-TypeConverter *typeConverter = this->getTypeConverter();
+const TypeConverter *typeConverter = this->getTypeConverter();
RankedTensorType resultType =
typeConverter->convertType(op->getResult(0).getType())
.cast<RankedTensorType>();
8 changes: 5 additions & 3 deletions lib/Conversion/TorchToLinalg/Uncategorized.cpp
@@ -127,8 +127,10 @@ static Value buildUnitNormalCdf(OpBuilder &b, Location &loc, Value x) {
}

template <typename MathOpTy>
-static Value createCalculationForMathOpWithDtypeConversion(
-OpBuilder &b, TypeConverter *converter, Value payloadArg, Operation *op) {
+static Value
+createCalculationForMathOpWithDtypeConversion(OpBuilder &b,
+const TypeConverter *converter,
+Value payloadArg, Operation *op) {
Type dtype = converter->convertType(op->getResult(0).getType())
.template cast<RankedTensorType>()
.getElementType();
@@ -207,7 +209,7 @@ createTriangularMatrix(OpBuilder &b, Location loc, ValueRange payloadArgs,
}

static Value createLinalgPayloadCalculationForElementwiseOp(
-OpBuilder &b, Location loc, TypeConverter *converter,
+OpBuilder &b, Location loc, const TypeConverter *converter,
ValueRange payloadArgs, Operation *op, ArrayRef<Value> operands) {
if (isa<AtenFloorOp>(op))
return b.create<math::FloorOp>(loc, payloadArgs[0]);
4 changes: 2 additions & 2 deletions lib/Conversion/TorchToSCF/TorchToSCF.cpp
@@ -77,7 +77,7 @@ class ConvertTorchPrimLoopWhileLikeOp : public OpConversionPattern<PrimLoopOp> {
if (op.isForLike())
return failure();

-TypeConverter *typeConverter = getTypeConverter();
+const TypeConverter *typeConverter = getTypeConverter();
SmallVector<Type, 1> newResultTypes;
if (failed(
typeConverter->convertTypes(op.getResultTypes(), newResultTypes)))
@@ -217,7 +217,7 @@ class ConvertTorchPrimLoopForLikeOp : public OpConversionPattern<PrimLoopOp> {
if (!op.isForLike())
return failure();

-TypeConverter *typeConverter = getTypeConverter();
+const TypeConverter *typeConverter = getTypeConverter();
SmallVector<Type, 1> newResultTypes;
if (failed(
typeConverter->convertTypes(op.getResultTypes(), newResultTypes)))
2 changes: 1 addition & 1 deletion lib/Conversion/TorchToStablehlo/Basic.cpp
@@ -1555,7 +1555,7 @@ LogicalResult ConvertAtenOp<AtenEmptyMemoryFormatOp>::matchAndRewrite(
}

Location loc = op.getLoc();
-TypeConverter *typeConverter = this->getTypeConverter();
+const TypeConverter *typeConverter = this->getTypeConverter();
SmallVector<Value> resultSizeTorchInt, resultSize, resultSizeIndex;
if (!getListConstructElements(op.getSize(), resultSizeTorchInt)) {
return rewriter.notifyMatchFailure(
2 changes: 1 addition & 1 deletion lib/Conversion/TorchToStablehlo/GatherScatter.cpp
@@ -342,7 +342,7 @@ LogicalResult ConvertAtenOp<AtenSliceScatterOp>::matchAndRewrite(
return failure();

Location loc = op.getLoc();
-TypeConverter *typeConverter = getTypeConverter();
+const TypeConverter *typeConverter = getTypeConverter();

auto input = adaptor.getSelf();

4 changes: 2 additions & 2 deletions lib/Conversion/TorchToTMTensor/TorchToTMTensor.cpp
@@ -309,7 +309,7 @@ class ConvertAtenScatterSrcOp : public OpConversionPattern<AtenScatterSrcOp> {
if (failed(verifyLinalgCompatibleTypes(op, rewriter)))
return failure();
Location loc = op.getLoc();
-TypeConverter *typeConverter = getTypeConverter();
+const TypeConverter *typeConverter = getTypeConverter();
Value self = adaptor.getSelf();
Value index = adaptor.getIndex();
Value src = adaptor.getSrc();
@@ -361,7 +361,7 @@ class ConvertAtenBincountOp : public OpConversionPattern<AtenBincountOp> {
return failure();
Location loc = op.getLoc();
MLIRContext *context = op->getContext();
-TypeConverter *typeConverter = getTypeConverter();
+const TypeConverter *typeConverter = getTypeConverter();
Value input = adaptor.getSelf();
Value torchTypeInput = op.getSelf();
Value minlength = adaptor.getMinlength();
8 changes: 4 additions & 4 deletions lib/Conversion/TorchToTosa/TorchToTosa.cpp
@@ -2121,7 +2121,7 @@ LogicalResult ConvertAtenOp<AtenBatchNormOp>::matchAndRewrite(
// reshaped so it sits on the same dim as 'C'.
auto reshapeToNormInputDim = [&](Operation *op,
ConversionPatternRewriter &rewriter,
-TypeConverter *converter, Type outType,
+const TypeConverter *converter, Type outType,
const Value toBcast, Value &result) {
RankedTensorType toBcastType =
toBcast.getType().dyn_cast<RankedTensorType>();
@@ -3809,7 +3809,7 @@ LogicalResult ConvertAtenOp<AtenArangeStartStepOp>::matchAndRewrite(
AtenArangeStartStepOp op, OpAdaptor adaptor,
ConversionPatternRewriter &rewriter) const {

-TypeConverter *typeConverter = this->getTypeConverter();
+const TypeConverter *typeConverter = this->getTypeConverter();
RankedTensorType resultType =
typeConverter->convertType(op->getResult(0).getType())
.cast<RankedTensorType>();
@@ -3859,7 +3859,7 @@ LogicalResult ConvertAtenOp<PrimNumToTensorScalarOp>::matchAndRewrite(
PrimNumToTensorScalarOp op, OpAdaptor adaptor,
ConversionPatternRewriter &rewriter) const {

-TypeConverter *typeConverter = this->getTypeConverter();
+const TypeConverter *typeConverter = this->getTypeConverter();
RankedTensorType resultType =
typeConverter->convertType(op->getResult(0).getType())
.cast<RankedTensorType>();
@@ -4673,7 +4673,7 @@ template <>
LogicalResult ConvertAtenOp<AtenCatOp>::matchAndRewrite(
AtenCatOp op, OpAdaptor adaptor,
ConversionPatternRewriter &rewriter) const {
-TypeConverter *typeConverter = this->getTypeConverter();
+const TypeConverter *typeConverter = this->getTypeConverter();
auto outType =
typeConverter->convertType(op.getType()).cast<RankedTensorType>();
int64_t rank = outType.getRank();
2 changes: 1 addition & 1 deletion lib/Conversion/Utils/Utils.cpp
@@ -230,7 +230,7 @@ SmallVector<Value> getAsConstantIndexValues(OpBuilder &b, Location loc,
// convert their elements to valid target type.
// TODO: remove this when list gets full support.
SmallVector<Value> getTypeConvertedValues(OpBuilder &b, Location loc,
-TypeConverter *converter,
+const TypeConverter *converter,
SmallVectorImpl<Value> &vs) {
return llvm::to_vector<4>(llvm::map_range(vs, [&](Value v) {
return converter->materializeTargetConversion(
24 changes: 9 additions & 15 deletions lib/Dialect/Torch/IR/TorchOps.cpp
@@ -302,15 +302,13 @@ LogicalResult ClassTypeOp::verify() {
//===----------------------------------------------------------------------===//

OperandRange
-PrimLoopOp::getSuccessorEntryOperands(std::optional<unsigned int> index) {
+PrimLoopOp::getEntrySuccessorOperands(std::optional<unsigned int> index) {
assert(index.has_value() && index.value() == 0);
return getIterArgsInit();
}

void PrimLoopOp::getSuccessorRegions(
-std::optional<unsigned> index, ArrayRef<Attribute> operands,
-SmallVectorImpl<RegionSuccessor> &regions) {
-(void)operands;
+std::optional<unsigned> index, SmallVectorImpl<RegionSuccessor> &regions) {

if (!index.has_value()) {
regions.emplace_back(&getRegion(), getRegion().getArguments().slice(1));
@@ -381,7 +379,6 @@ void PrimIfOp::print(OpAsmPrinter &p) {
}

void PrimIfOp::getSuccessorRegions(std::optional<unsigned> index,
-ArrayRef<Attribute> operands,
SmallVectorImpl<RegionSuccessor> &regions) {
// The `then` and the `else` region branch back to the parent operation.
if (index.has_value()) {
@@ -390,9 +387,9 @@ void PrimIfOp::getSuccessorRegions(std::optional<unsigned> index,
}

// If the condition is constant, we can give a more precise answer.
-if (auto condAttr = operands.front().dyn_cast_or_null<IntegerAttr>()) {
-Region *executedRegion =
-condAttr.getValue().isOne() ? &getThenRegion() : &getElseRegion();
+bool condition;
+if (matchPattern(getCondition(), m_TorchConstantBool(&condition))) {
+Region *executedRegion = condition ? &getThenRegion() : &getElseRegion();
regions.push_back(RegionSuccessor(executedRegion));
return;
}
@@ -2720,7 +2717,6 @@ OpFoldResult PrimMinIntOp::fold(FoldAdaptor adaptor) {
template <typename CalculateOp>
static void
getSuccessorRegionsForCalculateOp(CalculateOp op, std::optional<unsigned> index,
-ArrayRef<Attribute> operands,
SmallVectorImpl<RegionSuccessor> &regions) {
if (!index.has_value()) {
// First thing the op does is branch into the calculation.
@@ -2738,19 +2734,17 @@ getSuccessorRegionsForCalculateOp(CalculateOp op, std::optional<unsigned> index,
}

void ShapeCalculateOp::getSuccessorRegions(
-std::optional<unsigned> index, ArrayRef<Attribute> operands,
-SmallVectorImpl<RegionSuccessor> &regions) {
-getSuccessorRegionsForCalculateOp(*this, index, operands, regions);
+std::optional<unsigned> index, SmallVectorImpl<RegionSuccessor> &regions) {
+getSuccessorRegionsForCalculateOp(*this, index, regions);
}

//===----------------------------------------------------------------------===//
// DtypeCalculateOp
//===----------------------------------------------------------------------===//

void DtypeCalculateOp::getSuccessorRegions(
-std::optional<unsigned> index, ArrayRef<Attribute> operands,
-SmallVectorImpl<RegionSuccessor> &regions) {
-getSuccessorRegionsForCalculateOp(*this, index, operands, regions);
+std::optional<unsigned> index, SmallVectorImpl<RegionSuccessor> &regions) {
+getSuccessorRegionsForCalculateOp(*this, index, regions);
}

//===----------------------------------------------------------------------===//
