Update llvm sha #277

Merged · 14 commits · Aug 3, 2022
5 changes: 3 additions & 2 deletions .github/workflows/build.yml
@@ -82,11 +82,11 @@ jobs:
        id: cache-llvm-mlir
        uses: actions/cache@v3
        env:
-         CACHE_NUMBER: 1 # Increase to reset cache
+         LLVM_CACHE_NUMBER: 1 # Increase to reset cache
        with:
          path: |
            /home/runner/work/llvm-mlir/_mlir_install/**
-         key: ${{ runner.os }}-build-llvm-${{ env.CACHE_NUMBER }}-${{ env.LLVM_SHA }}
+         key: ${{ runner.os }}-build-llvm-${{ env.LLVM_CACHE_NUMBER }}-${{ env.LLVM_SHA }}

      - name: Download TBB
        if: steps.cache-tbb.outputs.cache-hit != 'true'
@@ -158,6 +158,7 @@ jobs:
            -DLLVM_ENABLE_RTTI=ON \
            -DLLVM_USE_LINKER=gold \
            -DLLVM_INSTALL_UTILS=ON \
+           -DLLVM_ENABLE_ZSTD=OFF \
            -DCMAKE_INSTALL_PREFIX=/home/runner/work/llvm-mlir/_mlir_install || exit 1
          cmake --build . -j ${np} || exit 1
          cmake --install . || exit 1
2 changes: 1 addition & 1 deletion llvm-sha.txt
@@ -1 +1 @@
-fbb51ac0ba1007250cba384d930ae965e395b657
+78650b78618840563a05d840794519422998adbb
2 changes: 1 addition & 1 deletion mlir/lib/Conversion/SCFToAffine/SCFToAffine.cpp
@@ -61,7 +61,7 @@ class SCFParallelLowering : public OpRewritePattern<scf::ParallelOp> {
        llvm::makeArrayRef(newSteps));

    // Steal the body of the old affine for op.
-   newPloop.region().takeBody(op.getRegion());
+   newPloop.getRegion().takeBody(op.getRegion());

    Operation *yieldOp = newPloop.getBody()->getTerminator();
    assert(yieldOp);
46 changes: 2 additions & 44 deletions mlir/lib/Dialect/plier_util/dialect.cpp
@@ -272,48 +272,6 @@ struct GenGlobalId : public mlir::OpRewritePattern<mlir::arith::AddIOp> {
  }
};

-struct InvertCmpi : public mlir::OpRewritePattern<mlir::arith::CmpIOp> {
-  using OpRewritePattern::OpRewritePattern;
-
-  mlir::LogicalResult
-  matchAndRewrite(mlir::arith::CmpIOp op,
-                  mlir::PatternRewriter &rewriter) const override {
-
-    if (!mlir::matchPattern(op.getLhs(), mlir::m_Constant()) ||
-        mlir::matchPattern(op.getRhs(), mlir::m_Constant()))
-      return mlir::failure();
-
-    using Pred = mlir::arith::CmpIPredicate;
-    const std::pair<Pred, Pred> inv[] = {
-        // clang-format off
-        {Pred::slt, Pred::sgt},
-        {Pred::sle, Pred::sge},
-        {Pred::ult, Pred::ugt},
-        {Pred::ule, Pred::uge},
-        {Pred::eq, Pred::eq},
-        {Pred::ne, Pred::ne},
-        // clang-format on
-    };
-
-    auto newPred = [&]() -> Pred {
-      auto oldPred = op.getPredicate();
-      for (auto it : inv) {
-        if (it.first == oldPred)
-          return it.second;
-        if (it.second == oldPred)
-          return it.first;
-      }
-
-      llvm_unreachable("Unknown predicate");
-    }();
-
-    rewriter.replaceOpWithNewOp<mlir::arith::CmpIOp>(op, newPred, op.getRhs(),
-                                                     op.getLhs());
-    ;
-    return mlir::success();
-  }
-};
-
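Note: the deleted InvertCmpi pattern canonicalized integer compares with a constant on the left by swapping the operands and inverting the predicate; the removal presumably relies on upstream arith canonicalizations covering this case after the LLVM bump. For reference, the swap logic reduces to the helper below (a standalone sketch, not code from this repository):

    #include <llvm/Support/ErrorHandling.h>
    #include <mlir/Dialect/Arithmetic/IR/Arithmetic.h>

    // Predicate to use when the two operands of a cmpi are exchanged:
    // (c < x) becomes (x > c); eq and ne are symmetric and stay put.
    static mlir::arith::CmpIPredicate
    swappedPredicate(mlir::arith::CmpIPredicate pred) {
      using Pred = mlir::arith::CmpIPredicate;
      switch (pred) {
      case Pred::slt: return Pred::sgt;
      case Pred::sgt: return Pred::slt;
      case Pred::sle: return Pred::sge;
      case Pred::sge: return Pred::sle;
      case Pred::ult: return Pred::ugt;
      case Pred::ugt: return Pred::ult;
      case Pred::ule: return Pred::uge;
      case Pred::uge: return Pred::ule;
      case Pred::eq:
      case Pred::ne:
        return pred;
      }
      llvm_unreachable("Unknown predicate");
    }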
struct ReshapeAlloca : public mlir::OpRewritePattern<mlir::memref::ReshapeOp> {
using OpRewritePattern::OpRewritePattern;

@@ -355,7 +313,7 @@ void PlierUtilDialect::getCanonicalizationPatterns(
  results.add<DimExpandShape<mlir::tensor::DimOp, mlir::tensor::ExpandShapeOp>,
              DimExpandShape<mlir::memref::DimOp, mlir::memref::ExpandShapeOp>,
              DimInsertSlice, FillExtractSlice, SpirvInputCSE, GenGlobalId,
-             InvertCmpi, ReshapeAlloca>(getContext());
+             ReshapeAlloca>(getContext());
}

OpaqueType OpaqueType::get(mlir::MLIRContext *context) {
@@ -1132,7 +1090,7 @@ static mlir::Value propagateCasts(mlir::Value val, mlir::Type thisType) {
mlir::OpFoldResult SignCastOp::fold(llvm::ArrayRef<mlir::Attribute> operands) {
  assert(operands.size() == 1);
  auto thisType = getType();
-  auto attrOperand = operands.front();
+  auto attrOperand = operands.front().dyn_cast_or_null<mlir::TypedAttr>();
  if (attrOperand && attrOperand.getType() == thisType)
    return attrOperand;

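Most of the remaining C++ changes in this PR follow a single upstream API break: mlir::Attribute no longer exposes getType(); only attributes implementing the TypedAttr interface carry a type, so callers must cast first. A minimal sketch of the new access pattern (attrType is a hypothetical helper, not a function in this repository; assumes an MLIR checkout at the new sha):

    #include <mlir/IR/BuiltinAttributeInterfaces.h>

    // Generic code must now go through TypedAttr before asking for a type.
    static mlir::Type attrType(mlir::Attribute attr) {
      if (auto typed = attr.dyn_cast_or_null<mlir::TypedAttr>())
        return typed.getType();
      return {}; // attribute carries no type (e.g. UnitAttr)
    }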
6 changes: 3 additions & 3 deletions numba_dpcomp/numba_dpcomp/mlir/tests/test_gpu.py
@@ -294,7 +294,7 @@ def func(a, b):
        b[i] = f(a[i])

    _test_unary(
-       func, np.float32, "GPUToSpirvPass", lambda ir: ir.count(f"OCL.{op}") == 1
+       func, np.float32, "GPUToSpirvPass", lambda ir: ir.count(f"CL.{op}") == 1
    )


@@ -647,15 +647,15 @@ def func(a, b, c, res):
        gpu_func = kernel(fastmath=False)(func)
        gpu_func[a.shape, DEFAULT_LOCAL_SIZE](a, b, c, gpu_res)
        ir = get_print_buffer()
-       assert ir.count("spv.OCL.fma") == 0, ir
+       assert ir.count("spv.CL.fma") == 0, ir
        assert_equal(gpu_res, sim_res)

    with print_pass_ir([], ["GPUToSpirvPass"]):
        gpu_res = np.zeros(a.shape, a.dtype)
        gpu_func = kernel(fastmath=True)(func)
        gpu_func[a.shape, DEFAULT_LOCAL_SIZE](a, b, c, gpu_res)
        ir = get_print_buffer()
-       assert ir.count("spv.OCL.fma") == 1, ir
+       assert ir.count("spv.CL.fma") == 1, ir
        assert_equal(gpu_res, sim_res)


13 changes: 1 addition & 12 deletions numba_dpcomp/numba_dpcomp/mlir/tests/test_numpy.py
@@ -1217,9 +1217,6 @@ def _cov(m, y=None, rowvar=True, bias=False, ddof=None):
_rnd = np.random.RandomState(42)


-@pytest.mark.skipif(
-    reason="Invalid folding in upstream MLIR, see https://github.com/llvm/llvm-project/issues/56557"
-)
@parametrize_function_variants(
    "m",
    [
@@ -1259,9 +1256,6 @@ def test_cov_basic(m):
_cov_inputs_m = _rnd.randn(105).reshape(15, 7)


-@pytest.mark.skipif(
-    reason="Invalid folding in upstream MLIR, see https://github.com/llvm/llvm-project/issues/56557"
-)
@pytest.mark.parametrize("m", [_cov_inputs_m])
@pytest.mark.parametrize("y", [None, _cov_inputs_m[::-1]])
@pytest.mark.parametrize("rowvar", [False, True])
@@ -1278,9 +1272,6 @@ def test_cov_explicit_arguments(m, y, rowvar, bias, ddof):
    )


-@pytest.mark.skipif(
-    reason="Invalid folding in upstream MLIR, see https://github.com/llvm/llvm-project/issues/56557"
-)
@parametrize_function_variants(
    "m, y, rowvar",
    [
@@ -1340,9 +1331,7 @@ def py_func(data):
    assert_equal(py_func(arr), jit_func(arr))


-@pytest.mark.skipif(
-    reason="Invalid folding in upstream MLIR, see https://github.com/llvm/llvm-project/issues/56557"
-)
+@pytest.mark.skipif(reason="Failure in propagate layout pass")
@pytest.mark.parametrize(
    "arr",
    [
@@ -816,78 +816,6 @@ struct DeallocOpLowering
  }
};

-struct ReshapeLowering
-    : public mlir::ConvertOpToLLVMPattern<mlir::memref::ReshapeOp> {
-  using ConvertOpToLLVMPattern<mlir::memref::ReshapeOp>::ConvertOpToLLVMPattern;
-
-  explicit ReshapeLowering(mlir::LLVMTypeConverter &converter)
-      : ConvertOpToLLVMPattern<mlir::memref::ReshapeOp>(converter,
-                                                        /*benefit*/ 2) {}
-
-  mlir::LogicalResult
-  matchAndRewrite(mlir::memref::ReshapeOp op,
-                  mlir::memref::ReshapeOp::Adaptor adaptor,
-                  mlir::ConversionPatternRewriter &rewriter) const override {
-    auto converter = getTypeConverter();
-    auto dstType = converter->convertType(op.getType());
-    if (!dstType)
-      return mlir::failure();
-
-    mlir::MemRefDescriptor source(adaptor.source());
-    mlir::MemRefDescriptor shape(adaptor.shape());
-
-    auto loc = op.getLoc();
-    auto result = mlir::MemRefDescriptor::undef(rewriter, loc, dstType);
-    result.setAllocatedPtr(rewriter, loc, source.allocatedPtr(rewriter, loc));
-    result.setAlignedPtr(rewriter, loc, source.alignedPtr(rewriter, loc));
-    result.setOffset(rewriter, loc, source.offset(rewriter, loc));
-
-    auto memRefType = op.getType().cast<mlir::MemRefType>();
-    auto numDims = memRefType.getRank();
-    llvm::SmallVector<mlir::Value> sizes(static_cast<unsigned>(numDims));
-    auto indexType = getIndexType();
-    for (unsigned i = 0; i < numDims; ++i) {
-      auto ind = createIndexConstant(rewriter, loc, i);
-      mlir::Value dataPtr =
-          getStridedElementPtr(loc, memRefType, shape, ind, rewriter);
-      auto size = rewriter.create<mlir::LLVM::LoadOp>(loc, dataPtr).getResult();
-      if (size.getType() != indexType)
-        size = rewriter.create<mlir::LLVM::ZExtOp>(loc, indexType, size);
-
-      result.setSize(rewriter, loc, i, size);
-      sizes[i] = size;
-    }
-
-    // Strides: iterate sizes in reverse order and multiply.
-    int64_t stride = 1;
-    mlir::Value runningStride = createIndexConstant(rewriter, loc, 1);
-    for (auto i = static_cast<unsigned>(memRefType.getRank()); i-- > 0;) {
-      result.setStride(rewriter, loc, i, runningStride);
-
-      int64_t size = memRefType.getShape()[i];
-      if (size == 0)
-        continue;
-      bool useSizeAsStride = stride == 1;
-      if (size == mlir::ShapedType::kDynamicSize)
-        stride = mlir::ShapedType::kDynamicSize;
-      if (stride != mlir::ShapedType::kDynamicSize)
-        stride *= size;
-
-      if (useSizeAsStride)
-        runningStride = sizes[i];
-      else if (stride == mlir::ShapedType::kDynamicSize)
-        runningStride =
-            rewriter.create<mlir::LLVM::MulOp>(loc, runningStride, sizes[i]);
-      else
-        runningStride =
-            createIndexConstant(rewriter, loc, static_cast<uint64_t>(stride));
-    }
-
-    rewriter.replaceOp(op, static_cast<mlir::Value>(result));
-    return mlir::success();
-  }
-};
-
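The deleted lowering filled in the memref descriptor's strides by walking the sizes from the innermost dimension outwards with a running product; the removal presumably relies on upstream's MemRefToLLVM now handling memref.reshape itself, making this higher-benefit local override unnecessary. The arithmetic it emitted as LLVM IR is the usual row-major stride rule, shown here as a plain C++ sketch (illustrative only; rowMajorStrides is not code from this repository):

    #include <cstdint>
    #include <vector>

    // stride[i] = product of sizes[i+1..rank), so the innermost
    // dimension has stride 1. E.g. sizes {2, 3, 4} -> strides {12, 4, 1}.
    static std::vector<int64_t>
    rowMajorStrides(const std::vector<int64_t> &sizes) {
      std::vector<int64_t> strides(sizes.size());
      int64_t running = 1;
      for (size_t i = sizes.size(); i-- > 0;) {
        strides[i] = running;
        running *= sizes[i];
      }
      return strides;
    }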
struct ExpandShapeLowering
: public mlir::ConvertOpToLLVMPattern<mlir::memref::ExpandShapeOp> {
using ConvertOpToLLVMPattern<
@@ -1802,7 +1730,6 @@ struct PlierUtilToLLVMPass
                LowerUndef,
                LowerBuildTuple,
                LowerRetainOp,
-               ReshapeLowering,
                ExpandShapeLowering,
                LowerExtractMemrefMetadataOp,
                LowerTakeContextOp,
@@ -302,7 +302,8 @@ static mlir::Type mapPlierType(mlir::Type type) {
static mlir::Type dropLiteralType(mlir::Type t) {
  assert(t);
  if (auto literal = t.dyn_cast<plier::LiteralType>())
-    return dropLiteralType(literal.getValue().getType());
+    return dropLiteralType(
+        literal.getValue().cast<mlir::TypedAttr>().getType());

  return t;
}
@@ -339,7 +340,8 @@ struct ConstOpLowering : public mlir::OpConversionPattern<plier::ConstOp> {
      return mlir::failure();

    auto value = adaptor.val();
-    if (isSupportedType(value.getType())) {
+    auto typeAttr = value.dyn_cast_or_null<mlir::TypedAttr>();
+    if (typeAttr && isSupportedType(typeAttr.getType())) {
      if (auto intAttr = value.dyn_cast<mlir::IntegerAttr>()) {
        auto type = intAttr.getType().cast<mlir::IntegerType>();
        if (!type.isSignless()) {
@@ -379,7 +381,7 @@ static bool isOmittedType(mlir::Type type) {
}

static mlir::Attribute makeSignlessAttr(mlir::Attribute val) {
-  auto type = val.getType();
+  auto type = val.cast<mlir::TypedAttr>().getType();
  if (auto intType = type.dyn_cast<mlir::IntegerType>()) {
    if (!intType.isSignless()) {
      auto newType = plier::makeSignlessType(intType);
@@ -411,8 +413,8 @@ struct LiteralLowering : public mlir::OpConversionPattern<Op> {
    if (auto literal = convertedType.template dyn_cast<plier::LiteralType>()) {
      auto loc = op.getLoc();
      auto attrVal = literal.getValue();
-      auto dstType = attrVal.getType();
-      auto val = makeSignlessAttr(attrVal);
+      auto dstType = attrVal.template cast<mlir::TypedAttr>().getType();
+      auto val = makeSignlessAttr(attrVal).template cast<mlir::TypedAttr>();
      auto newVal =
          rewriter.create<mlir::arith::ConstantOp>(loc, val).getResult();
      if (dstType != val.getType())
@@ -470,11 +472,11 @@ struct OmittedLowering : public mlir::OpConversionPattern<plier::CastOp> {
    if (auto omittedAttr =
            getOmittedValue(adaptor.value().getType(), convertedType)) {
      auto loc = op.getLoc();
-      auto dstType = omittedAttr.getType();
+      auto dstType = omittedAttr.cast<mlir::TypedAttr>().getType();
      auto val = makeSignlessAttr(omittedAttr);
      auto newVal =
          rewriter.create<mlir::arith::ConstantOp>(loc, val).getResult();
-      if (dstType != val.getType())
+      if (dstType != val.cast<mlir::TypedAttr>().getType())
        newVal = rewriter.create<plier::SignCastOp>(loc, dstType, newVal);

      rewriter.replaceOp(op, newVal);
@@ -697,8 +699,8 @@ mlir::Value doCast(mlir::PatternRewriter &rewriter, mlir::Location loc,
  assert(dstType);
  auto srcType = val.getType();
  if (auto literal = srcType.dyn_cast<plier::LiteralType>()) {
-    auto attr = literal.getValue();
-    auto signlessAttr = makeSignlessAttr(attr);
+    auto attr = literal.getValue().cast<mlir::TypedAttr>();
+    auto signlessAttr = makeSignlessAttr(attr).cast<mlir::TypedAttr>();
    val = rewriter.create<mlir::arith::ConstantOp>(loc, signlessAttr);
    if (signlessAttr.getType() != attr.getType())
      val = rewriter.create<plier::SignCastOp>(loc, attr.getType(), val);
@@ -1413,7 +1415,7 @@ void PlierToStdPass::runOnOperation() {
      return false;

    if (auto literal = type.dyn_cast<plier::LiteralType>())
-      type = literal.getValue().getType();
+      type = literal.getValue().cast<mlir::TypedAttr>().getType();

    return !type.isIntOrFloat();
  });
@@ -1462,8 +1464,9 @@ struct ConvertLiteralTypesPass
  typeConverter.addConversion([](mlir::Type type) { return type; });

  auto context = &getContext();
-  typeConverter.addConversion(
-      [](plier::LiteralType type) { return type.getValue().getType(); });
+  typeConverter.addConversion([](plier::LiteralType type) {
+    return type.getValue().cast<mlir::TypedAttr>().getType();
+  });

  auto materializeCast =
      [](mlir::OpBuilder &builder, mlir::Type type, mlir::ValueRange inputs,
@@ -16,6 +16,7 @@

#include <pybind11/pybind11.h>

+#include <mlir/AsmParser/AsmParser.h>
#include <mlir/Dialect/Arithmetic/IR/Arithmetic.h>
#include <mlir/Dialect/Bufferization/IR/Bufferization.h>
#include <mlir/Dialect/Func/IR/FuncOps.h>
@@ -220,7 +221,9 @@ static auto toValues(py::handle obj, UnwrapFunc &&unwrapFunc) {
static llvm::Optional<py::object> getPyLiteral(mlir::Attribute attr) {
  assert(attr);
  if (auto intAttr = attr.dyn_cast<mlir::IntegerAttr>()) {
-    if (auto intType = attr.getType().dyn_cast<mlir::IntegerType>()) {
+    if (auto intType = attr.cast<mlir::TypedAttr>()
+                           .getType()
+                           .dyn_cast<mlir::IntegerType>()) {
      // Ignore index type
      if (intType.getWidth() == 1)
        return py::bool_(intAttr.getInt() != 0);
1 change: 1 addition & 0 deletions numba_dpcomp/setup.py
@@ -66,6 +66,7 @@
    "-DIMEX_ENABLE_NUMBA_FE=ON",
    "-DIMEX_ENABLE_NUMBA_HOTFIX=ON",
    "-DIMEX_ENABLE_TBB_SUPPORT=ON",
+   "-DLLVM_ENABLE_ZSTD=OFF",
]

# DPNP