Skip to content

Commit

Permalink
[mlir][sparse] Favors synthetic tensor over other undefined tensors
Browse files Browse the repository at this point in the history
Reviewed By: aartbik

Differential Revision: https://reviews.llvm.org/D135385
  • Loading branch information
Peiming Liu committed Oct 6, 2022
1 parent ddb3553 commit d30dccd
Show file tree
Hide file tree
Showing 2 changed files with 104 additions and 31 deletions.
25 changes: 15 additions & 10 deletions mlir/lib/Dialect/SparseTensor/Utils/Merger.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -265,21 +265,26 @@ BitVector Merger::simplifyCond(unsigned s0, unsigned p0) {

BitVector simple = latPoints[p0].bits;
bool reset = isSingleton && hasAnySparse(simple);
unsigned offset = 0;
unsigned be = simple.size();
unsigned offset = 0; // relative to the end
if (!reset)
// Start resetting from a dense dimension, so that the first bit (if kept)
// is not of an undefined dimension type.
for (unsigned b = 0, be = simple.size(); b < be; b++)
if (simple[b] && isDimLevelType(b, DimLvlType::kDense))
offset = b;
for (unsigned b = 0; b < be; b++) {
if (simple[b] && isDimLevelType(b, DimLvlType::kDense)) {
offset = be - b - 1; // relative to the end
break;
}
}

// Now apply the two basic rules.
for (unsigned b = 0, be = simple.size(); b < be; b++) {
unsigned i = (offset + b) % be;
if (simple[i] && (!isDimLevelType(i, DimLvlType::kCompressed) &&
!isDimLevelType(i, DimLvlType::kSingleton))) {
// Now apply the two basic rules. We also iterate over the bits in reverse to
// always keep the rightmost bit (which could possibly be a synthetic tensor).
for (unsigned b = be - 1 - offset, i = 0; i < be;
b = b == 0 ? be - 1 : b - 1, i++) {
if (simple[b] && (!isDimLevelType(b, DimLvlType::kCompressed) &&
!isDimLevelType(b, DimLvlType::kSingleton))) {
if (reset)
simple.reset(i);
simple.reset(b);
reset = true;
}
}
Expand Down
110 changes: 89 additions & 21 deletions mlir/unittests/Dialect/SparseTensor/MergerTest.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -380,15 +380,16 @@ class MergerTest3T1LD : public MergerTestBase {
///
/// Tests with both undef and dense input.
///
class MergerTest3T1LU : public MergerTestBase {

class MergerTest4T1LU : public MergerTestBase {
protected:
// Our three tensors (two inputs, one output).
const unsigned t0 = 0, t1 = 1, t2 = 2;
const unsigned t0 = 0, t1 = 1, t2 = 2, t3 = 3;

// Our single loop.
const unsigned l0 = 0;

MergerTest3T1LU() : MergerTestBase(3, 1) {
MergerTest4T1LU() : MergerTestBase(4, 1) {
// Tensor 0: undef input vector.
merger.addExp(Kind::kTensor, t0, -1u);
merger.setDimLevelFormat(t0, l0, DimLevelFormat(DimLvlType::kUndef));
Expand All @@ -397,43 +398,110 @@ class MergerTest3T1LU : public MergerTestBase {
merger.addExp(Kind::kTensor, t1, -1u);
merger.setDimLevelFormat(t1, l0, DimLevelFormat(DimLvlType::kDense));

// Tensor 2: dense output vector.
// Tensor 2: undef input vector.
merger.addExp(Kind::kTensor, t2, -1u);
merger.setDimLevelFormat(t2, l0, DimLevelFormat(DimLvlType::kDense));
merger.setDimLevelFormat(t2, l0, DimLevelFormat(DimLvlType::kUndef));

// Tensor 3: dense output vector.
merger.addExp(Kind::kTensor, t3, -1u);
merger.setDimLevelFormat(t3, l0, DimLevelFormat(DimLvlType::kDense));
}
};

///
/// Tests with operation on sparse output.
///

class MergerTest3T1L_SO : public MergerTestBase {
protected:
// Our three tensors (two inputs, one output), plus one synthetic tensor.
const unsigned t0 = 0, t1 = 1, t2 = 2, t3 = 3;

// Our single loop.
const unsigned l0 = 0;

MergerTest3T1L_SO() : MergerTestBase(3, 1) {
merger.setHasSparseOut(true);

// Tensor 0: undef input vector.
merger.addExp(Kind::kTensor, t0, -1u);
merger.setDimLevelFormat(t0, l0, DimLevelFormat(DimLvlType::kUndef));

// Tensor 1: undef input vector.
merger.addExp(Kind::kTensor, t1, -1u);
merger.setDimLevelFormat(t1, l0, DimLevelFormat(DimLvlType::kUndef));

// Tensor 2: sparse output vector.
merger.addExp(Kind::kTensor, t2, -1u);
merger.setDimLevelFormat(t2, l0, DimLevelFormat(DimLvlType::kCompressed));
}
};

} // namespace

/// Vector multiplication (conjunction) of 2 vectors, i.e.;
/// a(i) = b(i) * c(i)
/// Vector multiplication (conjunction) of 3 vectors, i.e.;
/// a(i) = b(i) * c(i) * d(i)
/// which should form the single lattice point
/// {
/// lat( i_00_U i_01_D / (tensor_0 * tensor_1) )
/// lat( i_00_U i_01_D i_02_U / (tensor_0 * tensor_1 * tensor_2) )
/// }
/// after optimization, the dense dimension should be kept, despite it appears
/// in the middle
/// {
/// lat( i_01_D / (tensor_0 * tensor_1) )
/// lat( i_01_D / (tensor_0 * tensor_1 * tensor_2) )
/// }
#define IMPL_MERGER_TEST_CONJ(OP) \
TEST_F(MergerTest3T1LU, vector_##OP) { \
auto e = OP##Expr(t0, t1); \
#define IMPL_MERGER_TEST_CONJ_CONJ_UNDEF(CONJ1, CONJ2) \
TEST_F(MergerTest4T1LU, vector_##CONJ1##_##CONJ2) { \
auto em = CONJ1##Expr(t0, t1); \
auto e = CONJ2##Expr(em, t2); \
auto p0 = tensorPattern(t0); \
auto p1 = tensorPattern(t1); \
auto p2 = tensorPattern(t2); \
auto s = merger.buildLattices(e, l0); \
\
expectNumLatPoints(s, 1); \
expectLatPoint(s, lat(0), OP##Pattern(p0, p1), \
loopsToBits({{l0, t0}, {l0, t1}})); \
\
expectLatPoint(s, lat(0), CONJ2##Pattern(CONJ1##Pattern(p0, p1), p2), \
loopsToBits({{l0, t0}, {l0, t1}, {l0, t2}})); \
s = merger.optimizeSet(s); \
expectNumLatPoints(s, 1); \
expectLatPoint(s, lat(0), OP##Pattern(p0, p1), loopsToBits({{l0, t1}}), \
true); \
expectLatPoint(s, lat(0), CONJ2##Pattern(CONJ1##Pattern(p0, p1), p2), \
loopsToBits({{l0, t1}}), true); \
}
FOREVERY_COMMON_CONJ_BINOP(IMPL_MERGER_TEST_CONJ)

#undef IMPL_MERGER_TEST_CONJ
FOREVERY_PAIR_OF_COMMON_CONJ_CONJ_BINOP(IMPL_MERGER_TEST_CONJ_CONJ_UNDEF)

#undef IMPL_MERGER_TEST_CONJ_CONJ_UNDEF

/// Vector multiplication (conjunction) of 3 vectors, i.e.;
/// o(i) = b(i) * c(i) * o(i)
/// which should form the single lattice point (note how a synthetic tensor
/// i_03_U is created for the sparse output)
/// {
/// lat( i_00_U i_01_U i_03_U / (tensor_0 * tensor_1 * output_tensor_2) )
/// }
/// after optimization, the synthetic tensor should be preserved.
/// {
/// lat( i_03_U / (tensor_0 * tensor_1 * output_tensor_2) )
/// }
#define IMPL_MERGER_TEST_CONJ_CONJ_SPARSE_OUT(CONJ1, CONJ2) \
TEST_F(MergerTest3T1L_SO, vector_##CONJ1##_##CONJ2) { \
auto em = CONJ1##Expr(t0, t1); \
auto e = CONJ2##Expr(em, t2); \
auto p0 = tensorPattern(t0); \
auto p1 = tensorPattern(t1); \
auto p2 = tensorPattern(t2); \
auto s = merger.buildLattices(e, l0); \
expectNumLatPoints(s, 1); \
expectLatPoint(s, lat(0), CONJ2##Pattern(CONJ1##Pattern(p0, p1), p2), \
loopsToBits({{l0, t0}, {l0, t1}, {l0, t3}})); \
s = merger.optimizeSet(s); \
expectNumLatPoints(s, 1); \
expectLatPoint(s, lat(0), CONJ2##Pattern(CONJ1##Pattern(p0, p1), p2), \
loopsToBits({{l0, t3}}), true); \
}

FOREVERY_PAIR_OF_COMMON_CONJ_CONJ_BINOP(IMPL_MERGER_TEST_CONJ_CONJ_SPARSE_OUT)

#undef IMPL_MERGER_TEST_CONJ_CONJ_SPARSE_OUT

/// Vector addition (disjunction) of 2 vectors. i.e.;
/// a(i) = b(i) + c(i)
Expand Down

0 comments on commit d30dccd

Please sign in to comment.