diff --git a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorAttrDefs.td b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorAttrDefs.td
index afd978c1c57ebd..38c7200afb41ff 100644
--- a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorAttrDefs.td
+++ b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorAttrDefs.td
@@ -134,7 +134,7 @@ def SparseTensorEncodingAttr : SparseTensor_Attr<"SparseTensorEncoding",
     level-coordinates. The dimension-expressions collectively define the
     inverse map, which only needs to be provided for elaborate cases where
     it cannot be inferred automatically.
-    
+
     Each dimension could also have an optional `SparseTensorDimSliceAttr`.
     Within the sparse storage format, we refer to indices that are stored explicitly
     as **coordinates** and offsets into the storage format as **positions**.
@@ -237,10 +237,10 @@ def SparseTensorEncodingAttr : SparseTensor_Attr<"SparseTensorEncoding",
     }>
     ... tensor<20x30xf32, #BSR_explicit> ...

-    // ELL format.
+    // ELL format.
     // In the simple format for matrix, one array stores values and another
     // array stores column indices. The arrays have the same number of rows
-    // as the original matrix, but only have as many columns as
+    // as the original matrix, but only have as many columns as
     // the maximum number of nonzeros on a row of the original matrix.
     // There are many variants for ELL such as jagged diagonal scheme.
     // To implement ELL, map provides a notion of "counting a
@@ -376,6 +376,9 @@ def SparseTensorEncodingAttr : SparseTensor_Attr<"SparseTensorEncoding",
     /// the null encoding (since dense-tensors are always all-dense).
     bool isAllDense() const;

+    /// Returns true if it is a sparse tensor encoding in COO format.
+    bool isCOO() const;
+
     /// Returns true if every level is ordered. Also returns true for
     /// the null encoding (since dense-tensors are always all-ordered).
     bool isAllOrdered() const;
@@ -468,6 +471,10 @@ def SparseTensorStorageSpecifierKindAttr
 def IsSparseTensorPred
   : CPred<"!!::mlir::sparse_tensor::getSparseTensorEncoding($_self)">;

+def IsCOOPred
+  : CPred<"!!::mlir::sparse_tensor::getSparseTensorEncoding($_self) && "
+          "  ::mlir::sparse_tensor::getSparseTensorEncoding($_self).isCOO()">;
+
 def IsSparseTensorSlicePred
   : CPred<"!!::mlir::sparse_tensor::getSparseTensorEncoding($_self) && "
           "  ::mlir::sparse_tensor::getSparseTensorEncoding($_self).isSlice()">;
@@ -478,10 +485,14 @@ def IsSparseTensorSlicePred
 class SparseTensorOf<list<Type> allowedTypes>
     : TensorOf<allowedTypes, [IsSparseTensorPred], "sparse tensor">;

+class COOSparseTensorOf<list<Type> allowedTypes>
+    : TensorOf<allowedTypes, [IsCOOPred], "COO sparse tensor">;
+
 class SparseTensorSliceOf<list<Type> allowedTypes>
     : TensorOf<allowedTypes, [IsSparseTensorSlicePred], "sparse tensor slice">;

 def AnySparseTensor : SparseTensorOf<[AnyType]>;
+def AnyCOOSparseTensor : COOSparseTensorOf<[AnyType]>;
 def AnySparseTensorSlice : SparseTensorSliceOf<[AnyType]>;

 class RankedSparseTensorOf<list<Type> allowedTypes>
diff --git a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorOps.td b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorOps.td
index 042ae9693f486e..afbabb97eb71fc 100644
--- a/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorOps.td
+++ b/mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorOps.td
@@ -770,7 +770,7 @@ def SparseTensor_OutOp : SparseTensor_Op<"out", []>,
 }

 //===----------------------------------------------------------------------===//
-// Sparse Tensor Sorting Operations.
+// Sparse Tensor Sorting/Ordering Operations.
 //===----------------------------------------------------------------------===//

 def SparseTensor_SortOp : SparseTensor_Op<"sort">,
@@ -809,6 +809,36 @@ def SparseTensor_SortOp : SparseTensor_Op<"sort">,
   let hasVerifier = 1;
 }

+def SparseTensor_ReorderCOOOp : SparseTensor_Op<"reorder_coo", [Pure]>,
+    Arguments<(ins AnyCOOSparseTensor: $input_coo,
+                   SparseTensorSortKindAttr:$algorithm)>,
+    Results<(outs AnyCOOSparseTensor: $result_coo)> {
+  let summary = "Reorder the input COO such that it has the same order as "
+                "the result COO";
+  let description = [{
+    sparse_tensor.reorder_coo reorders the input COO into the order specified
+    by the result format, e.g., it can reorder an unordered COO into an
+    ordered one.
+
+    The input and result COO tensors must have the same element type, position
+    type, and coordinate type. At the moment, the operation only supports
+    reordering input and result COOs with the same dim2lvl map.
+
+    Example:
+
+    ```mlir
+    %res = sparse_tensor.reorder_coo quick_sort %coo
+         : tensor<?x?xf64, #UnorderedCOO> to tensor<?x?xf64, #OrderedCOO>
+    ```
+  }];
+
+  let assemblyFormat = "$algorithm $input_coo attr-dict"
+                       "`:` type($input_coo) `to` type($result_coo)";
+
+  let hasFolder = 1;
+  let hasVerifier = 1;
+}
+
 //===----------------------------------------------------------------------===//
 // Sparse Tensor Syntax Operations.
 //===----------------------------------------------------------------------===//
diff --git a/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp b/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
index 5b84d2158bc828..ef9d4fea68628b 100644
--- a/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
+++ b/mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
@@ -336,6 +336,10 @@ bool SparseTensorEncodingAttr::isAllDense() const {
   return !getImpl() || llvm::all_of(getLvlTypes(), isDenseDLT);
 }

+bool SparseTensorEncodingAttr::isCOO() const {
+  return getImpl() && isCOOType(*this, 0, true);
+}
+
 bool SparseTensorEncodingAttr::isAllOrdered() const {
   return !getImpl() || llvm::all_of(getLvlTypes(), isOrderedDLT);
 }
@@ -1417,6 +1421,29 @@ LogicalResult ForeachOp::verify() {
   return success();
 }

+OpFoldResult ReorderCOOOp::fold(FoldAdaptor adaptor) {
+  if (getSparseTensorEncoding(getInputCoo().getType()) ==
+      getSparseTensorEncoding(getResultCoo().getType()))
+    return getInputCoo();
+
+  return {};
+}
+
+LogicalResult ReorderCOOOp::verify() {
+  SparseTensorType srcStt = getSparseTensorType(getInputCoo());
+  SparseTensorType dstStt = getSparseTensorType(getResultCoo());
+
+  if (!srcStt.hasSameDimToLvl(dstStt))
+    return emitError("Unmatched dim2lvl map between input and result COO");
+
+  if (srcStt.getPosType() != dstStt.getPosType() ||
+      srcStt.getCrdType() != dstStt.getCrdType() ||
+      srcStt.getElementType() != dstStt.getElementType())
+    return emitError("Unmatched storage format between input and result COO");
+
+  return success();
+}
+
 LogicalResult ReduceOp::verify() {
   Type inputType = getX().getType();
   // Check correct number of block arguments and return type.
diff --git a/mlir/test/Dialect/SparseTensor/fold.mlir b/mlir/test/Dialect/SparseTensor/fold.mlir
index 089431f9e18e90..3dd1a629c129ff 100644
--- a/mlir/test/Dialect/SparseTensor/fold.mlir
+++ b/mlir/test/Dialect/SparseTensor/fold.mlir
@@ -62,3 +62,16 @@ func.func @sparse_get_specifier_dce_fold(%arg0: !sparse_tensor.storage_specifier
        : !sparse_tensor.storage_specifier<#SparseVector>
   return %2 : index
 }
+
+
+
+#COO = #sparse_tensor.encoding<{map = (d0, d1) -> (d0 : compressed(nonunique), d1 : singleton)}>
+
+// CHECK-LABEL: func @sparse_reorder_coo(
+//  CHECK-SAME: %[[A:.*]]: tensor<?x?xf32, #sparse_tensor.encoding<{{{.*}}}>>
+//   CHECK-NOT: %[[R:.*]] = sparse_tensor.reorder_coo
+//       CHECK: return %[[A]]
+func.func @sparse_reorder_coo(%arg0 : tensor<?x?xf32, #COO>) -> tensor<?x?xf32, #COO> {
+  %ret = sparse_tensor.reorder_coo quick_sort %arg0 : tensor<?x?xf32, #COO> to tensor<?x?xf32, #COO>
+  return %ret : tensor<?x?xf32, #COO>
+}
diff --git a/mlir/test/Dialect/SparseTensor/invalid.mlir b/mlir/test/Dialect/SparseTensor/invalid.mlir
index 2df4237efa0bbe..805f3d161921c1 100644
--- a/mlir/test/Dialect/SparseTensor/invalid.mlir
+++ b/mlir/test/Dialect/SparseTensor/invalid.mlir
@@ -839,3 +839,25 @@ func.func @sparse_alloc_escapes(%arg0: index) -> tensor<10x?xf64, #CSR> {
   %0 = bufferization.alloc_tensor(%arg0) : tensor<10x?xf64, #CSR>
   return %0: tensor<10x?xf64, #CSR>
 }
+
+// -----
+
+#UnorderedCOO = #sparse_tensor.encoding<{map = (d0, d1) -> (d0 : compressed(nonunique, nonordered), d1 : singleton(nonordered))}>
+#OrderedCOOPerm = #sparse_tensor.encoding<{map = (d0, d1) -> (d1 : compressed(nonunique), d0 : singleton)}>
+
+func.func @sparse_permuted_reorder_coo(%arg0 : tensor<?x?xf32, #UnorderedCOO>) -> tensor<?x?xf32, #OrderedCOOPerm> {
+  // expected-error@+1 {{Unmatched dim2lvl map between input and result COO}}
+  %ret = sparse_tensor.reorder_coo quick_sort %arg0 : tensor<?x?xf32, #UnorderedCOO> to tensor<?x?xf32, #OrderedCOOPerm>
+  return %ret : tensor<?x?xf32, #OrderedCOOPerm>
+}
+
+// -----
+
+#UnorderedCOO = #sparse_tensor.encoding<{map = (d0, d1) -> (d0 : compressed(nonunique, nonordered), d1 : singleton(nonordered))}>
+#OrderedCOO = #sparse_tensor.encoding<{map = (d0, d1) -> (d0 : compressed(nonunique), d1 : singleton)}>
+
+func.func @sparse_permuted_reorder_coo(%arg0 : tensor<?x?xf32, #UnorderedCOO>) -> tensor<?x?xi32, #OrderedCOO> {
+  // expected-error@+1 {{Unmatched storage format between input and result COO}}
+  %ret = sparse_tensor.reorder_coo quick_sort %arg0 : tensor<?x?xf32, #UnorderedCOO> to tensor<?x?xi32, #OrderedCOO>
+  return %ret : tensor<?x?xi32, #OrderedCOO>
+}
diff --git a/mlir/test/Dialect/SparseTensor/roundtrip.mlir b/mlir/test/Dialect/SparseTensor/roundtrip.mlir
index 82267be34b9384..cbc3bb824924cd 100644
--- a/mlir/test/Dialect/SparseTensor/roundtrip.mlir
+++ b/mlir/test/Dialect/SparseTensor/roundtrip.mlir
@@ -633,3 +633,17 @@ func.func @sparse_sort_coo_stable(%arg0: index, %arg1: memref<?xi64>, %arg2: memref<?xf32>) -> (memref<?xi64>, memref<?xf32>) {
   sparse_tensor.sort insertion_sort_stable %arg0, %arg1 jointly %arg2 {perm_map = #ID_MAP, ny = 1 : index}: memref<?xi64> jointly memref<?xf32>
   return %arg1, %arg2 : memref<?xi64>, memref<?xf32>
 }
+
+// -----
+
+#UnorderedCOO = #sparse_tensor.encoding<{map = (d0, d1) -> (d0 : compressed(nonunique, nonordered), d1 : singleton(nonordered))}>
+#OrderedCOO = #sparse_tensor.encoding<{map = (d0, d1) -> (d0 : compressed(nonunique), d1 : singleton)}>
+
+// CHECK-LABEL: func @sparse_reorder_coo(
+//  CHECK-SAME: %[[A:.*]]: tensor<?x?xf32, #sparse_tensor.encoding<{{{.*}}}>>
+//       CHECK: %[[R:.*]] = sparse_tensor.reorder_coo quick_sort %[[A]]
+//       CHECK: return %[[R]]
+func.func @sparse_reorder_coo(%arg0 : tensor<?x?xf32, #UnorderedCOO>) -> tensor<?x?xf32, #OrderedCOO> {
+  %ret = sparse_tensor.reorder_coo quick_sort %arg0 : tensor<?x?xf32, #UnorderedCOO> to tensor<?x?xf32, #OrderedCOO>
+  return %ret : tensor<?x?xf32, #OrderedCOO>
+}
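As a usage illustration only (not part of the patch): a rewrite pattern that needs an ordered COO could wrap a value in the new op roughly as sketched below. It assumes the standard TableGen-generated builder for `ReorderCOOOp` and the existing `SparseTensorSortKind` enum behind `SparseTensorSortKindAttr`; the helper name and the `HybridQuickSort` choice are hypothetical.

```cpp
// Hypothetical helper (illustration only): wrap an unordered COO value in a
// sparse_tensor.reorder_coo that yields the ordered COO type expected later.
#include "mlir/Dialect/SparseTensor/IR/SparseTensor.h"

using namespace mlir;
using namespace mlir::sparse_tensor;

static Value reorderToOrderedCOO(OpBuilder &builder, Location loc,
                                 Value unorderedCoo,
                                 RankedTensorType orderedCooTp) {
  // The verifier added in this patch requires both types to share the same
  // dim2lvl map, element type, position type, and coordinate type; the fold
  // removes the op again when input and result encodings are identical.
  auto algorithm = SparseTensorSortKindAttr::get(
      builder.getContext(), SparseTensorSortKind::HybridQuickSort);
  return builder.create<ReorderCOOOp>(loc, orderedCooTp, unorderedCoo,
                                      algorithm);
}
```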