github.com/llvm/llvm-project.git
author    Aart Bik <ajcbik@google.com>    2022-07-09 07:12:25 +0300
committer Aart Bik <ajcbik@google.com>    2022-07-12 00:49:06 +0300
commit    faa00c131351725d8db74bac6a06459430344455 (patch)
tree      57de9430a994249be45f128717bd3915692f23ba /mlir/test/Dialect/SparseTensor/sparse_reshape.mlir
parent    0ed8d8209584daa5ff30aae51b5396d05d7aa997 (diff)
[mlir][sparse] implement sparse2sparse reshaping (expand/collapse)
A previous revision implemented expand/collapse reshaping between dense and sparse tensors
for sparse2dense and dense2sparse, since those could use the "cheap" view reshape on the
already materialized dense tensor (at either the input or output side) and do some
reshuffling from or to sparse. The dense2dense case, as always, is handled with a "cheap"
view change. This revision implements the sparse2sparse cases. Lacking any "view" support
on sparse tensors, this operation necessarily has to perform data reshuffling on both ends.

Tracker for improving this: https://github.com/llvm/llvm-project/issues/56477

Reviewed By: bixia

Differential Revision: https://reviews.llvm.org/D129416
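For intuition, the coordinate remapping that this reshuffling performs for the 100 <-> 10x10
shapes used in the test below can be paraphrased as follows. This is an illustrative C++
sketch of the expand (divui/remui) and collapse (muli/addi) index arithmetic checked under
CHECK-CONV, not the actual code path; the helper names are hypothetical.

#include <cstdint>
#include <utility>

// Hypothetical helpers for the 100 <-> 10x10 reshape used in the test below.

// expand_shape: flat index i of tensor<100xf64> maps to (i / 10, i % 10) in
// tensor<10x10xf64>; the generated IR applies the same arith.divui/arith.remui
// to each index pulled from the source COO.
std::pair<uint64_t, uint64_t> expandIndex(uint64_t i) {
  return {i / 10, i % 10};
}

// collapse_shape: index (i, j) of tensor<10x10xf64> maps to the flat index
// i * 10 + j in tensor<100xf64>, matching the arith.muli/arith.addi sequence.
uint64_t collapseIndex(uint64_t i, uint64_t j) {
  return i * 10 + j;
}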
Diffstat (limited to 'mlir/test/Dialect/SparseTensor/sparse_reshape.mlir')
-rw-r--r--  mlir/test/Dialect/SparseTensor/sparse_reshape.mlir  79
1 file changed, 68 insertions, 11 deletions
diff --git a/mlir/test/Dialect/SparseTensor/sparse_reshape.mlir b/mlir/test/Dialect/SparseTensor/sparse_reshape.mlir
index c791536e1519..65eb56b9bac3 100644
--- a/mlir/test/Dialect/SparseTensor/sparse_reshape.mlir
+++ b/mlir/test/Dialect/SparseTensor/sparse_reshape.mlir
@@ -1,24 +1,81 @@
-// RUN: mlir-opt %s | mlir-opt | FileCheck %s
-
-// TODO: check lowering to an actual implementation
+// RUN: mlir-opt %s | mlir-opt | FileCheck %s --check-prefix=CHECK-ROUND
+// RUN: mlir-opt %s --sparse-tensor-conversion --cse | FileCheck %s --check-prefix=CHECK-CONV
#SparseVector = #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>
#SparseMatrix = #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>
-// CHECK-LABEL: func.func @sparse_expand(
-// CHECK-SAME: %[[A:.*]]: tensor<100xf64, #sparse_tensor.encoding<{{{.*}}}>>) -> tensor<10x10xf64, #sparse_tensor.encoding<{{{.*}}}>>
-// CHECK: %[[E:.*]] = tensor.expand_shape %[[A]] {{\[\[}}0, 1]] : tensor<100xf64, #sparse_tensor.encoding<{{{.*}}}>> into tensor<10x10xf64, #sparse_tensor.encoding<{{{.*}}}>>
-// CHECK: return %[[E]] : tensor<10x10xf64, #sparse_tensor.encoding<{{{.*}}}>>
+//
+// roundtrip:
+//
+// CHECK-ROUND-LABEL: func.func @sparse_expand(
+// CHECK-ROUND-SAME: %[[A:.*]]: tensor<100xf64, #sparse_tensor.encoding<{{{.*}}}>>) -> tensor<10x10xf64, #sparse_tensor.encoding<{{{.*}}}>>
+// CHECK-ROUND: %[[E:.*]] = tensor.expand_shape %[[A]] {{\[\[}}0, 1]] : tensor<100xf64, #sparse_tensor.encoding<{{{.*}}}>> into tensor<10x10xf64, #sparse_tensor.encoding<{{{.*}}}>>
+// CHECK-ROUND: return %[[E]] : tensor<10x10xf64, #sparse_tensor.encoding<{{{.*}}}>>
+//
+// conversion:
+//
+// CHECK-CONV-LABEL: func.func @sparse_expand(
+// CHECK-CONV-DAG: %[[C0:.*]] = arith.constant 0 : index
+// CHECK-CONV-DAG: %[[C1:.*]] = arith.constant 1 : index
+// CHECK-CONV-DAG: %[[C10:.*]] = arith.constant 10 : index
+// CHECK-CONV-DAG: call @newSparseTensor
+// CHECK-CONV-DAG: call @newSparseTensor
+// CHECK-CONV: scf.while : () -> () {
+// CHECK-CONV: call @getNextF64
+// CHECK-CONV: scf.condition(%13)
+// CHECK-CONV: } do {
+// CHECK-CONV: %[[X:.*]] = memref.load %{{.*}}[%[[C0]]] : memref<?xindex>
+// CHECK-CONV: %[[D:.*]] = arith.divui %[[X]], %[[C10]] : index
+// CHECK-CONV: memref.store %[[D]], %{{.*}}[%[[C0]]] : memref<?xindex>
+// CHECK-CONV: %[[R:.*]] = arith.remui %[[X]], %[[C10]] : index
+// CHECK-CONV: memref.store %[[R]], %{{.*}}[%[[C1]]] : memref<?xindex>
+// CHECK-CONV: call @addEltF64
+// CHECK-CONV: scf.yield
+// CHECK-CONV: }
+// CHECK-CONV: %[[N:.*]] = call @newSparseTensor
+// CHECK-CONV: call @delSparseTensorCOOF64
+// CHECK-CONV: call @delSparseTensorCOOF64
+// CHECK-CONV: return %[[N]] : !llvm.ptr<i8>
+//
func.func @sparse_expand(%arg0: tensor<100xf64, #SparseVector>) -> tensor<10x10xf64, #SparseMatrix> {
%0 = tensor.expand_shape %arg0 [[0, 1]] :
tensor<100xf64, #SparseVector> into tensor<10x10xf64, #SparseMatrix>
return %0 : tensor<10x10xf64, #SparseMatrix>
}
-// CHECK-LABEL: func.func @sparse_collapse(
-// CHECK-SAME: %[[A:.*]]: tensor<10x10xf64, #sparse_tensor.encoding<{{{.*}}}>>) -> tensor<100xf64, #sparse_tensor.encoding<{{{.*}}}>>
-// CHECK: %[[C:.*]] = tensor.collapse_shape %[[A]] {{\[\[}}0, 1]] : tensor<10x10xf64, #sparse_tensor.encoding<{{{.*}}}>> into tensor<100xf64, #sparse_tensor.encoding<{{{.*}}}>>
-// CHECK: return %[[C]] : tensor<100xf64, #sparse_tensor.encoding<{{{.*}}}>>
+//
+// roundtrip:
+//
+// CHECK-ROUND-LABEL: func.func @sparse_collapse(
+// CHECK-ROUND-SAME: %[[A:.*]]: tensor<10x10xf64, #sparse_tensor.encoding<{{{.*}}}>>) -> tensor<100xf64, #sparse_tensor.encoding<{{{.*}}}>>
+// CHECK-ROUND: %[[C:.*]] = tensor.collapse_shape %[[A]] {{\[\[}}0, 1]] : tensor<10x10xf64, #sparse_tensor.encoding<{{{.*}}}>> into tensor<100xf64, #sparse_tensor.encoding<{{{.*}}}>>
+// CHECK-ROUND: return %[[C]] : tensor<100xf64, #sparse_tensor.encoding<{{{.*}}}>>
+//
+// conversion:
+//
+// CHECK-CONV-LABEL: func.func @sparse_collapse(
+// CHECK-CONV-DAG: %[[C0:.*]] = arith.constant 0 : index
+// CHECK-CONV-DAG: %[[C1:.*]] = arith.constant 1 : index
+// CHECK-CONV-DAG: %[[C10:.*]] = arith.constant 10 : index
+// CHECK-CONV-DAG: call @newSparseTensor
+// CHECK-CONV-DAG: call @newSparseTensor
+// CHECK-CONV: scf.while : () -> () {
+// CHECK-CONV: call @getNextF64
+// CHECK-CONV: scf.condition(%13)
+// CHECK-CONV: } do {
+// CHECK-CONV: %[[X:.*]] = memref.load %{{.*}}[%[[C0]]] : memref<?xindex>
+// CHECK-CONV: %[[M:.*]] = arith.muli %[[X]], %[[C10]] : index
+// CHECK-CONV: %[[Y:.*]] = memref.load %{{.*}}[%[[C1]]] : memref<?xindex>
+// CHECK-CONV: %[[A:.*]] = arith.addi %[[M]], %[[Y]] : index
+// CHECK-CONV: memref.store %[[A]], %{{.*}}[%[[C0]]] : memref<?xindex>
+// CHECK-CONV: call @addEltF64
+// CHECK-CONV: scf.yield
+// CHECK-CONV: }
+// CHECK-CONV: %[[N:.*]] = call @newSparseTensor
+// CHECK-CONV: call @delSparseTensorCOOF64
+// CHECK-CONV: call @delSparseTensorCOOF64
+// CHECK-CONV: return %[[N]] : !llvm.ptr<i8>
+//
func.func @sparse_collapse(%arg0: tensor<10x10xf64, #SparseMatrix>) -> tensor<100xf64, #SparseVector> {
  %0 = tensor.collapse_shape %arg0 [[0, 1]] :
    tensor<10x10xf64, #SparseMatrix> into tensor<100xf64, #SparseVector>
  return %0 : tensor<100xf64, #SparseVector>
}
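For reference, the control flow that the CHECK-CONV patterns above verify can be paraphrased
as follows: the source tensor is traversed as COO, every coordinate is remapped, the remapped
elements are collected into a fresh COO buffer, and the destination sparse tensor is built
from that buffer (the runtime entry points newSparseTensor, getNextF64, addEltF64 and
delSparseTensorCOOF64 appear in the checks). The C++ below is only an illustrative sketch
under assumed data structures, not the sparse runtime API or the generated code.

#include <cstdint>
#include <utility>
#include <vector>

// Assumed, simplified COO representation for illustration only.
struct COOElement {
  std::vector<uint64_t> indices;
  double value;
};

struct COOBuffer {
  std::vector<COOElement> elements;
  void add(std::vector<uint64_t> indices, double value) {
    elements.push_back({std::move(indices), value});
  }
};

// Sketch of sparse2sparse collapse for the 10x10 -> 100 case: iterate the
// source COO, linearize each (i, j) into i * 10 + j, and add the element to
// the destination COO, from which the new sparse tensor would then be built.
COOBuffer collapse10x10(const COOBuffer &src) {
  COOBuffer dst;
  for (const COOElement &e : src.elements)
    dst.add({e.indices[0] * 10 + e.indices[1]}, e.value);
  return dst;
}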