diff options
author | Aart Bik <ajcbik@google.com> | 2022-07-09 07:12:25 +0300 |
---|---|---|
committer | Aart Bik <ajcbik@google.com> | 2022-07-12 00:49:06 +0300 |
commit | faa00c131351725d8db74bac6a06459430344455 (patch) | |
tree | 57de9430a994249be45f128717bd3915692f23ba /mlir/test/Dialect/SparseTensor | |
parent | 0ed8d8209584daa5ff30aae51b5396d05d7aa997 (diff) |
[mlir][sparse] implement sparse2sparse reshaping (expand/collapse)
A previous revision implemented expand/collapse reshaping between
dense and sparse tensors for sparse2dense and dense2sparse since those
could use the "cheap" view reshape on the already materialized
dense tensor (at either the input or output side), and do some
reshuffling from or to sparse. The dense2dense case, as always,
is handled with a "cheap" view change.
This revision implements the sparse2sparse cases. Lacking any "view"
support on sparse tensors, this operation necessarily has to perform
data reshuffling on both ends.
Tracker for improving this:
https://github.com/llvm/llvm-project/issues/56477
Reviewed By: bixia
Differential Revision: https://reviews.llvm.org/D129416
Diffstat (limited to 'mlir/test/Dialect/SparseTensor')
-rwxr-xr-x[-rw-r--r--] | mlir/test/Dialect/SparseTensor/rewriting.mlir | 16 | ||||
-rw-r--r-- | mlir/test/Dialect/SparseTensor/sparse_reshape.mlir | 79 |
2 files changed, 82 insertions, 13 deletions
diff --git a/mlir/test/Dialect/SparseTensor/rewriting.mlir b/mlir/test/Dialect/SparseTensor/rewriting.mlir index 3955310fce9b..000c3560f1e0 100644..100755 --- a/mlir/test/Dialect/SparseTensor/rewriting.mlir +++ b/mlir/test/Dialect/SparseTensor/rewriting.mlir @@ -40,8 +40,14 @@ func.func @expand_to_sparse(%arg0: tensor<12xf64>) -> tensor<3x4xf64, #SparseMat return %0 : tensor<3x4xf64, #SparseMatrix> } -// TODO: make this work +// +// Not rewritten, needs conversion. +// // CHECK-LABEL: func.func @expand_sparse2sparse( +// CHECK-SAME: %[[A:.*]]: tensor<12xf64, #sparse_tensor.encoding<{{{.*}}}>>) -> tensor<3x4xf64, #sparse_tensor.encoding<{{{.*}}}>> { +// CHECK: %[[E:.*]] = tensor.expand_shape %[[A]] {{.*}} : tensor<12xf64, #sparse_tensor.encoding<{{{.*}}}>> into tensor<3x4xf64, #sparse_tensor.encoding<{{{.*}}}>> +// CHECK: return %[[E]] : tensor<3x4xf64, #sparse_tensor.encoding<{{{.*}}}>> +// CHECK: } func.func @expand_sparse2sparse(%arg0: tensor<12xf64, #SparseVector>) -> tensor<3x4xf64, #SparseMatrix> { %0 = tensor.expand_shape %arg0 [[0, 1]] : tensor<12xf64, #SparseVector> into tensor<3x4xf64, #SparseMatrix> return %0 : tensor<3x4xf64, #SparseMatrix> @@ -79,8 +85,14 @@ func.func @collapse_to_sparse(%arg0: tensor<3x4xf64>) -> tensor<12xf64, #SparseV return %0 : tensor<12xf64, #SparseVector> } -// TODO: make this work +// +// Not rewritten, needs conversion. 
+// // CHECK-LABEL: func.func @collapse_sparse2sparse( +// CHECK-SAME: %[[A:.*]]: tensor<3x4xf64, #sparse_tensor.encoding<{{{.*}}}>>) -> tensor<12xf64, #sparse_tensor.encoding<{{{.*}}}>> { +// CHECK: %[[C:.*]] = tensor.collapse_shape %[[A]] {{.*}} : tensor<3x4xf64, #sparse_tensor.encoding<{{{.*}}}>> into tensor<12xf64, #sparse_tensor.encoding<{{{.*}}}>> +// CHECK: return %[[C]] : tensor<12xf64, #sparse_tensor.encoding<{{{.*}}}>> +// CHECK: } func.func @collapse_sparse2sparse(%arg0: tensor<3x4xf64, #SparseMatrix>) -> tensor<12xf64, #SparseVector> { %0 = tensor.collapse_shape %arg0 [[0, 1]] : tensor<3x4xf64, #SparseMatrix> into tensor<12xf64, #SparseVector> return %0 : tensor<12xf64, #SparseVector> diff --git a/mlir/test/Dialect/SparseTensor/sparse_reshape.mlir b/mlir/test/Dialect/SparseTensor/sparse_reshape.mlir index c791536e1519..65eb56b9bac3 100644 --- a/mlir/test/Dialect/SparseTensor/sparse_reshape.mlir +++ b/mlir/test/Dialect/SparseTensor/sparse_reshape.mlir @@ -1,24 +1,81 @@ -// RUN: mlir-opt %s | mlir-opt | FileCheck %s - -// TODO: check lowering to an actual implementation +// RUN: mlir-opt %s | mlir-opt | FileCheck %s --check-prefix=CHECK-ROUND +// RUN: mlir-opt %s --sparse-tensor-conversion --cse | FileCheck %s --check-prefix=CHECK-CONV #SparseVector = #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }> #SparseMatrix = #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }> -// CHECK-LABEL: func.func @sparse_expand( -// CHECK-SAME: %[[A:.*]]: tensor<100xf64, #sparse_tensor.encoding<{{{.*}}}>>) -> tensor<10x10xf64, #sparse_tensor.encoding<{{{.*}}}>> -// CHECK: %[[E:.*]] = tensor.expand_shape %[[A]] {{\[\[}}0, 1]] : tensor<100xf64, #sparse_tensor.encoding<{{{.*}}}>> into tensor<10x10xf64, #sparse_tensor.encoding<{{{.*}}}>> -// CHECK: return %[[E]] : tensor<10x10xf64, #sparse_tensor.encoding<{{{.*}}}>> +// +// roundtrip: +// +// CHECK-ROUND-LABEL: func.func @sparse_expand( +// CHECK-ROUND-SAME: %[[A:.*]]: tensor<100xf64, 
#sparse_tensor.encoding<{{{.*}}}>>) -> tensor<10x10xf64, #sparse_tensor.encoding<{{{.*}}}>> +// CHECK-ROUND: %[[E:.*]] = tensor.expand_shape %[[A]] {{\[\[}}0, 1]] : tensor<100xf64, #sparse_tensor.encoding<{{{.*}}}>> into tensor<10x10xf64, #sparse_tensor.encoding<{{{.*}}}>> +// CHECK-ROUND: return %[[E]] : tensor<10x10xf64, #sparse_tensor.encoding<{{{.*}}}>> +// +// conversion: +// +// CHECK-CONV-LABEL: func.func @sparse_expand( +// CHECK-CONV-DAG: %[[C0:.*]] = arith.constant 0 : index +// CHECK-CONV-DAG: %[[C1:.*]] = arith.constant 1 : index +// CHECK-CONV-DAG: %[[C10:.*]] = arith.constant 10 : index +// CHECK-CONV-DAG: call @newSparseTensor +// CHECK-CONV-DAG: call @newSparseTensor +// CHECK-CONV: scf.while : () -> () { +// CHECK-CONV: call @getNextF64 +// CHECK-CONV: scf.condition(%13) +// CHECK-CONV: } do { +// CHECK-CONV: %[[X:.*]] = memref.load %{{.*}}[%[[C0]]] : memref<?xindex> +// CHECK-CONV: %[[D:.*]] = arith.divui %[[X]], %[[C10]] : index +// CHECK-CONV: memref.store %[[D]], %{{.*}}[%[[C0]]] : memref<?xindex> +// CHECK-CONV: %[[R:.*]] = arith.remui %[[X]], %[[C10]] : index +// CHECK-CONV: memref.store %[[R]], %{{.*}}[%[[C1]]] : memref<?xindex> +// CHECK-CONV: call @addEltF64 +// CHECK-CONV: scf.yield +// CHECK-CONV: } +// CHECK-CONV: %[[N:.*]] = call @newSparseTensor +// CHECK-CONV: call @delSparseTensorCOOF64 +// CHECK-CONV: call @delSparseTensorCOOF64 +// CHECK-CONV: return %[[N]] : !llvm.ptr<i8> +// func.func @sparse_expand(%arg0: tensor<100xf64, #SparseVector>) -> tensor<10x10xf64, #SparseMatrix> { %0 = tensor.expand_shape %arg0 [[0, 1]] : tensor<100xf64, #SparseVector> into tensor<10x10xf64, #SparseMatrix> return %0 : tensor<10x10xf64, #SparseMatrix> } -// CHECK-LABEL: func.func @sparse_collapse( -// CHECK-SAME: %[[A:.*]]: tensor<10x10xf64, #sparse_tensor.encoding<{{{.*}}}>>) -> tensor<100xf64, #sparse_tensor.encoding<{{{.*}}}>> -// CHECK: %[[C:.*]] = tensor.collapse_shape %[[A]] {{\[\[}}0, 1]] : tensor<10x10xf64, #sparse_tensor.encoding<{{{.*}}}>> 
into tensor<100xf64, #sparse_tensor.encoding<{{{.*}}}>> -// CHECK: return %[[C]] : tensor<100xf64, #sparse_tensor.encoding<{{{.*}}}>> +// +// roundtrip: +// +// CHECK-ROUND-LABEL: func.func @sparse_collapse( +// CHECK-ROUND-SAME: %[[A:.*]]: tensor<10x10xf64, #sparse_tensor.encoding<{{{.*}}}>>) -> tensor<100xf64, #sparse_tensor.encoding<{{{.*}}}>> +// CHECK-ROUND: %[[C:.*]] = tensor.collapse_shape %[[A]] {{\[\[}}0, 1]] : tensor<10x10xf64, #sparse_tensor.encoding<{{{.*}}}>> into tensor<100xf64, #sparse_tensor.encoding<{{{.*}}}>> +// CHECK-ROUND: return %[[C]] : tensor<100xf64, #sparse_tensor.encoding<{{{.*}}}>> +// +// conversion: +// +// CHECK-CONV-LABEL: func.func @sparse_collapse( +// CHECK-CONV-DAG: %[[C0:.*]] = arith.constant 0 : index +// CHECK-CONV-DAG: %[[C1:.*]] = arith.constant 1 : index +// CHECK-CONV-DAG: %[[C10:.*]] = arith.constant 10 : index +// CHECK-CONV-DAG: call @newSparseTensor +// CHECK-CONV-DAG: call @newSparseTensor +// CHECK-CONV: scf.while : () -> () { +// CHECK-CONV: call @getNextF64 +// CHECK-CONV: scf.condition(%13) +// CHECK-CONV: } do { +// CHECK-CONV: %[[X:.*]] = memref.load %{{.*}}[%[[C0]]] : memref<?xindex> +// CHECK-CONV: %[[M:.*]] = arith.muli %[[X]], %[[C10]] : index +// CHECK-CONV: %[[Y:.*]] = memref.load %{{.*}}[%[[C1]]] : memref<?xindex> +// CHECK-CONV: %[[A:.*]] = arith.addi %[[M]], %[[Y]] : index +// CHECK-CONV: memref.store %[[A]], %{{.*}}[%[[C0]]] : memref<?xindex> +// CHECK-CONV: call @addEltF64 +// CHECK-CONV: scf.yield +// CHECK-CONV: } +// CHECK-CONV: %[[N:.*]] = call @newSparseTensor +// CHECK-CONV: call @delSparseTensorCOOF64 +// CHECK-CONV: call @delSparseTensorCOOF64 +// CHECK-CONV: return %[[N]] : !llvm.ptr<i8> +// func.func @sparse_collapse(%arg0: tensor<10x10xf64, #SparseMatrix>) -> tensor<100xf64, #SparseVector> { %0 = tensor.collapse_shape %arg0 [[0, 1]] : tensor<10x10xf64, #SparseMatrix> into tensor<100xf64, #SparseVector> |