github.com/llvm/llvm-project.git
Diffstat (limited to 'mlir/test')
-rwxr-xr-x [-rw-r--r--]  mlir/test/Dialect/SparseTensor/rewriting.mlir                      | 16
-rw-r--r--               mlir/test/Dialect/SparseTensor/sparse_reshape.mlir                 | 79
-rwxr-xr-x [-rw-r--r--]  mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reshape.mlir | 30
3 files changed, 102 insertions(+), 23 deletions(-)
diff --git a/mlir/test/Dialect/SparseTensor/rewriting.mlir b/mlir/test/Dialect/SparseTensor/rewriting.mlir
index 3955310fce9b..000c3560f1e0 100644..100755
--- a/mlir/test/Dialect/SparseTensor/rewriting.mlir
+++ b/mlir/test/Dialect/SparseTensor/rewriting.mlir
@@ -40,8 +40,14 @@ func.func @expand_to_sparse(%arg0: tensor<12xf64>) -> tensor<3x4xf64, #SparseMat
return %0 : tensor<3x4xf64, #SparseMatrix>
}
-// TODO: make this work
+//
+// Not rewritten, needs conversion.
+//
// CHECK-LABEL: func.func @expand_sparse2sparse(
+// CHECK-SAME: %[[A:.*]]: tensor<12xf64, #sparse_tensor.encoding<{{{.*}}}>>) -> tensor<3x4xf64, #sparse_tensor.encoding<{{{.*}}}>> {
+// CHECK: %[[E:.*]] = tensor.expand_shape %[[A]] {{.*}} : tensor<12xf64, #sparse_tensor.encoding<{{{.*}}}>> into tensor<3x4xf64, #sparse_tensor.encoding<{{{.*}}}>>
+// CHECK: return %[[E]] : tensor<3x4xf64, #sparse_tensor.encoding<{{{.*}}}>>
+// CHECK: }
func.func @expand_sparse2sparse(%arg0: tensor<12xf64, #SparseVector>) -> tensor<3x4xf64, #SparseMatrix> {
%0 = tensor.expand_shape %arg0 [[0, 1]] : tensor<12xf64, #SparseVector> into tensor<3x4xf64, #SparseMatrix>
return %0 : tensor<3x4xf64, #SparseMatrix>
@@ -79,8 +85,14 @@ func.func @collapse_to_sparse(%arg0: tensor<3x4xf64>) -> tensor<12xf64, #SparseV
return %0 : tensor<12xf64, #SparseVector>
}
-// TODO: make this work
+//
+// Not rewritten, needs conversion.
+//
// CHECK-LABEL: func.func @collapse_sparse2sparse(
+// CHECK-SAME: %[[A:.*]]: tensor<3x4xf64, #sparse_tensor.encoding<{{{.*}}}>>) -> tensor<12xf64, #sparse_tensor.encoding<{{{.*}}}>> {
+// CHECK: %[[C:.*]] = tensor.collapse_shape %[[A]] {{.*}} : tensor<3x4xf64, #sparse_tensor.encoding<{{{.*}}}>> into tensor<12xf64, #sparse_tensor.encoding<{{{.*}}}>>
+// CHECK: return %[[C]] : tensor<12xf64, #sparse_tensor.encoding<{{{.*}}}>>
+// CHECK: }
func.func @collapse_sparse2sparse(%arg0: tensor<3x4xf64, #SparseMatrix>) -> tensor<12xf64, #SparseVector> {
%0 = tensor.collapse_shape %arg0 [[0, 1]] : tensor<3x4xf64, #SparseMatrix> into tensor<12xf64, #SparseVector>
return %0 : tensor<12xf64, #SparseVector>
diff --git a/mlir/test/Dialect/SparseTensor/sparse_reshape.mlir b/mlir/test/Dialect/SparseTensor/sparse_reshape.mlir
index c791536e1519..65eb56b9bac3 100644
--- a/mlir/test/Dialect/SparseTensor/sparse_reshape.mlir
+++ b/mlir/test/Dialect/SparseTensor/sparse_reshape.mlir
@@ -1,24 +1,81 @@
-// RUN: mlir-opt %s | mlir-opt | FileCheck %s
-
-// TODO: check lowering to an actual implementation
+// RUN: mlir-opt %s | mlir-opt | FileCheck %s --check-prefix=CHECK-ROUND
+// RUN: mlir-opt %s --sparse-tensor-conversion --cse | FileCheck %s --check-prefix=CHECK-CONV
#SparseVector = #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>
#SparseMatrix = #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>
-// CHECK-LABEL: func.func @sparse_expand(
-// CHECK-SAME: %[[A:.*]]: tensor<100xf64, #sparse_tensor.encoding<{{{.*}}}>>) -> tensor<10x10xf64, #sparse_tensor.encoding<{{{.*}}}>>
-// CHECK: %[[E:.*]] = tensor.expand_shape %[[A]] {{\[\[}}0, 1]] : tensor<100xf64, #sparse_tensor.encoding<{{{.*}}}>> into tensor<10x10xf64, #sparse_tensor.encoding<{{{.*}}}>>
-// CHECK: return %[[E]] : tensor<10x10xf64, #sparse_tensor.encoding<{{{.*}}}>>
+//
+// roundtrip:
+//
+// CHECK-ROUND-LABEL: func.func @sparse_expand(
+// CHECK-ROUND-SAME: %[[A:.*]]: tensor<100xf64, #sparse_tensor.encoding<{{{.*}}}>>) -> tensor<10x10xf64, #sparse_tensor.encoding<{{{.*}}}>>
+// CHECK-ROUND: %[[E:.*]] = tensor.expand_shape %[[A]] {{\[\[}}0, 1]] : tensor<100xf64, #sparse_tensor.encoding<{{{.*}}}>> into tensor<10x10xf64, #sparse_tensor.encoding<{{{.*}}}>>
+// CHECK-ROUND: return %[[E]] : tensor<10x10xf64, #sparse_tensor.encoding<{{{.*}}}>>
+//
+// conversion:
+//
+// CHECK-CONV-LABEL: func.func @sparse_expand(
+// CHECK-CONV-DAG: %[[C0:.*]] = arith.constant 0 : index
+// CHECK-CONV-DAG: %[[C1:.*]] = arith.constant 1 : index
+// CHECK-CONV-DAG: %[[C10:.*]] = arith.constant 10 : index
+// CHECK-CONV-DAG: call @newSparseTensor
+// CHECK-CONV-DAG: call @newSparseTensor
+// CHECK-CONV: scf.while : () -> () {
+// CHECK-CONV: call @getNextF64
+// CHECK-CONV: scf.condition(%13)
+// CHECK-CONV: } do {
+// CHECK-CONV: %[[X:.*]] = memref.load %{{.*}}[%[[C0]]] : memref<?xindex>
+// CHECK-CONV: %[[D:.*]] = arith.divui %[[X]], %[[C10]] : index
+// CHECK-CONV: memref.store %[[D]], %{{.*}}[%[[C0]]] : memref<?xindex>
+// CHECK-CONV: %[[R:.*]] = arith.remui %[[X]], %[[C10]] : index
+// CHECK-CONV: memref.store %[[R]], %{{.*}}[%[[C1]]] : memref<?xindex>
+// CHECK-CONV: call @addEltF64
+// CHECK-CONV: scf.yield
+// CHECK-CONV: }
+// CHECK-CONV: %[[N:.*]] = call @newSparseTensor
+// CHECK-CONV: call @delSparseTensorCOOF64
+// CHECK-CONV: call @delSparseTensorCOOF64
+// CHECK-CONV: return %[[N]] : !llvm.ptr<i8>
+//
func.func @sparse_expand(%arg0: tensor<100xf64, #SparseVector>) -> tensor<10x10xf64, #SparseMatrix> {
%0 = tensor.expand_shape %arg0 [[0, 1]] :
tensor<100xf64, #SparseVector> into tensor<10x10xf64, #SparseMatrix>
return %0 : tensor<10x10xf64, #SparseMatrix>
}
-// CHECK-LABEL: func.func @sparse_collapse(
-// CHECK-SAME: %[[A:.*]]: tensor<10x10xf64, #sparse_tensor.encoding<{{{.*}}}>>) -> tensor<100xf64, #sparse_tensor.encoding<{{{.*}}}>>
-// CHECK: %[[C:.*]] = tensor.collapse_shape %[[A]] {{\[\[}}0, 1]] : tensor<10x10xf64, #sparse_tensor.encoding<{{{.*}}}>> into tensor<100xf64, #sparse_tensor.encoding<{{{.*}}}>>
-// CHECK: return %[[C]] : tensor<100xf64, #sparse_tensor.encoding<{{{.*}}}>>
+//
+// roundtrip:
+//
+// CHECK-ROUND-LABEL: func.func @sparse_collapse(
+// CHECK-ROUND-SAME: %[[A:.*]]: tensor<10x10xf64, #sparse_tensor.encoding<{{{.*}}}>>) -> tensor<100xf64, #sparse_tensor.encoding<{{{.*}}}>>
+// CHECK-ROUND: %[[C:.*]] = tensor.collapse_shape %[[A]] {{\[\[}}0, 1]] : tensor<10x10xf64, #sparse_tensor.encoding<{{{.*}}}>> into tensor<100xf64, #sparse_tensor.encoding<{{{.*}}}>>
+// CHECK-ROUND: return %[[C]] : tensor<100xf64, #sparse_tensor.encoding<{{{.*}}}>>
+//
+// conversion:
+//
+// CHECK-CONV-LABEL: func.func @sparse_collapse(
+// CHECK-CONV-DAG: %[[C0:.*]] = arith.constant 0 : index
+// CHECK-CONV-DAG: %[[C1:.*]] = arith.constant 1 : index
+// CHECK-CONV-DAG: %[[C10:.*]] = arith.constant 10 : index
+// CHECK-CONV-DAG: call @newSparseTensor
+// CHECK-CONV-DAG: call @newSparseTensor
+// CHECK-CONV: scf.while : () -> () {
+// CHECK-CONV: call @getNextF64
+// CHECK-CONV: scf.condition(%13)
+// CHECK-CONV: } do {
+// CHECK-CONV: %[[X:.*]] = memref.load %{{.*}}[%[[C0]]] : memref<?xindex>
+// CHECK-CONV: %[[M:.*]] = arith.muli %[[X]], %[[C10]] : index
+// CHECK-CONV: %[[Y:.*]] = memref.load %{{.*}}[%[[C1]]] : memref<?xindex>
+// CHECK-CONV: %[[A:.*]] = arith.addi %[[M]], %[[Y]] : index
+// CHECK-CONV: memref.store %[[A]], %{{.*}}[%[[C0]]] : memref<?xindex>
+// CHECK-CONV: call @addEltF64
+// CHECK-CONV: scf.yield
+// CHECK-CONV: }
+// CHECK-CONV: %[[N:.*]] = call @newSparseTensor
+// CHECK-CONV: call @delSparseTensorCOOF64
+// CHECK-CONV: call @delSparseTensorCOOF64
+// CHECK-CONV: return %[[N]] : !llvm.ptr<i8>
+//
func.func @sparse_collapse(%arg0: tensor<10x10xf64, #SparseMatrix>) -> tensor<100xf64, #SparseVector> {
%0 = tensor.collapse_shape %arg0 [[0, 1]] :
tensor<10x10xf64, #SparseMatrix> into tensor<100xf64, #SparseVector>
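For readers tracing the CHECK-CONV patterns above: the conversion pass lowers both reshapes through a COO intermediate, where a while loop pulls entries via getNextF64, remaps each coordinate, and feeds the result to addEltF64 before the final newSparseTensor call. Below is a minimal Python sketch of just the coordinate arithmetic those patterns verify (divui/remui for expand, muli/addi for collapse). The constant 10 matches the 10x10 shape in the test; the function names and COO-as-tuple-list representation are illustrative, not the actual runtime API.

# Sketch of the coordinate remapping checked by CHECK-CONV, under the
# assumptions stated above. Only the arithmetic mirrors the generated IR.

def expand_coo(src, trailing_dim=10):
    """Expand a 1-D COO list [(i, v), ...] into 2-D [(i0, i1, v), ...]."""
    dst = []
    for i, v in src:
        # divui / remui in the generated IR: i -> (i / 10, i % 10)
        dst.append((i // trailing_dim, i % trailing_dim, v))
    return dst

def collapse_coo(src, trailing_dim=10):
    """Collapse a 2-D COO list [(i0, i1, v), ...] into 1-D [(i, v), ...]."""
    dst = []
    for i0, i1, v in src:
        # muli / addi in the generated IR: (i0, i1) -> i0 * 10 + i1
        dst.append((i0 * trailing_dim + i1, v))
    return dst

# Example: index 42 in a 100-element vector maps to (4, 2) in a 10x10 matrix,
# and collapsing maps it back.
assert expand_coo([(42, 3.14)]) == [(4, 2, 3.14)]
assert collapse_coo([(4, 2, 3.14)]) == [(42, 3.14)]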
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reshape.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reshape.mlir
index eed880866884..57d2a931fad5 100644..100755
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reshape.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reshape.mlir
@@ -32,11 +32,10 @@ module {
return %0 : tensor<3x4xf64, #SparseMatrix>
}
-// TODO: make this work
-// func.func @expand_sparse2sparse(%arg0: tensor<12xf64, #SparseVector>) -> tensor<3x4xf64, #SparseMatrix> {
-// %0 = tensor.expand_shape %arg0 [[0, 1]] : tensor<12xf64, #SparseVector> into tensor<3x4xf64, #SparseMatrix>
-// return %0 : tensor<3x4xf64, #SparseMatrix>
-// }
+ func.func @expand_sparse2sparse(%arg0: tensor<12xf64, #SparseVector>) -> tensor<3x4xf64, #SparseMatrix> {
+ %0 = tensor.expand_shape %arg0 [[0, 1]] : tensor<12xf64, #SparseVector> into tensor<3x4xf64, #SparseMatrix>
+ return %0 : tensor<3x4xf64, #SparseMatrix>
+ }
func.func @collapse_dense(%arg0: tensor<3x4xf64>) -> tensor<12xf64> {
%0 = tensor.collapse_shape %arg0 [[0, 1]] : tensor<3x4xf64> into tensor<12xf64>
@@ -53,11 +52,10 @@ module {
return %0 : tensor<12xf64, #SparseVector>
}
-// TODO: make this work
-// func.func @collapse_sparse2sparse(%arg0: tensor<3x4xf64, #SparseMatrix>) -> tensor<12xf64, #SparseVector> {
-// %0 = tensor.collapse_shape %arg0 [[0, 1]] : tensor<3x4xf64, #SparseMatrix> into tensor<12xf64, #SparseVector>
-// return %0 : tensor<12xf64, #SparseVector>
-// }
+ func.func @collapse_sparse2sparse(%arg0: tensor<3x4xf64, #SparseMatrix>) -> tensor<12xf64, #SparseVector> {
+ %0 = tensor.collapse_shape %arg0 [[0, 1]] : tensor<3x4xf64, #SparseMatrix> into tensor<12xf64, #SparseVector>
+ return %0 : tensor<12xf64, #SparseVector>
+ }
//
@@ -81,10 +79,12 @@ module {
%expand0 = call @expand_dense(%v) : (tensor<12xf64>) -> tensor<3x4xf64>
%expand1 = call @expand_from_sparse(%sv) : (tensor<12xf64, #SparseVector>) -> tensor<3x4xf64>
%expand2 = call @expand_to_sparse(%v) : (tensor<12xf64>) -> tensor<3x4xf64, #SparseMatrix>
+ %expand3 = call @expand_sparse2sparse(%sv) : (tensor<12xf64, #SparseVector>) -> tensor<3x4xf64, #SparseMatrix>
%collapse0 = call @collapse_dense(%m) : (tensor<3x4xf64>) -> tensor<12xf64>
%collapse1 = call @collapse_from_sparse(%sm) : (tensor<3x4xf64, #SparseMatrix>) -> tensor<12xf64>
%collapse2 = call @collapse_to_sparse(%m) : (tensor<3x4xf64>) -> tensor<12xf64, #SparseVector>
+ %collapse3 = call @collapse_sparse2sparse(%sm) : (tensor<3x4xf64, #SparseMatrix>) -> tensor<12xf64, #SparseVector>
//
// Verify result.
@@ -92,9 +92,11 @@ module {
// CHECK: ( ( 1, 2, 3, 4 ), ( 5, 6, 7, 8 ), ( 9, 10, 11, 12 ) )
// CHECK-NEXT: ( ( 1, 2, 3, 4 ), ( 5, 6, 7, 8 ), ( 9, 10, 11, 12 ) )
// CHECK-NEXT: ( 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, -1, -1, -1, -1 )
+ // CHECK-NEXT: ( 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, -1, -1, -1, -1 )
// CHECK-NEXT: ( 1.1, 1.2, 1.3, 1.4, 2.1, 2.2, 2.3, 2.4, 3.1, 3.2, 3.3, 3.4 )
// CHECK-NEXT: ( 1.1, 1.2, 1.3, 1.4, 2.1, 2.2, 2.3, 2.4, 3.1, 3.2, 3.3, 3.4 )
// CHECK-NEXT: ( 1.1, 1.2, 1.3, 1.4, 2.1, 2.2, 2.3, 2.4, 3.1, 3.2, 3.3, 3.4, -1, -1, -1, -1 )
+ // CHECK-NEXT: ( 1.1, 1.2, 1.3, 1.4, 2.1, 2.2, 2.3, 2.4, 3.1, 3.2, 3.3, 3.4, -1, -1, -1, -1 )
//
%m0 = vector.transfer_read %expand0[%c0, %c0], %df: tensor<3x4xf64>, vector<3x4xf64>
vector.print %m0 : vector<3x4xf64>
@@ -103,6 +105,9 @@ module {
%a2 = sparse_tensor.values %expand2 : tensor<3x4xf64, #SparseMatrix> to memref<?xf64>
%m2 = vector.transfer_read %a2[%c0], %df: memref<?xf64>, vector<16xf64>
vector.print %m2 : vector<16xf64>
+ %a3 = sparse_tensor.values %expand3 : tensor<3x4xf64, #SparseMatrix> to memref<?xf64>
+ %m3 = vector.transfer_read %a3[%c0], %df: memref<?xf64>, vector<16xf64>
+ vector.print %m3 : vector<16xf64>
%v0 = vector.transfer_read %collapse0[%c0], %df: tensor<12xf64>, vector<12xf64>
vector.print %v0 : vector<12xf64>
@@ -111,12 +116,17 @@ module {
%b2 = sparse_tensor.values %collapse2 : tensor<12xf64, #SparseVector> to memref<?xf64>
%v2 = vector.transfer_read %b2[%c0], %df: memref<?xf64>, vector<16xf64>
vector.print %v2 : vector<16xf64>
+ %b3 = sparse_tensor.values %collapse3 : tensor<12xf64, #SparseVector> to memref<?xf64>
+ %v3 = vector.transfer_read %b3[%c0], %df: memref<?xf64>, vector<16xf64>
+ vector.print %v3 : vector<16xf64>
// Release sparse resources.
sparse_tensor.release %sv : tensor<12xf64, #SparseVector>
sparse_tensor.release %sm : tensor<3x4xf64, #SparseMatrix>
sparse_tensor.release %expand2 : tensor<3x4xf64, #SparseMatrix>
+ sparse_tensor.release %expand3 : tensor<3x4xf64, #SparseMatrix>
sparse_tensor.release %collapse2 : tensor<12xf64, #SparseVector>
+ sparse_tensor.release %collapse3 : tensor<12xf64, #SparseVector>
// Release dense resources.
%meme1 = bufferization.to_memref %expand1 : memref<3x4xf64>
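A note on the new CHECK-NEXT lines ending in "-1, -1, -1, -1": the test reads a values memref holding only 12 stored entries into a fixed vector<16xf64>, so vector.transfer_read pads the out-of-bounds tail with the fill value %df, which the expected output shows as -1. A minimal Python sketch of that padding behavior, with the helper name and defaults chosen here for illustration:

# Sketch of reading a fixed-length vector from a shorter buffer, padding
# past the end the way vector.transfer_read does with its fill value.

def padded_read(values, vector_len=16, pad=-1.0):
    """Read up to vector_len elements from values, padding the tail."""
    out = list(values[:vector_len])
    out += [pad] * (vector_len - len(out))
    return out

# 12 stored values read into a length-16 vector, as in the test output.
print(padded_read([float(i) for i in range(1, 13)]))
# [1.0, 2.0, ..., 12.0, -1.0, -1.0, -1.0, -1.0]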