// mlir/test/Dialect/SparseTensor/sparse_reshape.mlir (llvm/llvm-project)
// RUN: mlir-opt %s | mlir-opt | FileCheck %s --check-prefix=CHECK-ROUND
// RUN: mlir-opt %s --sparse-tensor-conversion --cse | FileCheck %s --check-prefix=CHECK-CONV
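
// This file exercises reshaping of sparse tensors. The CHECK-ROUND prefix
// verifies that tensor.expand_shape and tensor.collapse_shape with sparse
// encodings round-trip through the parser and printer, while CHECK-CONV
// verifies that --sparse-tensor-conversion lowers them to sparse runtime
// library calls that enumerate the input as COO and remap each coordinate
// to its position in the reshaped output.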

#SparseVector = #sparse_tensor.encoding<{ dimLevelType = [ "compressed" ] }>
#SparseMatrix = #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>

//
// roundtrip:
//
// CHECK-ROUND-LABEL: func.func @sparse_expand(
// CHECK-ROUND-SAME:  %[[A:.*]]: tensor<100xf64, #sparse_tensor.encoding<{{{.*}}}>>) -> tensor<10x10xf64, #sparse_tensor.encoding<{{{.*}}}>>
//      CHECK-ROUND:  %[[E:.*]] = tensor.expand_shape %[[A]] {{\[\[}}0, 1]] : tensor<100xf64, #sparse_tensor.encoding<{{{.*}}}>> into tensor<10x10xf64, #sparse_tensor.encoding<{{{.*}}}>>
//      CHECK-ROUND:  return %[[E]] : tensor<10x10xf64, #sparse_tensor.encoding<{{{.*}}}>>
//
// conversion:
//
// CHECK-CONV-LABEL: func.func @sparse_expand(
// CHECK-CONV-DAG:  %[[C0:.*]] = arith.constant 0 : index
// CHECK-CONV-DAG:  %[[C1:.*]] = arith.constant 1 : index
// CHECK-CONV-DAG:  %[[C10:.*]] = arith.constant 10 : index
// CHECK-CONV-DAG:  call @newSparseTensor
// CHECK-CONV-DAG:  call @newSparseTensor
// CHECK-CONV:      scf.while : () -> () {
// CHECK-CONV:        call @getNextF64
// CHECK-CONV:        scf.condition(%13)
// CHECK-CONV:      } do {
// CHECK-CONV:        %[[X:.*]] = memref.load %{{.*}}[%[[C0]]] : memref<?xindex>
// CHECK-CONV:        %[[D:.*]] = arith.divui %[[X]], %[[C10]] : index
// CHECK-CONV:        memref.store %[[D]], %{{.*}}[%[[C0]]] : memref<?xindex>
// CHECK-CONV:        %[[R:.*]] = arith.remui %[[X]], %[[C10]] : index
// CHECK-CONV:        memref.store %[[R]], %{{.*}}[%[[C1]]] : memref<?xindex>
// CHECK-CONV:        call @addEltF64
// CHECK-CONV:        scf.yield
// CHECK-CONV:      }
// CHECK-CONV:      %[[N:.*]] = call @newSparseTensor
// CHECK-CONV:      call @delSparseTensorCOOF64
// CHECK-CONV:      call @delSparseTensorCOOF64
// CHECK-CONV:      return %[[N]] : !llvm.ptr<i8>
//
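// Conversion remaps each linear coordinate x of the 1-D input to the pair
// (x / 10, x % 10) in the 2-D output, as checked by the divui/remui above.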
func.func @sparse_expand(%arg0: tensor<100xf64, #SparseVector>) -> tensor<10x10xf64, #SparseMatrix> {
  %0 = tensor.expand_shape %arg0 [[0, 1]] :
    tensor<100xf64, #SparseVector> into tensor<10x10xf64, #SparseMatrix>
  return %0 : tensor<10x10xf64, #SparseMatrix>
}

//
// roundtrip:
//
// CHECK-ROUND-LABEL: func.func @sparse_collapse(
// CHECK-ROUND-SAME:  %[[A:.*]]: tensor<10x10xf64, #sparse_tensor.encoding<{{{.*}}}>>) -> tensor<100xf64, #sparse_tensor.encoding<{{{.*}}}>>
//      CHECK-ROUND:  %[[C:.*]] = tensor.collapse_shape %[[A]] {{\[\[}}0, 1]] : tensor<10x10xf64, #sparse_tensor.encoding<{{{.*}}}>> into tensor<100xf64, #sparse_tensor.encoding<{{{.*}}}>>
//      CHECK-ROUND:  return %[[C]] : tensor<100xf64, #sparse_tensor.encoding<{{{.*}}}>>
//
// conversion:
//
// CHECK-CONV-LABEL: func.func @sparse_collapse(
// CHECK-CONV-DAG:  %[[C0:.*]] = arith.constant 0 : index
// CHECK-CONV-DAG:  %[[C1:.*]] = arith.constant 1 : index
// CHECK-CONV-DAG:  %[[C10:.*]] = arith.constant 10 : index
// CHECK-CONV-DAG:  call @newSparseTensor
// CHECK-CONV-DAG:  call @newSparseTensor
// CHECK-CONV:      scf.while : () -> () {
// CHECK-CONV:        call @getNextF64
// CHECK-CONV:        scf.condition(%13)
// CHECK-CONV:      } do {
// CHECK-CONV:        %[[X:.*]] = memref.load %{{.*}}[%[[C0]]] : memref<?xindex>
// CHECK-CONV:        %[[M:.*]] = arith.muli %[[X]], %[[C10]] : index
// CHECK-CONV:        %[[Y:.*]] = memref.load %{{.*}}[%[[C1]]] : memref<?xindex>
// CHECK-CONV:        %[[A:.*]] = arith.addi %[[M]], %[[Y]] : index
// CHECK-CONV:        memref.store %[[A]], %{{.*}}[%[[C0]]] : memref<?xindex>
// CHECK-CONV:        call @addEltF64
// CHECK-CONV:        scf.yield
// CHECK-CONV:      }
// CHECK-CONV:      %[[N:.*]] = call @newSparseTensor
// CHECK-CONV:      call @delSparseTensorCOOF64
// CHECK-CONV:      call @delSparseTensorCOOF64
// CHECK-CONV:      return %[[N]] : !llvm.ptr<i8>
//
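// Conversion remaps each coordinate pair (i, j) of the 2-D input to the
// linear coordinate i * 10 + j in the 1-D output, as checked by the
// muli/addi above.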
func.func @sparse_collapse(%arg0: tensor<10x10xf64, #SparseMatrix>) -> tensor<100xf64, #SparseVector> {
  %0 = tensor.collapse_shape %arg0 [[0, 1]] :
    tensor<10x10xf64, #SparseMatrix> into tensor<100xf64, #SparseVector>
  return %0 : tensor<100xf64, #SparseVector>
}