**COO**
`lvlTypes = [ "compressed_nu", "singleton" ]` to `map = (d0, d1) -> (d0
: compressed(nonunique), d1 : singleton)`
`lvlTypes = [ "compressed_nu_no", "singleton_no" ]` to `map = (d0, d1)
-> (d0 : compressed(nonunique, nonordered), d1 : singleton(nonordered))`
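As a full attribute, the ordered variant reads as follows (this mirrors the `#COO` definition in the test below; the `#UnorderedCOO` name is only illustrative):

```mlir
#COO = #sparse_tensor.encoding<{
  map = (d0, d1) -> (d0 : compressed(nonunique), d1 : singleton)
}>

// Unordered variant: both levels additionally carry the nonordered property.
#UnorderedCOO = #sparse_tensor.encoding<{
  map = (d0, d1) -> (d0 : compressed(nonunique, nonordered), d1 : singleton(nonordered))
}>
```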
**SortedCOO**
`lvlTypes = [ "compressed_nu", "singleton" ]` to `map = (d0, d1) -> (d0
: compressed(nonunique), d1 : singleton)`
**BCOO**
`lvlTypes = [ "dense", "compressed_hi_nu", "singleton" ]` to `map = (d0,
d1, d2) -> (d0 : dense, d1 : compressed(nonunique, high), d2 :
singleton)`
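Spelled out as a complete attribute, for example (the `#BCOO` name is just for illustration):

```mlir
#BCOO = #sparse_tensor.encoding<{
  map = (d0, d1, d2) -> (d0 : dense, d1 : compressed(nonunique, high), d2 : singleton)
}>
```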
**BCSR**
`lvlTypes = [ "compressed", "compressed", "dense", "dense" ], dimToLvl =
affine_map<(d0, d1) -> (d0 floordiv 2, d1 floordiv 3, d0 mod 2, d1 mod
3)>` to
`map = ( i, j ) ->
( i floordiv 2 : compressed,
j floordiv 3 : compressed,
i mod 2 : dense,
j mod 3 : dense
)`
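As a complete attribute this becomes, e.g. (illustrative `#BCSR` name):

```mlir
#BCSR = #sparse_tensor.encoding<{
  map = (i, j) -> (i floordiv 2 : compressed,
                   j floordiv 3 : compressed,
                   i mod 2      : dense,
                   j mod 3      : dense)
}>
```

Note how the single `map` subsumes both the old `lvlTypes` array and the separate `dimToLvl` affine map.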
**Tensor and other supported formats (e.g., CCC, CDC, CCCC)**
ELL and slice are not yet supported in the new syntax; the CHECK tests will be updated once printing is switched to the new syntax.
Previous PRs: #66146, #66309, #66443

// RUN: mlir-opt %s -post-sparsification-rewrite="enable-runtime-library=false enable-convert=false" | \
// RUN: FileCheck %s

#CSR = #sparse_tensor.encoding<{
  map = (d0, d1) -> (d0 : dense, d1 : compressed)
}>

#CSC = #sparse_tensor.encoding<{
  map = (d0, d1) -> (d1 : dense, d0 : compressed)
}>

#COO = #sparse_tensor.encoding<{
  map = (d0, d1) -> (d0 : compressed(nonunique), d1 : singleton)
}>

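// Reading a CSR tensor from file: the rewriter first materializes an
// intermediate COO tensor, converts it to CSR, and deallocates the temporary.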
// CHECK-LABEL: func.func @sparse_new(
// CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>) -> tensor<?x?xf32, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ] }>> {
// CHECK: %[[COO:.*]] = sparse_tensor.new %[[A]] : !llvm.ptr<i8> to tensor<?x?xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed_nu", "singleton" ] }>>
// CHECK: %[[R:.*]] = sparse_tensor.convert %[[COO]]
// CHECK: bufferization.dealloc_tensor %[[COO]]
// CHECK: return %[[R]]
func.func @sparse_new(%arg0: !llvm.ptr<i8>) -> tensor<?x?xf32, #CSR> {
  %0 = sparse_tensor.new %arg0 : !llvm.ptr<i8> to tensor<?x?xf32, #CSR>
  return %0 : tensor<?x?xf32, #CSR>
}

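// Reading a CSC tensor: same pattern as above, but the intermediate COO
// carries the (d0, d1) -> (d1, d0) dimToLvl permutation so its level order
// matches CSC.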
// CHECK-LABEL: func.func @sparse_new_csc(
// CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>) -> tensor<?x?xf32, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ], dimToLvl = affine_map<(d0, d1) -> (d1, d0)> }>> {
// CHECK: %[[COO:.*]] = sparse_tensor.new %[[A]] : !llvm.ptr<i8> to tensor<?x?xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed_nu", "singleton" ], dimToLvl = affine_map<(d0, d1) -> (d1, d0)> }>>
// CHECK: %[[R:.*]] = sparse_tensor.convert %[[COO]]
// CHECK: bufferization.dealloc_tensor %[[COO]]
// CHECK: return %[[R]]
func.func @sparse_new_csc(%arg0: !llvm.ptr<i8>) -> tensor<?x?xf32, #CSC> {
  %0 = sparse_tensor.new %arg0 : !llvm.ptr<i8> to tensor<?x?xf32, #CSC>
  return %0 : tensor<?x?xf32, #CSC>
}

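// Reading a COO tensor: the result of sparse_tensor.new already has the
// requested encoding, so no conversion or deallocation is needed.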
// CHECK-LABEL: func.func @sparse_new_coo(
// CHECK-SAME: %[[A:.*]]: !llvm.ptr<i8>) -> tensor<?x?xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed_nu", "singleton" ] }>> {
// CHECK: %[[COO:.*]] = sparse_tensor.new %[[A]] : !llvm.ptr<i8> to tensor<?x?xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed_nu", "singleton" ] }>>
// CHECK: return %[[COO]]
func.func @sparse_new_coo(%arg0: !llvm.ptr<i8>) -> tensor<?x?xf32, #COO> {
  %0 = sparse_tensor.new %arg0 : !llvm.ptr<i8> to tensor<?x?xf32, #COO>
  return %0 : tensor<?x?xf32, #COO>
}

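// Writing a CSR tensor: sparse_tensor.out is rewritten into calls to the
// sparse tensor writer API (create the writer, write the metadata, write one
// value per stored entry inside a loop nest, then delete the writer).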
// CHECK-LABEL: func.func @sparse_out(
// CHECK-SAME: %[[A:.*]]: tensor<10x20xf32, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ] }>>,
// CHECK-SAME: %[[B:.*]]: !llvm.ptr<i8>) {
// CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
// CHECK-DAG: %[[C1:.*]] = arith.constant 1 : index
// CHECK-DAG: %[[C2:.*]] = arith.constant 2 : index
// CHECK-DAG: %[[C10:.*]] = arith.constant 10 : index
// CHECK-DAG: %[[C20:.*]] = arith.constant 20 : index
// CHECK: %[[NNZ:.*]] = sparse_tensor.number_of_entries %[[A]]
// CHECK: %[[DS:.*]] = memref.alloca(%[[C2]]) : memref<?xindex>
// CHECK: memref.store %[[C10]], %[[DS]]{{\[}}%[[C0]]] : memref<?xindex>
// CHECK: memref.store %[[C20]], %[[DS]]{{\[}}%[[C1]]] : memref<?xindex>
// CHECK: %[[W:.*]] = call @createSparseTensorWriter(%[[B]])
// CHECK: call @outSparseTensorWriterMetaData(%[[W]], %[[C2]], %[[NNZ]], %[[DS]])
// CHECK: %[[V:.*]] = memref.alloca() : memref<f32>
// CHECK: scf.for %{{.*}} = %[[C0]] to %[[C10]] step %[[C1]] {
// CHECK: scf.for {{.*}} {
// CHECK: func.call @outSparseTensorWriterNextF32(%[[W]], %[[C2]], %[[DS]], %[[V]])
// CHECK: }
// CHECK: }
// CHECK: call @delSparseTensorWriter(%[[W]])
// CHECK: return
// CHECK: }
func.func @sparse_out( %arg0: tensor<10x20xf32, #CSR>, %arg1: !llvm.ptr<i8>) -> () {
  sparse_tensor.out %arg0, %arg1 : tensor<10x20xf32, #CSR>, !llvm.ptr<i8>
  return
}