As described in issue llvm/llvm-project#91518, a previous PR llvm/llvm-project#78484 introduced the `defaultMemorySpaceFn` into bufferization options, allowing one to inform OneShotBufferize that it should use a specified function to derive the memory space attribute from the encoding attribute attached to tensor types.

However, introducing this feature exposed unhandled edge cases, examples of which are introduced by this change in the new test under `test/Dialect/Bufferization/Transforms/one-shot-bufferize-encodings.mlir`.

Fixing the inconsistencies introduced by `defaultMemorySpaceFn` is relatively straightforward. This change:

- Updates the `bufferization.to_memref` and `bufferization.to_tensor` operations to explicitly include operand and destination types, whereas previously they relied on type inference to deduce the tensor types. Since type inference cannot recover the correct tensor encoding/memory space, the operand and result types must be explicitly included. This is a small assembly-format change, but it touches a large number of test files.
- Makes minor updates to other bufferization functions to handle the changes in building the above ops.
- Updates bufferization of `tensor.from_elements` to handle memory space.

Integration/upgrade guide: in downstream projects, if you have tests or MLIR files that explicitly use `bufferization.to_tensor` or `bufferization.to_memref`, update them to the new assembly format as follows:

```
%1 = bufferization.to_memref %0 : memref<10xf32>
%2 = bufferization.to_tensor %1 : memref<10xf32>
```

becomes

```
%1 = bufferization.to_memref %0 : tensor<10xf32> to memref<10xf32>
%2 = bufferization.to_tensor %1 : memref<10xf32> to tensor<10xf32>
```
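With the explicit types, the tensor encoding and the memref memory space are both visible in the IR, so round-tripping through `to_memref`/`to_tensor` no longer loses the encoding. The snippet below is a minimal sketch of what this can look like; the `1 : i64` encoding and the assumption that `defaultMemorySpaceFn` maps that encoding to memory space `1` are illustrative and not part of this change:

```
// Illustrative only: assumes a defaultMemorySpaceFn that maps the `1 : i64`
// tensor encoding to memref memory space 1.
func.func @round_trip(%t: tensor<10xf32, 1 : i64>) -> tensor<10xf32, 1 : i64> {
  // The operand's tensor type (with its encoding) is now spelled out...
  %m = bufferization.to_memref %t : tensor<10xf32, 1 : i64> to memref<10xf32, 1>
  // ...and so is the result type on the way back, so the encoding is preserved.
  %u = bufferization.to_tensor %m : memref<10xf32, 1> to tensor<10xf32, 1 : i64>
  return %u : tensor<10xf32, 1 : i64>
}
```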
// NOTE: Assertions have been autogenerated by utils/generate-test-checks.py

// RUN: mlir-opt %s --sparse-reinterpret-map -sparsification | FileCheck %s

// Example with cyclic iteration graph with sparse and dense constraints,
// but an acyclic iteration graph using sparse constraints only.

#SparseTensor = #sparse_tensor.encoding<{
  map = (d0, d1, d2, d3,
         d4, d5, d6, d7) -> (d0 : dense, d1 : dense, d2 : dense,
                             d3 : compressed, d4 : compressed, d5 : dense,
                             d6 : dense, d7 : dense)
}>

#trait_mul = {
  indexing_maps = [
    affine_map<(i,j,k,l,m,n,o,p) -> (i,j,k,l,m,n,o,p)>,  // A
    affine_map<(i,j,k,l,m,n,o,p) -> (p,o,n,m,l,k,j,i)>,  // B
    affine_map<(i,j,k,l,m,n,o,p) -> (i,j,k,l,m,n,o,p)>   // X
  ],
  iterator_types = ["parallel", "parallel", "parallel", "parallel",
                    "parallel", "parallel", "parallel", "parallel"],
  doc = "X(i,j,k,l,m,n,o,p) = A(i,j,k,l,m,n,o,p) * B(p,o,n,m,l,k,j,i)"
}

// CHECK-LABEL: func @mul(
// CHECK-SAME: %[[VAL_0:.*]]: tensor<10x20x30x40x50x60x70x80xf32>,
// CHECK-SAME: %[[VAL_1:.*]]: tensor<80x70x60x50x40x30x20x10xf32, #sparse{{[0-9]*}}>,
// CHECK-SAME: %[[VAL_2:.*]]: tensor<10x20x30x40x50x60x70x80xf32>) -> tensor<10x20x30x40x50x60x70x80xf32> {
// CHECK-DAG: %[[ZERO:.*]] = arith.constant 0.000000e+00 : f32
// CHECK-DAG: %[[VAL_5:.*]] = arith.constant 10 : index
// CHECK-DAG: %[[VAL_6:.*]] = arith.constant 20 : index
// CHECK-DAG: %[[VAL_7:.*]] = arith.constant 30 : index
// CHECK-DAG: %[[VAL_8:.*]] = arith.constant 60 : index
// CHECK-DAG: %[[VAL_9:.*]] = arith.constant 70 : index
// CHECK-DAG: %[[VAL_10:.*]] = arith.constant 80 : index
// CHECK-DAG: %[[VAL_11:.*]] = arith.constant 0 : index
// CHECK-DAG: %[[VAL_12:.*]] = arith.constant 1 : index
// CHECK-DAG: %[[VAL_13:.*]] = bufferization.to_memref %[[VAL_0]] : tensor<10x20x30x40x50x60x70x80xf32> to memref<10x20x30x40x50x60x70x80xf32>
// CHECK-DAG: %[[VAL_14:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 3 : index} : tensor<80x70x60x50x40x30x20x10xf32, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG: %[[VAL_15:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 3 : index} : tensor<80x70x60x50x40x30x20x10xf32, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG: %[[VAL_16:.*]] = sparse_tensor.positions %[[VAL_1]] {level = 4 : index} : tensor<80x70x60x50x40x30x20x10xf32, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG: %[[VAL_17:.*]] = sparse_tensor.coordinates %[[VAL_1]] {level = 4 : index} : tensor<80x70x60x50x40x30x20x10xf32, #sparse{{[0-9]*}}> to memref<?xindex>
// CHECK-DAG: %[[VAL_18:.*]] = sparse_tensor.values %[[VAL_1]] : tensor<80x70x60x50x40x30x20x10xf32, #sparse{{[0-9]*}}> to memref<?xf32>
// CHECK-DAG: %[[VAL_20:.*]] = bufferization.to_memref %[[VAL_2]] : tensor<10x20x30x40x50x60x70x80xf32> to memref<10x20x30x40x50x60x70x80xf32>
// CHECK-DAG: linalg.fill ins(%[[ZERO]] : f32) outs(%[[VAL_20]] : memref<10x20x30x40x50x60x70x80xf32>
// CHECK: scf.for %[[VAL_21:.*]] = %[[VAL_11]] to %[[VAL_10]] step %[[VAL_12]] {
// CHECK: %[[VAL_23:.*]] = arith.muli %[[VAL_21]], %[[VAL_9]] : index
// CHECK: scf.for %[[VAL_22:.*]] = %[[VAL_11]] to %[[VAL_9]] step %[[VAL_12]] {
// CHECK: %[[VAL_24:.*]] = arith.addi %[[VAL_22]], %[[VAL_23]] : index
// CHECK: %[[VAL_26:.*]] = arith.muli %[[VAL_24]], %[[VAL_8]] : index
// CHECK: scf.for %[[VAL_25:.*]] = %[[VAL_11]] to %[[VAL_8]] step %[[VAL_12]] {
// CHECK: %[[VAL_27:.*]] = arith.addi %[[VAL_25]], %[[VAL_26]] : index
// CHECK: %[[VAL_28:.*]] = memref.load %[[VAL_14]]{{\[}}%[[VAL_27]]] : memref<?xindex>
// CHECK: %[[VAL_29:.*]] = arith.addi %[[VAL_27]], %[[VAL_12]] : index
// CHECK: %[[VAL_30:.*]] = memref.load %[[VAL_14]]{{\[}}%[[VAL_29]]] : memref<?xindex>
// CHECK: scf.for %[[VAL_31:.*]] = %[[VAL_28]] to %[[VAL_30]] step %[[VAL_12]] {
// CHECK: %[[VAL_32:.*]] = memref.load %[[VAL_15]]{{\[}}%[[VAL_31]]] : memref<?xindex>
// CHECK: %[[VAL_33:.*]] = memref.load %[[VAL_16]]{{\[}}%[[VAL_31]]] : memref<?xindex>
// CHECK: %[[VAL_34:.*]] = arith.addi %[[VAL_31]], %[[VAL_12]] : index
// CHECK: %[[VAL_35:.*]] = memref.load %[[VAL_16]]{{\[}}%[[VAL_34]]] : memref<?xindex>
// CHECK: scf.for %[[VAL_36:.*]] = %[[VAL_33]] to %[[VAL_35]] step %[[VAL_12]] {
// CHECK: %[[VAL_37:.*]] = memref.load %[[VAL_17]]{{\[}}%[[VAL_36]]] : memref<?xindex>
// CHECK: %[[VAL_39:.*]] = arith.muli %[[VAL_36]], %[[VAL_7]] : index
// CHECK: scf.for %[[VAL_38:.*]] = %[[VAL_11]] to %[[VAL_7]] step %[[VAL_12]] {
// CHECK: %[[VAL_40:.*]] = arith.addi %[[VAL_38]], %[[VAL_39]] : index
// CHECK: %[[VAL_42:.*]] = arith.muli %[[VAL_40]], %[[VAL_6]] : index
// CHECK: scf.for %[[VAL_41:.*]] = %[[VAL_11]] to %[[VAL_6]] step %[[VAL_12]] {
// CHECK: %[[VAL_43:.*]] = arith.addi %[[VAL_41]], %[[VAL_42]] : index
// CHECK: %[[VAL_45:.*]] = arith.muli %[[VAL_43]], %[[VAL_5]] : index
// CHECK: scf.for %[[VAL_44:.*]] = %[[VAL_11]] to %[[VAL_5]] step %[[VAL_12]] {
// CHECK: %[[VAL_46:.*]] = arith.addi %[[VAL_44]], %[[VAL_45]] : index
// CHECK: %[[VAL_47:.*]] = memref.load %[[VAL_13]]{{\[}}%[[VAL_44]], %[[VAL_41]], %[[VAL_38]], %[[VAL_37]], %[[VAL_32]], %[[VAL_25]], %[[VAL_22]], %[[VAL_21]]] : memref<10x20x30x40x50x60x70x80xf32>
// CHECK: %[[VAL_48:.*]] = memref.load %[[VAL_18]]{{\[}}%[[VAL_46]]] : memref<?xf32>
// CHECK: %[[VAL_49:.*]] = arith.mulf %[[VAL_47]], %[[VAL_48]] : f32
// CHECK: memref.store %[[VAL_49]], %[[VAL_20]]{{\[}}%[[VAL_44]], %[[VAL_41]], %[[VAL_38]], %[[VAL_37]], %[[VAL_32]], %[[VAL_25]], %[[VAL_22]], %[[VAL_21]]] : memref<10x20x30x40x50x60x70x80xf32>
// CHECK: }
// CHECK: }
// CHECK: }
// CHECK: }
// CHECK: }
// CHECK: }
// CHECK: }
// CHECK: }
// CHECK: %[[VAL_50:.*]] = bufferization.to_tensor %[[VAL_20]] : memref<10x20x30x40x50x60x70x80xf32> to tensor<10x20x30x40x50x60x70x80xf32>
// CHECK: return %[[VAL_50]] : tensor<10x20x30x40x50x60x70x80xf32>
// CHECK: }
func.func @mul(%arga: tensor<10x20x30x40x50x60x70x80xf32>,
               %argb: tensor<80x70x60x50x40x30x20x10xf32, #SparseTensor>,
               %argx: tensor<10x20x30x40x50x60x70x80xf32>)
    -> tensor<10x20x30x40x50x60x70x80xf32> {
  %0 = linalg.generic #trait_mul
    ins(%arga, %argb: tensor<10x20x30x40x50x60x70x80xf32>,
                      tensor<80x70x60x50x40x30x20x10xf32, #SparseTensor>)
    outs(%argx: tensor<10x20x30x40x50x60x70x80xf32>) {
      ^bb(%a: f32, %b: f32, %x: f32):
        %0 = arith.mulf %a, %b : f32
        linalg.yield %0 : f32
  } -> tensor<10x20x30x40x50x60x70x80xf32>
  return %0 : tensor<10x20x30x40x50x60x70x80xf32>
}