clang-p2996/mlir/test/Dialect/SparseTensor/minipipeline_vector.mlir
Aart Bik 438a7d4c98 [mlir][sparse] expose optimization flags to mini pipeline (#95158)
Some of the options were fed only into the full sparse pipeline. However,
some backends prefer to use the sparse mini pipeline. This change exposes
some important optimization flags to that pass as well, preparing for
SIMDization of sparsified PyTorch code.
2024-06-11 14:20:58 -07:00
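
For example, the vectorization length can now be passed straight to the mini
pipeline pass on the command line (a sketch; "vl" is the only option this test
exercises, so any other option names would need to be checked against the pass
definition):

    mlir-opt input.mlir --sparsification-and-bufferization="vl=8"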

// RUN: mlir-opt %s --sparsification-and-bufferization | FileCheck %s --check-prefix=CHECK-NOVEC
// RUN: mlir-opt %s --sparsification-and-bufferization="vl=8" | FileCheck %s --check-prefix=CHECK-VEC
// Test to ensure we can pass optimization flags into
// the mini sparsification and bufferization pipeline.
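//
// With "vl=8" the sparsifier vectorizes the reduction loop with vector
// length 8, which is why CHECK-VEC below expects operations on vector<8xf32>.
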
#SV = #sparse_tensor.encoding<{ map = (d0) -> (d0 : compressed) }>
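// The #SV encoding marks the 1-D operand as a sparse vector with a single
// compressed level, so the generated loop visits only stored entries.
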
#trait_sum_reduction = {
  indexing_maps = [
    affine_map<(i) -> (i)>, // a
    affine_map<(i) -> ()>   // x (scalar out)
  ],
  iterator_types = ["reduction"],
  doc = "x += SUM_i a(i)"
}
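
// The trait maps the input over the reduction dimension i and the output to
// a 0-d (scalar) tensor, so linalg.generic folds all stored elements of the
// sparse vector into x, matching the doc string "x += SUM_i a(i)".
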
//
// CHECK-NOVEC-LABEL: func.func @sum_reduction
// CHECK-NOVEC:       scf.for
// CHECK-NOVEC:         arith.addf %{{.*}} %{{.*}} : f32
// CHECK-NOVEC:       }
//
// CHECK-VEC-LABEL: func.func @sum_reduction
// CHECK-VEC:       vector.insertelement
// CHECK-VEC:       scf.for
// CHECK-VEC:         vector.create_mask
// CHECK-VEC:         vector.maskedload
// CHECK-VEC:         arith.addf %{{.*}} %{{.*}} : vector<8xf32>
// CHECK-VEC:       }
// CHECK-VEC:       vector.reduction <add>
//
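// In the vectorized version, vector.insertelement presumably seeds the
// accumulator vector with the incoming scalar x, vector.create_mask and
// vector.maskedload handle a trailing partial chunk of stored entries, and
// vector.reduction <add> folds the 8 lanes back into a scalar after the loop.
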
func.func @sum_reduction(%arga: tensor<?xf32, #SV>,
                         %argx: tensor<f32>) -> tensor<f32> {
  %0 = linalg.generic #trait_sum_reduction
    ins(%arga: tensor<?xf32, #SV>)
    outs(%argx: tensor<f32>) {
      ^bb(%a: f32, %x: f32):
        %0 = arith.addf %x, %a : f32
        linalg.yield %0 : f32
  } -> tensor<f32>
  return %0 : tensor<f32>
}
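
To reproduce both check configurations locally, the file can be run through
lit from an MLIR build tree (a sketch; the build directory name is an
assumption):

    build/bin/llvm-lit clang-p2996/mlir/test/Dialect/SparseTensor/minipipeline_vector.mlir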