clang-p2996/mlir/test/Dialect/SparseTensor/fold.mlir
wren romano a0615d020a [mlir][sparse] Renaming the STEA field dimLevelType to lvlTypes
This commit is part of the migration towards the new STEA syntax/design. In particular, it includes the following changes:
* Renaming compiler-internal functions/methods:
  * `SparseTensorEncodingAttr::{getDimLevelType => getLvlTypes}`
  * `Merger::{getDimLevelType => getLvlType}` (for consistency)
  * `sparse_tensor::{getDimLevelType => buildLevelType}` (to help reduce confusion with actual getter methods)
* Renaming external facets to match:
  * the STEA parser and printer
  * the C and Python bindings
  * PyTACO

However, the actual renaming of the `DimLevelType` itself (along with all the "dlt" names) will be handled in a separate commit.
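
For illustration, a minimal before/after sketch of the STEA attribute syntax affected by the parser/printer rename (the old `dimLevelType` spelling comes from the commit title; the new `lvlTypes` spelling matches the test below):

  // Old syntax (before this commit):
  #SparseVector = #sparse_tensor.encoding<{dimLevelType = ["compressed"]}>
  // New syntax (after this commit):
  #SparseVector = #sparse_tensor.encoding<{lvlTypes = ["compressed"]}>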

Reviewed By: aartbik

Differential Revision: https://reviews.llvm.org/D150330
2023-05-17 14:24:09 -07:00

// RUN: mlir-opt %s --canonicalize --cse | FileCheck %s

#SparseVector = #sparse_tensor.encoding<{lvlTypes = ["compressed"]}>

// CHECK-LABEL: func @sparse_nop_dense2dense_convert(
// CHECK-SAME: %[[A:.*]]: tensor<64xf32>)
// CHECK-NOT: sparse_tensor.convert
// CHECK: return %[[A]] : tensor<64xf32>
func.func @sparse_nop_dense2dense_convert(%arg0: tensor<64xf32>) -> tensor<64xf32> {
  %0 = sparse_tensor.convert %arg0 : tensor<64xf32> to tensor<64xf32>
  return %0 : tensor<64xf32>
}

// CHECK-LABEL: func @sparse_dce_convert(
// CHECK-SAME: %[[A:.*]]: tensor<64xf32>)
// CHECK-NOT: sparse_tensor.convert
// CHECK: return
func.func @sparse_dce_convert(%arg0: tensor<64xf32>) {
  %0 = sparse_tensor.convert %arg0 : tensor<64xf32> to tensor<64xf32, #SparseVector>
  return
}

// CHECK-LABEL: func @sparse_dce_getters(
// CHECK-SAME: %[[A:.*]]: tensor<64xf32, #sparse_tensor.encoding<{{{.*}}}>>)
// CHECK-NOT: sparse_tensor.positions
// CHECK-NOT: sparse_tensor.coordinates
// CHECK-NOT: sparse_tensor.values
// CHECK: return
func.func @sparse_dce_getters(%arg0: tensor<64xf32, #SparseVector>) {
  %0 = sparse_tensor.positions %arg0 { level = 0 : index } : tensor<64xf32, #SparseVector> to memref<?xindex>
  %1 = sparse_tensor.coordinates %arg0 { level = 0 : index } : tensor<64xf32, #SparseVector> to memref<?xindex>
  %2 = sparse_tensor.values %arg0 : tensor<64xf32, #SparseVector> to memref<?xf32>
  return
}

// CHECK-LABEL: func @sparse_concat_dce(
// CHECK-NOT: sparse_tensor.concatenate
// CHECK: return
func.func @sparse_concat_dce(%arg0: tensor<2xf64, #SparseVector>,
                             %arg1: tensor<3xf64, #SparseVector>,
                             %arg2: tensor<4xf64, #SparseVector>) {
  %0 = sparse_tensor.concatenate %arg0, %arg1, %arg2 {dimension = 0 : index}
       : tensor<2xf64, #SparseVector>,
         tensor<3xf64, #SparseVector>,
         tensor<4xf64, #SparseVector> to tensor<9xf64, #SparseVector>
  return
}

// CHECK-LABEL: func @sparse_get_specifier_dce_fold(
// CHECK-SAME: %[[A0:.*]]: !sparse_tensor.storage_specifier
// CHECK-SAME: %[[A1:.*]]: index,
// CHECK-SAME: %[[A2:.*]]: index)
// CHECK-NOT: sparse_tensor.storage_specifier.set
// CHECK-NOT: sparse_tensor.storage_specifier.get
// CHECK: return %[[A1]]
func.func @sparse_get_specifier_dce_fold(%arg0: !sparse_tensor.storage_specifier<#SparseVector>, %arg1: index, %arg2: index) -> index {
  %0 = sparse_tensor.storage_specifier.set %arg0 lvl_sz at 0 with %arg1
       : !sparse_tensor.storage_specifier<#SparseVector>
  %1 = sparse_tensor.storage_specifier.set %0 pos_mem_sz at 0 with %arg2
       : !sparse_tensor.storage_specifier<#SparseVector>
  %2 = sparse_tensor.storage_specifier.get %1 lvl_sz at 0
       : !sparse_tensor.storage_specifier<#SparseVector>
  return %2 : index
}