Create the memref dialect and move dialect-specific ops from std dialect to this dialect. Moved ops: AllocOp -> MemRef_AllocOp AllocaOp -> MemRef_AllocaOp AssumeAlignmentOp -> MemRef_AssumeAlignmentOp DeallocOp -> MemRef_DeallocOp DimOp -> MemRef_DimOp MemRefCastOp -> MemRef_CastOp MemRefReinterpretCastOp -> MemRef_ReinterpretCastOp GetGlobalMemRefOp -> MemRef_GetGlobalOp GlobalMemRefOp -> MemRef_GlobalOp LoadOp -> MemRef_LoadOp PrefetchOp -> MemRef_PrefetchOp ReshapeOp -> MemRef_ReshapeOp StoreOp -> MemRef_StoreOp SubViewOp -> MemRef_SubViewOp TransposeOp -> MemRef_TransposeOp TensorLoadOp -> MemRef_TensorLoadOp TensorStoreOp -> MemRef_TensorStoreOp TensorToMemRefOp -> MemRef_BufferCastOp ViewOp -> MemRef_ViewOp The roadmap to split the memref dialect from std is discussed here: https://llvm.discourse.group/t/rfc-split-the-memref-dialect-from-std/2667 Differential Revision: https://reviews.llvm.org/D98041
29 lines
975 B
MLIR
// RUN: mlir-opt %s -finalizing-bufferize -split-input-file -verify-diagnostics | FileCheck %s

// A matching tensor_load/buffer_cast pair is a materialization round-trip;
// finalizing-bufferize folds it away and the memref argument is returned
// directly.
// CHECK-LABEL: func @eliminate_materializations(
// CHECK-SAME: %[[ARG:.*]]: memref<f32>) -> memref<f32> {
// CHECK: return %[[ARG]] : memref<f32>
func @eliminate_materializations(%arg0: memref<f32>) -> memref<f32> {
  %0 = memref.tensor_load %arg0 : memref<f32>
  %1 = memref.buffer_cast %0 : memref<f32>
  return %1 : memref<f32>
}

// -----

// A buffer_cast whose tensor operand is produced by an unconverted op has no
// matching tensor_load to fold against, so legalization of the producer fails.
func @unable_to_convert_lone_buffer_cast() -> memref<f32> {
  // expected-error @+1 {{failed to legalize operation 'test.source'}}
  %0 = "test.source"() : () -> tensor<f32>
  %1 = memref.buffer_cast %0 : memref<f32>
  return %1 : memref<f32>
}

// -----

// A tensor_load whose result is consumed by an unconverted op has no matching
// buffer_cast to fold against, so legalization of the consumer fails.
func @unable_to_convert_lone_tensor_load(%arg0: memref<f32>) {
  %0 = memref.tensor_load %arg0 : memref<f32>
  // expected-error @+1 {{failed to legalize operation 'test.sink'}}
  "test.sink"(%0) : (tensor<f32>) -> ()
  return
}