Create the memref dialect and move dialect-specific ops from std dialect to this dialect. Moved ops: AllocOp -> MemRef_AllocOp AllocaOp -> MemRef_AllocaOp AssumeAlignmentOp -> MemRef_AssumeAlignmentOp DeallocOp -> MemRef_DeallocOp DimOp -> MemRef_DimOp MemRefCastOp -> MemRef_CastOp MemRefReinterpretCastOp -> MemRef_ReinterpretCastOp GetGlobalMemRefOp -> MemRef_GetGlobalOp GlobalMemRefOp -> MemRef_GlobalOp LoadOp -> MemRef_LoadOp PrefetchOp -> MemRef_PrefetchOp ReshapeOp -> MemRef_ReshapeOp StoreOp -> MemRef_StoreOp SubViewOp -> MemRef_SubViewOp TransposeOp -> MemRef_TransposeOp TensorLoadOp -> MemRef_TensorLoadOp TensorStoreOp -> MemRef_TensorStoreOp TensorToMemRefOp -> MemRef_BufferCastOp ViewOp -> MemRef_ViewOp The roadmap to split the memref dialect from std is discussed here: https://llvm.discourse.group/t/rfc-split-the-memref-dialect-from-std/2667 Differential Revision: https://reviews.llvm.org/D98041
// RUN: mlir-opt -split-input-file -shape-bufferize <%s | FileCheck %s

// -----

// Verify that -shape-bufferize converts the tensor-typed shape.assuming
// region to its memref form: the assuming op yields a memref, the produced
// tensor is bridged via memref.buffer_cast inside the region, and the result
// is converted back with memref.tensor_load for the tensor-consuming sink.
// CHECK-LABEL: func @shape_assuming() {
// CHECK: %[[WTRUE:.*]] = shape.const_witness true
// CHECK: %[[MEMREF:.*]] = shape.assuming %[[WTRUE]] -> (memref<2xf16>) {
// CHECK: %[[TENSOR_VAL:.*]] = "test.source"() : () -> tensor<2xf16>
// CHECK: %[[YIELDED_MEMREF:.*]] = memref.buffer_cast %[[TENSOR_VAL]] : memref<2xf16>
// CHECK: shape.assuming_yield %[[YIELDED_MEMREF]] : memref<2xf16>
// CHECK: }
// CHECK: %[[TENSOR:.*]] = memref.tensor_load %[[MEMREF:.*]] : memref<2xf16>
// CHECK: "test.sink"(%[[TENSOR]]) : (tensor<2xf16>) -> ()
// CHECK: return
// CHECK: }
func @shape_assuming() {
  %0 = shape.const_witness true
  %1 = shape.assuming %0 -> (tensor<2xf16>) {
    %2 = "test.source"() : () -> (tensor<2xf16>)
    shape.assuming_yield %2 : tensor<2xf16>
  }
  "test.sink"(%1) : (tensor<2xf16>) -> ()
  return
}