Moves the lowering of `bufferization.dealloc` to memref into a separate pass, but still registers the pattern in the conversion pass. This is helpful when some tensor values (and thus `to_memref` or `to_tensor` operations) still remain, e.g., when the function boundaries are not converted, or when constant tensors are converted to `memref.get_global` at a later point. However, it is still recommended to perform all bufferization before deallocation to avoid memory leaks, because all memref allocations inserted after the deallocation pass has been applied have to be handled manually. Note: The buffer deallocation pass assumes that memref values defined by `bufferization.to_memref` don't return ownership and don't have to be deallocated. `bufferization.to_tensor` operations are handled similarly to `bufferization.clone` operations, with the exception that the result value is not handled because it's a tensor (not a memref). Reviewed By: springerm Differential Revision: https://reviews.llvm.org/D159180
86 lines
3.3 KiB
MLIR
86 lines
3.3 KiB
MLIR
// RUN: mlir-opt -verify-diagnostics -convert-bufferization-to-memref -split-input-file %s | FileCheck %s
|
|
|
|
// CHECK-LABEL: @conversion_static
|
|
func.func @conversion_static(%arg0 : memref<2xf32>) -> memref<2xf32> {
|
|
%0 = bufferization.clone %arg0 : memref<2xf32> to memref<2xf32>
|
|
memref.dealloc %arg0 : memref<2xf32>
|
|
return %0 : memref<2xf32>
|
|
}
|
|
|
|
// CHECK: %[[ALLOC:.*]] = memref.alloc
|
|
// CHECK-NEXT: memref.copy %[[ARG:.*]], %[[ALLOC]]
|
|
// CHECK-NEXT: memref.dealloc %[[ARG]]
|
|
// CHECK-NEXT: return %[[ALLOC]]
|
|
|
|
// -----
|
|
|
|
// CHECK-LABEL: @conversion_dynamic
|
|
func.func @conversion_dynamic(%arg0 : memref<?xf32>) -> memref<?xf32> {
|
|
%1 = bufferization.clone %arg0 : memref<?xf32> to memref<?xf32>
|
|
memref.dealloc %arg0 : memref<?xf32>
|
|
return %1 : memref<?xf32>
|
|
}
|
|
|
|
// CHECK: %[[CONST:.*]] = arith.constant
|
|
// CHECK-NEXT: %[[DIM:.*]] = memref.dim %[[ARG:.*]], %[[CONST]]
|
|
// CHECK-NEXT: %[[ALLOC:.*]] = memref.alloc(%[[DIM]])
|
|
// CHECK-NEXT: memref.copy %[[ARG]], %[[ALLOC]]
|
|
// CHECK-NEXT: memref.dealloc %[[ARG]]
|
|
// CHECK-NEXT: return %[[ALLOC]]
|
|
|
|
// -----
|
|
|
|
func.func @conversion_unknown(%arg0 : memref<*xf32>) -> memref<*xf32> {
|
|
// expected-error@+1 {{failed to legalize operation 'bufferization.clone' that was explicitly marked illegal}}
|
|
%1 = bufferization.clone %arg0 : memref<*xf32> to memref<*xf32>
|
|
memref.dealloc %arg0 : memref<*xf32>
|
|
return %1 : memref<*xf32>
|
|
}
|
|
|
|
// -----
|
|
|
|
// CHECK-LABEL: func @conversion_with_layout_map(
|
|
// CHECK-SAME: %[[ARG:.*]]: memref<?xf32, strided<[?], offset: ?>>
|
|
// CHECK: %[[C0:.*]] = arith.constant 0 : index
|
|
// CHECK: %[[DIM:.*]] = memref.dim %[[ARG]], %[[C0]]
|
|
// CHECK: %[[ALLOC:.*]] = memref.alloc(%[[DIM]]) : memref<?xf32>
|
|
// CHECK: %[[CASTED:.*]] = memref.cast %[[ALLOC]] : memref<?xf32> to memref<?xf32, strided<[?], offset: ?>>
|
|
// CHECK: memref.copy
|
|
// CHECK: memref.dealloc
|
|
// CHECK: return %[[CASTED]]
|
|
func.func @conversion_with_layout_map(%arg0 : memref<?xf32, strided<[?], offset: ?>>) -> memref<?xf32, strided<[?], offset: ?>> {
|
|
%1 = bufferization.clone %arg0 : memref<?xf32, strided<[?], offset: ?>> to memref<?xf32, strided<[?], offset: ?>>
|
|
memref.dealloc %arg0 : memref<?xf32, strided<[?], offset: ?>>
|
|
return %1 : memref<?xf32, strided<[?], offset: ?>>
|
|
}
|
|
|
|
// -----
|
|
|
|
// This bufferization.clone cannot be lowered because a buffer with this layout
|
|
// map cannot be allocated (or casted to).
|
|
|
|
func.func @conversion_with_invalid_layout_map(%arg0 : memref<?xf32, strided<[10], offset: ?>>)
|
|
-> memref<?xf32, strided<[10], offset: ?>> {
|
|
// expected-error@+1 {{failed to legalize operation 'bufferization.clone' that was explicitly marked illegal}}
|
|
%1 = bufferization.clone %arg0 : memref<?xf32, strided<[10], offset: ?>> to memref<?xf32, strided<[10], offset: ?>>
|
|
memref.dealloc %arg0 : memref<?xf32, strided<[10], offset: ?>>
|
|
return %1 : memref<?xf32, strided<[10], offset: ?>>
|
|
}
|
|
|
|
// -----
// Test: check that the dealloc lowering pattern is registered in this
// conversion pass. A single-retained-value `bufferization.dealloc` must
// lower to a conditional `memref.dealloc` guarded by `scf.if`, without
// emitting the generic @deallocHelper function.
//
// NOTE: the trailing directives below were previously spelled "CHECk"
// (lowercase k); FileCheck directives are case-sensitive, so they were
// silently ignored and the lowered output was never actually verified.

// CHECK-NOT: func @deallocHelper
// CHECK-LABEL: func @conversion_dealloc_simple
// CHECK-SAME: [[ARG0:%.+]]: memref<2xf32>
// CHECK-SAME: [[ARG1:%.+]]: i1
func.func @conversion_dealloc_simple(%arg0: memref<2xf32>, %arg1: i1) {
bufferization.dealloc (%arg0 : memref<2xf32>) if (%arg1)
return
}

// CHECK: scf.if [[ARG1]] {
// CHECK-NEXT: memref.dealloc [[ARG0]] : memref<2xf32>
// CHECK-NEXT: }
// CHECK-NEXT: return