This commit removes the deallocation capabilities of One-Shot Bufferize. One-Shot Bufferize should never deallocate any memrefs; going forward, deallocation is handled entirely by the ownership-based buffer deallocation pass. Consequently, the `allow-return-allocs` pass option now defaults to true and `create-deallocs` defaults to false; both options — as well as the escape attribute that indicated whether a memref escapes the current region — are removed. A new `allow-return-allocs-from-loops` option is added as a temporary workaround for some bufferization limitations.
56 lines · 2.0 KiB · MLIR
// RUN: mlir-opt %s -one-shot-bufferize="allow-unknown-ops allow-return-allocs-from-loops" -split-input-file -verify-diagnostics

// Negative test: the two branches of an scf.if yield tensors allocated in
// different memory spaces (0 vs. 1), so One-Shot Bufferize cannot pick a
// single memory space for the result and must emit the diagnostics below.
func.func @inconsistent_memory_space_scf_if(%c: i1) -> tensor<10xf32> {
  // Yielding tensors with different memory spaces. Such IR cannot be
  // bufferized.
  %0 = bufferization.alloc_tensor() {memory_space = 0 : ui64} : tensor<10xf32>
  %1 = bufferization.alloc_tensor() {memory_space = 1 : ui64} : tensor<10xf32>
  // expected-error @+1 {{inconsistent memory space on then/else branches}}
  %r = scf.if %c -> tensor<10xf32> {
    // expected-error @+1 {{failed to bufferize op}}
    scf.yield %0 : tensor<10xf32>
  } else {
    scf.yield %1 : tensor<10xf32>
  }
  func.return %r : tensor<10xf32>
}
|
|
|
|
// -----
|
|
|
|
// Negative test: an scf.execute_region with two blocks and therefore two
// scf.yield terminators. Bufferization supports only a unique scf.yield in
// the region and must reject this IR with the diagnostic below.
func.func @execute_region_multiple_yields(%t: tensor<5xf32>) -> tensor<5xf32> {
  // expected-error @below{{op op without unique scf.yield is not supported}}
  %0 = scf.execute_region -> tensor<5xf32> {
    scf.yield %t : tensor<5xf32>
  ^bb1(%arg1 : tensor<5xf32>):
    scf.yield %arg1 : tensor<5xf32>
  }
  func.return %0 : tensor<5xf32>
}
|
|
|
|
// -----
|
|
|
|
// Negative test: an scf.execute_region whose blocks branch among each other
// with cf.br and never reach an scf.yield. With no unique scf.yield,
// bufferization must reject this IR with the diagnostic below.
func.func @execute_region_no_yield(%t: tensor<5xf32>) -> tensor<5xf32> {
  // expected-error @below{{op op without unique scf.yield is not supported}}
  %0 = scf.execute_region -> tensor<5xf32> {
    cf.br ^bb0(%t : tensor<5xf32>)
  ^bb0(%arg0 : tensor<5xf32>):
    cf.br ^bb1(%arg0: tensor<5xf32>)
  ^bb1(%arg1 : tensor<5xf32>):
    cf.br ^bb0(%arg1: tensor<5xf32>)
  }
  func.return %0 : tensor<5xf32>
}
|
|
|
|
// -----
|
|
|
|
// Negative test: an scf.for whose init_arg is a tensor in memory space 0 but
// whose body yields a tensor in memory space 1. The iter_arg must keep one
// memory space across iterations, so bufferization must emit the diagnostics
// below.
func.func @inconsistent_memory_space_scf_for(%lb: index, %ub: index, %step: index) -> tensor<10xf32> {
  %0 = bufferization.alloc_tensor() {memory_space = 0 : ui64} : tensor<10xf32>
  %1 = bufferization.alloc_tensor() {memory_space = 1 : ui64} : tensor<10xf32>
  // expected-error @below{{init_arg and yielded value bufferize to inconsistent memory spaces}}
  %2 = scf.for %iv = %lb to %ub step %step iter_args(%arg = %0) -> tensor<10xf32> {
    // expected-error @below {{failed to bufferize op}}
    scf.yield %1 : tensor<10xf32>
  }
  return %2 : tensor<10xf32>
}