External functions have no body, so they cannot be analyzed. Conservatively assume that each tensor bbArg may alias with each tensor result. Furthermore, assume that each function arg is read and written to after bufferization. This default behavior can be controlled with `bufferization.access` (similar to `bufferization.memory_layout`) in test cases. Also fix a bug in the dialect attribute verifier, which did not run for region argument attributes.

Differential Revision: https://reviews.llvm.org/D139517
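As a sketch of overriding the default (not part of the test file below, and assuming the attribute accepts values such as "read", "write", and "read-write"; only the invalid-value case is exercised in the test), an external function could be annotated per argument like this:

// Hypothetical usage: relax the conservative read/write assumption
// on an external function's tensor arguments (attribute values assumed).
func.func private @annotated_external_fn(
    tensor<?xf32> {bufferization.access = "read"},
    tensor<?xf32> {bufferization.access = "read-write"})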
// RUN: mlir-opt %s -split-input-file -verify-diagnostics

func.func @alloc_tensor_missing_dims(%arg0: index)
{
  // expected-error @+1 {{expected 2 dynamic sizes}}
  %0 = bufferization.alloc_tensor(%arg0) : tensor<4x?x?x5xf32>
  return
}

// -----

// expected-note @+1 {{prior use here}}
func.func @alloc_tensor_type_mismatch(%t: tensor<?xf32>) {
  // expected-error @+1{{expects different type than prior uses: 'tensor<5xf32>' vs 'tensor<?xf32>'}}
  %0 = bufferization.alloc_tensor() copy(%t) : tensor<5xf32>
  return
}

// -----

func.func @alloc_tensor_copy_and_dims(%t: tensor<?xf32>, %sz: index) {
  // expected-error @+1{{dynamic sizes not needed when copying a tensor}}
  %0 = bufferization.alloc_tensor(%sz) copy(%t) : tensor<?xf32>
  return
}

// -----

func.func @alloc_tensor_invalid_escape_attr(%sz: index) {
  // expected-error @+1{{'bufferization.escape' is expected to be a bool array attribute}}
  %0 = bufferization.alloc_tensor(%sz) {bufferization.escape = 5} : tensor<?xf32>
  return
}

// -----

func.func @alloc_tensor_invalid_escape_attr_size(%sz: index) {
  // expected-error @+1{{'bufferization.escape' has wrong number of elements, expected 1, got 2}}
  %0 = bufferization.alloc_tensor(%sz) {bufferization.escape = [true, false]} : tensor<?xf32>
  return
}

// -----

func.func @escape_attr_non_allocating(%t0: tensor<?xf32>) {
  // expected-error @+1{{'bufferization.escape' only valid for allocation results}}
  %0 = tensor.extract_slice %t0[0][5][1] {bufferization.escape = [true]} : tensor<?xf32> to tensor<5xf32>
  return
}

// -----

func.func @escape_attr_non_bufferizable(%m0: memref<?xf32>) {
  // expected-error @+1{{'bufferization.escape' only valid on bufferizable ops}}
  %0 = memref.cast %m0 {bufferization.escape = [true]} : memref<?xf32> to memref<10xf32>
  return
}

// -----

#DCSR = #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>

func.func @sparse_alloc_direct_return() -> tensor<20x40xf32, #DCSR> {
  // expected-error @+1{{sparse tensor allocation should not escape function}}
  %0 = bufferization.alloc_tensor() : tensor<20x40xf32, #DCSR>
  return %0 : tensor<20x40xf32, #DCSR>
}

// -----

#DCSR = #sparse_tensor.encoding<{ dimLevelType = [ "compressed", "compressed" ] }>

func.func private @foo(tensor<20x40xf32, #DCSR>) -> ()

func.func @sparse_alloc_call() {
  // expected-error @+1{{sparse tensor allocation should not escape function}}
  %0 = bufferization.alloc_tensor() : tensor<20x40xf32, #DCSR>
  call @foo(%0) : (tensor<20x40xf32, #DCSR>) -> ()
  return
}

// -----

// expected-error @+1{{invalid value for 'bufferization.access'}}
func.func private @invalid_buffer_access_type(tensor<*xf32> {bufferization.access = "foo"})

// -----

// expected-error @+1{{'bufferization.writable' is invalid on external functions}}
func.func private @invalid_writable_attribute(tensor<*xf32> {bufferization.writable = false})

// -----

func.func @invalid_writable_on_op() {
  // expected-error @+1{{attribute '"bufferization.writable"' not supported as an op attribute by the bufferization dialect}}
  arith.constant {bufferization.writable = true} 0 : index
}