This operation is a "copy" operation on tensors. It is guaranteed to
bufferize to a memcpy. This is different from "tensor.insert_slice",
which may fold away.

Note: There is a symmetry between certain tensor, bufferization and
memref ops:
* `tensor.empty`, `bufferization.alloc_tensor`, `memref.alloc`
* (none), `bufferization.dealloc_tensor`, `memref.dealloc`
* `tensor.insert_slice`, `bufferization.copy_tensor`, `memref.copy`

Tensor ops can generally canonicalize/fold away, while bufferization
dialect ops can be used when a certain side effect is expected to
materialize; so they do not fold away.

Differential Revision: https://reviews.llvm.org/D153552
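As an illustration, a minimal sketch of intended usage, assuming the
operand/type syntax exercised by the negative test at the end of the
file below (`%src` and `%dest` are hypothetical values that must share
the same tensor type):

  // Copy %src into %dest. Unlike tensor.insert_slice, this cannot fold
  // away; it is guaranteed to bufferize to a copy (e.g. memref.copy).
  bufferization.copy_tensor %src, %dest : tensor<5xf32>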
// RUN: mlir-opt %s -split-input-file -verify-diagnostics

func.func @alloc_tensor_missing_dims(%arg0: index)
{
  // expected-error @+1 {{expected 2 dynamic sizes}}
  %0 = bufferization.alloc_tensor(%arg0) : tensor<4x?x?x5xf32>
  return
}

// -----

// expected-note @+1 {{prior use here}}
func.func @alloc_tensor_type_mismatch(%t: tensor<?xf32>) {
  // expected-error @+1{{expects different type than prior uses: 'tensor<5xf32>' vs 'tensor<?xf32>'}}
  %0 = bufferization.alloc_tensor() copy(%t) : tensor<5xf32>
  return
}

// -----

func.func @alloc_tensor_copy_and_dims(%t: tensor<?xf32>, %sz: index) {
  // expected-error @+1{{dynamic sizes not needed when copying a tensor}}
  %0 = bufferization.alloc_tensor(%sz) copy(%t) : tensor<?xf32>
  return
}

// -----

func.func @alloc_tensor_invalid_escape_attr(%sz: index) {
  // expected-error @+1{{'bufferization.escape' is expected to be a bool array attribute}}
  %0 = bufferization.alloc_tensor(%sz) {bufferization.escape = 5} : tensor<?xf32>
  return
}

// -----

func.func @alloc_tensor_invalid_escape_attr_size(%sz: index) {
  // expected-error @+1{{'bufferization.escape' has wrong number of elements, expected 1, got 2}}
  %0 = bufferization.alloc_tensor(%sz) {bufferization.escape = [true, false]} : tensor<?xf32>
  return
}

// -----

func.func @escape_attr_non_allocating(%t0: tensor<?xf32>) {
  // expected-error @+1{{'bufferization.escape' only valid for allocation results}}
  %0 = tensor.extract_slice %t0[0][5][1] {bufferization.escape = [true]} : tensor<?xf32> to tensor<5xf32>
  return
}

// -----

func.func @escape_attr_non_bufferizable(%m0: memref<?xf32>) {
  // expected-error @+1{{'bufferization.escape' only valid on bufferizable ops}}
  %0 = memref.cast %m0 {bufferization.escape = [true]} : memref<?xf32> to memref<10xf32>
  return
}

// -----

#DCSR = #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>

func.func @sparse_alloc_direct_return() -> tensor<20x40xf32, #DCSR> {
  // expected-error @+1{{sparse tensor allocation should not escape function}}
  %0 = bufferization.alloc_tensor() : tensor<20x40xf32, #DCSR>
  return %0 : tensor<20x40xf32, #DCSR>
}

// -----

#DCSR = #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>

func.func private @foo(tensor<20x40xf32, #DCSR>) -> ()

func.func @sparse_alloc_call() {
  // expected-error @+1{{sparse tensor allocation should not escape function}}
  %0 = bufferization.alloc_tensor() : tensor<20x40xf32, #DCSR>
  call @foo(%0) : (tensor<20x40xf32, #DCSR>) -> ()
  return
}

// -----

// expected-error @+1{{invalid value for 'bufferization.access'}}
func.func private @invalid_buffer_access_type(tensor<*xf32> {bufferization.access = "foo"})

// -----

// expected-error @+1{{'bufferization.writable' is invalid on external functions}}
func.func private @invalid_writable_attribute(tensor<*xf32> {bufferization.writable = false})

// -----

func.func @invalid_writable_on_op() {
  // expected-error @+1{{attribute '"bufferization.writable"' not supported as an op attribute by the bufferization dialect}}
  arith.constant {bufferization.writable = true} 0 : index
}

// -----

// expected-note @below{{prior use here}}
func.func @invalid_tensor_copy(%arg0: tensor<?xf32>, %arg1: tensor<5xf32>) {
  // expected-error @below{{expects different type than prior uses: 'tensor<?xf32>' vs 'tensor<5xf32>'}}
  bufferization.copy_tensor %arg0, %arg1 : tensor<?xf32>
}