This change updates all remaining bufferization patterns (except for scf.while) and the remaining bufferization infrastructure to infer the memory space whenever possible instead of falling back to "0". (If a default memory space is set in the bufferization options, we still fall back to that value if the memory space could not be inferred.)

Differential Revision: https://reviews.llvm.org/D128423
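For context, a minimal sketch of the fallback case described above (not part of this revision's test file; the function name is made up and the bufferized output shown in comments is an assumption): an alloc_tensor with neither a copy operand nor an explicit memory_space attribute gives the inference nothing to work with, so the default memory space from the bufferization options, assumed here to be 1, would be used.

func.func @fallback_example() -> tensor<10xf32> {
  // No copy operand and no memory_space attribute: the memory space cannot be
  // inferred from the IR, so the default from the bufferization options applies.
  %t = bufferization.alloc_tensor() : tensor<10xf32>
  // With a default memory space of 1 set in the options, the buffer for %t
  // would roughly bufferize to:
  //   %m = memref.alloc() : memref<10xf32, 1>
  return %t : tensor<10xf32>
}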
// RUN: mlir-opt %s -tensor-copy-insertion="must-infer-memory-space" -split-input-file | FileCheck %s

// CHECK-LABEL: func @unknown_op_copy
func.func @unknown_op_copy() -> (tensor<10xf32>, tensor<10xf32>) {
  %c0 = arith.constant 0 : index
  %cst = arith.constant 0.0 : f32
  // CHECK: %[[dummy:.*]] = "test.dummy_op"() : () -> tensor<10xf32>
  %t = "test.dummy_op"() : () -> tensor<10xf32>
  // CHECK: %[[copy:.*]] = bufferization.alloc_tensor() copy(%[[dummy]]) {bufferization.escape = [false]} : tensor<10xf32>
  %s = tensor.insert %cst into %t[%c0] : tensor<10xf32>
  return %s, %t : tensor<10xf32>, tensor<10xf32>
}

// -----

// CHECK-LABEL: func @alloc_tensor_copy
func.func @alloc_tensor_copy() -> (tensor<10xf32>, tensor<10xf32>) {
  %c0 = arith.constant 0 : index
  %cst = arith.constant 0.0 : f32
  // CHECK: bufferization.alloc_tensor() {bufferization.escape = [false], memory_space = 1 : ui64} : tensor<10xf32>
  %t = bufferization.alloc_tensor() {memory_space = 1 : ui64} : tensor<10xf32>
  // CHECK: bufferization.alloc_tensor() copy(%{{.*}}) {bufferization.escape = [false], memory_space = 1 : ui64} : tensor<10xf32>
  %s = tensor.insert %cst into %t[%c0] : tensor<10xf32>
  return %s, %t : tensor<10xf32>, tensor<10xf32>
}
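For reference, a sketch of the IR that the CHECK lines in @alloc_tensor_copy describe (SSA value names are illustrative, not what the pass prints verbatim): the original allocation is kept, and a copy is inserted for the tensor.insert destination because %t is also returned; the key point of the test is that the inserted copy inherits memory_space = 1 from its source instead of defaulting to 0.

func.func @alloc_tensor_copy() -> (tensor<10xf32>, tensor<10xf32>) {
  %c0 = arith.constant 0 : index
  %cst = arith.constant 0.0 : f32
  %t = bufferization.alloc_tensor() {bufferization.escape = [false], memory_space = 1 : ui64} : tensor<10xf32>
  // The copy inserted for the insert destination picks up the memory space of
  // %t (1), not the default space 0.
  %copy = bufferization.alloc_tensor() copy(%t) {bufferization.escape = [false], memory_space = 1 : ui64} : tensor<10xf32>
  %s = tensor.insert %cst into %copy[%c0] : tensor<10xf32>
  return %s, %t : tensor<10xf32>, tensor<10xf32>
}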