This commit is a further incremental step toward moving the whole mlir-vulkan-runner MLIR pass pipeline into mlir-opt (see #73457). The previous step was b225b3adf7b78387c9fcb97a3ff0e0a1e26eafe2, which moved all device passes prior to SPIR-V serialization into a new mlir-opt test pass, `-test-vulkan-runner-pipeline`.

This commit changes how SPIR-V serialization is accomplished for Vulkan runner tests. Until now, this was done by the Vulkan-specific ConvertGpuLaunchFuncToVulkanLaunchFunc pass. With this commit, that responsibility is removed from that pass and handled instead by the existing generic GpuModuleToBinaryPass. In addition, the SPIR-V serialization step is no longer performed inside mlir-vulkan-runner, but rather inside mlir-opt (in the `-test-vulkan-runner-pipeline` pass). Both of these changes bring mlir-vulkan-runner into closer alignment with the other GPU integration tests. Notably, the IR shapes produced by the mlir-opt pipelines for the Vulkan and SYCL runners are now much more similar, with both using a gpu.binary op for the serialized SPIR-V kernel (a rough sketch of this shape is given below).

To enable this, the commit includes the following supporting changes:

- ConvertToSPIRVPass is enhanced to support producing the IR shape where a spirv.module is nested inside a gpu.module, since this is what GpuModuleToBinaryPass expects.
- ConvertGPULaunchFuncToVulkanLaunchFunc is changed to remove its SPIR-V serialization functionality; it now instead extracts the serialized SPIR-V from a gpu.binary operation (as produced by GpuModuleToBinaryPass).
- `-test-vulkan-runner-pipeline` now attaches the SPIR-V target information required by GpuModuleToBinaryPass.
- The WebGPU pass option, which had been removed from mlir-vulkan-runner in the previous commit in this series, is restored as an option on `-test-vulkan-runner-pipeline` instead, so that the WebGPU pass continues to be inserted into the pipeline just before SPIR-V serialization.
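For illustration, here is a rough sketch of the two IR shapes involved around serialization. It is not copied from the actual tests; the target environment, symbol names, and blob contents are placeholders, and the exact target attribute attached by `-test-vulkan-runner-pipeline` may differ.

```mlir
// Shape fed to GpuModuleToBinaryPass: a spirv.module nested inside a gpu.module
// that carries SPIR-V target information (illustrative values).
gpu.module @kernels [#spirv.target_env<#spirv.vce<v1.0, [Shader], []>, #spirv.resource_limits<>>] {
  spirv.module @__spv__kernels Logical GLSL450 {
    // kernel body omitted
  }
}

// Shape after GpuModuleToBinaryPass, i.e. what -test-vulkan-runner-pipeline now
// emits: a gpu.binary op whose object holds the serialized SPIR-V
// ("<blob>" stands in for the real serialized bytes).
gpu.binary @kernels [#gpu.object<#spirv.target_env<#spirv.vce<v1.0, [Shader], []>, #spirv.resource_limits<>>, "<blob>">]
```

The test below exercises the new nested shape directly, via the `convert-gpu-modules` and `nest-in-gpu-module` options of `-convert-to-spirv` shown in its RUN line.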
// RUN: mlir-opt -convert-to-spirv="convert-gpu-modules=true nest-in-gpu-module=true run-signature-conversion=false run-vector-unrolling=false" %s | FileCheck %s

module attributes {
  gpu.container_module,
  spirv.target_env = #spirv.target_env<#spirv.vce<v1.0, [Shader], []>, #spirv.resource_limits<>>
} {
  // CHECK-LABEL: func.func @main
  // CHECK: %[[C1:.*]] = arith.constant 1 : index
  // CHECK: gpu.launch_func @[[$KERNELS_1:.*]]::@[[$BUILTIN_WG_ID_X:.*]] blocks in (%[[C1]], %[[C1]], %[[C1]]) threads in (%[[C1]], %[[C1]], %[[C1]])
  func.func @main() {
    %c1 = arith.constant 1 : index
    gpu.launch_func @kernels_1::@builtin_workgroup_id_x
        blocks in (%c1, %c1, %c1) threads in (%c1, %c1, %c1)
    return
  }

  // CHECK: gpu.module @[[$KERNELS_1]]
  // CHECK: spirv.module @{{.*}} Logical GLSL450
  // CHECK: spirv.func @[[$BUILTIN_WG_ID_X]]
  // CHECK: spirv.mlir.addressof
  // CHECK: spirv.Load "Input"
  // CHECK: spirv.CompositeExtract
  gpu.module @kernels_1 {
    gpu.func @builtin_workgroup_id_x() kernel
        attributes {spirv.entry_point_abi = #spirv.entry_point_abi<workgroup_size = [16, 1, 1]>} {
      %0 = gpu.block_id x
      gpu.return
    }
  }
}
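For context, a hedged sketch of how a Vulkan runner integration test invocation can look once serialization happens in mlir-opt: the device pipeline, including SPIR-V serialization, runs under `-test-vulkan-runner-pipeline`, and mlir-vulkan-runner then performs only the remaining lowering and execution. The lit substitutions for the runtime libraries are placeholders, not the exact names used in the test suite.

```mlir
// RUN: mlir-opt %s -test-vulkan-runner-pipeline \
// RUN:   | mlir-vulkan-runner - \
// RUN:     --shared-libs=%vulkan_runtime_wrappers,%mlir_runner_utils \
// RUN:     --entry-point-result=void | FileCheck %s
```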