Switch to using BitIntegerState for each of the inputs, and invert their meanings: the attributes now record which implicit inputs a function does not need. This diverges further from the old AMDGPUAnnotateKernelFeatures pass, but it isn't used yet anyway.
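The inversion is what shows up in the ATTRIBUTOR_CHECK attribute lists at the bottom of the test: the attributor starts from the optimistic assumption that a kernel needs none of the implicit inputs ("amdgpu-no-*") and seeing a use only removes that assumption. LLVM's Attributor provides a BitIntegerState template for this kind of one-bit-per-property state; the following is a minimal standalone sketch of the inverted encoding (illustration only, not the actual AMDGPUAttributor code; the bit names are made up for the example):

// Minimal sketch of the inverted encoding (not the actual LLVM code).
// Each bit means "this implicit input is assumed NOT to be needed"; the
// best (optimistic) state has every bit set, and evidence of a use can
// only clear bits, mirroring how a BitIntegerState narrows its assumed value.
#include <cstdint>
#include <cstdio>

enum ImplicitInputBits : uint32_t {
  NO_WORKGROUP_ID_Y = 1u << 0,
  NO_WORKGROUP_ID_Z = 1u << 1,
  NO_WORKITEM_ID_Y  = 1u << 2,
  NO_WORKITEM_ID_Z  = 1u << 3,
  ALL_INPUT_BITS = NO_WORKGROUP_ID_Y | NO_WORKGROUP_ID_Z |
                   NO_WORKITEM_ID_Y | NO_WORKITEM_ID_Z,
};

struct InputBitState {
  uint32_t Assumed = ALL_INPUT_BITS; // optimistic: nothing is needed

  // A use of an input invalidates the corresponding "no-*" assumption.
  void noteUse(uint32_t Bit) { Assumed &= ~Bit; }
  bool isAssumedUnneeded(uint32_t Bit) const { return (Assumed & Bit) != 0; }
};

int main() {
  InputBitState State;
  // e.g. a kernel that calls @llvm.r600.read.tgid.y() loses the
  // "amdgpu-no-workgroup-id-y" assumption but keeps the others.
  State.noteUse(NO_WORKGROUP_ID_Y);
  std::printf("no-workgroup-id-y assumed: %d\n",
              State.isAssumedUnneeded(NO_WORKGROUP_ID_Y)); // prints 0
  std::printf("no-workitem-id-z assumed:  %d\n",
              State.isAssumedUnneeded(NO_WORKITEM_ID_Z));  // prints 1
  return 0;
}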
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --function-signature --check-globals
; RUN: opt -S -mtriple=amdgcn-unknown-unknown -amdgpu-annotate-kernel-features < %s | FileCheck -check-prefixes=CHECK,AKF_CHECK %s
; RUN: opt -S -mtriple=amdgcn-unknown-unknown -amdgpu-attributor < %s | FileCheck -check-prefixes=CHECK,ATTRIBUTOR_CHECK %s

declare i32 @llvm.r600.read.tgid.x() #0
declare i32 @llvm.r600.read.tgid.y() #0
declare i32 @llvm.r600.read.tgid.z() #0

declare i32 @llvm.r600.read.tidig.x() #0
declare i32 @llvm.r600.read.tidig.y() #0
declare i32 @llvm.r600.read.tidig.z() #0

declare i32 @llvm.r600.read.local.size.x() #0
declare i32 @llvm.r600.read.local.size.y() #0
declare i32 @llvm.r600.read.local.size.z() #0

define amdgpu_kernel void @use_tgid_x(i32 addrspace(1)* %ptr) #1 {
; CHECK-LABEL: define {{[^@]+}}@use_tgid_x
; CHECK-SAME: (i32 addrspace(1)* [[PTR:%.*]]) #[[ATTR1:[0-9]+]] {
; CHECK-NEXT: [[VAL:%.*]] = call i32 @llvm.r600.read.tgid.x()
; CHECK-NEXT: store i32 [[VAL]], i32 addrspace(1)* [[PTR]], align 4
; CHECK-NEXT: ret void
;
  %val = call i32 @llvm.r600.read.tgid.x()
  store i32 %val, i32 addrspace(1)* %ptr
  ret void
}

define amdgpu_kernel void @use_tgid_y(i32 addrspace(1)* %ptr) #1 {
; CHECK-LABEL: define {{[^@]+}}@use_tgid_y
; CHECK-SAME: (i32 addrspace(1)* [[PTR:%.*]]) #[[ATTR2:[0-9]+]] {
; CHECK-NEXT: [[VAL:%.*]] = call i32 @llvm.r600.read.tgid.y()
; CHECK-NEXT: store i32 [[VAL]], i32 addrspace(1)* [[PTR]], align 4
; CHECK-NEXT: ret void
;
  %val = call i32 @llvm.r600.read.tgid.y()
  store i32 %val, i32 addrspace(1)* %ptr
  ret void
}

define amdgpu_kernel void @multi_use_tgid_y(i32 addrspace(1)* %ptr) #1 {
; CHECK-LABEL: define {{[^@]+}}@multi_use_tgid_y
; CHECK-SAME: (i32 addrspace(1)* [[PTR:%.*]]) #[[ATTR2]] {
; CHECK-NEXT: [[VAL0:%.*]] = call i32 @llvm.r600.read.tgid.y()
; CHECK-NEXT: store volatile i32 [[VAL0]], i32 addrspace(1)* [[PTR]], align 4
; CHECK-NEXT: [[VAL1:%.*]] = call i32 @llvm.r600.read.tgid.y()
; CHECK-NEXT: store volatile i32 [[VAL1]], i32 addrspace(1)* [[PTR]], align 4
; CHECK-NEXT: ret void
;
  %val0 = call i32 @llvm.r600.read.tgid.y()
  store volatile i32 %val0, i32 addrspace(1)* %ptr
  %val1 = call i32 @llvm.r600.read.tgid.y()
  store volatile i32 %val1, i32 addrspace(1)* %ptr
  ret void
}

define amdgpu_kernel void @use_tgid_x_y(i32 addrspace(1)* %ptr) #1 {
; CHECK-LABEL: define {{[^@]+}}@use_tgid_x_y
; CHECK-SAME: (i32 addrspace(1)* [[PTR:%.*]]) #[[ATTR2]] {
; CHECK-NEXT: [[VAL0:%.*]] = call i32 @llvm.r600.read.tgid.x()
; CHECK-NEXT: [[VAL1:%.*]] = call i32 @llvm.r600.read.tgid.y()
; CHECK-NEXT: store volatile i32 [[VAL0]], i32 addrspace(1)* [[PTR]], align 4
; CHECK-NEXT: store volatile i32 [[VAL1]], i32 addrspace(1)* [[PTR]], align 4
; CHECK-NEXT: ret void
;
  %val0 = call i32 @llvm.r600.read.tgid.x()
  %val1 = call i32 @llvm.r600.read.tgid.y()
  store volatile i32 %val0, i32 addrspace(1)* %ptr
  store volatile i32 %val1, i32 addrspace(1)* %ptr
  ret void
}

define amdgpu_kernel void @use_tgid_z(i32 addrspace(1)* %ptr) #1 {
; CHECK-LABEL: define {{[^@]+}}@use_tgid_z
; CHECK-SAME: (i32 addrspace(1)* [[PTR:%.*]]) #[[ATTR3:[0-9]+]] {
; CHECK-NEXT: [[VAL:%.*]] = call i32 @llvm.r600.read.tgid.z()
; CHECK-NEXT: store i32 [[VAL]], i32 addrspace(1)* [[PTR]], align 4
; CHECK-NEXT: ret void
;
  %val = call i32 @llvm.r600.read.tgid.z()
  store i32 %val, i32 addrspace(1)* %ptr
  ret void
}

define amdgpu_kernel void @use_tgid_x_z(i32 addrspace(1)* %ptr) #1 {
; CHECK-LABEL: define {{[^@]+}}@use_tgid_x_z
; CHECK-SAME: (i32 addrspace(1)* [[PTR:%.*]]) #[[ATTR3]] {
; CHECK-NEXT: [[VAL0:%.*]] = call i32 @llvm.r600.read.tgid.x()
; CHECK-NEXT: [[VAL1:%.*]] = call i32 @llvm.r600.read.tgid.z()
; CHECK-NEXT: store volatile i32 [[VAL0]], i32 addrspace(1)* [[PTR]], align 4
; CHECK-NEXT: store volatile i32 [[VAL1]], i32 addrspace(1)* [[PTR]], align 4
; CHECK-NEXT: ret void
;
  %val0 = call i32 @llvm.r600.read.tgid.x()
  %val1 = call i32 @llvm.r600.read.tgid.z()
  store volatile i32 %val0, i32 addrspace(1)* %ptr
  store volatile i32 %val1, i32 addrspace(1)* %ptr
  ret void
}

define amdgpu_kernel void @use_tgid_y_z(i32 addrspace(1)* %ptr) #1 {
; CHECK-LABEL: define {{[^@]+}}@use_tgid_y_z
; CHECK-SAME: (i32 addrspace(1)* [[PTR:%.*]]) #[[ATTR4:[0-9]+]] {
; CHECK-NEXT: [[VAL0:%.*]] = call i32 @llvm.r600.read.tgid.y()
; CHECK-NEXT: [[VAL1:%.*]] = call i32 @llvm.r600.read.tgid.z()
; CHECK-NEXT: store volatile i32 [[VAL0]], i32 addrspace(1)* [[PTR]], align 4
; CHECK-NEXT: store volatile i32 [[VAL1]], i32 addrspace(1)* [[PTR]], align 4
; CHECK-NEXT: ret void
;
  %val0 = call i32 @llvm.r600.read.tgid.y()
  %val1 = call i32 @llvm.r600.read.tgid.z()
  store volatile i32 %val0, i32 addrspace(1)* %ptr
  store volatile i32 %val1, i32 addrspace(1)* %ptr
  ret void
}

define amdgpu_kernel void @use_tgid_x_y_z(i32 addrspace(1)* %ptr) #1 {
; CHECK-LABEL: define {{[^@]+}}@use_tgid_x_y_z
; CHECK-SAME: (i32 addrspace(1)* [[PTR:%.*]]) #[[ATTR4]] {
; CHECK-NEXT: [[VAL0:%.*]] = call i32 @llvm.r600.read.tgid.x()
; CHECK-NEXT: [[VAL1:%.*]] = call i32 @llvm.r600.read.tgid.y()
; CHECK-NEXT: [[VAL2:%.*]] = call i32 @llvm.r600.read.tgid.z()
; CHECK-NEXT: store volatile i32 [[VAL0]], i32 addrspace(1)* [[PTR]], align 4
; CHECK-NEXT: store volatile i32 [[VAL1]], i32 addrspace(1)* [[PTR]], align 4
; CHECK-NEXT: store volatile i32 [[VAL2]], i32 addrspace(1)* [[PTR]], align 4
; CHECK-NEXT: ret void
;
  %val0 = call i32 @llvm.r600.read.tgid.x()
  %val1 = call i32 @llvm.r600.read.tgid.y()
  %val2 = call i32 @llvm.r600.read.tgid.z()
  store volatile i32 %val0, i32 addrspace(1)* %ptr
  store volatile i32 %val1, i32 addrspace(1)* %ptr
  store volatile i32 %val2, i32 addrspace(1)* %ptr
  ret void
}

define amdgpu_kernel void @use_tidig_x(i32 addrspace(1)* %ptr) #1 {
; CHECK-LABEL: define {{[^@]+}}@use_tidig_x
; CHECK-SAME: (i32 addrspace(1)* [[PTR:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: [[VAL:%.*]] = call i32 @llvm.r600.read.tidig.x()
; CHECK-NEXT: store i32 [[VAL]], i32 addrspace(1)* [[PTR]], align 4
; CHECK-NEXT: ret void
;
  %val = call i32 @llvm.r600.read.tidig.x()
  store i32 %val, i32 addrspace(1)* %ptr
  ret void
}

define amdgpu_kernel void @use_tidig_y(i32 addrspace(1)* %ptr) #1 {
; CHECK-LABEL: define {{[^@]+}}@use_tidig_y
; CHECK-SAME: (i32 addrspace(1)* [[PTR:%.*]]) #[[ATTR5:[0-9]+]] {
; CHECK-NEXT: [[VAL:%.*]] = call i32 @llvm.r600.read.tidig.y()
; CHECK-NEXT: store i32 [[VAL]], i32 addrspace(1)* [[PTR]], align 4
; CHECK-NEXT: ret void
;
  %val = call i32 @llvm.r600.read.tidig.y()
  store i32 %val, i32 addrspace(1)* %ptr
  ret void
}

define amdgpu_kernel void @use_tidig_z(i32 addrspace(1)* %ptr) #1 {
; CHECK-LABEL: define {{[^@]+}}@use_tidig_z
; CHECK-SAME: (i32 addrspace(1)* [[PTR:%.*]]) #[[ATTR6:[0-9]+]] {
; CHECK-NEXT: [[VAL:%.*]] = call i32 @llvm.r600.read.tidig.z()
; CHECK-NEXT: store i32 [[VAL]], i32 addrspace(1)* [[PTR]], align 4
; CHECK-NEXT: ret void
;
  %val = call i32 @llvm.r600.read.tidig.z()
  store i32 %val, i32 addrspace(1)* %ptr
  ret void
}

define amdgpu_kernel void @use_tidig_x_tgid_x(i32 addrspace(1)* %ptr) #1 {
; CHECK-LABEL: define {{[^@]+}}@use_tidig_x_tgid_x
; CHECK-SAME: (i32 addrspace(1)* [[PTR:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: [[VAL0:%.*]] = call i32 @llvm.r600.read.tidig.x()
; CHECK-NEXT: [[VAL1:%.*]] = call i32 @llvm.r600.read.tgid.x()
; CHECK-NEXT: store volatile i32 [[VAL0]], i32 addrspace(1)* [[PTR]], align 4
; CHECK-NEXT: store volatile i32 [[VAL1]], i32 addrspace(1)* [[PTR]], align 4
; CHECK-NEXT: ret void
;
  %val0 = call i32 @llvm.r600.read.tidig.x()
  %val1 = call i32 @llvm.r600.read.tgid.x()
  store volatile i32 %val0, i32 addrspace(1)* %ptr
  store volatile i32 %val1, i32 addrspace(1)* %ptr
  ret void
}

define amdgpu_kernel void @use_tidig_y_tgid_y(i32 addrspace(1)* %ptr) #1 {
; CHECK-LABEL: define {{[^@]+}}@use_tidig_y_tgid_y
; CHECK-SAME: (i32 addrspace(1)* [[PTR:%.*]]) #[[ATTR7:[0-9]+]] {
; CHECK-NEXT: [[VAL0:%.*]] = call i32 @llvm.r600.read.tidig.y()
; CHECK-NEXT: [[VAL1:%.*]] = call i32 @llvm.r600.read.tgid.y()
; CHECK-NEXT: store volatile i32 [[VAL0]], i32 addrspace(1)* [[PTR]], align 4
; CHECK-NEXT: store volatile i32 [[VAL1]], i32 addrspace(1)* [[PTR]], align 4
; CHECK-NEXT: ret void
;
  %val0 = call i32 @llvm.r600.read.tidig.y()
  %val1 = call i32 @llvm.r600.read.tgid.y()
  store volatile i32 %val0, i32 addrspace(1)* %ptr
  store volatile i32 %val1, i32 addrspace(1)* %ptr
  ret void
}

define amdgpu_kernel void @use_tidig_x_y_z(i32 addrspace(1)* %ptr) #1 {
; CHECK-LABEL: define {{[^@]+}}@use_tidig_x_y_z
; CHECK-SAME: (i32 addrspace(1)* [[PTR:%.*]]) #[[ATTR8:[0-9]+]] {
; CHECK-NEXT: [[VAL0:%.*]] = call i32 @llvm.r600.read.tidig.x()
; CHECK-NEXT: [[VAL1:%.*]] = call i32 @llvm.r600.read.tidig.y()
; CHECK-NEXT: [[VAL2:%.*]] = call i32 @llvm.r600.read.tidig.z()
; CHECK-NEXT: store volatile i32 [[VAL0]], i32 addrspace(1)* [[PTR]], align 4
; CHECK-NEXT: store volatile i32 [[VAL1]], i32 addrspace(1)* [[PTR]], align 4
; CHECK-NEXT: store volatile i32 [[VAL2]], i32 addrspace(1)* [[PTR]], align 4
; CHECK-NEXT: ret void
;
  %val0 = call i32 @llvm.r600.read.tidig.x()
  %val1 = call i32 @llvm.r600.read.tidig.y()
  %val2 = call i32 @llvm.r600.read.tidig.z()
  store volatile i32 %val0, i32 addrspace(1)* %ptr
  store volatile i32 %val1, i32 addrspace(1)* %ptr
  store volatile i32 %val2, i32 addrspace(1)* %ptr
  ret void
}

define amdgpu_kernel void @use_all_workitems(i32 addrspace(1)* %ptr) #1 {
; CHECK-LABEL: define {{[^@]+}}@use_all_workitems
; CHECK-SAME: (i32 addrspace(1)* [[PTR:%.*]]) #[[ATTR9:[0-9]+]] {
; CHECK-NEXT: [[VAL0:%.*]] = call i32 @llvm.r600.read.tidig.x()
; CHECK-NEXT: [[VAL1:%.*]] = call i32 @llvm.r600.read.tidig.y()
; CHECK-NEXT: [[VAL2:%.*]] = call i32 @llvm.r600.read.tidig.z()
; CHECK-NEXT: [[VAL3:%.*]] = call i32 @llvm.r600.read.tgid.x()
; CHECK-NEXT: [[VAL4:%.*]] = call i32 @llvm.r600.read.tgid.y()
; CHECK-NEXT: [[VAL5:%.*]] = call i32 @llvm.r600.read.tgid.z()
; CHECK-NEXT: store volatile i32 [[VAL0]], i32 addrspace(1)* [[PTR]], align 4
; CHECK-NEXT: store volatile i32 [[VAL1]], i32 addrspace(1)* [[PTR]], align 4
; CHECK-NEXT: store volatile i32 [[VAL2]], i32 addrspace(1)* [[PTR]], align 4
; CHECK-NEXT: store volatile i32 [[VAL3]], i32 addrspace(1)* [[PTR]], align 4
; CHECK-NEXT: store volatile i32 [[VAL4]], i32 addrspace(1)* [[PTR]], align 4
; CHECK-NEXT: store volatile i32 [[VAL5]], i32 addrspace(1)* [[PTR]], align 4
; CHECK-NEXT: ret void
;
  %val0 = call i32 @llvm.r600.read.tidig.x()
  %val1 = call i32 @llvm.r600.read.tidig.y()
  %val2 = call i32 @llvm.r600.read.tidig.z()
  %val3 = call i32 @llvm.r600.read.tgid.x()
  %val4 = call i32 @llvm.r600.read.tgid.y()
  %val5 = call i32 @llvm.r600.read.tgid.z()
  store volatile i32 %val0, i32 addrspace(1)* %ptr
  store volatile i32 %val1, i32 addrspace(1)* %ptr
  store volatile i32 %val2, i32 addrspace(1)* %ptr
  store volatile i32 %val3, i32 addrspace(1)* %ptr
  store volatile i32 %val4, i32 addrspace(1)* %ptr
  store volatile i32 %val5, i32 addrspace(1)* %ptr
  ret void
}

define amdgpu_kernel void @use_get_local_size_x(i32 addrspace(1)* %ptr) #1 {
; CHECK-LABEL: define {{[^@]+}}@use_get_local_size_x
; CHECK-SAME: (i32 addrspace(1)* [[PTR:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: [[VAL:%.*]] = call i32 @llvm.r600.read.local.size.x()
; CHECK-NEXT: store i32 [[VAL]], i32 addrspace(1)* [[PTR]], align 4
; CHECK-NEXT: ret void
;
  %val = call i32 @llvm.r600.read.local.size.x()
  store i32 %val, i32 addrspace(1)* %ptr
  ret void
}

define amdgpu_kernel void @use_get_local_size_y(i32 addrspace(1)* %ptr) #1 {
; CHECK-LABEL: define {{[^@]+}}@use_get_local_size_y
; CHECK-SAME: (i32 addrspace(1)* [[PTR:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: [[VAL:%.*]] = call i32 @llvm.r600.read.local.size.y()
; CHECK-NEXT: store i32 [[VAL]], i32 addrspace(1)* [[PTR]], align 4
; CHECK-NEXT: ret void
;
  %val = call i32 @llvm.r600.read.local.size.y()
  store i32 %val, i32 addrspace(1)* %ptr
  ret void
}

define amdgpu_kernel void @use_get_local_size_z(i32 addrspace(1)* %ptr) #1 {
; CHECK-LABEL: define {{[^@]+}}@use_get_local_size_z
; CHECK-SAME: (i32 addrspace(1)* [[PTR:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: [[VAL:%.*]] = call i32 @llvm.r600.read.local.size.z()
; CHECK-NEXT: store i32 [[VAL]], i32 addrspace(1)* [[PTR]], align 4
; CHECK-NEXT: ret void
;
  %val = call i32 @llvm.r600.read.local.size.z()
  store i32 %val, i32 addrspace(1)* %ptr
  ret void
}

attributes #0 = { nounwind readnone }
attributes #1 = { nounwind }

; ALL: attributes #[[ATTR0:[0-9]+]] = { nounwind readnone speculatable willreturn "uniform-work-group-size"="false" }
; ALL: attributes #[[ATTR1]] = { nounwind "uniform-work-group-size"="false" }
; ALL: attributes #[[ATTR2]] = { nounwind "amdgpu-work-group-id-y" "uniform-work-group-size"="false" }
; ALL: attributes #[[ATTR3]] = { nounwind "amdgpu-work-group-id-z" "uniform-work-group-size"="false" }
; ALL: attributes #[[ATTR4]] = { nounwind "amdgpu-work-group-id-y" "amdgpu-work-group-id-z" "uniform-work-group-size"="false" }
; ALL: attributes #[[ATTR5]] = { nounwind "amdgpu-work-item-id-y" "uniform-work-group-size"="false" }
; ALL: attributes #[[ATTR6]] = { nounwind "amdgpu-work-item-id-z" "uniform-work-group-size"="false" }
; ALL: attributes #[[ATTR7]] = { nounwind "amdgpu-work-group-id-y" "amdgpu-work-item-id-y" "uniform-work-group-size"="false" }
; ALL: attributes #[[ATTR8]] = { nounwind "amdgpu-work-item-id-y" "amdgpu-work-item-id-z" "uniform-work-group-size"="false" }
; ALL: attributes #[[ATTR9]] = { nounwind "amdgpu-work-group-id-y" "amdgpu-work-group-id-z" "amdgpu-work-item-id-y" "amdgpu-work-item-id-z" "uniform-work-group-size"="false" }
; NOHSA: attributes #[[ATTR0:[0-9]+]] = { nounwind readnone speculatable willreturn "uniform-work-group-size"="false" }
; NOHSA: attributes #[[ATTR1]] = { nounwind "uniform-work-group-size"="false" }
; NOHSA: attributes #[[ATTR2]] = { nounwind "amdgpu-work-group-id-y" "uniform-work-group-size"="false" }
; NOHSA: attributes #[[ATTR3]] = { nounwind "amdgpu-work-group-id-z" "uniform-work-group-size"="false" }
; NOHSA: attributes #[[ATTR4]] = { nounwind "amdgpu-work-group-id-y" "amdgpu-work-group-id-z" "uniform-work-group-size"="false" }
; NOHSA: attributes #[[ATTR5]] = { nounwind "amdgpu-work-item-id-y" "uniform-work-group-size"="false" }
; NOHSA: attributes #[[ATTR6]] = { nounwind "amdgpu-work-item-id-z" "uniform-work-group-size"="false" }
; NOHSA: attributes #[[ATTR7]] = { nounwind "amdgpu-work-group-id-y" "amdgpu-work-item-id-y" "uniform-work-group-size"="false" }
; NOHSA: attributes #[[ATTR8]] = { nounwind "amdgpu-work-item-id-y" "amdgpu-work-item-id-z" "uniform-work-group-size"="false" }
; NOHSA: attributes #[[ATTR9]] = { nounwind "amdgpu-work-group-id-y" "amdgpu-work-group-id-z" "amdgpu-work-item-id-y" "amdgpu-work-item-id-z" "uniform-work-group-size"="false" }
;.
; AKF_CHECK: attributes #[[ATTR0:[0-9]+]] = { nounwind readnone speculatable willreturn }
; AKF_CHECK: attributes #[[ATTR1]] = { nounwind }
; AKF_CHECK: attributes #[[ATTR2]] = { nounwind "amdgpu-work-group-id-y" }
; AKF_CHECK: attributes #[[ATTR3]] = { nounwind "amdgpu-work-group-id-z" }
; AKF_CHECK: attributes #[[ATTR4]] = { nounwind "amdgpu-work-group-id-y" "amdgpu-work-group-id-z" }
; AKF_CHECK: attributes #[[ATTR5]] = { nounwind "amdgpu-work-item-id-y" }
; AKF_CHECK: attributes #[[ATTR6]] = { nounwind "amdgpu-work-item-id-z" }
; AKF_CHECK: attributes #[[ATTR7]] = { nounwind "amdgpu-work-group-id-y" "amdgpu-work-item-id-y" }
; AKF_CHECK: attributes #[[ATTR8]] = { nounwind "amdgpu-work-item-id-y" "amdgpu-work-item-id-z" }
; AKF_CHECK: attributes #[[ATTR9]] = { nounwind "amdgpu-work-group-id-y" "amdgpu-work-group-id-z" "amdgpu-work-item-id-y" "amdgpu-work-item-id-z" }
;.
; ATTRIBUTOR_CHECK: attributes #[[ATTR0:[0-9]+]] = { nounwind readnone speculatable willreturn }
; ATTRIBUTOR_CHECK: attributes #[[ATTR1]] = { nounwind "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
; ATTRIBUTOR_CHECK: attributes #[[ATTR2]] = { nounwind "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
; ATTRIBUTOR_CHECK: attributes #[[ATTR3]] = { nounwind "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
; ATTRIBUTOR_CHECK: attributes #[[ATTR4]] = { nounwind "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
; ATTRIBUTOR_CHECK: attributes #[[ATTR5]] = { nounwind "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
; ATTRIBUTOR_CHECK: attributes #[[ATTR6]] = { nounwind "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "uniform-work-group-size"="false" }
; ATTRIBUTOR_CHECK: attributes #[[ATTR7]] = { nounwind "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
; ATTRIBUTOR_CHECK: attributes #[[ATTR8]] = { nounwind "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "uniform-work-group-size"="false" }
; ATTRIBUTOR_CHECK: attributes #[[ATTR9]] = { nounwind "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workitem-id-x" "uniform-work-group-size"="false" }
;.