This switches everything to use the memory attribute proposed in https://discourse.llvm.org/t/rfc-unify-memory-effect-attributes/65579. The old argmemonly, inaccessiblememonly and inaccessiblemem_or_argmemonly attributes are dropped. The readnone, readonly and writeonly attributes are restricted to parameters only.

The old attributes are auto-upgraded both in bitcode and IR. The bitcode upgrade is a policy requirement that has to be retained indefinitely. The IR upgrade is mainly there so it's not necessary to update all tests using memory attributes in this patch, which is already large enough. We could drop that part after migrating tests, or retain it longer term, to make it easier to import IR from older LLVM versions.

High-level Function/CallBase APIs like doesNotAccessMemory() or setDoesNotAccessMemory() are mapped transparently to the memory attribute. Code that directly manipulates attributes (e.g. via AttributeList) on the other hand needs to switch to working with the memory attribute instead.

Differential Revision: https://reviews.llvm.org/D135780
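As a quick illustration of what the auto-upgrade means in textual IR (the declarations below are hypothetical examples, not part of this test, and the upgraded spellings are only a sketch of what the upgrade produces):

; Old function-level spellings, as accepted pre-patch:
declare void @reads_nothing() readnone                 ; upgraded to: memory(none)
declare void @touches_args_only(i8* %p) argmemonly     ; upgraded to: memory(argmem: readwrite)
declare void @inaccessible_only() inaccessiblememonly  ; upgraded to: memory(inaccessiblemem: readwrite)
; Parameter-level readnone/readonly/writeonly remain valid as-is:
declare void @reads_through_arg(i8* readonly %p)

In this test, that upgrade is why the intrinsic declarations tagged readnone via attributes #0 are matched as memory(none) in the autogenerated attribute checks at the bottom of the file.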
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --function-signature --check-globals
; RUN: opt -S -mtriple=amdgcn-unknown-unknown -amdgpu-annotate-kernel-features < %s | FileCheck -check-prefixes=CHECK,AKF_CHECK %s
; RUN: opt -S -mtriple=amdgcn-unknown-unknown -amdgpu-attributor < %s | FileCheck -check-prefixes=CHECK,ATTRIBUTOR_CHECK %s

declare i32 @llvm.r600.read.tgid.x() #0
declare i32 @llvm.r600.read.tgid.y() #0
declare i32 @llvm.r600.read.tgid.z() #0

declare i32 @llvm.r600.read.tidig.x() #0
declare i32 @llvm.r600.read.tidig.y() #0
declare i32 @llvm.r600.read.tidig.z() #0

declare i32 @llvm.r600.read.local.size.x() #0
declare i32 @llvm.r600.read.local.size.y() #0
declare i32 @llvm.r600.read.local.size.z() #0

define amdgpu_kernel void @use_tgid_x(i32 addrspace(1)* %ptr) #1 {
; CHECK-LABEL: define {{[^@]+}}@use_tgid_x
; CHECK-SAME: (i32 addrspace(1)* [[PTR:%.*]]) #[[ATTR1:[0-9]+]] {
; CHECK-NEXT: [[VAL:%.*]] = call i32 @llvm.r600.read.tgid.x()
; CHECK-NEXT: store i32 [[VAL]], i32 addrspace(1)* [[PTR]], align 4
; CHECK-NEXT: ret void
;
  %val = call i32 @llvm.r600.read.tgid.x()
  store i32 %val, i32 addrspace(1)* %ptr
  ret void
}

define amdgpu_kernel void @use_tgid_y(i32 addrspace(1)* %ptr) #1 {
; AKF_CHECK-LABEL: define {{[^@]+}}@use_tgid_y
; AKF_CHECK-SAME: (i32 addrspace(1)* [[PTR:%.*]]) #[[ATTR1]] {
; AKF_CHECK-NEXT: [[VAL:%.*]] = call i32 @llvm.r600.read.tgid.y()
; AKF_CHECK-NEXT: store i32 [[VAL]], i32 addrspace(1)* [[PTR]], align 4
; AKF_CHECK-NEXT: ret void
;
; ATTRIBUTOR_CHECK-LABEL: define {{[^@]+}}@use_tgid_y
; ATTRIBUTOR_CHECK-SAME: (i32 addrspace(1)* [[PTR:%.*]]) #[[ATTR2:[0-9]+]] {
; ATTRIBUTOR_CHECK-NEXT: [[VAL:%.*]] = call i32 @llvm.r600.read.tgid.y()
; ATTRIBUTOR_CHECK-NEXT: store i32 [[VAL]], i32 addrspace(1)* [[PTR]], align 4
; ATTRIBUTOR_CHECK-NEXT: ret void
;
  %val = call i32 @llvm.r600.read.tgid.y()
  store i32 %val, i32 addrspace(1)* %ptr
  ret void
}

define amdgpu_kernel void @multi_use_tgid_y(i32 addrspace(1)* %ptr) #1 {
; AKF_CHECK-LABEL: define {{[^@]+}}@multi_use_tgid_y
; AKF_CHECK-SAME: (i32 addrspace(1)* [[PTR:%.*]]) #[[ATTR1]] {
; AKF_CHECK-NEXT: [[VAL0:%.*]] = call i32 @llvm.r600.read.tgid.y()
; AKF_CHECK-NEXT: store volatile i32 [[VAL0]], i32 addrspace(1)* [[PTR]], align 4
; AKF_CHECK-NEXT: [[VAL1:%.*]] = call i32 @llvm.r600.read.tgid.y()
; AKF_CHECK-NEXT: store volatile i32 [[VAL1]], i32 addrspace(1)* [[PTR]], align 4
; AKF_CHECK-NEXT: ret void
;
; ATTRIBUTOR_CHECK-LABEL: define {{[^@]+}}@multi_use_tgid_y
; ATTRIBUTOR_CHECK-SAME: (i32 addrspace(1)* [[PTR:%.*]]) #[[ATTR2]] {
; ATTRIBUTOR_CHECK-NEXT: [[VAL0:%.*]] = call i32 @llvm.r600.read.tgid.y()
; ATTRIBUTOR_CHECK-NEXT: store volatile i32 [[VAL0]], i32 addrspace(1)* [[PTR]], align 4
; ATTRIBUTOR_CHECK-NEXT: [[VAL1:%.*]] = call i32 @llvm.r600.read.tgid.y()
; ATTRIBUTOR_CHECK-NEXT: store volatile i32 [[VAL1]], i32 addrspace(1)* [[PTR]], align 4
; ATTRIBUTOR_CHECK-NEXT: ret void
;
  %val0 = call i32 @llvm.r600.read.tgid.y()
  store volatile i32 %val0, i32 addrspace(1)* %ptr
  %val1 = call i32 @llvm.r600.read.tgid.y()
  store volatile i32 %val1, i32 addrspace(1)* %ptr
  ret void
}

define amdgpu_kernel void @use_tgid_x_y(i32 addrspace(1)* %ptr) #1 {
; AKF_CHECK-LABEL: define {{[^@]+}}@use_tgid_x_y
; AKF_CHECK-SAME: (i32 addrspace(1)* [[PTR:%.*]]) #[[ATTR1]] {
; AKF_CHECK-NEXT: [[VAL0:%.*]] = call i32 @llvm.r600.read.tgid.x()
; AKF_CHECK-NEXT: [[VAL1:%.*]] = call i32 @llvm.r600.read.tgid.y()
; AKF_CHECK-NEXT: store volatile i32 [[VAL0]], i32 addrspace(1)* [[PTR]], align 4
; AKF_CHECK-NEXT: store volatile i32 [[VAL1]], i32 addrspace(1)* [[PTR]], align 4
; AKF_CHECK-NEXT: ret void
;
; ATTRIBUTOR_CHECK-LABEL: define {{[^@]+}}@use_tgid_x_y
; ATTRIBUTOR_CHECK-SAME: (i32 addrspace(1)* [[PTR:%.*]]) #[[ATTR2]] {
; ATTRIBUTOR_CHECK-NEXT: [[VAL0:%.*]] = call i32 @llvm.r600.read.tgid.x()
; ATTRIBUTOR_CHECK-NEXT: [[VAL1:%.*]] = call i32 @llvm.r600.read.tgid.y()
; ATTRIBUTOR_CHECK-NEXT: store volatile i32 [[VAL0]], i32 addrspace(1)* [[PTR]], align 4
; ATTRIBUTOR_CHECK-NEXT: store volatile i32 [[VAL1]], i32 addrspace(1)* [[PTR]], align 4
; ATTRIBUTOR_CHECK-NEXT: ret void
;
  %val0 = call i32 @llvm.r600.read.tgid.x()
  %val1 = call i32 @llvm.r600.read.tgid.y()
  store volatile i32 %val0, i32 addrspace(1)* %ptr
  store volatile i32 %val1, i32 addrspace(1)* %ptr
  ret void
}

define amdgpu_kernel void @use_tgid_z(i32 addrspace(1)* %ptr) #1 {
; AKF_CHECK-LABEL: define {{[^@]+}}@use_tgid_z
; AKF_CHECK-SAME: (i32 addrspace(1)* [[PTR:%.*]]) #[[ATTR1]] {
; AKF_CHECK-NEXT: [[VAL:%.*]] = call i32 @llvm.r600.read.tgid.z()
; AKF_CHECK-NEXT: store i32 [[VAL]], i32 addrspace(1)* [[PTR]], align 4
; AKF_CHECK-NEXT: ret void
;
; ATTRIBUTOR_CHECK-LABEL: define {{[^@]+}}@use_tgid_z
; ATTRIBUTOR_CHECK-SAME: (i32 addrspace(1)* [[PTR:%.*]]) #[[ATTR3:[0-9]+]] {
; ATTRIBUTOR_CHECK-NEXT: [[VAL:%.*]] = call i32 @llvm.r600.read.tgid.z()
; ATTRIBUTOR_CHECK-NEXT: store i32 [[VAL]], i32 addrspace(1)* [[PTR]], align 4
; ATTRIBUTOR_CHECK-NEXT: ret void
;
  %val = call i32 @llvm.r600.read.tgid.z()
  store i32 %val, i32 addrspace(1)* %ptr
  ret void
}

define amdgpu_kernel void @use_tgid_x_z(i32 addrspace(1)* %ptr) #1 {
; AKF_CHECK-LABEL: define {{[^@]+}}@use_tgid_x_z
; AKF_CHECK-SAME: (i32 addrspace(1)* [[PTR:%.*]]) #[[ATTR1]] {
; AKF_CHECK-NEXT: [[VAL0:%.*]] = call i32 @llvm.r600.read.tgid.x()
; AKF_CHECK-NEXT: [[VAL1:%.*]] = call i32 @llvm.r600.read.tgid.z()
; AKF_CHECK-NEXT: store volatile i32 [[VAL0]], i32 addrspace(1)* [[PTR]], align 4
; AKF_CHECK-NEXT: store volatile i32 [[VAL1]], i32 addrspace(1)* [[PTR]], align 4
; AKF_CHECK-NEXT: ret void
;
; ATTRIBUTOR_CHECK-LABEL: define {{[^@]+}}@use_tgid_x_z
; ATTRIBUTOR_CHECK-SAME: (i32 addrspace(1)* [[PTR:%.*]]) #[[ATTR3]] {
; ATTRIBUTOR_CHECK-NEXT: [[VAL0:%.*]] = call i32 @llvm.r600.read.tgid.x()
; ATTRIBUTOR_CHECK-NEXT: [[VAL1:%.*]] = call i32 @llvm.r600.read.tgid.z()
; ATTRIBUTOR_CHECK-NEXT: store volatile i32 [[VAL0]], i32 addrspace(1)* [[PTR]], align 4
; ATTRIBUTOR_CHECK-NEXT: store volatile i32 [[VAL1]], i32 addrspace(1)* [[PTR]], align 4
; ATTRIBUTOR_CHECK-NEXT: ret void
;
  %val0 = call i32 @llvm.r600.read.tgid.x()
  %val1 = call i32 @llvm.r600.read.tgid.z()
  store volatile i32 %val0, i32 addrspace(1)* %ptr
  store volatile i32 %val1, i32 addrspace(1)* %ptr
  ret void
}

define amdgpu_kernel void @use_tgid_y_z(i32 addrspace(1)* %ptr) #1 {
; AKF_CHECK-LABEL: define {{[^@]+}}@use_tgid_y_z
; AKF_CHECK-SAME: (i32 addrspace(1)* [[PTR:%.*]]) #[[ATTR1]] {
; AKF_CHECK-NEXT: [[VAL0:%.*]] = call i32 @llvm.r600.read.tgid.y()
; AKF_CHECK-NEXT: [[VAL1:%.*]] = call i32 @llvm.r600.read.tgid.z()
; AKF_CHECK-NEXT: store volatile i32 [[VAL0]], i32 addrspace(1)* [[PTR]], align 4
; AKF_CHECK-NEXT: store volatile i32 [[VAL1]], i32 addrspace(1)* [[PTR]], align 4
; AKF_CHECK-NEXT: ret void
;
; ATTRIBUTOR_CHECK-LABEL: define {{[^@]+}}@use_tgid_y_z
; ATTRIBUTOR_CHECK-SAME: (i32 addrspace(1)* [[PTR:%.*]]) #[[ATTR4:[0-9]+]] {
; ATTRIBUTOR_CHECK-NEXT: [[VAL0:%.*]] = call i32 @llvm.r600.read.tgid.y()
; ATTRIBUTOR_CHECK-NEXT: [[VAL1:%.*]] = call i32 @llvm.r600.read.tgid.z()
; ATTRIBUTOR_CHECK-NEXT: store volatile i32 [[VAL0]], i32 addrspace(1)* [[PTR]], align 4
; ATTRIBUTOR_CHECK-NEXT: store volatile i32 [[VAL1]], i32 addrspace(1)* [[PTR]], align 4
; ATTRIBUTOR_CHECK-NEXT: ret void
;
  %val0 = call i32 @llvm.r600.read.tgid.y()
  %val1 = call i32 @llvm.r600.read.tgid.z()
  store volatile i32 %val0, i32 addrspace(1)* %ptr
  store volatile i32 %val1, i32 addrspace(1)* %ptr
  ret void
}

define amdgpu_kernel void @use_tgid_x_y_z(i32 addrspace(1)* %ptr) #1 {
; AKF_CHECK-LABEL: define {{[^@]+}}@use_tgid_x_y_z
; AKF_CHECK-SAME: (i32 addrspace(1)* [[PTR:%.*]]) #[[ATTR1]] {
; AKF_CHECK-NEXT: [[VAL0:%.*]] = call i32 @llvm.r600.read.tgid.x()
; AKF_CHECK-NEXT: [[VAL1:%.*]] = call i32 @llvm.r600.read.tgid.y()
; AKF_CHECK-NEXT: [[VAL2:%.*]] = call i32 @llvm.r600.read.tgid.z()
; AKF_CHECK-NEXT: store volatile i32 [[VAL0]], i32 addrspace(1)* [[PTR]], align 4
; AKF_CHECK-NEXT: store volatile i32 [[VAL1]], i32 addrspace(1)* [[PTR]], align 4
; AKF_CHECK-NEXT: store volatile i32 [[VAL2]], i32 addrspace(1)* [[PTR]], align 4
; AKF_CHECK-NEXT: ret void
;
; ATTRIBUTOR_CHECK-LABEL: define {{[^@]+}}@use_tgid_x_y_z
; ATTRIBUTOR_CHECK-SAME: (i32 addrspace(1)* [[PTR:%.*]]) #[[ATTR4]] {
; ATTRIBUTOR_CHECK-NEXT: [[VAL0:%.*]] = call i32 @llvm.r600.read.tgid.x()
; ATTRIBUTOR_CHECK-NEXT: [[VAL1:%.*]] = call i32 @llvm.r600.read.tgid.y()
; ATTRIBUTOR_CHECK-NEXT: [[VAL2:%.*]] = call i32 @llvm.r600.read.tgid.z()
; ATTRIBUTOR_CHECK-NEXT: store volatile i32 [[VAL0]], i32 addrspace(1)* [[PTR]], align 4
; ATTRIBUTOR_CHECK-NEXT: store volatile i32 [[VAL1]], i32 addrspace(1)* [[PTR]], align 4
; ATTRIBUTOR_CHECK-NEXT: store volatile i32 [[VAL2]], i32 addrspace(1)* [[PTR]], align 4
; ATTRIBUTOR_CHECK-NEXT: ret void
;
  %val0 = call i32 @llvm.r600.read.tgid.x()
  %val1 = call i32 @llvm.r600.read.tgid.y()
  %val2 = call i32 @llvm.r600.read.tgid.z()
  store volatile i32 %val0, i32 addrspace(1)* %ptr
  store volatile i32 %val1, i32 addrspace(1)* %ptr
  store volatile i32 %val2, i32 addrspace(1)* %ptr
  ret void
}

define amdgpu_kernel void @use_tidig_x(i32 addrspace(1)* %ptr) #1 {
; CHECK-LABEL: define {{[^@]+}}@use_tidig_x
; CHECK-SAME: (i32 addrspace(1)* [[PTR:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: [[VAL:%.*]] = call i32 @llvm.r600.read.tidig.x()
; CHECK-NEXT: store i32 [[VAL]], i32 addrspace(1)* [[PTR]], align 4
; CHECK-NEXT: ret void
;
  %val = call i32 @llvm.r600.read.tidig.x()
  store i32 %val, i32 addrspace(1)* %ptr
  ret void
}

define amdgpu_kernel void @use_tidig_y(i32 addrspace(1)* %ptr) #1 {
; AKF_CHECK-LABEL: define {{[^@]+}}@use_tidig_y
; AKF_CHECK-SAME: (i32 addrspace(1)* [[PTR:%.*]]) #[[ATTR1]] {
; AKF_CHECK-NEXT: [[VAL:%.*]] = call i32 @llvm.r600.read.tidig.y()
; AKF_CHECK-NEXT: store i32 [[VAL]], i32 addrspace(1)* [[PTR]], align 4
; AKF_CHECK-NEXT: ret void
;
; ATTRIBUTOR_CHECK-LABEL: define {{[^@]+}}@use_tidig_y
; ATTRIBUTOR_CHECK-SAME: (i32 addrspace(1)* [[PTR:%.*]]) #[[ATTR5:[0-9]+]] {
; ATTRIBUTOR_CHECK-NEXT: [[VAL:%.*]] = call i32 @llvm.r600.read.tidig.y()
; ATTRIBUTOR_CHECK-NEXT: store i32 [[VAL]], i32 addrspace(1)* [[PTR]], align 4
; ATTRIBUTOR_CHECK-NEXT: ret void
;
  %val = call i32 @llvm.r600.read.tidig.y()
  store i32 %val, i32 addrspace(1)* %ptr
  ret void
}

define amdgpu_kernel void @use_tidig_z(i32 addrspace(1)* %ptr) #1 {
; AKF_CHECK-LABEL: define {{[^@]+}}@use_tidig_z
; AKF_CHECK-SAME: (i32 addrspace(1)* [[PTR:%.*]]) #[[ATTR1]] {
; AKF_CHECK-NEXT: [[VAL:%.*]] = call i32 @llvm.r600.read.tidig.z()
; AKF_CHECK-NEXT: store i32 [[VAL]], i32 addrspace(1)* [[PTR]], align 4
; AKF_CHECK-NEXT: ret void
;
; ATTRIBUTOR_CHECK-LABEL: define {{[^@]+}}@use_tidig_z
; ATTRIBUTOR_CHECK-SAME: (i32 addrspace(1)* [[PTR:%.*]]) #[[ATTR6:[0-9]+]] {
; ATTRIBUTOR_CHECK-NEXT: [[VAL:%.*]] = call i32 @llvm.r600.read.tidig.z()
; ATTRIBUTOR_CHECK-NEXT: store i32 [[VAL]], i32 addrspace(1)* [[PTR]], align 4
; ATTRIBUTOR_CHECK-NEXT: ret void
;
  %val = call i32 @llvm.r600.read.tidig.z()
  store i32 %val, i32 addrspace(1)* %ptr
  ret void
}

define amdgpu_kernel void @use_tidig_x_tgid_x(i32 addrspace(1)* %ptr) #1 {
; CHECK-LABEL: define {{[^@]+}}@use_tidig_x_tgid_x
; CHECK-SAME: (i32 addrspace(1)* [[PTR:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: [[VAL0:%.*]] = call i32 @llvm.r600.read.tidig.x()
; CHECK-NEXT: [[VAL1:%.*]] = call i32 @llvm.r600.read.tgid.x()
; CHECK-NEXT: store volatile i32 [[VAL0]], i32 addrspace(1)* [[PTR]], align 4
; CHECK-NEXT: store volatile i32 [[VAL1]], i32 addrspace(1)* [[PTR]], align 4
; CHECK-NEXT: ret void
;
  %val0 = call i32 @llvm.r600.read.tidig.x()
  %val1 = call i32 @llvm.r600.read.tgid.x()
  store volatile i32 %val0, i32 addrspace(1)* %ptr
  store volatile i32 %val1, i32 addrspace(1)* %ptr
  ret void
}

define amdgpu_kernel void @use_tidig_y_tgid_y(i32 addrspace(1)* %ptr) #1 {
; AKF_CHECK-LABEL: define {{[^@]+}}@use_tidig_y_tgid_y
; AKF_CHECK-SAME: (i32 addrspace(1)* [[PTR:%.*]]) #[[ATTR1]] {
; AKF_CHECK-NEXT: [[VAL0:%.*]] = call i32 @llvm.r600.read.tidig.y()
; AKF_CHECK-NEXT: [[VAL1:%.*]] = call i32 @llvm.r600.read.tgid.y()
; AKF_CHECK-NEXT: store volatile i32 [[VAL0]], i32 addrspace(1)* [[PTR]], align 4
; AKF_CHECK-NEXT: store volatile i32 [[VAL1]], i32 addrspace(1)* [[PTR]], align 4
; AKF_CHECK-NEXT: ret void
;
; ATTRIBUTOR_CHECK-LABEL: define {{[^@]+}}@use_tidig_y_tgid_y
; ATTRIBUTOR_CHECK-SAME: (i32 addrspace(1)* [[PTR:%.*]]) #[[ATTR7:[0-9]+]] {
; ATTRIBUTOR_CHECK-NEXT: [[VAL0:%.*]] = call i32 @llvm.r600.read.tidig.y()
; ATTRIBUTOR_CHECK-NEXT: [[VAL1:%.*]] = call i32 @llvm.r600.read.tgid.y()
; ATTRIBUTOR_CHECK-NEXT: store volatile i32 [[VAL0]], i32 addrspace(1)* [[PTR]], align 4
; ATTRIBUTOR_CHECK-NEXT: store volatile i32 [[VAL1]], i32 addrspace(1)* [[PTR]], align 4
; ATTRIBUTOR_CHECK-NEXT: ret void
;
  %val0 = call i32 @llvm.r600.read.tidig.y()
  %val1 = call i32 @llvm.r600.read.tgid.y()
  store volatile i32 %val0, i32 addrspace(1)* %ptr
  store volatile i32 %val1, i32 addrspace(1)* %ptr
  ret void
}

define amdgpu_kernel void @use_tidig_x_y_z(i32 addrspace(1)* %ptr) #1 {
; AKF_CHECK-LABEL: define {{[^@]+}}@use_tidig_x_y_z
; AKF_CHECK-SAME: (i32 addrspace(1)* [[PTR:%.*]]) #[[ATTR1]] {
; AKF_CHECK-NEXT: [[VAL0:%.*]] = call i32 @llvm.r600.read.tidig.x()
; AKF_CHECK-NEXT: [[VAL1:%.*]] = call i32 @llvm.r600.read.tidig.y()
; AKF_CHECK-NEXT: [[VAL2:%.*]] = call i32 @llvm.r600.read.tidig.z()
; AKF_CHECK-NEXT: store volatile i32 [[VAL0]], i32 addrspace(1)* [[PTR]], align 4
; AKF_CHECK-NEXT: store volatile i32 [[VAL1]], i32 addrspace(1)* [[PTR]], align 4
; AKF_CHECK-NEXT: store volatile i32 [[VAL2]], i32 addrspace(1)* [[PTR]], align 4
; AKF_CHECK-NEXT: ret void
;
; ATTRIBUTOR_CHECK-LABEL: define {{[^@]+}}@use_tidig_x_y_z
; ATTRIBUTOR_CHECK-SAME: (i32 addrspace(1)* [[PTR:%.*]]) #[[ATTR8:[0-9]+]] {
; ATTRIBUTOR_CHECK-NEXT: [[VAL0:%.*]] = call i32 @llvm.r600.read.tidig.x()
; ATTRIBUTOR_CHECK-NEXT: [[VAL1:%.*]] = call i32 @llvm.r600.read.tidig.y()
; ATTRIBUTOR_CHECK-NEXT: [[VAL2:%.*]] = call i32 @llvm.r600.read.tidig.z()
; ATTRIBUTOR_CHECK-NEXT: store volatile i32 [[VAL0]], i32 addrspace(1)* [[PTR]], align 4
; ATTRIBUTOR_CHECK-NEXT: store volatile i32 [[VAL1]], i32 addrspace(1)* [[PTR]], align 4
; ATTRIBUTOR_CHECK-NEXT: store volatile i32 [[VAL2]], i32 addrspace(1)* [[PTR]], align 4
; ATTRIBUTOR_CHECK-NEXT: ret void
;
  %val0 = call i32 @llvm.r600.read.tidig.x()
  %val1 = call i32 @llvm.r600.read.tidig.y()
  %val2 = call i32 @llvm.r600.read.tidig.z()
  store volatile i32 %val0, i32 addrspace(1)* %ptr
  store volatile i32 %val1, i32 addrspace(1)* %ptr
  store volatile i32 %val2, i32 addrspace(1)* %ptr
  ret void
}

define amdgpu_kernel void @use_all_workitems(i32 addrspace(1)* %ptr) #1 {
; AKF_CHECK-LABEL: define {{[^@]+}}@use_all_workitems
; AKF_CHECK-SAME: (i32 addrspace(1)* [[PTR:%.*]]) #[[ATTR1]] {
; AKF_CHECK-NEXT: [[VAL0:%.*]] = call i32 @llvm.r600.read.tidig.x()
; AKF_CHECK-NEXT: [[VAL1:%.*]] = call i32 @llvm.r600.read.tidig.y()
; AKF_CHECK-NEXT: [[VAL2:%.*]] = call i32 @llvm.r600.read.tidig.z()
; AKF_CHECK-NEXT: [[VAL3:%.*]] = call i32 @llvm.r600.read.tgid.x()
; AKF_CHECK-NEXT: [[VAL4:%.*]] = call i32 @llvm.r600.read.tgid.y()
; AKF_CHECK-NEXT: [[VAL5:%.*]] = call i32 @llvm.r600.read.tgid.z()
; AKF_CHECK-NEXT: store volatile i32 [[VAL0]], i32 addrspace(1)* [[PTR]], align 4
; AKF_CHECK-NEXT: store volatile i32 [[VAL1]], i32 addrspace(1)* [[PTR]], align 4
; AKF_CHECK-NEXT: store volatile i32 [[VAL2]], i32 addrspace(1)* [[PTR]], align 4
; AKF_CHECK-NEXT: store volatile i32 [[VAL3]], i32 addrspace(1)* [[PTR]], align 4
; AKF_CHECK-NEXT: store volatile i32 [[VAL4]], i32 addrspace(1)* [[PTR]], align 4
; AKF_CHECK-NEXT: store volatile i32 [[VAL5]], i32 addrspace(1)* [[PTR]], align 4
; AKF_CHECK-NEXT: ret void
;
; ATTRIBUTOR_CHECK-LABEL: define {{[^@]+}}@use_all_workitems
; ATTRIBUTOR_CHECK-SAME: (i32 addrspace(1)* [[PTR:%.*]]) #[[ATTR9:[0-9]+]] {
; ATTRIBUTOR_CHECK-NEXT: [[VAL0:%.*]] = call i32 @llvm.r600.read.tidig.x()
; ATTRIBUTOR_CHECK-NEXT: [[VAL1:%.*]] = call i32 @llvm.r600.read.tidig.y()
; ATTRIBUTOR_CHECK-NEXT: [[VAL2:%.*]] = call i32 @llvm.r600.read.tidig.z()
; ATTRIBUTOR_CHECK-NEXT: [[VAL3:%.*]] = call i32 @llvm.r600.read.tgid.x()
; ATTRIBUTOR_CHECK-NEXT: [[VAL4:%.*]] = call i32 @llvm.r600.read.tgid.y()
; ATTRIBUTOR_CHECK-NEXT: [[VAL5:%.*]] = call i32 @llvm.r600.read.tgid.z()
; ATTRIBUTOR_CHECK-NEXT: store volatile i32 [[VAL0]], i32 addrspace(1)* [[PTR]], align 4
; ATTRIBUTOR_CHECK-NEXT: store volatile i32 [[VAL1]], i32 addrspace(1)* [[PTR]], align 4
; ATTRIBUTOR_CHECK-NEXT: store volatile i32 [[VAL2]], i32 addrspace(1)* [[PTR]], align 4
; ATTRIBUTOR_CHECK-NEXT: store volatile i32 [[VAL3]], i32 addrspace(1)* [[PTR]], align 4
; ATTRIBUTOR_CHECK-NEXT: store volatile i32 [[VAL4]], i32 addrspace(1)* [[PTR]], align 4
; ATTRIBUTOR_CHECK-NEXT: store volatile i32 [[VAL5]], i32 addrspace(1)* [[PTR]], align 4
; ATTRIBUTOR_CHECK-NEXT: ret void
;
  %val0 = call i32 @llvm.r600.read.tidig.x()
  %val1 = call i32 @llvm.r600.read.tidig.y()
  %val2 = call i32 @llvm.r600.read.tidig.z()
  %val3 = call i32 @llvm.r600.read.tgid.x()
  %val4 = call i32 @llvm.r600.read.tgid.y()
  %val5 = call i32 @llvm.r600.read.tgid.z()
  store volatile i32 %val0, i32 addrspace(1)* %ptr
  store volatile i32 %val1, i32 addrspace(1)* %ptr
  store volatile i32 %val2, i32 addrspace(1)* %ptr
  store volatile i32 %val3, i32 addrspace(1)* %ptr
  store volatile i32 %val4, i32 addrspace(1)* %ptr
  store volatile i32 %val5, i32 addrspace(1)* %ptr
  ret void
}

define amdgpu_kernel void @use_get_local_size_x(i32 addrspace(1)* %ptr) #1 {
; CHECK-LABEL: define {{[^@]+}}@use_get_local_size_x
; CHECK-SAME: (i32 addrspace(1)* [[PTR:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: [[VAL:%.*]] = call i32 @llvm.r600.read.local.size.x()
; CHECK-NEXT: store i32 [[VAL]], i32 addrspace(1)* [[PTR]], align 4
; CHECK-NEXT: ret void
;
  %val = call i32 @llvm.r600.read.local.size.x()
  store i32 %val, i32 addrspace(1)* %ptr
  ret void
}

define amdgpu_kernel void @use_get_local_size_y(i32 addrspace(1)* %ptr) #1 {
; CHECK-LABEL: define {{[^@]+}}@use_get_local_size_y
; CHECK-SAME: (i32 addrspace(1)* [[PTR:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: [[VAL:%.*]] = call i32 @llvm.r600.read.local.size.y()
; CHECK-NEXT: store i32 [[VAL]], i32 addrspace(1)* [[PTR]], align 4
; CHECK-NEXT: ret void
;
  %val = call i32 @llvm.r600.read.local.size.y()
  store i32 %val, i32 addrspace(1)* %ptr
  ret void
}

define amdgpu_kernel void @use_get_local_size_z(i32 addrspace(1)* %ptr) #1 {
; CHECK-LABEL: define {{[^@]+}}@use_get_local_size_z
; CHECK-SAME: (i32 addrspace(1)* [[PTR:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: [[VAL:%.*]] = call i32 @llvm.r600.read.local.size.z()
; CHECK-NEXT: store i32 [[VAL]], i32 addrspace(1)* [[PTR]], align 4
; CHECK-NEXT: ret void
;
  %val = call i32 @llvm.r600.read.local.size.z()
  store i32 %val, i32 addrspace(1)* %ptr
  ret void
}

attributes #0 = { nounwind readnone }
attributes #1 = { nounwind }

; ALL: attributes #[[ATTR0:[0-9]+]] = { nounwind readnone speculatable willreturn "uniform-work-group-size"="false" }
; ALL: attributes #[[ATTR1]] = { nounwind "uniform-work-group-size"="false" }
; ALL: attributes #[[ATTR2]] = { nounwind "amdgpu-work-group-id-y" "uniform-work-group-size"="false" }
; ALL: attributes #[[ATTR3]] = { nounwind "amdgpu-work-group-id-z" "uniform-work-group-size"="false" }
; ALL: attributes #[[ATTR4]] = { nounwind "amdgpu-work-group-id-y" "amdgpu-work-group-id-z" "uniform-work-group-size"="false" }
; ALL: attributes #[[ATTR5]] = { nounwind "amdgpu-work-item-id-y" "uniform-work-group-size"="false" }
; ALL: attributes #[[ATTR6]] = { nounwind "amdgpu-work-item-id-z" "uniform-work-group-size"="false" }
; ALL: attributes #[[ATTR7]] = { nounwind "amdgpu-work-group-id-y" "amdgpu-work-item-id-y" "uniform-work-group-size"="false" }
; ALL: attributes #[[ATTR8]] = { nounwind "amdgpu-work-item-id-y" "amdgpu-work-item-id-z" "uniform-work-group-size"="false" }
; ALL: attributes #[[ATTR9]] = { nounwind "amdgpu-work-group-id-y" "amdgpu-work-group-id-z" "amdgpu-work-item-id-y" "amdgpu-work-item-id-z" "uniform-work-group-size"="false" }
; NOHSA: attributes #[[ATTR0:[0-9]+]] = { nounwind readnone speculatable willreturn "uniform-work-group-size"="false" }
; NOHSA: attributes #[[ATTR1]] = { nounwind "uniform-work-group-size"="false" }
; NOHSA: attributes #[[ATTR2]] = { nounwind "amdgpu-work-group-id-y" "uniform-work-group-size"="false" }
; NOHSA: attributes #[[ATTR3]] = { nounwind "amdgpu-work-group-id-z" "uniform-work-group-size"="false" }
; NOHSA: attributes #[[ATTR4]] = { nounwind "amdgpu-work-group-id-y" "amdgpu-work-group-id-z" "uniform-work-group-size"="false" }
; NOHSA: attributes #[[ATTR5]] = { nounwind "amdgpu-work-item-id-y" "uniform-work-group-size"="false" }
; NOHSA: attributes #[[ATTR6]] = { nounwind "amdgpu-work-item-id-z" "uniform-work-group-size"="false" }
; NOHSA: attributes #[[ATTR7]] = { nounwind "amdgpu-work-group-id-y" "amdgpu-work-item-id-y" "uniform-work-group-size"="false" }
; NOHSA: attributes #[[ATTR8]] = { nounwind "amdgpu-work-item-id-y" "amdgpu-work-item-id-z" "uniform-work-group-size"="false" }
; NOHSA: attributes #[[ATTR9]] = { nounwind "amdgpu-work-group-id-y" "amdgpu-work-group-id-z" "amdgpu-work-item-id-y" "amdgpu-work-item-id-z" "uniform-work-group-size"="false" }
;.
; AKF_CHECK: attributes #[[ATTR0:[0-9]+]] = { nounwind speculatable willreturn memory(none) }
; AKF_CHECK: attributes #[[ATTR1]] = { nounwind }
;.
; ATTRIBUTOR_CHECK: attributes #[[ATTR0:[0-9]+]] = { nounwind speculatable willreturn memory(none) }
; ATTRIBUTOR_CHECK: attributes #[[ATTR1]] = { nounwind "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
; ATTRIBUTOR_CHECK: attributes #[[ATTR2]] = { nounwind "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
; ATTRIBUTOR_CHECK: attributes #[[ATTR3]] = { nounwind "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
; ATTRIBUTOR_CHECK: attributes #[[ATTR4]] = { nounwind "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
; ATTRIBUTOR_CHECK: attributes #[[ATTR5]] = { nounwind "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
; ATTRIBUTOR_CHECK: attributes #[[ATTR6]] = { nounwind "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "uniform-work-group-size"="false" }
; ATTRIBUTOR_CHECK: attributes #[[ATTR7]] = { nounwind "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
; ATTRIBUTOR_CHECK: attributes #[[ATTR8]] = { nounwind "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "uniform-work-group-size"="false" }
; ATTRIBUTOR_CHECK: attributes #[[ATTR9]] = { nounwind "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workitem-id-x" "uniform-work-group-size"="false" }
;.