; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=amdgcn -amdgpu-atomic-optimizer-strategy=None -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=SI %s
; RUN: llc -mtriple=amdgcn -mcpu=tonga -amdgpu-atomic-optimizer-strategy=None -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=VI %s
; RUN: llc -mtriple=amdgcn -mcpu=gfx900 -amdgpu-atomic-optimizer-strategy=None -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GFX9 %s
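
; Each test runs a 32-bit global atomicrmw through the SI, VI (tonga), and
; GFX9 backends, covering immediate offsets, SGPR soffsets, 64-bit register
; indexes, and the value-returning (glc) forms.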
define amdgpu_kernel void @atomic_add_i32_offset(ptr addrspace(1) %out, i32 %in) {
; SI-LABEL: atomic_add_i32_offset:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dword s4, s[2:3], 0xb
; SI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: buffer_atomic_add v0, off, s[0:3], 0 offset:16
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_add_i32_offset:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dword s4, s[2:3], 0x2c
; VI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; VI-NEXT: s_mov_b32 s3, 0xf000
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: buffer_atomic_add v0, off, s[0:3], 0 offset:16
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_add_i32_offset:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dword s4, s[2:3], 0x2c
; GFX9-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v1, s4
; GFX9-NEXT: global_atomic_add v0, v1, s[0:1] offset:16
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: s_endpgm
entry:
%gep = getelementptr i32, ptr addrspace(1) %out, i64 4
%val = atomicrmw volatile add ptr addrspace(1) %gep, i32 %in syncscope("agent") seq_cst
ret void
}
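
; An element offset of -1024 is -4096 bytes: the most negative immediate
; offset GFX9's global instructions can encode (offset:-4096). SI and VI
; have no negative immediate offset and materialize the address instead.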
define amdgpu_kernel void @atomic_add_i32_max_neg_offset(ptr addrspace(1) %out, i32 %in) {
; SI-LABEL: atomic_add_i32_max_neg_offset:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dword s4, s[2:3], 0xb
; SI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, 0
; SI-NEXT: v_mov_b32_e32 v0, 0xfffff000
; SI-NEXT: v_mov_b32_e32 v1, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v2, s4
; SI-NEXT: buffer_atomic_add v2, v[0:1], s[0:3], 0 addr64
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_add_i32_max_neg_offset:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; VI-NEXT: s_load_dword s2, s[2:3], 0x2c
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_add_u32 s0, s0, 0xfffff000
; VI-NEXT: s_addc_u32 s1, s1, -1
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: v_mov_b32_e32 v2, s2
; VI-NEXT: flat_atomic_add v[0:1], v2
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_add_i32_max_neg_offset:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dword s4, s[2:3], 0x2c
; GFX9-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v1, s4
; GFX9-NEXT: global_atomic_add v0, v1, s[0:1] offset:-4096
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: s_endpgm
entry:
%gep = getelementptr i32, ptr addrspace(1) %out, i64 -1024
%val = atomicrmw volatile add ptr addrspace(1) %gep, i32 %in syncscope("agent") seq_cst
ret void
}
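
; Element index 9000 is a byte offset of 36000 (0x8ca0). SI and VI pass it
; through the buffer soffset SGPR; GFX9 splits it into a 0x8000 base plus a
; 3232-byte immediate offset.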
define amdgpu_kernel void @atomic_add_i32_soffset(ptr addrspace(1) %out, i32 %in) {
; SI-LABEL: atomic_add_i32_soffset:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dword s4, s[2:3], 0xb
; SI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_mov_b32 s5, 0x8ca0
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: buffer_atomic_add v0, off, s[0:3], s5
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_add_i32_soffset:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dword s4, s[2:3], 0x2c
; VI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; VI-NEXT: s_mov_b32 s3, 0xf000
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: s_mov_b32 s5, 0x8ca0
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: buffer_atomic_add v0, off, s[0:3], s5
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_add_i32_soffset:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dword s4, s[2:3], 0x2c
; GFX9-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX9-NEXT: v_mov_b32_e32 v0, 0x8000
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v1, s4
; GFX9-NEXT: global_atomic_add v0, v1, s[0:1] offset:3232
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: s_endpgm
entry:
%gep = getelementptr i32, ptr addrspace(1) %out, i64 9000
%val = atomicrmw volatile add ptr addrspace(1) %gep, i32 %in syncscope("agent") seq_cst
ret void
}
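
; The byte offset here (0xabcd0000deac) is far beyond any immediate field,
; so every target folds the full 64-bit offset into the pointer itself.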
define amdgpu_kernel void @atomic_add_i32_huge_offset(ptr addrspace(1) %out, i32 %in) {
; SI-LABEL: atomic_add_i32_huge_offset:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x9
; SI-NEXT: s_load_dword s4, s[2:3], 0xb
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, 0
; SI-NEXT: v_mov_b32_e32 v0, 0xdeac
; SI-NEXT: v_mov_b32_e32 v1, 0xabcd
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v2, s4
; SI-NEXT: buffer_atomic_add v2, v[0:1], s[0:3], 0 addr64
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_add_i32_huge_offset:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; VI-NEXT: s_load_dword s2, s[2:3], 0x2c
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_add_u32 s0, s0, 0xdeac
; VI-NEXT: s_addc_u32 s1, s1, 0xabcd
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: v_mov_b32_e32 v2, s2
; VI-NEXT: flat_atomic_add v[0:1], v2
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_add_i32_huge_offset:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX9-NEXT: s_load_dword s4, s[2:3], 0x2c
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: s_add_u32 s0, s0, 0xdeac
; GFX9-NEXT: s_addc_u32 s1, s1, 0xabcd
; GFX9-NEXT: v_mov_b32_e32 v1, s4
; GFX9-NEXT: global_atomic_add v0, v1, s[0:1]
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: s_endpgm
entry:
%gep = getelementptr i32, ptr addrspace(1) %out, i64 47224239175595
%val = atomicrmw volatile add ptr addrspace(1) %gep, i32 %in syncscope("agent") seq_cst
ret void
}
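
; The _ret variants set the glc bit so the atomic returns the pre-op value,
; which is then stored to %out2.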
define amdgpu_kernel void @atomic_add_i32_ret_offset(ptr addrspace(1) %out, ptr addrspace(1) %out2, i32 %in) {
; SI-LABEL: atomic_add_i32_ret_offset:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x9
; SI-NEXT: s_load_dword s8, s[2:3], 0xd
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mov_b32 s0, s6
; SI-NEXT: s_mov_b32 s1, s7
; SI-NEXT: s_mov_b32 s6, s2
; SI-NEXT: s_mov_b32 s7, s3
; SI-NEXT: v_mov_b32_e32 v0, s8
; SI-NEXT: buffer_atomic_add v0, off, s[4:7], 0 offset:16 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_add_i32_ret_offset:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x24
; VI-NEXT: s_load_dword s0, s[2:3], 0x34
; VI-NEXT: s_mov_b32 s11, 0xf000
; VI-NEXT: s_mov_b32 s10, -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_mov_b32 s8, s6
; VI-NEXT: s_mov_b32 s9, s7
; VI-NEXT: s_mov_b32 s6, s10
; VI-NEXT: s_mov_b32 s7, s11
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: buffer_atomic_add v0, off, s[4:7], 0 offset:16 glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: buffer_store_dword v0, off, s[8:11], 0
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_add_i32_ret_offset:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dword s0, s[2:3], 0x34
; GFX9-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x24
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v1, s0
; GFX9-NEXT: global_atomic_add v1, v0, v1, s[4:5] offset:16 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: global_store_dword v0, v1, s[6:7]
; GFX9-NEXT: s_endpgm
entry:
%gep = getelementptr i32, ptr addrspace(1) %out, i64 4
%val = atomicrmw volatile add ptr addrspace(1) %gep, i32 %in syncscope("agent") seq_cst
store i32 %val, ptr addrspace(1) %out2
ret void
}
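
; The addr64 variants scale the i64 %index by 4 (s_lshl_b64 ... 2). SI folds
; the result into buffer addr64 addressing; VI and GFX9 add it to the base
; pointer in SGPRs.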
define amdgpu_kernel void @atomic_add_i32_addr64_offset(ptr addrspace(1) %out, i32 %in, i64 %index) {
; SI-LABEL: atomic_add_i32_addr64_offset:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0xd
; SI-NEXT: s_load_dword s6, s[2:3], 0xb
; SI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_lshl_b64 s[4:5], s[4:5], 2
; SI-NEXT: s_mov_b32 s2, 0
; SI-NEXT: v_mov_b32_e32 v2, s6
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: v_mov_b32_e32 v1, s5
; SI-NEXT: buffer_atomic_add v2, v[0:1], s[0:3], 0 addr64 offset:16
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_add_i32_addr64_offset:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x34
; VI-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0x24
; VI-NEXT: s_load_dword s2, s[2:3], 0x2c
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_lshl_b64 s[0:1], s[0:1], 2
; VI-NEXT: s_add_u32 s0, s4, s0
; VI-NEXT: s_addc_u32 s1, s5, s1
; VI-NEXT: s_add_u32 s0, s0, 16
; VI-NEXT: s_addc_u32 s1, s1, 0
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: v_mov_b32_e32 v2, s2
; VI-NEXT: flat_atomic_add v[0:1], v2
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_add_i32_addr64_offset:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x34
; GFX9-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0x24
; GFX9-NEXT: s_load_dword s6, s[2:3], 0x2c
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: s_lshl_b64 s[0:1], s[0:1], 2
; GFX9-NEXT: s_add_u32 s0, s4, s0
; GFX9-NEXT: s_addc_u32 s1, s5, s1
; GFX9-NEXT: v_mov_b32_e32 v1, s6
; GFX9-NEXT: global_atomic_add v0, v1, s[0:1] offset:16
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: s_endpgm
entry:
%ptr = getelementptr i32, ptr addrspace(1) %out, i64 %index
%gep = getelementptr i32, ptr addrspace(1) %ptr, i64 4
%val = atomicrmw volatile add ptr addrspace(1) %gep, i32 %in syncscope("agent") seq_cst
ret void
}
define amdgpu_kernel void @atomic_add_i32_ret_addr64_offset(ptr addrspace(1) %out, ptr addrspace(1) %out2, i32 %in, i64 %index) {
; SI-LABEL: atomic_add_i32_ret_addr64_offset:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x9
; SI-NEXT: s_load_dwordx2 s[8:9], s[2:3], 0xf
; SI-NEXT: s_load_dword s2, s[2:3], 0xd
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mov_b32 s0, s6
; SI-NEXT: s_mov_b32 s1, s7
; SI-NEXT: s_lshl_b64 s[8:9], s[8:9], 2
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, s3
; SI-NEXT: v_mov_b32_e32 v2, s2
; SI-NEXT: v_mov_b32_e32 v0, s8
; SI-NEXT: v_mov_b32_e32 v1, s9
; SI-NEXT: buffer_atomic_add v2, v[0:1], s[4:7], 0 addr64 offset:16 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: buffer_store_dword v2, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_add_i32_ret_addr64_offset:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x3c
; VI-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x24
; VI-NEXT: s_load_dword s2, s[2:3], 0x34
; VI-NEXT: s_mov_b32 s3, 0xf000
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_lshl_b64 s[0:1], s[0:1], 2
; VI-NEXT: s_add_u32 s0, s4, s0
; VI-NEXT: s_addc_u32 s1, s5, s1
; VI-NEXT: s_add_u32 s0, s0, 16
; VI-NEXT: s_addc_u32 s1, s1, 0
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: v_mov_b32_e32 v2, s2
; VI-NEXT: flat_atomic_add v0, v[0:1], v2 glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: s_mov_b32 s0, s6
; VI-NEXT: s_mov_b32 s1, s7
; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_add_i32_ret_addr64_offset:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x3c
; GFX9-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x24
; GFX9-NEXT: s_load_dword s8, s[2:3], 0x34
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: s_lshl_b64 s[0:1], s[0:1], 2
; GFX9-NEXT: s_add_u32 s0, s4, s0
; GFX9-NEXT: s_addc_u32 s1, s5, s1
; GFX9-NEXT: v_mov_b32_e32 v1, s8
; GFX9-NEXT: global_atomic_add v1, v0, v1, s[0:1] offset:16 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: global_store_dword v0, v1, s[6:7]
; GFX9-NEXT: s_endpgm
entry:
%ptr = getelementptr i32, ptr addrspace(1) %out, i64 %index
%gep = getelementptr i32, ptr addrspace(1) %ptr, i64 4
%val = atomicrmw volatile add ptr addrspace(1) %gep, i32 %in syncscope("agent") seq_cst
store i32 %val, ptr addrspace(1) %out2
ret void
}
define amdgpu_kernel void @atomic_add_i32(ptr addrspace(1) %out, i32 %in) {
; SI-LABEL: atomic_add_i32:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dword s4, s[2:3], 0xb
; SI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: buffer_atomic_add v0, off, s[0:3], 0
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_add_i32:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dword s4, s[2:3], 0x2c
; VI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; VI-NEXT: s_mov_b32 s3, 0xf000
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: buffer_atomic_add v0, off, s[0:3], 0
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_add_i32:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dword s4, s[2:3], 0x2c
; GFX9-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v1, s4
; GFX9-NEXT: global_atomic_add v0, v1, s[0:1]
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: s_endpgm
entry:
%val = atomicrmw volatile add ptr addrspace(1) %out, i32 %in syncscope("agent") seq_cst
ret void
}
define amdgpu_kernel void @atomic_add_i32_ret(ptr addrspace(1) %out, ptr addrspace(1) %out2, i32 %in) {
; SI-LABEL: atomic_add_i32_ret:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x9
; SI-NEXT: s_load_dword s8, s[2:3], 0xd
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mov_b32 s0, s4
; SI-NEXT: s_mov_b32 s1, s5
; SI-NEXT: v_mov_b32_e32 v0, s8
; SI-NEXT: buffer_atomic_add v0, off, s[0:3], 0 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: s_mov_b32 s0, s6
; SI-NEXT: s_mov_b32 s1, s7
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_add_i32_ret:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x24
; VI-NEXT: s_load_dword s8, s[2:3], 0x34
; VI-NEXT: s_mov_b32 s3, 0xf000
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_mov_b32 s0, s4
; VI-NEXT: s_mov_b32 s1, s5
; VI-NEXT: v_mov_b32_e32 v0, s8
; VI-NEXT: buffer_atomic_add v0, off, s[0:3], 0 glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_mov_b32 s0, s6
; VI-NEXT: s_mov_b32 s1, s7
; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_add_i32_ret:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dword s0, s[2:3], 0x34
; GFX9-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x24
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v1, s0
; GFX9-NEXT: global_atomic_add v1, v0, v1, s[4:5] glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: global_store_dword v0, v1, s[6:7]
; GFX9-NEXT: s_endpgm
entry:
%val = atomicrmw volatile add ptr addrspace(1) %out, i32 %in syncscope("agent") seq_cst
store i32 %val, ptr addrspace(1) %out2
ret void
}
define amdgpu_kernel void @atomic_add_i32_addr64(ptr addrspace(1) %out, i32 %in, i64 %index) {
; SI-LABEL: atomic_add_i32_addr64:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0xd
; SI-NEXT: s_load_dword s6, s[2:3], 0xb
; SI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_lshl_b64 s[4:5], s[4:5], 2
; SI-NEXT: s_mov_b32 s2, 0
; SI-NEXT: v_mov_b32_e32 v2, s6
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: v_mov_b32_e32 v1, s5
; SI-NEXT: buffer_atomic_add v2, v[0:1], s[0:3], 0 addr64
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_add_i32_addr64:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x34
; VI-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0x24
; VI-NEXT: s_load_dword s2, s[2:3], 0x2c
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_lshl_b64 s[0:1], s[0:1], 2
; VI-NEXT: s_add_u32 s0, s4, s0
; VI-NEXT: s_addc_u32 s1, s5, s1
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: v_mov_b32_e32 v2, s2
; VI-NEXT: flat_atomic_add v[0:1], v2
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_add_i32_addr64:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x34
; GFX9-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0x24
; GFX9-NEXT: s_load_dword s6, s[2:3], 0x2c
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: s_lshl_b64 s[0:1], s[0:1], 2
; GFX9-NEXT: s_add_u32 s0, s4, s0
; GFX9-NEXT: s_addc_u32 s1, s5, s1
; GFX9-NEXT: v_mov_b32_e32 v1, s6
; GFX9-NEXT: global_atomic_add v0, v1, s[0:1]
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: s_endpgm
entry:
%ptr = getelementptr i32, ptr addrspace(1) %out, i64 %index
%val = atomicrmw volatile add ptr addrspace(1) %ptr, i32 %in syncscope("agent") seq_cst
ret void
}
define amdgpu_kernel void @atomic_add_i32_ret_addr64(ptr addrspace(1) %out, ptr addrspace(1) %out2, i32 %in, i64 %index) {
; SI-LABEL: atomic_add_i32_ret_addr64:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x9
; SI-NEXT: s_load_dwordx2 s[8:9], s[2:3], 0xf
; SI-NEXT: s_load_dword s2, s[2:3], 0xd
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mov_b32 s0, s6
; SI-NEXT: s_mov_b32 s1, s7
; SI-NEXT: s_lshl_b64 s[8:9], s[8:9], 2
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, s3
; SI-NEXT: v_mov_b32_e32 v2, s2
; SI-NEXT: v_mov_b32_e32 v0, s8
; SI-NEXT: v_mov_b32_e32 v1, s9
; SI-NEXT: buffer_atomic_add v2, v[0:1], s[4:7], 0 addr64 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: buffer_store_dword v2, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_add_i32_ret_addr64:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x3c
; VI-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x24
; VI-NEXT: s_load_dword s2, s[2:3], 0x34
; VI-NEXT: s_mov_b32 s3, 0xf000
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_lshl_b64 s[0:1], s[0:1], 2
; VI-NEXT: s_add_u32 s0, s4, s0
; VI-NEXT: s_addc_u32 s1, s5, s1
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: v_mov_b32_e32 v2, s2
; VI-NEXT: flat_atomic_add v0, v[0:1], v2 glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: s_mov_b32 s0, s6
; VI-NEXT: s_mov_b32 s1, s7
; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_add_i32_ret_addr64:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x3c
; GFX9-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x24
; GFX9-NEXT: s_load_dword s8, s[2:3], 0x34
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: s_lshl_b64 s[0:1], s[0:1], 2
; GFX9-NEXT: s_add_u32 s0, s4, s0
; GFX9-NEXT: s_addc_u32 s1, s5, s1
; GFX9-NEXT: v_mov_b32_e32 v1, s8
; GFX9-NEXT: global_atomic_add v1, v0, v1, s[0:1] glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: global_store_dword v0, v1, s[6:7]
; GFX9-NEXT: s_endpgm
entry:
%ptr = getelementptr i32, ptr addrspace(1) %out, i64 %index
%val = atomicrmw volatile add ptr addrspace(1) %ptr, i32 %in syncscope("agent") seq_cst
store i32 %val, ptr addrspace(1) %out2
ret void
}
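
; The same addressing patterns repeat below for the remaining atomicrmw
; operations (and, sub, max, ...).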
define amdgpu_kernel void @atomic_and_i32_offset(ptr addrspace(1) %out, i32 %in) {
; SI-LABEL: atomic_and_i32_offset:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dword s4, s[2:3], 0xb
; SI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: buffer_atomic_and v0, off, s[0:3], 0 offset:16
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_and_i32_offset:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dword s4, s[2:3], 0x2c
; VI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; VI-NEXT: s_mov_b32 s3, 0xf000
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: buffer_atomic_and v0, off, s[0:3], 0 offset:16
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_and_i32_offset:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dword s4, s[2:3], 0x2c
; GFX9-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v1, s4
; GFX9-NEXT: global_atomic_and v0, v1, s[0:1] offset:16
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: s_endpgm
entry:
%gep = getelementptr i32, ptr addrspace(1) %out, i64 4
%val = atomicrmw volatile and ptr addrspace(1) %gep, i32 %in syncscope("agent") seq_cst
ret void
}
define amdgpu_kernel void @atomic_and_i32_ret_offset(ptr addrspace(1) %out, ptr addrspace(1) %out2, i32 %in) {
; SI-LABEL: atomic_and_i32_ret_offset:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x9
; SI-NEXT: s_load_dword s8, s[2:3], 0xd
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mov_b32 s0, s6
; SI-NEXT: s_mov_b32 s1, s7
; SI-NEXT: s_mov_b32 s6, s2
; SI-NEXT: s_mov_b32 s7, s3
; SI-NEXT: v_mov_b32_e32 v0, s8
; SI-NEXT: buffer_atomic_and v0, off, s[4:7], 0 offset:16 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_and_i32_ret_offset:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x24
; VI-NEXT: s_load_dword s0, s[2:3], 0x34
; VI-NEXT: s_mov_b32 s11, 0xf000
; VI-NEXT: s_mov_b32 s10, -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_mov_b32 s8, s6
; VI-NEXT: s_mov_b32 s9, s7
; VI-NEXT: s_mov_b32 s6, s10
; VI-NEXT: s_mov_b32 s7, s11
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: buffer_atomic_and v0, off, s[4:7], 0 offset:16 glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: buffer_store_dword v0, off, s[8:11], 0
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_and_i32_ret_offset:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dword s0, s[2:3], 0x34
; GFX9-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x24
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v1, s0
; GFX9-NEXT: global_atomic_and v1, v0, v1, s[4:5] offset:16 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: global_store_dword v0, v1, s[6:7]
; GFX9-NEXT: s_endpgm
entry:
%gep = getelementptr i32, ptr addrspace(1) %out, i64 4
%val = atomicrmw volatile and ptr addrspace(1) %gep, i32 %in syncscope("agent") seq_cst
store i32 %val, ptr addrspace(1) %out2
ret void
}
define amdgpu_kernel void @atomic_and_i32_addr64_offset(ptr addrspace(1) %out, i32 %in, i64 %index) {
; SI-LABEL: atomic_and_i32_addr64_offset:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0xd
; SI-NEXT: s_load_dword s6, s[2:3], 0xb
; SI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_lshl_b64 s[4:5], s[4:5], 2
; SI-NEXT: s_mov_b32 s2, 0
; SI-NEXT: v_mov_b32_e32 v2, s6
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: v_mov_b32_e32 v1, s5
; SI-NEXT: buffer_atomic_and v2, v[0:1], s[0:3], 0 addr64 offset:16
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_and_i32_addr64_offset:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x34
; VI-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0x24
; VI-NEXT: s_load_dword s2, s[2:3], 0x2c
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_lshl_b64 s[0:1], s[0:1], 2
; VI-NEXT: s_add_u32 s0, s4, s0
; VI-NEXT: s_addc_u32 s1, s5, s1
; VI-NEXT: s_add_u32 s0, s0, 16
; VI-NEXT: s_addc_u32 s1, s1, 0
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: v_mov_b32_e32 v2, s2
; VI-NEXT: flat_atomic_and v[0:1], v2
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_and_i32_addr64_offset:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x34
; GFX9-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0x24
; GFX9-NEXT: s_load_dword s6, s[2:3], 0x2c
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: s_lshl_b64 s[0:1], s[0:1], 2
; GFX9-NEXT: s_add_u32 s0, s4, s0
; GFX9-NEXT: s_addc_u32 s1, s5, s1
; GFX9-NEXT: v_mov_b32_e32 v1, s6
; GFX9-NEXT: global_atomic_and v0, v1, s[0:1] offset:16
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: s_endpgm
entry:
%ptr = getelementptr i32, ptr addrspace(1) %out, i64 %index
%gep = getelementptr i32, ptr addrspace(1) %ptr, i64 4
%val = atomicrmw volatile and ptr addrspace(1) %gep, i32 %in syncscope("agent") seq_cst
ret void
}
define amdgpu_kernel void @atomic_and_i32_ret_addr64_offset(ptr addrspace(1) %out, ptr addrspace(1) %out2, i32 %in, i64 %index) {
; SI-LABEL: atomic_and_i32_ret_addr64_offset:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x9
; SI-NEXT: s_load_dwordx2 s[8:9], s[2:3], 0xf
; SI-NEXT: s_load_dword s2, s[2:3], 0xd
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mov_b32 s0, s6
; SI-NEXT: s_mov_b32 s1, s7
; SI-NEXT: s_lshl_b64 s[8:9], s[8:9], 2
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, s3
; SI-NEXT: v_mov_b32_e32 v2, s2
; SI-NEXT: v_mov_b32_e32 v0, s8
; SI-NEXT: v_mov_b32_e32 v1, s9
; SI-NEXT: buffer_atomic_and v2, v[0:1], s[4:7], 0 addr64 offset:16 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: buffer_store_dword v2, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_and_i32_ret_addr64_offset:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x3c
; VI-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x24
; VI-NEXT: s_load_dword s2, s[2:3], 0x34
; VI-NEXT: s_mov_b32 s3, 0xf000
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_lshl_b64 s[0:1], s[0:1], 2
; VI-NEXT: s_add_u32 s0, s4, s0
; VI-NEXT: s_addc_u32 s1, s5, s1
; VI-NEXT: s_add_u32 s0, s0, 16
; VI-NEXT: s_addc_u32 s1, s1, 0
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: v_mov_b32_e32 v2, s2
; VI-NEXT: flat_atomic_and v0, v[0:1], v2 glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: s_mov_b32 s0, s6
; VI-NEXT: s_mov_b32 s1, s7
; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_and_i32_ret_addr64_offset:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x3c
; GFX9-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x24
; GFX9-NEXT: s_load_dword s8, s[2:3], 0x34
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: s_lshl_b64 s[0:1], s[0:1], 2
; GFX9-NEXT: s_add_u32 s0, s4, s0
; GFX9-NEXT: s_addc_u32 s1, s5, s1
; GFX9-NEXT: v_mov_b32_e32 v1, s8
; GFX9-NEXT: global_atomic_and v1, v0, v1, s[0:1] offset:16 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: global_store_dword v0, v1, s[6:7]
; GFX9-NEXT: s_endpgm
entry:
%ptr = getelementptr i32, ptr addrspace(1) %out, i64 %index
%gep = getelementptr i32, ptr addrspace(1) %ptr, i64 4
%val = atomicrmw volatile and ptr addrspace(1) %gep, i32 %in syncscope("agent") seq_cst
store i32 %val, ptr addrspace(1) %out2
ret void
}
define amdgpu_kernel void @atomic_and_i32(ptr addrspace(1) %out, i32 %in) {
; SI-LABEL: atomic_and_i32:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dword s4, s[2:3], 0xb
; SI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: buffer_atomic_and v0, off, s[0:3], 0
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_and_i32:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dword s4, s[2:3], 0x2c
; VI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; VI-NEXT: s_mov_b32 s3, 0xf000
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: buffer_atomic_and v0, off, s[0:3], 0
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_and_i32:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dword s4, s[2:3], 0x2c
; GFX9-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v1, s4
; GFX9-NEXT: global_atomic_and v0, v1, s[0:1]
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: s_endpgm
entry:
%val = atomicrmw volatile and ptr addrspace(1) %out, i32 %in syncscope("agent") seq_cst
ret void
}
define amdgpu_kernel void @atomic_and_i32_ret(ptr addrspace(1) %out, ptr addrspace(1) %out2, i32 %in) {
; SI-LABEL: atomic_and_i32_ret:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x9
; SI-NEXT: s_load_dword s8, s[2:3], 0xd
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mov_b32 s0, s4
; SI-NEXT: s_mov_b32 s1, s5
; SI-NEXT: v_mov_b32_e32 v0, s8
; SI-NEXT: buffer_atomic_and v0, off, s[0:3], 0 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: s_mov_b32 s0, s6
; SI-NEXT: s_mov_b32 s1, s7
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_and_i32_ret:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x24
; VI-NEXT: s_load_dword s8, s[2:3], 0x34
; VI-NEXT: s_mov_b32 s3, 0xf000
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_mov_b32 s0, s4
; VI-NEXT: s_mov_b32 s1, s5
; VI-NEXT: v_mov_b32_e32 v0, s8
; VI-NEXT: buffer_atomic_and v0, off, s[0:3], 0 glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_mov_b32 s0, s6
; VI-NEXT: s_mov_b32 s1, s7
; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_and_i32_ret:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dword s0, s[2:3], 0x34
; GFX9-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x24
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v1, s0
; GFX9-NEXT: global_atomic_and v1, v0, v1, s[4:5] glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: global_store_dword v0, v1, s[6:7]
; GFX9-NEXT: s_endpgm
entry:
%val = atomicrmw volatile and ptr addrspace(1) %out, i32 %in syncscope("agent") seq_cst
store i32 %val, ptr addrspace(1) %out2
ret void
}
define amdgpu_kernel void @atomic_and_i32_addr64(ptr addrspace(1) %out, i32 %in, i64 %index) {
; SI-LABEL: atomic_and_i32_addr64:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0xd
; SI-NEXT: s_load_dword s6, s[2:3], 0xb
; SI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_lshl_b64 s[4:5], s[4:5], 2
; SI-NEXT: s_mov_b32 s2, 0
; SI-NEXT: v_mov_b32_e32 v2, s6
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: v_mov_b32_e32 v1, s5
; SI-NEXT: buffer_atomic_and v2, v[0:1], s[0:3], 0 addr64
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_and_i32_addr64:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x34
; VI-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0x24
; VI-NEXT: s_load_dword s2, s[2:3], 0x2c
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_lshl_b64 s[0:1], s[0:1], 2
; VI-NEXT: s_add_u32 s0, s4, s0
; VI-NEXT: s_addc_u32 s1, s5, s1
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: v_mov_b32_e32 v2, s2
; VI-NEXT: flat_atomic_and v[0:1], v2
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_and_i32_addr64:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x34
; GFX9-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0x24
; GFX9-NEXT: s_load_dword s6, s[2:3], 0x2c
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: s_lshl_b64 s[0:1], s[0:1], 2
; GFX9-NEXT: s_add_u32 s0, s4, s0
; GFX9-NEXT: s_addc_u32 s1, s5, s1
; GFX9-NEXT: v_mov_b32_e32 v1, s6
; GFX9-NEXT: global_atomic_and v0, v1, s[0:1]
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: s_endpgm
entry:
%ptr = getelementptr i32, ptr addrspace(1) %out, i64 %index
%val = atomicrmw volatile and ptr addrspace(1) %ptr, i32 %in syncscope("agent") seq_cst
ret void
}
define amdgpu_kernel void @atomic_and_i32_ret_addr64(ptr addrspace(1) %out, ptr addrspace(1) %out2, i32 %in, i64 %index) {
; SI-LABEL: atomic_and_i32_ret_addr64:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x9
; SI-NEXT: s_load_dwordx2 s[8:9], s[2:3], 0xf
; SI-NEXT: s_load_dword s2, s[2:3], 0xd
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mov_b32 s0, s6
; SI-NEXT: s_mov_b32 s1, s7
; SI-NEXT: s_lshl_b64 s[8:9], s[8:9], 2
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, s3
; SI-NEXT: v_mov_b32_e32 v2, s2
; SI-NEXT: v_mov_b32_e32 v0, s8
; SI-NEXT: v_mov_b32_e32 v1, s9
; SI-NEXT: buffer_atomic_and v2, v[0:1], s[4:7], 0 addr64 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: buffer_store_dword v2, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_and_i32_ret_addr64:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x3c
; VI-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x24
; VI-NEXT: s_load_dword s2, s[2:3], 0x34
; VI-NEXT: s_mov_b32 s3, 0xf000
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_lshl_b64 s[0:1], s[0:1], 2
; VI-NEXT: s_add_u32 s0, s4, s0
; VI-NEXT: s_addc_u32 s1, s5, s1
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: v_mov_b32_e32 v2, s2
; VI-NEXT: flat_atomic_and v0, v[0:1], v2 glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: s_mov_b32 s0, s6
; VI-NEXT: s_mov_b32 s1, s7
; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_and_i32_ret_addr64:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x3c
; GFX9-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x24
; GFX9-NEXT: s_load_dword s8, s[2:3], 0x34
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: s_lshl_b64 s[0:1], s[0:1], 2
; GFX9-NEXT: s_add_u32 s0, s4, s0
; GFX9-NEXT: s_addc_u32 s1, s5, s1
; GFX9-NEXT: v_mov_b32_e32 v1, s8
; GFX9-NEXT: global_atomic_and v1, v0, v1, s[0:1] glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: global_store_dword v0, v1, s[6:7]
; GFX9-NEXT: s_endpgm
entry:
%ptr = getelementptr i32, ptr addrspace(1) %out, i64 %index
%val = atomicrmw volatile and ptr addrspace(1) %ptr, i32 %in syncscope("agent") seq_cst
store i32 %val, ptr addrspace(1) %out2
ret void
}
define amdgpu_kernel void @atomic_sub_i32_offset(ptr addrspace(1) %out, i32 %in) {
; SI-LABEL: atomic_sub_i32_offset:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dword s4, s[2:3], 0xb
; SI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: buffer_atomic_sub v0, off, s[0:3], 0 offset:16
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_sub_i32_offset:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dword s4, s[2:3], 0x2c
; VI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; VI-NEXT: s_mov_b32 s3, 0xf000
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: buffer_atomic_sub v0, off, s[0:3], 0 offset:16
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_sub_i32_offset:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dword s4, s[2:3], 0x2c
; GFX9-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v1, s4
; GFX9-NEXT: global_atomic_sub v0, v1, s[0:1] offset:16
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: s_endpgm
entry:
%gep = getelementptr i32, ptr addrspace(1) %out, i64 4
%val = atomicrmw volatile sub ptr addrspace(1) %gep, i32 %in syncscope("agent") seq_cst
ret void
}
define amdgpu_kernel void @atomic_sub_i32_ret_offset(ptr addrspace(1) %out, ptr addrspace(1) %out2, i32 %in) {
; SI-LABEL: atomic_sub_i32_ret_offset:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x9
; SI-NEXT: s_load_dword s8, s[2:3], 0xd
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mov_b32 s0, s6
; SI-NEXT: s_mov_b32 s1, s7
; SI-NEXT: s_mov_b32 s6, s2
; SI-NEXT: s_mov_b32 s7, s3
; SI-NEXT: v_mov_b32_e32 v0, s8
; SI-NEXT: buffer_atomic_sub v0, off, s[4:7], 0 offset:16 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_sub_i32_ret_offset:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x24
; VI-NEXT: s_load_dword s0, s[2:3], 0x34
; VI-NEXT: s_mov_b32 s11, 0xf000
; VI-NEXT: s_mov_b32 s10, -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_mov_b32 s8, s6
; VI-NEXT: s_mov_b32 s9, s7
; VI-NEXT: s_mov_b32 s6, s10
; VI-NEXT: s_mov_b32 s7, s11
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: buffer_atomic_sub v0, off, s[4:7], 0 offset:16 glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: buffer_store_dword v0, off, s[8:11], 0
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_sub_i32_ret_offset:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dword s0, s[2:3], 0x34
; GFX9-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x24
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v1, s0
; GFX9-NEXT: global_atomic_sub v1, v0, v1, s[4:5] offset:16 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: global_store_dword v0, v1, s[6:7]
; GFX9-NEXT: s_endpgm
entry:
%gep = getelementptr i32, ptr addrspace(1) %out, i64 4
%val = atomicrmw volatile sub ptr addrspace(1) %gep, i32 %in syncscope("agent") seq_cst
store i32 %val, ptr addrspace(1) %out2
ret void
}
define amdgpu_kernel void @atomic_sub_i32_addr64_offset(ptr addrspace(1) %out, i32 %in, i64 %index) {
; SI-LABEL: atomic_sub_i32_addr64_offset:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0xd
; SI-NEXT: s_load_dword s6, s[2:3], 0xb
; SI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_lshl_b64 s[4:5], s[4:5], 2
; SI-NEXT: s_mov_b32 s2, 0
; SI-NEXT: v_mov_b32_e32 v2, s6
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: v_mov_b32_e32 v1, s5
; SI-NEXT: buffer_atomic_sub v2, v[0:1], s[0:3], 0 addr64 offset:16
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_sub_i32_addr64_offset:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x34
; VI-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0x24
; VI-NEXT: s_load_dword s2, s[2:3], 0x2c
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_lshl_b64 s[0:1], s[0:1], 2
; VI-NEXT: s_add_u32 s0, s4, s0
; VI-NEXT: s_addc_u32 s1, s5, s1
; VI-NEXT: s_add_u32 s0, s0, 16
; VI-NEXT: s_addc_u32 s1, s1, 0
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: v_mov_b32_e32 v2, s2
; VI-NEXT: flat_atomic_sub v[0:1], v2
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_sub_i32_addr64_offset:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x34
; GFX9-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0x24
; GFX9-NEXT: s_load_dword s6, s[2:3], 0x2c
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: s_lshl_b64 s[0:1], s[0:1], 2
; GFX9-NEXT: s_add_u32 s0, s4, s0
; GFX9-NEXT: s_addc_u32 s1, s5, s1
; GFX9-NEXT: v_mov_b32_e32 v1, s6
; GFX9-NEXT: global_atomic_sub v0, v1, s[0:1] offset:16
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: s_endpgm
entry:
%ptr = getelementptr i32, ptr addrspace(1) %out, i64 %index
%gep = getelementptr i32, ptr addrspace(1) %ptr, i64 4
%val = atomicrmw volatile sub ptr addrspace(1) %gep, i32 %in syncscope("agent") seq_cst
ret void
}
define amdgpu_kernel void @atomic_sub_i32_ret_addr64_offset(ptr addrspace(1) %out, ptr addrspace(1) %out2, i32 %in, i64 %index) {
; SI-LABEL: atomic_sub_i32_ret_addr64_offset:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x9
; SI-NEXT: s_load_dwordx2 s[8:9], s[2:3], 0xf
; SI-NEXT: s_load_dword s2, s[2:3], 0xd
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mov_b32 s0, s6
; SI-NEXT: s_mov_b32 s1, s7
; SI-NEXT: s_lshl_b64 s[8:9], s[8:9], 2
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, s3
; SI-NEXT: v_mov_b32_e32 v2, s2
; SI-NEXT: v_mov_b32_e32 v0, s8
; SI-NEXT: v_mov_b32_e32 v1, s9
; SI-NEXT: buffer_atomic_sub v2, v[0:1], s[4:7], 0 addr64 offset:16 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: buffer_store_dword v2, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_sub_i32_ret_addr64_offset:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x3c
; VI-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x24
; VI-NEXT: s_load_dword s2, s[2:3], 0x34
; VI-NEXT: s_mov_b32 s3, 0xf000
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_lshl_b64 s[0:1], s[0:1], 2
; VI-NEXT: s_add_u32 s0, s4, s0
; VI-NEXT: s_addc_u32 s1, s5, s1
; VI-NEXT: s_add_u32 s0, s0, 16
; VI-NEXT: s_addc_u32 s1, s1, 0
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: v_mov_b32_e32 v2, s2
; VI-NEXT: flat_atomic_sub v0, v[0:1], v2 glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: s_mov_b32 s0, s6
; VI-NEXT: s_mov_b32 s1, s7
; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_sub_i32_ret_addr64_offset:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x3c
; GFX9-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x24
; GFX9-NEXT: s_load_dword s8, s[2:3], 0x34
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: s_lshl_b64 s[0:1], s[0:1], 2
; GFX9-NEXT: s_add_u32 s0, s4, s0
; GFX9-NEXT: s_addc_u32 s1, s5, s1
; GFX9-NEXT: v_mov_b32_e32 v1, s8
; GFX9-NEXT: global_atomic_sub v1, v0, v1, s[0:1] offset:16 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: global_store_dword v0, v1, s[6:7]
; GFX9-NEXT: s_endpgm
entry:
%ptr = getelementptr i32, ptr addrspace(1) %out, i64 %index
%gep = getelementptr i32, ptr addrspace(1) %ptr, i64 4
%val = atomicrmw volatile sub ptr addrspace(1) %gep, i32 %in syncscope("agent") seq_cst
store i32 %val, ptr addrspace(1) %out2
ret void
}
define amdgpu_kernel void @atomic_sub_i32(ptr addrspace(1) %out, i32 %in) {
; SI-LABEL: atomic_sub_i32:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dword s4, s[2:3], 0xb
; SI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: buffer_atomic_sub v0, off, s[0:3], 0
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_sub_i32:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dword s4, s[2:3], 0x2c
; VI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; VI-NEXT: s_mov_b32 s3, 0xf000
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: buffer_atomic_sub v0, off, s[0:3], 0
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_sub_i32:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dword s4, s[2:3], 0x2c
; GFX9-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v1, s4
; GFX9-NEXT: global_atomic_sub v0, v1, s[0:1]
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: s_endpgm
entry:
%val = atomicrmw volatile sub ptr addrspace(1) %out, i32 %in syncscope("agent") seq_cst
ret void
}
define amdgpu_kernel void @atomic_sub_i32_ret(ptr addrspace(1) %out, ptr addrspace(1) %out2, i32 %in) {
; SI-LABEL: atomic_sub_i32_ret:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x9
; SI-NEXT: s_load_dword s8, s[2:3], 0xd
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mov_b32 s0, s4
; SI-NEXT: s_mov_b32 s1, s5
; SI-NEXT: v_mov_b32_e32 v0, s8
; SI-NEXT: buffer_atomic_sub v0, off, s[0:3], 0 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: s_mov_b32 s0, s6
; SI-NEXT: s_mov_b32 s1, s7
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_sub_i32_ret:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x24
; VI-NEXT: s_load_dword s8, s[2:3], 0x34
; VI-NEXT: s_mov_b32 s3, 0xf000
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_mov_b32 s0, s4
; VI-NEXT: s_mov_b32 s1, s5
; VI-NEXT: v_mov_b32_e32 v0, s8
; VI-NEXT: buffer_atomic_sub v0, off, s[0:3], 0 glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_mov_b32 s0, s6
; VI-NEXT: s_mov_b32 s1, s7
; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_sub_i32_ret:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dword s0, s[2:3], 0x34
; GFX9-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x24
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v1, s0
; GFX9-NEXT: global_atomic_sub v1, v0, v1, s[4:5] glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: global_store_dword v0, v1, s[6:7]
; GFX9-NEXT: s_endpgm
entry:
%val = atomicrmw volatile sub ptr addrspace(1) %out, i32 %in syncscope("agent") seq_cst
store i32 %val, ptr addrspace(1) %out2
ret void
}
define amdgpu_kernel void @atomic_sub_i32_addr64(ptr addrspace(1) %out, i32 %in, i64 %index) {
; SI-LABEL: atomic_sub_i32_addr64:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0xd
; SI-NEXT: s_load_dword s6, s[2:3], 0xb
; SI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_lshl_b64 s[4:5], s[4:5], 2
; SI-NEXT: s_mov_b32 s2, 0
; SI-NEXT: v_mov_b32_e32 v2, s6
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: v_mov_b32_e32 v1, s5
; SI-NEXT: buffer_atomic_sub v2, v[0:1], s[0:3], 0 addr64
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_sub_i32_addr64:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x34
; VI-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0x24
; VI-NEXT: s_load_dword s2, s[2:3], 0x2c
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_lshl_b64 s[0:1], s[0:1], 2
; VI-NEXT: s_add_u32 s0, s4, s0
; VI-NEXT: s_addc_u32 s1, s5, s1
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: v_mov_b32_e32 v2, s2
; VI-NEXT: flat_atomic_sub v[0:1], v2
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_sub_i32_addr64:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x34
; GFX9-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0x24
; GFX9-NEXT: s_load_dword s6, s[2:3], 0x2c
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: s_lshl_b64 s[0:1], s[0:1], 2
; GFX9-NEXT: s_add_u32 s0, s4, s0
; GFX9-NEXT: s_addc_u32 s1, s5, s1
; GFX9-NEXT: v_mov_b32_e32 v1, s6
; GFX9-NEXT: global_atomic_sub v0, v1, s[0:1]
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: s_endpgm
entry:
%ptr = getelementptr i32, ptr addrspace(1) %out, i64 %index
%val = atomicrmw volatile sub ptr addrspace(1) %ptr, i32 %in syncscope("agent") seq_cst
ret void
}
define amdgpu_kernel void @atomic_sub_i32_ret_addr64(ptr addrspace(1) %out, ptr addrspace(1) %out2, i32 %in, i64 %index) {
; SI-LABEL: atomic_sub_i32_ret_addr64:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x9
; SI-NEXT: s_load_dwordx2 s[8:9], s[2:3], 0xf
; SI-NEXT: s_load_dword s2, s[2:3], 0xd
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mov_b32 s0, s6
; SI-NEXT: s_mov_b32 s1, s7
; SI-NEXT: s_lshl_b64 s[8:9], s[8:9], 2
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, s3
; SI-NEXT: v_mov_b32_e32 v2, s2
; SI-NEXT: v_mov_b32_e32 v0, s8
; SI-NEXT: v_mov_b32_e32 v1, s9
; SI-NEXT: buffer_atomic_sub v2, v[0:1], s[4:7], 0 addr64 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: buffer_store_dword v2, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_sub_i32_ret_addr64:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x3c
; VI-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x24
; VI-NEXT: s_load_dword s2, s[2:3], 0x34
; VI-NEXT: s_mov_b32 s3, 0xf000
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_lshl_b64 s[0:1], s[0:1], 2
; VI-NEXT: s_add_u32 s0, s4, s0
; VI-NEXT: s_addc_u32 s1, s5, s1
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: v_mov_b32_e32 v2, s2
; VI-NEXT: flat_atomic_sub v0, v[0:1], v2 glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: s_mov_b32 s0, s6
; VI-NEXT: s_mov_b32 s1, s7
; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_sub_i32_ret_addr64:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x3c
; GFX9-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x24
; GFX9-NEXT: s_load_dword s8, s[2:3], 0x34
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: s_lshl_b64 s[0:1], s[0:1], 2
; GFX9-NEXT: s_add_u32 s0, s4, s0
; GFX9-NEXT: s_addc_u32 s1, s5, s1
; GFX9-NEXT: v_mov_b32_e32 v1, s8
; GFX9-NEXT: global_atomic_sub v1, v0, v1, s[0:1] glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: global_store_dword v0, v1, s[6:7]
; GFX9-NEXT: s_endpgm
entry:
%ptr = getelementptr i32, ptr addrspace(1) %out, i64 %index
%val = atomicrmw volatile sub ptr addrspace(1) %ptr, i32 %in syncscope("agent") seq_cst
store i32 %val, ptr addrspace(1) %out2
ret void
}
define amdgpu_kernel void @atomic_max_i32_offset(ptr addrspace(1) %out, i32 %in) {
; SI-LABEL: atomic_max_i32_offset:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dword s4, s[2:3], 0xb
; SI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: buffer_atomic_smax v0, off, s[0:3], 0 offset:16
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_max_i32_offset:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dword s4, s[2:3], 0x2c
; VI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; VI-NEXT: s_mov_b32 s3, 0xf000
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: buffer_atomic_smax v0, off, s[0:3], 0 offset:16
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_max_i32_offset:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dword s4, s[2:3], 0x2c
; GFX9-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v1, s4
; GFX9-NEXT: global_atomic_smax v0, v1, s[0:1] offset:16
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: s_endpgm
entry:
%gep = getelementptr i32, ptr addrspace(1) %out, i64 4
%val = atomicrmw volatile max ptr addrspace(1) %gep, i32 %in syncscope("agent") seq_cst
ret void
}
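
; The following max tests use syncscope("workgroup"), so no buffer_wbinvl1
; cache invalidation is emitted, unlike the agent-scoped tests above.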
define amdgpu_kernel void @atomic_max_i32_ret_offset(ptr addrspace(1) %out, ptr addrspace(1) %out2, i32 %in) {
; SI-LABEL: atomic_max_i32_ret_offset:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x9
; SI-NEXT: s_load_dword s8, s[2:3], 0xd
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mov_b32 s0, s6
; SI-NEXT: s_mov_b32 s1, s7
; SI-NEXT: s_mov_b32 s6, s2
; SI-NEXT: s_mov_b32 s7, s3
; SI-NEXT: v_mov_b32_e32 v0, s8
; SI-NEXT: buffer_atomic_smax v0, off, s[4:7], 0 offset:16 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_max_i32_ret_offset:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x24
; VI-NEXT: s_load_dword s0, s[2:3], 0x34
; VI-NEXT: s_mov_b32 s11, 0xf000
; VI-NEXT: s_mov_b32 s10, -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_mov_b32 s8, s6
; VI-NEXT: s_mov_b32 s9, s7
; VI-NEXT: s_mov_b32 s6, s10
; VI-NEXT: s_mov_b32 s7, s11
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: buffer_atomic_smax v0, off, s[4:7], 0 offset:16 glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_store_dword v0, off, s[8:11], 0
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_max_i32_ret_offset:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dword s0, s[2:3], 0x34
; GFX9-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x24
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v1, s0
; GFX9-NEXT: global_atomic_smax v1, v0, v1, s[4:5] offset:16 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: global_store_dword v0, v1, s[6:7]
; GFX9-NEXT: s_endpgm
entry:
%gep = getelementptr i32, ptr addrspace(1) %out, i64 4
%val = atomicrmw volatile max ptr addrspace(1) %gep, i32 %in syncscope("workgroup") seq_cst
store i32 %val, ptr addrspace(1) %out2
ret void
}
define amdgpu_kernel void @atomic_max_i32_addr64_offset(ptr addrspace(1) %out, i32 %in, i64 %index) {
; SI-LABEL: atomic_max_i32_addr64_offset:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0xd
; SI-NEXT: s_load_dword s6, s[2:3], 0xb
; SI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_lshl_b64 s[4:5], s[4:5], 2
; SI-NEXT: s_mov_b32 s2, 0
; SI-NEXT: v_mov_b32_e32 v2, s6
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: v_mov_b32_e32 v1, s5
; SI-NEXT: buffer_atomic_smax v2, v[0:1], s[0:3], 0 addr64 offset:16
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_max_i32_addr64_offset:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x34
; VI-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0x24
; VI-NEXT: s_load_dword s2, s[2:3], 0x2c
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_lshl_b64 s[0:1], s[0:1], 2
; VI-NEXT: s_add_u32 s0, s4, s0
; VI-NEXT: s_addc_u32 s1, s5, s1
; VI-NEXT: s_add_u32 s0, s0, 16
; VI-NEXT: s_addc_u32 s1, s1, 0
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: v_mov_b32_e32 v2, s2
; VI-NEXT: flat_atomic_smax v[0:1], v2
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_max_i32_addr64_offset:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x34
; GFX9-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0x24
; GFX9-NEXT: s_load_dword s6, s[2:3], 0x2c
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: s_lshl_b64 s[0:1], s[0:1], 2
; GFX9-NEXT: s_add_u32 s0, s4, s0
; GFX9-NEXT: s_addc_u32 s1, s5, s1
; GFX9-NEXT: v_mov_b32_e32 v1, s6
; GFX9-NEXT: global_atomic_smax v0, v1, s[0:1] offset:16
; GFX9-NEXT: s_endpgm
entry:
%ptr = getelementptr i32, ptr addrspace(1) %out, i64 %index
%gep = getelementptr i32, ptr addrspace(1) %ptr, i64 4
%val = atomicrmw volatile max ptr addrspace(1) %gep, i32 %in syncscope("workgroup") seq_cst
ret void
}

define amdgpu_kernel void @atomic_max_i32_ret_addr64_offset(ptr addrspace(1) %out, ptr addrspace(1) %out2, i32 %in, i64 %index) {
; SI-LABEL: atomic_max_i32_ret_addr64_offset:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x9
; SI-NEXT: s_load_dwordx2 s[8:9], s[2:3], 0xf
; SI-NEXT: s_load_dword s2, s[2:3], 0xd
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mov_b32 s0, s6
; SI-NEXT: s_mov_b32 s1, s7
; SI-NEXT: s_lshl_b64 s[8:9], s[8:9], 2
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, s3
; SI-NEXT: v_mov_b32_e32 v2, s2
; SI-NEXT: v_mov_b32_e32 v0, s8
; SI-NEXT: v_mov_b32_e32 v1, s9
; SI-NEXT: buffer_atomic_smax v2, v[0:1], s[4:7], 0 addr64 offset:16 glc
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_store_dword v2, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_max_i32_ret_addr64_offset:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x3c
; VI-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x24
; VI-NEXT: s_load_dword s2, s[2:3], 0x34
; VI-NEXT: s_mov_b32 s3, 0xf000
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_lshl_b64 s[0:1], s[0:1], 2
; VI-NEXT: s_add_u32 s0, s4, s0
; VI-NEXT: s_addc_u32 s1, s5, s1
; VI-NEXT: s_add_u32 s0, s0, 16
; VI-NEXT: s_addc_u32 s1, s1, 0
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: v_mov_b32_e32 v2, s2
; VI-NEXT: flat_atomic_smax v0, v[0:1], v2 glc
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: s_mov_b32 s0, s6
; VI-NEXT: s_mov_b32 s1, s7
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_max_i32_ret_addr64_offset:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x3c
; GFX9-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x24
; GFX9-NEXT: s_load_dword s8, s[2:3], 0x34
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: s_lshl_b64 s[0:1], s[0:1], 2
; GFX9-NEXT: s_add_u32 s0, s4, s0
; GFX9-NEXT: s_addc_u32 s1, s5, s1
; GFX9-NEXT: v_mov_b32_e32 v1, s8
; GFX9-NEXT: global_atomic_smax v1, v0, v1, s[0:1] offset:16 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: global_store_dword v0, v1, s[6:7]
; GFX9-NEXT: s_endpgm
entry:
%ptr = getelementptr i32, ptr addrspace(1) %out, i64 %index
%gep = getelementptr i32, ptr addrspace(1) %ptr, i64 4
%val = atomicrmw volatile max ptr addrspace(1) %gep, i32 %in syncscope("workgroup") seq_cst
store i32 %val, ptr addrspace(1) %out2
ret void
}

define amdgpu_kernel void @atomic_max_i32(ptr addrspace(1) %out, i32 %in) {
; SI-LABEL: atomic_max_i32:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dword s4, s[2:3], 0xb
; SI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: buffer_atomic_smax v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_max_i32:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dword s4, s[2:3], 0x2c
; VI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; VI-NEXT: s_mov_b32 s3, 0xf000
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: buffer_atomic_smax v0, off, s[0:3], 0
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_max_i32:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dword s4, s[2:3], 0x2c
; GFX9-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v1, s4
; GFX9-NEXT: global_atomic_smax v0, v1, s[0:1]
; GFX9-NEXT: s_endpgm
entry:
%val = atomicrmw volatile max ptr addrspace(1) %out, i32 %in syncscope("workgroup") seq_cst
ret void
}

define amdgpu_kernel void @atomic_max_i32_ret(ptr addrspace(1) %out, ptr addrspace(1) %out2, i32 %in) {
; SI-LABEL: atomic_max_i32_ret:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x9
; SI-NEXT: s_load_dword s8, s[2:3], 0xd
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mov_b32 s0, s4
; SI-NEXT: s_mov_b32 s1, s5
; SI-NEXT: v_mov_b32_e32 v0, s8
; SI-NEXT: buffer_atomic_smax v0, off, s[0:3], 0 glc
; SI-NEXT: s_mov_b32 s0, s6
; SI-NEXT: s_mov_b32 s1, s7
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_max_i32_ret:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x24
; VI-NEXT: s_load_dword s8, s[2:3], 0x34
; VI-NEXT: s_mov_b32 s3, 0xf000
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_mov_b32 s0, s4
; VI-NEXT: s_mov_b32 s1, s5
; VI-NEXT: v_mov_b32_e32 v0, s8
; VI-NEXT: buffer_atomic_smax v0, off, s[0:3], 0 glc
; VI-NEXT: s_mov_b32 s0, s6
; VI-NEXT: s_mov_b32 s1, s7
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_max_i32_ret:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dword s0, s[2:3], 0x34
; GFX9-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x24
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v1, s0
; GFX9-NEXT: global_atomic_smax v1, v0, v1, s[4:5] glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: global_store_dword v0, v1, s[6:7]
; GFX9-NEXT: s_endpgm
entry:
%val = atomicrmw volatile max ptr addrspace(1) %out, i32 %in syncscope("workgroup") seq_cst
store i32 %val, ptr addrspace(1) %out2
ret void
}

define amdgpu_kernel void @atomic_max_i32_addr64(ptr addrspace(1) %out, i32 %in, i64 %index) {
; SI-LABEL: atomic_max_i32_addr64:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0xd
; SI-NEXT: s_load_dword s6, s[2:3], 0xb
; SI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_lshl_b64 s[4:5], s[4:5], 2
; SI-NEXT: s_mov_b32 s2, 0
; SI-NEXT: v_mov_b32_e32 v2, s6
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: v_mov_b32_e32 v1, s5
; SI-NEXT: buffer_atomic_smax v2, v[0:1], s[0:3], 0 addr64
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_max_i32_addr64:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x34
; VI-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0x24
; VI-NEXT: s_load_dword s2, s[2:3], 0x2c
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_lshl_b64 s[0:1], s[0:1], 2
; VI-NEXT: s_add_u32 s0, s4, s0
; VI-NEXT: s_addc_u32 s1, s5, s1
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: v_mov_b32_e32 v2, s2
; VI-NEXT: flat_atomic_smax v[0:1], v2
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_max_i32_addr64:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x34
; GFX9-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0x24
; GFX9-NEXT: s_load_dword s6, s[2:3], 0x2c
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: s_lshl_b64 s[0:1], s[0:1], 2
; GFX9-NEXT: s_add_u32 s0, s4, s0
; GFX9-NEXT: s_addc_u32 s1, s5, s1
; GFX9-NEXT: v_mov_b32_e32 v1, s6
; GFX9-NEXT: global_atomic_smax v0, v1, s[0:1]
; GFX9-NEXT: s_endpgm
entry:
%ptr = getelementptr i32, ptr addrspace(1) %out, i64 %index
%val = atomicrmw volatile max ptr addrspace(1) %ptr, i32 %in syncscope("workgroup") seq_cst
ret void
}

define amdgpu_kernel void @atomic_max_i32_ret_addr64(ptr addrspace(1) %out, ptr addrspace(1) %out2, i32 %in, i64 %index) {
; SI-LABEL: atomic_max_i32_ret_addr64:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x9
; SI-NEXT: s_load_dwordx2 s[8:9], s[2:3], 0xf
; SI-NEXT: s_load_dword s2, s[2:3], 0xd
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mov_b32 s0, s6
; SI-NEXT: s_mov_b32 s1, s7
; SI-NEXT: s_lshl_b64 s[8:9], s[8:9], 2
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, s3
; SI-NEXT: v_mov_b32_e32 v2, s2
; SI-NEXT: v_mov_b32_e32 v0, s8
; SI-NEXT: v_mov_b32_e32 v1, s9
; SI-NEXT: buffer_atomic_smax v2, v[0:1], s[4:7], 0 addr64 glc
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_store_dword v2, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_max_i32_ret_addr64:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x3c
; VI-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x24
; VI-NEXT: s_load_dword s2, s[2:3], 0x34
; VI-NEXT: s_mov_b32 s3, 0xf000
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_lshl_b64 s[0:1], s[0:1], 2
; VI-NEXT: s_add_u32 s0, s4, s0
; VI-NEXT: s_addc_u32 s1, s5, s1
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: v_mov_b32_e32 v2, s2
; VI-NEXT: flat_atomic_smax v0, v[0:1], v2 glc
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: s_mov_b32 s0, s6
; VI-NEXT: s_mov_b32 s1, s7
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_max_i32_ret_addr64:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x3c
; GFX9-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x24
; GFX9-NEXT: s_load_dword s8, s[2:3], 0x34
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: s_lshl_b64 s[0:1], s[0:1], 2
; GFX9-NEXT: s_add_u32 s0, s4, s0
; GFX9-NEXT: s_addc_u32 s1, s5, s1
; GFX9-NEXT: v_mov_b32_e32 v1, s8
; GFX9-NEXT: global_atomic_smax v1, v0, v1, s[0:1] glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: global_store_dword v0, v1, s[6:7]
; GFX9-NEXT: s_endpgm
entry:
%ptr = getelementptr i32, ptr addrspace(1) %out, i64 %index
%val = atomicrmw volatile max ptr addrspace(1) %ptr, i32 %in syncscope("workgroup") seq_cst
store i32 %val, ptr addrspace(1) %out2
ret void
}

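; umax tests: unsigned max selects buffer_atomic_umax / flat_atomic_umax /
; global_atomic_umax. These use workgroup scope, so no cache invalidate
; (buffer_wbinvl1*) is expected in the checks.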
define amdgpu_kernel void @atomic_umax_i32_offset(ptr addrspace(1) %out, i32 %in) {
; SI-LABEL: atomic_umax_i32_offset:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dword s4, s[2:3], 0xb
; SI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: buffer_atomic_umax v0, off, s[0:3], 0 offset:16
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_umax_i32_offset:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dword s4, s[2:3], 0x2c
; VI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; VI-NEXT: s_mov_b32 s3, 0xf000
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: buffer_atomic_umax v0, off, s[0:3], 0 offset:16
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_umax_i32_offset:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dword s4, s[2:3], 0x2c
; GFX9-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v1, s4
; GFX9-NEXT: global_atomic_umax v0, v1, s[0:1] offset:16
; GFX9-NEXT: s_endpgm
entry:
%gep = getelementptr i32, ptr addrspace(1) %out, i64 4
%val = atomicrmw volatile umax ptr addrspace(1) %gep, i32 %in syncscope("workgroup") seq_cst
ret void
}

define amdgpu_kernel void @atomic_umax_i32_ret_offset(ptr addrspace(1) %out, ptr addrspace(1) %out2, i32 %in) {
; SI-LABEL: atomic_umax_i32_ret_offset:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x9
; SI-NEXT: s_load_dword s8, s[2:3], 0xd
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mov_b32 s0, s6
; SI-NEXT: s_mov_b32 s1, s7
; SI-NEXT: s_mov_b32 s6, s2
; SI-NEXT: s_mov_b32 s7, s3
; SI-NEXT: v_mov_b32_e32 v0, s8
; SI-NEXT: buffer_atomic_umax v0, off, s[4:7], 0 offset:16 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_umax_i32_ret_offset:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x24
; VI-NEXT: s_load_dword s0, s[2:3], 0x34
; VI-NEXT: s_mov_b32 s11, 0xf000
; VI-NEXT: s_mov_b32 s10, -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_mov_b32 s8, s6
; VI-NEXT: s_mov_b32 s9, s7
; VI-NEXT: s_mov_b32 s6, s10
; VI-NEXT: s_mov_b32 s7, s11
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: buffer_atomic_umax v0, off, s[4:7], 0 offset:16 glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_store_dword v0, off, s[8:11], 0
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_umax_i32_ret_offset:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dword s0, s[2:3], 0x34
; GFX9-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x24
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v1, s0
; GFX9-NEXT: global_atomic_umax v1, v0, v1, s[4:5] offset:16 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: global_store_dword v0, v1, s[6:7]
; GFX9-NEXT: s_endpgm
entry:
%gep = getelementptr i32, ptr addrspace(1) %out, i64 4
%val = atomicrmw volatile umax ptr addrspace(1) %gep, i32 %in syncscope("workgroup") seq_cst
store i32 %val, ptr addrspace(1) %out2
ret void
}

define amdgpu_kernel void @atomic_umax_i32_addr64_offset(ptr addrspace(1) %out, i32 %in, i64 %index) {
; SI-LABEL: atomic_umax_i32_addr64_offset:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0xd
; SI-NEXT: s_load_dword s6, s[2:3], 0xb
; SI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_lshl_b64 s[4:5], s[4:5], 2
; SI-NEXT: s_mov_b32 s2, 0
; SI-NEXT: v_mov_b32_e32 v2, s6
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: v_mov_b32_e32 v1, s5
; SI-NEXT: buffer_atomic_umax v2, v[0:1], s[0:3], 0 addr64 offset:16
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_umax_i32_addr64_offset:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x34
; VI-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0x24
; VI-NEXT: s_load_dword s2, s[2:3], 0x2c
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_lshl_b64 s[0:1], s[0:1], 2
; VI-NEXT: s_add_u32 s0, s4, s0
; VI-NEXT: s_addc_u32 s1, s5, s1
; VI-NEXT: s_add_u32 s0, s0, 16
; VI-NEXT: s_addc_u32 s1, s1, 0
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: v_mov_b32_e32 v2, s2
; VI-NEXT: flat_atomic_umax v[0:1], v2
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_umax_i32_addr64_offset:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x34
; GFX9-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0x24
; GFX9-NEXT: s_load_dword s6, s[2:3], 0x2c
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: s_lshl_b64 s[0:1], s[0:1], 2
; GFX9-NEXT: s_add_u32 s0, s4, s0
; GFX9-NEXT: s_addc_u32 s1, s5, s1
; GFX9-NEXT: v_mov_b32_e32 v1, s6
; GFX9-NEXT: global_atomic_umax v0, v1, s[0:1] offset:16
; GFX9-NEXT: s_endpgm
entry:
%ptr = getelementptr i32, ptr addrspace(1) %out, i64 %index
%gep = getelementptr i32, ptr addrspace(1) %ptr, i64 4
%val = atomicrmw volatile umax ptr addrspace(1) %gep, i32 %in syncscope("workgroup") seq_cst
ret void
}

define amdgpu_kernel void @atomic_umax_i32_ret_addr64_offset(ptr addrspace(1) %out, ptr addrspace(1) %out2, i32 %in, i64 %index) {
; SI-LABEL: atomic_umax_i32_ret_addr64_offset:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x9
; SI-NEXT: s_load_dwordx2 s[8:9], s[2:3], 0xf
; SI-NEXT: s_load_dword s2, s[2:3], 0xd
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mov_b32 s0, s6
; SI-NEXT: s_mov_b32 s1, s7
; SI-NEXT: s_lshl_b64 s[8:9], s[8:9], 2
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, s3
; SI-NEXT: v_mov_b32_e32 v2, s2
; SI-NEXT: v_mov_b32_e32 v0, s8
; SI-NEXT: v_mov_b32_e32 v1, s9
; SI-NEXT: buffer_atomic_umax v2, v[0:1], s[4:7], 0 addr64 offset:16 glc
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_store_dword v2, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_umax_i32_ret_addr64_offset:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x3c
; VI-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x24
; VI-NEXT: s_load_dword s2, s[2:3], 0x34
; VI-NEXT: s_mov_b32 s3, 0xf000
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_lshl_b64 s[0:1], s[0:1], 2
; VI-NEXT: s_add_u32 s0, s4, s0
; VI-NEXT: s_addc_u32 s1, s5, s1
; VI-NEXT: s_add_u32 s0, s0, 16
; VI-NEXT: s_addc_u32 s1, s1, 0
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: v_mov_b32_e32 v2, s2
; VI-NEXT: flat_atomic_umax v0, v[0:1], v2 glc
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: s_mov_b32 s0, s6
; VI-NEXT: s_mov_b32 s1, s7
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_umax_i32_ret_addr64_offset:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x3c
; GFX9-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x24
; GFX9-NEXT: s_load_dword s8, s[2:3], 0x34
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: s_lshl_b64 s[0:1], s[0:1], 2
; GFX9-NEXT: s_add_u32 s0, s4, s0
; GFX9-NEXT: s_addc_u32 s1, s5, s1
; GFX9-NEXT: v_mov_b32_e32 v1, s8
; GFX9-NEXT: global_atomic_umax v1, v0, v1, s[0:1] offset:16 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: global_store_dword v0, v1, s[6:7]
; GFX9-NEXT: s_endpgm
entry:
%ptr = getelementptr i32, ptr addrspace(1) %out, i64 %index
%gep = getelementptr i32, ptr addrspace(1) %ptr, i64 4
%val = atomicrmw volatile umax ptr addrspace(1) %gep, i32 %in syncscope("workgroup") seq_cst
store i32 %val, ptr addrspace(1) %out2
ret void
}

define amdgpu_kernel void @atomic_umax_i32(ptr addrspace(1) %out, i32 %in) {
; SI-LABEL: atomic_umax_i32:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dword s4, s[2:3], 0xb
; SI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: buffer_atomic_umax v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_umax_i32:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dword s4, s[2:3], 0x2c
; VI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; VI-NEXT: s_mov_b32 s3, 0xf000
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: buffer_atomic_umax v0, off, s[0:3], 0
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_umax_i32:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dword s4, s[2:3], 0x2c
; GFX9-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v1, s4
; GFX9-NEXT: global_atomic_umax v0, v1, s[0:1]
; GFX9-NEXT: s_endpgm
entry:
%val = atomicrmw volatile umax ptr addrspace(1) %out, i32 %in syncscope("workgroup") seq_cst
ret void
}

define amdgpu_kernel void @atomic_umax_i32_ret(ptr addrspace(1) %out, ptr addrspace(1) %out2, i32 %in) {
; SI-LABEL: atomic_umax_i32_ret:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x9
; SI-NEXT: s_load_dword s8, s[2:3], 0xd
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mov_b32 s0, s4
; SI-NEXT: s_mov_b32 s1, s5
; SI-NEXT: v_mov_b32_e32 v0, s8
; SI-NEXT: buffer_atomic_umax v0, off, s[0:3], 0 glc
; SI-NEXT: s_mov_b32 s0, s6
; SI-NEXT: s_mov_b32 s1, s7
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_umax_i32_ret:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x24
; VI-NEXT: s_load_dword s8, s[2:3], 0x34
; VI-NEXT: s_mov_b32 s3, 0xf000
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_mov_b32 s0, s4
; VI-NEXT: s_mov_b32 s1, s5
; VI-NEXT: v_mov_b32_e32 v0, s8
; VI-NEXT: buffer_atomic_umax v0, off, s[0:3], 0 glc
; VI-NEXT: s_mov_b32 s0, s6
; VI-NEXT: s_mov_b32 s1, s7
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_umax_i32_ret:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dword s0, s[2:3], 0x34
; GFX9-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x24
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v1, s0
; GFX9-NEXT: global_atomic_umax v1, v0, v1, s[4:5] glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: global_store_dword v0, v1, s[6:7]
; GFX9-NEXT: s_endpgm
entry:
%val = atomicrmw volatile umax ptr addrspace(1) %out, i32 %in syncscope("workgroup") seq_cst
store i32 %val, ptr addrspace(1) %out2
ret void
}

define amdgpu_kernel void @atomic_umax_i32_addr64(ptr addrspace(1) %out, i32 %in, i64 %index) {
; SI-LABEL: atomic_umax_i32_addr64:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0xd
; SI-NEXT: s_load_dword s6, s[2:3], 0xb
; SI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_lshl_b64 s[4:5], s[4:5], 2
; SI-NEXT: s_mov_b32 s2, 0
; SI-NEXT: v_mov_b32_e32 v2, s6
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: v_mov_b32_e32 v1, s5
; SI-NEXT: buffer_atomic_umax v2, v[0:1], s[0:3], 0 addr64
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_umax_i32_addr64:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x34
; VI-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0x24
; VI-NEXT: s_load_dword s2, s[2:3], 0x2c
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_lshl_b64 s[0:1], s[0:1], 2
; VI-NEXT: s_add_u32 s0, s4, s0
; VI-NEXT: s_addc_u32 s1, s5, s1
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: v_mov_b32_e32 v2, s2
; VI-NEXT: flat_atomic_umax v[0:1], v2
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_umax_i32_addr64:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x34
; GFX9-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0x24
; GFX9-NEXT: s_load_dword s6, s[2:3], 0x2c
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: s_lshl_b64 s[0:1], s[0:1], 2
; GFX9-NEXT: s_add_u32 s0, s4, s0
; GFX9-NEXT: s_addc_u32 s1, s5, s1
; GFX9-NEXT: v_mov_b32_e32 v1, s6
; GFX9-NEXT: global_atomic_umax v0, v1, s[0:1]
; GFX9-NEXT: s_endpgm
entry:
%ptr = getelementptr i32, ptr addrspace(1) %out, i64 %index
%val = atomicrmw volatile umax ptr addrspace(1) %ptr, i32 %in syncscope("workgroup") seq_cst
ret void
}

define amdgpu_kernel void @atomic_umax_i32_ret_addr64(ptr addrspace(1) %out, ptr addrspace(1) %out2, i32 %in, i64 %index) {
; SI-LABEL: atomic_umax_i32_ret_addr64:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x9
; SI-NEXT: s_load_dwordx2 s[8:9], s[2:3], 0xf
; SI-NEXT: s_load_dword s2, s[2:3], 0xd
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mov_b32 s0, s6
; SI-NEXT: s_mov_b32 s1, s7
; SI-NEXT: s_lshl_b64 s[8:9], s[8:9], 2
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, s3
; SI-NEXT: v_mov_b32_e32 v2, s2
; SI-NEXT: v_mov_b32_e32 v0, s8
; SI-NEXT: v_mov_b32_e32 v1, s9
; SI-NEXT: buffer_atomic_umax v2, v[0:1], s[4:7], 0 addr64 glc
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_store_dword v2, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_umax_i32_ret_addr64:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x3c
; VI-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x24
; VI-NEXT: s_load_dword s2, s[2:3], 0x34
; VI-NEXT: s_mov_b32 s3, 0xf000
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_lshl_b64 s[0:1], s[0:1], 2
; VI-NEXT: s_add_u32 s0, s4, s0
; VI-NEXT: s_addc_u32 s1, s5, s1
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: v_mov_b32_e32 v2, s2
; VI-NEXT: flat_atomic_umax v0, v[0:1], v2 glc
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: s_mov_b32 s0, s6
; VI-NEXT: s_mov_b32 s1, s7
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_umax_i32_ret_addr64:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x3c
; GFX9-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x24
; GFX9-NEXT: s_load_dword s8, s[2:3], 0x34
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: s_lshl_b64 s[0:1], s[0:1], 2
; GFX9-NEXT: s_add_u32 s0, s4, s0
; GFX9-NEXT: s_addc_u32 s1, s5, s1
; GFX9-NEXT: v_mov_b32_e32 v1, s8
; GFX9-NEXT: global_atomic_umax v1, v0, v1, s[0:1] glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: global_store_dword v0, v1, s[6:7]
; GFX9-NEXT: s_endpgm
entry:
%ptr = getelementptr i32, ptr addrspace(1) %out, i64 %index
%val = atomicrmw volatile umax ptr addrspace(1) %ptr, i32 %in syncscope("workgroup") seq_cst
store i32 %val, ptr addrspace(1) %out2
ret void
}

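; min tests: signed min selects the *_atomic_smin forms; same workgroup-scope
; patterns as the max tests above.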
define amdgpu_kernel void @atomic_min_i32_offset(ptr addrspace(1) %out, i32 %in) {
; SI-LABEL: atomic_min_i32_offset:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dword s4, s[2:3], 0xb
; SI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: buffer_atomic_smin v0, off, s[0:3], 0 offset:16
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_min_i32_offset:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dword s4, s[2:3], 0x2c
; VI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; VI-NEXT: s_mov_b32 s3, 0xf000
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: buffer_atomic_smin v0, off, s[0:3], 0 offset:16
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_min_i32_offset:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dword s4, s[2:3], 0x2c
; GFX9-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v1, s4
; GFX9-NEXT: global_atomic_smin v0, v1, s[0:1] offset:16
; GFX9-NEXT: s_endpgm
entry:
%gep = getelementptr i32, ptr addrspace(1) %out, i64 4
%val = atomicrmw volatile min ptr addrspace(1) %gep, i32 %in syncscope("workgroup") seq_cst
ret void
}

define amdgpu_kernel void @atomic_min_i32_ret_offset(ptr addrspace(1) %out, ptr addrspace(1) %out2, i32 %in) {
; SI-LABEL: atomic_min_i32_ret_offset:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x9
; SI-NEXT: s_load_dword s8, s[2:3], 0xd
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mov_b32 s0, s6
; SI-NEXT: s_mov_b32 s1, s7
; SI-NEXT: s_mov_b32 s6, s2
; SI-NEXT: s_mov_b32 s7, s3
; SI-NEXT: v_mov_b32_e32 v0, s8
; SI-NEXT: buffer_atomic_smin v0, off, s[4:7], 0 offset:16 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_min_i32_ret_offset:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x24
; VI-NEXT: s_load_dword s0, s[2:3], 0x34
; VI-NEXT: s_mov_b32 s11, 0xf000
; VI-NEXT: s_mov_b32 s10, -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_mov_b32 s8, s6
; VI-NEXT: s_mov_b32 s9, s7
; VI-NEXT: s_mov_b32 s6, s10
; VI-NEXT: s_mov_b32 s7, s11
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: buffer_atomic_smin v0, off, s[4:7], 0 offset:16 glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_store_dword v0, off, s[8:11], 0
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_min_i32_ret_offset:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dword s0, s[2:3], 0x34
; GFX9-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x24
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v1, s0
; GFX9-NEXT: global_atomic_smin v1, v0, v1, s[4:5] offset:16 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: global_store_dword v0, v1, s[6:7]
; GFX9-NEXT: s_endpgm
entry:
%gep = getelementptr i32, ptr addrspace(1) %out, i64 4
%val = atomicrmw volatile min ptr addrspace(1) %gep, i32 %in syncscope("workgroup") seq_cst
store i32 %val, ptr addrspace(1) %out2
ret void
}

define amdgpu_kernel void @atomic_min_i32_addr64_offset(ptr addrspace(1) %out, i32 %in, i64 %index) {
; SI-LABEL: atomic_min_i32_addr64_offset:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0xd
; SI-NEXT: s_load_dword s6, s[2:3], 0xb
; SI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_lshl_b64 s[4:5], s[4:5], 2
; SI-NEXT: s_mov_b32 s2, 0
; SI-NEXT: v_mov_b32_e32 v2, s6
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: v_mov_b32_e32 v1, s5
; SI-NEXT: buffer_atomic_smin v2, v[0:1], s[0:3], 0 addr64 offset:16
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_min_i32_addr64_offset:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x34
; VI-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0x24
; VI-NEXT: s_load_dword s2, s[2:3], 0x2c
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_lshl_b64 s[0:1], s[0:1], 2
; VI-NEXT: s_add_u32 s0, s4, s0
; VI-NEXT: s_addc_u32 s1, s5, s1
; VI-NEXT: s_add_u32 s0, s0, 16
; VI-NEXT: s_addc_u32 s1, s1, 0
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: v_mov_b32_e32 v2, s2
; VI-NEXT: flat_atomic_smin v[0:1], v2
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_min_i32_addr64_offset:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x34
; GFX9-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0x24
; GFX9-NEXT: s_load_dword s6, s[2:3], 0x2c
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: s_lshl_b64 s[0:1], s[0:1], 2
; GFX9-NEXT: s_add_u32 s0, s4, s0
; GFX9-NEXT: s_addc_u32 s1, s5, s1
; GFX9-NEXT: v_mov_b32_e32 v1, s6
; GFX9-NEXT: global_atomic_smin v0, v1, s[0:1] offset:16
; GFX9-NEXT: s_endpgm
entry:
%ptr = getelementptr i32, ptr addrspace(1) %out, i64 %index
%gep = getelementptr i32, ptr addrspace(1) %ptr, i64 4
%val = atomicrmw volatile min ptr addrspace(1) %gep, i32 %in syncscope("workgroup") seq_cst
ret void
}

define amdgpu_kernel void @atomic_min_i32_ret_addr64_offset(ptr addrspace(1) %out, ptr addrspace(1) %out2, i32 %in, i64 %index) {
; SI-LABEL: atomic_min_i32_ret_addr64_offset:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x9
; SI-NEXT: s_load_dwordx2 s[8:9], s[2:3], 0xf
; SI-NEXT: s_load_dword s2, s[2:3], 0xd
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mov_b32 s0, s6
; SI-NEXT: s_mov_b32 s1, s7
; SI-NEXT: s_lshl_b64 s[8:9], s[8:9], 2
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, s3
; SI-NEXT: v_mov_b32_e32 v2, s2
; SI-NEXT: v_mov_b32_e32 v0, s8
; SI-NEXT: v_mov_b32_e32 v1, s9
; SI-NEXT: buffer_atomic_smin v2, v[0:1], s[4:7], 0 addr64 offset:16 glc
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_store_dword v2, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_min_i32_ret_addr64_offset:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x3c
; VI-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x24
; VI-NEXT: s_load_dword s2, s[2:3], 0x34
; VI-NEXT: s_mov_b32 s3, 0xf000
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_lshl_b64 s[0:1], s[0:1], 2
; VI-NEXT: s_add_u32 s0, s4, s0
; VI-NEXT: s_addc_u32 s1, s5, s1
; VI-NEXT: s_add_u32 s0, s0, 16
; VI-NEXT: s_addc_u32 s1, s1, 0
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: v_mov_b32_e32 v2, s2
; VI-NEXT: flat_atomic_smin v0, v[0:1], v2 glc
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: s_mov_b32 s0, s6
; VI-NEXT: s_mov_b32 s1, s7
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_min_i32_ret_addr64_offset:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x3c
; GFX9-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x24
; GFX9-NEXT: s_load_dword s8, s[2:3], 0x34
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: s_lshl_b64 s[0:1], s[0:1], 2
; GFX9-NEXT: s_add_u32 s0, s4, s0
; GFX9-NEXT: s_addc_u32 s1, s5, s1
; GFX9-NEXT: v_mov_b32_e32 v1, s8
; GFX9-NEXT: global_atomic_smin v1, v0, v1, s[0:1] offset:16 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: global_store_dword v0, v1, s[6:7]
; GFX9-NEXT: s_endpgm
entry:
%ptr = getelementptr i32, ptr addrspace(1) %out, i64 %index
%gep = getelementptr i32, ptr addrspace(1) %ptr, i64 4
%val = atomicrmw volatile min ptr addrspace(1) %gep, i32 %in syncscope("workgroup") seq_cst
store i32 %val, ptr addrspace(1) %out2
ret void
}

define amdgpu_kernel void @atomic_min_i32(ptr addrspace(1) %out, i32 %in) {
; SI-LABEL: atomic_min_i32:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dword s4, s[2:3], 0xb
; SI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: buffer_atomic_smin v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_min_i32:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dword s4, s[2:3], 0x2c
; VI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; VI-NEXT: s_mov_b32 s3, 0xf000
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: buffer_atomic_smin v0, off, s[0:3], 0
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_min_i32:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dword s4, s[2:3], 0x2c
; GFX9-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v1, s4
; GFX9-NEXT: global_atomic_smin v0, v1, s[0:1]
; GFX9-NEXT: s_endpgm
entry:
%val = atomicrmw volatile min ptr addrspace(1) %out, i32 %in syncscope("workgroup") seq_cst
ret void
}

define amdgpu_kernel void @atomic_min_i32_ret(ptr addrspace(1) %out, ptr addrspace(1) %out2, i32 %in) {
; SI-LABEL: atomic_min_i32_ret:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x9
; SI-NEXT: s_load_dword s8, s[2:3], 0xd
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mov_b32 s0, s4
; SI-NEXT: s_mov_b32 s1, s5
; SI-NEXT: v_mov_b32_e32 v0, s8
; SI-NEXT: buffer_atomic_smin v0, off, s[0:3], 0 glc
; SI-NEXT: s_mov_b32 s0, s6
; SI-NEXT: s_mov_b32 s1, s7
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_min_i32_ret:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x24
; VI-NEXT: s_load_dword s8, s[2:3], 0x34
; VI-NEXT: s_mov_b32 s3, 0xf000
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_mov_b32 s0, s4
; VI-NEXT: s_mov_b32 s1, s5
; VI-NEXT: v_mov_b32_e32 v0, s8
; VI-NEXT: buffer_atomic_smin v0, off, s[0:3], 0 glc
; VI-NEXT: s_mov_b32 s0, s6
; VI-NEXT: s_mov_b32 s1, s7
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_min_i32_ret:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dword s0, s[2:3], 0x34
; GFX9-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x24
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v1, s0
; GFX9-NEXT: global_atomic_smin v1, v0, v1, s[4:5] glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: global_store_dword v0, v1, s[6:7]
; GFX9-NEXT: s_endpgm
entry:
%val = atomicrmw volatile min ptr addrspace(1) %out, i32 %in syncscope("workgroup") seq_cst
store i32 %val, ptr addrspace(1) %out2
ret void
}

define amdgpu_kernel void @atomic_min_i32_addr64(ptr addrspace(1) %out, i32 %in, i64 %index) {
; SI-LABEL: atomic_min_i32_addr64:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0xd
; SI-NEXT: s_load_dword s6, s[2:3], 0xb
; SI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_lshl_b64 s[4:5], s[4:5], 2
; SI-NEXT: s_mov_b32 s2, 0
; SI-NEXT: v_mov_b32_e32 v2, s6
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: v_mov_b32_e32 v1, s5
; SI-NEXT: buffer_atomic_smin v2, v[0:1], s[0:3], 0 addr64
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_min_i32_addr64:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x34
; VI-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0x24
; VI-NEXT: s_load_dword s2, s[2:3], 0x2c
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_lshl_b64 s[0:1], s[0:1], 2
; VI-NEXT: s_add_u32 s0, s4, s0
; VI-NEXT: s_addc_u32 s1, s5, s1
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: v_mov_b32_e32 v2, s2
; VI-NEXT: flat_atomic_smin v[0:1], v2
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_min_i32_addr64:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x34
; GFX9-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0x24
; GFX9-NEXT: s_load_dword s6, s[2:3], 0x2c
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: s_lshl_b64 s[0:1], s[0:1], 2
; GFX9-NEXT: s_add_u32 s0, s4, s0
; GFX9-NEXT: s_addc_u32 s1, s5, s1
; GFX9-NEXT: v_mov_b32_e32 v1, s6
; GFX9-NEXT: global_atomic_smin v0, v1, s[0:1]
; GFX9-NEXT: s_endpgm
entry:
%ptr = getelementptr i32, ptr addrspace(1) %out, i64 %index
%val = atomicrmw volatile min ptr addrspace(1) %ptr, i32 %in syncscope("workgroup") seq_cst
ret void
}

define amdgpu_kernel void @atomic_min_i32_ret_addr64(ptr addrspace(1) %out, ptr addrspace(1) %out2, i32 %in, i64 %index) {
; SI-LABEL: atomic_min_i32_ret_addr64:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x9
; SI-NEXT: s_load_dwordx2 s[8:9], s[2:3], 0xf
; SI-NEXT: s_load_dword s2, s[2:3], 0xd
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mov_b32 s0, s6
; SI-NEXT: s_mov_b32 s1, s7
; SI-NEXT: s_lshl_b64 s[8:9], s[8:9], 2
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, s3
; SI-NEXT: v_mov_b32_e32 v2, s2
; SI-NEXT: v_mov_b32_e32 v0, s8
; SI-NEXT: v_mov_b32_e32 v1, s9
; SI-NEXT: buffer_atomic_smin v2, v[0:1], s[4:7], 0 addr64 glc
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_store_dword v2, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_min_i32_ret_addr64:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x3c
; VI-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x24
; VI-NEXT: s_load_dword s2, s[2:3], 0x34
; VI-NEXT: s_mov_b32 s3, 0xf000
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_lshl_b64 s[0:1], s[0:1], 2
; VI-NEXT: s_add_u32 s0, s4, s0
; VI-NEXT: s_addc_u32 s1, s5, s1
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: v_mov_b32_e32 v2, s2
; VI-NEXT: flat_atomic_smin v0, v[0:1], v2 glc
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: s_mov_b32 s0, s6
; VI-NEXT: s_mov_b32 s1, s7
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_min_i32_ret_addr64:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x3c
; GFX9-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x24
; GFX9-NEXT: s_load_dword s8, s[2:3], 0x34
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: s_lshl_b64 s[0:1], s[0:1], 2
; GFX9-NEXT: s_add_u32 s0, s4, s0
; GFX9-NEXT: s_addc_u32 s1, s5, s1
; GFX9-NEXT: v_mov_b32_e32 v1, s8
; GFX9-NEXT: global_atomic_smin v1, v0, v1, s[0:1] glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: global_store_dword v0, v1, s[6:7]
; GFX9-NEXT: s_endpgm
entry:
%ptr = getelementptr i32, ptr addrspace(1) %out, i64 %index
%val = atomicrmw volatile min ptr addrspace(1) %ptr, i32 %in syncscope("workgroup") seq_cst
store i32 %val, ptr addrspace(1) %out2
ret void
}

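; umin tests: unsigned min selects the *_atomic_umin forms.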
define amdgpu_kernel void @atomic_umin_i32_offset(ptr addrspace(1) %out, i32 %in) {
; SI-LABEL: atomic_umin_i32_offset:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dword s4, s[2:3], 0xb
; SI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: buffer_atomic_umin v0, off, s[0:3], 0 offset:16
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_umin_i32_offset:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dword s4, s[2:3], 0x2c
; VI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; VI-NEXT: s_mov_b32 s3, 0xf000
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: buffer_atomic_umin v0, off, s[0:3], 0 offset:16
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_umin_i32_offset:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dword s4, s[2:3], 0x2c
; GFX9-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v1, s4
; GFX9-NEXT: global_atomic_umin v0, v1, s[0:1] offset:16
; GFX9-NEXT: s_endpgm
entry:
%gep = getelementptr i32, ptr addrspace(1) %out, i64 4
%val = atomicrmw volatile umin ptr addrspace(1) %gep, i32 %in syncscope("workgroup") seq_cst
ret void
}

define amdgpu_kernel void @atomic_umin_i32_ret_offset(ptr addrspace(1) %out, ptr addrspace(1) %out2, i32 %in) {
; SI-LABEL: atomic_umin_i32_ret_offset:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x9
; SI-NEXT: s_load_dword s8, s[2:3], 0xd
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mov_b32 s0, s6
; SI-NEXT: s_mov_b32 s1, s7
; SI-NEXT: s_mov_b32 s6, s2
; SI-NEXT: s_mov_b32 s7, s3
; SI-NEXT: v_mov_b32_e32 v0, s8
; SI-NEXT: buffer_atomic_umin v0, off, s[4:7], 0 offset:16 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_umin_i32_ret_offset:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x24
; VI-NEXT: s_load_dword s0, s[2:3], 0x34
; VI-NEXT: s_mov_b32 s11, 0xf000
; VI-NEXT: s_mov_b32 s10, -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_mov_b32 s8, s6
; VI-NEXT: s_mov_b32 s9, s7
; VI-NEXT: s_mov_b32 s6, s10
; VI-NEXT: s_mov_b32 s7, s11
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: buffer_atomic_umin v0, off, s[4:7], 0 offset:16 glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_store_dword v0, off, s[8:11], 0
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_umin_i32_ret_offset:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dword s0, s[2:3], 0x34
; GFX9-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x24
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v1, s0
; GFX9-NEXT: global_atomic_umin v1, v0, v1, s[4:5] offset:16 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: global_store_dword v0, v1, s[6:7]
; GFX9-NEXT: s_endpgm
entry:
%gep = getelementptr i32, ptr addrspace(1) %out, i64 4
%val = atomicrmw volatile umin ptr addrspace(1) %gep, i32 %in syncscope("workgroup") seq_cst
store i32 %val, ptr addrspace(1) %out2
ret void
}

define amdgpu_kernel void @atomic_umin_i32_addr64_offset(ptr addrspace(1) %out, i32 %in, i64 %index) {
; SI-LABEL: atomic_umin_i32_addr64_offset:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0xd
; SI-NEXT: s_load_dword s6, s[2:3], 0xb
; SI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_lshl_b64 s[4:5], s[4:5], 2
; SI-NEXT: s_mov_b32 s2, 0
; SI-NEXT: v_mov_b32_e32 v2, s6
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: v_mov_b32_e32 v1, s5
; SI-NEXT: buffer_atomic_umin v2, v[0:1], s[0:3], 0 addr64 offset:16
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_umin_i32_addr64_offset:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x34
; VI-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0x24
; VI-NEXT: s_load_dword s2, s[2:3], 0x2c
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_lshl_b64 s[0:1], s[0:1], 2
; VI-NEXT: s_add_u32 s0, s4, s0
; VI-NEXT: s_addc_u32 s1, s5, s1
; VI-NEXT: s_add_u32 s0, s0, 16
; VI-NEXT: s_addc_u32 s1, s1, 0
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: v_mov_b32_e32 v2, s2
; VI-NEXT: flat_atomic_umin v[0:1], v2
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_umin_i32_addr64_offset:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x34
; GFX9-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0x24
; GFX9-NEXT: s_load_dword s6, s[2:3], 0x2c
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: s_lshl_b64 s[0:1], s[0:1], 2
; GFX9-NEXT: s_add_u32 s0, s4, s0
; GFX9-NEXT: s_addc_u32 s1, s5, s1
; GFX9-NEXT: v_mov_b32_e32 v1, s6
; GFX9-NEXT: global_atomic_umin v0, v1, s[0:1] offset:16
; GFX9-NEXT: s_endpgm
entry:
%ptr = getelementptr i32, ptr addrspace(1) %out, i64 %index
%gep = getelementptr i32, ptr addrspace(1) %ptr, i64 4
%val = atomicrmw volatile umin ptr addrspace(1) %gep, i32 %in syncscope("workgroup") seq_cst
ret void
}

define amdgpu_kernel void @atomic_umin_i32_ret_addr64_offset(ptr addrspace(1) %out, ptr addrspace(1) %out2, i32 %in, i64 %index) {
; SI-LABEL: atomic_umin_i32_ret_addr64_offset:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x9
; SI-NEXT: s_load_dwordx2 s[8:9], s[2:3], 0xf
; SI-NEXT: s_load_dword s2, s[2:3], 0xd
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mov_b32 s0, s6
; SI-NEXT: s_mov_b32 s1, s7
; SI-NEXT: s_lshl_b64 s[8:9], s[8:9], 2
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, s3
; SI-NEXT: v_mov_b32_e32 v2, s2
; SI-NEXT: v_mov_b32_e32 v0, s8
; SI-NEXT: v_mov_b32_e32 v1, s9
; SI-NEXT: buffer_atomic_umin v2, v[0:1], s[4:7], 0 addr64 offset:16 glc
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_store_dword v2, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_umin_i32_ret_addr64_offset:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x3c
; VI-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x24
; VI-NEXT: s_load_dword s2, s[2:3], 0x34
; VI-NEXT: s_mov_b32 s3, 0xf000
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_lshl_b64 s[0:1], s[0:1], 2
; VI-NEXT: s_add_u32 s0, s4, s0
; VI-NEXT: s_addc_u32 s1, s5, s1
; VI-NEXT: s_add_u32 s0, s0, 16
; VI-NEXT: s_addc_u32 s1, s1, 0
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: v_mov_b32_e32 v2, s2
; VI-NEXT: flat_atomic_umin v0, v[0:1], v2 glc
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: s_mov_b32 s0, s6
; VI-NEXT: s_mov_b32 s1, s7
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_umin_i32_ret_addr64_offset:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x3c
; GFX9-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x24
; GFX9-NEXT: s_load_dword s8, s[2:3], 0x34
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: s_lshl_b64 s[0:1], s[0:1], 2
; GFX9-NEXT: s_add_u32 s0, s4, s0
; GFX9-NEXT: s_addc_u32 s1, s5, s1
; GFX9-NEXT: v_mov_b32_e32 v1, s8
; GFX9-NEXT: global_atomic_umin v1, v0, v1, s[0:1] offset:16 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: global_store_dword v0, v1, s[6:7]
; GFX9-NEXT: s_endpgm
entry:
%ptr = getelementptr i32, ptr addrspace(1) %out, i64 %index
%gep = getelementptr i32, ptr addrspace(1) %ptr, i64 4
%val = atomicrmw volatile umin ptr addrspace(1) %gep, i32 %in syncscope("workgroup") seq_cst
store i32 %val, ptr addrspace(1) %out2
ret void
}

define amdgpu_kernel void @atomic_umin_i32(ptr addrspace(1) %out, i32 %in) {
; SI-LABEL: atomic_umin_i32:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dword s4, s[2:3], 0xb
; SI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: buffer_atomic_umin v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_umin_i32:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dword s4, s[2:3], 0x2c
; VI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; VI-NEXT: s_mov_b32 s3, 0xf000
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: buffer_atomic_umin v0, off, s[0:3], 0
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_umin_i32:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dword s4, s[2:3], 0x2c
; GFX9-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v1, s4
; GFX9-NEXT: global_atomic_umin v0, v1, s[0:1]
; GFX9-NEXT: s_endpgm
entry:
%val = atomicrmw volatile umin ptr addrspace(1) %out, i32 %in syncscope("workgroup") seq_cst
ret void
}

define amdgpu_kernel void @atomic_umin_i32_ret(ptr addrspace(1) %out, ptr addrspace(1) %out2, i32 %in) {
; SI-LABEL: atomic_umin_i32_ret:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x9
; SI-NEXT: s_load_dword s8, s[2:3], 0xd
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mov_b32 s0, s4
; SI-NEXT: s_mov_b32 s1, s5
; SI-NEXT: v_mov_b32_e32 v0, s8
; SI-NEXT: buffer_atomic_umin v0, off, s[0:3], 0 glc
; SI-NEXT: s_mov_b32 s0, s6
; SI-NEXT: s_mov_b32 s1, s7
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_umin_i32_ret:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x24
; VI-NEXT: s_load_dword s8, s[2:3], 0x34
; VI-NEXT: s_mov_b32 s3, 0xf000
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_mov_b32 s0, s4
; VI-NEXT: s_mov_b32 s1, s5
; VI-NEXT: v_mov_b32_e32 v0, s8
; VI-NEXT: buffer_atomic_umin v0, off, s[0:3], 0 glc
; VI-NEXT: s_mov_b32 s0, s6
; VI-NEXT: s_mov_b32 s1, s7
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_umin_i32_ret:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dword s0, s[2:3], 0x34
; GFX9-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x24
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v1, s0
; GFX9-NEXT: global_atomic_umin v1, v0, v1, s[4:5] glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: global_store_dword v0, v1, s[6:7]
; GFX9-NEXT: s_endpgm
entry:
%val = atomicrmw volatile umin ptr addrspace(1) %out, i32 %in syncscope("workgroup") seq_cst
store i32 %val, ptr addrspace(1) %out2
ret void
}

define amdgpu_kernel void @atomic_umin_i32_addr64(ptr addrspace(1) %out, i32 %in, i64 %index) {
; SI-LABEL: atomic_umin_i32_addr64:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0xd
; SI-NEXT: s_load_dword s6, s[2:3], 0xb
; SI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_lshl_b64 s[4:5], s[4:5], 2
; SI-NEXT: s_mov_b32 s2, 0
; SI-NEXT: v_mov_b32_e32 v2, s6
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: v_mov_b32_e32 v1, s5
; SI-NEXT: buffer_atomic_umin v2, v[0:1], s[0:3], 0 addr64
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_umin_i32_addr64:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x34
; VI-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0x24
; VI-NEXT: s_load_dword s2, s[2:3], 0x2c
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_lshl_b64 s[0:1], s[0:1], 2
; VI-NEXT: s_add_u32 s0, s4, s0
; VI-NEXT: s_addc_u32 s1, s5, s1
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: v_mov_b32_e32 v2, s2
; VI-NEXT: flat_atomic_umin v[0:1], v2
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_umin_i32_addr64:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x34
; GFX9-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0x24
; GFX9-NEXT: s_load_dword s6, s[2:3], 0x2c
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: s_lshl_b64 s[0:1], s[0:1], 2
; GFX9-NEXT: s_add_u32 s0, s4, s0
; GFX9-NEXT: s_addc_u32 s1, s5, s1
; GFX9-NEXT: v_mov_b32_e32 v1, s6
; GFX9-NEXT: global_atomic_umin v0, v1, s[0:1]
; GFX9-NEXT: s_endpgm
entry:
%ptr = getelementptr i32, ptr addrspace(1) %out, i64 %index
%val = atomicrmw volatile umin ptr addrspace(1) %ptr, i32 %in syncscope("workgroup") seq_cst
ret void
}

define amdgpu_kernel void @atomic_umin_i32_ret_addr64(ptr addrspace(1) %out, ptr addrspace(1) %out2, i32 %in, i64 %index) {
; SI-LABEL: atomic_umin_i32_ret_addr64:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x9
; SI-NEXT: s_load_dwordx2 s[8:9], s[2:3], 0xf
; SI-NEXT: s_load_dword s2, s[2:3], 0xd
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mov_b32 s0, s6
; SI-NEXT: s_mov_b32 s1, s7
; SI-NEXT: s_lshl_b64 s[8:9], s[8:9], 2
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, s3
; SI-NEXT: v_mov_b32_e32 v2, s2
; SI-NEXT: v_mov_b32_e32 v0, s8
; SI-NEXT: v_mov_b32_e32 v1, s9
; SI-NEXT: buffer_atomic_umin v2, v[0:1], s[4:7], 0 addr64 glc
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_store_dword v2, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_umin_i32_ret_addr64:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x3c
; VI-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x24
; VI-NEXT: s_load_dword s2, s[2:3], 0x34
; VI-NEXT: s_mov_b32 s3, 0xf000
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_lshl_b64 s[0:1], s[0:1], 2
; VI-NEXT: s_add_u32 s0, s4, s0
; VI-NEXT: s_addc_u32 s1, s5, s1
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: v_mov_b32_e32 v2, s2
; VI-NEXT: flat_atomic_umin v0, v[0:1], v2 glc
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: s_mov_b32 s0, s6
; VI-NEXT: s_mov_b32 s1, s7
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_umin_i32_ret_addr64:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x3c
; GFX9-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x24
; GFX9-NEXT: s_load_dword s8, s[2:3], 0x34
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: s_lshl_b64 s[0:1], s[0:1], 2
; GFX9-NEXT: s_add_u32 s0, s4, s0
; GFX9-NEXT: s_addc_u32 s1, s5, s1
; GFX9-NEXT: v_mov_b32_e32 v1, s8
; GFX9-NEXT: global_atomic_umin v1, v0, v1, s[0:1] glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: global_store_dword v0, v1, s[6:7]
; GFX9-NEXT: s_endpgm
entry:
%ptr = getelementptr i32, ptr addrspace(1) %out, i64 %index
%val = atomicrmw volatile umin ptr addrspace(1) %ptr, i32 %in syncscope("workgroup") seq_cst
store i32 %val, ptr addrspace(1) %out2
ret void
}

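; or tests: these use agent scope, so seq_cst additionally expects an L1
; invalidate (buffer_wbinvl1 on SI, buffer_wbinvl1_vol on VI/GFX9) after the
; atomic completes.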
define amdgpu_kernel void @atomic_or_i32_offset(ptr addrspace(1) %out, i32 %in) {
; SI-LABEL: atomic_or_i32_offset:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dword s4, s[2:3], 0xb
; SI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: buffer_atomic_or v0, off, s[0:3], 0 offset:16
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_or_i32_offset:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dword s4, s[2:3], 0x2c
; VI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; VI-NEXT: s_mov_b32 s3, 0xf000
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: buffer_atomic_or v0, off, s[0:3], 0 offset:16
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_or_i32_offset:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dword s4, s[2:3], 0x2c
; GFX9-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v1, s4
; GFX9-NEXT: global_atomic_or v0, v1, s[0:1] offset:16
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: s_endpgm
entry:
%gep = getelementptr i32, ptr addrspace(1) %out, i64 4
%val = atomicrmw volatile or ptr addrspace(1) %gep, i32 %in syncscope("agent") seq_cst
ret void
}

define amdgpu_kernel void @atomic_or_i32_ret_offset(ptr addrspace(1) %out, ptr addrspace(1) %out2, i32 %in) {
; SI-LABEL: atomic_or_i32_ret_offset:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x9
; SI-NEXT: s_load_dword s8, s[2:3], 0xd
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mov_b32 s0, s6
; SI-NEXT: s_mov_b32 s1, s7
; SI-NEXT: s_mov_b32 s6, s2
; SI-NEXT: s_mov_b32 s7, s3
; SI-NEXT: v_mov_b32_e32 v0, s8
; SI-NEXT: buffer_atomic_or v0, off, s[4:7], 0 offset:16 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_or_i32_ret_offset:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x24
; VI-NEXT: s_load_dword s0, s[2:3], 0x34
; VI-NEXT: s_mov_b32 s11, 0xf000
; VI-NEXT: s_mov_b32 s10, -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_mov_b32 s8, s6
; VI-NEXT: s_mov_b32 s9, s7
; VI-NEXT: s_mov_b32 s6, s10
; VI-NEXT: s_mov_b32 s7, s11
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: buffer_atomic_or v0, off, s[4:7], 0 offset:16 glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: buffer_store_dword v0, off, s[8:11], 0
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_or_i32_ret_offset:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dword s0, s[2:3], 0x34
; GFX9-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x24
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v1, s0
; GFX9-NEXT: global_atomic_or v1, v0, v1, s[4:5] offset:16 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: global_store_dword v0, v1, s[6:7]
; GFX9-NEXT: s_endpgm
entry:
%gep = getelementptr i32, ptr addrspace(1) %out, i64 4
%val = atomicrmw volatile or ptr addrspace(1) %gep, i32 %in syncscope("agent") seq_cst
store i32 %val, ptr addrspace(1) %out2
ret void
}

define amdgpu_kernel void @atomic_or_i32_addr64_offset(ptr addrspace(1) %out, i32 %in, i64 %index) {
; SI-LABEL: atomic_or_i32_addr64_offset:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0xd
; SI-NEXT: s_load_dword s6, s[2:3], 0xb
; SI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_lshl_b64 s[4:5], s[4:5], 2
; SI-NEXT: s_mov_b32 s2, 0
; SI-NEXT: v_mov_b32_e32 v2, s6
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: v_mov_b32_e32 v1, s5
; SI-NEXT: buffer_atomic_or v2, v[0:1], s[0:3], 0 addr64 offset:16
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_or_i32_addr64_offset:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x34
; VI-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0x24
; VI-NEXT: s_load_dword s2, s[2:3], 0x2c
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_lshl_b64 s[0:1], s[0:1], 2
; VI-NEXT: s_add_u32 s0, s4, s0
; VI-NEXT: s_addc_u32 s1, s5, s1
; VI-NEXT: s_add_u32 s0, s0, 16
; VI-NEXT: s_addc_u32 s1, s1, 0
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: v_mov_b32_e32 v2, s2
; VI-NEXT: flat_atomic_or v[0:1], v2
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_or_i32_addr64_offset:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x34
; GFX9-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0x24
; GFX9-NEXT: s_load_dword s6, s[2:3], 0x2c
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: s_lshl_b64 s[0:1], s[0:1], 2
; GFX9-NEXT: s_add_u32 s0, s4, s0
; GFX9-NEXT: s_addc_u32 s1, s5, s1
; GFX9-NEXT: v_mov_b32_e32 v1, s6
; GFX9-NEXT: global_atomic_or v0, v1, s[0:1] offset:16
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: s_endpgm
entry:
%ptr = getelementptr i32, ptr addrspace(1) %out, i64 %index
%gep = getelementptr i32, ptr addrspace(1) %ptr, i64 4
%val = atomicrmw volatile or ptr addrspace(1) %gep, i32 %in syncscope("agent") seq_cst
ret void
}

define amdgpu_kernel void @atomic_or_i32_ret_addr64_offset(ptr addrspace(1) %out, ptr addrspace(1) %out2, i32 %in, i64 %index) {
; SI-LABEL: atomic_or_i32_ret_addr64_offset:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x9
; SI-NEXT: s_load_dwordx2 s[8:9], s[2:3], 0xf
; SI-NEXT: s_load_dword s2, s[2:3], 0xd
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mov_b32 s0, s6
; SI-NEXT: s_mov_b32 s1, s7
; SI-NEXT: s_lshl_b64 s[8:9], s[8:9], 2
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, s3
; SI-NEXT: v_mov_b32_e32 v2, s2
; SI-NEXT: v_mov_b32_e32 v0, s8
; SI-NEXT: v_mov_b32_e32 v1, s9
; SI-NEXT: buffer_atomic_or v2, v[0:1], s[4:7], 0 addr64 offset:16 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: buffer_store_dword v2, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_or_i32_ret_addr64_offset:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x3c
; VI-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x24
; VI-NEXT: s_load_dword s2, s[2:3], 0x34
; VI-NEXT: s_mov_b32 s3, 0xf000
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_lshl_b64 s[0:1], s[0:1], 2
; VI-NEXT: s_add_u32 s0, s4, s0
; VI-NEXT: s_addc_u32 s1, s5, s1
; VI-NEXT: s_add_u32 s0, s0, 16
; VI-NEXT: s_addc_u32 s1, s1, 0
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: v_mov_b32_e32 v2, s2
; VI-NEXT: flat_atomic_or v0, v[0:1], v2 glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: s_mov_b32 s0, s6
; VI-NEXT: s_mov_b32 s1, s7
; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_or_i32_ret_addr64_offset:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x3c
; GFX9-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x24
; GFX9-NEXT: s_load_dword s8, s[2:3], 0x34
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: s_lshl_b64 s[0:1], s[0:1], 2
; GFX9-NEXT: s_add_u32 s0, s4, s0
; GFX9-NEXT: s_addc_u32 s1, s5, s1
; GFX9-NEXT: v_mov_b32_e32 v1, s8
; GFX9-NEXT: global_atomic_or v1, v0, v1, s[0:1] offset:16 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: global_store_dword v0, v1, s[6:7]
; GFX9-NEXT: s_endpgm
entry:
%ptr = getelementptr i32, ptr addrspace(1) %out, i64 %index
%gep = getelementptr i32, ptr addrspace(1) %ptr, i64 4
%val = atomicrmw volatile or ptr addrspace(1) %gep, i32 %in syncscope("agent") seq_cst
store i32 %val, ptr addrspace(1) %out2
ret void
}

define amdgpu_kernel void @atomic_or_i32(ptr addrspace(1) %out, i32 %in) {
; SI-LABEL: atomic_or_i32:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dword s4, s[2:3], 0xb
; SI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: buffer_atomic_or v0, off, s[0:3], 0
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_or_i32:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dword s4, s[2:3], 0x2c
; VI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; VI-NEXT: s_mov_b32 s3, 0xf000
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: buffer_atomic_or v0, off, s[0:3], 0
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_or_i32:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dword s4, s[2:3], 0x2c
; GFX9-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v1, s4
; GFX9-NEXT: global_atomic_or v0, v1, s[0:1]
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: s_endpgm
entry:
%val = atomicrmw volatile or ptr addrspace(1) %out, i32 %in syncscope("agent") seq_cst
ret void
}

define amdgpu_kernel void @atomic_or_i32_ret(ptr addrspace(1) %out, ptr addrspace(1) %out2, i32 %in) {
; SI-LABEL: atomic_or_i32_ret:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x9
; SI-NEXT: s_load_dword s8, s[2:3], 0xd
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mov_b32 s0, s4
; SI-NEXT: s_mov_b32 s1, s5
; SI-NEXT: v_mov_b32_e32 v0, s8
; SI-NEXT: buffer_atomic_or v0, off, s[0:3], 0 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: s_mov_b32 s0, s6
; SI-NEXT: s_mov_b32 s1, s7
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_or_i32_ret:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x24
; VI-NEXT: s_load_dword s8, s[2:3], 0x34
; VI-NEXT: s_mov_b32 s3, 0xf000
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_mov_b32 s0, s4
; VI-NEXT: s_mov_b32 s1, s5
; VI-NEXT: v_mov_b32_e32 v0, s8
; VI-NEXT: buffer_atomic_or v0, off, s[0:3], 0 glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_mov_b32 s0, s6
; VI-NEXT: s_mov_b32 s1, s7
; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_or_i32_ret:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dword s0, s[2:3], 0x34
; GFX9-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x24
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v1, s0
; GFX9-NEXT: global_atomic_or v1, v0, v1, s[4:5] glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: global_store_dword v0, v1, s[6:7]
; GFX9-NEXT: s_endpgm
entry:
%val = atomicrmw volatile or ptr addrspace(1) %out, i32 %in syncscope("agent") seq_cst
store i32 %val, ptr addrspace(1) %out2
ret void
}

define amdgpu_kernel void @atomic_or_i32_addr64(ptr addrspace(1) %out, i32 %in, i64 %index) {
; SI-LABEL: atomic_or_i32_addr64:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0xd
; SI-NEXT: s_load_dword s6, s[2:3], 0xb
; SI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_lshl_b64 s[4:5], s[4:5], 2
; SI-NEXT: s_mov_b32 s2, 0
; SI-NEXT: v_mov_b32_e32 v2, s6
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: v_mov_b32_e32 v1, s5
; SI-NEXT: buffer_atomic_or v2, v[0:1], s[0:3], 0 addr64
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_or_i32_addr64:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x34
; VI-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0x24
; VI-NEXT: s_load_dword s2, s[2:3], 0x2c
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_lshl_b64 s[0:1], s[0:1], 2
; VI-NEXT: s_add_u32 s0, s4, s0
; VI-NEXT: s_addc_u32 s1, s5, s1
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: v_mov_b32_e32 v2, s2
; VI-NEXT: flat_atomic_or v[0:1], v2
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_or_i32_addr64:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x34
; GFX9-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0x24
; GFX9-NEXT: s_load_dword s6, s[2:3], 0x2c
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: s_lshl_b64 s[0:1], s[0:1], 2
; GFX9-NEXT: s_add_u32 s0, s4, s0
; GFX9-NEXT: s_addc_u32 s1, s5, s1
; GFX9-NEXT: v_mov_b32_e32 v1, s6
; GFX9-NEXT: global_atomic_or v0, v1, s[0:1]
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: s_endpgm
entry:
%ptr = getelementptr i32, ptr addrspace(1) %out, i64 %index
%val = atomicrmw volatile or ptr addrspace(1) %ptr, i32 %in syncscope("agent") seq_cst
ret void
}

define amdgpu_kernel void @atomic_or_i32_ret_addr64(ptr addrspace(1) %out, ptr addrspace(1) %out2, i32 %in, i64 %index) {
; SI-LABEL: atomic_or_i32_ret_addr64:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x9
; SI-NEXT: s_load_dwordx2 s[8:9], s[2:3], 0xf
; SI-NEXT: s_load_dword s2, s[2:3], 0xd
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mov_b32 s0, s6
; SI-NEXT: s_mov_b32 s1, s7
; SI-NEXT: s_lshl_b64 s[8:9], s[8:9], 2
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, s3
; SI-NEXT: v_mov_b32_e32 v2, s2
; SI-NEXT: v_mov_b32_e32 v0, s8
; SI-NEXT: v_mov_b32_e32 v1, s9
; SI-NEXT: buffer_atomic_or v2, v[0:1], s[4:7], 0 addr64 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: buffer_store_dword v2, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_or_i32_ret_addr64:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x3c
; VI-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x24
; VI-NEXT: s_load_dword s2, s[2:3], 0x34
; VI-NEXT: s_mov_b32 s3, 0xf000
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_lshl_b64 s[0:1], s[0:1], 2
; VI-NEXT: s_add_u32 s0, s4, s0
; VI-NEXT: s_addc_u32 s1, s5, s1
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: v_mov_b32_e32 v2, s2
; VI-NEXT: flat_atomic_or v0, v[0:1], v2 glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: s_mov_b32 s0, s6
; VI-NEXT: s_mov_b32 s1, s7
; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_or_i32_ret_addr64:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x3c
; GFX9-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x24
; GFX9-NEXT: s_load_dword s8, s[2:3], 0x34
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: s_lshl_b64 s[0:1], s[0:1], 2
; GFX9-NEXT: s_add_u32 s0, s4, s0
; GFX9-NEXT: s_addc_u32 s1, s5, s1
; GFX9-NEXT: v_mov_b32_e32 v1, s8
; GFX9-NEXT: global_atomic_or v1, v0, v1, s[0:1] glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: global_store_dword v0, v1, s[6:7]
; GFX9-NEXT: s_endpgm
entry:
%ptr = getelementptr i32, ptr addrspace(1) %out, i64 %index
%val = atomicrmw volatile or ptr addrspace(1) %ptr, i32 %in syncscope("agent") seq_cst
store i32 %val, ptr addrspace(1) %out2
ret void
}

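; atomicrmw xchg: i32 variants plus one f32 case; offset, _ret, and addr64 forms follow.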
define amdgpu_kernel void @atomic_xchg_i32_offset(ptr addrspace(1) %out, i32 %in) {
; SI-LABEL: atomic_xchg_i32_offset:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dword s4, s[2:3], 0xb
; SI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: buffer_atomic_swap v0, off, s[0:3], 0 offset:16
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_xchg_i32_offset:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dword s4, s[2:3], 0x2c
; VI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; VI-NEXT: s_mov_b32 s3, 0xf000
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: buffer_atomic_swap v0, off, s[0:3], 0 offset:16
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_xchg_i32_offset:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dword s4, s[2:3], 0x2c
; GFX9-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v1, s4
; GFX9-NEXT: global_atomic_swap v0, v1, s[0:1] offset:16
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: s_endpgm
entry:
%gep = getelementptr i32, ptr addrspace(1) %out, i64 4
%val = atomicrmw volatile xchg ptr addrspace(1) %gep, i32 %in syncscope("agent") seq_cst
ret void
}

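; Note: the f32 exchange below selects the same atomic swap instructions as the i32 case above.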
define amdgpu_kernel void @atomic_xchg_f32_offset(ptr addrspace(1) %out, float %in) {
; SI-LABEL: atomic_xchg_f32_offset:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dword s4, s[2:3], 0xb
; SI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: buffer_atomic_swap v0, off, s[0:3], 0 offset:16
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_xchg_f32_offset:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dword s4, s[2:3], 0x2c
; VI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; VI-NEXT: s_mov_b32 s3, 0xf000
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: buffer_atomic_swap v0, off, s[0:3], 0 offset:16
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_xchg_f32_offset:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dword s4, s[2:3], 0x2c
; GFX9-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v1, s4
; GFX9-NEXT: global_atomic_swap v0, v1, s[0:1] offset:16
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: s_endpgm
entry:
%gep = getelementptr float, ptr addrspace(1) %out, i64 4
%val = atomicrmw volatile xchg ptr addrspace(1) %gep, float %in syncscope("agent") seq_cst
ret void
}

define amdgpu_kernel void @atomic_xchg_i32_ret_offset(ptr addrspace(1) %out, ptr addrspace(1) %out2, i32 %in) {
; SI-LABEL: atomic_xchg_i32_ret_offset:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x9
; SI-NEXT: s_load_dword s8, s[2:3], 0xd
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mov_b32 s0, s6
; SI-NEXT: s_mov_b32 s1, s7
; SI-NEXT: s_mov_b32 s6, s2
; SI-NEXT: s_mov_b32 s7, s3
; SI-NEXT: v_mov_b32_e32 v0, s8
; SI-NEXT: buffer_atomic_swap v0, off, s[4:7], 0 offset:16 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_xchg_i32_ret_offset:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x24
; VI-NEXT: s_load_dword s0, s[2:3], 0x34
; VI-NEXT: s_mov_b32 s11, 0xf000
; VI-NEXT: s_mov_b32 s10, -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_mov_b32 s8, s6
; VI-NEXT: s_mov_b32 s9, s7
; VI-NEXT: s_mov_b32 s6, s10
; VI-NEXT: s_mov_b32 s7, s11
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: buffer_atomic_swap v0, off, s[4:7], 0 offset:16 glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: buffer_store_dword v0, off, s[8:11], 0
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_xchg_i32_ret_offset:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dword s0, s[2:3], 0x34
; GFX9-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x24
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v1, s0
; GFX9-NEXT: global_atomic_swap v1, v0, v1, s[4:5] offset:16 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: global_store_dword v0, v1, s[6:7]
; GFX9-NEXT: s_endpgm
entry:
%gep = getelementptr i32, ptr addrspace(1) %out, i64 4
%val = atomicrmw volatile xchg ptr addrspace(1) %gep, i32 %in syncscope("agent") seq_cst
store i32 %val, ptr addrspace(1) %out2
ret void
}

define amdgpu_kernel void @atomic_xchg_i32_addr64_offset(ptr addrspace(1) %out, i32 %in, i64 %index) {
; SI-LABEL: atomic_xchg_i32_addr64_offset:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0xd
; SI-NEXT: s_load_dword s6, s[2:3], 0xb
; SI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_lshl_b64 s[4:5], s[4:5], 2
; SI-NEXT: s_mov_b32 s2, 0
; SI-NEXT: v_mov_b32_e32 v2, s6
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: v_mov_b32_e32 v1, s5
; SI-NEXT: buffer_atomic_swap v2, v[0:1], s[0:3], 0 addr64 offset:16
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_xchg_i32_addr64_offset:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x34
; VI-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0x24
; VI-NEXT: s_load_dword s2, s[2:3], 0x2c
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_lshl_b64 s[0:1], s[0:1], 2
; VI-NEXT: s_add_u32 s0, s4, s0
; VI-NEXT: s_addc_u32 s1, s5, s1
; VI-NEXT: s_add_u32 s0, s0, 16
; VI-NEXT: s_addc_u32 s1, s1, 0
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: v_mov_b32_e32 v2, s2
; VI-NEXT: flat_atomic_swap v[0:1], v2
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_xchg_i32_addr64_offset:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x34
; GFX9-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0x24
; GFX9-NEXT: s_load_dword s6, s[2:3], 0x2c
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: s_lshl_b64 s[0:1], s[0:1], 2
; GFX9-NEXT: s_add_u32 s0, s4, s0
; GFX9-NEXT: s_addc_u32 s1, s5, s1
; GFX9-NEXT: v_mov_b32_e32 v1, s6
; GFX9-NEXT: global_atomic_swap v0, v1, s[0:1] offset:16
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: s_endpgm
entry:
%ptr = getelementptr i32, ptr addrspace(1) %out, i64 %index
%gep = getelementptr i32, ptr addrspace(1) %ptr, i64 4
%val = atomicrmw volatile xchg ptr addrspace(1) %gep, i32 %in syncscope("agent") seq_cst
ret void
}

define amdgpu_kernel void @atomic_xchg_i32_ret_addr64_offset(ptr addrspace(1) %out, ptr addrspace(1) %out2, i32 %in, i64 %index) {
; SI-LABEL: atomic_xchg_i32_ret_addr64_offset:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x9
; SI-NEXT: s_load_dwordx2 s[8:9], s[2:3], 0xf
; SI-NEXT: s_load_dword s2, s[2:3], 0xd
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mov_b32 s0, s6
; SI-NEXT: s_mov_b32 s1, s7
; SI-NEXT: s_lshl_b64 s[8:9], s[8:9], 2
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, s3
; SI-NEXT: v_mov_b32_e32 v2, s2
; SI-NEXT: v_mov_b32_e32 v0, s8
; SI-NEXT: v_mov_b32_e32 v1, s9
; SI-NEXT: buffer_atomic_swap v2, v[0:1], s[4:7], 0 addr64 offset:16 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: buffer_store_dword v2, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_xchg_i32_ret_addr64_offset:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x3c
; VI-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x24
; VI-NEXT: s_load_dword s2, s[2:3], 0x34
; VI-NEXT: s_mov_b32 s3, 0xf000
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_lshl_b64 s[0:1], s[0:1], 2
; VI-NEXT: s_add_u32 s0, s4, s0
; VI-NEXT: s_addc_u32 s1, s5, s1
; VI-NEXT: s_add_u32 s0, s0, 16
; VI-NEXT: s_addc_u32 s1, s1, 0
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: v_mov_b32_e32 v2, s2
; VI-NEXT: flat_atomic_swap v0, v[0:1], v2 glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: s_mov_b32 s0, s6
; VI-NEXT: s_mov_b32 s1, s7
; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_xchg_i32_ret_addr64_offset:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x3c
; GFX9-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x24
; GFX9-NEXT: s_load_dword s8, s[2:3], 0x34
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: s_lshl_b64 s[0:1], s[0:1], 2
; GFX9-NEXT: s_add_u32 s0, s4, s0
; GFX9-NEXT: s_addc_u32 s1, s5, s1
; GFX9-NEXT: v_mov_b32_e32 v1, s8
; GFX9-NEXT: global_atomic_swap v1, v0, v1, s[0:1] offset:16 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: global_store_dword v0, v1, s[6:7]
; GFX9-NEXT: s_endpgm
entry:
%ptr = getelementptr i32, ptr addrspace(1) %out, i64 %index
%gep = getelementptr i32, ptr addrspace(1) %ptr, i64 4
%val = atomicrmw volatile xchg ptr addrspace(1) %gep, i32 %in syncscope("agent") seq_cst
store i32 %val, ptr addrspace(1) %out2
ret void
}

define amdgpu_kernel void @atomic_xchg_i32(ptr addrspace(1) %out, i32 %in) {
; SI-LABEL: atomic_xchg_i32:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dword s4, s[2:3], 0xb
; SI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: buffer_atomic_swap v0, off, s[0:3], 0
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_xchg_i32:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dword s4, s[2:3], 0x2c
; VI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; VI-NEXT: s_mov_b32 s3, 0xf000
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: buffer_atomic_swap v0, off, s[0:3], 0
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_xchg_i32:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dword s4, s[2:3], 0x2c
; GFX9-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v1, s4
; GFX9-NEXT: global_atomic_swap v0, v1, s[0:1]
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: s_endpgm
entry:
%val = atomicrmw volatile xchg ptr addrspace(1) %out, i32 %in syncscope("agent") seq_cst
ret void
}

define amdgpu_kernel void @atomic_xchg_i32_ret(ptr addrspace(1) %out, ptr addrspace(1) %out2, i32 %in) {
; SI-LABEL: atomic_xchg_i32_ret:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x9
; SI-NEXT: s_load_dword s8, s[2:3], 0xd
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mov_b32 s0, s4
; SI-NEXT: s_mov_b32 s1, s5
; SI-NEXT: v_mov_b32_e32 v0, s8
; SI-NEXT: buffer_atomic_swap v0, off, s[0:3], 0 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: s_mov_b32 s0, s6
; SI-NEXT: s_mov_b32 s1, s7
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_xchg_i32_ret:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x24
; VI-NEXT: s_load_dword s8, s[2:3], 0x34
; VI-NEXT: s_mov_b32 s3, 0xf000
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_mov_b32 s0, s4
; VI-NEXT: s_mov_b32 s1, s5
; VI-NEXT: v_mov_b32_e32 v0, s8
; VI-NEXT: buffer_atomic_swap v0, off, s[0:3], 0 glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_mov_b32 s0, s6
; VI-NEXT: s_mov_b32 s1, s7
; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_xchg_i32_ret:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dword s0, s[2:3], 0x34
; GFX9-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x24
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v1, s0
; GFX9-NEXT: global_atomic_swap v1, v0, v1, s[4:5] glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: global_store_dword v0, v1, s[6:7]
; GFX9-NEXT: s_endpgm
entry:
%val = atomicrmw volatile xchg ptr addrspace(1) %out, i32 %in syncscope("agent") seq_cst
store i32 %val, ptr addrspace(1) %out2
ret void
}

define amdgpu_kernel void @atomic_xchg_i32_addr64(ptr addrspace(1) %out, i32 %in, i64 %index) {
; SI-LABEL: atomic_xchg_i32_addr64:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0xd
; SI-NEXT: s_load_dword s6, s[2:3], 0xb
; SI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_lshl_b64 s[4:5], s[4:5], 2
; SI-NEXT: s_mov_b32 s2, 0
; SI-NEXT: v_mov_b32_e32 v2, s6
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: v_mov_b32_e32 v1, s5
; SI-NEXT: buffer_atomic_swap v2, v[0:1], s[0:3], 0 addr64
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_xchg_i32_addr64:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x34
; VI-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0x24
; VI-NEXT: s_load_dword s2, s[2:3], 0x2c
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_lshl_b64 s[0:1], s[0:1], 2
; VI-NEXT: s_add_u32 s0, s4, s0
; VI-NEXT: s_addc_u32 s1, s5, s1
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: v_mov_b32_e32 v2, s2
; VI-NEXT: flat_atomic_swap v[0:1], v2
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_xchg_i32_addr64:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x34
; GFX9-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0x24
; GFX9-NEXT: s_load_dword s6, s[2:3], 0x2c
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: s_lshl_b64 s[0:1], s[0:1], 2
; GFX9-NEXT: s_add_u32 s0, s4, s0
; GFX9-NEXT: s_addc_u32 s1, s5, s1
; GFX9-NEXT: v_mov_b32_e32 v1, s6
; GFX9-NEXT: global_atomic_swap v0, v1, s[0:1]
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: s_endpgm
entry:
%ptr = getelementptr i32, ptr addrspace(1) %out, i64 %index
%val = atomicrmw volatile xchg ptr addrspace(1) %ptr, i32 %in syncscope("agent") seq_cst
ret void
}

define amdgpu_kernel void @atomic_xchg_i32_ret_addr64(ptr addrspace(1) %out, ptr addrspace(1) %out2, i32 %in, i64 %index) {
; SI-LABEL: atomic_xchg_i32_ret_addr64:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x9
; SI-NEXT: s_load_dwordx2 s[8:9], s[2:3], 0xf
; SI-NEXT: s_load_dword s2, s[2:3], 0xd
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mov_b32 s0, s6
; SI-NEXT: s_mov_b32 s1, s7
; SI-NEXT: s_lshl_b64 s[8:9], s[8:9], 2
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, s3
; SI-NEXT: v_mov_b32_e32 v2, s2
; SI-NEXT: v_mov_b32_e32 v0, s8
; SI-NEXT: v_mov_b32_e32 v1, s9
; SI-NEXT: buffer_atomic_swap v2, v[0:1], s[4:7], 0 addr64 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: buffer_store_dword v2, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_xchg_i32_ret_addr64:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x3c
; VI-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x24
; VI-NEXT: s_load_dword s2, s[2:3], 0x34
; VI-NEXT: s_mov_b32 s3, 0xf000
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_lshl_b64 s[0:1], s[0:1], 2
; VI-NEXT: s_add_u32 s0, s4, s0
; VI-NEXT: s_addc_u32 s1, s5, s1
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: v_mov_b32_e32 v2, s2
; VI-NEXT: flat_atomic_swap v0, v[0:1], v2 glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: s_mov_b32 s0, s6
; VI-NEXT: s_mov_b32 s1, s7
; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_xchg_i32_ret_addr64:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x3c
; GFX9-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x24
; GFX9-NEXT: s_load_dword s8, s[2:3], 0x34
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: s_lshl_b64 s[0:1], s[0:1], 2
; GFX9-NEXT: s_add_u32 s0, s4, s0
; GFX9-NEXT: s_addc_u32 s1, s5, s1
; GFX9-NEXT: v_mov_b32_e32 v1, s8
; GFX9-NEXT: global_atomic_swap v1, v0, v1, s[0:1] glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: global_store_dword v0, v1, s[6:7]
; GFX9-NEXT: s_endpgm
entry:
%ptr = getelementptr i32, ptr addrspace(1) %out, i64 %index
%val = atomicrmw volatile xchg ptr addrspace(1) %ptr, i32 %in syncscope("agent") seq_cst
store i32 %val, ptr addrspace(1) %out2
ret void
}

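; cmpxchg (i32): offset, _ret, and addr64 variants follow; the _ret tests store the value extracted from the { i32, i1 } result.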
define amdgpu_kernel void @atomic_cmpxchg_i32_offset(ptr addrspace(1) %out, i32 %in, i32 %old) {
; SI-LABEL: atomic_cmpxchg_i32_offset:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dwordx4 s[0:3], s[2:3], 0x9
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mov_b32 s4, s0
; SI-NEXT: s_mov_b32 s5, s1
; SI-NEXT: v_mov_b32_e32 v0, s2
; SI-NEXT: v_mov_b32_e32 v1, s3
; SI-NEXT: buffer_atomic_cmpswap v[0:1], off, s[4:7], 0 offset:16
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_cmpxchg_i32_offset:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx4 s[0:3], s[2:3], 0x24
; VI-NEXT: s_mov_b32 s7, 0xf000
; VI-NEXT: s_mov_b32 s6, -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s2
; VI-NEXT: s_mov_b32 s4, s0
; VI-NEXT: s_mov_b32 s5, s1
; VI-NEXT: v_mov_b32_e32 v1, s3
; VI-NEXT: buffer_atomic_cmpswap v[0:1], off, s[4:7], 0 offset:16
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_cmpxchg_i32_offset:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x24
; GFX9-NEXT: v_mov_b32_e32 v2, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v0, s6
; GFX9-NEXT: v_mov_b32_e32 v1, s7
; GFX9-NEXT: global_atomic_cmpswap v2, v[0:1], s[4:5] offset:16
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: s_endpgm
entry:
%gep = getelementptr i32, ptr addrspace(1) %out, i64 4
%val = cmpxchg volatile ptr addrspace(1) %gep, i32 %old, i32 %in syncscope("agent") seq_cst seq_cst
ret void
}

define amdgpu_kernel void @atomic_cmpxchg_i32_ret_offset(ptr addrspace(1) %out, ptr addrspace(1) %out2, i32 %in, i32 %old) {
; SI-LABEL: atomic_cmpxchg_i32_ret_offset:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x9
; SI-NEXT: s_load_dwordx2 s[8:9], s[2:3], 0xd
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mov_b32 s0, s6
; SI-NEXT: s_mov_b32 s1, s7
; SI-NEXT: s_mov_b32 s6, s2
; SI-NEXT: s_mov_b32 s7, s3
; SI-NEXT: v_mov_b32_e32 v0, s8
; SI-NEXT: v_mov_b32_e32 v1, s9
; SI-NEXT: buffer_atomic_cmpswap v[0:1], off, s[4:7], 0 offset:16 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_cmpxchg_i32_ret_offset:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x24
; VI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x34
; VI-NEXT: s_mov_b32 s11, 0xf000
; VI-NEXT: s_mov_b32 s10, -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_mov_b32 s8, s6
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: s_mov_b32 s9, s7
; VI-NEXT: s_mov_b32 s6, s10
; VI-NEXT: s_mov_b32 s7, s11
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: buffer_atomic_cmpswap v[0:1], off, s[4:7], 0 offset:16 glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: buffer_store_dword v0, off, s[8:11], 0
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_cmpxchg_i32_ret_offset:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x34
; GFX9-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x24
; GFX9-NEXT: v_mov_b32_e32 v2, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v0, s0
; GFX9-NEXT: v_mov_b32_e32 v1, s1
; GFX9-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[4:5] offset:16 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: global_store_dword v2, v0, s[6:7]
; GFX9-NEXT: s_endpgm
entry:
%gep = getelementptr i32, ptr addrspace(1) %out, i64 4
%val = cmpxchg volatile ptr addrspace(1) %gep, i32 %old, i32 %in syncscope("agent") seq_cst seq_cst
%extract0 = extractvalue { i32, i1 } %val, 0
store i32 %extract0, ptr addrspace(1) %out2
ret void
}

define amdgpu_kernel void @atomic_cmpxchg_i32_addr64_offset(ptr addrspace(1) %out, i32 %in, i64 %index, i32 %old) {
; SI-LABEL: atomic_cmpxchg_i32_addr64_offset:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dword s6, s[2:3], 0xb
; SI-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0xd
; SI-NEXT: s_load_dword s7, s[2:3], 0xf
; SI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_lshl_b64 s[4:5], s[4:5], 2
; SI-NEXT: s_mov_b32 s2, 0
; SI-NEXT: v_mov_b32_e32 v0, s6
; SI-NEXT: v_mov_b32_e32 v1, s7
; SI-NEXT: v_mov_b32_e32 v2, s4
; SI-NEXT: v_mov_b32_e32 v3, s5
; SI-NEXT: buffer_atomic_cmpswap v[0:1], v[2:3], s[0:3], 0 addr64 offset:16
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_cmpxchg_i32_addr64_offset:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x34
; VI-NEXT: s_load_dword s6, s[2:3], 0x2c
; VI-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0x24
; VI-NEXT: s_load_dword s2, s[2:3], 0x3c
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_lshl_b64 s[0:1], s[0:1], 2
; VI-NEXT: v_mov_b32_e32 v0, s6
; VI-NEXT: s_add_u32 s0, s4, s0
; VI-NEXT: s_addc_u32 s1, s5, s1
; VI-NEXT: s_add_u32 s0, s0, 16
; VI-NEXT: s_addc_u32 s1, s1, 0
; VI-NEXT: v_mov_b32_e32 v3, s1
; VI-NEXT: v_mov_b32_e32 v1, s2
; VI-NEXT: v_mov_b32_e32 v2, s0
; VI-NEXT: flat_atomic_cmpswap v[2:3], v[0:1]
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_cmpxchg_i32_addr64_offset:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x34
; GFX9-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0x24
; GFX9-NEXT: s_load_dword s6, s[2:3], 0x2c
; GFX9-NEXT: v_mov_b32_e32 v2, 0
; GFX9-NEXT: s_load_dword s2, s[2:3], 0x3c
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: s_lshl_b64 s[0:1], s[0:1], 2
; GFX9-NEXT: s_add_u32 s0, s4, s0
; GFX9-NEXT: s_addc_u32 s1, s5, s1
; GFX9-NEXT: v_mov_b32_e32 v0, s6
; GFX9-NEXT: v_mov_b32_e32 v1, s2
; GFX9-NEXT: global_atomic_cmpswap v2, v[0:1], s[0:1] offset:16
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: s_endpgm
entry:
%ptr = getelementptr i32, ptr addrspace(1) %out, i64 %index
%gep = getelementptr i32, ptr addrspace(1) %ptr, i64 4
%val = cmpxchg volatile ptr addrspace(1) %gep, i32 %old, i32 %in syncscope("agent") seq_cst seq_cst
ret void
}

define amdgpu_kernel void @atomic_cmpxchg_i32_ret_addr64_offset(ptr addrspace(1) %out, ptr addrspace(1) %out2, i32 %in, i64 %index, i32 %old) {
; SI-LABEL: atomic_cmpxchg_i32_ret_addr64_offset:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x9
; SI-NEXT: s_load_dword s10, s[2:3], 0xd
; SI-NEXT: s_load_dwordx2 s[8:9], s[2:3], 0xf
; SI-NEXT: s_load_dword s2, s[2:3], 0x11
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mov_b32 s0, s6
; SI-NEXT: s_mov_b32 s1, s7
; SI-NEXT: s_lshl_b64 s[8:9], s[8:9], 2
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, s3
; SI-NEXT: v_mov_b32_e32 v0, s10
; SI-NEXT: v_mov_b32_e32 v1, s2
; SI-NEXT: v_mov_b32_e32 v2, s8
; SI-NEXT: v_mov_b32_e32 v3, s9
; SI-NEXT: buffer_atomic_cmpswap v[0:1], v[2:3], s[4:7], 0 addr64 offset:16 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_cmpxchg_i32_ret_addr64_offset:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x3c
; VI-NEXT: s_load_dword s8, s[2:3], 0x34
; VI-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x24
; VI-NEXT: s_load_dword s2, s[2:3], 0x44
; VI-NEXT: s_mov_b32 s3, 0xf000
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_lshl_b64 s[0:1], s[0:1], 2
; VI-NEXT: v_mov_b32_e32 v0, s8
; VI-NEXT: s_add_u32 s0, s4, s0
; VI-NEXT: s_addc_u32 s1, s5, s1
; VI-NEXT: s_add_u32 s0, s0, 16
; VI-NEXT: s_addc_u32 s1, s1, 0
; VI-NEXT: v_mov_b32_e32 v3, s1
; VI-NEXT: v_mov_b32_e32 v1, s2
; VI-NEXT: v_mov_b32_e32 v2, s0
; VI-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: s_mov_b32 s0, s6
; VI-NEXT: s_mov_b32 s1, s7
; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_cmpxchg_i32_ret_addr64_offset:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x3c
; GFX9-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x24
; GFX9-NEXT: s_load_dword s8, s[2:3], 0x34
; GFX9-NEXT: s_load_dword s9, s[2:3], 0x44
; GFX9-NEXT: v_mov_b32_e32 v2, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: s_lshl_b64 s[0:1], s[0:1], 2
; GFX9-NEXT: s_add_u32 s0, s4, s0
; GFX9-NEXT: s_addc_u32 s1, s5, s1
; GFX9-NEXT: v_mov_b32_e32 v0, s8
; GFX9-NEXT: v_mov_b32_e32 v1, s9
; GFX9-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] offset:16 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: global_store_dword v2, v0, s[6:7]
; GFX9-NEXT: s_endpgm
entry:
%ptr = getelementptr i32, ptr addrspace(1) %out, i64 %index
%gep = getelementptr i32, ptr addrspace(1) %ptr, i64 4
%val = cmpxchg volatile ptr addrspace(1) %gep, i32 %old, i32 %in syncscope("agent") seq_cst seq_cst
%extract0 = extractvalue { i32, i1 } %val, 0
store i32 %extract0, ptr addrspace(1) %out2
ret void
}

define amdgpu_kernel void @atomic_cmpxchg_i32(ptr addrspace(1) %out, i32 %in, i32 %old) {
; SI-LABEL: atomic_cmpxchg_i32:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dwordx4 s[0:3], s[2:3], 0x9
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mov_b32 s4, s0
; SI-NEXT: s_mov_b32 s5, s1
; SI-NEXT: v_mov_b32_e32 v0, s2
; SI-NEXT: v_mov_b32_e32 v1, s3
; SI-NEXT: buffer_atomic_cmpswap v[0:1], off, s[4:7], 0
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_cmpxchg_i32:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx4 s[0:3], s[2:3], 0x24
; VI-NEXT: s_mov_b32 s7, 0xf000
; VI-NEXT: s_mov_b32 s6, -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s2
; VI-NEXT: s_mov_b32 s4, s0
; VI-NEXT: s_mov_b32 s5, s1
; VI-NEXT: v_mov_b32_e32 v1, s3
; VI-NEXT: buffer_atomic_cmpswap v[0:1], off, s[4:7], 0
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_cmpxchg_i32:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x24
; GFX9-NEXT: v_mov_b32_e32 v2, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v0, s6
; GFX9-NEXT: v_mov_b32_e32 v1, s7
; GFX9-NEXT: global_atomic_cmpswap v2, v[0:1], s[4:5]
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: s_endpgm
entry:
%val = cmpxchg volatile ptr addrspace(1) %out, i32 %old, i32 %in syncscope("agent") seq_cst seq_cst
ret void
}

define amdgpu_kernel void @atomic_cmpxchg_i32_ret(ptr addrspace(1) %out, ptr addrspace(1) %out2, i32 %in, i32 %old) {
; SI-LABEL: atomic_cmpxchg_i32_ret:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x9
; SI-NEXT: s_load_dwordx2 s[8:9], s[2:3], 0xd
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mov_b32 s0, s4
; SI-NEXT: s_mov_b32 s1, s5
; SI-NEXT: v_mov_b32_e32 v0, s8
; SI-NEXT: v_mov_b32_e32 v1, s9
; SI-NEXT: buffer_atomic_cmpswap v[0:1], off, s[0:3], 0 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: s_mov_b32 s0, s6
; SI-NEXT: s_mov_b32 s1, s7
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_cmpxchg_i32_ret:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x24
; VI-NEXT: s_load_dwordx2 s[8:9], s[2:3], 0x34
; VI-NEXT: s_mov_b32 s3, 0xf000
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_mov_b32 s0, s4
; VI-NEXT: v_mov_b32_e32 v0, s8
; VI-NEXT: s_mov_b32 s1, s5
; VI-NEXT: v_mov_b32_e32 v1, s9
; VI-NEXT: buffer_atomic_cmpswap v[0:1], off, s[0:3], 0 glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_mov_b32 s0, s6
; VI-NEXT: s_mov_b32 s1, s7
; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_cmpxchg_i32_ret:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x34
; GFX9-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x24
; GFX9-NEXT: v_mov_b32_e32 v2, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v0, s0
; GFX9-NEXT: v_mov_b32_e32 v1, s1
; GFX9-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[4:5] glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: global_store_dword v2, v0, s[6:7]
; GFX9-NEXT: s_endpgm
entry:
%val = cmpxchg volatile ptr addrspace(1) %out, i32 %old, i32 %in syncscope("agent") seq_cst seq_cst
%extract0 = extractvalue { i32, i1 } %val, 0
store i32 %extract0, ptr addrspace(1) %out2
ret void
}

define amdgpu_kernel void @atomic_cmpxchg_i32_addr64(ptr addrspace(1) %out, i32 %in, i64 %index, i32 %old) {
; SI-LABEL: atomic_cmpxchg_i32_addr64:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dword s6, s[2:3], 0xb
; SI-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0xd
; SI-NEXT: s_load_dword s7, s[2:3], 0xf
; SI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_lshl_b64 s[4:5], s[4:5], 2
; SI-NEXT: s_mov_b32 s2, 0
; SI-NEXT: v_mov_b32_e32 v0, s6
; SI-NEXT: v_mov_b32_e32 v1, s7
; SI-NEXT: v_mov_b32_e32 v2, s4
; SI-NEXT: v_mov_b32_e32 v3, s5
; SI-NEXT: buffer_atomic_cmpswap v[0:1], v[2:3], s[0:3], 0 addr64
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_cmpxchg_i32_addr64:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x34
; VI-NEXT: s_load_dword s6, s[2:3], 0x2c
; VI-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0x24
; VI-NEXT: s_load_dword s2, s[2:3], 0x3c
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_lshl_b64 s[0:1], s[0:1], 2
; VI-NEXT: v_mov_b32_e32 v0, s6
; VI-NEXT: s_add_u32 s0, s4, s0
; VI-NEXT: s_addc_u32 s1, s5, s1
; VI-NEXT: v_mov_b32_e32 v3, s1
; VI-NEXT: v_mov_b32_e32 v1, s2
; VI-NEXT: v_mov_b32_e32 v2, s0
; VI-NEXT: flat_atomic_cmpswap v[2:3], v[0:1]
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_cmpxchg_i32_addr64:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x34
; GFX9-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0x24
; GFX9-NEXT: s_load_dword s6, s[2:3], 0x2c
; GFX9-NEXT: v_mov_b32_e32 v2, 0
; GFX9-NEXT: s_load_dword s2, s[2:3], 0x3c
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: s_lshl_b64 s[0:1], s[0:1], 2
; GFX9-NEXT: s_add_u32 s0, s4, s0
; GFX9-NEXT: s_addc_u32 s1, s5, s1
; GFX9-NEXT: v_mov_b32_e32 v0, s6
; GFX9-NEXT: v_mov_b32_e32 v1, s2
; GFX9-NEXT: global_atomic_cmpswap v2, v[0:1], s[0:1]
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: s_endpgm
entry:
%ptr = getelementptr i32, ptr addrspace(1) %out, i64 %index
%val = cmpxchg volatile ptr addrspace(1) %ptr, i32 %old, i32 %in syncscope("agent") seq_cst seq_cst
ret void
}

define amdgpu_kernel void @atomic_cmpxchg_i32_ret_addr64(ptr addrspace(1) %out, ptr addrspace(1) %out2, i32 %in, i64 %index, i32 %old) {
; SI-LABEL: atomic_cmpxchg_i32_ret_addr64:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x9
; SI-NEXT: s_load_dword s10, s[2:3], 0xd
; SI-NEXT: s_load_dwordx2 s[8:9], s[2:3], 0xf
; SI-NEXT: s_load_dword s2, s[2:3], 0x11
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mov_b32 s0, s6
; SI-NEXT: s_mov_b32 s1, s7
; SI-NEXT: s_lshl_b64 s[8:9], s[8:9], 2
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, s3
; SI-NEXT: v_mov_b32_e32 v0, s10
; SI-NEXT: v_mov_b32_e32 v1, s2
; SI-NEXT: v_mov_b32_e32 v2, s8
; SI-NEXT: v_mov_b32_e32 v3, s9
; SI-NEXT: buffer_atomic_cmpswap v[0:1], v[2:3], s[4:7], 0 addr64 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_cmpxchg_i32_ret_addr64:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x3c
; VI-NEXT: s_load_dword s8, s[2:3], 0x34
; VI-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x24
; VI-NEXT: s_load_dword s2, s[2:3], 0x44
; VI-NEXT: s_mov_b32 s3, 0xf000
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_lshl_b64 s[0:1], s[0:1], 2
; VI-NEXT: v_mov_b32_e32 v0, s8
; VI-NEXT: s_add_u32 s0, s4, s0
; VI-NEXT: s_addc_u32 s1, s5, s1
; VI-NEXT: v_mov_b32_e32 v3, s1
; VI-NEXT: v_mov_b32_e32 v1, s2
; VI-NEXT: v_mov_b32_e32 v2, s0
; VI-NEXT: flat_atomic_cmpswap v0, v[2:3], v[0:1] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: s_mov_b32 s0, s6
; VI-NEXT: s_mov_b32 s1, s7
; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_cmpxchg_i32_ret_addr64:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x3c
; GFX9-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x24
; GFX9-NEXT: s_load_dword s8, s[2:3], 0x34
; GFX9-NEXT: s_load_dword s9, s[2:3], 0x44
; GFX9-NEXT: v_mov_b32_e32 v2, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: s_lshl_b64 s[0:1], s[0:1], 2
; GFX9-NEXT: s_add_u32 s0, s4, s0
; GFX9-NEXT: s_addc_u32 s1, s5, s1
; GFX9-NEXT: v_mov_b32_e32 v0, s8
; GFX9-NEXT: v_mov_b32_e32 v1, s9
; GFX9-NEXT: global_atomic_cmpswap v0, v2, v[0:1], s[0:1] glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: global_store_dword v2, v0, s[6:7]
; GFX9-NEXT: s_endpgm
entry:
%ptr = getelementptr i32, ptr addrspace(1) %out, i64 %index
%val = cmpxchg volatile ptr addrspace(1) %ptr, i32 %old, i32 %in syncscope("agent") seq_cst seq_cst
%extract0 = extractvalue { i32, i1 } %val, 0
store i32 %extract0, ptr addrspace(1) %out2
ret void
}

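; atomicrmw xor (i32): offset, _ret, and addr64 variants follow.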
define amdgpu_kernel void @atomic_xor_i32_offset(ptr addrspace(1) %out, i32 %in) {
; SI-LABEL: atomic_xor_i32_offset:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dword s4, s[2:3], 0xb
; SI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: buffer_atomic_xor v0, off, s[0:3], 0 offset:16
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_xor_i32_offset:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dword s4, s[2:3], 0x2c
; VI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; VI-NEXT: s_mov_b32 s3, 0xf000
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: buffer_atomic_xor v0, off, s[0:3], 0 offset:16
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_xor_i32_offset:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dword s4, s[2:3], 0x2c
; GFX9-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v1, s4
; GFX9-NEXT: global_atomic_xor v0, v1, s[0:1] offset:16
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: s_endpgm
entry:
%gep = getelementptr i32, ptr addrspace(1) %out, i64 4
%val = atomicrmw volatile xor ptr addrspace(1) %gep, i32 %in syncscope("agent") seq_cst
ret void
}

define amdgpu_kernel void @atomic_xor_i32_ret_offset(ptr addrspace(1) %out, ptr addrspace(1) %out2, i32 %in) {
; SI-LABEL: atomic_xor_i32_ret_offset:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x9
; SI-NEXT: s_load_dword s8, s[2:3], 0xd
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mov_b32 s0, s6
; SI-NEXT: s_mov_b32 s1, s7
; SI-NEXT: s_mov_b32 s6, s2
; SI-NEXT: s_mov_b32 s7, s3
; SI-NEXT: v_mov_b32_e32 v0, s8
; SI-NEXT: buffer_atomic_xor v0, off, s[4:7], 0 offset:16 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_xor_i32_ret_offset:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x24
; VI-NEXT: s_load_dword s0, s[2:3], 0x34
; VI-NEXT: s_mov_b32 s11, 0xf000
; VI-NEXT: s_mov_b32 s10, -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_mov_b32 s8, s6
; VI-NEXT: s_mov_b32 s9, s7
; VI-NEXT: s_mov_b32 s6, s10
; VI-NEXT: s_mov_b32 s7, s11
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: buffer_atomic_xor v0, off, s[4:7], 0 offset:16 glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: buffer_store_dword v0, off, s[8:11], 0
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_xor_i32_ret_offset:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dword s0, s[2:3], 0x34
; GFX9-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x24
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v1, s0
; GFX9-NEXT: global_atomic_xor v1, v0, v1, s[4:5] offset:16 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: global_store_dword v0, v1, s[6:7]
; GFX9-NEXT: s_endpgm
entry:
%gep = getelementptr i32, ptr addrspace(1) %out, i64 4
%val = atomicrmw volatile xor ptr addrspace(1) %gep, i32 %in syncscope("agent") seq_cst
store i32 %val, ptr addrspace(1) %out2
ret void
}

define amdgpu_kernel void @atomic_xor_i32_addr64_offset(ptr addrspace(1) %out, i32 %in, i64 %index) {
; SI-LABEL: atomic_xor_i32_addr64_offset:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0xd
; SI-NEXT: s_load_dword s6, s[2:3], 0xb
; SI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_lshl_b64 s[4:5], s[4:5], 2
; SI-NEXT: s_mov_b32 s2, 0
; SI-NEXT: v_mov_b32_e32 v2, s6
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: v_mov_b32_e32 v1, s5
; SI-NEXT: buffer_atomic_xor v2, v[0:1], s[0:3], 0 addr64 offset:16
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_xor_i32_addr64_offset:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x34
; VI-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0x24
; VI-NEXT: s_load_dword s2, s[2:3], 0x2c
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_lshl_b64 s[0:1], s[0:1], 2
; VI-NEXT: s_add_u32 s0, s4, s0
; VI-NEXT: s_addc_u32 s1, s5, s1
; VI-NEXT: s_add_u32 s0, s0, 16
; VI-NEXT: s_addc_u32 s1, s1, 0
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: v_mov_b32_e32 v2, s2
; VI-NEXT: flat_atomic_xor v[0:1], v2
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_xor_i32_addr64_offset:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x34
; GFX9-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0x24
; GFX9-NEXT: s_load_dword s6, s[2:3], 0x2c
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: s_lshl_b64 s[0:1], s[0:1], 2
; GFX9-NEXT: s_add_u32 s0, s4, s0
; GFX9-NEXT: s_addc_u32 s1, s5, s1
; GFX9-NEXT: v_mov_b32_e32 v1, s6
; GFX9-NEXT: global_atomic_xor v0, v1, s[0:1] offset:16
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: s_endpgm
entry:
%ptr = getelementptr i32, ptr addrspace(1) %out, i64 %index
%gep = getelementptr i32, ptr addrspace(1) %ptr, i64 4
%val = atomicrmw volatile xor ptr addrspace(1) %gep, i32 %in syncscope("agent") seq_cst
ret void
}

define amdgpu_kernel void @atomic_xor_i32_ret_addr64_offset(ptr addrspace(1) %out, ptr addrspace(1) %out2, i32 %in, i64 %index) {
; SI-LABEL: atomic_xor_i32_ret_addr64_offset:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x9
; SI-NEXT: s_load_dwordx2 s[8:9], s[2:3], 0xf
; SI-NEXT: s_load_dword s2, s[2:3], 0xd
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mov_b32 s0, s6
; SI-NEXT: s_mov_b32 s1, s7
; SI-NEXT: s_lshl_b64 s[8:9], s[8:9], 2
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, s3
; SI-NEXT: v_mov_b32_e32 v2, s2
; SI-NEXT: v_mov_b32_e32 v0, s8
; SI-NEXT: v_mov_b32_e32 v1, s9
; SI-NEXT: buffer_atomic_xor v2, v[0:1], s[4:7], 0 addr64 offset:16 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: buffer_store_dword v2, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_xor_i32_ret_addr64_offset:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x3c
; VI-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x24
; VI-NEXT: s_load_dword s2, s[2:3], 0x34
; VI-NEXT: s_mov_b32 s3, 0xf000
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_lshl_b64 s[0:1], s[0:1], 2
; VI-NEXT: s_add_u32 s0, s4, s0
; VI-NEXT: s_addc_u32 s1, s5, s1
; VI-NEXT: s_add_u32 s0, s0, 16
; VI-NEXT: s_addc_u32 s1, s1, 0
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: v_mov_b32_e32 v2, s2
; VI-NEXT: flat_atomic_xor v0, v[0:1], v2 glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: s_mov_b32 s0, s6
; VI-NEXT: s_mov_b32 s1, s7
; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_xor_i32_ret_addr64_offset:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x3c
; GFX9-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x24
; GFX9-NEXT: s_load_dword s8, s[2:3], 0x34
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: s_lshl_b64 s[0:1], s[0:1], 2
; GFX9-NEXT: s_add_u32 s0, s4, s0
; GFX9-NEXT: s_addc_u32 s1, s5, s1
; GFX9-NEXT: v_mov_b32_e32 v1, s8
; GFX9-NEXT: global_atomic_xor v1, v0, v1, s[0:1] offset:16 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: global_store_dword v0, v1, s[6:7]
; GFX9-NEXT: s_endpgm
entry:
%ptr = getelementptr i32, ptr addrspace(1) %out, i64 %index
%gep = getelementptr i32, ptr addrspace(1) %ptr, i64 4
%val = atomicrmw volatile xor ptr addrspace(1) %gep, i32 %in syncscope("agent") seq_cst
store i32 %val, ptr addrspace(1) %out2
ret void
}

define amdgpu_kernel void @atomic_xor_i32(ptr addrspace(1) %out, i32 %in) {
; SI-LABEL: atomic_xor_i32:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dword s4, s[2:3], 0xb
; SI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: buffer_atomic_xor v0, off, s[0:3], 0
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_xor_i32:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dword s4, s[2:3], 0x2c
; VI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; VI-NEXT: s_mov_b32 s3, 0xf000
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: buffer_atomic_xor v0, off, s[0:3], 0
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_xor_i32:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dword s4, s[2:3], 0x2c
; GFX9-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v1, s4
; GFX9-NEXT: global_atomic_xor v0, v1, s[0:1]
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: s_endpgm
entry:
%val = atomicrmw volatile xor ptr addrspace(1) %out, i32 %in syncscope("agent") seq_cst
ret void
}

define amdgpu_kernel void @atomic_xor_i32_ret(ptr addrspace(1) %out, ptr addrspace(1) %out2, i32 %in) {
; SI-LABEL: atomic_xor_i32_ret:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x9
; SI-NEXT: s_load_dword s8, s[2:3], 0xd
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mov_b32 s0, s4
; SI-NEXT: s_mov_b32 s1, s5
; SI-NEXT: v_mov_b32_e32 v0, s8
; SI-NEXT: buffer_atomic_xor v0, off, s[0:3], 0 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: s_mov_b32 s0, s6
; SI-NEXT: s_mov_b32 s1, s7
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_xor_i32_ret:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x24
; VI-NEXT: s_load_dword s8, s[2:3], 0x34
; VI-NEXT: s_mov_b32 s3, 0xf000
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_mov_b32 s0, s4
; VI-NEXT: s_mov_b32 s1, s5
; VI-NEXT: v_mov_b32_e32 v0, s8
; VI-NEXT: buffer_atomic_xor v0, off, s[0:3], 0 glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_mov_b32 s0, s6
; VI-NEXT: s_mov_b32 s1, s7
; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_xor_i32_ret:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dword s0, s[2:3], 0x34
; GFX9-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x24
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v1, s0
; GFX9-NEXT: global_atomic_xor v1, v0, v1, s[4:5] glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: global_store_dword v0, v1, s[6:7]
; GFX9-NEXT: s_endpgm
entry:
%val = atomicrmw volatile xor ptr addrspace(1) %out, i32 %in syncscope("agent") seq_cst
store i32 %val, ptr addrspace(1) %out2
ret void
}

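; For the _addr64 variants the i64 %index is scaled to a byte offset with
; s_lshl_b64 ..., 2. SI folds it into buffer addr64 addressing through a VGPR
; pair; VI and GFX9 add it to the base pointer with s_add_u32/s_addc_u32.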
define amdgpu_kernel void @atomic_xor_i32_addr64(ptr addrspace(1) %out, i32 %in, i64 %index) {
; SI-LABEL: atomic_xor_i32_addr64:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0xd
; SI-NEXT: s_load_dword s6, s[2:3], 0xb
; SI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_lshl_b64 s[4:5], s[4:5], 2
; SI-NEXT: s_mov_b32 s2, 0
; SI-NEXT: v_mov_b32_e32 v2, s6
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: v_mov_b32_e32 v1, s5
; SI-NEXT: buffer_atomic_xor v2, v[0:1], s[0:3], 0 addr64
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_xor_i32_addr64:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x34
; VI-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0x24
; VI-NEXT: s_load_dword s2, s[2:3], 0x2c
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_lshl_b64 s[0:1], s[0:1], 2
; VI-NEXT: s_add_u32 s0, s4, s0
; VI-NEXT: s_addc_u32 s1, s5, s1
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: v_mov_b32_e32 v2, s2
; VI-NEXT: flat_atomic_xor v[0:1], v2
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_xor_i32_addr64:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x34
; GFX9-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0x24
; GFX9-NEXT: s_load_dword s6, s[2:3], 0x2c
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: s_lshl_b64 s[0:1], s[0:1], 2
; GFX9-NEXT: s_add_u32 s0, s4, s0
; GFX9-NEXT: s_addc_u32 s1, s5, s1
; GFX9-NEXT: v_mov_b32_e32 v1, s6
; GFX9-NEXT: global_atomic_xor v0, v1, s[0:1]
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: s_endpgm
entry:
%ptr = getelementptr i32, ptr addrspace(1) %out, i64 %index
%val = atomicrmw volatile xor ptr addrspace(1) %ptr, i32 %in syncscope("agent") seq_cst
ret void
}

define amdgpu_kernel void @atomic_xor_i32_ret_addr64(ptr addrspace(1) %out, ptr addrspace(1) %out2, i32 %in, i64 %index) {
; SI-LABEL: atomic_xor_i32_ret_addr64:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x9
; SI-NEXT: s_load_dwordx2 s[8:9], s[2:3], 0xf
; SI-NEXT: s_load_dword s2, s[2:3], 0xd
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mov_b32 s0, s6
; SI-NEXT: s_mov_b32 s1, s7
; SI-NEXT: s_lshl_b64 s[8:9], s[8:9], 2
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, s3
; SI-NEXT: v_mov_b32_e32 v2, s2
; SI-NEXT: v_mov_b32_e32 v0, s8
; SI-NEXT: v_mov_b32_e32 v1, s9
; SI-NEXT: buffer_atomic_xor v2, v[0:1], s[4:7], 0 addr64 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: buffer_store_dword v2, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_xor_i32_ret_addr64:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x3c
; VI-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x24
; VI-NEXT: s_load_dword s2, s[2:3], 0x34
; VI-NEXT: s_mov_b32 s3, 0xf000
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_lshl_b64 s[0:1], s[0:1], 2
; VI-NEXT: s_add_u32 s0, s4, s0
; VI-NEXT: s_addc_u32 s1, s5, s1
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: v_mov_b32_e32 v2, s2
; VI-NEXT: flat_atomic_xor v0, v[0:1], v2 glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: s_mov_b32 s0, s6
; VI-NEXT: s_mov_b32 s1, s7
; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_xor_i32_ret_addr64:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x3c
; GFX9-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x24
; GFX9-NEXT: s_load_dword s8, s[2:3], 0x34
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: s_lshl_b64 s[0:1], s[0:1], 2
; GFX9-NEXT: s_add_u32 s0, s4, s0
; GFX9-NEXT: s_addc_u32 s1, s5, s1
; GFX9-NEXT: v_mov_b32_e32 v1, s8
; GFX9-NEXT: global_atomic_xor v1, v0, v1, s[0:1] glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: global_store_dword v0, v1, s[6:7]
; GFX9-NEXT: s_endpgm
entry:
%ptr = getelementptr i32, ptr addrspace(1) %out, i64 %index
%val = atomicrmw volatile xor ptr addrspace(1) %ptr, i32 %in syncscope("agent") seq_cst
store i32 %val, ptr addrspace(1) %out2
ret void
}

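; seq_cst atomic loads are selected as loads with the glc bit set (read from
; L2, bypassing any stale L1 line) followed by an L1 invalidate
; (buffer_wbinvl1 / buffer_wbinvl1_vol) to provide the acquire ordering.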
define amdgpu_kernel void @atomic_load_i32_offset(ptr addrspace(1) %in, ptr addrspace(1) %out) {
; SI-LABEL: atomic_load_i32_offset:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dwordx4 s[0:3], s[2:3], 0x9
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mov_b32 s4, s2
; SI-NEXT: s_mov_b32 s5, s3
; SI-NEXT: s_mov_b32 s2, s6
; SI-NEXT: s_mov_b32 s3, s7
; SI-NEXT: buffer_load_dword v0, off, s[0:3], 0 offset:16 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: buffer_store_dword v0, off, s[4:7], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_load_i32_offset:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx4 s[0:3], s[2:3], 0x24
; VI-NEXT: s_mov_b32 s7, 0xf000
; VI-NEXT: s_mov_b32 s6, -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_add_u32 s0, s0, 16
; VI-NEXT: s_addc_u32 s1, s1, 0
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: flat_load_dword v0, v[0:1] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_mov_b32 s4, s2
; VI-NEXT: s_mov_b32 s5, s3
; VI-NEXT: buffer_store_dword v0, off, s[4:7], 0
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_load_i32_offset:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x24
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: global_load_dword v1, v0, s[4:5] offset:16 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: global_store_dword v0, v1, s[6:7]
; GFX9-NEXT: s_endpgm
entry:
%gep = getelementptr i32, ptr addrspace(1) %in, i64 4
%val = load atomic i32, ptr addrspace(1) %gep seq_cst, align 4
store i32 %val, ptr addrspace(1) %out
ret void
}

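; Negative displacements: the SI buffer immediate offset is unsigned, so the
; -512 bytes are materialized in a VGPR pair and applied via addr64; VI
; computes the address with a scalar add; GFX9's signed global offset folds
; it directly as offset:-512.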
define amdgpu_kernel void @atomic_load_i32_negoffset(ptr addrspace(1) %in, ptr addrspace(1) %out) {
; SI-LABEL: atomic_load_i32_negoffset:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dwordx4 s[0:3], s[2:3], 0x9
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mov_b32 s4, s2
; SI-NEXT: s_mov_b32 s5, s3
; SI-NEXT: s_mov_b32 s2, 0
; SI-NEXT: s_mov_b32 s3, s7
; SI-NEXT: v_mov_b32_e32 v0, 0xfffffe00
; SI-NEXT: v_mov_b32_e32 v1, -1
; SI-NEXT: buffer_load_dword v0, v[0:1], s[0:3], 0 addr64 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: buffer_store_dword v0, off, s[4:7], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_load_i32_negoffset:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx4 s[0:3], s[2:3], 0x24
; VI-NEXT: s_mov_b32 s7, 0xf000
; VI-NEXT: s_mov_b32 s6, -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_add_u32 s0, s0, 0xfffffe00
; VI-NEXT: s_addc_u32 s1, s1, -1
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: flat_load_dword v0, v[0:1] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_mov_b32 s4, s2
; VI-NEXT: s_mov_b32 s5, s3
; VI-NEXT: buffer_store_dword v0, off, s[4:7], 0
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_load_i32_negoffset:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x24
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: global_load_dword v1, v0, s[4:5] offset:-512 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: global_store_dword v0, v1, s[6:7]
; GFX9-NEXT: s_endpgm
entry:
%gep = getelementptr i32, ptr addrspace(1) %in, i64 -128
%val = load atomic i32, ptr addrspace(1) %gep seq_cst, align 4
store i32 %val, ptr addrspace(1) %out
ret void
}

define amdgpu_kernel void @atomic_load_f32_offset(ptr addrspace(1) %in, ptr addrspace(1) %out) {
; SI-LABEL: atomic_load_f32_offset:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dwordx4 s[0:3], s[2:3], 0x9
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mov_b32 s4, s2
; SI-NEXT: s_mov_b32 s5, s3
; SI-NEXT: s_mov_b32 s2, s6
; SI-NEXT: s_mov_b32 s3, s7
; SI-NEXT: buffer_load_dword v0, off, s[0:3], 0 offset:16 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: buffer_store_dword v0, off, s[4:7], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_load_f32_offset:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx4 s[0:3], s[2:3], 0x24
; VI-NEXT: s_mov_b32 s7, 0xf000
; VI-NEXT: s_mov_b32 s6, -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_add_u32 s0, s0, 16
; VI-NEXT: s_addc_u32 s1, s1, 0
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: flat_load_dword v0, v[0:1] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_mov_b32 s4, s2
; VI-NEXT: s_mov_b32 s5, s3
; VI-NEXT: buffer_store_dword v0, off, s[4:7], 0
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_load_f32_offset:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x24
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: global_load_dword v1, v0, s[4:5] offset:16 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: global_store_dword v0, v1, s[6:7]
; GFX9-NEXT: s_endpgm
entry:
%gep = getelementptr float, ptr addrspace(1) %in, i64 4
%val = load atomic float, ptr addrspace(1) %gep seq_cst, align 4
store float %val, ptr addrspace(1) %out
ret void
}

define amdgpu_kernel void @atomic_load_i32(ptr addrspace(1) %in, ptr addrspace(1) %out) {
; SI-LABEL: atomic_load_i32:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dwordx4 s[0:3], s[2:3], 0x9
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mov_b32 s4, s0
; SI-NEXT: s_mov_b32 s5, s1
; SI-NEXT: buffer_load_dword v0, off, s[4:7], 0 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: s_mov_b32 s4, s2
; SI-NEXT: s_mov_b32 s5, s3
; SI-NEXT: buffer_store_dword v0, off, s[4:7], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_load_i32:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx4 s[0:3], s[2:3], 0x24
; VI-NEXT: s_mov_b32 s7, 0xf000
; VI-NEXT: s_mov_b32 s6, -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: flat_load_dword v0, v[0:1] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_mov_b32 s4, s2
; VI-NEXT: s_mov_b32 s5, s3
; VI-NEXT: buffer_store_dword v0, off, s[4:7], 0
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_load_i32:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x24
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: global_load_dword v1, v0, s[4:5] glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: global_store_dword v0, v1, s[6:7]
; GFX9-NEXT: s_endpgm
entry:
%val = load atomic i32, ptr addrspace(1) %in syncscope("agent") seq_cst, align 4
store i32 %val, ptr addrspace(1) %out
ret void
}

define amdgpu_kernel void @atomic_load_i32_addr64_offset(ptr addrspace(1) %in, ptr addrspace(1) %out, i64 %index) {
; SI-LABEL: atomic_load_i32_addr64_offset:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x9
; SI-NEXT: s_load_dwordx2 s[8:9], s[2:3], 0xd
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mov_b32 s0, s6
; SI-NEXT: s_mov_b32 s1, s7
; SI-NEXT: s_lshl_b64 s[8:9], s[8:9], 2
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, s3
; SI-NEXT: v_mov_b32_e32 v0, s8
; SI-NEXT: v_mov_b32_e32 v1, s9
; SI-NEXT: buffer_load_dword v0, v[0:1], s[4:7], 0 addr64 offset:16 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_load_i32_addr64_offset:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0x34
; VI-NEXT: s_load_dwordx4 s[0:3], s[2:3], 0x24
; VI-NEXT: s_mov_b32 s7, 0xf000
; VI-NEXT: s_mov_b32 s6, -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_lshl_b64 s[4:5], s[4:5], 2
; VI-NEXT: s_add_u32 s0, s0, s4
; VI-NEXT: s_addc_u32 s1, s1, s5
; VI-NEXT: s_add_u32 s0, s0, 16
; VI-NEXT: s_addc_u32 s1, s1, 0
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: flat_load_dword v0, v[0:1] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_mov_b32 s4, s2
; VI-NEXT: s_mov_b32 s5, s3
; VI-NEXT: buffer_store_dword v0, off, s[4:7], 0
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_load_i32_addr64_offset:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x34
; GFX9-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x24
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: s_lshl_b64 s[0:1], s[0:1], 2
; GFX9-NEXT: s_add_u32 s0, s4, s0
; GFX9-NEXT: s_addc_u32 s1, s5, s1
; GFX9-NEXT: global_load_dword v1, v0, s[0:1] offset:16 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: global_store_dword v0, v1, s[6:7]
; GFX9-NEXT: s_endpgm
entry:
%ptr = getelementptr i32, ptr addrspace(1) %in, i64 %index
%gep = getelementptr i32, ptr addrspace(1) %ptr, i64 4
%val = load atomic i32, ptr addrspace(1) %gep seq_cst, align 4
store i32 %val, ptr addrspace(1) %out
ret void
}

define amdgpu_kernel void @atomic_load_i32_addr64(ptr addrspace(1) %in, ptr addrspace(1) %out, i64 %index) {
; SI-LABEL: atomic_load_i32_addr64:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x9
; SI-NEXT: s_load_dwordx2 s[8:9], s[2:3], 0xd
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mov_b32 s0, s6
; SI-NEXT: s_mov_b32 s1, s7
; SI-NEXT: s_lshl_b64 s[8:9], s[8:9], 2
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, s3
; SI-NEXT: v_mov_b32_e32 v0, s8
; SI-NEXT: v_mov_b32_e32 v1, s9
; SI-NEXT: buffer_load_dword v0, v[0:1], s[4:7], 0 addr64 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_load_i32_addr64:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0x34
; VI-NEXT: s_load_dwordx4 s[0:3], s[2:3], 0x24
; VI-NEXT: s_mov_b32 s7, 0xf000
; VI-NEXT: s_mov_b32 s6, -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_lshl_b64 s[4:5], s[4:5], 2
; VI-NEXT: s_add_u32 s0, s0, s4
; VI-NEXT: s_addc_u32 s1, s1, s5
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: flat_load_dword v0, v[0:1] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_mov_b32 s4, s2
; VI-NEXT: s_mov_b32 s5, s3
; VI-NEXT: buffer_store_dword v0, off, s[4:7], 0
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_load_i32_addr64:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x34
; GFX9-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x24
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: s_lshl_b64 s[0:1], s[0:1], 2
; GFX9-NEXT: s_add_u32 s0, s4, s0
; GFX9-NEXT: s_addc_u32 s1, s5, s1
; GFX9-NEXT: global_load_dword v1, v0, s[0:1] glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: global_store_dword v0, v1, s[6:7]
; GFX9-NEXT: s_endpgm
entry:
%ptr = getelementptr i32, ptr addrspace(1) %in, i64 %index
%val = load atomic i32, ptr addrspace(1) %ptr seq_cst, align 4
store i32 %val, ptr addrspace(1) %out
ret void
}

define amdgpu_kernel void @atomic_load_f32_addr64_offset(ptr addrspace(1) %in, ptr addrspace(1) %out, i64 %index) {
; SI-LABEL: atomic_load_f32_addr64_offset:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x9
; SI-NEXT: s_load_dwordx2 s[8:9], s[2:3], 0xd
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mov_b32 s0, s6
; SI-NEXT: s_mov_b32 s1, s7
; SI-NEXT: s_lshl_b64 s[8:9], s[8:9], 2
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, s3
; SI-NEXT: v_mov_b32_e32 v0, s8
; SI-NEXT: v_mov_b32_e32 v1, s9
; SI-NEXT: buffer_load_dword v0, v[0:1], s[4:7], 0 addr64 offset:16 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_load_f32_addr64_offset:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0x34
; VI-NEXT: s_load_dwordx4 s[0:3], s[2:3], 0x24
; VI-NEXT: s_mov_b32 s7, 0xf000
; VI-NEXT: s_mov_b32 s6, -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_lshl_b64 s[4:5], s[4:5], 2
; VI-NEXT: s_add_u32 s0, s0, s4
; VI-NEXT: s_addc_u32 s1, s1, s5
; VI-NEXT: s_add_u32 s0, s0, 16
; VI-NEXT: s_addc_u32 s1, s1, 0
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: flat_load_dword v0, v[0:1] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_mov_b32 s4, s2
; VI-NEXT: s_mov_b32 s5, s3
; VI-NEXT: buffer_store_dword v0, off, s[4:7], 0
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_load_f32_addr64_offset:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x34
; GFX9-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x24
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: s_lshl_b64 s[0:1], s[0:1], 2
; GFX9-NEXT: s_add_u32 s0, s4, s0
; GFX9-NEXT: s_addc_u32 s1, s5, s1
; GFX9-NEXT: global_load_dword v1, v0, s[0:1] offset:16 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: global_store_dword v0, v1, s[6:7]
; GFX9-NEXT: s_endpgm
entry:
%ptr = getelementptr float, ptr addrspace(1) %in, i64 %index
%gep = getelementptr float, ptr addrspace(1) %ptr, i64 4
%val = load atomic float, ptr addrspace(1) %gep seq_cst, align 4
store float %val, ptr addrspace(1) %out
ret void
}

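; seq_cst atomic stores lower to ordinary stores here: no cache writeback is
; emitted since the vector L1 on these targets is write-through, and with no
; earlier memory operations in the kernel there is nothing for a release
; s_waitcnt to wait on.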
define amdgpu_kernel void @atomic_store_i32_offset(i32 %in, ptr addrspace(1) %out) {
; SI-LABEL: atomic_store_i32_offset:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dword s4, s[2:3], 0x9
; SI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0xb
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0 offset:16
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_store_i32_offset:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x2c
; VI-NEXT: s_load_dword s2, s[2:3], 0x24
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_add_u32 s0, s0, 16
; VI-NEXT: s_addc_u32 s1, s1, 0
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: v_mov_b32_e32 v2, s2
; VI-NEXT: flat_store_dword v[0:1], v2
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_store_i32_offset:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dword s4, s[2:3], 0x24
; GFX9-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x2c
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v1, s4
; GFX9-NEXT: global_store_dword v0, v1, s[0:1] offset:16
; GFX9-NEXT: s_endpgm
entry:
%gep = getelementptr i32, ptr addrspace(1) %out, i64 4
store atomic i32 %in, ptr addrspace(1) %gep seq_cst, align 4
ret void
}

define amdgpu_kernel void @atomic_store_i32(i32 %in, ptr addrspace(1) %out) {
; SI-LABEL: atomic_store_i32:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dword s4, s[2:3], 0x9
; SI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0xb
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_store_i32:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x2c
; VI-NEXT: s_load_dword s2, s[2:3], 0x24
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: v_mov_b32_e32 v2, s2
; VI-NEXT: flat_store_dword v[0:1], v2
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_store_i32:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dword s4, s[2:3], 0x24
; GFX9-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x2c
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v1, s4
; GFX9-NEXT: global_store_dword v0, v1, s[0:1]
; GFX9-NEXT: s_endpgm
entry:
store atomic i32 %in, ptr addrspace(1) %out seq_cst, align 4
ret void
}

define amdgpu_kernel void @atomic_store_f32(float %in, ptr addrspace(1) %out) {
; SI-LABEL: atomic_store_f32:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dword s4, s[2:3], 0x9
; SI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0xb
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_store_f32:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x2c
; VI-NEXT: s_load_dword s2, s[2:3], 0x24
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: v_mov_b32_e32 v2, s2
; VI-NEXT: flat_store_dword v[0:1], v2
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_store_f32:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dword s4, s[2:3], 0x24
; GFX9-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x2c
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v1, s4
; GFX9-NEXT: global_store_dword v0, v1, s[0:1]
; GFX9-NEXT: s_endpgm
entry:
store atomic float %in, ptr addrspace(1) %out seq_cst, align 4
ret void
}

define amdgpu_kernel void @atomic_store_i32_addr64_offset(i32 %in, ptr addrspace(1) %out, i64 %index) {
; SI-LABEL: atomic_store_i32_addr64_offset:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0xb
; SI-NEXT: s_load_dword s2, s[2:3], 0x9
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_lshl_b64 s[0:1], s[6:7], 2
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: v_mov_b32_e32 v2, s2
; SI-NEXT: v_mov_b32_e32 v0, s0
; SI-NEXT: v_mov_b32_e32 v1, s1
; SI-NEXT: buffer_store_dword v2, v[0:1], s[4:7], 0 addr64 offset:16
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_store_i32_addr64_offset:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x2c
; VI-NEXT: s_load_dword s2, s[2:3], 0x24
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_lshl_b64 s[0:1], s[6:7], 2
; VI-NEXT: s_add_u32 s0, s4, s0
; VI-NEXT: s_addc_u32 s1, s5, s1
; VI-NEXT: s_add_u32 s0, s0, 16
; VI-NEXT: s_addc_u32 s1, s1, 0
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: v_mov_b32_e32 v2, s2
; VI-NEXT: flat_store_dword v[0:1], v2
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_store_i32_addr64_offset:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x2c
; GFX9-NEXT: s_load_dword s8, s[2:3], 0x24
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: s_lshl_b64 s[0:1], s[6:7], 2
; GFX9-NEXT: s_add_u32 s0, s4, s0
; GFX9-NEXT: s_addc_u32 s1, s5, s1
; GFX9-NEXT: v_mov_b32_e32 v1, s8
; GFX9-NEXT: global_store_dword v0, v1, s[0:1] offset:16
; GFX9-NEXT: s_endpgm
entry:
%ptr = getelementptr i32, ptr addrspace(1) %out, i64 %index
%gep = getelementptr i32, ptr addrspace(1) %ptr, i64 4
store atomic i32 %in, ptr addrspace(1) %gep seq_cst, align 4
ret void
}

define amdgpu_kernel void @atomic_store_f32_addr64_offset(float %in, ptr addrspace(1) %out, i64 %index) {
; SI-LABEL: atomic_store_f32_addr64_offset:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0xb
; SI-NEXT: s_load_dword s2, s[2:3], 0x9
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_lshl_b64 s[0:1], s[6:7], 2
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: v_mov_b32_e32 v2, s2
; SI-NEXT: v_mov_b32_e32 v0, s0
; SI-NEXT: v_mov_b32_e32 v1, s1
; SI-NEXT: buffer_store_dword v2, v[0:1], s[4:7], 0 addr64 offset:16
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_store_f32_addr64_offset:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x2c
; VI-NEXT: s_load_dword s2, s[2:3], 0x24
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_lshl_b64 s[0:1], s[6:7], 2
; VI-NEXT: s_add_u32 s0, s4, s0
; VI-NEXT: s_addc_u32 s1, s5, s1
; VI-NEXT: s_add_u32 s0, s0, 16
; VI-NEXT: s_addc_u32 s1, s1, 0
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: v_mov_b32_e32 v2, s2
; VI-NEXT: flat_store_dword v[0:1], v2
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_store_f32_addr64_offset:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x2c
; GFX9-NEXT: s_load_dword s8, s[2:3], 0x24
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: s_lshl_b64 s[0:1], s[6:7], 2
; GFX9-NEXT: s_add_u32 s0, s4, s0
; GFX9-NEXT: s_addc_u32 s1, s5, s1
; GFX9-NEXT: v_mov_b32_e32 v1, s8
; GFX9-NEXT: global_store_dword v0, v1, s[0:1] offset:16
; GFX9-NEXT: s_endpgm
entry:
%ptr = getelementptr float, ptr addrspace(1) %out, i64 %index
%gep = getelementptr float, ptr addrspace(1) %ptr, i64 4
store atomic float %in, ptr addrspace(1) %gep seq_cst, align 4
ret void
}

define amdgpu_kernel void @atomic_store_i32_addr64(i32 %in, ptr addrspace(1) %out, i64 %index) {
; SI-LABEL: atomic_store_i32_addr64:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0xb
; SI-NEXT: s_load_dword s8, s[2:3], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_lshl_b64 s[6:7], s[6:7], 2
; SI-NEXT: s_mov_b32 s2, 0
; SI-NEXT: s_mov_b64 s[0:1], s[4:5]
; SI-NEXT: v_mov_b32_e32 v2, s8
; SI-NEXT: v_mov_b32_e32 v0, s6
; SI-NEXT: v_mov_b32_e32 v1, s7
; SI-NEXT: buffer_store_dword v2, v[0:1], s[0:3], 0 addr64
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_store_i32_addr64:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x2c
; VI-NEXT: s_load_dword s2, s[2:3], 0x24
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_lshl_b64 s[0:1], s[6:7], 2
; VI-NEXT: s_add_u32 s0, s4, s0
; VI-NEXT: s_addc_u32 s1, s5, s1
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: v_mov_b32_e32 v2, s2
; VI-NEXT: flat_store_dword v[0:1], v2
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_store_i32_addr64:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x2c
; GFX9-NEXT: s_load_dword s8, s[2:3], 0x24
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: s_lshl_b64 s[0:1], s[6:7], 2
; GFX9-NEXT: s_add_u32 s0, s4, s0
; GFX9-NEXT: s_addc_u32 s1, s5, s1
; GFX9-NEXT: v_mov_b32_e32 v1, s8
; GFX9-NEXT: global_store_dword v0, v1, s[0:1]
; GFX9-NEXT: s_endpgm
entry:
%ptr = getelementptr i32, ptr addrspace(1) %out, i64 %index
store atomic i32 %in, ptr addrspace(1) %ptr seq_cst, align 4
ret void
}

define amdgpu_kernel void @atomic_store_f32_addr64(float %in, ptr addrspace(1) %out, i64 %index) {
; SI-LABEL: atomic_store_f32_addr64:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0xb
; SI-NEXT: s_load_dword s8, s[2:3], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_lshl_b64 s[6:7], s[6:7], 2
; SI-NEXT: s_mov_b32 s2, 0
; SI-NEXT: s_mov_b64 s[0:1], s[4:5]
; SI-NEXT: v_mov_b32_e32 v2, s8
; SI-NEXT: v_mov_b32_e32 v0, s6
; SI-NEXT: v_mov_b32_e32 v1, s7
; SI-NEXT: buffer_store_dword v2, v[0:1], s[0:3], 0 addr64
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_store_f32_addr64:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x2c
; VI-NEXT: s_load_dword s2, s[2:3], 0x24
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_lshl_b64 s[0:1], s[6:7], 2
; VI-NEXT: s_add_u32 s0, s4, s0
; VI-NEXT: s_addc_u32 s1, s5, s1
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: v_mov_b32_e32 v2, s2
; VI-NEXT: flat_store_dword v[0:1], v2
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_store_f32_addr64:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x2c
; GFX9-NEXT: s_load_dword s8, s[2:3], 0x24
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: s_lshl_b64 s[0:1], s[6:7], 2
; GFX9-NEXT: s_add_u32 s0, s4, s0
; GFX9-NEXT: s_addc_u32 s1, s5, s1
; GFX9-NEXT: v_mov_b32_e32 v1, s8
; GFX9-NEXT: global_store_dword v0, v1, s[0:1]
; GFX9-NEXT: s_endpgm
entry:
%ptr = getelementptr float, ptr addrspace(1) %out, i64 %index
store atomic float %in, ptr addrspace(1) %ptr seq_cst, align 4
ret void
}

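; Sub-dword cases: naturally aligned i8/i16 (and half/bfloat) accesses are
; already atomic on these targets, so they select to plain ubyte/ushort loads
; and byte/short stores, with the same glc + L1-invalidate sequence for the
; seq_cst loads.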
define amdgpu_kernel void @atomic_load_i8_offset(ptr addrspace(1) %in, ptr addrspace(1) %out) {
; SI-LABEL: atomic_load_i8_offset:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dwordx4 s[0:3], s[2:3], 0x9
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mov_b32 s4, s2
; SI-NEXT: s_mov_b32 s5, s3
; SI-NEXT: s_mov_b32 s2, s6
; SI-NEXT: s_mov_b32 s3, s7
; SI-NEXT: buffer_load_ubyte v0, off, s[0:3], 0 offset:16 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: buffer_store_byte v0, off, s[4:7], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_load_i8_offset:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx4 s[0:3], s[2:3], 0x24
; VI-NEXT: s_mov_b32 s7, 0xf000
; VI-NEXT: s_mov_b32 s6, -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_mov_b32 s4, s2
; VI-NEXT: s_mov_b32 s5, s3
; VI-NEXT: s_mov_b32 s2, s6
; VI-NEXT: s_mov_b32 s3, s7
; VI-NEXT: buffer_load_ubyte v0, off, s[0:3], 0 offset:16 glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: buffer_store_byte v0, off, s[4:7], 0
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_load_i8_offset:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x24
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: global_load_ubyte v1, v0, s[4:5] offset:16 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: global_store_byte v0, v1, s[6:7]
; GFX9-NEXT: s_endpgm
entry:
%gep = getelementptr i8, ptr addrspace(1) %in, i64 16
%val = load atomic i8, ptr addrspace(1) %gep seq_cst, align 1
store i8 %val, ptr addrspace(1) %out
ret void
}

define amdgpu_kernel void @atomic_load_i8_negoffset(ptr addrspace(1) %in, ptr addrspace(1) %out) {
; SI-LABEL: atomic_load_i8_negoffset:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dwordx4 s[0:3], s[2:3], 0x9
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mov_b32 s4, s2
; SI-NEXT: s_mov_b32 s5, s3
; SI-NEXT: s_mov_b32 s2, 0
; SI-NEXT: s_mov_b32 s3, s7
; SI-NEXT: v_mov_b32_e32 v0, 0xfffffe00
; SI-NEXT: v_mov_b32_e32 v1, -1
; SI-NEXT: buffer_load_ubyte v0, v[0:1], s[0:3], 0 addr64 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: buffer_store_byte v0, off, s[4:7], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_load_i8_negoffset:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx4 s[0:3], s[2:3], 0x24
; VI-NEXT: s_mov_b32 s7, 0xf000
; VI-NEXT: s_mov_b32 s6, -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_add_u32 s0, s0, 0xfffffe00
; VI-NEXT: s_addc_u32 s1, s1, -1
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: flat_load_ubyte v0, v[0:1] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_mov_b32 s4, s2
; VI-NEXT: s_mov_b32 s5, s3
; VI-NEXT: buffer_store_byte v0, off, s[4:7], 0
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_load_i8_negoffset:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x24
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: global_load_ubyte v1, v0, s[4:5] offset:-512 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: global_store_byte v0, v1, s[6:7]
; GFX9-NEXT: s_endpgm
entry:
%gep = getelementptr i8, ptr addrspace(1) %in, i64 -512
%val = load atomic i8, ptr addrspace(1) %gep seq_cst, align 1
store i8 %val, ptr addrspace(1) %out
ret void
}

define amdgpu_kernel void @atomic_store_i8_offset(i8 %in, ptr addrspace(1) %out) {
; SI-LABEL: atomic_store_i8_offset:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dword s4, s[2:3], 0x9
; SI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0xb
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: buffer_store_byte v0, off, s[0:3], 0 offset:16
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_store_i8_offset:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x2c
; VI-NEXT: s_load_dword s2, s[2:3], 0x24
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_add_u32 s0, s0, 16
; VI-NEXT: s_addc_u32 s1, s1, 0
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: v_mov_b32_e32 v2, s2
; VI-NEXT: flat_store_byte v[0:1], v2
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_store_i8_offset:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dword s4, s[2:3], 0x24
; GFX9-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x2c
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v1, s4
; GFX9-NEXT: global_store_byte v0, v1, s[0:1] offset:16
; GFX9-NEXT: s_endpgm
entry:
%gep = getelementptr i8, ptr addrspace(1) %out, i64 16
store atomic i8 %in, ptr addrspace(1) %gep seq_cst, align 1
ret void
}

define amdgpu_kernel void @atomic_store_i8(i8 %in, ptr addrspace(1) %out) {
; SI-LABEL: atomic_store_i8:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dword s4, s[2:3], 0x9
; SI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0xb
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: buffer_store_byte v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_store_i8:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x2c
; VI-NEXT: s_load_dword s2, s[2:3], 0x24
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: v_mov_b32_e32 v2, s2
; VI-NEXT: flat_store_byte v[0:1], v2
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_store_i8:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dword s4, s[2:3], 0x24
; GFX9-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x2c
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v1, s4
; GFX9-NEXT: global_store_byte v0, v1, s[0:1]
; GFX9-NEXT: s_endpgm
entry:
store atomic i8 %in, ptr addrspace(1) %out seq_cst, align 1
ret void
}

define amdgpu_kernel void @atomic_load_i16_offset(ptr addrspace(1) %in, ptr addrspace(1) %out) {
; SI-LABEL: atomic_load_i16_offset:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dwordx4 s[0:3], s[2:3], 0x9
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mov_b32 s4, s2
; SI-NEXT: s_mov_b32 s5, s3
; SI-NEXT: s_mov_b32 s2, s6
; SI-NEXT: s_mov_b32 s3, s7
; SI-NEXT: buffer_load_ushort v0, off, s[0:3], 0 offset:16 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: buffer_store_short v0, off, s[4:7], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_load_i16_offset:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx4 s[0:3], s[2:3], 0x24
; VI-NEXT: s_mov_b32 s7, 0xf000
; VI-NEXT: s_mov_b32 s6, -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_mov_b32 s4, s2
; VI-NEXT: s_mov_b32 s5, s3
; VI-NEXT: s_mov_b32 s2, s6
; VI-NEXT: s_mov_b32 s3, s7
; VI-NEXT: buffer_load_ushort v0, off, s[0:3], 0 offset:16 glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: buffer_store_short v0, off, s[4:7], 0
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_load_i16_offset:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x24
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: global_load_ushort v1, v0, s[4:5] offset:16 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: global_store_short v0, v1, s[6:7]
; GFX9-NEXT: s_endpgm
entry:
%gep = getelementptr i16, ptr addrspace(1) %in, i64 8
%val = load atomic i16, ptr addrspace(1) %gep seq_cst, align 2
store i16 %val, ptr addrspace(1) %out
ret void
}

define amdgpu_kernel void @atomic_load_i16_negoffset(ptr addrspace(1) %in, ptr addrspace(1) %out) {
; SI-LABEL: atomic_load_i16_negoffset:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dwordx4 s[0:3], s[2:3], 0x9
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mov_b32 s4, s2
; SI-NEXT: s_mov_b32 s5, s3
; SI-NEXT: s_mov_b32 s2, 0
; SI-NEXT: s_mov_b32 s3, s7
; SI-NEXT: v_mov_b32_e32 v0, 0xfffffe00
; SI-NEXT: v_mov_b32_e32 v1, -1
; SI-NEXT: buffer_load_ushort v0, v[0:1], s[0:3], 0 addr64 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: buffer_store_short v0, off, s[4:7], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_load_i16_negoffset:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx4 s[0:3], s[2:3], 0x24
; VI-NEXT: s_mov_b32 s7, 0xf000
; VI-NEXT: s_mov_b32 s6, -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_add_u32 s0, s0, 0xfffffe00
; VI-NEXT: s_addc_u32 s1, s1, -1
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: flat_load_ushort v0, v[0:1] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_mov_b32 s4, s2
; VI-NEXT: s_mov_b32 s5, s3
; VI-NEXT: buffer_store_short v0, off, s[4:7], 0
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_load_i16_negoffset:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x24
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: global_load_ushort v1, v0, s[4:5] offset:-512 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: global_store_short v0, v1, s[6:7]
; GFX9-NEXT: s_endpgm
entry:
%gep = getelementptr i16, ptr addrspace(1) %in, i64 -256
%val = load atomic i16, ptr addrspace(1) %gep seq_cst, align 2
store i16 %val, ptr addrspace(1) %out
ret void
}

define amdgpu_kernel void @atomic_store_i16_offset(i16 %in, ptr addrspace(1) %out) {
; SI-LABEL: atomic_store_i16_offset:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dword s4, s[2:3], 0x9
; SI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0xb
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: buffer_store_short v0, off, s[0:3], 0 offset:16
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_store_i16_offset:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x2c
; VI-NEXT: s_load_dword s2, s[2:3], 0x24
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_add_u32 s0, s0, 16
; VI-NEXT: s_addc_u32 s1, s1, 0
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: v_mov_b32_e32 v2, s2
; VI-NEXT: flat_store_short v[0:1], v2
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_store_i16_offset:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dword s4, s[2:3], 0x24
; GFX9-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x2c
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v1, s4
; GFX9-NEXT: global_store_short v0, v1, s[0:1] offset:16
; GFX9-NEXT: s_endpgm
entry:
%gep = getelementptr i16, ptr addrspace(1) %out, i64 8
store atomic i16 %in, ptr addrspace(1) %gep seq_cst, align 2
ret void
}

define amdgpu_kernel void @atomic_store_i16(i16 %in, ptr addrspace(1) %out) {
; SI-LABEL: atomic_store_i16:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dword s4, s[2:3], 0x9
; SI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0xb
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: buffer_store_short v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_store_i16:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x2c
; VI-NEXT: s_load_dword s2, s[2:3], 0x24
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: v_mov_b32_e32 v2, s2
; VI-NEXT: flat_store_short v[0:1], v2
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_store_i16:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dword s4, s[2:3], 0x24
; GFX9-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x2c
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v1, s4
; GFX9-NEXT: global_store_short v0, v1, s[0:1]
; GFX9-NEXT: s_endpgm
entry:
store atomic i16 %in, ptr addrspace(1) %out seq_cst, align 2
ret void
}

define amdgpu_kernel void @atomic_store_f16_offset(half %in, ptr addrspace(1) %out) {
; SI-LABEL: atomic_store_f16_offset:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dword s4, s[2:3], 0x9
; SI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0xb
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: buffer_store_short v0, off, s[0:3], 0 offset:16
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_store_f16_offset:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x2c
; VI-NEXT: s_load_dword s2, s[2:3], 0x24
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_add_u32 s0, s0, 16
; VI-NEXT: s_addc_u32 s1, s1, 0
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: v_mov_b32_e32 v2, s2
; VI-NEXT: flat_store_short v[0:1], v2
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_store_f16_offset:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dword s4, s[2:3], 0x24
; GFX9-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x2c
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v1, s4
; GFX9-NEXT: global_store_short v0, v1, s[0:1] offset:16
; GFX9-NEXT: s_endpgm
entry:
%gep = getelementptr half, ptr addrspace(1) %out, i64 8
store atomic half %in, ptr addrspace(1) %gep seq_cst, align 2
ret void
}

define amdgpu_kernel void @atomic_store_f16(half %in, ptr addrspace(1) %out) {
; SI-LABEL: atomic_store_f16:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dword s4, s[2:3], 0x9
; SI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0xb
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: buffer_store_short v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_store_f16:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x2c
; VI-NEXT: s_load_dword s2, s[2:3], 0x24
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: v_mov_b32_e32 v2, s2
; VI-NEXT: flat_store_short v[0:1], v2
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_store_f16:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dword s4, s[2:3], 0x24
; GFX9-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x2c
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v1, s4
; GFX9-NEXT: global_store_short v0, v1, s[0:1]
; GFX9-NEXT: s_endpgm
entry:
store atomic half %in, ptr addrspace(1) %out seq_cst, align 2
ret void
}

define amdgpu_kernel void @atomic_store_bf16_offset(bfloat %in, ptr addrspace(1) %out) {
; SI-LABEL: atomic_store_bf16_offset:
; SI: ; %bb.0:
; SI-NEXT: s_load_dword s4, s[2:3], 0x9
; SI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0xb
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: buffer_store_short v0, off, s[0:3], 0 offset:16
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_store_bf16_offset:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x2c
; VI-NEXT: s_load_dword s2, s[2:3], 0x24
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_add_u32 s0, s0, 16
; VI-NEXT: s_addc_u32 s1, s1, 0
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: v_mov_b32_e32 v2, s2
; VI-NEXT: flat_store_short v[0:1], v2
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_store_bf16_offset:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_load_dword s4, s[2:3], 0x24
; GFX9-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x2c
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v1, s4
; GFX9-NEXT: global_store_short v0, v1, s[0:1] offset:16
; GFX9-NEXT: s_endpgm
%gep = getelementptr bfloat, ptr addrspace(1) %out, i64 8
store atomic bfloat %in, ptr addrspace(1) %gep seq_cst, align 2
ret void
}

define amdgpu_kernel void @atomic_store_bf16(bfloat %in, ptr addrspace(1) %out) {
; SI-LABEL: atomic_store_bf16:
; SI: ; %bb.0:
; SI-NEXT: s_load_dword s4, s[2:3], 0x9
; SI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0xb
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: buffer_store_short v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_store_bf16:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x2c
; VI-NEXT: s_load_dword s2, s[2:3], 0x24
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: v_mov_b32_e32 v2, s2
; VI-NEXT: flat_store_short v[0:1], v2
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_store_bf16:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_load_dword s4, s[2:3], 0x24
; GFX9-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x2c
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v1, s4
; GFX9-NEXT: global_store_short v0, v1, s[0:1]
; GFX9-NEXT: s_endpgm
store atomic bfloat %in, ptr addrspace(1) %out seq_cst, align 2
ret void
}

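; atomicrmw uinc_wrap/udec_wrap map directly to the hardware atomic_inc and
; atomic_dec instructions, which implement the same wrapping increment and
; decrement against the %in operand.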
define amdgpu_kernel void @atomic_inc_i32_offset(ptr addrspace(1) %out, i32 %in) {
; SI-LABEL: atomic_inc_i32_offset:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dword s4, s[2:3], 0xb
; SI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: buffer_atomic_inc v0, off, s[0:3], 0 offset:16
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_inc_i32_offset:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dword s4, s[2:3], 0x2c
; VI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; VI-NEXT: s_mov_b32 s3, 0xf000
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: buffer_atomic_inc v0, off, s[0:3], 0 offset:16
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_inc_i32_offset:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dword s4, s[2:3], 0x2c
; GFX9-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v1, s4
; GFX9-NEXT: global_atomic_inc v0, v1, s[0:1] offset:16
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: s_endpgm
entry:
%gep = getelementptr i32, ptr addrspace(1) %out, i64 4
%val = atomicrmw volatile uinc_wrap ptr addrspace(1) %gep, i32 %in syncscope("agent") seq_cst
ret void
}

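; -4096 bytes is the most negative displacement GFX9 can fold into a global
; instruction's signed immediate offset; SI and VI must compute the address
; first.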
define amdgpu_kernel void @atomic_inc_i32_max_neg_offset(ptr addrspace(1) %out, i32 %in) {
; SI-LABEL: atomic_inc_i32_max_neg_offset:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dword s4, s[2:3], 0xb
; SI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, 0
; SI-NEXT: v_mov_b32_e32 v0, 0xfffff000
; SI-NEXT: v_mov_b32_e32 v1, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v2, s4
; SI-NEXT: buffer_atomic_inc v2, v[0:1], s[0:3], 0 addr64
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_inc_i32_max_neg_offset:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; VI-NEXT: s_load_dword s2, s[2:3], 0x2c
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_add_u32 s0, s0, 0xfffff000
; VI-NEXT: s_addc_u32 s1, s1, -1
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: v_mov_b32_e32 v2, s2
; VI-NEXT: flat_atomic_inc v[0:1], v2
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_inc_i32_max_neg_offset:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dword s4, s[2:3], 0x2c
; GFX9-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v1, s4
; GFX9-NEXT: global_atomic_inc v0, v1, s[0:1] offset:-4096
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: s_endpgm
entry:
%gep = getelementptr i32, ptr addrspace(1) %out, i64 -1024
%val = atomicrmw volatile uinc_wrap ptr addrspace(1) %gep, i32 %in syncscope("agent") seq_cst
ret void
}

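; The 36000-byte displacement (9000 x i32) exceeds the immediate offset range:
; SI and VI pass it in an SGPR soffset (0x8ca0), while GFX9 materializes
; 0x8000 in the address VGPR and folds the remaining 3232 bytes into the
; immediate (0x8000 + 3232 = 36000).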
define amdgpu_kernel void @atomic_inc_i32_soffset(ptr addrspace(1) %out, i32 %in) {
; SI-LABEL: atomic_inc_i32_soffset:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dword s4, s[2:3], 0xb
; SI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_mov_b32 s5, 0x8ca0
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: buffer_atomic_inc v0, off, s[0:3], s5
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_inc_i32_soffset:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dword s4, s[2:3], 0x2c
; VI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; VI-NEXT: s_mov_b32 s3, 0xf000
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: s_mov_b32 s5, 0x8ca0
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: buffer_atomic_inc v0, off, s[0:3], s5
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_inc_i32_soffset:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dword s4, s[2:3], 0x2c
; GFX9-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX9-NEXT: v_mov_b32_e32 v0, 0x8000
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v1, s4
; GFX9-NEXT: global_atomic_inc v0, v1, s[0:1] offset:3232
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: s_endpgm
entry:
%gep = getelementptr i32, ptr addrspace(1) %out, i64 9000
%val = atomicrmw volatile uinc_wrap ptr addrspace(1) %gep, i32 %in syncscope("agent") seq_cst
ret void
}

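; A displacement this large (47224239175595 x 4 = 0xabcd0000deac bytes) cannot
; be folded at all; every target adds the full 64-bit constant into the
; address before issuing the atomic.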
define amdgpu_kernel void @atomic_inc_i32_huge_offset(ptr addrspace(1) %out, i32 %in) {
; SI-LABEL: atomic_inc_i32_huge_offset:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x9
; SI-NEXT: s_load_dword s4, s[2:3], 0xb
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, 0
; SI-NEXT: v_mov_b32_e32 v0, 0xdeac
; SI-NEXT: v_mov_b32_e32 v1, 0xabcd
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v2, s4
; SI-NEXT: buffer_atomic_inc v2, v[0:1], s[0:3], 0 addr64
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_inc_i32_huge_offset:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; VI-NEXT: s_load_dword s2, s[2:3], 0x2c
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_add_u32 s0, s0, 0xdeac
; VI-NEXT: s_addc_u32 s1, s1, 0xabcd
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: v_mov_b32_e32 v2, s2
; VI-NEXT: flat_atomic_inc v[0:1], v2
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_inc_i32_huge_offset:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX9-NEXT: s_load_dword s4, s[2:3], 0x2c
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: s_add_u32 s0, s0, 0xdeac
; GFX9-NEXT: s_addc_u32 s1, s1, 0xabcd
; GFX9-NEXT: v_mov_b32_e32 v1, s4
; GFX9-NEXT: global_atomic_inc v0, v1, s[0:1]
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: s_endpgm
entry:
%gep = getelementptr i32, ptr addrspace(1) %out, i64 47224239175595
%val = atomicrmw volatile uinc_wrap ptr addrspace(1) %gep, i32 %in syncscope("agent") seq_cst
ret void
}

define amdgpu_kernel void @atomic_inc_i32_ret_offset(ptr addrspace(1) %out, ptr addrspace(1) %out2, i32 %in) {
; SI-LABEL: atomic_inc_i32_ret_offset:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x9
; SI-NEXT: s_load_dword s8, s[2:3], 0xd
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mov_b32 s0, s6
; SI-NEXT: s_mov_b32 s1, s7
; SI-NEXT: s_mov_b32 s6, s2
; SI-NEXT: s_mov_b32 s7, s3
; SI-NEXT: v_mov_b32_e32 v0, s8
; SI-NEXT: buffer_atomic_inc v0, off, s[4:7], 0 offset:16 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_inc_i32_ret_offset:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x24
; VI-NEXT: s_load_dword s0, s[2:3], 0x34
; VI-NEXT: s_mov_b32 s11, 0xf000
; VI-NEXT: s_mov_b32 s10, -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_mov_b32 s8, s6
; VI-NEXT: s_mov_b32 s9, s7
; VI-NEXT: s_mov_b32 s6, s10
; VI-NEXT: s_mov_b32 s7, s11
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: buffer_atomic_inc v0, off, s[4:7], 0 offset:16 glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: buffer_store_dword v0, off, s[8:11], 0
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_inc_i32_ret_offset:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dword s0, s[2:3], 0x34
; GFX9-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x24
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v1, s0
; GFX9-NEXT: global_atomic_inc v1, v0, v1, s[4:5] offset:16 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: global_store_dword v0, v1, s[6:7]
; GFX9-NEXT: s_endpgm
entry:
%gep = getelementptr i32, ptr addrspace(1) %out, i64 4
%val = atomicrmw volatile uinc_wrap ptr addrspace(1) %gep, i32 %in syncscope("agent") seq_cst
store i32 %val, ptr addrspace(1) %out2
ret void
}

define amdgpu_kernel void @atomic_inc_i32_addr64_offset(ptr addrspace(1) %out, i32 %in, i64 %index) {
; SI-LABEL: atomic_inc_i32_addr64_offset:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0xd
; SI-NEXT: s_load_dword s6, s[2:3], 0xb
; SI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_lshl_b64 s[4:5], s[4:5], 2
; SI-NEXT: s_mov_b32 s2, 0
; SI-NEXT: v_mov_b32_e32 v2, s6
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: v_mov_b32_e32 v1, s5
; SI-NEXT: buffer_atomic_inc v2, v[0:1], s[0:3], 0 addr64 offset:16
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_inc_i32_addr64_offset:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x34
; VI-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0x24
; VI-NEXT: s_load_dword s2, s[2:3], 0x2c
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_lshl_b64 s[0:1], s[0:1], 2
; VI-NEXT: s_add_u32 s0, s4, s0
; VI-NEXT: s_addc_u32 s1, s5, s1
; VI-NEXT: s_add_u32 s0, s0, 16
; VI-NEXT: s_addc_u32 s1, s1, 0
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: v_mov_b32_e32 v2, s2
; VI-NEXT: flat_atomic_inc v[0:1], v2
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_inc_i32_addr64_offset:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x34
; GFX9-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0x24
; GFX9-NEXT: s_load_dword s6, s[2:3], 0x2c
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: s_lshl_b64 s[0:1], s[0:1], 2
; GFX9-NEXT: s_add_u32 s0, s4, s0
; GFX9-NEXT: s_addc_u32 s1, s5, s1
; GFX9-NEXT: v_mov_b32_e32 v1, s6
; GFX9-NEXT: global_atomic_inc v0, v1, s[0:1] offset:16
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: s_endpgm
entry:
%ptr = getelementptr i32, ptr addrspace(1) %out, i64 %index
%gep = getelementptr i32, ptr addrspace(1) %ptr, i64 4
%val = atomicrmw volatile uinc_wrap ptr addrspace(1) %gep, i32 %in syncscope("agent") seq_cst
ret void
}

define amdgpu_kernel void @atomic_inc_i32_ret_addr64_offset(ptr addrspace(1) %out, ptr addrspace(1) %out2, i32 %in, i64 %index) {
; SI-LABEL: atomic_inc_i32_ret_addr64_offset:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x9
; SI-NEXT: s_load_dwordx2 s[8:9], s[2:3], 0xf
; SI-NEXT: s_load_dword s2, s[2:3], 0xd
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mov_b32 s0, s6
; SI-NEXT: s_mov_b32 s1, s7
; SI-NEXT: s_lshl_b64 s[8:9], s[8:9], 2
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, s3
; SI-NEXT: v_mov_b32_e32 v2, s2
; SI-NEXT: v_mov_b32_e32 v0, s8
; SI-NEXT: v_mov_b32_e32 v1, s9
; SI-NEXT: buffer_atomic_inc v2, v[0:1], s[4:7], 0 addr64 offset:16 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: buffer_store_dword v2, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_inc_i32_ret_addr64_offset:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x3c
; VI-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x24
; VI-NEXT: s_load_dword s2, s[2:3], 0x34
; VI-NEXT: s_mov_b32 s3, 0xf000
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_lshl_b64 s[0:1], s[0:1], 2
; VI-NEXT: s_add_u32 s0, s4, s0
; VI-NEXT: s_addc_u32 s1, s5, s1
; VI-NEXT: s_add_u32 s0, s0, 16
; VI-NEXT: s_addc_u32 s1, s1, 0
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: v_mov_b32_e32 v2, s2
; VI-NEXT: flat_atomic_inc v0, v[0:1], v2 glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: s_mov_b32 s0, s6
; VI-NEXT: s_mov_b32 s1, s7
; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_inc_i32_ret_addr64_offset:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x3c
; GFX9-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x24
; GFX9-NEXT: s_load_dword s8, s[2:3], 0x34
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: s_lshl_b64 s[0:1], s[0:1], 2
; GFX9-NEXT: s_add_u32 s0, s4, s0
; GFX9-NEXT: s_addc_u32 s1, s5, s1
; GFX9-NEXT: v_mov_b32_e32 v1, s8
; GFX9-NEXT: global_atomic_inc v1, v0, v1, s[0:1] offset:16 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: global_store_dword v0, v1, s[6:7]
; GFX9-NEXT: s_endpgm
entry:
%ptr = getelementptr i32, ptr addrspace(1) %out, i64 %index
%gep = getelementptr i32, ptr addrspace(1) %ptr, i64 4
%val = atomicrmw volatile uinc_wrap ptr addrspace(1) %gep, i32 %in syncscope("agent") seq_cst
store i32 %val, ptr addrspace(1) %out2
ret void
}
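
; 'udec_wrap' with a 16-byte offset that fits the immediate offset field on all targets.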
define amdgpu_kernel void @atomic_dec_i32_offset(ptr addrspace(1) %out, i32 %in) {
; SI-LABEL: atomic_dec_i32_offset:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dword s4, s[2:3], 0xb
; SI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: buffer_atomic_dec v0, off, s[0:3], 0 offset:16
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_dec_i32_offset:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dword s4, s[2:3], 0x2c
; VI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; VI-NEXT: s_mov_b32 s3, 0xf000
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: buffer_atomic_dec v0, off, s[0:3], 0 offset:16
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_dec_i32_offset:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dword s4, s[2:3], 0x2c
; GFX9-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v1, s4
; GFX9-NEXT: global_atomic_dec v0, v1, s[0:1] offset:16
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: s_endpgm
entry:
%gep = getelementptr i32, ptr addrspace(1) %out, i64 4
%val = atomicrmw volatile udec_wrap ptr addrspace(1) %gep, i32 %in syncscope("agent") seq_cst
ret void
}
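
; A -4096 byte offset, the most negative offset GFX9 can still encode directly
; (offset:-4096). SI materializes the negative 64-bit offset in a VGPR pair for
; addr64; VI folds it into the flat address with a 64-bit scalar add.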
define amdgpu_kernel void @atomic_dec_i32_max_neg_offset(ptr addrspace(1) %out, i32 %in) {
; SI-LABEL: atomic_dec_i32_max_neg_offset:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dword s4, s[2:3], 0xb
; SI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, 0
; SI-NEXT: v_mov_b32_e32 v0, 0xfffff000
; SI-NEXT: v_mov_b32_e32 v1, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v2, s4
; SI-NEXT: buffer_atomic_dec v2, v[0:1], s[0:3], 0 addr64
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_dec_i32_max_neg_offset:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; VI-NEXT: s_load_dword s2, s[2:3], 0x2c
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_add_u32 s0, s0, 0xfffff000
; VI-NEXT: s_addc_u32 s1, s1, -1
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: v_mov_b32_e32 v2, s2
; VI-NEXT: flat_atomic_dec v[0:1], v2
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_dec_i32_max_neg_offset:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dword s4, s[2:3], 0x2c
; GFX9-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v1, s4
; GFX9-NEXT: global_atomic_dec v0, v1, s[0:1] offset:-4096
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: s_endpgm
entry:
%gep = getelementptr i32, ptr addrspace(1) %out, i64 -1024
%val = atomicrmw volatile udec_wrap ptr addrspace(1) %gep, i32 %in syncscope("agent") seq_cst
ret void
}
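
; A 36000-byte offset is too large for an immediate offset field: SI and VI move it
; into the SGPR soffset operand (0x8ca0), while GFX9 splits it into a 0x8000 VGPR
; base plus an offset:3232 immediate.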
define amdgpu_kernel void @atomic_dec_i32_soffset(ptr addrspace(1) %out, i32 %in) {
; SI-LABEL: atomic_dec_i32_soffset:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dword s4, s[2:3], 0xb
; SI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_mov_b32 s5, 0x8ca0
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: buffer_atomic_dec v0, off, s[0:3], s5
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_dec_i32_soffset:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dword s4, s[2:3], 0x2c
; VI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; VI-NEXT: s_mov_b32 s3, 0xf000
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: s_mov_b32 s5, 0x8ca0
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: buffer_atomic_dec v0, off, s[0:3], s5
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_dec_i32_soffset:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dword s4, s[2:3], 0x2c
; GFX9-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX9-NEXT: v_mov_b32_e32 v0, 0x8000
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v1, s4
; GFX9-NEXT: global_atomic_dec v0, v1, s[0:1] offset:3232
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: s_endpgm
entry:
%gep = getelementptr i32, ptr addrspace(1) %out, i64 9000
%val = atomicrmw volatile udec_wrap ptr addrspace(1) %gep, i32 %in syncscope("agent") seq_cst
ret void
}
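
; A byte offset of 0xabcd0000deac does not fit in 32 bits, so every target performs
; full 64-bit address arithmetic; SI carries it in the addr64 VGPR pair.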
define amdgpu_kernel void @atomic_dec_i32_huge_offset(ptr addrspace(1) %out, i32 %in) {
; SI-LABEL: atomic_dec_i32_huge_offset:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x9
; SI-NEXT: s_load_dword s4, s[2:3], 0xb
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, 0
; SI-NEXT: v_mov_b32_e32 v0, 0xdeac
; SI-NEXT: v_mov_b32_e32 v1, 0xabcd
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v2, s4
; SI-NEXT: buffer_atomic_dec v2, v[0:1], s[0:3], 0 addr64
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_dec_i32_huge_offset:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; VI-NEXT: s_load_dword s2, s[2:3], 0x2c
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_add_u32 s0, s0, 0xdeac
; VI-NEXT: s_addc_u32 s1, s1, 0xabcd
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: v_mov_b32_e32 v2, s2
; VI-NEXT: flat_atomic_dec v[0:1], v2
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_dec_i32_huge_offset:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; GFX9-NEXT: s_load_dword s4, s[2:3], 0x2c
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: s_add_u32 s0, s0, 0xdeac
; GFX9-NEXT: s_addc_u32 s1, s1, 0xabcd
; GFX9-NEXT: v_mov_b32_e32 v1, s4
; GFX9-NEXT: global_atomic_dec v0, v1, s[0:1]
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: s_endpgm
entry:
%gep = getelementptr i32, ptr addrspace(1) %out, i64 47224239175595
%val = atomicrmw volatile udec_wrap ptr addrspace(1) %gep, i32 %in syncscope("agent") seq_cst
ret void
}
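
; Returning variant of the 16-byte-offset 'udec_wrap': the atomic is issued with glc
; and the old value is stored to %out2.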
define amdgpu_kernel void @atomic_dec_i32_ret_offset(ptr addrspace(1) %out, ptr addrspace(1) %out2, i32 %in) {
; SI-LABEL: atomic_dec_i32_ret_offset:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x9
; SI-NEXT: s_load_dword s8, s[2:3], 0xd
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mov_b32 s0, s6
; SI-NEXT: s_mov_b32 s1, s7
; SI-NEXT: s_mov_b32 s6, s2
; SI-NEXT: s_mov_b32 s7, s3
; SI-NEXT: v_mov_b32_e32 v0, s8
; SI-NEXT: buffer_atomic_dec v0, off, s[4:7], 0 offset:16 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_dec_i32_ret_offset:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x24
; VI-NEXT: s_load_dword s0, s[2:3], 0x34
; VI-NEXT: s_mov_b32 s11, 0xf000
; VI-NEXT: s_mov_b32 s10, -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_mov_b32 s8, s6
; VI-NEXT: s_mov_b32 s9, s7
; VI-NEXT: s_mov_b32 s6, s10
; VI-NEXT: s_mov_b32 s7, s11
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: buffer_atomic_dec v0, off, s[4:7], 0 offset:16 glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: buffer_store_dword v0, off, s[8:11], 0
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_dec_i32_ret_offset:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dword s0, s[2:3], 0x34
; GFX9-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x24
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v1, s0
; GFX9-NEXT: global_atomic_dec v1, v0, v1, s[4:5] offset:16 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: global_store_dword v0, v1, s[6:7]
; GFX9-NEXT: s_endpgm
entry:
%gep = getelementptr i32, ptr addrspace(1) %out, i64 4
%val = atomicrmw volatile udec_wrap ptr addrspace(1) %gep, i32 %in syncscope("agent") seq_cst
store i32 %val, ptr addrspace(1) %out2
ret void
}
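
; 'udec_wrap' at a dynamically indexed address plus a 16-byte offset; VI again adds
; the offset into the flat address, while SI and GFX9 encode it directly.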
define amdgpu_kernel void @atomic_dec_i32_addr64_offset(ptr addrspace(1) %out, i32 %in, i64 %index) {
; SI-LABEL: atomic_dec_i32_addr64_offset:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0xd
; SI-NEXT: s_load_dword s6, s[2:3], 0xb
; SI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_lshl_b64 s[4:5], s[4:5], 2
; SI-NEXT: s_mov_b32 s2, 0
; SI-NEXT: v_mov_b32_e32 v2, s6
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: v_mov_b32_e32 v1, s5
; SI-NEXT: buffer_atomic_dec v2, v[0:1], s[0:3], 0 addr64 offset:16
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_dec_i32_addr64_offset:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x34
; VI-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0x24
; VI-NEXT: s_load_dword s2, s[2:3], 0x2c
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_lshl_b64 s[0:1], s[0:1], 2
; VI-NEXT: s_add_u32 s0, s4, s0
; VI-NEXT: s_addc_u32 s1, s5, s1
; VI-NEXT: s_add_u32 s0, s0, 16
; VI-NEXT: s_addc_u32 s1, s1, 0
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: v_mov_b32_e32 v2, s2
; VI-NEXT: flat_atomic_dec v[0:1], v2
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_dec_i32_addr64_offset:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x34
; GFX9-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0x24
; GFX9-NEXT: s_load_dword s6, s[2:3], 0x2c
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: s_lshl_b64 s[0:1], s[0:1], 2
; GFX9-NEXT: s_add_u32 s0, s4, s0
; GFX9-NEXT: s_addc_u32 s1, s5, s1
; GFX9-NEXT: v_mov_b32_e32 v1, s6
; GFX9-NEXT: global_atomic_dec v0, v1, s[0:1] offset:16
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: s_endpgm
entry:
%ptr = getelementptr i32, ptr addrspace(1) %out, i64 %index
%gep = getelementptr i32, ptr addrspace(1) %ptr, i64 4
%val = atomicrmw volatile udec_wrap ptr addrspace(1) %gep, i32 %in syncscope("agent") seq_cst
ret void
}
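
; Returning variant of the dynamically indexed 'udec_wrap' test.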
define amdgpu_kernel void @atomic_dec_i32_ret_addr64_offset(ptr addrspace(1) %out, ptr addrspace(1) %out2, i32 %in, i64 %index) {
; SI-LABEL: atomic_dec_i32_ret_addr64_offset:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x9
; SI-NEXT: s_load_dwordx2 s[8:9], s[2:3], 0xf
; SI-NEXT: s_load_dword s2, s[2:3], 0xd
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mov_b32 s0, s6
; SI-NEXT: s_mov_b32 s1, s7
; SI-NEXT: s_lshl_b64 s[8:9], s[8:9], 2
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, s3
; SI-NEXT: v_mov_b32_e32 v2, s2
; SI-NEXT: v_mov_b32_e32 v0, s8
; SI-NEXT: v_mov_b32_e32 v1, s9
; SI-NEXT: buffer_atomic_dec v2, v[0:1], s[4:7], 0 addr64 offset:16 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: buffer_store_dword v2, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_dec_i32_ret_addr64_offset:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x3c
; VI-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x24
; VI-NEXT: s_load_dword s2, s[2:3], 0x34
; VI-NEXT: s_mov_b32 s3, 0xf000
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_lshl_b64 s[0:1], s[0:1], 2
; VI-NEXT: s_add_u32 s0, s4, s0
; VI-NEXT: s_addc_u32 s1, s5, s1
; VI-NEXT: s_add_u32 s0, s0, 16
; VI-NEXT: s_addc_u32 s1, s1, 0
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: v_mov_b32_e32 v2, s2
; VI-NEXT: flat_atomic_dec v0, v[0:1], v2 glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: s_mov_b32 s0, s6
; VI-NEXT: s_mov_b32 s1, s7
; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_dec_i32_ret_addr64_offset:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x3c
; GFX9-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x24
; GFX9-NEXT: s_load_dword s8, s[2:3], 0x34
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: s_lshl_b64 s[0:1], s[0:1], 2
; GFX9-NEXT: s_add_u32 s0, s4, s0
; GFX9-NEXT: s_addc_u32 s1, s5, s1
; GFX9-NEXT: v_mov_b32_e32 v1, s8
; GFX9-NEXT: global_atomic_dec v1, v0, v1, s[0:1] offset:16 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: global_store_dword v0, v1, s[6:7]
; GFX9-NEXT: s_endpgm
entry:
%ptr = getelementptr i32, ptr addrspace(1) %out, i64 %index
%gep = getelementptr i32, ptr addrspace(1) %ptr, i64 4
%val = atomicrmw volatile udec_wrap ptr addrspace(1) %gep, i32 %in syncscope("agent") seq_cst
store i32 %val, ptr addrspace(1) %out2
ret void
}
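
; A seq_cst atomic load of half lowers to a 16-bit load with glc set, followed by an
; L1 invalidate (buffer_wbinvl1) for the ordering.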
define amdgpu_kernel void @atomic_load_f16_offset(ptr addrspace(1) %in, ptr addrspace(1) %out) {
; SI-LABEL: atomic_load_f16_offset:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx4 s[0:3], s[2:3], 0x9
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mov_b32 s4, s2
; SI-NEXT: s_mov_b32 s5, s3
; SI-NEXT: s_mov_b32 s2, s6
; SI-NEXT: s_mov_b32 s3, s7
; SI-NEXT: buffer_load_ushort v0, off, s[0:3], 0 offset:16 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: buffer_store_short v0, off, s[4:7], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_load_f16_offset:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx4 s[0:3], s[2:3], 0x24
; VI-NEXT: s_mov_b32 s7, 0xf000
; VI-NEXT: s_mov_b32 s6, -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_mov_b32 s4, s2
; VI-NEXT: s_mov_b32 s5, s3
; VI-NEXT: s_mov_b32 s2, s6
; VI-NEXT: s_mov_b32 s3, s7
; VI-NEXT: buffer_load_ushort v0, off, s[0:3], 0 offset:16 glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: buffer_store_short v0, off, s[4:7], 0
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_load_f16_offset:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x24
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: global_load_ushort v1, v0, s[4:5] offset:16 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: global_store_short v0, v1, s[6:7]
; GFX9-NEXT: s_endpgm
%gep = getelementptr half, ptr addrspace(1) %in, i64 8
%val = load atomic half, ptr addrspace(1) %gep seq_cst, align 2
store half %val, ptr addrspace(1) %out
ret void
}
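
; A -512 byte offset on an atomic f16 load: GFX9 encodes offset:-512 directly, SI
; uses a negative 64-bit addr64 offset, and VI adds it into the flat address.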
define amdgpu_kernel void @atomic_load_f16_negoffset(ptr addrspace(1) %in, ptr addrspace(1) %out) {
; SI-LABEL: atomic_load_f16_negoffset:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx4 s[0:3], s[2:3], 0x9
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mov_b32 s4, s2
; SI-NEXT: s_mov_b32 s5, s3
; SI-NEXT: s_mov_b32 s2, 0
; SI-NEXT: s_mov_b32 s3, s7
; SI-NEXT: v_mov_b32_e32 v0, 0xfffffe00
; SI-NEXT: v_mov_b32_e32 v1, -1
; SI-NEXT: buffer_load_ushort v0, v[0:1], s[0:3], 0 addr64 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: buffer_store_short v0, off, s[4:7], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_load_f16_negoffset:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx4 s[0:3], s[2:3], 0x24
; VI-NEXT: s_mov_b32 s7, 0xf000
; VI-NEXT: s_mov_b32 s6, -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_add_u32 s0, s0, 0xfffffe00
; VI-NEXT: s_addc_u32 s1, s1, -1
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: flat_load_ushort v0, v[0:1] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_mov_b32 s4, s2
; VI-NEXT: s_mov_b32 s5, s3
; VI-NEXT: buffer_store_short v0, off, s[4:7], 0
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_load_f16_negoffset:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x24
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: global_load_ushort v1, v0, s[4:5] offset:-512 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: global_store_short v0, v1, s[6:7]
; GFX9-NEXT: s_endpgm
%gep = getelementptr half, ptr addrspace(1) %in, i64 -256
%val = load atomic half, ptr addrspace(1) %gep seq_cst, align 2
store half %val, ptr addrspace(1) %out
ret void
}
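
; bfloat atomic loads lower exactly like half: plain 16-bit loads and stores.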
define amdgpu_kernel void @atomic_load_bf16_offset(ptr addrspace(1) %in, ptr addrspace(1) %out) {
; SI-LABEL: atomic_load_bf16_offset:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx4 s[0:3], s[2:3], 0x9
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mov_b32 s4, s2
; SI-NEXT: s_mov_b32 s5, s3
; SI-NEXT: s_mov_b32 s2, s6
; SI-NEXT: s_mov_b32 s3, s7
; SI-NEXT: buffer_load_ushort v0, off, s[0:3], 0 offset:16 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: buffer_store_short v0, off, s[4:7], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_load_bf16_offset:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx4 s[0:3], s[2:3], 0x24
; VI-NEXT: s_mov_b32 s7, 0xf000
; VI-NEXT: s_mov_b32 s6, -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_mov_b32 s4, s2
; VI-NEXT: s_mov_b32 s5, s3
; VI-NEXT: s_mov_b32 s2, s6
; VI-NEXT: s_mov_b32 s3, s7
; VI-NEXT: buffer_load_ushort v0, off, s[0:3], 0 offset:16 glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: buffer_store_short v0, off, s[4:7], 0
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_load_bf16_offset:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x24
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: global_load_ushort v1, v0, s[4:5] offset:16 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: global_store_short v0, v1, s[6:7]
; GFX9-NEXT: s_endpgm
%gep = getelementptr bfloat, ptr addrspace(1) %in, i64 8
%val = load atomic bfloat, ptr addrspace(1) %gep seq_cst, align 2
store bfloat %val, ptr addrspace(1) %out
ret void
}
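
; Negative-offset variant of the bfloat atomic load.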
define amdgpu_kernel void @atomic_load_bf16_negoffset(ptr addrspace(1) %in, ptr addrspace(1) %out) {
; SI-LABEL: atomic_load_bf16_negoffset:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx4 s[0:3], s[2:3], 0x9
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mov_b32 s4, s2
; SI-NEXT: s_mov_b32 s5, s3
; SI-NEXT: s_mov_b32 s2, 0
; SI-NEXT: s_mov_b32 s3, s7
; SI-NEXT: v_mov_b32_e32 v0, 0xfffffe00
; SI-NEXT: v_mov_b32_e32 v1, -1
; SI-NEXT: buffer_load_ushort v0, v[0:1], s[0:3], 0 addr64 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: buffer_store_short v0, off, s[4:7], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_load_bf16_negoffset:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx4 s[0:3], s[2:3], 0x24
; VI-NEXT: s_mov_b32 s7, 0xf000
; VI-NEXT: s_mov_b32 s6, -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_add_u32 s0, s0, 0xfffffe00
; VI-NEXT: s_addc_u32 s1, s1, -1
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: flat_load_ushort v0, v[0:1] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_mov_b32 s4, s2
; VI-NEXT: s_mov_b32 s5, s3
; VI-NEXT: buffer_store_short v0, off, s[4:7], 0
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_load_bf16_negoffset:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x24
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: global_load_ushort v1, v0, s[4:5] offset:-512 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: global_store_short v0, v1, s[6:7]
; GFX9-NEXT: s_endpgm
%gep = getelementptr bfloat, ptr addrspace(1) %in, i64 -256
%val = load atomic bfloat, ptr addrspace(1) %gep seq_cst, align 2
store bfloat %val, ptr addrspace(1) %out
ret void
}