; NOTE(review): the paragraph below appears to be scraped commit-message text
; (about xnack scalar-load early-clobber), unrelated to this atomics test —
; kept as a comment pending confirmation it belongs in this file.
; For targets that support xnack replay feature (gfx8+), the multi-dword scalar loads shouldn't clobber any register that holds the src address. The constrained version of the scalar loads have the early clobber flag attached to the dst operand to restrict RA from re-allocating any of the src regs for its dst operand.
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=amdgcn -amdgpu-atomic-optimizer-strategy=None -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=SI %s
; RUN: llc -mtriple=amdgcn -mcpu=tonga -amdgpu-atomic-optimizer-strategy=None -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=VI %s
; RUN: llc -mtriple=amdgcn -mcpu=gfx900 -amdgpu-atomic-optimizer-strategy=None -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GFX9 %s

; ---------------------------------------------------------------------
; atomicrmw xchg
; ---------------------------------------------------------------------
|
define void @global_atomic_xchg_i64_noret(ptr addrspace(1) %ptr, i64 %in) {
|
|
; SI-LABEL: global_atomic_xchg_i64_noret:
|
|
; SI: ; %bb.0:
|
|
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; SI-NEXT: s_mov_b32 s6, 0
|
|
; SI-NEXT: s_mov_b32 s7, 0xf000
|
|
; SI-NEXT: s_mov_b32 s4, s6
|
|
; SI-NEXT: s_mov_b32 s5, s6
|
|
; SI-NEXT: buffer_atomic_swap_x2 v[2:3], v[0:1], s[4:7], 0 addr64
|
|
; SI-NEXT: s_waitcnt vmcnt(0)
|
|
; SI-NEXT: buffer_wbinvl1
|
|
; SI-NEXT: s_waitcnt expcnt(0)
|
|
; SI-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; VI-LABEL: global_atomic_xchg_i64_noret:
|
|
; VI: ; %bb.0:
|
|
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; VI-NEXT: flat_atomic_swap_x2 v[0:1], v[2:3]
|
|
; VI-NEXT: s_waitcnt vmcnt(0)
|
|
; VI-NEXT: buffer_wbinvl1_vol
|
|
; VI-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; GFX9-LABEL: global_atomic_xchg_i64_noret:
|
|
; GFX9: ; %bb.0:
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; GFX9-NEXT: global_atomic_swap_x2 v[0:1], v[2:3], off
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0)
|
|
; GFX9-NEXT: buffer_wbinvl1_vol
|
|
; GFX9-NEXT: s_setpc_b64 s[30:31]
|
|
%tmp0 = atomicrmw xchg ptr addrspace(1) %ptr, i64 %in seq_cst
|
|
ret void
|
|
}
define void @global_atomic_xchg_i64_noret_offset(ptr addrspace(1) %out, i64 %in) {
|
|
; SI-LABEL: global_atomic_xchg_i64_noret_offset:
|
|
; SI: ; %bb.0:
|
|
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; SI-NEXT: s_mov_b32 s6, 0
|
|
; SI-NEXT: s_mov_b32 s7, 0xf000
|
|
; SI-NEXT: s_mov_b32 s4, s6
|
|
; SI-NEXT: s_mov_b32 s5, s6
|
|
; SI-NEXT: buffer_atomic_swap_x2 v[2:3], v[0:1], s[4:7], 0 addr64 offset:32
|
|
; SI-NEXT: s_waitcnt vmcnt(0)
|
|
; SI-NEXT: buffer_wbinvl1
|
|
; SI-NEXT: s_waitcnt expcnt(0)
|
|
; SI-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; VI-LABEL: global_atomic_xchg_i64_noret_offset:
|
|
; VI: ; %bb.0:
|
|
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; VI-NEXT: v_add_u32_e32 v0, vcc, 32, v0
|
|
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
|
|
; VI-NEXT: flat_atomic_swap_x2 v[0:1], v[2:3]
|
|
; VI-NEXT: s_waitcnt vmcnt(0)
|
|
; VI-NEXT: buffer_wbinvl1_vol
|
|
; VI-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; GFX9-LABEL: global_atomic_xchg_i64_noret_offset:
|
|
; GFX9: ; %bb.0:
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; GFX9-NEXT: global_atomic_swap_x2 v[0:1], v[2:3], off offset:32
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0)
|
|
; GFX9-NEXT: buffer_wbinvl1_vol
|
|
; GFX9-NEXT: s_setpc_b64 s[30:31]
|
|
%gep = getelementptr i64, ptr addrspace(1) %out, i64 4
|
|
%tmp0 = atomicrmw xchg ptr addrspace(1) %gep, i64 %in seq_cst
|
|
ret void
|
|
}
define i64 @global_atomic_xchg_i64_ret(ptr addrspace(1) %ptr, i64 %in) {
|
|
; SI-LABEL: global_atomic_xchg_i64_ret:
|
|
; SI: ; %bb.0:
|
|
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; SI-NEXT: s_mov_b32 s6, 0
|
|
; SI-NEXT: s_mov_b32 s7, 0xf000
|
|
; SI-NEXT: s_mov_b32 s4, s6
|
|
; SI-NEXT: s_mov_b32 s5, s6
|
|
; SI-NEXT: buffer_atomic_swap_x2 v[2:3], v[0:1], s[4:7], 0 addr64 glc
|
|
; SI-NEXT: s_waitcnt vmcnt(0)
|
|
; SI-NEXT: buffer_wbinvl1
|
|
; SI-NEXT: v_mov_b32_e32 v0, v2
|
|
; SI-NEXT: v_mov_b32_e32 v1, v3
|
|
; SI-NEXT: s_waitcnt expcnt(0)
|
|
; SI-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; VI-LABEL: global_atomic_xchg_i64_ret:
|
|
; VI: ; %bb.0:
|
|
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; VI-NEXT: flat_atomic_swap_x2 v[0:1], v[0:1], v[2:3] glc
|
|
; VI-NEXT: s_waitcnt vmcnt(0)
|
|
; VI-NEXT: buffer_wbinvl1_vol
|
|
; VI-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; GFX9-LABEL: global_atomic_xchg_i64_ret:
|
|
; GFX9: ; %bb.0:
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; GFX9-NEXT: global_atomic_swap_x2 v[0:1], v[0:1], v[2:3], off glc
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0)
|
|
; GFX9-NEXT: buffer_wbinvl1_vol
|
|
; GFX9-NEXT: s_setpc_b64 s[30:31]
|
|
%result = atomicrmw xchg ptr addrspace(1) %ptr, i64 %in seq_cst
|
|
ret i64 %result
|
|
}
define i64 @global_atomic_xchg_i64_ret_offset(ptr addrspace(1) %out, i64 %in) {
|
|
; SI-LABEL: global_atomic_xchg_i64_ret_offset:
|
|
; SI: ; %bb.0:
|
|
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; SI-NEXT: s_mov_b32 s6, 0
|
|
; SI-NEXT: s_mov_b32 s7, 0xf000
|
|
; SI-NEXT: s_mov_b32 s4, s6
|
|
; SI-NEXT: s_mov_b32 s5, s6
|
|
; SI-NEXT: buffer_atomic_swap_x2 v[2:3], v[0:1], s[4:7], 0 addr64 offset:32 glc
|
|
; SI-NEXT: s_waitcnt vmcnt(0)
|
|
; SI-NEXT: buffer_wbinvl1
|
|
; SI-NEXT: v_mov_b32_e32 v0, v2
|
|
; SI-NEXT: v_mov_b32_e32 v1, v3
|
|
; SI-NEXT: s_waitcnt expcnt(0)
|
|
; SI-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; VI-LABEL: global_atomic_xchg_i64_ret_offset:
|
|
; VI: ; %bb.0:
|
|
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; VI-NEXT: v_add_u32_e32 v0, vcc, 32, v0
|
|
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
|
|
; VI-NEXT: flat_atomic_swap_x2 v[0:1], v[0:1], v[2:3] glc
|
|
; VI-NEXT: s_waitcnt vmcnt(0)
|
|
; VI-NEXT: buffer_wbinvl1_vol
|
|
; VI-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; GFX9-LABEL: global_atomic_xchg_i64_ret_offset:
|
|
; GFX9: ; %bb.0:
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; GFX9-NEXT: global_atomic_swap_x2 v[0:1], v[0:1], v[2:3], off offset:32 glc
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0)
|
|
; GFX9-NEXT: buffer_wbinvl1_vol
|
|
; GFX9-NEXT: s_setpc_b64 s[30:31]
|
|
%gep = getelementptr i64, ptr addrspace(1) %out, i64 4
|
|
%result = atomicrmw xchg ptr addrspace(1) %gep, i64 %in seq_cst
|
|
ret i64 %result
|
|
}
define amdgpu_gfx void @global_atomic_xchg_i64_noret_scalar(ptr addrspace(1) inreg %ptr, i64 inreg %in) {
|
|
; SI-LABEL: global_atomic_xchg_i64_noret_scalar:
|
|
; SI: ; %bb.0:
|
|
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
|
|
; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 ; 4-byte Folded Spill
|
|
; SI-NEXT: s_mov_b64 exec, s[34:35]
|
|
; SI-NEXT: s_waitcnt expcnt(0)
|
|
; SI-NEXT: v_writelane_b32 v2, s6, 0
|
|
; SI-NEXT: v_writelane_b32 v2, s7, 1
|
|
; SI-NEXT: s_mov_b32 s34, s7
|
|
; SI-NEXT: s_mov_b32 s35, s6
|
|
; SI-NEXT: s_mov_b32 s7, 0xf000
|
|
; SI-NEXT: s_mov_b32 s6, -1
|
|
; SI-NEXT: v_mov_b32_e32 v0, s35
|
|
; SI-NEXT: v_mov_b32_e32 v1, s34
|
|
; SI-NEXT: s_waitcnt vmcnt(0)
|
|
; SI-NEXT: buffer_atomic_swap_x2 v[0:1], off, s[4:7], 0
|
|
; SI-NEXT: s_waitcnt vmcnt(0)
|
|
; SI-NEXT: buffer_wbinvl1
|
|
; SI-NEXT: v_readlane_b32 s7, v2, 1
|
|
; SI-NEXT: v_readlane_b32 s6, v2, 0
|
|
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
|
|
; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 ; 4-byte Folded Reload
|
|
; SI-NEXT: s_mov_b64 exec, s[34:35]
|
|
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
|
|
; SI-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; VI-LABEL: global_atomic_xchg_i64_noret_scalar:
|
|
; VI: ; %bb.0:
|
|
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; VI-NEXT: v_mov_b32_e32 v0, s6
|
|
; VI-NEXT: v_mov_b32_e32 v1, s7
|
|
; VI-NEXT: v_mov_b32_e32 v2, s4
|
|
; VI-NEXT: v_mov_b32_e32 v3, s5
|
|
; VI-NEXT: flat_atomic_swap_x2 v[2:3], v[0:1]
|
|
; VI-NEXT: s_waitcnt vmcnt(0)
|
|
; VI-NEXT: buffer_wbinvl1_vol
|
|
; VI-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; GFX9-LABEL: global_atomic_xchg_i64_noret_scalar:
|
|
; GFX9: ; %bb.0:
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; GFX9-NEXT: v_mov_b32_e32 v0, s6
|
|
; GFX9-NEXT: v_mov_b32_e32 v1, s7
|
|
; GFX9-NEXT: v_mov_b32_e32 v2, 0
|
|
; GFX9-NEXT: global_atomic_swap_x2 v2, v[0:1], s[4:5]
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0)
|
|
; GFX9-NEXT: buffer_wbinvl1_vol
|
|
; GFX9-NEXT: s_setpc_b64 s[30:31]
|
|
%tmp0 = atomicrmw xchg ptr addrspace(1) %ptr, i64 %in seq_cst
|
|
ret void
|
|
}
define amdgpu_gfx void @global_atomic_xchg_i64_noret_offset_scalar(ptr addrspace(1) inreg %out, i64 inreg %in) {
|
|
; SI-LABEL: global_atomic_xchg_i64_noret_offset_scalar:
|
|
; SI: ; %bb.0:
|
|
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
|
|
; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 ; 4-byte Folded Spill
|
|
; SI-NEXT: s_mov_b64 exec, s[34:35]
|
|
; SI-NEXT: s_waitcnt expcnt(0)
|
|
; SI-NEXT: v_writelane_b32 v2, s6, 0
|
|
; SI-NEXT: v_writelane_b32 v2, s7, 1
|
|
; SI-NEXT: v_mov_b32_e32 v0, s6
|
|
; SI-NEXT: v_mov_b32_e32 v1, s7
|
|
; SI-NEXT: s_mov_b32 s7, 0xf000
|
|
; SI-NEXT: s_mov_b32 s6, -1
|
|
; SI-NEXT: s_waitcnt vmcnt(0)
|
|
; SI-NEXT: buffer_atomic_swap_x2 v[0:1], off, s[4:7], 0 offset:32
|
|
; SI-NEXT: s_waitcnt vmcnt(0)
|
|
; SI-NEXT: buffer_wbinvl1
|
|
; SI-NEXT: v_readlane_b32 s7, v2, 1
|
|
; SI-NEXT: v_readlane_b32 s6, v2, 0
|
|
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
|
|
; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 ; 4-byte Folded Reload
|
|
; SI-NEXT: s_mov_b64 exec, s[34:35]
|
|
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
|
|
; SI-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; VI-LABEL: global_atomic_xchg_i64_noret_offset_scalar:
|
|
; VI: ; %bb.0:
|
|
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; VI-NEXT: s_add_u32 s34, s4, 32
|
|
; VI-NEXT: s_addc_u32 s35, s5, 0
|
|
; VI-NEXT: v_mov_b32_e32 v2, s34
|
|
; VI-NEXT: v_mov_b32_e32 v0, s6
|
|
; VI-NEXT: v_mov_b32_e32 v1, s7
|
|
; VI-NEXT: v_mov_b32_e32 v3, s35
|
|
; VI-NEXT: flat_atomic_swap_x2 v[2:3], v[0:1]
|
|
; VI-NEXT: s_waitcnt vmcnt(0)
|
|
; VI-NEXT: buffer_wbinvl1_vol
|
|
; VI-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; GFX9-LABEL: global_atomic_xchg_i64_noret_offset_scalar:
|
|
; GFX9: ; %bb.0:
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; GFX9-NEXT: v_mov_b32_e32 v0, s6
|
|
; GFX9-NEXT: v_mov_b32_e32 v1, s7
|
|
; GFX9-NEXT: v_mov_b32_e32 v2, 0
|
|
; GFX9-NEXT: global_atomic_swap_x2 v2, v[0:1], s[4:5] offset:32
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0)
|
|
; GFX9-NEXT: buffer_wbinvl1_vol
|
|
; GFX9-NEXT: s_setpc_b64 s[30:31]
|
|
%gep = getelementptr i64, ptr addrspace(1) %out, i64 4
|
|
%tmp0 = atomicrmw xchg ptr addrspace(1) %gep, i64 %in seq_cst
|
|
ret void
|
|
}
define amdgpu_gfx i64 @global_atomic_xchg_i64_ret_scalar(ptr addrspace(1) inreg %ptr, i64 inreg %in) {
|
|
; SI-LABEL: global_atomic_xchg_i64_ret_scalar:
|
|
; SI: ; %bb.0:
|
|
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
|
|
; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 ; 4-byte Folded Spill
|
|
; SI-NEXT: s_mov_b64 exec, s[34:35]
|
|
; SI-NEXT: s_waitcnt expcnt(0)
|
|
; SI-NEXT: v_writelane_b32 v2, s6, 0
|
|
; SI-NEXT: v_writelane_b32 v2, s7, 1
|
|
; SI-NEXT: s_mov_b32 s34, s7
|
|
; SI-NEXT: s_mov_b32 s35, s6
|
|
; SI-NEXT: s_mov_b32 s7, 0xf000
|
|
; SI-NEXT: s_mov_b32 s6, -1
|
|
; SI-NEXT: v_mov_b32_e32 v0, s35
|
|
; SI-NEXT: v_mov_b32_e32 v1, s34
|
|
; SI-NEXT: s_waitcnt vmcnt(0)
|
|
; SI-NEXT: buffer_atomic_swap_x2 v[0:1], off, s[4:7], 0 glc
|
|
; SI-NEXT: s_waitcnt vmcnt(0)
|
|
; SI-NEXT: buffer_wbinvl1
|
|
; SI-NEXT: v_readlane_b32 s7, v2, 1
|
|
; SI-NEXT: v_readlane_b32 s6, v2, 0
|
|
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
|
|
; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 ; 4-byte Folded Reload
|
|
; SI-NEXT: s_mov_b64 exec, s[34:35]
|
|
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
|
|
; SI-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; VI-LABEL: global_atomic_xchg_i64_ret_scalar:
|
|
; VI: ; %bb.0:
|
|
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; VI-NEXT: v_mov_b32_e32 v0, s6
|
|
; VI-NEXT: v_mov_b32_e32 v1, s7
|
|
; VI-NEXT: v_mov_b32_e32 v2, s4
|
|
; VI-NEXT: v_mov_b32_e32 v3, s5
|
|
; VI-NEXT: flat_atomic_swap_x2 v[0:1], v[2:3], v[0:1] glc
|
|
; VI-NEXT: s_waitcnt vmcnt(0)
|
|
; VI-NEXT: buffer_wbinvl1_vol
|
|
; VI-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; GFX9-LABEL: global_atomic_xchg_i64_ret_scalar:
|
|
; GFX9: ; %bb.0:
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; GFX9-NEXT: v_mov_b32_e32 v0, s6
|
|
; GFX9-NEXT: v_mov_b32_e32 v1, s7
|
|
; GFX9-NEXT: v_mov_b32_e32 v2, 0
|
|
; GFX9-NEXT: global_atomic_swap_x2 v[0:1], v2, v[0:1], s[4:5] glc
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0)
|
|
; GFX9-NEXT: buffer_wbinvl1_vol
|
|
; GFX9-NEXT: s_setpc_b64 s[30:31]
|
|
%result = atomicrmw xchg ptr addrspace(1) %ptr, i64 %in seq_cst
|
|
ret i64 %result
|
|
}
define amdgpu_gfx i64 @global_atomic_xchg_i64_ret_offset_scalar(ptr addrspace(1) inreg %out, i64 inreg %in) {
|
|
; SI-LABEL: global_atomic_xchg_i64_ret_offset_scalar:
|
|
; SI: ; %bb.0:
|
|
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
|
|
; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 ; 4-byte Folded Spill
|
|
; SI-NEXT: s_mov_b64 exec, s[34:35]
|
|
; SI-NEXT: s_waitcnt expcnt(0)
|
|
; SI-NEXT: v_writelane_b32 v2, s6, 0
|
|
; SI-NEXT: v_writelane_b32 v2, s7, 1
|
|
; SI-NEXT: v_mov_b32_e32 v0, s6
|
|
; SI-NEXT: v_mov_b32_e32 v1, s7
|
|
; SI-NEXT: s_mov_b32 s7, 0xf000
|
|
; SI-NEXT: s_mov_b32 s6, -1
|
|
; SI-NEXT: s_waitcnt vmcnt(0)
|
|
; SI-NEXT: buffer_atomic_swap_x2 v[0:1], off, s[4:7], 0 offset:32 glc
|
|
; SI-NEXT: s_waitcnt vmcnt(0)
|
|
; SI-NEXT: buffer_wbinvl1
|
|
; SI-NEXT: v_readlane_b32 s7, v2, 1
|
|
; SI-NEXT: v_readlane_b32 s6, v2, 0
|
|
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
|
|
; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 ; 4-byte Folded Reload
|
|
; SI-NEXT: s_mov_b64 exec, s[34:35]
|
|
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
|
|
; SI-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; VI-LABEL: global_atomic_xchg_i64_ret_offset_scalar:
|
|
; VI: ; %bb.0:
|
|
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; VI-NEXT: s_add_u32 s34, s4, 32
|
|
; VI-NEXT: s_addc_u32 s35, s5, 0
|
|
; VI-NEXT: v_mov_b32_e32 v2, s34
|
|
; VI-NEXT: v_mov_b32_e32 v0, s6
|
|
; VI-NEXT: v_mov_b32_e32 v1, s7
|
|
; VI-NEXT: v_mov_b32_e32 v3, s35
|
|
; VI-NEXT: flat_atomic_swap_x2 v[0:1], v[2:3], v[0:1] glc
|
|
; VI-NEXT: s_waitcnt vmcnt(0)
|
|
; VI-NEXT: buffer_wbinvl1_vol
|
|
; VI-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; GFX9-LABEL: global_atomic_xchg_i64_ret_offset_scalar:
|
|
; GFX9: ; %bb.0:
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; GFX9-NEXT: v_mov_b32_e32 v0, s6
|
|
; GFX9-NEXT: v_mov_b32_e32 v1, s7
|
|
; GFX9-NEXT: v_mov_b32_e32 v2, 0
|
|
; GFX9-NEXT: global_atomic_swap_x2 v[0:1], v2, v[0:1], s[4:5] offset:32 glc
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0)
|
|
; GFX9-NEXT: buffer_wbinvl1_vol
|
|
; GFX9-NEXT: s_setpc_b64 s[30:31]
|
|
%gep = getelementptr i64, ptr addrspace(1) %out, i64 4
|
|
%result = atomicrmw xchg ptr addrspace(1) %gep, i64 %in seq_cst
|
|
ret i64 %result
|
|
}
define void @global_atomic_xchg_i64_noret_offset__amdgpu_no_remote_memory(ptr addrspace(1) %out, i64 %in) {
|
|
; SI-LABEL: global_atomic_xchg_i64_noret_offset__amdgpu_no_remote_memory:
|
|
; SI: ; %bb.0:
|
|
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; SI-NEXT: s_mov_b32 s6, 0
|
|
; SI-NEXT: s_mov_b32 s7, 0xf000
|
|
; SI-NEXT: s_mov_b32 s4, s6
|
|
; SI-NEXT: s_mov_b32 s5, s6
|
|
; SI-NEXT: buffer_atomic_swap_x2 v[2:3], v[0:1], s[4:7], 0 addr64 offset:32
|
|
; SI-NEXT: s_waitcnt vmcnt(0)
|
|
; SI-NEXT: buffer_wbinvl1
|
|
; SI-NEXT: s_waitcnt expcnt(0)
|
|
; SI-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; VI-LABEL: global_atomic_xchg_i64_noret_offset__amdgpu_no_remote_memory:
|
|
; VI: ; %bb.0:
|
|
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; VI-NEXT: v_add_u32_e32 v0, vcc, 32, v0
|
|
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
|
|
; VI-NEXT: flat_atomic_swap_x2 v[0:1], v[2:3]
|
|
; VI-NEXT: s_waitcnt vmcnt(0)
|
|
; VI-NEXT: buffer_wbinvl1_vol
|
|
; VI-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; GFX9-LABEL: global_atomic_xchg_i64_noret_offset__amdgpu_no_remote_memory:
|
|
; GFX9: ; %bb.0:
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; GFX9-NEXT: global_atomic_swap_x2 v[0:1], v[2:3], off offset:32
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0)
|
|
; GFX9-NEXT: buffer_wbinvl1_vol
|
|
; GFX9-NEXT: s_setpc_b64 s[30:31]
|
|
%gep = getelementptr i64, ptr addrspace(1) %out, i64 4
|
|
%tmp0 = atomicrmw xchg ptr addrspace(1) %gep, i64 %in seq_cst, !amdgpu.no.remote.memory !0
|
|
ret void
|
|
}
define i64 @global_atomic_xchg_i64_ret_offset__amdgpu_no_remote_memory(ptr addrspace(1) %out, i64 %in) {
|
|
; SI-LABEL: global_atomic_xchg_i64_ret_offset__amdgpu_no_remote_memory:
|
|
; SI: ; %bb.0:
|
|
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; SI-NEXT: s_mov_b32 s6, 0
|
|
; SI-NEXT: s_mov_b32 s7, 0xf000
|
|
; SI-NEXT: s_mov_b32 s4, s6
|
|
; SI-NEXT: s_mov_b32 s5, s6
|
|
; SI-NEXT: buffer_atomic_swap_x2 v[2:3], v[0:1], s[4:7], 0 addr64 offset:32 glc
|
|
; SI-NEXT: s_waitcnt vmcnt(0)
|
|
; SI-NEXT: buffer_wbinvl1
|
|
; SI-NEXT: v_mov_b32_e32 v0, v2
|
|
; SI-NEXT: v_mov_b32_e32 v1, v3
|
|
; SI-NEXT: s_waitcnt expcnt(0)
|
|
; SI-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; VI-LABEL: global_atomic_xchg_i64_ret_offset__amdgpu_no_remote_memory:
|
|
; VI: ; %bb.0:
|
|
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; VI-NEXT: v_add_u32_e32 v0, vcc, 32, v0
|
|
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
|
|
; VI-NEXT: flat_atomic_swap_x2 v[0:1], v[0:1], v[2:3] glc
|
|
; VI-NEXT: s_waitcnt vmcnt(0)
|
|
; VI-NEXT: buffer_wbinvl1_vol
|
|
; VI-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; GFX9-LABEL: global_atomic_xchg_i64_ret_offset__amdgpu_no_remote_memory:
|
|
; GFX9: ; %bb.0:
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; GFX9-NEXT: global_atomic_swap_x2 v[0:1], v[0:1], v[2:3], off offset:32 glc
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0)
|
|
; GFX9-NEXT: buffer_wbinvl1_vol
|
|
; GFX9-NEXT: s_setpc_b64 s[30:31]
|
|
%gep = getelementptr i64, ptr addrspace(1) %out, i64 4
|
|
%result = atomicrmw xchg ptr addrspace(1) %gep, i64 %in seq_cst, !amdgpu.no.remote.memory !0
|
|
ret i64 %result
|
|
}

; ---------------------------------------------------------------------
; atomicrmw xchg f64
; ---------------------------------------------------------------------

define void @global_atomic_xchg_f64_noret(ptr addrspace(1) %ptr, double %in) {
|
|
; SI-LABEL: global_atomic_xchg_f64_noret:
|
|
; SI: ; %bb.0:
|
|
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; SI-NEXT: s_mov_b32 s6, 0
|
|
; SI-NEXT: s_mov_b32 s7, 0xf000
|
|
; SI-NEXT: s_mov_b32 s4, s6
|
|
; SI-NEXT: s_mov_b32 s5, s6
|
|
; SI-NEXT: buffer_atomic_swap_x2 v[2:3], v[0:1], s[4:7], 0 addr64
|
|
; SI-NEXT: s_waitcnt vmcnt(0)
|
|
; SI-NEXT: buffer_wbinvl1
|
|
; SI-NEXT: s_waitcnt expcnt(0)
|
|
; SI-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; VI-LABEL: global_atomic_xchg_f64_noret:
|
|
; VI: ; %bb.0:
|
|
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; VI-NEXT: flat_atomic_swap_x2 v[0:1], v[2:3]
|
|
; VI-NEXT: s_waitcnt vmcnt(0)
|
|
; VI-NEXT: buffer_wbinvl1_vol
|
|
; VI-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; GFX9-LABEL: global_atomic_xchg_f64_noret:
|
|
; GFX9: ; %bb.0:
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; GFX9-NEXT: global_atomic_swap_x2 v[0:1], v[2:3], off
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0)
|
|
; GFX9-NEXT: buffer_wbinvl1_vol
|
|
; GFX9-NEXT: s_setpc_b64 s[30:31]
|
|
%tmp0 = atomicrmw xchg ptr addrspace(1) %ptr, double %in seq_cst
|
|
ret void
|
|
}
define void @global_atomic_xchg_f64_noret_offset(ptr addrspace(1) %out, double %in) {
|
|
; SI-LABEL: global_atomic_xchg_f64_noret_offset:
|
|
; SI: ; %bb.0:
|
|
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; SI-NEXT: s_mov_b32 s6, 0
|
|
; SI-NEXT: s_mov_b32 s7, 0xf000
|
|
; SI-NEXT: s_mov_b32 s4, s6
|
|
; SI-NEXT: s_mov_b32 s5, s6
|
|
; SI-NEXT: buffer_atomic_swap_x2 v[2:3], v[0:1], s[4:7], 0 addr64 offset:32
|
|
; SI-NEXT: s_waitcnt vmcnt(0)
|
|
; SI-NEXT: buffer_wbinvl1
|
|
; SI-NEXT: s_waitcnt expcnt(0)
|
|
; SI-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; VI-LABEL: global_atomic_xchg_f64_noret_offset:
|
|
; VI: ; %bb.0:
|
|
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; VI-NEXT: v_add_u32_e32 v0, vcc, 32, v0
|
|
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
|
|
; VI-NEXT: flat_atomic_swap_x2 v[0:1], v[2:3]
|
|
; VI-NEXT: s_waitcnt vmcnt(0)
|
|
; VI-NEXT: buffer_wbinvl1_vol
|
|
; VI-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; GFX9-LABEL: global_atomic_xchg_f64_noret_offset:
|
|
; GFX9: ; %bb.0:
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; GFX9-NEXT: global_atomic_swap_x2 v[0:1], v[2:3], off offset:32
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0)
|
|
; GFX9-NEXT: buffer_wbinvl1_vol
|
|
; GFX9-NEXT: s_setpc_b64 s[30:31]
|
|
%gep = getelementptr double, ptr addrspace(1) %out, i32 4
|
|
%tmp0 = atomicrmw xchg ptr addrspace(1) %gep, double %in seq_cst
|
|
ret void
|
|
}
define double @global_atomic_xchg_f64_ret(ptr addrspace(1) %ptr, double %in) {
|
|
; SI-LABEL: global_atomic_xchg_f64_ret:
|
|
; SI: ; %bb.0:
|
|
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; SI-NEXT: s_mov_b32 s6, 0
|
|
; SI-NEXT: s_mov_b32 s7, 0xf000
|
|
; SI-NEXT: s_mov_b32 s4, s6
|
|
; SI-NEXT: s_mov_b32 s5, s6
|
|
; SI-NEXT: buffer_atomic_swap_x2 v[2:3], v[0:1], s[4:7], 0 addr64 glc
|
|
; SI-NEXT: s_waitcnt vmcnt(0)
|
|
; SI-NEXT: buffer_wbinvl1
|
|
; SI-NEXT: v_mov_b32_e32 v0, v2
|
|
; SI-NEXT: v_mov_b32_e32 v1, v3
|
|
; SI-NEXT: s_waitcnt expcnt(0)
|
|
; SI-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; VI-LABEL: global_atomic_xchg_f64_ret:
|
|
; VI: ; %bb.0:
|
|
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; VI-NEXT: flat_atomic_swap_x2 v[0:1], v[0:1], v[2:3] glc
|
|
; VI-NEXT: s_waitcnt vmcnt(0)
|
|
; VI-NEXT: buffer_wbinvl1_vol
|
|
; VI-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; GFX9-LABEL: global_atomic_xchg_f64_ret:
|
|
; GFX9: ; %bb.0:
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; GFX9-NEXT: global_atomic_swap_x2 v[0:1], v[0:1], v[2:3], off glc
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0)
|
|
; GFX9-NEXT: buffer_wbinvl1_vol
|
|
; GFX9-NEXT: s_setpc_b64 s[30:31]
|
|
%result = atomicrmw xchg ptr addrspace(1) %ptr, double %in seq_cst
|
|
ret double %result
|
|
}
define double @global_atomic_xchg_f64_ret_offset(ptr addrspace(1) %out, double %in) {
|
|
; SI-LABEL: global_atomic_xchg_f64_ret_offset:
|
|
; SI: ; %bb.0:
|
|
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; SI-NEXT: s_mov_b32 s6, 0
|
|
; SI-NEXT: s_mov_b32 s7, 0xf000
|
|
; SI-NEXT: s_mov_b32 s4, s6
|
|
; SI-NEXT: s_mov_b32 s5, s6
|
|
; SI-NEXT: buffer_atomic_swap_x2 v[2:3], v[0:1], s[4:7], 0 addr64 offset:32 glc
|
|
; SI-NEXT: s_waitcnt vmcnt(0)
|
|
; SI-NEXT: buffer_wbinvl1
|
|
; SI-NEXT: v_mov_b32_e32 v0, v2
|
|
; SI-NEXT: v_mov_b32_e32 v1, v3
|
|
; SI-NEXT: s_waitcnt expcnt(0)
|
|
; SI-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; VI-LABEL: global_atomic_xchg_f64_ret_offset:
|
|
; VI: ; %bb.0:
|
|
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; VI-NEXT: v_add_u32_e32 v0, vcc, 32, v0
|
|
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
|
|
; VI-NEXT: flat_atomic_swap_x2 v[0:1], v[0:1], v[2:3] glc
|
|
; VI-NEXT: s_waitcnt vmcnt(0)
|
|
; VI-NEXT: buffer_wbinvl1_vol
|
|
; VI-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; GFX9-LABEL: global_atomic_xchg_f64_ret_offset:
|
|
; GFX9: ; %bb.0:
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; GFX9-NEXT: global_atomic_swap_x2 v[0:1], v[0:1], v[2:3], off offset:32 glc
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0)
|
|
; GFX9-NEXT: buffer_wbinvl1_vol
|
|
; GFX9-NEXT: s_setpc_b64 s[30:31]
|
|
%gep = getelementptr double, ptr addrspace(1) %out, i32 4
|
|
%result = atomicrmw xchg ptr addrspace(1) %gep, double %in seq_cst
|
|
ret double %result
|
|
}
define amdgpu_gfx void @global_atomic_xchg_f64_noret_scalar(ptr addrspace(1) inreg %ptr, double inreg %in) {
|
|
; SI-LABEL: global_atomic_xchg_f64_noret_scalar:
|
|
; SI: ; %bb.0:
|
|
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
|
|
; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 ; 4-byte Folded Spill
|
|
; SI-NEXT: s_mov_b64 exec, s[34:35]
|
|
; SI-NEXT: s_waitcnt expcnt(0)
|
|
; SI-NEXT: v_writelane_b32 v2, s6, 0
|
|
; SI-NEXT: v_writelane_b32 v2, s7, 1
|
|
; SI-NEXT: s_mov_b32 s34, s7
|
|
; SI-NEXT: s_mov_b32 s35, s6
|
|
; SI-NEXT: s_mov_b32 s7, 0xf000
|
|
; SI-NEXT: s_mov_b32 s6, -1
|
|
; SI-NEXT: v_mov_b32_e32 v0, s35
|
|
; SI-NEXT: v_mov_b32_e32 v1, s34
|
|
; SI-NEXT: s_waitcnt vmcnt(0)
|
|
; SI-NEXT: buffer_atomic_swap_x2 v[0:1], off, s[4:7], 0
|
|
; SI-NEXT: s_waitcnt vmcnt(0)
|
|
; SI-NEXT: buffer_wbinvl1
|
|
; SI-NEXT: v_readlane_b32 s7, v2, 1
|
|
; SI-NEXT: v_readlane_b32 s6, v2, 0
|
|
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
|
|
; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 ; 4-byte Folded Reload
|
|
; SI-NEXT: s_mov_b64 exec, s[34:35]
|
|
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
|
|
; SI-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; VI-LABEL: global_atomic_xchg_f64_noret_scalar:
|
|
; VI: ; %bb.0:
|
|
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; VI-NEXT: v_mov_b32_e32 v0, s6
|
|
; VI-NEXT: v_mov_b32_e32 v1, s7
|
|
; VI-NEXT: v_mov_b32_e32 v2, s4
|
|
; VI-NEXT: v_mov_b32_e32 v3, s5
|
|
; VI-NEXT: flat_atomic_swap_x2 v[2:3], v[0:1]
|
|
; VI-NEXT: s_waitcnt vmcnt(0)
|
|
; VI-NEXT: buffer_wbinvl1_vol
|
|
; VI-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; GFX9-LABEL: global_atomic_xchg_f64_noret_scalar:
|
|
; GFX9: ; %bb.0:
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; GFX9-NEXT: v_mov_b32_e32 v0, s6
|
|
; GFX9-NEXT: v_mov_b32_e32 v1, s7
|
|
; GFX9-NEXT: v_mov_b32_e32 v2, 0
|
|
; GFX9-NEXT: global_atomic_swap_x2 v2, v[0:1], s[4:5]
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0)
|
|
; GFX9-NEXT: buffer_wbinvl1_vol
|
|
; GFX9-NEXT: s_setpc_b64 s[30:31]
|
|
%tmp0 = atomicrmw xchg ptr addrspace(1) %ptr, double %in seq_cst
|
|
ret void
|
|
}
define amdgpu_gfx void @global_atomic_xchg_f64_noret_offset_scalar(ptr addrspace(1) inreg %out, double inreg %in) {
|
|
; SI-LABEL: global_atomic_xchg_f64_noret_offset_scalar:
|
|
; SI: ; %bb.0:
|
|
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
|
|
; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 ; 4-byte Folded Spill
|
|
; SI-NEXT: s_mov_b64 exec, s[34:35]
|
|
; SI-NEXT: s_waitcnt expcnt(0)
|
|
; SI-NEXT: v_writelane_b32 v2, s6, 0
|
|
; SI-NEXT: v_writelane_b32 v2, s7, 1
|
|
; SI-NEXT: v_mov_b32_e32 v0, s6
|
|
; SI-NEXT: v_mov_b32_e32 v1, s7
|
|
; SI-NEXT: s_mov_b32 s7, 0xf000
|
|
; SI-NEXT: s_mov_b32 s6, -1
|
|
; SI-NEXT: s_waitcnt vmcnt(0)
|
|
; SI-NEXT: buffer_atomic_swap_x2 v[0:1], off, s[4:7], 0 offset:32
|
|
; SI-NEXT: s_waitcnt vmcnt(0)
|
|
; SI-NEXT: buffer_wbinvl1
|
|
; SI-NEXT: v_readlane_b32 s7, v2, 1
|
|
; SI-NEXT: v_readlane_b32 s6, v2, 0
|
|
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
|
|
; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 ; 4-byte Folded Reload
|
|
; SI-NEXT: s_mov_b64 exec, s[34:35]
|
|
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
|
|
; SI-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; VI-LABEL: global_atomic_xchg_f64_noret_offset_scalar:
|
|
; VI: ; %bb.0:
|
|
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; VI-NEXT: s_add_u32 s34, s4, 32
|
|
; VI-NEXT: s_addc_u32 s35, s5, 0
|
|
; VI-NEXT: v_mov_b32_e32 v2, s34
|
|
; VI-NEXT: v_mov_b32_e32 v0, s6
|
|
; VI-NEXT: v_mov_b32_e32 v1, s7
|
|
; VI-NEXT: v_mov_b32_e32 v3, s35
|
|
; VI-NEXT: flat_atomic_swap_x2 v[2:3], v[0:1]
|
|
; VI-NEXT: s_waitcnt vmcnt(0)
|
|
; VI-NEXT: buffer_wbinvl1_vol
|
|
; VI-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; GFX9-LABEL: global_atomic_xchg_f64_noret_offset_scalar:
|
|
; GFX9: ; %bb.0:
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; GFX9-NEXT: v_mov_b32_e32 v0, s6
|
|
; GFX9-NEXT: v_mov_b32_e32 v1, s7
|
|
; GFX9-NEXT: v_mov_b32_e32 v2, 0
|
|
; GFX9-NEXT: global_atomic_swap_x2 v2, v[0:1], s[4:5] offset:32
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0)
|
|
; GFX9-NEXT: buffer_wbinvl1_vol
|
|
; GFX9-NEXT: s_setpc_b64 s[30:31]
|
|
%gep = getelementptr double, ptr addrspace(1) %out, i32 4
|
|
%tmp0 = atomicrmw xchg ptr addrspace(1) %gep, double %in seq_cst
|
|
ret void
|
|
}
define amdgpu_gfx double @global_atomic_xchg_f64_ret_scalar(ptr addrspace(1) inreg %ptr, double inreg %in) {
|
|
; SI-LABEL: global_atomic_xchg_f64_ret_scalar:
|
|
; SI: ; %bb.0:
|
|
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
|
|
; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 ; 4-byte Folded Spill
|
|
; SI-NEXT: s_mov_b64 exec, s[34:35]
|
|
; SI-NEXT: s_waitcnt expcnt(0)
|
|
; SI-NEXT: v_writelane_b32 v2, s6, 0
|
|
; SI-NEXT: v_writelane_b32 v2, s7, 1
|
|
; SI-NEXT: s_mov_b32 s34, s7
|
|
; SI-NEXT: s_mov_b32 s35, s6
|
|
; SI-NEXT: s_mov_b32 s7, 0xf000
|
|
; SI-NEXT: s_mov_b32 s6, -1
|
|
; SI-NEXT: v_mov_b32_e32 v0, s35
|
|
; SI-NEXT: v_mov_b32_e32 v1, s34
|
|
; SI-NEXT: s_waitcnt vmcnt(0)
|
|
; SI-NEXT: buffer_atomic_swap_x2 v[0:1], off, s[4:7], 0 glc
|
|
; SI-NEXT: s_waitcnt vmcnt(0)
|
|
; SI-NEXT: buffer_wbinvl1
|
|
; SI-NEXT: v_readlane_b32 s7, v2, 1
|
|
; SI-NEXT: v_readlane_b32 s6, v2, 0
|
|
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
|
|
; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 ; 4-byte Folded Reload
|
|
; SI-NEXT: s_mov_b64 exec, s[34:35]
|
|
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
|
|
; SI-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; VI-LABEL: global_atomic_xchg_f64_ret_scalar:
|
|
; VI: ; %bb.0:
|
|
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; VI-NEXT: v_mov_b32_e32 v0, s6
|
|
; VI-NEXT: v_mov_b32_e32 v1, s7
|
|
; VI-NEXT: v_mov_b32_e32 v2, s4
|
|
; VI-NEXT: v_mov_b32_e32 v3, s5
|
|
; VI-NEXT: flat_atomic_swap_x2 v[0:1], v[2:3], v[0:1] glc
|
|
; VI-NEXT: s_waitcnt vmcnt(0)
|
|
; VI-NEXT: buffer_wbinvl1_vol
|
|
; VI-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; GFX9-LABEL: global_atomic_xchg_f64_ret_scalar:
|
|
; GFX9: ; %bb.0:
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; GFX9-NEXT: v_mov_b32_e32 v0, s6
|
|
; GFX9-NEXT: v_mov_b32_e32 v1, s7
|
|
; GFX9-NEXT: v_mov_b32_e32 v2, 0
|
|
; GFX9-NEXT: global_atomic_swap_x2 v[0:1], v2, v[0:1], s[4:5] glc
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0)
|
|
; GFX9-NEXT: buffer_wbinvl1_vol
|
|
; GFX9-NEXT: s_setpc_b64 s[30:31]
|
|
%result = atomicrmw xchg ptr addrspace(1) %ptr, double %in seq_cst
|
|
ret double %result
|
|
}
; f64 xchg with used result, SGPR (inreg) pointer+value, constant +32-byte offset.
; NOTE(review): the target-prefixed lines below are autogenerated FileCheck
; assertions (utils/update_llc_test_checks.py) — regenerate, do not hand-edit.
define amdgpu_gfx double @global_atomic_xchg_f64_ret_offset_scalar(ptr addrspace(1) inreg %out, double inreg %in) {
|
|
; SI-LABEL: global_atomic_xchg_f64_ret_offset_scalar:
|
|
; SI: ; %bb.0:
|
|
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
|
|
; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 ; 4-byte Folded Spill
|
|
; SI-NEXT: s_mov_b64 exec, s[34:35]
|
|
; SI-NEXT: s_waitcnt expcnt(0)
|
|
; SI-NEXT: v_writelane_b32 v2, s6, 0
|
|
; SI-NEXT: v_writelane_b32 v2, s7, 1
|
|
; SI-NEXT: v_mov_b32_e32 v0, s6
|
|
; SI-NEXT: v_mov_b32_e32 v1, s7
|
|
; SI-NEXT: s_mov_b32 s7, 0xf000
|
|
; SI-NEXT: s_mov_b32 s6, -1
|
|
; SI-NEXT: s_waitcnt vmcnt(0)
|
|
; SI-NEXT: buffer_atomic_swap_x2 v[0:1], off, s[4:7], 0 offset:32 glc
|
|
; SI-NEXT: s_waitcnt vmcnt(0)
|
|
; SI-NEXT: buffer_wbinvl1
|
|
; SI-NEXT: v_readlane_b32 s7, v2, 1
|
|
; SI-NEXT: v_readlane_b32 s6, v2, 0
|
|
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
|
|
; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 ; 4-byte Folded Reload
|
|
; SI-NEXT: s_mov_b64 exec, s[34:35]
|
|
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
|
|
; SI-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; VI-LABEL: global_atomic_xchg_f64_ret_offset_scalar:
|
|
; VI: ; %bb.0:
|
|
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; VI-NEXT: s_add_u32 s34, s4, 32
|
|
; VI-NEXT: s_addc_u32 s35, s5, 0
|
|
; VI-NEXT: v_mov_b32_e32 v2, s34
|
|
; VI-NEXT: v_mov_b32_e32 v0, s6
|
|
; VI-NEXT: v_mov_b32_e32 v1, s7
|
|
; VI-NEXT: v_mov_b32_e32 v3, s35
|
|
; VI-NEXT: flat_atomic_swap_x2 v[0:1], v[2:3], v[0:1] glc
|
|
; VI-NEXT: s_waitcnt vmcnt(0)
|
|
; VI-NEXT: buffer_wbinvl1_vol
|
|
; VI-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; GFX9-LABEL: global_atomic_xchg_f64_ret_offset_scalar:
|
|
; GFX9: ; %bb.0:
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; GFX9-NEXT: v_mov_b32_e32 v0, s6
|
|
; GFX9-NEXT: v_mov_b32_e32 v1, s7
|
|
; GFX9-NEXT: v_mov_b32_e32 v2, 0
|
|
; GFX9-NEXT: global_atomic_swap_x2 v[0:1], v2, v[0:1], s[4:5] offset:32 glc
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0)
|
|
; GFX9-NEXT: buffer_wbinvl1_vol
|
|
; GFX9-NEXT: s_setpc_b64 s[30:31]
|
|
  %gep = getelementptr double, ptr addrspace(1) %out, i32 4
|
|
  %result = atomicrmw xchg ptr addrspace(1) %gep, double %in seq_cst
|
|
  ret double %result
|
|
}
|
|
|
|
; f64 xchg, result unused, +16-byte offset, with !amdgpu.no.remote.memory.
; NOTE(review): autogenerated FileCheck assertions below — regenerate, do not hand-edit.
define void @global_atomic_xchg_f64_noret_offset__amdgpu_no_remote_memory(ptr addrspace(1) %out, double %in) {
|
|
; SI-LABEL: global_atomic_xchg_f64_noret_offset__amdgpu_no_remote_memory:
|
|
; SI: ; %bb.0:
|
|
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; SI-NEXT: s_mov_b32 s6, 0
|
|
; SI-NEXT: s_mov_b32 s7, 0xf000
|
|
; SI-NEXT: s_mov_b32 s4, s6
|
|
; SI-NEXT: s_mov_b32 s5, s6
|
|
; SI-NEXT: buffer_atomic_swap_x2 v[2:3], v[0:1], s[4:7], 0 addr64 offset:16
|
|
; SI-NEXT: s_waitcnt vmcnt(0)
|
|
; SI-NEXT: buffer_wbinvl1
|
|
; SI-NEXT: s_waitcnt expcnt(0)
|
|
; SI-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; VI-LABEL: global_atomic_xchg_f64_noret_offset__amdgpu_no_remote_memory:
|
|
; VI: ; %bb.0:
|
|
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; VI-NEXT: v_add_u32_e32 v0, vcc, 16, v0
|
|
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
|
|
; VI-NEXT: flat_atomic_swap_x2 v[0:1], v[2:3]
|
|
; VI-NEXT: s_waitcnt vmcnt(0)
|
|
; VI-NEXT: buffer_wbinvl1_vol
|
|
; VI-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; GFX9-LABEL: global_atomic_xchg_f64_noret_offset__amdgpu_no_remote_memory:
|
|
; GFX9: ; %bb.0:
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; GFX9-NEXT: global_atomic_swap_x2 v[0:1], v[2:3], off offset:16
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0)
|
|
; GFX9-NEXT: buffer_wbinvl1_vol
|
|
; GFX9-NEXT: s_setpc_b64 s[30:31]
|
|
  %gep = getelementptr i32, ptr addrspace(1) %out, i64 4
|
|
  %tmp0 = atomicrmw xchg ptr addrspace(1) %gep, double %in seq_cst, !amdgpu.no.remote.memory !0
|
|
  ret void
|
|
}
|
|
|
|
; f64 xchg with used result, +16-byte offset, with !amdgpu.no.remote.memory.
; NOTE(review): autogenerated FileCheck assertions below — regenerate, do not hand-edit.
define double @global_atomic_xchg_f64_ret_offset__amdgpu_no_remote_memory(ptr addrspace(1) %out, double %in) {
|
|
; SI-LABEL: global_atomic_xchg_f64_ret_offset__amdgpu_no_remote_memory:
|
|
; SI: ; %bb.0:
|
|
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; SI-NEXT: s_mov_b32 s6, 0
|
|
; SI-NEXT: s_mov_b32 s7, 0xf000
|
|
; SI-NEXT: s_mov_b32 s4, s6
|
|
; SI-NEXT: s_mov_b32 s5, s6
|
|
; SI-NEXT: buffer_atomic_swap_x2 v[2:3], v[0:1], s[4:7], 0 addr64 offset:16 glc
|
|
; SI-NEXT: s_waitcnt vmcnt(0)
|
|
; SI-NEXT: buffer_wbinvl1
|
|
; SI-NEXT: v_mov_b32_e32 v0, v2
|
|
; SI-NEXT: v_mov_b32_e32 v1, v3
|
|
; SI-NEXT: s_waitcnt expcnt(0)
|
|
; SI-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; VI-LABEL: global_atomic_xchg_f64_ret_offset__amdgpu_no_remote_memory:
|
|
; VI: ; %bb.0:
|
|
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; VI-NEXT: v_add_u32_e32 v0, vcc, 16, v0
|
|
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
|
|
; VI-NEXT: flat_atomic_swap_x2 v[0:1], v[0:1], v[2:3] glc
|
|
; VI-NEXT: s_waitcnt vmcnt(0)
|
|
; VI-NEXT: buffer_wbinvl1_vol
|
|
; VI-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; GFX9-LABEL: global_atomic_xchg_f64_ret_offset__amdgpu_no_remote_memory:
|
|
; GFX9: ; %bb.0:
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; GFX9-NEXT: global_atomic_swap_x2 v[0:1], v[0:1], v[2:3], off offset:16 glc
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0)
|
|
; GFX9-NEXT: buffer_wbinvl1_vol
|
|
; GFX9-NEXT: s_setpc_b64 s[30:31]
|
|
  %gep = getelementptr i32, ptr addrspace(1) %out, i64 4
|
|
  %result = atomicrmw xchg ptr addrspace(1) %gep, double %in seq_cst, !amdgpu.no.remote.memory !0
|
|
  ret double %result
|
|
}
|
|
|
|
; ---------------------------------------------------------------------
|
|
; atomicrmw add
|
|
; ---------------------------------------------------------------------
|
|
|
|
; i64 add to global memory, result unused (noret selects the non-glc atomic).
; NOTE(review): autogenerated FileCheck assertions below — regenerate, do not hand-edit.
define void @global_atomic_add_i64_noret(ptr addrspace(1) %ptr, i64 %in) {
|
|
; SI-LABEL: global_atomic_add_i64_noret:
|
|
; SI: ; %bb.0:
|
|
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; SI-NEXT: s_mov_b32 s6, 0
|
|
; SI-NEXT: s_mov_b32 s7, 0xf000
|
|
; SI-NEXT: s_mov_b32 s4, s6
|
|
; SI-NEXT: s_mov_b32 s5, s6
|
|
; SI-NEXT: buffer_atomic_add_x2 v[2:3], v[0:1], s[4:7], 0 addr64
|
|
; SI-NEXT: s_waitcnt vmcnt(0)
|
|
; SI-NEXT: buffer_wbinvl1
|
|
; SI-NEXT: s_waitcnt expcnt(0)
|
|
; SI-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; VI-LABEL: global_atomic_add_i64_noret:
|
|
; VI: ; %bb.0:
|
|
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; VI-NEXT: flat_atomic_add_x2 v[0:1], v[2:3]
|
|
; VI-NEXT: s_waitcnt vmcnt(0)
|
|
; VI-NEXT: buffer_wbinvl1_vol
|
|
; VI-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; GFX9-LABEL: global_atomic_add_i64_noret:
|
|
; GFX9: ; %bb.0:
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; GFX9-NEXT: global_atomic_add_x2 v[0:1], v[2:3], off
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0)
|
|
; GFX9-NEXT: buffer_wbinvl1_vol
|
|
; GFX9-NEXT: s_setpc_b64 s[30:31]
|
|
  %tmp0 = atomicrmw add ptr addrspace(1) %ptr, i64 %in seq_cst
|
|
  ret void
|
|
}
|
|
|
|
; i64 add, result unused, +32-byte offset folded into the addressing mode.
; NOTE(review): autogenerated FileCheck assertions below — regenerate, do not hand-edit.
define void @global_atomic_add_i64_noret_offset(ptr addrspace(1) %out, i64 %in) {
|
|
; SI-LABEL: global_atomic_add_i64_noret_offset:
|
|
; SI: ; %bb.0:
|
|
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; SI-NEXT: s_mov_b32 s6, 0
|
|
; SI-NEXT: s_mov_b32 s7, 0xf000
|
|
; SI-NEXT: s_mov_b32 s4, s6
|
|
; SI-NEXT: s_mov_b32 s5, s6
|
|
; SI-NEXT: buffer_atomic_add_x2 v[2:3], v[0:1], s[4:7], 0 addr64 offset:32
|
|
; SI-NEXT: s_waitcnt vmcnt(0)
|
|
; SI-NEXT: buffer_wbinvl1
|
|
; SI-NEXT: s_waitcnt expcnt(0)
|
|
; SI-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; VI-LABEL: global_atomic_add_i64_noret_offset:
|
|
; VI: ; %bb.0:
|
|
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; VI-NEXT: v_add_u32_e32 v0, vcc, 32, v0
|
|
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
|
|
; VI-NEXT: flat_atomic_add_x2 v[0:1], v[2:3]
|
|
; VI-NEXT: s_waitcnt vmcnt(0)
|
|
; VI-NEXT: buffer_wbinvl1_vol
|
|
; VI-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; GFX9-LABEL: global_atomic_add_i64_noret_offset:
|
|
; GFX9: ; %bb.0:
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; GFX9-NEXT: global_atomic_add_x2 v[0:1], v[2:3], off offset:32
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0)
|
|
; GFX9-NEXT: buffer_wbinvl1_vol
|
|
; GFX9-NEXT: s_setpc_b64 s[30:31]
|
|
  %gep = getelementptr i64, ptr addrspace(1) %out, i64 4
|
|
  %tmp0 = atomicrmw add ptr addrspace(1) %gep, i64 %in seq_cst
|
|
  ret void
|
|
}
|
|
|
|
; i64 add with used result (selects the glc form returning the old value).
; NOTE(review): autogenerated FileCheck assertions below — regenerate, do not hand-edit.
define i64 @global_atomic_add_i64_ret(ptr addrspace(1) %ptr, i64 %in) {
|
|
; SI-LABEL: global_atomic_add_i64_ret:
|
|
; SI: ; %bb.0:
|
|
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; SI-NEXT: s_mov_b32 s6, 0
|
|
; SI-NEXT: s_mov_b32 s7, 0xf000
|
|
; SI-NEXT: s_mov_b32 s4, s6
|
|
; SI-NEXT: s_mov_b32 s5, s6
|
|
; SI-NEXT: buffer_atomic_add_x2 v[2:3], v[0:1], s[4:7], 0 addr64 glc
|
|
; SI-NEXT: s_waitcnt vmcnt(0)
|
|
; SI-NEXT: buffer_wbinvl1
|
|
; SI-NEXT: v_mov_b32_e32 v0, v2
|
|
; SI-NEXT: v_mov_b32_e32 v1, v3
|
|
; SI-NEXT: s_waitcnt expcnt(0)
|
|
; SI-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; VI-LABEL: global_atomic_add_i64_ret:
|
|
; VI: ; %bb.0:
|
|
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; VI-NEXT: flat_atomic_add_x2 v[0:1], v[0:1], v[2:3] glc
|
|
; VI-NEXT: s_waitcnt vmcnt(0)
|
|
; VI-NEXT: buffer_wbinvl1_vol
|
|
; VI-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; GFX9-LABEL: global_atomic_add_i64_ret:
|
|
; GFX9: ; %bb.0:
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; GFX9-NEXT: global_atomic_add_x2 v[0:1], v[0:1], v[2:3], off glc
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0)
|
|
; GFX9-NEXT: buffer_wbinvl1_vol
|
|
; GFX9-NEXT: s_setpc_b64 s[30:31]
|
|
  %result = atomicrmw add ptr addrspace(1) %ptr, i64 %in seq_cst
|
|
  ret i64 %result
|
|
}
|
|
|
|
; i64 add with used result and +32-byte offset.
; NOTE(review): autogenerated FileCheck assertions below — regenerate, do not hand-edit.
define i64 @global_atomic_add_i64_ret_offset(ptr addrspace(1) %out, i64 %in) {
|
|
; SI-LABEL: global_atomic_add_i64_ret_offset:
|
|
; SI: ; %bb.0:
|
|
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; SI-NEXT: s_mov_b32 s6, 0
|
|
; SI-NEXT: s_mov_b32 s7, 0xf000
|
|
; SI-NEXT: s_mov_b32 s4, s6
|
|
; SI-NEXT: s_mov_b32 s5, s6
|
|
; SI-NEXT: buffer_atomic_add_x2 v[2:3], v[0:1], s[4:7], 0 addr64 offset:32 glc
|
|
; SI-NEXT: s_waitcnt vmcnt(0)
|
|
; SI-NEXT: buffer_wbinvl1
|
|
; SI-NEXT: v_mov_b32_e32 v0, v2
|
|
; SI-NEXT: v_mov_b32_e32 v1, v3
|
|
; SI-NEXT: s_waitcnt expcnt(0)
|
|
; SI-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; VI-LABEL: global_atomic_add_i64_ret_offset:
|
|
; VI: ; %bb.0:
|
|
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; VI-NEXT: v_add_u32_e32 v0, vcc, 32, v0
|
|
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
|
|
; VI-NEXT: flat_atomic_add_x2 v[0:1], v[0:1], v[2:3] glc
|
|
; VI-NEXT: s_waitcnt vmcnt(0)
|
|
; VI-NEXT: buffer_wbinvl1_vol
|
|
; VI-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; GFX9-LABEL: global_atomic_add_i64_ret_offset:
|
|
; GFX9: ; %bb.0:
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; GFX9-NEXT: global_atomic_add_x2 v[0:1], v[0:1], v[2:3], off offset:32 glc
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0)
|
|
; GFX9-NEXT: buffer_wbinvl1_vol
|
|
; GFX9-NEXT: s_setpc_b64 s[30:31]
|
|
  %gep = getelementptr i64, ptr addrspace(1) %out, i64 4
|
|
  %result = atomicrmw add ptr addrspace(1) %gep, i64 %in seq_cst
|
|
  ret i64 %result
|
|
}
|
|
|
|
; i64 add, result unused, SGPR (inreg) operands; SI spills/restores s6-s7 via
; v_writelane/v_readlane around repurposing them for the buffer resource.
; NOTE(review): autogenerated FileCheck assertions below — regenerate, do not hand-edit.
define amdgpu_gfx void @global_atomic_add_i64_noret_scalar(ptr addrspace(1) inreg %ptr, i64 inreg %in) {
|
|
; SI-LABEL: global_atomic_add_i64_noret_scalar:
|
|
; SI: ; %bb.0:
|
|
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
|
|
; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 ; 4-byte Folded Spill
|
|
; SI-NEXT: s_mov_b64 exec, s[34:35]
|
|
; SI-NEXT: s_waitcnt expcnt(0)
|
|
; SI-NEXT: v_writelane_b32 v2, s6, 0
|
|
; SI-NEXT: v_writelane_b32 v2, s7, 1
|
|
; SI-NEXT: s_mov_b32 s34, s7
|
|
; SI-NEXT: s_mov_b32 s35, s6
|
|
; SI-NEXT: s_mov_b32 s7, 0xf000
|
|
; SI-NEXT: s_mov_b32 s6, -1
|
|
; SI-NEXT: v_mov_b32_e32 v0, s35
|
|
; SI-NEXT: v_mov_b32_e32 v1, s34
|
|
; SI-NEXT: s_waitcnt vmcnt(0)
|
|
; SI-NEXT: buffer_atomic_add_x2 v[0:1], off, s[4:7], 0
|
|
; SI-NEXT: s_waitcnt vmcnt(0)
|
|
; SI-NEXT: buffer_wbinvl1
|
|
; SI-NEXT: v_readlane_b32 s7, v2, 1
|
|
; SI-NEXT: v_readlane_b32 s6, v2, 0
|
|
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
|
|
; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 ; 4-byte Folded Reload
|
|
; SI-NEXT: s_mov_b64 exec, s[34:35]
|
|
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
|
|
; SI-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; VI-LABEL: global_atomic_add_i64_noret_scalar:
|
|
; VI: ; %bb.0:
|
|
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; VI-NEXT: v_mov_b32_e32 v0, s6
|
|
; VI-NEXT: v_mov_b32_e32 v1, s7
|
|
; VI-NEXT: v_mov_b32_e32 v2, s4
|
|
; VI-NEXT: v_mov_b32_e32 v3, s5
|
|
; VI-NEXT: flat_atomic_add_x2 v[2:3], v[0:1]
|
|
; VI-NEXT: s_waitcnt vmcnt(0)
|
|
; VI-NEXT: buffer_wbinvl1_vol
|
|
; VI-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; GFX9-LABEL: global_atomic_add_i64_noret_scalar:
|
|
; GFX9: ; %bb.0:
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; GFX9-NEXT: v_mov_b32_e32 v0, s6
|
|
; GFX9-NEXT: v_mov_b32_e32 v1, s7
|
|
; GFX9-NEXT: v_mov_b32_e32 v2, 0
|
|
; GFX9-NEXT: global_atomic_add_x2 v2, v[0:1], s[4:5]
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0)
|
|
; GFX9-NEXT: buffer_wbinvl1_vol
|
|
; GFX9-NEXT: s_setpc_b64 s[30:31]
|
|
  %tmp0 = atomicrmw add ptr addrspace(1) %ptr, i64 %in seq_cst
|
|
  ret void
|
|
}
|
|
|
|
; i64 add, result unused, SGPR operands, +32-byte offset (VI materializes the
; offset with s_add_u32/s_addc_u32; GFX9 folds it into the instruction).
; NOTE(review): autogenerated FileCheck assertions below — regenerate, do not hand-edit.
define amdgpu_gfx void @global_atomic_add_i64_noret_offset_scalar(ptr addrspace(1) inreg %out, i64 inreg %in) {
|
|
; SI-LABEL: global_atomic_add_i64_noret_offset_scalar:
|
|
; SI: ; %bb.0:
|
|
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
|
|
; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 ; 4-byte Folded Spill
|
|
; SI-NEXT: s_mov_b64 exec, s[34:35]
|
|
; SI-NEXT: s_waitcnt expcnt(0)
|
|
; SI-NEXT: v_writelane_b32 v2, s6, 0
|
|
; SI-NEXT: v_writelane_b32 v2, s7, 1
|
|
; SI-NEXT: v_mov_b32_e32 v0, s6
|
|
; SI-NEXT: v_mov_b32_e32 v1, s7
|
|
; SI-NEXT: s_mov_b32 s7, 0xf000
|
|
; SI-NEXT: s_mov_b32 s6, -1
|
|
; SI-NEXT: s_waitcnt vmcnt(0)
|
|
; SI-NEXT: buffer_atomic_add_x2 v[0:1], off, s[4:7], 0 offset:32
|
|
; SI-NEXT: s_waitcnt vmcnt(0)
|
|
; SI-NEXT: buffer_wbinvl1
|
|
; SI-NEXT: v_readlane_b32 s7, v2, 1
|
|
; SI-NEXT: v_readlane_b32 s6, v2, 0
|
|
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
|
|
; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 ; 4-byte Folded Reload
|
|
; SI-NEXT: s_mov_b64 exec, s[34:35]
|
|
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
|
|
; SI-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; VI-LABEL: global_atomic_add_i64_noret_offset_scalar:
|
|
; VI: ; %bb.0:
|
|
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; VI-NEXT: s_add_u32 s34, s4, 32
|
|
; VI-NEXT: s_addc_u32 s35, s5, 0
|
|
; VI-NEXT: v_mov_b32_e32 v2, s34
|
|
; VI-NEXT: v_mov_b32_e32 v0, s6
|
|
; VI-NEXT: v_mov_b32_e32 v1, s7
|
|
; VI-NEXT: v_mov_b32_e32 v3, s35
|
|
; VI-NEXT: flat_atomic_add_x2 v[2:3], v[0:1]
|
|
; VI-NEXT: s_waitcnt vmcnt(0)
|
|
; VI-NEXT: buffer_wbinvl1_vol
|
|
; VI-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; GFX9-LABEL: global_atomic_add_i64_noret_offset_scalar:
|
|
; GFX9: ; %bb.0:
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; GFX9-NEXT: v_mov_b32_e32 v0, s6
|
|
; GFX9-NEXT: v_mov_b32_e32 v1, s7
|
|
; GFX9-NEXT: v_mov_b32_e32 v2, 0
|
|
; GFX9-NEXT: global_atomic_add_x2 v2, v[0:1], s[4:5] offset:32
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0)
|
|
; GFX9-NEXT: buffer_wbinvl1_vol
|
|
; GFX9-NEXT: s_setpc_b64 s[30:31]
|
|
  %gep = getelementptr i64, ptr addrspace(1) %out, i64 4
|
|
  %tmp0 = atomicrmw add ptr addrspace(1) %gep, i64 %in seq_cst
|
|
  ret void
|
|
}
|
|
|
|
; i64 add with used result, SGPR (inreg) operands.
; NOTE(review): autogenerated FileCheck assertions below — regenerate, do not hand-edit.
define amdgpu_gfx i64 @global_atomic_add_i64_ret_scalar(ptr addrspace(1) inreg %ptr, i64 inreg %in) {
|
|
; SI-LABEL: global_atomic_add_i64_ret_scalar:
|
|
; SI: ; %bb.0:
|
|
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
|
|
; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 ; 4-byte Folded Spill
|
|
; SI-NEXT: s_mov_b64 exec, s[34:35]
|
|
; SI-NEXT: s_waitcnt expcnt(0)
|
|
; SI-NEXT: v_writelane_b32 v2, s6, 0
|
|
; SI-NEXT: v_writelane_b32 v2, s7, 1
|
|
; SI-NEXT: s_mov_b32 s34, s7
|
|
; SI-NEXT: s_mov_b32 s35, s6
|
|
; SI-NEXT: s_mov_b32 s7, 0xf000
|
|
; SI-NEXT: s_mov_b32 s6, -1
|
|
; SI-NEXT: v_mov_b32_e32 v0, s35
|
|
; SI-NEXT: v_mov_b32_e32 v1, s34
|
|
; SI-NEXT: s_waitcnt vmcnt(0)
|
|
; SI-NEXT: buffer_atomic_add_x2 v[0:1], off, s[4:7], 0 glc
|
|
; SI-NEXT: s_waitcnt vmcnt(0)
|
|
; SI-NEXT: buffer_wbinvl1
|
|
; SI-NEXT: v_readlane_b32 s7, v2, 1
|
|
; SI-NEXT: v_readlane_b32 s6, v2, 0
|
|
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
|
|
; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 ; 4-byte Folded Reload
|
|
; SI-NEXT: s_mov_b64 exec, s[34:35]
|
|
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
|
|
; SI-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; VI-LABEL: global_atomic_add_i64_ret_scalar:
|
|
; VI: ; %bb.0:
|
|
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; VI-NEXT: v_mov_b32_e32 v0, s6
|
|
; VI-NEXT: v_mov_b32_e32 v1, s7
|
|
; VI-NEXT: v_mov_b32_e32 v2, s4
|
|
; VI-NEXT: v_mov_b32_e32 v3, s5
|
|
; VI-NEXT: flat_atomic_add_x2 v[0:1], v[2:3], v[0:1] glc
|
|
; VI-NEXT: s_waitcnt vmcnt(0)
|
|
; VI-NEXT: buffer_wbinvl1_vol
|
|
; VI-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; GFX9-LABEL: global_atomic_add_i64_ret_scalar:
|
|
; GFX9: ; %bb.0:
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; GFX9-NEXT: v_mov_b32_e32 v0, s6
|
|
; GFX9-NEXT: v_mov_b32_e32 v1, s7
|
|
; GFX9-NEXT: v_mov_b32_e32 v2, 0
|
|
; GFX9-NEXT: global_atomic_add_x2 v[0:1], v2, v[0:1], s[4:5] glc
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0)
|
|
; GFX9-NEXT: buffer_wbinvl1_vol
|
|
; GFX9-NEXT: s_setpc_b64 s[30:31]
|
|
  %result = atomicrmw add ptr addrspace(1) %ptr, i64 %in seq_cst
|
|
  ret i64 %result
|
|
}
|
|
|
|
; i64 add with used result, SGPR operands, +32-byte offset.
; NOTE(review): autogenerated FileCheck assertions below — regenerate, do not hand-edit.
define amdgpu_gfx i64 @global_atomic_add_i64_ret_offset_scalar(ptr addrspace(1) inreg %out, i64 inreg %in) {
|
|
; SI-LABEL: global_atomic_add_i64_ret_offset_scalar:
|
|
; SI: ; %bb.0:
|
|
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
|
|
; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 ; 4-byte Folded Spill
|
|
; SI-NEXT: s_mov_b64 exec, s[34:35]
|
|
; SI-NEXT: s_waitcnt expcnt(0)
|
|
; SI-NEXT: v_writelane_b32 v2, s6, 0
|
|
; SI-NEXT: v_writelane_b32 v2, s7, 1
|
|
; SI-NEXT: v_mov_b32_e32 v0, s6
|
|
; SI-NEXT: v_mov_b32_e32 v1, s7
|
|
; SI-NEXT: s_mov_b32 s7, 0xf000
|
|
; SI-NEXT: s_mov_b32 s6, -1
|
|
; SI-NEXT: s_waitcnt vmcnt(0)
|
|
; SI-NEXT: buffer_atomic_add_x2 v[0:1], off, s[4:7], 0 offset:32 glc
|
|
; SI-NEXT: s_waitcnt vmcnt(0)
|
|
; SI-NEXT: buffer_wbinvl1
|
|
; SI-NEXT: v_readlane_b32 s7, v2, 1
|
|
; SI-NEXT: v_readlane_b32 s6, v2, 0
|
|
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
|
|
; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 ; 4-byte Folded Reload
|
|
; SI-NEXT: s_mov_b64 exec, s[34:35]
|
|
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
|
|
; SI-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; VI-LABEL: global_atomic_add_i64_ret_offset_scalar:
|
|
; VI: ; %bb.0:
|
|
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; VI-NEXT: s_add_u32 s34, s4, 32
|
|
; VI-NEXT: s_addc_u32 s35, s5, 0
|
|
; VI-NEXT: v_mov_b32_e32 v2, s34
|
|
; VI-NEXT: v_mov_b32_e32 v0, s6
|
|
; VI-NEXT: v_mov_b32_e32 v1, s7
|
|
; VI-NEXT: v_mov_b32_e32 v3, s35
|
|
; VI-NEXT: flat_atomic_add_x2 v[0:1], v[2:3], v[0:1] glc
|
|
; VI-NEXT: s_waitcnt vmcnt(0)
|
|
; VI-NEXT: buffer_wbinvl1_vol
|
|
; VI-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; GFX9-LABEL: global_atomic_add_i64_ret_offset_scalar:
|
|
; GFX9: ; %bb.0:
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; GFX9-NEXT: v_mov_b32_e32 v0, s6
|
|
; GFX9-NEXT: v_mov_b32_e32 v1, s7
|
|
; GFX9-NEXT: v_mov_b32_e32 v2, 0
|
|
; GFX9-NEXT: global_atomic_add_x2 v[0:1], v2, v[0:1], s[4:5] offset:32 glc
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0)
|
|
; GFX9-NEXT: buffer_wbinvl1_vol
|
|
; GFX9-NEXT: s_setpc_b64 s[30:31]
|
|
  %gep = getelementptr i64, ptr addrspace(1) %out, i64 4
|
|
  %result = atomicrmw add ptr addrspace(1) %gep, i64 %in seq_cst
|
|
  ret i64 %result
|
|
}
|
|
|
|
; i64 add, result unused, +32-byte offset, with !amdgpu.no.remote.memory.
; NOTE(review): autogenerated FileCheck assertions below — regenerate, do not hand-edit.
define void @global_atomic_add_i64_noret_offset__amdgpu_no_remote_memory(ptr addrspace(1) %out, i64 %in) {
|
|
; SI-LABEL: global_atomic_add_i64_noret_offset__amdgpu_no_remote_memory:
|
|
; SI: ; %bb.0:
|
|
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; SI-NEXT: s_mov_b32 s6, 0
|
|
; SI-NEXT: s_mov_b32 s7, 0xf000
|
|
; SI-NEXT: s_mov_b32 s4, s6
|
|
; SI-NEXT: s_mov_b32 s5, s6
|
|
; SI-NEXT: buffer_atomic_add_x2 v[2:3], v[0:1], s[4:7], 0 addr64 offset:32
|
|
; SI-NEXT: s_waitcnt vmcnt(0)
|
|
; SI-NEXT: buffer_wbinvl1
|
|
; SI-NEXT: s_waitcnt expcnt(0)
|
|
; SI-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; VI-LABEL: global_atomic_add_i64_noret_offset__amdgpu_no_remote_memory:
|
|
; VI: ; %bb.0:
|
|
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; VI-NEXT: v_add_u32_e32 v0, vcc, 32, v0
|
|
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
|
|
; VI-NEXT: flat_atomic_add_x2 v[0:1], v[2:3]
|
|
; VI-NEXT: s_waitcnt vmcnt(0)
|
|
; VI-NEXT: buffer_wbinvl1_vol
|
|
; VI-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; GFX9-LABEL: global_atomic_add_i64_noret_offset__amdgpu_no_remote_memory:
|
|
; GFX9: ; %bb.0:
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; GFX9-NEXT: global_atomic_add_x2 v[0:1], v[2:3], off offset:32
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0)
|
|
; GFX9-NEXT: buffer_wbinvl1_vol
|
|
; GFX9-NEXT: s_setpc_b64 s[30:31]
|
|
  %gep = getelementptr i64, ptr addrspace(1) %out, i64 4
|
|
  %tmp0 = atomicrmw add ptr addrspace(1) %gep, i64 %in seq_cst, !amdgpu.no.remote.memory !0
|
|
  ret void
|
|
}
|
|
|
|
; i64 add with used result, +32-byte offset, with !amdgpu.no.remote.memory.
; NOTE(review): autogenerated FileCheck assertions below — regenerate, do not hand-edit.
define i64 @global_atomic_add_i64_ret_offset__amdgpu_no_remote_memory(ptr addrspace(1) %out, i64 %in) {
|
|
; SI-LABEL: global_atomic_add_i64_ret_offset__amdgpu_no_remote_memory:
|
|
; SI: ; %bb.0:
|
|
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; SI-NEXT: s_mov_b32 s6, 0
|
|
; SI-NEXT: s_mov_b32 s7, 0xf000
|
|
; SI-NEXT: s_mov_b32 s4, s6
|
|
; SI-NEXT: s_mov_b32 s5, s6
|
|
; SI-NEXT: buffer_atomic_add_x2 v[2:3], v[0:1], s[4:7], 0 addr64 offset:32 glc
|
|
; SI-NEXT: s_waitcnt vmcnt(0)
|
|
; SI-NEXT: buffer_wbinvl1
|
|
; SI-NEXT: v_mov_b32_e32 v0, v2
|
|
; SI-NEXT: v_mov_b32_e32 v1, v3
|
|
; SI-NEXT: s_waitcnt expcnt(0)
|
|
; SI-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; VI-LABEL: global_atomic_add_i64_ret_offset__amdgpu_no_remote_memory:
|
|
; VI: ; %bb.0:
|
|
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; VI-NEXT: v_add_u32_e32 v0, vcc, 32, v0
|
|
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
|
|
; VI-NEXT: flat_atomic_add_x2 v[0:1], v[0:1], v[2:3] glc
|
|
; VI-NEXT: s_waitcnt vmcnt(0)
|
|
; VI-NEXT: buffer_wbinvl1_vol
|
|
; VI-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; GFX9-LABEL: global_atomic_add_i64_ret_offset__amdgpu_no_remote_memory:
|
|
; GFX9: ; %bb.0:
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; GFX9-NEXT: global_atomic_add_x2 v[0:1], v[0:1], v[2:3], off offset:32 glc
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0)
|
|
; GFX9-NEXT: buffer_wbinvl1_vol
|
|
; GFX9-NEXT: s_setpc_b64 s[30:31]
|
|
  %gep = getelementptr i64, ptr addrspace(1) %out, i64 4
|
|
  %result = atomicrmw add ptr addrspace(1) %gep, i64 %in seq_cst, !amdgpu.no.remote.memory !0
|
|
  ret i64 %result
|
|
}
|
|
|
|
; ---------------------------------------------------------------------
|
|
; atomicrmw sub
|
|
; ---------------------------------------------------------------------
|
|
|
|
; i64 sub to global memory, result unused.
; NOTE(review): autogenerated FileCheck assertions below — regenerate, do not hand-edit.
define void @global_atomic_sub_i64_noret(ptr addrspace(1) %ptr, i64 %in) {
|
|
; SI-LABEL: global_atomic_sub_i64_noret:
|
|
; SI: ; %bb.0:
|
|
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; SI-NEXT: s_mov_b32 s6, 0
|
|
; SI-NEXT: s_mov_b32 s7, 0xf000
|
|
; SI-NEXT: s_mov_b32 s4, s6
|
|
; SI-NEXT: s_mov_b32 s5, s6
|
|
; SI-NEXT: buffer_atomic_sub_x2 v[2:3], v[0:1], s[4:7], 0 addr64
|
|
; SI-NEXT: s_waitcnt vmcnt(0)
|
|
; SI-NEXT: buffer_wbinvl1
|
|
; SI-NEXT: s_waitcnt expcnt(0)
|
|
; SI-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; VI-LABEL: global_atomic_sub_i64_noret:
|
|
; VI: ; %bb.0:
|
|
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; VI-NEXT: flat_atomic_sub_x2 v[0:1], v[2:3]
|
|
; VI-NEXT: s_waitcnt vmcnt(0)
|
|
; VI-NEXT: buffer_wbinvl1_vol
|
|
; VI-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; GFX9-LABEL: global_atomic_sub_i64_noret:
|
|
; GFX9: ; %bb.0:
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; GFX9-NEXT: global_atomic_sub_x2 v[0:1], v[2:3], off
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0)
|
|
; GFX9-NEXT: buffer_wbinvl1_vol
|
|
; GFX9-NEXT: s_setpc_b64 s[30:31]
|
|
  %tmp0 = atomicrmw sub ptr addrspace(1) %ptr, i64 %in seq_cst
|
|
  ret void
|
|
}
|
|
|
|
; i64 sub, result unused, +32-byte offset.
; NOTE(review): autogenerated FileCheck assertions below — regenerate, do not hand-edit.
define void @global_atomic_sub_i64_noret_offset(ptr addrspace(1) %out, i64 %in) {
|
|
; SI-LABEL: global_atomic_sub_i64_noret_offset:
|
|
; SI: ; %bb.0:
|
|
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; SI-NEXT: s_mov_b32 s6, 0
|
|
; SI-NEXT: s_mov_b32 s7, 0xf000
|
|
; SI-NEXT: s_mov_b32 s4, s6
|
|
; SI-NEXT: s_mov_b32 s5, s6
|
|
; SI-NEXT: buffer_atomic_sub_x2 v[2:3], v[0:1], s[4:7], 0 addr64 offset:32
|
|
; SI-NEXT: s_waitcnt vmcnt(0)
|
|
; SI-NEXT: buffer_wbinvl1
|
|
; SI-NEXT: s_waitcnt expcnt(0)
|
|
; SI-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; VI-LABEL: global_atomic_sub_i64_noret_offset:
|
|
; VI: ; %bb.0:
|
|
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; VI-NEXT: v_add_u32_e32 v0, vcc, 32, v0
|
|
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
|
|
; VI-NEXT: flat_atomic_sub_x2 v[0:1], v[2:3]
|
|
; VI-NEXT: s_waitcnt vmcnt(0)
|
|
; VI-NEXT: buffer_wbinvl1_vol
|
|
; VI-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; GFX9-LABEL: global_atomic_sub_i64_noret_offset:
|
|
; GFX9: ; %bb.0:
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; GFX9-NEXT: global_atomic_sub_x2 v[0:1], v[2:3], off offset:32
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0)
|
|
; GFX9-NEXT: buffer_wbinvl1_vol
|
|
; GFX9-NEXT: s_setpc_b64 s[30:31]
|
|
  %gep = getelementptr i64, ptr addrspace(1) %out, i64 4
|
|
  %tmp0 = atomicrmw sub ptr addrspace(1) %gep, i64 %in seq_cst
|
|
  ret void
|
|
}
|
|
|
|
; i64 sub with used result (glc form returns the pre-op value).
; NOTE(review): autogenerated FileCheck assertions below — regenerate, do not hand-edit.
define i64 @global_atomic_sub_i64_ret(ptr addrspace(1) %ptr, i64 %in) {
|
|
; SI-LABEL: global_atomic_sub_i64_ret:
|
|
; SI: ; %bb.0:
|
|
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; SI-NEXT: s_mov_b32 s6, 0
|
|
; SI-NEXT: s_mov_b32 s7, 0xf000
|
|
; SI-NEXT: s_mov_b32 s4, s6
|
|
; SI-NEXT: s_mov_b32 s5, s6
|
|
; SI-NEXT: buffer_atomic_sub_x2 v[2:3], v[0:1], s[4:7], 0 addr64 glc
|
|
; SI-NEXT: s_waitcnt vmcnt(0)
|
|
; SI-NEXT: buffer_wbinvl1
|
|
; SI-NEXT: v_mov_b32_e32 v0, v2
|
|
; SI-NEXT: v_mov_b32_e32 v1, v3
|
|
; SI-NEXT: s_waitcnt expcnt(0)
|
|
; SI-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; VI-LABEL: global_atomic_sub_i64_ret:
|
|
; VI: ; %bb.0:
|
|
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; VI-NEXT: flat_atomic_sub_x2 v[0:1], v[0:1], v[2:3] glc
|
|
; VI-NEXT: s_waitcnt vmcnt(0)
|
|
; VI-NEXT: buffer_wbinvl1_vol
|
|
; VI-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; GFX9-LABEL: global_atomic_sub_i64_ret:
|
|
; GFX9: ; %bb.0:
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; GFX9-NEXT: global_atomic_sub_x2 v[0:1], v[0:1], v[2:3], off glc
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0)
|
|
; GFX9-NEXT: buffer_wbinvl1_vol
|
|
; GFX9-NEXT: s_setpc_b64 s[30:31]
|
|
  %result = atomicrmw sub ptr addrspace(1) %ptr, i64 %in seq_cst
|
|
  ret i64 %result
|
|
}
|
|
|
|
; i64 sub with used result and +32-byte offset.
; NOTE(review): autogenerated FileCheck assertions below — regenerate, do not hand-edit.
define i64 @global_atomic_sub_i64_ret_offset(ptr addrspace(1) %out, i64 %in) {
|
|
; SI-LABEL: global_atomic_sub_i64_ret_offset:
|
|
; SI: ; %bb.0:
|
|
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; SI-NEXT: s_mov_b32 s6, 0
|
|
; SI-NEXT: s_mov_b32 s7, 0xf000
|
|
; SI-NEXT: s_mov_b32 s4, s6
|
|
; SI-NEXT: s_mov_b32 s5, s6
|
|
; SI-NEXT: buffer_atomic_sub_x2 v[2:3], v[0:1], s[4:7], 0 addr64 offset:32 glc
|
|
; SI-NEXT: s_waitcnt vmcnt(0)
|
|
; SI-NEXT: buffer_wbinvl1
|
|
; SI-NEXT: v_mov_b32_e32 v0, v2
|
|
; SI-NEXT: v_mov_b32_e32 v1, v3
|
|
; SI-NEXT: s_waitcnt expcnt(0)
|
|
; SI-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; VI-LABEL: global_atomic_sub_i64_ret_offset:
|
|
; VI: ; %bb.0:
|
|
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; VI-NEXT: v_add_u32_e32 v0, vcc, 32, v0
|
|
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
|
|
; VI-NEXT: flat_atomic_sub_x2 v[0:1], v[0:1], v[2:3] glc
|
|
; VI-NEXT: s_waitcnt vmcnt(0)
|
|
; VI-NEXT: buffer_wbinvl1_vol
|
|
; VI-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; GFX9-LABEL: global_atomic_sub_i64_ret_offset:
|
|
; GFX9: ; %bb.0:
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; GFX9-NEXT: global_atomic_sub_x2 v[0:1], v[0:1], v[2:3], off offset:32 glc
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0)
|
|
; GFX9-NEXT: buffer_wbinvl1_vol
|
|
; GFX9-NEXT: s_setpc_b64 s[30:31]
|
|
  %gep = getelementptr i64, ptr addrspace(1) %out, i64 4
|
|
  %result = atomicrmw sub ptr addrspace(1) %gep, i64 %in seq_cst
|
|
  ret i64 %result
|
|
}
|
|
|
|
; i64 sub, result unused, SGPR (inreg) operands.
; NOTE(review): autogenerated FileCheck assertions below — regenerate, do not hand-edit.
define amdgpu_gfx void @global_atomic_sub_i64_noret_scalar(ptr addrspace(1) inreg %ptr, i64 inreg %in) {
|
|
; SI-LABEL: global_atomic_sub_i64_noret_scalar:
|
|
; SI: ; %bb.0:
|
|
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
|
|
; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 ; 4-byte Folded Spill
|
|
; SI-NEXT: s_mov_b64 exec, s[34:35]
|
|
; SI-NEXT: s_waitcnt expcnt(0)
|
|
; SI-NEXT: v_writelane_b32 v2, s6, 0
|
|
; SI-NEXT: v_writelane_b32 v2, s7, 1
|
|
; SI-NEXT: s_mov_b32 s34, s7
|
|
; SI-NEXT: s_mov_b32 s35, s6
|
|
; SI-NEXT: s_mov_b32 s7, 0xf000
|
|
; SI-NEXT: s_mov_b32 s6, -1
|
|
; SI-NEXT: v_mov_b32_e32 v0, s35
|
|
; SI-NEXT: v_mov_b32_e32 v1, s34
|
|
; SI-NEXT: s_waitcnt vmcnt(0)
|
|
; SI-NEXT: buffer_atomic_sub_x2 v[0:1], off, s[4:7], 0
|
|
; SI-NEXT: s_waitcnt vmcnt(0)
|
|
; SI-NEXT: buffer_wbinvl1
|
|
; SI-NEXT: v_readlane_b32 s7, v2, 1
|
|
; SI-NEXT: v_readlane_b32 s6, v2, 0
|
|
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
|
|
; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 ; 4-byte Folded Reload
|
|
; SI-NEXT: s_mov_b64 exec, s[34:35]
|
|
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
|
|
; SI-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; VI-LABEL: global_atomic_sub_i64_noret_scalar:
|
|
; VI: ; %bb.0:
|
|
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; VI-NEXT: v_mov_b32_e32 v0, s6
|
|
; VI-NEXT: v_mov_b32_e32 v1, s7
|
|
; VI-NEXT: v_mov_b32_e32 v2, s4
|
|
; VI-NEXT: v_mov_b32_e32 v3, s5
|
|
; VI-NEXT: flat_atomic_sub_x2 v[2:3], v[0:1]
|
|
; VI-NEXT: s_waitcnt vmcnt(0)
|
|
; VI-NEXT: buffer_wbinvl1_vol
|
|
; VI-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; GFX9-LABEL: global_atomic_sub_i64_noret_scalar:
|
|
; GFX9: ; %bb.0:
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; GFX9-NEXT: v_mov_b32_e32 v0, s6
|
|
; GFX9-NEXT: v_mov_b32_e32 v1, s7
|
|
; GFX9-NEXT: v_mov_b32_e32 v2, 0
|
|
; GFX9-NEXT: global_atomic_sub_x2 v2, v[0:1], s[4:5]
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0)
|
|
; GFX9-NEXT: buffer_wbinvl1_vol
|
|
; GFX9-NEXT: s_setpc_b64 s[30:31]
|
|
  %tmp0 = atomicrmw sub ptr addrspace(1) %ptr, i64 %in seq_cst
|
|
  ret void
|
|
}
|
|
|
|
; i64 sub, result unused, SGPR operands, +32-byte offset.
; NOTE(review): autogenerated FileCheck assertions below — regenerate, do not hand-edit.
define amdgpu_gfx void @global_atomic_sub_i64_noret_offset_scalar(ptr addrspace(1) inreg %out, i64 inreg %in) {
|
|
; SI-LABEL: global_atomic_sub_i64_noret_offset_scalar:
|
|
; SI: ; %bb.0:
|
|
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
|
|
; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 ; 4-byte Folded Spill
|
|
; SI-NEXT: s_mov_b64 exec, s[34:35]
|
|
; SI-NEXT: s_waitcnt expcnt(0)
|
|
; SI-NEXT: v_writelane_b32 v2, s6, 0
|
|
; SI-NEXT: v_writelane_b32 v2, s7, 1
|
|
; SI-NEXT: v_mov_b32_e32 v0, s6
|
|
; SI-NEXT: v_mov_b32_e32 v1, s7
|
|
; SI-NEXT: s_mov_b32 s7, 0xf000
|
|
; SI-NEXT: s_mov_b32 s6, -1
|
|
; SI-NEXT: s_waitcnt vmcnt(0)
|
|
; SI-NEXT: buffer_atomic_sub_x2 v[0:1], off, s[4:7], 0 offset:32
|
|
; SI-NEXT: s_waitcnt vmcnt(0)
|
|
; SI-NEXT: buffer_wbinvl1
|
|
; SI-NEXT: v_readlane_b32 s7, v2, 1
|
|
; SI-NEXT: v_readlane_b32 s6, v2, 0
|
|
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
|
|
; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 ; 4-byte Folded Reload
|
|
; SI-NEXT: s_mov_b64 exec, s[34:35]
|
|
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
|
|
; SI-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; VI-LABEL: global_atomic_sub_i64_noret_offset_scalar:
|
|
; VI: ; %bb.0:
|
|
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; VI-NEXT: s_add_u32 s34, s4, 32
|
|
; VI-NEXT: s_addc_u32 s35, s5, 0
|
|
; VI-NEXT: v_mov_b32_e32 v2, s34
|
|
; VI-NEXT: v_mov_b32_e32 v0, s6
|
|
; VI-NEXT: v_mov_b32_e32 v1, s7
|
|
; VI-NEXT: v_mov_b32_e32 v3, s35
|
|
; VI-NEXT: flat_atomic_sub_x2 v[2:3], v[0:1]
|
|
; VI-NEXT: s_waitcnt vmcnt(0)
|
|
; VI-NEXT: buffer_wbinvl1_vol
|
|
; VI-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; GFX9-LABEL: global_atomic_sub_i64_noret_offset_scalar:
|
|
; GFX9: ; %bb.0:
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; GFX9-NEXT: v_mov_b32_e32 v0, s6
|
|
; GFX9-NEXT: v_mov_b32_e32 v1, s7
|
|
; GFX9-NEXT: v_mov_b32_e32 v2, 0
|
|
; GFX9-NEXT: global_atomic_sub_x2 v2, v[0:1], s[4:5] offset:32
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0)
|
|
; GFX9-NEXT: buffer_wbinvl1_vol
|
|
; GFX9-NEXT: s_setpc_b64 s[30:31]
|
|
  %gep = getelementptr i64, ptr addrspace(1) %out, i64 4
|
|
  %tmp0 = atomicrmw sub ptr addrspace(1) %gep, i64 %in seq_cst
|
|
  ret void
|
|
}
|
|
|
|
define amdgpu_gfx i64 @global_atomic_sub_i64_ret_scalar(ptr addrspace(1) inreg %ptr, i64 inreg %in) {
|
|
; SI-LABEL: global_atomic_sub_i64_ret_scalar:
|
|
; SI: ; %bb.0:
|
|
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
|
|
; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 ; 4-byte Folded Spill
|
|
; SI-NEXT: s_mov_b64 exec, s[34:35]
|
|
; SI-NEXT: s_waitcnt expcnt(0)
|
|
; SI-NEXT: v_writelane_b32 v2, s6, 0
|
|
; SI-NEXT: v_writelane_b32 v2, s7, 1
|
|
; SI-NEXT: s_mov_b32 s34, s7
|
|
; SI-NEXT: s_mov_b32 s35, s6
|
|
; SI-NEXT: s_mov_b32 s7, 0xf000
|
|
; SI-NEXT: s_mov_b32 s6, -1
|
|
; SI-NEXT: v_mov_b32_e32 v0, s35
|
|
; SI-NEXT: v_mov_b32_e32 v1, s34
|
|
; SI-NEXT: s_waitcnt vmcnt(0)
|
|
; SI-NEXT: buffer_atomic_sub_x2 v[0:1], off, s[4:7], 0 glc
|
|
; SI-NEXT: s_waitcnt vmcnt(0)
|
|
; SI-NEXT: buffer_wbinvl1
|
|
; SI-NEXT: v_readlane_b32 s7, v2, 1
|
|
; SI-NEXT: v_readlane_b32 s6, v2, 0
|
|
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
|
|
; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 ; 4-byte Folded Reload
|
|
; SI-NEXT: s_mov_b64 exec, s[34:35]
|
|
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
|
|
; SI-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; VI-LABEL: global_atomic_sub_i64_ret_scalar:
|
|
; VI: ; %bb.0:
|
|
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; VI-NEXT: v_mov_b32_e32 v0, s6
|
|
; VI-NEXT: v_mov_b32_e32 v1, s7
|
|
; VI-NEXT: v_mov_b32_e32 v2, s4
|
|
; VI-NEXT: v_mov_b32_e32 v3, s5
|
|
; VI-NEXT: flat_atomic_sub_x2 v[0:1], v[2:3], v[0:1] glc
|
|
; VI-NEXT: s_waitcnt vmcnt(0)
|
|
; VI-NEXT: buffer_wbinvl1_vol
|
|
; VI-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; GFX9-LABEL: global_atomic_sub_i64_ret_scalar:
|
|
; GFX9: ; %bb.0:
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; GFX9-NEXT: v_mov_b32_e32 v0, s6
|
|
; GFX9-NEXT: v_mov_b32_e32 v1, s7
|
|
; GFX9-NEXT: v_mov_b32_e32 v2, 0
|
|
; GFX9-NEXT: global_atomic_sub_x2 v[0:1], v2, v[0:1], s[4:5] glc
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0)
|
|
; GFX9-NEXT: buffer_wbinvl1_vol
|
|
; GFX9-NEXT: s_setpc_b64 s[30:31]
|
|
%result = atomicrmw sub ptr addrspace(1) %ptr, i64 %in seq_cst
|
|
ret i64 %result
|
|
}
|
|
|
|
; Same as the ret_scalar case but through a +32-byte GEP. SI and GFX9 fold the
; constant into the instruction's offset:32 field; VI has no offset field on
; flat atomics, so it materializes the address with s_add_u32/s_addc_u32.
define amdgpu_gfx i64 @global_atomic_sub_i64_ret_offset_scalar(ptr addrspace(1) inreg %out, i64 inreg %in) {
; SI-LABEL: global_atomic_sub_i64_ret_offset_scalar:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_writelane_b32 v2, s6, 0
; SI-NEXT: v_writelane_b32 v2, s7, 1
; SI-NEXT: v_mov_b32_e32 v0, s6
; SI-NEXT: v_mov_b32_e32 v1, s7
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_atomic_sub_x2 v[0:1], off, s[4:7], 0 offset:32 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_readlane_b32 s7, v2, 1
; SI-NEXT: v_readlane_b32 s6, v2, 0
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 ; 4-byte Folded Reload
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_sub_i64_ret_offset_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_add_u32 s34, s4, 32
; VI-NEXT: s_addc_u32 s35, s5, 0
; VI-NEXT: v_mov_b32_e32 v2, s34
; VI-NEXT: v_mov_b32_e32 v0, s6
; VI-NEXT: v_mov_b32_e32 v1, s7
; VI-NEXT: v_mov_b32_e32 v3, s35
; VI-NEXT: flat_atomic_sub_x2 v[0:1], v[2:3], v[0:1] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_sub_i64_ret_offset_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v0, s6
; GFX9-NEXT: v_mov_b32_e32 v1, s7
; GFX9-NEXT: v_mov_b32_e32 v2, 0
; GFX9-NEXT: global_atomic_sub_x2 v[0:1], v2, v[0:1], s[4:5] offset:32 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %gep = getelementptr i64, ptr addrspace(1) %out, i64 4
  %result = atomicrmw sub ptr addrspace(1) %gep, i64 %in seq_cst
  ret i64 %result
}
|
|
|
|
; seq_cst i64 sub at ptr+32 with !amdgpu.no.remote.memory (!0 defined elsewhere
; in this file); result unused, so no glc on SI/VI/GFX9 and no copy-out.
define void @global_atomic_sub_i64_noret_offset__amdgpu_no_remote_memory(ptr addrspace(1) %out, i64 %in) {
; SI-LABEL: global_atomic_sub_i64_noret_offset__amdgpu_no_remote_memory:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
; SI-NEXT: buffer_atomic_sub_x2 v[2:3], v[0:1], s[4:7], 0 addr64 offset:32
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_sub_i64_noret_offset__amdgpu_no_remote_memory:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_add_u32_e32 v0, vcc, 32, v0
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI-NEXT: flat_atomic_sub_x2 v[0:1], v[2:3]
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_sub_i64_noret_offset__amdgpu_no_remote_memory:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: global_atomic_sub_x2 v[0:1], v[2:3], off offset:32
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %gep = getelementptr i64, ptr addrspace(1) %out, i64 4
  %tmp0 = atomicrmw sub ptr addrspace(1) %gep, i64 %in seq_cst, !amdgpu.no.remote.memory !0
  ret void
}
|
|
|
|
; Returning variant of the no.remote.memory sub test: glc is set so the old
; value comes back; SI receives it in v[2:3] and copies to the v[0:1] return.
define i64 @global_atomic_sub_i64_ret_offset__amdgpu_no_remote_memory(ptr addrspace(1) %out, i64 %in) {
; SI-LABEL: global_atomic_sub_i64_ret_offset__amdgpu_no_remote_memory:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
; SI-NEXT: buffer_atomic_sub_x2 v[2:3], v[0:1], s[4:7], 0 addr64 offset:32 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_mov_b32_e32 v0, v2
; SI-NEXT: v_mov_b32_e32 v1, v3
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_sub_i64_ret_offset__amdgpu_no_remote_memory:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_add_u32_e32 v0, vcc, 32, v0
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI-NEXT: flat_atomic_sub_x2 v[0:1], v[0:1], v[2:3] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_sub_i64_ret_offset__amdgpu_no_remote_memory:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: global_atomic_sub_x2 v[0:1], v[0:1], v[2:3], off offset:32 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %gep = getelementptr i64, ptr addrspace(1) %out, i64 4
  %result = atomicrmw sub ptr addrspace(1) %gep, i64 %in seq_cst, !amdgpu.no.remote.memory !0
  ret i64 %result
}
|
|
|
|
; ---------------------------------------------------------------------
; atomicrmw and
; ---------------------------------------------------------------------
|
|
|
|
; Baseline seq_cst i64 and, result unused: single buffer/flat/global atomic
; with no glc, followed by the seq_cst cache invalidate (wbinvl1[_vol]).
define void @global_atomic_and_i64_noret(ptr addrspace(1) %ptr, i64 %in) {
; SI-LABEL: global_atomic_and_i64_noret:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
; SI-NEXT: buffer_atomic_and_x2 v[2:3], v[0:1], s[4:7], 0 addr64
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_and_i64_noret:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: flat_atomic_and_x2 v[0:1], v[2:3]
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_and_i64_noret:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: global_atomic_and_x2 v[0:1], v[2:3], off
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %tmp0 = atomicrmw and ptr addrspace(1) %ptr, i64 %in seq_cst
  ret void
}
|
|
|
|
; Non-returning and at ptr+32: SI/GFX9 use the immediate offset:32 field;
; VI computes the address with a 64-bit VALU add before the flat atomic.
define void @global_atomic_and_i64_noret_offset(ptr addrspace(1) %out, i64 %in) {
; SI-LABEL: global_atomic_and_i64_noret_offset:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
; SI-NEXT: buffer_atomic_and_x2 v[2:3], v[0:1], s[4:7], 0 addr64 offset:32
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_and_i64_noret_offset:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_add_u32_e32 v0, vcc, 32, v0
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI-NEXT: flat_atomic_and_x2 v[0:1], v[2:3]
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_and_i64_noret_offset:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: global_atomic_and_x2 v[0:1], v[2:3], off offset:32
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %gep = getelementptr i64, ptr addrspace(1) %out, i64 4
  %tmp0 = atomicrmw and ptr addrspace(1) %gep, i64 %in seq_cst
  ret void
}
|
|
|
|
; Returning and: glc requests the pre-op value. SI's buffer form writes the
; result into the data regs v[2:3], so it copies them into the v[0:1] return.
define i64 @global_atomic_and_i64_ret(ptr addrspace(1) %ptr, i64 %in) {
; SI-LABEL: global_atomic_and_i64_ret:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
; SI-NEXT: buffer_atomic_and_x2 v[2:3], v[0:1], s[4:7], 0 addr64 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_mov_b32_e32 v0, v2
; SI-NEXT: v_mov_b32_e32 v1, v3
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_and_i64_ret:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: flat_atomic_and_x2 v[0:1], v[0:1], v[2:3] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_and_i64_ret:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: global_atomic_and_x2 v[0:1], v[0:1], v[2:3], off glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %result = atomicrmw and ptr addrspace(1) %ptr, i64 %in seq_cst
  ret i64 %result
}
|
|
|
|
; Returning and at ptr+32; combines the offset handling of the noret_offset
; test (offset:32 vs. VALU add on VI) with the glc return path.
define i64 @global_atomic_and_i64_ret_offset(ptr addrspace(1) %out, i64 %in) {
; SI-LABEL: global_atomic_and_i64_ret_offset:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
; SI-NEXT: buffer_atomic_and_x2 v[2:3], v[0:1], s[4:7], 0 addr64 offset:32 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_mov_b32_e32 v0, v2
; SI-NEXT: v_mov_b32_e32 v1, v3
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_and_i64_ret_offset:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_add_u32_e32 v0, vcc, 32, v0
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI-NEXT: flat_atomic_and_x2 v[0:1], v[0:1], v[2:3] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_and_i64_ret_offset:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: global_atomic_and_x2 v[0:1], v[0:1], v[2:3], off offset:32 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %gep = getelementptr i64, ptr addrspace(1) %out, i64 4
  %result = atomicrmw and ptr addrspace(1) %gep, i64 %in seq_cst
  ret i64 %result
}
|
|
|
|
; amdgpu_gfx scalar-operand and, no return. SI repurposes s[6:7] for the SRD
; constant, so it first preserves the incoming s6/s7 via writelane into a
; spilled v2 (restored with readlane after the atomic). VI/GFX9 just copy the
; SGPR operands into VGPRs for the atomic.
define amdgpu_gfx void @global_atomic_and_i64_noret_scalar(ptr addrspace(1) inreg %ptr, i64 inreg %in) {
; SI-LABEL: global_atomic_and_i64_noret_scalar:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_writelane_b32 v2, s6, 0
; SI-NEXT: v_writelane_b32 v2, s7, 1
; SI-NEXT: s_mov_b32 s34, s7
; SI-NEXT: s_mov_b32 s35, s6
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: v_mov_b32_e32 v0, s35
; SI-NEXT: v_mov_b32_e32 v1, s34
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_atomic_and_x2 v[0:1], off, s[4:7], 0
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_readlane_b32 s7, v2, 1
; SI-NEXT: v_readlane_b32 s6, v2, 0
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 ; 4-byte Folded Reload
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_and_i64_noret_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s6
; VI-NEXT: v_mov_b32_e32 v1, s7
; VI-NEXT: v_mov_b32_e32 v2, s4
; VI-NEXT: v_mov_b32_e32 v3, s5
; VI-NEXT: flat_atomic_and_x2 v[2:3], v[0:1]
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_and_i64_noret_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v0, s6
; GFX9-NEXT: v_mov_b32_e32 v1, s7
; GFX9-NEXT: v_mov_b32_e32 v2, 0
; GFX9-NEXT: global_atomic_and_x2 v2, v[0:1], s[4:5]
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %tmp0 = atomicrmw and ptr addrspace(1) %ptr, i64 %in seq_cst
  ret void
}
|
|
|
|
; Scalar-operand noret and at ptr+32: SI/GFX9 fold the offset into the
; instruction; VI builds the address in s[34:35] with s_add_u32/s_addc_u32.
define amdgpu_gfx void @global_atomic_and_i64_noret_offset_scalar(ptr addrspace(1) inreg %out, i64 inreg %in) {
; SI-LABEL: global_atomic_and_i64_noret_offset_scalar:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_writelane_b32 v2, s6, 0
; SI-NEXT: v_writelane_b32 v2, s7, 1
; SI-NEXT: v_mov_b32_e32 v0, s6
; SI-NEXT: v_mov_b32_e32 v1, s7
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_atomic_and_x2 v[0:1], off, s[4:7], 0 offset:32
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_readlane_b32 s7, v2, 1
; SI-NEXT: v_readlane_b32 s6, v2, 0
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 ; 4-byte Folded Reload
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_and_i64_noret_offset_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_add_u32 s34, s4, 32
; VI-NEXT: s_addc_u32 s35, s5, 0
; VI-NEXT: v_mov_b32_e32 v2, s34
; VI-NEXT: v_mov_b32_e32 v0, s6
; VI-NEXT: v_mov_b32_e32 v1, s7
; VI-NEXT: v_mov_b32_e32 v3, s35
; VI-NEXT: flat_atomic_and_x2 v[2:3], v[0:1]
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_and_i64_noret_offset_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v0, s6
; GFX9-NEXT: v_mov_b32_e32 v1, s7
; GFX9-NEXT: v_mov_b32_e32 v2, 0
; GFX9-NEXT: global_atomic_and_x2 v2, v[0:1], s[4:5] offset:32
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %gep = getelementptr i64, ptr addrspace(1) %out, i64 4
  %tmp0 = atomicrmw and ptr addrspace(1) %gep, i64 %in seq_cst
  ret void
}
|
|
|
|
; Scalar-operand and returning the old value (glc). SI's s6/s7 save/restore
; dance via writelane/readlane mirrors the noret_scalar variant.
define amdgpu_gfx i64 @global_atomic_and_i64_ret_scalar(ptr addrspace(1) inreg %ptr, i64 inreg %in) {
; SI-LABEL: global_atomic_and_i64_ret_scalar:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_writelane_b32 v2, s6, 0
; SI-NEXT: v_writelane_b32 v2, s7, 1
; SI-NEXT: s_mov_b32 s34, s7
; SI-NEXT: s_mov_b32 s35, s6
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: v_mov_b32_e32 v0, s35
; SI-NEXT: v_mov_b32_e32 v1, s34
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_atomic_and_x2 v[0:1], off, s[4:7], 0 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_readlane_b32 s7, v2, 1
; SI-NEXT: v_readlane_b32 s6, v2, 0
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 ; 4-byte Folded Reload
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_and_i64_ret_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s6
; VI-NEXT: v_mov_b32_e32 v1, s7
; VI-NEXT: v_mov_b32_e32 v2, s4
; VI-NEXT: v_mov_b32_e32 v3, s5
; VI-NEXT: flat_atomic_and_x2 v[0:1], v[2:3], v[0:1] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_and_i64_ret_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v0, s6
; GFX9-NEXT: v_mov_b32_e32 v1, s7
; GFX9-NEXT: v_mov_b32_e32 v2, 0
; GFX9-NEXT: global_atomic_and_x2 v[0:1], v2, v[0:1], s[4:5] glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %result = atomicrmw and ptr addrspace(1) %ptr, i64 %in seq_cst
  ret i64 %result
}
|
|
|
|
; Scalar-operand returning and at ptr+32: offset:32 on SI/GFX9, scalar
; address add on VI, glc return path on all three.
define amdgpu_gfx i64 @global_atomic_and_i64_ret_offset_scalar(ptr addrspace(1) inreg %out, i64 inreg %in) {
; SI-LABEL: global_atomic_and_i64_ret_offset_scalar:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_writelane_b32 v2, s6, 0
; SI-NEXT: v_writelane_b32 v2, s7, 1
; SI-NEXT: v_mov_b32_e32 v0, s6
; SI-NEXT: v_mov_b32_e32 v1, s7
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_atomic_and_x2 v[0:1], off, s[4:7], 0 offset:32 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_readlane_b32 s7, v2, 1
; SI-NEXT: v_readlane_b32 s6, v2, 0
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 ; 4-byte Folded Reload
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_and_i64_ret_offset_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_add_u32 s34, s4, 32
; VI-NEXT: s_addc_u32 s35, s5, 0
; VI-NEXT: v_mov_b32_e32 v2, s34
; VI-NEXT: v_mov_b32_e32 v0, s6
; VI-NEXT: v_mov_b32_e32 v1, s7
; VI-NEXT: v_mov_b32_e32 v3, s35
; VI-NEXT: flat_atomic_and_x2 v[0:1], v[2:3], v[0:1] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_and_i64_ret_offset_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v0, s6
; GFX9-NEXT: v_mov_b32_e32 v1, s7
; GFX9-NEXT: v_mov_b32_e32 v2, 0
; GFX9-NEXT: global_atomic_and_x2 v[0:1], v2, v[0:1], s[4:5] offset:32 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %gep = getelementptr i64, ptr addrspace(1) %out, i64 4
  %result = atomicrmw and ptr addrspace(1) %gep, i64 %in seq_cst
  ret i64 %result
}
|
|
|
|
; Non-returning and at ptr+32 carrying !amdgpu.no.remote.memory (!0 defined
; elsewhere in this file); codegen matches the plain noret_offset variant.
define void @global_atomic_and_i64_noret_offset__amdgpu_no_remote_memory(ptr addrspace(1) %out, i64 %in) {
; SI-LABEL: global_atomic_and_i64_noret_offset__amdgpu_no_remote_memory:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
; SI-NEXT: buffer_atomic_and_x2 v[2:3], v[0:1], s[4:7], 0 addr64 offset:32
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_and_i64_noret_offset__amdgpu_no_remote_memory:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_add_u32_e32 v0, vcc, 32, v0
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI-NEXT: flat_atomic_and_x2 v[0:1], v[2:3]
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_and_i64_noret_offset__amdgpu_no_remote_memory:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: global_atomic_and_x2 v[0:1], v[2:3], off offset:32
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %gep = getelementptr i64, ptr addrspace(1) %out, i64 4
  %tmp0 = atomicrmw and ptr addrspace(1) %gep, i64 %in seq_cst, !amdgpu.no.remote.memory !0
  ret void
}
|
|
|
|
; Returning and at ptr+32 with !amdgpu.no.remote.memory; glc return path,
; otherwise identical codegen to the plain ret_offset variant.
define i64 @global_atomic_and_i64_ret_offset__amdgpu_no_remote_memory(ptr addrspace(1) %out, i64 %in) {
; SI-LABEL: global_atomic_and_i64_ret_offset__amdgpu_no_remote_memory:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
; SI-NEXT: buffer_atomic_and_x2 v[2:3], v[0:1], s[4:7], 0 addr64 offset:32 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_mov_b32_e32 v0, v2
; SI-NEXT: v_mov_b32_e32 v1, v3
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_and_i64_ret_offset__amdgpu_no_remote_memory:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_add_u32_e32 v0, vcc, 32, v0
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI-NEXT: flat_atomic_and_x2 v[0:1], v[0:1], v[2:3] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_and_i64_ret_offset__amdgpu_no_remote_memory:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: global_atomic_and_x2 v[0:1], v[0:1], v[2:3], off offset:32 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %gep = getelementptr i64, ptr addrspace(1) %out, i64 4
  %result = atomicrmw and ptr addrspace(1) %gep, i64 %in seq_cst, !amdgpu.no.remote.memory !0
  ret i64 %result
}
|
|
|
|
; ---------------------------------------------------------------------
; atomicrmw nand
; ---------------------------------------------------------------------
|
|
|
|
; nand has no native instruction on these targets, so AtomicExpand lowers it
; to a load + CAS loop (%atomicrmw.start / %atomicrmw.end): compute
; not(old & in), attempt cmpswap_x2, accumulate failure lanes into a mask,
; and loop with s_andn2_b64 exec until every lane's CAS has succeeded.
define void @global_atomic_nand_i64_noret(ptr addrspace(1) %ptr, i64 %in) {
; SI-LABEL: global_atomic_nand_i64_noret:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
; SI-NEXT: buffer_load_dwordx2 v[6:7], v[0:1], s[4:7], 0 addr64
; SI-NEXT: s_mov_b64 s[8:9], 0
; SI-NEXT: .LBB50_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_and_b32_e32 v4, v7, v3
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_and_b32_e32 v8, v6, v2
; SI-NEXT: v_not_b32_e32 v5, v4
; SI-NEXT: v_not_b32_e32 v4, v8
; SI-NEXT: v_mov_b32_e32 v11, v7
; SI-NEXT: v_mov_b32_e32 v10, v6
; SI-NEXT: v_mov_b32_e32 v9, v5
; SI-NEXT: v_mov_b32_e32 v8, v4
; SI-NEXT: buffer_atomic_cmpswap_x2 v[8:11], v[0:1], s[4:7], 0 addr64 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[8:9], v[6:7]
; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
; SI-NEXT: v_mov_b32_e32 v6, v8
; SI-NEXT: v_mov_b32_e32 v7, v9
; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
; SI-NEXT: s_cbranch_execnz .LBB50_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[8:9]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_nand_i64_noret:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: flat_load_dwordx2 v[6:7], v[0:1]
; VI-NEXT: s_mov_b64 s[4:5], 0
; VI-NEXT: .LBB50_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_and_b32_e32 v4, v7, v3
; VI-NEXT: v_and_b32_e32 v8, v6, v2
; VI-NEXT: v_not_b32_e32 v5, v4
; VI-NEXT: v_not_b32_e32 v4, v8
; VI-NEXT: flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
; VI-NEXT: v_mov_b32_e32 v7, v5
; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; VI-NEXT: v_mov_b32_e32 v6, v4
; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
; VI-NEXT: s_cbranch_execnz .LBB50_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[4:5]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_nand_i64_noret:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: global_load_dwordx2 v[6:7], v[0:1], off
; GFX9-NEXT: s_mov_b64 s[4:5], 0
; GFX9-NEXT: .LBB50_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_and_b32_e32 v4, v7, v3
; GFX9-NEXT: v_and_b32_e32 v8, v6, v2
; GFX9-NEXT: v_not_b32_e32 v5, v4
; GFX9-NEXT: v_not_b32_e32 v4, v8
; GFX9-NEXT: global_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7], off glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
; GFX9-NEXT: v_mov_b32_e32 v7, v5
; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; GFX9-NEXT: v_mov_b32_e32 v6, v4
; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_cbranch_execnz .LBB50_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %tmp0 = atomicrmw nand ptr addrspace(1) %ptr, i64 %in seq_cst
  ret void
}
|
|
|
|
; Same CAS-loop nand expansion as above, but at ptr+32: SI/GFX9 fold the
; offset into both the initial load and the cmpswap; VI adds it once up
; front with a 64-bit VALU add and reuses the adjusted address in the loop.
define void @global_atomic_nand_i64_noret_offset(ptr addrspace(1) %out, i64 %in) {
; SI-LABEL: global_atomic_nand_i64_noret_offset:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
; SI-NEXT: buffer_load_dwordx2 v[6:7], v[0:1], s[4:7], 0 addr64 offset:32
; SI-NEXT: s_mov_b64 s[8:9], 0
; SI-NEXT: .LBB51_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_and_b32_e32 v4, v7, v3
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_and_b32_e32 v8, v6, v2
; SI-NEXT: v_not_b32_e32 v5, v4
; SI-NEXT: v_not_b32_e32 v4, v8
; SI-NEXT: v_mov_b32_e32 v11, v7
; SI-NEXT: v_mov_b32_e32 v10, v6
; SI-NEXT: v_mov_b32_e32 v9, v5
; SI-NEXT: v_mov_b32_e32 v8, v4
; SI-NEXT: buffer_atomic_cmpswap_x2 v[8:11], v[0:1], s[4:7], 0 addr64 offset:32 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[8:9], v[6:7]
; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
; SI-NEXT: v_mov_b32_e32 v6, v8
; SI-NEXT: v_mov_b32_e32 v7, v9
; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
; SI-NEXT: s_cbranch_execnz .LBB51_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[8:9]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_nand_i64_noret_offset:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_add_u32_e32 v0, vcc, 32, v0
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI-NEXT: flat_load_dwordx2 v[6:7], v[0:1]
; VI-NEXT: s_mov_b64 s[4:5], 0
; VI-NEXT: .LBB51_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_and_b32_e32 v4, v7, v3
; VI-NEXT: v_and_b32_e32 v8, v6, v2
; VI-NEXT: v_not_b32_e32 v5, v4
; VI-NEXT: v_not_b32_e32 v4, v8
; VI-NEXT: flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
; VI-NEXT: v_mov_b32_e32 v7, v5
; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; VI-NEXT: v_mov_b32_e32 v6, v4
; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
; VI-NEXT: s_cbranch_execnz .LBB51_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[4:5]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_nand_i64_noret_offset:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: global_load_dwordx2 v[6:7], v[0:1], off offset:32
; GFX9-NEXT: s_mov_b64 s[4:5], 0
; GFX9-NEXT: .LBB51_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_and_b32_e32 v4, v7, v3
; GFX9-NEXT: v_and_b32_e32 v8, v6, v2
; GFX9-NEXT: v_not_b32_e32 v5, v4
; GFX9-NEXT: v_not_b32_e32 v4, v8
; GFX9-NEXT: global_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7], off offset:32 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
; GFX9-NEXT: v_mov_b32_e32 v7, v5
; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; GFX9-NEXT: v_mov_b32_e32 v6, v4
; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_cbranch_execnz .LBB51_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %gep = getelementptr i64, ptr addrspace(1) %out, i64 4
  %tmp0 = atomicrmw nand ptr addrspace(1) %gep, i64 %in seq_cst
  ret void
}
|
|
|
|
define i64 @global_atomic_nand_i64_ret(ptr addrspace(1) %ptr, i64 %in) {
; nand has no native hardware atomic on AMDGPU, so the expansion is a
; compare-and-swap retry loop (%atomicrmw.start / %atomicrmw.end) on every
; target; the old value is returned in v[0:1].
; SI-LABEL: global_atomic_nand_i64_ret:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v6, v3
; SI-NEXT: v_mov_b32_e32 v7, v2
; SI-NEXT: v_mov_b32_e32 v5, v1
; SI-NEXT: v_mov_b32_e32 v4, v0
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
; SI-NEXT: buffer_load_dwordx2 v[0:1], v[4:5], s[4:7], 0 addr64
; SI-NEXT: s_mov_b64 s[8:9], 0
; SI-NEXT: .LBB52_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_mov_b32_e32 v11, v1
; SI-NEXT: v_mov_b32_e32 v10, v0
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_and_b32_e32 v0, v11, v6
; SI-NEXT: v_and_b32_e32 v1, v10, v7
; SI-NEXT: v_not_b32_e32 v9, v0
; SI-NEXT: v_not_b32_e32 v8, v1
; SI-NEXT: v_mov_b32_e32 v0, v8
; SI-NEXT: v_mov_b32_e32 v1, v9
; SI-NEXT: v_mov_b32_e32 v2, v10
; SI-NEXT: v_mov_b32_e32 v3, v11
; SI-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v[4:5], s[4:7], 0 addr64 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
; SI-NEXT: s_cbranch_execnz .LBB52_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[8:9]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_nand_i64_ret:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: flat_load_dwordx2 v[4:5], v[0:1]
; VI-NEXT: s_mov_b64 s[4:5], 0
; VI-NEXT: .LBB52_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_mov_b32_e32 v7, v5
; VI-NEXT: v_mov_b32_e32 v6, v4
; VI-NEXT: v_and_b32_e32 v4, v7, v3
; VI-NEXT: v_and_b32_e32 v8, v6, v2
; VI-NEXT: v_not_b32_e32 v5, v4
; VI-NEXT: v_not_b32_e32 v4, v8
; VI-NEXT: flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
; VI-NEXT: s_cbranch_execnz .LBB52_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[4:5]
; VI-NEXT: v_mov_b32_e32 v0, v4
; VI-NEXT: v_mov_b32_e32 v1, v5
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_nand_i64_ret:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: global_load_dwordx2 v[4:5], v[0:1], off
; GFX9-NEXT: s_mov_b64 s[4:5], 0
; GFX9-NEXT: .LBB52_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v7, v5
; GFX9-NEXT: v_mov_b32_e32 v6, v4
; GFX9-NEXT: v_and_b32_e32 v4, v7, v3
; GFX9-NEXT: v_and_b32_e32 v8, v6, v2
; GFX9-NEXT: v_not_b32_e32 v5, v4
; GFX9-NEXT: v_not_b32_e32 v4, v8
; GFX9-NEXT: global_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7], off glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_cbranch_execnz .LBB52_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX9-NEXT: v_mov_b32_e32 v0, v4
; GFX9-NEXT: v_mov_b32_e32 v1, v5
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %result = atomicrmw nand ptr addrspace(1) %ptr, i64 %in seq_cst
  ret i64 %result
}
|
|
|
|
define i64 @global_atomic_nand_i64_ret_offset(ptr addrspace(1) %out, i64 %in) {
; Same CAS-loop expansion as the plain ret variant, but the constant gep
; (4 x i64 = 32 bytes) folds into an immediate offset on SI/GFX9, while VI
; (flat addressing) must materialize the address with a 64-bit add.
; SI-LABEL: global_atomic_nand_i64_ret_offset:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v6, v3
; SI-NEXT: v_mov_b32_e32 v7, v2
; SI-NEXT: v_mov_b32_e32 v5, v1
; SI-NEXT: v_mov_b32_e32 v4, v0
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
; SI-NEXT: buffer_load_dwordx2 v[0:1], v[4:5], s[4:7], 0 addr64 offset:32
; SI-NEXT: s_mov_b64 s[8:9], 0
; SI-NEXT: .LBB53_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_mov_b32_e32 v11, v1
; SI-NEXT: v_mov_b32_e32 v10, v0
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_and_b32_e32 v0, v11, v6
; SI-NEXT: v_and_b32_e32 v1, v10, v7
; SI-NEXT: v_not_b32_e32 v9, v0
; SI-NEXT: v_not_b32_e32 v8, v1
; SI-NEXT: v_mov_b32_e32 v0, v8
; SI-NEXT: v_mov_b32_e32 v1, v9
; SI-NEXT: v_mov_b32_e32 v2, v10
; SI-NEXT: v_mov_b32_e32 v3, v11
; SI-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v[4:5], s[4:7], 0 addr64 offset:32 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
; SI-NEXT: s_cbranch_execnz .LBB53_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[8:9]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_nand_i64_ret_offset:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_add_u32_e32 v4, vcc, 32, v0
; VI-NEXT: v_addc_u32_e32 v5, vcc, 0, v1, vcc
; VI-NEXT: flat_load_dwordx2 v[0:1], v[4:5]
; VI-NEXT: s_mov_b64 s[4:5], 0
; VI-NEXT: .LBB53_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_mov_b32_e32 v9, v1
; VI-NEXT: v_mov_b32_e32 v8, v0
; VI-NEXT: v_and_b32_e32 v0, v9, v3
; VI-NEXT: v_and_b32_e32 v1, v8, v2
; VI-NEXT: v_not_b32_e32 v7, v0
; VI-NEXT: v_not_b32_e32 v6, v1
; VI-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[6:9] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
; VI-NEXT: s_cbranch_execnz .LBB53_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[4:5]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_nand_i64_ret_offset:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: global_load_dwordx2 v[4:5], v[0:1], off offset:32
; GFX9-NEXT: s_mov_b64 s[4:5], 0
; GFX9-NEXT: .LBB53_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v7, v5
; GFX9-NEXT: v_mov_b32_e32 v6, v4
; GFX9-NEXT: v_and_b32_e32 v4, v7, v3
; GFX9-NEXT: v_and_b32_e32 v8, v6, v2
; GFX9-NEXT: v_not_b32_e32 v5, v4
; GFX9-NEXT: v_not_b32_e32 v4, v8
; GFX9-NEXT: global_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7], off offset:32 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_cbranch_execnz .LBB53_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX9-NEXT: v_mov_b32_e32 v0, v4
; GFX9-NEXT: v_mov_b32_e32 v1, v5
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %gep = getelementptr i64, ptr addrspace(1) %out, i64 4
  %result = atomicrmw nand ptr addrspace(1) %gep, i64 %in seq_cst
  ret i64 %result
}
|
|
|
|
define amdgpu_gfx void @global_atomic_nand_i64_noret_scalar(ptr addrspace(1) inreg %ptr, i64 %in) {
; Uniform (inreg) pointer/value variant under the amdgpu_gfx calling
; convention. The SGPR input pair overlaps registers the SI sequence needs,
; so SI spills s6/s7 to lanes of a reserved VGPR (v_writelane/v_readlane)
; around the CAS loop; VI/GFX9 just copy the address into VGPRs.
; SI-LABEL: global_atomic_nand_i64_noret_scalar:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_writelane_b32 v8, s6, 0
; SI-NEXT: v_writelane_b32 v8, s7, 1
; SI-NEXT: s_mov_b32 s34, s7
; SI-NEXT: s_mov_b32 s35, s6
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: buffer_load_dwordx2 v[2:3], off, s[4:7], 0
; SI-NEXT: s_mov_b64 s[36:37], 0
; SI-NEXT: .LBB54_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_and_b32_e32 v0, s34, v3
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_and_b32_e32 v4, s35, v2
; SI-NEXT: v_not_b32_e32 v1, v0
; SI-NEXT: v_not_b32_e32 v0, v4
; SI-NEXT: v_mov_b32_e32 v7, v3
; SI-NEXT: v_mov_b32_e32 v6, v2
; SI-NEXT: v_mov_b32_e32 v5, v1
; SI-NEXT: v_mov_b32_e32 v4, v0
; SI-NEXT: buffer_atomic_cmpswap_x2 v[4:7], off, s[4:7], 0 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
; SI-NEXT: s_or_b64 s[36:37], vcc, s[36:37]
; SI-NEXT: v_mov_b32_e32 v2, v4
; SI-NEXT: v_mov_b32_e32 v3, v5
; SI-NEXT: s_andn2_b64 exec, exec, s[36:37]
; SI-NEXT: s_cbranch_execnz .LBB54_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[36:37]
; SI-NEXT: v_readlane_b32 s7, v8, 1
; SI-NEXT: v_readlane_b32 s6, v8, 0
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 ; 4-byte Folded Reload
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_nand_i64_noret_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: v_mov_b32_e32 v1, s5
; VI-NEXT: flat_load_dwordx2 v[2:3], v[0:1]
; VI-NEXT: v_mov_b32_e32 v4, s4
; VI-NEXT: s_mov_b64 s[34:35], 0
; VI-NEXT: v_mov_b32_e32 v5, s5
; VI-NEXT: .LBB54_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_and_b32_e32 v0, s7, v3
; VI-NEXT: v_and_b32_e32 v6, s6, v2
; VI-NEXT: v_not_b32_e32 v1, v0
; VI-NEXT: v_not_b32_e32 v0, v6
; VI-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
; VI-NEXT: v_mov_b32_e32 v3, v1
; VI-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
; VI-NEXT: v_mov_b32_e32 v2, v0
; VI-NEXT: s_andn2_b64 exec, exec, s[34:35]
; VI-NEXT: s_cbranch_execnz .LBB54_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[34:35]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_nand_i64_noret_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v4, 0
; GFX9-NEXT: global_load_dwordx2 v[2:3], v4, s[4:5]
; GFX9-NEXT: s_mov_b64 s[34:35], 0
; GFX9-NEXT: .LBB54_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_and_b32_e32 v0, s7, v3
; GFX9-NEXT: v_and_b32_e32 v5, s6, v2
; GFX9-NEXT: v_not_b32_e32 v1, v0
; GFX9-NEXT: v_not_b32_e32 v0, v5
; GFX9-NEXT: global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[4:5] glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
; GFX9-NEXT: v_mov_b32_e32 v3, v1
; GFX9-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
; GFX9-NEXT: v_mov_b32_e32 v2, v0
; GFX9-NEXT: s_andn2_b64 exec, exec, s[34:35]
; GFX9-NEXT: s_cbranch_execnz .LBB54_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[34:35]
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %tmp0 = atomicrmw nand ptr addrspace(1) %ptr, i64 %in seq_cst
  ret void
}
|
|
|
|
define amdgpu_gfx void @global_atomic_nand_i64_noret_offset_scalar(ptr addrspace(1) inreg %out, i64 %in) {
; Uniform-pointer, no-return variant with a constant 32-byte offset.
; SI/GFX9 fold the offset into the memory instruction; VI computes the
; offset address with scalar adds (s_add_u32/s_addc_u32) before the loop.
; SI-LABEL: global_atomic_nand_i64_noret_offset_scalar:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_writelane_b32 v8, s6, 0
; SI-NEXT: v_writelane_b32 v8, s7, 1
; SI-NEXT: s_mov_b32 s34, s7
; SI-NEXT: s_mov_b32 s35, s6
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: buffer_load_dwordx2 v[2:3], off, s[4:7], 0 offset:32
; SI-NEXT: s_mov_b64 s[36:37], 0
; SI-NEXT: .LBB55_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_and_b32_e32 v0, s34, v3
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_and_b32_e32 v4, s35, v2
; SI-NEXT: v_not_b32_e32 v1, v0
; SI-NEXT: v_not_b32_e32 v0, v4
; SI-NEXT: v_mov_b32_e32 v7, v3
; SI-NEXT: v_mov_b32_e32 v6, v2
; SI-NEXT: v_mov_b32_e32 v5, v1
; SI-NEXT: v_mov_b32_e32 v4, v0
; SI-NEXT: buffer_atomic_cmpswap_x2 v[4:7], off, s[4:7], 0 offset:32 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
; SI-NEXT: s_or_b64 s[36:37], vcc, s[36:37]
; SI-NEXT: v_mov_b32_e32 v2, v4
; SI-NEXT: v_mov_b32_e32 v3, v5
; SI-NEXT: s_andn2_b64 exec, exec, s[36:37]
; SI-NEXT: s_cbranch_execnz .LBB55_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[36:37]
; SI-NEXT: v_readlane_b32 s7, v8, 1
; SI-NEXT: v_readlane_b32 s6, v8, 0
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 ; 4-byte Folded Reload
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_nand_i64_noret_offset_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_add_u32 s34, s4, 32
; VI-NEXT: s_addc_u32 s35, s5, 0
; VI-NEXT: v_mov_b32_e32 v4, s34
; VI-NEXT: v_mov_b32_e32 v5, s35
; VI-NEXT: flat_load_dwordx2 v[2:3], v[4:5]
; VI-NEXT: s_mov_b64 s[34:35], 0
; VI-NEXT: .LBB55_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_and_b32_e32 v0, s7, v3
; VI-NEXT: v_and_b32_e32 v6, s6, v2
; VI-NEXT: v_not_b32_e32 v1, v0
; VI-NEXT: v_not_b32_e32 v0, v6
; VI-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
; VI-NEXT: v_mov_b32_e32 v3, v1
; VI-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
; VI-NEXT: v_mov_b32_e32 v2, v0
; VI-NEXT: s_andn2_b64 exec, exec, s[34:35]
; VI-NEXT: s_cbranch_execnz .LBB55_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[34:35]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_nand_i64_noret_offset_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v4, 0
; GFX9-NEXT: global_load_dwordx2 v[2:3], v4, s[4:5] offset:32
; GFX9-NEXT: s_mov_b64 s[34:35], 0
; GFX9-NEXT: .LBB55_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_and_b32_e32 v0, s7, v3
; GFX9-NEXT: v_and_b32_e32 v5, s6, v2
; GFX9-NEXT: v_not_b32_e32 v1, v0
; GFX9-NEXT: v_not_b32_e32 v0, v5
; GFX9-NEXT: global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[4:5] offset:32 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
; GFX9-NEXT: v_mov_b32_e32 v3, v1
; GFX9-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
; GFX9-NEXT: v_mov_b32_e32 v2, v0
; GFX9-NEXT: s_andn2_b64 exec, exec, s[34:35]
; GFX9-NEXT: s_cbranch_execnz .LBB55_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[34:35]
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %gep = getelementptr i64, ptr addrspace(1) %out, i64 4
  %tmp0 = atomicrmw nand ptr addrspace(1) %gep, i64 %in seq_cst
  ret void
}
|
|
|
|
define amdgpu_gfx i64 @global_atomic_nand_i64_ret_scalar(ptr addrspace(1) inreg %ptr, i64 %in) {
; Uniform-pointer variant that also returns the old value in v[0:1].
; The SI path again lane-spills s6/s7 into a reserved VGPR (v6 here) so
; the SGPR quad s[4:7] can serve as the buffer resource for the CAS loop.
; SI-LABEL: global_atomic_nand_i64_ret_scalar:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_writelane_b32 v6, s6, 0
; SI-NEXT: v_writelane_b32 v6, s7, 1
; SI-NEXT: s_mov_b32 s34, s7
; SI-NEXT: s_mov_b32 s35, s6
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0
; SI-NEXT: s_mov_b64 s[36:37], 0
; SI-NEXT: .LBB56_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_mov_b32_e32 v5, v1
; SI-NEXT: v_mov_b32_e32 v4, v0
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_and_b32_e32 v0, s34, v5
; SI-NEXT: v_and_b32_e32 v1, s35, v4
; SI-NEXT: v_not_b32_e32 v3, v0
; SI-NEXT: v_not_b32_e32 v2, v1
; SI-NEXT: v_mov_b32_e32 v0, v2
; SI-NEXT: v_mov_b32_e32 v1, v3
; SI-NEXT: v_mov_b32_e32 v2, v4
; SI-NEXT: v_mov_b32_e32 v3, v5
; SI-NEXT: buffer_atomic_cmpswap_x2 v[0:3], off, s[4:7], 0 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5]
; SI-NEXT: s_or_b64 s[36:37], vcc, s[36:37]
; SI-NEXT: s_andn2_b64 exec, exec, s[36:37]
; SI-NEXT: s_cbranch_execnz .LBB56_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[36:37]
; SI-NEXT: v_readlane_b32 s7, v6, 1
; SI-NEXT: v_readlane_b32 s6, v6, 0
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 ; 4-byte Folded Reload
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_nand_i64_ret_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: v_mov_b32_e32 v1, s5
; VI-NEXT: flat_load_dwordx2 v[0:1], v[0:1]
; VI-NEXT: v_mov_b32_e32 v2, s4
; VI-NEXT: s_mov_b64 s[34:35], 0
; VI-NEXT: v_mov_b32_e32 v3, s5
; VI-NEXT: .LBB56_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_mov_b32_e32 v7, v1
; VI-NEXT: v_mov_b32_e32 v6, v0
; VI-NEXT: v_and_b32_e32 v0, s7, v7
; VI-NEXT: v_and_b32_e32 v1, s6, v6
; VI-NEXT: v_not_b32_e32 v5, v0
; VI-NEXT: v_not_b32_e32 v4, v1
; VI-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[2:3], v[4:7] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[6:7]
; VI-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
; VI-NEXT: s_andn2_b64 exec, exec, s[34:35]
; VI-NEXT: s_cbranch_execnz .LBB56_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[34:35]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_nand_i64_ret_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v2, 0
; GFX9-NEXT: global_load_dwordx2 v[0:1], v2, s[4:5]
; GFX9-NEXT: s_mov_b64 s[34:35], 0
; GFX9-NEXT: .LBB56_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v6, v1
; GFX9-NEXT: v_mov_b32_e32 v5, v0
; GFX9-NEXT: v_and_b32_e32 v0, s7, v6
; GFX9-NEXT: v_and_b32_e32 v1, s6, v5
; GFX9-NEXT: v_not_b32_e32 v4, v0
; GFX9-NEXT: v_not_b32_e32 v3, v1
; GFX9-NEXT: global_atomic_cmpswap_x2 v[0:1], v2, v[3:6], s[4:5] glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[5:6]
; GFX9-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
; GFX9-NEXT: s_andn2_b64 exec, exec, s[34:35]
; GFX9-NEXT: s_cbranch_execnz .LBB56_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[34:35]
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %result = atomicrmw nand ptr addrspace(1) %ptr, i64 %in seq_cst
  ret i64 %result
}
|
|
|
|
define amdgpu_gfx i64 @global_atomic_nand_i64_ret_offset_scalar(ptr addrspace(1) inreg %out, i64 %in) {
; Uniform-pointer + constant 32-byte offset + returned old value.
; Combines the behaviors checked above: SI/GFX9 use an immediate offset,
; VI pre-adds the offset with scalar arithmetic, and all targets run the
; cmpxchg retry loop with the result forwarded to v[0:1].
; SI-LABEL: global_atomic_nand_i64_ret_offset_scalar:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_writelane_b32 v6, s6, 0
; SI-NEXT: v_writelane_b32 v6, s7, 1
; SI-NEXT: s_mov_b32 s34, s7
; SI-NEXT: s_mov_b32 s35, s6
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0 offset:32
; SI-NEXT: s_mov_b64 s[36:37], 0
; SI-NEXT: .LBB57_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_mov_b32_e32 v5, v1
; SI-NEXT: v_mov_b32_e32 v4, v0
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_and_b32_e32 v0, s34, v5
; SI-NEXT: v_and_b32_e32 v1, s35, v4
; SI-NEXT: v_not_b32_e32 v3, v0
; SI-NEXT: v_not_b32_e32 v2, v1
; SI-NEXT: v_mov_b32_e32 v0, v2
; SI-NEXT: v_mov_b32_e32 v1, v3
; SI-NEXT: v_mov_b32_e32 v2, v4
; SI-NEXT: v_mov_b32_e32 v3, v5
; SI-NEXT: buffer_atomic_cmpswap_x2 v[0:3], off, s[4:7], 0 offset:32 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[4:5]
; SI-NEXT: s_or_b64 s[36:37], vcc, s[36:37]
; SI-NEXT: s_andn2_b64 exec, exec, s[36:37]
; SI-NEXT: s_cbranch_execnz .LBB57_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[36:37]
; SI-NEXT: v_readlane_b32 s7, v6, 1
; SI-NEXT: v_readlane_b32 s6, v6, 0
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 ; 4-byte Folded Reload
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_nand_i64_ret_offset_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_add_u32 s34, s4, 32
; VI-NEXT: s_addc_u32 s35, s5, 0
; VI-NEXT: v_mov_b32_e32 v2, s34
; VI-NEXT: v_mov_b32_e32 v3, s35
; VI-NEXT: flat_load_dwordx2 v[0:1], v[2:3]
; VI-NEXT: s_mov_b64 s[34:35], 0
; VI-NEXT: .LBB57_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_mov_b32_e32 v7, v1
; VI-NEXT: v_mov_b32_e32 v6, v0
; VI-NEXT: v_and_b32_e32 v0, s7, v7
; VI-NEXT: v_and_b32_e32 v1, s6, v6
; VI-NEXT: v_not_b32_e32 v5, v0
; VI-NEXT: v_not_b32_e32 v4, v1
; VI-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[2:3], v[4:7] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[6:7]
; VI-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
; VI-NEXT: s_andn2_b64 exec, exec, s[34:35]
; VI-NEXT: s_cbranch_execnz .LBB57_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[34:35]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_nand_i64_ret_offset_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v2, 0
; GFX9-NEXT: global_load_dwordx2 v[0:1], v2, s[4:5] offset:32
; GFX9-NEXT: s_mov_b64 s[34:35], 0
; GFX9-NEXT: .LBB57_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v6, v1
; GFX9-NEXT: v_mov_b32_e32 v5, v0
; GFX9-NEXT: v_and_b32_e32 v0, s7, v6
; GFX9-NEXT: v_and_b32_e32 v1, s6, v5
; GFX9-NEXT: v_not_b32_e32 v4, v0
; GFX9-NEXT: v_not_b32_e32 v3, v1
; GFX9-NEXT: global_atomic_cmpswap_x2 v[0:1], v2, v[3:6], s[4:5] offset:32 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[5:6]
; GFX9-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
; GFX9-NEXT: s_andn2_b64 exec, exec, s[34:35]
; GFX9-NEXT: s_cbranch_execnz .LBB57_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[34:35]
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %gep = getelementptr i64, ptr addrspace(1) %out, i64 4
  %result = atomicrmw nand ptr addrspace(1) %gep, i64 %in seq_cst
  ret i64 %result
}
|
|
|
|
define void @global_atomic_nand_i64_noret_offset__amdgpu_no_remote_memory(ptr addrspace(1) %out, i64 %in) {
; Checks that the !amdgpu.no.remote.memory metadata does not change the
; lowering for nand: since there is no hardware nand atomic at all, the
; same cmpxchg retry loop is emitted as in the un-annotated variant.
; SI-LABEL: global_atomic_nand_i64_noret_offset__amdgpu_no_remote_memory:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
; SI-NEXT: buffer_load_dwordx2 v[6:7], v[0:1], s[4:7], 0 addr64 offset:32
; SI-NEXT: s_mov_b64 s[8:9], 0
; SI-NEXT: .LBB58_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_and_b32_e32 v4, v7, v3
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_and_b32_e32 v8, v6, v2
; SI-NEXT: v_not_b32_e32 v5, v4
; SI-NEXT: v_not_b32_e32 v4, v8
; SI-NEXT: v_mov_b32_e32 v11, v7
; SI-NEXT: v_mov_b32_e32 v10, v6
; SI-NEXT: v_mov_b32_e32 v9, v5
; SI-NEXT: v_mov_b32_e32 v8, v4
; SI-NEXT: buffer_atomic_cmpswap_x2 v[8:11], v[0:1], s[4:7], 0 addr64 offset:32 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[8:9], v[6:7]
; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
; SI-NEXT: v_mov_b32_e32 v6, v8
; SI-NEXT: v_mov_b32_e32 v7, v9
; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
; SI-NEXT: s_cbranch_execnz .LBB58_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[8:9]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_nand_i64_noret_offset__amdgpu_no_remote_memory:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_add_u32_e32 v0, vcc, 32, v0
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI-NEXT: flat_load_dwordx2 v[6:7], v[0:1]
; VI-NEXT: s_mov_b64 s[4:5], 0
; VI-NEXT: .LBB58_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_and_b32_e32 v4, v7, v3
; VI-NEXT: v_and_b32_e32 v8, v6, v2
; VI-NEXT: v_not_b32_e32 v5, v4
; VI-NEXT: v_not_b32_e32 v4, v8
; VI-NEXT: flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
; VI-NEXT: v_mov_b32_e32 v7, v5
; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; VI-NEXT: v_mov_b32_e32 v6, v4
; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
; VI-NEXT: s_cbranch_execnz .LBB58_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[4:5]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_nand_i64_noret_offset__amdgpu_no_remote_memory:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: global_load_dwordx2 v[6:7], v[0:1], off offset:32
; GFX9-NEXT: s_mov_b64 s[4:5], 0
; GFX9-NEXT: .LBB58_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_and_b32_e32 v4, v7, v3
; GFX9-NEXT: v_and_b32_e32 v8, v6, v2
; GFX9-NEXT: v_not_b32_e32 v5, v4
; GFX9-NEXT: v_not_b32_e32 v4, v8
; GFX9-NEXT: global_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7], off offset:32 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
; GFX9-NEXT: v_mov_b32_e32 v7, v5
; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; GFX9-NEXT: v_mov_b32_e32 v6, v4
; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_cbranch_execnz .LBB58_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %gep = getelementptr i64, ptr addrspace(1) %out, i64 4
  %tmp0 = atomicrmw nand ptr addrspace(1) %gep, i64 %in seq_cst, !amdgpu.no.remote.memory !0
  ret void
}
|
|
|
|
; nand has no native atomic instruction, so every target expands it to a
; load + and/not + cmpswap retry loop (the .LBB59_1 blocks below).
define i64 @global_atomic_nand_i64_ret_offset__amdgpu_no_remote_memory(ptr addrspace(1) %out, i64 %in) {
; SI-LABEL: global_atomic_nand_i64_ret_offset__amdgpu_no_remote_memory:
; SI:       ; %bb.0:
; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT:    v_mov_b32_e32 v6, v3
; SI-NEXT:    v_mov_b32_e32 v7, v2
; SI-NEXT:    v_mov_b32_e32 v5, v1
; SI-NEXT:    v_mov_b32_e32 v4, v0
; SI-NEXT:    s_mov_b32 s6, 0
; SI-NEXT:    s_mov_b32 s7, 0xf000
; SI-NEXT:    s_mov_b32 s4, s6
; SI-NEXT:    s_mov_b32 s5, s6
; SI-NEXT:    buffer_load_dwordx2 v[0:1], v[4:5], s[4:7], 0 addr64 offset:32
; SI-NEXT:    s_mov_b64 s[8:9], 0
; SI-NEXT:  .LBB59_1: ; %atomicrmw.start
; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
; SI-NEXT:    s_waitcnt vmcnt(0)
; SI-NEXT:    v_mov_b32_e32 v11, v1
; SI-NEXT:    v_mov_b32_e32 v10, v0
; SI-NEXT:    s_waitcnt expcnt(0)
; SI-NEXT:    v_and_b32_e32 v0, v11, v6
; SI-NEXT:    v_and_b32_e32 v1, v10, v7
; SI-NEXT:    v_not_b32_e32 v9, v0
; SI-NEXT:    v_not_b32_e32 v8, v1
; SI-NEXT:    v_mov_b32_e32 v0, v8
; SI-NEXT:    v_mov_b32_e32 v1, v9
; SI-NEXT:    v_mov_b32_e32 v2, v10
; SI-NEXT:    v_mov_b32_e32 v3, v11
; SI-NEXT:    buffer_atomic_cmpswap_x2 v[0:3], v[4:5], s[4:7], 0 addr64 offset:32 glc
; SI-NEXT:    s_waitcnt vmcnt(0)
; SI-NEXT:    buffer_wbinvl1
; SI-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
; SI-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
; SI-NEXT:    s_andn2_b64 exec, exec, s[8:9]
; SI-NEXT:    s_cbranch_execnz .LBB59_1
; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
; SI-NEXT:    s_or_b64 exec, exec, s[8:9]
; SI-NEXT:    s_waitcnt expcnt(0)
; SI-NEXT:    s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_nand_i64_ret_offset__amdgpu_no_remote_memory:
; VI:       ; %bb.0:
; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT:    v_add_u32_e32 v4, vcc, 32, v0
; VI-NEXT:    v_addc_u32_e32 v5, vcc, 0, v1, vcc
; VI-NEXT:    flat_load_dwordx2 v[0:1], v[4:5]
; VI-NEXT:    s_mov_b64 s[4:5], 0
; VI-NEXT:  .LBB59_1: ; %atomicrmw.start
; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
; VI-NEXT:    s_waitcnt vmcnt(0)
; VI-NEXT:    v_mov_b32_e32 v9, v1
; VI-NEXT:    v_mov_b32_e32 v8, v0
; VI-NEXT:    v_and_b32_e32 v0, v9, v3
; VI-NEXT:    v_and_b32_e32 v1, v8, v2
; VI-NEXT:    v_not_b32_e32 v7, v0
; VI-NEXT:    v_not_b32_e32 v6, v1
; VI-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[6:9] glc
; VI-NEXT:    s_waitcnt vmcnt(0)
; VI-NEXT:    buffer_wbinvl1_vol
; VI-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
; VI-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
; VI-NEXT:    s_andn2_b64 exec, exec, s[4:5]
; VI-NEXT:    s_cbranch_execnz .LBB59_1
; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
; VI-NEXT:    s_or_b64 exec, exec, s[4:5]
; VI-NEXT:    s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_nand_i64_ret_offset__amdgpu_no_remote_memory:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT:    global_load_dwordx2 v[4:5], v[0:1], off offset:32
; GFX9-NEXT:    s_mov_b64 s[4:5], 0
; GFX9-NEXT:  .LBB59_1: ; %atomicrmw.start
; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT:    s_waitcnt vmcnt(0)
; GFX9-NEXT:    v_mov_b32_e32 v7, v5
; GFX9-NEXT:    v_mov_b32_e32 v6, v4
; GFX9-NEXT:    v_and_b32_e32 v4, v7, v3
; GFX9-NEXT:    v_and_b32_e32 v8, v6, v2
; GFX9-NEXT:    v_not_b32_e32 v5, v4
; GFX9-NEXT:    v_not_b32_e32 v4, v8
; GFX9-NEXT:    global_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7], off offset:32 glc
; GFX9-NEXT:    s_waitcnt vmcnt(0)
; GFX9-NEXT:    buffer_wbinvl1_vol
; GFX9-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
; GFX9-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
; GFX9-NEXT:    s_andn2_b64 exec, exec, s[4:5]
; GFX9-NEXT:    s_cbranch_execnz .LBB59_1
; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT:    s_or_b64 exec, exec, s[4:5]
; GFX9-NEXT:    v_mov_b32_e32 v0, v4
; GFX9-NEXT:    v_mov_b32_e32 v1, v5
; GFX9-NEXT:    s_setpc_b64 s[30:31]
  %gep = getelementptr i64, ptr addrspace(1) %out, i64 4
  %result = atomicrmw nand ptr addrspace(1) %gep, i64 %in seq_cst, !amdgpu.no.remote.memory !0
  ret i64 %result
}

; ---------------------------------------------------------------------
; atomicrmw or
; ---------------------------------------------------------------------

; 64-bit atomicrmw or selects to a single native atomic on every target
; (buffer/flat/global _atomic_or_x2); no expansion loop is needed.
define void @global_atomic_or_i64_noret(ptr addrspace(1) %ptr, i64 %in) {
; SI-LABEL: global_atomic_or_i64_noret:
; SI:       ; %bb.0:
; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT:    s_mov_b32 s6, 0
; SI-NEXT:    s_mov_b32 s7, 0xf000
; SI-NEXT:    s_mov_b32 s4, s6
; SI-NEXT:    s_mov_b32 s5, s6
; SI-NEXT:    buffer_atomic_or_x2 v[2:3], v[0:1], s[4:7], 0 addr64
; SI-NEXT:    s_waitcnt vmcnt(0)
; SI-NEXT:    buffer_wbinvl1
; SI-NEXT:    s_waitcnt expcnt(0)
; SI-NEXT:    s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_or_i64_noret:
; VI:       ; %bb.0:
; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT:    flat_atomic_or_x2 v[0:1], v[2:3]
; VI-NEXT:    s_waitcnt vmcnt(0)
; VI-NEXT:    buffer_wbinvl1_vol
; VI-NEXT:    s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_or_i64_noret:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT:    global_atomic_or_x2 v[0:1], v[2:3], off
; GFX9-NEXT:    s_waitcnt vmcnt(0)
; GFX9-NEXT:    buffer_wbinvl1_vol
; GFX9-NEXT:    s_setpc_b64 s[30:31]
  %tmp0 = atomicrmw or ptr addrspace(1) %ptr, i64 %in seq_cst
  ret void
}
; Same as the noret case, with a +32-byte offset folded into the buffer/
; global instruction; VI (flat) must materialize the add explicitly.
define void @global_atomic_or_i64_noret_offset(ptr addrspace(1) %out, i64 %in) {
; SI-LABEL: global_atomic_or_i64_noret_offset:
; SI:       ; %bb.0:
; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT:    s_mov_b32 s6, 0
; SI-NEXT:    s_mov_b32 s7, 0xf000
; SI-NEXT:    s_mov_b32 s4, s6
; SI-NEXT:    s_mov_b32 s5, s6
; SI-NEXT:    buffer_atomic_or_x2 v[2:3], v[0:1], s[4:7], 0 addr64 offset:32
; SI-NEXT:    s_waitcnt vmcnt(0)
; SI-NEXT:    buffer_wbinvl1
; SI-NEXT:    s_waitcnt expcnt(0)
; SI-NEXT:    s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_or_i64_noret_offset:
; VI:       ; %bb.0:
; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT:    v_add_u32_e32 v0, vcc, 32, v0
; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI-NEXT:    flat_atomic_or_x2 v[0:1], v[2:3]
; VI-NEXT:    s_waitcnt vmcnt(0)
; VI-NEXT:    buffer_wbinvl1_vol
; VI-NEXT:    s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_or_i64_noret_offset:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT:    global_atomic_or_x2 v[0:1], v[2:3], off offset:32
; GFX9-NEXT:    s_waitcnt vmcnt(0)
; GFX9-NEXT:    buffer_wbinvl1_vol
; GFX9-NEXT:    s_setpc_b64 s[30:31]
  %gep = getelementptr i64, ptr addrspace(1) %out, i64 4
  %tmp0 = atomicrmw or ptr addrspace(1) %gep, i64 %in seq_cst
  ret void
}
; Return variant: the atomic carries glc so the old value comes back in
; the data registers.
define i64 @global_atomic_or_i64_ret(ptr addrspace(1) %ptr, i64 %in) {
; SI-LABEL: global_atomic_or_i64_ret:
; SI:       ; %bb.0:
; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT:    s_mov_b32 s6, 0
; SI-NEXT:    s_mov_b32 s7, 0xf000
; SI-NEXT:    s_mov_b32 s4, s6
; SI-NEXT:    s_mov_b32 s5, s6
; SI-NEXT:    buffer_atomic_or_x2 v[2:3], v[0:1], s[4:7], 0 addr64 glc
; SI-NEXT:    s_waitcnt vmcnt(0)
; SI-NEXT:    buffer_wbinvl1
; SI-NEXT:    v_mov_b32_e32 v0, v2
; SI-NEXT:    v_mov_b32_e32 v1, v3
; SI-NEXT:    s_waitcnt expcnt(0)
; SI-NEXT:    s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_or_i64_ret:
; VI:       ; %bb.0:
; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT:    flat_atomic_or_x2 v[0:1], v[0:1], v[2:3] glc
; VI-NEXT:    s_waitcnt vmcnt(0)
; VI-NEXT:    buffer_wbinvl1_vol
; VI-NEXT:    s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_or_i64_ret:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT:    global_atomic_or_x2 v[0:1], v[0:1], v[2:3], off glc
; GFX9-NEXT:    s_waitcnt vmcnt(0)
; GFX9-NEXT:    buffer_wbinvl1_vol
; GFX9-NEXT:    s_setpc_b64 s[30:31]
  %result = atomicrmw or ptr addrspace(1) %ptr, i64 %in seq_cst
  ret i64 %result
}
; Return variant with a +32-byte offset; VI computes the address with
; add/addc, SI/GFX9 fold it into the instruction's offset field.
define i64 @global_atomic_or_i64_ret_offset(ptr addrspace(1) %out, i64 %in) {
; SI-LABEL: global_atomic_or_i64_ret_offset:
; SI:       ; %bb.0:
; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT:    s_mov_b32 s6, 0
; SI-NEXT:    s_mov_b32 s7, 0xf000
; SI-NEXT:    s_mov_b32 s4, s6
; SI-NEXT:    s_mov_b32 s5, s6
; SI-NEXT:    buffer_atomic_or_x2 v[2:3], v[0:1], s[4:7], 0 addr64 offset:32 glc
; SI-NEXT:    s_waitcnt vmcnt(0)
; SI-NEXT:    buffer_wbinvl1
; SI-NEXT:    v_mov_b32_e32 v0, v2
; SI-NEXT:    v_mov_b32_e32 v1, v3
; SI-NEXT:    s_waitcnt expcnt(0)
; SI-NEXT:    s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_or_i64_ret_offset:
; VI:       ; %bb.0:
; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT:    v_add_u32_e32 v0, vcc, 32, v0
; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI-NEXT:    flat_atomic_or_x2 v[0:1], v[0:1], v[2:3] glc
; VI-NEXT:    s_waitcnt vmcnt(0)
; VI-NEXT:    buffer_wbinvl1_vol
; VI-NEXT:    s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_or_i64_ret_offset:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT:    global_atomic_or_x2 v[0:1], v[0:1], v[2:3], off offset:32 glc
; GFX9-NEXT:    s_waitcnt vmcnt(0)
; GFX9-NEXT:    buffer_wbinvl1_vol
; GFX9-NEXT:    s_setpc_b64 s[30:31]
  %gep = getelementptr i64, ptr addrspace(1) %out, i64 4
  %result = atomicrmw or ptr addrspace(1) %gep, i64 %in seq_cst
  ret i64 %result
}
; Uniform (inreg) operands under the amdgpu_gfx calling convention. On SI
; the callee-saved s6/s7 are preserved through a VGPR via
; v_writelane/v_readlane, with the spill VGPR itself saved/restored
; around an exec flip.
define amdgpu_gfx void @global_atomic_or_i64_noret_scalar(ptr addrspace(1) inreg %ptr, i64 inreg %in) {
; SI-LABEL: global_atomic_or_i64_noret_scalar:
; SI:       ; %bb.0:
; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT:    s_mov_b64 exec, s[34:35]
; SI-NEXT:    s_waitcnt expcnt(0)
; SI-NEXT:    v_writelane_b32 v2, s6, 0
; SI-NEXT:    v_writelane_b32 v2, s7, 1
; SI-NEXT:    s_mov_b32 s34, s7
; SI-NEXT:    s_mov_b32 s35, s6
; SI-NEXT:    s_mov_b32 s7, 0xf000
; SI-NEXT:    s_mov_b32 s6, -1
; SI-NEXT:    v_mov_b32_e32 v0, s35
; SI-NEXT:    v_mov_b32_e32 v1, s34
; SI-NEXT:    s_waitcnt vmcnt(0)
; SI-NEXT:    buffer_atomic_or_x2 v[0:1], off, s[4:7], 0
; SI-NEXT:    s_waitcnt vmcnt(0)
; SI-NEXT:    buffer_wbinvl1
; SI-NEXT:    v_readlane_b32 s7, v2, 1
; SI-NEXT:    v_readlane_b32 s6, v2, 0
; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 ; 4-byte Folded Reload
; SI-NEXT:    s_mov_b64 exec, s[34:35]
; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT:    s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_or_i64_noret_scalar:
; VI:       ; %bb.0:
; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT:    v_mov_b32_e32 v0, s6
; VI-NEXT:    v_mov_b32_e32 v1, s7
; VI-NEXT:    v_mov_b32_e32 v2, s4
; VI-NEXT:    v_mov_b32_e32 v3, s5
; VI-NEXT:    flat_atomic_or_x2 v[2:3], v[0:1]
; VI-NEXT:    s_waitcnt vmcnt(0)
; VI-NEXT:    buffer_wbinvl1_vol
; VI-NEXT:    s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_or_i64_noret_scalar:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT:    v_mov_b32_e32 v0, s6
; GFX9-NEXT:    v_mov_b32_e32 v1, s7
; GFX9-NEXT:    v_mov_b32_e32 v2, 0
; GFX9-NEXT:    global_atomic_or_x2 v2, v[0:1], s[4:5]
; GFX9-NEXT:    s_waitcnt vmcnt(0)
; GFX9-NEXT:    buffer_wbinvl1_vol
; GFX9-NEXT:    s_setpc_b64 s[30:31]
  %tmp0 = atomicrmw or ptr addrspace(1) %ptr, i64 %in seq_cst
  ret void
}
; Uniform-operand variant with a +32-byte offset; SI/GFX9 fold the offset
; into the instruction, VI adds it to the scalar pointer first.
define amdgpu_gfx void @global_atomic_or_i64_noret_offset_scalar(ptr addrspace(1) inreg %out, i64 inreg %in) {
; SI-LABEL: global_atomic_or_i64_noret_offset_scalar:
; SI:       ; %bb.0:
; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT:    s_mov_b64 exec, s[34:35]
; SI-NEXT:    s_waitcnt expcnt(0)
; SI-NEXT:    v_writelane_b32 v2, s6, 0
; SI-NEXT:    v_writelane_b32 v2, s7, 1
; SI-NEXT:    v_mov_b32_e32 v0, s6
; SI-NEXT:    v_mov_b32_e32 v1, s7
; SI-NEXT:    s_mov_b32 s7, 0xf000
; SI-NEXT:    s_mov_b32 s6, -1
; SI-NEXT:    s_waitcnt vmcnt(0)
; SI-NEXT:    buffer_atomic_or_x2 v[0:1], off, s[4:7], 0 offset:32
; SI-NEXT:    s_waitcnt vmcnt(0)
; SI-NEXT:    buffer_wbinvl1
; SI-NEXT:    v_readlane_b32 s7, v2, 1
; SI-NEXT:    v_readlane_b32 s6, v2, 0
; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 ; 4-byte Folded Reload
; SI-NEXT:    s_mov_b64 exec, s[34:35]
; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT:    s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_or_i64_noret_offset_scalar:
; VI:       ; %bb.0:
; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT:    s_add_u32 s34, s4, 32
; VI-NEXT:    s_addc_u32 s35, s5, 0
; VI-NEXT:    v_mov_b32_e32 v2, s34
; VI-NEXT:    v_mov_b32_e32 v0, s6
; VI-NEXT:    v_mov_b32_e32 v1, s7
; VI-NEXT:    v_mov_b32_e32 v3, s35
; VI-NEXT:    flat_atomic_or_x2 v[2:3], v[0:1]
; VI-NEXT:    s_waitcnt vmcnt(0)
; VI-NEXT:    buffer_wbinvl1_vol
; VI-NEXT:    s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_or_i64_noret_offset_scalar:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT:    v_mov_b32_e32 v0, s6
; GFX9-NEXT:    v_mov_b32_e32 v1, s7
; GFX9-NEXT:    v_mov_b32_e32 v2, 0
; GFX9-NEXT:    global_atomic_or_x2 v2, v[0:1], s[4:5] offset:32
; GFX9-NEXT:    s_waitcnt vmcnt(0)
; GFX9-NEXT:    buffer_wbinvl1_vol
; GFX9-NEXT:    s_setpc_b64 s[30:31]
  %gep = getelementptr i64, ptr addrspace(1) %out, i64 4
  %tmp0 = atomicrmw or ptr addrspace(1) %gep, i64 %in seq_cst
  ret void
}
; Uniform-operand return variant: same SI writelane/readlane save of
; s6/s7, with glc on the atomic to return the previous value.
define amdgpu_gfx i64 @global_atomic_or_i64_ret_scalar(ptr addrspace(1) inreg %ptr, i64 inreg %in) {
; SI-LABEL: global_atomic_or_i64_ret_scalar:
; SI:       ; %bb.0:
; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT:    s_mov_b64 exec, s[34:35]
; SI-NEXT:    s_waitcnt expcnt(0)
; SI-NEXT:    v_writelane_b32 v2, s6, 0
; SI-NEXT:    v_writelane_b32 v2, s7, 1
; SI-NEXT:    s_mov_b32 s34, s7
; SI-NEXT:    s_mov_b32 s35, s6
; SI-NEXT:    s_mov_b32 s7, 0xf000
; SI-NEXT:    s_mov_b32 s6, -1
; SI-NEXT:    v_mov_b32_e32 v0, s35
; SI-NEXT:    v_mov_b32_e32 v1, s34
; SI-NEXT:    s_waitcnt vmcnt(0)
; SI-NEXT:    buffer_atomic_or_x2 v[0:1], off, s[4:7], 0 glc
; SI-NEXT:    s_waitcnt vmcnt(0)
; SI-NEXT:    buffer_wbinvl1
; SI-NEXT:    v_readlane_b32 s7, v2, 1
; SI-NEXT:    v_readlane_b32 s6, v2, 0
; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 ; 4-byte Folded Reload
; SI-NEXT:    s_mov_b64 exec, s[34:35]
; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT:    s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_or_i64_ret_scalar:
; VI:       ; %bb.0:
; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT:    v_mov_b32_e32 v0, s6
; VI-NEXT:    v_mov_b32_e32 v1, s7
; VI-NEXT:    v_mov_b32_e32 v2, s4
; VI-NEXT:    v_mov_b32_e32 v3, s5
; VI-NEXT:    flat_atomic_or_x2 v[0:1], v[2:3], v[0:1] glc
; VI-NEXT:    s_waitcnt vmcnt(0)
; VI-NEXT:    buffer_wbinvl1_vol
; VI-NEXT:    s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_or_i64_ret_scalar:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT:    v_mov_b32_e32 v0, s6
; GFX9-NEXT:    v_mov_b32_e32 v1, s7
; GFX9-NEXT:    v_mov_b32_e32 v2, 0
; GFX9-NEXT:    global_atomic_or_x2 v[0:1], v2, v[0:1], s[4:5] glc
; GFX9-NEXT:    s_waitcnt vmcnt(0)
; GFX9-NEXT:    buffer_wbinvl1_vol
; GFX9-NEXT:    s_setpc_b64 s[30:31]
  %result = atomicrmw or ptr addrspace(1) %ptr, i64 %in seq_cst
  ret i64 %result
}
; Uniform-operand return variant with a +32-byte offset.
define amdgpu_gfx i64 @global_atomic_or_i64_ret_offset_scalar(ptr addrspace(1) inreg %out, i64 inreg %in) {
; SI-LABEL: global_atomic_or_i64_ret_offset_scalar:
; SI:       ; %bb.0:
; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT:    s_mov_b64 exec, s[34:35]
; SI-NEXT:    s_waitcnt expcnt(0)
; SI-NEXT:    v_writelane_b32 v2, s6, 0
; SI-NEXT:    v_writelane_b32 v2, s7, 1
; SI-NEXT:    v_mov_b32_e32 v0, s6
; SI-NEXT:    v_mov_b32_e32 v1, s7
; SI-NEXT:    s_mov_b32 s7, 0xf000
; SI-NEXT:    s_mov_b32 s6, -1
; SI-NEXT:    s_waitcnt vmcnt(0)
; SI-NEXT:    buffer_atomic_or_x2 v[0:1], off, s[4:7], 0 offset:32 glc
; SI-NEXT:    s_waitcnt vmcnt(0)
; SI-NEXT:    buffer_wbinvl1
; SI-NEXT:    v_readlane_b32 s7, v2, 1
; SI-NEXT:    v_readlane_b32 s6, v2, 0
; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 ; 4-byte Folded Reload
; SI-NEXT:    s_mov_b64 exec, s[34:35]
; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT:    s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_or_i64_ret_offset_scalar:
; VI:       ; %bb.0:
; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT:    s_add_u32 s34, s4, 32
; VI-NEXT:    s_addc_u32 s35, s5, 0
; VI-NEXT:    v_mov_b32_e32 v2, s34
; VI-NEXT:    v_mov_b32_e32 v0, s6
; VI-NEXT:    v_mov_b32_e32 v1, s7
; VI-NEXT:    v_mov_b32_e32 v3, s35
; VI-NEXT:    flat_atomic_or_x2 v[0:1], v[2:3], v[0:1] glc
; VI-NEXT:    s_waitcnt vmcnt(0)
; VI-NEXT:    buffer_wbinvl1_vol
; VI-NEXT:    s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_or_i64_ret_offset_scalar:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT:    v_mov_b32_e32 v0, s6
; GFX9-NEXT:    v_mov_b32_e32 v1, s7
; GFX9-NEXT:    v_mov_b32_e32 v2, 0
; GFX9-NEXT:    global_atomic_or_x2 v[0:1], v2, v[0:1], s[4:5] offset:32 glc
; GFX9-NEXT:    s_waitcnt vmcnt(0)
; GFX9-NEXT:    buffer_wbinvl1_vol
; GFX9-NEXT:    s_setpc_b64 s[30:31]
  %gep = getelementptr i64, ptr addrspace(1) %out, i64 4
  %result = atomicrmw or ptr addrspace(1) %gep, i64 %in seq_cst
  ret i64 %result
}
; With !amdgpu.no.remote.memory the or still selects to the same native
; atomic instruction as the plain offset variant.
define void @global_atomic_or_i64_noret_offset__amdgpu_no_remote_memory(ptr addrspace(1) %out, i64 %in) {
; SI-LABEL: global_atomic_or_i64_noret_offset__amdgpu_no_remote_memory:
; SI:       ; %bb.0:
; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT:    s_mov_b32 s6, 0
; SI-NEXT:    s_mov_b32 s7, 0xf000
; SI-NEXT:    s_mov_b32 s4, s6
; SI-NEXT:    s_mov_b32 s5, s6
; SI-NEXT:    buffer_atomic_or_x2 v[2:3], v[0:1], s[4:7], 0 addr64 offset:32
; SI-NEXT:    s_waitcnt vmcnt(0)
; SI-NEXT:    buffer_wbinvl1
; SI-NEXT:    s_waitcnt expcnt(0)
; SI-NEXT:    s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_or_i64_noret_offset__amdgpu_no_remote_memory:
; VI:       ; %bb.0:
; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT:    v_add_u32_e32 v0, vcc, 32, v0
; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI-NEXT:    flat_atomic_or_x2 v[0:1], v[2:3]
; VI-NEXT:    s_waitcnt vmcnt(0)
; VI-NEXT:    buffer_wbinvl1_vol
; VI-NEXT:    s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_or_i64_noret_offset__amdgpu_no_remote_memory:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT:    global_atomic_or_x2 v[0:1], v[2:3], off offset:32
; GFX9-NEXT:    s_waitcnt vmcnt(0)
; GFX9-NEXT:    buffer_wbinvl1_vol
; GFX9-NEXT:    s_setpc_b64 s[30:31]
  %gep = getelementptr i64, ptr addrspace(1) %out, i64 4
  %tmp0 = atomicrmw or ptr addrspace(1) %gep, i64 %in seq_cst, !amdgpu.no.remote.memory !0
  ret void
}
; Return variant with !amdgpu.no.remote.memory; codegen matches the plain
; ret_offset variant.
define i64 @global_atomic_or_i64_ret_offset__amdgpu_no_remote_memory(ptr addrspace(1) %out, i64 %in) {
; SI-LABEL: global_atomic_or_i64_ret_offset__amdgpu_no_remote_memory:
; SI:       ; %bb.0:
; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT:    s_mov_b32 s6, 0
; SI-NEXT:    s_mov_b32 s7, 0xf000
; SI-NEXT:    s_mov_b32 s4, s6
; SI-NEXT:    s_mov_b32 s5, s6
; SI-NEXT:    buffer_atomic_or_x2 v[2:3], v[0:1], s[4:7], 0 addr64 offset:32 glc
; SI-NEXT:    s_waitcnt vmcnt(0)
; SI-NEXT:    buffer_wbinvl1
; SI-NEXT:    v_mov_b32_e32 v0, v2
; SI-NEXT:    v_mov_b32_e32 v1, v3
; SI-NEXT:    s_waitcnt expcnt(0)
; SI-NEXT:    s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_or_i64_ret_offset__amdgpu_no_remote_memory:
; VI:       ; %bb.0:
; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT:    v_add_u32_e32 v0, vcc, 32, v0
; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI-NEXT:    flat_atomic_or_x2 v[0:1], v[0:1], v[2:3] glc
; VI-NEXT:    s_waitcnt vmcnt(0)
; VI-NEXT:    buffer_wbinvl1_vol
; VI-NEXT:    s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_or_i64_ret_offset__amdgpu_no_remote_memory:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT:    global_atomic_or_x2 v[0:1], v[0:1], v[2:3], off offset:32 glc
; GFX9-NEXT:    s_waitcnt vmcnt(0)
; GFX9-NEXT:    buffer_wbinvl1_vol
; GFX9-NEXT:    s_setpc_b64 s[30:31]
  %gep = getelementptr i64, ptr addrspace(1) %out, i64 4
  %result = atomicrmw or ptr addrspace(1) %gep, i64 %in seq_cst, !amdgpu.no.remote.memory !0
  ret i64 %result
}

; ---------------------------------------------------------------------
; atomicrmw xor
; ---------------------------------------------------------------------

; 64-bit atomicrmw xor selects to a single native atomic on every target
; (buffer/flat/global _atomic_xor_x2); no expansion loop is needed.
define void @global_atomic_xor_i64_noret(ptr addrspace(1) %ptr, i64 %in) {
; SI-LABEL: global_atomic_xor_i64_noret:
; SI:       ; %bb.0:
; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT:    s_mov_b32 s6, 0
; SI-NEXT:    s_mov_b32 s7, 0xf000
; SI-NEXT:    s_mov_b32 s4, s6
; SI-NEXT:    s_mov_b32 s5, s6
; SI-NEXT:    buffer_atomic_xor_x2 v[2:3], v[0:1], s[4:7], 0 addr64
; SI-NEXT:    s_waitcnt vmcnt(0)
; SI-NEXT:    buffer_wbinvl1
; SI-NEXT:    s_waitcnt expcnt(0)
; SI-NEXT:    s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_xor_i64_noret:
; VI:       ; %bb.0:
; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT:    flat_atomic_xor_x2 v[0:1], v[2:3]
; VI-NEXT:    s_waitcnt vmcnt(0)
; VI-NEXT:    buffer_wbinvl1_vol
; VI-NEXT:    s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_xor_i64_noret:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT:    global_atomic_xor_x2 v[0:1], v[2:3], off
; GFX9-NEXT:    s_waitcnt vmcnt(0)
; GFX9-NEXT:    buffer_wbinvl1_vol
; GFX9-NEXT:    s_setpc_b64 s[30:31]
  %tmp0 = atomicrmw xor ptr addrspace(1) %ptr, i64 %in seq_cst
  ret void
}
; xor with a +32-byte offset; folded into SI/GFX9 instructions, explicit
; add/addc on VI (flat has no offset field on this target).
define void @global_atomic_xor_i64_noret_offset(ptr addrspace(1) %out, i64 %in) {
; SI-LABEL: global_atomic_xor_i64_noret_offset:
; SI:       ; %bb.0:
; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT:    s_mov_b32 s6, 0
; SI-NEXT:    s_mov_b32 s7, 0xf000
; SI-NEXT:    s_mov_b32 s4, s6
; SI-NEXT:    s_mov_b32 s5, s6
; SI-NEXT:    buffer_atomic_xor_x2 v[2:3], v[0:1], s[4:7], 0 addr64 offset:32
; SI-NEXT:    s_waitcnt vmcnt(0)
; SI-NEXT:    buffer_wbinvl1
; SI-NEXT:    s_waitcnt expcnt(0)
; SI-NEXT:    s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_xor_i64_noret_offset:
; VI:       ; %bb.0:
; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT:    v_add_u32_e32 v0, vcc, 32, v0
; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI-NEXT:    flat_atomic_xor_x2 v[0:1], v[2:3]
; VI-NEXT:    s_waitcnt vmcnt(0)
; VI-NEXT:    buffer_wbinvl1_vol
; VI-NEXT:    s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_xor_i64_noret_offset:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT:    global_atomic_xor_x2 v[0:1], v[2:3], off offset:32
; GFX9-NEXT:    s_waitcnt vmcnt(0)
; GFX9-NEXT:    buffer_wbinvl1_vol
; GFX9-NEXT:    s_setpc_b64 s[30:31]
  %gep = getelementptr i64, ptr addrspace(1) %out, i64 4
  %tmp0 = atomicrmw xor ptr addrspace(1) %gep, i64 %in seq_cst
  ret void
}
; Return variant: glc on the atomic returns the previous memory value.
define i64 @global_atomic_xor_i64_ret(ptr addrspace(1) %ptr, i64 %in) {
; SI-LABEL: global_atomic_xor_i64_ret:
; SI:       ; %bb.0:
; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT:    s_mov_b32 s6, 0
; SI-NEXT:    s_mov_b32 s7, 0xf000
; SI-NEXT:    s_mov_b32 s4, s6
; SI-NEXT:    s_mov_b32 s5, s6
; SI-NEXT:    buffer_atomic_xor_x2 v[2:3], v[0:1], s[4:7], 0 addr64 glc
; SI-NEXT:    s_waitcnt vmcnt(0)
; SI-NEXT:    buffer_wbinvl1
; SI-NEXT:    v_mov_b32_e32 v0, v2
; SI-NEXT:    v_mov_b32_e32 v1, v3
; SI-NEXT:    s_waitcnt expcnt(0)
; SI-NEXT:    s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_xor_i64_ret:
; VI:       ; %bb.0:
; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT:    flat_atomic_xor_x2 v[0:1], v[0:1], v[2:3] glc
; VI-NEXT:    s_waitcnt vmcnt(0)
; VI-NEXT:    buffer_wbinvl1_vol
; VI-NEXT:    s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_xor_i64_ret:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT:    global_atomic_xor_x2 v[0:1], v[0:1], v[2:3], off glc
; GFX9-NEXT:    s_waitcnt vmcnt(0)
; GFX9-NEXT:    buffer_wbinvl1_vol
; GFX9-NEXT:    s_setpc_b64 s[30:31]
  %result = atomicrmw xor ptr addrspace(1) %ptr, i64 %in seq_cst
  ret i64 %result
}
; Return variant with a +32-byte offset.
define i64 @global_atomic_xor_i64_ret_offset(ptr addrspace(1) %out, i64 %in) {
; SI-LABEL: global_atomic_xor_i64_ret_offset:
; SI:       ; %bb.0:
; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT:    s_mov_b32 s6, 0
; SI-NEXT:    s_mov_b32 s7, 0xf000
; SI-NEXT:    s_mov_b32 s4, s6
; SI-NEXT:    s_mov_b32 s5, s6
; SI-NEXT:    buffer_atomic_xor_x2 v[2:3], v[0:1], s[4:7], 0 addr64 offset:32 glc
; SI-NEXT:    s_waitcnt vmcnt(0)
; SI-NEXT:    buffer_wbinvl1
; SI-NEXT:    v_mov_b32_e32 v0, v2
; SI-NEXT:    v_mov_b32_e32 v1, v3
; SI-NEXT:    s_waitcnt expcnt(0)
; SI-NEXT:    s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_xor_i64_ret_offset:
; VI:       ; %bb.0:
; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT:    v_add_u32_e32 v0, vcc, 32, v0
; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI-NEXT:    flat_atomic_xor_x2 v[0:1], v[0:1], v[2:3] glc
; VI-NEXT:    s_waitcnt vmcnt(0)
; VI-NEXT:    buffer_wbinvl1_vol
; VI-NEXT:    s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_xor_i64_ret_offset:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT:    global_atomic_xor_x2 v[0:1], v[0:1], v[2:3], off offset:32 glc
; GFX9-NEXT:    s_waitcnt vmcnt(0)
; GFX9-NEXT:    buffer_wbinvl1_vol
; GFX9-NEXT:    s_setpc_b64 s[30:31]
  %gep = getelementptr i64, ptr addrspace(1) %out, i64 4
  %result = atomicrmw xor ptr addrspace(1) %gep, i64 %in seq_cst
  ret i64 %result
}
; Uniform (inreg) operands under amdgpu_gfx; SI preserves s6/s7 through a
; VGPR with v_writelane/v_readlane around the atomic.
define amdgpu_gfx void @global_atomic_xor_i64_noret_scalar(ptr addrspace(1) inreg %ptr, i64 inreg %in) {
; SI-LABEL: global_atomic_xor_i64_noret_scalar:
; SI:       ; %bb.0:
; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT:    s_mov_b64 exec, s[34:35]
; SI-NEXT:    s_waitcnt expcnt(0)
; SI-NEXT:    v_writelane_b32 v2, s6, 0
; SI-NEXT:    v_writelane_b32 v2, s7, 1
; SI-NEXT:    s_mov_b32 s34, s7
; SI-NEXT:    s_mov_b32 s35, s6
; SI-NEXT:    s_mov_b32 s7, 0xf000
; SI-NEXT:    s_mov_b32 s6, -1
; SI-NEXT:    v_mov_b32_e32 v0, s35
; SI-NEXT:    v_mov_b32_e32 v1, s34
; SI-NEXT:    s_waitcnt vmcnt(0)
; SI-NEXT:    buffer_atomic_xor_x2 v[0:1], off, s[4:7], 0
; SI-NEXT:    s_waitcnt vmcnt(0)
; SI-NEXT:    buffer_wbinvl1
; SI-NEXT:    v_readlane_b32 s7, v2, 1
; SI-NEXT:    v_readlane_b32 s6, v2, 0
; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 ; 4-byte Folded Reload
; SI-NEXT:    s_mov_b64 exec, s[34:35]
; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT:    s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_xor_i64_noret_scalar:
; VI:       ; %bb.0:
; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT:    v_mov_b32_e32 v0, s6
; VI-NEXT:    v_mov_b32_e32 v1, s7
; VI-NEXT:    v_mov_b32_e32 v2, s4
; VI-NEXT:    v_mov_b32_e32 v3, s5
; VI-NEXT:    flat_atomic_xor_x2 v[2:3], v[0:1]
; VI-NEXT:    s_waitcnt vmcnt(0)
; VI-NEXT:    buffer_wbinvl1_vol
; VI-NEXT:    s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_xor_i64_noret_scalar:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT:    v_mov_b32_e32 v0, s6
; GFX9-NEXT:    v_mov_b32_e32 v1, s7
; GFX9-NEXT:    v_mov_b32_e32 v2, 0
; GFX9-NEXT:    global_atomic_xor_x2 v2, v[0:1], s[4:5]
; GFX9-NEXT:    s_waitcnt vmcnt(0)
; GFX9-NEXT:    buffer_wbinvl1_vol
; GFX9-NEXT:    s_setpc_b64 s[30:31]
  %tmp0 = atomicrmw xor ptr addrspace(1) %ptr, i64 %in seq_cst
  ret void
}
; Uniform-operand xor with a +32-byte offset; VI adds the offset to the
; scalar pointer, SI/GFX9 fold it into the atomic instruction.
define amdgpu_gfx void @global_atomic_xor_i64_noret_offset_scalar(ptr addrspace(1) inreg %out, i64 inreg %in) {
; SI-LABEL: global_atomic_xor_i64_noret_offset_scalar:
; SI:       ; %bb.0:
; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT:    s_mov_b64 exec, s[34:35]
; SI-NEXT:    s_waitcnt expcnt(0)
; SI-NEXT:    v_writelane_b32 v2, s6, 0
; SI-NEXT:    v_writelane_b32 v2, s7, 1
; SI-NEXT:    v_mov_b32_e32 v0, s6
; SI-NEXT:    v_mov_b32_e32 v1, s7
; SI-NEXT:    s_mov_b32 s7, 0xf000
; SI-NEXT:    s_mov_b32 s6, -1
; SI-NEXT:    s_waitcnt vmcnt(0)
; SI-NEXT:    buffer_atomic_xor_x2 v[0:1], off, s[4:7], 0 offset:32
; SI-NEXT:    s_waitcnt vmcnt(0)
; SI-NEXT:    buffer_wbinvl1
; SI-NEXT:    v_readlane_b32 s7, v2, 1
; SI-NEXT:    v_readlane_b32 s6, v2, 0
; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 ; 4-byte Folded Reload
; SI-NEXT:    s_mov_b64 exec, s[34:35]
; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT:    s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_xor_i64_noret_offset_scalar:
; VI:       ; %bb.0:
; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT:    s_add_u32 s34, s4, 32
; VI-NEXT:    s_addc_u32 s35, s5, 0
; VI-NEXT:    v_mov_b32_e32 v2, s34
; VI-NEXT:    v_mov_b32_e32 v0, s6
; VI-NEXT:    v_mov_b32_e32 v1, s7
; VI-NEXT:    v_mov_b32_e32 v3, s35
; VI-NEXT:    flat_atomic_xor_x2 v[2:3], v[0:1]
; VI-NEXT:    s_waitcnt vmcnt(0)
; VI-NEXT:    buffer_wbinvl1_vol
; VI-NEXT:    s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_xor_i64_noret_offset_scalar:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT:    v_mov_b32_e32 v0, s6
; GFX9-NEXT:    v_mov_b32_e32 v1, s7
; GFX9-NEXT:    v_mov_b32_e32 v2, 0
; GFX9-NEXT:    global_atomic_xor_x2 v2, v[0:1], s[4:5] offset:32
; GFX9-NEXT:    s_waitcnt vmcnt(0)
; GFX9-NEXT:    buffer_wbinvl1_vol
; GFX9-NEXT:    s_setpc_b64 s[30:31]
  %gep = getelementptr i64, ptr addrspace(1) %out, i64 4
  %tmp0 = atomicrmw xor ptr addrspace(1) %gep, i64 %in seq_cst
  ret void
}
; Uniform-operand return variant; glc on the atomic returns the previous
; value, with the same SI writelane/readlane save of s6/s7.
define amdgpu_gfx i64 @global_atomic_xor_i64_ret_scalar(ptr addrspace(1) inreg %ptr, i64 inreg %in) {
; SI-LABEL: global_atomic_xor_i64_ret_scalar:
; SI:       ; %bb.0:
; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT:    buffer_store_dword v2, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT:    s_mov_b64 exec, s[34:35]
; SI-NEXT:    s_waitcnt expcnt(0)
; SI-NEXT:    v_writelane_b32 v2, s6, 0
; SI-NEXT:    v_writelane_b32 v2, s7, 1
; SI-NEXT:    s_mov_b32 s34, s7
; SI-NEXT:    s_mov_b32 s35, s6
; SI-NEXT:    s_mov_b32 s7, 0xf000
; SI-NEXT:    s_mov_b32 s6, -1
; SI-NEXT:    v_mov_b32_e32 v0, s35
; SI-NEXT:    v_mov_b32_e32 v1, s34
; SI-NEXT:    s_waitcnt vmcnt(0)
; SI-NEXT:    buffer_atomic_xor_x2 v[0:1], off, s[4:7], 0 glc
; SI-NEXT:    s_waitcnt vmcnt(0)
; SI-NEXT:    buffer_wbinvl1
; SI-NEXT:    v_readlane_b32 s7, v2, 1
; SI-NEXT:    v_readlane_b32 s6, v2, 0
; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT:    buffer_load_dword v2, off, s[0:3], s32 ; 4-byte Folded Reload
; SI-NEXT:    s_mov_b64 exec, s[34:35]
; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT:    s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_xor_i64_ret_scalar:
; VI:       ; %bb.0:
; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT:    v_mov_b32_e32 v0, s6
; VI-NEXT:    v_mov_b32_e32 v1, s7
; VI-NEXT:    v_mov_b32_e32 v2, s4
; VI-NEXT:    v_mov_b32_e32 v3, s5
; VI-NEXT:    flat_atomic_xor_x2 v[0:1], v[2:3], v[0:1] glc
; VI-NEXT:    s_waitcnt vmcnt(0)
; VI-NEXT:    buffer_wbinvl1_vol
; VI-NEXT:    s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_xor_i64_ret_scalar:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT:    v_mov_b32_e32 v0, s6
; GFX9-NEXT:    v_mov_b32_e32 v1, s7
; GFX9-NEXT:    v_mov_b32_e32 v2, 0
; GFX9-NEXT:    global_atomic_xor_x2 v[0:1], v2, v[0:1], s[4:5] glc
; GFX9-NEXT:    s_waitcnt vmcnt(0)
; GFX9-NEXT:    buffer_wbinvl1_vol
; GFX9-NEXT:    s_setpc_b64 s[30:31]
  %result = atomicrmw xor ptr addrspace(1) %ptr, i64 %in seq_cst
  ret i64 %result
}
|
|
|
|
define amdgpu_gfx i64 @global_atomic_xor_i64_ret_offset_scalar(ptr addrspace(1) inreg %out, i64 inreg %in) {
; Checks (autogenerated): 64-bit atomic xor with SGPR (inreg) operands at a
; +32-byte offset, returning the old value. SI spills a lane-VGPR to preserve
; s6/s7 across the clobbering setup; VI folds the offset via s_add/addc.
; SI-LABEL: global_atomic_xor_i64_ret_offset_scalar:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_writelane_b32 v2, s6, 0
; SI-NEXT: v_writelane_b32 v2, s7, 1
; SI-NEXT: v_mov_b32_e32 v0, s6
; SI-NEXT: v_mov_b32_e32 v1, s7
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_atomic_xor_x2 v[0:1], off, s[4:7], 0 offset:32 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_readlane_b32 s7, v2, 1
; SI-NEXT: v_readlane_b32 s6, v2, 0
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 ; 4-byte Folded Reload
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_xor_i64_ret_offset_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_add_u32 s34, s4, 32
; VI-NEXT: s_addc_u32 s35, s5, 0
; VI-NEXT: v_mov_b32_e32 v2, s34
; VI-NEXT: v_mov_b32_e32 v0, s6
; VI-NEXT: v_mov_b32_e32 v1, s7
; VI-NEXT: v_mov_b32_e32 v3, s35
; VI-NEXT: flat_atomic_xor_x2 v[0:1], v[2:3], v[0:1] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_xor_i64_ret_offset_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v0, s6
; GFX9-NEXT: v_mov_b32_e32 v1, s7
; GFX9-NEXT: v_mov_b32_e32 v2, 0
; GFX9-NEXT: global_atomic_xor_x2 v[0:1], v2, v[0:1], s[4:5] offset:32 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: s_setpc_b64 s[30:31]
%gep = getelementptr i64, ptr addrspace(1) %out, i64 4
%result = atomicrmw xor ptr addrspace(1) %gep, i64 %in seq_cst
ret i64 %result
}
|
|
|
|
define void @global_atomic_xor_i64_noret_offset__amdgpu_no_remote_memory(ptr addrspace(1) %out, i64 %in) {
; Checks (autogenerated): !amdgpu.no.remote.memory still selects the native
; xor_x2 atomic (no CAS expansion); result unused, so no glc on SI/GFX9.
; SI-LABEL: global_atomic_xor_i64_noret_offset__amdgpu_no_remote_memory:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
; SI-NEXT: buffer_atomic_xor_x2 v[2:3], v[0:1], s[4:7], 0 addr64 offset:32
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_xor_i64_noret_offset__amdgpu_no_remote_memory:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_add_u32_e32 v0, vcc, 32, v0
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI-NEXT: flat_atomic_xor_x2 v[0:1], v[2:3]
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_xor_i64_noret_offset__amdgpu_no_remote_memory:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: global_atomic_xor_x2 v[0:1], v[2:3], off offset:32
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: s_setpc_b64 s[30:31]
%gep = getelementptr i64, ptr addrspace(1) %out, i64 4
%tmp0 = atomicrmw xor ptr addrspace(1) %gep, i64 %in seq_cst, !amdgpu.no.remote.memory !0
ret void
}
|
|
|
|
define i64 @global_atomic_xor_i64_ret_offset__amdgpu_no_remote_memory(ptr addrspace(1) %out, i64 %in) {
; Checks (autogenerated): same as the noret variant but the old value is
; returned, so the atomic carries glc and the result is copied into v[0:1].
; SI-LABEL: global_atomic_xor_i64_ret_offset__amdgpu_no_remote_memory:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
; SI-NEXT: buffer_atomic_xor_x2 v[2:3], v[0:1], s[4:7], 0 addr64 offset:32 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_mov_b32_e32 v0, v2
; SI-NEXT: v_mov_b32_e32 v1, v3
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_xor_i64_ret_offset__amdgpu_no_remote_memory:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_add_u32_e32 v0, vcc, 32, v0
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI-NEXT: flat_atomic_xor_x2 v[0:1], v[0:1], v[2:3] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_xor_i64_ret_offset__amdgpu_no_remote_memory:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: global_atomic_xor_x2 v[0:1], v[0:1], v[2:3], off offset:32 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: s_setpc_b64 s[30:31]
%gep = getelementptr i64, ptr addrspace(1) %out, i64 4
%result = atomicrmw xor ptr addrspace(1) %gep, i64 %in seq_cst, !amdgpu.no.remote.memory !0
ret i64 %result
}
|
|
|
|
; ---------------------------------------------------------------------
|
|
; atomicrmw max
|
|
; ---------------------------------------------------------------------
|
|
|
|
define void @global_atomic_max_i64_noret(ptr addrspace(1) %ptr, i64 %in) {
; Checks (autogenerated): signed i64 max has no native instruction here, so it
; is expanded into a cmpxchg loop (v_cmp_gt_i64 + cndmask selects the max,
; cmpswap_x2 retries until the observed old value matches).
; SI-LABEL: global_atomic_max_i64_noret:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
; SI-NEXT: buffer_load_dwordx2 v[6:7], v[0:1], s[4:7], 0 addr64
; SI-NEXT: s_mov_b64 s[8:9], 0
; SI-NEXT: .LBB80_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cmp_gt_i64_e32 vcc, v[6:7], v[2:3]
; SI-NEXT: v_cndmask_b32_e32 v5, v3, v7, vcc
; SI-NEXT: v_cndmask_b32_e32 v4, v2, v6, vcc
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mov_b32_e32 v11, v7
; SI-NEXT: v_mov_b32_e32 v10, v6
; SI-NEXT: v_mov_b32_e32 v9, v5
; SI-NEXT: v_mov_b32_e32 v8, v4
; SI-NEXT: buffer_atomic_cmpswap_x2 v[8:11], v[0:1], s[4:7], 0 addr64 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[8:9], v[6:7]
; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
; SI-NEXT: v_mov_b32_e32 v6, v8
; SI-NEXT: v_mov_b32_e32 v7, v9
; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
; SI-NEXT: s_cbranch_execnz .LBB80_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[8:9]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_max_i64_noret:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: flat_load_dwordx2 v[6:7], v[0:1]
; VI-NEXT: s_mov_b64 s[4:5], 0
; VI-NEXT: .LBB80_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_cmp_gt_i64_e32 vcc, v[6:7], v[2:3]
; VI-NEXT: v_cndmask_b32_e32 v5, v3, v7, vcc
; VI-NEXT: v_cndmask_b32_e32 v4, v2, v6, vcc
; VI-NEXT: flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
; VI-NEXT: v_mov_b32_e32 v7, v5
; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; VI-NEXT: v_mov_b32_e32 v6, v4
; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
; VI-NEXT: s_cbranch_execnz .LBB80_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[4:5]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_max_i64_noret:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: global_load_dwordx2 v[6:7], v[0:1], off
; GFX9-NEXT: s_mov_b64 s[4:5], 0
; GFX9-NEXT: .LBB80_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_cmp_gt_i64_e32 vcc, v[6:7], v[2:3]
; GFX9-NEXT: v_cndmask_b32_e32 v5, v3, v7, vcc
; GFX9-NEXT: v_cndmask_b32_e32 v4, v2, v6, vcc
; GFX9-NEXT: global_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7], off glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
; GFX9-NEXT: v_mov_b32_e32 v7, v5
; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; GFX9-NEXT: v_mov_b32_e32 v6, v4
; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_cbranch_execnz .LBB80_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_setpc_b64 s[30:31]
%tmp0 = atomicrmw max ptr addrspace(1) %ptr, i64 %in seq_cst
ret void
}
|
|
|
|
define void @global_atomic_max_i64_noret_offset(ptr addrspace(1) %out, i64 %in) {
; Checks (autogenerated): same cmpxchg-loop expansion as max_i64_noret, with a
; +32-byte offset (immediate on SI/GFX9, vector add on VI's flat path).
; SI-LABEL: global_atomic_max_i64_noret_offset:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
; SI-NEXT: buffer_load_dwordx2 v[6:7], v[0:1], s[4:7], 0 addr64 offset:32
; SI-NEXT: s_mov_b64 s[8:9], 0
; SI-NEXT: .LBB81_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cmp_gt_i64_e32 vcc, v[6:7], v[2:3]
; SI-NEXT: v_cndmask_b32_e32 v5, v3, v7, vcc
; SI-NEXT: v_cndmask_b32_e32 v4, v2, v6, vcc
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mov_b32_e32 v11, v7
; SI-NEXT: v_mov_b32_e32 v10, v6
; SI-NEXT: v_mov_b32_e32 v9, v5
; SI-NEXT: v_mov_b32_e32 v8, v4
; SI-NEXT: buffer_atomic_cmpswap_x2 v[8:11], v[0:1], s[4:7], 0 addr64 offset:32 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[8:9], v[6:7]
; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
; SI-NEXT: v_mov_b32_e32 v6, v8
; SI-NEXT: v_mov_b32_e32 v7, v9
; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
; SI-NEXT: s_cbranch_execnz .LBB81_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[8:9]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_max_i64_noret_offset:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_add_u32_e32 v0, vcc, 32, v0
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI-NEXT: flat_load_dwordx2 v[6:7], v[0:1]
; VI-NEXT: s_mov_b64 s[4:5], 0
; VI-NEXT: .LBB81_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_cmp_gt_i64_e32 vcc, v[6:7], v[2:3]
; VI-NEXT: v_cndmask_b32_e32 v5, v3, v7, vcc
; VI-NEXT: v_cndmask_b32_e32 v4, v2, v6, vcc
; VI-NEXT: flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
; VI-NEXT: v_mov_b32_e32 v7, v5
; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; VI-NEXT: v_mov_b32_e32 v6, v4
; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
; VI-NEXT: s_cbranch_execnz .LBB81_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[4:5]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_max_i64_noret_offset:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: global_load_dwordx2 v[6:7], v[0:1], off offset:32
; GFX9-NEXT: s_mov_b64 s[4:5], 0
; GFX9-NEXT: .LBB81_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_cmp_gt_i64_e32 vcc, v[6:7], v[2:3]
; GFX9-NEXT: v_cndmask_b32_e32 v5, v3, v7, vcc
; GFX9-NEXT: v_cndmask_b32_e32 v4, v2, v6, vcc
; GFX9-NEXT: global_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7], off offset:32 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
; GFX9-NEXT: v_mov_b32_e32 v7, v5
; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; GFX9-NEXT: v_mov_b32_e32 v6, v4
; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_cbranch_execnz .LBB81_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_setpc_b64 s[30:31]
%gep = getelementptr i64, ptr addrspace(1) %out, i64 4
%tmp0 = atomicrmw max ptr addrspace(1) %gep, i64 %in seq_cst
ret void
}
|
|
|
|
define i64 @global_atomic_max_i64_ret(ptr addrspace(1) %ptr, i64 %in) {
; Checks (autogenerated): returning variant of the max cmpxchg loop; the loop
; exit copies the last observed old value into the v[0:1] return registers.
; SI-LABEL: global_atomic_max_i64_ret:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v5, v3
; SI-NEXT: v_mov_b32_e32 v4, v2
; SI-NEXT: v_mov_b32_e32 v7, v1
; SI-NEXT: v_mov_b32_e32 v6, v0
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
; SI-NEXT: buffer_load_dwordx2 v[0:1], v[6:7], s[4:7], 0 addr64
; SI-NEXT: s_mov_b64 s[8:9], 0
; SI-NEXT: .LBB82_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_mov_b32_e32 v11, v1
; SI-NEXT: v_mov_b32_e32 v10, v0
; SI-NEXT: v_cmp_gt_i64_e32 vcc, v[10:11], v[4:5]
; SI-NEXT: v_cndmask_b32_e32 v9, v5, v11, vcc
; SI-NEXT: v_cndmask_b32_e32 v8, v4, v10, vcc
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mov_b32_e32 v0, v8
; SI-NEXT: v_mov_b32_e32 v1, v9
; SI-NEXT: v_mov_b32_e32 v2, v10
; SI-NEXT: v_mov_b32_e32 v3, v11
; SI-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v[6:7], s[4:7], 0 addr64 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
; SI-NEXT: s_cbranch_execnz .LBB82_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[8:9]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_max_i64_ret:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: flat_load_dwordx2 v[4:5], v[0:1]
; VI-NEXT: s_mov_b64 s[4:5], 0
; VI-NEXT: .LBB82_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_mov_b32_e32 v7, v5
; VI-NEXT: v_mov_b32_e32 v6, v4
; VI-NEXT: v_cmp_gt_i64_e32 vcc, v[6:7], v[2:3]
; VI-NEXT: v_cndmask_b32_e32 v5, v3, v7, vcc
; VI-NEXT: v_cndmask_b32_e32 v4, v2, v6, vcc
; VI-NEXT: flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
; VI-NEXT: s_cbranch_execnz .LBB82_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[4:5]
; VI-NEXT: v_mov_b32_e32 v0, v4
; VI-NEXT: v_mov_b32_e32 v1, v5
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_max_i64_ret:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: global_load_dwordx2 v[4:5], v[0:1], off
; GFX9-NEXT: s_mov_b64 s[4:5], 0
; GFX9-NEXT: .LBB82_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v7, v5
; GFX9-NEXT: v_mov_b32_e32 v6, v4
; GFX9-NEXT: v_cmp_gt_i64_e32 vcc, v[6:7], v[2:3]
; GFX9-NEXT: v_cndmask_b32_e32 v5, v3, v7, vcc
; GFX9-NEXT: v_cndmask_b32_e32 v4, v2, v6, vcc
; GFX9-NEXT: global_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7], off glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_cbranch_execnz .LBB82_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX9-NEXT: v_mov_b32_e32 v0, v4
; GFX9-NEXT: v_mov_b32_e32 v1, v5
; GFX9-NEXT: s_setpc_b64 s[30:31]
%result = atomicrmw max ptr addrspace(1) %ptr, i64 %in seq_cst
ret i64 %result
}
|
|
|
|
define i64 @global_atomic_max_i64_ret_offset(ptr addrspace(1) %out, i64 %in) {
; Checks (autogenerated): returning max cmpxchg loop at a +32-byte offset.
; SI-LABEL: global_atomic_max_i64_ret_offset:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v5, v3
; SI-NEXT: v_mov_b32_e32 v4, v2
; SI-NEXT: v_mov_b32_e32 v7, v1
; SI-NEXT: v_mov_b32_e32 v6, v0
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
; SI-NEXT: buffer_load_dwordx2 v[0:1], v[6:7], s[4:7], 0 addr64 offset:32
; SI-NEXT: s_mov_b64 s[8:9], 0
; SI-NEXT: .LBB83_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_mov_b32_e32 v11, v1
; SI-NEXT: v_mov_b32_e32 v10, v0
; SI-NEXT: v_cmp_gt_i64_e32 vcc, v[10:11], v[4:5]
; SI-NEXT: v_cndmask_b32_e32 v9, v5, v11, vcc
; SI-NEXT: v_cndmask_b32_e32 v8, v4, v10, vcc
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mov_b32_e32 v0, v8
; SI-NEXT: v_mov_b32_e32 v1, v9
; SI-NEXT: v_mov_b32_e32 v2, v10
; SI-NEXT: v_mov_b32_e32 v3, v11
; SI-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v[6:7], s[4:7], 0 addr64 offset:32 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
; SI-NEXT: s_cbranch_execnz .LBB83_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[8:9]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_max_i64_ret_offset:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_add_u32_e32 v4, vcc, 32, v0
; VI-NEXT: v_addc_u32_e32 v5, vcc, 0, v1, vcc
; VI-NEXT: flat_load_dwordx2 v[0:1], v[4:5]
; VI-NEXT: s_mov_b64 s[4:5], 0
; VI-NEXT: .LBB83_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_mov_b32_e32 v9, v1
; VI-NEXT: v_mov_b32_e32 v8, v0
; VI-NEXT: v_cmp_gt_i64_e32 vcc, v[8:9], v[2:3]
; VI-NEXT: v_cndmask_b32_e32 v7, v3, v9, vcc
; VI-NEXT: v_cndmask_b32_e32 v6, v2, v8, vcc
; VI-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[6:9] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
; VI-NEXT: s_cbranch_execnz .LBB83_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[4:5]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_max_i64_ret_offset:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: global_load_dwordx2 v[4:5], v[0:1], off offset:32
; GFX9-NEXT: s_mov_b64 s[4:5], 0
; GFX9-NEXT: .LBB83_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v7, v5
; GFX9-NEXT: v_mov_b32_e32 v6, v4
; GFX9-NEXT: v_cmp_gt_i64_e32 vcc, v[6:7], v[2:3]
; GFX9-NEXT: v_cndmask_b32_e32 v5, v3, v7, vcc
; GFX9-NEXT: v_cndmask_b32_e32 v4, v2, v6, vcc
; GFX9-NEXT: global_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7], off offset:32 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_cbranch_execnz .LBB83_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX9-NEXT: v_mov_b32_e32 v0, v4
; GFX9-NEXT: v_mov_b32_e32 v1, v5
; GFX9-NEXT: s_setpc_b64 s[30:31]
%gep = getelementptr i64, ptr addrspace(1) %out, i64 4
%result = atomicrmw max ptr addrspace(1) %gep, i64 %in seq_cst
ret i64 %result
}
|
|
|
|
define amdgpu_gfx void @global_atomic_max_i64_noret_scalar(ptr addrspace(1) inreg %ptr, i64 inreg %in) {
; Checks (autogenerated): max cmpxchg loop with SGPR (inreg) pointer/value;
; SI spills s6/s7 to a lane VGPR around the loop, VI/GFX9 keep the scalar
; compare value in s[6:7] and compare with v_cmp_lt_i64 (operands swapped).
; SI-LABEL: global_atomic_max_i64_noret_scalar:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_writelane_b32 v10, s6, 0
; SI-NEXT: v_writelane_b32 v10, s7, 1
; SI-NEXT: s_mov_b32 s35, s7
; SI-NEXT: s_mov_b32 s34, s6
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: buffer_load_dwordx2 v[2:3], off, s[4:7], 0
; SI-NEXT: s_mov_b64 s[36:37], 0
; SI-NEXT: v_mov_b32_e32 v4, s35
; SI-NEXT: v_mov_b32_e32 v5, s34
; SI-NEXT: .LBB84_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cmp_lt_i64_e32 vcc, s[34:35], v[2:3]
; SI-NEXT: v_cndmask_b32_e32 v1, v4, v3, vcc
; SI-NEXT: v_cndmask_b32_e32 v0, v5, v2, vcc
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mov_b32_e32 v9, v3
; SI-NEXT: v_mov_b32_e32 v8, v2
; SI-NEXT: v_mov_b32_e32 v7, v1
; SI-NEXT: v_mov_b32_e32 v6, v0
; SI-NEXT: buffer_atomic_cmpswap_x2 v[6:9], off, s[4:7], 0 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[6:7], v[2:3]
; SI-NEXT: s_or_b64 s[36:37], vcc, s[36:37]
; SI-NEXT: v_mov_b32_e32 v2, v6
; SI-NEXT: v_mov_b32_e32 v3, v7
; SI-NEXT: s_andn2_b64 exec, exec, s[36:37]
; SI-NEXT: s_cbranch_execnz .LBB84_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[36:37]
; SI-NEXT: v_readlane_b32 s7, v10, 1
; SI-NEXT: v_readlane_b32 s6, v10, 0
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 ; 4-byte Folded Reload
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_max_i64_noret_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: v_mov_b32_e32 v1, s5
; VI-NEXT: flat_load_dwordx2 v[2:3], v[0:1]
; VI-NEXT: v_mov_b32_e32 v4, s4
; VI-NEXT: s_mov_b64 s[34:35], 0
; VI-NEXT: v_mov_b32_e32 v6, s7
; VI-NEXT: v_mov_b32_e32 v7, s6
; VI-NEXT: v_mov_b32_e32 v5, s5
; VI-NEXT: .LBB84_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_cmp_lt_i64_e32 vcc, s[6:7], v[2:3]
; VI-NEXT: v_cndmask_b32_e32 v1, v6, v3, vcc
; VI-NEXT: v_cndmask_b32_e32 v0, v7, v2, vcc
; VI-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
; VI-NEXT: v_mov_b32_e32 v3, v1
; VI-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
; VI-NEXT: v_mov_b32_e32 v2, v0
; VI-NEXT: s_andn2_b64 exec, exec, s[34:35]
; VI-NEXT: s_cbranch_execnz .LBB84_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[34:35]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_max_i64_noret_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v4, 0
; GFX9-NEXT: global_load_dwordx2 v[2:3], v4, s[4:5]
; GFX9-NEXT: s_mov_b64 s[34:35], 0
; GFX9-NEXT: v_mov_b32_e32 v5, s7
; GFX9-NEXT: v_mov_b32_e32 v6, s6
; GFX9-NEXT: .LBB84_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_cmp_lt_i64_e32 vcc, s[6:7], v[2:3]
; GFX9-NEXT: v_cndmask_b32_e32 v1, v5, v3, vcc
; GFX9-NEXT: v_cndmask_b32_e32 v0, v6, v2, vcc
; GFX9-NEXT: global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[4:5] glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
; GFX9-NEXT: v_mov_b32_e32 v3, v1
; GFX9-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
; GFX9-NEXT: v_mov_b32_e32 v2, v0
; GFX9-NEXT: s_andn2_b64 exec, exec, s[34:35]
; GFX9-NEXT: s_cbranch_execnz .LBB84_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[34:35]
; GFX9-NEXT: s_setpc_b64 s[30:31]
%tmp0 = atomicrmw max ptr addrspace(1) %ptr, i64 %in seq_cst
ret void
}
|
|
|
|
define amdgpu_gfx void @global_atomic_max_i64_noret_offset_scalar(ptr addrspace(1) inreg %out, i64 inreg %in) {
; Checks (autogenerated): scalar-operand max cmpxchg loop at +32 bytes;
; same structure as the non-offset scalar variant with the offset folded
; into the memory operands (or s_add/addc on VI).
; SI-LABEL: global_atomic_max_i64_noret_offset_scalar:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_writelane_b32 v10, s6, 0
; SI-NEXT: v_writelane_b32 v10, s7, 1
; SI-NEXT: s_mov_b32 s35, s7
; SI-NEXT: s_mov_b32 s34, s6
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: buffer_load_dwordx2 v[2:3], off, s[4:7], 0 offset:32
; SI-NEXT: s_mov_b64 s[36:37], 0
; SI-NEXT: v_mov_b32_e32 v4, s35
; SI-NEXT: v_mov_b32_e32 v5, s34
; SI-NEXT: .LBB85_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cmp_lt_i64_e32 vcc, s[34:35], v[2:3]
; SI-NEXT: v_cndmask_b32_e32 v1, v4, v3, vcc
; SI-NEXT: v_cndmask_b32_e32 v0, v5, v2, vcc
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mov_b32_e32 v9, v3
; SI-NEXT: v_mov_b32_e32 v8, v2
; SI-NEXT: v_mov_b32_e32 v7, v1
; SI-NEXT: v_mov_b32_e32 v6, v0
; SI-NEXT: buffer_atomic_cmpswap_x2 v[6:9], off, s[4:7], 0 offset:32 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[6:7], v[2:3]
; SI-NEXT: s_or_b64 s[36:37], vcc, s[36:37]
; SI-NEXT: v_mov_b32_e32 v2, v6
; SI-NEXT: v_mov_b32_e32 v3, v7
; SI-NEXT: s_andn2_b64 exec, exec, s[36:37]
; SI-NEXT: s_cbranch_execnz .LBB85_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[36:37]
; SI-NEXT: v_readlane_b32 s7, v10, 1
; SI-NEXT: v_readlane_b32 s6, v10, 0
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 ; 4-byte Folded Reload
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_max_i64_noret_offset_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_add_u32 s34, s4, 32
; VI-NEXT: s_addc_u32 s35, s5, 0
; VI-NEXT: v_mov_b32_e32 v4, s34
; VI-NEXT: v_mov_b32_e32 v5, s35
; VI-NEXT: flat_load_dwordx2 v[2:3], v[4:5]
; VI-NEXT: s_mov_b64 s[34:35], 0
; VI-NEXT: v_mov_b32_e32 v6, s7
; VI-NEXT: v_mov_b32_e32 v7, s6
; VI-NEXT: .LBB85_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_cmp_lt_i64_e32 vcc, s[6:7], v[2:3]
; VI-NEXT: v_cndmask_b32_e32 v1, v6, v3, vcc
; VI-NEXT: v_cndmask_b32_e32 v0, v7, v2, vcc
; VI-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
; VI-NEXT: v_mov_b32_e32 v3, v1
; VI-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
; VI-NEXT: v_mov_b32_e32 v2, v0
; VI-NEXT: s_andn2_b64 exec, exec, s[34:35]
; VI-NEXT: s_cbranch_execnz .LBB85_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[34:35]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_max_i64_noret_offset_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v4, 0
; GFX9-NEXT: global_load_dwordx2 v[2:3], v4, s[4:5] offset:32
; GFX9-NEXT: s_mov_b64 s[34:35], 0
; GFX9-NEXT: v_mov_b32_e32 v5, s7
; GFX9-NEXT: v_mov_b32_e32 v6, s6
; GFX9-NEXT: .LBB85_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_cmp_lt_i64_e32 vcc, s[6:7], v[2:3]
; GFX9-NEXT: v_cndmask_b32_e32 v1, v5, v3, vcc
; GFX9-NEXT: v_cndmask_b32_e32 v0, v6, v2, vcc
; GFX9-NEXT: global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[4:5] offset:32 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
; GFX9-NEXT: v_mov_b32_e32 v3, v1
; GFX9-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
; GFX9-NEXT: v_mov_b32_e32 v2, v0
; GFX9-NEXT: s_andn2_b64 exec, exec, s[34:35]
; GFX9-NEXT: s_cbranch_execnz .LBB85_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[34:35]
; GFX9-NEXT: s_setpc_b64 s[30:31]
%gep = getelementptr i64, ptr addrspace(1) %out, i64 4
%tmp0 = atomicrmw max ptr addrspace(1) %gep, i64 %in seq_cst
ret void
}
|
|
|
|
; Returning variant with SGPR ("scalar") pointer and operand: the i64 signed-max
; atomicrmw expands to a load + cmpxchg retry loop (v_cmp_lt_i64 / v_cndmask /
; *_atomic_cmpswap_x2 with an exec-mask s_andn2/s_cbranch_execnz back-edge).
; Per target: SI uses buffer ops and spills s6/s7 through VGPR lanes (v_writelane/
; v_readlane into v10), VI copies the SGPR address into VGPRs for flat ops, and
; GFX9 addresses the global ops directly off s[4:5] with a zero VGPR offset.
define amdgpu_gfx i64 @global_atomic_max_i64_ret_scalar(ptr addrspace(1) inreg %ptr, i64 inreg %in) {
; SI-LABEL: global_atomic_max_i64_ret_scalar:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_writelane_b32 v10, s6, 0
; SI-NEXT: v_writelane_b32 v10, s7, 1
; SI-NEXT: s_mov_b32 s35, s7
; SI-NEXT: s_mov_b32 s34, s6
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0
; SI-NEXT: s_mov_b64 s[36:37], 0
; SI-NEXT: v_mov_b32_e32 v4, s35
; SI-NEXT: v_mov_b32_e32 v5, s34
; SI-NEXT: .LBB86_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_mov_b32_e32 v9, v1
; SI-NEXT: v_mov_b32_e32 v8, v0
; SI-NEXT: v_cmp_lt_i64_e32 vcc, s[34:35], v[8:9]
; SI-NEXT: v_cndmask_b32_e32 v7, v4, v9, vcc
; SI-NEXT: v_cndmask_b32_e32 v6, v5, v8, vcc
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mov_b32_e32 v0, v6
; SI-NEXT: v_mov_b32_e32 v1, v7
; SI-NEXT: v_mov_b32_e32 v2, v8
; SI-NEXT: v_mov_b32_e32 v3, v9
; SI-NEXT: buffer_atomic_cmpswap_x2 v[0:3], off, s[4:7], 0 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
; SI-NEXT: s_or_b64 s[36:37], vcc, s[36:37]
; SI-NEXT: s_andn2_b64 exec, exec, s[36:37]
; SI-NEXT: s_cbranch_execnz .LBB86_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[36:37]
; SI-NEXT: v_readlane_b32 s7, v10, 1
; SI-NEXT: v_readlane_b32 s6, v10, 0
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 ; 4-byte Folded Reload
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_max_i64_ret_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: v_mov_b32_e32 v1, s5
; VI-NEXT: flat_load_dwordx2 v[0:1], v[0:1]
; VI-NEXT: v_mov_b32_e32 v2, s4
; VI-NEXT: s_mov_b64 s[34:35], 0
; VI-NEXT: v_mov_b32_e32 v4, s7
; VI-NEXT: v_mov_b32_e32 v5, s6
; VI-NEXT: v_mov_b32_e32 v3, s5
; VI-NEXT: .LBB86_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_mov_b32_e32 v9, v1
; VI-NEXT: v_mov_b32_e32 v8, v0
; VI-NEXT: v_cmp_lt_i64_e32 vcc, s[6:7], v[8:9]
; VI-NEXT: v_cndmask_b32_e32 v7, v4, v9, vcc
; VI-NEXT: v_cndmask_b32_e32 v6, v5, v8, vcc
; VI-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[2:3], v[6:9] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
; VI-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
; VI-NEXT: s_andn2_b64 exec, exec, s[34:35]
; VI-NEXT: s_cbranch_execnz .LBB86_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[34:35]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_max_i64_ret_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v2, 0
; GFX9-NEXT: global_load_dwordx2 v[0:1], v2, s[4:5]
; GFX9-NEXT: s_mov_b64 s[34:35], 0
; GFX9-NEXT: v_mov_b32_e32 v3, s7
; GFX9-NEXT: v_mov_b32_e32 v4, s6
; GFX9-NEXT: .LBB86_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v8, v1
; GFX9-NEXT: v_mov_b32_e32 v7, v0
; GFX9-NEXT: v_cmp_lt_i64_e32 vcc, s[6:7], v[7:8]
; GFX9-NEXT: v_cndmask_b32_e32 v6, v3, v8, vcc
; GFX9-NEXT: v_cndmask_b32_e32 v5, v4, v7, vcc
; GFX9-NEXT: global_atomic_cmpswap_x2 v[0:1], v2, v[5:8], s[4:5] glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[7:8]
; GFX9-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
; GFX9-NEXT: s_andn2_b64 exec, exec, s[34:35]
; GFX9-NEXT: s_cbranch_execnz .LBB86_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[34:35]
; GFX9-NEXT: s_setpc_b64 s[30:31]
%result = atomicrmw max ptr addrspace(1) %ptr, i64 %in seq_cst
ret i64 %result
}
|
|
|
|
; Same SGPR-operand returning i64 max expansion as the non-offset scalar case,
; but the access is at a +32-byte offset (GEP of element 4 below). Checks how
; each target folds the offset: SI and GFX9 fold it into the memory-instruction
; offset field (offset:32), while VI must materialize base+32 with
; s_add_u32/s_addc_u32 because flat instructions there take a full address.
define amdgpu_gfx i64 @global_atomic_max_i64_ret_offset_scalar(ptr addrspace(1) inreg %out, i64 inreg %in) {
; SI-LABEL: global_atomic_max_i64_ret_offset_scalar:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_writelane_b32 v10, s6, 0
; SI-NEXT: v_writelane_b32 v10, s7, 1
; SI-NEXT: s_mov_b32 s35, s7
; SI-NEXT: s_mov_b32 s34, s6
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0 offset:32
; SI-NEXT: s_mov_b64 s[36:37], 0
; SI-NEXT: v_mov_b32_e32 v4, s35
; SI-NEXT: v_mov_b32_e32 v5, s34
; SI-NEXT: .LBB87_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_mov_b32_e32 v9, v1
; SI-NEXT: v_mov_b32_e32 v8, v0
; SI-NEXT: v_cmp_lt_i64_e32 vcc, s[34:35], v[8:9]
; SI-NEXT: v_cndmask_b32_e32 v7, v4, v9, vcc
; SI-NEXT: v_cndmask_b32_e32 v6, v5, v8, vcc
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mov_b32_e32 v0, v6
; SI-NEXT: v_mov_b32_e32 v1, v7
; SI-NEXT: v_mov_b32_e32 v2, v8
; SI-NEXT: v_mov_b32_e32 v3, v9
; SI-NEXT: buffer_atomic_cmpswap_x2 v[0:3], off, s[4:7], 0 offset:32 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
; SI-NEXT: s_or_b64 s[36:37], vcc, s[36:37]
; SI-NEXT: s_andn2_b64 exec, exec, s[36:37]
; SI-NEXT: s_cbranch_execnz .LBB87_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[36:37]
; SI-NEXT: v_readlane_b32 s7, v10, 1
; SI-NEXT: v_readlane_b32 s6, v10, 0
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 ; 4-byte Folded Reload
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_max_i64_ret_offset_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_add_u32 s34, s4, 32
; VI-NEXT: s_addc_u32 s35, s5, 0
; VI-NEXT: v_mov_b32_e32 v2, s34
; VI-NEXT: v_mov_b32_e32 v3, s35
; VI-NEXT: flat_load_dwordx2 v[0:1], v[2:3]
; VI-NEXT: s_mov_b64 s[34:35], 0
; VI-NEXT: v_mov_b32_e32 v4, s7
; VI-NEXT: v_mov_b32_e32 v5, s6
; VI-NEXT: .LBB87_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_mov_b32_e32 v9, v1
; VI-NEXT: v_mov_b32_e32 v8, v0
; VI-NEXT: v_cmp_lt_i64_e32 vcc, s[6:7], v[8:9]
; VI-NEXT: v_cndmask_b32_e32 v7, v4, v9, vcc
; VI-NEXT: v_cndmask_b32_e32 v6, v5, v8, vcc
; VI-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[2:3], v[6:9] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
; VI-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
; VI-NEXT: s_andn2_b64 exec, exec, s[34:35]
; VI-NEXT: s_cbranch_execnz .LBB87_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[34:35]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_max_i64_ret_offset_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v2, 0
; GFX9-NEXT: global_load_dwordx2 v[0:1], v2, s[4:5] offset:32
; GFX9-NEXT: s_mov_b64 s[34:35], 0
; GFX9-NEXT: v_mov_b32_e32 v3, s7
; GFX9-NEXT: v_mov_b32_e32 v4, s6
; GFX9-NEXT: .LBB87_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v8, v1
; GFX9-NEXT: v_mov_b32_e32 v7, v0
; GFX9-NEXT: v_cmp_lt_i64_e32 vcc, s[6:7], v[7:8]
; GFX9-NEXT: v_cndmask_b32_e32 v6, v3, v8, vcc
; GFX9-NEXT: v_cndmask_b32_e32 v5, v4, v7, vcc
; GFX9-NEXT: global_atomic_cmpswap_x2 v[0:1], v2, v[5:8], s[4:5] offset:32 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[7:8]
; GFX9-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
; GFX9-NEXT: s_andn2_b64 exec, exec, s[34:35]
; GFX9-NEXT: s_cbranch_execnz .LBB87_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[34:35]
; GFX9-NEXT: s_setpc_b64 s[30:31]
%gep = getelementptr i64, ptr addrspace(1) %out, i64 4
%result = atomicrmw max ptr addrspace(1) %gep, i64 %in seq_cst
ret i64 %result
}
|
|
|
|
; Kernel variant (result unused) with a dynamic 64-bit index plus a constant
; +32-byte offset: the address is formed with s_lshl_b64 (index*8) and a 64-bit
; scalar add, then the i64 max is expanded to the cmpxchg retry loop. Since the
; result is dead, the loop ends at s_endpgm with no final store.
define amdgpu_kernel void @atomic_max_i64_addr64_offset(ptr addrspace(1) %out, i64 %in, i64 %index) {
; SI-LABEL: atomic_max_i64_addr64_offset:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0xd
; SI-NEXT: s_load_dwordx4 s[0:3], s[2:3], 0x9
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_lshl_b64 s[4:5], s[4:5], 3
; SI-NEXT: s_add_u32 s4, s0, s4
; SI-NEXT: s_addc_u32 s5, s1, s5
; SI-NEXT: s_load_dwordx2 s[8:9], s[4:5], 0x8
; SI-NEXT: s_mov_b64 s[0:1], 0
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: v_mov_b32_e32 v4, s3
; SI-NEXT: v_mov_b32_e32 v5, s2
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v2, s8
; SI-NEXT: v_mov_b32_e32 v3, s9
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: .LBB88_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: v_cmp_lt_i64_e32 vcc, s[2:3], v[2:3]
; SI-NEXT: v_cndmask_b32_e32 v1, v4, v3, vcc
; SI-NEXT: v_cndmask_b32_e32 v0, v5, v2, vcc
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mov_b32_e32 v9, v3
; SI-NEXT: v_mov_b32_e32 v8, v2
; SI-NEXT: v_mov_b32_e32 v7, v1
; SI-NEXT: v_mov_b32_e32 v6, v0
; SI-NEXT: buffer_atomic_cmpswap_x2 v[6:9], off, s[4:7], 0 offset:32 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[6:7], v[2:3]
; SI-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
; SI-NEXT: v_mov_b32_e32 v2, v6
; SI-NEXT: v_mov_b32_e32 v3, v7
; SI-NEXT: s_andn2_b64 exec, exec, s[0:1]
; SI-NEXT: s_cbranch_execnz .LBB88_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_max_i64_addr64_offset:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx2 s[6:7], s[2:3], 0x34
; VI-NEXT: s_load_dwordx4 s[0:3], s[2:3], 0x24
; VI-NEXT: s_mov_b64 s[4:5], 0
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_lshl_b64 s[6:7], s[6:7], 3
; VI-NEXT: s_add_u32 s0, s0, s6
; VI-NEXT: s_addc_u32 s1, s1, s7
; VI-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x20
; VI-NEXT: s_add_u32 s0, s0, 32
; VI-NEXT: s_addc_u32 s1, s1, 0
; VI-NEXT: v_mov_b32_e32 v5, s1
; VI-NEXT: v_mov_b32_e32 v6, s3
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v2, s6
; VI-NEXT: v_mov_b32_e32 v7, s2
; VI-NEXT: v_mov_b32_e32 v3, s7
; VI-NEXT: v_mov_b32_e32 v4, s0
; VI-NEXT: .LBB88_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: v_cmp_lt_i64_e32 vcc, s[2:3], v[2:3]
; VI-NEXT: v_cndmask_b32_e32 v1, v6, v3, vcc
; VI-NEXT: v_cndmask_b32_e32 v0, v7, v2, vcc
; VI-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
; VI-NEXT: v_mov_b32_e32 v3, v1
; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; VI-NEXT: v_mov_b32_e32 v2, v0
; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
; VI-NEXT: s_cbranch_execnz .LBB88_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_max_i64_addr64_offset:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x34
; GFX9-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x24
; GFX9-NEXT: s_mov_b64 s[2:3], 0
; GFX9-NEXT: v_mov_b32_e32 v6, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: s_lshl_b64 s[0:1], s[0:1], 3
; GFX9-NEXT: s_add_u32 s0, s4, s0
; GFX9-NEXT: s_addc_u32 s1, s5, s1
; GFX9-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x20
; GFX9-NEXT: v_mov_b32_e32 v4, s7
; GFX9-NEXT: v_mov_b32_e32 v5, s6
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v2, s4
; GFX9-NEXT: v_mov_b32_e32 v3, s5
; GFX9-NEXT: .LBB88_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: v_cmp_lt_i64_e32 vcc, s[6:7], v[2:3]
; GFX9-NEXT: v_cndmask_b32_e32 v1, v4, v3, vcc
; GFX9-NEXT: v_cndmask_b32_e32 v0, v5, v2, vcc
; GFX9-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] offset:32 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
; GFX9-NEXT: v_mov_b32_e32 v3, v1
; GFX9-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
; GFX9-NEXT: v_mov_b32_e32 v2, v0
; GFX9-NEXT: s_andn2_b64 exec, exec, s[2:3]
; GFX9-NEXT: s_cbranch_execnz .LBB88_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_endpgm
entry:
%ptr = getelementptr i64, ptr addrspace(1) %out, i64 %index
%gep = getelementptr i64, ptr addrspace(1) %ptr, i64 4
%tmp0 = atomicrmw max ptr addrspace(1) %gep, i64 %in seq_cst
ret void
}
|
|
|
|
; Kernel variant that both indexes dynamically (+32-byte constant offset) and
; uses the atomic's return value: after the cmpxchg retry loop converges, the
; loaded old value is stored to a second global pointer (buffer_store_dwordx2 /
; flat_store_dwordx2 / global_store_dwordx2 per target).
define amdgpu_kernel void @atomic_max_i64_ret_addr64_offset(ptr addrspace(1) %out, ptr addrspace(1) %out2, i64 %in, i64 %index) {
; SI-LABEL: atomic_max_i64_ret_addr64_offset:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dwordx8 s[0:7], s[2:3], 0x9
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_lshl_b64 s[6:7], s[6:7], 3
; SI-NEXT: s_add_u32 s8, s0, s6
; SI-NEXT: s_addc_u32 s9, s1, s7
; SI-NEXT: s_load_dwordx2 s[6:7], s[8:9], 0x8
; SI-NEXT: s_mov_b64 s[0:1], 0
; SI-NEXT: s_mov_b32 s11, 0xf000
; SI-NEXT: v_mov_b32_e32 v8, s5
; SI-NEXT: v_mov_b32_e32 v9, s4
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v2, s6
; SI-NEXT: v_mov_b32_e32 v3, s7
; SI-NEXT: s_mov_b32 s10, -1
; SI-NEXT: .LBB89_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: v_cmp_lt_i64_e32 vcc, s[4:5], v[2:3]
; SI-NEXT: v_cndmask_b32_e32 v1, v8, v3, vcc
; SI-NEXT: v_cndmask_b32_e32 v0, v9, v2, vcc
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mov_b32_e32 v7, v3
; SI-NEXT: v_mov_b32_e32 v6, v2
; SI-NEXT: v_mov_b32_e32 v5, v1
; SI-NEXT: v_mov_b32_e32 v4, v0
; SI-NEXT: buffer_atomic_cmpswap_x2 v[4:7], off, s[8:11], 0 offset:32 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
; SI-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
; SI-NEXT: v_mov_b32_e32 v2, v4
; SI-NEXT: v_mov_b32_e32 v3, v5
; SI-NEXT: s_andn2_b64 exec, exec, s[0:1]
; SI-NEXT: s_cbranch_execnz .LBB89_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[0:1]
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: s_mov_b32 s4, s2
; SI-NEXT: s_mov_b32 s5, s3
; SI-NEXT: buffer_store_dwordx2 v[4:5], off, s[4:7], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_max_i64_ret_addr64_offset:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx8 s[0:7], s[2:3], 0x24
; VI-NEXT: s_mov_b64 s[8:9], 0
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_lshl_b64 s[6:7], s[6:7], 3
; VI-NEXT: s_add_u32 s0, s0, s6
; VI-NEXT: s_addc_u32 s1, s1, s7
; VI-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x20
; VI-NEXT: s_add_u32 s0, s0, 32
; VI-NEXT: s_addc_u32 s1, s1, 0
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v4, s5
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v2, s6
; VI-NEXT: v_mov_b32_e32 v5, s4
; VI-NEXT: v_mov_b32_e32 v3, s7
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: .LBB89_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: v_mov_b32_e32 v9, v3
; VI-NEXT: v_mov_b32_e32 v8, v2
; VI-NEXT: v_cmp_lt_i64_e32 vcc, s[4:5], v[8:9]
; VI-NEXT: v_cndmask_b32_e32 v7, v4, v9, vcc
; VI-NEXT: v_cndmask_b32_e32 v6, v5, v8, vcc
; VI-NEXT: flat_atomic_cmpswap_x2 v[2:3], v[0:1], v[6:9] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[8:9]
; VI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
; VI-NEXT: s_andn2_b64 exec, exec, s[8:9]
; VI-NEXT: s_cbranch_execnz .LBB89_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[8:9]
; VI-NEXT: v_mov_b32_e32 v0, s2
; VI-NEXT: v_mov_b32_e32 v1, s3
; VI-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_max_i64_ret_addr64_offset:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dwordx8 s[4:11], s[2:3], 0x24
; GFX9-NEXT: s_mov_b64 s[2:3], 0
; GFX9-NEXT: v_mov_b32_e32 v4, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: s_lshl_b64 s[0:1], s[10:11], 3
; GFX9-NEXT: s_add_u32 s0, s4, s0
; GFX9-NEXT: s_addc_u32 s1, s5, s1
; GFX9-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x20
; GFX9-NEXT: v_mov_b32_e32 v2, s9
; GFX9-NEXT: v_mov_b32_e32 v3, s8
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v0, s4
; GFX9-NEXT: v_mov_b32_e32 v1, s5
; GFX9-NEXT: .LBB89_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: v_mov_b32_e32 v8, v1
; GFX9-NEXT: v_mov_b32_e32 v7, v0
; GFX9-NEXT: v_cmp_lt_i64_e32 vcc, s[8:9], v[7:8]
; GFX9-NEXT: v_cndmask_b32_e32 v6, v2, v8, vcc
; GFX9-NEXT: v_cndmask_b32_e32 v5, v3, v7, vcc
; GFX9-NEXT: global_atomic_cmpswap_x2 v[0:1], v4, v[5:8], s[0:1] offset:32 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[7:8]
; GFX9-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
; GFX9-NEXT: s_andn2_b64 exec, exec, s[2:3]
; GFX9-NEXT: s_cbranch_execnz .LBB89_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[2:3]
; GFX9-NEXT: v_mov_b32_e32 v2, 0
; GFX9-NEXT: global_store_dwordx2 v2, v[0:1], s[6:7]
; GFX9-NEXT: s_endpgm
entry:
%ptr = getelementptr i64, ptr addrspace(1) %out, i64 %index
%gep = getelementptr i64, ptr addrspace(1) %ptr, i64 4
%tmp0 = atomicrmw max ptr addrspace(1) %gep, i64 %in seq_cst
store i64 %tmp0, ptr addrspace(1) %out2
ret void
}
|
|
|
|
; Kernel, dynamic index, no constant offset, result unused: same scalar address
; math (s_lshl_b64 by 3 + 64-bit add) and cmpxchg retry loop as the offset
; variant, but the memory instructions carry no offset field (SI/GFX9) and VI
; needs no extra +32 address add.
define amdgpu_kernel void @atomic_max_i64_addr64(ptr addrspace(1) %out, i64 %in, i64 %index) {
; SI-LABEL: atomic_max_i64_addr64:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0xd
; SI-NEXT: s_load_dwordx4 s[0:3], s[2:3], 0x9
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_lshl_b64 s[4:5], s[4:5], 3
; SI-NEXT: s_add_u32 s4, s0, s4
; SI-NEXT: s_addc_u32 s5, s1, s5
; SI-NEXT: s_load_dwordx2 s[8:9], s[4:5], 0x0
; SI-NEXT: s_mov_b64 s[0:1], 0
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: v_mov_b32_e32 v4, s3
; SI-NEXT: v_mov_b32_e32 v5, s2
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v2, s8
; SI-NEXT: v_mov_b32_e32 v3, s9
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: .LBB90_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: v_cmp_lt_i64_e32 vcc, s[2:3], v[2:3]
; SI-NEXT: v_cndmask_b32_e32 v1, v4, v3, vcc
; SI-NEXT: v_cndmask_b32_e32 v0, v5, v2, vcc
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mov_b32_e32 v9, v3
; SI-NEXT: v_mov_b32_e32 v8, v2
; SI-NEXT: v_mov_b32_e32 v7, v1
; SI-NEXT: v_mov_b32_e32 v6, v0
; SI-NEXT: buffer_atomic_cmpswap_x2 v[6:9], off, s[4:7], 0 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[6:7], v[2:3]
; SI-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
; SI-NEXT: v_mov_b32_e32 v2, v6
; SI-NEXT: v_mov_b32_e32 v3, v7
; SI-NEXT: s_andn2_b64 exec, exec, s[0:1]
; SI-NEXT: s_cbranch_execnz .LBB90_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_max_i64_addr64:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0x34
; VI-NEXT: s_load_dwordx4 s[0:3], s[2:3], 0x24
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_lshl_b64 s[4:5], s[4:5], 3
; VI-NEXT: s_add_u32 s4, s0, s4
; VI-NEXT: s_addc_u32 s5, s1, s5
; VI-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x0
; VI-NEXT: v_mov_b32_e32 v4, s4
; VI-NEXT: s_mov_b64 s[0:1], 0
; VI-NEXT: v_mov_b32_e32 v6, s3
; VI-NEXT: v_mov_b32_e32 v7, s2
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v2, s6
; VI-NEXT: v_mov_b32_e32 v3, s7
; VI-NEXT: v_mov_b32_e32 v5, s5
; VI-NEXT: .LBB90_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: v_cmp_lt_i64_e32 vcc, s[2:3], v[2:3]
; VI-NEXT: v_cndmask_b32_e32 v1, v6, v3, vcc
; VI-NEXT: v_cndmask_b32_e32 v0, v7, v2, vcc
; VI-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
; VI-NEXT: v_mov_b32_e32 v3, v1
; VI-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
; VI-NEXT: v_mov_b32_e32 v2, v0
; VI-NEXT: s_andn2_b64 exec, exec, s[0:1]
; VI-NEXT: s_cbranch_execnz .LBB90_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_max_i64_addr64:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x34
; GFX9-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x24
; GFX9-NEXT: s_mov_b64 s[2:3], 0
; GFX9-NEXT: v_mov_b32_e32 v6, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: s_lshl_b64 s[0:1], s[0:1], 3
; GFX9-NEXT: s_add_u32 s0, s4, s0
; GFX9-NEXT: s_addc_u32 s1, s5, s1
; GFX9-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
; GFX9-NEXT: v_mov_b32_e32 v4, s7
; GFX9-NEXT: v_mov_b32_e32 v5, s6
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v2, s4
; GFX9-NEXT: v_mov_b32_e32 v3, s5
; GFX9-NEXT: .LBB90_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: v_cmp_lt_i64_e32 vcc, s[6:7], v[2:3]
; GFX9-NEXT: v_cndmask_b32_e32 v1, v4, v3, vcc
; GFX9-NEXT: v_cndmask_b32_e32 v0, v5, v2, vcc
; GFX9-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
; GFX9-NEXT: v_mov_b32_e32 v3, v1
; GFX9-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
; GFX9-NEXT: v_mov_b32_e32 v2, v0
; GFX9-NEXT: s_andn2_b64 exec, exec, s[2:3]
; GFX9-NEXT: s_cbranch_execnz .LBB90_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_endpgm
entry:
%ptr = getelementptr i64, ptr addrspace(1) %out, i64 %index
%tmp0 = atomicrmw max ptr addrspace(1) %ptr, i64 %in seq_cst
ret void
}
|
|
|
|
; Kernel, dynamic index, no constant offset, result used: the cmpxchg retry
; loop's converged old value is stored to the second output pointer after the
; loop (buffer / flat / global dwordx2 store per target).
define amdgpu_kernel void @atomic_max_i64_ret_addr64(ptr addrspace(1) %out, ptr addrspace(1) %out2, i64 %in, i64 %index) {
; SI-LABEL: atomic_max_i64_ret_addr64:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dwordx8 s[0:7], s[2:3], 0x9
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_lshl_b64 s[6:7], s[6:7], 3
; SI-NEXT: s_add_u32 s8, s0, s6
; SI-NEXT: s_addc_u32 s9, s1, s7
; SI-NEXT: s_load_dwordx2 s[6:7], s[8:9], 0x0
; SI-NEXT: s_mov_b64 s[0:1], 0
; SI-NEXT: s_mov_b32 s11, 0xf000
; SI-NEXT: v_mov_b32_e32 v8, s5
; SI-NEXT: v_mov_b32_e32 v9, s4
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v2, s6
; SI-NEXT: v_mov_b32_e32 v3, s7
; SI-NEXT: s_mov_b32 s10, -1
; SI-NEXT: .LBB91_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: v_cmp_lt_i64_e32 vcc, s[4:5], v[2:3]
; SI-NEXT: v_cndmask_b32_e32 v1, v8, v3, vcc
; SI-NEXT: v_cndmask_b32_e32 v0, v9, v2, vcc
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mov_b32_e32 v7, v3
; SI-NEXT: v_mov_b32_e32 v6, v2
; SI-NEXT: v_mov_b32_e32 v5, v1
; SI-NEXT: v_mov_b32_e32 v4, v0
; SI-NEXT: buffer_atomic_cmpswap_x2 v[4:7], off, s[8:11], 0 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
; SI-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
; SI-NEXT: v_mov_b32_e32 v2, v4
; SI-NEXT: v_mov_b32_e32 v3, v5
; SI-NEXT: s_andn2_b64 exec, exec, s[0:1]
; SI-NEXT: s_cbranch_execnz .LBB91_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[0:1]
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: s_mov_b32 s4, s2
; SI-NEXT: s_mov_b32 s5, s3
; SI-NEXT: buffer_store_dwordx2 v[4:5], off, s[4:7], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_max_i64_ret_addr64:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx8 s[0:7], s[2:3], 0x24
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_lshl_b64 s[6:7], s[6:7], 3
; VI-NEXT: s_add_u32 s6, s0, s6
; VI-NEXT: s_addc_u32 s7, s1, s7
; VI-NEXT: s_load_dwordx2 s[8:9], s[6:7], 0x0
; VI-NEXT: v_mov_b32_e32 v0, s6
; VI-NEXT: s_mov_b64 s[0:1], 0
; VI-NEXT: v_mov_b32_e32 v4, s5
; VI-NEXT: v_mov_b32_e32 v5, s4
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v2, s8
; VI-NEXT: v_mov_b32_e32 v3, s9
; VI-NEXT: v_mov_b32_e32 v1, s7
; VI-NEXT: .LBB91_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: v_mov_b32_e32 v9, v3
; VI-NEXT: v_mov_b32_e32 v8, v2
; VI-NEXT: v_cmp_lt_i64_e32 vcc, s[4:5], v[8:9]
; VI-NEXT: v_cndmask_b32_e32 v7, v4, v9, vcc
; VI-NEXT: v_cndmask_b32_e32 v6, v5, v8, vcc
; VI-NEXT: flat_atomic_cmpswap_x2 v[2:3], v[0:1], v[6:9] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[8:9]
; VI-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
; VI-NEXT: s_andn2_b64 exec, exec, s[0:1]
; VI-NEXT: s_cbranch_execnz .LBB91_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[0:1]
; VI-NEXT: v_mov_b32_e32 v0, s2
; VI-NEXT: v_mov_b32_e32 v1, s3
; VI-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_max_i64_ret_addr64:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dwordx8 s[4:11], s[2:3], 0x24
; GFX9-NEXT: s_mov_b64 s[2:3], 0
; GFX9-NEXT: v_mov_b32_e32 v4, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: s_lshl_b64 s[0:1], s[10:11], 3
; GFX9-NEXT: s_add_u32 s0, s4, s0
; GFX9-NEXT: s_addc_u32 s1, s5, s1
; GFX9-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
; GFX9-NEXT: v_mov_b32_e32 v2, s9
; GFX9-NEXT: v_mov_b32_e32 v3, s8
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v0, s4
; GFX9-NEXT: v_mov_b32_e32 v1, s5
; GFX9-NEXT: .LBB91_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: v_mov_b32_e32 v8, v1
; GFX9-NEXT: v_mov_b32_e32 v7, v0
; GFX9-NEXT: v_cmp_lt_i64_e32 vcc, s[8:9], v[7:8]
; GFX9-NEXT: v_cndmask_b32_e32 v6, v2, v8, vcc
; GFX9-NEXT: v_cndmask_b32_e32 v5, v3, v7, vcc
; GFX9-NEXT: global_atomic_cmpswap_x2 v[0:1], v4, v[5:8], s[0:1] glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[7:8]
; GFX9-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
; GFX9-NEXT: s_andn2_b64 exec, exec, s[2:3]
; GFX9-NEXT: s_cbranch_execnz .LBB91_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[2:3]
; GFX9-NEXT: v_mov_b32_e32 v2, 0
; GFX9-NEXT: global_store_dwordx2 v2, v[0:1], s[6:7]
; GFX9-NEXT: s_endpgm
entry:
%ptr = getelementptr i64, ptr addrspace(1) %out, i64 %index
%tmp0 = atomicrmw max ptr addrspace(1) %ptr, i64 %in seq_cst
store i64 %tmp0, ptr addrspace(1) %out2
ret void
}
|
|
|
|
; VGPR-pointer noret variant at a +32-byte offset, carrying the
; !amdgpu.no.remote.memory metadata: with these targets the i64 max is still
; expanded to the cmpxchg retry loop (the compare direction here is
; v_cmp_gt_i64 with loaded value on the left, equivalent select of the larger
; operand). SI folds the offset into addr64 buffer ops, VI adds 32 to the VGPR
; address pair for flat ops, GFX9 folds it into the global ops' offset field.
define void @global_atomic_max_i64_noret_offset__amdgpu_no_remote_memory(ptr addrspace(1) %out, i64 %in) {
; SI-LABEL: global_atomic_max_i64_noret_offset__amdgpu_no_remote_memory:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
; SI-NEXT: buffer_load_dwordx2 v[6:7], v[0:1], s[4:7], 0 addr64 offset:32
; SI-NEXT: s_mov_b64 s[8:9], 0
; SI-NEXT: .LBB92_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cmp_gt_i64_e32 vcc, v[6:7], v[2:3]
; SI-NEXT: v_cndmask_b32_e32 v5, v3, v7, vcc
; SI-NEXT: v_cndmask_b32_e32 v4, v2, v6, vcc
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mov_b32_e32 v11, v7
; SI-NEXT: v_mov_b32_e32 v10, v6
; SI-NEXT: v_mov_b32_e32 v9, v5
; SI-NEXT: v_mov_b32_e32 v8, v4
; SI-NEXT: buffer_atomic_cmpswap_x2 v[8:11], v[0:1], s[4:7], 0 addr64 offset:32 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[8:9], v[6:7]
; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
; SI-NEXT: v_mov_b32_e32 v6, v8
; SI-NEXT: v_mov_b32_e32 v7, v9
; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
; SI-NEXT: s_cbranch_execnz .LBB92_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[8:9]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_max_i64_noret_offset__amdgpu_no_remote_memory:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_add_u32_e32 v0, vcc, 32, v0
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI-NEXT: flat_load_dwordx2 v[6:7], v[0:1]
; VI-NEXT: s_mov_b64 s[4:5], 0
; VI-NEXT: .LBB92_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_cmp_gt_i64_e32 vcc, v[6:7], v[2:3]
; VI-NEXT: v_cndmask_b32_e32 v5, v3, v7, vcc
; VI-NEXT: v_cndmask_b32_e32 v4, v2, v6, vcc
; VI-NEXT: flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
; VI-NEXT: v_mov_b32_e32 v7, v5
; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; VI-NEXT: v_mov_b32_e32 v6, v4
; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
; VI-NEXT: s_cbranch_execnz .LBB92_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[4:5]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_max_i64_noret_offset__amdgpu_no_remote_memory:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: global_load_dwordx2 v[6:7], v[0:1], off offset:32
; GFX9-NEXT: s_mov_b64 s[4:5], 0
; GFX9-NEXT: .LBB92_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_cmp_gt_i64_e32 vcc, v[6:7], v[2:3]
; GFX9-NEXT: v_cndmask_b32_e32 v5, v3, v7, vcc
; GFX9-NEXT: v_cndmask_b32_e32 v4, v2, v6, vcc
; GFX9-NEXT: global_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7], off offset:32 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
; GFX9-NEXT: v_mov_b32_e32 v7, v5
; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; GFX9-NEXT: v_mov_b32_e32 v6, v4
; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_cbranch_execnz .LBB92_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_setpc_b64 s[30:31]
%gep = getelementptr i64, ptr addrspace(1) %out, i64 4
%tmp0 = atomicrmw max ptr addrspace(1) %gep, i64 %in seq_cst, !amdgpu.no.remote.memory !0
ret void
}
|
|
|
|
define i64 @global_atomic_max_i64_ret_offset__amdgpu_no_remote_memory(ptr addrspace(1) %out, i64 %in) {
|
|
; SI-LABEL: global_atomic_max_i64_ret_offset__amdgpu_no_remote_memory:
|
|
; SI: ; %bb.0:
|
|
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; SI-NEXT: v_mov_b32_e32 v5, v3
|
|
; SI-NEXT: v_mov_b32_e32 v4, v2
|
|
; SI-NEXT: v_mov_b32_e32 v7, v1
|
|
; SI-NEXT: v_mov_b32_e32 v6, v0
|
|
; SI-NEXT: s_mov_b32 s6, 0
|
|
; SI-NEXT: s_mov_b32 s7, 0xf000
|
|
; SI-NEXT: s_mov_b32 s4, s6
|
|
; SI-NEXT: s_mov_b32 s5, s6
|
|
; SI-NEXT: buffer_load_dwordx2 v[0:1], v[6:7], s[4:7], 0 addr64 offset:32
|
|
; SI-NEXT: s_mov_b64 s[8:9], 0
|
|
; SI-NEXT: .LBB93_1: ; %atomicrmw.start
|
|
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
|
|
; SI-NEXT: s_waitcnt vmcnt(0)
|
|
; SI-NEXT: v_mov_b32_e32 v11, v1
|
|
; SI-NEXT: v_mov_b32_e32 v10, v0
|
|
; SI-NEXT: v_cmp_gt_i64_e32 vcc, v[10:11], v[4:5]
|
|
; SI-NEXT: v_cndmask_b32_e32 v9, v5, v11, vcc
|
|
; SI-NEXT: v_cndmask_b32_e32 v8, v4, v10, vcc
|
|
; SI-NEXT: s_waitcnt expcnt(0)
|
|
; SI-NEXT: v_mov_b32_e32 v0, v8
|
|
; SI-NEXT: v_mov_b32_e32 v1, v9
|
|
; SI-NEXT: v_mov_b32_e32 v2, v10
|
|
; SI-NEXT: v_mov_b32_e32 v3, v11
|
|
; SI-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v[6:7], s[4:7], 0 addr64 offset:32 glc
|
|
; SI-NEXT: s_waitcnt vmcnt(0)
|
|
; SI-NEXT: buffer_wbinvl1
|
|
; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
|
|
; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
|
|
; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
|
|
; SI-NEXT: s_cbranch_execnz .LBB93_1
|
|
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
|
|
; SI-NEXT: s_or_b64 exec, exec, s[8:9]
|
|
; SI-NEXT: s_waitcnt expcnt(0)
|
|
; SI-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; VI-LABEL: global_atomic_max_i64_ret_offset__amdgpu_no_remote_memory:
|
|
; VI: ; %bb.0:
|
|
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; VI-NEXT: v_add_u32_e32 v4, vcc, 32, v0
|
|
; VI-NEXT: v_addc_u32_e32 v5, vcc, 0, v1, vcc
|
|
; VI-NEXT: flat_load_dwordx2 v[0:1], v[4:5]
|
|
; VI-NEXT: s_mov_b64 s[4:5], 0
|
|
; VI-NEXT: .LBB93_1: ; %atomicrmw.start
|
|
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
|
|
; VI-NEXT: s_waitcnt vmcnt(0)
|
|
; VI-NEXT: v_mov_b32_e32 v9, v1
|
|
; VI-NEXT: v_mov_b32_e32 v8, v0
|
|
; VI-NEXT: v_cmp_gt_i64_e32 vcc, v[8:9], v[2:3]
|
|
; VI-NEXT: v_cndmask_b32_e32 v7, v3, v9, vcc
|
|
; VI-NEXT: v_cndmask_b32_e32 v6, v2, v8, vcc
|
|
; VI-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[6:9] glc
|
|
; VI-NEXT: s_waitcnt vmcnt(0)
|
|
; VI-NEXT: buffer_wbinvl1_vol
|
|
; VI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
|
|
; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
|
|
; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
|
|
; VI-NEXT: s_cbranch_execnz .LBB93_1
|
|
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
|
|
; VI-NEXT: s_or_b64 exec, exec, s[4:5]
|
|
; VI-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; GFX9-LABEL: global_atomic_max_i64_ret_offset__amdgpu_no_remote_memory:
|
|
; GFX9: ; %bb.0:
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; GFX9-NEXT: global_load_dwordx2 v[4:5], v[0:1], off offset:32
|
|
; GFX9-NEXT: s_mov_b64 s[4:5], 0
|
|
; GFX9-NEXT: .LBB93_1: ; %atomicrmw.start
|
|
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0)
|
|
; GFX9-NEXT: v_mov_b32_e32 v7, v5
|
|
; GFX9-NEXT: v_mov_b32_e32 v6, v4
|
|
; GFX9-NEXT: v_cmp_gt_i64_e32 vcc, v[6:7], v[2:3]
|
|
; GFX9-NEXT: v_cndmask_b32_e32 v5, v3, v7, vcc
|
|
; GFX9-NEXT: v_cndmask_b32_e32 v4, v2, v6, vcc
|
|
; GFX9-NEXT: global_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7], off offset:32 glc
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0)
|
|
; GFX9-NEXT: buffer_wbinvl1_vol
|
|
; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
|
|
; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
|
|
; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
|
|
; GFX9-NEXT: s_cbranch_execnz .LBB93_1
|
|
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
|
|
; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
|
|
; GFX9-NEXT: v_mov_b32_e32 v0, v4
|
|
; GFX9-NEXT: v_mov_b32_e32 v1, v5
|
|
; GFX9-NEXT: s_setpc_b64 s[30:31]
|
|
%gep = getelementptr i64, ptr addrspace(1) %out, i64 4
|
|
%result = atomicrmw max ptr addrspace(1) %gep, i64 %in seq_cst, !amdgpu.no.remote.memory !0
|
|
ret i64 %result
|
|
}
|
|
|
|
; ---------------------------------------------------------------------
|
|
; atomicrmw umax
|
|
; ---------------------------------------------------------------------
|
|
|
|
; No-return seq_cst atomicrmw umax (unsigned i64) on a VGPR global pointer.
; All three targets expand it to a cmpxchg retry loop (v_cmp_gt_u64 +
; v_cndmask + *_atomic_cmpswap_x2); the loop result is only fed back into the
; next iteration, never returned.
; NOTE: check lines are autogenerated (update_llc_test_checks.py).
define void @global_atomic_umax_i64_noret(ptr addrspace(1) %ptr, i64 %in) {
; SI-LABEL: global_atomic_umax_i64_noret:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
; SI-NEXT: buffer_load_dwordx2 v[6:7], v[0:1], s[4:7], 0 addr64
; SI-NEXT: s_mov_b64 s[8:9], 0
; SI-NEXT: .LBB94_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cmp_gt_u64_e32 vcc, v[6:7], v[2:3]
; SI-NEXT: v_cndmask_b32_e32 v5, v3, v7, vcc
; SI-NEXT: v_cndmask_b32_e32 v4, v2, v6, vcc
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mov_b32_e32 v11, v7
; SI-NEXT: v_mov_b32_e32 v10, v6
; SI-NEXT: v_mov_b32_e32 v9, v5
; SI-NEXT: v_mov_b32_e32 v8, v4
; SI-NEXT: buffer_atomic_cmpswap_x2 v[8:11], v[0:1], s[4:7], 0 addr64 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[8:9], v[6:7]
; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
; SI-NEXT: v_mov_b32_e32 v6, v8
; SI-NEXT: v_mov_b32_e32 v7, v9
; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
; SI-NEXT: s_cbranch_execnz .LBB94_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[8:9]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_umax_i64_noret:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: flat_load_dwordx2 v[6:7], v[0:1]
; VI-NEXT: s_mov_b64 s[4:5], 0
; VI-NEXT: .LBB94_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_cmp_gt_u64_e32 vcc, v[6:7], v[2:3]
; VI-NEXT: v_cndmask_b32_e32 v5, v3, v7, vcc
; VI-NEXT: v_cndmask_b32_e32 v4, v2, v6, vcc
; VI-NEXT: flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
; VI-NEXT: v_mov_b32_e32 v7, v5
; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; VI-NEXT: v_mov_b32_e32 v6, v4
; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
; VI-NEXT: s_cbranch_execnz .LBB94_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[4:5]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_umax_i64_noret:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: global_load_dwordx2 v[6:7], v[0:1], off
; GFX9-NEXT: s_mov_b64 s[4:5], 0
; GFX9-NEXT: .LBB94_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_cmp_gt_u64_e32 vcc, v[6:7], v[2:3]
; GFX9-NEXT: v_cndmask_b32_e32 v5, v3, v7, vcc
; GFX9-NEXT: v_cndmask_b32_e32 v4, v2, v6, vcc
; GFX9-NEXT: global_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7], off glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
; GFX9-NEXT: v_mov_b32_e32 v7, v5
; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; GFX9-NEXT: v_mov_b32_e32 v6, v4
; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_cbranch_execnz .LBB94_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %tmp0 = atomicrmw umax ptr addrspace(1) %ptr, i64 %in seq_cst
  ret void
}
|
|
|
|
; Same as global_atomic_umax_i64_noret but through a GEP of +4 i64 elements
; (32 bytes). SI/GFX9 fold the 32-byte displacement into the memory operand
; (addr64 offset:32 / off offset:32); VI has no offset form for flat and
; materializes the address with v_add/v_addc first.
; NOTE: check lines are autogenerated (update_llc_test_checks.py).
define void @global_atomic_umax_i64_noret_offset(ptr addrspace(1) %out, i64 %in) {
; SI-LABEL: global_atomic_umax_i64_noret_offset:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
; SI-NEXT: buffer_load_dwordx2 v[6:7], v[0:1], s[4:7], 0 addr64 offset:32
; SI-NEXT: s_mov_b64 s[8:9], 0
; SI-NEXT: .LBB95_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cmp_gt_u64_e32 vcc, v[6:7], v[2:3]
; SI-NEXT: v_cndmask_b32_e32 v5, v3, v7, vcc
; SI-NEXT: v_cndmask_b32_e32 v4, v2, v6, vcc
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mov_b32_e32 v11, v7
; SI-NEXT: v_mov_b32_e32 v10, v6
; SI-NEXT: v_mov_b32_e32 v9, v5
; SI-NEXT: v_mov_b32_e32 v8, v4
; SI-NEXT: buffer_atomic_cmpswap_x2 v[8:11], v[0:1], s[4:7], 0 addr64 offset:32 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[8:9], v[6:7]
; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
; SI-NEXT: v_mov_b32_e32 v6, v8
; SI-NEXT: v_mov_b32_e32 v7, v9
; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
; SI-NEXT: s_cbranch_execnz .LBB95_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[8:9]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_umax_i64_noret_offset:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_add_u32_e32 v0, vcc, 32, v0
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI-NEXT: flat_load_dwordx2 v[6:7], v[0:1]
; VI-NEXT: s_mov_b64 s[4:5], 0
; VI-NEXT: .LBB95_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_cmp_gt_u64_e32 vcc, v[6:7], v[2:3]
; VI-NEXT: v_cndmask_b32_e32 v5, v3, v7, vcc
; VI-NEXT: v_cndmask_b32_e32 v4, v2, v6, vcc
; VI-NEXT: flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
; VI-NEXT: v_mov_b32_e32 v7, v5
; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; VI-NEXT: v_mov_b32_e32 v6, v4
; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
; VI-NEXT: s_cbranch_execnz .LBB95_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[4:5]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_umax_i64_noret_offset:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: global_load_dwordx2 v[6:7], v[0:1], off offset:32
; GFX9-NEXT: s_mov_b64 s[4:5], 0
; GFX9-NEXT: .LBB95_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_cmp_gt_u64_e32 vcc, v[6:7], v[2:3]
; GFX9-NEXT: v_cndmask_b32_e32 v5, v3, v7, vcc
; GFX9-NEXT: v_cndmask_b32_e32 v4, v2, v6, vcc
; GFX9-NEXT: global_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7], off offset:32 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
; GFX9-NEXT: v_mov_b32_e32 v7, v5
; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; GFX9-NEXT: v_mov_b32_e32 v6, v4
; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_cbranch_execnz .LBB95_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %gep = getelementptr i64, ptr addrspace(1) %out, i64 4
  %tmp0 = atomicrmw umax ptr addrspace(1) %gep, i64 %in seq_cst
  ret void
}
|
|
|
|
; Value-returning seq_cst atomicrmw umax on a VGPR global pointer. The
; cmpxchg loop's loaded value is the atomicrmw result; all targets copy it
; into the return registers v[0:1] (on SI the loop already keeps the result
; in v[0:1]).
; NOTE: check lines are autogenerated (update_llc_test_checks.py).
define i64 @global_atomic_umax_i64_ret(ptr addrspace(1) %ptr, i64 %in) {
; SI-LABEL: global_atomic_umax_i64_ret:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v5, v3
; SI-NEXT: v_mov_b32_e32 v4, v2
; SI-NEXT: v_mov_b32_e32 v7, v1
; SI-NEXT: v_mov_b32_e32 v6, v0
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
; SI-NEXT: buffer_load_dwordx2 v[0:1], v[6:7], s[4:7], 0 addr64
; SI-NEXT: s_mov_b64 s[8:9], 0
; SI-NEXT: .LBB96_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_mov_b32_e32 v11, v1
; SI-NEXT: v_mov_b32_e32 v10, v0
; SI-NEXT: v_cmp_gt_u64_e32 vcc, v[10:11], v[4:5]
; SI-NEXT: v_cndmask_b32_e32 v9, v5, v11, vcc
; SI-NEXT: v_cndmask_b32_e32 v8, v4, v10, vcc
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mov_b32_e32 v0, v8
; SI-NEXT: v_mov_b32_e32 v1, v9
; SI-NEXT: v_mov_b32_e32 v2, v10
; SI-NEXT: v_mov_b32_e32 v3, v11
; SI-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v[6:7], s[4:7], 0 addr64 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
; SI-NEXT: s_cbranch_execnz .LBB96_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[8:9]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_umax_i64_ret:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: flat_load_dwordx2 v[4:5], v[0:1]
; VI-NEXT: s_mov_b64 s[4:5], 0
; VI-NEXT: .LBB96_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_mov_b32_e32 v7, v5
; VI-NEXT: v_mov_b32_e32 v6, v4
; VI-NEXT: v_cmp_gt_u64_e32 vcc, v[6:7], v[2:3]
; VI-NEXT: v_cndmask_b32_e32 v5, v3, v7, vcc
; VI-NEXT: v_cndmask_b32_e32 v4, v2, v6, vcc
; VI-NEXT: flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
; VI-NEXT: s_cbranch_execnz .LBB96_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[4:5]
; VI-NEXT: v_mov_b32_e32 v0, v4
; VI-NEXT: v_mov_b32_e32 v1, v5
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_umax_i64_ret:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: global_load_dwordx2 v[4:5], v[0:1], off
; GFX9-NEXT: s_mov_b64 s[4:5], 0
; GFX9-NEXT: .LBB96_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v7, v5
; GFX9-NEXT: v_mov_b32_e32 v6, v4
; GFX9-NEXT: v_cmp_gt_u64_e32 vcc, v[6:7], v[2:3]
; GFX9-NEXT: v_cndmask_b32_e32 v5, v3, v7, vcc
; GFX9-NEXT: v_cndmask_b32_e32 v4, v2, v6, vcc
; GFX9-NEXT: global_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7], off glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_cbranch_execnz .LBB96_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX9-NEXT: v_mov_b32_e32 v0, v4
; GFX9-NEXT: v_mov_b32_e32 v1, v5
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %result = atomicrmw umax ptr addrspace(1) %ptr, i64 %in seq_cst
  ret i64 %result
}
|
|
|
|
; Value-returning seq_cst atomicrmw umax at a 32-byte offset from the pointer
; argument. Combines the ret-form loop of global_atomic_umax_i64_ret with the
; offset handling of the noret_offset test (VI computes the address with
; v_add/v_addc; SI/GFX9 use an immediate offset on the memory instruction).
; NOTE: check lines are autogenerated (update_llc_test_checks.py).
define i64 @global_atomic_umax_i64_ret_offset(ptr addrspace(1) %out, i64 %in) {
; SI-LABEL: global_atomic_umax_i64_ret_offset:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v5, v3
; SI-NEXT: v_mov_b32_e32 v4, v2
; SI-NEXT: v_mov_b32_e32 v7, v1
; SI-NEXT: v_mov_b32_e32 v6, v0
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
; SI-NEXT: buffer_load_dwordx2 v[0:1], v[6:7], s[4:7], 0 addr64 offset:32
; SI-NEXT: s_mov_b64 s[8:9], 0
; SI-NEXT: .LBB97_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_mov_b32_e32 v11, v1
; SI-NEXT: v_mov_b32_e32 v10, v0
; SI-NEXT: v_cmp_gt_u64_e32 vcc, v[10:11], v[4:5]
; SI-NEXT: v_cndmask_b32_e32 v9, v5, v11, vcc
; SI-NEXT: v_cndmask_b32_e32 v8, v4, v10, vcc
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mov_b32_e32 v0, v8
; SI-NEXT: v_mov_b32_e32 v1, v9
; SI-NEXT: v_mov_b32_e32 v2, v10
; SI-NEXT: v_mov_b32_e32 v3, v11
; SI-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v[6:7], s[4:7], 0 addr64 offset:32 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
; SI-NEXT: s_cbranch_execnz .LBB97_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[8:9]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_umax_i64_ret_offset:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_add_u32_e32 v4, vcc, 32, v0
; VI-NEXT: v_addc_u32_e32 v5, vcc, 0, v1, vcc
; VI-NEXT: flat_load_dwordx2 v[0:1], v[4:5]
; VI-NEXT: s_mov_b64 s[4:5], 0
; VI-NEXT: .LBB97_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_mov_b32_e32 v9, v1
; VI-NEXT: v_mov_b32_e32 v8, v0
; VI-NEXT: v_cmp_gt_u64_e32 vcc, v[8:9], v[2:3]
; VI-NEXT: v_cndmask_b32_e32 v7, v3, v9, vcc
; VI-NEXT: v_cndmask_b32_e32 v6, v2, v8, vcc
; VI-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[6:9] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
; VI-NEXT: s_cbranch_execnz .LBB97_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[4:5]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_umax_i64_ret_offset:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: global_load_dwordx2 v[4:5], v[0:1], off offset:32
; GFX9-NEXT: s_mov_b64 s[4:5], 0
; GFX9-NEXT: .LBB97_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v7, v5
; GFX9-NEXT: v_mov_b32_e32 v6, v4
; GFX9-NEXT: v_cmp_gt_u64_e32 vcc, v[6:7], v[2:3]
; GFX9-NEXT: v_cndmask_b32_e32 v5, v3, v7, vcc
; GFX9-NEXT: v_cndmask_b32_e32 v4, v2, v6, vcc
; GFX9-NEXT: global_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7], off offset:32 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_cbranch_execnz .LBB97_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX9-NEXT: v_mov_b32_e32 v0, v4
; GFX9-NEXT: v_mov_b32_e32 v1, v5
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %gep = getelementptr i64, ptr addrspace(1) %out, i64 4
  %result = atomicrmw umax ptr addrspace(1) %gep, i64 %in seq_cst
  ret i64 %result
}
|
|
|
|
; amdgpu_gfx calling convention with the pointer and operand in SGPRs (inreg).
; The compare uses the SGPR pair directly (v_cmp_lt_u64 s[..], v[..] — operand
; order flipped versus the VGPR tests). On SI, s[6:7] must be repurposed for
; the buffer descriptor, so the incoming values are saved/restored via
; v_writelane/v_readlane into a spilled VGPR (v10) around the loop.
; NOTE: check lines are autogenerated (update_llc_test_checks.py).
define amdgpu_gfx void @global_atomic_umax_i64_noret_scalar(ptr addrspace(1) inreg %ptr, i64 inreg %in) {
; SI-LABEL: global_atomic_umax_i64_noret_scalar:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_writelane_b32 v10, s6, 0
; SI-NEXT: v_writelane_b32 v10, s7, 1
; SI-NEXT: s_mov_b32 s35, s7
; SI-NEXT: s_mov_b32 s34, s6
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: buffer_load_dwordx2 v[2:3], off, s[4:7], 0
; SI-NEXT: s_mov_b64 s[36:37], 0
; SI-NEXT: v_mov_b32_e32 v4, s35
; SI-NEXT: v_mov_b32_e32 v5, s34
; SI-NEXT: .LBB98_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cmp_lt_u64_e32 vcc, s[34:35], v[2:3]
; SI-NEXT: v_cndmask_b32_e32 v1, v4, v3, vcc
; SI-NEXT: v_cndmask_b32_e32 v0, v5, v2, vcc
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mov_b32_e32 v9, v3
; SI-NEXT: v_mov_b32_e32 v8, v2
; SI-NEXT: v_mov_b32_e32 v7, v1
; SI-NEXT: v_mov_b32_e32 v6, v0
; SI-NEXT: buffer_atomic_cmpswap_x2 v[6:9], off, s[4:7], 0 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[6:7], v[2:3]
; SI-NEXT: s_or_b64 s[36:37], vcc, s[36:37]
; SI-NEXT: v_mov_b32_e32 v2, v6
; SI-NEXT: v_mov_b32_e32 v3, v7
; SI-NEXT: s_andn2_b64 exec, exec, s[36:37]
; SI-NEXT: s_cbranch_execnz .LBB98_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[36:37]
; SI-NEXT: v_readlane_b32 s7, v10, 1
; SI-NEXT: v_readlane_b32 s6, v10, 0
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 ; 4-byte Folded Reload
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_umax_i64_noret_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: v_mov_b32_e32 v1, s5
; VI-NEXT: flat_load_dwordx2 v[2:3], v[0:1]
; VI-NEXT: v_mov_b32_e32 v4, s4
; VI-NEXT: s_mov_b64 s[34:35], 0
; VI-NEXT: v_mov_b32_e32 v6, s7
; VI-NEXT: v_mov_b32_e32 v7, s6
; VI-NEXT: v_mov_b32_e32 v5, s5
; VI-NEXT: .LBB98_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_cmp_lt_u64_e32 vcc, s[6:7], v[2:3]
; VI-NEXT: v_cndmask_b32_e32 v1, v6, v3, vcc
; VI-NEXT: v_cndmask_b32_e32 v0, v7, v2, vcc
; VI-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
; VI-NEXT: v_mov_b32_e32 v3, v1
; VI-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
; VI-NEXT: v_mov_b32_e32 v2, v0
; VI-NEXT: s_andn2_b64 exec, exec, s[34:35]
; VI-NEXT: s_cbranch_execnz .LBB98_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[34:35]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_umax_i64_noret_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v4, 0
; GFX9-NEXT: global_load_dwordx2 v[2:3], v4, s[4:5]
; GFX9-NEXT: s_mov_b64 s[34:35], 0
; GFX9-NEXT: v_mov_b32_e32 v5, s7
; GFX9-NEXT: v_mov_b32_e32 v6, s6
; GFX9-NEXT: .LBB98_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_cmp_lt_u64_e32 vcc, s[6:7], v[2:3]
; GFX9-NEXT: v_cndmask_b32_e32 v1, v5, v3, vcc
; GFX9-NEXT: v_cndmask_b32_e32 v0, v6, v2, vcc
; GFX9-NEXT: global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[4:5] glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
; GFX9-NEXT: v_mov_b32_e32 v3, v1
; GFX9-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
; GFX9-NEXT: v_mov_b32_e32 v2, v0
; GFX9-NEXT: s_andn2_b64 exec, exec, s[34:35]
; GFX9-NEXT: s_cbranch_execnz .LBB98_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[34:35]
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %tmp0 = atomicrmw umax ptr addrspace(1) %ptr, i64 %in seq_cst
  ret void
}
|
|
|
|
; Scalar (inreg) no-return umax at a 32-byte offset. SI folds the offset into
; the buffer instruction; VI performs the add in SGPRs (s_add_u32/s_addc_u32)
; before moving the address into VGPRs; GFX9 uses the saddr form with an
; immediate offset. Same SI writelane/readlane save of s[6:7] as the
; non-offset scalar test.
; NOTE: check lines are autogenerated (update_llc_test_checks.py).
define amdgpu_gfx void @global_atomic_umax_i64_noret_offset_scalar(ptr addrspace(1) inreg %out, i64 inreg %in) {
; SI-LABEL: global_atomic_umax_i64_noret_offset_scalar:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_writelane_b32 v10, s6, 0
; SI-NEXT: v_writelane_b32 v10, s7, 1
; SI-NEXT: s_mov_b32 s35, s7
; SI-NEXT: s_mov_b32 s34, s6
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: buffer_load_dwordx2 v[2:3], off, s[4:7], 0 offset:32
; SI-NEXT: s_mov_b64 s[36:37], 0
; SI-NEXT: v_mov_b32_e32 v4, s35
; SI-NEXT: v_mov_b32_e32 v5, s34
; SI-NEXT: .LBB99_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cmp_lt_u64_e32 vcc, s[34:35], v[2:3]
; SI-NEXT: v_cndmask_b32_e32 v1, v4, v3, vcc
; SI-NEXT: v_cndmask_b32_e32 v0, v5, v2, vcc
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mov_b32_e32 v9, v3
; SI-NEXT: v_mov_b32_e32 v8, v2
; SI-NEXT: v_mov_b32_e32 v7, v1
; SI-NEXT: v_mov_b32_e32 v6, v0
; SI-NEXT: buffer_atomic_cmpswap_x2 v[6:9], off, s[4:7], 0 offset:32 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[6:7], v[2:3]
; SI-NEXT: s_or_b64 s[36:37], vcc, s[36:37]
; SI-NEXT: v_mov_b32_e32 v2, v6
; SI-NEXT: v_mov_b32_e32 v3, v7
; SI-NEXT: s_andn2_b64 exec, exec, s[36:37]
; SI-NEXT: s_cbranch_execnz .LBB99_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[36:37]
; SI-NEXT: v_readlane_b32 s7, v10, 1
; SI-NEXT: v_readlane_b32 s6, v10, 0
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 ; 4-byte Folded Reload
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_umax_i64_noret_offset_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_add_u32 s34, s4, 32
; VI-NEXT: s_addc_u32 s35, s5, 0
; VI-NEXT: v_mov_b32_e32 v4, s34
; VI-NEXT: v_mov_b32_e32 v5, s35
; VI-NEXT: flat_load_dwordx2 v[2:3], v[4:5]
; VI-NEXT: s_mov_b64 s[34:35], 0
; VI-NEXT: v_mov_b32_e32 v6, s7
; VI-NEXT: v_mov_b32_e32 v7, s6
; VI-NEXT: .LBB99_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_cmp_lt_u64_e32 vcc, s[6:7], v[2:3]
; VI-NEXT: v_cndmask_b32_e32 v1, v6, v3, vcc
; VI-NEXT: v_cndmask_b32_e32 v0, v7, v2, vcc
; VI-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
; VI-NEXT: v_mov_b32_e32 v3, v1
; VI-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
; VI-NEXT: v_mov_b32_e32 v2, v0
; VI-NEXT: s_andn2_b64 exec, exec, s[34:35]
; VI-NEXT: s_cbranch_execnz .LBB99_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[34:35]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_umax_i64_noret_offset_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v4, 0
; GFX9-NEXT: global_load_dwordx2 v[2:3], v4, s[4:5] offset:32
; GFX9-NEXT: s_mov_b64 s[34:35], 0
; GFX9-NEXT: v_mov_b32_e32 v5, s7
; GFX9-NEXT: v_mov_b32_e32 v6, s6
; GFX9-NEXT: .LBB99_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_cmp_lt_u64_e32 vcc, s[6:7], v[2:3]
; GFX9-NEXT: v_cndmask_b32_e32 v1, v5, v3, vcc
; GFX9-NEXT: v_cndmask_b32_e32 v0, v6, v2, vcc
; GFX9-NEXT: global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[4:5] offset:32 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
; GFX9-NEXT: v_mov_b32_e32 v3, v1
; GFX9-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
; GFX9-NEXT: v_mov_b32_e32 v2, v0
; GFX9-NEXT: s_andn2_b64 exec, exec, s[34:35]
; GFX9-NEXT: s_cbranch_execnz .LBB99_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[34:35]
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %gep = getelementptr i64, ptr addrspace(1) %out, i64 4
  %tmp0 = atomicrmw umax ptr addrspace(1) %gep, i64 %in seq_cst
  ret void
}
|
|
|
|
; Scalar (inreg) value-returning umax. Same cmpxchg-loop expansion with the
; SGPR operand compared directly; the loaded value ends up in v[0:1] for the
; return. On SI the incoming s[6:7] are again preserved via
; v_writelane/v_readlane into a spilled VGPR (v10).
; NOTE: check lines are autogenerated (update_llc_test_checks.py).
define amdgpu_gfx i64 @global_atomic_umax_i64_ret_scalar(ptr addrspace(1) inreg %ptr, i64 inreg %in) {
; SI-LABEL: global_atomic_umax_i64_ret_scalar:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_writelane_b32 v10, s6, 0
; SI-NEXT: v_writelane_b32 v10, s7, 1
; SI-NEXT: s_mov_b32 s35, s7
; SI-NEXT: s_mov_b32 s34, s6
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0
; SI-NEXT: s_mov_b64 s[36:37], 0
; SI-NEXT: v_mov_b32_e32 v4, s35
; SI-NEXT: v_mov_b32_e32 v5, s34
; SI-NEXT: .LBB100_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_mov_b32_e32 v9, v1
; SI-NEXT: v_mov_b32_e32 v8, v0
; SI-NEXT: v_cmp_lt_u64_e32 vcc, s[34:35], v[8:9]
; SI-NEXT: v_cndmask_b32_e32 v7, v4, v9, vcc
; SI-NEXT: v_cndmask_b32_e32 v6, v5, v8, vcc
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mov_b32_e32 v0, v6
; SI-NEXT: v_mov_b32_e32 v1, v7
; SI-NEXT: v_mov_b32_e32 v2, v8
; SI-NEXT: v_mov_b32_e32 v3, v9
; SI-NEXT: buffer_atomic_cmpswap_x2 v[0:3], off, s[4:7], 0 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
; SI-NEXT: s_or_b64 s[36:37], vcc, s[36:37]
; SI-NEXT: s_andn2_b64 exec, exec, s[36:37]
; SI-NEXT: s_cbranch_execnz .LBB100_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[36:37]
; SI-NEXT: v_readlane_b32 s7, v10, 1
; SI-NEXT: v_readlane_b32 s6, v10, 0
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 ; 4-byte Folded Reload
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_umax_i64_ret_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: v_mov_b32_e32 v1, s5
; VI-NEXT: flat_load_dwordx2 v[0:1], v[0:1]
; VI-NEXT: v_mov_b32_e32 v2, s4
; VI-NEXT: s_mov_b64 s[34:35], 0
; VI-NEXT: v_mov_b32_e32 v4, s7
; VI-NEXT: v_mov_b32_e32 v5, s6
; VI-NEXT: v_mov_b32_e32 v3, s5
; VI-NEXT: .LBB100_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_mov_b32_e32 v9, v1
; VI-NEXT: v_mov_b32_e32 v8, v0
; VI-NEXT: v_cmp_lt_u64_e32 vcc, s[6:7], v[8:9]
; VI-NEXT: v_cndmask_b32_e32 v7, v4, v9, vcc
; VI-NEXT: v_cndmask_b32_e32 v6, v5, v8, vcc
; VI-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[2:3], v[6:9] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
; VI-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
; VI-NEXT: s_andn2_b64 exec, exec, s[34:35]
; VI-NEXT: s_cbranch_execnz .LBB100_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[34:35]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_umax_i64_ret_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v2, 0
; GFX9-NEXT: global_load_dwordx2 v[0:1], v2, s[4:5]
; GFX9-NEXT: s_mov_b64 s[34:35], 0
; GFX9-NEXT: v_mov_b32_e32 v3, s7
; GFX9-NEXT: v_mov_b32_e32 v4, s6
; GFX9-NEXT: .LBB100_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v8, v1
; GFX9-NEXT: v_mov_b32_e32 v7, v0
; GFX9-NEXT: v_cmp_lt_u64_e32 vcc, s[6:7], v[7:8]
; GFX9-NEXT: v_cndmask_b32_e32 v6, v3, v8, vcc
; GFX9-NEXT: v_cndmask_b32_e32 v5, v4, v7, vcc
; GFX9-NEXT: global_atomic_cmpswap_x2 v[0:1], v2, v[5:8], s[4:5] glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[7:8]
; GFX9-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
; GFX9-NEXT: s_andn2_b64 exec, exec, s[34:35]
; GFX9-NEXT: s_cbranch_execnz .LBB100_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[34:35]
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %result = atomicrmw umax ptr addrspace(1) %ptr, i64 %in seq_cst
  ret i64 %result
}
|
|
|
|
define amdgpu_gfx i64 @global_atomic_umax_i64_ret_offset_scalar(ptr addrspace(1) inreg %out, i64 inreg %in) {
; SI-LABEL: global_atomic_umax_i64_ret_offset_scalar:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_writelane_b32 v10, s6, 0
; SI-NEXT: v_writelane_b32 v10, s7, 1
; SI-NEXT: s_mov_b32 s35, s7
; SI-NEXT: s_mov_b32 s34, s6
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0 offset:32
; SI-NEXT: s_mov_b64 s[36:37], 0
; SI-NEXT: v_mov_b32_e32 v4, s35
; SI-NEXT: v_mov_b32_e32 v5, s34
; SI-NEXT: .LBB101_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_mov_b32_e32 v9, v1
; SI-NEXT: v_mov_b32_e32 v8, v0
; SI-NEXT: v_cmp_lt_u64_e32 vcc, s[34:35], v[8:9]
; SI-NEXT: v_cndmask_b32_e32 v7, v4, v9, vcc
; SI-NEXT: v_cndmask_b32_e32 v6, v5, v8, vcc
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mov_b32_e32 v0, v6
; SI-NEXT: v_mov_b32_e32 v1, v7
; SI-NEXT: v_mov_b32_e32 v2, v8
; SI-NEXT: v_mov_b32_e32 v3, v9
; SI-NEXT: buffer_atomic_cmpswap_x2 v[0:3], off, s[4:7], 0 offset:32 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
; SI-NEXT: s_or_b64 s[36:37], vcc, s[36:37]
; SI-NEXT: s_andn2_b64 exec, exec, s[36:37]
; SI-NEXT: s_cbranch_execnz .LBB101_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[36:37]
; SI-NEXT: v_readlane_b32 s7, v10, 1
; SI-NEXT: v_readlane_b32 s6, v10, 0
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 ; 4-byte Folded Reload
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_umax_i64_ret_offset_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_add_u32 s34, s4, 32
; VI-NEXT: s_addc_u32 s35, s5, 0
; VI-NEXT: v_mov_b32_e32 v2, s34
; VI-NEXT: v_mov_b32_e32 v3, s35
; VI-NEXT: flat_load_dwordx2 v[0:1], v[2:3]
; VI-NEXT: s_mov_b64 s[34:35], 0
; VI-NEXT: v_mov_b32_e32 v4, s7
; VI-NEXT: v_mov_b32_e32 v5, s6
; VI-NEXT: .LBB101_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_mov_b32_e32 v9, v1
; VI-NEXT: v_mov_b32_e32 v8, v0
; VI-NEXT: v_cmp_lt_u64_e32 vcc, s[6:7], v[8:9]
; VI-NEXT: v_cndmask_b32_e32 v7, v4, v9, vcc
; VI-NEXT: v_cndmask_b32_e32 v6, v5, v8, vcc
; VI-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[2:3], v[6:9] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
; VI-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
; VI-NEXT: s_andn2_b64 exec, exec, s[34:35]
; VI-NEXT: s_cbranch_execnz .LBB101_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[34:35]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_umax_i64_ret_offset_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v2, 0
; GFX9-NEXT: global_load_dwordx2 v[0:1], v2, s[4:5] offset:32
; GFX9-NEXT: s_mov_b64 s[34:35], 0
; GFX9-NEXT: v_mov_b32_e32 v3, s7
; GFX9-NEXT: v_mov_b32_e32 v4, s6
; GFX9-NEXT: .LBB101_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v8, v1
; GFX9-NEXT: v_mov_b32_e32 v7, v0
; GFX9-NEXT: v_cmp_lt_u64_e32 vcc, s[6:7], v[7:8]
; GFX9-NEXT: v_cndmask_b32_e32 v6, v3, v8, vcc
; GFX9-NEXT: v_cndmask_b32_e32 v5, v4, v7, vcc
; GFX9-NEXT: global_atomic_cmpswap_x2 v[0:1], v2, v[5:8], s[4:5] offset:32 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[7:8]
; GFX9-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
; GFX9-NEXT: s_andn2_b64 exec, exec, s[34:35]
; GFX9-NEXT: s_cbranch_execnz .LBB101_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[34:35]
; GFX9-NEXT: s_setpc_b64 s[30:31]
; (review) IR below is the test input; CHECK lines above are autogenerated —
; regenerate with update_llc_test_checks.py instead of editing them by hand.
  %gep = getelementptr i64, ptr addrspace(1) %out, i64 4
  %result = atomicrmw umax ptr addrspace(1) %gep, i64 %in seq_cst
  ret i64 %result
}
define amdgpu_kernel void @atomic_umax_i64_addr64_offset(ptr addrspace(1) %out, i64 %in, i64 %index) {
; SI-LABEL: atomic_umax_i64_addr64_offset:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0xd
; SI-NEXT: s_load_dwordx4 s[0:3], s[2:3], 0x9
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_lshl_b64 s[4:5], s[4:5], 3
; SI-NEXT: s_add_u32 s4, s0, s4
; SI-NEXT: s_addc_u32 s5, s1, s5
; SI-NEXT: s_load_dwordx2 s[8:9], s[4:5], 0x8
; SI-NEXT: s_mov_b64 s[0:1], 0
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: v_mov_b32_e32 v4, s3
; SI-NEXT: v_mov_b32_e32 v5, s2
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v2, s8
; SI-NEXT: v_mov_b32_e32 v3, s9
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: .LBB102_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: v_cmp_lt_u64_e32 vcc, s[2:3], v[2:3]
; SI-NEXT: v_cndmask_b32_e32 v1, v4, v3, vcc
; SI-NEXT: v_cndmask_b32_e32 v0, v5, v2, vcc
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mov_b32_e32 v9, v3
; SI-NEXT: v_mov_b32_e32 v8, v2
; SI-NEXT: v_mov_b32_e32 v7, v1
; SI-NEXT: v_mov_b32_e32 v6, v0
; SI-NEXT: buffer_atomic_cmpswap_x2 v[6:9], off, s[4:7], 0 offset:32 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[6:7], v[2:3]
; SI-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
; SI-NEXT: v_mov_b32_e32 v2, v6
; SI-NEXT: v_mov_b32_e32 v3, v7
; SI-NEXT: s_andn2_b64 exec, exec, s[0:1]
; SI-NEXT: s_cbranch_execnz .LBB102_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_umax_i64_addr64_offset:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx2 s[6:7], s[2:3], 0x34
; VI-NEXT: s_load_dwordx4 s[0:3], s[2:3], 0x24
; VI-NEXT: s_mov_b64 s[4:5], 0
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_lshl_b64 s[6:7], s[6:7], 3
; VI-NEXT: s_add_u32 s0, s0, s6
; VI-NEXT: s_addc_u32 s1, s1, s7
; VI-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x20
; VI-NEXT: s_add_u32 s0, s0, 32
; VI-NEXT: s_addc_u32 s1, s1, 0
; VI-NEXT: v_mov_b32_e32 v5, s1
; VI-NEXT: v_mov_b32_e32 v6, s3
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v2, s6
; VI-NEXT: v_mov_b32_e32 v7, s2
; VI-NEXT: v_mov_b32_e32 v3, s7
; VI-NEXT: v_mov_b32_e32 v4, s0
; VI-NEXT: .LBB102_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: v_cmp_lt_u64_e32 vcc, s[2:3], v[2:3]
; VI-NEXT: v_cndmask_b32_e32 v1, v6, v3, vcc
; VI-NEXT: v_cndmask_b32_e32 v0, v7, v2, vcc
; VI-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
; VI-NEXT: v_mov_b32_e32 v3, v1
; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; VI-NEXT: v_mov_b32_e32 v2, v0
; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
; VI-NEXT: s_cbranch_execnz .LBB102_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_umax_i64_addr64_offset:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x34
; GFX9-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x24
; GFX9-NEXT: s_mov_b64 s[2:3], 0
; GFX9-NEXT: v_mov_b32_e32 v6, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: s_lshl_b64 s[0:1], s[0:1], 3
; GFX9-NEXT: s_add_u32 s0, s4, s0
; GFX9-NEXT: s_addc_u32 s1, s5, s1
; GFX9-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x20
; GFX9-NEXT: v_mov_b32_e32 v4, s7
; GFX9-NEXT: v_mov_b32_e32 v5, s6
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v2, s4
; GFX9-NEXT: v_mov_b32_e32 v3, s5
; GFX9-NEXT: .LBB102_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: v_cmp_lt_u64_e32 vcc, s[6:7], v[2:3]
; GFX9-NEXT: v_cndmask_b32_e32 v1, v4, v3, vcc
; GFX9-NEXT: v_cndmask_b32_e32 v0, v5, v2, vcc
; GFX9-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] offset:32 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
; GFX9-NEXT: v_mov_b32_e32 v3, v1
; GFX9-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
; GFX9-NEXT: v_mov_b32_e32 v2, v0
; GFX9-NEXT: s_andn2_b64 exec, exec, s[2:3]
; GFX9-NEXT: s_cbranch_execnz .LBB102_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_endpgm
; (review) IR below is the test input; CHECK lines above are autogenerated —
; regenerate with update_llc_test_checks.py instead of editing them by hand.
entry:
  %ptr = getelementptr i64, ptr addrspace(1) %out, i64 %index
  %gep = getelementptr i64, ptr addrspace(1) %ptr, i64 4
  %tmp0 = atomicrmw umax ptr addrspace(1) %gep, i64 %in seq_cst
  ret void
}
define amdgpu_kernel void @atomic_umax_i64_ret_addr64_offset(ptr addrspace(1) %out, ptr addrspace(1) %out2, i64 %in, i64 %index) {
; SI-LABEL: atomic_umax_i64_ret_addr64_offset:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dwordx8 s[0:7], s[2:3], 0x9
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_lshl_b64 s[6:7], s[6:7], 3
; SI-NEXT: s_add_u32 s8, s0, s6
; SI-NEXT: s_addc_u32 s9, s1, s7
; SI-NEXT: s_load_dwordx2 s[6:7], s[8:9], 0x8
; SI-NEXT: s_mov_b64 s[0:1], 0
; SI-NEXT: s_mov_b32 s11, 0xf000
; SI-NEXT: v_mov_b32_e32 v8, s5
; SI-NEXT: v_mov_b32_e32 v9, s4
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v2, s6
; SI-NEXT: v_mov_b32_e32 v3, s7
; SI-NEXT: s_mov_b32 s10, -1
; SI-NEXT: .LBB103_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: v_cmp_lt_u64_e32 vcc, s[4:5], v[2:3]
; SI-NEXT: v_cndmask_b32_e32 v1, v8, v3, vcc
; SI-NEXT: v_cndmask_b32_e32 v0, v9, v2, vcc
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mov_b32_e32 v7, v3
; SI-NEXT: v_mov_b32_e32 v6, v2
; SI-NEXT: v_mov_b32_e32 v5, v1
; SI-NEXT: v_mov_b32_e32 v4, v0
; SI-NEXT: buffer_atomic_cmpswap_x2 v[4:7], off, s[8:11], 0 offset:32 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
; SI-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
; SI-NEXT: v_mov_b32_e32 v2, v4
; SI-NEXT: v_mov_b32_e32 v3, v5
; SI-NEXT: s_andn2_b64 exec, exec, s[0:1]
; SI-NEXT: s_cbranch_execnz .LBB103_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[0:1]
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: s_mov_b32 s4, s2
; SI-NEXT: s_mov_b32 s5, s3
; SI-NEXT: buffer_store_dwordx2 v[4:5], off, s[4:7], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_umax_i64_ret_addr64_offset:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx8 s[0:7], s[2:3], 0x24
; VI-NEXT: s_mov_b64 s[8:9], 0
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_lshl_b64 s[6:7], s[6:7], 3
; VI-NEXT: s_add_u32 s0, s0, s6
; VI-NEXT: s_addc_u32 s1, s1, s7
; VI-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x20
; VI-NEXT: s_add_u32 s0, s0, 32
; VI-NEXT: s_addc_u32 s1, s1, 0
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v4, s5
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v2, s6
; VI-NEXT: v_mov_b32_e32 v5, s4
; VI-NEXT: v_mov_b32_e32 v3, s7
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: .LBB103_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: v_mov_b32_e32 v9, v3
; VI-NEXT: v_mov_b32_e32 v8, v2
; VI-NEXT: v_cmp_lt_u64_e32 vcc, s[4:5], v[8:9]
; VI-NEXT: v_cndmask_b32_e32 v7, v4, v9, vcc
; VI-NEXT: v_cndmask_b32_e32 v6, v5, v8, vcc
; VI-NEXT: flat_atomic_cmpswap_x2 v[2:3], v[0:1], v[6:9] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[8:9]
; VI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
; VI-NEXT: s_andn2_b64 exec, exec, s[8:9]
; VI-NEXT: s_cbranch_execnz .LBB103_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[8:9]
; VI-NEXT: v_mov_b32_e32 v0, s2
; VI-NEXT: v_mov_b32_e32 v1, s3
; VI-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_umax_i64_ret_addr64_offset:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dwordx8 s[4:11], s[2:3], 0x24
; GFX9-NEXT: s_mov_b64 s[2:3], 0
; GFX9-NEXT: v_mov_b32_e32 v4, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: s_lshl_b64 s[0:1], s[10:11], 3
; GFX9-NEXT: s_add_u32 s0, s4, s0
; GFX9-NEXT: s_addc_u32 s1, s5, s1
; GFX9-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x20
; GFX9-NEXT: v_mov_b32_e32 v2, s9
; GFX9-NEXT: v_mov_b32_e32 v3, s8
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v0, s4
; GFX9-NEXT: v_mov_b32_e32 v1, s5
; GFX9-NEXT: .LBB103_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: v_mov_b32_e32 v8, v1
; GFX9-NEXT: v_mov_b32_e32 v7, v0
; GFX9-NEXT: v_cmp_lt_u64_e32 vcc, s[8:9], v[7:8]
; GFX9-NEXT: v_cndmask_b32_e32 v6, v2, v8, vcc
; GFX9-NEXT: v_cndmask_b32_e32 v5, v3, v7, vcc
; GFX9-NEXT: global_atomic_cmpswap_x2 v[0:1], v4, v[5:8], s[0:1] offset:32 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[7:8]
; GFX9-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
; GFX9-NEXT: s_andn2_b64 exec, exec, s[2:3]
; GFX9-NEXT: s_cbranch_execnz .LBB103_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[2:3]
; GFX9-NEXT: v_mov_b32_e32 v2, 0
; GFX9-NEXT: global_store_dwordx2 v2, v[0:1], s[6:7]
; GFX9-NEXT: s_endpgm
; (review) IR below is the test input; CHECK lines above are autogenerated —
; regenerate with update_llc_test_checks.py instead of editing them by hand.
entry:
  %ptr = getelementptr i64, ptr addrspace(1) %out, i64 %index
  %gep = getelementptr i64, ptr addrspace(1) %ptr, i64 4
  %tmp0 = atomicrmw umax ptr addrspace(1) %gep, i64 %in seq_cst
  store i64 %tmp0, ptr addrspace(1) %out2
  ret void
}
define amdgpu_kernel void @atomic_umax_i64_ret_addr64(ptr addrspace(1) %out, ptr addrspace(1) %out2, i64 %in, i64 %index) {
; SI-LABEL: atomic_umax_i64_ret_addr64:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dwordx8 s[0:7], s[2:3], 0x9
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_lshl_b64 s[6:7], s[6:7], 3
; SI-NEXT: s_add_u32 s8, s0, s6
; SI-NEXT: s_addc_u32 s9, s1, s7
; SI-NEXT: s_load_dwordx2 s[6:7], s[8:9], 0x0
; SI-NEXT: s_mov_b64 s[0:1], 0
; SI-NEXT: s_mov_b32 s11, 0xf000
; SI-NEXT: v_mov_b32_e32 v8, s5
; SI-NEXT: v_mov_b32_e32 v9, s4
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v2, s6
; SI-NEXT: v_mov_b32_e32 v3, s7
; SI-NEXT: s_mov_b32 s10, -1
; SI-NEXT: .LBB104_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: v_cmp_lt_u64_e32 vcc, s[4:5], v[2:3]
; SI-NEXT: v_cndmask_b32_e32 v1, v8, v3, vcc
; SI-NEXT: v_cndmask_b32_e32 v0, v9, v2, vcc
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mov_b32_e32 v7, v3
; SI-NEXT: v_mov_b32_e32 v6, v2
; SI-NEXT: v_mov_b32_e32 v5, v1
; SI-NEXT: v_mov_b32_e32 v4, v0
; SI-NEXT: buffer_atomic_cmpswap_x2 v[4:7], off, s[8:11], 0 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
; SI-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
; SI-NEXT: v_mov_b32_e32 v2, v4
; SI-NEXT: v_mov_b32_e32 v3, v5
; SI-NEXT: s_andn2_b64 exec, exec, s[0:1]
; SI-NEXT: s_cbranch_execnz .LBB104_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[0:1]
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: s_mov_b32 s4, s2
; SI-NEXT: s_mov_b32 s5, s3
; SI-NEXT: buffer_store_dwordx2 v[4:5], off, s[4:7], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_umax_i64_ret_addr64:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx8 s[0:7], s[2:3], 0x24
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_lshl_b64 s[6:7], s[6:7], 3
; VI-NEXT: s_add_u32 s6, s0, s6
; VI-NEXT: s_addc_u32 s7, s1, s7
; VI-NEXT: s_load_dwordx2 s[8:9], s[6:7], 0x0
; VI-NEXT: v_mov_b32_e32 v0, s6
; VI-NEXT: s_mov_b64 s[0:1], 0
; VI-NEXT: v_mov_b32_e32 v4, s5
; VI-NEXT: v_mov_b32_e32 v5, s4
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v2, s8
; VI-NEXT: v_mov_b32_e32 v3, s9
; VI-NEXT: v_mov_b32_e32 v1, s7
; VI-NEXT: .LBB104_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: v_mov_b32_e32 v9, v3
; VI-NEXT: v_mov_b32_e32 v8, v2
; VI-NEXT: v_cmp_lt_u64_e32 vcc, s[4:5], v[8:9]
; VI-NEXT: v_cndmask_b32_e32 v7, v4, v9, vcc
; VI-NEXT: v_cndmask_b32_e32 v6, v5, v8, vcc
; VI-NEXT: flat_atomic_cmpswap_x2 v[2:3], v[0:1], v[6:9] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[8:9]
; VI-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
; VI-NEXT: s_andn2_b64 exec, exec, s[0:1]
; VI-NEXT: s_cbranch_execnz .LBB104_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[0:1]
; VI-NEXT: v_mov_b32_e32 v0, s2
; VI-NEXT: v_mov_b32_e32 v1, s3
; VI-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_umax_i64_ret_addr64:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dwordx8 s[4:11], s[2:3], 0x24
; GFX9-NEXT: s_mov_b64 s[2:3], 0
; GFX9-NEXT: v_mov_b32_e32 v4, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: s_lshl_b64 s[0:1], s[10:11], 3
; GFX9-NEXT: s_add_u32 s0, s4, s0
; GFX9-NEXT: s_addc_u32 s1, s5, s1
; GFX9-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
; GFX9-NEXT: v_mov_b32_e32 v2, s9
; GFX9-NEXT: v_mov_b32_e32 v3, s8
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v0, s4
; GFX9-NEXT: v_mov_b32_e32 v1, s5
; GFX9-NEXT: .LBB104_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: v_mov_b32_e32 v8, v1
; GFX9-NEXT: v_mov_b32_e32 v7, v0
; GFX9-NEXT: v_cmp_lt_u64_e32 vcc, s[8:9], v[7:8]
; GFX9-NEXT: v_cndmask_b32_e32 v6, v2, v8, vcc
; GFX9-NEXT: v_cndmask_b32_e32 v5, v3, v7, vcc
; GFX9-NEXT: global_atomic_cmpswap_x2 v[0:1], v4, v[5:8], s[0:1] glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[7:8]
; GFX9-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
; GFX9-NEXT: s_andn2_b64 exec, exec, s[2:3]
; GFX9-NEXT: s_cbranch_execnz .LBB104_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[2:3]
; GFX9-NEXT: v_mov_b32_e32 v2, 0
; GFX9-NEXT: global_store_dwordx2 v2, v[0:1], s[6:7]
; GFX9-NEXT: s_endpgm
; (review) IR below is the test input; CHECK lines above are autogenerated —
; regenerate with update_llc_test_checks.py instead of editing them by hand.
entry:
  %ptr = getelementptr i64, ptr addrspace(1) %out, i64 %index
  %tmp0 = atomicrmw umax ptr addrspace(1) %ptr, i64 %in seq_cst
  store i64 %tmp0, ptr addrspace(1) %out2
  ret void
}
define void @global_atomic_umax_i64_noret_offset__amdgpu_no_remote_memory(ptr addrspace(1) %out, i64 %in) {
; SI-LABEL: global_atomic_umax_i64_noret_offset__amdgpu_no_remote_memory:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
; SI-NEXT: buffer_load_dwordx2 v[6:7], v[0:1], s[4:7], 0 addr64 offset:32
; SI-NEXT: s_mov_b64 s[8:9], 0
; SI-NEXT: .LBB105_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cmp_gt_u64_e32 vcc, v[6:7], v[2:3]
; SI-NEXT: v_cndmask_b32_e32 v5, v3, v7, vcc
; SI-NEXT: v_cndmask_b32_e32 v4, v2, v6, vcc
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mov_b32_e32 v11, v7
; SI-NEXT: v_mov_b32_e32 v10, v6
; SI-NEXT: v_mov_b32_e32 v9, v5
; SI-NEXT: v_mov_b32_e32 v8, v4
; SI-NEXT: buffer_atomic_cmpswap_x2 v[8:11], v[0:1], s[4:7], 0 addr64 offset:32 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[8:9], v[6:7]
; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
; SI-NEXT: v_mov_b32_e32 v6, v8
; SI-NEXT: v_mov_b32_e32 v7, v9
; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
; SI-NEXT: s_cbranch_execnz .LBB105_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[8:9]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_umax_i64_noret_offset__amdgpu_no_remote_memory:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_add_u32_e32 v0, vcc, 32, v0
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI-NEXT: flat_load_dwordx2 v[6:7], v[0:1]
; VI-NEXT: s_mov_b64 s[4:5], 0
; VI-NEXT: .LBB105_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_cmp_gt_u64_e32 vcc, v[6:7], v[2:3]
; VI-NEXT: v_cndmask_b32_e32 v5, v3, v7, vcc
; VI-NEXT: v_cndmask_b32_e32 v4, v2, v6, vcc
; VI-NEXT: flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
; VI-NEXT: v_mov_b32_e32 v7, v5
; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; VI-NEXT: v_mov_b32_e32 v6, v4
; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
; VI-NEXT: s_cbranch_execnz .LBB105_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[4:5]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_umax_i64_noret_offset__amdgpu_no_remote_memory:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: global_load_dwordx2 v[6:7], v[0:1], off offset:32
; GFX9-NEXT: s_mov_b64 s[4:5], 0
; GFX9-NEXT: .LBB105_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_cmp_gt_u64_e32 vcc, v[6:7], v[2:3]
; GFX9-NEXT: v_cndmask_b32_e32 v5, v3, v7, vcc
; GFX9-NEXT: v_cndmask_b32_e32 v4, v2, v6, vcc
; GFX9-NEXT: global_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7], off offset:32 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
; GFX9-NEXT: v_mov_b32_e32 v7, v5
; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; GFX9-NEXT: v_mov_b32_e32 v6, v4
; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_cbranch_execnz .LBB105_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_setpc_b64 s[30:31]
; (review) IR below is the test input; CHECK lines above are autogenerated —
; regenerate with update_llc_test_checks.py instead of editing them by hand.
  %gep = getelementptr i64, ptr addrspace(1) %out, i64 4
  %tmp0 = atomicrmw umax ptr addrspace(1) %gep, i64 %in seq_cst, !amdgpu.no.remote.memory !0
  ret void
}
define i64 @global_atomic_umax_i64_ret_offset__amdgpu_no_remote_memory(ptr addrspace(1) %out, i64 %in) {
; SI-LABEL: global_atomic_umax_i64_ret_offset__amdgpu_no_remote_memory:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v5, v3
; SI-NEXT: v_mov_b32_e32 v4, v2
; SI-NEXT: v_mov_b32_e32 v7, v1
; SI-NEXT: v_mov_b32_e32 v6, v0
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
; SI-NEXT: buffer_load_dwordx2 v[0:1], v[6:7], s[4:7], 0 addr64 offset:32
; SI-NEXT: s_mov_b64 s[8:9], 0
; SI-NEXT: .LBB106_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_mov_b32_e32 v11, v1
; SI-NEXT: v_mov_b32_e32 v10, v0
; SI-NEXT: v_cmp_gt_u64_e32 vcc, v[10:11], v[4:5]
; SI-NEXT: v_cndmask_b32_e32 v9, v5, v11, vcc
; SI-NEXT: v_cndmask_b32_e32 v8, v4, v10, vcc
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mov_b32_e32 v0, v8
; SI-NEXT: v_mov_b32_e32 v1, v9
; SI-NEXT: v_mov_b32_e32 v2, v10
; SI-NEXT: v_mov_b32_e32 v3, v11
; SI-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v[6:7], s[4:7], 0 addr64 offset:32 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
; SI-NEXT: s_cbranch_execnz .LBB106_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[8:9]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_umax_i64_ret_offset__amdgpu_no_remote_memory:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_add_u32_e32 v4, vcc, 32, v0
; VI-NEXT: v_addc_u32_e32 v5, vcc, 0, v1, vcc
; VI-NEXT: flat_load_dwordx2 v[0:1], v[4:5]
; VI-NEXT: s_mov_b64 s[4:5], 0
; VI-NEXT: .LBB106_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_mov_b32_e32 v9, v1
; VI-NEXT: v_mov_b32_e32 v8, v0
; VI-NEXT: v_cmp_gt_u64_e32 vcc, v[8:9], v[2:3]
; VI-NEXT: v_cndmask_b32_e32 v7, v3, v9, vcc
; VI-NEXT: v_cndmask_b32_e32 v6, v2, v8, vcc
; VI-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[6:9] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
; VI-NEXT: s_cbranch_execnz .LBB106_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[4:5]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_umax_i64_ret_offset__amdgpu_no_remote_memory:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: global_load_dwordx2 v[4:5], v[0:1], off offset:32
; GFX9-NEXT: s_mov_b64 s[4:5], 0
; GFX9-NEXT: .LBB106_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v7, v5
; GFX9-NEXT: v_mov_b32_e32 v6, v4
; GFX9-NEXT: v_cmp_gt_u64_e32 vcc, v[6:7], v[2:3]
; GFX9-NEXT: v_cndmask_b32_e32 v5, v3, v7, vcc
; GFX9-NEXT: v_cndmask_b32_e32 v4, v2, v6, vcc
; GFX9-NEXT: global_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7], off offset:32 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_cbranch_execnz .LBB106_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX9-NEXT: v_mov_b32_e32 v0, v4
; GFX9-NEXT: v_mov_b32_e32 v1, v5
; GFX9-NEXT: s_setpc_b64 s[30:31]
; (review) IR below is the test input; CHECK lines above are autogenerated —
; regenerate with update_llc_test_checks.py instead of editing them by hand.
  %gep = getelementptr i64, ptr addrspace(1) %out, i64 4
  %result = atomicrmw umax ptr addrspace(1) %gep, i64 %in seq_cst, !amdgpu.no.remote.memory !0
  ret i64 %result
}

; ---------------------------------------------------------------------
; atomicrmw umin
; ---------------------------------------------------------------------

define void @global_atomic_umin_i64_noret(ptr addrspace(1) %ptr, i64 %in) {
; SI-LABEL: global_atomic_umin_i64_noret:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
; SI-NEXT: buffer_load_dwordx2 v[6:7], v[0:1], s[4:7], 0 addr64
; SI-NEXT: s_mov_b64 s[8:9], 0
; SI-NEXT: .LBB107_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cmp_le_u64_e32 vcc, v[6:7], v[2:3]
; SI-NEXT: v_cndmask_b32_e32 v5, v3, v7, vcc
; SI-NEXT: v_cndmask_b32_e32 v4, v2, v6, vcc
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mov_b32_e32 v11, v7
; SI-NEXT: v_mov_b32_e32 v10, v6
; SI-NEXT: v_mov_b32_e32 v9, v5
; SI-NEXT: v_mov_b32_e32 v8, v4
; SI-NEXT: buffer_atomic_cmpswap_x2 v[8:11], v[0:1], s[4:7], 0 addr64 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[8:9], v[6:7]
; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
; SI-NEXT: v_mov_b32_e32 v6, v8
; SI-NEXT: v_mov_b32_e32 v7, v9
; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
; SI-NEXT: s_cbranch_execnz .LBB107_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[8:9]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_umin_i64_noret:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: flat_load_dwordx2 v[6:7], v[0:1]
; VI-NEXT: s_mov_b64 s[4:5], 0
; VI-NEXT: .LBB107_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_cmp_le_u64_e32 vcc, v[6:7], v[2:3]
; VI-NEXT: v_cndmask_b32_e32 v5, v3, v7, vcc
; VI-NEXT: v_cndmask_b32_e32 v4, v2, v6, vcc
; VI-NEXT: flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
; VI-NEXT: v_mov_b32_e32 v7, v5
; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; VI-NEXT: v_mov_b32_e32 v6, v4
; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
; VI-NEXT: s_cbranch_execnz .LBB107_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[4:5]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_umin_i64_noret:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: global_load_dwordx2 v[6:7], v[0:1], off
; GFX9-NEXT: s_mov_b64 s[4:5], 0
; GFX9-NEXT: .LBB107_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_cmp_le_u64_e32 vcc, v[6:7], v[2:3]
; GFX9-NEXT: v_cndmask_b32_e32 v5, v3, v7, vcc
; GFX9-NEXT: v_cndmask_b32_e32 v4, v2, v6, vcc
; GFX9-NEXT: global_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7], off glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
; GFX9-NEXT: v_mov_b32_e32 v7, v5
; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; GFX9-NEXT: v_mov_b32_e32 v6, v4
; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_cbranch_execnz .LBB107_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_setpc_b64 s[30:31]
; (review) IR below is the test input; CHECK lines above are autogenerated —
; regenerate with update_llc_test_checks.py instead of editing them by hand.
  %tmp0 = atomicrmw umin ptr addrspace(1) %ptr, i64 %in seq_cst
  ret void
}
|
|
|
|
; 64-bit atomicrmw umin (result unused) at a 32-byte offset (gep i64 4).
; The umin is expanded into a load + v_cmp_le_u64/v_cndmask (select the
; smaller value) + cmpswap retry loop. SI/GFX9 fold the offset into the
; addressing mode (addr64 offset:32 / off offset:32); VI uses flat atomics
; and materializes the offset up front with v_add/v_addc.
define void @global_atomic_umin_i64_noret_offset(ptr addrspace(1) %out, i64 %in) {
; SI-LABEL: global_atomic_umin_i64_noret_offset:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
; SI-NEXT: buffer_load_dwordx2 v[6:7], v[0:1], s[4:7], 0 addr64 offset:32
; SI-NEXT: s_mov_b64 s[8:9], 0
; SI-NEXT: .LBB108_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cmp_le_u64_e32 vcc, v[6:7], v[2:3]
; SI-NEXT: v_cndmask_b32_e32 v5, v3, v7, vcc
; SI-NEXT: v_cndmask_b32_e32 v4, v2, v6, vcc
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mov_b32_e32 v11, v7
; SI-NEXT: v_mov_b32_e32 v10, v6
; SI-NEXT: v_mov_b32_e32 v9, v5
; SI-NEXT: v_mov_b32_e32 v8, v4
; SI-NEXT: buffer_atomic_cmpswap_x2 v[8:11], v[0:1], s[4:7], 0 addr64 offset:32 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[8:9], v[6:7]
; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
; SI-NEXT: v_mov_b32_e32 v6, v8
; SI-NEXT: v_mov_b32_e32 v7, v9
; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
; SI-NEXT: s_cbranch_execnz .LBB108_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[8:9]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_umin_i64_noret_offset:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_add_u32_e32 v0, vcc, 32, v0
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI-NEXT: flat_load_dwordx2 v[6:7], v[0:1]
; VI-NEXT: s_mov_b64 s[4:5], 0
; VI-NEXT: .LBB108_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_cmp_le_u64_e32 vcc, v[6:7], v[2:3]
; VI-NEXT: v_cndmask_b32_e32 v5, v3, v7, vcc
; VI-NEXT: v_cndmask_b32_e32 v4, v2, v6, vcc
; VI-NEXT: flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
; VI-NEXT: v_mov_b32_e32 v7, v5
; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; VI-NEXT: v_mov_b32_e32 v6, v4
; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
; VI-NEXT: s_cbranch_execnz .LBB108_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[4:5]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_umin_i64_noret_offset:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: global_load_dwordx2 v[6:7], v[0:1], off offset:32
; GFX9-NEXT: s_mov_b64 s[4:5], 0
; GFX9-NEXT: .LBB108_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_cmp_le_u64_e32 vcc, v[6:7], v[2:3]
; GFX9-NEXT: v_cndmask_b32_e32 v5, v3, v7, vcc
; GFX9-NEXT: v_cndmask_b32_e32 v4, v2, v6, vcc
; GFX9-NEXT: global_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7], off offset:32 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
; GFX9-NEXT: v_mov_b32_e32 v7, v5
; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; GFX9-NEXT: v_mov_b32_e32 v6, v4
; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_cbranch_execnz .LBB108_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %gep = getelementptr i64, ptr addrspace(1) %out, i64 4
  %tmp0 = atomicrmw umin ptr addrspace(1) %gep, i64 %in seq_cst
  ret void
}
|
|
|
|
; 64-bit atomicrmw umin whose old value is returned. Same cmpswap retry
; loop as the noret variant, but the loop carries the loaded value so the
; final old value can be moved into the return registers v[0:1] (VI/GFX9)
; or is already there via the SI register shuffle.
define i64 @global_atomic_umin_i64_ret(ptr addrspace(1) %ptr, i64 %in) {
; SI-LABEL: global_atomic_umin_i64_ret:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v5, v3
; SI-NEXT: v_mov_b32_e32 v4, v2
; SI-NEXT: v_mov_b32_e32 v7, v1
; SI-NEXT: v_mov_b32_e32 v6, v0
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
; SI-NEXT: buffer_load_dwordx2 v[0:1], v[6:7], s[4:7], 0 addr64
; SI-NEXT: s_mov_b64 s[8:9], 0
; SI-NEXT: .LBB109_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_mov_b32_e32 v11, v1
; SI-NEXT: v_mov_b32_e32 v10, v0
; SI-NEXT: v_cmp_le_u64_e32 vcc, v[10:11], v[4:5]
; SI-NEXT: v_cndmask_b32_e32 v9, v5, v11, vcc
; SI-NEXT: v_cndmask_b32_e32 v8, v4, v10, vcc
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mov_b32_e32 v0, v8
; SI-NEXT: v_mov_b32_e32 v1, v9
; SI-NEXT: v_mov_b32_e32 v2, v10
; SI-NEXT: v_mov_b32_e32 v3, v11
; SI-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v[6:7], s[4:7], 0 addr64 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
; SI-NEXT: s_cbranch_execnz .LBB109_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[8:9]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_umin_i64_ret:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: flat_load_dwordx2 v[4:5], v[0:1]
; VI-NEXT: s_mov_b64 s[4:5], 0
; VI-NEXT: .LBB109_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_mov_b32_e32 v7, v5
; VI-NEXT: v_mov_b32_e32 v6, v4
; VI-NEXT: v_cmp_le_u64_e32 vcc, v[6:7], v[2:3]
; VI-NEXT: v_cndmask_b32_e32 v5, v3, v7, vcc
; VI-NEXT: v_cndmask_b32_e32 v4, v2, v6, vcc
; VI-NEXT: flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
; VI-NEXT: s_cbranch_execnz .LBB109_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[4:5]
; VI-NEXT: v_mov_b32_e32 v0, v4
; VI-NEXT: v_mov_b32_e32 v1, v5
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_umin_i64_ret:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: global_load_dwordx2 v[4:5], v[0:1], off
; GFX9-NEXT: s_mov_b64 s[4:5], 0
; GFX9-NEXT: .LBB109_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v7, v5
; GFX9-NEXT: v_mov_b32_e32 v6, v4
; GFX9-NEXT: v_cmp_le_u64_e32 vcc, v[6:7], v[2:3]
; GFX9-NEXT: v_cndmask_b32_e32 v5, v3, v7, vcc
; GFX9-NEXT: v_cndmask_b32_e32 v4, v2, v6, vcc
; GFX9-NEXT: global_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7], off glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_cbranch_execnz .LBB109_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX9-NEXT: v_mov_b32_e32 v0, v4
; GFX9-NEXT: v_mov_b32_e32 v1, v5
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %result = atomicrmw umin ptr addrspace(1) %ptr, i64 %in seq_cst
  ret i64 %result
}
|
|
|
|
; Returned 64-bit umin at a 32-byte offset: combines the ret variant's
; old-value plumbing with the offset handling (addr64 offset:32 on SI,
; explicit v_add/v_addc on VI, off offset:32 on GFX9).
define i64 @global_atomic_umin_i64_ret_offset(ptr addrspace(1) %out, i64 %in) {
; SI-LABEL: global_atomic_umin_i64_ret_offset:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v5, v3
; SI-NEXT: v_mov_b32_e32 v4, v2
; SI-NEXT: v_mov_b32_e32 v7, v1
; SI-NEXT: v_mov_b32_e32 v6, v0
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
; SI-NEXT: buffer_load_dwordx2 v[0:1], v[6:7], s[4:7], 0 addr64 offset:32
; SI-NEXT: s_mov_b64 s[8:9], 0
; SI-NEXT: .LBB110_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_mov_b32_e32 v11, v1
; SI-NEXT: v_mov_b32_e32 v10, v0
; SI-NEXT: v_cmp_le_u64_e32 vcc, v[10:11], v[4:5]
; SI-NEXT: v_cndmask_b32_e32 v9, v5, v11, vcc
; SI-NEXT: v_cndmask_b32_e32 v8, v4, v10, vcc
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mov_b32_e32 v0, v8
; SI-NEXT: v_mov_b32_e32 v1, v9
; SI-NEXT: v_mov_b32_e32 v2, v10
; SI-NEXT: v_mov_b32_e32 v3, v11
; SI-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v[6:7], s[4:7], 0 addr64 offset:32 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
; SI-NEXT: s_cbranch_execnz .LBB110_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[8:9]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_umin_i64_ret_offset:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_add_u32_e32 v4, vcc, 32, v0
; VI-NEXT: v_addc_u32_e32 v5, vcc, 0, v1, vcc
; VI-NEXT: flat_load_dwordx2 v[0:1], v[4:5]
; VI-NEXT: s_mov_b64 s[4:5], 0
; VI-NEXT: .LBB110_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_mov_b32_e32 v9, v1
; VI-NEXT: v_mov_b32_e32 v8, v0
; VI-NEXT: v_cmp_le_u64_e32 vcc, v[8:9], v[2:3]
; VI-NEXT: v_cndmask_b32_e32 v7, v3, v9, vcc
; VI-NEXT: v_cndmask_b32_e32 v6, v2, v8, vcc
; VI-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[6:9] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
; VI-NEXT: s_cbranch_execnz .LBB110_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[4:5]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_umin_i64_ret_offset:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: global_load_dwordx2 v[4:5], v[0:1], off offset:32
; GFX9-NEXT: s_mov_b64 s[4:5], 0
; GFX9-NEXT: .LBB110_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v7, v5
; GFX9-NEXT: v_mov_b32_e32 v6, v4
; GFX9-NEXT: v_cmp_le_u64_e32 vcc, v[6:7], v[2:3]
; GFX9-NEXT: v_cndmask_b32_e32 v5, v3, v7, vcc
; GFX9-NEXT: v_cndmask_b32_e32 v4, v2, v6, vcc
; GFX9-NEXT: global_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7], off offset:32 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_cbranch_execnz .LBB110_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX9-NEXT: v_mov_b32_e32 v0, v4
; GFX9-NEXT: v_mov_b32_e32 v1, v5
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %gep = getelementptr i64, ptr addrspace(1) %out, i64 4
  %result = atomicrmw umin ptr addrspace(1) %gep, i64 %in seq_cst
  ret i64 %result
}
|
|
|
|
; Scalar (inreg) variant: pointer arrives in s[4:5] and the value in
; s[6:7]. The compare is flipped to v_cmp_ge_u64 because the SGPR pair
; must be the first operand. On SI, s6/s7 are preserved across the loop
; by v_writelane into a VGPR (v10) that is itself spilled/reloaded around
; the function with exec-mask save/restore.
define amdgpu_gfx void @global_atomic_umin_i64_noret_scalar(ptr addrspace(1) inreg %ptr, i64 inreg %in) {
; SI-LABEL: global_atomic_umin_i64_noret_scalar:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_writelane_b32 v10, s6, 0
; SI-NEXT: v_writelane_b32 v10, s7, 1
; SI-NEXT: s_mov_b32 s35, s7
; SI-NEXT: s_mov_b32 s34, s6
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: buffer_load_dwordx2 v[2:3], off, s[4:7], 0
; SI-NEXT: s_mov_b64 s[36:37], 0
; SI-NEXT: v_mov_b32_e32 v4, s35
; SI-NEXT: v_mov_b32_e32 v5, s34
; SI-NEXT: .LBB111_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cmp_ge_u64_e32 vcc, s[34:35], v[2:3]
; SI-NEXT: v_cndmask_b32_e32 v1, v4, v3, vcc
; SI-NEXT: v_cndmask_b32_e32 v0, v5, v2, vcc
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mov_b32_e32 v9, v3
; SI-NEXT: v_mov_b32_e32 v8, v2
; SI-NEXT: v_mov_b32_e32 v7, v1
; SI-NEXT: v_mov_b32_e32 v6, v0
; SI-NEXT: buffer_atomic_cmpswap_x2 v[6:9], off, s[4:7], 0 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[6:7], v[2:3]
; SI-NEXT: s_or_b64 s[36:37], vcc, s[36:37]
; SI-NEXT: v_mov_b32_e32 v2, v6
; SI-NEXT: v_mov_b32_e32 v3, v7
; SI-NEXT: s_andn2_b64 exec, exec, s[36:37]
; SI-NEXT: s_cbranch_execnz .LBB111_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[36:37]
; SI-NEXT: v_readlane_b32 s7, v10, 1
; SI-NEXT: v_readlane_b32 s6, v10, 0
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 ; 4-byte Folded Reload
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_umin_i64_noret_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: v_mov_b32_e32 v1, s5
; VI-NEXT: flat_load_dwordx2 v[2:3], v[0:1]
; VI-NEXT: v_mov_b32_e32 v4, s4
; VI-NEXT: s_mov_b64 s[34:35], 0
; VI-NEXT: v_mov_b32_e32 v6, s7
; VI-NEXT: v_mov_b32_e32 v7, s6
; VI-NEXT: v_mov_b32_e32 v5, s5
; VI-NEXT: .LBB111_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_cmp_ge_u64_e32 vcc, s[6:7], v[2:3]
; VI-NEXT: v_cndmask_b32_e32 v1, v6, v3, vcc
; VI-NEXT: v_cndmask_b32_e32 v0, v7, v2, vcc
; VI-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
; VI-NEXT: v_mov_b32_e32 v3, v1
; VI-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
; VI-NEXT: v_mov_b32_e32 v2, v0
; VI-NEXT: s_andn2_b64 exec, exec, s[34:35]
; VI-NEXT: s_cbranch_execnz .LBB111_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[34:35]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_umin_i64_noret_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v4, 0
; GFX9-NEXT: global_load_dwordx2 v[2:3], v4, s[4:5]
; GFX9-NEXT: s_mov_b64 s[34:35], 0
; GFX9-NEXT: v_mov_b32_e32 v5, s7
; GFX9-NEXT: v_mov_b32_e32 v6, s6
; GFX9-NEXT: .LBB111_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_cmp_ge_u64_e32 vcc, s[6:7], v[2:3]
; GFX9-NEXT: v_cndmask_b32_e32 v1, v5, v3, vcc
; GFX9-NEXT: v_cndmask_b32_e32 v0, v6, v2, vcc
; GFX9-NEXT: global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[4:5] glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
; GFX9-NEXT: v_mov_b32_e32 v3, v1
; GFX9-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
; GFX9-NEXT: v_mov_b32_e32 v2, v0
; GFX9-NEXT: s_andn2_b64 exec, exec, s[34:35]
; GFX9-NEXT: s_cbranch_execnz .LBB111_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[34:35]
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %tmp0 = atomicrmw umin ptr addrspace(1) %ptr, i64 %in seq_cst
  ret void
}
|
|
|
|
; Scalar variant with a 32-byte offset. SI/GFX9 fold the offset into the
; buffer/global addressing mode; VI computes the offset pointer with
; s_add_u32/s_addc_u32 before the flat atomics. SI again preserves s6/s7
; via v_writelane into a spilled VGPR.
define amdgpu_gfx void @global_atomic_umin_i64_noret_offset_scalar(ptr addrspace(1) inreg %out, i64 inreg %in) {
; SI-LABEL: global_atomic_umin_i64_noret_offset_scalar:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_writelane_b32 v10, s6, 0
; SI-NEXT: v_writelane_b32 v10, s7, 1
; SI-NEXT: s_mov_b32 s35, s7
; SI-NEXT: s_mov_b32 s34, s6
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: buffer_load_dwordx2 v[2:3], off, s[4:7], 0 offset:32
; SI-NEXT: s_mov_b64 s[36:37], 0
; SI-NEXT: v_mov_b32_e32 v4, s35
; SI-NEXT: v_mov_b32_e32 v5, s34
; SI-NEXT: .LBB112_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cmp_ge_u64_e32 vcc, s[34:35], v[2:3]
; SI-NEXT: v_cndmask_b32_e32 v1, v4, v3, vcc
; SI-NEXT: v_cndmask_b32_e32 v0, v5, v2, vcc
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mov_b32_e32 v9, v3
; SI-NEXT: v_mov_b32_e32 v8, v2
; SI-NEXT: v_mov_b32_e32 v7, v1
; SI-NEXT: v_mov_b32_e32 v6, v0
; SI-NEXT: buffer_atomic_cmpswap_x2 v[6:9], off, s[4:7], 0 offset:32 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[6:7], v[2:3]
; SI-NEXT: s_or_b64 s[36:37], vcc, s[36:37]
; SI-NEXT: v_mov_b32_e32 v2, v6
; SI-NEXT: v_mov_b32_e32 v3, v7
; SI-NEXT: s_andn2_b64 exec, exec, s[36:37]
; SI-NEXT: s_cbranch_execnz .LBB112_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[36:37]
; SI-NEXT: v_readlane_b32 s7, v10, 1
; SI-NEXT: v_readlane_b32 s6, v10, 0
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 ; 4-byte Folded Reload
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_umin_i64_noret_offset_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_add_u32 s34, s4, 32
; VI-NEXT: s_addc_u32 s35, s5, 0
; VI-NEXT: v_mov_b32_e32 v4, s34
; VI-NEXT: v_mov_b32_e32 v5, s35
; VI-NEXT: flat_load_dwordx2 v[2:3], v[4:5]
; VI-NEXT: s_mov_b64 s[34:35], 0
; VI-NEXT: v_mov_b32_e32 v6, s7
; VI-NEXT: v_mov_b32_e32 v7, s6
; VI-NEXT: .LBB112_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_cmp_ge_u64_e32 vcc, s[6:7], v[2:3]
; VI-NEXT: v_cndmask_b32_e32 v1, v6, v3, vcc
; VI-NEXT: v_cndmask_b32_e32 v0, v7, v2, vcc
; VI-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
; VI-NEXT: v_mov_b32_e32 v3, v1
; VI-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
; VI-NEXT: v_mov_b32_e32 v2, v0
; VI-NEXT: s_andn2_b64 exec, exec, s[34:35]
; VI-NEXT: s_cbranch_execnz .LBB112_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[34:35]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_umin_i64_noret_offset_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v4, 0
; GFX9-NEXT: global_load_dwordx2 v[2:3], v4, s[4:5] offset:32
; GFX9-NEXT: s_mov_b64 s[34:35], 0
; GFX9-NEXT: v_mov_b32_e32 v5, s7
; GFX9-NEXT: v_mov_b32_e32 v6, s6
; GFX9-NEXT: .LBB112_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_cmp_ge_u64_e32 vcc, s[6:7], v[2:3]
; GFX9-NEXT: v_cndmask_b32_e32 v1, v5, v3, vcc
; GFX9-NEXT: v_cndmask_b32_e32 v0, v6, v2, vcc
; GFX9-NEXT: global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[4:5] offset:32 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
; GFX9-NEXT: v_mov_b32_e32 v3, v1
; GFX9-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
; GFX9-NEXT: v_mov_b32_e32 v2, v0
; GFX9-NEXT: s_andn2_b64 exec, exec, s[34:35]
; GFX9-NEXT: s_cbranch_execnz .LBB112_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[34:35]
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %gep = getelementptr i64, ptr addrspace(1) %out, i64 4
  %tmp0 = atomicrmw umin ptr addrspace(1) %gep, i64 %in seq_cst
  ret void
}
|
|
|
|
; Scalar (inreg) variant returning the old value. Loop-carried old value
; lives in v[0:1], which are also the return registers, so no final moves
; are needed after the loop. SI preserves s6/s7 via writelane/readlane
; into a spilled VGPR as in the noret scalar case.
define amdgpu_gfx i64 @global_atomic_umin_i64_ret_scalar(ptr addrspace(1) inreg %ptr, i64 inreg %in) {
; SI-LABEL: global_atomic_umin_i64_ret_scalar:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_writelane_b32 v10, s6, 0
; SI-NEXT: v_writelane_b32 v10, s7, 1
; SI-NEXT: s_mov_b32 s35, s7
; SI-NEXT: s_mov_b32 s34, s6
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0
; SI-NEXT: s_mov_b64 s[36:37], 0
; SI-NEXT: v_mov_b32_e32 v4, s35
; SI-NEXT: v_mov_b32_e32 v5, s34
; SI-NEXT: .LBB113_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_mov_b32_e32 v9, v1
; SI-NEXT: v_mov_b32_e32 v8, v0
; SI-NEXT: v_cmp_ge_u64_e32 vcc, s[34:35], v[8:9]
; SI-NEXT: v_cndmask_b32_e32 v7, v4, v9, vcc
; SI-NEXT: v_cndmask_b32_e32 v6, v5, v8, vcc
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mov_b32_e32 v0, v6
; SI-NEXT: v_mov_b32_e32 v1, v7
; SI-NEXT: v_mov_b32_e32 v2, v8
; SI-NEXT: v_mov_b32_e32 v3, v9
; SI-NEXT: buffer_atomic_cmpswap_x2 v[0:3], off, s[4:7], 0 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
; SI-NEXT: s_or_b64 s[36:37], vcc, s[36:37]
; SI-NEXT: s_andn2_b64 exec, exec, s[36:37]
; SI-NEXT: s_cbranch_execnz .LBB113_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[36:37]
; SI-NEXT: v_readlane_b32 s7, v10, 1
; SI-NEXT: v_readlane_b32 s6, v10, 0
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 ; 4-byte Folded Reload
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_umin_i64_ret_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: v_mov_b32_e32 v1, s5
; VI-NEXT: flat_load_dwordx2 v[0:1], v[0:1]
; VI-NEXT: v_mov_b32_e32 v2, s4
; VI-NEXT: s_mov_b64 s[34:35], 0
; VI-NEXT: v_mov_b32_e32 v4, s7
; VI-NEXT: v_mov_b32_e32 v5, s6
; VI-NEXT: v_mov_b32_e32 v3, s5
; VI-NEXT: .LBB113_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_mov_b32_e32 v9, v1
; VI-NEXT: v_mov_b32_e32 v8, v0
; VI-NEXT: v_cmp_ge_u64_e32 vcc, s[6:7], v[8:9]
; VI-NEXT: v_cndmask_b32_e32 v7, v4, v9, vcc
; VI-NEXT: v_cndmask_b32_e32 v6, v5, v8, vcc
; VI-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[2:3], v[6:9] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
; VI-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
; VI-NEXT: s_andn2_b64 exec, exec, s[34:35]
; VI-NEXT: s_cbranch_execnz .LBB113_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[34:35]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_umin_i64_ret_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v2, 0
; GFX9-NEXT: global_load_dwordx2 v[0:1], v2, s[4:5]
; GFX9-NEXT: s_mov_b64 s[34:35], 0
; GFX9-NEXT: v_mov_b32_e32 v3, s7
; GFX9-NEXT: v_mov_b32_e32 v4, s6
; GFX9-NEXT: .LBB113_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v8, v1
; GFX9-NEXT: v_mov_b32_e32 v7, v0
; GFX9-NEXT: v_cmp_ge_u64_e32 vcc, s[6:7], v[7:8]
; GFX9-NEXT: v_cndmask_b32_e32 v6, v3, v8, vcc
; GFX9-NEXT: v_cndmask_b32_e32 v5, v4, v7, vcc
; GFX9-NEXT: global_atomic_cmpswap_x2 v[0:1], v2, v[5:8], s[4:5] glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[7:8]
; GFX9-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
; GFX9-NEXT: s_andn2_b64 exec, exec, s[34:35]
; GFX9-NEXT: s_cbranch_execnz .LBB113_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[34:35]
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %result = atomicrmw umin ptr addrspace(1) %ptr, i64 %in seq_cst
  ret i64 %result
}
|
|
|
|
; Scalar variant returning the old value, at a 32-byte offset. Combines
; the ret_scalar plumbing (old value kept in the v[0:1] return registers)
; with the offset handling: folded addressing on SI/GFX9, explicit
; s_add_u32/s_addc_u32 on VI.
define amdgpu_gfx i64 @global_atomic_umin_i64_ret_offset_scalar(ptr addrspace(1) inreg %out, i64 inreg %in) {
; SI-LABEL: global_atomic_umin_i64_ret_offset_scalar:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_writelane_b32 v10, s6, 0
; SI-NEXT: v_writelane_b32 v10, s7, 1
; SI-NEXT: s_mov_b32 s35, s7
; SI-NEXT: s_mov_b32 s34, s6
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0 offset:32
; SI-NEXT: s_mov_b64 s[36:37], 0
; SI-NEXT: v_mov_b32_e32 v4, s35
; SI-NEXT: v_mov_b32_e32 v5, s34
; SI-NEXT: .LBB114_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_mov_b32_e32 v9, v1
; SI-NEXT: v_mov_b32_e32 v8, v0
; SI-NEXT: v_cmp_ge_u64_e32 vcc, s[34:35], v[8:9]
; SI-NEXT: v_cndmask_b32_e32 v7, v4, v9, vcc
; SI-NEXT: v_cndmask_b32_e32 v6, v5, v8, vcc
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mov_b32_e32 v0, v6
; SI-NEXT: v_mov_b32_e32 v1, v7
; SI-NEXT: v_mov_b32_e32 v2, v8
; SI-NEXT: v_mov_b32_e32 v3, v9
; SI-NEXT: buffer_atomic_cmpswap_x2 v[0:3], off, s[4:7], 0 offset:32 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
; SI-NEXT: s_or_b64 s[36:37], vcc, s[36:37]
; SI-NEXT: s_andn2_b64 exec, exec, s[36:37]
; SI-NEXT: s_cbranch_execnz .LBB114_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[36:37]
; SI-NEXT: v_readlane_b32 s7, v10, 1
; SI-NEXT: v_readlane_b32 s6, v10, 0
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 ; 4-byte Folded Reload
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_umin_i64_ret_offset_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_add_u32 s34, s4, 32
; VI-NEXT: s_addc_u32 s35, s5, 0
; VI-NEXT: v_mov_b32_e32 v2, s34
; VI-NEXT: v_mov_b32_e32 v3, s35
; VI-NEXT: flat_load_dwordx2 v[0:1], v[2:3]
; VI-NEXT: s_mov_b64 s[34:35], 0
; VI-NEXT: v_mov_b32_e32 v4, s7
; VI-NEXT: v_mov_b32_e32 v5, s6
; VI-NEXT: .LBB114_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_mov_b32_e32 v9, v1
; VI-NEXT: v_mov_b32_e32 v8, v0
; VI-NEXT: v_cmp_ge_u64_e32 vcc, s[6:7], v[8:9]
; VI-NEXT: v_cndmask_b32_e32 v7, v4, v9, vcc
; VI-NEXT: v_cndmask_b32_e32 v6, v5, v8, vcc
; VI-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[2:3], v[6:9] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
; VI-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
; VI-NEXT: s_andn2_b64 exec, exec, s[34:35]
; VI-NEXT: s_cbranch_execnz .LBB114_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[34:35]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_umin_i64_ret_offset_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v2, 0
; GFX9-NEXT: global_load_dwordx2 v[0:1], v2, s[4:5] offset:32
; GFX9-NEXT: s_mov_b64 s[34:35], 0
; GFX9-NEXT: v_mov_b32_e32 v3, s7
; GFX9-NEXT: v_mov_b32_e32 v4, s6
; GFX9-NEXT: .LBB114_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v8, v1
; GFX9-NEXT: v_mov_b32_e32 v7, v0
; GFX9-NEXT: v_cmp_ge_u64_e32 vcc, s[6:7], v[7:8]
; GFX9-NEXT: v_cndmask_b32_e32 v6, v3, v8, vcc
; GFX9-NEXT: v_cndmask_b32_e32 v5, v4, v7, vcc
; GFX9-NEXT: global_atomic_cmpswap_x2 v[0:1], v2, v[5:8], s[4:5] offset:32 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[7:8]
; GFX9-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
; GFX9-NEXT: s_andn2_b64 exec, exec, s[34:35]
; GFX9-NEXT: s_cbranch_execnz .LBB114_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[34:35]
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %gep = getelementptr i64, ptr addrspace(1) %out, i64 4
  %result = atomicrmw umin ptr addrspace(1) %gep, i64 %in seq_cst
  ret i64 %result
}
|
|
|
|
; i64 atomicrmw umin, no-return form, on a pointer offset by 32 bytes
; (gep i64 x 4), annotated !amdgpu.no.remote.memory.  None of the checked
; targets has a native 64-bit umin atomic, so codegen expands to a plain
; 64-bit load followed by a cmpswap_x2 (cmpxchg) retry loop: v_cmp_le_u64
; selects the unsigned minimum, and s_or_b64/s_andn2_b64 on exec drive
; per-lane loop exit until every lane's cmpswap has succeeded.
define void @global_atomic_umin_i64_noret_offset__amdgpu_no_remote_memory(ptr addrspace(1) %out, i64 %in) {
; SI-LABEL: global_atomic_umin_i64_noret_offset__amdgpu_no_remote_memory:
; SI:       ; %bb.0:
; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT:    s_mov_b32 s6, 0
; SI-NEXT:    s_mov_b32 s7, 0xf000
; SI-NEXT:    s_mov_b32 s4, s6
; SI-NEXT:    s_mov_b32 s5, s6
; SI-NEXT:    buffer_load_dwordx2 v[6:7], v[0:1], s[4:7], 0 addr64 offset:32
; SI-NEXT:    s_mov_b64 s[8:9], 0
; SI-NEXT:  .LBB115_1: ; %atomicrmw.start
; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
; SI-NEXT:    s_waitcnt vmcnt(0)
; SI-NEXT:    v_cmp_le_u64_e32 vcc, v[6:7], v[2:3]
; SI-NEXT:    v_cndmask_b32_e32 v5, v3, v7, vcc
; SI-NEXT:    v_cndmask_b32_e32 v4, v2, v6, vcc
; SI-NEXT:    s_waitcnt expcnt(0)
; SI-NEXT:    v_mov_b32_e32 v11, v7
; SI-NEXT:    v_mov_b32_e32 v10, v6
; SI-NEXT:    v_mov_b32_e32 v9, v5
; SI-NEXT:    v_mov_b32_e32 v8, v4
; SI-NEXT:    buffer_atomic_cmpswap_x2 v[8:11], v[0:1], s[4:7], 0 addr64 offset:32 glc
; SI-NEXT:    s_waitcnt vmcnt(0)
; SI-NEXT:    buffer_wbinvl1
; SI-NEXT:    v_cmp_eq_u64_e32 vcc, v[8:9], v[6:7]
; SI-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
; SI-NEXT:    v_mov_b32_e32 v6, v8
; SI-NEXT:    v_mov_b32_e32 v7, v9
; SI-NEXT:    s_andn2_b64 exec, exec, s[8:9]
; SI-NEXT:    s_cbranch_execnz .LBB115_1
; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
; SI-NEXT:    s_or_b64 exec, exec, s[8:9]
; SI-NEXT:    s_waitcnt expcnt(0)
; SI-NEXT:    s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_umin_i64_noret_offset__amdgpu_no_remote_memory:
; VI:       ; %bb.0:
; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT:    v_add_u32_e32 v0, vcc, 32, v0
; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI-NEXT:    flat_load_dwordx2 v[6:7], v[0:1]
; VI-NEXT:    s_mov_b64 s[4:5], 0
; VI-NEXT:  .LBB115_1: ; %atomicrmw.start
; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
; VI-NEXT:    s_waitcnt vmcnt(0)
; VI-NEXT:    v_cmp_le_u64_e32 vcc, v[6:7], v[2:3]
; VI-NEXT:    v_cndmask_b32_e32 v5, v3, v7, vcc
; VI-NEXT:    v_cndmask_b32_e32 v4, v2, v6, vcc
; VI-NEXT:    flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] glc
; VI-NEXT:    s_waitcnt vmcnt(0)
; VI-NEXT:    buffer_wbinvl1_vol
; VI-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
; VI-NEXT:    v_mov_b32_e32 v7, v5
; VI-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
; VI-NEXT:    v_mov_b32_e32 v6, v4
; VI-NEXT:    s_andn2_b64 exec, exec, s[4:5]
; VI-NEXT:    s_cbranch_execnz .LBB115_1
; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
; VI-NEXT:    s_or_b64 exec, exec, s[4:5]
; VI-NEXT:    s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_umin_i64_noret_offset__amdgpu_no_remote_memory:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT:    global_load_dwordx2 v[6:7], v[0:1], off offset:32
; GFX9-NEXT:    s_mov_b64 s[4:5], 0
; GFX9-NEXT:  .LBB115_1: ; %atomicrmw.start
; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT:    s_waitcnt vmcnt(0)
; GFX9-NEXT:    v_cmp_le_u64_e32 vcc, v[6:7], v[2:3]
; GFX9-NEXT:    v_cndmask_b32_e32 v5, v3, v7, vcc
; GFX9-NEXT:    v_cndmask_b32_e32 v4, v2, v6, vcc
; GFX9-NEXT:    global_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7], off offset:32 glc
; GFX9-NEXT:    s_waitcnt vmcnt(0)
; GFX9-NEXT:    buffer_wbinvl1_vol
; GFX9-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
; GFX9-NEXT:    v_mov_b32_e32 v7, v5
; GFX9-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
; GFX9-NEXT:    v_mov_b32_e32 v6, v4
; GFX9-NEXT:    s_andn2_b64 exec, exec, s[4:5]
; GFX9-NEXT:    s_cbranch_execnz .LBB115_1
; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT:    s_or_b64 exec, exec, s[4:5]
; GFX9-NEXT:    s_setpc_b64 s[30:31]
  %gep = getelementptr i64, ptr addrspace(1) %out, i64 4
  %tmp0 = atomicrmw umin ptr addrspace(1) %gep, i64 %in seq_cst, !amdgpu.no.remote.memory !0
  ret void
}
|
|
|
|
; Same umin expansion as the noret variant above, but the atomic's old value
; is returned, so each target additionally shuffles the cmpswap result back
; into the v[0:1] return registers (SI/VI keep the loop-carried value there
; directly; GFX9 copies v4/v5 out after the loop).
define i64 @global_atomic_umin_i64_ret_offset__amdgpu_no_remote_memory(ptr addrspace(1) %out, i64 %in) {
; SI-LABEL: global_atomic_umin_i64_ret_offset__amdgpu_no_remote_memory:
; SI:       ; %bb.0:
; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT:    v_mov_b32_e32 v5, v3
; SI-NEXT:    v_mov_b32_e32 v4, v2
; SI-NEXT:    v_mov_b32_e32 v7, v1
; SI-NEXT:    v_mov_b32_e32 v6, v0
; SI-NEXT:    s_mov_b32 s6, 0
; SI-NEXT:    s_mov_b32 s7, 0xf000
; SI-NEXT:    s_mov_b32 s4, s6
; SI-NEXT:    s_mov_b32 s5, s6
; SI-NEXT:    buffer_load_dwordx2 v[0:1], v[6:7], s[4:7], 0 addr64 offset:32
; SI-NEXT:    s_mov_b64 s[8:9], 0
; SI-NEXT:  .LBB116_1: ; %atomicrmw.start
; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
; SI-NEXT:    s_waitcnt vmcnt(0)
; SI-NEXT:    v_mov_b32_e32 v11, v1
; SI-NEXT:    v_mov_b32_e32 v10, v0
; SI-NEXT:    v_cmp_le_u64_e32 vcc, v[10:11], v[4:5]
; SI-NEXT:    v_cndmask_b32_e32 v9, v5, v11, vcc
; SI-NEXT:    v_cndmask_b32_e32 v8, v4, v10, vcc
; SI-NEXT:    s_waitcnt expcnt(0)
; SI-NEXT:    v_mov_b32_e32 v0, v8
; SI-NEXT:    v_mov_b32_e32 v1, v9
; SI-NEXT:    v_mov_b32_e32 v2, v10
; SI-NEXT:    v_mov_b32_e32 v3, v11
; SI-NEXT:    buffer_atomic_cmpswap_x2 v[0:3], v[6:7], s[4:7], 0 addr64 offset:32 glc
; SI-NEXT:    s_waitcnt vmcnt(0)
; SI-NEXT:    buffer_wbinvl1
; SI-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
; SI-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
; SI-NEXT:    s_andn2_b64 exec, exec, s[8:9]
; SI-NEXT:    s_cbranch_execnz .LBB116_1
; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
; SI-NEXT:    s_or_b64 exec, exec, s[8:9]
; SI-NEXT:    s_waitcnt expcnt(0)
; SI-NEXT:    s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_umin_i64_ret_offset__amdgpu_no_remote_memory:
; VI:       ; %bb.0:
; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT:    v_add_u32_e32 v4, vcc, 32, v0
; VI-NEXT:    v_addc_u32_e32 v5, vcc, 0, v1, vcc
; VI-NEXT:    flat_load_dwordx2 v[0:1], v[4:5]
; VI-NEXT:    s_mov_b64 s[4:5], 0
; VI-NEXT:  .LBB116_1: ; %atomicrmw.start
; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
; VI-NEXT:    s_waitcnt vmcnt(0)
; VI-NEXT:    v_mov_b32_e32 v9, v1
; VI-NEXT:    v_mov_b32_e32 v8, v0
; VI-NEXT:    v_cmp_le_u64_e32 vcc, v[8:9], v[2:3]
; VI-NEXT:    v_cndmask_b32_e32 v7, v3, v9, vcc
; VI-NEXT:    v_cndmask_b32_e32 v6, v2, v8, vcc
; VI-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[6:9] glc
; VI-NEXT:    s_waitcnt vmcnt(0)
; VI-NEXT:    buffer_wbinvl1_vol
; VI-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
; VI-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
; VI-NEXT:    s_andn2_b64 exec, exec, s[4:5]
; VI-NEXT:    s_cbranch_execnz .LBB116_1
; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
; VI-NEXT:    s_or_b64 exec, exec, s[4:5]
; VI-NEXT:    s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_umin_i64_ret_offset__amdgpu_no_remote_memory:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT:    global_load_dwordx2 v[4:5], v[0:1], off offset:32
; GFX9-NEXT:    s_mov_b64 s[4:5], 0
; GFX9-NEXT:  .LBB116_1: ; %atomicrmw.start
; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT:    s_waitcnt vmcnt(0)
; GFX9-NEXT:    v_mov_b32_e32 v7, v5
; GFX9-NEXT:    v_mov_b32_e32 v6, v4
; GFX9-NEXT:    v_cmp_le_u64_e32 vcc, v[6:7], v[2:3]
; GFX9-NEXT:    v_cndmask_b32_e32 v5, v3, v7, vcc
; GFX9-NEXT:    v_cndmask_b32_e32 v4, v2, v6, vcc
; GFX9-NEXT:    global_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7], off offset:32 glc
; GFX9-NEXT:    s_waitcnt vmcnt(0)
; GFX9-NEXT:    buffer_wbinvl1_vol
; GFX9-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
; GFX9-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
; GFX9-NEXT:    s_andn2_b64 exec, exec, s[4:5]
; GFX9-NEXT:    s_cbranch_execnz .LBB116_1
; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT:    s_or_b64 exec, exec, s[4:5]
; GFX9-NEXT:    v_mov_b32_e32 v0, v4
; GFX9-NEXT:    v_mov_b32_e32 v1, v5
; GFX9-NEXT:    s_setpc_b64 s[30:31]
  %gep = getelementptr i64, ptr addrspace(1) %out, i64 4
  %result = atomicrmw umin ptr addrspace(1) %gep, i64 %in seq_cst, !amdgpu.no.remote.memory !0
  ret i64 %result
}
|
|
|
|
; ---------------------------------------------------------------------
|
|
; atomicrmw min
|
|
; ---------------------------------------------------------------------
|
|
|
|
; i64 atomicrmw (signed) min, no-return form, no offset.  Expansion mirrors
; the umin tests except the loop comparison is the signed v_cmp_le_i64.
define void @global_atomic_min_i64_noret(ptr addrspace(1) %ptr, i64 %in) {
; SI-LABEL: global_atomic_min_i64_noret:
; SI:       ; %bb.0:
; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT:    s_mov_b32 s6, 0
; SI-NEXT:    s_mov_b32 s7, 0xf000
; SI-NEXT:    s_mov_b32 s4, s6
; SI-NEXT:    s_mov_b32 s5, s6
; SI-NEXT:    buffer_load_dwordx2 v[6:7], v[0:1], s[4:7], 0 addr64
; SI-NEXT:    s_mov_b64 s[8:9], 0
; SI-NEXT:  .LBB117_1: ; %atomicrmw.start
; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
; SI-NEXT:    s_waitcnt vmcnt(0)
; SI-NEXT:    v_cmp_le_i64_e32 vcc, v[6:7], v[2:3]
; SI-NEXT:    v_cndmask_b32_e32 v5, v3, v7, vcc
; SI-NEXT:    v_cndmask_b32_e32 v4, v2, v6, vcc
; SI-NEXT:    s_waitcnt expcnt(0)
; SI-NEXT:    v_mov_b32_e32 v11, v7
; SI-NEXT:    v_mov_b32_e32 v10, v6
; SI-NEXT:    v_mov_b32_e32 v9, v5
; SI-NEXT:    v_mov_b32_e32 v8, v4
; SI-NEXT:    buffer_atomic_cmpswap_x2 v[8:11], v[0:1], s[4:7], 0 addr64 glc
; SI-NEXT:    s_waitcnt vmcnt(0)
; SI-NEXT:    buffer_wbinvl1
; SI-NEXT:    v_cmp_eq_u64_e32 vcc, v[8:9], v[6:7]
; SI-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
; SI-NEXT:    v_mov_b32_e32 v6, v8
; SI-NEXT:    v_mov_b32_e32 v7, v9
; SI-NEXT:    s_andn2_b64 exec, exec, s[8:9]
; SI-NEXT:    s_cbranch_execnz .LBB117_1
; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
; SI-NEXT:    s_or_b64 exec, exec, s[8:9]
; SI-NEXT:    s_waitcnt expcnt(0)
; SI-NEXT:    s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_min_i64_noret:
; VI:       ; %bb.0:
; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT:    flat_load_dwordx2 v[6:7], v[0:1]
; VI-NEXT:    s_mov_b64 s[4:5], 0
; VI-NEXT:  .LBB117_1: ; %atomicrmw.start
; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
; VI-NEXT:    s_waitcnt vmcnt(0)
; VI-NEXT:    v_cmp_le_i64_e32 vcc, v[6:7], v[2:3]
; VI-NEXT:    v_cndmask_b32_e32 v5, v3, v7, vcc
; VI-NEXT:    v_cndmask_b32_e32 v4, v2, v6, vcc
; VI-NEXT:    flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] glc
; VI-NEXT:    s_waitcnt vmcnt(0)
; VI-NEXT:    buffer_wbinvl1_vol
; VI-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
; VI-NEXT:    v_mov_b32_e32 v7, v5
; VI-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
; VI-NEXT:    v_mov_b32_e32 v6, v4
; VI-NEXT:    s_andn2_b64 exec, exec, s[4:5]
; VI-NEXT:    s_cbranch_execnz .LBB117_1
; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
; VI-NEXT:    s_or_b64 exec, exec, s[4:5]
; VI-NEXT:    s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_min_i64_noret:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT:    global_load_dwordx2 v[6:7], v[0:1], off
; GFX9-NEXT:    s_mov_b64 s[4:5], 0
; GFX9-NEXT:  .LBB117_1: ; %atomicrmw.start
; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT:    s_waitcnt vmcnt(0)
; GFX9-NEXT:    v_cmp_le_i64_e32 vcc, v[6:7], v[2:3]
; GFX9-NEXT:    v_cndmask_b32_e32 v5, v3, v7, vcc
; GFX9-NEXT:    v_cndmask_b32_e32 v4, v2, v6, vcc
; GFX9-NEXT:    global_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7], off glc
; GFX9-NEXT:    s_waitcnt vmcnt(0)
; GFX9-NEXT:    buffer_wbinvl1_vol
; GFX9-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
; GFX9-NEXT:    v_mov_b32_e32 v7, v5
; GFX9-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
; GFX9-NEXT:    v_mov_b32_e32 v6, v4
; GFX9-NEXT:    s_andn2_b64 exec, exec, s[4:5]
; GFX9-NEXT:    s_cbranch_execnz .LBB117_1
; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT:    s_or_b64 exec, exec, s[4:5]
; GFX9-NEXT:    s_setpc_b64 s[30:31]
  %tmp0 = atomicrmw min ptr addrspace(1) %ptr, i64 %in seq_cst
  ret void
}
|
|
|
|
; Signed i64 min, no-return form, through a 32-byte offset (gep i64 x 4).
; SI/GFX9 fold the offset into the memory instruction (offset:32); VI has no
; immediate offset on flat ops, so it materializes the address with a 64-bit
; v_add/v_addc pair before entering the cmpxchg loop.
define void @global_atomic_min_i64_noret_offset(ptr addrspace(1) %out, i64 %in) {
; SI-LABEL: global_atomic_min_i64_noret_offset:
; SI:       ; %bb.0:
; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT:    s_mov_b32 s6, 0
; SI-NEXT:    s_mov_b32 s7, 0xf000
; SI-NEXT:    s_mov_b32 s4, s6
; SI-NEXT:    s_mov_b32 s5, s6
; SI-NEXT:    buffer_load_dwordx2 v[6:7], v[0:1], s[4:7], 0 addr64 offset:32
; SI-NEXT:    s_mov_b64 s[8:9], 0
; SI-NEXT:  .LBB118_1: ; %atomicrmw.start
; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
; SI-NEXT:    s_waitcnt vmcnt(0)
; SI-NEXT:    v_cmp_le_i64_e32 vcc, v[6:7], v[2:3]
; SI-NEXT:    v_cndmask_b32_e32 v5, v3, v7, vcc
; SI-NEXT:    v_cndmask_b32_e32 v4, v2, v6, vcc
; SI-NEXT:    s_waitcnt expcnt(0)
; SI-NEXT:    v_mov_b32_e32 v11, v7
; SI-NEXT:    v_mov_b32_e32 v10, v6
; SI-NEXT:    v_mov_b32_e32 v9, v5
; SI-NEXT:    v_mov_b32_e32 v8, v4
; SI-NEXT:    buffer_atomic_cmpswap_x2 v[8:11], v[0:1], s[4:7], 0 addr64 offset:32 glc
; SI-NEXT:    s_waitcnt vmcnt(0)
; SI-NEXT:    buffer_wbinvl1
; SI-NEXT:    v_cmp_eq_u64_e32 vcc, v[8:9], v[6:7]
; SI-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
; SI-NEXT:    v_mov_b32_e32 v6, v8
; SI-NEXT:    v_mov_b32_e32 v7, v9
; SI-NEXT:    s_andn2_b64 exec, exec, s[8:9]
; SI-NEXT:    s_cbranch_execnz .LBB118_1
; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
; SI-NEXT:    s_or_b64 exec, exec, s[8:9]
; SI-NEXT:    s_waitcnt expcnt(0)
; SI-NEXT:    s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_min_i64_noret_offset:
; VI:       ; %bb.0:
; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT:    v_add_u32_e32 v0, vcc, 32, v0
; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI-NEXT:    flat_load_dwordx2 v[6:7], v[0:1]
; VI-NEXT:    s_mov_b64 s[4:5], 0
; VI-NEXT:  .LBB118_1: ; %atomicrmw.start
; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
; VI-NEXT:    s_waitcnt vmcnt(0)
; VI-NEXT:    v_cmp_le_i64_e32 vcc, v[6:7], v[2:3]
; VI-NEXT:    v_cndmask_b32_e32 v5, v3, v7, vcc
; VI-NEXT:    v_cndmask_b32_e32 v4, v2, v6, vcc
; VI-NEXT:    flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] glc
; VI-NEXT:    s_waitcnt vmcnt(0)
; VI-NEXT:    buffer_wbinvl1_vol
; VI-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
; VI-NEXT:    v_mov_b32_e32 v7, v5
; VI-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
; VI-NEXT:    v_mov_b32_e32 v6, v4
; VI-NEXT:    s_andn2_b64 exec, exec, s[4:5]
; VI-NEXT:    s_cbranch_execnz .LBB118_1
; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
; VI-NEXT:    s_or_b64 exec, exec, s[4:5]
; VI-NEXT:    s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_min_i64_noret_offset:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT:    global_load_dwordx2 v[6:7], v[0:1], off offset:32
; GFX9-NEXT:    s_mov_b64 s[4:5], 0
; GFX9-NEXT:  .LBB118_1: ; %atomicrmw.start
; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT:    s_waitcnt vmcnt(0)
; GFX9-NEXT:    v_cmp_le_i64_e32 vcc, v[6:7], v[2:3]
; GFX9-NEXT:    v_cndmask_b32_e32 v5, v3, v7, vcc
; GFX9-NEXT:    v_cndmask_b32_e32 v4, v2, v6, vcc
; GFX9-NEXT:    global_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7], off offset:32 glc
; GFX9-NEXT:    s_waitcnt vmcnt(0)
; GFX9-NEXT:    buffer_wbinvl1_vol
; GFX9-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
; GFX9-NEXT:    v_mov_b32_e32 v7, v5
; GFX9-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
; GFX9-NEXT:    v_mov_b32_e32 v6, v4
; GFX9-NEXT:    s_andn2_b64 exec, exec, s[4:5]
; GFX9-NEXT:    s_cbranch_execnz .LBB118_1
; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT:    s_or_b64 exec, exec, s[4:5]
; GFX9-NEXT:    s_setpc_b64 s[30:31]
  %gep = getelementptr i64, ptr addrspace(1) %out, i64 4
  %tmp0 = atomicrmw min ptr addrspace(1) %gep, i64 %in seq_cst
  ret void
}
|
|
|
|
; Signed i64 min returning the old value.  The cmpxchg loop keeps the
; loop-carried "old" value where the return ABI wants it (v[0:1] on SI) or
; copies the final cmpswap result into v[0:1] after the loop (VI/GFX9).
define i64 @global_atomic_min_i64_ret(ptr addrspace(1) %ptr, i64 %in) {
; SI-LABEL: global_atomic_min_i64_ret:
; SI:       ; %bb.0:
; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT:    v_mov_b32_e32 v5, v3
; SI-NEXT:    v_mov_b32_e32 v4, v2
; SI-NEXT:    v_mov_b32_e32 v7, v1
; SI-NEXT:    v_mov_b32_e32 v6, v0
; SI-NEXT:    s_mov_b32 s6, 0
; SI-NEXT:    s_mov_b32 s7, 0xf000
; SI-NEXT:    s_mov_b32 s4, s6
; SI-NEXT:    s_mov_b32 s5, s6
; SI-NEXT:    buffer_load_dwordx2 v[0:1], v[6:7], s[4:7], 0 addr64
; SI-NEXT:    s_mov_b64 s[8:9], 0
; SI-NEXT:  .LBB119_1: ; %atomicrmw.start
; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
; SI-NEXT:    s_waitcnt vmcnt(0)
; SI-NEXT:    v_mov_b32_e32 v11, v1
; SI-NEXT:    v_mov_b32_e32 v10, v0
; SI-NEXT:    v_cmp_le_i64_e32 vcc, v[10:11], v[4:5]
; SI-NEXT:    v_cndmask_b32_e32 v9, v5, v11, vcc
; SI-NEXT:    v_cndmask_b32_e32 v8, v4, v10, vcc
; SI-NEXT:    s_waitcnt expcnt(0)
; SI-NEXT:    v_mov_b32_e32 v0, v8
; SI-NEXT:    v_mov_b32_e32 v1, v9
; SI-NEXT:    v_mov_b32_e32 v2, v10
; SI-NEXT:    v_mov_b32_e32 v3, v11
; SI-NEXT:    buffer_atomic_cmpswap_x2 v[0:3], v[6:7], s[4:7], 0 addr64 glc
; SI-NEXT:    s_waitcnt vmcnt(0)
; SI-NEXT:    buffer_wbinvl1
; SI-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
; SI-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
; SI-NEXT:    s_andn2_b64 exec, exec, s[8:9]
; SI-NEXT:    s_cbranch_execnz .LBB119_1
; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
; SI-NEXT:    s_or_b64 exec, exec, s[8:9]
; SI-NEXT:    s_waitcnt expcnt(0)
; SI-NEXT:    s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_min_i64_ret:
; VI:       ; %bb.0:
; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT:    flat_load_dwordx2 v[4:5], v[0:1]
; VI-NEXT:    s_mov_b64 s[4:5], 0
; VI-NEXT:  .LBB119_1: ; %atomicrmw.start
; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
; VI-NEXT:    s_waitcnt vmcnt(0)
; VI-NEXT:    v_mov_b32_e32 v7, v5
; VI-NEXT:    v_mov_b32_e32 v6, v4
; VI-NEXT:    v_cmp_le_i64_e32 vcc, v[6:7], v[2:3]
; VI-NEXT:    v_cndmask_b32_e32 v5, v3, v7, vcc
; VI-NEXT:    v_cndmask_b32_e32 v4, v2, v6, vcc
; VI-NEXT:    flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] glc
; VI-NEXT:    s_waitcnt vmcnt(0)
; VI-NEXT:    buffer_wbinvl1_vol
; VI-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
; VI-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
; VI-NEXT:    s_andn2_b64 exec, exec, s[4:5]
; VI-NEXT:    s_cbranch_execnz .LBB119_1
; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
; VI-NEXT:    s_or_b64 exec, exec, s[4:5]
; VI-NEXT:    v_mov_b32_e32 v0, v4
; VI-NEXT:    v_mov_b32_e32 v1, v5
; VI-NEXT:    s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_min_i64_ret:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT:    global_load_dwordx2 v[4:5], v[0:1], off
; GFX9-NEXT:    s_mov_b64 s[4:5], 0
; GFX9-NEXT:  .LBB119_1: ; %atomicrmw.start
; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT:    s_waitcnt vmcnt(0)
; GFX9-NEXT:    v_mov_b32_e32 v7, v5
; GFX9-NEXT:    v_mov_b32_e32 v6, v4
; GFX9-NEXT:    v_cmp_le_i64_e32 vcc, v[6:7], v[2:3]
; GFX9-NEXT:    v_cndmask_b32_e32 v5, v3, v7, vcc
; GFX9-NEXT:    v_cndmask_b32_e32 v4, v2, v6, vcc
; GFX9-NEXT:    global_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7], off glc
; GFX9-NEXT:    s_waitcnt vmcnt(0)
; GFX9-NEXT:    buffer_wbinvl1_vol
; GFX9-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
; GFX9-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
; GFX9-NEXT:    s_andn2_b64 exec, exec, s[4:5]
; GFX9-NEXT:    s_cbranch_execnz .LBB119_1
; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT:    s_or_b64 exec, exec, s[4:5]
; GFX9-NEXT:    v_mov_b32_e32 v0, v4
; GFX9-NEXT:    v_mov_b32_e32 v1, v5
; GFX9-NEXT:    s_setpc_b64 s[30:31]
  %result = atomicrmw min ptr addrspace(1) %ptr, i64 %in seq_cst
  ret i64 %result
}
|
|
|
|
; Signed i64 min returning the old value, through a 32-byte offset.
; Combines the addressing of the noret_offset test (immediate offset on
; SI/GFX9, explicit 64-bit add on VI) with the return-value plumbing of the
; plain ret test.
define i64 @global_atomic_min_i64_ret_offset(ptr addrspace(1) %out, i64 %in) {
; SI-LABEL: global_atomic_min_i64_ret_offset:
; SI:       ; %bb.0:
; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT:    v_mov_b32_e32 v5, v3
; SI-NEXT:    v_mov_b32_e32 v4, v2
; SI-NEXT:    v_mov_b32_e32 v7, v1
; SI-NEXT:    v_mov_b32_e32 v6, v0
; SI-NEXT:    s_mov_b32 s6, 0
; SI-NEXT:    s_mov_b32 s7, 0xf000
; SI-NEXT:    s_mov_b32 s4, s6
; SI-NEXT:    s_mov_b32 s5, s6
; SI-NEXT:    buffer_load_dwordx2 v[0:1], v[6:7], s[4:7], 0 addr64 offset:32
; SI-NEXT:    s_mov_b64 s[8:9], 0
; SI-NEXT:  .LBB120_1: ; %atomicrmw.start
; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
; SI-NEXT:    s_waitcnt vmcnt(0)
; SI-NEXT:    v_mov_b32_e32 v11, v1
; SI-NEXT:    v_mov_b32_e32 v10, v0
; SI-NEXT:    v_cmp_le_i64_e32 vcc, v[10:11], v[4:5]
; SI-NEXT:    v_cndmask_b32_e32 v9, v5, v11, vcc
; SI-NEXT:    v_cndmask_b32_e32 v8, v4, v10, vcc
; SI-NEXT:    s_waitcnt expcnt(0)
; SI-NEXT:    v_mov_b32_e32 v0, v8
; SI-NEXT:    v_mov_b32_e32 v1, v9
; SI-NEXT:    v_mov_b32_e32 v2, v10
; SI-NEXT:    v_mov_b32_e32 v3, v11
; SI-NEXT:    buffer_atomic_cmpswap_x2 v[0:3], v[6:7], s[4:7], 0 addr64 offset:32 glc
; SI-NEXT:    s_waitcnt vmcnt(0)
; SI-NEXT:    buffer_wbinvl1
; SI-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
; SI-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
; SI-NEXT:    s_andn2_b64 exec, exec, s[8:9]
; SI-NEXT:    s_cbranch_execnz .LBB120_1
; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
; SI-NEXT:    s_or_b64 exec, exec, s[8:9]
; SI-NEXT:    s_waitcnt expcnt(0)
; SI-NEXT:    s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_min_i64_ret_offset:
; VI:       ; %bb.0:
; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT:    v_add_u32_e32 v4, vcc, 32, v0
; VI-NEXT:    v_addc_u32_e32 v5, vcc, 0, v1, vcc
; VI-NEXT:    flat_load_dwordx2 v[0:1], v[4:5]
; VI-NEXT:    s_mov_b64 s[4:5], 0
; VI-NEXT:  .LBB120_1: ; %atomicrmw.start
; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
; VI-NEXT:    s_waitcnt vmcnt(0)
; VI-NEXT:    v_mov_b32_e32 v9, v1
; VI-NEXT:    v_mov_b32_e32 v8, v0
; VI-NEXT:    v_cmp_le_i64_e32 vcc, v[8:9], v[2:3]
; VI-NEXT:    v_cndmask_b32_e32 v7, v3, v9, vcc
; VI-NEXT:    v_cndmask_b32_e32 v6, v2, v8, vcc
; VI-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[6:9] glc
; VI-NEXT:    s_waitcnt vmcnt(0)
; VI-NEXT:    buffer_wbinvl1_vol
; VI-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
; VI-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
; VI-NEXT:    s_andn2_b64 exec, exec, s[4:5]
; VI-NEXT:    s_cbranch_execnz .LBB120_1
; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
; VI-NEXT:    s_or_b64 exec, exec, s[4:5]
; VI-NEXT:    s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_min_i64_ret_offset:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT:    global_load_dwordx2 v[4:5], v[0:1], off offset:32
; GFX9-NEXT:    s_mov_b64 s[4:5], 0
; GFX9-NEXT:  .LBB120_1: ; %atomicrmw.start
; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT:    s_waitcnt vmcnt(0)
; GFX9-NEXT:    v_mov_b32_e32 v7, v5
; GFX9-NEXT:    v_mov_b32_e32 v6, v4
; GFX9-NEXT:    v_cmp_le_i64_e32 vcc, v[6:7], v[2:3]
; GFX9-NEXT:    v_cndmask_b32_e32 v5, v3, v7, vcc
; GFX9-NEXT:    v_cndmask_b32_e32 v4, v2, v6, vcc
; GFX9-NEXT:    global_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7], off offset:32 glc
; GFX9-NEXT:    s_waitcnt vmcnt(0)
; GFX9-NEXT:    buffer_wbinvl1_vol
; GFX9-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
; GFX9-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
; GFX9-NEXT:    s_andn2_b64 exec, exec, s[4:5]
; GFX9-NEXT:    s_cbranch_execnz .LBB120_1
; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT:    s_or_b64 exec, exec, s[4:5]
; GFX9-NEXT:    v_mov_b32_e32 v0, v4
; GFX9-NEXT:    v_mov_b32_e32 v1, v5
; GFX9-NEXT:    s_setpc_b64 s[30:31]
  %gep = getelementptr i64, ptr addrspace(1) %out, i64 4
  %result = atomicrmw min ptr addrspace(1) %gep, i64 %in seq_cst
  ret i64 %result
}
|
|
|
|
; Signed i64 min with uniform (SGPR inreg) pointer and operand under the
; amdgpu_gfx calling convention.  SI spills the callee-saved s6/s7 pair into
; lanes of v10 (v_writelane/v_readlane around a saveexec'd stack slot) before
; reusing them for the buffer descriptor; VI/GFX9 keep the scalar operand in
; SGPRs and only copy it to VGPRs for the cndmask selects.  The loop compare
; is the inverted-operand form v_cmp_ge_i64 with the SGPR pair first.
define amdgpu_gfx void @global_atomic_min_i64_noret_scalar(ptr addrspace(1) inreg %ptr, i64 inreg %in) {
; SI-LABEL: global_atomic_min_i64_noret_scalar:
; SI:       ; %bb.0:
; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT:    buffer_store_dword v10, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT:    s_mov_b64 exec, s[34:35]
; SI-NEXT:    s_waitcnt expcnt(0)
; SI-NEXT:    v_writelane_b32 v10, s6, 0
; SI-NEXT:    v_writelane_b32 v10, s7, 1
; SI-NEXT:    s_mov_b32 s35, s7
; SI-NEXT:    s_mov_b32 s34, s6
; SI-NEXT:    s_mov_b32 s7, 0xf000
; SI-NEXT:    s_mov_b32 s6, -1
; SI-NEXT:    buffer_load_dwordx2 v[2:3], off, s[4:7], 0
; SI-NEXT:    s_mov_b64 s[36:37], 0
; SI-NEXT:    v_mov_b32_e32 v4, s35
; SI-NEXT:    v_mov_b32_e32 v5, s34
; SI-NEXT:  .LBB121_1: ; %atomicrmw.start
; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
; SI-NEXT:    s_waitcnt vmcnt(0)
; SI-NEXT:    v_cmp_ge_i64_e32 vcc, s[34:35], v[2:3]
; SI-NEXT:    v_cndmask_b32_e32 v1, v4, v3, vcc
; SI-NEXT:    v_cndmask_b32_e32 v0, v5, v2, vcc
; SI-NEXT:    s_waitcnt expcnt(0)
; SI-NEXT:    v_mov_b32_e32 v9, v3
; SI-NEXT:    v_mov_b32_e32 v8, v2
; SI-NEXT:    v_mov_b32_e32 v7, v1
; SI-NEXT:    v_mov_b32_e32 v6, v0
; SI-NEXT:    buffer_atomic_cmpswap_x2 v[6:9], off, s[4:7], 0 glc
; SI-NEXT:    s_waitcnt vmcnt(0)
; SI-NEXT:    buffer_wbinvl1
; SI-NEXT:    v_cmp_eq_u64_e32 vcc, v[6:7], v[2:3]
; SI-NEXT:    s_or_b64 s[36:37], vcc, s[36:37]
; SI-NEXT:    v_mov_b32_e32 v2, v6
; SI-NEXT:    v_mov_b32_e32 v3, v7
; SI-NEXT:    s_andn2_b64 exec, exec, s[36:37]
; SI-NEXT:    s_cbranch_execnz .LBB121_1
; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
; SI-NEXT:    s_or_b64 exec, exec, s[36:37]
; SI-NEXT:    v_readlane_b32 s7, v10, 1
; SI-NEXT:    v_readlane_b32 s6, v10, 0
; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT:    buffer_load_dword v10, off, s[0:3], s32 ; 4-byte Folded Reload
; SI-NEXT:    s_mov_b64 exec, s[34:35]
; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT:    s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_min_i64_noret_scalar:
; VI:       ; %bb.0:
; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT:    v_mov_b32_e32 v0, s4
; VI-NEXT:    v_mov_b32_e32 v1, s5
; VI-NEXT:    flat_load_dwordx2 v[2:3], v[0:1]
; VI-NEXT:    v_mov_b32_e32 v4, s4
; VI-NEXT:    s_mov_b64 s[34:35], 0
; VI-NEXT:    v_mov_b32_e32 v6, s7
; VI-NEXT:    v_mov_b32_e32 v7, s6
; VI-NEXT:    v_mov_b32_e32 v5, s5
; VI-NEXT:  .LBB121_1: ; %atomicrmw.start
; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
; VI-NEXT:    s_waitcnt vmcnt(0)
; VI-NEXT:    v_cmp_ge_i64_e32 vcc, s[6:7], v[2:3]
; VI-NEXT:    v_cndmask_b32_e32 v1, v6, v3, vcc
; VI-NEXT:    v_cndmask_b32_e32 v0, v7, v2, vcc
; VI-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
; VI-NEXT:    s_waitcnt vmcnt(0)
; VI-NEXT:    buffer_wbinvl1_vol
; VI-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
; VI-NEXT:    v_mov_b32_e32 v3, v1
; VI-NEXT:    s_or_b64 s[34:35], vcc, s[34:35]
; VI-NEXT:    v_mov_b32_e32 v2, v0
; VI-NEXT:    s_andn2_b64 exec, exec, s[34:35]
; VI-NEXT:    s_cbranch_execnz .LBB121_1
; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
; VI-NEXT:    s_or_b64 exec, exec, s[34:35]
; VI-NEXT:    s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_min_i64_noret_scalar:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT:    v_mov_b32_e32 v4, 0
; GFX9-NEXT:    global_load_dwordx2 v[2:3], v4, s[4:5]
; GFX9-NEXT:    s_mov_b64 s[34:35], 0
; GFX9-NEXT:    v_mov_b32_e32 v5, s7
; GFX9-NEXT:    v_mov_b32_e32 v6, s6
; GFX9-NEXT:  .LBB121_1: ; %atomicrmw.start
; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT:    s_waitcnt vmcnt(0)
; GFX9-NEXT:    v_cmp_ge_i64_e32 vcc, s[6:7], v[2:3]
; GFX9-NEXT:    v_cndmask_b32_e32 v1, v5, v3, vcc
; GFX9-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
; GFX9-NEXT:    global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[4:5] glc
; GFX9-NEXT:    s_waitcnt vmcnt(0)
; GFX9-NEXT:    buffer_wbinvl1_vol
; GFX9-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
; GFX9-NEXT:    v_mov_b32_e32 v3, v1
; GFX9-NEXT:    s_or_b64 s[34:35], vcc, s[34:35]
; GFX9-NEXT:    v_mov_b32_e32 v2, v0
; GFX9-NEXT:    s_andn2_b64 exec, exec, s[34:35]
; GFX9-NEXT:    s_cbranch_execnz .LBB121_1
; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT:    s_or_b64 exec, exec, s[34:35]
; GFX9-NEXT:    s_setpc_b64 s[30:31]
  %tmp0 = atomicrmw min ptr addrspace(1) %ptr, i64 %in seq_cst
  ret void
}
|
|
|
|
; Offset variant of the scalar-operand min test above.  SI/GFX9 again fold
; the 32-byte offset into the memory operations; VI computes the offset
; address with scalar s_add_u32/s_addc_u32 (the base is uniform) and then
; moves it into v[4:5] for the flat ops.
define amdgpu_gfx void @global_atomic_min_i64_noret_offset_scalar(ptr addrspace(1) inreg %out, i64 inreg %in) {
; SI-LABEL: global_atomic_min_i64_noret_offset_scalar:
; SI:       ; %bb.0:
; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT:    buffer_store_dword v10, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT:    s_mov_b64 exec, s[34:35]
; SI-NEXT:    s_waitcnt expcnt(0)
; SI-NEXT:    v_writelane_b32 v10, s6, 0
; SI-NEXT:    v_writelane_b32 v10, s7, 1
; SI-NEXT:    s_mov_b32 s35, s7
; SI-NEXT:    s_mov_b32 s34, s6
; SI-NEXT:    s_mov_b32 s7, 0xf000
; SI-NEXT:    s_mov_b32 s6, -1
; SI-NEXT:    buffer_load_dwordx2 v[2:3], off, s[4:7], 0 offset:32
; SI-NEXT:    s_mov_b64 s[36:37], 0
; SI-NEXT:    v_mov_b32_e32 v4, s35
; SI-NEXT:    v_mov_b32_e32 v5, s34
; SI-NEXT:  .LBB122_1: ; %atomicrmw.start
; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
; SI-NEXT:    s_waitcnt vmcnt(0)
; SI-NEXT:    v_cmp_ge_i64_e32 vcc, s[34:35], v[2:3]
; SI-NEXT:    v_cndmask_b32_e32 v1, v4, v3, vcc
; SI-NEXT:    v_cndmask_b32_e32 v0, v5, v2, vcc
; SI-NEXT:    s_waitcnt expcnt(0)
; SI-NEXT:    v_mov_b32_e32 v9, v3
; SI-NEXT:    v_mov_b32_e32 v8, v2
; SI-NEXT:    v_mov_b32_e32 v7, v1
; SI-NEXT:    v_mov_b32_e32 v6, v0
; SI-NEXT:    buffer_atomic_cmpswap_x2 v[6:9], off, s[4:7], 0 offset:32 glc
; SI-NEXT:    s_waitcnt vmcnt(0)
; SI-NEXT:    buffer_wbinvl1
; SI-NEXT:    v_cmp_eq_u64_e32 vcc, v[6:7], v[2:3]
; SI-NEXT:    s_or_b64 s[36:37], vcc, s[36:37]
; SI-NEXT:    v_mov_b32_e32 v2, v6
; SI-NEXT:    v_mov_b32_e32 v3, v7
; SI-NEXT:    s_andn2_b64 exec, exec, s[36:37]
; SI-NEXT:    s_cbranch_execnz .LBB122_1
; SI-NEXT:  ; %bb.2: ; %atomicrmw.end
; SI-NEXT:    s_or_b64 exec, exec, s[36:37]
; SI-NEXT:    v_readlane_b32 s7, v10, 1
; SI-NEXT:    v_readlane_b32 s6, v10, 0
; SI-NEXT:    s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT:    buffer_load_dword v10, off, s[0:3], s32 ; 4-byte Folded Reload
; SI-NEXT:    s_mov_b64 exec, s[34:35]
; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT:    s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_min_i64_noret_offset_scalar:
; VI:       ; %bb.0:
; VI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT:    s_add_u32 s34, s4, 32
; VI-NEXT:    s_addc_u32 s35, s5, 0
; VI-NEXT:    v_mov_b32_e32 v4, s34
; VI-NEXT:    v_mov_b32_e32 v5, s35
; VI-NEXT:    flat_load_dwordx2 v[2:3], v[4:5]
; VI-NEXT:    s_mov_b64 s[34:35], 0
; VI-NEXT:    v_mov_b32_e32 v6, s7
; VI-NEXT:    v_mov_b32_e32 v7, s6
; VI-NEXT:  .LBB122_1: ; %atomicrmw.start
; VI-NEXT:    ; =>This Inner Loop Header: Depth=1
; VI-NEXT:    s_waitcnt vmcnt(0)
; VI-NEXT:    v_cmp_ge_i64_e32 vcc, s[6:7], v[2:3]
; VI-NEXT:    v_cndmask_b32_e32 v1, v6, v3, vcc
; VI-NEXT:    v_cndmask_b32_e32 v0, v7, v2, vcc
; VI-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
; VI-NEXT:    s_waitcnt vmcnt(0)
; VI-NEXT:    buffer_wbinvl1_vol
; VI-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
; VI-NEXT:    v_mov_b32_e32 v3, v1
; VI-NEXT:    s_or_b64 s[34:35], vcc, s[34:35]
; VI-NEXT:    v_mov_b32_e32 v2, v0
; VI-NEXT:    s_andn2_b64 exec, exec, s[34:35]
; VI-NEXT:    s_cbranch_execnz .LBB122_1
; VI-NEXT:  ; %bb.2: ; %atomicrmw.end
; VI-NEXT:    s_or_b64 exec, exec, s[34:35]
; VI-NEXT:    s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_min_i64_noret_offset_scalar:
; GFX9:       ; %bb.0:
; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT:    v_mov_b32_e32 v4, 0
; GFX9-NEXT:    global_load_dwordx2 v[2:3], v4, s[4:5] offset:32
; GFX9-NEXT:    s_mov_b64 s[34:35], 0
; GFX9-NEXT:    v_mov_b32_e32 v5, s7
; GFX9-NEXT:    v_mov_b32_e32 v6, s6
; GFX9-NEXT:  .LBB122_1: ; %atomicrmw.start
; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT:    s_waitcnt vmcnt(0)
; GFX9-NEXT:    v_cmp_ge_i64_e32 vcc, s[6:7], v[2:3]
; GFX9-NEXT:    v_cndmask_b32_e32 v1, v5, v3, vcc
; GFX9-NEXT:    v_cndmask_b32_e32 v0, v6, v2, vcc
; GFX9-NEXT:    global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[4:5] offset:32 glc
; GFX9-NEXT:    s_waitcnt vmcnt(0)
; GFX9-NEXT:    buffer_wbinvl1_vol
; GFX9-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
; GFX9-NEXT:    v_mov_b32_e32 v3, v1
; GFX9-NEXT:    s_or_b64 s[34:35], vcc, s[34:35]
; GFX9-NEXT:    v_mov_b32_e32 v2, v0
; GFX9-NEXT:    s_andn2_b64 exec, exec, s[34:35]
; GFX9-NEXT:    s_cbranch_execnz .LBB122_1
; GFX9-NEXT:  ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT:    s_or_b64 exec, exec, s[34:35]
; GFX9-NEXT:    s_setpc_b64 s[30:31]
  %gep = getelementptr i64, ptr addrspace(1) %out, i64 4
  %tmp0 = atomicrmw min ptr addrspace(1) %gep, i64 %in seq_cst
  ret void
}
|
|
|
|
define amdgpu_gfx i64 @global_atomic_min_i64_ret_scalar(ptr addrspace(1) inreg %ptr, i64 inreg %in) {
; SI-LABEL: global_atomic_min_i64_ret_scalar:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_writelane_b32 v10, s6, 0
; SI-NEXT: v_writelane_b32 v10, s7, 1
; SI-NEXT: s_mov_b32 s35, s7
; SI-NEXT: s_mov_b32 s34, s6
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0
; SI-NEXT: s_mov_b64 s[36:37], 0
; SI-NEXT: v_mov_b32_e32 v4, s35
; SI-NEXT: v_mov_b32_e32 v5, s34
; SI-NEXT: .LBB123_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_mov_b32_e32 v9, v1
; SI-NEXT: v_mov_b32_e32 v8, v0
; SI-NEXT: v_cmp_ge_i64_e32 vcc, s[34:35], v[8:9]
; SI-NEXT: v_cndmask_b32_e32 v7, v4, v9, vcc
; SI-NEXT: v_cndmask_b32_e32 v6, v5, v8, vcc
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mov_b32_e32 v0, v6
; SI-NEXT: v_mov_b32_e32 v1, v7
; SI-NEXT: v_mov_b32_e32 v2, v8
; SI-NEXT: v_mov_b32_e32 v3, v9
; SI-NEXT: buffer_atomic_cmpswap_x2 v[0:3], off, s[4:7], 0 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
; SI-NEXT: s_or_b64 s[36:37], vcc, s[36:37]
; SI-NEXT: s_andn2_b64 exec, exec, s[36:37]
; SI-NEXT: s_cbranch_execnz .LBB123_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[36:37]
; SI-NEXT: v_readlane_b32 s7, v10, 1
; SI-NEXT: v_readlane_b32 s6, v10, 0
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 ; 4-byte Folded Reload
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_min_i64_ret_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: v_mov_b32_e32 v1, s5
; VI-NEXT: flat_load_dwordx2 v[0:1], v[0:1]
; VI-NEXT: v_mov_b32_e32 v2, s4
; VI-NEXT: s_mov_b64 s[34:35], 0
; VI-NEXT: v_mov_b32_e32 v4, s7
; VI-NEXT: v_mov_b32_e32 v5, s6
; VI-NEXT: v_mov_b32_e32 v3, s5
; VI-NEXT: .LBB123_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_mov_b32_e32 v9, v1
; VI-NEXT: v_mov_b32_e32 v8, v0
; VI-NEXT: v_cmp_ge_i64_e32 vcc, s[6:7], v[8:9]
; VI-NEXT: v_cndmask_b32_e32 v7, v4, v9, vcc
; VI-NEXT: v_cndmask_b32_e32 v6, v5, v8, vcc
; VI-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[2:3], v[6:9] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
; VI-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
; VI-NEXT: s_andn2_b64 exec, exec, s[34:35]
; VI-NEXT: s_cbranch_execnz .LBB123_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[34:35]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_min_i64_ret_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v2, 0
; GFX9-NEXT: global_load_dwordx2 v[0:1], v2, s[4:5]
; GFX9-NEXT: s_mov_b64 s[34:35], 0
; GFX9-NEXT: v_mov_b32_e32 v3, s7
; GFX9-NEXT: v_mov_b32_e32 v4, s6
; GFX9-NEXT: .LBB123_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v8, v1
; GFX9-NEXT: v_mov_b32_e32 v7, v0
; GFX9-NEXT: v_cmp_ge_i64_e32 vcc, s[6:7], v[7:8]
; GFX9-NEXT: v_cndmask_b32_e32 v6, v3, v8, vcc
; GFX9-NEXT: v_cndmask_b32_e32 v5, v4, v7, vcc
; GFX9-NEXT: global_atomic_cmpswap_x2 v[0:1], v2, v[5:8], s[4:5] glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[7:8]
; GFX9-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
; GFX9-NEXT: s_andn2_b64 exec, exec, s[34:35]
; GFX9-NEXT: s_cbranch_execnz .LBB123_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[34:35]
; GFX9-NEXT: s_setpc_b64 s[30:31]
; Value-returning seq_cst i64 signed-min RMW with a uniform (inreg) pointer and
; operand. All three targets expand it to a load + v_cmp_ge_i64/cndmask select +
; cmpswap_x2 retry loop that exits once the compare-exchange returns the expected
; value. On SI, s6/s7 are preserved across the loop via v_writelane/v_readlane
; into a spilled VGPR because they are repurposed as buffer-descriptor words.
  %result = atomicrmw min ptr addrspace(1) %ptr, i64 %in seq_cst
  ret i64 %result
}
|
|
|
|
define amdgpu_gfx i64 @global_atomic_min_i64_ret_offset_scalar(ptr addrspace(1) inreg %out, i64 inreg %in) {
; SI-LABEL: global_atomic_min_i64_ret_offset_scalar:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_writelane_b32 v10, s6, 0
; SI-NEXT: v_writelane_b32 v10, s7, 1
; SI-NEXT: s_mov_b32 s35, s7
; SI-NEXT: s_mov_b32 s34, s6
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: buffer_load_dwordx2 v[0:1], off, s[4:7], 0 offset:32
; SI-NEXT: s_mov_b64 s[36:37], 0
; SI-NEXT: v_mov_b32_e32 v4, s35
; SI-NEXT: v_mov_b32_e32 v5, s34
; SI-NEXT: .LBB124_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_mov_b32_e32 v9, v1
; SI-NEXT: v_mov_b32_e32 v8, v0
; SI-NEXT: v_cmp_ge_i64_e32 vcc, s[34:35], v[8:9]
; SI-NEXT: v_cndmask_b32_e32 v7, v4, v9, vcc
; SI-NEXT: v_cndmask_b32_e32 v6, v5, v8, vcc
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mov_b32_e32 v0, v6
; SI-NEXT: v_mov_b32_e32 v1, v7
; SI-NEXT: v_mov_b32_e32 v2, v8
; SI-NEXT: v_mov_b32_e32 v3, v9
; SI-NEXT: buffer_atomic_cmpswap_x2 v[0:3], off, s[4:7], 0 offset:32 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
; SI-NEXT: s_or_b64 s[36:37], vcc, s[36:37]
; SI-NEXT: s_andn2_b64 exec, exec, s[36:37]
; SI-NEXT: s_cbranch_execnz .LBB124_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[36:37]
; SI-NEXT: v_readlane_b32 s7, v10, 1
; SI-NEXT: v_readlane_b32 s6, v10, 0
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 ; 4-byte Folded Reload
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_min_i64_ret_offset_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_add_u32 s34, s4, 32
; VI-NEXT: s_addc_u32 s35, s5, 0
; VI-NEXT: v_mov_b32_e32 v2, s34
; VI-NEXT: v_mov_b32_e32 v3, s35
; VI-NEXT: flat_load_dwordx2 v[0:1], v[2:3]
; VI-NEXT: s_mov_b64 s[34:35], 0
; VI-NEXT: v_mov_b32_e32 v4, s7
; VI-NEXT: v_mov_b32_e32 v5, s6
; VI-NEXT: .LBB124_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_mov_b32_e32 v9, v1
; VI-NEXT: v_mov_b32_e32 v8, v0
; VI-NEXT: v_cmp_ge_i64_e32 vcc, s[6:7], v[8:9]
; VI-NEXT: v_cndmask_b32_e32 v7, v4, v9, vcc
; VI-NEXT: v_cndmask_b32_e32 v6, v5, v8, vcc
; VI-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[2:3], v[6:9] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
; VI-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
; VI-NEXT: s_andn2_b64 exec, exec, s[34:35]
; VI-NEXT: s_cbranch_execnz .LBB124_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[34:35]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_min_i64_ret_offset_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v2, 0
; GFX9-NEXT: global_load_dwordx2 v[0:1], v2, s[4:5] offset:32
; GFX9-NEXT: s_mov_b64 s[34:35], 0
; GFX9-NEXT: v_mov_b32_e32 v3, s7
; GFX9-NEXT: v_mov_b32_e32 v4, s6
; GFX9-NEXT: .LBB124_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v8, v1
; GFX9-NEXT: v_mov_b32_e32 v7, v0
; GFX9-NEXT: v_cmp_ge_i64_e32 vcc, s[6:7], v[7:8]
; GFX9-NEXT: v_cndmask_b32_e32 v6, v3, v8, vcc
; GFX9-NEXT: v_cndmask_b32_e32 v5, v4, v7, vcc
; GFX9-NEXT: global_atomic_cmpswap_x2 v[0:1], v2, v[5:8], s[4:5] offset:32 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[7:8]
; GFX9-NEXT: s_or_b64 s[34:35], vcc, s[34:35]
; GFX9-NEXT: s_andn2_b64 exec, exec, s[34:35]
; GFX9-NEXT: s_cbranch_execnz .LBB124_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[34:35]
; GFX9-NEXT: s_setpc_b64 s[30:31]
; Same as global_atomic_min_i64_ret_scalar but through a +32-byte GEP. SI and
; GFX9 fold the displacement into the instruction's offset field; VI (flat
; addressing) must materialize the offset address with s_add_u32/s_addc_u32.
%gep = getelementptr i64, ptr addrspace(1) %out, i64 4
%result = atomicrmw min ptr addrspace(1) %gep, i64 %in seq_cst
ret i64 %result
}
|
|
|
|
define amdgpu_kernel void @atomic_min_i64_addr64_offset(ptr addrspace(1) %out, i64 %in, i64 %index) {
; SI-LABEL: atomic_min_i64_addr64_offset:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0xd
; SI-NEXT: s_load_dwordx4 s[0:3], s[2:3], 0x9
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_lshl_b64 s[4:5], s[4:5], 3
; SI-NEXT: s_add_u32 s4, s0, s4
; SI-NEXT: s_addc_u32 s5, s1, s5
; SI-NEXT: s_load_dwordx2 s[8:9], s[4:5], 0x8
; SI-NEXT: s_mov_b64 s[0:1], 0
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: v_mov_b32_e32 v4, s3
; SI-NEXT: v_mov_b32_e32 v5, s2
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v2, s8
; SI-NEXT: v_mov_b32_e32 v3, s9
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: .LBB125_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: v_cmp_ge_i64_e32 vcc, s[2:3], v[2:3]
; SI-NEXT: v_cndmask_b32_e32 v1, v4, v3, vcc
; SI-NEXT: v_cndmask_b32_e32 v0, v5, v2, vcc
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mov_b32_e32 v9, v3
; SI-NEXT: v_mov_b32_e32 v8, v2
; SI-NEXT: v_mov_b32_e32 v7, v1
; SI-NEXT: v_mov_b32_e32 v6, v0
; SI-NEXT: buffer_atomic_cmpswap_x2 v[6:9], off, s[4:7], 0 offset:32 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[6:7], v[2:3]
; SI-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
; SI-NEXT: v_mov_b32_e32 v2, v6
; SI-NEXT: v_mov_b32_e32 v3, v7
; SI-NEXT: s_andn2_b64 exec, exec, s[0:1]
; SI-NEXT: s_cbranch_execnz .LBB125_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_min_i64_addr64_offset:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx2 s[6:7], s[2:3], 0x34
; VI-NEXT: s_load_dwordx4 s[0:3], s[2:3], 0x24
; VI-NEXT: s_mov_b64 s[4:5], 0
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_lshl_b64 s[6:7], s[6:7], 3
; VI-NEXT: s_add_u32 s0, s0, s6
; VI-NEXT: s_addc_u32 s1, s1, s7
; VI-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x20
; VI-NEXT: s_add_u32 s0, s0, 32
; VI-NEXT: s_addc_u32 s1, s1, 0
; VI-NEXT: v_mov_b32_e32 v5, s1
; VI-NEXT: v_mov_b32_e32 v6, s3
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v2, s6
; VI-NEXT: v_mov_b32_e32 v7, s2
; VI-NEXT: v_mov_b32_e32 v3, s7
; VI-NEXT: v_mov_b32_e32 v4, s0
; VI-NEXT: .LBB125_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: v_cmp_ge_i64_e32 vcc, s[2:3], v[2:3]
; VI-NEXT: v_cndmask_b32_e32 v1, v6, v3, vcc
; VI-NEXT: v_cndmask_b32_e32 v0, v7, v2, vcc
; VI-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
; VI-NEXT: v_mov_b32_e32 v3, v1
; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; VI-NEXT: v_mov_b32_e32 v2, v0
; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
; VI-NEXT: s_cbranch_execnz .LBB125_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_min_i64_addr64_offset:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x34
; GFX9-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x24
; GFX9-NEXT: s_mov_b64 s[2:3], 0
; GFX9-NEXT: v_mov_b32_e32 v6, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: s_lshl_b64 s[0:1], s[0:1], 3
; GFX9-NEXT: s_add_u32 s0, s4, s0
; GFX9-NEXT: s_addc_u32 s1, s5, s1
; GFX9-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x20
; GFX9-NEXT: v_mov_b32_e32 v4, s7
; GFX9-NEXT: v_mov_b32_e32 v5, s6
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v2, s4
; GFX9-NEXT: v_mov_b32_e32 v3, s5
; GFX9-NEXT: .LBB125_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: v_cmp_ge_i64_e32 vcc, s[6:7], v[2:3]
; GFX9-NEXT: v_cndmask_b32_e32 v1, v4, v3, vcc
; GFX9-NEXT: v_cndmask_b32_e32 v0, v5, v2, vcc
; GFX9-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] offset:32 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
; GFX9-NEXT: v_mov_b32_e32 v3, v1
; GFX9-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
; GFX9-NEXT: v_mov_b32_e32 v2, v0
; GFX9-NEXT: s_andn2_b64 exec, exec, s[2:3]
; GFX9-NEXT: s_cbranch_execnz .LBB125_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_endpgm
; Kernel variant: the pointer is indexed (%index scaled by 8 via s_lshl_b64) and
; offset by 32 bytes, and the initial value is fetched with a scalar
; s_load_dwordx2 before the cmpxchg retry loop.
; NOTE(review): on the xnack-replay targets (VI/GFX9 here) the multi-dword
; s_load_dwordx2 destinations (s[6:7] / s[4:5]) are disjoint from their address
; source pairs (s[0:1]), matching the early-clobber constraint described at the
; top of this file — confirm the checks still enforce that after regeneration.
entry:
%ptr = getelementptr i64, ptr addrspace(1) %out, i64 %index
%gep = getelementptr i64, ptr addrspace(1) %ptr, i64 4
%tmp0 = atomicrmw min ptr addrspace(1) %gep, i64 %in seq_cst
ret void
}
|
|
|
|
define amdgpu_kernel void @atomic_min_i64_ret_addr64_offset(ptr addrspace(1) %out, ptr addrspace(1) %out2, i64 %in, i64 %index) {
; SI-LABEL: atomic_min_i64_ret_addr64_offset:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dwordx8 s[0:7], s[2:3], 0x9
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_lshl_b64 s[6:7], s[6:7], 3
; SI-NEXT: s_add_u32 s8, s0, s6
; SI-NEXT: s_addc_u32 s9, s1, s7
; SI-NEXT: s_load_dwordx2 s[6:7], s[8:9], 0x8
; SI-NEXT: s_mov_b64 s[0:1], 0
; SI-NEXT: s_mov_b32 s11, 0xf000
; SI-NEXT: v_mov_b32_e32 v8, s5
; SI-NEXT: v_mov_b32_e32 v9, s4
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v2, s6
; SI-NEXT: v_mov_b32_e32 v3, s7
; SI-NEXT: s_mov_b32 s10, -1
; SI-NEXT: .LBB126_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: v_cmp_ge_i64_e32 vcc, s[4:5], v[2:3]
; SI-NEXT: v_cndmask_b32_e32 v1, v8, v3, vcc
; SI-NEXT: v_cndmask_b32_e32 v0, v9, v2, vcc
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mov_b32_e32 v7, v3
; SI-NEXT: v_mov_b32_e32 v6, v2
; SI-NEXT: v_mov_b32_e32 v5, v1
; SI-NEXT: v_mov_b32_e32 v4, v0
; SI-NEXT: buffer_atomic_cmpswap_x2 v[4:7], off, s[8:11], 0 offset:32 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
; SI-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
; SI-NEXT: v_mov_b32_e32 v2, v4
; SI-NEXT: v_mov_b32_e32 v3, v5
; SI-NEXT: s_andn2_b64 exec, exec, s[0:1]
; SI-NEXT: s_cbranch_execnz .LBB126_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[0:1]
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: s_mov_b32 s4, s2
; SI-NEXT: s_mov_b32 s5, s3
; SI-NEXT: buffer_store_dwordx2 v[4:5], off, s[4:7], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_min_i64_ret_addr64_offset:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx8 s[0:7], s[2:3], 0x24
; VI-NEXT: s_mov_b64 s[8:9], 0
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_lshl_b64 s[6:7], s[6:7], 3
; VI-NEXT: s_add_u32 s0, s0, s6
; VI-NEXT: s_addc_u32 s1, s1, s7
; VI-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x20
; VI-NEXT: s_add_u32 s0, s0, 32
; VI-NEXT: s_addc_u32 s1, s1, 0
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v4, s5
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v2, s6
; VI-NEXT: v_mov_b32_e32 v5, s4
; VI-NEXT: v_mov_b32_e32 v3, s7
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: .LBB126_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: v_mov_b32_e32 v9, v3
; VI-NEXT: v_mov_b32_e32 v8, v2
; VI-NEXT: v_cmp_ge_i64_e32 vcc, s[4:5], v[8:9]
; VI-NEXT: v_cndmask_b32_e32 v7, v4, v9, vcc
; VI-NEXT: v_cndmask_b32_e32 v6, v5, v8, vcc
; VI-NEXT: flat_atomic_cmpswap_x2 v[2:3], v[0:1], v[6:9] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[8:9]
; VI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
; VI-NEXT: s_andn2_b64 exec, exec, s[8:9]
; VI-NEXT: s_cbranch_execnz .LBB126_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[8:9]
; VI-NEXT: v_mov_b32_e32 v0, s2
; VI-NEXT: v_mov_b32_e32 v1, s3
; VI-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_min_i64_ret_addr64_offset:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dwordx8 s[4:11], s[2:3], 0x24
; GFX9-NEXT: s_mov_b64 s[2:3], 0
; GFX9-NEXT: v_mov_b32_e32 v4, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: s_lshl_b64 s[0:1], s[10:11], 3
; GFX9-NEXT: s_add_u32 s0, s4, s0
; GFX9-NEXT: s_addc_u32 s1, s5, s1
; GFX9-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x20
; GFX9-NEXT: v_mov_b32_e32 v2, s9
; GFX9-NEXT: v_mov_b32_e32 v3, s8
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v0, s4
; GFX9-NEXT: v_mov_b32_e32 v1, s5
; GFX9-NEXT: .LBB126_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: v_mov_b32_e32 v8, v1
; GFX9-NEXT: v_mov_b32_e32 v7, v0
; GFX9-NEXT: v_cmp_ge_i64_e32 vcc, s[8:9], v[7:8]
; GFX9-NEXT: v_cndmask_b32_e32 v6, v2, v8, vcc
; GFX9-NEXT: v_cndmask_b32_e32 v5, v3, v7, vcc
; GFX9-NEXT: global_atomic_cmpswap_x2 v[0:1], v4, v[5:8], s[0:1] offset:32 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[7:8]
; GFX9-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
; GFX9-NEXT: s_andn2_b64 exec, exec, s[2:3]
; GFX9-NEXT: s_cbranch_execnz .LBB126_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[2:3]
; GFX9-NEXT: v_mov_b32_e32 v2, 0
; GFX9-NEXT: global_store_dwordx2 v2, v[0:1], s[6:7]
; GFX9-NEXT: s_endpgm
; Indexed + offset kernel variant that also returns the old value: after the
; cmpxchg retry loop converges, the loaded (pre-op) value is stored to %out2
; via buffer/flat/global_store_dwordx2.
entry:
%ptr = getelementptr i64, ptr addrspace(1) %out, i64 %index
%gep = getelementptr i64, ptr addrspace(1) %ptr, i64 4
%tmp0 = atomicrmw min ptr addrspace(1) %gep, i64 %in seq_cst
store i64 %tmp0, ptr addrspace(1) %out2
ret void
}
|
|
|
|
define amdgpu_kernel void @atomic_min_i64(ptr addrspace(1) %out, i64 %in) {
; SI-LABEL: atomic_min_i64:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dwordx4 s[0:3], s[2:3], 0x9
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
; SI-NEXT: s_mov_b64 s[8:9], 0
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: v_mov_b32_e32 v4, s3
; SI-NEXT: v_mov_b32_e32 v5, s2
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v2, s4
; SI-NEXT: v_mov_b32_e32 v3, s5
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: s_mov_b32 s4, s0
; SI-NEXT: s_mov_b32 s5, s1
; SI-NEXT: .LBB127_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: v_cmp_ge_i64_e32 vcc, s[2:3], v[2:3]
; SI-NEXT: v_cndmask_b32_e32 v1, v4, v3, vcc
; SI-NEXT: v_cndmask_b32_e32 v0, v5, v2, vcc
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mov_b32_e32 v9, v3
; SI-NEXT: v_mov_b32_e32 v8, v2
; SI-NEXT: v_mov_b32_e32 v7, v1
; SI-NEXT: v_mov_b32_e32 v6, v0
; SI-NEXT: buffer_atomic_cmpswap_x2 v[6:9], off, s[4:7], 0 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[6:7], v[2:3]
; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
; SI-NEXT: v_mov_b32_e32 v2, v6
; SI-NEXT: v_mov_b32_e32 v3, v7
; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
; SI-NEXT: s_cbranch_execnz .LBB127_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_min_i64:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx4 s[0:3], s[2:3], 0x24
; VI-NEXT: s_mov_b64 s[4:5], 0
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
; VI-NEXT: v_mov_b32_e32 v5, s1
; VI-NEXT: v_mov_b32_e32 v6, s3
; VI-NEXT: v_mov_b32_e32 v7, s2
; VI-NEXT: v_mov_b32_e32 v4, s0
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v2, s6
; VI-NEXT: v_mov_b32_e32 v3, s7
; VI-NEXT: .LBB127_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: v_cmp_ge_i64_e32 vcc, s[2:3], v[2:3]
; VI-NEXT: v_cndmask_b32_e32 v1, v6, v3, vcc
; VI-NEXT: v_cndmask_b32_e32 v0, v7, v2, vcc
; VI-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
; VI-NEXT: v_mov_b32_e32 v3, v1
; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; VI-NEXT: v_mov_b32_e32 v2, v0
; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
; VI-NEXT: s_cbranch_execnz .LBB127_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_min_i64:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x24
; GFX9-NEXT: s_mov_b64 s[0:1], 0
; GFX9-NEXT: v_mov_b32_e32 v6, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: s_load_dwordx2 s[2:3], s[4:5], 0x0
; GFX9-NEXT: v_mov_b32_e32 v4, s7
; GFX9-NEXT: v_mov_b32_e32 v5, s6
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v2, s2
; GFX9-NEXT: v_mov_b32_e32 v3, s3
; GFX9-NEXT: .LBB127_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: v_cmp_ge_i64_e32 vcc, s[6:7], v[2:3]
; GFX9-NEXT: v_cndmask_b32_e32 v1, v4, v3, vcc
; GFX9-NEXT: v_cndmask_b32_e32 v0, v5, v2, vcc
; GFX9-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[4:5] glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
; GFX9-NEXT: v_mov_b32_e32 v3, v1
; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
; GFX9-NEXT: v_mov_b32_e32 v2, v0
; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX9-NEXT: s_cbranch_execnz .LBB127_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_endpgm
; Baseline kernel case: seq_cst i64 signed-min RMW directly on %out, no index
; and no offset. The expansion is the same cmpxchg retry loop on all targets;
; the initial value is read with a scalar s_load_dwordx2 from the pointer.
entry:
%tmp0 = atomicrmw min ptr addrspace(1) %out, i64 %in seq_cst
ret void
}
|
|
|
|
define amdgpu_kernel void @atomic_min_i64_ret_addr64(ptr addrspace(1) %out, ptr addrspace(1) %out2, i64 %in, i64 %index) {
; SI-LABEL: atomic_min_i64_ret_addr64:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dwordx8 s[0:7], s[2:3], 0x9
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_lshl_b64 s[6:7], s[6:7], 3
; SI-NEXT: s_add_u32 s8, s0, s6
; SI-NEXT: s_addc_u32 s9, s1, s7
; SI-NEXT: s_load_dwordx2 s[6:7], s[8:9], 0x0
; SI-NEXT: s_mov_b64 s[0:1], 0
; SI-NEXT: s_mov_b32 s11, 0xf000
; SI-NEXT: v_mov_b32_e32 v8, s5
; SI-NEXT: v_mov_b32_e32 v9, s4
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v2, s6
; SI-NEXT: v_mov_b32_e32 v3, s7
; SI-NEXT: s_mov_b32 s10, -1
; SI-NEXT: .LBB128_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: v_cmp_ge_i64_e32 vcc, s[4:5], v[2:3]
; SI-NEXT: v_cndmask_b32_e32 v1, v8, v3, vcc
; SI-NEXT: v_cndmask_b32_e32 v0, v9, v2, vcc
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mov_b32_e32 v7, v3
; SI-NEXT: v_mov_b32_e32 v6, v2
; SI-NEXT: v_mov_b32_e32 v5, v1
; SI-NEXT: v_mov_b32_e32 v4, v0
; SI-NEXT: buffer_atomic_cmpswap_x2 v[4:7], off, s[8:11], 0 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
; SI-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
; SI-NEXT: v_mov_b32_e32 v2, v4
; SI-NEXT: v_mov_b32_e32 v3, v5
; SI-NEXT: s_andn2_b64 exec, exec, s[0:1]
; SI-NEXT: s_cbranch_execnz .LBB128_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[0:1]
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: s_mov_b32 s4, s2
; SI-NEXT: s_mov_b32 s5, s3
; SI-NEXT: buffer_store_dwordx2 v[4:5], off, s[4:7], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: atomic_min_i64_ret_addr64:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx8 s[0:7], s[2:3], 0x24
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_lshl_b64 s[6:7], s[6:7], 3
; VI-NEXT: s_add_u32 s6, s0, s6
; VI-NEXT: s_addc_u32 s7, s1, s7
; VI-NEXT: s_load_dwordx2 s[8:9], s[6:7], 0x0
; VI-NEXT: v_mov_b32_e32 v0, s6
; VI-NEXT: s_mov_b64 s[0:1], 0
; VI-NEXT: v_mov_b32_e32 v4, s5
; VI-NEXT: v_mov_b32_e32 v5, s4
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v2, s8
; VI-NEXT: v_mov_b32_e32 v3, s9
; VI-NEXT: v_mov_b32_e32 v1, s7
; VI-NEXT: .LBB128_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: v_mov_b32_e32 v9, v3
; VI-NEXT: v_mov_b32_e32 v8, v2
; VI-NEXT: v_cmp_ge_i64_e32 vcc, s[4:5], v[8:9]
; VI-NEXT: v_cndmask_b32_e32 v7, v4, v9, vcc
; VI-NEXT: v_cndmask_b32_e32 v6, v5, v8, vcc
; VI-NEXT: flat_atomic_cmpswap_x2 v[2:3], v[0:1], v[6:9] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[8:9]
; VI-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
; VI-NEXT: s_andn2_b64 exec, exec, s[0:1]
; VI-NEXT: s_cbranch_execnz .LBB128_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[0:1]
; VI-NEXT: v_mov_b32_e32 v0, s2
; VI-NEXT: v_mov_b32_e32 v1, s3
; VI-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: atomic_min_i64_ret_addr64:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dwordx8 s[4:11], s[2:3], 0x24
; GFX9-NEXT: s_mov_b64 s[2:3], 0
; GFX9-NEXT: v_mov_b32_e32 v4, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: s_lshl_b64 s[0:1], s[10:11], 3
; GFX9-NEXT: s_add_u32 s0, s4, s0
; GFX9-NEXT: s_addc_u32 s1, s5, s1
; GFX9-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
; GFX9-NEXT: v_mov_b32_e32 v2, s9
; GFX9-NEXT: v_mov_b32_e32 v3, s8
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v0, s4
; GFX9-NEXT: v_mov_b32_e32 v1, s5
; GFX9-NEXT: .LBB128_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: v_mov_b32_e32 v8, v1
; GFX9-NEXT: v_mov_b32_e32 v7, v0
; GFX9-NEXT: v_cmp_ge_i64_e32 vcc, s[8:9], v[7:8]
; GFX9-NEXT: v_cndmask_b32_e32 v6, v2, v8, vcc
; GFX9-NEXT: v_cndmask_b32_e32 v5, v3, v7, vcc
; GFX9-NEXT: global_atomic_cmpswap_x2 v[0:1], v4, v[5:8], s[0:1] glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[7:8]
; GFX9-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
; GFX9-NEXT: s_andn2_b64 exec, exec, s[2:3]
; GFX9-NEXT: s_cbranch_execnz .LBB128_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[2:3]
; GFX9-NEXT: v_mov_b32_e32 v2, 0
; GFX9-NEXT: global_store_dwordx2 v2, v[0:1], s[6:7]
; GFX9-NEXT: s_endpgm
; Indexed kernel variant (no extra byte offset) that returns the old value:
; the pre-op value surviving the cmpxchg retry loop is stored to %out2.
; NOTE(review): VI's s_load_dwordx2 here writes s[8:9] while the address lives
; in s[6:7] — consistent with the xnack-replay no-clobber constraint from the
; file header; verify after any check regeneration.
entry:
%ptr = getelementptr i64, ptr addrspace(1) %out, i64 %index
%tmp0 = atomicrmw min ptr addrspace(1) %ptr, i64 %in seq_cst
store i64 %tmp0, ptr addrspace(1) %out2
ret void
}
|
|
|
|
define void @global_atomic_min_i64_noret_offset__amdgpu_no_remote_memory(ptr addrspace(1) %out, i64 %in) {
; SI-LABEL: global_atomic_min_i64_noret_offset__amdgpu_no_remote_memory:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
; SI-NEXT: buffer_load_dwordx2 v[6:7], v[0:1], s[4:7], 0 addr64 offset:32
; SI-NEXT: s_mov_b64 s[8:9], 0
; SI-NEXT: .LBB129_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cmp_le_i64_e32 vcc, v[6:7], v[2:3]
; SI-NEXT: v_cndmask_b32_e32 v5, v3, v7, vcc
; SI-NEXT: v_cndmask_b32_e32 v4, v2, v6, vcc
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mov_b32_e32 v11, v7
; SI-NEXT: v_mov_b32_e32 v10, v6
; SI-NEXT: v_mov_b32_e32 v9, v5
; SI-NEXT: v_mov_b32_e32 v8, v4
; SI-NEXT: buffer_atomic_cmpswap_x2 v[8:11], v[0:1], s[4:7], 0 addr64 offset:32 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[8:9], v[6:7]
; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
; SI-NEXT: v_mov_b32_e32 v6, v8
; SI-NEXT: v_mov_b32_e32 v7, v9
; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
; SI-NEXT: s_cbranch_execnz .LBB129_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[8:9]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_min_i64_noret_offset__amdgpu_no_remote_memory:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_add_u32_e32 v0, vcc, 32, v0
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI-NEXT: flat_load_dwordx2 v[6:7], v[0:1]
; VI-NEXT: s_mov_b64 s[4:5], 0
; VI-NEXT: .LBB129_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_cmp_le_i64_e32 vcc, v[6:7], v[2:3]
; VI-NEXT: v_cndmask_b32_e32 v5, v3, v7, vcc
; VI-NEXT: v_cndmask_b32_e32 v4, v2, v6, vcc
; VI-NEXT: flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
; VI-NEXT: v_mov_b32_e32 v7, v5
; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; VI-NEXT: v_mov_b32_e32 v6, v4
; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
; VI-NEXT: s_cbranch_execnz .LBB129_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[4:5]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_min_i64_noret_offset__amdgpu_no_remote_memory:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: global_load_dwordx2 v[6:7], v[0:1], off offset:32
; GFX9-NEXT: s_mov_b64 s[4:5], 0
; GFX9-NEXT: .LBB129_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_cmp_le_i64_e32 vcc, v[6:7], v[2:3]
; GFX9-NEXT: v_cndmask_b32_e32 v5, v3, v7, vcc
; GFX9-NEXT: v_cndmask_b32_e32 v4, v2, v6, vcc
; GFX9-NEXT: global_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7], off offset:32 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
; GFX9-NEXT: v_mov_b32_e32 v7, v5
; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; GFX9-NEXT: v_mov_b32_e32 v6, v4
; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_cbranch_execnz .LBB129_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_setpc_b64 s[30:31]
; Non-uniform (VGPR) pointer, no-return variant carrying the
; !amdgpu.no.remote.memory metadata: still expanded to a cmpxchg retry loop,
; here using v_cmp_le_i64 on the loaded value against the vector operand (the
; reversed operand order relative to the scalar variants' v_cmp_ge_i64).
%gep = getelementptr i64, ptr addrspace(1) %out, i64 4
%tmp0 = atomicrmw min ptr addrspace(1) %gep, i64 %in seq_cst, !amdgpu.no.remote.memory !0
ret void
}
|
|
|
|
define i64 @global_atomic_min_i64_ret_offset__amdgpu_no_remote_memory(ptr addrspace(1) %out, i64 %in) {
; Returning signed-min RMW at a 32-byte offset with !amdgpu.no.remote.memory.
; All three targets expand to a CAS loop (v_cmp_le_i64 + cndmask pair +
; cmpswap_x2 with glc) rather than a single hardware smin instruction;
; the loop exits via s_andn2_b64/s_cbranch_execnz when the swap succeeded.
; SI-LABEL: global_atomic_min_i64_ret_offset__amdgpu_no_remote_memory:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v5, v3
; SI-NEXT: v_mov_b32_e32 v4, v2
; SI-NEXT: v_mov_b32_e32 v7, v1
; SI-NEXT: v_mov_b32_e32 v6, v0
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
; SI-NEXT: buffer_load_dwordx2 v[0:1], v[6:7], s[4:7], 0 addr64 offset:32
; SI-NEXT: s_mov_b64 s[8:9], 0
; SI-NEXT: .LBB130_1: ; %atomicrmw.start
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_mov_b32_e32 v11, v1
; SI-NEXT: v_mov_b32_e32 v10, v0
; SI-NEXT: v_cmp_le_i64_e32 vcc, v[10:11], v[4:5]
; SI-NEXT: v_cndmask_b32_e32 v9, v5, v11, vcc
; SI-NEXT: v_cndmask_b32_e32 v8, v4, v10, vcc
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mov_b32_e32 v0, v8
; SI-NEXT: v_mov_b32_e32 v1, v9
; SI-NEXT: v_mov_b32_e32 v2, v10
; SI-NEXT: v_mov_b32_e32 v3, v11
; SI-NEXT: buffer_atomic_cmpswap_x2 v[0:3], v[6:7], s[4:7], 0 addr64 offset:32 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
; SI-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
; SI-NEXT: s_andn2_b64 exec, exec, s[8:9]
; SI-NEXT: s_cbranch_execnz .LBB130_1
; SI-NEXT: ; %bb.2: ; %atomicrmw.end
; SI-NEXT: s_or_b64 exec, exec, s[8:9]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_min_i64_ret_offset__amdgpu_no_remote_memory:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_add_u32_e32 v4, vcc, 32, v0
; VI-NEXT: v_addc_u32_e32 v5, vcc, 0, v1, vcc
; VI-NEXT: flat_load_dwordx2 v[0:1], v[4:5]
; VI-NEXT: s_mov_b64 s[4:5], 0
; VI-NEXT: .LBB130_1: ; %atomicrmw.start
; VI-NEXT: ; =>This Inner Loop Header: Depth=1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_mov_b32_e32 v9, v1
; VI-NEXT: v_mov_b32_e32 v8, v0
; VI-NEXT: v_cmp_le_i64_e32 vcc, v[8:9], v[2:3]
; VI-NEXT: v_cndmask_b32_e32 v7, v3, v9, vcc
; VI-NEXT: v_cndmask_b32_e32 v6, v2, v8, vcc
; VI-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[6:9] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[8:9]
; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
; VI-NEXT: s_cbranch_execnz .LBB130_1
; VI-NEXT: ; %bb.2: ; %atomicrmw.end
; VI-NEXT: s_or_b64 exec, exec, s[4:5]
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_min_i64_ret_offset__amdgpu_no_remote_memory:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: global_load_dwordx2 v[4:5], v[0:1], off offset:32
; GFX9-NEXT: s_mov_b64 s[4:5], 0
; GFX9-NEXT: .LBB130_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v7, v5
; GFX9-NEXT: v_mov_b32_e32 v6, v4
; GFX9-NEXT: v_cmp_le_i64_e32 vcc, v[6:7], v[2:3]
; GFX9-NEXT: v_cndmask_b32_e32 v5, v3, v7, vcc
; GFX9-NEXT: v_cndmask_b32_e32 v4, v2, v6, vcc
; GFX9-NEXT: global_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7], off offset:32 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_cbranch_execnz .LBB130_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX9-NEXT: v_mov_b32_e32 v0, v4
; GFX9-NEXT: v_mov_b32_e32 v1, v5
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %gep = getelementptr i64, ptr addrspace(1) %out, i64 4
  %result = atomicrmw min ptr addrspace(1) %gep, i64 %in seq_cst, !amdgpu.no.remote.memory !0
  ret i64 %result
}
|
|
|
|
; ---------------------------------------------------------------------
|
|
; atomicrmw uinc_wrap
|
|
; ---------------------------------------------------------------------
|
|
|
|
define void @global_atomic_uinc_wrap_i64_noret(ptr addrspace(1) %ptr, i64 %in) {
; No-return uinc_wrap: all three targets select the native 64-bit increment
; atomic (buffer/flat/global_atomic_inc_x2, no glc since the old value is
; unused); seq_cst adds the trailing waitcnt + cache invalidate.
; SI-LABEL: global_atomic_uinc_wrap_i64_noret:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
; SI-NEXT: buffer_atomic_inc_x2 v[2:3], v[0:1], s[4:7], 0 addr64
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_uinc_wrap_i64_noret:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: flat_atomic_inc_x2 v[0:1], v[2:3]
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_uinc_wrap_i64_noret:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: global_atomic_inc_x2 v[0:1], v[2:3], off
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %tmp0 = atomicrmw uinc_wrap ptr addrspace(1) %ptr, i64 %in seq_cst
  ret void
}
|
|
|
|
define void @global_atomic_uinc_wrap_i64_noret_offset(ptr addrspace(1) %out, i64 %in) {
; Same as the noret case but through a +32-byte GEP: SI and GFX9 fold the
; displacement into the instruction's offset field; VI (flat) must add it
; into the address with v_add/v_addc first.
; SI-LABEL: global_atomic_uinc_wrap_i64_noret_offset:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
; SI-NEXT: buffer_atomic_inc_x2 v[2:3], v[0:1], s[4:7], 0 addr64 offset:32
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_uinc_wrap_i64_noret_offset:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_add_u32_e32 v0, vcc, 32, v0
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI-NEXT: flat_atomic_inc_x2 v[0:1], v[2:3]
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_uinc_wrap_i64_noret_offset:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: global_atomic_inc_x2 v[0:1], v[2:3], off offset:32
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %gep = getelementptr i64, ptr addrspace(1) %out, i64 4
  %tmp0 = atomicrmw uinc_wrap ptr addrspace(1) %gep, i64 %in seq_cst
  ret void
}
|
|
|
|
define i64 @global_atomic_uinc_wrap_i64_ret(ptr addrspace(1) %ptr, i64 %in) {
; Value-returning uinc_wrap: the glc bit makes the atomic return the old
; value into the data VGPR pair; SI then copies v[2:3] into the v[0:1]
; return registers, while VI/GFX9 write v[0:1] directly.
; SI-LABEL: global_atomic_uinc_wrap_i64_ret:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
; SI-NEXT: buffer_atomic_inc_x2 v[2:3], v[0:1], s[4:7], 0 addr64 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_mov_b32_e32 v0, v2
; SI-NEXT: v_mov_b32_e32 v1, v3
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_uinc_wrap_i64_ret:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: flat_atomic_inc_x2 v[0:1], v[0:1], v[2:3] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_uinc_wrap_i64_ret:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: global_atomic_inc_x2 v[0:1], v[0:1], v[2:3], off glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %result = atomicrmw uinc_wrap ptr addrspace(1) %ptr, i64 %in seq_cst
  ret i64 %result
}
|
|
|
|
define i64 @global_atomic_uinc_wrap_i64_ret_offset(ptr addrspace(1) %out, i64 %in) {
; Value-returning uinc_wrap through a +32-byte GEP; combines the glc return
; form with the immediate-offset folding (SI/GFX9) or explicit address add (VI).
; SI-LABEL: global_atomic_uinc_wrap_i64_ret_offset:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
; SI-NEXT: buffer_atomic_inc_x2 v[2:3], v[0:1], s[4:7], 0 addr64 offset:32 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_mov_b32_e32 v0, v2
; SI-NEXT: v_mov_b32_e32 v1, v3
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_uinc_wrap_i64_ret_offset:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_add_u32_e32 v0, vcc, 32, v0
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI-NEXT: flat_atomic_inc_x2 v[0:1], v[0:1], v[2:3] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_uinc_wrap_i64_ret_offset:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: global_atomic_inc_x2 v[0:1], v[0:1], v[2:3], off offset:32 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %gep = getelementptr i64, ptr addrspace(1) %out, i64 4
  %result = atomicrmw uinc_wrap ptr addrspace(1) %gep, i64 %in seq_cst
  ret i64 %result
}
|
|
|
|
define amdgpu_gfx void @global_atomic_uinc_wrap_i64_noret_scalar(ptr addrspace(1) inreg %ptr, i64 inreg %in) {
; amdgpu_gfx variant with SGPR (inreg) operands. SI uses the SRD-based
; buffer form and must spill s6/s7 to VGPR lanes (writelane/readlane around
; an exec-mask save) to free SGPRs for the resource descriptor; VI copies
; address and data into VGPRs for the flat form; GFX9 uses the saddr form
; with a zero VGPR offset.
; SI-LABEL: global_atomic_uinc_wrap_i64_noret_scalar:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_writelane_b32 v2, s6, 0
; SI-NEXT: v_writelane_b32 v2, s7, 1
; SI-NEXT: s_mov_b32 s34, s7
; SI-NEXT: s_mov_b32 s35, s6
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: v_mov_b32_e32 v0, s35
; SI-NEXT: v_mov_b32_e32 v1, s34
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_atomic_inc_x2 v[0:1], off, s[4:7], 0
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_readlane_b32 s7, v2, 1
; SI-NEXT: v_readlane_b32 s6, v2, 0
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 ; 4-byte Folded Reload
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_uinc_wrap_i64_noret_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s6
; VI-NEXT: v_mov_b32_e32 v1, s7
; VI-NEXT: v_mov_b32_e32 v2, s4
; VI-NEXT: v_mov_b32_e32 v3, s5
; VI-NEXT: flat_atomic_inc_x2 v[2:3], v[0:1]
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_uinc_wrap_i64_noret_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v0, s6
; GFX9-NEXT: v_mov_b32_e32 v1, s7
; GFX9-NEXT: v_mov_b32_e32 v2, 0
; GFX9-NEXT: global_atomic_inc_x2 v2, v[0:1], s[4:5]
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %tmp0 = atomicrmw uinc_wrap ptr addrspace(1) %ptr, i64 %in seq_cst
  ret void
}
|
|
|
|
define amdgpu_gfx void @global_atomic_uinc_wrap_i64_noret_offset_scalar(ptr addrspace(1) inreg %out, i64 inreg %in) {
; SGPR-operand variant with a +32-byte GEP. SI/GFX9 fold the offset into the
; instruction; VI adds it with scalar s_add_u32/s_addc_u32 before moving the
; address to VGPRs. SI again lane-spills s6/s7 around the SRD setup.
; SI-LABEL: global_atomic_uinc_wrap_i64_noret_offset_scalar:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_writelane_b32 v2, s6, 0
; SI-NEXT: v_writelane_b32 v2, s7, 1
; SI-NEXT: v_mov_b32_e32 v0, s6
; SI-NEXT: v_mov_b32_e32 v1, s7
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_atomic_inc_x2 v[0:1], off, s[4:7], 0 offset:32
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_readlane_b32 s7, v2, 1
; SI-NEXT: v_readlane_b32 s6, v2, 0
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 ; 4-byte Folded Reload
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_uinc_wrap_i64_noret_offset_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_add_u32 s34, s4, 32
; VI-NEXT: s_addc_u32 s35, s5, 0
; VI-NEXT: v_mov_b32_e32 v2, s34
; VI-NEXT: v_mov_b32_e32 v0, s6
; VI-NEXT: v_mov_b32_e32 v1, s7
; VI-NEXT: v_mov_b32_e32 v3, s35
; VI-NEXT: flat_atomic_inc_x2 v[2:3], v[0:1]
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_uinc_wrap_i64_noret_offset_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v0, s6
; GFX9-NEXT: v_mov_b32_e32 v1, s7
; GFX9-NEXT: v_mov_b32_e32 v2, 0
; GFX9-NEXT: global_atomic_inc_x2 v2, v[0:1], s[4:5] offset:32
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %gep = getelementptr i64, ptr addrspace(1) %out, i64 4
  %tmp0 = atomicrmw uinc_wrap ptr addrspace(1) %gep, i64 %in seq_cst
  ret void
}
|
|
|
|
define amdgpu_gfx i64 @global_atomic_uinc_wrap_i64_ret_scalar(ptr addrspace(1) inreg %ptr, i64 inreg %in) {
; SGPR-operand, value-returning form (glc set); the old value lands in
; v[0:1]. SI uses the same writelane/readlane spill of s6/s7 as the noret
; scalar variant.
; SI-LABEL: global_atomic_uinc_wrap_i64_ret_scalar:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_writelane_b32 v2, s6, 0
; SI-NEXT: v_writelane_b32 v2, s7, 1
; SI-NEXT: s_mov_b32 s34, s7
; SI-NEXT: s_mov_b32 s35, s6
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: v_mov_b32_e32 v0, s35
; SI-NEXT: v_mov_b32_e32 v1, s34
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_atomic_inc_x2 v[0:1], off, s[4:7], 0 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_readlane_b32 s7, v2, 1
; SI-NEXT: v_readlane_b32 s6, v2, 0
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 ; 4-byte Folded Reload
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_uinc_wrap_i64_ret_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s6
; VI-NEXT: v_mov_b32_e32 v1, s7
; VI-NEXT: v_mov_b32_e32 v2, s4
; VI-NEXT: v_mov_b32_e32 v3, s5
; VI-NEXT: flat_atomic_inc_x2 v[0:1], v[2:3], v[0:1] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_uinc_wrap_i64_ret_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v0, s6
; GFX9-NEXT: v_mov_b32_e32 v1, s7
; GFX9-NEXT: v_mov_b32_e32 v2, 0
; GFX9-NEXT: global_atomic_inc_x2 v[0:1], v2, v[0:1], s[4:5] glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %result = atomicrmw uinc_wrap ptr addrspace(1) %ptr, i64 %in seq_cst
  ret i64 %result
}
|
|
|
|
define amdgpu_gfx i64 @global_atomic_uinc_wrap_i64_ret_offset_scalar(ptr addrspace(1) inreg %out, i64 inreg %in) {
; SGPR-operand, value-returning form through a +32-byte GEP: glc return plus
; immediate-offset folding (SI/GFX9) or scalar address add (VI).
; SI-LABEL: global_atomic_uinc_wrap_i64_ret_offset_scalar:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_writelane_b32 v2, s6, 0
; SI-NEXT: v_writelane_b32 v2, s7, 1
; SI-NEXT: v_mov_b32_e32 v0, s6
; SI-NEXT: v_mov_b32_e32 v1, s7
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_atomic_inc_x2 v[0:1], off, s[4:7], 0 offset:32 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_readlane_b32 s7, v2, 1
; SI-NEXT: v_readlane_b32 s6, v2, 0
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 ; 4-byte Folded Reload
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_uinc_wrap_i64_ret_offset_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_add_u32 s34, s4, 32
; VI-NEXT: s_addc_u32 s35, s5, 0
; VI-NEXT: v_mov_b32_e32 v2, s34
; VI-NEXT: v_mov_b32_e32 v0, s6
; VI-NEXT: v_mov_b32_e32 v1, s7
; VI-NEXT: v_mov_b32_e32 v3, s35
; VI-NEXT: flat_atomic_inc_x2 v[0:1], v[2:3], v[0:1] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_uinc_wrap_i64_ret_offset_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v0, s6
; GFX9-NEXT: v_mov_b32_e32 v1, s7
; GFX9-NEXT: v_mov_b32_e32 v2, 0
; GFX9-NEXT: global_atomic_inc_x2 v[0:1], v2, v[0:1], s[4:5] offset:32 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %gep = getelementptr i64, ptr addrspace(1) %out, i64 4
  %result = atomicrmw uinc_wrap ptr addrspace(1) %gep, i64 %in seq_cst
  ret i64 %result
}
|
|
|
|
define void @global_atomic_uinc_wrap_i64_noret_offset__amdgpu_no_remote_memory(ptr addrspace(1) %out, i64 %in) {
; !amdgpu.no.remote.memory does not change the lowering for uinc_wrap:
; output is identical to global_atomic_uinc_wrap_i64_noret_offset
; (native inc_x2 instruction, no CAS expansion).
; SI-LABEL: global_atomic_uinc_wrap_i64_noret_offset__amdgpu_no_remote_memory:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
; SI-NEXT: buffer_atomic_inc_x2 v[2:3], v[0:1], s[4:7], 0 addr64 offset:32
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_uinc_wrap_i64_noret_offset__amdgpu_no_remote_memory:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_add_u32_e32 v0, vcc, 32, v0
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI-NEXT: flat_atomic_inc_x2 v[0:1], v[2:3]
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_uinc_wrap_i64_noret_offset__amdgpu_no_remote_memory:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: global_atomic_inc_x2 v[0:1], v[2:3], off offset:32
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %gep = getelementptr i64, ptr addrspace(1) %out, i64 4
  %tmp0 = atomicrmw uinc_wrap ptr addrspace(1) %gep, i64 %in seq_cst, !amdgpu.no.remote.memory !0
  ret void
}
|
|
|
|
define i64 @global_atomic_uinc_wrap_i64_ret_offset__amdgpu_no_remote_memory(ptr addrspace(1) %out, i64 %in) {
; Value-returning counterpart of the no-remote-memory case; lowering is
; identical to global_atomic_uinc_wrap_i64_ret_offset (native inc_x2 + glc).
; SI-LABEL: global_atomic_uinc_wrap_i64_ret_offset__amdgpu_no_remote_memory:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
; SI-NEXT: buffer_atomic_inc_x2 v[2:3], v[0:1], s[4:7], 0 addr64 offset:32 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_mov_b32_e32 v0, v2
; SI-NEXT: v_mov_b32_e32 v1, v3
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_uinc_wrap_i64_ret_offset__amdgpu_no_remote_memory:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_add_u32_e32 v0, vcc, 32, v0
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI-NEXT: flat_atomic_inc_x2 v[0:1], v[0:1], v[2:3] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_uinc_wrap_i64_ret_offset__amdgpu_no_remote_memory:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: global_atomic_inc_x2 v[0:1], v[0:1], v[2:3], off offset:32 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %gep = getelementptr i64, ptr addrspace(1) %out, i64 4
  %result = atomicrmw uinc_wrap ptr addrspace(1) %gep, i64 %in seq_cst, !amdgpu.no.remote.memory !0
  ret i64 %result
}
|
|
|
|
; ---------------------------------------------------------------------
|
|
; atomicrmw udec_wrap
|
|
; ---------------------------------------------------------------------
|
|
|
|
define void @global_atomic_udec_wrap_i64_noret(ptr addrspace(1) %ptr, i64 %in) {
; No-return udec_wrap: all three targets select the native 64-bit decrement
; atomic (buffer/flat/global_atomic_dec_x2); mirrors the uinc_wrap noret case.
; SI-LABEL: global_atomic_udec_wrap_i64_noret:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
; SI-NEXT: buffer_atomic_dec_x2 v[2:3], v[0:1], s[4:7], 0 addr64
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_udec_wrap_i64_noret:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: flat_atomic_dec_x2 v[0:1], v[2:3]
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_udec_wrap_i64_noret:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: global_atomic_dec_x2 v[0:1], v[2:3], off
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %tmp0 = atomicrmw udec_wrap ptr addrspace(1) %ptr, i64 %in seq_cst
  ret void
}
|
|
|
|
define void @global_atomic_udec_wrap_i64_noret_offset(ptr addrspace(1) %out, i64 %in) {
; No-return udec_wrap through a +32-byte GEP: SI/GFX9 fold the displacement
; into the offset field; VI (flat) adds it into the address first.
; SI-LABEL: global_atomic_udec_wrap_i64_noret_offset:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
; SI-NEXT: buffer_atomic_dec_x2 v[2:3], v[0:1], s[4:7], 0 addr64 offset:32
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_udec_wrap_i64_noret_offset:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_add_u32_e32 v0, vcc, 32, v0
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI-NEXT: flat_atomic_dec_x2 v[0:1], v[2:3]
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_udec_wrap_i64_noret_offset:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: global_atomic_dec_x2 v[0:1], v[2:3], off offset:32
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %gep = getelementptr i64, ptr addrspace(1) %out, i64 4
  %tmp0 = atomicrmw udec_wrap ptr addrspace(1) %gep, i64 %in seq_cst
  ret void
}
|
|
|
|
define i64 @global_atomic_udec_wrap_i64_ret(ptr addrspace(1) %ptr, i64 %in) {
; Value-returning udec_wrap: glc makes the atomic return the old value;
; SI then copies v[2:3] into the v[0:1] return registers.
; SI-LABEL: global_atomic_udec_wrap_i64_ret:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
; SI-NEXT: buffer_atomic_dec_x2 v[2:3], v[0:1], s[4:7], 0 addr64 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_mov_b32_e32 v0, v2
; SI-NEXT: v_mov_b32_e32 v1, v3
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_udec_wrap_i64_ret:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: flat_atomic_dec_x2 v[0:1], v[0:1], v[2:3] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_udec_wrap_i64_ret:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: global_atomic_dec_x2 v[0:1], v[0:1], v[2:3], off glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %result = atomicrmw udec_wrap ptr addrspace(1) %ptr, i64 %in seq_cst
  ret i64 %result
}
|
|
|
|
define i64 @global_atomic_udec_wrap_i64_ret_offset(ptr addrspace(1) %out, i64 %in) {
; Value-returning udec_wrap through a +32-byte GEP: glc return combined with
; the immediate-offset folding (SI/GFX9) or explicit address add (VI).
; SI-LABEL: global_atomic_udec_wrap_i64_ret_offset:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s4, s6
; SI-NEXT: s_mov_b32 s5, s6
; SI-NEXT: buffer_atomic_dec_x2 v[2:3], v[0:1], s[4:7], 0 addr64 offset:32 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_mov_b32_e32 v0, v2
; SI-NEXT: v_mov_b32_e32 v1, v3
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_udec_wrap_i64_ret_offset:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_add_u32_e32 v0, vcc, 32, v0
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI-NEXT: flat_atomic_dec_x2 v[0:1], v[0:1], v[2:3] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_udec_wrap_i64_ret_offset:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: global_atomic_dec_x2 v[0:1], v[0:1], v[2:3], off offset:32 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %gep = getelementptr i64, ptr addrspace(1) %out, i64 4
  %result = atomicrmw udec_wrap ptr addrspace(1) %gep, i64 %in seq_cst
  ret i64 %result
}
|
|
|
|
define amdgpu_gfx void @global_atomic_udec_wrap_i64_noret_scalar(ptr addrspace(1) inreg %ptr, i64 inreg %in) {
; amdgpu_gfx variant with SGPR (inreg) operands; mirrors the uinc_wrap
; scalar case. SI lane-spills s6/s7 to build the buffer resource descriptor;
; VI moves address+data to VGPRs; GFX9 uses the saddr form with v2 = 0.
; SI-LABEL: global_atomic_udec_wrap_i64_noret_scalar:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_writelane_b32 v2, s6, 0
; SI-NEXT: v_writelane_b32 v2, s7, 1
; SI-NEXT: s_mov_b32 s34, s7
; SI-NEXT: s_mov_b32 s35, s6
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: v_mov_b32_e32 v0, s35
; SI-NEXT: v_mov_b32_e32 v1, s34
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_atomic_dec_x2 v[0:1], off, s[4:7], 0
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_readlane_b32 s7, v2, 1
; SI-NEXT: v_readlane_b32 s6, v2, 0
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 ; 4-byte Folded Reload
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_udec_wrap_i64_noret_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s6
; VI-NEXT: v_mov_b32_e32 v1, s7
; VI-NEXT: v_mov_b32_e32 v2, s4
; VI-NEXT: v_mov_b32_e32 v3, s5
; VI-NEXT: flat_atomic_dec_x2 v[2:3], v[0:1]
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_udec_wrap_i64_noret_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v0, s6
; GFX9-NEXT: v_mov_b32_e32 v1, s7
; GFX9-NEXT: v_mov_b32_e32 v2, 0
; GFX9-NEXT: global_atomic_dec_x2 v2, v[0:1], s[4:5]
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %tmp0 = atomicrmw udec_wrap ptr addrspace(1) %ptr, i64 %in seq_cst
  ret void
}
|
|
|
|
define amdgpu_gfx void @global_atomic_udec_wrap_i64_noret_offset_scalar(ptr addrspace(1) inreg %out, i64 inreg %in) {
; SGPR-operand udec_wrap with a +32-byte GEP. SI/GFX9 fold the offset;
; VI adds it with scalar s_add_u32/s_addc_u32 before the flat atomic.
; SI-LABEL: global_atomic_udec_wrap_i64_noret_offset_scalar:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_writelane_b32 v2, s6, 0
; SI-NEXT: v_writelane_b32 v2, s7, 1
; SI-NEXT: v_mov_b32_e32 v0, s6
; SI-NEXT: v_mov_b32_e32 v1, s7
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_atomic_dec_x2 v[0:1], off, s[4:7], 0 offset:32
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_readlane_b32 s7, v2, 1
; SI-NEXT: v_readlane_b32 s6, v2, 0
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 ; 4-byte Folded Reload
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_udec_wrap_i64_noret_offset_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_add_u32 s34, s4, 32
; VI-NEXT: s_addc_u32 s35, s5, 0
; VI-NEXT: v_mov_b32_e32 v2, s34
; VI-NEXT: v_mov_b32_e32 v0, s6
; VI-NEXT: v_mov_b32_e32 v1, s7
; VI-NEXT: v_mov_b32_e32 v3, s35
; VI-NEXT: flat_atomic_dec_x2 v[2:3], v[0:1]
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_udec_wrap_i64_noret_offset_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v0, s6
; GFX9-NEXT: v_mov_b32_e32 v1, s7
; GFX9-NEXT: v_mov_b32_e32 v2, 0
; GFX9-NEXT: global_atomic_dec_x2 v2, v[0:1], s[4:5] offset:32
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %gep = getelementptr i64, ptr addrspace(1) %out, i64 4
  %tmp0 = atomicrmw udec_wrap ptr addrspace(1) %gep, i64 %in seq_cst
  ret void
}
|
|
|
|
define amdgpu_gfx i64 @global_atomic_udec_wrap_i64_ret_scalar(ptr addrspace(1) inreg %ptr, i64 inreg %in) {
; SGPR-operand, value-returning udec_wrap (glc set); old value is returned
; in v[0:1]. SI lane-spills s6/s7 as in the other scalar variants.
; SI-LABEL: global_atomic_udec_wrap_i64_ret_scalar:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_writelane_b32 v2, s6, 0
; SI-NEXT: v_writelane_b32 v2, s7, 1
; SI-NEXT: s_mov_b32 s34, s7
; SI-NEXT: s_mov_b32 s35, s6
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: v_mov_b32_e32 v0, s35
; SI-NEXT: v_mov_b32_e32 v1, s34
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_atomic_dec_x2 v[0:1], off, s[4:7], 0 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_wbinvl1
; SI-NEXT: v_readlane_b32 s7, v2, 1
; SI-NEXT: v_readlane_b32 s6, v2, 0
; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1
; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 ; 4-byte Folded Reload
; SI-NEXT: s_mov_b64 exec, s[34:35]
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: global_atomic_udec_wrap_i64_ret_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s6
; VI-NEXT: v_mov_b32_e32 v1, s7
; VI-NEXT: v_mov_b32_e32 v2, s4
; VI-NEXT: v_mov_b32_e32 v3, s5
; VI-NEXT: flat_atomic_dec_x2 v[0:1], v[2:3], v[0:1] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: buffer_wbinvl1_vol
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: global_atomic_udec_wrap_i64_ret_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v0, s6
; GFX9-NEXT: v_mov_b32_e32 v1, s7
; GFX9-NEXT: v_mov_b32_e32 v2, 0
; GFX9-NEXT: global_atomic_dec_x2 v[0:1], v2, v[0:1], s[4:5] glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: buffer_wbinvl1_vol
; GFX9-NEXT: s_setpc_b64 s[30:31]
  %result = atomicrmw udec_wrap ptr addrspace(1) %ptr, i64 %in seq_cst
  ret i64 %result
}
|
|
|
|
; Same as the ret_scalar case but through a GEP of +4 i64 elements (32 bytes):
; SI folds the displacement into the buffer instruction's offset:32, VI must
; materialize the address with s_add_u32/s_addc_u32 before the flat atomic,
; and GFX9 uses the global instruction's immediate offset:32.  glc returns
; the pre-decrement value.
define amdgpu_gfx i64 @global_atomic_udec_wrap_i64_ret_offset_scalar(ptr addrspace(1) inreg %out, i64 inreg %in) {


; SI-LABEL: global_atomic_udec_wrap_i64_ret_offset_scalar:


; SI: ; %bb.0:


; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)


; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1


; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 ; 4-byte Folded Spill


; SI-NEXT: s_mov_b64 exec, s[34:35]


; SI-NEXT: s_waitcnt expcnt(0)


; SI-NEXT: v_writelane_b32 v2, s6, 0


; SI-NEXT: v_writelane_b32 v2, s7, 1


; SI-NEXT: v_mov_b32_e32 v0, s6


; SI-NEXT: v_mov_b32_e32 v1, s7


; SI-NEXT: s_mov_b32 s7, 0xf000


; SI-NEXT: s_mov_b32 s6, -1


; SI-NEXT: s_waitcnt vmcnt(0)


; SI-NEXT: buffer_atomic_dec_x2 v[0:1], off, s[4:7], 0 offset:32 glc


; SI-NEXT: s_waitcnt vmcnt(0)


; SI-NEXT: buffer_wbinvl1


; SI-NEXT: v_readlane_b32 s7, v2, 1


; SI-NEXT: v_readlane_b32 s6, v2, 0


; SI-NEXT: s_xor_saveexec_b64 s[34:35], -1


; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 ; 4-byte Folded Reload


; SI-NEXT: s_mov_b64 exec, s[34:35]


; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)


; SI-NEXT: s_setpc_b64 s[30:31]


;


; VI-LABEL: global_atomic_udec_wrap_i64_ret_offset_scalar:


; VI: ; %bb.0:


; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)


; VI-NEXT: s_add_u32 s34, s4, 32


; VI-NEXT: s_addc_u32 s35, s5, 0


; VI-NEXT: v_mov_b32_e32 v2, s34


; VI-NEXT: v_mov_b32_e32 v0, s6


; VI-NEXT: v_mov_b32_e32 v1, s7


; VI-NEXT: v_mov_b32_e32 v3, s35


; VI-NEXT: flat_atomic_dec_x2 v[0:1], v[2:3], v[0:1] glc


; VI-NEXT: s_waitcnt vmcnt(0)


; VI-NEXT: buffer_wbinvl1_vol


; VI-NEXT: s_setpc_b64 s[30:31]


;


; GFX9-LABEL: global_atomic_udec_wrap_i64_ret_offset_scalar:


; GFX9: ; %bb.0:


; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)


; GFX9-NEXT: v_mov_b32_e32 v0, s6


; GFX9-NEXT: v_mov_b32_e32 v1, s7


; GFX9-NEXT: v_mov_b32_e32 v2, 0


; GFX9-NEXT: global_atomic_dec_x2 v[0:1], v2, v[0:1], s[4:5] offset:32 glc


; GFX9-NEXT: s_waitcnt vmcnt(0)


; GFX9-NEXT: buffer_wbinvl1_vol


; GFX9-NEXT: s_setpc_b64 s[30:31]


%gep = getelementptr i64, ptr addrspace(1) %out, i64 4


%result = atomicrmw udec_wrap ptr addrspace(1) %gep, i64 %in seq_cst


ret i64 %result


}
|
|
|
|
; Divergent-pointer, no-return variant carrying !amdgpu.no.remote.memory: the
; checks confirm each target still emits a single hardware dec_x2 atomic (no
; CAS expansion loop).  SI uses a buffer atomic in addr64 mode with offset:32,
; VI adds the 32-byte displacement into the VGPR pointer pair, GFX9 folds it
; into the global instruction's immediate offset.  No glc since the result is
; unused.
define void @global_atomic_udec_wrap_i64_noret_offset__amdgpu_no_remote_memory(ptr addrspace(1) %out, i64 %in) {


; SI-LABEL: global_atomic_udec_wrap_i64_noret_offset__amdgpu_no_remote_memory:


; SI: ; %bb.0:


; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)


; SI-NEXT: s_mov_b32 s6, 0


; SI-NEXT: s_mov_b32 s7, 0xf000


; SI-NEXT: s_mov_b32 s4, s6


; SI-NEXT: s_mov_b32 s5, s6


; SI-NEXT: buffer_atomic_dec_x2 v[2:3], v[0:1], s[4:7], 0 addr64 offset:32


; SI-NEXT: s_waitcnt vmcnt(0)


; SI-NEXT: buffer_wbinvl1


; SI-NEXT: s_waitcnt expcnt(0)


; SI-NEXT: s_setpc_b64 s[30:31]


;


; VI-LABEL: global_atomic_udec_wrap_i64_noret_offset__amdgpu_no_remote_memory:


; VI: ; %bb.0:


; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)


; VI-NEXT: v_add_u32_e32 v0, vcc, 32, v0


; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc


; VI-NEXT: flat_atomic_dec_x2 v[0:1], v[2:3]


; VI-NEXT: s_waitcnt vmcnt(0)


; VI-NEXT: buffer_wbinvl1_vol


; VI-NEXT: s_setpc_b64 s[30:31]


;


; GFX9-LABEL: global_atomic_udec_wrap_i64_noret_offset__amdgpu_no_remote_memory:


; GFX9: ; %bb.0:


; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)


; GFX9-NEXT: global_atomic_dec_x2 v[0:1], v[2:3], off offset:32


; GFX9-NEXT: s_waitcnt vmcnt(0)


; GFX9-NEXT: buffer_wbinvl1_vol


; GFX9-NEXT: s_setpc_b64 s[30:31]


%gep = getelementptr i64, ptr addrspace(1) %out, i64 4


%tmp0 = atomicrmw udec_wrap ptr addrspace(1) %gep, i64 %in seq_cst, !amdgpu.no.remote.memory !0


ret void


}
|
|
|
|
; Returned-value counterpart of the no-remote-memory test above: the atomic is
; issued with glc so the pre-decrement value comes back in the data VGPRs.
; SI's result lands in v[2:3] and is copied into the ABI return registers
; v0/v1; VI and GFX9 write the result straight into v[0:1].
define i64 @global_atomic_udec_wrap_i64_ret_offset__amdgpu_no_remote_memory(ptr addrspace(1) %out, i64 %in) {


; SI-LABEL: global_atomic_udec_wrap_i64_ret_offset__amdgpu_no_remote_memory:


; SI: ; %bb.0:


; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)


; SI-NEXT: s_mov_b32 s6, 0


; SI-NEXT: s_mov_b32 s7, 0xf000


; SI-NEXT: s_mov_b32 s4, s6


; SI-NEXT: s_mov_b32 s5, s6


; SI-NEXT: buffer_atomic_dec_x2 v[2:3], v[0:1], s[4:7], 0 addr64 offset:32 glc


; SI-NEXT: s_waitcnt vmcnt(0)


; SI-NEXT: buffer_wbinvl1


; SI-NEXT: v_mov_b32_e32 v0, v2


; SI-NEXT: v_mov_b32_e32 v1, v3


; SI-NEXT: s_waitcnt expcnt(0)


; SI-NEXT: s_setpc_b64 s[30:31]


;


; VI-LABEL: global_atomic_udec_wrap_i64_ret_offset__amdgpu_no_remote_memory:


; VI: ; %bb.0:


; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)


; VI-NEXT: v_add_u32_e32 v0, vcc, 32, v0


; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc


; VI-NEXT: flat_atomic_dec_x2 v[0:1], v[0:1], v[2:3] glc


; VI-NEXT: s_waitcnt vmcnt(0)


; VI-NEXT: buffer_wbinvl1_vol


; VI-NEXT: s_setpc_b64 s[30:31]


;


; GFX9-LABEL: global_atomic_udec_wrap_i64_ret_offset__amdgpu_no_remote_memory:


; GFX9: ; %bb.0:


; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)


; GFX9-NEXT: global_atomic_dec_x2 v[0:1], v[0:1], v[2:3], off offset:32 glc


; GFX9-NEXT: s_waitcnt vmcnt(0)


; GFX9-NEXT: buffer_wbinvl1_vol


; GFX9-NEXT: s_setpc_b64 s[30:31]


%gep = getelementptr i64, ptr addrspace(1) %out, i64 4


%result = atomicrmw udec_wrap ptr addrspace(1) %gep, i64 %in seq_cst, !amdgpu.no.remote.memory !0


ret i64 %result


}
|
|
|
|
!0 = !{}
|