The earlier implementation on AMDGPU used explicit token operands at SI_CALL and SI_CALL_ISEL. This is now replaced with CONVERGENCECTRL_GLUE operands, with the following effects:
- The treatment of tokens at call-like operations is now consistent with the treatment at intrinsics.
- Support for tail calls using implicit tokens at SI_TCRETURN "just works".
- The extra parameter at call-like instructions is eliminated, thus restoring those instructions and their handling to their original state.
The new glue node is placed after the existing glue node for the outgoing call parameters, which appears not to interfere with selection of the call-like nodes.
1764 lines · 78 KiB · LLVM
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 3
; RUN: llc -O0 -mtriple=amdgcn- -mcpu=gfx900 -amdgpu-dpp-combine=false -verify-machineinstrs < %s | FileCheck -check-prefix=GFX9-O0 %s
; RUN: llc -mtriple=amdgcn- -mcpu=gfx900 -amdgpu-dpp-combine=false -verify-machineinstrs < %s | FileCheck -check-prefix=GFX9-O3 %s

; NOTE: llvm.amdgcn.wwm is deprecated, use llvm.amdgcn.strict.wwm instead.
; Single-basic-block compute shader: two lanes-inactive-masked values
; (set.inactive) are combined via update.dpp inside WWM sections and
; compared; exercises the deprecated llvm.amdgcn.wwm at -O0 and -O3.
define amdgpu_cs void @no_cfg(ptr addrspace(8) inreg %tmp14) {
; GFX9-O0-LABEL: no_cfg:
; GFX9-O0: ; %bb.0:
; GFX9-O0-NEXT: s_mov_b32 s6, s2
; GFX9-O0-NEXT: s_mov_b32 s4, s0
; GFX9-O0-NEXT: ; kill: def $sgpr6 killed $sgpr6 def $sgpr6_sgpr7
; GFX9-O0-NEXT: s_mov_b32 s7, s3
; GFX9-O0-NEXT: s_mov_b32 s8, s7
; GFX9-O0-NEXT: s_mov_b32 s9, s6
; GFX9-O0-NEXT: ; kill: def $sgpr4 killed $sgpr4 def $sgpr4_sgpr5
; GFX9-O0-NEXT: s_mov_b32 s5, s1
; GFX9-O0-NEXT: s_mov_b32 s10, s5
; GFX9-O0-NEXT: s_mov_b32 s0, s4
; GFX9-O0-NEXT: ; kill: def $sgpr0 killed $sgpr0 def $sgpr0_sgpr1_sgpr2_sgpr3
; GFX9-O0-NEXT: s_mov_b32 s1, s10
; GFX9-O0-NEXT: s_mov_b32 s2, s9
; GFX9-O0-NEXT: s_mov_b32 s3, s8
; GFX9-O0-NEXT: s_mov_b32 s4, 0
; GFX9-O0-NEXT: buffer_load_dwordx2 v[5:6], off, s[0:3], s4
; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
; GFX9-O0-NEXT: v_mov_b32_e32 v4, v5
; GFX9-O0-NEXT: v_mov_b32_e32 v3, v6
; GFX9-O0-NEXT: v_mov_b32_e32 v0, v4
; GFX9-O0-NEXT: s_not_b64 exec, exec
; GFX9-O0-NEXT: v_mov_b32_e32 v0, s4
; GFX9-O0-NEXT: s_not_b64 exec, exec
; GFX9-O0-NEXT: v_mov_b32_e32 v1, v3
; GFX9-O0-NEXT: s_not_b64 exec, exec
; GFX9-O0-NEXT: v_mov_b32_e32 v1, s4
; GFX9-O0-NEXT: s_not_b64 exec, exec
; GFX9-O0-NEXT: s_or_saveexec_b64 s[6:7], -1
; GFX9-O0-NEXT: v_mov_b32_e32 v2, s4
; GFX9-O0-NEXT: s_nop 1
; GFX9-O0-NEXT: v_mov_b32_dpp v2, v0 row_bcast:31 row_mask:0xc bank_mask:0xf
; GFX9-O0-NEXT: v_add_u32_e64 v0, v0, v2
; GFX9-O0-NEXT: s_mov_b64 exec, s[6:7]
; GFX9-O0-NEXT: v_mov_b32_e32 v3, v0
; GFX9-O0-NEXT: s_or_saveexec_b64 s[6:7], -1
; GFX9-O0-NEXT: v_mov_b32_e32 v0, s4
; GFX9-O0-NEXT: s_nop 1
; GFX9-O0-NEXT: v_mov_b32_dpp v0, v1 row_bcast:31 row_mask:0xc bank_mask:0xf
; GFX9-O0-NEXT: v_add_u32_e64 v0, v1, v0
; GFX9-O0-NEXT: s_mov_b64 exec, s[6:7]
; GFX9-O0-NEXT: v_mov_b32_e32 v4, v0
; GFX9-O0-NEXT: v_cmp_eq_u32_e64 s[6:7], v3, v4
; GFX9-O0-NEXT: v_cndmask_b32_e64 v3, 0, 1, s[6:7]
; GFX9-O0-NEXT: s_mov_b32 s5, 1
; GFX9-O0-NEXT: v_lshlrev_b32_e64 v3, s5, v3
; GFX9-O0-NEXT: s_mov_b32 s5, 2
; GFX9-O0-NEXT: v_and_b32_e64 v3, v3, s5
; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s4 offset:4
; GFX9-O0-NEXT: s_endpgm
;
; GFX9-O3-LABEL: no_cfg:
; GFX9-O3: ; %bb.0:
; GFX9-O3-NEXT: buffer_load_dwordx2 v[4:5], off, s[0:3], 0
; GFX9-O3-NEXT: s_or_saveexec_b64 s[4:5], -1
; GFX9-O3-NEXT: v_mov_b32_e32 v0, 0
; GFX9-O3-NEXT: v_mov_b32_e32 v1, 0
; GFX9-O3-NEXT: s_mov_b64 exec, s[4:5]
; GFX9-O3-NEXT: s_waitcnt vmcnt(0)
; GFX9-O3-NEXT: v_mov_b32_e32 v2, v4
; GFX9-O3-NEXT: s_not_b64 exec, exec
; GFX9-O3-NEXT: v_mov_b32_e32 v2, 0
; GFX9-O3-NEXT: s_not_b64 exec, exec
; GFX9-O3-NEXT: v_mov_b32_e32 v3, v5
; GFX9-O3-NEXT: s_not_b64 exec, exec
; GFX9-O3-NEXT: v_mov_b32_e32 v3, 0
; GFX9-O3-NEXT: s_not_b64 exec, exec
; GFX9-O3-NEXT: s_or_saveexec_b64 s[4:5], -1
; GFX9-O3-NEXT: v_mov_b32_dpp v1, v2 row_bcast:31 row_mask:0xc bank_mask:0xf
; GFX9-O3-NEXT: v_mov_b32_dpp v0, v3 row_bcast:31 row_mask:0xc bank_mask:0xf
; GFX9-O3-NEXT: v_add_u32_e32 v1, v2, v1
; GFX9-O3-NEXT: v_add_u32_e32 v0, v3, v0
; GFX9-O3-NEXT: s_mov_b64 exec, s[4:5]
; GFX9-O3-NEXT: v_mov_b32_e32 v4, v1
; GFX9-O3-NEXT: v_mov_b32_e32 v5, v0
; GFX9-O3-NEXT: v_cmp_eq_u32_e32 vcc, v4, v5
; GFX9-O3-NEXT: v_cndmask_b32_e64 v4, 0, 1, vcc
; GFX9-O3-NEXT: v_lshlrev_b32_e32 v4, 1, v4
; GFX9-O3-NEXT: v_and_b32_e32 v4, 2, v4
; GFX9-O3-NEXT: buffer_store_dword v4, off, s[0:3], 0 offset:4
; GFX9-O3-NEXT: s_endpgm
  %tmp100 = call <2 x float> @llvm.amdgcn.raw.ptr.buffer.load.v2f32(ptr addrspace(8) %tmp14, i32 0, i32 0, i32 0)
  %tmp101 = bitcast <2 x float> %tmp100 to <2 x i32>
  %tmp102 = extractelement <2 x i32> %tmp101, i32 0
  %tmp103 = extractelement <2 x i32> %tmp101, i32 1
  %tmp105 = tail call i32 @llvm.amdgcn.set.inactive.i32(i32 %tmp102, i32 0)
  %tmp107 = tail call i32 @llvm.amdgcn.set.inactive.i32(i32 %tmp103, i32 0)

  %tmp120 = tail call i32 @llvm.amdgcn.update.dpp.i32(i32 0, i32 %tmp105, i32 323, i32 12, i32 15, i1 false)
  %tmp121 = add i32 %tmp105, %tmp120
  %tmp122 = tail call i32 @llvm.amdgcn.wwm.i32(i32 %tmp121)

  %tmp135 = tail call i32 @llvm.amdgcn.update.dpp.i32(i32 0, i32 %tmp107, i32 323, i32 12, i32 15, i1 false)
  %tmp136 = add i32 %tmp107, %tmp135
  %tmp137 = tail call i32 @llvm.amdgcn.wwm.i32(i32 %tmp136)

  %tmp138 = icmp eq i32 %tmp122, %tmp137
  %tmp139 = sext i1 %tmp138 to i32
  %tmp140 = shl nsw i32 %tmp139, 1
  %tmp141 = and i32 %tmp140, 2
  %tmp145 = bitcast i32 %tmp141 to float
  call void @llvm.amdgcn.raw.ptr.buffer.store.f32(float %tmp145, ptr addrspace(8) %tmp14, i32 4, i32 0, i32 0)
  ret void
}
; Same WWM pattern as @no_cfg but with control flow: one set.inactive /
; update.dpp / wwm chain in the entry block and a second one inside a
; conditionally executed %if block, merged via a phi.
define amdgpu_cs void @cfg(ptr addrspace(8) inreg %tmp14, i32 %arg) {
; GFX9-O0-LABEL: cfg:
; GFX9-O0: ; %bb.0: ; %entry
; GFX9-O0-NEXT: s_mov_b32 s16, SCRATCH_RSRC_DWORD0
; GFX9-O0-NEXT: s_mov_b32 s17, SCRATCH_RSRC_DWORD1
; GFX9-O0-NEXT: s_mov_b32 s18, -1
; GFX9-O0-NEXT: s_mov_b32 s19, 0xe00000
; GFX9-O0-NEXT: s_add_u32 s16, s16, s4
; GFX9-O0-NEXT: s_addc_u32 s17, s17, 0
; GFX9-O0-NEXT: ; implicit-def: $vgpr3 : SGPR spill to VGPR lane
; GFX9-O0-NEXT: v_mov_b32_e32 v3, v0
; GFX9-O0-NEXT: s_or_saveexec_b64 s[12:13], -1
; GFX9-O0-NEXT: buffer_load_dword v0, off, s[16:19], 0 ; 4-byte Folded Reload
; GFX9-O0-NEXT: s_mov_b64 exec, s[12:13]
; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
; GFX9-O0-NEXT: v_writelane_b32 v0, s3, 0
; GFX9-O0-NEXT: s_mov_b32 s4, s1
; GFX9-O0-NEXT: v_readlane_b32 s1, v0, 0
; GFX9-O0-NEXT: ; kill: def $sgpr2 killed $sgpr2 def $sgpr2_sgpr3
; GFX9-O0-NEXT: s_mov_b32 s3, s1
; GFX9-O0-NEXT: s_mov_b32 s8, s3
; GFX9-O0-NEXT: s_mov_b32 s9, s2
; GFX9-O0-NEXT: ; kill: def $sgpr0 killed $sgpr0 def $sgpr0_sgpr1
; GFX9-O0-NEXT: s_mov_b32 s1, s4
; GFX9-O0-NEXT: s_mov_b32 s10, s1
; GFX9-O0-NEXT: s_mov_b32 s4, s0
; GFX9-O0-NEXT: ; kill: def $sgpr4 killed $sgpr4 def $sgpr4_sgpr5_sgpr6_sgpr7
; GFX9-O0-NEXT: s_mov_b32 s5, s10
; GFX9-O0-NEXT: s_mov_b32 s6, s9
; GFX9-O0-NEXT: s_mov_b32 s7, s8
; GFX9-O0-NEXT: v_writelane_b32 v0, s2, 1
; GFX9-O0-NEXT: v_writelane_b32 v0, s3, 2
; GFX9-O0-NEXT: v_writelane_b32 v0, s0, 3
; GFX9-O0-NEXT: v_writelane_b32 v0, s1, 4
; GFX9-O0-NEXT: s_mov_b32 s0, 0
; GFX9-O0-NEXT: s_nop 2
; GFX9-O0-NEXT: buffer_load_dwordx2 v[4:5], off, s[4:7], s0
; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
; GFX9-O0-NEXT: buffer_store_dword v4, off, s[16:19], 0 offset:12 ; 4-byte Folded Spill
; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
; GFX9-O0-NEXT: buffer_store_dword v5, off, s[16:19], 0 offset:16 ; 4-byte Folded Spill
; GFX9-O0-NEXT: ; implicit-def: $sgpr2_sgpr3
; GFX9-O0-NEXT: v_mov_b32_e32 v1, v4
; GFX9-O0-NEXT: s_not_b64 exec, exec
; GFX9-O0-NEXT: v_mov_b32_e32 v1, s0
; GFX9-O0-NEXT: s_not_b64 exec, exec
; GFX9-O0-NEXT: s_or_saveexec_b64 s[2:3], -1
; GFX9-O0-NEXT: v_mov_b32_e32 v2, s0
; GFX9-O0-NEXT: s_nop 1
; GFX9-O0-NEXT: v_mov_b32_dpp v2, v1 row_bcast:31 row_mask:0xc bank_mask:0xf
; GFX9-O0-NEXT: v_add_u32_e64 v1, v1, v2
; GFX9-O0-NEXT: s_mov_b64 exec, s[2:3]
; GFX9-O0-NEXT: v_mov_b32_e32 v4, v1
; GFX9-O0-NEXT: buffer_store_dword v4, off, s[16:19], 0 offset:8 ; 4-byte Folded Spill
; GFX9-O0-NEXT: v_cmp_eq_u32_e64 s[2:3], v3, s0
; GFX9-O0-NEXT: v_mov_b32_e32 v3, s0
; GFX9-O0-NEXT: buffer_store_dword v3, off, s[16:19], 0 offset:4 ; 4-byte Folded Spill
; GFX9-O0-NEXT: s_mov_b64 s[0:1], exec
; GFX9-O0-NEXT: v_writelane_b32 v0, s0, 5
; GFX9-O0-NEXT: v_writelane_b32 v0, s1, 6
; GFX9-O0-NEXT: s_or_saveexec_b64 s[12:13], -1
; GFX9-O0-NEXT: buffer_store_dword v0, off, s[16:19], 0 ; 4-byte Folded Spill
; GFX9-O0-NEXT: s_mov_b64 exec, s[12:13]
; GFX9-O0-NEXT: s_and_b64 s[0:1], s[0:1], s[2:3]
; GFX9-O0-NEXT: s_mov_b64 exec, s[0:1]
; GFX9-O0-NEXT: s_cbranch_execz .LBB1_2
; GFX9-O0-NEXT: ; %bb.1: ; %if
; GFX9-O0-NEXT: buffer_load_dword v3, off, s[16:19], 0 offset:12 ; 4-byte Folded Reload
; GFX9-O0-NEXT: buffer_load_dword v4, off, s[16:19], 0 offset:16 ; 4-byte Folded Reload
; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
; GFX9-O0-NEXT: v_mov_b32_e32 v0, v4
; GFX9-O0-NEXT: s_or_saveexec_b64 s[0:1], -1
; GFX9-O0-NEXT: v_mov_b32_e32 v1, 0
; GFX9-O0-NEXT: s_mov_b64 exec, s[0:1]
; GFX9-O0-NEXT: v_mov_b32_e32 v2, v0
; GFX9-O0-NEXT: s_not_b64 exec, exec
; GFX9-O0-NEXT: v_mov_b32_e32 v2, v1
; GFX9-O0-NEXT: s_not_b64 exec, exec
; GFX9-O0-NEXT: s_or_saveexec_b64 s[0:1], -1
; GFX9-O0-NEXT: v_mov_b32_dpp v1, v2 row_bcast:31 row_mask:0xc bank_mask:0xf
; GFX9-O0-NEXT: v_add_u32_e64 v1, v2, v1
; GFX9-O0-NEXT: s_mov_b64 exec, s[0:1]
; GFX9-O0-NEXT: v_mov_b32_e32 v0, v1
; GFX9-O0-NEXT: buffer_store_dword v0, off, s[16:19], 0 offset:4 ; 4-byte Folded Spill
; GFX9-O0-NEXT: .LBB1_2: ; %merge
; GFX9-O0-NEXT: s_or_saveexec_b64 s[12:13], -1
; GFX9-O0-NEXT: buffer_load_dword v0, off, s[16:19], 0 ; 4-byte Folded Reload
; GFX9-O0-NEXT: s_mov_b64 exec, s[12:13]
; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
; GFX9-O0-NEXT: v_readlane_b32 s4, v0, 5
; GFX9-O0-NEXT: v_readlane_b32 s5, v0, 6
; GFX9-O0-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX9-O0-NEXT: v_readlane_b32 s2, v0, 1
; GFX9-O0-NEXT: v_readlane_b32 s3, v0, 2
; GFX9-O0-NEXT: v_readlane_b32 s0, v0, 3
; GFX9-O0-NEXT: v_readlane_b32 s1, v0, 4
; GFX9-O0-NEXT: buffer_load_dword v3, off, s[16:19], 0 offset:8 ; 4-byte Folded Reload
; GFX9-O0-NEXT: buffer_load_dword v4, off, s[16:19], 0 offset:4 ; 4-byte Folded Reload
; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
; GFX9-O0-NEXT: v_cmp_eq_u32_e64 s[4:5], v3, v4
; GFX9-O0-NEXT: v_cndmask_b32_e64 v3, 0, 1, s[4:5]
; GFX9-O0-NEXT: s_mov_b32 s4, 1
; GFX9-O0-NEXT: v_lshlrev_b32_e64 v3, s4, v3
; GFX9-O0-NEXT: s_mov_b32 s4, 2
; GFX9-O0-NEXT: v_and_b32_e64 v3, v3, s4
; GFX9-O0-NEXT: s_mov_b32 s6, s1
; GFX9-O0-NEXT: ; kill: def $sgpr0 killed $sgpr0 killed $sgpr0_sgpr1
; GFX9-O0-NEXT: s_mov_b32 s4, s3
; GFX9-O0-NEXT: s_mov_b32 s5, s2
; GFX9-O0-NEXT: ; kill: def $sgpr0 killed $sgpr0 def $sgpr0_sgpr1_sgpr2_sgpr3
; GFX9-O0-NEXT: s_mov_b32 s1, s6
; GFX9-O0-NEXT: s_mov_b32 s2, s5
; GFX9-O0-NEXT: s_mov_b32 s3, s4
; GFX9-O0-NEXT: s_mov_b32 s4, 0
; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s4 offset:4
; GFX9-O0-NEXT: ; kill: killed $vgpr0
; GFX9-O0-NEXT: s_endpgm
;
; GFX9-O3-LABEL: cfg:
; GFX9-O3: ; %bb.0: ; %entry
; GFX9-O3-NEXT: buffer_load_dwordx2 v[3:4], off, s[0:3], 0
; GFX9-O3-NEXT: v_mov_b32_e32 v5, 0
; GFX9-O3-NEXT: s_or_saveexec_b64 s[4:5], -1
; GFX9-O3-NEXT: v_mov_b32_e32 v1, 0
; GFX9-O3-NEXT: s_mov_b64 exec, s[4:5]
; GFX9-O3-NEXT: s_waitcnt vmcnt(0)
; GFX9-O3-NEXT: v_mov_b32_e32 v2, v3
; GFX9-O3-NEXT: s_not_b64 exec, exec
; GFX9-O3-NEXT: v_mov_b32_e32 v2, 0
; GFX9-O3-NEXT: s_not_b64 exec, exec
; GFX9-O3-NEXT: s_or_saveexec_b64 s[4:5], -1
; GFX9-O3-NEXT: v_mov_b32_dpp v1, v2 row_bcast:31 row_mask:0xc bank_mask:0xf
; GFX9-O3-NEXT: v_add_u32_e32 v1, v2, v1
; GFX9-O3-NEXT: s_mov_b64 exec, s[4:5]
; GFX9-O3-NEXT: v_mov_b32_e32 v3, v1
; GFX9-O3-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
; GFX9-O3-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GFX9-O3-NEXT: s_cbranch_execz .LBB1_2
; GFX9-O3-NEXT: ; %bb.1: ; %if
; GFX9-O3-NEXT: s_or_saveexec_b64 s[6:7], -1
; GFX9-O3-NEXT: v_mov_b32_e32 v1, 0
; GFX9-O3-NEXT: s_mov_b64 exec, s[6:7]
; GFX9-O3-NEXT: v_mov_b32_e32 v2, v4
; GFX9-O3-NEXT: s_not_b64 exec, exec
; GFX9-O3-NEXT: v_mov_b32_e32 v2, 0
; GFX9-O3-NEXT: s_not_b64 exec, exec
; GFX9-O3-NEXT: s_or_saveexec_b64 s[6:7], -1
; GFX9-O3-NEXT: v_mov_b32_dpp v1, v2 row_bcast:31 row_mask:0xc bank_mask:0xf
; GFX9-O3-NEXT: v_add_u32_e32 v1, v2, v1
; GFX9-O3-NEXT: s_mov_b64 exec, s[6:7]
; GFX9-O3-NEXT: v_mov_b32_e32 v5, v1
; GFX9-O3-NEXT: .LBB1_2: ; %merge
; GFX9-O3-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX9-O3-NEXT: v_cmp_eq_u32_e32 vcc, v3, v5
; GFX9-O3-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
; GFX9-O3-NEXT: v_lshlrev_b32_e32 v0, 1, v0
; GFX9-O3-NEXT: v_and_b32_e32 v0, 2, v0
; GFX9-O3-NEXT: buffer_store_dword v0, off, s[0:3], 0 offset:4
; GFX9-O3-NEXT: s_endpgm
entry:
  %tmp100 = call <2 x float> @llvm.amdgcn.raw.ptr.buffer.load.v2f32(ptr addrspace(8) %tmp14, i32 0, i32 0, i32 0)
  %tmp101 = bitcast <2 x float> %tmp100 to <2 x i32>
  %tmp102 = extractelement <2 x i32> %tmp101, i32 0
  %tmp105 = tail call i32 @llvm.amdgcn.set.inactive.i32(i32 %tmp102, i32 0)

  %tmp120 = tail call i32 @llvm.amdgcn.update.dpp.i32(i32 0, i32 %tmp105, i32 323, i32 12, i32 15, i1 false)
  %tmp121 = add i32 %tmp105, %tmp120
  %tmp122 = tail call i32 @llvm.amdgcn.wwm.i32(i32 %tmp121)

  %cond = icmp eq i32 %arg, 0
  br i1 %cond, label %if, label %merge

if:
  %tmp103 = extractelement <2 x i32> %tmp101, i32 1
  %tmp107 = tail call i32 @llvm.amdgcn.set.inactive.i32(i32 %tmp103, i32 0)

  %tmp135 = tail call i32 @llvm.amdgcn.update.dpp.i32(i32 0, i32 %tmp107, i32 323, i32 12, i32 15, i1 false)
  %tmp136 = add i32 %tmp107, %tmp135
  %tmp137 = tail call i32 @llvm.amdgcn.wwm.i32(i32 %tmp136)
  br label %merge

merge:
  %merge_value = phi i32 [ 0, %entry ], [%tmp137, %if ]
  %tmp138 = icmp eq i32 %tmp122, %merge_value
  %tmp139 = sext i1 %tmp138 to i32
  %tmp140 = shl nsw i32 %tmp139, 1
  %tmp141 = and i32 %tmp140, 2
  %tmp145 = bitcast i32 %tmp141 to float
  call void @llvm.amdgcn.raw.ptr.buffer.store.f32(float %tmp145, ptr addrspace(8) %tmp14, i32 4, i32 0, i32 0)
  ret void
}
; Callee used by @call below; noinline so the call survives and the
; WWM-around-a-call lowering can be checked in the caller.
define hidden i32 @called(i32 %a) noinline {
; GFX9-O0-LABEL: called:
; GFX9-O0: ; %bb.0:
; GFX9-O0-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-O0-NEXT: v_add_u32_e64 v1, v0, v0
; GFX9-O0-NEXT: v_mul_lo_u32 v0, v1, v0
; GFX9-O0-NEXT: v_sub_u32_e64 v0, v0, v1
; GFX9-O0-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-O3-LABEL: called:
; GFX9-O3: ; %bb.0:
; GFX9-O3-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-O3-NEXT: v_add_u32_e32 v1, v0, v0
; GFX9-O3-NEXT: v_mul_lo_u32 v0, v1, v0
; GFX9-O3-NEXT: v_sub_u32_e32 v0, v0, v1
; GFX9-O3-NEXT: s_setpc_b64 s[30:31]
  %add = add i32 %a, %a
  %mul = mul i32 %add, %a
  %sub = sub i32 %mul, %add
  ret i32 %sub
}
; Kernel that passes a set.inactive value through a real call to @called
; and feeds the result back into llvm.amdgcn.wwm; checks WWM state is
; preserved across the s_swappc_b64 call at -O0 and -O3.
define amdgpu_kernel void @call(ptr addrspace(8) inreg %tmp14, i32 inreg %arg) {
; GFX9-O0-LABEL: call:
; GFX9-O0: ; %bb.0:
; GFX9-O0-NEXT: s_mov_b32 s32, 0x400
; GFX9-O0-NEXT: s_mov_b32 s24, SCRATCH_RSRC_DWORD0
; GFX9-O0-NEXT: s_mov_b32 s25, SCRATCH_RSRC_DWORD1
; GFX9-O0-NEXT: s_mov_b32 s26, -1
; GFX9-O0-NEXT: s_mov_b32 s27, 0xe00000
; GFX9-O0-NEXT: s_add_u32 s24, s24, s9
; GFX9-O0-NEXT: s_addc_u32 s25, s25, 0
; GFX9-O0-NEXT: ; implicit-def: $vgpr7 : SGPR spill to VGPR lane
; GFX9-O0-NEXT: s_or_saveexec_b64 s[10:11], -1
; GFX9-O0-NEXT: v_writelane_b32 v7, s10, 0
; GFX9-O0-NEXT: v_writelane_b32 v7, s11, 1
; GFX9-O0-NEXT: s_mov_b32 s14, s8
; GFX9-O0-NEXT: s_mov_b32 s13, s7
; GFX9-O0-NEXT: s_mov_b32 s12, s6
; GFX9-O0-NEXT: s_mov_b64 s[10:11], s[4:5]
; GFX9-O0-NEXT: s_mov_b64 s[4:5], s[2:3]
; GFX9-O0-NEXT: v_readlane_b32 s2, v7, 0
; GFX9-O0-NEXT: v_readlane_b32 s3, v7, 1
; GFX9-O0-NEXT: v_writelane_b32 v7, s4, 2
; GFX9-O0-NEXT: v_writelane_b32 v7, s5, 3
; GFX9-O0-NEXT: s_or_saveexec_b64 s[20:21], -1
; GFX9-O0-NEXT: buffer_store_dword v7, off, s[24:27], 0 ; 4-byte Folded Spill
; GFX9-O0-NEXT: s_mov_b64 exec, s[20:21]
; GFX9-O0-NEXT: s_mov_b64 s[4:5], s[0:1]
; GFX9-O0-NEXT: v_readlane_b32 s0, v7, 2
; GFX9-O0-NEXT: v_readlane_b32 s1, v7, 3
; GFX9-O0-NEXT: v_mov_b32_e32 v3, v2
; GFX9-O0-NEXT: v_mov_b32_e32 v4, v1
; GFX9-O0-NEXT: v_mov_b32_e32 v5, v0
; GFX9-O0-NEXT: s_or_saveexec_b64 s[20:21], -1
; GFX9-O0-NEXT: buffer_load_dword v0, off, s[24:27], 0 ; 4-byte Folded Reload
; GFX9-O0-NEXT: s_mov_b64 exec, s[20:21]
; GFX9-O0-NEXT: s_mov_b64 exec, s[2:3]
; GFX9-O0-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0x24
; GFX9-O0-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x2c
; GFX9-O0-NEXT: s_load_dword s2, s[0:1], 0x34
; GFX9-O0-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-O0-NEXT: s_mov_b32 s3, s7
; GFX9-O0-NEXT: ; kill: def $sgpr6 killed $sgpr6 killed $sgpr6_sgpr7
; GFX9-O0-NEXT: s_mov_b32 s7, s9
; GFX9-O0-NEXT: s_mov_b32 s16, s8
; GFX9-O0-NEXT: ; kill: def $sgpr16 killed $sgpr16 def $sgpr16_sgpr17_sgpr18_sgpr19
; GFX9-O0-NEXT: s_mov_b32 s17, s7
; GFX9-O0-NEXT: s_mov_b32 s18, s6
; GFX9-O0-NEXT: s_mov_b32 s19, s3
; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
; GFX9-O0-NEXT: v_writelane_b32 v0, s16, 4
; GFX9-O0-NEXT: v_writelane_b32 v0, s17, 5
; GFX9-O0-NEXT: v_writelane_b32 v0, s18, 6
; GFX9-O0-NEXT: v_writelane_b32 v0, s19, 7
; GFX9-O0-NEXT: s_mov_b32 s3, 0
; GFX9-O0-NEXT: v_writelane_b32 v0, s3, 8
; GFX9-O0-NEXT: v_mov_b32_e32 v6, s2
; GFX9-O0-NEXT: s_not_b64 exec, exec
; GFX9-O0-NEXT: v_mov_b32_e32 v6, s3
; GFX9-O0-NEXT: s_not_b64 exec, exec
; GFX9-O0-NEXT: s_or_saveexec_b64 s[2:3], -1
; GFX9-O0-NEXT: v_writelane_b32 v0, s2, 9
; GFX9-O0-NEXT: v_writelane_b32 v0, s3, 10
; GFX9-O0-NEXT: s_or_saveexec_b64 s[20:21], -1
; GFX9-O0-NEXT: buffer_store_dword v0, off, s[24:27], 0 ; 4-byte Folded Spill
; GFX9-O0-NEXT: s_mov_b64 exec, s[20:21]
; GFX9-O0-NEXT: s_mov_b64 s[6:7], 56
; GFX9-O0-NEXT: s_mov_b32 s2, s0
; GFX9-O0-NEXT: s_mov_b32 s0, s1
; GFX9-O0-NEXT: s_mov_b32 s3, s6
; GFX9-O0-NEXT: s_mov_b32 s1, s7
; GFX9-O0-NEXT: s_add_u32 s8, s2, s3
; GFX9-O0-NEXT: s_addc_u32 s0, s0, s1
; GFX9-O0-NEXT: ; kill: def $sgpr8 killed $sgpr8 def $sgpr8_sgpr9
; GFX9-O0-NEXT: s_mov_b32 s9, s0
; GFX9-O0-NEXT: s_getpc_b64 s[16:17]
; GFX9-O0-NEXT: s_add_u32 s16, s16, called@rel32@lo+4
; GFX9-O0-NEXT: s_addc_u32 s17, s17, called@rel32@hi+12
; GFX9-O0-NEXT: s_mov_b64 s[0:1], s[24:25]
; GFX9-O0-NEXT: s_mov_b64 s[2:3], s[26:27]
; GFX9-O0-NEXT: s_mov_b32 s6, 20
; GFX9-O0-NEXT: v_lshlrev_b32_e64 v3, s6, v3
; GFX9-O0-NEXT: s_mov_b32 s6, 10
; GFX9-O0-NEXT: v_lshlrev_b32_e64 v4, s6, v4
; GFX9-O0-NEXT: v_or3_b32 v3, v5, v4, v3
; GFX9-O0-NEXT: ; implicit-def: $sgpr6_sgpr7
; GFX9-O0-NEXT: ; implicit-def: $sgpr15
; GFX9-O0-NEXT: v_mov_b32_e32 v31, v3
; GFX9-O0-NEXT: v_mov_b32_e32 v0, v6
; GFX9-O0-NEXT: s_swappc_b64 s[30:31], s[16:17]
; GFX9-O0-NEXT: s_or_saveexec_b64 s[20:21], -1
; GFX9-O0-NEXT: buffer_load_dword v1, off, s[24:27], 0 ; 4-byte Folded Reload
; GFX9-O0-NEXT: s_mov_b64 exec, s[20:21]
; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
; GFX9-O0-NEXT: v_readlane_b32 s0, v1, 4
; GFX9-O0-NEXT: v_readlane_b32 s1, v1, 5
; GFX9-O0-NEXT: v_readlane_b32 s2, v1, 6
; GFX9-O0-NEXT: v_readlane_b32 s3, v1, 7
; GFX9-O0-NEXT: v_readlane_b32 s6, v1, 9
; GFX9-O0-NEXT: v_readlane_b32 s7, v1, 10
; GFX9-O0-NEXT: v_readlane_b32 s4, v1, 8
; GFX9-O0-NEXT: v_mov_b32_e32 v3, v0
; GFX9-O0-NEXT: s_or_saveexec_b64 s[20:21], -1
; GFX9-O0-NEXT: buffer_load_dword v0, off, s[24:27], 0 ; 4-byte Folded Reload
; GFX9-O0-NEXT: s_mov_b64 exec, s[20:21]
; GFX9-O0-NEXT: v_add_u32_e64 v3, v3, v6
; GFX9-O0-NEXT: s_mov_b64 exec, s[6:7]
; GFX9-O0-NEXT: v_mov_b32_e32 v1, v3
; GFX9-O0-NEXT: buffer_store_dword v1, off, s[0:3], s4 offset:4
; GFX9-O0-NEXT: ; kill: killed $vgpr0
; GFX9-O0-NEXT: s_endpgm
;
; GFX9-O3-LABEL: call:
; GFX9-O3: ; %bb.0:
; GFX9-O3-NEXT: s_mov_b32 s12, SCRATCH_RSRC_DWORD0
; GFX9-O3-NEXT: s_mov_b32 s13, SCRATCH_RSRC_DWORD1
; GFX9-O3-NEXT: s_mov_b32 s14, -1
; GFX9-O3-NEXT: s_load_dword s2, s[0:1], 0x34
; GFX9-O3-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
; GFX9-O3-NEXT: s_mov_b32 s15, 0xe00000
; GFX9-O3-NEXT: s_add_u32 s12, s12, s3
; GFX9-O3-NEXT: s_addc_u32 s13, s13, 0
; GFX9-O3-NEXT: s_mov_b32 s32, 0
; GFX9-O3-NEXT: s_getpc_b64 s[8:9]
; GFX9-O3-NEXT: s_add_u32 s8, s8, called@rel32@lo+4
; GFX9-O3-NEXT: s_addc_u32 s9, s9, called@rel32@hi+12
; GFX9-O3-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-O3-NEXT: v_mov_b32_e32 v2, s2
; GFX9-O3-NEXT: s_not_b64 exec, exec
; GFX9-O3-NEXT: v_mov_b32_e32 v2, 0
; GFX9-O3-NEXT: s_not_b64 exec, exec
; GFX9-O3-NEXT: s_or_saveexec_b64 s[10:11], -1
; GFX9-O3-NEXT: s_mov_b64 s[0:1], s[12:13]
; GFX9-O3-NEXT: s_mov_b64 s[2:3], s[14:15]
; GFX9-O3-NEXT: v_mov_b32_e32 v0, v2
; GFX9-O3-NEXT: s_swappc_b64 s[30:31], s[8:9]
; GFX9-O3-NEXT: v_mov_b32_e32 v1, v0
; GFX9-O3-NEXT: v_add_u32_e32 v1, v1, v2
; GFX9-O3-NEXT: s_mov_b64 exec, s[10:11]
; GFX9-O3-NEXT: v_mov_b32_e32 v0, v1
; GFX9-O3-NEXT: buffer_store_dword v0, off, s[4:7], 0 offset:4
; GFX9-O3-NEXT: s_endpgm

  %tmp107 = tail call i32 @llvm.amdgcn.set.inactive.i32(i32 %arg, i32 0)
  %tmp134 = call i32 @called(i32 %tmp107)
  %tmp136 = add i32 %tmp134, %tmp107
  %tmp137 = tail call i32 @llvm.amdgcn.wwm.i32(i32 %tmp136)
  call void @llvm.amdgcn.raw.ptr.buffer.store.i32(i32 %tmp137, ptr addrspace(8) %tmp14, i32 4, i32 0, i32 0)
  ret void
}
; i64 variant of @called; callee for @call_i64. noinline keeps the call
; in place so the caller's WWM handling of a 64-bit value is exercised.
define i64 @called_i64(i64 %a) noinline {
; GFX9-O0-LABEL: called_i64:
; GFX9-O0: ; %bb.0:
; GFX9-O0-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-O0-NEXT: v_mov_b32_e32 v2, v0
; GFX9-O0-NEXT: ; implicit-def: $sgpr4
; GFX9-O0-NEXT: ; implicit-def: $sgpr4
; GFX9-O0-NEXT: ; kill: def $vgpr2 killed $vgpr2 def $vgpr2_vgpr3 killed $exec
; GFX9-O0-NEXT: v_mov_b32_e32 v3, v1
; GFX9-O0-NEXT: ; implicit-def: $sgpr4_sgpr5
; GFX9-O0-NEXT: v_mov_b32_e32 v4, v2
; GFX9-O0-NEXT: v_mov_b32_e32 v5, v2
; GFX9-O0-NEXT: v_mov_b32_e32 v0, v3
; GFX9-O0-NEXT: v_mov_b32_e32 v1, v3
; GFX9-O0-NEXT: v_add_co_u32_e64 v4, s[4:5], v4, v5
; GFX9-O0-NEXT: v_addc_co_u32_e64 v0, s[4:5], v0, v1, s[4:5]
; GFX9-O0-NEXT: ; kill: def $vgpr4 killed $vgpr4 def $vgpr4_vgpr5 killed $exec
; GFX9-O0-NEXT: v_mov_b32_e32 v5, v0
; GFX9-O0-NEXT: s_mov_b32 s4, 32
; GFX9-O0-NEXT: v_mov_b32_e32 v0, v2
; GFX9-O0-NEXT: v_mov_b32_e32 v1, v3
; GFX9-O0-NEXT: v_lshrrev_b64 v[0:1], s4, v[0:1]
; GFX9-O0-NEXT: v_mov_b32_e32 v1, v0
; GFX9-O0-NEXT: v_mov_b32_e32 v0, v4
; GFX9-O0-NEXT: v_mul_lo_u32 v1, v0, v1
; GFX9-O0-NEXT: v_mov_b32_e32 v3, v2
; GFX9-O0-NEXT: v_lshrrev_b64 v[6:7], s4, v[4:5]
; GFX9-O0-NEXT: v_mov_b32_e32 v2, v6
; GFX9-O0-NEXT: v_mul_lo_u32 v2, v2, v3
; GFX9-O0-NEXT: v_mad_u64_u32 v[6:7], s[6:7], v0, v3, 0
; GFX9-O0-NEXT: v_mov_b32_e32 v0, v7
; GFX9-O0-NEXT: v_add3_u32 v0, v0, v1, v2
; GFX9-O0-NEXT: ; implicit-def: $sgpr5
; GFX9-O0-NEXT: ; implicit-def: $sgpr6
; GFX9-O0-NEXT: ; implicit-def: $sgpr6
; GFX9-O0-NEXT: v_mov_b32_e32 v2, s5
; GFX9-O0-NEXT: ; kill: def $vgpr0 killed $vgpr0 def $vgpr0_vgpr1 killed $exec
; GFX9-O0-NEXT: v_mov_b32_e32 v1, v2
; GFX9-O0-NEXT: v_lshlrev_b64 v[1:2], s4, v[0:1]
; GFX9-O0-NEXT: v_mov_b32_e32 v3, v2
; GFX9-O0-NEXT: ; kill: def $vgpr6 killed $vgpr6 killed $vgpr6_vgpr7 killed $exec
; GFX9-O0-NEXT: s_mov_b32 s5, 0
; GFX9-O0-NEXT: ; implicit-def: $sgpr5
; GFX9-O0-NEXT: v_mov_b32_e32 v0, 0
; GFX9-O0-NEXT: ; kill: def $vgpr6 killed $vgpr6 def $vgpr6_vgpr7 killed $exec
; GFX9-O0-NEXT: v_mov_b32_e32 v7, v0
; GFX9-O0-NEXT: v_mov_b32_e32 v0, v7
; GFX9-O0-NEXT: v_or_b32_e64 v0, v0, v3
; GFX9-O0-NEXT: v_mov_b32_e32 v2, v1
; GFX9-O0-NEXT: v_mov_b32_e32 v1, v6
; GFX9-O0-NEXT: v_or_b32_e64 v6, v1, v2
; GFX9-O0-NEXT: ; kill: def $vgpr6 killed $vgpr6 def $vgpr6_vgpr7 killed $exec
; GFX9-O0-NEXT: v_mov_b32_e32 v7, v0
; GFX9-O0-NEXT: v_mov_b32_e32 v1, v6
; GFX9-O0-NEXT: v_mov_b32_e32 v3, v4
; GFX9-O0-NEXT: v_mov_b32_e32 v0, v7
; GFX9-O0-NEXT: v_mov_b32_e32 v2, v5
; GFX9-O0-NEXT: v_sub_co_u32_e64 v1, s[6:7], v1, v3
; GFX9-O0-NEXT: v_subb_co_u32_e64 v0, s[6:7], v0, v2, s[6:7]
; GFX9-O0-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
; GFX9-O0-NEXT: v_mov_b32_e32 v2, v0
; GFX9-O0-NEXT: v_mov_b32_e32 v0, v1
; GFX9-O0-NEXT: v_lshrrev_b64 v[1:2], s4, v[1:2]
; GFX9-O0-NEXT: ; kill: def $vgpr1 killed $vgpr1 killed $vgpr1_vgpr2 killed $exec
; GFX9-O0-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-O3-LABEL: called_i64:
; GFX9-O3: ; %bb.0:
; GFX9-O3-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-O3-NEXT: v_add_co_u32_e32 v2, vcc, v0, v0
; GFX9-O3-NEXT: v_addc_co_u32_e32 v3, vcc, v1, v1, vcc
; GFX9-O3-NEXT: v_mul_lo_u32 v4, v3, v0
; GFX9-O3-NEXT: v_mul_lo_u32 v5, v2, v1
; GFX9-O3-NEXT: v_mad_u64_u32 v[0:1], s[4:5], v2, v0, 0
; GFX9-O3-NEXT: v_add3_u32 v1, v1, v5, v4
; GFX9-O3-NEXT: v_sub_co_u32_e32 v0, vcc, v0, v2
; GFX9-O3-NEXT: v_subb_co_u32_e32 v1, vcc, v1, v3, vcc
; GFX9-O3-NEXT: s_setpc_b64 s[30:31]
  %add = add i64 %a, %a
  %mul = mul i64 %add, %a
  %sub = sub i64 %mul, %add
  ret i64 %sub
}
define amdgpu_kernel void @call_i64(ptr addrspace(8) inreg %tmp14, i64 inreg %arg) {
|
|
; GFX9-O0-LABEL: call_i64:
|
|
; GFX9-O0: ; %bb.0:
|
|
; GFX9-O0-NEXT: s_mov_b32 s32, 0x400
|
|
; GFX9-O0-NEXT: s_mov_b32 s24, SCRATCH_RSRC_DWORD0
|
|
; GFX9-O0-NEXT: s_mov_b32 s25, SCRATCH_RSRC_DWORD1
|
|
; GFX9-O0-NEXT: s_mov_b32 s26, -1
|
|
; GFX9-O0-NEXT: s_mov_b32 s27, 0xe00000
|
|
; GFX9-O0-NEXT: s_add_u32 s24, s24, s9
|
|
; GFX9-O0-NEXT: s_addc_u32 s25, s25, 0
|
|
; GFX9-O0-NEXT: ; implicit-def: $vgpr12 : SGPR spill to VGPR lane
|
|
; GFX9-O0-NEXT: s_or_saveexec_b64 s[10:11], -1
|
|
; GFX9-O0-NEXT: v_writelane_b32 v12, s10, 0
|
|
; GFX9-O0-NEXT: v_writelane_b32 v12, s11, 1
|
|
; GFX9-O0-NEXT: s_mov_b32 s14, s8
|
|
; GFX9-O0-NEXT: s_mov_b32 s13, s7
|
|
; GFX9-O0-NEXT: s_mov_b32 s12, s6
|
|
; GFX9-O0-NEXT: s_mov_b64 s[10:11], s[4:5]
|
|
; GFX9-O0-NEXT: s_mov_b64 s[4:5], s[2:3]
|
|
; GFX9-O0-NEXT: v_readlane_b32 s2, v12, 0
|
|
; GFX9-O0-NEXT: v_readlane_b32 s3, v12, 1
|
|
; GFX9-O0-NEXT: v_writelane_b32 v12, s4, 2
|
|
; GFX9-O0-NEXT: v_writelane_b32 v12, s5, 3
|
|
; GFX9-O0-NEXT: s_or_saveexec_b64 s[20:21], -1
|
|
; GFX9-O0-NEXT: buffer_store_dword v12, off, s[24:27], 0 ; 4-byte Folded Spill
|
|
; GFX9-O0-NEXT: s_mov_b64 exec, s[20:21]
|
|
; GFX9-O0-NEXT: s_mov_b64 s[4:5], s[0:1]
|
|
; GFX9-O0-NEXT: v_readlane_b32 s0, v12, 2
|
|
; GFX9-O0-NEXT: v_readlane_b32 s1, v12, 3
|
|
; GFX9-O0-NEXT: v_mov_b32_e32 v3, v2
|
|
; GFX9-O0-NEXT: v_mov_b32_e32 v4, v1
|
|
; GFX9-O0-NEXT: v_mov_b32_e32 v5, v0
|
|
; GFX9-O0-NEXT: s_or_saveexec_b64 s[20:21], -1
|
|
; GFX9-O0-NEXT: buffer_load_dword v0, off, s[24:27], 0 ; 4-byte Folded Reload
|
|
; GFX9-O0-NEXT: s_mov_b64 exec, s[20:21]
|
|
; GFX9-O0-NEXT: s_mov_b64 exec, s[2:3]
|
|
; GFX9-O0-NEXT: s_load_dwordx2 s[16:17], s[0:1], 0x24
|
|
; GFX9-O0-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0x2c
|
|
; GFX9-O0-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
|
|
; GFX9-O0-NEXT: s_waitcnt lgkmcnt(0)
|
|
; GFX9-O0-NEXT: s_mov_b32 s6, s9
|
|
; GFX9-O0-NEXT: s_mov_b32 s7, s8
|
|
; GFX9-O0-NEXT: s_mov_b32 s8, s17
|
|
; GFX9-O0-NEXT: ; kill: def $sgpr16 killed $sgpr16 killed $sgpr16_sgpr17
|
|
; GFX9-O0-NEXT: ; kill: def $sgpr16 killed $sgpr16 def $sgpr16_sgpr17_sgpr18_sgpr19
|
|
; GFX9-O0-NEXT: s_mov_b32 s17, s8
|
|
; GFX9-O0-NEXT: s_mov_b32 s18, s7
|
|
; GFX9-O0-NEXT: s_mov_b32 s19, s6
|
|
; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
|
|
; GFX9-O0-NEXT: v_writelane_b32 v0, s16, 4
|
|
; GFX9-O0-NEXT: v_writelane_b32 v0, s17, 5
|
|
; GFX9-O0-NEXT: v_writelane_b32 v0, s18, 6
|
|
; GFX9-O0-NEXT: v_writelane_b32 v0, s19, 7
|
|
; GFX9-O0-NEXT: s_mov_b64 s[6:7], 0
|
|
; GFX9-O0-NEXT: v_mov_b32_e32 v8, s2
|
|
; GFX9-O0-NEXT: v_mov_b32_e32 v9, s3
|
|
; GFX9-O0-NEXT: s_not_b64 exec, exec
|
|
; GFX9-O0-NEXT: v_mov_b32_e32 v8, s6
|
|
; GFX9-O0-NEXT: v_mov_b32_e32 v9, s7
|
|
; GFX9-O0-NEXT: s_not_b64 exec, exec
|
|
; GFX9-O0-NEXT: s_or_saveexec_b64 s[2:3], -1
|
|
; GFX9-O0-NEXT: v_writelane_b32 v0, s2, 8
|
|
; GFX9-O0-NEXT: v_writelane_b32 v0, s3, 9
|
|
; GFX9-O0-NEXT: s_or_saveexec_b64 s[20:21], -1
|
|
; GFX9-O0-NEXT: buffer_store_dword v0, off, s[24:27], 0 ; 4-byte Folded Spill
|
|
; GFX9-O0-NEXT: s_mov_b64 exec, s[20:21]
|
|
; GFX9-O0-NEXT: s_mov_b64 s[6:7], 60
|
|
; GFX9-O0-NEXT: s_mov_b32 s2, s0
|
|
; GFX9-O0-NEXT: s_mov_b32 s0, s1
|
|
; GFX9-O0-NEXT: s_mov_b32 s3, s6
|
|
; GFX9-O0-NEXT: s_mov_b32 s1, s7
|
|
; GFX9-O0-NEXT: s_add_u32 s8, s2, s3
|
|
; GFX9-O0-NEXT: s_addc_u32 s0, s0, s1
|
|
; GFX9-O0-NEXT: ; kill: def $sgpr8 killed $sgpr8 def $sgpr8_sgpr9
|
|
; GFX9-O0-NEXT: s_mov_b32 s9, s0
|
|
; GFX9-O0-NEXT: v_mov_b32_e32 v6, v8
|
|
; GFX9-O0-NEXT: s_mov_b32 s0, 32
|
|
; GFX9-O0-NEXT: ; implicit-def: $sgpr2_sgpr3
|
|
; GFX9-O0-NEXT: v_lshrrev_b64 v[10:11], s0, v[8:9]
|
|
; GFX9-O0-NEXT: v_mov_b32_e32 v7, v10
|
|
; GFX9-O0-NEXT: s_getpc_b64 s[0:1]
|
|
; GFX9-O0-NEXT: s_add_u32 s0, s0, called_i64@gotpcrel32@lo+4
|
|
; GFX9-O0-NEXT: s_addc_u32 s1, s1, called_i64@gotpcrel32@hi+12
|
|
; GFX9-O0-NEXT: s_load_dwordx2 s[16:17], s[0:1], 0x0
|
|
; GFX9-O0-NEXT: s_mov_b64 s[0:1], s[24:25]
|
|
; GFX9-O0-NEXT: s_mov_b64 s[2:3], s[26:27]
|
|
; GFX9-O0-NEXT: s_mov_b32 s6, 20
|
|
; GFX9-O0-NEXT: v_lshlrev_b32_e64 v3, s6, v3
|
|
; GFX9-O0-NEXT: s_mov_b32 s6, 10
|
|
; GFX9-O0-NEXT: v_lshlrev_b32_e64 v4, s6, v4
|
|
; GFX9-O0-NEXT: v_or3_b32 v3, v5, v4, v3
|
|
; GFX9-O0-NEXT: ; implicit-def: $sgpr6_sgpr7
|
|
; GFX9-O0-NEXT: ; implicit-def: $sgpr15
|
|
; GFX9-O0-NEXT: v_mov_b32_e32 v31, v3
|
|
; GFX9-O0-NEXT: v_mov_b32_e32 v0, v6
|
|
; GFX9-O0-NEXT: v_mov_b32_e32 v1, v7
|
|
; GFX9-O0-NEXT: s_waitcnt lgkmcnt(0)
|
|
; GFX9-O0-NEXT: s_swappc_b64 s[30:31], s[16:17]
|
|
; GFX9-O0-NEXT: s_or_saveexec_b64 s[20:21], -1
|
|
; GFX9-O0-NEXT: buffer_load_dword v2, off, s[24:27], 0 ; 4-byte Folded Reload
|
|
; GFX9-O0-NEXT: s_mov_b64 exec, s[20:21]
|
|
; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
|
|
; GFX9-O0-NEXT: v_readlane_b32 s0, v2, 4
|
|
; GFX9-O0-NEXT: v_readlane_b32 s1, v2, 5
|
|
; GFX9-O0-NEXT: v_readlane_b32 s2, v2, 6
|
|
; GFX9-O0-NEXT: v_readlane_b32 s3, v2, 7
|
|
; GFX9-O0-NEXT: v_readlane_b32 s4, v2, 8
|
|
; GFX9-O0-NEXT: v_readlane_b32 s5, v2, 9
|
|
; GFX9-O0-NEXT: v_mov_b32_e32 v3, v0
|
|
; GFX9-O0-NEXT: s_or_saveexec_b64 s[20:21], -1
|
|
; GFX9-O0-NEXT: buffer_load_dword v0, off, s[24:27], 0 ; 4-byte Folded Reload
|
|
; GFX9-O0-NEXT: s_mov_b64 exec, s[20:21]
|
|
; GFX9-O0-NEXT: v_mov_b32_e32 v4, v1
|
|
; GFX9-O0-NEXT: ; implicit-def: $sgpr6
|
|
; GFX9-O0-NEXT: ; implicit-def: $sgpr6
|
|
; GFX9-O0-NEXT: v_mov_b32_e32 v5, v8
|
|
; GFX9-O0-NEXT: v_mov_b32_e32 v6, v9
|
|
; GFX9-O0-NEXT: v_add_co_u32_e64 v3, s[6:7], v3, v5
|
|
; GFX9-O0-NEXT: v_addc_co_u32_e64 v4, s[6:7], v4, v6, s[6:7]
|
|
; GFX9-O0-NEXT: s_mov_b64 exec, s[4:5]
|
|
; GFX9-O0-NEXT: v_mov_b32_e32 v1, v3
|
|
; GFX9-O0-NEXT: v_mov_b32_e32 v2, v4
|
|
; GFX9-O0-NEXT: s_mov_b32 s4, 0
|
|
; GFX9-O0-NEXT: buffer_store_dwordx2 v[1:2], off, s[0:3], s4 offset:4
|
|
; GFX9-O0-NEXT: ; kill: killed $vgpr0
|
|
; GFX9-O0-NEXT: s_endpgm
|
|
;
|
|
; GFX9-O3-LABEL: call_i64:
|
|
; GFX9-O3: ; %bb.0:
|
|
; GFX9-O3-NEXT: s_mov_b32 s12, SCRATCH_RSRC_DWORD0
|
|
; GFX9-O3-NEXT: s_mov_b32 s13, SCRATCH_RSRC_DWORD1
|
|
; GFX9-O3-NEXT: s_mov_b32 s14, -1
|
|
; GFX9-O3-NEXT: s_mov_b32 s15, 0xe00000
|
|
; GFX9-O3-NEXT: s_add_u32 s12, s12, s3
|
|
; GFX9-O3-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
|
|
; GFX9-O3-NEXT: s_load_dwordx4 s[8:11], s[0:1], 0x24
|
|
; GFX9-O3-NEXT: s_mov_b32 s32, 0
|
|
; GFX9-O3-NEXT: s_addc_u32 s13, s13, 0
|
|
; GFX9-O3-NEXT: s_or_saveexec_b64 s[0:1], -1
|
|
; GFX9-O3-NEXT: s_getpc_b64 s[4:5]
|
|
; GFX9-O3-NEXT: s_add_u32 s4, s4, called_i64@gotpcrel32@lo+4
|
|
; GFX9-O3-NEXT: s_addc_u32 s5, s5, called_i64@gotpcrel32@hi+12
|
|
; GFX9-O3-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x0
|
|
; GFX9-O3-NEXT: s_mov_b64 exec, s[0:1]
|
|
; GFX9-O3-NEXT: s_waitcnt lgkmcnt(0)
|
|
; GFX9-O3-NEXT: v_mov_b32_e32 v6, s2
|
|
; GFX9-O3-NEXT: v_mov_b32_e32 v7, s3
|
|
; GFX9-O3-NEXT: s_not_b64 exec, exec
|
|
; GFX9-O3-NEXT: v_mov_b32_e32 v6, 0
|
|
; GFX9-O3-NEXT: v_mov_b32_e32 v7, 0
|
|
; GFX9-O3-NEXT: s_not_b64 exec, exec
|
|
; GFX9-O3-NEXT: s_or_saveexec_b64 s[6:7], -1
|
|
; GFX9-O3-NEXT: s_mov_b64 s[0:1], s[12:13]
|
|
; GFX9-O3-NEXT: s_mov_b64 s[2:3], s[14:15]
|
|
; GFX9-O3-NEXT: v_mov_b32_e32 v0, v6
|
|
; GFX9-O3-NEXT: v_mov_b32_e32 v1, v7
|
|
; GFX9-O3-NEXT: s_swappc_b64 s[30:31], s[4:5]
|
|
; GFX9-O3-NEXT: v_mov_b32_e32 v2, v0
|
|
; GFX9-O3-NEXT: v_mov_b32_e32 v3, v1
|
|
; GFX9-O3-NEXT: v_add_co_u32_e32 v2, vcc, v2, v6
|
|
; GFX9-O3-NEXT: v_addc_co_u32_e32 v3, vcc, v3, v7, vcc
|
|
; GFX9-O3-NEXT: s_mov_b64 exec, s[6:7]
|
|
; GFX9-O3-NEXT: v_mov_b32_e32 v0, v2
|
|
; GFX9-O3-NEXT: v_mov_b32_e32 v1, v3
|
|
; GFX9-O3-NEXT: buffer_store_dwordx2 v[0:1], off, s[8:11], 0 offset:4
|
|
; GFX9-O3-NEXT: s_endpgm
|
|
|
|
|
|
|
|
%tmp107 = tail call i64 @llvm.amdgcn.set.inactive.i64(i64 %arg, i64 0)
|
|
%tmp134 = call i64 @called_i64(i64 %tmp107)
|
|
%tmp136 = add i64 %tmp134, %tmp107
|
|
%tmp137 = tail call i64 @llvm.amdgcn.wwm.i64(i64 %tmp136)
|
|
%tmp138 = bitcast i64 %tmp137 to <2 x i32>
|
|
call void @llvm.amdgcn.raw.ptr.buffer.store.v2i32(<2 x i32> %tmp138, ptr addrspace(8) %tmp14, i32 4, i32 0, i32 0)
|
|
ret void
|
|
}
|
|
|
|
define amdgpu_cs void @_amdgpu_cs_main(<4 x i32> inreg %desc, i32 %index) {
|
|
; GFX9-O0-LABEL: _amdgpu_cs_main:
|
|
; GFX9-O0: ; %bb.0:
|
|
; GFX9-O0-NEXT: s_mov_b32 s4, s3
|
|
; GFX9-O0-NEXT: s_mov_b32 s5, s2
|
|
; GFX9-O0-NEXT: s_mov_b32 s6, s1
|
|
; GFX9-O0-NEXT: ; kill: def $sgpr0 killed $sgpr0 def $sgpr0_sgpr1_sgpr2_sgpr3
|
|
; GFX9-O0-NEXT: s_mov_b32 s1, s6
|
|
; GFX9-O0-NEXT: s_mov_b32 s2, s5
|
|
; GFX9-O0-NEXT: s_mov_b32 s3, s4
|
|
; GFX9-O0-NEXT: ; kill: def $sgpr4_sgpr5_sgpr6_sgpr7 killed $sgpr0_sgpr1_sgpr2_sgpr3
|
|
; GFX9-O0-NEXT: s_mov_b32 s4, 5
|
|
; GFX9-O0-NEXT: v_lshlrev_b32_e64 v0, s4, v0
|
|
; GFX9-O0-NEXT: s_mov_b32 s4, 0
|
|
; GFX9-O0-NEXT: buffer_load_dwordx4 v[10:13], v0, s[0:3], s4 offen
|
|
; GFX9-O0-NEXT: buffer_load_dwordx2 v[3:4], v0, s[0:3], s4 offen offset:16
|
|
; GFX9-O0-NEXT: s_waitcnt vmcnt(1)
|
|
; GFX9-O0-NEXT: v_mov_b32_e32 v7, v11
|
|
; GFX9-O0-NEXT: v_mov_b32_e32 v5, v10
|
|
; GFX9-O0-NEXT: ; implicit-def: $sgpr5
|
|
; GFX9-O0-NEXT: ; implicit-def: $sgpr5
|
|
; GFX9-O0-NEXT: ; kill: def $vgpr5 killed $vgpr5 def $vgpr5_vgpr6 killed $exec
|
|
; GFX9-O0-NEXT: v_mov_b32_e32 v6, v7
|
|
; GFX9-O0-NEXT: s_mov_b32 s5, 0x7fffffff
|
|
; GFX9-O0-NEXT: s_mov_b32 s6, -1
|
|
; GFX9-O0-NEXT: ; kill: def $sgpr6 killed $sgpr6 def $sgpr6_sgpr7
|
|
; GFX9-O0-NEXT: s_mov_b32 s7, s5
|
|
; GFX9-O0-NEXT: v_mov_b32_e32 v1, v5
|
|
; GFX9-O0-NEXT: v_mov_b32_e32 v2, v6
|
|
; GFX9-O0-NEXT: s_not_b64 exec, exec
|
|
; GFX9-O0-NEXT: v_mov_b32_e32 v1, s6
|
|
; GFX9-O0-NEXT: v_mov_b32_e32 v2, s7
|
|
; GFX9-O0-NEXT: s_not_b64 exec, exec
|
|
; GFX9-O0-NEXT: v_mov_b32_e32 v8, v1
|
|
; GFX9-O0-NEXT: v_mov_b32_e32 v9, v2
|
|
; GFX9-O0-NEXT: v_mov_b32_e32 v7, v13
|
|
; GFX9-O0-NEXT: v_mov_b32_e32 v5, v12
|
|
; GFX9-O0-NEXT: ; implicit-def: $sgpr5
|
|
; GFX9-O0-NEXT: ; implicit-def: $sgpr5
|
|
; GFX9-O0-NEXT: ; kill: def $vgpr5 killed $vgpr5 def $vgpr5_vgpr6 killed $exec
|
|
; GFX9-O0-NEXT: v_mov_b32_e32 v6, v7
|
|
; GFX9-O0-NEXT: v_mov_b32_e32 v1, v5
|
|
; GFX9-O0-NEXT: v_mov_b32_e32 v2, v6
|
|
; GFX9-O0-NEXT: s_not_b64 exec, exec
|
|
; GFX9-O0-NEXT: v_mov_b32_e32 v1, s6
|
|
; GFX9-O0-NEXT: v_mov_b32_e32 v2, s7
|
|
; GFX9-O0-NEXT: s_not_b64 exec, exec
|
|
; GFX9-O0-NEXT: v_mov_b32_e32 v6, v1
|
|
; GFX9-O0-NEXT: v_mov_b32_e32 v7, v2
|
|
; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
|
|
; GFX9-O0-NEXT: v_mov_b32_e32 v1, v3
|
|
; GFX9-O0-NEXT: v_mov_b32_e32 v2, v4
|
|
; GFX9-O0-NEXT: s_not_b64 exec, exec
|
|
; GFX9-O0-NEXT: v_mov_b32_e32 v1, s6
|
|
; GFX9-O0-NEXT: v_mov_b32_e32 v2, s7
|
|
; GFX9-O0-NEXT: s_not_b64 exec, exec
|
|
; GFX9-O0-NEXT: v_mov_b32_e32 v3, v1
|
|
; GFX9-O0-NEXT: v_mov_b32_e32 v4, v2
|
|
; GFX9-O0-NEXT: v_mov_b32_e32 v11, v9
|
|
; GFX9-O0-NEXT: v_mov_b32_e32 v5, v8
|
|
; GFX9-O0-NEXT: v_mov_b32_e32 v9, v7
|
|
; GFX9-O0-NEXT: v_mov_b32_e32 v10, v6
|
|
; GFX9-O0-NEXT: ; implicit-def: $sgpr5
|
|
; GFX9-O0-NEXT: ; implicit-def: $sgpr5
|
|
; GFX9-O0-NEXT: ; implicit-def: $sgpr5
|
|
; GFX9-O0-NEXT: ; implicit-def: $sgpr5
|
|
; GFX9-O0-NEXT: ; kill: def $vgpr5 killed $vgpr5 def $vgpr5_vgpr6_vgpr7_vgpr8 killed $exec
|
|
; GFX9-O0-NEXT: v_mov_b32_e32 v6, v11
|
|
; GFX9-O0-NEXT: v_mov_b32_e32 v7, v10
|
|
; GFX9-O0-NEXT: v_mov_b32_e32 v8, v9
|
|
; GFX9-O0-NEXT: buffer_store_dwordx4 v[5:8], v0, s[0:3], s4 offen
|
|
; GFX9-O0-NEXT: buffer_store_dwordx2 v[3:4], v0, s[0:3], s4 offen offset:16
|
|
; GFX9-O0-NEXT: s_endpgm
|
|
;
|
|
; GFX9-O3-LABEL: _amdgpu_cs_main:
|
|
; GFX9-O3: ; %bb.0:
|
|
; GFX9-O3-NEXT: v_lshlrev_b32_e32 v0, 5, v0
|
|
; GFX9-O3-NEXT: buffer_load_dwordx4 v[7:10], v0, s[0:3], 0 offen
|
|
; GFX9-O3-NEXT: buffer_load_dwordx2 v[11:12], v0, s[0:3], 0 offen offset:16
|
|
; GFX9-O3-NEXT: s_mov_b32 s4, -1
|
|
; GFX9-O3-NEXT: s_brev_b32 s5, -2
|
|
; GFX9-O3-NEXT: s_waitcnt vmcnt(1)
|
|
; GFX9-O3-NEXT: v_mov_b32_e32 v1, v7
|
|
; GFX9-O3-NEXT: v_mov_b32_e32 v2, v8
|
|
; GFX9-O3-NEXT: s_not_b64 exec, exec
|
|
; GFX9-O3-NEXT: v_mov_b32_e32 v1, s4
|
|
; GFX9-O3-NEXT: v_mov_b32_e32 v2, s5
|
|
; GFX9-O3-NEXT: s_not_b64 exec, exec
|
|
; GFX9-O3-NEXT: v_mov_b32_e32 v3, v9
|
|
; GFX9-O3-NEXT: v_mov_b32_e32 v4, v10
|
|
; GFX9-O3-NEXT: s_not_b64 exec, exec
|
|
; GFX9-O3-NEXT: v_mov_b32_e32 v3, s4
|
|
; GFX9-O3-NEXT: v_mov_b32_e32 v4, s5
|
|
; GFX9-O3-NEXT: s_not_b64 exec, exec
|
|
; GFX9-O3-NEXT: s_waitcnt vmcnt(0)
|
|
; GFX9-O3-NEXT: v_mov_b32_e32 v5, v11
|
|
; GFX9-O3-NEXT: v_mov_b32_e32 v6, v12
|
|
; GFX9-O3-NEXT: s_not_b64 exec, exec
|
|
; GFX9-O3-NEXT: v_mov_b32_e32 v5, s4
|
|
; GFX9-O3-NEXT: v_mov_b32_e32 v6, s5
|
|
; GFX9-O3-NEXT: s_not_b64 exec, exec
|
|
; GFX9-O3-NEXT: v_mov_b32_e32 v7, v1
|
|
; GFX9-O3-NEXT: v_mov_b32_e32 v9, v3
|
|
; GFX9-O3-NEXT: v_mov_b32_e32 v8, v2
|
|
; GFX9-O3-NEXT: v_mov_b32_e32 v10, v4
|
|
; GFX9-O3-NEXT: v_mov_b32_e32 v11, v5
|
|
; GFX9-O3-NEXT: v_mov_b32_e32 v12, v6
|
|
; GFX9-O3-NEXT: buffer_store_dwordx4 v[7:10], v0, s[0:3], 0 offen
|
|
; GFX9-O3-NEXT: buffer_store_dwordx2 v[11:12], v0, s[0:3], 0 offen offset:16
|
|
; GFX9-O3-NEXT: s_endpgm
|
|
%tmp17 = shl i32 %index, 5
|
|
%tmp18 = tail call <4 x i32> @llvm.amdgcn.s.buffer.load.v4i32(<4 x i32> %desc, i32 %tmp17, i32 0)
|
|
%.i0.upto1.bc = bitcast <4 x i32> %tmp18 to <2 x i64>
|
|
%tmp19 = or i32 %tmp17, 16
|
|
%tmp20 = tail call <2 x i32> @llvm.amdgcn.s.buffer.load.v2i32(<4 x i32> %desc, i32 %tmp19, i32 0)
|
|
%.i0.upto1.extract = extractelement <2 x i64> %.i0.upto1.bc, i32 0
|
|
%tmp22 = tail call i64 @llvm.amdgcn.set.inactive.i64(i64 %.i0.upto1.extract, i64 9223372036854775807)
|
|
%tmp97 = tail call i64 @llvm.amdgcn.wwm.i64(i64 %tmp22)
|
|
%.i1.upto1.extract = extractelement <2 x i64> %.i0.upto1.bc, i32 1
|
|
%tmp99 = tail call i64 @llvm.amdgcn.set.inactive.i64(i64 %.i1.upto1.extract, i64 9223372036854775807)
|
|
%tmp174 = tail call i64 @llvm.amdgcn.wwm.i64(i64 %tmp99)
|
|
%.i25 = bitcast <2 x i32> %tmp20 to i64
|
|
%tmp176 = tail call i64 @llvm.amdgcn.set.inactive.i64(i64 %.i25, i64 9223372036854775807)
|
|
%tmp251 = tail call i64 @llvm.amdgcn.wwm.i64(i64 %tmp176)
|
|
%.cast = bitcast i64 %tmp97 to <2 x float>
|
|
%.cast6 = bitcast i64 %tmp174 to <2 x float>
|
|
%.cast7 = bitcast i64 %tmp251 to <2 x float>
|
|
%tmp254 = shufflevector <2 x float> %.cast, <2 x float> %.cast6, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
|
|
%desc.int = bitcast <4 x i32> %desc to i128
|
|
%desc.ptr = inttoptr i128 %desc.int to ptr addrspace(8)
|
|
tail call void @llvm.amdgcn.raw.ptr.buffer.store.v4f32(<4 x float> %tmp254, ptr addrspace(8) %desc.ptr, i32 %tmp17, i32 0, i32 0)
|
|
tail call void @llvm.amdgcn.raw.ptr.buffer.store.v2f32(<2 x float> %.cast7, ptr addrspace(8) %desc.ptr, i32 %tmp19, i32 0, i32 0)
|
|
ret void
|
|
}
|
|
|
|
|
|
; Straight-line (no control flow) strict.wwm test: two i32 lanes are made
; whole-wave with set.inactive, each is summed with its update.dpp
; row_bcast:31 value inside the whole-wave region, and the two strict.wwm
; results are compared. CHECK lines are autogenerated — regenerate with
; update_llc_test_checks.py rather than editing by hand.
define amdgpu_cs void @strict_wwm_no_cfg(ptr addrspace(8) inreg %tmp14) {
; GFX9-O0-LABEL: strict_wwm_no_cfg:
; GFX9-O0: ; %bb.0:
; GFX9-O0-NEXT: s_mov_b32 s6, s2
; GFX9-O0-NEXT: s_mov_b32 s4, s0
; GFX9-O0-NEXT: ; kill: def $sgpr6 killed $sgpr6 def $sgpr6_sgpr7
; GFX9-O0-NEXT: s_mov_b32 s7, s3
; GFX9-O0-NEXT: s_mov_b32 s8, s7
; GFX9-O0-NEXT: s_mov_b32 s9, s6
; GFX9-O0-NEXT: ; kill: def $sgpr4 killed $sgpr4 def $sgpr4_sgpr5
; GFX9-O0-NEXT: s_mov_b32 s5, s1
; GFX9-O0-NEXT: s_mov_b32 s10, s5
; GFX9-O0-NEXT: s_mov_b32 s0, s4
; GFX9-O0-NEXT: ; kill: def $sgpr0 killed $sgpr0 def $sgpr0_sgpr1_sgpr2_sgpr3
; GFX9-O0-NEXT: s_mov_b32 s1, s10
; GFX9-O0-NEXT: s_mov_b32 s2, s9
; GFX9-O0-NEXT: s_mov_b32 s3, s8
; GFX9-O0-NEXT: s_mov_b32 s4, 0
; GFX9-O0-NEXT: buffer_load_dwordx2 v[5:6], off, s[0:3], s4
; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
; GFX9-O0-NEXT: v_mov_b32_e32 v4, v5
; GFX9-O0-NEXT: v_mov_b32_e32 v3, v6
; GFX9-O0-NEXT: v_mov_b32_e32 v0, v4
; GFX9-O0-NEXT: s_not_b64 exec, exec
; GFX9-O0-NEXT: v_mov_b32_e32 v0, s4
; GFX9-O0-NEXT: s_not_b64 exec, exec
; GFX9-O0-NEXT: v_mov_b32_e32 v1, v3
; GFX9-O0-NEXT: s_not_b64 exec, exec
; GFX9-O0-NEXT: v_mov_b32_e32 v1, s4
; GFX9-O0-NEXT: s_not_b64 exec, exec
; GFX9-O0-NEXT: s_or_saveexec_b64 s[6:7], -1
; GFX9-O0-NEXT: v_mov_b32_e32 v2, s4
; GFX9-O0-NEXT: s_nop 1
; GFX9-O0-NEXT: v_mov_b32_dpp v2, v0 row_bcast:31 row_mask:0xc bank_mask:0xf
; GFX9-O0-NEXT: v_add_u32_e64 v0, v0, v2
; GFX9-O0-NEXT: s_mov_b64 exec, s[6:7]
; GFX9-O0-NEXT: v_mov_b32_e32 v3, v0
; GFX9-O0-NEXT: s_or_saveexec_b64 s[6:7], -1
; GFX9-O0-NEXT: v_mov_b32_e32 v0, s4
; GFX9-O0-NEXT: s_nop 1
; GFX9-O0-NEXT: v_mov_b32_dpp v0, v1 row_bcast:31 row_mask:0xc bank_mask:0xf
; GFX9-O0-NEXT: v_add_u32_e64 v0, v1, v0
; GFX9-O0-NEXT: s_mov_b64 exec, s[6:7]
; GFX9-O0-NEXT: v_mov_b32_e32 v4, v0
; GFX9-O0-NEXT: v_cmp_eq_u32_e64 s[6:7], v3, v4
; GFX9-O0-NEXT: v_cndmask_b32_e64 v3, 0, 1, s[6:7]
; GFX9-O0-NEXT: s_mov_b32 s5, 1
; GFX9-O0-NEXT: v_lshlrev_b32_e64 v3, s5, v3
; GFX9-O0-NEXT: s_mov_b32 s5, 2
; GFX9-O0-NEXT: v_and_b32_e64 v3, v3, s5
; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s4 offset:4
; GFX9-O0-NEXT: s_endpgm
;
; GFX9-O3-LABEL: strict_wwm_no_cfg:
; GFX9-O3: ; %bb.0:
; GFX9-O3-NEXT: buffer_load_dwordx2 v[4:5], off, s[0:3], 0
; GFX9-O3-NEXT: s_or_saveexec_b64 s[4:5], -1
; GFX9-O3-NEXT: v_mov_b32_e32 v0, 0
; GFX9-O3-NEXT: v_mov_b32_e32 v1, 0
; GFX9-O3-NEXT: s_mov_b64 exec, s[4:5]
; GFX9-O3-NEXT: s_waitcnt vmcnt(0)
; GFX9-O3-NEXT: v_mov_b32_e32 v2, v4
; GFX9-O3-NEXT: s_not_b64 exec, exec
; GFX9-O3-NEXT: v_mov_b32_e32 v2, 0
; GFX9-O3-NEXT: s_not_b64 exec, exec
; GFX9-O3-NEXT: v_mov_b32_e32 v3, v5
; GFX9-O3-NEXT: s_not_b64 exec, exec
; GFX9-O3-NEXT: v_mov_b32_e32 v3, 0
; GFX9-O3-NEXT: s_not_b64 exec, exec
; GFX9-O3-NEXT: s_or_saveexec_b64 s[4:5], -1
; GFX9-O3-NEXT: v_mov_b32_dpp v1, v2 row_bcast:31 row_mask:0xc bank_mask:0xf
; GFX9-O3-NEXT: v_mov_b32_dpp v0, v3 row_bcast:31 row_mask:0xc bank_mask:0xf
; GFX9-O3-NEXT: v_add_u32_e32 v1, v2, v1
; GFX9-O3-NEXT: v_add_u32_e32 v0, v3, v0
; GFX9-O3-NEXT: s_mov_b64 exec, s[4:5]
; GFX9-O3-NEXT: v_mov_b32_e32 v4, v1
; GFX9-O3-NEXT: v_mov_b32_e32 v5, v0
; GFX9-O3-NEXT: v_cmp_eq_u32_e32 vcc, v4, v5
; GFX9-O3-NEXT: v_cndmask_b32_e64 v4, 0, 1, vcc
; GFX9-O3-NEXT: v_lshlrev_b32_e32 v4, 1, v4
; GFX9-O3-NEXT: v_and_b32_e32 v4, 2, v4
; GFX9-O3-NEXT: buffer_store_dword v4, off, s[0:3], 0 offset:4
; GFX9-O3-NEXT: s_endpgm
  ; Load two i32 lanes; inactive lanes of each are forced to 0.
  %tmp100 = call <2 x float> @llvm.amdgcn.raw.ptr.buffer.load.v2f32(ptr addrspace(8) %tmp14, i32 0, i32 0, i32 0)
  %tmp101 = bitcast <2 x float> %tmp100 to <2 x i32>
  %tmp102 = extractelement <2 x i32> %tmp101, i32 0
  %tmp103 = extractelement <2 x i32> %tmp101, i32 1
  %tmp105 = tail call i32 @llvm.amdgcn.set.inactive.i32(i32 %tmp102, i32 0)
  %tmp107 = tail call i32 @llvm.amdgcn.set.inactive.i32(i32 %tmp103, i32 0)

  ; First whole-wave sum: value + its row_bcast:31 DPP permutation.
  %tmp120 = tail call i32 @llvm.amdgcn.update.dpp.i32(i32 0, i32 %tmp105, i32 323, i32 12, i32 15, i1 false)
  %tmp121 = add i32 %tmp105, %tmp120
  %tmp122 = tail call i32 @llvm.amdgcn.strict.wwm.i32(i32 %tmp121)

  ; Second whole-wave sum over the other lane.
  %tmp135 = tail call i32 @llvm.amdgcn.update.dpp.i32(i32 0, i32 %tmp107, i32 323, i32 12, i32 15, i1 false)
  %tmp136 = add i32 %tmp107, %tmp135
  %tmp137 = tail call i32 @llvm.amdgcn.strict.wwm.i32(i32 %tmp136)

  ; Compare the two results and store (eq ? 2 : 0) at offset 4.
  %tmp138 = icmp eq i32 %tmp122, %tmp137
  %tmp139 = sext i1 %tmp138 to i32
  %tmp140 = shl nsw i32 %tmp139, 1
  %tmp141 = and i32 %tmp140, 2
  %tmp145 = bitcast i32 %tmp141 to float
  call void @llvm.amdgcn.raw.ptr.buffer.store.f32(float %tmp145, ptr addrspace(8) %tmp14, i32 4, i32 0, i32 0)
  ret void
}
|
|
|
|
; strict.wwm across control flow: one whole-wave computation in %entry, a
; second one inside the conditionally executed %if block, with the results
; meeting at a phi in %merge. CHECK lines are autogenerated — regenerate
; with update_llc_test_checks.py rather than editing by hand.
define amdgpu_cs void @strict_wwm_cfg(ptr addrspace(8) inreg %tmp14, i32 %arg) {
; GFX9-O0-LABEL: strict_wwm_cfg:
; GFX9-O0: ; %bb.0: ; %entry
; GFX9-O0-NEXT: s_mov_b32 s16, SCRATCH_RSRC_DWORD0
; GFX9-O0-NEXT: s_mov_b32 s17, SCRATCH_RSRC_DWORD1
; GFX9-O0-NEXT: s_mov_b32 s18, -1
; GFX9-O0-NEXT: s_mov_b32 s19, 0xe00000
; GFX9-O0-NEXT: s_add_u32 s16, s16, s4
; GFX9-O0-NEXT: s_addc_u32 s17, s17, 0
; GFX9-O0-NEXT: ; implicit-def: $vgpr3 : SGPR spill to VGPR lane
; GFX9-O0-NEXT: v_mov_b32_e32 v3, v0
; GFX9-O0-NEXT: s_or_saveexec_b64 s[12:13], -1
; GFX9-O0-NEXT: buffer_load_dword v0, off, s[16:19], 0 ; 4-byte Folded Reload
; GFX9-O0-NEXT: s_mov_b64 exec, s[12:13]
; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
; GFX9-O0-NEXT: v_writelane_b32 v0, s3, 0
; GFX9-O0-NEXT: s_mov_b32 s4, s1
; GFX9-O0-NEXT: v_readlane_b32 s1, v0, 0
; GFX9-O0-NEXT: ; kill: def $sgpr2 killed $sgpr2 def $sgpr2_sgpr3
; GFX9-O0-NEXT: s_mov_b32 s3, s1
; GFX9-O0-NEXT: s_mov_b32 s8, s3
; GFX9-O0-NEXT: s_mov_b32 s9, s2
; GFX9-O0-NEXT: ; kill: def $sgpr0 killed $sgpr0 def $sgpr0_sgpr1
; GFX9-O0-NEXT: s_mov_b32 s1, s4
; GFX9-O0-NEXT: s_mov_b32 s10, s1
; GFX9-O0-NEXT: s_mov_b32 s4, s0
; GFX9-O0-NEXT: ; kill: def $sgpr4 killed $sgpr4 def $sgpr4_sgpr5_sgpr6_sgpr7
; GFX9-O0-NEXT: s_mov_b32 s5, s10
; GFX9-O0-NEXT: s_mov_b32 s6, s9
; GFX9-O0-NEXT: s_mov_b32 s7, s8
; GFX9-O0-NEXT: v_writelane_b32 v0, s2, 1
; GFX9-O0-NEXT: v_writelane_b32 v0, s3, 2
; GFX9-O0-NEXT: v_writelane_b32 v0, s0, 3
; GFX9-O0-NEXT: v_writelane_b32 v0, s1, 4
; GFX9-O0-NEXT: s_mov_b32 s0, 0
; GFX9-O0-NEXT: s_nop 2
; GFX9-O0-NEXT: buffer_load_dwordx2 v[4:5], off, s[4:7], s0
; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
; GFX9-O0-NEXT: buffer_store_dword v4, off, s[16:19], 0 offset:12 ; 4-byte Folded Spill
; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
; GFX9-O0-NEXT: buffer_store_dword v5, off, s[16:19], 0 offset:16 ; 4-byte Folded Spill
; GFX9-O0-NEXT: ; implicit-def: $sgpr2_sgpr3
; GFX9-O0-NEXT: v_mov_b32_e32 v1, v4
; GFX9-O0-NEXT: s_not_b64 exec, exec
; GFX9-O0-NEXT: v_mov_b32_e32 v1, s0
; GFX9-O0-NEXT: s_not_b64 exec, exec
; GFX9-O0-NEXT: s_or_saveexec_b64 s[2:3], -1
; GFX9-O0-NEXT: v_mov_b32_e32 v2, s0
; GFX9-O0-NEXT: s_nop 1
; GFX9-O0-NEXT: v_mov_b32_dpp v2, v1 row_bcast:31 row_mask:0xc bank_mask:0xf
; GFX9-O0-NEXT: v_add_u32_e64 v1, v1, v2
; GFX9-O0-NEXT: s_mov_b64 exec, s[2:3]
; GFX9-O0-NEXT: v_mov_b32_e32 v4, v1
; GFX9-O0-NEXT: buffer_store_dword v4, off, s[16:19], 0 offset:8 ; 4-byte Folded Spill
; GFX9-O0-NEXT: v_cmp_eq_u32_e64 s[2:3], v3, s0
; GFX9-O0-NEXT: v_mov_b32_e32 v3, s0
; GFX9-O0-NEXT: buffer_store_dword v3, off, s[16:19], 0 offset:4 ; 4-byte Folded Spill
; GFX9-O0-NEXT: s_mov_b64 s[0:1], exec
; GFX9-O0-NEXT: v_writelane_b32 v0, s0, 5
; GFX9-O0-NEXT: v_writelane_b32 v0, s1, 6
; GFX9-O0-NEXT: s_or_saveexec_b64 s[12:13], -1
; GFX9-O0-NEXT: buffer_store_dword v0, off, s[16:19], 0 ; 4-byte Folded Spill
; GFX9-O0-NEXT: s_mov_b64 exec, s[12:13]
; GFX9-O0-NEXT: s_and_b64 s[0:1], s[0:1], s[2:3]
; GFX9-O0-NEXT: s_mov_b64 exec, s[0:1]
; GFX9-O0-NEXT: s_cbranch_execz .LBB8_2
; GFX9-O0-NEXT: ; %bb.1: ; %if
; GFX9-O0-NEXT: buffer_load_dword v3, off, s[16:19], 0 offset:12 ; 4-byte Folded Reload
; GFX9-O0-NEXT: buffer_load_dword v4, off, s[16:19], 0 offset:16 ; 4-byte Folded Reload
; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
; GFX9-O0-NEXT: v_mov_b32_e32 v0, v4
; GFX9-O0-NEXT: s_or_saveexec_b64 s[0:1], -1
; GFX9-O0-NEXT: v_mov_b32_e32 v1, 0
; GFX9-O0-NEXT: s_mov_b64 exec, s[0:1]
; GFX9-O0-NEXT: v_mov_b32_e32 v2, v0
; GFX9-O0-NEXT: s_not_b64 exec, exec
; GFX9-O0-NEXT: v_mov_b32_e32 v2, v1
; GFX9-O0-NEXT: s_not_b64 exec, exec
; GFX9-O0-NEXT: s_or_saveexec_b64 s[0:1], -1
; GFX9-O0-NEXT: v_mov_b32_dpp v1, v2 row_bcast:31 row_mask:0xc bank_mask:0xf
; GFX9-O0-NEXT: v_add_u32_e64 v1, v2, v1
; GFX9-O0-NEXT: s_mov_b64 exec, s[0:1]
; GFX9-O0-NEXT: v_mov_b32_e32 v0, v1
; GFX9-O0-NEXT: buffer_store_dword v0, off, s[16:19], 0 offset:4 ; 4-byte Folded Spill
; GFX9-O0-NEXT: .LBB8_2: ; %merge
; GFX9-O0-NEXT: s_or_saveexec_b64 s[12:13], -1
; GFX9-O0-NEXT: buffer_load_dword v0, off, s[16:19], 0 ; 4-byte Folded Reload
; GFX9-O0-NEXT: s_mov_b64 exec, s[12:13]
; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
; GFX9-O0-NEXT: v_readlane_b32 s4, v0, 5
; GFX9-O0-NEXT: v_readlane_b32 s5, v0, 6
; GFX9-O0-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX9-O0-NEXT: v_readlane_b32 s2, v0, 1
; GFX9-O0-NEXT: v_readlane_b32 s3, v0, 2
; GFX9-O0-NEXT: v_readlane_b32 s0, v0, 3
; GFX9-O0-NEXT: v_readlane_b32 s1, v0, 4
; GFX9-O0-NEXT: buffer_load_dword v3, off, s[16:19], 0 offset:8 ; 4-byte Folded Reload
; GFX9-O0-NEXT: buffer_load_dword v4, off, s[16:19], 0 offset:4 ; 4-byte Folded Reload
; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
; GFX9-O0-NEXT: v_cmp_eq_u32_e64 s[4:5], v3, v4
; GFX9-O0-NEXT: v_cndmask_b32_e64 v3, 0, 1, s[4:5]
; GFX9-O0-NEXT: s_mov_b32 s4, 1
; GFX9-O0-NEXT: v_lshlrev_b32_e64 v3, s4, v3
; GFX9-O0-NEXT: s_mov_b32 s4, 2
; GFX9-O0-NEXT: v_and_b32_e64 v3, v3, s4
; GFX9-O0-NEXT: s_mov_b32 s6, s1
; GFX9-O0-NEXT: ; kill: def $sgpr0 killed $sgpr0 killed $sgpr0_sgpr1
; GFX9-O0-NEXT: s_mov_b32 s4, s3
; GFX9-O0-NEXT: s_mov_b32 s5, s2
; GFX9-O0-NEXT: ; kill: def $sgpr0 killed $sgpr0 def $sgpr0_sgpr1_sgpr2_sgpr3
; GFX9-O0-NEXT: s_mov_b32 s1, s6
; GFX9-O0-NEXT: s_mov_b32 s2, s5
; GFX9-O0-NEXT: s_mov_b32 s3, s4
; GFX9-O0-NEXT: s_mov_b32 s4, 0
; GFX9-O0-NEXT: buffer_store_dword v3, off, s[0:3], s4 offset:4
; GFX9-O0-NEXT: ; kill: killed $vgpr0
; GFX9-O0-NEXT: s_endpgm
;
; GFX9-O3-LABEL: strict_wwm_cfg:
; GFX9-O3: ; %bb.0: ; %entry
; GFX9-O3-NEXT: buffer_load_dwordx2 v[3:4], off, s[0:3], 0
; GFX9-O3-NEXT: v_mov_b32_e32 v5, 0
; GFX9-O3-NEXT: s_or_saveexec_b64 s[4:5], -1
; GFX9-O3-NEXT: v_mov_b32_e32 v1, 0
; GFX9-O3-NEXT: s_mov_b64 exec, s[4:5]
; GFX9-O3-NEXT: s_waitcnt vmcnt(0)
; GFX9-O3-NEXT: v_mov_b32_e32 v2, v3
; GFX9-O3-NEXT: s_not_b64 exec, exec
; GFX9-O3-NEXT: v_mov_b32_e32 v2, 0
; GFX9-O3-NEXT: s_not_b64 exec, exec
; GFX9-O3-NEXT: s_or_saveexec_b64 s[4:5], -1
; GFX9-O3-NEXT: v_mov_b32_dpp v1, v2 row_bcast:31 row_mask:0xc bank_mask:0xf
; GFX9-O3-NEXT: v_add_u32_e32 v1, v2, v1
; GFX9-O3-NEXT: s_mov_b64 exec, s[4:5]
; GFX9-O3-NEXT: v_mov_b32_e32 v3, v1
; GFX9-O3-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
; GFX9-O3-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GFX9-O3-NEXT: s_cbranch_execz .LBB8_2
; GFX9-O3-NEXT: ; %bb.1: ; %if
; GFX9-O3-NEXT: s_or_saveexec_b64 s[6:7], -1
; GFX9-O3-NEXT: v_mov_b32_e32 v1, 0
; GFX9-O3-NEXT: s_mov_b64 exec, s[6:7]
; GFX9-O3-NEXT: v_mov_b32_e32 v2, v4
; GFX9-O3-NEXT: s_not_b64 exec, exec
; GFX9-O3-NEXT: v_mov_b32_e32 v2, 0
; GFX9-O3-NEXT: s_not_b64 exec, exec
; GFX9-O3-NEXT: s_or_saveexec_b64 s[6:7], -1
; GFX9-O3-NEXT: v_mov_b32_dpp v1, v2 row_bcast:31 row_mask:0xc bank_mask:0xf
; GFX9-O3-NEXT: v_add_u32_e32 v1, v2, v1
; GFX9-O3-NEXT: s_mov_b64 exec, s[6:7]
; GFX9-O3-NEXT: v_mov_b32_e32 v5, v1
; GFX9-O3-NEXT: .LBB8_2: ; %merge
; GFX9-O3-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX9-O3-NEXT: v_cmp_eq_u32_e32 vcc, v3, v5
; GFX9-O3-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
; GFX9-O3-NEXT: v_lshlrev_b32_e32 v0, 1, v0
; GFX9-O3-NEXT: v_and_b32_e32 v0, 2, v0
; GFX9-O3-NEXT: buffer_store_dword v0, off, s[0:3], 0 offset:4
; GFX9-O3-NEXT: s_endpgm
entry:
  ; Whole-wave sum computed unconditionally in the entry block.
  %tmp100 = call <2 x float> @llvm.amdgcn.raw.ptr.buffer.load.v2f32(ptr addrspace(8) %tmp14, i32 0, i32 0, i32 0)
  %tmp101 = bitcast <2 x float> %tmp100 to <2 x i32>
  %tmp102 = extractelement <2 x i32> %tmp101, i32 0
  %tmp105 = tail call i32 @llvm.amdgcn.set.inactive.i32(i32 %tmp102, i32 0)

  %tmp120 = tail call i32 @llvm.amdgcn.update.dpp.i32(i32 0, i32 %tmp105, i32 323, i32 12, i32 15, i1 false)
  %tmp121 = add i32 %tmp105, %tmp120
  %tmp122 = tail call i32 @llvm.amdgcn.strict.wwm.i32(i32 %tmp121)

  %cond = icmp eq i32 %arg, 0
  br i1 %cond, label %if, label %merge

if:
  ; Same whole-wave pattern, but only reached when %arg == 0.
  %tmp103 = extractelement <2 x i32> %tmp101, i32 1
  %tmp107 = tail call i32 @llvm.amdgcn.set.inactive.i32(i32 %tmp103, i32 0)

  %tmp135 = tail call i32 @llvm.amdgcn.update.dpp.i32(i32 0, i32 %tmp107, i32 323, i32 12, i32 15, i1 false)
  %tmp136 = add i32 %tmp107, %tmp135
  %tmp137 = tail call i32 @llvm.amdgcn.strict.wwm.i32(i32 %tmp136)
  br label %merge

merge:
  ; Compare the entry result with 0 or the %if result; store (eq ? 2 : 0).
  %merge_value = phi i32 [ 0, %entry ], [%tmp137, %if ]
  %tmp138 = icmp eq i32 %tmp122, %merge_value
  %tmp139 = sext i1 %tmp138 to i32
  %tmp140 = shl nsw i32 %tmp139, 1
  %tmp141 = and i32 %tmp140, 2
  %tmp145 = bitcast i32 %tmp141 to float
  call void @llvm.amdgcn.raw.ptr.buffer.store.f32(float %tmp145, ptr addrspace(8) %tmp14, i32 4, i32 0, i32 0)
  ret void
}
|
|
|
|
; Callee used by @strict_wwm_call below; computes (a + a) * a - (a + a).
; Marked noinline so the call survives to codegen. CHECK lines are
; autogenerated by update_llc_test_checks.py.
define hidden i32 @strict_wwm_called(i32 %a) noinline {
; GFX9-O0-LABEL: strict_wwm_called:
; GFX9-O0: ; %bb.0:
; GFX9-O0-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-O0-NEXT: v_add_u32_e64 v1, v0, v0
; GFX9-O0-NEXT: v_mul_lo_u32 v0, v1, v0
; GFX9-O0-NEXT: v_sub_u32_e64 v0, v0, v1
; GFX9-O0-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-O3-LABEL: strict_wwm_called:
; GFX9-O3: ; %bb.0:
; GFX9-O3-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-O3-NEXT: v_add_u32_e32 v1, v0, v0
; GFX9-O3-NEXT: v_mul_lo_u32 v0, v1, v0
; GFX9-O3-NEXT: v_sub_u32_e32 v0, v0, v1
; GFX9-O3-NEXT: s_setpc_b64 s[30:31]
  %add = add i32 %a, %a
  %mul = mul i32 %add, %a
  %sub = sub i32 %mul, %add
  ret i32 %sub
}
|
|
|
|
; Calls @strict_wwm_called from inside a whole-wave region: the argument is
; made whole-wave with set.inactive (inactive lanes = 0), passed through the
; call, and the sum of result and operand is read back with strict.wwm
; before the buffer store. CHECK lines are autogenerated — regenerate with
; update_llc_test_checks.py rather than editing by hand.
define amdgpu_kernel void @strict_wwm_call(ptr addrspace(8) inreg %tmp14, i32 inreg %arg) {
; GFX9-O0-LABEL: strict_wwm_call:
; GFX9-O0: ; %bb.0:
; GFX9-O0-NEXT: s_mov_b32 s32, 0x400
; GFX9-O0-NEXT: s_mov_b32 s24, SCRATCH_RSRC_DWORD0
; GFX9-O0-NEXT: s_mov_b32 s25, SCRATCH_RSRC_DWORD1
; GFX9-O0-NEXT: s_mov_b32 s26, -1
; GFX9-O0-NEXT: s_mov_b32 s27, 0xe00000
; GFX9-O0-NEXT: s_add_u32 s24, s24, s9
; GFX9-O0-NEXT: s_addc_u32 s25, s25, 0
; GFX9-O0-NEXT: ; implicit-def: $vgpr7 : SGPR spill to VGPR lane
; GFX9-O0-NEXT: s_or_saveexec_b64 s[10:11], -1
; GFX9-O0-NEXT: v_writelane_b32 v7, s10, 0
; GFX9-O0-NEXT: v_writelane_b32 v7, s11, 1
; GFX9-O0-NEXT: s_mov_b32 s14, s8
; GFX9-O0-NEXT: s_mov_b32 s13, s7
; GFX9-O0-NEXT: s_mov_b32 s12, s6
; GFX9-O0-NEXT: s_mov_b64 s[10:11], s[4:5]
; GFX9-O0-NEXT: s_mov_b64 s[4:5], s[2:3]
; GFX9-O0-NEXT: v_readlane_b32 s2, v7, 0
; GFX9-O0-NEXT: v_readlane_b32 s3, v7, 1
; GFX9-O0-NEXT: v_writelane_b32 v7, s4, 2
; GFX9-O0-NEXT: v_writelane_b32 v7, s5, 3
; GFX9-O0-NEXT: s_or_saveexec_b64 s[20:21], -1
; GFX9-O0-NEXT: buffer_store_dword v7, off, s[24:27], 0 ; 4-byte Folded Spill
; GFX9-O0-NEXT: s_mov_b64 exec, s[20:21]
; GFX9-O0-NEXT: s_mov_b64 s[4:5], s[0:1]
; GFX9-O0-NEXT: v_readlane_b32 s0, v7, 2
; GFX9-O0-NEXT: v_readlane_b32 s1, v7, 3
; GFX9-O0-NEXT: v_mov_b32_e32 v3, v2
; GFX9-O0-NEXT: v_mov_b32_e32 v4, v1
; GFX9-O0-NEXT: v_mov_b32_e32 v5, v0
; GFX9-O0-NEXT: s_or_saveexec_b64 s[20:21], -1
; GFX9-O0-NEXT: buffer_load_dword v0, off, s[24:27], 0 ; 4-byte Folded Reload
; GFX9-O0-NEXT: s_mov_b64 exec, s[20:21]
; GFX9-O0-NEXT: s_mov_b64 exec, s[2:3]
; GFX9-O0-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0x24
; GFX9-O0-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x2c
; GFX9-O0-NEXT: s_load_dword s2, s[0:1], 0x34
; GFX9-O0-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-O0-NEXT: s_mov_b32 s3, s7
; GFX9-O0-NEXT: ; kill: def $sgpr6 killed $sgpr6 killed $sgpr6_sgpr7
; GFX9-O0-NEXT: s_mov_b32 s7, s9
; GFX9-O0-NEXT: s_mov_b32 s16, s8
; GFX9-O0-NEXT: ; kill: def $sgpr16 killed $sgpr16 def $sgpr16_sgpr17_sgpr18_sgpr19
; GFX9-O0-NEXT: s_mov_b32 s17, s7
; GFX9-O0-NEXT: s_mov_b32 s18, s6
; GFX9-O0-NEXT: s_mov_b32 s19, s3
; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
; GFX9-O0-NEXT: v_writelane_b32 v0, s16, 4
; GFX9-O0-NEXT: v_writelane_b32 v0, s17, 5
; GFX9-O0-NEXT: v_writelane_b32 v0, s18, 6
; GFX9-O0-NEXT: v_writelane_b32 v0, s19, 7
; GFX9-O0-NEXT: s_mov_b32 s3, 0
; GFX9-O0-NEXT: v_writelane_b32 v0, s3, 8
; GFX9-O0-NEXT: v_mov_b32_e32 v6, s2
; GFX9-O0-NEXT: s_not_b64 exec, exec
; GFX9-O0-NEXT: v_mov_b32_e32 v6, s3
; GFX9-O0-NEXT: s_not_b64 exec, exec
; GFX9-O0-NEXT: s_or_saveexec_b64 s[2:3], -1
; GFX9-O0-NEXT: v_writelane_b32 v0, s2, 9
; GFX9-O0-NEXT: v_writelane_b32 v0, s3, 10
; GFX9-O0-NEXT: s_or_saveexec_b64 s[20:21], -1
; GFX9-O0-NEXT: buffer_store_dword v0, off, s[24:27], 0 ; 4-byte Folded Spill
; GFX9-O0-NEXT: s_mov_b64 exec, s[20:21]
; GFX9-O0-NEXT: s_mov_b64 s[6:7], 56
; GFX9-O0-NEXT: s_mov_b32 s2, s0
; GFX9-O0-NEXT: s_mov_b32 s0, s1
; GFX9-O0-NEXT: s_mov_b32 s3, s6
; GFX9-O0-NEXT: s_mov_b32 s1, s7
; GFX9-O0-NEXT: s_add_u32 s8, s2, s3
; GFX9-O0-NEXT: s_addc_u32 s0, s0, s1
; GFX9-O0-NEXT: ; kill: def $sgpr8 killed $sgpr8 def $sgpr8_sgpr9
; GFX9-O0-NEXT: s_mov_b32 s9, s0
; GFX9-O0-NEXT: s_getpc_b64 s[16:17]
; GFX9-O0-NEXT: s_add_u32 s16, s16, strict_wwm_called@rel32@lo+4
; GFX9-O0-NEXT: s_addc_u32 s17, s17, strict_wwm_called@rel32@hi+12
; GFX9-O0-NEXT: s_mov_b64 s[0:1], s[24:25]
; GFX9-O0-NEXT: s_mov_b64 s[2:3], s[26:27]
; GFX9-O0-NEXT: s_mov_b32 s6, 20
; GFX9-O0-NEXT: v_lshlrev_b32_e64 v3, s6, v3
; GFX9-O0-NEXT: s_mov_b32 s6, 10
; GFX9-O0-NEXT: v_lshlrev_b32_e64 v4, s6, v4
; GFX9-O0-NEXT: v_or3_b32 v3, v5, v4, v3
; GFX9-O0-NEXT: ; implicit-def: $sgpr6_sgpr7
; GFX9-O0-NEXT: ; implicit-def: $sgpr15
; GFX9-O0-NEXT: v_mov_b32_e32 v31, v3
; GFX9-O0-NEXT: v_mov_b32_e32 v0, v6
; GFX9-O0-NEXT: s_swappc_b64 s[30:31], s[16:17]
; GFX9-O0-NEXT: s_or_saveexec_b64 s[20:21], -1
; GFX9-O0-NEXT: buffer_load_dword v1, off, s[24:27], 0 ; 4-byte Folded Reload
; GFX9-O0-NEXT: s_mov_b64 exec, s[20:21]
; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
; GFX9-O0-NEXT: v_readlane_b32 s0, v1, 4
; GFX9-O0-NEXT: v_readlane_b32 s1, v1, 5
; GFX9-O0-NEXT: v_readlane_b32 s2, v1, 6
; GFX9-O0-NEXT: v_readlane_b32 s3, v1, 7
; GFX9-O0-NEXT: v_readlane_b32 s6, v1, 9
; GFX9-O0-NEXT: v_readlane_b32 s7, v1, 10
; GFX9-O0-NEXT: v_readlane_b32 s4, v1, 8
; GFX9-O0-NEXT: v_mov_b32_e32 v3, v0
; GFX9-O0-NEXT: s_or_saveexec_b64 s[20:21], -1
; GFX9-O0-NEXT: buffer_load_dword v0, off, s[24:27], 0 ; 4-byte Folded Reload
; GFX9-O0-NEXT: s_mov_b64 exec, s[20:21]
; GFX9-O0-NEXT: v_add_u32_e64 v3, v3, v6
; GFX9-O0-NEXT: s_mov_b64 exec, s[6:7]
; GFX9-O0-NEXT: v_mov_b32_e32 v1, v3
; GFX9-O0-NEXT: buffer_store_dword v1, off, s[0:3], s4 offset:4
; GFX9-O0-NEXT: ; kill: killed $vgpr0
; GFX9-O0-NEXT: s_endpgm
;
; GFX9-O3-LABEL: strict_wwm_call:
; GFX9-O3: ; %bb.0:
; GFX9-O3-NEXT: s_mov_b32 s12, SCRATCH_RSRC_DWORD0
; GFX9-O3-NEXT: s_mov_b32 s13, SCRATCH_RSRC_DWORD1
; GFX9-O3-NEXT: s_mov_b32 s14, -1
; GFX9-O3-NEXT: s_load_dword s2, s[0:1], 0x34
; GFX9-O3-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
; GFX9-O3-NEXT: s_mov_b32 s15, 0xe00000
; GFX9-O3-NEXT: s_add_u32 s12, s12, s3
; GFX9-O3-NEXT: s_addc_u32 s13, s13, 0
; GFX9-O3-NEXT: s_mov_b32 s32, 0
; GFX9-O3-NEXT: s_getpc_b64 s[8:9]
; GFX9-O3-NEXT: s_add_u32 s8, s8, strict_wwm_called@rel32@lo+4
; GFX9-O3-NEXT: s_addc_u32 s9, s9, strict_wwm_called@rel32@hi+12
; GFX9-O3-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-O3-NEXT: v_mov_b32_e32 v2, s2
; GFX9-O3-NEXT: s_not_b64 exec, exec
; GFX9-O3-NEXT: v_mov_b32_e32 v2, 0
; GFX9-O3-NEXT: s_not_b64 exec, exec
; GFX9-O3-NEXT: s_or_saveexec_b64 s[10:11], -1
; GFX9-O3-NEXT: s_mov_b64 s[0:1], s[12:13]
; GFX9-O3-NEXT: s_mov_b64 s[2:3], s[14:15]
; GFX9-O3-NEXT: v_mov_b32_e32 v0, v2
; GFX9-O3-NEXT: s_swappc_b64 s[30:31], s[8:9]
; GFX9-O3-NEXT: v_mov_b32_e32 v1, v0
; GFX9-O3-NEXT: v_add_u32_e32 v1, v1, v2
; GFX9-O3-NEXT: s_mov_b64 exec, s[10:11]
; GFX9-O3-NEXT: v_mov_b32_e32 v0, v1
; GFX9-O3-NEXT: buffer_store_dword v0, off, s[4:7], 0 offset:4
; GFX9-O3-NEXT: s_endpgm

  ; Whole-wave value flows into the call and back out through strict.wwm.
  %tmp107 = tail call i32 @llvm.amdgcn.set.inactive.i32(i32 %arg, i32 0)
  %tmp134 = call i32 @strict_wwm_called(i32 %tmp107)
  %tmp136 = add i32 %tmp134, %tmp107
  %tmp137 = tail call i32 @llvm.amdgcn.strict.wwm.i32(i32 %tmp136)
  call void @llvm.amdgcn.raw.ptr.buffer.store.i32(i32 %tmp137, ptr addrspace(8) %tmp14, i32 4, i32 0, i32 0)
  ret void
}
|
|
|
|
; Non-inlined callee used by the strict-WWM call tests below: computes
; ((a + a) * a) - (a + a). Marked noinline so the call survives into ISel
; and exercises the call-lowering path.
; CHECK lines are autogenerated by update_llc_test_checks.py — do not edit by hand.
define i64 @strict_wwm_called_i64(i64 %a) noinline {
; GFX9-O0-LABEL: strict_wwm_called_i64:
; GFX9-O0: ; %bb.0:
; GFX9-O0-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-O0-NEXT: v_mov_b32_e32 v2, v0
; GFX9-O0-NEXT: ; implicit-def: $sgpr4
; GFX9-O0-NEXT: ; implicit-def: $sgpr4
; GFX9-O0-NEXT: ; kill: def $vgpr2 killed $vgpr2 def $vgpr2_vgpr3 killed $exec
; GFX9-O0-NEXT: v_mov_b32_e32 v3, v1
; GFX9-O0-NEXT: ; implicit-def: $sgpr4_sgpr5
; GFX9-O0-NEXT: v_mov_b32_e32 v4, v2
; GFX9-O0-NEXT: v_mov_b32_e32 v5, v2
; GFX9-O0-NEXT: v_mov_b32_e32 v0, v3
; GFX9-O0-NEXT: v_mov_b32_e32 v1, v3
; GFX9-O0-NEXT: v_add_co_u32_e64 v4, s[4:5], v4, v5
; GFX9-O0-NEXT: v_addc_co_u32_e64 v0, s[4:5], v0, v1, s[4:5]
; GFX9-O0-NEXT: ; kill: def $vgpr4 killed $vgpr4 def $vgpr4_vgpr5 killed $exec
; GFX9-O0-NEXT: v_mov_b32_e32 v5, v0
; GFX9-O0-NEXT: s_mov_b32 s4, 32
; GFX9-O0-NEXT: v_mov_b32_e32 v0, v2
; GFX9-O0-NEXT: v_mov_b32_e32 v1, v3
; GFX9-O0-NEXT: v_lshrrev_b64 v[0:1], s4, v[0:1]
; GFX9-O0-NEXT: v_mov_b32_e32 v1, v0
; GFX9-O0-NEXT: v_mov_b32_e32 v0, v4
; GFX9-O0-NEXT: v_mul_lo_u32 v1, v0, v1
; GFX9-O0-NEXT: v_mov_b32_e32 v3, v2
; GFX9-O0-NEXT: v_lshrrev_b64 v[6:7], s4, v[4:5]
; GFX9-O0-NEXT: v_mov_b32_e32 v2, v6
; GFX9-O0-NEXT: v_mul_lo_u32 v2, v2, v3
; GFX9-O0-NEXT: v_mad_u64_u32 v[6:7], s[6:7], v0, v3, 0
; GFX9-O0-NEXT: v_mov_b32_e32 v0, v7
; GFX9-O0-NEXT: v_add3_u32 v0, v0, v1, v2
; GFX9-O0-NEXT: ; implicit-def: $sgpr5
; GFX9-O0-NEXT: ; implicit-def: $sgpr6
; GFX9-O0-NEXT: ; implicit-def: $sgpr6
; GFX9-O0-NEXT: v_mov_b32_e32 v2, s5
; GFX9-O0-NEXT: ; kill: def $vgpr0 killed $vgpr0 def $vgpr0_vgpr1 killed $exec
; GFX9-O0-NEXT: v_mov_b32_e32 v1, v2
; GFX9-O0-NEXT: v_lshlrev_b64 v[1:2], s4, v[0:1]
; GFX9-O0-NEXT: v_mov_b32_e32 v3, v2
; GFX9-O0-NEXT: ; kill: def $vgpr6 killed $vgpr6 killed $vgpr6_vgpr7 killed $exec
; GFX9-O0-NEXT: s_mov_b32 s5, 0
; GFX9-O0-NEXT: ; implicit-def: $sgpr5
; GFX9-O0-NEXT: v_mov_b32_e32 v0, 0
; GFX9-O0-NEXT: ; kill: def $vgpr6 killed $vgpr6 def $vgpr6_vgpr7 killed $exec
; GFX9-O0-NEXT: v_mov_b32_e32 v7, v0
; GFX9-O0-NEXT: v_mov_b32_e32 v0, v7
; GFX9-O0-NEXT: v_or_b32_e64 v0, v0, v3
; GFX9-O0-NEXT: v_mov_b32_e32 v2, v1
; GFX9-O0-NEXT: v_mov_b32_e32 v1, v6
; GFX9-O0-NEXT: v_or_b32_e64 v6, v1, v2
; GFX9-O0-NEXT: ; kill: def $vgpr6 killed $vgpr6 def $vgpr6_vgpr7 killed $exec
; GFX9-O0-NEXT: v_mov_b32_e32 v7, v0
; GFX9-O0-NEXT: v_mov_b32_e32 v1, v6
; GFX9-O0-NEXT: v_mov_b32_e32 v3, v4
; GFX9-O0-NEXT: v_mov_b32_e32 v0, v7
; GFX9-O0-NEXT: v_mov_b32_e32 v2, v5
; GFX9-O0-NEXT: v_sub_co_u32_e64 v1, s[6:7], v1, v3
; GFX9-O0-NEXT: v_subb_co_u32_e64 v0, s[6:7], v0, v2, s[6:7]
; GFX9-O0-NEXT: ; kill: def $vgpr1 killed $vgpr1 def $vgpr1_vgpr2 killed $exec
; GFX9-O0-NEXT: v_mov_b32_e32 v2, v0
; GFX9-O0-NEXT: v_mov_b32_e32 v0, v1
; GFX9-O0-NEXT: v_lshrrev_b64 v[1:2], s4, v[1:2]
; GFX9-O0-NEXT: ; kill: def $vgpr1 killed $vgpr1 killed $vgpr1_vgpr2 killed $exec
; GFX9-O0-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-O3-LABEL: strict_wwm_called_i64:
; GFX9-O3: ; %bb.0:
; GFX9-O3-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-O3-NEXT: v_add_co_u32_e32 v2, vcc, v0, v0
; GFX9-O3-NEXT: v_addc_co_u32_e32 v3, vcc, v1, v1, vcc
; GFX9-O3-NEXT: v_mul_lo_u32 v4, v3, v0
; GFX9-O3-NEXT: v_mul_lo_u32 v5, v2, v1
; GFX9-O3-NEXT: v_mad_u64_u32 v[0:1], s[4:5], v2, v0, 0
; GFX9-O3-NEXT: v_add3_u32 v1, v1, v5, v4
; GFX9-O3-NEXT: v_sub_co_u32_e32 v0, vcc, v0, v2
; GFX9-O3-NEXT: v_subb_co_u32_e32 v1, vcc, v1, v3, vcc
; GFX9-O3-NEXT: s_setpc_b64 s[30:31]
  ; add = 2*a
  %add = add i64 %a, %a
  ; mul = 2*a * a
  %mul = mul i64 %add, %a
  ; sub = 2*a*a - 2*a, returned in the standard i64 return registers
  %sub = sub i64 %mul, %add
  ret i64 %sub
}
|
|
|
|
; Kernel that exercises an i64 call inside a strict-WWM region: inactive
; lanes of %arg are first forced to 0, the result is passed through a real
; (non-inlined) call, combined, wrapped with llvm.amdgcn.strict.wwm, and
; stored through the buffer descriptor %tmp14.
; CHECK lines are autogenerated by update_llc_test_checks.py — do not edit by hand.
define amdgpu_kernel void @strict_wwm_call_i64(ptr addrspace(8) inreg %tmp14, i64 inreg %arg) {
; GFX9-O0-LABEL: strict_wwm_call_i64:
; GFX9-O0: ; %bb.0:
; GFX9-O0-NEXT: s_mov_b32 s32, 0x400
; GFX9-O0-NEXT: s_mov_b32 s24, SCRATCH_RSRC_DWORD0
; GFX9-O0-NEXT: s_mov_b32 s25, SCRATCH_RSRC_DWORD1
; GFX9-O0-NEXT: s_mov_b32 s26, -1
; GFX9-O0-NEXT: s_mov_b32 s27, 0xe00000
; GFX9-O0-NEXT: s_add_u32 s24, s24, s9
; GFX9-O0-NEXT: s_addc_u32 s25, s25, 0
; GFX9-O0-NEXT: ; implicit-def: $vgpr12 : SGPR spill to VGPR lane
; GFX9-O0-NEXT: s_or_saveexec_b64 s[10:11], -1
; GFX9-O0-NEXT: v_writelane_b32 v12, s10, 0
; GFX9-O0-NEXT: v_writelane_b32 v12, s11, 1
; GFX9-O0-NEXT: s_mov_b32 s14, s8
; GFX9-O0-NEXT: s_mov_b32 s13, s7
; GFX9-O0-NEXT: s_mov_b32 s12, s6
; GFX9-O0-NEXT: s_mov_b64 s[10:11], s[4:5]
; GFX9-O0-NEXT: s_mov_b64 s[4:5], s[2:3]
; GFX9-O0-NEXT: v_readlane_b32 s2, v12, 0
; GFX9-O0-NEXT: v_readlane_b32 s3, v12, 1
; GFX9-O0-NEXT: v_writelane_b32 v12, s4, 2
; GFX9-O0-NEXT: v_writelane_b32 v12, s5, 3
; GFX9-O0-NEXT: s_or_saveexec_b64 s[20:21], -1
; GFX9-O0-NEXT: buffer_store_dword v12, off, s[24:27], 0 ; 4-byte Folded Spill
; GFX9-O0-NEXT: s_mov_b64 exec, s[20:21]
; GFX9-O0-NEXT: s_mov_b64 s[4:5], s[0:1]
; GFX9-O0-NEXT: v_readlane_b32 s0, v12, 2
; GFX9-O0-NEXT: v_readlane_b32 s1, v12, 3
; GFX9-O0-NEXT: v_mov_b32_e32 v3, v2
; GFX9-O0-NEXT: v_mov_b32_e32 v4, v1
; GFX9-O0-NEXT: v_mov_b32_e32 v5, v0
; GFX9-O0-NEXT: s_or_saveexec_b64 s[20:21], -1
; GFX9-O0-NEXT: buffer_load_dword v0, off, s[24:27], 0 ; 4-byte Folded Reload
; GFX9-O0-NEXT: s_mov_b64 exec, s[20:21]
; GFX9-O0-NEXT: s_mov_b64 exec, s[2:3]
; GFX9-O0-NEXT: s_load_dwordx2 s[16:17], s[0:1], 0x24
; GFX9-O0-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0x2c
; GFX9-O0-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
; GFX9-O0-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-O0-NEXT: s_mov_b32 s6, s9
; GFX9-O0-NEXT: s_mov_b32 s7, s8
; GFX9-O0-NEXT: s_mov_b32 s8, s17
; GFX9-O0-NEXT: ; kill: def $sgpr16 killed $sgpr16 killed $sgpr16_sgpr17
; GFX9-O0-NEXT: ; kill: def $sgpr16 killed $sgpr16 def $sgpr16_sgpr17_sgpr18_sgpr19
; GFX9-O0-NEXT: s_mov_b32 s17, s8
; GFX9-O0-NEXT: s_mov_b32 s18, s7
; GFX9-O0-NEXT: s_mov_b32 s19, s6
; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
; GFX9-O0-NEXT: v_writelane_b32 v0, s16, 4
; GFX9-O0-NEXT: v_writelane_b32 v0, s17, 5
; GFX9-O0-NEXT: v_writelane_b32 v0, s18, 6
; GFX9-O0-NEXT: v_writelane_b32 v0, s19, 7
; GFX9-O0-NEXT: s_mov_b64 s[6:7], 0
; GFX9-O0-NEXT: v_mov_b32_e32 v8, s2
; GFX9-O0-NEXT: v_mov_b32_e32 v9, s3
; GFX9-O0-NEXT: s_not_b64 exec, exec
; GFX9-O0-NEXT: v_mov_b32_e32 v8, s6
; GFX9-O0-NEXT: v_mov_b32_e32 v9, s7
; GFX9-O0-NEXT: s_not_b64 exec, exec
; GFX9-O0-NEXT: s_or_saveexec_b64 s[2:3], -1
; GFX9-O0-NEXT: v_writelane_b32 v0, s2, 8
; GFX9-O0-NEXT: v_writelane_b32 v0, s3, 9
; GFX9-O0-NEXT: s_or_saveexec_b64 s[20:21], -1
; GFX9-O0-NEXT: buffer_store_dword v0, off, s[24:27], 0 ; 4-byte Folded Spill
; GFX9-O0-NEXT: s_mov_b64 exec, s[20:21]
; GFX9-O0-NEXT: s_mov_b64 s[6:7], 60
; GFX9-O0-NEXT: s_mov_b32 s2, s0
; GFX9-O0-NEXT: s_mov_b32 s0, s1
; GFX9-O0-NEXT: s_mov_b32 s3, s6
; GFX9-O0-NEXT: s_mov_b32 s1, s7
; GFX9-O0-NEXT: s_add_u32 s8, s2, s3
; GFX9-O0-NEXT: s_addc_u32 s0, s0, s1
; GFX9-O0-NEXT: ; kill: def $sgpr8 killed $sgpr8 def $sgpr8_sgpr9
; GFX9-O0-NEXT: s_mov_b32 s9, s0
; GFX9-O0-NEXT: v_mov_b32_e32 v6, v8
; GFX9-O0-NEXT: s_mov_b32 s0, 32
; GFX9-O0-NEXT: ; implicit-def: $sgpr2_sgpr3
; GFX9-O0-NEXT: v_lshrrev_b64 v[10:11], s0, v[8:9]
; GFX9-O0-NEXT: v_mov_b32_e32 v7, v10
; GFX9-O0-NEXT: s_getpc_b64 s[0:1]
; GFX9-O0-NEXT: s_add_u32 s0, s0, strict_wwm_called_i64@gotpcrel32@lo+4
; GFX9-O0-NEXT: s_addc_u32 s1, s1, strict_wwm_called_i64@gotpcrel32@hi+12
; GFX9-O0-NEXT: s_load_dwordx2 s[16:17], s[0:1], 0x0
; GFX9-O0-NEXT: s_mov_b64 s[0:1], s[24:25]
; GFX9-O0-NEXT: s_mov_b64 s[2:3], s[26:27]
; GFX9-O0-NEXT: s_mov_b32 s6, 20
; GFX9-O0-NEXT: v_lshlrev_b32_e64 v3, s6, v3
; GFX9-O0-NEXT: s_mov_b32 s6, 10
; GFX9-O0-NEXT: v_lshlrev_b32_e64 v4, s6, v4
; GFX9-O0-NEXT: v_or3_b32 v3, v5, v4, v3
; GFX9-O0-NEXT: ; implicit-def: $sgpr6_sgpr7
; GFX9-O0-NEXT: ; implicit-def: $sgpr15
; GFX9-O0-NEXT: v_mov_b32_e32 v31, v3
; GFX9-O0-NEXT: v_mov_b32_e32 v0, v6
; GFX9-O0-NEXT: v_mov_b32_e32 v1, v7
; GFX9-O0-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-O0-NEXT: s_swappc_b64 s[30:31], s[16:17]
; GFX9-O0-NEXT: s_or_saveexec_b64 s[20:21], -1
; GFX9-O0-NEXT: buffer_load_dword v2, off, s[24:27], 0 ; 4-byte Folded Reload
; GFX9-O0-NEXT: s_mov_b64 exec, s[20:21]
; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
; GFX9-O0-NEXT: v_readlane_b32 s0, v2, 4
; GFX9-O0-NEXT: v_readlane_b32 s1, v2, 5
; GFX9-O0-NEXT: v_readlane_b32 s2, v2, 6
; GFX9-O0-NEXT: v_readlane_b32 s3, v2, 7
; GFX9-O0-NEXT: v_readlane_b32 s4, v2, 8
; GFX9-O0-NEXT: v_readlane_b32 s5, v2, 9
; GFX9-O0-NEXT: v_mov_b32_e32 v3, v0
; GFX9-O0-NEXT: s_or_saveexec_b64 s[20:21], -1
; GFX9-O0-NEXT: buffer_load_dword v0, off, s[24:27], 0 ; 4-byte Folded Reload
; GFX9-O0-NEXT: s_mov_b64 exec, s[20:21]
; GFX9-O0-NEXT: v_mov_b32_e32 v4, v1
; GFX9-O0-NEXT: ; implicit-def: $sgpr6
; GFX9-O0-NEXT: ; implicit-def: $sgpr6
; GFX9-O0-NEXT: v_mov_b32_e32 v5, v8
; GFX9-O0-NEXT: v_mov_b32_e32 v6, v9
; GFX9-O0-NEXT: v_add_co_u32_e64 v3, s[6:7], v3, v5
; GFX9-O0-NEXT: v_addc_co_u32_e64 v4, s[6:7], v4, v6, s[6:7]
; GFX9-O0-NEXT: s_mov_b64 exec, s[4:5]
; GFX9-O0-NEXT: v_mov_b32_e32 v1, v3
; GFX9-O0-NEXT: v_mov_b32_e32 v2, v4
; GFX9-O0-NEXT: s_mov_b32 s4, 0
; GFX9-O0-NEXT: buffer_store_dwordx2 v[1:2], off, s[0:3], s4 offset:4
; GFX9-O0-NEXT: ; kill: killed $vgpr0
; GFX9-O0-NEXT: s_endpgm
;
; GFX9-O3-LABEL: strict_wwm_call_i64:
; GFX9-O3: ; %bb.0:
; GFX9-O3-NEXT: s_mov_b32 s12, SCRATCH_RSRC_DWORD0
; GFX9-O3-NEXT: s_mov_b32 s13, SCRATCH_RSRC_DWORD1
; GFX9-O3-NEXT: s_mov_b32 s14, -1
; GFX9-O3-NEXT: s_mov_b32 s15, 0xe00000
; GFX9-O3-NEXT: s_add_u32 s12, s12, s3
; GFX9-O3-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
; GFX9-O3-NEXT: s_load_dwordx4 s[8:11], s[0:1], 0x24
; GFX9-O3-NEXT: s_mov_b32 s32, 0
; GFX9-O3-NEXT: s_addc_u32 s13, s13, 0
; GFX9-O3-NEXT: s_or_saveexec_b64 s[0:1], -1
; GFX9-O3-NEXT: s_getpc_b64 s[4:5]
; GFX9-O3-NEXT: s_add_u32 s4, s4, strict_wwm_called_i64@gotpcrel32@lo+4
; GFX9-O3-NEXT: s_addc_u32 s5, s5, strict_wwm_called_i64@gotpcrel32@hi+12
; GFX9-O3-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x0
; GFX9-O3-NEXT: s_mov_b64 exec, s[0:1]
; GFX9-O3-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-O3-NEXT: v_mov_b32_e32 v6, s2
; GFX9-O3-NEXT: v_mov_b32_e32 v7, s3
; GFX9-O3-NEXT: s_not_b64 exec, exec
; GFX9-O3-NEXT: v_mov_b32_e32 v6, 0
; GFX9-O3-NEXT: v_mov_b32_e32 v7, 0
; GFX9-O3-NEXT: s_not_b64 exec, exec
; GFX9-O3-NEXT: s_or_saveexec_b64 s[6:7], -1
; GFX9-O3-NEXT: s_mov_b64 s[0:1], s[12:13]
; GFX9-O3-NEXT: s_mov_b64 s[2:3], s[14:15]
; GFX9-O3-NEXT: v_mov_b32_e32 v0, v6
; GFX9-O3-NEXT: v_mov_b32_e32 v1, v7
; GFX9-O3-NEXT: s_swappc_b64 s[30:31], s[4:5]
; GFX9-O3-NEXT: v_mov_b32_e32 v2, v0
; GFX9-O3-NEXT: v_mov_b32_e32 v3, v1
; GFX9-O3-NEXT: v_add_co_u32_e32 v2, vcc, v2, v6
; GFX9-O3-NEXT: v_addc_co_u32_e32 v3, vcc, v3, v7, vcc
; GFX9-O3-NEXT: s_mov_b64 exec, s[6:7]
; GFX9-O3-NEXT: v_mov_b32_e32 v0, v2
; GFX9-O3-NEXT: v_mov_b32_e32 v1, v3
; GFX9-O3-NEXT: buffer_store_dwordx2 v[0:1], off, s[8:11], 0 offset:4
; GFX9-O3-NEXT: s_endpgm
  ; Copy of %arg whose inactive lanes are set to 0 (second operand of the
  ; set.inactive intrinsic — see AMDGPU intrinsic docs).
  %tmp107 = tail call i64 @llvm.amdgcn.set.inactive.i64(i64 %arg, i64 0)
  ; Real call, not inlined, so the WWM value crosses a call boundary.
  %tmp134 = call i64 @strict_wwm_called_i64(i64 %tmp107)
  %tmp136 = add i64 %tmp134, %tmp107
  ; Read the combined value back out in whole-wave mode.
  %tmp137 = tail call i64 @llvm.amdgcn.strict.wwm.i64(i64 %tmp136)
  %tmp138 = bitcast i64 %tmp137 to <2 x i32>
  ; Store the result at byte offset 4 of the buffer described by %tmp14.
  call void @llvm.amdgcn.raw.ptr.buffer.store.v2i32(<2 x i32> %tmp138, ptr addrspace(8) %tmp14, i32 4, i32 0, i32 0)
  ret void
}
|
|
|
|
; Compute-shader entry point that loads 24 bytes from the buffer described
; by %desc (at index*32), round-trips three i64 pieces through
; set.inactive(…, INT64_MAX) + strict.wwm, and stores them back to the same
; locations. Exercises strict-WWM lowering of multiple independent values
; in a single block with no calls.
; CHECK lines are autogenerated by update_llc_test_checks.py — do not edit by hand.
define amdgpu_cs void @strict_wwm_amdgpu_cs_main(<4 x i32> inreg %desc, i32 %index) {
; GFX9-O0-LABEL: strict_wwm_amdgpu_cs_main:
; GFX9-O0: ; %bb.0:
; GFX9-O0-NEXT: s_mov_b32 s4, s3
; GFX9-O0-NEXT: s_mov_b32 s5, s2
; GFX9-O0-NEXT: s_mov_b32 s6, s1
; GFX9-O0-NEXT: ; kill: def $sgpr0 killed $sgpr0 def $sgpr0_sgpr1_sgpr2_sgpr3
; GFX9-O0-NEXT: s_mov_b32 s1, s6
; GFX9-O0-NEXT: s_mov_b32 s2, s5
; GFX9-O0-NEXT: s_mov_b32 s3, s4
; GFX9-O0-NEXT: ; kill: def $sgpr4_sgpr5_sgpr6_sgpr7 killed $sgpr0_sgpr1_sgpr2_sgpr3
; GFX9-O0-NEXT: s_mov_b32 s4, 5
; GFX9-O0-NEXT: v_lshlrev_b32_e64 v0, s4, v0
; GFX9-O0-NEXT: s_mov_b32 s4, 0
; GFX9-O0-NEXT: buffer_load_dwordx4 v[10:13], v0, s[0:3], s4 offen
; GFX9-O0-NEXT: buffer_load_dwordx2 v[3:4], v0, s[0:3], s4 offen offset:16
; GFX9-O0-NEXT: s_waitcnt vmcnt(1)
; GFX9-O0-NEXT: v_mov_b32_e32 v7, v11
; GFX9-O0-NEXT: v_mov_b32_e32 v5, v10
; GFX9-O0-NEXT: ; implicit-def: $sgpr5
; GFX9-O0-NEXT: ; implicit-def: $sgpr5
; GFX9-O0-NEXT: ; kill: def $vgpr5 killed $vgpr5 def $vgpr5_vgpr6 killed $exec
; GFX9-O0-NEXT: v_mov_b32_e32 v6, v7
; GFX9-O0-NEXT: s_mov_b32 s5, 0x7fffffff
; GFX9-O0-NEXT: s_mov_b32 s6, -1
; GFX9-O0-NEXT: ; kill: def $sgpr6 killed $sgpr6 def $sgpr6_sgpr7
; GFX9-O0-NEXT: s_mov_b32 s7, s5
; GFX9-O0-NEXT: v_mov_b32_e32 v1, v5
; GFX9-O0-NEXT: v_mov_b32_e32 v2, v6
; GFX9-O0-NEXT: s_not_b64 exec, exec
; GFX9-O0-NEXT: v_mov_b32_e32 v1, s6
; GFX9-O0-NEXT: v_mov_b32_e32 v2, s7
; GFX9-O0-NEXT: s_not_b64 exec, exec
; GFX9-O0-NEXT: v_mov_b32_e32 v8, v1
; GFX9-O0-NEXT: v_mov_b32_e32 v9, v2
; GFX9-O0-NEXT: v_mov_b32_e32 v7, v13
; GFX9-O0-NEXT: v_mov_b32_e32 v5, v12
; GFX9-O0-NEXT: ; implicit-def: $sgpr5
; GFX9-O0-NEXT: ; implicit-def: $sgpr5
; GFX9-O0-NEXT: ; kill: def $vgpr5 killed $vgpr5 def $vgpr5_vgpr6 killed $exec
; GFX9-O0-NEXT: v_mov_b32_e32 v6, v7
; GFX9-O0-NEXT: v_mov_b32_e32 v1, v5
; GFX9-O0-NEXT: v_mov_b32_e32 v2, v6
; GFX9-O0-NEXT: s_not_b64 exec, exec
; GFX9-O0-NEXT: v_mov_b32_e32 v1, s6
; GFX9-O0-NEXT: v_mov_b32_e32 v2, s7
; GFX9-O0-NEXT: s_not_b64 exec, exec
; GFX9-O0-NEXT: v_mov_b32_e32 v6, v1
; GFX9-O0-NEXT: v_mov_b32_e32 v7, v2
; GFX9-O0-NEXT: s_waitcnt vmcnt(0)
; GFX9-O0-NEXT: v_mov_b32_e32 v1, v3
; GFX9-O0-NEXT: v_mov_b32_e32 v2, v4
; GFX9-O0-NEXT: s_not_b64 exec, exec
; GFX9-O0-NEXT: v_mov_b32_e32 v1, s6
; GFX9-O0-NEXT: v_mov_b32_e32 v2, s7
; GFX9-O0-NEXT: s_not_b64 exec, exec
; GFX9-O0-NEXT: v_mov_b32_e32 v3, v1
; GFX9-O0-NEXT: v_mov_b32_e32 v4, v2
; GFX9-O0-NEXT: v_mov_b32_e32 v11, v9
; GFX9-O0-NEXT: v_mov_b32_e32 v5, v8
; GFX9-O0-NEXT: v_mov_b32_e32 v9, v7
; GFX9-O0-NEXT: v_mov_b32_e32 v10, v6
; GFX9-O0-NEXT: ; implicit-def: $sgpr5
; GFX9-O0-NEXT: ; implicit-def: $sgpr5
; GFX9-O0-NEXT: ; implicit-def: $sgpr5
; GFX9-O0-NEXT: ; implicit-def: $sgpr5
; GFX9-O0-NEXT: ; kill: def $vgpr5 killed $vgpr5 def $vgpr5_vgpr6_vgpr7_vgpr8 killed $exec
; GFX9-O0-NEXT: v_mov_b32_e32 v6, v11
; GFX9-O0-NEXT: v_mov_b32_e32 v7, v10
; GFX9-O0-NEXT: v_mov_b32_e32 v8, v9
; GFX9-O0-NEXT: buffer_store_dwordx4 v[5:8], v0, s[0:3], s4 offen
; GFX9-O0-NEXT: buffer_store_dwordx2 v[3:4], v0, s[0:3], s4 offen offset:16
; GFX9-O0-NEXT: s_endpgm
;
; GFX9-O3-LABEL: strict_wwm_amdgpu_cs_main:
; GFX9-O3: ; %bb.0:
; GFX9-O3-NEXT: v_lshlrev_b32_e32 v0, 5, v0
; GFX9-O3-NEXT: buffer_load_dwordx4 v[7:10], v0, s[0:3], 0 offen
; GFX9-O3-NEXT: buffer_load_dwordx2 v[11:12], v0, s[0:3], 0 offen offset:16
; GFX9-O3-NEXT: s_mov_b32 s4, -1
; GFX9-O3-NEXT: s_brev_b32 s5, -2
; GFX9-O3-NEXT: s_waitcnt vmcnt(1)
; GFX9-O3-NEXT: v_mov_b32_e32 v1, v7
; GFX9-O3-NEXT: v_mov_b32_e32 v2, v8
; GFX9-O3-NEXT: s_not_b64 exec, exec
; GFX9-O3-NEXT: v_mov_b32_e32 v1, s4
; GFX9-O3-NEXT: v_mov_b32_e32 v2, s5
; GFX9-O3-NEXT: s_not_b64 exec, exec
; GFX9-O3-NEXT: v_mov_b32_e32 v3, v9
; GFX9-O3-NEXT: v_mov_b32_e32 v4, v10
; GFX9-O3-NEXT: s_not_b64 exec, exec
; GFX9-O3-NEXT: v_mov_b32_e32 v3, s4
; GFX9-O3-NEXT: v_mov_b32_e32 v4, s5
; GFX9-O3-NEXT: s_not_b64 exec, exec
; GFX9-O3-NEXT: s_waitcnt vmcnt(0)
; GFX9-O3-NEXT: v_mov_b32_e32 v5, v11
; GFX9-O3-NEXT: v_mov_b32_e32 v6, v12
; GFX9-O3-NEXT: s_not_b64 exec, exec
; GFX9-O3-NEXT: v_mov_b32_e32 v5, s4
; GFX9-O3-NEXT: v_mov_b32_e32 v6, s5
; GFX9-O3-NEXT: s_not_b64 exec, exec
; GFX9-O3-NEXT: v_mov_b32_e32 v7, v1
; GFX9-O3-NEXT: v_mov_b32_e32 v9, v3
; GFX9-O3-NEXT: v_mov_b32_e32 v8, v2
; GFX9-O3-NEXT: v_mov_b32_e32 v10, v4
; GFX9-O3-NEXT: v_mov_b32_e32 v11, v5
; GFX9-O3-NEXT: v_mov_b32_e32 v12, v6
; GFX9-O3-NEXT: buffer_store_dwordx4 v[7:10], v0, s[0:3], 0 offen
; GFX9-O3-NEXT: buffer_store_dwordx2 v[11:12], v0, s[0:3], 0 offen offset:16
; GFX9-O3-NEXT: s_endpgm
  ; Byte offset = index * 32.
  %tmp17 = shl i32 %index, 5
  %tmp18 = tail call <4 x i32> @llvm.amdgcn.s.buffer.load.v4i32(<4 x i32> %desc, i32 %tmp17, i32 0)
  %.i0.upto1.bc = bitcast <4 x i32> %tmp18 to <2 x i64>
  ; Second load covers bytes 16..23 of the same 32-byte record.
  %tmp19 = or i32 %tmp17, 16
  %tmp20 = tail call <2 x i32> @llvm.amdgcn.s.buffer.load.v2i32(<4 x i32> %desc, i32 %tmp19, i32 0)
  ; Piece 0: first i64 of the dwordx4 load. 9223372036854775807 = INT64_MAX
  ; is the value given to inactive lanes.
  %.i0.upto1.extract = extractelement <2 x i64> %.i0.upto1.bc, i32 0
  %tmp22 = tail call i64 @llvm.amdgcn.set.inactive.i64(i64 %.i0.upto1.extract, i64 9223372036854775807)
  %tmp97 = tail call i64 @llvm.amdgcn.strict.wwm.i64(i64 %tmp22)
  ; Piece 1: second i64 of the dwordx4 load.
  %.i1.upto1.extract = extractelement <2 x i64> %.i0.upto1.bc, i32 1
  %tmp99 = tail call i64 @llvm.amdgcn.set.inactive.i64(i64 %.i1.upto1.extract, i64 9223372036854775807)
  %tmp174 = tail call i64 @llvm.amdgcn.strict.wwm.i64(i64 %tmp99)
  ; Piece 2: the dwordx2 load.
  %.i25 = bitcast <2 x i32> %tmp20 to i64
  %tmp176 = tail call i64 @llvm.amdgcn.set.inactive.i64(i64 %.i25, i64 9223372036854775807)
  %tmp251 = tail call i64 @llvm.amdgcn.strict.wwm.i64(i64 %tmp176)
  ; Reassemble and write everything back through %desc reinterpreted as a
  ; buffer-resource pointer.
  %.cast = bitcast i64 %tmp97 to <2 x float>
  %.cast6 = bitcast i64 %tmp174 to <2 x float>
  %.cast7 = bitcast i64 %tmp251 to <2 x float>
  %tmp254 = shufflevector <2 x float> %.cast, <2 x float> %.cast6, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  %desc.int = bitcast <4 x i32> %desc to i128
  %desc.ptr = inttoptr i128 %desc.int to ptr addrspace(8)
  tail call void @llvm.amdgcn.raw.ptr.buffer.store.v4f32(<4 x float> %tmp254, ptr addrspace(8) %desc.ptr, i32 %tmp17, i32 0, i32 0)
  tail call void @llvm.amdgcn.raw.ptr.buffer.store.v2f32(<2 x float> %.cast7, ptr addrspace(8)%desc.ptr, i32 %tmp19, i32 0, i32 0)
  ret void
}
|
|
|
|
declare i32 @llvm.amdgcn.strict.wwm.i32(i32)
|
|
declare i64 @llvm.amdgcn.strict.wwm.i64(i64)
|
|
declare i32 @llvm.amdgcn.wwm.i32(i32)
|
|
declare i64 @llvm.amdgcn.wwm.i64(i64)
|
|
declare i32 @llvm.amdgcn.set.inactive.i32(i32, i32)
|
|
declare i64 @llvm.amdgcn.set.inactive.i64(i64, i64)
|
|
declare i32 @llvm.amdgcn.update.dpp.i32(i32, i32, i32, i32, i32, i1)
|
|
declare <2 x float> @llvm.amdgcn.raw.ptr.buffer.load.v2f32(ptr addrspace(8), i32, i32, i32)
|
|
declare void @llvm.amdgcn.raw.ptr.buffer.store.f32(float, ptr addrspace(8), i32, i32, i32)
|
|
declare void @llvm.amdgcn.raw.ptr.buffer.store.i32(i32, ptr addrspace(8), i32, i32, i32)
|
|
declare void @llvm.amdgcn.raw.ptr.buffer.store.v2i32(<2 x i32>, ptr addrspace(8), i32, i32, i32)
|
|
declare void @llvm.amdgcn.raw.ptr.buffer.store.v2f32(<2 x float>, ptr addrspace(8), i32, i32, i32)
|
|
declare void @llvm.amdgcn.raw.ptr.buffer.store.v4f32(<4 x float>, ptr addrspace(8), i32, i32, i32)
|
|
declare <2 x i32> @llvm.amdgcn.s.buffer.load.v2i32(<4 x i32>, i32, i32)
|
|
declare <4 x i32> @llvm.amdgcn.s.buffer.load.v4i32(<4 x i32>, i32, i32)
|
|
|
|
!llvm.module.flags = !{!0}
|
|
!0 = !{i32 1, !"amdhsa_code_object_version", i32 500}
|