; NOTE: The following header lines are extraction metadata from the file
; viewer, converted to LLVM comments so the file remains parseable IR:
; Files
; clang-p2996/llvm/test/CodeGen/AMDGPU/skip-if-dead.ll
; 2021-02-08 12:57:51 +00:00
;
; 1088 lines
; 36 KiB
; LLVM
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -march=amdgcn -mcpu=tahiti -verify-machineinstrs -simplifycfg-require-and-preserve-domtree=1 < %s | FileCheck -check-prefixes=GCN,SI %s
; RUN: llc -march=amdgcn -mcpu=gfx1010 -mattr=-wavefrontsize32,+wavefrontsize64 -verify-machineinstrs -simplifycfg-require-and-preserve-domtree=1 < %s | FileCheck -check-prefixes=GCN,GFX10 %s
; A kill with a constant-true condition kills no lanes: no exec-mask write
; and no early-exit (null export) block should be emitted.
define amdgpu_ps void @test_kill_depth_0_imm_pos() #0 {
; GCN-LABEL: test_kill_depth_0_imm_pos:
; GCN: ; %bb.0:
; GCN-NEXT: ; %bb.1:
; GCN-NEXT: s_endpgm
call void @llvm.amdgcn.kill(i1 true)
ret void
}
; A kill with a constant-false condition kills every lane: exec is cleared
; (s_mov_b64 exec, 0) and the early-exit block (exp null ... done vm followed
; by s_endpgm) is emitted for the all-lanes-dead path.
define amdgpu_ps void @test_kill_depth_0_imm_neg() #0 {
; GCN-LABEL: test_kill_depth_0_imm_neg:
; GCN: ; %bb.0:
; GCN-NEXT: s_mov_b64 exec, 0
; GCN-NEXT: s_cbranch_execz BB1_2
; GCN-NEXT: ; %bb.1:
; GCN-NEXT: s_endpgm
; GCN-NEXT: BB1_2:
; GCN-NEXT: exp null off, off, off, off done vm
; GCN-NEXT: s_endpgm
call void @llvm.amdgcn.kill(i1 false)
ret void
}
; FIXME: Ideally only one early-exit would be emitted
; Two back-to-back kill(false) calls: currently each one gets its own exec
; clear + skip branch to the shared early-exit block (see FIXME above about
; ideally emitting only one early exit).
define amdgpu_ps void @test_kill_depth_0_imm_neg_x2() #0 {
; GCN-LABEL: test_kill_depth_0_imm_neg_x2:
; GCN: ; %bb.0:
; GCN-NEXT: s_mov_b64 exec, 0
; GCN-NEXT: s_cbranch_execz BB2_3
; GCN-NEXT: ; %bb.1:
; GCN-NEXT: s_mov_b64 exec, 0
; GCN-NEXT: s_cbranch_execz BB2_3
; GCN-NEXT: ; %bb.2:
; GCN-NEXT: s_endpgm
; GCN-NEXT: BB2_3:
; GCN-NEXT: exp null off, off, off, off done vm
; GCN-NEXT: s_endpgm
call void @llvm.amdgcn.kill(i1 false)
call void @llvm.amdgcn.kill(i1 false)
ret void
}
; Kill on a divergent compare: the fcmp + kill pair folds into a single
; v_cmpx instruction (which writes exec directly), followed by a skip branch
; to the early-exit block when all lanes die.
define amdgpu_ps void @test_kill_depth_var(float %x) #0 {
; SI-LABEL: test_kill_depth_var:
; SI: ; %bb.0:
; SI-NEXT: v_cmpx_gt_f32_e32 vcc, 0, v0
; SI-NEXT: s_cbranch_execz BB3_2
; SI-NEXT: ; %bb.1:
; SI-NEXT: s_endpgm
; SI-NEXT: BB3_2:
; SI-NEXT: exp null off, off, off, off done vm
; SI-NEXT: s_endpgm
;
; GFX10-LABEL: test_kill_depth_var:
; GFX10: ; %bb.0:
; GFX10-NEXT: v_cmpx_gt_f32_e32 0, v0
; GFX10-NEXT: s_cbranch_execz BB3_2
; GFX10-NEXT: ; %bb.1:
; GFX10-NEXT: s_endpgm
; GFX10-NEXT: BB3_2:
; GFX10-NEXT: exp null off, off, off, off done vm
; GFX10-NEXT: s_endpgm
%cmp = fcmp olt float %x, 0.0
call void @llvm.amdgcn.kill(i1 %cmp)
ret void
}
; FIXME: Ideally only one early-exit would be emitted
; The same divergent kill condition applied twice: two v_cmpx + skip pairs
; are emitted (see FIXME above); on gfx10 an s_waitcnt_depctr is inserted
; between the exec-writing v_cmpx and the dependent second one.
define amdgpu_ps void @test_kill_depth_var_x2_same(float %x) #0 {
; SI-LABEL: test_kill_depth_var_x2_same:
; SI: ; %bb.0:
; SI-NEXT: v_cmpx_gt_f32_e32 vcc, 0, v0
; SI-NEXT: s_cbranch_execz BB4_3
; SI-NEXT: ; %bb.1:
; SI-NEXT: v_cmpx_gt_f32_e32 vcc, 0, v0
; SI-NEXT: s_cbranch_execz BB4_3
; SI-NEXT: ; %bb.2:
; SI-NEXT: s_endpgm
; SI-NEXT: BB4_3:
; SI-NEXT: exp null off, off, off, off done vm
; SI-NEXT: s_endpgm
;
; GFX10-LABEL: test_kill_depth_var_x2_same:
; GFX10: ; %bb.0:
; GFX10-NEXT: v_cmpx_gt_f32_e32 0, v0
; GFX10-NEXT: s_cbranch_execz BB4_3
; GFX10-NEXT: ; %bb.1:
; GFX10-NEXT: s_waitcnt_depctr 0xfffe
; GFX10-NEXT: v_cmpx_gt_f32_e32 0, v0
; GFX10-NEXT: s_cbranch_execz BB4_3
; GFX10-NEXT: ; %bb.2:
; GFX10-NEXT: s_endpgm
; GFX10-NEXT: BB4_3:
; GFX10-NEXT: exp null off, off, off, off done vm
; GFX10-NEXT: s_endpgm
%cmp = fcmp olt float %x, 0.0
call void @llvm.amdgcn.kill(i1 %cmp)
call void @llvm.amdgcn.kill(i1 %cmp)
ret void
}
; FIXME: Ideally only one early-exit would be emitted
; Two kills on two independent divergent conditions (%x then %y): each
; becomes a v_cmpx on its own operand (v0, then v1) with a skip branch to
; the shared early-exit block.
define amdgpu_ps void @test_kill_depth_var_x2(float %x, float %y) #0 {
; SI-LABEL: test_kill_depth_var_x2:
; SI: ; %bb.0:
; SI-NEXT: v_cmpx_gt_f32_e32 vcc, 0, v0
; SI-NEXT: s_cbranch_execz BB5_3
; SI-NEXT: ; %bb.1:
; SI-NEXT: v_cmpx_gt_f32_e32 vcc, 0, v1
; SI-NEXT: s_cbranch_execz BB5_3
; SI-NEXT: ; %bb.2:
; SI-NEXT: s_endpgm
; SI-NEXT: BB5_3:
; SI-NEXT: exp null off, off, off, off done vm
; SI-NEXT: s_endpgm
;
; GFX10-LABEL: test_kill_depth_var_x2:
; GFX10: ; %bb.0:
; GFX10-NEXT: v_cmpx_gt_f32_e32 0, v0
; GFX10-NEXT: s_cbranch_execz BB5_3
; GFX10-NEXT: ; %bb.1:
; GFX10-NEXT: s_waitcnt_depctr 0xfffe
; GFX10-NEXT: v_cmpx_gt_f32_e32 0, v1
; GFX10-NEXT: s_cbranch_execz BB5_3
; GFX10-NEXT: ; %bb.2:
; GFX10-NEXT: s_endpgm
; GFX10-NEXT: BB5_3:
; GFX10-NEXT: exp null off, off, off, off done vm
; GFX10-NEXT: s_endpgm
%cmp.x = fcmp olt float %x, 0.0
call void @llvm.amdgcn.kill(i1 %cmp.x)
%cmp.y = fcmp olt float %y, 0.0
call void @llvm.amdgcn.kill(i1 %cmp.y)
ret void
}
; Like the x2 case, but with a side-effecting inline-asm def (writing v7)
; between the two kills, so the second kill's compare operand comes from the
; asm output rather than a function argument.
define amdgpu_ps void @test_kill_depth_var_x2_instructions(float %x) #0 {
; SI-LABEL: test_kill_depth_var_x2_instructions:
; SI: ; %bb.0:
; SI-NEXT: v_cmpx_gt_f32_e32 vcc, 0, v0
; SI-NEXT: s_cbranch_execz BB6_3
; SI-NEXT: ; %bb.1:
; SI-NEXT: ;;#ASMSTART
; SI-NEXT: v_mov_b32_e64 v7, -1
; SI-NEXT: ;;#ASMEND
; SI-NEXT: v_cmpx_gt_f32_e32 vcc, 0, v7
; SI-NEXT: s_cbranch_execz BB6_3
; SI-NEXT: ; %bb.2:
; SI-NEXT: s_endpgm
; SI-NEXT: BB6_3:
; SI-NEXT: exp null off, off, off, off done vm
; SI-NEXT: s_endpgm
;
; GFX10-LABEL: test_kill_depth_var_x2_instructions:
; GFX10: ; %bb.0:
; GFX10-NEXT: v_cmpx_gt_f32_e32 0, v0
; GFX10-NEXT: s_cbranch_execz BB6_3
; GFX10-NEXT: ; %bb.1:
; GFX10-NEXT: ;;#ASMSTART
; GFX10-NEXT: v_mov_b32_e64 v7, -1
; GFX10-NEXT: ;;#ASMEND
; GFX10-NEXT: s_waitcnt_depctr 0xfffe
; GFX10-NEXT: v_cmpx_gt_f32_e32 0, v7
; GFX10-NEXT: s_cbranch_execz BB6_3
; GFX10-NEXT: ; %bb.2:
; GFX10-NEXT: s_endpgm
; GFX10-NEXT: BB6_3:
; GFX10-NEXT: exp null off, off, off, off done vm
; GFX10-NEXT: s_endpgm
%cmp.x = fcmp olt float %x, 0.0
call void @llvm.amdgcn.kill(i1 %cmp.x)
%y = call float asm sideeffect "v_mov_b32_e64 v7, -1", "={v7}"()
%cmp.y = fcmp olt float %y, 0.0
call void @llvm.amdgcn.kill(i1 %cmp.y)
ret void
}
; FIXME: why does the skip depend on the asm length in the same block?
; Kill inside one arm of a uniform branch: the v_cmpx is emitted in %bb with
; no skip branch after it, and control falls through to the %exit block that
; returns 1.0 (moved into v0 for the shader epilog).
define amdgpu_ps float @test_kill_control_flow(i32 inreg %arg) #0 {
; SI-LABEL: test_kill_control_flow:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_cmp_lg_u32 s0, 0
; SI-NEXT: s_cbranch_scc1 BB7_2
; SI-NEXT: ; %bb.1: ; %bb
; SI-NEXT: ;;#ASMSTART
; SI-NEXT: v_mov_b32_e64 v7, -1
; SI-NEXT: v_nop_e64
; SI-NEXT: v_nop_e64
; SI-NEXT: v_nop_e64
; SI-NEXT: v_nop_e64
; SI-NEXT: v_nop_e64
; SI-NEXT: v_nop_e64
; SI-NEXT: v_nop_e64
; SI-NEXT: v_nop_e64
; SI-NEXT: v_nop_e64
; SI-NEXT: v_nop_e64
; SI-NEXT: ;;#ASMEND
; SI-NEXT: v_cmpx_gt_f32_e32 vcc, 0, v7
; SI-NEXT: BB7_2: ; %exit
; SI-NEXT: v_mov_b32_e32 v0, 1.0
; SI-NEXT: ; return to shader part epilog
;
; GFX10-LABEL: test_kill_control_flow:
; GFX10: ; %bb.0: ; %entry
; GFX10-NEXT: s_cmp_lg_u32 s0, 0
; GFX10-NEXT: s_cbranch_scc1 BB7_2
; GFX10-NEXT: ; %bb.1: ; %bb
; GFX10-NEXT: ;;#ASMSTART
; GFX10-NEXT: v_mov_b32_e64 v7, -1
; GFX10-NEXT: v_nop_e64
; GFX10-NEXT: v_nop_e64
; GFX10-NEXT: v_nop_e64
; GFX10-NEXT: v_nop_e64
; GFX10-NEXT: v_nop_e64
; GFX10-NEXT: v_nop_e64
; GFX10-NEXT: v_nop_e64
; GFX10-NEXT: v_nop_e64
; GFX10-NEXT: v_nop_e64
; GFX10-NEXT: v_nop_e64
; GFX10-NEXT: ;;#ASMEND
; GFX10-NEXT: v_cmpx_gt_f32_e32 0, v7
; GFX10-NEXT: BB7_2: ; %exit
; GFX10-NEXT: v_mov_b32_e32 v0, 1.0
; GFX10-NEXT: ; return to shader part epilog
entry:
%cmp = icmp eq i32 %arg, 0
br i1 %cmp, label %bb, label %exit
bb:
%var = call float asm sideeffect "v_mov_b32_e64 v7, -1
v_nop_e64
v_nop_e64
v_nop_e64
v_nop_e64
v_nop_e64
v_nop_e64
v_nop_e64
v_nop_e64
v_nop_e64
v_nop_e64", "={v7}"()
%cmp.var = fcmp olt float %var, 0.0
; TODO: We could do an early-exit here (the branch above is uniform!)
call void @llvm.amdgcn.kill(i1 %cmp.var)
br label %exit
exit:
ret float 1.0
}
; Kill in the middle of a block with values live across it (%live.across in
; v8, %live.out in v9 feeding a phi in %exit): checks the stores and asm defs
; that follow the kill are still emitted on the surviving path.
define amdgpu_ps void @test_kill_control_flow_remainder(i32 inreg %arg) #0 {
; SI-LABEL: test_kill_control_flow_remainder:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_cmp_lg_u32 s0, 0
; SI-NEXT: v_mov_b32_e32 v9, 0
; SI-NEXT: s_cbranch_scc1 BB8_3
; SI-NEXT: ; %bb.1: ; %bb
; SI-NEXT: ;;#ASMSTART
; SI-NEXT: v_mov_b32_e64 v7, -1
; SI-NEXT: v_nop_e64
; SI-NEXT: v_nop_e64
; SI-NEXT: v_nop_e64
; SI-NEXT: v_nop_e64
; SI-NEXT: v_nop_e64
; SI-NEXT: v_nop_e64
; SI-NEXT: v_nop_e64
; SI-NEXT: v_nop_e64
; SI-NEXT: v_nop_e64
; SI-NEXT: v_nop_e64
; SI-NEXT: v_nop_e64
; SI-NEXT: ;;#ASMEND
; SI-NEXT: ;;#ASMSTART
; SI-NEXT: v_mov_b32_e64 v8, -1
; SI-NEXT: ;;#ASMEND
; SI-NEXT: v_cmpx_gt_f32_e32 vcc, 0, v7
; SI-NEXT: ; %bb.2: ; %bb
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: buffer_store_dword v8, off, s[0:3], 0
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: ;;#ASMSTART
; SI-NEXT: v_mov_b32_e64 v9, -2
; SI-NEXT: ;;#ASMEND
; SI-NEXT: BB8_3: ; %exit
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: buffer_store_dword v9, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; GFX10-LABEL: test_kill_control_flow_remainder:
; GFX10: ; %bb.0: ; %entry
; GFX10-NEXT: v_mov_b32_e32 v9, 0
; GFX10-NEXT: s_cmp_lg_u32 s0, 0
; GFX10-NEXT: s_cbranch_scc0 BB8_2
; GFX10-NEXT: ; %bb.1: ; %exit
; GFX10-NEXT: global_store_dword v[0:1], v9, off
; GFX10-NEXT: s_endpgm
; GFX10-NEXT: BB8_2: ; %bb
; GFX10-NEXT: ;;#ASMSTART
; GFX10-NEXT: v_mov_b32_e64 v7, -1
; GFX10-NEXT: v_nop_e64
; GFX10-NEXT: v_nop_e64
; GFX10-NEXT: v_nop_e64
; GFX10-NEXT: v_nop_e64
; GFX10-NEXT: v_nop_e64
; GFX10-NEXT: v_nop_e64
; GFX10-NEXT: v_nop_e64
; GFX10-NEXT: v_nop_e64
; GFX10-NEXT: v_nop_e64
; GFX10-NEXT: v_nop_e64
; GFX10-NEXT: v_nop_e64
; GFX10-NEXT: ;;#ASMEND
; GFX10-NEXT: ;;#ASMSTART
; GFX10-NEXT: v_mov_b32_e64 v8, -1
; GFX10-NEXT: ;;#ASMEND
; GFX10-NEXT: v_cmpx_gt_f32_e32 0, v7
; GFX10-NEXT: s_cbranch_execz BB8_4
; GFX10-NEXT: ; %bb.3: ; %bb
; GFX10-NEXT: s_nop 3
; GFX10-NEXT: global_store_dword v[0:1], v8, off
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-NEXT: ;;#ASMSTART
; GFX10-NEXT: v_mov_b32_e64 v9, -2
; GFX10-NEXT: ;;#ASMEND
; GFX10-NEXT: global_store_dword v[0:1], v9, off
; GFX10-NEXT: s_endpgm
; GFX10-NEXT: BB8_4:
; GFX10-NEXT: exp null off, off, off, off done vm
; GFX10-NEXT: s_endpgm
entry:
%cmp = icmp eq i32 %arg, 0
br i1 %cmp, label %bb, label %exit
bb:
%var = call float asm sideeffect "v_mov_b32_e64 v7, -1
v_nop_e64
v_nop_e64
v_nop_e64
v_nop_e64
v_nop_e64
v_nop_e64
v_nop_e64
v_nop_e64
v_nop_e64
v_nop_e64
v_nop_e64", "={v7}"()
%live.across = call float asm sideeffect "v_mov_b32_e64 v8, -1", "={v8}"()
%cmp.var = fcmp olt float %var, 0.0
; TODO: We could do an early-exit here (the branch above is uniform!)
call void @llvm.amdgcn.kill(i1 %cmp.var)
store volatile float %live.across, float addrspace(1)* undef
%live.out = call float asm sideeffect "v_mov_b32_e64 v9, -2", "={v9}"()
br label %exit
exit:
%phi = phi float [ 0.0, %entry ], [ %live.out, %bb ]
store float %phi, float addrspace(1)* undef
ret void
}
; Kill in the entry block of a function that returns a phi: exec is masked
; with the kill condition up front (s_and_b64 exec), with a skip branch to
; the early-exit block, and the surviving path selects the return value.
define amdgpu_ps float @test_kill_control_flow_return(i32 inreg %arg) #0 {
; SI-LABEL: test_kill_control_flow_return:
; SI: ; %bb.0: ; %entry
; SI-NEXT: v_cmp_eq_u32_e64 s[2:3], s0, 1
; SI-NEXT: s_and_b64 exec, exec, s[2:3]
; SI-NEXT: s_cbranch_execz BB9_4
; SI-NEXT: ; %bb.1: ; %entry
; SI-NEXT: s_cmp_lg_u32 s0, 0
; SI-NEXT: v_mov_b32_e32 v0, 0
; SI-NEXT: s_cbranch_scc0 BB9_3
; SI-NEXT: ; %bb.2: ; %exit
; SI-NEXT: s_branch BB9_5
; SI-NEXT: BB9_3: ; %bb
; SI-NEXT: ;;#ASMSTART
; SI-NEXT: v_mov_b32_e64 v7, -1
; SI-NEXT: v_nop_e64
; SI-NEXT: v_nop_e64
; SI-NEXT: v_nop_e64
; SI-NEXT: v_nop_e64
; SI-NEXT: v_nop_e64
; SI-NEXT: v_nop_e64
; SI-NEXT: v_nop_e64
; SI-NEXT: v_nop_e64
; SI-NEXT: v_nop_e64
; SI-NEXT: v_nop_e64
; SI-NEXT: ;;#ASMEND
; SI-NEXT: v_mov_b32_e32 v0, v7
; SI-NEXT: s_branch BB9_5
; SI-NEXT: BB9_4:
; SI-NEXT: exp null off, off, off, off done vm
; SI-NEXT: s_endpgm
; SI-NEXT: BB9_5:
;
; GFX10-LABEL: test_kill_control_flow_return:
; GFX10: ; %bb.0: ; %entry
; GFX10-NEXT: v_cmp_eq_u32_e64 s[2:3], s0, 1
; GFX10-NEXT: s_and_b64 exec, exec, s[2:3]
; GFX10-NEXT: s_cbranch_execz BB9_4
; GFX10-NEXT: ; %bb.1: ; %entry
; GFX10-NEXT: v_mov_b32_e32 v0, 0
; GFX10-NEXT: s_cmp_lg_u32 s0, 0
; GFX10-NEXT: s_cbranch_scc0 BB9_3
; GFX10-NEXT: ; %bb.2: ; %exit
; GFX10-NEXT: s_branch BB9_5
; GFX10-NEXT: BB9_3: ; %bb
; GFX10-NEXT: ;;#ASMSTART
; GFX10-NEXT: v_mov_b32_e64 v7, -1
; GFX10-NEXT: v_nop_e64
; GFX10-NEXT: v_nop_e64
; GFX10-NEXT: v_nop_e64
; GFX10-NEXT: v_nop_e64
; GFX10-NEXT: v_nop_e64
; GFX10-NEXT: v_nop_e64
; GFX10-NEXT: v_nop_e64
; GFX10-NEXT: v_nop_e64
; GFX10-NEXT: v_nop_e64
; GFX10-NEXT: v_nop_e64
; GFX10-NEXT: ;;#ASMEND
; GFX10-NEXT: v_mov_b32_e32 v0, v7
; GFX10-NEXT: s_branch BB9_5
; GFX10-NEXT: BB9_4:
; GFX10-NEXT: exp null off, off, off, off done vm
; GFX10-NEXT: s_endpgm
; GFX10-NEXT: BB9_5:
entry:
%kill = icmp eq i32 %arg, 1
%cmp = icmp eq i32 %arg, 0
call void @llvm.amdgcn.kill(i1 %kill)
br i1 %cmp, label %bb, label %exit
bb:
%var = call float asm sideeffect "v_mov_b32_e64 v7, -1
v_nop_e64
v_nop_e64
v_nop_e64
v_nop_e64
v_nop_e64
v_nop_e64
v_nop_e64
v_nop_e64
v_nop_e64
v_nop_e64", "={v7}"()
br label %exit
exit:
%ret = phi float [ %var, %bb ], [ 0.0, %entry ]
ret float %ret
}
; Kill inside a divergent loop body: v_cmpx is emitted inside the loop with
; no skip branch after it (the loop back-edge follows), and the early-exit
; block is reached only via the exec-zero check after the loop's Flow block.
define amdgpu_ps void @test_kill_divergent_loop(i32 %arg) #0 {
; SI-LABEL: test_kill_divergent_loop:
; SI: ; %bb.0: ; %entry
; SI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
; SI-NEXT: s_and_saveexec_b64 s[0:1], vcc
; SI-NEXT: s_xor_b64 s[0:1], exec, s[0:1]
; SI-NEXT: s_cbranch_execz BB10_4
; SI-NEXT: ; %bb.1: ; %bb.preheader
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: BB10_2: ; %bb
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: ;;#ASMSTART
; SI-NEXT: v_mov_b32_e64 v7, -1
; SI-NEXT: v_nop_e64
; SI-NEXT: v_nop_e64
; SI-NEXT: v_nop_e64
; SI-NEXT: v_nop_e64
; SI-NEXT: v_nop_e64
; SI-NEXT: v_nop_e64
; SI-NEXT: v_nop_e64
; SI-NEXT: v_nop_e64
; SI-NEXT: v_nop_e64
; SI-NEXT: v_nop_e64
; SI-NEXT: ;;#ASMEND
; SI-NEXT: v_cmpx_gt_f32_e32 vcc, 0, v7
; SI-NEXT: ; %bb.3: ; %bb
; SI-NEXT: ; in Loop: Header=BB10_2 Depth=1
; SI-NEXT: buffer_load_dword v0, off, s[0:3], 0 glc
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
; SI-NEXT: s_and_b64 vcc, exec, vcc
; SI-NEXT: s_cbranch_vccnz BB10_2
; SI-NEXT: BB10_4: ; %Flow1
; SI-NEXT: s_or_b64 exec, exec, s[0:1]
; SI-NEXT: s_cbranch_execz BB10_6
; SI-NEXT: ; %bb.5: ; %Flow1
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: v_mov_b32_e32 v0, 8
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: s_endpgm
; SI-NEXT: BB10_6:
; SI-NEXT: exp null off, off, off, off done vm
; SI-NEXT: s_endpgm
;
; GFX10-LABEL: test_kill_divergent_loop:
; GFX10: ; %bb.0: ; %entry
; GFX10-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
; GFX10-NEXT: s_and_saveexec_b64 s[0:1], vcc
; GFX10-NEXT: s_xor_b64 s[0:1], exec, s[0:1]
; GFX10-NEXT: s_cbranch_execz BB10_3
; GFX10-NEXT: BB10_1: ; %bb
; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10-NEXT: ;;#ASMSTART
; GFX10-NEXT: v_mov_b32_e64 v7, -1
; GFX10-NEXT: v_nop_e64
; GFX10-NEXT: v_nop_e64
; GFX10-NEXT: v_nop_e64
; GFX10-NEXT: v_nop_e64
; GFX10-NEXT: v_nop_e64
; GFX10-NEXT: v_nop_e64
; GFX10-NEXT: v_nop_e64
; GFX10-NEXT: v_nop_e64
; GFX10-NEXT: v_nop_e64
; GFX10-NEXT: v_nop_e64
; GFX10-NEXT: ;;#ASMEND
; GFX10-NEXT: s_waitcnt_depctr 0xfffe
; GFX10-NEXT: v_cmpx_gt_f32_e32 0, v7
; GFX10-NEXT: ; %bb.2: ; %bb
; GFX10-NEXT: ; in Loop: Header=BB10_1 Depth=1
; GFX10-NEXT: s_nop 4
; GFX10-NEXT: global_load_dword v0, v[0:1], off glc dlc
; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
; GFX10-NEXT: s_and_b64 vcc, exec, vcc
; GFX10-NEXT: s_cbranch_vccnz BB10_1
; GFX10-NEXT: BB10_3: ; %Flow1
; GFX10-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX10-NEXT: s_cbranch_execz BB10_5
; GFX10-NEXT: ; %bb.4: ; %Flow1
; GFX10-NEXT: v_mov_b32_e32 v0, 8
; GFX10-NEXT: global_store_dword v[0:1], v0, off
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-NEXT: s_endpgm
; GFX10-NEXT: BB10_5:
; GFX10-NEXT: exp null off, off, off, off done vm
; GFX10-NEXT: s_endpgm
entry:
%cmp = icmp eq i32 %arg, 0
br i1 %cmp, label %bb, label %exit
bb:
%var = call float asm sideeffect "v_mov_b32_e64 v7, -1
v_nop_e64
v_nop_e64
v_nop_e64
v_nop_e64
v_nop_e64
v_nop_e64
v_nop_e64
v_nop_e64
v_nop_e64
v_nop_e64", "={v7}"()
%cmp.var = fcmp olt float %var, 0.0
call void @llvm.amdgcn.kill(i1 %cmp.var)
%vgpr = load volatile i32, i32 addrspace(1)* undef
%loop.cond = icmp eq i32 %vgpr, 0
br i1 %loop.cond, label %bb, label %exit
exit:
store volatile i32 8, i32 addrspace(1)* undef
ret void
}
; bug 28550
; Regression test for bug 28550 (referenced by the comment above): a phi in
; %phibb uses %tmp2, which is defined before the kill in %bb. The select
; result (v0) must be computed before the v_cmpx that clobbers exec.
define amdgpu_ps void @phi_use_def_before_kill(float inreg %x) #0 {
; SI-LABEL: phi_use_def_before_kill:
; SI: ; %bb.0: ; %bb
; SI-NEXT: v_add_f32_e64 v1, s0, 1.0
; SI-NEXT: v_cmp_lt_f32_e32 vcc, 0, v1
; SI-NEXT: v_cndmask_b32_e64 v0, 0, -1.0, vcc
; SI-NEXT: v_cmpx_lt_f32_e32 vcc, 0, v1
; SI-NEXT: s_cbranch_execz BB11_6
; SI-NEXT: ; %bb.1: ; %bb
; SI-NEXT: s_cbranch_scc0 BB11_3
; SI-NEXT: ; %bb.2: ; %bb8
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: v_mov_b32_e32 v0, 8
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: v_mov_b32_e32 v0, 4.0
; SI-NEXT: BB11_3: ; %phibb
; SI-NEXT: v_cmp_eq_f32_e32 vcc, 0, v0
; SI-NEXT: s_and_b64 vcc, exec, vcc
; SI-NEXT: s_cbranch_vccz BB11_5
; SI-NEXT: ; %bb.4: ; %bb10
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: v_mov_b32_e32 v0, 9
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: BB11_5: ; %end
; SI-NEXT: s_endpgm
; SI-NEXT: BB11_6:
; SI-NEXT: exp null off, off, off, off done vm
; SI-NEXT: s_endpgm
;
; GFX10-LABEL: phi_use_def_before_kill:
; GFX10: ; %bb.0: ; %bb
; GFX10-NEXT: v_add_f32_e64 v1, s0, 1.0
; GFX10-NEXT: v_cmp_lt_f32_e32 vcc, 0, v1
; GFX10-NEXT: v_cndmask_b32_e64 v0, 0, -1.0, vcc
; GFX10-NEXT: v_cmpx_lt_f32_e32 0, v1
; GFX10-NEXT: s_cbranch_execz BB11_6
; GFX10-NEXT: ; %bb.1: ; %bb
; GFX10-NEXT: s_cbranch_scc0 BB11_3
; GFX10-NEXT: ; %bb.2: ; %bb8
; GFX10-NEXT: v_mov_b32_e32 v1, 8
; GFX10-NEXT: v_mov_b32_e32 v0, 4.0
; GFX10-NEXT: s_nop 0
; GFX10-NEXT: global_store_dword v[0:1], v1, off
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-NEXT: BB11_3: ; %phibb
; GFX10-NEXT: v_cmp_eq_f32_e32 vcc, 0, v0
; GFX10-NEXT: s_and_b64 vcc, exec, vcc
; GFX10-NEXT: s_cbranch_vccz BB11_5
; GFX10-NEXT: ; %bb.4: ; %bb10
; GFX10-NEXT: v_mov_b32_e32 v0, 9
; GFX10-NEXT: global_store_dword v[0:1], v0, off
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-NEXT: BB11_5: ; %end
; GFX10-NEXT: s_endpgm
; GFX10-NEXT: BB11_6:
; GFX10-NEXT: exp null off, off, off, off done vm
; GFX10-NEXT: s_endpgm
bb:
%tmp = fadd float %x, 1.000000e+00
%tmp1 = fcmp olt float 0.000000e+00, %tmp
%tmp2 = select i1 %tmp1, float -1.000000e+00, float 0.000000e+00
%cmp.tmp2 = fcmp olt float %tmp2, 0.0
call void @llvm.amdgcn.kill(i1 %cmp.tmp2)
br i1 undef, label %phibb, label %bb8
phibb:
%tmp5 = phi float [ %tmp2, %bb ], [ 4.0, %bb8 ]
%tmp6 = fcmp oeq float %tmp5, 0.000000e+00
br i1 %tmp6, label %bb10, label %end
bb8:
store volatile i32 8, i32 addrspace(1)* undef
br label %phibb
bb10:
store volatile i32 9, i32 addrspace(1)* undef
br label %end
end:
ret void
}
; Kill in a block (%bb6) that ends in unreachable: the kill path clears exec
; and goes straight to the null export + endpgm with no skip branch, and no
; skip is inserted for %bb5 (which also has no successors).
define amdgpu_ps void @no_skip_no_successors(float inreg %arg, float inreg %arg1) #0 {
; SI-LABEL: no_skip_no_successors:
; SI: ; %bb.0: ; %bb
; SI-NEXT: v_cmp_nge_f32_e64 s[2:3], s1, 0
; SI-NEXT: s_and_b64 vcc, exec, s[2:3]
; SI-NEXT: s_cbranch_vccz BB12_2
; SI-NEXT: ; %bb.1: ; %bb6
; SI-NEXT: s_mov_b64 exec, 0
; SI-NEXT: exp null off, off, off, off done vm
; SI-NEXT: s_endpgm
; SI-NEXT: BB12_2: ; %bb3
; SI-NEXT: v_mov_b32_e32 v0, 0x3e7ae148
; SI-NEXT: v_cmp_nge_f32_e32 vcc, s0, v0
; SI-NEXT: s_and_b64 vcc, exec, vcc
; SI-NEXT: ; %bb.3: ; %bb5
;
; GFX10-LABEL: no_skip_no_successors:
; GFX10: ; %bb.0: ; %bb
; GFX10-NEXT: v_cmp_nge_f32_e64 s[2:3], s1, 0
; GFX10-NEXT: s_and_b64 vcc, exec, s[2:3]
; GFX10-NEXT: s_cbranch_vccz BB12_2
; GFX10-NEXT: ; %bb.1: ; %bb6
; GFX10-NEXT: s_mov_b64 exec, 0
; GFX10-NEXT: exp null off, off, off, off done vm
; GFX10-NEXT: s_endpgm
; GFX10-NEXT: BB12_2: ; %bb3
; GFX10-NEXT: v_cmp_nle_f32_e64 s[0:1], 0x3e7ae148, s0
; GFX10-NEXT: s_and_b64 vcc, exec, s[0:1]
; GFX10-NEXT: ; %bb.3: ; %bb5
bb:
%tmp = fcmp ult float %arg1, 0.000000e+00
%tmp2 = fcmp ult float %arg, 0x3FCF5C2900000000
br i1 %tmp, label %bb6, label %bb3
bb3: ; preds = %bb
br i1 %tmp2, label %bb5, label %bb4
bb4: ; preds = %bb3
br i1 true, label %bb5, label %bb7
bb5: ; preds = %bb4, %bb3
unreachable
bb6: ; preds = %bb
call void @llvm.amdgcn.kill(i1 false)
unreachable
bb7: ; preds = %bb4
ret void
}
; Kill inside a divergent if followed by an image sample and a second if:
; the shader runs in WQM (s_wqm_b64), the kill's v_cmpx sits in %bb3, and
; the exec-zero skip to the early-exit block happens after rejoining at %bb4.
define amdgpu_ps void @if_after_kill_block(float %arg, float %arg1, float %arg2, float %arg3) #0 {
; SI-LABEL: if_after_kill_block:
; SI: ; %bb.0: ; %bb
; SI-NEXT: s_wqm_b64 exec, exec
; SI-NEXT: s_mov_b32 s0, 0
; SI-NEXT: v_cmp_nle_f32_e32 vcc, 0, v1
; SI-NEXT: s_and_saveexec_b64 s[2:3], vcc
; SI-NEXT: s_xor_b64 s[2:3], exec, s[2:3]
; SI-NEXT: s_cbranch_execz BB13_2
; SI-NEXT: ; %bb.1: ; %bb3
; SI-NEXT: v_cmpx_gt_f32_e32 vcc, 0, v0
; SI-NEXT: BB13_2: ; %bb4
; SI-NEXT: s_or_b64 exec, exec, s[2:3]
; SI-NEXT: s_cbranch_execz BB13_6
; SI-NEXT: ; %bb.3: ; %bb4
; SI-NEXT: s_mov_b32 s1, s0
; SI-NEXT: s_mov_b32 s2, s0
; SI-NEXT: s_mov_b32 s3, s0
; SI-NEXT: s_mov_b32 s4, s0
; SI-NEXT: s_mov_b32 s5, s0
; SI-NEXT: s_mov_b32 s6, s0
; SI-NEXT: s_mov_b32 s7, s0
; SI-NEXT: image_sample_c v0, v[2:3], s[0:7], s[0:3] dmask:0x10
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cmp_neq_f32_e32 vcc, 0, v0
; SI-NEXT: s_and_saveexec_b64 s[0:1], vcc
; SI-NEXT: s_cbranch_execz BB13_5
; SI-NEXT: ; %bb.4: ; %bb8
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: v_mov_b32_e32 v0, 9
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: BB13_5: ; %UnifiedReturnBlock
; SI-NEXT: s_endpgm
; SI-NEXT: BB13_6:
; SI-NEXT: exp null off, off, off, off done vm
; SI-NEXT: s_endpgm
;
; GFX10-LABEL: if_after_kill_block:
; GFX10: ; %bb.0: ; %bb
; GFX10-NEXT: s_wqm_b64 exec, exec
; GFX10-NEXT: v_cmp_nle_f32_e32 vcc, 0, v1
; GFX10-NEXT: s_mov_b32 s0, 0
; GFX10-NEXT: s_and_saveexec_b64 s[2:3], vcc
; GFX10-NEXT: s_xor_b64 s[2:3], exec, s[2:3]
; GFX10-NEXT: s_cbranch_execz BB13_2
; GFX10-NEXT: ; %bb.1: ; %bb3
; GFX10-NEXT: s_waitcnt_depctr 0xfffe
; GFX10-NEXT: v_cmpx_gt_f32_e32 0, v0
; GFX10-NEXT: BB13_2: ; %bb4
; GFX10-NEXT: s_or_b64 exec, exec, s[2:3]
; GFX10-NEXT: s_cbranch_execz BB13_6
; GFX10-NEXT: ; %bb.3: ; %bb4
; GFX10-NEXT: s_mov_b32 s1, s0
; GFX10-NEXT: s_mov_b32 s2, s0
; GFX10-NEXT: s_mov_b32 s3, s0
; GFX10-NEXT: s_mov_b32 s4, s0
; GFX10-NEXT: s_mov_b32 s5, s0
; GFX10-NEXT: s_mov_b32 s6, s0
; GFX10-NEXT: s_mov_b32 s7, s0
; GFX10-NEXT: image_sample_c v0, v[2:3], s[0:7], s[0:3] dmask:0x10 dim:SQ_RSRC_IMG_1D
; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: v_cmp_neq_f32_e32 vcc, 0, v0
; GFX10-NEXT: s_and_saveexec_b64 s[0:1], vcc
; GFX10-NEXT: s_cbranch_execz BB13_5
; GFX10-NEXT: ; %bb.4: ; %bb8
; GFX10-NEXT: v_mov_b32_e32 v0, 9
; GFX10-NEXT: global_store_dword v[0:1], v0, off
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-NEXT: BB13_5: ; %UnifiedReturnBlock
; GFX10-NEXT: s_endpgm
; GFX10-NEXT: BB13_6:
; GFX10-NEXT: exp null off, off, off, off done vm
; GFX10-NEXT: s_endpgm
bb:
%tmp = fcmp ult float %arg1, 0.000000e+00
br i1 %tmp, label %bb3, label %bb4
bb3: ; preds = %bb
%cmp.arg = fcmp olt float %arg, 0.0
call void @llvm.amdgcn.kill(i1 %cmp.arg)
br label %bb4
bb4: ; preds = %bb3, %bb
%tmp5 = call <4 x float> @llvm.amdgcn.image.sample.c.1d.v4f32.f32(i32 16, float %arg2, float %arg3, <8 x i32> undef, <4 x i32> undef, i1 0, i32 0, i32 0)
%tmp6 = extractelement <4 x float> %tmp5, i32 0
%tmp7 = fcmp une float %tmp6, 0.000000e+00
br i1 %tmp7, label %bb8, label %bb9
bb8: ; preds = %bb9, %bb4
store volatile i32 9, i32 addrspace(1)* undef
ret void
bb9: ; preds = %bb4
ret void
}
; Unconditional kill reached through a conditional branch on a sample result:
; the %kill block just clears exec (no null export there); the export block
; at the end checks exec and falls into the early-exit block if all lanes died.
define amdgpu_ps void @cbranch_kill(i32 inreg %0, <2 x float> %1) {
; SI-LABEL: cbranch_kill:
; SI: ; %bb.0: ; %.entry
; SI-NEXT: s_mov_b32 m0, s0
; SI-NEXT: s_mov_b32 s4, 0
; SI-NEXT: v_interp_p1_f32 v2, v0, attr1.x
; SI-NEXT: v_mov_b32_e32 v3, v2
; SI-NEXT: v_mov_b32_e32 v4, v2
; SI-NEXT: s_mov_b32 s5, s4
; SI-NEXT: s_mov_b32 s6, s4
; SI-NEXT: s_mov_b32 s7, s4
; SI-NEXT: s_mov_b32 s8, s4
; SI-NEXT: s_mov_b32 s9, s4
; SI-NEXT: s_mov_b32 s10, s4
; SI-NEXT: s_mov_b32 s11, s4
; SI-NEXT: image_sample_lz v2, v[2:4], s[4:11], s[0:3] dmask:0x1 da
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cmp_ge_f32_e32 vcc, 0, v2
; SI-NEXT: s_and_saveexec_b64 s[2:3], vcc
; SI-NEXT: s_xor_b64 s[2:3], exec, s[2:3]
; SI-NEXT: s_cbranch_execz BB14_2
; SI-NEXT: ; %bb.1: ; %kill
; SI-NEXT: s_mov_b64 exec, 0
; SI-NEXT: BB14_2: ; %Flow
; SI-NEXT: s_or_saveexec_b64 s[2:3], s[2:3]
; SI-NEXT: ; implicit-def: $vgpr3
; SI-NEXT: ; implicit-def: $vgpr4
; SI-NEXT: ; implicit-def: $vgpr5
; SI-NEXT: ; implicit-def: $vgpr6
; SI-NEXT: s_xor_b64 exec, exec, s[2:3]
; SI-NEXT: ; %bb.3: ; %live
; SI-NEXT: s_mov_b32 m0, s0
; SI-NEXT: v_interp_p1_f32 v4, v0, attr0.x
; SI-NEXT: v_interp_p1_f32 v0, v0, attr0.y
; SI-NEXT: v_mul_f32_e32 v3, v4, v2
; SI-NEXT: v_interp_p2_f32 v4, v1, attr0.x
; SI-NEXT: v_mul_f32_e32 v5, v0, v2
; SI-NEXT: v_interp_p2_f32 v0, v1, attr0.y
; SI-NEXT: v_mul_f32_e32 v4, v4, v2
; SI-NEXT: v_mul_f32_e32 v6, v0, v2
; SI-NEXT: ; %bb.4: ; %export
; SI-NEXT: s_or_b64 exec, exec, s[2:3]
; SI-NEXT: s_cbranch_execz BB14_6
; SI-NEXT: ; %bb.5: ; %export
; SI-NEXT: v_cvt_pkrtz_f16_f32_e32 v0, v3, v4
; SI-NEXT: v_cvt_pkrtz_f16_f32_e32 v1, v5, v6
; SI-NEXT: exp mrt0 v0, v0, v1, v1 done compr vm
; SI-NEXT: s_endpgm
; SI-NEXT: BB14_6:
; SI-NEXT: exp null off, off, off, off done vm
; SI-NEXT: s_endpgm
;
; GFX10-LABEL: cbranch_kill:
; GFX10: ; %bb.0: ; %.entry
; GFX10-NEXT: s_mov_b32 m0, s0
; GFX10-NEXT: s_mov_b32 s4, 0
; GFX10-NEXT: v_interp_p1_f32_e32 v2, v0, attr1.x
; GFX10-NEXT: s_mov_b32 s5, s4
; GFX10-NEXT: s_mov_b32 s6, s4
; GFX10-NEXT: s_mov_b32 s7, s4
; GFX10-NEXT: s_mov_b32 s8, s4
; GFX10-NEXT: s_mov_b32 s9, s4
; GFX10-NEXT: s_mov_b32 s10, s4
; GFX10-NEXT: s_mov_b32 s11, s4
; GFX10-NEXT: image_sample_lz v2, [v2, v2, v2], s[4:11], s[0:3] dmask:0x1 dim:SQ_RSRC_IMG_2D_ARRAY
; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: v_cmp_ge_f32_e32 vcc, 0, v2
; GFX10-NEXT: s_and_saveexec_b64 s[2:3], vcc
; GFX10-NEXT: s_xor_b64 s[2:3], exec, s[2:3]
; GFX10-NEXT: s_cbranch_execz BB14_2
; GFX10-NEXT: ; %bb.1: ; %kill
; GFX10-NEXT: s_mov_b64 exec, 0
; GFX10-NEXT: BB14_2: ; %Flow
; GFX10-NEXT: s_or_saveexec_b64 s[2:3], s[2:3]
; GFX10-NEXT: ; implicit-def: $vgpr3
; GFX10-NEXT: ; implicit-def: $vgpr5
; GFX10-NEXT: ; implicit-def: $vgpr4
; GFX10-NEXT: ; implicit-def: $vgpr6
; GFX10-NEXT: s_xor_b64 exec, exec, s[2:3]
; GFX10-NEXT: ; %bb.3: ; %live
; GFX10-NEXT: s_mov_b32 m0, s0
; GFX10-NEXT: v_interp_p1_f32_e32 v3, v0, attr0.x
; GFX10-NEXT: v_interp_p1_f32_e32 v0, v0, attr0.y
; GFX10-NEXT: v_mov_b32_e32 v7, v3
; GFX10-NEXT: v_mov_b32_e32 v11, v0
; GFX10-NEXT: v_mul_f32_e32 v3, v3, v2
; GFX10-NEXT: v_mul_f32_e32 v4, v0, v2
; GFX10-NEXT: v_interp_p2_f32_e32 v7, v1, attr0.x
; GFX10-NEXT: v_interp_p2_f32_e32 v11, v1, attr0.y
; GFX10-NEXT: v_mul_f32_e32 v5, v7, v2
; GFX10-NEXT: v_mul_f32_e32 v6, v11, v2
; GFX10-NEXT: ; %bb.4: ; %export
; GFX10-NEXT: s_or_b64 exec, exec, s[2:3]
; GFX10-NEXT: s_cbranch_execz BB14_6
; GFX10-NEXT: ; %bb.5: ; %export
; GFX10-NEXT: v_cvt_pkrtz_f16_f32_e32 v0, v3, v5
; GFX10-NEXT: v_cvt_pkrtz_f16_f32_e32 v1, v4, v6
; GFX10-NEXT: exp mrt0 v0, v0, v1, v1 done compr vm
; GFX10-NEXT: s_endpgm
; GFX10-NEXT: BB14_6:
; GFX10-NEXT: exp null off, off, off, off done vm
; GFX10-NEXT: s_endpgm
.entry:
%val0 = extractelement <2 x float> %1, i32 0
%val1 = extractelement <2 x float> %1, i32 1
%p0 = call float @llvm.amdgcn.interp.p1(float %val0, i32 immarg 0, i32 immarg 1, i32 %0) #2
%sample = call float @llvm.amdgcn.image.sample.l.2darray.f32.f32(i32 1, float %p0, float %p0, float %p0, float 0.000000e+00, <8 x i32> undef, <4 x i32> undef, i1 false, i32 0, i32 0)
%cond0 = fcmp ugt float %sample, 0.000000e+00
br i1 %cond0, label %live, label %kill
kill:
call void @llvm.amdgcn.kill(i1 false)
br label %export
live:
%i0 = call float @llvm.amdgcn.interp.p1(float %val0, i32 immarg 0, i32 immarg 0, i32 %0) #2
%i1 = call float @llvm.amdgcn.interp.p2(float %i0, float %val1, i32 immarg 0, i32 immarg 0, i32 %0) #2
%i2 = call float @llvm.amdgcn.interp.p1(float %val0, i32 immarg 1, i32 immarg 0, i32 %0) #2
%i3 = call float @llvm.amdgcn.interp.p2(float %i2, float %val1, i32 immarg 1, i32 immarg 0, i32 %0) #2
%scale.i0 = fmul reassoc nnan nsz arcp contract float %i0, %sample
%scale.i1 = fmul reassoc nnan nsz arcp contract float %i1, %sample
%scale.i2 = fmul reassoc nnan nsz arcp contract float %i2, %sample
%scale.i3 = fmul reassoc nnan nsz arcp contract float %i3, %sample
br label %export
export:
%proxy.0.0 = phi float [ undef, %kill ], [ %scale.i0, %live ]
%proxy.0.1 = phi float [ undef, %kill ], [ %scale.i1, %live ]
%proxy.0.2 = phi float [ undef, %kill ], [ %scale.i2, %live ]
%proxy.0.3 = phi float [ undef, %kill ], [ %scale.i3, %live ]
%out.0 = call <2 x half> @llvm.amdgcn.cvt.pkrtz(float %proxy.0.0, float %proxy.0.1) #2
%out.1 = call <2 x half> @llvm.amdgcn.cvt.pkrtz(float %proxy.0.2, float %proxy.0.3) #2
call void @llvm.amdgcn.exp.compr.v2f16(i32 immarg 0, i32 immarg 15, <2 x half> %out.0, <2 x half> %out.1, i1 immarg true, i1 immarg true) #3
ret void
}
; Kill inside a loop with divergent control flow: the %kill block clears
; exec and falls through to the %latch block (which restores per-iteration
; exec from s[4:5]); the final export happens in the common exit block.
define amdgpu_ps void @complex_loop(i32 inreg %cmpa, i32 %cmpb, i32 %cmpc) {
; SI-LABEL: complex_loop:
; SI: ; %bb.0: ; %.entry
; SI-NEXT: s_cmp_lt_i32 s0, 1
; SI-NEXT: v_mov_b32_e32 v2, -1
; SI-NEXT: s_cbranch_scc1 BB15_6
; SI-NEXT: ; %bb.1: ; %.lr.ph
; SI-NEXT: s_mov_b32 s2, 0
; SI-NEXT: s_mov_b64 s[0:1], 0
; SI-NEXT: s_branch BB15_4
; SI-NEXT: BB15_2: ; %kill
; SI-NEXT: ; in Loop: Header=BB15_4 Depth=1
; SI-NEXT: s_mov_b64 exec, 0
; SI-NEXT: BB15_3: ; %latch
; SI-NEXT: ; in Loop: Header=BB15_4 Depth=1
; SI-NEXT: s_or_b64 exec, exec, s[4:5]
; SI-NEXT: s_add_i32 s2, s2, 1
; SI-NEXT: v_cmp_ge_i32_e32 vcc, s2, v1
; SI-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
; SI-NEXT: v_mov_b32_e32 v2, s2
; SI-NEXT: s_andn2_b64 exec, exec, s[0:1]
; SI-NEXT: s_cbranch_execz BB15_5
; SI-NEXT: BB15_4: ; %hdr
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: v_cmp_gt_u32_e32 vcc, s2, v0
; SI-NEXT: s_and_saveexec_b64 s[4:5], vcc
; SI-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
; SI-NEXT: s_cbranch_execnz BB15_2
; SI-NEXT: s_branch BB15_3
; SI-NEXT: BB15_5: ; %Flow
; SI-NEXT: s_or_b64 exec, exec, s[0:1]
; SI-NEXT: BB15_6: ; %._crit_edge
; SI-NEXT: exp mrt0 v2, v2, v0, v0 done compr vm
; SI-NEXT: s_endpgm
;
; GFX10-LABEL: complex_loop:
; GFX10: ; %bb.0: ; %.entry
; GFX10-NEXT: v_mov_b32_e32 v2, -1
; GFX10-NEXT: s_cmp_lt_i32 s0, 1
; GFX10-NEXT: s_cbranch_scc1 BB15_6
; GFX10-NEXT: ; %bb.1: ; %.lr.ph
; GFX10-NEXT: s_mov_b32 s2, 0
; GFX10-NEXT: s_mov_b64 s[0:1], 0
; GFX10-NEXT: s_branch BB15_4
; GFX10-NEXT: BB15_2: ; %kill
; GFX10-NEXT: ; in Loop: Header=BB15_4 Depth=1
; GFX10-NEXT: s_mov_b64 exec, 0
; GFX10-NEXT: BB15_3: ; %latch
; GFX10-NEXT: ; in Loop: Header=BB15_4 Depth=1
; GFX10-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX10-NEXT: s_add_i32 s2, s2, 1
; GFX10-NEXT: v_cmp_ge_i32_e32 vcc, s2, v1
; GFX10-NEXT: v_mov_b32_e32 v2, s2
; GFX10-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
; GFX10-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX10-NEXT: s_cbranch_execz BB15_5
; GFX10-NEXT: BB15_4: ; %hdr
; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10-NEXT: v_cmp_gt_u32_e32 vcc, s2, v0
; GFX10-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GFX10-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
; GFX10-NEXT: s_cbranch_execnz BB15_2
; GFX10-NEXT: s_branch BB15_3
; GFX10-NEXT: BB15_5: ; %Flow
; GFX10-NEXT: s_or_b64 exec, exec, s[0:1]
; GFX10-NEXT: BB15_6: ; %._crit_edge
; GFX10-NEXT: exp mrt0 v2, v2, v0, v0 done compr vm
; GFX10-NEXT: s_endpgm
.entry:
%flaga = icmp sgt i32 %cmpa, 0
br i1 %flaga, label %.lr.ph, label %._crit_edge
.lr.ph:
br label %hdr
hdr:
%ctr = phi i32 [ 0, %.lr.ph ], [ %ctr.next, %latch ]
%flagb = icmp ugt i32 %ctr, %cmpb
br i1 %flagb, label %kill, label %latch
kill:
call void @llvm.amdgcn.kill(i1 false)
br label %latch
latch:
%ctr.next = add nuw nsw i32 %ctr, 1
%flagc = icmp slt i32 %ctr.next, %cmpc
br i1 %flagc, label %hdr, label %._crit_edge
._crit_edge:
%tmp = phi i32 [ -1, %.entry ], [ %ctr.next, %latch ]
%out = bitcast i32 %tmp to <2 x half>
call void @llvm.amdgcn.exp.compr.v2f16(i32 immarg 0, i32 immarg 15, <2 x half> %out, <2 x half> undef, i1 immarg true, i1 immarg true)
ret void
}
; No kill here: checks that a mode-register write (s_setreg on HW_REG_MODE,
; from llvm.amdgcn.s.setreg) inside a divergent if is guarded by the usual
; skip branch rather than executed unconditionally.
define void @skip_mode_switch(i32 %arg) {
; SI-LABEL: skip_mode_switch:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
; SI-NEXT: s_and_saveexec_b64 s[4:5], vcc
; SI-NEXT: s_cbranch_execz BB16_2
; SI-NEXT: ; %bb.1: ; %bb.0
; SI-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_MODE, 0, 2), 3
; SI-NEXT: BB16_2: ; %bb.1
; SI-NEXT: s_or_b64 exec, exec, s[4:5]
; SI-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: skip_mode_switch:
; GFX10: ; %bb.0: ; %entry
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
; GFX10-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
; GFX10-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GFX10-NEXT: s_cbranch_execz BB16_2
; GFX10-NEXT: ; %bb.1: ; %bb.0
; GFX10-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_MODE, 0, 2), 3
; GFX10-NEXT: BB16_2: ; %bb.1
; GFX10-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX10-NEXT: s_setpc_b64 s[30:31]
entry:
%cmp = icmp eq i32 %arg, 0
br i1 %cmp, label %bb.0, label %bb.1
bb.0:
call void @llvm.amdgcn.s.setreg(i32 2049, i32 3)
br label %bb.1
bb.1:
ret void
}
declare float @llvm.amdgcn.interp.p1(float, i32 immarg, i32 immarg, i32) #2
declare float @llvm.amdgcn.interp.p2(float, float, i32 immarg, i32 immarg, i32) #2
declare void @llvm.amdgcn.exp.compr.v2f16(i32 immarg, i32 immarg, <2 x half>, <2 x half>, i1 immarg, i1 immarg) #3
declare <2 x half> @llvm.amdgcn.cvt.pkrtz(float, float) #2
declare float @llvm.amdgcn.image.sample.l.2darray.f32.f32(i32 immarg, float, float, float, float, <8 x i32>, <4 x i32>, i1 immarg, i32 immarg, i32 immarg) #1
declare <4 x float> @llvm.amdgcn.image.sample.c.1d.v4f32.f32(i32, float, float, <8 x i32>, <4 x i32>, i1, i32, i32) #1
declare void @llvm.amdgcn.kill(i1) #0
declare void @llvm.amdgcn.s.setreg(i32 immarg, i32)
attributes #0 = { nounwind }
attributes #1 = { nounwind readonly }
attributes #2 = { nounwind readnone speculatable }
attributes #3 = { inaccessiblememonly nounwind writeonly }