clang-p2996/llvm/test/CodeGen/AMDGPU/cf-loop-on-constant.ll
Commit 9e9907f1cf by Fangrui Song, 2024-01-16: [AMDGPU,test] Change llc -march= to -mtriple= (#75982)
Similar to 806761a762.

For IR files without a target triple, -mtriple= specifies the full
target triple while -march= merely sets the architecture part of the
default target triple, leaving a target triple which may not make sense,
e.g. amdgpu-apple-darwin.
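
For example, for a hypothetical triple-less input t.ll on a host whose
default triple is x86_64-apple-darwin:

```
  llc -march=amdgcn < t.ll    # effective triple: amdgcn-apple-darwin
  llc -mtriple=amdgcn < t.ll  # effective triple: amdgcn (OS/environment unset)
```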

Therefore, -march= is error-prone and not recommended for tests without
a target triple. The issue has been benign because we recognize
$unknown-apple-darwin as ELF instead of rejecting it outright.

This patch changes AMDGPU tests so that they do not rely on the default
OS/environment components. Tests that would need additional fixes are left
unchanged:

```
  LLVM :: CodeGen/AMDGPU/fabs.f64.ll
  LLVM :: CodeGen/AMDGPU/fabs.ll
  LLVM :: CodeGen/AMDGPU/floor.ll
  LLVM :: CodeGen/AMDGPU/fneg-fabs.f64.ll
  LLVM :: CodeGen/AMDGPU/fneg-fabs.ll
  LLVM :: CodeGen/AMDGPU/r600-infinite-loop-bug-while-reorganizing-vector.ll
  LLVM :: CodeGen/AMDGPU/schedule-if-2.ll
```

; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
; RUN: llc -mtriple=amdgcn -verify-machineinstrs -O0 < %s | FileCheck -check-prefix=GCN_DBG %s
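
; The kernels below exercise loops whose backedge condition is constant or
; uniform. GCN checks the default pipeline; GCN_DBG checks -O0, where the
; loop counter lives in an SGPR that is spilled to a VGPR lane around each
; block (see the writelane/readlane and Folded Spill/Reload traffic).

; test_loop: the loop is bypassed when %n == -1; otherwise the backedge is
; unconditional, so the loop never terminates.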
define amdgpu_kernel void @test_loop(ptr addrspace(3) %ptr, i32 %n) nounwind {
; GCN-LABEL: test_loop:
; GCN: ; %bb.0: ; %entry
; GCN-NEXT: s_load_dword s2, s[0:1], 0xa
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: s_cmp_eq_u32 s2, -1
; GCN-NEXT: s_cbranch_scc1 .LBB0_3
; GCN-NEXT: ; %bb.1: ; %for.body.preheader
; GCN-NEXT: s_load_dword s0, s[0:1], 0x9
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: s_addk_i32 s0, 0x80
; GCN-NEXT: s_and_b64 vcc, exec, -1
; GCN-NEXT: s_mov_b32 m0, -1
; GCN-NEXT: .LBB0_2: ; %for.body
; GCN-NEXT: ; =>This Inner Loop Header: Depth=1
; GCN-NEXT: v_mov_b32_e32 v0, s0
; GCN-NEXT: ds_read_b32 v1, v0
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: v_add_f32_e32 v1, 1.0, v1
; GCN-NEXT: ds_write_b32 v0, v1
; GCN-NEXT: s_add_i32 s0, s0, 4
; GCN-NEXT: s_mov_b64 vcc, vcc
; GCN-NEXT: s_cbranch_vccnz .LBB0_2
; GCN-NEXT: .LBB0_3: ; %for.exit
; GCN-NEXT: s_endpgm
;
; GCN_DBG-LABEL: test_loop:
; GCN_DBG: ; %bb.0: ; %entry
; GCN_DBG-NEXT: s_mov_b32 s12, SCRATCH_RSRC_DWORD0
; GCN_DBG-NEXT: s_mov_b32 s13, SCRATCH_RSRC_DWORD1
; GCN_DBG-NEXT: s_mov_b32 s14, -1
; GCN_DBG-NEXT: s_mov_b32 s15, 0xe8f000
; GCN_DBG-NEXT: s_add_u32 s12, s12, s9
; GCN_DBG-NEXT: s_addc_u32 s13, s13, 0
; GCN_DBG-NEXT: ; implicit-def: $vgpr0 : SGPR spill to VGPR lane
; GCN_DBG-NEXT: s_load_dword s0, s[2:3], 0x9
; GCN_DBG-NEXT: s_waitcnt lgkmcnt(0)
; GCN_DBG-NEXT: v_writelane_b32 v0, s0, 0
; GCN_DBG-NEXT: s_load_dword s1, s[2:3], 0xa
; GCN_DBG-NEXT: s_mov_b32 s0, 0
; GCN_DBG-NEXT: s_mov_b32 s2, -1
; GCN_DBG-NEXT: s_waitcnt lgkmcnt(0)
; GCN_DBG-NEXT: s_cmp_lg_u32 s1, s2
; GCN_DBG-NEXT: v_writelane_b32 v0, s0, 1
; GCN_DBG-NEXT: s_mov_b64 s[4:5], exec
; GCN_DBG-NEXT: s_mov_b64 exec, -1
; GCN_DBG-NEXT: buffer_store_dword v0, off, s[12:15], 0 offset:4 ; 4-byte Folded Spill
; GCN_DBG-NEXT: s_mov_b64 exec, s[4:5]
; GCN_DBG-NEXT: s_cbranch_scc1 .LBB0_2
; GCN_DBG-NEXT: ; %bb.1: ; %for.exit
; GCN_DBG-NEXT: s_or_saveexec_b64 s[4:5], -1
; GCN_DBG-NEXT: s_waitcnt expcnt(0)
; GCN_DBG-NEXT: buffer_load_dword v0, off, s[12:15], 0 offset:4 ; 4-byte Folded Reload
; GCN_DBG-NEXT: s_mov_b64 exec, s[4:5]
; GCN_DBG-NEXT: ; kill: killed $vgpr0
; GCN_DBG-NEXT: s_endpgm
; GCN_DBG-NEXT: .LBB0_2: ; %for.body
; GCN_DBG-NEXT: ; =>This Inner Loop Header: Depth=1
; GCN_DBG-NEXT: s_or_saveexec_b64 s[4:5], -1
; GCN_DBG-NEXT: s_waitcnt expcnt(0)
; GCN_DBG-NEXT: buffer_load_dword v0, off, s[12:15], 0 offset:4 ; 4-byte Folded Reload
; GCN_DBG-NEXT: s_mov_b64 exec, s[4:5]
; GCN_DBG-NEXT: s_waitcnt vmcnt(0)
; GCN_DBG-NEXT: v_readlane_b32 s0, v0, 1
; GCN_DBG-NEXT: v_readlane_b32 s2, v0, 0
; GCN_DBG-NEXT: s_mov_b32 s1, 2
; GCN_DBG-NEXT: s_lshl_b32 s1, s0, s1
; GCN_DBG-NEXT: s_add_i32 s1, s1, s2
; GCN_DBG-NEXT: s_mov_b32 s2, 0x80
; GCN_DBG-NEXT: s_add_i32 s1, s1, s2
; GCN_DBG-NEXT: s_mov_b32 m0, -1
; GCN_DBG-NEXT: v_mov_b32_e32 v1, s1
; GCN_DBG-NEXT: ds_read_b32 v1, v1
; GCN_DBG-NEXT: s_mov_b32 s2, 1.0
; GCN_DBG-NEXT: s_waitcnt lgkmcnt(0)
; GCN_DBG-NEXT: v_add_f32_e64 v2, v1, s2
; GCN_DBG-NEXT: s_mov_b32 m0, -1
; GCN_DBG-NEXT: v_mov_b32_e32 v1, s1
; GCN_DBG-NEXT: ds_write_b32 v1, v2
; GCN_DBG-NEXT: s_mov_b32 s1, 1
; GCN_DBG-NEXT: s_add_i32 s0, s0, s1
; GCN_DBG-NEXT: s_mov_b64 s[2:3], -1
; GCN_DBG-NEXT: s_and_b64 vcc, exec, s[2:3]
; GCN_DBG-NEXT: v_writelane_b32 v0, s0, 1
; GCN_DBG-NEXT: s_or_saveexec_b64 s[4:5], -1
; GCN_DBG-NEXT: buffer_store_dword v0, off, s[12:15], 0 offset:4 ; 4-byte Folded Spill
; GCN_DBG-NEXT: s_mov_b64 exec, s[4:5]
; GCN_DBG-NEXT: s_cbranch_vccnz .LBB0_2
; GCN_DBG-NEXT: ; %bb.3: ; %DummyReturnBlock
; GCN_DBG-NEXT: s_or_saveexec_b64 s[4:5], -1
; GCN_DBG-NEXT: s_waitcnt expcnt(0)
; GCN_DBG-NEXT: buffer_load_dword v0, off, s[12:15], 0 offset:4 ; 4-byte Folded Reload
; GCN_DBG-NEXT: s_mov_b64 exec, s[4:5]
; GCN_DBG-NEXT: ; kill: killed $vgpr0
; GCN_DBG-NEXT: s_endpgm
entry:
  %cmp = icmp eq i32 %n, -1
  br i1 %cmp, label %for.exit, label %for.body

for.exit:
  ret void

for.body:
  %indvar = phi i32 [ %inc, %for.body ], [ 0, %entry ]
  %tmp = add i32 %indvar, 32
  %arrayidx = getelementptr float, ptr addrspace(3) %ptr, i32 %tmp
  %vecload = load float, ptr addrspace(3) %arrayidx, align 4
  %add = fadd float %vecload, 1.0
  store float %add, ptr addrspace(3) %arrayidx, align 8
  %inc = add i32 %indvar, 1
  br label %for.body
}
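
; loop_const_true: the backedge condition is constant true, so the loop can
; never exit; in the default pipeline the unreachable exit becomes a
; DummyReturnBlock ending in s_endpgm.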
define amdgpu_kernel void @loop_const_true(ptr addrspace(3) %ptr, i32 %n) nounwind {
; GCN-LABEL: loop_const_true:
; GCN: ; %bb.0: ; %entry
; GCN-NEXT: s_load_dword s0, s[0:1], 0x9
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: s_addk_i32 s0, 0x80
; GCN-NEXT: s_and_b64 vcc, exec, -1
; GCN-NEXT: s_mov_b32 m0, -1
; GCN-NEXT: .LBB1_1: ; %for.body
; GCN-NEXT: ; =>This Inner Loop Header: Depth=1
; GCN-NEXT: v_mov_b32_e32 v0, s0
; GCN-NEXT: ds_read_b32 v1, v0
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: v_add_f32_e32 v1, 1.0, v1
; GCN-NEXT: ds_write_b32 v0, v1
; GCN-NEXT: s_add_i32 s0, s0, 4
; GCN-NEXT: s_mov_b64 vcc, vcc
; GCN-NEXT: s_cbranch_vccnz .LBB1_1
; GCN-NEXT: ; %bb.2: ; %DummyReturnBlock
; GCN-NEXT: s_endpgm
;
; GCN_DBG-LABEL: loop_const_true:
; GCN_DBG: ; %bb.0: ; %entry
; GCN_DBG-NEXT: s_mov_b32 s12, SCRATCH_RSRC_DWORD0
; GCN_DBG-NEXT: s_mov_b32 s13, SCRATCH_RSRC_DWORD1
; GCN_DBG-NEXT: s_mov_b32 s14, -1
; GCN_DBG-NEXT: s_mov_b32 s15, 0xe8f000
; GCN_DBG-NEXT: s_add_u32 s12, s12, s9
; GCN_DBG-NEXT: s_addc_u32 s13, s13, 0
; GCN_DBG-NEXT: ; implicit-def: $vgpr0 : SGPR spill to VGPR lane
; GCN_DBG-NEXT: s_load_dword s0, s[2:3], 0x9
; GCN_DBG-NEXT: s_waitcnt lgkmcnt(0)
; GCN_DBG-NEXT: v_writelane_b32 v0, s0, 0
; GCN_DBG-NEXT: s_mov_b32 s0, 0
; GCN_DBG-NEXT: v_writelane_b32 v0, s0, 1
; GCN_DBG-NEXT: s_or_saveexec_b64 s[4:5], -1
; GCN_DBG-NEXT: buffer_store_dword v0, off, s[12:15], 0 offset:4 ; 4-byte Folded Spill
; GCN_DBG-NEXT: s_mov_b64 exec, s[4:5]
; GCN_DBG-NEXT: s_branch .LBB1_2
; GCN_DBG-NEXT: .LBB1_1: ; %for.exit
; GCN_DBG-NEXT: s_or_saveexec_b64 s[4:5], -1
; GCN_DBG-NEXT: s_waitcnt expcnt(0)
; GCN_DBG-NEXT: buffer_load_dword v0, off, s[12:15], 0 offset:4 ; 4-byte Folded Reload
; GCN_DBG-NEXT: s_mov_b64 exec, s[4:5]
; GCN_DBG-NEXT: ; kill: killed $vgpr0
; GCN_DBG-NEXT: s_endpgm
; GCN_DBG-NEXT: .LBB1_2: ; %for.body
; GCN_DBG-NEXT: ; =>This Inner Loop Header: Depth=1
; GCN_DBG-NEXT: s_or_saveexec_b64 s[4:5], -1
; GCN_DBG-NEXT: s_waitcnt expcnt(0)
; GCN_DBG-NEXT: buffer_load_dword v0, off, s[12:15], 0 offset:4 ; 4-byte Folded Reload
; GCN_DBG-NEXT: s_mov_b64 exec, s[4:5]
; GCN_DBG-NEXT: s_waitcnt vmcnt(0)
; GCN_DBG-NEXT: v_readlane_b32 s0, v0, 1
; GCN_DBG-NEXT: v_readlane_b32 s2, v0, 0
; GCN_DBG-NEXT: s_mov_b32 s1, 2
; GCN_DBG-NEXT: s_lshl_b32 s1, s0, s1
; GCN_DBG-NEXT: s_add_i32 s1, s1, s2
; GCN_DBG-NEXT: s_mov_b32 s2, 0x80
; GCN_DBG-NEXT: s_add_i32 s1, s1, s2
; GCN_DBG-NEXT: s_mov_b32 m0, -1
; GCN_DBG-NEXT: v_mov_b32_e32 v1, s1
; GCN_DBG-NEXT: ds_read_b32 v1, v1
; GCN_DBG-NEXT: s_mov_b32 s2, 1.0
; GCN_DBG-NEXT: s_waitcnt lgkmcnt(0)
; GCN_DBG-NEXT: v_add_f32_e64 v2, v1, s2
; GCN_DBG-NEXT: s_mov_b32 m0, -1
; GCN_DBG-NEXT: v_mov_b32_e32 v1, s1
; GCN_DBG-NEXT: ds_write_b32 v1, v2
; GCN_DBG-NEXT: s_mov_b32 s1, 1
; GCN_DBG-NEXT: s_add_i32 s0, s0, s1
; GCN_DBG-NEXT: s_mov_b64 s[2:3], 0
; GCN_DBG-NEXT: s_and_b64 vcc, exec, s[2:3]
; GCN_DBG-NEXT: v_writelane_b32 v0, s0, 1
; GCN_DBG-NEXT: s_or_saveexec_b64 s[4:5], -1
; GCN_DBG-NEXT: buffer_store_dword v0, off, s[12:15], 0 offset:4 ; 4-byte Folded Spill
; GCN_DBG-NEXT: s_mov_b64 exec, s[4:5]
; GCN_DBG-NEXT: s_cbranch_vccnz .LBB1_1
; GCN_DBG-NEXT: s_branch .LBB1_2
entry:
  br label %for.body

for.exit:
  ret void

for.body:
  %indvar = phi i32 [ %inc, %for.body ], [ 0, %entry ]
  %tmp = add i32 %indvar, 32
  %arrayidx = getelementptr float, ptr addrspace(3) %ptr, i32 %tmp
  %vecload = load float, ptr addrspace(3) %arrayidx, align 4
  %add = fadd float %vecload, 1.0
  store float %add, ptr addrspace(3) %arrayidx, align 8
  %inc = add i32 %indvar, 1
  br i1 true, label %for.body, label %for.exit
}
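
; loop_const_false: the backedge condition is constant false, so the body runs
; exactly once; the default pipeline emits straight-line code with no branch.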
define amdgpu_kernel void @loop_const_false(ptr addrspace(3) %ptr, i32 %n) nounwind {
; GCN-LABEL: loop_const_false:
; GCN: ; %bb.0: ; %entry
; GCN-NEXT: s_load_dword s0, s[0:1], 0x9
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: v_mov_b32_e32 v0, s0
; GCN-NEXT: s_mov_b32 m0, -1
; GCN-NEXT: ds_read_b32 v1, v0 offset:128
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: v_add_f32_e32 v1, 1.0, v1
; GCN-NEXT: ds_write_b32 v0, v1 offset:128
; GCN-NEXT: s_endpgm
;
; GCN_DBG-LABEL: loop_const_false:
; GCN_DBG: ; %bb.0: ; %entry
; GCN_DBG-NEXT: s_mov_b32 s12, SCRATCH_RSRC_DWORD0
; GCN_DBG-NEXT: s_mov_b32 s13, SCRATCH_RSRC_DWORD1
; GCN_DBG-NEXT: s_mov_b32 s14, -1
; GCN_DBG-NEXT: s_mov_b32 s15, 0xe8f000
; GCN_DBG-NEXT: s_add_u32 s12, s12, s9
; GCN_DBG-NEXT: s_addc_u32 s13, s13, 0
; GCN_DBG-NEXT: ; implicit-def: $vgpr0 : SGPR spill to VGPR lane
; GCN_DBG-NEXT: s_load_dword s0, s[2:3], 0x9
; GCN_DBG-NEXT: s_waitcnt lgkmcnt(0)
; GCN_DBG-NEXT: v_writelane_b32 v0, s0, 0
; GCN_DBG-NEXT: s_mov_b32 s0, 0
; GCN_DBG-NEXT: v_writelane_b32 v0, s0, 1
; GCN_DBG-NEXT: s_or_saveexec_b64 s[4:5], -1
; GCN_DBG-NEXT: buffer_store_dword v0, off, s[12:15], 0 offset:4 ; 4-byte Folded Spill
; GCN_DBG-NEXT: s_mov_b64 exec, s[4:5]
; GCN_DBG-NEXT: s_branch .LBB2_2
; GCN_DBG-NEXT: .LBB2_1: ; %for.exit
; GCN_DBG-NEXT: s_or_saveexec_b64 s[4:5], -1
; GCN_DBG-NEXT: s_waitcnt expcnt(0)
; GCN_DBG-NEXT: buffer_load_dword v0, off, s[12:15], 0 offset:4 ; 4-byte Folded Reload
; GCN_DBG-NEXT: s_mov_b64 exec, s[4:5]
; GCN_DBG-NEXT: ; kill: killed $vgpr0
; GCN_DBG-NEXT: s_endpgm
; GCN_DBG-NEXT: .LBB2_2: ; %for.body
; GCN_DBG-NEXT: ; =>This Inner Loop Header: Depth=1
; GCN_DBG-NEXT: s_or_saveexec_b64 s[4:5], -1
; GCN_DBG-NEXT: s_waitcnt expcnt(0)
; GCN_DBG-NEXT: buffer_load_dword v0, off, s[12:15], 0 offset:4 ; 4-byte Folded Reload
; GCN_DBG-NEXT: s_mov_b64 exec, s[4:5]
; GCN_DBG-NEXT: s_waitcnt vmcnt(0)
; GCN_DBG-NEXT: v_readlane_b32 s0, v0, 1
; GCN_DBG-NEXT: v_readlane_b32 s2, v0, 0
; GCN_DBG-NEXT: s_mov_b32 s1, 2
; GCN_DBG-NEXT: s_lshl_b32 s1, s0, s1
; GCN_DBG-NEXT: s_add_i32 s1, s1, s2
; GCN_DBG-NEXT: s_mov_b32 s2, 0x80
; GCN_DBG-NEXT: s_add_i32 s1, s1, s2
; GCN_DBG-NEXT: s_mov_b32 m0, -1
; GCN_DBG-NEXT: v_mov_b32_e32 v1, s1
; GCN_DBG-NEXT: ds_read_b32 v1, v1
; GCN_DBG-NEXT: s_mov_b32 s2, 1.0
; GCN_DBG-NEXT: s_waitcnt lgkmcnt(0)
; GCN_DBG-NEXT: v_add_f32_e64 v2, v1, s2
; GCN_DBG-NEXT: s_mov_b32 m0, -1
; GCN_DBG-NEXT: v_mov_b32_e32 v1, s1
; GCN_DBG-NEXT: ds_write_b32 v1, v2
; GCN_DBG-NEXT: s_mov_b32 s1, 1
; GCN_DBG-NEXT: s_add_i32 s0, s0, s1
; GCN_DBG-NEXT: s_mov_b64 s[2:3], -1
; GCN_DBG-NEXT: s_and_b64 vcc, exec, s[2:3]
; GCN_DBG-NEXT: v_writelane_b32 v0, s0, 1
; GCN_DBG-NEXT: s_or_saveexec_b64 s[4:5], -1
; GCN_DBG-NEXT: buffer_store_dword v0, off, s[12:15], 0 offset:4 ; 4-byte Folded Spill
; GCN_DBG-NEXT: s_mov_b64 exec, s[4:5]
; GCN_DBG-NEXT: s_cbranch_vccnz .LBB2_1
; GCN_DBG-NEXT: s_branch .LBB2_2
entry:
  br label %for.body

for.exit:
  ret void

; XXX - Should there be an S_ENDPGM?
for.body:
  %indvar = phi i32 [ %inc, %for.body ], [ 0, %entry ]
  %tmp = add i32 %indvar, 32
  %arrayidx = getelementptr float, ptr addrspace(3) %ptr, i32 %tmp
  %vecload = load float, ptr addrspace(3) %arrayidx, align 4
  %add = fadd float %vecload, 1.0
  store float %add, ptr addrspace(3) %arrayidx, align 8
  %inc = add i32 %indvar, 1
  br i1 false, label %for.body, label %for.exit
}
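
; loop_const_undef: the backedge condition is undef; the default pipeline
; folds it to a single iteration, while the -O0 path keeps a conditional
; scc branch out of the loop.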
define amdgpu_kernel void @loop_const_undef(ptr addrspace(3) %ptr, i32 %n) nounwind {
; GCN-LABEL: loop_const_undef:
; GCN: ; %bb.0: ; %entry
; GCN-NEXT: s_load_dword s0, s[0:1], 0x9
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: v_mov_b32_e32 v0, s0
; GCN-NEXT: s_mov_b32 m0, -1
; GCN-NEXT: ds_read_b32 v1, v0 offset:128
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: v_add_f32_e32 v1, 1.0, v1
; GCN-NEXT: ds_write_b32 v0, v1 offset:128
; GCN-NEXT: s_endpgm
;
; GCN_DBG-LABEL: loop_const_undef:
; GCN_DBG: ; %bb.0: ; %entry
; GCN_DBG-NEXT: s_mov_b32 s12, SCRATCH_RSRC_DWORD0
; GCN_DBG-NEXT: s_mov_b32 s13, SCRATCH_RSRC_DWORD1
; GCN_DBG-NEXT: s_mov_b32 s14, -1
; GCN_DBG-NEXT: s_mov_b32 s15, 0xe8f000
; GCN_DBG-NEXT: s_add_u32 s12, s12, s9
; GCN_DBG-NEXT: s_addc_u32 s13, s13, 0
; GCN_DBG-NEXT: ; implicit-def: $vgpr0 : SGPR spill to VGPR lane
; GCN_DBG-NEXT: s_load_dword s0, s[2:3], 0x9
; GCN_DBG-NEXT: s_waitcnt lgkmcnt(0)
; GCN_DBG-NEXT: v_writelane_b32 v0, s0, 0
; GCN_DBG-NEXT: s_mov_b32 s0, 0
; GCN_DBG-NEXT: v_writelane_b32 v0, s0, 1
; GCN_DBG-NEXT: s_or_saveexec_b64 s[4:5], -1
; GCN_DBG-NEXT: buffer_store_dword v0, off, s[12:15], 0 offset:4 ; 4-byte Folded Spill
; GCN_DBG-NEXT: s_mov_b64 exec, s[4:5]
; GCN_DBG-NEXT: s_branch .LBB3_2
; GCN_DBG-NEXT: .LBB3_1: ; %for.exit
; GCN_DBG-NEXT: s_or_saveexec_b64 s[4:5], -1
; GCN_DBG-NEXT: s_waitcnt expcnt(0)
; GCN_DBG-NEXT: buffer_load_dword v0, off, s[12:15], 0 offset:4 ; 4-byte Folded Reload
; GCN_DBG-NEXT: s_mov_b64 exec, s[4:5]
; GCN_DBG-NEXT: ; kill: killed $vgpr0
; GCN_DBG-NEXT: s_endpgm
; GCN_DBG-NEXT: .LBB3_2: ; %for.body
; GCN_DBG-NEXT: ; =>This Inner Loop Header: Depth=1
; GCN_DBG-NEXT: s_or_saveexec_b64 s[4:5], -1
; GCN_DBG-NEXT: s_waitcnt expcnt(0)
; GCN_DBG-NEXT: buffer_load_dword v0, off, s[12:15], 0 offset:4 ; 4-byte Folded Reload
; GCN_DBG-NEXT: s_mov_b64 exec, s[4:5]
; GCN_DBG-NEXT: s_waitcnt vmcnt(0)
; GCN_DBG-NEXT: v_readlane_b32 s0, v0, 1
; GCN_DBG-NEXT: v_readlane_b32 s2, v0, 0
; GCN_DBG-NEXT: s_mov_b32 s1, 2
; GCN_DBG-NEXT: s_lshl_b32 s1, s0, s1
; GCN_DBG-NEXT: s_add_i32 s1, s1, s2
; GCN_DBG-NEXT: s_mov_b32 s2, 0x80
; GCN_DBG-NEXT: s_add_i32 s1, s1, s2
; GCN_DBG-NEXT: s_mov_b32 m0, -1
; GCN_DBG-NEXT: v_mov_b32_e32 v1, s1
; GCN_DBG-NEXT: ds_read_b32 v1, v1
; GCN_DBG-NEXT: s_mov_b32 s2, 1.0
; GCN_DBG-NEXT: s_waitcnt lgkmcnt(0)
; GCN_DBG-NEXT: v_add_f32_e64 v2, v1, s2
; GCN_DBG-NEXT: s_mov_b32 m0, -1
; GCN_DBG-NEXT: v_mov_b32_e32 v1, s1
; GCN_DBG-NEXT: ds_write_b32 v1, v2
; GCN_DBG-NEXT: s_mov_b32 s1, 1
; GCN_DBG-NEXT: s_add_i32 s0, s0, s1
; GCN_DBG-NEXT: v_writelane_b32 v0, s0, 1
; GCN_DBG-NEXT: s_or_saveexec_b64 s[4:5], -1
; GCN_DBG-NEXT: buffer_store_dword v0, off, s[12:15], 0 offset:4 ; 4-byte Folded Spill
; GCN_DBG-NEXT: s_mov_b64 exec, s[4:5]
; GCN_DBG-NEXT: s_cbranch_scc1 .LBB3_1
; GCN_DBG-NEXT: s_branch .LBB3_2
entry:
  br label %for.body

for.exit:
  ret void

; XXX - Should there be an s_endpgm?
for.body:
  %indvar = phi i32 [ %inc, %for.body ], [ 0, %entry ]
  %tmp = add i32 %indvar, 32
  %arrayidx = getelementptr float, ptr addrspace(3) %ptr, i32 %tmp
  %vecload = load float, ptr addrspace(3) %arrayidx, align 4
  %add = fadd float %vecload, 1.0
  store float %add, ptr addrspace(3) %arrayidx, align 8
  %inc = add i32 %indvar, 1
  br i1 undef, label %for.body, label %for.exit
}
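
; loop_arg_0: the backedge condition is a uniform i1 loaded from LDS address
; null before the loop; it is materialized once and tested via vcc on every
; iteration.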
define amdgpu_kernel void @loop_arg_0(ptr addrspace(3) %ptr, i32 %n) nounwind {
; GCN-LABEL: loop_arg_0:
; GCN: ; %bb.0: ; %entry
; GCN-NEXT: v_mov_b32_e32 v0, 0
; GCN-NEXT: s_mov_b32 m0, -1
; GCN-NEXT: ds_read_u8 v0, v0
; GCN-NEXT: s_load_dword s4, s[0:1], 0x9
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: v_readfirstlane_b32 s0, v0
; GCN-NEXT: s_bitcmp1_b32 s0, 0
; GCN-NEXT: s_cselect_b64 s[0:1], -1, 0
; GCN-NEXT: s_xor_b64 s[2:3], s[0:1], -1
; GCN-NEXT: s_add_i32 s0, s4, 0x80
; GCN-NEXT: s_and_b64 vcc, exec, s[2:3]
; GCN-NEXT: .LBB4_1: ; %for.body
; GCN-NEXT: ; =>This Inner Loop Header: Depth=1
; GCN-NEXT: v_mov_b32_e32 v0, s0
; GCN-NEXT: ds_read_b32 v1, v0
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: v_add_f32_e32 v1, 1.0, v1
; GCN-NEXT: ds_write_b32 v0, v1
; GCN-NEXT: s_add_i32 s0, s0, 4
; GCN-NEXT: s_mov_b64 vcc, vcc
; GCN-NEXT: s_cbranch_vccz .LBB4_1
; GCN-NEXT: ; %bb.2: ; %for.exit
; GCN-NEXT: s_endpgm
;
; GCN_DBG-LABEL: loop_arg_0:
; GCN_DBG: ; %bb.0: ; %entry
; GCN_DBG-NEXT: s_mov_b32 s12, SCRATCH_RSRC_DWORD0
; GCN_DBG-NEXT: s_mov_b32 s13, SCRATCH_RSRC_DWORD1
; GCN_DBG-NEXT: s_mov_b32 s14, -1
; GCN_DBG-NEXT: s_mov_b32 s15, 0xe8f000
; GCN_DBG-NEXT: s_add_u32 s12, s12, s9
; GCN_DBG-NEXT: s_addc_u32 s13, s13, 0
; GCN_DBG-NEXT: ; implicit-def: $vgpr0 : SGPR spill to VGPR lane
; GCN_DBG-NEXT: s_load_dword s0, s[2:3], 0x9
; GCN_DBG-NEXT: s_waitcnt lgkmcnt(0)
; GCN_DBG-NEXT: v_writelane_b32 v0, s0, 0
; GCN_DBG-NEXT: v_mov_b32_e32 v1, 0
; GCN_DBG-NEXT: s_mov_b32 m0, -1
; GCN_DBG-NEXT: ds_read_u8 v1, v1
; GCN_DBG-NEXT: s_waitcnt lgkmcnt(0)
; GCN_DBG-NEXT: v_readfirstlane_b32 s0, v1
; GCN_DBG-NEXT: s_and_b32 s0, 1, s0
; GCN_DBG-NEXT: s_cmp_eq_u32 s0, 1
; GCN_DBG-NEXT: s_cselect_b64 s[0:1], -1, 0
; GCN_DBG-NEXT: s_mov_b64 s[2:3], -1
; GCN_DBG-NEXT: s_xor_b64 s[0:1], s[0:1], s[2:3]
; GCN_DBG-NEXT: v_writelane_b32 v0, s0, 1
; GCN_DBG-NEXT: v_writelane_b32 v0, s1, 2
; GCN_DBG-NEXT: s_mov_b32 s0, 0
; GCN_DBG-NEXT: v_writelane_b32 v0, s0, 3
; GCN_DBG-NEXT: s_or_saveexec_b64 s[6:7], -1
; GCN_DBG-NEXT: buffer_store_dword v0, off, s[12:15], 0 offset:4 ; 4-byte Folded Spill
; GCN_DBG-NEXT: s_mov_b64 exec, s[6:7]
; GCN_DBG-NEXT: s_branch .LBB4_2
; GCN_DBG-NEXT: .LBB4_1: ; %for.exit
; GCN_DBG-NEXT: s_or_saveexec_b64 s[6:7], -1
; GCN_DBG-NEXT: s_waitcnt expcnt(0)
; GCN_DBG-NEXT: buffer_load_dword v0, off, s[12:15], 0 offset:4 ; 4-byte Folded Reload
; GCN_DBG-NEXT: s_mov_b64 exec, s[6:7]
; GCN_DBG-NEXT: ; kill: killed $vgpr0
; GCN_DBG-NEXT: s_endpgm
; GCN_DBG-NEXT: .LBB4_2: ; %for.body
; GCN_DBG-NEXT: ; =>This Inner Loop Header: Depth=1
; GCN_DBG-NEXT: s_or_saveexec_b64 s[6:7], -1
; GCN_DBG-NEXT: s_waitcnt expcnt(0)
; GCN_DBG-NEXT: buffer_load_dword v0, off, s[12:15], 0 offset:4 ; 4-byte Folded Reload
; GCN_DBG-NEXT: s_mov_b64 exec, s[6:7]
; GCN_DBG-NEXT: s_waitcnt vmcnt(0)
; GCN_DBG-NEXT: v_readlane_b32 s0, v0, 3
; GCN_DBG-NEXT: v_readlane_b32 s2, v0, 1
; GCN_DBG-NEXT: v_readlane_b32 s3, v0, 2
; GCN_DBG-NEXT: v_readlane_b32 s4, v0, 0
; GCN_DBG-NEXT: s_mov_b32 s1, 2
; GCN_DBG-NEXT: s_lshl_b32 s1, s0, s1
; GCN_DBG-NEXT: s_add_i32 s1, s1, s4
; GCN_DBG-NEXT: s_mov_b32 s4, 0x80
; GCN_DBG-NEXT: s_add_i32 s1, s1, s4
; GCN_DBG-NEXT: s_mov_b32 m0, -1
; GCN_DBG-NEXT: v_mov_b32_e32 v1, s1
; GCN_DBG-NEXT: ds_read_b32 v1, v1
; GCN_DBG-NEXT: s_mov_b32 s4, 1.0
; GCN_DBG-NEXT: s_waitcnt lgkmcnt(0)
; GCN_DBG-NEXT: v_add_f32_e64 v2, v1, s4
; GCN_DBG-NEXT: s_mov_b32 m0, -1
; GCN_DBG-NEXT: v_mov_b32_e32 v1, s1
; GCN_DBG-NEXT: ds_write_b32 v1, v2
; GCN_DBG-NEXT: s_mov_b32 s1, 1
; GCN_DBG-NEXT: s_add_i32 s0, s0, s1
; GCN_DBG-NEXT: s_and_b64 vcc, exec, s[2:3]
; GCN_DBG-NEXT: v_writelane_b32 v0, s0, 3
; GCN_DBG-NEXT: s_or_saveexec_b64 s[6:7], -1
; GCN_DBG-NEXT: buffer_store_dword v0, off, s[12:15], 0 offset:4 ; 4-byte Folded Spill
; GCN_DBG-NEXT: s_mov_b64 exec, s[6:7]
; GCN_DBG-NEXT: s_cbranch_vccnz .LBB4_1
; GCN_DBG-NEXT: s_branch .LBB4_2
entry:
  %cond = load volatile i1, ptr addrspace(3) null
  br label %for.body

for.exit:
  ret void

for.body:
  %indvar = phi i32 [ %inc, %for.body ], [ 0, %entry ]
  %tmp = add i32 %indvar, 32
  %arrayidx = getelementptr float, ptr addrspace(3) %ptr, i32 %tmp
  %vecload = load float, ptr addrspace(3) %arrayidx, align 4
  %add = fadd float %vecload, 1.0
  store float %add, ptr addrspace(3) %arrayidx, align 8
  %inc = add i32 %indvar, 1
  br i1 %cond, label %for.body, label %for.exit
}
!llvm.module.flags = !{!0}
!0 = !{i32 1, !"amdgpu_code_object_version", i32 500}