clang-p2996/llvm/test/CodeGen/AMDGPU/cf-loop-on-constant.ll
Tim Renouf 6eaad1e539 [AMDGPU] Fixed incorrect uniform branch condition
Summary:
I had a case where multiple nested uniform ifs resulted in code that
performed v_cmp comparisons, combined the results with s_and_b64, s_or_b64,
and s_xor_b64, and used the resulting mask in s_cbranch_vccnz, without first
ensuring that the bits for inactive lanes were clear.
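
For illustration, here is a minimal sketch of the failing pattern (the
registers and label are hypothetical, not taken from the original case).
A v_cmp writes zeros for inactive lanes, but negating its result with
s_xor_b64 sets those bits, so the combined mask can be non-zero even when
every active lane's condition is false:

  v_cmp_lt_i32_e64 s[0:1], v0, v1  ; per-lane compare; inactive lanes get 0
  s_xor_b64 s[0:1], s[0:1], -1     ; negation sets the inactive lanes' bits
  s_and_b64 vcc, s[0:1], s[2:3]    ; combine the nested conditions
  s_cbranch_vccnz BB0_2            ; taken if vcc != 0, possibly only
                                   ; because of inactive-lane bits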

There was already code to insert an "s_and_b64 vcc, exec, vcc" to clear the
bits for inactive lanes in the case where the branch is instruction-selected
as s_cbranch_scc1 and then changed to s_cbranch_vccnz in SIFixSGPRCopies. I
have added the same code to SILowerControlFlow for the case where the branch
is instruction-selected directly as s_cbranch_vccnz.
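
In effect, the fix masks vcc with exec so that only active lanes can make
the branch condition non-zero; continuing the hypothetical sketch above:

  s_and_b64 vcc, exec, vcc         ; clear the bits for inactive lanes
  s_cbranch_vccnz BB0_2            ; now tests active lanes only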

This de-optimizes some cases where the s_and is not needed because vcc is
already the result of a v_cmp, or of multiple v_cmp instructions combined by
s_and/s_or, and thus already has the inactive-lane bits clear. We should add
a pass to re-optimize those cases.
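
As a hypothetical example of a case that is now pessimized: when vcc comes
straight from a single v_cmp, the inactive-lane bits are already zero, so
the inserted mask is redundant:

  v_cmp_eq_u32_e32 vcc, 1, v0      ; inactive lanes are already 0 in vcc
  s_and_b64 vcc, exec, vcc         ; redundant here; a later pass could drop it
  s_cbranch_vccnz BB0_1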

Reviewers: arsenm, kzhuravl

Subscribers: wdng, yaxunl, t-tye, llvm-commits, dstuttard, timcorringham, nhaehnle

Differential Revision: https://reviews.llvm.org/D41292

llvm-svn: 322119
2018-01-09 21:34:43 +00:00


; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
; RUN: llc -march=amdgcn -verify-machineinstrs -O0 < %s
; GCN-LABEL: {{^}}test_loop:
; GCN: [[LABEL:BB[0-9]+_[0-9]+]]: ; %for.body{{$}}
; GCN: ds_read_b32
; GCN: ds_write_b32
; GCN: s_branch [[LABEL]]
; GCN: s_endpgm
define amdgpu_kernel void @test_loop(float addrspace(3)* %ptr, i32 %n) nounwind {
entry:
  %cmp = icmp eq i32 %n, -1
  br i1 %cmp, label %for.exit, label %for.body

for.exit:
  ret void

for.body:
  %indvar = phi i32 [ %inc, %for.body ], [ 0, %entry ]
  %tmp = add i32 %indvar, 32
  %arrayidx = getelementptr float, float addrspace(3)* %ptr, i32 %tmp
  %vecload = load float, float addrspace(3)* %arrayidx, align 4
  %add = fadd float %vecload, 1.0
  store float %add, float addrspace(3)* %arrayidx, align 8
  %inc = add i32 %indvar, 1
  br label %for.body
}
; GCN-LABEL: {{^}}loop_const_true:
; GCN: [[LABEL:BB[0-9]+_[0-9]+]]:
; GCN: ds_read_b32
; GCN: ds_write_b32
; GCN: s_branch [[LABEL]]
define amdgpu_kernel void @loop_const_true(float addrspace(3)* %ptr, i32 %n) nounwind {
entry:
  br label %for.body

for.exit:
  ret void

for.body:
  %indvar = phi i32 [ %inc, %for.body ], [ 0, %entry ]
  %tmp = add i32 %indvar, 32
  %arrayidx = getelementptr float, float addrspace(3)* %ptr, i32 %tmp
  %vecload = load float, float addrspace(3)* %arrayidx, align 4
  %add = fadd float %vecload, 1.0
  store float %add, float addrspace(3)* %arrayidx, align 8
  %inc = add i32 %indvar, 1
  br i1 true, label %for.body, label %for.exit
}
; GCN-LABEL: {{^}}loop_const_false:
; GCN-NOT: s_branch
; GCN: s_endpgm
define amdgpu_kernel void @loop_const_false(float addrspace(3)* %ptr, i32 %n) nounwind {
entry:
  br label %for.body

for.exit:
  ret void

; XXX - Should there be an S_ENDPGM?
for.body:
  %indvar = phi i32 [ %inc, %for.body ], [ 0, %entry ]
  %tmp = add i32 %indvar, 32
  %arrayidx = getelementptr float, float addrspace(3)* %ptr, i32 %tmp
  %vecload = load float, float addrspace(3)* %arrayidx, align 4
  %add = fadd float %vecload, 1.0
  store float %add, float addrspace(3)* %arrayidx, align 8
  %inc = add i32 %indvar, 1
  br i1 false, label %for.body, label %for.exit
}
; GCN-LABEL: {{^}}loop_const_undef:
; GCN-NOT: s_branch
; GCN: s_endpgm
define amdgpu_kernel void @loop_const_undef(float addrspace(3)* %ptr, i32 %n) nounwind {
entry:
  br label %for.body

for.exit:
  ret void

; XXX - Should there be an s_endpgm?
for.body:
  %indvar = phi i32 [ %inc, %for.body ], [ 0, %entry ]
  %tmp = add i32 %indvar, 32
  %arrayidx = getelementptr float, float addrspace(3)* %ptr, i32 %tmp
  %vecload = load float, float addrspace(3)* %arrayidx, align 4
  %add = fadd float %vecload, 1.0
  store float %add, float addrspace(3)* %arrayidx, align 8
  %inc = add i32 %indvar, 1
  br i1 undef, label %for.body, label %for.exit
}
; GCN-LABEL: {{^}}loop_arg_0:
; GCN: v_and_b32_e32 v{{[0-9]+}}, 1, v{{[0-9]+}}
; GCN: v_cmp_eq_u32{{[^,]*}}, 1,
; GCN: [[LOOPBB:BB[0-9]+_[0-9]+]]
; GCN: s_add_i32 s{{[0-9]+}}, s{{[0-9]+}}, 0x80
; GCN: s_add_i32 s{{[0-9]+}}, s{{[0-9]+}}, 4
; GCN: s_cbranch_vccnz [[LOOPBB]]
; GCN-NEXT: ; %bb.2
; GCN-NEXT: s_endpgm
define amdgpu_kernel void @loop_arg_0(float addrspace(3)* %ptr, i32 %n, i1 %cond) nounwind {
entry:
  br label %for.body

for.exit:
  ret void

for.body:
  %indvar = phi i32 [ %inc, %for.body ], [ 0, %entry ]
  %tmp = add i32 %indvar, 32
  %arrayidx = getelementptr float, float addrspace(3)* %ptr, i32 %tmp
  %vecload = load float, float addrspace(3)* %arrayidx, align 4
  %add = fadd float %vecload, 1.0
  store float %add, float addrspace(3)* %arrayidx, align 8
  %inc = add i32 %indvar, 1
  br i1 %cond, label %for.body, label %for.exit
}