; Follow-up as discussed when using VPInstruction::ResumePhi for all resume
; values (#112147). This patch explicitly adds incoming values for each
; predecessor in VPlan. This simplifies codegen and allows transformations
; adjusting the predecessors of blocks, NFC modulo incoming block order in
; phis.
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
; RUN: opt -mtriple=x86_64 -mattr=-avx,-avx2,-avx512f,+sse,-sse2,-sse3,-sse4.2 -passes=loop-vectorize -S %s | FileCheck --check-prefix=NOVEC %s
; RUN: opt -mtriple=x86_64 -mattr=-avx,-avx2,-avx512f,+sse,-sse2,-sse3,-sse4.2 -passes=loop-vectorize -force-vector-width=4 -S %s | FileCheck --check-prefix=VEC %s
; Global used as the loop's exit bound: the loop in @test runs until a
; pointer GEP'd from %p compares ugt against @h, so the trip count depends
; on the (link-time) address of @h — this is what forces the SCEV
; predicate checks in the VEC output.
@h = global i64 0
; Loop with a first-order recurrence (%idx.ext.merge feeds %idx.merge one
; iteration later) and an i16 induction zero-extended to i64. NOVEC checks
; the scalar form; VEC (-force-vector-width=4) checks that vectorization
; emits a vector.scevcheck guarding i16 IV overflow and that the scalar
; preheader gets resume phis with explicit incoming values for every
; predecessor (middle.block, entry, vector.scevcheck).
define void @test(ptr %p) {
; NOVEC-LABEL: define void @test(
; NOVEC-SAME: ptr [[P:%.*]]) #[[ATTR0:[0-9]+]] {
; NOVEC-NEXT: entry:
; NOVEC-NEXT: br label [[FOR_BODY:%.*]]
; NOVEC: for.body:
; NOVEC-NEXT: [[IDX_EXT_MERGE:%.*]] = phi i64 [ 1, [[ENTRY:%.*]] ], [ [[IDX:%.*]], [[FOR_BODY]] ]
; NOVEC-NEXT: [[INC_MERGE:%.*]] = phi i16 [ 1, [[ENTRY]] ], [ [[INC:%.*]], [[FOR_BODY]] ]
; NOVEC-NEXT: [[IDX_MERGE:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[IDX_EXT_MERGE]], [[FOR_BODY]] ]
; NOVEC-NEXT: [[ADD:%.*]] = shl i64 [[IDX_MERGE]], 1
; NOVEC-NEXT: [[ARRAYIDX:%.*]] = getelementptr i64, ptr [[P]], i64 [[ADD]]
; NOVEC-NEXT: store i64 0, ptr [[ARRAYIDX]], align 8
; NOVEC-NEXT: [[INC]] = add i16 [[INC_MERGE]], 1
; NOVEC-NEXT: [[IDX]] = zext i16 [[INC]] to i64
; NOVEC-NEXT: [[GEP:%.*]] = getelementptr i64, ptr [[P]], i64 [[IDX]]
; NOVEC-NEXT: [[CMP:%.*]] = icmp ugt ptr [[GEP]], @h
; NOVEC-NEXT: br i1 [[CMP]], label [[EXIT:%.*]], label [[FOR_BODY]]
; NOVEC: exit:
; NOVEC-NEXT: ret void
;
; VEC-LABEL: define void @test(
; VEC-SAME: ptr [[P:%.*]]) #[[ATTR0:[0-9]+]] {
; VEC-NEXT: entry:
; VEC-NEXT: [[P1:%.*]] = ptrtoint ptr [[P]] to i64
; VEC-NEXT: [[TMP0:%.*]] = add i64 [[P1]], 16
; VEC-NEXT: [[UMAX2:%.*]] = call i64 @llvm.umax.i64(i64 [[TMP0]], i64 add (i64 ptrtoint (ptr @h to i64), i64 1))
; VEC-NEXT: [[TMP1:%.*]] = add i64 [[UMAX2]], -9
; VEC-NEXT: [[TMP2:%.*]] = sub i64 [[TMP1]], [[P1]]
; VEC-NEXT: [[TMP3:%.*]] = lshr i64 [[TMP2]], 3
; VEC-NEXT: [[TMP4:%.*]] = add nuw nsw i64 [[TMP3]], 1
; VEC-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP4]], 4
; VEC-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_SCEVCHECK:%.*]]
; VEC: vector.scevcheck:
; VEC-NEXT: [[TMP5:%.*]] = add i64 [[P1]], 16
; VEC-NEXT: [[UMAX:%.*]] = call i64 @llvm.umax.i64(i64 [[TMP5]], i64 add (i64 ptrtoint (ptr @h to i64), i64 1))
; VEC-NEXT: [[TMP6:%.*]] = add i64 [[UMAX]], -9
; VEC-NEXT: [[TMP7:%.*]] = sub i64 [[TMP6]], [[P1]]
; VEC-NEXT: [[TMP8:%.*]] = lshr i64 [[TMP7]], 3
; VEC-NEXT: [[TMP10:%.*]] = trunc i64 [[TMP8]] to i16
; VEC-NEXT: [[TMP11:%.*]] = add i16 2, [[TMP10]]
; VEC-NEXT: [[TMP12:%.*]] = icmp ult i16 [[TMP11]], 2
; VEC-NEXT: [[TMP13:%.*]] = icmp ugt i64 [[TMP8]], 65535
; VEC-NEXT: [[TMP14:%.*]] = or i1 [[TMP12]], [[TMP13]]
; VEC-NEXT: br i1 [[TMP14]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]]
; VEC: vector.ph:
; VEC-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP4]], 4
; VEC-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP4]], [[N_MOD_VF]]
; VEC-NEXT: [[DOTCAST:%.*]] = trunc i64 [[N_VEC]] to i16
; VEC-NEXT: [[IND_END:%.*]] = add i16 1, [[DOTCAST]]
; VEC-NEXT: br label [[VECTOR_BODY:%.*]]
; VEC: vector.body:
; VEC-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; VEC-NEXT: [[VECTOR_RECUR:%.*]] = phi <4 x i64> [ <i64 poison, i64 poison, i64 poison, i64 1>, [[VECTOR_PH]] ], [ [[TMP28:%.*]], [[VECTOR_BODY]] ]
; VEC-NEXT: [[VEC_IND:%.*]] = phi <4 x i16> [ <i16 1, i16 2, i16 3, i16 4>, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
; VEC-NEXT: [[TMP15:%.*]] = add i64 [[INDEX]], 0
; VEC-NEXT: [[TMP16:%.*]] = add i64 [[INDEX]], 1
; VEC-NEXT: [[TMP17:%.*]] = add i64 [[INDEX]], 2
; VEC-NEXT: [[TMP18:%.*]] = add i64 [[INDEX]], 3
; VEC-NEXT: [[TMP19:%.*]] = shl i64 [[TMP15]], 1
; VEC-NEXT: [[TMP20:%.*]] = shl i64 [[TMP16]], 1
; VEC-NEXT: [[TMP21:%.*]] = shl i64 [[TMP17]], 1
; VEC-NEXT: [[TMP22:%.*]] = shl i64 [[TMP18]], 1
; VEC-NEXT: [[TMP23:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP19]]
; VEC-NEXT: [[TMP24:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP20]]
; VEC-NEXT: [[TMP25:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP21]]
; VEC-NEXT: [[TMP26:%.*]] = getelementptr i64, ptr [[P]], i64 [[TMP22]]
; VEC-NEXT: store i64 0, ptr [[TMP23]], align 8
; VEC-NEXT: store i64 0, ptr [[TMP24]], align 8
; VEC-NEXT: store i64 0, ptr [[TMP25]], align 8
; VEC-NEXT: store i64 0, ptr [[TMP26]], align 8
; VEC-NEXT: [[TMP27:%.*]] = add <4 x i16> [[VEC_IND]], splat (i16 1)
; VEC-NEXT: [[TMP28]] = zext <4 x i16> [[TMP27]] to <4 x i64>
; VEC-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; VEC-NEXT: [[VEC_IND_NEXT]] = add <4 x i16> [[VEC_IND]], splat (i16 4)
; VEC-NEXT: [[TMP30:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; VEC-NEXT: br i1 [[TMP30]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; VEC: middle.block:
; VEC-NEXT: [[VECTOR_RECUR_EXTRACT:%.*]] = extractelement <4 x i64> [[TMP28]], i32 3
; VEC-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP4]], [[N_VEC]]
; VEC-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
; VEC: scalar.ph:
; VEC-NEXT: [[SCALAR_RECUR_INIT:%.*]] = phi i64 [ [[VECTOR_RECUR_EXTRACT]], [[MIDDLE_BLOCK]] ], [ 1, [[ENTRY:%.*]] ], [ 1, [[VECTOR_SCEVCHECK]] ]
; VEC-NEXT: [[BC_RESUME_VAL:%.*]] = phi i16 [ [[IND_END]], [[MIDDLE_BLOCK]] ], [ 1, [[ENTRY]] ], [ 1, [[VECTOR_SCEVCHECK]] ]
; VEC-NEXT: [[BC_RESUME_VAL3:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY]] ], [ 0, [[VECTOR_SCEVCHECK]] ]
; VEC-NEXT: br label [[FOR_BODY:%.*]]
; VEC: for.body:
; VEC-NEXT: [[SCALAR_RECUR:%.*]] = phi i64 [ [[SCALAR_RECUR_INIT]], [[SCALAR_PH]] ], [ [[IDX:%.*]], [[FOR_BODY]] ]
; VEC-NEXT: [[INC_MERGE:%.*]] = phi i16 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INC:%.*]], [[FOR_BODY]] ]
; VEC-NEXT: [[IDX_MERGE:%.*]] = phi i64 [ [[BC_RESUME_VAL3]], [[SCALAR_PH]] ], [ [[SCALAR_RECUR]], [[FOR_BODY]] ]
; VEC-NEXT: [[ADD:%.*]] = shl i64 [[IDX_MERGE]], 1
; VEC-NEXT: [[ARRAYIDX:%.*]] = getelementptr i64, ptr [[P]], i64 [[ADD]]
; VEC-NEXT: store i64 0, ptr [[ARRAYIDX]], align 8
; VEC-NEXT: [[INC]] = add i16 [[INC_MERGE]], 1
; VEC-NEXT: [[IDX]] = zext i16 [[INC]] to i64
; VEC-NEXT: [[GEP:%.*]] = getelementptr i64, ptr [[P]], i64 [[IDX]]
; VEC-NEXT: [[CMP:%.*]] = icmp ugt ptr [[GEP]], @h
; VEC-NEXT: br i1 [[CMP]], label [[EXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; VEC: exit:
; VEC-NEXT: ret void
;
entry:
  br label %for.body

for.body:
  ; %idx.ext.merge carries last iteration's %idx (first-order recurrence,
  ; init 1); %idx.merge consumes it one iteration later (init 0).
  %idx.ext.merge = phi i64 [ 1, %entry ], [ %idx, %for.body ]
  %inc.merge = phi i16 [ 1, %entry ], [ %inc, %for.body ]
  %idx.merge = phi i64 [ 0, %entry ], [ %idx.ext.merge, %for.body ]
  %add = shl i64 %idx.merge, 1
  %arrayidx = getelementptr i64, ptr %p, i64 %add
  store i64 0, ptr %arrayidx
  ; i16 increment zext'd to i64: the narrow IV can wrap, which is why the
  ; vectorizer must emit the overflow checks seen in vector.scevcheck.
  %inc = add i16 %inc.merge, 1
  %idx = zext i16 %inc to i64
  %gep = getelementptr i64, ptr %p, i64 %idx
  %cmp = icmp ugt ptr %gep, @h
  br i1 %cmp, label %exit, label %for.body

exit:
  ret void
}
;.
; VEC: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]}
; VEC: [[META1]] = !{!"llvm.loop.isvectorized", i32 1}
; VEC: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"}
; VEC: [[LOOP3]] = distinct !{[[LOOP3]], [[META1]]}
;.