This was reverted because of a miscompilation; on closer inspection, the problem was also visible in a changed LLVM regression test. This one-line follow-up fix/recommit splats the IV whenever tail-folding is requested, even if all of its users are scalar instructions after vectorisation. (Splatting the IV is what we are trying to avoid in general when it is unnecessary.) The reason is that with tail-folding, the splat IV is used by the predicate of the masked load/store instructions; the previous version omitted this, which caused the miscompilation.

The original commit message was:

If tail-folding of the scalar remainder loop is applied, the primary induction variable is splat to a vector and used by the masked load/store vector instructions, so the IV does not remain scalar. Because we now mark the IV as not remaining scalar in these cases, we do not emit the vector IV when it is unused, and the vectoriser thus produces less dead code.

Thanks to Ayal Zaks for the direction on how to fix this.
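For illustration, a minimal sketch of the tail-folded shape this refers to; the value names (%index, %trip.count.splat, %ptr) are invented for this example and not taken from the test below. When the tail is folded, the splat of the primary IV feeds the lane predicate, so the masked load/store intrinsics keep the vector IV alive:

  ; splat the scalar IV %index into all lanes, then add the per-lane offsets
  %broadcast.splatinsert = insertelement <4 x i32> undef, i32 %index, i32 0
  %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> undef, <4 x i32> zeroinitializer
  %vec.iv = add <4 x i32> %broadcast.splat, <i32 0, i32 1, i32 2, i32 3>
  ; predicate: only lanes whose IV is within the trip count are active
  %mask = icmp ule <4 x i32> %vec.iv, %trip.count.splat
  %wide.masked.load = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %ptr, i32 4, <4 x i1> %mask, <4 x i32> undef)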
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -S -loop-vectorize -mtriple=powerpc64le-unknown-unknown \
; RUN:   -force-target-max-vector-interleave=1 -mcpu=pwr9 < %s | FileCheck %s
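; Note: tail-folding is not requested here, so the vector body below only
; needs the scalar induction variable %index for the address computations;
; no splat or step vector of the IV is emitted, i.e. the dead vector IV is
; gone.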
define dso_local void @test(i32* %Arr, i32 signext %Len) {
; CHECK-LABEL: @test(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[CMP1:%.*]] = icmp slt i32 0, [[LEN:%.*]]
; CHECK-NEXT:    br i1 [[CMP1]], label [[FOR_BODY_LR_PH:%.*]], label [[FOR_COND_CLEANUP:%.*]]
; CHECK:       for.body.lr.ph:
; CHECK-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ult i32 [[LEN]], 4
; CHECK-NEXT:    br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK:       vector.ph:
; CHECK-NEXT:    [[N_MOD_VF:%.*]] = urem i32 [[LEN]], 4
; CHECK-NEXT:    [[N_VEC:%.*]] = sub i32 [[LEN]], [[N_MOD_VF]]
; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
; CHECK:       vector.body:
; CHECK-NEXT:    [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[TMP0:%.*]] = add i32 [[INDEX]], 0
; CHECK-NEXT:    [[TMP1:%.*]] = sext i32 [[TMP0]] to i64
; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds i32, i32* [[ARR:%.*]], i64 [[TMP1]]
; CHECK-NEXT:    [[TMP3:%.*]] = getelementptr inbounds i32, i32* [[TMP2]], i32 0
; CHECK-NEXT:    [[TMP4:%.*]] = bitcast i32* [[TMP3]] to <4 x i32>*
; CHECK-NEXT:    [[WIDE_LOAD:%.*]] = load <4 x i32>, <4 x i32>* [[TMP4]], align 4
; CHECK-NEXT:    [[TMP5:%.*]] = call <4 x i32> @llvm.bswap.v4i32(<4 x i32> [[WIDE_LOAD]])
; CHECK-NEXT:    [[TMP6:%.*]] = sext i32 [[TMP0]] to i64
; CHECK-NEXT:    [[TMP7:%.*]] = getelementptr inbounds i32, i32* [[ARR]], i64 [[TMP6]]
; CHECK-NEXT:    [[TMP8:%.*]] = getelementptr inbounds i32, i32* [[TMP7]], i32 0
; CHECK-NEXT:    [[TMP9:%.*]] = bitcast i32* [[TMP8]] to <4 x i32>*
; CHECK-NEXT:    store <4 x i32> [[TMP5]], <4 x i32>* [[TMP9]], align 4
; CHECK-NEXT:    [[INDEX_NEXT]] = add i32 [[INDEX]], 4
; CHECK-NEXT:    [[TMP10:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT:    br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop !0
; CHECK:       middle.block:
; CHECK-NEXT:    [[CMP_N:%.*]] = icmp eq i32 [[LEN]], [[N_VEC]]
; CHECK-NEXT:    br i1 [[CMP_N]], label [[FOR_COND_FOR_COND_CLEANUP_CRIT_EDGE:%.*]], label [[SCALAR_PH]]
; CHECK:       scalar.ph:
; CHECK-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i32 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[FOR_BODY_LR_PH]] ]
; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
; CHECK:       for.cond.for.cond.cleanup_crit_edge:
; CHECK-NEXT:    br label [[FOR_COND_CLEANUP]]
; CHECK:       for.cond.cleanup:
; CHECK-NEXT:    br label [[FOR_END:%.*]]
; CHECK:       for.body:
; CHECK-NEXT:    [[I_02:%.*]] = phi i32 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INC:%.*]], [[FOR_INC:%.*]] ]
; CHECK-NEXT:    [[IDXPROM:%.*]] = sext i32 [[I_02]] to i64
; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[ARR]], i64 [[IDXPROM]]
; CHECK-NEXT:    [[TMP11:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
; CHECK-NEXT:    [[TMP12:%.*]] = call i32 @llvm.bswap.i32(i32 [[TMP11]])
; CHECK-NEXT:    [[IDXPROM1:%.*]] = sext i32 [[I_02]] to i64
; CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, i32* [[ARR]], i64 [[IDXPROM1]]
; CHECK-NEXT:    store i32 [[TMP12]], i32* [[ARRAYIDX2]], align 4
; CHECK-NEXT:    br label [[FOR_INC]]
; CHECK:       for.inc:
; CHECK-NEXT:    [[INC]] = add nsw i32 [[I_02]], 1
; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i32 [[INC]], [[LEN]]
; CHECK-NEXT:    br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_COND_FOR_COND_CLEANUP_CRIT_EDGE]], !llvm.loop !2
; CHECK:       for.end:
; CHECK-NEXT:    ret void
;
entry:
  %cmp1 = icmp slt i32 0, %Len
  br i1 %cmp1, label %for.body.lr.ph, label %for.cond.cleanup

for.body.lr.ph:                                   ; preds = %entry
  br label %for.body

for.cond.for.cond.cleanup_crit_edge:              ; preds = %for.inc
  br label %for.cond.cleanup

for.cond.cleanup:                                 ; preds = %for.cond.for.cond.cleanup_crit_edge, %entry
  br label %for.end

for.body:                                         ; preds = %for.body.lr.ph, %for.inc
  %i.02 = phi i32 [ 0, %for.body.lr.ph ], [ %inc, %for.inc ]
  %idxprom = sext i32 %i.02 to i64
  %arrayidx = getelementptr inbounds i32, i32* %Arr, i64 %idxprom
  %0 = load i32, i32* %arrayidx, align 4
  %1 = call i32 @llvm.bswap.i32(i32 %0)
  %idxprom1 = sext i32 %i.02 to i64
  %arrayidx2 = getelementptr inbounds i32, i32* %Arr, i64 %idxprom1
  store i32 %1, i32* %arrayidx2, align 4
  br label %for.inc

for.inc:                                          ; preds = %for.body
  %inc = add nsw i32 %i.02, 1
  %cmp = icmp slt i32 %inc, %Len
  br i1 %cmp, label %for.body, label %for.cond.for.cond.cleanup_crit_edge

for.end:                                          ; preds = %for.cond.cleanup
  ret void
}

; Function Attrs: nounwind readnone speculatable willreturn
declare i32 @llvm.bswap.i32(i32)