; Remove legacy ILV sinkScalarOperands, which is superseded by the
; sinkScalarOperands VPlan transforms. There are a few cases that aren't
; handled by VPlan's sinkScalarOperands, because the recipes don't support
; replicating: pointer inductions and blends. This could probably be improved
; further by allowing replication for more recipes, but the extra complexity
; may not be warranted.
; Depends on https://github.com/llvm/llvm-project/pull/136021.
; PR: https://github.com/llvm/llvm-project/pull/136023
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -passes=loop-vectorize -force-vector-interleave=4 -pass-remarks='loop-vectorize' -disable-output -S 2>&1 | FileCheck %s --check-prefix=CHECK-REMARKS
; RUN: opt < %s -passes=loop-vectorize -force-vector-interleave=4 -S | FileCheck %s

; These tests are to check that fold-tail procedure produces correct scalar code when
; loop-vectorization is only unrolling but not vectorizing.

; CHECK-REMARKS: remark: {{.*}} interleaved loop (interleaved count: 4)
; CHECK-REMARKS-NEXT: remark: {{.*}} interleaved loop (interleaved count: 4)
; CHECK-REMARKS-NOT: remark: {{.*}} vectorized loop

define void @VF1-VPlanExe(ptr %dst) {
|
|
; CHECK-LABEL: @VF1-VPlanExe(
|
|
; CHECK-NEXT: entry:
|
|
; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
|
|
; CHECK: vector.ph:
|
|
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
|
|
; CHECK: vector.body:
|
|
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[PRED_STORE_CONTINUE6:%.*]] ]
|
|
; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[INDEX]], 0
|
|
; CHECK-NEXT: [[TMP1:%.*]] = add i64 [[INDEX]], 1
|
|
; CHECK-NEXT: [[TMP2:%.*]] = add i64 [[INDEX]], 2
|
|
; CHECK-NEXT: [[TMP3:%.*]] = add i64 [[INDEX]], 3
|
|
; CHECK-NEXT: [[TMP4:%.*]] = icmp ule i64 [[TMP0]], 14
|
|
; CHECK-NEXT: [[TMP5:%.*]] = icmp ule i64 [[TMP1]], 14
|
|
; CHECK-NEXT: [[TMP6:%.*]] = icmp ule i64 [[TMP2]], 14
|
|
; CHECK-NEXT: [[TMP7:%.*]] = icmp ule i64 [[TMP3]], 14
|
|
; CHECK-NEXT: br i1 [[TMP4]], label [[PRED_STORE_IF:%.*]], label [[PRED_STORE_CONTINUE:%.*]]
|
|
; CHECK: pred.store.if:
|
|
; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i32, ptr [[DST:%.*]], i64 [[TMP0]]
|
|
; CHECK-NEXT: store i32 0, ptr [[TMP8]], align 4
|
|
; CHECK-NEXT: br label [[PRED_STORE_CONTINUE]]
|
|
; CHECK: pred.store.continue:
|
|
; CHECK-NEXT: br i1 [[TMP5]], label [[PRED_STORE_IF1:%.*]], label [[PRED_STORE_CONTINUE2:%.*]]
|
|
; CHECK: pred.store.if1:
|
|
; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds i32, ptr [[DST]], i64 [[TMP1]]
|
|
; CHECK-NEXT: store i32 0, ptr [[TMP9]], align 4
|
|
; CHECK-NEXT: br label [[PRED_STORE_CONTINUE2]]
|
|
; CHECK: pred.store.continue2:
|
|
; CHECK-NEXT: br i1 [[TMP6]], label [[PRED_STORE_IF3:%.*]], label [[PRED_STORE_CONTINUE4:%.*]]
|
|
; CHECK: pred.store.if3:
|
|
; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds i32, ptr [[DST]], i64 [[TMP2]]
|
|
; CHECK-NEXT: store i32 0, ptr [[TMP10]], align 4
|
|
; CHECK-NEXT: br label [[PRED_STORE_CONTINUE4]]
|
|
; CHECK: pred.store.continue4:
|
|
; CHECK-NEXT: br i1 [[TMP7]], label [[PRED_STORE_IF5:%.*]], label [[PRED_STORE_CONTINUE6]]
|
|
; CHECK: pred.store.if5:
|
|
; CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds i32, ptr [[DST]], i64 [[TMP3]]
|
|
; CHECK-NEXT: store i32 0, ptr [[TMP11]], align 4
|
|
; CHECK-NEXT: br label [[PRED_STORE_CONTINUE6]]
|
|
; CHECK: pred.store.continue6:
|
|
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
|
|
; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], 16
|
|
; CHECK-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
|
|
; CHECK: middle.block:
|
|
; CHECK-NEXT: br label [[FOR_COND_CLEANUP:%.*]]
|
|
; CHECK: scalar.ph:
|
|
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ]
|
|
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
|
|
; CHECK: for.cond.cleanup:
|
|
; CHECK-NEXT: ret void
|
|
; CHECK: for.body:
|
|
; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
|
|
; CHECK-NEXT: [[DST_PTR:%.*]] = getelementptr inbounds i32, ptr [[DST]], i64 [[INDVARS_IV]]
|
|
; CHECK-NEXT: store i32 0, ptr [[DST_PTR]], align 4
|
|
; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
|
|
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 15
|
|
; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
|
|
;
|
|
entry:
|
|
br label %for.body
|
|
|
|
for.cond.cleanup:
|
|
ret void
|
|
|
|
for.body:
|
|
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
|
|
%dst.ptr = getelementptr inbounds i32, ptr %dst, i64 %indvars.iv
|
|
store i32 0, ptr %dst.ptr
|
|
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
|
|
%exitcond = icmp eq i64 %indvars.iv.next, 15
|
|
br i1 %exitcond, label %for.cond.cleanup, label %for.body
|
|
}

; Note: Most scalar pointer induction GEPs could be sunk into the conditional
; blocks.
define void @VF1-VPWidenCanonicalIVRecipeExe(ptr %ptr1) {
|
|
; CHECK-LABEL: @VF1-VPWidenCanonicalIVRecipeExe(
|
|
; CHECK-NEXT: entry:
|
|
; CHECK-NEXT: [[PTR2:%.*]] = getelementptr inbounds double, ptr [[PTR1:%.*]], i64 15
|
|
; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
|
|
; CHECK: vector.ph:
|
|
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
|
|
; CHECK: vector.body:
|
|
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[PRED_STORE_CONTINUE12:%.*]] ]
|
|
; CHECK-NEXT: [[OFFSET_IDX:%.*]] = mul i64 [[INDEX]], 8
|
|
; CHECK-NEXT: [[TMP4:%.*]] = add i64 [[OFFSET_IDX]], 8
|
|
; CHECK-NEXT: [[TMP5:%.*]] = add i64 [[OFFSET_IDX]], 16
|
|
; CHECK-NEXT: [[TMP6:%.*]] = add i64 [[OFFSET_IDX]], 24
|
|
; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[PTR1]], i64 [[OFFSET_IDX]]
|
|
; CHECK-NEXT: [[NEXT_GEP1:%.*]] = getelementptr i8, ptr [[PTR1]], i64 [[TMP4]]
|
|
; CHECK-NEXT: [[NEXT_GEP2:%.*]] = getelementptr i8, ptr [[PTR1]], i64 [[TMP5]]
|
|
; CHECK-NEXT: [[NEXT_GEP3:%.*]] = getelementptr i8, ptr [[PTR1]], i64 [[TMP6]]
|
|
; CHECK-NEXT: [[VEC_IV:%.*]] = add i64 [[INDEX]], 0
|
|
; CHECK-NEXT: [[VEC_IV4:%.*]] = add i64 [[INDEX]], 1
|
|
; CHECK-NEXT: [[VEC_IV5:%.*]] = add i64 [[INDEX]], 2
|
|
; CHECK-NEXT: [[VEC_IV6:%.*]] = add i64 [[INDEX]], 3
|
|
; CHECK-NEXT: [[TMP0:%.*]] = icmp ule i64 [[VEC_IV]], 14
|
|
; CHECK-NEXT: [[TMP1:%.*]] = icmp ule i64 [[VEC_IV4]], 14
|
|
; CHECK-NEXT: [[TMP2:%.*]] = icmp ule i64 [[VEC_IV5]], 14
|
|
; CHECK-NEXT: [[TMP3:%.*]] = icmp ule i64 [[VEC_IV6]], 14
|
|
; CHECK-NEXT: br i1 [[TMP0]], label [[PRED_STORE_IF:%.*]], label [[PRED_STORE_CONTINUE:%.*]]
|
|
; CHECK: pred.store.if:
|
|
; CHECK-NEXT: store double 0.000000e+00, ptr [[NEXT_GEP]], align 8
|
|
; CHECK-NEXT: br label [[PRED_STORE_CONTINUE]]
|
|
; CHECK: pred.store.continue:
|
|
; CHECK-NEXT: br i1 [[TMP1]], label [[PRED_STORE_IF7:%.*]], label [[PRED_STORE_CONTINUE8:%.*]]
|
|
; CHECK: pred.store.if7:
|
|
; CHECK-NEXT: store double 0.000000e+00, ptr [[NEXT_GEP1]], align 8
|
|
; CHECK-NEXT: br label [[PRED_STORE_CONTINUE8]]
|
|
; CHECK: pred.store.continue8:
|
|
; CHECK-NEXT: br i1 [[TMP2]], label [[PRED_STORE_IF9:%.*]], label [[PRED_STORE_CONTINUE10:%.*]]
|
|
; CHECK: pred.store.if9:
|
|
; CHECK-NEXT: store double 0.000000e+00, ptr [[NEXT_GEP2]], align 8
|
|
; CHECK-NEXT: br label [[PRED_STORE_CONTINUE10]]
|
|
; CHECK: pred.store.continue10:
|
|
; CHECK-NEXT: br i1 [[TMP3]], label [[PRED_STORE_IF11:%.*]], label [[PRED_STORE_CONTINUE12]]
|
|
; CHECK: pred.store.if11:
|
|
; CHECK-NEXT: store double 0.000000e+00, ptr [[NEXT_GEP3]], align 8
|
|
; CHECK-NEXT: br label [[PRED_STORE_CONTINUE12]]
|
|
; CHECK: pred.store.continue12:
|
|
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
|
|
; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 16
|
|
; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
|
|
; CHECK: middle.block:
|
|
; CHECK-NEXT: br label [[FOR_COND_CLEANUP:%.*]]
|
|
; CHECK: scalar.ph:
|
|
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi ptr [ [[PTR1]], [[ENTRY:%.*]] ]
|
|
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
|
|
; CHECK: for.cond.cleanup:
|
|
; CHECK-NEXT: ret void
|
|
; CHECK: for.body:
|
|
; CHECK-NEXT: [[ADDR:%.*]] = phi ptr [ [[PTR:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
|
|
; CHECK-NEXT: store double 0.000000e+00, ptr [[ADDR]], align 8
|
|
; CHECK-NEXT: [[PTR]] = getelementptr inbounds double, ptr [[ADDR]], i64 1
|
|
; CHECK-NEXT: [[COND:%.*]] = icmp eq ptr [[PTR]], [[PTR2]]
|
|
; CHECK-NEXT: br i1 [[COND]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
|
|
;
|
|
entry:
|
|
%ptr2 = getelementptr inbounds double, ptr %ptr1, i64 15
|
|
br label %for.body
|
|
|
|
for.cond.cleanup:
|
|
ret void
|
|
|
|
for.body:
|
|
%addr = phi ptr [ %ptr, %for.body ], [ %ptr1, %entry ]
|
|
store double 0.0, ptr %addr
|
|
%ptr = getelementptr inbounds double, ptr %addr, i64 1
|
|
%cond = icmp eq ptr %ptr, %ptr2
|
|
br i1 %cond, label %for.cond.cleanup, label %for.body
|
|
}
;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
; CHECK-REMARKS: {{.*}}