clang-p2996/llvm/test/Transforms/LoopVectorize/runtime-checks-difference-simplifications.ll
Florian Hahn 32d1197a8f [LV] Use SCEV for subtraction of src/sink for diff runtime checks.
Instead of expanding the src/sink SCEV expressions and emitting an IR
sub to compute the difference, the subtraction can be performed
directly by ScalarEvolution. This allows the subtraction to be
simplified by SCEV, which in turn can reduce the number of redundant
runtime check instructions generated.

It also allows generating checks that are invariant w.r.t. an outer
loop, if the inner loop AddRecs have the same outer loop AddRec as
their start.
2023-11-22 12:48:04 +00:00
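
The following is a minimal, hypothetical C++ sketch of the change the commit
message describes, not the actual LoopVectorize/LoopAccessAnalysis code: the
helper names emitDiffCheckOld/emitDiffCheckNew are made up for illustration,
while getMinusSCEV and expandCodeFor are real ScalarEvolution/SCEVExpander
APIs. Subtracting in SCEV lets common terms cancel before any IR is emitted,
which is why the checks in the test below fold down to simple multiples of
%off that can then be deduplicated.

  #include "llvm/Analysis/ScalarEvolution.h"
  #include "llvm/IR/IRBuilder.h"
  #include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"

  using namespace llvm;

  // Old scheme (hypothetical helper): expand both pointer SCEVs to IR and
  // subtract with an explicit instruction. The resulting `sub` is opaque to
  // SCEV, so identical differences are not recognized as redundant.
  static Value *emitDiffCheckOld(SCEVExpander &Exp, const SCEV *SinkStart,
                                 const SCEV *SrcStart, Instruction *Loc,
                                 IRBuilder<> &B) {
    Value *Sink = Exp.expandCodeFor(SinkStart, SinkStart->getType(), Loc);
    Value *Src = Exp.expandCodeFor(SrcStart, SrcStart->getType(), Loc);
    return B.CreateSub(Sink, Src, "sink.minus.src");
  }

  // New scheme (hypothetical helper): form the difference in SCEV first, so
  // shared terms (here the common `%dst + %off * i - 5` part of each access)
  // cancel and equal differences expand to the same, already-simplified value.
  static Value *emitDiffCheckNew(ScalarEvolution &SE, SCEVExpander &Exp,
                                 const SCEV *SinkStart, const SCEV *SrcStart,
                                 Instruction *Loc) {
    const SCEV *Diff = SE.getMinusSCEV(SinkStart, SrcStart);
    return Exp.expandCodeFor(Diff, Diff->getType(), Loc);
  }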

; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -passes=loop-vectorize -force-vector-width=4 -force-vector-interleave=1 -o - -S %s | FileCheck %s
; Test case with a large number of pointer groups to check for memory
; conflicts, but with many redundant checks that can be simplified.
define void @test_large_number_of_group(ptr %dst, i64 %off, i64 %N) {
; CHECK-LABEL: @test_large_number_of_group(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[OFF_MUL_2:%.*]] = shl i64 [[OFF:%.*]], 1
; CHECK-NEXT: [[OFF_MUL_3:%.*]] = mul i64 [[OFF]], 3
; CHECK-NEXT: [[OFF_MUL_4:%.*]] = shl i64 [[OFF]], 2
; CHECK-NEXT: [[OFF_MUL_5:%.*]] = mul i64 [[OFF]], 5
; CHECK-NEXT: [[OFF_MUL_6:%.*]] = mul i64 [[OFF]], 6
; CHECK-NEXT: [[OFF_MUL_7:%.*]] = mul i64 [[OFF]], 7
; CHECK-NEXT: [[OFF_MUL_8:%.*]] = shl i64 [[OFF]], 3
; CHECK-NEXT: [[OFF_MUL_9:%.*]] = mul i64 [[OFF]], 9
; CHECK-NEXT: [[OFF_MUL_10:%.*]] = mul i64 [[OFF]], 10
; CHECK-NEXT: [[OFF_MUL_11:%.*]] = mul i64 [[OFF]], 11
; CHECK-NEXT: [[OFF_MUL_12:%.*]] = mul i64 [[OFF]], 12
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N:%.*]], 4
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_MEMCHECK:%.*]]
; CHECK: vector.memcheck:
; CHECK-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[OFF_MUL_8]], 32
; CHECK-NEXT: [[TMP0:%.*]] = shl i64 [[OFF]], 4
; CHECK-NEXT: [[DIFF_CHECK1:%.*]] = icmp ult i64 [[TMP0]], 32
; CHECK-NEXT: [[CONFLICT_RDX:%.*]] = or i1 [[DIFF_CHECK]], [[DIFF_CHECK1]]
; CHECK-NEXT: [[TMP1:%.*]] = mul i64 [[OFF]], 24
; CHECK-NEXT: [[DIFF_CHECK2:%.*]] = icmp ult i64 [[TMP1]], 32
; CHECK-NEXT: [[CONFLICT_RDX3:%.*]] = or i1 [[CONFLICT_RDX]], [[DIFF_CHECK2]]
; CHECK-NEXT: [[TMP2:%.*]] = shl i64 [[OFF]], 5
; CHECK-NEXT: [[DIFF_CHECK4:%.*]] = icmp ult i64 [[TMP2]], 32
; CHECK-NEXT: [[CONFLICT_RDX5:%.*]] = or i1 [[CONFLICT_RDX3]], [[DIFF_CHECK4]]
; CHECK-NEXT: [[TMP3:%.*]] = mul i64 [[OFF]], 40
; CHECK-NEXT: [[DIFF_CHECK6:%.*]] = icmp ult i64 [[TMP3]], 32
; CHECK-NEXT: [[CONFLICT_RDX7:%.*]] = or i1 [[CONFLICT_RDX5]], [[DIFF_CHECK6]]
; CHECK-NEXT: [[TMP4:%.*]] = mul i64 [[OFF]], 48
; CHECK-NEXT: [[DIFF_CHECK8:%.*]] = icmp ult i64 [[TMP4]], 32
; CHECK-NEXT: [[CONFLICT_RDX9:%.*]] = or i1 [[CONFLICT_RDX7]], [[DIFF_CHECK8]]
; CHECK-NEXT: [[TMP5:%.*]] = mul i64 [[OFF]], 56
; CHECK-NEXT: [[DIFF_CHECK10:%.*]] = icmp ult i64 [[TMP5]], 32
; CHECK-NEXT: [[CONFLICT_RDX11:%.*]] = or i1 [[CONFLICT_RDX9]], [[DIFF_CHECK10]]
; CHECK-NEXT: [[TMP6:%.*]] = shl i64 [[OFF]], 6
; CHECK-NEXT: [[DIFF_CHECK12:%.*]] = icmp ult i64 [[TMP6]], 32
; CHECK-NEXT: [[CONFLICT_RDX13:%.*]] = or i1 [[CONFLICT_RDX11]], [[DIFF_CHECK12]]
; CHECK-NEXT: [[TMP7:%.*]] = mul i64 [[OFF]], 72
; CHECK-NEXT: [[DIFF_CHECK14:%.*]] = icmp ult i64 [[TMP7]], 32
; CHECK-NEXT: [[CONFLICT_RDX15:%.*]] = or i1 [[CONFLICT_RDX13]], [[DIFF_CHECK14]]
; CHECK-NEXT: [[TMP8:%.*]] = mul i64 [[OFF]], 80
; CHECK-NEXT: [[DIFF_CHECK16:%.*]] = icmp ult i64 [[TMP8]], 32
; CHECK-NEXT: [[CONFLICT_RDX17:%.*]] = or i1 [[CONFLICT_RDX15]], [[DIFF_CHECK16]]
; CHECK-NEXT: [[TMP9:%.*]] = mul i64 [[OFF]], 88
; CHECK-NEXT: [[DIFF_CHECK18:%.*]] = icmp ult i64 [[TMP9]], 32
; CHECK-NEXT: [[CONFLICT_RDX19:%.*]] = or i1 [[CONFLICT_RDX17]], [[DIFF_CHECK18]]
; CHECK-NEXT: [[DIFF_CHECK20:%.*]] = icmp ult i64 [[OFF_MUL_8]], 32
; CHECK-NEXT: [[CONFLICT_RDX21:%.*]] = or i1 [[CONFLICT_RDX19]], [[DIFF_CHECK20]]
; CHECK-NEXT: [[DIFF_CHECK22:%.*]] = icmp ult i64 [[TMP0]], 32
; CHECK-NEXT: [[CONFLICT_RDX23:%.*]] = or i1 [[CONFLICT_RDX21]], [[DIFF_CHECK22]]
; CHECK-NEXT: [[DIFF_CHECK24:%.*]] = icmp ult i64 [[TMP1]], 32
; CHECK-NEXT: [[CONFLICT_RDX25:%.*]] = or i1 [[CONFLICT_RDX23]], [[DIFF_CHECK24]]
; CHECK-NEXT: [[DIFF_CHECK26:%.*]] = icmp ult i64 [[TMP2]], 32
; CHECK-NEXT: [[CONFLICT_RDX27:%.*]] = or i1 [[CONFLICT_RDX25]], [[DIFF_CHECK26]]
; CHECK-NEXT: [[DIFF_CHECK28:%.*]] = icmp ult i64 [[TMP3]], 32
; CHECK-NEXT: [[CONFLICT_RDX29:%.*]] = or i1 [[CONFLICT_RDX27]], [[DIFF_CHECK28]]
; CHECK-NEXT: [[DIFF_CHECK30:%.*]] = icmp ult i64 [[TMP4]], 32
; CHECK-NEXT: [[CONFLICT_RDX31:%.*]] = or i1 [[CONFLICT_RDX29]], [[DIFF_CHECK30]]
; CHECK-NEXT: [[DIFF_CHECK32:%.*]] = icmp ult i64 [[TMP5]], 32
; CHECK-NEXT: [[CONFLICT_RDX33:%.*]] = or i1 [[CONFLICT_RDX31]], [[DIFF_CHECK32]]
; CHECK-NEXT: [[DIFF_CHECK34:%.*]] = icmp ult i64 [[TMP6]], 32
; CHECK-NEXT: [[CONFLICT_RDX35:%.*]] = or i1 [[CONFLICT_RDX33]], [[DIFF_CHECK34]]
; CHECK-NEXT: [[DIFF_CHECK36:%.*]] = icmp ult i64 [[TMP7]], 32
; CHECK-NEXT: [[CONFLICT_RDX37:%.*]] = or i1 [[CONFLICT_RDX35]], [[DIFF_CHECK36]]
; CHECK-NEXT: [[DIFF_CHECK38:%.*]] = icmp ult i64 [[TMP8]], 32
; CHECK-NEXT: [[CONFLICT_RDX39:%.*]] = or i1 [[CONFLICT_RDX37]], [[DIFF_CHECK38]]
; CHECK-NEXT: [[DIFF_CHECK40:%.*]] = icmp ult i64 [[OFF_MUL_8]], 32
; CHECK-NEXT: [[CONFLICT_RDX41:%.*]] = or i1 [[CONFLICT_RDX39]], [[DIFF_CHECK40]]
; CHECK-NEXT: [[DIFF_CHECK42:%.*]] = icmp ult i64 [[TMP0]], 32
; CHECK-NEXT: [[CONFLICT_RDX43:%.*]] = or i1 [[CONFLICT_RDX41]], [[DIFF_CHECK42]]
; CHECK-NEXT: [[DIFF_CHECK44:%.*]] = icmp ult i64 [[TMP1]], 32
; CHECK-NEXT: [[CONFLICT_RDX45:%.*]] = or i1 [[CONFLICT_RDX43]], [[DIFF_CHECK44]]
; CHECK-NEXT: [[DIFF_CHECK46:%.*]] = icmp ult i64 [[TMP2]], 32
; CHECK-NEXT: [[CONFLICT_RDX47:%.*]] = or i1 [[CONFLICT_RDX45]], [[DIFF_CHECK46]]
; CHECK-NEXT: [[DIFF_CHECK48:%.*]] = icmp ult i64 [[TMP3]], 32
; CHECK-NEXT: [[CONFLICT_RDX49:%.*]] = or i1 [[CONFLICT_RDX47]], [[DIFF_CHECK48]]
; CHECK-NEXT: [[DIFF_CHECK50:%.*]] = icmp ult i64 [[TMP4]], 32
; CHECK-NEXT: [[CONFLICT_RDX51:%.*]] = or i1 [[CONFLICT_RDX49]], [[DIFF_CHECK50]]
; CHECK-NEXT: [[DIFF_CHECK52:%.*]] = icmp ult i64 [[TMP5]], 32
; CHECK-NEXT: [[CONFLICT_RDX53:%.*]] = or i1 [[CONFLICT_RDX51]], [[DIFF_CHECK52]]
; CHECK-NEXT: [[DIFF_CHECK54:%.*]] = icmp ult i64 [[TMP6]], 32
; CHECK-NEXT: [[CONFLICT_RDX55:%.*]] = or i1 [[CONFLICT_RDX53]], [[DIFF_CHECK54]]
; CHECK-NEXT: [[DIFF_CHECK56:%.*]] = icmp ult i64 [[TMP7]], 32
; CHECK-NEXT: [[CONFLICT_RDX57:%.*]] = or i1 [[CONFLICT_RDX55]], [[DIFF_CHECK56]]
; CHECK-NEXT: [[DIFF_CHECK58:%.*]] = icmp ult i64 [[OFF_MUL_8]], 32
; CHECK-NEXT: [[CONFLICT_RDX59:%.*]] = or i1 [[CONFLICT_RDX57]], [[DIFF_CHECK58]]
; CHECK-NEXT: [[DIFF_CHECK60:%.*]] = icmp ult i64 [[TMP0]], 32
; CHECK-NEXT: [[CONFLICT_RDX61:%.*]] = or i1 [[CONFLICT_RDX59]], [[DIFF_CHECK60]]
; CHECK-NEXT: [[DIFF_CHECK62:%.*]] = icmp ult i64 [[TMP1]], 32
; CHECK-NEXT: [[CONFLICT_RDX63:%.*]] = or i1 [[CONFLICT_RDX61]], [[DIFF_CHECK62]]
; CHECK-NEXT: [[DIFF_CHECK64:%.*]] = icmp ult i64 [[TMP2]], 32
; CHECK-NEXT: [[CONFLICT_RDX65:%.*]] = or i1 [[CONFLICT_RDX63]], [[DIFF_CHECK64]]
; CHECK-NEXT: [[DIFF_CHECK66:%.*]] = icmp ult i64 [[TMP3]], 32
; CHECK-NEXT: [[CONFLICT_RDX67:%.*]] = or i1 [[CONFLICT_RDX65]], [[DIFF_CHECK66]]
; CHECK-NEXT: [[DIFF_CHECK68:%.*]] = icmp ult i64 [[TMP4]], 32
; CHECK-NEXT: [[CONFLICT_RDX69:%.*]] = or i1 [[CONFLICT_RDX67]], [[DIFF_CHECK68]]
; CHECK-NEXT: [[DIFF_CHECK70:%.*]] = icmp ult i64 [[TMP5]], 32
; CHECK-NEXT: [[CONFLICT_RDX71:%.*]] = or i1 [[CONFLICT_RDX69]], [[DIFF_CHECK70]]
; CHECK-NEXT: [[DIFF_CHECK72:%.*]] = icmp ult i64 [[TMP6]], 32
; CHECK-NEXT: [[CONFLICT_RDX73:%.*]] = or i1 [[CONFLICT_RDX71]], [[DIFF_CHECK72]]
; CHECK-NEXT: [[DIFF_CHECK74:%.*]] = icmp ult i64 [[OFF_MUL_8]], 32
; CHECK-NEXT: [[DIFF_CHECK75:%.*]] = icmp ult i64 [[TMP0]], 32
; CHECK-NEXT: [[DIFF_CHECK76:%.*]] = icmp ult i64 [[TMP1]], 32
; CHECK-NEXT: [[DIFF_CHECK77:%.*]] = icmp ult i64 [[TMP2]], 32
; CHECK-NEXT: [[DIFF_CHECK78:%.*]] = icmp ult i64 [[TMP3]], 32
; CHECK-NEXT: [[DIFF_CHECK79:%.*]] = icmp ult i64 [[TMP4]], 32
; CHECK-NEXT: [[DIFF_CHECK80:%.*]] = icmp ult i64 [[TMP5]], 32
; CHECK-NEXT: [[DIFF_CHECK81:%.*]] = icmp ult i64 [[OFF_MUL_8]], 32
; CHECK-NEXT: [[DIFF_CHECK82:%.*]] = icmp ult i64 [[TMP0]], 32
; CHECK-NEXT: [[DIFF_CHECK83:%.*]] = icmp ult i64 [[TMP1]], 32
; CHECK-NEXT: [[DIFF_CHECK84:%.*]] = icmp ult i64 [[TMP2]], 32
; CHECK-NEXT: [[DIFF_CHECK85:%.*]] = icmp ult i64 [[TMP3]], 32
; CHECK-NEXT: [[DIFF_CHECK86:%.*]] = icmp ult i64 [[TMP4]], 32
; CHECK-NEXT: [[DIFF_CHECK87:%.*]] = icmp ult i64 [[OFF_MUL_8]], 32
; CHECK-NEXT: [[DIFF_CHECK88:%.*]] = icmp ult i64 [[TMP0]], 32
; CHECK-NEXT: [[DIFF_CHECK89:%.*]] = icmp ult i64 [[TMP1]], 32
; CHECK-NEXT: [[DIFF_CHECK90:%.*]] = icmp ult i64 [[TMP2]], 32
; CHECK-NEXT: [[DIFF_CHECK91:%.*]] = icmp ult i64 [[TMP3]], 32
; CHECK-NEXT: [[DIFF_CHECK92:%.*]] = icmp ult i64 [[OFF_MUL_8]], 32
; CHECK-NEXT: [[DIFF_CHECK93:%.*]] = icmp ult i64 [[TMP0]], 32
; CHECK-NEXT: [[DIFF_CHECK94:%.*]] = icmp ult i64 [[TMP1]], 32
; CHECK-NEXT: [[DIFF_CHECK95:%.*]] = icmp ult i64 [[TMP2]], 32
; CHECK-NEXT: [[DIFF_CHECK96:%.*]] = icmp ult i64 [[OFF_MUL_8]], 32
; CHECK-NEXT: [[DIFF_CHECK97:%.*]] = icmp ult i64 [[TMP0]], 32
; CHECK-NEXT: [[DIFF_CHECK98:%.*]] = icmp ult i64 [[TMP1]], 32
; CHECK-NEXT: [[DIFF_CHECK99:%.*]] = icmp ult i64 [[OFF_MUL_8]], 32
; CHECK-NEXT: [[DIFF_CHECK100:%.*]] = icmp ult i64 [[TMP0]], 32
; CHECK-NEXT: [[DIFF_CHECK101:%.*]] = icmp ult i64 [[OFF_MUL_8]], 32
; CHECK-NEXT: br i1 [[CONFLICT_RDX73]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], 4
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP10:%.*]] = add i64 [[INDEX]], 0
; CHECK-NEXT: [[TMP11:%.*]] = add nsw i64 [[TMP10]], -5
; CHECK-NEXT: [[TMP12:%.*]] = add i64 [[TMP11]], [[OFF]]
; CHECK-NEXT: [[TMP13:%.*]] = getelementptr i64, ptr [[DST:%.*]], i64 [[TMP12]]
; CHECK-NEXT: [[TMP14:%.*]] = getelementptr double, ptr [[TMP13]], i32 0
; CHECK-NEXT: store <4 x double> zeroinitializer, ptr [[TMP14]], align 8
; CHECK-NEXT: [[TMP15:%.*]] = add i64 [[TMP11]], [[OFF_MUL_2]]
; CHECK-NEXT: [[TMP16:%.*]] = getelementptr i64, ptr [[DST]], i64 [[TMP15]]
; CHECK-NEXT: [[TMP17:%.*]] = getelementptr double, ptr [[TMP16]], i32 0
; CHECK-NEXT: store <4 x double> zeroinitializer, ptr [[TMP17]], align 8
; CHECK-NEXT: [[TMP18:%.*]] = add i64 [[TMP11]], [[OFF_MUL_3]]
; CHECK-NEXT: [[TMP19:%.*]] = getelementptr i64, ptr [[DST]], i64 [[TMP18]]
; CHECK-NEXT: [[TMP20:%.*]] = getelementptr double, ptr [[TMP19]], i32 0
; CHECK-NEXT: store <4 x double> zeroinitializer, ptr [[TMP20]], align 8
; CHECK-NEXT: [[TMP21:%.*]] = add i64 [[TMP11]], [[OFF_MUL_4]]
; CHECK-NEXT: [[TMP22:%.*]] = getelementptr i64, ptr [[DST]], i64 [[TMP21]]
; CHECK-NEXT: [[TMP23:%.*]] = getelementptr double, ptr [[TMP22]], i32 0
; CHECK-NEXT: store <4 x double> zeroinitializer, ptr [[TMP23]], align 8
; CHECK-NEXT: [[TMP24:%.*]] = add i64 [[TMP11]], [[OFF_MUL_5]]
; CHECK-NEXT: [[TMP25:%.*]] = getelementptr i64, ptr [[DST]], i64 [[TMP24]]
; CHECK-NEXT: [[TMP26:%.*]] = getelementptr double, ptr [[TMP25]], i32 0
; CHECK-NEXT: store <4 x double> zeroinitializer, ptr [[TMP26]], align 8
; CHECK-NEXT: [[TMP27:%.*]] = add i64 [[TMP11]], [[OFF_MUL_6]]
; CHECK-NEXT: [[TMP28:%.*]] = getelementptr i64, ptr [[DST]], i64 [[TMP27]]
; CHECK-NEXT: [[TMP29:%.*]] = getelementptr double, ptr [[TMP28]], i32 0
; CHECK-NEXT: store <4 x double> zeroinitializer, ptr [[TMP29]], align 8
; CHECK-NEXT: [[TMP30:%.*]] = add i64 [[TMP11]], [[OFF_MUL_7]]
; CHECK-NEXT: [[TMP31:%.*]] = getelementptr i64, ptr [[DST]], i64 [[TMP30]]
; CHECK-NEXT: [[TMP32:%.*]] = getelementptr double, ptr [[TMP31]], i32 0
; CHECK-NEXT: store <4 x double> zeroinitializer, ptr [[TMP32]], align 8
; CHECK-NEXT: [[TMP33:%.*]] = add i64 [[TMP11]], [[OFF_MUL_8]]
; CHECK-NEXT: [[TMP34:%.*]] = getelementptr i64, ptr [[DST]], i64 [[TMP33]]
; CHECK-NEXT: [[TMP35:%.*]] = getelementptr double, ptr [[TMP34]], i32 0
; CHECK-NEXT: store <4 x double> zeroinitializer, ptr [[TMP35]], align 8
; CHECK-NEXT: [[TMP36:%.*]] = add i64 [[TMP11]], [[OFF_MUL_9]]
; CHECK-NEXT: [[TMP37:%.*]] = getelementptr i64, ptr [[DST]], i64 [[TMP36]]
; CHECK-NEXT: [[TMP38:%.*]] = getelementptr double, ptr [[TMP37]], i32 0
; CHECK-NEXT: store <4 x double> zeroinitializer, ptr [[TMP38]], align 8
; CHECK-NEXT: [[TMP39:%.*]] = add i64 [[TMP11]], [[OFF_MUL_10]]
; CHECK-NEXT: [[TMP40:%.*]] = getelementptr i64, ptr [[DST]], i64 [[TMP39]]
; CHECK-NEXT: [[TMP41:%.*]] = getelementptr double, ptr [[TMP40]], i32 0
; CHECK-NEXT: store <4 x double> zeroinitializer, ptr [[TMP41]], align 8
; CHECK-NEXT: [[TMP42:%.*]] = add i64 [[TMP11]], [[OFF_MUL_11]]
; CHECK-NEXT: [[TMP43:%.*]] = getelementptr i64, ptr [[DST]], i64 [[TMP42]]
; CHECK-NEXT: [[TMP44:%.*]] = getelementptr double, ptr [[TMP43]], i32 0
; CHECK-NEXT: store <4 x double> zeroinitializer, ptr [[TMP44]], align 8
; CHECK-NEXT: [[TMP45:%.*]] = add i64 [[TMP11]], [[OFF_MUL_12]]
; CHECK-NEXT: [[TMP46:%.*]] = getelementptr i64, ptr [[DST]], i64 [[TMP45]]
; CHECK-NEXT: [[TMP47:%.*]] = getelementptr double, ptr [[TMP46]], i32 0
; CHECK-NEXT: store <4 x double> zeroinitializer, ptr [[TMP47]], align 8
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[TMP48:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP48]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
; CHECK: scalar.ph:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ], [ 0, [[VECTOR_MEMCHECK]] ]
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
; CHECK-NEXT: [[IV_SUB_5:%.*]] = add nsw i64 [[IV]], -5
; CHECK-NEXT: [[IDX_1:%.*]] = add i64 [[IV_SUB_5]], [[OFF]]
; CHECK-NEXT: [[GEP_1:%.*]] = getelementptr i64, ptr [[DST]], i64 [[IDX_1]]
; CHECK-NEXT: store double 0.000000e+00, ptr [[GEP_1]], align 8
; CHECK-NEXT: [[IDX_2:%.*]] = add i64 [[IV_SUB_5]], [[OFF_MUL_2]]
; CHECK-NEXT: [[GEP_2:%.*]] = getelementptr i64, ptr [[DST]], i64 [[IDX_2]]
; CHECK-NEXT: store double 0.000000e+00, ptr [[GEP_2]], align 8
; CHECK-NEXT: [[IDX_3:%.*]] = add i64 [[IV_SUB_5]], [[OFF_MUL_3]]
; CHECK-NEXT: [[GEP_3:%.*]] = getelementptr i64, ptr [[DST]], i64 [[IDX_3]]
; CHECK-NEXT: store double 0.000000e+00, ptr [[GEP_3]], align 8
; CHECK-NEXT: [[IDX_4:%.*]] = add i64 [[IV_SUB_5]], [[OFF_MUL_4]]
; CHECK-NEXT: [[GEP_4:%.*]] = getelementptr i64, ptr [[DST]], i64 [[IDX_4]]
; CHECK-NEXT: store double 0.000000e+00, ptr [[GEP_4]], align 8
; CHECK-NEXT: [[IDX_5:%.*]] = add i64 [[IV_SUB_5]], [[OFF_MUL_5]]
; CHECK-NEXT: [[GEP_5:%.*]] = getelementptr i64, ptr [[DST]], i64 [[IDX_5]]
; CHECK-NEXT: store double 0.000000e+00, ptr [[GEP_5]], align 8
; CHECK-NEXT: [[IDX_6:%.*]] = add i64 [[IV_SUB_5]], [[OFF_MUL_6]]
; CHECK-NEXT: [[GEP_6:%.*]] = getelementptr i64, ptr [[DST]], i64 [[IDX_6]]
; CHECK-NEXT: store double 0.000000e+00, ptr [[GEP_6]], align 8
; CHECK-NEXT: [[IDX_7:%.*]] = add i64 [[IV_SUB_5]], [[OFF_MUL_7]]
; CHECK-NEXT: [[GEP_7:%.*]] = getelementptr i64, ptr [[DST]], i64 [[IDX_7]]
; CHECK-NEXT: store double 0.000000e+00, ptr [[GEP_7]], align 8
; CHECK-NEXT: [[IDX_8:%.*]] = add i64 [[IV_SUB_5]], [[OFF_MUL_8]]
; CHECK-NEXT: [[GEP_8:%.*]] = getelementptr i64, ptr [[DST]], i64 [[IDX_8]]
; CHECK-NEXT: store double 0.000000e+00, ptr [[GEP_8]], align 8
; CHECK-NEXT: [[IDX_9:%.*]] = add i64 [[IV_SUB_5]], [[OFF_MUL_9]]
; CHECK-NEXT: [[GEP_9:%.*]] = getelementptr i64, ptr [[DST]], i64 [[IDX_9]]
; CHECK-NEXT: store double 0.000000e+00, ptr [[GEP_9]], align 8
; CHECK-NEXT: [[IDX_10:%.*]] = add i64 [[IV_SUB_5]], [[OFF_MUL_10]]
; CHECK-NEXT: [[GEP_10:%.*]] = getelementptr i64, ptr [[DST]], i64 [[IDX_10]]
; CHECK-NEXT: store double 0.000000e+00, ptr [[GEP_10]], align 8
; CHECK-NEXT: [[IDX_11:%.*]] = add i64 [[IV_SUB_5]], [[OFF_MUL_11]]
; CHECK-NEXT: [[GEP_11:%.*]] = getelementptr i64, ptr [[DST]], i64 [[IDX_11]]
; CHECK-NEXT: store double 0.000000e+00, ptr [[GEP_11]], align 8
; CHECK-NEXT: [[IDX_12:%.*]] = add i64 [[IV_SUB_5]], [[OFF_MUL_12]]
; CHECK-NEXT: [[GEP_12:%.*]] = getelementptr i64, ptr [[DST]], i64 [[IDX_12]]
; CHECK-NEXT: store double 0.000000e+00, ptr [[GEP_12]], align 8
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
; CHECK-NEXT: br i1 [[EC]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP3:![0-9]+]]
; CHECK: exit:
; CHECK-NEXT: ret void
;
entry:
%off.mul.2 = shl i64 %off, 1
%off.mul.3 = mul i64 %off, 3
%off.mul.4 = shl i64 %off, 2
%off.mul.5 = mul i64 %off, 5
%off.mul.6 = mul i64 %off, 6
%off.mul.7 = mul i64 %off, 7
%off.mul.8 = shl i64 %off, 3
%off.mul.9 = mul i64 %off, 9
%off.mul.10 = mul i64 %off, 10
%off.mul.11 = mul i64 %off, 11
%off.mul.12 = mul i64 %off, 12
br label %loop
loop:
%iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
%iv.sub.5 = add nsw i64 %iv, -5
%idx.1 = add i64 %iv.sub.5, %off
%gep.1 = getelementptr i64, ptr %dst, i64 %idx.1
store double 0.000000e+00, ptr %gep.1, align 8
%idx.2 = add i64 %iv.sub.5, %off.mul.2
%gep.2 = getelementptr i64, ptr %dst, i64 %idx.2
store double 0.000000e+00, ptr %gep.2, align 8
%idx.3 = add i64 %iv.sub.5, %off.mul.3
%gep.3 = getelementptr i64, ptr %dst, i64 %idx.3
store double 0.000000e+00, ptr %gep.3, align 8
%idx.4 = add i64 %iv.sub.5, %off.mul.4
%gep.4 = getelementptr i64, ptr %dst, i64 %idx.4
store double 0.000000e+00, ptr %gep.4, align 8
%idx.5 = add i64 %iv.sub.5, %off.mul.5
%gep.5 = getelementptr i64, ptr %dst, i64 %idx.5
store double 0.000000e+00, ptr %gep.5, align 8
%idx.6 = add i64 %iv.sub.5, %off.mul.6
%gep.6 = getelementptr i64, ptr %dst, i64 %idx.6
store double 0.000000e+00, ptr %gep.6, align 8
%idx.7 = add i64 %iv.sub.5, %off.mul.7
%gep.7 = getelementptr i64, ptr %dst, i64 %idx.7
store double 0.000000e+00, ptr %gep.7, align 8
%idx.8 = add i64 %iv.sub.5, %off.mul.8
%gep.8 = getelementptr i64, ptr %dst, i64 %idx.8
store double 0.000000e+00, ptr %gep.8, align 8
%idx.9 = add i64 %iv.sub.5, %off.mul.9
%gep.9 = getelementptr i64, ptr %dst, i64 %idx.9
store double 0.000000e+00, ptr %gep.9, align 8
%idx.10 = add i64 %iv.sub.5, %off.mul.10
%gep.10 = getelementptr i64, ptr %dst, i64 %idx.10
store double 0.000000e+00, ptr %gep.10, align 8
%idx.11 = add i64 %iv.sub.5, %off.mul.11
%gep.11 = getelementptr i64, ptr %dst, i64 %idx.11
store double 0.000000e+00, ptr %gep.11, align 8
%idx.12 = add i64 %iv.sub.5, %off.mul.12
%gep.12 = getelementptr i64, ptr %dst, i64 %idx.12
store double 0.000000e+00, ptr %gep.12, align 8
%iv.next = add nuw nsw i64 %iv, 1
%ec = icmp eq i64 %iv.next, %N
br i1 %ec, label %exit, label %loop
exit:
ret void
}