clang-p2996/llvm/test/Transforms/LoopVectorize/runtime-checks-difference-simplifications.ll
Florian Hahn 19e6d54188 [LV] Re-use existing compare if possible for diff checks.
SCEV simplifying the subtraction may result in redundant compares that
are all OR'd together. Keep track of the generated operands in
SeenCompares, with the key being the pair of operands for the compare.

If we already generated the same compare previously, skip it.
2023-11-23 11:35:21 +00:00
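
As a rough illustration of the caching pattern described in the commit message (a minimal standalone sketch, not the actual LoopVectorize code: the Check struct, the integer stand-ins for the SCEV-expanded operands, and the string "compares" are all invented for this example), deduplication keyed on the pair of compare operands might look like:

#include <cstdint>
#include <iostream>
#include <map>
#include <string>
#include <utility>
#include <vector>

// Stand-in for one runtime difference check between a source and a sink
// pointer group; the operands are plain integers here instead of
// SCEV-expanded IR values.
struct Check {
  int64_t SinkStart;
  int64_t SrcStart;
};

int main() {
  // After SCEV simplifies the subtractions, distinct pointer-group pairs can
  // end up with identical compare operands (e.g. the repeated {8, 0} and
  // {16, 0} entries below).
  std::vector<Check> Checks = {{8, 0}, {16, 0}, {16, 0}, {24, 0}, {8, 0}};

  // SeenCompares caches the compare generated for each operand pair, so an
  // identical compare is reused rather than emitted and OR'd in again.
  std::map<std::pair<int64_t, int64_t>, std::string> SeenCompares;
  std::vector<std::string> Emitted;

  for (const Check &C : Checks) {
    auto Key = std::make_pair(C.SinkStart, C.SrcStart);
    if (SeenCompares.count(Key))
      continue; // already generated the same compare previously; skip it

    std::string Cmp = "icmp ult (" + std::to_string(C.SinkStart) + " - " +
                      std::to_string(C.SrcStart) + "), 32";
    SeenCompares.emplace(Key, Cmp);
    Emitted.push_back(Cmp);
  }

  // Only three distinct compares remain to be OR'd into the conflict check.
  for (const std::string &Cmp : Emitted)
    std::cout << Cmp << "\n";
  return 0;
}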


; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -passes=loop-vectorize -force-vector-width=4 -force-vector-interleave=1 -o - -S %s | FileCheck %s
; Test case with a large number of pointer groups that require runtime
; memory-conflict checks, many of which are redundant and can be simplified.
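; Added commentary (not part of the autogenerated assertions): the stores
; index i64 elements, so the deduplicated diff checks below compare byte
; differences that are multiples of %off * 8 (from %off * 8 up to %off * 88)
; against 32, which appears to correspond to the forced VF of 4 times the
; 8-byte access size.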
define void @test_large_number_of_group(ptr %dst, i64 %off, i64 %N) {
; CHECK-LABEL: @test_large_number_of_group(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[OFF_MUL_2:%.*]] = shl i64 [[OFF:%.*]], 1
; CHECK-NEXT: [[OFF_MUL_3:%.*]] = mul i64 [[OFF]], 3
; CHECK-NEXT: [[OFF_MUL_4:%.*]] = shl i64 [[OFF]], 2
; CHECK-NEXT: [[OFF_MUL_5:%.*]] = mul i64 [[OFF]], 5
; CHECK-NEXT: [[OFF_MUL_6:%.*]] = mul i64 [[OFF]], 6
; CHECK-NEXT: [[OFF_MUL_7:%.*]] = mul i64 [[OFF]], 7
; CHECK-NEXT: [[OFF_MUL_8:%.*]] = shl i64 [[OFF]], 3
; CHECK-NEXT: [[OFF_MUL_9:%.*]] = mul i64 [[OFF]], 9
; CHECK-NEXT: [[OFF_MUL_10:%.*]] = mul i64 [[OFF]], 10
; CHECK-NEXT: [[OFF_MUL_11:%.*]] = mul i64 [[OFF]], 11
; CHECK-NEXT: [[OFF_MUL_12:%.*]] = mul i64 [[OFF]], 12
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N:%.*]], 4
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_MEMCHECK:%.*]]
; CHECK: vector.memcheck:
; CHECK-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[OFF_MUL_8]], 32
; CHECK-NEXT: [[TMP0:%.*]] = shl i64 [[OFF]], 4
; CHECK-NEXT: [[DIFF_CHECK1:%.*]] = icmp ult i64 [[TMP0]], 32
; CHECK-NEXT: [[CONFLICT_RDX:%.*]] = or i1 [[DIFF_CHECK]], [[DIFF_CHECK1]]
; CHECK-NEXT: [[TMP1:%.*]] = mul i64 [[OFF]], 24
; CHECK-NEXT: [[DIFF_CHECK2:%.*]] = icmp ult i64 [[TMP1]], 32
; CHECK-NEXT: [[CONFLICT_RDX3:%.*]] = or i1 [[CONFLICT_RDX]], [[DIFF_CHECK2]]
; CHECK-NEXT: [[TMP2:%.*]] = shl i64 [[OFF]], 5
; CHECK-NEXT: [[DIFF_CHECK4:%.*]] = icmp ult i64 [[TMP2]], 32
; CHECK-NEXT: [[CONFLICT_RDX5:%.*]] = or i1 [[CONFLICT_RDX3]], [[DIFF_CHECK4]]
; CHECK-NEXT: [[TMP3:%.*]] = mul i64 [[OFF]], 40
; CHECK-NEXT: [[DIFF_CHECK6:%.*]] = icmp ult i64 [[TMP3]], 32
; CHECK-NEXT: [[CONFLICT_RDX7:%.*]] = or i1 [[CONFLICT_RDX5]], [[DIFF_CHECK6]]
; CHECK-NEXT: [[TMP4:%.*]] = mul i64 [[OFF]], 48
; CHECK-NEXT: [[DIFF_CHECK8:%.*]] = icmp ult i64 [[TMP4]], 32
; CHECK-NEXT: [[CONFLICT_RDX9:%.*]] = or i1 [[CONFLICT_RDX7]], [[DIFF_CHECK8]]
; CHECK-NEXT: [[TMP5:%.*]] = mul i64 [[OFF]], 56
; CHECK-NEXT: [[DIFF_CHECK10:%.*]] = icmp ult i64 [[TMP5]], 32
; CHECK-NEXT: [[CONFLICT_RDX11:%.*]] = or i1 [[CONFLICT_RDX9]], [[DIFF_CHECK10]]
; CHECK-NEXT: [[TMP6:%.*]] = shl i64 [[OFF]], 6
; CHECK-NEXT: [[DIFF_CHECK12:%.*]] = icmp ult i64 [[TMP6]], 32
; CHECK-NEXT: [[CONFLICT_RDX13:%.*]] = or i1 [[CONFLICT_RDX11]], [[DIFF_CHECK12]]
; CHECK-NEXT: [[TMP7:%.*]] = mul i64 [[OFF]], 72
; CHECK-NEXT: [[DIFF_CHECK14:%.*]] = icmp ult i64 [[TMP7]], 32
; CHECK-NEXT: [[CONFLICT_RDX15:%.*]] = or i1 [[CONFLICT_RDX13]], [[DIFF_CHECK14]]
; CHECK-NEXT: [[TMP8:%.*]] = mul i64 [[OFF]], 80
; CHECK-NEXT: [[DIFF_CHECK16:%.*]] = icmp ult i64 [[TMP8]], 32
; CHECK-NEXT: [[CONFLICT_RDX17:%.*]] = or i1 [[CONFLICT_RDX15]], [[DIFF_CHECK16]]
; CHECK-NEXT: [[TMP9:%.*]] = mul i64 [[OFF]], 88
; CHECK-NEXT: [[DIFF_CHECK18:%.*]] = icmp ult i64 [[TMP9]], 32
; CHECK-NEXT: [[CONFLICT_RDX19:%.*]] = or i1 [[CONFLICT_RDX17]], [[DIFF_CHECK18]]
; CHECK-NEXT: br i1 [[CONFLICT_RDX19]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], 4
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP10:%.*]] = add i64 [[INDEX]], 0
; CHECK-NEXT: [[TMP11:%.*]] = add nsw i64 [[TMP10]], -5
; CHECK-NEXT: [[TMP12:%.*]] = add i64 [[TMP11]], [[OFF]]
; CHECK-NEXT: [[TMP13:%.*]] = getelementptr i64, ptr [[DST:%.*]], i64 [[TMP12]]
; CHECK-NEXT: [[TMP14:%.*]] = getelementptr double, ptr [[TMP13]], i32 0
; CHECK-NEXT: store <4 x double> zeroinitializer, ptr [[TMP14]], align 8
; CHECK-NEXT: [[TMP15:%.*]] = add i64 [[TMP11]], [[OFF_MUL_2]]
; CHECK-NEXT: [[TMP16:%.*]] = getelementptr i64, ptr [[DST]], i64 [[TMP15]]
; CHECK-NEXT: [[TMP17:%.*]] = getelementptr double, ptr [[TMP16]], i32 0
; CHECK-NEXT: store <4 x double> zeroinitializer, ptr [[TMP17]], align 8
; CHECK-NEXT: [[TMP18:%.*]] = add i64 [[TMP11]], [[OFF_MUL_3]]
; CHECK-NEXT: [[TMP19:%.*]] = getelementptr i64, ptr [[DST]], i64 [[TMP18]]
; CHECK-NEXT: [[TMP20:%.*]] = getelementptr double, ptr [[TMP19]], i32 0
; CHECK-NEXT: store <4 x double> zeroinitializer, ptr [[TMP20]], align 8
; CHECK-NEXT: [[TMP21:%.*]] = add i64 [[TMP11]], [[OFF_MUL_4]]
; CHECK-NEXT: [[TMP22:%.*]] = getelementptr i64, ptr [[DST]], i64 [[TMP21]]
; CHECK-NEXT: [[TMP23:%.*]] = getelementptr double, ptr [[TMP22]], i32 0
; CHECK-NEXT: store <4 x double> zeroinitializer, ptr [[TMP23]], align 8
; CHECK-NEXT: [[TMP24:%.*]] = add i64 [[TMP11]], [[OFF_MUL_5]]
; CHECK-NEXT: [[TMP25:%.*]] = getelementptr i64, ptr [[DST]], i64 [[TMP24]]
; CHECK-NEXT: [[TMP26:%.*]] = getelementptr double, ptr [[TMP25]], i32 0
; CHECK-NEXT: store <4 x double> zeroinitializer, ptr [[TMP26]], align 8
; CHECK-NEXT: [[TMP27:%.*]] = add i64 [[TMP11]], [[OFF_MUL_6]]
; CHECK-NEXT: [[TMP28:%.*]] = getelementptr i64, ptr [[DST]], i64 [[TMP27]]
; CHECK-NEXT: [[TMP29:%.*]] = getelementptr double, ptr [[TMP28]], i32 0
; CHECK-NEXT: store <4 x double> zeroinitializer, ptr [[TMP29]], align 8
; CHECK-NEXT: [[TMP30:%.*]] = add i64 [[TMP11]], [[OFF_MUL_7]]
; CHECK-NEXT: [[TMP31:%.*]] = getelementptr i64, ptr [[DST]], i64 [[TMP30]]
; CHECK-NEXT: [[TMP32:%.*]] = getelementptr double, ptr [[TMP31]], i32 0
; CHECK-NEXT: store <4 x double> zeroinitializer, ptr [[TMP32]], align 8
; CHECK-NEXT: [[TMP33:%.*]] = add i64 [[TMP11]], [[OFF_MUL_8]]
; CHECK-NEXT: [[TMP34:%.*]] = getelementptr i64, ptr [[DST]], i64 [[TMP33]]
; CHECK-NEXT: [[TMP35:%.*]] = getelementptr double, ptr [[TMP34]], i32 0
; CHECK-NEXT: store <4 x double> zeroinitializer, ptr [[TMP35]], align 8
; CHECK-NEXT: [[TMP36:%.*]] = add i64 [[TMP11]], [[OFF_MUL_9]]
; CHECK-NEXT: [[TMP37:%.*]] = getelementptr i64, ptr [[DST]], i64 [[TMP36]]
; CHECK-NEXT: [[TMP38:%.*]] = getelementptr double, ptr [[TMP37]], i32 0
; CHECK-NEXT: store <4 x double> zeroinitializer, ptr [[TMP38]], align 8
; CHECK-NEXT: [[TMP39:%.*]] = add i64 [[TMP11]], [[OFF_MUL_10]]
; CHECK-NEXT: [[TMP40:%.*]] = getelementptr i64, ptr [[DST]], i64 [[TMP39]]
; CHECK-NEXT: [[TMP41:%.*]] = getelementptr double, ptr [[TMP40]], i32 0
; CHECK-NEXT: store <4 x double> zeroinitializer, ptr [[TMP41]], align 8
; CHECK-NEXT: [[TMP42:%.*]] = add i64 [[TMP11]], [[OFF_MUL_11]]
; CHECK-NEXT: [[TMP43:%.*]] = getelementptr i64, ptr [[DST]], i64 [[TMP42]]
; CHECK-NEXT: [[TMP44:%.*]] = getelementptr double, ptr [[TMP43]], i32 0
; CHECK-NEXT: store <4 x double> zeroinitializer, ptr [[TMP44]], align 8
; CHECK-NEXT: [[TMP45:%.*]] = add i64 [[TMP11]], [[OFF_MUL_12]]
; CHECK-NEXT: [[TMP46:%.*]] = getelementptr i64, ptr [[DST]], i64 [[TMP45]]
; CHECK-NEXT: [[TMP47:%.*]] = getelementptr double, ptr [[TMP46]], i32 0
; CHECK-NEXT: store <4 x double> zeroinitializer, ptr [[TMP47]], align 8
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[TMP48:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP48]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
; CHECK: scalar.ph:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ], [ 0, [[VECTOR_MEMCHECK]] ]
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
; CHECK-NEXT: [[IV_SUB_5:%.*]] = add nsw i64 [[IV]], -5
; CHECK-NEXT: [[IDX_1:%.*]] = add i64 [[IV_SUB_5]], [[OFF]]
; CHECK-NEXT: [[GEP_1:%.*]] = getelementptr i64, ptr [[DST]], i64 [[IDX_1]]
; CHECK-NEXT: store double 0.000000e+00, ptr [[GEP_1]], align 8
; CHECK-NEXT: [[IDX_2:%.*]] = add i64 [[IV_SUB_5]], [[OFF_MUL_2]]
; CHECK-NEXT: [[GEP_2:%.*]] = getelementptr i64, ptr [[DST]], i64 [[IDX_2]]
; CHECK-NEXT: store double 0.000000e+00, ptr [[GEP_2]], align 8
; CHECK-NEXT: [[IDX_3:%.*]] = add i64 [[IV_SUB_5]], [[OFF_MUL_3]]
; CHECK-NEXT: [[GEP_3:%.*]] = getelementptr i64, ptr [[DST]], i64 [[IDX_3]]
; CHECK-NEXT: store double 0.000000e+00, ptr [[GEP_3]], align 8
; CHECK-NEXT: [[IDX_4:%.*]] = add i64 [[IV_SUB_5]], [[OFF_MUL_4]]
; CHECK-NEXT: [[GEP_4:%.*]] = getelementptr i64, ptr [[DST]], i64 [[IDX_4]]
; CHECK-NEXT: store double 0.000000e+00, ptr [[GEP_4]], align 8
; CHECK-NEXT: [[IDX_5:%.*]] = add i64 [[IV_SUB_5]], [[OFF_MUL_5]]
; CHECK-NEXT: [[GEP_5:%.*]] = getelementptr i64, ptr [[DST]], i64 [[IDX_5]]
; CHECK-NEXT: store double 0.000000e+00, ptr [[GEP_5]], align 8
; CHECK-NEXT: [[IDX_6:%.*]] = add i64 [[IV_SUB_5]], [[OFF_MUL_6]]
; CHECK-NEXT: [[GEP_6:%.*]] = getelementptr i64, ptr [[DST]], i64 [[IDX_6]]
; CHECK-NEXT: store double 0.000000e+00, ptr [[GEP_6]], align 8
; CHECK-NEXT: [[IDX_7:%.*]] = add i64 [[IV_SUB_5]], [[OFF_MUL_7]]
; CHECK-NEXT: [[GEP_7:%.*]] = getelementptr i64, ptr [[DST]], i64 [[IDX_7]]
; CHECK-NEXT: store double 0.000000e+00, ptr [[GEP_7]], align 8
; CHECK-NEXT: [[IDX_8:%.*]] = add i64 [[IV_SUB_5]], [[OFF_MUL_8]]
; CHECK-NEXT: [[GEP_8:%.*]] = getelementptr i64, ptr [[DST]], i64 [[IDX_8]]
; CHECK-NEXT: store double 0.000000e+00, ptr [[GEP_8]], align 8
; CHECK-NEXT: [[IDX_9:%.*]] = add i64 [[IV_SUB_5]], [[OFF_MUL_9]]
; CHECK-NEXT: [[GEP_9:%.*]] = getelementptr i64, ptr [[DST]], i64 [[IDX_9]]
; CHECK-NEXT: store double 0.000000e+00, ptr [[GEP_9]], align 8
; CHECK-NEXT: [[IDX_10:%.*]] = add i64 [[IV_SUB_5]], [[OFF_MUL_10]]
; CHECK-NEXT: [[GEP_10:%.*]] = getelementptr i64, ptr [[DST]], i64 [[IDX_10]]
; CHECK-NEXT: store double 0.000000e+00, ptr [[GEP_10]], align 8
; CHECK-NEXT: [[IDX_11:%.*]] = add i64 [[IV_SUB_5]], [[OFF_MUL_11]]
; CHECK-NEXT: [[GEP_11:%.*]] = getelementptr i64, ptr [[DST]], i64 [[IDX_11]]
; CHECK-NEXT: store double 0.000000e+00, ptr [[GEP_11]], align 8
; CHECK-NEXT: [[IDX_12:%.*]] = add i64 [[IV_SUB_5]], [[OFF_MUL_12]]
; CHECK-NEXT: [[GEP_12:%.*]] = getelementptr i64, ptr [[DST]], i64 [[IDX_12]]
; CHECK-NEXT: store double 0.000000e+00, ptr [[GEP_12]], align 8
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
; CHECK-NEXT: br i1 [[EC]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP3:![0-9]+]]
; CHECK: exit:
; CHECK-NEXT: ret void
;
entry:
%off.mul.2 = shl i64 %off, 1
%off.mul.3 = mul i64 %off, 3
%off.mul.4 = shl i64 %off, 2
%off.mul.5 = mul i64 %off, 5
%off.mul.6 = mul i64 %off, 6
%off.mul.7 = mul i64 %off, 7
%off.mul.8 = shl i64 %off, 3
%off.mul.9 = mul i64 %off, 9
%off.mul.10 = mul i64 %off, 10
%off.mul.11 = mul i64 %off, 11
%off.mul.12 = mul i64 %off, 12
br label %loop
loop:
%iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
%iv.sub.5 = add nsw i64 %iv, -5
%idx.1 = add i64 %iv.sub.5, %off
%gep.1 = getelementptr i64, ptr %dst, i64 %idx.1
store double 0.000000e+00, ptr %gep.1, align 8
%idx.2 = add i64 %iv.sub.5, %off.mul.2
%gep.2 = getelementptr i64, ptr %dst, i64 %idx.2
store double 0.000000e+00, ptr %gep.2, align 8
%idx.3 = add i64 %iv.sub.5, %off.mul.3
%gep.3 = getelementptr i64, ptr %dst, i64 %idx.3
store double 0.000000e+00, ptr %gep.3, align 8
%idx.4 = add i64 %iv.sub.5, %off.mul.4
%gep.4 = getelementptr i64, ptr %dst, i64 %idx.4
store double 0.000000e+00, ptr %gep.4, align 8
%idx.5 = add i64 %iv.sub.5, %off.mul.5
%gep.5 = getelementptr i64, ptr %dst, i64 %idx.5
store double 0.000000e+00, ptr %gep.5, align 8
%idx.6 = add i64 %iv.sub.5, %off.mul.6
%gep.6 = getelementptr i64, ptr %dst, i64 %idx.6
store double 0.000000e+00, ptr %gep.6, align 8
%idx.7 = add i64 %iv.sub.5, %off.mul.7
%gep.7 = getelementptr i64, ptr %dst, i64 %idx.7
store double 0.000000e+00, ptr %gep.7, align 8
%idx.8 = add i64 %iv.sub.5, %off.mul.8
%gep.8 = getelementptr i64, ptr %dst, i64 %idx.8
store double 0.000000e+00, ptr %gep.8, align 8
%idx.9 = add i64 %iv.sub.5, %off.mul.9
%gep.9 = getelementptr i64, ptr %dst, i64 %idx.9
store double 0.000000e+00, ptr %gep.9, align 8
%idx.10 = add i64 %iv.sub.5, %off.mul.10
%gep.10 = getelementptr i64, ptr %dst, i64 %idx.10
store double 0.000000e+00, ptr %gep.10, align 8
%idx.11 = add i64 %iv.sub.5, %off.mul.11
%gep.11 = getelementptr i64, ptr %dst, i64 %idx.11
store double 0.000000e+00, ptr %gep.11, align 8
%idx.12 = add i64 %iv.sub.5, %off.mul.12
%gep.12 = getelementptr i64, ptr %dst, i64 %idx.12
store double 0.000000e+00, ptr %gep.12, align 8
%iv.next = add nuw nsw i64 %iv, 1
%ec = icmp eq i64 %iv.next, %N
br i1 %ec, label %exit, label %loop
exit:
ret void
}