clang-p2996/llvm/test/Transforms/PhaseOrdering/AArch64/indvars-vectorization.ll
Commit dce77a3579 by Florian Hahn: [IndVars] Preserve flags of narrow IV inc if replacing with wider inc. (#80446)
We are replacing a narrow IV increment with a wider one. If the original
(narrow) increment did not wrap, the wider one should not wrap either.
Set the flags on the wide increment to the union of its own flags and
the original increment's flags; this ensures we preserve any flags SCEV
could infer for the wider increment.

Fixes https://github.com/llvm/llvm-project/issues/71517.
Committed 2024-02-10 18:11:17 +00:00
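
As a minimal sketch of what the patch addresses (hypothetical IR; %iv,
%step, and the other names here are invented for illustration, not taken
from this test):

; Before widening: the narrow increment is known not to wrap.
  %iv = phi i32 [ %start, %preheader ], [ %iv.next, %loop ]
  %iv.next = add nsw i32 %iv, %step

; After widening: the i64 increment keeps nsw, the union of the original
; increment's flags and whatever SCEV can infer for the wide add.
  %iv.wide = phi i64 [ %start.wide, %preheader ], [ %iv.wide.next, %loop ]
  %iv.wide.next = add nsw i64 %iv.wide, %step.wide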

; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
; RUN: opt -passes='default<O3>' -S -o - %s | FileCheck %s
target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
target triple = "arm64-apple-macosx14.0.0"
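; The input loop below is equivalent to:
;   for (int i = xa - 1; i < 32000; i += xb)
;     a[i] += b[i];
; The narrow increment %add3 carries nsw. When IndVars widens the IV to
; i64, that flag should survive on the widened increment (checked as
; "add nsw i64" in the scalar loop below), and the loop should be
; vectorized with runtime alias checks.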
define void @s172(i32 noundef %xa, i32 noundef %xb, ptr noundef %a, ptr noundef %b) {
; CHECK-LABEL: define void @s172(
; CHECK-SAME: i32 noundef [[XA:%.*]], i32 noundef [[XB:%.*]], ptr nocapture noundef [[A:%.*]], ptr nocapture noundef readonly [[B:%.*]]) local_unnamed_addr #[[ATTR0:[0-9]+]] {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[CMP1:%.*]] = icmp slt i32 [[XA]], 32001
; CHECK-NEXT: br i1 [[CMP1]], label [[FOR_BODY_PREHEADER:%.*]], label [[FOR_END:%.*]]
; CHECK: for.body.preheader:
; CHECK-NEXT: [[SUB:%.*]] = add i32 [[XA]], -1
; CHECK-NEXT: [[TMP0:%.*]] = sext i32 [[SUB]] to i64
; CHECK-NEXT: [[TMP1:%.*]] = sext i32 [[XB]] to i64
; CHECK-NEXT: [[TMP2:%.*]] = add nsw i64 [[TMP1]], [[TMP0]]
; CHECK-NEXT: [[SMAX7:%.*]] = tail call i64 @llvm.smax.i64(i64 [[TMP2]], i64 32000)
; CHECK-NEXT: [[TMP3:%.*]] = icmp slt i64 [[TMP2]], 32000
; CHECK-NEXT: [[UMIN8:%.*]] = zext i1 [[TMP3]] to i64
; CHECK-NEXT: [[TMP4:%.*]] = add nsw i64 [[TMP2]], [[UMIN8]]
; CHECK-NEXT: [[TMP5:%.*]] = sub i64 [[SMAX7]], [[TMP4]]
; CHECK-NEXT: [[UMAX9:%.*]] = tail call i64 @llvm.umax.i64(i64 [[TMP1]], i64 1)
; CHECK-NEXT: [[TMP6:%.*]] = udiv i64 [[TMP5]], [[UMAX9]]
; CHECK-NEXT: [[TMP7:%.*]] = add i64 [[TMP6]], [[UMIN8]]
; CHECK-NEXT: [[TMP8:%.*]] = add i64 [[TMP7]], 1
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ugt i64 [[TMP8]], 23
; CHECK-NEXT: [[IDENT_CHECK_NOT:%.*]] = icmp eq i32 [[XB]], 1
; CHECK-NEXT: [[OR_COND:%.*]] = and i1 [[MIN_ITERS_CHECK]], [[IDENT_CHECK_NOT]]
; CHECK-NEXT: br i1 [[OR_COND]], label [[VECTOR_MEMCHECK:%.*]], label [[FOR_BODY_PREHEADER13:%.*]]
; CHECK: vector.memcheck:
; CHECK-NEXT: [[TMP9:%.*]] = shl nsw i64 [[TMP0]], 2
; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP9]]
; CHECK-NEXT: [[TMP10:%.*]] = add nsw i64 [[TMP1]], [[TMP0]]
; CHECK-NEXT: [[SMAX:%.*]] = tail call i64 @llvm.smax.i64(i64 [[TMP10]], i64 32000)
; CHECK-NEXT: [[TMP11:%.*]] = icmp slt i64 [[TMP10]], 32000
; CHECK-NEXT: [[UMIN:%.*]] = zext i1 [[TMP11]] to i64
; CHECK-NEXT: [[TMP12:%.*]] = add nsw i64 [[TMP10]], [[UMIN]]
; CHECK-NEXT: [[TMP13:%.*]] = sub i64 [[SMAX]], [[TMP12]]
; CHECK-NEXT: [[TMP14:%.*]] = add i64 [[TMP13]], [[UMIN]]
; CHECK-NEXT: [[TMP15:%.*]] = add i64 [[TMP14]], [[TMP0]]
; CHECK-NEXT: [[TMP16:%.*]] = shl i64 [[TMP15]], 2
; CHECK-NEXT: [[TMP17:%.*]] = add i64 [[TMP16]], 4
; CHECK-NEXT: [[SCEVGEP4:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP17]]
; CHECK-NEXT: [[SCEVGEP5:%.*]] = getelementptr i8, ptr [[B]], i64 [[TMP9]]
; CHECK-NEXT: [[SCEVGEP6:%.*]] = getelementptr i8, ptr [[B]], i64 [[TMP17]]
; CHECK-NEXT: [[BOUND0:%.*]] = icmp ult ptr [[SCEVGEP]], [[SCEVGEP6]]
; CHECK-NEXT: [[BOUND1:%.*]] = icmp ult ptr [[SCEVGEP5]], [[SCEVGEP4]]
; CHECK-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
; CHECK-NEXT: br i1 [[FOUND_CONFLICT]], label [[FOR_BODY_PREHEADER13]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[TMP8]], -8
; CHECK-NEXT: [[TMP18:%.*]] = mul nuw i64 [[N_VEC]], [[TMP1]]
; CHECK-NEXT: [[IND_END:%.*]] = add i64 [[TMP18]], [[TMP0]]
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP19:%.*]] = mul nuw i64 [[INDEX]], [[TMP1]]
; CHECK-NEXT: [[OFFSET_IDX:%.*]] = add i64 [[TMP19]], [[TMP0]]
; CHECK-NEXT: [[TMP20:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[OFFSET_IDX]]
; CHECK-NEXT: [[TMP21:%.*]] = getelementptr inbounds i8, ptr [[TMP20]], i64 16
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP20]], align 4, !alias.scope [[META0:![0-9]+]]
; CHECK-NEXT: [[WIDE_LOAD10:%.*]] = load <4 x i32>, ptr [[TMP21]], align 4, !alias.scope [[META0]]
; CHECK-NEXT: [[TMP22:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[OFFSET_IDX]]
; CHECK-NEXT: [[TMP23:%.*]] = getelementptr inbounds i8, ptr [[TMP22]], i64 16
; CHECK-NEXT: [[WIDE_LOAD11:%.*]] = load <4 x i32>, ptr [[TMP22]], align 4, !alias.scope [[META3:![0-9]+]], !noalias [[META0]]
; CHECK-NEXT: [[WIDE_LOAD12:%.*]] = load <4 x i32>, ptr [[TMP23]], align 4, !alias.scope [[META3]], !noalias [[META0]]
; CHECK-NEXT: [[TMP24:%.*]] = add nsw <4 x i32> [[WIDE_LOAD11]], [[WIDE_LOAD]]
; CHECK-NEXT: [[TMP25:%.*]] = add nsw <4 x i32> [[WIDE_LOAD12]], [[WIDE_LOAD10]]
; CHECK-NEXT: store <4 x i32> [[TMP24]], ptr [[TMP22]], align 4, !alias.scope [[META3]], !noalias [[META0]]
; CHECK-NEXT: store <4 x i32> [[TMP25]], ptr [[TMP23]], align 4, !alias.scope [[META3]], !noalias [[META0]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8
; CHECK-NEXT: [[TMP26:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP26]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP8]], [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_END]], label [[FOR_BODY_PREHEADER13]]
; CHECK: for.body.preheader13:
; CHECK-NEXT: [[INDVARS_IV_PH:%.*]] = phi i64 [ [[TMP0]], [[VECTOR_MEMCHECK]] ], [ [[TMP0]], [[FOR_BODY_PREHEADER]] ], [ [[IND_END]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ], [ [[INDVARS_IV_PH]], [[FOR_BODY_PREHEADER13]] ]
; CHECK-NEXT: [[GEP_B:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDVARS_IV]]
; CHECK-NEXT: [[L_B:%.*]] = load i32, ptr [[GEP_B]], align 4
; CHECK-NEXT: [[GEP_A:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV]]
; CHECK-NEXT: [[L_A:%.*]] = load i32, ptr [[GEP_A]], align 4
; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[L_A]], [[L_B]]
; CHECK-NEXT: store i32 [[ADD]], ptr [[GEP_A]], align 4
; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nsw i64 [[INDVARS_IV]], [[TMP1]]
; CHECK-NEXT: [[CMP:%.*]] = icmp slt i64 [[INDVARS_IV_NEXT]], 32000
; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_END]], !llvm.loop [[LOOP9:![0-9]+]]
; CHECK: for.end:
; CHECK-NEXT: ret void
;
entry:
  %sub = sub nsw i32 %xa, 1
  br label %for.cond
for.cond:
  %i.0 = phi i32 [ %sub, %entry ], [ %add3, %for.inc ]
  %cmp = icmp slt i32 %i.0, 32000
  br i1 %cmp, label %for.body, label %for.cond.cleanup
for.body:
  %idxprom = sext i32 %i.0 to i64
  %gep.b = getelementptr inbounds i32, ptr %b, i64 %idxprom
  %l.b = load i32, ptr %gep.b, align 4
  %idxprom1 = sext i32 %i.0 to i64
  %gep.a = getelementptr inbounds i32, ptr %a, i64 %idxprom1
  %l.a = load i32, ptr %gep.a, align 4
  %add = add nsw i32 %l.a, %l.b
  store i32 %add, ptr %gep.a, align 4
  br label %for.inc
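; The narrow IV increment below carries nsw; IndVars should preserve that
; flag when it replaces the increment with the widened i64 one.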
for.inc:
  %add3 = add nsw i32 %i.0, %xb
  br label %for.cond, !llvm.loop !0
for.cond.cleanup:
  br label %for.end
for.end:
  ret void
}
!0 = distinct !{!0, !1}
!1 = !{!"llvm.loop.mustprogress"}
;.
; CHECK: [[META0]] = !{[[META1:![0-9]+]]}
; CHECK: [[META1]] = distinct !{[[META1]], [[META2:![0-9]+]]}
; CHECK: [[META2]] = distinct !{[[META2]], !"LVerDomain"}
; CHECK: [[META3]] = !{[[META4:![0-9]+]]}
; CHECK: [[META4]] = distinct !{[[META4]], [[META2]]}
; CHECK: [[LOOP5]] = distinct !{[[LOOP5]], [[META6:![0-9]+]], [[META7:![0-9]+]], [[META8:![0-9]+]]}
; CHECK: [[META6]] = !{!"llvm.loop.mustprogress"}
; CHECK: [[META7]] = !{!"llvm.loop.isvectorized", i32 1}
; CHECK: [[META8]] = !{!"llvm.loop.unroll.runtime.disable"}
; CHECK: [[LOOP9]] = distinct !{[[LOOP9]], [[META6]], [[META7]]}
;.