clang-p2996/llvm/test/Transforms/LoopStrengthReduce/AArch64/lsr-reuse.ll
Commit e16f2f5d24 by Graham Hunter: [AArch64] Override isLSRCostLess, take number of instructions into account (#84189)
Adds an AArch64-specific version of isLSRCostLess, changing the relative
importance of the various terms from the formulae being evaluated.

This has been split out from my vscale-aware LSR work, see the RFC for
reference:
https://discourse.llvm.org/t/rfc-vscale-aware-loopstrengthreduce/77131
Committed: 2024-06-06 14:45:36 +01:00
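For context, the sketch below illustrates what a target-specific override of the isLSRCostLess TTI hook can look like. It assumes the upstream TargetTransformInfo::LSRCost fields (Insns, NumRegs, NumBaseAdds, AddRecCost, NumIVMuls, ScaleCost, ImmCost, SetupCost) and the lexicographic std::tie comparison style used by the default hook; the class name MyTargetTTIImpl and the exact ordering of terms are illustrative assumptions, not necessarily what the AArch64 patch commits. See the PR linked above for the real change.

// Sketch only: shows how a target can override isLSRCostLess so that the
// instruction count (Insns) participates in the lexicographic comparison.
// The term ordering here is an assumption for illustration.
#include "llvm/Analysis/TargetTransformInfo.h"
#include <tuple>

using llvm::TargetTransformInfo;

// Hypothetical target TTI implementation class, used only for illustration.
struct MyTargetTTIImpl {
  bool isLSRCostLess(const TargetTransformInfo::LSRCost &C1,
                     const TargetTransformInfo::LSRCost &C2) const {
    // The generic hook compares roughly (NumRegs, AddRecCost, NumIVMuls,
    // NumBaseAdds, ScaleCost, ImmCost, SetupCost). This variant folds the
    // instruction count in just after register pressure and promotes base
    // adds, so LSR solutions needing fewer instructions win ties earlier.
    return std::tie(C1.NumRegs, C1.Insns, C1.NumBaseAdds, C1.AddRecCost,
                    C1.NumIVMuls, C1.ScaleCost, C1.ImmCost, C1.SetupCost) <
           std::tie(C2.NumRegs, C2.Insns, C2.NumBaseAdds, C2.AddRecCost,
                    C2.NumIVMuls, C2.ScaleCost, C2.ImmCost, C2.SetupCost);
  }
};

Keeping NumRegs as the leading term preserves the usual emphasis on register pressure; the added terms only reorder candidate solutions that tie on it, which is the kind of relative-importance change the commit message describes.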


; RUN: llc -mtriple=arm64-unknown-unknown -print-lsr-output < %s 2>&1 | FileCheck %s

declare void @foo(i64)

; Verify that redundant adds or geps aren't inserted by LSR.
; CHECK-LABEL: @bar(
define void @bar(ptr %A) {
entry:
  br label %while.cond

while.cond:
; CHECK-LABEL: while.cond:
; CHECK-NOT: add i64 %lsr.iv, 1
; CHECK-LABEL: land.rhs:
; CHECK: getelementptr i8, ptr %lsr.iv, i64 -8
; CHECK-NOT: getelementptr i8, ptr %lsr.iv, i64 -8
; CHECK-NOT: add i64 %lsr.iv, 1
  %indvars.iv28 = phi i64 [ %indvars.iv.next29, %land.rhs ], [ 50, %entry ]
  %cmp = icmp sgt i64 %indvars.iv28, 0
  br i1 %cmp, label %land.rhs, label %while.end

land.rhs:
  %indvars.iv.next29 = add nsw i64 %indvars.iv28, -1
  %arrayidx = getelementptr inbounds double, ptr %A, i64 %indvars.iv.next29
  %Aload = load double, ptr %arrayidx, align 8
  %cmp1 = fcmp oeq double %Aload, 0.000000e+00
  br i1 %cmp1, label %while.cond, label %if.end

while.end:
  %indvars.iv28.lcssa = phi i64 [ %indvars.iv28, %while.cond ]
  tail call void @foo(i64 %indvars.iv28.lcssa)
  br label %if.end

if.end:
  ret void
}