Loop unrolling tends to produce chains of `%x1 = add %x0, 1; %x2 = add %x1, 1; ...` with one add per unrolled iteration. This patch simplifies these adds to `%xN = add %x0, N` directly during unrolling, rather than waiting for InstCombine to do so. The motivation for this is that having a single add (rather than an add chain) on the induction variable makes it a simple recurrence, which we specially recognize in a number of places. This allows InstCombine to directly perform folds with that knowledge, instead of first folding the add chains, and then doing other folds in another InstCombine iteration. Due to the reduced number of InstCombine iterations, this also results in a small compile-time improvement. Differential Revision: https://reviews.llvm.org/D153540
163 lines · 11 KiB · LLVM
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt %s -S -mtriple=riscv64 -passes=loop-unroll -mcpu=sifive-s76 | FileCheck %s

; Loop-unroll test: the 64-iteration saxpy loop is unrolled 16x for sifive-s76.
; Each unrolled iteration's induction increment is expressed directly as
; `add nuw nsw i64 %indvars.iv, N` off the loop phi (a simple recurrence),
; rather than as a chain of +1 adds — this is the behavior under test.
define dso_local void @saxpy(float %a, ptr %x, ptr %y) {
; CHECK-LABEL: @saxpy(
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDVARS_IV_NEXT_15:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[X:%.*]], i64 [[INDVARS_IV]]
; CHECK-NEXT: [[TMP0:%.*]] = load float, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[MUL:%.*]] = fmul fast float [[TMP0]], [[A:%.*]]
; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[Y:%.*]], i64 [[INDVARS_IV]]
; CHECK-NEXT: [[TMP1:%.*]] = load float, ptr [[ARRAYIDX2]], align 4
; CHECK-NEXT: [[ADD:%.*]] = fadd fast float [[MUL]], [[TMP1]]
; CHECK-NEXT: store float [[ADD]], ptr [[ARRAYIDX2]], align 4
; CHECK-NEXT: [[INDVARS_IV_NEXT:%.*]] = add nuw nsw i64 [[INDVARS_IV]], 1
; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds float, ptr [[X]], i64 [[INDVARS_IV_NEXT]]
; CHECK-NEXT: [[TMP2:%.*]] = load float, ptr [[ARRAYIDX_1]], align 4
; CHECK-NEXT: [[MUL_1:%.*]] = fmul fast float [[TMP2]], [[A]]
; CHECK-NEXT: [[ARRAYIDX2_1:%.*]] = getelementptr inbounds float, ptr [[Y]], i64 [[INDVARS_IV_NEXT]]
; CHECK-NEXT: [[TMP3:%.*]] = load float, ptr [[ARRAYIDX2_1]], align 4
; CHECK-NEXT: [[ADD_1:%.*]] = fadd fast float [[MUL_1]], [[TMP3]]
; CHECK-NEXT: store float [[ADD_1]], ptr [[ARRAYIDX2_1]], align 4
; CHECK-NEXT: [[INDVARS_IV_NEXT_1:%.*]] = add nuw nsw i64 [[INDVARS_IV]], 2
; CHECK-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds float, ptr [[X]], i64 [[INDVARS_IV_NEXT_1]]
; CHECK-NEXT: [[TMP4:%.*]] = load float, ptr [[ARRAYIDX_2]], align 4
; CHECK-NEXT: [[MUL_2:%.*]] = fmul fast float [[TMP4]], [[A]]
; CHECK-NEXT: [[ARRAYIDX2_2:%.*]] = getelementptr inbounds float, ptr [[Y]], i64 [[INDVARS_IV_NEXT_1]]
; CHECK-NEXT: [[TMP5:%.*]] = load float, ptr [[ARRAYIDX2_2]], align 4
; CHECK-NEXT: [[ADD_2:%.*]] = fadd fast float [[MUL_2]], [[TMP5]]
; CHECK-NEXT: store float [[ADD_2]], ptr [[ARRAYIDX2_2]], align 4
; CHECK-NEXT: [[INDVARS_IV_NEXT_2:%.*]] = add nuw nsw i64 [[INDVARS_IV]], 3
; CHECK-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds float, ptr [[X]], i64 [[INDVARS_IV_NEXT_2]]
; CHECK-NEXT: [[TMP6:%.*]] = load float, ptr [[ARRAYIDX_3]], align 4
; CHECK-NEXT: [[MUL_3:%.*]] = fmul fast float [[TMP6]], [[A]]
; CHECK-NEXT: [[ARRAYIDX2_3:%.*]] = getelementptr inbounds float, ptr [[Y]], i64 [[INDVARS_IV_NEXT_2]]
; CHECK-NEXT: [[TMP7:%.*]] = load float, ptr [[ARRAYIDX2_3]], align 4
; CHECK-NEXT: [[ADD_3:%.*]] = fadd fast float [[MUL_3]], [[TMP7]]
; CHECK-NEXT: store float [[ADD_3]], ptr [[ARRAYIDX2_3]], align 4
; CHECK-NEXT: [[INDVARS_IV_NEXT_3:%.*]] = add nuw nsw i64 [[INDVARS_IV]], 4
; CHECK-NEXT: [[ARRAYIDX_4:%.*]] = getelementptr inbounds float, ptr [[X]], i64 [[INDVARS_IV_NEXT_3]]
; CHECK-NEXT: [[TMP8:%.*]] = load float, ptr [[ARRAYIDX_4]], align 4
; CHECK-NEXT: [[MUL_4:%.*]] = fmul fast float [[TMP8]], [[A]]
; CHECK-NEXT: [[ARRAYIDX2_4:%.*]] = getelementptr inbounds float, ptr [[Y]], i64 [[INDVARS_IV_NEXT_3]]
; CHECK-NEXT: [[TMP9:%.*]] = load float, ptr [[ARRAYIDX2_4]], align 4
; CHECK-NEXT: [[ADD_4:%.*]] = fadd fast float [[MUL_4]], [[TMP9]]
; CHECK-NEXT: store float [[ADD_4]], ptr [[ARRAYIDX2_4]], align 4
; CHECK-NEXT: [[INDVARS_IV_NEXT_4:%.*]] = add nuw nsw i64 [[INDVARS_IV]], 5
; CHECK-NEXT: [[ARRAYIDX_5:%.*]] = getelementptr inbounds float, ptr [[X]], i64 [[INDVARS_IV_NEXT_4]]
; CHECK-NEXT: [[TMP10:%.*]] = load float, ptr [[ARRAYIDX_5]], align 4
; CHECK-NEXT: [[MUL_5:%.*]] = fmul fast float [[TMP10]], [[A]]
; CHECK-NEXT: [[ARRAYIDX2_5:%.*]] = getelementptr inbounds float, ptr [[Y]], i64 [[INDVARS_IV_NEXT_4]]
; CHECK-NEXT: [[TMP11:%.*]] = load float, ptr [[ARRAYIDX2_5]], align 4
; CHECK-NEXT: [[ADD_5:%.*]] = fadd fast float [[MUL_5]], [[TMP11]]
; CHECK-NEXT: store float [[ADD_5]], ptr [[ARRAYIDX2_5]], align 4
; CHECK-NEXT: [[INDVARS_IV_NEXT_5:%.*]] = add nuw nsw i64 [[INDVARS_IV]], 6
; CHECK-NEXT: [[ARRAYIDX_6:%.*]] = getelementptr inbounds float, ptr [[X]], i64 [[INDVARS_IV_NEXT_5]]
; CHECK-NEXT: [[TMP12:%.*]] = load float, ptr [[ARRAYIDX_6]], align 4
; CHECK-NEXT: [[MUL_6:%.*]] = fmul fast float [[TMP12]], [[A]]
; CHECK-NEXT: [[ARRAYIDX2_6:%.*]] = getelementptr inbounds float, ptr [[Y]], i64 [[INDVARS_IV_NEXT_5]]
; CHECK-NEXT: [[TMP13:%.*]] = load float, ptr [[ARRAYIDX2_6]], align 4
; CHECK-NEXT: [[ADD_6:%.*]] = fadd fast float [[MUL_6]], [[TMP13]]
; CHECK-NEXT: store float [[ADD_6]], ptr [[ARRAYIDX2_6]], align 4
; CHECK-NEXT: [[INDVARS_IV_NEXT_6:%.*]] = add nuw nsw i64 [[INDVARS_IV]], 7
; CHECK-NEXT: [[ARRAYIDX_7:%.*]] = getelementptr inbounds float, ptr [[X]], i64 [[INDVARS_IV_NEXT_6]]
; CHECK-NEXT: [[TMP14:%.*]] = load float, ptr [[ARRAYIDX_7]], align 4
; CHECK-NEXT: [[MUL_7:%.*]] = fmul fast float [[TMP14]], [[A]]
; CHECK-NEXT: [[ARRAYIDX2_7:%.*]] = getelementptr inbounds float, ptr [[Y]], i64 [[INDVARS_IV_NEXT_6]]
; CHECK-NEXT: [[TMP15:%.*]] = load float, ptr [[ARRAYIDX2_7]], align 4
; CHECK-NEXT: [[ADD_7:%.*]] = fadd fast float [[MUL_7]], [[TMP15]]
; CHECK-NEXT: store float [[ADD_7]], ptr [[ARRAYIDX2_7]], align 4
; CHECK-NEXT: [[INDVARS_IV_NEXT_7:%.*]] = add nuw nsw i64 [[INDVARS_IV]], 8
; CHECK-NEXT: [[ARRAYIDX_8:%.*]] = getelementptr inbounds float, ptr [[X]], i64 [[INDVARS_IV_NEXT_7]]
; CHECK-NEXT: [[TMP16:%.*]] = load float, ptr [[ARRAYIDX_8]], align 4
; CHECK-NEXT: [[MUL_8:%.*]] = fmul fast float [[TMP16]], [[A]]
; CHECK-NEXT: [[ARRAYIDX2_8:%.*]] = getelementptr inbounds float, ptr [[Y]], i64 [[INDVARS_IV_NEXT_7]]
; CHECK-NEXT: [[TMP17:%.*]] = load float, ptr [[ARRAYIDX2_8]], align 4
; CHECK-NEXT: [[ADD_8:%.*]] = fadd fast float [[MUL_8]], [[TMP17]]
; CHECK-NEXT: store float [[ADD_8]], ptr [[ARRAYIDX2_8]], align 4
; CHECK-NEXT: [[INDVARS_IV_NEXT_8:%.*]] = add nuw nsw i64 [[INDVARS_IV]], 9
; CHECK-NEXT: [[ARRAYIDX_9:%.*]] = getelementptr inbounds float, ptr [[X]], i64 [[INDVARS_IV_NEXT_8]]
; CHECK-NEXT: [[TMP18:%.*]] = load float, ptr [[ARRAYIDX_9]], align 4
; CHECK-NEXT: [[MUL_9:%.*]] = fmul fast float [[TMP18]], [[A]]
; CHECK-NEXT: [[ARRAYIDX2_9:%.*]] = getelementptr inbounds float, ptr [[Y]], i64 [[INDVARS_IV_NEXT_8]]
; CHECK-NEXT: [[TMP19:%.*]] = load float, ptr [[ARRAYIDX2_9]], align 4
; CHECK-NEXT: [[ADD_9:%.*]] = fadd fast float [[MUL_9]], [[TMP19]]
; CHECK-NEXT: store float [[ADD_9]], ptr [[ARRAYIDX2_9]], align 4
; CHECK-NEXT: [[INDVARS_IV_NEXT_9:%.*]] = add nuw nsw i64 [[INDVARS_IV]], 10
; CHECK-NEXT: [[ARRAYIDX_10:%.*]] = getelementptr inbounds float, ptr [[X]], i64 [[INDVARS_IV_NEXT_9]]
; CHECK-NEXT: [[TMP20:%.*]] = load float, ptr [[ARRAYIDX_10]], align 4
; CHECK-NEXT: [[MUL_10:%.*]] = fmul fast float [[TMP20]], [[A]]
; CHECK-NEXT: [[ARRAYIDX2_10:%.*]] = getelementptr inbounds float, ptr [[Y]], i64 [[INDVARS_IV_NEXT_9]]
; CHECK-NEXT: [[TMP21:%.*]] = load float, ptr [[ARRAYIDX2_10]], align 4
; CHECK-NEXT: [[ADD_10:%.*]] = fadd fast float [[MUL_10]], [[TMP21]]
; CHECK-NEXT: store float [[ADD_10]], ptr [[ARRAYIDX2_10]], align 4
; CHECK-NEXT: [[INDVARS_IV_NEXT_10:%.*]] = add nuw nsw i64 [[INDVARS_IV]], 11
; CHECK-NEXT: [[ARRAYIDX_11:%.*]] = getelementptr inbounds float, ptr [[X]], i64 [[INDVARS_IV_NEXT_10]]
; CHECK-NEXT: [[TMP22:%.*]] = load float, ptr [[ARRAYIDX_11]], align 4
; CHECK-NEXT: [[MUL_11:%.*]] = fmul fast float [[TMP22]], [[A]]
; CHECK-NEXT: [[ARRAYIDX2_11:%.*]] = getelementptr inbounds float, ptr [[Y]], i64 [[INDVARS_IV_NEXT_10]]
; CHECK-NEXT: [[TMP23:%.*]] = load float, ptr [[ARRAYIDX2_11]], align 4
; CHECK-NEXT: [[ADD_11:%.*]] = fadd fast float [[MUL_11]], [[TMP23]]
; CHECK-NEXT: store float [[ADD_11]], ptr [[ARRAYIDX2_11]], align 4
; CHECK-NEXT: [[INDVARS_IV_NEXT_11:%.*]] = add nuw nsw i64 [[INDVARS_IV]], 12
; CHECK-NEXT: [[ARRAYIDX_12:%.*]] = getelementptr inbounds float, ptr [[X]], i64 [[INDVARS_IV_NEXT_11]]
; CHECK-NEXT: [[TMP24:%.*]] = load float, ptr [[ARRAYIDX_12]], align 4
; CHECK-NEXT: [[MUL_12:%.*]] = fmul fast float [[TMP24]], [[A]]
; CHECK-NEXT: [[ARRAYIDX2_12:%.*]] = getelementptr inbounds float, ptr [[Y]], i64 [[INDVARS_IV_NEXT_11]]
; CHECK-NEXT: [[TMP25:%.*]] = load float, ptr [[ARRAYIDX2_12]], align 4
; CHECK-NEXT: [[ADD_12:%.*]] = fadd fast float [[MUL_12]], [[TMP25]]
; CHECK-NEXT: store float [[ADD_12]], ptr [[ARRAYIDX2_12]], align 4
; CHECK-NEXT: [[INDVARS_IV_NEXT_12:%.*]] = add nuw nsw i64 [[INDVARS_IV]], 13
; CHECK-NEXT: [[ARRAYIDX_13:%.*]] = getelementptr inbounds float, ptr [[X]], i64 [[INDVARS_IV_NEXT_12]]
; CHECK-NEXT: [[TMP26:%.*]] = load float, ptr [[ARRAYIDX_13]], align 4
; CHECK-NEXT: [[MUL_13:%.*]] = fmul fast float [[TMP26]], [[A]]
; CHECK-NEXT: [[ARRAYIDX2_13:%.*]] = getelementptr inbounds float, ptr [[Y]], i64 [[INDVARS_IV_NEXT_12]]
; CHECK-NEXT: [[TMP27:%.*]] = load float, ptr [[ARRAYIDX2_13]], align 4
; CHECK-NEXT: [[ADD_13:%.*]] = fadd fast float [[MUL_13]], [[TMP27]]
; CHECK-NEXT: store float [[ADD_13]], ptr [[ARRAYIDX2_13]], align 4
; CHECK-NEXT: [[INDVARS_IV_NEXT_13:%.*]] = add nuw nsw i64 [[INDVARS_IV]], 14
; CHECK-NEXT: [[ARRAYIDX_14:%.*]] = getelementptr inbounds float, ptr [[X]], i64 [[INDVARS_IV_NEXT_13]]
; CHECK-NEXT: [[TMP28:%.*]] = load float, ptr [[ARRAYIDX_14]], align 4
; CHECK-NEXT: [[MUL_14:%.*]] = fmul fast float [[TMP28]], [[A]]
; CHECK-NEXT: [[ARRAYIDX2_14:%.*]] = getelementptr inbounds float, ptr [[Y]], i64 [[INDVARS_IV_NEXT_13]]
; CHECK-NEXT: [[TMP29:%.*]] = load float, ptr [[ARRAYIDX2_14]], align 4
; CHECK-NEXT: [[ADD_14:%.*]] = fadd fast float [[MUL_14]], [[TMP29]]
; CHECK-NEXT: store float [[ADD_14]], ptr [[ARRAYIDX2_14]], align 4
; CHECK-NEXT: [[INDVARS_IV_NEXT_14:%.*]] = add nuw nsw i64 [[INDVARS_IV]], 15
; CHECK-NEXT: [[ARRAYIDX_15:%.*]] = getelementptr inbounds float, ptr [[X]], i64 [[INDVARS_IV_NEXT_14]]
; CHECK-NEXT: [[TMP30:%.*]] = load float, ptr [[ARRAYIDX_15]], align 4
; CHECK-NEXT: [[MUL_15:%.*]] = fmul fast float [[TMP30]], [[A]]
; CHECK-NEXT: [[ARRAYIDX2_15:%.*]] = getelementptr inbounds float, ptr [[Y]], i64 [[INDVARS_IV_NEXT_14]]
; CHECK-NEXT: [[TMP31:%.*]] = load float, ptr [[ARRAYIDX2_15]], align 4
; CHECK-NEXT: [[ADD_15:%.*]] = fadd fast float [[MUL_15]], [[TMP31]]
; CHECK-NEXT: store float [[ADD_15]], ptr [[ARRAYIDX2_15]], align 4
; CHECK-NEXT: [[INDVARS_IV_NEXT_15]] = add nuw nsw i64 [[INDVARS_IV]], 16
; CHECK-NEXT: [[EXITCOND_NOT_15:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT_15]], 64
; CHECK-NEXT: br i1 [[EXITCOND_NOT_15]], label [[EXIT_LOOP:%.*]], label [[FOR_BODY]]
; CHECK: exit_loop:
; CHECK-NEXT: ret void
;
entry:
  br label %for.body

for.body:
  %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
  %arrayidx = getelementptr inbounds float, ptr %x, i64 %indvars.iv
  %0 = load float, ptr %arrayidx, align 4
  %mul = fmul fast float %0, %a
  %arrayidx2 = getelementptr inbounds float, ptr %y, i64 %indvars.iv
  %1 = load float, ptr %arrayidx2, align 4
  %add = fadd fast float %mul, %1
  store float %add, ptr %arrayidx2, align 4
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %exitcond.not = icmp eq i64 %indvars.iv.next, 64
  br i1 %exitcond.not, label %exit_loop, label %for.body

exit_loop:
  ret void
}