The patch attempts to optimize a sequence of SIMD loads from the same
base pointer:
  %0 = getelementptr float, float* %base, i32 4
  %1 = bitcast float* %0 to <4 x float>*
  %2 = load <4 x float>, <4 x float>* %1
  ...
  %n1 = getelementptr float, float* %base, i32 N
  %n2 = bitcast float* %n1 to <4 x float>*
  %n3 = load <4 x float>, <4 x float>* %n2
For AArch64 the compiler generates a sequence of LDR Qt, [Xn, #imm]
instructions. However, 32-bit NEON VLD1/VST1 lack a [Rn, #imm]
addressing mode, so the address is recomputed before every load/store
instruction:
add r2, r0, #32
add r0, r0, #16
vld1.32 {d18, d19}, [r2]
vld1.32 {d22, d23}, [r0]
This can be improved by computing the address only for the first load,
and then using the post-indexed form of VLD1/VST1 for the rest:
add r0, r0, #16
vld1.32 {d18, d19}, [r0]!
vld1.32 {d22, d23}, [r0]
In order to do that, the patch adds more patterns to DAGCombine:
- (load (add ptr inc1)) and (add ptr inc2) are now folded if inc1
and inc2 are constants.
- (or ptr inc) is now recognized as a pointer increment if ptr is
sufficiently aligned.
In addition to that, we now search for all possible base updates and
then pick the best one.
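
For illustration, here is a minimal IR sketch of the pattern these
combines target (the function and value names are hypothetical, not
taken from the patch): two adjacent <4 x float> loads off a common
base. After the patch, AArch32 codegen should need only one address
computation, with the first load in post-indexed form, roughly as in
the sequence above:

  ; Hypothetical example: two adjacent vector loads from one base.
  ; Expected codegen (roughly): add r0, r0, #16
  ;                             vld1.32 {d16, d17}, [r0]!
  ;                             vld1.32 {d18, d19}, [r0]
  define <4 x float> @two_adjacent_loads(float* %base) {
  entry:
    %p0 = getelementptr float, float* %base, i32 4
    %q0 = bitcast float* %p0 to <4 x float>*
    %v0 = load <4 x float>, <4 x float>* %q0, align 16
    %p1 = getelementptr float, float* %base, i32 8
    %q1 = bitcast float* %p1 to <4 x float>*
    %v1 = load <4 x float>, <4 x float>* %q1, align 16
    %sum = fadd <4 x float> %v0, %v1
    ret <4 x float> %sum
  }

The second DAGCombine pattern covers the case where the lowered address
arithmetic materializes such an offset with an or rather than an add,
which is equivalent when the base pointer is known to be sufficiently
aligned.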
Differential Revision: https://reviews.llvm.org/D108988
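The regression test below (FileCheck assertions autogenerated) exercises
the resulting fp16 codegen across soft/hard float ABIs and both
endiannesses; the post-indexed stores such as vst1.16 {d16}, [r12:64]!
in the soft-float outputs use the addressing form discussed above.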
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=armv8a -mattr=+armv8.2-a,+fullfp16,+neon -target-abi=aapcs-gnu -float-abi=soft -o - %s | FileCheck %s --check-prefix=SOFT
; RUN: llc -mtriple=armv8a -mattr=+armv8.2-a,+fullfp16,+neon -target-abi=aapcs-gnu -float-abi=hard -o - %s | FileCheck %s --check-prefix=HARD
; RUN: llc -mtriple=armeb-eabi -mattr=+armv8.2-a,+fullfp16,+neon -target-abi=aapcs-gnu -float-abi=soft -o - %s | FileCheck %s --check-prefix=SOFTEB
; RUN: llc -mtriple=armeb-eabi -mattr=+armv8.2-a,+fullfp16,+neon -target-abi=aapcs-gnu -float-abi=hard -o - %s | FileCheck %s --check-prefix=HARDEB

declare <4 x half> @llvm.fabs.v4f16(<4 x half>)
declare <8 x half> @llvm.fabs.v8f16(<8 x half>)
declare void @use(double, float, <4 x half>, i16, <8 x half>)

define <4 x half> @test_vabs_f16(<4 x half> %a) {
; SOFT-LABEL: test_vabs_f16:
; SOFT: @ %bb.0: @ %entry
; SOFT-NEXT: vmov d16, r0, r1
; SOFT-NEXT: vabs.f16 d16, d16
; SOFT-NEXT: vmov r0, r1, d16
; SOFT-NEXT: bx lr
;
; HARD-LABEL: test_vabs_f16:
; HARD: @ %bb.0: @ %entry
; HARD-NEXT: vabs.f16 d0, d0
; HARD-NEXT: bx lr
;
; SOFTEB-LABEL: test_vabs_f16:
; SOFTEB: @ %bb.0: @ %entry
; SOFTEB-NEXT: vmov d16, r1, r0
; SOFTEB-NEXT: vrev64.16 d16, d16
; SOFTEB-NEXT: vabs.f16 d16, d16
; SOFTEB-NEXT: vrev64.16 d16, d16
; SOFTEB-NEXT: vmov r1, r0, d16
; SOFTEB-NEXT: bx lr
;
; HARDEB-LABEL: test_vabs_f16:
; HARDEB: @ %bb.0: @ %entry
; HARDEB-NEXT: vrev64.16 d16, d0
; HARDEB-NEXT: vabs.f16 d16, d16
; HARDEB-NEXT: vrev64.16 d0, d16
; HARDEB-NEXT: bx lr
entry:
  %vabs1.i = tail call <4 x half> @llvm.fabs.v4f16(<4 x half> %a)
  ret <4 x half> %vabs1.i
}

define <8 x half> @test2_vabs_f16(<8 x half> %a) {
; SOFT-LABEL: test2_vabs_f16:
; SOFT: @ %bb.0: @ %entry
; SOFT-NEXT: vmov d17, r2, r3
; SOFT-NEXT: vmov d16, r0, r1
; SOFT-NEXT: vabs.f16 q8, q8
; SOFT-NEXT: vmov r0, r1, d16
; SOFT-NEXT: vmov r2, r3, d17
; SOFT-NEXT: bx lr
;
; HARD-LABEL: test2_vabs_f16:
; HARD: @ %bb.0: @ %entry
; HARD-NEXT: vabs.f16 q0, q0
; HARD-NEXT: bx lr
;
; SOFTEB-LABEL: test2_vabs_f16:
; SOFTEB: @ %bb.0: @ %entry
; SOFTEB-NEXT: vmov d17, r3, r2
; SOFTEB-NEXT: vmov d16, r1, r0
; SOFTEB-NEXT: vrev64.16 q8, q8
; SOFTEB-NEXT: vabs.f16 q8, q8
; SOFTEB-NEXT: vrev64.16 q8, q8
; SOFTEB-NEXT: vmov r1, r0, d16
; SOFTEB-NEXT: vmov r3, r2, d17
; SOFTEB-NEXT: bx lr
;
; HARDEB-LABEL: test2_vabs_f16:
; HARDEB: @ %bb.0: @ %entry
; HARDEB-NEXT: vrev64.16 q8, q0
; HARDEB-NEXT: vabs.f16 q8, q8
; HARDEB-NEXT: vrev64.16 q0, q8
; HARDEB-NEXT: bx lr
entry:
  %vabs1.i = tail call <8 x half> @llvm.fabs.v8f16(<8 x half> %a)
  ret <8 x half> %vabs1.i
}

define void @test(double, float, i16, <4 x half>, <8 x half>) {
; SOFT-LABEL: test:
; SOFT: @ %bb.0: @ %entry
; SOFT-NEXT: push {r11, lr}
; SOFT-NEXT: sub sp, sp, #32
; SOFT-NEXT: add r12, sp, #48
; SOFT-NEXT: vld1.64 {d16, d17}, [r12]
; SOFT-NEXT: add r12, sp, #16
; SOFT-NEXT: vabs.f16 q8, q8
; SOFT-NEXT: vst1.64 {d16, d17}, [r12]
; SOFT-NEXT: mov r12, sp
; SOFT-NEXT: vldr d16, [sp, #40]
; SOFT-NEXT: vabs.f16 d16, d16
; SOFT-NEXT: vst1.16 {d16}, [r12:64]!
; SOFT-NEXT: str r3, [r12]
; SOFT-NEXT: bl use
; SOFT-NEXT: add sp, sp, #32
; SOFT-NEXT: pop {r11, pc}
;
; HARD-LABEL: test:
; HARD: @ %bb.0: @ %entry
; HARD-NEXT: vabs.f16 q2, q2
; HARD-NEXT: vabs.f16 d2, d2
; HARD-NEXT: b use
;
; SOFTEB-LABEL: test:
; SOFTEB: @ %bb.0: @ %entry
; SOFTEB-NEXT: .save {r4, lr}
; SOFTEB-NEXT: push {r4, lr}
; SOFTEB-NEXT: .pad #32
; SOFTEB-NEXT: sub sp, sp, #32
; SOFTEB-NEXT: vldr d16, [sp, #40]
; SOFTEB-NEXT: mov lr, sp
; SOFTEB-NEXT: add r4, sp, #48
; SOFTEB-NEXT: add r12, sp, #16
; SOFTEB-NEXT: vrev64.16 d16, d16
; SOFTEB-NEXT: vabs.f16 d16, d16
; SOFTEB-NEXT: vst1.16 {d16}, [lr:64]!
; SOFTEB-NEXT: vld1.64 {d16, d17}, [r4]
; SOFTEB-NEXT: vrev64.16 q8, q8
; SOFTEB-NEXT: str r3, [lr]
; SOFTEB-NEXT: vabs.f16 q8, q8
; SOFTEB-NEXT: vrev64.16 q8, q8
; SOFTEB-NEXT: vst1.64 {d16, d17}, [r12]
; SOFTEB-NEXT: bl use
; SOFTEB-NEXT: add sp, sp, #32
; SOFTEB-NEXT: pop {r4, pc}
;
; HARDEB-LABEL: test:
; HARDEB: @ %bb.0: @ %entry
; HARDEB-NEXT: vrev64.16 d16, d2
; HARDEB-NEXT: vabs.f16 d16, d16
; HARDEB-NEXT: vrev64.16 d2, d16
; HARDEB-NEXT: vrev64.16 q8, q2
; HARDEB-NEXT: vabs.f16 q8, q8
; HARDEB-NEXT: vrev64.16 q2, q8
; HARDEB-NEXT: b use
entry:
  %5 = tail call <4 x half> @llvm.fabs.v4f16(<4 x half> %3)
  %6 = tail call <8 x half> @llvm.fabs.v8f16(<8 x half> %4)
  tail call void @use(double %0, float %1, <4 x half> %5, i16 %2, <8 x half> %6)
  ret void
}

define void @many_args_test(double, float, i16, <4 x half>, <8 x half>, <8 x half>, <8 x half>) {
; SOFT-LABEL: many_args_test:
; SOFT: @ %bb.0: @ %entry
; SOFT-NEXT: push {r11, lr}
; SOFT-NEXT: sub sp, sp, #32
; SOFT-NEXT: add r12, sp, #80
; SOFT-NEXT: vld1.64 {d16, d17}, [r12]
; SOFT-NEXT: add r12, sp, #48
; SOFT-NEXT: vabs.f16 q8, q8
; SOFT-NEXT: vld1.64 {d18, d19}, [r12]
; SOFT-NEXT: add r12, sp, #64
; SOFT-NEXT: vadd.f16 q8, q8, q9
; SOFT-NEXT: vld1.64 {d18, d19}, [r12]
; SOFT-NEXT: add r12, sp, #16
; SOFT-NEXT: vmul.f16 q8, q9, q8
; SOFT-NEXT: vst1.64 {d16, d17}, [r12]
; SOFT-NEXT: mov r12, sp
; SOFT-NEXT: vldr d16, [sp, #40]
; SOFT-NEXT: vst1.16 {d16}, [r12:64]!
; SOFT-NEXT: str r3, [r12]
; SOFT-NEXT: bl use
; SOFT-NEXT: add sp, sp, #32
; SOFT-NEXT: pop {r11, pc}
;
; HARD-LABEL: many_args_test:
; HARD: @ %bb.0: @ %entry
; HARD-NEXT: mov r1, sp
; HARD-NEXT: vld1.64 {d16, d17}, [r1]
; HARD-NEXT: vabs.f16 q8, q8
; HARD-NEXT: vadd.f16 q8, q8, q2
; HARD-NEXT: vmul.f16 q2, q3, q8
; HARD-NEXT: b use
;
; SOFTEB-LABEL: many_args_test:
; SOFTEB: @ %bb.0: @ %entry
; SOFTEB-NEXT: .save {r11, lr}
; SOFTEB-NEXT: push {r11, lr}
; SOFTEB-NEXT: .pad #32
; SOFTEB-NEXT: sub sp, sp, #32
; SOFTEB-NEXT: add r12, sp, #80
; SOFTEB-NEXT: mov lr, sp
; SOFTEB-NEXT: vld1.64 {d16, d17}, [r12]
; SOFTEB-NEXT: add r12, sp, #48
; SOFTEB-NEXT: vrev64.16 q8, q8
; SOFTEB-NEXT: vabs.f16 q8, q8
; SOFTEB-NEXT: vld1.64 {d18, d19}, [r12]
; SOFTEB-NEXT: add r12, sp, #64
; SOFTEB-NEXT: vrev64.16 q9, q9
; SOFTEB-NEXT: vadd.f16 q8, q8, q9
; SOFTEB-NEXT: vld1.64 {d18, d19}, [r12]
; SOFTEB-NEXT: add r12, sp, #16
; SOFTEB-NEXT: vrev64.16 q9, q9
; SOFTEB-NEXT: vmul.f16 q8, q9, q8
; SOFTEB-NEXT: vldr d18, [sp, #40]
; SOFTEB-NEXT: vrev64.16 d18, d18
; SOFTEB-NEXT: vst1.16 {d18}, [lr:64]!
; SOFTEB-NEXT: str r3, [lr]
; SOFTEB-NEXT: vrev64.16 q8, q8
; SOFTEB-NEXT: vst1.64 {d16, d17}, [r12]
; SOFTEB-NEXT: bl use
; SOFTEB-NEXT: add sp, sp, #32
; SOFTEB-NEXT: pop {r11, pc}
;
; HARDEB-LABEL: many_args_test:
; HARDEB: @ %bb.0: @ %entry
; HARDEB-NEXT: mov r1, sp
; HARDEB-NEXT: vld1.64 {d16, d17}, [r1]
; HARDEB-NEXT: vrev64.16 q8, q8
; HARDEB-NEXT: vabs.f16 q8, q8
; HARDEB-NEXT: vrev64.16 q9, q2
; HARDEB-NEXT: vadd.f16 q8, q8, q9
; HARDEB-NEXT: vrev64.16 q9, q3
; HARDEB-NEXT: vmul.f16 q8, q9, q8
; HARDEB-NEXT: vrev64.16 q2, q8
; HARDEB-NEXT: b use
entry:
  %7 = tail call <8 x half> @llvm.fabs.v8f16(<8 x half> %6)
  %8 = fadd <8 x half> %7, %4
  %9 = fmul <8 x half> %5, %8
  tail call void @use(double %0, float %1, <4 x half> %3, i16 %2, <8 x half> %9)
  ret void
}