Refresh of the generic scheduling model to use the A510 instead of the A55. The main benefits are to the little core, along with the introduction of SVE scheduling information. The changes were tested on various OoO cores; no performance degradation was seen. Differential Revision: https://reviews.llvm.org/D156799
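
For context, "generic" is the model llc selects when no -mcpu is given. As a rough sketch for comparing the refreshed generic model against the dedicated Cortex-A510 model (the invocations below are illustrative assumptions, not commands taken from this revision; input.ll stands for any AArch64 IR file such as this test):

  llc -mtriple=aarch64 -mcpu=generic input.ll -o -
  llc -mtriple=aarch64 -mcpu=cortex-a510 input.ll -o -
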
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc --mtriple=aarch64-eabi < %s | FileCheck %s
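
; The functions below each perform two vector reductions and combine the
; scalar results. The CHECK lines record whether the backend is able to merge
; the work into vector operations followed by a single reduction.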

define float @add_f32(<8 x float> %a, <4 x float> %b) {
; CHECK-LABEL: add_f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fadd v0.4s, v0.4s, v1.4s
; CHECK-NEXT:    fadd v0.4s, v0.4s, v2.4s
; CHECK-NEXT:    faddp v0.4s, v0.4s, v0.4s
; CHECK-NEXT:    faddp s0, v0.2s
; CHECK-NEXT:    ret
  %r1 = call fast float @llvm.vector.reduce.fadd.f32.v8f32(float -0.0, <8 x float> %a)
  %r2 = call fast float @llvm.vector.reduce.fadd.f32.v4f32(float -0.0, <4 x float> %b)
  %r = fadd fast float %r1, %r2
  ret float %r
}

define float @fmul_f32(<8 x float> %a, <4 x float> %b) {
; CHECK-LABEL: fmul_f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fmul v0.4s, v0.4s, v1.4s
; CHECK-NEXT:    fmul v0.4s, v0.4s, v2.4s
; CHECK-NEXT:    ext v1.16b, v0.16b, v0.16b, #8
; CHECK-NEXT:    fmul v0.2s, v0.2s, v1.2s
; CHECK-NEXT:    fmul s0, s0, v0.s[1]
; CHECK-NEXT:    ret
  %r1 = call fast float @llvm.vector.reduce.fmul.f32.v8f32(float 1.0, <8 x float> %a)
  %r2 = call fast float @llvm.vector.reduce.fmul.f32.v4f32(float 1.0, <4 x float> %b)
  %r = fmul fast float %r1, %r2
  ret float %r
}

define float @fmin_f32(<8 x float> %a, <4 x float> %b) {
; CHECK-LABEL: fmin_f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fminnm v0.4s, v0.4s, v1.4s
; CHECK-NEXT:    fminnm v0.4s, v0.4s, v2.4s
; CHECK-NEXT:    fminnmv s0, v0.4s
; CHECK-NEXT:    ret
  %r1 = call float @llvm.vector.reduce.fmin.v8f32(<8 x float> %a)
  %r2 = call float @llvm.vector.reduce.fmin.v4f32(<4 x float> %b)
  %r = call float @llvm.minnum.f32(float %r1, float %r2)
  ret float %r
}

define float @fmax_f32(<8 x float> %a, <4 x float> %b) {
; CHECK-LABEL: fmax_f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fmaxnm v0.4s, v0.4s, v1.4s
; CHECK-NEXT:    fmaxnm v0.4s, v0.4s, v2.4s
; CHECK-NEXT:    fmaxnmv s0, v0.4s
; CHECK-NEXT:    ret
  %r1 = call float @llvm.vector.reduce.fmax.v8f32(<8 x float> %a)
  %r2 = call float @llvm.vector.reduce.fmax.v4f32(<4 x float> %b)
  %r = call float @llvm.maxnum.f32(float %r1, float %r2)
  ret float %r
}

define float @fminimum_f32(<8 x float> %a, <4 x float> %b) {
; CHECK-LABEL: fminimum_f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fmin v0.4s, v0.4s, v1.4s
; CHECK-NEXT:    fmin v0.4s, v0.4s, v2.4s
; CHECK-NEXT:    fminv s0, v0.4s
; CHECK-NEXT:    ret
  %r1 = call float @llvm.vector.reduce.fminimum.v8f32(<8 x float> %a)
  %r2 = call float @llvm.vector.reduce.fminimum.v4f32(<4 x float> %b)
  %r = call float @llvm.minimum.f32(float %r1, float %r2)
  ret float %r
}

define float @fmaximum_f32(<8 x float> %a, <4 x float> %b) {
; CHECK-LABEL: fmaximum_f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fmax v0.4s, v0.4s, v1.4s
; CHECK-NEXT:    fmax v0.4s, v0.4s, v2.4s
; CHECK-NEXT:    fmaxv s0, v0.4s
; CHECK-NEXT:    ret
  %r1 = call float @llvm.vector.reduce.fmaximum.v8f32(<8 x float> %a)
  %r2 = call float @llvm.vector.reduce.fmaximum.v4f32(<4 x float> %b)
  %r = call float @llvm.maximum.f32(float %r1, float %r2)
  ret float %r
}

; These next two tests have mismatched minnum/minimum combinations.
define float @fminimumnum_f32(<8 x float> %a, <4 x float> %b) {
; CHECK-LABEL: fminimumnum_f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fmin v0.4s, v0.4s, v1.4s
; CHECK-NEXT:    fminv s1, v2.4s
; CHECK-NEXT:    fminv s0, v0.4s
; CHECK-NEXT:    fminnm s0, s0, s1
; CHECK-NEXT:    ret
  %r1 = call float @llvm.vector.reduce.fminimum.v8f32(<8 x float> %a)
  %r2 = call float @llvm.vector.reduce.fminimum.v4f32(<4 x float> %b)
  %r = call float @llvm.minnum.f32(float %r1, float %r2)
  ret float %r
}

define float @fmaxnumimum_f32(<8 x float> %a, <4 x float> %b) {
; CHECK-LABEL: fmaxnumimum_f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fmaxnm v0.4s, v0.4s, v1.4s
; CHECK-NEXT:    fmaxnmv s1, v2.4s
; CHECK-NEXT:    fmaxnmv s0, v0.4s
; CHECK-NEXT:    fmax s0, s0, s1
; CHECK-NEXT:    ret
  %r1 = call float @llvm.vector.reduce.fmax.v8f32(<8 x float> %a)
  %r2 = call float @llvm.vector.reduce.fmax.v4f32(<4 x float> %b)
  %r = call float @llvm.maximum.f32(float %r1, float %r2)
  ret float %r
}
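
; Integer reductions.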
define i32 @add_i32(<8 x i32> %a, <4 x i32> %b) {
; CHECK-LABEL: add_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    add v0.4s, v0.4s, v1.4s
; CHECK-NEXT:    add v0.4s, v0.4s, v2.4s
; CHECK-NEXT:    addv s0, v0.4s
; CHECK-NEXT:    fmov w0, s0
; CHECK-NEXT:    ret
  %r1 = call i32 @llvm.vector.reduce.add.i32.v8i32(<8 x i32> %a)
  %r2 = call i32 @llvm.vector.reduce.add.i32.v4i32(<4 x i32> %b)
  %r = add i32 %r1, %r2
  ret i32 %r
}

define i16 @add_ext_i16(<16 x i8> %a, <16 x i8> %b) {
; CHECK-LABEL: add_ext_i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    uaddlp v1.8h, v1.16b
; CHECK-NEXT:    uadalp v1.8h, v0.16b
; CHECK-NEXT:    addv h0, v1.8h
; CHECK-NEXT:    fmov w0, s0
; CHECK-NEXT:    ret
  %ae = zext <16 x i8> %a to <16 x i16>
  %be = zext <16 x i8> %b to <16 x i16>
  %r1 = call i16 @llvm.vector.reduce.add.i16.v16i16(<16 x i16> %ae)
  %r2 = call i16 @llvm.vector.reduce.add.i16.v16i16(<16 x i16> %be)
  %r = add i16 %r1, %r2
  ret i16 %r
}

define i16 @add_ext_v32i16(<32 x i8> %a, <16 x i8> %b) {
; CHECK-LABEL: add_ext_v32i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    uaddl2 v3.8h, v0.16b, v1.16b
; CHECK-NEXT:    uaddl v0.8h, v0.8b, v1.8b
; CHECK-NEXT:    add v0.8h, v0.8h, v3.8h
; CHECK-NEXT:    uadalp v0.8h, v2.16b
; CHECK-NEXT:    addv h0, v0.8h
; CHECK-NEXT:    fmov w0, s0
; CHECK-NEXT:    ret
  %ae = zext <32 x i8> %a to <32 x i16>
  %be = zext <16 x i8> %b to <16 x i16>
  %r1 = call i16 @llvm.vector.reduce.add.i16.v32i16(<32 x i16> %ae)
  %r2 = call i16 @llvm.vector.reduce.add.i16.v16i16(<16 x i16> %be)
  %r = add i16 %r1, %r2
  ret i16 %r
}

define i32 @mul_i32(<8 x i32> %a, <4 x i32> %b) {
; CHECK-LABEL: mul_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mul v0.4s, v0.4s, v1.4s
; CHECK-NEXT:    mul v0.4s, v0.4s, v2.4s
; CHECK-NEXT:    ext v1.16b, v0.16b, v0.16b, #8
; CHECK-NEXT:    mul v0.2s, v0.2s, v1.2s
; CHECK-NEXT:    mov w8, v0.s[1]
; CHECK-NEXT:    fmov w9, s0
; CHECK-NEXT:    mul w0, w9, w8
; CHECK-NEXT:    ret
  %r1 = call i32 @llvm.vector.reduce.mul.i32.v8i32(<8 x i32> %a)
  %r2 = call i32 @llvm.vector.reduce.mul.i32.v4i32(<4 x i32> %b)
  %r = mul i32 %r1, %r2
  ret i32 %r
}

define i32 @and_i32(<8 x i32> %a, <4 x i32> %b) {
; CHECK-LABEL: and_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    and v0.16b, v0.16b, v1.16b
; CHECK-NEXT:    and v0.16b, v0.16b, v2.16b
; CHECK-NEXT:    ext v1.16b, v0.16b, v0.16b, #8
; CHECK-NEXT:    and v0.8b, v0.8b, v1.8b
; CHECK-NEXT:    fmov x8, d0
; CHECK-NEXT:    lsr x9, x8, #32
; CHECK-NEXT:    and w0, w8, w9
; CHECK-NEXT:    ret
  %r1 = call i32 @llvm.vector.reduce.and.i32.v8i32(<8 x i32> %a)
  %r2 = call i32 @llvm.vector.reduce.and.i32.v4i32(<4 x i32> %b)
  %r = and i32 %r1, %r2
  ret i32 %r
}

define i32 @or_i32(<8 x i32> %a, <4 x i32> %b) {
; CHECK-LABEL: or_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    orr v0.16b, v0.16b, v1.16b
; CHECK-NEXT:    orr v0.16b, v0.16b, v2.16b
; CHECK-NEXT:    ext v1.16b, v0.16b, v0.16b, #8
; CHECK-NEXT:    orr v0.8b, v0.8b, v1.8b
; CHECK-NEXT:    fmov x8, d0
; CHECK-NEXT:    lsr x9, x8, #32
; CHECK-NEXT:    orr w0, w8, w9
; CHECK-NEXT:    ret
  %r1 = call i32 @llvm.vector.reduce.or.i32.v8i32(<8 x i32> %a)
  %r2 = call i32 @llvm.vector.reduce.or.i32.v4i32(<4 x i32> %b)
  %r = or i32 %r1, %r2
  ret i32 %r
}

define i32 @xor_i32(<8 x i32> %a, <4 x i32> %b) {
; CHECK-LABEL: xor_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    eor v0.16b, v0.16b, v1.16b
; CHECK-NEXT:    eor v0.16b, v0.16b, v2.16b
; CHECK-NEXT:    ext v1.16b, v0.16b, v0.16b, #8
; CHECK-NEXT:    eor v0.8b, v0.8b, v1.8b
; CHECK-NEXT:    fmov x8, d0
; CHECK-NEXT:    lsr x9, x8, #32
; CHECK-NEXT:    eor w0, w8, w9
; CHECK-NEXT:    ret
  %r1 = call i32 @llvm.vector.reduce.xor.i32.v8i32(<8 x i32> %a)
  %r2 = call i32 @llvm.vector.reduce.xor.i32.v4i32(<4 x i32> %b)
  %r = xor i32 %r1, %r2
  ret i32 %r
}

define i32 @umin_i32(<8 x i32> %a, <4 x i32> %b) {
; CHECK-LABEL: umin_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    umin v0.4s, v0.4s, v1.4s
; CHECK-NEXT:    umin v0.4s, v0.4s, v2.4s
; CHECK-NEXT:    uminv s0, v0.4s
; CHECK-NEXT:    fmov w0, s0
; CHECK-NEXT:    ret
  %r1 = call i32 @llvm.vector.reduce.umin.i32.v8i32(<8 x i32> %a)
  %r2 = call i32 @llvm.vector.reduce.umin.i32.v4i32(<4 x i32> %b)
  %r = call i32 @llvm.umin.i32(i32 %r1, i32 %r2)
  ret i32 %r
}

define i32 @umax_i32(<8 x i32> %a, <4 x i32> %b) {
; CHECK-LABEL: umax_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    umax v0.4s, v0.4s, v1.4s
; CHECK-NEXT:    umax v0.4s, v0.4s, v2.4s
; CHECK-NEXT:    umaxv s0, v0.4s
; CHECK-NEXT:    fmov w0, s0
; CHECK-NEXT:    ret
  %r1 = call i32 @llvm.vector.reduce.umax.i32.v8i32(<8 x i32> %a)
  %r2 = call i32 @llvm.vector.reduce.umax.i32.v4i32(<4 x i32> %b)
  %r = call i32 @llvm.umax.i32(i32 %r1, i32 %r2)
  ret i32 %r
}

define i32 @smin_i32(<8 x i32> %a, <4 x i32> %b) {
; CHECK-LABEL: smin_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    smin v0.4s, v0.4s, v1.4s
; CHECK-NEXT:    smin v0.4s, v0.4s, v2.4s
; CHECK-NEXT:    sminv s0, v0.4s
; CHECK-NEXT:    fmov w0, s0
; CHECK-NEXT:    ret
  %r1 = call i32 @llvm.vector.reduce.smin.i32.v8i32(<8 x i32> %a)
  %r2 = call i32 @llvm.vector.reduce.smin.i32.v4i32(<4 x i32> %b)
  %r = call i32 @llvm.smin.i32(i32 %r1, i32 %r2)
  ret i32 %r
}

define i32 @smax_i32(<8 x i32> %a, <4 x i32> %b) {
; CHECK-LABEL: smax_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    smax v0.4s, v0.4s, v1.4s
; CHECK-NEXT:    smax v0.4s, v0.4s, v2.4s
; CHECK-NEXT:    smaxv s0, v0.4s
; CHECK-NEXT:    fmov w0, s0
; CHECK-NEXT:    ret
  %r1 = call i32 @llvm.vector.reduce.smax.i32.v8i32(<8 x i32> %a)
  %r2 = call i32 @llvm.vector.reduce.smax.i32.v4i32(<4 x i32> %b)
  %r = call i32 @llvm.smax.i32(i32 %r1, i32 %r2)
  ret i32 %r
}

declare float @llvm.vector.reduce.fadd.f32.v8f32(float, <8 x float>)
declare float @llvm.vector.reduce.fadd.f32.v4f32(float, <4 x float>)
declare float @llvm.vector.reduce.fmul.f32.v8f32(float, <8 x float>)
declare float @llvm.vector.reduce.fmul.f32.v4f32(float, <4 x float>)
declare float @llvm.vector.reduce.fmin.v8f32(<8 x float>)
declare float @llvm.vector.reduce.fmin.v4f32(<4 x float>)
declare float @llvm.vector.reduce.fmax.v8f32(<8 x float>)
declare float @llvm.vector.reduce.fmax.v4f32(<4 x float>)
declare float @llvm.vector.reduce.fminimum.v8f32(<8 x float>)
declare float @llvm.vector.reduce.fminimum.v4f32(<4 x float>)
declare float @llvm.vector.reduce.fmaximum.v8f32(<8 x float>)
declare float @llvm.vector.reduce.fmaximum.v4f32(<4 x float>)
declare i32 @llvm.vector.reduce.add.i32.v8i32(<8 x i32>)
declare i32 @llvm.vector.reduce.add.i32.v4i32(<4 x i32>)
declare i16 @llvm.vector.reduce.add.i16.v32i16(<32 x i16>)
declare i16 @llvm.vector.reduce.add.i16.v16i16(<16 x i16>)
declare i32 @llvm.vector.reduce.mul.i32.v8i32(<8 x i32>)
declare i32 @llvm.vector.reduce.mul.i32.v4i32(<4 x i32>)
declare i32 @llvm.vector.reduce.and.i32.v8i32(<8 x i32>)
declare i32 @llvm.vector.reduce.and.i32.v4i32(<4 x i32>)
declare i32 @llvm.vector.reduce.or.i32.v8i32(<8 x i32>)
declare i32 @llvm.vector.reduce.or.i32.v4i32(<4 x i32>)
declare i32 @llvm.vector.reduce.xor.i32.v8i32(<8 x i32>)
declare i32 @llvm.vector.reduce.xor.i32.v4i32(<4 x i32>)
declare i32 @llvm.vector.reduce.umin.i32.v8i32(<8 x i32>)
declare i32 @llvm.vector.reduce.umin.i32.v4i32(<4 x i32>)
declare i32 @llvm.vector.reduce.umax.i32.v8i32(<8 x i32>)
declare i32 @llvm.vector.reduce.umax.i32.v4i32(<4 x i32>)
declare i32 @llvm.vector.reduce.smin.i32.v8i32(<8 x i32>)
declare i32 @llvm.vector.reduce.smin.i32.v4i32(<4 x i32>)
declare i32 @llvm.vector.reduce.smax.i32.v8i32(<8 x i32>)
declare i32 @llvm.vector.reduce.smax.i32.v4i32(<4 x i32>)
declare float @llvm.minnum.f32(float, float)
declare float @llvm.maxnum.f32(float, float)
declare float @llvm.minimum.f32(float, float)
declare float @llvm.maximum.f32(float, float)
declare i32 @llvm.umin.i32(i32, i32)
declare i32 @llvm.umax.i32(i32, i32)
declare i32 @llvm.smin.i32(i32, i32)
declare i32 @llvm.smax.i32(i32, i32)