We would like to start pushing -mcpu=generic towards enabling the set of
features that improves performance for some CPUs, without hurting any
others. A blend of the performance options hopefully beneficial to all
CPUs. The largest part of that is enabling in-order scheduling using the
Cortex-A55 schedule model. This is similar to the Arm backend change
from eecb353d0e which made -mcpu=generic perform in-order scheduling
using the cortex-a8 schedule model.
The idea is that in-order CPUs require the most help in instruction
scheduling, whereas out-of-order cpus can for the most part out-of-order
schedule around different codegen. Our benchmarking suggests that
hypothesis holds. When running on an in-order core this improved
performance by 3.8% geomean on a set of DSP workloads, 2% geomean on
some other embedded benchmark and between 1% and 1.8% on a set of
singlecore and multicore workloads, all running on a Cortex-A55 cluster.
On an out-of-order cpu the results are a lot more noisy but show flat
performance or an improvement. On the set of DSP and embedded
benchmarks, run on a Cortex-A78 there was a very noisy 1% speed
improvement. Using the most detailed results I could find, SPEC2006 runs
on a Neoverse N1 show a small increase in instruction count (+0.127%),
but a decrease in cycle counts (-0.155%, on average). The instruction
count is very low noise, the cycle count is more noisy with a 0.15%
decrease not being significant. SPEC2k17 shows a small decrease (-0.2%)
in instruction count leading to a -0.296% decrease in cycle count. These
results are within noise margins but tend to show a small improvement in
general.
When specifying an Apple target, clang will set "-target-cpu apple-a7"
on the command line, so should not be affected by this change when
running from clang. This also doesn't enable more runtime unrolling like
-mcpu=cortex-a55 does, only changing the schedule used.
A lot of existing tests have been updated. This is a summary of the important
differences:
- Most changes are the same instructions in a different order.
- Sometimes this leads to very minor inefficiencies, such as requiring
an extra mov to move variables into r0/v0 for the return value of a test
function.
- misched-fusion.ll was no longer fusing the pairs of instructions it
should, as per D110561. I've changed the schedule used in the test
for now.
- neon-mla-mls.ll now uses "mul; sub" as opposed to "neg; mla" due to
the different latencies. This seems fine to me.
- Some SVE tests do not always remove movprfx where they did before due
to different register allocation giving different destructive forms.
- The tests argument-blocks-array-of-struct.ll and arm64-windows-calls.ll
produce two LDR where they previously produced an LDP due to
store-pair-suppress kicking in.
- arm64-ldp.ll and arm64-neon-copy.ll are missing pre/postinc on LDP.
- Some tests such as arm64-neon-mul-div.ll and
ragreedy-local-interval-cost.ll have more, less or just different
spilling.
- In aarch64_generated_funcs.ll.generated.expected one part of the
function is no longer outlined. Interestingly, if I switch this to use
any other schedule model, even less is outlined.
Some of these are expected to happen, such as differences in outlining
or register spilling. There will be places where these result in worse
codegen, places where they are better, with the SPEC instruction counts
suggesting it is not a decrease overall, on average.
Differential Revision: https://reviews.llvm.org/D110830
531 lines
15 KiB
LLVM
531 lines
15 KiB
LLVM
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
|
|
; RUN: llc < %s | FileCheck %s
|
|
|
|
target triple = "aarch64-unknown-linux-gnu"
|
|
|
|
define i8 @test_lane0_16xi8(<vscale x 16 x i8> %a) #0 {
|
|
; CHECK-LABEL: test_lane0_16xi8:
|
|
; CHECK: // %bb.0:
|
|
; CHECK-NEXT: fmov w0, s0
|
|
; CHECK-NEXT: ret
|
|
%b = extractelement <vscale x 16 x i8> %a, i32 0
|
|
ret i8 %b
|
|
}
|
|
|
|
define i8 @test_lane15_16xi8(<vscale x 16 x i8> %a) #0 {
|
|
; CHECK-LABEL: test_lane15_16xi8:
|
|
; CHECK: // %bb.0:
|
|
; CHECK-NEXT: umov w0, v0.b[15]
|
|
; CHECK-NEXT: ret
|
|
%b = extractelement <vscale x 16 x i8> %a, i32 15
|
|
ret i8 %b
|
|
}
|
|
|
|
define i8 @test_lane16_16xi8(<vscale x 16 x i8> %a) #0 {
|
|
; CHECK-LABEL: test_lane16_16xi8:
|
|
; CHECK: // %bb.0:
|
|
; CHECK-NEXT: mov z0.b, z0.b[16]
|
|
; CHECK-NEXT: fmov w0, s0
|
|
; CHECK-NEXT: ret
|
|
%b = extractelement <vscale x 16 x i8> %a, i32 16
|
|
ret i8 %b
|
|
}
|
|
|
|
define i16 @test_lane0_8xi16(<vscale x 8 x i16> %a) #0 {
|
|
; CHECK-LABEL: test_lane0_8xi16:
|
|
; CHECK: // %bb.0:
|
|
; CHECK-NEXT: fmov w0, s0
|
|
; CHECK-NEXT: ret
|
|
%b = extractelement <vscale x 8 x i16> %a, i32 0
|
|
ret i16 %b
|
|
}
|
|
|
|
define i16 @test_lane7_8xi16(<vscale x 8 x i16> %a) #0 {
|
|
; CHECK-LABEL: test_lane7_8xi16:
|
|
; CHECK: // %bb.0:
|
|
; CHECK-NEXT: umov w0, v0.h[7]
|
|
; CHECK-NEXT: ret
|
|
%b = extractelement <vscale x 8 x i16> %a, i32 7
|
|
ret i16 %b
|
|
}
|
|
|
|
define i16 @test_lane8_8xi16(<vscale x 8 x i16> %a) #0 {
|
|
; CHECK-LABEL: test_lane8_8xi16:
|
|
; CHECK: // %bb.0:
|
|
; CHECK-NEXT: mov z0.h, z0.h[8]
|
|
; CHECK-NEXT: fmov w0, s0
|
|
; CHECK-NEXT: ret
|
|
%b = extractelement <vscale x 8 x i16> %a, i32 8
|
|
ret i16 %b
|
|
}
|
|
|
|
define i32 @test_lane0_4xi32(<vscale x 4 x i32> %a) #0 {
|
|
; CHECK-LABEL: test_lane0_4xi32:
|
|
; CHECK: // %bb.0:
|
|
; CHECK-NEXT: fmov w0, s0
|
|
; CHECK-NEXT: ret
|
|
%b = extractelement <vscale x 4 x i32> %a, i32 0
|
|
ret i32 %b
|
|
}
|
|
|
|
define i32 @test_lane3_4xi32(<vscale x 4 x i32> %a) #0 {
|
|
; CHECK-LABEL: test_lane3_4xi32:
|
|
; CHECK: // %bb.0:
|
|
; CHECK-NEXT: mov w0, v0.s[3]
|
|
; CHECK-NEXT: ret
|
|
%b = extractelement <vscale x 4 x i32> %a, i32 3
|
|
ret i32 %b
|
|
}
|
|
|
|
define i32 @test_lane4_4xi32(<vscale x 4 x i32> %a) #0 {
|
|
; CHECK-LABEL: test_lane4_4xi32:
|
|
; CHECK: // %bb.0:
|
|
; CHECK-NEXT: mov z0.s, z0.s[4]
|
|
; CHECK-NEXT: fmov w0, s0
|
|
; CHECK-NEXT: ret
|
|
%b = extractelement <vscale x 4 x i32> %a, i32 4
|
|
ret i32 %b
|
|
}
|
|
|
|
define i64 @test_lane0_2xi64(<vscale x 2 x i64> %a) #0 {
|
|
; CHECK-LABEL: test_lane0_2xi64:
|
|
; CHECK: // %bb.0:
|
|
; CHECK-NEXT: fmov x0, d0
|
|
; CHECK-NEXT: ret
|
|
%b = extractelement <vscale x 2 x i64> %a, i32 0
|
|
ret i64 %b
|
|
}
|
|
|
|
define i64 @test_lane1_2xi64(<vscale x 2 x i64> %a) #0 {
|
|
; CHECK-LABEL: test_lane1_2xi64:
|
|
; CHECK: // %bb.0:
|
|
; CHECK-NEXT: mov x0, v0.d[1]
|
|
; CHECK-NEXT: ret
|
|
%b = extractelement <vscale x 2 x i64> %a, i32 1
|
|
ret i64 %b
|
|
}
|
|
|
|
define i64 @test_lane2_2xi64(<vscale x 2 x i64> %a) #0 {
|
|
; CHECK-LABEL: test_lane2_2xi64:
|
|
; CHECK: // %bb.0:
|
|
; CHECK-NEXT: mov z0.d, z0.d[2]
|
|
; CHECK-NEXT: fmov x0, d0
|
|
; CHECK-NEXT: ret
|
|
%b = extractelement <vscale x 2 x i64> %a, i32 2
|
|
ret i64 %b
|
|
}
|
|
|
|
define half @test_lane0_8xf16(<vscale x 8 x half> %a) #0 {
|
|
; CHECK-LABEL: test_lane0_8xf16:
|
|
; CHECK: // %bb.0:
|
|
; CHECK-NEXT: // kill: def $h0 killed $h0 killed $z0
|
|
; CHECK-NEXT: ret
|
|
%b = extractelement <vscale x 8 x half> %a, i32 0
|
|
ret half %b
|
|
}
|
|
|
|
define half @test_lane7_8xf16(<vscale x 8 x half> %a) #0 {
|
|
; CHECK-LABEL: test_lane7_8xf16:
|
|
; CHECK: // %bb.0:
|
|
; CHECK-NEXT: mov z0.h, z0.h[7]
|
|
; CHECK-NEXT: // kill: def $h0 killed $h0 killed $z0
|
|
; CHECK-NEXT: ret
|
|
%b = extractelement <vscale x 8 x half> %a, i32 7
|
|
ret half %b
|
|
}
|
|
|
|
define half @test_lane8_8xf16(<vscale x 8 x half> %a) #0 {
|
|
; CHECK-LABEL: test_lane8_8xf16:
|
|
; CHECK: // %bb.0:
|
|
; CHECK-NEXT: mov z0.h, z0.h[8]
|
|
; CHECK-NEXT: // kill: def $h0 killed $h0 killed $z0
|
|
; CHECK-NEXT: ret
|
|
%b = extractelement <vscale x 8 x half> %a, i32 8
|
|
ret half %b
|
|
}
|
|
|
|
define half @test_lane0_4xf16(<vscale x 4 x half> %a) #0 {
|
|
; CHECK-LABEL: test_lane0_4xf16:
|
|
; CHECK: // %bb.0:
|
|
; CHECK-NEXT: // kill: def $h0 killed $h0 killed $z0
|
|
; CHECK-NEXT: ret
|
|
%b = extractelement <vscale x 4 x half> %a, i32 0
|
|
ret half %b
|
|
}
|
|
|
|
define half @test_lane3_4xf16(<vscale x 4 x half> %a) #0 {
|
|
; CHECK-LABEL: test_lane3_4xf16:
|
|
; CHECK: // %bb.0:
|
|
; CHECK-NEXT: mov z0.s, z0.s[3]
|
|
; CHECK-NEXT: // kill: def $h0 killed $h0 killed $z0
|
|
; CHECK-NEXT: ret
|
|
%b = extractelement <vscale x 4 x half> %a, i32 3
|
|
ret half %b
|
|
}
|
|
|
|
define half @test_lane4_4xf16(<vscale x 4 x half> %a) #0 {
|
|
; CHECK-LABEL: test_lane4_4xf16:
|
|
; CHECK: // %bb.0:
|
|
; CHECK-NEXT: mov z0.s, z0.s[4]
|
|
; CHECK-NEXT: // kill: def $h0 killed $h0 killed $z0
|
|
; CHECK-NEXT: ret
|
|
%b = extractelement <vscale x 4 x half> %a, i32 4
|
|
ret half %b
|
|
}
|
|
|
|
define half @test_lane0_2xf16(<vscale x 2 x half> %a) #0 {
|
|
; CHECK-LABEL: test_lane0_2xf16:
|
|
; CHECK: // %bb.0:
|
|
; CHECK-NEXT: // kill: def $h0 killed $h0 killed $z0
|
|
; CHECK-NEXT: ret
|
|
%b = extractelement <vscale x 2 x half> %a, i32 0
|
|
ret half %b
|
|
}
|
|
|
|
define half @test_lane1_2xf16(<vscale x 2 x half> %a) #0 {
|
|
; CHECK-LABEL: test_lane1_2xf16:
|
|
; CHECK: // %bb.0:
|
|
; CHECK-NEXT: mov z0.d, z0.d[1]
|
|
; CHECK-NEXT: // kill: def $h0 killed $h0 killed $z0
|
|
; CHECK-NEXT: ret
|
|
%b = extractelement <vscale x 2 x half> %a, i32 1
|
|
ret half %b
|
|
}
|
|
|
|
define half @test_lane2_2xf16(<vscale x 2 x half> %a) #0 {
|
|
; CHECK-LABEL: test_lane2_2xf16:
|
|
; CHECK: // %bb.0:
|
|
; CHECK-NEXT: mov z0.d, z0.d[2]
|
|
; CHECK-NEXT: // kill: def $h0 killed $h0 killed $z0
|
|
; CHECK-NEXT: ret
|
|
%b = extractelement <vscale x 2 x half> %a, i32 2
|
|
ret half %b
|
|
}
|
|
|
|
define float @test_lane0_4xf32(<vscale x 4 x float> %a) #0 {
|
|
; CHECK-LABEL: test_lane0_4xf32:
|
|
; CHECK: // %bb.0:
|
|
; CHECK-NEXT: // kill: def $s0 killed $s0 killed $z0
|
|
; CHECK-NEXT: ret
|
|
%b = extractelement <vscale x 4 x float> %a, i32 0
|
|
ret float %b
|
|
}
|
|
|
|
define float @test_lane3_4xf32(<vscale x 4 x float> %a) #0 {
|
|
; CHECK-LABEL: test_lane3_4xf32:
|
|
; CHECK: // %bb.0:
|
|
; CHECK-NEXT: mov z0.s, z0.s[3]
|
|
; CHECK-NEXT: // kill: def $s0 killed $s0 killed $z0
|
|
; CHECK-NEXT: ret
|
|
%b = extractelement <vscale x 4 x float> %a, i32 3
|
|
ret float %b
|
|
}
|
|
|
|
define float @test_lane4_4xf32(<vscale x 4 x float> %a) #0 {
|
|
; CHECK-LABEL: test_lane4_4xf32:
|
|
; CHECK: // %bb.0:
|
|
; CHECK-NEXT: mov z0.s, z0.s[4]
|
|
; CHECK-NEXT: // kill: def $s0 killed $s0 killed $z0
|
|
; CHECK-NEXT: ret
|
|
%b = extractelement <vscale x 4 x float> %a, i32 4
|
|
ret float %b
|
|
}
|
|
|
|
define float @test_lane0_2xf32(<vscale x 2 x float> %a) #0 {
|
|
; CHECK-LABEL: test_lane0_2xf32:
|
|
; CHECK: // %bb.0:
|
|
; CHECK-NEXT: // kill: def $s0 killed $s0 killed $z0
|
|
; CHECK-NEXT: ret
|
|
%b = extractelement <vscale x 2 x float> %a, i32 0
|
|
ret float %b
|
|
}
|
|
|
|
define float @test_lane1_2xf32(<vscale x 2 x float> %a) #0 {
|
|
; CHECK-LABEL: test_lane1_2xf32:
|
|
; CHECK: // %bb.0:
|
|
; CHECK-NEXT: mov z0.d, z0.d[1]
|
|
; CHECK-NEXT: // kill: def $s0 killed $s0 killed $z0
|
|
; CHECK-NEXT: ret
|
|
%b = extractelement <vscale x 2 x float> %a, i32 1
|
|
ret float %b
|
|
}
|
|
|
|
define float @test_lane2_2xf32(<vscale x 2 x float> %a) #0 {
|
|
; CHECK-LABEL: test_lane2_2xf32:
|
|
; CHECK: // %bb.0:
|
|
; CHECK-NEXT: mov z0.d, z0.d[2]
|
|
; CHECK-NEXT: // kill: def $s0 killed $s0 killed $z0
|
|
; CHECK-NEXT: ret
|
|
%b = extractelement <vscale x 2 x float> %a, i32 2
|
|
ret float %b
|
|
}
|
|
|
|
define double @test_lane0_2xf64(<vscale x 2 x double> %a) #0 {
|
|
; CHECK-LABEL: test_lane0_2xf64:
|
|
; CHECK: // %bb.0:
|
|
; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0
|
|
; CHECK-NEXT: ret
|
|
%b = extractelement <vscale x 2 x double> %a, i32 0
|
|
ret double %b
|
|
}
|
|
|
|
define double @test_lane1_2xf64(<vscale x 2 x double> %a) #0 {
|
|
; CHECK-LABEL: test_lane1_2xf64:
|
|
; CHECK: // %bb.0:
|
|
; CHECK-NEXT: mov z0.d, z0.d[1]
|
|
; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0
|
|
; CHECK-NEXT: ret
|
|
%b = extractelement <vscale x 2 x double> %a, i32 1
|
|
ret double %b
|
|
}
|
|
|
|
define double @test_lane2_2xf64(<vscale x 2 x double> %a) #0 {
|
|
; CHECK-LABEL: test_lane2_2xf64:
|
|
; CHECK: // %bb.0:
|
|
; CHECK-NEXT: mov z0.d, z0.d[2]
|
|
; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0
|
|
; CHECK-NEXT: ret
|
|
%b = extractelement <vscale x 2 x double> %a, i32 2
|
|
ret double %b
|
|
}
|
|
|
|
define i8 @test_lanex_16xi8(<vscale x 16 x i8> %a, i32 %x) #0 {
|
|
; CHECK-LABEL: test_lanex_16xi8:
|
|
; CHECK: // %bb.0:
|
|
; CHECK-NEXT: // kill: def $w0 killed $w0 def $x0
|
|
; CHECK-NEXT: sxtw x8, w0
|
|
; CHECK-NEXT: whilels p0.b, xzr, x8
|
|
; CHECK-NEXT: lastb w0, p0, z0.b
|
|
; CHECK-NEXT: ret
|
|
%b = extractelement <vscale x 16 x i8> %a, i32 %x
|
|
ret i8 %b
|
|
}
|
|
|
|
define i16 @test_lanex_8xi16(<vscale x 8 x i16> %a, i32 %x) #0 {
|
|
; CHECK-LABEL: test_lanex_8xi16:
|
|
; CHECK: // %bb.0:
|
|
; CHECK-NEXT: // kill: def $w0 killed $w0 def $x0
|
|
; CHECK-NEXT: sxtw x8, w0
|
|
; CHECK-NEXT: whilels p0.h, xzr, x8
|
|
; CHECK-NEXT: lastb w0, p0, z0.h
|
|
; CHECK-NEXT: ret
|
|
%b = extractelement <vscale x 8 x i16> %a, i32 %x
|
|
ret i16 %b
|
|
}
|
|
|
|
define i32 @test_lanex_4xi32(<vscale x 4 x i32> %a, i32 %x) #0 {
|
|
; CHECK-LABEL: test_lanex_4xi32:
|
|
; CHECK: // %bb.0:
|
|
; CHECK-NEXT: // kill: def $w0 killed $w0 def $x0
|
|
; CHECK-NEXT: sxtw x8, w0
|
|
; CHECK-NEXT: whilels p0.s, xzr, x8
|
|
; CHECK-NEXT: lastb w0, p0, z0.s
|
|
; CHECK-NEXT: ret
|
|
%b = extractelement <vscale x 4 x i32> %a, i32 %x
|
|
ret i32 %b
|
|
}
|
|
|
|
define i64 @test_lanex_2xi64(<vscale x 2 x i64> %a, i32 %x) #0 {
|
|
; CHECK-LABEL: test_lanex_2xi64:
|
|
; CHECK: // %bb.0:
|
|
; CHECK-NEXT: // kill: def $w0 killed $w0 def $x0
|
|
; CHECK-NEXT: sxtw x8, w0
|
|
; CHECK-NEXT: whilels p0.d, xzr, x8
|
|
; CHECK-NEXT: lastb x0, p0, z0.d
|
|
; CHECK-NEXT: ret
|
|
%b = extractelement <vscale x 2 x i64> %a, i32 %x
|
|
ret i64 %b
|
|
}
|
|
|
|
define half @test_lanex_8xf16(<vscale x 8 x half> %a, i32 %x) #0 {
|
|
; CHECK-LABEL: test_lanex_8xf16:
|
|
; CHECK: // %bb.0:
|
|
; CHECK-NEXT: // kill: def $w0 killed $w0 def $x0
|
|
; CHECK-NEXT: sxtw x8, w0
|
|
; CHECK-NEXT: whilels p0.h, xzr, x8
|
|
; CHECK-NEXT: lastb h0, p0, z0.h
|
|
; CHECK-NEXT: ret
|
|
%b = extractelement <vscale x 8 x half> %a, i32 %x
|
|
ret half %b
|
|
}
|
|
|
|
define half @test_lanex_4xf16(<vscale x 4 x half> %a, i32 %x) #0 {
|
|
; CHECK-LABEL: test_lanex_4xf16:
|
|
; CHECK: // %bb.0:
|
|
; CHECK-NEXT: // kill: def $w0 killed $w0 def $x0
|
|
; CHECK-NEXT: sxtw x8, w0
|
|
; CHECK-NEXT: whilels p0.s, xzr, x8
|
|
; CHECK-NEXT: lastb h0, p0, z0.h
|
|
; CHECK-NEXT: ret
|
|
%b = extractelement <vscale x 4 x half> %a, i32 %x
|
|
ret half %b
|
|
}
|
|
|
|
define half @test_lanex_2xf16(<vscale x 2 x half> %a, i32 %x) #0 {
|
|
; CHECK-LABEL: test_lanex_2xf16:
|
|
; CHECK: // %bb.0:
|
|
; CHECK-NEXT: // kill: def $w0 killed $w0 def $x0
|
|
; CHECK-NEXT: sxtw x8, w0
|
|
; CHECK-NEXT: whilels p0.d, xzr, x8
|
|
; CHECK-NEXT: lastb h0, p0, z0.h
|
|
; CHECK-NEXT: ret
|
|
%b = extractelement <vscale x 2 x half> %a, i32 %x
|
|
ret half %b
|
|
}
|
|
|
|
define float @test_lanex_4xf32(<vscale x 4 x float> %a, i32 %x) #0 {
|
|
; CHECK-LABEL: test_lanex_4xf32:
|
|
; CHECK: // %bb.0:
|
|
; CHECK-NEXT: // kill: def $w0 killed $w0 def $x0
|
|
; CHECK-NEXT: sxtw x8, w0
|
|
; CHECK-NEXT: whilels p0.s, xzr, x8
|
|
; CHECK-NEXT: lastb s0, p0, z0.s
|
|
; CHECK-NEXT: ret
|
|
%b = extractelement <vscale x 4 x float> %a, i32 %x
|
|
ret float %b
|
|
}
|
|
|
|
define float @test_lanex_2xf32(<vscale x 2 x float> %a, i32 %x) #0 {
|
|
; CHECK-LABEL: test_lanex_2xf32:
|
|
; CHECK: // %bb.0:
|
|
; CHECK-NEXT: // kill: def $w0 killed $w0 def $x0
|
|
; CHECK-NEXT: sxtw x8, w0
|
|
; CHECK-NEXT: whilels p0.d, xzr, x8
|
|
; CHECK-NEXT: lastb s0, p0, z0.s
|
|
; CHECK-NEXT: ret
|
|
%b = extractelement <vscale x 2 x float> %a, i32 %x
|
|
ret float %b
|
|
}
|
|
|
|
define double @test_lanex_2xf64(<vscale x 2 x double> %a, i32 %x) #0 {
|
|
; CHECK-LABEL: test_lanex_2xf64:
|
|
; CHECK: // %bb.0:
|
|
; CHECK-NEXT: // kill: def $w0 killed $w0 def $x0
|
|
; CHECK-NEXT: sxtw x8, w0
|
|
; CHECK-NEXT: whilels p0.d, xzr, x8
|
|
; CHECK-NEXT: lastb d0, p0, z0.d
|
|
; CHECK-NEXT: ret
|
|
%b = extractelement <vscale x 2 x double> %a, i32 %x
|
|
ret double %b
|
|
}
|
|
|
|
; Deliberately choose an index that is undefined
|
|
define i32 @test_undef_lane_4xi32(<vscale x 4 x i32> %a) #0 {
|
|
; CHECK-LABEL: test_undef_lane_4xi32:
|
|
; CHECK: // %bb.0:
|
|
; CHECK-NEXT: fmov w0, s0
|
|
; CHECK-NEXT: ret
|
|
%b = extractelement <vscale x 4 x i32> %a, i32 undef
|
|
ret i32 %b
|
|
}
|
|
|
|
define i8 @extract_of_insert_undef_16xi8(i8 %a) #0 {
|
|
; CHECK-LABEL: extract_of_insert_undef_16xi8:
|
|
; CHECK: // %bb.0:
|
|
; CHECK-NEXT: ret
|
|
%b = insertelement <vscale x 16 x i8> undef, i8 %a, i32 0
|
|
%c = extractelement <vscale x 16 x i8> %b, i32 0
|
|
ret i8 %c
|
|
}
|
|
|
|
define i8 @extract0_of_insert0_16xi8(<vscale x 16 x i8> %a, i8 %b) #0 {
|
|
; CHECK-LABEL: extract0_of_insert0_16xi8:
|
|
; CHECK: // %bb.0:
|
|
; CHECK-NEXT: ret
|
|
%c = insertelement <vscale x 16 x i8> %a, i8 %b, i32 0
|
|
%d = extractelement <vscale x 16 x i8> %c, i32 0
|
|
ret i8 %d
|
|
}
|
|
|
|
define i8 @extract64_of_insert64_16xi8(<vscale x 16 x i8> %a, i8 %b) #0 {
|
|
; CHECK-LABEL: extract64_of_insert64_16xi8:
|
|
; CHECK: // %bb.0:
|
|
; CHECK-NEXT: ret
|
|
%c = insertelement <vscale x 16 x i8> %a, i8 %b, i32 64
|
|
%d = extractelement <vscale x 16 x i8> %c, i32 64
|
|
ret i8 %d
|
|
}
|
|
|
|
define i8 @extract_of_insert_diff_lanes_16xi8(<vscale x 16 x i8> %a, i8 %b) #0 {
|
|
; CHECK-LABEL: extract_of_insert_diff_lanes_16xi8:
|
|
; CHECK: // %bb.0:
|
|
; CHECK-NEXT: umov w0, v0.b[3]
|
|
; CHECK-NEXT: ret
|
|
%c = insertelement <vscale x 16 x i8> %a, i8 %b, i32 0
|
|
%d = extractelement <vscale x 16 x i8> %c, i32 3
|
|
ret i8 %d
|
|
}
|
|
|
|
define i8 @test_lane0_zero_16xi8(<vscale x 16 x i8> %a) #0 {
|
|
; CHECK-LABEL: test_lane0_zero_16xi8:
|
|
; CHECK: // %bb.0:
|
|
; CHECK-NEXT: mov w0, wzr
|
|
; CHECK-NEXT: ret
|
|
%b = extractelement <vscale x 16 x i8> zeroinitializer, i32 0
|
|
ret i8 %b
|
|
}
|
|
|
|
; The DAG combiner should fold the extract of a splat to give element zero
|
|
; of the splat, i.e. %x. If the index is beyond the end of the scalable
|
|
; vector the result is undefined anyway.
|
|
define i64 @test_lanex_splat_2xi64(i64 %x, i32 %y) #0 {
|
|
; CHECK-LABEL: test_lanex_splat_2xi64:
|
|
; CHECK: // %bb.0:
|
|
; CHECK-NEXT: ret
|
|
%a = insertelement <vscale x 2 x i64> undef, i64 %x, i32 0
|
|
%b = shufflevector <vscale x 2 x i64> %a, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
|
|
%c = extractelement <vscale x 2 x i64> %b, i32 %y
|
|
ret i64 %c
|
|
}
|
|
|
|
define i1 @test_lane0_16xi1(<vscale x 16 x i1> %a) #0 {
|
|
; CHECK-LABEL: test_lane0_16xi1:
|
|
; CHECK: // %bb.0:
|
|
; CHECK-NEXT: mov z0.b, p0/z, #1 // =0x1
|
|
; CHECK-NEXT: fmov w8, s0
|
|
; CHECK-NEXT: and w0, w8, #0x1
|
|
; CHECK-NEXT: ret
|
|
%b = extractelement <vscale x 16 x i1> %a, i32 0
|
|
ret i1 %b
|
|
}
|
|
|
|
define i1 @test_lane9_8xi1(<vscale x 8 x i1> %a) #0 {
|
|
; CHECK-LABEL: test_lane9_8xi1:
|
|
; CHECK: // %bb.0:
|
|
; CHECK-NEXT: mov z0.h, p0/z, #1 // =0x1
|
|
; CHECK-NEXT: mov z0.h, z0.h[9]
|
|
; CHECK-NEXT: fmov w8, s0
|
|
; CHECK-NEXT: and w0, w8, #0x1
|
|
; CHECK-NEXT: ret
|
|
%b = extractelement <vscale x 8 x i1> %a, i32 9
|
|
ret i1 %b
|
|
}
|
|
|
|
define i1 @test_lanex_4xi1(<vscale x 4 x i1> %a, i32 %x) #0 {
|
|
; CHECK-LABEL: test_lanex_4xi1:
|
|
; CHECK: // %bb.0:
|
|
; CHECK-NEXT: // kill: def $w0 killed $w0 def $x0
|
|
; CHECK-NEXT: sxtw x8, w0
|
|
; CHECK-NEXT: mov z0.s, p0/z, #1 // =0x1
|
|
; CHECK-NEXT: whilels p0.s, xzr, x8
|
|
; CHECK-NEXT: lastb w8, p0, z0.s
|
|
; CHECK-NEXT: and w0, w8, #0x1
|
|
; CHECK-NEXT: ret
|
|
%b = extractelement <vscale x 4 x i1> %a, i32 %x
|
|
ret i1 %b
|
|
}
|
|
|
|
define i1 @test_lane4_2xi1(<vscale x 2 x i1> %a) #0 {
|
|
; CHECK-LABEL: test_lane4_2xi1:
|
|
; CHECK: // %bb.0:
|
|
; CHECK-NEXT: mov z0.d, p0/z, #1 // =0x1
|
|
; CHECK-NEXT: mov z0.d, z0.d[4]
|
|
; CHECK-NEXT: fmov x8, d0
|
|
; CHECK-NEXT: and w0, w8, #0x1
|
|
; CHECK-NEXT: ret
|
|
%b = extractelement <vscale x 2 x i1> %a, i32 4
|
|
ret i1 %b
|
|
}
|
|
|
|
attributes #0 = { "target-features"="+sve" }
|