We would like to start pushing -mcpu=generic towards enabling the set of
features that improves performance for some CPUs without hurting any
others: a blend of performance options that is hopefully beneficial to
all CPUs. The largest part of that is enabling in-order scheduling using
the Cortex-A55 schedule model. This is similar to the Arm backend change
from eecb353d0e, which made -mcpu=generic perform in-order scheduling
using the Cortex-A8 schedule model.
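As a rough sketch (illustrative commands, not from the patch; input.ll
stands in for any module), the effect is visible by compiling the same
IR under the two CPU models and comparing the output:

  $ llc -mtriple=aarch64 -mcpu=generic input.ll -o generic.s
  $ llc -mtriple=aarch64 -mcpu=cortex-a55 input.ll -o a55.s
  $ diff generic.s a55.s

After this change the instruction ordering in generic.s should largely
match the Cortex-A55 output, though non-scheduling differences (such as
the unrolling noted below) can remain.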
The idea is that in-order CPUs require the most help from instruction
scheduling, whereas out-of-order CPUs can, for the most part, schedule
around differences in codegen. Our benchmarking suggests that hypothesis
holds. When running on an in-order core this improved performance by
3.8% geomean on a set of DSP workloads, 2% geomean on another set of
embedded benchmarks, and between 1% and 1.8% on a set of single-core and
multi-core workloads, all running on a Cortex-A55 cluster.
On an out-of-order CPU the results are much noisier but show flat
performance or an improvement. On the set of DSP and embedded
benchmarks, run on a Cortex-A78, there was a very noisy 1% speed
improvement. Using the most detailed results I could find, SPEC2006 runs
on a Neoverse N1 show a small increase in instruction count (+0.127%)
but a decrease in cycle count (-0.155% on average). The instruction
counts are very low noise; the cycle counts are noisier, with a 0.15%
decrease not being statistically significant. SPEC2017 shows a small
decrease (-0.2%) in instruction count, leading to a 0.296% decrease in
cycle count. These results are within noise margins but tend to show a
small improvement in general.
When specifying an Apple target, clang will set "-target-cpu apple-a7"
on the command line, so Apple targets should not be affected by this
change when compiling through clang. This change also does not enable
more runtime unrolling the way -mcpu=cortex-a55 does; it only changes
the schedule model used.
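For example (an illustrative invocation; the exact cc1 line varies by
clang version and Apple target), the selected CPU can be inspected via
the driver:

  $ clang --target=arm64-apple-darwin -### -c test.c 2>&1 \
      | grep -o '"-target-cpu" "[^"]*"'
  "-target-cpu" "apple-a7"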
A lot of existing tests have been updated. This is a summary of the important
differences:
- Most changes are the same instructions in a different order.
- Sometimes this leads to very minor inefficiencies, such as requiring
an extra mov to move variables into x0/v0 for the return value of a test
function.
- misched-fusion.ll was no longer fusing the pairs of instructions it
should, as per D110561. I've changed the schedule used in the test
for now.
- neon-mla-mls.ll now uses "mul; sub" as opposed to "neg; mla" due to
the different latencies (see the sketch after this list). This seems
fine to me.
- Some SVE tests do not always remove movprfx where they did before due
to different register allocation giving different destructive forms.
- The tests argument-blocks-array-of-struct.ll and arm64-windows-calls.ll
produce two LDRs where they previously produced an LDP, due to
store-pair-suppress kicking in.
- arm64-ldp.ll and arm64-neon-copy.ll are missing pre/post-increment
forms on LDP.
- Some tests such as arm64-neon-mul-div.ll and
ragreedy-local-interval-cost.ll have more, less, or just different
spilling.
- In aarch64_generated_funcs.ll.generated.expected one part of the
function is no longer outlined. Interestingly, if I switch this to use
any other schedule model, even less is outlined.
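For reference, the neon-mla-mls.ll difference mentioned above looks
roughly like this for computing b*c - a (illustrative registers, not
copied from the test):

  // before: negate the accumulator, then fold the multiply into an mla
  neg v0.4s, v0.4s
  mla v0.4s, v1.4s, v2.4s
  // after: issue the multiply first, then a plain subtract
  mul v1.4s, v1.4s, v2.4s
  sub v0.4s, v1.4s, v0.4s

Both sequences compute the same value; they just trade an accumulate
dependency for a separate subtract, which suits the Cortex-A55 model's
latencies better.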
Some of these are expected to happen, such as differences in outlining
or register spilling. There will be places where these changes result in
worse codegen and places where they are better, with the SPEC
instruction counts suggesting it is not a regression overall, on
average.
Differential Revision: https://reviews.llvm.org/D110830
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -verify-machineinstrs -O3 -mtriple=aarch64-unknown-unknown -enable-implicit-null-checks | FileCheck %s

; Basic test for implicit null check conversion - this is analogous to the
; file with the same name in the X86 tree, but adjusted to remove patterns
; related to memory folding of arithmetic (since aarch64 doesn't), and add
; a couple of aarch64 specific tests.

define i32 @imp_null_check_load_fallthrough(i32* %x) {
; CHECK-LABEL: imp_null_check_load_fallthrough:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:  .Ltmp0:
; CHECK-NEXT:    ldr w0, [x0] // on-fault: .LBB0_2
; CHECK-NEXT:  // %bb.1: // %not_null
; CHECK-NEXT:    ret
; CHECK-NEXT:  .LBB0_2:
; CHECK-NEXT:    mov w0, #42
; CHECK-NEXT:    ret
entry:
  %c = icmp eq i32* %x, null
  br i1 %c, label %is_null, label %not_null, !make.implicit !0

not_null:
  %t = load i32, i32* %x
  ret i32 %t

is_null:
  ret i32 42
}

define i32 @imp_null_check_load_reorder(i32* %x) {
; CHECK-LABEL: imp_null_check_load_reorder:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:  .Ltmp1:
; CHECK-NEXT:    ldr w0, [x0] // on-fault: .LBB1_2
; CHECK-NEXT:  // %bb.1: // %not_null
; CHECK-NEXT:    ret
; CHECK-NEXT:  .LBB1_2:
; CHECK-NEXT:    mov w0, #42
; CHECK-NEXT:    ret
entry:
  %c = icmp eq i32* %x, null
  br i1 %c, label %is_null, label %not_null, !make.implicit !0

is_null:
  ret i32 42

not_null:
  %t = load i32, i32* %x
  ret i32 %t
}

define i32 @imp_null_check_unordered_load(i32* %x) {
; CHECK-LABEL: imp_null_check_unordered_load:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:  .Ltmp2:
; CHECK-NEXT:    ldr w0, [x0] // on-fault: .LBB2_2
; CHECK-NEXT:  // %bb.1: // %not_null
; CHECK-NEXT:    ret
; CHECK-NEXT:  .LBB2_2:
; CHECK-NEXT:    mov w0, #42
; CHECK-NEXT:    ret
entry:
  %c = icmp eq i32* %x, null
  br i1 %c, label %is_null, label %not_null, !make.implicit !0

is_null:
  ret i32 42

not_null:
  %t = load atomic i32, i32* %x unordered, align 4
  ret i32 %t
}

; TODO: Can be converted into implicit check.
;; Probably could be implicit, but we're conservative for now
define i32 @imp_null_check_seq_cst_load(i32* %x) {
; CHECK-LABEL: imp_null_check_seq_cst_load:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    cbz x0, .LBB3_2
; CHECK-NEXT:  // %bb.1: // %not_null
; CHECK-NEXT:    ldar w0, [x0]
; CHECK-NEXT:    ret
; CHECK-NEXT:  .LBB3_2:
; CHECK-NEXT:    mov w0, #42
; CHECK-NEXT:    ret
entry:
  %c = icmp eq i32* %x, null
  br i1 %c, label %is_null, label %not_null, !make.implicit !0

is_null:
  ret i32 42

not_null:
  %t = load atomic i32, i32* %x seq_cst, align 4
  ret i32 %t
}

;; Might be memory mapped IO, so can't rely on fault behavior
define i32 @imp_null_check_volatile_load(i32* %x) {
; CHECK-LABEL: imp_null_check_volatile_load:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    cbz x0, .LBB4_2
; CHECK-NEXT:  // %bb.1: // %not_null
; CHECK-NEXT:    ldr w0, [x0]
; CHECK-NEXT:    ret
; CHECK-NEXT:  .LBB4_2:
; CHECK-NEXT:    mov w0, #42
; CHECK-NEXT:    ret
entry:
  %c = icmp eq i32* %x, null
  br i1 %c, label %is_null, label %not_null, !make.implicit !0

is_null:
  ret i32 42

not_null:
  %t = load volatile i32, i32* %x, align 4
  ret i32 %t
}

define i8 @imp_null_check_load_i8(i8* %x) {
; CHECK-LABEL: imp_null_check_load_i8:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:  .Ltmp3:
; CHECK-NEXT:    ldrb w0, [x0] // on-fault: .LBB5_2
; CHECK-NEXT:  // %bb.1: // %not_null
; CHECK-NEXT:    ret
; CHECK-NEXT:  .LBB5_2:
; CHECK-NEXT:    mov w0, #42
; CHECK-NEXT:    ret
entry:
  %c = icmp eq i8* %x, null
  br i1 %c, label %is_null, label %not_null, !make.implicit !0

is_null:
  ret i8 42

not_null:
  %t = load i8, i8* %x
  ret i8 %t
}

define i256 @imp_null_check_load_i256(i256* %x) {
; CHECK-LABEL: imp_null_check_load_i256:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    cbz x0, .LBB6_2
; CHECK-NEXT:  // %bb.1: // %not_null
; CHECK-NEXT:    ldp x2, x3, [x0, #16]
; CHECK-NEXT:    ldp x0, x1, [x0]
; CHECK-NEXT:    ret
; CHECK-NEXT:  .LBB6_2:
; CHECK-NEXT:    mov x1, xzr
; CHECK-NEXT:    mov x2, xzr
; CHECK-NEXT:    mov x3, xzr
; CHECK-NEXT:    mov w0, #42
; CHECK-NEXT:    ret
entry:
  %c = icmp eq i256* %x, null
  br i1 %c, label %is_null, label %not_null, !make.implicit !0

is_null:
  ret i256 42

not_null:
  %t = load i256, i256* %x
  ret i256 %t
}

define i32 @imp_null_check_gep_load(i32* %x) {
; CHECK-LABEL: imp_null_check_gep_load:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:  .Ltmp4:
; CHECK-NEXT:    ldr w0, [x0, #128] // on-fault: .LBB7_2
; CHECK-NEXT:  // %bb.1: // %not_null
; CHECK-NEXT:    ret
; CHECK-NEXT:  .LBB7_2:
; CHECK-NEXT:    mov w0, #42
; CHECK-NEXT:    ret
entry:
  %c = icmp eq i32* %x, null
  br i1 %c, label %is_null, label %not_null, !make.implicit !0

is_null:
  ret i32 42

not_null:
  %x.gep = getelementptr i32, i32* %x, i32 32
  %t = load i32, i32* %x.gep
  ret i32 %t
}

define i32 @imp_null_check_add_result(i32* %x, i32 %p) {
; CHECK-LABEL: imp_null_check_add_result:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:  .Ltmp5:
; CHECK-NEXT:    ldr w8, [x0] // on-fault: .LBB8_2
; CHECK-NEXT:  // %bb.1: // %not_null
; CHECK-NEXT:    add w0, w8, w1
; CHECK-NEXT:    ret
; CHECK-NEXT:  .LBB8_2:
; CHECK-NEXT:    mov w0, #42
; CHECK-NEXT:    ret
entry:
  %c = icmp eq i32* %x, null
  br i1 %c, label %is_null, label %not_null, !make.implicit !0

is_null:
  ret i32 42

not_null:
  %t = load i32, i32* %x
  %p1 = add i32 %t, %p
  ret i32 %p1
}

; Can hoist over a potential faulting instruction as long as we don't
; change the conditions under which the instruction faults.
define i32 @imp_null_check_hoist_over_udiv(i32* %x, i32 %a, i32 %b) {
; CHECK-LABEL: imp_null_check_hoist_over_udiv:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    cbz x0, .LBB9_2
; CHECK-NEXT:  // %bb.1: // %not_null
; CHECK-NEXT:    udiv w8, w1, w2
; CHECK-NEXT:    ldr w9, [x0]
; CHECK-NEXT:    add w0, w9, w8
; CHECK-NEXT:    ret
; CHECK-NEXT:  .LBB9_2:
; CHECK-NEXT:    mov w0, #42
; CHECK-NEXT:    ret
entry:
  %c = icmp eq i32* %x, null
  br i1 %c, label %is_null, label %not_null, !make.implicit !0

is_null:
  ret i32 42

not_null:
  %p1 = udiv i32 %a, %b
  %t = load i32, i32* %x
  %res = add i32 %t, %p1
  ret i32 %res
}

; TODO: We should be able to hoist this - we can on x86, why isn't this
; working for aarch64? Aliasing?
define i32 @imp_null_check_hoist_over_unrelated_load(i32* %x, i32* %y, i32* %z) {
; CHECK-LABEL: imp_null_check_hoist_over_unrelated_load:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    cbz x0, .LBB10_2
; CHECK-NEXT:  // %bb.1: // %not_null
; CHECK-NEXT:    ldr w8, [x1]
; CHECK-NEXT:    ldr w0, [x0]
; CHECK-NEXT:    str w8, [x2]
; CHECK-NEXT:    ret
; CHECK-NEXT:  .LBB10_2:
; CHECK-NEXT:    mov w0, #42
; CHECK-NEXT:    ret
entry:
  %c = icmp eq i32* %x, null
  br i1 %c, label %is_null, label %not_null, !make.implicit !0

is_null:
  ret i32 42

not_null:
  %t0 = load i32, i32* %y
  %t1 = load i32, i32* %x
  store i32 %t0, i32* %z
  ret i32 %t1
}

define i32 @imp_null_check_gep_load_with_use_dep(i32* %x, i32 %a) {
; CHECK-LABEL: imp_null_check_gep_load_with_use_dep:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:  .Ltmp6:
; CHECK-NEXT:    ldr w8, [x0] // on-fault: .LBB11_2
; CHECK-NEXT:  // %bb.1: // %not_null
; CHECK-NEXT:    add w9, w0, w1
; CHECK-NEXT:    add w8, w9, w8
; CHECK-NEXT:    add w0, w8, #4
; CHECK-NEXT:    ret
; CHECK-NEXT:  .LBB11_2:
; CHECK-NEXT:    mov w0, #42
; CHECK-NEXT:    ret
entry:
  %c = icmp eq i32* %x, null
  br i1 %c, label %is_null, label %not_null, !make.implicit !0

is_null:
  ret i32 42

not_null:
  %x.loc = getelementptr i32, i32* %x, i32 1
  %y = ptrtoint i32* %x.loc to i32
  %b = add i32 %a, %y
  %t = load i32, i32* %x
  %z = add i32 %t, %b
  ret i32 %z
}

;; TODO: We could handle this case as we can lift the fence into the
;; previous block before the conditional without changing behavior.
define i32 @imp_null_check_load_fence1(i32* %x) {
; CHECK-LABEL: imp_null_check_load_fence1:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    cbz x0, .LBB12_2
; CHECK-NEXT:  // %bb.1: // %not_null
; CHECK-NEXT:    dmb ishld
; CHECK-NEXT:    ldr w0, [x0]
; CHECK-NEXT:    ret
; CHECK-NEXT:  .LBB12_2:
; CHECK-NEXT:    mov w0, #42
; CHECK-NEXT:    ret
entry:
  %c = icmp eq i32* %x, null
  br i1 %c, label %is_null, label %not_null, !make.implicit !0

is_null:
  ret i32 42

not_null:
  fence acquire
  %t = load i32, i32* %x
  ret i32 %t
}

;; TODO: We could handle this case as we can lift the fence into the
;; previous block before the conditional without changing behavior.
define i32 @imp_null_check_load_fence2(i32* %x) {
; CHECK-LABEL: imp_null_check_load_fence2:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    cbz x0, .LBB13_2
; CHECK-NEXT:  // %bb.1: // %not_null
; CHECK-NEXT:    dmb ish
; CHECK-NEXT:    ldr w0, [x0]
; CHECK-NEXT:    ret
; CHECK-NEXT:  .LBB13_2:
; CHECK-NEXT:    mov w0, #42
; CHECK-NEXT:    ret
entry:
  %c = icmp eq i32* %x, null
  br i1 %c, label %is_null, label %not_null, !make.implicit !0

is_null:
  ret i32 42

not_null:
  fence seq_cst
  %t = load i32, i32* %x
  ret i32 %t
}

; TODO: We can fold to implicit null here, not sure why this isn't working
define void @imp_null_check_store(i32* %x) {
; CHECK-LABEL: imp_null_check_store:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    cbz x0, .LBB14_2
; CHECK-NEXT:  // %bb.1: // %not_null
; CHECK-NEXT:    mov w8, #1
; CHECK-NEXT:    str w8, [x0]
; CHECK-NEXT:  .LBB14_2: // %common.ret
; CHECK-NEXT:    ret
entry:
  %c = icmp eq i32* %x, null
  br i1 %c, label %is_null, label %not_null, !make.implicit !0

is_null:
  ret void

not_null:
  store i32 1, i32* %x
  ret void
}

;; TODO: can be implicit
define void @imp_null_check_unordered_store(i32* %x) {
; CHECK-LABEL: imp_null_check_unordered_store:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    cbz x0, .LBB15_2
; CHECK-NEXT:  // %bb.1: // %not_null
; CHECK-NEXT:    mov w8, #1
; CHECK-NEXT:    str w8, [x0]
; CHECK-NEXT:  .LBB15_2: // %common.ret
; CHECK-NEXT:    ret
entry:
  %c = icmp eq i32* %x, null
  br i1 %c, label %is_null, label %not_null, !make.implicit !0

is_null:
  ret void

not_null:
  store atomic i32 1, i32* %x unordered, align 4
  ret void
}

define i32 @imp_null_check_neg_gep_load(i32* %x) {
; CHECK-LABEL: imp_null_check_neg_gep_load:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:  .Ltmp7:
; CHECK-NEXT:    ldur w0, [x0, #-128] // on-fault: .LBB16_2
; CHECK-NEXT:  // %bb.1: // %not_null
; CHECK-NEXT:    ret
; CHECK-NEXT:  .LBB16_2:
; CHECK-NEXT:    mov w0, #42
; CHECK-NEXT:    ret
entry:
  %c = icmp eq i32* %x, null
  br i1 %c, label %is_null, label %not_null, !make.implicit !0

is_null:
  ret i32 42

not_null:
  %x.gep = getelementptr i32, i32* %x, i32 -32
  %t = load i32, i32* %x.gep
  ret i32 %t
}

!0 = !{}