clang-p2996/llvm/test/CodeGen/AArch64/arm64-memset-inline.ll
David Green adec922361 [AArch64] Make -mcpu=generic schedule for an in-order core
We would like to start pushing -mcpu=generic towards enabling the set of
features that improves performance for some CPUs without hurting any
others: a blend of the performance options that is hopefully beneficial
to all CPUs. The largest part of that is enabling in-order scheduling
using the Cortex-A55 schedule model. This is similar to the Arm backend
change from eecb353d0e, which made -mcpu=generic perform in-order
scheduling using the Cortex-A8 schedule model.
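
As a rough way to observe the effect (a sketch, not part of this patch;
the RUN lines and function are made up for illustration), the same IR
compiled for -mcpu=generic and for an explicit in-order core now goes
through the same schedule model:

  ; RUN: llc < %s -mtriple=aarch64 -mcpu=generic -o %t.generic.s
  ; RUN: llc < %s -mtriple=aarch64 -mcpu=cortex-a55 -o %t.a55.s
  ; Both invocations schedule with the Cortex-A55 model, so the emitted
  ; instruction order should largely agree between the two outputs.
  define i32 @madd(i32 %a, i32 %b, i32 %c) {
    %m = mul i32 %b, %c
    %r = add i32 %a, %m
    ret i32 %r
  }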

The idea is that in-order cpus require the most help in instruction
scheduling, whereas out-of-order cpus can, for the most part, schedule
around different codegen on their own. Our benchmarking suggests that
hypothesis holds. When running on an in-order core this improved
performance by 3.8% geomean on a set of DSP workloads, 2% geomean on
another embedded benchmark and between 1% and 1.8% on a set of
single-core and multi-core workloads, all running on a Cortex-A55
cluster.

On an out-of-order cpu the results are a lot noisier but show flat
performance or an improvement. On the set of DSP and embedded
benchmarks, run on a Cortex-A78, there was a very noisy 1% speed
improvement. Using the most detailed results I could find, SPEC2006 runs
on a Neoverse N1 show a small increase in instruction count (+0.127%)
but a decrease in cycle count (-0.155% on average). The instruction
count has very low noise; the cycle count is noisier, so the 0.15%
decrease is not significant on its own. SPEC2k17 shows a small decrease
(-0.2%) in instruction count, leading to a 0.296% decrease in cycle
count. These results are within noise margins but tend to show a small
improvement in general.

When specifying an Apple target, clang will set "-target-cpu apple-a7"
on the command line, so it should not be affected by this change when
running from clang. This also doesn't enable more runtime unrolling in
the way -mcpu=cortex-a55 does; only the schedule model used changes.
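
A hypothetical driver check in the style of clang/test/Driver (a sketch
only; the triple and CHECK line are assumptions based on the behaviour
described above, not taken from this patch) would look like:

  // RUN: %clang -### -c --target=arm64-apple-ios %s 2>&1 | FileCheck %s
  // Apple targets pin the CPU, so they keep their own schedule model:
  // CHECK: "-target-cpu" "apple-a7"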

A lot of existing tests have been updated. This is a summary of the
important differences:
 - Most changes are the same instructions in a different order.
 - Sometimes this leads to very minor inefficiencies, such as requiring
   an extra mov to move variables into x0/v0 for the return value of a
   test function.
 - misched-fusion.ll was no longer fusing the pairs of instructions it
   should, as per D110561. I've changed the schedule used in the test
   for now.
 - neon-mla-mls.ll now uses "mul; sub" as opposed to "neg; mla" due to
   the different latencies (see the sketch after this list). This seems
   fine to me.
 - Some SVE tests do not always remove movprfx where they did before due
   to different register allocation giving different destructive forms.
 - The tests argument-blocks-array-of-struct.ll and arm64-windows-calls.ll
   produce two LDR where they previously produced an LDP due to
   store-pair-suppress kicking in.
 - arm64-ldp.ll and arm64-neon-copy.ll are missing pre/postinc on LDP.
 - Some tests such as arm64-neon-mul-div.ll and
   ragreedy-local-interval-cost.ll have more, less or just different
   spilling.
 - In aarch64_generated_funcs.ll.generated.expected one part of the
   function is no longer outlined. Interestingly, if I switch this to
   use any other schedule model, even less is outlined.
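
To illustrate the neon-mla-mls.ll point above, a sketch (the function is
made up; the expected instructions are described in comments rather than
asserted, since the exact output depends on the compiler build):

  ; RUN: llc < %s -mtriple=aarch64 -mcpu=generic -o -
  ; For "b*c - a" there is no single mls form (mls subtracts the product
  ; from the accumulator, not the accumulator from the product), so the
  ; backend either negates the accumulator and uses mla, or emits mul
  ; followed by sub. The Cortex-A55 latencies now favour the latter.
  define <4 x i32> @mul_then_sub(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
    %m = mul <4 x i32> %b, %c
    %r = sub <4 x i32> %m, %a
    ret <4 x i32> %r
  }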

Some of these changes are expected, such as differences in outlining or
register spilling. There will be places where they result in worse
codegen and places where they result in better codegen, but the SPEC
instruction counts suggest it is not a regression overall, on average.

Differential Revision: https://reviews.llvm.org/D110830
2021-10-09 15:58:31 +01:00

; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=arm64-eabi | FileCheck %s
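; Regenerating these assertions after a scheduling change is typically done
; with (a sketch of usage, not part of the original file; assumes a built
; llc, which can also be pointed to with --llc-binary):
;   llvm/utils/update_llc_test_checks.py \
;     llvm/test/CodeGen/AArch64/arm64-memset-inline.ll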
define void @bzero_4_heap(i8* nocapture %c) {
; CHECK-LABEL: bzero_4_heap:
; CHECK: // %bb.0:
; CHECK-NEXT: str wzr, [x0]
; CHECK-NEXT: ret
call void @llvm.memset.p0i8.i64(i8* align 4 %c, i8 0, i64 4, i1 false)
ret void
}
define void @bzero_8_heap(i8* nocapture %c) {
; CHECK-LABEL: bzero_8_heap:
; CHECK: // %bb.0:
; CHECK-NEXT: str xzr, [x0]
; CHECK-NEXT: ret
call void @llvm.memset.p0i8.i64(i8* align 8 %c, i8 0, i64 8, i1 false)
ret void
}
define void @bzero_12_heap(i8* nocapture %c) {
; CHECK-LABEL: bzero_12_heap:
; CHECK: // %bb.0:
; CHECK-NEXT: str wzr, [x0, #8]
; CHECK-NEXT: str xzr, [x0]
; CHECK-NEXT: ret
call void @llvm.memset.p0i8.i64(i8* align 8 %c, i8 0, i64 12, i1 false)
ret void
}
define void @bzero_16_heap(i8* nocapture %c) {
; CHECK-LABEL: bzero_16_heap:
; CHECK: // %bb.0:
; CHECK-NEXT: stp xzr, xzr, [x0]
; CHECK-NEXT: ret
call void @llvm.memset.p0i8.i64(i8* align 8 %c, i8 0, i64 16, i1 false)
ret void
}
define void @bzero_32_heap(i8* nocapture %c) {
; CHECK-LABEL: bzero_32_heap:
; CHECK: // %bb.0:
; CHECK-NEXT: movi v0.2d, #0000000000000000
; CHECK-NEXT: stp q0, q0, [x0]
; CHECK-NEXT: ret
call void @llvm.memset.p0i8.i64(i8* align 8 %c, i8 0, i64 32, i1 false)
ret void
}
define void @bzero_64_heap(i8* nocapture %c) {
; CHECK-LABEL: bzero_64_heap:
; CHECK: // %bb.0:
; CHECK-NEXT: movi v0.2d, #0000000000000000
; CHECK-NEXT: stp q0, q0, [x0, #32]
; CHECK-NEXT: stp q0, q0, [x0]
; CHECK-NEXT: ret
call void @llvm.memset.p0i8.i64(i8* align 8 %c, i8 0, i64 64, i1 false)
ret void
}
define void @bzero_4_stack() {
; CHECK-LABEL: bzero_4_stack:
; CHECK: // %bb.0:
; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: .cfi_offset w30, -16
; CHECK-NEXT: add x0, sp, #12
; CHECK-NEXT: str wzr, [sp, #12]
; CHECK-NEXT: bl something
; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT: ret
%buf = alloca [4 x i8], align 1
%cast = bitcast [4 x i8]* %buf to i8*
call void @llvm.memset.p0i8.i32(i8* %cast, i8 0, i32 4, i1 false)
call void @something(i8* %cast)
ret void
}
define void @bzero_8_stack() {
; CHECK-LABEL: bzero_8_stack:
; CHECK: // %bb.0:
; CHECK-NEXT: stp x30, xzr, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: .cfi_offset w30, -16
; CHECK-NEXT: add x0, sp, #8
; CHECK-NEXT: bl something
; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT: ret
%buf = alloca [8 x i8], align 1
%cast = bitcast [8 x i8]* %buf to i8*
call void @llvm.memset.p0i8.i32(i8* %cast, i8 0, i32 8, i1 false)
call void @something(i8* %cast)
ret void
}
define void @bzero_12_stack() {
; CHECK-LABEL: bzero_12_stack:
; CHECK: // %bb.0:
; CHECK-NEXT: sub sp, sp, #32
; CHECK-NEXT: str x30, [sp, #16] // 8-byte Folded Spill
; CHECK-NEXT: .cfi_def_cfa_offset 32
; CHECK-NEXT: .cfi_offset w30, -16
; CHECK-NEXT: mov x0, sp
; CHECK-NEXT: str wzr, [sp, #8]
; CHECK-NEXT: str xzr, [sp]
; CHECK-NEXT: bl something
; CHECK-NEXT: ldr x30, [sp, #16] // 8-byte Folded Reload
; CHECK-NEXT: add sp, sp, #32
; CHECK-NEXT: ret
%buf = alloca [12 x i8], align 1
%cast = bitcast [12 x i8]* %buf to i8*
call void @llvm.memset.p0i8.i32(i8* %cast, i8 0, i32 12, i1 false)
call void @something(i8* %cast)
ret void
}
define void @bzero_16_stack() {
; CHECK-LABEL: bzero_16_stack:
; CHECK: // %bb.0:
; CHECK-NEXT: sub sp, sp, #32
; CHECK-NEXT: stp xzr, x30, [sp, #8] // 8-byte Folded Spill
; CHECK-NEXT: .cfi_def_cfa_offset 32
; CHECK-NEXT: .cfi_offset w30, -16
; CHECK-NEXT: mov x0, sp
; CHECK-NEXT: str xzr, [sp]
; CHECK-NEXT: bl something
; CHECK-NEXT: ldr x30, [sp, #16] // 8-byte Folded Reload
; CHECK-NEXT: add sp, sp, #32
; CHECK-NEXT: ret
%buf = alloca [16 x i8], align 1
%cast = bitcast [16 x i8]* %buf to i8*
call void @llvm.memset.p0i8.i32(i8* %cast, i8 0, i32 16, i1 false)
call void @something(i8* %cast)
ret void
}
define void @bzero_20_stack() {
; CHECK-LABEL: bzero_20_stack:
; CHECK: // %bb.0:
; CHECK-NEXT: sub sp, sp, #48
; CHECK-NEXT: str x30, [sp, #32] // 8-byte Folded Spill
; CHECK-NEXT: .cfi_def_cfa_offset 48
; CHECK-NEXT: .cfi_offset w30, -16
; CHECK-NEXT: add x0, sp, #8
; CHECK-NEXT: stp xzr, xzr, [sp, #8]
; CHECK-NEXT: str wzr, [sp, #24]
; CHECK-NEXT: bl something
; CHECK-NEXT: ldr x30, [sp, #32] // 8-byte Folded Reload
; CHECK-NEXT: add sp, sp, #48
; CHECK-NEXT: ret
%buf = alloca [20 x i8], align 1
%cast = bitcast [20 x i8]* %buf to i8*
call void @llvm.memset.p0i8.i32(i8* %cast, i8 0, i32 20, i1 false)
call void @something(i8* %cast)
ret void
}
define void @bzero_26_stack() {
; CHECK-LABEL: bzero_26_stack:
; CHECK: // %bb.0:
; CHECK-NEXT: sub sp, sp, #48
; CHECK-NEXT: str x30, [sp, #32] // 8-byte Folded Spill
; CHECK-NEXT: .cfi_def_cfa_offset 48
; CHECK-NEXT: .cfi_offset w30, -16
; CHECK-NEXT: mov x0, sp
; CHECK-NEXT: stp xzr, xzr, [sp]
; CHECK-NEXT: strh wzr, [sp, #24]
; CHECK-NEXT: str xzr, [sp, #16]
; CHECK-NEXT: bl something
; CHECK-NEXT: ldr x30, [sp, #32] // 8-byte Folded Reload
; CHECK-NEXT: add sp, sp, #48
; CHECK-NEXT: ret
%buf = alloca [26 x i8], align 1
%cast = bitcast [26 x i8]* %buf to i8*
call void @llvm.memset.p0i8.i32(i8* %cast, i8 0, i32 26, i1 false)
call void @something(i8* %cast)
ret void
}
define void @bzero_32_stack() {
; CHECK-LABEL: bzero_32_stack:
; CHECK: // %bb.0:
; CHECK-NEXT: sub sp, sp, #48
; CHECK-NEXT: str x30, [sp, #32] // 8-byte Folded Spill
; CHECK-NEXT: .cfi_def_cfa_offset 48
; CHECK-NEXT: .cfi_offset w30, -16
; CHECK-NEXT: movi v0.2d, #0000000000000000
; CHECK-NEXT: mov x0, sp
; CHECK-NEXT: stp q0, q0, [sp]
; CHECK-NEXT: bl something
; CHECK-NEXT: ldr x30, [sp, #32] // 8-byte Folded Reload
; CHECK-NEXT: add sp, sp, #48
; CHECK-NEXT: ret
%buf = alloca [32 x i8], align 1
%cast = bitcast [32 x i8]* %buf to i8*
call void @llvm.memset.p0i8.i32(i8* %cast, i8 0, i32 32, i1 false)
call void @something(i8* %cast)
ret void
}
define void @bzero_40_stack() {
; CHECK-LABEL: bzero_40_stack:
; CHECK: // %bb.0:
; CHECK-NEXT: sub sp, sp, #64
; CHECK-NEXT: str x30, [sp, #48] // 8-byte Folded Spill
; CHECK-NEXT: .cfi_def_cfa_offset 64
; CHECK-NEXT: .cfi_offset w30, -16
; CHECK-NEXT: movi v0.2d, #0000000000000000
; CHECK-NEXT: mov x0, sp
; CHECK-NEXT: str xzr, [sp, #32]
; CHECK-NEXT: stp q0, q0, [sp]
; CHECK-NEXT: bl something
; CHECK-NEXT: ldr x30, [sp, #48] // 8-byte Folded Reload
; CHECK-NEXT: add sp, sp, #64
; CHECK-NEXT: ret
%buf = alloca [40 x i8], align 1
%cast = bitcast [40 x i8]* %buf to i8*
call void @llvm.memset.p0i8.i32(i8* %cast, i8 0, i32 40, i1 false)
call void @something(i8* %cast)
ret void
}
define void @bzero_64_stack() {
; CHECK-LABEL: bzero_64_stack:
; CHECK: // %bb.0:
; CHECK-NEXT: sub sp, sp, #80
; CHECK-NEXT: str x30, [sp, #64] // 8-byte Folded Spill
; CHECK-NEXT: .cfi_def_cfa_offset 80
; CHECK-NEXT: .cfi_offset w30, -16
; CHECK-NEXT: movi v0.2d, #0000000000000000
; CHECK-NEXT: mov x0, sp
; CHECK-NEXT: stp q0, q0, [sp, #32]
; CHECK-NEXT: stp q0, q0, [sp]
; CHECK-NEXT: bl something
; CHECK-NEXT: ldr x30, [sp, #64] // 8-byte Folded Reload
; CHECK-NEXT: add sp, sp, #80
; CHECK-NEXT: ret
%buf = alloca [64 x i8], align 1
%cast = bitcast [64 x i8]* %buf to i8*
call void @llvm.memset.p0i8.i32(i8* %cast, i8 0, i32 64, i1 false)
call void @something(i8* %cast)
ret void
}
define void @bzero_72_stack() {
; CHECK-LABEL: bzero_72_stack:
; CHECK: // %bb.0:
; CHECK-NEXT: sub sp, sp, #96
; CHECK-NEXT: str x30, [sp, #80] // 8-byte Folded Spill
; CHECK-NEXT: .cfi_def_cfa_offset 96
; CHECK-NEXT: .cfi_offset w30, -16
; CHECK-NEXT: movi v0.2d, #0000000000000000
; CHECK-NEXT: mov x0, sp
; CHECK-NEXT: str xzr, [sp, #64]
; CHECK-NEXT: stp q0, q0, [sp, #32]
; CHECK-NEXT: stp q0, q0, [sp]
; CHECK-NEXT: bl something
; CHECK-NEXT: ldr x30, [sp, #80] // 8-byte Folded Reload
; CHECK-NEXT: add sp, sp, #96
; CHECK-NEXT: ret
%buf = alloca [72 x i8], align 1
%cast = bitcast [72 x i8]* %buf to i8*
call void @llvm.memset.p0i8.i32(i8* %cast, i8 0, i32 72, i1 false)
call void @something(i8* %cast)
ret void
}
define void @bzero_128_stack() {
; CHECK-LABEL: bzero_128_stack:
; CHECK: // %bb.0:
; CHECK-NEXT: sub sp, sp, #144
; CHECK-NEXT: str x30, [sp, #128] // 8-byte Folded Spill
; CHECK-NEXT: .cfi_def_cfa_offset 144
; CHECK-NEXT: .cfi_offset w30, -16
; CHECK-NEXT: movi v0.2d, #0000000000000000
; CHECK-NEXT: mov x0, sp
; CHECK-NEXT: stp q0, q0, [sp, #96]
; CHECK-NEXT: stp q0, q0, [sp, #64]
; CHECK-NEXT: stp q0, q0, [sp, #32]
; CHECK-NEXT: stp q0, q0, [sp]
; CHECK-NEXT: bl something
; CHECK-NEXT: ldr x30, [sp, #128] // 8-byte Folded Reload
; CHECK-NEXT: add sp, sp, #144
; CHECK-NEXT: ret
%buf = alloca [128 x i8], align 1
%cast = bitcast [128 x i8]* %buf to i8*
call void @llvm.memset.p0i8.i32(i8* %cast, i8 0, i32 128, i1 false)
call void @something(i8* %cast)
ret void
}
define void @bzero_256_stack() {
; CHECK-LABEL: bzero_256_stack:
; CHECK: // %bb.0:
; CHECK-NEXT: sub sp, sp, #272
; CHECK-NEXT: stp x29, x30, [sp, #256] // 16-byte Folded Spill
; CHECK-NEXT: .cfi_def_cfa_offset 272
; CHECK-NEXT: .cfi_offset w30, -8
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: movi v0.2d, #0000000000000000
; CHECK-NEXT: mov x0, sp
; CHECK-NEXT: stp q0, q0, [sp, #224]
; CHECK-NEXT: stp q0, q0, [sp, #192]
; CHECK-NEXT: stp q0, q0, [sp, #160]
; CHECK-NEXT: stp q0, q0, [sp, #128]
; CHECK-NEXT: stp q0, q0, [sp, #96]
; CHECK-NEXT: stp q0, q0, [sp, #64]
; CHECK-NEXT: stp q0, q0, [sp, #32]
; CHECK-NEXT: stp q0, q0, [sp]
; CHECK-NEXT: bl something
; CHECK-NEXT: ldp x29, x30, [sp, #256] // 16-byte Folded Reload
; CHECK-NEXT: add sp, sp, #272
; CHECK-NEXT: ret
%buf = alloca [256 x i8], align 1
%cast = bitcast [256 x i8]* %buf to i8*
call void @llvm.memset.p0i8.i32(i8* %cast, i8 0, i32 256, i1 false)
call void @something(i8* %cast)
ret void
}
define void @memset_4_stack() {
; CHECK-LABEL: memset_4_stack:
; CHECK: // %bb.0:
; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: .cfi_offset w30, -16
; CHECK-NEXT: mov w8, #-1431655766
; CHECK-NEXT: add x0, sp, #12
; CHECK-NEXT: str w8, [sp, #12]
; CHECK-NEXT: bl something
; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT: ret
%buf = alloca [4 x i8], align 1
%cast = bitcast [4 x i8]* %buf to i8*
call void @llvm.memset.p0i8.i32(i8* %cast, i8 -86, i32 4, i1 false)
call void @something(i8* %cast)
ret void
}
define void @memset_8_stack() {
; CHECK-LABEL: memset_8_stack:
; CHECK: // %bb.0:
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: .cfi_offset w30, -16
; CHECK-NEXT: mov x8, #-6148914691236517206
; CHECK-NEXT: stp x30, x8, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: add x0, sp, #8
; CHECK-NEXT: bl something
; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT: ret
%buf = alloca [8 x i8], align 1
%cast = bitcast [8 x i8]* %buf to i8*
call void @llvm.memset.p0i8.i32(i8* %cast, i8 -86, i32 8, i1 false)
call void @something(i8* %cast)
ret void
}
define void @memset_12_stack() {
; CHECK-LABEL: memset_12_stack:
; CHECK: // %bb.0:
; CHECK-NEXT: sub sp, sp, #32
; CHECK-NEXT: str x30, [sp, #16] // 8-byte Folded Spill
; CHECK-NEXT: .cfi_def_cfa_offset 32
; CHECK-NEXT: .cfi_offset w30, -16
; CHECK-NEXT: mov x8, #-6148914691236517206
; CHECK-NEXT: mov x0, sp
; CHECK-NEXT: str x8, [sp]
; CHECK-NEXT: str w8, [sp, #8]
; CHECK-NEXT: bl something
; CHECK-NEXT: ldr x30, [sp, #16] // 8-byte Folded Reload
; CHECK-NEXT: add sp, sp, #32
; CHECK-NEXT: ret
%buf = alloca [12 x i8], align 1
%cast = bitcast [12 x i8]* %buf to i8*
call void @llvm.memset.p0i8.i32(i8* %cast, i8 -86, i32 12, i1 false)
call void @something(i8* %cast)
ret void
}
define void @memset_16_stack() {
; CHECK-LABEL: memset_16_stack:
; CHECK: // %bb.0:
; CHECK-NEXT: sub sp, sp, #32
; CHECK-NEXT: .cfi_def_cfa_offset 32
; CHECK-NEXT: .cfi_offset w30, -16
; CHECK-NEXT: mov x8, #-6148914691236517206
; CHECK-NEXT: mov x0, sp
; CHECK-NEXT: stp x8, x30, [sp, #8] // 8-byte Folded Spill
; CHECK-NEXT: str x8, [sp]
; CHECK-NEXT: bl something
; CHECK-NEXT: ldr x30, [sp, #16] // 8-byte Folded Reload
; CHECK-NEXT: add sp, sp, #32
; CHECK-NEXT: ret
%buf = alloca [16 x i8], align 1
%cast = bitcast [16 x i8]* %buf to i8*
call void @llvm.memset.p0i8.i32(i8* %cast, i8 -86, i32 16, i1 false)
call void @something(i8* %cast)
ret void
}
define void @memset_20_stack() {
; CHECK-LABEL: memset_20_stack:
; CHECK: // %bb.0:
; CHECK-NEXT: sub sp, sp, #48
; CHECK-NEXT: str x30, [sp, #32] // 8-byte Folded Spill
; CHECK-NEXT: .cfi_def_cfa_offset 48
; CHECK-NEXT: .cfi_offset w30, -16
; CHECK-NEXT: mov x8, #-6148914691236517206
; CHECK-NEXT: add x0, sp, #8
; CHECK-NEXT: stp x8, x8, [sp, #8]
; CHECK-NEXT: str w8, [sp, #24]
; CHECK-NEXT: bl something
; CHECK-NEXT: ldr x30, [sp, #32] // 8-byte Folded Reload
; CHECK-NEXT: add sp, sp, #48
; CHECK-NEXT: ret
%buf = alloca [20 x i8], align 1
%cast = bitcast [20 x i8]* %buf to i8*
call void @llvm.memset.p0i8.i32(i8* %cast, i8 -86, i32 20, i1 false)
call void @something(i8* %cast)
ret void
}
define void @memset_26_stack() {
; CHECK-LABEL: memset_26_stack:
; CHECK: // %bb.0:
; CHECK-NEXT: sub sp, sp, #48
; CHECK-NEXT: str x30, [sp, #32] // 8-byte Folded Spill
; CHECK-NEXT: .cfi_def_cfa_offset 48
; CHECK-NEXT: .cfi_offset w30, -16
; CHECK-NEXT: mov x8, #-6148914691236517206
; CHECK-NEXT: mov x0, sp
; CHECK-NEXT: stp x8, x8, [sp, #8]
; CHECK-NEXT: str x8, [sp]
; CHECK-NEXT: strh w8, [sp, #24]
; CHECK-NEXT: bl something
; CHECK-NEXT: ldr x30, [sp, #32] // 8-byte Folded Reload
; CHECK-NEXT: add sp, sp, #48
; CHECK-NEXT: ret
%buf = alloca [26 x i8], align 1
%cast = bitcast [26 x i8]* %buf to i8*
call void @llvm.memset.p0i8.i32(i8* %cast, i8 -86, i32 26, i1 false)
call void @something(i8* %cast)
ret void
}
define void @memset_32_stack() {
; CHECK-LABEL: memset_32_stack:
; CHECK: // %bb.0:
; CHECK-NEXT: sub sp, sp, #48
; CHECK-NEXT: str x30, [sp, #32] // 8-byte Folded Spill
; CHECK-NEXT: .cfi_def_cfa_offset 48
; CHECK-NEXT: .cfi_offset w30, -16
; CHECK-NEXT: movi v0.16b, #170
; CHECK-NEXT: mov x0, sp
; CHECK-NEXT: stp q0, q0, [sp]
; CHECK-NEXT: bl something
; CHECK-NEXT: ldr x30, [sp, #32] // 8-byte Folded Reload
; CHECK-NEXT: add sp, sp, #48
; CHECK-NEXT: ret
%buf = alloca [32 x i8], align 1
%cast = bitcast [32 x i8]* %buf to i8*
call void @llvm.memset.p0i8.i32(i8* %cast, i8 -86, i32 32, i1 false)
call void @something(i8* %cast)
ret void
}
define void @memset_40_stack() {
; CHECK-LABEL: memset_40_stack:
; CHECK: // %bb.0:
; CHECK-NEXT: sub sp, sp, #64
; CHECK-NEXT: str x30, [sp, #48] // 8-byte Folded Spill
; CHECK-NEXT: .cfi_def_cfa_offset 64
; CHECK-NEXT: .cfi_offset w30, -16
; CHECK-NEXT: movi v0.16b, #170
; CHECK-NEXT: mov x8, #-6148914691236517206
; CHECK-NEXT: mov x0, sp
; CHECK-NEXT: str x8, [sp, #32]
; CHECK-NEXT: stp q0, q0, [sp]
; CHECK-NEXT: bl something
; CHECK-NEXT: ldr x30, [sp, #48] // 8-byte Folded Reload
; CHECK-NEXT: add sp, sp, #64
; CHECK-NEXT: ret
%buf = alloca [40 x i8], align 1
%cast = bitcast [40 x i8]* %buf to i8*
call void @llvm.memset.p0i8.i32(i8* %cast, i8 -86, i32 40, i1 false)
call void @something(i8* %cast)
ret void
}
define void @memset_64_stack() {
; CHECK-LABEL: memset_64_stack:
; CHECK: // %bb.0:
; CHECK-NEXT: sub sp, sp, #80
; CHECK-NEXT: str x30, [sp, #64] // 8-byte Folded Spill
; CHECK-NEXT: .cfi_def_cfa_offset 80
; CHECK-NEXT: .cfi_offset w30, -16
; CHECK-NEXT: movi v0.16b, #170
; CHECK-NEXT: mov x0, sp
; CHECK-NEXT: stp q0, q0, [sp, #32]
; CHECK-NEXT: stp q0, q0, [sp]
; CHECK-NEXT: bl something
; CHECK-NEXT: ldr x30, [sp, #64] // 8-byte Folded Reload
; CHECK-NEXT: add sp, sp, #80
; CHECK-NEXT: ret
%buf = alloca [64 x i8], align 1
%cast = bitcast [64 x i8]* %buf to i8*
call void @llvm.memset.p0i8.i32(i8* %cast, i8 -86, i32 64, i1 false)
call void @something(i8* %cast)
ret void
}
define void @memset_72_stack() {
; CHECK-LABEL: memset_72_stack:
; CHECK: // %bb.0:
; CHECK-NEXT: sub sp, sp, #96
; CHECK-NEXT: str x30, [sp, #80] // 8-byte Folded Spill
; CHECK-NEXT: .cfi_def_cfa_offset 96
; CHECK-NEXT: .cfi_offset w30, -16
; CHECK-NEXT: movi v0.16b, #170
; CHECK-NEXT: mov x8, #-6148914691236517206
; CHECK-NEXT: mov x0, sp
; CHECK-NEXT: str x8, [sp, #64]
; CHECK-NEXT: stp q0, q0, [sp, #32]
; CHECK-NEXT: stp q0, q0, [sp]
; CHECK-NEXT: bl something
; CHECK-NEXT: ldr x30, [sp, #80] // 8-byte Folded Reload
; CHECK-NEXT: add sp, sp, #96
; CHECK-NEXT: ret
%buf = alloca [72 x i8], align 1
%cast = bitcast [72 x i8]* %buf to i8*
call void @llvm.memset.p0i8.i32(i8* %cast, i8 -86, i32 72, i1 false)
call void @something(i8* %cast)
ret void
}
define void @memset_128_stack() {
; CHECK-LABEL: memset_128_stack:
; CHECK: // %bb.0:
; CHECK-NEXT: sub sp, sp, #144
; CHECK-NEXT: str x30, [sp, #128] // 8-byte Folded Spill
; CHECK-NEXT: .cfi_def_cfa_offset 144
; CHECK-NEXT: .cfi_offset w30, -16
; CHECK-NEXT: movi v0.16b, #170
; CHECK-NEXT: mov x0, sp
; CHECK-NEXT: stp q0, q0, [sp, #96]
; CHECK-NEXT: stp q0, q0, [sp, #64]
; CHECK-NEXT: stp q0, q0, [sp, #32]
; CHECK-NEXT: stp q0, q0, [sp]
; CHECK-NEXT: bl something
; CHECK-NEXT: ldr x30, [sp, #128] // 8-byte Folded Reload
; CHECK-NEXT: add sp, sp, #144
; CHECK-NEXT: ret
%buf = alloca [128 x i8], align 1
%cast = bitcast [128 x i8]* %buf to i8*
call void @llvm.memset.p0i8.i32(i8* %cast, i8 -86, i32 128, i1 false)
call void @something(i8* %cast)
ret void
}
define void @memset_256_stack() {
; CHECK-LABEL: memset_256_stack:
; CHECK: // %bb.0:
; CHECK-NEXT: sub sp, sp, #272
; CHECK-NEXT: stp x29, x30, [sp, #256] // 16-byte Folded Spill
; CHECK-NEXT: .cfi_def_cfa_offset 272
; CHECK-NEXT: .cfi_offset w30, -8
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: movi v0.16b, #170
; CHECK-NEXT: mov x0, sp
; CHECK-NEXT: stp q0, q0, [sp, #224]
; CHECK-NEXT: stp q0, q0, [sp, #192]
; CHECK-NEXT: stp q0, q0, [sp, #160]
; CHECK-NEXT: stp q0, q0, [sp, #128]
; CHECK-NEXT: stp q0, q0, [sp, #96]
; CHECK-NEXT: stp q0, q0, [sp, #64]
; CHECK-NEXT: stp q0, q0, [sp, #32]
; CHECK-NEXT: stp q0, q0, [sp]
; CHECK-NEXT: bl something
; CHECK-NEXT: ldp x29, x30, [sp, #256] // 16-byte Folded Reload
; CHECK-NEXT: add sp, sp, #272
; CHECK-NEXT: ret
%buf = alloca [256 x i8], align 1
%cast = bitcast [256 x i8]* %buf to i8*
call void @llvm.memset.p0i8.i32(i8* %cast, i8 -86, i32 256, i1 false)
call void @something(i8* %cast)
ret void
}
declare void @something(i8*)
declare void @llvm.memset.p0i8.i32(i8* nocapture, i8, i32, i1) nounwind
declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1) nounwind