clang-p2996/llvm/test/CodeGen/AArch64/sve-split-load.ll
Ricardo Jesus, 15fbdc2b96 (2025-02-26): [AArch64][SVE] Lower unpredicated loads/stores as LDR/STR. (#127837)
Currently, given:
```cpp
svuint8_t foo(uint8_t *x) {
  return svld1(svptrue_b8(), x);
}
```
We generate:
```gas
foo:
  ptrue   p0.b
  ld1b    { z0.b }, p0/z, [x0]
  ret
```
However, on little-endian targets where unaligned memory accesses are
allowed, we could instead use LDR:
```gas
foo:
  ldr     z0, [x0]
  ret
```

The second form avoids the dependency on a predicate register. The same
lowering applies to other element types and to stores, as sketched below.
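
As a sketch of the store case (illustrative; the function below is not taken
from the patch), an unpredicated svst1 through an all-true predicate:
```cpp
#include <arm_sve.h>

// Unpredicated store: every lane is active.
void bar(uint8_t *x, svuint8_t v) {
  svst1(svptrue_b8(), x, v);
}
```
should, under the same conditions, lower to a single STR:
```gas
bar:
  str     z0, [x0]
  ret
```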

; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s

; UNPREDICATED
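; Unpredicated loads lower to plain LDR here; parts beyond the first vector
; register are addressed with "#n, mul vl" offsets. The promoted nxv4i16 case
; below still needs an extending LD1H: a plain LDR cannot widen the i16
; elements into the unpacked .s layout.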
define <vscale x 4 x i16> @load_promote_4i16(ptr %a) {
; CHECK-LABEL: load_promote_4i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    ld1h { z0.s }, p0/z, [x0]
; CHECK-NEXT:    ret
  %load = load <vscale x 4 x i16>, ptr %a
  ret <vscale x 4 x i16> %load
}
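
; A wider-than-legal type is split into register-sized parts: nxv16i16 is
; twice the legal nxv8i16, so it loads as two LDRs, the second offset by one
; vector length.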
define <vscale x 16 x i16> @load_split_i16(ptr %a) {
; CHECK-LABEL: load_split_i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr z0, [x0]
; CHECK-NEXT:    ldr z1, [x0, #1, mul vl]
; CHECK-NEXT:    ret
  %load = load <vscale x 16 x i16>, ptr %a
  ret <vscale x 16 x i16> %load
}

define <vscale x 24 x i16> @load_split_24i16(ptr %a) {
; CHECK-LABEL: load_split_24i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr z0, [x0]
; CHECK-NEXT:    ldr z1, [x0, #1, mul vl]
; CHECK-NEXT:    ldr z2, [x0, #2, mul vl]
; CHECK-NEXT:    ret
  %load = load <vscale x 24 x i16>, ptr %a
  ret <vscale x 24 x i16> %load
}

define <vscale x 32 x i16> @load_split_32i16(ptr %a) {
; CHECK-LABEL: load_split_32i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr z0, [x0]
; CHECK-NEXT:    ldr z1, [x0, #1, mul vl]
; CHECK-NEXT:    ldr z2, [x0, #2, mul vl]
; CHECK-NEXT:    ldr z3, [x0, #3, mul vl]
; CHECK-NEXT:    ret
  %load = load <vscale x 32 x i16>, ptr %a
  ret <vscale x 32 x i16> %load
}

define <vscale x 16 x i64> @load_split_16i64(ptr %a) {
; CHECK-LABEL: load_split_16i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ldr z0, [x0]
; CHECK-NEXT:    ldr z1, [x0, #1, mul vl]
; CHECK-NEXT:    ldr z2, [x0, #2, mul vl]
; CHECK-NEXT:    ldr z3, [x0, #3, mul vl]
; CHECK-NEXT:    ldr z4, [x0, #4, mul vl]
; CHECK-NEXT:    ldr z5, [x0, #5, mul vl]
; CHECK-NEXT:    ldr z6, [x0, #6, mul vl]
; CHECK-NEXT:    ldr z7, [x0, #7, mul vl]
; CHECK-NEXT:    ret
  %load = load <vscale x 16 x i64>, ptr %a
  ret <vscale x 16 x i64> %load
}

; MASKED
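; Masked loads keep the predicated LD1 forms; the incoming mask carries the
; per-lane semantics, so they cannot be turned into plain LDRs.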
define <vscale x 2 x i32> @masked_load_promote_2i32(ptr %a, <vscale x 2 x i1> %pg) {
; CHECK-LABEL: masked_load_promote_2i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1w { z0.d }, p0/z, [x0]
; CHECK-NEXT:    ret
  %load = call <vscale x 2 x i32> @llvm.masked.load.nxv2i32(ptr %a, i32 1, <vscale x 2 x i1> %pg, <vscale x 2 x i32> poison)
  ret <vscale x 2 x i32> %load
}

define <vscale x 32 x i8> @masked_load_split_32i8(ptr %a, <vscale x 32 x i1> %pg) {
; CHECK-LABEL: masked_load_split_32i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1b { z0.b }, p0/z, [x0]
; CHECK-NEXT:    ld1b { z1.b }, p1/z, [x0, #1, mul vl]
; CHECK-NEXT:    ret
  %load = call <vscale x 32 x i8> @llvm.masked.load.nxv32i8(ptr %a, i32 1, <vscale x 32 x i1> %pg, <vscale x 32 x i8> poison)
  ret <vscale x 32 x i8> %load
}
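
; For nxv32i16 the nxv32i1 mask arrives in two predicate registers; each is
; unpacked with punpklo/punpkhi to yield the four nxv8i1 governing predicates.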
define <vscale x 32 x i16> @masked_load_split_32i16(ptr %a, <vscale x 32 x i1> %pg) {
; CHECK-LABEL: masked_load_split_32i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    punpklo p2.h, p0.b
; CHECK-NEXT:    punpkhi p0.h, p0.b
; CHECK-NEXT:    punpklo p3.h, p1.b
; CHECK-NEXT:    ld1h { z0.h }, p2/z, [x0]
; CHECK-NEXT:    punpkhi p1.h, p1.b
; CHECK-NEXT:    ld1h { z1.h }, p0/z, [x0, #1, mul vl]
; CHECK-NEXT:    ld1h { z2.h }, p3/z, [x0, #2, mul vl]
; CHECK-NEXT:    ld1h { z3.h }, p1/z, [x0, #3, mul vl]
; CHECK-NEXT:    ret
  %load = call <vscale x 32 x i16> @llvm.masked.load.nxv32i16(ptr %a, i32 1, <vscale x 32 x i1> %pg, <vscale x 32 x i16> poison)
  ret <vscale x 32 x i16> %load
}

define <vscale x 8 x i32> @masked_load_split_8i32(ptr %a, <vscale x 8 x i1> %pg) {
; CHECK-LABEL: masked_load_split_8i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    punpklo p1.h, p0.b
; CHECK-NEXT:    punpkhi p0.h, p0.b
; CHECK-NEXT:    ld1w { z0.s }, p1/z, [x0]
; CHECK-NEXT:    ld1w { z1.s }, p0/z, [x0, #1, mul vl]
; CHECK-NEXT:    ret
  %load = call <vscale x 8 x i32> @llvm.masked.load.nxv8i32(ptr %a, i32 1, <vscale x 8 x i1> %pg, <vscale x 8 x i32> poison)
  ret <vscale x 8 x i32> %load
}
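
; nxv8i64 splits four ways: the nxv8i1 mask goes through two rounds of
; punpklo/punpkhi to produce the four nxv2i1 governing predicates.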
define <vscale x 8 x i64> @masked_load_split_8i64(ptr %a, <vscale x 8 x i1> %pg) {
; CHECK-LABEL: masked_load_split_8i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    punpklo p1.h, p0.b
; CHECK-NEXT:    punpkhi p0.h, p0.b
; CHECK-NEXT:    punpklo p2.h, p1.b
; CHECK-NEXT:    punpkhi p1.h, p1.b
; CHECK-NEXT:    punpklo p3.h, p0.b
; CHECK-NEXT:    ld1d { z0.d }, p2/z, [x0]
; CHECK-NEXT:    punpkhi p0.h, p0.b
; CHECK-NEXT:    ld1d { z1.d }, p1/z, [x0, #1, mul vl]
; CHECK-NEXT:    ld1d { z2.d }, p3/z, [x0, #2, mul vl]
; CHECK-NEXT:    ld1d { z3.d }, p0/z, [x0, #3, mul vl]
; CHECK-NEXT:    ret
  %load = call <vscale x 8 x i64> @llvm.masked.load.nxv8i64(ptr %a, i32 1, <vscale x 8 x i1> %pg, <vscale x 8 x i64> poison)
  ret <vscale x 8 x i64> %load
}

declare <vscale x 32 x i8> @llvm.masked.load.nxv32i8(ptr, i32, <vscale x 32 x i1>, <vscale x 32 x i8>)
declare <vscale x 32 x i16> @llvm.masked.load.nxv32i16(ptr, i32, <vscale x 32 x i1>, <vscale x 32 x i16>)
declare <vscale x 2 x i32> @llvm.masked.load.nxv2i32(ptr, i32, <vscale x 2 x i1>, <vscale x 2 x i32>)
declare <vscale x 8 x i32> @llvm.masked.load.nxv8i32(ptr, i32, <vscale x 8 x i1>, <vscale x 8 x i32>)
declare <vscale x 8 x i64> @llvm.masked.load.nxv8i64(ptr, i32, <vscale x 8 x i1>, <vscale x 8 x i64>)