Files
clang-p2996/llvm/test/CodeGen/BPF/addr-space-auto-casts.ll
4ast 2aacb56e83 BPF address space insn (#84410)
This commit aims to support BPF arena kernel side
[feature](https://lore.kernel.org/bpf/20240209040608.98927-1-alexei.starovoitov@gmail.com/):
- arena is a memory region accessible from both BPF program and
userspace;
- base pointers for this memory region differ between kernel and user
spaces;
- `dst_reg = addr_space_cast(src_reg, dst_addr_space, src_addr_space)`
translates src_reg, a pointer in src_addr_space, to dst_reg, an equivalent
pointer in dst_addr_space; {src,dst}_addr_space are immediate constants;
- number 0 is assigned to kernel address space;
- number 1 is assigned to user address space.

On the LLVM side, the goal is to make load and store operations on arena
pointers "transparent" for BPF programs:
- assume that pointers with non-zero address space are pointers to
  arena memory;
- assume that arena is identified by address space number;
- assume that address space zero corresponds to kernel address space;
- assume that every BPF-side load or store from arena is done via a
pointer in the user address space, thus convert base pointers using
`addr_space_cast(src_reg, 0, 1)`;

Only load, store, cmpxchg and atomicrmw IR instructions are handled by
this transformation.

For example, the following C code:

```c
   #define __as __attribute__((address_space(1)))
   void copy(int __as *from, int __as *to) { *to = *from; }
```

Compiled to the following IR:

```llvm
    define void @copy(ptr addrspace(1) %from, ptr addrspace(1) %to) {
    entry:
      %0 = load i32, ptr addrspace(1) %from, align 4
      store i32 %0, ptr addrspace(1) %to, align 4
      ret void
    }
```

Is transformed to:

```llvm
    %to2 = addrspacecast ptr addrspace(1) %to to ptr     ;; !
    %from1 = addrspacecast ptr addrspace(1) %from to ptr ;; !
    %0 = load i32, ptr %from1, align 4, !tbaa !3
    store i32 %0, ptr %to2, align 4, !tbaa !3
    ret void
```

And compiled as:

```asm
    r2 = addr_space_cast(r2, 0, 1)
    r1 = addr_space_cast(r1, 0, 1)
    r1 = *(u32 *)(r1 + 0)
    *(u32 *)(r2 + 0) = r1
    exit
```

Co-authored-by: Eduard Zingerman <eddyz87@gmail.com>
2024-03-13 02:27:25 +02:00

79 lines
3.3 KiB
LLVM

; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
; RUN: opt --bpf-check-and-opt-ir -S -mtriple=bpf-pc-linux < %s | FileCheck %s
;
; Verifies that the BPF IR pass rewrites memory operations (load, store,
; cmpxchg, atomicrmw) on pointers in a non-zero address space so that they
; go through an explicit `addrspacecast ... to ptr` of the base pointer,
; as described in the BPF arena support commit this test belongs to.
; Two volatile stores through GEPs that share a single addrspace(272) base.
; Expected: the pass inserts ONE addrspacecast of %foo to the default
; address space and rewrites both GEP/store chains to use the cast result.
; (272 is an arbitrary non-zero address space; per the pass's model, any
; non-zero AS is presumed to be an arena pointer — see commit message.)
define void @simple_store(ptr addrspace(272) %foo) {
; CHECK-LABEL: define void @simple_store(
; CHECK-SAME: ptr addrspace(272) [[FOO:%.*]]) {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[FOO1:%.*]] = addrspacecast ptr addrspace(272) [[FOO]] to ptr
; CHECK-NEXT: [[ADD_PTR2:%.*]] = getelementptr inbounds i8, ptr [[FOO1]], i64 16
; CHECK-NEXT: store volatile i32 57005, ptr [[ADD_PTR2]], align 4
; CHECK-NEXT: [[ADD_PTR13:%.*]] = getelementptr inbounds i8, ptr [[FOO1]], i64 12
; CHECK-NEXT: store volatile i32 48879, ptr [[ADD_PTR13]], align 4
; CHECK-NEXT: ret void
;
entry:
%add.ptr = getelementptr inbounds i8, ptr addrspace(272) %foo, i64 16
store volatile i32 57005, ptr addrspace(272) %add.ptr, align 4 ; 57005 = 0xdead
%add.ptr1 = getelementptr inbounds i8, ptr addrspace(272) %foo, i64 12
store volatile i32 48879, ptr addrspace(272) %add.ptr1, align 4 ; 48879 = 0xbeef
ret void
}
; Same as @simple_store but with two DISTINCT base pointers (%foo, %bar).
; Expected: one addrspacecast per base pointer, each store rewritten to
; use the cast of its own base.
define void @separate_addr_store(ptr addrspace(272) %foo, ptr addrspace(272) %bar) {
; CHECK-LABEL: define void @separate_addr_store(
; CHECK-SAME: ptr addrspace(272) [[FOO:%.*]], ptr addrspace(272) [[BAR:%.*]]) {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[BAR3:%.*]] = addrspacecast ptr addrspace(272) [[BAR]] to ptr
; CHECK-NEXT: [[FOO1:%.*]] = addrspacecast ptr addrspace(272) [[FOO]] to ptr
; CHECK-NEXT: [[ADD_PTR2:%.*]] = getelementptr inbounds i8, ptr [[FOO1]], i64 16
; CHECK-NEXT: store volatile i32 57005, ptr [[ADD_PTR2]], align 4
; CHECK-NEXT: [[ADD_PTR14:%.*]] = getelementptr inbounds i8, ptr [[BAR3]], i64 12
; CHECK-NEXT: store volatile i32 48879, ptr [[ADD_PTR14]], align 4
; CHECK-NEXT: ret void
;
entry:
%add.ptr = getelementptr inbounds i8, ptr addrspace(272) %foo, i64 16
store volatile i32 57005, ptr addrspace(272) %add.ptr, align 4 ; 57005 = 0xdead
%add.ptr1 = getelementptr inbounds i8, ptr addrspace(272) %bar, i64 12
store volatile i32 48879, ptr addrspace(272) %add.ptr1, align 4 ; 48879 = 0xbeef
ret void
}
; A plain load directly through the addrspace(272) argument (no GEP).
; Expected: the base pointer is cast to the default address space and the
; load is rewritten to use the cast result.
define i32 @simple_load(ptr addrspace(272) %foo) {
; CHECK-LABEL: define i32 @simple_load(
; CHECK-SAME: ptr addrspace(272) [[FOO:%.*]]) {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[FOO1:%.*]] = addrspacecast ptr addrspace(272) [[FOO]] to ptr
; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[FOO1]], align 4
; CHECK-NEXT: ret i32 [[TMP0]]
;
entry:
%0 = load i32, ptr addrspace(272) %foo, align 4
ret i32 %0
}
; cmpxchg on an addrspace(1) pointer (the "user" address space in the
; arena model). Expected: the pointer operand is rewritten to go through
; an addrspacecast while the cmpxchg operands/orderings stay unchanged.
define { i32, i1 } @simple_cmpxchg(ptr addrspace(1) %i) {
; CHECK-LABEL: define { i32, i1 } @simple_cmpxchg(
; CHECK-SAME: ptr addrspace(1) [[I:%.*]]) {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[I1:%.*]] = addrspacecast ptr addrspace(1) [[I]] to ptr
; CHECK-NEXT: [[A:%.*]] = cmpxchg ptr [[I1]], i32 7, i32 42 monotonic monotonic, align 4
; CHECK-NEXT: ret { i32, i1 } [[A]]
;
entry:
%a = cmpxchg ptr addrspace(1) %i, i32 7, i32 42 monotonic monotonic, align 4
ret { i32, i1 } %a
}
; atomicrmw on an addrspace(1) pointer; function body has no explicit
; entry label (and the autogenerated checks accordingly omit one).
; Expected: same base-pointer rewrite via addrspacecast as above.
define void @simple_atomicrmw(ptr addrspace(1) %p) {
; CHECK-LABEL: define void @simple_atomicrmw(
; CHECK-SAME: ptr addrspace(1) [[P:%.*]]) {
; CHECK-NEXT: [[P1:%.*]] = addrspacecast ptr addrspace(1) [[P]] to ptr
; CHECK-NEXT: [[A:%.*]] = atomicrmw add ptr [[P1]], i64 42 monotonic, align 8
; CHECK-NEXT: ret void
;
%a = atomicrmw add ptr addrspace(1) %p, i64 42 monotonic, align 8
ret void
}