clang-p2996/llvm/test/CodeGen/BPF/cc_args.ll
yonghong-song 7852ebc088 [BPF] Make -mcpu=v3 as the default (#107008)
Before llvm 20, (void)__sync_fetch_and_add(...) always generated locked
xadd insns. In the linux kernel upstream discussion [1], it was found
that for the arm64 architecture the original semantics of
(void)__sync_fetch_and_add(...), i.e., __atomic_fetch_add(...), are
preferred so that the jit can emit proper native barrier insns.

With llvm commits [2] and [3], (void)__sync_fetch_and_add(...) now
generates the following insns:
  - for cpu v1/v2: locked xadd insns to keep backward compatibility
  - for cpu v3/v4: __atomic_fetch_add() insns

To ensure proper barrier semantics for (void)__sync_fetch_and_add(...),
cpu v3/v4 is recommended.
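
As a rough sketch (file and symbol names here are illustrative, not
taken from the patch): at the IR level the builtin is an atomicrmw
whose result is discarded, and the selected cpu version decides how it
is lowered:

  ; sync_add.ll (hypothetical)
  @g = global i64 0, align 8

  define void @inc() {
  entry:
    ; result discarded, as in (void)__sync_fetch_and_add(&g, 1)
    %old = atomicrmw add ptr @g, i64 1 seq_cst
    ret void
  }

  ; llc -march=bpf -mcpu=v1 sync_add.ll  -> locked xadd insn (backward compatible)
  ; llc -march=bpf -mcpu=v3 sync_add.ll  -> atomic fetch-add insn with barrier semantics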

This patch makes cpu v3 the default cpu version. Users who want to stay
on cpu v1 need to add -mcpu=v1 explicitly to the clang/llc command
line.
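
For example (illustrative file names; assuming clang/llc are built with
the BPF target):

  clang --target=bpf -O2 -mcpu=v1 -c prog.c -o prog.o
  llc -march=bpf -mcpu=v1 prog.ll -o prog.s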

  [1] https://lore.kernel.org/bpf/ZqqiQQWRnz7H93Hc@google.com/T/#mb68d67bc8f39e35a0c3db52468b9de59b79f021f
  [2] https://github.com/llvm/llvm-project/pull/101428
  [3] https://github.com/llvm/llvm-project/pull/106494
2024-09-03 07:15:18 -07:00


; RUN: llc < %s -march=bpfel -mcpu=v1 -show-mc-encoding | FileCheck %s
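; This test exercises the BPF calling convention at -mcpu=v1: integer
; arguments are passed in r1-r5. Each callee below stores its incoming
; arguments to volatile globals so the CHECK lines can verify which
; register carries which value (and the instruction encodings).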
define void @test() #0 {
entry:
; CHECK: test:
; CHECK: r1 = 123 # encoding: [0xb7,0x01,0x00,0x00,0x7b,0x00,0x00,0x00]
; CHECK: call f_i16
call void @f_i16(i16 123)
; CHECK: r1 = 12345678 # encoding: [0xb7,0x01,0x00,0x00,0x4e,0x61,0xbc,0x00]
; CHECK: call f_i32
call void @f_i32(i32 12345678)
; CHECK: r1 = 72623859790382856 ll # encoding: [0x18,0x01,0x00,0x00,0x08,0x07,0x06,0x05,0x00,0x00,0x00,0x00,0x04,0x03,0x02,0x01]
; CHECK: call f_i64
call void @f_i64(i64 72623859790382856)
; CHECK: r1 = 1234
; CHECK: r2 = 5678
; CHECK: call f_i32_i32
call void @f_i32_i32(i32 1234, i32 5678)
; CHECK: r1 = 2
; CHECK: r2 = 3
; CHECK: r3 = 4
; CHECK: call f_i16_i32_i16
call void @f_i16_i32_i16(i16 2, i32 3, i16 4)
; CHECK: r1 = 5
; CHECK: r2 = 7262385979038285 ll
; CHECK: r3 = 6
; CHECK: call f_i16_i64_i16
call void @f_i16_i64_i16(i16 5, i64 7262385979038285, i16 6)
ret void
}
@g_i16 = common global i16 0, align 2
@g_i32 = common global i32 0, align 2
@g_i64 = common global i64 0, align 4
define void @f_i16(i16 %a) #0 {
; CHECK: f_i16:
; CHECK: *(u16 *)(r2 + 0) = r1 # encoding: [0x6b,0x12,0x00,0x00,0x00,0x00,0x00,0x00]
store volatile i16 %a, ptr @g_i16, align 2
ret void
}
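; The i32 store below is under-aligned (align 2), so it is split into
; two 16-bit stores.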
define void @f_i32(i32 %a) #0 {
; CHECK: f_i32:
; CHECK: *(u16 *)(r2 + 0) = r1 # encoding: [0x6b,0x12,0x00,0x00,0x00,0x00,0x00,0x00]
; CHECK: *(u16 *)(r2 + 2) = r1 # encoding: [0x6b,0x12,0x02,0x00,0x00,0x00,0x00,0x00]
store volatile i32 %a, ptr @g_i32, align 2
ret void
}
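; The i64 store below is under-aligned (align 2; @g_i64 itself is only
; align 4), so it is emitted as two 32-bit stores rather than one u64 store.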
define void @f_i64(i64 %a) #0 {
; CHECK: f_i64:
; CHECK: *(u32 *)(r2 + 0) = r1
; CHECK: *(u32 *)(r2 + 4) = r1 # encoding: [0x63,0x12,0x04,0x00,0x00,0x00,0x00,0x00]
store volatile i64 %a, ptr @g_i64, align 2
ret void
}
define void @f_i32_i32(i32 %a, i32 %b) #0 {
; CHECK: f_i32_i32:
; CHECK: *(u32 *)(r3 + 0) = r1
store volatile i32 %a, ptr @g_i32, align 4
; CHECK: *(u32 *)(r3 + 0) = r2
store volatile i32 %b, ptr @g_i32, align 4
ret void
}
define void @f_i16_i32_i16(i16 %a, i32 %b, i16 %c) #0 {
; CHECK: f_i16_i32_i16:
; CHECK: *(u16 *)(r4 + 0) = r1
store volatile i16 %a, ptr @g_i16, align 2
; CHECK: *(u32 *)(r1 + 0) = r2
store volatile i32 %b, ptr @g_i32, align 4
; CHECK: *(u16 *)(r4 + 0) = r3
store volatile i16 %c, ptr @g_i16, align 2
ret void
}
define void @f_i16_i64_i16(i16 %a, i64 %b, i16 %c) #0 {
; CHECK: f_i16_i64_i16:
; CHECK: *(u16 *)(r4 + 0) = r1
store volatile i16 %a, ptr @g_i16, align 2
; CHECK: *(u64 *)(r1 + 0) = r2 # encoding: [0x7b,0x21,0x00,0x00,0x00,0x00,0x00,0x00]
store volatile i64 %b, ptr @g_i64, align 8
; CHECK: *(u16 *)(r4 + 0) = r3
store volatile i16 %c, ptr @g_i16, align 2
ret void
}