Before llvm 20, (void)__sync_fetch_and_add(...) always generates locked xadd insns. In the upstream Linux kernel discussion [1], it was found that for the arm64 architecture the original semantics of (void)__sync_fetch_and_add(...), i.e., __atomic_fetch_add(...), are preferred so that the jit can emit proper native barrier insns.

With llvm commits [2] and [3], (void)__sync_fetch_and_add(...) generates the following insns:
- for cpu v1/v2: locked xadd insns, to keep backward compatibility
- for cpu v3/v4: __atomic_fetch_add() insns

To get proper barrier semantics for (void)__sync_fetch_and_add(...), cpu v3/v4 is therefore recommended. This patch makes cpu=v3 the default cpu version. Users who still want cpu v1 must explicitly add -mcpu=v1 to the clang/llc command line.

[1] https://lore.kernel.org/bpf/ZqqiQQWRnz7H93Hc@google.com/T/#mb68d67bc8f39e35a0c3db52468b9de59b79f021f
[2] https://github.com/llvm/llvm-project/pull/101428
[3] https://github.com/llvm/llvm-project/pull/106494
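For illustration, a minimal C sketch of the source pattern this patch is about (not part of the patch itself; the file name atomic_add.c and the function name bump_counter are hypothetical, and the clang invocations in the comment assume a bpf target):

/*
 * Hypothetical example: the lowering of this builtin changes with the
 * default cpu version.
 *
 *   clang --target=bpf -O2 -c atomic_add.c            (cpu v3, new default)
 *   clang --target=bpf -O2 -mcpu=v1 -c atomic_add.c   (old locked-xadd behavior)
 */
void bump_counter(int *counter, int delta)
{
        /* The return value is discarded, i.e. the
         * (void)__sync_fetch_and_add(...) form discussed above: on cpu
         * v3/v4 this now lowers to an __atomic_fetch_add()-style insn,
         * on cpu v1/v2 to a locked xadd insn. */
        (void)__sync_fetch_and_add(counter, delta);
}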
; RUN: llc < %s -march=bpfel -verify-machineinstrs -show-mc-encoding -mcpu=v1 | FileCheck %s
; RUN: llc < %s -march=bpfel -verify-machineinstrs -show-mc-encoding -mcpu=v3 | FileCheck --check-prefix=CHECK-V3 %s

; CHECK-LABEL: test_load_add_32
; CHECK: lock *(u32 *)(r1 + 0) += r2
; CHECK: encoding: [0xc3,0x21
; CHECK-V3: w2 = atomic_fetch_add((u32 *)(r1 + 0), w2)
; CHECK-V3: encoding: [0xc3,0x21,0x00,0x00,0x01,0x00,0x00,0x00]
define void @test_load_add_32(ptr %p, i32 zeroext %v) {
entry:
  atomicrmw add ptr %p, i32 %v seq_cst
  ret void
}

; CHECK-LABEL: test_load_add_64
; CHECK: lock *(u64 *)(r1 + 0) += r2
; CHECK: encoding: [0xdb,0x21
; CHECK-V3: r2 = atomic_fetch_add((u64 *)(r1 + 0), r2)
; CHECK-V3: encoding: [0xdb,0x21,0x00,0x00,0x01,0x00,0x00,0x00]
define void @test_load_add_64(ptr %p, i64 zeroext %v) {
entry:
  atomicrmw add ptr %p, i64 %v seq_cst
  ret void
}