Before llvm20, (void)__sync_fetch_and_add(...) always generated locked xadd insns. In the linux kernel upstream discussion [1], it was found that for the arm64 architecture, the original semantics of (void)__sync_fetch_and_add(...), i.e., __atomic_fetch_add(...), are preferred so that the jit can emit proper native barrier insns.

With llvm commits [2] and [3], (void)__sync_fetch_and_add(...) generates the following insns:
  - for cpu v1/v2: locked xadd insns, to keep backward compatibility
  - for cpu v3/v4: __atomic_fetch_add() insns

To ensure proper barrier semantics for (void)__sync_fetch_and_add(...), cpu v3/v4 is recommended.

This patch makes cpu=v3 the default cpu version. Users who want cpu v1 need to add -mcpu=v1 explicitly to the clang/llc command line.

[1] https://lore.kernel.org/bpf/ZqqiQQWRnz7H93Hc@google.com/T/#mb68d67bc8f39e35a0c3db52468b9de59b79f021f
[2] https://github.com/llvm/llvm-project/pull/101428
[3] https://github.com/llvm/llvm-project/pull/106494
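For illustration, a minimal C sketch of the affected pattern (file and function names here are hypothetical, not part of this patch):

  /* atomic_add.c -- hypothetical example */
  long counter;

  void bump(long delta)
  {
        /* Return value discarded: this is the (void)__sync_fetch_and_add()
         * pattern the patch is about. With -mcpu=v1/v2 it keeps lowering
         * to a locked xadd insn; with -mcpu=v3/v4 it lowers to an
         * __atomic_fetch_add()-style atomic insn with barrier semantics. */
        (void)__sync_fetch_and_add(&counter, delta);
  }

Compiled for BPF, the default now selects v3; v1 must be requested explicitly:

  clang -target bpf -O2 -S atomic_add.c           # -mcpu=v3 is the new default
  clang -target bpf -O2 -mcpu=v1 -S atomic_add.c  # old locked xadd lowering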
; RUN: llc -march=bpf -mcpu=v1 < %s | FileCheck %s
;
; Source code:
;   struct t1 { int a; };
;   volatile const struct t1 data = { .a = 3 };
;   int foo(void) {
;     return data.a + 20;
;   }
; Compilation flag:
;   clang -target bpf -O2 -S -emit-llvm test.c

%struct.t1 = type { i32 }

@data = dso_local constant %struct.t1 { i32 3 }, align 4

; Function Attrs: nofree norecurse nounwind
define dso_local i32 @foo() local_unnamed_addr {
entry:
  %0 = load volatile i32, ptr @data, align 4
  %add = add nsw i32 %0, 20
; CHECK: [[REG1:r[0-9]+]] = data ll
; CHECK: r0 = *(u32 *)([[REG1]] + 0)
; CHECK: r0 += 20
  ret i32 %add
}
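A comparison sketch (not part of the test): dropping -mcpu=v1 from the RUN line would now select v3 by default,

  llc -march=bpf < %s   ; now equivalent to: llc -march=bpf -mcpu=v3 < %s

and with v3's 32-bit ALU support the 32-bit load would typically be emitted into a w register (e.g. w0 = *(u32 *)([[REG1]] + 0)), which is presumably why this test pins -mcpu=v1 to keep its r0-based CHECK lines stable.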