Before llvm20, (void)__sync_fetch_and_add(...) always generates locked xadd insns. In the linux kernel upstream discussion [1], it was found that for the arm64 architecture the original semantics of (void)__sync_fetch_and_add(...), i.e., __atomic_fetch_add(...), is preferred so that the jit can emit proper native barrier insns.

With llvm commits [2] and [3], (void)__sync_fetch_and_add(...) generates the following insns:
  - for cpu v1/v2: locked xadd insns, to keep backward compatibility
  - for cpu v3/v4: __atomic_fetch_add() insns

To ensure proper barrier semantics for (void)__sync_fetch_and_add(...), cpu v3/v4 is recommended.

This patch enables cpu=v3 as the default cpu version. Users who want cpu v1 need to add -mcpu=v1 explicitly to the clang/llc command line.

[1] https://lore.kernel.org/bpf/ZqqiQQWRnz7H93Hc@google.com/T/#mb68d67bc8f39e35a0c3db52468b9de59b79f021f
[2] https://github.com/llvm/llvm-project/pull/101428
[3] https://github.com/llvm/llvm-project/pull/106494
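As a concrete illustration (a minimal sketch, not part of this patch; the file name, variable, and function are made up), a BPF C source using the builtin could look like this:

    /* atomic_add.c -- hypothetical example for illustration only */
    long counter = 0;

    void bump(void)
    {
        /* With the new default (cpu v3/v4) this lowers to __atomic_fetch_add();
         * with -mcpu=v1/v2 it still lowers to a locked xadd insn. */
        (void)__sync_fetch_and_add(&counter, 1);
    }

Building it with the new default versus explicitly requesting the old cpu version:

    clang --target=bpf -O2 -c atomic_add.c -o atomic_add.o            # cpu v3 is now the default
    clang --target=bpf -mcpu=v1 -O2 -c atomic_add.c -o atomic_add.o   # old cpu v1 lowering

The updated test below passes -mcpu=v1 explicitly on its llc RUN line, consistent with the note above that cpu v1 now has to be requested explicitly.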
; RUN: llc < %s -march=bpf -mcpu=v1 | FileCheck %s

%struct.key_t = type { i32, [16 x i8] }

; Function Attrs: nounwind uwtable
define i32 @test() #0 {
  %key = alloca %struct.key_t, align 4
; CHECK: r1 = 0
; CHECK: *(u32 *)(r10 - 8) = r1
; CHECK: *(u64 *)(r10 - 16) = r1
; CHECK: *(u64 *)(r10 - 24) = r1
  call void @llvm.memset.p0.i64(ptr align 4 %key, i8 0, i64 20, i1 false)
; CHECK: r1 = r10
; CHECK: r1 += -20
  %1 = getelementptr inbounds %struct.key_t, ptr %key, i64 0, i32 1, i64 0
; CHECK: call test1
  call void @test1(ptr %1) #3
  ret i32 0
}

; Function Attrs: nounwind argmemonly
declare void @llvm.memset.p0.i64(ptr nocapture, i8, i64, i1) #1

declare void @test1(ptr) #2