; Before llvm20, (void)__sync_fetch_and_add(...) always generates locked xadd insns. In linux kernel upstream discussion [1], it is found that for arm64 architecture, the original semantics of (void)__sync_fetch_and_add(...), i.e., __atomic_fetch_add(...), is preferred in order for jit to emit proper native barrier insns. In llvm commits [2] and [3], (void)__sync_fetch_and_add(...) will generate the following insns: - for cpu v1/v2: locked xadd insns to keep backward compatibility - for cpu v3/v4: __atomic_fetch_add() insns To ensure proper barrier semantics for (void)__sync_fetch_and_add(...), cpu v3/v4 is recommended. This patch enables cpu=v3 as the default cpu version. For users wanting to use cpu v1, -mcpu=v1 needs to be explicitly added to clang/llc command line. [1] https://lore.kernel.org/bpf/ZqqiQQWRnz7H93Hc@google.com/T/#mb68d67bc8f39e35a0c3db52468b9de59b79f021f [2] https://github.com/llvm/llvm-project/pull/101428 [3] https://github.com/llvm/llvm-project/pull/106494
; 65 lines, 1.7 KiB, LLVM
; RUN: llc < %s -march=bpf -mcpu=v1 -verify-machineinstrs | FileCheck %s
;
; Source code:
; struct xdp_md {
;   unsigned data;
;   unsigned data_end;
; };
;
; int gbl;
; int xdp_dummy(struct xdp_md *xdp)
; {
;   char addr = *(char *)(long)xdp->data;
;   if (gbl) {
;     if (gbl == 1)
;       return 1;
;     if (addr == 1)
;       return 3;
;   } else if (addr == 0)
;     return 2;
;   return 0;
; }
; Module-level declarations: the xdp_md context struct (two u32 fields:
; data, data_end) and the global flag read by @xdp_dummy.
%struct.xdp_md = type { i32, i32 }

@gbl = common local_unnamed_addr global i32 0, align 4
; Function Attrs: norecurse nounwind readonly
; Loads a byte through the packet pointer in xdp->data, then branches on
; @gbl and on that byte. The CHECK-NOT lines are the point of the test:
; the i8 compares must NOT emit a redundant `r1 &= 255` zero-extension
; before the conditional jumps.
define i32 @xdp_dummy(ptr nocapture readonly %xdp) local_unnamed_addr #0 {
entry:
  %0 = load i32, ptr %xdp, align 4
  %conv = zext i32 %0 to i64
  %1 = inttoptr i64 %conv to ptr
  %2 = load i8, ptr %1, align 1
; CHECK: r1 = *(u32 *)(r1 + 0)
; CHECK: r1 = *(u8 *)(r1 + 0)
  %3 = load i32, ptr @gbl, align 4
  switch i32 %3, label %if.end [
    i32 0, label %if.else
    i32 1, label %cleanup
  ]

if.end:                                           ; preds = %entry
  %cmp4 = icmp eq i8 %2, 1
; CHECK: r0 = 3
; CHECK-NOT: r1 &= 255
; CHECK: if r1 == 1 goto
  br i1 %cmp4, label %cleanup, label %if.end13

if.else:                                          ; preds = %entry
  %cmp9 = icmp eq i8 %2, 0
; CHECK: r0 = 2
; CHECK-NOT: r1 &= 255
; CHECK: if r1 == 0 goto
  br i1 %cmp9, label %cleanup, label %if.end13

if.end13:                                         ; preds = %if.else, %if.end
  br label %cleanup

cleanup:                                          ; preds = %if.else, %if.end, %entry, %if.end13
  %retval.0 = phi i32 [ 0, %if.end13 ], [ 1, %entry ], [ 3, %if.end ], [ 2, %if.else ]
  ret i32 %retval.0
}
|
|
|
|
attributes #0 = { norecurse nounwind readonly }
|