Before llvm20, (void)__sync_fetch_and_add(...) always generated locked xadd insns. In the linux kernel upstream discussion [1], it was found that for the arm64 architecture the original semantics of (void)__sync_fetch_and_add(...), i.e. __atomic_fetch_add(...), is preferred so that the jit can emit proper native barrier insns.

With llvm commits [2] and [3], (void)__sync_fetch_and_add(...) generates the following insns:
  - for cpu v1/v2: locked xadd insns, to keep backward compatibility
  - for cpu v3/v4: __atomic_fetch_add() insns

To get proper barrier semantics for (void)__sync_fetch_and_add(...), cpu v3/v4 is recommended. This patch makes cpu=v3 the default cpu version. Users who want cpu v1 need to add -mcpu=v1 explicitly to the clang/llc command line.

[1] https://lore.kernel.org/bpf/ZqqiQQWRnz7H93Hc@google.com/T/#mb68d67bc8f39e35a0c3db52468b9de59b79f021f
[2] https://github.com/llvm/llvm-project/pull/101428
[3] https://github.com/llvm/llvm-project/pull/106494
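A minimal C sketch of the construct in question (the function and variable names are illustrative, not taken from any kernel source):

  /* Illustrative example only. The builtin's result is discarded, so only
   * the add itself is needed. Per the notes above, with -mcpu=v1/v2 the
   * BPF backend keeps emitting a locked xadd for this, while with
   * -mcpu=v3/v4 it emits an atomic fetch-add, which lets the arm64 jit
   * insert the proper barrier insns.
   */
  void bump(unsigned long *counter)
  {
          (void)__sync_fetch_and_add(counter, 1);
  }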
; RUN: llc < %s -march=bpfel -mcpu=v1 | FileCheck %s
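;
; Exercise 16-bit loads through several addressing patterns: plain pointer,
; global, absolute address, GEP offset and struct field. -mcpu=v1 is passed
; explicitly now that cpu v3 is the default.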
define i16 @am1(ptr %a) nounwind {
  %1 = load i16, ptr %a
  ret i16 %1
}
; CHECK-LABEL: am1:
; CHECK: r0 = *(u16 *)(r1 + 0)

@foo = external global i16

define i16 @am2() nounwind {
  %1 = load i16, ptr @foo
  ret i16 %1
}
; CHECK-LABEL: am2:
; CHECK: r0 = *(u16 *)(r1 + 0)

define i16 @am4() nounwind {
  %1 = load volatile i16, ptr inttoptr(i16 32 to ptr)
  ret i16 %1
}
; CHECK-LABEL: am4:
; CHECK: r1 = 32
; CHECK: r0 = *(u16 *)(r1 + 0)

define i16 @am5(ptr %a) nounwind {
  %1 = getelementptr i16, ptr %a, i16 2
  %2 = load i16, ptr %1
  ret i16 %2
}
; CHECK-LABEL: am5:
; CHECK: r0 = *(u16 *)(r1 + 4)

%S = type { i16, i16 }
@baz = common global %S zeroinitializer, align 1

define i16 @am6() nounwind {
  %1 = load i16, ptr getelementptr (%S, ptr @baz, i32 0, i32 1)
  ret i16 %1
}
; CHECK-LABEL: am6:
; CHECK: r0 = *(u16 *)(r1 + 2)