Before llvm20, (void)__sync_fetch_and_add(...) always generates locked
xadd insns. In the linux kernel upstream discussion [1], it was found
that for the arm64 architecture, the original semantics of
(void)__sync_fetch_and_add(...), i.e., __atomic_fetch_add(...), is
preferred so that the jit can emit proper native barrier insns.

With llvm commits [2] and [3], (void)__sync_fetch_and_add(...)
generates the following insns:
  - for cpu v1/v2: locked xadd insns, to keep backward compatibility
  - for cpu v3/v4: __atomic_fetch_add() insns

To ensure proper barrier semantics for (void)__sync_fetch_and_add(...),
cpu v3/v4 is recommended. This patch therefore enables cpu=v3 as the
default cpu version. Users who still want cpu v1 need to add -mcpu=v1
explicitly to the clang/llc command line.

[1] https://lore.kernel.org/bpf/ZqqiQQWRnz7H93Hc@google.com/T/#mb68d67bc8f39e35a0c3db52468b9de59b79f021f
[2] https://github.com/llvm/llvm-project/pull/101428
[3] https://github.com/llvm/llvm-project/pull/106494
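A minimal C sketch of the pattern in question (hypothetical file and
names, not part of this patch):

    /* atomic_add.c -- hypothetical example */
    long counter;

    void bump(long delta)
    {
            /* result intentionally discarded: before llvm20 this always
             * lowered to a locked xadd insn; with [2] and [3], cpu v3/v4
             * lowers it with __atomic_fetch_add() semantics so the arm64
             * jit can emit proper barrier insns */
            (void)__sync_fetch_and_add(&counter, delta);
    }

Assuming the usual bpf target flags, the two behaviors can then be
selected like:

    clang --target=bpf -O2 -c atomic_add.c            # cpu v3 default after this patch
    clang --target=bpf -mcpu=v1 -O2 -c atomic_add.c   # keep locked xadd insns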
; RUN: llc < %s -march=bpfel -mcpu=v1 -verify-machineinstrs | FileCheck %s
; RUN: llc < %s -march=bpfeb -mcpu=v1 -verify-machineinstrs | FileCheck %s

; Source code:
; struct test_t1 {
;   char a;
;   int b;
; };
; struct test_t2 {
;   char a, b;
;   struct test_t1 c[2];
;   int d[2];
;   int e;
; };
; struct test_t2 g;
; int test()
; {
;   struct test_t2 t2 = {.c = {{}, {.b = 1}}, .d = {2, 3}};
;   g = t2;
;   return 0;
; }

%struct.test_t2 = type { i8, i8, [2 x %struct.test_t1], [2 x i32], i32 }
%struct.test_t1 = type { i8, i32 }

@test.t2 = private unnamed_addr constant %struct.test_t2 { i8 0, i8 0, [2 x %struct.test_t1] [%struct.test_t1 zeroinitializer, %struct.test_t1 { i8 0, i32 1 }], [2 x i32] [i32 2, i32 3], i32 0 }, align 4
@g = common local_unnamed_addr global %struct.test_t2 zeroinitializer, align 4

; Function Attrs: nounwind
define i32 @test() local_unnamed_addr #0 {
; CHECK-LABEL: test:

entry:
  tail call void @llvm.memcpy.p0.p0.i64(ptr align 4 @g, ptr align 4 @test.t2, i64 32, i1 false)
; CHECK: r1 = g ll
; CHECK: r2 = 3
; CHECK: *(u32 *)(r1 + 24) = r2
; CHECK: r2 = 2
; CHECK: *(u32 *)(r1 + 20) = r2
; CHECK: r2 = 1
; CHECK: *(u32 *)(r1 + 16) = r2
; CHECK: r2 = 0
; CHECK: *(u32 *)(r1 + 28) = r2
; CHECK: *(u32 *)(r1 + 8) = r2
; CHECK: *(u32 *)(r1 + 4) = r2
; CHECK: *(u32 *)(r1 + 0) = r2
  ret i32 0
}
; CHECK: .section .rodata.cst32,"aM",@progbits,32

declare void @llvm.memcpy.p0.p0.i64(ptr nocapture writeonly, ptr nocapture readonly, i64, i1) #1

attributes #0 = { nounwind }
attributes #1 = { argmemonly nounwind }