Before llvm 20, (void)__sync_fetch_and_add(...) always generates locked xadd
insns. In a linux kernel upstream discussion [1], it was found that for the
arm64 architecture, the original semantics of (void)__sync_fetch_and_add(...),
i.e., __atomic_fetch_add(...), is preferred so that the jit can emit proper
native barrier insns.

With llvm commits [2] and [3], (void)__sync_fetch_and_add(...) generates the
following insns:
  - for cpu v1/v2: locked xadd insns, to keep backward compatibility
  - for cpu v3/v4: __atomic_fetch_add() insns

To ensure proper barrier semantics for (void)__sync_fetch_and_add(...),
cpu v3/v4 is recommended.

This patch enables cpu=v3 as the default cpu version. Users who want cpu v1
need to add -mcpu=v1 explicitly to the clang/llc command line.

[1] https://lore.kernel.org/bpf/ZqqiQQWRnz7H93Hc@google.com/T/#mb68d67bc8f39e35a0c3db52468b9de59b79f021f
[2] https://github.com/llvm/llvm-project/pull/101428
[3] https://github.com/llvm/llvm-project/pull/106494
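As a minimal C sketch of the pattern in question (the function and variable
names here are illustrative, not taken from the patch):

    #include <stdint.h>

    uint64_t counter;

    /* The result of the builtin is intentionally discarded. Before llvm 20
     * this always lowered to a locked xadd insn; with cpu v3/v4 it now
     * lowers to an __atomic_fetch_add(), so e.g. the arm64 jit can emit
     * the proper native barrier insns. */
    void bump(void)
    {
            (void)__sync_fetch_and_add(&counter, 1);
    }

Compiled with "clang --target=bpf -O2 -S", the default is now cpu v3; adding
-mcpu=v1 to the command line restores the old locked-xadd lowering.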
; RUN: llc -march=bpfel -mcpu=v1 < %s | FileCheck --check-prefix=CHECK-V1 %s
; RUN: llc -march=bpfel -mcpu=v2 < %s | FileCheck --check-prefix=CHECK-V2 %s
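
; cpu v1 has no < or <= conditional jump insns (BPF_JLT/BPF_JLE/BPF_JSLT/
; BPF_JSLE arrived with -mcpu=v2), so for the ult/ule/slt/sle tests below
; the V1 checks expect the comparison rewritten into a swapped > / >= form.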
define i16 @sccweqand(i16 %a, i16 %b) nounwind {
  %t1 = and i16 %a, %b
  %t2 = icmp eq i16 %t1, 0
  %t3 = zext i1 %t2 to i16
  ret i16 %t3
}
; CHECK-LABEL: sccweqand:
; CHECK-V1: if r1 == 0
; CHECK-V2: if r1 == 0

define i16 @sccwneand(i16 %a, i16 %b) nounwind {
  %t1 = and i16 %a, %b
  %t2 = icmp ne i16 %t1, 0
  %t3 = zext i1 %t2 to i16
  ret i16 %t3
}
; CHECK-LABEL: sccwneand:
; CHECK-V1: if r1 != 0
; CHECK-V2: if r1 != 0

define i16 @sccwne(i16 %a, i16 %b) nounwind {
  %t1 = icmp ne i16 %a, %b
  %t2 = zext i1 %t1 to i16
  ret i16 %t2
}
; CHECK-LABEL: sccwne:
; CHECK-V1: if r1 != r2
; CHECK-V2: if r1 != r2

define i16 @sccweq(i16 %a, i16 %b) nounwind {
  %t1 = icmp eq i16 %a, %b
  %t2 = zext i1 %t1 to i16
  ret i16 %t2
}
; CHECK-LABEL: sccweq:
; CHECK-V1: if r1 == r2
; CHECK-V2: if r1 == r2

define i16 @sccwugt(i16 %a, i16 %b) nounwind {
  %t1 = icmp ugt i16 %a, %b
  %t2 = zext i1 %t1 to i16
  ret i16 %t2
}
; CHECK-LABEL: sccwugt:
; CHECK-V1: if r1 > r2
; CHECK-V2: if r1 > r2

define i16 @sccwuge(i16 %a, i16 %b) nounwind {
  %t1 = icmp uge i16 %a, %b
  %t2 = zext i1 %t1 to i16
  ret i16 %t2
}
; CHECK-LABEL: sccwuge:
; CHECK-V1: if r1 >= r2
; CHECK-V2: if r1 >= r2

define i16 @sccwult(i16 %a, i16 %b) nounwind {
  %t1 = icmp ult i16 %a, %b
  %t2 = zext i1 %t1 to i16
  ret i16 %t2
}
; CHECK-LABEL: sccwult:
; CHECK-V1: if r2 > r1
; CHECK-V2: if r1 < r2

define i16 @sccwule(i16 %a, i16 %b) nounwind {
  %t1 = icmp ule i16 %a, %b
  %t2 = zext i1 %t1 to i16
  ret i16 %t2
}
; CHECK-LABEL: sccwule:
; CHECK-V1: if r2 >= r1
; CHECK-V2: if r1 <= r2

define i16 @sccwsgt(i16 %a, i16 %b) nounwind {
  %t1 = icmp sgt i16 %a, %b
  %t2 = zext i1 %t1 to i16
  ret i16 %t2
}
; CHECK-LABEL: sccwsgt:
; CHECK-V1: if r1 s> r2
; CHECK-V2: if r1 s> r2

define i16 @sccwsge(i16 %a, i16 %b) nounwind {
  %t1 = icmp sge i16 %a, %b
  %t2 = zext i1 %t1 to i16
  ret i16 %t2
}
; CHECK-LABEL: sccwsge:
; CHECK-V1: if r1 s>= r2
; CHECK-V2: if r1 s>= r2

define i16 @sccwslt(i16 %a, i16 %b) nounwind {
  %t1 = icmp slt i16 %a, %b
  %t2 = zext i1 %t1 to i16
  ret i16 %t2
}
; CHECK-LABEL: sccwslt:
; CHECK-V1: if r2 s> r1
; CHECK-V2: if r1 s< r2

define i16 @sccwsle(i16 %a, i16 %b) nounwind {
  %t1 = icmp sle i16 %a, %b
  %t2 = zext i1 %t1 to i16
  ret i16 %t2
}
; CHECK-LABEL: sccwsle:
; CHECK-V1: if r2 s>= r1
; CHECK-V2: if r1 s<= r2