clang-p2996/llvm/test/CodeGen/AArch64/atomic-ops.ll
David Green 8a645fc44b [AArch64] Enable type promotion for AArch64
This enables the type promotion pass for AArch64, which acts as a
CodeGenPrepare pass to promote illegal integer types to legal ones. It is
especially useful for removing extends that would otherwise require
cross-basic-block analysis.

I have enabled this generally, for both ISel and GlobalISel. In some
quick experiments it appeared to help GlobalISel remove extra extends in
places too, but that might just be missing optimizations that are better
left for later. We can disable it again if required.

In my experiments, this can improve performance in some cases, and
codesize was a small improvement. SPEC showed a very small improvement,
within the noise. Some of the test cases show extends being moved out of
loops, often when the extend is a cmp operand, which should reduce the
latency of the instruction in the loop on many CPUs. The
signed-truncation-check tests are increasing in size as they no longer
match the specific DAG combines.

We also hope to add some additional improvements to the pass in the near
future, to capture more cases of promoting extends through phis that
have come up in a few places lately.

Differential Revision: https://reviews.llvm.org/D110239
2021-09-29 15:13:12 +01:00
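
As a rough, hypothetical illustration of what the pass targets (this example is not taken from the patch itself): an illegal i8 induction variable forces a zero-extend inside the loop so the compare can be performed on a legal type, and promoting the arithmetic to i32 up front lets that extend be removed from the loop body.

define i8 @count_up(i8 %start, i32 %limit) {
entry:
  br label %loop
loop:
  %iv = phi i8 [ %start, %entry ], [ %inc, %loop ]
  %inc = add i8 %iv, 1
  %wide = zext i8 %inc to i32            ; re-materialized on every iteration
  %cmp = icmp ult i32 %wide, %limit
  br i1 %cmp, label %loop, label %exit
exit:
  ret i8 %inc
}

After promotion, the add and the compare are performed on i32 (with masking inserted where the narrow wrap-around semantics are observable), so no extend remains inside the loop.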

; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=aarch64-none-linux-gnu -disable-post-ra -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,INLINE_ATOMICS
; RUN: llc -mtriple=aarch64-none-linux-gnu -disable-post-ra -verify-machineinstrs -mattr=+outline-atomics < %s | FileCheck %s --check-prefixes=CHECK,OUTLINE_ATOMICS
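; The INLINE_ATOMICS run checks the default lowering, which expands atomicrmw and
; cmpxchg into load/store-exclusive (ldxr*/stxr*) retry loops. The OUTLINE_ATOMICS
; run (-mattr=+outline-atomics) instead calls the compiler-rt __aarch64_* helpers,
; which select LSE instructions at run time when the CPU supports them.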
@var8 = dso_local global i8 0
@var16 = dso_local global i16 0
@var32 = dso_local global i32 0
@var64 = dso_local global i64 0
define dso_local i8 @test_atomic_load_add_i8(i8 %offset) nounwind {
; INLINE_ATOMICS-LABEL: test_atomic_load_add_i8:
; INLINE_ATOMICS: // %bb.0:
; INLINE_ATOMICS-NEXT: adrp x9, var8
; INLINE_ATOMICS-NEXT: add x9, x9, :lo12:var8
; INLINE_ATOMICS-NEXT: .LBB0_1: // %atomicrmw.start
; INLINE_ATOMICS-NEXT: // =>This Inner Loop Header: Depth=1
; INLINE_ATOMICS-NEXT: ldaxrb w8, [x9]
; INLINE_ATOMICS-NEXT: add w10, w8, w0
; INLINE_ATOMICS-NEXT: stlxrb w11, w10, [x9]
; INLINE_ATOMICS-NEXT: cbnz w11, .LBB0_1
; INLINE_ATOMICS-NEXT: // %bb.2: // %atomicrmw.end
; INLINE_ATOMICS-NEXT: mov w0, w8
; INLINE_ATOMICS-NEXT: ret
;
; OUTLINE_ATOMICS-LABEL: test_atomic_load_add_i8:
; OUTLINE_ATOMICS: // %bb.0:
; OUTLINE_ATOMICS-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
; OUTLINE_ATOMICS-NEXT: adrp x1, var8
; OUTLINE_ATOMICS-NEXT: add x1, x1, :lo12:var8
; OUTLINE_ATOMICS-NEXT: bl __aarch64_ldadd1_acq_rel
; OUTLINE_ATOMICS-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
; OUTLINE_ATOMICS-NEXT: ret
%old = atomicrmw add i8* @var8, i8 %offset seq_cst
ret i8 %old
}
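; In the inline expansion, the requested ordering is encoded in the exclusive
; pair: seq_cst uses a load-acquire (ldaxrb) with a store-release (stlxrb), and
; cbnz retries whenever the store-exclusive fails.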
define dso_local i16 @test_atomic_load_add_i16(i16 %offset) nounwind {
; INLINE_ATOMICS-LABEL: test_atomic_load_add_i16:
; INLINE_ATOMICS: // %bb.0:
; INLINE_ATOMICS-NEXT: adrp x9, var16
; INLINE_ATOMICS-NEXT: add x9, x9, :lo12:var16
; INLINE_ATOMICS-NEXT: .LBB1_1: // %atomicrmw.start
; INLINE_ATOMICS-NEXT: // =>This Inner Loop Header: Depth=1
; INLINE_ATOMICS-NEXT: ldaxrh w8, [x9]
; INLINE_ATOMICS-NEXT: add w10, w8, w0
; INLINE_ATOMICS-NEXT: stxrh w11, w10, [x9]
; INLINE_ATOMICS-NEXT: cbnz w11, .LBB1_1
; INLINE_ATOMICS-NEXT: // %bb.2: // %atomicrmw.end
; INLINE_ATOMICS-NEXT: mov w0, w8
; INLINE_ATOMICS-NEXT: ret
;
; OUTLINE_ATOMICS-LABEL: test_atomic_load_add_i16:
; OUTLINE_ATOMICS: // %bb.0:
; OUTLINE_ATOMICS-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
; OUTLINE_ATOMICS-NEXT: adrp x1, var16
; OUTLINE_ATOMICS-NEXT: add x1, x1, :lo12:var16
; OUTLINE_ATOMICS-NEXT: bl __aarch64_ldadd2_acq
; OUTLINE_ATOMICS-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
; OUTLINE_ATOMICS-NEXT: ret
%old = atomicrmw add i16* @var16, i16 %offset acquire
ret i16 %old
}
define dso_local i32 @test_atomic_load_add_i32(i32 %offset) nounwind {
; INLINE_ATOMICS-LABEL: test_atomic_load_add_i32:
; INLINE_ATOMICS: // %bb.0:
; INLINE_ATOMICS-NEXT: adrp x9, var32
; INLINE_ATOMICS-NEXT: add x9, x9, :lo12:var32
; INLINE_ATOMICS-NEXT: .LBB2_1: // %atomicrmw.start
; INLINE_ATOMICS-NEXT: // =>This Inner Loop Header: Depth=1
; INLINE_ATOMICS-NEXT: ldxr w8, [x9]
; INLINE_ATOMICS-NEXT: add w10, w8, w0
; INLINE_ATOMICS-NEXT: stlxr w11, w10, [x9]
; INLINE_ATOMICS-NEXT: cbnz w11, .LBB2_1
; INLINE_ATOMICS-NEXT: // %bb.2: // %atomicrmw.end
; INLINE_ATOMICS-NEXT: mov w0, w8
; INLINE_ATOMICS-NEXT: ret
;
; OUTLINE_ATOMICS-LABEL: test_atomic_load_add_i32:
; OUTLINE_ATOMICS: // %bb.0:
; OUTLINE_ATOMICS-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
; OUTLINE_ATOMICS-NEXT: adrp x1, var32
; OUTLINE_ATOMICS-NEXT: add x1, x1, :lo12:var32
; OUTLINE_ATOMICS-NEXT: bl __aarch64_ldadd4_rel
; OUTLINE_ATOMICS-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
; OUTLINE_ATOMICS-NEXT: ret
%old = atomicrmw add i32* @var32, i32 %offset release
ret i32 %old
}
define dso_local i64 @test_atomic_load_add_i64(i64 %offset) nounwind {
; INLINE_ATOMICS-LABEL: test_atomic_load_add_i64:
; INLINE_ATOMICS: // %bb.0:
; INLINE_ATOMICS-NEXT: adrp x9, var64
; INLINE_ATOMICS-NEXT: add x9, x9, :lo12:var64
; INLINE_ATOMICS-NEXT: .LBB3_1: // %atomicrmw.start
; INLINE_ATOMICS-NEXT: // =>This Inner Loop Header: Depth=1
; INLINE_ATOMICS-NEXT: ldxr x8, [x9]
; INLINE_ATOMICS-NEXT: add x10, x8, x0
; INLINE_ATOMICS-NEXT: stxr w11, x10, [x9]
; INLINE_ATOMICS-NEXT: cbnz w11, .LBB3_1
; INLINE_ATOMICS-NEXT: // %bb.2: // %atomicrmw.end
; INLINE_ATOMICS-NEXT: mov x0, x8
; INLINE_ATOMICS-NEXT: ret
;
; OUTLINE_ATOMICS-LABEL: test_atomic_load_add_i64:
; OUTLINE_ATOMICS: // %bb.0:
; OUTLINE_ATOMICS-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
; OUTLINE_ATOMICS-NEXT: adrp x1, var64
; OUTLINE_ATOMICS-NEXT: add x1, x1, :lo12:var64
; OUTLINE_ATOMICS-NEXT: bl __aarch64_ldadd8_relax
; OUTLINE_ATOMICS-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
; OUTLINE_ATOMICS-NEXT: ret
%old = atomicrmw add i64* @var64, i64 %offset monotonic
ret i64 %old
}
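; There is no outlined subtract helper, so the OUTLINE_ATOMICS lowering for the
; sub tests below negates the operand and reuses the __aarch64_ldadd* entry points.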
define dso_local i8 @test_atomic_load_sub_i8(i8 %offset) nounwind {
; INLINE_ATOMICS-LABEL: test_atomic_load_sub_i8:
; INLINE_ATOMICS: // %bb.0:
; INLINE_ATOMICS-NEXT: adrp x9, var8
; INLINE_ATOMICS-NEXT: add x9, x9, :lo12:var8
; INLINE_ATOMICS-NEXT: .LBB4_1: // %atomicrmw.start
; INLINE_ATOMICS-NEXT: // =>This Inner Loop Header: Depth=1
; INLINE_ATOMICS-NEXT: ldxrb w8, [x9]
; INLINE_ATOMICS-NEXT: sub w10, w8, w0
; INLINE_ATOMICS-NEXT: stxrb w11, w10, [x9]
; INLINE_ATOMICS-NEXT: cbnz w11, .LBB4_1
; INLINE_ATOMICS-NEXT: // %bb.2: // %atomicrmw.end
; INLINE_ATOMICS-NEXT: mov w0, w8
; INLINE_ATOMICS-NEXT: ret
;
; OUTLINE_ATOMICS-LABEL: test_atomic_load_sub_i8:
; OUTLINE_ATOMICS: // %bb.0:
; OUTLINE_ATOMICS-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
; OUTLINE_ATOMICS-NEXT: neg w0, w0
; OUTLINE_ATOMICS-NEXT: adrp x1, var8
; OUTLINE_ATOMICS-NEXT: add x1, x1, :lo12:var8
; OUTLINE_ATOMICS-NEXT: bl __aarch64_ldadd1_relax
; OUTLINE_ATOMICS-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
; OUTLINE_ATOMICS-NEXT: ret
%old = atomicrmw sub i8* @var8, i8 %offset monotonic
ret i8 %old
}
define dso_local i16 @test_atomic_load_sub_i16(i16 %offset) nounwind {
; INLINE_ATOMICS-LABEL: test_atomic_load_sub_i16:
; INLINE_ATOMICS: // %bb.0:
; INLINE_ATOMICS-NEXT: adrp x9, var16
; INLINE_ATOMICS-NEXT: add x9, x9, :lo12:var16
; INLINE_ATOMICS-NEXT: .LBB5_1: // %atomicrmw.start
; INLINE_ATOMICS-NEXT: // =>This Inner Loop Header: Depth=1
; INLINE_ATOMICS-NEXT: ldxrh w8, [x9]
; INLINE_ATOMICS-NEXT: sub w10, w8, w0
; INLINE_ATOMICS-NEXT: stlxrh w11, w10, [x9]
; INLINE_ATOMICS-NEXT: cbnz w11, .LBB5_1
; INLINE_ATOMICS-NEXT: // %bb.2: // %atomicrmw.end
; INLINE_ATOMICS-NEXT: mov w0, w8
; INLINE_ATOMICS-NEXT: ret
;
; OUTLINE_ATOMICS-LABEL: test_atomic_load_sub_i16:
; OUTLINE_ATOMICS: // %bb.0:
; OUTLINE_ATOMICS-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
; OUTLINE_ATOMICS-NEXT: neg w0, w0
; OUTLINE_ATOMICS-NEXT: adrp x1, var16
; OUTLINE_ATOMICS-NEXT: add x1, x1, :lo12:var16
; OUTLINE_ATOMICS-NEXT: bl __aarch64_ldadd2_rel
; OUTLINE_ATOMICS-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
; OUTLINE_ATOMICS-NEXT: ret
%old = atomicrmw sub i16* @var16, i16 %offset release
ret i16 %old
}
define dso_local i32 @test_atomic_load_sub_i32(i32 %offset) nounwind {
; INLINE_ATOMICS-LABEL: test_atomic_load_sub_i32:
; INLINE_ATOMICS: // %bb.0:
; INLINE_ATOMICS-NEXT: adrp x9, var32
; INLINE_ATOMICS-NEXT: add x9, x9, :lo12:var32
; INLINE_ATOMICS-NEXT: .LBB6_1: // %atomicrmw.start
; INLINE_ATOMICS-NEXT: // =>This Inner Loop Header: Depth=1
; INLINE_ATOMICS-NEXT: ldaxr w8, [x9]
; INLINE_ATOMICS-NEXT: sub w10, w8, w0
; INLINE_ATOMICS-NEXT: stxr w11, w10, [x9]
; INLINE_ATOMICS-NEXT: cbnz w11, .LBB6_1
; INLINE_ATOMICS-NEXT: // %bb.2: // %atomicrmw.end
; INLINE_ATOMICS-NEXT: mov w0, w8
; INLINE_ATOMICS-NEXT: ret
;
; OUTLINE_ATOMICS-LABEL: test_atomic_load_sub_i32:
; OUTLINE_ATOMICS: // %bb.0:
; OUTLINE_ATOMICS-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
; OUTLINE_ATOMICS-NEXT: neg w0, w0
; OUTLINE_ATOMICS-NEXT: adrp x1, var32
; OUTLINE_ATOMICS-NEXT: add x1, x1, :lo12:var32
; OUTLINE_ATOMICS-NEXT: bl __aarch64_ldadd4_acq
; OUTLINE_ATOMICS-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
; OUTLINE_ATOMICS-NEXT: ret
%old = atomicrmw sub i32* @var32, i32 %offset acquire
ret i32 %old
}
define dso_local i64 @test_atomic_load_sub_i64(i64 %offset) nounwind {
; INLINE_ATOMICS-LABEL: test_atomic_load_sub_i64:
; INLINE_ATOMICS: // %bb.0:
; INLINE_ATOMICS-NEXT: adrp x9, var64
; INLINE_ATOMICS-NEXT: add x9, x9, :lo12:var64
; INLINE_ATOMICS-NEXT: .LBB7_1: // %atomicrmw.start
; INLINE_ATOMICS-NEXT: // =>This Inner Loop Header: Depth=1
; INLINE_ATOMICS-NEXT: ldaxr x8, [x9]
; INLINE_ATOMICS-NEXT: sub x10, x8, x0
; INLINE_ATOMICS-NEXT: stlxr w11, x10, [x9]
; INLINE_ATOMICS-NEXT: cbnz w11, .LBB7_1
; INLINE_ATOMICS-NEXT: // %bb.2: // %atomicrmw.end
; INLINE_ATOMICS-NEXT: mov x0, x8
; INLINE_ATOMICS-NEXT: ret
;
; OUTLINE_ATOMICS-LABEL: test_atomic_load_sub_i64:
; OUTLINE_ATOMICS: // %bb.0:
; OUTLINE_ATOMICS-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
; OUTLINE_ATOMICS-NEXT: neg x0, x0
; OUTLINE_ATOMICS-NEXT: adrp x1, var64
; OUTLINE_ATOMICS-NEXT: add x1, x1, :lo12:var64
; OUTLINE_ATOMICS-NEXT: bl __aarch64_ldadd8_acq_rel
; OUTLINE_ATOMICS-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
; OUTLINE_ATOMICS-NEXT: ret
%old = atomicrmw sub i64* @var64, i64 %offset seq_cst
ret i64 %old
}
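; Similarly, there is no outlined 'and' helper: the operand is inverted (mvn) and
; the __aarch64_ldclr* helpers are used, matching the LSE LDCLR (bitwise clear,
; i.e. AND NOT) semantics.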
define dso_local i8 @test_atomic_load_and_i8(i8 %offset) nounwind {
; INLINE_ATOMICS-LABEL: test_atomic_load_and_i8:
; INLINE_ATOMICS: // %bb.0:
; INLINE_ATOMICS-NEXT: adrp x9, var8
; INLINE_ATOMICS-NEXT: add x9, x9, :lo12:var8
; INLINE_ATOMICS-NEXT: .LBB8_1: // %atomicrmw.start
; INLINE_ATOMICS-NEXT: // =>This Inner Loop Header: Depth=1
; INLINE_ATOMICS-NEXT: ldxrb w8, [x9]
; INLINE_ATOMICS-NEXT: and w10, w8, w0
; INLINE_ATOMICS-NEXT: stlxrb w11, w10, [x9]
; INLINE_ATOMICS-NEXT: cbnz w11, .LBB8_1
; INLINE_ATOMICS-NEXT: // %bb.2: // %atomicrmw.end
; INLINE_ATOMICS-NEXT: mov w0, w8
; INLINE_ATOMICS-NEXT: ret
;
; OUTLINE_ATOMICS-LABEL: test_atomic_load_and_i8:
; OUTLINE_ATOMICS: // %bb.0:
; OUTLINE_ATOMICS-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
; OUTLINE_ATOMICS-NEXT: mvn w0, w0
; OUTLINE_ATOMICS-NEXT: adrp x1, var8
; OUTLINE_ATOMICS-NEXT: add x1, x1, :lo12:var8
; OUTLINE_ATOMICS-NEXT: bl __aarch64_ldclr1_rel
; OUTLINE_ATOMICS-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
; OUTLINE_ATOMICS-NEXT: ret
%old = atomicrmw and i8* @var8, i8 %offset release
ret i8 %old
}
define dso_local i16 @test_atomic_load_and_i16(i16 %offset) nounwind {
; INLINE_ATOMICS-LABEL: test_atomic_load_and_i16:
; INLINE_ATOMICS: // %bb.0:
; INLINE_ATOMICS-NEXT: adrp x9, var16
; INLINE_ATOMICS-NEXT: add x9, x9, :lo12:var16
; INLINE_ATOMICS-NEXT: .LBB9_1: // %atomicrmw.start
; INLINE_ATOMICS-NEXT: // =>This Inner Loop Header: Depth=1
; INLINE_ATOMICS-NEXT: ldxrh w8, [x9]
; INLINE_ATOMICS-NEXT: and w10, w8, w0
; INLINE_ATOMICS-NEXT: stxrh w11, w10, [x9]
; INLINE_ATOMICS-NEXT: cbnz w11, .LBB9_1
; INLINE_ATOMICS-NEXT: // %bb.2: // %atomicrmw.end
; INLINE_ATOMICS-NEXT: mov w0, w8
; INLINE_ATOMICS-NEXT: ret
;
; OUTLINE_ATOMICS-LABEL: test_atomic_load_and_i16:
; OUTLINE_ATOMICS: // %bb.0:
; OUTLINE_ATOMICS-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
; OUTLINE_ATOMICS-NEXT: mvn w0, w0
; OUTLINE_ATOMICS-NEXT: adrp x1, var16
; OUTLINE_ATOMICS-NEXT: add x1, x1, :lo12:var16
; OUTLINE_ATOMICS-NEXT: bl __aarch64_ldclr2_relax
; OUTLINE_ATOMICS-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
; OUTLINE_ATOMICS-NEXT: ret
%old = atomicrmw and i16* @var16, i16 %offset monotonic
ret i16 %old
}
define dso_local i32 @test_atomic_load_and_i32(i32 %offset) nounwind {
; INLINE_ATOMICS-LABEL: test_atomic_load_and_i32:
; INLINE_ATOMICS: // %bb.0:
; INLINE_ATOMICS-NEXT: adrp x9, var32
; INLINE_ATOMICS-NEXT: add x9, x9, :lo12:var32
; INLINE_ATOMICS-NEXT: .LBB10_1: // %atomicrmw.start
; INLINE_ATOMICS-NEXT: // =>This Inner Loop Header: Depth=1
; INLINE_ATOMICS-NEXT: ldaxr w8, [x9]
; INLINE_ATOMICS-NEXT: and w10, w8, w0
; INLINE_ATOMICS-NEXT: stlxr w11, w10, [x9]
; INLINE_ATOMICS-NEXT: cbnz w11, .LBB10_1
; INLINE_ATOMICS-NEXT: // %bb.2: // %atomicrmw.end
; INLINE_ATOMICS-NEXT: mov w0, w8
; INLINE_ATOMICS-NEXT: ret
;
; OUTLINE_ATOMICS-LABEL: test_atomic_load_and_i32:
; OUTLINE_ATOMICS: // %bb.0:
; OUTLINE_ATOMICS-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
; OUTLINE_ATOMICS-NEXT: mvn w0, w0
; OUTLINE_ATOMICS-NEXT: adrp x1, var32
; OUTLINE_ATOMICS-NEXT: add x1, x1, :lo12:var32
; OUTLINE_ATOMICS-NEXT: bl __aarch64_ldclr4_acq_rel
; OUTLINE_ATOMICS-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
; OUTLINE_ATOMICS-NEXT: ret
%old = atomicrmw and i32* @var32, i32 %offset seq_cst
ret i32 %old
}
define dso_local i64 @test_atomic_load_and_i64(i64 %offset) nounwind {
; INLINE_ATOMICS-LABEL: test_atomic_load_and_i64:
; INLINE_ATOMICS: // %bb.0:
; INLINE_ATOMICS-NEXT: adrp x9, var64
; INLINE_ATOMICS-NEXT: add x9, x9, :lo12:var64
; INLINE_ATOMICS-NEXT: .LBB11_1: // %atomicrmw.start
; INLINE_ATOMICS-NEXT: // =>This Inner Loop Header: Depth=1
; INLINE_ATOMICS-NEXT: ldaxr x8, [x9]
; INLINE_ATOMICS-NEXT: and x10, x8, x0
; INLINE_ATOMICS-NEXT: stxr w11, x10, [x9]
; INLINE_ATOMICS-NEXT: cbnz w11, .LBB11_1
; INLINE_ATOMICS-NEXT: // %bb.2: // %atomicrmw.end
; INLINE_ATOMICS-NEXT: mov x0, x8
; INLINE_ATOMICS-NEXT: ret
;
; OUTLINE_ATOMICS-LABEL: test_atomic_load_and_i64:
; OUTLINE_ATOMICS: // %bb.0:
; OUTLINE_ATOMICS-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
; OUTLINE_ATOMICS-NEXT: mvn x0, x0
; OUTLINE_ATOMICS-NEXT: adrp x1, var64
; OUTLINE_ATOMICS-NEXT: add x1, x1, :lo12:var64
; OUTLINE_ATOMICS-NEXT: bl __aarch64_ldclr8_acq
; OUTLINE_ATOMICS-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
; OUTLINE_ATOMICS-NEXT: ret
%old = atomicrmw and i64* @var64, i64 %offset acquire
ret i64 %old
}
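; 'or' maps directly onto the __aarch64_ldset* helpers (LSE LDSET is a bitwise OR).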
define dso_local i8 @test_atomic_load_or_i8(i8 %offset) nounwind {
; INLINE_ATOMICS-LABEL: test_atomic_load_or_i8:
; INLINE_ATOMICS: // %bb.0:
; INLINE_ATOMICS-NEXT: adrp x9, var8
; INLINE_ATOMICS-NEXT: add x9, x9, :lo12:var8
; INLINE_ATOMICS-NEXT: .LBB12_1: // %atomicrmw.start
; INLINE_ATOMICS-NEXT: // =>This Inner Loop Header: Depth=1
; INLINE_ATOMICS-NEXT: ldaxrb w8, [x9]
; INLINE_ATOMICS-NEXT: orr w10, w8, w0
; INLINE_ATOMICS-NEXT: stlxrb w11, w10, [x9]
; INLINE_ATOMICS-NEXT: cbnz w11, .LBB12_1
; INLINE_ATOMICS-NEXT: // %bb.2: // %atomicrmw.end
; INLINE_ATOMICS-NEXT: mov w0, w8
; INLINE_ATOMICS-NEXT: ret
;
; OUTLINE_ATOMICS-LABEL: test_atomic_load_or_i8:
; OUTLINE_ATOMICS: // %bb.0:
; OUTLINE_ATOMICS-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
; OUTLINE_ATOMICS-NEXT: adrp x1, var8
; OUTLINE_ATOMICS-NEXT: add x1, x1, :lo12:var8
; OUTLINE_ATOMICS-NEXT: bl __aarch64_ldset1_acq_rel
; OUTLINE_ATOMICS-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
; OUTLINE_ATOMICS-NEXT: ret
%old = atomicrmw or i8* @var8, i8 %offset seq_cst
ret i8 %old
}
define dso_local i16 @test_atomic_load_or_i16(i16 %offset) nounwind {
; INLINE_ATOMICS-LABEL: test_atomic_load_or_i16:
; INLINE_ATOMICS: // %bb.0:
; INLINE_ATOMICS-NEXT: adrp x9, var16
; INLINE_ATOMICS-NEXT: add x9, x9, :lo12:var16
; INLINE_ATOMICS-NEXT: .LBB13_1: // %atomicrmw.start
; INLINE_ATOMICS-NEXT: // =>This Inner Loop Header: Depth=1
; INLINE_ATOMICS-NEXT: ldxrh w8, [x9]
; INLINE_ATOMICS-NEXT: orr w10, w8, w0
; INLINE_ATOMICS-NEXT: stxrh w11, w10, [x9]
; INLINE_ATOMICS-NEXT: cbnz w11, .LBB13_1
; INLINE_ATOMICS-NEXT: // %bb.2: // %atomicrmw.end
; INLINE_ATOMICS-NEXT: mov w0, w8
; INLINE_ATOMICS-NEXT: ret
;
; OUTLINE_ATOMICS-LABEL: test_atomic_load_or_i16:
; OUTLINE_ATOMICS: // %bb.0:
; OUTLINE_ATOMICS-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
; OUTLINE_ATOMICS-NEXT: adrp x1, var16
; OUTLINE_ATOMICS-NEXT: add x1, x1, :lo12:var16
; OUTLINE_ATOMICS-NEXT: bl __aarch64_ldset2_relax
; OUTLINE_ATOMICS-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
; OUTLINE_ATOMICS-NEXT: ret
%old = atomicrmw or i16* @var16, i16 %offset monotonic
ret i16 %old
}
define dso_local i32 @test_atomic_load_or_i32(i32 %offset) nounwind {
; INLINE_ATOMICS-LABEL: test_atomic_load_or_i32:
; INLINE_ATOMICS: // %bb.0:
; INLINE_ATOMICS-NEXT: adrp x9, var32
; INLINE_ATOMICS-NEXT: add x9, x9, :lo12:var32
; INLINE_ATOMICS-NEXT: .LBB14_1: // %atomicrmw.start
; INLINE_ATOMICS-NEXT: // =>This Inner Loop Header: Depth=1
; INLINE_ATOMICS-NEXT: ldaxr w8, [x9]
; INLINE_ATOMICS-NEXT: orr w10, w8, w0
; INLINE_ATOMICS-NEXT: stxr w11, w10, [x9]
; INLINE_ATOMICS-NEXT: cbnz w11, .LBB14_1
; INLINE_ATOMICS-NEXT: // %bb.2: // %atomicrmw.end
; INLINE_ATOMICS-NEXT: mov w0, w8
; INLINE_ATOMICS-NEXT: ret
;
; OUTLINE_ATOMICS-LABEL: test_atomic_load_or_i32:
; OUTLINE_ATOMICS: // %bb.0:
; OUTLINE_ATOMICS-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
; OUTLINE_ATOMICS-NEXT: adrp x1, var32
; OUTLINE_ATOMICS-NEXT: add x1, x1, :lo12:var32
; OUTLINE_ATOMICS-NEXT: bl __aarch64_ldset4_acq
; OUTLINE_ATOMICS-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
; OUTLINE_ATOMICS-NEXT: ret
%old = atomicrmw or i32* @var32, i32 %offset acquire
ret i32 %old
}
define dso_local i64 @test_atomic_load_or_i64(i64 %offset) nounwind {
; INLINE_ATOMICS-LABEL: test_atomic_load_or_i64:
; INLINE_ATOMICS: // %bb.0:
; INLINE_ATOMICS-NEXT: adrp x9, var64
; INLINE_ATOMICS-NEXT: add x9, x9, :lo12:var64
; INLINE_ATOMICS-NEXT: .LBB15_1: // %atomicrmw.start
; INLINE_ATOMICS-NEXT: // =>This Inner Loop Header: Depth=1
; INLINE_ATOMICS-NEXT: ldxr x8, [x9]
; INLINE_ATOMICS-NEXT: orr x10, x8, x0
; INLINE_ATOMICS-NEXT: stlxr w11, x10, [x9]
; INLINE_ATOMICS-NEXT: cbnz w11, .LBB15_1
; INLINE_ATOMICS-NEXT: // %bb.2: // %atomicrmw.end
; INLINE_ATOMICS-NEXT: mov x0, x8
; INLINE_ATOMICS-NEXT: ret
;
; OUTLINE_ATOMICS-LABEL: test_atomic_load_or_i64:
; OUTLINE_ATOMICS: // %bb.0:
; OUTLINE_ATOMICS-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
; OUTLINE_ATOMICS-NEXT: adrp x1, var64
; OUTLINE_ATOMICS-NEXT: add x1, x1, :lo12:var64
; OUTLINE_ATOMICS-NEXT: bl __aarch64_ldset8_rel
; OUTLINE_ATOMICS-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
; OUTLINE_ATOMICS-NEXT: ret
%old = atomicrmw or i64* @var64, i64 %offset release
ret i64 %old
}
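; 'xor' maps directly onto the __aarch64_ldeor* helpers (LSE LDEOR is an exclusive OR).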
define dso_local i8 @test_atomic_load_xor_i8(i8 %offset) nounwind {
; INLINE_ATOMICS-LABEL: test_atomic_load_xor_i8:
; INLINE_ATOMICS: // %bb.0:
; INLINE_ATOMICS-NEXT: adrp x9, var8
; INLINE_ATOMICS-NEXT: add x9, x9, :lo12:var8
; INLINE_ATOMICS-NEXT: .LBB16_1: // %atomicrmw.start
; INLINE_ATOMICS-NEXT: // =>This Inner Loop Header: Depth=1
; INLINE_ATOMICS-NEXT: ldaxrb w8, [x9]
; INLINE_ATOMICS-NEXT: eor w10, w8, w0
; INLINE_ATOMICS-NEXT: stxrb w11, w10, [x9]
; INLINE_ATOMICS-NEXT: cbnz w11, .LBB16_1
; INLINE_ATOMICS-NEXT: // %bb.2: // %atomicrmw.end
; INLINE_ATOMICS-NEXT: mov w0, w8
; INLINE_ATOMICS-NEXT: ret
;
; OUTLINE_ATOMICS-LABEL: test_atomic_load_xor_i8:
; OUTLINE_ATOMICS: // %bb.0:
; OUTLINE_ATOMICS-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
; OUTLINE_ATOMICS-NEXT: adrp x1, var8
; OUTLINE_ATOMICS-NEXT: add x1, x1, :lo12:var8
; OUTLINE_ATOMICS-NEXT: bl __aarch64_ldeor1_acq
; OUTLINE_ATOMICS-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
; OUTLINE_ATOMICS-NEXT: ret
%old = atomicrmw xor i8* @var8, i8 %offset acquire
ret i8 %old
}
define dso_local i16 @test_atomic_load_xor_i16(i16 %offset) nounwind {
; INLINE_ATOMICS-LABEL: test_atomic_load_xor_i16:
; INLINE_ATOMICS: // %bb.0:
; INLINE_ATOMICS-NEXT: adrp x9, var16
; INLINE_ATOMICS-NEXT: add x9, x9, :lo12:var16
; INLINE_ATOMICS-NEXT: .LBB17_1: // %atomicrmw.start
; INLINE_ATOMICS-NEXT: // =>This Inner Loop Header: Depth=1
; INLINE_ATOMICS-NEXT: ldxrh w8, [x9]
; INLINE_ATOMICS-NEXT: eor w10, w8, w0
; INLINE_ATOMICS-NEXT: stlxrh w11, w10, [x9]
; INLINE_ATOMICS-NEXT: cbnz w11, .LBB17_1
; INLINE_ATOMICS-NEXT: // %bb.2: // %atomicrmw.end
; INLINE_ATOMICS-NEXT: mov w0, w8
; INLINE_ATOMICS-NEXT: ret
;
; OUTLINE_ATOMICS-LABEL: test_atomic_load_xor_i16:
; OUTLINE_ATOMICS: // %bb.0:
; OUTLINE_ATOMICS-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
; OUTLINE_ATOMICS-NEXT: adrp x1, var16
; OUTLINE_ATOMICS-NEXT: add x1, x1, :lo12:var16
; OUTLINE_ATOMICS-NEXT: bl __aarch64_ldeor2_rel
; OUTLINE_ATOMICS-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
; OUTLINE_ATOMICS-NEXT: ret
%old = atomicrmw xor i16* @var16, i16 %offset release
ret i16 %old
}
define dso_local i32 @test_atomic_load_xor_i32(i32 %offset) nounwind {
; INLINE_ATOMICS-LABEL: test_atomic_load_xor_i32:
; INLINE_ATOMICS: // %bb.0:
; INLINE_ATOMICS-NEXT: adrp x9, var32
; INLINE_ATOMICS-NEXT: add x9, x9, :lo12:var32
; INLINE_ATOMICS-NEXT: .LBB18_1: // %atomicrmw.start
; INLINE_ATOMICS-NEXT: // =>This Inner Loop Header: Depth=1
; INLINE_ATOMICS-NEXT: ldaxr w8, [x9]
; INLINE_ATOMICS-NEXT: eor w10, w8, w0
; INLINE_ATOMICS-NEXT: stlxr w11, w10, [x9]
; INLINE_ATOMICS-NEXT: cbnz w11, .LBB18_1
; INLINE_ATOMICS-NEXT: // %bb.2: // %atomicrmw.end
; INLINE_ATOMICS-NEXT: mov w0, w8
; INLINE_ATOMICS-NEXT: ret
;
; OUTLINE_ATOMICS-LABEL: test_atomic_load_xor_i32:
; OUTLINE_ATOMICS: // %bb.0:
; OUTLINE_ATOMICS-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
; OUTLINE_ATOMICS-NEXT: adrp x1, var32
; OUTLINE_ATOMICS-NEXT: add x1, x1, :lo12:var32
; OUTLINE_ATOMICS-NEXT: bl __aarch64_ldeor4_acq_rel
; OUTLINE_ATOMICS-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
; OUTLINE_ATOMICS-NEXT: ret
%old = atomicrmw xor i32* @var32, i32 %offset seq_cst
ret i32 %old
}
define dso_local i64 @test_atomic_load_xor_i64(i64 %offset) nounwind {
; INLINE_ATOMICS-LABEL: test_atomic_load_xor_i64:
; INLINE_ATOMICS: // %bb.0:
; INLINE_ATOMICS-NEXT: adrp x9, var64
; INLINE_ATOMICS-NEXT: add x9, x9, :lo12:var64
; INLINE_ATOMICS-NEXT: .LBB19_1: // %atomicrmw.start
; INLINE_ATOMICS-NEXT: // =>This Inner Loop Header: Depth=1
; INLINE_ATOMICS-NEXT: ldxr x8, [x9]
; INLINE_ATOMICS-NEXT: eor x10, x8, x0
; INLINE_ATOMICS-NEXT: stxr w11, x10, [x9]
; INLINE_ATOMICS-NEXT: cbnz w11, .LBB19_1
; INLINE_ATOMICS-NEXT: // %bb.2: // %atomicrmw.end
; INLINE_ATOMICS-NEXT: mov x0, x8
; INLINE_ATOMICS-NEXT: ret
;
; OUTLINE_ATOMICS-LABEL: test_atomic_load_xor_i64:
; OUTLINE_ATOMICS: // %bb.0:
; OUTLINE_ATOMICS-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
; OUTLINE_ATOMICS-NEXT: adrp x1, var64
; OUTLINE_ATOMICS-NEXT: add x1, x1, :lo12:var64
; OUTLINE_ATOMICS-NEXT: bl __aarch64_ldeor8_relax
; OUTLINE_ATOMICS-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
; OUTLINE_ATOMICS-NEXT: ret
%old = atomicrmw xor i64* @var64, i64 %offset monotonic
ret i64 %old
}
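; 'xchg' maps directly onto the __aarch64_swp* helpers (LSE SWP).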
define dso_local i8 @test_atomic_load_xchg_i8(i8 %offset) nounwind {
; INLINE_ATOMICS-LABEL: test_atomic_load_xchg_i8:
; INLINE_ATOMICS: // %bb.0:
; INLINE_ATOMICS-NEXT: // kill: def $w0 killed $w0 def $x0
; INLINE_ATOMICS-NEXT: adrp x9, var8
; INLINE_ATOMICS-NEXT: add x9, x9, :lo12:var8
; INLINE_ATOMICS-NEXT: .LBB20_1: // %atomicrmw.start
; INLINE_ATOMICS-NEXT: // =>This Inner Loop Header: Depth=1
; INLINE_ATOMICS-NEXT: ldxrb w8, [x9]
; INLINE_ATOMICS-NEXT: stxrb w10, w0, [x9]
; INLINE_ATOMICS-NEXT: cbnz w10, .LBB20_1
; INLINE_ATOMICS-NEXT: // %bb.2: // %atomicrmw.end
; INLINE_ATOMICS-NEXT: mov w0, w8
; INLINE_ATOMICS-NEXT: ret
;
; OUTLINE_ATOMICS-LABEL: test_atomic_load_xchg_i8:
; OUTLINE_ATOMICS: // %bb.0:
; OUTLINE_ATOMICS-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
; OUTLINE_ATOMICS-NEXT: adrp x1, var8
; OUTLINE_ATOMICS-NEXT: add x1, x1, :lo12:var8
; OUTLINE_ATOMICS-NEXT: bl __aarch64_swp1_relax
; OUTLINE_ATOMICS-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
; OUTLINE_ATOMICS-NEXT: ret
%old = atomicrmw xchg i8* @var8, i8 %offset monotonic
ret i8 %old
}
define dso_local i16 @test_atomic_load_xchg_i16(i16 %offset) nounwind {
; INLINE_ATOMICS-LABEL: test_atomic_load_xchg_i16:
; INLINE_ATOMICS: // %bb.0:
; INLINE_ATOMICS-NEXT: // kill: def $w0 killed $w0 def $x0
; INLINE_ATOMICS-NEXT: adrp x9, var16
; INLINE_ATOMICS-NEXT: add x9, x9, :lo12:var16
; INLINE_ATOMICS-NEXT: .LBB21_1: // %atomicrmw.start
; INLINE_ATOMICS-NEXT: // =>This Inner Loop Header: Depth=1
; INLINE_ATOMICS-NEXT: ldaxrh w8, [x9]
; INLINE_ATOMICS-NEXT: stlxrh w10, w0, [x9]
; INLINE_ATOMICS-NEXT: cbnz w10, .LBB21_1
; INLINE_ATOMICS-NEXT: // %bb.2: // %atomicrmw.end
; INLINE_ATOMICS-NEXT: mov w0, w8
; INLINE_ATOMICS-NEXT: ret
;
; OUTLINE_ATOMICS-LABEL: test_atomic_load_xchg_i16:
; OUTLINE_ATOMICS: // %bb.0:
; OUTLINE_ATOMICS-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
; OUTLINE_ATOMICS-NEXT: adrp x1, var16
; OUTLINE_ATOMICS-NEXT: add x1, x1, :lo12:var16
; OUTLINE_ATOMICS-NEXT: bl __aarch64_swp2_acq_rel
; OUTLINE_ATOMICS-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
; OUTLINE_ATOMICS-NEXT: ret
%old = atomicrmw xchg i16* @var16, i16 %offset seq_cst
ret i16 %old
}
define dso_local i32 @test_atomic_load_xchg_i32(i32 %offset) nounwind {
; INLINE_ATOMICS-LABEL: test_atomic_load_xchg_i32:
; INLINE_ATOMICS: // %bb.0:
; INLINE_ATOMICS-NEXT: mov w8, w0
; INLINE_ATOMICS-NEXT: adrp x9, var32
; INLINE_ATOMICS-NEXT: add x9, x9, :lo12:var32
; INLINE_ATOMICS-NEXT: .LBB22_1: // %atomicrmw.start
; INLINE_ATOMICS-NEXT: // =>This Inner Loop Header: Depth=1
; INLINE_ATOMICS-NEXT: ldxr w0, [x9]
; INLINE_ATOMICS-NEXT: stlxr w10, w8, [x9]
; INLINE_ATOMICS-NEXT: cbnz w10, .LBB22_1
; INLINE_ATOMICS-NEXT: // %bb.2: // %atomicrmw.end
; INLINE_ATOMICS-NEXT: // kill: def $w0 killed $w0 killed $x0
; INLINE_ATOMICS-NEXT: ret
;
; OUTLINE_ATOMICS-LABEL: test_atomic_load_xchg_i32:
; OUTLINE_ATOMICS: // %bb.0:
; OUTLINE_ATOMICS-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
; OUTLINE_ATOMICS-NEXT: adrp x1, var32
; OUTLINE_ATOMICS-NEXT: add x1, x1, :lo12:var32
; OUTLINE_ATOMICS-NEXT: bl __aarch64_swp4_rel
; OUTLINE_ATOMICS-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
; OUTLINE_ATOMICS-NEXT: ret
%old = atomicrmw xchg i32* @var32, i32 %offset release
ret i32 %old
}
define dso_local i64 @test_atomic_load_xchg_i64(i64 %offset) nounwind {
; INLINE_ATOMICS-LABEL: test_atomic_load_xchg_i64:
; INLINE_ATOMICS: // %bb.0:
; INLINE_ATOMICS-NEXT: adrp x9, var64
; INLINE_ATOMICS-NEXT: add x9, x9, :lo12:var64
; INLINE_ATOMICS-NEXT: .LBB23_1: // %atomicrmw.start
; INLINE_ATOMICS-NEXT: // =>This Inner Loop Header: Depth=1
; INLINE_ATOMICS-NEXT: ldaxr x8, [x9]
; INLINE_ATOMICS-NEXT: stxr w10, x0, [x9]
; INLINE_ATOMICS-NEXT: cbnz w10, .LBB23_1
; INLINE_ATOMICS-NEXT: // %bb.2: // %atomicrmw.end
; INLINE_ATOMICS-NEXT: mov x0, x8
; INLINE_ATOMICS-NEXT: ret
;
; OUTLINE_ATOMICS-LABEL: test_atomic_load_xchg_i64:
; OUTLINE_ATOMICS: // %bb.0:
; OUTLINE_ATOMICS-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
; OUTLINE_ATOMICS-NEXT: adrp x1, var64
; OUTLINE_ATOMICS-NEXT: add x1, x1, :lo12:var64
; OUTLINE_ATOMICS-NEXT: bl __aarch64_swp8_acq
; OUTLINE_ATOMICS-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
; OUTLINE_ATOMICS-NEXT: ret
%old = atomicrmw xchg i64* @var64, i64 %offset acquire
ret i64 %old
}
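; min/max/umin/umax have no outlined helpers, so both run lines share the same
; compare-and-select (csel) exclusive loop; the common CHECK prefix below
; reflects that.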
define dso_local i8 @test_atomic_load_min_i8(i8 %offset) nounwind {
; CHECK-LABEL: test_atomic_load_min_i8:
; CHECK: // %bb.0:
; CHECK-NEXT: adrp x9, var8
; CHECK-NEXT: add x9, x9, :lo12:var8
; CHECK-NEXT: .LBB24_1: // %atomicrmw.start
; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
; CHECK-NEXT: ldaxrb w10, [x9]
; CHECK-NEXT: sxtb w8, w10
; CHECK-NEXT: cmp w8, w0, sxtb
; CHECK-NEXT: csel w10, w10, w0, le
; CHECK-NEXT: stxrb w11, w10, [x9]
; CHECK-NEXT: cbnz w11, .LBB24_1
; CHECK-NEXT: // %bb.2: // %atomicrmw.end
; CHECK-NEXT: mov w0, w8
; CHECK-NEXT: ret
%old = atomicrmw min i8* @var8, i8 %offset acquire
ret i8 %old
}
define dso_local i16 @test_atomic_load_min_i16(i16 %offset) nounwind {
; CHECK-LABEL: test_atomic_load_min_i16:
; CHECK: // %bb.0:
; CHECK-NEXT: adrp x9, var16
; CHECK-NEXT: add x9, x9, :lo12:var16
; CHECK-NEXT: .LBB25_1: // %atomicrmw.start
; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
; CHECK-NEXT: ldxrh w10, [x9]
; CHECK-NEXT: sxth w8, w10
; CHECK-NEXT: cmp w8, w0, sxth
; CHECK-NEXT: csel w10, w10, w0, le
; CHECK-NEXT: stlxrh w11, w10, [x9]
; CHECK-NEXT: cbnz w11, .LBB25_1
; CHECK-NEXT: // %bb.2: // %atomicrmw.end
; CHECK-NEXT: mov w0, w8
; CHECK-NEXT: ret
%old = atomicrmw min i16* @var16, i16 %offset release
ret i16 %old
}
define dso_local i32 @test_atomic_load_min_i32(i32 %offset) nounwind {
; CHECK-LABEL: test_atomic_load_min_i32:
; CHECK: // %bb.0:
; CHECK-NEXT: adrp x9, var32
; CHECK-NEXT: add x9, x9, :lo12:var32
; CHECK-NEXT: .LBB26_1: // %atomicrmw.start
; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
; CHECK-NEXT: ldxr w8, [x9]
; CHECK-NEXT: cmp w8, w0
; CHECK-NEXT: csel w10, w8, w0, le
; CHECK-NEXT: stxr w11, w10, [x9]
; CHECK-NEXT: cbnz w11, .LBB26_1
; CHECK-NEXT: // %bb.2: // %atomicrmw.end
; CHECK-NEXT: mov w0, w8
; CHECK-NEXT: ret
%old = atomicrmw min i32* @var32, i32 %offset monotonic
ret i32 %old
}
define dso_local i64 @test_atomic_load_min_i64(i64 %offset) nounwind {
; CHECK-LABEL: test_atomic_load_min_i64:
; CHECK: // %bb.0:
; CHECK-NEXT: adrp x9, var64
; CHECK-NEXT: add x9, x9, :lo12:var64
; CHECK-NEXT: .LBB27_1: // %atomicrmw.start
; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
; CHECK-NEXT: ldaxr x8, [x9]
; CHECK-NEXT: cmp x8, x0
; CHECK-NEXT: csel x10, x8, x0, le
; CHECK-NEXT: stlxr w11, x10, [x9]
; CHECK-NEXT: cbnz w11, .LBB27_1
; CHECK-NEXT: // %bb.2: // %atomicrmw.end
; CHECK-NEXT: mov x0, x8
; CHECK-NEXT: ret
%old = atomicrmw min i64* @var64, i64 %offset seq_cst
ret i64 %old
}
define dso_local i8 @test_atomic_load_max_i8(i8 %offset) nounwind {
; CHECK-LABEL: test_atomic_load_max_i8:
; CHECK: // %bb.0:
; CHECK-NEXT: adrp x9, var8
; CHECK-NEXT: add x9, x9, :lo12:var8
; CHECK-NEXT: .LBB28_1: // %atomicrmw.start
; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
; CHECK-NEXT: ldaxrb w10, [x9]
; CHECK-NEXT: sxtb w8, w10
; CHECK-NEXT: cmp w8, w0, sxtb
; CHECK-NEXT: csel w10, w10, w0, gt
; CHECK-NEXT: stlxrb w11, w10, [x9]
; CHECK-NEXT: cbnz w11, .LBB28_1
; CHECK-NEXT: // %bb.2: // %atomicrmw.end
; CHECK-NEXT: mov w0, w8
; CHECK-NEXT: ret
%old = atomicrmw max i8* @var8, i8 %offset seq_cst
ret i8 %old
}
define dso_local i16 @test_atomic_load_max_i16(i16 %offset) nounwind {
; CHECK-LABEL: test_atomic_load_max_i16:
; CHECK: // %bb.0:
; CHECK-NEXT: adrp x9, var16
; CHECK-NEXT: add x9, x9, :lo12:var16
; CHECK-NEXT: .LBB29_1: // %atomicrmw.start
; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
; CHECK-NEXT: ldaxrh w10, [x9]
; CHECK-NEXT: sxth w8, w10
; CHECK-NEXT: cmp w8, w0, sxth
; CHECK-NEXT: csel w10, w10, w0, gt
; CHECK-NEXT: stxrh w11, w10, [x9]
; CHECK-NEXT: cbnz w11, .LBB29_1
; CHECK-NEXT: // %bb.2: // %atomicrmw.end
; CHECK-NEXT: mov w0, w8
; CHECK-NEXT: ret
%old = atomicrmw max i16* @var16, i16 %offset acquire
ret i16 %old
}
define dso_local i32 @test_atomic_load_max_i32(i32 %offset) nounwind {
; CHECK-LABEL: test_atomic_load_max_i32:
; CHECK: // %bb.0:
; CHECK-NEXT: adrp x9, var32
; CHECK-NEXT: add x9, x9, :lo12:var32
; CHECK-NEXT: .LBB30_1: // %atomicrmw.start
; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
; CHECK-NEXT: ldxr w8, [x9]
; CHECK-NEXT: cmp w8, w0
; CHECK-NEXT: csel w10, w8, w0, gt
; CHECK-NEXT: stlxr w11, w10, [x9]
; CHECK-NEXT: cbnz w11, .LBB30_1
; CHECK-NEXT: // %bb.2: // %atomicrmw.end
; CHECK-NEXT: mov w0, w8
; CHECK-NEXT: ret
%old = atomicrmw max i32* @var32, i32 %offset release
ret i32 %old
}
define dso_local i64 @test_atomic_load_max_i64(i64 %offset) nounwind {
; CHECK-LABEL: test_atomic_load_max_i64:
; CHECK: // %bb.0:
; CHECK-NEXT: adrp x9, var64
; CHECK-NEXT: add x9, x9, :lo12:var64
; CHECK-NEXT: .LBB31_1: // %atomicrmw.start
; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
; CHECK-NEXT: ldxr x8, [x9]
; CHECK-NEXT: cmp x8, x0
; CHECK-NEXT: csel x10, x8, x0, gt
; CHECK-NEXT: stxr w11, x10, [x9]
; CHECK-NEXT: cbnz w11, .LBB31_1
; CHECK-NEXT: // %bb.2: // %atomicrmw.end
; CHECK-NEXT: mov x0, x8
; CHECK-NEXT: ret
%old = atomicrmw max i64* @var64, i64 %offset monotonic
ret i64 %old
}
define dso_local i8 @test_atomic_load_umin_i8(i8 %offset) nounwind {
; CHECK-LABEL: test_atomic_load_umin_i8:
; CHECK: // %bb.0:
; CHECK-NEXT: and w8, w0, #0xff
; CHECK-NEXT: adrp x9, var8
; CHECK-NEXT: add x9, x9, :lo12:var8
; CHECK-NEXT: .LBB32_1: // %atomicrmw.start
; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
; CHECK-NEXT: ldxrb w0, [x9]
; CHECK-NEXT: cmp w0, w8
; CHECK-NEXT: csel w10, w0, w8, ls
; CHECK-NEXT: stxrb w11, w10, [x9]
; CHECK-NEXT: cbnz w11, .LBB32_1
; CHECK-NEXT: // %bb.2: // %atomicrmw.end
; CHECK-NEXT: // kill: def $w0 killed $w0 killed $x0
; CHECK-NEXT: ret
%old = atomicrmw umin i8* @var8, i8 %offset monotonic
ret i8 %old
}
define dso_local i16 @test_atomic_load_umin_i16(i16 %offset) nounwind {
; CHECK-LABEL: test_atomic_load_umin_i16:
; CHECK: // %bb.0:
; CHECK-NEXT: and w8, w0, #0xffff
; CHECK-NEXT: adrp x9, var16
; CHECK-NEXT: add x9, x9, :lo12:var16
; CHECK-NEXT: .LBB33_1: // %atomicrmw.start
; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
; CHECK-NEXT: ldaxrh w0, [x9]
; CHECK-NEXT: cmp w0, w8
; CHECK-NEXT: csel w10, w0, w8, ls
; CHECK-NEXT: stxrh w11, w10, [x9]
; CHECK-NEXT: cbnz w11, .LBB33_1
; CHECK-NEXT: // %bb.2: // %atomicrmw.end
; CHECK-NEXT: // kill: def $w0 killed $w0 killed $x0
; CHECK-NEXT: ret
%old = atomicrmw umin i16* @var16, i16 %offset acquire
ret i16 %old
}
define dso_local i32 @test_atomic_load_umin_i32(i32 %offset) nounwind {
; CHECK-LABEL: test_atomic_load_umin_i32:
; CHECK: // %bb.0:
; CHECK-NEXT: adrp x9, var32
; CHECK-NEXT: add x9, x9, :lo12:var32
; CHECK-NEXT: .LBB34_1: // %atomicrmw.start
; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
; CHECK-NEXT: ldaxr w8, [x9]
; CHECK-NEXT: cmp w8, w0
; CHECK-NEXT: csel w10, w8, w0, ls
; CHECK-NEXT: stlxr w11, w10, [x9]
; CHECK-NEXT: cbnz w11, .LBB34_1
; CHECK-NEXT: // %bb.2: // %atomicrmw.end
; CHECK-NEXT: mov w0, w8
; CHECK-NEXT: ret
%old = atomicrmw umin i32* @var32, i32 %offset seq_cst
ret i32 %old
}
define dso_local i64 @test_atomic_load_umin_i64(i64 %offset) nounwind {
; CHECK-LABEL: test_atomic_load_umin_i64:
; CHECK: // %bb.0:
; CHECK-NEXT: adrp x9, var64
; CHECK-NEXT: add x9, x9, :lo12:var64
; CHECK-NEXT: .LBB35_1: // %atomicrmw.start
; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
; CHECK-NEXT: ldaxr x8, [x9]
; CHECK-NEXT: cmp x8, x0
; CHECK-NEXT: csel x10, x8, x0, ls
; CHECK-NEXT: stlxr w11, x10, [x9]
; CHECK-NEXT: cbnz w11, .LBB35_1
; CHECK-NEXT: // %bb.2: // %atomicrmw.end
; CHECK-NEXT: mov x0, x8
; CHECK-NEXT: ret
%old = atomicrmw umin i64* @var64, i64 %offset acq_rel
ret i64 %old
}
define dso_local i8 @test_atomic_load_umax_i8(i8 %offset) nounwind {
; CHECK-LABEL: test_atomic_load_umax_i8:
; CHECK: // %bb.0:
; CHECK-NEXT: and w8, w0, #0xff
; CHECK-NEXT: adrp x9, var8
; CHECK-NEXT: add x9, x9, :lo12:var8
; CHECK-NEXT: .LBB36_1: // %atomicrmw.start
; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
; CHECK-NEXT: ldaxrb w0, [x9]
; CHECK-NEXT: cmp w0, w8
; CHECK-NEXT: csel w10, w0, w8, hi
; CHECK-NEXT: stlxrb w11, w10, [x9]
; CHECK-NEXT: cbnz w11, .LBB36_1
; CHECK-NEXT: // %bb.2: // %atomicrmw.end
; CHECK-NEXT: // kill: def $w0 killed $w0 killed $x0
; CHECK-NEXT: ret
%old = atomicrmw umax i8* @var8, i8 %offset acq_rel
ret i8 %old
}
define dso_local i16 @test_atomic_load_umax_i16(i16 %offset) nounwind {
; CHECK-LABEL: test_atomic_load_umax_i16:
; CHECK: // %bb.0:
; CHECK-NEXT: and w8, w0, #0xffff
; CHECK-NEXT: adrp x9, var16
; CHECK-NEXT: add x9, x9, :lo12:var16
; CHECK-NEXT: .LBB37_1: // %atomicrmw.start
; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
; CHECK-NEXT: ldxrh w0, [x9]
; CHECK-NEXT: cmp w0, w8
; CHECK-NEXT: csel w10, w0, w8, hi
; CHECK-NEXT: stxrh w11, w10, [x9]
; CHECK-NEXT: cbnz w11, .LBB37_1
; CHECK-NEXT: // %bb.2: // %atomicrmw.end
; CHECK-NEXT: // kill: def $w0 killed $w0 killed $x0
; CHECK-NEXT: ret
%old = atomicrmw umax i16* @var16, i16 %offset monotonic
ret i16 %old
}
define dso_local i32 @test_atomic_load_umax_i32(i32 %offset) nounwind {
; CHECK-LABEL: test_atomic_load_umax_i32:
; CHECK: // %bb.0:
; CHECK-NEXT: adrp x9, var32
; CHECK-NEXT: add x9, x9, :lo12:var32
; CHECK-NEXT: .LBB38_1: // %atomicrmw.start
; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
; CHECK-NEXT: ldaxr w8, [x9]
; CHECK-NEXT: cmp w8, w0
; CHECK-NEXT: csel w10, w8, w0, hi
; CHECK-NEXT: stlxr w11, w10, [x9]
; CHECK-NEXT: cbnz w11, .LBB38_1
; CHECK-NEXT: // %bb.2: // %atomicrmw.end
; CHECK-NEXT: mov w0, w8
; CHECK-NEXT: ret
%old = atomicrmw umax i32* @var32, i32 %offset seq_cst
ret i32 %old
}
define dso_local i64 @test_atomic_load_umax_i64(i64 %offset) nounwind {
; CHECK-LABEL: test_atomic_load_umax_i64:
; CHECK: // %bb.0:
; CHECK-NEXT: adrp x9, var64
; CHECK-NEXT: add x9, x9, :lo12:var64
; CHECK-NEXT: .LBB39_1: // %atomicrmw.start
; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
; CHECK-NEXT: ldxr x8, [x9]
; CHECK-NEXT: cmp x8, x0
; CHECK-NEXT: csel x10, x8, x0, hi
; CHECK-NEXT: stlxr w11, x10, [x9]
; CHECK-NEXT: cbnz w11, .LBB39_1
; CHECK-NEXT: // %bb.2: // %atomicrmw.end
; CHECK-NEXT: mov x0, x8
; CHECK-NEXT: ret
%old = atomicrmw umax i64* @var64, i64 %offset release
ret i64 %old
}
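; cmpxchg maps onto the __aarch64_cas* helpers. The variant is chosen from the
; merged success/failure orderings, and seq_cst uses the _acq_rel flavour, the
; strongest variant the outline library provides.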
define dso_local i8 @test_atomic_cmpxchg_i8(i8 %wanted, i8 %new) nounwind {
; INLINE_ATOMICS-LABEL: test_atomic_cmpxchg_i8:
; INLINE_ATOMICS: // %bb.0:
; INLINE_ATOMICS-NEXT: // kill: def $w1 killed $w1 def $x1
; INLINE_ATOMICS-NEXT: and w8, w0, #0xff
; INLINE_ATOMICS-NEXT: adrp x9, var8
; INLINE_ATOMICS-NEXT: add x9, x9, :lo12:var8
; INLINE_ATOMICS-NEXT: .LBB40_1: // %cmpxchg.start
; INLINE_ATOMICS-NEXT: // =>This Inner Loop Header: Depth=1
; INLINE_ATOMICS-NEXT: ldaxrb w0, [x9]
; INLINE_ATOMICS-NEXT: cmp w0, w8
; INLINE_ATOMICS-NEXT: b.ne .LBB40_4
; INLINE_ATOMICS-NEXT: // %bb.2: // %cmpxchg.trystore
; INLINE_ATOMICS-NEXT: // in Loop: Header=BB40_1 Depth=1
; INLINE_ATOMICS-NEXT: stxrb w10, w1, [x9]
; INLINE_ATOMICS-NEXT: cbnz w10, .LBB40_1
; INLINE_ATOMICS-NEXT: // %bb.3: // %cmpxchg.end
; INLINE_ATOMICS-NEXT: // kill: def $w0 killed $w0 killed $x0
; INLINE_ATOMICS-NEXT: ret
; INLINE_ATOMICS-NEXT: .LBB40_4: // %cmpxchg.nostore
; INLINE_ATOMICS-NEXT: clrex
; INLINE_ATOMICS-NEXT: // kill: def $w0 killed $w0 killed $x0
; INLINE_ATOMICS-NEXT: ret
;
; OUTLINE_ATOMICS-LABEL: test_atomic_cmpxchg_i8:
; OUTLINE_ATOMICS: // %bb.0:
; OUTLINE_ATOMICS-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
; OUTLINE_ATOMICS-NEXT: adrp x2, var8
; OUTLINE_ATOMICS-NEXT: add x2, x2, :lo12:var8
; OUTLINE_ATOMICS-NEXT: bl __aarch64_cas1_acq
; OUTLINE_ATOMICS-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
; OUTLINE_ATOMICS-NEXT: ret
%pair = cmpxchg i8* @var8, i8 %wanted, i8 %new acquire acquire
%old = extractvalue { i8, i1 } %pair, 0
ret i8 %old
}
define dso_local i16 @test_atomic_cmpxchg_i16(i16 %wanted, i16 %new) nounwind {
; INLINE_ATOMICS-LABEL: test_atomic_cmpxchg_i16:
; INLINE_ATOMICS: // %bb.0:
; INLINE_ATOMICS-NEXT: // kill: def $w1 killed $w1 def $x1
; INLINE_ATOMICS-NEXT: and w8, w0, #0xffff
; INLINE_ATOMICS-NEXT: adrp x9, var16
; INLINE_ATOMICS-NEXT: add x9, x9, :lo12:var16
; INLINE_ATOMICS-NEXT: .LBB41_1: // %cmpxchg.start
; INLINE_ATOMICS-NEXT: // =>This Inner Loop Header: Depth=1
; INLINE_ATOMICS-NEXT: ldaxrh w0, [x9]
; INLINE_ATOMICS-NEXT: cmp w0, w8
; INLINE_ATOMICS-NEXT: b.ne .LBB41_4
; INLINE_ATOMICS-NEXT: // %bb.2: // %cmpxchg.trystore
; INLINE_ATOMICS-NEXT: // in Loop: Header=BB41_1 Depth=1
; INLINE_ATOMICS-NEXT: stlxrh w10, w1, [x9]
; INLINE_ATOMICS-NEXT: cbnz w10, .LBB41_1
; INLINE_ATOMICS-NEXT: // %bb.3: // %cmpxchg.end
; INLINE_ATOMICS-NEXT: // kill: def $w0 killed $w0 killed $x0
; INLINE_ATOMICS-NEXT: ret
; INLINE_ATOMICS-NEXT: .LBB41_4: // %cmpxchg.nostore
; INLINE_ATOMICS-NEXT: clrex
; INLINE_ATOMICS-NEXT: // kill: def $w0 killed $w0 killed $x0
; INLINE_ATOMICS-NEXT: ret
;
; OUTLINE_ATOMICS-LABEL: test_atomic_cmpxchg_i16:
; OUTLINE_ATOMICS: // %bb.0:
; OUTLINE_ATOMICS-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
; OUTLINE_ATOMICS-NEXT: adrp x2, var16
; OUTLINE_ATOMICS-NEXT: add x2, x2, :lo12:var16
; OUTLINE_ATOMICS-NEXT: bl __aarch64_cas2_acq_rel
; OUTLINE_ATOMICS-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
; OUTLINE_ATOMICS-NEXT: ret
%pair = cmpxchg i16* @var16, i16 %wanted, i16 %new seq_cst seq_cst
%old = extractvalue { i16, i1 } %pair, 0
ret i16 %old
}
define dso_local i32 @test_atomic_cmpxchg_i32(i32 %wanted, i32 %new) nounwind {
; INLINE_ATOMICS-LABEL: test_atomic_cmpxchg_i32:
; INLINE_ATOMICS: // %bb.0:
; INLINE_ATOMICS-NEXT: mov w8, w0
; INLINE_ATOMICS-NEXT: adrp x9, var32
; INLINE_ATOMICS-NEXT: add x9, x9, :lo12:var32
; INLINE_ATOMICS-NEXT: .LBB42_1: // %cmpxchg.start
; INLINE_ATOMICS-NEXT: // =>This Inner Loop Header: Depth=1
; INLINE_ATOMICS-NEXT: ldxr w0, [x9]
; INLINE_ATOMICS-NEXT: cmp w0, w8
; INLINE_ATOMICS-NEXT: b.ne .LBB42_4
; INLINE_ATOMICS-NEXT: // %bb.2: // %cmpxchg.trystore
; INLINE_ATOMICS-NEXT: // in Loop: Header=BB42_1 Depth=1
; INLINE_ATOMICS-NEXT: stlxr w10, w1, [x9]
; INLINE_ATOMICS-NEXT: cbnz w10, .LBB42_1
; INLINE_ATOMICS-NEXT: // %bb.3: // %cmpxchg.end
; INLINE_ATOMICS-NEXT: // kill: def $w0 killed $w0 killed $x0
; INLINE_ATOMICS-NEXT: ret
; INLINE_ATOMICS-NEXT: .LBB42_4: // %cmpxchg.nostore
; INLINE_ATOMICS-NEXT: clrex
; INLINE_ATOMICS-NEXT: // kill: def $w0 killed $w0 killed $x0
; INLINE_ATOMICS-NEXT: ret
;
; OUTLINE_ATOMICS-LABEL: test_atomic_cmpxchg_i32:
; OUTLINE_ATOMICS: // %bb.0:
; OUTLINE_ATOMICS-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
; OUTLINE_ATOMICS-NEXT: adrp x2, var32
; OUTLINE_ATOMICS-NEXT: add x2, x2, :lo12:var32
; OUTLINE_ATOMICS-NEXT: bl __aarch64_cas4_rel
; OUTLINE_ATOMICS-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
; OUTLINE_ATOMICS-NEXT: ret
%pair = cmpxchg i32* @var32, i32 %wanted, i32 %new release monotonic
%old = extractvalue { i32, i1 } %pair, 0
ret i32 %old
}
define dso_local void @test_atomic_cmpxchg_i64(i64 %wanted, i64 %new) nounwind {
; INLINE_ATOMICS-LABEL: test_atomic_cmpxchg_i64:
; INLINE_ATOMICS: // %bb.0:
; INLINE_ATOMICS-NEXT: adrp x9, var64
; INLINE_ATOMICS-NEXT: add x9, x9, :lo12:var64
; INLINE_ATOMICS-NEXT: .LBB43_1: // %cmpxchg.start
; INLINE_ATOMICS-NEXT: // =>This Inner Loop Header: Depth=1
; INLINE_ATOMICS-NEXT: ldxr x8, [x9]
; INLINE_ATOMICS-NEXT: cmp x8, x0
; INLINE_ATOMICS-NEXT: b.ne .LBB43_3
; INLINE_ATOMICS-NEXT: // %bb.2: // %cmpxchg.trystore
; INLINE_ATOMICS-NEXT: // in Loop: Header=BB43_1 Depth=1
; INLINE_ATOMICS-NEXT: stxr w10, x1, [x9]
; INLINE_ATOMICS-NEXT: cbnz w10, .LBB43_1
; INLINE_ATOMICS-NEXT: b .LBB43_4
; INLINE_ATOMICS-NEXT: .LBB43_3: // %cmpxchg.nostore
; INLINE_ATOMICS-NEXT: clrex
; INLINE_ATOMICS-NEXT: .LBB43_4: // %cmpxchg.end
; INLINE_ATOMICS-NEXT: adrp x9, var64
; INLINE_ATOMICS-NEXT: str x8, [x9, :lo12:var64]
; INLINE_ATOMICS-NEXT: ret
;
; OUTLINE_ATOMICS-LABEL: test_atomic_cmpxchg_i64:
; OUTLINE_ATOMICS: // %bb.0:
; OUTLINE_ATOMICS-NEXT: stp x30, x19, [sp, #-16]! // 16-byte Folded Spill
; OUTLINE_ATOMICS-NEXT: adrp x19, var64
; OUTLINE_ATOMICS-NEXT: add x19, x19, :lo12:var64
; OUTLINE_ATOMICS-NEXT: mov x2, x19
; OUTLINE_ATOMICS-NEXT: bl __aarch64_cas8_relax
; OUTLINE_ATOMICS-NEXT: str x0, [x19]
; OUTLINE_ATOMICS-NEXT: ldp x30, x19, [sp], #16 // 16-byte Folded Reload
; OUTLINE_ATOMICS-NEXT: ret
%pair = cmpxchg i64* @var64, i64 %wanted, i64 %new monotonic monotonic
%old = extractvalue { i64, i1 } %pair, 0
store i64 %old, i64* @var64
ret void
}
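; The remaining tests cover plain atomic loads and stores: monotonic accesses
; use ordinary ldr*/str* instructions, acquire and seq_cst loads use ldar*, and
; release and seq_cst stores use stlr*.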
define dso_local i8 @test_atomic_load_monotonic_i8() nounwind {
; CHECK-LABEL: test_atomic_load_monotonic_i8:
; CHECK: // %bb.0:
; CHECK-NEXT: adrp x8, var8
; CHECK-NEXT: ldrb w0, [x8, :lo12:var8]
; CHECK-NEXT: ret
%val = load atomic i8, i8* @var8 monotonic, align 1
ret i8 %val
}
define dso_local i8 @test_atomic_load_monotonic_regoff_i8(i64 %base, i64 %off) nounwind {
; CHECK-LABEL: test_atomic_load_monotonic_regoff_i8:
; CHECK: // %bb.0:
; CHECK-NEXT: ldrb w0, [x0, x1]
; CHECK-NEXT: ret
%addr_int = add i64 %base, %off
%addr = inttoptr i64 %addr_int to i8*
%val = load atomic i8, i8* %addr monotonic, align 1
ret i8 %val
}
define dso_local i8 @test_atomic_load_acquire_i8() nounwind {
; CHECK-LABEL: test_atomic_load_acquire_i8:
; CHECK: // %bb.0:
; CHECK-NEXT: adrp x8, var8
; CHECK-NEXT: add x8, x8, :lo12:var8
; CHECK-NEXT: ldarb w0, [x8]
; CHECK-NEXT: ret
%val = load atomic i8, i8* @var8 acquire, align 1
ret i8 %val
}
define dso_local i8 @test_atomic_load_seq_cst_i8() nounwind {
; CHECK-LABEL: test_atomic_load_seq_cst_i8:
; CHECK: // %bb.0:
; CHECK-NEXT: adrp x8, var8
; CHECK-NEXT: add x8, x8, :lo12:var8
; CHECK-NEXT: ldarb w0, [x8]
; CHECK-NEXT: ret
%val = load atomic i8, i8* @var8 seq_cst, align 1
ret i8 %val
}
define dso_local i16 @test_atomic_load_monotonic_i16() nounwind {
; CHECK-LABEL: test_atomic_load_monotonic_i16:
; CHECK: // %bb.0:
; CHECK-NEXT: adrp x8, var16
; CHECK-NEXT: ldrh w0, [x8, :lo12:var16]
; CHECK-NEXT: ret
%val = load atomic i16, i16* @var16 monotonic, align 2
ret i16 %val
}
define dso_local i32 @test_atomic_load_monotonic_regoff_i32(i64 %base, i64 %off) nounwind {
; CHECK-LABEL: test_atomic_load_monotonic_regoff_i32:
; CHECK: // %bb.0:
; CHECK-NEXT: ldr w0, [x0, x1]
; CHECK-NEXT: ret
%addr_int = add i64 %base, %off
%addr = inttoptr i64 %addr_int to i32*
%val = load atomic i32, i32* %addr monotonic, align 4
ret i32 %val
}
define dso_local i64 @test_atomic_load_seq_cst_i64() nounwind {
; CHECK-LABEL: test_atomic_load_seq_cst_i64:
; CHECK: // %bb.0:
; CHECK-NEXT: adrp x8, var64
; CHECK-NEXT: add x8, x8, :lo12:var64
; CHECK-NEXT: ldar x0, [x8]
; CHECK-NEXT: ret
%val = load atomic i64, i64* @var64 seq_cst, align 8
ret i64 %val
}
define dso_local void @test_atomic_store_monotonic_i8(i8 %val) nounwind {
; CHECK-LABEL: test_atomic_store_monotonic_i8:
; CHECK: // %bb.0:
; CHECK-NEXT: adrp x8, var8
; CHECK-NEXT: strb w0, [x8, :lo12:var8]
; CHECK-NEXT: ret
store atomic i8 %val, i8* @var8 monotonic, align 1
ret void
}
define dso_local void @test_atomic_store_monotonic_regoff_i8(i64 %base, i64 %off, i8 %val) nounwind {
; CHECK-LABEL: test_atomic_store_monotonic_regoff_i8:
; CHECK: // %bb.0:
; CHECK-NEXT: strb w2, [x0, x1]
; CHECK-NEXT: ret
%addr_int = add i64 %base, %off
%addr = inttoptr i64 %addr_int to i8*
store atomic i8 %val, i8* %addr monotonic, align 1
ret void
}
define dso_local void @test_atomic_store_release_i8(i8 %val) nounwind {
; CHECK-LABEL: test_atomic_store_release_i8:
; CHECK: // %bb.0:
; CHECK-NEXT: adrp x8, var8
; CHECK-NEXT: add x8, x8, :lo12:var8
; CHECK-NEXT: stlrb w0, [x8]
; CHECK-NEXT: ret
store atomic i8 %val, i8* @var8 release, align 1
ret void
}
define dso_local void @test_atomic_store_seq_cst_i8(i8 %val) nounwind {
; CHECK-LABEL: test_atomic_store_seq_cst_i8:
; CHECK: // %bb.0:
; CHECK-NEXT: adrp x8, var8
; CHECK-NEXT: add x8, x8, :lo12:var8
; CHECK-NEXT: stlrb w0, [x8]
; CHECK-NEXT: ret
store atomic i8 %val, i8* @var8 seq_cst, align 1
ret void
}
define dso_local void @test_atomic_store_monotonic_i16(i16 %val) nounwind {
; CHECK-LABEL: test_atomic_store_monotonic_i16:
; CHECK: // %bb.0:
; CHECK-NEXT: adrp x8, var16
; CHECK-NEXT: strh w0, [x8, :lo12:var16]
; CHECK-NEXT: ret
store atomic i16 %val, i16* @var16 monotonic, align 2
ret void
}
define dso_local void @test_atomic_store_monotonic_regoff_i32(i64 %base, i64 %off, i32 %val) nounwind {
; CHECK-LABEL: test_atomic_store_monotonic_regoff_i32:
; CHECK: // %bb.0:
; CHECK-NEXT: str w2, [x0, x1]
; CHECK-NEXT: ret
%addr_int = add i64 %base, %off
%addr = inttoptr i64 %addr_int to i32*
store atomic i32 %val, i32* %addr monotonic, align 4
ret void
}
define dso_local void @test_atomic_store_release_i64(i64 %val) nounwind {
; CHECK-LABEL: test_atomic_store_release_i64:
; CHECK: // %bb.0:
; CHECK-NEXT: adrp x8, var64
; CHECK-NEXT: add x8, x8, :lo12:var64
; CHECK-NEXT: stlr x0, [x8]
; CHECK-NEXT: ret
store atomic i64 %val, i64* @var64 release, align 8
ret void
}