Files
clang-p2996/llvm/test/CodeGen/CSKY/atomic-cmpxchg.ll
Zi Xuan Wu ec2de74908 [CSKY] Add atomic expand pass to support atomic operation with libcall
For now, just support atomic operations by libcall. Further, should investigate atomic
implementation in CSKY target and codegen with atomic and fence related instructions.
2022-04-06 15:05:34 +08:00

1091 lines
35 KiB
LLVM

; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=csky -verify-machineinstrs -csky-no-aliases -mattr=+2e3 < %s \
; RUN: | FileCheck -check-prefix=CSKY %s
; Expected byte is spilled to (sp, 7) and its address passed in a1; success
; order monotonic (0) goes in a3, failure order monotonic (0) in the outgoing
; stack argument slot. Callee address is loaded from the constant pool.
define void @cmpxchg_i8_monotonic_monotonic(i8* %ptr, i8 %cmp, i8 %val) nounwind {
; CSKY-LABEL: cmpxchg_i8_monotonic_monotonic:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: subi16 sp, sp, 8
; CSKY-NEXT: st32.b a1, (sp, 7)
; CSKY-NEXT: movi16 a1, 0
; CSKY-NEXT: st16.w a1, (sp, 0)
; CSKY-NEXT: addi32 a1, sp, 7
; CSKY-NEXT: movi16 a3, 0
; CSKY-NEXT: jsri32 [.LCPI0_0]
; CSKY-NEXT: addi16 sp, sp, 8
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI0_0:
; CSKY-NEXT: .long __atomic_compare_exchange_1
;
%res = cmpxchg i8* %ptr, i8 %cmp, i8 %val monotonic monotonic
ret void
}
; Expected byte spilled to (sp, 7), address in a1; success order acquire (2)
; in a3, failure order monotonic (0) in the stack argument slot.
define void @cmpxchg_i8_acquire_monotonic(i8* %ptr, i8 %cmp, i8 %val) nounwind {
; CSKY-LABEL: cmpxchg_i8_acquire_monotonic:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: subi16 sp, sp, 8
; CSKY-NEXT: st32.b a1, (sp, 7)
; CSKY-NEXT: movi16 a1, 0
; CSKY-NEXT: st16.w a1, (sp, 0)
; CSKY-NEXT: addi32 a1, sp, 7
; CSKY-NEXT: movi16 a3, 2
; CSKY-NEXT: jsri32 [.LCPI1_0]
; CSKY-NEXT: addi16 sp, sp, 8
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI1_0:
; CSKY-NEXT: .long __atomic_compare_exchange_1
;
%res = cmpxchg i8* %ptr, i8 %cmp, i8 %val acquire monotonic
ret void
}
; Expected byte spilled to (sp, 7), address in a1; success order acquire (2)
; in a3, failure order acquire (2) in the stack argument slot.
define void @cmpxchg_i8_acquire_acquire(i8* %ptr, i8 %cmp, i8 %val) nounwind {
; CSKY-LABEL: cmpxchg_i8_acquire_acquire:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: subi16 sp, sp, 8
; CSKY-NEXT: st32.b a1, (sp, 7)
; CSKY-NEXT: movi16 a1, 2
; CSKY-NEXT: st16.w a1, (sp, 0)
; CSKY-NEXT: addi32 a1, sp, 7
; CSKY-NEXT: movi16 a3, 2
; CSKY-NEXT: jsri32 [.LCPI2_0]
; CSKY-NEXT: addi16 sp, sp, 8
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI2_0:
; CSKY-NEXT: .long __atomic_compare_exchange_1
;
%res = cmpxchg i8* %ptr, i8 %cmp, i8 %val acquire acquire
ret void
}
; Expected byte spilled to (sp, 7), address in a1; success order release (3)
; in a3, failure order monotonic (0) in the stack argument slot.
define void @cmpxchg_i8_release_monotonic(i8* %ptr, i8 %cmp, i8 %val) nounwind {
; CSKY-LABEL: cmpxchg_i8_release_monotonic:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: subi16 sp, sp, 8
; CSKY-NEXT: st32.b a1, (sp, 7)
; CSKY-NEXT: movi16 a1, 0
; CSKY-NEXT: st16.w a1, (sp, 0)
; CSKY-NEXT: addi32 a1, sp, 7
; CSKY-NEXT: movi16 a3, 3
; CSKY-NEXT: jsri32 [.LCPI3_0]
; CSKY-NEXT: addi16 sp, sp, 8
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI3_0:
; CSKY-NEXT: .long __atomic_compare_exchange_1
;
%res = cmpxchg i8* %ptr, i8 %cmp, i8 %val release monotonic
ret void
}
; Expected byte spilled to (sp, 7), address in a1; success order release (3)
; in a3, failure order acquire (2) in the stack argument slot.
define void @cmpxchg_i8_release_acquire(i8* %ptr, i8 %cmp, i8 %val) nounwind {
; CSKY-LABEL: cmpxchg_i8_release_acquire:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: subi16 sp, sp, 8
; CSKY-NEXT: st32.b a1, (sp, 7)
; CSKY-NEXT: movi16 a1, 2
; CSKY-NEXT: st16.w a1, (sp, 0)
; CSKY-NEXT: addi32 a1, sp, 7
; CSKY-NEXT: movi16 a3, 3
; CSKY-NEXT: jsri32 [.LCPI4_0]
; CSKY-NEXT: addi16 sp, sp, 8
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI4_0:
; CSKY-NEXT: .long __atomic_compare_exchange_1
;
%res = cmpxchg i8* %ptr, i8 %cmp, i8 %val release acquire
ret void
}
; Expected byte spilled to (sp, 7), address in a1; success order acq_rel (4)
; in a3, failure order monotonic (0) in the stack argument slot.
define void @cmpxchg_i8_acq_rel_monotonic(i8* %ptr, i8 %cmp, i8 %val) nounwind {
; CSKY-LABEL: cmpxchg_i8_acq_rel_monotonic:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: subi16 sp, sp, 8
; CSKY-NEXT: st32.b a1, (sp, 7)
; CSKY-NEXT: movi16 a1, 0
; CSKY-NEXT: st16.w a1, (sp, 0)
; CSKY-NEXT: addi32 a1, sp, 7
; CSKY-NEXT: movi16 a3, 4
; CSKY-NEXT: jsri32 [.LCPI5_0]
; CSKY-NEXT: addi16 sp, sp, 8
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI5_0:
; CSKY-NEXT: .long __atomic_compare_exchange_1
;
%res = cmpxchg i8* %ptr, i8 %cmp, i8 %val acq_rel monotonic
ret void
}
; Expected byte spilled to (sp, 7), address in a1; success order acq_rel (4)
; in a3, failure order acquire (2) in the stack argument slot.
define void @cmpxchg_i8_acq_rel_acquire(i8* %ptr, i8 %cmp, i8 %val) nounwind {
; CSKY-LABEL: cmpxchg_i8_acq_rel_acquire:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: subi16 sp, sp, 8
; CSKY-NEXT: st32.b a1, (sp, 7)
; CSKY-NEXT: movi16 a1, 2
; CSKY-NEXT: st16.w a1, (sp, 0)
; CSKY-NEXT: addi32 a1, sp, 7
; CSKY-NEXT: movi16 a3, 4
; CSKY-NEXT: jsri32 [.LCPI6_0]
; CSKY-NEXT: addi16 sp, sp, 8
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI6_0:
; CSKY-NEXT: .long __atomic_compare_exchange_1
;
%res = cmpxchg i8* %ptr, i8 %cmp, i8 %val acq_rel acquire
ret void
}
; Expected byte spilled to (sp, 7), address in a1; success order seq_cst (5)
; in a3, failure order monotonic (0) in the stack argument slot.
define void @cmpxchg_i8_seq_cst_monotonic(i8* %ptr, i8 %cmp, i8 %val) nounwind {
; CSKY-LABEL: cmpxchg_i8_seq_cst_monotonic:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: subi16 sp, sp, 8
; CSKY-NEXT: st32.b a1, (sp, 7)
; CSKY-NEXT: movi16 a1, 0
; CSKY-NEXT: st16.w a1, (sp, 0)
; CSKY-NEXT: addi32 a1, sp, 7
; CSKY-NEXT: movi16 a3, 5
; CSKY-NEXT: jsri32 [.LCPI7_0]
; CSKY-NEXT: addi16 sp, sp, 8
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI7_0:
; CSKY-NEXT: .long __atomic_compare_exchange_1
;
%res = cmpxchg i8* %ptr, i8 %cmp, i8 %val seq_cst monotonic
ret void
}
; Expected byte spilled to (sp, 7), address in a1; success order seq_cst (5)
; in a3, failure order acquire (2) in the stack argument slot.
define void @cmpxchg_i8_seq_cst_acquire(i8* %ptr, i8 %cmp, i8 %val) nounwind {
; CSKY-LABEL: cmpxchg_i8_seq_cst_acquire:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: subi16 sp, sp, 8
; CSKY-NEXT: st32.b a1, (sp, 7)
; CSKY-NEXT: movi16 a1, 2
; CSKY-NEXT: st16.w a1, (sp, 0)
; CSKY-NEXT: addi32 a1, sp, 7
; CSKY-NEXT: movi16 a3, 5
; CSKY-NEXT: jsri32 [.LCPI8_0]
; CSKY-NEXT: addi16 sp, sp, 8
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI8_0:
; CSKY-NEXT: .long __atomic_compare_exchange_1
;
%res = cmpxchg i8* %ptr, i8 %cmp, i8 %val seq_cst acquire
ret void
}
; Expected byte spilled to (sp, 7), address in a1; success order seq_cst (5)
; in a3, failure order seq_cst (5) in the stack argument slot.
define void @cmpxchg_i8_seq_cst_seq_cst(i8* %ptr, i8 %cmp, i8 %val) nounwind {
; CSKY-LABEL: cmpxchg_i8_seq_cst_seq_cst:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: subi16 sp, sp, 8
; CSKY-NEXT: st32.b a1, (sp, 7)
; CSKY-NEXT: movi16 a1, 5
; CSKY-NEXT: st16.w a1, (sp, 0)
; CSKY-NEXT: addi32 a1, sp, 7
; CSKY-NEXT: movi16 a3, 5
; CSKY-NEXT: jsri32 [.LCPI9_0]
; CSKY-NEXT: addi16 sp, sp, 8
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI9_0:
; CSKY-NEXT: .long __atomic_compare_exchange_1
;
%res = cmpxchg i8* %ptr, i8 %cmp, i8 %val seq_cst seq_cst
ret void
}
; i16 form of the libcall expansion: expected halfword spilled to (sp, 6),
; address in a1; success order monotonic (0) in a3, failure order monotonic
; (0) in the stack argument slot.
define void @cmpxchg_i16_monotonic_monotonic(i16* %ptr, i16 %cmp, i16 %val) nounwind {
; CSKY-LABEL: cmpxchg_i16_monotonic_monotonic:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: subi16 sp, sp, 8
; CSKY-NEXT: st32.h a1, (sp, 6)
; CSKY-NEXT: movi16 a1, 0
; CSKY-NEXT: st16.w a1, (sp, 0)
; CSKY-NEXT: addi32 a1, sp, 6
; CSKY-NEXT: movi16 a3, 0
; CSKY-NEXT: jsri32 [.LCPI10_0]
; CSKY-NEXT: addi16 sp, sp, 8
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI10_0:
; CSKY-NEXT: .long __atomic_compare_exchange_2
;
%res = cmpxchg i16* %ptr, i16 %cmp, i16 %val monotonic monotonic
ret void
}
; Expected halfword spilled to (sp, 6), address in a1; success order acquire
; (2) in a3, failure order monotonic (0) in the stack argument slot.
define void @cmpxchg_i16_acquire_monotonic(i16* %ptr, i16 %cmp, i16 %val) nounwind {
; CSKY-LABEL: cmpxchg_i16_acquire_monotonic:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: subi16 sp, sp, 8
; CSKY-NEXT: st32.h a1, (sp, 6)
; CSKY-NEXT: movi16 a1, 0
; CSKY-NEXT: st16.w a1, (sp, 0)
; CSKY-NEXT: addi32 a1, sp, 6
; CSKY-NEXT: movi16 a3, 2
; CSKY-NEXT: jsri32 [.LCPI11_0]
; CSKY-NEXT: addi16 sp, sp, 8
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI11_0:
; CSKY-NEXT: .long __atomic_compare_exchange_2
;
%res = cmpxchg i16* %ptr, i16 %cmp, i16 %val acquire monotonic
ret void
}
; Expected halfword spilled to (sp, 6), address in a1; success order acquire
; (2) in a3, failure order acquire (2) in the stack argument slot.
define void @cmpxchg_i16_acquire_acquire(i16* %ptr, i16 %cmp, i16 %val) nounwind {
; CSKY-LABEL: cmpxchg_i16_acquire_acquire:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: subi16 sp, sp, 8
; CSKY-NEXT: st32.h a1, (sp, 6)
; CSKY-NEXT: movi16 a1, 2
; CSKY-NEXT: st16.w a1, (sp, 0)
; CSKY-NEXT: addi32 a1, sp, 6
; CSKY-NEXT: movi16 a3, 2
; CSKY-NEXT: jsri32 [.LCPI12_0]
; CSKY-NEXT: addi16 sp, sp, 8
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI12_0:
; CSKY-NEXT: .long __atomic_compare_exchange_2
;
%res = cmpxchg i16* %ptr, i16 %cmp, i16 %val acquire acquire
ret void
}
; Expected halfword spilled to (sp, 6), address in a1; success order release
; (3) in a3, failure order monotonic (0) in the stack argument slot.
define void @cmpxchg_i16_release_monotonic(i16* %ptr, i16 %cmp, i16 %val) nounwind {
; CSKY-LABEL: cmpxchg_i16_release_monotonic:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: subi16 sp, sp, 8
; CSKY-NEXT: st32.h a1, (sp, 6)
; CSKY-NEXT: movi16 a1, 0
; CSKY-NEXT: st16.w a1, (sp, 0)
; CSKY-NEXT: addi32 a1, sp, 6
; CSKY-NEXT: movi16 a3, 3
; CSKY-NEXT: jsri32 [.LCPI13_0]
; CSKY-NEXT: addi16 sp, sp, 8
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI13_0:
; CSKY-NEXT: .long __atomic_compare_exchange_2
;
%res = cmpxchg i16* %ptr, i16 %cmp, i16 %val release monotonic
ret void
}
; Expected halfword spilled to (sp, 6), address in a1; success order release
; (3) in a3, failure order acquire (2) in the stack argument slot.
define void @cmpxchg_i16_release_acquire(i16* %ptr, i16 %cmp, i16 %val) nounwind {
; CSKY-LABEL: cmpxchg_i16_release_acquire:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: subi16 sp, sp, 8
; CSKY-NEXT: st32.h a1, (sp, 6)
; CSKY-NEXT: movi16 a1, 2
; CSKY-NEXT: st16.w a1, (sp, 0)
; CSKY-NEXT: addi32 a1, sp, 6
; CSKY-NEXT: movi16 a3, 3
; CSKY-NEXT: jsri32 [.LCPI14_0]
; CSKY-NEXT: addi16 sp, sp, 8
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI14_0:
; CSKY-NEXT: .long __atomic_compare_exchange_2
;
%res = cmpxchg i16* %ptr, i16 %cmp, i16 %val release acquire
ret void
}
; Expected halfword spilled to (sp, 6), address in a1; success order acq_rel
; (4) in a3, failure order monotonic (0) in the stack argument slot.
define void @cmpxchg_i16_acq_rel_monotonic(i16* %ptr, i16 %cmp, i16 %val) nounwind {
; CSKY-LABEL: cmpxchg_i16_acq_rel_monotonic:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: subi16 sp, sp, 8
; CSKY-NEXT: st32.h a1, (sp, 6)
; CSKY-NEXT: movi16 a1, 0
; CSKY-NEXT: st16.w a1, (sp, 0)
; CSKY-NEXT: addi32 a1, sp, 6
; CSKY-NEXT: movi16 a3, 4
; CSKY-NEXT: jsri32 [.LCPI15_0]
; CSKY-NEXT: addi16 sp, sp, 8
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI15_0:
; CSKY-NEXT: .long __atomic_compare_exchange_2
;
%res = cmpxchg i16* %ptr, i16 %cmp, i16 %val acq_rel monotonic
ret void
}
; Expected halfword spilled to (sp, 6), address in a1; success order acq_rel
; (4) in a3, failure order acquire (2) in the stack argument slot.
define void @cmpxchg_i16_acq_rel_acquire(i16* %ptr, i16 %cmp, i16 %val) nounwind {
; CSKY-LABEL: cmpxchg_i16_acq_rel_acquire:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: subi16 sp, sp, 8
; CSKY-NEXT: st32.h a1, (sp, 6)
; CSKY-NEXT: movi16 a1, 2
; CSKY-NEXT: st16.w a1, (sp, 0)
; CSKY-NEXT: addi32 a1, sp, 6
; CSKY-NEXT: movi16 a3, 4
; CSKY-NEXT: jsri32 [.LCPI16_0]
; CSKY-NEXT: addi16 sp, sp, 8
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI16_0:
; CSKY-NEXT: .long __atomic_compare_exchange_2
;
%res = cmpxchg i16* %ptr, i16 %cmp, i16 %val acq_rel acquire
ret void
}
; Expected halfword spilled to (sp, 6), address in a1; success order seq_cst
; (5) in a3, failure order monotonic (0) in the stack argument slot.
define void @cmpxchg_i16_seq_cst_monotonic(i16* %ptr, i16 %cmp, i16 %val) nounwind {
; CSKY-LABEL: cmpxchg_i16_seq_cst_monotonic:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: subi16 sp, sp, 8
; CSKY-NEXT: st32.h a1, (sp, 6)
; CSKY-NEXT: movi16 a1, 0
; CSKY-NEXT: st16.w a1, (sp, 0)
; CSKY-NEXT: addi32 a1, sp, 6
; CSKY-NEXT: movi16 a3, 5
; CSKY-NEXT: jsri32 [.LCPI17_0]
; CSKY-NEXT: addi16 sp, sp, 8
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI17_0:
; CSKY-NEXT: .long __atomic_compare_exchange_2
;
%res = cmpxchg i16* %ptr, i16 %cmp, i16 %val seq_cst monotonic
ret void
}
; Expected halfword spilled to (sp, 6), address in a1; success order seq_cst
; (5) in a3, failure order acquire (2) in the stack argument slot.
define void @cmpxchg_i16_seq_cst_acquire(i16* %ptr, i16 %cmp, i16 %val) nounwind {
; CSKY-LABEL: cmpxchg_i16_seq_cst_acquire:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: subi16 sp, sp, 8
; CSKY-NEXT: st32.h a1, (sp, 6)
; CSKY-NEXT: movi16 a1, 2
; CSKY-NEXT: st16.w a1, (sp, 0)
; CSKY-NEXT: addi32 a1, sp, 6
; CSKY-NEXT: movi16 a3, 5
; CSKY-NEXT: jsri32 [.LCPI18_0]
; CSKY-NEXT: addi16 sp, sp, 8
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI18_0:
; CSKY-NEXT: .long __atomic_compare_exchange_2
;
%res = cmpxchg i16* %ptr, i16 %cmp, i16 %val seq_cst acquire
ret void
}
; Expected halfword spilled to (sp, 6), address in a1; success order seq_cst
; (5) in a3, failure order seq_cst (5) in the stack argument slot.
define void @cmpxchg_i16_seq_cst_seq_cst(i16* %ptr, i16 %cmp, i16 %val) nounwind {
; CSKY-LABEL: cmpxchg_i16_seq_cst_seq_cst:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: subi16 sp, sp, 8
; CSKY-NEXT: st32.h a1, (sp, 6)
; CSKY-NEXT: movi16 a1, 5
; CSKY-NEXT: st16.w a1, (sp, 0)
; CSKY-NEXT: addi32 a1, sp, 6
; CSKY-NEXT: movi16 a3, 5
; CSKY-NEXT: jsri32 [.LCPI19_0]
; CSKY-NEXT: addi16 sp, sp, 8
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI19_0:
; CSKY-NEXT: .long __atomic_compare_exchange_2
;
%res = cmpxchg i16* %ptr, i16 %cmp, i16 %val seq_cst seq_cst
ret void
}
; i32 form of the libcall expansion: expected word spilled to (sp, 4),
; address in a1; success order monotonic (0) in a3, failure order monotonic
; (0) in the stack argument slot.
define void @cmpxchg_i32_monotonic_monotonic(i32* %ptr, i32 %cmp, i32 %val) nounwind {
; CSKY-LABEL: cmpxchg_i32_monotonic_monotonic:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: subi16 sp, sp, 8
; CSKY-NEXT: st16.w a1, (sp, 4)
; CSKY-NEXT: movi16 a1, 0
; CSKY-NEXT: st16.w a1, (sp, 0)
; CSKY-NEXT: addi16 a1, sp, 4
; CSKY-NEXT: movi16 a3, 0
; CSKY-NEXT: jsri32 [.LCPI20_0]
; CSKY-NEXT: addi16 sp, sp, 8
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI20_0:
; CSKY-NEXT: .long __atomic_compare_exchange_4
;
%res = cmpxchg i32* %ptr, i32 %cmp, i32 %val monotonic monotonic
ret void
}
; Expected word spilled to (sp, 4), address in a1; success order acquire (2)
; in a3, failure order monotonic (0) in the stack argument slot.
define void @cmpxchg_i32_acquire_monotonic(i32* %ptr, i32 %cmp, i32 %val) nounwind {
; CSKY-LABEL: cmpxchg_i32_acquire_monotonic:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: subi16 sp, sp, 8
; CSKY-NEXT: st16.w a1, (sp, 4)
; CSKY-NEXT: movi16 a1, 0
; CSKY-NEXT: st16.w a1, (sp, 0)
; CSKY-NEXT: addi16 a1, sp, 4
; CSKY-NEXT: movi16 a3, 2
; CSKY-NEXT: jsri32 [.LCPI21_0]
; CSKY-NEXT: addi16 sp, sp, 8
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI21_0:
; CSKY-NEXT: .long __atomic_compare_exchange_4
;
%res = cmpxchg i32* %ptr, i32 %cmp, i32 %val acquire monotonic
ret void
}
; Expected word spilled to (sp, 4), address in a1; success order acquire (2)
; in a3, failure order acquire (2) in the stack argument slot.
define void @cmpxchg_i32_acquire_acquire(i32* %ptr, i32 %cmp, i32 %val) nounwind {
; CSKY-LABEL: cmpxchg_i32_acquire_acquire:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: subi16 sp, sp, 8
; CSKY-NEXT: st16.w a1, (sp, 4)
; CSKY-NEXT: movi16 a1, 2
; CSKY-NEXT: st16.w a1, (sp, 0)
; CSKY-NEXT: addi16 a1, sp, 4
; CSKY-NEXT: movi16 a3, 2
; CSKY-NEXT: jsri32 [.LCPI22_0]
; CSKY-NEXT: addi16 sp, sp, 8
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI22_0:
; CSKY-NEXT: .long __atomic_compare_exchange_4
;
%res = cmpxchg i32* %ptr, i32 %cmp, i32 %val acquire acquire
ret void
}
; Expected word spilled to (sp, 4), address in a1; success order release (3)
; in a3, failure order monotonic (0) in the stack argument slot.
define void @cmpxchg_i32_release_monotonic(i32* %ptr, i32 %cmp, i32 %val) nounwind {
; CSKY-LABEL: cmpxchg_i32_release_monotonic:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: subi16 sp, sp, 8
; CSKY-NEXT: st16.w a1, (sp, 4)
; CSKY-NEXT: movi16 a1, 0
; CSKY-NEXT: st16.w a1, (sp, 0)
; CSKY-NEXT: addi16 a1, sp, 4
; CSKY-NEXT: movi16 a3, 3
; CSKY-NEXT: jsri32 [.LCPI23_0]
; CSKY-NEXT: addi16 sp, sp, 8
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI23_0:
; CSKY-NEXT: .long __atomic_compare_exchange_4
;
%res = cmpxchg i32* %ptr, i32 %cmp, i32 %val release monotonic
ret void
}
; Expected word spilled to (sp, 4), address in a1; success order release (3)
; in a3, failure order acquire (2) in the stack argument slot.
define void @cmpxchg_i32_release_acquire(i32* %ptr, i32 %cmp, i32 %val) nounwind {
; CSKY-LABEL: cmpxchg_i32_release_acquire:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: subi16 sp, sp, 8
; CSKY-NEXT: st16.w a1, (sp, 4)
; CSKY-NEXT: movi16 a1, 2
; CSKY-NEXT: st16.w a1, (sp, 0)
; CSKY-NEXT: addi16 a1, sp, 4
; CSKY-NEXT: movi16 a3, 3
; CSKY-NEXT: jsri32 [.LCPI24_0]
; CSKY-NEXT: addi16 sp, sp, 8
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI24_0:
; CSKY-NEXT: .long __atomic_compare_exchange_4
;
%res = cmpxchg i32* %ptr, i32 %cmp, i32 %val release acquire
ret void
}
; Expected word spilled to (sp, 4), address in a1; success order acq_rel (4)
; in a3, failure order monotonic (0) in the stack argument slot.
define void @cmpxchg_i32_acq_rel_monotonic(i32* %ptr, i32 %cmp, i32 %val) nounwind {
; CSKY-LABEL: cmpxchg_i32_acq_rel_monotonic:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: subi16 sp, sp, 8
; CSKY-NEXT: st16.w a1, (sp, 4)
; CSKY-NEXT: movi16 a1, 0
; CSKY-NEXT: st16.w a1, (sp, 0)
; CSKY-NEXT: addi16 a1, sp, 4
; CSKY-NEXT: movi16 a3, 4
; CSKY-NEXT: jsri32 [.LCPI25_0]
; CSKY-NEXT: addi16 sp, sp, 8
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI25_0:
; CSKY-NEXT: .long __atomic_compare_exchange_4
;
%res = cmpxchg i32* %ptr, i32 %cmp, i32 %val acq_rel monotonic
ret void
}
; Expected word spilled to (sp, 4), address in a1; success order acq_rel (4)
; in a3, failure order acquire (2) in the stack argument slot.
define void @cmpxchg_i32_acq_rel_acquire(i32* %ptr, i32 %cmp, i32 %val) nounwind {
; CSKY-LABEL: cmpxchg_i32_acq_rel_acquire:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: subi16 sp, sp, 8
; CSKY-NEXT: st16.w a1, (sp, 4)
; CSKY-NEXT: movi16 a1, 2
; CSKY-NEXT: st16.w a1, (sp, 0)
; CSKY-NEXT: addi16 a1, sp, 4
; CSKY-NEXT: movi16 a3, 4
; CSKY-NEXT: jsri32 [.LCPI26_0]
; CSKY-NEXT: addi16 sp, sp, 8
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI26_0:
; CSKY-NEXT: .long __atomic_compare_exchange_4
;
%res = cmpxchg i32* %ptr, i32 %cmp, i32 %val acq_rel acquire
ret void
}
; Expected word spilled to (sp, 4), address in a1; success order seq_cst (5)
; in a3, failure order monotonic (0) in the stack argument slot.
define void @cmpxchg_i32_seq_cst_monotonic(i32* %ptr, i32 %cmp, i32 %val) nounwind {
; CSKY-LABEL: cmpxchg_i32_seq_cst_monotonic:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: subi16 sp, sp, 8
; CSKY-NEXT: st16.w a1, (sp, 4)
; CSKY-NEXT: movi16 a1, 0
; CSKY-NEXT: st16.w a1, (sp, 0)
; CSKY-NEXT: addi16 a1, sp, 4
; CSKY-NEXT: movi16 a3, 5
; CSKY-NEXT: jsri32 [.LCPI27_0]
; CSKY-NEXT: addi16 sp, sp, 8
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI27_0:
; CSKY-NEXT: .long __atomic_compare_exchange_4
;
%res = cmpxchg i32* %ptr, i32 %cmp, i32 %val seq_cst monotonic
ret void
}
; Expected word spilled to (sp, 4), address in a1; success order seq_cst (5)
; in a3, failure order acquire (2) in the stack argument slot.
define void @cmpxchg_i32_seq_cst_acquire(i32* %ptr, i32 %cmp, i32 %val) nounwind {
; CSKY-LABEL: cmpxchg_i32_seq_cst_acquire:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: subi16 sp, sp, 8
; CSKY-NEXT: st16.w a1, (sp, 4)
; CSKY-NEXT: movi16 a1, 2
; CSKY-NEXT: st16.w a1, (sp, 0)
; CSKY-NEXT: addi16 a1, sp, 4
; CSKY-NEXT: movi16 a3, 5
; CSKY-NEXT: jsri32 [.LCPI28_0]
; CSKY-NEXT: addi16 sp, sp, 8
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI28_0:
; CSKY-NEXT: .long __atomic_compare_exchange_4
;
%res = cmpxchg i32* %ptr, i32 %cmp, i32 %val seq_cst acquire
ret void
}
; Expected word spilled to (sp, 4), address in a1; success order seq_cst (5)
; in a3, failure order seq_cst (5) in the stack argument slot.
define void @cmpxchg_i32_seq_cst_seq_cst(i32* %ptr, i32 %cmp, i32 %val) nounwind {
; CSKY-LABEL: cmpxchg_i32_seq_cst_seq_cst:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: subi16 sp, sp, 8
; CSKY-NEXT: st16.w a1, (sp, 4)
; CSKY-NEXT: movi16 a1, 5
; CSKY-NEXT: st16.w a1, (sp, 0)
; CSKY-NEXT: addi16 a1, sp, 4
; CSKY-NEXT: movi16 a3, 5
; CSKY-NEXT: jsri32 [.LCPI29_0]
; CSKY-NEXT: addi16 sp, sp, 8
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI29_0:
; CSKY-NEXT: .long __atomic_compare_exchange_4
;
%res = cmpxchg i32* %ptr, i32 %cmp, i32 %val seq_cst seq_cst
ret void
}
; i64 form: expected pair spilled to an 8-byte slot at (sp, 8)/(sp, 12) with
; its address passed in a1; desired value assembled into a2:a3 (high word
; reloaded from the incoming stack via t0 shuffle). Both orderings go on the
; stack: success monotonic (0) at (sp, 0), failure monotonic (0) at (sp, 4).
define void @cmpxchg_i64_monotonic_monotonic(i64* %ptr, i64 %cmp, i64 %val) nounwind {
; CSKY-LABEL: cmpxchg_i64_monotonic_monotonic:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: subi16 sp, sp, 16
; CSKY-NEXT: mov16 t0, a3
; CSKY-NEXT: ld16.w a3, (sp, 20)
; CSKY-NEXT: st16.w a2, (sp, 12)
; CSKY-NEXT: st16.w a1, (sp, 8)
; CSKY-NEXT: movi16 a1, 0
; CSKY-NEXT: st16.w a1, (sp, 4)
; CSKY-NEXT: st16.w a1, (sp, 0)
; CSKY-NEXT: addi16 a1, sp, 8
; CSKY-NEXT: mov16 a2, t0
; CSKY-NEXT: jsri32 [.LCPI30_0]
; CSKY-NEXT: addi16 sp, sp, 16
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI30_0:
; CSKY-NEXT: .long __atomic_compare_exchange_8
;
%res = cmpxchg i64* %ptr, i64 %cmp, i64 %val monotonic monotonic
ret void
}
; Expected pair spilled to (sp, 8)/(sp, 12), address in a1; success order
; acquire (2) at (sp, 0), failure order monotonic (0) at (sp, 4).
define void @cmpxchg_i64_acquire_monotonic(i64* %ptr, i64 %cmp, i64 %val) nounwind {
; CSKY-LABEL: cmpxchg_i64_acquire_monotonic:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: subi16 sp, sp, 16
; CSKY-NEXT: mov16 t0, a3
; CSKY-NEXT: ld16.w a3, (sp, 20)
; CSKY-NEXT: st16.w a2, (sp, 12)
; CSKY-NEXT: st16.w a1, (sp, 8)
; CSKY-NEXT: movi16 a1, 0
; CSKY-NEXT: st16.w a1, (sp, 4)
; CSKY-NEXT: movi16 a1, 2
; CSKY-NEXT: st16.w a1, (sp, 0)
; CSKY-NEXT: addi16 a1, sp, 8
; CSKY-NEXT: mov16 a2, t0
; CSKY-NEXT: jsri32 [.LCPI31_0]
; CSKY-NEXT: addi16 sp, sp, 16
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI31_0:
; CSKY-NEXT: .long __atomic_compare_exchange_8
;
%res = cmpxchg i64* %ptr, i64 %cmp, i64 %val acquire monotonic
ret void
}
; Expected pair spilled to (sp, 8)/(sp, 12), address in a1; success order
; acquire (2) at (sp, 0), failure order acquire (2) at (sp, 4).
define void @cmpxchg_i64_acquire_acquire(i64* %ptr, i64 %cmp, i64 %val) nounwind {
; CSKY-LABEL: cmpxchg_i64_acquire_acquire:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: subi16 sp, sp, 16
; CSKY-NEXT: mov16 t0, a3
; CSKY-NEXT: ld16.w a3, (sp, 20)
; CSKY-NEXT: st16.w a2, (sp, 12)
; CSKY-NEXT: st16.w a1, (sp, 8)
; CSKY-NEXT: movi16 a1, 2
; CSKY-NEXT: st16.w a1, (sp, 4)
; CSKY-NEXT: st16.w a1, (sp, 0)
; CSKY-NEXT: addi16 a1, sp, 8
; CSKY-NEXT: mov16 a2, t0
; CSKY-NEXT: jsri32 [.LCPI32_0]
; CSKY-NEXT: addi16 sp, sp, 16
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI32_0:
; CSKY-NEXT: .long __atomic_compare_exchange_8
;
%res = cmpxchg i64* %ptr, i64 %cmp, i64 %val acquire acquire
ret void
}
; Expected pair spilled to (sp, 8)/(sp, 12), address in a1; success order
; release (3) at (sp, 0), failure order monotonic (0) at (sp, 4).
define void @cmpxchg_i64_release_monotonic(i64* %ptr, i64 %cmp, i64 %val) nounwind {
; CSKY-LABEL: cmpxchg_i64_release_monotonic:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: subi16 sp, sp, 16
; CSKY-NEXT: mov16 t0, a3
; CSKY-NEXT: ld16.w a3, (sp, 20)
; CSKY-NEXT: st16.w a2, (sp, 12)
; CSKY-NEXT: st16.w a1, (sp, 8)
; CSKY-NEXT: movi16 a1, 0
; CSKY-NEXT: st16.w a1, (sp, 4)
; CSKY-NEXT: movi16 a1, 3
; CSKY-NEXT: st16.w a1, (sp, 0)
; CSKY-NEXT: addi16 a1, sp, 8
; CSKY-NEXT: mov16 a2, t0
; CSKY-NEXT: jsri32 [.LCPI33_0]
; CSKY-NEXT: addi16 sp, sp, 16
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI33_0:
; CSKY-NEXT: .long __atomic_compare_exchange_8
;
%res = cmpxchg i64* %ptr, i64 %cmp, i64 %val release monotonic
ret void
}
; Expected pair spilled to (sp, 8)/(sp, 12), address in a1; success order
; release (3) at (sp, 0), failure order acquire (2) at (sp, 4).
define void @cmpxchg_i64_release_acquire(i64* %ptr, i64 %cmp, i64 %val) nounwind {
; CSKY-LABEL: cmpxchg_i64_release_acquire:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: subi16 sp, sp, 16
; CSKY-NEXT: mov16 t0, a3
; CSKY-NEXT: ld16.w a3, (sp, 20)
; CSKY-NEXT: st16.w a2, (sp, 12)
; CSKY-NEXT: st16.w a1, (sp, 8)
; CSKY-NEXT: movi16 a1, 2
; CSKY-NEXT: st16.w a1, (sp, 4)
; CSKY-NEXT: movi16 a1, 3
; CSKY-NEXT: st16.w a1, (sp, 0)
; CSKY-NEXT: addi16 a1, sp, 8
; CSKY-NEXT: mov16 a2, t0
; CSKY-NEXT: jsri32 [.LCPI34_0]
; CSKY-NEXT: addi16 sp, sp, 16
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI34_0:
; CSKY-NEXT: .long __atomic_compare_exchange_8
;
%res = cmpxchg i64* %ptr, i64 %cmp, i64 %val release acquire
ret void
}
; Expected pair spilled to (sp, 8)/(sp, 12), address in a1; success order
; acq_rel (4) at (sp, 0), failure order monotonic (0) at (sp, 4).
define void @cmpxchg_i64_acq_rel_monotonic(i64* %ptr, i64 %cmp, i64 %val) nounwind {
; CSKY-LABEL: cmpxchg_i64_acq_rel_monotonic:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: subi16 sp, sp, 16
; CSKY-NEXT: mov16 t0, a3
; CSKY-NEXT: ld16.w a3, (sp, 20)
; CSKY-NEXT: st16.w a2, (sp, 12)
; CSKY-NEXT: st16.w a1, (sp, 8)
; CSKY-NEXT: movi16 a1, 0
; CSKY-NEXT: st16.w a1, (sp, 4)
; CSKY-NEXT: movi16 a1, 4
; CSKY-NEXT: st16.w a1, (sp, 0)
; CSKY-NEXT: addi16 a1, sp, 8
; CSKY-NEXT: mov16 a2, t0
; CSKY-NEXT: jsri32 [.LCPI35_0]
; CSKY-NEXT: addi16 sp, sp, 16
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI35_0:
; CSKY-NEXT: .long __atomic_compare_exchange_8
;
%res = cmpxchg i64* %ptr, i64 %cmp, i64 %val acq_rel monotonic
ret void
}
; Expected pair spilled to (sp, 8)/(sp, 12), address in a1; success order
; acq_rel (4) at (sp, 0), failure order acquire (2) at (sp, 4).
define void @cmpxchg_i64_acq_rel_acquire(i64* %ptr, i64 %cmp, i64 %val) nounwind {
; CSKY-LABEL: cmpxchg_i64_acq_rel_acquire:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: subi16 sp, sp, 16
; CSKY-NEXT: mov16 t0, a3
; CSKY-NEXT: ld16.w a3, (sp, 20)
; CSKY-NEXT: st16.w a2, (sp, 12)
; CSKY-NEXT: st16.w a1, (sp, 8)
; CSKY-NEXT: movi16 a1, 2
; CSKY-NEXT: st16.w a1, (sp, 4)
; CSKY-NEXT: movi16 a1, 4
; CSKY-NEXT: st16.w a1, (sp, 0)
; CSKY-NEXT: addi16 a1, sp, 8
; CSKY-NEXT: mov16 a2, t0
; CSKY-NEXT: jsri32 [.LCPI36_0]
; CSKY-NEXT: addi16 sp, sp, 16
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI36_0:
; CSKY-NEXT: .long __atomic_compare_exchange_8
;
%res = cmpxchg i64* %ptr, i64 %cmp, i64 %val acq_rel acquire
ret void
}
; Expected pair spilled to (sp, 8)/(sp, 12), address in a1; success order
; seq_cst (5) at (sp, 0), failure order monotonic (0) at (sp, 4).
define void @cmpxchg_i64_seq_cst_monotonic(i64* %ptr, i64 %cmp, i64 %val) nounwind {
; CSKY-LABEL: cmpxchg_i64_seq_cst_monotonic:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: subi16 sp, sp, 16
; CSKY-NEXT: mov16 t0, a3
; CSKY-NEXT: ld16.w a3, (sp, 20)
; CSKY-NEXT: st16.w a2, (sp, 12)
; CSKY-NEXT: st16.w a1, (sp, 8)
; CSKY-NEXT: movi16 a1, 0
; CSKY-NEXT: st16.w a1, (sp, 4)
; CSKY-NEXT: movi16 a1, 5
; CSKY-NEXT: st16.w a1, (sp, 0)
; CSKY-NEXT: addi16 a1, sp, 8
; CSKY-NEXT: mov16 a2, t0
; CSKY-NEXT: jsri32 [.LCPI37_0]
; CSKY-NEXT: addi16 sp, sp, 16
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI37_0:
; CSKY-NEXT: .long __atomic_compare_exchange_8
;
%res = cmpxchg i64* %ptr, i64 %cmp, i64 %val seq_cst monotonic
ret void
}
; Expected pair spilled to (sp, 8)/(sp, 12), address in a1; success order
; seq_cst (5) at (sp, 0), failure order acquire (2) at (sp, 4).
define void @cmpxchg_i64_seq_cst_acquire(i64* %ptr, i64 %cmp, i64 %val) nounwind {
; CSKY-LABEL: cmpxchg_i64_seq_cst_acquire:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: subi16 sp, sp, 16
; CSKY-NEXT: mov16 t0, a3
; CSKY-NEXT: ld16.w a3, (sp, 20)
; CSKY-NEXT: st16.w a2, (sp, 12)
; CSKY-NEXT: st16.w a1, (sp, 8)
; CSKY-NEXT: movi16 a1, 2
; CSKY-NEXT: st16.w a1, (sp, 4)
; CSKY-NEXT: movi16 a1, 5
; CSKY-NEXT: st16.w a1, (sp, 0)
; CSKY-NEXT: addi16 a1, sp, 8
; CSKY-NEXT: mov16 a2, t0
; CSKY-NEXT: jsri32 [.LCPI38_0]
; CSKY-NEXT: addi16 sp, sp, 16
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI38_0:
; CSKY-NEXT: .long __atomic_compare_exchange_8
;
%res = cmpxchg i64* %ptr, i64 %cmp, i64 %val seq_cst acquire
ret void
}
; Expected pair spilled to (sp, 8)/(sp, 12), address in a1; success order
; seq_cst (5) at (sp, 0), failure order seq_cst (5) at (sp, 4).
define void @cmpxchg_i64_seq_cst_seq_cst(i64* %ptr, i64 %cmp, i64 %val) nounwind {
; CSKY-LABEL: cmpxchg_i64_seq_cst_seq_cst:
; CSKY: # %bb.0:
; CSKY-NEXT: subi16 sp, sp, 4
; CSKY-NEXT: st32.w lr, (sp, 0) # 4-byte Folded Spill
; CSKY-NEXT: subi16 sp, sp, 16
; CSKY-NEXT: mov16 t0, a3
; CSKY-NEXT: ld16.w a3, (sp, 20)
; CSKY-NEXT: st16.w a2, (sp, 12)
; CSKY-NEXT: st16.w a1, (sp, 8)
; CSKY-NEXT: movi16 a1, 5
; CSKY-NEXT: st16.w a1, (sp, 4)
; CSKY-NEXT: st16.w a1, (sp, 0)
; CSKY-NEXT: addi16 a1, sp, 8
; CSKY-NEXT: mov16 a2, t0
; CSKY-NEXT: jsri32 [.LCPI39_0]
; CSKY-NEXT: addi16 sp, sp, 16
; CSKY-NEXT: ld32.w lr, (sp, 0) # 4-byte Folded Reload
; CSKY-NEXT: addi16 sp, sp, 4
; CSKY-NEXT: rts16
; CSKY-NEXT: .p2align 1
; CSKY-NEXT: # %bb.1:
; CSKY-NEXT: .p2align 2
; CSKY-NEXT: .LCPI39_0:
; CSKY-NEXT: .long __atomic_compare_exchange_8
;
%res = cmpxchg i64* %ptr, i64 %cmp, i64 %val seq_cst seq_cst
ret void
}