Add a memory wait ("MEMW") instruction before each volatile load/store operation, matching what GCC does for Xtensa.
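For context, here is a minimal C sketch of the kind of source this test models (illustrative only, not part of the test; the variable and function names are hypothetical). The lowering noted in the comments is paraphrased from the i32 CHECK lines below.

```c
/* Hypothetical illustration: with this change, the Xtensa backend emits a
 * MEMW barrier before every volatile load and store, mirroring how GCC
 * handles volatile accesses on Xtensa. */
volatile int x;
volatile int y;

void copy_volatile(void) {
  /* Roughly the expected lowering (see the i32 checks below):
   *   memw
   *   l32i  a8, <address of x>, 0
   *   memw
   *   s32i  a8, <address of y>, 0
   */
  y = x;
}
```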
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc -mtriple=xtensa -verify-machineinstrs < %s \
; RUN: | FileCheck %s

@x_i8 = common dso_local global i8 0, align 8
@y_i8 = common dso_local global i8 0, align 8
@x_i16 = common dso_local global i16 0, align 8
@y_i16 = common dso_local global i16 0, align 8
@x_i32 = common dso_local global i32 0, align 8
@y_i32 = common dso_local global i32 0, align 8
@x_i64 = common dso_local global i64 0, align 8
@y_i64 = common dso_local global i64 0, align 8
@x_float = common dso_local global float 0.0, align 8
@y_float = common dso_local global float 0.0, align 8
@x_double = common dso_local global double 0.0, align 8
@y_double = common dso_local global double 0.0, align 8
@x_vec = common dso_local global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 8
@y_vec = common dso_local global <4 x i32> <i32 0, i32 0, i32 0, i32 0>, align 8

define void @test() {
; CHECK-LABEL: test:
; CHECK: l32r a8, .LCPI0_0
; CHECK-NEXT: memw
; CHECK-NEXT: l8ui a8, a8, 0
; CHECK-NEXT: l32r a9, .LCPI0_1
; CHECK-NEXT: memw
; CHECK-NEXT: s8i a8, a9, 0
; CHECK-NEXT: l32r a8, .LCPI0_2
; CHECK-NEXT: memw
; CHECK-NEXT: l16ui a8, a8, 0
; CHECK-NEXT: l32r a9, .LCPI0_3
; CHECK-NEXT: memw
; CHECK-NEXT: s16i a8, a9, 0
; CHECK-NEXT: l32r a8, .LCPI0_4
; CHECK-NEXT: memw
; CHECK-NEXT: l32i a8, a8, 0
; CHECK-NEXT: l32r a9, .LCPI0_5
; CHECK-NEXT: memw
; CHECK-NEXT: s32i a8, a9, 0
; CHECK-NEXT: ret

entry:
%a = load volatile i8, ptr @x_i8, align 4
store volatile i8 %a, ptr @y_i8, align 4
%b = load volatile i16, ptr @x_i16, align 4
store volatile i16 %b, ptr @y_i16, align 4
%c = load volatile i32, ptr @x_i32, align 4
store volatile i32 %c, ptr @y_i32, align 4
ret void
}

define void @test_i8() {
; CHECK-LABEL: test_i8:
; CHECK: l32r a8, .LCPI1_0
; CHECK-NEXT: memw
; CHECK-NEXT: l8ui a8, a8, 0
; CHECK-NEXT: l32r a9, .LCPI1_1
; CHECK-NEXT: memw
; CHECK-NEXT: s8i a8, a9, 0
; CHECK-NEXT: ret
entry:
%a = load volatile i8, ptr @x_i8, align 4
store volatile i8 %a, ptr @y_i8, align 4
ret void
}

define void @test_i16() {
; CHECK-LABEL: test_i16:
; CHECK: l32r a8, .LCPI2_0
; CHECK-NEXT: memw
; CHECK-NEXT: l16ui a8, a8, 0
; CHECK-NEXT: l32r a9, .LCPI2_1
; CHECK-NEXT: memw
; CHECK-NEXT: s16i a8, a9, 0
; CHECK-NEXT: ret
entry:
%a = load volatile i16, ptr @x_i16, align 4
store volatile i16 %a, ptr @y_i16, align 4
ret void
}

define void @test_i32() {
; CHECK-LABEL: test_i32:
; CHECK: l32r a8, .LCPI3_0
; CHECK-NEXT: memw
; CHECK-NEXT: l32i a8, a8, 0
; CHECK-NEXT: l32r a9, .LCPI3_1
; CHECK-NEXT: memw
; CHECK-NEXT: s32i a8, a9, 0
; CHECK-NEXT: ret
entry:
%a = load volatile i32, ptr @x_i32, align 4
store volatile i32 %a, ptr @y_i32, align 4
ret void
}

define void @test_i64() {
; CHECK-LABEL: test_i64:
; CHECK: l32r a8, .LCPI4_0
; CHECK-NEXT: memw
; CHECK-NEXT: l32i a9, a8, 0
; CHECK-NEXT: memw
; CHECK-NEXT: l32i a8, a8, 4
; CHECK-NEXT: l32r a10, .LCPI4_1
; CHECK-NEXT: memw
; CHECK-NEXT: s32i a8, a10, 4
; CHECK-NEXT: memw
; CHECK-NEXT: s32i a9, a10, 0
; CHECK-NEXT: ret
entry:
%a = load volatile i64, ptr @x_i64, align 4
store volatile i64 %a, ptr @y_i64, align 4
ret void
}

define void @test_float() {
; CHECK-LABEL: test_float:
; CHECK: l32r a8, .LCPI5_0
; CHECK-NEXT: memw
; CHECK-NEXT: l32i a8, a8, 0
; CHECK-NEXT: l32r a9, .LCPI5_1
; CHECK-NEXT: memw
; CHECK-NEXT: s32i a8, a9, 0
; CHECK-NEXT: ret
entry:
%a = load volatile float, ptr @x_float, align 4
store volatile float %a, ptr @y_float, align 4
ret void
}

define void @test_double() {
; CHECK-LABEL: test_double:
; CHECK: l32r a8, .LCPI6_0
; CHECK-NEXT: memw
; CHECK-NEXT: l32i a9, a8, 0
; CHECK-NEXT: memw
; CHECK-NEXT: l32i a8, a8, 4
; CHECK-NEXT: l32r a10, .LCPI6_1
; CHECK-NEXT: memw
; CHECK-NEXT: s32i a8, a10, 4
; CHECK-NEXT: memw
; CHECK-NEXT: s32i a9, a10, 0
; CHECK-NEXT: ret
entry:
%a = load volatile double, ptr @x_double, align 4
store volatile double %a, ptr @y_double, align 4
ret void
}

define void @test_vec() {
; CHECK-LABEL: test_vec:
; CHECK: l32r a8, .LCPI7_0
; CHECK-NEXT: memw
; CHECK-NEXT: l32i a9, a8, 0
; CHECK-NEXT: memw
; CHECK-NEXT: l32i a10, a8, 4
; CHECK-NEXT: memw
; CHECK-NEXT: l32i a11, a8, 8
; CHECK-NEXT: memw
; CHECK-NEXT: l32i a8, a8, 12
; CHECK-NEXT: l32r a7, .LCPI7_1
; CHECK-NEXT: memw
; CHECK-NEXT: s32i a8, a7, 12
; CHECK-NEXT: memw
; CHECK-NEXT: s32i a11, a7, 8
; CHECK-NEXT: memw
; CHECK-NEXT: s32i a10, a7, 4
; CHECK-NEXT: memw
; CHECK-NEXT: s32i a9, a7, 0
; CHECK-NEXT: ret
entry:
%a = load volatile <4 x i32>, ptr @x_vec, align 4
store volatile <4 x i32> %a, ptr @y_vec, align 4
ret void
}