This adds the FPC (floating-point control register) as a reserved physical register and models its use by SystemZ instructions. Note that only the current rounding mode and the IEEE exception masks are modeled. *Changes* of the FPC due to exceptions (in particular the IEEE exception flags and the DXC) are not modeled. At this point, this patch is mostly NFC, but it will prevent scheduling of floating-point instructions across SFPC/LFPC etc.

llvm-svn: 360570
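For orientation, reserving a control register in an LLVM backend is conventionally done in the target's getReservedRegs() override; below is a minimal sketch of that pattern, assuming a SystemZ::FPC entry in the generated register enum (illustrative only, not the verbatim patch). Floating-point instructions then name $fpc as an implicit operand, which is what the MIR test that follows appears to exercise: it creates enough FP register pressure that one value must be spilled, and the CHECK line verifies the fpext can still be folded into a reload from the spill slot (the ldeb from %r15).

// Minimal sketch, assuming a SystemZ::FPC register enum entry; not the
// verbatim patch. Marking the register reserved keeps the allocator away
// from it while still letting instructions list it as an implicit operand.
#include "SystemZRegisterInfo.h"
using namespace llvm;

BitVector SystemZRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
  BitVector Reserved(getNumRegs());
  // ... existing reservations (stack pointer, frame pointer, ...) elided ...

  // Only the rounding mode and IEEE exception masks are modeled, so the FPC
  // appears as a use-only implicit operand on FP instructions and must never
  // be allocated or spilled.
  Reserved.set(SystemZ::FPC);
  return Reserved;
}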
# RUN: llc -mtriple=s390x-linux-gnu -mcpu=z10 -start-before=greedy %s -o - \
# RUN: | FileCheck %s
--- |
  define void @f0(double* %ptr1, float* %ptr2) {
    %val0 = load volatile float, float* %ptr2
    %val1 = load volatile float, float* %ptr2
    %val2 = load volatile float, float* %ptr2
    %val3 = load volatile float, float* %ptr2
    %val4 = load volatile float, float* %ptr2
    %val5 = load volatile float, float* %ptr2
    %val6 = load volatile float, float* %ptr2
    %val7 = load volatile float, float* %ptr2
    %val8 = load volatile float, float* %ptr2
    %val9 = load volatile float, float* %ptr2
    %val10 = load volatile float, float* %ptr2
    %val11 = load volatile float, float* %ptr2
    %val12 = load volatile float, float* %ptr2
    %val13 = load volatile float, float* %ptr2
    %val14 = load volatile float, float* %ptr2
    %val15 = load volatile float, float* %ptr2
    %val16 = load volatile float, float* %ptr2
    %ext0 = fpext float %val0 to double
    %ext1 = fpext float %val1 to double
    %ext2 = fpext float %val2 to double
    %ext3 = fpext float %val3 to double
    %ext4 = fpext float %val4 to double
    %ext5 = fpext float %val5 to double
    %ext6 = fpext float %val6 to double
    %ext7 = fpext float %val7 to double
    %ext8 = fpext float %val8 to double
    %ext9 = fpext float %val9 to double
    %ext10 = fpext float %val10 to double
    %ext11 = fpext float %val11 to double
    %ext12 = fpext float %val12 to double
    %ext13 = fpext float %val13 to double
    %ext14 = fpext float %val14 to double
    %ext15 = fpext float %val15 to double
    %ext16 = fpext float %val16 to double
    store volatile float %val0, float* %ptr2
    store volatile float %val1, float* %ptr2
    store volatile float %val2, float* %ptr2
    store volatile float %val3, float* %ptr2
    store volatile float %val4, float* %ptr2
    store volatile float %val5, float* %ptr2
    store volatile float %val6, float* %ptr2
    store volatile float %val7, float* %ptr2
    store volatile float %val8, float* %ptr2
    store volatile float %val9, float* %ptr2
    store volatile float %val10, float* %ptr2
    store volatile float %val11, float* %ptr2
    store volatile float %val12, float* %ptr2
    store volatile float %val13, float* %ptr2
    store volatile float %val14, float* %ptr2
    store volatile float %val15, float* %ptr2
    store volatile float %val16, float* %ptr2
    store volatile double %ext0, double* %ptr1
    store volatile double %ext1, double* %ptr1
    store volatile double %ext2, double* %ptr1
    store volatile double %ext3, double* %ptr1
    store volatile double %ext4, double* %ptr1
    store volatile double %ext5, double* %ptr1
    store volatile double %ext6, double* %ptr1
    store volatile double %ext7, double* %ptr1
    store volatile double %ext8, double* %ptr1
    store volatile double %ext9, double* %ptr1
    store volatile double %ext10, double* %ptr1
    store volatile double %ext11, double* %ptr1
    store volatile double %ext12, double* %ptr1
    store volatile double %ext13, double* %ptr1
    store volatile double %ext14, double* %ptr1
    store volatile double %ext15, double* %ptr1
    store volatile double %ext16, double* %ptr1
    ret void
  }

...

# CHECK-LABEL: f0:
# CHECK: ldeb {{%f[0-9]+}}, 16{{[04]}}(%r15)
# CHECK: br %r14

---
name: f0
alignment: 2
tracksRegLiveness: true
registers:
  - { id: 0, class: addr64bit }
  - { id: 1, class: addr64bit }
  - { id: 2, class: fp32bit }
  - { id: 3, class: fp32bit }
  - { id: 4, class: fp32bit }
  - { id: 5, class: fp32bit }
  - { id: 6, class: fp32bit }
  - { id: 7, class: fp32bit }
  - { id: 8, class: fp32bit }
  - { id: 9, class: fp32bit }
  - { id: 10, class: fp32bit }
  - { id: 11, class: fp32bit }
  - { id: 12, class: fp32bit }
  - { id: 13, class: fp32bit }
  - { id: 14, class: fp32bit }
  - { id: 15, class: fp32bit }
  - { id: 16, class: fp32bit }
  - { id: 17, class: fp32bit }
  - { id: 18, class: fp32bit }
  - { id: 19, class: fp64bit }
  - { id: 20, class: fp64bit }
  - { id: 21, class: fp64bit }
  - { id: 22, class: fp64bit }
  - { id: 23, class: fp64bit }
  - { id: 24, class: fp64bit }
  - { id: 25, class: fp64bit }
  - { id: 26, class: fp64bit }
  - { id: 27, class: fp64bit }
  - { id: 28, class: fp64bit }
  - { id: 29, class: fp64bit }
  - { id: 30, class: fp64bit }
  - { id: 31, class: fp64bit }
  - { id: 32, class: fp64bit }
  - { id: 33, class: fp64bit }
  - { id: 34, class: fp64bit }
  - { id: 35, class: fp64bit }
liveins:
  - { reg: '$r2d', virtual-reg: '%0' }
  - { reg: '$r3d', virtual-reg: '%1' }
body: |
  bb.0 (%ir-block.0):
    liveins: $r2d, $r3d

    %1 = COPY $r3d
    %0 = COPY $r2d
    %2 = LE %1, 0, $noreg :: (volatile load 4 from %ir.ptr2)
    %3 = LE %1, 0, $noreg :: (volatile load 4 from %ir.ptr2)
    %4 = LE %1, 0, $noreg :: (volatile load 4 from %ir.ptr2)
    %5 = LE %1, 0, $noreg :: (volatile load 4 from %ir.ptr2)
    %6 = LE %1, 0, $noreg :: (volatile load 4 from %ir.ptr2)
    %7 = LE %1, 0, $noreg :: (volatile load 4 from %ir.ptr2)
    %8 = LE %1, 0, $noreg :: (volatile load 4 from %ir.ptr2)
    %9 = LE %1, 0, $noreg :: (volatile load 4 from %ir.ptr2)
    %10 = LE %1, 0, $noreg :: (volatile load 4 from %ir.ptr2)
    %11 = LE %1, 0, $noreg :: (volatile load 4 from %ir.ptr2)
    %12 = LE %1, 0, $noreg :: (volatile load 4 from %ir.ptr2)
    %13 = LE %1, 0, $noreg :: (volatile load 4 from %ir.ptr2)
    %14 = LE %1, 0, $noreg :: (volatile load 4 from %ir.ptr2)
    %15 = LE %1, 0, $noreg :: (volatile load 4 from %ir.ptr2)
    %16 = LE %1, 0, $noreg :: (volatile load 4 from %ir.ptr2)
    %17 = LE %1, 0, $noreg :: (volatile load 4 from %ir.ptr2)
    %18 = LE %1, 0, $noreg :: (volatile load 4 from %ir.ptr2)
    STE %2, %1, 0, $noreg :: (volatile store 4 into %ir.ptr2)
    STE %3, %1, 0, $noreg :: (volatile store 4 into %ir.ptr2)
    STE %4, %1, 0, $noreg :: (volatile store 4 into %ir.ptr2)
    STE %5, %1, 0, $noreg :: (volatile store 4 into %ir.ptr2)
    STE %6, %1, 0, $noreg :: (volatile store 4 into %ir.ptr2)
    STE %7, %1, 0, $noreg :: (volatile store 4 into %ir.ptr2)
    STE %8, %1, 0, $noreg :: (volatile store 4 into %ir.ptr2)
    STE %9, %1, 0, $noreg :: (volatile store 4 into %ir.ptr2)
    STE %10, %1, 0, $noreg :: (volatile store 4 into %ir.ptr2)
    STE %11, %1, 0, $noreg :: (volatile store 4 into %ir.ptr2)
    STE %12, %1, 0, $noreg :: (volatile store 4 into %ir.ptr2)
    STE %13, %1, 0, $noreg :: (volatile store 4 into %ir.ptr2)
    STE %14, %1, 0, $noreg :: (volatile store 4 into %ir.ptr2)
    STE %15, %1, 0, $noreg :: (volatile store 4 into %ir.ptr2)
    STE %16, %1, 0, $noreg :: (volatile store 4 into %ir.ptr2)
    STE %17, %1, 0, $noreg :: (volatile store 4 into %ir.ptr2)
    STE %18, %1, 0, $noreg :: (volatile store 4 into %ir.ptr2)
    %19 = LDEBR %2, implicit $fpc
    STD %19, %0, 0, $noreg :: (volatile store 8 into %ir.ptr1)
    %20 = LDEBR %3, implicit $fpc
    STD %20, %0, 0, $noreg :: (volatile store 8 into %ir.ptr1)
    %21 = LDEBR %4, implicit $fpc
    STD %21, %0, 0, $noreg :: (volatile store 8 into %ir.ptr1)
    %22 = LDEBR %5, implicit $fpc
    STD %22, %0, 0, $noreg :: (volatile store 8 into %ir.ptr1)
    %23 = LDEBR %6, implicit $fpc
    STD %23, %0, 0, $noreg :: (volatile store 8 into %ir.ptr1)
    %24 = LDEBR %7, implicit $fpc
    STD %24, %0, 0, $noreg :: (volatile store 8 into %ir.ptr1)
    %25 = LDEBR %8, implicit $fpc
    STD %25, %0, 0, $noreg :: (volatile store 8 into %ir.ptr1)
    %26 = LDEBR %9, implicit $fpc
    STD %26, %0, 0, $noreg :: (volatile store 8 into %ir.ptr1)
    %27 = LDEBR %10, implicit $fpc
    STD %27, %0, 0, $noreg :: (volatile store 8 into %ir.ptr1)
    %28 = LDEBR %11, implicit $fpc
    STD %28, %0, 0, $noreg :: (volatile store 8 into %ir.ptr1)
    %29 = LDEBR %12, implicit $fpc
    STD %29, %0, 0, $noreg :: (volatile store 8 into %ir.ptr1)
    %30 = LDEBR %13, implicit $fpc
    STD %30, %0, 0, $noreg :: (volatile store 8 into %ir.ptr1)
    %31 = LDEBR %14, implicit $fpc
    STD %31, %0, 0, $noreg :: (volatile store 8 into %ir.ptr1)
    %32 = LDEBR %15, implicit $fpc
    STD %32, %0, 0, $noreg :: (volatile store 8 into %ir.ptr1)
    %33 = LDEBR %16, implicit $fpc
    STD %33, %0, 0, $noreg :: (volatile store 8 into %ir.ptr1)
    %34 = LDEBR %17, implicit $fpc
    STD %34, %0, 0, $noreg :: (volatile store 8 into %ir.ptr1)
    %35 = LDEBR %18, implicit $fpc
    STD %35, %0, 0, $noreg :: (volatile store 8 into %ir.ptr1)
    Return

...