clang-p2996/llvm/test/CodeGen/PowerPC/fp-strict-conv-spe.ll
Commit eb7d16ea25 by Chen Zheng: [PowerPC] make expensive mflr be away from its user in the function prologue
mflr is fairly expensive on Power versions before Power10, so we should
schedule the store of mflr's def away from the mflr.

In the epilogue, the expensive mtlr has no user of its def, so it does not
matter that the load and the mtlr are back-to-back.

Reviewed By: RolandF

Differential Revision: https://reviews.llvm.org/D137423
2022-11-14 21:14:20 -05:00
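
For illustration, the scheduling described above is visible in the prologue and
epilogue sequences checked in this test (for example in d_to_i64):

  # prologue: the stack-pointer update is scheduled between mflr and the store
  # of its def, rather than issuing mflr and stw back-to-back
  mflr r0
  stwu r1, -16(r1)
  stw r0, 20(r1)

  # epilogue: the value mtlr defines has no later user, so the reload and the
  # mtlr can stay adjacent
  lwz r0, 20(r1)
  addi r1, r1, 16
  mtlr r0
  blr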


; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -verify-machineinstrs -ppc-asm-full-reg-names < %s -mcpu=e500 \
; RUN: -mtriple=powerpc-unknown-linux-gnu -mattr=spe | FileCheck %s \
; RUN: -check-prefix=SPE
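
; This file tests SPE lowering of the constrained (strict) FP <-> integer
; conversion intrinsics on 32-bit PowerPC (e500). The i32 conversions lower to
; single SPE instructions, while the i64 conversions lower to compiler-rt
; libcalls (__fixdfdi, __floatdidf, ...), so those functions also show the
; prologue LR-save scheduling described in the commit message above.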
declare i32 @llvm.experimental.constrained.fptosi.i32.f64(double, metadata)
declare i64 @llvm.experimental.constrained.fptosi.i64.f64(double, metadata)
declare i64 @llvm.experimental.constrained.fptoui.i64.f64(double, metadata)
declare i32 @llvm.experimental.constrained.fptoui.i32.f64(double, metadata)
declare i32 @llvm.experimental.constrained.fptosi.i32.f32(float, metadata)
declare i64 @llvm.experimental.constrained.fptosi.i64.f32(float, metadata)
declare i64 @llvm.experimental.constrained.fptoui.i64.f32(float, metadata)
declare i32 @llvm.experimental.constrained.fptoui.i32.f32(float, metadata)
declare double @llvm.experimental.constrained.sitofp.f64.i32(i32, metadata, metadata)
declare double @llvm.experimental.constrained.sitofp.f64.i64(i64, metadata, metadata)
declare double @llvm.experimental.constrained.uitofp.f64.i32(i32, metadata, metadata)
declare double @llvm.experimental.constrained.uitofp.f64.i64(i64, metadata, metadata)
declare float @llvm.experimental.constrained.sitofp.f32.i64(i64, metadata, metadata)
declare float @llvm.experimental.constrained.sitofp.f32.i32(i32, metadata, metadata)
declare float @llvm.experimental.constrained.uitofp.f32.i32(i32, metadata, metadata)
declare float @llvm.experimental.constrained.uitofp.f32.i64(i64, metadata, metadata)
define i32 @d_to_i32(double %m) #0 {
; SPE-LABEL: d_to_i32:
; SPE: # %bb.0: # %entry
; SPE-NEXT: evmergelo r3, r3, r4
; SPE-NEXT: efdctsiz r3, r3
; SPE-NEXT: blr
entry:
%conv = call i32 @llvm.experimental.constrained.fptosi.i32.f64(double %m, metadata !"fpexcept.strict") #0
ret i32 %conv
}
define i64 @d_to_i64(double %m) #0 {
; SPE-LABEL: d_to_i64:
; SPE: # %bb.0: # %entry
; SPE-NEXT: mflr r0
; SPE-NEXT: stwu r1, -16(r1)
; SPE-NEXT: stw r0, 20(r1)
; SPE-NEXT: .cfi_def_cfa_offset 16
; SPE-NEXT: .cfi_offset lr, 4
; SPE-NEXT: evmergelo r4, r3, r4
; SPE-NEXT: evmergehi r3, r4, r4
; SPE-NEXT: # kill: def $r4 killed $r4 killed $s4
; SPE-NEXT: # kill: def $r3 killed $r3 killed $s3
; SPE-NEXT: bl __fixdfdi
; SPE-NEXT: lwz r0, 20(r1)
; SPE-NEXT: addi r1, r1, 16
; SPE-NEXT: mtlr r0
; SPE-NEXT: blr
entry:
%conv = call i64 @llvm.experimental.constrained.fptosi.i64.f64(double %m, metadata !"fpexcept.strict") #0
ret i64 %conv
}
define i64 @d_to_u64(double %m) #0 {
; SPE-LABEL: d_to_u64:
; SPE: # %bb.0: # %entry
; SPE-NEXT: mflr r0
; SPE-NEXT: stwu r1, -16(r1)
; SPE-NEXT: stw r0, 20(r1)
; SPE-NEXT: .cfi_def_cfa_offset 16
; SPE-NEXT: .cfi_offset lr, 4
; SPE-NEXT: evmergelo r4, r3, r4
; SPE-NEXT: evmergehi r3, r4, r4
; SPE-NEXT: # kill: def $r4 killed $r4 killed $s4
; SPE-NEXT: # kill: def $r3 killed $r3 killed $s3
; SPE-NEXT: bl __fixunsdfdi
; SPE-NEXT: lwz r0, 20(r1)
; SPE-NEXT: addi r1, r1, 16
; SPE-NEXT: mtlr r0
; SPE-NEXT: blr
entry:
%conv = call i64 @llvm.experimental.constrained.fptoui.i64.f64(double %m, metadata !"fpexcept.strict") #0
ret i64 %conv
}
define zeroext i32 @d_to_u32(double %m) #0 {
; SPE-LABEL: d_to_u32:
; SPE: # %bb.0: # %entry
; SPE-NEXT: evmergelo r3, r3, r4
; SPE-NEXT: efdctuiz r3, r3
; SPE-NEXT: blr
entry:
%conv = call i32 @llvm.experimental.constrained.fptoui.i32.f64(double %m, metadata !"fpexcept.strict") #0
ret i32 %conv
}
define signext i32 @f_to_i32(float %m) #0 {
; SPE-LABEL: f_to_i32:
; SPE: # %bb.0: # %entry
; SPE-NEXT: efsctsiz r3, r3
; SPE-NEXT: blr
entry:
%conv = call i32 @llvm.experimental.constrained.fptosi.i32.f32(float %m, metadata !"fpexcept.strict") #0
ret i32 %conv
}
define i64 @f_to_i64(float %m) #0 {
; SPE-LABEL: f_to_i64:
; SPE: # %bb.0: # %entry
; SPE-NEXT: mflr r0
; SPE-NEXT: stwu r1, -16(r1)
; SPE-NEXT: stw r0, 20(r1)
; SPE-NEXT: .cfi_def_cfa_offset 16
; SPE-NEXT: .cfi_offset lr, 4
; SPE-NEXT: bl __fixsfdi
; SPE-NEXT: lwz r0, 20(r1)
; SPE-NEXT: addi r1, r1, 16
; SPE-NEXT: mtlr r0
; SPE-NEXT: blr
entry:
%conv = call i64 @llvm.experimental.constrained.fptosi.i64.f32(float %m, metadata !"fpexcept.strict") #0
ret i64 %conv
}
define i64 @f_to_u64(float %m) #0 {
; SPE-LABEL: f_to_u64:
; SPE: # %bb.0: # %entry
; SPE-NEXT: mflr r0
; SPE-NEXT: stwu r1, -16(r1)
; SPE-NEXT: stw r0, 20(r1)
; SPE-NEXT: .cfi_def_cfa_offset 16
; SPE-NEXT: .cfi_offset lr, 4
; SPE-NEXT: bl __fixunssfdi
; SPE-NEXT: lwz r0, 20(r1)
; SPE-NEXT: addi r1, r1, 16
; SPE-NEXT: mtlr r0
; SPE-NEXT: blr
entry:
%conv = call i64 @llvm.experimental.constrained.fptoui.i64.f32(float %m, metadata !"fpexcept.strict") #0
ret i64 %conv
}
define zeroext i32 @f_to_u32(float %m) #0 {
; SPE-LABEL: f_to_u32:
; SPE: # %bb.0: # %entry
; SPE-NEXT: efsctuiz r3, r3
; SPE-NEXT: blr
entry:
%conv = call i32 @llvm.experimental.constrained.fptoui.i32.f32(float %m, metadata !"fpexcept.strict") #0
ret i32 %conv
}
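
; In the int-to-double functions below, the produced double lives in a 64-bit
; SPE register (efdcfsi/efdcfui, or the merged libcall result, define s4);
; evmergehi copies its high word into r3 so the double is returned in the
; r3/r4 GPR pair expected by the 32-bit ABI.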
define double @i32_to_d(i32 signext %m) #0 {
; SPE-LABEL: i32_to_d:
; SPE: # %bb.0: # %entry
; SPE-NEXT: efdcfsi r4, r3
; SPE-NEXT: evmergehi r3, r4, r4
; SPE-NEXT: # kill: def $r4 killed $r4 killed $s4
; SPE-NEXT: # kill: def $r3 killed $r3 killed $s3
; SPE-NEXT: blr
entry:
%conv = tail call double @llvm.experimental.constrained.sitofp.f64.i32(i32 %m, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
ret double %conv
}
define double @i64_to_d(i64 %m) #0 {
; SPE-LABEL: i64_to_d:
; SPE: # %bb.0: # %entry
; SPE-NEXT: mflr r0
; SPE-NEXT: stwu r1, -16(r1)
; SPE-NEXT: stw r0, 20(r1)
; SPE-NEXT: .cfi_def_cfa_offset 16
; SPE-NEXT: .cfi_offset lr, 4
; SPE-NEXT: bl __floatdidf
; SPE-NEXT: evmergelo r4, r3, r4
; SPE-NEXT: evmergehi r3, r4, r4
; SPE-NEXT: lwz r0, 20(r1)
; SPE-NEXT: # kill: def $r3 killed $r3 killed $s3
; SPE-NEXT: # kill: def $r4 killed $r4 killed $s4
; SPE-NEXT: addi r1, r1, 16
; SPE-NEXT: mtlr r0
; SPE-NEXT: blr
entry:
%conv = tail call double @llvm.experimental.constrained.sitofp.f64.i64(i64 %m, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
ret double %conv
}
define double @u32_to_d(i32 zeroext %m) #0 {
; SPE-LABEL: u32_to_d:
; SPE: # %bb.0: # %entry
; SPE-NEXT: efdcfui r4, r3
; SPE-NEXT: evmergehi r3, r4, r4
; SPE-NEXT: # kill: def $r4 killed $r4 killed $s4
; SPE-NEXT: # kill: def $r3 killed $r3 killed $s3
; SPE-NEXT: blr
entry:
%conv = tail call double @llvm.experimental.constrained.uitofp.f64.i32(i32 %m, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
ret double %conv
}
define double @u64_to_d(i64 %m) #0 {
; SPE-LABEL: u64_to_d:
; SPE: # %bb.0: # %entry
; SPE-NEXT: mflr r0
; SPE-NEXT: stwu r1, -16(r1)
; SPE-NEXT: stw r0, 20(r1)
; SPE-NEXT: .cfi_def_cfa_offset 16
; SPE-NEXT: .cfi_offset lr, 4
; SPE-NEXT: bl __floatundidf
; SPE-NEXT: evmergelo r4, r3, r4
; SPE-NEXT: evmergehi r3, r4, r4
; SPE-NEXT: lwz r0, 20(r1)
; SPE-NEXT: # kill: def $r3 killed $r3 killed $s3
; SPE-NEXT: # kill: def $r4 killed $r4 killed $s4
; SPE-NEXT: addi r1, r1, 16
; SPE-NEXT: mtlr r0
; SPE-NEXT: blr
entry:
%conv = tail call double @llvm.experimental.constrained.uitofp.f64.i64(i64 %m, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
ret double %conv
}
define float @i32_to_f(i32 signext %m) #0 {
; SPE-LABEL: i32_to_f:
; SPE: # %bb.0: # %entry
; SPE-NEXT: efscfsi r3, r3
; SPE-NEXT: blr
entry:
%conv = tail call float @llvm.experimental.constrained.sitofp.f32.i32(i32 %m, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
ret float %conv
}
define float @i64_to_f(i64 %m) #0 {
; SPE-LABEL: i64_to_f:
; SPE: # %bb.0: # %entry
; SPE-NEXT: mflr r0
; SPE-NEXT: stwu r1, -16(r1)
; SPE-NEXT: stw r0, 20(r1)
; SPE-NEXT: .cfi_def_cfa_offset 16
; SPE-NEXT: .cfi_offset lr, 4
; SPE-NEXT: bl __floatdisf
; SPE-NEXT: lwz r0, 20(r1)
; SPE-NEXT: addi r1, r1, 16
; SPE-NEXT: mtlr r0
; SPE-NEXT: blr
entry:
%conv = tail call float @llvm.experimental.constrained.sitofp.f32.i64(i64 %m, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
ret float %conv
}
define float @u32_to_f(i32 zeroext %m) #0 {
; SPE-LABEL: u32_to_f:
; SPE: # %bb.0: # %entry
; SPE-NEXT: efscfui r3, r3
; SPE-NEXT: blr
entry:
%conv = tail call float @llvm.experimental.constrained.uitofp.f32.i32(i32 %m, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
ret float %conv
}
define float @u64_to_f(i64 %m) #0 {
; SPE-LABEL: u64_to_f:
; SPE: # %bb.0: # %entry
; SPE-NEXT: mflr r0
; SPE-NEXT: stwu r1, -16(r1)
; SPE-NEXT: stw r0, 20(r1)
; SPE-NEXT: .cfi_def_cfa_offset 16
; SPE-NEXT: .cfi_offset lr, 4
; SPE-NEXT: bl __floatundisf
; SPE-NEXT: lwz r0, 20(r1)
; SPE-NEXT: addi r1, r1, 16
; SPE-NEXT: mtlr r0
; SPE-NEXT: blr
entry:
%conv = tail call float @llvm.experimental.constrained.uitofp.f32.i64(i64 %m, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
ret float %conv
}
attributes #0 = { strictfp }