clang-p2996/llvm/test/CodeGen/RISCV/rv64i-double-softfloat.ll
Craig Topper eb44f3fc58 [RISCV] Add rv32i/rv64i command lines to some floating point tests. NFC
This improves our coverage of soft-float libcall lowering.

Remove most of the test cases from rv64i-single-softfloat.ll. They
were duplicated in the test files that now test soft float. Only
a couple of test cases for constrained FP remain; those should be
removed once we start supporting constrained FP.

This is a follow-up to D113528.
2021-11-11 10:56:27 -08:00
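For context, the constrained conversions exercised below correspond to plain C casts compiled with strict FP exception semantics. A minimal sketch of the source-level equivalent (assuming clang, where the FENV_ACCESS pragma or -ffp-exception-behavior=strict makes the frontend emit the constrained intrinsics; the build flags shown are illustrative):

    /* Built for a soft-float target, e.g. --target=riscv64 -march=rv64i -mabi=lp64,
       so the casts should lower to the __fixunsdfsi/__fixdfsi libcalls checked below. */
    #pragma STDC FENV_ACCESS ON

    unsigned strict_fp64_to_ui32(double a) { return (unsigned)a; } /* -> constrained.fptoui */
    int strict_fp64_to_si32(double a) { return (int)a; }           /* -> constrained.fptosi */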

; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefix=RV64I %s
; RUN: llc -mtriple=riscv64 -mattr=+f -target-abi lp64f -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefix=RV64IF %s

; The test cases check that we use the si versions of the conversions from
; double.
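; That is, f64 -> i32 conversions should call __fixdfsi and __fixunsdfsi (the
; "si", i.e. 32-bit integer, libcalls) rather than the "di" (64-bit) variants,
; even though i32 results are promoted to i64 on riscv64.
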
declare i32 @llvm.experimental.constrained.fptosi.i32.f64(double, metadata)
declare i32 @llvm.experimental.constrained.fptoui.i32.f64(double, metadata)

define i32 @strict_fp64_to_ui32(double %a) nounwind strictfp {
; RV64I-LABEL: strict_fp64_to_ui32:
; RV64I: # %bb.0: # %entry
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: call __fixunsdfsi@plt
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
; RV64IF-LABEL: strict_fp64_to_ui32:
; RV64IF: # %bb.0: # %entry
; RV64IF-NEXT: addi sp, sp, -16
; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IF-NEXT: call __fixunsdfsi@plt
; RV64IF-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IF-NEXT: addi sp, sp, 16
; RV64IF-NEXT: ret
entry:
%conv = tail call i32 @llvm.experimental.constrained.fptoui.i32.f64(double %a, metadata !"fpexcept.strict")
ret i32 %conv
}

define i32 @strict_fp64_to_si32(double %a) nounwind strictfp {
; RV64I-LABEL: strict_fp64_to_si32:
; RV64I: # %bb.0: # %entry
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: call __fixdfsi@plt
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
; RV64IF-LABEL: strict_fp64_to_si32:
; RV64IF: # %bb.0: # %entry
; RV64IF-NEXT: addi sp, sp, -16
; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IF-NEXT: call __fixdfsi@plt
; RV64IF-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IF-NEXT: addi sp, sp, 16
; RV64IF-NEXT: ret
entry:
%conv = tail call i32 @llvm.experimental.constrained.fptosi.i32.f64(double %a, metadata !"fpexcept.strict")
ret i32 %conv
}