clang-p2996/llvm/test/CodeGen/RISCV/float-round-conv.ll
Craig Topper 7b0c41841e [RISCV] Move compressible registers to the beginning of the FP allocation order.
We don't have very many compressible FP instructions; just load and store.
These instructions require the FP register to be in the range f8-f15.

This patch changes the FP allocation order to prioritize f10-f15. These are
also the first six FP argument registers (fa0-fa5), so I allocated them in
reverse order starting at f15 to avoid taking the first argument registers.
This appears to match GCC's allocation order.

Reviewed By: asb

Differential Revision: https://reviews.llvm.org/D146488
2023-03-27 17:29:28 -07:00

; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+f -verify-machineinstrs < %s \
; RUN: -target-abi=ilp32f | FileCheck -check-prefix=RV32IF %s
; RUN: llc -mtriple=riscv64 -mattr=+f -verify-machineinstrs < %s \
; RUN: -target-abi=lp64f | FileCheck -check-prefix=RV64IF %s
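; These tests cover @llvm.floor, @llvm.ceil, @llvm.trunc, @llvm.round, and
; @llvm.roundeven followed by fptosi/fptoui to i8/i16/i32/i64, plus
; float-returning forms at the end of the file. With the allocation-order
; change described above, FP temporaries are expected in the compressible
; registers fa5 (f15) and fa4 (f14).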
define signext i8 @test_floor_si8(float %x) {
; RV32IF-LABEL: test_floor_si8:
; RV32IF: # %bb.0:
; RV32IF-NEXT: fcvt.w.s a0, fa0, rdn
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: test_floor_si8:
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.l.s a0, fa0, rdn
; RV64IF-NEXT: ret
%a = call float @llvm.floor.f32(float %x)
%b = fptosi float %a to i8
ret i8 %b
}
define signext i16 @test_floor_si16(float %x) {
; RV32IF-LABEL: test_floor_si16:
; RV32IF: # %bb.0:
; RV32IF-NEXT: fcvt.w.s a0, fa0, rdn
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: test_floor_si16:
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.l.s a0, fa0, rdn
; RV64IF-NEXT: ret
%a = call float @llvm.floor.f32(float %x)
%b = fptosi float %a to i16
ret i16 %b
}
define signext i32 @test_floor_si32(float %x) {
; RV32IF-LABEL: test_floor_si32:
; RV32IF: # %bb.0:
; RV32IF-NEXT: fcvt.w.s a0, fa0, rdn
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: test_floor_si32:
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.w.s a0, fa0, rdn
; RV64IF-NEXT: ret
%a = call float @llvm.floor.f32(float %x)
%b = fptosi float %a to i32
ret i32 %b
}
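; RV32 has no single f32->i64 conversion instruction, so the floor is
; expanded inline and the conversion itself goes through the __fixsfdi
; libcall. `lui a0, 307200` materializes 0x4B000000 (307200 << 12), the bit
; pattern of 2^23 as a float: any |x| >= 2^23 is already integral, so the
; fcvt.w.s/fcvt.s.w round trip (which requires the value to fit in i32) only
; runs when |x| < 2^23. fsgnj.s copies the original sign back so that a zero
; result keeps the sign of the input.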
define i64 @test_floor_si64(float %x) {
; RV32IF-LABEL: test_floor_si64:
; RV32IF: # %bb.0:
; RV32IF-NEXT: lui a0, 307200
; RV32IF-NEXT: fmv.w.x fa5, a0
; RV32IF-NEXT: fabs.s fa4, fa0
; RV32IF-NEXT: flt.s a0, fa4, fa5
; RV32IF-NEXT: beqz a0, .LBB3_2
; RV32IF-NEXT: # %bb.1:
; RV32IF-NEXT: fcvt.w.s a0, fa0, rdn
; RV32IF-NEXT: fcvt.s.w fa5, a0, rdn
; RV32IF-NEXT: fsgnj.s fa0, fa5, fa0
; RV32IF-NEXT: .LBB3_2:
; RV32IF-NEXT: addi sp, sp, -16
; RV32IF-NEXT: .cfi_def_cfa_offset 16
; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT: .cfi_offset ra, -4
; RV32IF-NEXT: call __fixsfdi@plt
; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT: addi sp, sp, 16
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: test_floor_si64:
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.l.s a0, fa0, rdn
; RV64IF-NEXT: ret
%a = call float @llvm.floor.f32(float %x)
%b = fptosi float %a to i64
ret i64 %b
}
define zeroext i8 @test_floor_ui8(float %x) {
; RV32IF-LABEL: test_floor_ui8:
; RV32IF: # %bb.0:
; RV32IF-NEXT: fcvt.wu.s a0, fa0, rdn
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: test_floor_ui8:
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.lu.s a0, fa0, rdn
; RV64IF-NEXT: ret
%a = call float @llvm.floor.f32(float %x)
%b = fptoui float %a to i8
ret i8 %b
}
define zeroext i16 @test_floor_ui16(float %x) {
; RV32IF-LABEL: test_floor_ui16:
; RV32IF: # %bb.0:
; RV32IF-NEXT: fcvt.wu.s a0, fa0, rdn
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: test_floor_ui16:
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.lu.s a0, fa0, rdn
; RV64IF-NEXT: ret
%a = call float @llvm.floor.f32(float %x)
%b = fptoui float %a to i16
ret i16 %b
}
define signext i32 @test_floor_ui32(float %x) {
; RV32IF-LABEL: test_floor_ui32:
; RV32IF: # %bb.0:
; RV32IF-NEXT: fcvt.wu.s a0, fa0, rdn
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: test_floor_ui32:
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.wu.s a0, fa0, rdn
; RV64IF-NEXT: ret
%a = call float @llvm.floor.f32(float %x)
%b = fptoui float %a to i32
ret i32 %b
}
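; The unsigned case below still uses the signed fcvt.w.s/fcvt.s.w pair for
; the inline floor step: the rounding operates on the float value, which may
; be negative; only the final __fixunssfdi conversion is unsigned.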
define i64 @test_floor_ui64(float %x) {
; RV32IF-LABEL: test_floor_ui64:
; RV32IF: # %bb.0:
; RV32IF-NEXT: lui a0, 307200
; RV32IF-NEXT: fmv.w.x fa5, a0
; RV32IF-NEXT: fabs.s fa4, fa0
; RV32IF-NEXT: flt.s a0, fa4, fa5
; RV32IF-NEXT: beqz a0, .LBB7_2
; RV32IF-NEXT: # %bb.1:
; RV32IF-NEXT: fcvt.w.s a0, fa0, rdn
; RV32IF-NEXT: fcvt.s.w fa5, a0, rdn
; RV32IF-NEXT: fsgnj.s fa0, fa5, fa0
; RV32IF-NEXT: .LBB7_2:
; RV32IF-NEXT: addi sp, sp, -16
; RV32IF-NEXT: .cfi_def_cfa_offset 16
; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT: .cfi_offset ra, -4
; RV32IF-NEXT: call __fixunssfdi@plt
; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT: addi sp, sp, 16
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: test_floor_ui64:
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.lu.s a0, fa0, rdn
; RV64IF-NEXT: ret
%a = call float @llvm.floor.f32(float %x)
%b = fptoui float %a to i64
ret i64 %b
}
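; The same expansion repeats for the remaining intrinsics; only the static
; rounding mode changes: rup (ceil), rtz (trunc), rmm (round), and rne
; (roundeven), matching rdn (floor) above.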
define signext i8 @test_ceil_si8(float %x) {
; RV32IF-LABEL: test_ceil_si8:
; RV32IF: # %bb.0:
; RV32IF-NEXT: fcvt.w.s a0, fa0, rup
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: test_ceil_si8:
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.l.s a0, fa0, rup
; RV64IF-NEXT: ret
%a = call float @llvm.ceil.f32(float %x)
%b = fptosi float %a to i8
ret i8 %b
}
define signext i16 @test_ceil_si16(float %x) {
; RV32IF-LABEL: test_ceil_si16:
; RV32IF: # %bb.0:
; RV32IF-NEXT: fcvt.w.s a0, fa0, rup
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: test_ceil_si16:
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.l.s a0, fa0, rup
; RV64IF-NEXT: ret
%a = call float @llvm.ceil.f32(float %x)
%b = fptosi float %a to i16
ret i16 %b
}
define signext i32 @test_ceil_si32(float %x) {
; RV32IF-LABEL: test_ceil_si32:
; RV32IF: # %bb.0:
; RV32IF-NEXT: fcvt.w.s a0, fa0, rup
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: test_ceil_si32:
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.w.s a0, fa0, rup
; RV64IF-NEXT: ret
%a = call float @llvm.ceil.f32(float %x)
%b = fptosi float %a to i32
ret i32 %b
}
define i64 @test_ceil_si64(float %x) {
; RV32IF-LABEL: test_ceil_si64:
; RV32IF: # %bb.0:
; RV32IF-NEXT: lui a0, 307200
; RV32IF-NEXT: fmv.w.x fa5, a0
; RV32IF-NEXT: fabs.s fa4, fa0
; RV32IF-NEXT: flt.s a0, fa4, fa5
; RV32IF-NEXT: beqz a0, .LBB11_2
; RV32IF-NEXT: # %bb.1:
; RV32IF-NEXT: fcvt.w.s a0, fa0, rup
; RV32IF-NEXT: fcvt.s.w fa5, a0, rup
; RV32IF-NEXT: fsgnj.s fa0, fa5, fa0
; RV32IF-NEXT: .LBB11_2:
; RV32IF-NEXT: addi sp, sp, -16
; RV32IF-NEXT: .cfi_def_cfa_offset 16
; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT: .cfi_offset ra, -4
; RV32IF-NEXT: call __fixsfdi@plt
; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT: addi sp, sp, 16
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: test_ceil_si64:
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.l.s a0, fa0, rup
; RV64IF-NEXT: ret
%a = call float @llvm.ceil.f32(float %x)
%b = fptosi float %a to i64
ret i64 %b
}
define zeroext i8 @test_ceil_ui8(float %x) {
; RV32IF-LABEL: test_ceil_ui8:
; RV32IF: # %bb.0:
; RV32IF-NEXT: fcvt.wu.s a0, fa0, rup
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: test_ceil_ui8:
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.lu.s a0, fa0, rup
; RV64IF-NEXT: ret
%a = call float @llvm.ceil.f32(float %x)
%b = fptoui float %a to i8
ret i8 %b
}
define zeroext i16 @test_ceil_ui16(float %x) {
; RV32IF-LABEL: test_ceil_ui16:
; RV32IF: # %bb.0:
; RV32IF-NEXT: fcvt.wu.s a0, fa0, rup
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: test_ceil_ui16:
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.lu.s a0, fa0, rup
; RV64IF-NEXT: ret
%a = call float @llvm.ceil.f32(float %x)
%b = fptoui float %a to i16
ret i16 %b
}
define signext i32 @test_ceil_ui32(float %x) {
; RV32IF-LABEL: test_ceil_ui32:
; RV32IF: # %bb.0:
; RV32IF-NEXT: fcvt.wu.s a0, fa0, rup
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: test_ceil_ui32:
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.wu.s a0, fa0, rup
; RV64IF-NEXT: ret
%a = call float @llvm.ceil.f32(float %x)
%b = fptoui float %a to i32
ret i32 %b
}
define i64 @test_ceil_ui64(float %x) {
; RV32IF-LABEL: test_ceil_ui64:
; RV32IF: # %bb.0:
; RV32IF-NEXT: lui a0, 307200
; RV32IF-NEXT: fmv.w.x fa5, a0
; RV32IF-NEXT: fabs.s fa4, fa0
; RV32IF-NEXT: flt.s a0, fa4, fa5
; RV32IF-NEXT: beqz a0, .LBB15_2
; RV32IF-NEXT: # %bb.1:
; RV32IF-NEXT: fcvt.w.s a0, fa0, rup
; RV32IF-NEXT: fcvt.s.w fa5, a0, rup
; RV32IF-NEXT: fsgnj.s fa0, fa5, fa0
; RV32IF-NEXT: .LBB15_2:
; RV32IF-NEXT: addi sp, sp, -16
; RV32IF-NEXT: .cfi_def_cfa_offset 16
; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT: .cfi_offset ra, -4
; RV32IF-NEXT: call __fixunssfdi@plt
; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT: addi sp, sp, 16
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: test_ceil_ui64:
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.lu.s a0, fa0, rup
; RV64IF-NEXT: ret
%a = call float @llvm.ceil.f32(float %x)
%b = fptoui float %a to i64
ret i64 %b
}
define signext i8 @test_trunc_si8(float %x) {
; RV32IF-LABEL: test_trunc_si8:
; RV32IF: # %bb.0:
; RV32IF-NEXT: fcvt.w.s a0, fa0, rtz
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: test_trunc_si8:
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.l.s a0, fa0, rtz
; RV64IF-NEXT: ret
%a = call float @llvm.trunc.f32(float %x)
%b = fptosi float %a to i8
ret i8 %b
}
define signext i16 @test_trunc_si16(float %x) {
; RV32IF-LABEL: test_trunc_si16:
; RV32IF: # %bb.0:
; RV32IF-NEXT: fcvt.w.s a0, fa0, rtz
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: test_trunc_si16:
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.l.s a0, fa0, rtz
; RV64IF-NEXT: ret
%a = call float @llvm.trunc.f32(float %x)
%b = fptosi float %a to i16
ret i16 %b
}
define signext i32 @test_trunc_si32(float %x) {
; RV32IF-LABEL: test_trunc_si32:
; RV32IF: # %bb.0:
; RV32IF-NEXT: fcvt.w.s a0, fa0, rtz
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: test_trunc_si32:
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.w.s a0, fa0, rtz
; RV64IF-NEXT: ret
%a = call float @llvm.trunc.f32(float %x)
%b = fptosi float %a to i32
ret i32 %b
}
define i64 @test_trunc_si64(float %x) {
; RV32IF-LABEL: test_trunc_si64:
; RV32IF: # %bb.0:
; RV32IF-NEXT: lui a0, 307200
; RV32IF-NEXT: fmv.w.x fa5, a0
; RV32IF-NEXT: fabs.s fa4, fa0
; RV32IF-NEXT: flt.s a0, fa4, fa5
; RV32IF-NEXT: beqz a0, .LBB19_2
; RV32IF-NEXT: # %bb.1:
; RV32IF-NEXT: fcvt.w.s a0, fa0, rtz
; RV32IF-NEXT: fcvt.s.w fa5, a0, rtz
; RV32IF-NEXT: fsgnj.s fa0, fa5, fa0
; RV32IF-NEXT: .LBB19_2:
; RV32IF-NEXT: addi sp, sp, -16
; RV32IF-NEXT: .cfi_def_cfa_offset 16
; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT: .cfi_offset ra, -4
; RV32IF-NEXT: call __fixsfdi@plt
; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT: addi sp, sp, 16
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: test_trunc_si64:
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.l.s a0, fa0, rtz
; RV64IF-NEXT: ret
%a = call float @llvm.trunc.f32(float %x)
%b = fptosi float %a to i64
ret i64 %b
}
define zeroext i8 @test_trunc_ui8(float %x) {
; RV32IF-LABEL: test_trunc_ui8:
; RV32IF: # %bb.0:
; RV32IF-NEXT: fcvt.wu.s a0, fa0, rtz
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: test_trunc_ui8:
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.lu.s a0, fa0, rtz
; RV64IF-NEXT: ret
%a = call float @llvm.trunc.f32(float %x)
%b = fptoui float %a to i8
ret i8 %b
}
define zeroext i16 @test_trunc_ui16(float %x) {
; RV32IF-LABEL: test_trunc_ui16:
; RV32IF: # %bb.0:
; RV32IF-NEXT: fcvt.wu.s a0, fa0, rtz
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: test_trunc_ui16:
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.lu.s a0, fa0, rtz
; RV64IF-NEXT: ret
%a = call float @llvm.trunc.f32(float %x)
%b = fptoui float %a to i16
ret i16 %b
}
define signext i32 @test_trunc_ui32(float %x) {
; RV32IF-LABEL: test_trunc_ui32:
; RV32IF: # %bb.0:
; RV32IF-NEXT: fcvt.wu.s a0, fa0, rtz
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: test_trunc_ui32:
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.wu.s a0, fa0, rtz
; RV64IF-NEXT: ret
%a = call float @llvm.trunc.f32(float %x)
%b = fptoui float %a to i32
ret i32 %b
}
define i64 @test_trunc_ui64(float %x) {
; RV32IF-LABEL: test_trunc_ui64:
; RV32IF: # %bb.0:
; RV32IF-NEXT: lui a0, 307200
; RV32IF-NEXT: fmv.w.x fa5, a0
; RV32IF-NEXT: fabs.s fa4, fa0
; RV32IF-NEXT: flt.s a0, fa4, fa5
; RV32IF-NEXT: beqz a0, .LBB23_2
; RV32IF-NEXT: # %bb.1:
; RV32IF-NEXT: fcvt.w.s a0, fa0, rtz
; RV32IF-NEXT: fcvt.s.w fa5, a0, rtz
; RV32IF-NEXT: fsgnj.s fa0, fa5, fa0
; RV32IF-NEXT: .LBB23_2:
; RV32IF-NEXT: addi sp, sp, -16
; RV32IF-NEXT: .cfi_def_cfa_offset 16
; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT: .cfi_offset ra, -4
; RV32IF-NEXT: call __fixunssfdi@plt
; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT: addi sp, sp, 16
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: test_trunc_ui64:
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.lu.s a0, fa0, rtz
; RV64IF-NEXT: ret
%a = call float @llvm.trunc.f32(float %x)
%b = fptoui float %a to i64
ret i64 %b
}
define signext i8 @test_round_si8(float %x) {
; RV32IF-LABEL: test_round_si8:
; RV32IF: # %bb.0:
; RV32IF-NEXT: fcvt.w.s a0, fa0, rmm
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: test_round_si8:
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.l.s a0, fa0, rmm
; RV64IF-NEXT: ret
%a = call float @llvm.round.f32(float %x)
%b = fptosi float %a to i8
ret i8 %b
}
define signext i16 @test_round_si16(float %x) {
; RV32IF-LABEL: test_round_si16:
; RV32IF: # %bb.0:
; RV32IF-NEXT: fcvt.w.s a0, fa0, rmm
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: test_round_si16:
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.l.s a0, fa0, rmm
; RV64IF-NEXT: ret
%a = call float @llvm.round.f32(float %x)
%b = fptosi float %a to i16
ret i16 %b
}
define signext i32 @test_round_si32(float %x) {
; RV32IF-LABEL: test_round_si32:
; RV32IF: # %bb.0:
; RV32IF-NEXT: fcvt.w.s a0, fa0, rmm
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: test_round_si32:
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.w.s a0, fa0, rmm
; RV64IF-NEXT: ret
%a = call float @llvm.round.f32(float %x)
%b = fptosi float %a to i32
ret i32 %b
}
define i64 @test_round_si64(float %x) {
; RV32IF-LABEL: test_round_si64:
; RV32IF: # %bb.0:
; RV32IF-NEXT: lui a0, 307200
; RV32IF-NEXT: fmv.w.x fa5, a0
; RV32IF-NEXT: fabs.s fa4, fa0
; RV32IF-NEXT: flt.s a0, fa4, fa5
; RV32IF-NEXT: beqz a0, .LBB27_2
; RV32IF-NEXT: # %bb.1:
; RV32IF-NEXT: fcvt.w.s a0, fa0, rmm
; RV32IF-NEXT: fcvt.s.w fa5, a0, rmm
; RV32IF-NEXT: fsgnj.s fa0, fa5, fa0
; RV32IF-NEXT: .LBB27_2:
; RV32IF-NEXT: addi sp, sp, -16
; RV32IF-NEXT: .cfi_def_cfa_offset 16
; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT: .cfi_offset ra, -4
; RV32IF-NEXT: call __fixsfdi@plt
; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT: addi sp, sp, 16
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: test_round_si64:
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.l.s a0, fa0, rmm
; RV64IF-NEXT: ret
%a = call float @llvm.round.f32(float %x)
%b = fptosi float %a to i64
ret i64 %b
}
define zeroext i8 @test_round_ui8(float %x) {
; RV32IF-LABEL: test_round_ui8:
; RV32IF: # %bb.0:
; RV32IF-NEXT: fcvt.wu.s a0, fa0, rmm
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: test_round_ui8:
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.lu.s a0, fa0, rmm
; RV64IF-NEXT: ret
%a = call float @llvm.round.f32(float %x)
%b = fptoui float %a to i8
ret i8 %b
}
define zeroext i16 @test_round_ui16(float %x) {
; RV32IF-LABEL: test_round_ui16:
; RV32IF: # %bb.0:
; RV32IF-NEXT: fcvt.wu.s a0, fa0, rmm
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: test_round_ui16:
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.lu.s a0, fa0, rmm
; RV64IF-NEXT: ret
%a = call float @llvm.round.f32(float %x)
%b = fptoui float %a to i16
ret i16 %b
}
define signext i32 @test_round_ui32(float %x) {
; RV32IF-LABEL: test_round_ui32:
; RV32IF: # %bb.0:
; RV32IF-NEXT: fcvt.wu.s a0, fa0, rmm
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: test_round_ui32:
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.wu.s a0, fa0, rmm
; RV64IF-NEXT: ret
%a = call float @llvm.round.f32(float %x)
%b = fptoui float %a to i32
ret i32 %b
}
define i64 @test_round_ui64(float %x) {
; RV32IF-LABEL: test_round_ui64:
; RV32IF: # %bb.0:
; RV32IF-NEXT: lui a0, 307200
; RV32IF-NEXT: fmv.w.x fa5, a0
; RV32IF-NEXT: fabs.s fa4, fa0
; RV32IF-NEXT: flt.s a0, fa4, fa5
; RV32IF-NEXT: beqz a0, .LBB31_2
; RV32IF-NEXT: # %bb.1:
; RV32IF-NEXT: fcvt.w.s a0, fa0, rmm
; RV32IF-NEXT: fcvt.s.w fa5, a0, rmm
; RV32IF-NEXT: fsgnj.s fa0, fa5, fa0
; RV32IF-NEXT: .LBB31_2:
; RV32IF-NEXT: addi sp, sp, -16
; RV32IF-NEXT: .cfi_def_cfa_offset 16
; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT: .cfi_offset ra, -4
; RV32IF-NEXT: call __fixunssfdi@plt
; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT: addi sp, sp, 16
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: test_round_ui64:
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.lu.s a0, fa0, rmm
; RV64IF-NEXT: ret
%a = call float @llvm.round.f32(float %x)
%b = fptoui float %a to i64
ret i64 %b
}
define signext i8 @test_roundeven_si8(float %x) {
; RV32IF-LABEL: test_roundeven_si8:
; RV32IF: # %bb.0:
; RV32IF-NEXT: fcvt.w.s a0, fa0, rne
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: test_roundeven_si8:
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.l.s a0, fa0, rne
; RV64IF-NEXT: ret
%a = call float @llvm.roundeven.f32(float %x)
%b = fptosi float %a to i8
ret i8 %b
}
define signext i16 @test_roundeven_si16(float %x) {
; RV32IF-LABEL: test_roundeven_si16:
; RV32IF: # %bb.0:
; RV32IF-NEXT: fcvt.w.s a0, fa0, rne
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: test_roundeven_si16:
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.l.s a0, fa0, rne
; RV64IF-NEXT: ret
%a = call float @llvm.roundeven.f32(float %x)
%b = fptosi float %a to i16
ret i16 %b
}
define signext i32 @test_roundeven_si32(float %x) {
; RV32IF-LABEL: test_roundeven_si32:
; RV32IF: # %bb.0:
; RV32IF-NEXT: fcvt.w.s a0, fa0, rne
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: test_roundeven_si32:
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.w.s a0, fa0, rne
; RV64IF-NEXT: ret
%a = call float @llvm.roundeven.f32(float %x)
%b = fptosi float %a to i32
ret i32 %b
}
define i64 @test_roundeven_si64(float %x) {
; RV32IF-LABEL: test_roundeven_si64:
; RV32IF: # %bb.0:
; RV32IF-NEXT: lui a0, 307200
; RV32IF-NEXT: fmv.w.x fa5, a0
; RV32IF-NEXT: fabs.s fa4, fa0
; RV32IF-NEXT: flt.s a0, fa4, fa5
; RV32IF-NEXT: beqz a0, .LBB35_2
; RV32IF-NEXT: # %bb.1:
; RV32IF-NEXT: fcvt.w.s a0, fa0, rne
; RV32IF-NEXT: fcvt.s.w fa5, a0, rne
; RV32IF-NEXT: fsgnj.s fa0, fa5, fa0
; RV32IF-NEXT: .LBB35_2:
; RV32IF-NEXT: addi sp, sp, -16
; RV32IF-NEXT: .cfi_def_cfa_offset 16
; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT: .cfi_offset ra, -4
; RV32IF-NEXT: call __fixsfdi@plt
; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT: addi sp, sp, 16
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: test_roundeven_si64:
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.l.s a0, fa0, rne
; RV64IF-NEXT: ret
%a = call float @llvm.roundeven.f32(float %x)
%b = fptosi float %a to i64
ret i64 %b
}
define zeroext i8 @test_roundeven_ui8(float %x) {
; RV32IF-LABEL: test_roundeven_ui8:
; RV32IF: # %bb.0:
; RV32IF-NEXT: fcvt.wu.s a0, fa0, rne
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: test_roundeven_ui8:
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.lu.s a0, fa0, rne
; RV64IF-NEXT: ret
%a = call float @llvm.roundeven.f32(float %x)
%b = fptoui float %a to i8
ret i8 %b
}
define zeroext i16 @test_roundeven_ui16(float %x) {
; RV32IF-LABEL: test_roundeven_ui16:
; RV32IF: # %bb.0:
; RV32IF-NEXT: fcvt.wu.s a0, fa0, rne
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: test_roundeven_ui16:
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.lu.s a0, fa0, rne
; RV64IF-NEXT: ret
%a = call float @llvm.roundeven.f32(float %x)
%b = fptoui float %a to i16
ret i16 %b
}
define signext i32 @test_roundeven_ui32(float %x) {
; RV32IF-LABEL: test_roundeven_ui32:
; RV32IF: # %bb.0:
; RV32IF-NEXT: fcvt.wu.s a0, fa0, rne
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: test_roundeven_ui32:
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.wu.s a0, fa0, rne
; RV64IF-NEXT: ret
%a = call float @llvm.roundeven.f32(float %x)
%b = fptoui float %a to i32
ret i32 %b
}
define i64 @test_roundeven_ui64(float %x) {
; RV32IF-LABEL: test_roundeven_ui64:
; RV32IF: # %bb.0:
; RV32IF-NEXT: lui a0, 307200
; RV32IF-NEXT: fmv.w.x fa5, a0
; RV32IF-NEXT: fabs.s fa4, fa0
; RV32IF-NEXT: flt.s a0, fa4, fa5
; RV32IF-NEXT: beqz a0, .LBB39_2
; RV32IF-NEXT: # %bb.1:
; RV32IF-NEXT: fcvt.w.s a0, fa0, rne
; RV32IF-NEXT: fcvt.s.w fa5, a0, rne
; RV32IF-NEXT: fsgnj.s fa0, fa5, fa0
; RV32IF-NEXT: .LBB39_2:
; RV32IF-NEXT: addi sp, sp, -16
; RV32IF-NEXT: .cfi_def_cfa_offset 16
; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT: .cfi_offset ra, -4
; RV32IF-NEXT: call __fixunssfdi@plt
; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT: addi sp, sp, 16
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: test_roundeven_ui64:
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.lu.s a0, fa0, rne
; RV64IF-NEXT: ret
%a = call float @llvm.roundeven.f32(float %x)
%b = fptoui float %a to i64
ret i64 %b
}
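; The remaining tests return the rounded float itself. The base F extension
; has no instruction that rounds a float to an integral float, so the same
; 2^23-threshold expansion is used; when |x| >= 2^23 the input is already
; integral and is returned unchanged.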
define float @test_floor_float(float %x) {
; RV32IFD-LABEL: test_floor_float:
; RV32IFD: # %bb.0:
; RV32IFD-NEXT: addi sp, sp, -16
; RV32IFD-NEXT: .cfi_def_cfa_offset 16
; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: .cfi_offset ra, -4
; RV32IFD-NEXT: call floor@plt
; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
;
; RV64IFD-LABEL: test_floor_float:
; RV64IFD: # %bb.0:
; RV64IFD-NEXT: addi sp, sp, -16
; RV64IFD-NEXT: .cfi_def_cfa_offset 16
; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT: .cfi_offset ra, -8
; RV64IFD-NEXT: call floor@plt
; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT: addi sp, sp, 16
; RV64IFD-NEXT: ret
; RV32IF-LABEL: test_floor_float:
; RV32IF: # %bb.0:
; RV32IF-NEXT: lui a0, 307200
; RV32IF-NEXT: fmv.w.x fa5, a0
; RV32IF-NEXT: fabs.s fa4, fa0
; RV32IF-NEXT: flt.s a0, fa4, fa5
; RV32IF-NEXT: beqz a0, .LBB40_2
; RV32IF-NEXT: # %bb.1:
; RV32IF-NEXT: fcvt.w.s a0, fa0, rdn
; RV32IF-NEXT: fcvt.s.w fa5, a0, rdn
; RV32IF-NEXT: fsgnj.s fa0, fa5, fa0
; RV32IF-NEXT: .LBB40_2:
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: test_floor_float:
; RV64IF: # %bb.0:
; RV64IF-NEXT: lui a0, 307200
; RV64IF-NEXT: fmv.w.x fa5, a0
; RV64IF-NEXT: fabs.s fa4, fa0
; RV64IF-NEXT: flt.s a0, fa4, fa5
; RV64IF-NEXT: beqz a0, .LBB40_2
; RV64IF-NEXT: # %bb.1:
; RV64IF-NEXT: fcvt.w.s a0, fa0, rdn
; RV64IF-NEXT: fcvt.s.w fa5, a0, rdn
; RV64IF-NEXT: fsgnj.s fa0, fa5, fa0
; RV64IF-NEXT: .LBB40_2:
; RV64IF-NEXT: ret
%a = call float @llvm.floor.f32(float %x)
ret float %a
}
define float @test_ceil_float(float %x) {
; RV32IFD-LABEL: test_ceil_float:
; RV32IFD: # %bb.0:
; RV32IFD-NEXT: addi sp, sp, -16
; RV32IFD-NEXT: .cfi_def_cfa_offset 16
; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: .cfi_offset ra, -4
; RV32IFD-NEXT: call ceil@plt
; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
;
; RV64IFD-LABEL: test_ceil_float:
; RV64IFD: # %bb.0:
; RV64IFD-NEXT: addi sp, sp, -16
; RV64IFD-NEXT: .cfi_def_cfa_offset 16
; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT: .cfi_offset ra, -8
; RV64IFD-NEXT: call ceil@plt
; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT: addi sp, sp, 16
; RV64IFD-NEXT: ret
; RV32IF-LABEL: test_ceil_float:
; RV32IF: # %bb.0:
; RV32IF-NEXT: lui a0, 307200
; RV32IF-NEXT: fmv.w.x fa5, a0
; RV32IF-NEXT: fabs.s fa4, fa0
; RV32IF-NEXT: flt.s a0, fa4, fa5
; RV32IF-NEXT: beqz a0, .LBB41_2
; RV32IF-NEXT: # %bb.1:
; RV32IF-NEXT: fcvt.w.s a0, fa0, rup
; RV32IF-NEXT: fcvt.s.w fa5, a0, rup
; RV32IF-NEXT: fsgnj.s fa0, fa5, fa0
; RV32IF-NEXT: .LBB41_2:
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: test_ceil_float:
; RV64IF: # %bb.0:
; RV64IF-NEXT: lui a0, 307200
; RV64IF-NEXT: fmv.w.x fa5, a0
; RV64IF-NEXT: fabs.s fa4, fa0
; RV64IF-NEXT: flt.s a0, fa4, fa5
; RV64IF-NEXT: beqz a0, .LBB41_2
; RV64IF-NEXT: # %bb.1:
; RV64IF-NEXT: fcvt.w.s a0, fa0, rup
; RV64IF-NEXT: fcvt.s.w fa5, a0, rup
; RV64IF-NEXT: fsgnj.s fa0, fa5, fa0
; RV64IF-NEXT: .LBB41_2:
; RV64IF-NEXT: ret
%a = call float @llvm.ceil.f32(float %x)
ret float %a
}
define float @test_trunc_float(float %x) {
; RV32IFD-LABEL: test_trunc_float:
; RV32IFD: # %bb.0:
; RV32IFD-NEXT: addi sp, sp, -16
; RV32IFD-NEXT: .cfi_def_cfa_offset 16
; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: .cfi_offset ra, -4
; RV32IFD-NEXT: call trunc@plt
; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
;
; RV64IFD-LABEL: test_trunc_float:
; RV64IFD: # %bb.0:
; RV64IFD-NEXT: addi sp, sp, -16
; RV64IFD-NEXT: .cfi_def_cfa_offset 16
; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT: .cfi_offset ra, -8
; RV64IFD-NEXT: call trunc@plt
; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT: addi sp, sp, 16
; RV64IFD-NEXT: ret
; RV32IF-LABEL: test_trunc_float:
; RV32IF: # %bb.0:
; RV32IF-NEXT: lui a0, 307200
; RV32IF-NEXT: fmv.w.x fa5, a0
; RV32IF-NEXT: fabs.s fa4, fa0
; RV32IF-NEXT: flt.s a0, fa4, fa5
; RV32IF-NEXT: beqz a0, .LBB42_2
; RV32IF-NEXT: # %bb.1:
; RV32IF-NEXT: fcvt.w.s a0, fa0, rtz
; RV32IF-NEXT: fcvt.s.w fa5, a0, rtz
; RV32IF-NEXT: fsgnj.s fa0, fa5, fa0
; RV32IF-NEXT: .LBB42_2:
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: test_trunc_float:
; RV64IF: # %bb.0:
; RV64IF-NEXT: lui a0, 307200
; RV64IF-NEXT: fmv.w.x fa5, a0
; RV64IF-NEXT: fabs.s fa4, fa0
; RV64IF-NEXT: flt.s a0, fa4, fa5
; RV64IF-NEXT: beqz a0, .LBB42_2
; RV64IF-NEXT: # %bb.1:
; RV64IF-NEXT: fcvt.w.s a0, fa0, rtz
; RV64IF-NEXT: fcvt.s.w fa5, a0, rtz
; RV64IF-NEXT: fsgnj.s fa0, fa5, fa0
; RV64IF-NEXT: .LBB42_2:
; RV64IF-NEXT: ret
%a = call float @llvm.trunc.f32(float %x)
ret float %a
}
define float @test_round_float(float %x) {
; RV32IFD-LABEL: test_round_float:
; RV32IFD: # %bb.0:
; RV32IFD-NEXT: addi sp, sp, -16
; RV32IFD-NEXT: .cfi_def_cfa_offset 16
; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: .cfi_offset ra, -4
; RV32IFD-NEXT: call round@plt
; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
;
; RV64IFD-LABEL: test_round_float:
; RV64IFD: # %bb.0:
; RV64IFD-NEXT: addi sp, sp, -16
; RV64IFD-NEXT: .cfi_def_cfa_offset 16
; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT: .cfi_offset ra, -8
; RV64IFD-NEXT: call round@plt
; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT: addi sp, sp, 16
; RV64IFD-NEXT: ret
; RV32IF-LABEL: test_round_float:
; RV32IF: # %bb.0:
; RV32IF-NEXT: lui a0, 307200
; RV32IF-NEXT: fmv.w.x fa5, a0
; RV32IF-NEXT: fabs.s fa4, fa0
; RV32IF-NEXT: flt.s a0, fa4, fa5
; RV32IF-NEXT: beqz a0, .LBB43_2
; RV32IF-NEXT: # %bb.1:
; RV32IF-NEXT: fcvt.w.s a0, fa0, rmm
; RV32IF-NEXT: fcvt.s.w fa5, a0, rmm
; RV32IF-NEXT: fsgnj.s fa0, fa5, fa0
; RV32IF-NEXT: .LBB43_2:
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: test_round_float:
; RV64IF: # %bb.0:
; RV64IF-NEXT: lui a0, 307200
; RV64IF-NEXT: fmv.w.x fa5, a0
; RV64IF-NEXT: fabs.s fa4, fa0
; RV64IF-NEXT: flt.s a0, fa4, fa5
; RV64IF-NEXT: beqz a0, .LBB43_2
; RV64IF-NEXT: # %bb.1:
; RV64IF-NEXT: fcvt.w.s a0, fa0, rmm
; RV64IF-NEXT: fcvt.s.w fa5, a0, rmm
; RV64IF-NEXT: fsgnj.s fa0, fa5, fa0
; RV64IF-NEXT: .LBB43_2:
; RV64IF-NEXT: ret
%a = call float @llvm.round.f32(float %x)
ret float %a
}
define float @test_roundeven_float(float %x) {
; RV32IFD-LABEL: test_roundeven_float:
; RV32IFD: # %bb.0:
; RV32IFD-NEXT: addi sp, sp, -16
; RV32IFD-NEXT: .cfi_def_cfa_offset 16
; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: .cfi_offset ra, -4
; RV32IFD-NEXT: call roundeven@plt
; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
;
; RV64IFD-LABEL: test_roundeven_float:
; RV64IFD: # %bb.0:
; RV64IFD-NEXT: addi sp, sp, -16
; RV64IFD-NEXT: .cfi_def_cfa_offset 16
; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT: .cfi_offset ra, -8
; RV64IFD-NEXT: call roundeven@plt
; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT: addi sp, sp, 16
; RV64IFD-NEXT: ret
; RV32IF-LABEL: test_roundeven_float:
; RV32IF: # %bb.0:
; RV32IF-NEXT: lui a0, 307200
; RV32IF-NEXT: fmv.w.x fa5, a0
; RV32IF-NEXT: fabs.s fa4, fa0
; RV32IF-NEXT: flt.s a0, fa4, fa5
; RV32IF-NEXT: beqz a0, .LBB44_2
; RV32IF-NEXT: # %bb.1:
; RV32IF-NEXT: fcvt.w.s a0, fa0, rne
; RV32IF-NEXT: fcvt.s.w fa5, a0, rne
; RV32IF-NEXT: fsgnj.s fa0, fa5, fa0
; RV32IF-NEXT: .LBB44_2:
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: test_roundeven_float:
; RV64IF: # %bb.0:
; RV64IF-NEXT: lui a0, 307200
; RV64IF-NEXT: fmv.w.x fa5, a0
; RV64IF-NEXT: fabs.s fa4, fa0
; RV64IF-NEXT: flt.s a0, fa4, fa5
; RV64IF-NEXT: beqz a0, .LBB44_2
; RV64IF-NEXT: # %bb.1:
; RV64IF-NEXT: fcvt.w.s a0, fa0, rne
; RV64IF-NEXT: fcvt.s.w fa5, a0, rne
; RV64IF-NEXT: fsgnj.s fa0, fa5, fa0
; RV64IF-NEXT: .LBB44_2:
; RV64IF-NEXT: ret
%a = call float @llvm.roundeven.f32(float %x)
ret float %a
}
declare float @llvm.floor.f32(float)
declare float @llvm.ceil.f32(float)
declare float @llvm.trunc.f32(float)
declare float @llvm.round.f32(float)
declare float @llvm.roundeven.f32(float)