clang-p2996/llvm/test/CodeGen/RISCV/float-round-conv.ll
Craig Topper e94dc58dff [RISCV] Inline scalar ceil/floor/trunc/rint/round/roundeven.
This avoids the call overhead as well as the save/restore of
fflags and the snan handling in the libm function.

The save/restore of fflags and the snan handling are needed for
correctness with -ftrapping-math. I think we can ignore them in the
default environment.

The inline sequence will generate an invalid exception for nan
and an inexact exception if fractional bits are discarded.
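
For illustration, the same two flags can be observed from portable C
via lrint, which (like fcvt) raises invalid for a NaN input and
inexact when fractional bits are discarded. A minimal host-side
sketch, assuming an IEEE 754 environment with working fenv support
(not part of this patch):

    #include <fenv.h>
    #include <math.h>
    #include <stdio.h>

    #pragma STDC FENV_ACCESS ON  /* clang honors this; gcc may not */

    int main(void) {
      feclearexcept(FE_ALL_EXCEPT);
      (void)lrintf(1.5f);                  /* fractional bits discarded */
      printf("inexact: %d\n", !!fetestexcept(FE_INEXACT));

      feclearexcept(FE_ALL_EXCEPT);
      (void)lrintf(nanf(""));              /* NaN input */
      printf("invalid: %d\n", !!fetestexcept(FE_INVALID));
      return 0;
    }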

I've used a custom inserter to explicitly create the control flow
around the float->int->float conversion.
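
In C-like terms the generated control flow is roughly the sketch
below. The function name and the 0x1p23f threshold are illustrative
(1<<23 for float, by analogy with the 1<<52 double constant discussed
below); this is a model of the emitted sequence, not the compiler
code:

    #include <math.h>
    #include <stdint.h>

    static float inline_floor(float x) {
      if (fabsf(x) < 0x1p23f) {            /* fabs.s + flt.s on the input */
        int32_t i = (int32_t)floorf(x);    /* models fcvt.w.s ..., rdn */
        float r = (float)i;                /* fcvt.s.w */
        x = copysignf(r, x);               /* fsgnj.s: keep the sign of -0.0 */
      }
      return x;  /* large values, NaN, and inf fall through unchanged */
    }

The branch is what the custom inserter creates: values with magnitude
>= 1<<23 already have no fractional bits (and values >= 1<<31 wouldn't
survive the i32 round-trip at all), so they skip the conversion.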

We can probably avoid the final fsgnj after the conversion for
no signed zeros FMF, but I'll leave that for future work.

Note the comparison constant is slightly different from the one glibc
uses. They use 1<<53 for double; I'm using 1<<52. I believe either is
valid. Numbers >= 1<<52 can't have any fractional bits. It's ok to do
the float->int->float conversion on numbers between 1<<52 and 1<<53
since they all fit in an i64. We only have a problem if the double
can't fit in an i64.
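
A quick standalone check of that argument (a demo, not part of the
patch):

    #include <math.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void) {
      /* In [2^52, 2^53) consecutive doubles are exactly 1.0 apart, so
         every value in that range is already an integer... */
      double x = 0x1p52 + 1.0;
      printf("integral:   %d\n", x == trunc(x));            /* prints 1 */
      /* ...and the double->i64->double round-trip is exact there. */
      printf("round-trip: %d\n", (double)(int64_t)x == x);  /* prints 1 */
      return 0;
    }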

Reviewed By: reames

Differential Revision: https://reviews.llvm.org/D136508
2022-10-26 14:36:49 -07:00


; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+f -verify-machineinstrs < %s \
; RUN: -target-abi=ilp32f | FileCheck -check-prefix=RV32IF %s
; RUN: llc -mtriple=riscv64 -mattr=+f -verify-machineinstrs < %s \
; RUN: -target-abi=lp64f | FileCheck -check-prefix=RV64IF %s
define signext i8 @test_floor_si8(float %x) {
; RV32IF-LABEL: test_floor_si8:
; RV32IF: # %bb.0:
; RV32IF-NEXT: fcvt.w.s a0, fa0, rdn
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: test_floor_si8:
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.l.s a0, fa0, rdn
; RV64IF-NEXT: ret
%a = call float @llvm.floor.f32(float %x)
%b = fptosi float %a to i8
ret i8 %b
}
define signext i16 @test_floor_si16(float %x) {
; RV32IF-LABEL: test_floor_si16:
; RV32IF: # %bb.0:
; RV32IF-NEXT: fcvt.w.s a0, fa0, rdn
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: test_floor_si16:
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.l.s a0, fa0, rdn
; RV64IF-NEXT: ret
%a = call float @llvm.floor.f32(float %x)
%b = fptosi float %a to i16
ret i16 %b
}
define signext i32 @test_floor_si32(float %x) {
; RV32IF-LABEL: test_floor_si32:
; RV32IF: # %bb.0:
; RV32IF-NEXT: fcvt.w.s a0, fa0, rdn
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: test_floor_si32:
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.w.s a0, fa0, rdn
; RV64IF-NEXT: ret
%a = call float @llvm.floor.f32(float %x)
%b = fptosi float %a to i32
ret i32 %b
}
define i64 @test_floor_si64(float %x) {
; RV32IF-LABEL: test_floor_si64:
; RV32IF: # %bb.0:
; RV32IF-NEXT: addi sp, sp, -16
; RV32IF-NEXT: .cfi_def_cfa_offset 16
; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT: .cfi_offset ra, -4
; RV32IF-NEXT: lui a0, %hi(.LCPI3_0)
; RV32IF-NEXT: flw ft0, %lo(.LCPI3_0)(a0)
; RV32IF-NEXT: fabs.s ft1, fa0
; RV32IF-NEXT: flt.s a0, ft1, ft0
; RV32IF-NEXT: beqz a0, .LBB3_2
; RV32IF-NEXT: # %bb.1:
; RV32IF-NEXT: fcvt.w.s a0, fa0, rdn
; RV32IF-NEXT: fcvt.s.w ft0, a0, rdn
; RV32IF-NEXT: fsgnj.s fa0, ft0, fa0
; RV32IF-NEXT: .LBB3_2:
; RV32IF-NEXT: call __fixsfdi@plt
; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT: addi sp, sp, 16
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: test_floor_si64:
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.l.s a0, fa0, rdn
; RV64IF-NEXT: ret
%a = call float @llvm.floor.f32(float %x)
%b = fptosi float %a to i64
ret i64 %b
}
define zeroext i8 @test_floor_ui8(float %x) {
; RV32IF-LABEL: test_floor_ui8:
; RV32IF: # %bb.0:
; RV32IF-NEXT: fcvt.wu.s a0, fa0, rdn
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: test_floor_ui8:
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.lu.s a0, fa0, rdn
; RV64IF-NEXT: ret
%a = call float @llvm.floor.f32(float %x)
%b = fptoui float %a to i8
ret i8 %b
}
define zeroext i16 @test_floor_ui16(float %x) {
; RV32IF-LABEL: test_floor_ui16:
; RV32IF: # %bb.0:
; RV32IF-NEXT: fcvt.wu.s a0, fa0, rdn
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: test_floor_ui16:
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.lu.s a0, fa0, rdn
; RV64IF-NEXT: ret
%a = call float @llvm.floor.f32(float %x)
%b = fptoui float %a to i16
ret i16 %b
}
define signext i32 @test_floor_ui32(float %x) {
; RV32IF-LABEL: test_floor_ui32:
; RV32IF: # %bb.0:
; RV32IF-NEXT: fcvt.wu.s a0, fa0, rdn
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: test_floor_ui32:
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.wu.s a0, fa0, rdn
; RV64IF-NEXT: ret
%a = call float @llvm.floor.f32(float %x)
%b = fptoui float %a to i32
ret i32 %b
}
define i64 @test_floor_ui64(float %x) {
; RV32IF-LABEL: test_floor_ui64:
; RV32IF: # %bb.0:
; RV32IF-NEXT: addi sp, sp, -16
; RV32IF-NEXT: .cfi_def_cfa_offset 16
; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT: .cfi_offset ra, -4
; RV32IF-NEXT: lui a0, %hi(.LCPI7_0)
; RV32IF-NEXT: flw ft0, %lo(.LCPI7_0)(a0)
; RV32IF-NEXT: fabs.s ft1, fa0
; RV32IF-NEXT: flt.s a0, ft1, ft0
; RV32IF-NEXT: beqz a0, .LBB7_2
; RV32IF-NEXT: # %bb.1:
; RV32IF-NEXT: fcvt.w.s a0, fa0, rdn
; RV32IF-NEXT: fcvt.s.w ft0, a0, rdn
; RV32IF-NEXT: fsgnj.s fa0, ft0, fa0
; RV32IF-NEXT: .LBB7_2:
; RV32IF-NEXT: call __fixunssfdi@plt
; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT: addi sp, sp, 16
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: test_floor_ui64:
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.lu.s a0, fa0, rdn
; RV64IF-NEXT: ret
%a = call float @llvm.floor.f32(float %x)
%b = fptoui float %a to i64
ret i64 %b
}
define signext i8 @test_ceil_si8(float %x) {
; RV32IF-LABEL: test_ceil_si8:
; RV32IF: # %bb.0:
; RV32IF-NEXT: fcvt.w.s a0, fa0, rup
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: test_ceil_si8:
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.l.s a0, fa0, rup
; RV64IF-NEXT: ret
%a = call float @llvm.ceil.f32(float %x)
%b = fptosi float %a to i8
ret i8 %b
}
define signext i16 @test_ceil_si16(float %x) {
; RV32IF-LABEL: test_ceil_si16:
; RV32IF: # %bb.0:
; RV32IF-NEXT: fcvt.w.s a0, fa0, rup
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: test_ceil_si16:
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.l.s a0, fa0, rup
; RV64IF-NEXT: ret
%a = call float @llvm.ceil.f32(float %x)
%b = fptosi float %a to i16
ret i16 %b
}
define signext i32 @test_ceil_si32(float %x) {
; RV32IF-LABEL: test_ceil_si32:
; RV32IF: # %bb.0:
; RV32IF-NEXT: fcvt.w.s a0, fa0, rup
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: test_ceil_si32:
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.w.s a0, fa0, rup
; RV64IF-NEXT: ret
%a = call float @llvm.ceil.f32(float %x)
%b = fptosi float %a to i32
ret i32 %b
}
define i64 @test_ceil_si64(float %x) {
; RV32IF-LABEL: test_ceil_si64:
; RV32IF: # %bb.0:
; RV32IF-NEXT: addi sp, sp, -16
; RV32IF-NEXT: .cfi_def_cfa_offset 16
; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT: .cfi_offset ra, -4
; RV32IF-NEXT: lui a0, %hi(.LCPI11_0)
; RV32IF-NEXT: flw ft0, %lo(.LCPI11_0)(a0)
; RV32IF-NEXT: fabs.s ft1, fa0
; RV32IF-NEXT: flt.s a0, ft1, ft0
; RV32IF-NEXT: beqz a0, .LBB11_2
; RV32IF-NEXT: # %bb.1:
; RV32IF-NEXT: fcvt.w.s a0, fa0, rup
; RV32IF-NEXT: fcvt.s.w ft0, a0, rup
; RV32IF-NEXT: fsgnj.s fa0, ft0, fa0
; RV32IF-NEXT: .LBB11_2:
; RV32IF-NEXT: call __fixsfdi@plt
; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT: addi sp, sp, 16
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: test_ceil_si64:
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.l.s a0, fa0, rup
; RV64IF-NEXT: ret
%a = call float @llvm.ceil.f32(float %x)
%b = fptosi float %a to i64
ret i64 %b
}
define zeroext i8 @test_ceil_ui8(float %x) {
; RV32IF-LABEL: test_ceil_ui8:
; RV32IF: # %bb.0:
; RV32IF-NEXT: fcvt.wu.s a0, fa0, rup
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: test_ceil_ui8:
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.lu.s a0, fa0, rup
; RV64IF-NEXT: ret
%a = call float @llvm.ceil.f32(float %x)
%b = fptoui float %a to i8
ret i8 %b
}
define zeroext i16 @test_ceil_ui16(float %x) {
; RV32IF-LABEL: test_ceil_ui16:
; RV32IF: # %bb.0:
; RV32IF-NEXT: fcvt.wu.s a0, fa0, rup
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: test_ceil_ui16:
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.lu.s a0, fa0, rup
; RV64IF-NEXT: ret
%a = call float @llvm.ceil.f32(float %x)
%b = fptoui float %a to i16
ret i16 %b
}
define signext i32 @test_ceil_ui32(float %x) {
; RV32IF-LABEL: test_ceil_ui32:
; RV32IF: # %bb.0:
; RV32IF-NEXT: fcvt.wu.s a0, fa0, rup
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: test_ceil_ui32:
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.wu.s a0, fa0, rup
; RV64IF-NEXT: ret
%a = call float @llvm.ceil.f32(float %x)
%b = fptoui float %a to i32
ret i32 %b
}
define i64 @test_ceil_ui64(float %x) {
; RV32IF-LABEL: test_ceil_ui64:
; RV32IF: # %bb.0:
; RV32IF-NEXT: addi sp, sp, -16
; RV32IF-NEXT: .cfi_def_cfa_offset 16
; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT: .cfi_offset ra, -4
; RV32IF-NEXT: lui a0, %hi(.LCPI15_0)
; RV32IF-NEXT: flw ft0, %lo(.LCPI15_0)(a0)
; RV32IF-NEXT: fabs.s ft1, fa0
; RV32IF-NEXT: flt.s a0, ft1, ft0
; RV32IF-NEXT: beqz a0, .LBB15_2
; RV32IF-NEXT: # %bb.1:
; RV32IF-NEXT: fcvt.w.s a0, fa0, rup
; RV32IF-NEXT: fcvt.s.w ft0, a0, rup
; RV32IF-NEXT: fsgnj.s fa0, ft0, fa0
; RV32IF-NEXT: .LBB15_2:
; RV32IF-NEXT: call __fixunssfdi@plt
; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT: addi sp, sp, 16
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: test_ceil_ui64:
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.lu.s a0, fa0, rup
; RV64IF-NEXT: ret
%a = call float @llvm.ceil.f32(float %x)
%b = fptoui float %a to i64
ret i64 %b
}
define signext i8 @test_trunc_si8(float %x) {
; RV32IF-LABEL: test_trunc_si8:
; RV32IF: # %bb.0:
; RV32IF-NEXT: fcvt.w.s a0, fa0, rtz
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: test_trunc_si8:
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.l.s a0, fa0, rtz
; RV64IF-NEXT: ret
%a = call float @llvm.trunc.f32(float %x)
%b = fptosi float %a to i8
ret i8 %b
}
define signext i16 @test_trunc_si16(float %x) {
; RV32IF-LABEL: test_trunc_si16:
; RV32IF: # %bb.0:
; RV32IF-NEXT: fcvt.w.s a0, fa0, rtz
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: test_trunc_si16:
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.l.s a0, fa0, rtz
; RV64IF-NEXT: ret
%a = call float @llvm.trunc.f32(float %x)
%b = fptosi float %a to i16
ret i16 %b
}
define signext i32 @test_trunc_si32(float %x) {
; RV32IF-LABEL: test_trunc_si32:
; RV32IF: # %bb.0:
; RV32IF-NEXT: fcvt.w.s a0, fa0, rtz
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: test_trunc_si32:
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.w.s a0, fa0, rtz
; RV64IF-NEXT: ret
%a = call float @llvm.trunc.f32(float %x)
%b = fptosi float %a to i32
ret i32 %b
}
define i64 @test_trunc_si64(float %x) {
; RV32IF-LABEL: test_trunc_si64:
; RV32IF: # %bb.0:
; RV32IF-NEXT: addi sp, sp, -16
; RV32IF-NEXT: .cfi_def_cfa_offset 16
; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT: .cfi_offset ra, -4
; RV32IF-NEXT: lui a0, %hi(.LCPI19_0)
; RV32IF-NEXT: flw ft0, %lo(.LCPI19_0)(a0)
; RV32IF-NEXT: fabs.s ft1, fa0
; RV32IF-NEXT: flt.s a0, ft1, ft0
; RV32IF-NEXT: beqz a0, .LBB19_2
; RV32IF-NEXT: # %bb.1:
; RV32IF-NEXT: fcvt.w.s a0, fa0, rtz
; RV32IF-NEXT: fcvt.s.w ft0, a0, rtz
; RV32IF-NEXT: fsgnj.s fa0, ft0, fa0
; RV32IF-NEXT: .LBB19_2:
; RV32IF-NEXT: call __fixsfdi@plt
; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT: addi sp, sp, 16
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: test_trunc_si64:
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.l.s a0, fa0, rtz
; RV64IF-NEXT: ret
%a = call float @llvm.trunc.f32(float %x)
%b = fptosi float %a to i64
ret i64 %b
}
define zeroext i8 @test_trunc_ui8(float %x) {
; RV32IF-LABEL: test_trunc_ui8:
; RV32IF: # %bb.0:
; RV32IF-NEXT: fcvt.wu.s a0, fa0, rtz
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: test_trunc_ui8:
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.lu.s a0, fa0, rtz
; RV64IF-NEXT: ret
%a = call float @llvm.trunc.f32(float %x)
%b = fptoui float %a to i8
ret i8 %b
}
define zeroext i16 @test_trunc_ui16(float %x) {
; RV32IF-LABEL: test_trunc_ui16:
; RV32IF: # %bb.0:
; RV32IF-NEXT: fcvt.wu.s a0, fa0, rtz
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: test_trunc_ui16:
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.lu.s a0, fa0, rtz
; RV64IF-NEXT: ret
%a = call float @llvm.trunc.f32(float %x)
%b = fptoui float %a to i16
ret i16 %b
}
define signext i32 @test_trunc_ui32(float %x) {
; RV32IF-LABEL: test_trunc_ui32:
; RV32IF: # %bb.0:
; RV32IF-NEXT: fcvt.wu.s a0, fa0, rtz
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: test_trunc_ui32:
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.wu.s a0, fa0, rtz
; RV64IF-NEXT: ret
%a = call float @llvm.trunc.f32(float %x)
%b = fptoui float %a to i32
ret i32 %b
}
define i64 @test_trunc_ui64(float %x) {
; RV32IF-LABEL: test_trunc_ui64:
; RV32IF: # %bb.0:
; RV32IF-NEXT: addi sp, sp, -16
; RV32IF-NEXT: .cfi_def_cfa_offset 16
; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT: .cfi_offset ra, -4
; RV32IF-NEXT: lui a0, %hi(.LCPI23_0)
; RV32IF-NEXT: flw ft0, %lo(.LCPI23_0)(a0)
; RV32IF-NEXT: fabs.s ft1, fa0
; RV32IF-NEXT: flt.s a0, ft1, ft0
; RV32IF-NEXT: beqz a0, .LBB23_2
; RV32IF-NEXT: # %bb.1:
; RV32IF-NEXT: fcvt.w.s a0, fa0, rtz
; RV32IF-NEXT: fcvt.s.w ft0, a0, rtz
; RV32IF-NEXT: fsgnj.s fa0, ft0, fa0
; RV32IF-NEXT: .LBB23_2:
; RV32IF-NEXT: call __fixunssfdi@plt
; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT: addi sp, sp, 16
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: test_trunc_ui64:
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.lu.s a0, fa0, rtz
; RV64IF-NEXT: ret
%a = call float @llvm.trunc.f32(float %x)
%b = fptoui float %a to i64
ret i64 %b
}
define signext i8 @test_round_si8(float %x) {
; RV32IF-LABEL: test_round_si8:
; RV32IF: # %bb.0:
; RV32IF-NEXT: fcvt.w.s a0, fa0, rmm
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: test_round_si8:
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.l.s a0, fa0, rmm
; RV64IF-NEXT: ret
%a = call float @llvm.round.f32(float %x)
%b = fptosi float %a to i8
ret i8 %b
}
define signext i16 @test_round_si16(float %x) {
; RV32IF-LABEL: test_round_si16:
; RV32IF: # %bb.0:
; RV32IF-NEXT: fcvt.w.s a0, fa0, rmm
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: test_round_si16:
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.l.s a0, fa0, rmm
; RV64IF-NEXT: ret
%a = call float @llvm.round.f32(float %x)
%b = fptosi float %a to i16
ret i16 %b
}
define signext i32 @test_round_si32(float %x) {
; RV32IF-LABEL: test_round_si32:
; RV32IF: # %bb.0:
; RV32IF-NEXT: fcvt.w.s a0, fa0, rmm
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: test_round_si32:
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.w.s a0, fa0, rmm
; RV64IF-NEXT: ret
%a = call float @llvm.round.f32(float %x)
%b = fptosi float %a to i32
ret i32 %b
}
define i64 @test_round_si64(float %x) {
; RV32IF-LABEL: test_round_si64:
; RV32IF: # %bb.0:
; RV32IF-NEXT: addi sp, sp, -16
; RV32IF-NEXT: .cfi_def_cfa_offset 16
; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT: .cfi_offset ra, -4
; RV32IF-NEXT: lui a0, %hi(.LCPI27_0)
; RV32IF-NEXT: flw ft0, %lo(.LCPI27_0)(a0)
; RV32IF-NEXT: fabs.s ft1, fa0
; RV32IF-NEXT: flt.s a0, ft1, ft0
; RV32IF-NEXT: beqz a0, .LBB27_2
; RV32IF-NEXT: # %bb.1:
; RV32IF-NEXT: fcvt.w.s a0, fa0, rmm
; RV32IF-NEXT: fcvt.s.w ft0, a0, rmm
; RV32IF-NEXT: fsgnj.s fa0, ft0, fa0
; RV32IF-NEXT: .LBB27_2:
; RV32IF-NEXT: call __fixsfdi@plt
; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT: addi sp, sp, 16
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: test_round_si64:
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.l.s a0, fa0, rmm
; RV64IF-NEXT: ret
%a = call float @llvm.round.f32(float %x)
%b = fptosi float %a to i64
ret i64 %b
}
define zeroext i8 @test_round_ui8(float %x) {
; RV32IF-LABEL: test_round_ui8:
; RV32IF: # %bb.0:
; RV32IF-NEXT: fcvt.wu.s a0, fa0, rmm
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: test_round_ui8:
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.lu.s a0, fa0, rmm
; RV64IF-NEXT: ret
%a = call float @llvm.round.f32(float %x)
%b = fptoui float %a to i8
ret i8 %b
}
define zeroext i16 @test_round_ui16(float %x) {
; RV32IF-LABEL: test_round_ui16:
; RV32IF: # %bb.0:
; RV32IF-NEXT: fcvt.wu.s a0, fa0, rmm
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: test_round_ui16:
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.lu.s a0, fa0, rmm
; RV64IF-NEXT: ret
%a = call float @llvm.round.f32(float %x)
%b = fptoui float %a to i16
ret i16 %b
}
define signext i32 @test_round_ui32(float %x) {
; RV32IF-LABEL: test_round_ui32:
; RV32IF: # %bb.0:
; RV32IF-NEXT: fcvt.wu.s a0, fa0, rmm
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: test_round_ui32:
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.wu.s a0, fa0, rmm
; RV64IF-NEXT: ret
%a = call float @llvm.round.f32(float %x)
%b = fptoui float %a to i32
ret i32 %b
}
define i64 @test_round_ui64(float %x) {
; RV32IF-LABEL: test_round_ui64:
; RV32IF: # %bb.0:
; RV32IF-NEXT: addi sp, sp, -16
; RV32IF-NEXT: .cfi_def_cfa_offset 16
; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT: .cfi_offset ra, -4
; RV32IF-NEXT: lui a0, %hi(.LCPI31_0)
; RV32IF-NEXT: flw ft0, %lo(.LCPI31_0)(a0)
; RV32IF-NEXT: fabs.s ft1, fa0
; RV32IF-NEXT: flt.s a0, ft1, ft0
; RV32IF-NEXT: beqz a0, .LBB31_2
; RV32IF-NEXT: # %bb.1:
; RV32IF-NEXT: fcvt.w.s a0, fa0, rmm
; RV32IF-NEXT: fcvt.s.w ft0, a0, rmm
; RV32IF-NEXT: fsgnj.s fa0, ft0, fa0
; RV32IF-NEXT: .LBB31_2:
; RV32IF-NEXT: call __fixunssfdi@plt
; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT: addi sp, sp, 16
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: test_round_ui64:
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.lu.s a0, fa0, rmm
; RV64IF-NEXT: ret
%a = call float @llvm.round.f32(float %x)
%b = fptoui float %a to i64
ret i64 %b
}
define signext i8 @test_roundeven_si8(float %x) {
; RV32IF-LABEL: test_roundeven_si8:
; RV32IF: # %bb.0:
; RV32IF-NEXT: fcvt.w.s a0, fa0, rne
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: test_roundeven_si8:
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.l.s a0, fa0, rne
; RV64IF-NEXT: ret
%a = call float @llvm.roundeven.f32(float %x)
%b = fptosi float %a to i8
ret i8 %b
}
define signext i16 @test_roundeven_si16(float %x) {
; RV32IF-LABEL: test_roundeven_si16:
; RV32IF: # %bb.0:
; RV32IF-NEXT: fcvt.w.s a0, fa0, rne
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: test_roundeven_si16:
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.l.s a0, fa0, rne
; RV64IF-NEXT: ret
%a = call float @llvm.roundeven.f32(float %x)
%b = fptosi float %a to i16
ret i16 %b
}
define signext i32 @test_roundeven_si32(float %x) {
; RV32IF-LABEL: test_roundeven_si32:
; RV32IF: # %bb.0:
; RV32IF-NEXT: fcvt.w.s a0, fa0, rne
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: test_roundeven_si32:
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.w.s a0, fa0, rne
; RV64IF-NEXT: ret
%a = call float @llvm.roundeven.f32(float %x)
%b = fptosi float %a to i32
ret i32 %b
}
define i64 @test_roundeven_si64(float %x) {
; RV32IF-LABEL: test_roundeven_si64:
; RV32IF: # %bb.0:
; RV32IF-NEXT: addi sp, sp, -16
; RV32IF-NEXT: .cfi_def_cfa_offset 16
; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT: .cfi_offset ra, -4
; RV32IF-NEXT: lui a0, %hi(.LCPI35_0)
; RV32IF-NEXT: flw ft0, %lo(.LCPI35_0)(a0)
; RV32IF-NEXT: fabs.s ft1, fa0
; RV32IF-NEXT: flt.s a0, ft1, ft0
; RV32IF-NEXT: beqz a0, .LBB35_2
; RV32IF-NEXT: # %bb.1:
; RV32IF-NEXT: fcvt.w.s a0, fa0, rne
; RV32IF-NEXT: fcvt.s.w ft0, a0, rne
; RV32IF-NEXT: fsgnj.s fa0, ft0, fa0
; RV32IF-NEXT: .LBB35_2:
; RV32IF-NEXT: call __fixsfdi@plt
; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT: addi sp, sp, 16
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: test_roundeven_si64:
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.l.s a0, fa0, rne
; RV64IF-NEXT: ret
%a = call float @llvm.roundeven.f32(float %x)
%b = fptosi float %a to i64
ret i64 %b
}
define zeroext i8 @test_roundeven_ui8(float %x) {
; RV32IF-LABEL: test_roundeven_ui8:
; RV32IF: # %bb.0:
; RV32IF-NEXT: fcvt.wu.s a0, fa0, rne
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: test_roundeven_ui8:
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.lu.s a0, fa0, rne
; RV64IF-NEXT: ret
%a = call float @llvm.roundeven.f32(float %x)
%b = fptoui float %a to i8
ret i8 %b
}
define zeroext i16 @test_roundeven_ui16(float %x) {
; RV32IF-LABEL: test_roundeven_ui16:
; RV32IF: # %bb.0:
; RV32IF-NEXT: fcvt.wu.s a0, fa0, rne
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: test_roundeven_ui16:
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.lu.s a0, fa0, rne
; RV64IF-NEXT: ret
%a = call float @llvm.roundeven.f32(float %x)
%b = fptoui float %a to i16
ret i16 %b
}
define signext i32 @test_roundeven_ui32(float %x) {
; RV32IF-LABEL: test_roundeven_ui32:
; RV32IF: # %bb.0:
; RV32IF-NEXT: fcvt.wu.s a0, fa0, rne
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: test_roundeven_ui32:
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.wu.s a0, fa0, rne
; RV64IF-NEXT: ret
%a = call float @llvm.roundeven.f32(float %x)
%b = fptoui float %a to i32
ret i32 %b
}
define i64 @test_roundeven_ui64(float %x) {
; RV32IF-LABEL: test_roundeven_ui64:
; RV32IF: # %bb.0:
; RV32IF-NEXT: addi sp, sp, -16
; RV32IF-NEXT: .cfi_def_cfa_offset 16
; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT: .cfi_offset ra, -4
; RV32IF-NEXT: lui a0, %hi(.LCPI39_0)
; RV32IF-NEXT: flw ft0, %lo(.LCPI39_0)(a0)
; RV32IF-NEXT: fabs.s ft1, fa0
; RV32IF-NEXT: flt.s a0, ft1, ft0
; RV32IF-NEXT: beqz a0, .LBB39_2
; RV32IF-NEXT: # %bb.1:
; RV32IF-NEXT: fcvt.w.s a0, fa0, rne
; RV32IF-NEXT: fcvt.s.w ft0, a0, rne
; RV32IF-NEXT: fsgnj.s fa0, ft0, fa0
; RV32IF-NEXT: .LBB39_2:
; RV32IF-NEXT: call __fixunssfdi@plt
; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT: addi sp, sp, 16
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: test_roundeven_ui64:
; RV64IF: # %bb.0:
; RV64IF-NEXT: fcvt.lu.s a0, fa0, rne
; RV64IF-NEXT: ret
%a = call float @llvm.roundeven.f32(float %x)
%b = fptoui float %a to i64
ret i64 %b
}
define float @test_floor_float(float %x) {
; RV32IFD-LABEL: test_floor_float:
; RV32IFD: # %bb.0:
; RV32IFD-NEXT: addi sp, sp, -16
; RV32IFD-NEXT: .cfi_def_cfa_offset 16
; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: .cfi_offset ra, -4
; RV32IFD-NEXT: call floor@plt
; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
;
; RV64IFD-LABEL: test_floor_float:
; RV64IFD: # %bb.0:
; RV64IFD-NEXT: addi sp, sp, -16
; RV64IFD-NEXT: .cfi_def_cfa_offset 16
; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT: .cfi_offset ra, -8
; RV64IFD-NEXT: call floor@plt
; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT: addi sp, sp, 16
; RV64IFD-NEXT: ret
; RV32IF-LABEL: test_floor_float:
; RV32IF: # %bb.0:
; RV32IF-NEXT: lui a0, %hi(.LCPI40_0)
; RV32IF-NEXT: flw ft0, %lo(.LCPI40_0)(a0)
; RV32IF-NEXT: fabs.s ft1, fa0
; RV32IF-NEXT: flt.s a0, ft1, ft0
; RV32IF-NEXT: beqz a0, .LBB40_2
; RV32IF-NEXT: # %bb.1:
; RV32IF-NEXT: fcvt.w.s a0, fa0, rdn
; RV32IF-NEXT: fcvt.s.w ft0, a0, rdn
; RV32IF-NEXT: fsgnj.s fa0, ft0, fa0
; RV32IF-NEXT: .LBB40_2:
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: test_floor_float:
; RV64IF: # %bb.0:
; RV64IF-NEXT: lui a0, %hi(.LCPI40_0)
; RV64IF-NEXT: flw ft0, %lo(.LCPI40_0)(a0)
; RV64IF-NEXT: fabs.s ft1, fa0
; RV64IF-NEXT: flt.s a0, ft1, ft0
; RV64IF-NEXT: beqz a0, .LBB40_2
; RV64IF-NEXT: # %bb.1:
; RV64IF-NEXT: fcvt.w.s a0, fa0, rdn
; RV64IF-NEXT: fcvt.s.w ft0, a0, rdn
; RV64IF-NEXT: fsgnj.s fa0, ft0, fa0
; RV64IF-NEXT: .LBB40_2:
; RV64IF-NEXT: ret
%a = call float @llvm.floor.f32(float %x)
ret float %a
}
define float @test_ceil_float(float %x) {
; RV32IFD-LABEL: test_ceil_float:
; RV32IFD: # %bb.0:
; RV32IFD-NEXT: addi sp, sp, -16
; RV32IFD-NEXT: .cfi_def_cfa_offset 16
; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: .cfi_offset ra, -4
; RV32IFD-NEXT: call ceil@plt
; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
;
; RV64IFD-LABEL: test_ceil_float:
; RV64IFD: # %bb.0:
; RV64IFD-NEXT: addi sp, sp, -16
; RV64IFD-NEXT: .cfi_def_cfa_offset 16
; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT: .cfi_offset ra, -8
; RV64IFD-NEXT: call ceil@plt
; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT: addi sp, sp, 16
; RV64IFD-NEXT: ret
; RV32IF-LABEL: test_ceil_float:
; RV32IF: # %bb.0:
; RV32IF-NEXT: lui a0, %hi(.LCPI41_0)
; RV32IF-NEXT: flw ft0, %lo(.LCPI41_0)(a0)
; RV32IF-NEXT: fabs.s ft1, fa0
; RV32IF-NEXT: flt.s a0, ft1, ft0
; RV32IF-NEXT: beqz a0, .LBB41_2
; RV32IF-NEXT: # %bb.1:
; RV32IF-NEXT: fcvt.w.s a0, fa0, rup
; RV32IF-NEXT: fcvt.s.w ft0, a0, rup
; RV32IF-NEXT: fsgnj.s fa0, ft0, fa0
; RV32IF-NEXT: .LBB41_2:
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: test_ceil_float:
; RV64IF: # %bb.0:
; RV64IF-NEXT: lui a0, %hi(.LCPI41_0)
; RV64IF-NEXT: flw ft0, %lo(.LCPI41_0)(a0)
; RV64IF-NEXT: fabs.s ft1, fa0
; RV64IF-NEXT: flt.s a0, ft1, ft0
; RV64IF-NEXT: beqz a0, .LBB41_2
; RV64IF-NEXT: # %bb.1:
; RV64IF-NEXT: fcvt.w.s a0, fa0, rup
; RV64IF-NEXT: fcvt.s.w ft0, a0, rup
; RV64IF-NEXT: fsgnj.s fa0, ft0, fa0
; RV64IF-NEXT: .LBB41_2:
; RV64IF-NEXT: ret
%a = call float @llvm.ceil.f32(float %x)
ret float %a
}
define float @test_trunc_float(float %x) {
; RV32IFD-LABEL: test_trunc_float:
; RV32IFD: # %bb.0:
; RV32IFD-NEXT: addi sp, sp, -16
; RV32IFD-NEXT: .cfi_def_cfa_offset 16
; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: .cfi_offset ra, -4
; RV32IFD-NEXT: call trunc@plt
; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
;
; RV64IFD-LABEL: test_trunc_float:
; RV64IFD: # %bb.0:
; RV64IFD-NEXT: addi sp, sp, -16
; RV64IFD-NEXT: .cfi_def_cfa_offset 16
; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT: .cfi_offset ra, -8
; RV64IFD-NEXT: call trunc@plt
; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT: addi sp, sp, 16
; RV64IFD-NEXT: ret
; RV32IF-LABEL: test_trunc_float:
; RV32IF: # %bb.0:
; RV32IF-NEXT: lui a0, %hi(.LCPI42_0)
; RV32IF-NEXT: flw ft0, %lo(.LCPI42_0)(a0)
; RV32IF-NEXT: fabs.s ft1, fa0
; RV32IF-NEXT: flt.s a0, ft1, ft0
; RV32IF-NEXT: beqz a0, .LBB42_2
; RV32IF-NEXT: # %bb.1:
; RV32IF-NEXT: fcvt.w.s a0, fa0, rtz
; RV32IF-NEXT: fcvt.s.w ft0, a0, rtz
; RV32IF-NEXT: fsgnj.s fa0, ft0, fa0
; RV32IF-NEXT: .LBB42_2:
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: test_trunc_float:
; RV64IF: # %bb.0:
; RV64IF-NEXT: lui a0, %hi(.LCPI42_0)
; RV64IF-NEXT: flw ft0, %lo(.LCPI42_0)(a0)
; RV64IF-NEXT: fabs.s ft1, fa0
; RV64IF-NEXT: flt.s a0, ft1, ft0
; RV64IF-NEXT: beqz a0, .LBB42_2
; RV64IF-NEXT: # %bb.1:
; RV64IF-NEXT: fcvt.w.s a0, fa0, rtz
; RV64IF-NEXT: fcvt.s.w ft0, a0, rtz
; RV64IF-NEXT: fsgnj.s fa0, ft0, fa0
; RV64IF-NEXT: .LBB42_2:
; RV64IF-NEXT: ret
%a = call float @llvm.trunc.f32(float %x)
ret float %a
}
define float @test_round_float(float %x) {
; RV32IFD-LABEL: test_round_float:
; RV32IFD: # %bb.0:
; RV32IFD-NEXT: addi sp, sp, -16
; RV32IFD-NEXT: .cfi_def_cfa_offset 16
; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: .cfi_offset ra, -4
; RV32IFD-NEXT: call round@plt
; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
;
; RV64IFD-LABEL: test_round_float:
; RV64IFD: # %bb.0:
; RV64IFD-NEXT: addi sp, sp, -16
; RV64IFD-NEXT: .cfi_def_cfa_offset 16
; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT: .cfi_offset ra, -8
; RV64IFD-NEXT: call round@plt
; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT: addi sp, sp, 16
; RV64IFD-NEXT: ret
; RV32IF-LABEL: test_round_float:
; RV32IF: # %bb.0:
; RV32IF-NEXT: lui a0, %hi(.LCPI43_0)
; RV32IF-NEXT: flw ft0, %lo(.LCPI43_0)(a0)
; RV32IF-NEXT: fabs.s ft1, fa0
; RV32IF-NEXT: flt.s a0, ft1, ft0
; RV32IF-NEXT: beqz a0, .LBB43_2
; RV32IF-NEXT: # %bb.1:
; RV32IF-NEXT: fcvt.w.s a0, fa0, rmm
; RV32IF-NEXT: fcvt.s.w ft0, a0, rmm
; RV32IF-NEXT: fsgnj.s fa0, ft0, fa0
; RV32IF-NEXT: .LBB43_2:
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: test_round_float:
; RV64IF: # %bb.0:
; RV64IF-NEXT: lui a0, %hi(.LCPI43_0)
; RV64IF-NEXT: flw ft0, %lo(.LCPI43_0)(a0)
; RV64IF-NEXT: fabs.s ft1, fa0
; RV64IF-NEXT: flt.s a0, ft1, ft0
; RV64IF-NEXT: beqz a0, .LBB43_2
; RV64IF-NEXT: # %bb.1:
; RV64IF-NEXT: fcvt.w.s a0, fa0, rmm
; RV64IF-NEXT: fcvt.s.w ft0, a0, rmm
; RV64IF-NEXT: fsgnj.s fa0, ft0, fa0
; RV64IF-NEXT: .LBB43_2:
; RV64IF-NEXT: ret
%a = call float @llvm.round.f32(float %x)
ret float %a
}
define float @test_roundeven_float(float %x) {
; RV32IFD-LABEL: test_roundeven_float:
; RV32IFD: # %bb.0:
; RV32IFD-NEXT: addi sp, sp, -16
; RV32IFD-NEXT: .cfi_def_cfa_offset 16
; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: .cfi_offset ra, -4
; RV32IFD-NEXT: call roundeven@plt
; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
;
; RV64IFD-LABEL: test_roundeven_float:
; RV64IFD: # %bb.0:
; RV64IFD-NEXT: addi sp, sp, -16
; RV64IFD-NEXT: .cfi_def_cfa_offset 16
; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT: .cfi_offset ra, -8
; RV64IFD-NEXT: call roundeven@plt
; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT: addi sp, sp, 16
; RV64IFD-NEXT: ret
; RV32IF-LABEL: test_roundeven_float:
; RV32IF: # %bb.0:
; RV32IF-NEXT: lui a0, %hi(.LCPI44_0)
; RV32IF-NEXT: flw ft0, %lo(.LCPI44_0)(a0)
; RV32IF-NEXT: fabs.s ft1, fa0
; RV32IF-NEXT: flt.s a0, ft1, ft0
; RV32IF-NEXT: beqz a0, .LBB44_2
; RV32IF-NEXT: # %bb.1:
; RV32IF-NEXT: fcvt.w.s a0, fa0, rne
; RV32IF-NEXT: fcvt.s.w ft0, a0, rne
; RV32IF-NEXT: fsgnj.s fa0, ft0, fa0
; RV32IF-NEXT: .LBB44_2:
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: test_roundeven_float:
; RV64IF: # %bb.0:
; RV64IF-NEXT: lui a0, %hi(.LCPI44_0)
; RV64IF-NEXT: flw ft0, %lo(.LCPI44_0)(a0)
; RV64IF-NEXT: fabs.s ft1, fa0
; RV64IF-NEXT: flt.s a0, ft1, ft0
; RV64IF-NEXT: beqz a0, .LBB44_2
; RV64IF-NEXT: # %bb.1:
; RV64IF-NEXT: fcvt.w.s a0, fa0, rne
; RV64IF-NEXT: fcvt.s.w ft0, a0, rne
; RV64IF-NEXT: fsgnj.s fa0, ft0, fa0
; RV64IF-NEXT: .LBB44_2:
; RV64IF-NEXT: ret
%a = call float @llvm.roundeven.f32(float %x)
ret float %a
}
declare float @llvm.floor.f32(float)
declare float @llvm.ceil.f32(float)
declare float @llvm.trunc.f32(float)
declare float @llvm.round.f32(float)
declare float @llvm.roundeven.f32(float)