clang-p2996/llvm/test/CodeGen/AArch64/isinf.ll
Yingwei Zheng 38a44bdc93 [CodeGenPrepare] Reverse the canonicalization of isInf/isNanOrInf (#81572)
In commit 2b582440c1, we canonicalize the isInf/isNanOrInf idiom into fabs+fcmp for better analysis/codegen (see also the discussion in https://github.com/llvm/llvm-project/pull/76338).

This patch reverses the fabs+fcmp pattern back into an `is.fpclass` intrinsic call (a sketch of the two equivalent forms follows below). If `is.fpclass` is not supported by the target, it is expanded by TLI.

Fixes the regression introduced by 2b582440c1 and https://github.com/llvm/llvm-project/pull/80414#issuecomment-1936374206.
2024-03-18 18:27:45 +08:00
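
For context, here is a minimal sketch of the two equivalent forms, not part of the test file below and with illustrative function names: the fabs+fcmp idiom produced by the earlier canonicalization, and the `llvm.is.fpclass` form that this patch rewrites it back into in CodeGenPrepare. The mask value 516 (0x204) is the LangRef encoding for the negative- and positive-infinity classes.

; Idiom form: compare |x| against +inf.
define i1 @isinf_idiom(float %x) {
  %abs = tail call float @llvm.fabs.f32(float %x)
  %cmpinf = fcmp oeq float %abs, 0x7FF0000000000000
  ret i1 %cmpinf
}

; Intrinsic form: a single class test with mask 0x204 (fcNegInf | fcPosInf).
define i1 @isinf_fpclass(float %x) {
  %isinf = call i1 @llvm.is.fpclass.f32(float %x, i32 516)
  ret i1 %isinf
}

declare float @llvm.fabs.f32(float)
declare i1 @llvm.is.fpclass.f32(float, i32)

If the target has no good lowering for the class test, TLI expands it again, as noted in the commit message above.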


; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=aarch64-none-linux-gnu -mattr=+neon,+fullfp16 < %s -o -| FileCheck %s

declare half @llvm.fabs.f16(half)
declare float @llvm.fabs.f32(float)
declare double @llvm.fabs.f64(double)
declare fp128 @llvm.fabs.f128(fp128)

; Check if INFINITY for _Float16 is materialized
define i32 @replace_isinf_call_f16(half %x) {
; CHECK-LABEL: replace_isinf_call_f16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #31744 // =0x7c00
; CHECK-NEXT:    fabs h0, h0
; CHECK-NEXT:    fmov h1, w8
; CHECK-NEXT:    fcmp h0, h1
; CHECK-NEXT:    cset w0, eq
; CHECK-NEXT:    ret
  %abs = tail call half @llvm.fabs.f16(half %x)
  %cmpinf = fcmp oeq half %abs, 0xH7C00
  %ret = zext i1 %cmpinf to i32
  ret i32 %ret
}

; Check if INFINITY for float is materialized
define i32 @replace_isinf_call_f32(float %x) {
; CHECK-LABEL: replace_isinf_call_f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fabs s0, s0
; CHECK-NEXT:    mov w8, #2139095040 // =0x7f800000
; CHECK-NEXT:    fmov s1, w8
; CHECK-NEXT:    fcmp s0, s1
; CHECK-NEXT:    cset w0, eq
; CHECK-NEXT:    ret
  %abs = tail call float @llvm.fabs.f32(float %x)
  %cmpinf = fcmp oeq float %abs, 0x7FF0000000000000
  %ret = zext i1 %cmpinf to i32
  ret i32 %ret
}

; Check if INFINITY for double is materialized
define i32 @replace_isinf_call_f64(double %x) {
; CHECK-LABEL: replace_isinf_call_f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fabs d0, d0
; CHECK-NEXT:    mov x8, #9218868437227405312 // =0x7ff0000000000000
; CHECK-NEXT:    fmov d1, x8
; CHECK-NEXT:    fcmp d0, d1
; CHECK-NEXT:    cset w0, eq
; CHECK-NEXT:    ret
  %abs = tail call double @llvm.fabs.f64(double %x)
  %cmpinf = fcmp oeq double %abs, 0x7FF0000000000000
  %ret = zext i1 %cmpinf to i32
  ret i32 %ret
}

; For long double it still requires loading the constant.
define i32 @replace_isinf_call_f128(fp128 %x) {
; CHECK-LABEL: replace_isinf_call_f128:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str q0, [sp, #-16]!
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    ldp x9, x8, [sp], #16
; CHECK-NEXT:    and x8, x8, #0x7fffffffffffffff
; CHECK-NEXT:    eor x8, x8, #0x7fff000000000000
; CHECK-NEXT:    orr x8, x9, x8
; CHECK-NEXT:    cmp x8, #0
; CHECK-NEXT:    cset w0, eq
; CHECK-NEXT:    ret
  %abs = tail call fp128 @llvm.fabs.f128(fp128 %x)
  %cmpinf = fcmp oeq fp128 %abs, 0xL00000000000000007FFF000000000000
  %ret = zext i1 %cmpinf to i32
  ret i32 %ret
}