clang-p2996/llvm/test/CodeGen/AMDGPU/fsqrt.f64.ll
Lucas Ramirez 6206f5444f [AMDGPU] Occupancy w.r.t. workgroup size range is also a range (#123748)
Occupancy (i.e., the number of waves per EU) depends, in addition to
register usage, on per-workgroup LDS usage as well as on the range of
possible workgroup sizes. Mirroring the latter, occupancy should
therefore be expressed as a range since different group sizes generally
yield different achievable occupancies.

`getOccupancyWithLocalMemSize` currently returns a scalar occupancy
based on the maximum workgroup size and LDS usage. With respect to the
workgroup size range, this scalar can be the minimum, the maximum, or
neither bound of the range of achievable occupancies. This commit
fixes the function by making it compute and return the range of
achievable occupancies w.r.t. workgroup size and LDS usage; it also
renames it to `getOccupancyWithWorkGroupSizes` since it is the range of
workgroup sizes that produces the range of achievable occupancies.

Computing the achievable occupancy range is surprisingly involved.
Minimum/maximum workgroup sizes do not necessarily yield maximum/minimum
occupancies; i.e., sometimes workgroup sizes strictly inside the range
yield the occupancy bounds. The implementation finds these sizes in
constant time;
heavy documentation explains the rationale behind the sometimes
relatively obscure calculations.

As a justifying example, consider a target with 10 waves/EU, 4 EUs/CU,
and 64-wide waves. Also consider a function with no LDS usage and a flat
workgroup size range of [513,1024].

- A group of 513 items requires 9 waves per group. Only 4 groups made up
of 9 waves each can fit fully on a CU at any given time, for a total of
36 waves on the CU, or 9 per EU. However, filling the remaining 40-36=4
wave slots as much as possible without decreasing the number of groups
reveals that a larger group of 640 items yields 40 waves on the CU, or
10 per EU.
- Similarly, a group of 1024 items requires 16 waves per group. Only 2
groups made up of 16 waves each can fit fully on a CU at any given time,
for a total of 32 waves on the CU, or 8 per EU. However, removing as
many waves as possible from the groups while still preventing another
equal-sized group from fitting on the CU reveals that a smaller group of
896 items yields 28 waves on the CU, or 7 per EU.

Therefore the achievable occupancy range for this function is not
[8,9], as the workgroup size bounds would directly suggest, but [7,10],
as the sketch below illustrates.
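
To make the arithmetic concrete, here is a brute-force reference in C++
(a sketch only: the commit's implementation derives the same bounds in
constant time, and the per-EU rounding below is a simplification for
this LDS-free case):

```cpp
// Brute-force reference for the example above: 10 waves/EU, 4 EUs/CU,
// 64-wide waves, no LDS, flat workgroup size range [513,1024]. Sweeping
// every group size reproduces the [7,10] occupancy range.
#include <algorithm>
#include <cstdio>

int main() {
  const unsigned MaxWavesPerEU = 10, EUsPerCU = 4, WaveSize = 64;
  const unsigned CUCapacity = MaxWavesPerEU * EUsPerCU; // 40 slots/CU
  unsigned MinOcc = MaxWavesPerEU, MaxOcc = 0;
  for (unsigned Size = 513; Size <= 1024; ++Size) {
    unsigned WavesPerGroup = (Size + WaveSize - 1) / WaveSize; // ceil
    unsigned GroupsPerCU = CUCapacity / WavesPerGroup; // whole groups
    unsigned WavesPerCU = GroupsPerCU * WavesPerGroup;
    unsigned Occ = WavesPerCU / EUsPerCU; // simplified per-EU rounding
    MinOcc = std::min(MinOcc, Occ);
    MaxOcc = std::max(MaxOcc, Occ);
  }
  // Prints "occupancy range = [7,10]": 896 items (2 groups x 14 waves
  // = 28 waves/CU) gives the minimum, 640 items (4 x 10 = 40) the max.
  std::printf("occupancy range = [%u,%u]\n", MinOcc, MaxOcc);
}
```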

Naturally this change causes a lot of test churn, as instruction
scheduling is driven by achievable occupancy estimates. In most unit
tests the flat workgroup size range is the default [1,1024] which,
ignoring potential LDS limitations, previously produced a scalar
occupancy of 8 (derived from 1024) on many targets, whereas we now
consider the maximum occupancy to be 10 in such cases. Most tests were
updated automatically and checked manually for sanity. I also manually
changed some non-automatically-generated assertions when necessary.

Fixes #118220.
2025-01-23 16:07:57 +01:00


; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=pitcairn -verify-machineinstrs < %s | FileCheck -check-prefixes=SDAG %s
; RUN: llc -global-isel=0 -mtriple=amdgcn -mcpu=fiji -verify-machineinstrs < %s | FileCheck -check-prefixes=SDAG %s
; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=pitcairn -verify-machineinstrs < %s | FileCheck -check-prefixes=GISEL %s
; RUN: llc -global-isel=1 -mtriple=amdgcn -mcpu=fiji -verify-machineinstrs < %s | FileCheck -check-prefixes=GISEL %s
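; An informal reading of the expansion checked below (an interpretation
; of the generated code, not authoritative documentation of the
; lowering): inputs below a small exponent threshold are pre-scaled by
; 2^256 (the 0/1 compare result shifted left by 8 feeds v_ldexp_f64) so
; the reciprocal square root estimate from v_rsq_f64 stays in range; the
; estimate is then refined with fused multiply-add iterations; the
; result is rescaled by 2^-128 (0xffffff80 = -128, applied only when the
; input was pre-scaled); and v_cmp_class_f64 with mask 0x260 (-0, +0,
; +inf) selects the input itself, since sqrt(x) == x for those values.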
define double @v_sqrt_f64(double %x) {
; GISEL-LABEL: v_sqrt_f64:
; GISEL: ; %bb.0:
; GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GISEL-NEXT: v_mov_b32_e32 v2, 0
; GISEL-NEXT: v_bfrev_b32_e32 v3, 8
; GISEL-NEXT: v_cmp_lt_f64_e32 vcc, v[0:1], v[2:3]
; GISEL-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc
; GISEL-NEXT: v_lshlrev_b32_e32 v2, 8, v2
; GISEL-NEXT: v_ldexp_f64 v[0:1], v[0:1], v2
; GISEL-NEXT: v_rsq_f64_e32 v[2:3], v[0:1]
; GISEL-NEXT: v_mul_f64 v[4:5], v[2:3], 0.5
; GISEL-NEXT: v_mul_f64 v[2:3], v[0:1], v[2:3]
; GISEL-NEXT: v_fma_f64 v[6:7], -v[4:5], v[2:3], 0.5
; GISEL-NEXT: v_fma_f64 v[2:3], v[2:3], v[6:7], v[2:3]
; GISEL-NEXT: v_fma_f64 v[4:5], v[4:5], v[6:7], v[4:5]
; GISEL-NEXT: v_fma_f64 v[6:7], -v[2:3], v[2:3], v[0:1]
; GISEL-NEXT: v_fma_f64 v[2:3], v[6:7], v[4:5], v[2:3]
; GISEL-NEXT: v_fma_f64 v[6:7], -v[2:3], v[2:3], v[0:1]
; GISEL-NEXT: v_fma_f64 v[2:3], v[6:7], v[4:5], v[2:3]
; GISEL-NEXT: v_mov_b32_e32 v4, 0xffffff80
; GISEL-NEXT: v_mov_b32_e32 v5, 0x260
; GISEL-NEXT: v_cndmask_b32_e32 v4, 0, v4, vcc
; GISEL-NEXT: v_cmp_class_f64_e32 vcc, v[0:1], v5
; GISEL-NEXT: v_ldexp_f64 v[2:3], v[2:3], v4
; GISEL-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
; GISEL-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
; GISEL-NEXT: s_setpc_b64 s[30:31]
%result = call double @llvm.sqrt.f64(double %x)
ret double %result
}
define double @v_sqrt_f64_fneg(double %x) {
; GISEL-LABEL: v_sqrt_f64_fneg:
; GISEL: ; %bb.0:
; GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GISEL-NEXT: v_mov_b32_e32 v2, 0
; GISEL-NEXT: v_bfrev_b32_e32 v3, 8
; GISEL-NEXT: v_cmp_lt_f64_e64 vcc, -v[0:1], v[2:3]
; GISEL-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc
; GISEL-NEXT: v_lshlrev_b32_e32 v2, 8, v2
; GISEL-NEXT: v_ldexp_f64 v[0:1], -v[0:1], v2
; GISEL-NEXT: v_rsq_f64_e32 v[2:3], v[0:1]
; GISEL-NEXT: v_mul_f64 v[4:5], v[2:3], 0.5
; GISEL-NEXT: v_mul_f64 v[2:3], v[0:1], v[2:3]
; GISEL-NEXT: v_fma_f64 v[6:7], -v[4:5], v[2:3], 0.5
; GISEL-NEXT: v_fma_f64 v[2:3], v[2:3], v[6:7], v[2:3]
; GISEL-NEXT: v_fma_f64 v[4:5], v[4:5], v[6:7], v[4:5]
; GISEL-NEXT: v_fma_f64 v[6:7], -v[2:3], v[2:3], v[0:1]
; GISEL-NEXT: v_fma_f64 v[2:3], v[6:7], v[4:5], v[2:3]
; GISEL-NEXT: v_fma_f64 v[6:7], -v[2:3], v[2:3], v[0:1]
; GISEL-NEXT: v_fma_f64 v[2:3], v[6:7], v[4:5], v[2:3]
; GISEL-NEXT: v_mov_b32_e32 v4, 0xffffff80
; GISEL-NEXT: v_mov_b32_e32 v5, 0x260
; GISEL-NEXT: v_cndmask_b32_e32 v4, 0, v4, vcc
; GISEL-NEXT: v_cmp_class_f64_e32 vcc, v[0:1], v5
; GISEL-NEXT: v_ldexp_f64 v[2:3], v[2:3], v4
; GISEL-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
; GISEL-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
; GISEL-NEXT: s_setpc_b64 s[30:31]
%x.neg = fneg double %x
%result = call double @llvm.sqrt.f64(double %x.neg)
ret double %result
}
define double @v_sqrt_f64_fabs(double %x) {
; GISEL-LABEL: v_sqrt_f64_fabs:
; GISEL: ; %bb.0:
; GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GISEL-NEXT: v_mov_b32_e32 v2, 0
; GISEL-NEXT: v_bfrev_b32_e32 v3, 8
; GISEL-NEXT: v_cmp_lt_f64_e64 vcc, |v[0:1]|, v[2:3]
; GISEL-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc
; GISEL-NEXT: v_lshlrev_b32_e32 v2, 8, v2
; GISEL-NEXT: v_ldexp_f64 v[0:1], |v[0:1]|, v2
; GISEL-NEXT: v_rsq_f64_e32 v[2:3], v[0:1]
; GISEL-NEXT: v_mul_f64 v[4:5], v[2:3], 0.5
; GISEL-NEXT: v_mul_f64 v[2:3], v[0:1], v[2:3]
; GISEL-NEXT: v_fma_f64 v[6:7], -v[4:5], v[2:3], 0.5
; GISEL-NEXT: v_fma_f64 v[2:3], v[2:3], v[6:7], v[2:3]
; GISEL-NEXT: v_fma_f64 v[4:5], v[4:5], v[6:7], v[4:5]
; GISEL-NEXT: v_fma_f64 v[6:7], -v[2:3], v[2:3], v[0:1]
; GISEL-NEXT: v_fma_f64 v[2:3], v[6:7], v[4:5], v[2:3]
; GISEL-NEXT: v_fma_f64 v[6:7], -v[2:3], v[2:3], v[0:1]
; GISEL-NEXT: v_fma_f64 v[2:3], v[6:7], v[4:5], v[2:3]
; GISEL-NEXT: v_mov_b32_e32 v4, 0xffffff80
; GISEL-NEXT: v_mov_b32_e32 v5, 0x260
; GISEL-NEXT: v_cndmask_b32_e32 v4, 0, v4, vcc
; GISEL-NEXT: v_cmp_class_f64_e32 vcc, v[0:1], v5
; GISEL-NEXT: v_ldexp_f64 v[2:3], v[2:3], v4
; GISEL-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
; GISEL-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
; GISEL-NEXT: s_setpc_b64 s[30:31]
%x.fabs = call double @llvm.fabs.f64(double %x)
%result = call double @llvm.sqrt.f64(double %x.fabs)
ret double %result
}
define double @v_sqrt_f64_fneg_fabs(double %x) {
; GISEL-LABEL: v_sqrt_f64_fneg_fabs:
; GISEL: ; %bb.0:
; GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GISEL-NEXT: v_mov_b32_e32 v2, 0
; GISEL-NEXT: v_bfrev_b32_e32 v3, 8
; GISEL-NEXT: v_cmp_lt_f64_e64 vcc, -|v[0:1]|, v[2:3]
; GISEL-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc
; GISEL-NEXT: v_lshlrev_b32_e32 v2, 8, v2
; GISEL-NEXT: v_ldexp_f64 v[0:1], -|v[0:1]|, v2
; GISEL-NEXT: v_rsq_f64_e32 v[2:3], v[0:1]
; GISEL-NEXT: v_mul_f64 v[4:5], v[2:3], 0.5
; GISEL-NEXT: v_mul_f64 v[2:3], v[0:1], v[2:3]
; GISEL-NEXT: v_fma_f64 v[6:7], -v[4:5], v[2:3], 0.5
; GISEL-NEXT: v_fma_f64 v[2:3], v[2:3], v[6:7], v[2:3]
; GISEL-NEXT: v_fma_f64 v[4:5], v[4:5], v[6:7], v[4:5]
; GISEL-NEXT: v_fma_f64 v[6:7], -v[2:3], v[2:3], v[0:1]
; GISEL-NEXT: v_fma_f64 v[2:3], v[6:7], v[4:5], v[2:3]
; GISEL-NEXT: v_fma_f64 v[6:7], -v[2:3], v[2:3], v[0:1]
; GISEL-NEXT: v_fma_f64 v[2:3], v[6:7], v[4:5], v[2:3]
; GISEL-NEXT: v_mov_b32_e32 v4, 0xffffff80
; GISEL-NEXT: v_mov_b32_e32 v5, 0x260
; GISEL-NEXT: v_cndmask_b32_e32 v4, 0, v4, vcc
; GISEL-NEXT: v_cmp_class_f64_e32 vcc, v[0:1], v5
; GISEL-NEXT: v_ldexp_f64 v[2:3], v[2:3], v4
; GISEL-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
; GISEL-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
; GISEL-NEXT: s_setpc_b64 s[30:31]
%x.fabs = call double @llvm.fabs.f64(double %x)
%x.fabs.neg = fneg double %x.fabs
%result = call double @llvm.sqrt.f64(double %x.fabs.neg)
ret double %result
}
define double @v_sqrt_f64_ninf(double %x) {
; GISEL-LABEL: v_sqrt_f64_ninf:
; GISEL: ; %bb.0:
; GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GISEL-NEXT: v_mov_b32_e32 v2, 0
; GISEL-NEXT: v_bfrev_b32_e32 v3, 8
; GISEL-NEXT: v_cmp_lt_f64_e32 vcc, v[0:1], v[2:3]
; GISEL-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc
; GISEL-NEXT: v_lshlrev_b32_e32 v2, 8, v2
; GISEL-NEXT: v_ldexp_f64 v[0:1], v[0:1], v2
; GISEL-NEXT: v_rsq_f64_e32 v[2:3], v[0:1]
; GISEL-NEXT: v_mul_f64 v[4:5], v[2:3], 0.5
; GISEL-NEXT: v_mul_f64 v[2:3], v[0:1], v[2:3]
; GISEL-NEXT: v_fma_f64 v[6:7], -v[4:5], v[2:3], 0.5
; GISEL-NEXT: v_fma_f64 v[2:3], v[2:3], v[6:7], v[2:3]
; GISEL-NEXT: v_fma_f64 v[4:5], v[4:5], v[6:7], v[4:5]
; GISEL-NEXT: v_fma_f64 v[6:7], -v[2:3], v[2:3], v[0:1]
; GISEL-NEXT: v_fma_f64 v[2:3], v[6:7], v[4:5], v[2:3]
; GISEL-NEXT: v_fma_f64 v[6:7], -v[2:3], v[2:3], v[0:1]
; GISEL-NEXT: v_fma_f64 v[2:3], v[6:7], v[4:5], v[2:3]
; GISEL-NEXT: v_mov_b32_e32 v4, 0xffffff80
; GISEL-NEXT: v_mov_b32_e32 v5, 0x260
; GISEL-NEXT: v_cndmask_b32_e32 v4, 0, v4, vcc
; GISEL-NEXT: v_cmp_class_f64_e32 vcc, v[0:1], v5
; GISEL-NEXT: v_ldexp_f64 v[2:3], v[2:3], v4
; GISEL-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
; GISEL-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
; GISEL-NEXT: s_setpc_b64 s[30:31]
%result = call ninf double @llvm.sqrt.f64(double %x)
ret double %result
}
define double @v_sqrt_f64_no_infs_attribute(double %x) "no-infs-fp-math"="true" {
; GISEL-LABEL: v_sqrt_f64_no_infs_attribute:
; GISEL: ; %bb.0:
; GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GISEL-NEXT: v_mov_b32_e32 v2, 0
; GISEL-NEXT: v_bfrev_b32_e32 v3, 8
; GISEL-NEXT: v_cmp_lt_f64_e32 vcc, v[0:1], v[2:3]
; GISEL-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc
; GISEL-NEXT: v_lshlrev_b32_e32 v2, 8, v2
; GISEL-NEXT: v_ldexp_f64 v[0:1], v[0:1], v2
; GISEL-NEXT: v_rsq_f64_e32 v[2:3], v[0:1]
; GISEL-NEXT: v_mul_f64 v[4:5], v[2:3], 0.5
; GISEL-NEXT: v_mul_f64 v[2:3], v[0:1], v[2:3]
; GISEL-NEXT: v_fma_f64 v[6:7], -v[4:5], v[2:3], 0.5
; GISEL-NEXT: v_fma_f64 v[2:3], v[2:3], v[6:7], v[2:3]
; GISEL-NEXT: v_fma_f64 v[4:5], v[4:5], v[6:7], v[4:5]
; GISEL-NEXT: v_fma_f64 v[6:7], -v[2:3], v[2:3], v[0:1]
; GISEL-NEXT: v_fma_f64 v[2:3], v[6:7], v[4:5], v[2:3]
; GISEL-NEXT: v_fma_f64 v[6:7], -v[2:3], v[2:3], v[0:1]
; GISEL-NEXT: v_fma_f64 v[2:3], v[6:7], v[4:5], v[2:3]
; GISEL-NEXT: v_mov_b32_e32 v4, 0xffffff80
; GISEL-NEXT: v_mov_b32_e32 v5, 0x260
; GISEL-NEXT: v_cndmask_b32_e32 v4, 0, v4, vcc
; GISEL-NEXT: v_cmp_class_f64_e32 vcc, v[0:1], v5
; GISEL-NEXT: v_ldexp_f64 v[2:3], v[2:3], v4
; GISEL-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
; GISEL-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
; GISEL-NEXT: s_setpc_b64 s[30:31]
%result = call ninf double @llvm.sqrt.f64(double %x)
ret double %result
}
define double @v_sqrt_f64_nnan(double %x) {
; GISEL-LABEL: v_sqrt_f64_nnan:
; GISEL: ; %bb.0:
; GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GISEL-NEXT: v_mov_b32_e32 v2, 0
; GISEL-NEXT: v_bfrev_b32_e32 v3, 8
; GISEL-NEXT: v_cmp_lt_f64_e32 vcc, v[0:1], v[2:3]
; GISEL-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc
; GISEL-NEXT: v_lshlrev_b32_e32 v2, 8, v2
; GISEL-NEXT: v_ldexp_f64 v[0:1], v[0:1], v2
; GISEL-NEXT: v_rsq_f64_e32 v[2:3], v[0:1]
; GISEL-NEXT: v_mul_f64 v[4:5], v[2:3], 0.5
; GISEL-NEXT: v_mul_f64 v[2:3], v[0:1], v[2:3]
; GISEL-NEXT: v_fma_f64 v[6:7], -v[4:5], v[2:3], 0.5
; GISEL-NEXT: v_fma_f64 v[2:3], v[2:3], v[6:7], v[2:3]
; GISEL-NEXT: v_fma_f64 v[4:5], v[4:5], v[6:7], v[4:5]
; GISEL-NEXT: v_fma_f64 v[6:7], -v[2:3], v[2:3], v[0:1]
; GISEL-NEXT: v_fma_f64 v[2:3], v[6:7], v[4:5], v[2:3]
; GISEL-NEXT: v_fma_f64 v[6:7], -v[2:3], v[2:3], v[0:1]
; GISEL-NEXT: v_fma_f64 v[2:3], v[6:7], v[4:5], v[2:3]
; GISEL-NEXT: v_mov_b32_e32 v4, 0xffffff80
; GISEL-NEXT: v_mov_b32_e32 v5, 0x260
; GISEL-NEXT: v_cndmask_b32_e32 v4, 0, v4, vcc
; GISEL-NEXT: v_cmp_class_f64_e32 vcc, v[0:1], v5
; GISEL-NEXT: v_ldexp_f64 v[2:3], v[2:3], v4
; GISEL-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
; GISEL-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
; GISEL-NEXT: s_setpc_b64 s[30:31]
%result = call nnan double @llvm.sqrt.f64(double %x)
ret double %result
}
define amdgpu_ps <2 x i32> @s_sqrt_f64(double inreg %x) {
; SDAG-LABEL: s_sqrt_f64:
; SDAG: ; %bb.0:
; SDAG-NEXT: v_mov_b32_e32 v0, 0
; SDAG-NEXT: v_bfrev_b32_e32 v1, 8
; SDAG-NEXT: v_cmp_lt_f64_e32 vcc, s[0:1], v[0:1]
; SDAG-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
; SDAG-NEXT: v_lshlrev_b32_e32 v0, 8, v0
; SDAG-NEXT: v_ldexp_f64 v[0:1], s[0:1], v0
; SDAG-NEXT: s_and_b64 s[0:1], vcc, exec
; SDAG-NEXT: s_cselect_b32 s0, 0xffffff80, 0
; SDAG-NEXT: v_rsq_f64_e32 v[2:3], v[0:1]
; SDAG-NEXT: v_mul_f64 v[4:5], v[0:1], v[2:3]
; SDAG-NEXT: v_mul_f64 v[2:3], v[2:3], 0.5
; SDAG-NEXT: v_fma_f64 v[6:7], -v[2:3], v[4:5], 0.5
; SDAG-NEXT: v_fma_f64 v[4:5], v[4:5], v[6:7], v[4:5]
; SDAG-NEXT: v_fma_f64 v[2:3], v[2:3], v[6:7], v[2:3]
; SDAG-NEXT: v_fma_f64 v[6:7], -v[4:5], v[4:5], v[0:1]
; SDAG-NEXT: v_fma_f64 v[4:5], v[6:7], v[2:3], v[4:5]
; SDAG-NEXT: v_fma_f64 v[6:7], -v[4:5], v[4:5], v[0:1]
; SDAG-NEXT: v_fma_f64 v[2:3], v[6:7], v[2:3], v[4:5]
; SDAG-NEXT: v_mov_b32_e32 v4, 0x260
; SDAG-NEXT: v_cmp_class_f64_e32 vcc, v[0:1], v4
; SDAG-NEXT: v_ldexp_f64 v[2:3], v[2:3], s0
; SDAG-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
; SDAG-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
; SDAG-NEXT: v_readfirstlane_b32 s0, v0
; SDAG-NEXT: v_readfirstlane_b32 s1, v1
; SDAG-NEXT: ; return to shader part epilog
;
; GISEL-LABEL: s_sqrt_f64:
; GISEL: ; %bb.0:
; GISEL-NEXT: v_mov_b32_e32 v0, 0
; GISEL-NEXT: v_bfrev_b32_e32 v1, 8
; GISEL-NEXT: v_cmp_lt_f64_e32 vcc, s[0:1], v[0:1]
; GISEL-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
; GISEL-NEXT: v_lshlrev_b32_e32 v0, 8, v0
; GISEL-NEXT: v_ldexp_f64 v[0:1], s[0:1], v0
; GISEL-NEXT: v_rsq_f64_e32 v[2:3], v[0:1]
; GISEL-NEXT: v_mul_f64 v[4:5], v[2:3], 0.5
; GISEL-NEXT: v_mul_f64 v[2:3], v[0:1], v[2:3]
; GISEL-NEXT: v_fma_f64 v[6:7], -v[4:5], v[2:3], 0.5
; GISEL-NEXT: v_fma_f64 v[2:3], v[2:3], v[6:7], v[2:3]
; GISEL-NEXT: v_fma_f64 v[4:5], v[4:5], v[6:7], v[4:5]
; GISEL-NEXT: v_fma_f64 v[6:7], -v[2:3], v[2:3], v[0:1]
; GISEL-NEXT: v_fma_f64 v[2:3], v[6:7], v[4:5], v[2:3]
; GISEL-NEXT: v_fma_f64 v[6:7], -v[2:3], v[2:3], v[0:1]
; GISEL-NEXT: v_fma_f64 v[2:3], v[6:7], v[4:5], v[2:3]
; GISEL-NEXT: v_mov_b32_e32 v4, 0xffffff80
; GISEL-NEXT: v_mov_b32_e32 v5, 0x260
; GISEL-NEXT: v_cndmask_b32_e32 v4, 0, v4, vcc
; GISEL-NEXT: v_cmp_class_f64_e32 vcc, v[0:1], v5
; GISEL-NEXT: v_ldexp_f64 v[2:3], v[2:3], v4
; GISEL-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
; GISEL-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
; GISEL-NEXT: v_readfirstlane_b32 s0, v0
; GISEL-NEXT: v_readfirstlane_b32 s1, v1
; GISEL-NEXT: ; return to shader part epilog
%result = call double @llvm.sqrt.f64(double %x)
%cast = bitcast double %result to <2 x i32>
%cast.0 = extractelement <2 x i32> %cast, i32 0
%cast.1 = extractelement <2 x i32> %cast, i32 1
%lane.0 = call i32 @llvm.amdgcn.readfirstlane(i32 %cast.0)
%lane.1 = call i32 @llvm.amdgcn.readfirstlane(i32 %cast.1)
%insert.0 = insertelement <2 x i32> poison, i32 %lane.0, i32 0
%insert.1 = insertelement <2 x i32> %insert.0, i32 %lane.1, i32 1
ret <2 x i32> %insert.1
}
define amdgpu_ps <2 x i32> @s_sqrt_f64_ninf(double inreg %x) {
; SDAG-LABEL: s_sqrt_f64_ninf:
; SDAG: ; %bb.0:
; SDAG-NEXT: v_mov_b32_e32 v0, 0
; SDAG-NEXT: v_bfrev_b32_e32 v1, 8
; SDAG-NEXT: v_cmp_lt_f64_e32 vcc, s[0:1], v[0:1]
; SDAG-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
; SDAG-NEXT: v_lshlrev_b32_e32 v0, 8, v0
; SDAG-NEXT: v_ldexp_f64 v[0:1], s[0:1], v0
; SDAG-NEXT: s_and_b64 s[0:1], vcc, exec
; SDAG-NEXT: s_cselect_b32 s0, 0xffffff80, 0
; SDAG-NEXT: v_rsq_f64_e32 v[2:3], v[0:1]
; SDAG-NEXT: v_mul_f64 v[4:5], v[0:1], v[2:3]
; SDAG-NEXT: v_mul_f64 v[2:3], v[2:3], 0.5
; SDAG-NEXT: v_fma_f64 v[6:7], -v[2:3], v[4:5], 0.5
; SDAG-NEXT: v_fma_f64 v[4:5], v[4:5], v[6:7], v[4:5]
; SDAG-NEXT: v_fma_f64 v[2:3], v[2:3], v[6:7], v[2:3]
; SDAG-NEXT: v_fma_f64 v[6:7], -v[4:5], v[4:5], v[0:1]
; SDAG-NEXT: v_fma_f64 v[4:5], v[6:7], v[2:3], v[4:5]
; SDAG-NEXT: v_fma_f64 v[6:7], -v[4:5], v[4:5], v[0:1]
; SDAG-NEXT: v_fma_f64 v[2:3], v[6:7], v[2:3], v[4:5]
; SDAG-NEXT: v_mov_b32_e32 v4, 0x260
; SDAG-NEXT: v_cmp_class_f64_e32 vcc, v[0:1], v4
; SDAG-NEXT: v_ldexp_f64 v[2:3], v[2:3], s0
; SDAG-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
; SDAG-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
; SDAG-NEXT: v_readfirstlane_b32 s0, v0
; SDAG-NEXT: v_readfirstlane_b32 s1, v1
; SDAG-NEXT: ; return to shader part epilog
;
; GISEL-LABEL: s_sqrt_f64_ninf:
; GISEL: ; %bb.0:
; GISEL-NEXT: v_mov_b32_e32 v0, 0
; GISEL-NEXT: v_bfrev_b32_e32 v1, 8
; GISEL-NEXT: v_cmp_lt_f64_e32 vcc, s[0:1], v[0:1]
; GISEL-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
; GISEL-NEXT: v_lshlrev_b32_e32 v0, 8, v0
; GISEL-NEXT: v_ldexp_f64 v[0:1], s[0:1], v0
; GISEL-NEXT: v_rsq_f64_e32 v[2:3], v[0:1]
; GISEL-NEXT: v_mul_f64 v[4:5], v[2:3], 0.5
; GISEL-NEXT: v_mul_f64 v[2:3], v[0:1], v[2:3]
; GISEL-NEXT: v_fma_f64 v[6:7], -v[4:5], v[2:3], 0.5
; GISEL-NEXT: v_fma_f64 v[2:3], v[2:3], v[6:7], v[2:3]
; GISEL-NEXT: v_fma_f64 v[4:5], v[4:5], v[6:7], v[4:5]
; GISEL-NEXT: v_fma_f64 v[6:7], -v[2:3], v[2:3], v[0:1]
; GISEL-NEXT: v_fma_f64 v[2:3], v[6:7], v[4:5], v[2:3]
; GISEL-NEXT: v_fma_f64 v[6:7], -v[2:3], v[2:3], v[0:1]
; GISEL-NEXT: v_fma_f64 v[2:3], v[6:7], v[4:5], v[2:3]
; GISEL-NEXT: v_mov_b32_e32 v4, 0xffffff80
; GISEL-NEXT: v_mov_b32_e32 v5, 0x260
; GISEL-NEXT: v_cndmask_b32_e32 v4, 0, v4, vcc
; GISEL-NEXT: v_cmp_class_f64_e32 vcc, v[0:1], v5
; GISEL-NEXT: v_ldexp_f64 v[2:3], v[2:3], v4
; GISEL-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
; GISEL-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
; GISEL-NEXT: v_readfirstlane_b32 s0, v0
; GISEL-NEXT: v_readfirstlane_b32 s1, v1
; GISEL-NEXT: ; return to shader part epilog
%result = call ninf double @llvm.sqrt.f64(double %x)
%cast = bitcast double %result to <2 x i32>
%cast.0 = extractelement <2 x i32> %cast, i32 0
%cast.1 = extractelement <2 x i32> %cast, i32 1
%lane.0 = call i32 @llvm.amdgcn.readfirstlane(i32 %cast.0)
%lane.1 = call i32 @llvm.amdgcn.readfirstlane(i32 %cast.1)
%insert.0 = insertelement <2 x i32> poison, i32 %lane.0, i32 0
%insert.1 = insertelement <2 x i32> %insert.0, i32 %lane.1, i32 1
ret <2 x i32> %insert.1
}
define amdgpu_ps <2 x i32> @s_sqrt_f64_afn(double inreg %x) {
; SDAG-LABEL: s_sqrt_f64_afn:
; SDAG: ; %bb.0:
; SDAG-NEXT: v_mov_b32_e32 v0, 0
; SDAG-NEXT: v_bfrev_b32_e32 v1, 8
; SDAG-NEXT: v_cmp_lt_f64_e32 vcc, s[0:1], v[0:1]
; SDAG-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
; SDAG-NEXT: v_lshlrev_b32_e32 v0, 8, v0
; SDAG-NEXT: v_ldexp_f64 v[0:1], s[0:1], v0
; SDAG-NEXT: s_and_b64 s[0:1], vcc, exec
; SDAG-NEXT: s_cselect_b32 s0, 0xffffff80, 0
; SDAG-NEXT: v_rsq_f64_e32 v[2:3], v[0:1]
; SDAG-NEXT: v_mul_f64 v[4:5], v[0:1], v[2:3]
; SDAG-NEXT: v_mul_f64 v[2:3], v[2:3], 0.5
; SDAG-NEXT: v_fma_f64 v[6:7], -v[2:3], v[4:5], 0.5
; SDAG-NEXT: v_fma_f64 v[4:5], v[4:5], v[6:7], v[4:5]
; SDAG-NEXT: v_fma_f64 v[2:3], v[2:3], v[6:7], v[2:3]
; SDAG-NEXT: v_fma_f64 v[6:7], -v[4:5], v[4:5], v[0:1]
; SDAG-NEXT: v_fma_f64 v[4:5], v[6:7], v[2:3], v[4:5]
; SDAG-NEXT: v_fma_f64 v[6:7], -v[4:5], v[4:5], v[0:1]
; SDAG-NEXT: v_fma_f64 v[2:3], v[6:7], v[2:3], v[4:5]
; SDAG-NEXT: v_mov_b32_e32 v4, 0x260
; SDAG-NEXT: v_cmp_class_f64_e32 vcc, v[0:1], v4
; SDAG-NEXT: v_ldexp_f64 v[2:3], v[2:3], s0
; SDAG-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
; SDAG-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
; SDAG-NEXT: v_readfirstlane_b32 s0, v0
; SDAG-NEXT: v_readfirstlane_b32 s1, v1
; SDAG-NEXT: ; return to shader part epilog
;
; GISEL-LABEL: s_sqrt_f64_afn:
; GISEL: ; %bb.0:
; GISEL-NEXT: v_mov_b32_e32 v0, 0
; GISEL-NEXT: v_bfrev_b32_e32 v1, 8
; GISEL-NEXT: v_cmp_lt_f64_e32 vcc, s[0:1], v[0:1]
; GISEL-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
; GISEL-NEXT: v_lshlrev_b32_e32 v0, 8, v0
; GISEL-NEXT: v_ldexp_f64 v[0:1], s[0:1], v0
; GISEL-NEXT: v_rsq_f64_e32 v[2:3], v[0:1]
; GISEL-NEXT: v_mul_f64 v[4:5], v[2:3], 0.5
; GISEL-NEXT: v_mul_f64 v[2:3], v[0:1], v[2:3]
; GISEL-NEXT: v_fma_f64 v[6:7], -v[4:5], v[2:3], 0.5
; GISEL-NEXT: v_fma_f64 v[2:3], v[2:3], v[6:7], v[2:3]
; GISEL-NEXT: v_fma_f64 v[4:5], v[4:5], v[6:7], v[4:5]
; GISEL-NEXT: v_fma_f64 v[6:7], -v[2:3], v[2:3], v[0:1]
; GISEL-NEXT: v_fma_f64 v[2:3], v[6:7], v[4:5], v[2:3]
; GISEL-NEXT: v_fma_f64 v[6:7], -v[2:3], v[2:3], v[0:1]
; GISEL-NEXT: v_fma_f64 v[2:3], v[6:7], v[4:5], v[2:3]
; GISEL-NEXT: v_mov_b32_e32 v4, 0xffffff80
; GISEL-NEXT: v_mov_b32_e32 v5, 0x260
; GISEL-NEXT: v_cndmask_b32_e32 v4, 0, v4, vcc
; GISEL-NEXT: v_cmp_class_f64_e32 vcc, v[0:1], v5
; GISEL-NEXT: v_ldexp_f64 v[2:3], v[2:3], v4
; GISEL-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
; GISEL-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
; GISEL-NEXT: v_readfirstlane_b32 s0, v0
; GISEL-NEXT: v_readfirstlane_b32 s1, v1
; GISEL-NEXT: ; return to shader part epilog
%result = call afn double @llvm.sqrt.f64(double %x)
%cast = bitcast double %result to <2 x i32>
%cast.0 = extractelement <2 x i32> %cast, i32 0
%cast.1 = extractelement <2 x i32> %cast, i32 1
%lane.0 = call i32 @llvm.amdgcn.readfirstlane(i32 %cast.0)
%lane.1 = call i32 @llvm.amdgcn.readfirstlane(i32 %cast.1)
%insert.0 = insertelement <2 x i32> poison, i32 %lane.0, i32 0
%insert.1 = insertelement <2 x i32> %insert.0, i32 %lane.1, i32 1
ret <2 x i32> %insert.1
}
define amdgpu_ps <2 x i32> @s_sqrt_f64_afn_nnan_ninf(double inreg %x) {
; SDAG-LABEL: s_sqrt_f64_afn_nnan_ninf:
; SDAG: ; %bb.0:
; SDAG-NEXT: v_mov_b32_e32 v0, 0
; SDAG-NEXT: v_bfrev_b32_e32 v1, 8
; SDAG-NEXT: v_cmp_lt_f64_e32 vcc, s[0:1], v[0:1]
; SDAG-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
; SDAG-NEXT: v_lshlrev_b32_e32 v0, 8, v0
; SDAG-NEXT: v_ldexp_f64 v[0:1], s[0:1], v0
; SDAG-NEXT: s_and_b64 s[0:1], vcc, exec
; SDAG-NEXT: s_cselect_b32 s0, 0xffffff80, 0
; SDAG-NEXT: v_rsq_f64_e32 v[2:3], v[0:1]
; SDAG-NEXT: v_mul_f64 v[4:5], v[0:1], v[2:3]
; SDAG-NEXT: v_mul_f64 v[2:3], v[2:3], 0.5
; SDAG-NEXT: v_fma_f64 v[6:7], -v[2:3], v[4:5], 0.5
; SDAG-NEXT: v_fma_f64 v[4:5], v[4:5], v[6:7], v[4:5]
; SDAG-NEXT: v_fma_f64 v[2:3], v[2:3], v[6:7], v[2:3]
; SDAG-NEXT: v_fma_f64 v[6:7], -v[4:5], v[4:5], v[0:1]
; SDAG-NEXT: v_fma_f64 v[4:5], v[6:7], v[2:3], v[4:5]
; SDAG-NEXT: v_fma_f64 v[6:7], -v[4:5], v[4:5], v[0:1]
; SDAG-NEXT: v_fma_f64 v[2:3], v[6:7], v[2:3], v[4:5]
; SDAG-NEXT: v_mov_b32_e32 v4, 0x260
; SDAG-NEXT: v_cmp_class_f64_e32 vcc, v[0:1], v4
; SDAG-NEXT: v_ldexp_f64 v[2:3], v[2:3], s0
; SDAG-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
; SDAG-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
; SDAG-NEXT: v_readfirstlane_b32 s0, v0
; SDAG-NEXT: v_readfirstlane_b32 s1, v1
; SDAG-NEXT: ; return to shader part epilog
;
; GISEL-LABEL: s_sqrt_f64_afn_nnan_ninf:
; GISEL: ; %bb.0:
; GISEL-NEXT: v_mov_b32_e32 v0, 0
; GISEL-NEXT: v_bfrev_b32_e32 v1, 8
; GISEL-NEXT: v_cmp_lt_f64_e32 vcc, s[0:1], v[0:1]
; GISEL-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
; GISEL-NEXT: v_lshlrev_b32_e32 v0, 8, v0
; GISEL-NEXT: v_ldexp_f64 v[0:1], s[0:1], v0
; GISEL-NEXT: v_rsq_f64_e32 v[2:3], v[0:1]
; GISEL-NEXT: v_mul_f64 v[4:5], v[2:3], 0.5
; GISEL-NEXT: v_mul_f64 v[2:3], v[0:1], v[2:3]
; GISEL-NEXT: v_fma_f64 v[6:7], -v[4:5], v[2:3], 0.5
; GISEL-NEXT: v_fma_f64 v[2:3], v[2:3], v[6:7], v[2:3]
; GISEL-NEXT: v_fma_f64 v[4:5], v[4:5], v[6:7], v[4:5]
; GISEL-NEXT: v_fma_f64 v[6:7], -v[2:3], v[2:3], v[0:1]
; GISEL-NEXT: v_fma_f64 v[2:3], v[6:7], v[4:5], v[2:3]
; GISEL-NEXT: v_fma_f64 v[6:7], -v[2:3], v[2:3], v[0:1]
; GISEL-NEXT: v_fma_f64 v[2:3], v[6:7], v[4:5], v[2:3]
; GISEL-NEXT: v_mov_b32_e32 v4, 0xffffff80
; GISEL-NEXT: v_mov_b32_e32 v5, 0x260
; GISEL-NEXT: v_cndmask_b32_e32 v4, 0, v4, vcc
; GISEL-NEXT: v_cmp_class_f64_e32 vcc, v[0:1], v5
; GISEL-NEXT: v_ldexp_f64 v[2:3], v[2:3], v4
; GISEL-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
; GISEL-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
; GISEL-NEXT: v_readfirstlane_b32 s0, v0
; GISEL-NEXT: v_readfirstlane_b32 s1, v1
; GISEL-NEXT: ; return to shader part epilog
%result = call afn nnan ninf double @llvm.sqrt.f64(double %x)
%cast = bitcast double %result to <2 x i32>
%cast.0 = extractelement <2 x i32> %cast, i32 0
%cast.1 = extractelement <2 x i32> %cast, i32 1
%lane.0 = call i32 @llvm.amdgcn.readfirstlane(i32 %cast.0)
%lane.1 = call i32 @llvm.amdgcn.readfirstlane(i32 %cast.1)
%insert.0 = insertelement <2 x i32> poison, i32 %lane.0, i32 0
%insert.1 = insertelement <2 x i32> %insert.0, i32 %lane.1, i32 1
ret <2 x i32> %insert.1
}
define double @v_sqrt_f64_nsz(double %x) {
; GISEL-LABEL: v_sqrt_f64_nsz:
; GISEL: ; %bb.0:
; GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GISEL-NEXT: v_mov_b32_e32 v2, 0
; GISEL-NEXT: v_bfrev_b32_e32 v3, 8
; GISEL-NEXT: v_cmp_lt_f64_e32 vcc, v[0:1], v[2:3]
; GISEL-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc
; GISEL-NEXT: v_lshlrev_b32_e32 v2, 8, v2
; GISEL-NEXT: v_ldexp_f64 v[0:1], v[0:1], v2
; GISEL-NEXT: v_rsq_f64_e32 v[2:3], v[0:1]
; GISEL-NEXT: v_mul_f64 v[4:5], v[2:3], 0.5
; GISEL-NEXT: v_mul_f64 v[2:3], v[0:1], v[2:3]
; GISEL-NEXT: v_fma_f64 v[6:7], -v[4:5], v[2:3], 0.5
; GISEL-NEXT: v_fma_f64 v[2:3], v[2:3], v[6:7], v[2:3]
; GISEL-NEXT: v_fma_f64 v[4:5], v[4:5], v[6:7], v[4:5]
; GISEL-NEXT: v_fma_f64 v[6:7], -v[2:3], v[2:3], v[0:1]
; GISEL-NEXT: v_fma_f64 v[2:3], v[6:7], v[4:5], v[2:3]
; GISEL-NEXT: v_fma_f64 v[6:7], -v[2:3], v[2:3], v[0:1]
; GISEL-NEXT: v_fma_f64 v[2:3], v[6:7], v[4:5], v[2:3]
; GISEL-NEXT: v_mov_b32_e32 v4, 0xffffff80
; GISEL-NEXT: v_mov_b32_e32 v5, 0x260
; GISEL-NEXT: v_cndmask_b32_e32 v4, 0, v4, vcc
; GISEL-NEXT: v_cmp_class_f64_e32 vcc, v[0:1], v5
; GISEL-NEXT: v_ldexp_f64 v[2:3], v[2:3], v4
; GISEL-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
; GISEL-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
; GISEL-NEXT: s_setpc_b64 s[30:31]
%result = call nsz double @llvm.sqrt.f64(double %x)
ret double %result
}
define double @v_sqrt_f64_nnan_ninf(double %x) {
; GISEL-LABEL: v_sqrt_f64_nnan_ninf:
; GISEL: ; %bb.0:
; GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GISEL-NEXT: v_mov_b32_e32 v2, 0
; GISEL-NEXT: v_bfrev_b32_e32 v3, 8
; GISEL-NEXT: v_cmp_lt_f64_e32 vcc, v[0:1], v[2:3]
; GISEL-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc
; GISEL-NEXT: v_lshlrev_b32_e32 v2, 8, v2
; GISEL-NEXT: v_ldexp_f64 v[0:1], v[0:1], v2
; GISEL-NEXT: v_rsq_f64_e32 v[2:3], v[0:1]
; GISEL-NEXT: v_mul_f64 v[4:5], v[2:3], 0.5
; GISEL-NEXT: v_mul_f64 v[2:3], v[0:1], v[2:3]
; GISEL-NEXT: v_fma_f64 v[6:7], -v[4:5], v[2:3], 0.5
; GISEL-NEXT: v_fma_f64 v[2:3], v[2:3], v[6:7], v[2:3]
; GISEL-NEXT: v_fma_f64 v[4:5], v[4:5], v[6:7], v[4:5]
; GISEL-NEXT: v_fma_f64 v[6:7], -v[2:3], v[2:3], v[0:1]
; GISEL-NEXT: v_fma_f64 v[2:3], v[6:7], v[4:5], v[2:3]
; GISEL-NEXT: v_fma_f64 v[6:7], -v[2:3], v[2:3], v[0:1]
; GISEL-NEXT: v_fma_f64 v[2:3], v[6:7], v[4:5], v[2:3]
; GISEL-NEXT: v_mov_b32_e32 v4, 0xffffff80
; GISEL-NEXT: v_mov_b32_e32 v5, 0x260
; GISEL-NEXT: v_cndmask_b32_e32 v4, 0, v4, vcc
; GISEL-NEXT: v_cmp_class_f64_e32 vcc, v[0:1], v5
; GISEL-NEXT: v_ldexp_f64 v[2:3], v[2:3], v4
; GISEL-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
; GISEL-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
; GISEL-NEXT: s_setpc_b64 s[30:31]
%result = call nnan ninf double @llvm.sqrt.f64(double %x)
ret double %result
}
define double @v_sqrt_f64_nnan_ninf_nsz(double %x) {
; GISEL-LABEL: v_sqrt_f64_nnan_ninf_nsz:
; GISEL: ; %bb.0:
; GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GISEL-NEXT: v_mov_b32_e32 v2, 0
; GISEL-NEXT: v_bfrev_b32_e32 v3, 8
; GISEL-NEXT: v_cmp_lt_f64_e32 vcc, v[0:1], v[2:3]
; GISEL-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc
; GISEL-NEXT: v_lshlrev_b32_e32 v2, 8, v2
; GISEL-NEXT: v_ldexp_f64 v[0:1], v[0:1], v2
; GISEL-NEXT: v_rsq_f64_e32 v[2:3], v[0:1]
; GISEL-NEXT: v_mul_f64 v[4:5], v[2:3], 0.5
; GISEL-NEXT: v_mul_f64 v[2:3], v[0:1], v[2:3]
; GISEL-NEXT: v_fma_f64 v[6:7], -v[4:5], v[2:3], 0.5
; GISEL-NEXT: v_fma_f64 v[2:3], v[2:3], v[6:7], v[2:3]
; GISEL-NEXT: v_fma_f64 v[4:5], v[4:5], v[6:7], v[4:5]
; GISEL-NEXT: v_fma_f64 v[6:7], -v[2:3], v[2:3], v[0:1]
; GISEL-NEXT: v_fma_f64 v[2:3], v[6:7], v[4:5], v[2:3]
; GISEL-NEXT: v_fma_f64 v[6:7], -v[2:3], v[2:3], v[0:1]
; GISEL-NEXT: v_fma_f64 v[2:3], v[6:7], v[4:5], v[2:3]
; GISEL-NEXT: v_mov_b32_e32 v4, 0xffffff80
; GISEL-NEXT: v_mov_b32_e32 v5, 0x260
; GISEL-NEXT: v_cndmask_b32_e32 v4, 0, v4, vcc
; GISEL-NEXT: v_cmp_class_f64_e32 vcc, v[0:1], v5
; GISEL-NEXT: v_ldexp_f64 v[2:3], v[2:3], v4
; GISEL-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
; GISEL-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
; GISEL-NEXT: s_setpc_b64 s[30:31]
%result = call nnan ninf nsz double @llvm.sqrt.f64(double %x)
ret double %result
}
define double @v_sqrt_f64_afn(double %x) {
; GISEL-LABEL: v_sqrt_f64_afn:
; GISEL: ; %bb.0:
; GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GISEL-NEXT: v_mov_b32_e32 v2, 0
; GISEL-NEXT: v_bfrev_b32_e32 v3, 8
; GISEL-NEXT: v_cmp_lt_f64_e32 vcc, v[0:1], v[2:3]
; GISEL-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc
; GISEL-NEXT: v_lshlrev_b32_e32 v2, 8, v2
; GISEL-NEXT: v_ldexp_f64 v[0:1], v[0:1], v2
; GISEL-NEXT: v_rsq_f64_e32 v[2:3], v[0:1]
; GISEL-NEXT: v_mul_f64 v[4:5], v[2:3], 0.5
; GISEL-NEXT: v_mul_f64 v[2:3], v[0:1], v[2:3]
; GISEL-NEXT: v_fma_f64 v[6:7], -v[4:5], v[2:3], 0.5
; GISEL-NEXT: v_fma_f64 v[2:3], v[2:3], v[6:7], v[2:3]
; GISEL-NEXT: v_fma_f64 v[4:5], v[4:5], v[6:7], v[4:5]
; GISEL-NEXT: v_fma_f64 v[6:7], -v[2:3], v[2:3], v[0:1]
; GISEL-NEXT: v_fma_f64 v[2:3], v[6:7], v[4:5], v[2:3]
; GISEL-NEXT: v_fma_f64 v[6:7], -v[2:3], v[2:3], v[0:1]
; GISEL-NEXT: v_fma_f64 v[2:3], v[6:7], v[4:5], v[2:3]
; GISEL-NEXT: v_mov_b32_e32 v4, 0xffffff80
; GISEL-NEXT: v_mov_b32_e32 v5, 0x260
; GISEL-NEXT: v_cndmask_b32_e32 v4, 0, v4, vcc
; GISEL-NEXT: v_cmp_class_f64_e32 vcc, v[0:1], v5
; GISEL-NEXT: v_ldexp_f64 v[2:3], v[2:3], v4
; GISEL-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
; GISEL-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
; GISEL-NEXT: s_setpc_b64 s[30:31]
%result = call afn double @llvm.sqrt.f64(double %x)
ret double %result
}
define double @v_sqrt_f64_afn_nsz(double %x) {
; GISEL-LABEL: v_sqrt_f64_afn_nsz:
; GISEL: ; %bb.0:
; GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GISEL-NEXT: v_mov_b32_e32 v2, 0
; GISEL-NEXT: v_bfrev_b32_e32 v3, 8
; GISEL-NEXT: v_cmp_lt_f64_e32 vcc, v[0:1], v[2:3]
; GISEL-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc
; GISEL-NEXT: v_lshlrev_b32_e32 v2, 8, v2
; GISEL-NEXT: v_ldexp_f64 v[0:1], v[0:1], v2
; GISEL-NEXT: v_rsq_f64_e32 v[2:3], v[0:1]
; GISEL-NEXT: v_mul_f64 v[4:5], v[2:3], 0.5
; GISEL-NEXT: v_mul_f64 v[2:3], v[0:1], v[2:3]
; GISEL-NEXT: v_fma_f64 v[6:7], -v[4:5], v[2:3], 0.5
; GISEL-NEXT: v_fma_f64 v[2:3], v[2:3], v[6:7], v[2:3]
; GISEL-NEXT: v_fma_f64 v[4:5], v[4:5], v[6:7], v[4:5]
; GISEL-NEXT: v_fma_f64 v[6:7], -v[2:3], v[2:3], v[0:1]
; GISEL-NEXT: v_fma_f64 v[2:3], v[6:7], v[4:5], v[2:3]
; GISEL-NEXT: v_fma_f64 v[6:7], -v[2:3], v[2:3], v[0:1]
; GISEL-NEXT: v_fma_f64 v[2:3], v[6:7], v[4:5], v[2:3]
; GISEL-NEXT: v_mov_b32_e32 v4, 0xffffff80
; GISEL-NEXT: v_mov_b32_e32 v5, 0x260
; GISEL-NEXT: v_cndmask_b32_e32 v4, 0, v4, vcc
; GISEL-NEXT: v_cmp_class_f64_e32 vcc, v[0:1], v5
; GISEL-NEXT: v_ldexp_f64 v[2:3], v[2:3], v4
; GISEL-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
; GISEL-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
; GISEL-NEXT: s_setpc_b64 s[30:31]
%result = call afn nsz double @llvm.sqrt.f64(double %x)
ret double %result
}
define <2 x double> @v_sqrt_v2f64_afn(<2 x double> %x) {
; SDAG-LABEL: v_sqrt_v2f64_afn:
; SDAG: ; %bb.0:
; SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SDAG-NEXT: s_mov_b32 s4, 0
; SDAG-NEXT: s_brev_b32 s5, 8
; SDAG-NEXT: v_cmp_gt_f64_e32 vcc, s[4:5], v[0:1]
; SDAG-NEXT: v_cmp_gt_f64_e64 s[4:5], s[4:5], v[2:3]
; SDAG-NEXT: v_cndmask_b32_e64 v4, 0, 1, vcc
; SDAG-NEXT: v_lshlrev_b32_e32 v4, 8, v4
; SDAG-NEXT: v_ldexp_f64 v[0:1], v[0:1], v4
; SDAG-NEXT: v_cndmask_b32_e64 v4, 0, 1, s[4:5]
; SDAG-NEXT: v_lshlrev_b32_e32 v4, 8, v4
; SDAG-NEXT: v_ldexp_f64 v[2:3], v[2:3], v4
; SDAG-NEXT: v_rsq_f64_e32 v[4:5], v[0:1]
; SDAG-NEXT: v_rsq_f64_e32 v[6:7], v[2:3]
; SDAG-NEXT: v_mul_f64 v[8:9], v[0:1], v[4:5]
; SDAG-NEXT: v_mul_f64 v[4:5], v[4:5], 0.5
; SDAG-NEXT: v_mul_f64 v[10:11], v[2:3], v[6:7]
; SDAG-NEXT: v_mul_f64 v[6:7], v[6:7], 0.5
; SDAG-NEXT: v_fma_f64 v[12:13], -v[4:5], v[8:9], 0.5
; SDAG-NEXT: v_fma_f64 v[14:15], -v[6:7], v[10:11], 0.5
; SDAG-NEXT: v_fma_f64 v[8:9], v[8:9], v[12:13], v[8:9]
; SDAG-NEXT: v_fma_f64 v[4:5], v[4:5], v[12:13], v[4:5]
; SDAG-NEXT: v_fma_f64 v[10:11], v[10:11], v[14:15], v[10:11]
; SDAG-NEXT: v_fma_f64 v[6:7], v[6:7], v[14:15], v[6:7]
; SDAG-NEXT: v_fma_f64 v[12:13], -v[8:9], v[8:9], v[0:1]
; SDAG-NEXT: v_fma_f64 v[14:15], -v[10:11], v[10:11], v[2:3]
; SDAG-NEXT: v_fma_f64 v[8:9], v[12:13], v[4:5], v[8:9]
; SDAG-NEXT: v_fma_f64 v[10:11], v[14:15], v[6:7], v[10:11]
; SDAG-NEXT: v_fma_f64 v[12:13], -v[8:9], v[8:9], v[0:1]
; SDAG-NEXT: v_fma_f64 v[14:15], -v[10:11], v[10:11], v[2:3]
; SDAG-NEXT: v_fma_f64 v[4:5], v[12:13], v[4:5], v[8:9]
; SDAG-NEXT: v_mov_b32_e32 v8, 0xffffff80
; SDAG-NEXT: v_mov_b32_e32 v9, 0x260
; SDAG-NEXT: v_fma_f64 v[6:7], v[14:15], v[6:7], v[10:11]
; SDAG-NEXT: v_cndmask_b32_e32 v10, 0, v8, vcc
; SDAG-NEXT: v_cndmask_b32_e64 v8, 0, v8, s[4:5]
; SDAG-NEXT: v_cmp_class_f64_e32 vcc, v[0:1], v9
; SDAG-NEXT: v_cmp_class_f64_e64 s[4:5], v[2:3], v9
; SDAG-NEXT: v_ldexp_f64 v[4:5], v[4:5], v10
; SDAG-NEXT: v_ldexp_f64 v[6:7], v[6:7], v8
; SDAG-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc
; SDAG-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc
; SDAG-NEXT: v_cndmask_b32_e64 v2, v6, v2, s[4:5]
; SDAG-NEXT: v_cndmask_b32_e64 v3, v7, v3, s[4:5]
; SDAG-NEXT: s_setpc_b64 s[30:31]
;
; GISEL-LABEL: v_sqrt_v2f64_afn:
; GISEL: ; %bb.0:
; GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GISEL-NEXT: s_mov_b32 s4, 0
; GISEL-NEXT: s_brev_b32 s5, 8
; GISEL-NEXT: v_mov_b32_e32 v4, s4
; GISEL-NEXT: v_mov_b32_e32 v5, s5
; GISEL-NEXT: v_cmp_gt_f64_e32 vcc, s[4:5], v[0:1]
; GISEL-NEXT: v_cmp_lt_f64_e64 s[4:5], v[2:3], v[4:5]
; GISEL-NEXT: v_cndmask_b32_e64 v6, 0, 1, vcc
; GISEL-NEXT: v_cndmask_b32_e64 v4, 0, 1, s[4:5]
; GISEL-NEXT: v_lshlrev_b32_e32 v6, 8, v6
; GISEL-NEXT: v_lshlrev_b32_e32 v4, 8, v4
; GISEL-NEXT: v_ldexp_f64 v[0:1], v[0:1], v6
; GISEL-NEXT: v_ldexp_f64 v[2:3], v[2:3], v4
; GISEL-NEXT: v_rsq_f64_e32 v[4:5], v[0:1]
; GISEL-NEXT: v_rsq_f64_e32 v[6:7], v[2:3]
; GISEL-NEXT: v_mul_f64 v[8:9], v[4:5], 0.5
; GISEL-NEXT: v_mul_f64 v[4:5], v[0:1], v[4:5]
; GISEL-NEXT: v_mul_f64 v[10:11], v[6:7], 0.5
; GISEL-NEXT: v_mul_f64 v[6:7], v[2:3], v[6:7]
; GISEL-NEXT: v_fma_f64 v[12:13], -v[8:9], v[4:5], 0.5
; GISEL-NEXT: v_fma_f64 v[14:15], -v[10:11], v[6:7], 0.5
; GISEL-NEXT: v_fma_f64 v[4:5], v[4:5], v[12:13], v[4:5]
; GISEL-NEXT: v_fma_f64 v[8:9], v[8:9], v[12:13], v[8:9]
; GISEL-NEXT: v_fma_f64 v[6:7], v[6:7], v[14:15], v[6:7]
; GISEL-NEXT: v_fma_f64 v[10:11], v[10:11], v[14:15], v[10:11]
; GISEL-NEXT: v_fma_f64 v[12:13], -v[4:5], v[4:5], v[0:1]
; GISEL-NEXT: v_fma_f64 v[14:15], -v[6:7], v[6:7], v[2:3]
; GISEL-NEXT: v_fma_f64 v[4:5], v[12:13], v[8:9], v[4:5]
; GISEL-NEXT: v_fma_f64 v[6:7], v[14:15], v[10:11], v[6:7]
; GISEL-NEXT: v_fma_f64 v[12:13], -v[4:5], v[4:5], v[0:1]
; GISEL-NEXT: v_fma_f64 v[14:15], -v[6:7], v[6:7], v[2:3]
; GISEL-NEXT: v_fma_f64 v[4:5], v[12:13], v[8:9], v[4:5]
; GISEL-NEXT: v_mov_b32_e32 v8, 0xffffff80
; GISEL-NEXT: v_fma_f64 v[6:7], v[14:15], v[10:11], v[6:7]
; GISEL-NEXT: v_mov_b32_e32 v9, 0x260
; GISEL-NEXT: v_cndmask_b32_e32 v10, 0, v8, vcc
; GISEL-NEXT: v_cndmask_b32_e64 v8, 0, v8, s[4:5]
; GISEL-NEXT: v_cmp_class_f64_e32 vcc, v[0:1], v9
; GISEL-NEXT: v_cmp_class_f64_e64 s[4:5], v[2:3], v9
; GISEL-NEXT: v_ldexp_f64 v[4:5], v[4:5], v10
; GISEL-NEXT: v_ldexp_f64 v[6:7], v[6:7], v8
; GISEL-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc
; GISEL-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc
; GISEL-NEXT: v_cndmask_b32_e64 v2, v6, v2, s[4:5]
; GISEL-NEXT: v_cndmask_b32_e64 v3, v7, v3, s[4:5]
; GISEL-NEXT: s_setpc_b64 s[30:31]
%result = call afn <2 x double> @llvm.sqrt.v2f64(<2 x double> %x)
ret <2 x double> %result
}
define double @v_sqrt_f64_afn_nnan(double %x) {
; GISEL-LABEL: v_sqrt_f64_afn_nnan:
; GISEL: ; %bb.0:
; GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GISEL-NEXT: v_mov_b32_e32 v2, 0
; GISEL-NEXT: v_bfrev_b32_e32 v3, 8
; GISEL-NEXT: v_cmp_lt_f64_e32 vcc, v[0:1], v[2:3]
; GISEL-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc
; GISEL-NEXT: v_lshlrev_b32_e32 v2, 8, v2
; GISEL-NEXT: v_ldexp_f64 v[0:1], v[0:1], v2
; GISEL-NEXT: v_rsq_f64_e32 v[2:3], v[0:1]
; GISEL-NEXT: v_mul_f64 v[4:5], v[2:3], 0.5
; GISEL-NEXT: v_mul_f64 v[2:3], v[0:1], v[2:3]
; GISEL-NEXT: v_fma_f64 v[6:7], -v[4:5], v[2:3], 0.5
; GISEL-NEXT: v_fma_f64 v[2:3], v[2:3], v[6:7], v[2:3]
; GISEL-NEXT: v_fma_f64 v[4:5], v[4:5], v[6:7], v[4:5]
; GISEL-NEXT: v_fma_f64 v[6:7], -v[2:3], v[2:3], v[0:1]
; GISEL-NEXT: v_fma_f64 v[2:3], v[6:7], v[4:5], v[2:3]
; GISEL-NEXT: v_fma_f64 v[6:7], -v[2:3], v[2:3], v[0:1]
; GISEL-NEXT: v_fma_f64 v[2:3], v[6:7], v[4:5], v[2:3]
; GISEL-NEXT: v_mov_b32_e32 v4, 0xffffff80
; GISEL-NEXT: v_mov_b32_e32 v5, 0x260
; GISEL-NEXT: v_cndmask_b32_e32 v4, 0, v4, vcc
; GISEL-NEXT: v_cmp_class_f64_e32 vcc, v[0:1], v5
; GISEL-NEXT: v_ldexp_f64 v[2:3], v[2:3], v4
; GISEL-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
; GISEL-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
; GISEL-NEXT: s_setpc_b64 s[30:31]
%result = call afn nnan double @llvm.sqrt.f64(double %x)
ret double %result
}
define double @v_sqrt_f64_fabs_afn_ninf(double %x) {
; GISEL-LABEL: v_sqrt_f64_fabs_afn_ninf:
; GISEL: ; %bb.0:
; GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GISEL-NEXT: v_mov_b32_e32 v2, 0
; GISEL-NEXT: v_bfrev_b32_e32 v3, 8
; GISEL-NEXT: v_cmp_lt_f64_e64 vcc, |v[0:1]|, v[2:3]
; GISEL-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc
; GISEL-NEXT: v_lshlrev_b32_e32 v2, 8, v2
; GISEL-NEXT: v_ldexp_f64 v[0:1], |v[0:1]|, v2
; GISEL-NEXT: v_rsq_f64_e32 v[2:3], v[0:1]
; GISEL-NEXT: v_mul_f64 v[4:5], v[2:3], 0.5
; GISEL-NEXT: v_mul_f64 v[2:3], v[0:1], v[2:3]
; GISEL-NEXT: v_fma_f64 v[6:7], -v[4:5], v[2:3], 0.5
; GISEL-NEXT: v_fma_f64 v[2:3], v[2:3], v[6:7], v[2:3]
; GISEL-NEXT: v_fma_f64 v[4:5], v[4:5], v[6:7], v[4:5]
; GISEL-NEXT: v_fma_f64 v[6:7], -v[2:3], v[2:3], v[0:1]
; GISEL-NEXT: v_fma_f64 v[2:3], v[6:7], v[4:5], v[2:3]
; GISEL-NEXT: v_fma_f64 v[6:7], -v[2:3], v[2:3], v[0:1]
; GISEL-NEXT: v_fma_f64 v[2:3], v[6:7], v[4:5], v[2:3]
; GISEL-NEXT: v_mov_b32_e32 v4, 0xffffff80
; GISEL-NEXT: v_mov_b32_e32 v5, 0x260
; GISEL-NEXT: v_cndmask_b32_e32 v4, 0, v4, vcc
; GISEL-NEXT: v_cmp_class_f64_e32 vcc, v[0:1], v5
; GISEL-NEXT: v_ldexp_f64 v[2:3], v[2:3], v4
; GISEL-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
; GISEL-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
; GISEL-NEXT: s_setpc_b64 s[30:31]
%fabs = call double @llvm.fabs.f64(double %x)
%result = call afn ninf double @llvm.sqrt.f64(double %fabs)
ret double %result
}
define double @v_sqrt_f64_afn_nnan_ninf(double %x) {
; GISEL-LABEL: v_sqrt_f64_afn_nnan_ninf:
; GISEL: ; %bb.0:
; GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GISEL-NEXT: v_mov_b32_e32 v2, 0
; GISEL-NEXT: v_bfrev_b32_e32 v3, 8
; GISEL-NEXT: v_cmp_lt_f64_e32 vcc, v[0:1], v[2:3]
; GISEL-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc
; GISEL-NEXT: v_lshlrev_b32_e32 v2, 8, v2
; GISEL-NEXT: v_ldexp_f64 v[0:1], v[0:1], v2
; GISEL-NEXT: v_rsq_f64_e32 v[2:3], v[0:1]
; GISEL-NEXT: v_mul_f64 v[4:5], v[2:3], 0.5
; GISEL-NEXT: v_mul_f64 v[2:3], v[0:1], v[2:3]
; GISEL-NEXT: v_fma_f64 v[6:7], -v[4:5], v[2:3], 0.5
; GISEL-NEXT: v_fma_f64 v[2:3], v[2:3], v[6:7], v[2:3]
; GISEL-NEXT: v_fma_f64 v[4:5], v[4:5], v[6:7], v[4:5]
; GISEL-NEXT: v_fma_f64 v[6:7], -v[2:3], v[2:3], v[0:1]
; GISEL-NEXT: v_fma_f64 v[2:3], v[6:7], v[4:5], v[2:3]
; GISEL-NEXT: v_fma_f64 v[6:7], -v[2:3], v[2:3], v[0:1]
; GISEL-NEXT: v_fma_f64 v[2:3], v[6:7], v[4:5], v[2:3]
; GISEL-NEXT: v_mov_b32_e32 v4, 0xffffff80
; GISEL-NEXT: v_mov_b32_e32 v5, 0x260
; GISEL-NEXT: v_cndmask_b32_e32 v4, 0, v4, vcc
; GISEL-NEXT: v_cmp_class_f64_e32 vcc, v[0:1], v5
; GISEL-NEXT: v_ldexp_f64 v[2:3], v[2:3], v4
; GISEL-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
; GISEL-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
; GISEL-NEXT: s_setpc_b64 s[30:31]
%result = call afn nnan ninf double @llvm.sqrt.f64(double %x)
ret double %result
}
define <2 x double> @v_sqrt_v2f64_afn_nnan_ninf(<2 x double> %x) {
; SDAG-LABEL: v_sqrt_v2f64_afn_nnan_ninf:
; SDAG: ; %bb.0:
; SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SDAG-NEXT: s_mov_b32 s4, 0
; SDAG-NEXT: s_brev_b32 s5, 8
; SDAG-NEXT: v_cmp_gt_f64_e32 vcc, s[4:5], v[0:1]
; SDAG-NEXT: v_cmp_gt_f64_e64 s[4:5], s[4:5], v[2:3]
; SDAG-NEXT: v_cndmask_b32_e64 v4, 0, 1, vcc
; SDAG-NEXT: v_lshlrev_b32_e32 v4, 8, v4
; SDAG-NEXT: v_ldexp_f64 v[0:1], v[0:1], v4
; SDAG-NEXT: v_cndmask_b32_e64 v4, 0, 1, s[4:5]
; SDAG-NEXT: v_lshlrev_b32_e32 v4, 8, v4
; SDAG-NEXT: v_ldexp_f64 v[2:3], v[2:3], v4
; SDAG-NEXT: v_rsq_f64_e32 v[4:5], v[0:1]
; SDAG-NEXT: v_rsq_f64_e32 v[6:7], v[2:3]
; SDAG-NEXT: v_mul_f64 v[8:9], v[0:1], v[4:5]
; SDAG-NEXT: v_mul_f64 v[4:5], v[4:5], 0.5
; SDAG-NEXT: v_mul_f64 v[10:11], v[2:3], v[6:7]
; SDAG-NEXT: v_mul_f64 v[6:7], v[6:7], 0.5
; SDAG-NEXT: v_fma_f64 v[12:13], -v[4:5], v[8:9], 0.5
; SDAG-NEXT: v_fma_f64 v[14:15], -v[6:7], v[10:11], 0.5
; SDAG-NEXT: v_fma_f64 v[8:9], v[8:9], v[12:13], v[8:9]
; SDAG-NEXT: v_fma_f64 v[4:5], v[4:5], v[12:13], v[4:5]
; SDAG-NEXT: v_fma_f64 v[10:11], v[10:11], v[14:15], v[10:11]
; SDAG-NEXT: v_fma_f64 v[6:7], v[6:7], v[14:15], v[6:7]
; SDAG-NEXT: v_fma_f64 v[12:13], -v[8:9], v[8:9], v[0:1]
; SDAG-NEXT: v_fma_f64 v[14:15], -v[10:11], v[10:11], v[2:3]
; SDAG-NEXT: v_fma_f64 v[8:9], v[12:13], v[4:5], v[8:9]
; SDAG-NEXT: v_fma_f64 v[10:11], v[14:15], v[6:7], v[10:11]
; SDAG-NEXT: v_fma_f64 v[12:13], -v[8:9], v[8:9], v[0:1]
; SDAG-NEXT: v_fma_f64 v[14:15], -v[10:11], v[10:11], v[2:3]
; SDAG-NEXT: v_fma_f64 v[4:5], v[12:13], v[4:5], v[8:9]
; SDAG-NEXT: v_mov_b32_e32 v8, 0xffffff80
; SDAG-NEXT: v_mov_b32_e32 v9, 0x260
; SDAG-NEXT: v_fma_f64 v[6:7], v[14:15], v[6:7], v[10:11]
; SDAG-NEXT: v_cndmask_b32_e32 v10, 0, v8, vcc
; SDAG-NEXT: v_cndmask_b32_e64 v8, 0, v8, s[4:5]
; SDAG-NEXT: v_cmp_class_f64_e32 vcc, v[0:1], v9
; SDAG-NEXT: v_cmp_class_f64_e64 s[4:5], v[2:3], v9
; SDAG-NEXT: v_ldexp_f64 v[4:5], v[4:5], v10
; SDAG-NEXT: v_ldexp_f64 v[6:7], v[6:7], v8
; SDAG-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc
; SDAG-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc
; SDAG-NEXT: v_cndmask_b32_e64 v2, v6, v2, s[4:5]
; SDAG-NEXT: v_cndmask_b32_e64 v3, v7, v3, s[4:5]
; SDAG-NEXT: s_setpc_b64 s[30:31]
;
; GISEL-LABEL: v_sqrt_v2f64_afn_nnan_ninf:
; GISEL: ; %bb.0:
; GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GISEL-NEXT: s_mov_b32 s4, 0
; GISEL-NEXT: s_brev_b32 s5, 8
; GISEL-NEXT: v_mov_b32_e32 v4, s4
; GISEL-NEXT: v_mov_b32_e32 v5, s5
; GISEL-NEXT: v_cmp_gt_f64_e32 vcc, s[4:5], v[0:1]
; GISEL-NEXT: v_cmp_lt_f64_e64 s[4:5], v[2:3], v[4:5]
; GISEL-NEXT: v_cndmask_b32_e64 v6, 0, 1, vcc
; GISEL-NEXT: v_cndmask_b32_e64 v4, 0, 1, s[4:5]
; GISEL-NEXT: v_lshlrev_b32_e32 v6, 8, v6
; GISEL-NEXT: v_lshlrev_b32_e32 v4, 8, v4
; GISEL-NEXT: v_ldexp_f64 v[0:1], v[0:1], v6
; GISEL-NEXT: v_ldexp_f64 v[2:3], v[2:3], v4
; GISEL-NEXT: v_rsq_f64_e32 v[4:5], v[0:1]
; GISEL-NEXT: v_rsq_f64_e32 v[6:7], v[2:3]
; GISEL-NEXT: v_mul_f64 v[8:9], v[4:5], 0.5
; GISEL-NEXT: v_mul_f64 v[4:5], v[0:1], v[4:5]
; GISEL-NEXT: v_mul_f64 v[10:11], v[6:7], 0.5
; GISEL-NEXT: v_mul_f64 v[6:7], v[2:3], v[6:7]
; GISEL-NEXT: v_fma_f64 v[12:13], -v[8:9], v[4:5], 0.5
; GISEL-NEXT: v_fma_f64 v[14:15], -v[10:11], v[6:7], 0.5
; GISEL-NEXT: v_fma_f64 v[4:5], v[4:5], v[12:13], v[4:5]
; GISEL-NEXT: v_fma_f64 v[8:9], v[8:9], v[12:13], v[8:9]
; GISEL-NEXT: v_fma_f64 v[6:7], v[6:7], v[14:15], v[6:7]
; GISEL-NEXT: v_fma_f64 v[10:11], v[10:11], v[14:15], v[10:11]
; GISEL-NEXT: v_fma_f64 v[12:13], -v[4:5], v[4:5], v[0:1]
; GISEL-NEXT: v_fma_f64 v[14:15], -v[6:7], v[6:7], v[2:3]
; GISEL-NEXT: v_fma_f64 v[4:5], v[12:13], v[8:9], v[4:5]
; GISEL-NEXT: v_fma_f64 v[6:7], v[14:15], v[10:11], v[6:7]
; GISEL-NEXT: v_fma_f64 v[12:13], -v[4:5], v[4:5], v[0:1]
; GISEL-NEXT: v_fma_f64 v[14:15], -v[6:7], v[6:7], v[2:3]
; GISEL-NEXT: v_fma_f64 v[4:5], v[12:13], v[8:9], v[4:5]
; GISEL-NEXT: v_mov_b32_e32 v8, 0xffffff80
; GISEL-NEXT: v_fma_f64 v[6:7], v[14:15], v[10:11], v[6:7]
; GISEL-NEXT: v_mov_b32_e32 v9, 0x260
; GISEL-NEXT: v_cndmask_b32_e32 v10, 0, v8, vcc
; GISEL-NEXT: v_cndmask_b32_e64 v8, 0, v8, s[4:5]
; GISEL-NEXT: v_cmp_class_f64_e32 vcc, v[0:1], v9
; GISEL-NEXT: v_cmp_class_f64_e64 s[4:5], v[2:3], v9
; GISEL-NEXT: v_ldexp_f64 v[4:5], v[4:5], v10
; GISEL-NEXT: v_ldexp_f64 v[6:7], v[6:7], v8
; GISEL-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc
; GISEL-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc
; GISEL-NEXT: v_cndmask_b32_e64 v2, v6, v2, s[4:5]
; GISEL-NEXT: v_cndmask_b32_e64 v3, v7, v3, s[4:5]
; GISEL-NEXT: s_setpc_b64 s[30:31]
%result = call afn nnan ninf <2 x double> @llvm.sqrt.v2f64(<2 x double> %x)
ret <2 x double> %result
}
define double @v_sqrt_f64_afn_nnan_ninf_nsz(double %x) {
; GISEL-LABEL: v_sqrt_f64_afn_nnan_ninf_nsz:
; GISEL: ; %bb.0:
; GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GISEL-NEXT: v_mov_b32_e32 v2, 0
; GISEL-NEXT: v_bfrev_b32_e32 v3, 8
; GISEL-NEXT: v_cmp_lt_f64_e32 vcc, v[0:1], v[2:3]
; GISEL-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc
; GISEL-NEXT: v_lshlrev_b32_e32 v2, 8, v2
; GISEL-NEXT: v_ldexp_f64 v[0:1], v[0:1], v2
; GISEL-NEXT: v_rsq_f64_e32 v[2:3], v[0:1]
; GISEL-NEXT: v_mul_f64 v[4:5], v[2:3], 0.5
; GISEL-NEXT: v_mul_f64 v[2:3], v[0:1], v[2:3]
; GISEL-NEXT: v_fma_f64 v[6:7], -v[4:5], v[2:3], 0.5
; GISEL-NEXT: v_fma_f64 v[2:3], v[2:3], v[6:7], v[2:3]
; GISEL-NEXT: v_fma_f64 v[4:5], v[4:5], v[6:7], v[4:5]
; GISEL-NEXT: v_fma_f64 v[6:7], -v[2:3], v[2:3], v[0:1]
; GISEL-NEXT: v_fma_f64 v[2:3], v[6:7], v[4:5], v[2:3]
; GISEL-NEXT: v_fma_f64 v[6:7], -v[2:3], v[2:3], v[0:1]
; GISEL-NEXT: v_fma_f64 v[2:3], v[6:7], v[4:5], v[2:3]
; GISEL-NEXT: v_mov_b32_e32 v4, 0xffffff80
; GISEL-NEXT: v_mov_b32_e32 v5, 0x260
; GISEL-NEXT: v_cndmask_b32_e32 v4, 0, v4, vcc
; GISEL-NEXT: v_cmp_class_f64_e32 vcc, v[0:1], v5
; GISEL-NEXT: v_ldexp_f64 v[2:3], v[2:3], v4
; GISEL-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
; GISEL-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
; GISEL-NEXT: s_setpc_b64 s[30:31]
%result = call afn nnan ninf nsz double @llvm.sqrt.f64(double %x)
ret double %result
}
define double @v_sqrt_f64__approx_func_fp_math(double %x) #2 {
; GISEL-LABEL: v_sqrt_f64__approx_func_fp_math:
; GISEL: ; %bb.0:
; GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GISEL-NEXT: v_mov_b32_e32 v2, 0
; GISEL-NEXT: v_bfrev_b32_e32 v3, 8
; GISEL-NEXT: v_cmp_lt_f64_e32 vcc, v[0:1], v[2:3]
; GISEL-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc
; GISEL-NEXT: v_lshlrev_b32_e32 v2, 8, v2
; GISEL-NEXT: v_ldexp_f64 v[0:1], v[0:1], v2
; GISEL-NEXT: v_rsq_f64_e32 v[2:3], v[0:1]
; GISEL-NEXT: v_mul_f64 v[4:5], v[2:3], 0.5
; GISEL-NEXT: v_mul_f64 v[2:3], v[0:1], v[2:3]
; GISEL-NEXT: v_fma_f64 v[6:7], -v[4:5], v[2:3], 0.5
; GISEL-NEXT: v_fma_f64 v[2:3], v[2:3], v[6:7], v[2:3]
; GISEL-NEXT: v_fma_f64 v[4:5], v[4:5], v[6:7], v[4:5]
; GISEL-NEXT: v_fma_f64 v[6:7], -v[2:3], v[2:3], v[0:1]
; GISEL-NEXT: v_fma_f64 v[2:3], v[6:7], v[4:5], v[2:3]
; GISEL-NEXT: v_fma_f64 v[6:7], -v[2:3], v[2:3], v[0:1]
; GISEL-NEXT: v_fma_f64 v[2:3], v[6:7], v[4:5], v[2:3]
; GISEL-NEXT: v_mov_b32_e32 v4, 0xffffff80
; GISEL-NEXT: v_mov_b32_e32 v5, 0x260
; GISEL-NEXT: v_cndmask_b32_e32 v4, 0, v4, vcc
; GISEL-NEXT: v_cmp_class_f64_e32 vcc, v[0:1], v5
; GISEL-NEXT: v_ldexp_f64 v[2:3], v[2:3], v4
; GISEL-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
; GISEL-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
; GISEL-NEXT: s_setpc_b64 s[30:31]
%result = call nsz double @llvm.sqrt.f64(double %x)
ret double %result
}
define double @v_sqrt_f64__enough_unsafe_attrs(double %x) #3 {
; GISEL-LABEL: v_sqrt_f64__enough_unsafe_attrs:
; GISEL: ; %bb.0:
; GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GISEL-NEXT: v_mov_b32_e32 v2, 0
; GISEL-NEXT: v_bfrev_b32_e32 v3, 8
; GISEL-NEXT: v_cmp_lt_f64_e32 vcc, v[0:1], v[2:3]
; GISEL-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc
; GISEL-NEXT: v_lshlrev_b32_e32 v2, 8, v2
; GISEL-NEXT: v_ldexp_f64 v[0:1], v[0:1], v2
; GISEL-NEXT: v_rsq_f64_e32 v[2:3], v[0:1]
; GISEL-NEXT: v_mul_f64 v[4:5], v[2:3], 0.5
; GISEL-NEXT: v_mul_f64 v[2:3], v[0:1], v[2:3]
; GISEL-NEXT: v_fma_f64 v[6:7], -v[4:5], v[2:3], 0.5
; GISEL-NEXT: v_fma_f64 v[2:3], v[2:3], v[6:7], v[2:3]
; GISEL-NEXT: v_fma_f64 v[4:5], v[4:5], v[6:7], v[4:5]
; GISEL-NEXT: v_fma_f64 v[6:7], -v[2:3], v[2:3], v[0:1]
; GISEL-NEXT: v_fma_f64 v[2:3], v[6:7], v[4:5], v[2:3]
; GISEL-NEXT: v_fma_f64 v[6:7], -v[2:3], v[2:3], v[0:1]
; GISEL-NEXT: v_fma_f64 v[2:3], v[6:7], v[4:5], v[2:3]
; GISEL-NEXT: v_mov_b32_e32 v4, 0xffffff80
; GISEL-NEXT: v_mov_b32_e32 v5, 0x260
; GISEL-NEXT: v_cndmask_b32_e32 v4, 0, v4, vcc
; GISEL-NEXT: v_cmp_class_f64_e32 vcc, v[0:1], v5
; GISEL-NEXT: v_ldexp_f64 v[2:3], v[2:3], v4
; GISEL-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
; GISEL-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
; GISEL-NEXT: s_setpc_b64 s[30:31]
%result = call nsz double @llvm.sqrt.f64(double %x)
ret double %result
}
define double @v_sqrt_f64__unsafe_attr(double %x) #4 {
; GISEL-LABEL: v_sqrt_f64__unsafe_attr:
; GISEL: ; %bb.0:
; GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GISEL-NEXT: v_mov_b32_e32 v2, 0
; GISEL-NEXT: v_bfrev_b32_e32 v3, 8
; GISEL-NEXT: v_cmp_lt_f64_e32 vcc, v[0:1], v[2:3]
; GISEL-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc
; GISEL-NEXT: v_lshlrev_b32_e32 v2, 8, v2
; GISEL-NEXT: v_ldexp_f64 v[0:1], v[0:1], v2
; GISEL-NEXT: v_rsq_f64_e32 v[2:3], v[0:1]
; GISEL-NEXT: v_mul_f64 v[4:5], v[2:3], 0.5
; GISEL-NEXT: v_mul_f64 v[2:3], v[0:1], v[2:3]
; GISEL-NEXT: v_fma_f64 v[6:7], -v[4:5], v[2:3], 0.5
; GISEL-NEXT: v_fma_f64 v[2:3], v[2:3], v[6:7], v[2:3]
; GISEL-NEXT: v_fma_f64 v[4:5], v[4:5], v[6:7], v[4:5]
; GISEL-NEXT: v_fma_f64 v[6:7], -v[2:3], v[2:3], v[0:1]
; GISEL-NEXT: v_fma_f64 v[2:3], v[6:7], v[4:5], v[2:3]
; GISEL-NEXT: v_fma_f64 v[6:7], -v[2:3], v[2:3], v[0:1]
; GISEL-NEXT: v_fma_f64 v[2:3], v[6:7], v[4:5], v[2:3]
; GISEL-NEXT: v_mov_b32_e32 v4, 0xffffff80
; GISEL-NEXT: v_mov_b32_e32 v5, 0x260
; GISEL-NEXT: v_cndmask_b32_e32 v4, 0, v4, vcc
; GISEL-NEXT: v_cmp_class_f64_e32 vcc, v[0:1], v5
; GISEL-NEXT: v_ldexp_f64 v[2:3], v[2:3], v4
; GISEL-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
; GISEL-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
; GISEL-NEXT: s_setpc_b64 s[30:31]
%result = call nsz double @llvm.sqrt.f64(double %x)
ret double %result
}
define <2 x double> @v_sqrt_v2f64(<2 x double> %x) {
; SDAG-LABEL: v_sqrt_v2f64:
; SDAG: ; %bb.0:
; SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SDAG-NEXT: s_mov_b32 s4, 0
; SDAG-NEXT: s_brev_b32 s5, 8
; SDAG-NEXT: v_cmp_gt_f64_e32 vcc, s[4:5], v[0:1]
; SDAG-NEXT: v_cmp_gt_f64_e64 s[4:5], s[4:5], v[2:3]
; SDAG-NEXT: v_cndmask_b32_e64 v4, 0, 1, vcc
; SDAG-NEXT: v_lshlrev_b32_e32 v4, 8, v4
; SDAG-NEXT: v_ldexp_f64 v[0:1], v[0:1], v4
; SDAG-NEXT: v_cndmask_b32_e64 v4, 0, 1, s[4:5]
; SDAG-NEXT: v_lshlrev_b32_e32 v4, 8, v4
; SDAG-NEXT: v_ldexp_f64 v[2:3], v[2:3], v4
; SDAG-NEXT: v_rsq_f64_e32 v[4:5], v[0:1]
; SDAG-NEXT: v_rsq_f64_e32 v[6:7], v[2:3]
; SDAG-NEXT: v_mul_f64 v[8:9], v[0:1], v[4:5]
; SDAG-NEXT: v_mul_f64 v[4:5], v[4:5], 0.5
; SDAG-NEXT: v_mul_f64 v[10:11], v[2:3], v[6:7]
; SDAG-NEXT: v_mul_f64 v[6:7], v[6:7], 0.5
; SDAG-NEXT: v_fma_f64 v[12:13], -v[4:5], v[8:9], 0.5
; SDAG-NEXT: v_fma_f64 v[14:15], -v[6:7], v[10:11], 0.5
; SDAG-NEXT: v_fma_f64 v[8:9], v[8:9], v[12:13], v[8:9]
; SDAG-NEXT: v_fma_f64 v[4:5], v[4:5], v[12:13], v[4:5]
; SDAG-NEXT: v_fma_f64 v[10:11], v[10:11], v[14:15], v[10:11]
; SDAG-NEXT: v_fma_f64 v[6:7], v[6:7], v[14:15], v[6:7]
; SDAG-NEXT: v_fma_f64 v[12:13], -v[8:9], v[8:9], v[0:1]
; SDAG-NEXT: v_fma_f64 v[14:15], -v[10:11], v[10:11], v[2:3]
; SDAG-NEXT: v_fma_f64 v[8:9], v[12:13], v[4:5], v[8:9]
; SDAG-NEXT: v_fma_f64 v[10:11], v[14:15], v[6:7], v[10:11]
; SDAG-NEXT: v_fma_f64 v[12:13], -v[8:9], v[8:9], v[0:1]
; SDAG-NEXT: v_fma_f64 v[14:15], -v[10:11], v[10:11], v[2:3]
; SDAG-NEXT: v_fma_f64 v[4:5], v[12:13], v[4:5], v[8:9]
; SDAG-NEXT: v_mov_b32_e32 v8, 0xffffff80
; SDAG-NEXT: v_mov_b32_e32 v9, 0x260
; SDAG-NEXT: v_fma_f64 v[6:7], v[14:15], v[6:7], v[10:11]
; SDAG-NEXT: v_cndmask_b32_e32 v10, 0, v8, vcc
; SDAG-NEXT: v_cndmask_b32_e64 v8, 0, v8, s[4:5]
; SDAG-NEXT: v_cmp_class_f64_e32 vcc, v[0:1], v9
; SDAG-NEXT: v_cmp_class_f64_e64 s[4:5], v[2:3], v9
; SDAG-NEXT: v_ldexp_f64 v[4:5], v[4:5], v10
; SDAG-NEXT: v_ldexp_f64 v[6:7], v[6:7], v8
; SDAG-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc
; SDAG-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc
; SDAG-NEXT: v_cndmask_b32_e64 v2, v6, v2, s[4:5]
; SDAG-NEXT: v_cndmask_b32_e64 v3, v7, v3, s[4:5]
; SDAG-NEXT: s_setpc_b64 s[30:31]
;
; GISEL-LABEL: v_sqrt_v2f64:
; GISEL: ; %bb.0:
; GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GISEL-NEXT: s_mov_b32 s4, 0
; GISEL-NEXT: s_brev_b32 s5, 8
; GISEL-NEXT: v_mov_b32_e32 v4, s4
; GISEL-NEXT: v_mov_b32_e32 v5, s5
; GISEL-NEXT: v_cmp_gt_f64_e32 vcc, s[4:5], v[0:1]
; GISEL-NEXT: v_cmp_lt_f64_e64 s[4:5], v[2:3], v[4:5]
; GISEL-NEXT: v_cndmask_b32_e64 v6, 0, 1, vcc
; GISEL-NEXT: v_cndmask_b32_e64 v4, 0, 1, s[4:5]
; GISEL-NEXT: v_lshlrev_b32_e32 v6, 8, v6
; GISEL-NEXT: v_lshlrev_b32_e32 v4, 8, v4
; GISEL-NEXT: v_ldexp_f64 v[0:1], v[0:1], v6
; GISEL-NEXT: v_ldexp_f64 v[2:3], v[2:3], v4
; GISEL-NEXT: v_rsq_f64_e32 v[4:5], v[0:1]
; GISEL-NEXT: v_rsq_f64_e32 v[6:7], v[2:3]
; GISEL-NEXT: v_mul_f64 v[8:9], v[4:5], 0.5
; GISEL-NEXT: v_mul_f64 v[4:5], v[0:1], v[4:5]
; GISEL-NEXT: v_mul_f64 v[10:11], v[6:7], 0.5
; GISEL-NEXT: v_mul_f64 v[6:7], v[2:3], v[6:7]
; GISEL-NEXT: v_fma_f64 v[12:13], -v[8:9], v[4:5], 0.5
; GISEL-NEXT: v_fma_f64 v[14:15], -v[10:11], v[6:7], 0.5
; GISEL-NEXT: v_fma_f64 v[4:5], v[4:5], v[12:13], v[4:5]
; GISEL-NEXT: v_fma_f64 v[8:9], v[8:9], v[12:13], v[8:9]
; GISEL-NEXT: v_fma_f64 v[6:7], v[6:7], v[14:15], v[6:7]
; GISEL-NEXT: v_fma_f64 v[10:11], v[10:11], v[14:15], v[10:11]
; GISEL-NEXT: v_fma_f64 v[12:13], -v[4:5], v[4:5], v[0:1]
; GISEL-NEXT: v_fma_f64 v[14:15], -v[6:7], v[6:7], v[2:3]
; GISEL-NEXT: v_fma_f64 v[4:5], v[12:13], v[8:9], v[4:5]
; GISEL-NEXT: v_fma_f64 v[6:7], v[14:15], v[10:11], v[6:7]
; GISEL-NEXT: v_fma_f64 v[12:13], -v[4:5], v[4:5], v[0:1]
; GISEL-NEXT: v_fma_f64 v[14:15], -v[6:7], v[6:7], v[2:3]
; GISEL-NEXT: v_fma_f64 v[4:5], v[12:13], v[8:9], v[4:5]
; GISEL-NEXT: v_mov_b32_e32 v8, 0xffffff80
; GISEL-NEXT: v_fma_f64 v[6:7], v[14:15], v[10:11], v[6:7]
; GISEL-NEXT: v_mov_b32_e32 v9, 0x260
; GISEL-NEXT: v_cndmask_b32_e32 v10, 0, v8, vcc
; GISEL-NEXT: v_cndmask_b32_e64 v8, 0, v8, s[4:5]
; GISEL-NEXT: v_cmp_class_f64_e32 vcc, v[0:1], v9
; GISEL-NEXT: v_cmp_class_f64_e64 s[4:5], v[2:3], v9
; GISEL-NEXT: v_ldexp_f64 v[4:5], v[4:5], v10
; GISEL-NEXT: v_ldexp_f64 v[6:7], v[6:7], v8
; GISEL-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc
; GISEL-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc
; GISEL-NEXT: v_cndmask_b32_e64 v2, v6, v2, s[4:5]
; GISEL-NEXT: v_cndmask_b32_e64 v3, v7, v3, s[4:5]
; GISEL-NEXT: s_setpc_b64 s[30:31]
%result = call <2 x double> @llvm.sqrt.v2f64(<2 x double> %x)
ret <2 x double> %result
}
define <3 x double> @v_sqrt_v3f64(<3 x double> %x) {
; SDAG-LABEL: v_sqrt_v3f64:
; SDAG: ; %bb.0:
; SDAG-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SDAG-NEXT: s_mov_b32 s6, 0
; SDAG-NEXT: s_brev_b32 s7, 8
; SDAG-NEXT: v_cmp_gt_f64_e32 vcc, s[6:7], v[0:1]
; SDAG-NEXT: v_cmp_gt_f64_e64 s[4:5], s[6:7], v[2:3]
; SDAG-NEXT: v_cmp_gt_f64_e64 s[6:7], s[6:7], v[4:5]
; SDAG-NEXT: v_cndmask_b32_e64 v6, 0, 1, vcc
; SDAG-NEXT: v_lshlrev_b32_e32 v6, 8, v6
; SDAG-NEXT: v_ldexp_f64 v[0:1], v[0:1], v6
; SDAG-NEXT: v_cndmask_b32_e64 v6, 0, 1, s[4:5]
; SDAG-NEXT: v_lshlrev_b32_e32 v6, 8, v6
; SDAG-NEXT: v_ldexp_f64 v[2:3], v[2:3], v6
; SDAG-NEXT: v_cndmask_b32_e64 v10, 0, 1, s[6:7]
; SDAG-NEXT: v_lshlrev_b32_e32 v10, 8, v10
; SDAG-NEXT: v_ldexp_f64 v[4:5], v[4:5], v10
; SDAG-NEXT: v_rsq_f64_e32 v[6:7], v[0:1]
; SDAG-NEXT: v_rsq_f64_e32 v[8:9], v[2:3]
; SDAG-NEXT: v_rsq_f64_e32 v[12:13], v[4:5]
; SDAG-NEXT: v_mul_f64 v[10:11], v[0:1], v[6:7]
; SDAG-NEXT: v_mul_f64 v[6:7], v[6:7], 0.5
; SDAG-NEXT: v_mul_f64 v[14:15], v[2:3], v[8:9]
; SDAG-NEXT: v_mul_f64 v[8:9], v[8:9], 0.5
; SDAG-NEXT: v_fma_f64 v[16:17], -v[6:7], v[10:11], 0.5
; SDAG-NEXT: v_fma_f64 v[18:19], -v[8:9], v[14:15], 0.5
; SDAG-NEXT: v_fma_f64 v[10:11], v[10:11], v[16:17], v[10:11]
; SDAG-NEXT: v_fma_f64 v[6:7], v[6:7], v[16:17], v[6:7]
; SDAG-NEXT: v_mul_f64 v[16:17], v[4:5], v[12:13]
; SDAG-NEXT: v_mul_f64 v[12:13], v[12:13], 0.5
; SDAG-NEXT: v_fma_f64 v[14:15], v[14:15], v[18:19], v[14:15]
; SDAG-NEXT: v_fma_f64 v[8:9], v[8:9], v[18:19], v[8:9]
; SDAG-NEXT: v_fma_f64 v[18:19], -v[12:13], v[16:17], 0.5
; SDAG-NEXT: v_fma_f64 v[16:17], v[16:17], v[18:19], v[16:17]
; SDAG-NEXT: v_fma_f64 v[12:13], v[12:13], v[18:19], v[12:13]
; SDAG-NEXT: v_fma_f64 v[18:19], -v[10:11], v[10:11], v[0:1]
; SDAG-NEXT: v_fma_f64 v[10:11], v[18:19], v[6:7], v[10:11]
; SDAG-NEXT: v_fma_f64 v[18:19], -v[14:15], v[14:15], v[2:3]
; SDAG-NEXT: v_fma_f64 v[14:15], v[18:19], v[8:9], v[14:15]
; SDAG-NEXT: v_fma_f64 v[18:19], -v[16:17], v[16:17], v[4:5]
; SDAG-NEXT: v_fma_f64 v[16:17], v[18:19], v[12:13], v[16:17]
; SDAG-NEXT: v_fma_f64 v[18:19], -v[10:11], v[10:11], v[0:1]
; SDAG-NEXT: v_fma_f64 v[6:7], v[18:19], v[6:7], v[10:11]
; SDAG-NEXT: v_fma_f64 v[10:11], -v[14:15], v[14:15], v[2:3]
; SDAG-NEXT: v_fma_f64 v[18:19], -v[16:17], v[16:17], v[4:5]
; SDAG-NEXT: v_fma_f64 v[8:9], v[10:11], v[8:9], v[14:15]
; SDAG-NEXT: v_fma_f64 v[10:11], v[18:19], v[12:13], v[16:17]
; SDAG-NEXT: v_mov_b32_e32 v14, 0xffffff80
; SDAG-NEXT: v_mov_b32_e32 v15, 0x260
; SDAG-NEXT: v_cndmask_b32_e32 v12, 0, v14, vcc
; SDAG-NEXT: v_cndmask_b32_e64 v13, 0, v14, s[4:5]
; SDAG-NEXT: v_cndmask_b32_e64 v14, 0, v14, s[6:7]
; SDAG-NEXT: v_ldexp_f64 v[6:7], v[6:7], v12
; SDAG-NEXT: v_cmp_class_f64_e32 vcc, v[0:1], v15
; SDAG-NEXT: v_ldexp_f64 v[8:9], v[8:9], v13
; SDAG-NEXT: v_cmp_class_f64_e64 s[4:5], v[2:3], v15
; SDAG-NEXT: v_ldexp_f64 v[10:11], v[10:11], v14
; SDAG-NEXT: v_cmp_class_f64_e64 s[6:7], v[4:5], v15
; SDAG-NEXT: v_cndmask_b32_e32 v0, v6, v0, vcc
; SDAG-NEXT: v_cndmask_b32_e32 v1, v7, v1, vcc
; SDAG-NEXT: v_cndmask_b32_e64 v2, v8, v2, s[4:5]
; SDAG-NEXT: v_cndmask_b32_e64 v3, v9, v3, s[4:5]
; SDAG-NEXT: v_cndmask_b32_e64 v4, v10, v4, s[6:7]
; SDAG-NEXT: v_cndmask_b32_e64 v5, v11, v5, s[6:7]
; SDAG-NEXT: s_setpc_b64 s[30:31]
;
; GISEL-LABEL: v_sqrt_v3f64:
; GISEL: ; %bb.0:
; GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GISEL-NEXT: s_mov_b32 s4, 0
; GISEL-NEXT: s_brev_b32 s5, 8
; GISEL-NEXT: v_cmp_gt_f64_e32 vcc, s[4:5], v[0:1]
; GISEL-NEXT: v_cndmask_b32_e64 v6, 0, 1, vcc
; GISEL-NEXT: v_lshlrev_b32_e32 v6, 8, v6
; GISEL-NEXT: v_ldexp_f64 v[0:1], v[0:1], v6
; GISEL-NEXT: v_mov_b32_e32 v6, s4
; GISEL-NEXT: v_mov_b32_e32 v7, s5
; GISEL-NEXT: v_cmp_lt_f64_e64 s[4:5], v[2:3], v[6:7]
; GISEL-NEXT: v_cmp_lt_f64_e64 s[6:7], v[4:5], v[6:7]
; GISEL-NEXT: v_cndmask_b32_e64 v8, 0, 1, s[4:5]
; GISEL-NEXT: v_lshlrev_b32_e32 v8, 8, v8
; GISEL-NEXT: v_ldexp_f64 v[2:3], v[2:3], v8
; GISEL-NEXT: v_rsq_f64_e32 v[8:9], v[0:1]
; GISEL-NEXT: v_cndmask_b32_e64 v6, 0, 1, s[6:7]
; GISEL-NEXT: v_lshlrev_b32_e32 v6, 8, v6
; GISEL-NEXT: v_ldexp_f64 v[4:5], v[4:5], v6
; GISEL-NEXT: v_rsq_f64_e32 v[10:11], v[2:3]
; GISEL-NEXT: v_rsq_f64_e32 v[12:13], v[4:5]
; GISEL-NEXT: v_mul_f64 v[6:7], v[8:9], 0.5
; GISEL-NEXT: v_mul_f64 v[8:9], v[0:1], v[8:9]
; GISEL-NEXT: v_mul_f64 v[14:15], v[10:11], 0.5
; GISEL-NEXT: v_mul_f64 v[10:11], v[2:3], v[10:11]
; GISEL-NEXT: v_fma_f64 v[16:17], -v[6:7], v[8:9], 0.5
; GISEL-NEXT: v_fma_f64 v[18:19], -v[14:15], v[10:11], 0.5
; GISEL-NEXT: v_fma_f64 v[8:9], v[8:9], v[16:17], v[8:9]
; GISEL-NEXT: v_fma_f64 v[6:7], v[6:7], v[16:17], v[6:7]
; GISEL-NEXT: v_mul_f64 v[16:17], v[12:13], 0.5
; GISEL-NEXT: v_mul_f64 v[12:13], v[4:5], v[12:13]
; GISEL-NEXT: v_fma_f64 v[10:11], v[10:11], v[18:19], v[10:11]
; GISEL-NEXT: v_fma_f64 v[14:15], v[14:15], v[18:19], v[14:15]
; GISEL-NEXT: v_fma_f64 v[18:19], -v[16:17], v[12:13], 0.5
; GISEL-NEXT: v_fma_f64 v[12:13], v[12:13], v[18:19], v[12:13]
; GISEL-NEXT: v_fma_f64 v[16:17], v[16:17], v[18:19], v[16:17]
; GISEL-NEXT: v_fma_f64 v[18:19], -v[8:9], v[8:9], v[0:1]
; GISEL-NEXT: v_fma_f64 v[8:9], v[18:19], v[6:7], v[8:9]
; GISEL-NEXT: v_fma_f64 v[18:19], -v[10:11], v[10:11], v[2:3]
; GISEL-NEXT: v_fma_f64 v[10:11], v[18:19], v[14:15], v[10:11]
; GISEL-NEXT: v_fma_f64 v[18:19], -v[12:13], v[12:13], v[4:5]
; GISEL-NEXT: v_fma_f64 v[12:13], v[18:19], v[16:17], v[12:13]
; GISEL-NEXT: v_fma_f64 v[18:19], -v[8:9], v[8:9], v[0:1]
; GISEL-NEXT: v_fma_f64 v[6:7], v[18:19], v[6:7], v[8:9]
; GISEL-NEXT: v_fma_f64 v[8:9], -v[10:11], v[10:11], v[2:3]
; GISEL-NEXT: v_fma_f64 v[18:19], -v[12:13], v[12:13], v[4:5]
; GISEL-NEXT: v_fma_f64 v[8:9], v[8:9], v[14:15], v[10:11]
; GISEL-NEXT: v_fma_f64 v[10:11], v[18:19], v[16:17], v[12:13]
; GISEL-NEXT: v_mov_b32_e32 v14, 0xffffff80
; GISEL-NEXT: v_mov_b32_e32 v15, 0x260
; GISEL-NEXT: v_cndmask_b32_e32 v12, 0, v14, vcc
; GISEL-NEXT: v_cndmask_b32_e64 v13, 0, v14, s[4:5]
; GISEL-NEXT: v_cndmask_b32_e64 v14, 0, v14, s[6:7]
; GISEL-NEXT: v_ldexp_f64 v[6:7], v[6:7], v12
; GISEL-NEXT: v_cmp_class_f64_e32 vcc, v[0:1], v15
; GISEL-NEXT: v_ldexp_f64 v[8:9], v[8:9], v13
; GISEL-NEXT: v_cmp_class_f64_e64 s[4:5], v[2:3], v15
; GISEL-NEXT: v_ldexp_f64 v[10:11], v[10:11], v14
; GISEL-NEXT: v_cmp_class_f64_e64 s[6:7], v[4:5], v15
; GISEL-NEXT: v_cndmask_b32_e32 v0, v6, v0, vcc
; GISEL-NEXT: v_cndmask_b32_e32 v1, v7, v1, vcc
; GISEL-NEXT: v_cndmask_b32_e64 v2, v8, v2, s[4:5]
; GISEL-NEXT: v_cndmask_b32_e64 v3, v9, v3, s[4:5]
; GISEL-NEXT: v_cndmask_b32_e64 v4, v10, v4, s[6:7]
; GISEL-NEXT: v_cndmask_b32_e64 v5, v11, v5, s[6:7]
; GISEL-NEXT: s_setpc_b64 s[30:31]
%result = call <3 x double> @llvm.sqrt.v3f64(<3 x double> %x)
ret <3 x double> %result
}
declare double @llvm.fabs.f64(double) #0
declare double @llvm.sqrt.f64(double) #0
declare <2 x double> @llvm.sqrt.v2f64(<2 x double>) #0
declare <3 x double> @llvm.sqrt.v3f64(<3 x double>) #0
declare i32 @llvm.amdgcn.readfirstlane(i32) #1
attributes #0 = { nocallback nofree nosync nounwind speculatable willreturn memory(none) }
attributes #1 = { convergent nounwind willreturn memory(none) }
attributes #2 = { "approx-func-fp-math"="true" }
attributes #3 = { "approx-func-fp-math"="true" "no-nans-fp-math"="true" "no-infs-fp-math"="true" }
attributes #4 = { "unsafe-fp-math"="true" }