Previously, clustered nodes were managed by adding weak edges between
neighbouring cluster nodes, forming a sort of ordered queue, which was
later recorded as `NextClusterPred` or `NextClusterSucc` in
`ScheduleDAGMI`.
But instructions may not be picked in the exact order of that queue.
For example, given a queue of cluster nodes A B C, node B might be
picked first during scheduling; in that case top-down scheduling will
very likely cluster only B and C, leaving A alone.
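To see why, here is a toy sketch (the `Node` type is hypothetical, not
LLVM's actual `SUnit`/`ScheduleDAGMI` data structures): a chain only
records each node's successor, so when scheduling happens to start in
the middle of the chain, the members before the picked node are no
longer reachable.
```cpp
#include <cassert>
#include <string>

// Hypothetical chain-based cluster: each node only knows its successor.
struct Node {
  std::string Name;
  Node *NextClusterSucc = nullptr;
};

int main() {
  Node A{"A"}, B{"B"}, C{"C"};
  A.NextClusterSucc = &B; // cluster queue A -> B -> C
  B.NextClusterSucc = &C;

  // Top-down scheduling happens to pick B first: from B we can still
  // reach C, but nothing points back to A, so A is left unclustered.
  Node *Picked = &B;
  assert(Picked->NextClusterSucc == &C);
  return 0;
}
```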
Another issue is:
```cpp
if (!ReorderWhileClustering && SUa->NodeNum > SUb->NodeNum)
  std::swap(SUa, SUb);
if (!DAG->addEdge(SUb, SDep(SUa, SDep::Cluster)))
```
may break the cluster queue.
For example, suppose we want to cluster nodes 1 3 2 (in `MemOpRecords`
order). For the pair (1, 3), 1 (SUa) normally becomes a pred of 3 (SUb).
But for the pair (3, 2), since 3 (SUa) > 2 (SUb), the two nodes are
swapped, making 2 a pred of 3. Now both 1 and 2 are preds of 3, yet
there is no edge between 1 and 2, so we get a broken cluster chain.
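The following self-contained sketch (toy types, not the real
`ScheduleDAG` API) replays that sequence and shows the resulting edges:
```cpp
#include <cstdio>
#include <set>
#include <utility>

struct SU { unsigned NodeNum; };

int main() {
  std::set<std::pair<unsigned, unsigned>> Edges; // (pred, succ) cluster edges
  bool ReorderWhileClustering = false;

  auto AddClusterEdge = [&](SU *SUa, SU *SUb) {
    // Same reordering rule as the snippet quoted above.
    if (!ReorderWhileClustering && SUa->NodeNum > SUb->NodeNum)
      std::swap(SUa, SUb);
    Edges.insert({SUa->NodeNum, SUb->NodeNum});
  };

  SU N1{1}, N2{2}, N3{3};
  AddClusterEdge(&N1, &N3); // pair (1, 3): adds 1 -> 3
  AddClusterEdge(&N3, &N2); // pair (3, 2): swapped, adds 2 -> 3

  // Prints "1 -> 3" and "2 -> 3": both 1 and 2 are preds of 3, but the
  // chain has no edge between 1 and 2.
  for (const auto &[Pred, Succ] : Edges)
    std::printf("%u -> %u\n", Pred, Succ);
  return 0;
}
```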
To fix both issues, this change introduces an unordered set to track
cluster members, which should also help improve clustering in some hard
cases.
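A minimal sketch of the idea, under my reading of the change (the names
`ParentClusterIdx` and `ClusterInfo` here are illustrative, not
necessarily the patch's actual API): each cluster is stored as an
unordered set of its members, so whichever member gets picked first,
the rest of its cluster can still be found.
```cpp
#include <unordered_set>
#include <vector>

struct SUnit {
  unsigned NodeNum;
  unsigned ParentClusterIdx = ~0u; // index into the cluster table below
};

using ClusterInfo = std::unordered_set<SUnit *>;

int main() {
  std::vector<ClusterInfo> Clusters;
  SUnit N1{1}, N2{2}, N3{3};

  // Nodes 1, 2, 3 form one cluster; insertion order does not matter.
  Clusters.push_back({&N1, &N2, &N3});
  for (SUnit *SU : Clusters.back())
    SU->ParentClusterIdx = Clusters.size() - 1;

  // Whichever member the scheduler picks first (say node 2), its
  // remaining cluster mates are reachable without any chain order.
  const ClusterInfo &Mates = Clusters[N2.ParentClusterIdx];
  (void)Mates;
  return 0;
}
```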
One key reason this change causes so many test check changes is that
the cluster candidates are no longer ordered, so they may be picked in
a different order than before.
The most affected targets are AMDGPU, AArch64, and RISC-V.
For RISC-V, most changes look like minor instruction reorderings; I
don't see any obvious regression.
For AArch64, some combining of ldr pairs into ldp is affected, with two
cases regressing and two improving. The deeper reason is that the
machine scheduler cannot cluster these loads well either before or
after the change, and the later load-combine logic is also not smart
enough.
For AMDGPU, some cases use more v_dual instructions while others
regress; the impact looks less critical. For example, test
`v_vselect_v32bf16` gets more buffer_load instructions combined into
clauses.
```llvm
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
; RUN: llc -mtriple=amdgcn -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=SI %s
; RUN: llc -mtriple=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=VI %s
; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -mattr=+real-true16 -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GFX11,GFX11-TRUE16 %s
; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -mattr=-real-true16 -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GFX11,GFX11-FAKE16 %s

declare float @llvm.fabs.f32(float) #1
declare double @llvm.fabs.f64(double) #1

define amdgpu_kernel void @test_isinf_pattern(ptr addrspace(1) nocapture %out, float %x) #0 {
; SI-LABEL: test_isinf_pattern:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x9
; SI-NEXT: s_load_dword s4, s[4:5], 0xb
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: v_mov_b32_e32 v0, 0x204
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_cmp_class_f32_e32 vcc, s4, v0
; SI-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: test_isinf_pattern:
; VI: ; %bb.0:
; VI-NEXT: s_load_dword s2, s[4:5], 0x2c
; VI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
; VI-NEXT: v_mov_b32_e32 v0, 0x204
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_cmp_class_f32_e32 vcc, s2, v0
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: flat_store_dword v[0:1], v2
; VI-NEXT: s_endpgm
;
; GFX11-LABEL: test_isinf_pattern:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_clause 0x1
; GFX11-NEXT: s_load_b32 s2, s[4:5], 0x2c
; GFX11-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
; GFX11-NEXT: v_mov_b32_e32 v0, 0
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: v_cmp_class_f32_e64 s2, s2, 0x204
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-NEXT: v_cndmask_b32_e64 v1, 0, 1, s2
; GFX11-NEXT: global_store_b32 v0, v1, s[0:1]
; GFX11-NEXT: s_endpgm
  %fabs = tail call float @llvm.fabs.f32(float %x) #1
  %cmp = fcmp oeq float %fabs, 0x7FF0000000000000
  %ext = zext i1 %cmp to i32
  store i32 %ext, ptr addrspace(1) %out, align 4
  ret void
}

define amdgpu_kernel void @test_not_isinf_pattern_0(ptr addrspace(1) nocapture %out, float %x) #0 {
; SI-LABEL: test_not_isinf_pattern_0:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x9
; SI-NEXT: s_load_dword s4, s[4:5], 0xb
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: v_mov_b32_e32 v0, 0x7f800000
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_cmp_nlg_f32_e64 s[4:5], |s4|, v0
; SI-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[4:5]
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: test_not_isinf_pattern_0:
; VI: ; %bb.0:
; VI-NEXT: s_load_dword s2, s[4:5], 0x2c
; VI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
; VI-NEXT: v_mov_b32_e32 v0, 0x7f800000
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_cmp_nlg_f32_e64 s[2:3], |s2|, v0
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_cndmask_b32_e64 v2, 0, 1, s[2:3]
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: flat_store_dword v[0:1], v2
; VI-NEXT: s_endpgm
;
; GFX11-LABEL: test_not_isinf_pattern_0:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_clause 0x1
; GFX11-NEXT: s_load_b32 s2, s[4:5], 0x2c
; GFX11-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
; GFX11-NEXT: v_mov_b32_e32 v0, 0
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: v_cmp_nlg_f32_e64 s2, 0x7f800000, |s2|
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-NEXT: v_cndmask_b32_e64 v1, 0, 1, s2
; GFX11-NEXT: global_store_b32 v0, v1, s[0:1]
; GFX11-NEXT: s_endpgm
  %fabs = tail call float @llvm.fabs.f32(float %x) #1
  %cmp = fcmp ueq float %fabs, 0x7FF0000000000000
  %ext = zext i1 %cmp to i32
  store i32 %ext, ptr addrspace(1) %out, align 4
  ret void
}

define amdgpu_kernel void @test_not_isinf_pattern_1(ptr addrspace(1) nocapture %out, float %x) #0 {
; SI-LABEL: test_not_isinf_pattern_1:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: v_mov_b32_e32 v0, 0
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: test_not_isinf_pattern_1:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
; VI-NEXT: v_mov_b32_e32 v2, 0
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: flat_store_dword v[0:1], v2
; VI-NEXT: s_endpgm
;
; GFX11-LABEL: test_not_isinf_pattern_1:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
; GFX11-NEXT: v_mov_b32_e32 v0, 0
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: global_store_b32 v0, v0, s[0:1]
; GFX11-NEXT: s_endpgm
  %fabs = tail call float @llvm.fabs.f32(float %x) #1
  %cmp = fcmp oeq float %fabs, 0xFFF0000000000000
  %ext = zext i1 %cmp to i32
  store i32 %ext, ptr addrspace(1) %out, align 4
  ret void
}

define amdgpu_kernel void @test_isfinite_pattern_0(ptr addrspace(1) nocapture %out, float %x) #0 {
; SI-LABEL: test_isfinite_pattern_0:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x9
; SI-NEXT: s_load_dword s4, s[4:5], 0xb
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: v_mov_b32_e32 v0, 0x1f8
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_cmp_class_f32_e32 vcc, s4, v0
; SI-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: test_isfinite_pattern_0:
; VI: ; %bb.0:
; VI-NEXT: s_load_dword s2, s[4:5], 0x2c
; VI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
; VI-NEXT: v_mov_b32_e32 v0, 0x1f8
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_cmp_class_f32_e32 vcc, s2, v0
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: flat_store_dword v[0:1], v2
; VI-NEXT: s_endpgm
;
; GFX11-LABEL: test_isfinite_pattern_0:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_clause 0x1
; GFX11-NEXT: s_load_b32 s2, s[4:5], 0x2c
; GFX11-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
; GFX11-NEXT: v_mov_b32_e32 v0, 0
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: v_cmp_class_f32_e64 s2, s2, 0x1f8
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-NEXT: v_cndmask_b32_e64 v1, 0, 1, s2
; GFX11-NEXT: global_store_b32 v0, v1, s[0:1]
; GFX11-NEXT: s_endpgm
  %ord = fcmp ord float %x, 0.000000e+00
  %x.fabs = tail call float @llvm.fabs.f32(float %x) #1
  %ninf = fcmp une float %x.fabs, 0x7FF0000000000000
  %and = and i1 %ord, %ninf
  %ext = zext i1 %and to i32
  store i32 %ext, ptr addrspace(1) %out, align 4
  ret void
}

define amdgpu_kernel void @test_isfinite_pattern_1(ptr addrspace(1) nocapture %out, float %x) #0 {
; SI-LABEL: test_isfinite_pattern_1:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x9
; SI-NEXT: s_load_dword s4, s[4:5], 0xb
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: v_mov_b32_e32 v0, 0x1f8
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_cmp_class_f32_e32 vcc, s4, v0
; SI-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: test_isfinite_pattern_1:
; VI: ; %bb.0:
; VI-NEXT: s_load_dword s2, s[4:5], 0x2c
; VI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
; VI-NEXT: v_mov_b32_e32 v0, 0x1f8
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_cmp_class_f32_e32 vcc, s2, v0
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: flat_store_dword v[0:1], v2
; VI-NEXT: s_endpgm
;
; GFX11-LABEL: test_isfinite_pattern_1:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_clause 0x1
; GFX11-NEXT: s_load_b32 s2, s[4:5], 0x2c
; GFX11-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
; GFX11-NEXT: v_mov_b32_e32 v0, 0
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: v_cmp_class_f32_e64 s2, s2, 0x1f8
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-NEXT: v_cndmask_b32_e64 v1, 0, 1, s2
; GFX11-NEXT: global_store_b32 v0, v1, s[0:1]
; GFX11-NEXT: s_endpgm
  %x.fabs = tail call float @llvm.fabs.f32(float %x) #3
  %cmpinf = fcmp one float %x.fabs, 0x7FF0000000000000
  %ext = zext i1 %cmpinf to i32
  store i32 %ext, ptr addrspace(1) %out, align 4
  ret void
}

; Use negative infinity
define amdgpu_kernel void @test_isfinite_not_pattern_0(ptr addrspace(1) nocapture %out, float %x) #0 {
; SI-LABEL: test_isfinite_not_pattern_0:
; SI: ; %bb.0:
; SI-NEXT: s_load_dword s6, s[4:5], 0xb
; SI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_cmp_o_f32_e64 s[4:5], s6, s6
; SI-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[4:5]
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: test_isfinite_not_pattern_0:
; VI: ; %bb.0:
; VI-NEXT: s_load_dword s2, s[4:5], 0x2c
; VI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_cmp_o_f32_e64 s[2:3], s2, s2
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_cndmask_b32_e64 v2, 0, 1, s[2:3]
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: flat_store_dword v[0:1], v2
; VI-NEXT: s_endpgm
;
; GFX11-LABEL: test_isfinite_not_pattern_0:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_clause 0x1
; GFX11-NEXT: s_load_b32 s2, s[4:5], 0x2c
; GFX11-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
; GFX11-NEXT: v_mov_b32_e32 v0, 0
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: v_cmp_o_f32_e64 s2, s2, s2
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-NEXT: v_cndmask_b32_e64 v1, 0, 1, s2
; GFX11-NEXT: global_store_b32 v0, v1, s[0:1]
; GFX11-NEXT: s_endpgm
  %ord = fcmp ord float %x, 0.000000e+00
  %x.fabs = tail call float @llvm.fabs.f32(float %x) #1
  %ninf = fcmp une float %x.fabs, 0xFFF0000000000000
  %and = and i1 %ord, %ninf
  %ext = zext i1 %and to i32
  store i32 %ext, ptr addrspace(1) %out, align 4
  ret void
}

; No fabs
define amdgpu_kernel void @test_isfinite_not_pattern_1(ptr addrspace(1) nocapture %out, float %x) #0 {
; SI-LABEL: test_isfinite_not_pattern_1:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x9
; SI-NEXT: s_load_dword s6, s[4:5], 0xb
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: v_mov_b32_e32 v0, 0x7f800000
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_cmp_o_f32_e64 s[4:5], s6, s6
; SI-NEXT: v_cmp_neq_f32_e32 vcc, s6, v0
; SI-NEXT: s_and_b64 s[4:5], s[4:5], vcc
; SI-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[4:5]
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: test_isfinite_not_pattern_1:
; VI: ; %bb.0:
; VI-NEXT: s_load_dword s6, s[4:5], 0x2c
; VI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
; VI-NEXT: v_mov_b32_e32 v0, 0x7f800000
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_cmp_o_f32_e64 s[2:3], s6, s6
; VI-NEXT: v_cmp_neq_f32_e32 vcc, s6, v0
; VI-NEXT: s_and_b64 s[2:3], s[2:3], vcc
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_cndmask_b32_e64 v2, 0, 1, s[2:3]
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: flat_store_dword v[0:1], v2
; VI-NEXT: s_endpgm
;
; GFX11-LABEL: test_isfinite_not_pattern_1:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_clause 0x1
; GFX11-NEXT: s_load_b32 s2, s[4:5], 0x2c
; GFX11-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
; GFX11-NEXT: v_mov_b32_e32 v0, 0
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: v_cmp_o_f32_e64 s3, s2, s2
; GFX11-NEXT: v_cmp_neq_f32_e64 s2, 0x7f800000, s2
; GFX11-NEXT: s_and_b32 s2, s3, s2
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_cndmask_b32_e64 v1, 0, 1, s2
; GFX11-NEXT: global_store_b32 v0, v1, s[0:1]
; GFX11-NEXT: s_endpgm
  %ord = fcmp ord float %x, 0.000000e+00
  %ninf = fcmp une float %x, 0x7FF0000000000000
  %and = and i1 %ord, %ninf
  %ext = zext i1 %and to i32
  store i32 %ext, ptr addrspace(1) %out, align 4
  ret void
}

; fabs of different value
define amdgpu_kernel void @test_isfinite_not_pattern_2(ptr addrspace(1) nocapture %out, float %x, float %y) #0 {
; SI-LABEL: test_isfinite_not_pattern_2:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: v_mov_b32_e32 v0, 0x7f800000
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mov_b32 s4, s0
; SI-NEXT: s_mov_b32 s5, s1
; SI-NEXT: v_cmp_o_f32_e64 s[0:1], s2, s2
; SI-NEXT: v_cmp_neq_f32_e64 s[2:3], |s3|, v0
; SI-NEXT: s_and_b64 s[0:1], s[0:1], s[2:3]
; SI-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[0:1]
; SI-NEXT: buffer_store_dword v0, off, s[4:7], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: test_isfinite_not_pattern_2:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
; VI-NEXT: v_mov_b32_e32 v0, 0x7f800000
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_cmp_o_f32_e64 s[4:5], s2, s2
; VI-NEXT: v_cmp_neq_f32_e64 s[2:3], |s3|, v0
; VI-NEXT: s_and_b64 s[2:3], s[4:5], s[2:3]
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_cndmask_b32_e64 v2, 0, 1, s[2:3]
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: flat_store_dword v[0:1], v2
; VI-NEXT: s_endpgm
;
; GFX11-LABEL: test_isfinite_not_pattern_2:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
; GFX11-NEXT: v_mov_b32_e32 v0, 0
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: v_cmp_o_f32_e64 s2, s2, s2
; GFX11-NEXT: v_cmp_neq_f32_e64 s3, 0x7f800000, |s3|
; GFX11-NEXT: s_and_b32 s2, s2, s3
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_cndmask_b32_e64 v1, 0, 1, s2
; GFX11-NEXT: global_store_b32 v0, v1, s[0:1]
; GFX11-NEXT: s_endpgm
  %ord = fcmp ord float %x, 0.000000e+00
  %x.fabs = tail call float @llvm.fabs.f32(float %y) #1
  %ninf = fcmp une float %x.fabs, 0x7FF0000000000000
  %and = and i1 %ord, %ninf
  %ext = zext i1 %and to i32
  store i32 %ext, ptr addrspace(1) %out, align 4
  ret void
}

; Wrong ordered compare type
define amdgpu_kernel void @test_isfinite_not_pattern_3(ptr addrspace(1) nocapture %out, float %x) #0 {
; SI-LABEL: test_isfinite_not_pattern_3:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x9
; SI-NEXT: s_load_dword s6, s[4:5], 0xb
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: v_mov_b32_e32 v0, 0x7f800000
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_cmp_u_f32_e64 s[4:5], s6, s6
; SI-NEXT: v_cmp_neq_f32_e64 s[6:7], |s6|, v0
; SI-NEXT: s_and_b64 s[4:5], s[4:5], s[6:7]
; SI-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[4:5]
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: test_isfinite_not_pattern_3:
; VI: ; %bb.0:
; VI-NEXT: s_load_dword s6, s[4:5], 0x2c
; VI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
; VI-NEXT: v_mov_b32_e32 v0, 0x7f800000
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_cmp_u_f32_e64 s[2:3], s6, s6
; VI-NEXT: v_cmp_neq_f32_e64 s[4:5], |s6|, v0
; VI-NEXT: s_and_b64 s[2:3], s[2:3], s[4:5]
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_cndmask_b32_e64 v2, 0, 1, s[2:3]
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: flat_store_dword v[0:1], v2
; VI-NEXT: s_endpgm
;
; GFX11-LABEL: test_isfinite_not_pattern_3:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_clause 0x1
; GFX11-NEXT: s_load_b32 s2, s[4:5], 0x2c
; GFX11-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
; GFX11-NEXT: v_mov_b32_e32 v0, 0
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: v_cmp_u_f32_e64 s3, s2, s2
; GFX11-NEXT: v_cmp_neq_f32_e64 s2, 0x7f800000, |s2|
; GFX11-NEXT: s_and_b32 s2, s3, s2
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_cndmask_b32_e64 v1, 0, 1, s2
; GFX11-NEXT: global_store_b32 v0, v1, s[0:1]
; GFX11-NEXT: s_endpgm
  %ord = fcmp uno float %x, 0.000000e+00
  %x.fabs = tail call float @llvm.fabs.f32(float %x) #1
  %ninf = fcmp une float %x.fabs, 0x7FF0000000000000
  %and = and i1 %ord, %ninf
  %ext = zext i1 %and to i32
  store i32 %ext, ptr addrspace(1) %out, align 4
  ret void
}

define amdgpu_kernel void @test_isfinite_pattern_4(ptr addrspace(1) nocapture %out, float %x) #0 {
; SI-LABEL: test_isfinite_pattern_4:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x9
; SI-NEXT: s_load_dword s4, s[4:5], 0xb
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: v_mov_b32_e32 v0, 0x1f8
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_cmp_class_f32_e32 vcc, s4, v0
; SI-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: test_isfinite_pattern_4:
; VI: ; %bb.0:
; VI-NEXT: s_load_dword s2, s[4:5], 0x2c
; VI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
; VI-NEXT: v_mov_b32_e32 v0, 0x1f8
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_cmp_class_f32_e32 vcc, s2, v0
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: flat_store_dword v[0:1], v2
; VI-NEXT: s_endpgm
;
; GFX11-LABEL: test_isfinite_pattern_4:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_clause 0x1
; GFX11-NEXT: s_load_b32 s2, s[4:5], 0x2c
; GFX11-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
; GFX11-NEXT: v_mov_b32_e32 v0, 0
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: v_cmp_class_f32_e64 s2, s2, 0x1f8
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-NEXT: v_cndmask_b32_e64 v1, 0, 1, s2
; GFX11-NEXT: global_store_b32 v0, v1, s[0:1]
; GFX11-NEXT: s_endpgm
  %ord = fcmp ord float %x, 0.000000e+00
  %x.fabs = tail call float @llvm.fabs.f32(float %x) #1
  %ninf = fcmp one float %x.fabs, 0x7FF0000000000000
  %and = and i1 %ord, %ninf
  %ext = zext i1 %and to i32
  store i32 %ext, ptr addrspace(1) %out, align 4
  ret void
}

define amdgpu_kernel void @test_isfinite_pattern_4_commute_and(ptr addrspace(1) nocapture %out, float %x) #0 {
; SI-LABEL: test_isfinite_pattern_4_commute_and:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x9
; SI-NEXT: s_load_dword s4, s[4:5], 0xb
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: v_mov_b32_e32 v0, 0x1f8
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_cmp_class_f32_e32 vcc, s4, v0
; SI-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: test_isfinite_pattern_4_commute_and:
; VI: ; %bb.0:
; VI-NEXT: s_load_dword s2, s[4:5], 0x2c
; VI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
; VI-NEXT: v_mov_b32_e32 v0, 0x1f8
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_cmp_class_f32_e32 vcc, s2, v0
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: flat_store_dword v[0:1], v2
; VI-NEXT: s_endpgm
;
; GFX11-LABEL: test_isfinite_pattern_4_commute_and:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_clause 0x1
; GFX11-NEXT: s_load_b32 s2, s[4:5], 0x2c
; GFX11-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
; GFX11-NEXT: v_mov_b32_e32 v0, 0
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: v_cmp_class_f32_e64 s2, s2, 0x1f8
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-NEXT: v_cndmask_b32_e64 v1, 0, 1, s2
; GFX11-NEXT: global_store_b32 v0, v1, s[0:1]
; GFX11-NEXT: s_endpgm
  %ord = fcmp ord float %x, 0.000000e+00
  %x.fabs = tail call float @llvm.fabs.f32(float %x) #1
  %ninf = fcmp one float %x.fabs, 0x7FF0000000000000
  %and = and i1 %ninf, %ord
  %ext = zext i1 %and to i32
  store i32 %ext, ptr addrspace(1) %out, align 4
  ret void
}

define amdgpu_kernel void @test_not_isfinite_pattern_4_wrong_ord_test(ptr addrspace(1) nocapture %out, float %x, [8 x i32], float %y) #0 {
; SI-LABEL: test_not_isfinite_pattern_4_wrong_ord_test:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx2 s[8:9], s[4:5], 0x9
; SI-NEXT: s_load_dword s0, s[4:5], 0x14
; SI-NEXT: s_load_dword s1, s[4:5], 0xb
; SI-NEXT: s_mov_b32 s11, 0xf000
; SI-NEXT: s_mov_b32 s10, -1
; SI-NEXT: v_mov_b32_e32 v0, 0x1f8
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v1, s0
; SI-NEXT: v_cmp_o_f32_e32 vcc, s1, v1
; SI-NEXT: v_cmp_class_f32_e64 s[0:1], s1, v0
; SI-NEXT: s_and_b64 s[0:1], vcc, s[0:1]
; SI-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[0:1]
; SI-NEXT: buffer_store_dword v0, off, s[8:11], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: test_not_isfinite_pattern_4_wrong_ord_test:
; VI: ; %bb.0:
; VI-NEXT: s_load_dword s0, s[4:5], 0x50
; VI-NEXT: s_load_dword s1, s[4:5], 0x2c
; VI-NEXT: s_load_dwordx2 s[2:3], s[4:5], 0x24
; VI-NEXT: v_mov_b32_e32 v0, 0x1f8
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v1, s0
; VI-NEXT: v_cmp_class_f32_e32 vcc, s1, v0
; VI-NEXT: v_cmp_o_f32_e64 s[0:1], s1, v1
; VI-NEXT: s_and_b64 s[0:1], s[0:1], vcc
; VI-NEXT: v_mov_b32_e32 v0, s2
; VI-NEXT: v_cndmask_b32_e64 v2, 0, 1, s[0:1]
; VI-NEXT: v_mov_b32_e32 v1, s3
; VI-NEXT: flat_store_dword v[0:1], v2
; VI-NEXT: s_endpgm
;
; GFX11-LABEL: test_not_isfinite_pattern_4_wrong_ord_test:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_clause 0x2
; GFX11-NEXT: s_load_b32 s2, s[4:5], 0x2c
; GFX11-NEXT: s_load_b32 s3, s[4:5], 0x50
; GFX11-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
; GFX11-NEXT: v_mov_b32_e32 v0, 0
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: v_cmp_o_f32_e64 s3, s2, s3
; GFX11-NEXT: v_cmp_class_f32_e64 s2, s2, 0x1f8
; GFX11-NEXT: s_and_b32 s2, s3, s2
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_cndmask_b32_e64 v1, 0, 1, s2
; GFX11-NEXT: global_store_b32 v0, v1, s[0:1]
; GFX11-NEXT: s_endpgm
  %ord = fcmp ord float %x, %y
  %x.fabs = tail call float @llvm.fabs.f32(float %x) #1
  %ninf = fcmp one float %x.fabs, 0x7FF0000000000000
  %and = and i1 %ord, %ninf
  %ext = zext i1 %and to i32
  store i32 %ext, ptr addrspace(1) %out, align 4
  ret void
}

define amdgpu_kernel void @test_isinf_pattern_f16(ptr addrspace(1) nocapture %out, half %x) #0 {
; SI-LABEL: test_isinf_pattern_f16:
; SI: ; %bb.0:
; SI-NEXT: s_load_dword s6, s[4:5], 0xb
; SI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_and_b32 s4, s6, 0x7fff
; SI-NEXT: s_cmpk_eq_i32 s4, 0x7c00
; SI-NEXT: s_cselect_b64 s[4:5], -1, 0
; SI-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[4:5]
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: test_isinf_pattern_f16:
; VI: ; %bb.0:
; VI-NEXT: s_load_dword s2, s[4:5], 0x2c
; VI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
; VI-NEXT: v_mov_b32_e32 v0, 0x204
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_cmp_class_f16_e32 vcc, s2, v0
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: flat_store_dword v[0:1], v2
; VI-NEXT: s_endpgm
;
; GFX11-LABEL: test_isinf_pattern_f16:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_clause 0x1
; GFX11-NEXT: s_load_b32 s2, s[4:5], 0x2c
; GFX11-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
; GFX11-NEXT: v_mov_b32_e32 v0, 0
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: v_cmp_class_f16_e64 s2, s2, 0x204
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-NEXT: v_cndmask_b32_e64 v1, 0, 1, s2
; GFX11-NEXT: global_store_b32 v0, v1, s[0:1]
; GFX11-NEXT: s_endpgm
  %fabs = tail call half @llvm.fabs.f16(half %x) #1
  %cmp = fcmp oeq half %fabs, 0xH7C00
  %ext = zext i1 %cmp to i32
  store i32 %ext, ptr addrspace(1) %out, align 4
  ret void
}

define amdgpu_kernel void @test_isfinite_pattern_0_f16(ptr addrspace(1) nocapture %out, half %x) #0 {
; SI-LABEL: test_isfinite_pattern_0_f16:
; SI: ; %bb.0:
; SI-NEXT: s_load_dword s6, s[4:5], 0xb
; SI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v0, s6
; SI-NEXT: s_and_b32 s4, s6, 0x7fff
; SI-NEXT: v_cmp_o_f32_e32 vcc, v0, v0
; SI-NEXT: s_cmpk_lg_i32 s4, 0x7c00
; SI-NEXT: s_cselect_b64 s[4:5], -1, 0
; SI-NEXT: s_and_b64 s[4:5], vcc, s[4:5]
; SI-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[4:5]
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: test_isfinite_pattern_0_f16:
; VI: ; %bb.0:
; VI-NEXT: s_load_dword s2, s[4:5], 0x2c
; VI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
; VI-NEXT: v_mov_b32_e32 v0, 0x1f8
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_cmp_class_f16_e32 vcc, s2, v0
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: flat_store_dword v[0:1], v2
; VI-NEXT: s_endpgm
;
; GFX11-LABEL: test_isfinite_pattern_0_f16:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_clause 0x1
; GFX11-NEXT: s_load_b32 s2, s[4:5], 0x2c
; GFX11-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
; GFX11-NEXT: v_mov_b32_e32 v0, 0
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: v_cmp_class_f16_e64 s2, s2, 0x1f8
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-NEXT: v_cndmask_b32_e64 v1, 0, 1, s2
; GFX11-NEXT: global_store_b32 v0, v1, s[0:1]
; GFX11-NEXT: s_endpgm
  %ord = fcmp ord half %x, 0.0
  %x.fabs = tail call half @llvm.fabs.f16(half %x) #1
  %ninf = fcmp une half %x.fabs, 0xH7C00
  %and = and i1 %ord, %ninf
  %ext = zext i1 %and to i32
  store i32 %ext, ptr addrspace(1) %out, align 4
  ret void
}

define amdgpu_kernel void @test_isfinite_pattern_4_f16(ptr addrspace(1) nocapture %out, half %x) #0 {
; SI-LABEL: test_isfinite_pattern_4_f16:
; SI: ; %bb.0:
; SI-NEXT: s_load_dword s6, s[4:5], 0xb
; SI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_and_b32 s4, s6, 0x7fff
; SI-NEXT: s_cmpk_lt_i32 s4, 0x7c00
; SI-NEXT: s_cselect_b64 s[4:5], -1, 0
; SI-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[4:5]
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: test_isfinite_pattern_4_f16:
; VI: ; %bb.0:
; VI-NEXT: s_load_dword s2, s[4:5], 0x2c
; VI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
; VI-NEXT: v_mov_b32_e32 v0, 0x1f8
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_cmp_class_f16_e32 vcc, s2, v0
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: flat_store_dword v[0:1], v2
; VI-NEXT: s_endpgm
;
; GFX11-LABEL: test_isfinite_pattern_4_f16:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_clause 0x1
; GFX11-NEXT: s_load_b32 s2, s[4:5], 0x2c
; GFX11-NEXT: s_load_b64 s[0:1], s[4:5], 0x24
; GFX11-NEXT: v_mov_b32_e32 v0, 0
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: v_cmp_class_f16_e64 s2, s2, 0x1f8
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-NEXT: v_cndmask_b32_e64 v1, 0, 1, s2
; GFX11-NEXT: global_store_b32 v0, v1, s[0:1]
; GFX11-NEXT: s_endpgm
  %ord = fcmp ord half %x, 0.0
  %x.fabs = tail call half @llvm.fabs.f16(half %x) #1
  %ninf = fcmp one half %x.fabs, 0xH7C00
  %and = and i1 %ord, %ninf
  %ext = zext i1 %and to i32
  store i32 %ext, ptr addrspace(1) %out, align 4
  ret void
}

declare half @llvm.fabs.f16(half) #1

attributes #0 = { nounwind }
attributes #1 = { nounwind readnone }
;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
; GFX11-FAKE16: {{.*}}
; GFX11-TRUE16: {{.*}}
```