There are some patterns in td files without an MVT/class set for some operands in the target pattern that come from the source pattern. This prevents GlobalISelEmitter from adding them as a valid rule, because the target child operand is an operand of an unsupported kind. For now, for a leaf child, only IntInit and DefInit are handled in GlobalISelEmitter. This issue can be worked around by adding an MVT/class to the patterns in the td files, like the workarounds for the anyext and setcc patterns in PPCInstrInfo.td in D140878. To avoid adding the same workarounds for other patterns in td files, this patch handles the UnsetInit case in GlobalISelEmitter. Adding the new handling allows us to remove the workarounds in the td files and also generates many selection rules for the PPC target. Reviewed By: arsenm Differential Revision: https://reviews.llvm.org/D141247
1103 lines
45 KiB
LLVM
1103 lines
45 KiB
LLVM
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
|
|
; RUN: llc -global-isel -amdgpu-scalarize-global-loads=false -enable-misched=0 -march=amdgcn -mcpu=bonaire -verify-machineinstrs < %s | FileCheck --check-prefix=CI %s
|
|
; RUN: llc -global-isel -amdgpu-scalarize-global-loads=false -enable-misched=0 -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck --check-prefix=VI %s
|
|
|
|
; frem half with strict semantics: CI has no f16 divide, so it promotes to f32 and runs the
; full div_scale/div_fmas/div_fixup expansion; VI uses the native f16 rcp/div_fixup/fma path.
; CHECK lines are autogenerated by update_llc_test_checks.py -- regenerate, do not hand-edit.
define amdgpu_kernel void @frem_f16(ptr addrspace(1) %out, ptr addrspace(1) %in1, ptr addrspace(1) %in2) #0 {
|
|
; CI-LABEL: frem_f16:
|
|
; CI:       ; %bb.0:
|
|
; CI-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
|
|
; CI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0xd
|
|
; CI-NEXT:    s_waitcnt lgkmcnt(0)
|
|
; CI-NEXT:    s_load_dword s2, s[6:7], 0x0
|
|
; CI-NEXT:    s_load_dword s0, s[0:1], 0x2
|
|
; CI-NEXT:    s_waitcnt lgkmcnt(0)
|
|
; CI-NEXT:    v_cvt_f32_f16_e32 v0, s2
|
|
; CI-NEXT:    v_cvt_f32_f16_e32 v1, s0
|
|
; CI-NEXT:    v_div_scale_f32 v2, s[0:1], v1, v1, v0
|
|
; CI-NEXT:    v_div_scale_f32 v3, vcc, v0, v1, v0
|
|
; CI-NEXT:    v_rcp_f32_e32 v4, v2
|
|
; CI-NEXT:    s_setreg_imm32_b32 hwreg(HW_REG_MODE, 4, 2), 3
|
|
; CI-NEXT:    v_fma_f32 v5, -v2, v4, 1.0
|
|
; CI-NEXT:    v_fma_f32 v4, v5, v4, v4
|
|
; CI-NEXT:    v_mul_f32_e32 v5, v3, v4
|
|
; CI-NEXT:    v_fma_f32 v6, -v2, v5, v3
|
|
; CI-NEXT:    v_fma_f32 v5, v6, v4, v5
|
|
; CI-NEXT:    v_fma_f32 v2, -v2, v5, v3
|
|
; CI-NEXT:    s_setreg_imm32_b32 hwreg(HW_REG_MODE, 4, 2), 0
|
|
; CI-NEXT:    v_div_fmas_f32 v2, v2, v4, v5
|
|
; CI-NEXT:    s_mov_b32 s6, -1
|
|
; CI-NEXT:    s_mov_b32 s7, 0xf000
|
|
; CI-NEXT:    v_div_fixup_f32 v2, v2, v1, v0
|
|
; CI-NEXT:    v_trunc_f32_e32 v2, v2
|
|
; CI-NEXT:    v_fma_f32 v0, -v2, v1, v0
|
|
; CI-NEXT:    s_setreg_imm32_b32 hwreg(HW_REG_MODE, 2, 2), 0
|
|
; CI-NEXT:    v_cvt_f16_f32_e32 v0, v0
|
|
; CI-NEXT:    buffer_store_short v0, off, s[4:7], 0
|
|
; CI-NEXT:    s_endpgm
|
|
;
|
|
; VI-LABEL: frem_f16:
|
|
; VI:       ; %bb.0:
|
|
; VI-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
|
|
; VI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x34
|
|
; VI-NEXT:    s_waitcnt lgkmcnt(0)
|
|
; VI-NEXT:    s_load_dword s2, s[6:7], 0x0
|
|
; VI-NEXT:    s_load_dword s0, s[0:1], 0x8
|
|
; VI-NEXT:    s_waitcnt lgkmcnt(0)
|
|
; VI-NEXT:    v_cvt_f32_f16_e32 v0, s2
|
|
; VI-NEXT:    v_cvt_f32_f16_e32 v2, s0
|
|
; VI-NEXT:    v_mov_b32_e32 v1, s0
|
|
; VI-NEXT:    v_rcp_f32_e32 v2, v2
|
|
; VI-NEXT:    v_mul_f32_e32 v0, v0, v2
|
|
; VI-NEXT:    v_cvt_f16_f32_e32 v0, v0
|
|
; VI-NEXT:    v_div_fixup_f16 v0, v0, v1, s2
|
|
; VI-NEXT:    v_trunc_f16_e32 v0, v0
|
|
; VI-NEXT:    v_fma_f16 v2, -v0, v1, s2
|
|
; VI-NEXT:    v_mov_b32_e32 v0, s4
|
|
; VI-NEXT:    v_mov_b32_e32 v1, s5
|
|
; VI-NEXT:    flat_store_short v[0:1], v2
|
|
; VI-NEXT:    s_endpgm
|
|
  %gep2 = getelementptr half, ptr addrspace(1) %in2, i32 4
|
|
  %r0 = load half, ptr addrspace(1) %in1, align 4
|
|
  %r1 = load half, ptr addrspace(1) %gep2, align 4
|
|
  %r2 = frem half %r0, %r1
|
|
  store half %r2, ptr addrspace(1) %out, align 4
|
|
  ret void
|
|
}
|
|
|
|
; frem fast half: the fast flag permits the cheap rcp + trunc + fma remainder expansion on
; both CI (via f32) and VI (native f16). Autogenerated CHECK lines -- do not hand-edit.
define amdgpu_kernel void @fast_frem_f16(ptr addrspace(1) %out, ptr addrspace(1) %in1, ptr addrspace(1) %in2) #0 {
|
|
; CI-LABEL: fast_frem_f16:
|
|
; CI:       ; %bb.0:
|
|
; CI-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
|
|
; CI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0xd
|
|
; CI-NEXT:    s_waitcnt lgkmcnt(0)
|
|
; CI-NEXT:    s_load_dword s2, s[6:7], 0x0
|
|
; CI-NEXT:    s_load_dword s0, s[0:1], 0x2
|
|
; CI-NEXT:    s_mov_b32 s6, -1
|
|
; CI-NEXT:    s_mov_b32 s7, 0xf000
|
|
; CI-NEXT:    s_waitcnt lgkmcnt(0)
|
|
; CI-NEXT:    v_cvt_f32_f16_e32 v0, s2
|
|
; CI-NEXT:    v_cvt_f32_f16_e32 v1, s0
|
|
; CI-NEXT:    v_rcp_f32_e32 v2, v1
|
|
; CI-NEXT:    v_mul_f32_e32 v2, v0, v2
|
|
; CI-NEXT:    v_trunc_f32_e32 v2, v2
|
|
; CI-NEXT:    v_fma_f32 v0, -v2, v1, v0
|
|
; CI-NEXT:    v_cvt_f16_f32_e32 v0, v0
|
|
; CI-NEXT:    buffer_store_short v0, off, s[4:7], 0
|
|
; CI-NEXT:    s_endpgm
|
|
;
|
|
; VI-LABEL: fast_frem_f16:
|
|
; VI:       ; %bb.0:
|
|
; VI-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
|
|
; VI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x34
|
|
; VI-NEXT:    s_waitcnt lgkmcnt(0)
|
|
; VI-NEXT:    s_load_dword s2, s[6:7], 0x0
|
|
; VI-NEXT:    s_load_dword s0, s[0:1], 0x8
|
|
; VI-NEXT:    s_waitcnt lgkmcnt(0)
|
|
; VI-NEXT:    v_mov_b32_e32 v1, s2
|
|
; VI-NEXT:    v_rcp_f16_e32 v0, s0
|
|
; VI-NEXT:    v_mul_f16_e32 v0, s2, v0
|
|
; VI-NEXT:    v_trunc_f16_e32 v0, v0
|
|
; VI-NEXT:    v_fma_f16 v2, -v0, s0, v1
|
|
; VI-NEXT:    v_mov_b32_e32 v0, s4
|
|
; VI-NEXT:    v_mov_b32_e32 v1, s5
|
|
; VI-NEXT:    flat_store_short v[0:1], v2
|
|
; VI-NEXT:    s_endpgm
|
|
  %gep2 = getelementptr half, ptr addrspace(1) %in2, i32 4
|
|
  %r0 = load half, ptr addrspace(1) %in1, align 4
|
|
  %r1 = load half, ptr addrspace(1) %gep2, align 4
|
|
  %r2 = frem fast half %r0, %r1
|
|
  store half %r2, ptr addrspace(1) %out, align 4
|
|
  ret void
|
|
}
|
|
|
|
; frem half under attribute set #1 (presumably "unsafe-fp-math"="true" -- the attribute group
; is not visible in this chunk; confirm at file end). Lowers identically to the fast variant.
define amdgpu_kernel void @unsafe_frem_f16(ptr addrspace(1) %out, ptr addrspace(1) %in1, ptr addrspace(1) %in2) #1 {
|
|
; CI-LABEL: unsafe_frem_f16:
|
|
; CI:       ; %bb.0:
|
|
; CI-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
|
|
; CI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0xd
|
|
; CI-NEXT:    s_waitcnt lgkmcnt(0)
|
|
; CI-NEXT:    s_load_dword s2, s[6:7], 0x0
|
|
; CI-NEXT:    s_load_dword s0, s[0:1], 0x2
|
|
; CI-NEXT:    s_mov_b32 s6, -1
|
|
; CI-NEXT:    s_mov_b32 s7, 0xf000
|
|
; CI-NEXT:    s_waitcnt lgkmcnt(0)
|
|
; CI-NEXT:    v_cvt_f32_f16_e32 v0, s2
|
|
; CI-NEXT:    v_cvt_f32_f16_e32 v1, s0
|
|
; CI-NEXT:    v_rcp_f32_e32 v2, v1
|
|
; CI-NEXT:    v_mul_f32_e32 v2, v0, v2
|
|
; CI-NEXT:    v_trunc_f32_e32 v2, v2
|
|
; CI-NEXT:    v_fma_f32 v0, -v2, v1, v0
|
|
; CI-NEXT:    v_cvt_f16_f32_e32 v0, v0
|
|
; CI-NEXT:    buffer_store_short v0, off, s[4:7], 0
|
|
; CI-NEXT:    s_endpgm
|
|
;
|
|
; VI-LABEL: unsafe_frem_f16:
|
|
; VI:       ; %bb.0:
|
|
; VI-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
|
|
; VI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x34
|
|
; VI-NEXT:    s_waitcnt lgkmcnt(0)
|
|
; VI-NEXT:    s_load_dword s2, s[6:7], 0x0
|
|
; VI-NEXT:    s_load_dword s0, s[0:1], 0x8
|
|
; VI-NEXT:    s_waitcnt lgkmcnt(0)
|
|
; VI-NEXT:    v_mov_b32_e32 v1, s2
|
|
; VI-NEXT:    v_rcp_f16_e32 v0, s0
|
|
; VI-NEXT:    v_mul_f16_e32 v0, s2, v0
|
|
; VI-NEXT:    v_trunc_f16_e32 v0, v0
|
|
; VI-NEXT:    v_fma_f16 v2, -v0, s0, v1
|
|
; VI-NEXT:    v_mov_b32_e32 v0, s4
|
|
; VI-NEXT:    v_mov_b32_e32 v1, s5
|
|
; VI-NEXT:    flat_store_short v[0:1], v2
|
|
; VI-NEXT:    s_endpgm
|
|
  %gep2 = getelementptr half, ptr addrspace(1) %in2, i32 4
|
|
  %r0 = load half, ptr addrspace(1) %in1, align 4
|
|
  %r1 = load half, ptr addrspace(1) %gep2, align 4
|
|
  %r2 = frem half %r0, %r1
|
|
  store half %r2, ptr addrspace(1) %out, align 4
|
|
  ret void
|
|
}
|
|
|
|
; frem float with strict semantics: both CI and VI emit the full precise f32 division
; expansion (div_scale/rcp Newton iterations/div_fmas/div_fixup) plus trunc and fma.
; The s_setreg writes toggle the denorm/rounding HW mode bits around the expansion.
define amdgpu_kernel void @frem_f32(ptr addrspace(1) %out, ptr addrspace(1) %in1, ptr addrspace(1) %in2) #0 {
|
|
; CI-LABEL: frem_f32:
|
|
; CI:       ; %bb.0:
|
|
; CI-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
|
|
; CI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0xd
|
|
; CI-NEXT:    s_waitcnt lgkmcnt(0)
|
|
; CI-NEXT:    s_load_dword s2, s[6:7], 0x0
|
|
; CI-NEXT:    s_load_dword s0, s[0:1], 0x4
|
|
; CI-NEXT:    s_waitcnt lgkmcnt(0)
|
|
; CI-NEXT:    v_mov_b32_e32 v0, s0
|
|
; CI-NEXT:    v_div_scale_f32 v1, s[0:1], v0, v0, s2
|
|
; CI-NEXT:    v_div_scale_f32 v2, vcc, s2, v0, s2
|
|
; CI-NEXT:    v_rcp_f32_e32 v3, v1
|
|
; CI-NEXT:    s_setreg_imm32_b32 hwreg(HW_REG_MODE, 4, 2), 3
|
|
; CI-NEXT:    v_fma_f32 v4, -v1, v3, 1.0
|
|
; CI-NEXT:    v_fma_f32 v3, v4, v3, v3
|
|
; CI-NEXT:    v_mul_f32_e32 v4, v2, v3
|
|
; CI-NEXT:    v_fma_f32 v5, -v1, v4, v2
|
|
; CI-NEXT:    v_fma_f32 v4, v5, v3, v4
|
|
; CI-NEXT:    v_fma_f32 v1, -v1, v4, v2
|
|
; CI-NEXT:    s_setreg_imm32_b32 hwreg(HW_REG_MODE, 4, 2), 0
|
|
; CI-NEXT:    v_div_fmas_f32 v1, v1, v3, v4
|
|
; CI-NEXT:    s_mov_b32 s6, -1
|
|
; CI-NEXT:    s_mov_b32 s7, 0xf000
|
|
; CI-NEXT:    v_div_fixup_f32 v1, v1, v0, s2
|
|
; CI-NEXT:    v_trunc_f32_e32 v1, v1
|
|
; CI-NEXT:    v_fma_f32 v0, -v1, v0, s2
|
|
; CI-NEXT:    buffer_store_dword v0, off, s[4:7], 0
|
|
; CI-NEXT:    s_endpgm
|
|
;
|
|
; VI-LABEL: frem_f32:
|
|
; VI:       ; %bb.0:
|
|
; VI-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
|
|
; VI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x34
|
|
; VI-NEXT:    s_waitcnt lgkmcnt(0)
|
|
; VI-NEXT:    s_load_dword s2, s[6:7], 0x0
|
|
; VI-NEXT:    s_load_dword s0, s[0:1], 0x10
|
|
; VI-NEXT:    s_waitcnt lgkmcnt(0)
|
|
; VI-NEXT:    v_mov_b32_e32 v0, s0
|
|
; VI-NEXT:    v_div_scale_f32 v1, s[0:1], v0, v0, s2
|
|
; VI-NEXT:    v_div_scale_f32 v2, vcc, s2, v0, s2
|
|
; VI-NEXT:    v_rcp_f32_e32 v3, v1
|
|
; VI-NEXT:    s_setreg_imm32_b32 hwreg(HW_REG_MODE, 4, 2), 3
|
|
; VI-NEXT:    v_fma_f32 v4, -v1, v3, 1.0
|
|
; VI-NEXT:    v_fma_f32 v3, v4, v3, v3
|
|
; VI-NEXT:    v_mul_f32_e32 v4, v2, v3
|
|
; VI-NEXT:    v_fma_f32 v5, -v1, v4, v2
|
|
; VI-NEXT:    v_fma_f32 v4, v5, v3, v4
|
|
; VI-NEXT:    v_fma_f32 v1, -v1, v4, v2
|
|
; VI-NEXT:    s_setreg_imm32_b32 hwreg(HW_REG_MODE, 4, 2), 0
|
|
; VI-NEXT:    v_div_fmas_f32 v1, v1, v3, v4
|
|
; VI-NEXT:    v_div_fixup_f32 v1, v1, v0, s2
|
|
; VI-NEXT:    v_trunc_f32_e32 v1, v1
|
|
; VI-NEXT:    v_fma_f32 v2, -v1, v0, s2
|
|
; VI-NEXT:    v_mov_b32_e32 v0, s4
|
|
; VI-NEXT:    v_mov_b32_e32 v1, s5
|
|
; VI-NEXT:    flat_store_dword v[0:1], v2
|
|
; VI-NEXT:    s_endpgm
|
|
  %gep2 = getelementptr float, ptr addrspace(1) %in2, i32 4
|
|
  %r0 = load float, ptr addrspace(1) %in1, align 4
|
|
  %r1 = load float, ptr addrspace(1) %gep2, align 4
|
|
  %r2 = frem float %r0, %r1
|
|
  store float %r2, ptr addrspace(1) %out, align 4
|
|
  ret void
|
|
}
|
|
|
|
; frem fast float: fast-math permits the short rcp + mul + trunc + fma remainder sequence
; on both targets (no precise division expansion). Autogenerated CHECK lines.
define amdgpu_kernel void @fast_frem_f32(ptr addrspace(1) %out, ptr addrspace(1) %in1, ptr addrspace(1) %in2) #0 {
|
|
; CI-LABEL: fast_frem_f32:
|
|
; CI:       ; %bb.0:
|
|
; CI-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
|
|
; CI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0xd
|
|
; CI-NEXT:    s_waitcnt lgkmcnt(0)
|
|
; CI-NEXT:    s_load_dword s2, s[6:7], 0x0
|
|
; CI-NEXT:    s_load_dword s0, s[0:1], 0x4
|
|
; CI-NEXT:    s_mov_b32 s6, -1
|
|
; CI-NEXT:    s_mov_b32 s7, 0xf000
|
|
; CI-NEXT:    s_waitcnt lgkmcnt(0)
|
|
; CI-NEXT:    v_mov_b32_e32 v1, s2
|
|
; CI-NEXT:    v_rcp_f32_e32 v0, s0
|
|
; CI-NEXT:    v_mul_f32_e32 v0, s2, v0
|
|
; CI-NEXT:    v_trunc_f32_e32 v0, v0
|
|
; CI-NEXT:    v_fma_f32 v0, -v0, s0, v1
|
|
; CI-NEXT:    buffer_store_dword v0, off, s[4:7], 0
|
|
; CI-NEXT:    s_endpgm
|
|
;
|
|
; VI-LABEL: fast_frem_f32:
|
|
; VI:       ; %bb.0:
|
|
; VI-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
|
|
; VI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x34
|
|
; VI-NEXT:    s_waitcnt lgkmcnt(0)
|
|
; VI-NEXT:    s_load_dword s2, s[6:7], 0x0
|
|
; VI-NEXT:    s_load_dword s0, s[0:1], 0x10
|
|
; VI-NEXT:    s_waitcnt lgkmcnt(0)
|
|
; VI-NEXT:    v_mov_b32_e32 v1, s2
|
|
; VI-NEXT:    v_rcp_f32_e32 v0, s0
|
|
; VI-NEXT:    v_mul_f32_e32 v0, s2, v0
|
|
; VI-NEXT:    v_trunc_f32_e32 v0, v0
|
|
; VI-NEXT:    v_fma_f32 v2, -v0, s0, v1
|
|
; VI-NEXT:    v_mov_b32_e32 v0, s4
|
|
; VI-NEXT:    v_mov_b32_e32 v1, s5
|
|
; VI-NEXT:    flat_store_dword v[0:1], v2
|
|
; VI-NEXT:    s_endpgm
|
|
  %gep2 = getelementptr float, ptr addrspace(1) %in2, i32 4
|
|
  %r0 = load float, ptr addrspace(1) %in1, align 4
|
|
  %r1 = load float, ptr addrspace(1) %gep2, align 4
|
|
  %r2 = frem fast float %r0, %r1
|
|
  store float %r2, ptr addrspace(1) %out, align 4
|
|
  ret void
|
|
}
|
|
|
|
; frem float under attribute set #1 (presumably unsafe-fp-math -- attrs not visible in this
; chunk): generated code matches the fast-math variant above.
define amdgpu_kernel void @unsafe_frem_f32(ptr addrspace(1) %out, ptr addrspace(1) %in1, ptr addrspace(1) %in2) #1 {
|
|
; CI-LABEL: unsafe_frem_f32:
|
|
; CI:       ; %bb.0:
|
|
; CI-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
|
|
; CI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0xd
|
|
; CI-NEXT:    s_waitcnt lgkmcnt(0)
|
|
; CI-NEXT:    s_load_dword s2, s[6:7], 0x0
|
|
; CI-NEXT:    s_load_dword s0, s[0:1], 0x4
|
|
; CI-NEXT:    s_mov_b32 s6, -1
|
|
; CI-NEXT:    s_mov_b32 s7, 0xf000
|
|
; CI-NEXT:    s_waitcnt lgkmcnt(0)
|
|
; CI-NEXT:    v_mov_b32_e32 v1, s2
|
|
; CI-NEXT:    v_rcp_f32_e32 v0, s0
|
|
; CI-NEXT:    v_mul_f32_e32 v0, s2, v0
|
|
; CI-NEXT:    v_trunc_f32_e32 v0, v0
|
|
; CI-NEXT:    v_fma_f32 v0, -v0, s0, v1
|
|
; CI-NEXT:    buffer_store_dword v0, off, s[4:7], 0
|
|
; CI-NEXT:    s_endpgm
|
|
;
|
|
; VI-LABEL: unsafe_frem_f32:
|
|
; VI:       ; %bb.0:
|
|
; VI-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
|
|
; VI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x34
|
|
; VI-NEXT:    s_waitcnt lgkmcnt(0)
|
|
; VI-NEXT:    s_load_dword s2, s[6:7], 0x0
|
|
; VI-NEXT:    s_load_dword s0, s[0:1], 0x10
|
|
; VI-NEXT:    s_waitcnt lgkmcnt(0)
|
|
; VI-NEXT:    v_mov_b32_e32 v1, s2
|
|
; VI-NEXT:    v_rcp_f32_e32 v0, s0
|
|
; VI-NEXT:    v_mul_f32_e32 v0, s2, v0
|
|
; VI-NEXT:    v_trunc_f32_e32 v0, v0
|
|
; VI-NEXT:    v_fma_f32 v2, -v0, s0, v1
|
|
; VI-NEXT:    v_mov_b32_e32 v0, s4
|
|
; VI-NEXT:    v_mov_b32_e32 v1, s5
|
|
; VI-NEXT:    flat_store_dword v[0:1], v2
|
|
; VI-NEXT:    s_endpgm
|
|
  %gep2 = getelementptr float, ptr addrspace(1) %in2, i32 4
|
|
  %r0 = load float, ptr addrspace(1) %in1, align 4
|
|
  %r1 = load float, ptr addrspace(1) %gep2, align 4
|
|
  %r2 = frem float %r0, %r1
|
|
  store float %r2, ptr addrspace(1) %out, align 4
|
|
  ret void
|
|
}
|
|
|
|
; frem double with strict semantics: precise f64 division expansion (div_scale/rcp with two
; Newton-Raphson refinement steps/div_fmas/div_fixup) followed by trunc and fma; identical
; code on CI and VI apart from kernarg offsets. Note: no %gep2 here, %in2 is loaded directly.
define amdgpu_kernel void @frem_f64(ptr addrspace(1) %out, ptr addrspace(1) %in1, ptr addrspace(1) %in2) #0 {
|
|
; CI-LABEL: frem_f64:
|
|
; CI:       ; %bb.0:
|
|
; CI-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
|
|
; CI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0xd
|
|
; CI-NEXT:    s_waitcnt lgkmcnt(0)
|
|
; CI-NEXT:    s_load_dwordx2 s[2:3], s[6:7], 0x0
|
|
; CI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x0
|
|
; CI-NEXT:    s_waitcnt lgkmcnt(0)
|
|
; CI-NEXT:    v_mov_b32_e32 v0, s0
|
|
; CI-NEXT:    v_mov_b32_e32 v1, s1
|
|
; CI-NEXT:    v_div_scale_f64 v[2:3], s[0:1], v[0:1], v[0:1], s[2:3]
|
|
; CI-NEXT:    v_div_scale_f64 v[8:9], vcc, s[2:3], v[0:1], s[2:3]
|
|
; CI-NEXT:    v_rcp_f64_e32 v[4:5], v[2:3]
|
|
; CI-NEXT:    v_fma_f64 v[6:7], -v[2:3], v[4:5], 1.0
|
|
; CI-NEXT:    v_fma_f64 v[4:5], v[4:5], v[6:7], v[4:5]
|
|
; CI-NEXT:    v_fma_f64 v[6:7], -v[2:3], v[4:5], 1.0
|
|
; CI-NEXT:    v_fma_f64 v[4:5], v[4:5], v[6:7], v[4:5]
|
|
; CI-NEXT:    v_mul_f64 v[6:7], v[8:9], v[4:5]
|
|
; CI-NEXT:    v_fma_f64 v[2:3], -v[2:3], v[6:7], v[8:9]
|
|
; CI-NEXT:    v_div_fmas_f64 v[2:3], v[2:3], v[4:5], v[6:7]
|
|
; CI-NEXT:    v_div_fixup_f64 v[2:3], v[2:3], v[0:1], s[2:3]
|
|
; CI-NEXT:    v_trunc_f64_e32 v[2:3], v[2:3]
|
|
; CI-NEXT:    v_fma_f64 v[0:1], -v[2:3], v[0:1], s[2:3]
|
|
; CI-NEXT:    v_mov_b32_e32 v2, s4
|
|
; CI-NEXT:    v_mov_b32_e32 v3, s5
|
|
; CI-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
|
|
; CI-NEXT:    s_endpgm
|
|
;
|
|
; VI-LABEL: frem_f64:
|
|
; VI:       ; %bb.0:
|
|
; VI-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
|
|
; VI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x34
|
|
; VI-NEXT:    s_waitcnt lgkmcnt(0)
|
|
; VI-NEXT:    s_load_dwordx2 s[2:3], s[6:7], 0x0
|
|
; VI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x0
|
|
; VI-NEXT:    s_waitcnt lgkmcnt(0)
|
|
; VI-NEXT:    v_mov_b32_e32 v0, s0
|
|
; VI-NEXT:    v_mov_b32_e32 v1, s1
|
|
; VI-NEXT:    v_div_scale_f64 v[2:3], s[0:1], v[0:1], v[0:1], s[2:3]
|
|
; VI-NEXT:    v_div_scale_f64 v[8:9], vcc, s[2:3], v[0:1], s[2:3]
|
|
; VI-NEXT:    v_rcp_f64_e32 v[4:5], v[2:3]
|
|
; VI-NEXT:    v_fma_f64 v[6:7], -v[2:3], v[4:5], 1.0
|
|
; VI-NEXT:    v_fma_f64 v[4:5], v[4:5], v[6:7], v[4:5]
|
|
; VI-NEXT:    v_fma_f64 v[6:7], -v[2:3], v[4:5], 1.0
|
|
; VI-NEXT:    v_fma_f64 v[4:5], v[4:5], v[6:7], v[4:5]
|
|
; VI-NEXT:    v_mul_f64 v[6:7], v[8:9], v[4:5]
|
|
; VI-NEXT:    v_fma_f64 v[2:3], -v[2:3], v[6:7], v[8:9]
|
|
; VI-NEXT:    v_div_fmas_f64 v[2:3], v[2:3], v[4:5], v[6:7]
|
|
; VI-NEXT:    v_div_fixup_f64 v[2:3], v[2:3], v[0:1], s[2:3]
|
|
; VI-NEXT:    v_trunc_f64_e32 v[2:3], v[2:3]
|
|
; VI-NEXT:    v_fma_f64 v[0:1], -v[2:3], v[0:1], s[2:3]
|
|
; VI-NEXT:    v_mov_b32_e32 v2, s4
|
|
; VI-NEXT:    v_mov_b32_e32 v3, s5
|
|
; VI-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
|
|
; VI-NEXT:    s_endpgm
|
|
  %r0 = load double, ptr addrspace(1) %in1, align 8
|
|
  %r1 = load double, ptr addrspace(1) %in2, align 8
|
|
  %r2 = frem double %r0, %r1
|
|
  store double %r2, ptr addrspace(1) %out, align 8
|
|
  ret void
|
|
}
|
|
|
|
; frem fast double: fast-math still requires Newton-Raphson refinement of the f64 rcp (no
; precise div_scale/div_fmas machinery), then trunc + fma to form the remainder.
define amdgpu_kernel void @fast_frem_f64(ptr addrspace(1) %out, ptr addrspace(1) %in1, ptr addrspace(1) %in2) #0 {
|
|
; CI-LABEL: fast_frem_f64:
|
|
; CI:       ; %bb.0:
|
|
; CI-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
|
|
; CI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0xd
|
|
; CI-NEXT:    s_waitcnt lgkmcnt(0)
|
|
; CI-NEXT:    s_load_dwordx2 s[2:3], s[6:7], 0x0
|
|
; CI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x0
|
|
; CI-NEXT:    s_waitcnt lgkmcnt(0)
|
|
; CI-NEXT:    v_rcp_f64_e32 v[0:1], s[0:1]
|
|
; CI-NEXT:    v_fma_f64 v[2:3], -s[0:1], v[0:1], 1.0
|
|
; CI-NEXT:    v_fma_f64 v[0:1], v[2:3], v[0:1], v[0:1]
|
|
; CI-NEXT:    v_fma_f64 v[2:3], -s[0:1], v[0:1], 1.0
|
|
; CI-NEXT:    v_fma_f64 v[0:1], v[2:3], v[0:1], v[0:1]
|
|
; CI-NEXT:    v_mov_b32_e32 v2, s2
|
|
; CI-NEXT:    v_mov_b32_e32 v3, s3
|
|
; CI-NEXT:    v_mul_f64 v[4:5], s[2:3], v[0:1]
|
|
; CI-NEXT:    v_fma_f64 v[6:7], -s[0:1], v[4:5], v[2:3]
|
|
; CI-NEXT:    v_fma_f64 v[0:1], v[6:7], v[0:1], v[4:5]
|
|
; CI-NEXT:    v_trunc_f64_e32 v[0:1], v[0:1]
|
|
; CI-NEXT:    v_fma_f64 v[0:1], -v[0:1], s[0:1], v[2:3]
|
|
; CI-NEXT:    v_mov_b32_e32 v2, s4
|
|
; CI-NEXT:    v_mov_b32_e32 v3, s5
|
|
; CI-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
|
|
; CI-NEXT:    s_endpgm
|
|
;
|
|
; VI-LABEL: fast_frem_f64:
|
|
; VI:       ; %bb.0:
|
|
; VI-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
|
|
; VI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x34
|
|
; VI-NEXT:    s_waitcnt lgkmcnt(0)
|
|
; VI-NEXT:    s_load_dwordx2 s[2:3], s[6:7], 0x0
|
|
; VI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x0
|
|
; VI-NEXT:    s_waitcnt lgkmcnt(0)
|
|
; VI-NEXT:    v_rcp_f64_e32 v[0:1], s[0:1]
|
|
; VI-NEXT:    v_fma_f64 v[2:3], -s[0:1], v[0:1], 1.0
|
|
; VI-NEXT:    v_fma_f64 v[0:1], v[2:3], v[0:1], v[0:1]
|
|
; VI-NEXT:    v_fma_f64 v[2:3], -s[0:1], v[0:1], 1.0
|
|
; VI-NEXT:    v_fma_f64 v[0:1], v[2:3], v[0:1], v[0:1]
|
|
; VI-NEXT:    v_mov_b32_e32 v2, s2
|
|
; VI-NEXT:    v_mov_b32_e32 v3, s3
|
|
; VI-NEXT:    v_mul_f64 v[4:5], s[2:3], v[0:1]
|
|
; VI-NEXT:    v_fma_f64 v[6:7], -s[0:1], v[4:5], v[2:3]
|
|
; VI-NEXT:    v_fma_f64 v[0:1], v[6:7], v[0:1], v[4:5]
|
|
; VI-NEXT:    v_trunc_f64_e32 v[0:1], v[0:1]
|
|
; VI-NEXT:    v_fma_f64 v[0:1], -v[0:1], s[0:1], v[2:3]
|
|
; VI-NEXT:    v_mov_b32_e32 v2, s4
|
|
; VI-NEXT:    v_mov_b32_e32 v3, s5
|
|
; VI-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
|
|
; VI-NEXT:    s_endpgm
|
|
  %r0 = load double, ptr addrspace(1) %in1, align 8
|
|
  %r1 = load double, ptr addrspace(1) %in2, align 8
|
|
  %r2 = frem fast double %r0, %r1
|
|
  store double %r2, ptr addrspace(1) %out, align 8
|
|
  ret void
|
|
}
|
|
|
|
; frem double under attribute set #1 (presumably unsafe-fp-math -- attrs not visible in this
; chunk): same lowering as the fast variant. The define is split across the CHECK block
; because update_llc_test_checks.py inserts checks after the first line of the signature.
define amdgpu_kernel void @unsafe_frem_f64(ptr addrspace(1) %out, ptr addrspace(1) %in1,
|
|
; CI-LABEL: unsafe_frem_f64:
|
|
; CI:       ; %bb.0:
|
|
; CI-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
|
|
; CI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0xd
|
|
; CI-NEXT:    s_waitcnt lgkmcnt(0)
|
|
; CI-NEXT:    s_load_dwordx2 s[2:3], s[6:7], 0x0
|
|
; CI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x0
|
|
; CI-NEXT:    s_waitcnt lgkmcnt(0)
|
|
; CI-NEXT:    v_rcp_f64_e32 v[0:1], s[0:1]
|
|
; CI-NEXT:    v_fma_f64 v[2:3], -s[0:1], v[0:1], 1.0
|
|
; CI-NEXT:    v_fma_f64 v[0:1], v[2:3], v[0:1], v[0:1]
|
|
; CI-NEXT:    v_fma_f64 v[2:3], -s[0:1], v[0:1], 1.0
|
|
; CI-NEXT:    v_fma_f64 v[0:1], v[2:3], v[0:1], v[0:1]
|
|
; CI-NEXT:    v_mov_b32_e32 v2, s2
|
|
; CI-NEXT:    v_mov_b32_e32 v3, s3
|
|
; CI-NEXT:    v_mul_f64 v[4:5], s[2:3], v[0:1]
|
|
; CI-NEXT:    v_fma_f64 v[6:7], -s[0:1], v[4:5], v[2:3]
|
|
; CI-NEXT:    v_fma_f64 v[0:1], v[6:7], v[0:1], v[4:5]
|
|
; CI-NEXT:    v_trunc_f64_e32 v[0:1], v[0:1]
|
|
; CI-NEXT:    v_fma_f64 v[0:1], -v[0:1], s[0:1], v[2:3]
|
|
; CI-NEXT:    v_mov_b32_e32 v2, s4
|
|
; CI-NEXT:    v_mov_b32_e32 v3, s5
|
|
; CI-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
|
|
; CI-NEXT:    s_endpgm
|
|
;
|
|
; VI-LABEL: unsafe_frem_f64:
|
|
; VI:       ; %bb.0:
|
|
; VI-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
|
|
; VI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x34
|
|
; VI-NEXT:    s_waitcnt lgkmcnt(0)
|
|
; VI-NEXT:    s_load_dwordx2 s[2:3], s[6:7], 0x0
|
|
; VI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x0
|
|
; VI-NEXT:    s_waitcnt lgkmcnt(0)
|
|
; VI-NEXT:    v_rcp_f64_e32 v[0:1], s[0:1]
|
|
; VI-NEXT:    v_fma_f64 v[2:3], -s[0:1], v[0:1], 1.0
|
|
; VI-NEXT:    v_fma_f64 v[0:1], v[2:3], v[0:1], v[0:1]
|
|
; VI-NEXT:    v_fma_f64 v[2:3], -s[0:1], v[0:1], 1.0
|
|
; VI-NEXT:    v_fma_f64 v[0:1], v[2:3], v[0:1], v[0:1]
|
|
; VI-NEXT:    v_mov_b32_e32 v2, s2
|
|
; VI-NEXT:    v_mov_b32_e32 v3, s3
|
|
; VI-NEXT:    v_mul_f64 v[4:5], s[2:3], v[0:1]
|
|
; VI-NEXT:    v_fma_f64 v[6:7], -s[0:1], v[4:5], v[2:3]
|
|
; VI-NEXT:    v_fma_f64 v[0:1], v[6:7], v[0:1], v[4:5]
|
|
; VI-NEXT:    v_trunc_f64_e32 v[0:1], v[0:1]
|
|
; VI-NEXT:    v_fma_f64 v[0:1], -v[0:1], s[0:1], v[2:3]
|
|
; VI-NEXT:    v_mov_b32_e32 v2, s4
|
|
; VI-NEXT:    v_mov_b32_e32 v3, s5
|
|
; VI-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
|
|
; VI-NEXT:    s_endpgm
|
|
ptr addrspace(1) %in2) #1 {
|
|
  %r0 = load double, ptr addrspace(1) %in1, align 8
|
|
  %r1 = load double, ptr addrspace(1) %in2, align 8
|
|
  %r2 = frem double %r0, %r1
|
|
  store double %r2, ptr addrspace(1) %out, align 8
|
|
  ret void
|
|
}
|
|
|
|
; frem <2 x half>: fully scalarized -- the low and high 16-bit lanes (split with s_lshr) each
; go through the scalar f16 frem lowering, then are repacked with lshl/or before the store.
define amdgpu_kernel void @frem_v2f16(ptr addrspace(1) %out, ptr addrspace(1) %in1, ptr addrspace(1) %in2) #0 {
|
|
; CI-LABEL: frem_v2f16:
|
|
; CI:       ; %bb.0:
|
|
; CI-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
|
|
; CI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0xd
|
|
; CI-NEXT:    s_waitcnt lgkmcnt(0)
|
|
; CI-NEXT:    s_load_dword s2, s[6:7], 0x0
|
|
; CI-NEXT:    s_load_dword s0, s[0:1], 0x4
|
|
; CI-NEXT:    s_waitcnt lgkmcnt(0)
|
|
; CI-NEXT:    v_cvt_f32_f16_e32 v0, s2
|
|
; CI-NEXT:    v_cvt_f32_f16_e32 v1, s0
|
|
; CI-NEXT:    s_lshr_b32 s6, s0, 16
|
|
; CI-NEXT:    s_lshr_b32 s3, s2, 16
|
|
; CI-NEXT:    v_div_scale_f32 v2, s[0:1], v1, v1, v0
|
|
; CI-NEXT:    v_div_scale_f32 v3, vcc, v0, v1, v0
|
|
; CI-NEXT:    v_rcp_f32_e32 v4, v2
|
|
; CI-NEXT:    s_setreg_imm32_b32 hwreg(HW_REG_MODE, 4, 2), 3
|
|
; CI-NEXT:    v_fma_f32 v5, -v2, v4, 1.0
|
|
; CI-NEXT:    v_fma_f32 v4, v5, v4, v4
|
|
; CI-NEXT:    v_mul_f32_e32 v5, v3, v4
|
|
; CI-NEXT:    v_fma_f32 v6, -v2, v5, v3
|
|
; CI-NEXT:    v_fma_f32 v5, v6, v4, v5
|
|
; CI-NEXT:    v_fma_f32 v2, -v2, v5, v3
|
|
; CI-NEXT:    s_setreg_imm32_b32 hwreg(HW_REG_MODE, 4, 2), 0
|
|
; CI-NEXT:    v_div_fmas_f32 v2, v2, v4, v5
|
|
; CI-NEXT:    v_div_fixup_f32 v2, v2, v1, v0
|
|
; CI-NEXT:    v_trunc_f32_e32 v2, v2
|
|
; CI-NEXT:    v_fma_f32 v0, -v2, v1, v0
|
|
; CI-NEXT:    v_cvt_f32_f16_e32 v1, s3
|
|
; CI-NEXT:    v_cvt_f32_f16_e32 v2, s6
|
|
; CI-NEXT:    s_setreg_imm32_b32 hwreg(HW_REG_MODE, 2, 2), 0
|
|
; CI-NEXT:    v_cvt_f16_f32_e32 v0, v0
|
|
; CI-NEXT:    v_div_scale_f32 v3, s[0:1], v2, v2, v1
|
|
; CI-NEXT:    v_div_scale_f32 v4, vcc, v1, v2, v1
|
|
; CI-NEXT:    v_rcp_f32_e32 v5, v3
|
|
; CI-NEXT:    s_setreg_imm32_b32 hwreg(HW_REG_MODE, 4, 2), 3
|
|
; CI-NEXT:    v_fma_f32 v6, -v3, v5, 1.0
|
|
; CI-NEXT:    v_fma_f32 v5, v6, v5, v5
|
|
; CI-NEXT:    v_mul_f32_e32 v6, v4, v5
|
|
; CI-NEXT:    v_fma_f32 v7, -v3, v6, v4
|
|
; CI-NEXT:    v_fma_f32 v6, v7, v5, v6
|
|
; CI-NEXT:    v_fma_f32 v3, -v3, v6, v4
|
|
; CI-NEXT:    s_setreg_imm32_b32 hwreg(HW_REG_MODE, 4, 2), 0
|
|
; CI-NEXT:    v_div_fmas_f32 v3, v3, v5, v6
|
|
; CI-NEXT:    v_div_fixup_f32 v3, v3, v2, v1
|
|
; CI-NEXT:    v_trunc_f32_e32 v3, v3
|
|
; CI-NEXT:    v_fma_f32 v1, -v3, v2, v1
|
|
; CI-NEXT:    v_cvt_f16_f32_e32 v1, v1
|
|
; CI-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
|
|
; CI-NEXT:    v_or_b32_e32 v2, v0, v1
|
|
; CI-NEXT:    v_mov_b32_e32 v0, s4
|
|
; CI-NEXT:    v_mov_b32_e32 v1, s5
|
|
; CI-NEXT:    flat_store_dword v[0:1], v2
|
|
; CI-NEXT:    s_endpgm
|
|
;
|
|
; VI-LABEL: frem_v2f16:
|
|
; VI:       ; %bb.0:
|
|
; VI-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
|
|
; VI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x34
|
|
; VI-NEXT:    s_waitcnt lgkmcnt(0)
|
|
; VI-NEXT:    s_load_dword s2, s[6:7], 0x0
|
|
; VI-NEXT:    s_load_dword s0, s[0:1], 0x10
|
|
; VI-NEXT:    s_waitcnt lgkmcnt(0)
|
|
; VI-NEXT:    v_cvt_f32_f16_e32 v0, s2
|
|
; VI-NEXT:    v_cvt_f32_f16_e32 v2, s0
|
|
; VI-NEXT:    s_lshr_b32 s3, s0, 16
|
|
; VI-NEXT:    v_cvt_f32_f16_e32 v3, s3
|
|
; VI-NEXT:    v_mov_b32_e32 v1, s0
|
|
; VI-NEXT:    v_rcp_f32_e32 v2, v2
|
|
; VI-NEXT:    s_lshr_b32 s1, s2, 16
|
|
; VI-NEXT:    v_rcp_f32_e32 v3, v3
|
|
; VI-NEXT:    v_mul_f32_e32 v0, v0, v2
|
|
; VI-NEXT:    v_cvt_f16_f32_e32 v0, v0
|
|
; VI-NEXT:    v_mov_b32_e32 v2, s3
|
|
; VI-NEXT:    v_div_fixup_f16 v0, v0, v1, s2
|
|
; VI-NEXT:    v_trunc_f16_e32 v0, v0
|
|
; VI-NEXT:    v_fma_f16 v0, -v0, v1, s2
|
|
; VI-NEXT:    v_cvt_f32_f16_e32 v1, s1
|
|
; VI-NEXT:    v_mul_f32_e32 v1, v1, v3
|
|
; VI-NEXT:    v_cvt_f16_f32_e32 v1, v1
|
|
; VI-NEXT:    v_div_fixup_f16 v1, v1, v2, s1
|
|
; VI-NEXT:    v_trunc_f16_e32 v1, v1
|
|
; VI-NEXT:    v_fma_f16 v1, -v1, v2, s1
|
|
; VI-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
|
|
; VI-NEXT:    v_or_b32_e32 v2, v0, v1
|
|
; VI-NEXT:    v_mov_b32_e32 v0, s4
|
|
; VI-NEXT:    v_mov_b32_e32 v1, s5
|
|
; VI-NEXT:    flat_store_dword v[0:1], v2
|
|
; VI-NEXT:    s_endpgm
|
|
  %gep2 = getelementptr <2 x half>, ptr addrspace(1) %in2, i32 4
|
|
  %r0 = load <2 x half>, ptr addrspace(1) %in1, align 8
|
|
  %r1 = load <2 x half>, ptr addrspace(1) %gep2, align 8
|
|
  %r2 = frem <2 x half> %r0, %r1
|
|
  store <2 x half> %r2, ptr addrspace(1) %out, align 8
|
|
  ret void
|
|
}
|
|
|
|
; frem <4 x half>: scalarized into four f16 frem lowerings (CI: four f32 division
; expansions; VI: four rcp/div_fixup_f16 sequences), then repacked into two dwords.
define amdgpu_kernel void @frem_v4f16(ptr addrspace(1) %out, ptr addrspace(1) %in1, ptr addrspace(1) %in2) #0 {
|
|
; CI-LABEL: frem_v4f16:
|
|
; CI:       ; %bb.0:
|
|
; CI-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
|
|
; CI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0xd
|
|
; CI-NEXT:    s_waitcnt lgkmcnt(0)
|
|
; CI-NEXT:    s_load_dwordx2 s[2:3], s[6:7], 0x0
|
|
; CI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x8
|
|
; CI-NEXT:    s_waitcnt lgkmcnt(0)
|
|
; CI-NEXT:    v_cvt_f32_f16_e32 v0, s2
|
|
; CI-NEXT:    v_cvt_f32_f16_e32 v1, s0
|
|
; CI-NEXT:    s_lshr_b32 s8, s2, 16
|
|
; CI-NEXT:    s_lshr_b32 s9, s3, 16
|
|
; CI-NEXT:    s_lshr_b32 s10, s0, 16
|
|
; CI-NEXT:    v_div_scale_f32 v2, s[6:7], v1, v1, v0
|
|
; CI-NEXT:    s_lshr_b32 s11, s1, 16
|
|
; CI-NEXT:    v_div_scale_f32 v3, vcc, v0, v1, v0
|
|
; CI-NEXT:    v_rcp_f32_e32 v4, v2
|
|
; CI-NEXT:    s_setreg_imm32_b32 hwreg(HW_REG_MODE, 4, 2), 3
|
|
; CI-NEXT:    v_fma_f32 v5, -v2, v4, 1.0
|
|
; CI-NEXT:    v_fma_f32 v4, v5, v4, v4
|
|
; CI-NEXT:    v_mul_f32_e32 v5, v3, v4
|
|
; CI-NEXT:    v_fma_f32 v6, -v2, v5, v3
|
|
; CI-NEXT:    v_fma_f32 v5, v6, v4, v5
|
|
; CI-NEXT:    v_fma_f32 v2, -v2, v5, v3
|
|
; CI-NEXT:    s_setreg_imm32_b32 hwreg(HW_REG_MODE, 4, 2), 0
|
|
; CI-NEXT:    v_div_fmas_f32 v2, v2, v4, v5
|
|
; CI-NEXT:    v_div_fixup_f32 v2, v2, v1, v0
|
|
; CI-NEXT:    v_trunc_f32_e32 v2, v2
|
|
; CI-NEXT:    v_fma_f32 v0, -v2, v1, v0
|
|
; CI-NEXT:    v_cvt_f32_f16_e32 v1, s8
|
|
; CI-NEXT:    v_cvt_f32_f16_e32 v2, s10
|
|
; CI-NEXT:    s_setreg_imm32_b32 hwreg(HW_REG_MODE, 2, 2), 0
|
|
; CI-NEXT:    v_cvt_f16_f32_e32 v0, v0
|
|
; CI-NEXT:    v_div_scale_f32 v3, s[6:7], v2, v2, v1
|
|
; CI-NEXT:    v_div_scale_f32 v4, vcc, v1, v2, v1
|
|
; CI-NEXT:    v_rcp_f32_e32 v5, v3
|
|
; CI-NEXT:    s_setreg_imm32_b32 hwreg(HW_REG_MODE, 4, 2), 3
|
|
; CI-NEXT:    v_fma_f32 v6, -v3, v5, 1.0
|
|
; CI-NEXT:    v_fma_f32 v5, v6, v5, v5
|
|
; CI-NEXT:    v_mul_f32_e32 v6, v4, v5
|
|
; CI-NEXT:    v_fma_f32 v7, -v3, v6, v4
|
|
; CI-NEXT:    v_fma_f32 v6, v7, v5, v6
|
|
; CI-NEXT:    v_fma_f32 v3, -v3, v6, v4
|
|
; CI-NEXT:    s_setreg_imm32_b32 hwreg(HW_REG_MODE, 4, 2), 0
|
|
; CI-NEXT:    v_div_fmas_f32 v3, v3, v5, v6
|
|
; CI-NEXT:    v_div_fixup_f32 v3, v3, v2, v1
|
|
; CI-NEXT:    v_trunc_f32_e32 v3, v3
|
|
; CI-NEXT:    v_fma_f32 v1, -v3, v2, v1
|
|
; CI-NEXT:    v_cvt_f32_f16_e32 v2, s3
|
|
; CI-NEXT:    v_cvt_f32_f16_e32 v3, s1
|
|
; CI-NEXT:    v_cvt_f16_f32_e32 v1, v1
|
|
; CI-NEXT:    v_div_scale_f32 v4, s[0:1], v3, v3, v2
|
|
; CI-NEXT:    v_div_scale_f32 v5, vcc, v2, v3, v2
|
|
; CI-NEXT:    v_rcp_f32_e32 v6, v4
|
|
; CI-NEXT:    s_setreg_imm32_b32 hwreg(HW_REG_MODE, 4, 2), 3
|
|
; CI-NEXT:    v_fma_f32 v7, -v4, v6, 1.0
|
|
; CI-NEXT:    v_fma_f32 v6, v7, v6, v6
|
|
; CI-NEXT:    v_mul_f32_e32 v7, v5, v6
|
|
; CI-NEXT:    v_fma_f32 v8, -v4, v7, v5
|
|
; CI-NEXT:    v_fma_f32 v7, v8, v6, v7
|
|
; CI-NEXT:    v_fma_f32 v4, -v4, v7, v5
|
|
; CI-NEXT:    s_setreg_imm32_b32 hwreg(HW_REG_MODE, 4, 2), 0
|
|
; CI-NEXT:    v_div_fmas_f32 v4, v4, v6, v7
|
|
; CI-NEXT:    v_div_fixup_f32 v4, v4, v3, v2
|
|
; CI-NEXT:    v_trunc_f32_e32 v4, v4
|
|
; CI-NEXT:    v_fma_f32 v2, -v4, v3, v2
|
|
; CI-NEXT:    v_cvt_f32_f16_e32 v3, s9
|
|
; CI-NEXT:    v_cvt_f32_f16_e32 v4, s11
|
|
; CI-NEXT:    v_cvt_f16_f32_e32 v2, v2
|
|
; CI-NEXT:    v_div_scale_f32 v5, s[0:1], v4, v4, v3
|
|
; CI-NEXT:    v_div_scale_f32 v6, vcc, v3, v4, v3
|
|
; CI-NEXT:    v_rcp_f32_e32 v7, v5
|
|
; CI-NEXT:    s_setreg_imm32_b32 hwreg(HW_REG_MODE, 4, 2), 3
|
|
; CI-NEXT:    v_fma_f32 v8, -v5, v7, 1.0
|
|
; CI-NEXT:    v_fma_f32 v7, v8, v7, v7
|
|
; CI-NEXT:    v_mul_f32_e32 v8, v6, v7
|
|
; CI-NEXT:    v_fma_f32 v9, -v5, v8, v6
|
|
; CI-NEXT:    v_fma_f32 v8, v9, v7, v8
|
|
; CI-NEXT:    v_fma_f32 v5, -v5, v8, v6
|
|
; CI-NEXT:    s_setreg_imm32_b32 hwreg(HW_REG_MODE, 4, 2), 0
|
|
; CI-NEXT:    v_div_fmas_f32 v5, v5, v7, v8
|
|
; CI-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
|
|
; CI-NEXT:    v_or_b32_e32 v0, v0, v1
|
|
; CI-NEXT:    v_div_fixup_f32 v5, v5, v4, v3
|
|
; CI-NEXT:    v_trunc_f32_e32 v5, v5
|
|
; CI-NEXT:    v_fma_f32 v3, -v5, v4, v3
|
|
; CI-NEXT:    v_cvt_f16_f32_e32 v3, v3
|
|
; CI-NEXT:    v_lshlrev_b32_e32 v1, 16, v3
|
|
; CI-NEXT:    v_or_b32_e32 v1, v2, v1
|
|
; CI-NEXT:    v_mov_b32_e32 v2, s4
|
|
; CI-NEXT:    v_mov_b32_e32 v3, s5
|
|
; CI-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
|
|
; CI-NEXT:    s_endpgm
|
|
;
|
|
; VI-LABEL: frem_v4f16:
|
|
; VI:       ; %bb.0:
|
|
; VI-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
|
|
; VI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x34
|
|
; VI-NEXT:    s_waitcnt lgkmcnt(0)
|
|
; VI-NEXT:    s_load_dwordx2 s[2:3], s[6:7], 0x0
|
|
; VI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x20
|
|
; VI-NEXT:    s_waitcnt lgkmcnt(0)
|
|
; VI-NEXT:    v_cvt_f32_f16_e32 v0, s2
|
|
; VI-NEXT:    v_cvt_f32_f16_e32 v2, s0
|
|
; VI-NEXT:    s_lshr_b32 s8, s0, 16
|
|
; VI-NEXT:    v_cvt_f32_f16_e32 v3, s8
|
|
; VI-NEXT:    v_mov_b32_e32 v1, s0
|
|
; VI-NEXT:    v_rcp_f32_e32 v2, v2
|
|
; VI-NEXT:    s_lshr_b32 s6, s2, 16
|
|
; VI-NEXT:    v_rcp_f32_e32 v3, v3
|
|
; VI-NEXT:    v_cvt_f32_f16_e32 v4, s1
|
|
; VI-NEXT:    v_mul_f32_e32 v0, v0, v2
|
|
; VI-NEXT:    v_cvt_f16_f32_e32 v0, v0
|
|
; VI-NEXT:    v_mov_b32_e32 v2, s8
|
|
; VI-NEXT:    v_rcp_f32_e32 v4, v4
|
|
; VI-NEXT:    s_lshr_b32 s9, s1, 16
|
|
; VI-NEXT:    v_div_fixup_f16 v0, v0, v1, s2
|
|
; VI-NEXT:    v_trunc_f16_e32 v0, v0
|
|
; VI-NEXT:    v_fma_f16 v0, -v0, v1, s2
|
|
; VI-NEXT:    v_cvt_f32_f16_e32 v1, s6
|
|
; VI-NEXT:    v_cvt_f32_f16_e32 v5, s9
|
|
; VI-NEXT:    s_lshr_b32 s7, s3, 16
|
|
; VI-NEXT:    v_mul_f32_e32 v1, v1, v3
|
|
; VI-NEXT:    v_cvt_f16_f32_e32 v1, v1
|
|
; VI-NEXT:    v_mov_b32_e32 v3, s1
|
|
; VI-NEXT:    v_rcp_f32_e32 v5, v5
|
|
; VI-NEXT:    v_div_fixup_f16 v1, v1, v2, s6
|
|
; VI-NEXT:    v_trunc_f16_e32 v1, v1
|
|
; VI-NEXT:    v_fma_f16 v1, -v1, v2, s6
|
|
; VI-NEXT:    v_cvt_f32_f16_e32 v2, s3
|
|
; VI-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
|
|
; VI-NEXT:    v_or_b32_e32 v0, v0, v1
|
|
; VI-NEXT:    v_mul_f32_e32 v2, v2, v4
|
|
; VI-NEXT:    v_cvt_f16_f32_e32 v2, v2
|
|
; VI-NEXT:    v_mov_b32_e32 v4, s9
|
|
; VI-NEXT:    v_div_fixup_f16 v2, v2, v3, s3
|
|
; VI-NEXT:    v_trunc_f16_e32 v2, v2
|
|
; VI-NEXT:    v_fma_f16 v2, -v2, v3, s3
|
|
; VI-NEXT:    v_cvt_f32_f16_e32 v3, s7
|
|
; VI-NEXT:    v_mul_f32_e32 v3, v3, v5
|
|
; VI-NEXT:    v_cvt_f16_f32_e32 v3, v3
|
|
; VI-NEXT:    v_div_fixup_f16 v3, v3, v4, s7
|
|
; VI-NEXT:    v_trunc_f16_e32 v3, v3
|
|
; VI-NEXT:    v_fma_f16 v3, -v3, v4, s7
|
|
; VI-NEXT:    v_lshlrev_b32_e32 v1, 16, v3
|
|
; VI-NEXT:    v_or_b32_e32 v1, v2, v1
|
|
; VI-NEXT:    v_mov_b32_e32 v2, s4
|
|
; VI-NEXT:    v_mov_b32_e32 v3, s5
|
|
; VI-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
|
|
; VI-NEXT:    s_endpgm
|
|
  %gep2 = getelementptr <4 x half>, ptr addrspace(1) %in2, i32 4
|
|
  %r0 = load <4 x half>, ptr addrspace(1) %in1, align 16
|
|
  %r1 = load <4 x half>, ptr addrspace(1) %gep2, align 16
|
|
  %r2 = frem <4 x half> %r0, %r1
|
|
  store <4 x half> %r2, ptr addrspace(1) %out, align 16
|
|
  ret void
|
|
}
|
|
|
|
; Per-lane f32 frem: q = trunc(a / b) via div_scale/rcp/div_fmas/div_fixup,
; then r = a - q*b (fma). Denormal mode is toggled around the refinement steps.
define amdgpu_kernel void @frem_v2f32(ptr addrspace(1) %out, ptr addrspace(1) %in1, ptr addrspace(1) %in2) #0 {
; CI-LABEL: frem_v2f32:
; CI: ; %bb.0:
; CI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
; CI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xd
; CI-NEXT: s_waitcnt lgkmcnt(0)
; CI-NEXT: s_load_dwordx2 s[2:3], s[6:7], 0x0
; CI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x8
; CI-NEXT: s_waitcnt lgkmcnt(0)
; CI-NEXT: v_mov_b32_e32 v0, s0
; CI-NEXT: v_div_scale_f32 v1, s[6:7], v0, v0, s2
; CI-NEXT: v_div_scale_f32 v2, vcc, s2, v0, s2
; CI-NEXT: v_rcp_f32_e32 v3, v1
; CI-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_MODE, 4, 2), 3
; CI-NEXT: v_fma_f32 v4, -v1, v3, 1.0
; CI-NEXT: v_fma_f32 v3, v4, v3, v3
; CI-NEXT: v_mul_f32_e32 v4, v2, v3
; CI-NEXT: v_fma_f32 v5, -v1, v4, v2
; CI-NEXT: v_fma_f32 v4, v5, v3, v4
; CI-NEXT: v_fma_f32 v1, -v1, v4, v2
; CI-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_MODE, 4, 2), 0
; CI-NEXT: v_div_fmas_f32 v1, v1, v3, v4
; CI-NEXT: v_div_fixup_f32 v1, v1, v0, s2
; CI-NEXT: v_trunc_f32_e32 v1, v1
; CI-NEXT: v_fma_f32 v0, -v1, v0, s2
; CI-NEXT: v_mov_b32_e32 v1, s1
; CI-NEXT: v_div_scale_f32 v2, s[0:1], v1, v1, s3
; CI-NEXT: v_div_scale_f32 v3, vcc, s3, v1, s3
; CI-NEXT: v_rcp_f32_e32 v4, v2
; CI-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_MODE, 4, 2), 3
; CI-NEXT: v_fma_f32 v5, -v2, v4, 1.0
; CI-NEXT: v_fma_f32 v4, v5, v4, v4
; CI-NEXT: v_mul_f32_e32 v5, v3, v4
; CI-NEXT: v_fma_f32 v6, -v2, v5, v3
; CI-NEXT: v_fma_f32 v5, v6, v4, v5
; CI-NEXT: v_fma_f32 v2, -v2, v5, v3
; CI-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_MODE, 4, 2), 0
; CI-NEXT: v_div_fmas_f32 v2, v2, v4, v5
; CI-NEXT: s_mov_b32 s6, -1
; CI-NEXT: s_mov_b32 s7, 0xf000
; CI-NEXT: v_div_fixup_f32 v2, v2, v1, s3
; CI-NEXT: v_trunc_f32_e32 v2, v2
; CI-NEXT: v_fma_f32 v1, -v2, v1, s3
; CI-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
; CI-NEXT: s_endpgm
;
; VI-LABEL: frem_v2f32:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x34
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_load_dwordx2 s[2:3], s[6:7], 0x0
; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x20
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_div_scale_f32 v1, s[6:7], v0, v0, s2
; VI-NEXT: v_div_scale_f32 v2, vcc, s2, v0, s2
; VI-NEXT: v_rcp_f32_e32 v3, v1
; VI-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_MODE, 4, 2), 3
; VI-NEXT: v_fma_f32 v4, -v1, v3, 1.0
; VI-NEXT: v_fma_f32 v3, v4, v3, v3
; VI-NEXT: v_mul_f32_e32 v4, v2, v3
; VI-NEXT: v_fma_f32 v5, -v1, v4, v2
; VI-NEXT: v_fma_f32 v4, v5, v3, v4
; VI-NEXT: v_fma_f32 v1, -v1, v4, v2
; VI-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_MODE, 4, 2), 0
; VI-NEXT: v_div_fmas_f32 v1, v1, v3, v4
; VI-NEXT: v_div_fixup_f32 v1, v1, v0, s2
; VI-NEXT: v_trunc_f32_e32 v1, v1
; VI-NEXT: v_fma_f32 v0, -v1, v0, s2
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: v_div_scale_f32 v2, s[0:1], v1, v1, s3
; VI-NEXT: v_div_scale_f32 v3, vcc, s3, v1, s3
; VI-NEXT: v_rcp_f32_e32 v4, v2
; VI-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_MODE, 4, 2), 3
; VI-NEXT: v_fma_f32 v5, -v2, v4, 1.0
; VI-NEXT: v_fma_f32 v4, v5, v4, v4
; VI-NEXT: v_mul_f32_e32 v5, v3, v4
; VI-NEXT: v_fma_f32 v6, -v2, v5, v3
; VI-NEXT: v_fma_f32 v5, v6, v4, v5
; VI-NEXT: v_fma_f32 v2, -v2, v5, v3
; VI-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_MODE, 4, 2), 0
; VI-NEXT: v_div_fmas_f32 v2, v2, v4, v5
; VI-NEXT: v_div_fixup_f32 v2, v2, v1, s3
; VI-NEXT: v_trunc_f32_e32 v2, v2
; VI-NEXT: v_fma_f32 v1, -v2, v1, s3
; VI-NEXT: v_mov_b32_e32 v2, s4
; VI-NEXT: v_mov_b32_e32 v3, s5
; VI-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
; VI-NEXT: s_endpgm
  %gep2 = getelementptr <2 x float>, ptr addrspace(1) %in2, i32 4
  %r0 = load <2 x float>, ptr addrspace(1) %in1, align 8
  %r1 = load <2 x float>, ptr addrspace(1) %gep2, align 8
  %r2 = frem <2 x float> %r0, %r1
  store <2 x float> %r2, ptr addrspace(1) %out, align 8
  ret void
}
|
|
|
|
; Four scalarized f32 frem expansions (one per vector lane), each:
; q = trunc(a / b) via div_scale/rcp/div_fmas/div_fixup, then r = a - q*b.
define amdgpu_kernel void @frem_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %in1, ptr addrspace(1) %in2) #0 {
; CI-LABEL: frem_v4f32:
; CI: ; %bb.0:
; CI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
; CI-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0xd
; CI-NEXT: s_waitcnt lgkmcnt(0)
; CI-NEXT: s_load_dwordx4 s[0:3], s[6:7], 0x0
; CI-NEXT: s_load_dwordx4 s[8:11], s[8:9], 0x10
; CI-NEXT: s_waitcnt lgkmcnt(0)
; CI-NEXT: v_mov_b32_e32 v0, s8
; CI-NEXT: v_div_scale_f32 v1, s[6:7], v0, v0, s0
; CI-NEXT: v_div_scale_f32 v2, vcc, s0, v0, s0
; CI-NEXT: v_rcp_f32_e32 v3, v1
; CI-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_MODE, 4, 2), 3
; CI-NEXT: v_fma_f32 v4, -v1, v3, 1.0
; CI-NEXT: v_fma_f32 v3, v4, v3, v3
; CI-NEXT: v_mul_f32_e32 v4, v2, v3
; CI-NEXT: v_fma_f32 v5, -v1, v4, v2
; CI-NEXT: v_fma_f32 v4, v5, v3, v4
; CI-NEXT: v_fma_f32 v1, -v1, v4, v2
; CI-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_MODE, 4, 2), 0
; CI-NEXT: v_div_fmas_f32 v1, v1, v3, v4
; CI-NEXT: v_div_fixup_f32 v1, v1, v0, s0
; CI-NEXT: v_trunc_f32_e32 v1, v1
; CI-NEXT: v_fma_f32 v0, -v1, v0, s0
; CI-NEXT: v_mov_b32_e32 v1, s9
; CI-NEXT: v_div_scale_f32 v2, s[6:7], v1, v1, s1
; CI-NEXT: v_div_scale_f32 v3, vcc, s1, v1, s1
; CI-NEXT: v_rcp_f32_e32 v4, v2
; CI-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_MODE, 4, 2), 3
; CI-NEXT: v_fma_f32 v5, -v2, v4, 1.0
; CI-NEXT: v_fma_f32 v4, v5, v4, v4
; CI-NEXT: v_mul_f32_e32 v5, v3, v4
; CI-NEXT: v_fma_f32 v6, -v2, v5, v3
; CI-NEXT: v_fma_f32 v5, v6, v4, v5
; CI-NEXT: v_fma_f32 v2, -v2, v5, v3
; CI-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_MODE, 4, 2), 0
; CI-NEXT: v_div_fmas_f32 v2, v2, v4, v5
; CI-NEXT: v_div_fixup_f32 v2, v2, v1, s1
; CI-NEXT: v_trunc_f32_e32 v2, v2
; CI-NEXT: v_fma_f32 v1, -v2, v1, s1
; CI-NEXT: v_mov_b32_e32 v2, s10
; CI-NEXT: v_div_scale_f32 v3, s[0:1], v2, v2, s2
; CI-NEXT: v_div_scale_f32 v4, vcc, s2, v2, s2
; CI-NEXT: v_rcp_f32_e32 v5, v3
; CI-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_MODE, 4, 2), 3
; CI-NEXT: v_fma_f32 v6, -v3, v5, 1.0
; CI-NEXT: v_fma_f32 v5, v6, v5, v5
; CI-NEXT: v_mul_f32_e32 v6, v4, v5
; CI-NEXT: v_fma_f32 v7, -v3, v6, v4
; CI-NEXT: v_fma_f32 v6, v7, v5, v6
; CI-NEXT: v_fma_f32 v3, -v3, v6, v4
; CI-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_MODE, 4, 2), 0
; CI-NEXT: v_div_fmas_f32 v3, v3, v5, v6
; CI-NEXT: v_div_fixup_f32 v3, v3, v2, s2
; CI-NEXT: v_trunc_f32_e32 v3, v3
; CI-NEXT: v_fma_f32 v2, -v3, v2, s2
; CI-NEXT: v_mov_b32_e32 v3, s11
; CI-NEXT: v_div_scale_f32 v4, s[0:1], v3, v3, s3
; CI-NEXT: v_div_scale_f32 v5, vcc, s3, v3, s3
; CI-NEXT: v_rcp_f32_e32 v6, v4
; CI-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_MODE, 4, 2), 3
; CI-NEXT: v_fma_f32 v7, -v4, v6, 1.0
; CI-NEXT: v_fma_f32 v6, v7, v6, v6
; CI-NEXT: v_mul_f32_e32 v7, v5, v6
; CI-NEXT: v_fma_f32 v8, -v4, v7, v5
; CI-NEXT: v_fma_f32 v7, v8, v6, v7
; CI-NEXT: v_fma_f32 v4, -v4, v7, v5
; CI-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_MODE, 4, 2), 0
; CI-NEXT: v_div_fmas_f32 v4, v4, v6, v7
; CI-NEXT: s_mov_b32 s6, -1
; CI-NEXT: s_mov_b32 s7, 0xf000
; CI-NEXT: v_div_fixup_f32 v4, v4, v3, s3
; CI-NEXT: v_trunc_f32_e32 v4, v4
; CI-NEXT: v_fma_f32 v3, -v4, v3, s3
; CI-NEXT: buffer_store_dwordx4 v[0:3], off, s[4:7], 0
; CI-NEXT: s_endpgm
;
; VI-LABEL: frem_v4f32:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
; VI-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0x34
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_load_dwordx4 s[0:3], s[6:7], 0x0
; VI-NEXT: s_load_dwordx4 s[8:11], s[8:9], 0x40
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s8
; VI-NEXT: v_div_scale_f32 v1, s[6:7], v0, v0, s0
; VI-NEXT: v_div_scale_f32 v2, vcc, s0, v0, s0
; VI-NEXT: v_rcp_f32_e32 v3, v1
; VI-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_MODE, 4, 2), 3
; VI-NEXT: v_fma_f32 v4, -v1, v3, 1.0
; VI-NEXT: v_fma_f32 v3, v4, v3, v3
; VI-NEXT: v_mul_f32_e32 v4, v2, v3
; VI-NEXT: v_fma_f32 v5, -v1, v4, v2
; VI-NEXT: v_fma_f32 v4, v5, v3, v4
; VI-NEXT: v_fma_f32 v1, -v1, v4, v2
; VI-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_MODE, 4, 2), 0
; VI-NEXT: v_div_fmas_f32 v1, v1, v3, v4
; VI-NEXT: v_div_fixup_f32 v1, v1, v0, s0
; VI-NEXT: v_trunc_f32_e32 v1, v1
; VI-NEXT: v_fma_f32 v0, -v1, v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s9
; VI-NEXT: v_div_scale_f32 v2, s[6:7], v1, v1, s1
; VI-NEXT: v_div_scale_f32 v3, vcc, s1, v1, s1
; VI-NEXT: v_rcp_f32_e32 v4, v2
; VI-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_MODE, 4, 2), 3
; VI-NEXT: v_fma_f32 v5, -v2, v4, 1.0
; VI-NEXT: v_fma_f32 v4, v5, v4, v4
; VI-NEXT: v_mul_f32_e32 v5, v3, v4
; VI-NEXT: v_fma_f32 v6, -v2, v5, v3
; VI-NEXT: v_fma_f32 v5, v6, v4, v5
; VI-NEXT: v_fma_f32 v2, -v2, v5, v3
; VI-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_MODE, 4, 2), 0
; VI-NEXT: v_div_fmas_f32 v2, v2, v4, v5
; VI-NEXT: v_div_fixup_f32 v2, v2, v1, s1
; VI-NEXT: v_trunc_f32_e32 v2, v2
; VI-NEXT: v_fma_f32 v1, -v2, v1, s1
; VI-NEXT: v_mov_b32_e32 v2, s10
; VI-NEXT: v_div_scale_f32 v3, s[0:1], v2, v2, s2
; VI-NEXT: v_div_scale_f32 v4, vcc, s2, v2, s2
; VI-NEXT: v_rcp_f32_e32 v5, v3
; VI-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_MODE, 4, 2), 3
; VI-NEXT: v_fma_f32 v6, -v3, v5, 1.0
; VI-NEXT: v_fma_f32 v5, v6, v5, v5
; VI-NEXT: v_mul_f32_e32 v6, v4, v5
; VI-NEXT: v_fma_f32 v7, -v3, v6, v4
; VI-NEXT: v_fma_f32 v6, v7, v5, v6
; VI-NEXT: v_fma_f32 v3, -v3, v6, v4
; VI-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_MODE, 4, 2), 0
; VI-NEXT: v_div_fmas_f32 v3, v3, v5, v6
; VI-NEXT: v_div_fixup_f32 v3, v3, v2, s2
; VI-NEXT: v_trunc_f32_e32 v3, v3
; VI-NEXT: v_fma_f32 v2, -v3, v2, s2
; VI-NEXT: v_mov_b32_e32 v3, s11
; VI-NEXT: v_div_scale_f32 v4, s[0:1], v3, v3, s3
; VI-NEXT: v_div_scale_f32 v5, vcc, s3, v3, s3
; VI-NEXT: v_rcp_f32_e32 v6, v4
; VI-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_MODE, 4, 2), 3
; VI-NEXT: v_fma_f32 v7, -v4, v6, 1.0
; VI-NEXT: v_fma_f32 v6, v7, v6, v6
; VI-NEXT: v_mul_f32_e32 v7, v5, v6
; VI-NEXT: v_fma_f32 v8, -v4, v7, v5
; VI-NEXT: v_fma_f32 v7, v8, v6, v7
; VI-NEXT: v_fma_f32 v4, -v4, v7, v5
; VI-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_MODE, 4, 2), 0
; VI-NEXT: v_div_fmas_f32 v4, v4, v6, v7
; VI-NEXT: v_div_fixup_f32 v4, v4, v3, s3
; VI-NEXT: v_trunc_f32_e32 v4, v4
; VI-NEXT: v_fma_f32 v3, -v4, v3, s3
; VI-NEXT: v_mov_b32_e32 v4, s4
; VI-NEXT: v_mov_b32_e32 v5, s5
; VI-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
; VI-NEXT: s_endpgm
  %gep2 = getelementptr <4 x float>, ptr addrspace(1) %in2, i32 4
  %r0 = load <4 x float>, ptr addrspace(1) %in1, align 16
  %r1 = load <4 x float>, ptr addrspace(1) %gep2, align 16
  %r2 = frem <4 x float> %r0, %r1
  store <4 x float> %r2, ptr addrspace(1) %out, align 16
  ret void
}
|
|
|
|
; Per-lane f64 frem: q = trunc(a / b) via the f64 div_scale/rcp/div_fmas/
; div_fixup Newton-Raphson sequence, then r = a - q*b (fma). No mode-register
; toggling here, unlike the f32 cases above.
define amdgpu_kernel void @frem_v2f64(ptr addrspace(1) %out, ptr addrspace(1) %in1, ptr addrspace(1) %in2) #0 {
; CI-LABEL: frem_v2f64:
; CI: ; %bb.0:
; CI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
; CI-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0xd
; CI-NEXT: s_waitcnt lgkmcnt(0)
; CI-NEXT: s_load_dwordx4 s[0:3], s[6:7], 0x0
; CI-NEXT: s_load_dwordx4 s[8:11], s[8:9], 0x10
; CI-NEXT: s_waitcnt lgkmcnt(0)
; CI-NEXT: v_mov_b32_e32 v0, s8
; CI-NEXT: v_mov_b32_e32 v1, s9
; CI-NEXT: v_div_scale_f64 v[2:3], s[6:7], v[0:1], v[0:1], s[0:1]
; CI-NEXT: v_div_scale_f64 v[8:9], vcc, s[0:1], v[0:1], s[0:1]
; CI-NEXT: v_rcp_f64_e32 v[4:5], v[2:3]
; CI-NEXT: v_fma_f64 v[6:7], -v[2:3], v[4:5], 1.0
; CI-NEXT: v_fma_f64 v[4:5], v[4:5], v[6:7], v[4:5]
; CI-NEXT: v_fma_f64 v[6:7], -v[2:3], v[4:5], 1.0
; CI-NEXT: v_fma_f64 v[4:5], v[4:5], v[6:7], v[4:5]
; CI-NEXT: v_mul_f64 v[6:7], v[8:9], v[4:5]
; CI-NEXT: v_fma_f64 v[2:3], -v[2:3], v[6:7], v[8:9]
; CI-NEXT: v_div_fmas_f64 v[2:3], v[2:3], v[4:5], v[6:7]
; CI-NEXT: v_div_fixup_f64 v[2:3], v[2:3], v[0:1], s[0:1]
; CI-NEXT: v_trunc_f64_e32 v[2:3], v[2:3]
; CI-NEXT: v_fma_f64 v[0:1], -v[2:3], v[0:1], s[0:1]
; CI-NEXT: v_mov_b32_e32 v2, s10
; CI-NEXT: v_mov_b32_e32 v3, s11
; CI-NEXT: v_div_scale_f64 v[4:5], s[0:1], v[2:3], v[2:3], s[2:3]
; CI-NEXT: v_div_scale_f64 v[10:11], vcc, s[2:3], v[2:3], s[2:3]
; CI-NEXT: v_rcp_f64_e32 v[6:7], v[4:5]
; CI-NEXT: v_fma_f64 v[8:9], -v[4:5], v[6:7], 1.0
; CI-NEXT: v_fma_f64 v[6:7], v[6:7], v[8:9], v[6:7]
; CI-NEXT: v_fma_f64 v[8:9], -v[4:5], v[6:7], 1.0
; CI-NEXT: v_fma_f64 v[6:7], v[6:7], v[8:9], v[6:7]
; CI-NEXT: v_mul_f64 v[8:9], v[10:11], v[6:7]
; CI-NEXT: v_fma_f64 v[4:5], -v[4:5], v[8:9], v[10:11]
; CI-NEXT: v_div_fmas_f64 v[4:5], v[4:5], v[6:7], v[8:9]
; CI-NEXT: v_div_fixup_f64 v[4:5], v[4:5], v[2:3], s[2:3]
; CI-NEXT: v_trunc_f64_e32 v[4:5], v[4:5]
; CI-NEXT: v_fma_f64 v[2:3], -v[4:5], v[2:3], s[2:3]
; CI-NEXT: v_mov_b32_e32 v4, s4
; CI-NEXT: v_mov_b32_e32 v5, s5
; CI-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
; CI-NEXT: s_endpgm
;
; VI-LABEL: frem_v2f64:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
; VI-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0x34
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_load_dwordx4 s[0:3], s[6:7], 0x0
; VI-NEXT: s_load_dwordx4 s[8:11], s[8:9], 0x40
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s8
; VI-NEXT: v_mov_b32_e32 v1, s9
; VI-NEXT: v_div_scale_f64 v[2:3], s[6:7], v[0:1], v[0:1], s[0:1]
; VI-NEXT: v_div_scale_f64 v[8:9], vcc, s[0:1], v[0:1], s[0:1]
; VI-NEXT: v_rcp_f64_e32 v[4:5], v[2:3]
; VI-NEXT: v_fma_f64 v[6:7], -v[2:3], v[4:5], 1.0
; VI-NEXT: v_fma_f64 v[4:5], v[4:5], v[6:7], v[4:5]
; VI-NEXT: v_fma_f64 v[6:7], -v[2:3], v[4:5], 1.0
; VI-NEXT: v_fma_f64 v[4:5], v[4:5], v[6:7], v[4:5]
; VI-NEXT: v_mul_f64 v[6:7], v[8:9], v[4:5]
; VI-NEXT: v_fma_f64 v[2:3], -v[2:3], v[6:7], v[8:9]
; VI-NEXT: v_div_fmas_f64 v[2:3], v[2:3], v[4:5], v[6:7]
; VI-NEXT: v_div_fixup_f64 v[2:3], v[2:3], v[0:1], s[0:1]
; VI-NEXT: v_trunc_f64_e32 v[2:3], v[2:3]
; VI-NEXT: v_fma_f64 v[0:1], -v[2:3], v[0:1], s[0:1]
; VI-NEXT: v_mov_b32_e32 v2, s10
; VI-NEXT: v_mov_b32_e32 v3, s11
; VI-NEXT: v_div_scale_f64 v[4:5], s[0:1], v[2:3], v[2:3], s[2:3]
; VI-NEXT: v_div_scale_f64 v[10:11], vcc, s[2:3], v[2:3], s[2:3]
; VI-NEXT: v_rcp_f64_e32 v[6:7], v[4:5]
; VI-NEXT: v_fma_f64 v[8:9], -v[4:5], v[6:7], 1.0
; VI-NEXT: v_fma_f64 v[6:7], v[6:7], v[8:9], v[6:7]
; VI-NEXT: v_fma_f64 v[8:9], -v[4:5], v[6:7], 1.0
; VI-NEXT: v_fma_f64 v[6:7], v[6:7], v[8:9], v[6:7]
; VI-NEXT: v_mul_f64 v[8:9], v[10:11], v[6:7]
; VI-NEXT: v_fma_f64 v[4:5], -v[4:5], v[8:9], v[10:11]
; VI-NEXT: v_div_fmas_f64 v[4:5], v[4:5], v[6:7], v[8:9]
; VI-NEXT: v_div_fixup_f64 v[4:5], v[4:5], v[2:3], s[2:3]
; VI-NEXT: v_trunc_f64_e32 v[4:5], v[4:5]
; VI-NEXT: v_fma_f64 v[2:3], -v[4:5], v[2:3], s[2:3]
; VI-NEXT: v_mov_b32_e32 v4, s4
; VI-NEXT: v_mov_b32_e32 v5, s5
; VI-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
; VI-NEXT: s_endpgm
  %gep2 = getelementptr <2 x double>, ptr addrspace(1) %in2, i32 4
  %r0 = load <2 x double>, ptr addrspace(1) %in1, align 16
  %r1 = load <2 x double>, ptr addrspace(1) %gep2, align 16
  %r2 = frem <2 x double> %r0, %r1
  store <2 x double> %r2, ptr addrspace(1) %out, align 16
  ret void
}
|
|
|
|
; Attribute groups referenced by the kernels above; #0 keeps f32 denormals
; flushed (preserve-sign) with strict FP math.
attributes #0 = { nounwind "unsafe-fp-math"="false" "denormal-fp-math-f32"="preserve-sign,preserve-sign" }
attributes #1 = { nounwind "unsafe-fp-math"="true" "denormal-fp-math-f32"="preserve-sign,preserve-sign" }
|