SIInsertWaitcnts inserts waitcnt instructions to resolve data dependencies. The GFX10+ vscnt (VMEM store count) counter is never used in this way. It is only used to resolve memory dependencies, and that is handled by SIMemoryLegalizer. Hence there is no need to conservatively wait for vscnt to be 0 on function entry and before returns.

Differential Revision: https://reviews.llvm.org/D153537
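As an illustrative sketch (an assumption for exposition, not part of this test): on GFX10+ a callable function that ends in a VMEM store previously received a conservative vscnt wait before its return, roughly

    global_store_b16 v1, v0, s[0:1]
    s_waitcnt_vscnt null, 0x0        ; conservative wait, no longer inserted after this change
    s_setpc_b64 s[30:31]

With this patch, SIInsertWaitcnts only emits the waits needed for data dependencies; any ordering required for the store itself is left to the waits that SIMemoryLegalizer inserts.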
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -march=amdgcn -mcpu=tahiti < %s | FileCheck -enable-var-scope --check-prefixes=SI %s
; RUN: llc -march=amdgcn -mcpu=tonga < %s | FileCheck -enable-var-scope --check-prefixes=VI %s
; RUN: llc -march=amdgcn -mcpu=gfx900 < %s | FileCheck -enable-var-scope --check-prefixes=GFX9 %s
; RUN: llc -march=amdgcn -mcpu=gfx1100 < %s | FileCheck -enable-var-scope --check-prefixes=GFX11 %s
declare half @llvm.copysign.f16(half, half) #0
declare float @llvm.copysign.f32(float, float) #0
declare double @llvm.copysign.f64(double, double) #0
declare <2 x half> @llvm.copysign.v2f16(<2 x half>, <2 x half>) #0
declare <3 x half> @llvm.copysign.v3f16(<3 x half>, <3 x half>) #0
declare <4 x half> @llvm.copysign.v4f16(<4 x half>, <4 x half>) #0
declare i32 @llvm.amdgcn.workitem.id.x() #0
define amdgpu_kernel void @s_copysign_f16(ptr addrspace(1) %arg_out, half %mag, half %sign) {
; SI-LABEL: s_copysign_f16:
; SI: ; %bb.0:
; SI-NEXT: s_load_dword s2, s[0:1], 0xb
; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v0, s2
; SI-NEXT: s_lshr_b32 s2, s2, 16
; SI-NEXT: v_cvt_f32_f16_e32 v1, s2
; SI-NEXT: s_brev_b32 s2, -2
; SI-NEXT: v_bfi_b32 v0, s2, v0, v1
; SI-NEXT: v_cvt_f16_f32_e32 v0, v0
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: buffer_store_short v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: s_copysign_f16:
; VI: ; %bb.0:
; VI-NEXT: s_load_dword s2, s[0:1], 0x2c
; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
; VI-NEXT: s_movk_i32 s3, 0x7fff
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_lshr_b32 s4, s2, 16
; VI-NEXT: v_mov_b32_e32 v0, s2
; VI-NEXT: v_mov_b32_e32 v1, s4
; VI-NEXT: v_bfi_b32 v2, s3, v0, v1
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: flat_store_short v[0:1], v2
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: s_copysign_f16:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_load_dword s4, s[0:1], 0x2c
; GFX9-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x24
; GFX9-NEXT: s_movk_i32 s0, 0x7fff
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: s_lshr_b32 s1, s4, 16
; GFX9-NEXT: v_mov_b32_e32 v1, s4
; GFX9-NEXT: v_mov_b32_e32 v2, s1
; GFX9-NEXT: v_bfi_b32 v1, s0, v1, v2
; GFX9-NEXT: global_store_short v0, v1, s[2:3]
; GFX9-NEXT: s_endpgm
;
; GFX11-LABEL: s_copysign_f16:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_clause 0x1
; GFX11-NEXT: s_load_b32 s2, s[0:1], 0x2c
; GFX11-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
; GFX11-NEXT: v_mov_b32_e32 v1, 0
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: s_lshr_b32 s3, s2, 16
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_mov_b32_e32 v0, s3
; GFX11-NEXT: v_bfi_b32 v0, 0x7fff, s2, v0
; GFX11-NEXT: global_store_b16 v1, v0, s[0:1]
; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
; GFX11-NEXT: s_endpgm
%out = call half @llvm.copysign.f16(half %mag, half %sign)
store half %out, ptr addrspace(1) %arg_out
ret void
}
define amdgpu_kernel void @s_test_copysign_f16_0(ptr addrspace(1) %out, half %mag) {
|
|
; SI-LABEL: s_test_copysign_f16_0:
|
|
; SI: ; %bb.0:
|
|
; SI-NEXT: s_load_dword s4, s[0:1], 0xb
|
|
; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
|
|
; SI-NEXT: s_mov_b32 s3, 0xf000
|
|
; SI-NEXT: s_mov_b32 s2, -1
|
|
; SI-NEXT: s_waitcnt lgkmcnt(0)
|
|
; SI-NEXT: s_and_b32 s4, s4, 0x7fff
|
|
; SI-NEXT: v_mov_b32_e32 v0, s4
|
|
; SI-NEXT: buffer_store_short v0, off, s[0:3], 0
|
|
; SI-NEXT: s_endpgm
|
|
;
|
|
; VI-LABEL: s_test_copysign_f16_0:
|
|
; VI: ; %bb.0:
|
|
; VI-NEXT: s_load_dword s2, s[0:1], 0x2c
|
|
; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
|
|
; VI-NEXT: s_waitcnt lgkmcnt(0)
|
|
; VI-NEXT: s_and_b32 s2, s2, 0x7fff
|
|
; VI-NEXT: v_mov_b32_e32 v0, s0
|
|
; VI-NEXT: v_mov_b32_e32 v1, s1
|
|
; VI-NEXT: v_mov_b32_e32 v2, s2
|
|
; VI-NEXT: flat_store_short v[0:1], v2
|
|
; VI-NEXT: s_endpgm
|
|
;
|
|
; GFX9-LABEL: s_test_copysign_f16_0:
|
|
; GFX9: ; %bb.0:
|
|
; GFX9-NEXT: s_load_dword s4, s[0:1], 0x2c
|
|
; GFX9-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x24
|
|
; GFX9-NEXT: v_mov_b32_e32 v0, 0
|
|
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
|
|
; GFX9-NEXT: s_and_b32 s0, s4, 0x7fff
|
|
; GFX9-NEXT: v_mov_b32_e32 v1, s0
|
|
; GFX9-NEXT: global_store_short v0, v1, s[2:3]
|
|
; GFX9-NEXT: s_endpgm
|
|
;
|
|
; GFX11-LABEL: s_test_copysign_f16_0:
|
|
; GFX11: ; %bb.0:
|
|
; GFX11-NEXT: s_clause 0x1
|
|
; GFX11-NEXT: s_load_b32 s2, s[0:1], 0x2c
|
|
; GFX11-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
|
|
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
|
|
; GFX11-NEXT: s_and_b32 s2, s2, 0x7fff
|
|
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
|
|
; GFX11-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s2
|
|
; GFX11-NEXT: global_store_b16 v0, v1, s[0:1]
|
|
; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
|
|
; GFX11-NEXT: s_endpgm
|
|
%result = call half @llvm.copysign.f16(half %mag, half 0.0)
|
|
store half %result, ptr addrspace(1) %out, align 4
|
|
ret void
|
|
}
|
|
|
|
define amdgpu_kernel void @s_test_copysign_f16_1(ptr addrspace(1) %out, half %mag) {
|
|
; SI-LABEL: s_test_copysign_f16_1:
|
|
; SI: ; %bb.0:
|
|
; SI-NEXT: s_load_dword s4, s[0:1], 0xb
|
|
; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
|
|
; SI-NEXT: s_mov_b32 s3, 0xf000
|
|
; SI-NEXT: s_mov_b32 s2, -1
|
|
; SI-NEXT: s_waitcnt lgkmcnt(0)
|
|
; SI-NEXT: s_and_b32 s4, s4, 0x7fff
|
|
; SI-NEXT: v_mov_b32_e32 v0, s4
|
|
; SI-NEXT: buffer_store_short v0, off, s[0:3], 0
|
|
; SI-NEXT: s_endpgm
|
|
;
|
|
; VI-LABEL: s_test_copysign_f16_1:
|
|
; VI: ; %bb.0:
|
|
; VI-NEXT: s_load_dword s2, s[0:1], 0x2c
|
|
; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
|
|
; VI-NEXT: s_waitcnt lgkmcnt(0)
|
|
; VI-NEXT: s_and_b32 s2, s2, 0x7fff
|
|
; VI-NEXT: v_mov_b32_e32 v0, s0
|
|
; VI-NEXT: v_mov_b32_e32 v1, s1
|
|
; VI-NEXT: v_mov_b32_e32 v2, s2
|
|
; VI-NEXT: flat_store_short v[0:1], v2
|
|
; VI-NEXT: s_endpgm
|
|
;
|
|
; GFX9-LABEL: s_test_copysign_f16_1:
|
|
; GFX9: ; %bb.0:
|
|
; GFX9-NEXT: s_load_dword s4, s[0:1], 0x2c
|
|
; GFX9-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x24
|
|
; GFX9-NEXT: v_mov_b32_e32 v0, 0
|
|
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
|
|
; GFX9-NEXT: s_and_b32 s0, s4, 0x7fff
|
|
; GFX9-NEXT: v_mov_b32_e32 v1, s0
|
|
; GFX9-NEXT: global_store_short v0, v1, s[2:3]
|
|
; GFX9-NEXT: s_endpgm
|
|
;
|
|
; GFX11-LABEL: s_test_copysign_f16_1:
|
|
; GFX11: ; %bb.0:
|
|
; GFX11-NEXT: s_clause 0x1
|
|
; GFX11-NEXT: s_load_b32 s2, s[0:1], 0x2c
|
|
; GFX11-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
|
|
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
|
|
; GFX11-NEXT: s_and_b32 s2, s2, 0x7fff
|
|
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
|
|
; GFX11-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s2
|
|
; GFX11-NEXT: global_store_b16 v0, v1, s[0:1]
|
|
; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
|
|
; GFX11-NEXT: s_endpgm
|
|
%result = call half @llvm.copysign.f16(half %mag, half 1.0)
|
|
store half %result, ptr addrspace(1) %out, align 4
|
|
ret void
|
|
}
|
|
|
|
define amdgpu_kernel void @s_test_copysign_f16_10.0(ptr addrspace(1) %out, half %mag) {
|
|
; SI-LABEL: s_test_copysign_f16_10.0:
|
|
; SI: ; %bb.0:
|
|
; SI-NEXT: s_load_dword s4, s[0:1], 0xb
|
|
; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
|
|
; SI-NEXT: s_mov_b32 s3, 0xf000
|
|
; SI-NEXT: s_mov_b32 s2, -1
|
|
; SI-NEXT: s_waitcnt lgkmcnt(0)
|
|
; SI-NEXT: s_and_b32 s4, s4, 0x7fff
|
|
; SI-NEXT: v_mov_b32_e32 v0, s4
|
|
; SI-NEXT: buffer_store_short v0, off, s[0:3], 0
|
|
; SI-NEXT: s_endpgm
|
|
;
|
|
; VI-LABEL: s_test_copysign_f16_10.0:
|
|
; VI: ; %bb.0:
|
|
; VI-NEXT: s_load_dword s2, s[0:1], 0x2c
|
|
; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
|
|
; VI-NEXT: s_waitcnt lgkmcnt(0)
|
|
; VI-NEXT: s_and_b32 s2, s2, 0x7fff
|
|
; VI-NEXT: v_mov_b32_e32 v0, s0
|
|
; VI-NEXT: v_mov_b32_e32 v1, s1
|
|
; VI-NEXT: v_mov_b32_e32 v2, s2
|
|
; VI-NEXT: flat_store_short v[0:1], v2
|
|
; VI-NEXT: s_endpgm
|
|
;
|
|
; GFX9-LABEL: s_test_copysign_f16_10.0:
|
|
; GFX9: ; %bb.0:
|
|
; GFX9-NEXT: s_load_dword s4, s[0:1], 0x2c
|
|
; GFX9-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x24
|
|
; GFX9-NEXT: v_mov_b32_e32 v0, 0
|
|
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
|
|
; GFX9-NEXT: s_and_b32 s0, s4, 0x7fff
|
|
; GFX9-NEXT: v_mov_b32_e32 v1, s0
|
|
; GFX9-NEXT: global_store_short v0, v1, s[2:3]
|
|
; GFX9-NEXT: s_endpgm
|
|
;
|
|
; GFX11-LABEL: s_test_copysign_f16_10.0:
|
|
; GFX11: ; %bb.0:
|
|
; GFX11-NEXT: s_clause 0x1
|
|
; GFX11-NEXT: s_load_b32 s2, s[0:1], 0x2c
|
|
; GFX11-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
|
|
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
|
|
; GFX11-NEXT: s_and_b32 s2, s2, 0x7fff
|
|
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
|
|
; GFX11-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s2
|
|
; GFX11-NEXT: global_store_b16 v0, v1, s[0:1]
|
|
; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
|
|
; GFX11-NEXT: s_endpgm
|
|
%result = call half @llvm.copysign.f16(half %mag, half 10.0)
|
|
store half %result, ptr addrspace(1) %out, align 4
|
|
ret void
|
|
}
|
|
|
|
define amdgpu_kernel void @s_test_copysign_f16_neg1(ptr addrspace(1) %out, half %mag) {
|
|
; SI-LABEL: s_test_copysign_f16_neg1:
|
|
; SI: ; %bb.0:
|
|
; SI-NEXT: s_load_dword s4, s[0:1], 0xb
|
|
; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
|
|
; SI-NEXT: s_mov_b32 s3, 0xf000
|
|
; SI-NEXT: s_mov_b32 s2, -1
|
|
; SI-NEXT: s_waitcnt lgkmcnt(0)
|
|
; SI-NEXT: s_bitset1_b32 s4, 15
|
|
; SI-NEXT: v_mov_b32_e32 v0, s4
|
|
; SI-NEXT: buffer_store_short v0, off, s[0:3], 0
|
|
; SI-NEXT: s_endpgm
|
|
;
|
|
; VI-LABEL: s_test_copysign_f16_neg1:
|
|
; VI: ; %bb.0:
|
|
; VI-NEXT: s_load_dword s2, s[0:1], 0x2c
|
|
; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
|
|
; VI-NEXT: s_waitcnt lgkmcnt(0)
|
|
; VI-NEXT: s_bitset1_b32 s2, 15
|
|
; VI-NEXT: v_mov_b32_e32 v0, s0
|
|
; VI-NEXT: v_mov_b32_e32 v1, s1
|
|
; VI-NEXT: v_mov_b32_e32 v2, s2
|
|
; VI-NEXT: flat_store_short v[0:1], v2
|
|
; VI-NEXT: s_endpgm
|
|
;
|
|
; GFX9-LABEL: s_test_copysign_f16_neg1:
|
|
; GFX9: ; %bb.0:
|
|
; GFX9-NEXT: s_load_dword s4, s[0:1], 0x2c
|
|
; GFX9-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x24
|
|
; GFX9-NEXT: v_mov_b32_e32 v0, 0
|
|
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
|
|
; GFX9-NEXT: s_or_b32 s0, s4, 0x8000
|
|
; GFX9-NEXT: v_mov_b32_e32 v1, s0
|
|
; GFX9-NEXT: global_store_short v0, v1, s[2:3]
|
|
; GFX9-NEXT: s_endpgm
|
|
;
|
|
; GFX11-LABEL: s_test_copysign_f16_neg1:
|
|
; GFX11: ; %bb.0:
|
|
; GFX11-NEXT: s_clause 0x1
|
|
; GFX11-NEXT: s_load_b32 s2, s[0:1], 0x2c
|
|
; GFX11-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
|
|
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
|
|
; GFX11-NEXT: s_bitset1_b32 s2, 15
|
|
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
|
|
; GFX11-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s2
|
|
; GFX11-NEXT: global_store_b16 v0, v1, s[0:1]
|
|
; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
|
|
; GFX11-NEXT: s_endpgm
|
|
%result = call half @llvm.copysign.f16(half %mag, half -1.0)
|
|
store half %result, ptr addrspace(1) %out, align 4
|
|
ret void
|
|
}
|
|
|
|
define amdgpu_kernel void @s_test_copysign_f16_neg10(ptr addrspace(1) %out, half %mag) {
|
|
; SI-LABEL: s_test_copysign_f16_neg10:
|
|
; SI: ; %bb.0:
|
|
; SI-NEXT: s_load_dword s4, s[0:1], 0xb
|
|
; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
|
|
; SI-NEXT: s_mov_b32 s3, 0xf000
|
|
; SI-NEXT: s_mov_b32 s2, -1
|
|
; SI-NEXT: s_waitcnt lgkmcnt(0)
|
|
; SI-NEXT: s_bitset1_b32 s4, 15
|
|
; SI-NEXT: v_mov_b32_e32 v0, s4
|
|
; SI-NEXT: buffer_store_short v0, off, s[0:3], 0
|
|
; SI-NEXT: s_endpgm
|
|
;
|
|
; VI-LABEL: s_test_copysign_f16_neg10:
|
|
; VI: ; %bb.0:
|
|
; VI-NEXT: s_load_dword s2, s[0:1], 0x2c
|
|
; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
|
|
; VI-NEXT: s_waitcnt lgkmcnt(0)
|
|
; VI-NEXT: s_bitset1_b32 s2, 15
|
|
; VI-NEXT: v_mov_b32_e32 v0, s0
|
|
; VI-NEXT: v_mov_b32_e32 v1, s1
|
|
; VI-NEXT: v_mov_b32_e32 v2, s2
|
|
; VI-NEXT: flat_store_short v[0:1], v2
|
|
; VI-NEXT: s_endpgm
|
|
;
|
|
; GFX9-LABEL: s_test_copysign_f16_neg10:
|
|
; GFX9: ; %bb.0:
|
|
; GFX9-NEXT: s_load_dword s4, s[0:1], 0x2c
|
|
; GFX9-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x24
|
|
; GFX9-NEXT: v_mov_b32_e32 v0, 0
|
|
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
|
|
; GFX9-NEXT: s_or_b32 s0, s4, 0x8000
|
|
; GFX9-NEXT: v_mov_b32_e32 v1, s0
|
|
; GFX9-NEXT: global_store_short v0, v1, s[2:3]
|
|
; GFX9-NEXT: s_endpgm
|
|
;
|
|
; GFX11-LABEL: s_test_copysign_f16_neg10:
|
|
; GFX11: ; %bb.0:
|
|
; GFX11-NEXT: s_clause 0x1
|
|
; GFX11-NEXT: s_load_b32 s2, s[0:1], 0x2c
|
|
; GFX11-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
|
|
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
|
|
; GFX11-NEXT: s_bitset1_b32 s2, 15
|
|
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
|
|
; GFX11-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s2
|
|
; GFX11-NEXT: global_store_b16 v0, v1, s[0:1]
|
|
; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
|
|
; GFX11-NEXT: s_endpgm
|
|
%result = call half @llvm.copysign.f16(half %mag, half -10.0)
|
|
store half %result, ptr addrspace(1) %out, align 4
|
|
ret void
|
|
}
|
|
|
|
define amdgpu_kernel void @s_test_copysign_f16_0_mag(ptr addrspace(1) %out, half %sign) {
|
|
; SI-LABEL: s_test_copysign_f16_0_mag:
|
|
; SI: ; %bb.0:
|
|
; SI-NEXT: s_load_dword s2, s[0:1], 0xb
|
|
; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
|
|
; SI-NEXT: s_mov_b32 s3, 0xf000
|
|
; SI-NEXT: s_waitcnt lgkmcnt(0)
|
|
; SI-NEXT: v_cvt_f32_f16_e32 v0, s2
|
|
; SI-NEXT: s_brev_b32 s2, -2
|
|
; SI-NEXT: v_bfi_b32 v0, s2, 0, v0
|
|
; SI-NEXT: v_cvt_f16_f32_e32 v0, v0
|
|
; SI-NEXT: s_mov_b32 s2, -1
|
|
; SI-NEXT: buffer_store_short v0, off, s[0:3], 0
|
|
; SI-NEXT: s_endpgm
|
|
;
|
|
; VI-LABEL: s_test_copysign_f16_0_mag:
|
|
; VI: ; %bb.0:
|
|
; VI-NEXT: s_load_dword s2, s[0:1], 0x2c
|
|
; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
|
|
; VI-NEXT: v_mov_b32_e32 v0, 0xffff8000
|
|
; VI-NEXT: s_waitcnt lgkmcnt(0)
|
|
; VI-NEXT: v_and_b32_e32 v2, s2, v0
|
|
; VI-NEXT: v_mov_b32_e32 v0, s0
|
|
; VI-NEXT: v_mov_b32_e32 v1, s1
|
|
; VI-NEXT: flat_store_short v[0:1], v2
|
|
; VI-NEXT: s_endpgm
|
|
;
|
|
; GFX9-LABEL: s_test_copysign_f16_0_mag:
|
|
; GFX9: ; %bb.0:
|
|
; GFX9-NEXT: s_load_dword s4, s[0:1], 0x2c
|
|
; GFX9-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x24
|
|
; GFX9-NEXT: v_mov_b32_e32 v1, 0xffff8000
|
|
; GFX9-NEXT: v_mov_b32_e32 v0, 0
|
|
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
|
|
; GFX9-NEXT: v_and_b32_e32 v1, s4, v1
|
|
; GFX9-NEXT: global_store_short v0, v1, s[2:3]
|
|
; GFX9-NEXT: s_endpgm
|
|
;
|
|
; GFX11-LABEL: s_test_copysign_f16_0_mag:
|
|
; GFX11: ; %bb.0:
|
|
; GFX11-NEXT: s_clause 0x1
|
|
; GFX11-NEXT: s_load_b32 s2, s[0:1], 0x2c
|
|
; GFX11-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
|
|
; GFX11-NEXT: v_mov_b32_e32 v0, 0
|
|
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
|
|
; GFX11-NEXT: v_and_b32_e64 v1, 0xffff8000, s2
|
|
; GFX11-NEXT: global_store_b16 v0, v1, s[0:1]
|
|
; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
|
|
; GFX11-NEXT: s_endpgm
|
|
%result = call half @llvm.copysign.f16(half 0.0, half %sign)
|
|
store half %result, ptr addrspace(1) %out, align 4
|
|
ret void
|
|
}
|
|
|
|
|
|
define amdgpu_kernel void @s_test_copysign_f16_1_mag(ptr addrspace(1) %out, half %sign) {
|
|
; SI-LABEL: s_test_copysign_f16_1_mag:
|
|
; SI: ; %bb.0:
|
|
; SI-NEXT: s_load_dword s2, s[0:1], 0xb
|
|
; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
|
|
; SI-NEXT: s_mov_b32 s3, 0xf000
|
|
; SI-NEXT: s_waitcnt lgkmcnt(0)
|
|
; SI-NEXT: v_cvt_f32_f16_e32 v0, s2
|
|
; SI-NEXT: s_brev_b32 s2, -2
|
|
; SI-NEXT: v_bfi_b32 v0, s2, 1.0, v0
|
|
; SI-NEXT: v_cvt_f16_f32_e32 v0, v0
|
|
; SI-NEXT: s_mov_b32 s2, -1
|
|
; SI-NEXT: buffer_store_short v0, off, s[0:3], 0
|
|
; SI-NEXT: s_endpgm
|
|
;
|
|
; VI-LABEL: s_test_copysign_f16_1_mag:
|
|
; VI: ; %bb.0:
|
|
; VI-NEXT: s_load_dword s2, s[0:1], 0x2c
|
|
; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
|
|
; VI-NEXT: v_mov_b32_e32 v0, 0xffff8000
|
|
; VI-NEXT: s_waitcnt lgkmcnt(0)
|
|
; VI-NEXT: v_and_b32_e32 v0, s2, v0
|
|
; VI-NEXT: v_or_b32_e32 v2, 0x3c00, v0
|
|
; VI-NEXT: v_mov_b32_e32 v0, s0
|
|
; VI-NEXT: v_mov_b32_e32 v1, s1
|
|
; VI-NEXT: flat_store_short v[0:1], v2
|
|
; VI-NEXT: s_endpgm
|
|
;
|
|
; GFX9-LABEL: s_test_copysign_f16_1_mag:
|
|
; GFX9: ; %bb.0:
|
|
; GFX9-NEXT: s_load_dword s4, s[0:1], 0x2c
|
|
; GFX9-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x24
|
|
; GFX9-NEXT: v_mov_b32_e32 v1, 0xffff8000
|
|
; GFX9-NEXT: v_mov_b32_e32 v0, 0
|
|
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
|
|
; GFX9-NEXT: v_and_b32_e32 v1, s4, v1
|
|
; GFX9-NEXT: v_or_b32_e32 v1, 0x3c00, v1
|
|
; GFX9-NEXT: global_store_short v0, v1, s[2:3]
|
|
; GFX9-NEXT: s_endpgm
|
|
;
|
|
; GFX11-LABEL: s_test_copysign_f16_1_mag:
|
|
; GFX11: ; %bb.0:
|
|
; GFX11-NEXT: s_clause 0x1
|
|
; GFX11-NEXT: s_load_b32 s2, s[0:1], 0x2c
|
|
; GFX11-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
|
|
; GFX11-NEXT: v_mov_b32_e32 v1, 0
|
|
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
|
|
; GFX11-NEXT: v_and_b32_e64 v0, 0xffff8000, s2
|
|
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
|
|
; GFX11-NEXT: v_or_b32_e32 v0, 0x3c00, v0
|
|
; GFX11-NEXT: global_store_b16 v1, v0, s[0:1]
|
|
; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
|
|
; GFX11-NEXT: s_endpgm
|
|
%result = call half @llvm.copysign.f16(half 1.0, half %sign)
|
|
store half %result, ptr addrspace(1) %out, align 4
|
|
ret void
|
|
}
|
|
|
|
define amdgpu_kernel void @s_test_copysign_f16_10_mag(ptr addrspace(1) %out, half %sign) {
|
|
; SI-LABEL: s_test_copysign_f16_10_mag:
|
|
; SI: ; %bb.0:
|
|
; SI-NEXT: s_load_dword s2, s[0:1], 0xb
|
|
; SI-NEXT: v_mov_b32_e32 v1, 0x41200000
|
|
; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
|
|
; SI-NEXT: s_mov_b32 s3, 0xf000
|
|
; SI-NEXT: s_waitcnt lgkmcnt(0)
|
|
; SI-NEXT: v_cvt_f32_f16_e32 v0, s2
|
|
; SI-NEXT: s_brev_b32 s2, -2
|
|
; SI-NEXT: v_bfi_b32 v0, s2, v1, v0
|
|
; SI-NEXT: v_cvt_f16_f32_e32 v0, v0
|
|
; SI-NEXT: s_mov_b32 s2, -1
|
|
; SI-NEXT: buffer_store_short v0, off, s[0:3], 0
|
|
; SI-NEXT: s_endpgm
|
|
;
|
|
; VI-LABEL: s_test_copysign_f16_10_mag:
|
|
; VI: ; %bb.0:
|
|
; VI-NEXT: s_load_dword s2, s[0:1], 0x2c
|
|
; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
|
|
; VI-NEXT: v_mov_b32_e32 v0, 0xffff8000
|
|
; VI-NEXT: s_waitcnt lgkmcnt(0)
|
|
; VI-NEXT: v_and_b32_e32 v0, s2, v0
|
|
; VI-NEXT: v_or_b32_e32 v2, 0x4900, v0
|
|
; VI-NEXT: v_mov_b32_e32 v0, s0
|
|
; VI-NEXT: v_mov_b32_e32 v1, s1
|
|
; VI-NEXT: flat_store_short v[0:1], v2
|
|
; VI-NEXT: s_endpgm
|
|
;
|
|
; GFX9-LABEL: s_test_copysign_f16_10_mag:
|
|
; GFX9: ; %bb.0:
|
|
; GFX9-NEXT: s_load_dword s4, s[0:1], 0x2c
|
|
; GFX9-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x24
|
|
; GFX9-NEXT: v_mov_b32_e32 v1, 0xffff8000
|
|
; GFX9-NEXT: v_mov_b32_e32 v0, 0
|
|
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
|
|
; GFX9-NEXT: v_and_b32_e32 v1, s4, v1
|
|
; GFX9-NEXT: v_or_b32_e32 v1, 0x4900, v1
|
|
; GFX9-NEXT: global_store_short v0, v1, s[2:3]
|
|
; GFX9-NEXT: s_endpgm
|
|
;
|
|
; GFX11-LABEL: s_test_copysign_f16_10_mag:
|
|
; GFX11: ; %bb.0:
|
|
; GFX11-NEXT: s_clause 0x1
|
|
; GFX11-NEXT: s_load_b32 s2, s[0:1], 0x2c
|
|
; GFX11-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
|
|
; GFX11-NEXT: v_mov_b32_e32 v1, 0
|
|
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
|
|
; GFX11-NEXT: v_and_b32_e64 v0, 0xffff8000, s2
|
|
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
|
|
; GFX11-NEXT: v_or_b32_e32 v0, 0x4900, v0
|
|
; GFX11-NEXT: global_store_b16 v1, v0, s[0:1]
|
|
; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
|
|
; GFX11-NEXT: s_endpgm
|
|
%result = call half @llvm.copysign.f16(half 10.0, half %sign)
|
|
store half %result, ptr addrspace(1) %out, align 4
|
|
ret void
|
|
}
|
|
|
|
define amdgpu_kernel void @s_test_copysign_f16_neg1_mag(ptr addrspace(1) %out, half %sign) {
|
|
; SI-LABEL: s_test_copysign_f16_neg1_mag:
|
|
; SI: ; %bb.0:
|
|
; SI-NEXT: s_load_dword s2, s[0:1], 0xb
|
|
; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
|
|
; SI-NEXT: s_mov_b32 s3, 0xf000
|
|
; SI-NEXT: s_waitcnt lgkmcnt(0)
|
|
; SI-NEXT: v_cvt_f32_f16_e32 v0, s2
|
|
; SI-NEXT: s_brev_b32 s2, -2
|
|
; SI-NEXT: v_bfi_b32 v0, s2, -1.0, v0
|
|
; SI-NEXT: v_cvt_f16_f32_e32 v0, v0
|
|
; SI-NEXT: s_mov_b32 s2, -1
|
|
; SI-NEXT: buffer_store_short v0, off, s[0:3], 0
|
|
; SI-NEXT: s_endpgm
|
|
;
|
|
; VI-LABEL: s_test_copysign_f16_neg1_mag:
|
|
; VI: ; %bb.0:
|
|
; VI-NEXT: s_load_dword s2, s[0:1], 0x2c
|
|
; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
|
|
; VI-NEXT: v_mov_b32_e32 v0, 0xffff8000
|
|
; VI-NEXT: s_waitcnt lgkmcnt(0)
|
|
; VI-NEXT: v_and_b32_e32 v0, s2, v0
|
|
; VI-NEXT: v_or_b32_e32 v2, 0x3c00, v0
|
|
; VI-NEXT: v_mov_b32_e32 v0, s0
|
|
; VI-NEXT: v_mov_b32_e32 v1, s1
|
|
; VI-NEXT: flat_store_short v[0:1], v2
|
|
; VI-NEXT: s_endpgm
|
|
;
|
|
; GFX9-LABEL: s_test_copysign_f16_neg1_mag:
|
|
; GFX9: ; %bb.0:
|
|
; GFX9-NEXT: s_load_dword s4, s[0:1], 0x2c
|
|
; GFX9-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x24
|
|
; GFX9-NEXT: v_mov_b32_e32 v1, 0xffff8000
|
|
; GFX9-NEXT: v_mov_b32_e32 v0, 0
|
|
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
|
|
; GFX9-NEXT: v_and_b32_e32 v1, s4, v1
|
|
; GFX9-NEXT: v_or_b32_e32 v1, 0x3c00, v1
|
|
; GFX9-NEXT: global_store_short v0, v1, s[2:3]
|
|
; GFX9-NEXT: s_endpgm
|
|
;
|
|
; GFX11-LABEL: s_test_copysign_f16_neg1_mag:
|
|
; GFX11: ; %bb.0:
|
|
; GFX11-NEXT: s_clause 0x1
|
|
; GFX11-NEXT: s_load_b32 s2, s[0:1], 0x2c
|
|
; GFX11-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
|
|
; GFX11-NEXT: v_mov_b32_e32 v1, 0
|
|
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
|
|
; GFX11-NEXT: v_and_b32_e64 v0, 0xffff8000, s2
|
|
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
|
|
; GFX11-NEXT: v_or_b32_e32 v0, 0x3c00, v0
|
|
; GFX11-NEXT: global_store_b16 v1, v0, s[0:1]
|
|
; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
|
|
; GFX11-NEXT: s_endpgm
|
|
%result = call half @llvm.copysign.f16(half -1.0, half %sign)
|
|
store half %result, ptr addrspace(1) %out, align 4
|
|
ret void
|
|
}
|
|
|
|
define amdgpu_kernel void @s_test_copysign_f16_neg10_mag(ptr addrspace(1) %out, half %sign) {
|
|
; SI-LABEL: s_test_copysign_f16_neg10_mag:
|
|
; SI: ; %bb.0:
|
|
; SI-NEXT: s_load_dword s2, s[0:1], 0xb
|
|
; SI-NEXT: v_mov_b32_e32 v1, 0xc1200000
|
|
; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
|
|
; SI-NEXT: s_mov_b32 s3, 0xf000
|
|
; SI-NEXT: s_waitcnt lgkmcnt(0)
|
|
; SI-NEXT: v_cvt_f32_f16_e32 v0, s2
|
|
; SI-NEXT: s_brev_b32 s2, -2
|
|
; SI-NEXT: v_bfi_b32 v0, s2, v1, v0
|
|
; SI-NEXT: v_cvt_f16_f32_e32 v0, v0
|
|
; SI-NEXT: s_mov_b32 s2, -1
|
|
; SI-NEXT: buffer_store_short v0, off, s[0:3], 0
|
|
; SI-NEXT: s_endpgm
|
|
;
|
|
; VI-LABEL: s_test_copysign_f16_neg10_mag:
|
|
; VI: ; %bb.0:
|
|
; VI-NEXT: s_load_dword s2, s[0:1], 0x2c
|
|
; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
|
|
; VI-NEXT: v_mov_b32_e32 v0, 0xffff8000
|
|
; VI-NEXT: s_waitcnt lgkmcnt(0)
|
|
; VI-NEXT: v_and_b32_e32 v0, s2, v0
|
|
; VI-NEXT: v_or_b32_e32 v2, 0x4900, v0
|
|
; VI-NEXT: v_mov_b32_e32 v0, s0
|
|
; VI-NEXT: v_mov_b32_e32 v1, s1
|
|
; VI-NEXT: flat_store_short v[0:1], v2
|
|
; VI-NEXT: s_endpgm
|
|
;
|
|
; GFX9-LABEL: s_test_copysign_f16_neg10_mag:
|
|
; GFX9: ; %bb.0:
|
|
; GFX9-NEXT: s_load_dword s4, s[0:1], 0x2c
|
|
; GFX9-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x24
|
|
; GFX9-NEXT: v_mov_b32_e32 v1, 0xffff8000
|
|
; GFX9-NEXT: v_mov_b32_e32 v0, 0
|
|
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
|
|
; GFX9-NEXT: v_and_b32_e32 v1, s4, v1
|
|
; GFX9-NEXT: v_or_b32_e32 v1, 0x4900, v1
|
|
; GFX9-NEXT: global_store_short v0, v1, s[2:3]
|
|
; GFX9-NEXT: s_endpgm
|
|
;
|
|
; GFX11-LABEL: s_test_copysign_f16_neg10_mag:
|
|
; GFX11: ; %bb.0:
|
|
; GFX11-NEXT: s_clause 0x1
|
|
; GFX11-NEXT: s_load_b32 s2, s[0:1], 0x2c
|
|
; GFX11-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
|
|
; GFX11-NEXT: v_mov_b32_e32 v1, 0
|
|
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
|
|
; GFX11-NEXT: v_and_b32_e64 v0, 0xffff8000, s2
|
|
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
|
|
; GFX11-NEXT: v_or_b32_e32 v0, 0x4900, v0
|
|
; GFX11-NEXT: global_store_b16 v1, v0, s[0:1]
|
|
; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
|
|
; GFX11-NEXT: s_endpgm
|
|
%result = call half @llvm.copysign.f16(half -10.0, half %sign)
|
|
store half %result, ptr addrspace(1) %out, align 4
|
|
ret void
|
|
}
|
|
|
|
define half @v_copysign_f16(half %mag, half %sign) {
|
|
; SI-LABEL: v_copysign_f16:
|
|
; SI: ; %bb.0:
|
|
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; SI-NEXT: v_cvt_f16_f32_e32 v0, v0
|
|
; SI-NEXT: s_brev_b32 s4, -2
|
|
; SI-NEXT: v_cvt_f32_f16_e32 v0, v0
|
|
; SI-NEXT: v_bfi_b32 v0, s4, v0, v1
|
|
; SI-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; VI-LABEL: v_copysign_f16:
|
|
; VI: ; %bb.0:
|
|
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; VI-NEXT: s_movk_i32 s4, 0x7fff
|
|
; VI-NEXT: v_bfi_b32 v0, s4, v0, v1
|
|
; VI-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; GFX9-LABEL: v_copysign_f16:
|
|
; GFX9: ; %bb.0:
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; GFX9-NEXT: s_movk_i32 s4, 0x7fff
|
|
; GFX9-NEXT: v_bfi_b32 v0, s4, v0, v1
|
|
; GFX9-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; GFX11-LABEL: v_copysign_f16:
|
|
; GFX11: ; %bb.0:
|
|
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; GFX11-NEXT: v_bfi_b32 v0, 0x7fff, v0, v1
|
|
; GFX11-NEXT: s_setpc_b64 s[30:31]
|
|
%result = call half @llvm.copysign.f16(half %mag, half %sign)
|
|
ret half %result
|
|
}
|
|
|
|
define half @v_test_copysign_f16_0(half %mag) {
|
|
; SI-LABEL: v_test_copysign_f16_0:
|
|
; SI: ; %bb.0:
|
|
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; SI-NEXT: v_cvt_f16_f32_e32 v0, v0
|
|
; SI-NEXT: v_cvt_f32_f16_e64 v0, |v0|
|
|
; SI-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; VI-LABEL: v_test_copysign_f16_0:
|
|
; VI: ; %bb.0:
|
|
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; VI-NEXT: v_and_b32_e32 v0, 0x7fff, v0
|
|
; VI-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; GFX9-LABEL: v_test_copysign_f16_0:
|
|
; GFX9: ; %bb.0:
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; GFX9-NEXT: v_and_b32_e32 v0, 0x7fff, v0
|
|
; GFX9-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; GFX11-LABEL: v_test_copysign_f16_0:
|
|
; GFX11: ; %bb.0:
|
|
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; GFX11-NEXT: v_and_b32_e32 v0, 0x7fff, v0
|
|
; GFX11-NEXT: s_setpc_b64 s[30:31]
|
|
%result = call half @llvm.copysign.f16(half %mag, half 0.0)
|
|
ret half %result
|
|
}
|
|
|
|
define half @v_test_copysign_f16_1(half %mag) {
|
|
; SI-LABEL: v_test_copysign_f16_1:
|
|
; SI: ; %bb.0:
|
|
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; SI-NEXT: v_cvt_f16_f32_e32 v0, v0
|
|
; SI-NEXT: v_cvt_f32_f16_e64 v0, |v0|
|
|
; SI-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; VI-LABEL: v_test_copysign_f16_1:
|
|
; VI: ; %bb.0:
|
|
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; VI-NEXT: v_and_b32_e32 v0, 0x7fff, v0
|
|
; VI-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; GFX9-LABEL: v_test_copysign_f16_1:
|
|
; GFX9: ; %bb.0:
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; GFX9-NEXT: v_and_b32_e32 v0, 0x7fff, v0
|
|
; GFX9-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; GFX11-LABEL: v_test_copysign_f16_1:
|
|
; GFX11: ; %bb.0:
|
|
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; GFX11-NEXT: v_and_b32_e32 v0, 0x7fff, v0
|
|
; GFX11-NEXT: s_setpc_b64 s[30:31]
|
|
%result = call half @llvm.copysign.f16(half %mag, half 1.0)
|
|
ret half %result
|
|
}
|
|
|
|
define half @v_test_copysign_f16_10(half %mag) {
|
|
; SI-LABEL: v_test_copysign_f16_10:
|
|
; SI: ; %bb.0:
|
|
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; SI-NEXT: v_cvt_f16_f32_e32 v0, v0
|
|
; SI-NEXT: v_cvt_f32_f16_e64 v0, |v0|
|
|
; SI-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; VI-LABEL: v_test_copysign_f16_10:
|
|
; VI: ; %bb.0:
|
|
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; VI-NEXT: v_and_b32_e32 v0, 0x7fff, v0
|
|
; VI-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; GFX9-LABEL: v_test_copysign_f16_10:
|
|
; GFX9: ; %bb.0:
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; GFX9-NEXT: v_and_b32_e32 v0, 0x7fff, v0
|
|
; GFX9-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; GFX11-LABEL: v_test_copysign_f16_10:
|
|
; GFX11: ; %bb.0:
|
|
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; GFX11-NEXT: v_and_b32_e32 v0, 0x7fff, v0
|
|
; GFX11-NEXT: s_setpc_b64 s[30:31]
|
|
%result = call half @llvm.copysign.f16(half %mag, half 10.0)
|
|
ret half %result
|
|
}
|
|
|
|
define half @v_test_copysign_f16_neg1(half %mag) {
|
|
; SI-LABEL: v_test_copysign_f16_neg1:
|
|
; SI: ; %bb.0:
|
|
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; SI-NEXT: v_cvt_f16_f32_e32 v0, v0
|
|
; SI-NEXT: v_cvt_f32_f16_e64 v0, -|v0|
|
|
; SI-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; VI-LABEL: v_test_copysign_f16_neg1:
|
|
; VI: ; %bb.0:
|
|
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; VI-NEXT: v_or_b32_e32 v0, 0x8000, v0
|
|
; VI-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; GFX9-LABEL: v_test_copysign_f16_neg1:
|
|
; GFX9: ; %bb.0:
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; GFX9-NEXT: v_or_b32_e32 v0, 0x8000, v0
|
|
; GFX9-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; GFX11-LABEL: v_test_copysign_f16_neg1:
|
|
; GFX11: ; %bb.0:
|
|
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; GFX11-NEXT: v_or_b32_e32 v0, 0x8000, v0
|
|
; GFX11-NEXT: s_setpc_b64 s[30:31]
|
|
%result = call half @llvm.copysign.f16(half %mag, half -1.0)
|
|
ret half %result
|
|
}
|
|
|
|
define half @v_test_copysign_f16_neg10(half %mag) {
|
|
; SI-LABEL: v_test_copysign_f16_neg10:
|
|
; SI: ; %bb.0:
|
|
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; SI-NEXT: v_cvt_f16_f32_e32 v0, v0
|
|
; SI-NEXT: v_cvt_f32_f16_e64 v0, -|v0|
|
|
; SI-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; VI-LABEL: v_test_copysign_f16_neg10:
|
|
; VI: ; %bb.0:
|
|
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; VI-NEXT: v_or_b32_e32 v0, 0x8000, v0
|
|
; VI-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; GFX9-LABEL: v_test_copysign_f16_neg10:
|
|
; GFX9: ; %bb.0:
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; GFX9-NEXT: v_or_b32_e32 v0, 0x8000, v0
|
|
; GFX9-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; GFX11-LABEL: v_test_copysign_f16_neg10:
|
|
; GFX11: ; %bb.0:
|
|
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; GFX11-NEXT: v_or_b32_e32 v0, 0x8000, v0
|
|
; GFX11-NEXT: s_setpc_b64 s[30:31]
|
|
%result = call half @llvm.copysign.f16(half %mag, half -10.0)
|
|
ret half %result
|
|
}
|
|
|
|
define amdgpu_kernel void @v_copysign_out_f32_mag_f16_sign_f32(ptr addrspace(1) %arg_out, ptr addrspace(1) %arg_mag, ptr addrspace(1) %arg_sign) {
|
|
; SI-LABEL: v_copysign_out_f32_mag_f16_sign_f32:
|
|
; SI: ; %bb.0:
|
|
; SI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
|
|
; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xd
|
|
; SI-NEXT: s_mov_b32 s11, 0xf000
|
|
; SI-NEXT: s_mov_b32 s14, 0
|
|
; SI-NEXT: s_mov_b32 s15, s11
|
|
; SI-NEXT: s_waitcnt lgkmcnt(0)
|
|
; SI-NEXT: s_mov_b64 s[12:13], s[6:7]
|
|
; SI-NEXT: v_lshlrev_b32_e32 v1, 1, v0
|
|
; SI-NEXT: v_mov_b32_e32 v2, 0
|
|
; SI-NEXT: buffer_load_ushort v3, v[1:2], s[12:15], 0 addr64
|
|
; SI-NEXT: s_mov_b64 s[2:3], s[14:15]
|
|
; SI-NEXT: v_lshlrev_b32_e32 v1, 2, v0
|
|
; SI-NEXT: buffer_load_dword v0, v[1:2], s[0:3], 0 addr64
|
|
; SI-NEXT: s_brev_b32 s0, -2
|
|
; SI-NEXT: s_mov_b32 s10, -1
|
|
; SI-NEXT: s_mov_b32 s8, s4
|
|
; SI-NEXT: s_mov_b32 s9, s5
|
|
; SI-NEXT: s_waitcnt vmcnt(1)
|
|
; SI-NEXT: v_cvt_f32_f16_e32 v1, v3
|
|
; SI-NEXT: s_waitcnt vmcnt(0)
|
|
; SI-NEXT: v_bfi_b32 v0, s0, v1, v0
|
|
; SI-NEXT: buffer_store_dword v0, off, s[8:11], 0
|
|
; SI-NEXT: s_endpgm
|
|
;
|
|
; VI-LABEL: v_copysign_out_f32_mag_f16_sign_f32:
|
|
; VI: ; %bb.0:
|
|
; VI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
|
|
; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x34
|
|
; VI-NEXT: v_lshlrev_b32_e32 v1, 1, v0
|
|
; VI-NEXT: v_lshlrev_b32_e32 v0, 2, v0
|
|
; VI-NEXT: s_waitcnt lgkmcnt(0)
|
|
; VI-NEXT: v_mov_b32_e32 v2, s7
|
|
; VI-NEXT: v_add_u32_e32 v1, vcc, s6, v1
|
|
; VI-NEXT: v_addc_u32_e32 v2, vcc, 0, v2, vcc
|
|
; VI-NEXT: flat_load_ushort v2, v[1:2]
|
|
; VI-NEXT: v_mov_b32_e32 v1, s1
|
|
; VI-NEXT: v_add_u32_e32 v0, vcc, s0, v0
|
|
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
|
|
; VI-NEXT: flat_load_dword v3, v[0:1]
|
|
; VI-NEXT: s_brev_b32 s0, -2
|
|
; VI-NEXT: v_mov_b32_e32 v0, s4
|
|
; VI-NEXT: v_mov_b32_e32 v1, s5
|
|
; VI-NEXT: s_waitcnt vmcnt(1)
|
|
; VI-NEXT: v_cvt_f32_f16_e32 v2, v2
|
|
; VI-NEXT: s_waitcnt vmcnt(0)
|
|
; VI-NEXT: v_bfi_b32 v2, s0, v2, v3
|
|
; VI-NEXT: flat_store_dword v[0:1], v2
|
|
; VI-NEXT: s_endpgm
|
|
;
|
|
; GFX9-LABEL: v_copysign_out_f32_mag_f16_sign_f32:
|
|
; GFX9: ; %bb.0:
|
|
; GFX9-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
|
|
; GFX9-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
|
|
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 1, v0
|
|
; GFX9-NEXT: v_lshlrev_b32_e32 v0, 2, v0
|
|
; GFX9-NEXT: s_brev_b32 s0, -2
|
|
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
|
|
; GFX9-NEXT: global_load_ushort v1, v1, s[6:7]
|
|
; GFX9-NEXT: v_mov_b32_e32 v2, 0
|
|
; GFX9-NEXT: global_load_dword v0, v0, s[2:3]
|
|
; GFX9-NEXT: s_waitcnt vmcnt(1)
|
|
; GFX9-NEXT: v_cvt_f32_f16_e32 v1, v1
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0)
|
|
; GFX9-NEXT: v_bfi_b32 v0, s0, v1, v0
|
|
; GFX9-NEXT: global_store_dword v2, v0, s[4:5]
|
|
; GFX9-NEXT: s_endpgm
|
|
;
|
|
; GFX11-LABEL: v_copysign_out_f32_mag_f16_sign_f32:
|
|
; GFX11: ; %bb.0:
|
|
; GFX11-NEXT: s_clause 0x1
|
|
; GFX11-NEXT: s_load_b128 s[4:7], s[0:1], 0x24
|
|
; GFX11-NEXT: s_load_b64 s[0:1], s[0:1], 0x34
|
|
; GFX11-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_lshlrev_b32 v1, 1, v0
|
|
; GFX11-NEXT: v_lshlrev_b32_e32 v0, 2, v0
|
|
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
|
|
; GFX11-NEXT: global_load_u16 v1, v1, s[6:7]
|
|
; GFX11-NEXT: global_load_b32 v0, v0, s[0:1]
|
|
; GFX11-NEXT: s_waitcnt vmcnt(1)
|
|
; GFX11-NEXT: v_cvt_f32_f16_e32 v1, v1
|
|
; GFX11-NEXT: s_waitcnt vmcnt(0)
|
|
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
|
|
; GFX11-NEXT: v_bfi_b32 v0, 0x7fffffff, v1, v0
|
|
; GFX11-NEXT: global_store_b32 v2, v0, s[4:5]
|
|
; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
|
|
; GFX11-NEXT: s_endpgm
|
|
%tid = call i32 @llvm.amdgcn.workitem.id.x()
|
|
%arg_mag_gep = getelementptr half, ptr addrspace(1) %arg_mag, i32 %tid
|
|
%mag = load half, ptr addrspace(1) %arg_mag_gep
|
|
%mag.ext = fpext half %mag to float
|
|
%arg_sign_gep = getelementptr float, ptr addrspace(1) %arg_sign, i32 %tid
|
|
%sign = load float, ptr addrspace(1) %arg_sign_gep
|
|
%out = call float @llvm.copysign.f32(float %mag.ext, float %sign)
|
|
store float %out, ptr addrspace(1) %arg_out
|
|
ret void
|
|
}
|
|
|
|
define amdgpu_kernel void @v_copysign_out_f64_mag_f16_sign_f64(ptr addrspace(1) %arg_out, ptr addrspace(1) %arg_mag, ptr addrspace(1) %arg_sign) {
|
|
; SI-LABEL: v_copysign_out_f64_mag_f16_sign_f64:
|
|
; SI: ; %bb.0:
|
|
; SI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
|
|
; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xd
|
|
; SI-NEXT: s_mov_b32 s11, 0xf000
|
|
; SI-NEXT: s_mov_b32 s14, 0
|
|
; SI-NEXT: s_mov_b32 s15, s11
|
|
; SI-NEXT: s_waitcnt lgkmcnt(0)
|
|
; SI-NEXT: s_mov_b64 s[12:13], s[6:7]
|
|
; SI-NEXT: v_lshlrev_b32_e32 v1, 1, v0
|
|
; SI-NEXT: v_mov_b32_e32 v2, 0
|
|
; SI-NEXT: buffer_load_ushort v3, v[1:2], s[12:15], 0 addr64
|
|
; SI-NEXT: s_mov_b64 s[2:3], s[14:15]
|
|
; SI-NEXT: v_lshlrev_b32_e32 v1, 3, v0
|
|
; SI-NEXT: buffer_load_dwordx2 v[0:1], v[1:2], s[0:3], 0 addr64
|
|
; SI-NEXT: s_brev_b32 s0, -2
|
|
; SI-NEXT: s_mov_b32 s10, -1
|
|
; SI-NEXT: s_mov_b32 s8, s4
|
|
; SI-NEXT: s_mov_b32 s9, s5
|
|
; SI-NEXT: s_waitcnt vmcnt(0)
|
|
; SI-NEXT: v_cvt_f32_f16_e32 v0, v3
|
|
; SI-NEXT: v_cvt_f64_f32_e32 v[2:3], v0
|
|
; SI-NEXT: v_bfi_b32 v3, s0, v3, v1
|
|
; SI-NEXT: buffer_store_dwordx2 v[2:3], off, s[8:11], 0
|
|
; SI-NEXT: s_endpgm
|
|
;
|
|
; VI-LABEL: v_copysign_out_f64_mag_f16_sign_f64:
|
|
; VI: ; %bb.0:
|
|
; VI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
|
|
; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x34
|
|
; VI-NEXT: v_lshlrev_b32_e32 v1, 1, v0
|
|
; VI-NEXT: v_lshlrev_b32_e32 v0, 3, v0
|
|
; VI-NEXT: s_waitcnt lgkmcnt(0)
|
|
; VI-NEXT: v_mov_b32_e32 v2, s7
|
|
; VI-NEXT: v_add_u32_e32 v1, vcc, s6, v1
|
|
; VI-NEXT: v_addc_u32_e32 v2, vcc, 0, v2, vcc
|
|
; VI-NEXT: flat_load_ushort v2, v[1:2]
|
|
; VI-NEXT: v_mov_b32_e32 v1, s1
|
|
; VI-NEXT: v_add_u32_e32 v0, vcc, s0, v0
|
|
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
|
|
; VI-NEXT: flat_load_dwordx2 v[0:1], v[0:1]
|
|
; VI-NEXT: s_brev_b32 s0, -2
|
|
; VI-NEXT: v_mov_b32_e32 v4, s4
|
|
; VI-NEXT: v_mov_b32_e32 v5, s5
|
|
; VI-NEXT: s_waitcnt vmcnt(0)
|
|
; VI-NEXT: v_cvt_f32_f16_e32 v0, v2
|
|
; VI-NEXT: v_cvt_f64_f32_e32 v[2:3], v0
|
|
; VI-NEXT: v_bfi_b32 v3, s0, v3, v1
|
|
; VI-NEXT: flat_store_dwordx2 v[4:5], v[2:3]
|
|
; VI-NEXT: s_endpgm
|
|
;
|
|
; GFX9-LABEL: v_copysign_out_f64_mag_f16_sign_f64:
|
|
; GFX9: ; %bb.0:
|
|
; GFX9-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
|
|
; GFX9-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
|
|
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 1, v0
|
|
; GFX9-NEXT: v_lshlrev_b32_e32 v0, 3, v0
|
|
; GFX9-NEXT: s_brev_b32 s0, -2
|
|
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
|
|
; GFX9-NEXT: global_load_ushort v2, v1, s[6:7]
|
|
; GFX9-NEXT: s_nop 0
|
|
; GFX9-NEXT: global_load_dwordx2 v[0:1], v0, s[2:3]
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0)
|
|
; GFX9-NEXT: v_cvt_f32_f16_e32 v0, v2
|
|
; GFX9-NEXT: v_cvt_f64_f32_e32 v[2:3], v0
|
|
; GFX9-NEXT: v_mov_b32_e32 v0, 0
|
|
; GFX9-NEXT: v_bfi_b32 v3, s0, v3, v1
|
|
; GFX9-NEXT: global_store_dwordx2 v0, v[2:3], s[4:5]
|
|
; GFX9-NEXT: s_endpgm
|
|
;
|
|
; GFX11-LABEL: v_copysign_out_f64_mag_f16_sign_f64:
|
|
; GFX11: ; %bb.0:
|
|
; GFX11-NEXT: s_load_b128 s[4:7], s[0:1], 0x24
|
|
; GFX11-NEXT: v_lshlrev_b32_e32 v1, 1, v0
|
|
; GFX11-NEXT: s_load_b64 s[0:1], s[0:1], 0x34
|
|
; GFX11-NEXT: v_lshlrev_b32_e32 v0, 3, v0
|
|
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
|
|
; GFX11-NEXT: global_load_u16 v2, v1, s[6:7]
|
|
; GFX11-NEXT: global_load_b64 v[0:1], v0, s[0:1]
|
|
; GFX11-NEXT: s_waitcnt vmcnt(0)
|
|
; GFX11-NEXT: v_cvt_f32_f16_e32 v0, v2
|
|
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
|
|
; GFX11-NEXT: v_cvt_f64_f32_e32 v[2:3], v0
|
|
; GFX11-NEXT: v_mov_b32_e32 v0, 0
|
|
; GFX11-NEXT: v_bfi_b32 v3, 0x7fffffff, v3, v1
|
|
; GFX11-NEXT: global_store_b64 v0, v[2:3], s[4:5]
|
|
; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
|
|
; GFX11-NEXT: s_endpgm
|
|
%tid = call i32 @llvm.amdgcn.workitem.id.x()
|
|
%arg_mag_gep = getelementptr half, ptr addrspace(1) %arg_mag, i32 %tid
|
|
%mag = load half, ptr addrspace(1) %arg_mag_gep
|
|
%mag.ext = fpext half %mag to double
|
|
%arg_sign_gep = getelementptr double, ptr addrspace(1) %arg_sign, i32 %tid
|
|
%sign = load double, ptr addrspace(1) %arg_sign_gep
|
|
%out = call double @llvm.copysign.f64(double %mag.ext, double %sign)
|
|
store double %out, ptr addrspace(1) %arg_out
|
|
ret void
|
|
}
|
|
|
|
define amdgpu_kernel void @v_copysign_out_f32_mag_f32_sign_f16(ptr addrspace(1) %arg_out, ptr addrspace(1) %arg_mag, ptr addrspace(1) %arg_sign) {
|
|
; SI-LABEL: v_copysign_out_f32_mag_f32_sign_f16:
|
|
; SI: ; %bb.0:
|
|
; SI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
|
|
; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xd
|
|
; SI-NEXT: s_mov_b32 s11, 0xf000
|
|
; SI-NEXT: s_mov_b32 s14, 0
|
|
; SI-NEXT: s_mov_b32 s15, s11
|
|
; SI-NEXT: s_waitcnt lgkmcnt(0)
|
|
; SI-NEXT: s_mov_b64 s[12:13], s[6:7]
|
|
; SI-NEXT: v_lshlrev_b32_e32 v1, 2, v0
|
|
; SI-NEXT: v_mov_b32_e32 v2, 0
|
|
; SI-NEXT: buffer_load_dword v3, v[1:2], s[12:15], 0 addr64
|
|
; SI-NEXT: s_mov_b64 s[2:3], s[14:15]
|
|
; SI-NEXT: v_lshlrev_b32_e32 v1, 1, v0
|
|
; SI-NEXT: buffer_load_ushort v0, v[1:2], s[0:3], 0 addr64
|
|
; SI-NEXT: s_brev_b32 s0, -2
|
|
; SI-NEXT: s_mov_b32 s10, -1
|
|
; SI-NEXT: s_mov_b32 s8, s4
|
|
; SI-NEXT: s_mov_b32 s9, s5
|
|
; SI-NEXT: s_waitcnt vmcnt(0)
|
|
; SI-NEXT: v_cvt_f32_f16_e32 v0, v0
|
|
; SI-NEXT: v_bfi_b32 v0, s0, v3, v0
|
|
; SI-NEXT: buffer_store_dword v0, off, s[8:11], 0
|
|
; SI-NEXT: s_endpgm
|
|
;
|
|
; VI-LABEL: v_copysign_out_f32_mag_f32_sign_f16:
|
|
; VI: ; %bb.0:
|
|
; VI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
|
|
; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x34
|
|
; VI-NEXT: v_lshlrev_b32_e32 v2, 2, v0
|
|
; VI-NEXT: v_lshlrev_b32_e32 v0, 1, v0
|
|
; VI-NEXT: s_waitcnt lgkmcnt(0)
|
|
; VI-NEXT: v_mov_b32_e32 v3, s7
|
|
; VI-NEXT: v_mov_b32_e32 v1, s1
|
|
; VI-NEXT: v_add_u32_e32 v0, vcc, s0, v0
|
|
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
|
|
; VI-NEXT: flat_load_ushort v4, v[0:1]
|
|
; VI-NEXT: v_add_u32_e32 v0, vcc, s6, v2
|
|
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v3, vcc
|
|
; VI-NEXT: flat_load_dword v2, v[0:1]
|
|
; VI-NEXT: s_brev_b32 s0, -2
|
|
; VI-NEXT: v_mov_b32_e32 v0, s4
|
|
; VI-NEXT: v_mov_b32_e32 v1, s5
|
|
; VI-NEXT: s_waitcnt vmcnt(1)
|
|
; VI-NEXT: v_lshlrev_b32_e32 v3, 16, v4
|
|
; VI-NEXT: s_waitcnt vmcnt(0)
|
|
; VI-NEXT: v_bfi_b32 v2, s0, v2, v3
|
|
; VI-NEXT: flat_store_dword v[0:1], v2
|
|
; VI-NEXT: s_endpgm
|
|
;
|
|
; GFX9-LABEL: v_copysign_out_f32_mag_f32_sign_f16:
|
|
; GFX9: ; %bb.0:
|
|
; GFX9-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
|
|
; GFX9-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
|
|
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 1, v0
|
|
; GFX9-NEXT: v_lshlrev_b32_e32 v0, 2, v0
|
|
; GFX9-NEXT: s_brev_b32 s0, -2
|
|
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
|
|
; GFX9-NEXT: global_load_ushort v1, v1, s[2:3]
|
|
; GFX9-NEXT: v_mov_b32_e32 v2, 0
|
|
; GFX9-NEXT: global_load_dword v0, v0, s[6:7]
|
|
; GFX9-NEXT: s_waitcnt vmcnt(1)
|
|
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 16, v1
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0)
|
|
; GFX9-NEXT: v_bfi_b32 v0, s0, v0, v1
|
|
; GFX9-NEXT: global_store_dword v2, v0, s[4:5]
|
|
; GFX9-NEXT: s_endpgm
|
|
;
|
|
; GFX11-LABEL: v_copysign_out_f32_mag_f32_sign_f16:
|
|
; GFX11: ; %bb.0:
|
|
; GFX11-NEXT: s_clause 0x1
|
|
; GFX11-NEXT: s_load_b64 s[4:5], s[0:1], 0x34
|
|
; GFX11-NEXT: s_load_b128 s[0:3], s[0:1], 0x24
|
|
; GFX11-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_lshlrev_b32 v1, 1, v0
|
|
; GFX11-NEXT: v_lshlrev_b32_e32 v0, 2, v0
|
|
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
|
|
; GFX11-NEXT: global_load_u16 v1, v1, s[4:5]
|
|
; GFX11-NEXT: global_load_b32 v0, v0, s[2:3]
|
|
; GFX11-NEXT: s_waitcnt vmcnt(1)
|
|
; GFX11-NEXT: v_lshlrev_b32_e32 v1, 16, v1
|
|
; GFX11-NEXT: s_waitcnt vmcnt(0)
|
|
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
|
|
; GFX11-NEXT: v_bfi_b32 v0, 0x7fffffff, v0, v1
|
|
; GFX11-NEXT: global_store_b32 v2, v0, s[0:1]
|
|
; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
|
|
; GFX11-NEXT: s_endpgm
|
|
%tid = call i32 @llvm.amdgcn.workitem.id.x()
|
|
%arg_mag_gep = getelementptr float, ptr addrspace(1) %arg_mag, i32 %tid
|
|
%mag = load float, ptr addrspace(1) %arg_mag_gep
|
|
%arg_sign_gep = getelementptr half, ptr addrspace(1) %arg_sign, i32 %tid
|
|
%sign = load half, ptr addrspace(1) %arg_sign_gep
|
|
%sign.ext = fpext half %sign to float
|
|
%out = call float @llvm.copysign.f32(float %mag, float %sign.ext)
|
|
store float %out, ptr addrspace(1) %arg_out
|
|
ret void
|
|
}
|
|
|
|
define amdgpu_kernel void @v_copysign_out_f64_mag_f64_sign_f16(ptr addrspace(1) %arg_out, ptr addrspace(1) %arg_mag, ptr addrspace(1) %arg_sign) {
|
|
; SI-LABEL: v_copysign_out_f64_mag_f64_sign_f16:
|
|
; SI: ; %bb.0:
|
|
; SI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
|
|
; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xd
|
|
; SI-NEXT: s_mov_b32 s11, 0xf000
|
|
; SI-NEXT: s_mov_b32 s14, 0
|
|
; SI-NEXT: s_mov_b32 s15, s11
|
|
; SI-NEXT: v_mov_b32_e32 v1, 0
|
|
; SI-NEXT: s_mov_b64 s[2:3], s[14:15]
|
|
; SI-NEXT: v_lshlrev_b32_e32 v2, 1, v0
|
|
; SI-NEXT: v_mov_b32_e32 v3, v1
|
|
; SI-NEXT: s_waitcnt lgkmcnt(0)
|
|
; SI-NEXT: buffer_load_ushort v2, v[2:3], s[0:3], 0 addr64
|
|
; SI-NEXT: s_mov_b64 s[12:13], s[6:7]
|
|
; SI-NEXT: v_lshlrev_b32_e32 v0, 3, v0
|
|
; SI-NEXT: buffer_load_dwordx2 v[0:1], v[0:1], s[12:15], 0 addr64
|
|
; SI-NEXT: s_brev_b32 s0, -2
|
|
; SI-NEXT: s_mov_b32 s10, -1
|
|
; SI-NEXT: s_mov_b32 s8, s4
|
|
; SI-NEXT: s_mov_b32 s9, s5
|
|
; SI-NEXT: s_waitcnt vmcnt(1)
|
|
; SI-NEXT: v_cvt_f32_f16_e32 v2, v2
|
|
; SI-NEXT: s_waitcnt vmcnt(0)
|
|
; SI-NEXT: v_bfi_b32 v1, s0, v1, v2
|
|
; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[8:11], 0
|
|
; SI-NEXT: s_endpgm
|
|
;
|
|
; VI-LABEL: v_copysign_out_f64_mag_f64_sign_f16:
|
|
; VI: ; %bb.0:
|
|
; VI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
|
|
; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x34
|
|
; VI-NEXT: v_lshlrev_b32_e32 v2, 3, v0
|
|
; VI-NEXT: v_lshlrev_b32_e32 v0, 1, v0
|
|
; VI-NEXT: s_waitcnt lgkmcnt(0)
|
|
; VI-NEXT: v_mov_b32_e32 v3, s7
|
|
; VI-NEXT: v_mov_b32_e32 v1, s1
|
|
; VI-NEXT: v_add_u32_e32 v0, vcc, s0, v0
|
|
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
|
|
; VI-NEXT: flat_load_ushort v4, v[0:1]
|
|
; VI-NEXT: v_add_u32_e32 v0, vcc, s6, v2
|
|
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v3, vcc
|
|
; VI-NEXT: flat_load_dwordx2 v[0:1], v[0:1]
|
|
; VI-NEXT: s_brev_b32 s0, -2
|
|
; VI-NEXT: v_mov_b32_e32 v2, s4
|
|
; VI-NEXT: v_mov_b32_e32 v3, s5
|
|
; VI-NEXT: s_waitcnt vmcnt(1)
|
|
; VI-NEXT: v_lshlrev_b32_e32 v4, 16, v4
|
|
; VI-NEXT: s_waitcnt vmcnt(0)
|
|
; VI-NEXT: v_bfi_b32 v1, s0, v1, v4
|
|
; VI-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
|
|
; VI-NEXT: s_endpgm
|
|
;
|
|
; GFX9-LABEL: v_copysign_out_f64_mag_f64_sign_f16:
|
|
; GFX9: ; %bb.0:
|
|
; GFX9-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
|
|
; GFX9-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
|
|
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 1, v0
|
|
; GFX9-NEXT: v_lshlrev_b32_e32 v0, 3, v0
|
|
; GFX9-NEXT: s_brev_b32 s0, -2
|
|
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
|
|
; GFX9-NEXT: global_load_ushort v2, v1, s[2:3]
|
|
; GFX9-NEXT: v_mov_b32_e32 v3, 0
|
|
; GFX9-NEXT: global_load_dwordx2 v[0:1], v0, s[6:7]
|
|
; GFX9-NEXT: s_waitcnt vmcnt(1)
|
|
; GFX9-NEXT: v_lshlrev_b32_e32 v2, 16, v2
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0)
|
|
; GFX9-NEXT: v_bfi_b32 v1, s0, v1, v2
|
|
; GFX9-NEXT: global_store_dwordx2 v3, v[0:1], s[4:5]
|
|
; GFX9-NEXT: s_endpgm
|
|
;
|
|
; GFX11-LABEL: v_copysign_out_f64_mag_f64_sign_f16:
|
|
; GFX11: ; %bb.0:
|
|
; GFX11-NEXT: s_clause 0x1
|
|
; GFX11-NEXT: s_load_b64 s[4:5], s[0:1], 0x34
|
|
; GFX11-NEXT: s_load_b128 s[0:3], s[0:1], 0x24
|
|
; GFX11-NEXT: v_lshlrev_b32_e32 v1, 1, v0
|
|
; GFX11-NEXT: v_dual_mov_b32 v3, 0 :: v_dual_lshlrev_b32 v0, 3, v0
|
|
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
|
|
; GFX11-NEXT: global_load_u16 v2, v1, s[4:5]
|
|
; GFX11-NEXT: global_load_b64 v[0:1], v0, s[2:3]
|
|
; GFX11-NEXT: s_waitcnt vmcnt(1)
|
|
; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v2
|
|
; GFX11-NEXT: s_waitcnt vmcnt(0)
|
|
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
|
|
; GFX11-NEXT: v_bfi_b32 v1, 0x7fffffff, v1, v2
|
|
; GFX11-NEXT: global_store_b64 v3, v[0:1], s[0:1]
|
|
; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
|
|
; GFX11-NEXT: s_endpgm
|
|
%tid = call i32 @llvm.amdgcn.workitem.id.x()
|
|
%arg_mag_gep = getelementptr double, ptr addrspace(1) %arg_mag, i32 %tid
|
|
%mag = load double, ptr addrspace(1) %arg_mag_gep
|
|
%arg_sign_gep = getelementptr half, ptr addrspace(1) %arg_sign, i32 %tid
|
|
%sign = load half, ptr addrspace(1) %arg_sign_gep
|
|
%sign.ext = fpext half %sign to double
|
|
%out = call double @llvm.copysign.f64(double %mag, double %sign.ext)
|
|
store double %out, ptr addrspace(1) %arg_out
|
|
ret void
|
|
}
|
|
|
|
define amdgpu_kernel void @v_copysign_out_f16_mag_f16_sign_f32(ptr addrspace(1) %arg_out, ptr addrspace(1) %arg_mag, ptr addrspace(1) %arg_sign) {
|
|
; SI-LABEL: v_copysign_out_f16_mag_f16_sign_f32:
|
|
; SI: ; %bb.0:
|
|
; SI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
|
|
; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xd
|
|
; SI-NEXT: s_mov_b32 s11, 0xf000
|
|
; SI-NEXT: s_mov_b32 s14, 0
|
|
; SI-NEXT: s_mov_b32 s15, s11
|
|
; SI-NEXT: s_waitcnt lgkmcnt(0)
|
|
; SI-NEXT: s_mov_b64 s[12:13], s[6:7]
|
|
; SI-NEXT: v_lshlrev_b32_e32 v1, 1, v0
|
|
; SI-NEXT: v_mov_b32_e32 v2, 0
|
|
; SI-NEXT: buffer_load_ushort v3, v[1:2], s[12:15], 0 addr64
|
|
; SI-NEXT: s_mov_b64 s[2:3], s[14:15]
|
|
; SI-NEXT: v_lshlrev_b32_e32 v1, 2, v0
|
|
; SI-NEXT: buffer_load_dword v0, v[1:2], s[0:3], 0 addr64
|
|
; SI-NEXT: s_brev_b32 s0, -2
|
|
; SI-NEXT: s_mov_b32 s10, -1
|
|
; SI-NEXT: s_mov_b32 s8, s4
|
|
; SI-NEXT: s_mov_b32 s9, s5
|
|
; SI-NEXT: s_waitcnt vmcnt(1)
|
|
; SI-NEXT: v_cvt_f32_f16_e32 v1, v3
|
|
; SI-NEXT: s_waitcnt vmcnt(0)
|
|
; SI-NEXT: v_bfi_b32 v0, s0, v1, v0
|
|
; SI-NEXT: v_cvt_f16_f32_e32 v0, v0
|
|
; SI-NEXT: buffer_store_short v0, off, s[8:11], 0
|
|
; SI-NEXT: s_endpgm
|
|
;
|
|
; VI-LABEL: v_copysign_out_f16_mag_f16_sign_f32:
|
|
; VI: ; %bb.0:
|
|
; VI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
|
|
; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x34
|
|
; VI-NEXT: v_lshlrev_b32_e32 v2, 1, v0
|
|
; VI-NEXT: v_lshlrev_b32_e32 v0, 2, v0
|
|
; VI-NEXT: s_waitcnt lgkmcnt(0)
|
|
; VI-NEXT: v_mov_b32_e32 v3, s7
|
|
; VI-NEXT: v_mov_b32_e32 v1, s1
|
|
; VI-NEXT: v_add_u32_e32 v0, vcc, s0, v0
|
|
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
|
|
; VI-NEXT: flat_load_dword v4, v[0:1]
|
|
; VI-NEXT: v_add_u32_e32 v0, vcc, s6, v2
|
|
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v3, vcc
|
|
; VI-NEXT: flat_load_ushort v2, v[0:1]
|
|
; VI-NEXT: s_movk_i32 s0, 0x7fff
|
|
; VI-NEXT: v_mov_b32_e32 v0, s4
|
|
; VI-NEXT: v_mov_b32_e32 v1, s5
|
|
; VI-NEXT: s_waitcnt vmcnt(1)
|
|
; VI-NEXT: v_lshrrev_b32_e32 v3, 16, v4
|
|
; VI-NEXT: s_waitcnt vmcnt(0)
|
|
; VI-NEXT: v_bfi_b32 v2, s0, v2, v3
|
|
; VI-NEXT: flat_store_short v[0:1], v2
|
|
; VI-NEXT: s_endpgm
|
|
;
|
|
; GFX9-LABEL: v_copysign_out_f16_mag_f16_sign_f32:
|
|
; GFX9: ; %bb.0:
|
|
; GFX9-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
|
|
; GFX9-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
|
|
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 2, v0
|
|
; GFX9-NEXT: v_lshlrev_b32_e32 v0, 1, v0
|
|
; GFX9-NEXT: s_movk_i32 s0, 0x7fff
|
|
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
|
|
; GFX9-NEXT: global_load_dword v1, v1, s[2:3]
|
|
; GFX9-NEXT: v_mov_b32_e32 v2, 0
|
|
; GFX9-NEXT: global_load_ushort v0, v0, s[6:7]
|
|
; GFX9-NEXT: s_waitcnt vmcnt(1)
|
|
; GFX9-NEXT: v_lshrrev_b32_e32 v1, 16, v1
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0)
|
|
; GFX9-NEXT: v_bfi_b32 v0, s0, v0, v1
|
|
; GFX9-NEXT: global_store_short v2, v0, s[4:5]
|
|
; GFX9-NEXT: s_endpgm
|
|
;
|
|
; GFX11-LABEL: v_copysign_out_f16_mag_f16_sign_f32:
|
|
; GFX11: ; %bb.0:
|
|
; GFX11-NEXT: s_clause 0x1
|
|
; GFX11-NEXT: s_load_b64 s[4:5], s[0:1], 0x34
|
|
; GFX11-NEXT: s_load_b128 s[0:3], s[0:1], 0x24
|
|
; GFX11-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_lshlrev_b32 v1, 2, v0
|
|
; GFX11-NEXT: v_lshlrev_b32_e32 v0, 1, v0
|
|
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
|
|
; GFX11-NEXT: global_load_b32 v1, v1, s[4:5]
|
|
; GFX11-NEXT: global_load_u16 v0, v0, s[2:3]
|
|
; GFX11-NEXT: s_waitcnt vmcnt(1)
|
|
; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v1
|
|
; GFX11-NEXT: s_waitcnt vmcnt(0)
|
|
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
|
|
; GFX11-NEXT: v_bfi_b32 v0, 0x7fff, v0, v1
|
|
; GFX11-NEXT: global_store_b16 v2, v0, s[0:1]
|
|
; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
|
|
; GFX11-NEXT: s_endpgm
|
|
%tid = call i32 @llvm.amdgcn.workitem.id.x()
|
|
%arg_mag_gep = getelementptr half, ptr addrspace(1) %arg_mag, i32 %tid
|
|
%mag = load half, ptr addrspace(1) %arg_mag_gep
|
|
%arg_sign_gep = getelementptr float, ptr addrspace(1) %arg_sign, i32 %tid
|
|
%sign = load float, ptr addrspace(1) %arg_sign_gep
|
|
%sign.trunc = fptrunc float %sign to half
|
|
%out = call half @llvm.copysign.f16(half %mag, half %sign.trunc)
|
|
store half %out, ptr addrspace(1) %arg_out
|
|
ret void
|
|
}
|
|
|
|
define amdgpu_kernel void @v_copysign_out_f16_mag_f16_sign_f64(ptr addrspace(1) %arg_out, ptr addrspace(1) %arg_mag, ptr addrspace(1) %arg_sign) {
|
|
; SI-LABEL: v_copysign_out_f16_mag_f16_sign_f64:
|
|
; SI: ; %bb.0:
|
|
; SI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
|
|
; SI-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0xd
|
|
; SI-NEXT: s_mov_b32 s3, 0xf000
|
|
; SI-NEXT: s_mov_b32 s2, -1
|
|
; SI-NEXT: s_mov_b32 s14, s2
|
|
; SI-NEXT: s_waitcnt lgkmcnt(0)
|
|
; SI-NEXT: s_mov_b32 s12, s6
|
|
; SI-NEXT: s_mov_b32 s13, s7
|
|
; SI-NEXT: s_mov_b32 s15, s3
|
|
; SI-NEXT: buffer_load_ushort v2, off, s[12:15], 0
|
|
; SI-NEXT: s_mov_b32 s10, 0
|
|
; SI-NEXT: s_mov_b32 s11, s3
|
|
; SI-NEXT: v_lshlrev_b32_e32 v0, 3, v0
|
|
; SI-NEXT: v_mov_b32_e32 v1, 0
|
|
; SI-NEXT: buffer_load_dwordx2 v[0:1], v[0:1], s[8:11], 0 addr64
|
|
; SI-NEXT: s_brev_b32 s0, -2
|
|
; SI-NEXT: s_mov_b32 s1, s5
|
|
; SI-NEXT: s_waitcnt vmcnt(0)
|
|
; SI-NEXT: v_cvt_f32_f16_e32 v0, v2
|
|
; SI-NEXT: v_bfi_b32 v0, s0, v0, v1
|
|
; SI-NEXT: v_cvt_f16_f32_e32 v0, v0
|
|
; SI-NEXT: s_mov_b32 s0, s4
|
|
; SI-NEXT: buffer_store_short v0, off, s[0:3], 0
|
|
; SI-NEXT: s_endpgm
|
|
;
|
|
; VI-LABEL: v_copysign_out_f16_mag_f16_sign_f64:
|
|
; VI: ; %bb.0:
|
|
; VI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
|
|
; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x34
|
|
; VI-NEXT: v_lshlrev_b32_e32 v1, 3, v0
|
|
; VI-NEXT: s_waitcnt lgkmcnt(0)
|
|
; VI-NEXT: v_mov_b32_e32 v0, s6
|
|
; VI-NEXT: v_mov_b32_e32 v2, s1
|
|
; VI-NEXT: v_add_u32_e32 v1, vcc, s0, v1
|
|
; VI-NEXT: v_addc_u32_e32 v2, vcc, 0, v2, vcc
|
|
; VI-NEXT: flat_load_dwordx2 v[1:2], v[1:2]
|
|
; VI-NEXT: s_waitcnt vmcnt(0)
|
|
; VI-NEXT: v_mov_b32_e32 v1, s7
|
|
; VI-NEXT: s_movk_i32 s0, 0x7fff
|
|
; VI-NEXT: flat_load_ushort v3, v[0:1]
|
|
; VI-NEXT: v_lshrrev_b32_e32 v2, 16, v2
|
|
; VI-NEXT: v_mov_b32_e32 v0, s4
|
|
; VI-NEXT: v_mov_b32_e32 v1, s5
|
|
; VI-NEXT: s_waitcnt vmcnt(0)
|
|
; VI-NEXT: v_bfi_b32 v2, s0, v3, v2
|
|
; VI-NEXT: flat_store_short v[0:1], v2
|
|
; VI-NEXT: s_endpgm
|
|
;
|
|
; GFX9-LABEL: v_copysign_out_f16_mag_f16_sign_f64:
|
|
; GFX9: ; %bb.0:
|
|
; GFX9-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
|
|
; GFX9-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
|
|
; GFX9-NEXT: v_lshlrev_b32_e32 v0, 3, v0
|
|
; GFX9-NEXT: s_movk_i32 s0, 0x7fff
|
|
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
|
|
; GFX9-NEXT: global_load_dwordx2 v[0:1], v0, s[2:3]
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0)
|
|
; GFX9-NEXT: v_mov_b32_e32 v0, 0
|
|
; GFX9-NEXT: global_load_ushort v2, v0, s[6:7]
|
|
; GFX9-NEXT: v_lshrrev_b32_e32 v1, 16, v1
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0)
|
|
; GFX9-NEXT: v_bfi_b32 v1, s0, v2, v1
|
|
; GFX9-NEXT: global_store_short v0, v1, s[4:5]
|
|
; GFX9-NEXT: s_endpgm
|
|
;
|
|
; GFX11-LABEL: v_copysign_out_f16_mag_f16_sign_f64:
|
|
; GFX11: ; %bb.0:
|
|
; GFX11-NEXT: s_clause 0x1
|
|
; GFX11-NEXT: s_load_b64 s[4:5], s[0:1], 0x34
|
|
; GFX11-NEXT: s_load_b128 s[0:3], s[0:1], 0x24
|
|
; GFX11-NEXT: v_lshlrev_b32_e32 v0, 3, v0
|
|
; GFX11-NEXT: v_mov_b32_e32 v2, 0
|
|
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
|
|
; GFX11-NEXT: global_load_b64 v[0:1], v0, s[4:5]
|
|
; GFX11-NEXT: global_load_u16 v0, v2, s[2:3]
|
|
; GFX11-NEXT: s_waitcnt vmcnt(1)
|
|
; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v1
|
|
; GFX11-NEXT: s_waitcnt vmcnt(0)
|
|
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
|
|
; GFX11-NEXT: v_bfi_b32 v0, 0x7fff, v0, v1
|
|
; GFX11-NEXT: global_store_b16 v2, v0, s[0:1]
|
|
; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
|
|
; GFX11-NEXT: s_endpgm
|
|
%tid = call i32 @llvm.amdgcn.workitem.id.x()
|
|
%arg_mag_gep = getelementptr half, ptr addrspace(1) %arg_mag, i32 %tid
|
|
%mag = load half, ptr addrspace(1) %arg_mag
|
|
%arg_sign_gep = getelementptr double, ptr addrspace(1) %arg_sign, i32 %tid
|
|
%sign = load double, ptr addrspace(1) %arg_sign_gep
|
|
%sign.trunc = fptrunc double %sign to half
|
|
%out = call half @llvm.copysign.f16(half %mag, half %sign.trunc)
|
|
store half %out, ptr addrspace(1) %arg_out
|
|
ret void
|
|
}
|
|
|
|
define amdgpu_kernel void @v_copysign_out_f16_mag_f32_sign_f16(ptr addrspace(1) %arg_out, ptr addrspace(1) %arg_mag, ptr addrspace(1) %arg_sign) {
; SI-LABEL: v_copysign_out_f16_mag_f32_sign_f16:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xd
; SI-NEXT: s_mov_b32 s11, 0xf000
; SI-NEXT: s_mov_b32 s14, 0
; SI-NEXT: s_mov_b32 s15, s11
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mov_b64 s[12:13], s[6:7]
; SI-NEXT: v_lshlrev_b32_e32 v1, 2, v0
; SI-NEXT: v_mov_b32_e32 v2, 0
; SI-NEXT: buffer_load_dword v3, v[1:2], s[12:15], 0 addr64
; SI-NEXT: s_mov_b64 s[2:3], s[14:15]
; SI-NEXT: v_lshlrev_b32_e32 v1, 1, v0
; SI-NEXT: buffer_load_ushort v0, v[1:2], s[0:3], 0 addr64
; SI-NEXT: s_brev_b32 s0, -2
; SI-NEXT: s_mov_b32 s10, -1
; SI-NEXT: s_mov_b32 s8, s4
; SI-NEXT: s_mov_b32 s9, s5
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_cvt_f16_f32_e32 v1, v3
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v0, v0
; SI-NEXT: v_cvt_f32_f16_e32 v1, v1
; SI-NEXT: v_bfi_b32 v0, s0, v1, v0
; SI-NEXT: v_cvt_f16_f32_e32 v0, v0
; SI-NEXT: buffer_store_short v0, off, s[8:11], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: v_copysign_out_f16_mag_f32_sign_f16:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x34
; VI-NEXT: v_lshlrev_b32_e32 v1, 2, v0
; VI-NEXT: v_lshlrev_b32_e32 v0, 1, v0
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v2, s7
; VI-NEXT: v_add_u32_e32 v1, vcc, s6, v1
; VI-NEXT: v_addc_u32_e32 v2, vcc, 0, v2, vcc
; VI-NEXT: flat_load_dword v2, v[1:2]
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: v_add_u32_e32 v0, vcc, s0, v0
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI-NEXT: flat_load_ushort v3, v[0:1]
; VI-NEXT: s_movk_i32 s0, 0x7fff
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: v_mov_b32_e32 v1, s5
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_cvt_f16_f32_e32 v2, v2
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_bfi_b32 v2, s0, v2, v3
; VI-NEXT: flat_store_short v[0:1], v2
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: v_copysign_out_f16_mag_f32_sign_f16:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
; GFX9-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 2, v0
; GFX9-NEXT: v_lshlrev_b32_e32 v0, 1, v0
; GFX9-NEXT: s_movk_i32 s0, 0x7fff
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: global_load_dword v1, v1, s[6:7]
; GFX9-NEXT: v_mov_b32_e32 v2, 0
; GFX9-NEXT: global_load_ushort v0, v0, s[2:3]
; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: v_cvt_f16_f32_e32 v1, v1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_bfi_b32 v0, s0, v1, v0
; GFX9-NEXT: global_store_short v2, v0, s[4:5]
; GFX9-NEXT: s_endpgm
;
; GFX11-LABEL: v_copysign_out_f16_mag_f32_sign_f16:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_clause 0x1
; GFX11-NEXT: s_load_b128 s[4:7], s[0:1], 0x24
; GFX11-NEXT: s_load_b64 s[0:1], s[0:1], 0x34
; GFX11-NEXT: v_dual_mov_b32 v2, 0 :: v_dual_lshlrev_b32 v1, 2, v0
; GFX11-NEXT: v_lshlrev_b32_e32 v0, 1, v0
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: global_load_b32 v1, v1, s[6:7]
; GFX11-NEXT: global_load_u16 v0, v0, s[0:1]
; GFX11-NEXT: s_waitcnt vmcnt(1)
; GFX11-NEXT: v_cvt_f16_f32_e32 v1, v1
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-NEXT: v_bfi_b32 v0, 0x7fff, v1, v0
; GFX11-NEXT: global_store_b16 v2, v0, s[4:5]
; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
; GFX11-NEXT: s_endpgm
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %arg_mag_gep = getelementptr float, ptr addrspace(1) %arg_mag, i32 %tid
  %mag = load float, ptr addrspace(1) %arg_mag_gep
  %mag.trunc = fptrunc float %mag to half
  %arg_sign_gep = getelementptr half, ptr addrspace(1) %arg_sign, i32 %tid
  %sign = load half, ptr addrspace(1) %arg_sign_gep
  %out = call half @llvm.copysign.f16(half %mag.trunc, half %sign)
  store half %out, ptr addrspace(1) %arg_out
  ret void
}

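; The f64 magnitude is truncated to f16 via an expanded scalar rounding sequence (no single
; f64->f16 conversion instruction appears here), then the sign is applied with v_bfi_b32 using
; the 0x7fff mask.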
define amdgpu_kernel void @s_copysign_out_f16_mag_f64_sign_f16(ptr addrspace(1) %arg_out, double %mag, half %sign) {
; SI-LABEL: s_copysign_out_f16_mag_f64_sign_f16:
; SI: ; %bb.0:
; SI-NEXT: s_load_dword s4, s[0:1], 0xd
; SI-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v0, s4
; SI-NEXT: s_lshr_b32 s4, s3, 8
; SI-NEXT: s_and_b32 s5, s3, 0x1ff
; SI-NEXT: s_and_b32 s6, s4, 0xffe
; SI-NEXT: s_or_b32 s2, s5, s2
; SI-NEXT: s_cmp_lg_u32 s2, 0
; SI-NEXT: s_cselect_b64 s[4:5], -1, 0
; SI-NEXT: v_cndmask_b32_e64 v1, 0, 1, s[4:5]
; SI-NEXT: v_readfirstlane_b32 s2, v1
; SI-NEXT: s_bfe_u32 s5, s3, 0xb0014
; SI-NEXT: s_or_b32 s2, s6, s2
; SI-NEXT: s_sub_i32 s6, 0x3f1, s5
; SI-NEXT: v_med3_i32 v1, s6, 0, 13
; SI-NEXT: s_or_b32 s4, s2, 0x1000
; SI-NEXT: v_readfirstlane_b32 s6, v1
; SI-NEXT: s_lshr_b32 s6, s4, s6
; SI-NEXT: v_lshl_b32_e32 v1, s6, v1
; SI-NEXT: v_cmp_ne_u32_e32 vcc, s4, v1
; SI-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc
; SI-NEXT: s_add_i32 s8, s5, 0xfffffc10
; SI-NEXT: v_readfirstlane_b32 s4, v1
; SI-NEXT: s_lshl_b32 s5, s8, 12
; SI-NEXT: s_or_b32 s4, s6, s4
; SI-NEXT: s_or_b32 s5, s2, s5
; SI-NEXT: s_cmp_lt_i32 s8, 1
; SI-NEXT: s_cselect_b32 s9, s4, s5
; SI-NEXT: s_and_b32 s6, s9, 7
; SI-NEXT: s_cmp_gt_i32 s6, 5
; SI-NEXT: s_cselect_b64 s[4:5], -1, 0
; SI-NEXT: s_cmp_eq_u32 s6, 3
; SI-NEXT: s_cselect_b64 s[6:7], -1, 0
; SI-NEXT: s_or_b64 s[4:5], s[6:7], s[4:5]
; SI-NEXT: s_lshr_b32 s6, s9, 2
; SI-NEXT: s_or_b32 s4, s4, s5
; SI-NEXT: s_cmp_lg_u32 s4, 0
; SI-NEXT: s_addc_u32 s4, s6, 0
; SI-NEXT: s_cmp_lt_i32 s8, 31
; SI-NEXT: s_cselect_b32 s6, s4, 0x7c00
; SI-NEXT: s_cmp_lg_u32 s2, 0
; SI-NEXT: s_cselect_b64 s[4:5], -1, 0
; SI-NEXT: v_cndmask_b32_e64 v1, 0, 1, s[4:5]
; SI-NEXT: v_lshlrev_b32_e32 v1, 9, v1
; SI-NEXT: s_cmpk_eq_i32 s8, 0x40f
; SI-NEXT: v_or_b32_e32 v1, 0x7c00, v1
; SI-NEXT: v_mov_b32_e32 v2, s6
; SI-NEXT: s_cselect_b64 vcc, -1, 0
; SI-NEXT: s_lshr_b32 s2, s3, 16
; SI-NEXT: v_cndmask_b32_e32 v1, v2, v1, vcc
; SI-NEXT: s_and_b32 s2, s2, 0x8000
; SI-NEXT: v_or_b32_e32 v1, s2, v1
; SI-NEXT: v_cvt_f32_f16_e32 v1, v1
; SI-NEXT: s_brev_b32 s2, -2
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: v_bfi_b32 v0, s2, v1, v0
; SI-NEXT: v_cvt_f16_f32_e32 v0, v0
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: buffer_store_short v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: s_copysign_out_f16_mag_f64_sign_f16:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
; VI-NEXT: s_load_dword s8, s[0:1], 0x34
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_lshr_b32 s0, s7, 8
; VI-NEXT: s_and_b32 s1, s7, 0x1ff
; VI-NEXT: s_and_b32 s2, s0, 0xffe
; VI-NEXT: s_or_b32 s0, s1, s6
; VI-NEXT: s_cmp_lg_u32 s0, 0
; VI-NEXT: s_cselect_b64 s[0:1], -1, 0
; VI-NEXT: v_cndmask_b32_e64 v2, 0, 1, s[0:1]
; VI-NEXT: v_readfirstlane_b32 s0, v2
; VI-NEXT: s_bfe_u32 s1, s7, 0xb0014
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: s_or_b32 s4, s2, s0
; VI-NEXT: s_sub_i32 s2, 0x3f1, s1
; VI-NEXT: v_med3_i32 v2, s2, 0, 13
; VI-NEXT: s_or_b32 s0, s4, 0x1000
; VI-NEXT: v_readfirstlane_b32 s2, v2
; VI-NEXT: s_lshr_b32 s2, s0, s2
; VI-NEXT: v_lshlrev_b32_e64 v2, v2, s2
; VI-NEXT: v_cmp_ne_u32_e32 vcc, s0, v2
; VI-NEXT: v_mov_b32_e32 v1, s5
; VI-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc
; VI-NEXT: s_add_i32 s5, s1, 0xfffffc10
; VI-NEXT: v_readfirstlane_b32 s0, v2
; VI-NEXT: s_lshl_b32 s1, s5, 12
; VI-NEXT: s_or_b32 s0, s2, s0
; VI-NEXT: s_or_b32 s1, s4, s1
; VI-NEXT: s_cmp_lt_i32 s5, 1
; VI-NEXT: s_cselect_b32 s6, s0, s1
; VI-NEXT: s_and_b32 s2, s6, 7
; VI-NEXT: s_cmp_gt_i32 s2, 5
; VI-NEXT: s_cselect_b64 s[0:1], -1, 0
; VI-NEXT: s_cmp_eq_u32 s2, 3
; VI-NEXT: s_cselect_b64 s[2:3], -1, 0
; VI-NEXT: s_or_b64 s[0:1], s[2:3], s[0:1]
; VI-NEXT: s_lshr_b32 s2, s6, 2
; VI-NEXT: s_cmp_lg_u64 s[0:1], 0
; VI-NEXT: s_addc_u32 s0, s2, 0
; VI-NEXT: s_cmp_lt_i32 s5, 31
; VI-NEXT: s_cselect_b32 s2, s0, 0x7c00
; VI-NEXT: s_cmp_lg_u32 s4, 0
; VI-NEXT: s_cselect_b64 s[0:1], -1, 0
; VI-NEXT: v_cndmask_b32_e64 v2, 0, 1, s[0:1]
; VI-NEXT: v_lshlrev_b32_e32 v2, 9, v2
; VI-NEXT: s_cmpk_eq_i32 s5, 0x40f
; VI-NEXT: v_or_b32_e32 v2, 0x7c00, v2
; VI-NEXT: v_mov_b32_e32 v3, s2
; VI-NEXT: s_cselect_b64 vcc, -1, 0
; VI-NEXT: s_lshr_b32 s0, s7, 16
; VI-NEXT: v_cndmask_b32_e32 v2, v3, v2, vcc
; VI-NEXT: s_and_b32 s0, s0, 0x8000
; VI-NEXT: v_or_b32_e32 v2, s0, v2
; VI-NEXT: s_movk_i32 s0, 0x7fff
; VI-NEXT: v_mov_b32_e32 v3, s8
; VI-NEXT: v_bfi_b32 v2, s0, v2, v3
; VI-NEXT: flat_store_short v[0:1], v2
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: s_copysign_out_f16_mag_f64_sign_f16:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
; GFX9-NEXT: s_load_dword s8, s[0:1], 0x34
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: s_lshr_b32 s0, s7, 8
; GFX9-NEXT: s_and_b32 s1, s7, 0x1ff
; GFX9-NEXT: s_and_b32 s2, s0, 0xffe
; GFX9-NEXT: s_or_b32 s0, s1, s6
; GFX9-NEXT: s_cmp_lg_u32 s0, 0
; GFX9-NEXT: s_cselect_b64 s[0:1], -1, 0
; GFX9-NEXT: v_cndmask_b32_e64 v1, 0, 1, s[0:1]
; GFX9-NEXT: v_readfirstlane_b32 s0, v1
; GFX9-NEXT: s_bfe_u32 s1, s7, 0xb0014
; GFX9-NEXT: s_or_b32 s6, s2, s0
; GFX9-NEXT: s_sub_i32 s2, 0x3f1, s1
; GFX9-NEXT: v_med3_i32 v1, s2, 0, 13
; GFX9-NEXT: s_or_b32 s0, s6, 0x1000
; GFX9-NEXT: v_readfirstlane_b32 s2, v1
; GFX9-NEXT: s_lshr_b32 s2, s0, s2
; GFX9-NEXT: v_lshlrev_b32_e64 v1, v1, s2
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, s0, v1
; GFX9-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc
; GFX9-NEXT: s_add_i32 s9, s1, 0xfffffc10
; GFX9-NEXT: v_readfirstlane_b32 s0, v1
; GFX9-NEXT: s_lshl_b32 s1, s9, 12
; GFX9-NEXT: s_or_b32 s0, s2, s0
; GFX9-NEXT: s_or_b32 s1, s6, s1
; GFX9-NEXT: s_cmp_lt_i32 s9, 1
; GFX9-NEXT: s_cselect_b32 s10, s0, s1
; GFX9-NEXT: s_and_b32 s2, s10, 7
; GFX9-NEXT: s_cmp_gt_i32 s2, 5
; GFX9-NEXT: s_cselect_b64 s[0:1], -1, 0
; GFX9-NEXT: s_cmp_eq_u32 s2, 3
; GFX9-NEXT: s_cselect_b64 s[2:3], -1, 0
; GFX9-NEXT: s_or_b64 s[0:1], s[2:3], s[0:1]
; GFX9-NEXT: s_lshr_b32 s2, s10, 2
; GFX9-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX9-NEXT: s_addc_u32 s0, s2, 0
; GFX9-NEXT: s_cmp_lt_i32 s9, 31
; GFX9-NEXT: s_cselect_b32 s2, s0, 0x7c00
; GFX9-NEXT: s_cmp_lg_u32 s6, 0
; GFX9-NEXT: s_cselect_b64 s[0:1], -1, 0
; GFX9-NEXT: v_cndmask_b32_e64 v1, 0, 1, s[0:1]
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 9, v1
; GFX9-NEXT: s_cmpk_eq_i32 s9, 0x40f
; GFX9-NEXT: v_or_b32_e32 v1, 0x7c00, v1
; GFX9-NEXT: v_mov_b32_e32 v2, s2
; GFX9-NEXT: s_cselect_b64 vcc, -1, 0
; GFX9-NEXT: s_lshr_b32 s0, s7, 16
; GFX9-NEXT: v_cndmask_b32_e32 v1, v2, v1, vcc
; GFX9-NEXT: s_and_b32 s0, s0, 0x8000
; GFX9-NEXT: v_or_b32_e32 v1, s0, v1
; GFX9-NEXT: s_movk_i32 s0, 0x7fff
; GFX9-NEXT: v_mov_b32_e32 v2, s8
; GFX9-NEXT: v_bfi_b32 v1, s0, v1, v2
; GFX9-NEXT: global_store_short v0, v1, s[4:5]
; GFX9-NEXT: s_endpgm
;
; GFX11-LABEL: s_copysign_out_f16_mag_f64_sign_f16:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_clause 0x1
; GFX11-NEXT: s_load_b128 s[4:7], s[0:1], 0x24
; GFX11-NEXT: s_load_b32 s0, s[0:1], 0x34
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: s_and_b32 s1, s7, 0x1ff
; GFX11-NEXT: s_lshr_b32 s2, s7, 8
; GFX11-NEXT: s_or_b32 s1, s1, s6
; GFX11-NEXT: s_and_b32 s2, s2, 0xffe
; GFX11-NEXT: s_cmp_lg_u32 s1, 0
; GFX11-NEXT: s_cselect_b32 s1, -1, 0
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
; GFX11-NEXT: v_cndmask_b32_e64 v0, 0, 1, s1
; GFX11-NEXT: s_bfe_u32 s1, s7, 0xb0014
; GFX11-NEXT: s_sub_i32 s3, 0x3f1, s1
; GFX11-NEXT: s_addk_i32 s1, 0xfc10
; GFX11-NEXT: v_med3_i32 v1, s3, 0, 13
; GFX11-NEXT: v_readfirstlane_b32 s3, v0
; GFX11-NEXT: s_lshl_b32 s8, s1, 12
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-NEXT: v_readfirstlane_b32 s6, v1
; GFX11-NEXT: s_or_b32 s2, s2, s3
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
; GFX11-NEXT: s_or_b32 s3, s2, 0x1000
; GFX11-NEXT: s_or_b32 s8, s2, s8
; GFX11-NEXT: s_lshr_b32 s6, s3, s6
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
; GFX11-NEXT: v_lshlrev_b32_e64 v0, v1, s6
; GFX11-NEXT: v_mov_b32_e32 v1, 0
; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, s3, v0
; GFX11-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_readfirstlane_b32 s3, v0
; GFX11-NEXT: s_or_b32 s3, s6, s3
; GFX11-NEXT: s_cmp_lt_i32 s1, 1
; GFX11-NEXT: s_cselect_b32 s3, s3, s8
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
; GFX11-NEXT: s_and_b32 s6, s3, 7
; GFX11-NEXT: s_cmp_gt_i32 s6, 5
; GFX11-NEXT: s_cselect_b32 s8, -1, 0
; GFX11-NEXT: s_cmp_eq_u32 s6, 3
; GFX11-NEXT: s_cselect_b32 s6, -1, 0
; GFX11-NEXT: s_lshr_b32 s3, s3, 2
; GFX11-NEXT: s_or_b32 s6, s6, s8
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_cmp_lg_u32 s6, 0
; GFX11-NEXT: s_addc_u32 s3, s3, 0
; GFX11-NEXT: s_cmp_lt_i32 s1, 31
; GFX11-NEXT: s_cselect_b32 s3, s3, 0x7c00
; GFX11-NEXT: s_cmp_lg_u32 s2, 0
; GFX11-NEXT: s_cselect_b32 s2, -1, 0
; GFX11-NEXT: s_cmpk_eq_i32 s1, 0x40f
; GFX11-NEXT: v_cndmask_b32_e64 v0, 0, 1, s2
; GFX11-NEXT: s_cselect_b32 vcc_lo, -1, 0
; GFX11-NEXT: s_lshr_b32 s1, s7, 16
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-NEXT: s_and_b32 s1, s1, 0x8000
; GFX11-NEXT: v_lshlrev_b32_e32 v0, 9, v0
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_or_b32_e32 v0, 0x7c00, v0
; GFX11-NEXT: v_cndmask_b32_e32 v0, s3, v0, vcc_lo
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_or_b32_e32 v0, s1, v0
; GFX11-NEXT: v_bfi_b32 v0, 0x7fff, v0, s0
; GFX11-NEXT: global_store_b16 v1, v0, s[4:5]
; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
; GFX11-NEXT: s_endpgm
  %mag.trunc = fptrunc double %mag to half
  %result = call half @llvm.copysign.f16(half %mag.trunc, half %sign)
  store half %result, ptr addrspace(1) %arg_out
  ret void
}

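; Each element of the <2 x half> gets its own v_bfi_b32; the two results are repacked into one
; dword (v_or_b32 variants on SI/VI, v_and plus v_lshl_or_b32 on GFX9+).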
define amdgpu_kernel void @s_copysign_v2f16(ptr addrspace(1) %arg_out, <2 x half> %arg_mag, <2 x half> %arg_sign) {
; SI-LABEL: s_copysign_v2f16:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_lshr_b32 s4, s2, 16
; SI-NEXT: s_lshr_b32 s5, s3, 16
; SI-NEXT: v_cvt_f32_f16_e32 v0, s4
; SI-NEXT: v_cvt_f32_f16_e32 v1, s5
; SI-NEXT: v_cvt_f32_f16_e32 v2, s2
; SI-NEXT: v_cvt_f32_f16_e32 v3, s3
; SI-NEXT: s_brev_b32 s2, -2
; SI-NEXT: v_bfi_b32 v0, s2, v0, v1
; SI-NEXT: v_cvt_f16_f32_e32 v0, v0
; SI-NEXT: v_bfi_b32 v1, s2, v2, v3
; SI-NEXT: v_cvt_f16_f32_e32 v1, v1
; SI-NEXT: s_mov_b32 s4, s0
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0
; SI-NEXT: s_mov_b32 s5, s1
; SI-NEXT: v_or_b32_e32 v0, v1, v0
; SI-NEXT: buffer_store_dword v0, off, s[4:7], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: s_copysign_v2f16:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
; VI-NEXT: s_movk_i32 s4, 0x7fff
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s2
; VI-NEXT: v_mov_b32_e32 v1, s3
; VI-NEXT: s_lshr_b32 s3, s3, 16
; VI-NEXT: s_lshr_b32 s2, s2, 16
; VI-NEXT: v_bfi_b32 v0, s4, v0, v1
; VI-NEXT: v_mov_b32_e32 v1, s2
; VI-NEXT: v_mov_b32_e32 v2, s3
; VI-NEXT: v_bfi_b32 v1, s4, v1, v2
; VI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; VI-NEXT: v_or_b32_sdwa v2, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: flat_store_dword v[0:1], v2
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: s_copysign_v2f16:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
; GFX9-NEXT: s_movk_i32 s4, 0x7fff
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v1, s2
; GFX9-NEXT: v_mov_b32_e32 v2, s3
; GFX9-NEXT: s_lshr_b32 s3, s3, 16
; GFX9-NEXT: s_lshr_b32 s2, s2, 16
; GFX9-NEXT: v_bfi_b32 v1, s4, v1, v2
; GFX9-NEXT: v_mov_b32_e32 v2, s2
; GFX9-NEXT: v_mov_b32_e32 v3, s3
; GFX9-NEXT: v_bfi_b32 v2, s4, v2, v3
; GFX9-NEXT: v_and_b32_e32 v1, 0xffff, v1
; GFX9-NEXT: v_lshl_or_b32 v1, v2, 16, v1
; GFX9-NEXT: global_store_dword v0, v1, s[0:1]
; GFX9-NEXT: s_endpgm
;
; GFX11-LABEL: s_copysign_v2f16:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_load_b128 s[0:3], s[0:1], 0x24
; GFX11-NEXT: v_mov_b32_e32 v2, 0
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: v_mov_b32_e32 v0, s3
; GFX11-NEXT: s_lshr_b32 s3, s3, 16
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-NEXT: v_mov_b32_e32 v1, s3
; GFX11-NEXT: v_bfi_b32 v0, 0x7fff, s2, v0
; GFX11-NEXT: s_lshr_b32 s2, s2, 16
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instid1(SALU_CYCLE_1)
; GFX11-NEXT: v_bfi_b32 v1, 0x7fff, s2, v1
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX11-NEXT: v_lshl_or_b32 v0, v1, 16, v0
; GFX11-NEXT: global_store_b32 v2, v0, s[0:1]
; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
; GFX11-NEXT: s_endpgm
  %out = call <2 x half> @llvm.copysign.v2f16(<2 x half> %arg_mag, <2 x half> %arg_sign)
  store <2 x half> %out, ptr addrspace(1) %arg_out
  ret void
}

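; The <3 x half> case is split into a packed dword for the first two elements plus a separate
; 16-bit store at offset 4 for the third.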
define amdgpu_kernel void @s_copysign_v3f16(ptr addrspace(1) %arg_out, <3 x half> %arg_mag, <3 x half> %arg_sign) {
; SI-LABEL: s_copysign_v3f16:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0xb
; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_lshr_b32 s2, s4, 16
; SI-NEXT: v_cvt_f32_f16_e32 v2, s2
; SI-NEXT: s_lshr_b32 s2, s6, 16
; SI-NEXT: v_cvt_f32_f16_e32 v3, s2
; SI-NEXT: v_cvt_f32_f16_e32 v0, s5
; SI-NEXT: v_cvt_f32_f16_e32 v1, s4
; SI-NEXT: v_cvt_f32_f16_e32 v4, s7
; SI-NEXT: v_cvt_f32_f16_e32 v5, s6
; SI-NEXT: s_brev_b32 s2, -2
; SI-NEXT: v_bfi_b32 v2, s2, v2, v3
; SI-NEXT: v_cvt_f16_f32_e32 v2, v2
; SI-NEXT: v_bfi_b32 v1, s2, v1, v5
; SI-NEXT: v_bfi_b32 v0, s2, v0, v4
; SI-NEXT: v_cvt_f16_f32_e32 v1, v1
; SI-NEXT: v_cvt_f16_f32_e32 v0, v0
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2
; SI-NEXT: v_or_b32_e32 v1, v1, v2
; SI-NEXT: buffer_store_short v0, off, s[0:3], 0 offset:4
; SI-NEXT: buffer_store_dword v1, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: s_copysign_v3f16:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x2c
; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
; VI-NEXT: s_movk_i32 s2, 0x7fff
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: v_mov_b32_e32 v1, s6
; VI-NEXT: s_lshr_b32 s3, s6, 16
; VI-NEXT: s_lshr_b32 s4, s4, 16
; VI-NEXT: v_bfi_b32 v0, s2, v0, v1
; VI-NEXT: v_mov_b32_e32 v1, s4
; VI-NEXT: v_mov_b32_e32 v2, s3
; VI-NEXT: v_bfi_b32 v1, s2, v1, v2
; VI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; VI-NEXT: v_or_b32_sdwa v2, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_mov_b32_e32 v0, s5
; VI-NEXT: v_mov_b32_e32 v1, s7
; VI-NEXT: v_bfi_b32 v3, s2, v0, v1
; VI-NEXT: s_add_u32 s2, s0, 4
; VI-NEXT: s_addc_u32 s3, s1, 0
; VI-NEXT: v_mov_b32_e32 v0, s2
; VI-NEXT: v_mov_b32_e32 v1, s3
; VI-NEXT: flat_store_short v[0:1], v3
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: flat_store_dword v[0:1], v2
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: s_copysign_v3f16:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x2c
; GFX9-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x24
; GFX9-NEXT: s_movk_i32 s0, 0x7fff
; GFX9-NEXT: v_mov_b32_e32 v0, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v1, s4
; GFX9-NEXT: v_mov_b32_e32 v2, s6
; GFX9-NEXT: s_lshr_b32 s1, s6, 16
; GFX9-NEXT: s_lshr_b32 s4, s4, 16
; GFX9-NEXT: v_bfi_b32 v1, s0, v1, v2
; GFX9-NEXT: v_mov_b32_e32 v2, s4
; GFX9-NEXT: v_mov_b32_e32 v3, s1
; GFX9-NEXT: v_bfi_b32 v2, s0, v2, v3
; GFX9-NEXT: v_and_b32_e32 v1, 0xffff, v1
; GFX9-NEXT: v_lshl_or_b32 v1, v2, 16, v1
; GFX9-NEXT: v_mov_b32_e32 v2, s5
; GFX9-NEXT: v_mov_b32_e32 v3, s7
; GFX9-NEXT: v_bfi_b32 v2, s0, v2, v3
; GFX9-NEXT: global_store_short v0, v2, s[2:3] offset:4
; GFX9-NEXT: global_store_dword v0, v1, s[2:3]
; GFX9-NEXT: s_endpgm
;
; GFX11-LABEL: s_copysign_v3f16:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_clause 0x1
; GFX11-NEXT: s_load_b128 s[4:7], s[0:1], 0x2c
; GFX11-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
; GFX11-NEXT: v_mov_b32_e32 v3, 0
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: s_lshr_b32 s2, s6, 16
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(VALU_DEP_2)
; GFX11-NEXT: v_dual_mov_b32 v0, s6 :: v_dual_mov_b32 v1, s2
; GFX11-NEXT: s_lshr_b32 s2, s4, 16
; GFX11-NEXT: v_mov_b32_e32 v2, s7
; GFX11-NEXT: v_bfi_b32 v0, 0x7fff, s4, v0
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
; GFX11-NEXT: v_bfi_b32 v1, 0x7fff, s2, v1
; GFX11-NEXT: v_bfi_b32 v2, 0x7fff, s5, v2
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX11-NEXT: v_lshl_or_b32 v0, v1, 16, v0
; GFX11-NEXT: s_clause 0x1
; GFX11-NEXT: global_store_b16 v3, v2, s[0:1] offset:4
; GFX11-NEXT: global_store_b32 v3, v0, s[0:1]
; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
; GFX11-NEXT: s_endpgm
  %out = call <3 x half> @llvm.copysign.v3f16(<3 x half> %arg_mag, <3 x half> %arg_sign)
  store <3 x half> %out, ptr addrspace(1) %arg_out
  ret void
}

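; The four elements are combined pairwise into two dwords and written with a single 64-bit store.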
define amdgpu_kernel void @s_copysign_v4f16(ptr addrspace(1) %arg_out, <4 x half> %arg_mag, <4 x half> %arg_sign) {
; SI-LABEL: s_copysign_v4f16:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0xb
; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_lshr_b32 s8, s4, 16
; SI-NEXT: v_cvt_f32_f16_e32 v2, s4
; SI-NEXT: s_lshr_b32 s4, s6, 16
; SI-NEXT: s_lshr_b32 s9, s5, 16
; SI-NEXT: v_cvt_f32_f16_e32 v4, s4
; SI-NEXT: s_lshr_b32 s4, s7, 16
; SI-NEXT: v_cvt_f32_f16_e32 v0, s8
; SI-NEXT: v_cvt_f32_f16_e32 v1, s9
; SI-NEXT: v_cvt_f32_f16_e32 v5, s4
; SI-NEXT: v_cvt_f32_f16_e32 v3, s5
; SI-NEXT: v_cvt_f32_f16_e32 v6, s6
; SI-NEXT: v_cvt_f32_f16_e32 v7, s7
; SI-NEXT: s_brev_b32 s4, -2
; SI-NEXT: v_bfi_b32 v1, s4, v1, v5
; SI-NEXT: v_bfi_b32 v0, s4, v0, v4
; SI-NEXT: v_cvt_f16_f32_e32 v1, v1
; SI-NEXT: v_bfi_b32 v3, s4, v3, v7
; SI-NEXT: v_cvt_f16_f32_e32 v0, v0
; SI-NEXT: v_bfi_b32 v2, s4, v2, v6
; SI-NEXT: v_cvt_f16_f32_e32 v3, v3
; SI-NEXT: v_cvt_f16_f32_e32 v2, v2
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0
; SI-NEXT: v_or_b32_e32 v1, v3, v1
; SI-NEXT: v_or_b32_e32 v0, v2, v0
; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: s_copysign_v4f16:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x2c
; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
; VI-NEXT: s_movk_i32 s2, 0x7fff
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s5
; VI-NEXT: v_mov_b32_e32 v1, s7
; VI-NEXT: s_lshr_b32 s3, s7, 16
; VI-NEXT: s_lshr_b32 s5, s5, 16
; VI-NEXT: v_bfi_b32 v0, s2, v0, v1
; VI-NEXT: v_mov_b32_e32 v1, s5
; VI-NEXT: v_mov_b32_e32 v2, s3
; VI-NEXT: v_bfi_b32 v1, s2, v1, v2
; VI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; VI-NEXT: v_or_b32_sdwa v1, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: v_mov_b32_e32 v2, s6
; VI-NEXT: s_lshr_b32 s3, s6, 16
; VI-NEXT: s_lshr_b32 s4, s4, 16
; VI-NEXT: v_bfi_b32 v0, s2, v0, v2
; VI-NEXT: v_mov_b32_e32 v2, s4
; VI-NEXT: v_mov_b32_e32 v3, s3
; VI-NEXT: v_bfi_b32 v2, s2, v2, v3
; VI-NEXT: v_lshlrev_b32_e32 v2, 16, v2
; VI-NEXT: v_or_b32_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_mov_b32_e32 v3, s1
; VI-NEXT: v_mov_b32_e32 v2, s0
; VI-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
; VI-NEXT: s_endpgm
;
; GFX9-LABEL: s_copysign_v4f16:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x2c
; GFX9-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x24
; GFX9-NEXT: s_movk_i32 s0, 0x7fff
; GFX9-NEXT: v_mov_b32_e32 v2, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v0, s5
; GFX9-NEXT: v_mov_b32_e32 v1, s7
; GFX9-NEXT: s_lshr_b32 s1, s7, 16
; GFX9-NEXT: s_lshr_b32 s5, s5, 16
; GFX9-NEXT: v_bfi_b32 v0, s0, v0, v1
; GFX9-NEXT: v_mov_b32_e32 v1, s5
; GFX9-NEXT: v_mov_b32_e32 v3, s1
; GFX9-NEXT: v_bfi_b32 v1, s0, v1, v3
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX9-NEXT: v_lshl_or_b32 v1, v1, 16, v0
; GFX9-NEXT: v_mov_b32_e32 v0, s4
; GFX9-NEXT: v_mov_b32_e32 v3, s6
; GFX9-NEXT: s_lshr_b32 s1, s6, 16
; GFX9-NEXT: s_lshr_b32 s4, s4, 16
; GFX9-NEXT: v_bfi_b32 v0, s0, v0, v3
; GFX9-NEXT: v_mov_b32_e32 v3, s4
; GFX9-NEXT: v_mov_b32_e32 v4, s1
; GFX9-NEXT: v_bfi_b32 v3, s0, v3, v4
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX9-NEXT: v_lshl_or_b32 v0, v3, 16, v0
; GFX9-NEXT: global_store_dwordx2 v2, v[0:1], s[2:3]
; GFX9-NEXT: s_endpgm
;
; GFX11-LABEL: s_copysign_v4f16:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_clause 0x1
; GFX11-NEXT: s_load_b128 s[4:7], s[0:1], 0x2c
; GFX11-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: v_dual_mov_b32 v5, 0 :: v_dual_mov_b32 v0, s7
; GFX11-NEXT: v_mov_b32_e32 v1, s6
; GFX11-NEXT: s_lshr_b32 s2, s7, 16
; GFX11-NEXT: s_lshr_b32 s6, s6, 16
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s6
; GFX11-NEXT: v_bfi_b32 v0, 0x7fff, s5, v0
; GFX11-NEXT: v_bfi_b32 v1, 0x7fff, s4, v1
; GFX11-NEXT: s_lshr_b32 s3, s5, 16
; GFX11-NEXT: s_lshr_b32 s2, s4, 16
; GFX11-NEXT: v_bfi_b32 v2, 0x7fff, s3, v2
; GFX11-NEXT: v_bfi_b32 v3, 0x7fff, s2, v3
; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX11-NEXT: v_and_b32_e32 v4, 0xffff, v1
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-NEXT: v_lshl_or_b32 v1, v2, 16, v0
; GFX11-NEXT: v_lshl_or_b32 v0, v3, 16, v4
; GFX11-NEXT: global_store_b64 v5, v[0:1], s[0:1]
; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
; GFX11-NEXT: s_endpgm
  %out = call <4 x half> @llvm.copysign.v4f16(<4 x half> %arg_mag, <4 x half> %arg_sign)
  store <4 x half> %out, ptr addrspace(1) %arg_out
  ret void
}

attributes #0 = { nocallback nofree nosync nounwind speculatable willreturn memory(none) }