The increase in fallbacks that was previously reported were not caused by this change. Original description: This matches InstCombine and DAGCombine. RISC-V only has an ADDI instruction so without this we need additional patterns to do the conversion. Some of the AMDGPU tests look like possible regressions. Maybe some patterns from isel aren't imported.
8214 lines
336 KiB
LLVM
8214 lines
336 KiB
LLVM
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
|
|
; RUN: llc -global-isel -mtriple=amdgcn-amd-amdpal -mcpu=tahiti -o - %s | FileCheck -check-prefixes=GCN,GFX6 %s
|
|
; RUN: llc -global-isel -mtriple=amdgcn-amd-amdpal -mcpu=fiji -o - %s | FileCheck -check-prefixes=GCN,GFX8 %s
|
|
; RUN: llc -global-isel -mtriple=amdgcn-amd-amdpal -mcpu=gfx900 -o - %s | FileCheck -check-prefixes=GCN,GFX9 %s
|
|
; RUN: llc -global-isel -mtriple=amdgcn-amd-amdpal -mcpu=gfx1010 -o - %s | FileCheck -check-prefixes=GCN,GFX10 %s
|
|
; RUN: llc -global-isel -mtriple=amdgcn-amd-amdpal -mcpu=gfx1100 -o - %s | FileCheck -check-prefixes=GFX11 %s
|
|
|
|
define amdgpu_ps i7 @s_fshr_i7(i7 inreg %lhs, i7 inreg %rhs, i7 inreg %amt) {
|
|
; GFX6-LABEL: s_fshr_i7:
|
|
; GFX6: ; %bb.0:
|
|
; GFX6-NEXT: v_cvt_f32_ubyte0_e32 v0, 7
|
|
; GFX6-NEXT: v_rcp_iflag_f32_e32 v0, v0
|
|
; GFX6-NEXT: s_and_b32 s2, s2, 0x7f
|
|
; GFX6-NEXT: s_lshl_b32 s0, s0, 1
|
|
; GFX6-NEXT: s_and_b32 s1, s1, 0x7f
|
|
; GFX6-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0
|
|
; GFX6-NEXT: v_cvt_u32_f32_e32 v0, v0
|
|
; GFX6-NEXT: v_mul_lo_u32 v1, v0, -7
|
|
; GFX6-NEXT: v_mul_hi_u32 v1, v0, v1
|
|
; GFX6-NEXT: v_add_i32_e32 v0, vcc, v0, v1
|
|
; GFX6-NEXT: v_mul_hi_u32 v0, s2, v0
|
|
; GFX6-NEXT: v_mul_lo_u32 v0, v0, 7
|
|
; GFX6-NEXT: v_sub_i32_e32 v0, vcc, s2, v0
|
|
; GFX6-NEXT: v_add_i32_e32 v1, vcc, -7, v0
|
|
; GFX6-NEXT: v_cmp_le_u32_e32 vcc, 7, v0
|
|
; GFX6-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
|
|
; GFX6-NEXT: v_add_i32_e32 v1, vcc, -7, v0
|
|
; GFX6-NEXT: v_cmp_le_u32_e32 vcc, 7, v0
|
|
; GFX6-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
|
|
; GFX6-NEXT: v_sub_i32_e32 v1, vcc, 6, v0
|
|
; GFX6-NEXT: v_and_b32_e32 v0, 0x7f, v0
|
|
; GFX6-NEXT: v_and_b32_e32 v1, 0x7f, v1
|
|
; GFX6-NEXT: v_lshl_b32_e32 v1, s0, v1
|
|
; GFX6-NEXT: v_lshr_b32_e32 v0, s1, v0
|
|
; GFX6-NEXT: v_or_b32_e32 v0, v1, v0
|
|
; GFX6-NEXT: v_readfirstlane_b32 s0, v0
|
|
; GFX6-NEXT: ; return to shader part epilog
|
|
;
|
|
; GFX8-LABEL: s_fshr_i7:
|
|
; GFX8: ; %bb.0:
|
|
; GFX8-NEXT: v_cvt_f32_ubyte0_e32 v0, 7
|
|
; GFX8-NEXT: v_rcp_iflag_f32_e32 v0, v0
|
|
; GFX8-NEXT: s_and_b32 s2, s2, 0x7f
|
|
; GFX8-NEXT: s_lshl_b32 s0, s0, 1
|
|
; GFX8-NEXT: s_and_b32 s1, s1, 0x7f
|
|
; GFX8-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0
|
|
; GFX8-NEXT: v_cvt_u32_f32_e32 v0, v0
|
|
; GFX8-NEXT: v_mul_lo_u32 v1, v0, -7
|
|
; GFX8-NEXT: v_mul_hi_u32 v1, v0, v1
|
|
; GFX8-NEXT: v_add_u32_e32 v0, vcc, v0, v1
|
|
; GFX8-NEXT: v_mul_hi_u32 v0, s2, v0
|
|
; GFX8-NEXT: v_mul_lo_u32 v0, v0, 7
|
|
; GFX8-NEXT: v_sub_u32_e32 v0, vcc, s2, v0
|
|
; GFX8-NEXT: v_add_u32_e32 v1, vcc, -7, v0
|
|
; GFX8-NEXT: v_cmp_le_u32_e32 vcc, 7, v0
|
|
; GFX8-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
|
|
; GFX8-NEXT: v_add_u32_e32 v1, vcc, -7, v0
|
|
; GFX8-NEXT: v_cmp_le_u32_e32 vcc, 7, v0
|
|
; GFX8-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
|
|
; GFX8-NEXT: v_sub_u16_e32 v1, 6, v0
|
|
; GFX8-NEXT: v_and_b32_e32 v0, 0x7f, v0
|
|
; GFX8-NEXT: v_and_b32_e32 v1, 0x7f, v1
|
|
; GFX8-NEXT: v_lshlrev_b16_e64 v1, v1, s0
|
|
; GFX8-NEXT: v_lshrrev_b16_e64 v0, v0, s1
|
|
; GFX8-NEXT: v_or_b32_e32 v0, v1, v0
|
|
; GFX8-NEXT: v_readfirstlane_b32 s0, v0
|
|
; GFX8-NEXT: ; return to shader part epilog
|
|
;
|
|
; GFX9-LABEL: s_fshr_i7:
|
|
; GFX9: ; %bb.0:
|
|
; GFX9-NEXT: v_cvt_f32_ubyte0_e32 v0, 7
|
|
; GFX9-NEXT: v_rcp_iflag_f32_e32 v0, v0
|
|
; GFX9-NEXT: s_and_b32 s2, s2, 0x7f
|
|
; GFX9-NEXT: s_lshl_b32 s0, s0, 1
|
|
; GFX9-NEXT: s_and_b32 s1, s1, 0x7f
|
|
; GFX9-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0
|
|
; GFX9-NEXT: v_cvt_u32_f32_e32 v0, v0
|
|
; GFX9-NEXT: v_mul_lo_u32 v1, v0, -7
|
|
; GFX9-NEXT: v_mul_hi_u32 v1, v0, v1
|
|
; GFX9-NEXT: v_add_u32_e32 v0, v0, v1
|
|
; GFX9-NEXT: v_mul_hi_u32 v0, s2, v0
|
|
; GFX9-NEXT: v_mul_lo_u32 v0, v0, 7
|
|
; GFX9-NEXT: v_sub_u32_e32 v0, s2, v0
|
|
; GFX9-NEXT: v_add_u32_e32 v1, -7, v0
|
|
; GFX9-NEXT: v_cmp_le_u32_e32 vcc, 7, v0
|
|
; GFX9-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
|
|
; GFX9-NEXT: v_add_u32_e32 v1, -7, v0
|
|
; GFX9-NEXT: v_cmp_le_u32_e32 vcc, 7, v0
|
|
; GFX9-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
|
|
; GFX9-NEXT: v_sub_u16_e32 v1, 6, v0
|
|
; GFX9-NEXT: v_and_b32_e32 v0, 0x7f, v0
|
|
; GFX9-NEXT: v_and_b32_e32 v1, 0x7f, v1
|
|
; GFX9-NEXT: v_lshlrev_b16_e64 v1, v1, s0
|
|
; GFX9-NEXT: v_lshrrev_b16_e64 v0, v0, s1
|
|
; GFX9-NEXT: v_or_b32_e32 v0, v1, v0
|
|
; GFX9-NEXT: v_readfirstlane_b32 s0, v0
|
|
; GFX9-NEXT: ; return to shader part epilog
|
|
;
|
|
; GFX10-LABEL: s_fshr_i7:
|
|
; GFX10: ; %bb.0:
|
|
; GFX10-NEXT: v_cvt_f32_ubyte0_e32 v0, 7
|
|
; GFX10-NEXT: s_and_b32 s2, s2, 0x7f
|
|
; GFX10-NEXT: s_lshl_b32 s0, s0, 1
|
|
; GFX10-NEXT: s_and_b32 s1, s1, 0x7f
|
|
; GFX10-NEXT: v_rcp_iflag_f32_e32 v0, v0
|
|
; GFX10-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0
|
|
; GFX10-NEXT: v_cvt_u32_f32_e32 v0, v0
|
|
; GFX10-NEXT: v_mul_lo_u32 v1, v0, -7
|
|
; GFX10-NEXT: v_mul_hi_u32 v1, v0, v1
|
|
; GFX10-NEXT: v_add_nc_u32_e32 v0, v0, v1
|
|
; GFX10-NEXT: v_mul_hi_u32 v0, s2, v0
|
|
; GFX10-NEXT: v_mul_lo_u32 v0, v0, 7
|
|
; GFX10-NEXT: v_sub_nc_u32_e32 v0, s2, v0
|
|
; GFX10-NEXT: v_add_nc_u32_e32 v1, -7, v0
|
|
; GFX10-NEXT: v_cmp_le_u32_e32 vcc_lo, 7, v0
|
|
; GFX10-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc_lo
|
|
; GFX10-NEXT: v_add_nc_u32_e32 v1, -7, v0
|
|
; GFX10-NEXT: v_cmp_le_u32_e32 vcc_lo, 7, v0
|
|
; GFX10-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc_lo
|
|
; GFX10-NEXT: v_sub_nc_u16 v1, 6, v0
|
|
; GFX10-NEXT: v_and_b32_e32 v0, 0x7f, v0
|
|
; GFX10-NEXT: v_and_b32_e32 v1, 0x7f, v1
|
|
; GFX10-NEXT: v_lshrrev_b16 v0, v0, s1
|
|
; GFX10-NEXT: v_lshlrev_b16 v1, v1, s0
|
|
; GFX10-NEXT: v_or_b32_e32 v0, v1, v0
|
|
; GFX10-NEXT: v_readfirstlane_b32 s0, v0
|
|
; GFX10-NEXT: ; return to shader part epilog
|
|
;
|
|
; GFX11-LABEL: s_fshr_i7:
|
|
; GFX11: ; %bb.0:
|
|
; GFX11-NEXT: v_cvt_f32_ubyte0_e32 v0, 7
|
|
; GFX11-NEXT: s_and_b32 s2, s2, 0x7f
|
|
; GFX11-NEXT: s_lshl_b32 s0, s0, 1
|
|
; GFX11-NEXT: s_and_b32 s1, s1, 0x7f
|
|
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
|
|
; GFX11-NEXT: v_rcp_iflag_f32_e32 v0, v0
|
|
; GFX11-NEXT: s_waitcnt_depctr 0xfff
|
|
; GFX11-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0
|
|
; GFX11-NEXT: v_cvt_u32_f32_e32 v0, v0
|
|
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
|
|
; GFX11-NEXT: v_mul_lo_u32 v1, v0, -7
|
|
; GFX11-NEXT: v_mul_hi_u32 v1, v0, v1
|
|
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
|
|
; GFX11-NEXT: v_add_nc_u32_e32 v0, v0, v1
|
|
; GFX11-NEXT: v_mul_hi_u32 v0, s2, v0
|
|
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
|
|
; GFX11-NEXT: v_mul_lo_u32 v0, v0, 7
|
|
; GFX11-NEXT: v_sub_nc_u32_e32 v0, s2, v0
|
|
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
|
|
; GFX11-NEXT: v_add_nc_u32_e32 v1, -7, v0
|
|
; GFX11-NEXT: v_cmp_le_u32_e32 vcc_lo, 7, v0
|
|
; GFX11-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc_lo
|
|
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
|
|
; GFX11-NEXT: v_add_nc_u32_e32 v1, -7, v0
|
|
; GFX11-NEXT: v_cmp_le_u32_e32 vcc_lo, 7, v0
|
|
; GFX11-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc_lo
|
|
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
|
|
; GFX11-NEXT: v_sub_nc_u16 v1, 6, v0
|
|
; GFX11-NEXT: v_and_b32_e32 v0, 0x7f, v0
|
|
; GFX11-NEXT: v_and_b32_e32 v1, 0x7f, v1
|
|
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
|
|
; GFX11-NEXT: v_lshrrev_b16 v0, v0, s1
|
|
; GFX11-NEXT: v_lshlrev_b16 v1, v1, s0
|
|
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
|
|
; GFX11-NEXT: v_or_b32_e32 v0, v1, v0
|
|
; GFX11-NEXT: v_readfirstlane_b32 s0, v0
|
|
; GFX11-NEXT: ; return to shader part epilog
|
|
%result = call i7 @llvm.fshr.i7(i7 %lhs, i7 %rhs, i7 %amt)
|
|
ret i7 %result
|
|
}
|
|
|
|
define i7 @v_fshr_i7(i7 %lhs, i7 %rhs, i7 %amt) {
|
|
; GFX6-LABEL: v_fshr_i7:
|
|
; GFX6: ; %bb.0:
|
|
; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; GFX6-NEXT: v_cvt_f32_ubyte0_e32 v3, 7
|
|
; GFX6-NEXT: v_rcp_iflag_f32_e32 v3, v3
|
|
; GFX6-NEXT: v_and_b32_e32 v2, 0x7f, v2
|
|
; GFX6-NEXT: v_lshlrev_b32_e32 v0, 1, v0
|
|
; GFX6-NEXT: v_and_b32_e32 v1, 0x7f, v1
|
|
; GFX6-NEXT: v_mul_f32_e32 v3, 0x4f7ffffe, v3
|
|
; GFX6-NEXT: v_cvt_u32_f32_e32 v3, v3
|
|
; GFX6-NEXT: v_mul_lo_u32 v4, v3, -7
|
|
; GFX6-NEXT: v_mul_hi_u32 v4, v3, v4
|
|
; GFX6-NEXT: v_add_i32_e32 v3, vcc, v3, v4
|
|
; GFX6-NEXT: v_mul_hi_u32 v3, v2, v3
|
|
; GFX6-NEXT: v_mul_lo_u32 v3, v3, 7
|
|
; GFX6-NEXT: v_sub_i32_e32 v2, vcc, v2, v3
|
|
; GFX6-NEXT: v_add_i32_e32 v3, vcc, -7, v2
|
|
; GFX6-NEXT: v_cmp_le_u32_e32 vcc, 7, v2
|
|
; GFX6-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc
|
|
; GFX6-NEXT: v_add_i32_e32 v3, vcc, -7, v2
|
|
; GFX6-NEXT: v_cmp_le_u32_e32 vcc, 7, v2
|
|
; GFX6-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc
|
|
; GFX6-NEXT: v_sub_i32_e32 v3, vcc, 6, v2
|
|
; GFX6-NEXT: v_and_b32_e32 v2, 0x7f, v2
|
|
; GFX6-NEXT: v_and_b32_e32 v3, 0x7f, v3
|
|
; GFX6-NEXT: v_lshlrev_b32_e32 v0, v3, v0
|
|
; GFX6-NEXT: v_lshrrev_b32_e32 v1, v2, v1
|
|
; GFX6-NEXT: v_or_b32_e32 v0, v0, v1
|
|
; GFX6-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; GFX8-LABEL: v_fshr_i7:
|
|
; GFX8: ; %bb.0:
|
|
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; GFX8-NEXT: v_cvt_f32_ubyte0_e32 v3, 7
|
|
; GFX8-NEXT: v_rcp_iflag_f32_e32 v3, v3
|
|
; GFX8-NEXT: v_and_b32_e32 v2, 0x7f, v2
|
|
; GFX8-NEXT: v_lshlrev_b16_e32 v0, 1, v0
|
|
; GFX8-NEXT: v_and_b32_e32 v1, 0x7f, v1
|
|
; GFX8-NEXT: v_mul_f32_e32 v3, 0x4f7ffffe, v3
|
|
; GFX8-NEXT: v_cvt_u32_f32_e32 v3, v3
|
|
; GFX8-NEXT: v_mul_lo_u32 v4, v3, -7
|
|
; GFX8-NEXT: v_mul_hi_u32 v4, v3, v4
|
|
; GFX8-NEXT: v_add_u32_e32 v3, vcc, v3, v4
|
|
; GFX8-NEXT: v_mul_hi_u32 v3, v2, v3
|
|
; GFX8-NEXT: v_mul_lo_u32 v3, v3, 7
|
|
; GFX8-NEXT: v_sub_u32_e32 v2, vcc, v2, v3
|
|
; GFX8-NEXT: v_add_u32_e32 v3, vcc, -7, v2
|
|
; GFX8-NEXT: v_cmp_le_u32_e32 vcc, 7, v2
|
|
; GFX8-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc
|
|
; GFX8-NEXT: v_add_u32_e32 v3, vcc, -7, v2
|
|
; GFX8-NEXT: v_cmp_le_u32_e32 vcc, 7, v2
|
|
; GFX8-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc
|
|
; GFX8-NEXT: v_sub_u16_e32 v3, 6, v2
|
|
; GFX8-NEXT: v_and_b32_e32 v2, 0x7f, v2
|
|
; GFX8-NEXT: v_and_b32_e32 v3, 0x7f, v3
|
|
; GFX8-NEXT: v_lshlrev_b16_e32 v0, v3, v0
|
|
; GFX8-NEXT: v_lshrrev_b16_e32 v1, v2, v1
|
|
; GFX8-NEXT: v_or_b32_e32 v0, v0, v1
|
|
; GFX8-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; GFX9-LABEL: v_fshr_i7:
|
|
; GFX9: ; %bb.0:
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; GFX9-NEXT: v_cvt_f32_ubyte0_e32 v3, 7
|
|
; GFX9-NEXT: v_rcp_iflag_f32_e32 v3, v3
|
|
; GFX9-NEXT: v_and_b32_e32 v2, 0x7f, v2
|
|
; GFX9-NEXT: v_lshlrev_b16_e32 v0, 1, v0
|
|
; GFX9-NEXT: v_and_b32_e32 v1, 0x7f, v1
|
|
; GFX9-NEXT: v_mul_f32_e32 v3, 0x4f7ffffe, v3
|
|
; GFX9-NEXT: v_cvt_u32_f32_e32 v3, v3
|
|
; GFX9-NEXT: v_mul_lo_u32 v4, v3, -7
|
|
; GFX9-NEXT: v_mul_hi_u32 v4, v3, v4
|
|
; GFX9-NEXT: v_add_u32_e32 v3, v3, v4
|
|
; GFX9-NEXT: v_mul_hi_u32 v3, v2, v3
|
|
; GFX9-NEXT: v_mul_lo_u32 v3, v3, 7
|
|
; GFX9-NEXT: v_sub_u32_e32 v2, v2, v3
|
|
; GFX9-NEXT: v_add_u32_e32 v3, -7, v2
|
|
; GFX9-NEXT: v_cmp_le_u32_e32 vcc, 7, v2
|
|
; GFX9-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc
|
|
; GFX9-NEXT: v_add_u32_e32 v3, -7, v2
|
|
; GFX9-NEXT: v_cmp_le_u32_e32 vcc, 7, v2
|
|
; GFX9-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc
|
|
; GFX9-NEXT: v_sub_u16_e32 v3, 6, v2
|
|
; GFX9-NEXT: v_and_b32_e32 v2, 0x7f, v2
|
|
; GFX9-NEXT: v_and_b32_e32 v3, 0x7f, v3
|
|
; GFX9-NEXT: v_lshlrev_b16_e32 v0, v3, v0
|
|
; GFX9-NEXT: v_lshrrev_b16_e32 v1, v2, v1
|
|
; GFX9-NEXT: v_or_b32_e32 v0, v0, v1
|
|
; GFX9-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; GFX10-LABEL: v_fshr_i7:
|
|
; GFX10: ; %bb.0:
|
|
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; GFX10-NEXT: v_cvt_f32_ubyte0_e32 v3, 7
|
|
; GFX10-NEXT: v_and_b32_e32 v2, 0x7f, v2
|
|
; GFX10-NEXT: v_lshlrev_b16 v0, 1, v0
|
|
; GFX10-NEXT: v_and_b32_e32 v1, 0x7f, v1
|
|
; GFX10-NEXT: v_rcp_iflag_f32_e32 v3, v3
|
|
; GFX10-NEXT: v_mul_f32_e32 v3, 0x4f7ffffe, v3
|
|
; GFX10-NEXT: v_cvt_u32_f32_e32 v3, v3
|
|
; GFX10-NEXT: v_mul_lo_u32 v4, v3, -7
|
|
; GFX10-NEXT: v_mul_hi_u32 v4, v3, v4
|
|
; GFX10-NEXT: v_add_nc_u32_e32 v3, v3, v4
|
|
; GFX10-NEXT: v_mul_hi_u32 v3, v2, v3
|
|
; GFX10-NEXT: v_mul_lo_u32 v3, v3, 7
|
|
; GFX10-NEXT: v_sub_nc_u32_e32 v2, v2, v3
|
|
; GFX10-NEXT: v_add_nc_u32_e32 v3, -7, v2
|
|
; GFX10-NEXT: v_cmp_le_u32_e32 vcc_lo, 7, v2
|
|
; GFX10-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc_lo
|
|
; GFX10-NEXT: v_add_nc_u32_e32 v3, -7, v2
|
|
; GFX10-NEXT: v_cmp_le_u32_e32 vcc_lo, 7, v2
|
|
; GFX10-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc_lo
|
|
; GFX10-NEXT: v_sub_nc_u16 v3, 6, v2
|
|
; GFX10-NEXT: v_and_b32_e32 v2, 0x7f, v2
|
|
; GFX10-NEXT: v_and_b32_e32 v3, 0x7f, v3
|
|
; GFX10-NEXT: v_lshrrev_b16 v1, v2, v1
|
|
; GFX10-NEXT: v_lshlrev_b16 v0, v3, v0
|
|
; GFX10-NEXT: v_or_b32_e32 v0, v0, v1
|
|
; GFX10-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; GFX11-LABEL: v_fshr_i7:
|
|
; GFX11: ; %bb.0:
|
|
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; GFX11-NEXT: v_cvt_f32_ubyte0_e32 v3, 7
|
|
; GFX11-NEXT: v_and_b32_e32 v2, 0x7f, v2
|
|
; GFX11-NEXT: v_lshlrev_b16 v0, 1, v0
|
|
; GFX11-NEXT: v_and_b32_e32 v1, 0x7f, v1
|
|
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_1)
|
|
; GFX11-NEXT: v_rcp_iflag_f32_e32 v3, v3
|
|
; GFX11-NEXT: s_waitcnt_depctr 0xfff
|
|
; GFX11-NEXT: v_mul_f32_e32 v3, 0x4f7ffffe, v3
|
|
; GFX11-NEXT: v_cvt_u32_f32_e32 v3, v3
|
|
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
|
|
; GFX11-NEXT: v_mul_lo_u32 v4, v3, -7
|
|
; GFX11-NEXT: v_mul_hi_u32 v4, v3, v4
|
|
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
|
|
; GFX11-NEXT: v_add_nc_u32_e32 v3, v3, v4
|
|
; GFX11-NEXT: v_mul_hi_u32 v3, v2, v3
|
|
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
|
|
; GFX11-NEXT: v_mul_lo_u32 v3, v3, 7
|
|
; GFX11-NEXT: v_sub_nc_u32_e32 v2, v2, v3
|
|
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
|
|
; GFX11-NEXT: v_add_nc_u32_e32 v3, -7, v2
|
|
; GFX11-NEXT: v_cmp_le_u32_e32 vcc_lo, 7, v2
|
|
; GFX11-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc_lo
|
|
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
|
|
; GFX11-NEXT: v_add_nc_u32_e32 v3, -7, v2
|
|
; GFX11-NEXT: v_cmp_le_u32_e32 vcc_lo, 7, v2
|
|
; GFX11-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc_lo
|
|
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
|
|
; GFX11-NEXT: v_sub_nc_u16 v3, 6, v2
|
|
; GFX11-NEXT: v_and_b32_e32 v2, 0x7f, v2
|
|
; GFX11-NEXT: v_and_b32_e32 v3, 0x7f, v3
|
|
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
|
|
; GFX11-NEXT: v_lshrrev_b16 v1, v2, v1
|
|
; GFX11-NEXT: v_lshlrev_b16 v0, v3, v0
|
|
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
|
|
; GFX11-NEXT: v_or_b32_e32 v0, v0, v1
|
|
; GFX11-NEXT: s_setpc_b64 s[30:31]
|
|
%result = call i7 @llvm.fshr.i7(i7 %lhs, i7 %rhs, i7 %amt)
|
|
ret i7 %result
|
|
}
|
|
|
|
define amdgpu_ps i8 @s_fshr_i8(i8 inreg %lhs, i8 inreg %rhs, i8 inreg %amt) {
|
|
; GFX6-LABEL: s_fshr_i8:
|
|
; GFX6: ; %bb.0:
|
|
; GFX6-NEXT: s_and_b32 s3, s2, 7
|
|
; GFX6-NEXT: s_andn2_b32 s2, 7, s2
|
|
; GFX6-NEXT: s_lshl_b32 s0, s0, 1
|
|
; GFX6-NEXT: s_and_b32 s1, s1, 0xff
|
|
; GFX6-NEXT: s_lshl_b32 s0, s0, s2
|
|
; GFX6-NEXT: s_lshr_b32 s1, s1, s3
|
|
; GFX6-NEXT: s_or_b32 s0, s0, s1
|
|
; GFX6-NEXT: ; return to shader part epilog
|
|
;
|
|
; GFX8-LABEL: s_fshr_i8:
|
|
; GFX8: ; %bb.0:
|
|
; GFX8-NEXT: s_andn2_b32 s3, 7, s2
|
|
; GFX8-NEXT: s_and_b32 s2, s2, 7
|
|
; GFX8-NEXT: s_and_b32 s1, s1, 0xff
|
|
; GFX8-NEXT: s_lshl_b32 s0, s0, 1
|
|
; GFX8-NEXT: s_and_b32 s3, 0xffff, s3
|
|
; GFX8-NEXT: s_and_b32 s1, 0xffff, s1
|
|
; GFX8-NEXT: s_and_b32 s2, 0xffff, s2
|
|
; GFX8-NEXT: s_lshl_b32 s0, s0, s3
|
|
; GFX8-NEXT: s_lshr_b32 s1, s1, s2
|
|
; GFX8-NEXT: s_or_b32 s0, s0, s1
|
|
; GFX8-NEXT: ; return to shader part epilog
|
|
;
|
|
; GFX9-LABEL: s_fshr_i8:
|
|
; GFX9: ; %bb.0:
|
|
; GFX9-NEXT: s_andn2_b32 s3, 7, s2
|
|
; GFX9-NEXT: s_and_b32 s2, s2, 7
|
|
; GFX9-NEXT: s_and_b32 s1, s1, 0xff
|
|
; GFX9-NEXT: s_lshl_b32 s0, s0, 1
|
|
; GFX9-NEXT: s_and_b32 s3, 0xffff, s3
|
|
; GFX9-NEXT: s_and_b32 s1, 0xffff, s1
|
|
; GFX9-NEXT: s_and_b32 s2, 0xffff, s2
|
|
; GFX9-NEXT: s_lshl_b32 s0, s0, s3
|
|
; GFX9-NEXT: s_lshr_b32 s1, s1, s2
|
|
; GFX9-NEXT: s_or_b32 s0, s0, s1
|
|
; GFX9-NEXT: ; return to shader part epilog
|
|
;
|
|
; GFX10-LABEL: s_fshr_i8:
|
|
; GFX10: ; %bb.0:
|
|
; GFX10-NEXT: s_andn2_b32 s3, 7, s2
|
|
; GFX10-NEXT: s_and_b32 s1, s1, 0xff
|
|
; GFX10-NEXT: s_and_b32 s2, s2, 7
|
|
; GFX10-NEXT: s_lshl_b32 s0, s0, 1
|
|
; GFX10-NEXT: s_and_b32 s3, 0xffff, s3
|
|
; GFX10-NEXT: s_and_b32 s1, 0xffff, s1
|
|
; GFX10-NEXT: s_and_b32 s2, 0xffff, s2
|
|
; GFX10-NEXT: s_lshl_b32 s0, s0, s3
|
|
; GFX10-NEXT: s_lshr_b32 s1, s1, s2
|
|
; GFX10-NEXT: s_or_b32 s0, s0, s1
|
|
; GFX10-NEXT: ; return to shader part epilog
|
|
;
|
|
; GFX11-LABEL: s_fshr_i8:
|
|
; GFX11: ; %bb.0:
|
|
; GFX11-NEXT: s_and_not1_b32 s3, 7, s2
|
|
; GFX11-NEXT: s_and_b32 s1, s1, 0xff
|
|
; GFX11-NEXT: s_and_b32 s2, s2, 7
|
|
; GFX11-NEXT: s_lshl_b32 s0, s0, 1
|
|
; GFX11-NEXT: s_and_b32 s3, 0xffff, s3
|
|
; GFX11-NEXT: s_and_b32 s1, 0xffff, s1
|
|
; GFX11-NEXT: s_and_b32 s2, 0xffff, s2
|
|
; GFX11-NEXT: s_lshl_b32 s0, s0, s3
|
|
; GFX11-NEXT: s_lshr_b32 s1, s1, s2
|
|
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
|
|
; GFX11-NEXT: s_or_b32 s0, s0, s1
|
|
; GFX11-NEXT: ; return to shader part epilog
|
|
%result = call i8 @llvm.fshr.i8(i8 %lhs, i8 %rhs, i8 %amt)
|
|
ret i8 %result
|
|
}
|
|
|
|
define i8 @v_fshr_i8(i8 %lhs, i8 %rhs, i8 %amt) {
|
|
; GFX6-LABEL: v_fshr_i8:
|
|
; GFX6: ; %bb.0:
|
|
; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; GFX6-NEXT: v_and_b32_e32 v3, 7, v2
|
|
; GFX6-NEXT: v_not_b32_e32 v2, v2
|
|
; GFX6-NEXT: v_and_b32_e32 v2, 7, v2
|
|
; GFX6-NEXT: v_lshlrev_b32_e32 v0, 1, v0
|
|
; GFX6-NEXT: v_and_b32_e32 v1, 0xff, v1
|
|
; GFX6-NEXT: v_lshlrev_b32_e32 v0, v2, v0
|
|
; GFX6-NEXT: v_lshrrev_b32_e32 v1, v3, v1
|
|
; GFX6-NEXT: v_or_b32_e32 v0, v0, v1
|
|
; GFX6-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; GFX8-LABEL: v_fshr_i8:
|
|
; GFX8: ; %bb.0:
|
|
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; GFX8-NEXT: v_xor_b32_e32 v3, -1, v2
|
|
; GFX8-NEXT: v_lshlrev_b16_e32 v0, 1, v0
|
|
; GFX8-NEXT: v_and_b32_e32 v3, 7, v3
|
|
; GFX8-NEXT: v_and_b32_e32 v2, 7, v2
|
|
; GFX8-NEXT: v_lshlrev_b16_e32 v0, v3, v0
|
|
; GFX8-NEXT: v_lshrrev_b16_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
|
|
; GFX8-NEXT: v_or_b32_e32 v0, v0, v1
|
|
; GFX8-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; GFX9-LABEL: v_fshr_i8:
|
|
; GFX9: ; %bb.0:
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; GFX9-NEXT: v_xor_b32_e32 v3, -1, v2
|
|
; GFX9-NEXT: v_lshlrev_b16_e32 v0, 1, v0
|
|
; GFX9-NEXT: v_and_b32_e32 v3, 7, v3
|
|
; GFX9-NEXT: v_and_b32_e32 v2, 7, v2
|
|
; GFX9-NEXT: v_lshlrev_b16_e32 v0, v3, v0
|
|
; GFX9-NEXT: v_lshrrev_b16_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
|
|
; GFX9-NEXT: v_or_b32_e32 v0, v0, v1
|
|
; GFX9-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; GFX10-LABEL: v_fshr_i8:
|
|
; GFX10: ; %bb.0:
|
|
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; GFX10-NEXT: v_xor_b32_e32 v3, -1, v2
|
|
; GFX10-NEXT: v_lshlrev_b16 v0, 1, v0
|
|
; GFX10-NEXT: v_and_b32_e32 v2, 7, v2
|
|
; GFX10-NEXT: v_and_b32_e32 v1, 0xff, v1
|
|
; GFX10-NEXT: v_and_b32_e32 v3, 7, v3
|
|
; GFX10-NEXT: v_lshrrev_b16 v1, v2, v1
|
|
; GFX10-NEXT: v_lshlrev_b16 v0, v3, v0
|
|
; GFX10-NEXT: v_or_b32_e32 v0, v0, v1
|
|
; GFX10-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; GFX11-LABEL: v_fshr_i8:
|
|
; GFX11: ; %bb.0:
|
|
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; GFX11-NEXT: v_xor_b32_e32 v3, -1, v2
|
|
; GFX11-NEXT: v_lshlrev_b16 v0, 1, v0
|
|
; GFX11-NEXT: v_and_b32_e32 v2, 7, v2
|
|
; GFX11-NEXT: v_and_b32_e32 v1, 0xff, v1
|
|
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2)
|
|
; GFX11-NEXT: v_and_b32_e32 v3, 7, v3
|
|
; GFX11-NEXT: v_lshrrev_b16 v1, v2, v1
|
|
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
|
|
; GFX11-NEXT: v_lshlrev_b16 v0, v3, v0
|
|
; GFX11-NEXT: v_or_b32_e32 v0, v0, v1
|
|
; GFX11-NEXT: s_setpc_b64 s[30:31]
|
|
%result = call i8 @llvm.fshr.i8(i8 %lhs, i8 %rhs, i8 %amt)
|
|
ret i8 %result
|
|
}
|
|
|
|
define amdgpu_ps i8 @s_fshr_i8_4(i8 inreg %lhs, i8 inreg %rhs) {
|
|
; GFX6-LABEL: s_fshr_i8_4:
|
|
; GFX6: ; %bb.0:
|
|
; GFX6-NEXT: s_lshl_b32 s0, s0, 4
|
|
; GFX6-NEXT: s_bfe_u32 s1, s1, 0x40004
|
|
; GFX6-NEXT: s_or_b32 s0, s0, s1
|
|
; GFX6-NEXT: ; return to shader part epilog
|
|
;
|
|
; GFX8-LABEL: s_fshr_i8_4:
|
|
; GFX8: ; %bb.0:
|
|
; GFX8-NEXT: s_and_b32 s1, s1, 0xff
|
|
; GFX8-NEXT: s_and_b32 s1, 0xffff, s1
|
|
; GFX8-NEXT: s_lshl_b32 s0, s0, 4
|
|
; GFX8-NEXT: s_lshr_b32 s1, s1, 4
|
|
; GFX8-NEXT: s_or_b32 s0, s0, s1
|
|
; GFX8-NEXT: ; return to shader part epilog
|
|
;
|
|
; GFX9-LABEL: s_fshr_i8_4:
|
|
; GFX9: ; %bb.0:
|
|
; GFX9-NEXT: s_and_b32 s1, s1, 0xff
|
|
; GFX9-NEXT: s_and_b32 s1, 0xffff, s1
|
|
; GFX9-NEXT: s_lshl_b32 s0, s0, 4
|
|
; GFX9-NEXT: s_lshr_b32 s1, s1, 4
|
|
; GFX9-NEXT: s_or_b32 s0, s0, s1
|
|
; GFX9-NEXT: ; return to shader part epilog
|
|
;
|
|
; GFX10-LABEL: s_fshr_i8_4:
|
|
; GFX10: ; %bb.0:
|
|
; GFX10-NEXT: s_and_b32 s1, s1, 0xff
|
|
; GFX10-NEXT: s_lshl_b32 s0, s0, 4
|
|
; GFX10-NEXT: s_and_b32 s1, 0xffff, s1
|
|
; GFX10-NEXT: s_lshr_b32 s1, s1, 4
|
|
; GFX10-NEXT: s_or_b32 s0, s0, s1
|
|
; GFX10-NEXT: ; return to shader part epilog
|
|
;
|
|
; GFX11-LABEL: s_fshr_i8_4:
|
|
; GFX11: ; %bb.0:
|
|
; GFX11-NEXT: s_and_b32 s1, s1, 0xff
|
|
; GFX11-NEXT: s_lshl_b32 s0, s0, 4
|
|
; GFX11-NEXT: s_and_b32 s1, 0xffff, s1
|
|
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
|
|
; GFX11-NEXT: s_lshr_b32 s1, s1, 4
|
|
; GFX11-NEXT: s_or_b32 s0, s0, s1
|
|
; GFX11-NEXT: ; return to shader part epilog
|
|
%result = call i8 @llvm.fshr.i8(i8 %lhs, i8 %rhs, i8 4)
|
|
ret i8 %result
|
|
}
|
|
|
|
define i8 @v_fshr_i8_4(i8 %lhs, i8 %rhs) {
|
|
; GFX6-LABEL: v_fshr_i8_4:
|
|
; GFX6: ; %bb.0:
|
|
; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; GFX6-NEXT: v_lshlrev_b32_e32 v0, 4, v0
|
|
; GFX6-NEXT: v_bfe_u32 v1, v1, 4, 4
|
|
; GFX6-NEXT: v_or_b32_e32 v0, v0, v1
|
|
; GFX6-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; GFX8-LABEL: v_fshr_i8_4:
|
|
; GFX8: ; %bb.0:
|
|
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; GFX8-NEXT: v_mov_b32_e32 v2, 4
|
|
; GFX8-NEXT: v_lshlrev_b16_e32 v0, 4, v0
|
|
; GFX8-NEXT: v_lshrrev_b16_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
|
|
; GFX8-NEXT: v_or_b32_e32 v0, v0, v1
|
|
; GFX8-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; GFX9-LABEL: v_fshr_i8_4:
|
|
; GFX9: ; %bb.0:
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; GFX9-NEXT: v_mov_b32_e32 v2, 4
|
|
; GFX9-NEXT: v_lshlrev_b16_e32 v0, 4, v0
|
|
; GFX9-NEXT: v_lshrrev_b16_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
|
|
; GFX9-NEXT: v_or_b32_e32 v0, v0, v1
|
|
; GFX9-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; GFX10-LABEL: v_fshr_i8_4:
|
|
; GFX10: ; %bb.0:
|
|
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; GFX10-NEXT: v_and_b32_e32 v1, 0xff, v1
|
|
; GFX10-NEXT: v_lshlrev_b16 v0, 4, v0
|
|
; GFX10-NEXT: v_lshrrev_b16 v1, 4, v1
|
|
; GFX10-NEXT: v_or_b32_e32 v0, v0, v1
|
|
; GFX10-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; GFX11-LABEL: v_fshr_i8_4:
|
|
; GFX11: ; %bb.0:
|
|
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; GFX11-NEXT: v_and_b32_e32 v1, 0xff, v1
|
|
; GFX11-NEXT: v_lshlrev_b16 v0, 4, v0
|
|
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
|
|
; GFX11-NEXT: v_lshrrev_b16 v1, 4, v1
|
|
; GFX11-NEXT: v_or_b32_e32 v0, v0, v1
|
|
; GFX11-NEXT: s_setpc_b64 s[30:31]
|
|
%result = call i8 @llvm.fshr.i8(i8 %lhs, i8 %rhs, i8 4)
|
|
ret i8 %result
|
|
}
|
|
|
|
define amdgpu_ps i8 @s_fshr_i8_5(i8 inreg %lhs, i8 inreg %rhs) {
|
|
; GFX6-LABEL: s_fshr_i8_5:
|
|
; GFX6: ; %bb.0:
|
|
; GFX6-NEXT: s_lshl_b32 s0, s0, 3
|
|
; GFX6-NEXT: s_bfe_u32 s1, s1, 0x30005
|
|
; GFX6-NEXT: s_or_b32 s0, s0, s1
|
|
; GFX6-NEXT: ; return to shader part epilog
|
|
;
|
|
; GFX8-LABEL: s_fshr_i8_5:
|
|
; GFX8: ; %bb.0:
|
|
; GFX8-NEXT: s_and_b32 s1, s1, 0xff
|
|
; GFX8-NEXT: s_and_b32 s1, 0xffff, s1
|
|
; GFX8-NEXT: s_lshl_b32 s0, s0, 3
|
|
; GFX8-NEXT: s_lshr_b32 s1, s1, 5
|
|
; GFX8-NEXT: s_or_b32 s0, s0, s1
|
|
; GFX8-NEXT: ; return to shader part epilog
|
|
;
|
|
; GFX9-LABEL: s_fshr_i8_5:
|
|
; GFX9: ; %bb.0:
|
|
; GFX9-NEXT: s_and_b32 s1, s1, 0xff
|
|
; GFX9-NEXT: s_and_b32 s1, 0xffff, s1
|
|
; GFX9-NEXT: s_lshl_b32 s0, s0, 3
|
|
; GFX9-NEXT: s_lshr_b32 s1, s1, 5
|
|
; GFX9-NEXT: s_or_b32 s0, s0, s1
|
|
; GFX9-NEXT: ; return to shader part epilog
|
|
;
|
|
; GFX10-LABEL: s_fshr_i8_5:
|
|
; GFX10: ; %bb.0:
|
|
; GFX10-NEXT: s_and_b32 s1, s1, 0xff
|
|
; GFX10-NEXT: s_lshl_b32 s0, s0, 3
|
|
; GFX10-NEXT: s_and_b32 s1, 0xffff, s1
|
|
; GFX10-NEXT: s_lshr_b32 s1, s1, 5
|
|
; GFX10-NEXT: s_or_b32 s0, s0, s1
|
|
; GFX10-NEXT: ; return to shader part epilog
|
|
;
|
|
; GFX11-LABEL: s_fshr_i8_5:
|
|
; GFX11: ; %bb.0:
|
|
; GFX11-NEXT: s_and_b32 s1, s1, 0xff
|
|
; GFX11-NEXT: s_lshl_b32 s0, s0, 3
|
|
; GFX11-NEXT: s_and_b32 s1, 0xffff, s1
|
|
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
|
|
; GFX11-NEXT: s_lshr_b32 s1, s1, 5
|
|
; GFX11-NEXT: s_or_b32 s0, s0, s1
|
|
; GFX11-NEXT: ; return to shader part epilog
|
|
%result = call i8 @llvm.fshr.i8(i8 %lhs, i8 %rhs, i8 5)
|
|
ret i8 %result
|
|
}
|
|
|
|
define i8 @v_fshr_i8_5(i8 %lhs, i8 %rhs) {
|
|
; GFX6-LABEL: v_fshr_i8_5:
|
|
; GFX6: ; %bb.0:
|
|
; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; GFX6-NEXT: v_lshlrev_b32_e32 v0, 3, v0
|
|
; GFX6-NEXT: v_bfe_u32 v1, v1, 5, 3
|
|
; GFX6-NEXT: v_or_b32_e32 v0, v0, v1
|
|
; GFX6-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; GFX8-LABEL: v_fshr_i8_5:
|
|
; GFX8: ; %bb.0:
|
|
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; GFX8-NEXT: v_mov_b32_e32 v2, 5
|
|
; GFX8-NEXT: v_lshlrev_b16_e32 v0, 3, v0
|
|
; GFX8-NEXT: v_lshrrev_b16_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
|
|
; GFX8-NEXT: v_or_b32_e32 v0, v0, v1
|
|
; GFX8-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; GFX9-LABEL: v_fshr_i8_5:
|
|
; GFX9: ; %bb.0:
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; GFX9-NEXT: v_mov_b32_e32 v2, 5
|
|
; GFX9-NEXT: v_lshlrev_b16_e32 v0, 3, v0
|
|
; GFX9-NEXT: v_lshrrev_b16_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
|
|
; GFX9-NEXT: v_or_b32_e32 v0, v0, v1
|
|
; GFX9-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; GFX10-LABEL: v_fshr_i8_5:
|
|
; GFX10: ; %bb.0:
|
|
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; GFX10-NEXT: v_and_b32_e32 v1, 0xff, v1
|
|
; GFX10-NEXT: v_lshlrev_b16 v0, 3, v0
|
|
; GFX10-NEXT: v_lshrrev_b16 v1, 5, v1
|
|
; GFX10-NEXT: v_or_b32_e32 v0, v0, v1
|
|
; GFX10-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; GFX11-LABEL: v_fshr_i8_5:
|
|
; GFX11: ; %bb.0:
|
|
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; GFX11-NEXT: v_and_b32_e32 v1, 0xff, v1
|
|
; GFX11-NEXT: v_lshlrev_b16 v0, 3, v0
|
|
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
|
|
; GFX11-NEXT: v_lshrrev_b16 v1, 5, v1
|
|
; GFX11-NEXT: v_or_b32_e32 v0, v0, v1
|
|
; GFX11-NEXT: s_setpc_b64 s[30:31]
|
|
%result = call i8 @llvm.fshr.i8(i8 %lhs, i8 %rhs, i8 5)
|
|
ret i8 %result
|
|
}
|
|
|
|
define amdgpu_ps i16 @s_fshr_v2i8(i16 inreg %lhs.arg, i16 inreg %rhs.arg, i16 inreg %amt.arg) {
|
|
; GFX6-LABEL: s_fshr_v2i8:
|
|
; GFX6: ; %bb.0:
|
|
; GFX6-NEXT: s_lshr_b32 s3, s0, 8
|
|
; GFX6-NEXT: s_lshr_b32 s4, s2, 8
|
|
; GFX6-NEXT: s_and_b32 s5, s2, 7
|
|
; GFX6-NEXT: s_andn2_b32 s2, 7, s2
|
|
; GFX6-NEXT: s_lshl_b32 s0, s0, 1
|
|
; GFX6-NEXT: s_lshl_b32 s0, s0, s2
|
|
; GFX6-NEXT: s_and_b32 s2, s1, 0xff
|
|
; GFX6-NEXT: s_lshr_b32 s2, s2, s5
|
|
; GFX6-NEXT: s_or_b32 s0, s0, s2
|
|
; GFX6-NEXT: s_and_b32 s2, s4, 7
|
|
; GFX6-NEXT: s_andn2_b32 s4, 7, s4
|
|
; GFX6-NEXT: s_lshl_b32 s3, s3, 1
|
|
; GFX6-NEXT: s_bfe_u32 s1, s1, 0x80008
|
|
; GFX6-NEXT: s_lshl_b32 s3, s3, s4
|
|
; GFX6-NEXT: s_lshr_b32 s1, s1, s2
|
|
; GFX6-NEXT: s_or_b32 s1, s3, s1
|
|
; GFX6-NEXT: s_and_b32 s1, s1, 0xff
|
|
; GFX6-NEXT: s_and_b32 s0, s0, 0xff
|
|
; GFX6-NEXT: s_lshl_b32 s1, s1, 8
|
|
; GFX6-NEXT: s_or_b32 s0, s0, s1
|
|
; GFX6-NEXT: ; return to shader part epilog
|
|
;
|
|
; GFX8-LABEL: s_fshr_v2i8:
|
|
; GFX8: ; %bb.0:
|
|
; GFX8-NEXT: s_lshr_b32 s4, s1, 8
|
|
; GFX8-NEXT: s_lshr_b32 s5, s2, 8
|
|
; GFX8-NEXT: s_andn2_b32 s6, 7, s2
|
|
; GFX8-NEXT: s_and_b32 s2, s2, 7
|
|
; GFX8-NEXT: s_and_b32 s1, s1, 0xff
|
|
; GFX8-NEXT: s_lshr_b32 s3, s0, 8
|
|
; GFX8-NEXT: s_lshl_b32 s0, s0, 1
|
|
; GFX8-NEXT: s_and_b32 s6, 0xffff, s6
|
|
; GFX8-NEXT: s_and_b32 s1, 0xffff, s1
|
|
; GFX8-NEXT: s_and_b32 s2, 0xffff, s2
|
|
; GFX8-NEXT: s_lshl_b32 s0, s0, s6
|
|
; GFX8-NEXT: s_lshr_b32 s1, s1, s2
|
|
; GFX8-NEXT: s_andn2_b32 s2, 7, s5
|
|
; GFX8-NEXT: s_or_b32 s0, s0, s1
|
|
; GFX8-NEXT: s_lshl_b32 s1, s3, 1
|
|
; GFX8-NEXT: s_and_b32 s2, 0xffff, s2
|
|
; GFX8-NEXT: s_lshl_b32 s1, s1, s2
|
|
; GFX8-NEXT: s_and_b32 s2, s5, 7
|
|
; GFX8-NEXT: s_and_b32 s3, s4, 0xff
|
|
; GFX8-NEXT: s_and_b32 s3, 0xffff, s3
|
|
; GFX8-NEXT: s_and_b32 s2, 0xffff, s2
|
|
; GFX8-NEXT: s_lshr_b32 s2, s3, s2
|
|
; GFX8-NEXT: s_or_b32 s1, s1, s2
|
|
; GFX8-NEXT: s_and_b32 s1, s1, 0xff
|
|
; GFX8-NEXT: s_and_b32 s0, s0, 0xff
|
|
; GFX8-NEXT: s_lshl_b32 s1, s1, 8
|
|
; GFX8-NEXT: s_or_b32 s0, s0, s1
|
|
; GFX8-NEXT: ; return to shader part epilog
|
|
;
|
|
; GFX9-LABEL: s_fshr_v2i8:
|
|
; GFX9: ; %bb.0:
|
|
; GFX9-NEXT: s_lshr_b32 s4, s1, 8
|
|
; GFX9-NEXT: s_lshr_b32 s5, s2, 8
|
|
; GFX9-NEXT: s_andn2_b32 s6, 7, s2
|
|
; GFX9-NEXT: s_and_b32 s2, s2, 7
|
|
; GFX9-NEXT: s_and_b32 s1, s1, 0xff
|
|
; GFX9-NEXT: s_lshr_b32 s3, s0, 8
|
|
; GFX9-NEXT: s_lshl_b32 s0, s0, 1
|
|
; GFX9-NEXT: s_and_b32 s6, 0xffff, s6
|
|
; GFX9-NEXT: s_and_b32 s1, 0xffff, s1
|
|
; GFX9-NEXT: s_and_b32 s2, 0xffff, s2
|
|
; GFX9-NEXT: s_lshl_b32 s0, s0, s6
|
|
; GFX9-NEXT: s_lshr_b32 s1, s1, s2
|
|
; GFX9-NEXT: s_andn2_b32 s2, 7, s5
|
|
; GFX9-NEXT: s_or_b32 s0, s0, s1
|
|
; GFX9-NEXT: s_lshl_b32 s1, s3, 1
|
|
; GFX9-NEXT: s_and_b32 s2, 0xffff, s2
|
|
; GFX9-NEXT: s_lshl_b32 s1, s1, s2
|
|
; GFX9-NEXT: s_and_b32 s2, s5, 7
|
|
; GFX9-NEXT: s_and_b32 s3, s4, 0xff
|
|
; GFX9-NEXT: s_and_b32 s3, 0xffff, s3
|
|
; GFX9-NEXT: s_and_b32 s2, 0xffff, s2
|
|
; GFX9-NEXT: s_lshr_b32 s2, s3, s2
|
|
; GFX9-NEXT: s_or_b32 s1, s1, s2
|
|
; GFX9-NEXT: s_and_b32 s1, s1, 0xff
|
|
; GFX9-NEXT: s_and_b32 s0, s0, 0xff
|
|
; GFX9-NEXT: s_lshl_b32 s1, s1, 8
|
|
; GFX9-NEXT: s_or_b32 s0, s0, s1
|
|
; GFX9-NEXT: ; return to shader part epilog
|
|
;
|
|
; GFX10-LABEL: s_fshr_v2i8:
|
|
; GFX10: ; %bb.0:
|
|
; GFX10-NEXT: s_andn2_b32 s5, 7, s2
|
|
; GFX10-NEXT: s_lshr_b32 s3, s0, 8
|
|
; GFX10-NEXT: s_lshr_b32 s4, s1, 8
|
|
; GFX10-NEXT: s_lshl_b32 s0, s0, 1
|
|
; GFX10-NEXT: s_and_b32 s5, 0xffff, s5
|
|
; GFX10-NEXT: s_lshr_b32 s6, s2, 8
|
|
; GFX10-NEXT: s_lshl_b32 s0, s0, s5
|
|
; GFX10-NEXT: s_andn2_b32 s5, 7, s6
|
|
; GFX10-NEXT: s_and_b32 s4, s4, 0xff
|
|
; GFX10-NEXT: s_and_b32 s6, s6, 7
|
|
; GFX10-NEXT: s_and_b32 s1, s1, 0xff
|
|
; GFX10-NEXT: s_and_b32 s2, s2, 7
|
|
; GFX10-NEXT: s_lshl_b32 s3, s3, 1
|
|
; GFX10-NEXT: s_and_b32 s5, 0xffff, s5
|
|
; GFX10-NEXT: s_and_b32 s4, 0xffff, s4
|
|
; GFX10-NEXT: s_and_b32 s6, 0xffff, s6
|
|
; GFX10-NEXT: s_and_b32 s1, 0xffff, s1
|
|
; GFX10-NEXT: s_and_b32 s2, 0xffff, s2
|
|
; GFX10-NEXT: s_lshl_b32 s3, s3, s5
|
|
; GFX10-NEXT: s_lshr_b32 s4, s4, s6
|
|
; GFX10-NEXT: s_lshr_b32 s1, s1, s2
|
|
; GFX10-NEXT: s_or_b32 s2, s3, s4
|
|
; GFX10-NEXT: s_or_b32 s0, s0, s1
|
|
; GFX10-NEXT: s_and_b32 s1, s2, 0xff
|
|
; GFX10-NEXT: s_and_b32 s0, s0, 0xff
|
|
; GFX10-NEXT: s_lshl_b32 s1, s1, 8
|
|
; GFX10-NEXT: s_or_b32 s0, s0, s1
|
|
; GFX10-NEXT: ; return to shader part epilog
|
|
;
|
|
; GFX11-LABEL: s_fshr_v2i8:
|
|
; GFX11: ; %bb.0:
|
|
; GFX11-NEXT: s_and_not1_b32 s5, 7, s2
|
|
; GFX11-NEXT: s_lshr_b32 s3, s0, 8
|
|
; GFX11-NEXT: s_lshr_b32 s4, s1, 8
|
|
; GFX11-NEXT: s_lshl_b32 s0, s0, 1
|
|
; GFX11-NEXT: s_and_b32 s5, 0xffff, s5
|
|
; GFX11-NEXT: s_lshr_b32 s6, s2, 8
|
|
; GFX11-NEXT: s_lshl_b32 s0, s0, s5
|
|
; GFX11-NEXT: s_and_not1_b32 s5, 7, s6
|
|
; GFX11-NEXT: s_and_b32 s4, s4, 0xff
|
|
; GFX11-NEXT: s_and_b32 s6, s6, 7
|
|
; GFX11-NEXT: s_and_b32 s1, s1, 0xff
|
|
; GFX11-NEXT: s_and_b32 s2, s2, 7
|
|
; GFX11-NEXT: s_lshl_b32 s3, s3, 1
|
|
; GFX11-NEXT: s_and_b32 s5, 0xffff, s5
|
|
; GFX11-NEXT: s_and_b32 s4, 0xffff, s4
|
|
; GFX11-NEXT: s_and_b32 s6, 0xffff, s6
|
|
; GFX11-NEXT: s_and_b32 s1, 0xffff, s1
|
|
; GFX11-NEXT: s_and_b32 s2, 0xffff, s2
|
|
; GFX11-NEXT: s_lshl_b32 s3, s3, s5
|
|
; GFX11-NEXT: s_lshr_b32 s4, s4, s6
|
|
; GFX11-NEXT: s_lshr_b32 s1, s1, s2
|
|
; GFX11-NEXT: s_or_b32 s2, s3, s4
|
|
; GFX11-NEXT: s_or_b32 s0, s0, s1
|
|
; GFX11-NEXT: s_and_b32 s1, s2, 0xff
|
|
; GFX11-NEXT: s_and_b32 s0, s0, 0xff
|
|
; GFX11-NEXT: s_lshl_b32 s1, s1, 8
|
|
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
|
|
; GFX11-NEXT: s_or_b32 s0, s0, s1
|
|
; GFX11-NEXT: ; return to shader part epilog
|
|
%lhs = bitcast i16 %lhs.arg to <2 x i8>
|
|
%rhs = bitcast i16 %rhs.arg to <2 x i8>
|
|
%amt = bitcast i16 %amt.arg to <2 x i8>
|
|
%result = call <2 x i8> @llvm.fshr.v2i8(<2 x i8> %lhs, <2 x i8> %rhs, <2 x i8> %amt)
|
|
%cast.result = bitcast <2 x i8> %result to i16
|
|
ret i16 %cast.result
|
|
}
|
|
|
|
define i16 @v_fshr_v2i8(i16 %lhs.arg, i16 %rhs.arg, i16 %amt.arg) {
|
|
; GFX6-LABEL: v_fshr_v2i8:
|
|
; GFX6: ; %bb.0:
|
|
; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; GFX6-NEXT: v_lshrrev_b32_e32 v4, 8, v2
|
|
; GFX6-NEXT: v_and_b32_e32 v5, 7, v2
|
|
; GFX6-NEXT: v_not_b32_e32 v2, v2
|
|
; GFX6-NEXT: v_lshrrev_b32_e32 v3, 8, v0
|
|
; GFX6-NEXT: v_and_b32_e32 v2, 7, v2
|
|
; GFX6-NEXT: v_lshlrev_b32_e32 v0, 1, v0
|
|
; GFX6-NEXT: v_lshlrev_b32_e32 v0, v2, v0
|
|
; GFX6-NEXT: v_and_b32_e32 v2, 0xff, v1
|
|
; GFX6-NEXT: v_lshrrev_b32_e32 v2, v5, v2
|
|
; GFX6-NEXT: v_or_b32_e32 v0, v0, v2
|
|
; GFX6-NEXT: v_and_b32_e32 v2, 7, v4
|
|
; GFX6-NEXT: v_not_b32_e32 v4, v4
|
|
; GFX6-NEXT: v_and_b32_e32 v4, 7, v4
|
|
; GFX6-NEXT: v_lshlrev_b32_e32 v3, 1, v3
|
|
; GFX6-NEXT: v_bfe_u32 v1, v1, 8, 8
|
|
; GFX6-NEXT: v_lshlrev_b32_e32 v3, v4, v3
|
|
; GFX6-NEXT: v_lshrrev_b32_e32 v1, v2, v1
|
|
; GFX6-NEXT: v_or_b32_e32 v1, v3, v1
|
|
; GFX6-NEXT: v_and_b32_e32 v1, 0xff, v1
|
|
; GFX6-NEXT: v_and_b32_e32 v0, 0xff, v0
|
|
; GFX6-NEXT: v_lshlrev_b32_e32 v1, 8, v1
|
|
; GFX6-NEXT: v_or_b32_e32 v0, v0, v1
|
|
; GFX6-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; GFX8-LABEL: v_fshr_v2i8:
|
|
; GFX8: ; %bb.0:
|
|
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; GFX8-NEXT: v_xor_b32_e32 v6, -1, v2
|
|
; GFX8-NEXT: v_lshrrev_b32_e32 v3, 8, v0
|
|
; GFX8-NEXT: v_lshrrev_b32_e32 v5, 8, v2
|
|
; GFX8-NEXT: v_lshlrev_b16_e32 v0, 1, v0
|
|
; GFX8-NEXT: v_and_b32_e32 v6, 7, v6
|
|
; GFX8-NEXT: v_and_b32_e32 v2, 7, v2
|
|
; GFX8-NEXT: v_lshrrev_b32_e32 v4, 8, v1
|
|
; GFX8-NEXT: v_lshlrev_b16_e32 v0, v6, v0
|
|
; GFX8-NEXT: v_lshrrev_b16_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
|
|
; GFX8-NEXT: v_xor_b32_e32 v2, -1, v5
|
|
; GFX8-NEXT: v_or_b32_e32 v0, v0, v1
|
|
; GFX8-NEXT: v_lshlrev_b16_e32 v1, 1, v3
|
|
; GFX8-NEXT: v_and_b32_e32 v2, 7, v2
|
|
; GFX8-NEXT: v_lshlrev_b16_e32 v1, v2, v1
|
|
; GFX8-NEXT: v_and_b32_e32 v2, 7, v5
|
|
; GFX8-NEXT: v_lshrrev_b16_sdwa v2, v2, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
|
|
; GFX8-NEXT: v_or_b32_e32 v1, v1, v2
|
|
; GFX8-NEXT: v_and_b32_e32 v1, 0xff, v1
|
|
; GFX8-NEXT: v_lshlrev_b16_e32 v1, 8, v1
|
|
; GFX8-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
|
|
; GFX8-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; GFX9-LABEL: v_fshr_v2i8:
|
|
; GFX9: ; %bb.0:
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; GFX9-NEXT: v_xor_b32_e32 v6, -1, v2
|
|
; GFX9-NEXT: v_lshrrev_b32_e32 v3, 8, v0
|
|
; GFX9-NEXT: v_lshrrev_b32_e32 v5, 8, v2
|
|
; GFX9-NEXT: v_lshlrev_b16_e32 v0, 1, v0
|
|
; GFX9-NEXT: v_and_b32_e32 v6, 7, v6
|
|
; GFX9-NEXT: v_and_b32_e32 v2, 7, v2
|
|
; GFX9-NEXT: v_lshrrev_b32_e32 v4, 8, v1
|
|
; GFX9-NEXT: v_lshlrev_b16_e32 v0, v6, v0
|
|
; GFX9-NEXT: v_lshrrev_b16_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
|
|
; GFX9-NEXT: v_xor_b32_e32 v2, -1, v5
|
|
; GFX9-NEXT: v_or_b32_e32 v0, v0, v1
|
|
; GFX9-NEXT: v_lshlrev_b16_e32 v1, 1, v3
|
|
; GFX9-NEXT: v_and_b32_e32 v2, 7, v2
|
|
; GFX9-NEXT: v_lshlrev_b16_e32 v1, v2, v1
|
|
; GFX9-NEXT: v_and_b32_e32 v2, 7, v5
|
|
; GFX9-NEXT: v_lshrrev_b16_sdwa v2, v2, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
|
|
; GFX9-NEXT: v_or_b32_e32 v1, v1, v2
|
|
; GFX9-NEXT: v_and_b32_e32 v1, 0xff, v1
|
|
; GFX9-NEXT: v_lshlrev_b16_e32 v1, 8, v1
|
|
; GFX9-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
|
|
; GFX9-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; GFX10-LABEL: v_fshr_v2i8:
|
|
; GFX10: ; %bb.0:
|
|
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; GFX10-NEXT: v_lshrrev_b32_e32 v3, 8, v2
|
|
; GFX10-NEXT: v_lshrrev_b32_e32 v4, 8, v0
|
|
; GFX10-NEXT: v_lshrrev_b32_e32 v5, 8, v1
|
|
; GFX10-NEXT: v_xor_b32_e32 v7, -1, v2
|
|
; GFX10-NEXT: v_lshlrev_b16 v0, 1, v0
|
|
; GFX10-NEXT: v_xor_b32_e32 v6, -1, v3
|
|
; GFX10-NEXT: v_lshlrev_b16 v4, 1, v4
|
|
; GFX10-NEXT: v_and_b32_e32 v3, 7, v3
|
|
; GFX10-NEXT: v_and_b32_e32 v5, 0xff, v5
|
|
; GFX10-NEXT: v_and_b32_e32 v2, 7, v2
|
|
; GFX10-NEXT: v_and_b32_e32 v6, 7, v6
|
|
; GFX10-NEXT: v_and_b32_e32 v1, 0xff, v1
|
|
; GFX10-NEXT: v_and_b32_e32 v7, 7, v7
|
|
; GFX10-NEXT: v_lshrrev_b16 v3, v3, v5
|
|
; GFX10-NEXT: v_lshlrev_b16 v4, v6, v4
|
|
; GFX10-NEXT: v_lshrrev_b16 v1, v2, v1
|
|
; GFX10-NEXT: v_lshlrev_b16 v0, v7, v0
|
|
; GFX10-NEXT: v_or_b32_e32 v2, v4, v3
|
|
; GFX10-NEXT: v_mov_b32_e32 v3, 0xff
|
|
; GFX10-NEXT: v_or_b32_e32 v0, v0, v1
|
|
; GFX10-NEXT: v_and_b32_sdwa v1, v2, v3 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
|
|
; GFX10-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
|
|
; GFX10-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; GFX11-LABEL: v_fshr_v2i8:
|
|
; GFX11: ; %bb.0:
|
|
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; GFX11-NEXT: v_lshrrev_b32_e32 v3, 8, v2
|
|
; GFX11-NEXT: v_lshrrev_b32_e32 v4, 8, v0
|
|
; GFX11-NEXT: v_lshrrev_b32_e32 v5, 8, v1
|
|
; GFX11-NEXT: v_xor_b32_e32 v7, -1, v2
|
|
; GFX11-NEXT: v_lshlrev_b16 v0, 1, v0
|
|
; GFX11-NEXT: v_xor_b32_e32 v6, -1, v3
|
|
; GFX11-NEXT: v_lshlrev_b16 v4, 1, v4
|
|
; GFX11-NEXT: v_and_b32_e32 v3, 7, v3
|
|
; GFX11-NEXT: v_and_b32_e32 v5, 0xff, v5
|
|
; GFX11-NEXT: v_and_b32_e32 v2, 7, v2
|
|
; GFX11-NEXT: v_and_b32_e32 v6, 7, v6
|
|
; GFX11-NEXT: v_and_b32_e32 v1, 0xff, v1
|
|
; GFX11-NEXT: v_and_b32_e32 v7, 7, v7
|
|
; GFX11-NEXT: v_lshrrev_b16 v3, v3, v5
|
|
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
|
|
; GFX11-NEXT: v_lshlrev_b16 v4, v6, v4
|
|
; GFX11-NEXT: v_lshrrev_b16 v1, v2, v1
|
|
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
|
|
; GFX11-NEXT: v_lshlrev_b16 v0, v7, v0
|
|
; GFX11-NEXT: v_or_b32_e32 v2, v4, v3
|
|
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
|
|
; GFX11-NEXT: v_or_b32_e32 v0, v0, v1
|
|
; GFX11-NEXT: v_and_b32_e32 v1, 0xff, v2
|
|
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
|
|
; GFX11-NEXT: v_and_b32_e32 v0, 0xff, v0
|
|
; GFX11-NEXT: v_lshlrev_b16 v1, 8, v1
|
|
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
|
|
; GFX11-NEXT: v_or_b32_e32 v0, v0, v1
|
|
; GFX11-NEXT: s_setpc_b64 s[30:31]
|
|
%lhs = bitcast i16 %lhs.arg to <2 x i8>
|
|
%rhs = bitcast i16 %rhs.arg to <2 x i8>
|
|
%amt = bitcast i16 %amt.arg to <2 x i8>
|
|
%result = call <2 x i8> @llvm.fshr.v2i8(<2 x i8> %lhs, <2 x i8> %rhs, <2 x i8> %amt)
|
|
%cast.result = bitcast <2 x i8> %result to i16
|
|
ret i16 %cast.result
|
|
}
|
|
|
|
define amdgpu_ps i32 @s_fshr_v4i8(i32 inreg %lhs.arg, i32 inreg %rhs.arg, i32 inreg %amt.arg) {
|
|
; GFX6-LABEL: s_fshr_v4i8:
|
|
; GFX6: ; %bb.0:
|
|
; GFX6-NEXT: s_lshr_b32 s3, s0, 8
|
|
; GFX6-NEXT: s_lshr_b32 s4, s0, 16
|
|
; GFX6-NEXT: s_lshr_b32 s5, s0, 24
|
|
; GFX6-NEXT: s_lshr_b32 s7, s2, 8
|
|
; GFX6-NEXT: s_lshr_b32 s8, s2, 16
|
|
; GFX6-NEXT: s_lshr_b32 s9, s2, 24
|
|
; GFX6-NEXT: s_and_b32 s10, s2, 7
|
|
; GFX6-NEXT: s_andn2_b32 s2, 7, s2
|
|
; GFX6-NEXT: s_lshl_b32 s0, s0, 1
|
|
; GFX6-NEXT: s_lshl_b32 s0, s0, s2
|
|
; GFX6-NEXT: s_and_b32 s2, s1, 0xff
|
|
; GFX6-NEXT: s_lshr_b32 s2, s2, s10
|
|
; GFX6-NEXT: s_or_b32 s0, s0, s2
|
|
; GFX6-NEXT: s_and_b32 s2, s7, 7
|
|
; GFX6-NEXT: s_andn2_b32 s7, 7, s7
|
|
; GFX6-NEXT: s_lshl_b32 s3, s3, 1
|
|
; GFX6-NEXT: s_lshl_b32 s3, s3, s7
|
|
; GFX6-NEXT: s_bfe_u32 s7, s1, 0x80008
|
|
; GFX6-NEXT: s_lshr_b32 s2, s7, s2
|
|
; GFX6-NEXT: s_lshr_b32 s6, s1, 24
|
|
; GFX6-NEXT: s_or_b32 s2, s3, s2
|
|
; GFX6-NEXT: s_and_b32 s3, s8, 7
|
|
; GFX6-NEXT: s_andn2_b32 s7, 7, s8
|
|
; GFX6-NEXT: s_lshl_b32 s4, s4, 1
|
|
; GFX6-NEXT: s_bfe_u32 s1, s1, 0x80010
|
|
; GFX6-NEXT: s_lshl_b32 s4, s4, s7
|
|
; GFX6-NEXT: s_lshr_b32 s1, s1, s3
|
|
; GFX6-NEXT: s_or_b32 s1, s4, s1
|
|
; GFX6-NEXT: s_and_b32 s3, s9, 7
|
|
; GFX6-NEXT: s_andn2_b32 s4, 7, s9
|
|
; GFX6-NEXT: s_lshl_b32 s5, s5, 1
|
|
; GFX6-NEXT: s_and_b32 s2, s2, 0xff
|
|
; GFX6-NEXT: s_lshl_b32 s4, s5, s4
|
|
; GFX6-NEXT: s_lshr_b32 s3, s6, s3
|
|
; GFX6-NEXT: s_and_b32 s0, s0, 0xff
|
|
; GFX6-NEXT: s_lshl_b32 s2, s2, 8
|
|
; GFX6-NEXT: s_and_b32 s1, s1, 0xff
|
|
; GFX6-NEXT: s_or_b32 s3, s4, s3
|
|
; GFX6-NEXT: s_or_b32 s0, s0, s2
|
|
; GFX6-NEXT: s_lshl_b32 s1, s1, 16
|
|
; GFX6-NEXT: s_or_b32 s0, s0, s1
|
|
; GFX6-NEXT: s_and_b32 s1, s3, 0xff
|
|
; GFX6-NEXT: s_lshl_b32 s1, s1, 24
|
|
; GFX6-NEXT: s_or_b32 s0, s0, s1
|
|
; GFX6-NEXT: ; return to shader part epilog
|
|
;
|
|
; GFX8-LABEL: s_fshr_v4i8:
|
|
; GFX8: ; %bb.0:
|
|
; GFX8-NEXT: s_lshr_b32 s6, s1, 8
|
|
; GFX8-NEXT: s_lshr_b32 s7, s1, 16
|
|
; GFX8-NEXT: s_lshr_b32 s8, s1, 24
|
|
; GFX8-NEXT: s_lshr_b32 s9, s2, 8
|
|
; GFX8-NEXT: s_lshr_b32 s10, s2, 16
|
|
; GFX8-NEXT: s_lshr_b32 s11, s2, 24
|
|
; GFX8-NEXT: s_andn2_b32 s12, 7, s2
|
|
; GFX8-NEXT: s_and_b32 s2, s2, 7
|
|
; GFX8-NEXT: s_and_b32 s1, s1, 0xff
|
|
; GFX8-NEXT: s_lshr_b32 s3, s0, 8
|
|
; GFX8-NEXT: s_lshr_b32 s4, s0, 16
|
|
; GFX8-NEXT: s_lshr_b32 s5, s0, 24
|
|
; GFX8-NEXT: s_lshl_b32 s0, s0, 1
|
|
; GFX8-NEXT: s_and_b32 s12, 0xffff, s12
|
|
; GFX8-NEXT: s_and_b32 s1, 0xffff, s1
|
|
; GFX8-NEXT: s_and_b32 s2, 0xffff, s2
|
|
; GFX8-NEXT: s_lshl_b32 s0, s0, s12
|
|
; GFX8-NEXT: s_lshr_b32 s1, s1, s2
|
|
; GFX8-NEXT: s_andn2_b32 s2, 7, s9
|
|
; GFX8-NEXT: s_or_b32 s0, s0, s1
|
|
; GFX8-NEXT: s_lshl_b32 s1, s3, 1
|
|
; GFX8-NEXT: s_and_b32 s2, 0xffff, s2
|
|
; GFX8-NEXT: s_lshl_b32 s1, s1, s2
|
|
; GFX8-NEXT: s_and_b32 s2, s9, 7
|
|
; GFX8-NEXT: s_and_b32 s3, s6, 0xff
|
|
; GFX8-NEXT: s_and_b32 s3, 0xffff, s3
|
|
; GFX8-NEXT: s_and_b32 s2, 0xffff, s2
|
|
; GFX8-NEXT: s_lshr_b32 s2, s3, s2
|
|
; GFX8-NEXT: s_andn2_b32 s3, 7, s10
|
|
; GFX8-NEXT: s_or_b32 s1, s1, s2
|
|
; GFX8-NEXT: s_lshl_b32 s2, s4, 1
|
|
; GFX8-NEXT: s_and_b32 s3, 0xffff, s3
|
|
; GFX8-NEXT: s_lshl_b32 s2, s2, s3
|
|
; GFX8-NEXT: s_and_b32 s3, s10, 7
|
|
; GFX8-NEXT: s_and_b32 s4, s7, 0xff
|
|
; GFX8-NEXT: s_and_b32 s4, 0xffff, s4
|
|
; GFX8-NEXT: s_and_b32 s3, 0xffff, s3
|
|
; GFX8-NEXT: s_lshr_b32 s3, s4, s3
|
|
; GFX8-NEXT: s_andn2_b32 s4, 7, s11
|
|
; GFX8-NEXT: s_or_b32 s2, s2, s3
|
|
; GFX8-NEXT: s_lshl_b32 s3, s5, 1
|
|
; GFX8-NEXT: s_and_b32 s4, 0xffff, s4
|
|
; GFX8-NEXT: s_lshl_b32 s3, s3, s4
|
|
; GFX8-NEXT: s_and_b32 s4, s11, 7
|
|
; GFX8-NEXT: s_and_b32 s1, s1, 0xff
|
|
; GFX8-NEXT: s_and_b32 s4, 0xffff, s4
|
|
; GFX8-NEXT: s_and_b32 s0, s0, 0xff
|
|
; GFX8-NEXT: s_lshl_b32 s1, s1, 8
|
|
; GFX8-NEXT: s_lshr_b32 s4, s8, s4
|
|
; GFX8-NEXT: s_or_b32 s0, s0, s1
|
|
; GFX8-NEXT: s_and_b32 s1, s2, 0xff
|
|
; GFX8-NEXT: s_or_b32 s3, s3, s4
|
|
; GFX8-NEXT: s_lshl_b32 s1, s1, 16
|
|
; GFX8-NEXT: s_or_b32 s0, s0, s1
|
|
; GFX8-NEXT: s_and_b32 s1, s3, 0xff
|
|
; GFX8-NEXT: s_lshl_b32 s1, s1, 24
|
|
; GFX8-NEXT: s_or_b32 s0, s0, s1
|
|
; GFX8-NEXT: ; return to shader part epilog
|
|
;
|
|
; GFX9-LABEL: s_fshr_v4i8:
|
|
; GFX9: ; %bb.0:
|
|
; GFX9-NEXT: s_lshr_b32 s6, s1, 8
|
|
; GFX9-NEXT: s_lshr_b32 s7, s1, 16
|
|
; GFX9-NEXT: s_lshr_b32 s8, s1, 24
|
|
; GFX9-NEXT: s_lshr_b32 s9, s2, 8
|
|
; GFX9-NEXT: s_lshr_b32 s10, s2, 16
|
|
; GFX9-NEXT: s_lshr_b32 s11, s2, 24
|
|
; GFX9-NEXT: s_andn2_b32 s12, 7, s2
|
|
; GFX9-NEXT: s_and_b32 s2, s2, 7
|
|
; GFX9-NEXT: s_and_b32 s1, s1, 0xff
|
|
; GFX9-NEXT: s_lshr_b32 s3, s0, 8
|
|
; GFX9-NEXT: s_lshr_b32 s4, s0, 16
|
|
; GFX9-NEXT: s_lshr_b32 s5, s0, 24
|
|
; GFX9-NEXT: s_lshl_b32 s0, s0, 1
|
|
; GFX9-NEXT: s_and_b32 s12, 0xffff, s12
|
|
; GFX9-NEXT: s_and_b32 s1, 0xffff, s1
|
|
; GFX9-NEXT: s_and_b32 s2, 0xffff, s2
|
|
; GFX9-NEXT: s_lshl_b32 s0, s0, s12
|
|
; GFX9-NEXT: s_lshr_b32 s1, s1, s2
|
|
; GFX9-NEXT: s_andn2_b32 s2, 7, s9
|
|
; GFX9-NEXT: s_or_b32 s0, s0, s1
|
|
; GFX9-NEXT: s_lshl_b32 s1, s3, 1
|
|
; GFX9-NEXT: s_and_b32 s2, 0xffff, s2
|
|
; GFX9-NEXT: s_lshl_b32 s1, s1, s2
|
|
; GFX9-NEXT: s_and_b32 s2, s9, 7
|
|
; GFX9-NEXT: s_and_b32 s3, s6, 0xff
|
|
; GFX9-NEXT: s_and_b32 s3, 0xffff, s3
|
|
; GFX9-NEXT: s_and_b32 s2, 0xffff, s2
|
|
; GFX9-NEXT: s_lshr_b32 s2, s3, s2
|
|
; GFX9-NEXT: s_andn2_b32 s3, 7, s10
|
|
; GFX9-NEXT: s_or_b32 s1, s1, s2
|
|
; GFX9-NEXT: s_lshl_b32 s2, s4, 1
|
|
; GFX9-NEXT: s_and_b32 s3, 0xffff, s3
|
|
; GFX9-NEXT: s_lshl_b32 s2, s2, s3
|
|
; GFX9-NEXT: s_and_b32 s3, s10, 7
|
|
; GFX9-NEXT: s_and_b32 s4, s7, 0xff
|
|
; GFX9-NEXT: s_and_b32 s4, 0xffff, s4
|
|
; GFX9-NEXT: s_and_b32 s3, 0xffff, s3
|
|
; GFX9-NEXT: s_lshr_b32 s3, s4, s3
|
|
; GFX9-NEXT: s_andn2_b32 s4, 7, s11
|
|
; GFX9-NEXT: s_or_b32 s2, s2, s3
|
|
; GFX9-NEXT: s_lshl_b32 s3, s5, 1
|
|
; GFX9-NEXT: s_and_b32 s4, 0xffff, s4
|
|
; GFX9-NEXT: s_lshl_b32 s3, s3, s4
|
|
; GFX9-NEXT: s_and_b32 s4, s11, 7
|
|
; GFX9-NEXT: s_and_b32 s1, s1, 0xff
|
|
; GFX9-NEXT: s_and_b32 s4, 0xffff, s4
|
|
; GFX9-NEXT: s_and_b32 s0, s0, 0xff
|
|
; GFX9-NEXT: s_lshl_b32 s1, s1, 8
|
|
; GFX9-NEXT: s_lshr_b32 s4, s8, s4
|
|
; GFX9-NEXT: s_or_b32 s0, s0, s1
|
|
; GFX9-NEXT: s_and_b32 s1, s2, 0xff
|
|
; GFX9-NEXT: s_or_b32 s3, s3, s4
|
|
; GFX9-NEXT: s_lshl_b32 s1, s1, 16
|
|
; GFX9-NEXT: s_or_b32 s0, s0, s1
|
|
; GFX9-NEXT: s_and_b32 s1, s3, 0xff
|
|
; GFX9-NEXT: s_lshl_b32 s1, s1, 24
|
|
; GFX9-NEXT: s_or_b32 s0, s0, s1
|
|
; GFX9-NEXT: ; return to shader part epilog
|
|
;
|
|
; GFX10-LABEL: s_fshr_v4i8:
|
|
; GFX10: ; %bb.0:
|
|
; GFX10-NEXT: s_lshr_b32 s6, s1, 8
|
|
; GFX10-NEXT: s_lshr_b32 s7, s1, 16
|
|
; GFX10-NEXT: s_lshr_b32 s8, s1, 24
|
|
; GFX10-NEXT: s_lshr_b32 s9, s2, 8
|
|
; GFX10-NEXT: s_lshr_b32 s10, s2, 16
|
|
; GFX10-NEXT: s_lshr_b32 s11, s2, 24
|
|
; GFX10-NEXT: s_andn2_b32 s12, 7, s2
|
|
; GFX10-NEXT: s_and_b32 s1, s1, 0xff
|
|
; GFX10-NEXT: s_and_b32 s2, s2, 7
|
|
; GFX10-NEXT: s_and_b32 s1, 0xffff, s1
|
|
; GFX10-NEXT: s_and_b32 s2, 0xffff, s2
|
|
; GFX10-NEXT: s_lshr_b32 s3, s0, 8
|
|
; GFX10-NEXT: s_lshr_b32 s1, s1, s2
|
|
; GFX10-NEXT: s_andn2_b32 s2, 7, s9
|
|
; GFX10-NEXT: s_and_b32 s6, s6, 0xff
|
|
; GFX10-NEXT: s_and_b32 s9, s9, 7
|
|
; GFX10-NEXT: s_lshr_b32 s4, s0, 16
|
|
; GFX10-NEXT: s_lshr_b32 s5, s0, 24
|
|
; GFX10-NEXT: s_lshl_b32 s0, s0, 1
|
|
; GFX10-NEXT: s_and_b32 s12, 0xffff, s12
|
|
; GFX10-NEXT: s_lshl_b32 s3, s3, 1
|
|
; GFX10-NEXT: s_and_b32 s2, 0xffff, s2
|
|
; GFX10-NEXT: s_and_b32 s6, 0xffff, s6
|
|
; GFX10-NEXT: s_and_b32 s9, 0xffff, s9
|
|
; GFX10-NEXT: s_lshl_b32 s0, s0, s12
|
|
; GFX10-NEXT: s_lshl_b32 s2, s3, s2
|
|
; GFX10-NEXT: s_lshr_b32 s3, s6, s9
|
|
; GFX10-NEXT: s_or_b32 s0, s0, s1
|
|
; GFX10-NEXT: s_or_b32 s1, s2, s3
|
|
; GFX10-NEXT: s_andn2_b32 s2, 7, s10
|
|
; GFX10-NEXT: s_lshl_b32 s3, s4, 1
|
|
; GFX10-NEXT: s_and_b32 s4, s7, 0xff
|
|
; GFX10-NEXT: s_and_b32 s6, s10, 7
|
|
; GFX10-NEXT: s_and_b32 s2, 0xffff, s2
|
|
; GFX10-NEXT: s_and_b32 s4, 0xffff, s4
|
|
; GFX10-NEXT: s_and_b32 s6, 0xffff, s6
|
|
; GFX10-NEXT: s_lshl_b32 s2, s3, s2
|
|
; GFX10-NEXT: s_lshr_b32 s3, s4, s6
|
|
; GFX10-NEXT: s_lshl_b32 s4, s5, 1
|
|
; GFX10-NEXT: s_andn2_b32 s5, 7, s11
|
|
; GFX10-NEXT: s_and_b32 s6, s11, 7
|
|
; GFX10-NEXT: s_and_b32 s5, 0xffff, s5
|
|
; GFX10-NEXT: s_and_b32 s6, 0xffff, s6
|
|
; GFX10-NEXT: s_lshl_b32 s4, s4, s5
|
|
; GFX10-NEXT: s_lshr_b32 s5, s8, s6
|
|
; GFX10-NEXT: s_or_b32 s2, s2, s3
|
|
; GFX10-NEXT: s_and_b32 s1, s1, 0xff
|
|
; GFX10-NEXT: s_or_b32 s3, s4, s5
|
|
; GFX10-NEXT: s_and_b32 s0, s0, 0xff
|
|
; GFX10-NEXT: s_lshl_b32 s1, s1, 8
|
|
; GFX10-NEXT: s_and_b32 s2, s2, 0xff
|
|
; GFX10-NEXT: s_or_b32 s0, s0, s1
|
|
; GFX10-NEXT: s_lshl_b32 s1, s2, 16
|
|
; GFX10-NEXT: s_and_b32 s2, s3, 0xff
|
|
; GFX10-NEXT: s_or_b32 s0, s0, s1
|
|
; GFX10-NEXT: s_lshl_b32 s1, s2, 24
|
|
; GFX10-NEXT: s_or_b32 s0, s0, s1
|
|
; GFX10-NEXT: ; return to shader part epilog
|
|
;
|
|
; GFX11-LABEL: s_fshr_v4i8:
|
|
; GFX11: ; %bb.0:
|
|
; GFX11-NEXT: s_lshr_b32 s6, s1, 8
|
|
; GFX11-NEXT: s_lshr_b32 s7, s1, 16
|
|
; GFX11-NEXT: s_lshr_b32 s8, s1, 24
|
|
; GFX11-NEXT: s_lshr_b32 s9, s2, 8
|
|
; GFX11-NEXT: s_lshr_b32 s10, s2, 16
|
|
; GFX11-NEXT: s_lshr_b32 s11, s2, 24
|
|
; GFX11-NEXT: s_and_not1_b32 s12, 7, s2
|
|
; GFX11-NEXT: s_and_b32 s1, s1, 0xff
|
|
; GFX11-NEXT: s_and_b32 s2, s2, 7
|
|
; GFX11-NEXT: s_and_b32 s1, 0xffff, s1
|
|
; GFX11-NEXT: s_and_b32 s2, 0xffff, s2
|
|
; GFX11-NEXT: s_lshr_b32 s3, s0, 8
|
|
; GFX11-NEXT: s_lshr_b32 s1, s1, s2
|
|
; GFX11-NEXT: s_and_not1_b32 s2, 7, s9
|
|
; GFX11-NEXT: s_and_b32 s6, s6, 0xff
|
|
; GFX11-NEXT: s_and_b32 s9, s9, 7
|
|
; GFX11-NEXT: s_lshr_b32 s4, s0, 16
|
|
; GFX11-NEXT: s_lshr_b32 s5, s0, 24
|
|
; GFX11-NEXT: s_lshl_b32 s0, s0, 1
|
|
; GFX11-NEXT: s_and_b32 s12, 0xffff, s12
|
|
; GFX11-NEXT: s_lshl_b32 s3, s3, 1
|
|
; GFX11-NEXT: s_and_b32 s2, 0xffff, s2
|
|
; GFX11-NEXT: s_and_b32 s6, 0xffff, s6
|
|
; GFX11-NEXT: s_and_b32 s9, 0xffff, s9
|
|
; GFX11-NEXT: s_lshl_b32 s0, s0, s12
|
|
; GFX11-NEXT: s_lshl_b32 s2, s3, s2
|
|
; GFX11-NEXT: s_lshr_b32 s3, s6, s9
|
|
; GFX11-NEXT: s_or_b32 s0, s0, s1
|
|
; GFX11-NEXT: s_or_b32 s1, s2, s3
|
|
; GFX11-NEXT: s_and_not1_b32 s2, 7, s10
|
|
; GFX11-NEXT: s_lshl_b32 s3, s4, 1
|
|
; GFX11-NEXT: s_and_b32 s4, s7, 0xff
|
|
; GFX11-NEXT: s_and_b32 s6, s10, 7
|
|
; GFX11-NEXT: s_and_b32 s2, 0xffff, s2
|
|
; GFX11-NEXT: s_and_b32 s4, 0xffff, s4
|
|
; GFX11-NEXT: s_and_b32 s6, 0xffff, s6
|
|
; GFX11-NEXT: s_lshl_b32 s2, s3, s2
|
|
; GFX11-NEXT: s_lshr_b32 s3, s4, s6
|
|
; GFX11-NEXT: s_lshl_b32 s4, s5, 1
|
|
; GFX11-NEXT: s_and_not1_b32 s5, 7, s11
|
|
; GFX11-NEXT: s_and_b32 s6, s11, 7
|
|
; GFX11-NEXT: s_and_b32 s5, 0xffff, s5
|
|
; GFX11-NEXT: s_and_b32 s6, 0xffff, s6
|
|
; GFX11-NEXT: s_lshl_b32 s4, s4, s5
|
|
; GFX11-NEXT: s_lshr_b32 s5, s8, s6
|
|
; GFX11-NEXT: s_or_b32 s2, s2, s3
|
|
; GFX11-NEXT: s_and_b32 s1, s1, 0xff
|
|
; GFX11-NEXT: s_or_b32 s3, s4, s5
|
|
; GFX11-NEXT: s_and_b32 s0, s0, 0xff
|
|
; GFX11-NEXT: s_lshl_b32 s1, s1, 8
|
|
; GFX11-NEXT: s_and_b32 s2, s2, 0xff
|
|
; GFX11-NEXT: s_or_b32 s0, s0, s1
|
|
; GFX11-NEXT: s_lshl_b32 s1, s2, 16
|
|
; GFX11-NEXT: s_and_b32 s2, s3, 0xff
|
|
; GFX11-NEXT: s_or_b32 s0, s0, s1
|
|
; GFX11-NEXT: s_lshl_b32 s1, s2, 24
|
|
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
|
|
; GFX11-NEXT: s_or_b32 s0, s0, s1
|
|
; GFX11-NEXT: ; return to shader part epilog
|
|
%lhs = bitcast i32 %lhs.arg to <4 x i8>
|
|
%rhs = bitcast i32 %rhs.arg to <4 x i8>
|
|
%amt = bitcast i32 %amt.arg to <4 x i8>
|
|
%result = call <4 x i8> @llvm.fshr.v4i8(<4 x i8> %lhs, <4 x i8> %rhs, <4 x i8> %amt)
|
|
%cast.result = bitcast <4 x i8> %result to i32
|
|
ret i32 %cast.result
|
|
}
|
|
|
|
define i32 @v_fshr_v4i8(i32 %lhs.arg, i32 %rhs.arg, i32 %amt.arg) {
|
|
; GFX6-LABEL: v_fshr_v4i8:
|
|
; GFX6: ; %bb.0:
|
|
; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; GFX6-NEXT: v_lshrrev_b32_e32 v7, 8, v2
|
|
; GFX6-NEXT: v_lshrrev_b32_e32 v8, 16, v2
|
|
; GFX6-NEXT: v_lshrrev_b32_e32 v9, 24, v2
|
|
; GFX6-NEXT: v_and_b32_e32 v10, 7, v2
|
|
; GFX6-NEXT: v_not_b32_e32 v2, v2
|
|
; GFX6-NEXT: v_lshrrev_b32_e32 v3, 8, v0
|
|
; GFX6-NEXT: v_lshrrev_b32_e32 v4, 16, v0
|
|
; GFX6-NEXT: v_lshrrev_b32_e32 v5, 24, v0
|
|
; GFX6-NEXT: v_and_b32_e32 v2, 7, v2
|
|
; GFX6-NEXT: v_lshlrev_b32_e32 v0, 1, v0
|
|
; GFX6-NEXT: v_lshlrev_b32_e32 v0, v2, v0
|
|
; GFX6-NEXT: v_and_b32_e32 v2, 0xff, v1
|
|
; GFX6-NEXT: v_lshrrev_b32_e32 v2, v10, v2
|
|
; GFX6-NEXT: v_or_b32_e32 v0, v0, v2
|
|
; GFX6-NEXT: v_and_b32_e32 v2, 7, v7
|
|
; GFX6-NEXT: v_not_b32_e32 v7, v7
|
|
; GFX6-NEXT: v_and_b32_e32 v7, 7, v7
|
|
; GFX6-NEXT: v_lshlrev_b32_e32 v3, 1, v3
|
|
; GFX6-NEXT: v_lshlrev_b32_e32 v3, v7, v3
|
|
; GFX6-NEXT: v_bfe_u32 v7, v1, 8, 8
|
|
; GFX6-NEXT: v_lshrrev_b32_e32 v2, v2, v7
|
|
; GFX6-NEXT: v_not_b32_e32 v7, v8
|
|
; GFX6-NEXT: v_lshrrev_b32_e32 v6, 24, v1
|
|
; GFX6-NEXT: v_or_b32_e32 v2, v3, v2
|
|
; GFX6-NEXT: v_and_b32_e32 v3, 7, v8
|
|
; GFX6-NEXT: v_and_b32_e32 v7, 7, v7
|
|
; GFX6-NEXT: v_lshlrev_b32_e32 v4, 1, v4
|
|
; GFX6-NEXT: v_bfe_u32 v1, v1, 16, 8
|
|
; GFX6-NEXT: v_lshlrev_b32_e32 v4, v7, v4
|
|
; GFX6-NEXT: v_lshrrev_b32_e32 v1, v3, v1
|
|
; GFX6-NEXT: v_or_b32_e32 v1, v4, v1
|
|
; GFX6-NEXT: v_not_b32_e32 v4, v9
|
|
; GFX6-NEXT: v_and_b32_e32 v3, 7, v9
|
|
; GFX6-NEXT: v_and_b32_e32 v4, 7, v4
|
|
; GFX6-NEXT: v_lshlrev_b32_e32 v5, 1, v5
|
|
; GFX6-NEXT: v_and_b32_e32 v2, 0xff, v2
|
|
; GFX6-NEXT: v_lshlrev_b32_e32 v4, v4, v5
|
|
; GFX6-NEXT: v_lshrrev_b32_e32 v3, v3, v6
|
|
; GFX6-NEXT: v_and_b32_e32 v0, 0xff, v0
|
|
; GFX6-NEXT: v_lshlrev_b32_e32 v2, 8, v2
|
|
; GFX6-NEXT: v_and_b32_e32 v1, 0xff, v1
|
|
; GFX6-NEXT: v_or_b32_e32 v3, v4, v3
|
|
; GFX6-NEXT: v_or_b32_e32 v0, v0, v2
|
|
; GFX6-NEXT: v_lshlrev_b32_e32 v1, 16, v1
|
|
; GFX6-NEXT: v_or_b32_e32 v0, v0, v1
|
|
; GFX6-NEXT: v_and_b32_e32 v1, 0xff, v3
|
|
; GFX6-NEXT: v_lshlrev_b32_e32 v1, 24, v1
|
|
; GFX6-NEXT: v_or_b32_e32 v0, v0, v1
|
|
; GFX6-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; GFX8-LABEL: v_fshr_v4i8:
|
|
; GFX8: ; %bb.0:
|
|
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; GFX8-NEXT: v_xor_b32_e32 v7, -1, v2
|
|
; GFX8-NEXT: v_lshlrev_b16_e32 v6, 1, v0
|
|
; GFX8-NEXT: v_and_b32_e32 v7, 7, v7
|
|
; GFX8-NEXT: v_lshlrev_b16_e32 v6, v7, v6
|
|
; GFX8-NEXT: v_and_b32_e32 v7, 7, v2
|
|
; GFX8-NEXT: v_lshrrev_b32_e32 v5, 8, v2
|
|
; GFX8-NEXT: v_lshrrev_b16_sdwa v7, v7, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
|
|
; GFX8-NEXT: v_lshrrev_b32_e32 v3, 8, v0
|
|
; GFX8-NEXT: v_or_b32_e32 v6, v6, v7
|
|
; GFX8-NEXT: v_xor_b32_e32 v7, -1, v5
|
|
; GFX8-NEXT: v_lshrrev_b32_e32 v4, 8, v1
|
|
; GFX8-NEXT: v_lshlrev_b16_e32 v3, 1, v3
|
|
; GFX8-NEXT: v_and_b32_e32 v7, 7, v7
|
|
; GFX8-NEXT: v_and_b32_e32 v5, 7, v5
|
|
; GFX8-NEXT: v_lshlrev_b16_e32 v3, v7, v3
|
|
; GFX8-NEXT: v_lshrrev_b16_sdwa v4, v5, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
|
|
; GFX8-NEXT: v_mov_b32_e32 v7, -1
|
|
; GFX8-NEXT: v_or_b32_e32 v3, v3, v4
|
|
; GFX8-NEXT: v_mov_b32_e32 v4, 1
|
|
; GFX8-NEXT: v_xor_b32_sdwa v9, v2, v7 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
|
|
; GFX8-NEXT: v_lshlrev_b16_sdwa v5, v4, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
|
|
; GFX8-NEXT: v_and_b32_e32 v9, 7, v9
|
|
; GFX8-NEXT: v_mov_b32_e32 v8, 0xff
|
|
; GFX8-NEXT: v_lshlrev_b16_e32 v5, v9, v5
|
|
; GFX8-NEXT: v_mov_b32_e32 v9, 7
|
|
; GFX8-NEXT: v_lshlrev_b16_sdwa v0, v4, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
|
|
; GFX8-NEXT: v_xor_b32_sdwa v4, v2, v7 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:DWORD
|
|
; GFX8-NEXT: v_and_b32_sdwa v10, v2, v9 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
|
|
; GFX8-NEXT: v_and_b32_sdwa v8, v1, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
|
|
; GFX8-NEXT: v_and_b32_e32 v4, 7, v4
|
|
; GFX8-NEXT: v_and_b32_sdwa v2, v2, v9 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:DWORD
|
|
; GFX8-NEXT: v_lshrrev_b16_e32 v8, v10, v8
|
|
; GFX8-NEXT: v_lshlrev_b16_e32 v0, v4, v0
|
|
; GFX8-NEXT: v_lshrrev_b16_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
|
|
; GFX8-NEXT: v_or_b32_e32 v5, v5, v8
|
|
; GFX8-NEXT: v_or_b32_e32 v0, v0, v1
|
|
; GFX8-NEXT: v_mov_b32_e32 v1, 8
|
|
; GFX8-NEXT: v_lshlrev_b32_sdwa v1, v1, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
|
|
; GFX8-NEXT: v_and_b32_e32 v2, 0xff, v5
|
|
; GFX8-NEXT: v_or_b32_sdwa v1, v6, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
|
|
; GFX8-NEXT: v_lshlrev_b32_e32 v2, 16, v2
|
|
; GFX8-NEXT: v_and_b32_e32 v0, 0xff, v0
|
|
; GFX8-NEXT: v_or_b32_e32 v1, v1, v2
|
|
; GFX8-NEXT: v_lshlrev_b32_e32 v0, 24, v0
|
|
; GFX8-NEXT: v_or_b32_e32 v0, v1, v0
|
|
; GFX8-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; GFX9-LABEL: v_fshr_v4i8:
|
|
; GFX9: ; %bb.0:
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; GFX9-NEXT: v_xor_b32_e32 v7, -1, v2
|
|
; GFX9-NEXT: v_lshlrev_b16_e32 v6, 1, v0
|
|
; GFX9-NEXT: v_and_b32_e32 v7, 7, v7
|
|
; GFX9-NEXT: v_lshlrev_b16_e32 v6, v7, v6
|
|
; GFX9-NEXT: v_and_b32_e32 v7, 7, v2
|
|
; GFX9-NEXT: v_lshrrev_b32_e32 v5, 8, v2
|
|
; GFX9-NEXT: v_lshrrev_b16_sdwa v7, v7, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
|
|
; GFX9-NEXT: v_lshrrev_b32_e32 v3, 8, v0
|
|
; GFX9-NEXT: v_or_b32_e32 v6, v6, v7
|
|
; GFX9-NEXT: v_xor_b32_e32 v7, -1, v5
|
|
; GFX9-NEXT: v_lshrrev_b32_e32 v4, 8, v1
|
|
; GFX9-NEXT: v_lshlrev_b16_e32 v3, 1, v3
|
|
; GFX9-NEXT: v_and_b32_e32 v7, 7, v7
|
|
; GFX9-NEXT: v_and_b32_e32 v5, 7, v5
|
|
; GFX9-NEXT: v_lshlrev_b16_e32 v3, v7, v3
|
|
; GFX9-NEXT: v_lshrrev_b16_sdwa v4, v5, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
|
|
; GFX9-NEXT: v_mov_b32_e32 v7, -1
|
|
; GFX9-NEXT: v_or_b32_e32 v3, v3, v4
|
|
; GFX9-NEXT: v_mov_b32_e32 v4, 1
|
|
; GFX9-NEXT: v_xor_b32_sdwa v9, v2, v7 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
|
|
; GFX9-NEXT: v_lshlrev_b16_sdwa v5, v4, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
|
|
; GFX9-NEXT: v_and_b32_e32 v9, 7, v9
|
|
; GFX9-NEXT: v_mov_b32_e32 v8, 0xff
|
|
; GFX9-NEXT: v_lshlrev_b16_e32 v5, v9, v5
|
|
; GFX9-NEXT: v_mov_b32_e32 v9, 7
|
|
; GFX9-NEXT: v_lshlrev_b16_sdwa v0, v4, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
|
|
; GFX9-NEXT: v_xor_b32_sdwa v4, v2, v7 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:DWORD
|
|
; GFX9-NEXT: v_and_b32_sdwa v10, v2, v9 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
|
|
; GFX9-NEXT: v_and_b32_sdwa v11, v1, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
|
|
; GFX9-NEXT: v_and_b32_e32 v4, 7, v4
|
|
; GFX9-NEXT: v_and_b32_sdwa v2, v2, v9 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:DWORD
|
|
; GFX9-NEXT: v_lshrrev_b16_e32 v10, v10, v11
|
|
; GFX9-NEXT: v_lshlrev_b16_e32 v0, v4, v0
|
|
; GFX9-NEXT: v_lshrrev_b16_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3
|
|
; GFX9-NEXT: v_or_b32_e32 v5, v5, v10
|
|
; GFX9-NEXT: v_or_b32_e32 v0, v0, v1
|
|
; GFX9-NEXT: v_mov_b32_e32 v1, 8
|
|
; GFX9-NEXT: v_lshlrev_b32_sdwa v1, v1, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
|
|
; GFX9-NEXT: v_and_b32_e32 v2, 0xff, v5
|
|
; GFX9-NEXT: v_and_b32_e32 v0, 0xff, v0
|
|
; GFX9-NEXT: v_and_or_b32 v1, v6, v8, v1
|
|
; GFX9-NEXT: v_lshlrev_b32_e32 v2, 16, v2
|
|
; GFX9-NEXT: v_lshlrev_b32_e32 v0, 24, v0
|
|
; GFX9-NEXT: v_or3_b32 v0, v1, v2, v0
|
|
; GFX9-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; GFX10-LABEL: v_fshr_v4i8:
|
|
; GFX10: ; %bb.0:
|
|
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; GFX10-NEXT: v_lshrrev_b32_e32 v5, 8, v2
|
|
; GFX10-NEXT: v_lshrrev_b32_e32 v4, 8, v0
|
|
; GFX10-NEXT: v_xor_b32_e32 v8, -1, v2
|
|
; GFX10-NEXT: v_mov_b32_e32 v3, -1
|
|
; GFX10-NEXT: v_lshrrev_b32_e32 v6, 16, v0
|
|
; GFX10-NEXT: v_xor_b32_e32 v10, -1, v5
|
|
; GFX10-NEXT: v_lshrrev_b32_e32 v7, 24, v0
|
|
; GFX10-NEXT: v_lshrrev_b32_e32 v9, 8, v1
|
|
; GFX10-NEXT: v_lshlrev_b16 v0, 1, v0
|
|
; GFX10-NEXT: v_and_b32_e32 v8, 7, v8
|
|
; GFX10-NEXT: v_lshlrev_b16 v4, 1, v4
|
|
; GFX10-NEXT: v_and_b32_e32 v10, 7, v10
|
|
; GFX10-NEXT: v_mov_b32_e32 v14, 0xff
|
|
; GFX10-NEXT: v_lshrrev_b32_e32 v11, 24, v1
|
|
; GFX10-NEXT: v_lshlrev_b16 v0, v8, v0
|
|
; GFX10-NEXT: v_and_b32_e32 v8, 0xff, v9
|
|
; GFX10-NEXT: v_lshlrev_b16 v4, v10, v4
|
|
; GFX10-NEXT: v_xor_b32_sdwa v9, v2, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
|
|
; GFX10-NEXT: v_mov_b32_e32 v10, 7
|
|
; GFX10-NEXT: v_xor_b32_sdwa v3, v2, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:DWORD
|
|
; GFX10-NEXT: v_and_b32_e32 v12, 7, v2
|
|
; GFX10-NEXT: v_and_b32_e32 v13, 0xff, v1
|
|
; GFX10-NEXT: v_and_b32_e32 v5, 7, v5
|
|
; GFX10-NEXT: v_lshlrev_b16 v6, 1, v6
|
|
; GFX10-NEXT: v_and_b32_e32 v9, 7, v9
|
|
; GFX10-NEXT: v_and_b32_sdwa v15, v2, v10 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
|
|
; GFX10-NEXT: v_and_b32_sdwa v1, v1, v14 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
|
|
; GFX10-NEXT: v_lshlrev_b16 v7, 1, v7
|
|
; GFX10-NEXT: v_and_b32_e32 v3, 7, v3
|
|
; GFX10-NEXT: v_and_b32_sdwa v2, v2, v10 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:DWORD
|
|
; GFX10-NEXT: v_lshrrev_b16 v5, v5, v8
|
|
; GFX10-NEXT: v_lshlrev_b16 v6, v9, v6
|
|
; GFX10-NEXT: v_lshrrev_b16 v1, v15, v1
|
|
; GFX10-NEXT: v_lshlrev_b16 v3, v3, v7
|
|
; GFX10-NEXT: v_lshrrev_b16 v2, v2, v11
|
|
; GFX10-NEXT: v_lshrrev_b16 v7, v12, v13
|
|
; GFX10-NEXT: v_or_b32_e32 v4, v4, v5
|
|
; GFX10-NEXT: v_mov_b32_e32 v5, 8
|
|
; GFX10-NEXT: v_or_b32_e32 v1, v6, v1
|
|
; GFX10-NEXT: v_or_b32_e32 v2, v3, v2
|
|
; GFX10-NEXT: v_or_b32_e32 v0, v0, v7
|
|
; GFX10-NEXT: v_lshlrev_b32_sdwa v3, v5, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
|
|
; GFX10-NEXT: v_and_b32_e32 v1, 0xff, v1
|
|
; GFX10-NEXT: v_and_b32_e32 v2, 0xff, v2
|
|
; GFX10-NEXT: v_and_or_b32 v0, 0xff, v0, v3
|
|
; GFX10-NEXT: v_lshlrev_b32_e32 v1, 16, v1
|
|
; GFX10-NEXT: v_lshlrev_b32_e32 v2, 24, v2
|
|
; GFX10-NEXT: v_or3_b32 v0, v0, v1, v2
|
|
; GFX10-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; GFX11-LABEL: v_fshr_v4i8:
|
|
; GFX11: ; %bb.0:
|
|
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; GFX11-NEXT: v_lshrrev_b32_e32 v6, 8, v1
|
|
; GFX11-NEXT: v_lshrrev_b32_e32 v7, 8, v2
|
|
; GFX11-NEXT: v_lshrrev_b32_e32 v3, 8, v0
|
|
; GFX11-NEXT: v_lshrrev_b32_e32 v11, 16, v2
|
|
; GFX11-NEXT: v_lshrrev_b32_e32 v13, 24, v2
|
|
; GFX11-NEXT: v_and_b32_e32 v6, 0xff, v6
|
|
; GFX11-NEXT: v_xor_b32_e32 v12, -1, v7
|
|
; GFX11-NEXT: v_and_b32_e32 v7, 7, v7
|
|
; GFX11-NEXT: v_lshrrev_b32_e32 v4, 16, v0
|
|
; GFX11-NEXT: v_lshrrev_b32_e32 v5, 24, v0
|
|
; GFX11-NEXT: v_lshrrev_b32_e32 v8, 16, v1
|
|
; GFX11-NEXT: v_lshlrev_b16 v3, 1, v3
|
|
; GFX11-NEXT: v_and_b32_e32 v12, 7, v12
|
|
; GFX11-NEXT: v_xor_b32_e32 v14, -1, v11
|
|
; GFX11-NEXT: v_lshrrev_b16 v6, v7, v6
|
|
; GFX11-NEXT: v_xor_b32_e32 v7, -1, v13
|
|
; GFX11-NEXT: v_lshrrev_b32_e32 v9, 24, v1
|
|
; GFX11-NEXT: v_xor_b32_e32 v10, -1, v2
|
|
; GFX11-NEXT: v_lshlrev_b16 v3, v12, v3
|
|
; GFX11-NEXT: v_lshlrev_b16 v4, 1, v4
|
|
; GFX11-NEXT: v_and_b32_e32 v12, 7, v14
|
|
; GFX11-NEXT: v_and_b32_e32 v11, 7, v11
|
|
; GFX11-NEXT: v_and_b32_e32 v8, 0xff, v8
|
|
; GFX11-NEXT: v_lshlrev_b16 v5, 1, v5
|
|
; GFX11-NEXT: v_and_b32_e32 v7, 7, v7
|
|
; GFX11-NEXT: v_and_b32_e32 v13, 7, v13
|
|
; GFX11-NEXT: v_lshlrev_b16 v0, 1, v0
|
|
; GFX11-NEXT: v_and_b32_e32 v10, 7, v10
|
|
; GFX11-NEXT: v_and_b32_e32 v2, 7, v2
|
|
; GFX11-NEXT: v_and_b32_e32 v1, 0xff, v1
|
|
; GFX11-NEXT: v_or_b32_e32 v3, v3, v6
|
|
; GFX11-NEXT: v_lshlrev_b16 v4, v12, v4
|
|
; GFX11-NEXT: v_lshrrev_b16 v6, v11, v8
|
|
; GFX11-NEXT: v_lshlrev_b16 v5, v7, v5
|
|
; GFX11-NEXT: v_lshrrev_b16 v7, v13, v9
|
|
; GFX11-NEXT: v_lshlrev_b16 v0, v10, v0
|
|
; GFX11-NEXT: v_lshrrev_b16 v1, v2, v1
|
|
; GFX11-NEXT: v_and_b32_e32 v2, 0xff, v3
|
|
; GFX11-NEXT: v_or_b32_e32 v3, v4, v6
|
|
; GFX11-NEXT: v_or_b32_e32 v4, v5, v7
|
|
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
|
|
; GFX11-NEXT: v_or_b32_e32 v0, v0, v1
|
|
; GFX11-NEXT: v_lshlrev_b32_e32 v1, 8, v2
|
|
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
|
|
; GFX11-NEXT: v_and_b32_e32 v2, 0xff, v3
|
|
; GFX11-NEXT: v_and_b32_e32 v3, 0xff, v4
|
|
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
|
|
; GFX11-NEXT: v_and_or_b32 v0, 0xff, v0, v1
|
|
; GFX11-NEXT: v_lshlrev_b32_e32 v1, 16, v2
|
|
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
|
|
; GFX11-NEXT: v_lshlrev_b32_e32 v2, 24, v3
|
|
; GFX11-NEXT: v_or3_b32 v0, v0, v1, v2
|
|
; GFX11-NEXT: s_setpc_b64 s[30:31]
|
|
%lhs = bitcast i32 %lhs.arg to <4 x i8>
|
|
%rhs = bitcast i32 %rhs.arg to <4 x i8>
|
|
%amt = bitcast i32 %amt.arg to <4 x i8>
|
|
%result = call <4 x i8> @llvm.fshr.v4i8(<4 x i8> %lhs, <4 x i8> %rhs, <4 x i8> %amt)
|
|
%cast.result = bitcast <4 x i8> %result to i32
|
|
ret i32 %cast.result
|
|
}
|
|
|
|
; Scalar-input funnel-shift-right at a non-power-of-2 width (i24). The checks
; below show the lowering reducing the shift amount modulo 24 with a
; reciprocal-based unsigned division sequence (v_rcp / v_mul_hi / v_cndmask).
; NOTE(review): every "GFX*-LABEL" / "GFX*-NEXT" comment line below is a
; FileCheck assertion autogenerated by update_llc_test_checks.py (see the file
; header). Regenerate with that script instead of editing these lines by hand.
define amdgpu_ps i24 @s_fshr_i24(i24 inreg %lhs, i24 inreg %rhs, i24 inreg %amt) {
|
|
; GFX6-LABEL: s_fshr_i24:
|
|
; GFX6: ; %bb.0:
|
|
; GFX6-NEXT: v_cvt_f32_ubyte0_e32 v0, 24
|
|
; GFX6-NEXT: v_rcp_iflag_f32_e32 v0, v0
|
|
; GFX6-NEXT: v_not_b32_e32 v1, 23
|
|
; GFX6-NEXT: s_and_b32 s2, s2, 0xffffff
|
|
; GFX6-NEXT: s_lshl_b32 s0, s0, 1
|
|
; GFX6-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0
|
|
; GFX6-NEXT: v_cvt_u32_f32_e32 v0, v0
|
|
; GFX6-NEXT: s_and_b32 s1, s1, 0xffffff
|
|
; GFX6-NEXT: v_mul_lo_u32 v2, v0, v1
|
|
; GFX6-NEXT: v_mul_hi_u32 v2, v0, v2
|
|
; GFX6-NEXT: v_add_i32_e32 v0, vcc, v0, v2
|
|
; GFX6-NEXT: v_mul_hi_u32 v0, s2, v0
|
|
; GFX6-NEXT: v_mul_lo_u32 v0, v0, 24
|
|
; GFX6-NEXT: v_sub_i32_e32 v0, vcc, s2, v0
|
|
; GFX6-NEXT: v_add_i32_e32 v2, vcc, v0, v1
|
|
; GFX6-NEXT: v_cmp_le_u32_e32 vcc, 24, v0
|
|
; GFX6-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc
|
|
; GFX6-NEXT: v_add_i32_e32 v1, vcc, v0, v1
|
|
; GFX6-NEXT: v_cmp_le_u32_e32 vcc, 24, v0
|
|
; GFX6-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
|
|
; GFX6-NEXT: v_sub_i32_e32 v1, vcc, 23, v0
|
|
; GFX6-NEXT: v_and_b32_e32 v0, 0xffffff, v0
|
|
; GFX6-NEXT: v_and_b32_e32 v1, 0xffffff, v1
|
|
; GFX6-NEXT: v_lshl_b32_e32 v1, s0, v1
|
|
; GFX6-NEXT: v_lshr_b32_e32 v0, s1, v0
|
|
; GFX6-NEXT: v_or_b32_e32 v0, v1, v0
|
|
; GFX6-NEXT: v_readfirstlane_b32 s0, v0
|
|
; GFX6-NEXT: ; return to shader part epilog
|
|
;
|
|
; GFX8-LABEL: s_fshr_i24:
|
|
; GFX8: ; %bb.0:
|
|
; GFX8-NEXT: v_cvt_f32_ubyte0_e32 v0, 24
|
|
; GFX8-NEXT: v_rcp_iflag_f32_e32 v0, v0
|
|
; GFX8-NEXT: v_not_b32_e32 v1, 23
|
|
; GFX8-NEXT: s_and_b32 s2, s2, 0xffffff
|
|
; GFX8-NEXT: s_lshl_b32 s0, s0, 1
|
|
; GFX8-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0
|
|
; GFX8-NEXT: v_cvt_u32_f32_e32 v0, v0
|
|
; GFX8-NEXT: s_and_b32 s1, s1, 0xffffff
|
|
; GFX8-NEXT: v_mul_lo_u32 v2, v0, v1
|
|
; GFX8-NEXT: v_mul_hi_u32 v2, v0, v2
|
|
; GFX8-NEXT: v_add_u32_e32 v0, vcc, v0, v2
|
|
; GFX8-NEXT: v_mul_hi_u32 v0, s2, v0
|
|
; GFX8-NEXT: v_mul_lo_u32 v0, v0, 24
|
|
; GFX8-NEXT: v_sub_u32_e32 v0, vcc, s2, v0
|
|
; GFX8-NEXT: v_add_u32_e32 v2, vcc, v0, v1
|
|
; GFX8-NEXT: v_cmp_le_u32_e32 vcc, 24, v0
|
|
; GFX8-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc
|
|
; GFX8-NEXT: v_add_u32_e32 v1, vcc, v0, v1
|
|
; GFX8-NEXT: v_cmp_le_u32_e32 vcc, 24, v0
|
|
; GFX8-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
|
|
; GFX8-NEXT: v_sub_u32_e32 v1, vcc, 23, v0
|
|
; GFX8-NEXT: v_and_b32_e32 v0, 0xffffff, v0
|
|
; GFX8-NEXT: v_and_b32_e32 v1, 0xffffff, v1
|
|
; GFX8-NEXT: v_lshlrev_b32_e64 v1, v1, s0
|
|
; GFX8-NEXT: v_lshrrev_b32_e64 v0, v0, s1
|
|
; GFX8-NEXT: v_or_b32_e32 v0, v1, v0
|
|
; GFX8-NEXT: v_readfirstlane_b32 s0, v0
|
|
; GFX8-NEXT: ; return to shader part epilog
|
|
;
|
|
; GFX9-LABEL: s_fshr_i24:
|
|
; GFX9: ; %bb.0:
|
|
; GFX9-NEXT: v_cvt_f32_ubyte0_e32 v0, 24
|
|
; GFX9-NEXT: v_rcp_iflag_f32_e32 v0, v0
|
|
; GFX9-NEXT: v_not_b32_e32 v1, 23
|
|
; GFX9-NEXT: s_and_b32 s2, s2, 0xffffff
|
|
; GFX9-NEXT: s_and_b32 s1, s1, 0xffffff
|
|
; GFX9-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0
|
|
; GFX9-NEXT: v_cvt_u32_f32_e32 v0, v0
|
|
; GFX9-NEXT: s_lshl_b32 s0, s0, 1
|
|
; GFX9-NEXT: v_mul_lo_u32 v1, v0, v1
|
|
; GFX9-NEXT: v_mul_hi_u32 v1, v0, v1
|
|
; GFX9-NEXT: v_add_u32_e32 v0, v0, v1
|
|
; GFX9-NEXT: v_mul_hi_u32 v0, s2, v0
|
|
; GFX9-NEXT: v_mul_lo_u32 v0, v0, 24
|
|
; GFX9-NEXT: v_sub_u32_e32 v0, s2, v0
|
|
; GFX9-NEXT: v_add_u32_e32 v1, 0xffffffe8, v0
|
|
; GFX9-NEXT: v_cmp_le_u32_e32 vcc, 24, v0
|
|
; GFX9-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
|
|
; GFX9-NEXT: v_add_u32_e32 v1, 0xffffffe8, v0
|
|
; GFX9-NEXT: v_cmp_le_u32_e32 vcc, 24, v0
|
|
; GFX9-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
|
|
; GFX9-NEXT: v_sub_u32_e32 v1, 23, v0
|
|
; GFX9-NEXT: v_and_b32_e32 v0, 0xffffff, v0
|
|
; GFX9-NEXT: v_and_b32_e32 v1, 0xffffff, v1
|
|
; GFX9-NEXT: v_lshrrev_b32_e64 v0, v0, s1
|
|
; GFX9-NEXT: v_lshl_or_b32 v0, s0, v1, v0
|
|
; GFX9-NEXT: v_readfirstlane_b32 s0, v0
|
|
; GFX9-NEXT: ; return to shader part epilog
|
|
;
|
|
; GFX10-LABEL: s_fshr_i24:
|
|
; GFX10: ; %bb.0:
|
|
; GFX10-NEXT: v_cvt_f32_ubyte0_e32 v0, 24
|
|
; GFX10-NEXT: s_and_b32 s2, s2, 0xffffff
|
|
; GFX10-NEXT: s_and_b32 s1, s1, 0xffffff
|
|
; GFX10-NEXT: s_lshl_b32 s0, s0, 1
|
|
; GFX10-NEXT: v_rcp_iflag_f32_e32 v0, v0
|
|
; GFX10-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0
|
|
; GFX10-NEXT: v_cvt_u32_f32_e32 v0, v0
|
|
; GFX10-NEXT: v_mul_lo_u32 v1, 0xffffffe8, v0
|
|
; GFX10-NEXT: v_mul_hi_u32 v1, v0, v1
|
|
; GFX10-NEXT: v_add_nc_u32_e32 v0, v0, v1
|
|
; GFX10-NEXT: v_mul_hi_u32 v0, s2, v0
|
|
; GFX10-NEXT: v_mul_lo_u32 v0, v0, 24
|
|
; GFX10-NEXT: v_sub_nc_u32_e32 v0, s2, v0
|
|
; GFX10-NEXT: v_add_nc_u32_e32 v1, 0xffffffe8, v0
|
|
; GFX10-NEXT: v_cmp_le_u32_e32 vcc_lo, 24, v0
|
|
; GFX10-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc_lo
|
|
; GFX10-NEXT: v_add_nc_u32_e32 v1, 0xffffffe8, v0
|
|
; GFX10-NEXT: v_cmp_le_u32_e32 vcc_lo, 24, v0
|
|
; GFX10-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc_lo
|
|
; GFX10-NEXT: v_sub_nc_u32_e32 v1, 23, v0
|
|
; GFX10-NEXT: v_and_b32_e32 v0, 0xffffff, v0
|
|
; GFX10-NEXT: v_and_b32_e32 v1, 0xffffff, v1
|
|
; GFX10-NEXT: v_lshrrev_b32_e64 v0, v0, s1
|
|
; GFX10-NEXT: v_lshl_or_b32 v0, s0, v1, v0
|
|
; GFX10-NEXT: v_readfirstlane_b32 s0, v0
|
|
; GFX10-NEXT: ; return to shader part epilog
|
|
;
|
|
; GFX11-LABEL: s_fshr_i24:
|
|
; GFX11: ; %bb.0:
|
|
; GFX11-NEXT: v_cvt_f32_ubyte0_e32 v0, 24
|
|
; GFX11-NEXT: s_and_b32 s2, s2, 0xffffff
|
|
; GFX11-NEXT: s_and_b32 s1, s1, 0xffffff
|
|
; GFX11-NEXT: s_lshl_b32 s0, s0, 1
|
|
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
|
|
; GFX11-NEXT: v_rcp_iflag_f32_e32 v0, v0
|
|
; GFX11-NEXT: s_waitcnt_depctr 0xfff
|
|
; GFX11-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0
|
|
; GFX11-NEXT: v_cvt_u32_f32_e32 v0, v0
|
|
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
|
|
; GFX11-NEXT: v_mul_lo_u32 v1, 0xffffffe8, v0
|
|
; GFX11-NEXT: v_mul_hi_u32 v1, v0, v1
|
|
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
|
|
; GFX11-NEXT: v_add_nc_u32_e32 v0, v0, v1
|
|
; GFX11-NEXT: v_mul_hi_u32 v0, s2, v0
|
|
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
|
|
; GFX11-NEXT: v_mul_lo_u32 v0, v0, 24
|
|
; GFX11-NEXT: v_sub_nc_u32_e32 v0, s2, v0
|
|
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
|
|
; GFX11-NEXT: v_add_nc_u32_e32 v1, 0xffffffe8, v0
|
|
; GFX11-NEXT: v_cmp_le_u32_e32 vcc_lo, 24, v0
|
|
; GFX11-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc_lo
|
|
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
|
|
; GFX11-NEXT: v_add_nc_u32_e32 v1, 0xffffffe8, v0
|
|
; GFX11-NEXT: v_cmp_le_u32_e32 vcc_lo, 24, v0
|
|
; GFX11-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc_lo
|
|
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
|
|
; GFX11-NEXT: v_sub_nc_u32_e32 v1, 23, v0
|
|
; GFX11-NEXT: v_and_b32_e32 v0, 0xffffff, v0
|
|
; GFX11-NEXT: v_and_b32_e32 v1, 0xffffff, v1
|
|
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
|
|
; GFX11-NEXT: v_lshrrev_b32_e64 v0, v0, s1
|
|
; GFX11-NEXT: v_lshl_or_b32 v0, s0, v1, v0
|
|
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
|
|
; GFX11-NEXT: v_readfirstlane_b32 s0, v0
|
|
; GFX11-NEXT: ; return to shader part epilog
|
|
; The IR under test: fshr concatenates lhs:rhs and shifts right by %amt
; (taken modulo the 24-bit width, as the division sequence above shows).
%result = call i24 @llvm.fshr.i24(i24 %lhs, i24 %rhs, i24 %amt)
|
|
ret i24 %result
|
|
}
|
|
|
|
; VGPR-input variant of the i24 funnel-shift-right test: same modulo-24
; amount reduction as s_fshr_i24, but with all operands divergent.
; NOTE(review): every "GFX*-LABEL" / "GFX*-NEXT" comment line below is a
; FileCheck assertion autogenerated by update_llc_test_checks.py (see the file
; header). Regenerate with that script instead of editing these lines by hand.
define i24 @v_fshr_i24(i24 %lhs, i24 %rhs, i24 %amt) {
|
|
; GFX6-LABEL: v_fshr_i24:
|
|
; GFX6: ; %bb.0:
|
|
; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; GFX6-NEXT: v_cvt_f32_ubyte0_e32 v3, 24
|
|
; GFX6-NEXT: v_rcp_iflag_f32_e32 v3, v3
|
|
; GFX6-NEXT: v_not_b32_e32 v4, 23
|
|
; GFX6-NEXT: v_and_b32_e32 v2, 0xffffff, v2
|
|
; GFX6-NEXT: v_lshlrev_b32_e32 v0, 1, v0
|
|
; GFX6-NEXT: v_mul_f32_e32 v3, 0x4f7ffffe, v3
|
|
; GFX6-NEXT: v_cvt_u32_f32_e32 v3, v3
|
|
; GFX6-NEXT: v_and_b32_e32 v1, 0xffffff, v1
|
|
; GFX6-NEXT: v_mul_lo_u32 v5, v3, v4
|
|
; GFX6-NEXT: v_mul_hi_u32 v5, v3, v5
|
|
; GFX6-NEXT: v_add_i32_e32 v3, vcc, v3, v5
|
|
; GFX6-NEXT: v_mul_hi_u32 v3, v2, v3
|
|
; GFX6-NEXT: v_mul_lo_u32 v3, v3, 24
|
|
; GFX6-NEXT: v_sub_i32_e32 v2, vcc, v2, v3
|
|
; GFX6-NEXT: v_add_i32_e32 v3, vcc, v2, v4
|
|
; GFX6-NEXT: v_cmp_le_u32_e32 vcc, 24, v2
|
|
; GFX6-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc
|
|
; GFX6-NEXT: v_add_i32_e32 v3, vcc, 0xffffffe8, v2
|
|
; GFX6-NEXT: v_cmp_le_u32_e32 vcc, 24, v2
|
|
; GFX6-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc
|
|
; GFX6-NEXT: v_sub_i32_e32 v3, vcc, 23, v2
|
|
; GFX6-NEXT: v_and_b32_e32 v2, 0xffffff, v2
|
|
; GFX6-NEXT: v_and_b32_e32 v3, 0xffffff, v3
|
|
; GFX6-NEXT: v_lshlrev_b32_e32 v0, v3, v0
|
|
; GFX6-NEXT: v_lshrrev_b32_e32 v1, v2, v1
|
|
; GFX6-NEXT: v_or_b32_e32 v0, v0, v1
|
|
; GFX6-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; GFX8-LABEL: v_fshr_i24:
|
|
; GFX8: ; %bb.0:
|
|
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; GFX8-NEXT: v_cvt_f32_ubyte0_e32 v3, 24
|
|
; GFX8-NEXT: v_rcp_iflag_f32_e32 v3, v3
|
|
; GFX8-NEXT: v_not_b32_e32 v4, 23
|
|
; GFX8-NEXT: v_and_b32_e32 v2, 0xffffff, v2
|
|
; GFX8-NEXT: v_lshlrev_b32_e32 v0, 1, v0
|
|
; GFX8-NEXT: v_mul_f32_e32 v3, 0x4f7ffffe, v3
|
|
; GFX8-NEXT: v_cvt_u32_f32_e32 v3, v3
|
|
; GFX8-NEXT: v_and_b32_e32 v1, 0xffffff, v1
|
|
; GFX8-NEXT: v_mul_lo_u32 v5, v3, v4
|
|
; GFX8-NEXT: v_mul_hi_u32 v5, v3, v5
|
|
; GFX8-NEXT: v_add_u32_e32 v3, vcc, v3, v5
|
|
; GFX8-NEXT: v_mul_hi_u32 v3, v2, v3
|
|
; GFX8-NEXT: v_mul_lo_u32 v3, v3, 24
|
|
; GFX8-NEXT: v_sub_u32_e32 v2, vcc, v2, v3
|
|
; GFX8-NEXT: v_add_u32_e32 v3, vcc, v2, v4
|
|
; GFX8-NEXT: v_cmp_le_u32_e32 vcc, 24, v2
|
|
; GFX8-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc
|
|
; GFX8-NEXT: v_add_u32_e32 v3, vcc, 0xffffffe8, v2
|
|
; GFX8-NEXT: v_cmp_le_u32_e32 vcc, 24, v2
|
|
; GFX8-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc
|
|
; GFX8-NEXT: v_sub_u32_e32 v3, vcc, 23, v2
|
|
; GFX8-NEXT: v_and_b32_e32 v2, 0xffffff, v2
|
|
; GFX8-NEXT: v_and_b32_e32 v3, 0xffffff, v3
|
|
; GFX8-NEXT: v_lshlrev_b32_e32 v0, v3, v0
|
|
; GFX8-NEXT: v_lshrrev_b32_e32 v1, v2, v1
|
|
; GFX8-NEXT: v_or_b32_e32 v0, v0, v1
|
|
; GFX8-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; GFX9-LABEL: v_fshr_i24:
|
|
; GFX9: ; %bb.0:
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; GFX9-NEXT: v_cvt_f32_ubyte0_e32 v3, 24
|
|
; GFX9-NEXT: v_rcp_iflag_f32_e32 v3, v3
|
|
; GFX9-NEXT: v_not_b32_e32 v4, 23
|
|
; GFX9-NEXT: v_and_b32_e32 v2, 0xffffff, v2
|
|
; GFX9-NEXT: v_and_b32_e32 v1, 0xffffff, v1
|
|
; GFX9-NEXT: v_mul_f32_e32 v3, 0x4f7ffffe, v3
|
|
; GFX9-NEXT: v_cvt_u32_f32_e32 v3, v3
|
|
; GFX9-NEXT: v_lshlrev_b32_e32 v0, 1, v0
|
|
; GFX9-NEXT: v_mul_lo_u32 v4, v3, v4
|
|
; GFX9-NEXT: v_mul_hi_u32 v4, v3, v4
|
|
; GFX9-NEXT: v_add_u32_e32 v3, v3, v4
|
|
; GFX9-NEXT: v_mul_hi_u32 v3, v2, v3
|
|
; GFX9-NEXT: v_mul_lo_u32 v3, v3, 24
|
|
; GFX9-NEXT: v_sub_u32_e32 v2, v2, v3
|
|
; GFX9-NEXT: v_add_u32_e32 v3, 0xffffffe8, v2
|
|
; GFX9-NEXT: v_cmp_le_u32_e32 vcc, 24, v2
|
|
; GFX9-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc
|
|
; GFX9-NEXT: v_add_u32_e32 v3, 0xffffffe8, v2
|
|
; GFX9-NEXT: v_cmp_le_u32_e32 vcc, 24, v2
|
|
; GFX9-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc
|
|
; GFX9-NEXT: v_sub_u32_e32 v3, 23, v2
|
|
; GFX9-NEXT: v_and_b32_e32 v2, 0xffffff, v2
|
|
; GFX9-NEXT: v_and_b32_e32 v3, 0xffffff, v3
|
|
; GFX9-NEXT: v_lshrrev_b32_e32 v1, v2, v1
|
|
; GFX9-NEXT: v_lshl_or_b32 v0, v0, v3, v1
|
|
; GFX9-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; GFX10-LABEL: v_fshr_i24:
|
|
; GFX10: ; %bb.0:
|
|
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; GFX10-NEXT: v_cvt_f32_ubyte0_e32 v3, 24
|
|
; GFX10-NEXT: v_and_b32_e32 v2, 0xffffff, v2
|
|
; GFX10-NEXT: v_and_b32_e32 v1, 0xffffff, v1
|
|
; GFX10-NEXT: v_lshlrev_b32_e32 v0, 1, v0
|
|
; GFX10-NEXT: v_rcp_iflag_f32_e32 v3, v3
|
|
; GFX10-NEXT: v_mul_f32_e32 v3, 0x4f7ffffe, v3
|
|
; GFX10-NEXT: v_cvt_u32_f32_e32 v3, v3
|
|
; GFX10-NEXT: v_mul_lo_u32 v4, 0xffffffe8, v3
|
|
; GFX10-NEXT: v_mul_hi_u32 v4, v3, v4
|
|
; GFX10-NEXT: v_add_nc_u32_e32 v3, v3, v4
|
|
; GFX10-NEXT: v_mul_hi_u32 v3, v2, v3
|
|
; GFX10-NEXT: v_mul_lo_u32 v3, v3, 24
|
|
; GFX10-NEXT: v_sub_nc_u32_e32 v2, v2, v3
|
|
; GFX10-NEXT: v_add_nc_u32_e32 v3, 0xffffffe8, v2
|
|
; GFX10-NEXT: v_cmp_le_u32_e32 vcc_lo, 24, v2
|
|
; GFX10-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc_lo
|
|
; GFX10-NEXT: v_add_nc_u32_e32 v3, 0xffffffe8, v2
|
|
; GFX10-NEXT: v_cmp_le_u32_e32 vcc_lo, 24, v2
|
|
; GFX10-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc_lo
|
|
; GFX10-NEXT: v_sub_nc_u32_e32 v3, 23, v2
|
|
; GFX10-NEXT: v_and_b32_e32 v2, 0xffffff, v2
|
|
; GFX10-NEXT: v_and_b32_e32 v3, 0xffffff, v3
|
|
; GFX10-NEXT: v_lshrrev_b32_e32 v1, v2, v1
|
|
; GFX10-NEXT: v_lshl_or_b32 v0, v0, v3, v1
|
|
; GFX10-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; GFX11-LABEL: v_fshr_i24:
|
|
; GFX11: ; %bb.0:
|
|
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; GFX11-NEXT: v_cvt_f32_ubyte0_e32 v3, 24
|
|
; GFX11-NEXT: v_and_b32_e32 v2, 0xffffff, v2
|
|
; GFX11-NEXT: v_lshlrev_b32_e32 v0, 1, v0
|
|
; GFX11-NEXT: v_and_b32_e32 v1, 0xffffff, v1
|
|
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_1)
|
|
; GFX11-NEXT: v_rcp_iflag_f32_e32 v3, v3
|
|
; GFX11-NEXT: s_waitcnt_depctr 0xfff
|
|
; GFX11-NEXT: v_mul_f32_e32 v3, 0x4f7ffffe, v3
|
|
; GFX11-NEXT: v_cvt_u32_f32_e32 v3, v3
|
|
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
|
|
; GFX11-NEXT: v_mul_lo_u32 v4, 0xffffffe8, v3
|
|
; GFX11-NEXT: v_mul_hi_u32 v4, v3, v4
|
|
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
|
|
; GFX11-NEXT: v_add_nc_u32_e32 v3, v3, v4
|
|
; GFX11-NEXT: v_mul_hi_u32 v3, v2, v3
|
|
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
|
|
; GFX11-NEXT: v_mul_lo_u32 v3, v3, 24
|
|
; GFX11-NEXT: v_sub_nc_u32_e32 v2, v2, v3
|
|
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
|
|
; GFX11-NEXT: v_add_nc_u32_e32 v3, 0xffffffe8, v2
|
|
; GFX11-NEXT: v_cmp_le_u32_e32 vcc_lo, 24, v2
|
|
; GFX11-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc_lo
|
|
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
|
|
; GFX11-NEXT: v_add_nc_u32_e32 v3, 0xffffffe8, v2
|
|
; GFX11-NEXT: v_cmp_le_u32_e32 vcc_lo, 24, v2
|
|
; GFX11-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc_lo
|
|
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
|
|
; GFX11-NEXT: v_sub_nc_u32_e32 v3, 23, v2
|
|
; GFX11-NEXT: v_and_b32_e32 v2, 0xffffff, v2
|
|
; GFX11-NEXT: v_and_b32_e32 v3, 0xffffff, v3
|
|
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
|
|
; GFX11-NEXT: v_lshrrev_b32_e32 v1, v2, v1
|
|
; GFX11-NEXT: v_lshl_or_b32 v0, v0, v3, v1
|
|
; GFX11-NEXT: s_setpc_b64 s[30:31]
|
|
; The IR under test: same fshr as s_fshr_i24 but all operands are in VGPRs.
%result = call i24 @llvm.fshr.i24(i24 %lhs, i24 %rhs, i24 %amt)
|
|
ret i24 %result
|
|
}
|
|
|
|
define amdgpu_ps i48 @s_fshr_v2i24(i48 inreg %lhs.arg, i48 inreg %rhs.arg, i48 inreg %amt.arg) {
|
|
; GFX6-LABEL: s_fshr_v2i24:
|
|
; GFX6: ; %bb.0:
|
|
; GFX6-NEXT: v_cvt_f32_ubyte0_e32 v2, 24
|
|
; GFX6-NEXT: v_rcp_iflag_f32_e32 v2, v2
|
|
; GFX6-NEXT: s_lshr_b32 s7, s1, 8
|
|
; GFX6-NEXT: s_bfe_u32 s9, s0, 0x80008
|
|
; GFX6-NEXT: s_and_b32 s1, s1, 0xff
|
|
; GFX6-NEXT: v_mul_f32_e32 v2, 0x4f7ffffe, v2
|
|
; GFX6-NEXT: v_cvt_u32_f32_e32 v2, v2
|
|
; GFX6-NEXT: v_mov_b32_e32 v0, s0
|
|
; GFX6-NEXT: v_not_b32_e32 v3, 23
|
|
; GFX6-NEXT: s_lshr_b32 s6, s0, 16
|
|
; GFX6-NEXT: s_and_b32 s8, s0, 0xff
|
|
; GFX6-NEXT: s_lshl_b32 s9, s9, 8
|
|
; GFX6-NEXT: v_alignbit_b32 v0, s1, v0, 24
|
|
; GFX6-NEXT: s_and_b32 s0, s7, 0xff
|
|
; GFX6-NEXT: s_lshr_b32 s1, s2, 16
|
|
; GFX6-NEXT: s_lshr_b32 s7, s3, 8
|
|
; GFX6-NEXT: s_bfe_u32 s10, s2, 0x80008
|
|
; GFX6-NEXT: v_mul_lo_u32 v4, v2, v3
|
|
; GFX6-NEXT: s_or_b32 s8, s8, s9
|
|
; GFX6-NEXT: s_and_b32 s9, s2, 0xff
|
|
; GFX6-NEXT: s_lshl_b32 s10, s10, 8
|
|
; GFX6-NEXT: s_and_b32 s1, s1, 0xff
|
|
; GFX6-NEXT: s_and_b32 s3, s3, 0xff
|
|
; GFX6-NEXT: v_mov_b32_e32 v1, s2
|
|
; GFX6-NEXT: s_and_b32 s2, s7, 0xff
|
|
; GFX6-NEXT: s_or_b32 s9, s9, s10
|
|
; GFX6-NEXT: s_and_b32 s1, 0xffff, s1
|
|
; GFX6-NEXT: v_alignbit_b32 v1, s3, v1, 24
|
|
; GFX6-NEXT: s_and_b32 s2, 0xffff, s2
|
|
; GFX6-NEXT: s_and_b32 s9, 0xffff, s9
|
|
; GFX6-NEXT: s_lshl_b32 s1, s1, 16
|
|
; GFX6-NEXT: v_and_b32_e32 v1, 0xffff, v1
|
|
; GFX6-NEXT: s_lshl_b32 s2, s2, 16
|
|
; GFX6-NEXT: s_or_b32 s1, s9, s1
|
|
; GFX6-NEXT: v_or_b32_e32 v1, s2, v1
|
|
; GFX6-NEXT: s_lshr_b32 s2, s4, 16
|
|
; GFX6-NEXT: s_bfe_u32 s9, s4, 0x80008
|
|
; GFX6-NEXT: v_mul_hi_u32 v4, v2, v4
|
|
; GFX6-NEXT: s_and_b32 s7, s4, 0xff
|
|
; GFX6-NEXT: s_lshl_b32 s9, s9, 8
|
|
; GFX6-NEXT: s_and_b32 s2, s2, 0xff
|
|
; GFX6-NEXT: s_or_b32 s7, s7, s9
|
|
; GFX6-NEXT: s_and_b32 s2, 0xffff, s2
|
|
; GFX6-NEXT: s_and_b32 s7, 0xffff, s7
|
|
; GFX6-NEXT: s_lshl_b32 s2, s2, 16
|
|
; GFX6-NEXT: s_or_b32 s2, s7, s2
|
|
; GFX6-NEXT: v_add_i32_e32 v2, vcc, v2, v4
|
|
; GFX6-NEXT: v_mul_hi_u32 v4, s2, v2
|
|
; GFX6-NEXT: s_lshr_b32 s3, s5, 8
|
|
; GFX6-NEXT: s_and_b32 s5, s5, 0xff
|
|
; GFX6-NEXT: v_mov_b32_e32 v5, s4
|
|
; GFX6-NEXT: s_and_b32 s3, s3, 0xff
|
|
; GFX6-NEXT: v_alignbit_b32 v5, s5, v5, 24
|
|
; GFX6-NEXT: s_and_b32 s3, 0xffff, s3
|
|
; GFX6-NEXT: v_and_b32_e32 v5, 0xffff, v5
|
|
; GFX6-NEXT: v_mul_lo_u32 v4, v4, 24
|
|
; GFX6-NEXT: s_lshl_b32 s3, s3, 16
|
|
; GFX6-NEXT: v_or_b32_e32 v5, s3, v5
|
|
; GFX6-NEXT: v_mul_hi_u32 v2, v5, v2
|
|
; GFX6-NEXT: v_sub_i32_e32 v4, vcc, s2, v4
|
|
; GFX6-NEXT: v_add_i32_e32 v6, vcc, v4, v3
|
|
; GFX6-NEXT: v_cmp_le_u32_e32 vcc, 24, v4
|
|
; GFX6-NEXT: v_mul_lo_u32 v2, v2, 24
|
|
; GFX6-NEXT: v_cndmask_b32_e32 v4, v4, v6, vcc
|
|
; GFX6-NEXT: v_add_i32_e32 v6, vcc, v4, v3
|
|
; GFX6-NEXT: v_cmp_le_u32_e32 vcc, 24, v4
|
|
; GFX6-NEXT: v_cndmask_b32_e32 v4, v4, v6, vcc
|
|
; GFX6-NEXT: v_sub_i32_e32 v2, vcc, v5, v2
|
|
; GFX6-NEXT: v_sub_i32_e32 v6, vcc, 23, v4
|
|
; GFX6-NEXT: v_add_i32_e32 v5, vcc, v2, v3
|
|
; GFX6-NEXT: v_cmp_le_u32_e32 vcc, 24, v2
|
|
; GFX6-NEXT: s_and_b32 s6, s6, 0xff
|
|
; GFX6-NEXT: v_cndmask_b32_e32 v2, v2, v5, vcc
|
|
; GFX6-NEXT: s_and_b32 s8, 0xffff, s8
|
|
; GFX6-NEXT: s_and_b32 s6, 0xffff, s6
|
|
; GFX6-NEXT: v_add_i32_e32 v3, vcc, v2, v3
|
|
; GFX6-NEXT: s_lshl_b32 s2, s6, 17
|
|
; GFX6-NEXT: s_lshl_b32 s3, s8, 1
|
|
; GFX6-NEXT: v_cmp_le_u32_e32 vcc, 24, v2
|
|
; GFX6-NEXT: v_and_b32_e32 v0, 0xffff, v0
|
|
; GFX6-NEXT: s_and_b32 s0, 0xffff, s0
|
|
; GFX6-NEXT: s_or_b32 s2, s2, s3
|
|
; GFX6-NEXT: v_and_b32_e32 v6, 0xffffff, v6
|
|
; GFX6-NEXT: v_and_b32_e32 v4, 0xffffff, v4
|
|
; GFX6-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc
|
|
; GFX6-NEXT: v_lshl_b32_e32 v6, s2, v6
|
|
; GFX6-NEXT: v_lshr_b32_e32 v4, s1, v4
|
|
; GFX6-NEXT: v_sub_i32_e32 v3, vcc, 23, v2
|
|
; GFX6-NEXT: s_lshl_b32 s0, s0, 17
|
|
; GFX6-NEXT: v_lshlrev_b32_e32 v0, 1, v0
|
|
; GFX6-NEXT: v_or_b32_e32 v4, v6, v4
|
|
; GFX6-NEXT: v_or_b32_e32 v0, s0, v0
|
|
; GFX6-NEXT: v_and_b32_e32 v3, 0xffffff, v3
|
|
; GFX6-NEXT: v_and_b32_e32 v2, 0xffffff, v2
|
|
; GFX6-NEXT: v_lshlrev_b32_e32 v0, v3, v0
|
|
; GFX6-NEXT: v_lshrrev_b32_e32 v1, v2, v1
|
|
; GFX6-NEXT: v_bfe_u32 v2, v4, 8, 8
|
|
; GFX6-NEXT: v_or_b32_e32 v0, v0, v1
|
|
; GFX6-NEXT: v_and_b32_e32 v1, 0xff, v4
|
|
; GFX6-NEXT: v_lshlrev_b32_e32 v2, 8, v2
|
|
; GFX6-NEXT: v_or_b32_e32 v1, v1, v2
|
|
; GFX6-NEXT: v_bfe_u32 v2, v4, 16, 8
|
|
; GFX6-NEXT: v_lshlrev_b32_e32 v2, 16, v2
|
|
; GFX6-NEXT: v_or_b32_e32 v1, v1, v2
|
|
; GFX6-NEXT: v_and_b32_e32 v2, 0xff, v0
|
|
; GFX6-NEXT: v_lshlrev_b32_e32 v2, 24, v2
|
|
; GFX6-NEXT: v_or_b32_e32 v1, v1, v2
|
|
; GFX6-NEXT: v_bfe_u32 v2, v0, 8, 8
|
|
; GFX6-NEXT: v_bfe_u32 v0, v0, 16, 8
|
|
; GFX6-NEXT: v_lshlrev_b32_e32 v0, 8, v0
|
|
; GFX6-NEXT: v_or_b32_e32 v0, v2, v0
|
|
; GFX6-NEXT: v_readfirstlane_b32 s0, v1
|
|
; GFX6-NEXT: v_readfirstlane_b32 s1, v0
|
|
; GFX6-NEXT: ; return to shader part epilog
|
|
;
|
|
; GFX8-LABEL: s_fshr_v2i24:
|
|
; GFX8: ; %bb.0:
|
|
; GFX8-NEXT: v_cvt_f32_ubyte0_e32 v0, 24
|
|
; GFX8-NEXT: s_lshr_b32 s9, s1, 8
|
|
; GFX8-NEXT: s_and_b32 s1, s1, 0xff
|
|
; GFX8-NEXT: v_rcp_iflag_f32_e32 v0, v0
|
|
; GFX8-NEXT: s_lshr_b32 s6, s0, 8
|
|
; GFX8-NEXT: s_lshr_b32 s8, s0, 24
|
|
; GFX8-NEXT: s_lshl_b32 s1, s1, 8
|
|
; GFX8-NEXT: s_and_b32 s6, s6, 0xff
|
|
; GFX8-NEXT: s_or_b32 s1, s8, s1
|
|
; GFX8-NEXT: s_lshr_b32 s8, s2, 8
|
|
; GFX8-NEXT: s_lshr_b32 s7, s0, 16
|
|
; GFX8-NEXT: s_and_b32 s0, s0, 0xff
|
|
; GFX8-NEXT: s_lshl_b32 s6, s6, 8
|
|
; GFX8-NEXT: s_and_b32 s8, s8, 0xff
|
|
; GFX8-NEXT: s_or_b32 s0, s0, s6
|
|
; GFX8-NEXT: s_and_b32 s6, s7, 0xff
|
|
; GFX8-NEXT: s_and_b32 s7, s9, 0xff
|
|
; GFX8-NEXT: s_lshr_b32 s9, s2, 16
|
|
; GFX8-NEXT: s_lshr_b32 s10, s2, 24
|
|
; GFX8-NEXT: s_and_b32 s2, s2, 0xff
|
|
; GFX8-NEXT: s_lshl_b32 s8, s8, 8
|
|
; GFX8-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0
|
|
; GFX8-NEXT: s_or_b32 s2, s2, s8
|
|
; GFX8-NEXT: s_and_b32 s8, s9, 0xff
|
|
; GFX8-NEXT: v_cvt_u32_f32_e32 v0, v0
|
|
; GFX8-NEXT: s_and_b32 s8, 0xffff, s8
|
|
; GFX8-NEXT: s_lshr_b32 s11, s3, 8
|
|
; GFX8-NEXT: s_and_b32 s2, 0xffff, s2
|
|
; GFX8-NEXT: s_lshl_b32 s8, s8, 16
|
|
; GFX8-NEXT: s_and_b32 s3, s3, 0xff
|
|
; GFX8-NEXT: s_or_b32 s2, s2, s8
|
|
; GFX8-NEXT: s_lshl_b32 s3, s3, 8
|
|
; GFX8-NEXT: s_and_b32 s8, s11, 0xff
|
|
; GFX8-NEXT: v_not_b32_e32 v1, 23
|
|
; GFX8-NEXT: s_or_b32 s3, s10, s3
|
|
; GFX8-NEXT: s_and_b32 s8, 0xffff, s8
|
|
; GFX8-NEXT: v_mul_lo_u32 v2, v0, v1
|
|
; GFX8-NEXT: s_and_b32 s3, 0xffff, s3
|
|
; GFX8-NEXT: s_lshl_b32 s8, s8, 16
|
|
; GFX8-NEXT: s_or_b32 s3, s3, s8
|
|
; GFX8-NEXT: s_lshr_b32 s8, s4, 8
|
|
; GFX8-NEXT: s_and_b32 s8, s8, 0xff
|
|
; GFX8-NEXT: s_lshr_b32 s9, s4, 16
|
|
; GFX8-NEXT: s_lshr_b32 s10, s4, 24
|
|
; GFX8-NEXT: s_and_b32 s4, s4, 0xff
|
|
; GFX8-NEXT: s_lshl_b32 s8, s8, 8
|
|
; GFX8-NEXT: v_mul_hi_u32 v2, v0, v2
|
|
; GFX8-NEXT: s_or_b32 s4, s4, s8
|
|
; GFX8-NEXT: s_and_b32 s8, s9, 0xff
|
|
; GFX8-NEXT: s_and_b32 s8, 0xffff, s8
|
|
; GFX8-NEXT: s_and_b32 s4, 0xffff, s4
|
|
; GFX8-NEXT: s_lshl_b32 s8, s8, 16
|
|
; GFX8-NEXT: s_or_b32 s4, s4, s8
|
|
; GFX8-NEXT: v_add_u32_e32 v0, vcc, v0, v2
|
|
; GFX8-NEXT: v_mul_hi_u32 v2, s4, v0
|
|
; GFX8-NEXT: s_lshr_b32 s11, s5, 8
|
|
; GFX8-NEXT: s_and_b32 s5, s5, 0xff
|
|
; GFX8-NEXT: s_lshl_b32 s5, s5, 8
|
|
; GFX8-NEXT: v_mul_lo_u32 v2, v2, 24
|
|
; GFX8-NEXT: s_and_b32 s8, s11, 0xff
|
|
; GFX8-NEXT: s_or_b32 s5, s10, s5
|
|
; GFX8-NEXT: s_and_b32 s8, 0xffff, s8
|
|
; GFX8-NEXT: s_and_b32 s5, 0xffff, s5
|
|
; GFX8-NEXT: s_lshl_b32 s8, s8, 16
|
|
; GFX8-NEXT: s_or_b32 s5, s5, s8
|
|
; GFX8-NEXT: v_sub_u32_e32 v2, vcc, s4, v2
|
|
; GFX8-NEXT: v_add_u32_e32 v3, vcc, v2, v1
|
|
; GFX8-NEXT: v_mul_hi_u32 v0, s5, v0
|
|
; GFX8-NEXT: v_cmp_le_u32_e32 vcc, 24, v2
|
|
; GFX8-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc
|
|
; GFX8-NEXT: v_add_u32_e32 v3, vcc, v2, v1
|
|
; GFX8-NEXT: v_cmp_le_u32_e32 vcc, 24, v2
|
|
; GFX8-NEXT: v_mul_lo_u32 v0, v0, 24
|
|
; GFX8-NEXT: s_and_b32 s0, 0xffff, s0
|
|
; GFX8-NEXT: s_and_b32 s6, 0xffff, s6
|
|
; GFX8-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc
|
|
; GFX8-NEXT: v_sub_u32_e32 v3, vcc, 23, v2
|
|
; GFX8-NEXT: s_lshl_b32 s4, s6, 17
|
|
; GFX8-NEXT: s_lshl_b32 s0, s0, 1
|
|
; GFX8-NEXT: s_or_b32 s0, s4, s0
|
|
; GFX8-NEXT: v_and_b32_e32 v3, 0xffffff, v3
|
|
; GFX8-NEXT: v_and_b32_e32 v2, 0xffffff, v2
|
|
; GFX8-NEXT: v_lshlrev_b32_e64 v3, v3, s0
|
|
; GFX8-NEXT: v_lshrrev_b32_e64 v2, v2, s2
|
|
; GFX8-NEXT: v_sub_u32_e32 v0, vcc, s5, v0
|
|
; GFX8-NEXT: v_or_b32_e32 v2, v3, v2
|
|
; GFX8-NEXT: v_add_u32_e32 v3, vcc, v0, v1
|
|
; GFX8-NEXT: v_cmp_le_u32_e32 vcc, 24, v0
|
|
; GFX8-NEXT: v_cndmask_b32_e32 v0, v0, v3, vcc
|
|
; GFX8-NEXT: v_add_u32_e32 v1, vcc, v0, v1
|
|
; GFX8-NEXT: v_cmp_le_u32_e32 vcc, 24, v0
|
|
; GFX8-NEXT: s_and_b32 s1, 0xffff, s1
|
|
; GFX8-NEXT: s_and_b32 s7, 0xffff, s7
|
|
; GFX8-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
|
|
; GFX8-NEXT: v_sub_u32_e32 v1, vcc, 23, v0
|
|
; GFX8-NEXT: s_lshl_b32 s0, s7, 17
|
|
; GFX8-NEXT: s_lshl_b32 s1, s1, 1
|
|
; GFX8-NEXT: s_or_b32 s0, s0, s1
|
|
; GFX8-NEXT: v_and_b32_e32 v1, 0xffffff, v1
|
|
; GFX8-NEXT: v_and_b32_e32 v0, 0xffffff, v0
|
|
; GFX8-NEXT: v_lshlrev_b32_e64 v1, v1, s0
|
|
; GFX8-NEXT: v_lshrrev_b32_e64 v0, v0, s3
|
|
; GFX8-NEXT: v_or_b32_e32 v0, v1, v0
|
|
; GFX8-NEXT: v_mov_b32_e32 v1, 8
|
|
; GFX8-NEXT: v_lshlrev_b32_sdwa v3, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1
|
|
; GFX8-NEXT: v_mov_b32_e32 v4, 16
|
|
; GFX8-NEXT: v_or_b32_sdwa v3, v2, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
|
|
; GFX8-NEXT: v_lshlrev_b32_sdwa v2, v4, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2
|
|
; GFX8-NEXT: v_or_b32_e32 v2, v3, v2
|
|
; GFX8-NEXT: v_and_b32_e32 v3, 0xff, v0
|
|
; GFX8-NEXT: v_lshlrev_b32_e32 v3, 24, v3
|
|
; GFX8-NEXT: v_lshlrev_b32_sdwa v1, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2
|
|
; GFX8-NEXT: v_or_b32_e32 v2, v2, v3
|
|
; GFX8-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
|
|
; GFX8-NEXT: v_readfirstlane_b32 s0, v2
|
|
; GFX8-NEXT: v_readfirstlane_b32 s1, v0
|
|
; GFX8-NEXT: ; return to shader part epilog
|
|
;
|
|
; GFX9-LABEL: s_fshr_v2i24:
|
|
; GFX9: ; %bb.0:
|
|
; GFX9-NEXT: v_cvt_f32_ubyte0_e32 v0, 24
|
|
; GFX9-NEXT: s_lshr_b32 s9, s1, 8
|
|
; GFX9-NEXT: s_and_b32 s1, s1, 0xff
|
|
; GFX9-NEXT: v_rcp_iflag_f32_e32 v0, v0
|
|
; GFX9-NEXT: s_lshr_b32 s6, s0, 8
|
|
; GFX9-NEXT: s_lshr_b32 s8, s0, 24
|
|
; GFX9-NEXT: s_lshl_b32 s1, s1, 8
|
|
; GFX9-NEXT: s_and_b32 s6, s6, 0xff
|
|
; GFX9-NEXT: s_or_b32 s1, s8, s1
|
|
; GFX9-NEXT: s_lshr_b32 s8, s2, 8
|
|
; GFX9-NEXT: s_lshr_b32 s7, s0, 16
|
|
; GFX9-NEXT: s_and_b32 s0, s0, 0xff
|
|
; GFX9-NEXT: s_lshl_b32 s6, s6, 8
|
|
; GFX9-NEXT: s_and_b32 s8, s8, 0xff
|
|
; GFX9-NEXT: s_or_b32 s0, s0, s6
|
|
; GFX9-NEXT: s_and_b32 s6, s7, 0xff
|
|
; GFX9-NEXT: s_and_b32 s7, s9, 0xff
|
|
; GFX9-NEXT: s_lshr_b32 s9, s2, 16
|
|
; GFX9-NEXT: s_lshr_b32 s10, s2, 24
|
|
; GFX9-NEXT: s_and_b32 s2, s2, 0xff
|
|
; GFX9-NEXT: s_lshl_b32 s8, s8, 8
|
|
; GFX9-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0
|
|
; GFX9-NEXT: s_or_b32 s2, s2, s8
|
|
; GFX9-NEXT: s_and_b32 s8, s9, 0xff
|
|
; GFX9-NEXT: v_cvt_u32_f32_e32 v0, v0
|
|
; GFX9-NEXT: s_and_b32 s8, 0xffff, s8
|
|
; GFX9-NEXT: s_lshr_b32 s11, s3, 8
|
|
; GFX9-NEXT: s_and_b32 s2, 0xffff, s2
|
|
; GFX9-NEXT: s_lshl_b32 s8, s8, 16
|
|
; GFX9-NEXT: s_and_b32 s3, s3, 0xff
|
|
; GFX9-NEXT: s_or_b32 s2, s2, s8
|
|
; GFX9-NEXT: s_lshl_b32 s3, s3, 8
|
|
; GFX9-NEXT: s_and_b32 s8, s11, 0xff
|
|
; GFX9-NEXT: v_not_b32_e32 v1, 23
|
|
; GFX9-NEXT: s_or_b32 s3, s10, s3
|
|
; GFX9-NEXT: s_and_b32 s8, 0xffff, s8
|
|
; GFX9-NEXT: v_mul_lo_u32 v1, v0, v1
|
|
; GFX9-NEXT: s_and_b32 s3, 0xffff, s3
|
|
; GFX9-NEXT: s_lshl_b32 s8, s8, 16
|
|
; GFX9-NEXT: s_or_b32 s3, s3, s8
|
|
; GFX9-NEXT: s_lshr_b32 s8, s4, 8
|
|
; GFX9-NEXT: s_and_b32 s8, s8, 0xff
|
|
; GFX9-NEXT: s_lshr_b32 s9, s4, 16
|
|
; GFX9-NEXT: s_lshr_b32 s10, s4, 24
|
|
; GFX9-NEXT: s_and_b32 s4, s4, 0xff
|
|
; GFX9-NEXT: s_lshl_b32 s8, s8, 8
|
|
; GFX9-NEXT: v_mul_hi_u32 v1, v0, v1
|
|
; GFX9-NEXT: s_or_b32 s4, s4, s8
|
|
; GFX9-NEXT: s_and_b32 s8, s9, 0xff
|
|
; GFX9-NEXT: s_and_b32 s8, 0xffff, s8
|
|
; GFX9-NEXT: s_and_b32 s4, 0xffff, s4
|
|
; GFX9-NEXT: s_lshl_b32 s8, s8, 16
|
|
; GFX9-NEXT: s_or_b32 s4, s4, s8
|
|
; GFX9-NEXT: v_add_u32_e32 v0, v0, v1
|
|
; GFX9-NEXT: v_mul_hi_u32 v1, s4, v0
|
|
; GFX9-NEXT: s_lshr_b32 s11, s5, 8
|
|
; GFX9-NEXT: s_and_b32 s5, s5, 0xff
|
|
; GFX9-NEXT: s_lshl_b32 s5, s5, 8
|
|
; GFX9-NEXT: s_and_b32 s8, s11, 0xff
|
|
; GFX9-NEXT: s_or_b32 s5, s10, s5
|
|
; GFX9-NEXT: s_and_b32 s8, 0xffff, s8
|
|
; GFX9-NEXT: s_and_b32 s5, 0xffff, s5
|
|
; GFX9-NEXT: v_mul_lo_u32 v1, v1, 24
|
|
; GFX9-NEXT: s_lshl_b32 s8, s8, 16
|
|
; GFX9-NEXT: s_or_b32 s5, s5, s8
|
|
; GFX9-NEXT: v_mul_hi_u32 v0, s5, v0
|
|
; GFX9-NEXT: v_sub_u32_e32 v1, s4, v1
|
|
; GFX9-NEXT: v_add_u32_e32 v2, 0xffffffe8, v1
|
|
; GFX9-NEXT: v_cmp_le_u32_e32 vcc, 24, v1
|
|
; GFX9-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc
|
|
; GFX9-NEXT: v_mul_lo_u32 v0, v0, 24
|
|
; GFX9-NEXT: v_add_u32_e32 v2, 0xffffffe8, v1
|
|
; GFX9-NEXT: v_cmp_le_u32_e32 vcc, 24, v1
|
|
; GFX9-NEXT: s_and_b32 s0, 0xffff, s0
|
|
; GFX9-NEXT: s_and_b32 s6, 0xffff, s6
|
|
; GFX9-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc
|
|
; GFX9-NEXT: v_sub_u32_e32 v2, 23, v1
|
|
; GFX9-NEXT: s_lshl_b32 s4, s6, 17
|
|
; GFX9-NEXT: s_lshl_b32 s0, s0, 1
|
|
; GFX9-NEXT: v_and_b32_e32 v1, 0xffffff, v1
|
|
; GFX9-NEXT: s_or_b32 s0, s4, s0
|
|
; GFX9-NEXT: v_and_b32_e32 v2, 0xffffff, v2
|
|
; GFX9-NEXT: v_lshrrev_b32_e64 v1, v1, s2
|
|
; GFX9-NEXT: v_sub_u32_e32 v0, s5, v0
|
|
; GFX9-NEXT: v_lshl_or_b32 v1, s0, v2, v1
|
|
; GFX9-NEXT: v_add_u32_e32 v2, 0xffffffe8, v0
|
|
; GFX9-NEXT: v_cmp_le_u32_e32 vcc, 24, v0
|
|
; GFX9-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc
|
|
; GFX9-NEXT: v_add_u32_e32 v2, 0xffffffe8, v0
|
|
; GFX9-NEXT: v_cmp_le_u32_e32 vcc, 24, v0
|
|
; GFX9-NEXT: s_and_b32 s1, 0xffff, s1
|
|
; GFX9-NEXT: s_and_b32 s7, 0xffff, s7
|
|
; GFX9-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc
|
|
; GFX9-NEXT: v_sub_u32_e32 v2, 23, v0
|
|
; GFX9-NEXT: s_lshl_b32 s0, s7, 17
|
|
; GFX9-NEXT: s_lshl_b32 s1, s1, 1
|
|
; GFX9-NEXT: v_and_b32_e32 v0, 0xffffff, v0
|
|
; GFX9-NEXT: s_or_b32 s0, s0, s1
|
|
; GFX9-NEXT: v_and_b32_e32 v2, 0xffffff, v2
|
|
; GFX9-NEXT: v_lshrrev_b32_e64 v0, v0, s3
|
|
; GFX9-NEXT: v_mov_b32_e32 v3, 8
|
|
; GFX9-NEXT: v_lshl_or_b32 v0, s0, v2, v0
|
|
; GFX9-NEXT: v_mov_b32_e32 v2, 0xff
|
|
; GFX9-NEXT: v_lshlrev_b32_sdwa v3, v3, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1
|
|
; GFX9-NEXT: v_and_or_b32 v2, v1, v2, v3
|
|
; GFX9-NEXT: v_mov_b32_e32 v3, 16
|
|
; GFX9-NEXT: v_lshlrev_b32_sdwa v1, v3, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2
|
|
; GFX9-NEXT: v_and_b32_e32 v3, 0xff, v0
|
|
; GFX9-NEXT: v_lshlrev_b32_e32 v3, 24, v3
|
|
; GFX9-NEXT: v_or3_b32 v1, v2, v1, v3
|
|
; GFX9-NEXT: v_bfe_u32 v2, v0, 8, 8
|
|
; GFX9-NEXT: v_bfe_u32 v0, v0, 16, 8
|
|
; GFX9-NEXT: v_lshl_or_b32 v0, v0, 8, v2
|
|
; GFX9-NEXT: v_readfirstlane_b32 s0, v1
|
|
; GFX9-NEXT: v_readfirstlane_b32 s1, v0
|
|
; GFX9-NEXT: ; return to shader part epilog
|
|
;
|
|
; GFX10-LABEL: s_fshr_v2i24:
|
|
; GFX10: ; %bb.0:
|
|
; GFX10-NEXT: v_cvt_f32_ubyte0_e32 v0, 24
|
|
; GFX10-NEXT: s_lshr_b32 s14, s4, 8
|
|
; GFX10-NEXT: s_lshr_b32 s15, s4, 16
|
|
; GFX10-NEXT: s_and_b32 s14, s14, 0xff
|
|
; GFX10-NEXT: s_lshr_b32 s16, s4, 24
|
|
; GFX10-NEXT: v_rcp_iflag_f32_e32 v0, v0
|
|
; GFX10-NEXT: s_and_b32 s4, s4, 0xff
|
|
; GFX10-NEXT: s_and_b32 s15, s15, 0xff
|
|
; GFX10-NEXT: s_lshl_b32 s14, s14, 8
|
|
; GFX10-NEXT: s_and_b32 s15, 0xffff, s15
|
|
; GFX10-NEXT: s_or_b32 s4, s4, s14
|
|
; GFX10-NEXT: s_lshr_b32 s17, s5, 8
|
|
; GFX10-NEXT: s_and_b32 s5, s5, 0xff
|
|
; GFX10-NEXT: s_lshl_b32 s14, s15, 16
|
|
; GFX10-NEXT: s_and_b32 s4, 0xffff, s4
|
|
; GFX10-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0
|
|
; GFX10-NEXT: s_lshl_b32 s5, s5, 8
|
|
; GFX10-NEXT: s_and_b32 s15, s17, 0xff
|
|
; GFX10-NEXT: s_or_b32 s4, s4, s14
|
|
; GFX10-NEXT: s_or_b32 s5, s16, s5
|
|
; GFX10-NEXT: v_cvt_u32_f32_e32 v0, v0
|
|
; GFX10-NEXT: s_and_b32 s14, 0xffff, s15
|
|
; GFX10-NEXT: s_and_b32 s5, 0xffff, s5
|
|
; GFX10-NEXT: s_lshl_b32 s14, s14, 16
|
|
; GFX10-NEXT: s_lshr_b32 s9, s1, 8
|
|
; GFX10-NEXT: v_mul_lo_u32 v1, 0xffffffe8, v0
|
|
; GFX10-NEXT: s_or_b32 s5, s5, s14
|
|
; GFX10-NEXT: s_and_b32 s1, s1, 0xff
|
|
; GFX10-NEXT: s_lshr_b32 s10, s2, 8
|
|
; GFX10-NEXT: s_lshr_b32 s8, s0, 24
|
|
; GFX10-NEXT: s_lshr_b32 s11, s2, 16
|
|
; GFX10-NEXT: s_lshl_b32 s1, s1, 8
|
|
; GFX10-NEXT: s_and_b32 s9, s9, 0xff
|
|
; GFX10-NEXT: v_mul_hi_u32 v1, v0, v1
|
|
; GFX10-NEXT: s_and_b32 s10, s10, 0xff
|
|
; GFX10-NEXT: s_lshr_b32 s12, s2, 24
|
|
; GFX10-NEXT: s_and_b32 s2, s2, 0xff
|
|
; GFX10-NEXT: s_or_b32 s1, s8, s1
|
|
; GFX10-NEXT: s_and_b32 s8, 0xffff, s9
|
|
; GFX10-NEXT: s_lshl_b32 s9, s10, 8
|
|
; GFX10-NEXT: s_lshr_b32 s6, s0, 8
|
|
; GFX10-NEXT: v_add_nc_u32_e32 v0, v0, v1
|
|
; GFX10-NEXT: s_or_b32 s2, s2, s9
|
|
; GFX10-NEXT: s_lshr_b32 s13, s3, 8
|
|
; GFX10-NEXT: s_and_b32 s2, 0xffff, s2
|
|
; GFX10-NEXT: s_and_b32 s3, s3, 0xff
|
|
; GFX10-NEXT: v_mul_hi_u32 v1, s4, v0
|
|
; GFX10-NEXT: v_mul_hi_u32 v0, s5, v0
|
|
; GFX10-NEXT: s_and_b32 s6, s6, 0xff
|
|
; GFX10-NEXT: s_lshl_b32 s3, s3, 8
|
|
; GFX10-NEXT: s_lshr_b32 s7, s0, 16
|
|
; GFX10-NEXT: s_and_b32 s0, s0, 0xff
|
|
; GFX10-NEXT: s_lshl_b32 s6, s6, 8
|
|
; GFX10-NEXT: s_or_b32 s3, s12, s3
|
|
; GFX10-NEXT: v_mul_lo_u32 v1, v1, 24
|
|
; GFX10-NEXT: v_mul_lo_u32 v0, v0, 24
|
|
; GFX10-NEXT: s_and_b32 s7, s7, 0xff
|
|
; GFX10-NEXT: s_or_b32 s0, s0, s6
|
|
; GFX10-NEXT: s_and_b32 s3, 0xffff, s3
|
|
; GFX10-NEXT: s_and_b32 s7, 0xffff, s7
|
|
; GFX10-NEXT: s_and_b32 s0, 0xffff, s0
|
|
; GFX10-NEXT: s_and_b32 s1, 0xffff, s1
|
|
; GFX10-NEXT: v_sub_nc_u32_e32 v1, s4, v1
|
|
; GFX10-NEXT: v_sub_nc_u32_e32 v0, s5, v0
|
|
; GFX10-NEXT: s_and_b32 s4, s11, 0xff
|
|
; GFX10-NEXT: s_lshl_b32 s0, s0, 1
|
|
; GFX10-NEXT: s_and_b32 s4, 0xffff, s4
|
|
; GFX10-NEXT: v_add_nc_u32_e32 v2, 0xffffffe8, v1
|
|
; GFX10-NEXT: v_cmp_le_u32_e32 vcc_lo, 24, v1
|
|
; GFX10-NEXT: s_lshl_b32 s4, s4, 16
|
|
; GFX10-NEXT: s_lshl_b32 s1, s1, 1
|
|
; GFX10-NEXT: s_or_b32 s2, s2, s4
|
|
; GFX10-NEXT: s_and_b32 s4, s13, 0xff
|
|
; GFX10-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc_lo
|
|
; GFX10-NEXT: v_add_nc_u32_e32 v2, 0xffffffe8, v0
|
|
; GFX10-NEXT: v_cmp_le_u32_e32 vcc_lo, 24, v0
|
|
; GFX10-NEXT: s_and_b32 s4, 0xffff, s4
|
|
; GFX10-NEXT: v_add_nc_u32_e32 v3, 0xffffffe8, v1
|
|
; GFX10-NEXT: s_lshl_b32 s4, s4, 16
|
|
; GFX10-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc_lo
|
|
; GFX10-NEXT: v_cmp_le_u32_e32 vcc_lo, 24, v1
|
|
; GFX10-NEXT: s_or_b32 s3, s3, s4
|
|
; GFX10-NEXT: s_lshl_b32 s4, s7, 17
|
|
; GFX10-NEXT: v_add_nc_u32_e32 v2, 0xffffffe8, v0
|
|
; GFX10-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc_lo
|
|
; GFX10-NEXT: v_cmp_le_u32_e32 vcc_lo, 24, v0
|
|
; GFX10-NEXT: s_or_b32 s0, s4, s0
|
|
; GFX10-NEXT: v_sub_nc_u32_e32 v3, 23, v1
|
|
; GFX10-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc_lo
|
|
; GFX10-NEXT: v_and_b32_e32 v1, 0xffffff, v1
|
|
; GFX10-NEXT: v_and_b32_e32 v2, 0xffffff, v3
|
|
; GFX10-NEXT: v_sub_nc_u32_e32 v3, 23, v0
|
|
; GFX10-NEXT: v_and_b32_e32 v0, 0xffffff, v0
|
|
; GFX10-NEXT: v_lshrrev_b32_e64 v1, v1, s2
|
|
; GFX10-NEXT: s_lshl_b32 s2, s8, 17
|
|
; GFX10-NEXT: v_and_b32_e32 v3, 0xffffff, v3
|
|
; GFX10-NEXT: v_lshrrev_b32_e64 v0, v0, s3
|
|
; GFX10-NEXT: v_lshl_or_b32 v1, s0, v2, v1
|
|
; GFX10-NEXT: s_or_b32 s0, s2, s1
|
|
; GFX10-NEXT: v_mov_b32_e32 v2, 8
|
|
; GFX10-NEXT: v_lshl_or_b32 v0, s0, v3, v0
|
|
; GFX10-NEXT: v_mov_b32_e32 v3, 16
|
|
; GFX10-NEXT: v_lshlrev_b32_sdwa v2, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1
|
|
; GFX10-NEXT: v_and_b32_e32 v4, 0xff, v0
|
|
; GFX10-NEXT: v_and_or_b32 v2, 0xff, v1, v2
|
|
; GFX10-NEXT: v_lshlrev_b32_sdwa v1, v3, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2
|
|
; GFX10-NEXT: v_lshlrev_b32_e32 v3, 24, v4
|
|
; GFX10-NEXT: v_bfe_u32 v4, v0, 8, 8
|
|
; GFX10-NEXT: v_bfe_u32 v0, v0, 16, 8
|
|
; GFX10-NEXT: v_or3_b32 v1, v2, v1, v3
|
|
; GFX10-NEXT: v_lshl_or_b32 v0, v0, 8, v4
|
|
; GFX10-NEXT: v_readfirstlane_b32 s0, v1
|
|
; GFX10-NEXT: v_readfirstlane_b32 s1, v0
|
|
; GFX10-NEXT: ; return to shader part epilog
|
|
;
|
|
; GFX11-LABEL: s_fshr_v2i24:
|
|
; GFX11: ; %bb.0:
|
|
; GFX11-NEXT: v_cvt_f32_ubyte0_e32 v0, 24
|
|
; GFX11-NEXT: s_lshr_b32 s14, s4, 8
|
|
; GFX11-NEXT: s_lshr_b32 s15, s4, 16
|
|
; GFX11-NEXT: s_and_b32 s14, s14, 0xff
|
|
; GFX11-NEXT: s_lshr_b32 s16, s4, 24
|
|
; GFX11-NEXT: v_rcp_iflag_f32_e32 v0, v0
|
|
; GFX11-NEXT: s_and_b32 s4, s4, 0xff
|
|
; GFX11-NEXT: s_and_b32 s15, s15, 0xff
|
|
; GFX11-NEXT: s_lshl_b32 s14, s14, 8
|
|
; GFX11-NEXT: s_and_b32 s15, 0xffff, s15
|
|
; GFX11-NEXT: s_or_b32 s4, s4, s14
|
|
; GFX11-NEXT: s_lshr_b32 s17, s5, 8
|
|
; GFX11-NEXT: s_and_b32 s5, s5, 0xff
|
|
; GFX11-NEXT: s_lshl_b32 s14, s15, 16
|
|
; GFX11-NEXT: s_and_b32 s4, 0xffff, s4
|
|
; GFX11-NEXT: s_waitcnt_depctr 0xfff
|
|
; GFX11-NEXT: v_mul_f32_e32 v0, 0x4f7ffffe, v0
|
|
; GFX11-NEXT: s_lshl_b32 s5, s5, 8
|
|
; GFX11-NEXT: s_and_b32 s15, s17, 0xff
|
|
; GFX11-NEXT: s_or_b32 s4, s4, s14
|
|
; GFX11-NEXT: s_or_b32 s5, s16, s5
|
|
; GFX11-NEXT: v_cvt_u32_f32_e32 v0, v0
|
|
; GFX11-NEXT: s_and_b32 s14, 0xffff, s15
|
|
; GFX11-NEXT: s_and_b32 s5, 0xffff, s5
|
|
; GFX11-NEXT: s_lshl_b32 s14, s14, 16
|
|
; GFX11-NEXT: s_lshr_b32 s10, s2, 8
|
|
; GFX11-NEXT: v_mul_lo_u32 v1, 0xffffffe8, v0
|
|
; GFX11-NEXT: s_or_b32 s5, s5, s14
|
|
; GFX11-NEXT: s_lshr_b32 s9, s1, 8
|
|
; GFX11-NEXT: s_and_b32 s1, s1, 0xff
|
|
; GFX11-NEXT: s_lshr_b32 s11, s2, 16
|
|
; GFX11-NEXT: s_and_b32 s10, s10, 0xff
|
|
; GFX11-NEXT: s_lshr_b32 s6, s0, 8
|
|
; GFX11-NEXT: s_lshr_b32 s8, s0, 24
|
|
; GFX11-NEXT: v_mul_hi_u32 v1, v0, v1
|
|
; GFX11-NEXT: s_lshr_b32 s12, s2, 24
|
|
; GFX11-NEXT: s_and_b32 s2, s2, 0xff
|
|
; GFX11-NEXT: s_lshl_b32 s1, s1, 8
|
|
; GFX11-NEXT: s_and_b32 s9, s9, 0xff
|
|
; GFX11-NEXT: s_and_b32 s11, s11, 0xff
|
|
; GFX11-NEXT: s_and_b32 s6, s6, 0xff
|
|
; GFX11-NEXT: s_or_b32 s1, s8, s1
|
|
; GFX11-NEXT: v_add_nc_u32_e32 v0, v0, v1
|
|
; GFX11-NEXT: s_and_b32 s8, 0xffff, s9
|
|
; GFX11-NEXT: s_and_b32 s9, 0xffff, s11
|
|
; GFX11-NEXT: s_lshr_b32 s7, s0, 16
|
|
; GFX11-NEXT: s_and_b32 s0, s0, 0xff
|
|
; GFX11-NEXT: v_mul_hi_u32 v1, s4, v0
|
|
; GFX11-NEXT: v_mul_hi_u32 v0, s5, v0
|
|
; GFX11-NEXT: s_lshr_b32 s13, s3, 8
|
|
; GFX11-NEXT: s_and_b32 s3, s3, 0xff
|
|
; GFX11-NEXT: s_lshl_b32 s6, s6, 8
|
|
; GFX11-NEXT: s_and_b32 s7, s7, 0xff
|
|
; GFX11-NEXT: s_lshl_b32 s3, s3, 8
|
|
; GFX11-NEXT: s_and_b32 s13, s13, 0xff
|
|
; GFX11-NEXT: v_mul_lo_u32 v1, v1, 24
|
|
; GFX11-NEXT: v_mul_lo_u32 v0, v0, 24
|
|
; GFX11-NEXT: s_or_b32 s0, s0, s6
|
|
; GFX11-NEXT: s_and_b32 s7, 0xffff, s7
|
|
; GFX11-NEXT: s_or_b32 s3, s12, s3
|
|
; GFX11-NEXT: s_and_b32 s0, 0xffff, s0
|
|
; GFX11-NEXT: s_and_b32 s3, 0xffff, s3
|
|
; GFX11-NEXT: s_lshl_b32 s0, s0, 1
|
|
; GFX11-NEXT: v_sub_nc_u32_e32 v1, s4, v1
|
|
; GFX11-NEXT: v_sub_nc_u32_e32 v0, s5, v0
|
|
; GFX11-NEXT: s_lshl_b32 s4, s10, 8
|
|
; GFX11-NEXT: s_and_b32 s10, 0xffff, s13
|
|
; GFX11-NEXT: s_or_b32 s2, s2, s4
|
|
; GFX11-NEXT: v_add_nc_u32_e32 v2, 0xffffffe8, v1
|
|
; GFX11-NEXT: v_cmp_le_u32_e32 vcc_lo, 24, v1
|
|
; GFX11-NEXT: s_lshl_b32 s4, s9, 16
|
|
; GFX11-NEXT: s_and_b32 s2, 0xffff, s2
|
|
; GFX11-NEXT: s_lshl_b32 s5, s10, 16
|
|
; GFX11-NEXT: s_or_b32 s2, s2, s4
|
|
; GFX11-NEXT: v_dual_cndmask_b32 v1, v1, v2 :: v_dual_add_nc_u32 v2, 0xffffffe8, v0
|
|
; GFX11-NEXT: v_cmp_le_u32_e32 vcc_lo, 24, v0
|
|
; GFX11-NEXT: s_lshl_b32 s4, s7, 17
|
|
; GFX11-NEXT: s_and_b32 s1, 0xffff, s1
|
|
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_4) | instid1(VALU_DEP_2)
|
|
; GFX11-NEXT: v_add_nc_u32_e32 v3, 0xffffffe8, v1
|
|
; GFX11-NEXT: s_or_b32 s0, s4, s0
|
|
; GFX11-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc_lo
|
|
; GFX11-NEXT: v_cmp_le_u32_e32 vcc_lo, 24, v1
|
|
; GFX11-NEXT: s_lshl_b32 s1, s1, 1
|
|
; GFX11-NEXT: v_dual_cndmask_b32 v1, v1, v3 :: v_dual_add_nc_u32 v2, 0xffffffe8, v0
|
|
; GFX11-NEXT: v_cmp_le_u32_e32 vcc_lo, 24, v0
|
|
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
|
|
; GFX11-NEXT: v_sub_nc_u32_e32 v3, 23, v1
|
|
; GFX11-NEXT: v_dual_cndmask_b32 v0, v0, v2 :: v_dual_and_b32 v1, 0xffffff, v1
|
|
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
|
|
; GFX11-NEXT: v_and_b32_e32 v2, 0xffffff, v3
|
|
; GFX11-NEXT: v_lshrrev_b32_e64 v1, v1, s2
|
|
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_3)
|
|
; GFX11-NEXT: v_sub_nc_u32_e32 v3, 23, v0
|
|
; GFX11-NEXT: v_and_b32_e32 v0, 0xffffff, v0
|
|
; GFX11-NEXT: s_or_b32 s2, s3, s5
|
|
; GFX11-NEXT: v_lshl_or_b32 v1, s0, v2, v1
|
|
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
|
|
; GFX11-NEXT: v_and_b32_e32 v2, 0xffffff, v3
|
|
; GFX11-NEXT: v_lshrrev_b32_e64 v0, v0, s2
|
|
; GFX11-NEXT: s_lshl_b32 s0, s8, 17
|
|
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
|
|
; GFX11-NEXT: s_or_b32 s0, s0, s1
|
|
; GFX11-NEXT: v_bfe_u32 v3, v1, 16, 8
|
|
; GFX11-NEXT: v_lshl_or_b32 v0, s0, v2, v0
|
|
; GFX11-NEXT: v_bfe_u32 v2, v1, 8, 8
|
|
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
|
|
; GFX11-NEXT: v_lshlrev_b32_e32 v3, 16, v3
|
|
; GFX11-NEXT: v_and_b32_e32 v4, 0xff, v0
|
|
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
|
|
; GFX11-NEXT: v_lshlrev_b32_e32 v2, 8, v2
|
|
; GFX11-NEXT: v_lshlrev_b32_e32 v4, 24, v4
|
|
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
|
|
; GFX11-NEXT: v_and_or_b32 v1, 0xff, v1, v2
|
|
; GFX11-NEXT: v_bfe_u32 v2, v0, 8, 8
|
|
; GFX11-NEXT: v_bfe_u32 v0, v0, 16, 8
|
|
; GFX11-NEXT: v_or3_b32 v1, v1, v3, v4
|
|
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
|
|
; GFX11-NEXT: v_lshl_or_b32 v0, v0, 8, v2
|
|
; GFX11-NEXT: v_readfirstlane_b32 s0, v1
|
|
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
|
|
; GFX11-NEXT: v_readfirstlane_b32 s1, v0
|
|
; GFX11-NEXT: ; return to shader part epilog
|
|
%lhs = bitcast i48 %lhs.arg to <2 x i24>
|
|
%rhs = bitcast i48 %rhs.arg to <2 x i24>
|
|
%amt = bitcast i48 %amt.arg to <2 x i24>
|
|
%result = call <2 x i24> @llvm.fshr.v2i24(<2 x i24> %lhs, <2 x i24> %rhs, <2 x i24> %amt)
|
|
%cast.result = bitcast <2 x i24> %result to i48
|
|
ret i48 %cast.result
|
|
}
|
|
|
|
define <2 x i24> @v_fshr_v2i24(<2 x i24> %lhs, <2 x i24> %rhs, <2 x i24> %amt) {
|
|
; GFX6-LABEL: v_fshr_v2i24:
|
|
; GFX6: ; %bb.0:
|
|
; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; GFX6-NEXT: v_cvt_f32_ubyte0_e32 v6, 24
|
|
; GFX6-NEXT: v_rcp_iflag_f32_e32 v6, v6
|
|
; GFX6-NEXT: v_not_b32_e32 v7, 23
|
|
; GFX6-NEXT: v_and_b32_e32 v4, 0xffffff, v4
|
|
; GFX6-NEXT: v_and_b32_e32 v5, 0xffffff, v5
|
|
; GFX6-NEXT: v_mul_f32_e32 v6, 0x4f7ffffe, v6
|
|
; GFX6-NEXT: v_cvt_u32_f32_e32 v6, v6
|
|
; GFX6-NEXT: v_lshlrev_b32_e32 v0, 1, v0
|
|
; GFX6-NEXT: v_and_b32_e32 v2, 0xffffff, v2
|
|
; GFX6-NEXT: v_lshlrev_b32_e32 v1, 1, v1
|
|
; GFX6-NEXT: v_mul_lo_u32 v8, v6, v7
|
|
; GFX6-NEXT: v_and_b32_e32 v3, 0xffffff, v3
|
|
; GFX6-NEXT: v_mul_hi_u32 v8, v6, v8
|
|
; GFX6-NEXT: v_add_i32_e32 v6, vcc, v6, v8
|
|
; GFX6-NEXT: v_mul_hi_u32 v8, v4, v6
|
|
; GFX6-NEXT: v_mul_hi_u32 v6, v5, v6
|
|
; GFX6-NEXT: v_mul_lo_u32 v8, v8, 24
|
|
; GFX6-NEXT: v_mul_lo_u32 v6, v6, 24
|
|
; GFX6-NEXT: v_sub_i32_e32 v4, vcc, v4, v8
|
|
; GFX6-NEXT: v_add_i32_e32 v8, vcc, v4, v7
|
|
; GFX6-NEXT: v_cmp_le_u32_e32 vcc, 24, v4
|
|
; GFX6-NEXT: v_cndmask_b32_e32 v4, v4, v8, vcc
|
|
; GFX6-NEXT: v_add_i32_e32 v8, vcc, v4, v7
|
|
; GFX6-NEXT: v_cmp_le_u32_e32 vcc, 24, v4
|
|
; GFX6-NEXT: v_cndmask_b32_e32 v4, v4, v8, vcc
|
|
; GFX6-NEXT: v_sub_i32_e32 v8, vcc, 23, v4
|
|
; GFX6-NEXT: v_and_b32_e32 v8, 0xffffff, v8
|
|
; GFX6-NEXT: v_and_b32_e32 v4, 0xffffff, v4
|
|
; GFX6-NEXT: v_lshlrev_b32_e32 v0, v8, v0
|
|
; GFX6-NEXT: v_lshrrev_b32_e32 v2, v4, v2
|
|
; GFX6-NEXT: v_or_b32_e32 v0, v0, v2
|
|
; GFX6-NEXT: v_sub_i32_e32 v2, vcc, v5, v6
|
|
; GFX6-NEXT: v_add_i32_e32 v4, vcc, v2, v7
|
|
; GFX6-NEXT: v_cmp_le_u32_e32 vcc, 24, v2
|
|
; GFX6-NEXT: v_cndmask_b32_e32 v2, v2, v4, vcc
|
|
; GFX6-NEXT: v_add_i32_e32 v4, vcc, 0xffffffe8, v2
|
|
; GFX6-NEXT: v_cmp_le_u32_e32 vcc, 24, v2
|
|
; GFX6-NEXT: v_cndmask_b32_e32 v2, v2, v4, vcc
|
|
; GFX6-NEXT: v_sub_i32_e32 v4, vcc, 23, v2
|
|
; GFX6-NEXT: v_and_b32_e32 v4, 0xffffff, v4
|
|
; GFX6-NEXT: v_and_b32_e32 v2, 0xffffff, v2
|
|
; GFX6-NEXT: v_lshlrev_b32_e32 v1, v4, v1
|
|
; GFX6-NEXT: v_lshrrev_b32_e32 v2, v2, v3
|
|
; GFX6-NEXT: v_or_b32_e32 v1, v1, v2
|
|
; GFX6-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; GFX8-LABEL: v_fshr_v2i24:
|
|
; GFX8: ; %bb.0:
|
|
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; GFX8-NEXT: v_cvt_f32_ubyte0_e32 v6, 24
|
|
; GFX8-NEXT: v_rcp_iflag_f32_e32 v6, v6
|
|
; GFX8-NEXT: v_not_b32_e32 v7, 23
|
|
; GFX8-NEXT: v_and_b32_e32 v4, 0xffffff, v4
|
|
; GFX8-NEXT: v_and_b32_e32 v5, 0xffffff, v5
|
|
; GFX8-NEXT: v_mul_f32_e32 v6, 0x4f7ffffe, v6
|
|
; GFX8-NEXT: v_cvt_u32_f32_e32 v6, v6
|
|
; GFX8-NEXT: v_lshlrev_b32_e32 v0, 1, v0
|
|
; GFX8-NEXT: v_and_b32_e32 v2, 0xffffff, v2
|
|
; GFX8-NEXT: v_lshlrev_b32_e32 v1, 1, v1
|
|
; GFX8-NEXT: v_mul_lo_u32 v8, v6, v7
|
|
; GFX8-NEXT: v_and_b32_e32 v3, 0xffffff, v3
|
|
; GFX8-NEXT: v_mul_hi_u32 v8, v6, v8
|
|
; GFX8-NEXT: v_add_u32_e32 v6, vcc, v6, v8
|
|
; GFX8-NEXT: v_mul_hi_u32 v8, v4, v6
|
|
; GFX8-NEXT: v_mul_hi_u32 v6, v5, v6
|
|
; GFX8-NEXT: v_mul_lo_u32 v8, v8, 24
|
|
; GFX8-NEXT: v_mul_lo_u32 v6, v6, 24
|
|
; GFX8-NEXT: v_sub_u32_e32 v4, vcc, v4, v8
|
|
; GFX8-NEXT: v_add_u32_e32 v8, vcc, v4, v7
|
|
; GFX8-NEXT: v_cmp_le_u32_e32 vcc, 24, v4
|
|
; GFX8-NEXT: v_cndmask_b32_e32 v4, v4, v8, vcc
|
|
; GFX8-NEXT: v_add_u32_e32 v8, vcc, v4, v7
|
|
; GFX8-NEXT: v_cmp_le_u32_e32 vcc, 24, v4
|
|
; GFX8-NEXT: v_cndmask_b32_e32 v4, v4, v8, vcc
|
|
; GFX8-NEXT: v_sub_u32_e32 v8, vcc, 23, v4
|
|
; GFX8-NEXT: v_and_b32_e32 v8, 0xffffff, v8
|
|
; GFX8-NEXT: v_and_b32_e32 v4, 0xffffff, v4
|
|
; GFX8-NEXT: v_lshlrev_b32_e32 v0, v8, v0
|
|
; GFX8-NEXT: v_lshrrev_b32_e32 v2, v4, v2
|
|
; GFX8-NEXT: v_or_b32_e32 v0, v0, v2
|
|
; GFX8-NEXT: v_sub_u32_e32 v2, vcc, v5, v6
|
|
; GFX8-NEXT: v_add_u32_e32 v4, vcc, v2, v7
|
|
; GFX8-NEXT: v_cmp_le_u32_e32 vcc, 24, v2
|
|
; GFX8-NEXT: v_cndmask_b32_e32 v2, v2, v4, vcc
|
|
; GFX8-NEXT: v_add_u32_e32 v4, vcc, 0xffffffe8, v2
|
|
; GFX8-NEXT: v_cmp_le_u32_e32 vcc, 24, v2
|
|
; GFX8-NEXT: v_cndmask_b32_e32 v2, v2, v4, vcc
|
|
; GFX8-NEXT: v_sub_u32_e32 v4, vcc, 23, v2
|
|
; GFX8-NEXT: v_and_b32_e32 v4, 0xffffff, v4
|
|
; GFX8-NEXT: v_and_b32_e32 v2, 0xffffff, v2
|
|
; GFX8-NEXT: v_lshlrev_b32_e32 v1, v4, v1
|
|
; GFX8-NEXT: v_lshrrev_b32_e32 v2, v2, v3
|
|
; GFX8-NEXT: v_or_b32_e32 v1, v1, v2
|
|
; GFX8-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; GFX9-LABEL: v_fshr_v2i24:
|
|
; GFX9: ; %bb.0:
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; GFX9-NEXT: v_cvt_f32_ubyte0_e32 v6, 24
|
|
; GFX9-NEXT: v_rcp_iflag_f32_e32 v6, v6
|
|
; GFX9-NEXT: v_not_b32_e32 v7, 23
|
|
; GFX9-NEXT: v_and_b32_e32 v4, 0xffffff, v4
|
|
; GFX9-NEXT: v_and_b32_e32 v5, 0xffffff, v5
|
|
; GFX9-NEXT: v_mul_f32_e32 v6, 0x4f7ffffe, v6
|
|
; GFX9-NEXT: v_cvt_u32_f32_e32 v6, v6
|
|
; GFX9-NEXT: v_and_b32_e32 v2, 0xffffff, v2
|
|
; GFX9-NEXT: v_lshlrev_b32_e32 v0, 1, v0
|
|
; GFX9-NEXT: v_and_b32_e32 v3, 0xffffff, v3
|
|
; GFX9-NEXT: v_mul_lo_u32 v7, v6, v7
|
|
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 1, v1
|
|
; GFX9-NEXT: v_mul_hi_u32 v7, v6, v7
|
|
; GFX9-NEXT: v_add_u32_e32 v6, v6, v7
|
|
; GFX9-NEXT: v_mul_hi_u32 v7, v4, v6
|
|
; GFX9-NEXT: v_mul_hi_u32 v6, v5, v6
|
|
; GFX9-NEXT: v_mul_lo_u32 v7, v7, 24
|
|
; GFX9-NEXT: v_mul_lo_u32 v6, v6, 24
|
|
; GFX9-NEXT: v_sub_u32_e32 v4, v4, v7
|
|
; GFX9-NEXT: v_sub_u32_e32 v5, v5, v6
|
|
; GFX9-NEXT: v_add_u32_e32 v6, 0xffffffe8, v4
|
|
; GFX9-NEXT: v_cmp_le_u32_e32 vcc, 24, v4
|
|
; GFX9-NEXT: v_cndmask_b32_e32 v4, v4, v6, vcc
|
|
; GFX9-NEXT: v_add_u32_e32 v6, 0xffffffe8, v4
|
|
; GFX9-NEXT: v_cmp_le_u32_e32 vcc, 24, v4
|
|
; GFX9-NEXT: v_cndmask_b32_e32 v4, v4, v6, vcc
|
|
; GFX9-NEXT: v_sub_u32_e32 v6, 23, v4
|
|
; GFX9-NEXT: v_and_b32_e32 v4, 0xffffff, v4
|
|
; GFX9-NEXT: v_and_b32_e32 v6, 0xffffff, v6
|
|
; GFX9-NEXT: v_lshrrev_b32_e32 v2, v4, v2
|
|
; GFX9-NEXT: v_lshl_or_b32 v0, v0, v6, v2
|
|
; GFX9-NEXT: v_add_u32_e32 v2, 0xffffffe8, v5
|
|
; GFX9-NEXT: v_cmp_le_u32_e32 vcc, 24, v5
|
|
; GFX9-NEXT: v_cndmask_b32_e32 v2, v5, v2, vcc
|
|
; GFX9-NEXT: v_add_u32_e32 v4, 0xffffffe8, v2
|
|
; GFX9-NEXT: v_cmp_le_u32_e32 vcc, 24, v2
|
|
; GFX9-NEXT: v_cndmask_b32_e32 v2, v2, v4, vcc
|
|
; GFX9-NEXT: v_sub_u32_e32 v4, 23, v2
|
|
; GFX9-NEXT: v_and_b32_e32 v2, 0xffffff, v2
|
|
; GFX9-NEXT: v_and_b32_e32 v4, 0xffffff, v4
|
|
; GFX9-NEXT: v_lshrrev_b32_e32 v2, v2, v3
|
|
; GFX9-NEXT: v_lshl_or_b32 v1, v1, v4, v2
|
|
; GFX9-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; GFX10-LABEL: v_fshr_v2i24:
|
|
; GFX10: ; %bb.0:
|
|
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; GFX10-NEXT: v_cvt_f32_ubyte0_e32 v6, 24
|
|
; GFX10-NEXT: v_and_b32_e32 v4, 0xffffff, v4
|
|
; GFX10-NEXT: v_and_b32_e32 v5, 0xffffff, v5
|
|
; GFX10-NEXT: v_and_b32_e32 v2, 0xffffff, v2
|
|
; GFX10-NEXT: v_and_b32_e32 v3, 0xffffff, v3
|
|
; GFX10-NEXT: v_rcp_iflag_f32_e32 v6, v6
|
|
; GFX10-NEXT: v_lshlrev_b32_e32 v0, 1, v0
|
|
; GFX10-NEXT: v_lshlrev_b32_e32 v1, 1, v1
|
|
; GFX10-NEXT: v_mul_f32_e32 v6, 0x4f7ffffe, v6
|
|
; GFX10-NEXT: v_cvt_u32_f32_e32 v6, v6
|
|
; GFX10-NEXT: v_mul_lo_u32 v7, 0xffffffe8, v6
|
|
; GFX10-NEXT: v_mul_hi_u32 v7, v6, v7
|
|
; GFX10-NEXT: v_add_nc_u32_e32 v6, v6, v7
|
|
; GFX10-NEXT: v_mul_hi_u32 v7, v4, v6
|
|
; GFX10-NEXT: v_mul_hi_u32 v6, v5, v6
|
|
; GFX10-NEXT: v_mul_lo_u32 v7, v7, 24
|
|
; GFX10-NEXT: v_mul_lo_u32 v6, v6, 24
|
|
; GFX10-NEXT: v_sub_nc_u32_e32 v4, v4, v7
|
|
; GFX10-NEXT: v_sub_nc_u32_e32 v5, v5, v6
|
|
; GFX10-NEXT: v_add_nc_u32_e32 v6, 0xffffffe8, v4
|
|
; GFX10-NEXT: v_cmp_le_u32_e32 vcc_lo, 24, v4
|
|
; GFX10-NEXT: v_add_nc_u32_e32 v7, 0xffffffe8, v5
|
|
; GFX10-NEXT: v_cndmask_b32_e32 v4, v4, v6, vcc_lo
|
|
; GFX10-NEXT: v_cmp_le_u32_e32 vcc_lo, 24, v5
|
|
; GFX10-NEXT: v_add_nc_u32_e32 v6, 0xffffffe8, v4
|
|
; GFX10-NEXT: v_cndmask_b32_e32 v5, v5, v7, vcc_lo
|
|
; GFX10-NEXT: v_cmp_le_u32_e32 vcc_lo, 24, v4
|
|
; GFX10-NEXT: v_add_nc_u32_e32 v7, 0xffffffe8, v5
|
|
; GFX10-NEXT: v_cndmask_b32_e32 v4, v4, v6, vcc_lo
|
|
; GFX10-NEXT: v_cmp_le_u32_e32 vcc_lo, 24, v5
|
|
; GFX10-NEXT: v_sub_nc_u32_e32 v6, 23, v4
|
|
; GFX10-NEXT: v_cndmask_b32_e32 v5, v5, v7, vcc_lo
|
|
; GFX10-NEXT: v_and_b32_e32 v4, 0xffffff, v4
|
|
; GFX10-NEXT: v_and_b32_e32 v6, 0xffffff, v6
|
|
; GFX10-NEXT: v_sub_nc_u32_e32 v7, 23, v5
|
|
; GFX10-NEXT: v_and_b32_e32 v5, 0xffffff, v5
|
|
; GFX10-NEXT: v_lshrrev_b32_e32 v2, v4, v2
|
|
; GFX10-NEXT: v_and_b32_e32 v4, 0xffffff, v7
|
|
; GFX10-NEXT: v_lshrrev_b32_e32 v3, v5, v3
|
|
; GFX10-NEXT: v_lshl_or_b32 v0, v0, v6, v2
|
|
; GFX10-NEXT: v_lshl_or_b32 v1, v1, v4, v3
|
|
; GFX10-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; GFX11-LABEL: v_fshr_v2i24:
|
|
; GFX11: ; %bb.0:
|
|
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; GFX11-NEXT: v_cvt_f32_ubyte0_e32 v6, 24
|
|
; GFX11-NEXT: v_and_b32_e32 v4, 0xffffff, v4
|
|
; GFX11-NEXT: v_and_b32_e32 v2, 0xffffff, v2
|
|
; GFX11-NEXT: v_and_b32_e32 v3, 0xffffff, v3
|
|
; GFX11-NEXT: v_lshlrev_b32_e32 v0, 1, v0
|
|
; GFX11-NEXT: v_rcp_iflag_f32_e32 v6, v6
|
|
; GFX11-NEXT: s_waitcnt_depctr 0xfff
|
|
; GFX11-NEXT: v_dual_mul_f32 v6, 0x4f7ffffe, v6 :: v_dual_lshlrev_b32 v1, 1, v1
|
|
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
|
|
; GFX11-NEXT: v_cvt_u32_f32_e32 v6, v6
|
|
; GFX11-NEXT: v_mul_lo_u32 v7, 0xffffffe8, v6
|
|
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
|
|
; GFX11-NEXT: v_mul_hi_u32 v7, v6, v7
|
|
; GFX11-NEXT: v_add_nc_u32_e32 v6, v6, v7
|
|
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
|
|
; GFX11-NEXT: v_mul_hi_u32 v7, v4, v6
|
|
; GFX11-NEXT: v_mul_lo_u32 v7, v7, 24
|
|
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
|
|
; GFX11-NEXT: v_sub_nc_u32_e32 v4, v4, v7
|
|
; GFX11-NEXT: v_cmp_le_u32_e32 vcc_lo, 24, v4
|
|
; GFX11-NEXT: v_and_b32_e32 v5, 0xffffff, v5
|
|
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
|
|
; GFX11-NEXT: v_mul_hi_u32 v6, v5, v6
|
|
; GFX11-NEXT: v_mul_lo_u32 v6, v6, 24
|
|
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
|
|
; GFX11-NEXT: v_sub_nc_u32_e32 v5, v5, v6
|
|
; GFX11-NEXT: v_add_nc_u32_e32 v6, 0xffffffe8, v4
|
|
; GFX11-NEXT: v_cndmask_b32_e32 v4, v4, v6, vcc_lo
|
|
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
|
|
; GFX11-NEXT: v_cmp_le_u32_e32 vcc_lo, 24, v5
|
|
; GFX11-NEXT: v_add_nc_u32_e32 v6, 0xffffffe8, v4
|
|
; GFX11-NEXT: v_add_nc_u32_e32 v7, 0xffffffe8, v5
|
|
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
|
|
; GFX11-NEXT: v_cndmask_b32_e32 v5, v5, v7, vcc_lo
|
|
; GFX11-NEXT: v_cmp_le_u32_e32 vcc_lo, 24, v4
|
|
; GFX11-NEXT: v_dual_cndmask_b32 v4, v4, v6 :: v_dual_add_nc_u32 v7, 0xffffffe8, v5
|
|
; GFX11-NEXT: v_cmp_le_u32_e32 vcc_lo, 24, v5
|
|
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
|
|
; GFX11-NEXT: v_sub_nc_u32_e32 v6, 23, v4
|
|
; GFX11-NEXT: v_dual_cndmask_b32 v5, v5, v7 :: v_dual_and_b32 v4, 0xffffff, v4
|
|
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
|
|
; GFX11-NEXT: v_and_b32_e32 v6, 0xffffff, v6
|
|
; GFX11-NEXT: v_sub_nc_u32_e32 v7, 23, v5
|
|
; GFX11-NEXT: v_and_b32_e32 v5, 0xffffff, v5
|
|
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
|
|
; GFX11-NEXT: v_lshrrev_b32_e32 v2, v4, v2
|
|
; GFX11-NEXT: v_and_b32_e32 v4, 0xffffff, v7
|
|
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
|
|
; GFX11-NEXT: v_lshrrev_b32_e32 v3, v5, v3
|
|
; GFX11-NEXT: v_lshl_or_b32 v0, v0, v6, v2
|
|
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
|
|
; GFX11-NEXT: v_lshl_or_b32 v1, v1, v4, v3
|
|
; GFX11-NEXT: s_setpc_b64 s[30:31]
|
|
%result = call <2 x i24> @llvm.fshr.v2i24(<2 x i24> %lhs, <2 x i24> %rhs, <2 x i24> %amt)
|
|
ret <2 x i24> %result
|
|
}
|
|
|
|
define amdgpu_ps i32 @s_fshr_i32(i32 inreg %lhs, i32 inreg %rhs, i32 inreg %amt) {
|
|
; GFX6-LABEL: s_fshr_i32:
|
|
; GFX6: ; %bb.0:
|
|
; GFX6-NEXT: v_mov_b32_e32 v0, s1
|
|
; GFX6-NEXT: v_mov_b32_e32 v1, s2
|
|
; GFX6-NEXT: v_alignbit_b32 v0, s0, v0, v1
|
|
; GFX6-NEXT: v_readfirstlane_b32 s0, v0
|
|
; GFX6-NEXT: ; return to shader part epilog
|
|
;
|
|
; GFX8-LABEL: s_fshr_i32:
|
|
; GFX8: ; %bb.0:
|
|
; GFX8-NEXT: v_mov_b32_e32 v0, s1
|
|
; GFX8-NEXT: v_mov_b32_e32 v1, s2
|
|
; GFX8-NEXT: v_alignbit_b32 v0, s0, v0, v1
|
|
; GFX8-NEXT: v_readfirstlane_b32 s0, v0
|
|
; GFX8-NEXT: ; return to shader part epilog
|
|
;
|
|
; GFX9-LABEL: s_fshr_i32:
|
|
; GFX9: ; %bb.0:
|
|
; GFX9-NEXT: v_mov_b32_e32 v0, s1
|
|
; GFX9-NEXT: v_mov_b32_e32 v1, s2
|
|
; GFX9-NEXT: v_alignbit_b32 v0, s0, v0, v1
|
|
; GFX9-NEXT: v_readfirstlane_b32 s0, v0
|
|
; GFX9-NEXT: ; return to shader part epilog
|
|
;
|
|
; GFX10-LABEL: s_fshr_i32:
|
|
; GFX10: ; %bb.0:
|
|
; GFX10-NEXT: v_mov_b32_e32 v0, s2
|
|
; GFX10-NEXT: v_alignbit_b32 v0, s0, s1, v0
|
|
; GFX10-NEXT: v_readfirstlane_b32 s0, v0
|
|
; GFX10-NEXT: ; return to shader part epilog
|
|
;
|
|
; GFX11-LABEL: s_fshr_i32:
|
|
; GFX11: ; %bb.0:
|
|
; GFX11-NEXT: v_mov_b32_e32 v0, s2
|
|
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
|
|
; GFX11-NEXT: v_alignbit_b32 v0, s0, s1, v0
|
|
; GFX11-NEXT: v_readfirstlane_b32 s0, v0
|
|
; GFX11-NEXT: ; return to shader part epilog
|
|
%result = call i32 @llvm.fshr.i32(i32 %lhs, i32 %rhs, i32 %amt)
|
|
ret i32 %result
|
|
}
|
|
|
|
define amdgpu_ps i32 @s_fshr_i32_5(i32 inreg %lhs, i32 inreg %rhs) {
|
|
; GFX6-LABEL: s_fshr_i32_5:
|
|
; GFX6: ; %bb.0:
|
|
; GFX6-NEXT: v_mov_b32_e32 v0, s1
|
|
; GFX6-NEXT: v_alignbit_b32 v0, s0, v0, 5
|
|
; GFX6-NEXT: v_readfirstlane_b32 s0, v0
|
|
; GFX6-NEXT: ; return to shader part epilog
|
|
;
|
|
; GFX8-LABEL: s_fshr_i32_5:
|
|
; GFX8: ; %bb.0:
|
|
; GFX8-NEXT: v_mov_b32_e32 v0, s1
|
|
; GFX8-NEXT: v_alignbit_b32 v0, s0, v0, 5
|
|
; GFX8-NEXT: v_readfirstlane_b32 s0, v0
|
|
; GFX8-NEXT: ; return to shader part epilog
|
|
;
|
|
; GFX9-LABEL: s_fshr_i32_5:
|
|
; GFX9: ; %bb.0:
|
|
; GFX9-NEXT: v_mov_b32_e32 v0, s1
|
|
; GFX9-NEXT: v_alignbit_b32 v0, s0, v0, 5
|
|
; GFX9-NEXT: v_readfirstlane_b32 s0, v0
|
|
; GFX9-NEXT: ; return to shader part epilog
|
|
;
|
|
; GFX10-LABEL: s_fshr_i32_5:
|
|
; GFX10: ; %bb.0:
|
|
; GFX10-NEXT: v_alignbit_b32 v0, s0, s1, 5
|
|
; GFX10-NEXT: v_readfirstlane_b32 s0, v0
|
|
; GFX10-NEXT: ; return to shader part epilog
|
|
;
|
|
; GFX11-LABEL: s_fshr_i32_5:
|
|
; GFX11: ; %bb.0:
|
|
; GFX11-NEXT: v_alignbit_b32 v0, s0, s1, 5
|
|
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
|
|
; GFX11-NEXT: v_readfirstlane_b32 s0, v0
|
|
; GFX11-NEXT: ; return to shader part epilog
|
|
%result = call i32 @llvm.fshr.i32(i32 %lhs, i32 %rhs, i32 5)
|
|
ret i32 %result
|
|
}
|
|
|
|
define amdgpu_ps i32 @s_fshr_i32_8(i32 inreg %lhs, i32 inreg %rhs) {
|
|
; GFX6-LABEL: s_fshr_i32_8:
|
|
; GFX6: ; %bb.0:
|
|
; GFX6-NEXT: v_mov_b32_e32 v0, s1
|
|
; GFX6-NEXT: v_alignbit_b32 v0, s0, v0, 8
|
|
; GFX6-NEXT: v_readfirstlane_b32 s0, v0
|
|
; GFX6-NEXT: ; return to shader part epilog
|
|
;
|
|
; GFX8-LABEL: s_fshr_i32_8:
|
|
; GFX8: ; %bb.0:
|
|
; GFX8-NEXT: v_mov_b32_e32 v0, s1
|
|
; GFX8-NEXT: v_alignbit_b32 v0, s0, v0, 8
|
|
; GFX8-NEXT: v_readfirstlane_b32 s0, v0
|
|
; GFX8-NEXT: ; return to shader part epilog
|
|
;
|
|
; GFX9-LABEL: s_fshr_i32_8:
|
|
; GFX9: ; %bb.0:
|
|
; GFX9-NEXT: v_mov_b32_e32 v0, s1
|
|
; GFX9-NEXT: v_alignbit_b32 v0, s0, v0, 8
|
|
; GFX9-NEXT: v_readfirstlane_b32 s0, v0
|
|
; GFX9-NEXT: ; return to shader part epilog
|
|
;
|
|
; GFX10-LABEL: s_fshr_i32_8:
|
|
; GFX10: ; %bb.0:
|
|
; GFX10-NEXT: v_alignbit_b32 v0, s0, s1, 8
|
|
; GFX10-NEXT: v_readfirstlane_b32 s0, v0
|
|
; GFX10-NEXT: ; return to shader part epilog
|
|
;
|
|
; GFX11-LABEL: s_fshr_i32_8:
|
|
; GFX11: ; %bb.0:
|
|
; GFX11-NEXT: v_alignbit_b32 v0, s0, s1, 8
|
|
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
|
|
; GFX11-NEXT: v_readfirstlane_b32 s0, v0
|
|
; GFX11-NEXT: ; return to shader part epilog
|
|
%result = call i32 @llvm.fshr.i32(i32 %lhs, i32 %rhs, i32 8)
|
|
ret i32 %result
|
|
}
|
|
|
|
define i32 @v_fshr_i32(i32 %lhs, i32 %rhs, i32 %amt) {
|
|
; GCN-LABEL: v_fshr_i32:
|
|
; GCN: ; %bb.0:
|
|
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; GCN-NEXT: v_alignbit_b32 v0, v0, v1, v2
|
|
; GCN-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; GFX11-LABEL: v_fshr_i32:
|
|
; GFX11: ; %bb.0:
|
|
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; GFX11-NEXT: v_alignbit_b32 v0, v0, v1, v2
|
|
; GFX11-NEXT: s_setpc_b64 s[30:31]
|
|
%result = call i32 @llvm.fshr.i32(i32 %lhs, i32 %rhs, i32 %amt)
|
|
ret i32 %result
|
|
}
|
|
|
|
define i32 @v_fshr_i32_5(i32 %lhs, i32 %rhs) {
|
|
; GCN-LABEL: v_fshr_i32_5:
|
|
; GCN: ; %bb.0:
|
|
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; GCN-NEXT: v_alignbit_b32 v0, v0, v1, 5
|
|
; GCN-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; GFX11-LABEL: v_fshr_i32_5:
|
|
; GFX11: ; %bb.0:
|
|
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; GFX11-NEXT: v_alignbit_b32 v0, v0, v1, 5
|
|
; GFX11-NEXT: s_setpc_b64 s[30:31]
|
|
%result = call i32 @llvm.fshr.i32(i32 %lhs, i32 %rhs, i32 5)
|
|
ret i32 %result
|
|
}
|
|
|
|
define i32 @v_fshr_i32_8(i32 %lhs, i32 %rhs) {
|
|
; GCN-LABEL: v_fshr_i32_8:
|
|
; GCN: ; %bb.0:
|
|
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; GCN-NEXT: v_alignbit_b32 v0, v0, v1, 8
|
|
; GCN-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; GFX11-LABEL: v_fshr_i32_8:
|
|
; GFX11: ; %bb.0:
|
|
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; GFX11-NEXT: v_alignbit_b32 v0, v0, v1, 8
|
|
; GFX11-NEXT: s_setpc_b64 s[30:31]
|
|
%result = call i32 @llvm.fshr.i32(i32 %lhs, i32 %rhs, i32 8)
|
|
ret i32 %result
|
|
}
|
|
|
|
define amdgpu_ps float @v_fshr_i32_ssv(i32 inreg %lhs, i32 inreg %rhs, i32 %amt) {
|
|
; GFX6-LABEL: v_fshr_i32_ssv:
|
|
; GFX6: ; %bb.0:
|
|
; GFX6-NEXT: v_mov_b32_e32 v1, s1
|
|
; GFX6-NEXT: v_alignbit_b32 v0, s0, v1, v0
|
|
; GFX6-NEXT: ; return to shader part epilog
|
|
;
|
|
; GFX8-LABEL: v_fshr_i32_ssv:
|
|
; GFX8: ; %bb.0:
|
|
; GFX8-NEXT: v_mov_b32_e32 v1, s1
|
|
; GFX8-NEXT: v_alignbit_b32 v0, s0, v1, v0
|
|
; GFX8-NEXT: ; return to shader part epilog
|
|
;
|
|
; GFX9-LABEL: v_fshr_i32_ssv:
|
|
; GFX9: ; %bb.0:
|
|
; GFX9-NEXT: v_mov_b32_e32 v1, s1
|
|
; GFX9-NEXT: v_alignbit_b32 v0, s0, v1, v0
|
|
; GFX9-NEXT: ; return to shader part epilog
|
|
;
|
|
; GFX10-LABEL: v_fshr_i32_ssv:
|
|
; GFX10: ; %bb.0:
|
|
; GFX10-NEXT: v_alignbit_b32 v0, s0, s1, v0
|
|
; GFX10-NEXT: ; return to shader part epilog
|
|
;
|
|
; GFX11-LABEL: v_fshr_i32_ssv:
|
|
; GFX11: ; %bb.0:
|
|
; GFX11-NEXT: v_alignbit_b32 v0, s0, s1, v0
|
|
; GFX11-NEXT: ; return to shader part epilog
|
|
%result = call i32 @llvm.fshr.i32(i32 %lhs, i32 %rhs, i32 %amt)
|
|
%cast.result = bitcast i32 %result to float
|
|
ret float %cast.result
|
|
}
|
|
|
|
; i32 fshr with divergent rhs and uniform lhs/amount: still a single
; v_alignbit_b32; only the uniform amount needs a v_mov on pre-GFX10,
; GFX10+ takes s1 as the shift-amount operand directly.
define amdgpu_ps float @v_fshr_i32_svs(i32 inreg %lhs, i32 %rhs, i32 inreg %amt) {
; GFX6-LABEL: v_fshr_i32_svs:
; GFX6: ; %bb.0:
; GFX6-NEXT: v_mov_b32_e32 v1, s1
; GFX6-NEXT: v_alignbit_b32 v0, s0, v0, v1
; GFX6-NEXT: ; return to shader part epilog
;
; GFX8-LABEL: v_fshr_i32_svs:
; GFX8: ; %bb.0:
; GFX8-NEXT: v_mov_b32_e32 v1, s1
; GFX8-NEXT: v_alignbit_b32 v0, s0, v0, v1
; GFX8-NEXT: ; return to shader part epilog
;
; GFX9-LABEL: v_fshr_i32_svs:
; GFX9: ; %bb.0:
; GFX9-NEXT: v_mov_b32_e32 v1, s1
; GFX9-NEXT: v_alignbit_b32 v0, s0, v0, v1
; GFX9-NEXT: ; return to shader part epilog
;
; GFX10-LABEL: v_fshr_i32_svs:
; GFX10: ; %bb.0:
; GFX10-NEXT: v_alignbit_b32 v0, s0, v0, s1
; GFX10-NEXT: ; return to shader part epilog
;
; GFX11-LABEL: v_fshr_i32_svs:
; GFX11: ; %bb.0:
; GFX11-NEXT: v_alignbit_b32 v0, s0, v0, s1
; GFX11-NEXT: ; return to shader part epilog
  %result = call i32 @llvm.fshr.i32(i32 %lhs, i32 %rhs, i32 %amt)
  %cast.result = bitcast i32 %result to float
  ret float %cast.result
}
|
|
|
|
; NOTE(review): despite the "_vss" name, all three operands are inreg
; (uniform) here - confirm whether %lhs was meant to be a VGPR argument.
; As written, the fully-uniform inputs are copied to VGPRs for v_alignbit_b32;
; GFX10+ needs only one copy (the amount must be a VGPR operand).
define amdgpu_ps float @v_fshr_i32_vss(i32 inreg %lhs, i32 inreg %rhs, i32 inreg %amt) {
; GFX6-LABEL: v_fshr_i32_vss:
; GFX6: ; %bb.0:
; GFX6-NEXT: v_mov_b32_e32 v0, s1
; GFX6-NEXT: v_mov_b32_e32 v1, s2
; GFX6-NEXT: v_alignbit_b32 v0, s0, v0, v1
; GFX6-NEXT: ; return to shader part epilog
;
; GFX8-LABEL: v_fshr_i32_vss:
; GFX8: ; %bb.0:
; GFX8-NEXT: v_mov_b32_e32 v0, s1
; GFX8-NEXT: v_mov_b32_e32 v1, s2
; GFX8-NEXT: v_alignbit_b32 v0, s0, v0, v1
; GFX8-NEXT: ; return to shader part epilog
;
; GFX9-LABEL: v_fshr_i32_vss:
; GFX9: ; %bb.0:
; GFX9-NEXT: v_mov_b32_e32 v0, s1
; GFX9-NEXT: v_mov_b32_e32 v1, s2
; GFX9-NEXT: v_alignbit_b32 v0, s0, v0, v1
; GFX9-NEXT: ; return to shader part epilog
;
; GFX10-LABEL: v_fshr_i32_vss:
; GFX10: ; %bb.0:
; GFX10-NEXT: v_mov_b32_e32 v0, s2
; GFX10-NEXT: v_alignbit_b32 v0, s0, s1, v0
; GFX10-NEXT: ; return to shader part epilog
;
; GFX11-LABEL: v_fshr_i32_vss:
; GFX11: ; %bb.0:
; GFX11-NEXT: v_mov_b32_e32 v0, s2
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-NEXT: v_alignbit_b32 v0, s0, s1, v0
; GFX11-NEXT: ; return to shader part epilog
  %result = call i32 @llvm.fshr.i32(i32 %lhs, i32 %rhs, i32 %amt)
  %cast.result = bitcast i32 %result to float
  ret float %cast.result
}
|
|
|
|
; Vector fshr splits element-wise into one v_alignbit_b32 per i32 lane;
; identical lowering on all targets (shared GCN prefix plus GFX11).
define <2 x i32> @v_fshr_v2i32(<2 x i32> %lhs, <2 x i32> %rhs, <2 x i32> %amt) {
; GCN-LABEL: v_fshr_v2i32:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN-NEXT: v_alignbit_b32 v0, v0, v2, v4
; GCN-NEXT: v_alignbit_b32 v1, v1, v3, v5
; GCN-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-LABEL: v_fshr_v2i32:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_alignbit_b32 v0, v0, v2, v4
; GFX11-NEXT: v_alignbit_b32 v1, v1, v3, v5
; GFX11-NEXT: s_setpc_b64 s[30:31]
  %result = call <2 x i32> @llvm.fshr.v2i32(<2 x i32> %lhs, <2 x i32> %rhs, <2 x i32> %amt)
  ret <2 x i32> %result
}
|
|
|
|
; Same per-lane v_alignbit_b32 lowering for the 3-element case.
define <3 x i32> @v_fshr_v3i32(<3 x i32> %lhs, <3 x i32> %rhs, <3 x i32> %amt) {
; GCN-LABEL: v_fshr_v3i32:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN-NEXT: v_alignbit_b32 v0, v0, v3, v6
; GCN-NEXT: v_alignbit_b32 v1, v1, v4, v7
; GCN-NEXT: v_alignbit_b32 v2, v2, v5, v8
; GCN-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-LABEL: v_fshr_v3i32:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_alignbit_b32 v0, v0, v3, v6
; GFX11-NEXT: v_alignbit_b32 v1, v1, v4, v7
; GFX11-NEXT: v_alignbit_b32 v2, v2, v5, v8
; GFX11-NEXT: s_setpc_b64 s[30:31]
  %result = call <3 x i32> @llvm.fshr.v3i32(<3 x i32> %lhs, <3 x i32> %rhs, <3 x i32> %amt)
  ret <3 x i32> %result
}
|
|
|
|
; Same per-lane v_alignbit_b32 lowering for the 4-element case.
define <4 x i32> @v_fshr_v4i32(<4 x i32> %lhs, <4 x i32> %rhs, <4 x i32> %amt) {
; GCN-LABEL: v_fshr_v4i32:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN-NEXT: v_alignbit_b32 v0, v0, v4, v8
; GCN-NEXT: v_alignbit_b32 v1, v1, v5, v9
; GCN-NEXT: v_alignbit_b32 v2, v2, v6, v10
; GCN-NEXT: v_alignbit_b32 v3, v3, v7, v11
; GCN-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-LABEL: v_fshr_v4i32:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_alignbit_b32 v0, v0, v4, v8
; GFX11-NEXT: v_alignbit_b32 v1, v1, v5, v9
; GFX11-NEXT: v_alignbit_b32 v2, v2, v6, v10
; GFX11-NEXT: v_alignbit_b32 v3, v3, v7, v11
; GFX11-NEXT: s_setpc_b64 s[30:31]
  %result = call <4 x i32> @llvm.fshr.v4i32(<4 x i32> %lhs, <4 x i32> %rhs, <4 x i32> %amt)
  ret <4 x i32> %result
}
|
|
|
|
; Fully-uniform i16 fshr: expanded with scalar ALU as
; (lhs << 1 << (~amt & 15)) | (rhs >> (amt & 15)); the 0xffff masks keep the
; 16-bit values zero-extended for the 32-bit scalar shifts.
; GFX11 renames s_andn2_b32 to s_and_not1_b32 and inserts s_delay_alu.
define amdgpu_ps i16 @s_fshr_i16(i16 inreg %lhs, i16 inreg %rhs, i16 inreg %amt) {
; GFX6-LABEL: s_fshr_i16:
; GFX6: ; %bb.0:
; GFX6-NEXT: s_and_b32 s3, s2, 15
; GFX6-NEXT: s_andn2_b32 s2, 15, s2
; GFX6-NEXT: s_lshl_b32 s0, s0, 1
; GFX6-NEXT: s_and_b32 s2, 0xffff, s2
; GFX6-NEXT: s_lshl_b32 s0, s0, s2
; GFX6-NEXT: s_and_b32 s2, 0xffff, s3
; GFX6-NEXT: s_and_b32 s1, s1, 0xffff
; GFX6-NEXT: s_lshr_b32 s1, s1, s2
; GFX6-NEXT: s_or_b32 s0, s0, s1
; GFX6-NEXT: ; return to shader part epilog
;
; GFX8-LABEL: s_fshr_i16:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_and_b32 s3, s2, 15
; GFX8-NEXT: s_andn2_b32 s2, 15, s2
; GFX8-NEXT: s_lshl_b32 s0, s0, 1
; GFX8-NEXT: s_and_b32 s2, 0xffff, s2
; GFX8-NEXT: s_lshl_b32 s0, s0, s2
; GFX8-NEXT: s_and_b32 s1, 0xffff, s1
; GFX8-NEXT: s_and_b32 s2, 0xffff, s3
; GFX8-NEXT: s_lshr_b32 s1, s1, s2
; GFX8-NEXT: s_or_b32 s0, s0, s1
; GFX8-NEXT: ; return to shader part epilog
;
; GFX9-LABEL: s_fshr_i16:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_and_b32 s3, s2, 15
; GFX9-NEXT: s_andn2_b32 s2, 15, s2
; GFX9-NEXT: s_lshl_b32 s0, s0, 1
; GFX9-NEXT: s_and_b32 s2, 0xffff, s2
; GFX9-NEXT: s_lshl_b32 s0, s0, s2
; GFX9-NEXT: s_and_b32 s1, 0xffff, s1
; GFX9-NEXT: s_and_b32 s2, 0xffff, s3
; GFX9-NEXT: s_lshr_b32 s1, s1, s2
; GFX9-NEXT: s_or_b32 s0, s0, s1
; GFX9-NEXT: ; return to shader part epilog
;
; GFX10-LABEL: s_fshr_i16:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_and_b32 s3, s2, 15
; GFX10-NEXT: s_andn2_b32 s2, 15, s2
; GFX10-NEXT: s_lshl_b32 s0, s0, 1
; GFX10-NEXT: s_and_b32 s2, 0xffff, s2
; GFX10-NEXT: s_and_b32 s1, 0xffff, s1
; GFX10-NEXT: s_and_b32 s3, 0xffff, s3
; GFX10-NEXT: s_lshl_b32 s0, s0, s2
; GFX10-NEXT: s_lshr_b32 s1, s1, s3
; GFX10-NEXT: s_or_b32 s0, s0, s1
; GFX10-NEXT: ; return to shader part epilog
;
; GFX11-LABEL: s_fshr_i16:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_and_b32 s3, s2, 15
; GFX11-NEXT: s_and_not1_b32 s2, 15, s2
; GFX11-NEXT: s_lshl_b32 s0, s0, 1
; GFX11-NEXT: s_and_b32 s2, 0xffff, s2
; GFX11-NEXT: s_and_b32 s1, 0xffff, s1
; GFX11-NEXT: s_and_b32 s3, 0xffff, s3
; GFX11-NEXT: s_lshl_b32 s0, s0, s2
; GFX11-NEXT: s_lshr_b32 s1, s1, s3
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_or_b32 s0, s0, s1
; GFX11-NEXT: ; return to shader part epilog
  %result = call i16 @llvm.fshr.i16(i16 %lhs, i16 %rhs, i16 %amt)
  ret i16 %result
}
|
|
|
|
; Uniform i16 fshr by constant 4: folds to (lhs << 12) | (rhs >> 4).
; GFX6 uses s_bfe_u32 for the zero-extending right shift; later targets use
; an explicit 0xffff mask plus s_lshr_b32.
define amdgpu_ps i16 @s_fshr_i16_4(i16 inreg %lhs, i16 inreg %rhs) {
; GFX6-LABEL: s_fshr_i16_4:
; GFX6: ; %bb.0:
; GFX6-NEXT: s_lshl_b32 s0, s0, 12
; GFX6-NEXT: s_bfe_u32 s1, s1, 0xc0004
; GFX6-NEXT: s_or_b32 s0, s0, s1
; GFX6-NEXT: ; return to shader part epilog
;
; GFX8-LABEL: s_fshr_i16_4:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_and_b32 s1, 0xffff, s1
; GFX8-NEXT: s_lshl_b32 s0, s0, 12
; GFX8-NEXT: s_lshr_b32 s1, s1, 4
; GFX8-NEXT: s_or_b32 s0, s0, s1
; GFX8-NEXT: ; return to shader part epilog
;
; GFX9-LABEL: s_fshr_i16_4:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_and_b32 s1, 0xffff, s1
; GFX9-NEXT: s_lshl_b32 s0, s0, 12
; GFX9-NEXT: s_lshr_b32 s1, s1, 4
; GFX9-NEXT: s_or_b32 s0, s0, s1
; GFX9-NEXT: ; return to shader part epilog
;
; GFX10-LABEL: s_fshr_i16_4:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_and_b32 s1, 0xffff, s1
; GFX10-NEXT: s_lshl_b32 s0, s0, 12
; GFX10-NEXT: s_lshr_b32 s1, s1, 4
; GFX10-NEXT: s_or_b32 s0, s0, s1
; GFX10-NEXT: ; return to shader part epilog
;
; GFX11-LABEL: s_fshr_i16_4:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_and_b32 s1, 0xffff, s1
; GFX11-NEXT: s_lshl_b32 s0, s0, 12
; GFX11-NEXT: s_lshr_b32 s1, s1, 4
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_or_b32 s0, s0, s1
; GFX11-NEXT: ; return to shader part epilog
  %result = call i16 @llvm.fshr.i16(i16 %lhs, i16 %rhs, i16 4)
  ret i16 %result
}
|
|
|
|
; Uniform i16 fshr by constant 5: folds to (lhs << 11) | (rhs >> 5);
; mirrors s_fshr_i16_4 with the complementary shift pair.
define amdgpu_ps i16 @s_fshr_i16_5(i16 inreg %lhs, i16 inreg %rhs) {
; GFX6-LABEL: s_fshr_i16_5:
; GFX6: ; %bb.0:
; GFX6-NEXT: s_lshl_b32 s0, s0, 11
; GFX6-NEXT: s_bfe_u32 s1, s1, 0xb0005
; GFX6-NEXT: s_or_b32 s0, s0, s1
; GFX6-NEXT: ; return to shader part epilog
;
; GFX8-LABEL: s_fshr_i16_5:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_and_b32 s1, 0xffff, s1
; GFX8-NEXT: s_lshl_b32 s0, s0, 11
; GFX8-NEXT: s_lshr_b32 s1, s1, 5
; GFX8-NEXT: s_or_b32 s0, s0, s1
; GFX8-NEXT: ; return to shader part epilog
;
; GFX9-LABEL: s_fshr_i16_5:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_and_b32 s1, 0xffff, s1
; GFX9-NEXT: s_lshl_b32 s0, s0, 11
; GFX9-NEXT: s_lshr_b32 s1, s1, 5
; GFX9-NEXT: s_or_b32 s0, s0, s1
; GFX9-NEXT: ; return to shader part epilog
;
; GFX10-LABEL: s_fshr_i16_5:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_and_b32 s1, 0xffff, s1
; GFX10-NEXT: s_lshl_b32 s0, s0, 11
; GFX10-NEXT: s_lshr_b32 s1, s1, 5
; GFX10-NEXT: s_or_b32 s0, s0, s1
; GFX10-NEXT: ; return to shader part epilog
;
; GFX11-LABEL: s_fshr_i16_5:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_and_b32 s1, 0xffff, s1
; GFX11-NEXT: s_lshl_b32 s0, s0, 11
; GFX11-NEXT: s_lshr_b32 s1, s1, 5
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_or_b32 s0, s0, s1
; GFX11-NEXT: ; return to shader part epilog
  %result = call i16 @llvm.fshr.i16(i16 %lhs, i16 %rhs, i16 5)
  ret i16 %result
}
|
|
|
|
; Fully-divergent i16 fshr: expanded with VALU ops. GFX6 has no 16-bit shift
; instructions so it masks to 0xffff and uses 32-bit shifts; GFX8+ use the
; native v_lshlrev_b16/v_lshrrev_b16 pair.
define i16 @v_fshr_i16(i16 %lhs, i16 %rhs, i16 %amt) {
; GFX6-LABEL: v_fshr_i16:
; GFX6: ; %bb.0:
; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX6-NEXT: v_and_b32_e32 v3, 15, v2
; GFX6-NEXT: v_xor_b32_e32 v2, -1, v2
; GFX6-NEXT: v_and_b32_e32 v2, 15, v2
; GFX6-NEXT: v_lshlrev_b32_e32 v0, 1, v0
; GFX6-NEXT: v_and_b32_e32 v2, 0xffff, v2
; GFX6-NEXT: v_lshlrev_b32_e32 v0, v2, v0
; GFX6-NEXT: v_and_b32_e32 v2, 0xffff, v3
; GFX6-NEXT: v_and_b32_e32 v1, 0xffff, v1
; GFX6-NEXT: v_lshrrev_b32_e32 v1, v2, v1
; GFX6-NEXT: v_or_b32_e32 v0, v0, v1
; GFX6-NEXT: s_setpc_b64 s[30:31]
;
; GFX8-LABEL: v_fshr_i16:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX8-NEXT: v_and_b32_e32 v3, 15, v2
; GFX8-NEXT: v_xor_b32_e32 v2, -1, v2
; GFX8-NEXT: v_and_b32_e32 v2, 15, v2
; GFX8-NEXT: v_lshlrev_b16_e32 v0, 1, v0
; GFX8-NEXT: v_lshlrev_b16_e32 v0, v2, v0
; GFX8-NEXT: v_lshrrev_b16_e32 v1, v3, v1
; GFX8-NEXT: v_or_b32_e32 v0, v0, v1
; GFX8-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: v_fshr_i16:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_and_b32_e32 v3, 15, v2
; GFX9-NEXT: v_xor_b32_e32 v2, -1, v2
; GFX9-NEXT: v_and_b32_e32 v2, 15, v2
; GFX9-NEXT: v_lshlrev_b16_e32 v0, 1, v0
; GFX9-NEXT: v_lshlrev_b16_e32 v0, v2, v0
; GFX9-NEXT: v_lshrrev_b16_e32 v1, v3, v1
; GFX9-NEXT: v_or_b32_e32 v0, v0, v1
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: v_fshr_i16:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_xor_b32_e32 v3, -1, v2
; GFX10-NEXT: v_lshlrev_b16 v0, 1, v0
; GFX10-NEXT: v_and_b32_e32 v2, 15, v2
; GFX10-NEXT: v_and_b32_e32 v3, 15, v3
; GFX10-NEXT: v_lshrrev_b16 v1, v2, v1
; GFX10-NEXT: v_lshlrev_b16 v0, v3, v0
; GFX10-NEXT: v_or_b32_e32 v0, v0, v1
; GFX10-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-LABEL: v_fshr_i16:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_xor_b32_e32 v3, -1, v2
; GFX11-NEXT: v_lshlrev_b16 v0, 1, v0
; GFX11-NEXT: v_and_b32_e32 v2, 15, v2
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-NEXT: v_and_b32_e32 v3, 15, v3
; GFX11-NEXT: v_lshrrev_b16 v1, v2, v1
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_lshlrev_b16 v0, v3, v0
; GFX11-NEXT: v_or_b32_e32 v0, v0, v1
; GFX11-NEXT: s_setpc_b64 s[30:31]
  %result = call i16 @llvm.fshr.i16(i16 %lhs, i16 %rhs, i16 %amt)
  ret i16 %result
}
|
|
|
|
; Divergent i16 fshr by constant 4: folds to (lhs << 12) | (rhs >> 4);
; GFX6 uses v_bfe_u32 for the zero-extending right shift.
define i16 @v_fshr_i16_4(i16 %lhs, i16 %rhs) {
; GFX6-LABEL: v_fshr_i16_4:
; GFX6: ; %bb.0:
; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX6-NEXT: v_lshlrev_b32_e32 v0, 12, v0
; GFX6-NEXT: v_bfe_u32 v1, v1, 4, 12
; GFX6-NEXT: v_or_b32_e32 v0, v0, v1
; GFX6-NEXT: s_setpc_b64 s[30:31]
;
; GFX8-LABEL: v_fshr_i16_4:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX8-NEXT: v_lshlrev_b16_e32 v0, 12, v0
; GFX8-NEXT: v_lshrrev_b16_e32 v1, 4, v1
; GFX8-NEXT: v_or_b32_e32 v0, v0, v1
; GFX8-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: v_fshr_i16_4:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_lshlrev_b16_e32 v0, 12, v0
; GFX9-NEXT: v_lshrrev_b16_e32 v1, 4, v1
; GFX9-NEXT: v_or_b32_e32 v0, v0, v1
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: v_fshr_i16_4:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_lshlrev_b16 v0, 12, v0
; GFX10-NEXT: v_lshrrev_b16 v1, 4, v1
; GFX10-NEXT: v_or_b32_e32 v0, v0, v1
; GFX10-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-LABEL: v_fshr_i16_4:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_lshlrev_b16 v0, 12, v0
; GFX11-NEXT: v_lshrrev_b16 v1, 4, v1
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-NEXT: v_or_b32_e32 v0, v0, v1
; GFX11-NEXT: s_setpc_b64 s[30:31]
  %result = call i16 @llvm.fshr.i16(i16 %lhs, i16 %rhs, i16 4)
  ret i16 %result
}
|
|
|
|
; Divergent i16 fshr by constant 5: folds to (lhs << 11) | (rhs >> 5);
; mirrors v_fshr_i16_4 with the complementary shift pair.
define i16 @v_fshr_i16_5(i16 %lhs, i16 %rhs) {
; GFX6-LABEL: v_fshr_i16_5:
; GFX6: ; %bb.0:
; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX6-NEXT: v_lshlrev_b32_e32 v0, 11, v0
; GFX6-NEXT: v_bfe_u32 v1, v1, 5, 11
; GFX6-NEXT: v_or_b32_e32 v0, v0, v1
; GFX6-NEXT: s_setpc_b64 s[30:31]
;
; GFX8-LABEL: v_fshr_i16_5:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX8-NEXT: v_lshlrev_b16_e32 v0, 11, v0
; GFX8-NEXT: v_lshrrev_b16_e32 v1, 5, v1
; GFX8-NEXT: v_or_b32_e32 v0, v0, v1
; GFX8-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: v_fshr_i16_5:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_lshlrev_b16_e32 v0, 11, v0
; GFX9-NEXT: v_lshrrev_b16_e32 v1, 5, v1
; GFX9-NEXT: v_or_b32_e32 v0, v0, v1
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: v_fshr_i16_5:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_lshlrev_b16 v0, 11, v0
; GFX10-NEXT: v_lshrrev_b16 v1, 5, v1
; GFX10-NEXT: v_or_b32_e32 v0, v0, v1
; GFX10-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-LABEL: v_fshr_i16_5:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_lshlrev_b16 v0, 11, v0
; GFX11-NEXT: v_lshrrev_b16 v1, 5, v1
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-NEXT: v_or_b32_e32 v0, v0, v1
; GFX11-NEXT: s_setpc_b64 s[30:31]
  %result = call i16 @llvm.fshr.i16(i16 %lhs, i16 %rhs, i16 5)
  ret i16 %result
}
|
|
|
|
; i16 fshr with uniform values and a divergent amount: the amount math
; (and/xor with 15) stays in VALU; GFX8/9 use the _e64 shift forms to take
; the uniform lhs/rhs directly as SGPR sources.
define amdgpu_ps half @v_fshr_i16_ssv(i16 inreg %lhs, i16 inreg %rhs, i16 %amt) {
; GFX6-LABEL: v_fshr_i16_ssv:
; GFX6: ; %bb.0:
; GFX6-NEXT: v_and_b32_e32 v1, 15, v0
; GFX6-NEXT: v_xor_b32_e32 v0, -1, v0
; GFX6-NEXT: v_and_b32_e32 v0, 15, v0
; GFX6-NEXT: s_lshl_b32 s0, s0, 1
; GFX6-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX6-NEXT: v_lshl_b32_e32 v0, s0, v0
; GFX6-NEXT: v_and_b32_e32 v1, 0xffff, v1
; GFX6-NEXT: s_and_b32 s0, s1, 0xffff
; GFX6-NEXT: v_lshr_b32_e32 v1, s0, v1
; GFX6-NEXT: v_or_b32_e32 v0, v0, v1
; GFX6-NEXT: ; return to shader part epilog
;
; GFX8-LABEL: v_fshr_i16_ssv:
; GFX8: ; %bb.0:
; GFX8-NEXT: v_and_b32_e32 v1, 15, v0
; GFX8-NEXT: v_xor_b32_e32 v0, -1, v0
; GFX8-NEXT: v_and_b32_e32 v0, 15, v0
; GFX8-NEXT: s_lshl_b32 s0, s0, 1
; GFX8-NEXT: v_lshlrev_b16_e64 v0, v0, s0
; GFX8-NEXT: v_lshrrev_b16_e64 v1, v1, s1
; GFX8-NEXT: v_or_b32_e32 v0, v0, v1
; GFX8-NEXT: ; return to shader part epilog
;
; GFX9-LABEL: v_fshr_i16_ssv:
; GFX9: ; %bb.0:
; GFX9-NEXT: v_and_b32_e32 v1, 15, v0
; GFX9-NEXT: v_xor_b32_e32 v0, -1, v0
; GFX9-NEXT: v_and_b32_e32 v0, 15, v0
; GFX9-NEXT: s_lshl_b32 s0, s0, 1
; GFX9-NEXT: v_lshlrev_b16_e64 v0, v0, s0
; GFX9-NEXT: v_lshrrev_b16_e64 v1, v1, s1
; GFX9-NEXT: v_or_b32_e32 v0, v0, v1
; GFX9-NEXT: ; return to shader part epilog
;
; GFX10-LABEL: v_fshr_i16_ssv:
; GFX10: ; %bb.0:
; GFX10-NEXT: v_xor_b32_e32 v1, -1, v0
; GFX10-NEXT: v_and_b32_e32 v0, 15, v0
; GFX10-NEXT: s_lshl_b32 s0, s0, 1
; GFX10-NEXT: v_and_b32_e32 v1, 15, v1
; GFX10-NEXT: v_lshrrev_b16 v0, v0, s1
; GFX10-NEXT: v_lshlrev_b16 v1, v1, s0
; GFX10-NEXT: v_or_b32_e32 v0, v1, v0
; GFX10-NEXT: ; return to shader part epilog
;
; GFX11-LABEL: v_fshr_i16_ssv:
; GFX11: ; %bb.0:
; GFX11-NEXT: v_xor_b32_e32 v1, -1, v0
; GFX11-NEXT: v_and_b32_e32 v0, 15, v0
; GFX11-NEXT: s_lshl_b32 s0, s0, 1
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-NEXT: v_and_b32_e32 v1, 15, v1
; GFX11-NEXT: v_lshrrev_b16 v0, v0, s1
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_lshlrev_b16 v1, v1, s0
; GFX11-NEXT: v_or_b32_e32 v0, v1, v0
; GFX11-NEXT: ; return to shader part epilog
  %result = call i16 @llvm.fshr.i16(i16 %lhs, i16 %rhs, i16 %amt)
  %cast.result = bitcast i16 %result to half
  ret half %cast.result
}
|
|
|
|
; i16 fshr with divergent rhs and uniform lhs/amount: the amount math is
; done on the SALU, and only the right shift of the divergent rhs plus the
; final OR need VALU instructions.
define amdgpu_ps half @v_fshr_i16_svs(i16 inreg %lhs, i16 %rhs, i16 inreg %amt) {
; GFX6-LABEL: v_fshr_i16_svs:
; GFX6: ; %bb.0:
; GFX6-NEXT: s_and_b32 s2, s1, 15
; GFX6-NEXT: s_andn2_b32 s1, 15, s1
; GFX6-NEXT: s_lshl_b32 s0, s0, 1
; GFX6-NEXT: s_and_b32 s1, 0xffff, s1
; GFX6-NEXT: s_lshl_b32 s0, s0, s1
; GFX6-NEXT: s_and_b32 s1, 0xffff, s2
; GFX6-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX6-NEXT: v_lshrrev_b32_e32 v0, s1, v0
; GFX6-NEXT: v_or_b32_e32 v0, s0, v0
; GFX6-NEXT: ; return to shader part epilog
;
; GFX8-LABEL: v_fshr_i16_svs:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_and_b32 s2, s1, 15
; GFX8-NEXT: s_andn2_b32 s1, 15, s1
; GFX8-NEXT: s_lshl_b32 s0, s0, 1
; GFX8-NEXT: s_and_b32 s1, 0xffff, s1
; GFX8-NEXT: s_lshl_b32 s0, s0, s1
; GFX8-NEXT: v_lshrrev_b16_e32 v0, s2, v0
; GFX8-NEXT: v_or_b32_e32 v0, s0, v0
; GFX8-NEXT: ; return to shader part epilog
;
; GFX9-LABEL: v_fshr_i16_svs:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_and_b32 s2, s1, 15
; GFX9-NEXT: s_andn2_b32 s1, 15, s1
; GFX9-NEXT: s_lshl_b32 s0, s0, 1
; GFX9-NEXT: s_and_b32 s1, 0xffff, s1
; GFX9-NEXT: s_lshl_b32 s0, s0, s1
; GFX9-NEXT: v_lshrrev_b16_e32 v0, s2, v0
; GFX9-NEXT: v_or_b32_e32 v0, s0, v0
; GFX9-NEXT: ; return to shader part epilog
;
; GFX10-LABEL: v_fshr_i16_svs:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_and_b32 s2, s1, 15
; GFX10-NEXT: s_andn2_b32 s1, 15, s1
; GFX10-NEXT: v_lshrrev_b16 v0, s2, v0
; GFX10-NEXT: s_lshl_b32 s0, s0, 1
; GFX10-NEXT: s_and_b32 s1, 0xffff, s1
; GFX10-NEXT: s_lshl_b32 s0, s0, s1
; GFX10-NEXT: v_or_b32_e32 v0, s0, v0
; GFX10-NEXT: ; return to shader part epilog
;
; GFX11-LABEL: v_fshr_i16_svs:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_and_b32 s2, s1, 15
; GFX11-NEXT: s_and_not1_b32 s1, 15, s1
; GFX11-NEXT: v_lshrrev_b16 v0, s2, v0
; GFX11-NEXT: s_lshl_b32 s0, s0, 1
; GFX11-NEXT: s_and_b32 s1, 0xffff, s1
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_lshl_b32 s0, s0, s1
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
; GFX11-NEXT: v_or_b32_e32 v0, s0, v0
; GFX11-NEXT: ; return to shader part epilog
  %result = call i16 @llvm.fshr.i16(i16 %lhs, i16 %rhs, i16 %amt)
  %cast.result = bitcast i16 %result to half
  ret half %cast.result
}
|
|
|
|
; i16 fshr with divergent lhs and uniform rhs/amount: the right-shift half
; of the expansion is computed entirely on the SALU; only the left shift of
; the divergent lhs and the final OR use VALU instructions.
define amdgpu_ps half @v_fshr_i16_vss(i16 %lhs, i16 inreg %rhs, i16 inreg %amt) {
; GFX6-LABEL: v_fshr_i16_vss:
; GFX6: ; %bb.0:
; GFX6-NEXT: s_and_b32 s2, s1, 15
; GFX6-NEXT: s_andn2_b32 s1, 15, s1
; GFX6-NEXT: v_lshlrev_b32_e32 v0, 1, v0
; GFX6-NEXT: s_and_b32 s1, 0xffff, s1
; GFX6-NEXT: v_lshlrev_b32_e32 v0, s1, v0
; GFX6-NEXT: s_and_b32 s1, 0xffff, s2
; GFX6-NEXT: s_and_b32 s0, s0, 0xffff
; GFX6-NEXT: s_lshr_b32 s0, s0, s1
; GFX6-NEXT: v_or_b32_e32 v0, s0, v0
; GFX6-NEXT: ; return to shader part epilog
;
; GFX8-LABEL: v_fshr_i16_vss:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_and_b32 s2, s1, 15
; GFX8-NEXT: s_andn2_b32 s1, 15, s1
; GFX8-NEXT: v_lshlrev_b16_e32 v0, 1, v0
; GFX8-NEXT: v_lshlrev_b16_e32 v0, s1, v0
; GFX8-NEXT: s_and_b32 s0, 0xffff, s0
; GFX8-NEXT: s_and_b32 s1, 0xffff, s2
; GFX8-NEXT: s_lshr_b32 s0, s0, s1
; GFX8-NEXT: v_or_b32_e32 v0, s0, v0
; GFX8-NEXT: ; return to shader part epilog
;
; GFX9-LABEL: v_fshr_i16_vss:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_and_b32 s2, s1, 15
; GFX9-NEXT: s_andn2_b32 s1, 15, s1
; GFX9-NEXT: v_lshlrev_b16_e32 v0, 1, v0
; GFX9-NEXT: v_lshlrev_b16_e32 v0, s1, v0
; GFX9-NEXT: s_and_b32 s0, 0xffff, s0
; GFX9-NEXT: s_and_b32 s1, 0xffff, s2
; GFX9-NEXT: s_lshr_b32 s0, s0, s1
; GFX9-NEXT: v_or_b32_e32 v0, s0, v0
; GFX9-NEXT: ; return to shader part epilog
;
; GFX10-LABEL: v_fshr_i16_vss:
; GFX10: ; %bb.0:
; GFX10-NEXT: v_lshlrev_b16 v0, 1, v0
; GFX10-NEXT: s_andn2_b32 s2, 15, s1
; GFX10-NEXT: s_and_b32 s1, s1, 15
; GFX10-NEXT: s_and_b32 s0, 0xffff, s0
; GFX10-NEXT: s_and_b32 s1, 0xffff, s1
; GFX10-NEXT: v_lshlrev_b16 v0, s2, v0
; GFX10-NEXT: s_lshr_b32 s0, s0, s1
; GFX10-NEXT: v_or_b32_e32 v0, s0, v0
; GFX10-NEXT: ; return to shader part epilog
;
; GFX11-LABEL: v_fshr_i16_vss:
; GFX11: ; %bb.0:
; GFX11-NEXT: v_lshlrev_b16 v0, 1, v0
; GFX11-NEXT: s_and_not1_b32 s2, 15, s1
; GFX11-NEXT: s_and_b32 s1, s1, 15
; GFX11-NEXT: s_and_b32 s0, 0xffff, s0
; GFX11-NEXT: s_and_b32 s1, 0xffff, s1
; GFX11-NEXT: v_lshlrev_b16 v0, s2, v0
; GFX11-NEXT: s_lshr_b32 s0, s0, s1
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
; GFX11-NEXT: v_or_b32_e32 v0, s0, v0
; GFX11-NEXT: ; return to shader part epilog
  %result = call i16 @llvm.fshr.i16(i16 %lhs, i16 %rhs, i16 %amt)
  %cast.result = bitcast i16 %result to half
  ret half %cast.result
}
|
|
|
|
; Fully-uniform <2 x i16> fshr. GFX6 receives the vector as split scalars and
; expands both halves manually; GFX8 splits/re-packs with shifts and 0xffff
; masks; GFX9+ use packed-register tricks (s_pack_ll_b32_b16 and a single
; 0xf000f mask covering both 4-bit lane amounts).
define amdgpu_ps i32 @s_fshr_v2i16(<2 x i16> inreg %lhs, <2 x i16> inreg %rhs, <2 x i16> inreg %amt) {
; GFX6-LABEL: s_fshr_v2i16:
; GFX6: ; %bb.0:
; GFX6-NEXT: s_lshl_b32 s5, s5, 16
; GFX6-NEXT: s_and_b32 s4, s4, 0xffff
; GFX6-NEXT: s_or_b32 s4, s5, s4
; GFX6-NEXT: s_bfe_u32 s5, s2, 0xf0001
; GFX6-NEXT: s_lshl_b32 s0, s0, 1
; GFX6-NEXT: s_lshr_b32 s5, s5, 14
; GFX6-NEXT: s_or_b32 s0, s0, s5
; GFX6-NEXT: s_bfe_u32 s5, s3, 0xf0001
; GFX6-NEXT: s_lshl_b32 s1, s1, 1
; GFX6-NEXT: s_lshr_b32 s5, s5, 14
; GFX6-NEXT: s_xor_b32 s4, s4, -1
; GFX6-NEXT: s_or_b32 s1, s1, s5
; GFX6-NEXT: s_lshl_b32 s2, s2, 1
; GFX6-NEXT: s_lshr_b32 s5, s4, 16
; GFX6-NEXT: s_and_b32 s6, s4, 15
; GFX6-NEXT: s_andn2_b32 s4, 15, s4
; GFX6-NEXT: s_and_b32 s6, 0xffff, s6
; GFX6-NEXT: s_bfe_u32 s2, s2, 0xf0001
; GFX6-NEXT: s_and_b32 s4, 0xffff, s4
; GFX6-NEXT: s_lshl_b32 s0, s0, s6
; GFX6-NEXT: s_lshr_b32 s2, s2, s4
; GFX6-NEXT: s_or_b32 s0, s0, s2
; GFX6-NEXT: s_and_b32 s2, s5, 15
; GFX6-NEXT: s_lshl_b32 s3, s3, 1
; GFX6-NEXT: s_andn2_b32 s4, 15, s5
; GFX6-NEXT: s_and_b32 s2, 0xffff, s2
; GFX6-NEXT: s_lshl_b32 s1, s1, s2
; GFX6-NEXT: s_bfe_u32 s2, s3, 0xf0001
; GFX6-NEXT: s_and_b32 s3, 0xffff, s4
; GFX6-NEXT: s_lshr_b32 s2, s2, s3
; GFX6-NEXT: s_or_b32 s1, s1, s2
; GFX6-NEXT: s_and_b32 s1, 0xffff, s1
; GFX6-NEXT: s_and_b32 s0, 0xffff, s0
; GFX6-NEXT: s_lshl_b32 s1, s1, 16
; GFX6-NEXT: s_or_b32 s0, s0, s1
; GFX6-NEXT: ; return to shader part epilog
;
; GFX8-LABEL: s_fshr_v2i16:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_and_b32 s5, 0xffff, s1
; GFX8-NEXT: s_lshr_b32 s3, s0, 16
; GFX8-NEXT: s_lshr_b32 s4, s1, 16
; GFX8-NEXT: s_lshl_b32 s0, s0, 1
; GFX8-NEXT: s_lshr_b32 s5, s5, 15
; GFX8-NEXT: s_or_b32 s0, s0, s5
; GFX8-NEXT: s_lshl_b32 s3, s3, 1
; GFX8-NEXT: s_lshr_b32 s5, s4, 15
; GFX8-NEXT: s_lshl_b32 s1, s1, 1
; GFX8-NEXT: s_xor_b32 s2, s2, -1
; GFX8-NEXT: s_or_b32 s3, s3, s5
; GFX8-NEXT: s_lshr_b32 s5, s2, 16
; GFX8-NEXT: s_and_b32 s6, s2, 15
; GFX8-NEXT: s_andn2_b32 s2, 15, s2
; GFX8-NEXT: s_and_b32 s1, 0xffff, s1
; GFX8-NEXT: s_and_b32 s6, 0xffff, s6
; GFX8-NEXT: s_lshr_b32 s1, s1, 1
; GFX8-NEXT: s_and_b32 s2, 0xffff, s2
; GFX8-NEXT: s_lshl_b32 s0, s0, s6
; GFX8-NEXT: s_lshr_b32 s1, s1, s2
; GFX8-NEXT: s_or_b32 s0, s0, s1
; GFX8-NEXT: s_and_b32 s1, s5, 15
; GFX8-NEXT: s_lshl_b32 s4, s4, 1
; GFX8-NEXT: s_and_b32 s1, 0xffff, s1
; GFX8-NEXT: s_andn2_b32 s2, 15, s5
; GFX8-NEXT: s_lshl_b32 s1, s3, s1
; GFX8-NEXT: s_and_b32 s3, 0xffff, s4
; GFX8-NEXT: s_lshr_b32 s3, s3, 1
; GFX8-NEXT: s_and_b32 s2, 0xffff, s2
; GFX8-NEXT: s_lshr_b32 s2, s3, s2
; GFX8-NEXT: s_or_b32 s1, s1, s2
; GFX8-NEXT: s_and_b32 s1, 0xffff, s1
; GFX8-NEXT: s_and_b32 s0, 0xffff, s0
; GFX8-NEXT: s_lshl_b32 s1, s1, 16
; GFX8-NEXT: s_or_b32 s0, s0, s1
; GFX8-NEXT: ; return to shader part epilog
;
; GFX9-LABEL: s_fshr_v2i16:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_lshr_b32 s4, s0, 16
; GFX9-NEXT: s_lshl_b32 s0, s0, 0x10001
; GFX9-NEXT: s_lshl_b32 s4, s4, 1
; GFX9-NEXT: s_and_b32 s3, s2, 0xf000f
; GFX9-NEXT: s_andn2_b32 s2, 0xf000f, s2
; GFX9-NEXT: s_pack_ll_b32_b16 s0, s0, s4
; GFX9-NEXT: s_lshr_b32 s4, s0, 16
; GFX9-NEXT: s_lshr_b32 s5, s2, 16
; GFX9-NEXT: s_lshl_b32 s0, s0, s2
; GFX9-NEXT: s_lshl_b32 s2, s4, s5
; GFX9-NEXT: s_pack_ll_b32_b16 s0, s0, s2
; GFX9-NEXT: s_lshr_b32 s2, s1, 16
; GFX9-NEXT: s_and_b32 s1, s1, 0xffff
; GFX9-NEXT: s_lshr_b32 s4, s3, 16
; GFX9-NEXT: s_lshr_b32 s1, s1, s3
; GFX9-NEXT: s_lshr_b32 s2, s2, s4
; GFX9-NEXT: s_pack_ll_b32_b16 s1, s1, s2
; GFX9-NEXT: s_or_b32 s0, s0, s1
; GFX9-NEXT: ; return to shader part epilog
;
; GFX10-LABEL: s_fshr_v2i16:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_lshr_b32 s3, s0, 16
; GFX10-NEXT: s_lshl_b32 s0, s0, 0x10001
; GFX10-NEXT: s_lshl_b32 s3, s3, 1
; GFX10-NEXT: s_and_b32 s4, s2, 0xf000f
; GFX10-NEXT: s_pack_ll_b32_b16 s0, s0, s3
; GFX10-NEXT: s_andn2_b32 s2, 0xf000f, s2
; GFX10-NEXT: s_lshr_b32 s3, s0, 16
; GFX10-NEXT: s_lshr_b32 s5, s2, 16
; GFX10-NEXT: s_lshl_b32 s0, s0, s2
; GFX10-NEXT: s_lshl_b32 s2, s3, s5
; GFX10-NEXT: s_lshr_b32 s3, s1, 16
; GFX10-NEXT: s_and_b32 s1, s1, 0xffff
; GFX10-NEXT: s_lshr_b32 s5, s4, 16
; GFX10-NEXT: s_lshr_b32 s1, s1, s4
; GFX10-NEXT: s_lshr_b32 s3, s3, s5
; GFX10-NEXT: s_pack_ll_b32_b16 s0, s0, s2
; GFX10-NEXT: s_pack_ll_b32_b16 s1, s1, s3
; GFX10-NEXT: s_or_b32 s0, s0, s1
; GFX10-NEXT: ; return to shader part epilog
;
; GFX11-LABEL: s_fshr_v2i16:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_lshr_b32 s3, s0, 16
; GFX11-NEXT: s_lshl_b32 s0, s0, 0x10001
; GFX11-NEXT: s_lshl_b32 s3, s3, 1
; GFX11-NEXT: s_and_b32 s4, s2, 0xf000f
; GFX11-NEXT: s_pack_ll_b32_b16 s0, s0, s3
; GFX11-NEXT: s_and_not1_b32 s2, 0xf000f, s2
; GFX11-NEXT: s_lshr_b32 s3, s0, 16
; GFX11-NEXT: s_lshr_b32 s5, s2, 16
; GFX11-NEXT: s_lshl_b32 s0, s0, s2
; GFX11-NEXT: s_lshl_b32 s2, s3, s5
; GFX11-NEXT: s_lshr_b32 s3, s1, 16
; GFX11-NEXT: s_and_b32 s1, s1, 0xffff
; GFX11-NEXT: s_lshr_b32 s5, s4, 16
; GFX11-NEXT: s_lshr_b32 s1, s1, s4
; GFX11-NEXT: s_lshr_b32 s3, s3, s5
; GFX11-NEXT: s_pack_ll_b32_b16 s0, s0, s2
; GFX11-NEXT: s_pack_ll_b32_b16 s1, s1, s3
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_or_b32 s0, s0, s1
; GFX11-NEXT: ; return to shader part epilog
  %result = call <2 x i16> @llvm.fshr.v2i16(<2 x i16> %lhs, <2 x i16> %rhs, <2 x i16> %amt)
  %cast = bitcast <2 x i16> %result to i32
  ret i32 %cast
}
|
|
|
|
define <2 x i16> @v_fshr_v2i16(<2 x i16> %lhs, <2 x i16> %rhs, <2 x i16> %amt) {
|
|
; GFX6-LABEL: v_fshr_v2i16:
|
|
; GFX6: ; %bb.0:
|
|
; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; GFX6-NEXT: v_lshlrev_b32_e32 v5, 16, v5
|
|
; GFX6-NEXT: v_and_b32_e32 v4, 0xffff, v4
|
|
; GFX6-NEXT: v_or_b32_e32 v4, v5, v4
|
|
; GFX6-NEXT: v_bfe_u32 v5, v2, 1, 15
|
|
; GFX6-NEXT: v_lshlrev_b32_e32 v0, 1, v0
|
|
; GFX6-NEXT: v_lshrrev_b32_e32 v5, 14, v5
|
|
; GFX6-NEXT: v_or_b32_e32 v0, v0, v5
|
|
; GFX6-NEXT: v_bfe_u32 v5, v3, 1, 15
|
|
; GFX6-NEXT: v_lshlrev_b32_e32 v1, 1, v1
|
|
; GFX6-NEXT: v_lshrrev_b32_e32 v5, 14, v5
|
|
; GFX6-NEXT: v_xor_b32_e32 v4, -1, v4
|
|
; GFX6-NEXT: v_or_b32_e32 v1, v1, v5
|
|
; GFX6-NEXT: v_lshrrev_b32_e32 v5, 16, v4
|
|
; GFX6-NEXT: v_and_b32_e32 v6, 15, v4
|
|
; GFX6-NEXT: v_xor_b32_e32 v4, -1, v4
|
|
; GFX6-NEXT: v_lshlrev_b32_e32 v2, 1, v2
|
|
; GFX6-NEXT: v_and_b32_e32 v4, 15, v4
|
|
; GFX6-NEXT: v_and_b32_e32 v6, 0xffff, v6
|
|
; GFX6-NEXT: v_bfe_u32 v2, v2, 1, 15
|
|
; GFX6-NEXT: v_and_b32_e32 v4, 0xffff, v4
|
|
; GFX6-NEXT: v_lshlrev_b32_e32 v0, v6, v0
|
|
; GFX6-NEXT: v_lshrrev_b32_e32 v2, v4, v2
|
|
; GFX6-NEXT: v_or_b32_e32 v0, v0, v2
|
|
; GFX6-NEXT: v_and_b32_e32 v2, 15, v5
|
|
; GFX6-NEXT: v_xor_b32_e32 v4, -1, v5
|
|
; GFX6-NEXT: v_lshlrev_b32_e32 v3, 1, v3
|
|
; GFX6-NEXT: v_and_b32_e32 v4, 15, v4
|
|
; GFX6-NEXT: v_and_b32_e32 v2, 0xffff, v2
|
|
; GFX6-NEXT: v_lshlrev_b32_e32 v1, v2, v1
|
|
; GFX6-NEXT: v_bfe_u32 v2, v3, 1, 15
|
|
; GFX6-NEXT: v_and_b32_e32 v3, 0xffff, v4
|
|
; GFX6-NEXT: v_lshrrev_b32_e32 v2, v3, v2
|
|
; GFX6-NEXT: v_or_b32_e32 v1, v1, v2
|
|
; GFX6-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; GFX8-LABEL: v_fshr_v2i16:
|
|
; GFX8: ; %bb.0:
|
|
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; GFX8-NEXT: v_lshlrev_b16_e32 v3, 1, v0
|
|
; GFX8-NEXT: v_lshrrev_b16_e32 v4, 15, v1
|
|
; GFX8-NEXT: v_or_b32_e32 v3, v3, v4
|
|
; GFX8-NEXT: v_mov_b32_e32 v4, 1
|
|
; GFX8-NEXT: v_mov_b32_e32 v5, 15
|
|
; GFX8-NEXT: v_lshlrev_b16_sdwa v0, v4, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
|
|
; GFX8-NEXT: v_lshrrev_b16_sdwa v6, v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
|
|
; GFX8-NEXT: v_xor_b32_e32 v2, -1, v2
|
|
; GFX8-NEXT: v_or_b32_e32 v0, v0, v6
|
|
; GFX8-NEXT: v_lshlrev_b16_e32 v6, 1, v1
|
|
; GFX8-NEXT: v_lshlrev_b16_sdwa v1, v4, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
|
|
; GFX8-NEXT: v_and_b32_e32 v4, 15, v2
|
|
; GFX8-NEXT: v_xor_b32_e32 v7, -1, v2
|
|
; GFX8-NEXT: v_and_b32_e32 v7, 15, v7
|
|
; GFX8-NEXT: v_lshlrev_b16_e32 v3, v4, v3
|
|
; GFX8-NEXT: v_lshrrev_b16_e32 v4, 1, v6
|
|
; GFX8-NEXT: v_lshrrev_b16_e32 v4, v7, v4
|
|
; GFX8-NEXT: v_or_b32_e32 v3, v3, v4
|
|
; GFX8-NEXT: v_and_b32_sdwa v4, v2, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
|
|
; GFX8-NEXT: v_mov_b32_e32 v5, -1
|
|
; GFX8-NEXT: v_xor_b32_sdwa v2, v2, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
|
|
; GFX8-NEXT: v_and_b32_e32 v2, 15, v2
|
|
; GFX8-NEXT: v_lshrrev_b16_e32 v1, 1, v1
|
|
; GFX8-NEXT: v_lshlrev_b16_e32 v0, v4, v0
|
|
; GFX8-NEXT: v_lshrrev_b16_e32 v1, v2, v1
|
|
; GFX8-NEXT: v_or_b32_e32 v0, v0, v1
|
|
; GFX8-NEXT: v_and_b32_e32 v0, 0xffff, v0
|
|
; GFX8-NEXT: v_lshlrev_b32_e32 v0, 16, v0
|
|
; GFX8-NEXT: v_or_b32_sdwa v0, v3, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
|
|
; GFX8-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; GFX9-LABEL: v_fshr_v2i16:
|
|
; GFX9: ; %bb.0:
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; GFX9-NEXT: v_and_b32_e32 v3, 0xf000f, v2
|
|
; GFX9-NEXT: v_xor_b32_e32 v2, -1, v2
|
|
; GFX9-NEXT: v_and_b32_e32 v2, 0xf000f, v2
|
|
; GFX9-NEXT: v_pk_lshlrev_b16 v0, 1, v0 op_sel_hi:[0,1]
|
|
; GFX9-NEXT: v_pk_lshlrev_b16 v0, v2, v0
|
|
; GFX9-NEXT: v_pk_lshrrev_b16 v1, v3, v1
|
|
; GFX9-NEXT: v_or_b32_e32 v0, v0, v1
|
|
; GFX9-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; GFX10-LABEL: v_fshr_v2i16:
|
|
; GFX10: ; %bb.0:
|
|
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; GFX10-NEXT: v_xor_b32_e32 v3, -1, v2
|
|
; GFX10-NEXT: v_pk_lshlrev_b16 v0, 1, v0 op_sel_hi:[0,1]
|
|
; GFX10-NEXT: v_and_b32_e32 v2, 0xf000f, v2
|
|
; GFX10-NEXT: v_and_b32_e32 v3, 0xf000f, v3
|
|
; GFX10-NEXT: v_pk_lshrrev_b16 v1, v2, v1
|
|
; GFX10-NEXT: v_pk_lshlrev_b16 v0, v3, v0
|
|
; GFX10-NEXT: v_or_b32_e32 v0, v0, v1
|
|
; GFX10-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; GFX11-LABEL: v_fshr_v2i16:
|
|
; GFX11: ; %bb.0:
|
|
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; GFX11-NEXT: v_xor_b32_e32 v3, -1, v2
|
|
; GFX11-NEXT: v_pk_lshlrev_b16 v0, 1, v0 op_sel_hi:[0,1]
|
|
; GFX11-NEXT: v_and_b32_e32 v2, 0xf000f, v2
|
|
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
|
|
; GFX11-NEXT: v_and_b32_e32 v3, 0xf000f, v3
|
|
; GFX11-NEXT: v_pk_lshrrev_b16 v1, v2, v1
|
|
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
|
|
; GFX11-NEXT: v_pk_lshlrev_b16 v0, v3, v0
|
|
; GFX11-NEXT: v_or_b32_e32 v0, v0, v1
|
|
; GFX11-NEXT: s_setpc_b64 s[30:31]
|
|
%result = call <2 x i16> @llvm.fshr.v2i16(<2 x i16> %lhs, <2 x i16> %rhs, <2 x i16> %amt)
|
|
ret <2 x i16> %result
|
|
}
|
|
|
|
; Constant-amount funnel-shift-right of <2 x i16>: amounts <4, 8> fold to
; immediate shifts (12/3 and 8/7 splits); GFX9+ use packed v_pk_* shifts.
define <2 x i16> @v_fshr_v2i16_4_8(<2 x i16> %lhs, <2 x i16> %rhs) {
; GFX6-LABEL: v_fshr_v2i16_4_8:
; GFX6: ; %bb.0:
; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX6-NEXT: v_bfe_u32 v2, v2, 1, 15
; GFX6-NEXT: v_lshlrev_b32_e32 v0, 12, v0
; GFX6-NEXT: v_lshrrev_b32_e32 v2, 3, v2
; GFX6-NEXT: v_or_b32_e32 v0, v0, v2
; GFX6-NEXT: v_bfe_u32 v2, v3, 1, 15
; GFX6-NEXT: v_lshlrev_b32_e32 v1, 8, v1
; GFX6-NEXT: v_lshrrev_b32_e32 v2, 7, v2
; GFX6-NEXT: v_or_b32_e32 v1, v1, v2
; GFX6-NEXT: s_setpc_b64 s[30:31]
;
; GFX8-LABEL: v_fshr_v2i16_4_8:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX8-NEXT: v_lshrrev_b32_e32 v2, 16, v0
; GFX8-NEXT: v_lshlrev_b16_e32 v0, 12, v0
; GFX8-NEXT: v_lshrrev_b16_e32 v3, 4, v1
; GFX8-NEXT: v_or_b32_e32 v0, v0, v3
; GFX8-NEXT: v_mov_b32_e32 v3, 8
; GFX8-NEXT: v_lshlrev_b16_e32 v2, 8, v2
; GFX8-NEXT: v_lshrrev_b16_sdwa v1, v3, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; GFX8-NEXT: v_or_b32_e32 v1, v2, v1
; GFX8-NEXT: v_and_b32_e32 v1, 0xffff, v1
; GFX8-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; GFX8-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX8-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: v_fshr_v2i16_4_8:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v2, 0x8000c
; GFX9-NEXT: v_pk_lshlrev_b16 v0, v2, v0
; GFX9-NEXT: v_mov_b32_e32 v2, 0x80004
; GFX9-NEXT: v_pk_lshrrev_b16 v1, v2, v1
; GFX9-NEXT: v_or_b32_e32 v0, v0, v1
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: v_fshr_v2i16_4_8:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_pk_lshlrev_b16 v0, 0x8000c, v0
; GFX10-NEXT: v_pk_lshrrev_b16 v1, 0x80004, v1
; GFX10-NEXT: v_or_b32_e32 v0, v0, v1
; GFX10-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-LABEL: v_fshr_v2i16_4_8:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_pk_lshlrev_b16 v0, 0x8000c, v0
; GFX11-NEXT: v_pk_lshrrev_b16 v1, 0x80004, v1
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-NEXT: v_or_b32_e32 v0, v0, v1
; GFX11-NEXT: s_setpc_b64 s[30:31]
  %result = call <2 x i16> @llvm.fshr.v2i16(<2 x i16> %lhs, <2 x i16> %rhs, <2 x i16> <i16 4, i16 8>)
  ret <2 x i16> %result
}
|
|
|
|
; Funnel-shift-right of <2 x i16> with SGPR lhs/rhs and a VGPR shift amount;
; checks the mixed scalar/vector lowering for each target generation.
define amdgpu_ps float @v_fshr_v2i16_ssv(<2 x i16> inreg %lhs, <2 x i16> inreg %rhs, <2 x i16> %amt) {
; GFX6-LABEL: v_fshr_v2i16_ssv:
; GFX6: ; %bb.0:
; GFX6-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; GFX6-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX6-NEXT: v_or_b32_e32 v0, v1, v0
; GFX6-NEXT: s_bfe_u32 s4, s2, 0xf0001
; GFX6-NEXT: v_xor_b32_e32 v0, -1, v0
; GFX6-NEXT: s_lshl_b32 s0, s0, 1
; GFX6-NEXT: s_lshr_b32 s4, s4, 14
; GFX6-NEXT: v_lshrrev_b32_e32 v1, 16, v0
; GFX6-NEXT: v_and_b32_e32 v2, 15, v0
; GFX6-NEXT: v_xor_b32_e32 v0, -1, v0
; GFX6-NEXT: s_or_b32 s0, s0, s4
; GFX6-NEXT: s_lshl_b32 s2, s2, 1
; GFX6-NEXT: v_and_b32_e32 v0, 15, v0
; GFX6-NEXT: v_and_b32_e32 v2, 0xffff, v2
; GFX6-NEXT: v_lshl_b32_e32 v2, s0, v2
; GFX6-NEXT: s_bfe_u32 s0, s2, 0xf0001
; GFX6-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX6-NEXT: v_lshr_b32_e32 v0, s0, v0
; GFX6-NEXT: s_bfe_u32 s4, s3, 0xf0001
; GFX6-NEXT: v_or_b32_e32 v0, v2, v0
; GFX6-NEXT: v_and_b32_e32 v2, 15, v1
; GFX6-NEXT: v_xor_b32_e32 v1, -1, v1
; GFX6-NEXT: s_lshl_b32 s1, s1, 1
; GFX6-NEXT: s_lshr_b32 s4, s4, 14
; GFX6-NEXT: s_lshl_b32 s3, s3, 1
; GFX6-NEXT: v_and_b32_e32 v1, 15, v1
; GFX6-NEXT: s_or_b32 s1, s1, s4
; GFX6-NEXT: v_and_b32_e32 v2, 0xffff, v2
; GFX6-NEXT: s_bfe_u32 s0, s3, 0xf0001
; GFX6-NEXT: v_and_b32_e32 v1, 0xffff, v1
; GFX6-NEXT: v_lshl_b32_e32 v2, s1, v2
; GFX6-NEXT: v_lshr_b32_e32 v1, s0, v1
; GFX6-NEXT: v_or_b32_e32 v1, v2, v1
; GFX6-NEXT: v_and_b32_e32 v1, 0xffff, v1
; GFX6-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX6-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; GFX6-NEXT: v_or_b32_e32 v0, v0, v1
; GFX6-NEXT: ; return to shader part epilog
;
; GFX8-LABEL: v_fshr_v2i16_ssv:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_and_b32 s4, 0xffff, s1
; GFX8-NEXT: s_lshr_b32 s2, s0, 16
; GFX8-NEXT: s_lshl_b32 s0, s0, 1
; GFX8-NEXT: s_lshr_b32 s4, s4, 15
; GFX8-NEXT: v_xor_b32_e32 v0, -1, v0
; GFX8-NEXT: s_lshr_b32 s3, s1, 16
; GFX8-NEXT: s_or_b32 s0, s0, s4
; GFX8-NEXT: s_lshl_b32 s1, s1, 1
; GFX8-NEXT: v_and_b32_e32 v1, 15, v0
; GFX8-NEXT: v_xor_b32_e32 v2, -1, v0
; GFX8-NEXT: v_lshlrev_b16_e64 v1, v1, s0
; GFX8-NEXT: s_and_b32 s0, 0xffff, s1
; GFX8-NEXT: v_and_b32_e32 v2, 15, v2
; GFX8-NEXT: s_lshr_b32 s0, s0, 1
; GFX8-NEXT: v_lshrrev_b16_e64 v2, v2, s0
; GFX8-NEXT: s_lshr_b32 s4, s3, 15
; GFX8-NEXT: s_lshl_b32 s3, s3, 1
; GFX8-NEXT: v_or_b32_e32 v1, v1, v2
; GFX8-NEXT: v_mov_b32_e32 v2, 15
; GFX8-NEXT: v_mov_b32_e32 v3, -1
; GFX8-NEXT: s_lshl_b32 s2, s2, 1
; GFX8-NEXT: v_and_b32_sdwa v2, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
; GFX8-NEXT: v_xor_b32_sdwa v0, v0, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
; GFX8-NEXT: s_and_b32 s0, 0xffff, s3
; GFX8-NEXT: s_or_b32 s2, s2, s4
; GFX8-NEXT: v_and_b32_e32 v0, 15, v0
; GFX8-NEXT: s_lshr_b32 s0, s0, 1
; GFX8-NEXT: v_lshlrev_b16_e64 v2, v2, s2
; GFX8-NEXT: v_lshrrev_b16_e64 v0, v0, s0
; GFX8-NEXT: v_or_b32_e32 v0, v2, v0
; GFX8-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX8-NEXT: v_lshlrev_b32_e32 v0, 16, v0
; GFX8-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX8-NEXT: ; return to shader part epilog
;
; GFX9-LABEL: v_fshr_v2i16_ssv:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_lshr_b32 s2, s0, 16
; GFX9-NEXT: v_and_b32_e32 v1, 0xf000f, v0
; GFX9-NEXT: v_xor_b32_e32 v0, -1, v0
; GFX9-NEXT: s_lshl_b32 s0, s0, 0x10001
; GFX9-NEXT: s_lshl_b32 s2, s2, 1
; GFX9-NEXT: v_and_b32_e32 v0, 0xf000f, v0
; GFX9-NEXT: s_pack_ll_b32_b16 s0, s0, s2
; GFX9-NEXT: v_pk_lshlrev_b16 v0, v0, s0
; GFX9-NEXT: v_pk_lshrrev_b16 v1, v1, s1
; GFX9-NEXT: v_or_b32_e32 v0, v0, v1
; GFX9-NEXT: ; return to shader part epilog
;
; GFX10-LABEL: v_fshr_v2i16_ssv:
; GFX10: ; %bb.0:
; GFX10-NEXT: v_xor_b32_e32 v1, -1, v0
; GFX10-NEXT: s_lshr_b32 s2, s0, 16
; GFX10-NEXT: v_and_b32_e32 v0, 0xf000f, v0
; GFX10-NEXT: s_lshl_b32 s0, s0, 0x10001
; GFX10-NEXT: s_lshl_b32 s2, s2, 1
; GFX10-NEXT: v_and_b32_e32 v1, 0xf000f, v1
; GFX10-NEXT: s_pack_ll_b32_b16 s0, s0, s2
; GFX10-NEXT: v_pk_lshrrev_b16 v0, v0, s1
; GFX10-NEXT: v_pk_lshlrev_b16 v1, v1, s0
; GFX10-NEXT: v_or_b32_e32 v0, v1, v0
; GFX10-NEXT: ; return to shader part epilog
;
; GFX11-LABEL: v_fshr_v2i16_ssv:
; GFX11: ; %bb.0:
; GFX11-NEXT: v_xor_b32_e32 v1, -1, v0
; GFX11-NEXT: s_lshr_b32 s2, s0, 16
; GFX11-NEXT: v_and_b32_e32 v0, 0xf000f, v0
; GFX11-NEXT: s_lshl_b32 s0, s0, 0x10001
; GFX11-NEXT: s_lshl_b32 s2, s2, 1
; GFX11-NEXT: v_and_b32_e32 v1, 0xf000f, v1
; GFX11-NEXT: s_pack_ll_b32_b16 s0, s0, s2
; GFX11-NEXT: v_pk_lshrrev_b16 v0, v0, s1
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_pk_lshlrev_b16 v1, v1, s0
; GFX11-NEXT: v_or_b32_e32 v0, v1, v0
; GFX11-NEXT: ; return to shader part epilog
  %result = call <2 x i16> @llvm.fshr.v2i16(<2 x i16> %lhs, <2 x i16> %rhs, <2 x i16> %amt)
  %cast = bitcast <2 x i16> %result to float
  ret float %cast
}
|
|
|
|
; Funnel-shift-right of <2 x i16> with SGPR lhs/amt and a VGPR rhs;
; the uniform amount lets most of the shift arithmetic stay scalar.
define amdgpu_ps float @v_fshr_v2i16_svs(<2 x i16> inreg %lhs, <2 x i16> %rhs, <2 x i16> inreg %amt) {
; GFX6-LABEL: v_fshr_v2i16_svs:
; GFX6: ; %bb.0:
; GFX6-NEXT: v_bfe_u32 v2, v0, 1, 15
; GFX6-NEXT: s_lshl_b32 s3, s3, 16
; GFX6-NEXT: s_and_b32 s2, s2, 0xffff
; GFX6-NEXT: s_lshl_b32 s0, s0, 1
; GFX6-NEXT: v_lshrrev_b32_e32 v2, 14, v2
; GFX6-NEXT: v_bfe_u32 v3, v1, 1, 15
; GFX6-NEXT: s_or_b32 s2, s3, s2
; GFX6-NEXT: v_or_b32_e32 v2, s0, v2
; GFX6-NEXT: s_lshl_b32 s0, s1, 1
; GFX6-NEXT: v_lshrrev_b32_e32 v3, 14, v3
; GFX6-NEXT: v_or_b32_e32 v3, s0, v3
; GFX6-NEXT: s_xor_b32 s0, s2, -1
; GFX6-NEXT: v_lshlrev_b32_e32 v0, 1, v0
; GFX6-NEXT: s_lshr_b32 s1, s0, 16
; GFX6-NEXT: s_and_b32 s2, s0, 15
; GFX6-NEXT: s_andn2_b32 s0, 15, s0
; GFX6-NEXT: v_bfe_u32 v0, v0, 1, 15
; GFX6-NEXT: s_and_b32 s0, 0xffff, s0
; GFX6-NEXT: s_and_b32 s2, 0xffff, s2
; GFX6-NEXT: v_lshrrev_b32_e32 v0, s0, v0
; GFX6-NEXT: s_and_b32 s0, s1, 15
; GFX6-NEXT: v_lshlrev_b32_e32 v1, 1, v1
; GFX6-NEXT: v_lshlrev_b32_e32 v2, s2, v2
; GFX6-NEXT: s_andn2_b32 s1, 15, s1
; GFX6-NEXT: s_and_b32 s0, 0xffff, s0
; GFX6-NEXT: v_or_b32_e32 v0, v2, v0
; GFX6-NEXT: v_lshlrev_b32_e32 v2, s0, v3
; GFX6-NEXT: v_bfe_u32 v1, v1, 1, 15
; GFX6-NEXT: s_and_b32 s0, 0xffff, s1
; GFX6-NEXT: v_lshrrev_b32_e32 v1, s0, v1
; GFX6-NEXT: v_or_b32_e32 v1, v2, v1
; GFX6-NEXT: v_and_b32_e32 v1, 0xffff, v1
; GFX6-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX6-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; GFX6-NEXT: v_or_b32_e32 v0, v0, v1
; GFX6-NEXT: ; return to shader part epilog
;
; GFX8-LABEL: v_fshr_v2i16_svs:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_lshr_b32 s2, s0, 16
; GFX8-NEXT: s_lshl_b32 s0, s0, 1
; GFX8-NEXT: v_lshrrev_b16_e32 v1, 15, v0
; GFX8-NEXT: v_mov_b32_e32 v2, 15
; GFX8-NEXT: v_or_b32_e32 v1, s0, v1
; GFX8-NEXT: s_lshl_b32 s0, s2, 1
; GFX8-NEXT: v_lshrrev_b16_sdwa v2, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; GFX8-NEXT: v_or_b32_e32 v2, s0, v2
; GFX8-NEXT: v_lshlrev_b16_e32 v3, 1, v0
; GFX8-NEXT: v_mov_b32_e32 v4, 1
; GFX8-NEXT: s_xor_b32 s0, s1, -1
; GFX8-NEXT: v_lshlrev_b16_sdwa v0, v4, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; GFX8-NEXT: s_lshr_b32 s1, s0, 16
; GFX8-NEXT: s_and_b32 s2, s0, 15
; GFX8-NEXT: s_andn2_b32 s0, 15, s0
; GFX8-NEXT: v_lshrrev_b16_e32 v3, 1, v3
; GFX8-NEXT: v_lshrrev_b16_e32 v3, s0, v3
; GFX8-NEXT: s_and_b32 s0, s1, 15
; GFX8-NEXT: s_andn2_b32 s1, 15, s1
; GFX8-NEXT: v_lshrrev_b16_e32 v0, 1, v0
; GFX8-NEXT: v_lshlrev_b16_e32 v2, s0, v2
; GFX8-NEXT: v_lshrrev_b16_e32 v0, s1, v0
; GFX8-NEXT: v_or_b32_e32 v0, v2, v0
; GFX8-NEXT: v_lshlrev_b16_e32 v1, s2, v1
; GFX8-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX8-NEXT: v_or_b32_e32 v1, v1, v3
; GFX8-NEXT: v_lshlrev_b32_e32 v0, 16, v0
; GFX8-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX8-NEXT: ; return to shader part epilog
;
; GFX9-LABEL: v_fshr_v2i16_svs:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_lshr_b32 s3, s0, 16
; GFX9-NEXT: s_lshl_b32 s0, s0, 0x10001
; GFX9-NEXT: s_lshl_b32 s3, s3, 1
; GFX9-NEXT: s_and_b32 s2, s1, 0xf000f
; GFX9-NEXT: s_andn2_b32 s1, 0xf000f, s1
; GFX9-NEXT: s_pack_ll_b32_b16 s0, s0, s3
; GFX9-NEXT: s_lshr_b32 s3, s0, 16
; GFX9-NEXT: s_lshr_b32 s4, s1, 16
; GFX9-NEXT: s_lshl_b32 s0, s0, s1
; GFX9-NEXT: s_lshl_b32 s1, s3, s4
; GFX9-NEXT: s_pack_ll_b32_b16 s0, s0, s1
; GFX9-NEXT: v_pk_lshrrev_b16 v0, s2, v0
; GFX9-NEXT: v_or_b32_e32 v0, s0, v0
; GFX9-NEXT: ; return to shader part epilog
;
; GFX10-LABEL: v_fshr_v2i16_svs:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_lshr_b32 s2, s0, 16
; GFX10-NEXT: s_lshl_b32 s0, s0, 0x10001
; GFX10-NEXT: s_lshl_b32 s2, s2, 1
; GFX10-NEXT: s_and_b32 s3, s1, 0xf000f
; GFX10-NEXT: s_pack_ll_b32_b16 s0, s0, s2
; GFX10-NEXT: s_andn2_b32 s1, 0xf000f, s1
; GFX10-NEXT: s_lshr_b32 s2, s0, 16
; GFX10-NEXT: s_lshr_b32 s4, s1, 16
; GFX10-NEXT: v_pk_lshrrev_b16 v0, s3, v0
; GFX10-NEXT: s_lshl_b32 s0, s0, s1
; GFX10-NEXT: s_lshl_b32 s1, s2, s4
; GFX10-NEXT: s_pack_ll_b32_b16 s0, s0, s1
; GFX10-NEXT: v_or_b32_e32 v0, s0, v0
; GFX10-NEXT: ; return to shader part epilog
;
; GFX11-LABEL: v_fshr_v2i16_svs:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_lshr_b32 s2, s0, 16
; GFX11-NEXT: s_lshl_b32 s0, s0, 0x10001
; GFX11-NEXT: s_lshl_b32 s2, s2, 1
; GFX11-NEXT: s_and_b32 s3, s1, 0xf000f
; GFX11-NEXT: s_pack_ll_b32_b16 s0, s0, s2
; GFX11-NEXT: s_and_not1_b32 s1, 0xf000f, s1
; GFX11-NEXT: s_lshr_b32 s2, s0, 16
; GFX11-NEXT: s_lshr_b32 s4, s1, 16
; GFX11-NEXT: v_pk_lshrrev_b16 v0, s3, v0
; GFX11-NEXT: s_lshl_b32 s0, s0, s1
; GFX11-NEXT: s_lshl_b32 s1, s2, s4
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_pack_ll_b32_b16 s0, s0, s1
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
; GFX11-NEXT: v_or_b32_e32 v0, s0, v0
; GFX11-NEXT: ; return to shader part epilog
  %result = call <2 x i16> @llvm.fshr.v2i16(<2 x i16> %lhs, <2 x i16> %rhs, <2 x i16> %amt)
  %cast = bitcast <2 x i16> %result to float
  ret float %cast
}
|
|
|
|
; Funnel-shift-right of <2 x i16> with a VGPR lhs and SGPR rhs/amt;
; the rhs-side shifts stay scalar while the lhs side is vector.
define amdgpu_ps float @v_fshr_v2i16_vss(<2 x i16> %lhs, <2 x i16> inreg %rhs, <2 x i16> inreg %amt) {
; GFX6-LABEL: v_fshr_v2i16_vss:
; GFX6: ; %bb.0:
; GFX6-NEXT: s_lshl_b32 s3, s3, 16
; GFX6-NEXT: s_and_b32 s2, s2, 0xffff
; GFX6-NEXT: s_or_b32 s2, s3, s2
; GFX6-NEXT: s_bfe_u32 s3, s0, 0xf0001
; GFX6-NEXT: v_lshlrev_b32_e32 v0, 1, v0
; GFX6-NEXT: s_lshr_b32 s3, s3, 14
; GFX6-NEXT: v_or_b32_e32 v0, s3, v0
; GFX6-NEXT: s_bfe_u32 s3, s1, 0xf0001
; GFX6-NEXT: v_lshlrev_b32_e32 v1, 1, v1
; GFX6-NEXT: s_lshr_b32 s3, s3, 14
; GFX6-NEXT: s_xor_b32 s2, s2, -1
; GFX6-NEXT: v_or_b32_e32 v1, s3, v1
; GFX6-NEXT: s_lshl_b32 s0, s0, 1
; GFX6-NEXT: s_lshr_b32 s3, s2, 16
; GFX6-NEXT: s_and_b32 s4, s2, 15
; GFX6-NEXT: s_andn2_b32 s2, 15, s2
; GFX6-NEXT: s_and_b32 s4, 0xffff, s4
; GFX6-NEXT: s_bfe_u32 s0, s0, 0xf0001
; GFX6-NEXT: s_and_b32 s2, 0xffff, s2
; GFX6-NEXT: v_lshlrev_b32_e32 v0, s4, v0
; GFX6-NEXT: s_lshr_b32 s0, s0, s2
; GFX6-NEXT: v_or_b32_e32 v0, s0, v0
; GFX6-NEXT: s_and_b32 s0, s3, 15
; GFX6-NEXT: s_lshl_b32 s1, s1, 1
; GFX6-NEXT: s_andn2_b32 s2, 15, s3
; GFX6-NEXT: s_and_b32 s0, 0xffff, s0
; GFX6-NEXT: v_lshlrev_b32_e32 v1, s0, v1
; GFX6-NEXT: s_bfe_u32 s0, s1, 0xf0001
; GFX6-NEXT: s_and_b32 s1, 0xffff, s2
; GFX6-NEXT: s_lshr_b32 s0, s0, s1
; GFX6-NEXT: v_or_b32_e32 v1, s0, v1
; GFX6-NEXT: v_and_b32_e32 v1, 0xffff, v1
; GFX6-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX6-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; GFX6-NEXT: v_or_b32_e32 v0, v0, v1
; GFX6-NEXT: ; return to shader part epilog
;
; GFX8-LABEL: v_fshr_v2i16_vss:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_and_b32 s3, 0xffff, s0
; GFX8-NEXT: s_lshr_b32 s2, s0, 16
; GFX8-NEXT: v_lshlrev_b16_e32 v1, 1, v0
; GFX8-NEXT: s_lshr_b32 s3, s3, 15
; GFX8-NEXT: v_mov_b32_e32 v2, 1
; GFX8-NEXT: v_or_b32_e32 v1, s3, v1
; GFX8-NEXT: v_lshlrev_b16_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; GFX8-NEXT: s_lshr_b32 s3, s2, 15
; GFX8-NEXT: s_lshl_b32 s0, s0, 1
; GFX8-NEXT: s_xor_b32 s1, s1, -1
; GFX8-NEXT: v_or_b32_e32 v0, s3, v0
; GFX8-NEXT: s_lshr_b32 s3, s1, 16
; GFX8-NEXT: s_and_b32 s4, s1, 15
; GFX8-NEXT: s_andn2_b32 s1, 15, s1
; GFX8-NEXT: s_and_b32 s0, 0xffff, s0
; GFX8-NEXT: s_lshr_b32 s0, s0, 1
; GFX8-NEXT: s_and_b32 s1, 0xffff, s1
; GFX8-NEXT: v_lshlrev_b16_e32 v1, s4, v1
; GFX8-NEXT: s_lshr_b32 s0, s0, s1
; GFX8-NEXT: s_lshl_b32 s2, s2, 1
; GFX8-NEXT: v_or_b32_e32 v1, s0, v1
; GFX8-NEXT: s_and_b32 s0, s3, 15
; GFX8-NEXT: s_andn2_b32 s1, 15, s3
; GFX8-NEXT: v_lshlrev_b16_e32 v0, s0, v0
; GFX8-NEXT: s_and_b32 s0, 0xffff, s2
; GFX8-NEXT: s_lshr_b32 s0, s0, 1
; GFX8-NEXT: s_and_b32 s1, 0xffff, s1
; GFX8-NEXT: s_lshr_b32 s0, s0, s1
; GFX8-NEXT: v_or_b32_e32 v0, s0, v0
; GFX8-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX8-NEXT: v_lshlrev_b32_e32 v0, 16, v0
; GFX8-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX8-NEXT: ; return to shader part epilog
;
; GFX9-LABEL: v_fshr_v2i16_vss:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_and_b32 s2, s1, 0xf000f
; GFX9-NEXT: s_andn2_b32 s1, 0xf000f, s1
; GFX9-NEXT: v_pk_lshlrev_b16 v0, 1, v0 op_sel_hi:[0,1]
; GFX9-NEXT: v_pk_lshlrev_b16 v0, s1, v0
; GFX9-NEXT: s_lshr_b32 s1, s0, 16
; GFX9-NEXT: s_and_b32 s0, s0, 0xffff
; GFX9-NEXT: s_lshr_b32 s3, s2, 16
; GFX9-NEXT: s_lshr_b32 s0, s0, s2
; GFX9-NEXT: s_lshr_b32 s1, s1, s3
; GFX9-NEXT: s_pack_ll_b32_b16 s0, s0, s1
; GFX9-NEXT: v_or_b32_e32 v0, s0, v0
; GFX9-NEXT: ; return to shader part epilog
;
; GFX10-LABEL: v_fshr_v2i16_vss:
; GFX10: ; %bb.0:
; GFX10-NEXT: v_pk_lshlrev_b16 v0, 1, v0 op_sel_hi:[0,1]
; GFX10-NEXT: s_and_b32 s2, s1, 0xf000f
; GFX10-NEXT: s_andn2_b32 s1, 0xf000f, s1
; GFX10-NEXT: s_lshr_b32 s3, s0, 16
; GFX10-NEXT: s_and_b32 s0, s0, 0xffff
; GFX10-NEXT: s_lshr_b32 s4, s2, 16
; GFX10-NEXT: v_pk_lshlrev_b16 v0, s1, v0
; GFX10-NEXT: s_lshr_b32 s0, s0, s2
; GFX10-NEXT: s_lshr_b32 s1, s3, s4
; GFX10-NEXT: s_pack_ll_b32_b16 s0, s0, s1
; GFX10-NEXT: v_or_b32_e32 v0, s0, v0
; GFX10-NEXT: ; return to shader part epilog
;
; GFX11-LABEL: v_fshr_v2i16_vss:
; GFX11: ; %bb.0:
; GFX11-NEXT: v_pk_lshlrev_b16 v0, 1, v0 op_sel_hi:[0,1]
; GFX11-NEXT: s_and_b32 s2, s1, 0xf000f
; GFX11-NEXT: s_and_not1_b32 s1, 0xf000f, s1
; GFX11-NEXT: s_lshr_b32 s3, s0, 16
; GFX11-NEXT: s_and_b32 s0, s0, 0xffff
; GFX11-NEXT: s_lshr_b32 s4, s2, 16
; GFX11-NEXT: v_pk_lshlrev_b16 v0, s1, v0
; GFX11-NEXT: s_lshr_b32 s0, s0, s2
; GFX11-NEXT: s_lshr_b32 s1, s3, s4
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_pack_ll_b32_b16 s0, s0, s1
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
; GFX11-NEXT: v_or_b32_e32 v0, s0, v0
; GFX11-NEXT: ; return to shader part epilog
  %result = call <2 x i16> @llvm.fshr.v2i16(<2 x i16> %lhs, <2 x i16> %rhs, <2 x i16> %amt)
  %cast = bitcast <2 x i16> %result to float
  ret float %cast
}
|
|
|
|
; All-scalar funnel-shift-right of <3 x i16>, returned bitcast to i48;
; exercises the odd-element-count legalization with uniform operands.
define amdgpu_ps i48 @s_fshr_v3i16(<3 x i16> inreg %lhs, <3 x i16> inreg %rhs, <3 x i16> inreg %amt) {
; GFX6-LABEL: s_fshr_v3i16:
; GFX6: ; %bb.0:
; GFX6-NEXT: s_and_b32 s7, s7, 0xffff
; GFX6-NEXT: s_and_b32 s6, s6, 0xffff
; GFX6-NEXT: s_lshl_b32 s7, s7, 16
; GFX6-NEXT: s_or_b32 s6, s6, s7
; GFX6-NEXT: s_and_b32 s7, s8, 0xffff
; GFX6-NEXT: s_bfe_u32 s8, s3, 0xf0001
; GFX6-NEXT: s_lshl_b32 s0, s0, 1
; GFX6-NEXT: s_lshr_b32 s8, s8, 14
; GFX6-NEXT: s_or_b32 s0, s0, s8
; GFX6-NEXT: s_bfe_u32 s8, s4, 0xf0001
; GFX6-NEXT: s_lshl_b32 s1, s1, 1
; GFX6-NEXT: s_lshr_b32 s8, s8, 14
; GFX6-NEXT: s_xor_b32 s6, s6, -1
; GFX6-NEXT: s_or_b32 s1, s1, s8
; GFX6-NEXT: s_lshl_b32 s3, s3, 1
; GFX6-NEXT: s_lshr_b32 s8, s6, 16
; GFX6-NEXT: s_and_b32 s9, s6, 15
; GFX6-NEXT: s_andn2_b32 s6, 15, s6
; GFX6-NEXT: s_and_b32 s9, 0xffff, s9
; GFX6-NEXT: s_bfe_u32 s3, s3, 0xf0001
; GFX6-NEXT: s_and_b32 s6, 0xffff, s6
; GFX6-NEXT: s_lshl_b32 s0, s0, s9
; GFX6-NEXT: s_lshr_b32 s3, s3, s6
; GFX6-NEXT: s_or_b32 s0, s0, s3
; GFX6-NEXT: s_and_b32 s3, s8, 15
; GFX6-NEXT: s_lshl_b32 s4, s4, 1
; GFX6-NEXT: s_andn2_b32 s6, 15, s8
; GFX6-NEXT: s_and_b32 s3, 0xffff, s3
; GFX6-NEXT: s_lshl_b32 s1, s1, s3
; GFX6-NEXT: s_bfe_u32 s3, s4, 0xf0001
; GFX6-NEXT: s_and_b32 s4, 0xffff, s6
; GFX6-NEXT: s_lshr_b32 s3, s3, s4
; GFX6-NEXT: s_or_b32 s1, s1, s3
; GFX6-NEXT: s_bfe_u32 s3, s5, 0xf0001
; GFX6-NEXT: s_lshl_b32 s2, s2, 1
; GFX6-NEXT: s_lshr_b32 s3, s3, 14
; GFX6-NEXT: s_xor_b32 s4, s7, -1
; GFX6-NEXT: s_or_b32 s2, s2, s3
; GFX6-NEXT: s_lshl_b32 s3, s5, 1
; GFX6-NEXT: s_and_b32 s5, s4, 15
; GFX6-NEXT: s_andn2_b32 s4, 15, s4
; GFX6-NEXT: s_and_b32 s5, 0xffff, s5
; GFX6-NEXT: s_bfe_u32 s3, s3, 0xf0001
; GFX6-NEXT: s_and_b32 s4, 0xffff, s4
; GFX6-NEXT: s_lshl_b32 s2, s2, s5
; GFX6-NEXT: s_lshr_b32 s3, s3, s4
; GFX6-NEXT: s_and_b32 s1, 0xffff, s1
; GFX6-NEXT: s_or_b32 s2, s2, s3
; GFX6-NEXT: s_and_b32 s0, 0xffff, s0
; GFX6-NEXT: s_lshl_b32 s1, s1, 16
; GFX6-NEXT: s_or_b32 s0, s0, s1
; GFX6-NEXT: s_and_b32 s1, 0xffff, s2
; GFX6-NEXT: ; return to shader part epilog
;
; GFX8-LABEL: s_fshr_v3i16:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_and_b32 s8, 0xffff, s2
; GFX8-NEXT: s_lshr_b32 s6, s0, 16
; GFX8-NEXT: s_lshr_b32 s7, s2, 16
; GFX8-NEXT: s_lshl_b32 s0, s0, 1
; GFX8-NEXT: s_lshr_b32 s8, s8, 15
; GFX8-NEXT: s_or_b32 s0, s0, s8
; GFX8-NEXT: s_lshl_b32 s6, s6, 1
; GFX8-NEXT: s_lshr_b32 s8, s7, 15
; GFX8-NEXT: s_lshl_b32 s2, s2, 1
; GFX8-NEXT: s_xor_b32 s4, s4, -1
; GFX8-NEXT: s_or_b32 s6, s6, s8
; GFX8-NEXT: s_lshr_b32 s8, s4, 16
; GFX8-NEXT: s_and_b32 s9, s4, 15
; GFX8-NEXT: s_andn2_b32 s4, 15, s4
; GFX8-NEXT: s_and_b32 s2, 0xffff, s2
; GFX8-NEXT: s_and_b32 s9, 0xffff, s9
; GFX8-NEXT: s_lshr_b32 s2, s2, 1
; GFX8-NEXT: s_and_b32 s4, 0xffff, s4
; GFX8-NEXT: s_lshl_b32 s0, s0, s9
; GFX8-NEXT: s_lshr_b32 s2, s2, s4
; GFX8-NEXT: s_or_b32 s0, s0, s2
; GFX8-NEXT: s_and_b32 s2, s8, 15
; GFX8-NEXT: s_lshl_b32 s7, s7, 1
; GFX8-NEXT: s_and_b32 s2, 0xffff, s2
; GFX8-NEXT: s_andn2_b32 s4, 15, s8
; GFX8-NEXT: s_lshl_b32 s2, s6, s2
; GFX8-NEXT: s_and_b32 s6, 0xffff, s7
; GFX8-NEXT: s_lshr_b32 s6, s6, 1
; GFX8-NEXT: s_and_b32 s4, 0xffff, s4
; GFX8-NEXT: s_lshr_b32 s4, s6, s4
; GFX8-NEXT: s_or_b32 s2, s2, s4
; GFX8-NEXT: s_and_b32 s4, 0xffff, s3
; GFX8-NEXT: s_lshl_b32 s1, s1, 1
; GFX8-NEXT: s_lshr_b32 s4, s4, 15
; GFX8-NEXT: s_or_b32 s1, s1, s4
; GFX8-NEXT: s_lshl_b32 s3, s3, 1
; GFX8-NEXT: s_xor_b32 s4, s5, -1
; GFX8-NEXT: s_and_b32 s5, s4, 15
; GFX8-NEXT: s_andn2_b32 s4, 15, s4
; GFX8-NEXT: s_and_b32 s3, 0xffff, s3
; GFX8-NEXT: s_and_b32 s5, 0xffff, s5
; GFX8-NEXT: s_lshr_b32 s3, s3, 1
; GFX8-NEXT: s_and_b32 s4, 0xffff, s4
; GFX8-NEXT: s_lshl_b32 s1, s1, s5
; GFX8-NEXT: s_lshr_b32 s3, s3, s4
; GFX8-NEXT: s_and_b32 s2, 0xffff, s2
; GFX8-NEXT: s_or_b32 s1, s1, s3
; GFX8-NEXT: s_and_b32 s0, 0xffff, s0
; GFX8-NEXT: s_lshl_b32 s2, s2, 16
; GFX8-NEXT: s_or_b32 s0, s0, s2
; GFX8-NEXT: s_and_b32 s1, 0xffff, s1
; GFX8-NEXT: ; return to shader part epilog
;
; GFX9-LABEL: s_fshr_v3i16:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_lshr_b32 s7, s0, 16
; GFX9-NEXT: s_lshl_b32 s0, s0, 0x10001
; GFX9-NEXT: s_lshl_b32 s7, s7, 1
; GFX9-NEXT: s_and_b32 s6, s4, 0xf000f
; GFX9-NEXT: s_andn2_b32 s4, 0xf000f, s4
; GFX9-NEXT: s_pack_ll_b32_b16 s0, s0, s7
; GFX9-NEXT: s_lshr_b32 s7, s0, 16
; GFX9-NEXT: s_lshr_b32 s8, s4, 16
; GFX9-NEXT: s_lshl_b32 s0, s0, s4
; GFX9-NEXT: s_lshl_b32 s4, s7, s8
; GFX9-NEXT: s_pack_ll_b32_b16 s0, s0, s4
; GFX9-NEXT: s_lshr_b32 s4, s2, 16
; GFX9-NEXT: s_and_b32 s2, s2, 0xffff
; GFX9-NEXT: s_lshr_b32 s7, s6, 16
; GFX9-NEXT: s_lshr_b32 s2, s2, s6
; GFX9-NEXT: s_lshr_b32 s4, s4, s7
; GFX9-NEXT: s_pack_ll_b32_b16 s2, s2, s4
; GFX9-NEXT: s_or_b32 s0, s0, s2
; GFX9-NEXT: s_and_b32 s2, s5, 0xf000f
; GFX9-NEXT: s_andn2_b32 s4, 0xf000f, s5
; GFX9-NEXT: s_lshr_b32 s5, s1, 16
; GFX9-NEXT: s_lshl_b32 s1, s1, 0x10001
; GFX9-NEXT: s_lshl_b32 s5, s5, 1
; GFX9-NEXT: s_pack_ll_b32_b16 s1, s1, s5
; GFX9-NEXT: s_lshr_b32 s5, s1, 16
; GFX9-NEXT: s_lshr_b32 s6, s4, 16
; GFX9-NEXT: s_lshl_b32 s1, s1, s4
; GFX9-NEXT: s_lshl_b32 s4, s5, s6
; GFX9-NEXT: s_pack_ll_b32_b16 s1, s1, s4
; GFX9-NEXT: s_lshr_b32 s4, s3, 16
; GFX9-NEXT: s_and_b32 s3, s3, 0xffff
; GFX9-NEXT: s_lshr_b32 s5, s2, 16
; GFX9-NEXT: s_lshr_b32 s2, s3, s2
; GFX9-NEXT: s_lshr_b32 s3, s4, s5
; GFX9-NEXT: s_pack_ll_b32_b16 s2, s2, s3
; GFX9-NEXT: s_or_b32 s1, s1, s2
; GFX9-NEXT: s_lshr_b32 s2, s0, 16
; GFX9-NEXT: s_and_b32 s0, s0, 0xffff
; GFX9-NEXT: s_lshl_b32 s2, s2, 16
; GFX9-NEXT: s_or_b32 s0, s0, s2
; GFX9-NEXT: s_and_b32 s1, s1, 0xffff
; GFX9-NEXT: ; return to shader part epilog
;
; GFX10-LABEL: s_fshr_v3i16:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_lshr_b32 s6, s0, 16
; GFX10-NEXT: s_lshl_b32 s0, s0, 0x10001
; GFX10-NEXT: s_lshl_b32 s6, s6, 1
; GFX10-NEXT: s_and_b32 s7, s4, 0xf000f
; GFX10-NEXT: s_pack_ll_b32_b16 s0, s0, s6
; GFX10-NEXT: s_andn2_b32 s4, 0xf000f, s4
; GFX10-NEXT: s_lshr_b32 s6, s0, 16
; GFX10-NEXT: s_lshr_b32 s8, s4, 16
; GFX10-NEXT: s_lshl_b32 s0, s0, s4
; GFX10-NEXT: s_lshl_b32 s4, s6, s8
; GFX10-NEXT: s_lshr_b32 s6, s2, 16
; GFX10-NEXT: s_and_b32 s2, s2, 0xffff
; GFX10-NEXT: s_lshr_b32 s8, s7, 16
; GFX10-NEXT: s_lshr_b32 s2, s2, s7
; GFX10-NEXT: s_lshr_b32 s6, s6, s8
; GFX10-NEXT: s_pack_ll_b32_b16 s0, s0, s4
; GFX10-NEXT: s_pack_ll_b32_b16 s2, s2, s6
; GFX10-NEXT: s_and_b32 s4, s5, 0xf000f
; GFX10-NEXT: s_or_b32 s0, s0, s2
; GFX10-NEXT: s_lshr_b32 s2, s1, 16
; GFX10-NEXT: s_lshl_b32 s1, s1, 0x10001
; GFX10-NEXT: s_lshl_b32 s2, s2, 1
; GFX10-NEXT: s_pack_ll_b32_b16 s1, s1, s2
; GFX10-NEXT: s_andn2_b32 s2, 0xf000f, s5
; GFX10-NEXT: s_lshr_b32 s5, s1, 16
; GFX10-NEXT: s_lshr_b32 s6, s2, 16
; GFX10-NEXT: s_lshl_b32 s1, s1, s2
; GFX10-NEXT: s_lshl_b32 s2, s5, s6
; GFX10-NEXT: s_lshr_b32 s5, s3, 16
; GFX10-NEXT: s_and_b32 s3, s3, 0xffff
; GFX10-NEXT: s_lshr_b32 s6, s4, 16
; GFX10-NEXT: s_lshr_b32 s3, s3, s4
; GFX10-NEXT: s_lshr_b32 s4, s5, s6
; GFX10-NEXT: s_pack_ll_b32_b16 s1, s1, s2
; GFX10-NEXT: s_pack_ll_b32_b16 s2, s3, s4
; GFX10-NEXT: s_lshr_b32 s3, s0, 16
; GFX10-NEXT: s_and_b32 s0, s0, 0xffff
; GFX10-NEXT: s_lshl_b32 s3, s3, 16
; GFX10-NEXT: s_or_b32 s1, s1, s2
; GFX10-NEXT: s_or_b32 s0, s0, s3
; GFX10-NEXT: s_and_b32 s1, s1, 0xffff
; GFX10-NEXT: ; return to shader part epilog
;
; GFX11-LABEL: s_fshr_v3i16:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_lshr_b32 s6, s0, 16
; GFX11-NEXT: s_lshl_b32 s0, s0, 0x10001
; GFX11-NEXT: s_lshl_b32 s6, s6, 1
; GFX11-NEXT: s_and_b32 s7, s4, 0xf000f
; GFX11-NEXT: s_pack_ll_b32_b16 s0, s0, s6
; GFX11-NEXT: s_and_not1_b32 s4, 0xf000f, s4
; GFX11-NEXT: s_lshr_b32 s6, s0, 16
; GFX11-NEXT: s_lshr_b32 s8, s4, 16
; GFX11-NEXT: s_lshl_b32 s0, s0, s4
; GFX11-NEXT: s_lshl_b32 s4, s6, s8
; GFX11-NEXT: s_lshr_b32 s6, s2, 16
; GFX11-NEXT: s_and_b32 s2, s2, 0xffff
; GFX11-NEXT: s_lshr_b32 s8, s7, 16
; GFX11-NEXT: s_lshr_b32 s2, s2, s7
; GFX11-NEXT: s_lshr_b32 s6, s6, s8
; GFX11-NEXT: s_pack_ll_b32_b16 s0, s0, s4
; GFX11-NEXT: s_pack_ll_b32_b16 s2, s2, s6
; GFX11-NEXT: s_and_b32 s4, s5, 0xf000f
; GFX11-NEXT: s_or_b32 s0, s0, s2
; GFX11-NEXT: s_lshr_b32 s2, s1, 16
; GFX11-NEXT: s_lshl_b32 s1, s1, 0x10001
; GFX11-NEXT: s_lshl_b32 s2, s2, 1
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_pack_ll_b32_b16 s1, s1, s2
; GFX11-NEXT: s_and_not1_b32 s2, 0xf000f, s5
; GFX11-NEXT: s_lshr_b32 s5, s1, 16
; GFX11-NEXT: s_lshr_b32 s6, s2, 16
; GFX11-NEXT: s_lshl_b32 s1, s1, s2
; GFX11-NEXT: s_lshl_b32 s2, s5, s6
; GFX11-NEXT: s_lshr_b32 s5, s3, 16
; GFX11-NEXT: s_and_b32 s3, s3, 0xffff
; GFX11-NEXT: s_lshr_b32 s6, s4, 16
; GFX11-NEXT: s_lshr_b32 s3, s3, s4
; GFX11-NEXT: s_lshr_b32 s4, s5, s6
; GFX11-NEXT: s_pack_ll_b32_b16 s1, s1, s2
; GFX11-NEXT: s_pack_ll_b32_b16 s2, s3, s4
; GFX11-NEXT: s_lshr_b32 s3, s0, 16
; GFX11-NEXT: s_and_b32 s0, s0, 0xffff
; GFX11-NEXT: s_lshl_b32 s3, s3, 16
; GFX11-NEXT: s_or_b32 s1, s1, s2
; GFX11-NEXT: s_or_b32 s0, s0, s3
; GFX11-NEXT: s_and_b32 s1, s1, 0xffff
; GFX11-NEXT: ; return to shader part epilog
  %result = call <3 x i16> @llvm.fshr.v3i16(<3 x i16> %lhs, <3 x i16> %rhs, <3 x i16> %amt)
  %cast = bitcast <3 x i16> %result to i48
  ret i48 %cast
}
|
|
|
|
; fshr of <3 x i16> with variable (VGPR) operands; the result is bitcast to
; <3 x half> purely so the three lanes survive the return ABI. CHECK lines are
; autogenerated -- regenerate with update_llc_test_checks.py, do not hand-edit.
define <3 x half> @v_fshr_v3i16(<3 x i16> %lhs, <3 x i16> %rhs, <3 x i16> %amt) {
; GFX6-LABEL: v_fshr_v3i16:
; GFX6: ; %bb.0:
; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX6-NEXT: v_and_b32_e32 v7, 0xffff, v7
; GFX6-NEXT: v_and_b32_e32 v6, 0xffff, v6
; GFX6-NEXT: v_lshlrev_b32_e32 v7, 16, v7
; GFX6-NEXT: v_or_b32_e32 v6, v6, v7
; GFX6-NEXT: v_and_b32_e32 v7, 0xffff, v8
; GFX6-NEXT: v_bfe_u32 v8, v3, 1, 15
; GFX6-NEXT: v_lshlrev_b32_e32 v0, 1, v0
; GFX6-NEXT: v_lshrrev_b32_e32 v8, 14, v8
; GFX6-NEXT: v_or_b32_e32 v0, v0, v8
; GFX6-NEXT: v_bfe_u32 v8, v4, 1, 15
; GFX6-NEXT: v_lshlrev_b32_e32 v1, 1, v1
; GFX6-NEXT: v_lshrrev_b32_e32 v8, 14, v8
; GFX6-NEXT: v_xor_b32_e32 v6, -1, v6
; GFX6-NEXT: v_or_b32_e32 v1, v1, v8
; GFX6-NEXT: v_lshrrev_b32_e32 v8, 16, v6
; GFX6-NEXT: v_and_b32_e32 v9, 15, v6
; GFX6-NEXT: v_xor_b32_e32 v6, -1, v6
; GFX6-NEXT: v_lshlrev_b32_e32 v3, 1, v3
; GFX6-NEXT: v_and_b32_e32 v6, 15, v6
; GFX6-NEXT: v_and_b32_e32 v9, 0xffff, v9
; GFX6-NEXT: v_bfe_u32 v3, v3, 1, 15
; GFX6-NEXT: v_and_b32_e32 v6, 0xffff, v6
; GFX6-NEXT: v_lshlrev_b32_e32 v0, v9, v0
; GFX6-NEXT: v_lshrrev_b32_e32 v3, v6, v3
; GFX6-NEXT: v_or_b32_e32 v0, v0, v3
; GFX6-NEXT: v_and_b32_e32 v3, 15, v8
; GFX6-NEXT: v_xor_b32_e32 v6, -1, v8
; GFX6-NEXT: v_lshlrev_b32_e32 v4, 1, v4
; GFX6-NEXT: v_and_b32_e32 v6, 15, v6
; GFX6-NEXT: v_and_b32_e32 v3, 0xffff, v3
; GFX6-NEXT: v_lshlrev_b32_e32 v1, v3, v1
; GFX6-NEXT: v_bfe_u32 v3, v4, 1, 15
; GFX6-NEXT: v_and_b32_e32 v4, 0xffff, v6
; GFX6-NEXT: v_lshrrev_b32_e32 v3, v4, v3
; GFX6-NEXT: v_or_b32_e32 v1, v1, v3
; GFX6-NEXT: v_bfe_u32 v3, v5, 1, 15
; GFX6-NEXT: v_lshlrev_b32_e32 v2, 1, v2
; GFX6-NEXT: v_lshrrev_b32_e32 v3, 14, v3
; GFX6-NEXT: v_xor_b32_e32 v4, -1, v7
; GFX6-NEXT: v_or_b32_e32 v2, v2, v3
; GFX6-NEXT: v_lshlrev_b32_e32 v3, 1, v5
; GFX6-NEXT: v_and_b32_e32 v5, 15, v4
; GFX6-NEXT: v_xor_b32_e32 v4, -1, v4
; GFX6-NEXT: v_and_b32_e32 v4, 15, v4
; GFX6-NEXT: v_and_b32_e32 v5, 0xffff, v5
; GFX6-NEXT: v_bfe_u32 v3, v3, 1, 15
; GFX6-NEXT: v_and_b32_e32 v4, 0xffff, v4
; GFX6-NEXT: v_lshlrev_b32_e32 v2, v5, v2
; GFX6-NEXT: v_lshrrev_b32_e32 v3, v4, v3
; GFX6-NEXT: v_or_b32_e32 v2, v2, v3
; GFX6-NEXT: s_setpc_b64 s[30:31]
;
; GFX8-LABEL: v_fshr_v3i16:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX8-NEXT: v_lshlrev_b16_e32 v6, 1, v0
; GFX8-NEXT: v_lshrrev_b16_e32 v7, 15, v2
; GFX8-NEXT: v_or_b32_e32 v6, v6, v7
; GFX8-NEXT: v_mov_b32_e32 v7, 1
; GFX8-NEXT: v_mov_b32_e32 v8, 15
; GFX8-NEXT: v_lshlrev_b16_sdwa v0, v7, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; GFX8-NEXT: v_lshrrev_b16_sdwa v9, v8, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; GFX8-NEXT: v_xor_b32_e32 v4, -1, v4
; GFX8-NEXT: v_or_b32_e32 v0, v0, v9
; GFX8-NEXT: v_lshlrev_b16_e32 v9, 1, v2
; GFX8-NEXT: v_lshlrev_b16_sdwa v2, v7, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; GFX8-NEXT: v_and_b32_e32 v7, 15, v4
; GFX8-NEXT: v_xor_b32_e32 v10, -1, v4
; GFX8-NEXT: v_and_b32_e32 v10, 15, v10
; GFX8-NEXT: v_lshlrev_b16_e32 v6, v7, v6
; GFX8-NEXT: v_lshrrev_b16_e32 v7, 1, v9
; GFX8-NEXT: v_lshrrev_b16_e32 v7, v10, v7
; GFX8-NEXT: v_or_b32_e32 v6, v6, v7
; GFX8-NEXT: v_and_b32_sdwa v7, v4, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
; GFX8-NEXT: v_mov_b32_e32 v8, -1
; GFX8-NEXT: v_xor_b32_sdwa v4, v4, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
; GFX8-NEXT: v_and_b32_e32 v4, 15, v4
; GFX8-NEXT: v_lshrrev_b16_e32 v2, 1, v2
; GFX8-NEXT: v_lshlrev_b16_e32 v0, v7, v0
; GFX8-NEXT: v_lshrrev_b16_e32 v2, v4, v2
; GFX8-NEXT: v_or_b32_e32 v0, v0, v2
; GFX8-NEXT: v_lshlrev_b16_e32 v1, 1, v1
; GFX8-NEXT: v_lshrrev_b16_e32 v2, 15, v3
; GFX8-NEXT: v_or_b32_e32 v1, v1, v2
; GFX8-NEXT: v_lshlrev_b16_e32 v2, 1, v3
; GFX8-NEXT: v_xor_b32_e32 v3, -1, v5
; GFX8-NEXT: v_and_b32_e32 v4, 15, v3
; GFX8-NEXT: v_xor_b32_e32 v3, -1, v3
; GFX8-NEXT: v_and_b32_e32 v3, 15, v3
; GFX8-NEXT: v_lshrrev_b16_e32 v2, 1, v2
; GFX8-NEXT: v_lshlrev_b16_e32 v1, v4, v1
; GFX8-NEXT: v_lshrrev_b16_e32 v2, v3, v2
; GFX8-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX8-NEXT: v_or_b32_e32 v1, v1, v2
; GFX8-NEXT: v_lshlrev_b32_e32 v0, 16, v0
; GFX8-NEXT: v_or_b32_sdwa v0, v6, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX8-NEXT: v_and_b32_e32 v1, 0xffff, v1
; GFX8-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: v_fshr_v3i16:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_and_b32_e32 v6, 0xf000f, v4
; GFX9-NEXT: v_xor_b32_e32 v4, -1, v4
; GFX9-NEXT: v_and_b32_e32 v4, 0xf000f, v4
; GFX9-NEXT: v_pk_lshlrev_b16 v0, 1, v0 op_sel_hi:[0,1]
; GFX9-NEXT: v_pk_lshlrev_b16 v0, v4, v0
; GFX9-NEXT: v_pk_lshrrev_b16 v2, v6, v2
; GFX9-NEXT: v_xor_b32_e32 v4, -1, v5
; GFX9-NEXT: v_or_b32_e32 v0, v0, v2
; GFX9-NEXT: v_and_b32_e32 v2, 0xf000f, v5
; GFX9-NEXT: v_and_b32_e32 v4, 0xf000f, v4
; GFX9-NEXT: v_pk_lshlrev_b16 v1, 1, v1 op_sel_hi:[0,1]
; GFX9-NEXT: v_pk_lshlrev_b16 v1, v4, v1
; GFX9-NEXT: v_pk_lshrrev_b16 v2, v2, v3
; GFX9-NEXT: v_or_b32_e32 v1, v1, v2
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: v_fshr_v3i16:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_xor_b32_e32 v6, -1, v4
; GFX10-NEXT: v_xor_b32_e32 v7, -1, v5
; GFX10-NEXT: v_and_b32_e32 v4, 0xf000f, v4
; GFX10-NEXT: v_pk_lshlrev_b16 v0, 1, v0 op_sel_hi:[0,1]
; GFX10-NEXT: v_and_b32_e32 v5, 0xf000f, v5
; GFX10-NEXT: v_and_b32_e32 v6, 0xf000f, v6
; GFX10-NEXT: v_pk_lshlrev_b16 v1, 1, v1 op_sel_hi:[0,1]
; GFX10-NEXT: v_and_b32_e32 v7, 0xf000f, v7
; GFX10-NEXT: v_pk_lshrrev_b16 v2, v4, v2
; GFX10-NEXT: v_pk_lshrrev_b16 v3, v5, v3
; GFX10-NEXT: v_pk_lshlrev_b16 v0, v6, v0
; GFX10-NEXT: v_pk_lshlrev_b16 v1, v7, v1
; GFX10-NEXT: v_or_b32_e32 v0, v0, v2
; GFX10-NEXT: v_or_b32_e32 v1, v1, v3
; GFX10-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-LABEL: v_fshr_v3i16:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_xor_b32_e32 v6, -1, v4
; GFX11-NEXT: v_xor_b32_e32 v7, -1, v5
; GFX11-NEXT: v_and_b32_e32 v4, 0xf000f, v4
; GFX11-NEXT: v_pk_lshlrev_b16 v0, 1, v0 op_sel_hi:[0,1]
; GFX11-NEXT: v_and_b32_e32 v5, 0xf000f, v5
; GFX11-NEXT: v_and_b32_e32 v6, 0xf000f, v6
; GFX11-NEXT: v_pk_lshlrev_b16 v1, 1, v1 op_sel_hi:[0,1]
; GFX11-NEXT: v_and_b32_e32 v7, 0xf000f, v7
; GFX11-NEXT: v_pk_lshrrev_b16 v2, v4, v2
; GFX11-NEXT: v_pk_lshrrev_b16 v3, v5, v3
; GFX11-NEXT: v_pk_lshlrev_b16 v0, v6, v0
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-NEXT: v_pk_lshlrev_b16 v1, v7, v1
; GFX11-NEXT: v_or_b32_e32 v0, v0, v2
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
; GFX11-NEXT: v_or_b32_e32 v1, v1, v3
; GFX11-NEXT: s_setpc_b64 s[30:31]
  %result = call <3 x i16> @llvm.fshr.v3i16(<3 x i16> %lhs, <3 x i16> %rhs, <3 x i16> %amt)
  %cast.result = bitcast <3 x i16> %result to <3 x half>
  ret <3 x half> %cast.result
}
|
|
|
|
; fshr of <4 x i16> with uniform (SGPR) operands; result bitcast to <2 x i32>
; so it can be returned from an amdgpu_ps shader. CHECK lines are
; autogenerated -- regenerate with update_llc_test_checks.py, do not hand-edit.
define amdgpu_ps <2 x i32> @s_fshr_v4i16(<4 x i16> inreg %lhs, <4 x i16> inreg %rhs, <4 x i16> inreg %amt) {
; GFX6-LABEL: s_fshr_v4i16:
; GFX6: ; %bb.0:
; GFX6-NEXT: s_lshl_b32 s9, s9, 16
; GFX6-NEXT: s_and_b32 s8, s8, 0xffff
; GFX6-NEXT: s_or_b32 s8, s9, s8
; GFX6-NEXT: s_lshl_b32 s9, s11, 16
; GFX6-NEXT: s_and_b32 s10, s10, 0xffff
; GFX6-NEXT: s_or_b32 s9, s9, s10
; GFX6-NEXT: s_bfe_u32 s10, s4, 0xf0001
; GFX6-NEXT: s_lshl_b32 s0, s0, 1
; GFX6-NEXT: s_lshr_b32 s10, s10, 14
; GFX6-NEXT: s_or_b32 s0, s0, s10
; GFX6-NEXT: s_bfe_u32 s10, s5, 0xf0001
; GFX6-NEXT: s_lshl_b32 s1, s1, 1
; GFX6-NEXT: s_lshr_b32 s10, s10, 14
; GFX6-NEXT: s_xor_b32 s8, s8, -1
; GFX6-NEXT: s_or_b32 s1, s1, s10
; GFX6-NEXT: s_lshl_b32 s4, s4, 1
; GFX6-NEXT: s_lshr_b32 s10, s8, 16
; GFX6-NEXT: s_and_b32 s11, s8, 15
; GFX6-NEXT: s_andn2_b32 s8, 15, s8
; GFX6-NEXT: s_and_b32 s11, 0xffff, s11
; GFX6-NEXT: s_bfe_u32 s4, s4, 0xf0001
; GFX6-NEXT: s_and_b32 s8, 0xffff, s8
; GFX6-NEXT: s_lshl_b32 s0, s0, s11
; GFX6-NEXT: s_lshr_b32 s4, s4, s8
; GFX6-NEXT: s_or_b32 s0, s0, s4
; GFX6-NEXT: s_and_b32 s4, s10, 15
; GFX6-NEXT: s_lshl_b32 s5, s5, 1
; GFX6-NEXT: s_andn2_b32 s8, 15, s10
; GFX6-NEXT: s_and_b32 s4, 0xffff, s4
; GFX6-NEXT: s_lshl_b32 s1, s1, s4
; GFX6-NEXT: s_bfe_u32 s4, s5, 0xf0001
; GFX6-NEXT: s_and_b32 s5, 0xffff, s8
; GFX6-NEXT: s_lshr_b32 s4, s4, s5
; GFX6-NEXT: s_or_b32 s1, s1, s4
; GFX6-NEXT: s_and_b32 s1, 0xffff, s1
; GFX6-NEXT: s_and_b32 s0, 0xffff, s0
; GFX6-NEXT: s_lshl_b32 s1, s1, 16
; GFX6-NEXT: s_or_b32 s0, s0, s1
; GFX6-NEXT: s_lshl_b32 s1, s2, 1
; GFX6-NEXT: s_bfe_u32 s2, s6, 0xf0001
; GFX6-NEXT: s_lshr_b32 s2, s2, 14
; GFX6-NEXT: s_or_b32 s1, s1, s2
; GFX6-NEXT: s_lshl_b32 s2, s3, 1
; GFX6-NEXT: s_bfe_u32 s3, s7, 0xf0001
; GFX6-NEXT: s_lshr_b32 s3, s3, 14
; GFX6-NEXT: s_xor_b32 s5, s9, -1
; GFX6-NEXT: s_or_b32 s2, s2, s3
; GFX6-NEXT: s_lshl_b32 s3, s6, 1
; GFX6-NEXT: s_lshl_b32 s4, s7, 1
; GFX6-NEXT: s_lshr_b32 s6, s5, 16
; GFX6-NEXT: s_and_b32 s7, s5, 15
; GFX6-NEXT: s_andn2_b32 s5, 15, s5
; GFX6-NEXT: s_and_b32 s7, 0xffff, s7
; GFX6-NEXT: s_bfe_u32 s3, s3, 0xf0001
; GFX6-NEXT: s_and_b32 s5, 0xffff, s5
; GFX6-NEXT: s_lshl_b32 s1, s1, s7
; GFX6-NEXT: s_lshr_b32 s3, s3, s5
; GFX6-NEXT: s_or_b32 s1, s1, s3
; GFX6-NEXT: s_and_b32 s3, s6, 15
; GFX6-NEXT: s_andn2_b32 s5, 15, s6
; GFX6-NEXT: s_and_b32 s3, 0xffff, s3
; GFX6-NEXT: s_lshl_b32 s2, s2, s3
; GFX6-NEXT: s_bfe_u32 s3, s4, 0xf0001
; GFX6-NEXT: s_and_b32 s4, 0xffff, s5
; GFX6-NEXT: s_lshr_b32 s3, s3, s4
; GFX6-NEXT: s_or_b32 s2, s2, s3
; GFX6-NEXT: s_and_b32 s2, 0xffff, s2
; GFX6-NEXT: s_and_b32 s1, 0xffff, s1
; GFX6-NEXT: s_lshl_b32 s2, s2, 16
; GFX6-NEXT: s_or_b32 s1, s1, s2
; GFX6-NEXT: ; return to shader part epilog
;
; GFX8-LABEL: s_fshr_v4i16:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_and_b32 s8, 0xffff, s2
; GFX8-NEXT: s_lshr_b32 s6, s0, 16
; GFX8-NEXT: s_lshr_b32 s7, s2, 16
; GFX8-NEXT: s_lshl_b32 s0, s0, 1
; GFX8-NEXT: s_lshr_b32 s8, s8, 15
; GFX8-NEXT: s_or_b32 s0, s0, s8
; GFX8-NEXT: s_lshl_b32 s6, s6, 1
; GFX8-NEXT: s_lshr_b32 s8, s7, 15
; GFX8-NEXT: s_lshl_b32 s2, s2, 1
; GFX8-NEXT: s_xor_b32 s4, s4, -1
; GFX8-NEXT: s_or_b32 s6, s6, s8
; GFX8-NEXT: s_lshr_b32 s8, s4, 16
; GFX8-NEXT: s_and_b32 s9, s4, 15
; GFX8-NEXT: s_andn2_b32 s4, 15, s4
; GFX8-NEXT: s_and_b32 s2, 0xffff, s2
; GFX8-NEXT: s_and_b32 s9, 0xffff, s9
; GFX8-NEXT: s_lshr_b32 s2, s2, 1
; GFX8-NEXT: s_and_b32 s4, 0xffff, s4
; GFX8-NEXT: s_lshl_b32 s0, s0, s9
; GFX8-NEXT: s_lshr_b32 s2, s2, s4
; GFX8-NEXT: s_or_b32 s0, s0, s2
; GFX8-NEXT: s_and_b32 s2, s8, 15
; GFX8-NEXT: s_lshl_b32 s7, s7, 1
; GFX8-NEXT: s_and_b32 s2, 0xffff, s2
; GFX8-NEXT: s_andn2_b32 s4, 15, s8
; GFX8-NEXT: s_lshl_b32 s2, s6, s2
; GFX8-NEXT: s_and_b32 s6, 0xffff, s7
; GFX8-NEXT: s_lshr_b32 s6, s6, 1
; GFX8-NEXT: s_and_b32 s4, 0xffff, s4
; GFX8-NEXT: s_lshr_b32 s4, s6, s4
; GFX8-NEXT: s_or_b32 s2, s2, s4
; GFX8-NEXT: s_and_b32 s2, 0xffff, s2
; GFX8-NEXT: s_and_b32 s0, 0xffff, s0
; GFX8-NEXT: s_lshl_b32 s2, s2, 16
; GFX8-NEXT: s_and_b32 s6, 0xffff, s3
; GFX8-NEXT: s_or_b32 s0, s0, s2
; GFX8-NEXT: s_lshr_b32 s2, s1, 16
; GFX8-NEXT: s_lshr_b32 s4, s3, 16
; GFX8-NEXT: s_lshl_b32 s1, s1, 1
; GFX8-NEXT: s_lshr_b32 s6, s6, 15
; GFX8-NEXT: s_or_b32 s1, s1, s6
; GFX8-NEXT: s_lshl_b32 s2, s2, 1
; GFX8-NEXT: s_lshr_b32 s6, s4, 15
; GFX8-NEXT: s_lshl_b32 s3, s3, 1
; GFX8-NEXT: s_xor_b32 s5, s5, -1
; GFX8-NEXT: s_or_b32 s2, s2, s6
; GFX8-NEXT: s_lshr_b32 s6, s5, 16
; GFX8-NEXT: s_and_b32 s7, s5, 15
; GFX8-NEXT: s_andn2_b32 s5, 15, s5
; GFX8-NEXT: s_and_b32 s3, 0xffff, s3
; GFX8-NEXT: s_and_b32 s7, 0xffff, s7
; GFX8-NEXT: s_lshr_b32 s3, s3, 1
; GFX8-NEXT: s_and_b32 s5, 0xffff, s5
; GFX8-NEXT: s_lshl_b32 s1, s1, s7
; GFX8-NEXT: s_lshr_b32 s3, s3, s5
; GFX8-NEXT: s_or_b32 s1, s1, s3
; GFX8-NEXT: s_and_b32 s3, s6, 15
; GFX8-NEXT: s_lshl_b32 s4, s4, 1
; GFX8-NEXT: s_and_b32 s3, 0xffff, s3
; GFX8-NEXT: s_andn2_b32 s5, 15, s6
; GFX8-NEXT: s_lshl_b32 s2, s2, s3
; GFX8-NEXT: s_and_b32 s3, 0xffff, s4
; GFX8-NEXT: s_lshr_b32 s3, s3, 1
; GFX8-NEXT: s_and_b32 s4, 0xffff, s5
; GFX8-NEXT: s_lshr_b32 s3, s3, s4
; GFX8-NEXT: s_or_b32 s2, s2, s3
; GFX8-NEXT: s_and_b32 s2, 0xffff, s2
; GFX8-NEXT: s_and_b32 s1, 0xffff, s1
; GFX8-NEXT: s_lshl_b32 s2, s2, 16
; GFX8-NEXT: s_or_b32 s1, s1, s2
; GFX8-NEXT: ; return to shader part epilog
;
; GFX9-LABEL: s_fshr_v4i16:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_lshr_b32 s7, s0, 16
; GFX9-NEXT: s_lshl_b32 s0, s0, 0x10001
; GFX9-NEXT: s_lshl_b32 s7, s7, 1
; GFX9-NEXT: s_and_b32 s6, s4, 0xf000f
; GFX9-NEXT: s_andn2_b32 s4, 0xf000f, s4
; GFX9-NEXT: s_pack_ll_b32_b16 s0, s0, s7
; GFX9-NEXT: s_lshr_b32 s7, s0, 16
; GFX9-NEXT: s_lshr_b32 s8, s4, 16
; GFX9-NEXT: s_lshl_b32 s0, s0, s4
; GFX9-NEXT: s_lshl_b32 s4, s7, s8
; GFX9-NEXT: s_pack_ll_b32_b16 s0, s0, s4
; GFX9-NEXT: s_lshr_b32 s4, s2, 16
; GFX9-NEXT: s_and_b32 s2, s2, 0xffff
; GFX9-NEXT: s_lshr_b32 s7, s6, 16
; GFX9-NEXT: s_lshr_b32 s2, s2, s6
; GFX9-NEXT: s_lshr_b32 s4, s4, s7
; GFX9-NEXT: s_pack_ll_b32_b16 s2, s2, s4
; GFX9-NEXT: s_or_b32 s0, s0, s2
; GFX9-NEXT: s_and_b32 s2, s5, 0xf000f
; GFX9-NEXT: s_andn2_b32 s4, 0xf000f, s5
; GFX9-NEXT: s_lshr_b32 s5, s1, 16
; GFX9-NEXT: s_lshl_b32 s1, s1, 0x10001
; GFX9-NEXT: s_lshl_b32 s5, s5, 1
; GFX9-NEXT: s_pack_ll_b32_b16 s1, s1, s5
; GFX9-NEXT: s_lshr_b32 s5, s1, 16
; GFX9-NEXT: s_lshr_b32 s6, s4, 16
; GFX9-NEXT: s_lshl_b32 s1, s1, s4
; GFX9-NEXT: s_lshl_b32 s4, s5, s6
; GFX9-NEXT: s_pack_ll_b32_b16 s1, s1, s4
; GFX9-NEXT: s_lshr_b32 s4, s3, 16
; GFX9-NEXT: s_and_b32 s3, s3, 0xffff
; GFX9-NEXT: s_lshr_b32 s5, s2, 16
; GFX9-NEXT: s_lshr_b32 s2, s3, s2
; GFX9-NEXT: s_lshr_b32 s3, s4, s5
; GFX9-NEXT: s_pack_ll_b32_b16 s2, s2, s3
; GFX9-NEXT: s_or_b32 s1, s1, s2
; GFX9-NEXT: ; return to shader part epilog
;
; GFX10-LABEL: s_fshr_v4i16:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_lshr_b32 s6, s0, 16
; GFX10-NEXT: s_lshl_b32 s0, s0, 0x10001
; GFX10-NEXT: s_lshl_b32 s6, s6, 1
; GFX10-NEXT: s_and_b32 s7, s4, 0xf000f
; GFX10-NEXT: s_pack_ll_b32_b16 s0, s0, s6
; GFX10-NEXT: s_andn2_b32 s4, 0xf000f, s4
; GFX10-NEXT: s_lshr_b32 s6, s0, 16
; GFX10-NEXT: s_lshr_b32 s8, s4, 16
; GFX10-NEXT: s_lshl_b32 s0, s0, s4
; GFX10-NEXT: s_lshl_b32 s4, s6, s8
; GFX10-NEXT: s_lshr_b32 s6, s2, 16
; GFX10-NEXT: s_pack_ll_b32_b16 s0, s0, s4
; GFX10-NEXT: s_lshr_b32 s4, s1, 16
; GFX10-NEXT: s_and_b32 s2, s2, 0xffff
; GFX10-NEXT: s_lshr_b32 s8, s7, 16
; GFX10-NEXT: s_lshl_b32 s1, s1, 0x10001
; GFX10-NEXT: s_lshl_b32 s4, s4, 1
; GFX10-NEXT: s_lshr_b32 s2, s2, s7
; GFX10-NEXT: s_lshr_b32 s6, s6, s8
; GFX10-NEXT: s_pack_ll_b32_b16 s1, s1, s4
; GFX10-NEXT: s_andn2_b32 s4, 0xf000f, s5
; GFX10-NEXT: s_pack_ll_b32_b16 s2, s2, s6
; GFX10-NEXT: s_and_b32 s6, s5, 0xf000f
; GFX10-NEXT: s_lshr_b32 s5, s1, 16
; GFX10-NEXT: s_lshr_b32 s7, s4, 16
; GFX10-NEXT: s_lshl_b32 s1, s1, s4
; GFX10-NEXT: s_lshl_b32 s4, s5, s7
; GFX10-NEXT: s_lshr_b32 s5, s3, 16
; GFX10-NEXT: s_and_b32 s3, s3, 0xffff
; GFX10-NEXT: s_lshr_b32 s7, s6, 16
; GFX10-NEXT: s_lshr_b32 s3, s3, s6
; GFX10-NEXT: s_lshr_b32 s5, s5, s7
; GFX10-NEXT: s_pack_ll_b32_b16 s1, s1, s4
; GFX10-NEXT: s_pack_ll_b32_b16 s3, s3, s5
; GFX10-NEXT: s_or_b32 s0, s0, s2
; GFX10-NEXT: s_or_b32 s1, s1, s3
; GFX10-NEXT: ; return to shader part epilog
;
; GFX11-LABEL: s_fshr_v4i16:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_lshr_b32 s6, s0, 16
; GFX11-NEXT: s_lshl_b32 s0, s0, 0x10001
; GFX11-NEXT: s_lshl_b32 s6, s6, 1
; GFX11-NEXT: s_and_b32 s7, s4, 0xf000f
; GFX11-NEXT: s_pack_ll_b32_b16 s0, s0, s6
; GFX11-NEXT: s_and_not1_b32 s4, 0xf000f, s4
; GFX11-NEXT: s_lshr_b32 s6, s0, 16
; GFX11-NEXT: s_lshr_b32 s8, s4, 16
; GFX11-NEXT: s_lshl_b32 s0, s0, s4
; GFX11-NEXT: s_lshl_b32 s4, s6, s8
; GFX11-NEXT: s_lshr_b32 s6, s2, 16
; GFX11-NEXT: s_pack_ll_b32_b16 s0, s0, s4
; GFX11-NEXT: s_lshr_b32 s4, s1, 16
; GFX11-NEXT: s_and_b32 s2, s2, 0xffff
; GFX11-NEXT: s_lshr_b32 s8, s7, 16
; GFX11-NEXT: s_lshl_b32 s1, s1, 0x10001
; GFX11-NEXT: s_lshl_b32 s4, s4, 1
; GFX11-NEXT: s_lshr_b32 s2, s2, s7
; GFX11-NEXT: s_lshr_b32 s6, s6, s8
; GFX11-NEXT: s_pack_ll_b32_b16 s1, s1, s4
; GFX11-NEXT: s_and_not1_b32 s4, 0xf000f, s5
; GFX11-NEXT: s_pack_ll_b32_b16 s2, s2, s6
; GFX11-NEXT: s_and_b32 s6, s5, 0xf000f
; GFX11-NEXT: s_lshr_b32 s5, s1, 16
; GFX11-NEXT: s_lshr_b32 s7, s4, 16
; GFX11-NEXT: s_lshl_b32 s1, s1, s4
; GFX11-NEXT: s_lshl_b32 s4, s5, s7
; GFX11-NEXT: s_lshr_b32 s5, s3, 16
; GFX11-NEXT: s_and_b32 s3, s3, 0xffff
; GFX11-NEXT: s_lshr_b32 s7, s6, 16
; GFX11-NEXT: s_lshr_b32 s3, s3, s6
; GFX11-NEXT: s_lshr_b32 s5, s5, s7
; GFX11-NEXT: s_pack_ll_b32_b16 s1, s1, s4
; GFX11-NEXT: s_pack_ll_b32_b16 s3, s3, s5
; GFX11-NEXT: s_or_b32 s0, s0, s2
; GFX11-NEXT: s_or_b32 s1, s1, s3
; GFX11-NEXT: ; return to shader part epilog
  %result = call <4 x i16> @llvm.fshr.v4i16(<4 x i16> %lhs, <4 x i16> %rhs, <4 x i16> %amt)
  %cast.result = bitcast <4 x i16> %result to <2 x i32>
  ret <2 x i32> %cast.result
}
|
|
|
|
; fshr of <4 x i16> with variable (VGPR) operands; result bitcast to
; <4 x half> for the return ABI. CHECK lines are autogenerated -- regenerate
; with update_llc_test_checks.py, do not hand-edit.
define <4 x half> @v_fshr_v4i16(<4 x i16> %lhs, <4 x i16> %rhs, <4 x i16> %amt) {
; GFX6-LABEL: v_fshr_v4i16:
; GFX6: ; %bb.0:
; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX6-NEXT: v_lshlrev_b32_e32 v9, 16, v9
; GFX6-NEXT: v_and_b32_e32 v8, 0xffff, v8
; GFX6-NEXT: v_or_b32_e32 v8, v9, v8
; GFX6-NEXT: v_lshlrev_b32_e32 v9, 16, v11
; GFX6-NEXT: v_and_b32_e32 v10, 0xffff, v10
; GFX6-NEXT: v_or_b32_e32 v9, v9, v10
; GFX6-NEXT: v_bfe_u32 v10, v4, 1, 15
; GFX6-NEXT: v_lshlrev_b32_e32 v0, 1, v0
; GFX6-NEXT: v_lshrrev_b32_e32 v10, 14, v10
; GFX6-NEXT: v_or_b32_e32 v0, v0, v10
; GFX6-NEXT: v_bfe_u32 v10, v5, 1, 15
; GFX6-NEXT: v_lshlrev_b32_e32 v1, 1, v1
; GFX6-NEXT: v_lshrrev_b32_e32 v10, 14, v10
; GFX6-NEXT: v_xor_b32_e32 v8, -1, v8
; GFX6-NEXT: v_or_b32_e32 v1, v1, v10
; GFX6-NEXT: v_lshrrev_b32_e32 v10, 16, v8
; GFX6-NEXT: v_and_b32_e32 v11, 15, v8
; GFX6-NEXT: v_xor_b32_e32 v8, -1, v8
; GFX6-NEXT: v_lshlrev_b32_e32 v4, 1, v4
; GFX6-NEXT: v_and_b32_e32 v8, 15, v8
; GFX6-NEXT: v_and_b32_e32 v11, 0xffff, v11
; GFX6-NEXT: v_bfe_u32 v4, v4, 1, 15
; GFX6-NEXT: v_and_b32_e32 v8, 0xffff, v8
; GFX6-NEXT: v_lshlrev_b32_e32 v0, v11, v0
; GFX6-NEXT: v_lshrrev_b32_e32 v4, v8, v4
; GFX6-NEXT: v_or_b32_e32 v0, v0, v4
; GFX6-NEXT: v_and_b32_e32 v4, 15, v10
; GFX6-NEXT: v_xor_b32_e32 v8, -1, v10
; GFX6-NEXT: v_lshlrev_b32_e32 v5, 1, v5
; GFX6-NEXT: v_and_b32_e32 v8, 15, v8
; GFX6-NEXT: v_and_b32_e32 v4, 0xffff, v4
; GFX6-NEXT: v_lshlrev_b32_e32 v1, v4, v1
; GFX6-NEXT: v_bfe_u32 v4, v5, 1, 15
; GFX6-NEXT: v_and_b32_e32 v5, 0xffff, v8
; GFX6-NEXT: v_lshrrev_b32_e32 v4, v5, v4
; GFX6-NEXT: v_or_b32_e32 v1, v1, v4
; GFX6-NEXT: v_bfe_u32 v4, v6, 1, 15
; GFX6-NEXT: v_lshlrev_b32_e32 v2, 1, v2
; GFX6-NEXT: v_lshrrev_b32_e32 v4, 14, v4
; GFX6-NEXT: v_or_b32_e32 v2, v2, v4
; GFX6-NEXT: v_bfe_u32 v4, v7, 1, 15
; GFX6-NEXT: v_lshlrev_b32_e32 v3, 1, v3
; GFX6-NEXT: v_lshrrev_b32_e32 v4, 14, v4
; GFX6-NEXT: v_or_b32_e32 v3, v3, v4
; GFX6-NEXT: v_lshlrev_b32_e32 v4, 1, v6
; GFX6-NEXT: v_xor_b32_e32 v6, -1, v9
; GFX6-NEXT: v_lshlrev_b32_e32 v5, 1, v7
; GFX6-NEXT: v_lshrrev_b32_e32 v7, 16, v6
; GFX6-NEXT: v_and_b32_e32 v8, 15, v6
; GFX6-NEXT: v_xor_b32_e32 v6, -1, v6
; GFX6-NEXT: v_and_b32_e32 v6, 15, v6
; GFX6-NEXT: v_and_b32_e32 v8, 0xffff, v8
; GFX6-NEXT: v_bfe_u32 v4, v4, 1, 15
; GFX6-NEXT: v_and_b32_e32 v6, 0xffff, v6
; GFX6-NEXT: v_lshlrev_b32_e32 v2, v8, v2
; GFX6-NEXT: v_lshrrev_b32_e32 v4, v6, v4
; GFX6-NEXT: v_or_b32_e32 v2, v2, v4
; GFX6-NEXT: v_and_b32_e32 v4, 15, v7
; GFX6-NEXT: v_xor_b32_e32 v6, -1, v7
; GFX6-NEXT: v_and_b32_e32 v6, 15, v6
; GFX6-NEXT: v_and_b32_e32 v4, 0xffff, v4
; GFX6-NEXT: v_lshlrev_b32_e32 v3, v4, v3
; GFX6-NEXT: v_bfe_u32 v4, v5, 1, 15
; GFX6-NEXT: v_and_b32_e32 v5, 0xffff, v6
; GFX6-NEXT: v_lshrrev_b32_e32 v4, v5, v4
; GFX6-NEXT: v_or_b32_e32 v3, v3, v4
; GFX6-NEXT: s_setpc_b64 s[30:31]
;
; GFX8-LABEL: v_fshr_v4i16:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX8-NEXT: v_lshlrev_b16_e32 v6, 1, v0
; GFX8-NEXT: v_lshrrev_b16_e32 v7, 15, v2
; GFX8-NEXT: v_or_b32_e32 v6, v6, v7
; GFX8-NEXT: v_mov_b32_e32 v7, 1
; GFX8-NEXT: v_mov_b32_e32 v8, 15
; GFX8-NEXT: v_lshlrev_b16_sdwa v0, v7, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; GFX8-NEXT: v_lshrrev_b16_sdwa v9, v8, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; GFX8-NEXT: v_xor_b32_e32 v4, -1, v4
; GFX8-NEXT: v_or_b32_e32 v0, v0, v9
; GFX8-NEXT: v_lshlrev_b16_e32 v9, 1, v2
; GFX8-NEXT: v_xor_b32_e32 v11, -1, v4
; GFX8-NEXT: v_and_b32_e32 v10, 15, v4
; GFX8-NEXT: v_and_b32_e32 v11, 15, v11
; GFX8-NEXT: v_lshrrev_b16_e32 v9, 1, v9
; GFX8-NEXT: v_lshlrev_b16_e32 v6, v10, v6
; GFX8-NEXT: v_lshrrev_b16_e32 v9, v11, v9
; GFX8-NEXT: v_mov_b32_e32 v10, -1
; GFX8-NEXT: v_lshlrev_b16_sdwa v2, v7, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; GFX8-NEXT: v_or_b32_e32 v6, v6, v9
; GFX8-NEXT: v_and_b32_sdwa v9, v4, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
; GFX8-NEXT: v_xor_b32_sdwa v4, v4, v10 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
; GFX8-NEXT: v_and_b32_e32 v4, 15, v4
; GFX8-NEXT: v_lshrrev_b16_e32 v2, 1, v2
; GFX8-NEXT: v_lshlrev_b16_e32 v0, v9, v0
; GFX8-NEXT: v_lshrrev_b16_e32 v2, v4, v2
; GFX8-NEXT: v_or_b32_e32 v0, v0, v2
; GFX8-NEXT: v_lshlrev_b16_e32 v2, 1, v1
; GFX8-NEXT: v_lshrrev_b16_e32 v4, 15, v3
; GFX8-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX8-NEXT: v_or_b32_e32 v2, v2, v4
; GFX8-NEXT: v_lshlrev_b16_sdwa v1, v7, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; GFX8-NEXT: v_lshrrev_b16_sdwa v4, v8, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; GFX8-NEXT: v_xor_b32_e32 v5, -1, v5
; GFX8-NEXT: v_lshlrev_b32_e32 v0, 16, v0
; GFX8-NEXT: v_or_b32_e32 v1, v1, v4
; GFX8-NEXT: v_lshlrev_b16_e32 v4, 1, v3
; GFX8-NEXT: v_lshlrev_b16_sdwa v3, v7, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; GFX8-NEXT: v_xor_b32_e32 v7, -1, v5
; GFX8-NEXT: v_or_b32_sdwa v0, v6, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX8-NEXT: v_and_b32_e32 v6, 15, v5
; GFX8-NEXT: v_and_b32_e32 v7, 15, v7
; GFX8-NEXT: v_lshrrev_b16_e32 v4, 1, v4
; GFX8-NEXT: v_lshlrev_b16_e32 v2, v6, v2
; GFX8-NEXT: v_lshrrev_b16_e32 v4, v7, v4
; GFX8-NEXT: v_or_b32_e32 v2, v2, v4
; GFX8-NEXT: v_and_b32_sdwa v4, v5, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
; GFX8-NEXT: v_xor_b32_sdwa v5, v5, v10 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
; GFX8-NEXT: v_and_b32_e32 v5, 15, v5
; GFX8-NEXT: v_lshrrev_b16_e32 v3, 1, v3
; GFX8-NEXT: v_lshlrev_b16_e32 v1, v4, v1
; GFX8-NEXT: v_lshrrev_b16_e32 v3, v5, v3
; GFX8-NEXT: v_or_b32_e32 v1, v1, v3
; GFX8-NEXT: v_and_b32_e32 v1, 0xffff, v1
; GFX8-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; GFX8-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX8-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: v_fshr_v4i16:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_and_b32_e32 v6, 0xf000f, v4
; GFX9-NEXT: v_xor_b32_e32 v4, -1, v4
; GFX9-NEXT: v_and_b32_e32 v4, 0xf000f, v4
; GFX9-NEXT: v_pk_lshlrev_b16 v0, 1, v0 op_sel_hi:[0,1]
; GFX9-NEXT: v_pk_lshlrev_b16 v0, v4, v0
; GFX9-NEXT: v_pk_lshrrev_b16 v2, v6, v2
; GFX9-NEXT: v_xor_b32_e32 v4, -1, v5
; GFX9-NEXT: v_or_b32_e32 v0, v0, v2
; GFX9-NEXT: v_and_b32_e32 v2, 0xf000f, v5
; GFX9-NEXT: v_and_b32_e32 v4, 0xf000f, v4
; GFX9-NEXT: v_pk_lshlrev_b16 v1, 1, v1 op_sel_hi:[0,1]
; GFX9-NEXT: v_pk_lshlrev_b16 v1, v4, v1
; GFX9-NEXT: v_pk_lshrrev_b16 v2, v2, v3
; GFX9-NEXT: v_or_b32_e32 v1, v1, v2
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: v_fshr_v4i16:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_xor_b32_e32 v6, -1, v4
; GFX10-NEXT: v_xor_b32_e32 v7, -1, v5
; GFX10-NEXT: v_and_b32_e32 v4, 0xf000f, v4
; GFX10-NEXT: v_pk_lshlrev_b16 v0, 1, v0 op_sel_hi:[0,1]
; GFX10-NEXT: v_and_b32_e32 v5, 0xf000f, v5
; GFX10-NEXT: v_and_b32_e32 v6, 0xf000f, v6
; GFX10-NEXT: v_pk_lshlrev_b16 v1, 1, v1 op_sel_hi:[0,1]
; GFX10-NEXT: v_and_b32_e32 v7, 0xf000f, v7
; GFX10-NEXT: v_pk_lshrrev_b16 v2, v4, v2
; GFX10-NEXT: v_pk_lshrrev_b16 v3, v5, v3
; GFX10-NEXT: v_pk_lshlrev_b16 v0, v6, v0
; GFX10-NEXT: v_pk_lshlrev_b16 v1, v7, v1
; GFX10-NEXT: v_or_b32_e32 v0, v0, v2
; GFX10-NEXT: v_or_b32_e32 v1, v1, v3
; GFX10-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-LABEL: v_fshr_v4i16:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_xor_b32_e32 v6, -1, v4
; GFX11-NEXT: v_xor_b32_e32 v7, -1, v5
; GFX11-NEXT: v_and_b32_e32 v4, 0xf000f, v4
; GFX11-NEXT: v_pk_lshlrev_b16 v0, 1, v0 op_sel_hi:[0,1]
; GFX11-NEXT: v_and_b32_e32 v5, 0xf000f, v5
; GFX11-NEXT: v_and_b32_e32 v6, 0xf000f, v6
; GFX11-NEXT: v_pk_lshlrev_b16 v1, 1, v1 op_sel_hi:[0,1]
; GFX11-NEXT: v_and_b32_e32 v7, 0xf000f, v7
; GFX11-NEXT: v_pk_lshrrev_b16 v2, v4, v2
; GFX11-NEXT: v_pk_lshrrev_b16 v3, v5, v3
; GFX11-NEXT: v_pk_lshlrev_b16 v0, v6, v0
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-NEXT: v_pk_lshlrev_b16 v1, v7, v1
; GFX11-NEXT: v_or_b32_e32 v0, v0, v2
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
; GFX11-NEXT: v_or_b32_e32 v1, v1, v3
; GFX11-NEXT: s_setpc_b64 s[30:31]
  %result = call <4 x i16> @llvm.fshr.v4i16(<4 x i16> %lhs, <4 x i16> %rhs, <4 x i16> %amt)
  %cast.result = bitcast <4 x i16> %result to <4 x half>
  ret <4 x half> %cast.result
}
|
|
|
|
; i64 fshr with a uniform variable amount. CHECK lines are autogenerated --
; regenerate with update_llc_test_checks.py, do not hand-edit.
define amdgpu_ps i64 @s_fshr_i64(i64 inreg %lhs, i64 inreg %rhs, i64 inreg %amt) {
; GFX6-LABEL: s_fshr_i64:
; GFX6: ; %bb.0:
; GFX6-NEXT: s_lshl_b64 s[0:1], s[0:1], 1
; GFX6-NEXT: s_not_b32 s5, s4
; GFX6-NEXT: s_lshl_b64 s[0:1], s[0:1], s5
; GFX6-NEXT: s_lshr_b64 s[2:3], s[2:3], s4
; GFX6-NEXT: s_or_b64 s[0:1], s[0:1], s[2:3]
; GFX6-NEXT: ; return to shader part epilog
;
; GFX8-LABEL: s_fshr_i64:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_lshl_b64 s[0:1], s[0:1], 1
; GFX8-NEXT: s_not_b32 s5, s4
; GFX8-NEXT: s_lshl_b64 s[0:1], s[0:1], s5
; GFX8-NEXT: s_lshr_b64 s[2:3], s[2:3], s4
; GFX8-NEXT: s_or_b64 s[0:1], s[0:1], s[2:3]
; GFX8-NEXT: ; return to shader part epilog
;
; GFX9-LABEL: s_fshr_i64:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_lshl_b64 s[0:1], s[0:1], 1
; GFX9-NEXT: s_not_b32 s5, s4
; GFX9-NEXT: s_lshl_b64 s[0:1], s[0:1], s5
; GFX9-NEXT: s_lshr_b64 s[2:3], s[2:3], s4
; GFX9-NEXT: s_or_b64 s[0:1], s[0:1], s[2:3]
; GFX9-NEXT: ; return to shader part epilog
;
; GFX10-LABEL: s_fshr_i64:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_lshl_b64 s[0:1], s[0:1], 1
; GFX10-NEXT: s_not_b32 s5, s4
; GFX10-NEXT: s_lshr_b64 s[2:3], s[2:3], s4
; GFX10-NEXT: s_lshl_b64 s[0:1], s[0:1], s5
; GFX10-NEXT: s_or_b64 s[0:1], s[0:1], s[2:3]
; GFX10-NEXT: ; return to shader part epilog
;
; GFX11-LABEL: s_fshr_i64:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_lshl_b64 s[0:1], s[0:1], 1
; GFX11-NEXT: s_not_b32 s5, s4
; GFX11-NEXT: s_lshr_b64 s[2:3], s[2:3], s4
; GFX11-NEXT: s_lshl_b64 s[0:1], s[0:1], s5
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_or_b64 s[0:1], s[0:1], s[2:3]
; GFX11-NEXT: ; return to shader part epilog
  %result = call i64 @llvm.fshr.i64(i64 %lhs, i64 %rhs, i64 %amt)
  ret i64 %result
}
|
|
|
|
; i64 fshr by the constant amount 5 (uniform operands). CHECK lines are
; autogenerated -- regenerate with update_llc_test_checks.py, do not hand-edit.
define amdgpu_ps i64 @s_fshr_i64_5(i64 inreg %lhs, i64 inreg %rhs) {
; GCN-LABEL: s_fshr_i64_5:
; GCN: ; %bb.0:
; GCN-NEXT: s_lshl_b32 s1, s0, 27
; GCN-NEXT: s_mov_b32 s0, 0
; GCN-NEXT: s_lshr_b64 s[2:3], s[2:3], 5
; GCN-NEXT: s_or_b64 s[0:1], s[0:1], s[2:3]
; GCN-NEXT: ; return to shader part epilog
;
; GFX11-LABEL: s_fshr_i64_5:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_lshl_b32 s1, s0, 27
; GFX11-NEXT: s_mov_b32 s0, 0
; GFX11-NEXT: s_lshr_b64 s[2:3], s[2:3], 5
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_or_b64 s[0:1], s[0:1], s[2:3]
; GFX11-NEXT: ; return to shader part epilog
  %result = call i64 @llvm.fshr.i64(i64 %lhs, i64 %rhs, i64 5)
  ret i64 %result
}
|
|
|
|
; i64 fshr by exactly 32 -- expected to lower to plain register moves (a
; half-word swap), no shifts. CHECK lines are autogenerated -- regenerate
; with update_llc_test_checks.py, do not hand-edit.
define amdgpu_ps i64 @s_fshr_i64_32(i64 inreg %lhs, i64 inreg %rhs) {
; GCN-LABEL: s_fshr_i64_32:
; GCN: ; %bb.0:
; GCN-NEXT: s_mov_b32 s1, s0
; GCN-NEXT: s_mov_b32 s0, 0
; GCN-NEXT: s_mov_b32 s2, s3
; GCN-NEXT: s_mov_b32 s3, s0
; GCN-NEXT: s_or_b64 s[0:1], s[0:1], s[2:3]
; GCN-NEXT: ; return to shader part epilog
;
; GFX11-LABEL: s_fshr_i64_32:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_mov_b32 s1, s0
; GFX11-NEXT: s_mov_b32 s0, 0
; GFX11-NEXT: s_mov_b32 s2, s3
; GFX11-NEXT: s_mov_b32 s3, s0
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_or_b64 s[0:1], s[0:1], s[2:3]
; GFX11-NEXT: ; return to shader part epilog
  %result = call i64 @llvm.fshr.i64(i64 %lhs, i64 %rhs, i64 32)
  ret i64 %result
}
|
|
|
|
; i64 fshr by the constant amount 48 (uniform operands). CHECK lines are
; autogenerated -- regenerate with update_llc_test_checks.py, do not hand-edit.
define amdgpu_ps i64 @s_fshr_i64_48(i64 inreg %lhs, i64 inreg %rhs) {
; GCN-LABEL: s_fshr_i64_48:
; GCN: ; %bb.0:
; GCN-NEXT: s_lshl_b64 s[0:1], s[0:1], 16
; GCN-NEXT: s_lshr_b32 s2, s3, 16
; GCN-NEXT: s_mov_b32 s3, 0
; GCN-NEXT: s_or_b64 s[0:1], s[0:1], s[2:3]
; GCN-NEXT: ; return to shader part epilog
;
; GFX11-LABEL: s_fshr_i64_48:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_lshl_b64 s[0:1], s[0:1], 16
; GFX11-NEXT: s_lshr_b32 s2, s3, 16
; GFX11-NEXT: s_mov_b32 s3, 0
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_or_b64 s[0:1], s[0:1], s[2:3]
; GFX11-NEXT: ; return to shader part epilog
  %result = call i64 @llvm.fshr.i64(i64 %lhs, i64 %rhs, i64 48)
  ret i64 %result
}
|
|
|
|
; i64 fshr with a variable (VGPR) amount. CHECK lines are autogenerated --
; regenerate with update_llc_test_checks.py, do not hand-edit.
define i64 @v_fshr_i64(i64 %lhs, i64 %rhs, i64 %amt) {
; GFX6-LABEL: v_fshr_i64:
; GFX6: ; %bb.0:
; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX6-NEXT: v_lshl_b64 v[0:1], v[0:1], 1
; GFX6-NEXT: v_not_b32_e32 v5, v4
; GFX6-NEXT: v_and_b32_e32 v5, 63, v5
; GFX6-NEXT: v_and_b32_e32 v4, 63, v4
; GFX6-NEXT: v_lshl_b64 v[0:1], v[0:1], v5
; GFX6-NEXT: v_lshr_b64 v[2:3], v[2:3], v4
; GFX6-NEXT: v_or_b32_e32 v0, v0, v2
; GFX6-NEXT: v_or_b32_e32 v1, v1, v3
; GFX6-NEXT: s_setpc_b64 s[30:31]
;
; GFX8-LABEL: v_fshr_i64:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX8-NEXT: v_lshlrev_b64 v[0:1], 1, v[0:1]
; GFX8-NEXT: v_not_b32_e32 v5, v4
; GFX8-NEXT: v_and_b32_e32 v5, 63, v5
; GFX8-NEXT: v_and_b32_e32 v4, 63, v4
; GFX8-NEXT: v_lshlrev_b64 v[0:1], v5, v[0:1]
; GFX8-NEXT: v_lshrrev_b64 v[2:3], v4, v[2:3]
; GFX8-NEXT: v_or_b32_e32 v0, v0, v2
; GFX8-NEXT: v_or_b32_e32 v1, v1, v3
; GFX8-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: v_fshr_i64:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_lshlrev_b64 v[0:1], 1, v[0:1]
; GFX9-NEXT: v_not_b32_e32 v5, v4
; GFX9-NEXT: v_and_b32_e32 v5, 63, v5
; GFX9-NEXT: v_and_b32_e32 v4, 63, v4
; GFX9-NEXT: v_lshlrev_b64 v[0:1], v5, v[0:1]
; GFX9-NEXT: v_lshrrev_b64 v[2:3], v4, v[2:3]
; GFX9-NEXT: v_or_b32_e32 v0, v0, v2
; GFX9-NEXT: v_or_b32_e32 v1, v1, v3
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: v_fshr_i64:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_not_b32_e32 v5, v4
; GFX10-NEXT: v_lshlrev_b64 v[0:1], 1, v[0:1]
; GFX10-NEXT: v_and_b32_e32 v4, 63, v4
; GFX10-NEXT: v_and_b32_e32 v5, 63, v5
; GFX10-NEXT: v_lshrrev_b64 v[2:3], v4, v[2:3]
; GFX10-NEXT: v_lshlrev_b64 v[0:1], v5, v[0:1]
; GFX10-NEXT: v_or_b32_e32 v0, v0, v2
; GFX10-NEXT: v_or_b32_e32 v1, v1, v3
; GFX10-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-LABEL: v_fshr_i64:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_not_b32_e32 v5, v4
; GFX11-NEXT: v_lshlrev_b64 v[0:1], 1, v[0:1]
; GFX11-NEXT: v_and_b32_e32 v4, 63, v4
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-NEXT: v_and_b32_e32 v5, 63, v5
; GFX11-NEXT: v_lshrrev_b64 v[2:3], v4, v[2:3]
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_lshlrev_b64 v[0:1], v5, v[0:1]
; GFX11-NEXT: v_or_b32_e32 v0, v0, v2
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
; GFX11-NEXT: v_or_b32_e32 v1, v1, v3
; GFX11-NEXT: s_setpc_b64 s[30:31]
  %result = call i64 @llvm.fshr.i64(i64 %lhs, i64 %rhs, i64 %amt)
  ret i64 %result
}
|
|
|
|
; fshr.i64 by the constant 5: folds to (lhs << 59) | (rhs >> 5); GFX9+ use
; the fused v_lshl_or_b32 for the high half.
; CHECK lines below are autogenerated by update_llc_test_checks.py - do not edit by hand.
define i64 @v_fshr_i64_5(i64 %lhs, i64 %rhs) {
; GFX6-LABEL: v_fshr_i64_5:
; GFX6: ; %bb.0:
; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX6-NEXT: v_mov_b32_e32 v4, v0
; GFX6-NEXT: v_lshr_b64 v[0:1], v[2:3], 5
; GFX6-NEXT: v_lshlrev_b32_e32 v2, 27, v4
; GFX6-NEXT: v_or_b32_e32 v1, v2, v1
; GFX6-NEXT: s_setpc_b64 s[30:31]
;
; GFX8-LABEL: v_fshr_i64_5:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v4, v0
; GFX8-NEXT: v_lshrrev_b64 v[0:1], 5, v[2:3]
; GFX8-NEXT: v_lshlrev_b32_e32 v2, 27, v4
; GFX8-NEXT: v_or_b32_e32 v1, v2, v1
; GFX8-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: v_fshr_i64_5:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v4, v0
; GFX9-NEXT: v_lshrrev_b64 v[0:1], 5, v[2:3]
; GFX9-NEXT: v_lshl_or_b32 v1, v4, 27, v1
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: v_fshr_i64_5:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_mov_b32_e32 v4, v0
; GFX10-NEXT: v_lshrrev_b64 v[0:1], 5, v[2:3]
; GFX10-NEXT: v_lshl_or_b32 v1, v4, 27, v1
; GFX10-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-LABEL: v_fshr_i64_5:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_mov_b32_e32 v4, v0
; GFX11-NEXT: v_lshrrev_b64 v[0:1], 5, v[2:3]
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-NEXT: v_lshl_or_b32 v1, v4, 27, v1
; GFX11-NEXT: s_setpc_b64 s[30:31]
  %result = call i64 @llvm.fshr.i64(i64 %lhs, i64 %rhs, i64 5)
  ret i64 %result
}
|
|
|
|
; fshr.i64 by exactly 32: reduces to pure register moves (result halves are
; lhs.lo and rhs.hi), with no shift instructions emitted.
; CHECK lines below are autogenerated by update_llc_test_checks.py - do not edit by hand.
define i64 @v_fshr_i64_32(i64 %lhs, i64 %rhs) {
; GCN-LABEL: v_fshr_i64_32:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN-NEXT: v_mov_b32_e32 v1, v0
; GCN-NEXT: v_mov_b32_e32 v0, v3
; GCN-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-LABEL: v_fshr_i64_32:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_dual_mov_b32 v1, v0 :: v_dual_mov_b32 v0, v3
; GFX11-NEXT: s_setpc_b64 s[30:31]
  %result = call i64 @llvm.fshr.i64(i64 %lhs, i64 %rhs, i64 32)
  ret i64 %result
}
|
|
|
|
; fshr.i64 by the constant 48: folds to (lhs << 16) | (rhs >> 48); GFX8-10
; merge the OR of rhs's high half via an SDWA word select.
; CHECK lines below are autogenerated by update_llc_test_checks.py - do not edit by hand.
define i64 @v_fshr_i64_48(i64 %lhs, i64 %rhs) {
; GFX6-LABEL: v_fshr_i64_48:
; GFX6: ; %bb.0:
; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX6-NEXT: v_lshl_b64 v[0:1], v[0:1], 16
; GFX6-NEXT: v_lshrrev_b32_e32 v2, 16, v3
; GFX6-NEXT: v_or_b32_e32 v0, v0, v2
; GFX6-NEXT: s_setpc_b64 s[30:31]
;
; GFX8-LABEL: v_fshr_i64_48:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX8-NEXT: v_lshlrev_b64 v[0:1], 16, v[0:1]
; GFX8-NEXT: v_or_b32_sdwa v0, v0, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; GFX8-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: v_fshr_i64_48:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_lshlrev_b64 v[0:1], 16, v[0:1]
; GFX9-NEXT: v_or_b32_sdwa v0, v0, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: v_fshr_i64_48:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_lshlrev_b64 v[0:1], 16, v[0:1]
; GFX10-NEXT: v_or_b32_sdwa v0, v0, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; GFX10-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-LABEL: v_fshr_i64_48:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_lshlrev_b64 v[0:1], 16, v[0:1]
; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v3
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-NEXT: v_or_b32_e32 v0, v0, v2
; GFX11-NEXT: s_setpc_b64 s[30:31]
  %result = call i64 @llvm.fshr.i64(i64 %lhs, i64 %rhs, i64 48)
  ret i64 %result
}
|
|
|
|
; fshr.i64 with scalar operands but a divergent (VGPR) shift amount; the
; result is bitcast to <2 x float> to satisfy the amdgpu_ps return convention.
; CHECK lines below are autogenerated by update_llc_test_checks.py - do not edit by hand.
define amdgpu_ps <2 x float> @v_fshr_i64_ssv(i64 inreg %lhs, i64 inreg %rhs, i64 %amt) {
; GFX6-LABEL: v_fshr_i64_ssv:
; GFX6: ; %bb.0:
; GFX6-NEXT: v_not_b32_e32 v1, v0
; GFX6-NEXT: s_lshl_b64 s[0:1], s[0:1], 1
; GFX6-NEXT: v_and_b32_e32 v1, 63, v1
; GFX6-NEXT: v_and_b32_e32 v0, 63, v0
; GFX6-NEXT: v_lshl_b64 v[1:2], s[0:1], v1
; GFX6-NEXT: v_lshr_b64 v[3:4], s[2:3], v0
; GFX6-NEXT: v_or_b32_e32 v0, v1, v3
; GFX6-NEXT: v_or_b32_e32 v1, v2, v4
; GFX6-NEXT: ; return to shader part epilog
;
; GFX8-LABEL: v_fshr_i64_ssv:
; GFX8: ; %bb.0:
; GFX8-NEXT: v_not_b32_e32 v1, v0
; GFX8-NEXT: s_lshl_b64 s[0:1], s[0:1], 1
; GFX8-NEXT: v_and_b32_e32 v1, 63, v1
; GFX8-NEXT: v_and_b32_e32 v0, 63, v0
; GFX8-NEXT: v_lshlrev_b64 v[1:2], v1, s[0:1]
; GFX8-NEXT: v_lshrrev_b64 v[3:4], v0, s[2:3]
; GFX8-NEXT: v_or_b32_e32 v0, v1, v3
; GFX8-NEXT: v_or_b32_e32 v1, v2, v4
; GFX8-NEXT: ; return to shader part epilog
;
; GFX9-LABEL: v_fshr_i64_ssv:
; GFX9: ; %bb.0:
; GFX9-NEXT: v_not_b32_e32 v1, v0
; GFX9-NEXT: s_lshl_b64 s[0:1], s[0:1], 1
; GFX9-NEXT: v_and_b32_e32 v1, 63, v1
; GFX9-NEXT: v_and_b32_e32 v0, 63, v0
; GFX9-NEXT: v_lshlrev_b64 v[1:2], v1, s[0:1]
; GFX9-NEXT: v_lshrrev_b64 v[3:4], v0, s[2:3]
; GFX9-NEXT: v_or_b32_e32 v0, v1, v3
; GFX9-NEXT: v_or_b32_e32 v1, v2, v4
; GFX9-NEXT: ; return to shader part epilog
;
; GFX10-LABEL: v_fshr_i64_ssv:
; GFX10: ; %bb.0:
; GFX10-NEXT: v_not_b32_e32 v1, v0
; GFX10-NEXT: v_and_b32_e32 v0, 63, v0
; GFX10-NEXT: s_lshl_b64 s[0:1], s[0:1], 1
; GFX10-NEXT: v_and_b32_e32 v2, 63, v1
; GFX10-NEXT: v_lshrrev_b64 v[0:1], v0, s[2:3]
; GFX10-NEXT: v_lshlrev_b64 v[2:3], v2, s[0:1]
; GFX10-NEXT: v_or_b32_e32 v0, v2, v0
; GFX10-NEXT: v_or_b32_e32 v1, v3, v1
; GFX10-NEXT: ; return to shader part epilog
;
; GFX11-LABEL: v_fshr_i64_ssv:
; GFX11: ; %bb.0:
; GFX11-NEXT: v_not_b32_e32 v1, v0
; GFX11-NEXT: v_and_b32_e32 v0, 63, v0
; GFX11-NEXT: s_lshl_b64 s[0:1], s[0:1], 1
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-NEXT: v_and_b32_e32 v2, 63, v1
; GFX11-NEXT: v_lshrrev_b64 v[0:1], v0, s[2:3]
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_lshlrev_b64 v[2:3], v2, s[0:1]
; GFX11-NEXT: v_or_b32_e32 v0, v2, v0
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
; GFX11-NEXT: v_or_b32_e32 v1, v3, v1
; GFX11-NEXT: ; return to shader part epilog
  %result = call i64 @llvm.fshr.i64(i64 %lhs, i64 %rhs, i64 %amt)
  %cast = bitcast i64 %result to <2 x float>
  ret <2 x float> %cast
}
|
|
|
|
; fshr.i64 with scalar lhs/amt and a divergent (VGPR) rhs: the left shift of
; lhs stays on the SALU while the right shift of rhs uses the VALU.
; CHECK lines below are autogenerated by update_llc_test_checks.py - do not edit by hand.
define amdgpu_ps <2 x float> @v_fshr_i64_svs(i64 inreg %lhs, i64 %rhs, i64 inreg %amt) {
; GFX6-LABEL: v_fshr_i64_svs:
; GFX6: ; %bb.0:
; GFX6-NEXT: s_not_b32 s3, s2
; GFX6-NEXT: s_and_b32 s2, s2, 63
; GFX6-NEXT: s_lshl_b64 s[0:1], s[0:1], 1
; GFX6-NEXT: v_lshr_b64 v[0:1], v[0:1], s2
; GFX6-NEXT: s_lshl_b64 s[0:1], s[0:1], s3
; GFX6-NEXT: v_or_b32_e32 v0, s0, v0
; GFX6-NEXT: v_or_b32_e32 v1, s1, v1
; GFX6-NEXT: ; return to shader part epilog
;
; GFX8-LABEL: v_fshr_i64_svs:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_not_b32 s3, s2
; GFX8-NEXT: s_and_b32 s2, s2, 63
; GFX8-NEXT: s_lshl_b64 s[0:1], s[0:1], 1
; GFX8-NEXT: v_lshrrev_b64 v[0:1], s2, v[0:1]
; GFX8-NEXT: s_lshl_b64 s[0:1], s[0:1], s3
; GFX8-NEXT: v_or_b32_e32 v0, s0, v0
; GFX8-NEXT: v_or_b32_e32 v1, s1, v1
; GFX8-NEXT: ; return to shader part epilog
;
; GFX9-LABEL: v_fshr_i64_svs:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_not_b32 s3, s2
; GFX9-NEXT: s_and_b32 s2, s2, 63
; GFX9-NEXT: s_lshl_b64 s[0:1], s[0:1], 1
; GFX9-NEXT: v_lshrrev_b64 v[0:1], s2, v[0:1]
; GFX9-NEXT: s_lshl_b64 s[0:1], s[0:1], s3
; GFX9-NEXT: v_or_b32_e32 v0, s0, v0
; GFX9-NEXT: v_or_b32_e32 v1, s1, v1
; GFX9-NEXT: ; return to shader part epilog
;
; GFX10-LABEL: v_fshr_i64_svs:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_and_b32 s3, s2, 63
; GFX10-NEXT: s_lshl_b64 s[0:1], s[0:1], 1
; GFX10-NEXT: v_lshrrev_b64 v[0:1], s3, v[0:1]
; GFX10-NEXT: s_not_b32 s2, s2
; GFX10-NEXT: s_lshl_b64 s[0:1], s[0:1], s2
; GFX10-NEXT: v_or_b32_e32 v0, s0, v0
; GFX10-NEXT: v_or_b32_e32 v1, s1, v1
; GFX10-NEXT: ; return to shader part epilog
;
; GFX11-LABEL: v_fshr_i64_svs:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_and_b32 s3, s2, 63
; GFX11-NEXT: s_lshl_b64 s[0:1], s[0:1], 1
; GFX11-NEXT: v_lshrrev_b64 v[0:1], s3, v[0:1]
; GFX11-NEXT: s_not_b32 s2, s2
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_lshl_b64 s[0:1], s[0:1], s2
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
; GFX11-NEXT: v_or_b32_e32 v0, s0, v0
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
; GFX11-NEXT: v_or_b32_e32 v1, s1, v1
; GFX11-NEXT: ; return to shader part epilog
  %result = call i64 @llvm.fshr.i64(i64 %lhs, i64 %rhs, i64 %amt)
  %cast = bitcast i64 %result to <2 x float>
  ret <2 x float> %cast
}
|
|
|
|
; fshr.i64 with a divergent (VGPR) lhs and scalar rhs/amt: the rhs right
; shift stays on the SALU (note s_andn2/s_and_not1 folding 63 & ~amt).
; CHECK lines below are autogenerated by update_llc_test_checks.py - do not edit by hand.
define amdgpu_ps <2 x float> @v_fshr_i64_vss(i64 %lhs, i64 inreg %rhs, i64 inreg %amt) {
; GFX6-LABEL: v_fshr_i64_vss:
; GFX6: ; %bb.0:
; GFX6-NEXT: v_lshl_b64 v[0:1], v[0:1], 1
; GFX6-NEXT: s_andn2_b32 s3, 63, s2
; GFX6-NEXT: v_lshl_b64 v[0:1], v[0:1], s3
; GFX6-NEXT: s_lshr_b64 s[0:1], s[0:1], s2
; GFX6-NEXT: v_or_b32_e32 v0, s0, v0
; GFX6-NEXT: v_or_b32_e32 v1, s1, v1
; GFX6-NEXT: ; return to shader part epilog
;
; GFX8-LABEL: v_fshr_i64_vss:
; GFX8: ; %bb.0:
; GFX8-NEXT: v_lshlrev_b64 v[0:1], 1, v[0:1]
; GFX8-NEXT: s_andn2_b32 s3, 63, s2
; GFX8-NEXT: v_lshlrev_b64 v[0:1], s3, v[0:1]
; GFX8-NEXT: s_lshr_b64 s[0:1], s[0:1], s2
; GFX8-NEXT: v_or_b32_e32 v0, s0, v0
; GFX8-NEXT: v_or_b32_e32 v1, s1, v1
; GFX8-NEXT: ; return to shader part epilog
;
; GFX9-LABEL: v_fshr_i64_vss:
; GFX9: ; %bb.0:
; GFX9-NEXT: v_lshlrev_b64 v[0:1], 1, v[0:1]
; GFX9-NEXT: s_andn2_b32 s3, 63, s2
; GFX9-NEXT: v_lshlrev_b64 v[0:1], s3, v[0:1]
; GFX9-NEXT: s_lshr_b64 s[0:1], s[0:1], s2
; GFX9-NEXT: v_or_b32_e32 v0, s0, v0
; GFX9-NEXT: v_or_b32_e32 v1, s1, v1
; GFX9-NEXT: ; return to shader part epilog
;
; GFX10-LABEL: v_fshr_i64_vss:
; GFX10: ; %bb.0:
; GFX10-NEXT: v_lshlrev_b64 v[0:1], 1, v[0:1]
; GFX10-NEXT: s_andn2_b32 s3, 63, s2
; GFX10-NEXT: s_lshr_b64 s[0:1], s[0:1], s2
; GFX10-NEXT: v_lshlrev_b64 v[0:1], s3, v[0:1]
; GFX10-NEXT: v_or_b32_e32 v0, s0, v0
; GFX10-NEXT: v_or_b32_e32 v1, s1, v1
; GFX10-NEXT: ; return to shader part epilog
;
; GFX11-LABEL: v_fshr_i64_vss:
; GFX11: ; %bb.0:
; GFX11-NEXT: v_lshlrev_b64 v[0:1], 1, v[0:1]
; GFX11-NEXT: s_and_not1_b32 s3, 63, s2
; GFX11-NEXT: s_lshr_b64 s[0:1], s[0:1], s2
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_lshlrev_b64 v[0:1], s3, v[0:1]
; GFX11-NEXT: v_or_b32_e32 v0, s0, v0
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
; GFX11-NEXT: v_or_b32_e32 v1, s1, v1
; GFX11-NEXT: ; return to shader part epilog
  %result = call i64 @llvm.fshr.i64(i64 %lhs, i64 %rhs, i64 %amt)
  %cast = bitcast i64 %result to <2 x float>
  ret <2 x float> %cast
}
|
|
|
|
; fshr.v2i64 with all-scalar operands: the two lanes are scalarized into two
; independent shift/or sequences on the SALU.
; CHECK lines below are autogenerated by update_llc_test_checks.py - do not edit by hand.
define amdgpu_ps <2 x i64> @s_fshr_v2i64(<2 x i64> inreg %lhs, <2 x i64> inreg %rhs, <2 x i64> inreg %amt) {
; GFX6-LABEL: s_fshr_v2i64:
; GFX6: ; %bb.0:
; GFX6-NEXT: s_lshl_b64 s[0:1], s[0:1], 1
; GFX6-NEXT: s_not_b32 s9, s8
; GFX6-NEXT: s_lshl_b64 s[0:1], s[0:1], s9
; GFX6-NEXT: s_lshr_b64 s[4:5], s[4:5], s8
; GFX6-NEXT: s_or_b64 s[0:1], s[0:1], s[4:5]
; GFX6-NEXT: s_lshl_b64 s[2:3], s[2:3], 1
; GFX6-NEXT: s_not_b32 s4, s10
; GFX6-NEXT: s_lshl_b64 s[2:3], s[2:3], s4
; GFX6-NEXT: s_lshr_b64 s[4:5], s[6:7], s10
; GFX6-NEXT: s_or_b64 s[2:3], s[2:3], s[4:5]
; GFX6-NEXT: ; return to shader part epilog
;
; GFX8-LABEL: s_fshr_v2i64:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_lshl_b64 s[0:1], s[0:1], 1
; GFX8-NEXT: s_not_b32 s9, s8
; GFX8-NEXT: s_lshl_b64 s[0:1], s[0:1], s9
; GFX8-NEXT: s_lshr_b64 s[4:5], s[4:5], s8
; GFX8-NEXT: s_or_b64 s[0:1], s[0:1], s[4:5]
; GFX8-NEXT: s_lshl_b64 s[2:3], s[2:3], 1
; GFX8-NEXT: s_not_b32 s4, s10
; GFX8-NEXT: s_lshl_b64 s[2:3], s[2:3], s4
; GFX8-NEXT: s_lshr_b64 s[4:5], s[6:7], s10
; GFX8-NEXT: s_or_b64 s[2:3], s[2:3], s[4:5]
; GFX8-NEXT: ; return to shader part epilog
;
; GFX9-LABEL: s_fshr_v2i64:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_lshl_b64 s[0:1], s[0:1], 1
; GFX9-NEXT: s_not_b32 s9, s8
; GFX9-NEXT: s_lshl_b64 s[0:1], s[0:1], s9
; GFX9-NEXT: s_lshr_b64 s[4:5], s[4:5], s8
; GFX9-NEXT: s_or_b64 s[0:1], s[0:1], s[4:5]
; GFX9-NEXT: s_lshl_b64 s[2:3], s[2:3], 1
; GFX9-NEXT: s_not_b32 s4, s10
; GFX9-NEXT: s_lshl_b64 s[2:3], s[2:3], s4
; GFX9-NEXT: s_lshr_b64 s[4:5], s[6:7], s10
; GFX9-NEXT: s_or_b64 s[2:3], s[2:3], s[4:5]
; GFX9-NEXT: ; return to shader part epilog
;
; GFX10-LABEL: s_fshr_v2i64:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_lshl_b64 s[0:1], s[0:1], 1
; GFX10-NEXT: s_not_b32 s9, s8
; GFX10-NEXT: s_lshl_b64 s[2:3], s[2:3], 1
; GFX10-NEXT: s_lshl_b64 s[0:1], s[0:1], s9
; GFX10-NEXT: s_not_b32 s9, s10
; GFX10-NEXT: s_lshr_b64 s[4:5], s[4:5], s8
; GFX10-NEXT: s_lshl_b64 s[2:3], s[2:3], s9
; GFX10-NEXT: s_lshr_b64 s[6:7], s[6:7], s10
; GFX10-NEXT: s_or_b64 s[0:1], s[0:1], s[4:5]
; GFX10-NEXT: s_or_b64 s[2:3], s[2:3], s[6:7]
; GFX10-NEXT: ; return to shader part epilog
;
; GFX11-LABEL: s_fshr_v2i64:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_lshl_b64 s[0:1], s[0:1], 1
; GFX11-NEXT: s_not_b32 s9, s8
; GFX11-NEXT: s_lshl_b64 s[2:3], s[2:3], 1
; GFX11-NEXT: s_lshl_b64 s[0:1], s[0:1], s9
; GFX11-NEXT: s_not_b32 s9, s10
; GFX11-NEXT: s_lshr_b64 s[4:5], s[4:5], s8
; GFX11-NEXT: s_lshl_b64 s[2:3], s[2:3], s9
; GFX11-NEXT: s_lshr_b64 s[6:7], s[6:7], s10
; GFX11-NEXT: s_or_b64 s[0:1], s[0:1], s[4:5]
; GFX11-NEXT: s_or_b64 s[2:3], s[2:3], s[6:7]
; GFX11-NEXT: ; return to shader part epilog
  %result = call <2 x i64> @llvm.fshr.v2i64(<2 x i64> %lhs, <2 x i64> %rhs, <2 x i64> %amt)
  ret <2 x i64> %result
}
|
|
|
|
; fshr.v2i64 with all-divergent operands: scalarized into two lanes of the
; same VALU (lhs << 1 << (~amt & 63)) | (rhs >> (amt & 63)) expansion.
; CHECK lines below are autogenerated by update_llc_test_checks.py - do not edit by hand.
define <2 x i64> @v_fshr_v2i64(<2 x i64> %lhs, <2 x i64> %rhs, <2 x i64> %amt) {
; GFX6-LABEL: v_fshr_v2i64:
; GFX6: ; %bb.0:
; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX6-NEXT: v_lshl_b64 v[0:1], v[0:1], 1
; GFX6-NEXT: v_not_b32_e32 v9, v8
; GFX6-NEXT: v_and_b32_e32 v9, 63, v9
; GFX6-NEXT: v_and_b32_e32 v8, 63, v8
; GFX6-NEXT: v_lshl_b64 v[0:1], v[0:1], v9
; GFX6-NEXT: v_lshr_b64 v[4:5], v[4:5], v8
; GFX6-NEXT: v_lshl_b64 v[2:3], v[2:3], 1
; GFX6-NEXT: v_or_b32_e32 v0, v0, v4
; GFX6-NEXT: v_not_b32_e32 v4, v10
; GFX6-NEXT: v_and_b32_e32 v4, 63, v4
; GFX6-NEXT: v_lshl_b64 v[2:3], v[2:3], v4
; GFX6-NEXT: v_and_b32_e32 v4, 63, v10
; GFX6-NEXT: v_lshr_b64 v[6:7], v[6:7], v4
; GFX6-NEXT: v_or_b32_e32 v1, v1, v5
; GFX6-NEXT: v_or_b32_e32 v2, v2, v6
; GFX6-NEXT: v_or_b32_e32 v3, v3, v7
; GFX6-NEXT: s_setpc_b64 s[30:31]
;
; GFX8-LABEL: v_fshr_v2i64:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX8-NEXT: v_lshlrev_b64 v[0:1], 1, v[0:1]
; GFX8-NEXT: v_not_b32_e32 v9, v8
; GFX8-NEXT: v_and_b32_e32 v9, 63, v9
; GFX8-NEXT: v_and_b32_e32 v8, 63, v8
; GFX8-NEXT: v_lshlrev_b64 v[0:1], v9, v[0:1]
; GFX8-NEXT: v_lshrrev_b64 v[4:5], v8, v[4:5]
; GFX8-NEXT: v_lshlrev_b64 v[2:3], 1, v[2:3]
; GFX8-NEXT: v_or_b32_e32 v0, v0, v4
; GFX8-NEXT: v_not_b32_e32 v4, v10
; GFX8-NEXT: v_and_b32_e32 v4, 63, v4
; GFX8-NEXT: v_lshlrev_b64 v[2:3], v4, v[2:3]
; GFX8-NEXT: v_and_b32_e32 v4, 63, v10
; GFX8-NEXT: v_lshrrev_b64 v[6:7], v4, v[6:7]
; GFX8-NEXT: v_or_b32_e32 v1, v1, v5
; GFX8-NEXT: v_or_b32_e32 v2, v2, v6
; GFX8-NEXT: v_or_b32_e32 v3, v3, v7
; GFX8-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: v_fshr_v2i64:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_lshlrev_b64 v[0:1], 1, v[0:1]
; GFX9-NEXT: v_not_b32_e32 v9, v8
; GFX9-NEXT: v_and_b32_e32 v9, 63, v9
; GFX9-NEXT: v_and_b32_e32 v8, 63, v8
; GFX9-NEXT: v_lshlrev_b64 v[0:1], v9, v[0:1]
; GFX9-NEXT: v_lshrrev_b64 v[4:5], v8, v[4:5]
; GFX9-NEXT: v_lshlrev_b64 v[2:3], 1, v[2:3]
; GFX9-NEXT: v_or_b32_e32 v0, v0, v4
; GFX9-NEXT: v_not_b32_e32 v4, v10
; GFX9-NEXT: v_and_b32_e32 v4, 63, v4
; GFX9-NEXT: v_lshlrev_b64 v[2:3], v4, v[2:3]
; GFX9-NEXT: v_and_b32_e32 v4, 63, v10
; GFX9-NEXT: v_lshrrev_b64 v[6:7], v4, v[6:7]
; GFX9-NEXT: v_or_b32_e32 v1, v1, v5
; GFX9-NEXT: v_or_b32_e32 v2, v2, v6
; GFX9-NEXT: v_or_b32_e32 v3, v3, v7
; GFX9-NEXT: s_setpc_b64 s[30:31]
;
; GFX10-LABEL: v_fshr_v2i64:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: v_not_b32_e32 v9, v8
; GFX10-NEXT: v_not_b32_e32 v11, v10
; GFX10-NEXT: v_lshlrev_b64 v[0:1], 1, v[0:1]
; GFX10-NEXT: v_lshlrev_b64 v[2:3], 1, v[2:3]
; GFX10-NEXT: v_and_b32_e32 v8, 63, v8
; GFX10-NEXT: v_and_b32_e32 v9, 63, v9
; GFX10-NEXT: v_and_b32_e32 v11, 63, v11
; GFX10-NEXT: v_and_b32_e32 v10, 63, v10
; GFX10-NEXT: v_lshrrev_b64 v[4:5], v8, v[4:5]
; GFX10-NEXT: v_lshlrev_b64 v[0:1], v9, v[0:1]
; GFX10-NEXT: v_lshlrev_b64 v[2:3], v11, v[2:3]
; GFX10-NEXT: v_lshrrev_b64 v[6:7], v10, v[6:7]
; GFX10-NEXT: v_or_b32_e32 v0, v0, v4
; GFX10-NEXT: v_or_b32_e32 v1, v1, v5
; GFX10-NEXT: v_or_b32_e32 v2, v2, v6
; GFX10-NEXT: v_or_b32_e32 v3, v3, v7
; GFX10-NEXT: s_setpc_b64 s[30:31]
;
; GFX11-LABEL: v_fshr_v2i64:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_not_b32_e32 v9, v8
; GFX11-NEXT: v_not_b32_e32 v11, v10
; GFX11-NEXT: v_lshlrev_b64 v[0:1], 1, v[0:1]
; GFX11-NEXT: v_lshlrev_b64 v[2:3], 1, v[2:3]
; GFX11-NEXT: v_and_b32_e32 v8, 63, v8
; GFX11-NEXT: v_and_b32_e32 v9, 63, v9
; GFX11-NEXT: v_and_b32_e32 v11, 63, v11
; GFX11-NEXT: v_and_b32_e32 v10, 63, v10
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
; GFX11-NEXT: v_lshrrev_b64 v[4:5], v8, v[4:5]
; GFX11-NEXT: v_lshlrev_b64 v[0:1], v9, v[0:1]
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
; GFX11-NEXT: v_lshlrev_b64 v[2:3], v11, v[2:3]
; GFX11-NEXT: v_lshrrev_b64 v[6:7], v10, v[6:7]
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4)
; GFX11-NEXT: v_or_b32_e32 v0, v0, v4
; GFX11-NEXT: v_or_b32_e32 v1, v1, v5
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4)
; GFX11-NEXT: v_or_b32_e32 v2, v2, v6
; GFX11-NEXT: v_or_b32_e32 v3, v3, v7
; GFX11-NEXT: s_setpc_b64 s[30:31]
  %result = call <2 x i64> @llvm.fshr.v2i64(<2 x i64> %lhs, <2 x i64> %rhs, <2 x i64> %amt)
  ret <2 x i64> %result
}
|
|
|
|
; fshr.i128 with all-scalar operands: the 128-bit shifts are expanded with
; 64-bit shift pairs plus compare/cselect sequences that handle shift
; amounts of 0, <64, and >=64.
; CHECK lines below are autogenerated by update_llc_test_checks.py - do not edit by hand.
define amdgpu_ps i128 @s_fshr_i128(i128 inreg %lhs, i128 inreg %rhs, i128 inreg %amt) {
; GFX6-LABEL: s_fshr_i128:
; GFX6: ; %bb.0:
; GFX6-NEXT: s_lshl_b64 s[10:11], s[0:1], 1
; GFX6-NEXT: s_lshl_b64 s[2:3], s[2:3], 1
; GFX6-NEXT: s_lshr_b32 s0, s1, 31
; GFX6-NEXT: s_mov_b32 s1, 0
; GFX6-NEXT: s_or_b64 s[0:1], s[2:3], s[0:1]
; GFX6-NEXT: s_andn2_b32 s2, 0x7f, s8
; GFX6-NEXT: s_not_b32 s9, s8
; GFX6-NEXT: s_sub_i32 s16, s2, 64
; GFX6-NEXT: s_sub_i32 s12, 64, s2
; GFX6-NEXT: s_cmp_lt_u32 s2, 64
; GFX6-NEXT: s_cselect_b32 s17, 1, 0
; GFX6-NEXT: s_cmp_eq_u32 s2, 0
; GFX6-NEXT: s_cselect_b32 s18, 1, 0
; GFX6-NEXT: s_lshr_b64 s[12:13], s[10:11], s12
; GFX6-NEXT: s_lshl_b64 s[14:15], s[0:1], s9
; GFX6-NEXT: s_lshl_b64 s[2:3], s[10:11], s9
; GFX6-NEXT: s_or_b64 s[12:13], s[12:13], s[14:15]
; GFX6-NEXT: s_lshl_b64 s[10:11], s[10:11], s16
; GFX6-NEXT: s_cmp_lg_u32 s17, 0
; GFX6-NEXT: s_cselect_b64 s[2:3], s[2:3], 0
; GFX6-NEXT: s_cselect_b64 s[10:11], s[12:13], s[10:11]
; GFX6-NEXT: s_cmp_lg_u32 s18, 0
; GFX6-NEXT: s_cselect_b64 s[10:11], s[0:1], s[10:11]
; GFX6-NEXT: s_and_b32 s0, s8, 0x7f
; GFX6-NEXT: s_sub_i32 s14, s0, 64
; GFX6-NEXT: s_sub_i32 s12, 64, s0
; GFX6-NEXT: s_cmp_lt_u32 s0, 64
; GFX6-NEXT: s_cselect_b32 s15, 1, 0
; GFX6-NEXT: s_cmp_eq_u32 s0, 0
; GFX6-NEXT: s_cselect_b32 s16, 1, 0
; GFX6-NEXT: s_lshr_b64 s[0:1], s[6:7], s8
; GFX6-NEXT: s_lshr_b64 s[8:9], s[4:5], s8
; GFX6-NEXT: s_lshl_b64 s[12:13], s[6:7], s12
; GFX6-NEXT: s_or_b64 s[8:9], s[8:9], s[12:13]
; GFX6-NEXT: s_lshr_b64 s[6:7], s[6:7], s14
; GFX6-NEXT: s_cmp_lg_u32 s15, 0
; GFX6-NEXT: s_cselect_b64 s[6:7], s[8:9], s[6:7]
; GFX6-NEXT: s_cmp_lg_u32 s16, 0
; GFX6-NEXT: s_cselect_b64 s[4:5], s[4:5], s[6:7]
; GFX6-NEXT: s_cmp_lg_u32 s15, 0
; GFX6-NEXT: s_cselect_b64 s[6:7], s[0:1], 0
; GFX6-NEXT: s_or_b64 s[0:1], s[2:3], s[4:5]
; GFX6-NEXT: s_or_b64 s[2:3], s[10:11], s[6:7]
; GFX6-NEXT: ; return to shader part epilog
;
; GFX8-LABEL: s_fshr_i128:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_lshl_b64 s[10:11], s[0:1], 1
; GFX8-NEXT: s_lshl_b64 s[2:3], s[2:3], 1
; GFX8-NEXT: s_lshr_b32 s0, s1, 31
; GFX8-NEXT: s_mov_b32 s1, 0
; GFX8-NEXT: s_or_b64 s[0:1], s[2:3], s[0:1]
; GFX8-NEXT: s_andn2_b32 s2, 0x7f, s8
; GFX8-NEXT: s_not_b32 s9, s8
; GFX8-NEXT: s_sub_i32 s16, s2, 64
; GFX8-NEXT: s_sub_i32 s12, 64, s2
; GFX8-NEXT: s_cmp_lt_u32 s2, 64
; GFX8-NEXT: s_cselect_b32 s17, 1, 0
; GFX8-NEXT: s_cmp_eq_u32 s2, 0
; GFX8-NEXT: s_cselect_b32 s18, 1, 0
; GFX8-NEXT: s_lshr_b64 s[12:13], s[10:11], s12
; GFX8-NEXT: s_lshl_b64 s[14:15], s[0:1], s9
; GFX8-NEXT: s_lshl_b64 s[2:3], s[10:11], s9
; GFX8-NEXT: s_or_b64 s[12:13], s[12:13], s[14:15]
; GFX8-NEXT: s_lshl_b64 s[10:11], s[10:11], s16
; GFX8-NEXT: s_cmp_lg_u32 s17, 0
; GFX8-NEXT: s_cselect_b64 s[2:3], s[2:3], 0
; GFX8-NEXT: s_cselect_b64 s[10:11], s[12:13], s[10:11]
; GFX8-NEXT: s_cmp_lg_u32 s18, 0
; GFX8-NEXT: s_cselect_b64 s[10:11], s[0:1], s[10:11]
; GFX8-NEXT: s_and_b32 s0, s8, 0x7f
; GFX8-NEXT: s_sub_i32 s14, s0, 64
; GFX8-NEXT: s_sub_i32 s12, 64, s0
; GFX8-NEXT: s_cmp_lt_u32 s0, 64
; GFX8-NEXT: s_cselect_b32 s15, 1, 0
; GFX8-NEXT: s_cmp_eq_u32 s0, 0
; GFX8-NEXT: s_cselect_b32 s16, 1, 0
; GFX8-NEXT: s_lshr_b64 s[0:1], s[6:7], s8
; GFX8-NEXT: s_lshr_b64 s[8:9], s[4:5], s8
; GFX8-NEXT: s_lshl_b64 s[12:13], s[6:7], s12
; GFX8-NEXT: s_or_b64 s[8:9], s[8:9], s[12:13]
; GFX8-NEXT: s_lshr_b64 s[6:7], s[6:7], s14
; GFX8-NEXT: s_cmp_lg_u32 s15, 0
; GFX8-NEXT: s_cselect_b64 s[6:7], s[8:9], s[6:7]
; GFX8-NEXT: s_cmp_lg_u32 s16, 0
; GFX8-NEXT: s_cselect_b64 s[4:5], s[4:5], s[6:7]
; GFX8-NEXT: s_cmp_lg_u32 s15, 0
; GFX8-NEXT: s_cselect_b64 s[6:7], s[0:1], 0
; GFX8-NEXT: s_or_b64 s[0:1], s[2:3], s[4:5]
; GFX8-NEXT: s_or_b64 s[2:3], s[10:11], s[6:7]
; GFX8-NEXT: ; return to shader part epilog
;
; GFX9-LABEL: s_fshr_i128:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_lshl_b64 s[10:11], s[0:1], 1
; GFX9-NEXT: s_lshl_b64 s[2:3], s[2:3], 1
; GFX9-NEXT: s_lshr_b32 s0, s1, 31
; GFX9-NEXT: s_mov_b32 s1, 0
; GFX9-NEXT: s_or_b64 s[0:1], s[2:3], s[0:1]
; GFX9-NEXT: s_andn2_b32 s2, 0x7f, s8
; GFX9-NEXT: s_not_b32 s9, s8
; GFX9-NEXT: s_sub_i32 s16, s2, 64
; GFX9-NEXT: s_sub_i32 s12, 64, s2
; GFX9-NEXT: s_cmp_lt_u32 s2, 64
; GFX9-NEXT: s_cselect_b32 s17, 1, 0
; GFX9-NEXT: s_cmp_eq_u32 s2, 0
; GFX9-NEXT: s_cselect_b32 s18, 1, 0
; GFX9-NEXT: s_lshr_b64 s[12:13], s[10:11], s12
; GFX9-NEXT: s_lshl_b64 s[14:15], s[0:1], s9
; GFX9-NEXT: s_lshl_b64 s[2:3], s[10:11], s9
; GFX9-NEXT: s_or_b64 s[12:13], s[12:13], s[14:15]
; GFX9-NEXT: s_lshl_b64 s[10:11], s[10:11], s16
; GFX9-NEXT: s_cmp_lg_u32 s17, 0
; GFX9-NEXT: s_cselect_b64 s[2:3], s[2:3], 0
; GFX9-NEXT: s_cselect_b64 s[10:11], s[12:13], s[10:11]
; GFX9-NEXT: s_cmp_lg_u32 s18, 0
; GFX9-NEXT: s_cselect_b64 s[10:11], s[0:1], s[10:11]
; GFX9-NEXT: s_and_b32 s0, s8, 0x7f
; GFX9-NEXT: s_sub_i32 s14, s0, 64
; GFX9-NEXT: s_sub_i32 s12, 64, s0
; GFX9-NEXT: s_cmp_lt_u32 s0, 64
; GFX9-NEXT: s_cselect_b32 s15, 1, 0
; GFX9-NEXT: s_cmp_eq_u32 s0, 0
; GFX9-NEXT: s_cselect_b32 s16, 1, 0
; GFX9-NEXT: s_lshr_b64 s[0:1], s[6:7], s8
; GFX9-NEXT: s_lshr_b64 s[8:9], s[4:5], s8
; GFX9-NEXT: s_lshl_b64 s[12:13], s[6:7], s12
; GFX9-NEXT: s_or_b64 s[8:9], s[8:9], s[12:13]
; GFX9-NEXT: s_lshr_b64 s[6:7], s[6:7], s14
; GFX9-NEXT: s_cmp_lg_u32 s15, 0
; GFX9-NEXT: s_cselect_b64 s[6:7], s[8:9], s[6:7]
; GFX9-NEXT: s_cmp_lg_u32 s16, 0
; GFX9-NEXT: s_cselect_b64 s[4:5], s[4:5], s[6:7]
; GFX9-NEXT: s_cmp_lg_u32 s15, 0
; GFX9-NEXT: s_cselect_b64 s[6:7], s[0:1], 0
; GFX9-NEXT: s_or_b64 s[0:1], s[2:3], s[4:5]
; GFX9-NEXT: s_or_b64 s[2:3], s[10:11], s[6:7]
; GFX9-NEXT: ; return to shader part epilog
;
; GFX10-LABEL: s_fshr_i128:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_lshl_b64 s[2:3], s[2:3], 1
; GFX10-NEXT: s_lshr_b32 s10, s1, 31
; GFX10-NEXT: s_mov_b32 s11, 0
; GFX10-NEXT: s_andn2_b32 s9, 0x7f, s8
; GFX10-NEXT: s_lshl_b64 s[0:1], s[0:1], 1
; GFX10-NEXT: s_or_b64 s[2:3], s[2:3], s[10:11]
; GFX10-NEXT: s_not_b32 s14, s8
; GFX10-NEXT: s_sub_i32 s16, s9, 64
; GFX10-NEXT: s_sub_i32 s10, 64, s9
; GFX10-NEXT: s_cmp_lt_u32 s9, 64
; GFX10-NEXT: s_cselect_b32 s17, 1, 0
; GFX10-NEXT: s_cmp_eq_u32 s9, 0
; GFX10-NEXT: s_cselect_b32 s9, 1, 0
; GFX10-NEXT: s_lshr_b64 s[10:11], s[0:1], s10
; GFX10-NEXT: s_lshl_b64 s[12:13], s[2:3], s14
; GFX10-NEXT: s_lshl_b64 s[14:15], s[0:1], s14
; GFX10-NEXT: s_or_b64 s[10:11], s[10:11], s[12:13]
; GFX10-NEXT: s_lshl_b64 s[0:1], s[0:1], s16
; GFX10-NEXT: s_cmp_lg_u32 s17, 0
; GFX10-NEXT: s_cselect_b64 s[12:13], s[14:15], 0
; GFX10-NEXT: s_cselect_b64 s[0:1], s[10:11], s[0:1]
; GFX10-NEXT: s_cmp_lg_u32 s9, 0
; GFX10-NEXT: s_cselect_b64 s[2:3], s[2:3], s[0:1]
; GFX10-NEXT: s_and_b32 s0, s8, 0x7f
; GFX10-NEXT: s_sub_i32 s14, s0, 64
; GFX10-NEXT: s_sub_i32 s9, 64, s0
; GFX10-NEXT: s_cmp_lt_u32 s0, 64
; GFX10-NEXT: s_cselect_b32 s15, 1, 0
; GFX10-NEXT: s_cmp_eq_u32 s0, 0
; GFX10-NEXT: s_cselect_b32 s16, 1, 0
; GFX10-NEXT: s_lshr_b64 s[0:1], s[4:5], s8
; GFX10-NEXT: s_lshl_b64 s[10:11], s[6:7], s9
; GFX10-NEXT: s_lshr_b64 s[8:9], s[6:7], s8
; GFX10-NEXT: s_or_b64 s[0:1], s[0:1], s[10:11]
; GFX10-NEXT: s_lshr_b64 s[6:7], s[6:7], s14
; GFX10-NEXT: s_cmp_lg_u32 s15, 0
; GFX10-NEXT: s_cselect_b64 s[0:1], s[0:1], s[6:7]
; GFX10-NEXT: s_cmp_lg_u32 s16, 0
; GFX10-NEXT: s_cselect_b64 s[0:1], s[4:5], s[0:1]
; GFX10-NEXT: s_cmp_lg_u32 s15, 0
; GFX10-NEXT: s_cselect_b64 s[4:5], s[8:9], 0
; GFX10-NEXT: s_or_b64 s[0:1], s[12:13], s[0:1]
; GFX10-NEXT: s_or_b64 s[2:3], s[2:3], s[4:5]
; GFX10-NEXT: ; return to shader part epilog
;
; GFX11-LABEL: s_fshr_i128:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_lshl_b64 s[2:3], s[2:3], 1
; GFX11-NEXT: s_lshr_b32 s10, s1, 31
; GFX11-NEXT: s_mov_b32 s11, 0
; GFX11-NEXT: s_and_not1_b32 s9, 0x7f, s8
; GFX11-NEXT: s_lshl_b64 s[0:1], s[0:1], 1
; GFX11-NEXT: s_or_b64 s[2:3], s[2:3], s[10:11]
; GFX11-NEXT: s_not_b32 s14, s8
; GFX11-NEXT: s_sub_i32 s16, s9, 64
; GFX11-NEXT: s_sub_i32 s10, 64, s9
; GFX11-NEXT: s_cmp_lt_u32 s9, 64
; GFX11-NEXT: s_cselect_b32 s17, 1, 0
; GFX11-NEXT: s_cmp_eq_u32 s9, 0
; GFX11-NEXT: s_cselect_b32 s9, 1, 0
; GFX11-NEXT: s_lshr_b64 s[10:11], s[0:1], s10
; GFX11-NEXT: s_lshl_b64 s[12:13], s[2:3], s14
; GFX11-NEXT: s_lshl_b64 s[14:15], s[0:1], s14
; GFX11-NEXT: s_or_b64 s[10:11], s[10:11], s[12:13]
; GFX11-NEXT: s_lshl_b64 s[0:1], s[0:1], s16
; GFX11-NEXT: s_cmp_lg_u32 s17, 0
; GFX11-NEXT: s_cselect_b64 s[12:13], s[14:15], 0
; GFX11-NEXT: s_cselect_b64 s[0:1], s[10:11], s[0:1]
; GFX11-NEXT: s_cmp_lg_u32 s9, 0
; GFX11-NEXT: s_cselect_b64 s[2:3], s[2:3], s[0:1]
; GFX11-NEXT: s_and_b32 s0, s8, 0x7f
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_sub_i32 s14, s0, 64
; GFX11-NEXT: s_sub_i32 s9, 64, s0
; GFX11-NEXT: s_cmp_lt_u32 s0, 64
; GFX11-NEXT: s_cselect_b32 s15, 1, 0
; GFX11-NEXT: s_cmp_eq_u32 s0, 0
; GFX11-NEXT: s_cselect_b32 s16, 1, 0
; GFX11-NEXT: s_lshr_b64 s[0:1], s[4:5], s8
; GFX11-NEXT: s_lshl_b64 s[10:11], s[6:7], s9
; GFX11-NEXT: s_lshr_b64 s[8:9], s[6:7], s8
; GFX11-NEXT: s_or_b64 s[0:1], s[0:1], s[10:11]
; GFX11-NEXT: s_lshr_b64 s[6:7], s[6:7], s14
; GFX11-NEXT: s_cmp_lg_u32 s15, 0
; GFX11-NEXT: s_cselect_b64 s[0:1], s[0:1], s[6:7]
; GFX11-NEXT: s_cmp_lg_u32 s16, 0
; GFX11-NEXT: s_cselect_b64 s[0:1], s[4:5], s[0:1]
; GFX11-NEXT: s_cmp_lg_u32 s15, 0
; GFX11-NEXT: s_cselect_b64 s[4:5], s[8:9], 0
; GFX11-NEXT: s_or_b64 s[0:1], s[12:13], s[0:1]
; GFX11-NEXT: s_or_b64 s[2:3], s[2:3], s[4:5]
; GFX11-NEXT: ; return to shader part epilog
  %result = call i128 @llvm.fshr.i128(i128 %lhs, i128 %rhs, i128 %amt)
  ret i128 %result
}
|
|
|
|
define i128 @v_fshr_i128(i128 %lhs, i128 %rhs, i128 %amt) {
|
|
; GFX6-LABEL: v_fshr_i128:
|
|
; GFX6: ; %bb.0:
|
|
; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; GFX6-NEXT: v_lshl_b64 v[2:3], v[2:3], 1
|
|
; GFX6-NEXT: v_lshl_b64 v[9:10], v[0:1], 1
|
|
; GFX6-NEXT: v_lshrrev_b32_e32 v0, 31, v1
|
|
; GFX6-NEXT: v_or_b32_e32 v2, v2, v0
|
|
; GFX6-NEXT: v_not_b32_e32 v0, v8
|
|
; GFX6-NEXT: v_and_b32_e32 v15, 0x7f, v0
|
|
; GFX6-NEXT: v_sub_i32_e32 v0, vcc, 64, v15
|
|
; GFX6-NEXT: v_not_b32_e32 v16, 63
|
|
; GFX6-NEXT: v_lshr_b64 v[0:1], v[9:10], v0
|
|
; GFX6-NEXT: v_lshl_b64 v[11:12], v[2:3], v15
|
|
; GFX6-NEXT: v_add_i32_e32 v17, vcc, v15, v16
|
|
; GFX6-NEXT: v_lshl_b64 v[13:14], v[9:10], v15
|
|
; GFX6-NEXT: v_or_b32_e32 v11, v0, v11
|
|
; GFX6-NEXT: v_or_b32_e32 v12, v1, v12
|
|
; GFX6-NEXT: v_lshl_b64 v[0:1], v[9:10], v17
|
|
; GFX6-NEXT: v_cmp_gt_u32_e32 vcc, 64, v15
|
|
; GFX6-NEXT: v_cndmask_b32_e32 v10, 0, v13, vcc
|
|
; GFX6-NEXT: v_cndmask_b32_e32 v13, 0, v14, vcc
|
|
; GFX6-NEXT: v_cndmask_b32_e32 v0, v0, v11, vcc
|
|
; GFX6-NEXT: v_cndmask_b32_e32 v1, v1, v12, vcc
|
|
; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, 0, v15
|
|
; GFX6-NEXT: v_and_b32_e32 v14, 0x7f, v8
|
|
; GFX6-NEXT: v_cndmask_b32_e32 v11, v0, v2, vcc
|
|
; GFX6-NEXT: v_cndmask_b32_e32 v12, v1, v3, vcc
|
|
; GFX6-NEXT: v_sub_i32_e32 v2, vcc, 64, v14
|
|
; GFX6-NEXT: v_lshr_b64 v[0:1], v[4:5], v14
|
|
; GFX6-NEXT: v_lshl_b64 v[2:3], v[6:7], v2
|
|
; GFX6-NEXT: v_add_i32_e32 v15, vcc, v14, v16
|
|
; GFX6-NEXT: v_or_b32_e32 v2, v0, v2
|
|
; GFX6-NEXT: v_or_b32_e32 v3, v1, v3
|
|
; GFX6-NEXT: v_lshr_b64 v[0:1], v[6:7], v15
|
|
; GFX6-NEXT: v_lshr_b64 v[8:9], v[6:7], v14
|
|
; GFX6-NEXT: v_cmp_gt_u32_e32 vcc, 64, v14
|
|
; GFX6-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc
|
|
; GFX6-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc
|
|
; GFX6-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v14
|
|
; GFX6-NEXT: v_cndmask_b32_e64 v0, v0, v4, s[4:5]
|
|
; GFX6-NEXT: v_cndmask_b32_e64 v1, v1, v5, s[4:5]
|
|
; GFX6-NEXT: v_cndmask_b32_e32 v2, 0, v8, vcc
|
|
; GFX6-NEXT: v_cndmask_b32_e32 v3, 0, v9, vcc
|
|
; GFX6-NEXT: v_or_b32_e32 v0, v10, v0
|
|
; GFX6-NEXT: v_or_b32_e32 v1, v13, v1
|
|
; GFX6-NEXT: v_or_b32_e32 v2, v11, v2
|
|
; GFX6-NEXT: v_or_b32_e32 v3, v12, v3
|
|
; GFX6-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; GFX8-LABEL: v_fshr_i128:
|
|
; GFX8: ; %bb.0:
|
|
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; GFX8-NEXT: v_lshlrev_b64 v[2:3], 1, v[2:3]
|
|
; GFX8-NEXT: v_lshlrev_b64 v[9:10], 1, v[0:1]
|
|
; GFX8-NEXT: v_lshrrev_b32_e32 v0, 31, v1
|
|
; GFX8-NEXT: v_or_b32_e32 v2, v2, v0
|
|
; GFX8-NEXT: v_not_b32_e32 v0, v8
|
|
; GFX8-NEXT: v_and_b32_e32 v15, 0x7f, v0
|
|
; GFX8-NEXT: v_sub_u32_e32 v0, vcc, 64, v15
|
|
; GFX8-NEXT: v_not_b32_e32 v16, 63
|
|
; GFX8-NEXT: v_lshrrev_b64 v[0:1], v0, v[9:10]
|
|
; GFX8-NEXT: v_lshlrev_b64 v[11:12], v15, v[2:3]
|
|
; GFX8-NEXT: v_add_u32_e32 v17, vcc, v15, v16
|
|
; GFX8-NEXT: v_lshlrev_b64 v[13:14], v15, v[9:10]
|
|
; GFX8-NEXT: v_or_b32_e32 v11, v0, v11
|
|
; GFX8-NEXT: v_or_b32_e32 v12, v1, v12
|
|
; GFX8-NEXT: v_lshlrev_b64 v[0:1], v17, v[9:10]
|
|
; GFX8-NEXT: v_cmp_gt_u32_e32 vcc, 64, v15
|
|
; GFX8-NEXT: v_cndmask_b32_e32 v10, 0, v13, vcc
|
|
; GFX8-NEXT: v_cndmask_b32_e32 v13, 0, v14, vcc
|
|
; GFX8-NEXT: v_cndmask_b32_e32 v0, v0, v11, vcc
|
|
; GFX8-NEXT: v_cndmask_b32_e32 v1, v1, v12, vcc
|
|
; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, 0, v15
|
|
; GFX8-NEXT: v_and_b32_e32 v14, 0x7f, v8
|
|
; GFX8-NEXT: v_cndmask_b32_e32 v11, v0, v2, vcc
|
|
; GFX8-NEXT: v_cndmask_b32_e32 v12, v1, v3, vcc
|
|
; GFX8-NEXT: v_sub_u32_e32 v2, vcc, 64, v14
|
|
; GFX8-NEXT: v_lshrrev_b64 v[0:1], v14, v[4:5]
|
|
; GFX8-NEXT: v_lshlrev_b64 v[2:3], v2, v[6:7]
|
|
; GFX8-NEXT: v_add_u32_e32 v15, vcc, v14, v16
|
|
; GFX8-NEXT: v_or_b32_e32 v2, v0, v2
|
|
; GFX8-NEXT: v_or_b32_e32 v3, v1, v3
|
|
; GFX8-NEXT: v_lshrrev_b64 v[0:1], v15, v[6:7]
|
|
; GFX8-NEXT: v_lshrrev_b64 v[8:9], v14, v[6:7]
|
|
; GFX8-NEXT: v_cmp_gt_u32_e32 vcc, 64, v14
|
|
; GFX8-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc
|
|
; GFX8-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc
|
|
; GFX8-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v14
|
|
; GFX8-NEXT: v_cndmask_b32_e64 v0, v0, v4, s[4:5]
|
|
; GFX8-NEXT: v_cndmask_b32_e64 v1, v1, v5, s[4:5]
|
|
; GFX8-NEXT: v_cndmask_b32_e32 v2, 0, v8, vcc
|
|
; GFX8-NEXT: v_cndmask_b32_e32 v3, 0, v9, vcc
|
|
; GFX8-NEXT: v_or_b32_e32 v0, v10, v0
|
|
; GFX8-NEXT: v_or_b32_e32 v1, v13, v1
|
|
; GFX8-NEXT: v_or_b32_e32 v2, v11, v2
|
|
; GFX8-NEXT: v_or_b32_e32 v3, v12, v3
|
|
; GFX8-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; GFX9-LABEL: v_fshr_i128:
|
|
; GFX9: ; %bb.0:
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; GFX9-NEXT: v_lshlrev_b64 v[2:3], 1, v[2:3]
|
|
; GFX9-NEXT: v_lshlrev_b64 v[9:10], 1, v[0:1]
|
|
; GFX9-NEXT: v_lshrrev_b32_e32 v0, 31, v1
|
|
; GFX9-NEXT: v_or_b32_e32 v2, v2, v0
|
|
; GFX9-NEXT: v_not_b32_e32 v0, v8
|
|
; GFX9-NEXT: v_and_b32_e32 v15, 0x7f, v0
|
|
; GFX9-NEXT: v_sub_u32_e32 v0, 64, v15
|
|
; GFX9-NEXT: v_lshrrev_b64 v[0:1], v0, v[9:10]
|
|
; GFX9-NEXT: v_lshlrev_b64 v[11:12], v15, v[2:3]
|
|
; GFX9-NEXT: v_add_u32_e32 v16, 0xffffffc0, v15
|
|
; GFX9-NEXT: v_lshlrev_b64 v[13:14], v15, v[9:10]
|
|
; GFX9-NEXT: v_or_b32_e32 v11, v0, v11
|
|
; GFX9-NEXT: v_or_b32_e32 v12, v1, v12
|
|
; GFX9-NEXT: v_lshlrev_b64 v[0:1], v16, v[9:10]
|
|
; GFX9-NEXT: v_cmp_gt_u32_e32 vcc, 64, v15
|
|
; GFX9-NEXT: v_cndmask_b32_e32 v10, 0, v13, vcc
|
|
; GFX9-NEXT: v_cndmask_b32_e32 v13, 0, v14, vcc
|
|
; GFX9-NEXT: v_cndmask_b32_e32 v0, v0, v11, vcc
|
|
; GFX9-NEXT: v_cndmask_b32_e32 v1, v1, v12, vcc
|
|
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v15
|
|
; GFX9-NEXT: v_and_b32_e32 v14, 0x7f, v8
|
|
; GFX9-NEXT: v_cndmask_b32_e32 v11, v0, v2, vcc
|
|
; GFX9-NEXT: v_sub_u32_e32 v2, 64, v14
|
|
; GFX9-NEXT: v_cndmask_b32_e32 v12, v1, v3, vcc
|
|
; GFX9-NEXT: v_lshrrev_b64 v[0:1], v14, v[4:5]
|
|
; GFX9-NEXT: v_lshlrev_b64 v[2:3], v2, v[6:7]
|
|
; GFX9-NEXT: v_add_u32_e32 v15, 0xffffffc0, v14
|
|
; GFX9-NEXT: v_or_b32_e32 v2, v0, v2
|
|
; GFX9-NEXT: v_or_b32_e32 v3, v1, v3
|
|
; GFX9-NEXT: v_lshrrev_b64 v[0:1], v15, v[6:7]
|
|
; GFX9-NEXT: v_lshrrev_b64 v[8:9], v14, v[6:7]
|
|
; GFX9-NEXT: v_cmp_gt_u32_e32 vcc, 64, v14
|
|
; GFX9-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc
|
|
; GFX9-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc
|
|
; GFX9-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v14
|
|
; GFX9-NEXT: v_cndmask_b32_e64 v0, v0, v4, s[4:5]
|
|
; GFX9-NEXT: v_cndmask_b32_e64 v1, v1, v5, s[4:5]
|
|
; GFX9-NEXT: v_cndmask_b32_e32 v2, 0, v8, vcc
|
|
; GFX9-NEXT: v_cndmask_b32_e32 v3, 0, v9, vcc
|
|
; GFX9-NEXT: v_or_b32_e32 v0, v10, v0
|
|
; GFX9-NEXT: v_or_b32_e32 v1, v13, v1
|
|
; GFX9-NEXT: v_or_b32_e32 v2, v11, v2
|
|
; GFX9-NEXT: v_or_b32_e32 v3, v12, v3
|
|
; GFX9-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; GFX10-LABEL: v_fshr_i128:
|
|
; GFX10: ; %bb.0:
|
|
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; GFX10-NEXT: v_not_b32_e32 v9, v8
|
|
; GFX10-NEXT: v_lshlrev_b64 v[2:3], 1, v[2:3]
|
|
; GFX10-NEXT: v_lshrrev_b32_e32 v10, 31, v1
|
|
; GFX10-NEXT: v_and_b32_e32 v19, 0x7f, v8
|
|
; GFX10-NEXT: v_lshlrev_b64 v[0:1], 1, v[0:1]
|
|
; GFX10-NEXT: v_and_b32_e32 v18, 0x7f, v9
|
|
; GFX10-NEXT: v_or_b32_e32 v2, v2, v10
|
|
; GFX10-NEXT: v_sub_nc_u32_e32 v16, 64, v19
|
|
; GFX10-NEXT: v_add_nc_u32_e32 v21, 0xffffffc0, v19
|
|
; GFX10-NEXT: v_sub_nc_u32_e32 v10, 64, v18
|
|
; GFX10-NEXT: v_add_nc_u32_e32 v20, 0xffffffc0, v18
|
|
; GFX10-NEXT: v_lshlrev_b64 v[8:9], v18, v[2:3]
|
|
; GFX10-NEXT: v_lshrrev_b64 v[12:13], v19, v[4:5]
|
|
; GFX10-NEXT: v_lshlrev_b64 v[16:17], v16, v[6:7]
|
|
; GFX10-NEXT: v_lshrrev_b64 v[10:11], v10, v[0:1]
|
|
; GFX10-NEXT: v_lshlrev_b64 v[14:15], v18, v[0:1]
|
|
; GFX10-NEXT: v_lshlrev_b64 v[0:1], v20, v[0:1]
|
|
; GFX10-NEXT: v_cmp_gt_u32_e32 vcc_lo, 64, v18
|
|
; GFX10-NEXT: v_cmp_gt_u32_e64 s4, 64, v19
|
|
; GFX10-NEXT: v_or_b32_e32 v12, v12, v16
|
|
; GFX10-NEXT: v_or_b32_e32 v10, v10, v8
|
|
; GFX10-NEXT: v_or_b32_e32 v11, v11, v9
|
|
; GFX10-NEXT: v_lshrrev_b64 v[8:9], v21, v[6:7]
|
|
; GFX10-NEXT: v_or_b32_e32 v13, v13, v17
|
|
; GFX10-NEXT: v_cmp_eq_u32_e64 s5, 0, v19
|
|
; GFX10-NEXT: v_cndmask_b32_e32 v10, v0, v10, vcc_lo
|
|
; GFX10-NEXT: v_cndmask_b32_e32 v11, v1, v11, vcc_lo
|
|
; GFX10-NEXT: v_lshrrev_b64 v[0:1], v19, v[6:7]
|
|
; GFX10-NEXT: v_cndmask_b32_e64 v8, v8, v12, s4
|
|
; GFX10-NEXT: v_cmp_eq_u32_e64 s6, 0, v18
|
|
; GFX10-NEXT: v_cndmask_b32_e64 v6, v9, v13, s4
|
|
; GFX10-NEXT: v_cndmask_b32_e32 v14, 0, v14, vcc_lo
|
|
; GFX10-NEXT: v_cndmask_b32_e32 v7, 0, v15, vcc_lo
|
|
; GFX10-NEXT: v_cndmask_b32_e64 v4, v8, v4, s5
|
|
; GFX10-NEXT: v_cndmask_b32_e64 v2, v10, v2, s6
|
|
; GFX10-NEXT: v_cndmask_b32_e64 v3, v11, v3, s6
|
|
; GFX10-NEXT: v_cndmask_b32_e64 v5, v6, v5, s5
|
|
; GFX10-NEXT: v_cndmask_b32_e64 v6, 0, v0, s4
|
|
; GFX10-NEXT: v_cndmask_b32_e64 v8, 0, v1, s4
|
|
; GFX10-NEXT: v_or_b32_e32 v0, v14, v4
|
|
; GFX10-NEXT: v_or_b32_e32 v1, v7, v5
|
|
; GFX10-NEXT: v_or_b32_e32 v2, v2, v6
|
|
; GFX10-NEXT: v_or_b32_e32 v3, v3, v8
|
|
; GFX10-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; GFX11-LABEL: v_fshr_i128:
|
|
; GFX11: ; %bb.0:
|
|
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; GFX11-NEXT: v_not_b32_e32 v9, v8
|
|
; GFX11-NEXT: v_lshlrev_b64 v[2:3], 1, v[2:3]
|
|
; GFX11-NEXT: v_lshrrev_b32_e32 v10, 31, v1
|
|
; GFX11-NEXT: v_lshlrev_b64 v[0:1], 1, v[0:1]
|
|
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
|
|
; GFX11-NEXT: v_and_b32_e32 v18, 0x7f, v9
|
|
; GFX11-NEXT: v_or_b32_e32 v2, v2, v10
|
|
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_4)
|
|
; GFX11-NEXT: v_sub_nc_u32_e32 v10, 64, v18
|
|
; GFX11-NEXT: v_lshlrev_b64 v[14:15], v18, v[0:1]
|
|
; GFX11-NEXT: v_cmp_gt_u32_e32 vcc_lo, 64, v18
|
|
; GFX11-NEXT: v_and_b32_e32 v19, 0x7f, v8
|
|
; GFX11-NEXT: v_add_nc_u32_e32 v20, 0xffffffc0, v18
|
|
; GFX11-NEXT: v_lshlrev_b64 v[8:9], v18, v[2:3]
|
|
; GFX11-NEXT: v_lshrrev_b64 v[10:11], v10, v[0:1]
|
|
; GFX11-NEXT: v_cndmask_b32_e32 v14, 0, v14, vcc_lo
|
|
; GFX11-NEXT: v_sub_nc_u32_e32 v16, 64, v19
|
|
; GFX11-NEXT: v_lshlrev_b64 v[0:1], v20, v[0:1]
|
|
; GFX11-NEXT: v_lshrrev_b64 v[12:13], v19, v[4:5]
|
|
; GFX11-NEXT: v_cmp_gt_u32_e64 s0, 64, v19
|
|
; GFX11-NEXT: v_or_b32_e32 v10, v10, v8
|
|
; GFX11-NEXT: v_add_nc_u32_e32 v21, 0xffffffc0, v19
|
|
; GFX11-NEXT: v_lshlrev_b64 v[16:17], v16, v[6:7]
|
|
; GFX11-NEXT: v_or_b32_e32 v11, v11, v9
|
|
; GFX11-NEXT: v_cmp_eq_u32_e64 s1, 0, v19
|
|
; GFX11-NEXT: v_cndmask_b32_e32 v10, v0, v10, vcc_lo
|
|
; GFX11-NEXT: v_lshrrev_b64 v[8:9], v21, v[6:7]
|
|
; GFX11-NEXT: v_or_b32_e32 v12, v12, v16
|
|
; GFX11-NEXT: v_or_b32_e32 v13, v13, v17
|
|
; GFX11-NEXT: v_cndmask_b32_e32 v11, v1, v11, vcc_lo
|
|
; GFX11-NEXT: v_lshrrev_b64 v[0:1], v19, v[6:7]
|
|
; GFX11-NEXT: v_cmp_eq_u32_e64 s2, 0, v18
|
|
; GFX11-NEXT: v_cndmask_b32_e64 v8, v8, v12, s0
|
|
; GFX11-NEXT: v_cndmask_b32_e64 v6, v9, v13, s0
|
|
; GFX11-NEXT: v_cndmask_b32_e32 v7, 0, v15, vcc_lo
|
|
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
|
|
; GFX11-NEXT: v_cndmask_b32_e64 v2, v10, v2, s2
|
|
; GFX11-NEXT: v_cndmask_b32_e64 v3, v11, v3, s2
|
|
; GFX11-NEXT: v_cndmask_b32_e64 v4, v8, v4, s1
|
|
; GFX11-NEXT: v_cndmask_b32_e64 v5, v6, v5, s1
|
|
; GFX11-NEXT: v_cndmask_b32_e64 v6, 0, v0, s0
|
|
; GFX11-NEXT: v_cndmask_b32_e64 v8, 0, v1, s0
|
|
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
|
|
; GFX11-NEXT: v_or_b32_e32 v0, v14, v4
|
|
; GFX11-NEXT: v_or_b32_e32 v1, v7, v5
|
|
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
|
|
; GFX11-NEXT: v_or_b32_e32 v2, v2, v6
|
|
; GFX11-NEXT: v_or_b32_e32 v3, v3, v8
|
|
; GFX11-NEXT: s_setpc_b64 s[30:31]
|
|
%result = call i128 @llvm.fshr.i128(i128 %lhs, i128 %rhs, i128 %amt)
|
|
ret i128 %result
|
|
}
|
|
|
|
define amdgpu_ps <4 x float> @v_fshr_i128_ssv(i128 inreg %lhs, i128 inreg %rhs, i128 %amt) {
|
|
; GFX6-LABEL: v_fshr_i128_ssv:
|
|
; GFX6: ; %bb.0:
|
|
; GFX6-NEXT: v_not_b32_e32 v1, v0
|
|
; GFX6-NEXT: s_lshl_b64 s[8:9], s[0:1], 1
|
|
; GFX6-NEXT: s_lshl_b64 s[2:3], s[2:3], 1
|
|
; GFX6-NEXT: s_lshr_b32 s0, s1, 31
|
|
; GFX6-NEXT: s_mov_b32 s1, 0
|
|
; GFX6-NEXT: v_and_b32_e32 v7, 0x7f, v1
|
|
; GFX6-NEXT: s_or_b64 s[0:1], s[2:3], s[0:1]
|
|
; GFX6-NEXT: v_sub_i32_e32 v1, vcc, 64, v7
|
|
; GFX6-NEXT: v_not_b32_e32 v8, 63
|
|
; GFX6-NEXT: v_lshr_b64 v[1:2], s[8:9], v1
|
|
; GFX6-NEXT: v_lshl_b64 v[3:4], s[0:1], v7
|
|
; GFX6-NEXT: v_add_i32_e32 v9, vcc, v7, v8
|
|
; GFX6-NEXT: v_lshl_b64 v[5:6], s[8:9], v7
|
|
; GFX6-NEXT: v_or_b32_e32 v3, v1, v3
|
|
; GFX6-NEXT: v_or_b32_e32 v4, v2, v4
|
|
; GFX6-NEXT: v_lshl_b64 v[1:2], s[8:9], v9
|
|
; GFX6-NEXT: v_cmp_gt_u32_e32 vcc, 64, v7
|
|
; GFX6-NEXT: v_cndmask_b32_e32 v9, 0, v5, vcc
|
|
; GFX6-NEXT: v_cndmask_b32_e32 v6, 0, v6, vcc
|
|
; GFX6-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc
|
|
; GFX6-NEXT: v_cndmask_b32_e32 v2, v2, v4, vcc
|
|
; GFX6-NEXT: v_mov_b32_e32 v3, s0
|
|
; GFX6-NEXT: v_mov_b32_e32 v4, s1
|
|
; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, 0, v7
|
|
; GFX6-NEXT: v_and_b32_e32 v11, 0x7f, v0
|
|
; GFX6-NEXT: v_cndmask_b32_e32 v7, v1, v3, vcc
|
|
; GFX6-NEXT: v_cndmask_b32_e32 v10, v2, v4, vcc
|
|
; GFX6-NEXT: v_sub_i32_e32 v2, vcc, 64, v11
|
|
; GFX6-NEXT: v_lshr_b64 v[0:1], s[4:5], v11
|
|
; GFX6-NEXT: v_lshl_b64 v[2:3], s[6:7], v2
|
|
; GFX6-NEXT: v_add_i32_e32 v8, vcc, v11, v8
|
|
; GFX6-NEXT: v_or_b32_e32 v2, v0, v2
|
|
; GFX6-NEXT: v_or_b32_e32 v3, v1, v3
|
|
; GFX6-NEXT: v_lshr_b64 v[0:1], s[6:7], v8
|
|
; GFX6-NEXT: v_lshr_b64 v[4:5], s[6:7], v11
|
|
; GFX6-NEXT: v_cmp_gt_u32_e32 vcc, 64, v11
|
|
; GFX6-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc
|
|
; GFX6-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc
|
|
; GFX6-NEXT: v_mov_b32_e32 v2, s4
|
|
; GFX6-NEXT: v_mov_b32_e32 v3, s5
|
|
; GFX6-NEXT: v_cmp_eq_u32_e64 s[0:1], 0, v11
|
|
; GFX6-NEXT: v_cndmask_b32_e64 v0, v0, v2, s[0:1]
|
|
; GFX6-NEXT: v_cndmask_b32_e64 v1, v1, v3, s[0:1]
|
|
; GFX6-NEXT: v_cndmask_b32_e32 v2, 0, v4, vcc
|
|
; GFX6-NEXT: v_cndmask_b32_e32 v3, 0, v5, vcc
|
|
; GFX6-NEXT: v_or_b32_e32 v0, v9, v0
|
|
; GFX6-NEXT: v_or_b32_e32 v1, v6, v1
|
|
; GFX6-NEXT: v_or_b32_e32 v2, v7, v2
|
|
; GFX6-NEXT: v_or_b32_e32 v3, v10, v3
|
|
; GFX6-NEXT: ; return to shader part epilog
|
|
;
|
|
; GFX8-LABEL: v_fshr_i128_ssv:
|
|
; GFX8: ; %bb.0:
|
|
; GFX8-NEXT: v_not_b32_e32 v1, v0
|
|
; GFX8-NEXT: s_lshl_b64 s[8:9], s[0:1], 1
|
|
; GFX8-NEXT: s_lshl_b64 s[2:3], s[2:3], 1
|
|
; GFX8-NEXT: s_lshr_b32 s0, s1, 31
|
|
; GFX8-NEXT: s_mov_b32 s1, 0
|
|
; GFX8-NEXT: v_and_b32_e32 v7, 0x7f, v1
|
|
; GFX8-NEXT: s_or_b64 s[0:1], s[2:3], s[0:1]
|
|
; GFX8-NEXT: v_sub_u32_e32 v1, vcc, 64, v7
|
|
; GFX8-NEXT: v_not_b32_e32 v8, 63
|
|
; GFX8-NEXT: v_lshrrev_b64 v[1:2], v1, s[8:9]
|
|
; GFX8-NEXT: v_lshlrev_b64 v[3:4], v7, s[0:1]
|
|
; GFX8-NEXT: v_add_u32_e32 v9, vcc, v7, v8
|
|
; GFX8-NEXT: v_lshlrev_b64 v[5:6], v7, s[8:9]
|
|
; GFX8-NEXT: v_or_b32_e32 v3, v1, v3
|
|
; GFX8-NEXT: v_or_b32_e32 v4, v2, v4
|
|
; GFX8-NEXT: v_lshlrev_b64 v[1:2], v9, s[8:9]
|
|
; GFX8-NEXT: v_cmp_gt_u32_e32 vcc, 64, v7
|
|
; GFX8-NEXT: v_cndmask_b32_e32 v9, 0, v5, vcc
|
|
; GFX8-NEXT: v_cndmask_b32_e32 v6, 0, v6, vcc
|
|
; GFX8-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc
|
|
; GFX8-NEXT: v_cndmask_b32_e32 v2, v2, v4, vcc
|
|
; GFX8-NEXT: v_mov_b32_e32 v3, s0
|
|
; GFX8-NEXT: v_mov_b32_e32 v4, s1
|
|
; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, 0, v7
|
|
; GFX8-NEXT: v_and_b32_e32 v11, 0x7f, v0
|
|
; GFX8-NEXT: v_cndmask_b32_e32 v7, v1, v3, vcc
|
|
; GFX8-NEXT: v_cndmask_b32_e32 v10, v2, v4, vcc
|
|
; GFX8-NEXT: v_sub_u32_e32 v2, vcc, 64, v11
|
|
; GFX8-NEXT: v_lshrrev_b64 v[0:1], v11, s[4:5]
|
|
; GFX8-NEXT: v_lshlrev_b64 v[2:3], v2, s[6:7]
|
|
; GFX8-NEXT: v_add_u32_e32 v8, vcc, v11, v8
|
|
; GFX8-NEXT: v_or_b32_e32 v2, v0, v2
|
|
; GFX8-NEXT: v_or_b32_e32 v3, v1, v3
|
|
; GFX8-NEXT: v_lshrrev_b64 v[0:1], v8, s[6:7]
|
|
; GFX8-NEXT: v_lshrrev_b64 v[4:5], v11, s[6:7]
|
|
; GFX8-NEXT: v_cmp_gt_u32_e32 vcc, 64, v11
|
|
; GFX8-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc
|
|
; GFX8-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc
|
|
; GFX8-NEXT: v_mov_b32_e32 v2, s4
|
|
; GFX8-NEXT: v_mov_b32_e32 v3, s5
|
|
; GFX8-NEXT: v_cmp_eq_u32_e64 s[0:1], 0, v11
|
|
; GFX8-NEXT: v_cndmask_b32_e64 v0, v0, v2, s[0:1]
|
|
; GFX8-NEXT: v_cndmask_b32_e64 v1, v1, v3, s[0:1]
|
|
; GFX8-NEXT: v_cndmask_b32_e32 v2, 0, v4, vcc
|
|
; GFX8-NEXT: v_cndmask_b32_e32 v3, 0, v5, vcc
|
|
; GFX8-NEXT: v_or_b32_e32 v0, v9, v0
|
|
; GFX8-NEXT: v_or_b32_e32 v1, v6, v1
|
|
; GFX8-NEXT: v_or_b32_e32 v2, v7, v2
|
|
; GFX8-NEXT: v_or_b32_e32 v3, v10, v3
|
|
; GFX8-NEXT: ; return to shader part epilog
|
|
;
|
|
; GFX9-LABEL: v_fshr_i128_ssv:
|
|
; GFX9: ; %bb.0:
|
|
; GFX9-NEXT: v_not_b32_e32 v1, v0
|
|
; GFX9-NEXT: s_lshl_b64 s[8:9], s[0:1], 1
|
|
; GFX9-NEXT: s_lshl_b64 s[2:3], s[2:3], 1
|
|
; GFX9-NEXT: s_lshr_b32 s0, s1, 31
|
|
; GFX9-NEXT: s_mov_b32 s1, 0
|
|
; GFX9-NEXT: v_and_b32_e32 v7, 0x7f, v1
|
|
; GFX9-NEXT: s_or_b64 s[0:1], s[2:3], s[0:1]
|
|
; GFX9-NEXT: v_sub_u32_e32 v1, 64, v7
|
|
; GFX9-NEXT: v_lshrrev_b64 v[1:2], v1, s[8:9]
|
|
; GFX9-NEXT: v_lshlrev_b64 v[3:4], v7, s[0:1]
|
|
; GFX9-NEXT: v_add_u32_e32 v8, 0xffffffc0, v7
|
|
; GFX9-NEXT: v_lshlrev_b64 v[5:6], v7, s[8:9]
|
|
; GFX9-NEXT: v_or_b32_e32 v3, v1, v3
|
|
; GFX9-NEXT: v_or_b32_e32 v4, v2, v4
|
|
; GFX9-NEXT: v_lshlrev_b64 v[1:2], v8, s[8:9]
|
|
; GFX9-NEXT: v_cmp_gt_u32_e32 vcc, 64, v7
|
|
; GFX9-NEXT: v_cndmask_b32_e32 v8, 0, v5, vcc
|
|
; GFX9-NEXT: v_cndmask_b32_e32 v6, 0, v6, vcc
|
|
; GFX9-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc
|
|
; GFX9-NEXT: v_cndmask_b32_e32 v2, v2, v4, vcc
|
|
; GFX9-NEXT: v_mov_b32_e32 v4, s1
|
|
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v7
|
|
; GFX9-NEXT: v_and_b32_e32 v10, 0x7f, v0
|
|
; GFX9-NEXT: v_mov_b32_e32 v3, s0
|
|
; GFX9-NEXT: v_cndmask_b32_e32 v9, v2, v4, vcc
|
|
; GFX9-NEXT: v_sub_u32_e32 v2, 64, v10
|
|
; GFX9-NEXT: v_cndmask_b32_e32 v7, v1, v3, vcc
|
|
; GFX9-NEXT: v_lshrrev_b64 v[0:1], v10, s[4:5]
|
|
; GFX9-NEXT: v_lshlrev_b64 v[2:3], v2, s[6:7]
|
|
; GFX9-NEXT: v_add_u32_e32 v11, 0xffffffc0, v10
|
|
; GFX9-NEXT: v_or_b32_e32 v2, v0, v2
|
|
; GFX9-NEXT: v_or_b32_e32 v3, v1, v3
|
|
; GFX9-NEXT: v_lshrrev_b64 v[0:1], v11, s[6:7]
|
|
; GFX9-NEXT: v_lshrrev_b64 v[4:5], v10, s[6:7]
|
|
; GFX9-NEXT: v_cmp_gt_u32_e32 vcc, 64, v10
|
|
; GFX9-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc
|
|
; GFX9-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc
|
|
; GFX9-NEXT: v_mov_b32_e32 v2, s4
|
|
; GFX9-NEXT: v_mov_b32_e32 v3, s5
|
|
; GFX9-NEXT: v_cmp_eq_u32_e64 s[0:1], 0, v10
|
|
; GFX9-NEXT: v_cndmask_b32_e64 v0, v0, v2, s[0:1]
|
|
; GFX9-NEXT: v_cndmask_b32_e64 v1, v1, v3, s[0:1]
|
|
; GFX9-NEXT: v_cndmask_b32_e32 v2, 0, v4, vcc
|
|
; GFX9-NEXT: v_cndmask_b32_e32 v3, 0, v5, vcc
|
|
; GFX9-NEXT: v_or_b32_e32 v0, v8, v0
|
|
; GFX9-NEXT: v_or_b32_e32 v1, v6, v1
|
|
; GFX9-NEXT: v_or_b32_e32 v2, v7, v2
|
|
; GFX9-NEXT: v_or_b32_e32 v3, v9, v3
|
|
; GFX9-NEXT: ; return to shader part epilog
|
|
;
|
|
; GFX10-LABEL: v_fshr_i128_ssv:
|
|
; GFX10: ; %bb.0:
|
|
; GFX10-NEXT: v_not_b32_e32 v1, v0
|
|
; GFX10-NEXT: v_and_b32_e32 v13, 0x7f, v0
|
|
; GFX10-NEXT: s_mov_b32 s9, 0
|
|
; GFX10-NEXT: s_lshl_b64 s[2:3], s[2:3], 1
|
|
; GFX10-NEXT: s_lshr_b32 s8, s1, 31
|
|
; GFX10-NEXT: v_and_b32_e32 v12, 0x7f, v1
|
|
; GFX10-NEXT: v_sub_nc_u32_e32 v8, 64, v13
|
|
; GFX10-NEXT: s_lshl_b64 s[0:1], s[0:1], 1
|
|
; GFX10-NEXT: s_or_b64 s[8:9], s[2:3], s[8:9]
|
|
; GFX10-NEXT: v_add_nc_u32_e32 v14, 0xffffffc0, v13
|
|
; GFX10-NEXT: v_sub_nc_u32_e32 v2, 64, v12
|
|
; GFX10-NEXT: v_lshlrev_b64 v[0:1], v12, s[8:9]
|
|
; GFX10-NEXT: v_add_nc_u32_e32 v10, 0xffffffc0, v12
|
|
; GFX10-NEXT: v_lshrrev_b64 v[4:5], v13, s[4:5]
|
|
; GFX10-NEXT: v_lshlrev_b64 v[8:9], v8, s[6:7]
|
|
; GFX10-NEXT: v_lshrrev_b64 v[2:3], v2, s[0:1]
|
|
; GFX10-NEXT: v_cmp_gt_u32_e32 vcc_lo, 64, v12
|
|
; GFX10-NEXT: v_lshlrev_b64 v[10:11], v10, s[0:1]
|
|
; GFX10-NEXT: v_lshlrev_b64 v[6:7], v12, s[0:1]
|
|
; GFX10-NEXT: v_cmp_gt_u32_e64 s0, 64, v13
|
|
; GFX10-NEXT: v_or_b32_e32 v4, v4, v8
|
|
; GFX10-NEXT: v_or_b32_e32 v2, v2, v0
|
|
; GFX10-NEXT: v_or_b32_e32 v3, v3, v1
|
|
; GFX10-NEXT: v_lshrrev_b64 v[0:1], v14, s[6:7]
|
|
; GFX10-NEXT: v_or_b32_e32 v5, v5, v9
|
|
; GFX10-NEXT: v_cmp_eq_u32_e64 s1, 0, v13
|
|
; GFX10-NEXT: v_cndmask_b32_e32 v8, v10, v2, vcc_lo
|
|
; GFX10-NEXT: v_cndmask_b32_e32 v10, v11, v3, vcc_lo
|
|
; GFX10-NEXT: v_lshrrev_b64 v[2:3], v13, s[6:7]
|
|
; GFX10-NEXT: v_cndmask_b32_e64 v0, v0, v4, s0
|
|
; GFX10-NEXT: v_cmp_eq_u32_e64 s2, 0, v12
|
|
; GFX10-NEXT: v_cndmask_b32_e64 v1, v1, v5, s0
|
|
; GFX10-NEXT: v_cndmask_b32_e32 v6, 0, v6, vcc_lo
|
|
; GFX10-NEXT: v_cndmask_b32_e32 v4, 0, v7, vcc_lo
|
|
; GFX10-NEXT: v_cndmask_b32_e64 v0, v0, s4, s1
|
|
; GFX10-NEXT: v_cndmask_b32_e64 v5, v8, s8, s2
|
|
; GFX10-NEXT: v_cndmask_b32_e64 v7, v10, s9, s2
|
|
; GFX10-NEXT: v_cndmask_b32_e64 v1, v1, s5, s1
|
|
; GFX10-NEXT: v_cndmask_b32_e64 v2, 0, v2, s0
|
|
; GFX10-NEXT: v_cndmask_b32_e64 v3, 0, v3, s0
|
|
; GFX10-NEXT: v_or_b32_e32 v0, v6, v0
|
|
; GFX10-NEXT: v_or_b32_e32 v1, v4, v1
|
|
; GFX10-NEXT: v_or_b32_e32 v2, v5, v2
|
|
; GFX10-NEXT: v_or_b32_e32 v3, v7, v3
|
|
; GFX10-NEXT: ; return to shader part epilog
|
|
;
|
|
; GFX11-LABEL: v_fshr_i128_ssv:
|
|
; GFX11: ; %bb.0:
|
|
; GFX11-NEXT: v_not_b32_e32 v1, v0
|
|
; GFX11-NEXT: s_lshr_b32 s8, s1, 31
|
|
; GFX11-NEXT: s_lshl_b64 s[0:1], s[0:1], 1
|
|
; GFX11-NEXT: s_mov_b32 s9, 0
|
|
; GFX11-NEXT: s_lshl_b64 s[2:3], s[2:3], 1
|
|
; GFX11-NEXT: v_and_b32_e32 v12, 0x7f, v1
|
|
; GFX11-NEXT: s_or_b64 s[8:9], s[2:3], s[8:9]
|
|
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
|
|
; GFX11-NEXT: v_lshlrev_b64 v[6:7], v12, s[0:1]
|
|
; GFX11-NEXT: v_cmp_gt_u32_e32 vcc_lo, 64, v12
|
|
; GFX11-NEXT: v_and_b32_e32 v13, 0x7f, v0
|
|
; GFX11-NEXT: v_sub_nc_u32_e32 v2, 64, v12
|
|
; GFX11-NEXT: v_lshlrev_b64 v[0:1], v12, s[8:9]
|
|
; GFX11-NEXT: v_add_nc_u32_e32 v10, 0xffffffc0, v12
|
|
; GFX11-NEXT: v_cndmask_b32_e32 v6, 0, v6, vcc_lo
|
|
; GFX11-NEXT: v_sub_nc_u32_e32 v8, 64, v13
|
|
; GFX11-NEXT: v_lshrrev_b64 v[2:3], v2, s[0:1]
|
|
; GFX11-NEXT: v_add_nc_u32_e32 v14, 0xffffffc0, v13
|
|
; GFX11-NEXT: v_lshrrev_b64 v[4:5], v13, s[4:5]
|
|
; GFX11-NEXT: v_lshlrev_b64 v[10:11], v10, s[0:1]
|
|
; GFX11-NEXT: v_lshlrev_b64 v[8:9], v8, s[6:7]
|
|
; GFX11-NEXT: v_cmp_gt_u32_e64 s0, 64, v13
|
|
; GFX11-NEXT: v_or_b32_e32 v2, v2, v0
|
|
; GFX11-NEXT: v_or_b32_e32 v3, v3, v1
|
|
; GFX11-NEXT: v_lshrrev_b64 v[0:1], v14, s[6:7]
|
|
; GFX11-NEXT: v_cmp_eq_u32_e64 s1, 0, v13
|
|
; GFX11-NEXT: v_or_b32_e32 v4, v4, v8
|
|
; GFX11-NEXT: v_or_b32_e32 v5, v5, v9
|
|
; GFX11-NEXT: v_cndmask_b32_e32 v8, v10, v2, vcc_lo
|
|
; GFX11-NEXT: v_cndmask_b32_e32 v10, v11, v3, vcc_lo
|
|
; GFX11-NEXT: v_lshrrev_b64 v[2:3], v13, s[6:7]
|
|
; GFX11-NEXT: v_cndmask_b32_e64 v0, v0, v4, s0
|
|
; GFX11-NEXT: v_cmp_eq_u32_e64 s2, 0, v12
|
|
; GFX11-NEXT: v_cndmask_b32_e64 v1, v1, v5, s0
|
|
; GFX11-NEXT: v_cndmask_b32_e32 v4, 0, v7, vcc_lo
|
|
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
|
|
; GFX11-NEXT: v_cndmask_b32_e64 v0, v0, s4, s1
|
|
; GFX11-NEXT: v_cndmask_b32_e64 v5, v8, s8, s2
|
|
; GFX11-NEXT: v_cndmask_b32_e64 v7, v10, s9, s2
|
|
; GFX11-NEXT: v_cndmask_b32_e64 v1, v1, s5, s1
|
|
; GFX11-NEXT: v_cndmask_b32_e64 v2, 0, v2, s0
|
|
; GFX11-NEXT: v_cndmask_b32_e64 v3, 0, v3, s0
|
|
; GFX11-NEXT: v_or_b32_e32 v0, v6, v0
|
|
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
|
|
; GFX11-NEXT: v_or_b32_e32 v1, v4, v1
|
|
; GFX11-NEXT: v_or_b32_e32 v2, v5, v2
|
|
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
|
|
; GFX11-NEXT: v_or_b32_e32 v3, v7, v3
|
|
; GFX11-NEXT: ; return to shader part epilog
|
|
%result = call i128 @llvm.fshr.i128(i128 %lhs, i128 %rhs, i128 %amt)
|
|
%cast.result = bitcast i128 %result to <4 x float>
|
|
ret <4 x float> %cast.result
|
|
}
|
|
|
|
define amdgpu_ps <4 x float> @v_fshr_i128_svs(i128 inreg %lhs, i128 %rhs, i128 inreg %amt) {
|
|
; GFX6-LABEL: v_fshr_i128_svs:
|
|
; GFX6: ; %bb.0:
|
|
; GFX6-NEXT: s_lshl_b64 s[6:7], s[0:1], 1
|
|
; GFX6-NEXT: s_lshl_b64 s[2:3], s[2:3], 1
|
|
; GFX6-NEXT: s_lshr_b32 s0, s1, 31
|
|
; GFX6-NEXT: s_mov_b32 s1, 0
|
|
; GFX6-NEXT: s_or_b64 s[0:1], s[2:3], s[0:1]
|
|
; GFX6-NEXT: s_andn2_b32 s2, 0x7f, s4
|
|
; GFX6-NEXT: s_not_b32 s5, s4
|
|
; GFX6-NEXT: s_sub_i32 s12, s2, 64
|
|
; GFX6-NEXT: s_sub_i32 s8, 64, s2
|
|
; GFX6-NEXT: s_cmp_lt_u32 s2, 64
|
|
; GFX6-NEXT: s_cselect_b32 s13, 1, 0
|
|
; GFX6-NEXT: s_cmp_eq_u32 s2, 0
|
|
; GFX6-NEXT: s_cselect_b32 s14, 1, 0
|
|
; GFX6-NEXT: s_lshr_b64 s[8:9], s[6:7], s8
|
|
; GFX6-NEXT: s_lshl_b64 s[10:11], s[0:1], s5
|
|
; GFX6-NEXT: s_lshl_b64 s[2:3], s[6:7], s5
|
|
; GFX6-NEXT: s_or_b64 s[8:9], s[8:9], s[10:11]
|
|
; GFX6-NEXT: s_lshl_b64 s[6:7], s[6:7], s12
|
|
; GFX6-NEXT: s_cmp_lg_u32 s13, 0
|
|
; GFX6-NEXT: s_cselect_b64 s[2:3], s[2:3], 0
|
|
; GFX6-NEXT: s_cselect_b64 s[6:7], s[8:9], s[6:7]
|
|
; GFX6-NEXT: s_cmp_lg_u32 s14, 0
|
|
; GFX6-NEXT: s_cselect_b64 s[6:7], s[0:1], s[6:7]
|
|
; GFX6-NEXT: s_and_b32 s0, s4, 0x7f
|
|
; GFX6-NEXT: s_sub_i32 s1, s0, 64
|
|
; GFX6-NEXT: s_sub_i32 s4, 64, s0
|
|
; GFX6-NEXT: s_cmp_lt_u32 s0, 64
|
|
; GFX6-NEXT: s_cselect_b32 s5, 1, 0
|
|
; GFX6-NEXT: s_cmp_eq_u32 s0, 0
|
|
; GFX6-NEXT: v_lshr_b64 v[4:5], v[0:1], s0
|
|
; GFX6-NEXT: v_lshl_b64 v[6:7], v[2:3], s4
|
|
; GFX6-NEXT: s_cselect_b32 s8, 1, 0
|
|
; GFX6-NEXT: v_lshr_b64 v[8:9], v[2:3], s0
|
|
; GFX6-NEXT: v_lshr_b64 v[2:3], v[2:3], s1
|
|
; GFX6-NEXT: s_and_b32 s0, 1, s5
|
|
; GFX6-NEXT: v_or_b32_e32 v4, v4, v6
|
|
; GFX6-NEXT: v_or_b32_e32 v5, v5, v7
|
|
; GFX6-NEXT: v_cmp_ne_u32_e64 vcc, 0, s0
|
|
; GFX6-NEXT: s_and_b32 s0, 1, s8
|
|
; GFX6-NEXT: v_cndmask_b32_e32 v2, v2, v4, vcc
|
|
; GFX6-NEXT: v_cndmask_b32_e32 v3, v3, v5, vcc
|
|
; GFX6-NEXT: v_cmp_ne_u32_e64 s[0:1], 0, s0
|
|
; GFX6-NEXT: v_cndmask_b32_e64 v0, v2, v0, s[0:1]
|
|
; GFX6-NEXT: v_cndmask_b32_e64 v1, v3, v1, s[0:1]
|
|
; GFX6-NEXT: v_cndmask_b32_e32 v2, 0, v8, vcc
|
|
; GFX6-NEXT: v_cndmask_b32_e32 v3, 0, v9, vcc
|
|
; GFX6-NEXT: v_or_b32_e32 v0, s2, v0
|
|
; GFX6-NEXT: v_or_b32_e32 v1, s3, v1
|
|
; GFX6-NEXT: v_or_b32_e32 v2, s6, v2
|
|
; GFX6-NEXT: v_or_b32_e32 v3, s7, v3
|
|
; GFX6-NEXT: ; return to shader part epilog
|
|
;
|
|
; GFX8-LABEL: v_fshr_i128_svs:
|
|
; GFX8: ; %bb.0:
|
|
; GFX8-NEXT: s_lshl_b64 s[6:7], s[0:1], 1
|
|
; GFX8-NEXT: s_lshl_b64 s[2:3], s[2:3], 1
|
|
; GFX8-NEXT: s_lshr_b32 s0, s1, 31
|
|
; GFX8-NEXT: s_mov_b32 s1, 0
|
|
; GFX8-NEXT: s_or_b64 s[0:1], s[2:3], s[0:1]
|
|
; GFX8-NEXT: s_andn2_b32 s2, 0x7f, s4
|
|
; GFX8-NEXT: s_not_b32 s5, s4
|
|
; GFX8-NEXT: s_sub_i32 s12, s2, 64
|
|
; GFX8-NEXT: s_sub_i32 s8, 64, s2
|
|
; GFX8-NEXT: s_cmp_lt_u32 s2, 64
|
|
; GFX8-NEXT: s_cselect_b32 s13, 1, 0
|
|
; GFX8-NEXT: s_cmp_eq_u32 s2, 0
|
|
; GFX8-NEXT: s_cselect_b32 s14, 1, 0
|
|
; GFX8-NEXT: s_lshr_b64 s[8:9], s[6:7], s8
|
|
; GFX8-NEXT: s_lshl_b64 s[10:11], s[0:1], s5
|
|
; GFX8-NEXT: s_lshl_b64 s[2:3], s[6:7], s5
|
|
; GFX8-NEXT: s_or_b64 s[8:9], s[8:9], s[10:11]
|
|
; GFX8-NEXT: s_lshl_b64 s[6:7], s[6:7], s12
|
|
; GFX8-NEXT: s_cmp_lg_u32 s13, 0
|
|
; GFX8-NEXT: s_cselect_b64 s[2:3], s[2:3], 0
|
|
; GFX8-NEXT: s_cselect_b64 s[6:7], s[8:9], s[6:7]
|
|
; GFX8-NEXT: s_cmp_lg_u32 s14, 0
|
|
; GFX8-NEXT: s_cselect_b64 s[6:7], s[0:1], s[6:7]
|
|
; GFX8-NEXT: s_and_b32 s0, s4, 0x7f
|
|
; GFX8-NEXT: s_sub_i32 s1, s0, 64
|
|
; GFX8-NEXT: s_sub_i32 s4, 64, s0
|
|
; GFX8-NEXT: s_cmp_lt_u32 s0, 64
|
|
; GFX8-NEXT: s_cselect_b32 s5, 1, 0
|
|
; GFX8-NEXT: s_cmp_eq_u32 s0, 0
|
|
; GFX8-NEXT: v_lshrrev_b64 v[4:5], s0, v[0:1]
|
|
; GFX8-NEXT: v_lshlrev_b64 v[6:7], s4, v[2:3]
|
|
; GFX8-NEXT: s_cselect_b32 s8, 1, 0
|
|
; GFX8-NEXT: v_lshrrev_b64 v[8:9], s0, v[2:3]
|
|
; GFX8-NEXT: v_lshrrev_b64 v[2:3], s1, v[2:3]
|
|
; GFX8-NEXT: s_and_b32 s0, 1, s5
|
|
; GFX8-NEXT: v_or_b32_e32 v4, v4, v6
|
|
; GFX8-NEXT: v_or_b32_e32 v5, v5, v7
|
|
; GFX8-NEXT: v_cmp_ne_u32_e64 vcc, 0, s0
|
|
; GFX8-NEXT: s_and_b32 s0, 1, s8
|
|
; GFX8-NEXT: v_cndmask_b32_e32 v2, v2, v4, vcc
|
|
; GFX8-NEXT: v_cndmask_b32_e32 v3, v3, v5, vcc
|
|
; GFX8-NEXT: v_cmp_ne_u32_e64 s[0:1], 0, s0
|
|
; GFX8-NEXT: v_cndmask_b32_e64 v0, v2, v0, s[0:1]
|
|
; GFX8-NEXT: v_cndmask_b32_e64 v1, v3, v1, s[0:1]
|
|
; GFX8-NEXT: v_cndmask_b32_e32 v2, 0, v8, vcc
|
|
; GFX8-NEXT: v_cndmask_b32_e32 v3, 0, v9, vcc
|
|
; GFX8-NEXT: v_or_b32_e32 v0, s2, v0
|
|
; GFX8-NEXT: v_or_b32_e32 v1, s3, v1
|
|
; GFX8-NEXT: v_or_b32_e32 v2, s6, v2
|
|
; GFX8-NEXT: v_or_b32_e32 v3, s7, v3
|
|
; GFX8-NEXT: ; return to shader part epilog
|
|
;
|
|
; GFX9-LABEL: v_fshr_i128_svs:
|
|
; GFX9: ; %bb.0:
|
|
; GFX9-NEXT: s_lshl_b64 s[6:7], s[0:1], 1
|
|
; GFX9-NEXT: s_lshl_b64 s[2:3], s[2:3], 1
|
|
; GFX9-NEXT: s_lshr_b32 s0, s1, 31
|
|
; GFX9-NEXT: s_mov_b32 s1, 0
|
|
; GFX9-NEXT: s_or_b64 s[0:1], s[2:3], s[0:1]
|
|
; GFX9-NEXT: s_andn2_b32 s2, 0x7f, s4
|
|
; GFX9-NEXT: s_not_b32 s5, s4
|
|
; GFX9-NEXT: s_sub_i32 s12, s2, 64
|
|
; GFX9-NEXT: s_sub_i32 s8, 64, s2
|
|
; GFX9-NEXT: s_cmp_lt_u32 s2, 64
|
|
; GFX9-NEXT: s_cselect_b32 s13, 1, 0
|
|
; GFX9-NEXT: s_cmp_eq_u32 s2, 0
|
|
; GFX9-NEXT: s_cselect_b32 s14, 1, 0
|
|
; GFX9-NEXT: s_lshr_b64 s[8:9], s[6:7], s8
|
|
; GFX9-NEXT: s_lshl_b64 s[10:11], s[0:1], s5
|
|
; GFX9-NEXT: s_lshl_b64 s[2:3], s[6:7], s5
|
|
; GFX9-NEXT: s_or_b64 s[8:9], s[8:9], s[10:11]
|
|
; GFX9-NEXT: s_lshl_b64 s[6:7], s[6:7], s12
|
|
; GFX9-NEXT: s_cmp_lg_u32 s13, 0
|
|
; GFX9-NEXT: s_cselect_b64 s[2:3], s[2:3], 0
|
|
; GFX9-NEXT: s_cselect_b64 s[6:7], s[8:9], s[6:7]
|
|
; GFX9-NEXT: s_cmp_lg_u32 s14, 0
|
|
; GFX9-NEXT: s_cselect_b64 s[6:7], s[0:1], s[6:7]
|
|
; GFX9-NEXT: s_and_b32 s0, s4, 0x7f
|
|
; GFX9-NEXT: s_sub_i32 s1, s0, 64
|
|
; GFX9-NEXT: s_sub_i32 s4, 64, s0
|
|
; GFX9-NEXT: s_cmp_lt_u32 s0, 64
|
|
; GFX9-NEXT: s_cselect_b32 s5, 1, 0
|
|
; GFX9-NEXT: s_cmp_eq_u32 s0, 0
|
|
; GFX9-NEXT: v_lshrrev_b64 v[4:5], s0, v[0:1]
|
|
; GFX9-NEXT: v_lshlrev_b64 v[6:7], s4, v[2:3]
|
|
; GFX9-NEXT: s_cselect_b32 s8, 1, 0
|
|
; GFX9-NEXT: v_lshrrev_b64 v[8:9], s0, v[2:3]
|
|
; GFX9-NEXT: v_lshrrev_b64 v[2:3], s1, v[2:3]
|
|
; GFX9-NEXT: s_and_b32 s0, 1, s5
|
|
; GFX9-NEXT: v_or_b32_e32 v4, v4, v6
|
|
; GFX9-NEXT: v_or_b32_e32 v5, v5, v7
|
|
; GFX9-NEXT: v_cmp_ne_u32_e64 vcc, 0, s0
|
|
; GFX9-NEXT: s_and_b32 s0, 1, s8
|
|
; GFX9-NEXT: v_cndmask_b32_e32 v2, v2, v4, vcc
|
|
; GFX9-NEXT: v_cndmask_b32_e32 v3, v3, v5, vcc
|
|
; GFX9-NEXT: v_cmp_ne_u32_e64 s[0:1], 0, s0
|
|
; GFX9-NEXT: v_cndmask_b32_e64 v0, v2, v0, s[0:1]
|
|
; GFX9-NEXT: v_cndmask_b32_e64 v1, v3, v1, s[0:1]
|
|
; GFX9-NEXT: v_cndmask_b32_e32 v2, 0, v8, vcc
|
|
; GFX9-NEXT: v_cndmask_b32_e32 v3, 0, v9, vcc
|
|
; GFX9-NEXT: v_or_b32_e32 v0, s2, v0
|
|
; GFX9-NEXT: v_or_b32_e32 v1, s3, v1
|
|
; GFX9-NEXT: v_or_b32_e32 v2, s6, v2
|
|
; GFX9-NEXT: v_or_b32_e32 v3, s7, v3
|
|
; GFX9-NEXT: ; return to shader part epilog
|
|
;
|
|
; GFX10-LABEL: v_fshr_i128_svs:
|
|
; GFX10: ; %bb.0:
|
|
; GFX10-NEXT: s_lshl_b64 s[2:3], s[2:3], 1
|
|
; GFX10-NEXT: s_lshr_b32 s6, s1, 31
|
|
; GFX10-NEXT: s_mov_b32 s7, 0
|
|
; GFX10-NEXT: s_andn2_b32 s5, 0x7f, s4
|
|
; GFX10-NEXT: s_lshl_b64 s[0:1], s[0:1], 1
|
|
; GFX10-NEXT: s_or_b64 s[2:3], s[2:3], s[6:7]
|
|
; GFX10-NEXT: s_not_b32 s10, s4
|
|
; GFX10-NEXT: s_sub_i32 s12, s5, 64
|
|
; GFX10-NEXT: s_sub_i32 s6, 64, s5
|
|
; GFX10-NEXT: s_cmp_lt_u32 s5, 64
|
|
; GFX10-NEXT: s_cselect_b32 s13, 1, 0
|
|
; GFX10-NEXT: s_cmp_eq_u32 s5, 0
|
|
; GFX10-NEXT: s_cselect_b32 s5, 1, 0
|
|
; GFX10-NEXT: s_lshr_b64 s[6:7], s[0:1], s6
|
|
; GFX10-NEXT: s_lshl_b64 s[8:9], s[2:3], s10
|
|
; GFX10-NEXT: s_lshl_b64 s[10:11], s[0:1], s10
|
|
; GFX10-NEXT: s_or_b64 s[6:7], s[6:7], s[8:9]
|
|
; GFX10-NEXT: s_lshl_b64 s[0:1], s[0:1], s12
|
|
; GFX10-NEXT: s_cmp_lg_u32 s13, 0
|
|
; GFX10-NEXT: s_cselect_b64 s[8:9], s[10:11], 0
|
|
; GFX10-NEXT: s_cselect_b64 s[0:1], s[6:7], s[0:1]
|
|
; GFX10-NEXT: s_cmp_lg_u32 s5, 0
|
|
; GFX10-NEXT: s_cselect_b64 s[2:3], s[2:3], s[0:1]
|
|
; GFX10-NEXT: s_and_b32 s0, s4, 0x7f
|
|
; GFX10-NEXT: s_sub_i32 s1, 64, s0
|
|
; GFX10-NEXT: v_lshrrev_b64 v[4:5], s0, v[0:1]
|
|
; GFX10-NEXT: v_lshlrev_b64 v[6:7], s1, v[2:3]
|
|
; GFX10-NEXT: s_sub_i32 s1, s0, 64
|
|
; GFX10-NEXT: s_cmp_lt_u32 s0, 64
|
|
; GFX10-NEXT: v_lshrrev_b64 v[8:9], s1, v[2:3]
|
|
; GFX10-NEXT: s_cselect_b32 s4, 1, 0
|
|
; GFX10-NEXT: s_cmp_eq_u32 s0, 0
|
|
; GFX10-NEXT: v_or_b32_e32 v4, v4, v6
|
|
; GFX10-NEXT: s_cselect_b32 s5, 1, 0
|
|
; GFX10-NEXT: s_and_b32 s1, 1, s4
|
|
; GFX10-NEXT: v_or_b32_e32 v5, v5, v7
|
|
; GFX10-NEXT: v_cmp_ne_u32_e64 vcc_lo, 0, s1
|
|
; GFX10-NEXT: v_lshrrev_b64 v[2:3], s0, v[2:3]
|
|
; GFX10-NEXT: s_and_b32 s0, 1, s5
|
|
; GFX10-NEXT: v_cmp_ne_u32_e64 s0, 0, s0
|
|
; GFX10-NEXT: v_cndmask_b32_e32 v4, v8, v4, vcc_lo
|
|
; GFX10-NEXT: v_cndmask_b32_e32 v5, v9, v5, vcc_lo
|
|
; GFX10-NEXT: v_cndmask_b32_e32 v2, 0, v2, vcc_lo
|
|
; GFX10-NEXT: v_cndmask_b32_e32 v3, 0, v3, vcc_lo
|
|
; GFX10-NEXT: v_cndmask_b32_e64 v0, v4, v0, s0
|
|
; GFX10-NEXT: v_cndmask_b32_e64 v1, v5, v1, s0
|
|
; GFX10-NEXT: v_or_b32_e32 v2, s2, v2
|
|
; GFX10-NEXT: v_or_b32_e32 v3, s3, v3
|
|
; GFX10-NEXT: v_or_b32_e32 v0, s8, v0
|
|
; GFX10-NEXT: v_or_b32_e32 v1, s9, v1
|
|
; GFX10-NEXT: ; return to shader part epilog
|
|
;
|
|
; GFX11-LABEL: v_fshr_i128_svs:
|
|
; GFX11: ; %bb.0:
|
|
; GFX11-NEXT: s_lshl_b64 s[2:3], s[2:3], 1
|
|
; GFX11-NEXT: s_lshr_b32 s6, s1, 31
|
|
; GFX11-NEXT: s_mov_b32 s7, 0
|
|
; GFX11-NEXT: s_and_not1_b32 s5, 0x7f, s4
|
|
; GFX11-NEXT: s_lshl_b64 s[0:1], s[0:1], 1
|
|
; GFX11-NEXT: s_or_b64 s[2:3], s[2:3], s[6:7]
|
|
; GFX11-NEXT: s_not_b32 s10, s4
|
|
; GFX11-NEXT: s_sub_i32 s12, s5, 64
|
|
; GFX11-NEXT: s_sub_i32 s6, 64, s5
|
|
; GFX11-NEXT: s_cmp_lt_u32 s5, 64
|
|
; GFX11-NEXT: s_cselect_b32 s13, 1, 0
|
|
; GFX11-NEXT: s_cmp_eq_u32 s5, 0
|
|
; GFX11-NEXT: s_cselect_b32 s5, 1, 0
|
|
; GFX11-NEXT: s_lshr_b64 s[6:7], s[0:1], s6
|
|
; GFX11-NEXT: s_lshl_b64 s[8:9], s[2:3], s10
|
|
; GFX11-NEXT: s_lshl_b64 s[10:11], s[0:1], s10
|
|
; GFX11-NEXT: s_or_b64 s[6:7], s[6:7], s[8:9]
|
|
; GFX11-NEXT: s_lshl_b64 s[0:1], s[0:1], s12
|
|
; GFX11-NEXT: s_cmp_lg_u32 s13, 0
|
|
; GFX11-NEXT: s_cselect_b64 s[8:9], s[10:11], 0
|
|
; GFX11-NEXT: s_cselect_b64 s[0:1], s[6:7], s[0:1]
|
|
; GFX11-NEXT: s_cmp_lg_u32 s5, 0
|
|
; GFX11-NEXT: s_cselect_b64 s[2:3], s[2:3], s[0:1]
|
|
; GFX11-NEXT: s_and_b32 s0, s4, 0x7f
|
|
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
|
|
; GFX11-NEXT: s_sub_i32 s1, 64, s0
|
|
; GFX11-NEXT: v_lshrrev_b64 v[4:5], s0, v[0:1]
|
|
; GFX11-NEXT: v_lshlrev_b64 v[6:7], s1, v[2:3]
|
|
; GFX11-NEXT: s_sub_i32 s1, s0, 64
|
|
; GFX11-NEXT: s_cmp_lt_u32 s0, 64
|
|
; GFX11-NEXT: v_lshrrev_b64 v[8:9], s1, v[2:3]
|
|
; GFX11-NEXT: s_cselect_b32 s4, 1, 0
|
|
; GFX11-NEXT: s_cmp_eq_u32 s0, 0
|
|
; GFX11-NEXT: v_or_b32_e32 v4, v4, v6
|
|
; GFX11-NEXT: s_cselect_b32 s5, 1, 0
|
|
; GFX11-NEXT: s_and_b32 s1, 1, s4
|
|
; GFX11-NEXT: v_or_b32_e32 v5, v5, v7
|
|
; GFX11-NEXT: v_cmp_ne_u32_e64 vcc_lo, 0, s1
|
|
; GFX11-NEXT: v_lshrrev_b64 v[2:3], s0, v[2:3]
|
|
; GFX11-NEXT: s_and_b32 s0, 1, s5
|
|
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
|
|
; GFX11-NEXT: v_cmp_ne_u32_e64 s0, 0, s0
|
|
; GFX11-NEXT: v_dual_cndmask_b32 v4, v8, v4 :: v_dual_cndmask_b32 v5, v9, v5
|
|
; GFX11-NEXT: v_dual_cndmask_b32 v2, 0, v2 :: v_dual_cndmask_b32 v3, 0, v3
|
|
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
|
|
; GFX11-NEXT: v_cndmask_b32_e64 v0, v4, v0, s0
|
|
; GFX11-NEXT: v_cndmask_b32_e64 v1, v5, v1, s0
|
|
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4)
|
|
; GFX11-NEXT: v_or_b32_e32 v2, s2, v2
|
|
; GFX11-NEXT: v_or_b32_e32 v3, s3, v3
|
|
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
|
|
; GFX11-NEXT: v_or_b32_e32 v0, s8, v0
|
|
; GFX11-NEXT: v_or_b32_e32 v1, s9, v1
|
|
; GFX11-NEXT: ; return to shader part epilog
|
|
%result = call i128 @llvm.fshr.i128(i128 %lhs, i128 %rhs, i128 %amt)
|
|
%cast.result = bitcast i128 %result to <4 x float>
|
|
ret <4 x float> %cast.result
|
|
}
|
|
|
|
define amdgpu_ps <4 x float> @v_fshr_i128_vss(i128 %lhs, i128 inreg %rhs, i128 inreg %amt) {
|
|
; GFX6-LABEL: v_fshr_i128_vss:
|
|
; GFX6: ; %bb.0:
|
|
; GFX6-NEXT: v_lshl_b64 v[2:3], v[2:3], 1
|
|
; GFX6-NEXT: v_lshl_b64 v[4:5], v[0:1], 1
|
|
; GFX6-NEXT: v_lshrrev_b32_e32 v0, 31, v1
|
|
; GFX6-NEXT: s_andn2_b32 s5, 0x7f, s4
|
|
; GFX6-NEXT: v_or_b32_e32 v2, v2, v0
|
|
; GFX6-NEXT: s_sub_i32 s6, s5, 64
|
|
; GFX6-NEXT: s_sub_i32 s7, 64, s5
|
|
; GFX6-NEXT: s_cmp_lt_u32 s5, 64
|
|
; GFX6-NEXT: v_lshr_b64 v[0:1], v[4:5], s7
|
|
; GFX6-NEXT: v_lshl_b64 v[6:7], v[2:3], s5
|
|
; GFX6-NEXT: s_cselect_b32 s8, 1, 0
|
|
; GFX6-NEXT: s_cmp_eq_u32 s5, 0
|
|
; GFX6-NEXT: s_cselect_b32 s9, 1, 0
|
|
; GFX6-NEXT: v_lshl_b64 v[8:9], v[4:5], s5
|
|
; GFX6-NEXT: v_or_b32_e32 v6, v0, v6
|
|
; GFX6-NEXT: v_or_b32_e32 v7, v1, v7
|
|
; GFX6-NEXT: v_lshl_b64 v[0:1], v[4:5], s6
|
|
; GFX6-NEXT: s_and_b32 s5, 1, s8
|
|
; GFX6-NEXT: v_cmp_ne_u32_e64 vcc, 0, s5
|
|
; GFX6-NEXT: s_and_b32 s5, 1, s9
|
|
; GFX6-NEXT: v_cndmask_b32_e32 v4, 0, v8, vcc
|
|
; GFX6-NEXT: v_cndmask_b32_e32 v5, 0, v9, vcc
|
|
; GFX6-NEXT: v_cndmask_b32_e32 v0, v0, v6, vcc
|
|
; GFX6-NEXT: v_cndmask_b32_e32 v1, v1, v7, vcc
|
|
; GFX6-NEXT: v_cmp_ne_u32_e64 vcc, 0, s5
|
|
; GFX6-NEXT: s_and_b32 s5, s4, 0x7f
|
|
; GFX6-NEXT: s_sub_i32 s10, s5, 64
|
|
; GFX6-NEXT: s_sub_i32 s8, 64, s5
|
|
; GFX6-NEXT: s_cmp_lt_u32 s5, 64
|
|
; GFX6-NEXT: s_cselect_b32 s11, 1, 0
|
|
; GFX6-NEXT: s_cmp_eq_u32 s5, 0
|
|
; GFX6-NEXT: s_cselect_b32 s12, 1, 0
|
|
; GFX6-NEXT: s_lshr_b64 s[6:7], s[2:3], s4
|
|
; GFX6-NEXT: s_lshr_b64 s[4:5], s[0:1], s4
|
|
; GFX6-NEXT: s_lshl_b64 s[8:9], s[2:3], s8
|
|
; GFX6-NEXT: s_or_b64 s[4:5], s[4:5], s[8:9]
|
|
; GFX6-NEXT: s_lshr_b64 s[2:3], s[2:3], s10
|
|
; GFX6-NEXT: s_cmp_lg_u32 s11, 0
|
|
; GFX6-NEXT: s_cselect_b64 s[2:3], s[4:5], s[2:3]
|
|
; GFX6-NEXT: s_cmp_lg_u32 s12, 0
|
|
; GFX6-NEXT: s_cselect_b64 s[0:1], s[0:1], s[2:3]
|
|
; GFX6-NEXT: s_cmp_lg_u32 s11, 0
|
|
; GFX6-NEXT: v_cndmask_b32_e32 v2, v0, v2, vcc
|
|
; GFX6-NEXT: v_cndmask_b32_e32 v3, v1, v3, vcc
|
|
; GFX6-NEXT: s_cselect_b64 s[2:3], s[6:7], 0
|
|
; GFX6-NEXT: v_or_b32_e32 v0, s0, v4
|
|
; GFX6-NEXT: v_or_b32_e32 v1, s1, v5
|
|
; GFX6-NEXT: v_or_b32_e32 v2, s2, v2
|
|
; GFX6-NEXT: v_or_b32_e32 v3, s3, v3
|
|
; GFX6-NEXT: ; return to shader part epilog
|
|
;
|
|
; GFX8-LABEL: v_fshr_i128_vss:
|
|
; GFX8: ; %bb.0:
|
|
; GFX8-NEXT: v_lshlrev_b64 v[2:3], 1, v[2:3]
|
|
; GFX8-NEXT: v_lshlrev_b64 v[4:5], 1, v[0:1]
|
|
; GFX8-NEXT: v_lshrrev_b32_e32 v0, 31, v1
|
|
; GFX8-NEXT: s_andn2_b32 s5, 0x7f, s4
|
|
; GFX8-NEXT: v_or_b32_e32 v2, v2, v0
|
|
; GFX8-NEXT: s_sub_i32 s6, s5, 64
|
|
; GFX8-NEXT: s_sub_i32 s7, 64, s5
|
|
; GFX8-NEXT: s_cmp_lt_u32 s5, 64
|
|
; GFX8-NEXT: v_lshrrev_b64 v[0:1], s7, v[4:5]
|
|
; GFX8-NEXT: v_lshlrev_b64 v[6:7], s5, v[2:3]
|
|
; GFX8-NEXT: s_cselect_b32 s8, 1, 0
|
|
; GFX8-NEXT: s_cmp_eq_u32 s5, 0
|
|
; GFX8-NEXT: s_cselect_b32 s9, 1, 0
|
|
; GFX8-NEXT: v_lshlrev_b64 v[8:9], s5, v[4:5]
|
|
; GFX8-NEXT: v_or_b32_e32 v6, v0, v6
|
|
; GFX8-NEXT: v_or_b32_e32 v7, v1, v7
|
|
; GFX8-NEXT: v_lshlrev_b64 v[0:1], s6, v[4:5]
|
|
; GFX8-NEXT: s_and_b32 s5, 1, s8
|
|
; GFX8-NEXT: v_cmp_ne_u32_e64 vcc, 0, s5
|
|
; GFX8-NEXT: s_and_b32 s5, 1, s9
|
|
; GFX8-NEXT: v_cndmask_b32_e32 v4, 0, v8, vcc
|
|
; GFX8-NEXT: v_cndmask_b32_e32 v5, 0, v9, vcc
|
|
; GFX8-NEXT: v_cndmask_b32_e32 v0, v0, v6, vcc
|
|
; GFX8-NEXT: v_cndmask_b32_e32 v1, v1, v7, vcc
|
|
; GFX8-NEXT: v_cmp_ne_u32_e64 vcc, 0, s5
|
|
; GFX8-NEXT: s_and_b32 s5, s4, 0x7f
|
|
; GFX8-NEXT: s_sub_i32 s10, s5, 64
|
|
; GFX8-NEXT: s_sub_i32 s8, 64, s5
|
|
; GFX8-NEXT: s_cmp_lt_u32 s5, 64
|
|
; GFX8-NEXT: s_cselect_b32 s11, 1, 0
|
|
; GFX8-NEXT: s_cmp_eq_u32 s5, 0
|
|
; GFX8-NEXT: s_cselect_b32 s12, 1, 0
|
|
; GFX8-NEXT: s_lshr_b64 s[6:7], s[2:3], s4
|
|
; GFX8-NEXT: s_lshr_b64 s[4:5], s[0:1], s4
|
|
; GFX8-NEXT: s_lshl_b64 s[8:9], s[2:3], s8
|
|
; GFX8-NEXT: s_or_b64 s[4:5], s[4:5], s[8:9]
|
|
; GFX8-NEXT: s_lshr_b64 s[2:3], s[2:3], s10
|
|
; GFX8-NEXT: s_cmp_lg_u32 s11, 0
|
|
; GFX8-NEXT: s_cselect_b64 s[2:3], s[4:5], s[2:3]
|
|
; GFX8-NEXT: s_cmp_lg_u32 s12, 0
|
|
; GFX8-NEXT: s_cselect_b64 s[0:1], s[0:1], s[2:3]
|
|
; GFX8-NEXT: s_cmp_lg_u32 s11, 0
|
|
; GFX8-NEXT: v_cndmask_b32_e32 v2, v0, v2, vcc
|
|
; GFX8-NEXT: v_cndmask_b32_e32 v3, v1, v3, vcc
|
|
; GFX8-NEXT: s_cselect_b64 s[2:3], s[6:7], 0
|
|
; GFX8-NEXT: v_or_b32_e32 v0, s0, v4
|
|
; GFX8-NEXT: v_or_b32_e32 v1, s1, v5
|
|
; GFX8-NEXT: v_or_b32_e32 v2, s2, v2
|
|
; GFX8-NEXT: v_or_b32_e32 v3, s3, v3
|
|
; GFX8-NEXT: ; return to shader part epilog
|
|
;
|
|
; GFX9-LABEL: v_fshr_i128_vss:
|
|
; GFX9: ; %bb.0:
|
|
; GFX9-NEXT: v_lshlrev_b64 v[2:3], 1, v[2:3]
|
|
; GFX9-NEXT: v_lshlrev_b64 v[4:5], 1, v[0:1]
|
|
; GFX9-NEXT: v_lshrrev_b32_e32 v0, 31, v1
|
|
; GFX9-NEXT: s_andn2_b32 s5, 0x7f, s4
|
|
; GFX9-NEXT: v_or_b32_e32 v2, v2, v0
|
|
; GFX9-NEXT: s_sub_i32 s6, s5, 64
|
|
; GFX9-NEXT: s_sub_i32 s7, 64, s5
|
|
; GFX9-NEXT: s_cmp_lt_u32 s5, 64
|
|
; GFX9-NEXT: v_lshrrev_b64 v[0:1], s7, v[4:5]
|
|
; GFX9-NEXT: v_lshlrev_b64 v[6:7], s5, v[2:3]
|
|
; GFX9-NEXT: s_cselect_b32 s8, 1, 0
|
|
; GFX9-NEXT: s_cmp_eq_u32 s5, 0
|
|
; GFX9-NEXT: s_cselect_b32 s9, 1, 0
|
|
; GFX9-NEXT: v_lshlrev_b64 v[8:9], s5, v[4:5]
|
|
; GFX9-NEXT: v_or_b32_e32 v6, v0, v6
|
|
; GFX9-NEXT: v_or_b32_e32 v7, v1, v7
|
|
; GFX9-NEXT: v_lshlrev_b64 v[0:1], s6, v[4:5]
|
|
; GFX9-NEXT: s_and_b32 s5, 1, s8
|
|
; GFX9-NEXT: v_cmp_ne_u32_e64 vcc, 0, s5
|
|
; GFX9-NEXT: s_and_b32 s5, 1, s9
|
|
; GFX9-NEXT: v_cndmask_b32_e32 v4, 0, v8, vcc
|
|
; GFX9-NEXT: v_cndmask_b32_e32 v5, 0, v9, vcc
|
|
; GFX9-NEXT: v_cndmask_b32_e32 v0, v0, v6, vcc
|
|
; GFX9-NEXT: v_cndmask_b32_e32 v1, v1, v7, vcc
|
|
; GFX9-NEXT: v_cmp_ne_u32_e64 vcc, 0, s5
|
|
; GFX9-NEXT: s_and_b32 s5, s4, 0x7f
|
|
; GFX9-NEXT: s_sub_i32 s10, s5, 64
|
|
; GFX9-NEXT: s_sub_i32 s8, 64, s5
|
|
; GFX9-NEXT: s_cmp_lt_u32 s5, 64
|
|
; GFX9-NEXT: s_cselect_b32 s11, 1, 0
|
|
; GFX9-NEXT: s_cmp_eq_u32 s5, 0
|
|
; GFX9-NEXT: s_cselect_b32 s12, 1, 0
|
|
; GFX9-NEXT: s_lshr_b64 s[6:7], s[2:3], s4
|
|
; GFX9-NEXT: s_lshr_b64 s[4:5], s[0:1], s4
|
|
; GFX9-NEXT: s_lshl_b64 s[8:9], s[2:3], s8
|
|
; GFX9-NEXT: s_or_b64 s[4:5], s[4:5], s[8:9]
|
|
; GFX9-NEXT: s_lshr_b64 s[2:3], s[2:3], s10
|
|
; GFX9-NEXT: s_cmp_lg_u32 s11, 0
|
|
; GFX9-NEXT: s_cselect_b64 s[2:3], s[4:5], s[2:3]
|
|
; GFX9-NEXT: s_cmp_lg_u32 s12, 0
|
|
; GFX9-NEXT: s_cselect_b64 s[0:1], s[0:1], s[2:3]
|
|
; GFX9-NEXT: s_cmp_lg_u32 s11, 0
|
|
; GFX9-NEXT: v_cndmask_b32_e32 v2, v0, v2, vcc
|
|
; GFX9-NEXT: v_cndmask_b32_e32 v3, v1, v3, vcc
|
|
; GFX9-NEXT: s_cselect_b64 s[2:3], s[6:7], 0
|
|
; GFX9-NEXT: v_or_b32_e32 v0, s0, v4
|
|
; GFX9-NEXT: v_or_b32_e32 v1, s1, v5
|
|
; GFX9-NEXT: v_or_b32_e32 v2, s2, v2
|
|
; GFX9-NEXT: v_or_b32_e32 v3, s3, v3
|
|
; GFX9-NEXT: ; return to shader part epilog
|
|
;
|
|
; GFX10-LABEL: v_fshr_i128_vss:
|
|
; GFX10: ; %bb.0:
|
|
; GFX10-NEXT: v_lshlrev_b64 v[2:3], 1, v[2:3]
|
|
; GFX10-NEXT: v_lshrrev_b32_e32 v4, 31, v1
|
|
; GFX10-NEXT: v_lshlrev_b64 v[0:1], 1, v[0:1]
|
|
; GFX10-NEXT: s_andn2_b32 s5, 0x7f, s4
|
|
; GFX10-NEXT: s_sub_i32 s6, s5, 64
|
|
; GFX10-NEXT: v_or_b32_e32 v2, v2, v4
|
|
; GFX10-NEXT: s_sub_i32 s7, 64, s5
|
|
; GFX10-NEXT: s_cmp_lt_u32 s5, 64
|
|
; GFX10-NEXT: v_lshrrev_b64 v[4:5], s7, v[0:1]
|
|
; GFX10-NEXT: s_cselect_b32 s8, 1, 0
|
|
; GFX10-NEXT: s_cmp_eq_u32 s5, 0
|
|
; GFX10-NEXT: v_lshlrev_b64 v[6:7], s5, v[2:3]
|
|
; GFX10-NEXT: s_cselect_b32 s9, 1, 0
|
|
; GFX10-NEXT: v_lshlrev_b64 v[8:9], s5, v[0:1]
|
|
; GFX10-NEXT: s_and_b32 s5, 1, s8
|
|
; GFX10-NEXT: v_lshlrev_b64 v[0:1], s6, v[0:1]
|
|
; GFX10-NEXT: v_cmp_ne_u32_e64 vcc_lo, 0, s5
|
|
; GFX10-NEXT: s_and_b32 s5, s4, 0x7f
|
|
; GFX10-NEXT: v_or_b32_e32 v4, v4, v6
|
|
; GFX10-NEXT: v_or_b32_e32 v5, v5, v7
|
|
; GFX10-NEXT: s_and_b32 s6, 1, s9
|
|
; GFX10-NEXT: s_sub_i32 s10, s5, 64
|
|
; GFX10-NEXT: s_sub_i32 s8, 64, s5
|
|
; GFX10-NEXT: s_cmp_lt_u32 s5, 64
|
|
; GFX10-NEXT: v_cndmask_b32_e32 v6, 0, v8, vcc_lo
|
|
; GFX10-NEXT: s_cselect_b32 s11, 1, 0
|
|
; GFX10-NEXT: s_cmp_eq_u32 s5, 0
|
|
; GFX10-NEXT: v_cndmask_b32_e32 v7, 0, v9, vcc_lo
|
|
; GFX10-NEXT: v_cndmask_b32_e32 v0, v0, v4, vcc_lo
|
|
; GFX10-NEXT: v_cndmask_b32_e32 v1, v1, v5, vcc_lo
|
|
; GFX10-NEXT: v_cmp_ne_u32_e64 vcc_lo, 0, s6
|
|
; GFX10-NEXT: s_cselect_b32 s12, 1, 0
|
|
; GFX10-NEXT: s_lshr_b64 s[6:7], s[0:1], s4
|
|
; GFX10-NEXT: s_lshl_b64 s[8:9], s[2:3], s8
|
|
; GFX10-NEXT: s_lshr_b64 s[4:5], s[2:3], s4
|
|
; GFX10-NEXT: s_or_b64 s[6:7], s[6:7], s[8:9]
|
|
; GFX10-NEXT: s_lshr_b64 s[2:3], s[2:3], s10
|
|
; GFX10-NEXT: s_cmp_lg_u32 s11, 0
|
|
; GFX10-NEXT: v_cndmask_b32_e32 v2, v0, v2, vcc_lo
|
|
; GFX10-NEXT: s_cselect_b64 s[2:3], s[6:7], s[2:3]
|
|
; GFX10-NEXT: s_cmp_lg_u32 s12, 0
|
|
; GFX10-NEXT: v_cndmask_b32_e32 v3, v1, v3, vcc_lo
|
|
; GFX10-NEXT: s_cselect_b64 s[0:1], s[0:1], s[2:3]
|
|
; GFX10-NEXT: s_cmp_lg_u32 s11, 0
|
|
; GFX10-NEXT: v_or_b32_e32 v0, s0, v6
|
|
; GFX10-NEXT: s_cselect_b64 s[2:3], s[4:5], 0
|
|
; GFX10-NEXT: v_or_b32_e32 v1, s1, v7
|
|
; GFX10-NEXT: v_or_b32_e32 v2, s2, v2
|
|
; GFX10-NEXT: v_or_b32_e32 v3, s3, v3
|
|
; GFX10-NEXT: ; return to shader part epilog
|
|
;
|
|
; GFX11-LABEL: v_fshr_i128_vss:
|
|
; GFX11: ; %bb.0:
|
|
; GFX11-NEXT: v_lshlrev_b64 v[2:3], 1, v[2:3]
|
|
; GFX11-NEXT: v_lshrrev_b32_e32 v4, 31, v1
|
|
; GFX11-NEXT: v_lshlrev_b64 v[0:1], 1, v[0:1]
|
|
; GFX11-NEXT: s_and_not1_b32 s5, 0x7f, s4
|
|
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_2)
|
|
; GFX11-NEXT: s_sub_i32 s6, s5, 64
|
|
; GFX11-NEXT: v_or_b32_e32 v2, v2, v4
|
|
; GFX11-NEXT: s_sub_i32 s7, 64, s5
|
|
; GFX11-NEXT: s_cmp_lt_u32 s5, 64
|
|
; GFX11-NEXT: v_lshrrev_b64 v[4:5], s7, v[0:1]
|
|
; GFX11-NEXT: s_cselect_b32 s8, 1, 0
|
|
; GFX11-NEXT: s_cmp_eq_u32 s5, 0
|
|
; GFX11-NEXT: v_lshlrev_b64 v[6:7], s5, v[2:3]
|
|
; GFX11-NEXT: s_cselect_b32 s9, 1, 0
|
|
; GFX11-NEXT: v_lshlrev_b64 v[8:9], s5, v[0:1]
|
|
; GFX11-NEXT: s_and_b32 s5, 1, s8
|
|
; GFX11-NEXT: v_lshlrev_b64 v[0:1], s6, v[0:1]
|
|
; GFX11-NEXT: v_cmp_ne_u32_e64 vcc_lo, 0, s5
|
|
; GFX11-NEXT: s_and_b32 s5, s4, 0x7f
|
|
; GFX11-NEXT: v_or_b32_e32 v4, v4, v6
|
|
; GFX11-NEXT: v_or_b32_e32 v5, v5, v7
|
|
; GFX11-NEXT: s_and_b32 s6, 1, s9
|
|
; GFX11-NEXT: s_sub_i32 s10, s5, 64
|
|
; GFX11-NEXT: s_sub_i32 s8, 64, s5
|
|
; GFX11-NEXT: s_cmp_lt_u32 s5, 64
|
|
; GFX11-NEXT: v_dual_cndmask_b32 v6, 0, v8 :: v_dual_cndmask_b32 v7, 0, v9
|
|
; GFX11-NEXT: s_cselect_b32 s11, 1, 0
|
|
; GFX11-NEXT: s_cmp_eq_u32 s5, 0
|
|
; GFX11-NEXT: v_dual_cndmask_b32 v0, v0, v4 :: v_dual_cndmask_b32 v1, v1, v5
|
|
; GFX11-NEXT: v_cmp_ne_u32_e64 vcc_lo, 0, s6
|
|
; GFX11-NEXT: s_cselect_b32 s12, 1, 0
|
|
; GFX11-NEXT: s_lshr_b64 s[6:7], s[0:1], s4
|
|
; GFX11-NEXT: s_lshl_b64 s[8:9], s[2:3], s8
|
|
; GFX11-NEXT: s_lshr_b64 s[4:5], s[2:3], s4
|
|
; GFX11-NEXT: s_or_b64 s[6:7], s[6:7], s[8:9]
|
|
; GFX11-NEXT: s_lshr_b64 s[2:3], s[2:3], s10
|
|
; GFX11-NEXT: s_cmp_lg_u32 s11, 0
|
|
; GFX11-NEXT: v_dual_cndmask_b32 v2, v0, v2 :: v_dual_cndmask_b32 v3, v1, v3
|
|
; GFX11-NEXT: s_cselect_b64 s[2:3], s[6:7], s[2:3]
|
|
; GFX11-NEXT: s_cmp_lg_u32 s12, 0
|
|
; GFX11-NEXT: s_cselect_b64 s[0:1], s[0:1], s[2:3]
|
|
; GFX11-NEXT: s_cmp_lg_u32 s11, 0
|
|
; GFX11-NEXT: v_or_b32_e32 v0, s0, v6
|
|
; GFX11-NEXT: s_cselect_b64 s[2:3], s[4:5], 0
|
|
; GFX11-NEXT: v_or_b32_e32 v1, s1, v7
|
|
; GFX11-NEXT: v_or_b32_e32 v2, s2, v2
|
|
; GFX11-NEXT: v_or_b32_e32 v3, s3, v3
|
|
; GFX11-NEXT: ; return to shader part epilog
|
|
%result = call i128 @llvm.fshr.i128(i128 %lhs, i128 %rhs, i128 %amt)
|
|
%cast.result = bitcast i128 %result to <4 x float>
|
|
ret <4 x float> %cast.result
|
|
}
|
|
|
|
define amdgpu_ps i128 @s_fshr_i128_65(i128 inreg %lhs, i128 inreg %rhs) {
|
|
; GFX6-LABEL: s_fshr_i128_65:
|
|
; GFX6: ; %bb.0:
|
|
; GFX6-NEXT: s_mov_b32 s4, 0
|
|
; GFX6-NEXT: s_lshl_b32 s5, s0, 31
|
|
; GFX6-NEXT: s_lshl_b32 s3, s2, 31
|
|
; GFX6-NEXT: s_mov_b32 s2, s4
|
|
; GFX6-NEXT: s_lshr_b64 s[0:1], s[0:1], 1
|
|
; GFX6-NEXT: s_or_b64 s[2:3], s[2:3], s[0:1]
|
|
; GFX6-NEXT: s_lshr_b64 s[0:1], s[6:7], 1
|
|
; GFX6-NEXT: s_or_b64 s[0:1], s[4:5], s[0:1]
|
|
; GFX6-NEXT: ; return to shader part epilog
|
|
;
|
|
; GFX8-LABEL: s_fshr_i128_65:
|
|
; GFX8: ; %bb.0:
|
|
; GFX8-NEXT: s_mov_b32 s4, 0
|
|
; GFX8-NEXT: s_lshl_b32 s5, s0, 31
|
|
; GFX8-NEXT: s_lshl_b32 s3, s2, 31
|
|
; GFX8-NEXT: s_mov_b32 s2, s4
|
|
; GFX8-NEXT: s_lshr_b64 s[0:1], s[0:1], 1
|
|
; GFX8-NEXT: s_or_b64 s[2:3], s[2:3], s[0:1]
|
|
; GFX8-NEXT: s_lshr_b64 s[0:1], s[6:7], 1
|
|
; GFX8-NEXT: s_or_b64 s[0:1], s[4:5], s[0:1]
|
|
; GFX8-NEXT: ; return to shader part epilog
|
|
;
|
|
; GFX9-LABEL: s_fshr_i128_65:
|
|
; GFX9: ; %bb.0:
|
|
; GFX9-NEXT: s_mov_b32 s4, 0
|
|
; GFX9-NEXT: s_lshl_b32 s5, s0, 31
|
|
; GFX9-NEXT: s_lshl_b32 s3, s2, 31
|
|
; GFX9-NEXT: s_mov_b32 s2, s4
|
|
; GFX9-NEXT: s_lshr_b64 s[0:1], s[0:1], 1
|
|
; GFX9-NEXT: s_or_b64 s[2:3], s[2:3], s[0:1]
|
|
; GFX9-NEXT: s_lshr_b64 s[0:1], s[6:7], 1
|
|
; GFX9-NEXT: s_or_b64 s[0:1], s[4:5], s[0:1]
|
|
; GFX9-NEXT: ; return to shader part epilog
|
|
;
|
|
; GFX10-LABEL: s_fshr_i128_65:
|
|
; GFX10: ; %bb.0:
|
|
; GFX10-NEXT: s_mov_b32 s4, 0
|
|
; GFX10-NEXT: s_lshl_b32 s5, s0, 31
|
|
; GFX10-NEXT: s_lshl_b32 s3, s2, 31
|
|
; GFX10-NEXT: s_mov_b32 s2, s4
|
|
; GFX10-NEXT: s_lshr_b64 s[6:7], s[6:7], 1
|
|
; GFX10-NEXT: s_lshr_b64 s[8:9], s[0:1], 1
|
|
; GFX10-NEXT: s_or_b64 s[0:1], s[4:5], s[6:7]
|
|
; GFX10-NEXT: s_or_b64 s[2:3], s[2:3], s[8:9]
|
|
; GFX10-NEXT: ; return to shader part epilog
|
|
;
|
|
; GFX11-LABEL: s_fshr_i128_65:
|
|
; GFX11: ; %bb.0:
|
|
; GFX11-NEXT: s_mov_b32 s4, 0
|
|
; GFX11-NEXT: s_lshl_b32 s5, s0, 31
|
|
; GFX11-NEXT: s_lshl_b32 s3, s2, 31
|
|
; GFX11-NEXT: s_mov_b32 s2, s4
|
|
; GFX11-NEXT: s_lshr_b64 s[6:7], s[6:7], 1
|
|
; GFX11-NEXT: s_lshr_b64 s[8:9], s[0:1], 1
|
|
; GFX11-NEXT: s_or_b64 s[0:1], s[4:5], s[6:7]
|
|
; GFX11-NEXT: s_or_b64 s[2:3], s[2:3], s[8:9]
|
|
; GFX11-NEXT: ; return to shader part epilog
|
|
%result = call i128 @llvm.fshr.i128(i128 %lhs, i128 %rhs, i128 65)
|
|
ret i128 %result
|
|
}
|
|
|
|
define i128 @v_fshr_i128_65(i128 %lhs, i128 %rhs) {
|
|
; GFX6-LABEL: v_fshr_i128_65:
|
|
; GFX6: ; %bb.0:
|
|
; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; GFX6-NEXT: v_lshlrev_b32_e32 v4, 31, v0
|
|
; GFX6-NEXT: v_lshlrev_b32_e32 v5, 31, v2
|
|
; GFX6-NEXT: v_lshr_b64 v[2:3], v[0:1], 1
|
|
; GFX6-NEXT: v_lshr_b64 v[0:1], v[6:7], 1
|
|
; GFX6-NEXT: v_or_b32_e32 v3, v5, v3
|
|
; GFX6-NEXT: v_or_b32_e32 v1, v4, v1
|
|
; GFX6-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; GFX8-LABEL: v_fshr_i128_65:
|
|
; GFX8: ; %bb.0:
|
|
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; GFX8-NEXT: v_lshlrev_b32_e32 v4, 31, v0
|
|
; GFX8-NEXT: v_lshlrev_b32_e32 v5, 31, v2
|
|
; GFX8-NEXT: v_lshrrev_b64 v[2:3], 1, v[0:1]
|
|
; GFX8-NEXT: v_lshrrev_b64 v[0:1], 1, v[6:7]
|
|
; GFX8-NEXT: v_or_b32_e32 v3, v5, v3
|
|
; GFX8-NEXT: v_or_b32_e32 v1, v4, v1
|
|
; GFX8-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; GFX9-LABEL: v_fshr_i128_65:
|
|
; GFX9: ; %bb.0:
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; GFX9-NEXT: v_mov_b32_e32 v8, v2
|
|
; GFX9-NEXT: v_lshrrev_b64 v[2:3], 1, v[0:1]
|
|
; GFX9-NEXT: v_lshrrev_b64 v[4:5], 1, v[6:7]
|
|
; GFX9-NEXT: v_lshl_or_b32 v3, v8, 31, v3
|
|
; GFX9-NEXT: v_lshl_or_b32 v1, v0, 31, v5
|
|
; GFX9-NEXT: v_mov_b32_e32 v0, v4
|
|
; GFX9-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; GFX10-LABEL: v_fshr_i128_65:
|
|
; GFX10: ; %bb.0:
|
|
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; GFX10-NEXT: v_mov_b32_e32 v8, v2
|
|
; GFX10-NEXT: v_lshrrev_b64 v[4:5], 1, v[6:7]
|
|
; GFX10-NEXT: v_lshrrev_b64 v[2:3], 1, v[0:1]
|
|
; GFX10-NEXT: v_lshl_or_b32 v1, v0, 31, v5
|
|
; GFX10-NEXT: v_lshl_or_b32 v3, v8, 31, v3
|
|
; GFX10-NEXT: v_mov_b32_e32 v0, v4
|
|
; GFX10-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; GFX11-LABEL: v_fshr_i128_65:
|
|
; GFX11: ; %bb.0:
|
|
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; GFX11-NEXT: v_mov_b32_e32 v8, v2
|
|
; GFX11-NEXT: v_lshrrev_b64 v[4:5], 1, v[6:7]
|
|
; GFX11-NEXT: v_lshrrev_b64 v[2:3], 1, v[0:1]
|
|
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
|
|
; GFX11-NEXT: v_lshl_or_b32 v1, v0, 31, v5
|
|
; GFX11-NEXT: v_lshl_or_b32 v3, v8, 31, v3
|
|
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
|
|
; GFX11-NEXT: v_mov_b32_e32 v0, v4
|
|
; GFX11-NEXT: s_setpc_b64 s[30:31]
|
|
%result = call i128 @llvm.fshr.i128(i128 %lhs, i128 %rhs, i128 65)
|
|
ret i128 %result
|
|
}
|
|
|
|
define amdgpu_ps <2 x i128> @s_fshr_v2i128(<2 x i128> inreg %lhs, <2 x i128> inreg %rhs, <2 x i128> inreg %amt) {
|
|
; GFX6-LABEL: s_fshr_v2i128:
|
|
; GFX6: ; %bb.0:
|
|
; GFX6-NEXT: s_lshl_b64 s[2:3], s[2:3], 1
|
|
; GFX6-NEXT: s_lshr_b32 s22, s1, 31
|
|
; GFX6-NEXT: s_mov_b32 s23, 0
|
|
; GFX6-NEXT: s_lshl_b64 s[18:19], s[0:1], 1
|
|
; GFX6-NEXT: s_or_b64 s[0:1], s[2:3], s[22:23]
|
|
; GFX6-NEXT: s_andn2_b32 s2, 0x7f, s16
|
|
; GFX6-NEXT: s_not_b32 s17, s16
|
|
; GFX6-NEXT: s_sub_i32 s21, s2, 64
|
|
; GFX6-NEXT: s_sub_i32 s22, 64, s2
|
|
; GFX6-NEXT: s_cmp_lt_u32 s2, 64
|
|
; GFX6-NEXT: s_cselect_b32 s28, 1, 0
|
|
; GFX6-NEXT: s_cmp_eq_u32 s2, 0
|
|
; GFX6-NEXT: s_cselect_b32 s29, 1, 0
|
|
; GFX6-NEXT: s_lshr_b64 s[24:25], s[18:19], s22
|
|
; GFX6-NEXT: s_lshl_b64 s[26:27], s[0:1], s17
|
|
; GFX6-NEXT: s_lshl_b64 s[2:3], s[18:19], s17
|
|
; GFX6-NEXT: s_or_b64 s[24:25], s[24:25], s[26:27]
|
|
; GFX6-NEXT: s_lshl_b64 s[18:19], s[18:19], s21
|
|
; GFX6-NEXT: s_cmp_lg_u32 s28, 0
|
|
; GFX6-NEXT: s_cselect_b64 s[2:3], s[2:3], 0
|
|
; GFX6-NEXT: s_cselect_b64 s[18:19], s[24:25], s[18:19]
|
|
; GFX6-NEXT: s_cmp_lg_u32 s29, 0
|
|
; GFX6-NEXT: s_cselect_b64 s[18:19], s[0:1], s[18:19]
|
|
; GFX6-NEXT: s_and_b32 s0, s16, 0x7f
|
|
; GFX6-NEXT: s_sub_i32 s21, s0, 64
|
|
; GFX6-NEXT: s_sub_i32 s22, 64, s0
|
|
; GFX6-NEXT: s_cmp_lt_u32 s0, 64
|
|
; GFX6-NEXT: s_cselect_b32 s26, 1, 0
|
|
; GFX6-NEXT: s_cmp_eq_u32 s0, 0
|
|
; GFX6-NEXT: s_cselect_b32 s27, 1, 0
|
|
; GFX6-NEXT: s_lshr_b64 s[0:1], s[10:11], s16
|
|
; GFX6-NEXT: s_lshr_b64 s[16:17], s[8:9], s16
|
|
; GFX6-NEXT: s_lshl_b64 s[24:25], s[10:11], s22
|
|
; GFX6-NEXT: s_or_b64 s[16:17], s[16:17], s[24:25]
|
|
; GFX6-NEXT: s_lshr_b64 s[10:11], s[10:11], s21
|
|
; GFX6-NEXT: s_cmp_lg_u32 s26, 0
|
|
; GFX6-NEXT: s_cselect_b64 s[10:11], s[16:17], s[10:11]
|
|
; GFX6-NEXT: s_cmp_lg_u32 s27, 0
|
|
; GFX6-NEXT: s_cselect_b64 s[8:9], s[8:9], s[10:11]
|
|
; GFX6-NEXT: s_cmp_lg_u32 s26, 0
|
|
; GFX6-NEXT: s_cselect_b64 s[10:11], s[0:1], 0
|
|
; GFX6-NEXT: s_lshl_b64 s[6:7], s[6:7], 1
|
|
; GFX6-NEXT: s_lshr_b32 s22, s5, 31
|
|
; GFX6-NEXT: s_or_b64 s[0:1], s[2:3], s[8:9]
|
|
; GFX6-NEXT: s_lshl_b64 s[8:9], s[4:5], 1
|
|
; GFX6-NEXT: s_or_b64 s[4:5], s[6:7], s[22:23]
|
|
; GFX6-NEXT: s_andn2_b32 s6, 0x7f, s20
|
|
; GFX6-NEXT: s_or_b64 s[2:3], s[18:19], s[10:11]
|
|
; GFX6-NEXT: s_not_b32 s16, s20
|
|
; GFX6-NEXT: s_sub_i32 s18, s6, 64
|
|
; GFX6-NEXT: s_sub_i32 s10, 64, s6
|
|
; GFX6-NEXT: s_cmp_lt_u32 s6, 64
|
|
; GFX6-NEXT: s_cselect_b32 s19, 1, 0
|
|
; GFX6-NEXT: s_cmp_eq_u32 s6, 0
|
|
; GFX6-NEXT: s_cselect_b32 s21, 1, 0
|
|
; GFX6-NEXT: s_lshl_b64 s[6:7], s[8:9], s16
|
|
; GFX6-NEXT: s_lshr_b64 s[10:11], s[8:9], s10
|
|
; GFX6-NEXT: s_lshl_b64 s[16:17], s[4:5], s16
|
|
; GFX6-NEXT: s_or_b64 s[10:11], s[10:11], s[16:17]
|
|
; GFX6-NEXT: s_lshl_b64 s[8:9], s[8:9], s18
|
|
; GFX6-NEXT: s_cmp_lg_u32 s19, 0
|
|
; GFX6-NEXT: s_cselect_b64 s[6:7], s[6:7], 0
|
|
; GFX6-NEXT: s_cselect_b64 s[8:9], s[10:11], s[8:9]
|
|
; GFX6-NEXT: s_cmp_lg_u32 s21, 0
|
|
; GFX6-NEXT: s_cselect_b64 s[8:9], s[4:5], s[8:9]
|
|
; GFX6-NEXT: s_and_b32 s4, s20, 0x7f
|
|
; GFX6-NEXT: s_sub_i32 s18, s4, 64
|
|
; GFX6-NEXT: s_sub_i32 s16, 64, s4
|
|
; GFX6-NEXT: s_cmp_lt_u32 s4, 64
|
|
; GFX6-NEXT: s_cselect_b32 s19, 1, 0
|
|
; GFX6-NEXT: s_cmp_eq_u32 s4, 0
|
|
; GFX6-NEXT: s_cselect_b32 s21, 1, 0
|
|
; GFX6-NEXT: s_lshr_b64 s[10:11], s[12:13], s20
|
|
; GFX6-NEXT: s_lshl_b64 s[16:17], s[14:15], s16
|
|
; GFX6-NEXT: s_lshr_b64 s[4:5], s[14:15], s20
|
|
; GFX6-NEXT: s_or_b64 s[10:11], s[10:11], s[16:17]
|
|
; GFX6-NEXT: s_lshr_b64 s[14:15], s[14:15], s18
|
|
; GFX6-NEXT: s_cmp_lg_u32 s19, 0
|
|
; GFX6-NEXT: s_cselect_b64 s[10:11], s[10:11], s[14:15]
|
|
; GFX6-NEXT: s_cmp_lg_u32 s21, 0
|
|
; GFX6-NEXT: s_cselect_b64 s[10:11], s[12:13], s[10:11]
|
|
; GFX6-NEXT: s_cmp_lg_u32 s19, 0
|
|
; GFX6-NEXT: s_cselect_b64 s[12:13], s[4:5], 0
|
|
; GFX6-NEXT: s_or_b64 s[4:5], s[6:7], s[10:11]
|
|
; GFX6-NEXT: s_or_b64 s[6:7], s[8:9], s[12:13]
|
|
; GFX6-NEXT: ; return to shader part epilog
|
|
;
|
|
; GFX8-LABEL: s_fshr_v2i128:
|
|
; GFX8: ; %bb.0:
|
|
; GFX8-NEXT: s_lshl_b64 s[2:3], s[2:3], 1
|
|
; GFX8-NEXT: s_lshr_b32 s22, s1, 31
|
|
; GFX8-NEXT: s_mov_b32 s23, 0
|
|
; GFX8-NEXT: s_lshl_b64 s[18:19], s[0:1], 1
|
|
; GFX8-NEXT: s_or_b64 s[0:1], s[2:3], s[22:23]
|
|
; GFX8-NEXT: s_andn2_b32 s2, 0x7f, s16
|
|
; GFX8-NEXT: s_not_b32 s17, s16
|
|
; GFX8-NEXT: s_sub_i32 s21, s2, 64
|
|
; GFX8-NEXT: s_sub_i32 s22, 64, s2
|
|
; GFX8-NEXT: s_cmp_lt_u32 s2, 64
|
|
; GFX8-NEXT: s_cselect_b32 s28, 1, 0
|
|
; GFX8-NEXT: s_cmp_eq_u32 s2, 0
|
|
; GFX8-NEXT: s_cselect_b32 s29, 1, 0
|
|
; GFX8-NEXT: s_lshr_b64 s[24:25], s[18:19], s22
|
|
; GFX8-NEXT: s_lshl_b64 s[26:27], s[0:1], s17
|
|
; GFX8-NEXT: s_lshl_b64 s[2:3], s[18:19], s17
|
|
; GFX8-NEXT: s_or_b64 s[24:25], s[24:25], s[26:27]
|
|
; GFX8-NEXT: s_lshl_b64 s[18:19], s[18:19], s21
|
|
; GFX8-NEXT: s_cmp_lg_u32 s28, 0
|
|
; GFX8-NEXT: s_cselect_b64 s[2:3], s[2:3], 0
|
|
; GFX8-NEXT: s_cselect_b64 s[18:19], s[24:25], s[18:19]
|
|
; GFX8-NEXT: s_cmp_lg_u32 s29, 0
|
|
; GFX8-NEXT: s_cselect_b64 s[18:19], s[0:1], s[18:19]
|
|
; GFX8-NEXT: s_and_b32 s0, s16, 0x7f
|
|
; GFX8-NEXT: s_sub_i32 s21, s0, 64
|
|
; GFX8-NEXT: s_sub_i32 s22, 64, s0
|
|
; GFX8-NEXT: s_cmp_lt_u32 s0, 64
|
|
; GFX8-NEXT: s_cselect_b32 s26, 1, 0
|
|
; GFX8-NEXT: s_cmp_eq_u32 s0, 0
|
|
; GFX8-NEXT: s_cselect_b32 s27, 1, 0
|
|
; GFX8-NEXT: s_lshr_b64 s[0:1], s[10:11], s16
|
|
; GFX8-NEXT: s_lshr_b64 s[16:17], s[8:9], s16
|
|
; GFX8-NEXT: s_lshl_b64 s[24:25], s[10:11], s22
|
|
; GFX8-NEXT: s_or_b64 s[16:17], s[16:17], s[24:25]
|
|
; GFX8-NEXT: s_lshr_b64 s[10:11], s[10:11], s21
|
|
; GFX8-NEXT: s_cmp_lg_u32 s26, 0
|
|
; GFX8-NEXT: s_cselect_b64 s[10:11], s[16:17], s[10:11]
|
|
; GFX8-NEXT: s_cmp_lg_u32 s27, 0
|
|
; GFX8-NEXT: s_cselect_b64 s[8:9], s[8:9], s[10:11]
|
|
; GFX8-NEXT: s_cmp_lg_u32 s26, 0
|
|
; GFX8-NEXT: s_cselect_b64 s[10:11], s[0:1], 0
|
|
; GFX8-NEXT: s_lshl_b64 s[6:7], s[6:7], 1
|
|
; GFX8-NEXT: s_lshr_b32 s22, s5, 31
|
|
; GFX8-NEXT: s_or_b64 s[0:1], s[2:3], s[8:9]
|
|
; GFX8-NEXT: s_lshl_b64 s[8:9], s[4:5], 1
|
|
; GFX8-NEXT: s_or_b64 s[4:5], s[6:7], s[22:23]
|
|
; GFX8-NEXT: s_andn2_b32 s6, 0x7f, s20
|
|
; GFX8-NEXT: s_or_b64 s[2:3], s[18:19], s[10:11]
|
|
; GFX8-NEXT: s_not_b32 s16, s20
|
|
; GFX8-NEXT: s_sub_i32 s18, s6, 64
|
|
; GFX8-NEXT: s_sub_i32 s10, 64, s6
|
|
; GFX8-NEXT: s_cmp_lt_u32 s6, 64
|
|
; GFX8-NEXT: s_cselect_b32 s19, 1, 0
|
|
; GFX8-NEXT: s_cmp_eq_u32 s6, 0
|
|
; GFX8-NEXT: s_cselect_b32 s21, 1, 0
|
|
; GFX8-NEXT: s_lshl_b64 s[6:7], s[8:9], s16
|
|
; GFX8-NEXT: s_lshr_b64 s[10:11], s[8:9], s10
|
|
; GFX8-NEXT: s_lshl_b64 s[16:17], s[4:5], s16
|
|
; GFX8-NEXT: s_or_b64 s[10:11], s[10:11], s[16:17]
|
|
; GFX8-NEXT: s_lshl_b64 s[8:9], s[8:9], s18
|
|
; GFX8-NEXT: s_cmp_lg_u32 s19, 0
|
|
; GFX8-NEXT: s_cselect_b64 s[6:7], s[6:7], 0
|
|
; GFX8-NEXT: s_cselect_b64 s[8:9], s[10:11], s[8:9]
|
|
; GFX8-NEXT: s_cmp_lg_u32 s21, 0
|
|
; GFX8-NEXT: s_cselect_b64 s[8:9], s[4:5], s[8:9]
|
|
; GFX8-NEXT: s_and_b32 s4, s20, 0x7f
|
|
; GFX8-NEXT: s_sub_i32 s18, s4, 64
|
|
; GFX8-NEXT: s_sub_i32 s16, 64, s4
|
|
; GFX8-NEXT: s_cmp_lt_u32 s4, 64
|
|
; GFX8-NEXT: s_cselect_b32 s19, 1, 0
|
|
; GFX8-NEXT: s_cmp_eq_u32 s4, 0
|
|
; GFX8-NEXT: s_cselect_b32 s21, 1, 0
|
|
; GFX8-NEXT: s_lshr_b64 s[10:11], s[12:13], s20
|
|
; GFX8-NEXT: s_lshl_b64 s[16:17], s[14:15], s16
|
|
; GFX8-NEXT: s_lshr_b64 s[4:5], s[14:15], s20
|
|
; GFX8-NEXT: s_or_b64 s[10:11], s[10:11], s[16:17]
|
|
; GFX8-NEXT: s_lshr_b64 s[14:15], s[14:15], s18
|
|
; GFX8-NEXT: s_cmp_lg_u32 s19, 0
|
|
; GFX8-NEXT: s_cselect_b64 s[10:11], s[10:11], s[14:15]
|
|
; GFX8-NEXT: s_cmp_lg_u32 s21, 0
|
|
; GFX8-NEXT: s_cselect_b64 s[10:11], s[12:13], s[10:11]
|
|
; GFX8-NEXT: s_cmp_lg_u32 s19, 0
|
|
; GFX8-NEXT: s_cselect_b64 s[12:13], s[4:5], 0
|
|
; GFX8-NEXT: s_or_b64 s[4:5], s[6:7], s[10:11]
|
|
; GFX8-NEXT: s_or_b64 s[6:7], s[8:9], s[12:13]
|
|
; GFX8-NEXT: ; return to shader part epilog
|
|
;
|
|
; GFX9-LABEL: s_fshr_v2i128:
|
|
; GFX9: ; %bb.0:
|
|
; GFX9-NEXT: s_lshl_b64 s[2:3], s[2:3], 1
|
|
; GFX9-NEXT: s_lshr_b32 s22, s1, 31
|
|
; GFX9-NEXT: s_mov_b32 s23, 0
|
|
; GFX9-NEXT: s_lshl_b64 s[18:19], s[0:1], 1
|
|
; GFX9-NEXT: s_or_b64 s[0:1], s[2:3], s[22:23]
|
|
; GFX9-NEXT: s_andn2_b32 s2, 0x7f, s16
|
|
; GFX9-NEXT: s_not_b32 s17, s16
|
|
; GFX9-NEXT: s_sub_i32 s21, s2, 64
|
|
; GFX9-NEXT: s_sub_i32 s22, 64, s2
|
|
; GFX9-NEXT: s_cmp_lt_u32 s2, 64
|
|
; GFX9-NEXT: s_cselect_b32 s28, 1, 0
|
|
; GFX9-NEXT: s_cmp_eq_u32 s2, 0
|
|
; GFX9-NEXT: s_cselect_b32 s29, 1, 0
|
|
; GFX9-NEXT: s_lshr_b64 s[24:25], s[18:19], s22
|
|
; GFX9-NEXT: s_lshl_b64 s[26:27], s[0:1], s17
|
|
; GFX9-NEXT: s_lshl_b64 s[2:3], s[18:19], s17
|
|
; GFX9-NEXT: s_or_b64 s[24:25], s[24:25], s[26:27]
|
|
; GFX9-NEXT: s_lshl_b64 s[18:19], s[18:19], s21
|
|
; GFX9-NEXT: s_cmp_lg_u32 s28, 0
|
|
; GFX9-NEXT: s_cselect_b64 s[2:3], s[2:3], 0
|
|
; GFX9-NEXT: s_cselect_b64 s[18:19], s[24:25], s[18:19]
|
|
; GFX9-NEXT: s_cmp_lg_u32 s29, 0
|
|
; GFX9-NEXT: s_cselect_b64 s[18:19], s[0:1], s[18:19]
|
|
; GFX9-NEXT: s_and_b32 s0, s16, 0x7f
|
|
; GFX9-NEXT: s_sub_i32 s21, s0, 64
|
|
; GFX9-NEXT: s_sub_i32 s22, 64, s0
|
|
; GFX9-NEXT: s_cmp_lt_u32 s0, 64
|
|
; GFX9-NEXT: s_cselect_b32 s26, 1, 0
|
|
; GFX9-NEXT: s_cmp_eq_u32 s0, 0
|
|
; GFX9-NEXT: s_cselect_b32 s27, 1, 0
|
|
; GFX9-NEXT: s_lshr_b64 s[0:1], s[10:11], s16
|
|
; GFX9-NEXT: s_lshr_b64 s[16:17], s[8:9], s16
|
|
; GFX9-NEXT: s_lshl_b64 s[24:25], s[10:11], s22
|
|
; GFX9-NEXT: s_or_b64 s[16:17], s[16:17], s[24:25]
|
|
; GFX9-NEXT: s_lshr_b64 s[10:11], s[10:11], s21
|
|
; GFX9-NEXT: s_cmp_lg_u32 s26, 0
|
|
; GFX9-NEXT: s_cselect_b64 s[10:11], s[16:17], s[10:11]
|
|
; GFX9-NEXT: s_cmp_lg_u32 s27, 0
|
|
; GFX9-NEXT: s_cselect_b64 s[8:9], s[8:9], s[10:11]
|
|
; GFX9-NEXT: s_cmp_lg_u32 s26, 0
|
|
; GFX9-NEXT: s_cselect_b64 s[10:11], s[0:1], 0
|
|
; GFX9-NEXT: s_lshl_b64 s[6:7], s[6:7], 1
|
|
; GFX9-NEXT: s_lshr_b32 s22, s5, 31
|
|
; GFX9-NEXT: s_or_b64 s[0:1], s[2:3], s[8:9]
|
|
; GFX9-NEXT: s_lshl_b64 s[8:9], s[4:5], 1
|
|
; GFX9-NEXT: s_or_b64 s[4:5], s[6:7], s[22:23]
|
|
; GFX9-NEXT: s_andn2_b32 s6, 0x7f, s20
|
|
; GFX9-NEXT: s_or_b64 s[2:3], s[18:19], s[10:11]
|
|
; GFX9-NEXT: s_not_b32 s16, s20
|
|
; GFX9-NEXT: s_sub_i32 s18, s6, 64
|
|
; GFX9-NEXT: s_sub_i32 s10, 64, s6
|
|
; GFX9-NEXT: s_cmp_lt_u32 s6, 64
|
|
; GFX9-NEXT: s_cselect_b32 s19, 1, 0
|
|
; GFX9-NEXT: s_cmp_eq_u32 s6, 0
|
|
; GFX9-NEXT: s_cselect_b32 s21, 1, 0
|
|
; GFX9-NEXT: s_lshl_b64 s[6:7], s[8:9], s16
|
|
; GFX9-NEXT: s_lshr_b64 s[10:11], s[8:9], s10
|
|
; GFX9-NEXT: s_lshl_b64 s[16:17], s[4:5], s16
|
|
; GFX9-NEXT: s_or_b64 s[10:11], s[10:11], s[16:17]
|
|
; GFX9-NEXT: s_lshl_b64 s[8:9], s[8:9], s18
|
|
; GFX9-NEXT: s_cmp_lg_u32 s19, 0
|
|
; GFX9-NEXT: s_cselect_b64 s[6:7], s[6:7], 0
|
|
; GFX9-NEXT: s_cselect_b64 s[8:9], s[10:11], s[8:9]
|
|
; GFX9-NEXT: s_cmp_lg_u32 s21, 0
|
|
; GFX9-NEXT: s_cselect_b64 s[8:9], s[4:5], s[8:9]
|
|
; GFX9-NEXT: s_and_b32 s4, s20, 0x7f
|
|
; GFX9-NEXT: s_sub_i32 s18, s4, 64
|
|
; GFX9-NEXT: s_sub_i32 s16, 64, s4
|
|
; GFX9-NEXT: s_cmp_lt_u32 s4, 64
|
|
; GFX9-NEXT: s_cselect_b32 s19, 1, 0
|
|
; GFX9-NEXT: s_cmp_eq_u32 s4, 0
|
|
; GFX9-NEXT: s_cselect_b32 s21, 1, 0
|
|
; GFX9-NEXT: s_lshr_b64 s[10:11], s[12:13], s20
|
|
; GFX9-NEXT: s_lshl_b64 s[16:17], s[14:15], s16
|
|
; GFX9-NEXT: s_lshr_b64 s[4:5], s[14:15], s20
|
|
; GFX9-NEXT: s_or_b64 s[10:11], s[10:11], s[16:17]
|
|
; GFX9-NEXT: s_lshr_b64 s[14:15], s[14:15], s18
|
|
; GFX9-NEXT: s_cmp_lg_u32 s19, 0
|
|
; GFX9-NEXT: s_cselect_b64 s[10:11], s[10:11], s[14:15]
|
|
; GFX9-NEXT: s_cmp_lg_u32 s21, 0
|
|
; GFX9-NEXT: s_cselect_b64 s[10:11], s[12:13], s[10:11]
|
|
; GFX9-NEXT: s_cmp_lg_u32 s19, 0
|
|
; GFX9-NEXT: s_cselect_b64 s[12:13], s[4:5], 0
|
|
; GFX9-NEXT: s_or_b64 s[4:5], s[6:7], s[10:11]
|
|
; GFX9-NEXT: s_or_b64 s[6:7], s[8:9], s[12:13]
|
|
; GFX9-NEXT: ; return to shader part epilog
|
|
;
|
|
; GFX10-LABEL: s_fshr_v2i128:
|
|
; GFX10: ; %bb.0:
|
|
; GFX10-NEXT: s_lshl_b64 s[2:3], s[2:3], 1
|
|
; GFX10-NEXT: s_lshr_b32 s18, s1, 31
|
|
; GFX10-NEXT: s_mov_b32 s19, 0
|
|
; GFX10-NEXT: s_andn2_b32 s17, 0x7f, s16
|
|
; GFX10-NEXT: s_lshl_b64 s[0:1], s[0:1], 1
|
|
; GFX10-NEXT: s_or_b64 s[2:3], s[2:3], s[18:19]
|
|
; GFX10-NEXT: s_not_b32 s18, s16
|
|
; GFX10-NEXT: s_sub_i32 s21, s17, 64
|
|
; GFX10-NEXT: s_sub_i32 s22, 64, s17
|
|
; GFX10-NEXT: s_cmp_lt_u32 s17, 64
|
|
; GFX10-NEXT: s_cselect_b32 s28, 1, 0
|
|
; GFX10-NEXT: s_cmp_eq_u32 s17, 0
|
|
; GFX10-NEXT: s_cselect_b32 s17, 1, 0
|
|
; GFX10-NEXT: s_lshr_b64 s[22:23], s[0:1], s22
|
|
; GFX10-NEXT: s_lshl_b64 s[24:25], s[2:3], s18
|
|
; GFX10-NEXT: s_lshl_b64 s[26:27], s[0:1], s18
|
|
; GFX10-NEXT: s_or_b64 s[22:23], s[22:23], s[24:25]
|
|
; GFX10-NEXT: s_lshl_b64 s[0:1], s[0:1], s21
|
|
; GFX10-NEXT: s_cmp_lg_u32 s28, 0
|
|
; GFX10-NEXT: s_cselect_b64 s[24:25], s[26:27], 0
|
|
; GFX10-NEXT: s_cselect_b64 s[0:1], s[22:23], s[0:1]
|
|
; GFX10-NEXT: s_cmp_lg_u32 s17, 0
|
|
; GFX10-NEXT: s_cselect_b64 s[2:3], s[2:3], s[0:1]
|
|
; GFX10-NEXT: s_and_b32 s0, s16, 0x7f
|
|
; GFX10-NEXT: s_sub_i32 s18, s0, 64
|
|
; GFX10-NEXT: s_sub_i32 s17, 64, s0
|
|
; GFX10-NEXT: s_cmp_lt_u32 s0, 64
|
|
; GFX10-NEXT: s_cselect_b32 s21, 1, 0
|
|
; GFX10-NEXT: s_cmp_eq_u32 s0, 0
|
|
; GFX10-NEXT: s_cselect_b32 s26, 1, 0
|
|
; GFX10-NEXT: s_lshr_b64 s[0:1], s[8:9], s16
|
|
; GFX10-NEXT: s_lshl_b64 s[22:23], s[10:11], s17
|
|
; GFX10-NEXT: s_lshr_b64 s[16:17], s[10:11], s16
|
|
; GFX10-NEXT: s_or_b64 s[0:1], s[0:1], s[22:23]
|
|
; GFX10-NEXT: s_lshr_b64 s[10:11], s[10:11], s18
|
|
; GFX10-NEXT: s_cmp_lg_u32 s21, 0
|
|
; GFX10-NEXT: s_cselect_b64 s[0:1], s[0:1], s[10:11]
|
|
; GFX10-NEXT: s_cmp_lg_u32 s26, 0
|
|
; GFX10-NEXT: s_cselect_b64 s[0:1], s[8:9], s[0:1]
|
|
; GFX10-NEXT: s_cmp_lg_u32 s21, 0
|
|
; GFX10-NEXT: s_cselect_b64 s[8:9], s[16:17], 0
|
|
; GFX10-NEXT: s_lshl_b64 s[6:7], s[6:7], 1
|
|
; GFX10-NEXT: s_or_b64 s[2:3], s[2:3], s[8:9]
|
|
; GFX10-NEXT: s_lshr_b32 s18, s5, 31
|
|
; GFX10-NEXT: s_andn2_b32 s8, 0x7f, s20
|
|
; GFX10-NEXT: s_or_b64 s[0:1], s[24:25], s[0:1]
|
|
; GFX10-NEXT: s_lshl_b64 s[4:5], s[4:5], 1
|
|
; GFX10-NEXT: s_or_b64 s[6:7], s[6:7], s[18:19]
|
|
; GFX10-NEXT: s_not_b32 s16, s20
|
|
; GFX10-NEXT: s_sub_i32 s18, s8, 64
|
|
; GFX10-NEXT: s_sub_i32 s9, 64, s8
|
|
; GFX10-NEXT: s_cmp_lt_u32 s8, 64
|
|
; GFX10-NEXT: s_cselect_b32 s19, 1, 0
|
|
; GFX10-NEXT: s_cmp_eq_u32 s8, 0
|
|
; GFX10-NEXT: s_cselect_b32 s21, 1, 0
|
|
; GFX10-NEXT: s_lshr_b64 s[8:9], s[4:5], s9
|
|
; GFX10-NEXT: s_lshl_b64 s[10:11], s[6:7], s16
|
|
; GFX10-NEXT: s_lshl_b64 s[16:17], s[4:5], s16
|
|
; GFX10-NEXT: s_or_b64 s[8:9], s[8:9], s[10:11]
|
|
; GFX10-NEXT: s_lshl_b64 s[4:5], s[4:5], s18
|
|
; GFX10-NEXT: s_cmp_lg_u32 s19, 0
|
|
; GFX10-NEXT: s_cselect_b64 s[10:11], s[16:17], 0
|
|
; GFX10-NEXT: s_cselect_b64 s[4:5], s[8:9], s[4:5]
|
|
; GFX10-NEXT: s_cmp_lg_u32 s21, 0
|
|
; GFX10-NEXT: s_cselect_b64 s[6:7], s[6:7], s[4:5]
|
|
; GFX10-NEXT: s_and_b32 s4, s20, 0x7f
|
|
; GFX10-NEXT: s_sub_i32 s18, s4, 64
|
|
; GFX10-NEXT: s_sub_i32 s8, 64, s4
|
|
; GFX10-NEXT: s_cmp_lt_u32 s4, 64
|
|
; GFX10-NEXT: s_cselect_b32 s19, 1, 0
|
|
; GFX10-NEXT: s_cmp_eq_u32 s4, 0
|
|
; GFX10-NEXT: s_cselect_b32 s21, 1, 0
|
|
; GFX10-NEXT: s_lshr_b64 s[4:5], s[12:13], s20
|
|
; GFX10-NEXT: s_lshl_b64 s[8:9], s[14:15], s8
|
|
; GFX10-NEXT: s_lshr_b64 s[16:17], s[14:15], s20
|
|
; GFX10-NEXT: s_or_b64 s[4:5], s[4:5], s[8:9]
|
|
; GFX10-NEXT: s_lshr_b64 s[8:9], s[14:15], s18
|
|
; GFX10-NEXT: s_cmp_lg_u32 s19, 0
|
|
; GFX10-NEXT: s_cselect_b64 s[4:5], s[4:5], s[8:9]
|
|
; GFX10-NEXT: s_cmp_lg_u32 s21, 0
|
|
; GFX10-NEXT: s_cselect_b64 s[4:5], s[12:13], s[4:5]
|
|
; GFX10-NEXT: s_cmp_lg_u32 s19, 0
|
|
; GFX10-NEXT: s_cselect_b64 s[8:9], s[16:17], 0
|
|
; GFX10-NEXT: s_or_b64 s[4:5], s[10:11], s[4:5]
|
|
; GFX10-NEXT: s_or_b64 s[6:7], s[6:7], s[8:9]
|
|
; GFX10-NEXT: ; return to shader part epilog
|
|
;
|
|
; GFX11-LABEL: s_fshr_v2i128:
|
|
; GFX11: ; %bb.0:
|
|
; GFX11-NEXT: s_lshl_b64 s[2:3], s[2:3], 1
|
|
; GFX11-NEXT: s_lshr_b32 s18, s1, 31
|
|
; GFX11-NEXT: s_mov_b32 s19, 0
|
|
; GFX11-NEXT: s_and_not1_b32 s17, 0x7f, s16
|
|
; GFX11-NEXT: s_lshl_b64 s[0:1], s[0:1], 1
|
|
; GFX11-NEXT: s_or_b64 s[2:3], s[2:3], s[18:19]
|
|
; GFX11-NEXT: s_not_b32 s18, s16
|
|
; GFX11-NEXT: s_sub_i32 s21, s17, 64
|
|
; GFX11-NEXT: s_sub_i32 s22, 64, s17
|
|
; GFX11-NEXT: s_cmp_lt_u32 s17, 64
|
|
; GFX11-NEXT: s_cselect_b32 s28, 1, 0
|
|
; GFX11-NEXT: s_cmp_eq_u32 s17, 0
|
|
; GFX11-NEXT: s_cselect_b32 s17, 1, 0
|
|
; GFX11-NEXT: s_lshr_b64 s[22:23], s[0:1], s22
|
|
; GFX11-NEXT: s_lshl_b64 s[24:25], s[2:3], s18
|
|
; GFX11-NEXT: s_lshl_b64 s[26:27], s[0:1], s18
|
|
; GFX11-NEXT: s_or_b64 s[22:23], s[22:23], s[24:25]
|
|
; GFX11-NEXT: s_lshl_b64 s[0:1], s[0:1], s21
|
|
; GFX11-NEXT: s_cmp_lg_u32 s28, 0
|
|
; GFX11-NEXT: s_cselect_b64 s[24:25], s[26:27], 0
|
|
; GFX11-NEXT: s_cselect_b64 s[0:1], s[22:23], s[0:1]
|
|
; GFX11-NEXT: s_cmp_lg_u32 s17, 0
|
|
; GFX11-NEXT: s_cselect_b64 s[2:3], s[2:3], s[0:1]
|
|
; GFX11-NEXT: s_and_b32 s0, s16, 0x7f
|
|
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
|
|
; GFX11-NEXT: s_sub_i32 s18, s0, 64
|
|
; GFX11-NEXT: s_sub_i32 s17, 64, s0
|
|
; GFX11-NEXT: s_cmp_lt_u32 s0, 64
|
|
; GFX11-NEXT: s_cselect_b32 s21, 1, 0
|
|
; GFX11-NEXT: s_cmp_eq_u32 s0, 0
|
|
; GFX11-NEXT: s_cselect_b32 s26, 1, 0
|
|
; GFX11-NEXT: s_lshr_b64 s[0:1], s[8:9], s16
|
|
; GFX11-NEXT: s_lshl_b64 s[22:23], s[10:11], s17
|
|
; GFX11-NEXT: s_lshr_b64 s[16:17], s[10:11], s16
|
|
; GFX11-NEXT: s_or_b64 s[0:1], s[0:1], s[22:23]
|
|
; GFX11-NEXT: s_lshr_b64 s[10:11], s[10:11], s18
|
|
; GFX11-NEXT: s_cmp_lg_u32 s21, 0
|
|
; GFX11-NEXT: s_cselect_b64 s[0:1], s[0:1], s[10:11]
|
|
; GFX11-NEXT: s_cmp_lg_u32 s26, 0
|
|
; GFX11-NEXT: s_cselect_b64 s[0:1], s[8:9], s[0:1]
|
|
; GFX11-NEXT: s_cmp_lg_u32 s21, 0
|
|
; GFX11-NEXT: s_cselect_b64 s[8:9], s[16:17], 0
|
|
; GFX11-NEXT: s_lshl_b64 s[6:7], s[6:7], 1
|
|
; GFX11-NEXT: s_or_b64 s[2:3], s[2:3], s[8:9]
|
|
; GFX11-NEXT: s_lshr_b32 s18, s5, 31
|
|
; GFX11-NEXT: s_and_not1_b32 s8, 0x7f, s20
|
|
; GFX11-NEXT: s_or_b64 s[0:1], s[24:25], s[0:1]
|
|
; GFX11-NEXT: s_lshl_b64 s[4:5], s[4:5], 1
|
|
; GFX11-NEXT: s_or_b64 s[6:7], s[6:7], s[18:19]
|
|
; GFX11-NEXT: s_not_b32 s16, s20
|
|
; GFX11-NEXT: s_sub_i32 s18, s8, 64
|
|
; GFX11-NEXT: s_sub_i32 s9, 64, s8
|
|
; GFX11-NEXT: s_cmp_lt_u32 s8, 64
|
|
; GFX11-NEXT: s_cselect_b32 s19, 1, 0
|
|
; GFX11-NEXT: s_cmp_eq_u32 s8, 0
|
|
; GFX11-NEXT: s_cselect_b32 s21, 1, 0
|
|
; GFX11-NEXT: s_lshr_b64 s[8:9], s[4:5], s9
|
|
; GFX11-NEXT: s_lshl_b64 s[10:11], s[6:7], s16
|
|
; GFX11-NEXT: s_lshl_b64 s[16:17], s[4:5], s16
|
|
; GFX11-NEXT: s_or_b64 s[8:9], s[8:9], s[10:11]
|
|
; GFX11-NEXT: s_lshl_b64 s[4:5], s[4:5], s18
|
|
; GFX11-NEXT: s_cmp_lg_u32 s19, 0
|
|
; GFX11-NEXT: s_cselect_b64 s[10:11], s[16:17], 0
|
|
; GFX11-NEXT: s_cselect_b64 s[4:5], s[8:9], s[4:5]
|
|
; GFX11-NEXT: s_cmp_lg_u32 s21, 0
|
|
; GFX11-NEXT: s_cselect_b64 s[6:7], s[6:7], s[4:5]
|
|
; GFX11-NEXT: s_and_b32 s4, s20, 0x7f
|
|
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
|
|
; GFX11-NEXT: s_sub_i32 s18, s4, 64
|
|
; GFX11-NEXT: s_sub_i32 s8, 64, s4
|
|
; GFX11-NEXT: s_cmp_lt_u32 s4, 64
|
|
; GFX11-NEXT: s_cselect_b32 s19, 1, 0
|
|
; GFX11-NEXT: s_cmp_eq_u32 s4, 0
|
|
; GFX11-NEXT: s_cselect_b32 s21, 1, 0
|
|
; GFX11-NEXT: s_lshr_b64 s[4:5], s[12:13], s20
|
|
; GFX11-NEXT: s_lshl_b64 s[8:9], s[14:15], s8
|
|
; GFX11-NEXT: s_lshr_b64 s[16:17], s[14:15], s20
|
|
; GFX11-NEXT: s_or_b64 s[4:5], s[4:5], s[8:9]
|
|
; GFX11-NEXT: s_lshr_b64 s[8:9], s[14:15], s18
|
|
; GFX11-NEXT: s_cmp_lg_u32 s19, 0
|
|
; GFX11-NEXT: s_cselect_b64 s[4:5], s[4:5], s[8:9]
|
|
; GFX11-NEXT: s_cmp_lg_u32 s21, 0
|
|
; GFX11-NEXT: s_cselect_b64 s[4:5], s[12:13], s[4:5]
|
|
; GFX11-NEXT: s_cmp_lg_u32 s19, 0
|
|
; GFX11-NEXT: s_cselect_b64 s[8:9], s[16:17], 0
|
|
; GFX11-NEXT: s_or_b64 s[4:5], s[10:11], s[4:5]
|
|
; GFX11-NEXT: s_or_b64 s[6:7], s[6:7], s[8:9]
|
|
; GFX11-NEXT: ; return to shader part epilog
|
|
%result = call <2 x i128> @llvm.fshr.v2i128(<2 x i128> %lhs, <2 x i128> %rhs, <2 x i128> %amt)
|
|
ret <2 x i128> %result
|
|
}
|
|
|
|
define <2 x i128> @v_fshr_v2i128(<2 x i128> %lhs, <2 x i128> %rhs, <2 x i128> %amt) {
|
|
; GFX6-LABEL: v_fshr_v2i128:
|
|
; GFX6: ; %bb.0:
|
|
; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; GFX6-NEXT: v_lshl_b64 v[2:3], v[2:3], 1
|
|
; GFX6-NEXT: v_lshl_b64 v[17:18], v[0:1], 1
|
|
; GFX6-NEXT: v_lshrrev_b32_e32 v0, 31, v1
|
|
; GFX6-NEXT: v_or_b32_e32 v2, v2, v0
|
|
; GFX6-NEXT: v_not_b32_e32 v0, v16
|
|
; GFX6-NEXT: v_and_b32_e32 v19, 0x7f, v0
|
|
; GFX6-NEXT: v_sub_i32_e32 v0, vcc, 64, v19
|
|
; GFX6-NEXT: v_not_b32_e32 v25, 63
|
|
; GFX6-NEXT: v_lshr_b64 v[0:1], v[17:18], v0
|
|
; GFX6-NEXT: v_lshl_b64 v[21:22], v[2:3], v19
|
|
; GFX6-NEXT: v_add_i32_e32 v26, vcc, v19, v25
|
|
; GFX6-NEXT: v_lshl_b64 v[23:24], v[17:18], v19
|
|
; GFX6-NEXT: v_or_b32_e32 v21, v0, v21
|
|
; GFX6-NEXT: v_or_b32_e32 v22, v1, v22
|
|
; GFX6-NEXT: v_lshl_b64 v[0:1], v[17:18], v26
|
|
; GFX6-NEXT: v_cmp_gt_u32_e32 vcc, 64, v19
|
|
; GFX6-NEXT: v_cndmask_b32_e32 v18, 0, v23, vcc
|
|
; GFX6-NEXT: v_cndmask_b32_e32 v23, 0, v24, vcc
|
|
; GFX6-NEXT: v_cndmask_b32_e32 v0, v0, v21, vcc
|
|
; GFX6-NEXT: v_cndmask_b32_e32 v1, v1, v22, vcc
|
|
; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, 0, v19
|
|
; GFX6-NEXT: v_and_b32_e32 v22, 0x7f, v16
|
|
; GFX6-NEXT: v_cndmask_b32_e32 v19, v0, v2, vcc
|
|
; GFX6-NEXT: v_cndmask_b32_e32 v21, v1, v3, vcc
|
|
; GFX6-NEXT: v_sub_i32_e32 v2, vcc, 64, v22
|
|
; GFX6-NEXT: v_lshr_b64 v[0:1], v[8:9], v22
|
|
; GFX6-NEXT: v_lshl_b64 v[2:3], v[10:11], v2
|
|
; GFX6-NEXT: v_add_i32_e32 v24, vcc, v22, v25
|
|
; GFX6-NEXT: v_or_b32_e32 v2, v0, v2
|
|
; GFX6-NEXT: v_or_b32_e32 v3, v1, v3
|
|
; GFX6-NEXT: v_lshr_b64 v[0:1], v[10:11], v24
|
|
; GFX6-NEXT: v_cmp_gt_u32_e32 vcc, 64, v22
|
|
; GFX6-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc
|
|
; GFX6-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc
|
|
; GFX6-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v22
|
|
; GFX6-NEXT: v_lshl_b64 v[6:7], v[6:7], 1
|
|
; GFX6-NEXT: v_cndmask_b32_e64 v0, v0, v8, s[4:5]
|
|
; GFX6-NEXT: v_cndmask_b32_e64 v1, v1, v9, s[4:5]
|
|
; GFX6-NEXT: v_lshl_b64 v[8:9], v[4:5], 1
|
|
; GFX6-NEXT: v_lshrrev_b32_e32 v4, 31, v5
|
|
; GFX6-NEXT: v_lshr_b64 v[16:17], v[10:11], v22
|
|
; GFX6-NEXT: v_or_b32_e32 v6, v6, v4
|
|
; GFX6-NEXT: v_not_b32_e32 v4, v20
|
|
; GFX6-NEXT: v_or_b32_e32 v0, v18, v0
|
|
; GFX6-NEXT: v_and_b32_e32 v18, 0x7f, v4
|
|
; GFX6-NEXT: v_cndmask_b32_e32 v2, 0, v16, vcc
|
|
; GFX6-NEXT: v_cndmask_b32_e32 v3, 0, v17, vcc
|
|
; GFX6-NEXT: v_sub_i32_e32 v4, vcc, 64, v18
|
|
; GFX6-NEXT: v_lshr_b64 v[4:5], v[8:9], v4
|
|
; GFX6-NEXT: v_lshl_b64 v[10:11], v[6:7], v18
|
|
; GFX6-NEXT: v_or_b32_e32 v2, v19, v2
|
|
; GFX6-NEXT: v_add_i32_e32 v19, vcc, v18, v25
|
|
; GFX6-NEXT: v_lshl_b64 v[16:17], v[8:9], v18
|
|
; GFX6-NEXT: v_or_b32_e32 v10, v4, v10
|
|
; GFX6-NEXT: v_or_b32_e32 v11, v5, v11
|
|
; GFX6-NEXT: v_lshl_b64 v[4:5], v[8:9], v19
|
|
; GFX6-NEXT: v_cmp_gt_u32_e32 vcc, 64, v18
|
|
; GFX6-NEXT: v_cndmask_b32_e32 v16, 0, v16, vcc
|
|
; GFX6-NEXT: v_cndmask_b32_e32 v17, 0, v17, vcc
|
|
; GFX6-NEXT: v_cndmask_b32_e32 v4, v4, v10, vcc
|
|
; GFX6-NEXT: v_cndmask_b32_e32 v5, v5, v11, vcc
|
|
; GFX6-NEXT: v_cmp_eq_u32_e32 vcc, 0, v18
|
|
; GFX6-NEXT: v_and_b32_e32 v18, 0x7f, v20
|
|
; GFX6-NEXT: v_cndmask_b32_e32 v10, v4, v6, vcc
|
|
; GFX6-NEXT: v_cndmask_b32_e32 v11, v5, v7, vcc
|
|
; GFX6-NEXT: v_sub_i32_e32 v6, vcc, 64, v18
|
|
; GFX6-NEXT: v_lshr_b64 v[4:5], v[12:13], v18
|
|
; GFX6-NEXT: v_lshl_b64 v[6:7], v[14:15], v6
|
|
; GFX6-NEXT: v_add_i32_e32 v19, vcc, v18, v25
|
|
; GFX6-NEXT: v_or_b32_e32 v6, v4, v6
|
|
; GFX6-NEXT: v_or_b32_e32 v7, v5, v7
|
|
; GFX6-NEXT: v_lshr_b64 v[4:5], v[14:15], v19
|
|
; GFX6-NEXT: v_lshr_b64 v[8:9], v[14:15], v18
|
|
; GFX6-NEXT: v_cmp_gt_u32_e32 vcc, 64, v18
|
|
; GFX6-NEXT: v_cndmask_b32_e32 v4, v4, v6, vcc
|
|
; GFX6-NEXT: v_cndmask_b32_e32 v5, v5, v7, vcc
|
|
; GFX6-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v18
|
|
; GFX6-NEXT: v_cndmask_b32_e64 v4, v4, v12, s[4:5]
|
|
; GFX6-NEXT: v_cndmask_b32_e64 v5, v5, v13, s[4:5]
|
|
; GFX6-NEXT: v_cndmask_b32_e32 v6, 0, v8, vcc
|
|
; GFX6-NEXT: v_cndmask_b32_e32 v7, 0, v9, vcc
|
|
; GFX6-NEXT: v_or_b32_e32 v1, v23, v1
|
|
; GFX6-NEXT: v_or_b32_e32 v3, v21, v3
|
|
; GFX6-NEXT: v_or_b32_e32 v4, v16, v4
|
|
; GFX6-NEXT: v_or_b32_e32 v5, v17, v5
|
|
; GFX6-NEXT: v_or_b32_e32 v6, v10, v6
|
|
; GFX6-NEXT: v_or_b32_e32 v7, v11, v7
|
|
; GFX6-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; GFX8-LABEL: v_fshr_v2i128:
|
|
; GFX8: ; %bb.0:
|
|
; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; GFX8-NEXT: v_lshlrev_b64 v[2:3], 1, v[2:3]
|
|
; GFX8-NEXT: v_lshlrev_b64 v[17:18], 1, v[0:1]
|
|
; GFX8-NEXT: v_lshrrev_b32_e32 v0, 31, v1
|
|
; GFX8-NEXT: v_or_b32_e32 v2, v2, v0
|
|
; GFX8-NEXT: v_not_b32_e32 v0, v16
|
|
; GFX8-NEXT: v_and_b32_e32 v19, 0x7f, v0
|
|
; GFX8-NEXT: v_sub_u32_e32 v0, vcc, 64, v19
|
|
; GFX8-NEXT: v_not_b32_e32 v25, 63
|
|
; GFX8-NEXT: v_lshrrev_b64 v[0:1], v0, v[17:18]
|
|
; GFX8-NEXT: v_lshlrev_b64 v[21:22], v19, v[2:3]
|
|
; GFX8-NEXT: v_add_u32_e32 v26, vcc, v19, v25
|
|
; GFX8-NEXT: v_lshlrev_b64 v[23:24], v19, v[17:18]
|
|
; GFX8-NEXT: v_or_b32_e32 v21, v0, v21
|
|
; GFX8-NEXT: v_or_b32_e32 v22, v1, v22
|
|
; GFX8-NEXT: v_lshlrev_b64 v[0:1], v26, v[17:18]
|
|
; GFX8-NEXT: v_cmp_gt_u32_e32 vcc, 64, v19
|
|
; GFX8-NEXT: v_cndmask_b32_e32 v18, 0, v23, vcc
|
|
; GFX8-NEXT: v_cndmask_b32_e32 v23, 0, v24, vcc
|
|
; GFX8-NEXT: v_cndmask_b32_e32 v0, v0, v21, vcc
|
|
; GFX8-NEXT: v_cndmask_b32_e32 v1, v1, v22, vcc
|
|
; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, 0, v19
|
|
; GFX8-NEXT: v_and_b32_e32 v22, 0x7f, v16
|
|
; GFX8-NEXT: v_cndmask_b32_e32 v19, v0, v2, vcc
|
|
; GFX8-NEXT: v_cndmask_b32_e32 v21, v1, v3, vcc
|
|
; GFX8-NEXT: v_sub_u32_e32 v2, vcc, 64, v22
|
|
; GFX8-NEXT: v_lshrrev_b64 v[0:1], v22, v[8:9]
|
|
; GFX8-NEXT: v_lshlrev_b64 v[2:3], v2, v[10:11]
|
|
; GFX8-NEXT: v_add_u32_e32 v24, vcc, v22, v25
|
|
; GFX8-NEXT: v_or_b32_e32 v2, v0, v2
|
|
; GFX8-NEXT: v_or_b32_e32 v3, v1, v3
|
|
; GFX8-NEXT: v_lshrrev_b64 v[0:1], v24, v[10:11]
|
|
; GFX8-NEXT: v_cmp_gt_u32_e32 vcc, 64, v22
|
|
; GFX8-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc
|
|
; GFX8-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc
|
|
; GFX8-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v22
|
|
; GFX8-NEXT: v_lshlrev_b64 v[6:7], 1, v[6:7]
|
|
; GFX8-NEXT: v_cndmask_b32_e64 v0, v0, v8, s[4:5]
|
|
; GFX8-NEXT: v_cndmask_b32_e64 v1, v1, v9, s[4:5]
|
|
; GFX8-NEXT: v_lshlrev_b64 v[8:9], 1, v[4:5]
|
|
; GFX8-NEXT: v_lshrrev_b32_e32 v4, 31, v5
|
|
; GFX8-NEXT: v_lshrrev_b64 v[16:17], v22, v[10:11]
|
|
; GFX8-NEXT: v_or_b32_e32 v6, v6, v4
|
|
; GFX8-NEXT: v_not_b32_e32 v4, v20
|
|
; GFX8-NEXT: v_or_b32_e32 v0, v18, v0
|
|
; GFX8-NEXT: v_and_b32_e32 v18, 0x7f, v4
|
|
; GFX8-NEXT: v_cndmask_b32_e32 v2, 0, v16, vcc
|
|
; GFX8-NEXT: v_cndmask_b32_e32 v3, 0, v17, vcc
|
|
; GFX8-NEXT: v_sub_u32_e32 v4, vcc, 64, v18
|
|
; GFX8-NEXT: v_lshrrev_b64 v[4:5], v4, v[8:9]
|
|
; GFX8-NEXT: v_lshlrev_b64 v[10:11], v18, v[6:7]
|
|
; GFX8-NEXT: v_or_b32_e32 v2, v19, v2
|
|
; GFX8-NEXT: v_add_u32_e32 v19, vcc, v18, v25
|
|
; GFX8-NEXT: v_lshlrev_b64 v[16:17], v18, v[8:9]
|
|
; GFX8-NEXT: v_or_b32_e32 v10, v4, v10
|
|
; GFX8-NEXT: v_or_b32_e32 v11, v5, v11
|
|
; GFX8-NEXT: v_lshlrev_b64 v[4:5], v19, v[8:9]
|
|
; GFX8-NEXT: v_cmp_gt_u32_e32 vcc, 64, v18
|
|
; GFX8-NEXT: v_cndmask_b32_e32 v16, 0, v16, vcc
|
|
; GFX8-NEXT: v_cndmask_b32_e32 v17, 0, v17, vcc
|
|
; GFX8-NEXT: v_cndmask_b32_e32 v4, v4, v10, vcc
|
|
; GFX8-NEXT: v_cndmask_b32_e32 v5, v5, v11, vcc
|
|
; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, 0, v18
|
|
; GFX8-NEXT: v_and_b32_e32 v18, 0x7f, v20
|
|
; GFX8-NEXT: v_cndmask_b32_e32 v10, v4, v6, vcc
|
|
; GFX8-NEXT: v_cndmask_b32_e32 v11, v5, v7, vcc
|
|
; GFX8-NEXT: v_sub_u32_e32 v6, vcc, 64, v18
|
|
; GFX8-NEXT: v_lshrrev_b64 v[4:5], v18, v[12:13]
|
|
; GFX8-NEXT: v_lshlrev_b64 v[6:7], v6, v[14:15]
|
|
; GFX8-NEXT: v_add_u32_e32 v19, vcc, v18, v25
|
|
; GFX8-NEXT: v_or_b32_e32 v6, v4, v6
|
|
; GFX8-NEXT: v_or_b32_e32 v7, v5, v7
|
|
; GFX8-NEXT: v_lshrrev_b64 v[4:5], v19, v[14:15]
|
|
; GFX8-NEXT: v_lshrrev_b64 v[8:9], v18, v[14:15]
|
|
; GFX8-NEXT: v_cmp_gt_u32_e32 vcc, 64, v18
|
|
; GFX8-NEXT: v_cndmask_b32_e32 v4, v4, v6, vcc
|
|
; GFX8-NEXT: v_cndmask_b32_e32 v5, v5, v7, vcc
|
|
; GFX8-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v18
|
|
; GFX8-NEXT: v_cndmask_b32_e64 v4, v4, v12, s[4:5]
|
|
; GFX8-NEXT: v_cndmask_b32_e64 v5, v5, v13, s[4:5]
|
|
; GFX8-NEXT: v_cndmask_b32_e32 v6, 0, v8, vcc
|
|
; GFX8-NEXT: v_cndmask_b32_e32 v7, 0, v9, vcc
|
|
; GFX8-NEXT: v_or_b32_e32 v1, v23, v1
|
|
; GFX8-NEXT: v_or_b32_e32 v3, v21, v3
|
|
; GFX8-NEXT: v_or_b32_e32 v4, v16, v4
|
|
; GFX8-NEXT: v_or_b32_e32 v5, v17, v5
|
|
; GFX8-NEXT: v_or_b32_e32 v6, v10, v6
|
|
; GFX8-NEXT: v_or_b32_e32 v7, v11, v7
|
|
; GFX8-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; GFX9-LABEL: v_fshr_v2i128:
|
|
; GFX9: ; %bb.0:
|
|
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; GFX9-NEXT: v_lshlrev_b64 v[2:3], 1, v[2:3]
|
|
; GFX9-NEXT: v_lshlrev_b64 v[17:18], 1, v[0:1]
|
|
; GFX9-NEXT: v_lshrrev_b32_e32 v0, 31, v1
|
|
; GFX9-NEXT: v_or_b32_e32 v2, v2, v0
|
|
; GFX9-NEXT: v_not_b32_e32 v0, v16
|
|
; GFX9-NEXT: v_and_b32_e32 v19, 0x7f, v0
|
|
; GFX9-NEXT: v_sub_u32_e32 v0, 64, v19
|
|
; GFX9-NEXT: v_lshrrev_b64 v[0:1], v0, v[17:18]
|
|
; GFX9-NEXT: v_lshlrev_b64 v[21:22], v19, v[2:3]
|
|
; GFX9-NEXT: v_add_u32_e32 v25, 0xffffffc0, v19
|
|
; GFX9-NEXT: v_lshlrev_b64 v[23:24], v19, v[17:18]
|
|
; GFX9-NEXT: v_or_b32_e32 v21, v0, v21
|
|
; GFX9-NEXT: v_or_b32_e32 v22, v1, v22
|
|
; GFX9-NEXT: v_lshlrev_b64 v[0:1], v25, v[17:18]
|
|
; GFX9-NEXT: v_cmp_gt_u32_e32 vcc, 64, v19
|
|
; GFX9-NEXT: v_cndmask_b32_e32 v18, 0, v23, vcc
|
|
; GFX9-NEXT: v_cndmask_b32_e32 v23, 0, v24, vcc
|
|
; GFX9-NEXT: v_cndmask_b32_e32 v0, v0, v21, vcc
|
|
; GFX9-NEXT: v_cndmask_b32_e32 v1, v1, v22, vcc
|
|
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v19
|
|
; GFX9-NEXT: v_and_b32_e32 v22, 0x7f, v16
|
|
; GFX9-NEXT: v_cndmask_b32_e32 v19, v0, v2, vcc
|
|
; GFX9-NEXT: v_sub_u32_e32 v2, 64, v22
|
|
; GFX9-NEXT: v_cndmask_b32_e32 v21, v1, v3, vcc
|
|
; GFX9-NEXT: v_lshrrev_b64 v[0:1], v22, v[8:9]
|
|
; GFX9-NEXT: v_lshlrev_b64 v[2:3], v2, v[10:11]
|
|
; GFX9-NEXT: v_add_u32_e32 v24, 0xffffffc0, v22
|
|
; GFX9-NEXT: v_or_b32_e32 v2, v0, v2
|
|
; GFX9-NEXT: v_or_b32_e32 v3, v1, v3
|
|
; GFX9-NEXT: v_lshrrev_b64 v[0:1], v24, v[10:11]
|
|
; GFX9-NEXT: v_cmp_gt_u32_e32 vcc, 64, v22
|
|
; GFX9-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc
|
|
; GFX9-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc
|
|
; GFX9-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v22
|
|
; GFX9-NEXT: v_lshlrev_b64 v[6:7], 1, v[6:7]
|
|
; GFX9-NEXT: v_cndmask_b32_e64 v0, v0, v8, s[4:5]
|
|
; GFX9-NEXT: v_cndmask_b32_e64 v1, v1, v9, s[4:5]
|
|
; GFX9-NEXT: v_lshlrev_b64 v[8:9], 1, v[4:5]
|
|
; GFX9-NEXT: v_lshrrev_b32_e32 v4, 31, v5
|
|
; GFX9-NEXT: v_or_b32_e32 v6, v6, v4
|
|
; GFX9-NEXT: v_not_b32_e32 v4, v20
|
|
; GFX9-NEXT: v_lshrrev_b64 v[16:17], v22, v[10:11]
|
|
; GFX9-NEXT: v_or_b32_e32 v0, v18, v0
|
|
; GFX9-NEXT: v_and_b32_e32 v18, 0x7f, v4
|
|
; GFX9-NEXT: v_sub_u32_e32 v4, 64, v18
|
|
; GFX9-NEXT: v_cndmask_b32_e32 v2, 0, v16, vcc
|
|
; GFX9-NEXT: v_lshrrev_b64 v[4:5], v4, v[8:9]
|
|
; GFX9-NEXT: v_lshlrev_b64 v[10:11], v18, v[6:7]
|
|
; GFX9-NEXT: v_or_b32_e32 v2, v19, v2
|
|
; GFX9-NEXT: v_add_u32_e32 v19, 0xffffffc0, v18
|
|
; GFX9-NEXT: v_cndmask_b32_e32 v3, 0, v17, vcc
|
|
; GFX9-NEXT: v_lshlrev_b64 v[16:17], v18, v[8:9]
|
|
; GFX9-NEXT: v_or_b32_e32 v10, v4, v10
|
|
; GFX9-NEXT: v_or_b32_e32 v11, v5, v11
|
|
; GFX9-NEXT: v_lshlrev_b64 v[4:5], v19, v[8:9]
|
|
; GFX9-NEXT: v_cmp_gt_u32_e32 vcc, 64, v18
|
|
; GFX9-NEXT: v_cndmask_b32_e32 v16, 0, v16, vcc
|
|
; GFX9-NEXT: v_cndmask_b32_e32 v17, 0, v17, vcc
|
|
; GFX9-NEXT: v_cndmask_b32_e32 v4, v4, v10, vcc
|
|
; GFX9-NEXT: v_cndmask_b32_e32 v5, v5, v11, vcc
|
|
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v18
|
|
; GFX9-NEXT: v_and_b32_e32 v18, 0x7f, v20
|
|
; GFX9-NEXT: v_cndmask_b32_e32 v10, v4, v6, vcc
|
|
; GFX9-NEXT: v_sub_u32_e32 v6, 64, v18
|
|
; GFX9-NEXT: v_cndmask_b32_e32 v11, v5, v7, vcc
|
|
; GFX9-NEXT: v_lshrrev_b64 v[4:5], v18, v[12:13]
|
|
; GFX9-NEXT: v_lshlrev_b64 v[6:7], v6, v[14:15]
|
|
; GFX9-NEXT: v_add_u32_e32 v19, 0xffffffc0, v18
|
|
; GFX9-NEXT: v_or_b32_e32 v6, v4, v6
|
|
; GFX9-NEXT: v_or_b32_e32 v7, v5, v7
|
|
; GFX9-NEXT: v_lshrrev_b64 v[4:5], v19, v[14:15]
|
|
; GFX9-NEXT: v_lshrrev_b64 v[8:9], v18, v[14:15]
|
|
; GFX9-NEXT: v_cmp_gt_u32_e32 vcc, 64, v18
|
|
; GFX9-NEXT: v_cndmask_b32_e32 v4, v4, v6, vcc
|
|
; GFX9-NEXT: v_cndmask_b32_e32 v5, v5, v7, vcc
|
|
; GFX9-NEXT: v_cmp_eq_u32_e64 s[4:5], 0, v18
|
|
; GFX9-NEXT: v_cndmask_b32_e64 v4, v4, v12, s[4:5]
|
|
; GFX9-NEXT: v_cndmask_b32_e64 v5, v5, v13, s[4:5]
|
|
; GFX9-NEXT: v_cndmask_b32_e32 v6, 0, v8, vcc
|
|
; GFX9-NEXT: v_cndmask_b32_e32 v7, 0, v9, vcc
|
|
; GFX9-NEXT: v_or_b32_e32 v1, v23, v1
|
|
; GFX9-NEXT: v_or_b32_e32 v3, v21, v3
|
|
; GFX9-NEXT: v_or_b32_e32 v4, v16, v4
|
|
; GFX9-NEXT: v_or_b32_e32 v5, v17, v5
|
|
; GFX9-NEXT: v_or_b32_e32 v6, v10, v6
|
|
; GFX9-NEXT: v_or_b32_e32 v7, v11, v7
|
|
; GFX9-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; GFX10-LABEL: v_fshr_v2i128:
|
|
; GFX10: ; %bb.0:
|
|
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; GFX10-NEXT: v_not_b32_e32 v17, v16
|
|
; GFX10-NEXT: v_lshlrev_b64 v[2:3], 1, v[2:3]
|
|
; GFX10-NEXT: v_and_b32_e32 v26, 0x7f, v16
|
|
; GFX10-NEXT: v_lshlrev_b64 v[6:7], 1, v[6:7]
|
|
; GFX10-NEXT: v_and_b32_e32 v25, 0x7f, v17
|
|
; GFX10-NEXT: v_lshrrev_b32_e32 v17, 31, v1
|
|
; GFX10-NEXT: v_lshlrev_b64 v[0:1], 1, v[0:1]
|
|
; GFX10-NEXT: v_add_nc_u32_e32 v27, 0xffffffc0, v26
|
|
; GFX10-NEXT: v_cmp_gt_u32_e64 s4, 64, v26
|
|
; GFX10-NEXT: v_sub_nc_u32_e32 v18, 64, v25
|
|
; GFX10-NEXT: v_or_b32_e32 v2, v2, v17
|
|
; GFX10-NEXT: v_add_nc_u32_e32 v19, 0xffffffc0, v25
|
|
; GFX10-NEXT: v_lshlrev_b64 v[23:24], v25, v[0:1]
|
|
; GFX10-NEXT: v_cmp_gt_u32_e32 vcc_lo, 64, v25
|
|
; GFX10-NEXT: v_lshrrev_b64 v[17:18], v18, v[0:1]
|
|
; GFX10-NEXT: v_lshlrev_b64 v[21:22], v25, v[2:3]
|
|
; GFX10-NEXT: v_lshlrev_b64 v[0:1], v19, v[0:1]
|
|
; GFX10-NEXT: v_cndmask_b32_e32 v23, 0, v23, vcc_lo
|
|
; GFX10-NEXT: v_cndmask_b32_e32 v24, 0, v24, vcc_lo
|
|
; GFX10-NEXT: v_or_b32_e32 v22, v18, v22
|
|
; GFX10-NEXT: v_sub_nc_u32_e32 v18, 64, v26
|
|
; GFX10-NEXT: v_or_b32_e32 v21, v17, v21
|
|
; GFX10-NEXT: v_lshrrev_b64 v[16:17], v26, v[8:9]
|
|
; GFX10-NEXT: v_cndmask_b32_e32 v22, v1, v22, vcc_lo
|
|
; GFX10-NEXT: v_lshlrev_b64 v[18:19], v18, v[10:11]
|
|
; GFX10-NEXT: v_cndmask_b32_e32 v21, v0, v21, vcc_lo
|
|
; GFX10-NEXT: v_lshrrev_b64 v[0:1], v27, v[10:11]
|
|
; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v25
|
|
; GFX10-NEXT: v_or_b32_e32 v16, v16, v18
|
|
; GFX10-NEXT: v_or_b32_e32 v17, v17, v19
|
|
; GFX10-NEXT: v_cndmask_b32_e32 v18, v21, v2, vcc_lo
|
|
; GFX10-NEXT: v_cndmask_b32_e32 v22, v22, v3, vcc_lo
|
|
; GFX10-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v26
|
|
; GFX10-NEXT: v_cndmask_b32_e64 v0, v0, v16, s4
|
|
; GFX10-NEXT: v_not_b32_e32 v16, v20
|
|
; GFX10-NEXT: v_cndmask_b32_e64 v1, v1, v17, s4
|
|
; GFX10-NEXT: v_lshrrev_b64 v[2:3], v26, v[10:11]
|
|
; GFX10-NEXT: v_cndmask_b32_e32 v0, v0, v8, vcc_lo
|
|
; GFX10-NEXT: v_and_b32_e32 v25, 0x7f, v16
|
|
; GFX10-NEXT: v_lshrrev_b32_e32 v8, 31, v5
|
|
; GFX10-NEXT: v_lshlrev_b64 v[4:5], 1, v[4:5]
|
|
; GFX10-NEXT: v_cndmask_b32_e32 v1, v1, v9, vcc_lo
|
|
; GFX10-NEXT: v_or_b32_e32 v0, v23, v0
|
|
; GFX10-NEXT: v_sub_nc_u32_e32 v9, 64, v25
|
|
; GFX10-NEXT: v_or_b32_e32 v6, v6, v8
|
|
; GFX10-NEXT: v_and_b32_e32 v23, 0x7f, v20
|
|
; GFX10-NEXT: v_cndmask_b32_e64 v2, 0, v2, s4
|
|
; GFX10-NEXT: v_cndmask_b32_e64 v26, 0, v3, s4
|
|
; GFX10-NEXT: v_lshrrev_b64 v[8:9], v9, v[4:5]
|
|
; GFX10-NEXT: v_lshlrev_b64 v[10:11], v25, v[6:7]
|
|
; GFX10-NEXT: v_sub_nc_u32_e32 v20, 64, v23
|
|
; GFX10-NEXT: v_add_nc_u32_e32 v3, 0xffffffc0, v25
|
|
; GFX10-NEXT: v_or_b32_e32 v2, v18, v2
|
|
; GFX10-NEXT: v_lshlrev_b64 v[16:17], v25, v[4:5]
|
|
; GFX10-NEXT: v_lshrrev_b64 v[18:19], v23, v[12:13]
|
|
; GFX10-NEXT: v_or_b32_e32 v10, v8, v10
|
|
; GFX10-NEXT: v_add_nc_u32_e32 v8, 0xffffffc0, v23
|
|
; GFX10-NEXT: v_lshlrev_b64 v[20:21], v20, v[14:15]
|
|
; GFX10-NEXT: v_cmp_gt_u32_e32 vcc_lo, 64, v25
|
|
; GFX10-NEXT: v_lshlrev_b64 v[3:4], v3, v[4:5]
|
|
; GFX10-NEXT: v_or_b32_e32 v5, v9, v11
|
|
; GFX10-NEXT: v_lshrrev_b64 v[8:9], v8, v[14:15]
|
|
; GFX10-NEXT: v_cmp_gt_u32_e64 s4, 64, v23
|
|
; GFX10-NEXT: v_cndmask_b32_e32 v11, 0, v16, vcc_lo
|
|
; GFX10-NEXT: v_or_b32_e32 v16, v18, v20
|
|
; GFX10-NEXT: v_or_b32_e32 v18, v19, v21
|
|
; GFX10-NEXT: v_cndmask_b32_e32 v10, v3, v10, vcc_lo
|
|
; GFX10-NEXT: v_cndmask_b32_e32 v5, v4, v5, vcc_lo
|
|
; GFX10-NEXT: v_lshrrev_b64 v[3:4], v23, v[14:15]
|
|
; GFX10-NEXT: v_cndmask_b32_e64 v8, v8, v16, s4
|
|
; GFX10-NEXT: v_cmp_eq_u32_e64 s5, 0, v23
|
|
; GFX10-NEXT: v_cmp_eq_u32_e64 s6, 0, v25
|
|
; GFX10-NEXT: v_cndmask_b32_e64 v9, v9, v18, s4
|
|
; GFX10-NEXT: v_cndmask_b32_e32 v14, 0, v17, vcc_lo
|
|
; GFX10-NEXT: v_or_b32_e32 v1, v24, v1
|
|
; GFX10-NEXT: v_cndmask_b32_e64 v6, v10, v6, s6
|
|
; GFX10-NEXT: v_cndmask_b32_e64 v7, v5, v7, s6
|
|
; GFX10-NEXT: v_cndmask_b32_e64 v5, v8, v12, s5
|
|
; GFX10-NEXT: v_cndmask_b32_e64 v8, v9, v13, s5
|
|
; GFX10-NEXT: v_cndmask_b32_e64 v9, 0, v3, s4
|
|
; GFX10-NEXT: v_cndmask_b32_e64 v10, 0, v4, s4
|
|
; GFX10-NEXT: v_or_b32_e32 v3, v22, v26
|
|
; GFX10-NEXT: v_or_b32_e32 v4, v11, v5
|
|
; GFX10-NEXT: v_or_b32_e32 v5, v14, v8
|
|
; GFX10-NEXT: v_or_b32_e32 v6, v6, v9
|
|
; GFX10-NEXT: v_or_b32_e32 v7, v7, v10
|
|
; GFX10-NEXT: s_setpc_b64 s[30:31]
|
|
;
|
|
; GFX11-LABEL: v_fshr_v2i128:
|
|
; GFX11: ; %bb.0:
|
|
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
|
|
; GFX11-NEXT: v_not_b32_e32 v17, v16
|
|
; GFX11-NEXT: v_lshlrev_b64 v[2:3], 1, v[2:3]
|
|
; GFX11-NEXT: v_lshlrev_b64 v[6:7], 1, v[6:7]
|
|
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_3)
|
|
; GFX11-NEXT: v_and_b32_e32 v25, 0x7f, v17
|
|
; GFX11-NEXT: v_lshrrev_b32_e32 v17, 31, v1
|
|
; GFX11-NEXT: v_lshlrev_b64 v[0:1], 1, v[0:1]
|
|
; GFX11-NEXT: v_cmp_gt_u32_e32 vcc_lo, 64, v25
|
|
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
|
|
; GFX11-NEXT: v_or_b32_e32 v2, v2, v17
|
|
; GFX11-NEXT: v_lshlrev_b64 v[23:24], v25, v[0:1]
|
|
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
|
|
; GFX11-NEXT: v_dual_cndmask_b32 v23, 0, v23 :: v_dual_and_b32 v26, 0x7f, v16
|
|
; GFX11-NEXT: v_cndmask_b32_e32 v24, 0, v24, vcc_lo
|
|
; GFX11-NEXT: v_sub_nc_u32_e32 v18, 64, v25
|
|
; GFX11-NEXT: v_lshlrev_b64 v[21:22], v25, v[2:3]
|
|
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
|
|
; GFX11-NEXT: v_cmp_gt_u32_e64 s0, 64, v26
|
|
; GFX11-NEXT: v_lshrrev_b64 v[17:18], v18, v[0:1]
|
|
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
|
|
; GFX11-NEXT: v_or_b32_e32 v22, v18, v22
|
|
; GFX11-NEXT: v_add_nc_u32_e32 v19, 0xffffffc0, v25
|
|
; GFX11-NEXT: v_or_b32_e32 v21, v17, v21
|
|
; GFX11-NEXT: v_sub_nc_u32_e32 v18, 64, v26
|
|
; GFX11-NEXT: v_lshrrev_b64 v[16:17], v26, v[8:9]
|
|
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
|
|
; GFX11-NEXT: v_lshlrev_b64 v[0:1], v19, v[0:1]
|
|
; GFX11-NEXT: v_lshlrev_b64 v[18:19], v18, v[10:11]
|
|
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_4)
|
|
; GFX11-NEXT: v_dual_cndmask_b32 v22, v1, v22 :: v_dual_cndmask_b32 v21, v0, v21
|
|
; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v25
|
|
; GFX11-NEXT: v_add_nc_u32_e32 v27, 0xffffffc0, v26
|
|
; GFX11-NEXT: v_or_b32_e32 v16, v16, v18
|
|
; GFX11-NEXT: v_or_b32_e32 v17, v17, v19
|
|
; GFX11-NEXT: v_cndmask_b32_e32 v22, v22, v3, vcc_lo
|
|
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
|
|
; GFX11-NEXT: v_lshrrev_b64 v[0:1], v27, v[10:11]
|
|
; GFX11-NEXT: v_cndmask_b32_e64 v0, v0, v16, s0
|
|
; GFX11-NEXT: v_not_b32_e32 v16, v20
|
|
; GFX11-NEXT: v_cndmask_b32_e32 v18, v21, v2, vcc_lo
|
|
; GFX11-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v26
|
|
; GFX11-NEXT: v_cndmask_b32_e64 v1, v1, v17, s0
|
|
; GFX11-NEXT: v_lshrrev_b64 v[2:3], v26, v[10:11]
|
|
; GFX11-NEXT: v_and_b32_e32 v25, 0x7f, v16
|
|
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_4)
|
|
; GFX11-NEXT: v_dual_cndmask_b32 v0, v0, v8 :: v_dual_cndmask_b32 v1, v1, v9
|
|
; GFX11-NEXT: v_lshrrev_b32_e32 v8, 31, v5
|
|
; GFX11-NEXT: v_lshlrev_b64 v[4:5], 1, v[4:5]
|
|
; GFX11-NEXT: v_sub_nc_u32_e32 v9, 64, v25
|
|
; GFX11-NEXT: v_cndmask_b32_e64 v26, 0, v3, s0
|
|
; GFX11-NEXT: v_add_nc_u32_e32 v3, 0xffffffc0, v25
|
|
; GFX11-NEXT: v_or_b32_e32 v6, v6, v8
|
|
; GFX11-NEXT: v_or_b32_e32 v0, v23, v0
|
|
; GFX11-NEXT: v_lshrrev_b64 v[8:9], v9, v[4:5]
|
|
; GFX11-NEXT: v_lshlrev_b64 v[16:17], v25, v[4:5]
|
|
; GFX11-NEXT: v_lshlrev_b64 v[3:4], v3, v[4:5]
|
|
; GFX11-NEXT: v_lshlrev_b64 v[10:11], v25, v[6:7]
|
|
; GFX11-NEXT: v_cmp_gt_u32_e32 vcc_lo, 64, v25
|
|
; GFX11-NEXT: v_cndmask_b32_e64 v2, 0, v2, s0
|
|
; GFX11-NEXT: v_cmp_eq_u32_e64 s2, 0, v25
|
|
; GFX11-NEXT: v_or_b32_e32 v1, v24, v1
|
|
; GFX11-NEXT: v_or_b32_e32 v10, v8, v10
|
|
; GFX11-NEXT: v_and_b32_e32 v23, 0x7f, v20
|
|
; GFX11-NEXT: v_or_b32_e32 v2, v18, v2
|
|
; GFX11-NEXT: v_or_b32_e32 v5, v9, v11
|
|
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
|
|
; GFX11-NEXT: v_dual_cndmask_b32 v11, 0, v16 :: v_dual_cndmask_b32 v10, v3, v10
|
|
; GFX11-NEXT: v_sub_nc_u32_e32 v20, 64, v23
|
|
; GFX11-NEXT: v_add_nc_u32_e32 v8, 0xffffffc0, v23
|
|
; GFX11-NEXT: v_lshrrev_b64 v[18:19], v23, v[12:13]
|
|
; GFX11-NEXT: v_cmp_gt_u32_e64 s0, 64, v23
|
|
; GFX11-NEXT: v_cndmask_b32_e32 v5, v4, v5, vcc_lo
|
|
; GFX11-NEXT: v_lshlrev_b64 v[20:21], v20, v[14:15]
|
|
; GFX11-NEXT: v_lshrrev_b64 v[8:9], v8, v[14:15]
|
|
; GFX11-NEXT: v_lshrrev_b64 v[3:4], v23, v[14:15]
|
|
; GFX11-NEXT: v_cndmask_b32_e32 v14, 0, v17, vcc_lo
|
|
; GFX11-NEXT: v_cmp_eq_u32_e64 s1, 0, v23
|
|
; GFX11-NEXT: v_cndmask_b32_e64 v6, v10, v6, s2
|
|
; GFX11-NEXT: v_or_b32_e32 v16, v18, v20
|
|
; GFX11-NEXT: v_or_b32_e32 v18, v19, v21
|
|
; GFX11-NEXT: v_cndmask_b32_e64 v7, v5, v7, s2
|
|
; GFX11-NEXT: v_cndmask_b32_e64 v10, 0, v4, s0
|
|
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
|
|
; GFX11-NEXT: v_cndmask_b32_e64 v8, v8, v16, s0
|
|
; GFX11-NEXT: v_cndmask_b32_e64 v9, v9, v18, s0
|
|
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
|
|
; GFX11-NEXT: v_or_b32_e32 v7, v7, v10
|
|
; GFX11-NEXT: v_cndmask_b32_e64 v5, v8, v12, s1
|
|
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_4)
|
|
; GFX11-NEXT: v_cndmask_b32_e64 v8, v9, v13, s1
|
|
; GFX11-NEXT: v_cndmask_b32_e64 v9, 0, v3, s0
|
|
; GFX11-NEXT: v_or_b32_e32 v3, v22, v26
|
|
; GFX11-NEXT: v_or_b32_e32 v4, v11, v5
|
|
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
|
|
; GFX11-NEXT: v_or_b32_e32 v5, v14, v8
|
|
; GFX11-NEXT: v_or_b32_e32 v6, v6, v9
|
|
; GFX11-NEXT: s_setpc_b64 s[30:31]
|
|
%result = call <2 x i128> @llvm.fshr.v2i128(<2 x i128> %lhs, <2 x i128> %rhs, <2 x i128> %amt)
|
|
ret <2 x i128> %result
|
|
}
|
|
|
|
declare i7 @llvm.fshr.i7(i7, i7, i7) #0
|
|
declare i8 @llvm.fshr.i8(i8, i8, i8) #0
|
|
declare <2 x i8> @llvm.fshr.v2i8(<2 x i8>, <2 x i8>, <2 x i8>) #0
|
|
declare <4 x i8> @llvm.fshr.v4i8(<4 x i8>, <4 x i8>, <4 x i8>) #0
|
|
|
|
declare i16 @llvm.fshr.i16(i16, i16, i16) #0
|
|
declare <2 x i16> @llvm.fshr.v2i16(<2 x i16>, <2 x i16>, <2 x i16>) #0
|
|
declare <3 x i16> @llvm.fshr.v3i16(<3 x i16>, <3 x i16>, <3 x i16>) #0
|
|
declare <4 x i16> @llvm.fshr.v4i16(<4 x i16>, <4 x i16>, <4 x i16>) #0
|
|
declare <5 x i16> @llvm.fshr.v5i16(<5 x i16>, <5 x i16>, <5 x i16>) #0
|
|
declare <6 x i16> @llvm.fshr.v6i16(<6 x i16>, <6 x i16>, <6 x i16>) #0
|
|
declare <8 x i16> @llvm.fshr.v8i16(<8 x i16>, <8 x i16>, <8 x i16>) #0
|
|
|
|
declare i24 @llvm.fshr.i24(i24, i24, i24) #0
|
|
declare <2 x i24> @llvm.fshr.v2i24(<2 x i24>, <2 x i24>, <2 x i24>) #0
|
|
|
|
declare i32 @llvm.fshr.i32(i32, i32, i32) #0
|
|
declare <2 x i32> @llvm.fshr.v2i32(<2 x i32>, <2 x i32>, <2 x i32>) #0
|
|
declare <3 x i32> @llvm.fshr.v3i32(<3 x i32>, <3 x i32>, <3 x i32>) #0
|
|
declare <4 x i32> @llvm.fshr.v4i32(<4 x i32>, <4 x i32>, <4 x i32>) #0
|
|
declare <5 x i32> @llvm.fshr.v5i32(<5 x i32>, <5 x i32>, <5 x i32>) #0
|
|
declare <16 x i32> @llvm.fshr.v16i32(<16 x i32>, <16 x i32>, <16 x i32>) #0
|
|
|
|
declare i48 @llvm.fshr.i48(i48, i48, i48) #0
|
|
|
|
declare i64 @llvm.fshr.i64(i64, i64, i64) #0
|
|
declare <2 x i64> @llvm.fshr.v2i64(<2 x i64>, <2 x i64>, <2 x i64>) #0
|
|
|
|
declare i128 @llvm.fshr.i128(i128, i128, i128) #0
|
|
declare <2 x i128> @llvm.fshr.v2i128(<2 x i128>, <2 x i128>, <2 x i128>) #0
|
|
|
|
attributes #0 = { nounwind readnone speculatable willreturn }
|