; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=amdgcn-- < %s | FileCheck -enable-var-scope -check-prefix=SI %s
; RUN: llc -mtriple=amdgcn-- -mcpu=fiji < %s | FileCheck -enable-var-scope -check-prefix=VI %s
; RUN: llc -mtriple=amdgcn-- -mcpu=gfx900 < %s | FileCheck -enable-var-scope -check-prefix=GFX9 %s
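
; The hardware exp instruction (v_exp_f32) computes 2^x, so llvm.exp is
; lowered as exp(x) = 2^(x * log2(e)): a multiply followed by v_exp_f32.
; The constant 0x3fb8aa3b below is log2(e) ~= 1.442695 in single precision.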
define float @v_exp_f32(float %arg0) {
; SI-LABEL: v_exp_f32:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_mul_f32_e32 v0, 0x3fb8aa3b, v0
; SI-NEXT: v_exp_f32_e32 v0, v0
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: v_exp_f32:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_mul_f32_e32 v0, 0x3fb8aa3b, v0
; VI-NEXT: v_exp_f32_e32 v0, v0
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: v_exp_f32:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_mul_f32_e32 v0, 0x3fb8aa3b, v0
; GFX9-NEXT: v_exp_f32_e32 v0, v0
; GFX9-NEXT: s_setpc_b64 s[30:31]
%result = call float @llvm.exp.f32(float %arg0)
ret float %result
}
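
; Vector exp is scalarized; the log2(e) constant is materialized once into a
; VGPR and reused for each element's multiply.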
define <2 x float> @v_exp_v2f32(<2 x float> %arg0) {
; SI-LABEL: v_exp_v2f32:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v2, 0x3fb8aa3b
; SI-NEXT: v_mul_f32_e32 v0, v0, v2
; SI-NEXT: v_mul_f32_e32 v1, v1, v2
; SI-NEXT: v_exp_f32_e32 v0, v0
; SI-NEXT: v_exp_f32_e32 v1, v1
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: v_exp_v2f32:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v2, 0x3fb8aa3b
; VI-NEXT: v_mul_f32_e32 v0, v0, v2
; VI-NEXT: v_mul_f32_e32 v1, v1, v2
; VI-NEXT: v_exp_f32_e32 v0, v0
; VI-NEXT: v_exp_f32_e32 v1, v1
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: v_exp_v2f32:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v2, 0x3fb8aa3b
; GFX9-NEXT: v_mul_f32_e32 v0, v0, v2
; GFX9-NEXT: v_mul_f32_e32 v1, v1, v2
; GFX9-NEXT: v_exp_f32_e32 v0, v0
; GFX9-NEXT: v_exp_f32_e32 v1, v1
; GFX9-NEXT: s_setpc_b64 s[30:31]
%result = call <2 x float> @llvm.exp.v2f32(<2 x float> %arg0)
ret <2 x float> %result
}
define <3 x float> @v_exp_v3f32(<3 x float> %arg0) {
; SI-LABEL: v_exp_v3f32:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v3, 0x3fb8aa3b
; SI-NEXT: v_mul_f32_e32 v0, v0, v3
; SI-NEXT: v_mul_f32_e32 v1, v1, v3
; SI-NEXT: v_mul_f32_e32 v2, v2, v3
; SI-NEXT: v_exp_f32_e32 v0, v0
; SI-NEXT: v_exp_f32_e32 v1, v1
; SI-NEXT: v_exp_f32_e32 v2, v2
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: v_exp_v3f32:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v3, 0x3fb8aa3b
; VI-NEXT: v_mul_f32_e32 v0, v0, v3
; VI-NEXT: v_mul_f32_e32 v1, v1, v3
; VI-NEXT: v_mul_f32_e32 v2, v2, v3
; VI-NEXT: v_exp_f32_e32 v0, v0
; VI-NEXT: v_exp_f32_e32 v1, v1
; VI-NEXT: v_exp_f32_e32 v2, v2
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: v_exp_v3f32:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v3, 0x3fb8aa3b
; GFX9-NEXT: v_mul_f32_e32 v0, v0, v3
; GFX9-NEXT: v_mul_f32_e32 v1, v1, v3
; GFX9-NEXT: v_mul_f32_e32 v2, v2, v3
; GFX9-NEXT: v_exp_f32_e32 v0, v0
; GFX9-NEXT: v_exp_f32_e32 v1, v1
; GFX9-NEXT: v_exp_f32_e32 v2, v2
; GFX9-NEXT: s_setpc_b64 s[30:31]
%result = call <3 x float> @llvm.exp.v3f32(<3 x float> %arg0)
ret <3 x float> %result
}
define <4 x float> @v_exp_v4f32(<4 x float> %arg0) {
; SI-LABEL: v_exp_v4f32:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v4, 0x3fb8aa3b
; SI-NEXT: v_mul_f32_e32 v0, v0, v4
; SI-NEXT: v_mul_f32_e32 v1, v1, v4
; SI-NEXT: v_mul_f32_e32 v2, v2, v4
; SI-NEXT: v_mul_f32_e32 v3, v3, v4
; SI-NEXT: v_exp_f32_e32 v0, v0
; SI-NEXT: v_exp_f32_e32 v1, v1
; SI-NEXT: v_exp_f32_e32 v2, v2
; SI-NEXT: v_exp_f32_e32 v3, v3
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: v_exp_v4f32:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v4, 0x3fb8aa3b
; VI-NEXT: v_mul_f32_e32 v0, v0, v4
; VI-NEXT: v_mul_f32_e32 v1, v1, v4
; VI-NEXT: v_mul_f32_e32 v2, v2, v4
; VI-NEXT: v_mul_f32_e32 v3, v3, v4
; VI-NEXT: v_exp_f32_e32 v0, v0
; VI-NEXT: v_exp_f32_e32 v1, v1
; VI-NEXT: v_exp_f32_e32 v2, v2
; VI-NEXT: v_exp_f32_e32 v3, v3
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: v_exp_v4f32:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v4, 0x3fb8aa3b
; GFX9-NEXT: v_mul_f32_e32 v0, v0, v4
; GFX9-NEXT: v_mul_f32_e32 v1, v1, v4
; GFX9-NEXT: v_mul_f32_e32 v2, v2, v4
; GFX9-NEXT: v_mul_f32_e32 v3, v3, v4
; GFX9-NEXT: v_exp_f32_e32 v0, v0
; GFX9-NEXT: v_exp_f32_e32 v1, v1
; GFX9-NEXT: v_exp_f32_e32 v2, v2
; GFX9-NEXT: v_exp_f32_e32 v3, v3
; GFX9-NEXT: s_setpc_b64 s[30:31]
%result = call <4 x float> @llvm.exp.v4f32(<4 x float> %arg0)
ret <4 x float> %result
}
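
; SI has no f16 arithmetic, so the half operand is rounded to f16 and
; re-extended to f32 before the f32 lowering. VI and GFX9 multiply by
; 0x3dc5 (log2(e) in half precision) and use v_exp_f16 directly.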
define half @v_exp_f16(half %arg0) {
; SI-LABEL: v_exp_f16:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v0
; SI-NEXT: v_cvt_f32_f16_e32 v0, v0
; SI-NEXT: v_mul_f32_e32 v0, 0x3fb8aa3b, v0
; SI-NEXT: v_exp_f32_e32 v0, v0
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: v_exp_f16:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_mul_f16_e32 v0, 0x3dc5, v0
; VI-NEXT: v_exp_f16_e32 v0, v0
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: v_exp_f16:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_mul_f16_e32 v0, 0x3dc5, v0
; GFX9-NEXT: v_exp_f16_e32 v0, v0
; GFX9-NEXT: s_setpc_b64 s[30:31]
%result = call half @llvm.exp.f16(half %arg0)
ret half %result
}
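
; For <2 x half>, VI handles the high element via SDWA and recombines the
; results with v_or_b32; GFX9 does the multiply with packed v_pk_mul_f16 but
; still emits one v_exp_f16 per element.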
define <2 x half> @v_exp_v2f16(<2 x half> %arg0) {
; SI-LABEL: v_exp_v2f16:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v1, v1
; SI-NEXT: v_cvt_f16_f32_e32 v0, v0
; SI-NEXT: v_mov_b32_e32 v2, 0x3fb8aa3b
; SI-NEXT: v_cvt_f32_f16_e32 v1, v1
; SI-NEXT: v_cvt_f32_f16_e32 v0, v0
; SI-NEXT: v_mul_f32_e32 v0, v0, v2
; SI-NEXT: v_mul_f32_e32 v1, v1, v2
; SI-NEXT: v_exp_f32_e32 v0, v0
; SI-NEXT: v_exp_f32_e32 v1, v1
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: v_exp_v2f16:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v1, 0x3dc5
; VI-NEXT: v_mul_f16_sdwa v2, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
; VI-NEXT: v_mul_f16_e32 v0, v0, v1
; VI-NEXT: v_exp_f16_sdwa v2, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD
; VI-NEXT: v_exp_f16_e32 v0, v0
; VI-NEXT: v_or_b32_e32 v0, v0, v2
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: v_exp_v2f16:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v1, 0x3dc5
; GFX9-NEXT: v_pk_mul_f16 v0, v0, v1 op_sel_hi:[1,0]
; GFX9-NEXT: v_exp_f16_e32 v1, v0
; GFX9-NEXT: v_exp_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
; GFX9-NEXT: v_and_b32_e32 v1, 0xffff, v1
; GFX9-NEXT: v_lshl_or_b32 v0, v0, 16, v1
; GFX9-NEXT: s_setpc_b64 s[30:31]
%result = call <2 x half> @llvm.exp.v2f16(<2 x half> %arg0)
ret <2 x half> %result
}
; define <3 x half> @v_exp_v3f16(<3 x half> %arg0) {
; %result = call <3 x half> @llvm.exp.v3f16(<3 x half> %arg0)
; ret <3 x half> %result
; }
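
; <4 x half> follows the same pattern as <2 x half>, applied independently to
; each of the two 32-bit registers holding the vector.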
define <4 x half> @v_exp_v4f16(<4 x half> %arg0) {
; SI-LABEL: v_exp_v4f16:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v3, v3
; SI-NEXT: v_cvt_f16_f32_e32 v2, v2
; SI-NEXT: v_cvt_f16_f32_e32 v1, v1
; SI-NEXT: v_cvt_f16_f32_e32 v0, v0
; SI-NEXT: v_mov_b32_e32 v4, 0x3fb8aa3b
; SI-NEXT: v_cvt_f32_f16_e32 v3, v3
; SI-NEXT: v_cvt_f32_f16_e32 v2, v2
; SI-NEXT: v_cvt_f32_f16_e32 v1, v1
; SI-NEXT: v_cvt_f32_f16_e32 v0, v0
; SI-NEXT: v_mul_f32_e32 v0, v0, v4
; SI-NEXT: v_mul_f32_e32 v1, v1, v4
; SI-NEXT: v_mul_f32_e32 v2, v2, v4
; SI-NEXT: v_mul_f32_e32 v3, v3, v4
; SI-NEXT: v_exp_f32_e32 v0, v0
; SI-NEXT: v_exp_f32_e32 v1, v1
; SI-NEXT: v_exp_f32_e32 v2, v2
; SI-NEXT: v_exp_f32_e32 v3, v3
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: v_exp_v4f16:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v2, 0x3dc5
; VI-NEXT: v_mul_f16_e32 v3, v1, v2
; VI-NEXT: v_mul_f16_e32 v4, v0, v2
; VI-NEXT: v_mul_f16_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
; VI-NEXT: v_mul_f16_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
; VI-NEXT: v_exp_f16_e32 v3, v3
; VI-NEXT: v_exp_f16_sdwa v1, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD
; VI-NEXT: v_exp_f16_e32 v4, v4
; VI-NEXT: v_exp_f16_sdwa v0, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD
; VI-NEXT: v_or_b32_e32 v1, v3, v1
; VI-NEXT: v_or_b32_e32 v0, v4, v0
; VI-NEXT: s_setpc_b64 s[30:31]
;
; GFX9-LABEL: v_exp_v4f16:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v2, 0x3dc5
; GFX9-NEXT: v_mul_f16_e32 v3, v1, v2
; GFX9-NEXT: v_mul_f16_e32 v4, v0, v2
; GFX9-NEXT: v_mul_f16_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
; GFX9-NEXT: v_mul_f16_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
; GFX9-NEXT: v_exp_f16_e32 v3, v3
; GFX9-NEXT: v_exp_f16_e32 v4, v4
; GFX9-NEXT: v_exp_f16_e32 v0, v0
; GFX9-NEXT: v_exp_f16_e32 v1, v1
; GFX9-NEXT: v_mov_b32_e32 v2, 0xffff
; GFX9-NEXT: v_and_b32_e32 v4, v2, v4
; GFX9-NEXT: v_and_b32_e32 v2, v2, v3
; GFX9-NEXT: v_lshl_or_b32 v0, v0, 16, v4
; GFX9-NEXT: v_lshl_or_b32 v1, v1, 16, v2
; GFX9-NEXT: s_setpc_b64 s[30:31]
%result = call <4 x half> @llvm.exp.v4f16(<4 x half> %arg0)
ret <4 x half> %result
}
declare float @llvm.exp.f32(float)
declare <2 x float> @llvm.exp.v2f32(<2 x float>)
declare <3 x float> @llvm.exp.v3f32(<3 x float>)
declare <4 x float> @llvm.exp.v4f32(<4 x float>)
declare half @llvm.exp.f16(half)
declare <2 x half> @llvm.exp.v2f16(<2 x half>)
declare <3 x half> @llvm.exp.v3f16(<3 x half>)
declare <4 x half> @llvm.exp.v4f16(<4 x half>)