mayFoldIntoStore currently just checks the direct (oneuse) user of a SDValue to check its stored, which prevents cases where we bitcast the value prior to storing (usually the bitcast will be removed later). This patch peeks up through oneuse BITCAST nodes chain to see if its eventually stored. The main use of mayFoldIntoStore is v8i16 EXTRACT_VECTOR_ELT lowering which will only use PEXTRW/PEXTRB for index0 extractions (vs the faster MOVD) if the extracted value will be folded into a store on SSE41+ targets. Fixes #107086
5217 lines
252 KiB
LLVM
5217 lines
252 KiB
LLVM
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
|
|
; RUN: llc < %s -disable-peephole -mtriple=x86_64-unknown-unknown -mattr=+avx -verify-machineinstrs | FileCheck %s --check-prefixes=AVX,AVX1
|
|
; RUN: llc < %s -disable-peephole -mtriple=x86_64-unknown-unknown -mattr=+avx2 -verify-machineinstrs | FileCheck %s --check-prefixes=AVX,AVX2
|
|
; RUN: llc < %s -disable-peephole -mtriple=x86_64-unknown-unknown -mattr=+f16c -verify-machineinstrs | FileCheck %s --check-prefixes=F16C
|
|
; RUN: llc < %s -disable-peephole -mtriple=x86_64-unknown-unknown -mattr=+f16c -verify-machineinstrs | FileCheck %s --check-prefixes=F16C
|
|
; RUN: llc < %s -disable-peephole -mtriple=x86_64-unknown-unknown -mattr=+f16c,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle -verify-machineinstrs | FileCheck %s --check-prefixes=F16C
|
|
; RUN: llc < %s -disable-peephole -mtriple=x86_64-unknown-unknown -mattr=+f16c,+fast-variable-perlane-shuffle -verify-machineinstrs | FileCheck %s --check-prefixes=F16C
|
|
; RUN: llc < %s -disable-peephole -mtriple=x86_64-unknown-unknown -mattr=+avx512f -verify-machineinstrs | FileCheck %s --check-prefixes=AVX512,AVX512F
|
|
; RUN: llc < %s -disable-peephole -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle -verify-machineinstrs | FileCheck %s --check-prefixes=AVX512,AVX512-FASTLANE
|
|
; RUN: llc < %s -disable-peephole -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vl,+fast-variable-perlane-shuffle -verify-machineinstrs | FileCheck %s --check-prefixes=AVX512,AVX512-FASTLANE
|
|
|
|
;
|
|
; Half to Float
|
|
;
|
|
|
|
define float @cvt_i16_to_f32(i16 %a0) nounwind {
|
|
; AVX-LABEL: cvt_i16_to_f32:
|
|
; AVX: # %bb.0:
|
|
; AVX-NEXT: vpinsrw $0, %edi, %xmm0, %xmm0
|
|
; AVX-NEXT: jmp __extendhfsf2@PLT # TAILCALL
|
|
;
|
|
; F16C-LABEL: cvt_i16_to_f32:
|
|
; F16C: # %bb.0:
|
|
; F16C-NEXT: vmovd %edi, %xmm0
|
|
; F16C-NEXT: vcvtph2ps %xmm0, %xmm0
|
|
; F16C-NEXT: retq
|
|
;
|
|
; AVX512-LABEL: cvt_i16_to_f32:
|
|
; AVX512: # %bb.0:
|
|
; AVX512-NEXT: vmovd %edi, %xmm0
|
|
; AVX512-NEXT: vcvtph2ps %xmm0, %xmm0
|
|
; AVX512-NEXT: retq
|
|
%1 = bitcast i16 %a0 to half
|
|
%2 = fpext half %1 to float
|
|
ret float %2
|
|
}
|
|
|
|
define <4 x float> @cvt_4i16_to_4f32(<4 x i16> %a0) nounwind {
|
|
; AVX-LABEL: cvt_4i16_to_4f32:
|
|
; AVX: # %bb.0:
|
|
; AVX-NEXT: subq $72, %rsp
|
|
; AVX-NEXT: vmovq %xmm0, %rax
|
|
; AVX-NEXT: movq %rax, %rcx
|
|
; AVX-NEXT: movq %rax, %rdx
|
|
; AVX-NEXT: vpinsrw $0, %eax, %xmm0, %xmm0
|
|
; AVX-NEXT: # kill: def $eax killed $eax killed $rax
|
|
; AVX-NEXT: shrl $16, %eax
|
|
; AVX-NEXT: shrq $32, %rcx
|
|
; AVX-NEXT: shrq $48, %rdx
|
|
; AVX-NEXT: vpinsrw $0, %edx, %xmm0, %xmm1
|
|
; AVX-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
|
|
; AVX-NEXT: vpinsrw $0, %ecx, %xmm0, %xmm1
|
|
; AVX-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
|
|
; AVX-NEXT: vpinsrw $0, %eax, %xmm0, %xmm1
|
|
; AVX-NEXT: vmovdqa %xmm1, (%rsp) # 16-byte Spill
|
|
; AVX-NEXT: callq __extendhfsf2@PLT
|
|
; AVX-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
|
|
; AVX-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload
|
|
; AVX-NEXT: callq __extendhfsf2@PLT
|
|
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
|
|
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
|
|
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
|
|
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
|
|
; AVX-NEXT: callq __extendhfsf2@PLT
|
|
; AVX-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
|
|
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
|
|
; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
|
|
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
|
|
; AVX-NEXT: callq __extendhfsf2@PLT
|
|
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
|
|
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
|
|
; AVX-NEXT: addq $72, %rsp
|
|
; AVX-NEXT: retq
|
|
;
|
|
; F16C-LABEL: cvt_4i16_to_4f32:
|
|
; F16C: # %bb.0:
|
|
; F16C-NEXT: vcvtph2ps %xmm0, %xmm0
|
|
; F16C-NEXT: retq
|
|
;
|
|
; AVX512-LABEL: cvt_4i16_to_4f32:
|
|
; AVX512: # %bb.0:
|
|
; AVX512-NEXT: vcvtph2ps %xmm0, %xmm0
|
|
; AVX512-NEXT: retq
|
|
%1 = bitcast <4 x i16> %a0 to <4 x half>
|
|
%2 = fpext <4 x half> %1 to <4 x float>
|
|
ret <4 x float> %2
|
|
}
|
|
|
|
define <4 x float> @cvt_8i16_to_4f32(<8 x i16> %a0) nounwind {
|
|
; AVX-LABEL: cvt_8i16_to_4f32:
|
|
; AVX: # %bb.0:
|
|
; AVX-NEXT: subq $72, %rsp
|
|
; AVX-NEXT: vmovq %xmm0, %rax
|
|
; AVX-NEXT: movq %rax, %rcx
|
|
; AVX-NEXT: movq %rax, %rdx
|
|
; AVX-NEXT: vpinsrw $0, %eax, %xmm0, %xmm0
|
|
; AVX-NEXT: # kill: def $eax killed $eax killed $rax
|
|
; AVX-NEXT: shrl $16, %eax
|
|
; AVX-NEXT: shrq $32, %rcx
|
|
; AVX-NEXT: shrq $48, %rdx
|
|
; AVX-NEXT: vpinsrw $0, %edx, %xmm0, %xmm1
|
|
; AVX-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
|
|
; AVX-NEXT: vpinsrw $0, %ecx, %xmm0, %xmm1
|
|
; AVX-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
|
|
; AVX-NEXT: vpinsrw $0, %eax, %xmm0, %xmm1
|
|
; AVX-NEXT: vmovdqa %xmm1, (%rsp) # 16-byte Spill
|
|
; AVX-NEXT: callq __extendhfsf2@PLT
|
|
; AVX-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
|
|
; AVX-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload
|
|
; AVX-NEXT: callq __extendhfsf2@PLT
|
|
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
|
|
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
|
|
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
|
|
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
|
|
; AVX-NEXT: callq __extendhfsf2@PLT
|
|
; AVX-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
|
|
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
|
|
; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
|
|
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
|
|
; AVX-NEXT: callq __extendhfsf2@PLT
|
|
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
|
|
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
|
|
; AVX-NEXT: addq $72, %rsp
|
|
; AVX-NEXT: retq
|
|
;
|
|
; F16C-LABEL: cvt_8i16_to_4f32:
|
|
; F16C: # %bb.0:
|
|
; F16C-NEXT: vcvtph2ps %xmm0, %xmm0
|
|
; F16C-NEXT: retq
|
|
;
|
|
; AVX512-LABEL: cvt_8i16_to_4f32:
|
|
; AVX512: # %bb.0:
|
|
; AVX512-NEXT: vcvtph2ps %xmm0, %xmm0
|
|
; AVX512-NEXT: retq
|
|
%1 = shufflevector <8 x i16> %a0, <8 x i16> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
|
|
%2 = bitcast <4 x i16> %1 to <4 x half>
|
|
%3 = fpext <4 x half> %2 to <4 x float>
|
|
ret <4 x float> %3
|
|
}
|
|
|
|
define <8 x float> @cvt_8i16_to_8f32(<8 x i16> %a0) nounwind {
|
|
; AVX-LABEL: cvt_8i16_to_8f32:
|
|
; AVX: # %bb.0:
|
|
; AVX-NEXT: subq $56, %rsp
|
|
; AVX-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
|
|
; AVX-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
|
|
; AVX-NEXT: callq __extendhfsf2@PLT
|
|
; AVX-NEXT: vmovdqa %xmm0, (%rsp) # 16-byte Spill
|
|
; AVX-NEXT: vpermilps $78, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
|
|
; AVX-NEXT: # xmm0 = mem[2,3,0,1]
|
|
; AVX-NEXT: callq __extendhfsf2@PLT
|
|
; AVX-NEXT: vinsertps $16, (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
|
|
; AVX-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[2,3]
|
|
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
|
|
; AVX-NEXT: vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
|
|
; AVX-NEXT: # xmm0 = mem[3,3,3,3]
|
|
; AVX-NEXT: callq __extendhfsf2@PLT
|
|
; AVX-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
|
|
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
|
|
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
|
|
; AVX-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
|
|
; AVX-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
|
|
; AVX-NEXT: callq __extendhfsf2@PLT
|
|
; AVX-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
|
|
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
|
|
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
|
|
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
|
|
; AVX-NEXT: callq __extendhfsf2@PLT
|
|
; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
|
|
; AVX-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
|
|
; AVX-NEXT: vpsrld $16, %xmm0, %xmm0
|
|
; AVX-NEXT: callq __extendhfsf2@PLT
|
|
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
|
|
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
|
|
; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
|
|
; AVX-NEXT: vpermilps $245, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
|
|
; AVX-NEXT: # xmm0 = mem[1,1,3,3]
|
|
; AVX-NEXT: callq __extendhfsf2@PLT
|
|
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
|
|
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
|
|
; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
|
|
; AVX-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
|
|
; AVX-NEXT: vpsrlq $48, %xmm0, %xmm0
|
|
; AVX-NEXT: callq __extendhfsf2@PLT
|
|
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
|
|
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
|
|
; AVX-NEXT: vinsertf128 $1, (%rsp), %ymm0, %ymm0 # 16-byte Folded Reload
|
|
; AVX-NEXT: addq $56, %rsp
|
|
; AVX-NEXT: retq
|
|
;
|
|
; F16C-LABEL: cvt_8i16_to_8f32:
|
|
; F16C: # %bb.0:
|
|
; F16C-NEXT: vcvtph2ps %xmm0, %ymm0
|
|
; F16C-NEXT: retq
|
|
;
|
|
; AVX512-LABEL: cvt_8i16_to_8f32:
|
|
; AVX512: # %bb.0:
|
|
; AVX512-NEXT: vcvtph2ps %xmm0, %ymm0
|
|
; AVX512-NEXT: retq
|
|
%1 = bitcast <8 x i16> %a0 to <8 x half>
|
|
%2 = fpext <8 x half> %1 to <8 x float>
|
|
ret <8 x float> %2
|
|
}
|
|
|
|
define <16 x float> @cvt_16i16_to_16f32(<16 x i16> %a0) nounwind {
|
|
; AVX1-LABEL: cvt_16i16_to_16f32:
|
|
; AVX1: # %bb.0:
|
|
; AVX1-NEXT: subq $104, %rsp
|
|
; AVX1-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
|
|
; AVX1-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
|
|
; AVX1-NEXT: vzeroupper
|
|
; AVX1-NEXT: callq __extendhfsf2@PLT
|
|
; AVX1-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
|
|
; AVX1-NEXT: vpermilps $78, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
|
|
; AVX1-NEXT: # xmm0 = mem[2,3,0,1]
|
|
; AVX1-NEXT: callq __extendhfsf2@PLT
|
|
; AVX1-NEXT: vinsertps $16, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
|
|
; AVX1-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[2,3]
|
|
; AVX1-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
|
|
; AVX1-NEXT: vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
|
|
; AVX1-NEXT: # xmm0 = mem[3,3,3,3]
|
|
; AVX1-NEXT: callq __extendhfsf2@PLT
|
|
; AVX1-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
|
|
; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
|
|
; AVX1-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
|
|
; AVX1-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
|
|
; AVX1-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
|
|
; AVX1-NEXT: vzeroupper
|
|
; AVX1-NEXT: callq __extendhfsf2@PLT
|
|
; AVX1-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
|
|
; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
|
|
; AVX1-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
|
|
; AVX1-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
|
|
; AVX1-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
|
|
; AVX1-NEXT: vzeroupper
|
|
; AVX1-NEXT: callq __extendhfsf2@PLT
|
|
; AVX1-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
|
|
; AVX1-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
|
|
; AVX1-NEXT: vpsrld $16, %xmm0, %xmm0
|
|
; AVX1-NEXT: vzeroupper
|
|
; AVX1-NEXT: callq __extendhfsf2@PLT
|
|
; AVX1-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
|
|
; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
|
|
; AVX1-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
|
|
; AVX1-NEXT: vpermilps $245, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
|
|
; AVX1-NEXT: # xmm0 = mem[1,1,3,3]
|
|
; AVX1-NEXT: callq __extendhfsf2@PLT
|
|
; AVX1-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
|
|
; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
|
|
; AVX1-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
|
|
; AVX1-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
|
|
; AVX1-NEXT: vpsrlq $48, %xmm0, %xmm0
|
|
; AVX1-NEXT: vzeroupper
|
|
; AVX1-NEXT: callq __extendhfsf2@PLT
|
|
; AVX1-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
|
|
; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
|
|
; AVX1-NEXT: vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload
|
|
; AVX1-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
|
|
; AVX1-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
|
|
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
|
|
; AVX1-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
|
|
; AVX1-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
|
|
; AVX1-NEXT: vzeroupper
|
|
; AVX1-NEXT: callq __extendhfsf2@PLT
|
|
; AVX1-NEXT: vmovdqa %xmm0, (%rsp) # 16-byte Spill
|
|
; AVX1-NEXT: vpermilps $78, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
|
|
; AVX1-NEXT: # xmm0 = mem[2,3,0,1]
|
|
; AVX1-NEXT: callq __extendhfsf2@PLT
|
|
; AVX1-NEXT: vinsertps $16, (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
|
|
; AVX1-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[2,3]
|
|
; AVX1-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
|
|
; AVX1-NEXT: vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
|
|
; AVX1-NEXT: # xmm0 = mem[3,3,3,3]
|
|
; AVX1-NEXT: callq __extendhfsf2@PLT
|
|
; AVX1-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
|
|
; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
|
|
; AVX1-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
|
|
; AVX1-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
|
|
; AVX1-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
|
|
; AVX1-NEXT: callq __extendhfsf2@PLT
|
|
; AVX1-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
|
|
; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
|
|
; AVX1-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
|
|
; AVX1-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
|
|
; AVX1-NEXT: callq __extendhfsf2@PLT
|
|
; AVX1-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
|
|
; AVX1-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
|
|
; AVX1-NEXT: vpsrld $16, %xmm0, %xmm0
|
|
; AVX1-NEXT: callq __extendhfsf2@PLT
|
|
; AVX1-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
|
|
; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
|
|
; AVX1-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
|
|
; AVX1-NEXT: vpermilps $245, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
|
|
; AVX1-NEXT: # xmm0 = mem[1,1,3,3]
|
|
; AVX1-NEXT: callq __extendhfsf2@PLT
|
|
; AVX1-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
|
|
; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
|
|
; AVX1-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
|
|
; AVX1-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
|
|
; AVX1-NEXT: vpsrlq $48, %xmm0, %xmm0
|
|
; AVX1-NEXT: callq __extendhfsf2@PLT
|
|
; AVX1-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
|
|
; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
|
|
; AVX1-NEXT: vinsertf128 $1, (%rsp), %ymm0, %ymm1 # 16-byte Folded Reload
|
|
; AVX1-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
|
|
; AVX1-NEXT: addq $104, %rsp
|
|
; AVX1-NEXT: retq
|
|
;
|
|
; AVX2-LABEL: cvt_16i16_to_16f32:
|
|
; AVX2: # %bb.0:
|
|
; AVX2-NEXT: subq $104, %rsp
|
|
; AVX2-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
|
|
; AVX2-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
|
|
; AVX2-NEXT: vzeroupper
|
|
; AVX2-NEXT: callq __extendhfsf2@PLT
|
|
; AVX2-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
|
|
; AVX2-NEXT: vpermilps $78, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
|
|
; AVX2-NEXT: # xmm0 = mem[2,3,0,1]
|
|
; AVX2-NEXT: callq __extendhfsf2@PLT
|
|
; AVX2-NEXT: vinsertps $16, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
|
|
; AVX2-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[2,3]
|
|
; AVX2-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
|
|
; AVX2-NEXT: vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
|
|
; AVX2-NEXT: # xmm0 = mem[3,3,3,3]
|
|
; AVX2-NEXT: callq __extendhfsf2@PLT
|
|
; AVX2-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
|
|
; AVX2-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
|
|
; AVX2-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
|
|
; AVX2-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
|
|
; AVX2-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
|
|
; AVX2-NEXT: vzeroupper
|
|
; AVX2-NEXT: callq __extendhfsf2@PLT
|
|
; AVX2-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
|
|
; AVX2-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
|
|
; AVX2-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
|
|
; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
|
|
; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
|
|
; AVX2-NEXT: vzeroupper
|
|
; AVX2-NEXT: callq __extendhfsf2@PLT
|
|
; AVX2-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
|
|
; AVX2-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
|
|
; AVX2-NEXT: vpsrld $16, %xmm0, %xmm0
|
|
; AVX2-NEXT: vzeroupper
|
|
; AVX2-NEXT: callq __extendhfsf2@PLT
|
|
; AVX2-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
|
|
; AVX2-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
|
|
; AVX2-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
|
|
; AVX2-NEXT: vpermilps $245, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
|
|
; AVX2-NEXT: # xmm0 = mem[1,1,3,3]
|
|
; AVX2-NEXT: callq __extendhfsf2@PLT
|
|
; AVX2-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
|
|
; AVX2-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
|
|
; AVX2-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
|
|
; AVX2-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
|
|
; AVX2-NEXT: vpsrlq $48, %xmm0, %xmm0
|
|
; AVX2-NEXT: vzeroupper
|
|
; AVX2-NEXT: callq __extendhfsf2@PLT
|
|
; AVX2-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
|
|
; AVX2-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
|
|
; AVX2-NEXT: vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload
|
|
; AVX2-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
|
|
; AVX2-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
|
|
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
|
|
; AVX2-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
|
|
; AVX2-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
|
|
; AVX2-NEXT: vzeroupper
|
|
; AVX2-NEXT: callq __extendhfsf2@PLT
|
|
; AVX2-NEXT: vmovdqa %xmm0, (%rsp) # 16-byte Spill
|
|
; AVX2-NEXT: vpermilps $78, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
|
|
; AVX2-NEXT: # xmm0 = mem[2,3,0,1]
|
|
; AVX2-NEXT: callq __extendhfsf2@PLT
|
|
; AVX2-NEXT: vinsertps $16, (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
|
|
; AVX2-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[2,3]
|
|
; AVX2-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
|
|
; AVX2-NEXT: vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
|
|
; AVX2-NEXT: # xmm0 = mem[3,3,3,3]
|
|
; AVX2-NEXT: callq __extendhfsf2@PLT
|
|
; AVX2-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
|
|
; AVX2-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
|
|
; AVX2-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
|
|
; AVX2-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
|
|
; AVX2-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
|
|
; AVX2-NEXT: callq __extendhfsf2@PLT
|
|
; AVX2-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
|
|
; AVX2-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
|
|
; AVX2-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
|
|
; AVX2-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
|
|
; AVX2-NEXT: callq __extendhfsf2@PLT
|
|
; AVX2-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
|
|
; AVX2-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
|
|
; AVX2-NEXT: vpsrld $16, %xmm0, %xmm0
|
|
; AVX2-NEXT: callq __extendhfsf2@PLT
|
|
; AVX2-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
|
|
; AVX2-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
|
|
; AVX2-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
|
|
; AVX2-NEXT: vpermilps $245, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
|
|
; AVX2-NEXT: # xmm0 = mem[1,1,3,3]
|
|
; AVX2-NEXT: callq __extendhfsf2@PLT
|
|
; AVX2-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
|
|
; AVX2-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
|
|
; AVX2-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
|
|
; AVX2-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
|
|
; AVX2-NEXT: vpsrlq $48, %xmm0, %xmm0
|
|
; AVX2-NEXT: callq __extendhfsf2@PLT
|
|
; AVX2-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
|
|
; AVX2-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
|
|
; AVX2-NEXT: vinsertf128 $1, (%rsp), %ymm0, %ymm1 # 16-byte Folded Reload
|
|
; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
|
|
; AVX2-NEXT: addq $104, %rsp
|
|
; AVX2-NEXT: retq
|
|
;
|
|
; F16C-LABEL: cvt_16i16_to_16f32:
|
|
; F16C: # %bb.0:
|
|
; F16C-NEXT: vcvtph2ps %xmm0, %ymm2
|
|
; F16C-NEXT: vextractf128 $1, %ymm0, %xmm0
|
|
; F16C-NEXT: vcvtph2ps %xmm0, %ymm1
|
|
; F16C-NEXT: vmovaps %ymm2, %ymm0
|
|
; F16C-NEXT: retq
|
|
;
|
|
; AVX512-LABEL: cvt_16i16_to_16f32:
|
|
; AVX512: # %bb.0:
|
|
; AVX512-NEXT: vcvtph2ps %ymm0, %zmm0
|
|
; AVX512-NEXT: retq
|
|
%1 = bitcast <16 x i16> %a0 to <16 x half>
|
|
%2 = fpext <16 x half> %1 to <16 x float>
|
|
ret <16 x float> %2
|
|
}
|
|
|
|
define <2 x float> @cvt_2i16_to_2f32_constrained(<2 x i16> %a0) nounwind strictfp {
|
|
; AVX-LABEL: cvt_2i16_to_2f32_constrained:
|
|
; AVX: # %bb.0:
|
|
; AVX-NEXT: subq $40, %rsp
|
|
; AVX-NEXT: vmovd %xmm0, %eax
|
|
; AVX-NEXT: vpinsrw $0, %eax, %xmm0, %xmm0
|
|
; AVX-NEXT: shrl $16, %eax
|
|
; AVX-NEXT: vpinsrw $0, %eax, %xmm0, %xmm1
|
|
; AVX-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
|
|
; AVX-NEXT: callq __extendhfsf2@PLT
|
|
; AVX-NEXT: vmovdqa %xmm0, (%rsp) # 16-byte Spill
|
|
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
|
|
; AVX-NEXT: callq __extendhfsf2@PLT
|
|
; AVX-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
|
|
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
|
|
; AVX-NEXT: addq $40, %rsp
|
|
; AVX-NEXT: retq
|
|
;
|
|
; F16C-LABEL: cvt_2i16_to_2f32_constrained:
|
|
; F16C: # %bb.0:
|
|
; F16C-NEXT: vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
|
|
; F16C-NEXT: vcvtph2ps %xmm0, %xmm0
|
|
; F16C-NEXT: retq
|
|
;
|
|
; AVX512-LABEL: cvt_2i16_to_2f32_constrained:
|
|
; AVX512: # %bb.0:
|
|
; AVX512-NEXT: vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
|
|
; AVX512-NEXT: vcvtph2ps %xmm0, %xmm0
|
|
; AVX512-NEXT: retq
|
|
%1 = bitcast <2 x i16> %a0 to <2 x half>
|
|
%2 = call <2 x float> @llvm.experimental.constrained.fpext.v2f32.v2f16(<2 x half> %1, metadata !"fpexcept.strict") strictfp
|
|
ret <2 x float> %2
|
|
}
|
|
declare <2 x float> @llvm.experimental.constrained.fpext.v2f32.v2f16(<2 x half>, metadata) strictfp
|
|
|
|
define <4 x float> @cvt_4i16_to_4f32_constrained(<4 x i16> %a0) nounwind strictfp {
|
|
; AVX-LABEL: cvt_4i16_to_4f32_constrained:
|
|
; AVX: # %bb.0:
|
|
; AVX-NEXT: subq $72, %rsp
|
|
; AVX-NEXT: vmovq %xmm0, %rax
|
|
; AVX-NEXT: movq %rax, %rcx
|
|
; AVX-NEXT: movq %rax, %rdx
|
|
; AVX-NEXT: vpinsrw $0, %eax, %xmm0, %xmm0
|
|
; AVX-NEXT: # kill: def $eax killed $eax killed $rax
|
|
; AVX-NEXT: shrl $16, %eax
|
|
; AVX-NEXT: shrq $32, %rcx
|
|
; AVX-NEXT: shrq $48, %rdx
|
|
; AVX-NEXT: vpinsrw $0, %edx, %xmm0, %xmm1
|
|
; AVX-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
|
|
; AVX-NEXT: vpinsrw $0, %ecx, %xmm0, %xmm1
|
|
; AVX-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
|
|
; AVX-NEXT: vpinsrw $0, %eax, %xmm0, %xmm1
|
|
; AVX-NEXT: vmovdqa %xmm1, (%rsp) # 16-byte Spill
|
|
; AVX-NEXT: callq __extendhfsf2@PLT
|
|
; AVX-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
|
|
; AVX-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload
|
|
; AVX-NEXT: callq __extendhfsf2@PLT
|
|
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
|
|
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
|
|
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
|
|
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
|
|
; AVX-NEXT: callq __extendhfsf2@PLT
|
|
; AVX-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
|
|
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
|
|
; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
|
|
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
|
|
; AVX-NEXT: callq __extendhfsf2@PLT
|
|
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
|
|
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
|
|
; AVX-NEXT: addq $72, %rsp
|
|
; AVX-NEXT: retq
|
|
;
|
|
; F16C-LABEL: cvt_4i16_to_4f32_constrained:
|
|
; F16C: # %bb.0:
|
|
; F16C-NEXT: vcvtph2ps %xmm0, %xmm0
|
|
; F16C-NEXT: retq
|
|
;
|
|
; AVX512-LABEL: cvt_4i16_to_4f32_constrained:
|
|
; AVX512: # %bb.0:
|
|
; AVX512-NEXT: vcvtph2ps %xmm0, %xmm0
|
|
; AVX512-NEXT: retq
|
|
%1 = bitcast <4 x i16> %a0 to <4 x half>
|
|
%2 = call <4 x float> @llvm.experimental.constrained.fpext.v4f32.v4f16(<4 x half> %1, metadata !"fpexcept.strict") strictfp
|
|
ret <4 x float> %2
|
|
}
|
|
declare <4 x float> @llvm.experimental.constrained.fpext.v4f32.v4f16(<4 x half>, metadata) strictfp
|
|
|
|
define <8 x float> @cvt_8i16_to_8f32_constrained(<8 x i16> %a0) nounwind strictfp {
|
|
; AVX-LABEL: cvt_8i16_to_8f32_constrained:
|
|
; AVX: # %bb.0:
|
|
; AVX-NEXT: subq $56, %rsp
|
|
; AVX-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
|
|
; AVX-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
|
|
; AVX-NEXT: callq __extendhfsf2@PLT
|
|
; AVX-NEXT: vmovdqa %xmm0, (%rsp) # 16-byte Spill
|
|
; AVX-NEXT: vpermilps $78, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
|
|
; AVX-NEXT: # xmm0 = mem[2,3,0,1]
|
|
; AVX-NEXT: callq __extendhfsf2@PLT
|
|
; AVX-NEXT: vinsertps $16, (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
|
|
; AVX-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[2,3]
|
|
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
|
|
; AVX-NEXT: vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
|
|
; AVX-NEXT: # xmm0 = mem[3,3,3,3]
|
|
; AVX-NEXT: callq __extendhfsf2@PLT
|
|
; AVX-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
|
|
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
|
|
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
|
|
; AVX-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
|
|
; AVX-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
|
|
; AVX-NEXT: callq __extendhfsf2@PLT
|
|
; AVX-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
|
|
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
|
|
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
|
|
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
|
|
; AVX-NEXT: callq __extendhfsf2@PLT
|
|
; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
|
|
; AVX-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
|
|
; AVX-NEXT: vpsrld $16, %xmm0, %xmm0
|
|
; AVX-NEXT: callq __extendhfsf2@PLT
|
|
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
|
|
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
|
|
; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
|
|
; AVX-NEXT: vpermilps $245, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
|
|
; AVX-NEXT: # xmm0 = mem[1,1,3,3]
|
|
; AVX-NEXT: callq __extendhfsf2@PLT
|
|
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
|
|
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
|
|
; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
|
|
; AVX-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
|
|
; AVX-NEXT: vpsrlq $48, %xmm0, %xmm0
|
|
; AVX-NEXT: callq __extendhfsf2@PLT
|
|
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
|
|
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
|
|
; AVX-NEXT: vinsertf128 $1, (%rsp), %ymm0, %ymm0 # 16-byte Folded Reload
|
|
; AVX-NEXT: addq $56, %rsp
|
|
; AVX-NEXT: retq
|
|
;
|
|
; F16C-LABEL: cvt_8i16_to_8f32_constrained:
|
|
; F16C: # %bb.0:
|
|
; F16C-NEXT: vcvtph2ps %xmm0, %ymm0
|
|
; F16C-NEXT: retq
|
|
;
|
|
; AVX512-LABEL: cvt_8i16_to_8f32_constrained:
|
|
; AVX512: # %bb.0:
|
|
; AVX512-NEXT: vcvtph2ps %xmm0, %ymm0
|
|
; AVX512-NEXT: retq
|
|
%1 = bitcast <8 x i16> %a0 to <8 x half>
|
|
%2 = call <8 x float> @llvm.experimental.constrained.fpext.v8f32.v8f16(<8 x half> %1, metadata !"fpexcept.strict") strictfp
|
|
ret <8 x float> %2
|
|
}
|
|
declare <8 x float> @llvm.experimental.constrained.fpext.v8f32.v8f16(<8 x half>, metadata) strictfp
|
|
|
|
define <16 x float> @cvt_16i16_to_16f32_constrained(<16 x i16> %a0) nounwind strictfp {
|
|
; AVX1-LABEL: cvt_16i16_to_16f32_constrained:
|
|
; AVX1: # %bb.0:
|
|
; AVX1-NEXT: subq $104, %rsp
|
|
; AVX1-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
|
|
; AVX1-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
|
|
; AVX1-NEXT: vzeroupper
|
|
; AVX1-NEXT: callq __extendhfsf2@PLT
|
|
; AVX1-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
|
|
; AVX1-NEXT: vpermilps $78, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
|
|
; AVX1-NEXT: # xmm0 = mem[2,3,0,1]
|
|
; AVX1-NEXT: callq __extendhfsf2@PLT
|
|
; AVX1-NEXT: vinsertps $16, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
|
|
; AVX1-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[2,3]
|
|
; AVX1-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
|
|
; AVX1-NEXT: vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
|
|
; AVX1-NEXT: # xmm0 = mem[3,3,3,3]
|
|
; AVX1-NEXT: callq __extendhfsf2@PLT
|
|
; AVX1-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
|
|
; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
|
|
; AVX1-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
|
|
; AVX1-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
|
|
; AVX1-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
|
|
; AVX1-NEXT: vzeroupper
|
|
; AVX1-NEXT: callq __extendhfsf2@PLT
|
|
; AVX1-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
|
|
; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
|
|
; AVX1-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
|
|
; AVX1-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
|
|
; AVX1-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
|
|
; AVX1-NEXT: vzeroupper
|
|
; AVX1-NEXT: callq __extendhfsf2@PLT
|
|
; AVX1-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
|
|
; AVX1-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
|
|
; AVX1-NEXT: vpsrld $16, %xmm0, %xmm0
|
|
; AVX1-NEXT: vzeroupper
|
|
; AVX1-NEXT: callq __extendhfsf2@PLT
|
|
; AVX1-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
|
|
; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
|
|
; AVX1-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
|
|
; AVX1-NEXT: vpermilps $245, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
|
|
; AVX1-NEXT: # xmm0 = mem[1,1,3,3]
|
|
; AVX1-NEXT: callq __extendhfsf2@PLT
|
|
; AVX1-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
|
|
; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
|
|
; AVX1-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
|
|
; AVX1-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
|
|
; AVX1-NEXT: vpsrlq $48, %xmm0, %xmm0
|
|
; AVX1-NEXT: vzeroupper
|
|
; AVX1-NEXT: callq __extendhfsf2@PLT
|
|
; AVX1-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
|
|
; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
|
|
; AVX1-NEXT: vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload
|
|
; AVX1-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
|
|
; AVX1-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
|
|
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
|
|
; AVX1-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
|
|
; AVX1-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
|
|
; AVX1-NEXT: vzeroupper
|
|
; AVX1-NEXT: callq __extendhfsf2@PLT
|
|
; AVX1-NEXT: vmovdqa %xmm0, (%rsp) # 16-byte Spill
|
|
; AVX1-NEXT: vpermilps $78, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
|
|
; AVX1-NEXT: # xmm0 = mem[2,3,0,1]
|
|
; AVX1-NEXT: callq __extendhfsf2@PLT
|
|
; AVX1-NEXT: vinsertps $16, (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
|
|
; AVX1-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[2,3]
|
|
; AVX1-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
|
|
; AVX1-NEXT: vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
|
|
; AVX1-NEXT: # xmm0 = mem[3,3,3,3]
|
|
; AVX1-NEXT: callq __extendhfsf2@PLT
|
|
; AVX1-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
|
|
; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
|
|
; AVX1-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
|
|
; AVX1-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
|
|
; AVX1-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
|
|
; AVX1-NEXT: callq __extendhfsf2@PLT
|
|
; AVX1-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
|
|
; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
|
|
; AVX1-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
|
|
; AVX1-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
|
|
; AVX1-NEXT: callq __extendhfsf2@PLT
|
|
; AVX1-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
|
|
; AVX1-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
|
|
; AVX1-NEXT: vpsrld $16, %xmm0, %xmm0
|
|
; AVX1-NEXT: callq __extendhfsf2@PLT
|
|
; AVX1-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
|
|
; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
|
|
; AVX1-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
|
|
; AVX1-NEXT: vpermilps $245, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
|
|
; AVX1-NEXT: # xmm0 = mem[1,1,3,3]
|
|
; AVX1-NEXT: callq __extendhfsf2@PLT
|
|
; AVX1-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
|
|
; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
|
|
; AVX1-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
|
|
; AVX1-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
|
|
; AVX1-NEXT: vpsrlq $48, %xmm0, %xmm0
|
|
; AVX1-NEXT: callq __extendhfsf2@PLT
|
|
; AVX1-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
|
|
; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
|
|
; AVX1-NEXT: vinsertf128 $1, (%rsp), %ymm0, %ymm1 # 16-byte Folded Reload
|
|
; AVX1-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
|
|
; AVX1-NEXT: addq $104, %rsp
|
|
; AVX1-NEXT: retq
|
|
;
|
|
; AVX2-LABEL: cvt_16i16_to_16f32_constrained:
|
|
; AVX2: # %bb.0:
|
|
; AVX2-NEXT: subq $104, %rsp
|
|
; AVX2-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
|
|
; AVX2-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
|
|
; AVX2-NEXT: vzeroupper
|
|
; AVX2-NEXT: callq __extendhfsf2@PLT
|
|
; AVX2-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
|
|
; AVX2-NEXT: vpermilps $78, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
|
|
; AVX2-NEXT: # xmm0 = mem[2,3,0,1]
|
|
; AVX2-NEXT: callq __extendhfsf2@PLT
|
|
; AVX2-NEXT: vinsertps $16, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
|
|
; AVX2-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[2,3]
|
|
; AVX2-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
|
|
; AVX2-NEXT: vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
|
|
; AVX2-NEXT: # xmm0 = mem[3,3,3,3]
|
|
; AVX2-NEXT: callq __extendhfsf2@PLT
|
|
; AVX2-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
|
|
; AVX2-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
|
|
; AVX2-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
|
|
; AVX2-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
|
|
; AVX2-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
|
|
; AVX2-NEXT: vzeroupper
|
|
; AVX2-NEXT: callq __extendhfsf2@PLT
|
|
; AVX2-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
|
|
; AVX2-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
|
|
; AVX2-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
|
|
; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
|
|
; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
|
|
; AVX2-NEXT: vzeroupper
|
|
; AVX2-NEXT: callq __extendhfsf2@PLT
|
|
; AVX2-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
|
|
; AVX2-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
|
|
; AVX2-NEXT: vpsrld $16, %xmm0, %xmm0
|
|
; AVX2-NEXT: vzeroupper
|
|
; AVX2-NEXT: callq __extendhfsf2@PLT
|
|
; AVX2-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
|
|
; AVX2-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
|
|
; AVX2-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
|
|
; AVX2-NEXT: vpermilps $245, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
|
|
; AVX2-NEXT: # xmm0 = mem[1,1,3,3]
|
|
; AVX2-NEXT: callq __extendhfsf2@PLT
|
|
; AVX2-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
|
|
; AVX2-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
|
|
; AVX2-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
|
|
; AVX2-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
|
|
; AVX2-NEXT: vpsrlq $48, %xmm0, %xmm0
|
|
; AVX2-NEXT: vzeroupper
|
|
; AVX2-NEXT: callq __extendhfsf2@PLT
|
|
; AVX2-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
|
|
; AVX2-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
|
|
; AVX2-NEXT: vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload
|
|
; AVX2-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
|
|
; AVX2-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
|
|
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
|
|
; AVX2-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
|
|
; AVX2-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
|
|
; AVX2-NEXT: vzeroupper
|
|
; AVX2-NEXT: callq __extendhfsf2@PLT
|
|
; AVX2-NEXT: vmovdqa %xmm0, (%rsp) # 16-byte Spill
|
|
; AVX2-NEXT: vpermilps $78, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
|
|
; AVX2-NEXT: # xmm0 = mem[2,3,0,1]
|
|
; AVX2-NEXT: callq __extendhfsf2@PLT
|
|
; AVX2-NEXT: vinsertps $16, (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
|
|
; AVX2-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[2,3]
|
|
; AVX2-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
|
|
; AVX2-NEXT: vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
|
|
; AVX2-NEXT: # xmm0 = mem[3,3,3,3]
|
|
; AVX2-NEXT: callq __extendhfsf2@PLT
|
|
; AVX2-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
|
|
; AVX2-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
|
|
; AVX2-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
|
|
; AVX2-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
|
|
; AVX2-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
|
|
; AVX2-NEXT: callq __extendhfsf2@PLT
|
|
; AVX2-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
|
|
; AVX2-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
|
|
; AVX2-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
|
|
; AVX2-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
|
|
; AVX2-NEXT: callq __extendhfsf2@PLT
|
|
; AVX2-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
|
|
; AVX2-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
|
|
; AVX2-NEXT: vpsrld $16, %xmm0, %xmm0
|
|
; AVX2-NEXT: callq __extendhfsf2@PLT
|
|
; AVX2-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
|
|
; AVX2-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
|
|
; AVX2-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
|
|
; AVX2-NEXT: vpermilps $245, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
|
|
; AVX2-NEXT: # xmm0 = mem[1,1,3,3]
|
|
; AVX2-NEXT: callq __extendhfsf2@PLT
|
|
; AVX2-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
|
|
; AVX2-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
|
|
; AVX2-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
|
|
; AVX2-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
|
|
; AVX2-NEXT: vpsrlq $48, %xmm0, %xmm0
|
|
; AVX2-NEXT: callq __extendhfsf2@PLT
|
|
; AVX2-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
|
|
; AVX2-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
|
|
; AVX2-NEXT: vinsertf128 $1, (%rsp), %ymm0, %ymm1 # 16-byte Folded Reload
|
|
; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
|
|
; AVX2-NEXT: addq $104, %rsp
|
|
; AVX2-NEXT: retq
|
|
;
|
|
; F16C-LABEL: cvt_16i16_to_16f32_constrained:
|
|
; F16C: # %bb.0:
|
|
; F16C-NEXT: vextractf128 $1, %ymm0, %xmm1
|
|
; F16C-NEXT: vcvtph2ps %xmm1, %ymm1
|
|
; F16C-NEXT: vcvtph2ps %xmm0, %ymm0
|
|
; F16C-NEXT: retq
|
|
;
|
|
; AVX512-LABEL: cvt_16i16_to_16f32_constrained:
|
|
; AVX512: # %bb.0:
|
|
; AVX512-NEXT: vcvtph2ps %ymm0, %zmm0
|
|
; AVX512-NEXT: retq
|
|
%1 = bitcast <16 x i16> %a0 to <16 x half>
|
|
%2 = call <16 x float> @llvm.experimental.constrained.fpext.v16f32.v16f16(<16 x half> %1, metadata !"fpexcept.strict") strictfp
|
|
ret <16 x float> %2
|
|
}
|
|
declare <16 x float> @llvm.experimental.constrained.fpext.v16f32.v16f16(<16 x half>, metadata) strictfp
|
|
|
|
;
|
|
; Half to Float (Load)
|
|
;
|
|
|
|
define float @load_cvt_i16_to_f32(ptr %a0) nounwind {
|
|
; AVX-LABEL: load_cvt_i16_to_f32:
|
|
; AVX: # %bb.0:
|
|
; AVX-NEXT: vpinsrw $0, (%rdi), %xmm0, %xmm0
|
|
; AVX-NEXT: jmp __extendhfsf2@PLT # TAILCALL
|
|
;
|
|
; F16C-LABEL: load_cvt_i16_to_f32:
|
|
; F16C: # %bb.0:
|
|
; F16C-NEXT: vpinsrw $0, (%rdi), %xmm0, %xmm0
|
|
; F16C-NEXT: vcvtph2ps %xmm0, %xmm0
|
|
; F16C-NEXT: retq
|
|
;
|
|
; AVX512-LABEL: load_cvt_i16_to_f32:
|
|
; AVX512: # %bb.0:
|
|
; AVX512-NEXT: vpinsrw $0, (%rdi), %xmm0, %xmm0
|
|
; AVX512-NEXT: vcvtph2ps %xmm0, %xmm0
|
|
; AVX512-NEXT: retq
|
|
%1 = load i16, ptr %a0
|
|
%2 = bitcast i16 %1 to half
|
|
%3 = fpext half %2 to float
|
|
ret float %3
|
|
}
|
|
|
|
define <4 x float> @load_cvt_4i16_to_4f32(ptr %a0) nounwind {
|
|
; AVX-LABEL: load_cvt_4i16_to_4f32:
|
|
; AVX: # %bb.0:
|
|
; AVX-NEXT: subq $72, %rsp
|
|
; AVX-NEXT: vpinsrw $0, 6(%rdi), %xmm0, %xmm0
|
|
; AVX-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
|
|
; AVX-NEXT: vpinsrw $0, 4(%rdi), %xmm0, %xmm0
|
|
; AVX-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
|
|
; AVX-NEXT: vpinsrw $0, (%rdi), %xmm0, %xmm0
|
|
; AVX-NEXT: vmovdqa %xmm0, (%rsp) # 16-byte Spill
|
|
; AVX-NEXT: vpinsrw $0, 2(%rdi), %xmm0, %xmm0
|
|
; AVX-NEXT: callq __extendhfsf2@PLT
|
|
; AVX-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
|
|
; AVX-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload
|
|
; AVX-NEXT: callq __extendhfsf2@PLT
|
|
; AVX-NEXT: vinsertps $16, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
|
|
; AVX-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[2,3]
|
|
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
|
|
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
|
|
; AVX-NEXT: callq __extendhfsf2@PLT
|
|
; AVX-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
|
|
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
|
|
; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
|
|
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
|
|
; AVX-NEXT: callq __extendhfsf2@PLT
|
|
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
|
|
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
|
|
; AVX-NEXT: addq $72, %rsp
|
|
; AVX-NEXT: retq
|
|
;
|
|
; F16C-LABEL: load_cvt_4i16_to_4f32:
|
|
; F16C: # %bb.0:
|
|
; F16C-NEXT: vcvtph2ps (%rdi), %xmm0
|
|
; F16C-NEXT: retq
|
|
;
|
|
; AVX512-LABEL: load_cvt_4i16_to_4f32:
|
|
; AVX512: # %bb.0:
|
|
; AVX512-NEXT: vcvtph2ps (%rdi), %xmm0
|
|
; AVX512-NEXT: retq
|
|
%1 = load <4 x i16>, ptr %a0
|
|
%2 = bitcast <4 x i16> %1 to <4 x half>
|
|
%3 = fpext <4 x half> %2 to <4 x float>
|
|
ret <4 x float> %3
|
|
}
|
|
|
|
define <4 x float> @load_cvt_8i16_to_4f32(ptr %a0) nounwind {
|
|
; AVX-LABEL: load_cvt_8i16_to_4f32:
|
|
; AVX: # %bb.0:
|
|
; AVX-NEXT: subq $72, %rsp
|
|
; AVX-NEXT: movq (%rdi), %rax
|
|
; AVX-NEXT: movq %rax, %rcx
|
|
; AVX-NEXT: movq %rax, %rdx
|
|
; AVX-NEXT: vpinsrw $0, %eax, %xmm0, %xmm0
|
|
; AVX-NEXT: # kill: def $eax killed $eax killed $rax
|
|
; AVX-NEXT: shrl $16, %eax
|
|
; AVX-NEXT: shrq $32, %rcx
|
|
; AVX-NEXT: shrq $48, %rdx
|
|
; AVX-NEXT: vpinsrw $0, %edx, %xmm0, %xmm1
|
|
; AVX-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
|
|
; AVX-NEXT: vpinsrw $0, %ecx, %xmm0, %xmm1
|
|
; AVX-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
|
|
; AVX-NEXT: vpinsrw $0, %eax, %xmm0, %xmm1
|
|
; AVX-NEXT: vmovdqa %xmm1, (%rsp) # 16-byte Spill
|
|
; AVX-NEXT: callq __extendhfsf2@PLT
|
|
; AVX-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
|
|
; AVX-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload
|
|
; AVX-NEXT: callq __extendhfsf2@PLT
|
|
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
|
|
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
|
|
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
|
|
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
|
|
; AVX-NEXT: callq __extendhfsf2@PLT
|
|
; AVX-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
|
|
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
|
|
; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
|
|
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
|
|
; AVX-NEXT: callq __extendhfsf2@PLT
|
|
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
|
|
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
|
|
; AVX-NEXT: addq $72, %rsp
|
|
; AVX-NEXT: retq
|
|
;
|
|
; F16C-LABEL: load_cvt_8i16_to_4f32:
|
|
; F16C: # %bb.0:
|
|
; F16C-NEXT: vcvtph2ps (%rdi), %xmm0
|
|
; F16C-NEXT: retq
|
|
;
|
|
; AVX512-LABEL: load_cvt_8i16_to_4f32:
|
|
; AVX512: # %bb.0:
|
|
; AVX512-NEXT: vcvtph2ps (%rdi), %xmm0
|
|
; AVX512-NEXT: retq
|
|
%1 = load <8 x i16>, ptr %a0
|
|
%2 = shufflevector <8 x i16> %1, <8 x i16> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
|
|
%3 = bitcast <4 x i16> %2 to <4 x half>
|
|
%4 = fpext <4 x half> %3 to <4 x float>
|
|
ret <4 x float> %4
|
|
}
|
|
|
|
define <8 x float> @load_cvt_8i16_to_8f32(ptr %a0) nounwind {
|
|
; AVX1-LABEL: load_cvt_8i16_to_8f32:
|
|
; AVX1: # %bb.0:
|
|
; AVX1-NEXT: pushq %rbx
|
|
; AVX1-NEXT: subq $48, %rsp
|
|
; AVX1-NEXT: movq %rdi, %rbx
|
|
; AVX1-NEXT: vmovaps (%rdi), %xmm0
|
|
; AVX1-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
|
|
; AVX1-NEXT: vbroadcastss 8(%rdi), %xmm0
|
|
; AVX1-NEXT: callq __extendhfsf2@PLT
|
|
; AVX1-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
|
|
; AVX1-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
|
|
; AVX1-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
|
|
; AVX1-NEXT: callq __extendhfsf2@PLT
|
|
; AVX1-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
|
|
; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
|
|
; AVX1-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
|
|
; AVX1-NEXT: vbroadcastss 12(%rbx), %xmm0
|
|
; AVX1-NEXT: callq __extendhfsf2@PLT
|
|
; AVX1-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
|
|
; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
|
|
; AVX1-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
|
|
; AVX1-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
|
|
; AVX1-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
|
|
; AVX1-NEXT: callq __extendhfsf2@PLT
|
|
; AVX1-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
|
|
; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
|
|
; AVX1-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
|
|
; AVX1-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
|
|
; AVX1-NEXT: callq __extendhfsf2@PLT
|
|
; AVX1-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
|
|
; AVX1-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
|
|
; AVX1-NEXT: vpsrld $16, %xmm0, %xmm0
|
|
; AVX1-NEXT: callq __extendhfsf2@PLT
|
|
; AVX1-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
|
|
; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
|
|
; AVX1-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
|
|
; AVX1-NEXT: vbroadcastss 4(%rbx), %xmm0
|
|
; AVX1-NEXT: callq __extendhfsf2@PLT
|
|
; AVX1-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
|
|
; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
|
|
; AVX1-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
|
|
; AVX1-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
|
|
; AVX1-NEXT: vpsrlq $48, %xmm0, %xmm0
|
|
; AVX1-NEXT: callq __extendhfsf2@PLT
|
|
; AVX1-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
|
|
; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
|
|
; AVX1-NEXT: vinsertf128 $1, (%rsp), %ymm0, %ymm0 # 16-byte Folded Reload
|
|
; AVX1-NEXT: addq $48, %rsp
|
|
; AVX1-NEXT: popq %rbx
|
|
; AVX1-NEXT: retq
|
|
;
|
|
; AVX2-LABEL: load_cvt_8i16_to_8f32:
|
|
; AVX2: # %bb.0:
|
|
; AVX2-NEXT: pushq %rbx
|
|
; AVX2-NEXT: subq $48, %rsp
|
|
; AVX2-NEXT: movq %rdi, %rbx
|
|
; AVX2-NEXT: vmovdqa (%rdi), %xmm0
|
|
; AVX2-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
|
|
; AVX2-NEXT: vpinsrw $0, 8(%rdi), %xmm0, %xmm0
|
|
; AVX2-NEXT: callq __extendhfsf2@PLT
|
|
; AVX2-NEXT: vmovdqa %xmm0, (%rsp) # 16-byte Spill
|
|
; AVX2-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
|
|
; AVX2-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
|
|
; AVX2-NEXT: callq __extendhfsf2@PLT
|
|
; AVX2-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
|
|
; AVX2-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
|
|
; AVX2-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
|
|
; AVX2-NEXT: vpinsrw $0, 12(%rbx), %xmm0, %xmm0
|
|
; AVX2-NEXT: callq __extendhfsf2@PLT
|
|
; AVX2-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
|
|
; AVX2-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
|
|
; AVX2-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
|
|
; AVX2-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
|
|
; AVX2-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
|
|
; AVX2-NEXT: callq __extendhfsf2@PLT
|
|
; AVX2-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
|
|
; AVX2-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
|
|
; AVX2-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
|
|
; AVX2-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
|
|
; AVX2-NEXT: callq __extendhfsf2@PLT
|
|
; AVX2-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
|
|
; AVX2-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
|
|
; AVX2-NEXT: vpsrld $16, %xmm0, %xmm0
|
|
; AVX2-NEXT: callq __extendhfsf2@PLT
|
|
; AVX2-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
|
|
; AVX2-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
|
|
; AVX2-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
|
|
; AVX2-NEXT: vpinsrw $0, 4(%rbx), %xmm0, %xmm0
|
|
; AVX2-NEXT: callq __extendhfsf2@PLT
|
|
; AVX2-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
|
|
; AVX2-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
|
|
; AVX2-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
|
|
; AVX2-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
|
|
; AVX2-NEXT: vpsrlq $48, %xmm0, %xmm0
|
|
; AVX2-NEXT: callq __extendhfsf2@PLT
|
|
; AVX2-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
|
|
; AVX2-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
|
|
; AVX2-NEXT: vinsertf128 $1, (%rsp), %ymm0, %ymm0 # 16-byte Folded Reload
|
|
; AVX2-NEXT: addq $48, %rsp
|
|
; AVX2-NEXT: popq %rbx
|
|
; AVX2-NEXT: retq
|
|
;
|
|
; F16C-LABEL: load_cvt_8i16_to_8f32:
|
|
; F16C: # %bb.0:
|
|
; F16C-NEXT: vcvtph2ps (%rdi), %ymm0
|
|
; F16C-NEXT: retq
|
|
;
|
|
; AVX512-LABEL: load_cvt_8i16_to_8f32:
|
|
; AVX512: # %bb.0:
|
|
; AVX512-NEXT: vcvtph2ps (%rdi), %ymm0
|
|
; AVX512-NEXT: retq
|
|
%1 = load <8 x i16>, ptr %a0
|
|
%2 = bitcast <8 x i16> %1 to <8 x half>
|
|
%3 = fpext <8 x half> %2 to <8 x float>
|
|
ret <8 x float> %3
|
|
}
|
|
|
|
define <16 x float> @load_cvt_16i16_to_16f32(ptr %a0) nounwind {
; AVX1-LABEL: load_cvt_16i16_to_16f32:
; AVX1: # %bb.0:
; AVX1-NEXT: pushq %rbx
; AVX1-NEXT: subq $80, %rsp
; AVX1-NEXT: movq %rdi, %rbx
; AVX1-NEXT: vbroadcastss 8(%rdi), %xmm0
; AVX1-NEXT: callq __extendhfsf2@PLT
; AVX1-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX1-NEXT: vmovdqa (%rbx), %xmm1
; AVX1-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-NEXT: vmovaps 16(%rbx), %xmm0
; AVX1-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-NEXT: vpsrldq {{.*#+}} xmm0 = xmm1[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; AVX1-NEXT: callq __extendhfsf2@PLT
; AVX1-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
; AVX1-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX1-NEXT: vbroadcastss 12(%rbx), %xmm0
; AVX1-NEXT: callq __extendhfsf2@PLT
; AVX1-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
; AVX1-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX1-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX1-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; AVX1-NEXT: callq __extendhfsf2@PLT
; AVX1-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
; AVX1-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX1-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX1-NEXT: callq __extendhfsf2@PLT
; AVX1-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX1-NEXT: vpsrld $16, %xmm0, %xmm0
; AVX1-NEXT: callq __extendhfsf2@PLT
; AVX1-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
; AVX1-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-NEXT: vbroadcastss 4(%rbx), %xmm0
; AVX1-NEXT: callq __extendhfsf2@PLT
; AVX1-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
; AVX1-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX1-NEXT: vpsrlq $48, %xmm0, %xmm0
; AVX1-NEXT: callq __extendhfsf2@PLT
; AVX1-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
; AVX1-NEXT: vinsertf128 $1, (%rsp), %ymm0, %ymm0 # 16-byte Folded Reload
; AVX1-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-NEXT: vbroadcastss 24(%rbx), %xmm0
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: callq __extendhfsf2@PLT
; AVX1-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX1-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX1-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; AVX1-NEXT: callq __extendhfsf2@PLT
; AVX1-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
; AVX1-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX1-NEXT: vbroadcastss 28(%rbx), %xmm0
; AVX1-NEXT: callq __extendhfsf2@PLT
; AVX1-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
; AVX1-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX1-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX1-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; AVX1-NEXT: callq __extendhfsf2@PLT
; AVX1-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
; AVX1-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX1-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX1-NEXT: callq __extendhfsf2@PLT
; AVX1-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX1-NEXT: vpsrld $16, %xmm0, %xmm0
; AVX1-NEXT: callq __extendhfsf2@PLT
; AVX1-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
; AVX1-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-NEXT: vbroadcastss 20(%rbx), %xmm0
; AVX1-NEXT: callq __extendhfsf2@PLT
; AVX1-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
; AVX1-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX1-NEXT: vpsrlq $48, %xmm0, %xmm0
; AVX1-NEXT: callq __extendhfsf2@PLT
; AVX1-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
; AVX1-NEXT: vinsertf128 $1, (%rsp), %ymm0, %ymm1 # 16-byte Folded Reload
; AVX1-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX1-NEXT: addq $80, %rsp
; AVX1-NEXT: popq %rbx
; AVX1-NEXT: retq
;
; AVX2-LABEL: load_cvt_16i16_to_16f32:
; AVX2: # %bb.0:
; AVX2-NEXT: pushq %rbx
; AVX2-NEXT: subq $80, %rsp
; AVX2-NEXT: movq %rdi, %rbx
; AVX2-NEXT: vpinsrw $0, 8(%rdi), %xmm0, %xmm0
; AVX2-NEXT: callq __extendhfsf2@PLT
; AVX2-NEXT: vmovdqa %xmm0, (%rsp) # 16-byte Spill
; AVX2-NEXT: vmovdqa (%rbx), %xmm1
; AVX2-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-NEXT: vmovaps 16(%rbx), %xmm0
; AVX2-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-NEXT: vpsrldq {{.*#+}} xmm0 = xmm1[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; AVX2-NEXT: callq __extendhfsf2@PLT
; AVX2-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
; AVX2-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
; AVX2-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX2-NEXT: vpinsrw $0, 12(%rbx), %xmm0, %xmm0
; AVX2-NEXT: callq __extendhfsf2@PLT
; AVX2-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
; AVX2-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
; AVX2-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX2-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX2-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; AVX2-NEXT: callq __extendhfsf2@PLT
; AVX2-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
; AVX2-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
; AVX2-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX2-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX2-NEXT: callq __extendhfsf2@PLT
; AVX2-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX2-NEXT: vpsrld $16, %xmm0, %xmm0
; AVX2-NEXT: callq __extendhfsf2@PLT
; AVX2-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; AVX2-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
; AVX2-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-NEXT: vpinsrw $0, 4(%rbx), %xmm0, %xmm0
; AVX2-NEXT: callq __extendhfsf2@PLT
; AVX2-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; AVX2-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
; AVX2-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX2-NEXT: vpsrlq $48, %xmm0, %xmm0
; AVX2-NEXT: callq __extendhfsf2@PLT
; AVX2-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; AVX2-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
; AVX2-NEXT: vinsertf128 $1, (%rsp), %ymm0, %ymm0 # 16-byte Folded Reload
; AVX2-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT: vpinsrw $0, 24(%rbx), %xmm0, %xmm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: callq __extendhfsf2@PLT
; AVX2-NEXT: vmovdqa %xmm0, (%rsp) # 16-byte Spill
; AVX2-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX2-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; AVX2-NEXT: callq __extendhfsf2@PLT
; AVX2-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
; AVX2-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
; AVX2-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX2-NEXT: vpinsrw $0, 28(%rbx), %xmm0, %xmm0
; AVX2-NEXT: callq __extendhfsf2@PLT
; AVX2-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
; AVX2-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
; AVX2-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX2-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX2-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; AVX2-NEXT: callq __extendhfsf2@PLT
; AVX2-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
; AVX2-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
; AVX2-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX2-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX2-NEXT: callq __extendhfsf2@PLT
; AVX2-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX2-NEXT: vpsrld $16, %xmm0, %xmm0
; AVX2-NEXT: callq __extendhfsf2@PLT
; AVX2-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; AVX2-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
; AVX2-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-NEXT: vpinsrw $0, 20(%rbx), %xmm0, %xmm0
; AVX2-NEXT: callq __extendhfsf2@PLT
; AVX2-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; AVX2-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
; AVX2-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX2-NEXT: vpsrlq $48, %xmm0, %xmm0
; AVX2-NEXT: callq __extendhfsf2@PLT
; AVX2-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; AVX2-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
; AVX2-NEXT: vinsertf128 $1, (%rsp), %ymm0, %ymm1 # 16-byte Folded Reload
; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT: addq $80, %rsp
; AVX2-NEXT: popq %rbx
; AVX2-NEXT: retq
;
; F16C-LABEL: load_cvt_16i16_to_16f32:
; F16C: # %bb.0:
; F16C-NEXT: vcvtph2ps (%rdi), %ymm0
; F16C-NEXT: vcvtph2ps 16(%rdi), %ymm1
; F16C-NEXT: retq
;
; AVX512-LABEL: load_cvt_16i16_to_16f32:
; AVX512: # %bb.0:
; AVX512-NEXT: vcvtph2ps (%rdi), %zmm0
; AVX512-NEXT: retq
  %1 = load <16 x i16>, ptr %a0
  %2 = bitcast <16 x i16> %1 to <16 x half>
  %3 = fpext <16 x half> %2 to <16 x float>
  ret <16 x float> %3
}

define <4 x float> @load_cvt_4i16_to_4f32_constrained(ptr %a0) nounwind strictfp {
; AVX-LABEL: load_cvt_4i16_to_4f32_constrained:
; AVX: # %bb.0:
; AVX-NEXT: subq $72, %rsp
; AVX-NEXT: vpinsrw $0, 6(%rdi), %xmm0, %xmm0
; AVX-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vpinsrw $0, 4(%rdi), %xmm0, %xmm0
; AVX-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vpinsrw $0, (%rdi), %xmm0, %xmm0
; AVX-NEXT: vmovdqa %xmm0, (%rsp) # 16-byte Spill
; AVX-NEXT: vpinsrw $0, 2(%rdi), %xmm0, %xmm0
; AVX-NEXT: callq __extendhfsf2@PLT
; AVX-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload
; AVX-NEXT: callq __extendhfsf2@PLT
; AVX-NEXT: vinsertps $16, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[2,3]
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX-NEXT: callq __extendhfsf2@PLT
; AVX-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX-NEXT: callq __extendhfsf2@PLT
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
; AVX-NEXT: addq $72, %rsp
; AVX-NEXT: retq
;
; F16C-LABEL: load_cvt_4i16_to_4f32_constrained:
; F16C: # %bb.0:
; F16C-NEXT: vcvtph2ps (%rdi), %xmm0
; F16C-NEXT: retq
;
; AVX512-LABEL: load_cvt_4i16_to_4f32_constrained:
; AVX512: # %bb.0:
; AVX512-NEXT: vcvtph2ps (%rdi), %xmm0
; AVX512-NEXT: retq
  %1 = load <4 x i16>, ptr %a0
  %2 = bitcast <4 x i16> %1 to <4 x half>
  %3 = call <4 x float> @llvm.experimental.constrained.fpext.v4f32.v4f16(<4 x half> %2, metadata !"fpexcept.strict") strictfp
  ret <4 x float> %3
}

define <4 x float> @load_cvt_8i16_to_4f32_constrained(ptr %a0) nounwind strictfp {
; AVX-LABEL: load_cvt_8i16_to_4f32_constrained:
; AVX: # %bb.0:
; AVX-NEXT: subq $72, %rsp
; AVX-NEXT: movq (%rdi), %rax
; AVX-NEXT: movq %rax, %rcx
; AVX-NEXT: movq %rax, %rdx
; AVX-NEXT: vpinsrw $0, %eax, %xmm0, %xmm0
; AVX-NEXT: # kill: def $eax killed $eax killed $rax
; AVX-NEXT: shrl $16, %eax
; AVX-NEXT: shrq $32, %rcx
; AVX-NEXT: shrq $48, %rdx
; AVX-NEXT: vpinsrw $0, %edx, %xmm0, %xmm1
; AVX-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vpinsrw $0, %ecx, %xmm0, %xmm1
; AVX-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vpinsrw $0, %eax, %xmm0, %xmm1
; AVX-NEXT: vmovdqa %xmm1, (%rsp) # 16-byte Spill
; AVX-NEXT: callq __extendhfsf2@PLT
; AVX-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload
; AVX-NEXT: callq __extendhfsf2@PLT
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX-NEXT: callq __extendhfsf2@PLT
; AVX-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX-NEXT: callq __extendhfsf2@PLT
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
; AVX-NEXT: addq $72, %rsp
; AVX-NEXT: retq
;
; F16C-LABEL: load_cvt_8i16_to_4f32_constrained:
; F16C: # %bb.0:
; F16C-NEXT: vcvtph2ps (%rdi), %xmm0
; F16C-NEXT: retq
;
; AVX512-LABEL: load_cvt_8i16_to_4f32_constrained:
; AVX512: # %bb.0:
; AVX512-NEXT: vcvtph2ps (%rdi), %xmm0
; AVX512-NEXT: retq
  %1 = load <8 x i16>, ptr %a0
  %2 = shufflevector <8 x i16> %1, <8 x i16> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  %3 = bitcast <4 x i16> %2 to <4 x half>
  %4 = call <4 x float> @llvm.experimental.constrained.fpext.v4f32.v4f16(<4 x half> %3, metadata !"fpexcept.strict") strictfp
  ret <4 x float> %4
}

;
; Half to Double
;

define double @cvt_i16_to_f64(i16 %a0) nounwind {
; AVX-LABEL: cvt_i16_to_f64:
; AVX: # %bb.0:
; AVX-NEXT: pushq %rax
; AVX-NEXT: vpinsrw $0, %edi, %xmm0, %xmm0
; AVX-NEXT: callq __extendhfsf2@PLT
; AVX-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0
; AVX-NEXT: popq %rax
; AVX-NEXT: retq
;
; F16C-LABEL: cvt_i16_to_f64:
; F16C: # %bb.0:
; F16C-NEXT: vmovd %edi, %xmm0
; F16C-NEXT: vcvtph2ps %xmm0, %xmm0
; F16C-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0
; F16C-NEXT: retq
;
; AVX512-LABEL: cvt_i16_to_f64:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovd %edi, %xmm0
; AVX512-NEXT: vcvtph2ps %xmm0, %xmm0
; AVX512-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0
; AVX512-NEXT: retq
  %1 = bitcast i16 %a0 to half
  %2 = fpext half %1 to double
  ret double %2
}

define <2 x double> @cvt_2i16_to_2f64(<2 x i16> %a0) nounwind {
; AVX-LABEL: cvt_2i16_to_2f64:
; AVX: # %bb.0:
; AVX-NEXT: subq $40, %rsp
; AVX-NEXT: vmovd %xmm0, %eax
; AVX-NEXT: vpinsrw $0, %eax, %xmm0, %xmm0
; AVX-NEXT: shrl $16, %eax
; AVX-NEXT: vpinsrw $0, %eax, %xmm0, %xmm1
; AVX-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: callq __extendhfsf2@PLT
; AVX-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX-NEXT: callq __extendhfsf2@PLT
; AVX-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0
; AVX-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
; AVX-NEXT: vmovlhps {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; AVX-NEXT: addq $40, %rsp
; AVX-NEXT: retq
;
; F16C-LABEL: cvt_2i16_to_2f64:
; F16C: # %bb.0:
; F16C-NEXT: vcvtph2ps %xmm0, %xmm0
; F16C-NEXT: vcvtps2pd %xmm0, %xmm0
; F16C-NEXT: retq
;
; AVX512-LABEL: cvt_2i16_to_2f64:
; AVX512: # %bb.0:
; AVX512-NEXT: vcvtph2ps %xmm0, %xmm0
; AVX512-NEXT: vcvtps2pd %xmm0, %xmm0
; AVX512-NEXT: retq
  %1 = bitcast <2 x i16> %a0 to <2 x half>
  %2 = fpext <2 x half> %1 to <2 x double>
  ret <2 x double> %2
}

define <4 x double> @cvt_4i16_to_4f64(<4 x i16> %a0) nounwind {
; AVX-LABEL: cvt_4i16_to_4f64:
; AVX: # %bb.0:
; AVX-NEXT: subq $72, %rsp
; AVX-NEXT: vmovq %xmm0, %rax
; AVX-NEXT: movq %rax, %rcx
; AVX-NEXT: movl %eax, %edx
; AVX-NEXT: vpinsrw $0, %eax, %xmm0, %xmm0
; AVX-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: shrq $48, %rax
; AVX-NEXT: shrq $32, %rcx
; AVX-NEXT: shrl $16, %edx
; AVX-NEXT: vpinsrw $0, %edx, %xmm0, %xmm0
; AVX-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vpinsrw $0, %ecx, %xmm0, %xmm0
; AVX-NEXT: vmovdqa %xmm0, (%rsp) # 16-byte Spill
; AVX-NEXT: vpinsrw $0, %eax, %xmm0, %xmm0
; AVX-NEXT: callq __extendhfsf2@PLT
; AVX-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0
; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload
; AVX-NEXT: callq __extendhfsf2@PLT
; AVX-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0
; AVX-NEXT: vunpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX-NEXT: callq __extendhfsf2@PLT
; AVX-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0
; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX-NEXT: callq __extendhfsf2@PLT
; AVX-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; AVX-NEXT: vmovlhps {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; AVX-NEXT: vinsertf128 $1, (%rsp), %ymm0, %ymm0 # 16-byte Folded Reload
; AVX-NEXT: addq $72, %rsp
; AVX-NEXT: retq
;
; F16C-LABEL: cvt_4i16_to_4f64:
; F16C: # %bb.0:
; F16C-NEXT: vcvtph2ps %xmm0, %xmm0
; F16C-NEXT: vcvtps2pd %xmm0, %ymm0
; F16C-NEXT: retq
;
; AVX512-LABEL: cvt_4i16_to_4f64:
; AVX512: # %bb.0:
; AVX512-NEXT: vcvtph2ps %xmm0, %xmm0
; AVX512-NEXT: vcvtps2pd %xmm0, %ymm0
; AVX512-NEXT: retq
  %1 = bitcast <4 x i16> %a0 to <4 x half>
  %2 = fpext <4 x half> %1 to <4 x double>
  ret <4 x double> %2
}

define <2 x double> @cvt_8i16_to_2f64(<8 x i16> %a0) nounwind {
; AVX-LABEL: cvt_8i16_to_2f64:
; AVX: # %bb.0:
; AVX-NEXT: subq $40, %rsp
; AVX-NEXT: vmovd %xmm0, %eax
; AVX-NEXT: vpinsrw $0, %eax, %xmm0, %xmm0
; AVX-NEXT: shrl $16, %eax
; AVX-NEXT: vpinsrw $0, %eax, %xmm0, %xmm1
; AVX-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: callq __extendhfsf2@PLT
; AVX-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX-NEXT: callq __extendhfsf2@PLT
; AVX-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0
; AVX-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
; AVX-NEXT: vmovlhps {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; AVX-NEXT: addq $40, %rsp
; AVX-NEXT: retq
;
; F16C-LABEL: cvt_8i16_to_2f64:
; F16C: # %bb.0:
; F16C-NEXT: vcvtph2ps %xmm0, %xmm0
; F16C-NEXT: vcvtps2pd %xmm0, %xmm0
; F16C-NEXT: retq
;
; AVX512-LABEL: cvt_8i16_to_2f64:
; AVX512: # %bb.0:
; AVX512-NEXT: vcvtph2ps %xmm0, %xmm0
; AVX512-NEXT: vcvtps2pd %xmm0, %xmm0
; AVX512-NEXT: retq
  %1 = shufflevector <8 x i16> %a0, <8 x i16> undef, <2 x i32> <i32 0, i32 1>
  %2 = bitcast <2 x i16> %1 to <2 x half>
  %3 = fpext <2 x half> %2 to <2 x double>
  ret <2 x double> %3
}

define <4 x double> @cvt_8i16_to_4f64(<8 x i16> %a0) nounwind {
; AVX-LABEL: cvt_8i16_to_4f64:
; AVX: # %bb.0:
; AVX-NEXT: subq $72, %rsp
; AVX-NEXT: vmovq %xmm0, %rax
; AVX-NEXT: movq %rax, %rcx
; AVX-NEXT: movl %eax, %edx
; AVX-NEXT: vpinsrw $0, %eax, %xmm0, %xmm0
; AVX-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: shrq $48, %rax
; AVX-NEXT: shrq $32, %rcx
; AVX-NEXT: shrl $16, %edx
; AVX-NEXT: vpinsrw $0, %edx, %xmm0, %xmm0
; AVX-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vpinsrw $0, %ecx, %xmm0, %xmm0
; AVX-NEXT: vmovdqa %xmm0, (%rsp) # 16-byte Spill
; AVX-NEXT: vpinsrw $0, %eax, %xmm0, %xmm0
; AVX-NEXT: callq __extendhfsf2@PLT
; AVX-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0
; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload
; AVX-NEXT: callq __extendhfsf2@PLT
; AVX-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0
; AVX-NEXT: vunpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX-NEXT: callq __extendhfsf2@PLT
; AVX-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0
; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX-NEXT: callq __extendhfsf2@PLT
; AVX-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; AVX-NEXT: vmovlhps {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; AVX-NEXT: vinsertf128 $1, (%rsp), %ymm0, %ymm0 # 16-byte Folded Reload
; AVX-NEXT: addq $72, %rsp
; AVX-NEXT: retq
;
; F16C-LABEL: cvt_8i16_to_4f64:
; F16C: # %bb.0:
; F16C-NEXT: vcvtph2ps %xmm0, %xmm0
; F16C-NEXT: vcvtps2pd %xmm0, %ymm0
; F16C-NEXT: retq
;
; AVX512-LABEL: cvt_8i16_to_4f64:
; AVX512: # %bb.0:
; AVX512-NEXT: vcvtph2ps %xmm0, %xmm0
; AVX512-NEXT: vcvtps2pd %xmm0, %ymm0
; AVX512-NEXT: retq
  %1 = shufflevector <8 x i16> %a0, <8 x i16> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  %2 = bitcast <4 x i16> %1 to <4 x half>
  %3 = fpext <4 x half> %2 to <4 x double>
  ret <4 x double> %3
}

define <8 x double> @cvt_8i16_to_8f64(<8 x i16> %a0) nounwind {
; AVX-LABEL: cvt_8i16_to_8f64:
; AVX: # %bb.0:
; AVX-NEXT: subq $88, %rsp
; AVX-NEXT: vmovdqa %xmm0, (%rsp) # 16-byte Spill
; AVX-NEXT: vpsrlq $48, %xmm0, %xmm0
; AVX-NEXT: callq __extendhfsf2@PLT
; AVX-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0
; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vpermilps $245, (%rsp), %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = mem[1,1,3,3]
; AVX-NEXT: callq __extendhfsf2@PLT
; AVX-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0
; AVX-NEXT: vunpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload
; AVX-NEXT: callq __extendhfsf2@PLT
; AVX-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0
; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vmovdqa (%rsp), %xmm0 # 16-byte Reload
; AVX-NEXT: vpsrld $16, %xmm0, %xmm0
; AVX-NEXT: callq __extendhfsf2@PLT
; AVX-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; AVX-NEXT: vmovlhps {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; AVX-NEXT: vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovdqa (%rsp), %xmm0 # 16-byte Reload
; AVX-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; AVX-NEXT: vzeroupper
; AVX-NEXT: callq __extendhfsf2@PLT
; AVX-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0
; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vpermilps $255, (%rsp), %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = mem[3,3,3,3]
; AVX-NEXT: callq __extendhfsf2@PLT
; AVX-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0
; AVX-NEXT: vunpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vmovdqa (%rsp), %xmm0 # 16-byte Reload
; AVX-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; AVX-NEXT: callq __extendhfsf2@PLT
; AVX-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0
; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vpermilps $78, (%rsp), %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = mem[2,3,0,1]
; AVX-NEXT: callq __extendhfsf2@PLT
; AVX-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0
; AVX-NEXT: vunpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
; AVX-NEXT: vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm1 # 16-byte Folded Reload
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT: addq $88, %rsp
; AVX-NEXT: retq
;
; F16C-LABEL: cvt_8i16_to_8f64:
; F16C: # %bb.0:
; F16C-NEXT: vcvtph2ps %xmm0, %ymm1
; F16C-NEXT: vcvtps2pd %xmm1, %ymm0
; F16C-NEXT: vextractf128 $1, %ymm1, %xmm1
; F16C-NEXT: vcvtps2pd %xmm1, %ymm1
; F16C-NEXT: retq
;
; AVX512-LABEL: cvt_8i16_to_8f64:
; AVX512: # %bb.0:
; AVX512-NEXT: vcvtph2ps %xmm0, %ymm0
; AVX512-NEXT: vcvtps2pd %ymm0, %zmm0
; AVX512-NEXT: retq
  %1 = bitcast <8 x i16> %a0 to <8 x half>
  %2 = fpext <8 x half> %1 to <8 x double>
  ret <8 x double> %2
}

define <2 x double> @cvt_2i16_to_2f64_constrained(<2 x i16> %a0) nounwind strictfp {
; AVX-LABEL: cvt_2i16_to_2f64_constrained:
; AVX: # %bb.0:
; AVX-NEXT: subq $40, %rsp
; AVX-NEXT: vmovd %xmm0, %eax
; AVX-NEXT: vpinsrw $0, %eax, %xmm0, %xmm0
; AVX-NEXT: shrl $16, %eax
; AVX-NEXT: vpinsrw $0, %eax, %xmm0, %xmm1
; AVX-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: callq __extendhfsf2@PLT
; AVX-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX-NEXT: callq __extendhfsf2@PLT
; AVX-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0
; AVX-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
; AVX-NEXT: vmovlhps {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; AVX-NEXT: addq $40, %rsp
; AVX-NEXT: retq
;
; F16C-LABEL: cvt_2i16_to_2f64_constrained:
; F16C: # %bb.0:
; F16C-NEXT: vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
; F16C-NEXT: vcvtph2ps %xmm0, %xmm0
; F16C-NEXT: vcvtps2pd %xmm0, %xmm0
; F16C-NEXT: retq
;
; AVX512-LABEL: cvt_2i16_to_2f64_constrained:
; AVX512: # %bb.0:
; AVX512-NEXT: vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
; AVX512-NEXT: vcvtph2ps %xmm0, %xmm0
; AVX512-NEXT: vcvtps2pd %xmm0, %xmm0
; AVX512-NEXT: retq
  %1 = bitcast <2 x i16> %a0 to <2 x half>
  %2 = call <2 x double> @llvm.experimental.constrained.fpext.v2f64.v2f16(<2 x half> %1, metadata !"fpexcept.strict") strictfp
  ret <2 x double> %2
}
declare <2 x double> @llvm.experimental.constrained.fpext.v2f64.v2f16(<2 x half>, metadata) strictfp

define <4 x double> @cvt_4i16_to_4f64_constrained(<4 x i16> %a0) nounwind strictfp {
; AVX-LABEL: cvt_4i16_to_4f64_constrained:
; AVX: # %bb.0:
; AVX-NEXT: subq $72, %rsp
; AVX-NEXT: vmovq %xmm0, %rax
; AVX-NEXT: movq %rax, %rcx
; AVX-NEXT: movl %eax, %edx
; AVX-NEXT: vpinsrw $0, %eax, %xmm0, %xmm0
; AVX-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: shrq $48, %rax
; AVX-NEXT: shrq $32, %rcx
; AVX-NEXT: shrl $16, %edx
; AVX-NEXT: vpinsrw $0, %edx, %xmm0, %xmm0
; AVX-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vpinsrw $0, %ecx, %xmm0, %xmm0
; AVX-NEXT: vmovdqa %xmm0, (%rsp) # 16-byte Spill
; AVX-NEXT: vpinsrw $0, %eax, %xmm0, %xmm0
; AVX-NEXT: callq __extendhfsf2@PLT
; AVX-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0
; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload
; AVX-NEXT: callq __extendhfsf2@PLT
; AVX-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0
; AVX-NEXT: vunpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX-NEXT: callq __extendhfsf2@PLT
; AVX-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0
; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX-NEXT: callq __extendhfsf2@PLT
; AVX-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; AVX-NEXT: vmovlhps {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; AVX-NEXT: vinsertf128 $1, (%rsp), %ymm0, %ymm0 # 16-byte Folded Reload
; AVX-NEXT: addq $72, %rsp
; AVX-NEXT: retq
;
; F16C-LABEL: cvt_4i16_to_4f64_constrained:
; F16C: # %bb.0:
; F16C-NEXT: vcvtph2ps %xmm0, %xmm0
; F16C-NEXT: vcvtps2pd %xmm0, %ymm0
; F16C-NEXT: retq
;
; AVX512-LABEL: cvt_4i16_to_4f64_constrained:
; AVX512: # %bb.0:
; AVX512-NEXT: vcvtph2ps %xmm0, %xmm0
; AVX512-NEXT: vcvtps2pd %xmm0, %ymm0
; AVX512-NEXT: retq
  %1 = bitcast <4 x i16> %a0 to <4 x half>
  %2 = call <4 x double> @llvm.experimental.constrained.fpext.v4f64.v4f16(<4 x half> %1, metadata !"fpexcept.strict") strictfp
  ret <4 x double> %2
}
declare <4 x double> @llvm.experimental.constrained.fpext.v4f64.v4f16(<4 x half>, metadata) strictfp

define <8 x double> @cvt_8i16_to_8f64_constrained(<8 x i16> %a0) nounwind strictfp {
; AVX-LABEL: cvt_8i16_to_8f64_constrained:
; AVX: # %bb.0:
; AVX-NEXT: subq $88, %rsp
; AVX-NEXT: vmovdqa %xmm0, (%rsp) # 16-byte Spill
; AVX-NEXT: vpsrlq $48, %xmm0, %xmm0
; AVX-NEXT: callq __extendhfsf2@PLT
; AVX-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0
; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vpermilps $245, (%rsp), %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = mem[1,1,3,3]
; AVX-NEXT: callq __extendhfsf2@PLT
; AVX-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0
; AVX-NEXT: vunpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload
; AVX-NEXT: callq __extendhfsf2@PLT
; AVX-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0
; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vmovdqa (%rsp), %xmm0 # 16-byte Reload
; AVX-NEXT: vpsrld $16, %xmm0, %xmm0
; AVX-NEXT: callq __extendhfsf2@PLT
; AVX-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; AVX-NEXT: vmovlhps {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; AVX-NEXT: vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovdqa (%rsp), %xmm0 # 16-byte Reload
; AVX-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; AVX-NEXT: vzeroupper
; AVX-NEXT: callq __extendhfsf2@PLT
; AVX-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0
; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vpermilps $255, (%rsp), %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = mem[3,3,3,3]
; AVX-NEXT: callq __extendhfsf2@PLT
; AVX-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0
; AVX-NEXT: vunpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vmovdqa (%rsp), %xmm0 # 16-byte Reload
; AVX-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; AVX-NEXT: callq __extendhfsf2@PLT
; AVX-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0
; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vpermilps $78, (%rsp), %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = mem[2,3,0,1]
; AVX-NEXT: callq __extendhfsf2@PLT
; AVX-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0
; AVX-NEXT: vunpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
; AVX-NEXT: vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm1 # 16-byte Folded Reload
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT: addq $88, %rsp
; AVX-NEXT: retq
;
; F16C-LABEL: cvt_8i16_to_8f64_constrained:
; F16C: # %bb.0:
; F16C-NEXT: vcvtph2ps %xmm0, %ymm0
; F16C-NEXT: vextractf128 $1, %ymm0, %xmm1
; F16C-NEXT: vcvtps2pd %xmm1, %ymm1
; F16C-NEXT: vcvtps2pd %xmm0, %ymm0
; F16C-NEXT: retq
;
; AVX512-LABEL: cvt_8i16_to_8f64_constrained:
; AVX512: # %bb.0:
; AVX512-NEXT: vcvtph2ps %xmm0, %ymm0
; AVX512-NEXT: vcvtps2pd %ymm0, %zmm0
; AVX512-NEXT: retq
  %1 = bitcast <8 x i16> %a0 to <8 x half>
  %2 = call <8 x double> @llvm.experimental.constrained.fpext.v8f64.v8f16(<8 x half> %1, metadata !"fpexcept.strict") strictfp
  ret <8 x double> %2
}
declare <8 x double> @llvm.experimental.constrained.fpext.v8f64.v8f16(<8 x half>, metadata) strictfp

;
; Half to Double (Load)
;

define double @load_cvt_i16_to_f64(ptr %a0) nounwind {
; AVX-LABEL: load_cvt_i16_to_f64:
; AVX: # %bb.0:
; AVX-NEXT: pushq %rax
; AVX-NEXT: vpinsrw $0, (%rdi), %xmm0, %xmm0
; AVX-NEXT: callq __extendhfsf2@PLT
; AVX-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0
; AVX-NEXT: popq %rax
; AVX-NEXT: retq
;
; F16C-LABEL: load_cvt_i16_to_f64:
; F16C: # %bb.0:
; F16C-NEXT: vpinsrw $0, (%rdi), %xmm0, %xmm0
; F16C-NEXT: vcvtph2ps %xmm0, %xmm0
; F16C-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0
; F16C-NEXT: retq
;
; AVX512-LABEL: load_cvt_i16_to_f64:
; AVX512: # %bb.0:
; AVX512-NEXT: vpinsrw $0, (%rdi), %xmm0, %xmm0
; AVX512-NEXT: vcvtph2ps %xmm0, %xmm0
; AVX512-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0
; AVX512-NEXT: retq
  %1 = load i16, ptr %a0
  %2 = bitcast i16 %1 to half
  %3 = fpext half %2 to double
  ret double %3
}

define <2 x double> @load_cvt_2i16_to_2f64(ptr %a0) nounwind {
; AVX-LABEL: load_cvt_2i16_to_2f64:
; AVX: # %bb.0:
; AVX-NEXT: subq $40, %rsp
; AVX-NEXT: vpinsrw $0, (%rdi), %xmm0, %xmm0
; AVX-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vpinsrw $0, 2(%rdi), %xmm0, %xmm0
; AVX-NEXT: callq __extendhfsf2@PLT
; AVX-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX-NEXT: callq __extendhfsf2@PLT
; AVX-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0
; AVX-NEXT: vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
; AVX-NEXT: addq $40, %rsp
; AVX-NEXT: retq
;
; F16C-LABEL: load_cvt_2i16_to_2f64:
; F16C: # %bb.0:
; F16C-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; F16C-NEXT: vcvtph2ps %xmm0, %xmm0
; F16C-NEXT: vcvtps2pd %xmm0, %xmm0
; F16C-NEXT: retq
;
; AVX512-LABEL: load_cvt_2i16_to_2f64:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX512-NEXT: vcvtph2ps %xmm0, %xmm0
; AVX512-NEXT: vcvtps2pd %xmm0, %xmm0
; AVX512-NEXT: retq
  %1 = load <2 x i16>, ptr %a0
  %2 = bitcast <2 x i16> %1 to <2 x half>
  %3 = fpext <2 x half> %2 to <2 x double>
  ret <2 x double> %3
}

define <4 x double> @load_cvt_4i16_to_4f64(ptr %a0) nounwind {
; AVX-LABEL: load_cvt_4i16_to_4f64:
; AVX: # %bb.0:
; AVX-NEXT: subq $72, %rsp
; AVX-NEXT: vpinsrw $0, (%rdi), %xmm0, %xmm0
; AVX-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vpinsrw $0, 2(%rdi), %xmm0, %xmm0
; AVX-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vpinsrw $0, 4(%rdi), %xmm0, %xmm0
; AVX-NEXT: vmovdqa %xmm0, (%rsp) # 16-byte Spill
; AVX-NEXT: vpinsrw $0, 6(%rdi), %xmm0, %xmm0
; AVX-NEXT: callq __extendhfsf2@PLT
; AVX-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0
; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload
; AVX-NEXT: callq __extendhfsf2@PLT
; AVX-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0
; AVX-NEXT: vunpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX-NEXT: callq __extendhfsf2@PLT
; AVX-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0
; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX-NEXT: callq __extendhfsf2@PLT
; AVX-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0
; AVX-NEXT: vunpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
; AVX-NEXT: vinsertf128 $1, (%rsp), %ymm0, %ymm0 # 16-byte Folded Reload
; AVX-NEXT: addq $72, %rsp
; AVX-NEXT: retq
;
; F16C-LABEL: load_cvt_4i16_to_4f64:
; F16C: # %bb.0:
; F16C-NEXT: vcvtph2ps (%rdi), %xmm0
; F16C-NEXT: vcvtps2pd %xmm0, %ymm0
; F16C-NEXT: retq
;
; AVX512-LABEL: load_cvt_4i16_to_4f64:
; AVX512: # %bb.0:
; AVX512-NEXT: vcvtph2ps (%rdi), %xmm0
; AVX512-NEXT: vcvtps2pd %xmm0, %ymm0
; AVX512-NEXT: retq
  %1 = load <4 x i16>, ptr %a0
  %2 = bitcast <4 x i16> %1 to <4 x half>
  %3 = fpext <4 x half> %2 to <4 x double>
  ret <4 x double> %3
}

define <4 x double> @load_cvt_8i16_to_4f64(ptr %a0) nounwind {
; AVX-LABEL: load_cvt_8i16_to_4f64:
; AVX: # %bb.0:
; AVX-NEXT: subq $72, %rsp
; AVX-NEXT: movq (%rdi), %rax
; AVX-NEXT: movq %rax, %rcx
; AVX-NEXT: movl %eax, %edx
; AVX-NEXT: vpinsrw $0, %eax, %xmm0, %xmm0
; AVX-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: shrq $48, %rax
; AVX-NEXT: shrq $32, %rcx
; AVX-NEXT: shrl $16, %edx
; AVX-NEXT: vpinsrw $0, %edx, %xmm0, %xmm0
; AVX-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vpinsrw $0, %ecx, %xmm0, %xmm0
; AVX-NEXT: vmovdqa %xmm0, (%rsp) # 16-byte Spill
; AVX-NEXT: vpinsrw $0, %eax, %xmm0, %xmm0
; AVX-NEXT: callq __extendhfsf2@PLT
; AVX-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0
; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload
; AVX-NEXT: callq __extendhfsf2@PLT
; AVX-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0
; AVX-NEXT: vunpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX-NEXT: callq __extendhfsf2@PLT
; AVX-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0
; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX-NEXT: callq __extendhfsf2@PLT
; AVX-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; AVX-NEXT: vmovlhps {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; AVX-NEXT: vinsertf128 $1, (%rsp), %ymm0, %ymm0 # 16-byte Folded Reload
; AVX-NEXT: addq $72, %rsp
; AVX-NEXT: retq
;
; F16C-LABEL: load_cvt_8i16_to_4f64:
; F16C: # %bb.0:
; F16C-NEXT: vcvtph2ps (%rdi), %xmm0
; F16C-NEXT: vcvtps2pd %xmm0, %ymm0
; F16C-NEXT: retq
;
; AVX512-LABEL: load_cvt_8i16_to_4f64:
; AVX512: # %bb.0:
; AVX512-NEXT: vcvtph2ps (%rdi), %xmm0
; AVX512-NEXT: vcvtps2pd %xmm0, %ymm0
; AVX512-NEXT: retq
  %1 = load <8 x i16>, ptr %a0
  %2 = shufflevector <8 x i16> %1, <8 x i16> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  %3 = bitcast <4 x i16> %2 to <4 x half>
  %4 = fpext <4 x half> %3 to <4 x double>
  ret <4 x double> %4
}

define <8 x double> @load_cvt_8i16_to_8f64(ptr %a0) nounwind {
; AVX1-LABEL: load_cvt_8i16_to_8f64:
; AVX1: # %bb.0:
; AVX1-NEXT: pushq %rbx
; AVX1-NEXT: subq $80, %rsp
; AVX1-NEXT: movq %rdi, %rbx
; AVX1-NEXT: vmovaps (%rdi), %xmm0
; AVX1-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-NEXT: vbroadcastss 4(%rdi), %xmm0
; AVX1-NEXT: callq __extendhfsf2@PLT
; AVX1-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0
; AVX1-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX1-NEXT: vpsrlq $48, %xmm0, %xmm0
; AVX1-NEXT: callq __extendhfsf2@PLT
; AVX1-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0
; AVX1-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; AVX1-NEXT: vmovlhps {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; AVX1-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX1-NEXT: callq __extendhfsf2@PLT
; AVX1-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0
; AVX1-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX1-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX1-NEXT: vpsrld $16, %xmm0, %xmm0
; AVX1-NEXT: callq __extendhfsf2@PLT
; AVX1-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0
; AVX1-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
; AVX1-NEXT: vmovlhps {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; AVX1-NEXT: vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload
; AVX1-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-NEXT: vbroadcastss 12(%rbx), %xmm0
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: callq __extendhfsf2@PLT
; AVX1-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0
; AVX1-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX1-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX1-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; AVX1-NEXT: callq __extendhfsf2@PLT
; AVX1-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0
; AVX1-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
; AVX1-NEXT: vmovlhps {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; AVX1-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX1-NEXT: vbroadcastss 8(%rbx), %xmm0
; AVX1-NEXT: callq __extendhfsf2@PLT
; AVX1-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0
; AVX1-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX1-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; AVX1-NEXT: callq __extendhfsf2@PLT
; AVX1-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0
; AVX1-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; AVX1-NEXT: vmovlhps {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; AVX1-NEXT: vinsertf128 $1, (%rsp), %ymm0, %ymm1 # 16-byte Folded Reload
; AVX1-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX1-NEXT: addq $80, %rsp
; AVX1-NEXT: popq %rbx
; AVX1-NEXT: retq
;
; AVX2-LABEL: load_cvt_8i16_to_8f64:
; AVX2: # %bb.0:
; AVX2-NEXT: pushq %rbx
; AVX2-NEXT: subq $80, %rsp
; AVX2-NEXT: movq %rdi, %rbx
; AVX2-NEXT: vmovdqa (%rdi), %xmm0
; AVX2-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-NEXT: vpinsrw $0, 4(%rdi), %xmm0, %xmm0
; AVX2-NEXT: callq __extendhfsf2@PLT
; AVX2-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0
; AVX2-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX2-NEXT: vpsrlq $48, %xmm0, %xmm0
; AVX2-NEXT: callq __extendhfsf2@PLT
; AVX2-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0
; AVX2-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; AVX2-NEXT: vmovlhps {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; AVX2-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX2-NEXT: callq __extendhfsf2@PLT
; AVX2-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0
; AVX2-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX2-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX2-NEXT: vpsrld $16, %xmm0, %xmm0
; AVX2-NEXT: callq __extendhfsf2@PLT
; AVX2-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0
; AVX2-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
; AVX2-NEXT: vmovlhps {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; AVX2-NEXT: vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload
; AVX2-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT: vpinsrw $0, 12(%rbx), %xmm0, %xmm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: callq __extendhfsf2@PLT
; AVX2-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0
; AVX2-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX2-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX2-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; AVX2-NEXT: callq __extendhfsf2@PLT
; AVX2-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0
; AVX2-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
; AVX2-NEXT: vmovlhps {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; AVX2-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX2-NEXT: vpinsrw $0, 8(%rbx), %xmm0, %xmm0
; AVX2-NEXT: callq __extendhfsf2@PLT
; AVX2-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0
; AVX2-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX2-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; AVX2-NEXT: callq __extendhfsf2@PLT
; AVX2-NEXT: vcvtss2sd %xmm0, %xmm0, %xmm0
; AVX2-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; AVX2-NEXT: vmovlhps {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; AVX2-NEXT: vinsertf128 $1, (%rsp), %ymm0, %ymm1 # 16-byte Folded Reload
; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT: addq $80, %rsp
; AVX2-NEXT: popq %rbx
; AVX2-NEXT: retq
;
; F16C-LABEL: load_cvt_8i16_to_8f64:
; F16C: # %bb.0:
; F16C-NEXT: vcvtph2ps (%rdi), %ymm1
; F16C-NEXT: vcvtps2pd %xmm1, %ymm0
; F16C-NEXT: vextractf128 $1, %ymm1, %xmm1
; F16C-NEXT: vcvtps2pd %xmm1, %ymm1
; F16C-NEXT: retq
;
; AVX512-LABEL: load_cvt_8i16_to_8f64:
; AVX512: # %bb.0:
; AVX512-NEXT: vcvtph2ps (%rdi), %ymm0
; AVX512-NEXT: vcvtps2pd %ymm0, %zmm0
; AVX512-NEXT: retq
  %1 = load <8 x i16>, ptr %a0
  %2 = bitcast <8 x i16> %1 to <8 x half>
  %3 = fpext <8 x half> %2 to <8 x double>
  ret <8 x double> %3
}

;
; Float to Half
;

define i16 @cvt_f32_to_i16(float %a0) nounwind {
; AVX-LABEL: cvt_f32_to_i16:
; AVX: # %bb.0:
; AVX-NEXT: pushq %rax
; AVX-NEXT: callq __truncsfhf2@PLT
; AVX-NEXT: vpextrw $0, %xmm0, %eax
; AVX-NEXT: # kill: def $ax killed $ax killed $eax
; AVX-NEXT: popq %rcx
; AVX-NEXT: retq
;
; F16C-LABEL: cvt_f32_to_i16:
; F16C: # %bb.0:
; F16C-NEXT: vcvtps2ph $4, %xmm0, %xmm0
; F16C-NEXT: vmovd %xmm0, %eax
; F16C-NEXT: # kill: def $ax killed $ax killed $eax
; F16C-NEXT: retq
;
; AVX512-LABEL: cvt_f32_to_i16:
; AVX512: # %bb.0:
; AVX512-NEXT: vcvtps2ph $4, %xmm0, %xmm0
; AVX512-NEXT: vmovd %xmm0, %eax
; AVX512-NEXT: # kill: def $ax killed $ax killed $eax
; AVX512-NEXT: retq
  %1 = fptrunc float %a0 to half
  %2 = bitcast half %1 to i16
  ret i16 %2
}

define <4 x i16> @cvt_4f32_to_4i16(<4 x float> %a0) nounwind {
; AVX-LABEL: cvt_4f32_to_4i16:
; AVX: # %bb.0:
; AVX-NEXT: subq $72, %rsp
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX-NEXT: vmovshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
; AVX-NEXT: callq __truncsfhf2@PLT
; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload
; AVX-NEXT: callq __truncsfhf2@PLT
; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vpermilpd $1, (%rsp), %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = mem[1,0]
; AVX-NEXT: callq __truncsfhf2@PLT
; AVX-NEXT: vmovapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vpshufd $255, (%rsp), %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = mem[3,3,3,3]
; AVX-NEXT: callq __truncsfhf2@PLT
; AVX-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; AVX-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; AVX-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; AVX-NEXT: vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
; AVX-NEXT: # xmm1 = xmm1[0],mem[0],xmm1[1],mem[1],xmm1[2],mem[2],xmm1[3],mem[3]
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],zero,zero
; AVX-NEXT: addq $72, %rsp
; AVX-NEXT: retq
;
; F16C-LABEL: cvt_4f32_to_4i16:
; F16C: # %bb.0:
; F16C-NEXT: vcvtps2ph $4, %xmm0, %xmm0
; F16C-NEXT: retq
;
; AVX512-LABEL: cvt_4f32_to_4i16:
; AVX512: # %bb.0:
; AVX512-NEXT: vcvtps2ph $4, %xmm0, %xmm0
; AVX512-NEXT: retq
  %1 = fptrunc <4 x float> %a0 to <4 x half>
  %2 = bitcast <4 x half> %1 to <4 x i16>
  ret <4 x i16> %2
}

define <8 x i16> @cvt_4f32_to_8i16_undef(<4 x float> %a0) nounwind {
; AVX-LABEL: cvt_4f32_to_8i16_undef:
; AVX: # %bb.0:
; AVX-NEXT: subq $72, %rsp
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX-NEXT: vmovshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
; AVX-NEXT: callq __truncsfhf2@PLT
; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload
; AVX-NEXT: callq __truncsfhf2@PLT
; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vpermilpd $1, (%rsp), %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = mem[1,0]
; AVX-NEXT: callq __truncsfhf2@PLT
; AVX-NEXT: vmovapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vpshufd $255, (%rsp), %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = mem[3,3,3,3]
; AVX-NEXT: callq __truncsfhf2@PLT
; AVX-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; AVX-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; AVX-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; AVX-NEXT: vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
; AVX-NEXT: # xmm1 = xmm1[0],mem[0],xmm1[1],mem[1],xmm1[2],mem[2],xmm1[3],mem[3]
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],zero,zero
; AVX-NEXT: addq $72, %rsp
; AVX-NEXT: retq
;
; F16C-LABEL: cvt_4f32_to_8i16_undef:
; F16C: # %bb.0:
; F16C-NEXT: vcvtps2ph $4, %xmm0, %xmm0
; F16C-NEXT: retq
;
; AVX512-LABEL: cvt_4f32_to_8i16_undef:
; AVX512: # %bb.0:
; AVX512-NEXT: vcvtps2ph $4, %xmm0, %xmm0
; AVX512-NEXT: retq
  %1 = fptrunc <4 x float> %a0 to <4 x half>
  %2 = bitcast <4 x half> %1 to <4 x i16>
  %3 = shufflevector <4 x i16> %2, <4 x i16> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
  ret <8 x i16> %3
}

define <8 x i16> @cvt_4f32_to_8i16_zero(<4 x float> %a0) nounwind {
; AVX-LABEL: cvt_4f32_to_8i16_zero:
; AVX: # %bb.0:
; AVX-NEXT: subq $72, %rsp
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX-NEXT: vmovshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
; AVX-NEXT: callq __truncsfhf2@PLT
; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload
; AVX-NEXT: callq __truncsfhf2@PLT
; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vpermilpd $1, (%rsp), %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = mem[1,0]
; AVX-NEXT: callq __truncsfhf2@PLT
; AVX-NEXT: vmovapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vpshufd $255, (%rsp), %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = mem[3,3,3,3]
; AVX-NEXT: callq __truncsfhf2@PLT
; AVX-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; AVX-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; AVX-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; AVX-NEXT: vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
; AVX-NEXT: # xmm1 = xmm1[0],mem[0],xmm1[1],mem[1],xmm1[2],mem[2],xmm1[3],mem[3]
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],zero,zero
; AVX-NEXT: addq $72, %rsp
; AVX-NEXT: retq
;
; F16C-LABEL: cvt_4f32_to_8i16_zero:
; F16C: # %bb.0:
; F16C-NEXT: vcvtps2ph $4, %xmm0, %xmm0
; F16C-NEXT: retq
;
; AVX512-LABEL: cvt_4f32_to_8i16_zero:
; AVX512: # %bb.0:
; AVX512-NEXT: vcvtps2ph $4, %xmm0, %xmm0
; AVX512-NEXT: retq
  %1 = fptrunc <4 x float> %a0 to <4 x half>
  %2 = bitcast <4 x half> %1 to <4 x i16>
  %3 = shufflevector <4 x i16> %2, <4 x i16> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
  ret <8 x i16> %3
}

define <8 x i16> @cvt_8f32_to_8i16(<8 x float> %a0) nounwind {
; AVX-LABEL: cvt_8f32_to_8i16:
; AVX: # %bb.0:
; AVX-NEXT: subq $88, %rsp
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
; AVX-NEXT: vzeroupper
; AVX-NEXT: callq __truncsfhf2@PLT
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = mem[1,0]
; AVX-NEXT: callq __truncsfhf2@PLT
; AVX-NEXT: vpunpcklwd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
; AVX-NEXT: vmovdqa %xmm0, (%rsp) # 16-byte Spill
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX-NEXT: callq __truncsfhf2@PLT
; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = mem[1,1,3,3]
; AVX-NEXT: callq __truncsfhf2@PLT
; AVX-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; AVX-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; AVX-NEXT: vpunpckldq (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
; AVX-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = mem[3,3,3,3]
; AVX-NEXT: callq __truncsfhf2@PLT
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = mem[1,0]
; AVX-NEXT: callq __truncsfhf2@PLT
; AVX-NEXT: vpunpcklwd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
; AVX-NEXT: vmovdqa %xmm0, (%rsp) # 16-byte Spill
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX-NEXT: vzeroupper
; AVX-NEXT: callq __truncsfhf2@PLT
; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = mem[1,1,3,3]
; AVX-NEXT: callq __truncsfhf2@PLT
; AVX-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; AVX-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; AVX-NEXT: vpunpckldq (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
; AVX-NEXT: vpunpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
; AVX-NEXT: addq $88, %rsp
; AVX-NEXT: retq
;
; F16C-LABEL: cvt_8f32_to_8i16:
; F16C: # %bb.0:
; F16C-NEXT: vcvtps2ph $4, %ymm0, %xmm0
; F16C-NEXT: vzeroupper
; F16C-NEXT: retq
;
; AVX512-LABEL: cvt_8f32_to_8i16:
; AVX512: # %bb.0:
; AVX512-NEXT: vcvtps2ph $4, %ymm0, %xmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
  %1 = fptrunc <8 x float> %a0 to <8 x half>
  %2 = bitcast <8 x half> %1 to <8 x i16>
  ret <8 x i16> %2
}

define <16 x i16> @cvt_16f32_to_16i16(<16 x float> %a0) nounwind {
; AVX1-LABEL: cvt_16f32_to_16i16:
; AVX1: # %bb.0:
; AVX1-NEXT: subq $120, %rsp
; AVX1-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm0
; AVX1-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: callq __truncsfhf2@PLT
; AVX1-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX1-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; AVX1-NEXT: # xmm0 = mem[1,0]
; AVX1-NEXT: callq __truncsfhf2@PLT
; AVX1-NEXT: vpunpcklwd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX1-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
; AVX1-NEXT: vmovdqa %xmm0, (%rsp) # 16-byte Spill
; AVX1-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX1-NEXT: callq __truncsfhf2@PLT
; AVX1-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-NEXT: vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; AVX1-NEXT: # xmm0 = mem[1,1,3,3]
; AVX1-NEXT: callq __truncsfhf2@PLT
; AVX1-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; AVX1-NEXT: vpunpckldq (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX1-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
; AVX1-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-NEXT: vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; AVX1-NEXT: # xmm0 = mem[3,3,3,3]
; AVX1-NEXT: callq __truncsfhf2@PLT
; AVX1-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX1-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; AVX1-NEXT: # xmm0 = mem[1,0]
; AVX1-NEXT: callq __truncsfhf2@PLT
; AVX1-NEXT: vpunpcklwd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX1-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
; AVX1-NEXT: vmovdqa %xmm0, (%rsp) # 16-byte Spill
; AVX1-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX1-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: callq __truncsfhf2@PLT
; AVX1-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-NEXT: vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; AVX1-NEXT: # xmm0 = mem[1,1,3,3]
; AVX1-NEXT: callq __truncsfhf2@PLT
; AVX1-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; AVX1-NEXT: vpunpckldq (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX1-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
; AVX1-NEXT: vpunpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX1-NEXT: # xmm0 = xmm0[0],mem[0]
; AVX1-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: callq __truncsfhf2@PLT
; AVX1-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX1-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; AVX1-NEXT: # xmm0 = mem[1,0]
; AVX1-NEXT: callq __truncsfhf2@PLT
; AVX1-NEXT: vpunpcklwd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX1-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
; AVX1-NEXT: vmovdqa %xmm0, (%rsp) # 16-byte Spill
; AVX1-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX1-NEXT: callq __truncsfhf2@PLT
; AVX1-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-NEXT: vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; AVX1-NEXT: # xmm0 = mem[1,1,3,3]
; AVX1-NEXT: callq __truncsfhf2@PLT
; AVX1-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; AVX1-NEXT: vpunpckldq (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX1-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
; AVX1-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-NEXT: vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; AVX1-NEXT: # xmm0 = mem[3,3,3,3]
; AVX1-NEXT: callq __truncsfhf2@PLT
; AVX1-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX1-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; AVX1-NEXT: # xmm0 = mem[1,0]
; AVX1-NEXT: callq __truncsfhf2@PLT
; AVX1-NEXT: vpunpcklwd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX1-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
; AVX1-NEXT: vmovdqa %xmm0, (%rsp) # 16-byte Spill
; AVX1-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX1-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: callq __truncsfhf2@PLT
; AVX1-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-NEXT: vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; AVX1-NEXT: # xmm0 = mem[1,1,3,3]
; AVX1-NEXT: callq __truncsfhf2@PLT
; AVX1-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; AVX1-NEXT: vpunpckldq (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX1-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
; AVX1-NEXT: vpunpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX1-NEXT: # xmm0 = xmm0[0],mem[0]
; AVX1-NEXT: vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload
; AVX1-NEXT: addq $120, %rsp
; AVX1-NEXT: retq
;
; AVX2-LABEL: cvt_16f32_to_16i16:
; AVX2: # %bb.0:
; AVX2-NEXT: subq $184, %rsp
; AVX2-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT: vextractf128 $1, %ymm1, %xmm0
; AVX2-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-NEXT: vshufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: callq __truncsfhf2@PLT
; AVX2-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; AVX2-NEXT: # xmm0 = mem[1,0]
; AVX2-NEXT: callq __truncsfhf2@PLT
; AVX2-NEXT: vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX2-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
; AVX2-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX2-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-NEXT: vshufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: callq __truncsfhf2@PLT
; AVX2-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX2-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; AVX2-NEXT: # xmm0 = mem[1,0]
; AVX2-NEXT: callq __truncsfhf2@PLT
; AVX2-NEXT: vpunpcklwd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX2-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
; AVX2-NEXT: vinserti128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload
; AVX2-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: callq __truncsfhf2@PLT
; AVX2-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX2-NEXT: vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; AVX2-NEXT: # xmm0 = mem[1,1,3,3]
; AVX2-NEXT: callq __truncsfhf2@PLT
; AVX2-NEXT: vmovdqa (%rsp), %xmm1 # 16-byte Reload
; AVX2-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; AVX2-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX2-NEXT: callq __truncsfhf2@PLT
; AVX2-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX2-NEXT: vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; AVX2-NEXT: # xmm0 = mem[1,1,3,3]
; AVX2-NEXT: callq __truncsfhf2@PLT
; AVX2-NEXT: vmovdqa (%rsp), %xmm1 # 16-byte Reload
; AVX2-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; AVX2-NEXT: vinserti128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload
; AVX2-NEXT: vpunpckldq {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
; AVX2-NEXT: # ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5]
; AVX2-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT: vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; AVX2-NEXT: # xmm0 = mem[3,3,3,3]
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: callq __truncsfhf2@PLT
; AVX2-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; AVX2-NEXT: # xmm0 = mem[1,0]
; AVX2-NEXT: callq __truncsfhf2@PLT
; AVX2-NEXT: vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX2-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
; AVX2-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-NEXT: vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; AVX2-NEXT: # xmm0 = mem[3,3,3,3]
; AVX2-NEXT: callq __truncsfhf2@PLT
; AVX2-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; AVX2-NEXT: # xmm0 = mem[1,0]
; AVX2-NEXT: callq __truncsfhf2@PLT
; AVX2-NEXT: vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX2-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
; AVX2-NEXT: vinserti128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload
; AVX2-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: callq __truncsfhf2@PLT
; AVX2-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-NEXT: vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; AVX2-NEXT: # xmm0 = mem[1,1,3,3]
; AVX2-NEXT: callq __truncsfhf2@PLT
; AVX2-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; AVX2-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; AVX2-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: callq __truncsfhf2@PLT
; AVX2-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-NEXT: vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; AVX2-NEXT: # xmm0 = mem[1,1,3,3]
; AVX2-NEXT: callq __truncsfhf2@PLT
; AVX2-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; AVX2-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; AVX2-NEXT: vinserti128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload
; AVX2-NEXT: vpunpckldq {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
; AVX2-NEXT: # ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5]
; AVX2-NEXT: vpunpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
; AVX2-NEXT: # ymm0 = ymm0[0],mem[0],ymm0[2],mem[2]
; AVX2-NEXT: addq $184, %rsp
; AVX2-NEXT: retq
;
; F16C-LABEL: cvt_16f32_to_16i16:
; F16C: # %bb.0:
; F16C-NEXT: vcvtps2ph $4, %ymm0, %xmm0
; F16C-NEXT: vcvtps2ph $4, %ymm1, %xmm1
; F16C-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; F16C-NEXT: retq
;
; AVX512-LABEL: cvt_16f32_to_16i16:
; AVX512: # %bb.0:
; AVX512-NEXT: vcvtps2ph $4, %zmm0, %ymm0
; AVX512-NEXT: retq
%1 = fptrunc <16 x float> %a0 to <16 x half>
%2 = bitcast <16 x half> %1 to <16 x i16>
ret <16 x i16> %2
}

;
; Float to Half (Store)
;

define void @store_cvt_f32_to_i16(float %a0, ptr %a1) nounwind {
; AVX-LABEL: store_cvt_f32_to_i16:
; AVX: # %bb.0:
; AVX-NEXT: pushq %rbx
; AVX-NEXT: movq %rdi, %rbx
; AVX-NEXT: callq __truncsfhf2@PLT
; AVX-NEXT: vpextrw $0, %xmm0, (%rbx)
; AVX-NEXT: popq %rbx
; AVX-NEXT: retq
;
; F16C-LABEL: store_cvt_f32_to_i16:
; F16C: # %bb.0:
; F16C-NEXT: vcvtps2ph $4, %xmm0, %xmm0
; F16C-NEXT: vpextrw $0, %xmm0, (%rdi)
; F16C-NEXT: retq
;
; AVX512-LABEL: store_cvt_f32_to_i16:
; AVX512: # %bb.0:
; AVX512-NEXT: vcvtps2ph $4, %xmm0, %xmm0
; AVX512-NEXT: vpextrw $0, %xmm0, (%rdi)
; AVX512-NEXT: retq
%1 = fptrunc float %a0 to half
%2 = bitcast half %1 to i16
store i16 %2, ptr %a1
ret void
}

define void @store_cvt_4f32_to_4i16(<4 x float> %a0, ptr %a1) nounwind {
; AVX-LABEL: store_cvt_4f32_to_4i16:
; AVX: # %bb.0:
; AVX-NEXT: pushq %rbx
; AVX-NEXT: subq $64, %rsp
; AVX-NEXT: movq %rdi, %rbx
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX-NEXT: vmovshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
; AVX-NEXT: callq __truncsfhf2@PLT
; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vpermilpd $1, (%rsp), %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = mem[1,0]
; AVX-NEXT: callq __truncsfhf2@PLT
; AVX-NEXT: vmovapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vpermilps $255, (%rsp), %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = mem[3,3,3,3]
; AVX-NEXT: callq __truncsfhf2@PLT
; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vmovdqa (%rsp), %xmm0 # 16-byte Reload
; AVX-NEXT: callq __truncsfhf2@PLT
; AVX-NEXT: vpextrw $0, %xmm0, (%rbx)
; AVX-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX-NEXT: vpextrw $0, %xmm0, 6(%rbx)
; AVX-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX-NEXT: vpextrw $0, %xmm0, 4(%rbx)
; AVX-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX-NEXT: vpextrw $0, %xmm0, 2(%rbx)
; AVX-NEXT: addq $64, %rsp
; AVX-NEXT: popq %rbx
; AVX-NEXT: retq
;
; F16C-LABEL: store_cvt_4f32_to_4i16:
; F16C: # %bb.0:
; F16C-NEXT: vcvtps2ph $4, %xmm0, (%rdi)
; F16C-NEXT: retq
;
; AVX512-LABEL: store_cvt_4f32_to_4i16:
; AVX512: # %bb.0:
; AVX512-NEXT: vcvtps2ph $4, %xmm0, (%rdi)
; AVX512-NEXT: retq
%1 = fptrunc <4 x float> %a0 to <4 x half>
%2 = bitcast <4 x half> %1 to <4 x i16>
store <4 x i16> %2, ptr %a1
ret void
}

define void @store_cvt_4f32_to_8i16_undef(<4 x float> %a0, ptr %a1) nounwind {
; AVX-LABEL: store_cvt_4f32_to_8i16_undef:
; AVX: # %bb.0:
; AVX-NEXT: pushq %rbx
; AVX-NEXT: subq $64, %rsp
; AVX-NEXT: movq %rdi, %rbx
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX-NEXT: vmovshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
; AVX-NEXT: callq __truncsfhf2@PLT
; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload
; AVX-NEXT: callq __truncsfhf2@PLT
; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vpermilpd $1, (%rsp), %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = mem[1,0]
; AVX-NEXT: callq __truncsfhf2@PLT
; AVX-NEXT: vmovapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vpshufd $255, (%rsp), %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = mem[3,3,3,3]
; AVX-NEXT: callq __truncsfhf2@PLT
; AVX-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; AVX-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; AVX-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; AVX-NEXT: vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
; AVX-NEXT: # xmm1 = xmm1[0],mem[0],xmm1[1],mem[1],xmm1[2],mem[2],xmm1[3],mem[3]
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],zero,zero
; AVX-NEXT: vmovaps %xmm0, (%rbx)
; AVX-NEXT: addq $64, %rsp
; AVX-NEXT: popq %rbx
; AVX-NEXT: retq
;
; F16C-LABEL: store_cvt_4f32_to_8i16_undef:
; F16C: # %bb.0:
; F16C-NEXT: vcvtps2ph $4, %xmm0, %xmm0
; F16C-NEXT: vmovaps %xmm0, (%rdi)
; F16C-NEXT: retq
;
; AVX512-LABEL: store_cvt_4f32_to_8i16_undef:
; AVX512: # %bb.0:
; AVX512-NEXT: vcvtps2ph $4, %xmm0, %xmm0
; AVX512-NEXT: vmovaps %xmm0, (%rdi)
; AVX512-NEXT: retq
%1 = fptrunc <4 x float> %a0 to <4 x half>
%2 = bitcast <4 x half> %1 to <4 x i16>
%3 = shufflevector <4 x i16> %2, <4 x i16> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
store <8 x i16> %3, ptr %a1
ret void
}

define void @store_cvt_4f32_to_8i16_zero(<4 x float> %a0, ptr %a1) nounwind {
; AVX-LABEL: store_cvt_4f32_to_8i16_zero:
; AVX: # %bb.0:
; AVX-NEXT: pushq %rbx
; AVX-NEXT: subq $64, %rsp
; AVX-NEXT: movq %rdi, %rbx
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX-NEXT: vmovshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
; AVX-NEXT: callq __truncsfhf2@PLT
; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload
; AVX-NEXT: callq __truncsfhf2@PLT
; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vpermilpd $1, (%rsp), %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = mem[1,0]
; AVX-NEXT: callq __truncsfhf2@PLT
; AVX-NEXT: vmovapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vpshufd $255, (%rsp), %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = mem[3,3,3,3]
; AVX-NEXT: callq __truncsfhf2@PLT
; AVX-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; AVX-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; AVX-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; AVX-NEXT: vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
; AVX-NEXT: # xmm1 = xmm1[0],mem[0],xmm1[1],mem[1],xmm1[2],mem[2],xmm1[3],mem[3]
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],zero,zero
; AVX-NEXT: vmovaps %xmm0, (%rbx)
; AVX-NEXT: addq $64, %rsp
; AVX-NEXT: popq %rbx
; AVX-NEXT: retq
;
; F16C-LABEL: store_cvt_4f32_to_8i16_zero:
; F16C: # %bb.0:
; F16C-NEXT: vcvtps2ph $4, %xmm0, %xmm0
; F16C-NEXT: vmovaps %xmm0, (%rdi)
; F16C-NEXT: retq
;
; AVX512-LABEL: store_cvt_4f32_to_8i16_zero:
; AVX512: # %bb.0:
; AVX512-NEXT: vcvtps2ph $4, %xmm0, %xmm0
; AVX512-NEXT: vmovaps %xmm0, (%rdi)
; AVX512-NEXT: retq
%1 = fptrunc <4 x float> %a0 to <4 x half>
%2 = bitcast <4 x half> %1 to <4 x i16>
%3 = shufflevector <4 x i16> %2, <4 x i16> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
store <8 x i16> %3, ptr %a1
ret void
}

define void @store_cvt_8f32_to_8i16(<8 x float> %a0, ptr %a1) nounwind {
; AVX-LABEL: store_cvt_8f32_to_8i16:
; AVX: # %bb.0:
; AVX-NEXT: pushq %rbx
; AVX-NEXT: subq $80, %rsp
; AVX-NEXT: movq %rdi, %rbx
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
; AVX-NEXT: vzeroupper
; AVX-NEXT: callq __truncsfhf2@PLT
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = mem[1,0]
; AVX-NEXT: callq __truncsfhf2@PLT
; AVX-NEXT: vpunpcklwd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
; AVX-NEXT: vmovdqa %xmm0, (%rsp) # 16-byte Spill
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX-NEXT: callq __truncsfhf2@PLT
; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = mem[1,1,3,3]
; AVX-NEXT: callq __truncsfhf2@PLT
; AVX-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; AVX-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; AVX-NEXT: vpunpckldq (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
; AVX-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = mem[3,3,3,3]
; AVX-NEXT: callq __truncsfhf2@PLT
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = mem[1,0]
; AVX-NEXT: callq __truncsfhf2@PLT
; AVX-NEXT: vpunpcklwd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
; AVX-NEXT: vmovdqa %xmm0, (%rsp) # 16-byte Spill
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX-NEXT: vzeroupper
; AVX-NEXT: callq __truncsfhf2@PLT
; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = mem[1,1,3,3]
; AVX-NEXT: callq __truncsfhf2@PLT
; AVX-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; AVX-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; AVX-NEXT: vpunpckldq (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
; AVX-NEXT: vpunpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
; AVX-NEXT: vmovdqa %xmm0, (%rbx)
; AVX-NEXT: addq $80, %rsp
; AVX-NEXT: popq %rbx
; AVX-NEXT: retq
;
; F16C-LABEL: store_cvt_8f32_to_8i16:
; F16C: # %bb.0:
; F16C-NEXT: vcvtps2ph $4, %ymm0, (%rdi)
; F16C-NEXT: vzeroupper
; F16C-NEXT: retq
;
; AVX512-LABEL: store_cvt_8f32_to_8i16:
; AVX512: # %bb.0:
; AVX512-NEXT: vcvtps2ph $4, %ymm0, (%rdi)
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = fptrunc <8 x float> %a0 to <8 x half>
%2 = bitcast <8 x half> %1 to <8 x i16>
store <8 x i16> %2, ptr %a1
ret void
}

define void @store_cvt_16f32_to_16i16(<16 x float> %a0, ptr %a1) nounwind {
; AVX1-LABEL: store_cvt_16f32_to_16i16:
; AVX1: # %bb.0:
; AVX1-NEXT: pushq %rbx
; AVX1-NEXT: subq $112, %rsp
; AVX1-NEXT: movq %rdi, %rbx
; AVX1-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm0
; AVX1-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: callq __truncsfhf2@PLT
; AVX1-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX1-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; AVX1-NEXT: # xmm0 = mem[1,0]
; AVX1-NEXT: callq __truncsfhf2@PLT
; AVX1-NEXT: vpunpcklwd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX1-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
; AVX1-NEXT: vmovdqa %xmm0, (%rsp) # 16-byte Spill
; AVX1-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX1-NEXT: callq __truncsfhf2@PLT
; AVX1-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-NEXT: vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; AVX1-NEXT: # xmm0 = mem[1,1,3,3]
; AVX1-NEXT: callq __truncsfhf2@PLT
; AVX1-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; AVX1-NEXT: vpunpckldq (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX1-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
; AVX1-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-NEXT: vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; AVX1-NEXT: # xmm0 = mem[3,3,3,3]
; AVX1-NEXT: callq __truncsfhf2@PLT
; AVX1-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX1-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; AVX1-NEXT: # xmm0 = mem[1,0]
; AVX1-NEXT: callq __truncsfhf2@PLT
; AVX1-NEXT: vpunpcklwd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX1-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
; AVX1-NEXT: vmovdqa %xmm0, (%rsp) # 16-byte Spill
; AVX1-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX1-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: callq __truncsfhf2@PLT
; AVX1-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-NEXT: vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; AVX1-NEXT: # xmm0 = mem[1,1,3,3]
; AVX1-NEXT: callq __truncsfhf2@PLT
; AVX1-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; AVX1-NEXT: vpunpckldq (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX1-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
; AVX1-NEXT: vpunpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX1-NEXT: # xmm0 = xmm0[0],mem[0]
; AVX1-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: callq __truncsfhf2@PLT
; AVX1-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX1-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; AVX1-NEXT: # xmm0 = mem[1,0]
; AVX1-NEXT: callq __truncsfhf2@PLT
; AVX1-NEXT: vpunpcklwd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX1-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
; AVX1-NEXT: vmovdqa %xmm0, (%rsp) # 16-byte Spill
; AVX1-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX1-NEXT: callq __truncsfhf2@PLT
; AVX1-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-NEXT: vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; AVX1-NEXT: # xmm0 = mem[1,1,3,3]
; AVX1-NEXT: callq __truncsfhf2@PLT
; AVX1-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; AVX1-NEXT: vpunpckldq (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX1-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
; AVX1-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-NEXT: vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; AVX1-NEXT: # xmm0 = mem[3,3,3,3]
; AVX1-NEXT: callq __truncsfhf2@PLT
; AVX1-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX1-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; AVX1-NEXT: # xmm0 = mem[1,0]
; AVX1-NEXT: callq __truncsfhf2@PLT
; AVX1-NEXT: vpunpcklwd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX1-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
; AVX1-NEXT: vmovdqa %xmm0, (%rsp) # 16-byte Spill
; AVX1-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX1-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: callq __truncsfhf2@PLT
; AVX1-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-NEXT: vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; AVX1-NEXT: # xmm0 = mem[1,1,3,3]
; AVX1-NEXT: callq __truncsfhf2@PLT
; AVX1-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; AVX1-NEXT: vpunpckldq (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX1-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
; AVX1-NEXT: vpunpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX1-NEXT: # xmm0 = xmm0[0],mem[0]
; AVX1-NEXT: vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload
; AVX1-NEXT: vmovaps %ymm0, (%rbx)
; AVX1-NEXT: addq $112, %rsp
; AVX1-NEXT: popq %rbx
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: store_cvt_16f32_to_16i16:
; AVX2: # %bb.0:
; AVX2-NEXT: pushq %rbx
; AVX2-NEXT: subq $176, %rsp
; AVX2-NEXT: movq %rdi, %rbx
; AVX2-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT: vextractf128 $1, %ymm1, %xmm0
; AVX2-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-NEXT: vshufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: callq __truncsfhf2@PLT
; AVX2-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; AVX2-NEXT: # xmm0 = mem[1,0]
; AVX2-NEXT: callq __truncsfhf2@PLT
; AVX2-NEXT: vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX2-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
; AVX2-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX2-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-NEXT: vshufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: callq __truncsfhf2@PLT
; AVX2-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX2-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; AVX2-NEXT: # xmm0 = mem[1,0]
; AVX2-NEXT: callq __truncsfhf2@PLT
; AVX2-NEXT: vpunpcklwd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX2-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
; AVX2-NEXT: vinserti128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload
; AVX2-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: callq __truncsfhf2@PLT
; AVX2-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX2-NEXT: vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; AVX2-NEXT: # xmm0 = mem[1,1,3,3]
; AVX2-NEXT: callq __truncsfhf2@PLT
; AVX2-NEXT: vmovdqa (%rsp), %xmm1 # 16-byte Reload
; AVX2-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; AVX2-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX2-NEXT: callq __truncsfhf2@PLT
; AVX2-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX2-NEXT: vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; AVX2-NEXT: # xmm0 = mem[1,1,3,3]
; AVX2-NEXT: callq __truncsfhf2@PLT
; AVX2-NEXT: vmovdqa (%rsp), %xmm1 # 16-byte Reload
; AVX2-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; AVX2-NEXT: vinserti128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload
; AVX2-NEXT: vpunpckldq {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
; AVX2-NEXT: # ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5]
; AVX2-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT: vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; AVX2-NEXT: # xmm0 = mem[3,3,3,3]
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: callq __truncsfhf2@PLT
; AVX2-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; AVX2-NEXT: # xmm0 = mem[1,0]
; AVX2-NEXT: callq __truncsfhf2@PLT
; AVX2-NEXT: vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX2-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
; AVX2-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-NEXT: vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; AVX2-NEXT: # xmm0 = mem[3,3,3,3]
; AVX2-NEXT: callq __truncsfhf2@PLT
; AVX2-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; AVX2-NEXT: # xmm0 = mem[1,0]
; AVX2-NEXT: callq __truncsfhf2@PLT
; AVX2-NEXT: vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX2-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
; AVX2-NEXT: vinserti128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload
; AVX2-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: callq __truncsfhf2@PLT
; AVX2-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-NEXT: vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; AVX2-NEXT: # xmm0 = mem[1,1,3,3]
; AVX2-NEXT: callq __truncsfhf2@PLT
; AVX2-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; AVX2-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; AVX2-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: callq __truncsfhf2@PLT
; AVX2-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-NEXT: vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; AVX2-NEXT: # xmm0 = mem[1,1,3,3]
; AVX2-NEXT: callq __truncsfhf2@PLT
; AVX2-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; AVX2-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; AVX2-NEXT: vinserti128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload
; AVX2-NEXT: vpunpckldq {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
; AVX2-NEXT: # ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5]
; AVX2-NEXT: vpunpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
; AVX2-NEXT: # ymm0 = ymm0[0],mem[0],ymm0[2],mem[2]
; AVX2-NEXT: vmovdqa %ymm0, (%rbx)
; AVX2-NEXT: addq $176, %rsp
; AVX2-NEXT: popq %rbx
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; F16C-LABEL: store_cvt_16f32_to_16i16:
; F16C: # %bb.0:
; F16C-NEXT: vcvtps2ph $4, %ymm1, 16(%rdi)
; F16C-NEXT: vcvtps2ph $4, %ymm0, (%rdi)
; F16C-NEXT: vzeroupper
; F16C-NEXT: retq
;
; AVX512-LABEL: store_cvt_16f32_to_16i16:
; AVX512: # %bb.0:
; AVX512-NEXT: vcvtps2ph $4, %zmm0, (%rdi)
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = fptrunc <16 x float> %a0 to <16 x half>
%2 = bitcast <16 x half> %1 to <16 x i16>
store <16 x i16> %2, ptr %a1
ret void
}

;
; Double to Half
;

define i16 @cvt_f64_to_i16(double %a0) nounwind {
; AVX-LABEL: cvt_f64_to_i16:
; AVX: # %bb.0:
; AVX-NEXT: pushq %rax
; AVX-NEXT: callq __truncdfhf2@PLT
; AVX-NEXT: vpextrw $0, %xmm0, %eax
; AVX-NEXT: # kill: def $ax killed $ax killed $eax
; AVX-NEXT: popq %rcx
; AVX-NEXT: retq
;
; F16C-LABEL: cvt_f64_to_i16:
; F16C: # %bb.0:
; F16C-NEXT: pushq %rax
; F16C-NEXT: callq __truncdfhf2@PLT
; F16C-NEXT: vpextrw $0, %xmm0, %eax
; F16C-NEXT: # kill: def $ax killed $ax killed $eax
; F16C-NEXT: popq %rcx
; F16C-NEXT: retq
;
; AVX512-LABEL: cvt_f64_to_i16:
; AVX512: # %bb.0:
; AVX512-NEXT: pushq %rax
; AVX512-NEXT: callq __truncdfhf2@PLT
; AVX512-NEXT: vpextrw $0, %xmm0, %eax
; AVX512-NEXT: # kill: def $ax killed $ax killed $eax
; AVX512-NEXT: popq %rcx
; AVX512-NEXT: retq
%1 = fptrunc double %a0 to half
%2 = bitcast half %1 to i16
ret i16 %2
}

define <2 x i16> @cvt_2f64_to_2i16(<2 x double> %a0) nounwind {
; AVX-LABEL: cvt_2f64_to_2i16:
; AVX: # %bb.0:
; AVX-NEXT: subq $40, %rsp
; AVX-NEXT: vmovapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vshufpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX-NEXT: callq __truncdfhf2@PLT
; AVX-NEXT: vmovapd %xmm0, (%rsp) # 16-byte Spill
; AVX-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX-NEXT: callq __truncdfhf2@PLT
; AVX-NEXT: vpunpcklwd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
; AVX-NEXT: addq $40, %rsp
; AVX-NEXT: retq
;
; F16C-LABEL: cvt_2f64_to_2i16:
; F16C: # %bb.0:
; F16C-NEXT: subq $40, %rsp
; F16C-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; F16C-NEXT: callq __truncdfhf2@PLT
; F16C-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; F16C-NEXT: vpermilpd $1, (%rsp), %xmm0 # 16-byte Folded Reload
; F16C-NEXT: # xmm0 = mem[1,0]
; F16C-NEXT: callq __truncdfhf2@PLT
; F16C-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; F16C-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; F16C-NEXT: addq $40, %rsp
; F16C-NEXT: retq
;
; AVX512F-LABEL: cvt_2f64_to_2i16:
; AVX512F: # %bb.0:
; AVX512F-NEXT: subq $104, %rsp
; AVX512F-NEXT: vmovdqa %xmm0, (%rsp) # 16-byte Spill
; AVX512F-NEXT: callq __truncdfhf2@PLT
; AVX512F-NEXT: vpbroadcastw %xmm0, %xmm0
; AVX512F-NEXT: vmovdqu64 %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512F-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: callq __truncdfhf2@PLT
; AVX512F-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX512F-NEXT: vpermilpd $1, (%rsp), %xmm0 # 16-byte Folded Reload
; AVX512F-NEXT: # xmm0 = mem[1,0]
; AVX512F-NEXT: callq __truncdfhf2@PLT
; AVX512F-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; AVX512F-NEXT: vpunpcklwd {{.*#+}} xmm2 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; AVX512F-NEXT: vpmovsxbq {{.*#+}} xmm1 = [16,0]
; AVX512F-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512F-NEXT: vpermt2ps %zmm2, %zmm1, %zmm0
; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512F-NEXT: addq $104, %rsp
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512-FASTLANE-LABEL: cvt_2f64_to_2i16:
; AVX512-FASTLANE: # %bb.0:
; AVX512-FASTLANE-NEXT: subq $40, %rsp
; AVX512-FASTLANE-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX512-FASTLANE-NEXT: callq __truncdfhf2@PLT
; AVX512-FASTLANE-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX512-FASTLANE-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; AVX512-FASTLANE-NEXT: # xmm0 = mem[1,0]
; AVX512-FASTLANE-NEXT: callq __truncdfhf2@PLT
; AVX512-FASTLANE-NEXT: vmovdqa (%rsp), %xmm1 # 16-byte Reload
; AVX512-FASTLANE-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; AVX512-FASTLANE-NEXT: vmovdqa %xmm0, (%rsp) # 16-byte Spill
; AVX512-FASTLANE-NEXT: callq __truncdfhf2@PLT
; AVX512-FASTLANE-NEXT: vpbroadcastw %xmm0, %xmm1
; AVX512-FASTLANE-NEXT: vpmovsxbq {{.*#+}} xmm0 = [4,0]
; AVX512-FASTLANE-NEXT: vpermi2ps (%rsp), %xmm1, %xmm0 # 16-byte Folded Reload
; AVX512-FASTLANE-NEXT: addq $40, %rsp
; AVX512-FASTLANE-NEXT: retq
%1 = fptrunc <2 x double> %a0 to <2 x half>
%2 = bitcast <2 x half> %1 to <2 x i16>
ret <2 x i16> %2
}

define <4 x i16> @cvt_4f64_to_4i16(<4 x double> %a0) nounwind {
; AVX1-LABEL: cvt_4f64_to_4i16:
; AVX1: # %bb.0:
; AVX1-NEXT: subq $88, %rsp
; AVX1-NEXT: vmovupd %ymm0, (%rsp) # 32-byte Spill
; AVX1-NEXT: vshufpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: callq __truncdfhf2@PLT
; AVX1-NEXT: vmovapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
; AVX1-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: callq __truncdfhf2@PLT
; AVX1-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vmovapd %xmm0, (%rsp) # 16-byte Spill
; AVX1-NEXT: vshufpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: callq __truncdfhf2@PLT
; AVX1-NEXT: vmovapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-NEXT: vmovdqa (%rsp), %xmm0 # 16-byte Reload
; AVX1-NEXT: callq __truncdfhf2@PLT
; AVX1-NEXT: vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX1-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
; AVX1-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; AVX1-NEXT: vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
; AVX1-NEXT: # xmm1 = xmm1[0],mem[0],xmm1[1],mem[1],xmm1[2],mem[2],xmm1[3],mem[3]
; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],zero,zero
; AVX1-NEXT: addq $88, %rsp
; AVX1-NEXT: retq
;
; AVX2-LABEL: cvt_4f64_to_4i16:
; AVX2: # %bb.0:
; AVX2-NEXT: subq $88, %rsp
; AVX2-NEXT: vmovupd %ymm0, (%rsp) # 32-byte Spill
; AVX2-NEXT: vshufpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: callq __truncdfhf2@PLT
; AVX2-NEXT: vmovapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: callq __truncdfhf2@PLT
; AVX2-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-NEXT: vmovupd (%rsp), %ymm0 # 32-byte Reload
; AVX2-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX2-NEXT: vmovapd %xmm0, (%rsp) # 16-byte Spill
; AVX2-NEXT: vshufpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: callq __truncdfhf2@PLT
; AVX2-NEXT: vmovapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-NEXT: vmovdqa (%rsp), %xmm0 # 16-byte Reload
; AVX2-NEXT: callq __truncdfhf2@PLT
; AVX2-NEXT: vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX2-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
; AVX2-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; AVX2-NEXT: vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
; AVX2-NEXT: # xmm1 = xmm1[0],mem[0],xmm1[1],mem[1],xmm1[2],mem[2],xmm1[3],mem[3]
; AVX2-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],zero,zero
; AVX2-NEXT: addq $88, %rsp
; AVX2-NEXT: retq
;
; F16C-LABEL: cvt_4f64_to_4i16:
; F16C: # %bb.0:
; F16C-NEXT: subq $72, %rsp
; F16C-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; F16C-NEXT: vextractf128 $1, %ymm0, %xmm0
; F16C-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; F16C-NEXT: vzeroupper
; F16C-NEXT: callq __truncdfhf2@PLT
; F16C-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; F16C-NEXT: vpermilpd $1, (%rsp), %xmm0 # 16-byte Folded Reload
; F16C-NEXT: # xmm0 = mem[1,0]
; F16C-NEXT: callq __truncdfhf2@PLT
; F16C-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; F16C-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; F16C-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; F16C-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; F16C-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; F16C-NEXT: vzeroupper
; F16C-NEXT: callq __truncdfhf2@PLT
; F16C-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; F16C-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; F16C-NEXT: # xmm0 = mem[1,0]
; F16C-NEXT: callq __truncdfhf2@PLT
; F16C-NEXT: vmovdqa (%rsp), %xmm1 # 16-byte Reload
; F16C-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; F16C-NEXT: vinsertps $28, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; F16C-NEXT: # xmm0 = xmm0[0],mem[0],zero,zero
; F16C-NEXT: addq $72, %rsp
; F16C-NEXT: retq
;
; AVX512-LABEL: cvt_4f64_to_4i16:
; AVX512: # %bb.0:
; AVX512-NEXT: subq $72, %rsp
; AVX512-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX512-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX512-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: callq __truncdfhf2@PLT
; AVX512-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX512-NEXT: vpermilpd $1, (%rsp), %xmm0 # 16-byte Folded Reload
; AVX512-NEXT: # xmm0 = mem[1,0]
; AVX512-NEXT: callq __truncdfhf2@PLT
; AVX512-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; AVX512-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; AVX512-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX512-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: callq __truncdfhf2@PLT
; AVX512-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX512-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; AVX512-NEXT: # xmm0 = mem[1,0]
; AVX512-NEXT: callq __truncdfhf2@PLT
; AVX512-NEXT: vmovdqa (%rsp), %xmm1 # 16-byte Reload
; AVX512-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; AVX512-NEXT: vpunpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX512-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
; AVX512-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX512-NEXT: callq __truncdfhf2@PLT
; AVX512-NEXT: vpbroadcastw %xmm0, %xmm0
; AVX512-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; AVX512-NEXT: vshufps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0,0]
; AVX512-NEXT: addq $72, %rsp
; AVX512-NEXT: retq
%1 = fptrunc <4 x double> %a0 to <4 x half>
%2 = bitcast <4 x half> %1 to <4 x i16>
ret <4 x i16> %2
}

define <8 x i16> @cvt_4f64_to_8i16_undef(<4 x double> %a0) nounwind {
|
|
; AVX1-LABEL: cvt_4f64_to_8i16_undef:
|
|
; AVX1: # %bb.0:
|
|
; AVX1-NEXT: subq $88, %rsp
|
|
; AVX1-NEXT: vmovupd %ymm0, (%rsp) # 32-byte Spill
|
|
; AVX1-NEXT: vshufpd {{.*#+}} xmm0 = xmm0[1,0]
|
|
; AVX1-NEXT: vzeroupper
|
|
; AVX1-NEXT: callq __truncdfhf2@PLT
|
|
; AVX1-NEXT: vmovapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
|
|
; AVX1-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
|
|
; AVX1-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
|
|
; AVX1-NEXT: vzeroupper
|
|
; AVX1-NEXT: callq __truncdfhf2@PLT
|
|
; AVX1-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
|
|
; AVX1-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
|
|
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
|
|
; AVX1-NEXT: vmovapd %xmm0, (%rsp) # 16-byte Spill
|
|
; AVX1-NEXT: vshufpd {{.*#+}} xmm0 = xmm0[1,0]
|
|
; AVX1-NEXT: vzeroupper
|
|
; AVX1-NEXT: callq __truncdfhf2@PLT
|
|
; AVX1-NEXT: vmovapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
|
|
; AVX1-NEXT: vmovdqa (%rsp), %xmm0 # 16-byte Reload
|
|
; AVX1-NEXT: callq __truncdfhf2@PLT
|
|
; AVX1-NEXT: vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
|
|
; AVX1-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
|
|
; AVX1-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
|
|
; AVX1-NEXT: vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
|
|
; AVX1-NEXT: # xmm1 = xmm1[0],mem[0],xmm1[1],mem[1],xmm1[2],mem[2],xmm1[3],mem[3]
|
|
; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],zero,zero
; AVX1-NEXT: addq $88, %rsp
; AVX1-NEXT: retq
;
; AVX2-LABEL: cvt_4f64_to_8i16_undef:
; AVX2: # %bb.0:
; AVX2-NEXT: subq $88, %rsp
; AVX2-NEXT: vmovupd %ymm0, (%rsp) # 32-byte Spill
; AVX2-NEXT: vshufpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: callq __truncdfhf2@PLT
; AVX2-NEXT: vmovapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: callq __truncdfhf2@PLT
; AVX2-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-NEXT: vmovupd (%rsp), %ymm0 # 32-byte Reload
; AVX2-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX2-NEXT: vmovapd %xmm0, (%rsp) # 16-byte Spill
; AVX2-NEXT: vshufpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: callq __truncdfhf2@PLT
; AVX2-NEXT: vmovapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-NEXT: vmovdqa (%rsp), %xmm0 # 16-byte Reload
; AVX2-NEXT: callq __truncdfhf2@PLT
; AVX2-NEXT: vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX2-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
; AVX2-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; AVX2-NEXT: vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
; AVX2-NEXT: # xmm1 = xmm1[0],mem[0],xmm1[1],mem[1],xmm1[2],mem[2],xmm1[3],mem[3]
; AVX2-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],zero,zero
; AVX2-NEXT: addq $88, %rsp
; AVX2-NEXT: retq
;
; F16C-LABEL: cvt_4f64_to_8i16_undef:
; F16C: # %bb.0:
; F16C-NEXT: subq $72, %rsp
; F16C-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; F16C-NEXT: vextractf128 $1, %ymm0, %xmm0
; F16C-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; F16C-NEXT: vzeroupper
; F16C-NEXT: callq __truncdfhf2@PLT
; F16C-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; F16C-NEXT: vpermilpd $1, (%rsp), %xmm0 # 16-byte Folded Reload
; F16C-NEXT: # xmm0 = mem[1,0]
; F16C-NEXT: callq __truncdfhf2@PLT
; F16C-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; F16C-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; F16C-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; F16C-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; F16C-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; F16C-NEXT: vzeroupper
; F16C-NEXT: callq __truncdfhf2@PLT
; F16C-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; F16C-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; F16C-NEXT: # xmm0 = mem[1,0]
; F16C-NEXT: callq __truncdfhf2@PLT
; F16C-NEXT: vmovdqa (%rsp), %xmm1 # 16-byte Reload
; F16C-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; F16C-NEXT: vinsertps $28, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; F16C-NEXT: # xmm0 = xmm0[0],mem[0],zero,zero
; F16C-NEXT: addq $72, %rsp
; F16C-NEXT: retq
;
; AVX512-LABEL: cvt_4f64_to_8i16_undef:
; AVX512: # %bb.0:
; AVX512-NEXT: subq $72, %rsp
; AVX512-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX512-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX512-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: callq __truncdfhf2@PLT
; AVX512-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX512-NEXT: vpermilpd $1, (%rsp), %xmm0 # 16-byte Folded Reload
; AVX512-NEXT: # xmm0 = mem[1,0]
; AVX512-NEXT: callq __truncdfhf2@PLT
; AVX512-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; AVX512-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; AVX512-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX512-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: callq __truncdfhf2@PLT
; AVX512-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX512-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; AVX512-NEXT: # xmm0 = mem[1,0]
; AVX512-NEXT: callq __truncdfhf2@PLT
; AVX512-NEXT: vmovdqa (%rsp), %xmm1 # 16-byte Reload
; AVX512-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; AVX512-NEXT: vpunpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX512-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
; AVX512-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX512-NEXT: callq __truncdfhf2@PLT
; AVX512-NEXT: vpbroadcastw %xmm0, %xmm0
; AVX512-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; AVX512-NEXT: vshufps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0,0]
; AVX512-NEXT: addq $72, %rsp
; AVX512-NEXT: retq
  %1 = fptrunc <4 x double> %a0 to <4 x half>
  %2 = bitcast <4 x half> %1 to <4 x i16>
  %3 = shufflevector <4 x i16> %2, <4 x i16> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
  ret <8 x i16> %3
}
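
; As cvt_4f64_to_8i16_undef, but the shuffle pads with zeroinitializer, so the
; lowering must explicitly zero the upper half of the result vector.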
define <8 x i16> @cvt_4f64_to_8i16_zero(<4 x double> %a0) nounwind {
; AVX1-LABEL: cvt_4f64_to_8i16_zero:
; AVX1: # %bb.0:
; AVX1-NEXT: subq $88, %rsp
; AVX1-NEXT: vmovupd %ymm0, (%rsp) # 32-byte Spill
; AVX1-NEXT: vshufpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: callq __truncdfhf2@PLT
; AVX1-NEXT: vmovapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
; AVX1-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: callq __truncdfhf2@PLT
; AVX1-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vmovapd %xmm0, (%rsp) # 16-byte Spill
; AVX1-NEXT: vshufpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: callq __truncdfhf2@PLT
; AVX1-NEXT: vmovapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-NEXT: vmovdqa (%rsp), %xmm0 # 16-byte Reload
; AVX1-NEXT: callq __truncdfhf2@PLT
; AVX1-NEXT: vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX1-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
; AVX1-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; AVX1-NEXT: vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
; AVX1-NEXT: # xmm1 = xmm1[0],mem[0],xmm1[1],mem[1],xmm1[2],mem[2],xmm1[3],mem[3]
; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],zero,zero
; AVX1-NEXT: addq $88, %rsp
; AVX1-NEXT: retq
;
; AVX2-LABEL: cvt_4f64_to_8i16_zero:
; AVX2: # %bb.0:
; AVX2-NEXT: subq $88, %rsp
; AVX2-NEXT: vmovupd %ymm0, (%rsp) # 32-byte Spill
; AVX2-NEXT: vshufpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: callq __truncdfhf2@PLT
; AVX2-NEXT: vmovapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: callq __truncdfhf2@PLT
; AVX2-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-NEXT: vmovupd (%rsp), %ymm0 # 32-byte Reload
; AVX2-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX2-NEXT: vmovapd %xmm0, (%rsp) # 16-byte Spill
; AVX2-NEXT: vshufpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: callq __truncdfhf2@PLT
; AVX2-NEXT: vmovapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-NEXT: vmovdqa (%rsp), %xmm0 # 16-byte Reload
; AVX2-NEXT: callq __truncdfhf2@PLT
; AVX2-NEXT: vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX2-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
; AVX2-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; AVX2-NEXT: vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
; AVX2-NEXT: # xmm1 = xmm1[0],mem[0],xmm1[1],mem[1],xmm1[2],mem[2],xmm1[3],mem[3]
; AVX2-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],zero,zero
; AVX2-NEXT: addq $88, %rsp
; AVX2-NEXT: retq
;
; F16C-LABEL: cvt_4f64_to_8i16_zero:
; F16C: # %bb.0:
; F16C-NEXT: subq $72, %rsp
; F16C-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; F16C-NEXT: vextractf128 $1, %ymm0, %xmm0
; F16C-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; F16C-NEXT: vzeroupper
; F16C-NEXT: callq __truncdfhf2@PLT
; F16C-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; F16C-NEXT: vpermilpd $1, (%rsp), %xmm0 # 16-byte Folded Reload
; F16C-NEXT: # xmm0 = mem[1,0]
; F16C-NEXT: callq __truncdfhf2@PLT
; F16C-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; F16C-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; F16C-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; F16C-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; F16C-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; F16C-NEXT: vzeroupper
; F16C-NEXT: callq __truncdfhf2@PLT
; F16C-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; F16C-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; F16C-NEXT: # xmm0 = mem[1,0]
; F16C-NEXT: callq __truncdfhf2@PLT
; F16C-NEXT: vmovdqa (%rsp), %xmm1 # 16-byte Reload
; F16C-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; F16C-NEXT: vinsertps $28, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; F16C-NEXT: # xmm0 = xmm0[0],mem[0],zero,zero
; F16C-NEXT: addq $72, %rsp
; F16C-NEXT: retq
;
; AVX512-LABEL: cvt_4f64_to_8i16_zero:
; AVX512: # %bb.0:
; AVX512-NEXT: subq $72, %rsp
; AVX512-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX512-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX512-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: callq __truncdfhf2@PLT
; AVX512-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX512-NEXT: vpermilpd $1, (%rsp), %xmm0 # 16-byte Folded Reload
; AVX512-NEXT: # xmm0 = mem[1,0]
; AVX512-NEXT: callq __truncdfhf2@PLT
; AVX512-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; AVX512-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; AVX512-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX512-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: callq __truncdfhf2@PLT
; AVX512-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX512-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; AVX512-NEXT: # xmm0 = mem[1,0]
; AVX512-NEXT: callq __truncdfhf2@PLT
; AVX512-NEXT: vmovdqa (%rsp), %xmm1 # 16-byte Reload
; AVX512-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; AVX512-NEXT: vinsertps $28, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX512-NEXT: # xmm0 = xmm0[0],mem[0],zero,zero
; AVX512-NEXT: addq $72, %rsp
; AVX512-NEXT: retq
  %1 = fptrunc <4 x double> %a0 to <4 x half>
  %2 = bitcast <4 x half> %1 to <4 x i16>
  %3 = shufflevector <4 x i16> %2, <4 x i16> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
  ret <8 x i16> %3
}
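
; No target here has a direct f64->f16 instruction (F16C only covers f32<->f16),
; so all eight elements round-trip through the __truncdfhf2 libcall.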
define <8 x i16> @cvt_8f64_to_8i16(<8 x double> %a0) nounwind {
; AVX-LABEL: cvt_8f64_to_8i16:
; AVX: # %bb.0:
; AVX-NEXT: subq $104, %rsp
; AVX-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vextractf128 $1, %ymm1, %xmm0
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX-NEXT: vzeroupper
; AVX-NEXT: callq __truncdfhf2@PLT
; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vpermilpd $1, (%rsp), %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = mem[1,0]
; AVX-NEXT: callq __truncdfhf2@PLT
; AVX-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; AVX-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; AVX-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX-NEXT: vzeroupper
; AVX-NEXT: callq __truncdfhf2@PLT
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = mem[1,0]
; AVX-NEXT: callq __truncdfhf2@PLT
; AVX-NEXT: vmovdqa (%rsp), %xmm1 # 16-byte Reload
; AVX-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; AVX-NEXT: vpunpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
; AVX-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX-NEXT: vzeroupper
; AVX-NEXT: callq __truncdfhf2@PLT
; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vpermilpd $1, (%rsp), %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = mem[1,0]
; AVX-NEXT: callq __truncdfhf2@PLT
; AVX-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; AVX-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; AVX-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX-NEXT: vzeroupper
; AVX-NEXT: callq __truncdfhf2@PLT
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = mem[1,0]
; AVX-NEXT: callq __truncdfhf2@PLT
; AVX-NEXT: vmovdqa (%rsp), %xmm1 # 16-byte Reload
; AVX-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; AVX-NEXT: vpunpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
; AVX-NEXT: vpunpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
; AVX-NEXT: addq $104, %rsp
; AVX-NEXT: retq
;
; F16C-LABEL: cvt_8f64_to_8i16:
; F16C: # %bb.0:
; F16C-NEXT: subq $104, %rsp
; F16C-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; F16C-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; F16C-NEXT: vextractf128 $1, %ymm1, %xmm0
; F16C-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; F16C-NEXT: vzeroupper
; F16C-NEXT: callq __truncdfhf2@PLT
; F16C-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; F16C-NEXT: vpermilpd $1, (%rsp), %xmm0 # 16-byte Folded Reload
; F16C-NEXT: # xmm0 = mem[1,0]
; F16C-NEXT: callq __truncdfhf2@PLT
; F16C-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; F16C-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; F16C-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; F16C-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; F16C-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; F16C-NEXT: vzeroupper
; F16C-NEXT: callq __truncdfhf2@PLT
; F16C-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; F16C-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; F16C-NEXT: # xmm0 = mem[1,0]
; F16C-NEXT: callq __truncdfhf2@PLT
; F16C-NEXT: vmovdqa (%rsp), %xmm1 # 16-byte Reload
; F16C-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; F16C-NEXT: vpunpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; F16C-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
; F16C-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; F16C-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; F16C-NEXT: vextractf128 $1, %ymm0, %xmm0
; F16C-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; F16C-NEXT: vzeroupper
; F16C-NEXT: callq __truncdfhf2@PLT
; F16C-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; F16C-NEXT: vpermilpd $1, (%rsp), %xmm0 # 16-byte Folded Reload
; F16C-NEXT: # xmm0 = mem[1,0]
; F16C-NEXT: callq __truncdfhf2@PLT
; F16C-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; F16C-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; F16C-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; F16C-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; F16C-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; F16C-NEXT: vzeroupper
; F16C-NEXT: callq __truncdfhf2@PLT
; F16C-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; F16C-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; F16C-NEXT: # xmm0 = mem[1,0]
; F16C-NEXT: callq __truncdfhf2@PLT
; F16C-NEXT: vmovdqa (%rsp), %xmm1 # 16-byte Reload
; F16C-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; F16C-NEXT: vpunpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; F16C-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
; F16C-NEXT: vpunpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; F16C-NEXT: # xmm0 = xmm0[0],mem[0]
; F16C-NEXT: addq $104, %rsp
; F16C-NEXT: retq
;
; AVX512-LABEL: cvt_8f64_to_8i16:
; AVX512: # %bb.0:
; AVX512-NEXT: subq $120, %rsp
; AVX512-NEXT: vmovups %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-NEXT: vextractf32x4 $3, %zmm0, %xmm0
; AVX512-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: callq __truncdfhf2@PLT
; AVX512-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX512-NEXT: vpermilpd $1, (%rsp), %xmm0 # 16-byte Folded Reload
; AVX512-NEXT: # xmm0 = mem[1,0]
; AVX512-NEXT: callq __truncdfhf2@PLT
; AVX512-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; AVX512-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; AVX512-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX512-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512-NEXT: vextractf32x4 $2, %zmm0, %xmm0
; AVX512-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: callq __truncdfhf2@PLT
; AVX512-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX512-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; AVX512-NEXT: # xmm0 = mem[1,0]
; AVX512-NEXT: callq __truncdfhf2@PLT
; AVX512-NEXT: vmovdqa (%rsp), %xmm1 # 16-byte Reload
; AVX512-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; AVX512-NEXT: vpunpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX512-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
; AVX512-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX512-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: callq __truncdfhf2@PLT
; AVX512-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX512-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; AVX512-NEXT: # xmm0 = mem[1,0]
; AVX512-NEXT: callq __truncdfhf2@PLT
; AVX512-NEXT: vmovdqa (%rsp), %xmm1 # 16-byte Reload
; AVX512-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; AVX512-NEXT: vmovdqa %xmm0, (%rsp) # 16-byte Spill
; AVX512-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX512-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: callq __truncdfhf2@PLT
; AVX512-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX512-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; AVX512-NEXT: # xmm0 = mem[1,0]
; AVX512-NEXT: callq __truncdfhf2@PLT
; AVX512-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; AVX512-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; AVX512-NEXT: vmovdqa (%rsp), %xmm1 # 16-byte Reload
; AVX512-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; AVX512-NEXT: vpunpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX512-NEXT: # xmm0 = xmm0[0],mem[0]
; AVX512-NEXT: addq $120, %rsp
; AVX512-NEXT: retq
  %1 = fptrunc <8 x double> %a0 to <8 x half>
  %2 = bitcast <8 x half> %1 to <8 x i16>
  ret <8 x i16> %2
}

;
; Double to Half (Store)
;
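
; The scalar result is written straight to memory with a vpextrw $0 store.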
define void @store_cvt_f64_to_i16(double %a0, ptr %a1) nounwind {
; AVX-LABEL: store_cvt_f64_to_i16:
; AVX: # %bb.0:
; AVX-NEXT: pushq %rbx
; AVX-NEXT: movq %rdi, %rbx
; AVX-NEXT: callq __truncdfhf2@PLT
; AVX-NEXT: vpextrw $0, %xmm0, (%rbx)
; AVX-NEXT: popq %rbx
; AVX-NEXT: retq
;
; F16C-LABEL: store_cvt_f64_to_i16:
; F16C: # %bb.0:
; F16C-NEXT: pushq %rbx
; F16C-NEXT: movq %rdi, %rbx
; F16C-NEXT: callq __truncdfhf2@PLT
; F16C-NEXT: vpextrw $0, %xmm0, (%rbx)
; F16C-NEXT: popq %rbx
; F16C-NEXT: retq
;
; AVX512-LABEL: store_cvt_f64_to_i16:
; AVX512: # %bb.0:
; AVX512-NEXT: pushq %rbx
; AVX512-NEXT: movq %rdi, %rbx
; AVX512-NEXT: callq __truncdfhf2@PLT
; AVX512-NEXT: vpextrw $0, %xmm0, (%rbx)
; AVX512-NEXT: popq %rbx
; AVX512-NEXT: retq
  %1 = fptrunc double %a0 to half
  %2 = bitcast half %1 to i16
  store i16 %2, ptr %a1
  ret void
}
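
; F16C/AVX512 pack the two halves with vpunpcklwd and store one 32-bit vmovd;
; plain AVX writes each element separately with vpextrw.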
define void @store_cvt_2f64_to_2i16(<2 x double> %a0, ptr %a1) nounwind {
; AVX-LABEL: store_cvt_2f64_to_2i16:
; AVX: # %bb.0:
; AVX-NEXT: pushq %rbx
; AVX-NEXT: subq $32, %rsp
; AVX-NEXT: movq %rdi, %rbx
; AVX-NEXT: vmovapd %xmm0, (%rsp) # 16-byte Spill
; AVX-NEXT: vshufpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX-NEXT: callq __truncdfhf2@PLT
; AVX-NEXT: vmovapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vmovdqa (%rsp), %xmm0 # 16-byte Reload
; AVX-NEXT: callq __truncdfhf2@PLT
; AVX-NEXT: vpextrw $0, %xmm0, (%rbx)
; AVX-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX-NEXT: vpextrw $0, %xmm0, 2(%rbx)
; AVX-NEXT: addq $32, %rsp
; AVX-NEXT: popq %rbx
; AVX-NEXT: retq
;
; F16C-LABEL: store_cvt_2f64_to_2i16:
; F16C: # %bb.0:
; F16C-NEXT: pushq %rbx
; F16C-NEXT: subq $32, %rsp
; F16C-NEXT: movq %rdi, %rbx
; F16C-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; F16C-NEXT: callq __truncdfhf2@PLT
; F16C-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; F16C-NEXT: vpermilpd $1, (%rsp), %xmm0 # 16-byte Folded Reload
; F16C-NEXT: # xmm0 = mem[1,0]
; F16C-NEXT: callq __truncdfhf2@PLT
; F16C-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; F16C-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; F16C-NEXT: vmovd %xmm0, (%rbx)
; F16C-NEXT: addq $32, %rsp
; F16C-NEXT: popq %rbx
; F16C-NEXT: retq
;
; AVX512-LABEL: store_cvt_2f64_to_2i16:
; AVX512: # %bb.0:
; AVX512-NEXT: pushq %rbx
; AVX512-NEXT: subq $32, %rsp
; AVX512-NEXT: movq %rdi, %rbx
; AVX512-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX512-NEXT: callq __truncdfhf2@PLT
; AVX512-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX512-NEXT: vpermilpd $1, (%rsp), %xmm0 # 16-byte Folded Reload
; AVX512-NEXT: # xmm0 = mem[1,0]
; AVX512-NEXT: callq __truncdfhf2@PLT
; AVX512-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; AVX512-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; AVX512-NEXT: vmovd %xmm0, (%rbx)
; AVX512-NEXT: addq $32, %rsp
; AVX512-NEXT: popq %rbx
; AVX512-NEXT: retq
  %1 = fptrunc <2 x double> %a0 to <2 x half>
  %2 = bitcast <2 x half> %1 to <2 x i16>
  store <2 x i16> %2, ptr %a1
  ret void
}
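
; Plain AVX stores the four elements individually with vpextrw; F16C/AVX512
; rebuild the vector with unpacks and store it as a single vmovq.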
define void @store_cvt_4f64_to_4i16(<4 x double> %a0, ptr %a1) nounwind {
; AVX1-LABEL: store_cvt_4f64_to_4i16:
; AVX1: # %bb.0:
; AVX1-NEXT: pushq %rbx
; AVX1-NEXT: subq $80, %rsp
; AVX1-NEXT: movq %rdi, %rbx
; AVX1-NEXT: vmovupd %ymm0, (%rsp) # 32-byte Spill
; AVX1-NEXT: vshufpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: callq __truncdfhf2@PLT
; AVX1-NEXT: vmovapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vmovapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-NEXT: vshufpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: callq __truncdfhf2@PLT
; AVX1-NEXT: vmovapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
; AVX1-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: callq __truncdfhf2@PLT
; AVX1-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX1-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX1-NEXT: callq __truncdfhf2@PLT
; AVX1-NEXT: vpextrw $0, %xmm0, 4(%rbx)
; AVX1-NEXT: vmovdqa (%rsp), %xmm0 # 16-byte Reload
; AVX1-NEXT: vpextrw $0, %xmm0, (%rbx)
; AVX1-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX1-NEXT: vpextrw $0, %xmm0, 6(%rbx)
; AVX1-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX1-NEXT: vpextrw $0, %xmm0, 2(%rbx)
; AVX1-NEXT: addq $80, %rsp
; AVX1-NEXT: popq %rbx
; AVX1-NEXT: retq
;
; AVX2-LABEL: store_cvt_4f64_to_4i16:
; AVX2: # %bb.0:
; AVX2-NEXT: pushq %rbx
; AVX2-NEXT: subq $80, %rsp
; AVX2-NEXT: movq %rdi, %rbx
; AVX2-NEXT: vmovupd %ymm0, (%rsp) # 32-byte Spill
; AVX2-NEXT: vshufpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: callq __truncdfhf2@PLT
; AVX2-NEXT: vmovapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-NEXT: vmovupd (%rsp), %ymm0 # 32-byte Reload
; AVX2-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX2-NEXT: vmovapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-NEXT: vshufpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: callq __truncdfhf2@PLT
; AVX2-NEXT: vmovapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: callq __truncdfhf2@PLT
; AVX2-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX2-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX2-NEXT: callq __truncdfhf2@PLT
; AVX2-NEXT: vpextrw $0, %xmm0, 4(%rbx)
; AVX2-NEXT: vmovdqa (%rsp), %xmm0 # 16-byte Reload
; AVX2-NEXT: vpextrw $0, %xmm0, (%rbx)
; AVX2-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX2-NEXT: vpextrw $0, %xmm0, 6(%rbx)
; AVX2-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX2-NEXT: vpextrw $0, %xmm0, 2(%rbx)
; AVX2-NEXT: addq $80, %rsp
; AVX2-NEXT: popq %rbx
; AVX2-NEXT: retq
;
; F16C-LABEL: store_cvt_4f64_to_4i16:
; F16C: # %bb.0:
; F16C-NEXT: pushq %rbx
; F16C-NEXT: subq $64, %rsp
; F16C-NEXT: movq %rdi, %rbx
; F16C-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; F16C-NEXT: vextractf128 $1, %ymm0, %xmm0
; F16C-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; F16C-NEXT: vzeroupper
; F16C-NEXT: callq __truncdfhf2@PLT
; F16C-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; F16C-NEXT: vpermilpd $1, (%rsp), %xmm0 # 16-byte Folded Reload
; F16C-NEXT: # xmm0 = mem[1,0]
; F16C-NEXT: callq __truncdfhf2@PLT
; F16C-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; F16C-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; F16C-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; F16C-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; F16C-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; F16C-NEXT: vzeroupper
; F16C-NEXT: callq __truncdfhf2@PLT
; F16C-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; F16C-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; F16C-NEXT: # xmm0 = mem[1,0]
; F16C-NEXT: callq __truncdfhf2@PLT
; F16C-NEXT: vmovdqa (%rsp), %xmm1 # 16-byte Reload
; F16C-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; F16C-NEXT: vpunpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; F16C-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
; F16C-NEXT: vmovq %xmm0, (%rbx)
; F16C-NEXT: addq $64, %rsp
; F16C-NEXT: popq %rbx
; F16C-NEXT: retq
;
; AVX512-LABEL: store_cvt_4f64_to_4i16:
; AVX512: # %bb.0:
; AVX512-NEXT: pushq %rbx
; AVX512-NEXT: subq $64, %rsp
; AVX512-NEXT: movq %rdi, %rbx
; AVX512-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX512-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX512-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: callq __truncdfhf2@PLT
; AVX512-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX512-NEXT: vpermilpd $1, (%rsp), %xmm0 # 16-byte Folded Reload
; AVX512-NEXT: # xmm0 = mem[1,0]
; AVX512-NEXT: callq __truncdfhf2@PLT
; AVX512-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; AVX512-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; AVX512-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX512-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: callq __truncdfhf2@PLT
; AVX512-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX512-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; AVX512-NEXT: # xmm0 = mem[1,0]
; AVX512-NEXT: callq __truncdfhf2@PLT
; AVX512-NEXT: vmovdqa (%rsp), %xmm1 # 16-byte Reload
; AVX512-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; AVX512-NEXT: vpunpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX512-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
; AVX512-NEXT: vmovq %xmm0, (%rbx)
; AVX512-NEXT: addq $64, %rsp
; AVX512-NEXT: popq %rbx
; AVX512-NEXT: retq
  %1 = fptrunc <4 x double> %a0 to <4 x half>
  %2 = bitcast <4 x half> %1 to <4 x i16>
  store <4 x i16> %2, ptr %a1
  ret void
}
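
; The widened <8 x i16> result is assembled as in cvt_4f64_to_8i16_undef and
; then written with a single 128-bit store.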
define void @store_cvt_4f64_to_8i16_undef(<4 x double> %a0, ptr %a1) nounwind {
; AVX1-LABEL: store_cvt_4f64_to_8i16_undef:
; AVX1: # %bb.0:
; AVX1-NEXT: pushq %rbx
; AVX1-NEXT: subq $80, %rsp
; AVX1-NEXT: movq %rdi, %rbx
; AVX1-NEXT: vmovupd %ymm0, (%rsp) # 32-byte Spill
; AVX1-NEXT: vshufpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: callq __truncdfhf2@PLT
; AVX1-NEXT: vmovapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
; AVX1-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: callq __truncdfhf2@PLT
; AVX1-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vmovapd %xmm0, (%rsp) # 16-byte Spill
; AVX1-NEXT: vshufpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: callq __truncdfhf2@PLT
; AVX1-NEXT: vmovapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-NEXT: vmovdqa (%rsp), %xmm0 # 16-byte Reload
; AVX1-NEXT: callq __truncdfhf2@PLT
; AVX1-NEXT: vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX1-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
; AVX1-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; AVX1-NEXT: vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
; AVX1-NEXT: # xmm1 = xmm1[0],mem[0],xmm1[1],mem[1],xmm1[2],mem[2],xmm1[3],mem[3]
; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],zero,zero
; AVX1-NEXT: vmovaps %xmm0, (%rbx)
; AVX1-NEXT: addq $80, %rsp
; AVX1-NEXT: popq %rbx
; AVX1-NEXT: retq
;
; AVX2-LABEL: store_cvt_4f64_to_8i16_undef:
; AVX2: # %bb.0:
; AVX2-NEXT: pushq %rbx
; AVX2-NEXT: subq $80, %rsp
; AVX2-NEXT: movq %rdi, %rbx
; AVX2-NEXT: vmovupd %ymm0, (%rsp) # 32-byte Spill
; AVX2-NEXT: vshufpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: callq __truncdfhf2@PLT
; AVX2-NEXT: vmovapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: callq __truncdfhf2@PLT
; AVX2-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-NEXT: vmovupd (%rsp), %ymm0 # 32-byte Reload
; AVX2-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX2-NEXT: vmovapd %xmm0, (%rsp) # 16-byte Spill
; AVX2-NEXT: vshufpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: callq __truncdfhf2@PLT
; AVX2-NEXT: vmovapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-NEXT: vmovdqa (%rsp), %xmm0 # 16-byte Reload
; AVX2-NEXT: callq __truncdfhf2@PLT
; AVX2-NEXT: vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX2-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
; AVX2-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; AVX2-NEXT: vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
; AVX2-NEXT: # xmm1 = xmm1[0],mem[0],xmm1[1],mem[1],xmm1[2],mem[2],xmm1[3],mem[3]
; AVX2-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],zero,zero
; AVX2-NEXT: vmovaps %xmm0, (%rbx)
; AVX2-NEXT: addq $80, %rsp
; AVX2-NEXT: popq %rbx
; AVX2-NEXT: retq
;
; F16C-LABEL: store_cvt_4f64_to_8i16_undef:
; F16C: # %bb.0:
; F16C-NEXT: pushq %rbx
; F16C-NEXT: subq $64, %rsp
; F16C-NEXT: movq %rdi, %rbx
; F16C-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; F16C-NEXT: vextractf128 $1, %ymm0, %xmm0
; F16C-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; F16C-NEXT: vzeroupper
; F16C-NEXT: callq __truncdfhf2@PLT
; F16C-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; F16C-NEXT: vpermilpd $1, (%rsp), %xmm0 # 16-byte Folded Reload
; F16C-NEXT: # xmm0 = mem[1,0]
; F16C-NEXT: callq __truncdfhf2@PLT
; F16C-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; F16C-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; F16C-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; F16C-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; F16C-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; F16C-NEXT: vzeroupper
; F16C-NEXT: callq __truncdfhf2@PLT
; F16C-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; F16C-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; F16C-NEXT: # xmm0 = mem[1,0]
; F16C-NEXT: callq __truncdfhf2@PLT
; F16C-NEXT: vmovdqa (%rsp), %xmm1 # 16-byte Reload
; F16C-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; F16C-NEXT: vinsertps $28, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; F16C-NEXT: # xmm0 = xmm0[0],mem[0],zero,zero
; F16C-NEXT: vmovaps %xmm0, (%rbx)
; F16C-NEXT: addq $64, %rsp
; F16C-NEXT: popq %rbx
; F16C-NEXT: retq
;
; AVX512-LABEL: store_cvt_4f64_to_8i16_undef:
; AVX512: # %bb.0:
; AVX512-NEXT: pushq %rbx
; AVX512-NEXT: subq $64, %rsp
; AVX512-NEXT: movq %rdi, %rbx
; AVX512-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX512-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX512-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: callq __truncdfhf2@PLT
; AVX512-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX512-NEXT: vpermilpd $1, (%rsp), %xmm0 # 16-byte Folded Reload
; AVX512-NEXT: # xmm0 = mem[1,0]
; AVX512-NEXT: callq __truncdfhf2@PLT
; AVX512-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; AVX512-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; AVX512-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX512-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: callq __truncdfhf2@PLT
; AVX512-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX512-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; AVX512-NEXT: # xmm0 = mem[1,0]
; AVX512-NEXT: callq __truncdfhf2@PLT
; AVX512-NEXT: vmovdqa (%rsp), %xmm1 # 16-byte Reload
; AVX512-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; AVX512-NEXT: vpunpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX512-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
; AVX512-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX512-NEXT: callq __truncdfhf2@PLT
; AVX512-NEXT: vpbroadcastw %xmm0, %xmm0
; AVX512-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; AVX512-NEXT: vshufps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0,0]
; AVX512-NEXT: vmovaps %xmm0, (%rbx)
; AVX512-NEXT: addq $64, %rsp
; AVX512-NEXT: popq %rbx
; AVX512-NEXT: retq
  %1 = fptrunc <4 x double> %a0 to <4 x half>
  %2 = bitcast <4 x half> %1 to <4 x i16>
  %3 = shufflevector <4 x i16> %2, <4 x i16> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
  store <8 x i16> %3, ptr %a1
  ret void
}
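
; As above, but the upper lanes are zeroed to match the zeroinitializer
; shuffle operand before the 128-bit store.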
define void @store_cvt_4f64_to_8i16_zero(<4 x double> %a0, ptr %a1) nounwind {
; AVX1-LABEL: store_cvt_4f64_to_8i16_zero:
; AVX1: # %bb.0:
; AVX1-NEXT: pushq %rbx
; AVX1-NEXT: subq $80, %rsp
; AVX1-NEXT: movq %rdi, %rbx
; AVX1-NEXT: vmovupd %ymm0, (%rsp) # 32-byte Spill
; AVX1-NEXT: vshufpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: callq __truncdfhf2@PLT
; AVX1-NEXT: vmovapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
; AVX1-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: callq __truncdfhf2@PLT
; AVX1-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vmovapd %xmm0, (%rsp) # 16-byte Spill
; AVX1-NEXT: vshufpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: callq __truncdfhf2@PLT
; AVX1-NEXT: vmovapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-NEXT: vmovdqa (%rsp), %xmm0 # 16-byte Reload
; AVX1-NEXT: callq __truncdfhf2@PLT
; AVX1-NEXT: vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX1-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
; AVX1-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; AVX1-NEXT: vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
; AVX1-NEXT: # xmm1 = xmm1[0],mem[0],xmm1[1],mem[1],xmm1[2],mem[2],xmm1[3],mem[3]
; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],zero,zero
; AVX1-NEXT: vmovaps %xmm0, (%rbx)
; AVX1-NEXT: addq $80, %rsp
; AVX1-NEXT: popq %rbx
; AVX1-NEXT: retq
;
; AVX2-LABEL: store_cvt_4f64_to_8i16_zero:
; AVX2: # %bb.0:
; AVX2-NEXT: pushq %rbx
; AVX2-NEXT: subq $80, %rsp
; AVX2-NEXT: movq %rdi, %rbx
; AVX2-NEXT: vmovupd %ymm0, (%rsp) # 32-byte Spill
; AVX2-NEXT: vshufpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: callq __truncdfhf2@PLT
; AVX2-NEXT: vmovapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: callq __truncdfhf2@PLT
; AVX2-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-NEXT: vmovupd (%rsp), %ymm0 # 32-byte Reload
; AVX2-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX2-NEXT: vmovapd %xmm0, (%rsp) # 16-byte Spill
; AVX2-NEXT: vshufpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: callq __truncdfhf2@PLT
; AVX2-NEXT: vmovapd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-NEXT: vmovdqa (%rsp), %xmm0 # 16-byte Reload
; AVX2-NEXT: callq __truncdfhf2@PLT
; AVX2-NEXT: vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX2-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
; AVX2-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; AVX2-NEXT: vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload
; AVX2-NEXT: # xmm1 = xmm1[0],mem[0],xmm1[1],mem[1],xmm1[2],mem[2],xmm1[3],mem[3]
; AVX2-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],zero,zero
; AVX2-NEXT: vmovaps %xmm0, (%rbx)
; AVX2-NEXT: addq $80, %rsp
; AVX2-NEXT: popq %rbx
; AVX2-NEXT: retq
;
; F16C-LABEL: store_cvt_4f64_to_8i16_zero:
; F16C: # %bb.0:
; F16C-NEXT: pushq %rbx
; F16C-NEXT: subq $64, %rsp
; F16C-NEXT: movq %rdi, %rbx
; F16C-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; F16C-NEXT: vextractf128 $1, %ymm0, %xmm0
; F16C-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; F16C-NEXT: vzeroupper
; F16C-NEXT: callq __truncdfhf2@PLT
; F16C-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; F16C-NEXT: vpermilpd $1, (%rsp), %xmm0 # 16-byte Folded Reload
; F16C-NEXT: # xmm0 = mem[1,0]
; F16C-NEXT: callq __truncdfhf2@PLT
; F16C-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; F16C-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; F16C-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; F16C-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; F16C-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; F16C-NEXT: vzeroupper
; F16C-NEXT: callq __truncdfhf2@PLT
; F16C-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; F16C-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; F16C-NEXT: # xmm0 = mem[1,0]
; F16C-NEXT: callq __truncdfhf2@PLT
; F16C-NEXT: vmovdqa (%rsp), %xmm1 # 16-byte Reload
; F16C-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; F16C-NEXT: vinsertps $28, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; F16C-NEXT: # xmm0 = xmm0[0],mem[0],zero,zero
; F16C-NEXT: vmovaps %xmm0, (%rbx)
; F16C-NEXT: addq $64, %rsp
; F16C-NEXT: popq %rbx
; F16C-NEXT: retq
;
; AVX512-LABEL: store_cvt_4f64_to_8i16_zero:
; AVX512: # %bb.0:
; AVX512-NEXT: pushq %rbx
; AVX512-NEXT: subq $64, %rsp
; AVX512-NEXT: movq %rdi, %rbx
; AVX512-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX512-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX512-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: callq __truncdfhf2@PLT
; AVX512-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX512-NEXT: vpermilpd $1, (%rsp), %xmm0 # 16-byte Folded Reload
; AVX512-NEXT: # xmm0 = mem[1,0]
; AVX512-NEXT: callq __truncdfhf2@PLT
; AVX512-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; AVX512-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; AVX512-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX512-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: callq __truncdfhf2@PLT
; AVX512-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX512-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; AVX512-NEXT: # xmm0 = mem[1,0]
; AVX512-NEXT: callq __truncdfhf2@PLT
; AVX512-NEXT: vmovdqa (%rsp), %xmm1 # 16-byte Reload
; AVX512-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; AVX512-NEXT: vinsertps $28, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX512-NEXT: # xmm0 = xmm0[0],mem[0],zero,zero
; AVX512-NEXT: vmovaps %xmm0, (%rbx)
; AVX512-NEXT: addq $64, %rsp
; AVX512-NEXT: popq %rbx
; AVX512-NEXT: retq
  %1 = fptrunc <4 x double> %a0 to <4 x half>
  %2 = bitcast <4 x half> %1 to <4 x i16>
  %3 = shufflevector <4 x i16> %2, <4 x i16> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
  store <8 x i16> %3, ptr %a1
  ret void
}
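
; Eight __truncdfhf2 libcalls feed a vpunpcklwd/vpunpckldq/vpunpcklqdq tree
; that rebuilds the <8 x i16> vector before the single store.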
define void @store_cvt_8f64_to_8i16(<8 x double> %a0, ptr %a1) nounwind {
; AVX-LABEL: store_cvt_8f64_to_8i16:
; AVX: # %bb.0:
; AVX-NEXT: pushq %rbx
; AVX-NEXT: subq $96, %rsp
; AVX-NEXT: movq %rdi, %rbx
; AVX-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX-NEXT: vextractf128 $1, %ymm1, %xmm0
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX-NEXT: vzeroupper
; AVX-NEXT: callq __truncdfhf2@PLT
; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vpermilpd $1, (%rsp), %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = mem[1,0]
; AVX-NEXT: callq __truncdfhf2@PLT
; AVX-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; AVX-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; AVX-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX-NEXT: vzeroupper
; AVX-NEXT: callq __truncdfhf2@PLT
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = mem[1,0]
; AVX-NEXT: callq __truncdfhf2@PLT
; AVX-NEXT: vmovdqa (%rsp), %xmm1 # 16-byte Reload
; AVX-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; AVX-NEXT: vpunpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
; AVX-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX-NEXT: vzeroupper
; AVX-NEXT: callq __truncdfhf2@PLT
; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vpermilpd $1, (%rsp), %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = mem[1,0]
; AVX-NEXT: callq __truncdfhf2@PLT
; AVX-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; AVX-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; AVX-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX-NEXT: vzeroupper
; AVX-NEXT: callq __truncdfhf2@PLT
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = mem[1,0]
; AVX-NEXT: callq __truncdfhf2@PLT
; AVX-NEXT: vmovdqa (%rsp), %xmm1 # 16-byte Reload
; AVX-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; AVX-NEXT: vpunpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
; AVX-NEXT: vpunpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
; AVX-NEXT: vmovdqa %xmm0, (%rbx)
; AVX-NEXT: addq $96, %rsp
; AVX-NEXT: popq %rbx
; AVX-NEXT: retq
;
; F16C-LABEL: store_cvt_8f64_to_8i16:
; F16C: # %bb.0:
; F16C-NEXT: pushq %rbx
; F16C-NEXT: subq $96, %rsp
; F16C-NEXT: movq %rdi, %rbx
; F16C-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; F16C-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; F16C-NEXT: vextractf128 $1, %ymm1, %xmm0
; F16C-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; F16C-NEXT: vzeroupper
; F16C-NEXT: callq __truncdfhf2@PLT
; F16C-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; F16C-NEXT: vpermilpd $1, (%rsp), %xmm0 # 16-byte Folded Reload
; F16C-NEXT: # xmm0 = mem[1,0]
; F16C-NEXT: callq __truncdfhf2@PLT
; F16C-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; F16C-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; F16C-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; F16C-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; F16C-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; F16C-NEXT: vzeroupper
; F16C-NEXT: callq __truncdfhf2@PLT
; F16C-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; F16C-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; F16C-NEXT: # xmm0 = mem[1,0]
; F16C-NEXT: callq __truncdfhf2@PLT
; F16C-NEXT: vmovdqa (%rsp), %xmm1 # 16-byte Reload
; F16C-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; F16C-NEXT: vpunpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; F16C-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
; F16C-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; F16C-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; F16C-NEXT: vextractf128 $1, %ymm0, %xmm0
; F16C-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; F16C-NEXT: vzeroupper
; F16C-NEXT: callq __truncdfhf2@PLT
; F16C-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; F16C-NEXT: vpermilpd $1, (%rsp), %xmm0 # 16-byte Folded Reload
; F16C-NEXT: # xmm0 = mem[1,0]
; F16C-NEXT: callq __truncdfhf2@PLT
; F16C-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; F16C-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; F16C-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; F16C-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; F16C-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; F16C-NEXT: vzeroupper
; F16C-NEXT: callq __truncdfhf2@PLT
; F16C-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; F16C-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; F16C-NEXT: # xmm0 = mem[1,0]
; F16C-NEXT: callq __truncdfhf2@PLT
; F16C-NEXT: vmovdqa (%rsp), %xmm1 # 16-byte Reload
; F16C-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; F16C-NEXT: vpunpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; F16C-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
; F16C-NEXT: vpunpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; F16C-NEXT: # xmm0 = xmm0[0],mem[0]
; F16C-NEXT: vmovdqa %xmm0, (%rbx)
; F16C-NEXT: addq $96, %rsp
; F16C-NEXT: popq %rbx
; F16C-NEXT: retq
;
; AVX512-LABEL: store_cvt_8f64_to_8i16:
; AVX512: # %bb.0:
; AVX512-NEXT: pushq %rbx
; AVX512-NEXT: subq $112, %rsp
; AVX512-NEXT: movq %rdi, %rbx
; AVX512-NEXT: vmovups %zmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Spill
; AVX512-NEXT: vextractf32x4 $3, %zmm0, %xmm0
; AVX512-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: callq __truncdfhf2@PLT
; AVX512-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX512-NEXT: vpermilpd $1, (%rsp), %xmm0 # 16-byte Folded Reload
; AVX512-NEXT: # xmm0 = mem[1,0]
; AVX512-NEXT: callq __truncdfhf2@PLT
; AVX512-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; AVX512-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; AVX512-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX512-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512-NEXT: vextractf32x4 $2, %zmm0, %xmm0
; AVX512-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: callq __truncdfhf2@PLT
; AVX512-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX512-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; AVX512-NEXT: # xmm0 = mem[1,0]
; AVX512-NEXT: callq __truncdfhf2@PLT
; AVX512-NEXT: vmovdqa (%rsp), %xmm1 # 16-byte Reload
; AVX512-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; AVX512-NEXT: vpunpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX512-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
; AVX512-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX512-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: callq __truncdfhf2@PLT
; AVX512-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX512-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; AVX512-NEXT: # xmm0 = mem[1,0]
; AVX512-NEXT: callq __truncdfhf2@PLT
; AVX512-NEXT: vmovdqa (%rsp), %xmm1 # 16-byte Reload
; AVX512-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; AVX512-NEXT: vmovdqa %xmm0, (%rsp) # 16-byte Spill
; AVX512-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %zmm0 # 64-byte Reload
; AVX512-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX512-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: callq __truncdfhf2@PLT
; AVX512-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX512-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; AVX512-NEXT: # xmm0 = mem[1,0]
; AVX512-NEXT: callq __truncdfhf2@PLT
; AVX512-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; AVX512-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; AVX512-NEXT: vmovdqa (%rsp), %xmm1 # 16-byte Reload
; AVX512-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; AVX512-NEXT: vpunpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX512-NEXT: # xmm0 = xmm0[0],mem[0]
; AVX512-NEXT: vmovdqa %xmm0, (%rbx)
; AVX512-NEXT: addq $112, %rsp
; AVX512-NEXT: popq %rbx
; AVX512-NEXT: retq
  %1 = fptrunc <8 x double> %a0 to <8 x half>
  %2 = bitcast <8 x half> %1 to <8 x i16>
  store <8 x i16> %2, ptr %a1
  ret void
}
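
; The source here is f32, so the scalarized conversions call __truncsfhf2
; instead; on plain AVX all 32 elements are truncated one at a time.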
define void @store_cvt_32f32_to_32f16(<32 x float> %a0, ptr %a1) nounwind {
; AVX1-LABEL: store_cvt_32f32_to_32f16:
; AVX1: # %bb.0:
; AVX1-NEXT: pushq %rbx
; AVX1-NEXT: subq $176, %rsp
; AVX1-NEXT: movq %rdi, %rbx
; AVX1-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm0
; AVX1-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: callq __truncsfhf2@PLT
; AVX1-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX1-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; AVX1-NEXT: # xmm0 = mem[1,0]
; AVX1-NEXT: callq __truncsfhf2@PLT
; AVX1-NEXT: vpunpcklwd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX1-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
; AVX1-NEXT: vmovdqa %xmm0, (%rsp) # 16-byte Spill
; AVX1-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX1-NEXT: callq __truncsfhf2@PLT
; AVX1-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-NEXT: vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; AVX1-NEXT: # xmm0 = mem[1,1,3,3]
; AVX1-NEXT: callq __truncsfhf2@PLT
; AVX1-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; AVX1-NEXT: vpunpckldq (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX1-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
; AVX1-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-NEXT: vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; AVX1-NEXT: # xmm0 = mem[3,3,3,3]
; AVX1-NEXT: callq __truncsfhf2@PLT
; AVX1-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX1-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; AVX1-NEXT: # xmm0 = mem[1,0]
; AVX1-NEXT: callq __truncsfhf2@PLT
; AVX1-NEXT: vpunpcklwd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX1-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
; AVX1-NEXT: vmovdqa %xmm0, (%rsp) # 16-byte Spill
; AVX1-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX1-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: callq __truncsfhf2@PLT
; AVX1-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-NEXT: vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; AVX1-NEXT: # xmm0 = mem[1,1,3,3]
; AVX1-NEXT: callq __truncsfhf2@PLT
; AVX1-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; AVX1-NEXT: vpunpckldq (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX1-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
; AVX1-NEXT: vpunpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX1-NEXT: # xmm0 = xmm0[0],mem[0]
; AVX1-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: callq __truncsfhf2@PLT
; AVX1-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX1-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; AVX1-NEXT: # xmm0 = mem[1,0]
; AVX1-NEXT: callq __truncsfhf2@PLT
; AVX1-NEXT: vpunpcklwd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX1-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
; AVX1-NEXT: vmovdqa %xmm0, (%rsp) # 16-byte Spill
; AVX1-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX1-NEXT: callq __truncsfhf2@PLT
; AVX1-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-NEXT: vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; AVX1-NEXT: # xmm0 = mem[1,1,3,3]
; AVX1-NEXT: callq __truncsfhf2@PLT
; AVX1-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; AVX1-NEXT: vpunpckldq (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX1-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
; AVX1-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-NEXT: vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; AVX1-NEXT: # xmm0 = mem[3,3,3,3]
; AVX1-NEXT: callq __truncsfhf2@PLT
; AVX1-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX1-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; AVX1-NEXT: # xmm0 = mem[1,0]
; AVX1-NEXT: callq __truncsfhf2@PLT
; AVX1-NEXT: vpunpcklwd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX1-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
; AVX1-NEXT: vmovdqa %xmm0, (%rsp) # 16-byte Spill
; AVX1-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX1-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: callq __truncsfhf2@PLT
; AVX1-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-NEXT: vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; AVX1-NEXT: # xmm0 = mem[1,1,3,3]
; AVX1-NEXT: callq __truncsfhf2@PLT
; AVX1-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; AVX1-NEXT: vpunpckldq (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX1-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
; AVX1-NEXT: vpunpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX1-NEXT: # xmm0 = xmm0[0],mem[0]
; AVX1-NEXT: vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload
; AVX1-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: callq __truncsfhf2@PLT
; AVX1-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; AVX1-NEXT: # xmm0 = mem[1,0]
; AVX1-NEXT: callq __truncsfhf2@PLT
; AVX1-NEXT: vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX1-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
; AVX1-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX1-NEXT: callq __truncsfhf2@PLT
; AVX1-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX1-NEXT: vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; AVX1-NEXT: # xmm0 = mem[1,1,3,3]
; AVX1-NEXT: callq __truncsfhf2@PLT
; AVX1-NEXT: vmovdqa (%rsp), %xmm1 # 16-byte Reload
; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; AVX1-NEXT: vpunpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX1-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
; AVX1-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-NEXT: vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; AVX1-NEXT: # xmm0 = mem[3,3,3,3]
; AVX1-NEXT: callq __truncsfhf2@PLT
; AVX1-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; AVX1-NEXT: # xmm0 = mem[1,0]
; AVX1-NEXT: callq __truncsfhf2@PLT
; AVX1-NEXT: vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX1-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
; AVX1-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX1-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: callq __truncsfhf2@PLT
; AVX1-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX1-NEXT: vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; AVX1-NEXT: # xmm0 = mem[1,1,3,3]
; AVX1-NEXT: callq __truncsfhf2@PLT
; AVX1-NEXT: vmovdqa (%rsp), %xmm1 # 16-byte Reload
; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; AVX1-NEXT: vpunpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX1-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
; AVX1-NEXT: vpunpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX1-NEXT: # xmm0 = xmm0[0],mem[0]
; AVX1-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: callq __truncsfhf2@PLT
; AVX1-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; AVX1-NEXT: # xmm0 = mem[1,0]
; AVX1-NEXT: callq __truncsfhf2@PLT
; AVX1-NEXT: vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX1-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
; AVX1-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX1-NEXT: callq __truncsfhf2@PLT
; AVX1-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX1-NEXT: vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; AVX1-NEXT: # xmm0 = mem[1,1,3,3]
; AVX1-NEXT: callq __truncsfhf2@PLT
; AVX1-NEXT: vmovdqa (%rsp), %xmm1 # 16-byte Reload
; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; AVX1-NEXT: vpunpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX1-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
; AVX1-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-NEXT: vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; AVX1-NEXT: # xmm0 = mem[3,3,3,3]
; AVX1-NEXT: callq __truncsfhf2@PLT
; AVX1-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; AVX1-NEXT: # xmm0 = mem[1,0]
; AVX1-NEXT: callq __truncsfhf2@PLT
; AVX1-NEXT: vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX1-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
; AVX1-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX1-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: callq __truncsfhf2@PLT
; AVX1-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX1-NEXT: vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; AVX1-NEXT: # xmm0 = mem[1,1,3,3]
; AVX1-NEXT: callq __truncsfhf2@PLT
; AVX1-NEXT: vmovdqa (%rsp), %xmm1 # 16-byte Reload
; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; AVX1-NEXT: vpunpckldq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX1-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
; AVX1-NEXT: vpunpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX1-NEXT: # xmm0 = xmm0[0],mem[0]
; AVX1-NEXT: vinsertf128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload
; AVX1-NEXT: vmovaps %ymm0, 32(%rbx)
; AVX1-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX1-NEXT: vmovaps %ymm0, (%rbx)
; AVX1-NEXT: addq $176, %rsp
; AVX1-NEXT: popq %rbx
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: store_cvt_32f32_to_32f16:
; AVX2: # %bb.0:
; AVX2-NEXT: pushq %rbx
; AVX2-NEXT: subq $240, %rsp
; AVX2-NEXT: movq %rdi, %rbx
; AVX2-NEXT: vmovups %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT: vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT: vextractf128 $1, %ymm1, %xmm0
; AVX2-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-NEXT: vshufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: callq __truncsfhf2@PLT
; AVX2-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX2-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; AVX2-NEXT: # xmm0 = mem[1,0]
; AVX2-NEXT: callq __truncsfhf2@PLT
; AVX2-NEXT: vpunpcklwd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX2-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
; AVX2-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX2-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX2-NEXT: vshufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: callq __truncsfhf2@PLT
; AVX2-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-NEXT: vpermilpd $1, (%rsp), %xmm0 # 16-byte Folded Reload
; AVX2-NEXT: # xmm0 = mem[1,0]
; AVX2-NEXT: callq __truncsfhf2@PLT
; AVX2-NEXT: vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX2-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
; AVX2-NEXT: vinserti128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload
; AVX2-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: callq __truncsfhf2@PLT
; AVX2-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-NEXT: vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; AVX2-NEXT: # xmm0 = mem[1,1,3,3]
; AVX2-NEXT: callq __truncsfhf2@PLT
; AVX2-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; AVX2-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; AVX2-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload
; AVX2-NEXT: callq __truncsfhf2@PLT
; AVX2-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-NEXT: vmovshdup (%rsp), %xmm0 # 16-byte Folded Reload
; AVX2-NEXT: # xmm0 = mem[1,1,3,3]
; AVX2-NEXT: callq __truncsfhf2@PLT
; AVX2-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; AVX2-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; AVX2-NEXT: vinserti128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload
; AVX2-NEXT: vpunpckldq {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
; AVX2-NEXT: # ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5]
; AVX2-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT: vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; AVX2-NEXT: # xmm0 = mem[3,3,3,3]
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: callq __truncsfhf2@PLT
; AVX2-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX2-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; AVX2-NEXT: # xmm0 = mem[1,0]
; AVX2-NEXT: callq __truncsfhf2@PLT
; AVX2-NEXT: vpunpcklwd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX2-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
; AVX2-NEXT: vmovdqa %xmm0, (%rsp) # 16-byte Spill
; AVX2-NEXT: vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; AVX2-NEXT: # xmm0 = mem[3,3,3,3]
; AVX2-NEXT: callq __truncsfhf2@PLT
; AVX2-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; AVX2-NEXT: # xmm0 = mem[1,0]
; AVX2-NEXT: callq __truncsfhf2@PLT
; AVX2-NEXT: vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX2-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
; AVX2-NEXT: vinserti128 $1, (%rsp), %ymm0, %ymm0 # 16-byte Folded Reload
; AVX2-NEXT: vmovdqu %ymm0, (%rsp) # 32-byte Spill
; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: callq __truncsfhf2@PLT
; AVX2-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-NEXT: vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; AVX2-NEXT: # xmm0 = mem[1,1,3,3]
; AVX2-NEXT: callq __truncsfhf2@PLT
; AVX2-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; AVX2-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; AVX2-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: callq __truncsfhf2@PLT
; AVX2-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-NEXT: vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; AVX2-NEXT: # xmm0 = mem[1,1,3,3]
; AVX2-NEXT: callq __truncsfhf2@PLT
; AVX2-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; AVX2-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; AVX2-NEXT: vinserti128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload
; AVX2-NEXT: vpunpckldq (%rsp), %ymm0, %ymm0 # 32-byte Folded Reload
; AVX2-NEXT: # ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5]
; AVX2-NEXT: vpunpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
; AVX2-NEXT: # ymm0 = ymm0[0],mem[0],ymm0[2],mem[2]
; AVX2-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX2-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-NEXT: vshufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: callq __truncsfhf2@PLT
; AVX2-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; AVX2-NEXT: # xmm0 = mem[1,0]
; AVX2-NEXT: callq __truncsfhf2@PLT
; AVX2-NEXT: vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX2-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
; AVX2-NEXT: vmovdqa %xmm0, (%rsp) # 16-byte Spill
; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX2-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-NEXT: vshufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: callq __truncsfhf2@PLT
; AVX2-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; AVX2-NEXT: # xmm0 = mem[1,0]
; AVX2-NEXT: callq __truncsfhf2@PLT
; AVX2-NEXT: vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX2-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
; AVX2-NEXT: vinserti128 $1, (%rsp), %ymm0, %ymm0 # 16-byte Folded Reload
; AVX2-NEXT: vmovdqu %ymm0, (%rsp) # 32-byte Spill
; AVX2-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: callq __truncsfhf2@PLT
; AVX2-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-NEXT: vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; AVX2-NEXT: # xmm0 = mem[1,1,3,3]
; AVX2-NEXT: callq __truncsfhf2@PLT
; AVX2-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; AVX2-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; AVX2-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX2-NEXT: callq __truncsfhf2@PLT
; AVX2-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-NEXT: vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; AVX2-NEXT: # xmm0 = mem[1,1,3,3]
; AVX2-NEXT: callq __truncsfhf2@PLT
; AVX2-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; AVX2-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; AVX2-NEXT: vinserti128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload
; AVX2-NEXT: vpunpckldq (%rsp), %ymm0, %ymm0 # 32-byte Folded Reload
; AVX2-NEXT: # ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5]
; AVX2-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT: vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; AVX2-NEXT: # xmm0 = mem[3,3,3,3]
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: callq __truncsfhf2@PLT
; AVX2-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; AVX2-NEXT: # xmm0 = mem[1,0]
; AVX2-NEXT: callq __truncsfhf2@PLT
; AVX2-NEXT: vpunpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX2-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
; AVX2-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-NEXT: vpermilps $255, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; AVX2-NEXT: # xmm0 = mem[3,3,3,3]
; AVX2-NEXT: callq __truncsfhf2@PLT
; AVX2-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX2-NEXT: vpermilpd $1, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; AVX2-NEXT: # xmm0 = mem[1,0]
; AVX2-NEXT: callq __truncsfhf2@PLT
; AVX2-NEXT: vpunpcklwd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX2-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
; AVX2-NEXT: vinserti128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload
; AVX2-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: callq __truncsfhf2@PLT
; AVX2-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX2-NEXT: vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; AVX2-NEXT: # xmm0 = mem[1,1,3,3]
; AVX2-NEXT: callq __truncsfhf2@PLT
; AVX2-NEXT: vmovdqa (%rsp), %xmm1 # 16-byte Reload
; AVX2-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; AVX2-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: callq __truncsfhf2@PLT
; AVX2-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX2-NEXT: vmovshdup {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; AVX2-NEXT: # xmm0 = mem[1,1,3,3]
; AVX2-NEXT: callq __truncsfhf2@PLT
; AVX2-NEXT: vmovdqa (%rsp), %xmm1 # 16-byte Reload
; AVX2-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; AVX2-NEXT: vinserti128 $1, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 16-byte Folded Reload
; AVX2-NEXT: vpunpckldq {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
; AVX2-NEXT: # ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5]
; AVX2-NEXT: vpunpcklqdq {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
; AVX2-NEXT: # ymm0 = ymm0[0],mem[0],ymm0[2],mem[2]
; AVX2-NEXT: vmovdqa %ymm0, 32(%rbx)
; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT: vmovaps %ymm0, (%rbx)
; AVX2-NEXT: addq $240, %rsp
; AVX2-NEXT: popq %rbx
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; F16C-LABEL: store_cvt_32f32_to_32f16:
; F16C: # %bb.0:
; F16C-NEXT: vcvtps2ph $4, %ymm3, 48(%rdi)
; F16C-NEXT: vcvtps2ph $4, %ymm2, 32(%rdi)
; F16C-NEXT: vcvtps2ph $4, %ymm1, 16(%rdi)
; F16C-NEXT: vcvtps2ph $4, %ymm0, (%rdi)
; F16C-NEXT: vzeroupper
; F16C-NEXT: retq
;
; AVX512-LABEL: store_cvt_32f32_to_32f16:
; AVX512: # %bb.0:
; AVX512-NEXT: vcvtps2ph $4, %zmm1, 32(%rdi)
; AVX512-NEXT: vcvtps2ph $4, %zmm0, (%rdi)
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
  %1 = fptrunc <32 x float> %a0 to <32 x half>
  store <32 x half> %1, ptr %a1
  ret void
}
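
; Note: the <2 x half> source is widened by the shuffle with
; zeroinitializer, so the upper i32 lanes of the result must be zero;
; the vmovq in the checks below performs that zeroing. A minimal sketch
; of what each lane computes (assuming the __extendhfsf2 helper used on
; the plain-AVX path) is:
;
;   define i32 @cvt_one_sint(half %x) {
;     %f = fpext half %x to float  ; plain AVX: callq __extendhfsf2@PLT
;     %i = fptosi float %f to i32  ; vcvttps2dq in the packed form
;     ret i32 %i
;   }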
define <4 x i32> @fptosi_2f16_to_4i32(<2 x half> %a) nounwind {
; AVX-LABEL: fptosi_2f16_to_4i32:
; AVX: # %bb.0:
; AVX-NEXT: subq $40, %rsp
; AVX-NEXT: vpsrld $16, %xmm0, %xmm1
; AVX-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: callq __extendhfsf2@PLT
; AVX-NEXT: vmovdqa %xmm0, (%rsp) # 16-byte Spill
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX-NEXT: callq __extendhfsf2@PLT
; AVX-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
; AVX-NEXT: vcvttps2dq %xmm0, %xmm0
; AVX-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
; AVX-NEXT: addq $40, %rsp
; AVX-NEXT: retq
;
; F16C-LABEL: fptosi_2f16_to_4i32:
; F16C: # %bb.0:
; F16C-NEXT: vcvtph2ps %xmm0, %xmm0
; F16C-NEXT: vcvttps2dq %xmm0, %xmm0
; F16C-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
; F16C-NEXT: retq
;
; AVX512-LABEL: fptosi_2f16_to_4i32:
; AVX512: # %bb.0:
; AVX512-NEXT: vcvtph2ps %xmm0, %xmm0
; AVX512-NEXT: vcvttps2dq %xmm0, %xmm0
; AVX512-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
; AVX512-NEXT: retq
  %cvt = fptosi <2 x half> %a to <2 x i32>
  %ext = shufflevector <2 x i32> %cvt, <2 x i32> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  ret <4 x i32> %ext
}
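
; Note: in the 4-element case no upper-lane zeroing is needed, so
; F16C/AVX512 targets can lower the whole conversion to a single
; vcvtph2ps + vcvttps2dq pair, while the plain-AVX path still expands
; each lane through __extendhfsf2 as above.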
; PR83402
define <4 x i32> @fptosi_4f16_to_4i32(<4 x half> %a) nounwind {
; AVX-LABEL: fptosi_4f16_to_4i32:
; AVX: # %bb.0:
; AVX-NEXT: subq $72, %rsp
; AVX-NEXT: vmovdqa %xmm0, %xmm1
; AVX-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vpsrld $16, %xmm0, %xmm0
; AVX-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vmovshdup {{.*#+}} xmm0 = xmm1[1,1,3,3]
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX-NEXT: vpsrlq $48, %xmm1, %xmm0
; AVX-NEXT: callq __extendhfsf2@PLT
; AVX-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload
; AVX-NEXT: callq __extendhfsf2@PLT
; AVX-NEXT: vinsertps $16, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[2,3]
; AVX-NEXT: vcvttps2dq %xmm0, %xmm0
; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX-NEXT: callq __extendhfsf2@PLT
; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX-NEXT: callq __extendhfsf2@PLT
; AVX-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
; AVX-NEXT: vcvttps2dq %xmm0, %xmm0
; AVX-NEXT: vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
; AVX-NEXT: addq $72, %rsp
; AVX-NEXT: retq
;
; F16C-LABEL: fptosi_4f16_to_4i32:
; F16C: # %bb.0:
; F16C-NEXT: vcvtph2ps %xmm0, %xmm0
; F16C-NEXT: vcvttps2dq %xmm0, %xmm0
; F16C-NEXT: retq
;
; AVX512-LABEL: fptosi_4f16_to_4i32:
; AVX512: # %bb.0:
; AVX512-NEXT: vcvtph2ps %xmm0, %xmm0
; AVX512-NEXT: vcvttps2dq %xmm0, %xmm0
; AVX512-NEXT: retq
  %cvt = fptosi <4 x half> %a to <4 x i32>
  ret <4 x i32> %cvt
}
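
; Note: without AVX512's vcvttps2udq, an unsigned conversion is built
; from the signed one. A sketch of the identity the
; vsubps/vpsrad/vpand/vpor sequence below implements, per lane, is
; roughly:
;
;   u = (x < 2^31) ? (u32)(i32)x
;                  : (u32)(i32)(x - 2^31) | 0x80000000
;
; vcvttps2dq produces 0x80000000 exactly when the signed conversion
; overflows, so vpsrad $31 turns that into the mask selecting the
; adjusted result.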
define <4 x i32> @fptoui_2f16_to_4i32(<2 x half> %a) nounwind {
; AVX1-LABEL: fptoui_2f16_to_4i32:
; AVX1: # %bb.0:
; AVX1-NEXT: subq $40, %rsp
; AVX1-NEXT: vpsrld $16, %xmm0, %xmm1
; AVX1-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-NEXT: callq __extendhfsf2@PLT
; AVX1-NEXT: vmovdqa %xmm0, (%rsp) # 16-byte Spill
; AVX1-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX1-NEXT: callq __extendhfsf2@PLT
; AVX1-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
; AVX1-NEXT: vcvttps2dq %xmm0, %xmm1
; AVX1-NEXT: vpsrad $31, %xmm1, %xmm2
; AVX1-NEXT: vsubps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX1-NEXT: vcvttps2dq %xmm0, %xmm0
; AVX1-NEXT: vpand %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpor %xmm0, %xmm1, %xmm0
; AVX1-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
; AVX1-NEXT: addq $40, %rsp
; AVX1-NEXT: retq
;
; AVX2-LABEL: fptoui_2f16_to_4i32:
; AVX2: # %bb.0:
; AVX2-NEXT: subq $40, %rsp
; AVX2-NEXT: vpsrld $16, %xmm0, %xmm1
; AVX2-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-NEXT: callq __extendhfsf2@PLT
; AVX2-NEXT: vmovdqa %xmm0, (%rsp) # 16-byte Spill
; AVX2-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX2-NEXT: callq __extendhfsf2@PLT
; AVX2-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
; AVX2-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
; AVX2-NEXT: vcvttps2dq %xmm0, %xmm1
; AVX2-NEXT: vpsrad $31, %xmm1, %xmm2
; AVX2-NEXT: vbroadcastss {{.*#+}} xmm3 = [2.14748365E+9,2.14748365E+9,2.14748365E+9,2.14748365E+9]
; AVX2-NEXT: vsubps %xmm3, %xmm0, %xmm0
; AVX2-NEXT: vcvttps2dq %xmm0, %xmm0
; AVX2-NEXT: vpand %xmm2, %xmm0, %xmm0
; AVX2-NEXT: vpor %xmm0, %xmm1, %xmm0
; AVX2-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
; AVX2-NEXT: addq $40, %rsp
; AVX2-NEXT: retq
;
; F16C-LABEL: fptoui_2f16_to_4i32:
; F16C: # %bb.0:
; F16C-NEXT: vxorps %xmm1, %xmm1, %xmm1
; F16C-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; F16C-NEXT: vcvtph2ps %xmm0, %xmm0
; F16C-NEXT: vcvttps2dq %xmm0, %xmm1
; F16C-NEXT: vpsrad $31, %xmm1, %xmm2
; F16C-NEXT: vsubps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; F16C-NEXT: vcvttps2dq %xmm0, %xmm0
; F16C-NEXT: vpand %xmm2, %xmm0, %xmm0
; F16C-NEXT: vpor %xmm0, %xmm1, %xmm0
; F16C-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
; F16C-NEXT: retq
;
; AVX512F-LABEL: fptoui_2f16_to_4i32:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX512F-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; AVX512F-NEXT: vcvtph2ps %xmm0, %xmm0
; AVX512F-NEXT: vcvttps2udq %zmm0, %zmm0
; AVX512F-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512-FASTLANE-LABEL: fptoui_2f16_to_4i32:
; AVX512-FASTLANE: # %bb.0:
; AVX512-FASTLANE-NEXT: vcvtph2ps %xmm0, %xmm0
; AVX512-FASTLANE-NEXT: vcvttps2udq %xmm0, %xmm0
; AVX512-FASTLANE-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
; AVX512-FASTLANE-NEXT: retq
  %cvt = fptoui <2 x half> %a to <2 x i32>
  %ext = shufflevector <2 x i32> %cvt, <2 x i32> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  ret <4 x i32> %ext
}
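
; Note: same unsigned-conversion trick as above, now for all four lanes.
; On AVX512F without VLX, vcvttps2udq exists only on zmm, so the xmm
; input is implicitly widened and the result truncated back (the "kill"
; comment below), followed by vzeroupper; AVX512VL targets use the xmm
; form directly.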
define <4 x i32> @fptoui_4f16_to_4i32(<4 x half> %a) nounwind {
; AVX1-LABEL: fptoui_4f16_to_4i32:
; AVX1: # %bb.0:
; AVX1-NEXT: subq $72, %rsp
; AVX1-NEXT: vmovdqa %xmm0, %xmm1
; AVX1-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-NEXT: vpsrld $16, %xmm0, %xmm0
; AVX1-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-NEXT: vmovshdup {{.*#+}} xmm0 = xmm1[1,1,3,3]
; AVX1-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX1-NEXT: vpsrlq $48, %xmm1, %xmm0
; AVX1-NEXT: callq __extendhfsf2@PLT
; AVX1-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload
; AVX1-NEXT: callq __extendhfsf2@PLT
; AVX1-NEXT: vinsertps $16, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX1-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[2,3]
; AVX1-NEXT: vcvttps2dq %xmm0, %xmm1
; AVX1-NEXT: vpsrad $31, %xmm1, %xmm2
; AVX1-NEXT: vsubps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX1-NEXT: vcvttps2dq %xmm0, %xmm0
; AVX1-NEXT: vpand %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpor %xmm0, %xmm1, %xmm0
; AVX1-NEXT: vmovdqa %xmm0, (%rsp) # 16-byte Spill
; AVX1-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX1-NEXT: callq __extendhfsf2@PLT
; AVX1-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX1-NEXT: callq __extendhfsf2@PLT
; AVX1-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
; AVX1-NEXT: vcvttps2dq %xmm0, %xmm1
; AVX1-NEXT: vpsrad $31, %xmm1, %xmm2
; AVX1-NEXT: vsubps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX1-NEXT: vcvttps2dq %xmm0, %xmm0
; AVX1-NEXT: vpand %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpor %xmm0, %xmm1, %xmm0
; AVX1-NEXT: vpunpcklqdq (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX1-NEXT: # xmm0 = xmm0[0],mem[0]
; AVX1-NEXT: addq $72, %rsp
; AVX1-NEXT: retq
;
; AVX2-LABEL: fptoui_4f16_to_4i32:
; AVX2: # %bb.0:
; AVX2-NEXT: subq $72, %rsp
; AVX2-NEXT: vmovdqa %xmm0, %xmm1
; AVX2-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-NEXT: vpsrld $16, %xmm0, %xmm0
; AVX2-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-NEXT: vmovshdup {{.*#+}} xmm0 = xmm1[1,1,3,3]
; AVX2-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
; AVX2-NEXT: vpsrlq $48, %xmm1, %xmm0
; AVX2-NEXT: callq __extendhfsf2@PLT
; AVX2-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload
; AVX2-NEXT: callq __extendhfsf2@PLT
; AVX2-NEXT: vinsertps $16, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX2-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[2,3]
; AVX2-NEXT: vcvttps2dq %xmm0, %xmm1
; AVX2-NEXT: vpsrad $31, %xmm1, %xmm2
; AVX2-NEXT: vbroadcastss {{.*#+}} xmm3 = [2.14748365E+9,2.14748365E+9,2.14748365E+9,2.14748365E+9]
; AVX2-NEXT: vsubps %xmm3, %xmm0, %xmm0
; AVX2-NEXT: vcvttps2dq %xmm0, %xmm0
; AVX2-NEXT: vpand %xmm2, %xmm0, %xmm0
; AVX2-NEXT: vpor %xmm0, %xmm1, %xmm0
; AVX2-NEXT: vmovdqa %xmm0, (%rsp) # 16-byte Spill
; AVX2-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX2-NEXT: callq __extendhfsf2@PLT
; AVX2-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX2-NEXT: callq __extendhfsf2@PLT
; AVX2-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; AVX2-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
; AVX2-NEXT: vcvttps2dq %xmm0, %xmm1
; AVX2-NEXT: vpsrad $31, %xmm1, %xmm2
; AVX2-NEXT: vbroadcastss {{.*#+}} xmm3 = [2.14748365E+9,2.14748365E+9,2.14748365E+9,2.14748365E+9]
; AVX2-NEXT: vsubps %xmm3, %xmm0, %xmm0
; AVX2-NEXT: vcvttps2dq %xmm0, %xmm0
; AVX2-NEXT: vpand %xmm2, %xmm0, %xmm0
; AVX2-NEXT: vpor %xmm0, %xmm1, %xmm0
; AVX2-NEXT: vpunpcklqdq (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX2-NEXT: # xmm0 = xmm0[0],mem[0]
; AVX2-NEXT: addq $72, %rsp
; AVX2-NEXT: retq
;
; F16C-LABEL: fptoui_4f16_to_4i32:
; F16C: # %bb.0:
; F16C-NEXT: vcvtph2ps %xmm0, %xmm0
; F16C-NEXT: vcvttps2dq %xmm0, %xmm1
; F16C-NEXT: vsubps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; F16C-NEXT: vcvttps2dq %xmm0, %xmm0
; F16C-NEXT: vorps %xmm0, %xmm1, %xmm0
; F16C-NEXT: vblendvps %xmm1, %xmm0, %xmm1, %xmm0
; F16C-NEXT: retq
;
; AVX512F-LABEL: fptoui_4f16_to_4i32:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vcvtph2ps %xmm0, %xmm0
; AVX512F-NEXT: vcvttps2udq %zmm0, %zmm0
; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512-FASTLANE-LABEL: fptoui_4f16_to_4i32:
; AVX512-FASTLANE: # %bb.0:
; AVX512-FASTLANE-NEXT: vcvtph2ps %xmm0, %xmm0
; AVX512-FASTLANE-NEXT: vcvttps2udq %xmm0, %xmm0
; AVX512-FASTLANE-NEXT: retq
  %cvt = fptoui <4 x half> %a to <4 x i32>
  ret <4 x i32> %cvt
}