[X86] combineConcatVectorOps - attempt to recursively call combineConcatVectorOps from inside ConcatSubOperand. (#131303)

Before falling back to creating a generic ISD::CONCAT_VECTORS node, see if we can directly concat the subvectors if we peek through any bitcasts.
Author: Simon Pilgrim
Date: 2025-03-14 12:09:54 +00:00 (committed by GitHub)
Parent: 80079c9c2f
Commit: 73e93ec3a2
6 changed files with 1621 additions and 1636 deletions


@@ -57937,11 +57937,14 @@ static SDValue combineConcatVectorOps(const SDLoc &DL, MVT VT,
       // Attempt to peek through bitcasts and concat the original subvectors.
       EVT SubVT = peekThroughBitcasts(Subs[0]).getValueType();
       if (SubVT.isSimple() && SubVT.isVector()) {
-        EVT ConcatVT =
-            EVT::getVectorVT(Ctx, SubVT.getScalarType(),
+        MVT ConcatVT =
+            MVT::getVectorVT(SubVT.getSimpleVT().getScalarType(),
                              SubVT.getVectorElementCount() * Subs.size());
         for (SDValue &Sub : Subs)
           Sub = DAG.getBitcast(SubVT, Sub);
+        if (SDValue ConcatSrc = combineConcatVectorOps(DL, ConcatVT, Subs, DAG,
+                                                       Subtarget, Depth + 1))
+          return DAG.getBitcast(VT, ConcatSrc);
         return DAG.getBitcast(
             VT, DAG.getNode(ISD::CONCAT_VECTORS, DL, ConcatVT, Subs));
       }
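As a reading aid only: a minimal standalone sketch in plain C++ of the control flow the hunk above adds. The Node type, node names and the Combine() fold are hypothetical stand-ins, not LLVM's SelectionDAG API; the point is the ordering: strip bitcasts from the operands, try the recursive combine on the wider concat first, and only fall back to building a generic concat node when nothing folds.

// Toy model of "peek through bitcasts, recurse, then fall back to concat".
#include <memory>
#include <optional>
#include <string>
#include <vector>

struct Node {
  std::string op;                           // e.g. "bitcast", "broadcast", "concat"
  std::vector<std::shared_ptr<Node>> ops;   // operands
};
using NodeRef = std::shared_ptr<Node>;

// Analogue of peekThroughBitcasts: strip any number of wrapping bitcasts.
static NodeRef PeekThroughBitcasts(NodeRef N) {
  while (N->op == "bitcast")
    N = N->ops[0];
  return N;
}

// Hypothetical recursive combiner: if every subvector is the same broadcast,
// the wide concat is itself just that broadcast (one of the folds the real
// combineConcatVectorOps performs).
static std::optional<NodeRef> Combine(const std::vector<NodeRef> &Subs) {
  for (const NodeRef &S : Subs)
    if (S->op != "broadcast" || S->ops[0] != Subs[0]->ops[0])
      return std::nullopt;
  return Subs[0];
}

static NodeRef ConcatSubvectors(std::vector<NodeRef> Subs) {
  // Peek through bitcasts on each operand first...
  for (NodeRef &S : Subs)
    S = PeekThroughBitcasts(S);
  // ...then try to combine the wider concat recursively (the new step)...
  if (std::optional<NodeRef> Folded = Combine(Subs))
    return std::make_shared<Node>(Node{"bitcast", {*Folded}});
  // ...and only then fall back to a generic concat node.
  NodeRef C = std::make_shared<Node>(Node{"concat", Subs});
  return std::make_shared<Node>(Node{"bitcast", {C}});
}

int main() {
  NodeRef Scalar = std::make_shared<Node>(Node{"scalar", {}});
  NodeRef Bcast = std::make_shared<Node>(Node{"broadcast", {Scalar}});
  NodeRef Sub = std::make_shared<Node>(Node{"bitcast", {Bcast}});
  NodeRef Wide = ConcatSubvectors({Sub, Sub});
  // The two bitcast(broadcast) operands fold into a single broadcast, so the
  // result is bitcast(broadcast) rather than bitcast(concat(...)).
  return Wide->ops[0]->op == "broadcast" ? 0 : 1;
}

The EVT -> MVT switch for ConcatVT in the hunk is what enables the recursion: the VT parameter of combineConcatVectorOps is an MVT, as the hunk header shows, so the recursive call needs a simple value type.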


@@ -1653,305 +1653,305 @@ define void @store_i16_stride3_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.ve
;
; AVX512-LABEL: store_i16_stride3_vf32:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovdqa (%rsi), %xmm0
; AVX512-NEXT: vmovdqa 16(%rsi), %xmm3
; AVX512-NEXT: vmovdqa 32(%rsi), %xmm2
; AVX512-NEXT: vprold $16, %xmm2, %xmm4
; AVX512-NEXT: vmovdqa (%rdi), %xmm1
; AVX512-NEXT: vmovdqa 16(%rdi), %xmm5
; AVX512-NEXT: vmovdqa 32(%rdi), %xmm6
; AVX512-NEXT: vpshufd {{.*#+}} xmm7 = xmm6[1,1,2,2]
; AVX512-NEXT: vpblendw {{.*#+}} xmm4 = xmm7[0,1],xmm4[2],xmm7[3,4],xmm4[5],xmm7[6,7]
; AVX512-NEXT: vpunpcklwd {{.*#+}} xmm6 = xmm6[0],xmm2[0],xmm6[1],xmm2[1],xmm6[2],xmm2[2],xmm6[3],xmm2[3]
; AVX512-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,2,3,4,5,4,5,6,7,10,11,8,9,10,11]
; AVX512-NEXT: vpshufb %xmm2, %xmm6, %xmm6
; AVX512-NEXT: vinserti128 $1, %xmm4, %ymm6, %ymm4
; AVX512-NEXT: vpunpckhwd {{.*#+}} xmm6 = xmm3[4],xmm5[4],xmm3[5],xmm5[5],xmm3[6],xmm5[6],xmm3[7],xmm5[7]
; AVX512-NEXT: vmovdqa {{.*#+}} xmm7 = [4,5,10,11,10,11,8,9,8,9,14,15,12,13,14,15]
; AVX512-NEXT: vpshufb %xmm7, %xmm6, %xmm6
; AVX512-NEXT: vprold $16, %xmm3, %xmm3
; AVX512-NEXT: vpshufd {{.*#+}} xmm5 = xmm5[1,1,2,2]
; AVX512-NEXT: vpblendw {{.*#+}} xmm3 = xmm5[0,1],xmm3[2],xmm5[3,4],xmm3[5],xmm5[6,7]
; AVX512-NEXT: vinserti128 $1, %xmm6, %ymm3, %ymm3
; AVX512-NEXT: vshufi64x2 {{.*#+}} zmm4 = zmm3[0,1,2,3],zmm4[0,1,2,3]
; AVX512-NEXT: vmovdqa (%rdx), %ymm3
; AVX512-NEXT: vmovdqa 32(%rdx), %ymm5
; AVX512-NEXT: vpmovsxbd {{.*#+}} zmm6 = [21,21,0,22,22,0,23,23,0,0,0,0,1,1,0,2]
; AVX512-NEXT: vpermi2d (%rdx), %zmm5, %zmm6
; AVX512-NEXT: vpternlogq {{.*#+}} zmm6 = zmm6 ^ (mem & (zmm6 ^ zmm4))
; AVX512-NEXT: vmovdqa 32(%rdi), %ymm4
; AVX512-NEXT: vmovdqa {{.*#+}} ymm8 = [128,128,128,128,12,13,128,128,128,128,14,15,128,128,128,128,16,17,128,128,128,128,18,19,128,128,128,128,20,21,128,128]
; AVX512-NEXT: vpshufb %ymm8, %ymm4, %ymm4
; AVX512-NEXT: vmovdqa 32(%rsi), %ymm9
; AVX512-NEXT: vmovdqa {{.*#+}} ymm10 = [10,11,0,1,128,128,12,13,2,3,128,128,14,15,4,5,128,128,16,17,28,29,128,128,18,19,18,19,128,128,20,21]
; AVX512-NEXT: vpshufb %ymm10, %ymm9, %ymm9
; AVX512-NEXT: vpor %ymm4, %ymm9, %ymm4
; AVX512-NEXT: vmovdqa 48(%rdi), %xmm9
; AVX512-NEXT: vmovdqa 48(%rsi), %xmm11
; AVX512-NEXT: vpunpckhwd {{.*#+}} xmm12 = xmm11[4],xmm9[4],xmm11[5],xmm9[5],xmm11[6],xmm9[6],xmm11[7],xmm9[7]
; AVX512-NEXT: vpshufb %xmm7, %xmm12, %xmm7
; AVX512-NEXT: vprold $16, %xmm11, %xmm11
; AVX512-NEXT: vpshufd {{.*#+}} xmm9 = xmm9[1,1,2,2]
; AVX512-NEXT: vpblendw {{.*#+}} xmm9 = xmm9[0,1],xmm11[2],xmm9[3,4],xmm11[5],xmm9[6,7]
; AVX512-NEXT: vinserti128 $1, %xmm7, %ymm9, %ymm7
; AVX512-NEXT: vshufi64x2 {{.*#+}} zmm4 = zmm4[0,1,2,3],zmm7[0,1,2,3]
; AVX512-NEXT: vmovdqa {{.*#+}} ymm7 = [128,128,10,11,128,128,128,128,12,13,128,128,128,128,14,15,128,128,128,128,16,17,128,128,128,128,18,19,128,128,128,128]
; AVX512-NEXT: vpshufb %ymm7, %ymm5, %ymm9
; AVX512-NEXT: vpmovsxbd {{.*#+}} ymm11 = [5,5,0,6,6,0,7,7]
; AVX512-NEXT: vpermd %ymm5, %ymm11, %ymm5
; AVX512-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm5, %ymm5
; AVX512-NEXT: vinserti64x4 $1, %ymm5, %zmm9, %zmm5
; AVX512-NEXT: vpternlogq {{.*#+}} zmm5 = zmm5 | (zmm4 & mem)
; AVX512-NEXT: vmovdqa (%rdi), %ymm4
; AVX512-NEXT: vpshufb %ymm8, %ymm4, %ymm4
; AVX512-NEXT: vmovdqa (%rsi), %ymm8
; AVX512-NEXT: vpshufb %ymm10, %ymm8, %ymm8
; AVX512-NEXT: vpor %ymm4, %ymm8, %ymm4
; AVX512-NEXT: vprold $16, %xmm0, %xmm8
; AVX512-NEXT: vpshufd {{.*#+}} xmm9 = xmm1[1,1,2,2]
; AVX512-NEXT: vpblendw {{.*#+}} xmm8 = xmm9[0,1],xmm8[2],xmm9[3,4],xmm8[5],xmm9[6,7]
; AVX512-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; AVX512-NEXT: vpshufb %xmm2, %xmm0, %xmm0
; AVX512-NEXT: vinserti128 $1, %xmm8, %ymm0, %ymm0
; AVX512-NEXT: vshufi64x2 {{.*#+}} zmm0 = zmm0[0,1,2,3],zmm4[0,1,2,3]
; AVX512-NEXT: vpshufb %ymm7, %ymm3, %ymm1
; AVX512-NEXT: vpmovsxbd {{.*#+}} ymm2 = [0,0,0,0,1,1,0,2]
; AVX512-NEXT: vpermd %ymm3, %ymm2, %ymm2
; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm3 = [65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535]
; AVX512-NEXT: vpandn %ymm2, %ymm3, %ymm2
; AVX512-NEXT: vinserti64x4 $1, %ymm1, %zmm2, %zmm1
; AVX512-NEXT: vpternlogq {{.*#+}} zmm1 = zmm1 | (zmm0 & zmm3)
; AVX512-NEXT: vmovdqa64 %zmm1, (%rcx)
; AVX512-NEXT: vmovdqa64 %zmm5, 128(%rcx)
; AVX512-NEXT: vmovdqa64 %zmm6, 64(%rcx)
; AVX512-NEXT: vmovdqa64 (%rdx), %zmm0
; AVX512-NEXT: vmovdqa (%rsi), %xmm1
; AVX512-NEXT: vmovdqa 16(%rsi), %xmm4
; AVX512-NEXT: vmovdqa 32(%rsi), %xmm3
; AVX512-NEXT: vprold $16, %xmm3, %xmm5
; AVX512-NEXT: vmovdqa (%rdi), %xmm2
; AVX512-NEXT: vmovdqa 16(%rdi), %xmm6
; AVX512-NEXT: vmovdqa 32(%rdi), %xmm7
; AVX512-NEXT: vpshufd {{.*#+}} xmm8 = xmm7[1,1,2,2]
; AVX512-NEXT: vpblendw {{.*#+}} xmm5 = xmm8[0,1],xmm5[2],xmm8[3,4],xmm5[5],xmm8[6,7]
; AVX512-NEXT: vpunpcklwd {{.*#+}} xmm7 = xmm7[0],xmm3[0],xmm7[1],xmm3[1],xmm7[2],xmm3[2],xmm7[3],xmm3[3]
; AVX512-NEXT: vmovdqa {{.*#+}} xmm3 = [0,1,2,3,4,5,4,5,6,7,10,11,8,9,10,11]
; AVX512-NEXT: vpshufb %xmm3, %xmm7, %xmm7
; AVX512-NEXT: vinserti128 $1, %xmm5, %ymm7, %ymm5
; AVX512-NEXT: vpunpckhwd {{.*#+}} xmm7 = xmm4[4],xmm6[4],xmm4[5],xmm6[5],xmm4[6],xmm6[6],xmm4[7],xmm6[7]
; AVX512-NEXT: vmovdqa {{.*#+}} xmm8 = [4,5,10,11,10,11,8,9,8,9,14,15,12,13,14,15]
; AVX512-NEXT: vpshufb %xmm8, %xmm7, %xmm7
; AVX512-NEXT: vprold $16, %xmm4, %xmm4
; AVX512-NEXT: vpshufd {{.*#+}} xmm6 = xmm6[1,1,2,2]
; AVX512-NEXT: vpblendw {{.*#+}} xmm4 = xmm6[0,1],xmm4[2],xmm6[3,4],xmm4[5],xmm6[6,7]
; AVX512-NEXT: vinserti128 $1, %xmm7, %ymm4, %ymm4
; AVX512-NEXT: vshufi64x2 {{.*#+}} zmm5 = zmm4[0,1,2,3],zmm5[0,1,2,3]
; AVX512-NEXT: vmovdqa (%rdx), %ymm4
; AVX512-NEXT: vmovdqa 32(%rdx), %ymm6
; AVX512-NEXT: vpmovsxbd {{.*#+}} zmm7 = [5,5,0,6,6,0,7,7,0,16,16,0,17,17,0,18]
; AVX512-NEXT: vpermt2d %zmm6, %zmm7, %zmm0
; AVX512-NEXT: vpternlogq {{.*#+}} zmm0 = zmm0 ^ (mem & (zmm0 ^ zmm5))
; AVX512-NEXT: vmovdqa 32(%rdi), %ymm5
; AVX512-NEXT: vmovdqa {{.*#+}} ymm9 = [128,128,128,128,12,13,128,128,128,128,14,15,128,128,128,128,16,17,128,128,128,128,18,19,128,128,128,128,20,21,128,128]
; AVX512-NEXT: vpshufb %ymm9, %ymm5, %ymm5
; AVX512-NEXT: vmovdqa 32(%rsi), %ymm10
; AVX512-NEXT: vmovdqa {{.*#+}} ymm11 = [10,11,0,1,128,128,12,13,2,3,128,128,14,15,4,5,128,128,16,17,28,29,128,128,18,19,18,19,128,128,20,21]
; AVX512-NEXT: vpshufb %ymm11, %ymm10, %ymm10
; AVX512-NEXT: vpor %ymm5, %ymm10, %ymm5
; AVX512-NEXT: vmovdqa 48(%rdi), %xmm10
; AVX512-NEXT: vmovdqa 48(%rsi), %xmm12
; AVX512-NEXT: vpunpckhwd {{.*#+}} xmm13 = xmm12[4],xmm10[4],xmm12[5],xmm10[5],xmm12[6],xmm10[6],xmm12[7],xmm10[7]
; AVX512-NEXT: vpshufb %xmm8, %xmm13, %xmm8
; AVX512-NEXT: vprold $16, %xmm12, %xmm12
; AVX512-NEXT: vpshufd {{.*#+}} xmm10 = xmm10[1,1,2,2]
; AVX512-NEXT: vpblendw {{.*#+}} xmm10 = xmm10[0,1],xmm12[2],xmm10[3,4],xmm12[5],xmm10[6,7]
; AVX512-NEXT: vinserti128 $1, %xmm8, %ymm10, %ymm8
; AVX512-NEXT: vshufi64x2 {{.*#+}} zmm5 = zmm5[0,1,2,3],zmm8[0,1,2,3]
; AVX512-NEXT: vmovdqa {{.*#+}} ymm8 = [128,128,10,11,128,128,128,128,12,13,128,128,128,128,14,15,128,128,128,128,16,17,128,128,128,128,18,19,128,128,128,128]
; AVX512-NEXT: vpshufb %ymm8, %ymm6, %ymm10
; AVX512-NEXT: vpermd %ymm6, %ymm7, %ymm6
; AVX512-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm6, %ymm6
; AVX512-NEXT: vinserti64x4 $1, %ymm6, %zmm10, %zmm6
; AVX512-NEXT: vpternlogq {{.*#+}} zmm6 = zmm6 | (zmm5 & mem)
; AVX512-NEXT: vmovdqa (%rdi), %ymm5
; AVX512-NEXT: vpshufb %ymm9, %ymm5, %ymm5
; AVX512-NEXT: vmovdqa (%rsi), %ymm7
; AVX512-NEXT: vpshufb %ymm11, %ymm7, %ymm7
; AVX512-NEXT: vpor %ymm5, %ymm7, %ymm5
; AVX512-NEXT: vprold $16, %xmm1, %xmm7
; AVX512-NEXT: vpshufd {{.*#+}} xmm9 = xmm2[1,1,2,2]
; AVX512-NEXT: vpblendw {{.*#+}} xmm7 = xmm9[0,1],xmm7[2],xmm9[3,4],xmm7[5],xmm9[6,7]
; AVX512-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
; AVX512-NEXT: vpshufb %xmm3, %xmm1, %xmm1
; AVX512-NEXT: vinserti128 $1, %xmm7, %ymm1, %ymm1
; AVX512-NEXT: vshufi64x2 {{.*#+}} zmm1 = zmm1[0,1,2,3],zmm5[0,1,2,3]
; AVX512-NEXT: vpshufb %ymm8, %ymm4, %ymm2
; AVX512-NEXT: vpmovsxbd {{.*#+}} ymm3 = [0,0,0,0,1,1,0,2]
; AVX512-NEXT: vpermd %ymm4, %ymm3, %ymm3
; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm4 = [65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535]
; AVX512-NEXT: vpandn %ymm3, %ymm4, %ymm3
; AVX512-NEXT: vinserti64x4 $1, %ymm2, %zmm3, %zmm2
; AVX512-NEXT: vpternlogq {{.*#+}} zmm2 = zmm2 | (zmm1 & zmm4)
; AVX512-NEXT: vmovdqa64 %zmm2, (%rcx)
; AVX512-NEXT: vmovdqa64 %zmm6, 128(%rcx)
; AVX512-NEXT: vmovdqa64 %zmm0, 64(%rcx)
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
;
; AVX512-FCP-LABEL: store_i16_stride3_vf32:
; AVX512-FCP: # %bb.0:
; AVX512-FCP-NEXT: vmovdqa (%rsi), %xmm0
; AVX512-FCP-NEXT: vmovdqa 16(%rsi), %xmm3
; AVX512-FCP-NEXT: vmovdqa 32(%rsi), %xmm2
; AVX512-FCP-NEXT: vprold $16, %xmm2, %xmm4
; AVX512-FCP-NEXT: vmovdqa (%rdi), %xmm1
; AVX512-FCP-NEXT: vmovdqa 16(%rdi), %xmm5
; AVX512-FCP-NEXT: vmovdqa 32(%rdi), %xmm6
; AVX512-FCP-NEXT: vpshufd {{.*#+}} xmm7 = xmm6[1,1,2,2]
; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm4 = xmm7[0,1],xmm4[2],xmm7[3,4],xmm4[5],xmm7[6,7]
; AVX512-FCP-NEXT: vpunpcklwd {{.*#+}} xmm6 = xmm6[0],xmm2[0],xmm6[1],xmm2[1],xmm6[2],xmm2[2],xmm6[3],xmm2[3]
; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,2,3,4,5,4,5,6,7,10,11,8,9,10,11]
; AVX512-FCP-NEXT: vpshufb %xmm2, %xmm6, %xmm6
; AVX512-FCP-NEXT: vinserti128 $1, %xmm4, %ymm6, %ymm4
; AVX512-FCP-NEXT: vpunpckhwd {{.*#+}} xmm6 = xmm3[4],xmm5[4],xmm3[5],xmm5[5],xmm3[6],xmm5[6],xmm3[7],xmm5[7]
; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm7 = [4,5,10,11,10,11,8,9,8,9,14,15,12,13,14,15]
; AVX512-FCP-NEXT: vpshufb %xmm7, %xmm6, %xmm6
; AVX512-FCP-NEXT: vprold $16, %xmm3, %xmm3
; AVX512-FCP-NEXT: vpshufd {{.*#+}} xmm5 = xmm5[1,1,2,2]
; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm3 = xmm5[0,1],xmm3[2],xmm5[3,4],xmm3[5],xmm5[6,7]
; AVX512-FCP-NEXT: vinserti128 $1, %xmm6, %ymm3, %ymm3
; AVX512-FCP-NEXT: vshufi64x2 {{.*#+}} zmm4 = zmm3[0,1,2,3],zmm4[0,1,2,3]
; AVX512-FCP-NEXT: vmovdqa (%rdx), %ymm3
; AVX512-FCP-NEXT: vmovdqa 32(%rdx), %ymm5
; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} zmm6 = [21,21,0,22,22,0,23,23,0,0,0,0,1,1,0,2]
; AVX512-FCP-NEXT: vpermi2d (%rdx), %zmm5, %zmm6
; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm6 = zmm6 ^ (mem & (zmm6 ^ zmm4))
; AVX512-FCP-NEXT: vmovdqa 32(%rdi), %ymm4
; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm8 = [128,128,128,128,12,13,128,128,128,128,14,15,128,128,128,128,16,17,128,128,128,128,18,19,128,128,128,128,20,21,128,128]
; AVX512-FCP-NEXT: vpshufb %ymm8, %ymm4, %ymm4
; AVX512-FCP-NEXT: vmovdqa 32(%rsi), %ymm9
; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm10 = [10,11,0,1,128,128,12,13,2,3,128,128,14,15,4,5,128,128,16,17,28,29,128,128,18,19,18,19,128,128,20,21]
; AVX512-FCP-NEXT: vpshufb %ymm10, %ymm9, %ymm9
; AVX512-FCP-NEXT: vpor %ymm4, %ymm9, %ymm4
; AVX512-FCP-NEXT: vmovdqa 48(%rdi), %xmm9
; AVX512-FCP-NEXT: vmovdqa 48(%rsi), %xmm11
; AVX512-FCP-NEXT: vpunpckhwd {{.*#+}} xmm12 = xmm11[4],xmm9[4],xmm11[5],xmm9[5],xmm11[6],xmm9[6],xmm11[7],xmm9[7]
; AVX512-FCP-NEXT: vpshufb %xmm7, %xmm12, %xmm7
; AVX512-FCP-NEXT: vprold $16, %xmm11, %xmm11
; AVX512-FCP-NEXT: vpshufd {{.*#+}} xmm9 = xmm9[1,1,2,2]
; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm9 = xmm9[0,1],xmm11[2],xmm9[3,4],xmm11[5],xmm9[6,7]
; AVX512-FCP-NEXT: vinserti128 $1, %xmm7, %ymm9, %ymm7
; AVX512-FCP-NEXT: vshufi64x2 {{.*#+}} zmm4 = zmm4[0,1,2,3],zmm7[0,1,2,3]
; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm7 = [128,128,10,11,128,128,128,128,12,13,128,128,128,128,14,15,128,128,128,128,16,17,128,128,128,128,18,19,128,128,128,128]
; AVX512-FCP-NEXT: vpshufb %ymm7, %ymm5, %ymm9
; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm11 = [5,5,0,6,6,0,7,7]
; AVX512-FCP-NEXT: vpermd %ymm5, %ymm11, %ymm5
; AVX512-FCP-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm5, %ymm5
; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm5, %zmm9, %zmm5
; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm5 = zmm5 | (zmm4 & mem)
; AVX512-FCP-NEXT: vmovdqa (%rdi), %ymm4
; AVX512-FCP-NEXT: vpshufb %ymm8, %ymm4, %ymm4
; AVX512-FCP-NEXT: vmovdqa (%rsi), %ymm8
; AVX512-FCP-NEXT: vpshufb %ymm10, %ymm8, %ymm8
; AVX512-FCP-NEXT: vpor %ymm4, %ymm8, %ymm4
; AVX512-FCP-NEXT: vprold $16, %xmm0, %xmm8
; AVX512-FCP-NEXT: vpshufd {{.*#+}} xmm9 = xmm1[1,1,2,2]
; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm8 = xmm9[0,1],xmm8[2],xmm9[3,4],xmm8[5],xmm9[6,7]
; AVX512-FCP-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; AVX512-FCP-NEXT: vpshufb %xmm2, %xmm0, %xmm0
; AVX512-FCP-NEXT: vinserti128 $1, %xmm8, %ymm0, %ymm0
; AVX512-FCP-NEXT: vshufi64x2 {{.*#+}} zmm0 = zmm0[0,1,2,3],zmm4[0,1,2,3]
; AVX512-FCP-NEXT: vpshufb %ymm7, %ymm3, %ymm1
; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm2 = [0,0,0,0,1,1,0,2]
; AVX512-FCP-NEXT: vpermd %ymm3, %ymm2, %ymm2
; AVX512-FCP-NEXT: vmovdqa64 {{.*#+}} zmm3 = [65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535]
; AVX512-FCP-NEXT: vpandn %ymm2, %ymm3, %ymm2
; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm1, %zmm2, %zmm1
; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm1 = zmm1 | (zmm0 & zmm3)
; AVX512-FCP-NEXT: vmovdqa64 %zmm1, (%rcx)
; AVX512-FCP-NEXT: vmovdqa64 %zmm5, 128(%rcx)
; AVX512-FCP-NEXT: vmovdqa64 %zmm6, 64(%rcx)
; AVX512-FCP-NEXT: vmovdqa64 (%rdx), %zmm0
; AVX512-FCP-NEXT: vmovdqa (%rsi), %xmm1
; AVX512-FCP-NEXT: vmovdqa 16(%rsi), %xmm4
; AVX512-FCP-NEXT: vmovdqa 32(%rsi), %xmm3
; AVX512-FCP-NEXT: vprold $16, %xmm3, %xmm5
; AVX512-FCP-NEXT: vmovdqa (%rdi), %xmm2
; AVX512-FCP-NEXT: vmovdqa 16(%rdi), %xmm6
; AVX512-FCP-NEXT: vmovdqa 32(%rdi), %xmm7
; AVX512-FCP-NEXT: vpshufd {{.*#+}} xmm8 = xmm7[1,1,2,2]
; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm5 = xmm8[0,1],xmm5[2],xmm8[3,4],xmm5[5],xmm8[6,7]
; AVX512-FCP-NEXT: vpunpcklwd {{.*#+}} xmm7 = xmm7[0],xmm3[0],xmm7[1],xmm3[1],xmm7[2],xmm3[2],xmm7[3],xmm3[3]
; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm3 = [0,1,2,3,4,5,4,5,6,7,10,11,8,9,10,11]
; AVX512-FCP-NEXT: vpshufb %xmm3, %xmm7, %xmm7
; AVX512-FCP-NEXT: vinserti128 $1, %xmm5, %ymm7, %ymm5
; AVX512-FCP-NEXT: vpunpckhwd {{.*#+}} xmm7 = xmm4[4],xmm6[4],xmm4[5],xmm6[5],xmm4[6],xmm6[6],xmm4[7],xmm6[7]
; AVX512-FCP-NEXT: vmovdqa {{.*#+}} xmm8 = [4,5,10,11,10,11,8,9,8,9,14,15,12,13,14,15]
; AVX512-FCP-NEXT: vpshufb %xmm8, %xmm7, %xmm7
; AVX512-FCP-NEXT: vprold $16, %xmm4, %xmm4
; AVX512-FCP-NEXT: vpshufd {{.*#+}} xmm6 = xmm6[1,1,2,2]
; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm4 = xmm6[0,1],xmm4[2],xmm6[3,4],xmm4[5],xmm6[6,7]
; AVX512-FCP-NEXT: vinserti128 $1, %xmm7, %ymm4, %ymm4
; AVX512-FCP-NEXT: vshufi64x2 {{.*#+}} zmm5 = zmm4[0,1,2,3],zmm5[0,1,2,3]
; AVX512-FCP-NEXT: vmovdqa (%rdx), %ymm4
; AVX512-FCP-NEXT: vmovdqa 32(%rdx), %ymm6
; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} zmm7 = [5,5,0,6,6,0,7,7,0,16,16,0,17,17,0,18]
; AVX512-FCP-NEXT: vpermt2d %zmm6, %zmm7, %zmm0
; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm0 = zmm0 ^ (mem & (zmm0 ^ zmm5))
; AVX512-FCP-NEXT: vmovdqa 32(%rdi), %ymm5
; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm9 = [128,128,128,128,12,13,128,128,128,128,14,15,128,128,128,128,16,17,128,128,128,128,18,19,128,128,128,128,20,21,128,128]
; AVX512-FCP-NEXT: vpshufb %ymm9, %ymm5, %ymm5
; AVX512-FCP-NEXT: vmovdqa 32(%rsi), %ymm10
; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm11 = [10,11,0,1,128,128,12,13,2,3,128,128,14,15,4,5,128,128,16,17,28,29,128,128,18,19,18,19,128,128,20,21]
; AVX512-FCP-NEXT: vpshufb %ymm11, %ymm10, %ymm10
; AVX512-FCP-NEXT: vpor %ymm5, %ymm10, %ymm5
; AVX512-FCP-NEXT: vmovdqa 48(%rdi), %xmm10
; AVX512-FCP-NEXT: vmovdqa 48(%rsi), %xmm12
; AVX512-FCP-NEXT: vpunpckhwd {{.*#+}} xmm13 = xmm12[4],xmm10[4],xmm12[5],xmm10[5],xmm12[6],xmm10[6],xmm12[7],xmm10[7]
; AVX512-FCP-NEXT: vpshufb %xmm8, %xmm13, %xmm8
; AVX512-FCP-NEXT: vprold $16, %xmm12, %xmm12
; AVX512-FCP-NEXT: vpshufd {{.*#+}} xmm10 = xmm10[1,1,2,2]
; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm10 = xmm10[0,1],xmm12[2],xmm10[3,4],xmm12[5],xmm10[6,7]
; AVX512-FCP-NEXT: vinserti128 $1, %xmm8, %ymm10, %ymm8
; AVX512-FCP-NEXT: vshufi64x2 {{.*#+}} zmm5 = zmm5[0,1,2,3],zmm8[0,1,2,3]
; AVX512-FCP-NEXT: vmovdqa {{.*#+}} ymm8 = [128,128,10,11,128,128,128,128,12,13,128,128,128,128,14,15,128,128,128,128,16,17,128,128,128,128,18,19,128,128,128,128]
; AVX512-FCP-NEXT: vpshufb %ymm8, %ymm6, %ymm10
; AVX512-FCP-NEXT: vpermd %ymm6, %ymm7, %ymm6
; AVX512-FCP-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm6, %ymm6
; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm6, %zmm10, %zmm6
; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm6 = zmm6 | (zmm5 & mem)
; AVX512-FCP-NEXT: vmovdqa (%rdi), %ymm5
; AVX512-FCP-NEXT: vpshufb %ymm9, %ymm5, %ymm5
; AVX512-FCP-NEXT: vmovdqa (%rsi), %ymm7
; AVX512-FCP-NEXT: vpshufb %ymm11, %ymm7, %ymm7
; AVX512-FCP-NEXT: vpor %ymm5, %ymm7, %ymm5
; AVX512-FCP-NEXT: vprold $16, %xmm1, %xmm7
; AVX512-FCP-NEXT: vpshufd {{.*#+}} xmm9 = xmm2[1,1,2,2]
; AVX512-FCP-NEXT: vpblendw {{.*#+}} xmm7 = xmm9[0,1],xmm7[2],xmm9[3,4],xmm7[5],xmm9[6,7]
; AVX512-FCP-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
; AVX512-FCP-NEXT: vpshufb %xmm3, %xmm1, %xmm1
; AVX512-FCP-NEXT: vinserti128 $1, %xmm7, %ymm1, %ymm1
; AVX512-FCP-NEXT: vshufi64x2 {{.*#+}} zmm1 = zmm1[0,1,2,3],zmm5[0,1,2,3]
; AVX512-FCP-NEXT: vpshufb %ymm8, %ymm4, %ymm2
; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} ymm3 = [0,0,0,0,1,1,0,2]
; AVX512-FCP-NEXT: vpermd %ymm4, %ymm3, %ymm3
; AVX512-FCP-NEXT: vmovdqa64 {{.*#+}} zmm4 = [65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535]
; AVX512-FCP-NEXT: vpandn %ymm3, %ymm4, %ymm3
; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm2, %zmm3, %zmm2
; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm2 = zmm2 | (zmm1 & zmm4)
; AVX512-FCP-NEXT: vmovdqa64 %zmm2, (%rcx)
; AVX512-FCP-NEXT: vmovdqa64 %zmm6, 128(%rcx)
; AVX512-FCP-NEXT: vmovdqa64 %zmm0, 64(%rcx)
; AVX512-FCP-NEXT: vzeroupper
; AVX512-FCP-NEXT: retq
;
; AVX512DQ-LABEL: store_i16_stride3_vf32:
; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vmovdqa (%rsi), %xmm0
; AVX512DQ-NEXT: vmovdqa 16(%rsi), %xmm3
; AVX512DQ-NEXT: vmovdqa 32(%rsi), %xmm2
; AVX512DQ-NEXT: vprold $16, %xmm2, %xmm4
; AVX512DQ-NEXT: vmovdqa (%rdi), %xmm1
; AVX512DQ-NEXT: vmovdqa 16(%rdi), %xmm5
; AVX512DQ-NEXT: vmovdqa 32(%rdi), %xmm6
; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm7 = xmm6[1,1,2,2]
; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm4 = xmm7[0,1],xmm4[2],xmm7[3,4],xmm4[5],xmm7[6,7]
; AVX512DQ-NEXT: vpunpcklwd {{.*#+}} xmm6 = xmm6[0],xmm2[0],xmm6[1],xmm2[1],xmm6[2],xmm2[2],xmm6[3],xmm2[3]
; AVX512DQ-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,2,3,4,5,4,5,6,7,10,11,8,9,10,11]
; AVX512DQ-NEXT: vpshufb %xmm2, %xmm6, %xmm6
; AVX512DQ-NEXT: vinserti128 $1, %xmm4, %ymm6, %ymm4
; AVX512DQ-NEXT: vpunpckhwd {{.*#+}} xmm6 = xmm3[4],xmm5[4],xmm3[5],xmm5[5],xmm3[6],xmm5[6],xmm3[7],xmm5[7]
; AVX512DQ-NEXT: vmovdqa {{.*#+}} xmm7 = [4,5,10,11,10,11,8,9,8,9,14,15,12,13,14,15]
; AVX512DQ-NEXT: vpshufb %xmm7, %xmm6, %xmm6
; AVX512DQ-NEXT: vprold $16, %xmm3, %xmm3
; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm5 = xmm5[1,1,2,2]
; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm3 = xmm5[0,1],xmm3[2],xmm5[3,4],xmm3[5],xmm5[6,7]
; AVX512DQ-NEXT: vinserti128 $1, %xmm6, %ymm3, %ymm3
; AVX512DQ-NEXT: vshufi64x2 {{.*#+}} zmm4 = zmm3[0,1,2,3],zmm4[0,1,2,3]
; AVX512DQ-NEXT: vmovdqa (%rdx), %ymm3
; AVX512DQ-NEXT: vmovdqa 32(%rdx), %ymm5
; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} zmm6 = [21,21,0,22,22,0,23,23,0,0,0,0,1,1,0,2]
; AVX512DQ-NEXT: vpermi2d (%rdx), %zmm5, %zmm6
; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm6 = zmm6 ^ (mem & (zmm6 ^ zmm4))
; AVX512DQ-NEXT: vmovdqa 32(%rdi), %ymm4
; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm8 = [128,128,128,128,12,13,128,128,128,128,14,15,128,128,128,128,16,17,128,128,128,128,18,19,128,128,128,128,20,21,128,128]
; AVX512DQ-NEXT: vpshufb %ymm8, %ymm4, %ymm4
; AVX512DQ-NEXT: vmovdqa 32(%rsi), %ymm9
; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm10 = [10,11,0,1,128,128,12,13,2,3,128,128,14,15,4,5,128,128,16,17,28,29,128,128,18,19,18,19,128,128,20,21]
; AVX512DQ-NEXT: vpshufb %ymm10, %ymm9, %ymm9
; AVX512DQ-NEXT: vpor %ymm4, %ymm9, %ymm4
; AVX512DQ-NEXT: vmovdqa 48(%rdi), %xmm9
; AVX512DQ-NEXT: vmovdqa 48(%rsi), %xmm11
; AVX512DQ-NEXT: vpunpckhwd {{.*#+}} xmm12 = xmm11[4],xmm9[4],xmm11[5],xmm9[5],xmm11[6],xmm9[6],xmm11[7],xmm9[7]
; AVX512DQ-NEXT: vpshufb %xmm7, %xmm12, %xmm7
; AVX512DQ-NEXT: vprold $16, %xmm11, %xmm11
; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm9 = xmm9[1,1,2,2]
; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm9 = xmm9[0,1],xmm11[2],xmm9[3,4],xmm11[5],xmm9[6,7]
; AVX512DQ-NEXT: vinserti128 $1, %xmm7, %ymm9, %ymm7
; AVX512DQ-NEXT: vshufi64x2 {{.*#+}} zmm4 = zmm4[0,1,2,3],zmm7[0,1,2,3]
; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm7 = [128,128,10,11,128,128,128,128,12,13,128,128,128,128,14,15,128,128,128,128,16,17,128,128,128,128,18,19,128,128,128,128]
; AVX512DQ-NEXT: vpshufb %ymm7, %ymm5, %ymm9
; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} ymm11 = [5,5,0,6,6,0,7,7]
; AVX512DQ-NEXT: vpermd %ymm5, %ymm11, %ymm5
; AVX512DQ-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm5, %ymm5
; AVX512DQ-NEXT: vinserti64x4 $1, %ymm5, %zmm9, %zmm5
; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm5 = zmm5 | (zmm4 & mem)
; AVX512DQ-NEXT: vmovdqa (%rdi), %ymm4
; AVX512DQ-NEXT: vpshufb %ymm8, %ymm4, %ymm4
; AVX512DQ-NEXT: vmovdqa (%rsi), %ymm8
; AVX512DQ-NEXT: vpshufb %ymm10, %ymm8, %ymm8
; AVX512DQ-NEXT: vpor %ymm4, %ymm8, %ymm4
; AVX512DQ-NEXT: vprold $16, %xmm0, %xmm8
; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm9 = xmm1[1,1,2,2]
; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm8 = xmm9[0,1],xmm8[2],xmm9[3,4],xmm8[5],xmm9[6,7]
; AVX512DQ-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; AVX512DQ-NEXT: vpshufb %xmm2, %xmm0, %xmm0
; AVX512DQ-NEXT: vinserti128 $1, %xmm8, %ymm0, %ymm0
; AVX512DQ-NEXT: vshufi64x2 {{.*#+}} zmm0 = zmm0[0,1,2,3],zmm4[0,1,2,3]
; AVX512DQ-NEXT: vpshufb %ymm7, %ymm3, %ymm1
; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} ymm2 = [0,0,0,0,1,1,0,2]
; AVX512DQ-NEXT: vpermd %ymm3, %ymm2, %ymm2
; AVX512DQ-NEXT: vmovdqa64 {{.*#+}} zmm3 = [65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535]
; AVX512DQ-NEXT: vpandn %ymm2, %ymm3, %ymm2
; AVX512DQ-NEXT: vinserti64x4 $1, %ymm1, %zmm2, %zmm1
; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm1 = zmm1 | (zmm0 & zmm3)
; AVX512DQ-NEXT: vmovdqa64 %zmm1, (%rcx)
; AVX512DQ-NEXT: vmovdqa64 %zmm5, 128(%rcx)
; AVX512DQ-NEXT: vmovdqa64 %zmm6, 64(%rcx)
; AVX512DQ-NEXT: vmovdqa64 (%rdx), %zmm0
; AVX512DQ-NEXT: vmovdqa (%rsi), %xmm1
; AVX512DQ-NEXT: vmovdqa 16(%rsi), %xmm4
; AVX512DQ-NEXT: vmovdqa 32(%rsi), %xmm3
; AVX512DQ-NEXT: vprold $16, %xmm3, %xmm5
; AVX512DQ-NEXT: vmovdqa (%rdi), %xmm2
; AVX512DQ-NEXT: vmovdqa 16(%rdi), %xmm6
; AVX512DQ-NEXT: vmovdqa 32(%rdi), %xmm7
; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm8 = xmm7[1,1,2,2]
; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm5 = xmm8[0,1],xmm5[2],xmm8[3,4],xmm5[5],xmm8[6,7]
; AVX512DQ-NEXT: vpunpcklwd {{.*#+}} xmm7 = xmm7[0],xmm3[0],xmm7[1],xmm3[1],xmm7[2],xmm3[2],xmm7[3],xmm3[3]
; AVX512DQ-NEXT: vmovdqa {{.*#+}} xmm3 = [0,1,2,3,4,5,4,5,6,7,10,11,8,9,10,11]
; AVX512DQ-NEXT: vpshufb %xmm3, %xmm7, %xmm7
; AVX512DQ-NEXT: vinserti128 $1, %xmm5, %ymm7, %ymm5
; AVX512DQ-NEXT: vpunpckhwd {{.*#+}} xmm7 = xmm4[4],xmm6[4],xmm4[5],xmm6[5],xmm4[6],xmm6[6],xmm4[7],xmm6[7]
; AVX512DQ-NEXT: vmovdqa {{.*#+}} xmm8 = [4,5,10,11,10,11,8,9,8,9,14,15,12,13,14,15]
; AVX512DQ-NEXT: vpshufb %xmm8, %xmm7, %xmm7
; AVX512DQ-NEXT: vprold $16, %xmm4, %xmm4
; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm6 = xmm6[1,1,2,2]
; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm4 = xmm6[0,1],xmm4[2],xmm6[3,4],xmm4[5],xmm6[6,7]
; AVX512DQ-NEXT: vinserti128 $1, %xmm7, %ymm4, %ymm4
; AVX512DQ-NEXT: vshufi64x2 {{.*#+}} zmm5 = zmm4[0,1,2,3],zmm5[0,1,2,3]
; AVX512DQ-NEXT: vmovdqa (%rdx), %ymm4
; AVX512DQ-NEXT: vmovdqa 32(%rdx), %ymm6
; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} zmm7 = [5,5,0,6,6,0,7,7,0,16,16,0,17,17,0,18]
; AVX512DQ-NEXT: vpermt2d %zmm6, %zmm7, %zmm0
; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm0 = zmm0 ^ (mem & (zmm0 ^ zmm5))
; AVX512DQ-NEXT: vmovdqa 32(%rdi), %ymm5
; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm9 = [128,128,128,128,12,13,128,128,128,128,14,15,128,128,128,128,16,17,128,128,128,128,18,19,128,128,128,128,20,21,128,128]
; AVX512DQ-NEXT: vpshufb %ymm9, %ymm5, %ymm5
; AVX512DQ-NEXT: vmovdqa 32(%rsi), %ymm10
; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm11 = [10,11,0,1,128,128,12,13,2,3,128,128,14,15,4,5,128,128,16,17,28,29,128,128,18,19,18,19,128,128,20,21]
; AVX512DQ-NEXT: vpshufb %ymm11, %ymm10, %ymm10
; AVX512DQ-NEXT: vpor %ymm5, %ymm10, %ymm5
; AVX512DQ-NEXT: vmovdqa 48(%rdi), %xmm10
; AVX512DQ-NEXT: vmovdqa 48(%rsi), %xmm12
; AVX512DQ-NEXT: vpunpckhwd {{.*#+}} xmm13 = xmm12[4],xmm10[4],xmm12[5],xmm10[5],xmm12[6],xmm10[6],xmm12[7],xmm10[7]
; AVX512DQ-NEXT: vpshufb %xmm8, %xmm13, %xmm8
; AVX512DQ-NEXT: vprold $16, %xmm12, %xmm12
; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm10 = xmm10[1,1,2,2]
; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm10 = xmm10[0,1],xmm12[2],xmm10[3,4],xmm12[5],xmm10[6,7]
; AVX512DQ-NEXT: vinserti128 $1, %xmm8, %ymm10, %ymm8
; AVX512DQ-NEXT: vshufi64x2 {{.*#+}} zmm5 = zmm5[0,1,2,3],zmm8[0,1,2,3]
; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm8 = [128,128,10,11,128,128,128,128,12,13,128,128,128,128,14,15,128,128,128,128,16,17,128,128,128,128,18,19,128,128,128,128]
; AVX512DQ-NEXT: vpshufb %ymm8, %ymm6, %ymm10
; AVX512DQ-NEXT: vpermd %ymm6, %ymm7, %ymm6
; AVX512DQ-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm6, %ymm6
; AVX512DQ-NEXT: vinserti64x4 $1, %ymm6, %zmm10, %zmm6
; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm6 = zmm6 | (zmm5 & mem)
; AVX512DQ-NEXT: vmovdqa (%rdi), %ymm5
; AVX512DQ-NEXT: vpshufb %ymm9, %ymm5, %ymm5
; AVX512DQ-NEXT: vmovdqa (%rsi), %ymm7
; AVX512DQ-NEXT: vpshufb %ymm11, %ymm7, %ymm7
; AVX512DQ-NEXT: vpor %ymm5, %ymm7, %ymm5
; AVX512DQ-NEXT: vprold $16, %xmm1, %xmm7
; AVX512DQ-NEXT: vpshufd {{.*#+}} xmm9 = xmm2[1,1,2,2]
; AVX512DQ-NEXT: vpblendw {{.*#+}} xmm7 = xmm9[0,1],xmm7[2],xmm9[3,4],xmm7[5],xmm9[6,7]
; AVX512DQ-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
; AVX512DQ-NEXT: vpshufb %xmm3, %xmm1, %xmm1
; AVX512DQ-NEXT: vinserti128 $1, %xmm7, %ymm1, %ymm1
; AVX512DQ-NEXT: vshufi64x2 {{.*#+}} zmm1 = zmm1[0,1,2,3],zmm5[0,1,2,3]
; AVX512DQ-NEXT: vpshufb %ymm8, %ymm4, %ymm2
; AVX512DQ-NEXT: vpmovsxbd {{.*#+}} ymm3 = [0,0,0,0,1,1,0,2]
; AVX512DQ-NEXT: vpermd %ymm4, %ymm3, %ymm3
; AVX512DQ-NEXT: vmovdqa64 {{.*#+}} zmm4 = [65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535]
; AVX512DQ-NEXT: vpandn %ymm3, %ymm4, %ymm3
; AVX512DQ-NEXT: vinserti64x4 $1, %ymm2, %zmm3, %zmm2
; AVX512DQ-NEXT: vpternlogq {{.*#+}} zmm2 = zmm2 | (zmm1 & zmm4)
; AVX512DQ-NEXT: vmovdqa64 %zmm2, (%rcx)
; AVX512DQ-NEXT: vmovdqa64 %zmm6, 128(%rcx)
; AVX512DQ-NEXT: vmovdqa64 %zmm0, 64(%rcx)
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
; AVX512DQ-FCP-LABEL: store_i16_stride3_vf32:
; AVX512DQ-FCP: # %bb.0:
; AVX512DQ-FCP-NEXT: vmovdqa (%rsi), %xmm0
; AVX512DQ-FCP-NEXT: vmovdqa 16(%rsi), %xmm3
; AVX512DQ-FCP-NEXT: vmovdqa 32(%rsi), %xmm2
; AVX512DQ-FCP-NEXT: vprold $16, %xmm2, %xmm4
; AVX512DQ-FCP-NEXT: vmovdqa (%rdi), %xmm1
; AVX512DQ-FCP-NEXT: vmovdqa 16(%rdi), %xmm5
; AVX512DQ-FCP-NEXT: vmovdqa 32(%rdi), %xmm6
; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} xmm7 = xmm6[1,1,2,2]
; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm4 = xmm7[0,1],xmm4[2],xmm7[3,4],xmm4[5],xmm7[6,7]
; AVX512DQ-FCP-NEXT: vpunpcklwd {{.*#+}} xmm6 = xmm6[0],xmm2[0],xmm6[1],xmm2[1],xmm6[2],xmm2[2],xmm6[3],xmm2[3]
; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,2,3,4,5,4,5,6,7,10,11,8,9,10,11]
; AVX512DQ-FCP-NEXT: vpshufb %xmm2, %xmm6, %xmm6
; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm4, %ymm6, %ymm4
; AVX512DQ-FCP-NEXT: vpunpckhwd {{.*#+}} xmm6 = xmm3[4],xmm5[4],xmm3[5],xmm5[5],xmm3[6],xmm5[6],xmm3[7],xmm5[7]
; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm7 = [4,5,10,11,10,11,8,9,8,9,14,15,12,13,14,15]
; AVX512DQ-FCP-NEXT: vpshufb %xmm7, %xmm6, %xmm6
; AVX512DQ-FCP-NEXT: vprold $16, %xmm3, %xmm3
; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} xmm5 = xmm5[1,1,2,2]
; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm3 = xmm5[0,1],xmm3[2],xmm5[3,4],xmm3[5],xmm5[6,7]
; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm6, %ymm3, %ymm3
; AVX512DQ-FCP-NEXT: vshufi64x2 {{.*#+}} zmm4 = zmm3[0,1,2,3],zmm4[0,1,2,3]
; AVX512DQ-FCP-NEXT: vmovdqa (%rdx), %ymm3
; AVX512DQ-FCP-NEXT: vmovdqa 32(%rdx), %ymm5
; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} zmm6 = [21,21,0,22,22,0,23,23,0,0,0,0,1,1,0,2]
; AVX512DQ-FCP-NEXT: vpermi2d (%rdx), %zmm5, %zmm6
; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm6 = zmm6 ^ (mem & (zmm6 ^ zmm4))
; AVX512DQ-FCP-NEXT: vmovdqa 32(%rdi), %ymm4
; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm8 = [128,128,128,128,12,13,128,128,128,128,14,15,128,128,128,128,16,17,128,128,128,128,18,19,128,128,128,128,20,21,128,128]
; AVX512DQ-FCP-NEXT: vpshufb %ymm8, %ymm4, %ymm4
; AVX512DQ-FCP-NEXT: vmovdqa 32(%rsi), %ymm9
; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm10 = [10,11,0,1,128,128,12,13,2,3,128,128,14,15,4,5,128,128,16,17,28,29,128,128,18,19,18,19,128,128,20,21]
; AVX512DQ-FCP-NEXT: vpshufb %ymm10, %ymm9, %ymm9
; AVX512DQ-FCP-NEXT: vpor %ymm4, %ymm9, %ymm4
; AVX512DQ-FCP-NEXT: vmovdqa 48(%rdi), %xmm9
; AVX512DQ-FCP-NEXT: vmovdqa 48(%rsi), %xmm11
; AVX512DQ-FCP-NEXT: vpunpckhwd {{.*#+}} xmm12 = xmm11[4],xmm9[4],xmm11[5],xmm9[5],xmm11[6],xmm9[6],xmm11[7],xmm9[7]
; AVX512DQ-FCP-NEXT: vpshufb %xmm7, %xmm12, %xmm7
; AVX512DQ-FCP-NEXT: vprold $16, %xmm11, %xmm11
; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} xmm9 = xmm9[1,1,2,2]
; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm9 = xmm9[0,1],xmm11[2],xmm9[3,4],xmm11[5],xmm9[6,7]
; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm7, %ymm9, %ymm7
; AVX512DQ-FCP-NEXT: vshufi64x2 {{.*#+}} zmm4 = zmm4[0,1,2,3],zmm7[0,1,2,3]
; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm7 = [128,128,10,11,128,128,128,128,12,13,128,128,128,128,14,15,128,128,128,128,16,17,128,128,128,128,18,19,128,128,128,128]
; AVX512DQ-FCP-NEXT: vpshufb %ymm7, %ymm5, %ymm9
; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm11 = [5,5,0,6,6,0,7,7]
; AVX512DQ-FCP-NEXT: vpermd %ymm5, %ymm11, %ymm5
; AVX512DQ-FCP-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm5, %ymm5
; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm5, %zmm9, %zmm5
; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm5 = zmm5 | (zmm4 & mem)
; AVX512DQ-FCP-NEXT: vmovdqa (%rdi), %ymm4
; AVX512DQ-FCP-NEXT: vpshufb %ymm8, %ymm4, %ymm4
; AVX512DQ-FCP-NEXT: vmovdqa (%rsi), %ymm8
; AVX512DQ-FCP-NEXT: vpshufb %ymm10, %ymm8, %ymm8
; AVX512DQ-FCP-NEXT: vpor %ymm4, %ymm8, %ymm4
; AVX512DQ-FCP-NEXT: vprold $16, %xmm0, %xmm8
; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} xmm9 = xmm1[1,1,2,2]
; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm8 = xmm9[0,1],xmm8[2],xmm9[3,4],xmm8[5],xmm9[6,7]
; AVX512DQ-FCP-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; AVX512DQ-FCP-NEXT: vpshufb %xmm2, %xmm0, %xmm0
; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm8, %ymm0, %ymm0
; AVX512DQ-FCP-NEXT: vshufi64x2 {{.*#+}} zmm0 = zmm0[0,1,2,3],zmm4[0,1,2,3]
; AVX512DQ-FCP-NEXT: vpshufb %ymm7, %ymm3, %ymm1
; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm2 = [0,0,0,0,1,1,0,2]
; AVX512DQ-FCP-NEXT: vpermd %ymm3, %ymm2, %ymm2
; AVX512DQ-FCP-NEXT: vmovdqa64 {{.*#+}} zmm3 = [65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535]
; AVX512DQ-FCP-NEXT: vpandn %ymm2, %ymm3, %ymm2
; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm1, %zmm2, %zmm1
; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm1 = zmm1 | (zmm0 & zmm3)
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm1, (%rcx)
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm5, 128(%rcx)
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm6, 64(%rcx)
; AVX512DQ-FCP-NEXT: vmovdqa64 (%rdx), %zmm0
; AVX512DQ-FCP-NEXT: vmovdqa (%rsi), %xmm1
; AVX512DQ-FCP-NEXT: vmovdqa 16(%rsi), %xmm4
; AVX512DQ-FCP-NEXT: vmovdqa 32(%rsi), %xmm3
; AVX512DQ-FCP-NEXT: vprold $16, %xmm3, %xmm5
; AVX512DQ-FCP-NEXT: vmovdqa (%rdi), %xmm2
; AVX512DQ-FCP-NEXT: vmovdqa 16(%rdi), %xmm6
; AVX512DQ-FCP-NEXT: vmovdqa 32(%rdi), %xmm7
; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} xmm8 = xmm7[1,1,2,2]
; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm5 = xmm8[0,1],xmm5[2],xmm8[3,4],xmm5[5],xmm8[6,7]
; AVX512DQ-FCP-NEXT: vpunpcklwd {{.*#+}} xmm7 = xmm7[0],xmm3[0],xmm7[1],xmm3[1],xmm7[2],xmm3[2],xmm7[3],xmm3[3]
; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm3 = [0,1,2,3,4,5,4,5,6,7,10,11,8,9,10,11]
; AVX512DQ-FCP-NEXT: vpshufb %xmm3, %xmm7, %xmm7
; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm5, %ymm7, %ymm5
; AVX512DQ-FCP-NEXT: vpunpckhwd {{.*#+}} xmm7 = xmm4[4],xmm6[4],xmm4[5],xmm6[5],xmm4[6],xmm6[6],xmm4[7],xmm6[7]
; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} xmm8 = [4,5,10,11,10,11,8,9,8,9,14,15,12,13,14,15]
; AVX512DQ-FCP-NEXT: vpshufb %xmm8, %xmm7, %xmm7
; AVX512DQ-FCP-NEXT: vprold $16, %xmm4, %xmm4
; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} xmm6 = xmm6[1,1,2,2]
; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm4 = xmm6[0,1],xmm4[2],xmm6[3,4],xmm4[5],xmm6[6,7]
; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm7, %ymm4, %ymm4
; AVX512DQ-FCP-NEXT: vshufi64x2 {{.*#+}} zmm5 = zmm4[0,1,2,3],zmm5[0,1,2,3]
; AVX512DQ-FCP-NEXT: vmovdqa (%rdx), %ymm4
; AVX512DQ-FCP-NEXT: vmovdqa 32(%rdx), %ymm6
; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} zmm7 = [5,5,0,6,6,0,7,7,0,16,16,0,17,17,0,18]
; AVX512DQ-FCP-NEXT: vpermt2d %zmm6, %zmm7, %zmm0
; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm0 = zmm0 ^ (mem & (zmm0 ^ zmm5))
; AVX512DQ-FCP-NEXT: vmovdqa 32(%rdi), %ymm5
; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm9 = [128,128,128,128,12,13,128,128,128,128,14,15,128,128,128,128,16,17,128,128,128,128,18,19,128,128,128,128,20,21,128,128]
; AVX512DQ-FCP-NEXT: vpshufb %ymm9, %ymm5, %ymm5
; AVX512DQ-FCP-NEXT: vmovdqa 32(%rsi), %ymm10
; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm11 = [10,11,0,1,128,128,12,13,2,3,128,128,14,15,4,5,128,128,16,17,28,29,128,128,18,19,18,19,128,128,20,21]
; AVX512DQ-FCP-NEXT: vpshufb %ymm11, %ymm10, %ymm10
; AVX512DQ-FCP-NEXT: vpor %ymm5, %ymm10, %ymm5
; AVX512DQ-FCP-NEXT: vmovdqa 48(%rdi), %xmm10
; AVX512DQ-FCP-NEXT: vmovdqa 48(%rsi), %xmm12
; AVX512DQ-FCP-NEXT: vpunpckhwd {{.*#+}} xmm13 = xmm12[4],xmm10[4],xmm12[5],xmm10[5],xmm12[6],xmm10[6],xmm12[7],xmm10[7]
; AVX512DQ-FCP-NEXT: vpshufb %xmm8, %xmm13, %xmm8
; AVX512DQ-FCP-NEXT: vprold $16, %xmm12, %xmm12
; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} xmm10 = xmm10[1,1,2,2]
; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm10 = xmm10[0,1],xmm12[2],xmm10[3,4],xmm12[5],xmm10[6,7]
; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm8, %ymm10, %ymm8
; AVX512DQ-FCP-NEXT: vshufi64x2 {{.*#+}} zmm5 = zmm5[0,1,2,3],zmm8[0,1,2,3]
; AVX512DQ-FCP-NEXT: vmovdqa {{.*#+}} ymm8 = [128,128,10,11,128,128,128,128,12,13,128,128,128,128,14,15,128,128,128,128,16,17,128,128,128,128,18,19,128,128,128,128]
; AVX512DQ-FCP-NEXT: vpshufb %ymm8, %ymm6, %ymm10
; AVX512DQ-FCP-NEXT: vpermd %ymm6, %ymm7, %ymm6
; AVX512DQ-FCP-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm6, %ymm6
; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm6, %zmm10, %zmm6
; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm6 = zmm6 | (zmm5 & mem)
; AVX512DQ-FCP-NEXT: vmovdqa (%rdi), %ymm5
; AVX512DQ-FCP-NEXT: vpshufb %ymm9, %ymm5, %ymm5
; AVX512DQ-FCP-NEXT: vmovdqa (%rsi), %ymm7
; AVX512DQ-FCP-NEXT: vpshufb %ymm11, %ymm7, %ymm7
; AVX512DQ-FCP-NEXT: vpor %ymm5, %ymm7, %ymm5
; AVX512DQ-FCP-NEXT: vprold $16, %xmm1, %xmm7
; AVX512DQ-FCP-NEXT: vpshufd {{.*#+}} xmm9 = xmm2[1,1,2,2]
; AVX512DQ-FCP-NEXT: vpblendw {{.*#+}} xmm7 = xmm9[0,1],xmm7[2],xmm9[3,4],xmm7[5],xmm9[6,7]
; AVX512DQ-FCP-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
; AVX512DQ-FCP-NEXT: vpshufb %xmm3, %xmm1, %xmm1
; AVX512DQ-FCP-NEXT: vinserti128 $1, %xmm7, %ymm1, %ymm1
; AVX512DQ-FCP-NEXT: vshufi64x2 {{.*#+}} zmm1 = zmm1[0,1,2,3],zmm5[0,1,2,3]
; AVX512DQ-FCP-NEXT: vpshufb %ymm8, %ymm4, %ymm2
; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} ymm3 = [0,0,0,0,1,1,0,2]
; AVX512DQ-FCP-NEXT: vpermd %ymm4, %ymm3, %ymm3
; AVX512DQ-FCP-NEXT: vmovdqa64 {{.*#+}} zmm4 = [65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535,0,65535,65535]
; AVX512DQ-FCP-NEXT: vpandn %ymm3, %ymm4, %ymm3
; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm2, %zmm3, %zmm2
; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm2 = zmm2 | (zmm1 & zmm4)
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm2, (%rcx)
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm6, 128(%rcx)
; AVX512DQ-FCP-NEXT: vmovdqa64 %zmm0, 64(%rcx)
; AVX512DQ-FCP-NEXT: vzeroupper
; AVX512DQ-FCP-NEXT: retq
;

File diff suppressed because it is too large.


@@ -4966,7 +4966,7 @@ define void @store_i8_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512-FCP-NEXT: vpandn %ymm13, %ymm14, %ymm13
; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm3, %zmm13, %zmm3
; AVX512-FCP-NEXT: vmovdqa (%r8), %ymm13
; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} zmm15 = [4,6,5,5,5,5,4,6,6,6,6,6,7,7,7,7]
; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} zmm15 = [4,0,5,5,5,5,0,6,6,6,6,0,7,7,7,7]
; AVX512-FCP-NEXT: vpshufb %ymm11, %ymm13, %ymm11
; AVX512-FCP-NEXT: vpermd %ymm13, %ymm15, %ymm13
; AVX512-FCP-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm13, %ymm13
@@ -4988,7 +4988,7 @@ define void @store_i8_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512-FCP-NEXT: vpermt2q %zmm4, %zmm6, %zmm1
; AVX512-FCP-NEXT: vmovdqa64 (%r8), %zmm4
; AVX512-FCP-NEXT: vpermd %zmm10, %zmm15, %zmm6
; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} zmm7 = [6,6,6,0,7,7,7,7,16,16,16,16,16,16,17,17]
; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} zmm7 = [6,6,6,0,7,7,7,7,0,16,16,16,16,0,17,17]
; AVX512-FCP-NEXT: vpermi2d %zmm10, %zmm4, %zmm7
; AVX512-FCP-NEXT: vpermq {{.*#+}} ymm8 = ymm18[0,0,1,1]
; AVX512-FCP-NEXT: vinserti64x4 $1, %ymm16, %zmm8, %zmm8
@@ -5014,7 +5014,7 @@ define void @store_i8_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512-FCP-NEXT: vpermq {{.*#+}} zmm0 = zmm5[0,0,1,1,4,4,5,5]
; AVX512-FCP-NEXT: vpermq {{.*#+}} zmm2 = zmm2[0,0,1,1,4,4,5,5]
; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm2 = zmm2 ^ (mem & (zmm2 ^ zmm0))
; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} zmm0 = [0,0,0,0,0,0,1,1,1,1,2,2,2,2,2,2]
; AVX512-FCP-NEXT: vpmovsxbd {{.*#+}} zmm0 = [0,0,0,0,0,0,1,1,1,1,0,2,2,2,2,0]
; AVX512-FCP-NEXT: vpermd %zmm4, %zmm0, %zmm0
; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm0 = zmm0 ^ (mem & (zmm0 ^ zmm2))
; AVX512-FCP-NEXT: vpternlogq {{.*#+}} zmm7 = zmm7 ^ (mem & (zmm7 ^ zmm1))
@@ -5308,7 +5308,7 @@ define void @store_i8_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-FCP-NEXT: vpandn %ymm13, %ymm14, %ymm13
; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm3, %zmm13, %zmm3
; AVX512DQ-FCP-NEXT: vmovdqa (%r8), %ymm13
; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} zmm15 = [4,6,5,5,5,5,4,6,6,6,6,6,7,7,7,7]
; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} zmm15 = [4,0,5,5,5,5,0,6,6,6,6,0,7,7,7,7]
; AVX512DQ-FCP-NEXT: vpshufb %ymm11, %ymm13, %ymm11
; AVX512DQ-FCP-NEXT: vpermd %ymm13, %ymm15, %ymm13
; AVX512DQ-FCP-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm13, %ymm13
@@ -5330,7 +5330,7 @@ define void @store_i8_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-FCP-NEXT: vpermt2q %zmm4, %zmm6, %zmm1
; AVX512DQ-FCP-NEXT: vmovdqa64 (%r8), %zmm4
; AVX512DQ-FCP-NEXT: vpermd %zmm10, %zmm15, %zmm6
; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} zmm7 = [6,6,6,0,7,7,7,7,16,16,16,16,16,16,17,17]
; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} zmm7 = [6,6,6,0,7,7,7,7,0,16,16,16,16,0,17,17]
; AVX512DQ-FCP-NEXT: vpermi2d %zmm10, %zmm4, %zmm7
; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} ymm8 = ymm18[0,0,1,1]
; AVX512DQ-FCP-NEXT: vinserti64x4 $1, %ymm16, %zmm8, %zmm8
@@ -5356,7 +5356,7 @@ define void @store_i8_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} zmm0 = zmm5[0,0,1,1,4,4,5,5]
; AVX512DQ-FCP-NEXT: vpermq {{.*#+}} zmm2 = zmm2[0,0,1,1,4,4,5,5]
; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm2 = zmm2 ^ (mem & (zmm2 ^ zmm0))
; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} zmm0 = [0,0,0,0,0,0,1,1,1,1,2,2,2,2,2,2]
; AVX512DQ-FCP-NEXT: vpmovsxbd {{.*#+}} zmm0 = [0,0,0,0,0,0,1,1,1,1,0,2,2,2,2,0]
; AVX512DQ-FCP-NEXT: vpermd %zmm4, %zmm0, %zmm0
; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm0 = zmm0 ^ (mem & (zmm0 ^ zmm2))
; AVX512DQ-FCP-NEXT: vpternlogq {{.*#+}} zmm7 = zmm7 ^ (mem & (zmm7 ^ zmm1))


@@ -2054,40 +2054,38 @@ define void @store_i8_stride8_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512BW-NEXT: vinserti128 $1, (%r10), %ymm3, %ymm3
; AVX512BW-NEXT: vinserti128 $1, (%r9), %ymm2, %ymm2
; AVX512BW-NEXT: vinserti64x4 $1, %ymm3, %zmm2, %zmm3
; AVX512BW-NEXT: vinserti64x4 $1, %ymm2, %zmm2, %zmm2
; AVX512BW-NEXT: vpermq {{.*#+}} zmm4 = zmm2[0,2,0,2,4,6,4,6]
; AVX512BW-NEXT: vpshufb {{.*#+}} zmm4 = zmm4[u,u,u,u,0,8,u,u,u,u,u,u,1,9,u,u,u,u,u,u,18,26,u,u,u,u,u,u,19,27,u,u,u,u,u,u,36,44,u,u,u,u,u,u,37,45,u,u,u,u,u,u,54,62,u,u,u,u,u,u,55,63,u,u]
; AVX512BW-NEXT: vshufi64x2 {{.*#+}} zmm3 = zmm3[4,5,6,7,4,5,6,7]
; AVX512BW-NEXT: vpermq {{.*#+}} zmm5 = zmm3[0,2,0,2,4,6,4,6]
; AVX512BW-NEXT: vpshufb {{.*#+}} zmm5 = zmm5[u,u,u,u,u,u,0,8,u,u,u,u,u,u,1,9,u,u,u,u,u,u,18,26,u,u,u,u,u,u,19,27,u,u,u,u,u,u,36,44,u,u,u,u,u,u,37,45,u,u,u,u,u,u,54,62,u,u,u,u,u,u,55,63]
; AVX512BW-NEXT: movl $-2004318072, %ecx # imm = 0x88888888
; AVX512BW-NEXT: kmovd %ecx, %k1
; AVX512BW-NEXT: vmovdqu16 %zmm5, %zmm4 {%k1}
; AVX512BW-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm0
; AVX512BW-NEXT: vpermq {{.*#+}} zmm5 = zmm0[0,2,0,2,4,6,4,6]
; AVX512BW-NEXT: vpshufb {{.*#+}} zmm5 = zmm5[0,8,u,u,u,u,u,u,1,9,u,u,u,u,u,u,18,26,u,u,u,u,u,u,19,27,u,u,u,u,u,u,36,44,u,u,u,u,u,u,37,45,u,u,u,u,u,u,54,62,u,u,u,u,u,u,55,63,u,u,u,u,u,u]
; AVX512BW-NEXT: vpermq {{.*#+}} zmm4 = zmm0[0,2,0,2,4,6,4,6]
; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm5 = [0,8,128,128,128,128,128,128,1,9,128,128,128,128,128,128,18,26,128,128,128,128,128,128,19,27,128,128,128,128,128,128,36,44,128,128,128,128,128,128,37,45,128,128,128,128,128,128,54,62,128,128,128,128,128,128,55,63,128,128,128,128,128,128]
; AVX512BW-NEXT: vpshufb %zmm5, %zmm4, %zmm4
; AVX512BW-NEXT: vshufi64x2 {{.*#+}} zmm1 = zmm1[4,5,6,7,4,5,6,7]
; AVX512BW-NEXT: vpermq {{.*#+}} zmm6 = zmm1[0,2,0,2,4,6,4,6]
; AVX512BW-NEXT: vpshufb {{.*#+}} zmm6 = zmm6[u,u,0,8,u,u,u,u,u,u,1,9,u,u,u,u,u,u,18,26,u,u,u,u,u,u,19,27,u,u,u,u,u,u,36,44,u,u,u,u,u,u,37,45,u,u,u,u,u,u,54,62,u,u,u,u,u,u,55,63,u,u,u,u]
; AVX512BW-NEXT: movl $572662306, %ecx # imm = 0x22222222
; AVX512BW-NEXT: kmovd %ecx, %k2
; AVX512BW-NEXT: vmovdqu16 %zmm6, %zmm5 {%k2}
; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm7 = [128,128,0,8,128,128,128,128,128,128,1,9,128,128,128,128,128,128,18,26,128,128,128,128,128,128,19,27,128,128,128,128,128,128,36,44,128,128,128,128,128,128,37,45,128,128,128,128,128,128,54,62,128,128,128,128,128,128,55,63,128,128,128,128]
; AVX512BW-NEXT: vpshufb %zmm7, %zmm6, %zmm6
; AVX512BW-NEXT: vporq %zmm4, %zmm6, %zmm4
; AVX512BW-NEXT: vinserti64x4 $1, %ymm2, %zmm2, %zmm2
; AVX512BW-NEXT: vpermq {{.*#+}} zmm6 = zmm2[0,2,0,2,4,6,4,6]
; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm8 = [128,128,128,128,0,8,128,128,128,128,128,128,1,9,128,128,128,128,128,128,18,26,128,128,128,128,128,128,19,27,128,128,128,128,128,128,36,44,128,128,128,128,128,128,37,45,128,128,128,128,128,128,54,62,128,128,128,128,128,128,55,63,128,128]
; AVX512BW-NEXT: vpshufb %zmm8, %zmm6, %zmm6
; AVX512BW-NEXT: vshufi64x2 {{.*#+}} zmm3 = zmm3[4,5,6,7,4,5,6,7]
; AVX512BW-NEXT: vpermq {{.*#+}} zmm9 = zmm3[0,2,0,2,4,6,4,6]
; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm10 = [128,128,128,128,128,128,0,8,128,128,128,128,128,128,1,9,128,128,128,128,128,128,18,26,128,128,128,128,128,128,19,27,128,128,128,128,128,128,36,44,128,128,128,128,128,128,37,45,128,128,128,128,128,128,54,62,128,128,128,128,128,128,55,63]
; AVX512BW-NEXT: vpshufb %zmm10, %zmm9, %zmm9
; AVX512BW-NEXT: movw $-21846, %cx # imm = 0xAAAA
; AVX512BW-NEXT: kmovd %ecx, %k3
; AVX512BW-NEXT: vmovdqa32 %zmm4, %zmm5 {%k3}
; AVX512BW-NEXT: vpermq {{.*#+}} zmm2 = zmm2[1,3,1,3,5,7,5,7]
; AVX512BW-NEXT: vpshufb {{.*#+}} zmm2 = zmm2[u,u,u,u,0,8,0,8,u,u,u,u,1,9,1,9,u,u,u,u,18,26,18,26,u,u,u,u,19,27,19,27,u,u,u,u,36,44,u,u,u,u,u,u,37,45,u,u,u,u,u,u,54,62,u,u,u,u,u,u,55,63,u,u]
; AVX512BW-NEXT: vpermq {{.*#+}} zmm3 = zmm3[1,3,1,3,5,7,5,7]
; AVX512BW-NEXT: vpshufb {{.*#+}} zmm3 = zmm3[u,u,u,u,0,8,0,8,u,u,u,u,1,9,1,9,u,u,u,u,18,26,18,26,u,u,u,u,19,27,19,27,u,u,u,u,u,u,36,44,u,u,u,u,u,u,37,45,u,u,u,u,u,u,54,62,u,u,u,u,u,u,55,63]
; AVX512BW-NEXT: vmovdqu16 %zmm3, %zmm2 {%k1}
; AVX512BW-NEXT: kmovd %ecx, %k1
; AVX512BW-NEXT: vpord %zmm6, %zmm9, %zmm4 {%k1}
; AVX512BW-NEXT: vpermq {{.*#+}} zmm0 = zmm0[1,3,1,3,5,7,5,7]
; AVX512BW-NEXT: vpshufb {{.*#+}} zmm0 = zmm0[0,8,0,8,u,u,u,u,1,9,1,9,u,u,u,u,18,26,18,26,u,u,u,u,19,27,19,27,u,u,u,u,36,44,u,u,u,u,u,u,37,45,u,u,u,u,u,u,54,62,u,u,u,u,u,u,55,63,u,u,u,u,u,u]
; AVX512BW-NEXT: vpshufb %zmm5, %zmm0, %zmm0
; AVX512BW-NEXT: vpermq {{.*#+}} zmm1 = zmm1[1,3,1,3,5,7,5,7]
; AVX512BW-NEXT: vpshufb {{.*#+}} zmm1 = zmm1[0,8,0,8,u,u,u,u,1,9,1,9,u,u,u,u,18,26,18,26,u,u,u,u,19,27,19,27,u,u,u,u,u,u,36,44,u,u,u,u,u,u,37,45,u,u,u,u,u,u,54,62,u,u,u,u,u,u,55,63,u,u,u,u]
; AVX512BW-NEXT: vmovdqu16 %zmm1, %zmm0 {%k2}
; AVX512BW-NEXT: vmovdqa32 %zmm2, %zmm0 {%k3}
; AVX512BW-NEXT: vpshufb %zmm7, %zmm1, %zmm1
; AVX512BW-NEXT: vporq %zmm0, %zmm1, %zmm0
; AVX512BW-NEXT: vpermq {{.*#+}} zmm1 = zmm2[1,3,1,3,5,7,5,7]
; AVX512BW-NEXT: vpshufb %zmm8, %zmm1, %zmm1
; AVX512BW-NEXT: vpermq {{.*#+}} zmm2 = zmm3[1,3,1,3,5,7,5,7]
; AVX512BW-NEXT: vpshufb %zmm10, %zmm2, %zmm2
; AVX512BW-NEXT: vpord %zmm1, %zmm2, %zmm0 {%k1}
; AVX512BW-NEXT: vmovdqa64 %zmm0, 64(%rax)
; AVX512BW-NEXT: vmovdqa64 %zmm5, (%rax)
; AVX512BW-NEXT: vmovdqa64 %zmm4, (%rax)
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
@@ -2106,42 +2104,40 @@ define void @store_i8_stride8_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512BW-FCP-NEXT: vinserti128 $1, (%r10), %ymm3, %ymm3
; AVX512BW-FCP-NEXT: vinserti128 $1, (%r9), %ymm2, %ymm2
; AVX512BW-FCP-NEXT: vinserti64x4 $1, %ymm3, %zmm2, %zmm3
; AVX512BW-FCP-NEXT: vbroadcasti32x4 {{.*#+}} zmm4 = [4,6,4,6,4,6,4,6]
; AVX512BW-FCP-NEXT: vbroadcasti32x4 {{.*#+}} zmm4 = [0,2,0,2,0,2,0,2]
; AVX512BW-FCP-NEXT: # zmm4 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; AVX512BW-FCP-NEXT: vpermq %zmm3, %zmm4, %zmm5
; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} zmm5 = zmm5[u,u,u,u,u,u,0,8,u,u,u,u,u,u,1,9,u,u,u,u,u,u,18,26,u,u,u,u,u,u,19,27,u,u,u,u,u,u,36,44,u,u,u,u,u,u,37,45,u,u,u,u,u,u,54,62,u,u,u,u,u,u,55,63]
; AVX512BW-FCP-NEXT: vbroadcasti32x4 {{.*#+}} zmm6 = [0,2,0,2,0,2,0,2]
; AVX512BW-FCP-NEXT: # zmm6 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; AVX512BW-FCP-NEXT: vpermq %zmm3, %zmm6, %zmm7
; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} zmm7 = zmm7[u,u,u,u,0,8,u,u,u,u,u,u,1,9,u,u,u,u,u,u,18,26,u,u,u,u,u,u,19,27,u,u,u,u,u,u,36,44,u,u,u,u,u,u,37,45,u,u,u,u,u,u,54,62,u,u,u,u,u,u,55,63,u,u]
; AVX512BW-FCP-NEXT: movl $-2004318072, %ecx # imm = 0x88888888
; AVX512BW-FCP-NEXT: kmovd %ecx, %k1
; AVX512BW-FCP-NEXT: vmovdqu16 %zmm5, %zmm7 {%k1}
; AVX512BW-FCP-NEXT: vpermq %zmm1, %zmm4, %zmm4
; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} zmm4 = zmm4[u,u,0,8,u,u,u,u,u,u,1,9,u,u,u,u,u,u,18,26,u,u,u,u,u,u,19,27,u,u,u,u,u,u,36,44,u,u,u,u,u,u,37,45,u,u,u,u,u,u,54,62,u,u,u,u,u,u,55,63,u,u,u,u]
; AVX512BW-FCP-NEXT: vpermq %zmm1, %zmm6, %zmm5
; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} zmm5 = zmm5[0,8,u,u,u,u,u,u,1,9,u,u,u,u,u,u,18,26,u,u,u,u,u,u,19,27,u,u,u,u,u,u,36,44,u,u,u,u,u,u,37,45,u,u,u,u,u,u,54,62,u,u,u,u,u,u,55,63,u,u,u,u,u,u]
; AVX512BW-FCP-NEXT: movl $572662306, %ecx # imm = 0x22222222
; AVX512BW-FCP-NEXT: kmovd %ecx, %k2
; AVX512BW-FCP-NEXT: vmovdqu16 %zmm4, %zmm5 {%k2}
; AVX512BW-FCP-NEXT: vpermq %zmm1, %zmm4, %zmm5
; AVX512BW-FCP-NEXT: vmovdqa64 {{.*#+}} zmm6 = [0,8,128,128,128,128,128,128,1,9,128,128,128,128,128,128,18,26,128,128,128,128,128,128,19,27,128,128,128,128,128,128,36,44,128,128,128,128,128,128,37,45,128,128,128,128,128,128,54,62,128,128,128,128,128,128,55,63,128,128,128,128,128,128]
; AVX512BW-FCP-NEXT: vpshufb %zmm6, %zmm5, %zmm5
; AVX512BW-FCP-NEXT: vbroadcasti32x4 {{.*#+}} zmm7 = [4,6,4,6,4,6,4,6]
; AVX512BW-FCP-NEXT: # zmm7 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; AVX512BW-FCP-NEXT: vpermq %zmm1, %zmm7, %zmm8
; AVX512BW-FCP-NEXT: vmovdqa64 {{.*#+}} zmm9 = [128,128,0,8,128,128,128,128,128,128,1,9,128,128,128,128,128,128,18,26,128,128,128,128,128,128,19,27,128,128,128,128,128,128,36,44,128,128,128,128,128,128,37,45,128,128,128,128,128,128,54,62,128,128,128,128,128,128,55,63,128,128,128,128]
; AVX512BW-FCP-NEXT: vpshufb %zmm9, %zmm8, %zmm8
; AVX512BW-FCP-NEXT: vporq %zmm5, %zmm8, %zmm5
; AVX512BW-FCP-NEXT: vpermq %zmm3, %zmm4, %zmm4
; AVX512BW-FCP-NEXT: vmovdqa64 {{.*#+}} zmm8 = [128,128,128,128,0,8,128,128,128,128,128,128,1,9,128,128,128,128,128,128,18,26,128,128,128,128,128,128,19,27,128,128,128,128,128,128,36,44,128,128,128,128,128,128,37,45,128,128,128,128,128,128,54,62,128,128,128,128,128,128,55,63,128,128]
; AVX512BW-FCP-NEXT: vpshufb %zmm8, %zmm4, %zmm4
; AVX512BW-FCP-NEXT: vpermq %zmm3, %zmm7, %zmm7
; AVX512BW-FCP-NEXT: vmovdqa64 {{.*#+}} zmm10 = [128,128,128,128,128,128,0,8,128,128,128,128,128,128,1,9,128,128,128,128,128,128,18,26,128,128,128,128,128,128,19,27,128,128,128,128,128,128,36,44,128,128,128,128,128,128,37,45,128,128,128,128,128,128,54,62,128,128,128,128,128,128,55,63]
; AVX512BW-FCP-NEXT: vpshufb %zmm10, %zmm7, %zmm7
; AVX512BW-FCP-NEXT: movw $-21846, %cx # imm = 0xAAAA
; AVX512BW-FCP-NEXT: kmovd %ecx, %k3
; AVX512BW-FCP-NEXT: vmovdqa32 %zmm7, %zmm5 {%k3}
; AVX512BW-FCP-NEXT: kmovd %ecx, %k1
; AVX512BW-FCP-NEXT: vpord %zmm4, %zmm7, %zmm5 {%k1}
; AVX512BW-FCP-NEXT: vbroadcasti32x4 {{.*#+}} zmm4 = [5,7,5,7,5,7,5,7]
; AVX512BW-FCP-NEXT: # zmm4 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; AVX512BW-FCP-NEXT: vpermq %zmm3, %zmm4, %zmm3
; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} zmm3 = zmm3[u,u,u,u,0,8,0,8,u,u,u,u,1,9,1,9,u,u,u,u,18,26,18,26,u,u,u,u,19,27,19,27,u,u,u,u,u,u,36,44,u,u,u,u,u,u,37,45,u,u,u,u,u,u,54,62,u,u,u,u,u,u,55,63]
; AVX512BW-FCP-NEXT: vbroadcasti32x4 {{.*#+}} zmm6 = [1,3,1,3,1,3,1,3]
; AVX512BW-FCP-NEXT: # zmm6 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; AVX512BW-FCP-NEXT: vpermq %zmm2, %zmm6, %zmm2
; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} zmm2 = zmm2[u,u,u,u,0,8,0,8,u,u,u,u,1,9,1,9,u,u,u,u,18,26,18,26,u,u,u,u,19,27,19,27,u,u,u,u,36,44,u,u,u,u,u,u,37,45,u,u,u,u,u,u,54,62,u,u,u,u,u,u,55,63,u,u]
; AVX512BW-FCP-NEXT: vmovdqu16 %zmm3, %zmm2 {%k1}
; AVX512BW-FCP-NEXT: vpshufb %zmm10, %zmm3, %zmm3
; AVX512BW-FCP-NEXT: vpermq %zmm1, %zmm4, %zmm1
; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} zmm1 = zmm1[0,8,0,8,u,u,u,u,1,9,1,9,u,u,u,u,18,26,18,26,u,u,u,u,19,27,19,27,u,u,u,u,u,u,36,44,u,u,u,u,u,u,37,45,u,u,u,u,u,u,54,62,u,u,u,u,u,u,55,63,u,u,u,u]
; AVX512BW-FCP-NEXT: vpermq %zmm0, %zmm6, %zmm0
; AVX512BW-FCP-NEXT: vpshufb {{.*#+}} zmm0 = zmm0[0,8,0,8,u,u,u,u,1,9,1,9,u,u,u,u,18,26,18,26,u,u,u,u,19,27,19,27,u,u,u,u,36,44,u,u,u,u,u,u,37,45,u,u,u,u,u,u,54,62,u,u,u,u,u,u,55,63,u,u,u,u,u,u]
; AVX512BW-FCP-NEXT: vmovdqu16 %zmm1, %zmm0 {%k2}
; AVX512BW-FCP-NEXT: vmovdqa32 %zmm2, %zmm0 {%k3}
; AVX512BW-FCP-NEXT: vpshufb %zmm9, %zmm1, %zmm1
; AVX512BW-FCP-NEXT: vbroadcasti32x4 {{.*#+}} zmm4 = [1,3,1,3,1,3,1,3]
; AVX512BW-FCP-NEXT: # zmm4 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; AVX512BW-FCP-NEXT: vpermq %zmm0, %zmm4, %zmm0
; AVX512BW-FCP-NEXT: vpshufb %zmm6, %zmm0, %zmm0
; AVX512BW-FCP-NEXT: vporq %zmm0, %zmm1, %zmm0
; AVX512BW-FCP-NEXT: vpermq %zmm2, %zmm4, %zmm1
; AVX512BW-FCP-NEXT: vpshufb %zmm8, %zmm1, %zmm1
; AVX512BW-FCP-NEXT: vpord %zmm1, %zmm3, %zmm0 {%k1}
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm0, 64(%rax)
; AVX512BW-FCP-NEXT: vmovdqa64 %zmm5, (%rax)
; AVX512BW-FCP-NEXT: vzeroupper
@@ -2162,40 +2158,38 @@ define void @store_i8_stride8_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-BW-NEXT: vinserti128 $1, (%r10), %ymm3, %ymm3
; AVX512DQ-BW-NEXT: vinserti128 $1, (%r9), %ymm2, %ymm2
; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm3, %zmm2, %zmm3
; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm2, %zmm2, %zmm2
; AVX512DQ-BW-NEXT: vpermq {{.*#+}} zmm4 = zmm2[0,2,0,2,4,6,4,6]
; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} zmm4 = zmm4[u,u,u,u,0,8,u,u,u,u,u,u,1,9,u,u,u,u,u,u,18,26,u,u,u,u,u,u,19,27,u,u,u,u,u,u,36,44,u,u,u,u,u,u,37,45,u,u,u,u,u,u,54,62,u,u,u,u,u,u,55,63,u,u]
; AVX512DQ-BW-NEXT: vshufi64x2 {{.*#+}} zmm3 = zmm3[4,5,6,7,4,5,6,7]
; AVX512DQ-BW-NEXT: vpermq {{.*#+}} zmm5 = zmm3[0,2,0,2,4,6,4,6]
; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} zmm5 = zmm5[u,u,u,u,u,u,0,8,u,u,u,u,u,u,1,9,u,u,u,u,u,u,18,26,u,u,u,u,u,u,19,27,u,u,u,u,u,u,36,44,u,u,u,u,u,u,37,45,u,u,u,u,u,u,54,62,u,u,u,u,u,u,55,63]
; AVX512DQ-BW-NEXT: movl $-2004318072, %ecx # imm = 0x88888888
; AVX512DQ-BW-NEXT: kmovd %ecx, %k1
; AVX512DQ-BW-NEXT: vmovdqu16 %zmm5, %zmm4 {%k1}
; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm0
; AVX512DQ-BW-NEXT: vpermq {{.*#+}} zmm5 = zmm0[0,2,0,2,4,6,4,6]
; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} zmm5 = zmm5[0,8,u,u,u,u,u,u,1,9,u,u,u,u,u,u,18,26,u,u,u,u,u,u,19,27,u,u,u,u,u,u,36,44,u,u,u,u,u,u,37,45,u,u,u,u,u,u,54,62,u,u,u,u,u,u,55,63,u,u,u,u,u,u]
; AVX512DQ-BW-NEXT: vpermq {{.*#+}} zmm4 = zmm0[0,2,0,2,4,6,4,6]
; AVX512DQ-BW-NEXT: vmovdqa64 {{.*#+}} zmm5 = [0,8,128,128,128,128,128,128,1,9,128,128,128,128,128,128,18,26,128,128,128,128,128,128,19,27,128,128,128,128,128,128,36,44,128,128,128,128,128,128,37,45,128,128,128,128,128,128,54,62,128,128,128,128,128,128,55,63,128,128,128,128,128,128]
; AVX512DQ-BW-NEXT: vpshufb %zmm5, %zmm4, %zmm4
; AVX512DQ-BW-NEXT: vshufi64x2 {{.*#+}} zmm1 = zmm1[4,5,6,7,4,5,6,7]
; AVX512DQ-BW-NEXT: vpermq {{.*#+}} zmm6 = zmm1[0,2,0,2,4,6,4,6]
; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} zmm6 = zmm6[u,u,0,8,u,u,u,u,u,u,1,9,u,u,u,u,u,u,18,26,u,u,u,u,u,u,19,27,u,u,u,u,u,u,36,44,u,u,u,u,u,u,37,45,u,u,u,u,u,u,54,62,u,u,u,u,u,u,55,63,u,u,u,u]
; AVX512DQ-BW-NEXT: movl $572662306, %ecx # imm = 0x22222222
; AVX512DQ-BW-NEXT: kmovd %ecx, %k2
; AVX512DQ-BW-NEXT: vmovdqu16 %zmm6, %zmm5 {%k2}
; AVX512DQ-BW-NEXT: vmovdqa64 {{.*#+}} zmm7 = [128,128,0,8,128,128,128,128,128,128,1,9,128,128,128,128,128,128,18,26,128,128,128,128,128,128,19,27,128,128,128,128,128,128,36,44,128,128,128,128,128,128,37,45,128,128,128,128,128,128,54,62,128,128,128,128,128,128,55,63,128,128,128,128]
; AVX512DQ-BW-NEXT: vpshufb %zmm7, %zmm6, %zmm6
; AVX512DQ-BW-NEXT: vporq %zmm4, %zmm6, %zmm4
; AVX512DQ-BW-NEXT: vinserti64x4 $1, %ymm2, %zmm2, %zmm2
; AVX512DQ-BW-NEXT: vpermq {{.*#+}} zmm6 = zmm2[0,2,0,2,4,6,4,6]
; AVX512DQ-BW-NEXT: vmovdqa64 {{.*#+}} zmm8 = [128,128,128,128,0,8,128,128,128,128,128,128,1,9,128,128,128,128,128,128,18,26,128,128,128,128,128,128,19,27,128,128,128,128,128,128,36,44,128,128,128,128,128,128,37,45,128,128,128,128,128,128,54,62,128,128,128,128,128,128,55,63,128,128]
; AVX512DQ-BW-NEXT: vpshufb %zmm8, %zmm6, %zmm6
; AVX512DQ-BW-NEXT: vshufi64x2 {{.*#+}} zmm3 = zmm3[4,5,6,7,4,5,6,7]
; AVX512DQ-BW-NEXT: vpermq {{.*#+}} zmm9 = zmm3[0,2,0,2,4,6,4,6]
; AVX512DQ-BW-NEXT: vmovdqa64 {{.*#+}} zmm10 = [128,128,128,128,128,128,0,8,128,128,128,128,128,128,1,9,128,128,128,128,128,128,18,26,128,128,128,128,128,128,19,27,128,128,128,128,128,128,36,44,128,128,128,128,128,128,37,45,128,128,128,128,128,128,54,62,128,128,128,128,128,128,55,63]
; AVX512DQ-BW-NEXT: vpshufb %zmm10, %zmm9, %zmm9
; AVX512DQ-BW-NEXT: movw $-21846, %cx # imm = 0xAAAA
; AVX512DQ-BW-NEXT: kmovd %ecx, %k3
; AVX512DQ-BW-NEXT: vmovdqa32 %zmm4, %zmm5 {%k3}
; AVX512DQ-BW-NEXT: vpermq {{.*#+}} zmm2 = zmm2[1,3,1,3,5,7,5,7]
; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} zmm2 = zmm2[u,u,u,u,0,8,0,8,u,u,u,u,1,9,1,9,u,u,u,u,18,26,18,26,u,u,u,u,19,27,19,27,u,u,u,u,36,44,u,u,u,u,u,u,37,45,u,u,u,u,u,u,54,62,u,u,u,u,u,u,55,63,u,u]
; AVX512DQ-BW-NEXT: vpermq {{.*#+}} zmm3 = zmm3[1,3,1,3,5,7,5,7]
; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} zmm3 = zmm3[u,u,u,u,0,8,0,8,u,u,u,u,1,9,1,9,u,u,u,u,18,26,18,26,u,u,u,u,19,27,19,27,u,u,u,u,u,u,36,44,u,u,u,u,u,u,37,45,u,u,u,u,u,u,54,62,u,u,u,u,u,u,55,63]
; AVX512DQ-BW-NEXT: vmovdqu16 %zmm3, %zmm2 {%k1}
; AVX512DQ-BW-NEXT: kmovd %ecx, %k1
; AVX512DQ-BW-NEXT: vpord %zmm6, %zmm9, %zmm4 {%k1}
; AVX512DQ-BW-NEXT: vpermq {{.*#+}} zmm0 = zmm0[1,3,1,3,5,7,5,7]
; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} zmm0 = zmm0[0,8,0,8,u,u,u,u,1,9,1,9,u,u,u,u,18,26,18,26,u,u,u,u,19,27,19,27,u,u,u,u,36,44,u,u,u,u,u,u,37,45,u,u,u,u,u,u,54,62,u,u,u,u,u,u,55,63,u,u,u,u,u,u]
; AVX512DQ-BW-NEXT: vpshufb %zmm5, %zmm0, %zmm0
; AVX512DQ-BW-NEXT: vpermq {{.*#+}} zmm1 = zmm1[1,3,1,3,5,7,5,7]
; AVX512DQ-BW-NEXT: vpshufb {{.*#+}} zmm1 = zmm1[0,8,0,8,u,u,u,u,1,9,1,9,u,u,u,u,18,26,18,26,u,u,u,u,19,27,19,27,u,u,u,u,u,u,36,44,u,u,u,u,u,u,37,45,u,u,u,u,u,u,54,62,u,u,u,u,u,u,55,63,u,u,u,u]
; AVX512DQ-BW-NEXT: vmovdqu16 %zmm1, %zmm0 {%k2}
; AVX512DQ-BW-NEXT: vmovdqa32 %zmm2, %zmm0 {%k3}
; AVX512DQ-BW-NEXT: vpshufb %zmm7, %zmm1, %zmm1
; AVX512DQ-BW-NEXT: vporq %zmm0, %zmm1, %zmm0
; AVX512DQ-BW-NEXT: vpermq {{.*#+}} zmm1 = zmm2[1,3,1,3,5,7,5,7]
; AVX512DQ-BW-NEXT: vpshufb %zmm8, %zmm1, %zmm1
; AVX512DQ-BW-NEXT: vpermq {{.*#+}} zmm2 = zmm3[1,3,1,3,5,7,5,7]
; AVX512DQ-BW-NEXT: vpshufb %zmm10, %zmm2, %zmm2
; AVX512DQ-BW-NEXT: vpord %zmm1, %zmm2, %zmm0 {%k1}
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm0, 64(%rax)
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm5, (%rax)
; AVX512DQ-BW-NEXT: vmovdqa64 %zmm4, (%rax)
; AVX512DQ-BW-NEXT: vzeroupper
; AVX512DQ-BW-NEXT: retq
;
@@ -2214,42 +2208,40 @@ define void @store_i8_stride8_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vec
; AVX512DQ-BW-FCP-NEXT: vinserti128 $1, (%r10), %ymm3, %ymm3
; AVX512DQ-BW-FCP-NEXT: vinserti128 $1, (%r9), %ymm2, %ymm2
; AVX512DQ-BW-FCP-NEXT: vinserti64x4 $1, %ymm3, %zmm2, %zmm3
; AVX512DQ-BW-FCP-NEXT: vbroadcasti32x4 {{.*#+}} zmm4 = [4,6,4,6,4,6,4,6]
; AVX512DQ-BW-FCP-NEXT: vbroadcasti32x4 {{.*#+}} zmm4 = [0,2,0,2,0,2,0,2]
; AVX512DQ-BW-FCP-NEXT: # zmm4 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; AVX512DQ-BW-FCP-NEXT: vpermq %zmm3, %zmm4, %zmm5
; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} zmm5 = zmm5[u,u,u,u,u,u,0,8,u,u,u,u,u,u,1,9,u,u,u,u,u,u,18,26,u,u,u,u,u,u,19,27,u,u,u,u,u,u,36,44,u,u,u,u,u,u,37,45,u,u,u,u,u,u,54,62,u,u,u,u,u,u,55,63]
; AVX512DQ-BW-FCP-NEXT: vbroadcasti32x4 {{.*#+}} zmm6 = [0,2,0,2,0,2,0,2]
; AVX512DQ-BW-FCP-NEXT: # zmm6 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; AVX512DQ-BW-FCP-NEXT: vpermq %zmm3, %zmm6, %zmm7
; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} zmm7 = zmm7[u,u,u,u,0,8,u,u,u,u,u,u,1,9,u,u,u,u,u,u,18,26,u,u,u,u,u,u,19,27,u,u,u,u,u,u,36,44,u,u,u,u,u,u,37,45,u,u,u,u,u,u,54,62,u,u,u,u,u,u,55,63,u,u]
; AVX512DQ-BW-FCP-NEXT: movl $-2004318072, %ecx # imm = 0x88888888
; AVX512DQ-BW-FCP-NEXT: kmovd %ecx, %k1
; AVX512DQ-BW-FCP-NEXT: vmovdqu16 %zmm5, %zmm7 {%k1}
; AVX512DQ-BW-FCP-NEXT: vpermq %zmm1, %zmm4, %zmm4
; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} zmm4 = zmm4[u,u,0,8,u,u,u,u,u,u,1,9,u,u,u,u,u,u,18,26,u,u,u,u,u,u,19,27,u,u,u,u,u,u,36,44,u,u,u,u,u,u,37,45,u,u,u,u,u,u,54,62,u,u,u,u,u,u,55,63,u,u,u,u]
; AVX512DQ-BW-FCP-NEXT: vpermq %zmm1, %zmm6, %zmm5
; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} zmm5 = zmm5[0,8,u,u,u,u,u,u,1,9,u,u,u,u,u,u,18,26,u,u,u,u,u,u,19,27,u,u,u,u,u,u,36,44,u,u,u,u,u,u,37,45,u,u,u,u,u,u,54,62,u,u,u,u,u,u,55,63,u,u,u,u,u,u]
; AVX512DQ-BW-FCP-NEXT: movl $572662306, %ecx # imm = 0x22222222
; AVX512DQ-BW-FCP-NEXT: kmovd %ecx, %k2
; AVX512DQ-BW-FCP-NEXT: vmovdqu16 %zmm4, %zmm5 {%k2}
; AVX512DQ-BW-FCP-NEXT: vpermq %zmm1, %zmm4, %zmm5
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 {{.*#+}} zmm6 = [0,8,128,128,128,128,128,128,1,9,128,128,128,128,128,128,18,26,128,128,128,128,128,128,19,27,128,128,128,128,128,128,36,44,128,128,128,128,128,128,37,45,128,128,128,128,128,128,54,62,128,128,128,128,128,128,55,63,128,128,128,128,128,128]
; AVX512DQ-BW-FCP-NEXT: vpshufb %zmm6, %zmm5, %zmm5
; AVX512DQ-BW-FCP-NEXT: vbroadcasti32x4 {{.*#+}} zmm7 = [4,6,4,6,4,6,4,6]
; AVX512DQ-BW-FCP-NEXT: # zmm7 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; AVX512DQ-BW-FCP-NEXT: vpermq %zmm1, %zmm7, %zmm8
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 {{.*#+}} zmm9 = [128,128,0,8,128,128,128,128,128,128,1,9,128,128,128,128,128,128,18,26,128,128,128,128,128,128,19,27,128,128,128,128,128,128,36,44,128,128,128,128,128,128,37,45,128,128,128,128,128,128,54,62,128,128,128,128,128,128,55,63,128,128,128,128]
; AVX512DQ-BW-FCP-NEXT: vpshufb %zmm9, %zmm8, %zmm8
; AVX512DQ-BW-FCP-NEXT: vporq %zmm5, %zmm8, %zmm5
; AVX512DQ-BW-FCP-NEXT: vpermq %zmm3, %zmm4, %zmm4
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 {{.*#+}} zmm8 = [128,128,128,128,0,8,128,128,128,128,128,128,1,9,128,128,128,128,128,128,18,26,128,128,128,128,128,128,19,27,128,128,128,128,128,128,36,44,128,128,128,128,128,128,37,45,128,128,128,128,128,128,54,62,128,128,128,128,128,128,55,63,128,128]
; AVX512DQ-BW-FCP-NEXT: vpshufb %zmm8, %zmm4, %zmm4
; AVX512DQ-BW-FCP-NEXT: vpermq %zmm3, %zmm7, %zmm7
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 {{.*#+}} zmm10 = [128,128,128,128,128,128,0,8,128,128,128,128,128,128,1,9,128,128,128,128,128,128,18,26,128,128,128,128,128,128,19,27,128,128,128,128,128,128,36,44,128,128,128,128,128,128,37,45,128,128,128,128,128,128,54,62,128,128,128,128,128,128,55,63]
; AVX512DQ-BW-FCP-NEXT: vpshufb %zmm10, %zmm7, %zmm7
; AVX512DQ-BW-FCP-NEXT: movw $-21846, %cx # imm = 0xAAAA
; AVX512DQ-BW-FCP-NEXT: kmovd %ecx, %k3
; AVX512DQ-BW-FCP-NEXT: vmovdqa32 %zmm7, %zmm5 {%k3}
; AVX512DQ-BW-FCP-NEXT: kmovd %ecx, %k1
; AVX512DQ-BW-FCP-NEXT: vpord %zmm4, %zmm7, %zmm5 {%k1}
; AVX512DQ-BW-FCP-NEXT: vbroadcasti32x4 {{.*#+}} zmm4 = [5,7,5,7,5,7,5,7]
; AVX512DQ-BW-FCP-NEXT: # zmm4 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; AVX512DQ-BW-FCP-NEXT: vpermq %zmm3, %zmm4, %zmm3
; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} zmm3 = zmm3[u,u,u,u,0,8,0,8,u,u,u,u,1,9,1,9,u,u,u,u,18,26,18,26,u,u,u,u,19,27,19,27,u,u,u,u,u,u,36,44,u,u,u,u,u,u,37,45,u,u,u,u,u,u,54,62,u,u,u,u,u,u,55,63]
; AVX512DQ-BW-FCP-NEXT: vbroadcasti32x4 {{.*#+}} zmm6 = [1,3,1,3,1,3,1,3]
; AVX512DQ-BW-FCP-NEXT: # zmm6 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; AVX512DQ-BW-FCP-NEXT: vpermq %zmm2, %zmm6, %zmm2
; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} zmm2 = zmm2[u,u,u,u,0,8,0,8,u,u,u,u,1,9,1,9,u,u,u,u,18,26,18,26,u,u,u,u,19,27,19,27,u,u,u,u,36,44,u,u,u,u,u,u,37,45,u,u,u,u,u,u,54,62,u,u,u,u,u,u,55,63,u,u]
; AVX512DQ-BW-FCP-NEXT: vmovdqu16 %zmm3, %zmm2 {%k1}
; AVX512DQ-BW-FCP-NEXT: vpshufb %zmm10, %zmm3, %zmm3
; AVX512DQ-BW-FCP-NEXT: vpermq %zmm1, %zmm4, %zmm1
; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} zmm1 = zmm1[0,8,0,8,u,u,u,u,1,9,1,9,u,u,u,u,18,26,18,26,u,u,u,u,19,27,19,27,u,u,u,u,u,u,36,44,u,u,u,u,u,u,37,45,u,u,u,u,u,u,54,62,u,u,u,u,u,u,55,63,u,u,u,u]
; AVX512DQ-BW-FCP-NEXT: vpermq %zmm0, %zmm6, %zmm0
; AVX512DQ-BW-FCP-NEXT: vpshufb {{.*#+}} zmm0 = zmm0[0,8,0,8,u,u,u,u,1,9,1,9,u,u,u,u,18,26,18,26,u,u,u,u,19,27,19,27,u,u,u,u,36,44,u,u,u,u,u,u,37,45,u,u,u,u,u,u,54,62,u,u,u,u,u,u,55,63,u,u,u,u,u,u]
; AVX512DQ-BW-FCP-NEXT: vmovdqu16 %zmm1, %zmm0 {%k2}
; AVX512DQ-BW-FCP-NEXT: vmovdqa32 %zmm2, %zmm0 {%k3}
; AVX512DQ-BW-FCP-NEXT: vpshufb %zmm9, %zmm1, %zmm1
; AVX512DQ-BW-FCP-NEXT: vbroadcasti32x4 {{.*#+}} zmm4 = [1,3,1,3,1,3,1,3]
; AVX512DQ-BW-FCP-NEXT: # zmm4 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; AVX512DQ-BW-FCP-NEXT: vpermq %zmm0, %zmm4, %zmm0
; AVX512DQ-BW-FCP-NEXT: vpshufb %zmm6, %zmm0, %zmm0
; AVX512DQ-BW-FCP-NEXT: vporq %zmm0, %zmm1, %zmm0
; AVX512DQ-BW-FCP-NEXT: vpermq %zmm2, %zmm4, %zmm1
; AVX512DQ-BW-FCP-NEXT: vpshufb %zmm8, %zmm1, %zmm1
; AVX512DQ-BW-FCP-NEXT: vpord %zmm1, %zmm3, %zmm0 {%k1}
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm0, 64(%rax)
; AVX512DQ-BW-FCP-NEXT: vmovdqa64 %zmm5, (%rax)
; AVX512DQ-BW-FCP-NEXT: vzeroupper


@@ -6173,9 +6173,9 @@ define void @vec512_v64i8_to_v4i128_factor16(ptr %in.vec.base.ptr, ptr %in.vec.b
; AVX512F-FAST: # %bb.0:
; AVX512F-FAST-NEXT: vmovdqa (%rdi), %xmm0
; AVX512F-FAST-NEXT: vpaddb (%rsi), %xmm0, %xmm0
; AVX512F-FAST-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
; AVX512F-FAST-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[2,u,u,u,u,u,u,u,3,u,u,u,u,u,u,u]
; AVX512F-FAST-NEXT: vpmovsxbq {{.*#+}} zmm2 = [8,9,9,0,0,1,1,3]
; AVX512F-FAST-NEXT: vpshufb {{.*#+}} xmm1 = xmm0[2,u,u,u,u,u,u,u,3,u,u,u,u,u,u,u]
; AVX512F-FAST-NEXT: vpmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
; AVX512F-FAST-NEXT: vpmovsxbq {{.*#+}} zmm2 = [0,0,1,0,8,0,9,0]
; AVX512F-FAST-NEXT: vpermi2q %zmm1, %zmm0, %zmm2
; AVX512F-FAST-NEXT: vbroadcasti32x4 {{.*#+}} zmm0 = [255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
; AVX512F-FAST-NEXT: # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
@@ -6209,9 +6209,9 @@ define void @vec512_v64i8_to_v4i128_factor16(ptr %in.vec.base.ptr, ptr %in.vec.b
; AVX512BW-FAST: # %bb.0:
; AVX512BW-FAST-NEXT: vmovdqa (%rdi), %xmm0
; AVX512BW-FAST-NEXT: vpaddb (%rsi), %xmm0, %xmm0
; AVX512BW-FAST-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
; AVX512BW-FAST-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[2,u,u,u,u,u,u,u,3,u,u,u,u,u,u,u]
; AVX512BW-FAST-NEXT: vpmovsxbq {{.*#+}} zmm2 = [8,9,9,0,0,1,1,3]
; AVX512BW-FAST-NEXT: vpshufb {{.*#+}} xmm1 = xmm0[2,u,u,u,u,u,u,u,3,u,u,u,u,u,u,u]
; AVX512BW-FAST-NEXT: vpmovzxbq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero
; AVX512BW-FAST-NEXT: vpmovsxbq {{.*#+}} zmm2 = [0,0,1,0,8,0,9,0]
; AVX512BW-FAST-NEXT: vpermi2q %zmm1, %zmm0, %zmm2
; AVX512BW-FAST-NEXT: vbroadcasti32x4 {{.*#+}} zmm0 = [255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
; AVX512BW-FAST-NEXT: # zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]