Don't just return the known-zero upper bits; compute the KnownBits of the byte absolute differences and perform the horizontal sum. Add implementations that handle both X86ISD::PSADBW nodes and the INTRINSIC_WO_CHAIN intrinsics (pre-legalization).
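
As a rough sketch, the shared helper could look like the code below. It leans on the current KnownBits API (KnownBits::absdiff, KnownBits::computeForAddSub, APIntOps::ScaleBitMask); the helper name and the exact computeForAddSub signature are illustrative and differ between LLVM versions. Each demanded i64 result element is mapped to its 8 aliasing i8 source elements, the absdiff KnownBits are computed per byte, and the horizontal sum is modelled as three rounds of pairwise additions:

  static KnownBits computeKnownBitsForPSADBW(SDValue LHS, SDValue RHS,
                                             unsigned Depth,
                                             const SelectionDAG &DAG,
                                             const APInt &DemandedElts) {
    // Map each demanded i64 result element onto its 8 aliasing i8 source
    // elements.
    unsigned NumSrcElts = LHS.getValueType().getVectorNumElements();
    APInt DemandedSrcElts = APIntOps::ScaleBitMask(DemandedElts, NumSrcElts);

    // KnownBits of the per-byte absolute differences, widened to i16 so the
    // horizontal sum cannot overflow (8 * 255 = 2040 fits in 16 bits).
    KnownBits Known = DAG.computeKnownBits(LHS, DemandedSrcElts, Depth + 1);
    KnownBits Known2 = DAG.computeKnownBits(RHS, DemandedSrcElts, Depth + 1);
    Known = KnownBits::absdiff(Known, Known2).zext(16);

    // Horizontal sum of the 8 absdiffs: three rounds of pairwise additions.
    for (unsigned I = 0; I != 3; ++I)
      Known = KnownBits::computeForAddSub(/*Add=*/true, /*NSW=*/true,
                                          /*NUW=*/true, Known, Known);

    // The i64 result elements are zero-extended from the 16-bit sum.
    return Known.zext(64);
  }

Both the X86ISD::PSADBW case in computeKnownBitsForTargetNode and the pre-legalization INTRINSIC_WO_CHAIN case for llvm.x86.sse2.psad.bw (and its wider AVX2/AVX-512 counterparts) can defer to one such helper.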
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=SSE,X86-SSE
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=SSE,X64-SSE
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX2

; Only the bottom 16 bits can be set (max sum is 8 * 255 = 2040) - the upper 48 bits are known zero, so the lshr by 48 folds to zero.
define <2 x i64> @combine_psadbw_shift(<16 x i8> %0, <16 x i8> %1) nounwind {
; SSE-LABEL: combine_psadbw_shift:
; SSE:       # %bb.0:
; SSE-NEXT:    xorps %xmm0, %xmm0
; SSE-NEXT:    ret{{[l|q]}}
;
; AVX2-LABEL: combine_psadbw_shift:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vxorps %xmm0, %xmm0, %xmm0
; AVX2-NEXT:    retq
  %3 = tail call <2 x i64> @llvm.x86.sse2.psad.bw(<16 x i8> %0, <16 x i8> %1)
  %4 = lshr <2 x i64> %3, <i64 48, i64 48>
  ret <2 x i64> %4
}

; Propagate the demanded result elements to the 8 aliasing source elements - only the low i64 result is extracted, and it depends only on source bytes 0-7, so the shuffles (which only rearrange bytes 8-15) fold away.
define i64 @combine_psadbw_demandedelt(<16 x i8> %0, <16 x i8> %1) nounwind {
; X86-SSE-LABEL: combine_psadbw_demandedelt:
; X86-SSE:       # %bb.0:
; X86-SSE-NEXT:    psadbw %xmm1, %xmm0
; X86-SSE-NEXT:    movd %xmm0, %eax
; X86-SSE-NEXT:    xorl %edx, %edx
; X86-SSE-NEXT:    retl
;
; X64-SSE-LABEL: combine_psadbw_demandedelt:
; X64-SSE:       # %bb.0:
; X64-SSE-NEXT:    psadbw %xmm1, %xmm0
; X64-SSE-NEXT:    movq %xmm0, %rax
; X64-SSE-NEXT:    retq
;
; AVX2-LABEL: combine_psadbw_demandedelt:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpsadbw %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    vmovq %xmm0, %rax
; AVX2-NEXT:    retq
  %3 = shufflevector <16 x i8> %0, <16 x i8> %0, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11>
  %4 = shufflevector <16 x i8> %1, <16 x i8> %1, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 12, i32 13, i32 14, i32 15, i32 8, i32 9, i32 10, i32 11>
  %5 = tail call <2 x i64> @llvm.x86.sse2.psad.bw(<16 x i8> %3, <16 x i8> %4)
  %6 = extractelement <2 x i64> %5, i32 0
  ret i64 %6
}

; TODO: Each PSADBW source element is masked to a maximum value of 3, so the max sum-of-diffs for each <8 x i8> half is 8 * 3 = 24 - the icmp sgt 32 is always false and should fold to zero (currently only the X86-SSE run folds it).
define <2 x i64> @combine_psadbw_cmp_knownbits(<16 x i8> %a0) nounwind {
; X86-SSE-LABEL: combine_psadbw_cmp_knownbits:
; X86-SSE:       # %bb.0:
; X86-SSE-NEXT:    xorps %xmm0, %xmm0
; X86-SSE-NEXT:    retl
;
; X64-SSE-LABEL: combine_psadbw_cmp_knownbits:
; X64-SSE:       # %bb.0:
; X64-SSE-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; X64-SSE-NEXT:    pxor %xmm1, %xmm1
; X64-SSE-NEXT:    psadbw %xmm1, %xmm0
; X64-SSE-NEXT:    pcmpgtd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; X64-SSE-NEXT:    retq
;
; AVX2-LABEL: combine_psadbw_cmp_knownbits:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX2-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; AVX2-NEXT:    vpsadbw %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    vpcmpgtq {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX2-NEXT:    retq
  %mask = and <16 x i8> %a0, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
  %sad = tail call <2 x i64> @llvm.x86.sse2.psad.bw(<16 x i8> %mask, <16 x i8> zeroinitializer)
  %cmp = icmp sgt <2 x i64> %sad, <i64 32, i64 32>
  %ext = sext <2 x i1> %cmp to <2 x i64>
  ret <2 x i64> %ext
}

; No need to scalarize the sitofp - each PSADBW result is at most 8 * 1 = 8, so the values are known to fit in a signed i32 and the vector cvtdq2pd path can be used.
define <2 x double> @combine_psadbw_sitofp_knownbits(<16 x i8> %a0) nounwind {
; X86-SSE-LABEL: combine_psadbw_sitofp_knownbits:
; X86-SSE:       # %bb.0:
; X86-SSE-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT:    pxor %xmm1, %xmm1
; X86-SSE-NEXT:    psadbw %xmm0, %xmm1
; X86-SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[0,2,2,3]
; X86-SSE-NEXT:    cvtdq2pd %xmm0, %xmm0
; X86-SSE-NEXT:    retl
;
; X64-SSE-LABEL: combine_psadbw_sitofp_knownbits:
; X64-SSE:       # %bb.0:
; X64-SSE-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; X64-SSE-NEXT:    pxor %xmm1, %xmm1
; X64-SSE-NEXT:    psadbw %xmm0, %xmm1
; X64-SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[0,2,2,3]
; X64-SSE-NEXT:    cvtdq2pd %xmm0, %xmm0
; X64-SSE-NEXT:    retq
;
; AVX2-LABEL: combine_psadbw_sitofp_knownbits:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX2-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; AVX2-NEXT:    vpsadbw %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; AVX2-NEXT:    vcvtdq2pd %xmm0, %xmm0
; AVX2-NEXT:    retq
  %mask = and <16 x i8> %a0, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
  %sad = tail call <2 x i64> @llvm.x86.sse2.psad.bw(<16 x i8> %mask, <16 x i8> zeroinitializer)
  %cvt = sitofp <2 x i64> %sad to <2 x double>
  ret <2 x double> %cvt
}

; Convert the uitofp to sitofp - the PSADBW results are zero-extended, so the values are known non-negative.
define <2 x double> @combine_psadbw_uitofp_knownbits(<16 x i8> %a0) nounwind {
; X86-SSE-LABEL: combine_psadbw_uitofp_knownbits:
; X86-SSE:       # %bb.0:
; X86-SSE-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE-NEXT:    pxor %xmm1, %xmm1
; X86-SSE-NEXT:    psadbw %xmm0, %xmm1
; X86-SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[0,2,2,3]
; X86-SSE-NEXT:    cvtdq2pd %xmm0, %xmm0
; X86-SSE-NEXT:    retl
;
; X64-SSE-LABEL: combine_psadbw_uitofp_knownbits:
; X64-SSE:       # %bb.0:
; X64-SSE-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; X64-SSE-NEXT:    pxor %xmm1, %xmm1
; X64-SSE-NEXT:    psadbw %xmm0, %xmm1
; X64-SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[0,2,2,3]
; X64-SSE-NEXT:    cvtdq2pd %xmm0, %xmm0
; X64-SSE-NEXT:    retq
;
; AVX2-LABEL: combine_psadbw_uitofp_knownbits:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX2-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; AVX2-NEXT:    vpsadbw %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; AVX2-NEXT:    vcvtdq2pd %xmm0, %xmm0
; AVX2-NEXT:    retq
  %mask = and <16 x i8> %a0, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
  %sad = tail call <2 x i64> @llvm.x86.sse2.psad.bw(<16 x i8> %mask, <16 x i8> zeroinitializer)
  %cvt = uitofp <2 x i64> %sad to <2 x double>
  ret <2 x double> %cvt
}

declare <2 x i64> @llvm.x86.sse2.psad.bw(<16 x i8>, <16 x i8>)