If we have a store of a load with no other uses in between, the store is considered dead and is removed. So when legalizing a fixed-length vector store of an insert, scalarization can sometimes produce better code than keeping the operation as a vector. An example is the following:

  %a = load <4 x i64>, ptr %x
  %b = insertelement <4 x i64> %a, i64 %y, i32 2
  store <4 x i64> %b, ptr %x

If this is scalarized, DAGCombine removes 3 of the 4 scalar stores as dead, and on RISC-V we get just:

  sd a1, 16(a0)

However, if we make the vector type legal (-mattr=+v), we lose the optimisation because the store is no longer scalarized.

This patch recovers the optimisation for legal vector types by identifying the pattern of a store of a load with a single insert in between, and replacing it with a scalar store of the inserted element.

Reviewed By: RKSimon

Differential Revision: https://reviews.llvm.org/D152276
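In effect, the combine replaces the vector store in the example above with a scalar store of the inserted element at its offset. A minimal sketch of the equivalent IR after the fold (the %elt.addr name is illustrative only; the patch itself performs the fold on SelectionDAG nodes, not on IR):

  %elt.addr = getelementptr inbounds i64, ptr %x, i64 2
  store i64 %y, ptr %elt.addr

Element 2 of <4 x i64> sits 16 bytes into the object, so on RISC-V this lowers to the single sd shown above; the load and the three unchanged elements no longer need to be touched at all.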
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-darwin -mattr=+mmx,+sse2 | FileCheck %s --check-prefix=X86
; RUN: llc < %s -mtriple=x86_64-darwin -mattr=+mmx,+sse4.1 | FileCheck %s --check-prefix=X64

; This is not an MMX operation; promoted to xmm.
define x86_mmx @t0(i32 %A) nounwind {
; X86-LABEL: t0:
; X86:       ## %bb.0:
; X86-NEXT:    movd {{[0-9]+}}(%esp), %mm1
; X86-NEXT:    pxor %mm0, %mm0
; X86-NEXT:    punpckldq %mm1, %mm0 ## mm0 = mm0[0],mm1[0]
; X86-NEXT:    retl
;
; X64-LABEL: t0:
; X64:       ## %bb.0:
; X64-NEXT:    movd %edi, %xmm0
; X64-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,0,1,1]
; X64-NEXT:    retq
  %tmp3 = insertelement <2 x i32> < i32 0, i32 undef >, i32 %A, i32 1
  %tmp4 = bitcast <2 x i32> %tmp3 to x86_mmx
  ret x86_mmx %tmp4
}

define <8 x i8> @t1(i8 zeroext %x) nounwind {
; X86-LABEL: t1:
; X86:       ## %bb.0:
; X86-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X86-NEXT:    retl
;
; X64-LABEL: t1:
; X64:       ## %bb.0:
; X64-NEXT:    movd %edi, %xmm0
; X64-NEXT:    retq
  %r = insertelement <8 x i8> undef, i8 %x, i32 0
  ret <8 x i8> %r
}

; PR2574
define <2 x float> @t2(<2 x float> %a0) {
; X86-LABEL: t2:
; X86:       ## %bb.0:
; X86-NEXT:    xorps %xmm0, %xmm0
; X86-NEXT:    retl
;
; X64-LABEL: t2:
; X64:       ## %bb.0:
; X64-NEXT:    xorps %xmm0, %xmm0
; X64-NEXT:    retq
  %v1 = insertelement <2 x float> %a0, float 0.000000e+00, i32 0
  %v2 = insertelement <2 x float> %v1, float 0.000000e+00, i32 1
  ret <2 x float> %v2
}

@g0 = external global i16
@g1 = external global <4 x i16>

; PR2562
define void @t3() {
; X86-LABEL: t3:
; X86:       ## %bb.0:
; X86-NEXT:    movl L_g0$non_lazy_ptr, %eax
; X86-NEXT:    movzwl (%eax), %eax
; X86-NEXT:    movl L_g1$non_lazy_ptr, %ecx
; X86-NEXT:    movw %ax, (%ecx)
; X86-NEXT:    retl
;
; X64-LABEL: t3:
; X64:       ## %bb.0:
; X64-NEXT:    movq _g0@GOTPCREL(%rip), %rax
; X64-NEXT:    movzwl (%rax), %eax
; X64-NEXT:    movq _g1@GOTPCREL(%rip), %rcx
; X64-NEXT:    movw %ax, (%rcx)
; X64-NEXT:    retq
  load i16, ptr @g0
  load <4 x i16>, ptr @g1
  insertelement <4 x i16> %2, i16 %1, i32 0
  store <4 x i16> %3, ptr @g1
  ret void
}