The SLP vectorizer emits extracts for externally used vectorized scalars and estimates the cost of each such extract. In many cases, however, these scalars feed insertelement instructions that form a buildvector. Instead of an extractelement/insertelement pair, we can cost-model and emit a series of shuffles, which can then be optimized further.

Tested with the test-suite (+SPEC2017): all tests pass, SLP vectorizes more instructions in many cases, and the number of re-vectorization attempts is reduced (previously we could try to vectorize the buildvector insertelements again and again).

Differential Revision: https://reviews.llvm.org/D107966
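For illustration, a minimal sketch of the kind of rewrite this enables (hypothetical IR, not taken from the patch; %vec stands for an already-vectorized value whose lanes are used externally):

  ; Before: each externally used lane is extracted and re-inserted to build the wider vector.
  %e0 = extractelement <2 x i32> %vec, i32 0
  %e1 = extractelement <2 x i32> %vec, i32 1
  %b0 = insertelement <4 x i32> poison, i32 %e0, i32 0
  %b1 = insertelement <4 x i32> %b0, i32 %e1, i32 1
  ; After: the same buildvector expressed as a single shuffle, costed with the shuffle cost model.
  %b = shufflevector <2 x i32> %vec, <2 x i32> poison, <4 x i32> <i32 0, i32 1, i32 undef, i32 undef>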
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -S -slp-vectorizer < %s | FileCheck %s

define i64 @test() {
; CHECK-LABEL: @test(
; CHECK-NEXT:  bb1:
; CHECK-NEXT:    br label [[BB3:%.*]]
; CHECK:       bb2:
; CHECK-NEXT:    br label [[BB3]]
; CHECK:       bb3:
; CHECK-NEXT:    [[TMP0:%.*]] = phi <2 x i32> [ poison, [[BB2:%.*]] ], [ zeroinitializer, [[BB1:%.*]] ]
; CHECK-NEXT:    [[SHUFFLE:%.*]] = shufflevector <2 x i32> [[TMP0]], <2 x i32> poison, <4 x i32> <i32 0, i32 0, i32 0, i32 1>
; CHECK-NEXT:    [[TMP1:%.*]] = shufflevector <2 x i32> [[TMP0]], <2 x i32> poison, <8 x i32> zeroinitializer
; CHECK-NEXT:    [[TMP2:%.*]] = call i32 @llvm.vector.reduce.mul.v8i32(<8 x i32> [[TMP1]])
; CHECK-NEXT:    [[TMP3:%.*]] = call i32 @llvm.vector.reduce.mul.v4i32(<4 x i32> [[SHUFFLE]])
; CHECK-NEXT:    [[OP_RDX:%.*]] = mul i32 [[TMP2]], [[TMP3]]
; CHECK-NEXT:    [[TMP65:%.*]] = sext i32 [[OP_RDX]] to i64
; CHECK-NEXT:    ret i64 [[TMP65]]
;
bb1:
  br label %bb3

bb2:
  br label %bb3

bb3:
  %tmp = phi i32 [ 0, %bb2 ], [ 0, %bb1 ]
  %tmp4 = phi i32 [ 0, %bb2 ], [ 0, %bb1 ]
  %tmp5 = mul i32 %tmp, %tmp4
  %tmp6 = mul i32 %tmp5, %tmp4
  %tmp7 = mul i32 %tmp6, %tmp4
  %tmp8 = mul i32 %tmp7, %tmp4
  %tmp9 = mul i32 %tmp8, %tmp4
  %tmp10 = mul i32 %tmp9, %tmp4
  %tmp11 = mul i32 %tmp10, %tmp4
  %tmp12 = mul i32 %tmp11, %tmp4
  %tmp13 = mul i32 %tmp12, %tmp4
  %tmp14 = mul i32 %tmp13, %tmp4
  %tmp15 = mul i32 %tmp14, %tmp4
  %tmp65 = sext i32 %tmp15 to i64
  ret i64 %tmp65
}