This patch replaces the tight hard cut-off for the number of runtime checks with a more accurate cost-driven approach. The new approach allows vectorization with a larger number of runtime checks in general, but only executes the vector loop (and runtime checks) if considered profitable at runtime. Profitable here means that the cost-model indicates that the runtime check cost + vector loop cost < scalar loop cost. To do that, LV computes the minimum trip count for which runtime check cost + vector-loop-cost < scalar loop cost. Note that there is still a hard cut-off to avoid excessive compile-time/code-size increases, but it is much larger than the original limit. The performance impact on standard test-suites like SPEC2000/SPEC2006/MultiSource is mostly neutral, but the new approach can give substantial gains in cases where we failed to vectorize before due to the over-aggressive cut-offs. On AArch64 with -O3, I didn't observe any regressions outside the noise level (<0.4%) and there are the following execution time improvements. Both `IRSmk` and `srad` are relatively short running, but the changes are far above the noise level for them on my benchmark system.
``` CFP2006/447.dealII/447.dealII -1.9% CINT2017rate/525.x264_r/525.x264_r -2.2% ASC_Sequoia/IRSmk/IRSmk -9.2% Rodinia/srad/srad -36.1% ``` `size` regressions on AArch64 with -O3 are ``` MultiSource/Applications/hbd/hbd 90256.00 106768.00 18.3% MultiSourc...ks/ASCI_Purple/SMG2000/smg2000 240676.00 257268.00 6.9% MultiSourc...enchmarks/mafft/pairlocalalign 472603.00 489131.00 3.5% External/S...2017rate/525.x264_r/525.x264_r 613831.00 630343.00 2.7% External/S...NT2006/464.h264ref/464.h264ref 818920.00 835448.00 2.0% External/S...te/538.imagick_r/538.imagick_r 1994730.00 2027754.00 1.7% MultiSourc...nchmarks/tramp3d-v4/tramp3d-v4 1236471.00 1253015.00 1.3% MultiSource/Applications/oggenc/oggenc 2108147.00 2124675.00 0.8% External/S.../CFP2006/447.dealII/447.dealII 4742999.00 4759559.00 0.3% External/S...rate/510.parest_r/510.parest_r 14206377.00 14239433.00 0.2% ``` Reviewed By: lebedev.ri, ebrevnov, dmgreen Differential Revision: https://reviews.llvm.org/D109368
106 lines
4.2 KiB
LLVM
; REQUIRES: asserts
; RUN: opt -passes='loop-vectorize' -mtriple=x86_64-unknown-linux -S -debug %s 2>&1 | FileCheck %s

target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux"

declare double @llvm.pow.f64(double, double)

; Test case where the memory runtime checks and vector body is more expensive
; than running the scalar loop.
; The loop has a known trip count of 16, which is below the minimum profitable
; trip count (70) computed from runtime-check cost + vector-loop cost, so the
; vectorizer must leave the scalar loop in place (no vector.memcheck /
; vector.body blocks may be emitted).
define void @test(double* nocapture %A, double* nocapture %B, double* nocapture %C, double* nocapture %D, double* nocapture %E) {
; The debug output first itemizes the cost of each runtime-check instruction
; (GEPs/bitcasts are free; each compare/and/or costs 1), then the total.
; CHECK: Calculating cost of runtime checks:
; CHECK-NEXT: 0 for {{.+}} = getelementptr double, double* %A, i64 16
; CHECK-NEXT: 0 for {{.+}} = bitcast double*
; CHECK-NEXT: 0 for {{.+}} = getelementptr double, double* %B, i64 16
; CHECK-NEXT: 0 for {{.+}} = bitcast double*
; CHECK-NEXT: 0 for {{.+}} = getelementptr double, double* %E, i64 16
; CHECK-NEXT: 0 for {{.+}} = bitcast double*
; CHECK-NEXT: 0 for {{.+}} = getelementptr double, double* %C, i64 16
; CHECK-NEXT: 0 for {{.+}} = bitcast double*
; CHECK-NEXT: 0 for {{.+}} = getelementptr double, double* %D, i64 16
; CHECK-NEXT: 0 for {{.+}} = bitcast double*
; CHECK-NEXT: 1 for {{.+}} = icmp ult i8*
; CHECK-NEXT: 1 for {{.+}} = icmp ult i8*
; CHECK-NEXT: 1 for {{.+}} = and i1
; CHECK-NEXT: 1 for {{.+}} = icmp ult i8*
; CHECK-NEXT: 1 for {{.+}} = icmp ult i8*
; CHECK-NEXT: 1 for {{.+}} = and i1
; CHECK-NEXT: 1 for {{.+}} = or i1
; CHECK-NEXT: 1 for {{.+}} = icmp ult i8*
; CHECK-NEXT: 1 for {{.+}} = icmp ult i8*
; CHECK-NEXT: 1 for {{.+}} = and i1
; CHECK-NEXT: 1 for {{.+}} = or i1
; CHECK-NEXT: 1 for {{.+}} = icmp ult i8*
; CHECK-NEXT: 1 for {{.+}} = icmp ult i8*
; CHECK-NEXT: 1 for {{.+}} = and i1
; CHECK-NEXT: 1 for {{.+}} = or i1
; CHECK-NEXT: 1 for {{.+}} = icmp ult i8*
; CHECK-NEXT: 1 for {{.+}} = icmp ult i8*
; CHECK-NEXT: 1 for {{.+}} = and i1
; CHECK-NEXT: 1 for {{.+}} = or i1
; CHECK-NEXT: 1 for {{.+}} = icmp ult i8*
; CHECK-NEXT: 1 for {{.+}} = icmp ult i8*
; CHECK-NEXT: 1 for {{.+}} = and i1
; CHECK-NEXT: 1 for {{.+}} = or i1
; CHECK-NEXT: 1 for {{.+}} = icmp ult i8*
; CHECK-NEXT: 1 for {{.+}} = icmp ult i8*
; CHECK-NEXT: 1 for {{.+}} = and i1
; CHECK-NEXT: 1 for {{.+}} = or i1
; CHECK-NEXT: 1 for {{.+}} = icmp ult i8*
; CHECK-NEXT: 1 for {{.+}} = icmp ult i8*
; CHECK-NEXT: 1 for {{.+}} = and i1
; CHECK-NEXT: 1 for {{.+}} = or i1
; CHECK-NEXT: 1 for {{.+}} = icmp ult i8*
; CHECK-NEXT: 1 for {{.+}} = icmp ult i8*
; CHECK-NEXT: 1 for {{.+}} = and i1
; CHECK-NEXT: 1 for {{.+}} = or i1
; CHECK-NEXT: Total cost of runtime checks: 35

; CHECK: LV: Vectorization is not beneficial: expected trip count < minimum profitable VF (16 < 70)
;
; CHECK-LABEL: @test(
; CHECK-NEXT: entry:
; CHECK-NEXT: br label %for.body
; CHECK-NOT: vector.memcheck
; CHECK-NOT: vector.body
;
entry:
  br label %for.body

for.body:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %gep.A = getelementptr inbounds double, double* %A, i64 %iv
  %l.A = load double, double* %gep.A, align 4
  store double 0.0, double* %gep.A, align 4
  %p.1 = call double @llvm.pow.f64(double %l.A, double 2.0)

  %gep.B = getelementptr inbounds double, double* %B, i64 %iv
  %l.B = load double, double* %gep.B, align 4
  %p.2 = call double @llvm.pow.f64(double %l.B, double %p.1)
  store double 0.0, double* %gep.B, align 4

  %gep.C = getelementptr inbounds double, double* %C, i64 %iv
  %l.C = load double, double* %gep.C, align 4
  %p.3 = call double @llvm.pow.f64(double %p.1, double %l.C)

  %gep.D = getelementptr inbounds double, double* %D, i64 %iv
  %l.D = load double, double* %gep.D
  %p.4 = call double @llvm.pow.f64(double %p.3, double %l.D)
  %p.5 = call double @llvm.pow.f64(double %p.4, double %p.3)
  %mul = fmul double 2.0, %p.5
  %mul.2 = fmul double %mul, 2.0
  %mul.3 = fmul double %mul, %mul.2
  %gep.E = getelementptr inbounds double, double* %E, i64 %iv
  store double %mul.3, double* %gep.E, align 4
  %iv.next = add i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, 16
  br i1 %exitcond, label %for.end, label %for.body

for.end:
  ret void
}
|