It would waste time to specialize a function which would ultimately be inlined. This patch does two things: - Don't specialize functions which are always-inline. - Don't specialize functions whose number of lines of code is less than a threshold (100 by default). For spec2017int, this patch reduces the number of specialized functions by 33%, and compile time did not increase for any benchmark. Reviewed By: SjoerdMeijer, xbolva00, snehasish Differential Revision: https://reviews.llvm.org/D107897
; File metadata (from extraction): 40 lines, 808 B, LLVM
; REQUIRES: asserts
; RUN: opt -stats -function-specialization -S -force-function-specialization < %s 2>&1 | FileCheck %s

; Two specializations of @compute are expected: one per constant %binop
; (@plus and @minus) seen at its call sites in @main.
; CHECK: 2 function-specialization - Number of functions specialized
; Entry point: picks one of two binary-op callbacks at runtime. Each call
; site passes a different constant function pointer to @compute, giving the
; function-specialization pass two distinct constant arguments to clone for.
define i64 @main(i64 %x, i1 %flag) {
entry:
  br i1 %flag, label %plus, label %minus

plus:                                             ; %flag true: increment path
  %tmp0 = call i64 @compute(i64 %x, i64 (i64)* @plus)
  br label %merge

minus:                                            ; %flag false: decrement path
  %tmp1 = call i64 @compute(i64 %x, i64 (i64)* @minus)
  br label %merge

merge:                                            ; join the two results
  %tmp2 = phi i64 [ %tmp0, %plus ], [ %tmp1, %minus ]
  ret i64 %tmp2
}

; Indirect-call wrapper: returns %binop(%x). Because every caller passes a
; known constant for %binop, the specializer can clone this function per
; callee, turning the indirect call into a direct one.
define internal i64 @compute(i64 %x, i64 (i64)* %binop) {
entry:
  %tmp0 = call i64 %binop(i64 %x)
  ret i64 %tmp0
}

; Increment callback: returns %x + 1. Passed by address to @compute.
define internal i64 @plus(i64 %x) {
entry:
  %tmp0 = add i64 %x, 1
  ret i64 %tmp0
}

; Decrement callback: returns %x - 1. Passed by address to @compute.
define internal i64 @minus(i64 %x) {
entry:
  %tmp0 = sub i64 %x, 1
  ret i64 %tmp0
}