Use the LLVM flag `-pgo-function-entry-coverage` to create single-byte "counters" that track function coverage. This mode has significantly less size overhead in both code and data because:

* We mark a function as "covered" with a store instead of an increment, which generally requires fewer assembly instructions (see the sketch below).
* We use a single byte per function rather than 8 bytes per block.

The trade-off, of course, is that this mode only tells you whether a function has been covered. This is useful, for example, to detect dead code.

When combined with debug info correlation [0], we are able to create an instrumented Clang binary that is only 150M (the vanilla Clang binary is 143M). That is an overhead of 7M (4.9%), compared to the default instrumentation (without value profiling), which has an overhead of 31M (21.7%).

[0] https://groups.google.com/g/llvm-dev/c/r03Z6JoN7d4

Reviewed By: kyulee

Differential Revision: https://reviews.llvm.org/D116180
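To illustrate the difference, here is a rough sketch of the lowered instrumentation in each mode. The counter global @__profc_foo and its exact addressing are simplified placeholders, not the precise IR the lowering pass emits:

; Default instrumentation: an 8-byte counter per basic block is incremented.
%pgocount = load i64, ptr @__profc_foo
%inc = add i64 %pgocount, 1
store i64 %inc, ptr @__profc_foo

; Function entry coverage: one byte per function is marked with a plain store,
; so no load/add pair is needed (constant and addressing simplified here).
store i8 0, ptr @__profc_foo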
; RUN: opt < %s -passes=pgo-instr-gen -pgo-function-entry-coverage -S | FileCheck %s
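; In function entry coverage mode, pgo-instr-gen should emit a single
; llvm.instrprof.cover call in the entry block and no further instrumentation
; calls in the rest of the function; the CHECK/CHECK-NOT lines below verify this.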
target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"

define i32 @foo(i32 %i) {
entry:
  ; CHECK: call void @llvm.instrprof.cover({{.*}})
  %cmp = icmp sgt i32 %i, 0
  br i1 %cmp, label %if.then, label %if.else

if.then:
  ; CHECK-NOT: llvm.instrprof.cover(
  %add = add nsw i32 %i, 2
  %s = select i1 %cmp, i32 %add, i32 0
  br label %if.end

if.else:
  %sub = sub nsw i32 %i, 2
  br label %if.end

if.end:
  %retv = phi i32 [ %add, %if.then ], [ %sub, %if.else ]
  ret i32 %retv
}

; CHECK: declare void @llvm.instrprof.cover(
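To see the final single-byte counter update rather than the intrinsic, the lowering pass can be appended to the pipeline. An illustrative command line, assuming the new pass manager's instrprof lowering pass handles llvm.instrprof.cover and with coverage.ll standing in for this test file:

opt -passes='pgo-instr-gen,instrprof' -pgo-function-entry-coverage -S coverage.ll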