Files
clang-p2996/llvm/test/CodeGen/X86/pcsections-memops.ll
Marco Elver bdb4353ae0 [SelectionDAG] Optimize copyExtraInfo deep copy
It turns out that there are relatively trivial, albeit rare, cases that
require a MaxDepth of more than 16 (see added test). However, we want to
avoid having to rely on a large fixed MaxDepth.

Since these cases are relatively rare, apply the following strategy:

  1. Start with a low MaxDepth of 16 - if the entry node was not
     reached, we can return (the common case).

  2. If the entry node was reached, exponentially increase MaxDepth up
     to some large limit that should cover all cases and guard against
     stack exhaustion.

This retains the better performance with a low MaxDepth in the common
case, and in complex cases backs off and retries. On the whole, this is
preferable vs. starting with a large MaxDepth which would unnecessarily
penalize the common case where a low MaxDepth is sufficient.

Reviewed By: dvyukov

Differential Revision: https://reviews.llvm.org/D145386
2023-03-06 17:29:53 +01:00

245 lines
9.8 KiB
LLVM

; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; Same IR is checked at every optimization level; -O1/-O2/-O3 share the OPT
; prefix because they all fold the load/add/store into a single RMW incq.
; RUN: llc -O0 < %s | FileCheck %s --check-prefixes=O0
; RUN: llc -O1 < %s | FileCheck %s --check-prefixes=OPT
; RUN: llc -O2 < %s | FileCheck %s --check-prefixes=OPT
; RUN: llc -O3 < %s | FileCheck %s --check-prefixes=OPT
target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"
; Many distinct gcov-style i64 counters. Each gets an independent
; load/add/store chain below; the large number of independent chains is what
; exercises deep extra-info copying in SelectionDAG (see D145386, which this
; test accompanies — NOTE(review): inferred from the attached commit message).
@__llvm_gcov_ctr.89 = internal global [2 x i64] zeroinitializer
@__llvm_gcov_ctr.109 = internal global [2 x i64] zeroinitializer
@__llvm_gcov_ctr.112 = internal global [2 x i64] zeroinitializer
@__llvm_gcov_ctr.113 = internal global [2 x i64] zeroinitializer
@__llvm_gcov_ctr.154 = internal global [1 x i64] zeroinitializer
@__llvm_gcov_ctr.155 = internal global [2 x i64] zeroinitializer
@__llvm_gcov_ctr.156 = internal global [2 x i64] zeroinitializer
@__llvm_gcov_ctr.157 = internal global [2 x i64] zeroinitializer
@__llvm_gcov_ctr.160 = internal global [2 x i64] zeroinitializer
@__llvm_gcov_ctr.161 = internal global [2 x i64] zeroinitializer
@__llvm_gcov_ctr.189 = internal global [2 x i64] zeroinitializer
@__llvm_gcov_ctr.1109 = internal global [2 x i64] zeroinitializer
@__llvm_gcov_ctr.1112 = internal global [2 x i64] zeroinitializer
@__llvm_gcov_ctr.1113 = internal global [2 x i64] zeroinitializer
@__llvm_gcov_ctr.1154 = internal global [1 x i64] zeroinitializer
@__llvm_gcov_ctr.1155 = internal global [2 x i64] zeroinitializer
@__llvm_gcov_ctr.1156 = internal global [2 x i64] zeroinitializer
@__llvm_gcov_ctr.1157 = internal global [2 x i64] zeroinitializer
@__llvm_gcov_ctr.1160 = internal global [2 x i64] zeroinitializer
@__llvm_gcov_ctr.1161 = internal global [2 x i64] zeroinitializer
; Increments 21 independent global counters; every load and store carries
; !pcsections !0. The checks verify each !pcsections-tagged machine memory
; access gets its own .LpcsectionN label: at -O0 the load and the store each
; get a label (40 labels); at -O1 and above the triple is folded into one
; RMW incq that still retains a label (20 labels). All check lines below are
; autogenerated — regenerate with update_llc_test_checks.py, do not hand-edit.
define dso_local void @independent_load_stores() {
; O0-LABEL: independent_load_stores:
; O0: # %bb.0:
; O0-NEXT: .Lpcsection0:
; O0-NEXT: movq __llvm_gcov_ctr.109, %rax
; O0-NEXT: addq $1, %rax
; O0-NEXT: .Lpcsection1:
; O0-NEXT: movq %rax, __llvm_gcov_ctr.109
; O0-NEXT: .Lpcsection2:
; O0-NEXT: movq __llvm_gcov_ctr.112, %rax
; O0-NEXT: addq $1, %rax
; O0-NEXT: .Lpcsection3:
; O0-NEXT: movq %rax, __llvm_gcov_ctr.112
; O0-NEXT: .Lpcsection4:
; O0-NEXT: movq __llvm_gcov_ctr.113, %rax
; O0-NEXT: addq $1, %rax
; O0-NEXT: .Lpcsection5:
; O0-NEXT: movq %rax, __llvm_gcov_ctr.113
; O0-NEXT: .Lpcsection6:
; O0-NEXT: movq __llvm_gcov_ctr.154, %rax
; O0-NEXT: addq $1, %rax
; O0-NEXT: .Lpcsection7:
; O0-NEXT: movq %rax, __llvm_gcov_ctr.154
; O0-NEXT: .Lpcsection8:
; O0-NEXT: movq __llvm_gcov_ctr.155, %rax
; O0-NEXT: addq $1, %rax
; O0-NEXT: .Lpcsection9:
; O0-NEXT: movq %rax, __llvm_gcov_ctr.155
; O0-NEXT: .Lpcsection10:
; O0-NEXT: movq __llvm_gcov_ctr.89, %rax
; O0-NEXT: addq $1, %rax
; O0-NEXT: .Lpcsection11:
; O0-NEXT: movq %rax, __llvm_gcov_ctr.89
; O0-NEXT: .Lpcsection12:
; O0-NEXT: movq __llvm_gcov_ctr.160, %rax
; O0-NEXT: addq $1, %rax
; O0-NEXT: .Lpcsection13:
; O0-NEXT: movq %rax, __llvm_gcov_ctr.160
; O0-NEXT: .Lpcsection14:
; O0-NEXT: movq __llvm_gcov_ctr.156, %rax
; O0-NEXT: addq $1, %rax
; O0-NEXT: .Lpcsection15:
; O0-NEXT: movq %rax, __llvm_gcov_ctr.156
; O0-NEXT: .Lpcsection16:
; O0-NEXT: movq __llvm_gcov_ctr.157, %rax
; O0-NEXT: addq $1, %rax
; O0-NEXT: .Lpcsection17:
; O0-NEXT: movq %rax, __llvm_gcov_ctr.157
; O0-NEXT: .Lpcsection18:
; O0-NEXT: movq __llvm_gcov_ctr.161, %rax
; O0-NEXT: addq $1, %rax
; O0-NEXT: .Lpcsection19:
; O0-NEXT: movq %rax, __llvm_gcov_ctr.161
; O0-NEXT: .Lpcsection20:
; O0-NEXT: movq __llvm_gcov_ctr.1109, %rax
; O0-NEXT: addq $1, %rax
; O0-NEXT: .Lpcsection21:
; O0-NEXT: movq %rax, __llvm_gcov_ctr.1109
; O0-NEXT: .Lpcsection22:
; O0-NEXT: movq __llvm_gcov_ctr.1112, %rax
; O0-NEXT: addq $1, %rax
; O0-NEXT: .Lpcsection23:
; O0-NEXT: movq %rax, __llvm_gcov_ctr.1112
; O0-NEXT: .Lpcsection24:
; O0-NEXT: movq __llvm_gcov_ctr.1113, %rax
; O0-NEXT: addq $1, %rax
; O0-NEXT: .Lpcsection25:
; O0-NEXT: movq %rax, __llvm_gcov_ctr.1113
; O0-NEXT: .Lpcsection26:
; O0-NEXT: movq __llvm_gcov_ctr.1154, %rax
; O0-NEXT: addq $1, %rax
; O0-NEXT: .Lpcsection27:
; O0-NEXT: movq %rax, __llvm_gcov_ctr.1154
; O0-NEXT: .Lpcsection28:
; O0-NEXT: movq __llvm_gcov_ctr.1155, %rax
; O0-NEXT: addq $1, %rax
; O0-NEXT: .Lpcsection29:
; O0-NEXT: movq %rax, __llvm_gcov_ctr.1155
; O0-NEXT: .Lpcsection30:
; O0-NEXT: movq __llvm_gcov_ctr.189, %rax
; O0-NEXT: addq $1, %rax
; O0-NEXT: .Lpcsection31:
; O0-NEXT: movq %rax, __llvm_gcov_ctr.189
; O0-NEXT: .Lpcsection32:
; O0-NEXT: movq __llvm_gcov_ctr.1160, %rax
; O0-NEXT: addq $1, %rax
; O0-NEXT: .Lpcsection33:
; O0-NEXT: movq %rax, __llvm_gcov_ctr.1160
; O0-NEXT: .Lpcsection34:
; O0-NEXT: movq __llvm_gcov_ctr.1156, %rax
; O0-NEXT: addq $1, %rax
; O0-NEXT: .Lpcsection35:
; O0-NEXT: movq %rax, __llvm_gcov_ctr.1156
; O0-NEXT: .Lpcsection36:
; O0-NEXT: movq __llvm_gcov_ctr.1157, %rax
; O0-NEXT: addq $1, %rax
; O0-NEXT: .Lpcsection37:
; O0-NEXT: movq %rax, __llvm_gcov_ctr.1157
; O0-NEXT: .Lpcsection38:
; O0-NEXT: movq __llvm_gcov_ctr.1161, %rax
; O0-NEXT: addq $1, %rax
; O0-NEXT: .Lpcsection39:
; O0-NEXT: movq %rax, __llvm_gcov_ctr.1161
; O0-NEXT: retq
;
; OPT-LABEL: independent_load_stores:
; OPT: # %bb.0:
; OPT-NEXT: .Lpcsection0:
; OPT-NEXT: incq __llvm_gcov_ctr.109(%rip)
; OPT-NEXT: .Lpcsection1:
; OPT-NEXT: incq __llvm_gcov_ctr.112(%rip)
; OPT-NEXT: .Lpcsection2:
; OPT-NEXT: incq __llvm_gcov_ctr.113(%rip)
; OPT-NEXT: .Lpcsection3:
; OPT-NEXT: incq __llvm_gcov_ctr.154(%rip)
; OPT-NEXT: .Lpcsection4:
; OPT-NEXT: incq __llvm_gcov_ctr.155(%rip)
; OPT-NEXT: .Lpcsection5:
; OPT-NEXT: incq __llvm_gcov_ctr.89(%rip)
; OPT-NEXT: .Lpcsection6:
; OPT-NEXT: incq __llvm_gcov_ctr.160(%rip)
; OPT-NEXT: .Lpcsection7:
; OPT-NEXT: incq __llvm_gcov_ctr.156(%rip)
; OPT-NEXT: .Lpcsection8:
; OPT-NEXT: incq __llvm_gcov_ctr.157(%rip)
; OPT-NEXT: .Lpcsection9:
; OPT-NEXT: incq __llvm_gcov_ctr.161(%rip)
; OPT-NEXT: .Lpcsection10:
; OPT-NEXT: incq __llvm_gcov_ctr.1109(%rip)
; OPT-NEXT: .Lpcsection11:
; OPT-NEXT: incq __llvm_gcov_ctr.1112(%rip)
; OPT-NEXT: .Lpcsection12:
; OPT-NEXT: incq __llvm_gcov_ctr.1113(%rip)
; OPT-NEXT: .Lpcsection13:
; OPT-NEXT: incq __llvm_gcov_ctr.1154(%rip)
; OPT-NEXT: .Lpcsection14:
; OPT-NEXT: incq __llvm_gcov_ctr.1155(%rip)
; OPT-NEXT: .Lpcsection15:
; OPT-NEXT: incq __llvm_gcov_ctr.189(%rip)
; OPT-NEXT: .Lpcsection16:
; OPT-NEXT: incq __llvm_gcov_ctr.1160(%rip)
; OPT-NEXT: .Lpcsection17:
; OPT-NEXT: incq __llvm_gcov_ctr.1156(%rip)
; OPT-NEXT: .Lpcsection18:
; OPT-NEXT: incq __llvm_gcov_ctr.1157(%rip)
; OPT-NEXT: .Lpcsection19:
; OPT-NEXT: incq __llvm_gcov_ctr.1161(%rip)
; OPT-NEXT: retq
; IR body: 21 load/add-1/store triples, one per counter, each memory access
; tagged with !pcsections !0. The triples touch distinct globals, so they are
; mutually independent in the DAG.
  %1 = load i64, ptr @__llvm_gcov_ctr.109, align 8, !pcsections !0
  %2 = add i64 %1, 1
  store i64 %2, ptr @__llvm_gcov_ctr.109, align 8, !pcsections !0
  %3 = load i64, ptr @__llvm_gcov_ctr.112, align 8, !pcsections !0
  %4 = add i64 %3, 1
  store i64 %4, ptr @__llvm_gcov_ctr.112, align 8, !pcsections !0
  %5 = load i64, ptr @__llvm_gcov_ctr.113, align 8, !pcsections !0
  %6 = add i64 %5, 1
  store i64 %6, ptr @__llvm_gcov_ctr.113, align 8, !pcsections !0
  %7 = load i64, ptr @__llvm_gcov_ctr.154, align 8, !pcsections !0
  %8 = add i64 %7, 1
  store i64 %8, ptr @__llvm_gcov_ctr.154, align 8, !pcsections !0
  %9 = load i64, ptr @__llvm_gcov_ctr.155, align 8, !pcsections !0
  %10 = add i64 %9, 1
  store i64 %10, ptr @__llvm_gcov_ctr.155, align 8, !pcsections !0
  %11 = load i64, ptr @__llvm_gcov_ctr.89, align 8, !pcsections !0
  %12 = add i64 %11, 1
  store i64 %12, ptr @__llvm_gcov_ctr.89, align 8, !pcsections !0
  %13 = load i64, ptr @__llvm_gcov_ctr.160, align 8, !pcsections !0
  %14 = add i64 %13, 1
  store i64 %14, ptr @__llvm_gcov_ctr.160, align 8, !pcsections !0
  %15 = load i64, ptr @__llvm_gcov_ctr.156, align 8, !pcsections !0
  %16 = add i64 %15, 1
  store i64 %16, ptr @__llvm_gcov_ctr.156, align 8, !pcsections !0
  %17 = load i64, ptr @__llvm_gcov_ctr.157, align 8, !pcsections !0
  %18 = add i64 %17, 1
  store i64 %18, ptr @__llvm_gcov_ctr.157, align 8, !pcsections !0
  %19 = load i64, ptr @__llvm_gcov_ctr.161, align 8, !pcsections !0
  %20 = add i64 %19, 1
  store i64 %20, ptr @__llvm_gcov_ctr.161, align 8, !pcsections !0
  %21 = load i64, ptr @__llvm_gcov_ctr.1109, align 8, !pcsections !0
  %22 = add i64 %21, 1
  store i64 %22, ptr @__llvm_gcov_ctr.1109, align 8, !pcsections !0
  %23 = load i64, ptr @__llvm_gcov_ctr.1112, align 8, !pcsections !0
  %24 = add i64 %23, 1
  store i64 %24, ptr @__llvm_gcov_ctr.1112, align 8, !pcsections !0
  %25 = load i64, ptr @__llvm_gcov_ctr.1113, align 8, !pcsections !0
  %26 = add i64 %25, 1
  store i64 %26, ptr @__llvm_gcov_ctr.1113, align 8, !pcsections !0
  %27 = load i64, ptr @__llvm_gcov_ctr.1154, align 8, !pcsections !0
  %28 = add i64 %27, 1
  store i64 %28, ptr @__llvm_gcov_ctr.1154, align 8, !pcsections !0
  %29 = load i64, ptr @__llvm_gcov_ctr.1155, align 8, !pcsections !0
  %30 = add i64 %29, 1
  store i64 %30, ptr @__llvm_gcov_ctr.1155, align 8, !pcsections !0
  %31 = load i64, ptr @__llvm_gcov_ctr.189, align 8, !pcsections !0
  %32 = add i64 %31, 1
  store i64 %32, ptr @__llvm_gcov_ctr.189, align 8, !pcsections !0
  %33 = load i64, ptr @__llvm_gcov_ctr.1160, align 8, !pcsections !0
  %34 = add i64 %33, 1
  store i64 %34, ptr @__llvm_gcov_ctr.1160, align 8, !pcsections !0
  %35 = load i64, ptr @__llvm_gcov_ctr.1156, align 8, !pcsections !0
  %36 = add i64 %35, 1
  store i64 %36, ptr @__llvm_gcov_ctr.1156, align 8, !pcsections !0
  %37 = load i64, ptr @__llvm_gcov_ctr.1157, align 8, !pcsections !0
  %38 = add i64 %37, 1
  store i64 %38, ptr @__llvm_gcov_ctr.1157, align 8, !pcsections !0
  %39 = load i64, ptr @__llvm_gcov_ctr.1161, align 8, !pcsections !0
  %40 = add i64 %39, 1
  store i64 %40, ptr @__llvm_gcov_ctr.1161, align 8, !pcsections !0
  ret void
}
; Metadata node attached as !pcsections !0 above; the string names the
; PC section ("foo") the tagged instruction addresses are recorded under.
!0 = !{!"foo"}