Files
clang-p2996/llvm/test/CodeGen/PowerPC/fast-isel-branch.ll
Jonas Paulsson 5ecd363295 Reapply "[CodeGen] Add new pass for late cleanup of redundant definitions."
This reverts commit 122efef8ee.

- Patch fixed to not reuse definitions from predecessors in EH landing pads.
- Late review suggestions (by MaskRay) have been addressed.
- M68k/pipeline.ll test updated.
- Init captures added in processBlock() to avoid capturing structured bindings.
- RISCV has this disabled for now.

Original commit message:

A new pass MachineLateInstrsCleanup is added to be run after PEI.

This is a simple pass that removes redundant and identical instructions
whenever found by scanning the MF once while keeping track of register
definitions in a map. These instructions are typically immediate loads
resulting from rematerialization, and address loads emitted by target in
eliminateFrameIndex().

This is enabled by default, but a target could easily disable it by means of
'disablePass(&MachineLateInstrsCleanupID);'.

This late cleanup is naturally not "optimal" in removing instructions as it
is done by looking at phys-regs, but still quite effective. It would be
desirable to improve other parts of CodeGen and avoid these redundant
instructions in the first place, but there are no ideas for this yet.

Differential Revision: https://reviews.llvm.org/D123394

Reviewed By: RKSimon, foad, craig.topper, arsenm, asb
2022-12-05 12:53:50 -06:00

106 lines
2.8 KiB
LLVM

; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; Do not hand-edit the CHECK lines below; rerun the script to regenerate them.
; Two configurations are checked: Linux ppc64le (ELF64) and AIX ppc64 (AIX64).
; RUN: llc -mtriple=powerpc64le-unknown-linux-gnu < %s | FileCheck %s -check-prefix=ELF64
; RUN: llc -mtriple=powerpc64-ibm-aix-xcoff < %s | FileCheck %s -check-prefix=AIX64

; Loop bound read by @bar below; initialized to 1000.
@x = global i32 1000, align 4
; Roughly: int bar(void) { for (int i = 0; i < x; ++i) foo(); return 0; }
; The function is optnone (attribute set #0), so the counter lives in a stack
; slot and is reloaded/stored on every iteration — visible as the lwz/stw
; traffic on offset 40(1) / 120(1) in the expected assembly below.
; NOTE(review): the back-to-back "li 3, 0" pairs in both prologues are part of
; the expected output; presumably this test pins that behavior for the late
; cleanup pass described in the commit header — confirm against the pass.
define signext i32 @bar() #0 {
; ELF64-LABEL: bar:
; ELF64: # %bb.0: # %entry
; ELF64-NEXT: mflr 0
; ELF64-NEXT: stdu 1, -48(1)
; ELF64-NEXT: std 0, 64(1)
; ELF64-NEXT: .cfi_def_cfa_offset 48
; ELF64-NEXT: .cfi_offset lr, 16
; ELF64-NEXT: li 3, 0
; ELF64-NEXT: stw 3, 44(1)
; ELF64-NEXT: li 3, 0
; ELF64-NEXT: stw 3, 40(1)
; ELF64-NEXT: .LBB0_1: # %for.cond
; ELF64-NEXT: #
; ELF64-NEXT: lwz 3, 40(1)
; ELF64-NEXT: addis 4, 2, .LC0@toc@ha
; ELF64-NEXT: ld 4, .LC0@toc@l(4)
; ELF64-NEXT: lwz 4, 0(4)
; ELF64-NEXT: cmpw 3, 4
; ELF64-NEXT: bge 0, .LBB0_4
; ELF64-NEXT: # %bb.2: # %for.body
; ELF64-NEXT: #
; ELF64-NEXT: bl foo
; ELF64-NEXT: nop
; ELF64-NEXT: # %bb.3: # %for.inc
; ELF64-NEXT: #
; ELF64-NEXT: lwz 3, 40(1)
; ELF64-NEXT: addi 3, 3, 1
; ELF64-NEXT: stw 3, 40(1)
; ELF64-NEXT: b .LBB0_1
; ELF64-NEXT: .LBB0_4: # %for.end
; ELF64-NEXT: li 3, 0
; ELF64-NEXT: addi 1, 1, 48
; ELF64-NEXT: ld 0, 16(1)
; ELF64-NEXT: mtlr 0
; ELF64-NEXT: blr
;
; AIX64-LABEL: bar:
; AIX64: # %bb.0: # %entry
; AIX64-NEXT: mflr 0
; AIX64-NEXT: stdu 1, -128(1)
; AIX64-NEXT: std 0, 144(1)
; AIX64-NEXT: li 3, 0
; AIX64-NEXT: stw 3, 124(1)
; AIX64-NEXT: li 3, 0
; AIX64-NEXT: stw 3, 120(1)
; AIX64-NEXT: L..BB0_1: # %for.cond
; AIX64-NEXT: #
; AIX64-NEXT: lwz 3, 120(1)
; AIX64-NEXT: ld 4, L..C0(2)
; AIX64-NEXT: lwz 4, 0(4)
; AIX64-NEXT: cmpw 3, 4
; AIX64-NEXT: bge 0, L..BB0_4
; AIX64-NEXT: # %bb.2: # %for.body
; AIX64-NEXT: #
; AIX64-NEXT: bl .foo[PR]
; AIX64-NEXT: nop
; AIX64-NEXT: # %bb.3: # %for.inc
; AIX64-NEXT: #
; AIX64-NEXT: lwz 3, 120(1)
; AIX64-NEXT: addi 3, 3, 1
; AIX64-NEXT: stw 3, 120(1)
; AIX64-NEXT: b L..BB0_1
; AIX64-NEXT: L..BB0_4: # %for.end
; AIX64-NEXT: li 3, 0
; AIX64-NEXT: addi 1, 1, 128
; AIX64-NEXT: ld 0, 16(1)
; AIX64-NEXT: mtlr 0
; AIX64-NEXT: blr
; Two i32 stack slots: retval (always 0) and the loop counter i.
entry:
%retval = alloca i32, align 4
%i = alloca i32, align 4
store i32 0, i32* %retval, align 4
store i32 0, i32* %i, align 4
br label %for.cond
; Loop header: i is compared against the global @x, reloaded each iteration.
for.cond:
%0 = load i32, i32* %i, align 4
%1 = load i32, i32* @x, align 4
%cmp = icmp slt i32 %0, %1
br i1 %cmp, label %for.body, label %for.end
; Loop body: call the external variadic @foo via a bitcast to void()().
for.body:
call void bitcast (void (...)* @foo to void ()*)()
br label %for.inc
; Increment the counter and branch back to the header.
for.inc:
%2 = load i32, i32* %i, align 4
%inc = add nsw i32 %2, 1
store i32 %inc, i32* %i, align 4
br label %for.cond
; Loop exit: bar always returns 0.
for.end:
ret i32 0
}
; External variadic callee invoked from the loop body (appears as
; "bl foo" / "bl .foo[PR]" in the CHECK lines).
declare void @foo(...)

; optnone+noinline keep @bar unoptimized so the naive stack-slot codegen
; checked above is stable.
attributes #0 = { optnone noinline }