; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
; RUN: llc -verify-machineinstrs -mcpu=pwr7 -mattr=-altivec -mtriple powerpc-ibm-aix-xcoff \
; RUN:   -code-model=small -stop-after=machine-cp < %s | FileCheck \
; RUN:   --check-prefix=32SMALL-MIR %s

; RUN: llc -verify-machineinstrs -mcpu=pwr7 -mattr=-altivec -mtriple powerpc-ibm-aix-xcoff \
; RUN:   -code-model=large -stop-after=machine-cp < %s | FileCheck \
; RUN:   --check-prefix=32LARGE-MIR %s

; RUN: llc -verify-machineinstrs -mcpu=pwr7 -mattr=-altivec -mtriple powerpc64-ibm-aix-xcoff \
; RUN:   -code-model=small -stop-after=machine-cp < %s | FileCheck \
; RUN:   --check-prefix=64SMALL-MIR %s

; RUN: llc -verify-machineinstrs -mcpu=pwr7 -mattr=-altivec -mtriple powerpc64-ibm-aix-xcoff \
; RUN:   -code-model=large -stop-after=machine-cp < %s | FileCheck \
; RUN:   --check-prefix=64LARGE-MIR %s

; RUN: llc -mtriple powerpc-ibm-aix-xcoff -code-model=small < %s | FileCheck \
; RUN:   --check-prefixes=32SMALL-ASM,SMALL-ASM %s

; RUN: llc -mtriple powerpc-ibm-aix-xcoff -code-model=large < %s | FileCheck \
; RUN:   --check-prefixes=32LARGE-ASM,LARGE-ASM %s

; RUN: llc -mtriple powerpc64-ibm-aix-xcoff -code-model=small < %s | FileCheck \
; RUN:   --check-prefixes=64SMALL-ASM,SMALL-ASM %s

; RUN: llc -mtriple powerpc64-ibm-aix-xcoff -code-model=large < %s | FileCheck \
; RUN:   --check-prefixes=64LARGE-ASM,LARGE-ASM %s

; RUN: llc -mtriple powerpc-ibm-aix-xcoff -function-sections < %s | FileCheck \
; RUN:   --check-prefix=FUNC-ASM %s

; RUN: llc -mtriple powerpc64-ibm-aix-xcoff -function-sections < %s | FileCheck \
; RUN:   --check-prefix=FUNC-ASM %s

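;; Check jump table lowering on AIX: the jump table address is loaded from the
;; TOC and dispatched through mtctr/bctr, for 32-bit and 64-bit targets under
;; both the small and large code models, and with -function-sections.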
define i32 @jump_table(i32 %a) {
; 32SMALL-ASM-LABEL: jump_table:
; 32SMALL-ASM:       # %bb.0: # %entry
; 32SMALL-ASM-NEXT:    addi 3, 3, -1
; 32SMALL-ASM-NEXT:    cmplwi 3, 3
; 32SMALL-ASM-NEXT:    bgt 0, L..BB0_3
; 32SMALL-ASM-NEXT:  # %bb.1: # %entry
; 32SMALL-ASM-NEXT:    lwz 4, L..C0(2) # %jump-table.0
; 32SMALL-ASM-NEXT:    slwi 3, 3, 2
; 32SMALL-ASM-NEXT:    lwzx 3, 3, 4
; 32SMALL-ASM-NEXT:    add 3, 3, 4
; 32SMALL-ASM-NEXT:    mtctr 3
; 32SMALL-ASM-NEXT:    bctr
; 32SMALL-ASM-NEXT:  L..BB0_2: # %sw.bb
; 32SMALL-ASM-NEXT:    #APP
; 32SMALL-ASM-NEXT:    #NO_APP
; 32SMALL-ASM-NEXT:  L..BB0_3: # %sw.epilog
; 32SMALL-ASM-NEXT:    li 3, 0
; 32SMALL-ASM-NEXT:    blr
; 32SMALL-ASM-NEXT:  L..BB0_4: # %sw.bb1
; 32SMALL-ASM-NEXT:    li 3, 0
; 32SMALL-ASM-NEXT:    #APP
; 32SMALL-ASM-NEXT:    #NO_APP
; 32SMALL-ASM-NEXT:    blr
; 32SMALL-ASM-NEXT:  L..BB0_5: # %sw.bb2
; 32SMALL-ASM-NEXT:    li 3, 0
; 32SMALL-ASM-NEXT:    #APP
; 32SMALL-ASM-NEXT:    #NO_APP
; 32SMALL-ASM-NEXT:    blr
; 32SMALL-ASM-NEXT:  L..BB0_6: # %sw.bb3
; 32SMALL-ASM-NEXT:    li 3, 0
; 32SMALL-ASM-NEXT:    #APP
; 32SMALL-ASM-NEXT:    #NO_APP
; 32SMALL-ASM-NEXT:    blr
;
; 32LARGE-ASM-LABEL: jump_table:
; 32LARGE-ASM:       # %bb.0: # %entry
; 32LARGE-ASM-NEXT:    addi 3, 3, -1
; 32LARGE-ASM-NEXT:    cmplwi 3, 3
; 32LARGE-ASM-NEXT:    bgt 0, L..BB0_3
; 32LARGE-ASM-NEXT:  # %bb.1: # %entry
; 32LARGE-ASM-NEXT:    addis 4, L..C0@u(2)
; 32LARGE-ASM-NEXT:    slwi 3, 3, 2
; 32LARGE-ASM-NEXT:    lwz 4, L..C0@l(4)
; 32LARGE-ASM-NEXT:    lwzx 3, 3, 4
; 32LARGE-ASM-NEXT:    add 3, 3, 4
; 32LARGE-ASM-NEXT:    mtctr 3
; 32LARGE-ASM-NEXT:    bctr
; 32LARGE-ASM-NEXT:  L..BB0_2: # %sw.bb
; 32LARGE-ASM-NEXT:    #APP
; 32LARGE-ASM-NEXT:    #NO_APP
; 32LARGE-ASM-NEXT:  L..BB0_3: # %sw.epilog
; 32LARGE-ASM-NEXT:    li 3, 0
; 32LARGE-ASM-NEXT:    blr
; 32LARGE-ASM-NEXT:  L..BB0_4: # %sw.bb1
; 32LARGE-ASM-NEXT:    li 3, 0
; 32LARGE-ASM-NEXT:    #APP
; 32LARGE-ASM-NEXT:    #NO_APP
; 32LARGE-ASM-NEXT:    blr
; 32LARGE-ASM-NEXT:  L..BB0_5: # %sw.bb2
; 32LARGE-ASM-NEXT:    li 3, 0
; 32LARGE-ASM-NEXT:    #APP
; 32LARGE-ASM-NEXT:    #NO_APP
; 32LARGE-ASM-NEXT:    blr
; 32LARGE-ASM-NEXT:  L..BB0_6: # %sw.bb3
; 32LARGE-ASM-NEXT:    li 3, 0
; 32LARGE-ASM-NEXT:    #APP
; 32LARGE-ASM-NEXT:    #NO_APP
; 32LARGE-ASM-NEXT:    blr
;
; 64SMALL-ASM-LABEL: jump_table:
; 64SMALL-ASM:       # %bb.0: # %entry
; 64SMALL-ASM-NEXT:    addi 3, 3, -1
; 64SMALL-ASM-NEXT:    cmplwi 3, 3
; 64SMALL-ASM-NEXT:    bgt 0, L..BB0_3
; 64SMALL-ASM-NEXT:  # %bb.1: # %entry
; 64SMALL-ASM-NEXT:    ld 4, L..C0(2) # %jump-table.0
; 64SMALL-ASM-NEXT:    rldic 3, 3, 2, 30
; 64SMALL-ASM-NEXT:    lwax 3, 3, 4
; 64SMALL-ASM-NEXT:    add 3, 3, 4
; 64SMALL-ASM-NEXT:    mtctr 3
; 64SMALL-ASM-NEXT:    bctr
; 64SMALL-ASM-NEXT:  L..BB0_2: # %sw.bb
; 64SMALL-ASM-NEXT:    #APP
; 64SMALL-ASM-NEXT:    #NO_APP
; 64SMALL-ASM-NEXT:  L..BB0_3: # %sw.epilog
; 64SMALL-ASM-NEXT:    li 3, 0
; 64SMALL-ASM-NEXT:    blr
; 64SMALL-ASM-NEXT:  L..BB0_4: # %sw.bb1
; 64SMALL-ASM-NEXT:    li 3, 0
; 64SMALL-ASM-NEXT:    #APP
; 64SMALL-ASM-NEXT:    #NO_APP
; 64SMALL-ASM-NEXT:    blr
; 64SMALL-ASM-NEXT:  L..BB0_5: # %sw.bb2
; 64SMALL-ASM-NEXT:    li 3, 0
; 64SMALL-ASM-NEXT:    #APP
; 64SMALL-ASM-NEXT:    #NO_APP
; 64SMALL-ASM-NEXT:    blr
; 64SMALL-ASM-NEXT:  L..BB0_6: # %sw.bb3
; 64SMALL-ASM-NEXT:    li 3, 0
; 64SMALL-ASM-NEXT:    #APP
; 64SMALL-ASM-NEXT:    #NO_APP
; 64SMALL-ASM-NEXT:    blr
;
; 64LARGE-ASM-LABEL: jump_table:
; 64LARGE-ASM:       # %bb.0: # %entry
; 64LARGE-ASM-NEXT:    addi 3, 3, -1
; 64LARGE-ASM-NEXT:    cmplwi 3, 3
; 64LARGE-ASM-NEXT:    bgt 0, L..BB0_3
; 64LARGE-ASM-NEXT:  # %bb.1: # %entry
; 64LARGE-ASM-NEXT:    addis 4, L..C0@u(2)
; 64LARGE-ASM-NEXT:    rldic 3, 3, 2, 30
; 64LARGE-ASM-NEXT:    ld 4, L..C0@l(4)
; 64LARGE-ASM-NEXT:    lwax 3, 3, 4
; 64LARGE-ASM-NEXT:    add 3, 3, 4
; 64LARGE-ASM-NEXT:    mtctr 3
; 64LARGE-ASM-NEXT:    bctr
; 64LARGE-ASM-NEXT:  L..BB0_2: # %sw.bb
; 64LARGE-ASM-NEXT:    #APP
; 64LARGE-ASM-NEXT:    #NO_APP
; 64LARGE-ASM-NEXT:  L..BB0_3: # %sw.epilog
; 64LARGE-ASM-NEXT:    li 3, 0
; 64LARGE-ASM-NEXT:    blr
; 64LARGE-ASM-NEXT:  L..BB0_4: # %sw.bb1
; 64LARGE-ASM-NEXT:    li 3, 0
; 64LARGE-ASM-NEXT:    #APP
; 64LARGE-ASM-NEXT:    #NO_APP
; 64LARGE-ASM-NEXT:    blr
; 64LARGE-ASM-NEXT:  L..BB0_5: # %sw.bb2
; 64LARGE-ASM-NEXT:    li 3, 0
; 64LARGE-ASM-NEXT:    #APP
; 64LARGE-ASM-NEXT:    #NO_APP
; 64LARGE-ASM-NEXT:    blr
; 64LARGE-ASM-NEXT:  L..BB0_6: # %sw.bb3
; 64LARGE-ASM-NEXT:    li 3, 0
; 64LARGE-ASM-NEXT:    #APP
; 64LARGE-ASM-NEXT:    #NO_APP
; 64LARGE-ASM-NEXT:    blr
entry:
  switch i32 %a, label %sw.epilog [
    i32 1, label %sw.bb
    i32 2, label %sw.bb1
    i32 3, label %sw.bb2
    i32 4, label %sw.bb3
  ]

sw.bb:
  tail call void asm sideeffect "", ""()
  br label %sw.epilog

sw.bb1:
  tail call void asm sideeffect "", ""()
  br label %sw.epilog

sw.bb2:
  tail call void asm sideeffect "", ""()
  br label %sw.epilog

sw.bb3:
  tail call void asm sideeffect "", ""()
  br label %sw.epilog

sw.epilog:
  ret i32 0
}
;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
; 32LARGE-MIR: {{.*}}
; 32SMALL-MIR: {{.*}}
; 64LARGE-MIR: {{.*}}
; 64SMALL-MIR: {{.*}}
; FUNC-ASM: {{.*}}
; LARGE-ASM: {{.*}}
; SMALL-ASM: {{.*}}