clang-p2996/llvm/test/CodeGen/AMDGPU/optimize-exec-masking-pre-ra.mir
Scott Linder 60b1967c39 [AMDGPU] Add Scratch Wave Offset to Scratch Buffer Descriptor in entry functions
Add the scratch wave offset to the scratch buffer descriptor (SRSrc) in
the entry function prologue. This allows us to remove the scratch wave
offset register from the calling convention ABI.
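
For example, the prologue now folds the wave offset into the descriptor
base along these lines (a sketch; the specific registers, including s11
for the wave offset, are assumptions):

    s_add_u32  s96, s96, s11   ; SRSrc base_lo += scratch wave offset
    s_addc_u32 s97, s97, 0     ; propagate the carry into base_hi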

As part of this change, allow the use of an inline constant zero for the
SOffset of MUBUF instructions accessing the stack in entry functions
when a frame pointer is not requested/required. Entry functions with
calls still need to set up the calling convention ABI stack pointer
register, and reference it in order to address arguments of called
functions. The ABI stack pointer register remains unswizzled, but is now
wave-relative instead of queue-relative.
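
With the wave offset folded into the descriptor base, a stack access can
then use an inline soffset of 0, for instance (register assignments are
again assumed; s[96:99] mirrors the SRSrc used in the test below):

    ; before: buffer_store_dword v0, v1, s[96:99], s11 offen
    ; after:
    buffer_store_dword v0, v1, s[96:99], 0 offen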

Non-entry functions also use an inline constant zero SOffset for
wave-relative scratch access, but continue to use the stack and frame
pointers as before. When the stack or frame pointer is converted to a
swizzled offset, it is now scaled directly, since the scratch wave offset
no longer needs to be subtracted first.
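
As a worked sketch (assuming a wave64 target, a frame pointer in s34, and
a wave offset in s11, all of which are illustrative choices):

    s_sub_u32  s4, s34, s11    ; before: fp - scratch_wave_offset ...
    s_lshr_b32 s4, s4, 6       ; ... then scale by the wave size
    s_lshr_b32 s4, s34, 6      ; after: scale directly (6 = log2(64))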

Update llvm/docs/AMDGPUUsage.rst to reflect these changes to the calling
convention.

Tags: #llvm

Differential Revision: https://reviews.llvm.org/D75138
2020-03-19 15:35:16 -04:00

# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple=amdgcn-mesa-mesa3d -run-pass=si-optimize-exec-masking-pre-ra -verify-machineinstrs %s -o - | FileCheck -check-prefix=GCN %s
# Check for a regression caused by assuming an instruction was a copy after
# dropping the opcode check: the source of the S_OR_SAVEEXEC_B64 in bb.2
# below is an S_XOR_B64, not a COPY, and must not be treated as one.
---
name: exec_src1_is_not_copy
tracksRegLiveness: true
machineFunctionInfo:
  isEntryFunction: true
  scratchRSrcReg:  '$sgpr96_sgpr97_sgpr98_sgpr99'
  frameOffsetReg:  '$sgpr101'
body: |
  ; GCN-LABEL: name: exec_src1_is_not_copy
  ; GCN: bb.0:
  ; GCN:   successors: %bb.1(0x40000000), %bb.2(0x40000000)
  ; GCN:   liveins: $vgpr0
  ; GCN:   [[COPY:%[0-9]+]]:sreg_64 = COPY $exec
  ; GCN:   [[DEF:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
  ; GCN:   [[V_CMP_NE_U32_e64_:%[0-9]+]]:sreg_64 = V_CMP_NE_U32_e64 0, [[DEF]], implicit $exec
  ; GCN:   [[COPY1:%[0-9]+]]:sreg_64 = COPY $exec, implicit-def $exec
  ; GCN:   [[S_AND_B64_:%[0-9]+]]:sreg_64 = S_AND_B64 [[COPY1]], [[V_CMP_NE_U32_e64_]], implicit-def dead $scc
  ; GCN:   [[S_XOR_B64_:%[0-9]+]]:sreg_64 = S_XOR_B64 [[S_AND_B64_]], [[COPY1]], implicit-def dead $scc
  ; GCN:   $exec = S_MOV_B64_term [[S_AND_B64_]]
  ; GCN:   SI_MASK_BRANCH %bb.2, implicit $exec
  ; GCN:   S_BRANCH %bb.1
  ; GCN: bb.1:
  ; GCN:   successors: %bb.2(0x80000000)
  ; GCN: bb.2:
  ; GCN:   successors: %bb.3(0x40000000), %bb.6(0x40000000)
  ; GCN:   [[S_OR_SAVEEXEC_B64_:%[0-9]+]]:sreg_64 = S_OR_SAVEEXEC_B64 [[S_XOR_B64_]], implicit-def $exec, implicit-def $scc, implicit $exec
  ; GCN:   $exec = S_AND_B64 $exec, [[COPY]], implicit-def dead $scc
  ; GCN:   [[S_AND_B64_1:%[0-9]+]]:sreg_64 = S_AND_B64 $exec, [[S_OR_SAVEEXEC_B64_]], implicit-def $scc
  ; GCN:   $exec = S_XOR_B64_term $exec, [[S_AND_B64_1]], implicit-def $scc
  ; GCN:   SI_MASK_BRANCH %bb.6, implicit $exec
  ; GCN:   S_BRANCH %bb.3
  ; GCN: bb.3:
  ; GCN:   successors: %bb.4(0x40000000), %bb.5(0x40000000)
  ; GCN:   [[V_CMP_NE_U32_e64_1:%[0-9]+]]:sreg_64 = V_CMP_NE_U32_e64 0, [[DEF]], implicit $exec
  ; GCN:   [[COPY2:%[0-9]+]]:sreg_64 = COPY $exec, implicit-def $exec
  ; GCN:   [[S_AND_B64_2:%[0-9]+]]:sreg_64 = S_AND_B64 [[COPY2]], [[V_CMP_NE_U32_e64_1]], implicit-def dead $scc
  ; GCN:   $exec = S_MOV_B64_term [[S_AND_B64_2]]
  ; GCN:   SI_MASK_BRANCH %bb.5, implicit $exec
  ; GCN:   S_BRANCH %bb.4
  ; GCN: bb.4:
  ; GCN:   successors: %bb.5(0x80000000)
  ; GCN: bb.5:
  ; GCN:   successors: %bb.6(0x80000000)
  ; GCN:   $exec = S_OR_B64 $exec, [[COPY2]], implicit-def $scc
  ; GCN: bb.6:
  ; GCN:   $exec = S_OR_B64 $exec, [[S_AND_B64_1]], implicit-def $scc
  bb.0:
    successors: %bb.1, %bb.2
    liveins: $vgpr0

    %0:sreg_64 = COPY $exec
    %1:vgpr_32 = IMPLICIT_DEF
    %2:sreg_64 = V_CMP_NE_U32_e64 0, %1, implicit $exec
    %3:sreg_64 = COPY $exec, implicit-def $exec
    %4:sreg_64 = S_AND_B64 %3, %2, implicit-def dead $scc
    %5:sreg_64 = S_XOR_B64 %4, %3, implicit-def dead $scc
    $exec = S_MOV_B64_term %4
    SI_MASK_BRANCH %bb.2, implicit $exec
    S_BRANCH %bb.1

  bb.1:

  bb.2:
    successors: %bb.3, %bb.6

    %6:sreg_64 = S_OR_SAVEEXEC_B64 %5, implicit-def $exec, implicit-def $scc, implicit $exec
    $exec = S_AND_B64 $exec, %0, implicit-def dead $scc
    %7:sreg_64 = S_AND_B64 $exec, %6, implicit-def $scc
    $exec = S_XOR_B64_term $exec, %7, implicit-def $scc
    SI_MASK_BRANCH %bb.6, implicit $exec
    S_BRANCH %bb.3

  bb.3:
    successors: %bb.4, %bb.5

    %8:sreg_64 = V_CMP_NE_U32_e64 0, %1, implicit $exec
    %9:sreg_64 = COPY $exec, implicit-def $exec
    %10:sreg_64 = S_AND_B64 %9, %8, implicit-def dead $scc
    $exec = S_MOV_B64_term %10
    SI_MASK_BRANCH %bb.5, implicit $exec
    S_BRANCH %bb.4

  bb.4:

  bb.5:
    $exec = S_OR_B64 $exec, %9, implicit-def $scc

  bb.6:
    $exec = S_OR_B64 $exec, %7, implicit-def $scc
...
# When folding a v_cndmask and a v_cmp in a pattern leading to
# s_cbranch_vccz, ensure that an undef operand is handled correctly.
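# A sketch of the intended fold, inferred from the checks below (the exact
# matching conditions in the pass are an assumption here):
#   %sel = V_CNDMASK_B32 0, 0, 0, 1, %cond   ; sel = cond ? 1 : 0 per lane
#   V_CMP_NE_U32 1, %sel                     ; vcc = lanes where sel != 1
#   $vcc = S_AND_B64 $exec, $vcc
# becomes:
#   $vcc = S_ANDN2_B64 $exec, %cond          ; vcc = exec & ~cond
# which must also be safe when %cond is undef.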
---
name: cndmask_cmp_cbranch_fold_undef
tracksRegLiveness: true
body: |
  ; GCN-LABEL: name: cndmask_cmp_cbranch_fold_undef
  ; GCN: bb.0:
  ; GCN:   successors: %bb.1(0x80000000)
  ; GCN:   $vcc = S_ANDN2_B64 $exec, undef %1:sreg_64_xexec, implicit-def $scc
  ; GCN:   S_CBRANCH_VCCZ %bb.1, implicit $vcc
  ; GCN: bb.1:
  bb.0:
    %1:vgpr_32 = V_CNDMASK_B32_e64 0, 0, 0, 1, undef %0:sreg_64_xexec, implicit $exec
    V_CMP_NE_U32_e32 1, %1, implicit-def $vcc, implicit $exec
    $vcc = S_AND_B64 $exec, $vcc, implicit-def dead $scc
    S_CBRANCH_VCCZ %bb.1, implicit $vcc

  bb.1:
...