AMDGPU/GlobalISel: Temporal divergence lowering (non i1) (#124298)
Record all uses outside a cycle with a divergent exit during propagateTemporalDivergence in the uniformity analysis. From this list of candidates for temporal divergence lowering, excluding known lane masks from control-flow intrinsics, find sources inside the cycle that are uniform and not i1. Temporal divergence lowering (non-i1): create a copy (v_mov) to a vgpr, with an implicit use of exec (to stop other passes from moving this copy outside of the cycle), and use this vgpr outside of the cycle instead of the original uniform source.
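To illustrate the transformation, a minimal before/after sketch in generic MIR (hypothetical register numbers and blocks; assumes a wave32 target, where the exec mask is $exec_lo — the shape matches the test updates below):

; Before: %5 is uniform, defined inside the loop, but used in bb.2, which is
; only reached after a divergent loop exit.
bb.1:
  %5:_(s32) = G_ADD %4(s32), %6(s32)
  SI_LOOP %8(s32), %bb.1, implicit-def $exec, implicit-def $scc, implicit $exec

bb.2:
  G_STORE %5(s32), %7(p0) :: (store (s32))

; After: a COPY to vgpr with an implicit use of exec is inserted right after
; the def, and the use outside the cycle reads the copy instead.
bb.1:
  %5:_(s32) = G_ADD %4(s32), %6(s32)
  %9:_(s32) = COPY %5(s32), implicit $exec_lo
  SI_LOOP %8(s32), %bb.1, implicit-def $exec, implicit-def $scc, implicit $exec

bb.2:
  G_STORE %9(s32), %7(p0) :: (store (s32))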
@@ -51,6 +51,7 @@
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SparseBitVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/Support/raw_ostream.h"

#define DEBUG_TYPE "uniformity"
@@ -342,6 +343,9 @@ public:
      typename SyncDependenceAnalysisT::DivergenceDescriptor;
  using BlockLabelMapT = typename SyncDependenceAnalysisT::BlockLabelMap;

  using TemporalDivergenceTuple =
      std::tuple<ConstValueRefT, InstructionT *, const CycleT *>;

  GenericUniformityAnalysisImpl(const DominatorTreeT &DT, const CycleInfoT &CI,
                                const TargetTransformInfo *TTI)
      : Context(CI.getSSAContext()), F(*Context.getFunction()), CI(CI),
@@ -396,6 +400,11 @@ public:

  void print(raw_ostream &out) const;

  SmallVector<TemporalDivergenceTuple, 8> TemporalDivergenceList;

  void recordTemporalDivergence(ConstValueRefT, const InstructionT *,
                                const CycleT *);

protected:
  /// \brief Value/block pair representing a single phi input.
  struct PhiInput {
@@ -1129,6 +1138,13 @@ void GenericUniformityAnalysisImpl<ContextT>::compute() {
  }
}

template <typename ContextT>
void GenericUniformityAnalysisImpl<ContextT>::recordTemporalDivergence(
    ConstValueRefT Val, const InstructionT *User, const CycleT *Cycle) {
  TemporalDivergenceList.emplace_back(Val, const_cast<InstructionT *>(User),
                                      Cycle);
}

template <typename ContextT>
bool GenericUniformityAnalysisImpl<ContextT>::isAlwaysUniform(
    const InstructionT &Instr) const {
@@ -1146,6 +1162,12 @@ template <typename ContextT>
void GenericUniformityAnalysisImpl<ContextT>::print(raw_ostream &OS) const {
  bool haveDivergentArgs = false;

  // When we print a Value (an LLVM IR instruction) we want to print an extra
  // new line: in LLVM IR the print function for Value does not print a new
  // line at the end, while in MIR the print for MachineInstr does.
  constexpr bool IsMIR = std::is_same<InstructionT, MachineInstr>::value;
  std::string NewLine = IsMIR ? "" : "\n";

  // Control flow instructions may be divergent even if their inputs are
  // uniform. Thus, although exceedingly rare, it is possible to have a program
  // with no divergent values but with divergent control structures.
@@ -1180,6 +1202,16 @@ void GenericUniformityAnalysisImpl<ContextT>::print(raw_ostream &OS) const {
    }
  }

  if (!TemporalDivergenceList.empty()) {
    OS << "\nTEMPORAL DIVERGENCE LIST:\n";

    for (auto [Val, UseInst, Cycle] : TemporalDivergenceList) {
      OS << "Value         :" << Context.print(Val) << NewLine
         << "Used by       :" << Context.print(UseInst) << NewLine
         << "Outside cycle :" << Cycle->print(Context) << "\n\n";
    }
  }

  for (auto &block : F) {
    OS << "\nBLOCK " << Context.print(&block) << '\n';

@@ -1191,7 +1223,7 @@ void GenericUniformityAnalysisImpl<ContextT>::print(raw_ostream &OS) const {
        OS << "  DIVERGENT: ";
      else
        OS << "             ";
      OS << Context.print(value) << '\n';
      OS << Context.print(value) << NewLine;
    }

    OS << "TERMINATORS\n";
@@ -1203,13 +1235,21 @@ void GenericUniformityAnalysisImpl<ContextT>::print(raw_ostream &OS) const {
        OS << "  DIVERGENT: ";
      else
        OS << "             ";
      OS << Context.print(T) << '\n';
      OS << Context.print(T) << NewLine;
    }

    OS << "END BLOCK\n";
  }
}

template <typename ContextT>
iterator_range<
    typename GenericUniformityInfo<ContextT>::TemporalDivergenceTuple *>
GenericUniformityInfo<ContextT>::getTemporalDivergenceList() const {
  return make_range(DA->TemporalDivergenceList.begin(),
                    DA->TemporalDivergenceList.end());
}

template <typename ContextT>
bool GenericUniformityInfo<ContextT>::hasDivergence() const {
  return DA->hasDivergence();
@@ -40,6 +40,9 @@ public:
  using CycleInfoT = GenericCycleInfo<ContextT>;
  using CycleT = typename CycleInfoT::CycleT;

  using TemporalDivergenceTuple =
      std::tuple<ConstValueRefT, InstructionT *, const CycleT *>;

  GenericUniformityInfo(const DominatorTreeT &DT, const CycleInfoT &CI,
                        const TargetTransformInfo *TTI = nullptr);
  GenericUniformityInfo() = default;
@@ -78,6 +81,8 @@ public:

  void print(raw_ostream &Out) const;

  iterator_range<TemporalDivergenceTuple *> getTemporalDivergenceList() const;

private:
  using ImplT = GenericUniformityAnalysisImpl<ContextT>;
@@ -79,13 +79,12 @@ template <>
void llvm::GenericUniformityAnalysisImpl<
    SSAContext>::propagateTemporalDivergence(const Instruction &I,
                                             const Cycle &DefCycle) {
  if (isDivergent(I))
    return;
  for (auto *User : I.users()) {
    auto *UserInstr = cast<Instruction>(User);
    if (DefCycle.contains(UserInstr->getParent()))
      continue;
    markDivergent(*UserInstr);
    recordTemporalDivergence(&I, UserInstr, &DefCycle);
  }
}
@@ -117,12 +117,12 @@ void llvm::GenericUniformityAnalysisImpl<MachineSSAContext>::
    if (!Op.getReg().isVirtual())
      continue;
    auto Reg = Op.getReg();
    if (isDivergent(Reg))
      continue;
    for (MachineInstr &UserInstr : RegInfo.use_instructions(Reg)) {
      if (DefCycle.contains(UserInstr.getParent()))
        continue;
      markDivergent(UserInstr);

      recordTemporalDivergence(Reg, &UserInstr, &DefCycle);
    }
  }
}
@@ -193,7 +193,7 @@ INITIALIZE_PASS_END(MachineUniformityAnalysisPass, "machine-uniformity",

void MachineUniformityAnalysisPass::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesAll();
  AU.addRequired<MachineCycleInfoWrapperPass>();
  AU.addRequiredTransitive<MachineCycleInfoWrapperPass>();
  AU.addRequired<MachineDominatorTreeWrapperPass>();
  MachineFunctionPass::getAnalysisUsage(AU);
}
@@ -16,6 +16,7 @@
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDGPUGlobalISelUtils.h"
#include "SILowerI1Copies.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
@@ -78,6 +79,8 @@ public:
                           Register DstReg, Register PrevReg,
                           Register CurReg) override;
  void constrainAsLaneMask(Incoming &In) override;

  bool lowerTemporalDivergence();
};

DivergenceLoweringHelper::DivergenceLoweringHelper(
@@ -188,6 +191,43 @@ void DivergenceLoweringHelper::constrainAsLaneMask(Incoming &In) {
  In.Reg = Copy.getReg(0);
}

void replaceUsesOfRegInInstWith(Register Reg, MachineInstr *Inst,
                                Register NewReg) {
  for (MachineOperand &Op : Inst->operands()) {
    if (Op.isReg() && Op.getReg() == Reg)
      Op.setReg(NewReg);
  }
}

bool DivergenceLoweringHelper::lowerTemporalDivergence() {
  AMDGPU::IntrinsicLaneMaskAnalyzer ILMA(*MF);
  DenseMap<Register, Register> TDCache;

  for (auto [Reg, UseInst, _] : MUI->getTemporalDivergenceList()) {
    if (MRI->getType(Reg) == LLT::scalar(1) || MUI->isDivergent(Reg) ||
        ILMA.isS32S64LaneMask(Reg))
      continue;

    Register CachedTDCopy = TDCache.lookup(Reg);
    if (CachedTDCopy) {
      replaceUsesOfRegInInstWith(Reg, UseInst, CachedTDCopy);
      continue;
    }

    MachineInstr *Inst = MRI->getVRegDef(Reg);
    MachineBasicBlock *MBB = Inst->getParent();
    B.setInsertPt(*MBB, MBB->SkipPHIsAndLabels(std::next(Inst->getIterator())));

    Register VgprReg = MRI->createGenericVirtualRegister(MRI->getType(Reg));
    B.buildInstr(AMDGPU::COPY, {VgprReg}, {Reg})
        .addUse(ExecReg, RegState::Implicit);

    replaceUsesOfRegInInstWith(Reg, UseInst, VgprReg);
    TDCache[Reg] = VgprReg;
  }
  return false;
}

} // End anonymous namespace.

INITIALIZE_PASS_BEGIN(AMDGPUGlobalISelDivergenceLowering, DEBUG_TYPE,
@@ -218,5 +258,15 @@ bool AMDGPUGlobalISelDivergenceLowering::runOnMachineFunction(

  DivergenceLoweringHelper Helper(&MF, &DT, &PDT, &MUI);

  return Helper.lowerPhis();
  bool Changed = false;
  // Temporal divergence lowering needs to inspect the list, provided by the
  // uniformity analysis, of instructions used outside a cycle with a
  // divergent exit. Uniform instructions from that list require lowering, and
  // no instruction is deleted. Thus it needs to run before lowerPhis, which
  // deletes phis that require lowering and replaces them with new
  // instructions.

  // Non-i1 temporal divergence lowering.
  Changed |= Helper.lowerTemporalDivergence();
  Changed |= Helper.lowerPhis();
  return Changed;
}
@@ -83,6 +83,7 @@ class RegBankSelectHelper {
  MachineRegisterInfo &MRI;
  AMDGPU::IntrinsicLaneMaskAnalyzer &ILMA;
  const MachineUniformityInfo &MUI;
  const SIRegisterInfo &TRI;
  const RegisterBank *SgprRB;
  const RegisterBank *VgprRB;
  const RegisterBank *VccRB;
@@ -91,14 +92,29 @@ public:
  RegBankSelectHelper(MachineIRBuilder &B,
                      AMDGPU::IntrinsicLaneMaskAnalyzer &ILMA,
                      const MachineUniformityInfo &MUI,
                      const RegisterBankInfo &RBI)
      : B(B), MRI(*B.getMRI()), ILMA(ILMA), MUI(MUI),
                      const SIRegisterInfo &TRI, const RegisterBankInfo &RBI)
      : B(B), MRI(*B.getMRI()), ILMA(ILMA), MUI(MUI), TRI(TRI),
        SgprRB(&RBI.getRegBank(AMDGPU::SGPRRegBankID)),
        VgprRB(&RBI.getRegBank(AMDGPU::VGPRRegBankID)),
        VccRB(&RBI.getRegBank(AMDGPU::VCCRegBankID)) {}

  // Temporal divergence copy: COPY to vgpr with implicit use of $exec inside
  // the cycle.
  // Note: the uniformity analysis does not consider registers with a vgpr def
  // to be divergent (a vgpr can hold a uniform value).
  // TODO: the implicit use of $exec could be used as an indicator that the
  // instruction is divergent.
  bool isTemporalDivergenceCopy(Register Reg) {
    MachineInstr *MI = MRI.getVRegDef(Reg);
    if (!MI->isCopy() || MI->getNumImplicitOperands() != 1)
      return false;

    return MI->implicit_operands().begin()->getReg() == TRI.getExec();
  }

  const RegisterBank *getRegBankToAssign(Register Reg) {
    if (MUI.isUniform(Reg) || ILMA.isS32S64LaneMask(Reg))
    if (!isTemporalDivergenceCopy(Reg) &&
        (MUI.isUniform(Reg) || ILMA.isS32S64LaneMask(Reg)))
      return SgprRB;
    if (MRI.getType(Reg) == LLT::scalar(1))
      return VccRB;
@@ -209,7 +225,8 @@ bool AMDGPURegBankSelect::runOnMachineFunction(MachineFunction &MF) {
      getAnalysis<MachineUniformityAnalysisPass>().getUniformityInfo();
  MachineRegisterInfo &MRI = *B.getMRI();
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  RegBankSelectHelper RBSHelper(B, ILMA, MUI, *ST.getRegBankInfo());
  RegBankSelectHelper RBSHelper(B, ILMA, MUI, *ST.getRegisterInfo(),
                                *ST.getRegBankInfo());
  // Virtual registers at this point don't have register banks.
  // Virtual registers in def and use operands of already inst-selected
  // instructions have a register class.
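For reference, a sketch of the copy shape that isTemporalDivergenceCopy above is meant to match (hypothetical registers; TRI.getExec() is $exec_lo in wave32 mode and $exec in wave64 mode):

%9:_(s32) = COPY %5(s32), implicit $exec_lo

Even when %5 is uniform, such a copy has to be assigned the vgpr bank: after the divergent loop exit, each lane must observe the value from its own last active iteration.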
@@ -15,6 +15,7 @@
#include "GCNSubtarget.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachinePostDominators.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/MachineSSAUpdater.h"

namespace llvm {
@@ -72,6 +73,11 @@ public:
    LaneMaskRegAttrs = MRI->getVRegAttrs(LaneMask);
  }

  void
  initializeLaneMaskRegisterAttributes(MachineRegisterInfo::VRegAttrs Attrs) {
    LaneMaskRegAttrs = Attrs;
  }

  bool isLaneMaskReg(Register Reg) const {
    return TII->getRegisterInfo().isSGPRReg(*MRI, Reg) &&
           TII->getRegisterInfo().getRegSizeInBits(Reg, *MRI) ==
@@ -471,7 +471,7 @@ body: |
; GFX10-NEXT: bb.2:
; GFX10-NEXT: successors: %bb.5(0x40000000), %bb.6(0x40000000)
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: [[PHI:%[0-9]+]]:sreg_32(s1) = PHI [[COPY3]](s1), %bb.0, %56(s1), %bb.4
; GFX10-NEXT: [[PHI:%[0-9]+]]:sreg_32(s1) = PHI [[COPY3]](s1), %bb.0, %57(s1), %bb.4
; GFX10-NEXT: [[PHI1:%[0-9]+]]:_(s32) = G_PHI %29(s32), %bb.4, [[DEF]](s32), %bb.0
; GFX10-NEXT: [[COPY4:%[0-9]+]]:sreg_32(s1) = COPY [[PHI]](s1)
; GFX10-NEXT: G_BRCOND [[COPY4]](s1), %bb.5
@@ -486,6 +486,7 @@ body: |
; GFX10-NEXT: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; GFX10-NEXT: [[AMDGPU_BUFFER_LOAD1:%[0-9]+]]:_(s32) = G_AMDGPU_BUFFER_LOAD [[UV]](<4 x s32>), [[C7]](s32), [[PHI2]], [[C7]], 0, 0, 0 :: (dereferenceable load (s32), align 1, addrspace 8)
; GFX10-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[AMDGPU_BUFFER_LOAD1]], [[PHI4]]
; GFX10-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY [[ADD]](s32), implicit $exec_lo
; GFX10-NEXT: [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
; GFX10-NEXT: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[PHI3]], [[C8]]
; GFX10-NEXT: [[C9:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
@@ -497,11 +498,11 @@ body: |
; GFX10-NEXT: bb.4:
; GFX10-NEXT: successors: %bb.2(0x80000000)
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[ADD]](s32), [[AMDGPU_BUFFER_LOAD]]
; GFX10-NEXT: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[COPY5]](s32), [[AMDGPU_BUFFER_LOAD]]
; GFX10-NEXT: [[OR1:%[0-9]+]]:_(s1) = G_OR [[ICMP]], [[ICMP2]]
; GFX10-NEXT: [[ZEXT1:%[0-9]+]]:_(s32) = G_ZEXT [[OR1]](s1)
; GFX10-NEXT: [[C10:%[0-9]+]]:_(s1) = G_CONSTANT i1 false
; GFX10-NEXT: [[COPY5:%[0-9]+]]:sreg_32(s1) = COPY [[C10]](s1)
; GFX10-NEXT: [[COPY6:%[0-9]+]]:sreg_32(s1) = COPY [[C10]](s1)
; GFX10-NEXT: G_BR %bb.2
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: bb.5:

@@ -642,6 +642,7 @@ body: |
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: [[PHI:%[0-9]+]]:_(s32) = G_PHI %11(s32), %bb.6, [[C]](s32), %bb.0
; GFX10-NEXT: [[PHI1:%[0-9]+]]:_(s32) = G_PHI [[C]](s32), %bb.0, %13(s32), %bb.6
; GFX10-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY [[PHI1]](s32), implicit $exec_lo
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: bb.2:
; GFX10-NEXT: successors: %bb.3(0x40000000), %bb.4(0x40000000)
@@ -665,8 +666,8 @@ body: |
; GFX10-NEXT: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.end.cf), [[SI_IF]](s32)
; GFX10-NEXT: [[ICMP1:%[0-9]+]]:sreg_32_xm0_xexec(s1) = G_ICMP intpred(ne), [[COPY1]](s32), [[PHI1]]
; GFX10-NEXT: [[C2:%[0-9]+]]:_(s1) = G_CONSTANT i1 true
; GFX10-NEXT: [[COPY6:%[0-9]+]]:sreg_32(s1) = COPY [[C2]](s1)
; GFX10-NEXT: [[COPY7:%[0-9]+]]:sreg_32(s1) = COPY [[COPY6]](s1)
; GFX10-NEXT: [[COPY7:%[0-9]+]]:sreg_32(s1) = COPY [[C2]](s1)
; GFX10-NEXT: [[COPY8:%[0-9]+]]:sreg_32(s1) = COPY [[COPY7]](s1)
; GFX10-NEXT: [[SI_IF1:%[0-9]+]]:sreg_32_xm0_xexec(s32) = SI_IF [[ICMP1]](s1), %bb.6, implicit-def $exec, implicit-def $scc, implicit $exec
; GFX10-NEXT: G_BR %bb.5
; GFX10-NEXT: {{ $}}
@@ -676,19 +677,19 @@ body: |
; GFX10-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
; GFX10-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[PHI1]], [[C3]]
; GFX10-NEXT: [[C4:%[0-9]+]]:_(s1) = G_CONSTANT i1 false
; GFX10-NEXT: [[COPY8:%[0-9]+]]:sreg_32(s1) = COPY [[C4]](s1)
; GFX10-NEXT: [[S_ANDN2_B32_:%[0-9]+]]:sreg_32(s1) = S_ANDN2_B32 [[COPY7]](s1), $exec_lo, implicit-def $scc
; GFX10-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32(s1) = S_AND_B32 $exec_lo, [[COPY8]](s1), implicit-def $scc
; GFX10-NEXT: [[COPY9:%[0-9]+]]:sreg_32(s1) = COPY [[C4]](s1)
; GFX10-NEXT: [[S_ANDN2_B32_:%[0-9]+]]:sreg_32(s1) = S_ANDN2_B32 [[COPY8]](s1), $exec_lo, implicit-def $scc
; GFX10-NEXT: [[S_AND_B32_:%[0-9]+]]:sreg_32(s1) = S_AND_B32 $exec_lo, [[COPY9]](s1), implicit-def $scc
; GFX10-NEXT: [[S_OR_B32_:%[0-9]+]]:sreg_32(s1) = S_OR_B32 [[S_ANDN2_B32_]](s1), [[S_AND_B32_]](s1), implicit-def $scc
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: bb.6:
; GFX10-NEXT: successors: %bb.7(0x04000000), %bb.1(0x7c000000)
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: [[PHI2:%[0-9]+]]:sreg_32(s1) = PHI [[COPY6]](s1), %bb.4, [[S_OR_B32_]](s1), %bb.5
; GFX10-NEXT: [[PHI2:%[0-9]+]]:sreg_32(s1) = PHI [[COPY7]](s1), %bb.4, [[S_OR_B32_]](s1), %bb.5
; GFX10-NEXT: [[PHI3:%[0-9]+]]:_(s32) = G_PHI [[ADD]](s32), %bb.5, [[DEF]](s32), %bb.4
; GFX10-NEXT: [[COPY9:%[0-9]+]]:sreg_32(s1) = COPY [[PHI2]](s1)
; GFX10-NEXT: [[COPY10:%[0-9]+]]:sreg_32(s1) = COPY [[PHI2]](s1)
; GFX10-NEXT: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.end.cf), [[SI_IF1]](s32)
; GFX10-NEXT: [[INT:%[0-9]+]]:sreg_32_xm0_xexec(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.if.break), [[COPY9]](s1), [[PHI]](s32)
; GFX10-NEXT: [[INT:%[0-9]+]]:sreg_32_xm0_xexec(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.if.break), [[COPY10]](s1), [[PHI]](s32)
; GFX10-NEXT: SI_LOOP [[INT]](s32), %bb.1, implicit-def $exec, implicit-def $scc, implicit $exec
; GFX10-NEXT: G_BR %bb.7
; GFX10-NEXT: {{ $}}
@@ -702,7 +703,7 @@ body: |
; GFX10-NEXT: bb.8:
; GFX10-NEXT: successors: %bb.9(0x80000000)
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: G_STORE [[PHI1]](s32), [[MV1]](p1) :: (store (s32), addrspace 1)
; GFX10-NEXT: G_STORE [[COPY6]](s32), [[MV1]](p1) :: (store (s32), addrspace 1)
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: bb.9:
; GFX10-NEXT: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.end.cf), [[SI_IF2]](s32)
@@ -5,20 +5,20 @@ define void @temporal_divergent_i32(float %val, ptr %addr) {
; GFX10-LABEL: temporal_divergent_i32:
; GFX10: ; %bb.0: ; %entry
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: s_mov_b32 s4, -1
; GFX10-NEXT: s_mov_b32 s5, 0
; GFX10-NEXT: s_mov_b32 s5, -1
; GFX10-NEXT: s_mov_b32 s4, 0
; GFX10-NEXT: .LBB0_1: ; %loop
; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10-NEXT: s_add_i32 s4, s4, 1
; GFX10-NEXT: v_cvt_f32_u32_e32 v3, s4
; GFX10-NEXT: s_add_i32 s5, s5, 1
; GFX10-NEXT: v_cvt_f32_u32_e32 v3, s5
; GFX10-NEXT: v_cmp_gt_f32_e32 vcc_lo, v3, v0
; GFX10-NEXT: s_or_b32 s5, vcc_lo, s5
; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s5
; GFX10-NEXT: v_mov_b32_e32 v3, s5
; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4
; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
; GFX10-NEXT: s_cbranch_execnz .LBB0_1
; GFX10-NEXT: ; %bb.2: ; %exit
; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s5
; GFX10-NEXT: v_mov_b32_e32 v0, s4
; GFX10-NEXT: flat_store_dword v[1:2], v0
; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s4
; GFX10-NEXT: flat_store_dword v[1:2], v3
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: s_setpc_b64 s[30:31]
entry:
@@ -40,21 +40,21 @@ define void @temporal_divergent_i32_multiple_use(float %val, ptr %addr, ptr %add
; GFX10-LABEL: temporal_divergent_i32_multiple_use:
; GFX10: ; %bb.0: ; %entry
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT: s_mov_b32 s4, -1
; GFX10-NEXT: s_mov_b32 s5, 0
; GFX10-NEXT: s_mov_b32 s5, -1
; GFX10-NEXT: s_mov_b32 s4, 0
; GFX10-NEXT: .LBB1_1: ; %loop
; GFX10-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX10-NEXT: s_add_i32 s4, s4, 1
; GFX10-NEXT: v_cvt_f32_u32_e32 v5, s4
; GFX10-NEXT: s_add_i32 s5, s5, 1
; GFX10-NEXT: v_cvt_f32_u32_e32 v5, s5
; GFX10-NEXT: v_cmp_gt_f32_e32 vcc_lo, v5, v0
; GFX10-NEXT: s_or_b32 s5, vcc_lo, s5
; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s5
; GFX10-NEXT: v_mov_b32_e32 v5, s5
; GFX10-NEXT: s_or_b32 s4, vcc_lo, s4
; GFX10-NEXT: s_andn2_b32 exec_lo, exec_lo, s4
; GFX10-NEXT: s_cbranch_execnz .LBB1_1
; GFX10-NEXT: ; %bb.2: ; %exit
; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s5
; GFX10-NEXT: v_mov_b32_e32 v0, s4
; GFX10-NEXT: flat_store_dword v[1:2], v0
; GFX10-NEXT: flat_store_dword v[3:4], v0
; GFX10-NEXT: s_or_b32 exec_lo, exec_lo, s4
; GFX10-NEXT: flat_store_dword v[1:2], v5
; GFX10-NEXT: flat_store_dword v[3:4], v5
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: s_setpc_b64 s[30:31]
entry:
@@ -25,6 +25,7 @@ body: |
; GFX10-NEXT: [[PHI1:%[0-9]+]]:_(s32) = G_PHI [[C]](s32), %bb.0, %9(s32), %bb.1
; GFX10-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
; GFX10-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[PHI1]], [[C2]]
; GFX10-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY [[ADD]](s32), implicit $exec_lo
; GFX10-NEXT: [[UITOFP:%[0-9]+]]:_(s32) = G_UITOFP [[ADD]](s32)
; GFX10-NEXT: [[FCMP:%[0-9]+]]:_(s1) = G_FCMP floatpred(ogt), [[UITOFP]](s32), [[COPY]]
; GFX10-NEXT: [[INT:%[0-9]+]]:sreg_32_xm0_xexec(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.if.break), [[FCMP]](s1), [[PHI]](s32)
@@ -33,7 +34,7 @@ body: |
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: bb.2:
; GFX10-NEXT: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.end.cf), [[INT]](s32)
; GFX10-NEXT: G_STORE [[ADD]](s32), [[MV]](p0) :: (store (s32))
; GFX10-NEXT: G_STORE [[COPY3]](s32), [[MV]](p0) :: (store (s32))
; GFX10-NEXT: SI_RETURN
bb.0:
successors: %bb.1(0x80000000)
@@ -92,6 +93,7 @@ body: |
; GFX10-NEXT: [[PHI1:%[0-9]+]]:_(s32) = G_PHI [[C]](s32), %bb.0, %12(s32), %bb.1
; GFX10-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
; GFX10-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[PHI1]], [[C2]]
; GFX10-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY [[ADD]](s32), implicit $exec_lo
; GFX10-NEXT: [[UITOFP:%[0-9]+]]:_(s32) = G_UITOFP [[ADD]](s32)
; GFX10-NEXT: [[FCMP:%[0-9]+]]:_(s1) = G_FCMP floatpred(ogt), [[UITOFP]](s32), [[COPY]]
; GFX10-NEXT: [[INT:%[0-9]+]]:sreg_32_xm0_xexec(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.if.break), [[FCMP]](s1), [[PHI]](s32)
@@ -100,8 +102,8 @@ body: |
; GFX10-NEXT: {{ $}}
; GFX10-NEXT: bb.2:
; GFX10-NEXT: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.end.cf), [[INT]](s32)
; GFX10-NEXT: G_STORE [[ADD]](s32), [[MV]](p0) :: (store (s32))
; GFX10-NEXT: G_STORE [[ADD]](s32), [[MV1]](p0) :: (store (s32))
; GFX10-NEXT: G_STORE [[COPY5]](s32), [[MV]](p0) :: (store (s32))
; GFX10-NEXT: G_STORE [[COPY5]](s32), [[MV1]](p0) :: (store (s32))
; GFX10-NEXT: SI_RETURN
bb.0:
liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
@@ -450,19 +450,20 @@ define amdgpu_ps void @divergent_because_of_temporal_divergent_use(float %val, p
;
; NEW_RBS-LABEL: divergent_because_of_temporal_divergent_use:
; NEW_RBS: ; %bb.0: ; %entry
; NEW_RBS-NEXT: s_mov_b32 s0, -1
; NEW_RBS-NEXT: s_mov_b32 s1, 0
; NEW_RBS-NEXT: s_mov_b32 s1, -1
; NEW_RBS-NEXT: s_mov_b32 s0, 0
; NEW_RBS-NEXT: .LBB15_1: ; %loop
; NEW_RBS-NEXT: ; =>This Inner Loop Header: Depth=1
; NEW_RBS-NEXT: s_add_i32 s0, s0, 1
; NEW_RBS-NEXT: v_cvt_f32_u32_e32 v3, s0
; NEW_RBS-NEXT: s_add_i32 s1, s1, 1
; NEW_RBS-NEXT: v_cvt_f32_u32_e32 v3, s1
; NEW_RBS-NEXT: v_cmp_gt_f32_e32 vcc_lo, v3, v0
; NEW_RBS-NEXT: s_or_b32 s1, vcc_lo, s1
; NEW_RBS-NEXT: s_andn2_b32 exec_lo, exec_lo, s1
; NEW_RBS-NEXT: v_mov_b32_e32 v3, s1
; NEW_RBS-NEXT: s_or_b32 s0, vcc_lo, s0
; NEW_RBS-NEXT: s_andn2_b32 exec_lo, exec_lo, s0
; NEW_RBS-NEXT: s_cbranch_execnz .LBB15_1
; NEW_RBS-NEXT: ; %bb.2: ; %exit
; NEW_RBS-NEXT: s_or_b32 exec_lo, exec_lo, s1
; NEW_RBS-NEXT: v_mul_lo_u32 v0, s0, 10
; NEW_RBS-NEXT: s_or_b32 exec_lo, exec_lo, s0
; NEW_RBS-NEXT: v_mul_lo_u32 v0, v3, 10
; NEW_RBS-NEXT: global_store_dword v[1:2], v0, off
; NEW_RBS-NEXT: s_endpgm
entry: