[CodeGen] Provide a target independent default for optimizeLoadInst [NFC]
This moves the x86 implementation into generic code, since it appears to be suitable for any target. The heart of this transform is inside foldMemoryOperand, so other targets won't actually kick in until they implement that API. This removes one piece that each target would otherwise have to implement when enabling foldMemoryOperand.
This commit is contained in:
committed by
Philip Reames
parent
52f941adbc
commit
236f938ef6
@@ -1791,9 +1791,7 @@ public:
|
||||
virtual MachineInstr *optimizeLoadInstr(MachineInstr &MI,
|
||||
const MachineRegisterInfo *MRI,
|
||||
Register &FoldAsLoadDefReg,
|
||||
MachineInstr *&DefMI) const {
|
||||
return nullptr;
|
||||
}
|
||||
MachineInstr *&DefMI) const;
|
||||
|
||||
/// 'Reg' is known to be defined by a move immediate instruction,
|
||||
/// try to fold the immediate into the use instruction.
|
||||
|
||||
@@ -496,6 +496,47 @@ static const TargetRegisterClass *canFoldCopy(const MachineInstr &MI,
|
||||
|
||||
/// Default getNop: targets that want NOP insertion must override this;
/// the base implementation simply aborts via llvm_unreachable.
MCInst TargetInstrInfo::getNop() const { llvm_unreachable("Not implemented"); }
|
||||
|
||||
/// Try to remove the load by folding it to a register
|
||||
/// operand at the use. We fold the load instructions if load defines a virtual
|
||||
/// register, the virtual register is used once in the same BB, and the
|
||||
/// instructions in-between do not load or store, and have no side effects.
|
||||
MachineInstr *TargetInstrInfo::optimizeLoadInstr(MachineInstr &MI,
|
||||
const MachineRegisterInfo *MRI,
|
||||
Register &FoldAsLoadDefReg,
|
||||
MachineInstr *&DefMI) const {
|
||||
// Check whether we can move DefMI here.
|
||||
DefMI = MRI->getVRegDef(FoldAsLoadDefReg);
|
||||
assert(DefMI);
|
||||
bool SawStore = false;
|
||||
if (!DefMI->isSafeToMove(SawStore))
|
||||
return nullptr;
|
||||
|
||||
// Collect information about virtual register operands of MI.
|
||||
SmallVector<unsigned, 1> SrcOperandIds;
|
||||
for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
|
||||
MachineOperand &MO = MI.getOperand(i);
|
||||
if (!MO.isReg())
|
||||
continue;
|
||||
Register Reg = MO.getReg();
|
||||
if (Reg != FoldAsLoadDefReg)
|
||||
continue;
|
||||
// Do not fold if we have a subreg use or a def.
|
||||
if (MO.getSubReg() || MO.isDef())
|
||||
return nullptr;
|
||||
SrcOperandIds.push_back(i);
|
||||
}
|
||||
if (SrcOperandIds.empty())
|
||||
return nullptr;
|
||||
|
||||
// Check whether we can fold the def into SrcOperandId.
|
||||
if (MachineInstr *FoldMI = foldMemoryOperand(MI, SrcOperandIds, *DefMI)) {
|
||||
FoldAsLoadDefReg = 0;
|
||||
return FoldMI;
|
||||
}
|
||||
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
std::pair<unsigned, unsigned>
|
||||
TargetInstrInfo::getPatchpointUnfoldableRange(const MachineInstr &MI) const {
|
||||
switch (MI.getOpcode()) {
|
||||
|
||||
@@ -5716,47 +5716,6 @@ bool X86InstrInfo::optimizeCompareInstr(MachineInstr &CmpInstr, Register SrcReg,
|
||||
return true;
|
||||
}
|
||||
|
||||
/// Try to remove the load by folding it to a register
|
||||
/// operand at the use. We fold the load instructions if load defines a virtual
|
||||
/// register, the virtual register is used once in the same BB, and the
|
||||
/// instructions in-between do not load or store, and have no side effects.
|
||||
MachineInstr *X86InstrInfo::optimizeLoadInstr(MachineInstr &MI,
|
||||
const MachineRegisterInfo *MRI,
|
||||
Register &FoldAsLoadDefReg,
|
||||
MachineInstr *&DefMI) const {
|
||||
// Check whether we can move DefMI here.
|
||||
DefMI = MRI->getVRegDef(FoldAsLoadDefReg);
|
||||
assert(DefMI);
|
||||
bool SawStore = false;
|
||||
if (!DefMI->isSafeToMove(SawStore))
|
||||
return nullptr;
|
||||
|
||||
// Collect information about virtual register operands of MI.
|
||||
SmallVector<unsigned, 1> SrcOperandIds;
|
||||
for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
|
||||
MachineOperand &MO = MI.getOperand(i);
|
||||
if (!MO.isReg())
|
||||
continue;
|
||||
Register Reg = MO.getReg();
|
||||
if (Reg != FoldAsLoadDefReg)
|
||||
continue;
|
||||
// Do not fold if we have a subreg use or a def.
|
||||
if (MO.getSubReg() || MO.isDef())
|
||||
return nullptr;
|
||||
SrcOperandIds.push_back(i);
|
||||
}
|
||||
if (SrcOperandIds.empty())
|
||||
return nullptr;
|
||||
|
||||
// Check whether we can fold the def into SrcOperandId.
|
||||
if (MachineInstr *FoldMI = foldMemoryOperand(MI, SrcOperandIds, *DefMI)) {
|
||||
FoldAsLoadDefReg = 0;
|
||||
return FoldMI;
|
||||
}
|
||||
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
/// \returns true if the instruction can be changed to COPY when imm is 0.
|
||||
static bool canConvert2Copy(unsigned Opc) {
|
||||
switch (Opc) {
|
||||
|
||||
@@ -571,11 +571,6 @@ public:
|
||||
Register SrcReg2, int64_t CmpMask, int64_t CmpValue,
|
||||
const MachineRegisterInfo *MRI) const override;
|
||||
|
||||
MachineInstr *optimizeLoadInstr(MachineInstr &MI,
|
||||
const MachineRegisterInfo *MRI,
|
||||
Register &FoldAsLoadDefReg,
|
||||
MachineInstr *&DefMI) const override;
|
||||
|
||||
bool foldImmediate(MachineInstr &UseMI, MachineInstr &DefMI, Register Reg,
|
||||
MachineRegisterInfo *MRI) const override;
|
||||
|
||||
|
||||
Reference in New Issue
Block a user