[X86][MC] Move the code about MOVSX encoding optimization to X86EncodingOptimization.cpp, NFCI
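The optimization being moved rewrites sign-extending register moves whose operands are the fixed accumulator pairs (AL->AX, AX->EAX, EAX->RAX) into the operand-free CBW/CWDE/CDQE forms, which encode in fewer bytes (for instance, movswl %ax, %eax is 0F BF C0, three bytes, while the equivalent cwtl is the single byte 98). Previously this lived as the private SimplifyMOVSX helper in X86MCInstLower.cpp; it now becomes X86::optimizeMOVSX in X86EncodingOptimization.cpp, declared alongside the other encoding-optimization helpers and called from the generic lowering path, with no intended change in behavior (NFCI).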
@@ -230,6 +230,7 @@ bool X86::optimizeVPCMPWithImmediateOneOrSix(MCInst &MI) {
    FROM_TO(VPCMPWZrmik, VPCMPEQWZrmk, VPCMPGTWZrmk)
    FROM_TO(VPCMPWZrri, VPCMPEQWZrr, VPCMPGTWZrr)
    FROM_TO(VPCMPWZrrik, VPCMPEQWZrrk, VPCMPGTWZrrk)
#undef FROM_TO
  }
  MCOperand &LastOp = MI.getOperand(MI.getNumOperands() - 1);
  int64_t Imm = LastOp.getImm();
@@ -244,3 +245,24 @@ bool X86::optimizeVPCMPWithImmediateOneOrSix(MCInst &MI) {
  MI.erase(&LastOp);
  return true;
}

bool X86::optimizeMOVSX(MCInst &MI) {
  unsigned NewOpc;
#define FROM_TO(FROM, TO, R0, R1)                                              \
  case X86::FROM:                                                              \
    if (MI.getOperand(0).getReg() != X86::R0 ||                                \
        MI.getOperand(1).getReg() != X86::R1)                                  \
      return false;                                                            \
    NewOpc = X86::TO;                                                          \
    break;
  switch (MI.getOpcode()) {
  default:
    return false;
    FROM_TO(MOVSX16rr8, CBW, AX, AL)     // movsbw %al, %ax   --> cbtw
    FROM_TO(MOVSX32rr16, CWDE, EAX, AX)  // movswl %ax, %eax  --> cwtl
    FROM_TO(MOVSX64rr32, CDQE, RAX, EAX) // movslq %eax, %rax --> cltq
  }
  MI.clear();
  MI.setOpcode(NewOpc);
  return true;
}
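For readers unfamiliar with the MC-layer helpers, here is a minimal, illustrative sketch (not part of this commit) of how the new X86::optimizeMOVSX behaves on a hand-built MCInst; the include paths and the shrinkMOVSXExample name are assumptions made for the example only:

// Illustration only: feed a movsbw %al, %ax to the helper and observe the
// in-place rewrite to the operand-free CBW (cbtw) form.
#include "MCTargetDesc/X86EncodingOptimization.h" // assumed include path
#include "MCTargetDesc/X86MCTargetDesc.h"         // X86 opcode/register enums (assumed)
#include "llvm/MC/MCInst.h"
#include <cassert>

static void shrinkMOVSXExample() {
  using namespace llvm;
  MCInst MI;
  MI.setOpcode(X86::MOVSX16rr8);                // movsbw %al, %ax
  MI.addOperand(MCOperand::createReg(X86::AX)); // destination
  MI.addOperand(MCOperand::createReg(X86::AL)); // source
  bool Changed = X86::optimizeMOVSX(MI);        // true only for the accumulator pair
  assert(Changed && MI.getOpcode() == X86::CBW && MI.getNumOperands() == 0);
  (void)Changed;
}

If either register is not the expected accumulator, the helper returns false and leaves the instruction untouched, which is why the caller in X86MCInstLower.cpp can simply fall through to the existing special cases.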
@@ -19,6 +19,7 @@ namespace X86 {
bool optimizeInstFromVEX3ToVEX2(MCInst &MI, const MCInstrDesc &Desc);
bool optimizeShiftRotateWithImmediateOne(MCInst &MI);
bool optimizeVPCMPWithImmediateOneOrSix(MCInst &MI);
bool optimizeMOVSX(MCInst &MI);
} // namespace X86
} // namespace llvm
#endif
@@ -343,34 +343,6 @@ static void SimplifyShortImmForm(MCInst &Inst, unsigned Opcode) {
  Inst.addOperand(Saved);
}

/// If a movsx instruction has a shorter encoding for the used register
/// simplify the instruction to use it instead.
static void SimplifyMOVSX(MCInst &Inst) {
  unsigned NewOpcode = 0;
  unsigned Op0 = Inst.getOperand(0).getReg(), Op1 = Inst.getOperand(1).getReg();
  switch (Inst.getOpcode()) {
  default:
    llvm_unreachable("Unexpected instruction!");
  case X86::MOVSX16rr8: // movsbw %al, %ax   --> cbtw
    if (Op0 == X86::AX && Op1 == X86::AL)
      NewOpcode = X86::CBW;
    break;
  case X86::MOVSX32rr16: // movswl %ax, %eax --> cwtl
    if (Op0 == X86::EAX && Op1 == X86::AX)
      NewOpcode = X86::CWDE;
    break;
  case X86::MOVSX64rr32: // movslq %eax, %rax --> cltq
    if (Op0 == X86::RAX && Op1 == X86::EAX)
      NewOpcode = X86::CDQE;
    break;
  }

  if (NewOpcode != 0) {
    Inst = MCInst();
    Inst.setOpcode(NewOpcode);
  }
}

/// Simplify things like MOV32rm to MOV32o32a.
static void SimplifyShortMoveForm(X86AsmPrinter &Printer, MCInst &Inst,
                                  unsigned Opcode) {
@@ -511,6 +483,9 @@ void X86MCInstLower::Lower(const MachineInstr *MI, MCInst &OutMI) const {
  if (X86::optimizeVPCMPWithImmediateOneOrSix(OutMI))
    return;

  if (X86::optimizeMOVSX(OutMI))
    return;

  // Handle a few special cases to eliminate operand modifiers.
  switch (OutMI.getOpcode()) {
  case X86::LEA64_32r:
@@ -703,14 +678,6 @@ void X86MCInstLower::Lower(const MachineInstr *MI, MCInst &OutMI) const {
    SimplifyShortImmForm(OutMI, NewOpc);
    break;
  }

  // Try to shrink some forms of movsx.
  case X86::MOVSX16rr8:
  case X86::MOVSX32rr16:
  case X86::MOVSX64rr32:
    SimplifyMOVSX(OutMI);
    break;

  case X86::VCMPPDrri:
  case X86::VCMPPDYrri:
  case X86::VCMPPSrri: