[X86, Peephole] Enable FoldImmediate for X86

Enable FoldImmediate for X86 by implementing X86InstrInfo::FoldImmediate.

Also enhance the peephole optimizer to delete identical instructions after FoldImmediate.
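
As an illustrative sketch (taken from the new MIR test added below; register
numbers are from that test), the fold rewrites a register ALU operation whose
source operand is a known constant into its immediate form:

  %0:gr32 = MOV32ri 81
  %2:gr32 = ADD32rr %0, %1, implicit-def $eflags
  ; ==> after peephole-opt
  %2:gr32 = ADD32ri %1, 81, implicit-def $eflags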

Differential Revision: https://reviews.llvm.org/D151848
Author: Guozhi Wei
Date:   2023-10-17 16:22:42 +00:00
Parent: 08d6b87454
Commit: 760e7d00d1

20 changed files with 2883 additions and 2219 deletions


@@ -202,7 +202,8 @@ namespace {
bool isMoveImmediate(MachineInstr &MI, SmallSet<Register, 4> &ImmDefRegs,
DenseMap<Register, MachineInstr *> &ImmDefMIs);
bool foldImmediate(MachineInstr &MI, SmallSet<Register, 4> &ImmDefRegs,
DenseMap<Register, MachineInstr *> &ImmDefMIs);
DenseMap<Register, MachineInstr *> &ImmDefMIs,
bool &Deleted);
/// Finds recurrence cycles, but only ones that are formulated around
/// a def operand and a use operand that are tied. If there is a use
@@ -217,8 +218,11 @@ namespace {
/// set \p CopyMIs. If this virtual register was previously seen as a
/// copy, replace the uses of this copy with the previously seen copy's
/// destination register.
/// \p LocalMIs contains all previously seen instructions. An optimized-away
/// instruction should be deleted from LocalMIs.
bool foldRedundantCopy(MachineInstr &MI,
DenseMap<RegSubRegPair, MachineInstr *> &CopyMIs);
DenseMap<RegSubRegPair, MachineInstr *> &CopyMIs,
SmallPtrSetImpl<MachineInstr *> &LocalMIs);
/// Is the register \p Reg a non-allocatable physical register?
bool isNAPhysCopy(Register Reg);
@@ -1351,18 +1355,19 @@ bool PeepholeOptimizer::isMoveImmediate(
MachineInstr &MI, SmallSet<Register, 4> &ImmDefRegs,
DenseMap<Register, MachineInstr *> &ImmDefMIs) {
const MCInstrDesc &MCID = MI.getDesc();
if (!MI.isMoveImmediate())
return false;
if (MCID.getNumDefs() != 1)
if (MCID.getNumDefs() != 1 || !MI.getOperand(0).isReg())
return false;
Register Reg = MI.getOperand(0).getReg();
if (Reg.isVirtual()) {
ImmDefMIs.insert(std::make_pair(Reg, &MI));
ImmDefRegs.insert(Reg);
return true;
}
if (!Reg.isVirtual())
return false;
return false;
int64_t ImmVal;
if (!MI.isMoveImmediate() && !TII->getConstValDefinedInReg(MI, Reg, ImmVal))
return false;
ImmDefMIs.insert(std::make_pair(Reg, &MI));
ImmDefRegs.insert(Reg);
return true;
}
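
isMoveImmediate now also accepts a definition that is not marked
isMoveImmediate() but whose constant value the target can report through
getConstValDefinedInReg. A minimal X86 sketch of such a definition (the same
pattern quoted in the X86InstrInfo change below):

  %8:gr32 = MOV32r0 implicit-def dead $eflags
  ; not a move-immediate, but getConstValDefinedInReg returns ImmVal = 0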
/// Try folding register operands that are defined by move immediate
@@ -1370,7 +1375,8 @@ bool PeepholeOptimizer::isMoveImmediate(
/// and only if the def and use are in the same BB.
bool PeepholeOptimizer::foldImmediate(
MachineInstr &MI, SmallSet<Register, 4> &ImmDefRegs,
DenseMap<Register, MachineInstr *> &ImmDefMIs) {
DenseMap<Register, MachineInstr *> &ImmDefMIs, bool &Deleted) {
Deleted = false;
for (unsigned i = 0, e = MI.getDesc().getNumOperands(); i != e; ++i) {
MachineOperand &MO = MI.getOperand(i);
if (!MO.isReg() || MO.isDef())
@@ -1384,6 +1390,19 @@ bool PeepholeOptimizer::foldImmediate(
assert(II != ImmDefMIs.end() && "couldn't find immediate definition");
if (TII->FoldImmediate(MI, *II->second, Reg, MRI)) {
++NumImmFold;
// FoldImmediate can delete ImmDefMI if MI was its only user. If ImmDefMI
// is not deleted and MI happens to become identical to it, we can delete MI
// and replace its users.
if (MRI->getVRegDef(Reg) &&
MI.isIdenticalTo(*II->second, MachineInstr::IgnoreVRegDefs)) {
Register DstReg = MI.getOperand(0).getReg();
if (DstReg.isVirtual() &&
MRI->getRegClass(DstReg) == MRI->getRegClass(Reg)) {
MRI->replaceRegWith(DstReg, Reg);
MI.eraseFromParent();
Deleted = true;
}
}
return true;
}
}
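
The new Deleted path fires when folding turns MI into a duplicate of the
immediate definition itself. A sketch, with hypothetical vreg numbers:

  %0:gr32 = MOV32ri 81
  %1:gr32 = COPY %0
  ; FoldImmediate rewrites the COPY into '%1:gr32 = MOV32ri 81', which is
  ; identical to %0's def, so uses of %1 are replaced by %0 and MI is erased.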
@@ -1405,7 +1424,8 @@ bool PeepholeOptimizer::foldImmediate(
//
// Should replace %2 uses with %1:sub1
bool PeepholeOptimizer::foldRedundantCopy(
MachineInstr &MI, DenseMap<RegSubRegPair, MachineInstr *> &CopyMIs) {
MachineInstr &MI, DenseMap<RegSubRegPair, MachineInstr *> &CopyMIs,
SmallPtrSetImpl<MachineInstr *> &LocalMIs) {
assert(MI.isCopy() && "expected a COPY machine instruction");
Register SrcReg = MI.getOperand(1).getReg();
@@ -1425,6 +1445,8 @@ bool PeepholeOptimizer::foldRedundantCopy(
}
MachineInstr *PrevCopy = CopyMIs.find(SrcPair)->second;
if (!LocalMIs.count(PrevCopy))
return false;
assert(SrcSubReg == PrevCopy->getOperand(1).getSubReg() &&
"Unexpected mismatching subreg!");
@@ -1732,7 +1754,7 @@ bool PeepholeOptimizer::runOnMachineFunction(MachineFunction &MF) {
continue;
}
if (MI->isCopy() && (foldRedundantCopy(*MI, CopySrcMIs) ||
if (MI->isCopy() && (foldRedundantCopy(*MI, CopySrcMIs, LocalMIs) ||
foldRedundantNAPhysCopy(*MI, NAPhysToVirtMIs))) {
LocalMIs.erase(MI);
LLVM_DEBUG(dbgs() << "Deleting redundant copy: " << *MI << "\n");
@@ -1750,8 +1772,14 @@ bool PeepholeOptimizer::runOnMachineFunction(MachineFunction &MF) {
// next iteration sees the new instructions.
MII = MI;
++MII;
if (SeenMoveImm)
Changed |= foldImmediate(*MI, ImmDefRegs, ImmDefMIs);
if (SeenMoveImm) {
bool Deleted;
Changed |= foldImmediate(*MI, ImmDefRegs, ImmDefMIs, Deleted);
if (Deleted) {
LocalMIs.erase(MI);
continue;
}
}
}
// Check whether MI is a load candidate for folding into a later


@@ -3867,12 +3867,42 @@ bool X86InstrInfo::verifyInstruction(const MachineInstr &MI,
bool X86InstrInfo::getConstValDefinedInReg(const MachineInstr &MI,
const Register Reg,
int64_t &ImmVal) const {
if (MI.getOpcode() != X86::MOV32ri && MI.getOpcode() != X86::MOV64ri)
Register MovReg = Reg;
const MachineInstr *MovMI = &MI;
// Follow use-def for SUBREG_TO_REG to find the real move immediate
// instruction. This pattern is quite common on x86-64.
if (MI.isSubregToReg()) {
// We use the following pattern to set up a 64-bit immediate.
// %8:gr32 = MOV32r0 implicit-def dead $eflags
// %6:gr64 = SUBREG_TO_REG 0, killed %8:gr32, %subreg.sub_32bit
if (!MI.getOperand(1).isImm())
return false;
unsigned FillBits = MI.getOperand(1).getImm();
unsigned SubIdx = MI.getOperand(3).getImm();
MovReg = MI.getOperand(2).getReg();
if (SubIdx != X86::sub_32bit || FillBits != 0)
return false;
const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
MovMI = MRI.getUniqueVRegDef(MovReg);
if (!MovMI)
return false;
}
if (MovMI->getOpcode() == X86::MOV32r0 &&
MovMI->getOperand(0).getReg() == MovReg) {
ImmVal = 0;
return true;
}
if (MovMI->getOpcode() != X86::MOV32ri &&
MovMI->getOpcode() != X86::MOV64ri &&
MovMI->getOpcode() != X86::MOV32ri64 && MovMI->getOpcode() != X86::MOV8ri)
return false;
// The MOV source operand can be a global address rather than an immediate.
if (!MI.getOperand(1).isImm() || MI.getOperand(0).getReg() != Reg)
if (!MovMI->getOperand(1).isImm() || MovMI->getOperand(0).getReg() != MovReg)
return false;
ImmVal = MI.getOperand(1).getImm();
ImmVal = MovMI->getOperand(1).getImm();
return true;
}
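
A caller-side sketch of the extended hook (DefMI and Reg are assumed to come
from the peephole driver):

  int64_t ImmVal;
  if (TII->getConstValDefinedInReg(DefMI, Reg, ImmVal)) {
    // ImmVal is 0 for MOV32r0, otherwise the immediate operand of the
    // MOV8ri/MOV32ri/MOV32ri64/MOV64ri found, possibly through SUBREG_TO_REG.
  }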
@@ -4769,6 +4799,310 @@ MachineInstr *X86InstrInfo::optimizeLoadInstr(MachineInstr &MI,
return nullptr;
}
/// Convert an ALUrr opcode to the corresponding ALUri opcode, e.g.
/// ADD32rr ==> ADD32ri
/// ShiftRotate is set to true if Opcode is a shift or rotate.
/// If the ALUri can be further changed to a COPY when the immediate is 0,
/// CanConvert2Copy is set to true.
static unsigned ConvertALUrr2ALUri(unsigned Opcode, bool &CanConvert2Copy,
bool &ShiftRotate) {
CanConvert2Copy = false;
ShiftRotate = false;
unsigned NewOpcode = 0;
switch (Opcode) {
case X86::ADD64rr:
NewOpcode = X86::ADD64ri32;
CanConvert2Copy = true;
break;
case X86::ADC64rr:
NewOpcode = X86::ADC64ri32;
break;
case X86::SUB64rr:
NewOpcode = X86::SUB64ri32;
CanConvert2Copy = true;
break;
case X86::SBB64rr:
NewOpcode = X86::SBB64ri32;
break;
case X86::AND64rr:
NewOpcode = X86::AND64ri32;
break;
case X86::OR64rr:
NewOpcode = X86::OR64ri32;
CanConvert2Copy = true;
break;
case X86::XOR64rr:
NewOpcode = X86::XOR64ri32;
CanConvert2Copy = true;
break;
case X86::TEST64rr:
NewOpcode = X86::TEST64ri32;
break;
case X86::CMP64rr:
NewOpcode = X86::CMP64ri32;
break;
case X86::SHR64rCL:
NewOpcode = X86::SHR64ri;
ShiftRotate = true;
break;
case X86::SHL64rCL:
NewOpcode = X86::SHL64ri;
ShiftRotate = true;
break;
case X86::SAR64rCL:
NewOpcode = X86::SAR64ri;
ShiftRotate = true;
break;
case X86::ROL64rCL:
NewOpcode = X86::ROL64ri;
ShiftRotate = true;
break;
case X86::ROR64rCL:
NewOpcode = X86::ROR64ri;
ShiftRotate = true;
break;
case X86::RCL64rCL:
NewOpcode = X86::RCL64ri;
ShiftRotate = true;
break;
case X86::RCR64rCL:
NewOpcode = X86::RCR64ri;
ShiftRotate = true;
break;
case X86::ADD32rr:
NewOpcode = X86::ADD32ri;
CanConvert2Copy = true;
break;
case X86::ADC32rr:
NewOpcode = X86::ADC32ri;
break;
case X86::SUB32rr:
NewOpcode = X86::SUB32ri;
CanConvert2Copy = true;
break;
case X86::SBB32rr:
NewOpcode = X86::SBB32ri;
break;
case X86::AND32rr:
NewOpcode = X86::AND32ri;
break;
case X86::OR32rr:
NewOpcode = X86::OR32ri;
CanConvert2Copy = true;
break;
case X86::XOR32rr:
NewOpcode = X86::XOR32ri;
CanConvert2Copy = true;
break;
case X86::TEST32rr:
NewOpcode = X86::TEST32ri;
break;
case X86::CMP32rr:
NewOpcode = X86::CMP32ri;
break;
case X86::SHR32rCL:
NewOpcode = X86::SHR32ri;
ShiftRotate = true;
break;
case X86::SHL32rCL:
NewOpcode = X86::SHL32ri;
ShiftRotate = true;
break;
case X86::SAR32rCL:
NewOpcode = X86::SAR32ri;
ShiftRotate = true;
break;
case X86::ROL32rCL:
NewOpcode = X86::ROL32ri;
ShiftRotate = true;
break;
case X86::ROR32rCL:
NewOpcode = X86::ROR32ri;
ShiftRotate = true;
break;
case X86::RCL32rCL:
NewOpcode = X86::RCL32ri;
ShiftRotate = true;
break;
case X86::RCR32rCL:
NewOpcode = X86::RCR32ri;
ShiftRotate = true;
break;
}
return NewOpcode;
}
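
For the shift/rotate rows the folded register is the count in $cl, which
becomes an 8-bit immediate. An illustrative MIR sketch, assuming the count
was materialized by MOV8ri:

  $cl = MOV8ri 5
  %1:gr32 = SHL32rCL %0, implicit-def dead $eflags, implicit $cl
  ; ==>
  %1:gr32 = SHL32ri %0, 5, implicit-def dead $eflags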
/// Real implementation of FoldImmediate.
/// Reg is assigned ImmVal in DefMI, and is used in UseMI.
/// If MakeChange is true, this function tries to replace Reg by ImmVal in
/// UseMI. If MakeChange is false, just check if folding is possible.
/// Return true if folding is successful or possible.
bool X86InstrInfo::FoldImmediateImpl(MachineInstr &UseMI, MachineInstr *DefMI,
Register Reg, int64_t ImmVal,
MachineRegisterInfo *MRI,
bool MakeChange) const {
bool Modified = false;
bool ShiftRotate = false;
// When ImmVal is 0, some instructions can be changed to COPY.
bool CanChangeToCopy = false;
unsigned Opc = UseMI.getOpcode();
// 64-bit operations accept sign-extended 32-bit immediates.
// 32-bit operations accept all 32-bit immediates, so we don't need to check
// them.
const TargetRegisterClass *RC = nullptr;
if (Reg.isVirtual())
RC = MRI->getRegClass(Reg);
if ((Reg.isPhysical() && X86::GR64RegClass.contains(Reg)) ||
(Reg.isVirtual() && X86::GR64RegClass.hasSubClassEq(RC))) {
if (!isInt<32>(ImmVal))
return false;
}
if (UseMI.findRegisterUseOperand(Reg)->getSubReg())
return false;
// An immediate has a larger encoding than a register. So avoid folding the
// immediate if it has more than one use and we are optimizing for size.
if (UseMI.getMF()->getFunction().hasOptSize() && Reg.isVirtual() &&
!MRI->hasOneNonDBGUse(Reg))
return false;
unsigned NewOpc;
if (Opc == TargetOpcode::COPY) {
Register ToReg = UseMI.getOperand(0).getReg();
const TargetRegisterClass *RC = nullptr;
if (ToReg.isVirtual())
RC = MRI->getRegClass(ToReg);
bool GR32Reg = (ToReg.isVirtual() && X86::GR32RegClass.hasSubClassEq(RC)) ||
(ToReg.isPhysical() && X86::GR32RegClass.contains(ToReg));
bool GR64Reg = (ToReg.isVirtual() && X86::GR64RegClass.hasSubClassEq(RC)) ||
(ToReg.isPhysical() && X86::GR64RegClass.contains(ToReg));
bool GR8Reg = (ToReg.isVirtual() && X86::GR8RegClass.hasSubClassEq(RC)) ||
(ToReg.isPhysical() && X86::GR8RegClass.contains(ToReg));
if (ImmVal == 0) {
// For immediate 0 we only have MOV32r0.
if (!GR32Reg)
return false;
}
if (GR64Reg) {
if (isUInt<32>(ImmVal))
NewOpc = X86::MOV32ri64;
else
NewOpc = X86::MOV64ri;
} else if (GR32Reg) {
NewOpc = X86::MOV32ri;
if (ImmVal == 0) {
// MOV32r0 clobbers EFLAGS.
const TargetRegisterInfo *TRI = &getRegisterInfo();
if (UseMI.getParent()->computeRegisterLiveness(TRI, X86::EFLAGS, UseMI)
!= MachineBasicBlock::LQR_Dead)
return false;
// MOV32r0 is different from the other cases because it doesn't encode the
// immediate in the instruction. So we directly modify it here.
if (!MakeChange)
return true;
UseMI.setDesc(get(X86::MOV32r0));
UseMI.removeOperand(UseMI.findRegisterUseOperandIdx(Reg));
UseMI.addOperand(MachineOperand::CreateReg(X86::EFLAGS, /*isDef=*/ true,
/*isImp=*/ true,
/*isKill=*/ false,
/*isDead=*/ true));
Modified = true;
}
} else if (GR8Reg)
NewOpc = X86::MOV8ri;
else
return false;
} else
NewOpc = ConvertALUrr2ALUri(Opc, CanChangeToCopy, ShiftRotate);
if (!NewOpc)
return false;
// For SUB instructions the immediate can only be the second source operand.
if ((NewOpc == X86::SUB64ri32 || NewOpc == X86::SUB32ri ||
NewOpc == X86::SBB64ri32 || NewOpc == X86::SBB32ri) &&
UseMI.findRegisterUseOperandIdx(Reg) != 2)
return false;
// For CMP instructions the immediate can only be at index 1.
if ((NewOpc == X86::CMP64ri32 || NewOpc == X86::CMP32ri) &&
UseMI.findRegisterUseOperandIdx(Reg) != 1)
return false;
if (ShiftRotate) {
unsigned RegIdx = UseMI.findRegisterUseOperandIdx(Reg);
if (RegIdx < 2)
return false;
if (!isInt<8>(ImmVal))
return false;
assert(Reg == X86::CL);
if (!MakeChange)
return true;
UseMI.setDesc(get(NewOpc));
UseMI.removeOperand(RegIdx);
UseMI.addOperand(MachineOperand::CreateImm(ImmVal));
// Reg is the physical register $cl, so we can't tell through MRI whether
// DefMI is dead. Let the caller handle it, or the dead-mi-elimination pass
// can delete the instruction that defines the dead physical register.
return true;
}
if (!MakeChange)
return true;
if (!Modified) {
// Modify the instruction.
if (ImmVal == 0 && CanChangeToCopy &&
UseMI.registerDefIsDead(X86::EFLAGS)) {
// %100 = add %101, 0
// ==>
// %100 = COPY %101
UseMI.setDesc(get(TargetOpcode::COPY));
UseMI.removeOperand(UseMI.findRegisterUseOperandIdx(Reg));
UseMI.removeOperand(UseMI.findRegisterDefOperandIdx(X86::EFLAGS));
UseMI.untieRegOperand(0);
UseMI.clearFlag(MachineInstr::MIFlag::NoSWrap);
UseMI.clearFlag(MachineInstr::MIFlag::NoUWrap);
} else {
unsigned Op1 = 1, Op2 = CommuteAnyOperandIndex;
unsigned ImmOpNum = 2;
if (!UseMI.getOperand(0).isDef()) {
Op1 = 0; // TEST, CMP
ImmOpNum = 1;
}
if (Opc == TargetOpcode::COPY)
ImmOpNum = 1;
if (findCommutedOpIndices(UseMI, Op1, Op2) &&
UseMI.getOperand(Op1).getReg() == Reg)
commuteInstruction(UseMI);
assert(UseMI.getOperand(ImmOpNum).getReg() == Reg);
UseMI.setDesc(get(NewOpc));
UseMI.getOperand(ImmOpNum).ChangeToImmediate(ImmVal);
}
}
if (Reg.isVirtual() && MRI->use_nodbg_empty(Reg))
DefMI->eraseFromBundle();
return true;
}
/// FoldImmediate - 'Reg' is known to be defined by a move immediate
/// instruction, try to fold the immediate into the use instruction.
bool X86InstrInfo::FoldImmediate(MachineInstr &UseMI, MachineInstr &DefMI,
Register Reg, MachineRegisterInfo *MRI) const {
int64_t ImmVal;
if (!getConstValDefinedInReg(DefMI, Reg, ImmVal))
return false;
return FoldImmediateImpl(UseMI, &DefMI, Reg, ImmVal, MRI, true);
}
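
When UseMI is a plain COPY, the constant is instead rematerialized in the
destination register class; e.g. in the new MIR test a GR64 copy of a 32-bit
constant becomes MOV32ri64, relying on implicit zero extension:

  %7:gr64 = SUBREG_TO_REG 0, killed %0, %subreg.sub_32bit   ; %0 = MOV32ri 81
  %14:gr64 = COPY %7
  ; ==>
  %14:gr64 = MOV32ri64 81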
/// Expand a single-def pseudo instruction to a two-addr
/// instruction with two undef reads of the register being defined.
/// This is used for mapping:


@@ -550,6 +550,15 @@ public:
Register &FoldAsLoadDefReg,
MachineInstr *&DefMI) const override;
bool FoldImmediateImpl(MachineInstr &UseMI, MachineInstr *DefMI, Register Reg,
int64_t ImmVal, MachineRegisterInfo *MRI,
bool MakeChange) const;
/// Reg is known to be defined by a move immediate instruction, try to fold
/// the immediate into the use instruction.
bool FoldImmediate(MachineInstr &UseMI, MachineInstr &DefMI, Register Reg,
MachineRegisterInfo *MRI) const override;
std::pair<unsigned, unsigned>
decomposeMachineOperandsTargetFlags(unsigned TF) const override;


@@ -8,7 +8,6 @@ body: |
; GCN-LABEL: name: fold_simm_virtual
; GCN: [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
; GCN-NEXT: [[S_MOV_B32_1:%[0-9]+]]:sreg_32 = S_MOV_B32 0
; GCN-NEXT: SI_RETURN_TO_EPILOG
%0:sreg_32 = S_MOV_B32 0
%1:sreg_32 = COPY killed %0


@@ -4,8 +4,7 @@
define i8 @test_i8(i32 %a, i8 %f, i8 %t) {
; ALL-LABEL: test_i8:
; ALL: # %bb.0: # %entry
; ALL-NEXT: xorl %ecx, %ecx
; ALL-NEXT: cmpl %ecx, %edi
; ALL-NEXT: cmpl $0, %edi
; ALL-NEXT: setg %cl
; ALL-NEXT: testb $1, %cl
; ALL-NEXT: je .LBB0_2
@@ -35,8 +34,7 @@ cond.end: ; preds = %cond.false, %cond.t
define i16 @test_i16(i32 %a, i16 %f, i16 %t) {
; ALL-LABEL: test_i16:
; ALL: # %bb.0: # %entry
; ALL-NEXT: xorl %ecx, %ecx
; ALL-NEXT: cmpl %ecx, %edi
; ALL-NEXT: cmpl $0, %edi
; ALL-NEXT: setg %cl
; ALL-NEXT: testb $1, %cl
; ALL-NEXT: je .LBB1_2
@@ -67,8 +65,7 @@ define i32 @test_i32(i32 %a, i32 %f, i32 %t) {
; ALL-LABEL: test_i32:
; ALL: # %bb.0: # %entry
; ALL-NEXT: movl %esi, %eax
; ALL-NEXT: xorl %ecx, %ecx
; ALL-NEXT: cmpl %ecx, %edi
; ALL-NEXT: cmpl $0, %edi
; ALL-NEXT: setg %cl
; ALL-NEXT: testb $1, %cl
; ALL-NEXT: je .LBB2_1
@@ -96,8 +93,7 @@ define i64 @test_i64(i32 %a, i64 %f, i64 %t) {
; ALL-LABEL: test_i64:
; ALL: # %bb.0: # %entry
; ALL-NEXT: movq %rsi, %rax
; ALL-NEXT: xorl %ecx, %ecx
; ALL-NEXT: cmpl %ecx, %edi
; ALL-NEXT: cmpl $0, %edi
; ALL-NEXT: setg %cl
; ALL-NEXT: testb $1, %cl
; ALL-NEXT: je .LBB3_1
@@ -124,8 +120,7 @@ cond.end: ; preds = %cond.false, %cond.t
define float @test_float(i32 %a, float %f, float %t) {
; ALL-LABEL: test_float:
; ALL: # %bb.0: # %entry
; ALL-NEXT: xorl %eax, %eax
; ALL-NEXT: cmpl %eax, %edi
; ALL-NEXT: cmpl $0, %edi
; ALL-NEXT: setg %al
; ALL-NEXT: testb $1, %al
; ALL-NEXT: je .LBB4_1
@@ -152,8 +147,7 @@ cond.end: ; preds = %cond.false, %cond.t
define double @test_double(i32 %a, double %f, double %t) {
; ALL-LABEL: test_double:
; ALL: # %bb.0: # %entry
; ALL-NEXT: xorl %eax, %eax
; ALL-NEXT: cmpl %eax, %edi
; ALL-NEXT: cmpl $0, %edi
; ALL-NEXT: setg %al
; ALL-NEXT: testb $1, %al
; ALL-NEXT: je .LBB5_1


@@ -178,15 +178,15 @@ define i128 @scalar_i128(i128 %x, i128 %y, ptr %divdst) nounwind {
; X86-NEXT: pushl %edi
; X86-NEXT: pushl %esi
; X86-NEXT: subl $152, %esp
; X86-NEXT: movl {{[0-9]+}}(%esp), %ebp
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl %ecx, %eax
; X86-NEXT: sarl $31, %eax
; X86-NEXT: movl %ebp, %edx
; X86-NEXT: sarl $31, %edx
; X86-NEXT: movl %edx, %edi
; X86-NEXT: sarl $31, %edi
; X86-NEXT: movl %eax, %esi
; X86-NEXT: xorl %ecx, %esi
; X86-NEXT: movl %esi, %edi
; X86-NEXT: movl %esi, %ebp
; X86-NEXT: movl %eax, %ecx
; X86-NEXT: xorl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl %ecx, %ebx
@@ -195,66 +195,67 @@ define i128 @scalar_i128(i128 %x, i128 %y, ptr %divdst) nounwind {
; X86-NEXT: movl %eax, %esi
; X86-NEXT: xorl {{[0-9]+}}(%esp), %esi
; X86-NEXT: subl %eax, %esi
; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl %esi, (%esp) # 4-byte Spill
; X86-NEXT: sbbl %eax, %ecx
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: sbbl %eax, %ebx
; X86-NEXT: movl %ebx, (%esp) # 4-byte Spill
; X86-NEXT: sbbl %eax, %edi
; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl %edx, %edi
; X86-NEXT: xorl %ebp, %edi
; X86-NEXT: movl %edx, %ebx
; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: sbbl %eax, %ebp
; X86-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl %edi, %esi
; X86-NEXT: xorl %edx, %esi
; X86-NEXT: movl %edi, %edx
; X86-NEXT: xorl {{[0-9]+}}(%esp), %edx
; X86-NEXT: movl %edi, %ebx
; X86-NEXT: xorl {{[0-9]+}}(%esp), %ebx
; X86-NEXT: movl %edx, %esi
; X86-NEXT: xorl {{[0-9]+}}(%esp), %esi
; X86-NEXT: movl %edx, %ebp
; X86-NEXT: movl %edi, %ebp
; X86-NEXT: xorl {{[0-9]+}}(%esp), %ebp
; X86-NEXT: subl %edx, %ebp
; X86-NEXT: sbbl %edx, %esi
; X86-NEXT: sbbl %edx, %ebx
; X86-NEXT: sbbl %edx, %edi
; X86-NEXT: xorl %eax, %edx
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl %esi, %eax
; X86-NEXT: orl %edi, %eax
; X86-NEXT: subl %edi, %ebp
; X86-NEXT: sbbl %edi, %ebx
; X86-NEXT: sbbl %edi, %edx
; X86-NEXT: sbbl %edi, %esi
; X86-NEXT: xorl %eax, %edi
; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl %ebx, %eax
; X86-NEXT: orl %esi, %eax
; X86-NEXT: movl %ebp, %ecx
; X86-NEXT: orl %ebx, %ecx
; X86-NEXT: orl %edx, %ecx
; X86-NEXT: movl %edx, %edi
; X86-NEXT: orl %eax, %ecx
; X86-NEXT: sete %cl
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
; X86-NEXT: orl (%esp), %edx # 4-byte Folded Reload
; X86-NEXT: movl (%esp), %edx # 4-byte Reload
; X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
; X86-NEXT: orl %eax, %edx
; X86-NEXT: sete %al
; X86-NEXT: orb %cl, %al
; X86-NEXT: movb %al, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill
; X86-NEXT: bsrl %edi, %edx
; X86-NEXT: bsrl %esi, %edx
; X86-NEXT: xorl $31, %edx
; X86-NEXT: bsrl %ebx, %ecx
; X86-NEXT: bsrl %edi, %ecx
; X86-NEXT: xorl $31, %ecx
; X86-NEXT: addl $32, %ecx
; X86-NEXT: testl %edi, %edi
; X86-NEXT: testl %esi, %esi
; X86-NEXT: cmovnel %edx, %ecx
; X86-NEXT: bsrl %esi, %edx
; X86-NEXT: bsrl %ebx, %edx
; X86-NEXT: xorl $31, %edx
; X86-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: bsrl %ebp, %ebp
; X86-NEXT: xorl $31, %ebp
; X86-NEXT: addl $32, %ebp
; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: testl %esi, %esi
; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: testl %ebx, %ebx
; X86-NEXT: cmovnel %edx, %ebp
; X86-NEXT: addl $64, %ebp
; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: orl %edi, %ebx
; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: orl %esi, %edi
; X86-NEXT: cmovnel %ecx, %ebp
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
; X86-NEXT: bsrl %edi, %edx
; X86-NEXT: xorl $31, %edx
; X86-NEXT: movl (%esp), %eax # 4-byte Reload
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-NEXT: bsrl %eax, %ecx
; X86-NEXT: xorl $31, %ecx
; X86-NEXT: addl $32, %ecx
@@ -263,7 +264,7 @@ define i128 @scalar_i128(i128 %x, i128 %y, ptr %divdst) nounwind {
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
; X86-NEXT: bsrl %ebx, %esi
; X86-NEXT: xorl $31, %esi
; X86-NEXT: bsrl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
; X86-NEXT: bsrl (%esp), %edx # 4-byte Folded Reload
; X86-NEXT: xorl $31, %edx
; X86-NEXT: addl $32, %edx
; X86-NEXT: testl %ebx, %ebx
@@ -272,149 +273,137 @@ define i128 @scalar_i128(i128 %x, i128 %y, ptr %divdst) nounwind {
; X86-NEXT: movl %eax, %esi
; X86-NEXT: orl %edi, %esi
; X86-NEXT: cmovnel %ecx, %edx
; X86-NEXT: xorl %ebx, %ebx
; X86-NEXT: xorl %esi, %esi
; X86-NEXT: subl %edx, %ebp
; X86-NEXT: movl $0, %eax
; X86-NEXT: sbbl %eax, %eax
; X86-NEXT: movl $0, %ebx
; X86-NEXT: sbbl %ebx, %ebx
; X86-NEXT: movl $0, %edx
; X86-NEXT: sbbl %edx, %edx
; X86-NEXT: movl $0, %esi
; X86-NEXT: sbbl %esi, %esi
; X86-NEXT: movl $0, %eax
; X86-NEXT: sbbl %eax, %eax
; X86-NEXT: movl $127, %ecx
; X86-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: cmpl %ebp, %ecx
; X86-NEXT: movl %esi, %ebp
; X86-NEXT: movl $0, %ecx
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: sbbl %eax, %ecx
; X86-NEXT: sbbl %ebx, %ecx
; X86-NEXT: movl $0, %ecx
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: sbbl %edx, %ecx
; X86-NEXT: movl $0, %ecx
; X86-NEXT: sbbl %esi, %ecx
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: sbbl %eax, %ecx
; X86-NEXT: setb %cl
; X86-NEXT: orb {{[-0-9]+}}(%e{{[sb]}}p), %cl # 1-byte Folded Reload
; X86-NEXT: cmovnel %ebx, %edi
; X86-NEXT: movl (%esp), %edx # 4-byte Reload
; X86-NEXT: cmovnel %ebx, %edx
; X86-NEXT: cmovnel %esi, %edi
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
; X86-NEXT: cmovnel %esi, %edx
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-NEXT: cmovnel %ebx, %eax
; X86-NEXT: cmovel {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
; X86-NEXT: movl %ebx, %esi
; X86-NEXT: jne .LBB4_1
; X86-NEXT: # %bb.8: # %_udiv-special-cases
; X86-NEXT: movl %ebp, %ebx
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
; X86-NEXT: xorl $127, %ebp
; X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: orl %ebx, %ecx
; X86-NEXT: orl %ebp, %ecx
; X86-NEXT: cmovnel %esi, %eax
; X86-NEXT: cmovel (%esp), %esi # 4-byte Folded Reload
; X86-NEXT: movl {{[0-9]+}}(%esp), %ebp
; X86-NEXT: jne .LBB4_8
; X86-NEXT: # %bb.1: # %_udiv-special-cases
; X86-NEXT: movl %ebx, %ecx
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
; X86-NEXT: je .LBB4_9
; X86-NEXT: # %bb.5: # %udiv-bb1
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
; X86-NEXT: xorl $127, %ebx
; X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
; X86-NEXT: orl %ebx, %ecx
; X86-NEXT: je .LBB4_8
; X86-NEXT: # %bb.2: # %udiv-bb1
; X86-NEXT: movl (%esp), %eax # 4-byte Reload
; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-NEXT: movl %eax, {{[0-9]+}}(%esp)
; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT: movl %ecx, %eax
; X86-NEXT: movl %ecx, %ebp
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-NEXT: # kill: def $al killed $al killed $eax
; X86-NEXT: xorb $127, %al
; X86-NEXT: movb %al, %ch
; X86-NEXT: andb $7, %ch
; X86-NEXT: shrb $3, %al
; X86-NEXT: andb $15, %al
; X86-NEXT: negb %al
; X86-NEXT: movsbl %al, %edi
; X86-NEXT: movl 144(%esp,%edi), %edx
; X86-NEXT: movl 148(%esp,%edi), %esi
; X86-NEXT: movsbl %al, %ebx
; X86-NEXT: movl 144(%esp,%ebx), %edx
; X86-NEXT: movl 148(%esp,%ebx), %edi
; X86-NEXT: movb %ch, %cl
; X86-NEXT: shldl %cl, %edx, %esi
; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: shldl %cl, %edx, %edi
; X86-NEXT: shll %cl, %edx
; X86-NEXT: notb %cl
; X86-NEXT: movl 140(%esp,%edi), %eax
; X86-NEXT: movl 140(%esp,%ebx), %eax
; X86-NEXT: movl %eax, %esi
; X86-NEXT: shrl %esi
; X86-NEXT: shrl %cl, %esi
; X86-NEXT: orl %edx, %esi
; X86-NEXT: movl %esi, %edx
; X86-NEXT: movl 136(%esp,%edi), %esi
; X86-NEXT: movl 136(%esp,%ebx), %esi
; X86-NEXT: movb %ch, %cl
; X86-NEXT: shldl %cl, %esi, %eax
; X86-NEXT: shll %cl, %esi
; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: addl $1, %ebp
; X86-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: addl $1, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT: adcl $0, %ecx
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
; X86-NEXT: adcl $0, %edi
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
; X86-NEXT: adcl $0, %esi
; X86-NEXT: jae .LBB4_2
; X86-NEXT: # %bb.6:
; X86-NEXT: xorl %ebp, %ebp
; X86-NEXT: xorl %ecx, %ecx
; X86-NEXT: jmp .LBB4_7
; X86-NEXT: .LBB4_1:
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
; X86-NEXT: jmp .LBB4_9
; X86-NEXT: .LBB4_2: # %udiv-preheader
; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
; X86-NEXT: movl %esi, {{[0-9]+}}(%esp)
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
; X86-NEXT: movl %esi, {{[0-9]+}}(%esp)
; X86-NEXT: adcl $0, %ebx
; X86-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
; X86-NEXT: jae .LBB4_3
; X86-NEXT: # %bb.6:
; X86-NEXT: xorl %ebx, %ebx
; X86-NEXT: xorl %esi, %esi
; X86-NEXT: jmp .LBB4_7
; X86-NEXT: .LBB4_3: # %udiv-preheader
; X86-NEXT: movl (%esp), %esi # 4-byte Reload
; X86-NEXT: movl %esi, {{[0-9]+}}(%esp)
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
; X86-NEXT: movl %esi, {{[0-9]+}}(%esp)
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
; X86-NEXT: movl %esi, {{[0-9]+}}(%esp)
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
; X86-NEXT: movl %esi, {{[0-9]+}}(%esp)
; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
; X86-NEXT: movb %bl, %ch
; X86-NEXT: andb $7, %ch
; X86-NEXT: movb %bl, %cl
; X86-NEXT: shrb $3, %cl
; X86-NEXT: andb $15, %cl
; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movzbl %cl, %ebx
; X86-NEXT: movl 100(%esp,%ebx), %esi
; X86-NEXT: movl %esi, (%esp) # 4-byte Spill
; X86-NEXT: movl 96(%esp,%ebx), %edi
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl %edi, %ebp
; X86-NEXT: movb %ch, %cl
; X86-NEXT: shrdl %cl, %esi, %ebp
; X86-NEXT: movzbl %cl, %ebp
; X86-NEXT: movl 100(%esp,%ebp), %esi
; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
; X86-NEXT: movl 88(%esp,%ebx), %esi
; X86-NEXT: movl 92(%esp,%ebx), %ebx
; X86-NEXT: movl %ebx, %eax
; X86-NEXT: movl 96(%esp,%ebp), %ebx
; X86-NEXT: movl %ebp, %eax
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl %ebx, %edx
; X86-NEXT: movb %ch, %cl
; X86-NEXT: shrdl %cl, %esi, %edx
; X86-NEXT: movl 88(%esp,%ebp), %ebp
; X86-NEXT: movl 92(%esp,%eax), %esi
; X86-NEXT: movl %esi, %eax
; X86-NEXT: shrl %cl, %eax
; X86-NEXT: notb %cl
; X86-NEXT: addl %edi, %edi
; X86-NEXT: shll %cl, %edi
; X86-NEXT: orl %eax, %edi
; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: addl %ebx, %ebx
; X86-NEXT: shll %cl, %ebx
; X86-NEXT: orl %eax, %ebx
; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movb %ch, %cl
; X86-NEXT: shrl %cl, (%esp) # 4-byte Folded Spill
; X86-NEXT: shrdl %cl, %ebx, %esi
; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: shrl %cl, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
; X86-NEXT: shrdl %cl, %esi, %ebp
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-NEXT: addl $-1, %eax
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
@@ -424,115 +413,117 @@ define i128 @scalar_i128(i128 %x, i128 %y, ptr %divdst) nounwind {
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-NEXT: adcl $-1, %eax
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT: movl %ecx, %eax
; X86-NEXT: adcl $-1, %eax
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: xorl %esi, %esi
; X86-NEXT: movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
; X86-NEXT: movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
; X86-NEXT: .p2align 4, 0x90
; X86-NEXT: .LBB4_3: # %udiv-do-while
; X86-NEXT: # =>This Inner Loop Header: Depth=1
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl %ebp, %edx
; X86-NEXT: shldl $1, %ebp, (%esp) # 4-byte Folded Spill
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
; X86-NEXT: shldl $1, %ebp, %edx
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
; X86-NEXT: shldl $1, %ebx, %ebp
; X86-NEXT: shldl $1, %esi, %ebx
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT: shldl $1, %ecx, %esi
; X86-NEXT: .p2align 4, 0x90
; X86-NEXT: .LBB4_4: # %udiv-do-while
; X86-NEXT: # =>This Inner Loop Header: Depth=1
; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
; X86-NEXT: shldl $1, %edx, %ebx
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
; X86-NEXT: shldl $1, %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
; X86-NEXT: shldl $1, %ebp, %edx
; X86-NEXT: shldl $1, %edi, %ebp
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-NEXT: orl %eax, %esi
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
; X86-NEXT: shldl $1, %edi, %ecx
; X86-NEXT: orl %eax, %ecx
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: shldl $1, %eax, %edi
; X86-NEXT: orl %esi, %edi
; X86-NEXT: movl %edi, (%esp) # 4-byte Spill
; X86-NEXT: movl %ecx, %edi
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT: shldl $1, %ecx, %edi
; X86-NEXT: orl %eax, %edi
; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: addl %ecx, %ecx
; X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
; X86-NEXT: shldl $1, %ecx, %eax
; X86-NEXT: orl %esi, %eax
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-NEXT: shldl $1, %eax, %ecx
; X86-NEXT: orl %esi, %ecx
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: cmpl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Reload
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT: sbbl %ebp, %ecx
; X86-NEXT: addl %eax, %eax
; X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: cmpl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Reload
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT: sbbl %edx, %ecx
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT: sbbl (%esp), %ecx # 4-byte Folded Reload
; X86-NEXT: sbbl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT: sbbl %ebx, %ecx
; X86-NEXT: sarl $31, %ecx
; X86-NEXT: movl %ecx, %eax
; X86-NEXT: andl $1, %eax
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl %ecx, %eax
; X86-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl %ecx, %esi
; X86-NEXT: andl %edi, %esi
; X86-NEXT: movl %ecx, %edi
; X86-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
; X86-NEXT: movl %ecx, %eax
; X86-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
; X86-NEXT: andl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
; X86-NEXT: subl %ecx, %ebx
; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: sbbl %eax, %ebp
; X86-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: sbbl %edi, %edx
; X86-NEXT: movl %edx, %ebp
; X86-NEXT: subl %ecx, %ebp
; X86-NEXT: sbbl %eax, %edx
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-NEXT: sbbl %eax, (%esp) # 4-byte Folded Spill
; X86-NEXT: sbbl %edi, %edx
; X86-NEXT: movl (%esp), %edi # 4-byte Reload
; X86-NEXT: sbbl %esi, %ebx
; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT: addl $-1, %ecx
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-NEXT: adcl $-1, %eax
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
; X86-NEXT: adcl $-1, %edi
; X86-NEXT: adcl $-1, %edx
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
; X86-NEXT: adcl $-1, %esi
; X86-NEXT: adcl $-1, %ebx
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: orl %edx, %eax
; X86-NEXT: orl %ebx, %eax
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: orl %edi, %ecx
; X86-NEXT: orl %eax, %ecx
; X86-NEXT: jne .LBB4_3
; X86-NEXT: # %bb.4:
; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
; X86-NEXT: orl %esi, %ecx
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
; X86-NEXT: orl %eax, %ecx
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT: jne .LBB4_4
; X86-NEXT: # %bb.5:
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
; X86-NEXT: movl {{[0-9]+}}(%esp), %ebp
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
; X86-NEXT: .LBB4_7: # %udiv-loop-exit
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
; X86-NEXT: shldl $1, %edx, %edi
; X86-NEXT: orl %ecx, %edi
; X86-NEXT: orl %esi, %edi
; X86-NEXT: shldl $1, %eax, %edx
; X86-NEXT: orl %ecx, %edx
; X86-NEXT: orl %esi, %edx
; X86-NEXT: movl %esi, %ecx
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
; X86-NEXT: shldl $1, %esi, %eax
; X86-NEXT: orl %ecx, %eax
; X86-NEXT: addl %esi, %esi
; X86-NEXT: orl %ebp, %esi
; X86-NEXT: .LBB4_9: # %udiv-end
; X86-NEXT: xorl %ebx, %edi
; X86-NEXT: xorl %ebx, %edx
; X86-NEXT: xorl %ebx, %eax
; X86-NEXT: xorl %ebx, %esi
; X86-NEXT: subl %ebx, %esi
; X86-NEXT: orl %ebx, %esi
; X86-NEXT: .LBB4_8: # %udiv-end
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT: xorl %ecx, %edi
; X86-NEXT: xorl %ecx, %edx
; X86-NEXT: xorl %ecx, %eax
; X86-NEXT: xorl %ecx, %esi
; X86-NEXT: subl %ecx, %esi
; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: sbbl %ebx, %eax
; X86-NEXT: sbbl %ecx, %eax
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: sbbl %ebx, %edx
; X86-NEXT: sbbl %ebx, %edi
; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl %esi, (%ecx)
; X86-NEXT: movl %eax, 4(%ecx)
; X86-NEXT: movl %edx, 8(%ecx)
; X86-NEXT: movl %edi, 12(%ecx)
; X86-NEXT: sbbl %ecx, %edx
; X86-NEXT: sbbl %ecx, %edi
; X86-NEXT: movl %edi, (%esp) # 4-byte Spill
; X86-NEXT: movl %esi, (%ebp)
; X86-NEXT: movl %eax, 4(%ebp)
; X86-NEXT: movl %edx, 8(%ebp)
; X86-NEXT: movl %edi, 12(%ebp)
; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
; X86-NEXT: movl {{[0-9]+}}(%esp), %ebp
; X86-NEXT: movl %edx, %ebx
@@ -541,7 +532,7 @@ define i128 @scalar_i128(i128 %x, i128 %y, ptr %divdst) nounwind {
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl %esi, %eax
; X86-NEXT: mull %edi
; X86-NEXT: movl %eax, (%esp) # 4-byte Spill
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl %edx, %edi
; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
; X86-NEXT: adcl $0, %ecx
@@ -562,10 +553,10 @@ define i128 @scalar_i128(i128 %x, i128 %y, ptr %divdst) nounwind {
; X86-NEXT: adcl %eax, %edx
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT: movl (%esp), %ecx # 4-byte Reload
; X86-NEXT: imull %eax, %ecx
; X86-NEXT: mull %ebx
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl %eax, (%esp) # 4-byte Spill
; X86-NEXT: imull {{[0-9]+}}(%esp), %ebx
; X86-NEXT: addl %edx, %ebx
; X86-NEXT: addl %ecx, %ebx
@@ -577,12 +568,12 @@ define i128 @scalar_i128(i128 %x, i128 %y, ptr %divdst) nounwind {
; X86-NEXT: mull %edx
; X86-NEXT: addl %edx, %ebp
; X86-NEXT: addl %ecx, %ebp
; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
; X86-NEXT: addl (%esp), %eax # 4-byte Folded Reload
; X86-NEXT: adcl %ebx, %ebp
; X86-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
; X86-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: subl (%esp), %edx # 4-byte Folded Reload
; X86-NEXT: subl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: sbbl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
; X86-NEXT: movl {{[0-9]+}}(%esp), %esi


@@ -304,7 +304,7 @@ define i128 @scalar_i128(i128 %x, i128 %y, ptr %divdst) nounwind {
; X86-NEXT: movl 128(%esp,%eax), %esi
; X86-NEXT: movb %ch, %cl
; X86-NEXT: shldl %cl, %edx, %esi
; X86-NEXT: movl %esi, (%esp) # 4-byte Spill
; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: shll %cl, %edx
; X86-NEXT: notb %cl
; X86-NEXT: movl 120(%esp,%eax), %ebp
@@ -319,10 +319,10 @@ define i128 @scalar_i128(i128 %x, i128 %y, ptr %divdst) nounwind {
; X86-NEXT: shll %cl, %ebp
; X86-NEXT: addl $1, %edi
; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT: adcl $0, %ecx
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-NEXT: adcl $0, %eax
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
; X86-NEXT: adcl $0, %edi
; X86-NEXT: adcl $0, %ebx
; X86-NEXT: jae .LBB4_3
; X86-NEXT: # %bb.6:
@@ -331,14 +331,14 @@ define i128 @scalar_i128(i128 %x, i128 %y, ptr %divdst) nounwind {
; X86-NEXT: movl %edx, %ebx
; X86-NEXT: jmp .LBB4_7
; X86-NEXT: .LBB4_3: # %udiv-preheader
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl %ecx, {{[0-9]+}}(%esp)
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl %ecx, {{[0-9]+}}(%esp)
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl %ecx, {{[0-9]+}}(%esp)
; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
; X86-NEXT: movl %edi, {{[0-9]+}}(%esp)
; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
; X86-NEXT: movl %edi, {{[0-9]+}}(%esp)
; X86-NEXT: movl {{[0-9]+}}(%esp), %edi
; X86-NEXT: movl %edi, {{[0-9]+}}(%esp)
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl %ecx, {{[0-9]+}}(%esp)
; X86-NEXT: movl $0, {{[0-9]+}}(%esp)
@@ -348,22 +348,23 @@ define i128 @scalar_i128(i128 %x, i128 %y, ptr %divdst) nounwind {
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-NEXT: movb %al, %ch
; X86-NEXT: andb $7, %ch
; X86-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: # kill: def $al killed $al killed $eax
; X86-NEXT: shrb $3, %al
; X86-NEXT: andb $15, %al
; X86-NEXT: movzbl %al, %eax
; X86-NEXT: movl 80(%esp,%eax), %ebp
; X86-NEXT: movl 80(%esp,%eax), %edi
; X86-NEXT: movl %edi, (%esp) # 4-byte Spill
; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl 76(%esp,%eax), %edi
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl %edi, %ebx
; X86-NEXT: movb %ch, %cl
; X86-NEXT: shrdl %cl, %ebp, %ebx
; X86-NEXT: movl (%esp), %edx # 4-byte Reload
; X86-NEXT: shrdl %cl, %edx, %ebx
; X86-NEXT: movl 68(%esp,%eax), %esi
; X86-NEXT: movl 72(%esp,%eax), %edx
; X86-NEXT: movl %edx, %eax
; X86-NEXT: movl 72(%esp,%eax), %eax
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: shrl %cl, %eax
; X86-NEXT: notb %cl
; X86-NEXT: addl %edi, %edi
@@ -371,8 +372,10 @@ define i128 @scalar_i128(i128 %x, i128 %y, ptr %divdst) nounwind {
; X86-NEXT: orl %eax, %edi
; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movb %ch, %cl
; X86-NEXT: shrl %cl, %ebp
; X86-NEXT: shrdl %cl, %edx, %esi
; X86-NEXT: movl (%esp), %edx # 4-byte Reload
; X86-NEXT: shrl %cl, %edx
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-NEXT: shrdl %cl, %eax, %esi
; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: addl $-1, %eax
@@ -383,19 +386,20 @@ define i128 @scalar_i128(i128 %x, i128 %y, ptr %divdst) nounwind {
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: adcl $-1, %ecx
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
; X86-NEXT: adcl $-1, %esi
; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: adcl $-1, %ecx
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
; X86-NEXT: movl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
; X86-NEXT: movl (%esp), %esi # 4-byte Reload
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
; X86-NEXT: .p2align 4, 0x90
; X86-NEXT: .LBB4_4: # %udiv-do-while
; X86-NEXT: # =>This Inner Loop Header: Depth=1
; X86-NEXT: movl %ebx, (%esp) # 4-byte Spill
; X86-NEXT: shldl $1, %ebx, %ebp
; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: shldl $1, %ebx, %edx
; X86-NEXT: movl %edx, (%esp) # 4-byte Spill
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
; X86-NEXT: shldl $1, %ebx, (%esp) # 4-byte Folded Spill
; X86-NEXT: shldl $1, %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
; X86-NEXT: shldl $1, %edx, %ebx
; X86-NEXT: shldl $1, %esi, %edx
@@ -407,27 +411,25 @@ define i128 @scalar_i128(i128 %x, i128 %y, ptr %divdst) nounwind {
; X86-NEXT: shldl $1, %ecx, %eax
; X86-NEXT: orl %edi, %eax
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-NEXT: shldl $1, %eax, %ecx
; X86-NEXT: shldl $1, %ebp, %ecx
; X86-NEXT: orl %edi, %ecx
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: addl %eax, %eax
; X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: addl %ebp, %ebp
; X86-NEXT: orl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
; X86-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: cmpl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Reload
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT: sbbl %ebx, %ecx
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT: sbbl (%esp), %ecx # 4-byte Folded Reload
; X86-NEXT: sbbl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT: sbbl %ebp, %ecx
; X86-NEXT: sbbl (%esp), %ecx # 4-byte Folded Reload
; X86-NEXT: sarl $31, %ecx
; X86-NEXT: movl %ecx, %eax
; X86-NEXT: andl $1, %eax
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl %ecx, %eax
; X86-NEXT: andl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl %ecx, %ebp
; X86-NEXT: andl {{[0-9]+}}(%esp), %ebp
; X86-NEXT: movl %ecx, %edi
; X86-NEXT: andl {{[0-9]+}}(%esp), %edi
; X86-NEXT: movl %ecx, %eax
@@ -437,36 +439,35 @@ define i128 @scalar_i128(i128 %x, i128 %y, ptr %divdst) nounwind {
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: sbbl %eax, %ebx
; X86-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl (%esp), %ebx # 4-byte Reload
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
; X86-NEXT: sbbl %edi, %ebx
; X86-NEXT: sbbl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
; X86-NEXT: movl %ebp, (%esp) # 4-byte Spill
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-NEXT: sbbl %ebp, (%esp) # 4-byte Folded Spill
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT: addl $-1, %ecx
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-NEXT: adcl $-1, %eax
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
; X86-NEXT: adcl $-1, %edx
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
; X86-NEXT: adcl $-1, %edi
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
; X86-NEXT: adcl $-1, %edx
; X86-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: orl %edi, %eax
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: orl %edx, %ecx
; X86-NEXT: movl (%esp), %ebp # 4-byte Reload
; X86-NEXT: orl %edx, %eax
; X86-NEXT: movl (%esp), %edx # 4-byte Reload
; X86-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: orl %edi, %ecx
; X86-NEXT: orl %eax, %ecx
; X86-NEXT: jne .LBB4_4
; X86-NEXT: # %bb.5:
; X86-NEXT: movl %esi, (%esp) # 4-byte Spill
; X86-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT: .LBB4_7: # %udiv-loop-exit
; X86-NEXT: movl (%esp), %edx # 4-byte Reload
; X86-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
; X86-NEXT: shldl $1, %esi, %edx
; X86-NEXT: orl %eax, %edx
; X86-NEXT: shldl $1, %ebx, %esi


@@ -11,8 +11,8 @@ define i32 @freeze(i32 %t) {
;
; FAST-LABEL: freeze:
; FAST: # %bb.0:
; FAST-NEXT: movl $10, %eax
; FAST-NEXT: xorl %edi, %eax
; FAST-NEXT: movl %edi, %eax
; FAST-NEXT: xorl $10, %eax
; FAST-NEXT: retq
%1 = freeze i32 %t
%2 = freeze i32 10


@@ -0,0 +1,57 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 3
; RUN: llc < %s -mtriple=x86_64-unknown | FileCheck %s
; When optimizing for size, the constant $858993459 is moved into a register,
; and that register is used in the following two andl instructions.
define i32 @cnt32_optsize(i32 %x) nounwind readnone optsize {
; CHECK-LABEL: cnt32_optsize:
; CHECK: # %bb.0:
; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: shrl %eax
; CHECK-NEXT: andl $1431655765, %eax # imm = 0x55555555
; CHECK-NEXT: subl %eax, %edi
; CHECK-NEXT: movl $858993459, %eax # imm = 0x33333333
; CHECK-NEXT: movl %edi, %ecx
; CHECK-NEXT: andl %eax, %ecx
; CHECK-NEXT: shrl $2, %edi
; CHECK-NEXT: andl %eax, %edi
; CHECK-NEXT: addl %ecx, %edi
; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: shrl $4, %eax
; CHECK-NEXT: addl %edi, %eax
; CHECK-NEXT: andl $252645135, %eax # imm = 0xF0F0F0F
; CHECK-NEXT: imull $16843009, %eax, %eax # imm = 0x1010101
; CHECK-NEXT: shrl $24, %eax
; CHECK-NEXT: retq
%cnt = tail call i32 @llvm.ctpop.i32(i32 %x)
ret i32 %cnt
}
; When optimizing for speed, the constant $858993459 can be folded directly
; into the two andl instructions.
define i32 @cnt32_optspeed(i32 %x) nounwind readnone {
; CHECK-LABEL: cnt32_optspeed:
; CHECK: # %bb.0:
; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: shrl %eax
; CHECK-NEXT: andl $1431655765, %eax # imm = 0x55555555
; CHECK-NEXT: subl %eax, %edi
; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: andl $858993459, %eax # imm = 0x33333333
; CHECK-NEXT: shrl $2, %edi
; CHECK-NEXT: andl $858993459, %edi # imm = 0x33333333
; CHECK-NEXT: addl %eax, %edi
; CHECK-NEXT: movl %edi, %eax
; CHECK-NEXT: shrl $4, %eax
; CHECK-NEXT: addl %edi, %eax
; CHECK-NEXT: andl $252645135, %eax # imm = 0xF0F0F0F
; CHECK-NEXT: imull $16843009, %eax, %eax # imm = 0x1010101
; CHECK-NEXT: shrl $24, %eax
; CHECK-NEXT: retq
%cnt = tail call i32 @llvm.ctpop.i32(i32 %x)
ret i32 %cnt
}
declare i32 @llvm.ctpop.i32(i32) nounwind readnone


@@ -0,0 +1,143 @@
# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 3
# RUN: llc -mtriple=x86_64-- -run-pass=peephole-opt %s -o - | FileCheck %s
--- |
define void @foldImmediate() { ret void }
...
---
# Check that immediates can be folded into ALU instructions.
name: foldImmediate
registers:
- { id: 0, class: gr32 }
- { id: 1, class: gr32 }
- { id: 2, class: gr32 }
- { id: 3, class: gr32 }
- { id: 4, class: gr32 }
- { id: 5, class: gr32 }
- { id: 6, class: gr32 }
- { id: 7, class: gr64 }
- { id: 8, class: gr64 }
- { id: 9, class: gr64 }
- { id: 10, class: gr64 }
- { id: 11, class: gr64 }
- { id: 12, class: gr64 }
- { id: 13, class: gr64 }
- { id: 14, class: gr64 }
- { id: 15, class: gr64 }
- { id: 16, class: gr32 }
- { id: 17, class: gr64 }
- { id: 18, class: gr32 }
body: |
bb.0:
liveins: $rdi, $rsi
; CHECK-LABEL: name: foldImmediate
; CHECK: liveins: $rdi, $rsi
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[MOV32ri:%[0-9]+]]:gr32 = MOV32ri 81
; CHECK-NEXT: [[COPY:%[0-9]+]]:gr32 = COPY $edi
; CHECK-NEXT: [[ADD32ri:%[0-9]+]]:gr32 = ADD32ri [[COPY]], 81, implicit-def $eflags
; CHECK-NEXT: NOOP implicit [[ADD32ri]]
; CHECK-NEXT: [[SUB32ri:%[0-9]+]]:gr32 = SUB32ri [[COPY]], 81, implicit-def $eflags
; CHECK-NEXT: NOOP implicit [[SUB32ri]]
; CHECK-NEXT: [[AND32ri:%[0-9]+]]:gr32 = AND32ri [[COPY]], 81, implicit-def $eflags
; CHECK-NEXT: NOOP implicit [[AND32ri]]
; CHECK-NEXT: [[OR32ri:%[0-9]+]]:gr32 = OR32ri [[COPY]], 81, implicit-def $eflags
; CHECK-NEXT: NOOP implicit [[OR32ri]]
; CHECK-NEXT: [[XOR32ri:%[0-9]+]]:gr32 = XOR32ri [[COPY]], 81, implicit-def $eflags
; CHECK-NEXT: NOOP implicit [[XOR32ri]]
; CHECK-NEXT: TEST32ri [[COPY]], 81, implicit-def $eflags
; CHECK-NEXT: NOOP implicit $eflags
; CHECK-NEXT: CMP32ri [[COPY]], 81, implicit-def $eflags
; CHECK-NEXT: NOOP implicit $eflags
; CHECK-NEXT: [[ADC32ri:%[0-9]+]]:gr32 = ADC32ri [[COPY]], 81, implicit-def $eflags, implicit $eflags
; CHECK-NEXT: NOOP implicit [[ADC32ri]]
; CHECK-NEXT: [[SBB32ri:%[0-9]+]]:gr32 = SBB32ri [[COPY]], 81, implicit-def $eflags, implicit $eflags
; CHECK-NEXT: NOOP implicit [[SBB32ri]]
; CHECK-NEXT: [[SUBREG_TO_REG:%[0-9]+]]:gr64 = SUBREG_TO_REG 0, killed [[MOV32ri]], %subreg.sub_32bit
; CHECK-NEXT: [[COPY1:%[0-9]+]]:gr64 = COPY $rsi
; CHECK-NEXT: [[ADD64ri32_:%[0-9]+]]:gr64 = ADD64ri32 [[COPY1]], 81, implicit-def $eflags
; CHECK-NEXT: NOOP implicit [[ADD64ri32_]]
; CHECK-NEXT: [[SUB64ri32_:%[0-9]+]]:gr64 = SUB64ri32 [[COPY1]], 81, implicit-def $eflags
; CHECK-NEXT: NOOP implicit [[SUB64ri32_]]
; CHECK-NEXT: [[AND64ri32_:%[0-9]+]]:gr64 = AND64ri32 [[COPY1]], 81, implicit-def $eflags
; CHECK-NEXT: NOOP implicit [[AND64ri32_]]
; CHECK-NEXT: [[OR64ri32_:%[0-9]+]]:gr64 = OR64ri32 [[COPY1]], 81, implicit-def $eflags
; CHECK-NEXT: NOOP implicit [[OR64ri32_]]
; CHECK-NEXT: [[XOR64ri32_:%[0-9]+]]:gr64 = XOR64ri32 [[COPY1]], 81, implicit-def $eflags
; CHECK-NEXT: NOOP implicit [[XOR64ri32_]]
; CHECK-NEXT: [[MOV32ri64_:%[0-9]+]]:gr64 = MOV32ri64 81
; CHECK-NEXT: NOOP implicit [[MOV32ri64_]]
; CHECK-NEXT: TEST64ri32 [[COPY1]], 81, implicit-def $eflags
; CHECK-NEXT: NOOP implicit $eflags
; CHECK-NEXT: [[ADC64ri32_:%[0-9]+]]:gr64 = ADC64ri32 [[COPY1]], 81, implicit-def $eflags, implicit $eflags
; CHECK-NEXT: NOOP implicit [[ADC64ri32_]]
; CHECK-NEXT: [[SBB64ri32_:%[0-9]+]]:gr64 = SBB64ri32 [[COPY1]], 81, implicit-def $eflags, implicit $eflags
; CHECK-NEXT: NOOP implicit [[SBB64ri32_]]
; CHECK-NEXT: CMP64ri32 [[COPY1]], 81, implicit-def $eflags
; CHECK-NEXT: NOOP implicit $eflags
; CHECK-NEXT: CMP64rr [[SUBREG_TO_REG]], [[COPY1]], implicit-def $eflags
; CHECK-NEXT: NOOP implicit $eflags
%0 = MOV32ri 81
%1 = COPY $edi
%2 = ADD32rr %0, %1, implicit-def $eflags
NOOP implicit %2
%3 = SUB32rr %1, %0, implicit-def $eflags
NOOP implicit %3
%4 = AND32rr %0, %1, implicit-def $eflags
NOOP implicit %4
%5 = OR32rr %0, %1, implicit-def $eflags
NOOP implicit %5
%6 = XOR32rr %0, %1, implicit-def $eflags
NOOP implicit %6
TEST32rr %0, %1, implicit-def $eflags
NOOP implicit $eflags
CMP32rr %1, %0, implicit-def $eflags
NOOP implicit $eflags
%16 = ADC32rr %0, %1, implicit-def $eflags, implicit $eflags
NOOP implicit %16
%18 = SBB32rr %1, %0, implicit-def $eflags, implicit $eflags
NOOP implicit %18
%7 = SUBREG_TO_REG 0, killed %0:gr32, %subreg.sub_32bit
%8 = COPY $rsi
%9 = ADD64rr %7, %8, implicit-def $eflags
NOOP implicit %9
%10 = SUB64rr %8, %7, implicit-def $eflags
NOOP implicit %10
%11 = AND64rr %8, %7, implicit-def $eflags
NOOP implicit %11
%12 = OR64rr %8, %7, implicit-def $eflags
NOOP implicit %12
%13 = XOR64rr %8, %7, implicit-def $eflags
NOOP implicit %13
%14 = COPY %7
NOOP implicit %14
TEST64rr %8, %7, implicit-def $eflags
NOOP implicit $eflags
%15 = ADC64rr %8, %7, implicit-def $eflags, implicit $eflags
NOOP implicit %15
%17 = SBB64rr %8, %7, implicit-def $eflags, implicit $eflags
NOOP implicit %17
CMP64rr %8, %7, implicit-def $eflags
NOOP implicit $eflags
CMP64rr %7, %8, implicit-def $eflags
NOOP implicit $eflags
...
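
The MIR test above feeds each rr-form ALU instruction an operand defined by MOV32ri 81 and expects the peephole pass to rewrite it to the corresponding ri form. Below is a minimal sketch of that rewrite, covering only the commutative 32-bit opcodes from the test; foldImmediateSketch is a hypothetical helper, not the hook this commit adds (the in-tree X86InstrInfo::FoldImmediate also handles SUB/CMP/TEST, the 64-bit forms, and the associated bookkeeping).

// Minimal sketch, not the actual X86InstrInfo::FoldImmediate. Assumes
// DefMI is "Reg = MOV32ri Imm" and UseMI is one of the commutative
// 32-bit rr ALU ops exercised by the test above.
#include "X86InstrInfo.h" // in-tree header providing the X86:: opcode enum
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/TargetInstrInfo.h"

using namespace llvm;

static bool foldImmediateSketch(MachineInstr &UseMI, MachineInstr &DefMI,
                                Register Reg, const TargetInstrInfo *TII) {
  // Only fold if the defining instruction really carries an immediate.
  if (!DefMI.getOperand(1).isImm())
    return false;
  int64_t Imm = DefMI.getOperand(1).getImm();

  // Map the register-register opcode to its register-immediate form.
  unsigned NewOpc;
  switch (UseMI.getOpcode()) {
  case X86::ADD32rr: NewOpc = X86::ADD32ri; break;
  case X86::AND32rr: NewOpc = X86::AND32ri; break;
  case X86::OR32rr:  NewOpc = X86::OR32ri;  break;
  case X86::XOR32rr: NewOpc = X86::XOR32ri; break;
  default:
    return false; // everything else is left to the real hook
  }

  // All four ops are commutative; if Reg is the first source, commute the
  // sources so the immediate can take the second source slot.
  if (UseMI.getOperand(1).getReg() == Reg &&
      !TII->commuteInstruction(UseMI, /*NewMI=*/false, 1, 2))
    return false;

  UseMI.setDesc(TII->get(NewOpc));
  UseMI.getOperand(2).ChangeToImmediate(Imm); // also drops the register use
  return true;
}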

File diff suppressed because it is too large


@@ -145,8 +145,8 @@ define dso_local i64 @test_ebp(i64 %in) local_unnamed_addr nounwind {
; CHECK-LABEL: test_ebp:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: pushl %ebp
; CHECK-NEXT: movl $19088743, %esp # imm = 0x1234567
; CHECK-NEXT: movl $-1985229329, %ebp # imm = 0x89ABCDEF
; CHECK-NEXT: movl $19088743, %esp # imm = 0x1234567
; CHECK-NEXT: #APP
; CHECK-NEXT: movl %ebp, %eax
; CHECK-NEXT: #NO_APP


@@ -1044,12 +1044,11 @@ define i32 @cnt32_pgso(i32 %x) nounwind readnone !prof !14 {
; X86-NEXT: shrl %ecx
; X86-NEXT: andl $1431655765, %ecx # imm = 0x55555555
; X86-NEXT: subl %ecx, %eax
; X86-NEXT: movl $858993459, %ecx # imm = 0x33333333
; X86-NEXT: movl %eax, %edx
; X86-NEXT: andl %ecx, %edx
; X86-NEXT: movl %eax, %ecx
; X86-NEXT: andl $858993459, %ecx # imm = 0x33333333
; X86-NEXT: shrl $2, %eax
; X86-NEXT: andl %ecx, %eax
; X86-NEXT: addl %edx, %eax
; X86-NEXT: andl $858993459, %eax # imm = 0x33333333
; X86-NEXT: addl %ecx, %eax
; X86-NEXT: movl %eax, %ecx
; X86-NEXT: shrl $4, %ecx
; X86-NEXT: addl %eax, %ecx
@@ -1064,12 +1063,11 @@ define i32 @cnt32_pgso(i32 %x) nounwind readnone !prof !14 {
; X64-NEXT: shrl %eax
; X64-NEXT: andl $1431655765, %eax # imm = 0x55555555
; X64-NEXT: subl %eax, %edi
; X64-NEXT: movl $858993459, %eax # imm = 0x33333333
; X64-NEXT: movl %edi, %ecx
; X64-NEXT: andl %eax, %ecx
; X64-NEXT: movl %edi, %eax
; X64-NEXT: andl $858993459, %eax # imm = 0x33333333
; X64-NEXT: shrl $2, %edi
; X64-NEXT: andl %eax, %edi
; X64-NEXT: addl %ecx, %edi
; X64-NEXT: andl $858993459, %edi # imm = 0x33333333
; X64-NEXT: addl %eax, %edi
; X64-NEXT: movl %edi, %eax
; X64-NEXT: shrl $4, %eax
; X64-NEXT: addl %edi, %eax
@@ -1094,49 +1092,40 @@ define i32 @cnt32_pgso(i32 %x) nounwind readnone !prof !14 {
define i64 @cnt64_pgso(i64 %x) nounwind readnone !prof !14 {
; X86-NOSSE-LABEL: cnt64_pgso:
; X86-NOSSE: # %bb.0:
; X86-NOSSE-NEXT: pushl %ebx
; X86-NOSSE-NEXT: pushl %edi
; X86-NOSSE-NEXT: pushl %esi
; X86-NOSSE-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NOSSE-NEXT: movl {{[0-9]+}}(%esp), %esi
; X86-NOSSE-NEXT: movl %esi, %ecx
; X86-NOSSE-NEXT: shrl %ecx
; X86-NOSSE-NEXT: movl $1431655765, %edx # imm = 0x55555555
; X86-NOSSE-NEXT: andl %edx, %ecx
; X86-NOSSE-NEXT: subl %ecx, %esi
; X86-NOSSE-NEXT: movl $858993459, %ecx # imm = 0x33333333
; X86-NOSSE-NEXT: movl %esi, %edi
; X86-NOSSE-NEXT: andl %ecx, %edi
; X86-NOSSE-NEXT: shrl $2, %esi
; X86-NOSSE-NEXT: andl %ecx, %esi
; X86-NOSSE-NEXT: addl %edi, %esi
; X86-NOSSE-NEXT: movl %esi, %ebx
; X86-NOSSE-NEXT: shrl $4, %ebx
; X86-NOSSE-NEXT: addl %esi, %ebx
; X86-NOSSE-NEXT: movl $252645135, %edi # imm = 0xF0F0F0F
; X86-NOSSE-NEXT: andl %edi, %ebx
; X86-NOSSE-NEXT: imull $16843009, %ebx, %esi # imm = 0x1010101
; X86-NOSSE-NEXT: shrl $24, %esi
; X86-NOSSE-NEXT: movl %eax, %ebx
; X86-NOSSE-NEXT: shrl %ebx
; X86-NOSSE-NEXT: andl %edx, %ebx
; X86-NOSSE-NEXT: subl %ebx, %eax
; X86-NOSSE-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NOSSE-NEXT: movl %ecx, %edx
; X86-NOSSE-NEXT: shrl %edx
; X86-NOSSE-NEXT: andl $1431655765, %edx # imm = 0x55555555
; X86-NOSSE-NEXT: subl %edx, %ecx
; X86-NOSSE-NEXT: movl %ecx, %edx
; X86-NOSSE-NEXT: andl $858993459, %edx # imm = 0x33333333
; X86-NOSSE-NEXT: shrl $2, %ecx
; X86-NOSSE-NEXT: andl $858993459, %ecx # imm = 0x33333333
; X86-NOSSE-NEXT: addl %edx, %ecx
; X86-NOSSE-NEXT: movl %ecx, %edx
; X86-NOSSE-NEXT: shrl $4, %edx
; X86-NOSSE-NEXT: addl %ecx, %edx
; X86-NOSSE-NEXT: andl $252645135, %edx # imm = 0xF0F0F0F
; X86-NOSSE-NEXT: imull $16843009, %edx, %ecx # imm = 0x1010101
; X86-NOSSE-NEXT: shrl $24, %ecx
; X86-NOSSE-NEXT: movl %eax, %edx
; X86-NOSSE-NEXT: andl %ecx, %edx
; X86-NOSSE-NEXT: shrl %edx
; X86-NOSSE-NEXT: andl $1431655765, %edx # imm = 0x55555555
; X86-NOSSE-NEXT: subl %edx, %eax
; X86-NOSSE-NEXT: movl %eax, %edx
; X86-NOSSE-NEXT: andl $858993459, %edx # imm = 0x33333333
; X86-NOSSE-NEXT: shrl $2, %eax
; X86-NOSSE-NEXT: andl %ecx, %eax
; X86-NOSSE-NEXT: andl $858993459, %eax # imm = 0x33333333
; X86-NOSSE-NEXT: addl %edx, %eax
; X86-NOSSE-NEXT: movl %eax, %ecx
; X86-NOSSE-NEXT: shrl $4, %ecx
; X86-NOSSE-NEXT: addl %eax, %ecx
; X86-NOSSE-NEXT: andl %edi, %ecx
; X86-NOSSE-NEXT: imull $16843009, %ecx, %eax # imm = 0x1010101
; X86-NOSSE-NEXT: movl %eax, %edx
; X86-NOSSE-NEXT: shrl $4, %edx
; X86-NOSSE-NEXT: addl %eax, %edx
; X86-NOSSE-NEXT: andl $252645135, %edx # imm = 0xF0F0F0F
; X86-NOSSE-NEXT: imull $16843009, %edx, %eax # imm = 0x1010101
; X86-NOSSE-NEXT: shrl $24, %eax
; X86-NOSSE-NEXT: addl %esi, %eax
; X86-NOSSE-NEXT: addl %ecx, %eax
; X86-NOSSE-NEXT: xorl %edx, %edx
; X86-NOSSE-NEXT: popl %esi
; X86-NOSSE-NEXT: popl %edi
; X86-NOSSE-NEXT: popl %ebx
; X86-NOSSE-NEXT: retl
;
; X64-LABEL: cnt64_pgso:
@@ -1223,92 +1212,85 @@ define i64 @cnt64_pgso(i64 %x) nounwind readnone !prof !14 {
define i128 @cnt128_pgso(i128 %x) nounwind readnone !prof !14 {
; X86-NOSSE-LABEL: cnt128_pgso:
; X86-NOSSE: # %bb.0:
; X86-NOSSE-NEXT: pushl %ebp
; X86-NOSSE-NEXT: pushl %ebx
; X86-NOSSE-NEXT: pushl %edi
; X86-NOSSE-NEXT: pushl %esi
; X86-NOSSE-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NOSSE-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NOSSE-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NOSSE-NEXT: movl {{[0-9]+}}(%esp), %esi
; X86-NOSSE-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NOSSE-NEXT: movl {{[0-9]+}}(%esp), %ebx
; X86-NOSSE-NEXT: movl %ebx, %ecx
; X86-NOSSE-NEXT: shrl %ecx
; X86-NOSSE-NEXT: movl $1431655765, %edi # imm = 0x55555555
; X86-NOSSE-NEXT: andl %edi, %ecx
; X86-NOSSE-NEXT: subl %ecx, %ebx
; X86-NOSSE-NEXT: movl $858993459, %ecx # imm = 0x33333333
; X86-NOSSE-NEXT: movl %ebx, %ebp
; X86-NOSSE-NEXT: andl %ecx, %ebp
; X86-NOSSE-NEXT: shrl $2, %ebx
; X86-NOSSE-NEXT: andl %ecx, %ebx
; X86-NOSSE-NEXT: addl %ebp, %ebx
; X86-NOSSE-NEXT: movl %ebx, %ebp
; X86-NOSSE-NEXT: shrl $4, %ebp
; X86-NOSSE-NEXT: addl %ebx, %ebp
; X86-NOSSE-NEXT: movl %eax, %ebx
; X86-NOSSE-NEXT: movl {{[0-9]+}}(%esp), %edi
; X86-NOSSE-NEXT: movl %edi, %ebx
; X86-NOSSE-NEXT: shrl %ebx
; X86-NOSSE-NEXT: andl %edi, %ebx
; X86-NOSSE-NEXT: subl %ebx, %eax
; X86-NOSSE-NEXT: movl %eax, %ebx
; X86-NOSSE-NEXT: andl %ecx, %ebx
; X86-NOSSE-NEXT: shrl $2, %eax
; X86-NOSSE-NEXT: andl %ecx, %eax
; X86-NOSSE-NEXT: addl %ebx, %eax
; X86-NOSSE-NEXT: movl %eax, %edi
; X86-NOSSE-NEXT: shrl $4, %edi
; X86-NOSSE-NEXT: addl %eax, %edi
; X86-NOSSE-NEXT: movl $252645135, %ebx # imm = 0xF0F0F0F
; X86-NOSSE-NEXT: andl %ebx, %ebp
; X86-NOSSE-NEXT: imull $16843009, %ebp, %eax # imm = 0x1010101
; X86-NOSSE-NEXT: shrl $24, %eax
; X86-NOSSE-NEXT: andl %ebx, %edi
; X86-NOSSE-NEXT: imull $16843009, %edi, %edi # imm = 0x1010101
; X86-NOSSE-NEXT: andl $1431655765, %ebx # imm = 0x55555555
; X86-NOSSE-NEXT: subl %ebx, %edi
; X86-NOSSE-NEXT: movl %edi, %ebx
; X86-NOSSE-NEXT: andl $858993459, %ebx # imm = 0x33333333
; X86-NOSSE-NEXT: shrl $2, %edi
; X86-NOSSE-NEXT: andl $858993459, %edi # imm = 0x33333333
; X86-NOSSE-NEXT: addl %ebx, %edi
; X86-NOSSE-NEXT: movl %edi, %ebx
; X86-NOSSE-NEXT: shrl $4, %ebx
; X86-NOSSE-NEXT: addl %edi, %ebx
; X86-NOSSE-NEXT: andl $252645135, %ebx # imm = 0xF0F0F0F
; X86-NOSSE-NEXT: imull $16843009, %ebx, %edi # imm = 0x1010101
; X86-NOSSE-NEXT: shrl $24, %edi
; X86-NOSSE-NEXT: addl %eax, %edi
; X86-NOSSE-NEXT: movl %esi, %eax
; X86-NOSSE-NEXT: shrl %eax
; X86-NOSSE-NEXT: movl $1431655765, %ebp # imm = 0x55555555
; X86-NOSSE-NEXT: andl %ebp, %eax
; X86-NOSSE-NEXT: subl %eax, %esi
; X86-NOSSE-NEXT: movl %esi, %eax
; X86-NOSSE-NEXT: andl %ecx, %eax
; X86-NOSSE-NEXT: movl %esi, %ebx
; X86-NOSSE-NEXT: shrl %ebx
; X86-NOSSE-NEXT: andl $1431655765, %ebx # imm = 0x55555555
; X86-NOSSE-NEXT: subl %ebx, %esi
; X86-NOSSE-NEXT: movl %esi, %ebx
; X86-NOSSE-NEXT: andl $858993459, %ebx # imm = 0x33333333
; X86-NOSSE-NEXT: shrl $2, %esi
; X86-NOSSE-NEXT: andl %ecx, %esi
; X86-NOSSE-NEXT: addl %eax, %esi
; X86-NOSSE-NEXT: movl %esi, %ebp
; X86-NOSSE-NEXT: shrl $4, %ebp
; X86-NOSSE-NEXT: addl %esi, %ebp
; X86-NOSSE-NEXT: movl %edx, %eax
; X86-NOSSE-NEXT: shrl %eax
; X86-NOSSE-NEXT: movl $1431655765, %esi # imm = 0x55555555
; X86-NOSSE-NEXT: andl %esi, %eax
; X86-NOSSE-NEXT: subl %eax, %edx
; X86-NOSSE-NEXT: movl %edx, %eax
; X86-NOSSE-NEXT: andl %ecx, %eax
; X86-NOSSE-NEXT: andl $858993459, %esi # imm = 0x33333333
; X86-NOSSE-NEXT: addl %ebx, %esi
; X86-NOSSE-NEXT: movl %esi, %ebx
; X86-NOSSE-NEXT: shrl $4, %ebx
; X86-NOSSE-NEXT: addl %esi, %ebx
; X86-NOSSE-NEXT: andl $252645135, %ebx # imm = 0xF0F0F0F
; X86-NOSSE-NEXT: imull $16843009, %ebx, %esi # imm = 0x1010101
; X86-NOSSE-NEXT: shrl $24, %esi
; X86-NOSSE-NEXT: addl %edi, %esi
; X86-NOSSE-NEXT: movl %edx, %edi
; X86-NOSSE-NEXT: shrl %edi
; X86-NOSSE-NEXT: andl $1431655765, %edi # imm = 0x55555555
; X86-NOSSE-NEXT: subl %edi, %edx
; X86-NOSSE-NEXT: movl %edx, %edi
; X86-NOSSE-NEXT: andl $858993459, %edi # imm = 0x33333333
; X86-NOSSE-NEXT: shrl $2, %edx
; X86-NOSSE-NEXT: andl %ecx, %edx
; X86-NOSSE-NEXT: addl %eax, %edx
; X86-NOSSE-NEXT: movl %edx, %eax
; X86-NOSSE-NEXT: shrl $4, %eax
; X86-NOSSE-NEXT: addl %edx, %eax
; X86-NOSSE-NEXT: andl %ebx, %ebp
; X86-NOSSE-NEXT: andl %ebx, %eax
; X86-NOSSE-NEXT: imull $16843009, %ebp, %ecx # imm = 0x1010101
; X86-NOSSE-NEXT: shrl $24, %ecx
; X86-NOSSE-NEXT: imull $16843009, %eax, %edx # imm = 0x1010101
; X86-NOSSE-NEXT: shrl $24, %edx
; X86-NOSSE-NEXT: addl %ecx, %edx
; X86-NOSSE-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NOSSE-NEXT: andl $858993459, %edx # imm = 0x33333333
; X86-NOSSE-NEXT: addl %edi, %edx
; X86-NOSSE-NEXT: xorl %ecx, %ecx
; X86-NOSSE-NEXT: movl %ecx, 12(%eax)
; X86-NOSSE-NEXT: movl %ecx, 8(%eax)
; X86-NOSSE-NEXT: movl %ecx, 4(%eax)
; X86-NOSSE-NEXT: movl %edx, (%eax)
; X86-NOSSE-NEXT: movl %edx, %edi
; X86-NOSSE-NEXT: shrl $4, %edi
; X86-NOSSE-NEXT: addl %edx, %edi
; X86-NOSSE-NEXT: andl $252645135, %edi # imm = 0xF0F0F0F
; X86-NOSSE-NEXT: imull $16843009, %edi, %edx # imm = 0x1010101
; X86-NOSSE-NEXT: shrl $24, %edx
; X86-NOSSE-NEXT: movl %ecx, %edi
; X86-NOSSE-NEXT: shrl %edi
; X86-NOSSE-NEXT: andl $1431655765, %edi # imm = 0x55555555
; X86-NOSSE-NEXT: subl %edi, %ecx
; X86-NOSSE-NEXT: movl %ecx, %edi
; X86-NOSSE-NEXT: andl $858993459, %edi # imm = 0x33333333
; X86-NOSSE-NEXT: shrl $2, %ecx
; X86-NOSSE-NEXT: andl $858993459, %ecx # imm = 0x33333333
; X86-NOSSE-NEXT: addl %edi, %ecx
; X86-NOSSE-NEXT: movl %ecx, %edi
; X86-NOSSE-NEXT: shrl $4, %edi
; X86-NOSSE-NEXT: addl %ecx, %edi
; X86-NOSSE-NEXT: andl $252645135, %edi # imm = 0xF0F0F0F
; X86-NOSSE-NEXT: imull $16843009, %edi, %ecx # imm = 0x1010101
; X86-NOSSE-NEXT: shrl $24, %ecx
; X86-NOSSE-NEXT: addl %edx, %ecx
; X86-NOSSE-NEXT: addl %esi, %ecx
; X86-NOSSE-NEXT: xorl %edx, %edx
; X86-NOSSE-NEXT: movl %edx, 12(%eax)
; X86-NOSSE-NEXT: movl %edx, 8(%eax)
; X86-NOSSE-NEXT: movl %edx, 4(%eax)
; X86-NOSSE-NEXT: movl %ecx, (%eax)
; X86-NOSSE-NEXT: popl %esi
; X86-NOSSE-NEXT: popl %edi
; X86-NOSSE-NEXT: popl %ebx
; X86-NOSSE-NEXT: popl %ebp
; X86-NOSSE-NEXT: retl $4
;
; X64-LABEL: cnt128_pgso:


@@ -81,8 +81,8 @@ define ptr @SyFgets(ptr %line, i64 %length, i64 %fid) {
; CHECK-NEXT: imulq $1040, %rdx, %rax ## imm = 0x410
; CHECK-NEXT: movq _syBuf@GOTPCREL(%rip), %rcx
; CHECK-NEXT: leaq 8(%rcx,%rax), %rdx
; CHECK-NEXT: movl $1, %r13d
; CHECK-NEXT: movq _syCTRO@GOTPCREL(%rip), %rax
; CHECK-NEXT: movl $1, %r13d
; CHECK-NEXT: movb $1, %cl
; CHECK-NEXT: .p2align 4, 0x90
; CHECK-NEXT: LBB0_9: ## %do.body


@@ -18,6 +18,6 @@ define i8 @test_remat() {
define i32 @test_remat32() {
ret i32 0
; CHECK: REGISTER COALESCER
; CHECK: Remat: $eax = MOV32r0 implicit-def dead $eflags
; CHECK: $eax = MOV32r0 implicit-def dead $eflags
}


@@ -9,10 +9,9 @@ define i128 @select_eq_i128(ptr %a) {
; CHECK-NEXT: xorl %eax, %eax
; CHECK-NEXT: ptest %xmm0, %xmm0
; CHECK-NEXT: setne %al
; CHECK-NEXT: xorl %ecx, %ecx
; CHECK-NEXT: addq $-1, %rax
; CHECK-NEXT: movabsq $9223372036854775807, %rdx # imm = 0x7FFFFFFFFFFFFFFF
; CHECK-NEXT: adcq %rcx, %rdx
; CHECK-NEXT: adcq $0, %rdx
; CHECK-NEXT: retq
%1 = load i128, ptr %a, align 16
%cmp = icmp eq i128 %1, 1


@@ -1986,29 +1986,29 @@ define void @PR34947(ptr %p0, ptr %p1) nounwind {
; X86-SSE-NEXT: movdqa (%eax), %xmm3
; X86-SSE-NEXT: movdqa (%ecx), %xmm0
; X86-SSE-NEXT: movdqa 16(%ecx), %xmm1
; X86-SSE-NEXT: pxor %xmm4, %xmm4
; X86-SSE-NEXT: pxor %xmm5, %xmm5
; X86-SSE-NEXT: movdqa %xmm3, %xmm2
; X86-SSE-NEXT: pextrw $7, %xmm3, %eax
; X86-SSE-NEXT: pextrw $4, %xmm3, %edi
; X86-SSE-NEXT: pextrw $0, %xmm3, %ebp
; X86-SSE-NEXT: pextrw $1, %xmm3, %esi
; X86-SSE-NEXT: pextrw $3, %xmm3, %ebx
; X86-SSE-NEXT: movdqa %xmm3, %xmm5
; X86-SSE-NEXT: punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm4[0],xmm5[1],xmm4[1],xmm5[2],xmm4[2],xmm5[3],xmm4[3]
; X86-SSE-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm4[4],xmm2[5],xmm4[5],xmm2[6],xmm4[6],xmm2[7],xmm4[7]
; X86-SSE-NEXT: movdqa %xmm3, %xmm4
; X86-SSE-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1],xmm4[2],xmm5[2],xmm4[3],xmm5[3]
; X86-SSE-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm5[4],xmm2[5],xmm5[5],xmm2[6],xmm5[6],xmm2[7],xmm5[7]
; X86-SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm1[3,3,3,3]
; X86-SSE-NEXT: movd %xmm3, %ecx
; X86-SSE-NEXT: xorl %edx, %edx
; X86-SSE-NEXT: divl %ecx
; X86-SSE-NEXT: movd %edx, %xmm3
; X86-SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm2[2,3,2,3]
; X86-SSE-NEXT: movd %xmm4, %eax
; X86-SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm1[2,3,2,3]
; X86-SSE-NEXT: movd %xmm4, %ecx
; X86-SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm2[2,3,2,3]
; X86-SSE-NEXT: movd %xmm5, %eax
; X86-SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm1[2,3,2,3]
; X86-SSE-NEXT: movd %xmm5, %ecx
; X86-SSE-NEXT: xorl %edx, %edx
; X86-SSE-NEXT: divl %ecx
; X86-SSE-NEXT: movd %edx, %xmm4
; X86-SSE-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
; X86-SSE-NEXT: movd %edx, %xmm5
; X86-SSE-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm3[0],xmm5[1],xmm3[1]
; X86-SSE-NEXT: movl %edi, %eax
; X86-SSE-NEXT: xorl %edx, %edx
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %edi
@@ -2022,7 +2022,7 @@ define void @PR34947(ptr %p0, ptr %p1) nounwind {
; X86-SSE-NEXT: divl %ecx
; X86-SSE-NEXT: movd %edx, %xmm1
; X86-SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
; X86-SSE-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm4[0]
; X86-SSE-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm5[0]
; X86-SSE-NEXT: movl %ebp, %eax
; X86-SSE-NEXT: xorl %edx, %edx
; X86-SSE-NEXT: divl (%edi)
@@ -2040,7 +2040,7 @@ define void @PR34947(ptr %p0, ptr %p1) nounwind {
; X86-SSE-NEXT: xorl %edx, %edx
; X86-SSE-NEXT: divl %ecx
; X86-SSE-NEXT: movd %edx, %xmm2
; X86-SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm5[2,3,2,3]
; X86-SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[2,3,2,3]
; X86-SSE-NEXT: movd %xmm4, %eax
; X86-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
; X86-SSE-NEXT: movd %xmm0, %ecx
@@ -2207,29 +2207,29 @@ define void @PR34947(ptr %p0, ptr %p1) nounwind {
; X64-SSE-NEXT: movdqa (%rdi), %xmm3
; X64-SSE-NEXT: movdqa (%rsi), %xmm0
; X64-SSE-NEXT: movdqa 16(%rsi), %xmm1
; X64-SSE-NEXT: pxor %xmm4, %xmm4
; X64-SSE-NEXT: pxor %xmm5, %xmm5
; X64-SSE-NEXT: movdqa %xmm3, %xmm2
; X64-SSE-NEXT: pextrw $7, %xmm3, %eax
; X64-SSE-NEXT: pextrw $4, %xmm3, %r8d
; X64-SSE-NEXT: pextrw $0, %xmm3, %r10d
; X64-SSE-NEXT: pextrw $1, %xmm3, %edi
; X64-SSE-NEXT: pextrw $3, %xmm3, %r9d
; X64-SSE-NEXT: movdqa %xmm3, %xmm5
; X64-SSE-NEXT: punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm4[0],xmm5[1],xmm4[1],xmm5[2],xmm4[2],xmm5[3],xmm4[3]
; X64-SSE-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm4[4],xmm2[5],xmm4[5],xmm2[6],xmm4[6],xmm2[7],xmm4[7]
; X64-SSE-NEXT: movdqa %xmm3, %xmm4
; X64-SSE-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1],xmm4[2],xmm5[2],xmm4[3],xmm5[3]
; X64-SSE-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm5[4],xmm2[5],xmm5[5],xmm2[6],xmm5[6],xmm2[7],xmm5[7]
; X64-SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm1[3,3,3,3]
; X64-SSE-NEXT: movd %xmm3, %r11d
; X64-SSE-NEXT: xorl %edx, %edx
; X64-SSE-NEXT: divl %r11d
; X64-SSE-NEXT: movd %edx, %xmm3
; X64-SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm2[2,3,2,3]
; X64-SSE-NEXT: movd %xmm4, %eax
; X64-SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm1[2,3,2,3]
; X64-SSE-NEXT: movd %xmm4, %r11d
; X64-SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm2[2,3,2,3]
; X64-SSE-NEXT: movd %xmm5, %eax
; X64-SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm1[2,3,2,3]
; X64-SSE-NEXT: movd %xmm5, %r11d
; X64-SSE-NEXT: xorl %edx, %edx
; X64-SSE-NEXT: divl %r11d
; X64-SSE-NEXT: movd %edx, %xmm4
; X64-SSE-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
; X64-SSE-NEXT: movd %edx, %xmm5
; X64-SSE-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm3[0],xmm5[1],xmm3[1]
; X64-SSE-NEXT: movl %r8d, %eax
; X64-SSE-NEXT: xorl %edx, %edx
; X64-SSE-NEXT: divl 16(%rsi)
@@ -2242,7 +2242,7 @@ define void @PR34947(ptr %p0, ptr %p1) nounwind {
; X64-SSE-NEXT: divl %r8d
; X64-SSE-NEXT: movd %edx, %xmm1
; X64-SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
; X64-SSE-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm4[0]
; X64-SSE-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm5[0]
; X64-SSE-NEXT: movl %r10d, %eax
; X64-SSE-NEXT: xorl %edx, %edx
; X64-SSE-NEXT: divl (%rsi)
@@ -2260,7 +2260,7 @@ define void @PR34947(ptr %p0, ptr %p1) nounwind {
; X64-SSE-NEXT: xorl %edx, %edx
; X64-SSE-NEXT: divl %edi
; X64-SSE-NEXT: movd %edx, %xmm2
; X64-SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm5[2,3,2,3]
; X64-SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[2,3,2,3]
; X64-SSE-NEXT: movd %xmm4, %eax
; X64-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
; X64-SSE-NEXT: movd %xmm0, %edi


@@ -283,15 +283,14 @@ define i32 @test_call_setjmp(ptr%ptr) nounwind {
; X64-NOPIC-NEXT: pushq %rbp
; X64-NOPIC-NEXT: pushq %r15
; X64-NOPIC-NEXT: pushq %r14
; X64-NOPIC-NEXT: pushq %r13
; X64-NOPIC-NEXT: pushq %r12
; X64-NOPIC-NEXT: pushq %rbx
; X64-NOPIC-NEXT: subq $24, %rsp
; X64-NOPIC-NEXT: subq $16, %rsp
; X64-NOPIC-NEXT: movq %rsp, %rax
; X64-NOPIC-NEXT: movq %rdi, %rbx
; X64-NOPIC-NEXT: movq $-1, %r15
; X64-NOPIC-NEXT: sarq $63, %rax
; X64-NOPIC-NEXT: leaq {{[0-9]+}}(%rsp), %r14
; X64-NOPIC-NEXT: movq %rsp, %r14
; X64-NOPIC-NEXT: shlq $47, %rax
; X64-NOPIC-NEXT: movq %r14, %rdi
; X64-NOPIC-NEXT: orq %rax, %rsp
@@ -302,24 +301,23 @@ define i32 @test_call_setjmp(ptr%ptr) nounwind {
; X64-NOPIC-NEXT: sarq $63, %rax
; X64-NOPIC-NEXT: cmpq $.Lslh_ret_addr4, %r12
; X64-NOPIC-NEXT: cmovneq %r15, %rax
; X64-NOPIC-NEXT: movl (%rbx), %r12d
; X64-NOPIC-NEXT: movl $42, %ebp
; X64-NOPIC-NEXT: movl (%rbx), %ebp
; X64-NOPIC-NEXT: shlq $47, %rax
; X64-NOPIC-NEXT: movq %r14, %rdi
; X64-NOPIC-NEXT: movl %ebp, %esi
; X64-NOPIC-NEXT: movl $42, %esi
; X64-NOPIC-NEXT: orq %rax, %rsp
; X64-NOPIC-NEXT: movq $.Lslh_ret_addr5, %r13
; X64-NOPIC-NEXT: movq $.Lslh_ret_addr5, %r12
; X64-NOPIC-NEXT: callq sigsetjmp@PLT
; X64-NOPIC-NEXT: .Lslh_ret_addr5:
; X64-NOPIC-NEXT: movq %rsp, %rax
; X64-NOPIC-NEXT: sarq $63, %rax
; X64-NOPIC-NEXT: cmpq $.Lslh_ret_addr5, %r13
; X64-NOPIC-NEXT: cmpq $.Lslh_ret_addr5, %r12
; X64-NOPIC-NEXT: cmovneq %r15, %rax
; X64-NOPIC-NEXT: addl (%rbx), %r12d
; X64-NOPIC-NEXT: addl (%rbx), %ebp
; X64-NOPIC-NEXT: shlq $47, %rax
; X64-NOPIC-NEXT: movq %r14, %rdi
; X64-NOPIC-NEXT: movq %r14, %rsi
; X64-NOPIC-NEXT: movl %ebp, %edx
; X64-NOPIC-NEXT: movl $42, %edx
; X64-NOPIC-NEXT: orq %rax, %rsp
; X64-NOPIC-NEXT: movq $.Lslh_ret_addr6, %r14
; X64-NOPIC-NEXT: callq __sigsetjmp@PLT
@@ -329,15 +327,14 @@ define i32 @test_call_setjmp(ptr%ptr) nounwind {
; X64-NOPIC-NEXT: cmpq $.Lslh_ret_addr6, %r14
; X64-NOPIC-NEXT: movq %rax, %rcx
; X64-NOPIC-NEXT: cmovneq %r15, %rcx
; X64-NOPIC-NEXT: addl (%rbx), %r12d
; X64-NOPIC-NEXT: movl %r12d, %eax
; X64-NOPIC-NEXT: addl (%rbx), %ebp
; X64-NOPIC-NEXT: movl %ebp, %eax
; X64-NOPIC-NEXT: orl %ecx, %eax
; X64-NOPIC-NEXT: shlq $47, %rcx
; X64-NOPIC-NEXT: orq %rcx, %rsp
; X64-NOPIC-NEXT: addq $24, %rsp
; X64-NOPIC-NEXT: addq $16, %rsp
; X64-NOPIC-NEXT: popq %rbx
; X64-NOPIC-NEXT: popq %r12
; X64-NOPIC-NEXT: popq %r13
; X64-NOPIC-NEXT: popq %r14
; X64-NOPIC-NEXT: popq %r15
; X64-NOPIC-NEXT: popq %rbp
@@ -348,15 +345,14 @@ define i32 @test_call_setjmp(ptr%ptr) nounwind {
; X64-NOPIC-MCM-NEXT: pushq %rbp
; X64-NOPIC-MCM-NEXT: pushq %r15
; X64-NOPIC-MCM-NEXT: pushq %r14
; X64-NOPIC-MCM-NEXT: pushq %r13
; X64-NOPIC-MCM-NEXT: pushq %r12
; X64-NOPIC-MCM-NEXT: pushq %rbx
; X64-NOPIC-MCM-NEXT: subq $24, %rsp
; X64-NOPIC-MCM-NEXT: subq $16, %rsp
; X64-NOPIC-MCM-NEXT: movq %rsp, %rax
; X64-NOPIC-MCM-NEXT: movq %rdi, %rbx
; X64-NOPIC-MCM-NEXT: movq $-1, %r15
; X64-NOPIC-MCM-NEXT: sarq $63, %rax
; X64-NOPIC-MCM-NEXT: leaq {{[0-9]+}}(%rsp), %r14
; X64-NOPIC-MCM-NEXT: movq %rsp, %r14
; X64-NOPIC-MCM-NEXT: shlq $47, %rax
; X64-NOPIC-MCM-NEXT: movq %r14, %rdi
; X64-NOPIC-MCM-NEXT: orq %rax, %rsp
@@ -368,25 +364,24 @@ define i32 @test_call_setjmp(ptr%ptr) nounwind {
; X64-NOPIC-MCM-NEXT: leaq .Lslh_ret_addr4(%rip), %rcx
; X64-NOPIC-MCM-NEXT: cmpq %rcx, %r12
; X64-NOPIC-MCM-NEXT: cmovneq %r15, %rax
; X64-NOPIC-MCM-NEXT: movl (%rbx), %r12d
; X64-NOPIC-MCM-NEXT: movl $42, %ebp
; X64-NOPIC-MCM-NEXT: movl (%rbx), %ebp
; X64-NOPIC-MCM-NEXT: shlq $47, %rax
; X64-NOPIC-MCM-NEXT: movq %r14, %rdi
; X64-NOPIC-MCM-NEXT: movl %ebp, %esi
; X64-NOPIC-MCM-NEXT: movl $42, %esi
; X64-NOPIC-MCM-NEXT: orq %rax, %rsp
; X64-NOPIC-MCM-NEXT: leaq .Lslh_ret_addr5(%rip), %r13
; X64-NOPIC-MCM-NEXT: leaq .Lslh_ret_addr5(%rip), %r12
; X64-NOPIC-MCM-NEXT: callq sigsetjmp@PLT
; X64-NOPIC-MCM-NEXT: .Lslh_ret_addr5:
; X64-NOPIC-MCM-NEXT: movq %rsp, %rax
; X64-NOPIC-MCM-NEXT: sarq $63, %rax
; X64-NOPIC-MCM-NEXT: leaq .Lslh_ret_addr5(%rip), %rcx
; X64-NOPIC-MCM-NEXT: cmpq %rcx, %r13
; X64-NOPIC-MCM-NEXT: cmpq %rcx, %r12
; X64-NOPIC-MCM-NEXT: cmovneq %r15, %rax
; X64-NOPIC-MCM-NEXT: addl (%rbx), %r12d
; X64-NOPIC-MCM-NEXT: addl (%rbx), %ebp
; X64-NOPIC-MCM-NEXT: shlq $47, %rax
; X64-NOPIC-MCM-NEXT: movq %r14, %rdi
; X64-NOPIC-MCM-NEXT: movq %r14, %rsi
; X64-NOPIC-MCM-NEXT: movl %ebp, %edx
; X64-NOPIC-MCM-NEXT: movl $42, %edx
; X64-NOPIC-MCM-NEXT: orq %rax, %rsp
; X64-NOPIC-MCM-NEXT: leaq .Lslh_ret_addr6(%rip), %r14
; X64-NOPIC-MCM-NEXT: callq __sigsetjmp@PLT
@@ -397,15 +392,14 @@ define i32 @test_call_setjmp(ptr%ptr) nounwind {
; X64-NOPIC-MCM-NEXT: cmpq %rcx, %r14
; X64-NOPIC-MCM-NEXT: movq %rax, %rcx
; X64-NOPIC-MCM-NEXT: cmovneq %r15, %rcx
; X64-NOPIC-MCM-NEXT: addl (%rbx), %r12d
; X64-NOPIC-MCM-NEXT: movl %r12d, %eax
; X64-NOPIC-MCM-NEXT: addl (%rbx), %ebp
; X64-NOPIC-MCM-NEXT: movl %ebp, %eax
; X64-NOPIC-MCM-NEXT: orl %ecx, %eax
; X64-NOPIC-MCM-NEXT: shlq $47, %rcx
; X64-NOPIC-MCM-NEXT: orq %rcx, %rsp
; X64-NOPIC-MCM-NEXT: addq $24, %rsp
; X64-NOPIC-MCM-NEXT: addq $16, %rsp
; X64-NOPIC-MCM-NEXT: popq %rbx
; X64-NOPIC-MCM-NEXT: popq %r12
; X64-NOPIC-MCM-NEXT: popq %r13
; X64-NOPIC-MCM-NEXT: popq %r14
; X64-NOPIC-MCM-NEXT: popq %r15
; X64-NOPIC-MCM-NEXT: popq %rbp
@@ -416,15 +410,14 @@ define i32 @test_call_setjmp(ptr%ptr) nounwind {
; X64-PIC-NEXT: pushq %rbp
; X64-PIC-NEXT: pushq %r15
; X64-PIC-NEXT: pushq %r14
; X64-PIC-NEXT: pushq %r13
; X64-PIC-NEXT: pushq %r12
; X64-PIC-NEXT: pushq %rbx
; X64-PIC-NEXT: subq $24, %rsp
; X64-PIC-NEXT: subq $16, %rsp
; X64-PIC-NEXT: movq %rsp, %rax
; X64-PIC-NEXT: movq %rdi, %rbx
; X64-PIC-NEXT: movq $-1, %r15
; X64-PIC-NEXT: sarq $63, %rax
; X64-PIC-NEXT: leaq {{[0-9]+}}(%rsp), %r14
; X64-PIC-NEXT: movq %rsp, %r14
; X64-PIC-NEXT: shlq $47, %rax
; X64-PIC-NEXT: movq %r14, %rdi
; X64-PIC-NEXT: orq %rax, %rsp
@@ -436,25 +429,24 @@ define i32 @test_call_setjmp(ptr%ptr) nounwind {
; X64-PIC-NEXT: leaq .Lslh_ret_addr4(%rip), %rcx
; X64-PIC-NEXT: cmpq %rcx, %r12
; X64-PIC-NEXT: cmovneq %r15, %rax
; X64-PIC-NEXT: movl (%rbx), %r12d
; X64-PIC-NEXT: movl $42, %ebp
; X64-PIC-NEXT: movl (%rbx), %ebp
; X64-PIC-NEXT: shlq $47, %rax
; X64-PIC-NEXT: movq %r14, %rdi
; X64-PIC-NEXT: movl %ebp, %esi
; X64-PIC-NEXT: movl $42, %esi
; X64-PIC-NEXT: orq %rax, %rsp
; X64-PIC-NEXT: leaq .Lslh_ret_addr5(%rip), %r13
; X64-PIC-NEXT: leaq .Lslh_ret_addr5(%rip), %r12
; X64-PIC-NEXT: callq sigsetjmp@PLT
; X64-PIC-NEXT: .Lslh_ret_addr5:
; X64-PIC-NEXT: movq %rsp, %rax
; X64-PIC-NEXT: sarq $63, %rax
; X64-PIC-NEXT: leaq .Lslh_ret_addr5(%rip), %rcx
; X64-PIC-NEXT: cmpq %rcx, %r13
; X64-PIC-NEXT: cmpq %rcx, %r12
; X64-PIC-NEXT: cmovneq %r15, %rax
; X64-PIC-NEXT: addl (%rbx), %r12d
; X64-PIC-NEXT: addl (%rbx), %ebp
; X64-PIC-NEXT: shlq $47, %rax
; X64-PIC-NEXT: movq %r14, %rdi
; X64-PIC-NEXT: movq %r14, %rsi
; X64-PIC-NEXT: movl %ebp, %edx
; X64-PIC-NEXT: movl $42, %edx
; X64-PIC-NEXT: orq %rax, %rsp
; X64-PIC-NEXT: leaq .Lslh_ret_addr6(%rip), %r14
; X64-PIC-NEXT: callq __sigsetjmp@PLT
@@ -465,15 +457,14 @@ define i32 @test_call_setjmp(ptr%ptr) nounwind {
; X64-PIC-NEXT: cmpq %rcx, %r14
; X64-PIC-NEXT: movq %rax, %rcx
; X64-PIC-NEXT: cmovneq %r15, %rcx
; X64-PIC-NEXT: addl (%rbx), %r12d
; X64-PIC-NEXT: movl %r12d, %eax
; X64-PIC-NEXT: addl (%rbx), %ebp
; X64-PIC-NEXT: movl %ebp, %eax
; X64-PIC-NEXT: orl %ecx, %eax
; X64-PIC-NEXT: shlq $47, %rcx
; X64-PIC-NEXT: orq %rcx, %rsp
; X64-PIC-NEXT: addq $24, %rsp
; X64-PIC-NEXT: addq $16, %rsp
; X64-PIC-NEXT: popq %rbx
; X64-PIC-NEXT: popq %r12
; X64-PIC-NEXT: popq %r13
; X64-PIC-NEXT: popq %r14
; X64-PIC-NEXT: popq %r15
; X64-PIC-NEXT: popq %rbp


@@ -1566,11 +1566,11 @@ define swiftcc { i64, i64, i64, i64} @params_and_return_in_reg(i64, i64, i64, i6
; CHECK-APPLE-NEXT: .cfi_offset %r14, -32
; CHECK-APPLE-NEXT: .cfi_offset %r15, -24
; CHECK-APPLE-NEXT: .cfi_offset %rbp, -16
; CHECK-APPLE-NEXT: movq %r12, %rbx
; CHECK-APPLE-NEXT: movq %r13, (%rsp) ## 8-byte Spill
; CHECK-APPLE-NEXT: movq %r12, (%rsp) ## 8-byte Spill
; CHECK-APPLE-NEXT: movq %r13, {{[-0-9]+}}(%r{{[sb]}}p) ## 8-byte Spill
; CHECK-APPLE-NEXT: movq %r9, {{[-0-9]+}}(%r{{[sb]}}p) ## 8-byte Spill
; CHECK-APPLE-NEXT: movq %r8, {{[-0-9]+}}(%r{{[sb]}}p) ## 8-byte Spill
; CHECK-APPLE-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) ## 8-byte Spill
; CHECK-APPLE-NEXT: movq %rcx, %rbx
; CHECK-APPLE-NEXT: movq %rdx, %r14
; CHECK-APPLE-NEXT: movq %rsi, %r15
; CHECK-APPLE-NEXT: movq %rdi, %rbp
@@ -1587,16 +1587,16 @@ define swiftcc { i64, i64, i64, i64} @params_and_return_in_reg(i64, i64, i64, i6
; CHECK-APPLE-NEXT: movq %rbp, %rdi
; CHECK-APPLE-NEXT: movq %r15, %rsi
; CHECK-APPLE-NEXT: movq %r14, %rdx
; CHECK-APPLE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx ## 8-byte Reload
; CHECK-APPLE-NEXT: movq %rbx, %rcx
; CHECK-APPLE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 ## 8-byte Reload
; CHECK-APPLE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r9 ## 8-byte Reload
; CHECK-APPLE-NEXT: movq (%rsp), %r13 ## 8-byte Reload
; CHECK-APPLE-NEXT: movq %rbx, %r12
; CHECK-APPLE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r13 ## 8-byte Reload
; CHECK-APPLE-NEXT: movq (%rsp), %r12 ## 8-byte Reload
; CHECK-APPLE-NEXT: callq _params_and_return_in_reg2
; CHECK-APPLE-NEXT: movq %rax, %r14
; CHECK-APPLE-NEXT: movq %rdx, %r15
; CHECK-APPLE-NEXT: movq %rcx, %rbp
; CHECK-APPLE-NEXT: movq %r8, %rbx
; CHECK-APPLE-NEXT: movq %rax, %rbx
; CHECK-APPLE-NEXT: movq %rdx, %r14
; CHECK-APPLE-NEXT: movq %rcx, %r15
; CHECK-APPLE-NEXT: movq %r8, %rbp
; CHECK-APPLE-NEXT: movq %r12, (%rsp) ## 8-byte Spill
; CHECK-APPLE-NEXT: movl $1, %edi
; CHECK-APPLE-NEXT: movl $2, %esi
@@ -1607,10 +1607,10 @@ define swiftcc { i64, i64, i64, i64} @params_and_return_in_reg(i64, i64, i64, i6
; CHECK-APPLE-NEXT: xorl %r13d, %r13d
; CHECK-APPLE-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r12 ## 8-byte Reload
; CHECK-APPLE-NEXT: callq _params_in_reg2
; CHECK-APPLE-NEXT: movq %r14, %rax
; CHECK-APPLE-NEXT: movq %r15, %rdx
; CHECK-APPLE-NEXT: movq %rbp, %rcx
; CHECK-APPLE-NEXT: movq %rbx, %r8
; CHECK-APPLE-NEXT: movq %rbx, %rax
; CHECK-APPLE-NEXT: movq %r14, %rdx
; CHECK-APPLE-NEXT: movq %r15, %rcx
; CHECK-APPLE-NEXT: movq %rbp, %r8
; CHECK-APPLE-NEXT: movq (%rsp), %r12 ## 8-byte Reload
; CHECK-APPLE-NEXT: addq $48, %rsp
; CHECK-APPLE-NEXT: popq %rbx


@@ -173,13 +173,14 @@ define <8 x i32> @PR46393(<8 x i16> %a0, i8 %a1) {
define i64 @PR55050() {
; X86-LABEL: PR55050:
; X86: # %bb.0: # %entry
; X86-NEXT: xorl %edx, %edx
; X86-NEXT: xorl %eax, %eax
; X86-NEXT: testb %al, %al
; X86-NEXT: testb %dl, %dl
; X86-NEXT: jne .LBB10_2
; X86-NEXT: # %bb.1: # %if
; X86-NEXT: xorl %eax, %eax
; X86-NEXT: xorl %edx, %edx
; X86-NEXT: .LBB10_2: # %exit
; X86-NEXT: movl %eax, %edx
; X86-NEXT: retl
;
; X64-LABEL: PR55050: