diff --git a/llvm/include/llvm/MC/MCAsmLayout.h b/llvm/include/llvm/MC/MCAsmLayout.h
index 7f54a996df64..24b621e6f145 100644
--- a/llvm/include/llvm/MC/MCAsmLayout.h
+++ b/llvm/include/llvm/MC/MCAsmLayout.h
@@ -31,16 +31,12 @@ class MCAsmLayout {
   /// List of sections in layout order.
   llvm::SmallVector<MCSection *, 16> SectionOrder;
 
-  /// Compute the layout for the section if necessary.
-  void ensureValid(const MCFragment *F) const;
-
 public:
   MCAsmLayout(MCAssembler &Assembler);
 
   /// Get the assembler object this is a layout for.
   MCAssembler &getAssembler() const { return Assembler; }
 
-  void layoutBundle(MCFragment *Prev, MCFragment *F);
 
   /// \name Section Access (in layout order)
   /// @{
@@ -70,11 +66,6 @@ public:
   /// file. This may include additional padding, or be 0 for virtual sections.
   uint64_t getSectionFileSize(const MCSection *Sec) const;
 
-  /// Get the offset of the given symbol, as computed in the current
-  /// layout.
-  /// \return True on success.
-  bool getSymbolOffset(const MCSymbol &S, uint64_t &Val) const;
-
   /// Variant that reports a fatal error if the offset is not computable.
   uint64_t getSymbolOffset(const MCSymbol &S) const;
 
diff --git a/llvm/include/llvm/MC/MCAssembler.h b/llvm/include/llvm/MC/MCAssembler.h
index 7f076948c4af..53588139f7fa 100644
--- a/llvm/include/llvm/MC/MCAssembler.h
+++ b/llvm/include/llvm/MC/MCAssembler.h
@@ -118,6 +118,7 @@ private:
   std::unique_ptr<MCCodeEmitter> Emitter;
   std::unique_ptr<MCObjectWriter> Writer;
 
+  MCAsmLayout *Layout = nullptr;
   bool RelaxAll = false;
   bool SubsectionsViaSymbols = false;
   bool IncrementalLinkerCompatible = false;
@@ -171,7 +172,6 @@ private:
   /// Evaluate a fixup to a relocatable expression and the value which should be
   /// placed into the fixup.
   ///
-  /// \param Layout The layout to use for evaluation.
   /// \param Fixup The fixup to evaluate.
   /// \param DF The fragment the fixup is inside.
   /// \param Target [out] On return, the relocatable expression the fixup
@@ -183,45 +183,38 @@ private:
   /// \return Whether the fixup value was fully resolved. This is true if the
   /// \p Value result is fixed, otherwise the value may change due to
   /// relocation.
-  bool evaluateFixup(const MCAsmLayout &Layout, const MCFixup &Fixup,
-                     const MCFragment *DF, MCValue &Target,
-                     const MCSubtargetInfo *STI, uint64_t &Value,
-                     bool &WasForced) const;
+  bool evaluateFixup(const MCFixup &Fixup, const MCFragment *DF,
+                     MCValue &Target, const MCSubtargetInfo *STI,
+                     uint64_t &Value, bool &WasForced) const;
 
   /// Check whether a fixup can be satisfied, or whether it needs to be relaxed
   /// (increased in size, in order to hold its value correctly).
-  bool fixupNeedsRelaxation(const MCFixup &Fixup, const MCRelaxableFragment *DF,
-                            const MCAsmLayout &Layout) const;
+  bool fixupNeedsRelaxation(const MCFixup &Fixup, const MCRelaxableFragment *DF) const;
 
   /// Check whether the given fragment needs relaxation.
-  bool fragmentNeedsRelaxation(const MCRelaxableFragment *IF,
-                               const MCAsmLayout &Layout) const;
+  bool fragmentNeedsRelaxation(const MCRelaxableFragment *IF) const;
 
   /// Perform one layout iteration and return true if any offsets
   /// were adjusted.
-  bool layoutOnce(MCAsmLayout &Layout);
+  bool layoutOnce();
 
   /// Perform relaxation on a single fragment - returns true if the fragment
   /// changes as a result of relaxation.
-  bool relaxFragment(MCAsmLayout &Layout, MCFragment &F);
-  bool relaxInstruction(MCAsmLayout &Layout, MCRelaxableFragment &IF);
-  bool relaxLEB(MCAsmLayout &Layout, MCLEBFragment &IF);
-  bool relaxBoundaryAlign(MCAsmLayout &Layout, MCBoundaryAlignFragment &BF);
-  bool relaxDwarfLineAddr(MCAsmLayout &Layout, MCDwarfLineAddrFragment &DF);
-  bool relaxDwarfCallFrameFragment(MCAsmLayout &Layout,
-                                   MCDwarfCallFrameFragment &DF);
-  bool relaxCVInlineLineTable(MCAsmLayout &Layout,
-                              MCCVInlineLineTableFragment &DF);
-  bool relaxCVDefRange(MCAsmLayout &Layout, MCCVDefRangeFragment &DF);
-  bool relaxPseudoProbeAddr(MCAsmLayout &Layout, MCPseudoProbeAddrFragment &DF);
+  bool relaxFragment(MCFragment &F);
+  bool relaxInstruction(MCRelaxableFragment &IF);
+  bool relaxLEB(MCLEBFragment &IF);
+  bool relaxBoundaryAlign(MCBoundaryAlignFragment &BF);
+  bool relaxDwarfLineAddr(MCDwarfLineAddrFragment &DF);
+  bool relaxDwarfCallFrameFragment(MCDwarfCallFrameFragment &DF);
+  bool relaxCVInlineLineTable(MCCVInlineLineTableFragment &DF);
+  bool relaxCVDefRange(MCCVDefRangeFragment &DF);
+  bool relaxPseudoProbeAddr(MCPseudoProbeAddrFragment &DF);
 
   /// finishLayout - Finalize a layout, including fragment lowering.
   void finishLayout(MCAsmLayout &Layout);
 
-  std::tuple<MCValue, uint64_t, bool> handleFixup(const MCAsmLayout &Layout,
-                                                  MCFragment &F,
-                                                  const MCFixup &Fixup,
-                                                  const MCSubtargetInfo *STI);
+  std::tuple<MCValue, uint64_t, bool>
+  handleFixup(MCFragment &F, const MCFixup &Fixup, const MCSubtargetInfo *STI);
 
 public:
   struct Symver {
@@ -246,10 +239,28 @@ public:
   MCAssembler &operator=(const MCAssembler &) = delete;
   ~MCAssembler();
 
-  /// Compute the effective fragment size assuming it is laid out at the given
-  /// \p SectionAddress and \p FragmentOffset.
-  uint64_t computeFragmentSize(const MCAsmLayout &Layout,
-                               const MCFragment &F) const;
+  /// Compute the effective fragment size.
+  uint64_t computeFragmentSize(const MCFragment &F) const;
+
+  void layoutBundle(MCFragment *Prev, MCFragment *F) const;
+  void ensureValid(MCSection &Sec) const;
+
+  // Get the offset of the given fragment inside its containing section.
+  uint64_t getFragmentOffset(const MCFragment &F) const;
+
+  uint64_t getSectionAddressSize(const MCSection &Sec) const;
+  uint64_t getSectionFileSize(const MCSection &Sec) const;
+
+  // Get the offset of the given symbol, as computed in the current
+  // layout.
+  // \return True on success.
+  bool getSymbolOffset(const MCSymbol &S, uint64_t &Val) const;
+
+  // Variant that reports a fatal error if the offset is not computable.
+  uint64_t getSymbolOffset(const MCSymbol &S) const;
+
+  // If this symbol is equivalent to A + Constant, return A.
+  const MCSymbol *getBaseSymbol(const MCSymbol &Symbol) const;
 
   /// Check whether a particular symbol is visible to the linker and is required
   /// in the symbol table, or whether it can be discarded by the assembler. This
@@ -349,6 +360,8 @@ public:
     IncrementalLinkerCompatible = Value;
   }
 
+  MCAsmLayout *getLayout() const { return Layout; }
+  bool hasLayout() const { return Layout; }
   bool getRelaxAll() const { return RelaxAll; }
   void setRelaxAll(bool Value) { RelaxAll = Value; }
 
diff --git a/llvm/include/llvm/MC/MCExpr.h b/llvm/include/llvm/MC/MCExpr.h
index 42d240254be6..40319dcc8ee7 100644
--- a/llvm/include/llvm/MC/MCExpr.h
+++ b/llvm/include/llvm/MC/MCExpr.h
@@ -65,7 +65,6 @@ protected:
   }
 
   bool evaluateAsRelocatableImpl(MCValue &Res, const MCAssembler *Asm,
-                                 const MCAsmLayout *Layout,
                                  const MCFixup *Fixup,
                                  const SectionAddrMap *Addrs, bool InSet) const;
 
@@ -96,11 +95,8 @@ public:
   /// Try to evaluate the expression to an absolute value.
   ///
   /// \param Res - The absolute value, if evaluation succeeds.
-  /// \param Layout - The assembler layout object to use for evaluating symbol
-  /// values. If not given, then only non-symbolic expressions will be
-  /// evaluated.
   /// \return - True on success.
-  bool evaluateAsAbsolute(int64_t &Res, const MCAsmLayout &Layout,
+  bool evaluateAsAbsolute(int64_t &Res, const MCAssembler &Asm,
                           const SectionAddrMap &Addrs) const;
   bool evaluateAsAbsolute(int64_t &Res) const;
   bool evaluateAsAbsolute(int64_t &Res, const MCAssembler &Asm) const;
@@ -124,7 +120,7 @@ public:
   ///
   /// This is a more aggressive variant of evaluateAsRelocatable. The intended
   /// use is for when relocations are not available, like the .size directive.
-  bool evaluateAsValue(MCValue &Res, const MCAsmLayout &Layout) const;
+  bool evaluateAsValue(MCValue &Res, const MCAssembler &Asm) const;
 
   /// Find the "associated section" for this expression, which is
   /// currently defined as the absolute section for constants, or
diff --git a/llvm/lib/MC/ELFObjectWriter.cpp b/llvm/lib/MC/ELFObjectWriter.cpp
index 72e58d717ce3..e6dbc3e37e67 100644
--- a/llvm/lib/MC/ELFObjectWriter.cpp
+++ b/llvm/lib/MC/ELFObjectWriter.cpp
@@ -108,7 +108,7 @@ struct ELFWriter {
     DwoOnly,
   } Mode;
 
-  static uint64_t SymbolValue(const MCSymbol &Sym, const MCAsmLayout &Layout);
+  static uint64_t symbolValue(const MCSymbol &Sym, const MCAssembler &Asm);
   static bool isInSymtab(const MCAsmLayout &Layout, const MCSymbolELF &Symbol,
                          bool Used, bool Renamed);
 
@@ -454,16 +454,15 @@ void ELFWriter::writeHeader(const MCAssembler &Asm) {
   W.write<uint16_t>(StringTableIndex);
 }
 
-uint64_t ELFWriter::SymbolValue(const MCSymbol &Sym,
-                                const MCAsmLayout &Layout) {
+uint64_t ELFWriter::symbolValue(const MCSymbol &Sym, const MCAssembler &Asm) {
   if (Sym.isCommon())
     return Sym.getCommonAlignment()->value();
 
   uint64_t Res;
-  if (!Layout.getSymbolOffset(Sym, Res))
+  if (!Asm.getSymbolOffset(Sym, Res))
     return 0;
 
-  if (Layout.getAssembler().isThumbFunc(&Sym))
+  if (Asm.isThumbFunc(&Sym))
     Res |= 1;
 
   return Res;
@@ -542,7 +541,7 @@ void ELFWriter::writeSymbol(SymbolTableWriter &Writer, uint32_t StringIndex,
   uint8_t Visibility = Symbol.getVisibility();
   uint8_t Other = Symbol.getOther() | Visibility;
 
-  uint64_t Value = SymbolValue(*MSD.Symbol, Layout);
+  uint64_t Value = symbolValue(*MSD.Symbol, Layout.getAssembler());
   uint64_t Size = 0;
 
   const MCExpr *ESize = MSD.Symbol->getSize();
diff --git a/llvm/lib/MC/MCAssembler.cpp b/llvm/lib/MC/MCAssembler.cpp
index e6e77d376150..c675ba076266 100644
--- a/llvm/lib/MC/MCAssembler.cpp
+++ b/llvm/lib/MC/MCAssembler.cpp
@@ -172,10 +172,9 @@ bool MCAssembler::isSymbolLinkerVisible(const MCSymbol &Symbol) const {
   return false;
 }
 
-bool MCAssembler::evaluateFixup(const MCAsmLayout &Layout, const MCFixup &Fixup,
-                                const MCFragment *DF, MCValue &Target,
-                                const MCSubtargetInfo *STI, uint64_t &Value,
-                                bool &WasForced) const {
+bool MCAssembler::evaluateFixup(const MCFixup &Fixup, const MCFragment *DF,
+                                MCValue &Target, const MCSubtargetInfo *STI,
+                                uint64_t &Value, bool &WasForced) const {
   ++stats::evaluateFixup;
 
   // FIXME: This code has some duplication with recordRelocation. We should
@@ -188,7 +187,7 @@ bool MCAssembler::evaluateFixup(const MCAsmLayout &Layout, const MCFixup &Fixup,
   MCContext &Ctx = getContext();
   Value = 0;
   WasForced = false;
-  if (!Expr->evaluateAsRelocatable(Target, &Layout, &Fixup)) {
+  if (!Expr->evaluateAsRelocatable(Target, Layout, &Fixup)) {
     Ctx.reportError(Fixup.getLoc(), "expected relocatable expression");
     return true;
   }
@@ -205,7 +204,7 @@ bool MCAssembler::evaluateFixup(const MCAsmLayout &Layout, const MCFixup &Fixup,
       MCFixupKindInfo::FKF_IsTarget;
 
   if (IsTarget)
-    return getBackend().evaluateTargetFixup(*this, Layout, Fixup, DF, Target,
+    return getBackend().evaluateTargetFixup(*this, *Layout, Fixup, DF, Target,
                                             STI, Value, WasForced);
 
   unsigned FixupFlags = getBackendPtr()->getFixupKindInfo(Fixup.getKind()).Flags;
@@ -238,12 +237,12 @@ bool MCAssembler::evaluateFixup(const MCAsmLayout &Layout, const MCFixup &Fixup,
   if (const MCSymbolRefExpr *A = Target.getSymA()) {
     const MCSymbol &Sym = A->getSymbol();
     if (Sym.isDefined())
-      Value += Layout.getSymbolOffset(Sym);
+      Value += getSymbolOffset(Sym);
   }
   if (const MCSymbolRefExpr *B = Target.getSymB()) {
     const MCSymbol &Sym = B->getSymbol();
     if (Sym.isDefined())
-      Value -= Layout.getSymbolOffset(Sym);
+      Value -= getSymbolOffset(Sym);
   }
 
   bool ShouldAlignPC = getBackend().getFixupKindInfo(Fixup.getKind()).Flags &
@@ -252,7 +251,7 @@ bool MCAssembler::evaluateFixup(const MCAsmLayout &Layout, const MCFixup &Fixup,
          "FKF_IsAlignedDownTo32Bits is only allowed on PC-relative fixups!");
 
   if (IsPCRel) {
-    uint64_t Offset = Layout.getFragmentOffset(DF) + Fixup.getOffset();
+    uint64_t Offset = getFragmentOffset(*DF) + Fixup.getOffset();
 
     // A number of ARM fixups in Thumb mode require that the effective PC
     // address be determined as the 32-bit aligned version of the actual offset.
@@ -271,14 +270,13 @@ bool MCAssembler::evaluateFixup(const MCAsmLayout &Layout, const MCFixup &Fixup,
   // recordRelocation handle non-VK_None cases like A@plt-B+C.
   if (!IsResolved && Target.getSymA() && Target.getSymB() &&
       Target.getSymA()->getKind() == MCSymbolRefExpr::VK_None &&
-      getBackend().handleAddSubRelocations(Layout, *DF, Fixup, Target, Value))
+      getBackend().handleAddSubRelocations(*Layout, *DF, Fixup, Target, Value))
     return true;
 
   return IsResolved;
 }
 
-uint64_t MCAssembler::computeFragmentSize(const MCAsmLayout &Layout,
-                                          const MCFragment &F) const {
+uint64_t MCAssembler::computeFragmentSize(const MCFragment &F) const {
   assert(getBackendPtr() && "Requires assembler backend");
   switch (F.getKind()) {
   case MCFragment::FT_Data:
@@ -290,7 +288,7 @@ uint64_t MCAssembler::computeFragmentSize(const MCAsmLayout &Layout,
   case MCFragment::FT_Fill: {
     auto &FF = cast<MCFillFragment>(F);
     int64_t NumValues = 0;
-    if (!FF.getNumValues().evaluateKnownAbsolute(NumValues, Layout)) {
+    if (!FF.getNumValues().evaluateKnownAbsolute(NumValues, *Layout)) {
      getContext().reportError(FF.getLoc(),
                               "expected assembly-time absolute expression");
       return 0;
@@ -317,7 +315,7 @@ uint64_t MCAssembler::computeFragmentSize(const MCAsmLayout &Layout,
 
   case MCFragment::FT_Align: {
     const MCAlignFragment &AF = cast<MCAlignFragment>(F);
-    unsigned Offset = Layout.getFragmentOffset(&AF);
+    unsigned Offset = getFragmentOffset(AF);
     unsigned Size = offsetToAlignment(Offset, AF.getAlignment());
 
     // Insert extra Nops for code alignment if the target define
@@ -340,17 +338,17 @@ uint64_t MCAssembler::computeFragmentSize(const MCAsmLayout &Layout,
   case MCFragment::FT_Org: {
     const MCOrgFragment &OF = cast<MCOrgFragment>(F);
     MCValue Value;
-    if (!OF.getOffset().evaluateAsValue(Value, Layout)) {
+    if (!OF.getOffset().evaluateAsValue(Value, *this)) {
       getContext().reportError(OF.getLoc(),
                                "expected assembly-time absolute expression");
       return 0;
     }
 
-    uint64_t FragmentOffset = Layout.getFragmentOffset(&OF);
+    uint64_t FragmentOffset = getFragmentOffset(OF);
     int64_t TargetLocation = Value.getConstant();
     if (const MCSymbolRefExpr *A = Value.getSymA()) {
       uint64_t Val;
-      if (!Layout.getSymbolOffset(A->getSymbol(), Val)) {
+      if (!getSymbolOffset(A->getSymbol(), Val)) {
         getContext().reportError(OF.getLoc(), "expected absolute expression");
         return 0;
       }
@@ -433,7 +431,7 @@ static uint64_t computeBundlePadding(unsigned BundleSize,
   return 0;
 }
 
-void MCAsmLayout::layoutBundle(MCFragment *Prev, MCFragment *F) {
+void MCAssembler::layoutBundle(MCFragment *Prev, MCFragment *F) const {
   // If bundling is enabled and this fragment has instructions in it, it has to
   // obey the bundling restrictions. With padding, we'll have:
   //
@@ -458,13 +456,13 @@ void MCAsmLayout::layoutBundle(MCFragment *Prev, MCFragment *F) {
     assert(isa<MCEncodedFragment>(F) &&
            "Only MCEncodedFragment implementations have instructions");
     MCEncodedFragment *EF = cast<MCEncodedFragment>(F);
-    uint64_t FSize = Assembler.computeFragmentSize(*this, *EF);
+    uint64_t FSize = computeFragmentSize(*EF);
 
-    if (FSize > Assembler.getBundleAlignSize())
+    if (FSize > getBundleAlignSize())
       report_fatal_error("Fragment can't be larger than a bundle size");
 
-    uint64_t RequiredBundlePadding = computeBundlePadding(
-        Assembler.getBundleAlignSize(), EF, EF->Offset, FSize);
+    uint64_t RequiredBundlePadding =
+        computeBundlePadding(getBundleAlignSize(), EF, EF->Offset, FSize);
     if (RequiredBundlePadding > UINT8_MAX)
       report_fatal_error("Padding cannot exceed 255 bytes");
     EF->setBundlePadding(static_cast<uint8_t>(RequiredBundlePadding));
@@ -474,8 +472,7 @@ void MCAsmLayout::layoutBundle(MCFragment *Prev, MCFragment *F) {
     DF->Offset = EF->Offset;
 }
 
-void MCAsmLayout::ensureValid(const MCFragment *Frag) const {
-  MCSection &Sec = *Frag->getParent();
+void MCAssembler::ensureValid(MCSection &Sec) const {
   if (Sec.hasLayout())
     return;
   Sec.setHasLayout(true);
@@ -483,22 +480,26 @@ void MCAsmLayout::ensureValid(const MCFragment *Frag) const {
   uint64_t Offset = 0;
   for (MCFragment &F : Sec) {
     F.Offset = Offset;
-    if (Assembler.isBundlingEnabled() && F.hasInstructions()) {
-      const_cast<MCAsmLayout *>(this)->layoutBundle(Prev, &F);
+    if (isBundlingEnabled() && F.hasInstructions()) {
+      layoutBundle(Prev, &F);
       Offset = F.Offset;
     }
-    Offset += getAssembler().computeFragmentSize(*this, F);
+    Offset += computeFragmentSize(F);
     Prev = &F;
   }
 }
 
+uint64_t MCAssembler::getFragmentOffset(const MCFragment &F) const {
+  ensureValid(*F.getParent());
+  return F.Offset;
+}
+
 uint64_t MCAsmLayout::getFragmentOffset(const MCFragment *F) const {
-  ensureValid(F);
-  return F->Offset;
+  return Assembler.getFragmentOffset(*F);
 }
 
 // Simple getSymbolOffset helper for the non-variable case.
-static bool getLabelOffset(const MCAsmLayout &Layout, const MCSymbol &S,
+static bool getLabelOffset(const MCAssembler &Asm, const MCSymbol &S,
                            bool ReportError, uint64_t &Val) {
   if (!S.getFragment()) {
     if (ReportError)
@@ -506,18 +507,18 @@
                            S.getName() + "'");
     return false;
   }
-  Val = Layout.getFragmentOffset(S.getFragment()) + S.getOffset();
+  Val = Asm.getFragmentOffset(*S.getFragment()) + S.getOffset();
   return true;
 }
 
-static bool getSymbolOffsetImpl(const MCAsmLayout &Layout, const MCSymbol &S,
+static bool getSymbolOffsetImpl(const MCAssembler &Asm, const MCSymbol &S,
                                 bool ReportError, uint64_t &Val) {
   if (!S.isVariable())
-    return getLabelOffset(Layout, S, ReportError, Val);
+    return getLabelOffset(Asm, S, ReportError, Val);
 
   // If SD is a variable, evaluate it.
   MCValue Target;
-  if (!S.getVariableValue()->evaluateAsValue(Target, Layout))
+  if (!S.getVariableValue()->evaluateAsValue(Target, Asm))
     report_fatal_error("unable to evaluate offset for variable '" +
                        S.getName() + "'");
 
@@ -530,7 +531,7 @@ static bool getSymbolOffsetImpl(const MCAsmLayout &Layout, const MCSymbol &S,
     // having been simplified during evaluation, but on Mach-O they can be
    // variables due to PR19203. This, and the line below for `B` can be
    // restored to call `getLabelOffset` when PR19203 is fixed.
-    if (!getSymbolOffsetImpl(Layout, A->getSymbol(), ReportError, ValA))
+    if (!getSymbolOffsetImpl(Asm, A->getSymbol(), ReportError, ValA))
       return false;
     Offset += ValA;
   }
@@ -538,7 +539,7 @@ static bool getSymbolOffsetImpl(const MCAsmLayout &Layout, const MCSymbol &S,
   const MCSymbolRefExpr *B = Target.getSymB();
   if (B) {
     uint64_t ValB;
-    if (!getSymbolOffsetImpl(Layout, B->getSymbol(), ReportError, ValB))
+    if (!getSymbolOffsetImpl(Asm, B->getSymbol(), ReportError, ValB))
       return false;
     Offset -= ValB;
   }
@@ -547,31 +548,36 @@ static bool getSymbolOffsetImpl(const MCAsmLayout &Layout, const MCSymbol &S,
   return true;
 }
 
-bool MCAsmLayout::getSymbolOffset(const MCSymbol &S, uint64_t &Val) const {
+bool MCAssembler::getSymbolOffset(const MCSymbol &S, uint64_t &Val) const {
   return getSymbolOffsetImpl(*this, S, false, Val);
 }
 
-uint64_t MCAsmLayout::getSymbolOffset(const MCSymbol &S) const {
+uint64_t MCAssembler::getSymbolOffset(const MCSymbol &S) const {
   uint64_t Val;
   getSymbolOffsetImpl(*this, S, true, Val);
   return Val;
 }
 
-const MCSymbol *MCAsmLayout::getBaseSymbol(const MCSymbol &Symbol) const {
+uint64_t MCAsmLayout::getSymbolOffset(const MCSymbol &S) const {
+  return Assembler.getSymbolOffset(S);
+}
+
+const MCSymbol *MCAssembler::getBaseSymbol(const MCSymbol &Symbol) const {
+  assert(Layout);
   if (!Symbol.isVariable())
     return &Symbol;
 
   const MCExpr *Expr = Symbol.getVariableValue();
   MCValue Value;
   if (!Expr->evaluateAsValue(Value, *this)) {
-    Assembler.getContext().reportError(Expr->getLoc(),
-                                       "expression could not be evaluated");
+    getContext().reportError(Expr->getLoc(),
                             "expression could not be evaluated");
     return nullptr;
   }
 
   const MCSymbolRefExpr *RefB = Value.getSymB();
   if (RefB) {
-    Assembler.getContext().reportError(
+    getContext().reportError(
         Expr->getLoc(), Twine("symbol '") + RefB->getSymbol().getName() +
                             "' could not be evaluated in a subtraction expression");
@@ -583,31 +589,37 @@ const MCSymbol *MCAsmLayout::getBaseSymbol(const MCSymbol &Symbol) const {
     return nullptr;
 
   const MCSymbol &ASym = A->getSymbol();
-  const MCAssembler &Asm = getAssembler();
   if (ASym.isCommon()) {
-    Asm.getContext().reportError(Expr->getLoc(),
-                                 "Common symbol '" + ASym.getName() +
-                                 "' cannot be used in assignment expr");
+    getContext().reportError(Expr->getLoc(),
                             "Common symbol '" + ASym.getName() +
                                 "' cannot be used in assignment expr");
     return nullptr;
   }
 
   return &ASym;
 }
-
-uint64_t MCAsmLayout::getSectionAddressSize(const MCSection *Sec) const {
-  // The size is the last fragment's end offset.
-  const MCFragment &F = *Sec->curFragList()->Tail;
-  return getFragmentOffset(&F) + getAssembler().computeFragmentSize(*this, F);
+const MCSymbol *MCAsmLayout::getBaseSymbol(const MCSymbol &Symbol) const {
+  return Assembler.getBaseSymbol(Symbol);
 }
 
-uint64_t MCAsmLayout::getSectionFileSize(const MCSection *Sec) const {
-  // Virtual sections have no file size.
-  if (Sec->isVirtualSection())
-    return 0;
+uint64_t MCAssembler::getSectionAddressSize(const MCSection &Sec) const {
+  // The size is the last fragment's end offset.
+  const MCFragment &F = *Sec.curFragList()->Tail;
+  return getFragmentOffset(F) + computeFragmentSize(F);
+}
+uint64_t MCAsmLayout::getSectionAddressSize(const MCSection *Sec) const {
+  return Assembler.getSectionAddressSize(*Sec);
+}
 
-  // Otherwise, the file size is the same as the address space size.
+uint64_t MCAssembler::getSectionFileSize(const MCSection &Sec) const {
+  // Virtual sections have no file size.
+  if (Sec.isVirtualSection())
+    return 0;
   return getSectionAddressSize(Sec);
 }
+uint64_t MCAsmLayout::getSectionFileSize(const MCSection *Sec) const {
+  return Assembler.getSectionFileSize(*Sec);
+}
 
 bool MCAssembler::registerSymbol(const MCSymbol &Symbol) {
   bool Changed = !Symbol.isRegistered();
@@ -657,7 +669,7 @@ void MCAssembler::writeFragmentPadding(raw_ostream &OS,
 static void writeFragment(raw_ostream &OS, const MCAssembler &Asm,
                           const MCAsmLayout &Layout, const MCFragment &F) {
   // FIXME: Embed in fragments instead?
-  uint64_t FragmentSize = Asm.computeFragmentSize(Layout, F);
+  uint64_t FragmentSize = Asm.computeFragmentSize(F);
 
   llvm::endianness Endian = Asm.getBackend().Endian;
 
@@ -933,19 +945,19 @@ void MCAssembler::writeSectionData(raw_ostream &OS, const MCSection *Sec,
 }
 
 std::tuple<MCValue, uint64_t, bool>
-MCAssembler::handleFixup(const MCAsmLayout &Layout, MCFragment &F,
-                         const MCFixup &Fixup, const MCSubtargetInfo *STI) {
+MCAssembler::handleFixup(MCFragment &F, const MCFixup &Fixup,
+                         const MCSubtargetInfo *STI) {
   // Evaluate the fixup.
   MCValue Target;
   uint64_t FixedValue;
   bool WasForced;
   bool IsResolved =
-      evaluateFixup(Layout, Fixup, &F, Target, STI, FixedValue, WasForced);
+      evaluateFixup(Fixup, &F, Target, STI, FixedValue, WasForced);
   if (!IsResolved) {
     // The fixup was unresolved, we need a relocation. Inform the object
     // writer of the relocation, and give it an opportunity to adjust the
     // fixup value if need be.
-    getWriter().recordRelocation(*this, Layout, &F, Fixup, Target, FixedValue);
+    getWriter().recordRelocation(*this, *Layout, &F, Fixup, Target, FixedValue);
   }
   return std::make_tuple(Target, FixedValue, IsResolved);
 }
@@ -986,7 +998,8 @@ void MCAssembler::layout(MCAsmLayout &Layout) {
   }
 
   // Layout until everything fits.
-  while (layoutOnce(Layout)) {
+  this->Layout = &Layout;
+  while (layoutOnce()) {
     if (getContext().hadError())
       return;
     // Size of fragments in one section can depend on the size of fragments in
@@ -1082,7 +1095,7 @@ void MCAssembler::layout(MCAsmLayout &Layout) {
         bool IsResolved;
         MCValue Target;
         std::tie(Target, FixedValue, IsResolved) =
-            handleFixup(Layout, Frag, Fixup, STI);
+            handleFixup(Frag, Fixup, STI);
         getBackend().applyFixup(*this, Fixup, Target, Contents, FixedValue,
                                 IsResolved, STI);
       }
@@ -1097,27 +1110,27 @@ void MCAssembler::Finish() {
 
   // Write the object file.
   stats::ObjectBytes += getWriter().writeObject(*this, Layout);
+
+  this->Layout = nullptr;
 }
 
 bool MCAssembler::fixupNeedsRelaxation(const MCFixup &Fixup,
-                                       const MCRelaxableFragment *DF,
-                                       const MCAsmLayout &Layout) const {
+                                       const MCRelaxableFragment *DF) const {
   assert(getBackendPtr() && "Expected assembler backend");
   MCValue Target;
   uint64_t Value;
   bool WasForced;
-  bool Resolved = evaluateFixup(Layout, Fixup, DF, Target,
-                                DF->getSubtargetInfo(), Value, WasForced);
+  bool Resolved = evaluateFixup(Fixup, DF, Target, DF->getSubtargetInfo(),
                                Value, WasForced);
   if (Target.getSymA() &&
       Target.getSymA()->getKind() == MCSymbolRefExpr::VK_X86_ABS8 &&
       Fixup.getKind() == FK_Data_1)
     return false;
   return getBackend().fixupNeedsRelaxationAdvanced(Fixup, Resolved, Value, DF,
-                                                   Layout, WasForced);
+                                                   *Layout, WasForced);
 }
 
-bool MCAssembler::fragmentNeedsRelaxation(const MCRelaxableFragment *F,
-                                          const MCAsmLayout &Layout) const {
+bool MCAssembler::fragmentNeedsRelaxation(const MCRelaxableFragment *F) const {
   assert(getBackendPtr() && "Expected assembler backend");
  // If this inst doesn't ever need relaxation, ignore it. This occurs when we
  // are intentionally pushing out inst fragments, or because we relaxed a
  // previous instruction to one that doesn't need relaxation.
@@ -1126,17 +1139,16 @@ bool MCAssembler::fragmentNeedsRelaxation(const MCRelaxableFragment *F,
     return false;
 
   for (const MCFixup &Fixup : F->getFixups())
-    if (fixupNeedsRelaxation(Fixup, F, Layout))
+    if (fixupNeedsRelaxation(Fixup, F))
       return true;
 
   return false;
 }
 
-bool MCAssembler::relaxInstruction(MCAsmLayout &Layout,
-                                   MCRelaxableFragment &F) {
+bool MCAssembler::relaxInstruction(MCRelaxableFragment &F) {
   assert(getEmitterPtr() &&
          "Expected CodeEmitter defined for relaxInstruction");
-  if (!fragmentNeedsRelaxation(&F, Layout))
+  if (!fragmentNeedsRelaxation(&F))
     return false;
 
   ++stats::RelaxedInstructions;
@@ -1158,7 +1170,7 @@ bool MCAssembler::relaxInstruction(MCAsmLayout &Layout,
   return true;
 }
 
-bool MCAssembler::relaxLEB(MCAsmLayout &Layout, MCLEBFragment &LF) {
+bool MCAssembler::relaxLEB(MCLEBFragment &LF) {
   const unsigned OldSize = static_cast<unsigned>(LF.getContents().size());
   unsigned PadTo = OldSize;
   int64_t Value;
@@ -1168,11 +1180,12 @@ bool MCAssembler::relaxLEB(MCAsmLayout &Layout, MCLEBFragment &LF) {
   // requires that .uleb128 A-B is foldable where A and B reside in different
   // fragments. This is used by __gcc_except_table.
   bool Abs = getSubsectionsViaSymbols()
-                 ? LF.getValue().evaluateKnownAbsolute(Value, Layout)
-                 : LF.getValue().evaluateAsAbsolute(Value, Layout);
+                 ? LF.getValue().evaluateKnownAbsolute(Value, *Layout)
+                 : LF.getValue().evaluateAsAbsolute(Value, *Layout);
   if (!Abs) {
     bool Relaxed, UseZeroPad;
-    std::tie(Relaxed, UseZeroPad) = getBackend().relaxLEB128(LF, Layout, Value);
+    std::tie(Relaxed, UseZeroPad) =
+        getBackend().relaxLEB128(LF, *Layout, Value);
     if (!Relaxed) {
       getContext().reportError(LF.getValue().getLoc(),
                                Twine(LF.isSigned() ? ".s" : ".u") +
@@ -1234,17 +1247,16 @@ static bool needPadding(uint64_t StartAddr, uint64_t Size,
          isAgainstBoundary(StartAddr, Size, BoundaryAlignment);
 }
 
-bool MCAssembler::relaxBoundaryAlign(MCAsmLayout &Layout,
-                                     MCBoundaryAlignFragment &BF) {
+bool MCAssembler::relaxBoundaryAlign(MCBoundaryAlignFragment &BF) {
  // BoundaryAlignFragment that doesn't need to align any fragment should not be
  // relaxed.
   if (!BF.getLastFragment())
     return false;
 
-  uint64_t AlignedOffset = Layout.getFragmentOffset(&BF);
+  uint64_t AlignedOffset = getFragmentOffset(BF);
   uint64_t AlignedSize = 0;
   for (const MCFragment *F = BF.getNext();; F = F->getNext()) {
-    AlignedSize += computeFragmentSize(Layout, *F);
+    AlignedSize += computeFragmentSize(*F);
     if (F == BF.getLastFragment())
       break;
   }
@@ -1259,17 +1271,16 @@ bool MCAssembler::relaxBoundaryAlign(MCAsmLayout &Layout,
   return true;
 }
 
-bool MCAssembler::relaxDwarfLineAddr(MCAsmLayout &Layout,
-                                     MCDwarfLineAddrFragment &DF) {
+bool MCAssembler::relaxDwarfLineAddr(MCDwarfLineAddrFragment &DF) {
   bool WasRelaxed;
-  if (getBackend().relaxDwarfLineAddr(DF, Layout, WasRelaxed))
+  if (getBackend().relaxDwarfLineAddr(DF, *Layout, WasRelaxed))
     return WasRelaxed;
 
-  MCContext &Context = Layout.getAssembler().getContext();
+  MCContext &Context = getContext();
   uint64_t OldSize = DF.getContents().size();
   int64_t AddrDelta;
-  bool Abs = DF.getAddrDelta().evaluateKnownAbsolute(AddrDelta, Layout);
+  bool Abs = DF.getAddrDelta().evaluateKnownAbsolute(AddrDelta, *Layout);
   assert(Abs && "We created a line delta with an invalid expression");
   (void)Abs;
   int64_t LineDelta;
@@ -1283,15 +1294,14 @@ bool MCAssembler::relaxDwarfLineAddr(MCAsmLayout &Layout,
   return OldSize != Data.size();
 }
 
-bool MCAssembler::relaxDwarfCallFrameFragment(MCAsmLayout &Layout,
-                                              MCDwarfCallFrameFragment &DF) {
+bool MCAssembler::relaxDwarfCallFrameFragment(MCDwarfCallFrameFragment &DF) {
   bool WasRelaxed;
-  if (getBackend().relaxDwarfCFA(DF, Layout, WasRelaxed))
+  if (getBackend().relaxDwarfCFA(DF, *Layout, WasRelaxed))
     return WasRelaxed;
 
-  MCContext &Context = Layout.getAssembler().getContext();
+  MCContext &Context = getContext();
   int64_t Value;
-  bool Abs = DF.getAddrDelta().evaluateAsAbsolute(Value, Layout);
+  bool Abs = DF.getAddrDelta().evaluateAsAbsolute(Value, *Layout);
   if (!Abs) {
     getContext().reportError(DF.getAddrDelta().getLoc(),
                              "invalid CFI advance_loc expression");
@@ -1308,25 +1318,22 @@ bool MCAssembler::relaxDwarfCallFrameFragment(MCAsmLayout &Layout,
   return OldSize != Data.size();
 }
 
-bool MCAssembler::relaxCVInlineLineTable(MCAsmLayout &Layout,
-                                         MCCVInlineLineTableFragment &F) {
+bool MCAssembler::relaxCVInlineLineTable(MCCVInlineLineTableFragment &F) {
   unsigned OldSize = F.getContents().size();
-  getContext().getCVContext().encodeInlineLineTable(Layout, F);
+  getContext().getCVContext().encodeInlineLineTable(*Layout, F);
   return OldSize != F.getContents().size();
 }
 
-bool MCAssembler::relaxCVDefRange(MCAsmLayout &Layout,
-                                  MCCVDefRangeFragment &F) {
+bool MCAssembler::relaxCVDefRange(MCCVDefRangeFragment &F) {
   unsigned OldSize = F.getContents().size();
-  getContext().getCVContext().encodeDefRange(Layout, F);
+  getContext().getCVContext().encodeDefRange(*Layout, F);
   return OldSize != F.getContents().size();
 }
 
-bool MCAssembler::relaxPseudoProbeAddr(MCAsmLayout &Layout,
-                                       MCPseudoProbeAddrFragment &PF) {
+bool MCAssembler::relaxPseudoProbeAddr(MCPseudoProbeAddrFragment &PF) {
   uint64_t OldSize = PF.getContents().size();
   int64_t AddrDelta;
-  bool Abs = PF.getAddrDelta().evaluateKnownAbsolute(AddrDelta, Layout);
+  bool Abs = PF.getAddrDelta().evaluateKnownAbsolute(AddrDelta, *Layout);
   assert(Abs && "We created a pseudo probe with an invalid expression");
   (void)Abs;
   SmallVectorImpl<char> &Data = PF.getContents();
@@ -1339,39 +1346,38 @@ bool MCAssembler::relaxPseudoProbeAddr(MCAsmLayout &Layout,
   return OldSize != Data.size();
 }
 
-bool MCAssembler::relaxFragment(MCAsmLayout &Layout, MCFragment &F) {
+bool MCAssembler::relaxFragment(MCFragment &F) {
   switch(F.getKind()) {
   default:
     return false;
   case MCFragment::FT_Relaxable:
     assert(!getRelaxAll() &&
            "Did not expect a MCRelaxableFragment in RelaxAll mode");
-    return relaxInstruction(Layout, cast<MCRelaxableFragment>(F));
+    return relaxInstruction(cast<MCRelaxableFragment>(F));
  case MCFragment::FT_Dwarf:
-    return relaxDwarfLineAddr(Layout, cast<MCDwarfLineAddrFragment>(F));
+    return relaxDwarfLineAddr(cast<MCDwarfLineAddrFragment>(F));
  case MCFragment::FT_DwarfFrame:
-    return relaxDwarfCallFrameFragment(Layout,
-                                       cast<MCDwarfCallFrameFragment>(F));
+    return relaxDwarfCallFrameFragment(cast<MCDwarfCallFrameFragment>(F));
  case MCFragment::FT_LEB:
-    return relaxLEB(Layout, cast<MCLEBFragment>(F));
+    return relaxLEB(cast<MCLEBFragment>(F));
  case MCFragment::FT_BoundaryAlign:
-    return relaxBoundaryAlign(Layout, cast<MCBoundaryAlignFragment>(F));
+    return relaxBoundaryAlign(cast<MCBoundaryAlignFragment>(F));
  case MCFragment::FT_CVInlineLines:
-    return relaxCVInlineLineTable(Layout, cast<MCCVInlineLineTableFragment>(F));
+    return relaxCVInlineLineTable(cast<MCCVInlineLineTableFragment>(F));
  case MCFragment::FT_CVDefRange:
-    return relaxCVDefRange(Layout, cast<MCCVDefRangeFragment>(F));
+    return relaxCVDefRange(cast<MCCVDefRangeFragment>(F));
  case MCFragment::FT_PseudoProbe:
-    return relaxPseudoProbeAddr(Layout, cast<MCPseudoProbeAddrFragment>(F));
+    return relaxPseudoProbeAddr(cast<MCPseudoProbeAddrFragment>(F));
   }
 }
 
-bool MCAssembler::layoutOnce(MCAsmLayout &Layout) {
+bool MCAssembler::layoutOnce() {
   ++stats::RelaxationSteps;
 
   bool Changed = false;
   for (MCSection &Sec : *this)
     for (MCFragment &Frag : Sec)
-      if (relaxFragment(Layout, Frag))
+      if (relaxFragment(Frag))
         Changed = true;
   return Changed;
 }
diff --git a/llvm/lib/MC/MCExpr.cpp b/llvm/lib/MC/MCExpr.cpp
index 5cfb7b9becd1..e2af92e30e4b 100644
--- a/llvm/lib/MC/MCExpr.cpp
+++ b/llvm/lib/MC/MCExpr.cpp
@@ -553,12 +553,11 @@ bool MCExpr::evaluateAsAbsolute(int64_t &Res,
   return evaluateAsAbsolute(Res, &Layout.getAssembler(), &Layout, nullptr, false);
 }
 
-bool MCExpr::evaluateAsAbsolute(int64_t &Res,
-                                const MCAsmLayout &Layout,
+bool MCExpr::evaluateAsAbsolute(int64_t &Res, const MCAssembler &Asm,
                                 const SectionAddrMap &Addrs) const {
   // Setting InSet causes us to absolutize differences across sections and that
   // is what the MachO writer uses Addrs for.
-  return evaluateAsAbsolute(Res, &Layout.getAssembler(), &Layout, &Addrs, true);
+  return evaluateAsAbsolute(Res, &Asm, Asm.getLayout(), &Addrs, true);
 }
 
 bool MCExpr::evaluateAsAbsolute(int64_t &Res, const MCAssembler &Asm) const {
@@ -587,7 +586,7 @@ bool MCExpr::evaluateAsAbsolute(int64_t &Res, const MCAssembler *Asm,
   }
 
   bool IsRelocatable =
-      evaluateAsRelocatableImpl(Value, Asm, Layout, nullptr, Addrs, InSet);
+      evaluateAsRelocatableImpl(Value, Asm, nullptr, Addrs, InSet);
 
   // Record the current value.
   Res = Value.getConstant();
@@ -597,9 +596,8 @@ bool MCExpr::evaluateAsAbsolute(int64_t &Res, const MCAssembler *Asm,
 
 /// Helper method for \see EvaluateSymbolAdd().
 static void AttemptToFoldSymbolOffsetDifference(
-    const MCAssembler *Asm, const MCAsmLayout *Layout,
-    const SectionAddrMap *Addrs, bool InSet, const MCSymbolRefExpr *&A,
-    const MCSymbolRefExpr *&B, int64_t &Addend) {
+    const MCAssembler *Asm, const SectionAddrMap *Addrs, bool InSet,
+    const MCSymbolRefExpr *&A, const MCSymbolRefExpr *&B, int64_t &Addend) {
   if (!A || !B)
     return;
 
@@ -636,6 +634,7 @@ static void AttemptToFoldSymbolOffsetDifference(
   // separated by a linker-relaxable instruction. If the section contains
   // instructions and InSet is false (not expressions in directive like
   // .size/.fill), disable the fast path.
+  const MCAsmLayout *Layout = Asm->getLayout();
   if (Layout && (InSet || !SecA.hasInstructions() ||
                  !(Asm->getContext().getTargetTriple().isRISCV() ||
                    Asm->getContext().getTargetTriple().isLoongArch()))) {
@@ -711,7 +710,7 @@ static void AttemptToFoldSymbolOffsetDifference(
           AF && Layout && AF->hasEmitNops() &&
           !Asm->getBackend().shouldInsertExtraNopBytesForCodeAlign(
               *AF, Count)) {
-        Displacement += Asm->computeFragmentSize(*Layout, *AF);
+        Displacement += Asm->computeFragmentSize(*AF);
       } else if (auto *FF = dyn_cast<MCFillFragment>(FI);
                  FF && FF->getNumValues().evaluateAsAbsolute(Num)) {
         Displacement += Num * FF->getValueSize();
@@ -741,8 +740,7 @@ static void AttemptToFoldSymbolOffsetDifference(
 /// They might look redundant, but this function can be used before layout
 /// is done (see the object streamer for example) and having the Asm argument
 /// lets us avoid relaxations early.
-static bool EvaluateSymbolicAdd(const MCAssembler *Asm,
-                                const MCAsmLayout *Layout,
+static bool evaluateSymbolicAdd(const MCAssembler *Asm,
                                 const SectionAddrMap *Addrs, bool InSet,
                                 const MCValue &LHS, const MCValue &RHS,
                                 MCValue &Res) {
@@ -762,9 +760,6 @@ static bool EvaluateSymbolicAdd(const MCAssembler *Asm,
   // Fold the result constant immediately.
   int64_t Result_Cst = LHS_Cst + RHS_Cst;
 
-  assert((!Layout || Asm) &&
-         "Must have an assembler object if layout is given!");
-
   // If we have a layout, we can fold resolved differences.
   if (Asm) {
     // First, fold out any differences which are fully resolved. By
@@ -777,13 +772,13 @@ static bool EvaluateSymbolicAdd(const MCAssembler *Asm,
     // (RHS_A - RHS_B).
     // Since we are attempting to be as aggressive as possible about folding, we
     // attempt to evaluate each possible alternative.
-    AttemptToFoldSymbolOffsetDifference(Asm, Layout, Addrs, InSet, LHS_A, LHS_B,
+    AttemptToFoldSymbolOffsetDifference(Asm, Addrs, InSet, LHS_A, LHS_B,
                                         Result_Cst);
-    AttemptToFoldSymbolOffsetDifference(Asm, Layout, Addrs, InSet, LHS_A, RHS_B,
+    AttemptToFoldSymbolOffsetDifference(Asm, Addrs, InSet, LHS_A, RHS_B,
                                         Result_Cst);
-    AttemptToFoldSymbolOffsetDifference(Asm, Layout, Addrs, InSet, RHS_A, LHS_B,
+    AttemptToFoldSymbolOffsetDifference(Asm, Addrs, InSet, RHS_A, LHS_B,
                                         Result_Cst);
-    AttemptToFoldSymbolOffsetDifference(Asm, Layout, Addrs, InSet, RHS_A, RHS_B,
+    AttemptToFoldSymbolOffsetDifference(Asm, Addrs, InSet, RHS_A, RHS_B,
                                         Result_Cst);
   }
 
@@ -804,14 +799,11 @@ bool MCExpr::evaluateAsRelocatable(MCValue &Res,
                                    const MCAsmLayout *Layout,
                                    const MCFixup *Fixup) const {
   MCAssembler *Assembler = Layout ? &Layout->getAssembler() : nullptr;
-  return evaluateAsRelocatableImpl(Res, Assembler, Layout, Fixup, nullptr,
-                                   false);
+  return evaluateAsRelocatableImpl(Res, Assembler, Fixup, nullptr, false);
 }
 
-bool MCExpr::evaluateAsValue(MCValue &Res, const MCAsmLayout &Layout) const {
-  MCAssembler *Assembler = &Layout.getAssembler();
-  return evaluateAsRelocatableImpl(Res, Assembler, &Layout, nullptr, nullptr,
-                                   true);
+bool MCExpr::evaluateAsValue(MCValue &Res, const MCAssembler &Asm) const {
+  return evaluateAsRelocatableImpl(Res, &Asm, nullptr, nullptr, true);
 }
 
 static bool canExpand(const MCSymbol &Sym, bool InSet) {
@@ -831,12 +823,11 @@ static bool canExpand(const MCSymbol &Sym, bool InSet) {
 }
 
 bool MCExpr::evaluateAsRelocatableImpl(MCValue &Res, const MCAssembler *Asm,
-                                       const MCAsmLayout *Layout,
                                        const MCFixup *Fixup,
                                        const SectionAddrMap *Addrs,
                                        bool InSet) const {
   ++stats::MCExprEvaluate;
-
+  MCAsmLayout *Layout = Asm ? Asm->getLayout() : nullptr;
   switch (getKind()) {
   case Target:
     return cast<MCTargetExpr>(this)->evaluateAsRelocatableImpl(Res, Layout,
@@ -856,7 +847,7 @@ bool MCExpr::evaluateAsRelocatableImpl(MCValue &Res, const MCAssembler *Asm,
         canExpand(Sym, InSet)) {
       bool IsMachO = SRE->hasSubsectionsViaSymbols();
       if (Sym.getVariableValue()->evaluateAsRelocatableImpl(
-              Res, Asm, Layout, Fixup, Addrs, InSet || IsMachO)) {
+              Res, Asm, Fixup, Addrs, InSet || IsMachO)) {
         if (Kind != MCSymbolRefExpr::VK_None) {
           if (Res.isAbsolute()) {
             Res = MCValue::get(SRE, nullptr, 0);
@@ -901,8 +892,8 @@ bool MCExpr::evaluateAsRelocatableImpl(MCValue &Res, const MCAssembler *Asm,
     const MCUnaryExpr *AUE = cast<MCUnaryExpr>(this);
     MCValue Value;
 
-    if (!AUE->getSubExpr()->evaluateAsRelocatableImpl(Value, Asm, Layout, Fixup,
-                                                      Addrs, InSet))
+    if (!AUE->getSubExpr()->evaluateAsRelocatableImpl(Value, Asm, Fixup, Addrs,
                                                      InSet))
       return false;
 
     switch (AUE->getOpcode()) {
@@ -937,10 +928,10 @@ bool MCExpr::evaluateAsRelocatableImpl(MCValue &Res, const MCAssembler *Asm,
     const MCBinaryExpr *ABE = cast<MCBinaryExpr>(this);
     MCValue LHSValue, RHSValue;
 
-    if (!ABE->getLHS()->evaluateAsRelocatableImpl(LHSValue, Asm, Layout, Fixup,
-                                                  Addrs, InSet) ||
-        !ABE->getRHS()->evaluateAsRelocatableImpl(RHSValue, Asm, Layout, Fixup,
-                                                  Addrs, InSet)) {
+    if (!ABE->getLHS()->evaluateAsRelocatableImpl(LHSValue, Asm, Fixup, Addrs,
                                                  InSet) ||
+        !ABE->getRHS()->evaluateAsRelocatableImpl(RHSValue, Asm, Fixup, Addrs,
                                                  InSet)) {
       // Check if both are Target Expressions, see if we can compare them.
       if (const MCTargetExpr *L = dyn_cast<MCTargetExpr>(ABE->getLHS())) {
         if (const MCTargetExpr *R = dyn_cast<MCTargetExpr>(ABE->getRHS())) {
@@ -968,16 +959,16 @@ bool MCExpr::evaluateAsRelocatableImpl(MCValue &Res, const MCAssembler *Asm,
     case MCBinaryExpr::Sub:
       // Negate RHS and add.
       // The cast avoids undefined behavior if the constant is INT64_MIN.
-      return EvaluateSymbolicAdd(
-          Asm, Layout, Addrs, InSet, LHSValue,
+      return evaluateSymbolicAdd(
+          Asm, Addrs, InSet, LHSValue,
           MCValue::get(RHSValue.getSymB(), RHSValue.getSymA(),
                        -(uint64_t)RHSValue.getConstant(),
                        RHSValue.getRefKind()),
           Res);
 
     case MCBinaryExpr::Add:
-      return EvaluateSymbolicAdd(
-          Asm, Layout, Addrs, InSet, LHSValue,
+      return evaluateSymbolicAdd(
+          Asm, Addrs, InSet, LHSValue,
          MCValue::get(RHSValue.getSymA(), RHSValue.getSymB(),
                       RHSValue.getConstant(), RHSValue.getRefKind()),
          Res);
diff --git a/llvm/lib/MC/WinCOFFObjectWriter.cpp b/llvm/lib/MC/WinCOFFObjectWriter.cpp
index a938f433cabc..5101cb381cb6 100644
--- a/llvm/lib/MC/WinCOFFObjectWriter.cpp
+++ b/llvm/lib/MC/WinCOFFObjectWriter.cpp
@@ -367,13 +367,12 @@ void WinCOFFWriter::defineSection(const MCSectionCOFF &MCSec,
   }
 }
 
-static uint64_t getSymbolValue(const MCSymbol &Symbol,
-                               const MCAsmLayout &Layout) {
+static uint64_t getSymbolValue(const MCSymbol &Symbol, const MCAssembler &Asm) {
   if (Symbol.isCommon() && Symbol.isExternal())
     return Symbol.getCommonSize();
 
   uint64_t Res;
-  if (!Layout.getSymbolOffset(Symbol, Res))
+  if (!Asm.getSymbolOffset(Symbol, Res))
     return 0;
 
   return Res;
@@ -446,7 +445,7 @@ void WinCOFFWriter::DefineSymbol(const MCSymbol &MCSym, MCAssembler &Assembler,
   }
 
   if (Local) {
-    Local->Data.Value = getSymbolValue(MCSym, Layout);
+    Local->Data.Value = getSymbolValue(MCSym, Assembler);
 
     const MCSymbolCOFF &SymbolCOFF = cast<MCSymbolCOFF>(MCSym);
     Local->Data.Type = SymbolCOFF.getType();
diff --git a/llvm/lib/Target/ARM/MCTargetDesc/ARMMachObjectWriter.cpp b/llvm/lib/Target/ARM/MCTargetDesc/ARMMachObjectWriter.cpp
index 4a41fce711f3..5626c0b3eaca 100644
--- a/llvm/lib/Target/ARM/MCTargetDesc/ARMMachObjectWriter.cpp
+++ b/llvm/lib/Target/ARM/MCTargetDesc/ARMMachObjectWriter.cpp
@@ -438,7 +438,7 @@ void ARMMachObjectWriter::recordRelocation(MachObjectWriter *Writer,
     if (A->isVariable()) {
       int64_t Res;
       if (A->getVariableValue()->evaluateAsAbsolute(
-              Res, Layout, Writer->getSectionAddressMap())) {
+              Res, Asm, Writer->getSectionAddressMap())) {
         FixedValue = Res;
         return;
       }
diff --git a/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonAsmBackend.cpp b/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonAsmBackend.cpp
index 6f8853733ae9..0536f8927731 100644
--- a/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonAsmBackend.cpp
+++ b/llvm/lib/Target/Hexagon/MCTargetDesc/HexagonAsmBackend.cpp
@@ -722,7 +722,7 @@ public:
         default:
           break;
         case MCFragment::FT_Align: {
-          auto Size = Asm.computeFragmentSize(Layout, *Frags[J]);
+          auto Size = Asm.computeFragmentSize(*Frags[J]);
           for (auto K = J; K != 0 && Size >= HEXAGON_PACKET_SIZE;) {
             --K;
             switch (Frags[K]->getKind()) {
diff --git a/llvm/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp b/llvm/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp
index bf30cee1d5cd..fce484acf0ee 100644
--- a/llvm/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp
+++ b/llvm/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp
@@ -919,7 +919,7 @@ void X86AsmBackend::finishLayout(MCAssembler const &Asm,
 #ifndef NDEBUG
     const uint64_t OrigOffset = Layout.getFragmentOffset(&F);
 #endif
-    const uint64_t OrigSize = Asm.computeFragmentSize(Layout, F);
+    const uint64_t OrigSize = Asm.computeFragmentSize(F);
 
    // To keep the effects local, prefer to relax instructions closest to
    // the align directive. This is purely about human understandability
@@ -951,7 +951,7 @@ void X86AsmBackend::finishLayout(MCAssembler const &Asm,
 
 #ifndef NDEBUG
     const uint64_t FinalOffset = Layout.getFragmentOffset(&F);
-    const uint64_t FinalSize = Asm.computeFragmentSize(Layout, F);
+    const uint64_t FinalSize = Asm.computeFragmentSize(F);
     assert(OrigOffset + OrigSize == FinalOffset + FinalSize &&
            "can't move start of next fragment!");
     assert(FinalSize == RemainingSize && "inconsistent size computation?");
@@ -974,7 +974,7 @@ void X86AsmBackend::finishLayout(MCAssembler const &Asm,
   for (unsigned int i = 0, n = Layout.getSectionOrder().size(); i != n; ++i) {
     MCSection &Section = *Layout.getSectionOrder()[i];
     Layout.getFragmentOffset(&*Section.curFragList()->Tail);
-    Asm.computeFragmentSize(Layout, *Section.curFragList()->Tail);
+    Asm.computeFragmentSize(*Section.curFragList()->Tail);
   }
 }
 
diff --git a/llvm/lib/Target/X86/MCTargetDesc/X86MachObjectWriter.cpp b/llvm/lib/Target/X86/MCTargetDesc/X86MachObjectWriter.cpp
index 4c91a14be988..825bd3a203d1 100644
--- a/llvm/lib/Target/X86/MCTargetDesc/X86MachObjectWriter.cpp
+++ b/llvm/lib/Target/X86/MCTargetDesc/X86MachObjectWriter.cpp
@@ -252,8 +252,8 @@ void X86MachObjectWriter::RecordX86_64Relocation(
     } else if (Symbol->isVariable()) {
       const MCExpr *Value = Symbol->getVariableValue();
       int64_t Res;
-      bool isAbs = Value->evaluateAsAbsolute(Res, Layout,
-                                             Writer->getSectionAddressMap());
+      bool isAbs =
+          Value->evaluateAsAbsolute(Res, Asm, Writer->getSectionAddressMap());
       if (isAbs) {
         FixedValue = Res;
         return;
@@ -566,7 +566,7 @@ void X86MachObjectWriter::RecordX86Relocation(MachObjectWriter *Writer,
     if (A->isVariable()) {
       int64_t Res;
       if (A->getVariableValue()->evaluateAsAbsolute(
-              Res, Layout, Writer->getSectionAddressMap())) {
+              Res, Asm, Writer->getSectionAddressMap())) {
        FixedValue = Res;
        return;
      }
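
---

For context only (not part of the patch): a minimal sketch of how a hypothetical out-of-tree caller might migrate from the MCAsmLayout-based queries to the MCAssembler-based ones introduced above. The helper names (symbolEndOld/symbolEndNew) are invented for illustration; only the accessors that appear in this diff (getSymbolOffset, getFragmentOffset, computeFragmentSize, hasLayout) are assumed.

// C++ sketch, not part of the patch.
#include <cassert>
#include <cstdint>
#include "llvm/MC/MCAsmLayout.h"
#include "llvm/MC/MCAssembler.h"
#include "llvm/MC/MCFragment.h"
#include "llvm/MC/MCSymbol.h"

using namespace llvm;

// Before this patch: an MCAsmLayout was threaded through to every query.
static uint64_t symbolEndOld(const MCAsmLayout &Layout, const MCSymbol &Sym) {
  uint64_t Off = 0;
  (void)Layout.getSymbolOffset(Sym, Off); // Off stays 0 if not computable.
  // Assumes Sym is defined in a fragment.
  const MCFragment &F = *Sym.getFragment();
  return Off + Layout.getAssembler().computeFragmentSize(Layout, F);
}

// After this patch: the assembler owns the layout, so callers pass only Asm.
static uint64_t symbolEndNew(const MCAssembler &Asm, const MCSymbol &Sym) {
  assert(Asm.hasLayout() && "offset queries are only meaningful during layout");
  uint64_t Off = 0;
  (void)Asm.getSymbolOffset(Sym, Off); // Off stays 0 if not computable.
  const MCFragment &F = *Sym.getFragment();
  return Off + Asm.computeFragmentSize(F);
}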