Re-apply e50aea58d5, "Major JITLinkMemoryManager refactor", with fixes.

Adds explicit narrowing casts to JITLinkMemoryManager.cpp.

Honors the -slab-address option in llvm-jitlink.cpp, which was accidentally
dropped in the refactor.

This effectively reverts commit 6641d29b70.
Lang Hames
2021-10-11 20:55:30 -07:00
parent db9c2d7751
commit 962a2479b5
34 changed files with 1708 additions and 856 deletions

View File

@@ -47,7 +47,7 @@ int main(int argc, char *argv[]) {
.setObjectLinkingLayerCreator(
[&](ExecutionSession &ES, const Triple &TT) {
return std::make_unique<ObjectLinkingLayer>(
ES, std::make_unique<jitlink::InProcessMemoryManager>());
ES, ExitOnErr(jitlink::InProcessMemoryManager::Create()));
})
.create());

View File

@@ -209,7 +209,7 @@ int main(int argc, char *argv[]) {
[&](ExecutionSession &ES, const Triple &TT) {
// Create ObjectLinkingLayer.
auto ObjLinkingLayer = std::make_unique<ObjectLinkingLayer>(
ES, std::make_unique<jitlink::InProcessMemoryManager>());
ES, ExitOnErr(jitlink::InProcessMemoryManager::Create()));
// Add an instance of our plugin.
ObjLinkingLayer->addPlugin(std::make_unique<MyPlugin>());
return ObjLinkingLayer;

View File

@@ -13,19 +13,19 @@
#ifndef LLVM_EXECUTIONENGINE_JITLINK_JITLINK_H
#define LLVM_EXECUTIONENGINE_JITLINK_JITLINK_H
#include "JITLinkMemoryManager.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/Triple.h"
#include "llvm/ExecutionEngine/JITLink/JITLinkMemoryManager.h"
#include "llvm/ExecutionEngine/JITLink/MemoryFlags.h"
#include "llvm/ExecutionEngine/JITSymbol.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/FormatVariadic.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Memory.h"
#include "llvm/Support/MemoryBuffer.h"
#include <map>
@@ -225,7 +225,7 @@ public:
/// Get the content for this block. Block must not be a zero-fill block.
ArrayRef<char> getContent() const {
assert(Data && "Section does not contain content");
assert(Data && "Block does not contain content");
return ArrayRef<char>(Data, Size);
}
@@ -233,6 +233,7 @@ public:
/// Caller is responsible for ensuring the underlying bytes are not
/// deallocated while pointed to by this block.
void setContent(ArrayRef<char> Content) {
assert(Content.data() && "Setting null content");
Data = Content.data();
Size = Content.size();
ContentMutable = false;
@@ -251,6 +252,7 @@ public:
/// to call this on a block with immutable content -- consider using
/// getMutableContent instead.
MutableArrayRef<char> getAlreadyMutableContent() {
assert(Data && "Block does not contain content");
assert(ContentMutable && "Content is not mutable");
return MutableArrayRef<char>(const_cast<char *>(Data), Size);
}
@@ -260,6 +262,7 @@ public:
/// The caller is responsible for ensuring that the memory pointed to by
/// MutableContent is not deallocated while pointed to by this block.
void setMutableContent(MutableArrayRef<char> MutableContent) {
assert(MutableContent.data() && "Setting null content");
Data = MutableContent.data();
Size = MutableContent.size();
ContentMutable = true;
@@ -295,6 +298,7 @@ public:
/// Add an edge to this block.
void addEdge(Edge::Kind K, Edge::OffsetT Offset, Symbol &Target,
Edge::AddendT Addend) {
assert(!isZeroFill() && "Adding edge to zero-fill block?");
Edges.push_back(Edge(K, Offset, Target, Addend));
}
@@ -640,8 +644,7 @@ class Section {
friend class LinkGraph;
private:
Section(StringRef Name, sys::Memory::ProtectionFlags Prot,
SectionOrdinal SecOrdinal)
Section(StringRef Name, MemProt Prot, SectionOrdinal SecOrdinal)
: Name(Name), Prot(Prot), SecOrdinal(SecOrdinal) {}
using SymbolSet = DenseSet<Symbol *>;
@@ -666,12 +669,16 @@ public:
StringRef getName() const { return Name; }
/// Returns the protection flags for this section.
sys::Memory::ProtectionFlags getProtectionFlags() const { return Prot; }
MemProt getMemProt() const { return Prot; }
/// Set the protection flags for this section.
void setProtectionFlags(sys::Memory::ProtectionFlags Prot) {
this->Prot = Prot;
}
void setMemProt(MemProt Prot) { this->Prot = Prot; }
/// Get the deallocation policy for this section.
MemDeallocPolicy getMemDeallocPolicy() const { return MDP; }
/// Set the deallocation policy for this section.
void setMemDeallocPolicy(MemDeallocPolicy MDP) { this->MDP = MDP; }
/// Returns the ordinal for this section.
SectionOrdinal getOrdinal() const { return SecOrdinal; }
@@ -686,6 +693,7 @@ public:
return make_range(Blocks.begin(), Blocks.end());
}
/// Returns the number of blocks in this section.
BlockSet::size_type blocks_size() const { return Blocks.size(); }
/// Returns an iterator over the symbols defined in this section.
@@ -734,7 +742,8 @@ private:
}
StringRef Name;
sys::Memory::ProtectionFlags Prot;
MemProt Prot;
MemDeallocPolicy MDP = MemDeallocPolicy::Standard;
SectionOrdinal SecOrdinal = 0;
BlockSet Blocks;
SymbolSet Symbols;
@@ -916,6 +925,11 @@ public:
: Name(std::move(Name)), TT(TT), PointerSize(PointerSize),
Endianness(Endianness), GetEdgeKindName(std::move(GetEdgeKindName)) {}
LinkGraph(const LinkGraph &) = delete;
LinkGraph &operator=(const LinkGraph &) = delete;
LinkGraph(LinkGraph &&) = delete;
LinkGraph &operator=(LinkGraph &&) = delete;
/// Returns the name of this graph (usually the name of the original
/// underlying MemoryBuffer).
const std::string &getName() const { return Name; }
@@ -962,7 +976,7 @@ public:
}
/// Create a section with the given name, protection flags, and alignment.
Section &createSection(StringRef Name, sys::Memory::ProtectionFlags Prot) {
Section &createSection(StringRef Name, MemProt Prot) {
assert(llvm::find_if(Sections,
[&](std::unique_ptr<Section> &Sec) {
return Sec->getName() == Name;
@@ -1350,6 +1364,13 @@ public:
Sections.erase(I);
}
/// Accessor for the AllocActions object for this graph. This can be used to
/// register allocation action calls prior to finalization.
///
/// Accessing this object after finalization will result in undefined
/// behavior.
JITLinkMemoryManager::AllocActions &allocActions() { return AAs; }
/// Dump the graph.
void dump(raw_ostream &OS);
@@ -1366,6 +1387,7 @@ private:
SectionList Sections;
ExternalSymbolSet ExternalSymbols;
ExternalSymbolSet AbsoluteSymbols;
JITLinkMemoryManager::AllocActions AAs;
};
inline MutableArrayRef<char> Block::getMutableContent(LinkGraph &G) {
@@ -1655,8 +1677,7 @@ public:
/// finalized (i.e. emitted to memory and memory permissions set). If all of
/// this objects dependencies have also been finalized then the code is ready
/// to run.
virtual void
notifyFinalized(std::unique_ptr<JITLinkMemoryManager::Allocation> A) = 0;
virtual void notifyFinalized(JITLinkMemoryManager::FinalizedAlloc Alloc) = 0;
/// Called by JITLink prior to linking to determine whether default passes for
/// the target should be added. The default implementation returns true.

View File

@@ -13,106 +13,408 @@
#ifndef LLVM_EXECUTIONENGINE_JITLINK_JITLINKMEMORYMANAGER_H
#define LLVM_EXECUTIONENGINE_JITLINK_JITLINKMEMORYMANAGER_H
#include "llvm/ADT/DenseMap.h"
#include "llvm/ExecutionEngine/JITLink/JITLinkDylib.h"
#include "llvm/ExecutionEngine/JITLink/MemoryFlags.h"
#include "llvm/ExecutionEngine/JITSymbol.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/MSVCErrorWorkarounds.h"
#include "llvm/Support/Memory.h"
#include "llvm/Support/RecyclingAllocator.h"
#include <cstdint>
#include <future>
#include <mutex>
namespace llvm {
namespace jitlink {
class Block;
class LinkGraph;
class Section;
/// Manages allocations of JIT memory.
///
/// Instances of this class may be accessed concurrently from multiple threads
/// and their implementations should include any necessary synchronization.
class JITLinkMemoryManager {
public:
using ProtectionFlags = sys::Memory::ProtectionFlags;
class SegmentRequest {
public:
SegmentRequest() = default;
SegmentRequest(uint64_t Alignment, size_t ContentSize,
uint64_t ZeroFillSize)
: Alignment(Alignment), ContentSize(ContentSize),
ZeroFillSize(ZeroFillSize) {
assert(isPowerOf2_32(Alignment) && "Alignment must be power of 2");
}
uint64_t getAlignment() const { return Alignment; }
size_t getContentSize() const { return ContentSize; }
uint64_t getZeroFillSize() const { return ZeroFillSize; }
private:
uint64_t Alignment = 0;
size_t ContentSize = 0;
uint64_t ZeroFillSize = 0;
/// Represents a call to a graph-memory-management support function in the
/// executor.
///
/// Support functions are called as:
///
/// auto *Result =
/// ((char*(*)(const void*, size_t))FnAddr)(
/// (const void*)CtxAddr, (size_t)CtxSize)
///
/// A null result is interpreted as success.
///
/// A non-null result is interpreted as a heap-allocated string containing
/// an error message to report to the allocator (the allocator's
/// executor-side implementation code is responsible for freeing the error
/// string).
struct AllocActionCall {
JITTargetAddress FnAddr = 0;
JITTargetAddress CtxAddr = 0;
JITTargetAddress CtxSize = 0;
};
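As an illustration of the calling convention documented above (a sketch, not part of this commit; the function name and context handling are hypothetical), an executor-side support function would look roughly like this:

// Hypothetical executor-side support function with the documented
// signature char *(*)(const void *, size_t). Returning nullptr reports
// success; a heap-allocated string reports an error (freed by the caller).
#include <cstddef>
#include <cstring> // strdup (POSIX)

extern "C" char *myRegisterSection(const void *Ctx, size_t CtxSize) {
  if (CtxSize == 0)
    return strdup("myRegisterSection: empty context");
  // ... interpret Ctx/CtxSize and perform the registration here ...
  return nullptr;
}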
using SegmentsRequestMap = DenseMap<unsigned, SegmentRequest>;
/// A pair of AllocActionCalls, one to be run at finalization time, one to be
/// run at deallocation time.
///
/// AllocActionCallPairs should be constructed for paired operations (e.g.
/// __register_ehframe and __deregister_ehframe for eh-frame registration).
/// See comments for AllocActions for execution ordering.
///
/// For unpaired operations one or the other member can be left unused, as
/// AllocActionCalls with an FnAddr of zero will be skipped.
struct AllocActionCallPair {
AllocActionCall Finalize;
AllocActionCall Dealloc;
};
/// Represents an allocation created by the memory manager.
/// A vector of allocation actions to be run for this allocation.
///
/// An allocation object is responsible for allocating and owning jit-linker
/// working and target memory, and for transferring from working to target
/// memory.
/// Finalize actions will be run in order at finalize time. Dealloc
/// actions will be run in reverse order at deallocation time.
using AllocActions = std::vector<AllocActionCallPair>;
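For example (a sketch, not from this patch), a JITLink pass could register a paired action through the LinkGraph's new allocActions() accessor; the addresses below are placeholders that a real pass would derive from graph symbols:

#include "llvm/ExecutionEngine/JITLink/JITLink.h"

// Sketch: attach one finalize/dealloc action pair to a graph. Finalize runs
// at finalization time; Dealloc runs (in reverse registration order) at
// deallocation time.
void addRegistrationActions(llvm::jitlink::LinkGraph &G,
                            llvm::JITTargetAddress RegisterFn,
                            llvm::JITTargetAddress DeregisterFn,
                            llvm::JITTargetAddress CtxAddr,
                            llvm::JITTargetAddress CtxSize) {
  using MemMgr = llvm::jitlink::JITLinkMemoryManager;
  MemMgr::AllocActionCallPair Pair;
  Pair.Finalize = {RegisterFn, CtxAddr, CtxSize};
  Pair.Dealloc = {DeregisterFn, CtxAddr, CtxSize};
  G.allocActions().push_back(Pair);
}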
/// Represents a finalized allocation.
///
class Allocation {
/// Finalized allocations must be passed to the
/// JITLinkMemoryManager::deallocate method prior to being destroyed.
///
/// The interpretation of the Address associated with the finalized allocation
/// is up to the memory manager implementation. Common options are using the
/// base address of the allocation, or the address of a memory management
/// object that tracks the allocation.
class FinalizedAlloc {
friend class JITLinkMemoryManager;
public:
using FinalizeContinuation = std::function<void(Error)>;
static constexpr JITTargetAddress InvalidAddr = ~JITTargetAddress(0);
virtual ~Allocation();
FinalizedAlloc() = default;
explicit FinalizedAlloc(JITTargetAddress A) : A(A) {
assert(A != 0 && "Explicitly creating an invalid allocation?");
}
FinalizedAlloc(const FinalizedAlloc &) = delete;
FinalizedAlloc(FinalizedAlloc &&Other) : A(Other.A) {
Other.A = InvalidAddr;
}
FinalizedAlloc &operator=(const FinalizedAlloc &) = delete;
FinalizedAlloc &operator=(FinalizedAlloc &&Other) {
assert(A == InvalidAddr &&
"Cannot overwrite active finalized allocation");
std::swap(A, Other.A);
return *this;
}
~FinalizedAlloc() {
assert(A == InvalidAddr && "Finalized allocation was not deallocated");
}
/// Should return the address of linker working memory for the segment with
/// the given protection flags.
virtual MutableArrayRef<char> getWorkingMemory(ProtectionFlags Seg) = 0;
/// FinalizedAllocs convert to false for default-constructed, and
/// true otherwise. Default-constructed allocs need not be deallocated.
explicit operator bool() const { return A != InvalidAddr; }
/// Should return the final address in the target process where the segment
/// will reside.
virtual JITTargetAddress getTargetMemory(ProtectionFlags Seg) = 0;
/// Returns the address associated with this finalized allocation.
/// The allocation is unmodified.
JITTargetAddress getAddress() const { return A; }
/// Should transfer from working memory to target memory, and release
/// working memory.
virtual void finalizeAsync(FinalizeContinuation OnFinalize) = 0;
/// Returns the address associated with this finalized allocation and
/// resets this object to the default state.
/// This should only be used by allocators when deallocating memory.
JITTargetAddress release() {
JITTargetAddress Tmp = A;
A = InvalidAddr;
return Tmp;
}
/// Calls finalizeAsync and waits for completion.
Error finalize() {
std::promise<MSVCPError> FinalizeResultP;
private:
JITTargetAddress A = InvalidAddr;
};
/// Represents an allocation which has not been finalized yet.
///
/// InFlightAllocs manage both executor memory allocations and working
/// memory allocations.
///
/// On finalization, the InFlightAlloc should transfer the content of
/// working memory into executor memory, apply memory protections, and
/// run any finalization functions.
///
/// Working memory should be kept alive at least until one of the following
/// happens: (1) the InFlightAlloc instance is destroyed, (2) the
/// InFlightAlloc is abandoned, (3) finalized target memory is destroyed.
///
/// If abandon is called then working memory and executor memory should both
/// be freed.
class InFlightAlloc {
public:
using OnFinalizedFunction = unique_function<void(Expected<FinalizedAlloc>)>;
using OnAbandonedFunction = unique_function<void(Error)>;
virtual ~InFlightAlloc();
/// Called prior to finalization if the allocation should be abandoned.
virtual void abandon(OnAbandonedFunction OnAbandoned) = 0;
/// Called to transfer working memory to the target and apply finalization.
virtual void finalize(OnFinalizedFunction OnFinalized) = 0;
/// Synchronous convenience version of finalize.
Expected<FinalizedAlloc> finalize() {
std::promise<MSVCPExpected<FinalizedAlloc>> FinalizeResultP;
auto FinalizeResultF = FinalizeResultP.get_future();
finalizeAsync(
[&](Error Err) { FinalizeResultP.set_value(std::move(Err)); });
finalize([&](Expected<FinalizedAlloc> Result) {
FinalizeResultP.set_value(std::move(Result));
});
return FinalizeResultF.get();
}
/// Should deallocate target memory.
virtual Error deallocate() = 0;
};
/// Typedef for the argument to be passed to OnAllocatedFunction.
using AllocResult = Expected<std::unique_ptr<InFlightAlloc>>;
/// Called when allocation has been completed.
using OnAllocatedFunction = unique_function<void(AllocResult)>;
/// Called when deallocation has completed.
using OnDeallocatedFunction = unique_function<void(Error)>;
virtual ~JITLinkMemoryManager();
/// Create an Allocation object.
/// Start the allocation process.
///
/// The JD argument represents the target JITLinkDylib, and can be used by
/// JITLinkMemoryManager implementers to manage per-dylib allocation pools
/// (e.g. one pre-reserved address space slab per dylib to ensure that all
/// allocations for the dylib are within a certain range). The JD argument
/// may be null (representing an allocation not associated with any
/// JITDylib).
/// If the initial allocation is successful then the OnAllocated function will
/// be called with a std::unique_ptr<InFlightAlloc> value. If the allocation
/// is unsuccessful then the OnAllocated function will be called with an
/// Error.
virtual void allocate(const JITLinkDylib *JD, LinkGraph &G,
OnAllocatedFunction OnAllocated) = 0;
/// Convenience function for blocking allocation.
AllocResult allocate(const JITLinkDylib *JD, LinkGraph &G) {
std::promise<MSVCPExpected<std::unique_ptr<InFlightAlloc>>> AllocResultP;
auto AllocResultF = AllocResultP.get_future();
allocate(JD, G, [&](AllocResult Alloc) {
AllocResultP.set_value(std::move(Alloc));
});
return AllocResultF.get();
}
/// Deallocate a list of allocation objects.
///
/// The request argument describes the segment sizes and permissions being
/// requested.
virtual Expected<std::unique_ptr<Allocation>>
allocate(const JITLinkDylib *JD, const SegmentsRequestMap &Request) = 0;
/// Dealloc actions will be run in reverse order (from the end of the vector
/// to the start).
virtual void deallocate(std::vector<FinalizedAlloc> Allocs,
OnDeallocatedFunction OnDeallocated) = 0;
/// Convenience function for deallocation of a single alloc.
void deallocate(FinalizedAlloc Alloc, OnDeallocatedFunction OnDeallocated) {
std::vector<FinalizedAlloc> Allocs;
Allocs.push_back(std::move(Alloc));
deallocate(std::move(Allocs), std::move(OnDeallocated));
}
/// Convenience function for blocking deallocation.
Error deallocate(std::vector<FinalizedAlloc> Allocs) {
std::promise<MSVCPError> DeallocResultP;
auto DeallocResultF = DeallocResultP.get_future();
deallocate(std::move(Allocs),
[&](Error Err) { DeallocResultP.set_value(std::move(Err)); });
return DeallocResultF.get();
}
/// Convenience function for blocking deallocation of a single alloc.
Error deallocate(FinalizedAlloc Alloc) {
std::vector<FinalizedAlloc> Allocs;
Allocs.push_back(std::move(Alloc));
return deallocate(std::move(Allocs));
}
};
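Putting the new interface together, the blocking convenience overloads compose roughly as follows (a sketch assuming MemMgr and G already exist; content fixup is omitted):

// Sketch: synchronous allocate -> finalize -> deallocate round trip.
llvm::Error linkGraphOnce(llvm::jitlink::JITLinkMemoryManager &MemMgr,
                          llvm::jitlink::LinkGraph &G) {
  // 1. Allocate working and executor memory for the graph.
  auto Alloc = MemMgr.allocate(/*JD=*/nullptr, G);
  if (!Alloc)
    return Alloc.takeError();

  // ... copy and fix up block content in working memory here ...

  // 2. Transfer to executor memory, apply protections, run finalize actions.
  auto FA = (*Alloc)->finalize();
  if (!FA)
    return FA.takeError();

  // 3. Hand the FinalizedAlloc back to the manager when it is no longer
  //    needed (runs dealloc actions and frees executor memory).
  return MemMgr.deallocate(std::move(*FA));
}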
/// BasicLayout simplifies the implementation of JITLinkMemoryManagers.
///
/// BasicLayout groups Sections into Segments based on their memory protection
/// and deallocation policies. JITLinkMemoryManagers can construct a BasicLayout
/// from a Graph, and then assign working memory and addresses to each of the
/// Segments. These addresses will be mapped back onto the Graph blocks in
/// the apply method.
class BasicLayout {
public:
/// The Alignment, ContentSize and ZeroFillSize of each segment will be
/// pre-filled from the Graph. Clients must set the Addr and WorkingMem fields
/// prior to calling apply.
class Segment {
friend class BasicLayout;
public:
Align Alignment;
size_t ContentSize = 0;
uint64_t ZeroFillSize = 0;
JITTargetAddress Addr = 0;
char *WorkingMem;
private:
size_t NextWorkingMemOffset = 0;
std::vector<Block *> ContentBlocks, ZeroFillBlocks;
};
/// A convenience class that further groups segments based on memory
/// deallocation policy. This allows clients to make two slab allocations:
/// one for all standard segments, and one for all finalize segments.
struct ContiguousPageBasedLayoutSizes {
uint64_t StandardSegs = 0;
uint64_t FinalizeSegs = 0;
uint64_t total() const { return StandardSegs + FinalizeSegs; }
};
private:
using SegmentMap = AllocGroupSmallMap<Segment>;
public:
BasicLayout(LinkGraph &G);
/// Return a reference to the graph this allocation was created from.
LinkGraph &getGraph() { return G; }
/// Returns the total number of bytes required to allocate all segments (with each
/// segment padded out to page size) for all standard segments, and all
/// finalize segments.
///
/// This is a convenience function for the common case where the segments will
/// be allocated contiguously.
///
/// This function will return an error if any segment has an alignment that
/// is higher than a page.
Expected<ContiguousPageBasedLayoutSizes>
getContiguousPageBasedLayoutSizes(uint64_t PageSize);
/// Returns an iterator over the segments of the layout.
iterator_range<SegmentMap::iterator> segments() {
return {Segments.begin(), Segments.end()};
}
/// Apply the layout to the graph.
Error apply();
/// Returns a reference to the AllocActions in the graph.
/// This convenience function saves callers from having to #include
/// LinkGraph.h if all they need are allocation actions.
JITLinkMemoryManager::AllocActions &graphAllocActions();
private:
LinkGraph &G;
SegmentMap Segments;
};
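As a rough sketch of how a custom memory manager might drive BasicLayout (not how this patch's InProcessMemoryManager is written), where reserveSlab is a hypothetical helper returning a page-aligned, in-process slab used as both working and executor memory:

#include "llvm/ExecutionEngine/JITLink/JITLinkMemoryManager.h"

// Hypothetical helper: returns a page-aligned slab of at least Size bytes.
char *reserveSlab(size_t Size);

// Sketch: assign addresses/working memory, then apply the layout. Ignores
// the Standard/Finalize segment split for brevity.
llvm::Error layOutGraph(llvm::jitlink::LinkGraph &G, uint64_t PageSize) {
  llvm::jitlink::BasicLayout BL(G);

  auto Sizes = BL.getContiguousPageBasedLayoutSizes(PageSize);
  if (!Sizes)
    return Sizes.takeError(); // also rejects over-page-aligned segments

  char *NextMem = reserveSlab(Sizes->total());

  for (auto &KV : BL.segments()) {
    auto &Seg = KV.second;
    Seg.Addr = llvm::pointerToJITTargetAddress(NextMem); // in-process: same memory
    Seg.WorkingMem = NextMem;
    NextMem += llvm::alignTo(Seg.ContentSize + Seg.ZeroFillSize, PageSize);
  }

  // Copies block content into working memory and updates block addresses.
  return BL.apply();
}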
/// A utility class for making simple allocations using JITLinkMemoryManager.
///
/// SimpleSegmentAlloc takes a mapping of AllocGroups to Segments and uses
/// this to create a LinkGraph with one Section (containing one Block) per
/// Segment. Clients can obtain a pointer to the working memory and executor
/// address of that block using the Segment's AllocGroup. Once memory has been
/// populated, clients can call finalize to finalize the memory.
class SimpleSegmentAlloc {
public:
/// Describes a segment to be allocated.
struct Segment {
Segment() = default;
Segment(size_t ContentSize, Align ContentAlign)
: ContentSize(ContentSize), ContentAlign(ContentAlign) {}
size_t ContentSize = 0;
Align ContentAlign;
};
/// Describes the segment working memory and executor address.
struct SegmentInfo {
JITTargetAddress Addr = 0;
MutableArrayRef<char> WorkingMem;
};
using SegmentMap = AllocGroupSmallMap<Segment>;
using OnCreatedFunction = unique_function<void(Expected<SimpleSegmentAlloc>)>;
using OnFinalizedFunction =
JITLinkMemoryManager::InFlightAlloc::OnFinalizedFunction;
static void Create(JITLinkMemoryManager &MemMgr, const JITLinkDylib *JD,
SegmentMap Segments, OnCreatedFunction OnCreated);
static Expected<SimpleSegmentAlloc> Create(JITLinkMemoryManager &MemMgr,
const JITLinkDylib *JD,
SegmentMap Segments);
SimpleSegmentAlloc(SimpleSegmentAlloc &&);
SimpleSegmentAlloc &operator=(SimpleSegmentAlloc &&);
~SimpleSegmentAlloc();
/// Returns the SegmentInfo for the given group.
SegmentInfo getSegInfo(AllocGroup AG);
/// Finalize all groups (async version).
void finalize(OnFinalizedFunction OnFinalized) {
Alloc->finalize(std::move(OnFinalized));
}
/// Finalize all groups.
Expected<JITLinkMemoryManager::FinalizedAlloc> finalize() {
return Alloc->finalize();
}
private:
SimpleSegmentAlloc(
std::unique_ptr<LinkGraph> G, AllocGroupSmallMap<Block *> ContentBlocks,
std::unique_ptr<JITLinkMemoryManager::InFlightAlloc> Alloc);
std::unique_ptr<LinkGraph> G;
AllocGroupSmallMap<Block *> ContentBlocks;
std::unique_ptr<JITLinkMemoryManager::InFlightAlloc> Alloc;
};
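A usage sketch (not from this commit; the size and alignment are arbitrary) that allocates, fills, and finalizes a single read/write segment:

#include "llvm/ExecutionEngine/JITLink/JITLinkMemoryManager.h"
#include <cstring>

// Sketch: one RW segment, zeroed by hand, then finalized.
llvm::Expected<llvm::jitlink::JITLinkMemoryManager::FinalizedAlloc>
allocateScratch(llvm::jitlink::JITLinkMemoryManager &MemMgr) {
  using namespace llvm::jitlink;

  auto Alloc = SimpleSegmentAlloc::Create(
      MemMgr, /*JD=*/nullptr,
      {{MemProt::Read | MemProt::Write,
        SimpleSegmentAlloc::Segment(1024, llvm::Align(8))}});
  if (!Alloc)
    return Alloc.takeError();

  auto SegInfo = Alloc->getSegInfo(MemProt::Read | MemProt::Write);
  std::memset(SegInfo.WorkingMem.data(), 0, SegInfo.WorkingMem.size());

  // Transfers content to executor memory and returns the finalized handle,
  // which must later be passed to MemMgr.deallocate(...).
  return Alloc->finalize();
}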
/// A JITLinkMemoryManager that allocates in-process memory.
class InProcessMemoryManager : public JITLinkMemoryManager {
public:
Expected<std::unique_ptr<Allocation>>
allocate(const JITLinkDylib *JD, const SegmentsRequestMap &Request) override;
class IPInFlightAlloc;
/// Attempts to auto-detect the host page size.
static Expected<std::unique_ptr<InProcessMemoryManager>> Create();
/// Create an instance using the given page size.
InProcessMemoryManager(uint64_t PageSize) : PageSize(PageSize) {}
void allocate(const JITLinkDylib *JD, LinkGraph &G,
OnAllocatedFunction OnAllocated) override;
// Use overloads from base class.
using JITLinkMemoryManager::allocate;
void deallocate(std::vector<FinalizedAlloc> Alloc,
OnDeallocatedFunction OnDeallocated) override;
// Use overloads from base class.
using JITLinkMemoryManager::deallocate;
private:
// FIXME: Use an in-place array instead of a vector for DeallocActions.
// There shouldn't need to be a heap alloc for this.
struct FinalizedAllocInfo {
sys::MemoryBlock StandardSegments;
std::vector<AllocActionCall> DeallocActions;
};
FinalizedAlloc
createFinalizedAlloc(sys::MemoryBlock StandardSegments,
std::vector<AllocActionCall> DeallocActions);
uint64_t PageSize;
std::mutex FinalizedAllocsMutex;
RecyclingAllocator<BumpPtrAllocator, FinalizedAllocInfo> FinalizedAllocInfos;
};
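This mirrors the updated llvm-jitlink/LLJIT call sites earlier in this commit: Create() now returns an Expected, so typical construction looks something like the following sketch:

#include "llvm/ExecutionEngine/Orc/ObjectLinkingLayer.h"

// Sketch: build an ObjectLinkingLayer over the new factory function.
llvm::Expected<std::unique_ptr<llvm::orc::ObjectLinkingLayer>>
createLinkingLayer(llvm::orc::ExecutionSession &ES) {
  auto MemMgr = llvm::jitlink::InProcessMemoryManager::Create();
  if (!MemMgr)
    return MemMgr.takeError();
  return std::make_unique<llvm::orc::ObjectLinkingLayer>(ES,
                                                         std::move(*MemMgr));
}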
} // end namespace jitlink

View File

@@ -0,0 +1,225 @@
//===-------- MemoryFlags.h - Memory allocation flags -----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Defines types and operations related to memory protection and allocation
// lifetimes.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_EXECUTIONENGINE_JITLINK_MEMORYFLAGS_H
#define LLVM_EXECUTIONENGINE_JITLINK_MEMORYFLAGS_H
#include "llvm/ADT/BitmaskEnum.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/Support/Memory.h"
#include "llvm/Support/raw_ostream.h"
namespace llvm {
namespace jitlink {
/// Describes Read/Write/Exec permissions for memory.
enum class MemProt {
None = 0,
Read = 1U << 0,
Write = 1U << 1,
Exec = 1U << 2,
LLVM_MARK_AS_BITMASK_ENUM(/* LargestValue = */ Exec)
};
/// Print a MemProt as an RWX triple.
raw_ostream &operator<<(raw_ostream &OS, MemProt MP);
/// Convert a MemProt value to a corresponding sys::Memory::ProtectionFlags
/// value.
inline sys::Memory::ProtectionFlags toSysMemoryProtectionFlags(MemProt MP) {
std::underlying_type_t<sys::Memory::ProtectionFlags> PF = 0;
if ((MP & MemProt::Read) != MemProt::None)
PF |= sys::Memory::MF_READ;
if ((MP & MemProt::Write) != MemProt::None)
PF |= sys::Memory::MF_WRITE;
if ((MP & MemProt::Exec) != MemProt::None)
PF |= sys::Memory::MF_EXEC;
return static_cast<sys::Memory::ProtectionFlags>(PF);
}
/// Convert a sys::Memory::ProtectionFlags value to a corresponding MemProt
/// value.
inline MemProt fromSysMemoryProtectionFlags(sys::Memory::ProtectionFlags PF) {
MemProt MP = MemProt::None;
if (PF & sys::Memory::MF_READ)
MP |= MemProt::Read;
if (PF & sys::Memory::MF_WRITE)
MP |= MemProt::Write;
if (PF & sys::Memory::MF_EXEC)
MP |= MemProt::Exec;
return MP;
}
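A quick round-trip check of the two conversions (a sketch, not part of the patch, and assuming the MF_EXEC case above maps to MemProt::Exec):

#include "llvm/ExecutionEngine/JITLink/MemoryFlags.h"
#include <cassert>

// Sketch: MemProt <-> sys::Memory::ProtectionFlags should round-trip.
inline void checkProtRoundTrip() {
  using namespace llvm::jitlink;
  MemProt RX = MemProt::Read | MemProt::Exec;
  auto PF = toSysMemoryProtectionFlags(RX);
  assert(fromSysMemoryProtectionFlags(PF) == RX && "round trip failed");
  (void)PF;
}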
/// Describes a memory deallocation policy for memory to be allocated by a
/// JITLinkMemoryManager.
///
/// All memory allocated by a call to JITLinkMemoryManager::allocate should be
/// deallocated if a call is made to
/// JITLinkMemoryManager::InFlightAllocation::abandon. The policies below apply
/// to finalized allocations.
enum class MemDeallocPolicy {
/// Standard memory should be deallocated when the deallocate method is called
/// for the finalized allocation.
Standard,
/// Finalize memory should be overwritten and then deallocated after all
/// finalization functions have been run.
Finalize
};
/// Print a MemDeallocPolicy.
raw_ostream &operator<<(raw_ostream &OS, MemDeallocPolicy MDP);
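For instance (a sketch, not from this patch; the section name is hypothetical), data that is only needed while finalize actions run can be placed in a section with the Finalize policy so its memory is reclaimed right after finalization:

#include "llvm/ExecutionEngine/JITLink/JITLink.h"

// Sketch: a section whose memory is discarded once finalization is done.
llvm::jitlink::Section &
createActionContextSection(llvm::jitlink::LinkGraph &G) {
  using namespace llvm::jitlink;
  auto &Sec = G.createSection("__hypothetical_actions_ctx",
                              MemProt::Read | MemProt::Write);
  Sec.setMemDeallocPolicy(MemDeallocPolicy::Finalize);
  return Sec;
}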
/// A pair of memory protections and allocation policies.
///
/// Optimized for use as a small map key.
class AllocGroup {
friend struct llvm::DenseMapInfo<AllocGroup>;
using underlying_type = uint8_t;
static constexpr unsigned BitsForProt = 3;
static constexpr unsigned BitsForDeallocPolicy = 1;
static constexpr unsigned MaxIdentifiers =
1U << (BitsForProt + BitsForDeallocPolicy);
public:
static constexpr unsigned NumGroups = MaxIdentifiers;
/// Create a default AllocGroup. No memory protections, standard
/// deallocation policy.
AllocGroup() = default;
/// Create an AllocGroup from a MemProt only -- uses
/// MemDeallocPolicy::Standard.
AllocGroup(MemProt MP) : Id(static_cast<underlying_type>(MP)) {}
/// Create an AllocGroup from a MemProt and a MemDeallocPolicy.
AllocGroup(MemProt MP, MemDeallocPolicy MDP)
: Id(static_cast<underlying_type>(MP) |
(static_cast<underlying_type>(MDP) << BitsForProt)) {}
/// Returns the MemProt for this group.
MemProt getMemProt() const {
return static_cast<MemProt>(Id & ((1U << BitsForProt) - 1));
}
/// Returns the MemDeallocPolicy for this group.
MemDeallocPolicy getMemDeallocPolicy() const {
return static_cast<MemDeallocPolicy>(Id >> BitsForProt);
}
friend bool operator==(const AllocGroup &LHS, const AllocGroup &RHS) {
return LHS.Id == RHS.Id;
}
friend bool operator!=(const AllocGroup &LHS, const AllocGroup &RHS) {
return !(LHS == RHS);
}
friend bool operator<(const AllocGroup &LHS, const AllocGroup &RHS) {
return LHS.Id < RHS.Id;
}
private:
AllocGroup(underlying_type RawId) : Id(RawId) {}
underlying_type Id = 0;
};
/// A specialized small-map for AllocGroups.
///
/// Iteration order is guaranteed to match key ordering.
template <typename T> class AllocGroupSmallMap {
private:
using ElemT = std::pair<AllocGroup, T>;
using VectorTy = SmallVector<ElemT, 4>;
static bool compareKey(const ElemT &E, const AllocGroup &G) {
return E.first < G;
}
public:
using iterator = typename VectorTy::iterator;
AllocGroupSmallMap() = default;
AllocGroupSmallMap(std::initializer_list<std::pair<AllocGroup, T>> Inits) {
Elems.reserve(Inits.size());
for (const auto &E : Inits)
Elems.push_back(E);
llvm::sort(Elems, [](const ElemT &LHS, const ElemT &RHS) {
return LHS.first < RHS.first;
});
}
iterator begin() { return Elems.begin(); }
iterator end() { return Elems.end(); }
iterator find(AllocGroup G) {
auto I = lower_bound(Elems, G, compareKey);
return (I != end() && I->first == G) ? I : end();
}
bool empty() const { return Elems.empty(); }
size_t size() const { return Elems.size(); }
T &operator[](AllocGroup G) {
auto I = lower_bound(Elems, G, compareKey);
if (I == Elems.end() || I->first != G)
I = Elems.insert(I, std::make_pair(G, T()));
return I->second;
}
private:
VectorTy Elems;
};
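A small illustration of the two types together (a sketch, not part of the patch):

#include "llvm/ExecutionEngine/JITLink/MemoryFlags.h"
#include <cassert>

// Sketch: AllocGroup packs MemProt in the low bits and MemDeallocPolicy in
// the next bit; AllocGroupSmallMap orders its entries by that packed Id.
inline void allocGroupExample() {
  using namespace llvm::jitlink;
  AllocGroup RX(MemProt::Read | MemProt::Exec);
  AllocGroup RWFinalize(MemProt::Read | MemProt::Write,
                        MemDeallocPolicy::Finalize);
  assert(RX.getMemProt() == (MemProt::Read | MemProt::Exec));
  assert(RWFinalize.getMemDeallocPolicy() == MemDeallocPolicy::Finalize);

  AllocGroupSmallMap<unsigned> SegSizes;
  SegSizes[RX] = 4096;          // operator[] default-constructs missing entries
  SegSizes[RWFinalize] += 128;
  assert(SegSizes.size() == 2);
}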
/// Print an AllocGroup.
raw_ostream &operator<<(raw_ostream &OS, AllocGroup AG);
} // end namespace jitlink
template <> struct DenseMapInfo<jitlink::MemProt> {
static inline jitlink::MemProt getEmptyKey() {
return jitlink::MemProt(~uint8_t(0));
}
static inline jitlink::MemProt getTombstoneKey() {
return jitlink::MemProt(~uint8_t(0) - 1);
}
static unsigned getHashValue(const jitlink::MemProt &Val) {
using UT = std::underlying_type_t<jitlink::MemProt>;
return DenseMapInfo<UT>::getHashValue(static_cast<UT>(Val));
}
static bool isEqual(const jitlink::MemProt &LHS,
const jitlink::MemProt &RHS) {
return LHS == RHS;
}
};
template <> struct DenseMapInfo<jitlink::AllocGroup> {
static inline jitlink::AllocGroup getEmptyKey() {
return jitlink::AllocGroup(~uint8_t(0));
}
static inline jitlink::AllocGroup getTombstoneKey() {
return jitlink::AllocGroup(~uint8_t(0) - 1);
}
static unsigned getHashValue(const jitlink::AllocGroup &Val) {
return DenseMapInfo<jitlink::AllocGroup::underlying_type>::getHashValue(
Val.Id);
}
static bool isEqual(const jitlink::AllocGroup &LHS,
const jitlink::AllocGroup &RHS) {
return LHS == RHS;
}
};
} // end namespace llvm
#endif // LLVM_EXECUTIONENGINE_JITLINK_MEMORYFLAGS_H

View File

@@ -14,6 +14,7 @@
#define LLVM_EXECUTIONENGINE_ORC_EPCDEBUGOBJECTREGISTRAR_H
#include "llvm/ExecutionEngine/JITSymbol.h"
#include "llvm/ExecutionEngine/Orc/Shared/ExecutorAddress.h"
#include "llvm/ExecutionEngine/Orc/Shared/WrapperFunctionUtils.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/Memory.h"
@@ -32,7 +33,7 @@ class ExecutionSession;
/// Abstract interface for registering debug objects in the executor process.
class DebugObjectRegistrar {
public:
virtual Error registerDebugObject(sys::MemoryBlock) = 0;
virtual Error registerDebugObject(ExecutorAddrRange TargetMem) = 0;
virtual ~DebugObjectRegistrar() {}
};
@@ -43,7 +44,7 @@ public:
EPCDebugObjectRegistrar(ExecutionSession &ES, ExecutorAddr RegisterFn)
: ES(ES), RegisterFn(RegisterFn) {}
Error registerDebugObject(sys::MemoryBlock TargetMem) override;
Error registerDebugObject(ExecutorAddrRange TargetMem) override;
private:
ExecutionSession &ES;

View File

@@ -39,17 +39,58 @@ public:
EPCGenericJITLinkMemoryManager(ExecutorProcessControl &EPC, SymbolAddrs SAs)
: EPC(EPC), SAs(SAs) {}
Expected<std::unique_ptr<Allocation>>
allocate(const jitlink::JITLinkDylib *JD,
const SegmentsRequestMap &Request) override;
void allocate(const jitlink::JITLinkDylib *JD, jitlink::LinkGraph &G,
OnAllocatedFunction OnAllocated) override;
// Use overloads from base class.
using JITLinkMemoryManager::allocate;
void deallocate(std::vector<FinalizedAlloc> Allocs,
OnDeallocatedFunction OnDeallocated) override;
// Use overloads from base class.
using JITLinkMemoryManager::deallocate;
private:
class Alloc;
class InFlightAlloc;
void completeAllocation(ExecutorAddr AllocAddr, jitlink::BasicLayout BL,
OnAllocatedFunction OnAllocated);
ExecutorProcessControl &EPC;
SymbolAddrs SAs;
};
namespace shared {
/// FIXME: This specialization should be moved into TargetProcessControlTypes.h
/// (or wherever those types get merged to) once ORC depends on JITLink.
template <>
class SPSSerializationTraits<SPSExecutorAddr,
jitlink::JITLinkMemoryManager::FinalizedAlloc> {
public:
static size_t size(const jitlink::JITLinkMemoryManager::FinalizedAlloc &FA) {
return SPSArgList<SPSExecutorAddr>::size(ExecutorAddr(FA.getAddress()));
}
static bool
serialize(SPSOutputBuffer &OB,
const jitlink::JITLinkMemoryManager::FinalizedAlloc &FA) {
return SPSArgList<SPSExecutorAddr>::serialize(
OB, ExecutorAddr(FA.getAddress()));
}
static bool deserialize(SPSInputBuffer &IB,
jitlink::JITLinkMemoryManager::FinalizedAlloc &FA) {
ExecutorAddr A;
if (!SPSArgList<SPSExecutorAddr>::deserialize(IB, A))
return false;
FA = jitlink::JITLinkMemoryManager::FinalizedAlloc(A.getValue());
return true;
}
};
} // end namespace shared
} // end namespace orc
} // end namespace llvm

View File

@@ -126,7 +126,7 @@ public:
}
private:
using Allocation = jitlink::JITLinkMemoryManager::Allocation;
using FinalizedAlloc = jitlink::JITLinkMemoryManager::FinalizedAlloc;
struct IndirectStubInfo {
IndirectStubInfo() = default;
@@ -149,12 +149,12 @@ private:
ExecutorProcessControl &EPC;
std::unique_ptr<ABISupport> ABI;
JITTargetAddress ResolverBlockAddr;
std::unique_ptr<jitlink::JITLinkMemoryManager::Allocation> ResolverBlock;
FinalizedAlloc ResolverBlock;
std::unique_ptr<TrampolinePool> TP;
std::unique_ptr<LazyCallThroughManager> LCTM;
std::vector<IndirectStubInfo> AvailableIndirectStubs;
std::vector<std::unique_ptr<Allocation>> IndirectStubAllocs;
std::vector<FinalizedAlloc> IndirectStubAllocs;
};
/// This will call writeResolver on the given EPCIndirectionUtils instance

View File

@@ -184,13 +184,13 @@ public:
}
private:
using AllocPtr = std::unique_ptr<jitlink::JITLinkMemoryManager::Allocation>;
using FinalizedAlloc = jitlink::JITLinkMemoryManager::FinalizedAlloc;
void modifyPassConfig(MaterializationResponsibility &MR,
jitlink::LinkGraph &G,
jitlink::PassConfiguration &PassConfig);
void notifyLoaded(MaterializationResponsibility &MR);
Error notifyEmitted(MaterializationResponsibility &MR, AllocPtr Alloc);
Error notifyEmitted(MaterializationResponsibility &MR, FinalizedAlloc FA);
Error handleRemoveResources(ResourceKey K) override;
void handleTransferResources(ResourceKey DstKey, ResourceKey SrcKey) override;
@@ -201,7 +201,7 @@ private:
bool OverrideObjectFlags = false;
bool AutoClaimObjectSymbols = false;
ReturnObjectBufferFunction ReturnObjectBuffer;
DenseMap<ResourceKey, std::vector<AllocPtr>> Allocs;
DenseMap<ResourceKey, std::vector<FinalizedAlloc>> Allocs;
std::vector<std::unique_ptr<Plugin>> Plugins;
};

View File

@@ -37,7 +37,7 @@ namespace sys {
/// The size as it was allocated. This is always greater or equal to the
/// size that was originally requested.
size_t allocatedSize() const { return AllocatedSize; }
private:
void *Address; ///< Address of first byte of memory area
size_t AllocatedSize; ///< Size, in bytes of the memory area

View File

@@ -3,6 +3,7 @@ add_llvm_component_library(LLVMJITLink
JITLink.cpp
JITLinkGeneric.cpp
JITLinkMemoryManager.cpp
MemoryFlags.cpp
# Formats:

View File

@@ -36,11 +36,9 @@ protected:
}
Section &getCommonSection() {
if (!CommonSection) {
auto Prot = static_cast<sys::Memory::ProtectionFlags>(
sys::Memory::MF_READ | sys::Memory::MF_WRITE);
CommonSection = &G->createSection(CommonSectionName, Prot);
}
if (!CommonSection)
CommonSection =
&G->createSection(CommonSectionName, MemProt::Read | MemProt::Write);
return *CommonSection;
}
@@ -295,13 +293,11 @@ template <typename ELFT> Error ELFLinkGraphBuilder<ELFT>::graphifySections() {
});
// Get the section's memory protection flags.
sys::Memory::ProtectionFlags Prot;
MemProt Prot;
if (Sec.sh_flags & ELF::SHF_EXECINSTR)
Prot = static_cast<sys::Memory::ProtectionFlags>(sys::Memory::MF_READ |
sys::Memory::MF_EXEC);
Prot = MemProt::Read | MemProt::Exec;
else
Prot = static_cast<sys::Memory::ProtectionFlags>(sys::Memory::MF_READ |
sys::Memory::MF_WRITE);
Prot = MemProt::Read | MemProt::Write;
// For now we just use this to skip the "undefined" section, probably need
// to revisit.

View File

@@ -80,16 +80,14 @@ public:
private:
Section &getGOTSection() const {
if (!GOTSection)
GOTSection = &G.createSection("$__GOT", sys::Memory::MF_READ);
GOTSection = &G.createSection("$__GOT", MemProt::Read);
return *GOTSection;
}
Section &getStubsSection() const {
if (!StubsSection) {
auto StubsProt = static_cast<sys::Memory::ProtectionFlags>(
sys::Memory::MF_READ | sys::Memory::MF_EXEC);
StubsSection = &G.createSection("$__STUBS", StubsProt);
}
if (!StubsSection)
StubsSection =
&G.createSection("$__STUBS", MemProt::Read | MemProt::Exec);
return *StubsSection;
}

View File

@@ -65,8 +65,7 @@ public:
Section &getTLSInfoSection() const {
if (!TLSInfoSection)
TLSInfoSection =
&G.createSection(ELFTLSInfoSectionName, sys::Memory::MF_READ);
TLSInfoSection = &G.createSection(ELFTLSInfoSectionName, MemProt::Read);
return *TLSInfoSection;
}
@@ -172,16 +171,14 @@ public:
private:
Section &getGOTSection() const {
if (!GOTSection)
GOTSection = &G.createSection(ELFGOTSectionName, sys::Memory::MF_READ);
GOTSection = &G.createSection(ELFGOTSectionName, MemProt::Read);
return *GOTSection;
}
Section &getStubsSection() const {
if (!StubsSection) {
auto StubsProt = static_cast<sys::Memory::ProtectionFlags>(
sys::Memory::MF_READ | sys::Memory::MF_EXEC);
StubsSection = &G.createSection("$__STUBS", StubsProt);
}
if (!StubsSection)
StubsSection =
&G.createSection("$__STUBS", MemProt::Read | MemProt::Exec);
return *StubsSection;
}

View File

@@ -48,12 +48,21 @@ void JITLinkerBase::linkPhase1(std::unique_ptr<JITLinkerBase> Self) {
if (auto Err = runPasses(Passes.PostPrunePasses))
return Ctx->notifyFailed(std::move(Err));
// Sort blocks into segments.
auto Layout = layOutBlocks();
Ctx->getMemoryManager().allocate(
Ctx->getJITLinkDylib(), *G,
[S = std::move(Self)](AllocResult AR) mutable {
auto *TmpSelf = S.get();
TmpSelf->linkPhase2(std::move(S), std::move(AR));
});
}
// Allocate memory for segments.
if (auto Err = allocateSegments(Layout))
return Ctx->notifyFailed(std::move(Err));
void JITLinkerBase::linkPhase2(std::unique_ptr<JITLinkerBase> Self,
AllocResult AR) {
if (AR)
Alloc = std::move(*AR);
else
return Ctx->notifyFailed(AR.takeError());
LLVM_DEBUG({
dbgs() << "Link graph \"" << G->getName()
@@ -73,16 +82,16 @@ void JITLinkerBase::linkPhase1(std::unique_ptr<JITLinkerBase> Self) {
auto ExternalSymbols = getExternalSymbolNames();
// If there are no external symbols then proceed immediately with phase 2.
// If there are no external symbols then proceed immediately with phase 3.
if (ExternalSymbols.empty()) {
LLVM_DEBUG({
dbgs() << "No external symbols for " << G->getName()
<< ". Proceeding immediately with link phase 2.\n";
<< ". Proceeding immediately with link phase 3.\n";
});
// FIXME: Once callee expressions are defined to be sequenced before
// argument expressions (c++17) we can simplify this. See below.
auto &TmpSelf = *Self;
TmpSelf.linkPhase2(std::move(Self), AsyncLookupResult(), std::move(Layout));
TmpSelf.linkPhase3(std::move(Self), AsyncLookupResult());
return;
}
@@ -100,37 +109,31 @@ void JITLinkerBase::linkPhase1(std::unique_ptr<JITLinkerBase> Self) {
//
// Ctx->lookup(std::move(UnresolvedExternals),
// [Self=std::move(Self)](Expected<AsyncLookupResult> Result) {
// Self->linkPhase2(std::move(Self), std::move(Result));
// Self->linkPhase3(std::move(Self), std::move(Result));
// });
auto *TmpCtx = Ctx.get();
TmpCtx->lookup(std::move(ExternalSymbols),
createLookupContinuation(
[S = std::move(Self), L = std::move(Layout)](
Expected<AsyncLookupResult> LookupResult) mutable {
auto &TmpSelf = *S;
TmpSelf.linkPhase2(std::move(S), std::move(LookupResult),
std::move(L));
}));
Ctx->lookup(std::move(ExternalSymbols),
createLookupContinuation(
[S = std::move(Self)](
Expected<AsyncLookupResult> LookupResult) mutable {
auto &TmpSelf = *S;
TmpSelf.linkPhase3(std::move(S), std::move(LookupResult));
}));
}
void JITLinkerBase::linkPhase2(std::unique_ptr<JITLinkerBase> Self,
Expected<AsyncLookupResult> LR,
SegmentLayoutMap Layout) {
void JITLinkerBase::linkPhase3(std::unique_ptr<JITLinkerBase> Self,
Expected<AsyncLookupResult> LR) {
LLVM_DEBUG({
dbgs() << "Starting link phase 2 for graph " << G->getName() << "\n";
dbgs() << "Starting link phase 3 for graph " << G->getName() << "\n";
});
// If the lookup failed, bail out.
if (!LR)
return deallocateAndBailOut(LR.takeError());
return abandonAllocAndBailOut(std::move(Self), LR.takeError());
// Assign addresses to external addressables.
applyLookupResult(*LR);
// Copy block content to working memory.
copyBlockContentToWorkingMemory(Layout, *Alloc);
LLVM_DEBUG({
dbgs() << "Link graph \"" << G->getName()
<< "\" before pre-fixup passes:\n";
@@ -138,7 +141,7 @@ void JITLinkerBase::linkPhase2(std::unique_ptr<JITLinkerBase> Self,
});
if (auto Err = runPasses(Passes.PreFixupPasses))
return deallocateAndBailOut(std::move(Err));
return abandonAllocAndBailOut(std::move(Self), std::move(Err));
LLVM_DEBUG({
dbgs() << "Link graph \"" << G->getName() << "\" before copy-and-fixup:\n";
@@ -147,7 +150,7 @@ void JITLinkerBase::linkPhase2(std::unique_ptr<JITLinkerBase> Self,
// Fix up block content.
if (auto Err = fixUpBlocks(*G))
return deallocateAndBailOut(std::move(Err));
return abandonAllocAndBailOut(std::move(Self), std::move(Err));
LLVM_DEBUG({
dbgs() << "Link graph \"" << G->getName() << "\" after copy-and-fixup:\n";
@@ -155,27 +158,25 @@ void JITLinkerBase::linkPhase2(std::unique_ptr<JITLinkerBase> Self,
});
if (auto Err = runPasses(Passes.PostFixupPasses))
return deallocateAndBailOut(std::move(Err));
return abandonAllocAndBailOut(std::move(Self), std::move(Err));
// FIXME: Use move capture once we have c++14.
auto *UnownedSelf = Self.release();
auto Phase3Continuation = [UnownedSelf](Error Err) {
std::unique_ptr<JITLinkerBase> Self(UnownedSelf);
UnownedSelf->linkPhase3(std::move(Self), std::move(Err));
};
Alloc->finalizeAsync(std::move(Phase3Continuation));
Alloc->finalize([S = std::move(Self)](FinalizeResult FR) mutable {
auto *TmpSelf = S.get();
TmpSelf->linkPhase4(std::move(S), std::move(FR));
});
}
void JITLinkerBase::linkPhase3(std::unique_ptr<JITLinkerBase> Self, Error Err) {
void JITLinkerBase::linkPhase4(std::unique_ptr<JITLinkerBase> Self,
FinalizeResult FR) {
LLVM_DEBUG({
dbgs() << "Starting link phase 3 for graph " << G->getName() << "\n";
dbgs() << "Starting link phase 4 for graph " << G->getName() << "\n";
});
if (Err)
return deallocateAndBailOut(std::move(Err));
Ctx->notifyFinalized(std::move(Alloc));
if (!FR)
return Ctx->notifyFailed(FR.takeError());
Ctx->notifyFinalized(std::move(*FR));
LLVM_DEBUG({ dbgs() << "Link of graph " << G->getName() << " complete\n"; });
}
@@ -187,131 +188,6 @@ Error JITLinkerBase::runPasses(LinkGraphPassList &Passes) {
return Error::success();
}
JITLinkerBase::SegmentLayoutMap JITLinkerBase::layOutBlocks() {
SegmentLayoutMap Layout;
/// Partition blocks based on permissions and content vs. zero-fill.
for (auto *B : G->blocks()) {
auto &SegLists = Layout[B->getSection().getProtectionFlags()];
if (!B->isZeroFill())
SegLists.ContentBlocks.push_back(B);
else
SegLists.ZeroFillBlocks.push_back(B);
}
/// Sort blocks within each list.
for (auto &KV : Layout) {
auto CompareBlocks = [](const Block *LHS, const Block *RHS) {
// Sort by section, address and size
if (LHS->getSection().getOrdinal() != RHS->getSection().getOrdinal())
return LHS->getSection().getOrdinal() < RHS->getSection().getOrdinal();
if (LHS->getAddress() != RHS->getAddress())
return LHS->getAddress() < RHS->getAddress();
return LHS->getSize() < RHS->getSize();
};
auto &SegLists = KV.second;
llvm::sort(SegLists.ContentBlocks, CompareBlocks);
llvm::sort(SegLists.ZeroFillBlocks, CompareBlocks);
}
LLVM_DEBUG({
dbgs() << "Computed segment ordering:\n";
for (auto &KV : Layout) {
dbgs() << " Segment "
<< static_cast<sys::Memory::ProtectionFlags>(KV.first) << ":\n";
auto &SL = KV.second;
for (auto &SIEntry :
{std::make_pair(&SL.ContentBlocks, "content block"),
std::make_pair(&SL.ZeroFillBlocks, "zero-fill block")}) {
dbgs() << " " << SIEntry.second << ":\n";
for (auto *B : *SIEntry.first)
dbgs() << " " << *B << "\n";
}
}
});
return Layout;
}
Error JITLinkerBase::allocateSegments(const SegmentLayoutMap &Layout) {
// Compute segment sizes and allocate memory.
LLVM_DEBUG(dbgs() << "JIT linker requesting: { ");
JITLinkMemoryManager::SegmentsRequestMap Segments;
for (auto &KV : Layout) {
auto &Prot = KV.first;
auto &SegLists = KV.second;
uint64_t SegAlign = 1;
// Calculate segment content size.
size_t SegContentSize = 0;
for (auto *B : SegLists.ContentBlocks) {
SegAlign = std::max(SegAlign, B->getAlignment());
SegContentSize = alignToBlock(SegContentSize, *B);
SegContentSize += B->getSize();
}
uint64_t SegZeroFillStart = SegContentSize;
uint64_t SegZeroFillEnd = SegZeroFillStart;
for (auto *B : SegLists.ZeroFillBlocks) {
SegAlign = std::max(SegAlign, B->getAlignment());
SegZeroFillEnd = alignToBlock(SegZeroFillEnd, *B);
SegZeroFillEnd += B->getSize();
}
Segments[Prot] = {SegAlign, SegContentSize,
SegZeroFillEnd - SegZeroFillStart};
LLVM_DEBUG({
dbgs() << (&KV == &*Layout.begin() ? "" : "; ")
<< static_cast<sys::Memory::ProtectionFlags>(Prot)
<< ": alignment = " << SegAlign
<< ", content size = " << SegContentSize
<< ", zero-fill size = " << (SegZeroFillEnd - SegZeroFillStart);
});
}
LLVM_DEBUG(dbgs() << " }\n");
if (auto AllocOrErr =
Ctx->getMemoryManager().allocate(Ctx->getJITLinkDylib(), Segments))
Alloc = std::move(*AllocOrErr);
else
return AllocOrErr.takeError();
LLVM_DEBUG({
dbgs() << "JIT linker got memory (working -> target):\n";
for (auto &KV : Layout) {
auto Prot = static_cast<sys::Memory::ProtectionFlags>(KV.first);
dbgs() << " " << Prot << ": "
<< (const void *)Alloc->getWorkingMemory(Prot).data() << " -> "
<< formatv("{0:x16}", Alloc->getTargetMemory(Prot)) << "\n";
}
});
// Update block target addresses.
for (auto &KV : Layout) {
auto &Prot = KV.first;
auto &SL = KV.second;
JITTargetAddress NextBlockAddr =
Alloc->getTargetMemory(static_cast<sys::Memory::ProtectionFlags>(Prot));
for (auto *SIList : {&SL.ContentBlocks, &SL.ZeroFillBlocks})
for (auto *B : *SIList) {
NextBlockAddr = alignToBlock(NextBlockAddr, *B);
B->setAddress(NextBlockAddr);
NextBlockAddr += B->getSize();
}
}
return Error::success();
}
JITLinkContext::LookupMap JITLinkerBase::getExternalSymbolNames() const {
// Identify unresolved external symbols.
JITLinkContext::LookupMap UnresolvedExternals;
@@ -351,90 +227,13 @@ void JITLinkerBase::applyLookupResult(AsyncLookupResult Result) {
});
}
void JITLinkerBase::copyBlockContentToWorkingMemory(
const SegmentLayoutMap &Layout, JITLinkMemoryManager::Allocation &Alloc) {
LLVM_DEBUG(dbgs() << "Copying block content:\n");
for (auto &KV : Layout) {
auto &Prot = KV.first;
auto &SegLayout = KV.second;
auto SegMem =
Alloc.getWorkingMemory(static_cast<sys::Memory::ProtectionFlags>(Prot));
LLVM_DEBUG({
dbgs() << " Processing segment "
<< static_cast<sys::Memory::ProtectionFlags>(Prot) << " [ "
<< (const void *)SegMem.data() << " .. "
<< (const void *)((char *)SegMem.data() + SegMem.size())
<< " ]\n Processing content sections:\n";
});
if (SegLayout.ContentBlocks.empty()) {
LLVM_DEBUG(dbgs() << " No content blocks.\n");
continue;
}
size_t BlockOffset = 0;
size_t LastBlockEnd = 0;
for (auto *B : SegLayout.ContentBlocks) {
LLVM_DEBUG(dbgs() << " " << *B << ":\n");
// Pad to alignment/alignment-offset.
BlockOffset = alignToBlock(BlockOffset, *B);
LLVM_DEBUG({
dbgs() << " Bumped block offset to "
<< formatv("{0:x}", BlockOffset) << " to meet block alignment "
<< B->getAlignment() << " and alignment offset "
<< B->getAlignmentOffset() << "\n";
});
// Zero pad up to alignment.
LLVM_DEBUG({
if (LastBlockEnd != BlockOffset)
dbgs() << " Zero padding from " << formatv("{0:x}", LastBlockEnd)
<< " to " << formatv("{0:x}", BlockOffset) << "\n";
});
for (; LastBlockEnd != BlockOffset; ++LastBlockEnd)
*(SegMem.data() + LastBlockEnd) = 0;
// Copy initial block content.
LLVM_DEBUG({
dbgs() << " Copying block " << *B << " content, "
<< B->getContent().size() << " bytes, from "
<< (const void *)B->getContent().data() << " to offset "
<< formatv("{0:x}", BlockOffset) << "\n";
});
memcpy(SegMem.data() + BlockOffset, B->getContent().data(),
B->getContent().size());
// Point the block's content to the fixed up buffer.
B->setMutableContent(
{SegMem.data() + BlockOffset, B->getContent().size()});
// Update block end pointer.
LastBlockEnd = BlockOffset + B->getContent().size();
BlockOffset = LastBlockEnd;
}
// Zero pad the rest of the segment.
LLVM_DEBUG({
dbgs() << " Zero padding end of segment from offset "
<< formatv("{0:x}", LastBlockEnd) << " to "
<< formatv("{0:x}", SegMem.size()) << "\n";
});
for (; LastBlockEnd != SegMem.size(); ++LastBlockEnd)
*(SegMem.data() + LastBlockEnd) = 0;
}
}
void JITLinkerBase::deallocateAndBailOut(Error Err) {
void JITLinkerBase::abandonAllocAndBailOut(std::unique_ptr<JITLinkerBase> Self,
Error Err) {
assert(Err && "Should not be bailing out on success value");
assert(Alloc && "can not call deallocateAndBailOut before allocation");
Ctx->notifyFailed(joinErrors(std::move(Err), Alloc->deallocate()));
assert(Alloc && "can not call abandonAllocAndBailOut before allocation");
Alloc->abandon([S = std::move(Self), E1 = std::move(Err)](Error E2) mutable {
S->Ctx->notifyFailed(joinErrors(std::move(E1), std::move(E2)));
});
}
void prune(LinkGraph &G) {

View File

@@ -42,14 +42,9 @@ public:
virtual ~JITLinkerBase();
protected:
struct SegmentLayout {
using BlocksList = std::vector<Block *>;
BlocksList ContentBlocks;
BlocksList ZeroFillBlocks;
};
using SegmentLayoutMap = DenseMap<unsigned, SegmentLayout>;
using InFlightAlloc = JITLinkMemoryManager::InFlightAlloc;
using AllocResult = Expected<std::unique_ptr<InFlightAlloc>>;
using FinalizeResult = Expected<JITLinkMemoryManager::FinalizedAlloc>;
// Returns the PassConfiguration for this instance. This can be used by
// JITLinkerBase implementations to add late passes that reference their
@@ -61,39 +56,27 @@ protected:
// 1.1: Run pre-prune passes
// 1.2: Prune graph
// 1.3: Run post-prune passes
// 1.4: Sort blocks into segments
// 1.5: Allocate segment memory, update node vmaddrs to target vmaddrs
// 1.6: Run post-allocation passes
// 1.7: Notify context of final assigned symbol addresses
// 1.8: Identify external symbols and make an async call to resolve
// 1.4: Allocate memory.
void linkPhase1(std::unique_ptr<JITLinkerBase> Self);
// Phase 2:
// 2.1: Apply resolution results
// 2.2: Run pre-fixup passes
// 2.3: Fix up block contents
// 2.4: Run post-fixup passes
// 2.5: Make an async call to transfer and finalize memory.
void linkPhase2(std::unique_ptr<JITLinkerBase> Self,
Expected<AsyncLookupResult> LookupResult,
SegmentLayoutMap Layout);
// 2.2: Run post-allocation passes
// 2.3: Notify context of final assigned symbol addresses
// 2.4: Identify external symbols and make an async call to resolve
void linkPhase2(std::unique_ptr<JITLinkerBase> Self, AllocResult AR);
// Phase 3:
// 3.1: Call OnFinalized callback, handing off allocation.
void linkPhase3(std::unique_ptr<JITLinkerBase> Self, Error Err);
// 3.1: Apply resolution results
// 3.2: Run pre-fixup passes
// 3.3: Fix up block contents
// 3.4: Run post-fixup passes
// 3.5: Make an async call to transfer and finalize memory.
void linkPhase3(std::unique_ptr<JITLinkerBase> Self,
Expected<AsyncLookupResult> LookupResult);
// Align a JITTargetAddress to conform with block alignment requirements.
static JITTargetAddress alignToBlock(JITTargetAddress Addr, Block &B) {
uint64_t Delta = (B.getAlignmentOffset() - Addr) % B.getAlignment();
return Addr + Delta;
}
// Align a pointer to conform with block alignment requirements.
static char *alignToBlock(char *P, Block &B) {
uint64_t PAddr = static_cast<uint64_t>(reinterpret_cast<uintptr_t>(P));
uint64_t Delta = (B.getAlignmentOffset() - PAddr) % B.getAlignment();
return P + Delta;
}
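// Worked example (illustrative numbers, not from the patch): with
// Addr = 0x1003, getAlignment() = 0x10 and getAlignmentOffset() = 0x8,
// Delta = (0x8 - 0x1003) % 0x10 = 0x5 in unsigned arithmetic, so the block
// lands at 0x1008 -- 8 bytes past a 16-byte boundary, as required.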
// Phase 4:
// 4.1: Call OnFinalized callback, handing off allocation.
void linkPhase4(std::unique_ptr<JITLinkerBase> Self, FinalizeResult FR);
private:
// Run all passes in the given pass list, bailing out immediately if any pass
@@ -104,18 +87,14 @@ private:
// Implemented in JITLinker.
virtual Error fixUpBlocks(LinkGraph &G) const = 0;
SegmentLayoutMap layOutBlocks();
Error allocateSegments(const SegmentLayoutMap &Layout);
JITLinkContext::LookupMap getExternalSymbolNames() const;
void applyLookupResult(AsyncLookupResult LR);
void copyBlockContentToWorkingMemory(const SegmentLayoutMap &Layout,
JITLinkMemoryManager::Allocation &Alloc);
void deallocateAndBailOut(Error Err);
void abandonAllocAndBailOut(std::unique_ptr<JITLinkerBase> Self, Error Err);
std::unique_ptr<JITLinkContext> Ctx;
std::unique_ptr<LinkGraph> G;
PassConfiguration Passes;
std::unique_ptr<JITLinkMemoryManager::Allocation> Alloc;
std::unique_ptr<InFlightAlloc> Alloc;
};
template <typename LinkerImpl> class JITLinker : public JITLinkerBase {
@@ -152,6 +131,8 @@ private:
// Copy Block data and apply fixups.
LLVM_DEBUG(dbgs() << " Applying fixups.\n");
assert((!B->isZeroFill() || B->edges_size() == 0) &&
"Edges in zero-fill block?");
for (auto &E : B->edges()) {
// Skip non-relocation edges.

View File

@@ -7,128 +7,489 @@
//===----------------------------------------------------------------------===//
#include "llvm/ExecutionEngine/JITLink/JITLinkMemoryManager.h"
#include "llvm/ExecutionEngine/JITLink/JITLink.h"
#include "llvm/Support/FormatVariadic.h"
#include "llvm/Support/Process.h"
#define DEBUG_TYPE "jitlink"
namespace llvm {
namespace jitlink {
JITLinkMemoryManager::~JITLinkMemoryManager() = default;
JITLinkMemoryManager::Allocation::~Allocation() = default;
JITLinkMemoryManager::InFlightAlloc::~InFlightAlloc() = default;
Expected<std::unique_ptr<JITLinkMemoryManager::Allocation>>
InProcessMemoryManager::allocate(const JITLinkDylib *JD,
const SegmentsRequestMap &Request) {
static Error runAllocAction(JITLinkMemoryManager::AllocActionCall &C) {
using DeallocFnTy = char *(*)(const void *, size_t);
auto *Fn = jitTargetAddressToPointer<DeallocFnTy>(C.FnAddr);
using AllocationMap = DenseMap<unsigned, sys::MemoryBlock>;
if (char *ErrMsg = Fn(jitTargetAddressToPointer<const void *>(C.CtxAddr),
static_cast<size_t>(C.CtxSize))) {
auto E = make_error<StringError>(ErrMsg, inconvertibleErrorCode());
free(ErrMsg);
return E;
}
// Local class for allocation.
class IPMMAlloc : public Allocation {
public:
IPMMAlloc(AllocationMap SegBlocks) : SegBlocks(std::move(SegBlocks)) {}
MutableArrayRef<char> getWorkingMemory(ProtectionFlags Seg) override {
assert(SegBlocks.count(Seg) && "No allocation for segment");
return {static_cast<char *>(SegBlocks[Seg].base()),
SegBlocks[Seg].allocatedSize()};
}
JITTargetAddress getTargetMemory(ProtectionFlags Seg) override {
assert(SegBlocks.count(Seg) && "No allocation for segment");
return pointerToJITTargetAddress(SegBlocks[Seg].base());
}
void finalizeAsync(FinalizeContinuation OnFinalize) override {
OnFinalize(applyProtections());
}
Error deallocate() override {
if (SegBlocks.empty())
return Error::success();
void *SlabStart = SegBlocks.begin()->second.base();
char *SlabEnd = (char *)SlabStart;
for (auto &KV : SegBlocks) {
SlabStart = std::min(SlabStart, KV.second.base());
SlabEnd = std::max(SlabEnd, (char *)(KV.second.base()) +
KV.second.allocatedSize());
}
size_t SlabSize = SlabEnd - (char *)SlabStart;
assert((SlabSize % sys::Process::getPageSizeEstimate()) == 0 &&
"Slab size is not a multiple of page size");
sys::MemoryBlock Slab(SlabStart, SlabSize);
if (auto EC = sys::Memory::releaseMappedMemory(Slab))
return errorCodeToError(EC);
return Error::success();
}
return Error::success();
}
private:
Error applyProtections() {
for (auto &KV : SegBlocks) {
auto &Prot = KV.first;
auto &Block = KV.second;
if (auto EC = sys::Memory::protectMappedMemory(Block, Prot))
return errorCodeToError(EC);
if (Prot & sys::Memory::MF_EXEC)
sys::Memory::InvalidateInstructionCache(Block.base(),
Block.allocatedSize());
}
return Error::success();
}
// Align a JITTargetAddress to conform with block alignment requirements.
static JITTargetAddress alignToBlock(JITTargetAddress Addr, Block &B) {
uint64_t Delta = (B.getAlignmentOffset() - Addr) % B.getAlignment();
return Addr + Delta;
}
AllocationMap SegBlocks;
BasicLayout::BasicLayout(LinkGraph &G) : G(G) {
for (auto &Sec : G.sections()) {
// Skip empty sections.
if (empty(Sec.blocks()))
continue;
auto &Seg = Segments[{Sec.getMemProt(), Sec.getMemDeallocPolicy()}];
for (auto *B : Sec.blocks())
if (LLVM_LIKELY(!B->isZeroFill()))
Seg.ContentBlocks.push_back(B);
else
Seg.ZeroFillBlocks.push_back(B);
}
// Build Segments map.
auto CompareBlocks = [](const Block *LHS, const Block *RHS) {
// Sort by section, address and size
if (LHS->getSection().getOrdinal() != RHS->getSection().getOrdinal())
return LHS->getSection().getOrdinal() < RHS->getSection().getOrdinal();
if (LHS->getAddress() != RHS->getAddress())
return LHS->getAddress() < RHS->getAddress();
return LHS->getSize() < RHS->getSize();
};
if (!isPowerOf2_64((uint64_t)sys::Process::getPageSizeEstimate()))
return make_error<StringError>("Page size is not a power of 2",
inconvertibleErrorCode());
LLVM_DEBUG(dbgs() << "Generated BasicLayout for " << G.getName() << ":\n");
for (auto &KV : Segments) {
auto &Seg = KV.second;
AllocationMap Blocks;
const sys::Memory::ProtectionFlags ReadWrite =
static_cast<sys::Memory::ProtectionFlags>(sys::Memory::MF_READ |
sys::Memory::MF_WRITE);
llvm::sort(Seg.ContentBlocks, CompareBlocks);
llvm::sort(Seg.ZeroFillBlocks, CompareBlocks);
// Compute the total number of pages to allocate.
size_t TotalSize = 0;
for (auto &KV : Request) {
const auto &Seg = KV.second;
for (auto *B : Seg.ContentBlocks) {
Seg.ContentSize = alignToBlock(Seg.ContentSize, *B);
Seg.ContentSize += B->getSize();
Seg.Alignment = std::max(Seg.Alignment, Align(B->getAlignment()));
}
if (Seg.getAlignment() > sys::Process::getPageSizeEstimate())
return make_error<StringError>("Cannot request higher than page "
"alignment",
uint64_t SegEndOffset = Seg.ContentSize;
for (auto *B : Seg.ZeroFillBlocks) {
SegEndOffset = alignToBlock(SegEndOffset, *B);
SegEndOffset += B->getSize();
Seg.Alignment = std::max(Seg.Alignment, Align(B->getAlignment()));
}
Seg.ZeroFillSize = SegEndOffset - Seg.ContentSize;
LLVM_DEBUG({
dbgs() << " Seg " << KV.first
<< ": content-size=" << formatv("{0:x}", Seg.ContentSize)
<< ", zero-fill-size=" << formatv("{0:x}", Seg.ZeroFillSize)
<< ", align=" << formatv("{0:x}", Seg.Alignment.value()) << "\n";
});
}
}
Expected<BasicLayout::ContiguousPageBasedLayoutSizes>
BasicLayout::getContiguousPageBasedLayoutSizes(uint64_t PageSize) {
ContiguousPageBasedLayoutSizes SegsSizes;
for (auto &KV : segments()) {
auto &AG = KV.first;
auto &Seg = KV.second;
if (Seg.Alignment > PageSize)
return make_error<StringError>("Segment alignment greater than page size",
inconvertibleErrorCode());
TotalSize = alignTo(TotalSize, sys::Process::getPageSizeEstimate());
TotalSize += Seg.getContentSize();
TotalSize += Seg.getZeroFillSize();
uint64_t SegSize = alignTo(Seg.ContentSize + Seg.ZeroFillSize, PageSize);
if (AG.getMemDeallocPolicy() == MemDeallocPolicy::Standard)
SegsSizes.StandardSegs += SegSize;
else
SegsSizes.FinalizeSegs += SegSize;
}
// Allocate one slab to cover all the segments.
std::error_code EC;
auto SlabRemaining =
sys::Memory::allocateMappedMemory(TotalSize, nullptr, ReadWrite, EC);
return SegsSizes;
}
if (EC)
return errorCodeToError(EC);
Error BasicLayout::apply() {
for (auto &KV : Segments) {
auto &Seg = KV.second;
// Allocate segment memory from the slab.
for (auto &KV : Request) {
assert(!(Seg.ContentBlocks.empty() && Seg.ZeroFillBlocks.empty()) &&
"Empty section recorded?");
const auto &Seg = KV.second;
for (auto *B : Seg.ContentBlocks) {
// Align addr and working-mem-offset.
Seg.Addr = alignToBlock(Seg.Addr, *B);
Seg.NextWorkingMemOffset = alignToBlock(Seg.NextWorkingMemOffset, *B);
uint64_t SegmentSize = alignTo(Seg.getContentSize() + Seg.getZeroFillSize(),
sys::Process::getPageSizeEstimate());
assert(SlabRemaining.allocatedSize() >= SegmentSize &&
"Mapping exceeds allocation");
// Update block addr.
B->setAddress(Seg.Addr);
Seg.Addr += B->getSize();
sys::MemoryBlock SegMem(SlabRemaining.base(), SegmentSize);
SlabRemaining = sys::MemoryBlock((char *)SlabRemaining.base() + SegmentSize,
SlabRemaining.allocatedSize() - SegmentSize);
// Copy content to working memory, then update content to point at working
// memory.
memcpy(Seg.WorkingMem + Seg.NextWorkingMemOffset, B->getContent().data(),
B->getSize());
B->setMutableContent(
{Seg.WorkingMem + Seg.NextWorkingMemOffset, B->getSize()});
Seg.NextWorkingMemOffset += B->getSize();
}
// Zero out the zero-fill memory.
memset(static_cast<char *>(SegMem.base()) + Seg.getContentSize(), 0,
Seg.getZeroFillSize());
for (auto *B : Seg.ZeroFillBlocks) {
// Align addr.
Seg.Addr = alignToBlock(Seg.Addr, *B);
// Update block addr.
B->setAddress(Seg.Addr);
Seg.Addr += B->getSize();
}
// Record the block for this segment.
Blocks[KV.first] = std::move(SegMem);
Seg.ContentBlocks.clear();
Seg.ZeroFillBlocks.clear();
}
return std::unique_ptr<InProcessMemoryManager::Allocation>(
new IPMMAlloc(std::move(Blocks)));
return Error::success();
}
JITLinkMemoryManager::AllocActions &BasicLayout::graphAllocActions() {
return G.allocActions();
}
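// A minimal sketch (not part of this commit) of how a memory manager can
// drive the BasicLayout helper above. The layOutGraphInPlace name, the
// pre-reserved SlabBase/SlabTargetAddr parameters, and the single contiguous
// slab are assumptions for illustration only.
static Error layOutGraphInPlace(LinkGraph &G, char *SlabBase,
                                JITTargetAddress SlabTargetAddr,
                                uint64_t PageSize) {
  BasicLayout BL(G);

  // Validate alignments and get page-rounded sizes for each segment group.
  auto SegsSizes = BL.getContiguousPageBasedLayoutSizes(PageSize);
  if (!SegsSizes)
    return SegsSizes.takeError();

  // Assign each segment a target address and working memory, then let apply()
  // set block addresses and copy block content into the working memory.
  JITTargetAddress NextAddr = SlabTargetAddr;
  char *NextWorkingMem = SlabBase;
  for (auto &KV : BL.segments()) {
    auto &Seg = KV.second;
    Seg.Addr = NextAddr;
    Seg.WorkingMem = NextWorkingMem;
    uint64_t SegSize = alignTo(Seg.ContentSize + Seg.ZeroFillSize, PageSize);
    NextAddr += SegSize;
    NextWorkingMem += SegSize;
  }

  return BL.apply();
}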
void SimpleSegmentAlloc::Create(JITLinkMemoryManager &MemMgr,
const JITLinkDylib *JD, SegmentMap Segments,
OnCreatedFunction OnCreated) {
static_assert(AllocGroup::NumGroups == 16,
"AllocGroup has changed. Section names below must be updated");
StringRef AGSectionNames[] = {
"__---.standard", "__R--.standard", "__-W-.standard", "__RW-.standard",
"__--X.standard", "__R-X.standard", "__-WX.standard", "__RWX.standard",
"__---.finalize", "__R--.finalize", "__-W-.finalize", "__RW-.finalize",
"__--X.finalize", "__R-X.finalize", "__-WX.finalize", "__RWX.finalize"};
auto G =
std::make_unique<LinkGraph>("", Triple(), 0, support::native, nullptr);
AllocGroupSmallMap<Block *> ContentBlocks;
JITTargetAddress NextAddr = 0x100000;
for (auto &KV : Segments) {
auto &AG = KV.first;
auto &Seg = KV.second;
auto AGSectionName =
AGSectionNames[static_cast<unsigned>(AG.getMemProt()) |
static_cast<bool>(AG.getMemDeallocPolicy()) << 3];
auto &Sec = G->createSection(AGSectionName, AG.getMemProt());
Sec.setMemDeallocPolicy(AG.getMemDeallocPolicy());
if (Seg.ContentSize != 0) {
NextAddr = alignTo(NextAddr, Seg.ContentAlign);
auto &B =
G->createMutableContentBlock(Sec, G->allocateBuffer(Seg.ContentSize),
NextAddr, Seg.ContentAlign.value(), 0);
ContentBlocks[AG] = &B;
NextAddr += Seg.ContentSize;
}
}
// GRef declared separately since order-of-argument-eval isn't specified.
auto &GRef = *G;
MemMgr.allocate(JD, GRef,
[G = std::move(G), ContentBlocks = std::move(ContentBlocks),
OnCreated = std::move(OnCreated)](
JITLinkMemoryManager::AllocResult Alloc) mutable {
if (!Alloc)
OnCreated(Alloc.takeError());
else
OnCreated(SimpleSegmentAlloc(std::move(G),
std::move(ContentBlocks),
std::move(*Alloc)));
});
}
Expected<SimpleSegmentAlloc>
SimpleSegmentAlloc::Create(JITLinkMemoryManager &MemMgr, const JITLinkDylib *JD,
SegmentMap Segments) {
std::promise<MSVCPExpected<SimpleSegmentAlloc>> AllocP;
auto AllocF = AllocP.get_future();
Create(MemMgr, JD, std::move(Segments),
[&](Expected<SimpleSegmentAlloc> Result) {
AllocP.set_value(std::move(Result));
});
return AllocF.get();
}
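// A minimal sketch (not part of this commit) showing the blocking
// SimpleSegmentAlloc::Create overload above from the caller's side. The
// emitReadOnlyBlob name and the 16-byte alignment are assumptions; the
// returned FinalizedAlloc must eventually be handed back to
// JITLinkMemoryManager::deallocate.
static Expected<JITLinkMemoryManager::FinalizedAlloc>
emitReadOnlyBlob(JITLinkMemoryManager &MemMgr, ArrayRef<char> Bytes) {
  auto Alloc = SimpleSegmentAlloc::Create(
      MemMgr, /*JD=*/nullptr, {{MemProt::Read, {Bytes.size(), Align(16)}}});
  if (!Alloc)
    return Alloc.takeError();

  // Copy the payload into the working memory of the read-only group.
  auto SegInfo = Alloc->getSegInfo(MemProt::Read);
  memcpy(SegInfo.WorkingMem.data(), Bytes.data(), Bytes.size());

  // Finalize, turning the in-flight allocation into a FinalizedAlloc.
  return Alloc->finalize();
}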
SimpleSegmentAlloc::SimpleSegmentAlloc(SimpleSegmentAlloc &&) = default;
SimpleSegmentAlloc &
SimpleSegmentAlloc::operator=(SimpleSegmentAlloc &&) = default;
SimpleSegmentAlloc::~SimpleSegmentAlloc() {}
SimpleSegmentAlloc::SegmentInfo SimpleSegmentAlloc::getSegInfo(AllocGroup AG) {
auto I = ContentBlocks.find(AG);
if (I != ContentBlocks.end()) {
auto &B = *I->second;
return {B.getAddress(), B.getAlreadyMutableContent()};
}
return {};
}
SimpleSegmentAlloc::SimpleSegmentAlloc(
std::unique_ptr<LinkGraph> G, AllocGroupSmallMap<Block *> ContentBlocks,
std::unique_ptr<JITLinkMemoryManager::InFlightAlloc> Alloc)
: G(std::move(G)), ContentBlocks(std::move(ContentBlocks)),
Alloc(std::move(Alloc)) {}
class InProcessMemoryManager::IPInFlightAlloc
: public JITLinkMemoryManager::InFlightAlloc {
public:
IPInFlightAlloc(InProcessMemoryManager &MemMgr, LinkGraph &G, BasicLayout BL,
sys::MemoryBlock StandardSegments,
sys::MemoryBlock FinalizationSegments)
: MemMgr(MemMgr), G(G), BL(std::move(BL)),
StandardSegments(std::move(StandardSegments)),
FinalizationSegments(std::move(FinalizationSegments)) {}
void finalize(OnFinalizedFunction OnFinalized) override {
// Apply memory protections to all segments.
if (auto Err = applyProtections()) {
OnFinalized(std::move(Err));
return;
}
// Run finalization actions.
// FIXME: Roll back previous successful actions on failure.
std::vector<AllocActionCall> DeallocActions;
DeallocActions.reserve(G.allocActions().size());
for (auto &ActPair : G.allocActions()) {
if (ActPair.Finalize.FnAddr)
if (auto Err = runAllocAction(ActPair.Finalize)) {
OnFinalized(std::move(Err));
return;
}
if (ActPair.Dealloc.FnAddr)
DeallocActions.push_back(ActPair.Dealloc);
}
G.allocActions().clear();
// Release the finalize segments slab.
if (auto EC = sys::Memory::releaseMappedMemory(FinalizationSegments)) {
OnFinalized(errorCodeToError(EC));
return;
}
// Continue with finalized allocation.
OnFinalized(MemMgr.createFinalizedAlloc(std::move(StandardSegments),
std::move(DeallocActions)));
}
void abandon(OnAbandonedFunction OnAbandoned) override {
Error Err = Error::success();
if (auto EC = sys::Memory::releaseMappedMemory(FinalizationSegments))
Err = joinErrors(std::move(Err), errorCodeToError(EC));
if (auto EC = sys::Memory::releaseMappedMemory(StandardSegments))
Err = joinErrors(std::move(Err), errorCodeToError(EC));
OnAbandoned(std::move(Err));
}
private:
Error applyProtections() {
for (auto &KV : BL.segments()) {
const auto &AG = KV.first;
auto &Seg = KV.second;
auto Prot = toSysMemoryProtectionFlags(AG.getMemProt());
uint64_t SegSize =
alignTo(Seg.ContentSize + Seg.ZeroFillSize, MemMgr.PageSize);
sys::MemoryBlock MB(Seg.WorkingMem, SegSize);
if (auto EC = sys::Memory::protectMappedMemory(MB, Prot))
return errorCodeToError(EC);
if (Prot & sys::Memory::MF_EXEC)
sys::Memory::InvalidateInstructionCache(MB.base(), MB.allocatedSize());
}
return Error::success();
}
InProcessMemoryManager &MemMgr;
LinkGraph &G;
BasicLayout BL;
sys::MemoryBlock StandardSegments;
sys::MemoryBlock FinalizationSegments;
};
Expected<std::unique_ptr<InProcessMemoryManager>>
InProcessMemoryManager::Create() {
if (auto PageSize = sys::Process::getPageSize())
return std::make_unique<InProcessMemoryManager>(*PageSize);
else
return PageSize.takeError();
}
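// A minimal sketch (not part of this commit): Create() queries the process
// page size and can fail, so call sites propagate the error rather than
// constructing the manager directly. The makeDefaultMemMgr wrapper name is an
// assumption.
static Expected<std::unique_ptr<JITLinkMemoryManager>> makeDefaultMemMgr() {
  auto MemMgr = InProcessMemoryManager::Create();
  if (!MemMgr)
    return MemMgr.takeError(); // Propagate the page-size query failure.
  return std::unique_ptr<JITLinkMemoryManager>(std::move(*MemMgr));
}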
void InProcessMemoryManager::allocate(const JITLinkDylib *JD, LinkGraph &G,
OnAllocatedFunction OnAllocated) {
// FIXME: Just check this once on startup.
if (!isPowerOf2_64((uint64_t)PageSize)) {
OnAllocated(make_error<StringError>("Page size is not a power of 2",
inconvertibleErrorCode()));
return;
}
BasicLayout BL(G);
// Compute the per-group and total layout sizes, and check that no segment's
// alignment exceeds the page size.
auto SegsSizes = BL.getContiguousPageBasedLayoutSizes(PageSize);
if (!SegsSizes) {
OnAllocated(SegsSizes.takeError());
return;
}
// Check that the total size requested (including zero fill) is not larger
// than a size_t.
if (SegsSizes->total() > std::numeric_limits<size_t>::max()) {
OnAllocated(make_error<JITLinkError>(
"Total requested size " + formatv("{0:x}", SegSizes->total()) +
" for graph " + G.getName() + " exceeds address space"));
return;
}
// Allocate one slab for the whole thing (to make sure everything is
// in-range), then partition into standard and finalization blocks.
//
// FIXME: Make two separate allocations in the future to reduce
// fragmentation: finalization segments will usually be a single page, and
// standard segments are likely to be more than one page. Where multiple
// allocations are in-flight at once (likely) the current approach will leave
// a lot of single-page holes.
sys::MemoryBlock Slab;
sys::MemoryBlock StandardSegsMem;
sys::MemoryBlock FinalizeSegsMem;
{
const sys::Memory::ProtectionFlags ReadWrite =
static_cast<sys::Memory::ProtectionFlags>(sys::Memory::MF_READ |
sys::Memory::MF_WRITE);
std::error_code EC;
Slab = sys::Memory::allocateMappedMemory(SegsSizes->total(), nullptr,
ReadWrite, EC);
if (EC) {
OnAllocated(errorCodeToError(EC));
return;
}
// Zero-fill the whole slab up-front.
memset(Slab.base(), 0, Slab.allocatedSize());
StandardSegsMem = {Slab.base(),
static_cast<size_t>(SegsSizes->StandardSegs)};
FinalizeSegsMem = {(void *)((char *)Slab.base() + SegsSizes->StandardSegs),
static_cast<size_t>(SegsSizes->FinalizeSegs)};
}
auto NextStandardSegAddr = pointerToJITTargetAddress(StandardSegsMem.base());
auto NextFinalizeSegAddr = pointerToJITTargetAddress(FinalizeSegsMem.base());
LLVM_DEBUG({
dbgs() << "InProcessMemoryManager allocated:\n";
if (SegsSizes->StandardSegs)
dbgs() << formatv(" [ {0:x16} -- {1:x16} ]", NextStandardSegAddr,
NextStandardSegAddr + StandardSegsMem.allocatedSize())
<< " to stardard segs\n";
else
dbgs() << " no standard segs\n";
if (SegsSizes->FinalizeSegs)
dbgs() << formatv(" [ {0:x16} -- {1:x16} ]", NextFinalizeSegAddr,
NextFinalizeSegAddr + FinalizeSegsMem.allocatedSize())
<< " to finalize segs\n";
else
dbgs() << " no finalize segs\n";
});
// Build ProtMap, assign addresses.
for (auto &KV : BL.segments()) {
auto &AG = KV.first;
auto &Seg = KV.second;
auto &SegAddr = (AG.getMemDeallocPolicy() == MemDeallocPolicy::Standard)
? NextStandardSegAddr
: NextFinalizeSegAddr;
Seg.WorkingMem = jitTargetAddressToPointer<char *>(SegAddr);
Seg.Addr = SegAddr;
SegAddr += alignTo(Seg.ContentSize + Seg.ZeroFillSize, PageSize);
}
if (auto Err = BL.apply()) {
OnAllocated(std::move(Err));
return;
}
OnAllocated(std::make_unique<IPInFlightAlloc>(*this, G, std::move(BL),
std::move(StandardSegsMem),
std::move(FinalizeSegsMem)));
}
void InProcessMemoryManager::deallocate(std::vector<FinalizedAlloc> Allocs,
OnDeallocatedFunction OnDeallocated) {
std::vector<sys::MemoryBlock> StandardSegmentsList;
std::vector<std::vector<AllocActionCall>> DeallocActionsList;
{
std::lock_guard<std::mutex> Lock(FinalizedAllocsMutex);
for (auto &Alloc : Allocs) {
auto *FA =
jitTargetAddressToPointer<FinalizedAllocInfo *>(Alloc.release());
StandardSegmentsList.push_back(std::move(FA->StandardSegments));
if (!FA->DeallocActions.empty())
DeallocActionsList.push_back(std::move(FA->DeallocActions));
FA->~FinalizedAllocInfo();
FinalizedAllocInfos.Deallocate(FA);
}
}
Error DeallocErr = Error::success();
while (!DeallocActionsList.empty()) {
auto &DeallocActions = DeallocActionsList.back();
auto &StandardSegments = StandardSegmentsList.back();
// Run any deallocate calls.
while (!DeallocActions.empty()) {
if (auto Err = runAllocAction(DeallocActions.back()))
DeallocErr = joinErrors(std::move(DeallocErr), std::move(Err));
DeallocActions.pop_back();
}
// Release the standard segments slab.
if (auto EC = sys::Memory::releaseMappedMemory(StandardSegments))
DeallocErr = joinErrors(std::move(DeallocErr), errorCodeToError(EC));
DeallocActionsList.pop_back();
StandardSegmentsList.pop_back();
}
OnDeallocated(std::move(DeallocErr));
}
JITLinkMemoryManager::FinalizedAlloc
InProcessMemoryManager::createFinalizedAlloc(
sys::MemoryBlock StandardSegments,
std::vector<AllocActionCall> DeallocActions) {
std::lock_guard<std::mutex> Lock(FinalizedAllocsMutex);
auto *FA = FinalizedAllocInfos.Allocate<FinalizedAllocInfo>();
new (FA) FinalizedAllocInfo(
{std::move(StandardSegments), std::move(DeallocActions)});
return FinalizedAlloc(pointerToJITTargetAddress(FA));
}
} // end namespace jitlink
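// A minimal end-to-end sketch (not part of this commit) of the asynchronous
// allocate -> finalize -> deallocate lifecycle implemented above, using
// promise/future to wait synchronously. The linkAndDiscard name is an
// assumption and error handling is deliberately terse.
static llvm::Error linkAndDiscard(llvm::jitlink::JITLinkMemoryManager &MemMgr,
                                  llvm::jitlink::LinkGraph &G) {
  using namespace llvm;
  using namespace llvm::jitlink;

  // Allocate: yields an InFlightAlloc once working memory has been reserved.
  std::promise<
      MSVCPExpected<std::unique_ptr<JITLinkMemoryManager::InFlightAlloc>>>
      AllocP;
  MemMgr.allocate(/*JD=*/nullptr, G,
                  [&](JITLinkMemoryManager::AllocResult R) {
                    AllocP.set_value(std::move(R));
                  });
  auto Alloc = AllocP.get_future().get();
  if (!Alloc)
    return Alloc.takeError();

  // Finalize: applies protections, runs finalize actions, and yields a
  // FinalizedAlloc handle.
  std::promise<MSVCPExpected<JITLinkMemoryManager::FinalizedAlloc>> FinalizeP;
  (*Alloc)->finalize([&](Expected<JITLinkMemoryManager::FinalizedAlloc> R) {
    FinalizeP.set_value(std::move(R));
  });
  auto FA = FinalizeP.get_future().get();
  if (!FA)
    return FA.takeError();

  // Deallocate: runs dealloc actions and releases the standard segments.
  std::promise<MSVCPError> DeallocP;
  std::vector<JITLinkMemoryManager::FinalizedAlloc> FAs;
  FAs.push_back(std::move(*FA));
  MemMgr.deallocate(std::move(FAs),
                    [&](Error Err) { DeallocP.set_value(std::move(Err)); });
  return DeallocP.get_future().get();
}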

View File

@@ -107,11 +107,9 @@ MachOLinkGraphBuilder::getEndianness(const object::MachOObjectFile &Obj) {
}
Section &MachOLinkGraphBuilder::getCommonSection() {
if (!CommonSection) {
auto Prot = static_cast<sys::Memory::ProtectionFlags>(
sys::Memory::MF_READ | sys::Memory::MF_WRITE);
CommonSection = &G->createSection(CommonSectionName, Prot);
}
if (!CommonSection)
CommonSection =
&G->createSection(CommonSectionName, MemProt::Read | MemProt::Write);
return *CommonSection;
}
@@ -176,13 +174,11 @@ Error MachOLinkGraphBuilder::createNormalizedSections() {
// Get prot flags.
// FIXME: Make sure this test is correct (it's probably missing cases
// as-is).
sys::Memory::ProtectionFlags Prot;
MemProt Prot;
if (NSec.Flags & MachO::S_ATTR_PURE_INSTRUCTIONS)
Prot = static_cast<sys::Memory::ProtectionFlags>(sys::Memory::MF_READ |
sys::Memory::MF_EXEC);
Prot = MemProt::Read | MemProt::Exec;
else
Prot = static_cast<sys::Memory::ProtectionFlags>(sys::Memory::MF_READ |
sys::Memory::MF_WRITE);
Prot = MemProt::Read | MemProt::Write;
if (!isDebugSection(NSec)) {
auto FullyQualifiedName =

View File

@@ -457,16 +457,14 @@ public:
private:
Section &getGOTSection() {
if (!GOTSection)
GOTSection = &G.createSection("$__GOT", sys::Memory::MF_READ);
GOTSection = &G.createSection("$__GOT", MemProt::Read);
return *GOTSection;
}
Section &getStubsSection() {
if (!StubsSection) {
auto StubsProt = static_cast<sys::Memory::ProtectionFlags>(
sys::Memory::MF_READ | sys::Memory::MF_EXEC);
StubsSection = &G.createSection("$__STUBS", StubsProt);
}
if (!StubsSection)
StubsSection =
&G.createSection("$__STUBS", MemProt::Read | MemProt::Exec);
return *StubsSection;
}

View File

@@ -479,16 +479,14 @@ public:
private:
Section &getGOTSection() {
if (!GOTSection)
GOTSection = &G.createSection("$__GOT", sys::Memory::MF_READ);
GOTSection = &G.createSection("$__GOT", MemProt::Read);
return *GOTSection;
}
Section &getStubsSection() {
if (!StubsSection) {
auto StubsProt = static_cast<sys::Memory::ProtectionFlags>(
sys::Memory::MF_READ | sys::Memory::MF_EXEC);
StubsSection = &G.createSection("$__STUBS", StubsProt);
}
if (!StubsSection)
StubsSection =
&G.createSection("$__STUBS", MemProt::Read | MemProt::Exec);
return *StubsSection;
}

View File

@@ -0,0 +1,33 @@
//===------------- MemoryFlags.cpp - Memory allocation flags -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#include "llvm/ExecutionEngine/JITLink/MemoryFlags.h"
#define DEBUG_TYPE "jitlink"
namespace llvm {
namespace jitlink {
raw_ostream &operator<<(raw_ostream &OS, MemProt MP) {
return OS << (((MP & MemProt::Read) != MemProt::None) ? 'R' : '-')
<< (((MP & MemProt::Write) != MemProt::None) ? 'W' : '-')
<< (((MP & MemProt::Exec) != MemProt::None) ? 'X' : '-');
}
raw_ostream &operator<<(raw_ostream &OS, MemDeallocPolicy MDP) {
return OS << (MDP == MemDeallocPolicy::Standard ? "standard" : "finalize");
}
raw_ostream &operator<<(raw_ostream &OS, AllocGroup AG) {
return OS << '(' << AG.getMemProt() << ", " << AG.getMemDeallocPolicy()
<< ')';
}
} // end namespace jitlink
} // end namespace llvm
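// A small sketch (not part of this commit) of the debug output these
// operators produce. The dumpFlagsExample name is an assumption; the
// AllocGroup(MemProt, MemDeallocPolicy) constructor is taken from
// MemoryFlags.h.
static void dumpFlagsExample(llvm::raw_ostream &OS) {
  using namespace llvm::jitlink;
  OS << (MemProt::Read | MemProt::Write) << "\n"; // "RW-"
  OS << MemDeallocPolicy::Finalize << "\n";       // "finalize"
  OS << AllocGroup(MemProt::Read | MemProt::Exec, MemDeallocPolicy::Finalize)
     << "\n";                                     // "(R-X, finalize)"
}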

View File

@@ -1,10 +1,15 @@
//===---- DebugObjectManagerPlugin.h - JITLink debug objects ---*- C++ -*-===//
//===------- DebugObjectManagerPlugin.cpp - JITLink debug objects ---------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// FIXME: Update Plugin to poke the debug object into a new JITLink section,
// rather than creating a new allocation.
//
//===----------------------------------------------------------------------===//
#include "llvm/ExecutionEngine/Orc/DebugObjectManagerPlugin.h"
@@ -108,70 +113,77 @@ void ELFDebugObjectSection<ELFT>::dump(raw_ostream &OS, StringRef Name) {
}
}
static constexpr sys::Memory::ProtectionFlags ReadOnly =
static_cast<sys::Memory::ProtectionFlags>(sys::Memory::MF_READ);
enum class Requirement {
// Request final target memory load-addresses for all sections.
ReportFinalSectionLoadAddresses,
};
/// The plugin creates a debug object from JITLinkContext when JITLink starts
/// processing the corresponding LinkGraph. It provides access to the pass
/// configuration of the LinkGraph and calls the finalization function, once
/// the resulting link artifact was emitted.
/// The plugin creates a debug object when JITLink starts processing the
/// corresponding LinkGraph. It provides access to the pass configuration of
/// the LinkGraph and calls the finalization function once the resulting link
/// artifact has been emitted.
///
class DebugObject {
public:
DebugObject(JITLinkContext &Ctx, ExecutionSession &ES) : Ctx(Ctx), ES(ES) {}
DebugObject(JITLinkMemoryManager &MemMgr, const JITLinkDylib *JD,
ExecutionSession &ES)
: MemMgr(MemMgr), JD(JD), ES(ES) {}
void set(Requirement Req) { Reqs.insert(Req); }
bool has(Requirement Req) const { return Reqs.count(Req) > 0; }
using FinalizeContinuation = std::function<void(Expected<sys::MemoryBlock>)>;
using FinalizeContinuation = std::function<void(Expected<ExecutorAddrRange>)>;
void finalizeAsync(FinalizeContinuation OnFinalize);
virtual ~DebugObject() {
if (Alloc)
if (Error Err = Alloc->deallocate())
if (Alloc) {
std::vector<FinalizedAlloc> Allocs;
Allocs.push_back(std::move(Alloc));
if (Error Err = MemMgr.deallocate(std::move(Allocs)))
ES.reportError(std::move(Err));
}
}
virtual void reportSectionTargetMemoryRange(StringRef Name,
SectionRange TargetMem) {}
protected:
using Allocation = JITLinkMemoryManager::Allocation;
using InFlightAlloc = JITLinkMemoryManager::InFlightAlloc;
using FinalizedAlloc = JITLinkMemoryManager::FinalizedAlloc;
virtual Expected<std::unique_ptr<Allocation>>
finalizeWorkingMemory(JITLinkContext &Ctx) = 0;
virtual Expected<SimpleSegmentAlloc> finalizeWorkingMemory() = 0;
JITLinkMemoryManager &MemMgr;
const JITLinkDylib *JD = nullptr;
private:
JITLinkContext &Ctx;
ExecutionSession &ES;
std::set<Requirement> Reqs;
std::unique_ptr<Allocation> Alloc{nullptr};
FinalizedAlloc Alloc;
};
// Finalize working memory and take ownership of the resulting allocation. Start
// copying memory over to the target and pass on the result once we're done.
// Ownership of the allocation remains with us for the rest of our lifetime.
void DebugObject::finalizeAsync(FinalizeContinuation OnFinalize) {
assert(Alloc == nullptr && "Cannot finalize more than once");
assert(!Alloc && "Cannot finalize more than once");
auto AllocOrErr = finalizeWorkingMemory(Ctx);
if (!AllocOrErr)
OnFinalize(AllocOrErr.takeError());
Alloc = std::move(*AllocOrErr);
Alloc->finalizeAsync([this, OnFinalize](Error Err) {
if (Err)
OnFinalize(std::move(Err));
else
OnFinalize(sys::MemoryBlock(
jitTargetAddressToPointer<void *>(Alloc->getTargetMemory(ReadOnly)),
Alloc->getWorkingMemory(ReadOnly).size()));
});
if (auto SimpleSegAlloc = finalizeWorkingMemory()) {
auto ROSeg = SimpleSegAlloc->getSegInfo(MemProt::Read);
ExecutorAddrRange DebugObjRange(ExecutorAddr(ROSeg.Addr),
ExecutorAddrDiff(ROSeg.WorkingMem.size()));
SimpleSegAlloc->finalize(
[this, DebugObjRange,
OnFinalize = std::move(OnFinalize)](Expected<FinalizedAlloc> FA) {
if (FA) {
Alloc = std::move(*FA);
OnFinalize(DebugObjRange);
} else
OnFinalize(FA.takeError());
});
} else
OnFinalize(SimpleSegAlloc.takeError());
}
/// The current implementation of ELFDebugObject replicates the approach used in
@@ -190,8 +202,7 @@ public:
StringRef getBuffer() const { return Buffer->getMemBufferRef().getBuffer(); }
protected:
Expected<std::unique_ptr<Allocation>>
finalizeWorkingMemory(JITLinkContext &Ctx) override;
Expected<SimpleSegmentAlloc> finalizeWorkingMemory() override;
template <typename ELFT>
Error recordSection(StringRef Name,
@@ -201,15 +212,16 @@ protected:
private:
template <typename ELFT>
static Expected<std::unique_ptr<ELFDebugObject>>
CreateArchType(MemoryBufferRef Buffer, JITLinkContext &Ctx,
ExecutionSession &ES);
CreateArchType(MemoryBufferRef Buffer, JITLinkMemoryManager &MemMgr,
const JITLinkDylib *JD, ExecutionSession &ES);
static std::unique_ptr<WritableMemoryBuffer>
CopyBuffer(MemoryBufferRef Buffer, Error &Err);
ELFDebugObject(std::unique_ptr<WritableMemoryBuffer> Buffer,
JITLinkContext &Ctx, ExecutionSession &ES)
: DebugObject(Ctx, ES), Buffer(std::move(Buffer)) {
JITLinkMemoryManager &MemMgr, const JITLinkDylib *JD,
ExecutionSession &ES)
: DebugObject(MemMgr, JD, ES), Buffer(std::move(Buffer)) {
set(Requirement::ReportFinalSectionLoadAddresses);
}
@@ -244,13 +256,14 @@ ELFDebugObject::CopyBuffer(MemoryBufferRef Buffer, Error &Err) {
template <typename ELFT>
Expected<std::unique_ptr<ELFDebugObject>>
ELFDebugObject::CreateArchType(MemoryBufferRef Buffer, JITLinkContext &Ctx,
ExecutionSession &ES) {
ELFDebugObject::CreateArchType(MemoryBufferRef Buffer,
JITLinkMemoryManager &MemMgr,
const JITLinkDylib *JD, ExecutionSession &ES) {
using SectionHeader = typename ELFT::Shdr;
Error Err = Error::success();
std::unique_ptr<ELFDebugObject> DebugObj(
new ELFDebugObject(CopyBuffer(Buffer, Err), Ctx, ES));
new ELFDebugObject(CopyBuffer(Buffer, Err), MemMgr, JD, ES));
if (Err)
return std::move(Err);
@@ -299,23 +312,26 @@ ELFDebugObject::Create(MemoryBufferRef Buffer, JITLinkContext &Ctx,
if (Class == ELF::ELFCLASS32) {
if (Endian == ELF::ELFDATA2LSB)
return CreateArchType<ELF32LE>(Buffer, Ctx, ES);
return CreateArchType<ELF32LE>(Buffer, Ctx.getMemoryManager(),
Ctx.getJITLinkDylib(), ES);
if (Endian == ELF::ELFDATA2MSB)
return CreateArchType<ELF32BE>(Buffer, Ctx, ES);
return CreateArchType<ELF32BE>(Buffer, Ctx.getMemoryManager(),
Ctx.getJITLinkDylib(), ES);
return nullptr;
}
if (Class == ELF::ELFCLASS64) {
if (Endian == ELF::ELFDATA2LSB)
return CreateArchType<ELF64LE>(Buffer, Ctx, ES);
return CreateArchType<ELF64LE>(Buffer, Ctx.getMemoryManager(),
Ctx.getJITLinkDylib(), ES);
if (Endian == ELF::ELFDATA2MSB)
return CreateArchType<ELF64BE>(Buffer, Ctx, ES);
return CreateArchType<ELF64BE>(Buffer, Ctx.getMemoryManager(),
Ctx.getJITLinkDylib(), ES);
return nullptr;
}
return nullptr;
}
Expected<std::unique_ptr<DebugObject::Allocation>>
ELFDebugObject::finalizeWorkingMemory(JITLinkContext &Ctx) {
Expected<SimpleSegmentAlloc> ELFDebugObject::finalizeWorkingMemory() {
LLVM_DEBUG({
dbgs() << "Section load-addresses in debug object for \""
<< Buffer->getBufferIdentifier() << "\":\n";
@@ -324,28 +340,21 @@ ELFDebugObject::finalizeWorkingMemory(JITLinkContext &Ctx) {
});
// TODO: This works, but what actual alignment requirements do we have?
unsigned Alignment = sys::Process::getPageSizeEstimate();
JITLinkMemoryManager &MemMgr = Ctx.getMemoryManager();
const JITLinkDylib *JD = Ctx.getJITLinkDylib();
unsigned PageSize = sys::Process::getPageSizeEstimate();
size_t Size = Buffer->getBufferSize();
// Allocate working memory for debug object in read-only segment.
JITLinkMemoryManager::SegmentsRequestMap SingleReadOnlySegment;
SingleReadOnlySegment[ReadOnly] =
JITLinkMemoryManager::SegmentRequest(Alignment, Size, 0);
auto AllocOrErr = MemMgr.allocate(JD, SingleReadOnlySegment);
if (!AllocOrErr)
return AllocOrErr.takeError();
auto Alloc = SimpleSegmentAlloc::Create(
MemMgr, JD, {{MemProt::Read, {Size, Align(PageSize)}}});
if (!Alloc)
return Alloc;
// Initialize working memory with a copy of our object buffer.
// TODO: Use our buffer as working memory directly.
std::unique_ptr<Allocation> Alloc = std::move(*AllocOrErr);
MutableArrayRef<char> WorkingMem = Alloc->getWorkingMemory(ReadOnly);
memcpy(WorkingMem.data(), Buffer->getBufferStart(), Size);
auto SegInfo = Alloc->getSegInfo(MemProt::Read);
memcpy(SegInfo.WorkingMem.data(), Buffer->getBufferStart(), Size);
Buffer.reset();
return std::move(Alloc);
return Alloc;
}
void ELFDebugObject::reportSectionTargetMemoryRange(StringRef Name,
@@ -447,7 +456,7 @@ Error DebugObjectManagerPlugin::notifyEmitted(
std::future<MSVCPError> FinalizeErr = FinalizePromise.get_future();
It->second->finalizeAsync(
[this, &FinalizePromise, &MR](Expected<sys::MemoryBlock> TargetMem) {
[this, &FinalizePromise, &MR](Expected<ExecutorAddrRange> TargetMem) {
// Any failure here will fail materialization.
if (!TargetMem) {
FinalizePromise.set_value(TargetMem.takeError());

View File

@@ -56,7 +56,7 @@ public:
"<DSOHandleMU>", TT, PointerSize, Endianness,
jitlink::getGenericEdgeKindName);
auto &DSOHandleSection =
G->createSection(".data.__dso_handle", sys::Memory::MF_READ);
G->createSection(".data.__dso_handle", jitlink::MemProt::Read);
auto &DSOHandleBlock = G->createContentBlock(
DSOHandleSection, getDSOHandleContent(PointerSize), 0, 8, 0);
auto &DSOHandleSymbol = G->addDefinedSymbol(

View File

@@ -43,10 +43,9 @@ createJITLoaderGDBRegistrar(ExecutionSession &ES) {
ES, ExecutorAddr((*Result)[0][0]));
}
Error EPCDebugObjectRegistrar::registerDebugObject(sys::MemoryBlock TargetMem) {
return ES.callSPSWrapper<void(SPSExecutorAddr, uint64_t)>(
RegisterFn, ExecutorAddr::fromPtr(TargetMem.base()),
static_cast<uint64_t>(TargetMem.allocatedSize()));
Error EPCDebugObjectRegistrar::registerDebugObject(
ExecutorAddrRange TargetMem) {
return ES.callSPSWrapper<void(SPSExecutorAddrRange)>(RegisterFn, TargetMem);
}
} // namespace orc
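// A minimal sketch (not part of this commit) of adapting a raw address/size
// pair to the ExecutorAddrRange now taken by registerDebugObject. The
// registerRange name is an assumption.
static llvm::Error registerRange(llvm::orc::EPCDebugObjectRegistrar &Registrar,
                                 llvm::JITTargetAddress DebugObjAddr,
                                 uint64_t DebugObjSize) {
  using namespace llvm::orc;
  // Build the range from the debug object's start address and size.
  ExecutorAddrRange Range(ExecutorAddr(DebugObjAddr),
                          ExecutorAddrDiff(DebugObjSize));
  return Registrar.registerDebugObject(Range);
}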

View File

@@ -7,132 +7,168 @@
//===----------------------------------------------------------------------===//
#include "llvm/ExecutionEngine/Orc/EPCGenericJITLinkMemoryManager.h"
#include "llvm/ExecutionEngine/JITLink/JITLink.h"
#include "llvm/ExecutionEngine/Orc/LookupAndRecordAddrs.h"
#include "llvm/ExecutionEngine/Orc/Shared/OrcRTBridge.h"
#include <limits>
using namespace llvm::jitlink;
namespace llvm {
namespace orc {
class EPCGenericJITLinkMemoryManager::Alloc
: public jitlink::JITLinkMemoryManager::Allocation {
class EPCGenericJITLinkMemoryManager::InFlightAlloc
: public jitlink::JITLinkMemoryManager::InFlightAlloc {
public:
struct SegInfo {
char *WorkingMem = nullptr;
ExecutorAddr TargetAddr;
ExecutorAddr Addr;
uint64_t ContentSize = 0;
uint64_t ZeroFillSize = 0;
};
using SegInfoMap = DenseMap<unsigned, SegInfo>;
using SegInfoMap = AllocGroupSmallMap<SegInfo>;
Alloc(EPCGenericJITLinkMemoryManager &Parent, ExecutorAddr TargetAddr,
std::unique_ptr<char[]> WorkingBuffer, SegInfoMap Segs)
: Parent(Parent), TargetAddr(TargetAddr),
WorkingBuffer(std::move(WorkingBuffer)), Segs(std::move(Segs)) {}
InFlightAlloc(EPCGenericJITLinkMemoryManager &Parent, LinkGraph &G,
ExecutorAddr AllocAddr, SegInfoMap Segs)
: Parent(Parent), G(G), AllocAddr(AllocAddr), Segs(std::move(Segs)) {}
MutableArrayRef<char> getWorkingMemory(ProtectionFlags Seg) override {
auto I = Segs.find(Seg);
assert(I != Segs.end() && "No allocation for seg");
assert(I->second.ContentSize <= std::numeric_limits<size_t>::max());
return {I->second.WorkingMem, static_cast<size_t>(I->second.ContentSize)};
}
JITTargetAddress getTargetMemory(ProtectionFlags Seg) override {
auto I = Segs.find(Seg);
assert(I != Segs.end() && "No allocation for seg");
return I->second.TargetAddr.getValue();
}
void finalizeAsync(FinalizeContinuation OnFinalize) override {
char *WorkingMem = WorkingBuffer.get();
void finalize(OnFinalizedFunction OnFinalize) override {
tpctypes::FinalizeRequest FR;
for (auto &KV : Segs) {
assert(KV.second.ContentSize <= std::numeric_limits<size_t>::max());
FR.Segments.push_back(tpctypes::SegFinalizeRequest{
tpctypes::toWireProtectionFlags(
static_cast<sys::Memory::ProtectionFlags>(KV.first)),
KV.second.TargetAddr,
toSysMemoryProtectionFlags(KV.first.getMemProt())),
KV.second.Addr,
alignTo(KV.second.ContentSize + KV.second.ZeroFillSize,
Parent.EPC.getPageSize()),
{WorkingMem, static_cast<size_t>(KV.second.ContentSize)}});
WorkingMem += KV.second.ContentSize;
{KV.second.WorkingMem, static_cast<size_t>(KV.second.ContentSize)}});
}
// Transfer allocation actions.
// FIXME: Merge JITLink and ORC SupportFunctionCall and Action list types,
// turn this into a std::swap.
FR.Actions.reserve(G.allocActions().size());
for (auto &ActPair : G.allocActions())
FR.Actions.push_back(
{{ExecutorAddr(ActPair.Finalize.FnAddr),
ExecutorAddr(ActPair.Finalize.CtxAddr), ActPair.Finalize.CtxSize},
{ExecutorAddr(ActPair.Dealloc.FnAddr),
ExecutorAddr(ActPair.Dealloc.CtxAddr), ActPair.Dealloc.CtxSize}});
G.allocActions().clear();
Parent.EPC.callSPSWrapperAsync<
rt::SPSSimpleExecutorMemoryManagerFinalizeSignature>(
Parent.SAs.Finalize,
[OnFinalize = std::move(OnFinalize)](Error SerializationErr,
Error FinalizeErr) {
[OnFinalize = std::move(OnFinalize), AllocAddr = this->AllocAddr](
Error SerializationErr, Error FinalizeErr) mutable {
// FIXME: Release abandoned alloc.
if (SerializationErr) {
cantFail(std::move(FinalizeErr));
OnFinalize(std::move(SerializationErr));
} else
} else if (FinalizeErr)
OnFinalize(std::move(FinalizeErr));
else
OnFinalize(FinalizedAlloc(AllocAddr.getValue()));
},
Parent.SAs.Allocator, std::move(FR));
}
Error deallocate() override {
Error Err = Error::success();
if (auto E2 = Parent.EPC.callSPSWrapper<
rt::SPSSimpleExecutorMemoryManagerDeallocateSignature>(
Parent.SAs.Deallocate, Err, Parent.SAs.Allocator,
ArrayRef<ExecutorAddr>(TargetAddr)))
return E2;
return Err;
void abandon(OnAbandonedFunction OnAbandoned) override {
// FIXME: Return memory to pool instead.
Parent.EPC.callSPSWrapperAsync<
rt::SPSSimpleExecutorMemoryManagerDeallocateSignature>(
Parent.SAs.Deallocate,
[OnAbandoned = std::move(OnAbandoned)](Error SerializationErr,
Error DeallocateErr) mutable {
if (SerializationErr) {
cantFail(std::move(DeallocateErr));
OnAbandoned(std::move(SerializationErr));
} else
OnAbandoned(std::move(DeallocateErr));
},
Parent.SAs.Allocator, ArrayRef<ExecutorAddr>(AllocAddr));
}
private:
EPCGenericJITLinkMemoryManager &Parent;
ExecutorAddr TargetAddr;
std::unique_ptr<char[]> WorkingBuffer;
LinkGraph &G;
ExecutorAddr AllocAddr;
SegInfoMap Segs;
};
Expected<std::unique_ptr<jitlink::JITLinkMemoryManager::Allocation>>
EPCGenericJITLinkMemoryManager::allocate(const jitlink::JITLinkDylib *JD,
const SegmentsRequestMap &Request) {
Alloc::SegInfoMap Segs;
uint64_t AllocSize = 0;
size_t WorkingSize = 0;
for (auto &KV : Request) {
if (!isPowerOf2_64(KV.second.getAlignment()))
return make_error<StringError>("Alignment is not a power of two",
inconvertibleErrorCode());
if (KV.second.getAlignment() > EPC.getPageSize())
return make_error<StringError>("Alignment exceeds page size",
inconvertibleErrorCode());
void EPCGenericJITLinkMemoryManager::allocate(const JITLinkDylib *JD,
LinkGraph &G,
OnAllocatedFunction OnAllocated) {
BasicLayout BL(G);
auto &Seg = Segs[KV.first];
Seg.ContentSize = KV.second.getContentSize();
Seg.ZeroFillSize = KV.second.getZeroFillSize();
AllocSize += alignTo(Seg.ContentSize + Seg.ZeroFillSize, EPC.getPageSize());
WorkingSize += Seg.ContentSize;
}
auto Pages = BL.getContiguousPageBasedLayoutSizes(EPC.getPageSize());
if (!Pages)
return OnAllocated(Pages.takeError());
std::unique_ptr<char[]> WorkingBuffer;
if (WorkingSize > 0)
WorkingBuffer = std::make_unique<char[]>(WorkingSize);
Expected<ExecutorAddr> TargetAllocAddr((ExecutorAddr()));
if (auto Err = EPC.callSPSWrapper<
rt::SPSSimpleExecutorMemoryManagerReserveSignature>(
SAs.Reserve, TargetAllocAddr, SAs.Allocator, AllocSize))
return std::move(Err);
if (!TargetAllocAddr)
return TargetAllocAddr.takeError();
EPC.callSPSWrapperAsync<rt::SPSSimpleExecutorMemoryManagerReserveSignature>(
SAs.Reserve,
[this, BL = std::move(BL), OnAllocated = std::move(OnAllocated)](
Error SerializationErr, Expected<ExecutorAddr> AllocAddr) mutable {
if (SerializationErr) {
cantFail(AllocAddr.takeError());
return OnAllocated(std::move(SerializationErr));
}
if (!AllocAddr)
return OnAllocated(AllocAddr.takeError());
char *WorkingMem = WorkingBuffer.get();
JITTargetAddress SegAddr = TargetAllocAddr->getValue();
for (auto &KV : Segs) {
completeAllocation(*AllocAddr, std::move(BL), std::move(OnAllocated));
},
SAs.Allocator, Pages->total());
}
void EPCGenericJITLinkMemoryManager::deallocate(
std::vector<FinalizedAlloc> Allocs, OnDeallocatedFunction OnDeallocated) {
EPC.callSPSWrapperAsync<
rt::SPSSimpleExecutorMemoryManagerDeallocateSignature>(
SAs.Deallocate,
[OnDeallocated = std::move(OnDeallocated)](Error SerErr,
Error DeallocErr) mutable {
if (SerErr) {
cantFail(std::move(DeallocErr));
OnDeallocated(std::move(SerErr));
} else
OnDeallocated(std::move(DeallocErr));
},
SAs.Allocator, Allocs);
for (auto &A : Allocs)
A.release();
}
void EPCGenericJITLinkMemoryManager::completeAllocation(
ExecutorAddr AllocAddr, BasicLayout BL, OnAllocatedFunction OnAllocated) {
InFlightAlloc::SegInfoMap SegInfos;
ExecutorAddr NextSegAddr = AllocAddr;
for (auto &KV : BL.segments()) {
const auto &AG = KV.first;
auto &Seg = KV.second;
Seg.TargetAddr.setValue(SegAddr);
SegAddr += alignTo(Seg.ContentSize + Seg.ZeroFillSize, EPC.getPageSize());
Seg.WorkingMem = WorkingMem;
WorkingMem += Seg.ContentSize;
Seg.Addr = NextSegAddr.getValue();
KV.second.WorkingMem = BL.getGraph().allocateBuffer(Seg.ContentSize).data();
NextSegAddr += ExecutorAddrDiff(
alignTo(Seg.ContentSize + Seg.ZeroFillSize, EPC.getPageSize()));
auto &SegInfo = SegInfos[AG];
SegInfo.ContentSize = Seg.ContentSize;
SegInfo.ZeroFillSize = Seg.ZeroFillSize;
SegInfo.Addr = ExecutorAddr(Seg.Addr);
SegInfo.WorkingMem = Seg.WorkingMem;
}
return std::make_unique<Alloc>(*this, *TargetAllocAddr,
std::move(WorkingBuffer), std::move(Segs));
if (auto Err = BL.apply())
return OnAllocated(std::move(Err));
OnAllocated(std::make_unique<InFlightAlloc>(*this, BL.getGraph(), AllocAddr,
std::move(SegInfos)));
}
} // end namespace orc

View File

@@ -43,12 +43,12 @@ public:
protected:
Error grow() override;
using Allocation = jitlink::JITLinkMemoryManager::Allocation;
using FinalizedAlloc = jitlink::JITLinkMemoryManager::FinalizedAlloc;
EPCIndirectionUtils &EPCIU;
unsigned TrampolineSize = 0;
unsigned TrampolinesPerPage = 0;
std::vector<std::unique_ptr<Allocation>> TrampolineBlocks;
std::vector<FinalizedAlloc> TrampolineBlocks;
};
class EPCIndirectStubsManager : public IndirectStubsManager,
@@ -89,12 +89,19 @@ EPCTrampolinePool::EPCTrampolinePool(EPCIndirectionUtils &EPCIU)
Error EPCTrampolinePool::deallocatePool() {
Error Err = Error::success();
for (auto &Alloc : TrampolineBlocks)
Err = joinErrors(std::move(Err), Alloc->deallocate());
return Err;
std::promise<MSVCPError> DeallocResultP;
auto DeallocResultF = DeallocResultP.get_future();
EPCIU.getExecutorProcessControl().getMemMgr().deallocate(
std::move(TrampolineBlocks),
[&](Error Err) { DeallocResultP.set_value(std::move(Err)); });
return DeallocResultF.get();
}
Error EPCTrampolinePool::grow() {
using namespace jitlink;
assert(AvailableTrampolines.empty() &&
"Grow called with trampolines still available");
@@ -102,34 +109,26 @@ Error EPCTrampolinePool::grow() {
assert(ResolverAddress && "Resolver address can not be null");
auto &EPC = EPCIU.getExecutorProcessControl();
constexpr auto TrampolinePagePermissions =
static_cast<sys::Memory::ProtectionFlags>(sys::Memory::MF_READ |
sys::Memory::MF_EXEC);
auto PageSize = EPC.getPageSize();
jitlink::JITLinkMemoryManager::SegmentsRequestMap Request;
Request[TrampolinePagePermissions] = {PageSize, static_cast<size_t>(PageSize),
0};
auto Alloc = EPC.getMemMgr().allocate(nullptr, Request);
auto Alloc = SimpleSegmentAlloc::Create(
EPC.getMemMgr(), nullptr,
{{MemProt::Read | MemProt::Exec, {PageSize, Align(PageSize)}}});
if (!Alloc)
return Alloc.takeError();
unsigned NumTrampolines = TrampolinesPerPage;
auto WorkingMemory = (*Alloc)->getWorkingMemory(TrampolinePagePermissions);
auto TargetAddress = (*Alloc)->getTargetMemory(TrampolinePagePermissions);
EPCIU.getABISupport().writeTrampolines(WorkingMemory.data(), TargetAddress,
ResolverAddress, NumTrampolines);
auto TargetAddr = (*Alloc)->getTargetMemory(TrampolinePagePermissions);
auto SegInfo = Alloc->getSegInfo(MemProt::Read | MemProt::Exec);
EPCIU.getABISupport().writeTrampolines(
SegInfo.WorkingMem.data(), SegInfo.Addr, ResolverAddress, NumTrampolines);
for (unsigned I = 0; I < NumTrampolines; ++I)
AvailableTrampolines.push_back(TargetAddr + (I * TrampolineSize));
AvailableTrampolines.push_back(SegInfo.Addr + (I * TrampolineSize));
if (auto Err = (*Alloc)->finalize())
return Err;
auto FA = Alloc->finalize();
if (!FA)
return FA.takeError();
TrampolineBlocks.push_back(std::move(*Alloc));
TrampolineBlocks.push_back(std::move(*FA));
return Error::success();
}
@@ -267,17 +266,17 @@ EPCIndirectionUtils::Create(ExecutorProcessControl &EPC) {
}
Error EPCIndirectionUtils::cleanup() {
Error Err = Error::success();
for (auto &A : IndirectStubAllocs)
Err = joinErrors(std::move(Err), A->deallocate());
auto &MemMgr = EPC.getMemMgr();
auto Err = MemMgr.deallocate(std::move(IndirectStubAllocs));
if (TP)
Err = joinErrors(std::move(Err),
static_cast<EPCTrampolinePool &>(*TP).deallocatePool());
if (ResolverBlock)
Err = joinErrors(std::move(Err), ResolverBlock->deallocate());
Err =
joinErrors(std::move(Err), MemMgr.deallocate(std::move(ResolverBlock)));
return Err;
}
@@ -285,29 +284,29 @@ Error EPCIndirectionUtils::cleanup() {
Expected<JITTargetAddress>
EPCIndirectionUtils::writeResolverBlock(JITTargetAddress ReentryFnAddr,
JITTargetAddress ReentryCtxAddr) {
using namespace jitlink;
assert(ABI && "ABI can not be null");
constexpr auto ResolverBlockPermissions =
static_cast<sys::Memory::ProtectionFlags>(sys::Memory::MF_READ |
sys::Memory::MF_EXEC);
auto ResolverSize = ABI->getResolverCodeSize();
jitlink::JITLinkMemoryManager::SegmentsRequestMap Request;
Request[ResolverBlockPermissions] = {EPC.getPageSize(),
static_cast<size_t>(ResolverSize), 0};
auto Alloc = EPC.getMemMgr().allocate(nullptr, Request);
auto Alloc =
SimpleSegmentAlloc::Create(EPC.getMemMgr(), nullptr,
{{MemProt::Read | MemProt::Exec,
{ResolverSize, Align(EPC.getPageSize())}}});
if (!Alloc)
return Alloc.takeError();
auto WorkingMemory = (*Alloc)->getWorkingMemory(ResolverBlockPermissions);
ResolverBlockAddr = (*Alloc)->getTargetMemory(ResolverBlockPermissions);
ABI->writeResolverCode(WorkingMemory.data(), ResolverBlockAddr, ReentryFnAddr,
auto SegInfo = Alloc->getSegInfo(MemProt::Read | MemProt::Exec);
ABI->writeResolverCode(SegInfo.WorkingMem.data(), SegInfo.Addr, ReentryFnAddr,
ReentryCtxAddr);
if (auto Err = (*Alloc)->finalize())
return std::move(Err);
auto FA = Alloc->finalize();
if (!FA)
return FA.takeError();
ResolverBlock = std::move(*Alloc);
return ResolverBlockAddr;
ResolverBlock = std::move(*FA);
return SegInfo.Addr;
}
std::unique_ptr<IndirectStubsManager>
@@ -341,6 +340,7 @@ EPCIndirectionUtils::EPCIndirectionUtils(ExecutorProcessControl &EPC,
Expected<EPCIndirectionUtils::IndirectStubInfoVector>
EPCIndirectionUtils::getIndirectStubs(unsigned NumStubs) {
using namespace jitlink;
std::lock_guard<std::mutex> Lock(EPCUIMutex);
@@ -350,42 +350,40 @@ EPCIndirectionUtils::getIndirectStubs(unsigned NumStubs) {
auto PageSize = EPC.getPageSize();
auto StubBytes = alignTo(NumStubsToAllocate * ABI->getStubSize(), PageSize);
NumStubsToAllocate = StubBytes / ABI->getStubSize();
auto PointerBytes =
auto PtrBytes =
alignTo(NumStubsToAllocate * ABI->getPointerSize(), PageSize);
constexpr auto StubPagePermissions =
static_cast<sys::Memory::ProtectionFlags>(sys::Memory::MF_READ |
sys::Memory::MF_EXEC);
constexpr auto PointerPagePermissions =
static_cast<sys::Memory::ProtectionFlags>(sys::Memory::MF_READ |
sys::Memory::MF_WRITE);
auto StubProt = MemProt::Read | MemProt::Exec;
auto PtrProt = MemProt::Read | MemProt::Write;
auto Alloc = SimpleSegmentAlloc::Create(
EPC.getMemMgr(), nullptr,
{{StubProt, {static_cast<size_t>(StubBytes), Align(PageSize)}},
{PtrProt, {PtrBytes, Align(PageSize)}}});
jitlink::JITLinkMemoryManager::SegmentsRequestMap Request;
Request[StubPagePermissions] = {PageSize, static_cast<size_t>(StubBytes),
0};
Request[PointerPagePermissions] = {PageSize, 0, PointerBytes};
auto Alloc = EPC.getMemMgr().allocate(nullptr, Request);
if (!Alloc)
return Alloc.takeError();
auto StubTargetAddr = (*Alloc)->getTargetMemory(StubPagePermissions);
auto PointerTargetAddr = (*Alloc)->getTargetMemory(PointerPagePermissions);
auto StubSeg = Alloc->getSegInfo(StubProt);
auto PtrSeg = Alloc->getSegInfo(PtrProt);
ABI->writeIndirectStubsBlock(
(*Alloc)->getWorkingMemory(StubPagePermissions).data(), StubTargetAddr,
PointerTargetAddr, NumStubsToAllocate);
ABI->writeIndirectStubsBlock(StubSeg.WorkingMem.data(), StubSeg.Addr,
PtrSeg.Addr, NumStubsToAllocate);
if (auto Err = (*Alloc)->finalize())
return std::move(Err);
auto FA = Alloc->finalize();
if (!FA)
return FA.takeError();
IndirectStubAllocs.push_back(std::move(*FA));
auto StubExecutorAddr = StubSeg.Addr;
auto PtrExecutorAddr = PtrSeg.Addr;
for (unsigned I = 0; I != NumStubsToAllocate; ++I) {
AvailableIndirectStubs.push_back(
IndirectStubInfo(StubTargetAddr, PointerTargetAddr));
StubTargetAddr += ABI->getStubSize();
PointerTargetAddr += ABI->getPointerSize();
IndirectStubInfo(StubExecutorAddr, PtrExecutorAddr));
StubExecutorAddr += ABI->getStubSize();
PtrExecutorAddr += ABI->getPointerSize();
}
IndirectStubAllocs.push_back(std::move(*Alloc));
}
assert(NumStubs <= AvailableIndirectStubs.size() &&

View File

@@ -31,7 +31,8 @@ SelfExecutorProcessControl::SelfExecutorProcessControl(
OwnedMemMgr = std::move(MemMgr);
if (!OwnedMemMgr)
OwnedMemMgr = std::make_unique<jitlink::InProcessMemoryManager>();
OwnedMemMgr = std::make_unique<jitlink::InProcessMemoryManager>(
sys::Process::getPageSizeEstimate());
this->TargetTriple = std::move(TargetTriple);
this->PageSize = PageSize;

View File

@@ -53,7 +53,7 @@ public:
auto G = std::make_unique<jitlink::LinkGraph>(
"<MachOHeaderMU>", TT, PointerSize, Endianness,
jitlink::getGenericEdgeKindName);
auto &HeaderSection = G->createSection("__header", sys::Memory::MF_READ);
auto &HeaderSection = G->createSection("__header", jitlink::MemProt::Read);
auto &HeaderBlock = createHeaderBlock(*G, HeaderSection);
// Init symbol is header-start symbol.

View File

@@ -306,8 +306,7 @@ public:
return Error::success();
}
void notifyFinalized(
std::unique_ptr<JITLinkMemoryManager::Allocation> A) override {
void notifyFinalized(JITLinkMemoryManager::FinalizedAlloc A) override {
if (auto Err = Layer.notifyEmitted(*MR, std::move(A))) {
Layer.getExecutionSession().reportError(std::move(Err));
MR->failMaterialization();
@@ -680,7 +679,7 @@ void ObjectLinkingLayer::notifyLoaded(MaterializationResponsibility &MR) {
}
Error ObjectLinkingLayer::notifyEmitted(MaterializationResponsibility &MR,
AllocPtr Alloc) {
FinalizedAlloc FA) {
Error Err = Error::success();
for (auto &P : Plugins)
Err = joinErrors(std::move(Err), P->notifyEmitted(MR));
@@ -689,17 +688,20 @@ Error ObjectLinkingLayer::notifyEmitted(MaterializationResponsibility &MR,
return Err;
return MR.withResourceKeyDo(
[&](ResourceKey K) { Allocs[K].push_back(std::move(Alloc)); });
[&](ResourceKey K) { Allocs[K].push_back(std::move(FA)); });
}
Error ObjectLinkingLayer::handleRemoveResources(ResourceKey K) {
Error Err = Error::success();
{
Error Err = Error::success();
for (auto &P : Plugins)
Err = joinErrors(std::move(Err), P->notifyRemovingResources(K));
if (Err)
return Err;
}
for (auto &P : Plugins)
Err = joinErrors(std::move(Err), P->notifyRemovingResources(K));
std::vector<AllocPtr> AllocsToRemove;
std::vector<FinalizedAlloc> AllocsToRemove;
getExecutionSession().runSessionLocked([&] {
auto I = Allocs.find(K);
if (I != Allocs.end()) {
@@ -708,12 +710,7 @@ Error ObjectLinkingLayer::handleRemoveResources(ResourceKey K) {
}
});
while (!AllocsToRemove.empty()) {
Err = joinErrors(std::move(Err), AllocsToRemove.back()->deallocate());
AllocsToRemove.pop_back();
}
return Err;
return MemMgr.deallocate(std::move(AllocsToRemove));
}
void ObjectLinkingLayer::handleTransferResources(ResourceKey DstKey,

View File

@@ -64,15 +64,16 @@ LLVM_ATTRIBUTE_NOINLINE void __jit_debug_register_code() {
}
using namespace llvm;
using namespace llvm::orc;
// Serialize rendezvous with the debugger as well as access to shared data.
ManagedStatic<std::mutex> JITDebugLock;
// Register debug object, return error message or null for success.
static void registerJITLoaderGDBImpl(JITTargetAddress Addr, uint64_t Size) {
static void registerJITLoaderGDBImpl(ExecutorAddrRange DebugObjRange) {
jit_code_entry *E = new jit_code_entry;
E->symfile_addr = jitTargetAddressToPointer<const char *>(Addr);
E->symfile_size = Size;
E->symfile_addr = DebugObjRange.Start.toPtr<const char *>();
E->symfile_size = DebugObjRange.size().getValue();
E->prev_entry = nullptr;
std::lock_guard<std::mutex> Lock(*JITDebugLock);
@@ -95,7 +96,7 @@ static void registerJITLoaderGDBImpl(JITTargetAddress Addr, uint64_t Size) {
extern "C" orc::shared::detail::CWrapperFunctionResult
llvm_orc_registerJITLoaderGDBWrapper(const char *Data, uint64_t Size) {
using namespace orc::shared;
return WrapperFunction<void(SPSExecutorAddr, uint64_t)>::handle(
return WrapperFunction<void(SPSExecutorAddrRange)>::handle(
Data, Size, registerJITLoaderGDBImpl)
.release();
}

View File

@@ -357,6 +357,12 @@ static void dumpSectionContents(raw_ostream &OS, LinkGraph &G) {
}
class JITLinkSlabAllocator final : public JITLinkMemoryManager {
private:
struct FinalizedAllocInfo {
sys::MemoryBlock Mem;
std::vector<AllocActionCall> DeallocActions;
};
public:
static Expected<std::unique_ptr<JITLinkSlabAllocator>>
Create(uint64_t SlabSize) {
@@ -368,101 +374,161 @@ public:
return std::move(Allocator);
}
Expected<std::unique_ptr<JITLinkMemoryManager::Allocation>>
allocate(const JITLinkDylib *JD, const SegmentsRequestMap &Request) override {
using AllocationMap = DenseMap<unsigned, sys::MemoryBlock>;
void allocate(const JITLinkDylib *JD, LinkGraph &G,
OnAllocatedFunction OnAllocated) override {
// Local class for allocation.
class IPMMAlloc : public Allocation {
class IPMMAlloc : public InFlightAlloc {
public:
IPMMAlloc(JITLinkSlabAllocator &Parent, AllocationMap SegBlocks)
: Parent(Parent), SegBlocks(std::move(SegBlocks)) {}
MutableArrayRef<char> getWorkingMemory(ProtectionFlags Seg) override {
assert(SegBlocks.count(Seg) && "No allocation for segment");
return {static_cast<char *>(SegBlocks[Seg].base()),
SegBlocks[Seg].allocatedSize()};
IPMMAlloc(JITLinkSlabAllocator &Parent, BasicLayout BL,
sys::MemoryBlock StandardSegs, sys::MemoryBlock FinalizeSegs)
: Parent(Parent), BL(std::move(BL)),
StandardSegs(std::move(StandardSegs)),
FinalizeSegs(std::move(FinalizeSegs)) {}
void finalize(OnFinalizedFunction OnFinalized) override {
if (auto Err = applyProtections()) {
OnFinalized(std::move(Err));
return;
}
// FIXME: Run finalize actions.
assert(BL.graphAllocActions().empty() &&
"Support function calls not supported yet");
OnFinalized(FinalizedAlloc(
pointerToJITTargetAddress(new FinalizedAllocInfo())));
}
JITTargetAddress getTargetMemory(ProtectionFlags Seg) override {
assert(SegBlocks.count(Seg) && "No allocation for segment");
return pointerToJITTargetAddress(SegBlocks[Seg].base()) +
Parent.TargetDelta;
}
void finalizeAsync(FinalizeContinuation OnFinalize) override {
OnFinalize(applyProtections());
}
Error deallocate() override {
for (auto &KV : SegBlocks)
if (auto EC = sys::Memory::releaseMappedMemory(KV.second))
return errorCodeToError(EC);
return Error::success();
void abandon(OnAbandonedFunction OnAbandoned) override {
OnAbandoned(joinErrors(Parent.freeBlock(StandardSegs),
Parent.freeBlock(FinalizeSegs)));
}
private:
Error applyProtections() {
for (auto &KV : SegBlocks) {
auto &Prot = KV.first;
auto &Block = KV.second;
if (auto EC = sys::Memory::protectMappedMemory(Block, Prot))
for (auto &KV : BL.segments()) {
const auto &Group = KV.first;
auto &Seg = KV.second;
auto Prot = toSysMemoryProtectionFlags(Group.getMemProt());
uint64_t SegSize =
alignTo(Seg.ContentSize + Seg.ZeroFillSize, Parent.PageSize);
sys::MemoryBlock MB(Seg.WorkingMem, SegSize);
if (auto EC = sys::Memory::protectMappedMemory(MB, Prot))
return errorCodeToError(EC);
if (Prot & sys::Memory::MF_EXEC)
sys::Memory::InvalidateInstructionCache(Block.base(),
Block.allocatedSize());
sys::Memory::InvalidateInstructionCache(MB.base(),
MB.allocatedSize());
}
return Error::success();
}
JITLinkSlabAllocator &Parent;
AllocationMap SegBlocks;
BasicLayout BL;
sys::MemoryBlock StandardSegs;
sys::MemoryBlock FinalizeSegs;
};
AllocationMap Blocks;
BasicLayout BL(G);
auto SegsSizes = BL.getContiguousPageBasedLayoutSizes(PageSize);
for (auto &KV : Request) {
if (!SegsSizes) {
OnAllocated(SegsSizes.takeError());
return;
}
char *AllocBase = 0;
{
std::lock_guard<std::mutex> Lock(SlabMutex);
if (SegsSizes->total() > SlabRemaining.allocatedSize()) {
OnAllocated(make_error<StringError>(
"Slab allocator out of memory: request for " +
formatv("{0:x}", SegsSizes->total()) +
" bytes exceeds remaining capacity of " +
formatv("{0:x}", SlabRemaining.allocatedSize()) + " bytes",
inconvertibleErrorCode()));
return;
}
AllocBase = reinterpret_cast<char *>(SlabRemaining.base());
SlabRemaining =
sys::MemoryBlock(AllocBase + SegsSizes->total(),
SlabRemaining.allocatedSize() - SegsSizes->total());
}
sys::MemoryBlock StandardSegs(AllocBase, SegsSizes->StandardSegs);
sys::MemoryBlock FinalizeSegs(AllocBase + SegsSizes->StandardSegs,
SegsSizes->FinalizeSegs);
auto NextStandardSegAddr = pointerToJITTargetAddress(StandardSegs.base());
auto NextFinalizeSegAddr = pointerToJITTargetAddress(FinalizeSegs.base());
LLVM_DEBUG({
dbgs() << "JITLinkSlabAllocator allocated:\n";
if (SegsSizes->StandardSegs)
dbgs() << formatv(" [ {0:x16} -- {1:x16} ]", NextStandardSegAddr,
NextStandardSegAddr + StandardSegs.allocatedSize())
<< " to stardard segs\n";
else
dbgs() << " no standard segs\n";
if (SegsSizes->FinalizeSegs)
dbgs() << formatv(" [ {0:x16} -- {1:x16} ]", NextFinalizeSegAddr,
NextFinalizeSegAddr + FinalizeSegs.allocatedSize())
<< " to finalize segs\n";
else
dbgs() << " no finalize segs\n";
});
for (auto &KV : BL.segments()) {
auto &Group = KV.first;
auto &Seg = KV.second;
if (Seg.getAlignment() > PageSize)
return make_error<StringError>("Cannot request higher than page "
"alignment",
inconvertibleErrorCode());
auto &SegAddr =
(Group.getMemDeallocPolicy() == MemDeallocPolicy::Standard)
? NextStandardSegAddr
: NextFinalizeSegAddr;
if (PageSize % Seg.getAlignment() != 0)
return make_error<StringError>("Page size is not a multiple of "
"alignment",
inconvertibleErrorCode());
LLVM_DEBUG({
dbgs() << " " << Group << " -> " << formatv("{0:x16}", SegAddr)
<< "\n";
});
Seg.WorkingMem = jitTargetAddressToPointer<char *>(SegAddr);
Seg.Addr = SegAddr + NextSlabDelta;
uint64_t ZeroFillStart = Seg.getContentSize();
uint64_t SegmentSize = ZeroFillStart + Seg.getZeroFillSize();
// Round segment size up to page boundary.
SegmentSize = (SegmentSize + PageSize - 1) & ~(PageSize - 1);
// Take segment bytes from the front of the slab.
void *SlabBase = SlabRemaining.base();
uint64_t SlabRemainingSize = SlabRemaining.allocatedSize();
if (SegmentSize > SlabRemainingSize)
return make_error<StringError>(
"Slab allocator out of memory: request for " +
formatv("{0:x}", SegmentSize) +
" bytes exceeds remaining capacity of " +
formatv("{0:x}", SlabRemainingSize) + " bytes",
inconvertibleErrorCode());
sys::MemoryBlock SegMem(SlabBase, SegmentSize);
SlabRemaining =
sys::MemoryBlock(reinterpret_cast<char *>(SlabBase) + SegmentSize,
SlabRemainingSize - SegmentSize);
SegAddr += alignTo(Seg.ContentSize + Seg.ZeroFillSize, PageSize);
// Zero out the zero-fill memory.
memset(static_cast<char *>(SegMem.base()) + ZeroFillStart, 0,
Seg.getZeroFillSize());
// Record the block for this segment.
Blocks[KV.first] = std::move(SegMem);
if (Seg.ZeroFillSize != 0)
memset(Seg.WorkingMem + Seg.ContentSize, 0, Seg.ZeroFillSize);
}
return std::unique_ptr<InProcessMemoryManager::Allocation>(
new IPMMAlloc(*this, std::move(Blocks)));
NextSlabDelta += SegsSizes->total();
if (auto Err = BL.apply()) {
OnAllocated(std::move(Err));
return;
}
OnAllocated(std::unique_ptr<InProcessMemoryManager::InFlightAlloc>(
new IPMMAlloc(*this, std::move(BL), std::move(StandardSegs),
std::move(FinalizeSegs))));
}
void deallocate(std::vector<FinalizedAlloc> FinalizedAllocs,
OnDeallocatedFunction OnDeallocated) override {
Error Err = Error::success();
for (auto &FA : FinalizedAllocs) {
std::unique_ptr<FinalizedAllocInfo> FAI(
jitTargetAddressToPointer<FinalizedAllocInfo *>(FA.release()));
// FIXME: Run dealloc actions.
Err = joinErrors(std::move(Err), freeBlock(FAI->Mem));
}
OnDeallocated(std::move(Err));
}
private:
@@ -510,13 +576,19 @@ private:
// Calculate the target address delta to link as-if slab were at
// SlabAddress.
if (SlabAddress != ~0ULL)
TargetDelta =
NextSlabDelta =
SlabAddress - pointerToJITTargetAddress(SlabRemaining.base());
}
Error freeBlock(sys::MemoryBlock MB) {
// FIXME: Return memory to slab.
return Error::success();
}
std::mutex SlabMutex;
sys::MemoryBlock SlabRemaining;
uint64_t PageSize = 0;
int64_t TargetDelta = 0;
int64_t NextSlabDelta = 0;
};
Expected<uint64_t> getSlabAllocSize(StringRef SizeString) {
@@ -547,7 +619,7 @@ static std::unique_ptr<JITLinkMemoryManager> createMemoryManager() {
auto SlabSize = ExitOnErr(getSlabAllocSize(SlabAllocateSizeString));
return ExitOnErr(JITLinkSlabAllocator::Create(SlabSize));
}
return std::make_unique<InProcessMemoryManager>();
return ExitOnErr(InProcessMemoryManager::Create());
}
LLVMJITLinkObjectLinkingLayer::LLVMJITLinkObjectLinkingLayer(

View File

@@ -15,9 +15,6 @@
using namespace llvm;
using namespace llvm::jitlink;
static auto RWFlags =
sys::Memory::ProtectionFlags(sys::Memory::MF_READ | sys::Memory::MF_WRITE);
static const char BlockContentBytes[] = {
0x54, 0x68, 0x65, 0x72, 0x65, 0x20, 0x77, 0x61, 0x73, 0x20, 0x6d, 0x6f,
0x76, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x20, 0x61, 0x74, 0x20, 0x74, 0x68,
@@ -78,7 +75,7 @@ TEST(LinkGraphTest, AddressAccess) {
LinkGraph G("foo", Triple("x86_64-apple-darwin"), 8, support::little,
getGenericEdgeKindName);
auto &Sec1 = G.createSection("__data.1", RWFlags);
auto &Sec1 = G.createSection("__data.1", MemProt::Read | MemProt::Write);
auto &B1 = G.createContentBlock(Sec1, BlockContent, 0x1000, 8, 0);
auto &S1 = G.addDefinedSymbol(B1, 4, "S1", 4, Linkage::Strong, Scope::Default,
false, false);
@@ -94,7 +91,7 @@ TEST(LinkGraphTest, BlockAndSymbolIteration) {
// Check that we can iterate over blocks within Sections and across sections.
LinkGraph G("foo", Triple("x86_64-apple-darwin"), 8, support::little,
getGenericEdgeKindName);
auto &Sec1 = G.createSection("__data.1", RWFlags);
auto &Sec1 = G.createSection("__data.1", MemProt::Read | MemProt::Write);
auto &B1 = G.createContentBlock(Sec1, BlockContent, 0x1000, 8, 0);
auto &B2 = G.createContentBlock(Sec1, BlockContent, 0x2000, 8, 0);
auto &S1 = G.addDefinedSymbol(B1, 0, "S1", 4, Linkage::Strong, Scope::Default,
@@ -102,7 +99,7 @@ TEST(LinkGraphTest, BlockAndSymbolIteration) {
auto &S2 = G.addDefinedSymbol(B2, 4, "S2", 4, Linkage::Strong, Scope::Default,
false, false);
auto &Sec2 = G.createSection("__data.2", RWFlags);
auto &Sec2 = G.createSection("__data.2", MemProt::Read | MemProt::Write);
auto &B3 = G.createContentBlock(Sec2, BlockContent, 0x3000, 8, 0);
auto &B4 = G.createContentBlock(Sec2, BlockContent, 0x4000, 8, 0);
auto &S3 = G.addDefinedSymbol(B3, 0, "S3", 4, Linkage::Strong, Scope::Default,
@@ -141,7 +138,7 @@ TEST(LinkGraphTest, ContentAccessAndUpdate) {
// Check that we can access and update a block's content.
LinkGraph G("foo", Triple("x86_64-apple-darwin"), 8, support::little,
getGenericEdgeKindName);
auto &Sec = G.createSection("__data", RWFlags);
auto &Sec = G.createSection("__data", MemProt::Read | MemProt::Write);
// Create an initial block.
auto &B = G.createContentBlock(Sec, BlockContent, 0x1000, 8, 0);
@@ -208,7 +205,7 @@ TEST(LinkGraphTest, MakeExternal) {
// Check that we can make a defined symbol external.
LinkGraph G("foo", Triple("x86_64-apple-darwin"), 8, support::little,
getGenericEdgeKindName);
auto &Sec = G.createSection("__data", RWFlags);
auto &Sec = G.createSection("__data", MemProt::Read | MemProt::Write);
// Create an initial block.
auto &B1 = G.createContentBlock(Sec, BlockContent, 0x1000, 8, 0);
@@ -253,7 +250,7 @@ TEST(LinkGraphTest, MakeDefined) {
// Check that we can make an external symbol defined.
LinkGraph G("foo", Triple("x86_64-apple-darwin"), 8, support::little,
getGenericEdgeKindName);
auto &Sec = G.createSection("__data", RWFlags);
auto &Sec = G.createSection("__data", MemProt::Read | MemProt::Write);
// Create an initial block.
auto &B1 = G.createContentBlock(Sec, BlockContent, 0x1000, 8, 0);
@@ -297,7 +294,7 @@ TEST(LinkGraphTest, TransferDefinedSymbol) {
// Check that we can transfer a defined symbol from one block to another.
LinkGraph G("foo", Triple("x86_64-apple-darwin"), 8, support::little,
getGenericEdgeKindName);
auto &Sec = G.createSection("__data", RWFlags);
auto &Sec = G.createSection("__data", MemProt::Read | MemProt::Write);
// Create an initial block.
auto &B1 = G.createContentBlock(Sec, BlockContent, 0x1000, 8, 0);
@@ -328,8 +325,8 @@ TEST(LinkGraphTest, TransferDefinedSymbolAcrossSections) {
// section to another.
LinkGraph G("foo", Triple("x86_64-apple-darwin"), 8, support::little,
getGenericEdgeKindName);
auto &Sec1 = G.createSection("__data.1", RWFlags);
auto &Sec2 = G.createSection("__data.2", RWFlags);
auto &Sec1 = G.createSection("__data.1", MemProt::Read | MemProt::Write);
auto &Sec2 = G.createSection("__data.2", MemProt::Read | MemProt::Write);
// Create blocks in each section.
auto &B1 = G.createContentBlock(Sec1, BlockContent, 0x1000, 8, 0);
@@ -357,8 +354,8 @@ TEST(LinkGraphTest, TransferBlock) {
// section to another.
LinkGraph G("foo", Triple("x86_64-apple-darwin"), 8, support::little,
getGenericEdgeKindName);
auto &Sec1 = G.createSection("__data.1", RWFlags);
auto &Sec2 = G.createSection("__data.2", RWFlags);
auto &Sec1 = G.createSection("__data.1", MemProt::Read | MemProt::Write);
auto &Sec2 = G.createSection("__data.2", MemProt::Read | MemProt::Write);
// Create an initial block.
auto &B1 = G.createContentBlock(Sec1, BlockContent, 0x1000, 8, 0);
@@ -401,9 +398,9 @@ TEST(LinkGraphTest, MergeSections) {
// section to another.
LinkGraph G("foo", Triple("x86_64-apple-darwin"), 8, support::little,
getGenericEdgeKindName);
auto &Sec1 = G.createSection("__data.1", RWFlags);
auto &Sec2 = G.createSection("__data.2", RWFlags);
auto &Sec3 = G.createSection("__data.3", RWFlags);
auto &Sec1 = G.createSection("__data.1", MemProt::Read | MemProt::Write);
auto &Sec2 = G.createSection("__data.2", MemProt::Read | MemProt::Write);
auto &Sec3 = G.createSection("__data.3", MemProt::Read | MemProt::Write);
// Create an initial block.
auto &B1 = G.createContentBlock(Sec1, BlockContent, 0x1000, 8, 0);
@@ -475,7 +472,7 @@ TEST(LinkGraphTest, SplitBlock) {
// Check that LinkGraph::splitBlock works as expected.
LinkGraph G("foo", Triple("x86_64-apple-darwin"), 8, support::little,
getGenericEdgeKindName);
auto &Sec = G.createSection("__data", RWFlags);
auto &Sec = G.createSection("__data", MemProt::Read | MemProt::Write);
// Create the block to split.
auto &B1 = G.createContentBlock(Sec, BlockContent, 0x1000, 8, 0);
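
Every change in this test file is the same mechanical substitution: the file-local RWFlags constant built from sys::Memory::ProtectionFlags is gone, and sections take the MemProt bitfield from MemoryFlags.h directly. A sketch of the idiom, given a LinkGraph G as in the tests (MemProt::Exec assumed to be the third protection bit alongside Read and Write):

using namespace llvm::jitlink;
auto &Text = G.createSection("__text", MemProt::Read | MemProt::Exec);   // r-x
auto &Data = G.createSection("__data", MemProt::Read | MemProt::Write);  // rw-
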

View File

@@ -116,27 +116,24 @@ TEST(EPCGenericJITLinkMemoryManagerTest, AllocFinalizeFree) {
auto MemMgr = std::make_unique<EPCGenericJITLinkMemoryManager>(*SelfEPC, SAs);
jitlink::JITLinkMemoryManager::SegmentsRequestMap SRM;
StringRef Hello = "hello";
SRM[sys::Memory::MF_READ] = {8, Hello.size(), 8};
auto Alloc = MemMgr->allocate(nullptr, SRM);
EXPECT_THAT_EXPECTED(Alloc, Succeeded());
auto SSA = jitlink::SimpleSegmentAlloc::Create(
*MemMgr, nullptr, {{jitlink::MemProt::Read, {Hello.size(), Align(1)}}});
EXPECT_THAT_EXPECTED(SSA, Succeeded());
auto SegInfo = SSA->getSegInfo(jitlink::MemProt::Read);
memcpy(SegInfo.WorkingMem.data(), Hello.data(), Hello.size());
MutableArrayRef<char> WorkingMem =
(*Alloc)->getWorkingMemory(sys::Memory::MF_READ);
memcpy(WorkingMem.data(), Hello.data(), Hello.size());
auto FA = SSA->finalize();
EXPECT_THAT_EXPECTED(FA, Succeeded());
auto Err = (*Alloc)->finalize();
EXPECT_THAT_ERROR(std::move(Err), Succeeded());
ExecutorAddr TargetAddr((*Alloc)->getTargetMemory(sys::Memory::MF_READ));
ExecutorAddr TargetAddr(SegInfo.Addr);
const char *TargetMem = TargetAddr.toPtr<const char *>();
EXPECT_NE(TargetMem, WorkingMem.data());
EXPECT_NE(TargetMem, SegInfo.WorkingMem.data());
StringRef TargetHello(TargetMem, Hello.size());
EXPECT_EQ(Hello, TargetHello);
auto Err2 = (*Alloc)->deallocate();
auto Err2 = MemMgr->deallocate(std::move(*FA));
EXPECT_THAT_ERROR(std::move(Err2), Succeeded());
}
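
The rewritten test walks the full lifecycle of the new API through SimpleSegmentAlloc. The same pattern for a read/write segment, written as the body of a function returning llvm::Error (MemMgr and Payload are stand-ins; only calls exercised in the test above are used):

auto RW = jitlink::MemProt::Read | jitlink::MemProt::Write;
auto SSA = jitlink::SimpleSegmentAlloc::Create(
    *MemMgr, /*JD=*/nullptr, {{RW, {Payload.size(), Align(1)}}});
if (!SSA)
  return SSA.takeError();

// Fill the working memory, then finalize to copy/protect it in the target.
auto SegInfo = SSA->getSegInfo(RW);
memcpy(SegInfo.WorkingMem.data(), Payload.data(), Payload.size());
auto FA = SSA->finalize();
if (!FA)
  return FA.takeError();

// ...the content is now live at SegInfo.Addr...

// Hand the finalized allocation back to the memory manager when done.
return MemMgr->deallocate(std::move(*FA));
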

View File

@@ -19,9 +19,6 @@ using namespace llvm::orc;
namespace {
auto RWFlags =
sys::Memory::ProtectionFlags(sys::Memory::MF_READ | sys::Memory::MF_WRITE);
const char BlockContentBytes[] = {0x01, 0x02, 0x03, 0x04,
0x05, 0x06, 0x07, 0x08};
@@ -38,7 +35,7 @@ protected:
ExecutionSession ES{std::make_unique<UnsupportedExecutorProcessControl>()};
JITDylib &JD = ES.createBareJITDylib("main");
ObjectLinkingLayer ObjLinkingLayer{
ES, std::make_unique<InProcessMemoryManager>()};
ES, std::make_unique<InProcessMemoryManager>(4096)};
};
TEST_F(ObjectLinkingLayerTest, AddLinkGraph) {
@@ -46,7 +43,7 @@ TEST_F(ObjectLinkingLayerTest, AddLinkGraph) {
std::make_unique<LinkGraph>("foo", Triple("x86_64-apple-darwin"), 8,
support::little, x86_64::getEdgeKindName);
auto &Sec1 = G->createSection("__data", RWFlags);
auto &Sec1 = G->createSection("__data", MemProt::Read | MemProt::Write);
auto &B1 = G->createContentBlock(Sec1, BlockContent, 0x1000, 8, 0);
G->addDefinedSymbol(B1, 4, "_X", 4, Linkage::Strong, Scope::Default, false,
false);
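
Note that the fixture keeps the direct InProcessMemoryManager constructor rather than the new Create() factory: with the page size pinned to 4096 there is nothing fallible to handle. A one-line sketch of that choice (the claim that Create() derives the page size from the host is an assumption here):

// 4 KiB pages assumed, matching the test fixture above.
auto MemMgr = std::make_unique<jitlink::InProcessMemoryManager>(4096);
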