clang-p2996/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUAsmBackend.cpp
Alex Bradbury b22f751fa7 Thread MCSubtargetInfo through Target::createMCAsmBackend
Currently it's not possible to access MCSubtargetInfo from a TgtMCAsmBackend. 
D20830 threaded an MCSubtargetInfo reference through 
MCAsmBackend::relaxInstruction, but this isn't the only function that would 
benefit from access. This patch removes the Triple and CPUString arguments 
from createMCAsmBackend and replaces them with MCSubtargetInfo.
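
Concretely, the per-target hook changes shape roughly as follows (sketched for a
hypothetical target Foo; the "before" form paraphrases the old interface with its
separate Triple and CPU string, and the "after" form matches the new
createAMDGPUAsmBackend signature at the bottom of this file):

    // Before: the triple and CPU string were passed individually.
    MCAsmBackend *createFooAsmBackend(const Target &T, const MCRegisterInfo &MRI,
                                      const Triple &TT, StringRef CPU,
                                      const MCTargetOptions &Options);

    // After: one MCSubtargetInfo carries the triple, CPU, and feature bits.
    MCAsmBackend *createFooAsmBackend(const Target &T, const MCSubtargetInfo &STI,
                                      const MCRegisterInfo &MRI,
                                      const MCTargetOptions &Options);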

This patch just changes the interface without making any intentional 
functional changes. Once in, several cleanups are possible:
* Get rid of the awkward MCSubtargetInfo handling in ARMAsmBackend
* Support 16-bit instructions when valid in MipsAsmBackend::writeNopData
* Get rid of the CPU string parsing in X86AsmBackend and just use a SubtargetFeature for HasNopl
* Emit 16-bit nops in RISCVAsmBackend::writeNopData if the compressed instruction set extension is enabled (see D41221; a sketch of this pattern follows the list)
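
All of these reduce to ordinary queries against the threaded-through subtarget.
A minimal sketch of the last item, assuming the backend stores the STI reference
it receives (the body and constants are illustrative, not code from this patch
or from D41221):

    bool RISCVAsmBackend::writeNopData(uint64_t Count, MCObjectWriter *OW) const {
      // Feature bits are now available directly; no CPU string parsing needed.
      bool HasStdExtC = STI.getFeatureBits()[RISCV::FeatureStdExtC];
      unsigned MinNopLen = HasStdExtC ? 2 : 4;
      if ((Count % MinNopLen) != 0)
        return false;
      // nop is addi x0, x0, 0 (0x00000013); c.nop is 0x0001.
      for (; Count >= 4; Count -= 4)
        OW->write32(0x00000013);
      if (Count)
        OW->write16(0x0001);
      return true;
    }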

This change initially exposed PR35686, which has since been resolved in r321026.

Differential Revision: https://reviews.llvm.org/D41349

llvm-svn: 321692
2018-01-03 08:53:05 +00:00


//===-- AMDGPUAsmBackend.cpp - AMDGPU Assembler Backend -------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
/// \file
//===----------------------------------------------------------------------===//
#include "MCTargetDesc/AMDGPUFixupKinds.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/BinaryFormat/ELF.h"
#include "llvm/MC/MCAsmBackend.h"
#include "llvm/MC/MCAssembler.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCFixupKindInfo.h"
#include "llvm/MC/MCObjectWriter.h"
#include "llvm/MC/MCValue.h"
#include "llvm/Support/TargetRegistry.h"
using namespace llvm;
namespace {
class AMDGPUAsmBackend : public MCAsmBackend {
public:
AMDGPUAsmBackend(const Target &T)
: MCAsmBackend() {}

  unsigned getNumFixupKinds() const override {
    return AMDGPU::NumTargetFixupKinds;
  }

  void applyFixup(const MCAssembler &Asm, const MCFixup &Fixup,
                  const MCValue &Target, MutableArrayRef<char> Data,
                  uint64_t Value, bool IsResolved) const override;
  bool fixupNeedsRelaxation(const MCFixup &Fixup, uint64_t Value,
                            const MCRelaxableFragment *DF,
                            const MCAsmLayout &Layout) const override {
    return false;
  }
  void relaxInstruction(const MCInst &Inst, const MCSubtargetInfo &STI,
                        MCInst &Res) const override {
    llvm_unreachable("Not implemented");
  }
  bool mayNeedRelaxation(const MCInst &Inst) const override { return false; }
  unsigned getMinimumNopSize() const override;
  bool writeNopData(uint64_t Count, MCObjectWriter *OW) const override;

  const MCFixupKindInfo &getFixupKindInfo(MCFixupKind Kind) const override;
};

} // end anonymous namespace

static unsigned getFixupKindNumBytes(unsigned Kind) {
  switch (Kind) {
  case AMDGPU::fixup_si_sopp_br:
    return 2;
  case FK_SecRel_1:
  case FK_Data_1:
    return 1;
  case FK_SecRel_2:
  case FK_Data_2:
    return 2;
  case FK_SecRel_4:
  case FK_Data_4:
  case FK_PCRel_4:
    return 4;
  case FK_SecRel_8:
  case FK_Data_8:
    return 8;
  default:
    llvm_unreachable("Unknown fixup kind!");
  }
}

static uint64_t adjustFixupValue(const MCFixup &Fixup, uint64_t Value,
                                 MCContext *Ctx) {
  int64_t SignedValue = static_cast<int64_t>(Value);

  switch (static_cast<unsigned>(Fixup.getKind())) {
  case AMDGPU::fixup_si_sopp_br: {
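    // The 16-bit branch immediate counts 4-byte words relative to the
    // instruction following the 4-byte branch, hence the -4 and the /4.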
    int64_t BrImm = (SignedValue - 4) / 4;
    if (Ctx && !isInt<16>(BrImm))
      Ctx->reportError(Fixup.getLoc(), "branch size exceeds simm16");
    return BrImm;
  }
  case FK_Data_1:
  case FK_Data_2:
  case FK_Data_4:
  case FK_Data_8:
  case FK_PCRel_4:
  case FK_SecRel_4:
    return Value;
  default:
    llvm_unreachable("unhandled fixup kind");
  }
}

void AMDGPUAsmBackend::applyFixup(const MCAssembler &Asm, const MCFixup &Fixup,
                                  const MCValue &Target,
                                  MutableArrayRef<char> Data, uint64_t Value,
                                  bool IsResolved) const {
  Value = adjustFixupValue(Fixup, Value, &Asm.getContext());
  if (!Value)
    return; // Doesn't change encoding.

  MCFixupKindInfo Info = getFixupKindInfo(Fixup.getKind());

  // Shift the value into position.
  Value <<= Info.TargetOffset;

  unsigned NumBytes = getFixupKindNumBytes(Fixup.getKind());
  uint32_t Offset = Fixup.getOffset();
  assert(Offset + NumBytes <= Data.size() && "Invalid fixup offset!");

  // For each byte of the fragment that the fixup touches, mask in the bits
  // from the fixup value.
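  // Bytes are OR'd lowest-byte-first, i.e. little-endian, matching the byte
  // order of AMDGPU instruction encodings.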
  for (unsigned i = 0; i != NumBytes; ++i)
    Data[Offset + i] |= static_cast<uint8_t>((Value >> (i * 8)) & 0xff);
}

const MCFixupKindInfo &AMDGPUAsmBackend::getFixupKindInfo(
    MCFixupKind Kind) const {
  const static MCFixupKindInfo Infos[AMDGPU::NumTargetFixupKinds] = {
    // name               offset bits flags
    { "fixup_si_sopp_br", 0,     16,  MCFixupKindInfo::FKF_IsPCRel },
  };

  if (Kind < FirstTargetFixupKind)
    return MCAsmBackend::getFixupKindInfo(Kind);

  return Infos[Kind - FirstTargetFixupKind];
}

unsigned AMDGPUAsmBackend::getMinimumNopSize() const {
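  // GCN encodings are 32 or 64 bits wide, so the smallest self-contained
  // padding instruction is a single 32-bit s_nop.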
  return 4;
}

bool AMDGPUAsmBackend::writeNopData(uint64_t Count, MCObjectWriter *OW) const {
  // If the count is not 4-byte aligned, we must be writing data into the text
  // section (otherwise we have unaligned instructions, and thus have far
  // bigger problems), so just write zeros instead.
  OW->WriteZeros(Count % 4);

  // We are properly aligned, so write NOPs as requested.
  Count /= 4;

  // FIXME: R600 support.
  // s_nop 0
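  // (0xbf800000 is the fixed SOPP encoding of s_nop with a zero immediate.)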
  const uint32_t Encoded_S_NOP_0 = 0xbf800000;
  for (uint64_t I = 0; I != Count; ++I)
    OW->write32(Encoded_S_NOP_0);

  return true;
}

//===----------------------------------------------------------------------===//
// ELFAMDGPUAsmBackend class
//===----------------------------------------------------------------------===//

namespace {

class ELFAMDGPUAsmBackend : public AMDGPUAsmBackend {
  bool Is64Bit;
  bool HasRelocationAddend;
  uint8_t OSABI = ELF::ELFOSABI_NONE;

public:
  ELFAMDGPUAsmBackend(const Target &T, const Triple &TT) :
      AMDGPUAsmBackend(T), Is64Bit(TT.getArch() == Triple::amdgcn),
      HasRelocationAddend(TT.getOS() == Triple::AMDHSA) {
    switch (TT.getOS()) {
    case Triple::AMDHSA:
      OSABI = ELF::ELFOSABI_AMDGPU_HSA;
      break;
    case Triple::AMDPAL:
      OSABI = ELF::ELFOSABI_AMDGPU_PAL;
      break;
    case Triple::Mesa3D:
      OSABI = ELF::ELFOSABI_AMDGPU_MESA3D;
      break;
    default:
      break;
    }
  }

  std::unique_ptr<MCObjectWriter>
  createObjectWriter(raw_pwrite_stream &OS) const override {
    return createAMDGPUELFObjectWriter(Is64Bit, OSABI, HasRelocationAddend, OS);
  }
};

} // end anonymous namespace

MCAsmBackend *llvm::createAMDGPUAsmBackend(const Target &T,
                                           const MCSubtargetInfo &STI,
                                           const MCRegisterInfo &MRI,
                                           const MCTargetOptions &Options) {
  // Use 64-bit ELF for amdgcn
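  // (r600 keeps 32-bit ELF; see the Is64Bit flag computed above.)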
  return new ELFAMDGPUAsmBackend(T, STI.getTargetTriple());
}