This patch replaces SPIRVBaseInfo.*, previously created using macros, with the TableGen approach. There are many small changes in other files due to differences in namespaces; functions in SPIRVUtils are also moved to the llvm namespace.

Differential Revision: https://reviews.llvm.org/D130518

Co-authored-by: Aleksandr Bezzubikov <zuban32s@gmail.com>
Co-authored-by: Michal Paszkowski <michal.paszkowski@outlook.com>
Co-authored-by: Andrey Tretyakov <andrey1.tretyakov@intel.com>
Co-authored-by: Konrad Trifunovic <konrad.trifunovic@intel.com>

//===- SPIRVInstructionSelector.cpp ------------------------------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the targeting of the InstructionSelector class for
// SPIRV.
// TODO: This should be generated by TableGen.
//
//===----------------------------------------------------------------------===//

#include "SPIRV.h"
#include "SPIRVGlobalRegistry.h"
#include "SPIRVInstrInfo.h"
#include "SPIRVRegisterBankInfo.h"
#include "SPIRVRegisterInfo.h"
#include "SPIRVTargetMachine.h"
#include "SPIRVUtils.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/IntrinsicsSPIRV.h"
#include "llvm/Support/Debug.h"

#define DEBUG_TYPE "spirv-isel"

using namespace llvm;

namespace {

#define GET_GLOBALISEL_PREDICATE_BITSET
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATE_BITSET

class SPIRVInstructionSelector : public InstructionSelector {
  const SPIRVSubtarget &STI;
  const SPIRVInstrInfo &TII;
  const SPIRVRegisterInfo &TRI;
  const RegisterBankInfo &RBI;
  SPIRVGlobalRegistry &GR;
  MachineRegisterInfo *MRI;

public:
  SPIRVInstructionSelector(const SPIRVTargetMachine &TM,
                           const SPIRVSubtarget &ST,
                           const RegisterBankInfo &RBI);
  void setupMF(MachineFunction &MF, GISelKnownBits *KB,
               CodeGenCoverage &CoverageInfo, ProfileSummaryInfo *PSI,
               BlockFrequencyInfo *BFI) override;
  // Common selection code. Instruction-specific selection occurs in spvSelect.
  bool select(MachineInstr &I) override;
  static const char *getName() { return DEBUG_TYPE; }

#define GET_GLOBALISEL_PREDICATES_DECL
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_DECL

#define GET_GLOBALISEL_TEMPORARIES_DECL
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_DECL

private:
  // tblgen-erated 'select' implementation, used as the initial selector for
  // the patterns that don't require complex C++.
  bool selectImpl(MachineInstr &I, CodeGenCoverage &CoverageInfo) const;

  // All instruction-specific selection that didn't happen in "select()".
  // It is basically a large switch delegating to all the other select methods.
  bool spvSelect(Register ResVReg, const SPIRVType *ResType,
                 MachineInstr &I) const;

  bool selectGlobalValue(Register ResVReg, MachineInstr &I,
                         const MachineInstr *Init = nullptr) const;

  bool selectUnOpWithSrc(Register ResVReg, const SPIRVType *ResType,
                         MachineInstr &I, Register SrcReg,
                         unsigned Opcode) const;
  bool selectUnOp(Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
                  unsigned Opcode) const;

  bool selectLoad(Register ResVReg, const SPIRVType *ResType,
                  MachineInstr &I) const;
  bool selectStore(MachineInstr &I) const;

  bool selectMemOperation(Register ResVReg, MachineInstr &I) const;

  bool selectAtomicRMW(Register ResVReg, const SPIRVType *ResType,
                       MachineInstr &I, unsigned NewOpcode) const;

  bool selectAtomicCmpXchg(Register ResVReg, const SPIRVType *ResType,
                           MachineInstr &I) const;

  bool selectFence(MachineInstr &I) const;

  bool selectAddrSpaceCast(Register ResVReg, const SPIRVType *ResType,
                           MachineInstr &I) const;

  bool selectBitreverse(Register ResVReg, const SPIRVType *ResType,
                        MachineInstr &I) const;

  bool selectConstVector(Register ResVReg, const SPIRVType *ResType,
                         MachineInstr &I) const;

  bool selectCmp(Register ResVReg, const SPIRVType *ResType,
                 unsigned comparisonOpcode, MachineInstr &I) const;

  bool selectICmp(Register ResVReg, const SPIRVType *ResType,
                  MachineInstr &I) const;
  bool selectFCmp(Register ResVReg, const SPIRVType *ResType,
                  MachineInstr &I) const;

  void renderImm32(MachineInstrBuilder &MIB, const MachineInstr &I,
                   int OpIdx) const;
  void renderFImm32(MachineInstrBuilder &MIB, const MachineInstr &I,
                    int OpIdx) const;

  bool selectConst(Register ResVReg, const SPIRVType *ResType, const APInt &Imm,
                   MachineInstr &I) const;

  bool selectSelect(Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
                    bool IsSigned) const;
  bool selectIToF(Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
                  bool IsSigned, unsigned Opcode) const;
  bool selectExt(Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
                 bool IsSigned) const;

  bool selectTrunc(Register ResVReg, const SPIRVType *ResType,
                   MachineInstr &I) const;

  bool selectIntToBool(Register IntReg, Register ResVReg,
                       const SPIRVType *intTy, const SPIRVType *boolTy,
                       MachineInstr &I) const;

  bool selectOpUndef(Register ResVReg, const SPIRVType *ResType,
                     MachineInstr &I) const;
  bool selectIntrinsic(Register ResVReg, const SPIRVType *ResType,
                       MachineInstr &I) const;
  bool selectExtractVal(Register ResVReg, const SPIRVType *ResType,
                        MachineInstr &I) const;
  bool selectInsertVal(Register ResVReg, const SPIRVType *ResType,
                       MachineInstr &I) const;
  bool selectExtractElt(Register ResVReg, const SPIRVType *ResType,
                        MachineInstr &I) const;
  bool selectInsertElt(Register ResVReg, const SPIRVType *ResType,
                       MachineInstr &I) const;
  bool selectGEP(Register ResVReg, const SPIRVType *ResType,
                 MachineInstr &I) const;

  bool selectFrameIndex(Register ResVReg, const SPIRVType *ResType,
                        MachineInstr &I) const;

  bool selectBranch(MachineInstr &I) const;
  bool selectBranchCond(MachineInstr &I) const;

  bool selectPhi(Register ResVReg, const SPIRVType *ResType,
                 MachineInstr &I) const;

  Register buildI32Constant(uint32_t Val, MachineInstr &I,
                            const SPIRVType *ResType = nullptr) const;

  Register buildZerosVal(const SPIRVType *ResType, MachineInstr &I) const;
  Register buildOnesVal(bool AllOnes, const SPIRVType *ResType,
                        MachineInstr &I) const;
};

} // end anonymous namespace

#define GET_GLOBALISEL_IMPL
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_IMPL

SPIRVInstructionSelector::SPIRVInstructionSelector(const SPIRVTargetMachine &TM,
                                                   const SPIRVSubtarget &ST,
                                                   const RegisterBankInfo &RBI)
    : InstructionSelector(), STI(ST), TII(*ST.getInstrInfo()),
      TRI(*ST.getRegisterInfo()), RBI(RBI), GR(*ST.getSPIRVGlobalRegistry()),
#define GET_GLOBALISEL_PREDICATES_INIT
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_INIT
{
}

void SPIRVInstructionSelector::setupMF(MachineFunction &MF, GISelKnownBits *KB,
                                       CodeGenCoverage &CoverageInfo,
                                       ProfileSummaryInfo *PSI,
                                       BlockFrequencyInfo *BFI) {
  MRI = &MF.getRegInfo();
  GR.setCurrentFunc(MF);
  InstructionSelector::setupMF(MF, KB, CoverageInfo, PSI, BFI);
}

static bool isImm(const MachineOperand &MO, MachineRegisterInfo *MRI);

// Defined in SPIRVLegalizerInfo.cpp.
extern bool isTypeFoldingSupported(unsigned Opcode);

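// A rough sketch of the ASSIGN_TYPE handling in select() below (illustrative
// GMIR, not taken from a specific test): for
//   %1:id(s32) = G_ADD %a, %b
//   %2:id(s32) = ASSIGN_TYPE %1, %int32ty
// the ASSIGN_TYPE pseudo is erased here, and for foldable opcodes the
// tblgen-erated selectImpl turns the G_ADD into a typed OpIAdd directly.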
bool SPIRVInstructionSelector::select(MachineInstr &I) {
  assert(I.getParent() && "Instruction should be in a basic block!");
  assert(I.getParent()->getParent() && "Instruction should be in a function!");

  unsigned Opcode = I.getOpcode();
  // If it's not a GMIR instruction, we've selected it already.
  if (!isPreISelGenericOpcode(Opcode)) {
    if (Opcode == SPIRV::ASSIGN_TYPE) { // These pseudos aren't needed any more.
      auto *Def = MRI->getVRegDef(I.getOperand(1).getReg());
      if (isTypeFoldingSupported(Def->getOpcode())) {
        auto Res = selectImpl(I, *CoverageInfo);
        assert(Res || Def->getOpcode() == TargetOpcode::G_CONSTANT);
        if (Res)
          return Res;
      }
      MRI->replaceRegWith(I.getOperand(1).getReg(), I.getOperand(0).getReg());
      I.removeFromParent();
    } else if (I.getNumDefs() == 1) {
      // Make all vregs 32 bits (for SPIR-V IDs).
      MRI->setType(I.getOperand(0).getReg(), LLT::scalar(32));
    }
    return true;
  }

  if (I.getNumOperands() != I.getNumExplicitOperands()) {
    LLVM_DEBUG(errs() << "Generic instr has unexpected implicit operands\n");
    return false;
  }

  // Common code for getting return reg+type, and removing selected instr
  // from parent occurs here. Instr-specific selection happens in spvSelect().
  bool HasDefs = I.getNumDefs() > 0;
  Register ResVReg = HasDefs ? I.getOperand(0).getReg() : Register(0);
  SPIRVType *ResType = HasDefs ? GR.getSPIRVTypeForVReg(ResVReg) : nullptr;
  assert(!HasDefs || ResType || I.getOpcode() == TargetOpcode::G_GLOBAL_VALUE);
  if (spvSelect(ResVReg, ResType, I)) {
    if (HasDefs) // Make all vregs 32 bits (for SPIR-V IDs).
      MRI->setType(ResVReg, LLT::scalar(32));
    I.removeFromParent();
    return true;
  }
  return false;
}

bool SPIRVInstructionSelector::spvSelect(Register ResVReg,
                                         const SPIRVType *ResType,
                                         MachineInstr &I) const {
  assert(!isTypeFoldingSupported(I.getOpcode()) ||
         I.getOpcode() == TargetOpcode::G_CONSTANT);
  const unsigned Opcode = I.getOpcode();
  switch (Opcode) {
  case TargetOpcode::G_CONSTANT:
    return selectConst(ResVReg, ResType, I.getOperand(1).getCImm()->getValue(),
                       I);
  case TargetOpcode::G_GLOBAL_VALUE:
    return selectGlobalValue(ResVReg, I);
  case TargetOpcode::G_IMPLICIT_DEF:
    return selectOpUndef(ResVReg, ResType, I);

  case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
    return selectIntrinsic(ResVReg, ResType, I);
  case TargetOpcode::G_BITREVERSE:
    return selectBitreverse(ResVReg, ResType, I);

  case TargetOpcode::G_BUILD_VECTOR:
    return selectConstVector(ResVReg, ResType, I);

  case TargetOpcode::G_SHUFFLE_VECTOR: {
    MachineBasicBlock &BB = *I.getParent();
    auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpVectorShuffle))
                   .addDef(ResVReg)
                   .addUse(GR.getSPIRVTypeID(ResType))
                   .addUse(I.getOperand(1).getReg())
                   .addUse(I.getOperand(2).getReg());
    for (auto V : I.getOperand(3).getShuffleMask())
      MIB.addImm(V);
    return MIB.constrainAllUses(TII, TRI, RBI);
  }
  case TargetOpcode::G_MEMMOVE:
  case TargetOpcode::G_MEMCPY:
    return selectMemOperation(ResVReg, I);

  case TargetOpcode::G_ICMP:
    return selectICmp(ResVReg, ResType, I);
  case TargetOpcode::G_FCMP:
    return selectFCmp(ResVReg, ResType, I);

  case TargetOpcode::G_FRAME_INDEX:
    return selectFrameIndex(ResVReg, ResType, I);

  case TargetOpcode::G_LOAD:
    return selectLoad(ResVReg, ResType, I);
  case TargetOpcode::G_STORE:
    return selectStore(I);

  case TargetOpcode::G_BR:
    return selectBranch(I);
  case TargetOpcode::G_BRCOND:
    return selectBranchCond(I);

  case TargetOpcode::G_PHI:
    return selectPhi(ResVReg, ResType, I);

  case TargetOpcode::G_FPTOSI:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertFToS);
  case TargetOpcode::G_FPTOUI:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertFToU);

  case TargetOpcode::G_SITOFP:
    return selectIToF(ResVReg, ResType, I, true, SPIRV::OpConvertSToF);
  case TargetOpcode::G_UITOFP:
    return selectIToF(ResVReg, ResType, I, false, SPIRV::OpConvertUToF);

  case TargetOpcode::G_CTPOP:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpBitCount);

  case TargetOpcode::G_SEXT:
    return selectExt(ResVReg, ResType, I, true);
  case TargetOpcode::G_ANYEXT:
  case TargetOpcode::G_ZEXT:
    return selectExt(ResVReg, ResType, I, false);
  case TargetOpcode::G_TRUNC:
    return selectTrunc(ResVReg, ResType, I);
  case TargetOpcode::G_FPTRUNC:
  case TargetOpcode::G_FPEXT:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpFConvert);

  case TargetOpcode::G_PTRTOINT:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertPtrToU);
  case TargetOpcode::G_INTTOPTR:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertUToPtr);
  case TargetOpcode::G_BITCAST:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpBitcast);
  case TargetOpcode::G_ADDRSPACE_CAST:
    return selectAddrSpaceCast(ResVReg, ResType, I);
  case TargetOpcode::G_PTR_ADD: {
    // Currently, we get G_PTR_ADD only as a result of translating
    // global variables, initialized with constant expressions like GV + Const
    // (see test opencl/basic/progvar_prog_scope_init.ll).
    // TODO: extend the handler once we have other cases.
    assert(I.getOperand(1).isReg() && I.getOperand(2).isReg());
    Register GV = I.getOperand(1).getReg();
    MachineRegisterInfo::def_instr_iterator II = MRI->def_instr_begin(GV);
    assert(((*II).getOpcode() == TargetOpcode::G_GLOBAL_VALUE ||
            (*II).getOpcode() == TargetOpcode::COPY ||
            (*II).getOpcode() == SPIRV::OpVariable) &&
           isImm(I.getOperand(2), MRI));
    Register Idx = buildZerosVal(GR.getOrCreateSPIRVIntegerType(32, I, TII), I);
    MachineBasicBlock &BB = *I.getParent();
    auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSpecConstantOp))
                   .addDef(ResVReg)
                   .addUse(GR.getSPIRVTypeID(ResType))
                   .addImm(static_cast<uint32_t>(
                       SPIRV::Opcode::InBoundsPtrAccessChain))
                   .addUse(GV)
                   .addUse(Idx)
                   .addUse(I.getOperand(2).getReg());
    return MIB.constrainAllUses(TII, TRI, RBI);
  }

  case TargetOpcode::G_ATOMICRMW_OR:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicOr);
  case TargetOpcode::G_ATOMICRMW_ADD:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicIAdd);
  case TargetOpcode::G_ATOMICRMW_AND:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicAnd);
  case TargetOpcode::G_ATOMICRMW_MAX:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicSMax);
  case TargetOpcode::G_ATOMICRMW_MIN:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicSMin);
  case TargetOpcode::G_ATOMICRMW_SUB:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicISub);
  case TargetOpcode::G_ATOMICRMW_XOR:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicXor);
  case TargetOpcode::G_ATOMICRMW_UMAX:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicUMax);
  case TargetOpcode::G_ATOMICRMW_UMIN:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicUMin);
  case TargetOpcode::G_ATOMICRMW_XCHG:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicExchange);
  case TargetOpcode::G_ATOMIC_CMPXCHG:
    return selectAtomicCmpXchg(ResVReg, ResType, I);

  case TargetOpcode::G_FENCE:
    return selectFence(I);

  default:
    return false;
  }
}

bool SPIRVInstructionSelector::selectUnOpWithSrc(Register ResVReg,
                                                 const SPIRVType *ResType,
                                                 MachineInstr &I,
                                                 Register SrcReg,
                                                 unsigned Opcode) const {
  return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(SrcReg)
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectUnOp(Register ResVReg,
                                          const SPIRVType *ResType,
                                          MachineInstr &I,
                                          unsigned Opcode) const {
  return selectUnOpWithSrc(ResVReg, ResType, I, I.getOperand(1).getReg(),
                           Opcode);
}

static SPIRV::Scope::Scope getScope(SyncScope::ID Ord) {
  switch (Ord) {
  case SyncScope::SingleThread:
    return SPIRV::Scope::Invocation;
  case SyncScope::System:
    return SPIRV::Scope::Device;
  default:
    llvm_unreachable("Unsupported synchronization Scope ID.");
  }
}

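// For reference, per the SPIR-V specification the Scope literals are
// CrossDevice = 0, Device = 1, Workgroup = 2, Subgroup = 3, Invocation = 4.
// The selectors below materialize the chosen scope as an i32 constant (via
// buildI32Constant), so e.g. syncscope("singlethread") ends up referencing an
// OpConstant with value 4.
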
static void addMemoryOperands(MachineMemOperand *MemOp,
                              MachineInstrBuilder &MIB) {
  uint32_t SpvMemOp = static_cast<uint32_t>(SPIRV::MemoryOperand::None);
  if (MemOp->isVolatile())
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Volatile);
  if (MemOp->isNonTemporal())
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Nontemporal);
  if (MemOp->getAlign().value())
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Aligned);

  if (SpvMemOp != static_cast<uint32_t>(SPIRV::MemoryOperand::None)) {
    MIB.addImm(SpvMemOp);
    if (SpvMemOp & static_cast<uint32_t>(SPIRV::MemoryOperand::Aligned))
      MIB.addImm(MemOp->getAlign().value());
  }
}

static void addMemoryOperands(uint64_t Flags, MachineInstrBuilder &MIB) {
  uint32_t SpvMemOp = static_cast<uint32_t>(SPIRV::MemoryOperand::None);
  if (Flags & MachineMemOperand::Flags::MOVolatile)
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Volatile);
  if (Flags & MachineMemOperand::Flags::MONonTemporal)
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Nontemporal);

  if (SpvMemOp != static_cast<uint32_t>(SPIRV::MemoryOperand::None))
    MIB.addImm(SpvMemOp);
}

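// Worked example (mask values per the SPIR-V spec: Volatile = 0x1,
// Aligned = 0x2, Nontemporal = 0x4): a volatile load with 4-byte alignment
// gets the literals `0x3 4` appended, producing roughly
//   %res = OpLoad %ty %ptr Volatile|Aligned 4
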
bool SPIRVInstructionSelector::selectLoad(Register ResVReg,
                                          const SPIRVType *ResType,
                                          MachineInstr &I) const {
  unsigned OpOffset =
      I.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS ? 1 : 0;
  Register Ptr = I.getOperand(1 + OpOffset).getReg();
  auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpLoad))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType))
                 .addUse(Ptr);
  if (!I.getNumMemOperands()) {
    assert(I.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS);
    addMemoryOperands(I.getOperand(2 + OpOffset).getImm(), MIB);
  } else {
    addMemoryOperands(*I.memoperands_begin(), MIB);
  }
  return MIB.constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectStore(MachineInstr &I) const {
  unsigned OpOffset =
      I.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS ? 1 : 0;
  Register StoreVal = I.getOperand(0 + OpOffset).getReg();
  Register Ptr = I.getOperand(1 + OpOffset).getReg();
  MachineBasicBlock &BB = *I.getParent();
  auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpStore))
                 .addUse(Ptr)
                 .addUse(StoreVal);
  if (!I.getNumMemOperands()) {
    assert(I.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS);
    addMemoryOperands(I.getOperand(2 + OpOffset).getImm(), MIB);
  } else {
    addMemoryOperands(*I.memoperands_begin(), MIB);
  }
  return MIB.constrainAllUses(TII, TRI, RBI);
}

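// Note on OpOffset in selectLoad/selectStore above: when these are reached
// through the spv_load/spv_store intrinsics (G_INTRINSIC_W_SIDE_EFFECTS),
// operand 0 is the intrinsic ID, so all value operands shift right by one and
// the memory-operand flags arrive as an immediate instead of an MMO.
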
bool SPIRVInstructionSelector::selectMemOperation(Register ResVReg,
                                                  MachineInstr &I) const {
  MachineBasicBlock &BB = *I.getParent();
  auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCopyMemorySized))
                 .addUse(I.getOperand(0).getReg())
                 .addUse(I.getOperand(1).getReg())
                 .addUse(I.getOperand(2).getReg());
  if (I.getNumMemOperands())
    addMemoryOperands(*I.memoperands_begin(), MIB);
  bool Result = MIB.constrainAllUses(TII, TRI, RBI);
  if (ResVReg.isValid() && ResVReg != MIB->getOperand(0).getReg())
    BuildMI(BB, I, I.getDebugLoc(), TII.get(TargetOpcode::COPY), ResVReg)
        .addUse(MIB->getOperand(0).getReg());
  return Result;
}

bool SPIRVInstructionSelector::selectAtomicRMW(Register ResVReg,
                                               const SPIRVType *ResType,
                                               MachineInstr &I,
                                               unsigned NewOpcode) const {
  assert(I.hasOneMemOperand());
  const MachineMemOperand *MemOp = *I.memoperands_begin();
  uint32_t Scope = static_cast<uint32_t>(getScope(MemOp->getSyncScopeID()));
  Register ScopeReg = buildI32Constant(Scope, I);

  Register Ptr = I.getOperand(1).getReg();
  // TODO: Changed to match the translator's implementation; see
  // test/atomicrmw.ll.
  // auto ScSem =
  //     getMemSemanticsForStorageClass(GR.getPointerStorageClass(Ptr));
  AtomicOrdering AO = MemOp->getSuccessOrdering();
  uint32_t MemSem = static_cast<uint32_t>(getMemSemantics(AO));
  Register MemSemReg = buildI32Constant(MemSem /*| ScSem*/, I);

  return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(NewOpcode))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(Ptr)
      .addUse(ScopeReg)
      .addUse(MemSemReg)
      .addUse(I.getOperand(2).getReg())
      .constrainAllUses(TII, TRI, RBI);
}

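// For reference (SPIR-V memory-semantics bits: Acquire = 0x2, Release = 0x4,
// AcquireRelease = 0x8, SequentiallyConsistent = 0x10): a seq_cst
// `atomicrmw add` is therefore emitted roughly as
//   %sem = OpConstant %uint 16
//   %res = OpAtomicIAdd %ty %ptr %scope %sem %val
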
bool SPIRVInstructionSelector::selectFence(MachineInstr &I) const {
  AtomicOrdering AO = AtomicOrdering(I.getOperand(0).getImm());
  uint32_t MemSem = static_cast<uint32_t>(getMemSemantics(AO));
  Register MemSemReg = buildI32Constant(MemSem, I);
  SyncScope::ID Ord = SyncScope::ID(I.getOperand(1).getImm());
  uint32_t Scope = static_cast<uint32_t>(getScope(Ord));
  Register ScopeReg = buildI32Constant(Scope, I);
  MachineBasicBlock &BB = *I.getParent();
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpMemoryBarrier))
      .addUse(ScopeReg)
      .addUse(MemSemReg)
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectAtomicCmpXchg(Register ResVReg,
                                                   const SPIRVType *ResType,
                                                   MachineInstr &I) const {
  Register ScopeReg;
  Register MemSemEqReg;
  Register MemSemNeqReg;
  Register Ptr = I.getOperand(2).getReg();
  if (I.getOpcode() != TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS) {
    assert(I.hasOneMemOperand());
    const MachineMemOperand *MemOp = *I.memoperands_begin();
    unsigned Scope = static_cast<uint32_t>(getScope(MemOp->getSyncScopeID()));
    ScopeReg = buildI32Constant(Scope, I);

    unsigned ScSem = static_cast<uint32_t>(
        getMemSemanticsForStorageClass(GR.getPointerStorageClass(Ptr)));
    AtomicOrdering AO = MemOp->getSuccessOrdering();
    unsigned MemSemEq = static_cast<uint32_t>(getMemSemantics(AO)) | ScSem;
    MemSemEqReg = buildI32Constant(MemSemEq, I);
    AtomicOrdering FO = MemOp->getFailureOrdering();
    unsigned MemSemNeq = static_cast<uint32_t>(getMemSemantics(FO)) | ScSem;
    MemSemNeqReg =
        MemSemEq == MemSemNeq ? MemSemEqReg : buildI32Constant(MemSemNeq, I);
  } else {
    ScopeReg = I.getOperand(5).getReg();
    MemSemEqReg = I.getOperand(6).getReg();
    MemSemNeqReg = I.getOperand(7).getReg();
  }

  Register Cmp = I.getOperand(3).getReg();
  Register Val = I.getOperand(4).getReg();
  SPIRVType *SpvValTy = GR.getSPIRVTypeForVReg(Val);
  Register ACmpRes = MRI->createVirtualRegister(&SPIRV::IDRegClass);
  const DebugLoc &DL = I.getDebugLoc();
  bool Result =
      BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpAtomicCompareExchange))
          .addDef(ACmpRes)
          .addUse(GR.getSPIRVTypeID(SpvValTy))
          .addUse(Ptr)
          .addUse(ScopeReg)
          .addUse(MemSemEqReg)
          .addUse(MemSemNeqReg)
          .addUse(Val)
          .addUse(Cmp)
          .constrainAllUses(TII, TRI, RBI);
  Register CmpSuccReg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
  SPIRVType *BoolTy = GR.getOrCreateSPIRVBoolType(I, TII);
  Result |= BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpIEqual))
                .addDef(CmpSuccReg)
                .addUse(GR.getSPIRVTypeID(BoolTy))
                .addUse(ACmpRes)
                .addUse(Cmp)
                .constrainAllUses(TII, TRI, RBI);
  Register TmpReg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
  Result |= BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpCompositeInsert))
                .addDef(TmpReg)
                .addUse(GR.getSPIRVTypeID(ResType))
                .addUse(ACmpRes)
                .addUse(GR.getOrCreateUndef(I, ResType, TII))
                .addImm(0)
                .constrainAllUses(TII, TRI, RBI);
  Result |= BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpCompositeInsert))
                .addDef(ResVReg)
                .addUse(GR.getSPIRVTypeID(ResType))
                .addUse(CmpSuccReg)
                .addUse(TmpReg)
                .addImm(1)
                .constrainAllUses(TII, TRI, RBI);
  return Result;
}

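// LLVM's cmpxchg returns a {value, success} pair, while
// OpAtomicCompareExchange only returns the original value. The selection
// above therefore rebuilds the pair: OpIEqual computes the success flag, and
// two OpCompositeInsert instructions pack the value (index 0) and the flag
// (index 1) into an undef composite of the result type.
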
static bool isGenericCastablePtr(SPIRV::StorageClass::StorageClass SC) {
  switch (SC) {
  case SPIRV::StorageClass::Workgroup:
  case SPIRV::StorageClass::CrossWorkgroup:
  case SPIRV::StorageClass::Function:
    return true;
  default:
    return false;
  }
}

// In SPIR-V, address space casting can only happen to and from the Generic
// storage class. We can also only cast Workgroup, CrossWorkgroup, or Function
// pointers to and from Generic pointers. As such, we can convert e.g. from
// Workgroup to Function by going via a Generic pointer as an intermediary. All
// other combinations can only be done by a bitcast, and are probably not safe.
bool SPIRVInstructionSelector::selectAddrSpaceCast(Register ResVReg,
                                                   const SPIRVType *ResType,
                                                   MachineInstr &I) const {
  // If the AddrSpaceCast has a single user that is an OpConstantComposite or
  // an OpVariable, we should select OpSpecConstantOp instead.
  auto UIs = MRI->use_instructions(ResVReg);
  if (!UIs.empty() && ++UIs.begin() == UIs.end() &&
      (UIs.begin()->getOpcode() == SPIRV::OpConstantComposite ||
       UIs.begin()->getOpcode() == SPIRV::OpVariable ||
       isSpvIntrinsic(*UIs.begin(), Intrinsic::spv_init_global))) {
    Register NewReg = I.getOperand(1).getReg();
    MachineBasicBlock &BB = *I.getParent();
    SPIRVType *SpvBaseTy = GR.getOrCreateSPIRVIntegerType(8, I, TII);
    ResType = GR.getOrCreateSPIRVPointerType(SpvBaseTy, I, TII,
                                             SPIRV::StorageClass::Generic);
    bool Result =
        BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSpecConstantOp))
            .addDef(ResVReg)
            .addUse(GR.getSPIRVTypeID(ResType))
            .addImm(static_cast<uint32_t>(SPIRV::Opcode::PtrCastToGeneric))
            .addUse(NewReg)
            .constrainAllUses(TII, TRI, RBI);
    return Result;
  }
  Register SrcPtr = I.getOperand(1).getReg();
  SPIRVType *SrcPtrTy = GR.getSPIRVTypeForVReg(SrcPtr);
  SPIRV::StorageClass::StorageClass SrcSC = GR.getPointerStorageClass(SrcPtr);
  SPIRV::StorageClass::StorageClass DstSC = GR.getPointerStorageClass(ResVReg);

  // Casting from an eligible pointer to Generic.
  if (DstSC == SPIRV::StorageClass::Generic && isGenericCastablePtr(SrcSC))
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpPtrCastToGeneric);
  // Casting from Generic to an eligible pointer.
  if (SrcSC == SPIRV::StorageClass::Generic && isGenericCastablePtr(DstSC))
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpGenericCastToPtr);
  // Casting between 2 eligible pointers using Generic as an intermediary.
  if (isGenericCastablePtr(SrcSC) && isGenericCastablePtr(DstSC)) {
    Register Tmp = MRI->createVirtualRegister(&SPIRV::IDRegClass);
    SPIRVType *GenericPtrTy = GR.getOrCreateSPIRVPointerType(
        SrcPtrTy, I, TII, SPIRV::StorageClass::Generic);
    MachineBasicBlock &BB = *I.getParent();
    const DebugLoc &DL = I.getDebugLoc();
    bool Success = BuildMI(BB, I, DL, TII.get(SPIRV::OpPtrCastToGeneric))
                       .addDef(Tmp)
                       .addUse(GR.getSPIRVTypeID(GenericPtrTy))
                       .addUse(SrcPtr)
                       .constrainAllUses(TII, TRI, RBI);
    return Success && BuildMI(BB, I, DL, TII.get(SPIRV::OpGenericCastToPtr))
                          .addDef(ResVReg)
                          .addUse(GR.getSPIRVTypeID(ResType))
                          .addUse(Tmp)
                          .constrainAllUses(TII, TRI, RBI);
  }
  // TODO: Should this case just be disallowed completely?
  // We're casting 2 other arbitrary address spaces, so have to bitcast.
  return selectUnOp(ResVReg, ResType, I, SPIRV::OpBitcast);
}

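// For example, an addrspacecast from Workgroup to Function storage is lowered
// (sketch; type names illustrative) as:
//   %tmp = OpPtrCastToGeneric %genericPtrTy %src
//   %dst = OpGenericCastToPtr %funcPtrTy %tmp
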
static unsigned getFCmpOpcode(unsigned PredNum) {
  auto Pred = static_cast<CmpInst::Predicate>(PredNum);
  switch (Pred) {
  case CmpInst::FCMP_OEQ:
    return SPIRV::OpFOrdEqual;
  case CmpInst::FCMP_OGE:
    return SPIRV::OpFOrdGreaterThanEqual;
  case CmpInst::FCMP_OGT:
    return SPIRV::OpFOrdGreaterThan;
  case CmpInst::FCMP_OLE:
    return SPIRV::OpFOrdLessThanEqual;
  case CmpInst::FCMP_OLT:
    return SPIRV::OpFOrdLessThan;
  case CmpInst::FCMP_ONE:
    return SPIRV::OpFOrdNotEqual;
  case CmpInst::FCMP_ORD:
    return SPIRV::OpOrdered;
  case CmpInst::FCMP_UEQ:
    return SPIRV::OpFUnordEqual;
  case CmpInst::FCMP_UGE:
    return SPIRV::OpFUnordGreaterThanEqual;
  case CmpInst::FCMP_UGT:
    return SPIRV::OpFUnordGreaterThan;
  case CmpInst::FCMP_ULE:
    return SPIRV::OpFUnordLessThanEqual;
  case CmpInst::FCMP_ULT:
    return SPIRV::OpFUnordLessThan;
  case CmpInst::FCMP_UNE:
    return SPIRV::OpFUnordNotEqual;
  case CmpInst::FCMP_UNO:
    return SPIRV::OpUnordered;
  default:
    llvm_unreachable("Unknown predicate type for FCmp");
  }
}

static unsigned getICmpOpcode(unsigned PredNum) {
  auto Pred = static_cast<CmpInst::Predicate>(PredNum);
  switch (Pred) {
  case CmpInst::ICMP_EQ:
    return SPIRV::OpIEqual;
  case CmpInst::ICMP_NE:
    return SPIRV::OpINotEqual;
  case CmpInst::ICMP_SGE:
    return SPIRV::OpSGreaterThanEqual;
  case CmpInst::ICMP_SGT:
    return SPIRV::OpSGreaterThan;
  case CmpInst::ICMP_SLE:
    return SPIRV::OpSLessThanEqual;
  case CmpInst::ICMP_SLT:
    return SPIRV::OpSLessThan;
  case CmpInst::ICMP_UGE:
    return SPIRV::OpUGreaterThanEqual;
  case CmpInst::ICMP_UGT:
    return SPIRV::OpUGreaterThan;
  case CmpInst::ICMP_ULE:
    return SPIRV::OpULessThanEqual;
  case CmpInst::ICMP_ULT:
    return SPIRV::OpULessThan;
  default:
    llvm_unreachable("Unknown predicate type for ICmp");
  }
}

static unsigned getPtrCmpOpcode(unsigned Pred) {
  switch (static_cast<CmpInst::Predicate>(Pred)) {
  case CmpInst::ICMP_EQ:
    return SPIRV::OpPtrEqual;
  case CmpInst::ICMP_NE:
    return SPIRV::OpPtrNotEqual;
  default:
    llvm_unreachable("Unknown predicate type for pointer comparison");
  }
}

// Return the logical operation, or abort if none exists.
static unsigned getBoolCmpOpcode(unsigned PredNum) {
  auto Pred = static_cast<CmpInst::Predicate>(PredNum);
  switch (Pred) {
  case CmpInst::ICMP_EQ:
    return SPIRV::OpLogicalEqual;
  case CmpInst::ICMP_NE:
    return SPIRV::OpLogicalNotEqual;
  default:
    llvm_unreachable("Unknown predicate type for Bool comparison");
  }
}

bool SPIRVInstructionSelector::selectBitreverse(Register ResVReg,
                                                const SPIRVType *ResType,
                                                MachineInstr &I) const {
  MachineBasicBlock &BB = *I.getParent();
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpBitReverse))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(I.getOperand(1).getReg())
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectConstVector(Register ResVReg,
                                                 const SPIRVType *ResType,
                                                 MachineInstr &I) const {
  // TODO: only const case is supported for now.
  assert(std::all_of(
      I.operands_begin(), I.operands_end(), [this](const MachineOperand &MO) {
        if (MO.isDef())
          return true;
        if (!MO.isReg())
          return false;
        SPIRVType *ConstTy = this->MRI->getVRegDef(MO.getReg());
        assert(ConstTy && ConstTy->getOpcode() == SPIRV::ASSIGN_TYPE &&
               ConstTy->getOperand(1).isReg());
        Register ConstReg = ConstTy->getOperand(1).getReg();
        const MachineInstr *Const = this->MRI->getVRegDef(ConstReg);
        assert(Const);
        return (Const->getOpcode() == TargetOpcode::G_CONSTANT ||
                Const->getOpcode() == TargetOpcode::G_FCONSTANT);
      }));

  auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(),
                     TII.get(SPIRV::OpConstantComposite))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType));
  for (unsigned i = I.getNumExplicitDefs(); i < I.getNumExplicitOperands(); ++i)
    MIB.addUse(I.getOperand(i).getReg());
  return MIB.constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectCmp(Register ResVReg,
                                         const SPIRVType *ResType,
                                         unsigned CmpOpc,
                                         MachineInstr &I) const {
  Register Cmp0 = I.getOperand(2).getReg();
  Register Cmp1 = I.getOperand(3).getReg();
  assert(GR.getSPIRVTypeForVReg(Cmp0)->getOpcode() ==
             GR.getSPIRVTypeForVReg(Cmp1)->getOpcode() &&
         "CMP operands should have the same type");
  return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(CmpOpc))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(Cmp0)
      .addUse(Cmp1)
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectICmp(Register ResVReg,
                                          const SPIRVType *ResType,
                                          MachineInstr &I) const {
  auto Pred = I.getOperand(1).getPredicate();
  unsigned CmpOpc;

  Register CmpOperand = I.getOperand(2).getReg();
  if (GR.isScalarOfType(CmpOperand, SPIRV::OpTypePointer))
    CmpOpc = getPtrCmpOpcode(Pred);
  else if (GR.isScalarOrVectorOfType(CmpOperand, SPIRV::OpTypeBool))
    CmpOpc = getBoolCmpOpcode(Pred);
  else
    CmpOpc = getICmpOpcode(Pred);
  return selectCmp(ResVReg, ResType, CmpOpc, I);
}

void SPIRVInstructionSelector::renderFImm32(MachineInstrBuilder &MIB,
                                            const MachineInstr &I,
                                            int OpIdx) const {
  assert(I.getOpcode() == TargetOpcode::G_FCONSTANT && OpIdx == -1 &&
         "Expected G_FCONSTANT");
  const ConstantFP *FPImm = I.getOperand(1).getFPImm();
  addNumImm(FPImm->getValueAPF().bitcastToAPInt(), MIB);
}

void SPIRVInstructionSelector::renderImm32(MachineInstrBuilder &MIB,
                                           const MachineInstr &I,
                                           int OpIdx) const {
  assert(I.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  addNumImm(I.getOperand(1).getCImm()->getValue(), MIB);
}

Register
SPIRVInstructionSelector::buildI32Constant(uint32_t Val, MachineInstr &I,
                                           const SPIRVType *ResType) const {
  Type *LLVMTy = IntegerType::get(GR.CurMF->getFunction().getContext(), 32);
  const SPIRVType *SpvI32Ty =
      ResType ? ResType : GR.getOrCreateSPIRVIntegerType(32, I, TII);
  // Find a constant in DT or build a new one.
  auto ConstInt = ConstantInt::get(LLVMTy, Val);
  Register NewReg = GR.find(ConstInt, GR.CurMF);
  if (!NewReg.isValid()) {
    NewReg = MRI->createGenericVirtualRegister(LLT::scalar(32));
    GR.add(ConstInt, GR.CurMF, NewReg);
    MachineInstr *MI;
    MachineBasicBlock &BB = *I.getParent();
    if (Val == 0) {
      MI = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantNull))
               .addDef(NewReg)
               .addUse(GR.getSPIRVTypeID(SpvI32Ty));
    } else {
      MI = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantI))
               .addDef(NewReg)
               .addUse(GR.getSPIRVTypeID(SpvI32Ty))
               .addImm(APInt(32, Val).getZExtValue());
    }
    constrainSelectedInstRegOperands(*MI, TII, TRI, RBI);
  }
  return NewReg;
}

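// Note: GR.find/GR.add deduplicate constants keyed on the value and the
// current function, so repeated requests for the same 32-bit value (e.g. the
// scope and semantics literals used by the atomic selectors above) all
// resolve to a single OpConstant rather than one per use site.
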
bool SPIRVInstructionSelector::selectFCmp(Register ResVReg,
                                          const SPIRVType *ResType,
                                          MachineInstr &I) const {
  unsigned CmpOp = getFCmpOpcode(I.getOperand(1).getPredicate());
  return selectCmp(ResVReg, ResType, CmpOp, I);
}

Register SPIRVInstructionSelector::buildZerosVal(const SPIRVType *ResType,
                                                 MachineInstr &I) const {
  if (ResType->getOpcode() == SPIRV::OpTypeVector)
    return GR.getOrCreateConsIntVector(0, I, ResType, TII);
  return GR.getOrCreateConstInt(0, I, ResType, TII);
}

Register SPIRVInstructionSelector::buildOnesVal(bool AllOnes,
                                                const SPIRVType *ResType,
                                                MachineInstr &I) const {
  unsigned BitWidth = GR.getScalarOrVectorBitWidth(ResType);
  APInt One = AllOnes ? APInt::getAllOnesValue(BitWidth)
                      : APInt::getOneBitSet(BitWidth, 0);
  if (ResType->getOpcode() == SPIRV::OpTypeVector)
    return GR.getOrCreateConsIntVector(One.getZExtValue(), I, ResType, TII);
  return GR.getOrCreateConstInt(One.getZExtValue(), I, ResType, TII);
}

bool SPIRVInstructionSelector::selectSelect(Register ResVReg,
                                            const SPIRVType *ResType,
                                            MachineInstr &I,
                                            bool IsSigned) const {
  // To extend a bool, we need to use OpSelect between constants.
  Register ZeroReg = buildZerosVal(ResType, I);
  Register OneReg = buildOnesVal(IsSigned, ResType, I);
  bool IsScalarBool =
      GR.isScalarOfType(I.getOperand(1).getReg(), SPIRV::OpTypeBool);
  unsigned Opcode =
      IsScalarBool ? SPIRV::OpSelectSISCond : SPIRV::OpSelectSIVCond;
  return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(I.getOperand(1).getReg())
      .addUse(OneReg)
      .addUse(ZeroReg)
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectIToF(Register ResVReg,
                                          const SPIRVType *ResType,
                                          MachineInstr &I, bool IsSigned,
                                          unsigned Opcode) const {
  Register SrcReg = I.getOperand(1).getReg();
  // We can convert bool value directly to float type without OpConvert*ToF,
  // however the translator generates OpSelect+OpConvert*ToF, so we do the same.
  if (GR.isScalarOrVectorOfType(I.getOperand(1).getReg(), SPIRV::OpTypeBool)) {
    unsigned BitWidth = GR.getScalarOrVectorBitWidth(ResType);
    SPIRVType *TmpType = GR.getOrCreateSPIRVIntegerType(BitWidth, I, TII);
    if (ResType->getOpcode() == SPIRV::OpTypeVector) {
      const unsigned NumElts = ResType->getOperand(2).getImm();
      TmpType = GR.getOrCreateSPIRVVectorType(TmpType, NumElts, I, TII);
    }
    SrcReg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
    selectSelect(SrcReg, TmpType, I, false);
  }
  return selectUnOpWithSrc(ResVReg, ResType, I, SrcReg, Opcode);
}

bool SPIRVInstructionSelector::selectExt(Register ResVReg,
                                         const SPIRVType *ResType,
                                         MachineInstr &I, bool IsSigned) const {
  if (GR.isScalarOrVectorOfType(I.getOperand(1).getReg(), SPIRV::OpTypeBool))
    return selectSelect(ResVReg, ResType, I, IsSigned);
  unsigned Opcode = IsSigned ? SPIRV::OpSConvert : SPIRV::OpUConvert;
  return selectUnOp(ResVReg, ResType, I, Opcode);
}

bool SPIRVInstructionSelector::selectIntToBool(Register IntReg,
                                               Register ResVReg,
                                               const SPIRVType *IntTy,
                                               const SPIRVType *BoolTy,
                                               MachineInstr &I) const {
  // To truncate to a bool, we use OpBitwiseAnd 1 and OpINotEqual to zero.
  Register BitIntReg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
  bool IsVectorTy = IntTy->getOpcode() == SPIRV::OpTypeVector;
  unsigned Opcode = IsVectorTy ? SPIRV::OpBitwiseAndV : SPIRV::OpBitwiseAndS;
  Register Zero = buildZerosVal(IntTy, I);
  Register One = buildOnesVal(false, IntTy, I);
  MachineBasicBlock &BB = *I.getParent();
  BuildMI(BB, I, I.getDebugLoc(), TII.get(Opcode))
      .addDef(BitIntReg)
      .addUse(GR.getSPIRVTypeID(IntTy))
      .addUse(IntReg)
      .addUse(One)
      .constrainAllUses(TII, TRI, RBI);
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpINotEqual))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(BoolTy))
      .addUse(BitIntReg)
      .addUse(Zero)
      .constrainAllUses(TII, TRI, RBI);
}

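// Sketch of the sequence above for a scalar i32 -> i1 truncation (IDs
// illustrative):
//   %bit  = OpBitwiseAnd %uint %src %const_1
//   %bool = OpINotEqual %bool_ty %bit %const_0
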
bool SPIRVInstructionSelector::selectTrunc(Register ResVReg,
                                           const SPIRVType *ResType,
                                           MachineInstr &I) const {
  if (GR.isScalarOrVectorOfType(ResVReg, SPIRV::OpTypeBool)) {
    Register IntReg = I.getOperand(1).getReg();
    const SPIRVType *ArgType = GR.getSPIRVTypeForVReg(IntReg);
    return selectIntToBool(IntReg, ResVReg, ArgType, ResType, I);
  }
  bool IsSigned = GR.isScalarOrVectorSigned(ResType);
  unsigned Opcode = IsSigned ? SPIRV::OpSConvert : SPIRV::OpUConvert;
  return selectUnOp(ResVReg, ResType, I, Opcode);
}

bool SPIRVInstructionSelector::selectConst(Register ResVReg,
                                           const SPIRVType *ResType,
                                           const APInt &Imm,
                                           MachineInstr &I) const {
  unsigned TyOpcode = ResType->getOpcode();
  assert(TyOpcode != SPIRV::OpTypePointer || Imm.isNullValue());
  MachineBasicBlock &BB = *I.getParent();
  if ((TyOpcode == SPIRV::OpTypePointer || TyOpcode == SPIRV::OpTypeEvent) &&
      Imm.isNullValue())
    return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantNull))
        .addDef(ResVReg)
        .addUse(GR.getSPIRVTypeID(ResType))
        .constrainAllUses(TII, TRI, RBI);
  if (TyOpcode == SPIRV::OpTypeInt) {
    Register Reg = GR.getOrCreateConstInt(Imm.getZExtValue(), I, ResType, TII);
    if (Reg == ResVReg)
      return true;
    return BuildMI(BB, I, I.getDebugLoc(), TII.get(TargetOpcode::COPY))
        .addDef(ResVReg)
        .addUse(Reg)
        .constrainAllUses(TII, TRI, RBI);
  }
  auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantI))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType));
  // <=32-bit integers should be caught by the sdag pattern.
  assert(Imm.getBitWidth() > 32);
  addNumImm(Imm, MIB);
  return MIB.constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectOpUndef(Register ResVReg,
                                             const SPIRVType *ResType,
                                             MachineInstr &I) const {
  return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpUndef))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .constrainAllUses(TII, TRI, RBI);
}

static bool isImm(const MachineOperand &MO, MachineRegisterInfo *MRI) {
  assert(MO.isReg());
  const SPIRVType *TypeInst = MRI->getVRegDef(MO.getReg());
  if (TypeInst->getOpcode() != SPIRV::ASSIGN_TYPE)
    return false;
  assert(TypeInst->getOperand(1).isReg());
  MachineInstr *ImmInst = MRI->getVRegDef(TypeInst->getOperand(1).getReg());
  return ImmInst->getOpcode() == TargetOpcode::G_CONSTANT;
}

static int64_t foldImm(const MachineOperand &MO, MachineRegisterInfo *MRI) {
  const SPIRVType *TypeInst = MRI->getVRegDef(MO.getReg());
  MachineInstr *ImmInst = MRI->getVRegDef(TypeInst->getOperand(1).getReg());
  assert(ImmInst->getOpcode() == TargetOpcode::G_CONSTANT);
  return ImmInst->getOperand(1).getCImm()->getZExtValue();
}

bool SPIRVInstructionSelector::selectInsertVal(Register ResVReg,
                                               const SPIRVType *ResType,
                                               MachineInstr &I) const {
  MachineBasicBlock &BB = *I.getParent();
  auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeInsert))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType))
                 // object to insert
                 .addUse(I.getOperand(3).getReg())
                 // composite to insert into
                 .addUse(I.getOperand(2).getReg());
  for (unsigned i = 4; i < I.getNumOperands(); i++)
    MIB.addImm(foldImm(I.getOperand(i), MRI));
  return MIB.constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectExtractVal(Register ResVReg,
                                                const SPIRVType *ResType,
                                                MachineInstr &I) const {
  MachineBasicBlock &BB = *I.getParent();
  auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeExtract))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType))
                 .addUse(I.getOperand(2).getReg());
  for (unsigned i = 3; i < I.getNumOperands(); i++)
    MIB.addImm(foldImm(I.getOperand(i), MRI));
  return MIB.constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectInsertElt(Register ResVReg,
                                               const SPIRVType *ResType,
                                               MachineInstr &I) const {
  if (isImm(I.getOperand(4), MRI))
    return selectInsertVal(ResVReg, ResType, I);
  MachineBasicBlock &BB = *I.getParent();
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpVectorInsertDynamic))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(I.getOperand(2).getReg())
      .addUse(I.getOperand(3).getReg())
      .addUse(I.getOperand(4).getReg())
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectExtractElt(Register ResVReg,
                                                const SPIRVType *ResType,
                                                MachineInstr &I) const {
  if (isImm(I.getOperand(3), MRI))
    return selectExtractVal(ResVReg, ResType, I);
  MachineBasicBlock &BB = *I.getParent();
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpVectorExtractDynamic))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(I.getOperand(2).getReg())
      .addUse(I.getOperand(3).getReg())
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectGEP(Register ResVReg,
                                         const SPIRVType *ResType,
                                         MachineInstr &I) const {
  // In general we should also support OpAccessChain instrs here (i.e. not
  // PtrAccessChain), but the SPIRV-LLVM Translator doesn't emit them at all,
  // so neither do we, to stay compatible with its tests and, more importantly,
  // with consumers.
  unsigned Opcode = I.getOperand(2).getImm() ? SPIRV::OpInBoundsPtrAccessChain
                                             : SPIRV::OpPtrAccessChain;
  auto Res = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType))
                 // Object to get a pointer to.
                 .addUse(I.getOperand(3).getReg());
  // Adding indices.
  for (unsigned i = 4; i < I.getNumExplicitOperands(); ++i)
    Res.addUse(I.getOperand(i).getReg());
  return Res.constrainAllUses(TII, TRI, RBI);
}

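// Sketch: an inbounds `getelementptr i32, i32* %p, i64 1` is selected as
//   %res = OpInBoundsPtrAccessChain %ptrTy %p %const_1
// where the first index plays the PtrAccessChain "Element" role.
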
bool SPIRVInstructionSelector::selectIntrinsic(Register ResVReg,
                                               const SPIRVType *ResType,
                                               MachineInstr &I) const {
  MachineBasicBlock &BB = *I.getParent();
  switch (I.getIntrinsicID()) {
  case Intrinsic::spv_load:
    return selectLoad(ResVReg, ResType, I);
  case Intrinsic::spv_store:
    return selectStore(I);
  case Intrinsic::spv_extractv:
    return selectExtractVal(ResVReg, ResType, I);
  case Intrinsic::spv_insertv:
    return selectInsertVal(ResVReg, ResType, I);
  case Intrinsic::spv_extractelt:
    return selectExtractElt(ResVReg, ResType, I);
  case Intrinsic::spv_insertelt:
    return selectInsertElt(ResVReg, ResType, I);
  case Intrinsic::spv_gep:
    return selectGEP(ResVReg, ResType, I);
  case Intrinsic::spv_unref_global:
  case Intrinsic::spv_init_global: {
    MachineInstr *MI = MRI->getVRegDef(I.getOperand(1).getReg());
    MachineInstr *Init = I.getNumExplicitOperands() > 2
                             ? MRI->getVRegDef(I.getOperand(2).getReg())
                             : nullptr;
    assert(MI);
    return selectGlobalValue(MI->getOperand(0).getReg(), *MI, Init);
  }
  case Intrinsic::spv_const_composite: {
    // If no values are attached, the composite is a null constant.
    bool IsNull = I.getNumExplicitDefs() + 1 == I.getNumExplicitOperands();
    unsigned Opcode =
        IsNull ? SPIRV::OpConstantNull : SPIRV::OpConstantComposite;
    auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(Opcode))
                   .addDef(ResVReg)
                   .addUse(GR.getSPIRVTypeID(ResType));
    // Skip the type MD node; it was already used when generating assign.type
    // for this constant.
    if (!IsNull) {
      for (unsigned i = I.getNumExplicitDefs() + 1;
           i < I.getNumExplicitOperands(); ++i) {
        MIB.addUse(I.getOperand(i).getReg());
      }
    }
    return MIB.constrainAllUses(TII, TRI, RBI);
  }
  case Intrinsic::spv_assign_name: {
    auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpName));
    MIB.addUse(I.getOperand(I.getNumExplicitDefs() + 1).getReg());
    for (unsigned i = I.getNumExplicitDefs() + 2;
         i < I.getNumExplicitOperands(); ++i) {
      MIB.addImm(I.getOperand(i).getImm());
    }
    return MIB.constrainAllUses(TII, TRI, RBI);
  }
  case Intrinsic::spv_switch: {
    auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSwitch));
    for (unsigned i = 1; i < I.getNumExplicitOperands(); ++i) {
      if (I.getOperand(i).isReg())
        MIB.addReg(I.getOperand(i).getReg());
      else if (I.getOperand(i).isCImm())
        addNumImm(I.getOperand(i).getCImm()->getValue(), MIB);
      else if (I.getOperand(i).isMBB())
        MIB.addMBB(I.getOperand(i).getMBB());
      else
        llvm_unreachable("Unexpected OpSwitch operand");
    }
    return MIB.constrainAllUses(TII, TRI, RBI);
  }
  case Intrinsic::spv_cmpxchg:
    return selectAtomicCmpXchg(ResVReg, ResType, I);
  default:
    llvm_unreachable("Intrinsic selection not implemented");
  }
  return true;
}

bool SPIRVInstructionSelector::selectFrameIndex(Register ResVReg,
                                                const SPIRVType *ResType,
                                                MachineInstr &I) const {
  return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpVariable))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addImm(static_cast<uint32_t>(SPIRV::StorageClass::Function))
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectBranch(MachineInstr &I) const {
  // InstructionSelector walks backwards through the instructions. We can use
  // both a G_BR and a G_BRCOND to create an OpBranchConditional. We hit G_BR
  // first, so we can generate an OpBranchConditional here. If there is no
  // G_BRCOND, we just use OpBranch for a regular unconditional branch.
  const MachineInstr *PrevI = I.getPrevNode();
  MachineBasicBlock &MBB = *I.getParent();
  if (PrevI != nullptr && PrevI->getOpcode() == TargetOpcode::G_BRCOND) {
    return BuildMI(MBB, I, I.getDebugLoc(), TII.get(SPIRV::OpBranchConditional))
        .addUse(PrevI->getOperand(0).getReg())
        .addMBB(PrevI->getOperand(1).getMBB())
        .addMBB(I.getOperand(0).getMBB())
        .constrainAllUses(TII, TRI, RBI);
  }
  return BuildMI(MBB, I, I.getDebugLoc(), TII.get(SPIRV::OpBranch))
      .addMBB(I.getOperand(0).getMBB())
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectBranchCond(MachineInstr &I) const {
  // InstructionSelector walks backwards through the instructions. For an
  // explicit conditional branch with no fallthrough, we use both a G_BR and a
  // G_BRCOND to create an OpBranchConditional. We should hit G_BR first, and
  // generate the OpBranchConditional in selectBranch above.
  //
  // If an OpBranchConditional has been generated, we simply return, as the
  // work is already done. If there is no OpBranchConditional, LLVM must be
  // relying on implicit fallthrough to the next basic block, so we need to
  // create an OpBranchConditional with an explicit "false" argument pointing
  // to the next basic block that LLVM would fall through to.
  const MachineInstr *NextI = I.getNextNode();
  // Check if this has already been successfully selected.
  if (NextI != nullptr && NextI->getOpcode() == SPIRV::OpBranchConditional)
    return true;
  // Must be relying on implicit block fallthrough, so generate an
  // OpBranchConditional with the "next" basic block as the "false" target.
  MachineBasicBlock &MBB = *I.getParent();
  unsigned NextMBBNum = MBB.getNextNode()->getNumber();
  MachineBasicBlock *NextMBB = I.getMF()->getBlockNumbered(NextMBBNum);
  return BuildMI(MBB, I, I.getDebugLoc(), TII.get(SPIRV::OpBranchConditional))
      .addUse(I.getOperand(0).getReg())
      .addMBB(I.getOperand(1).getMBB())
      .addMBB(NextMBB)
      .constrainAllUses(TII, TRI, RBI);
}

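// Illustrative fallthrough case: for
//   bb.0: G_BRCOND %cond, %bb.2   (falling through to bb.1)
// selectBranchCond emits
//   OpBranchConditional %cond %bb.2 %bb.1
// so the implicit fallthrough edge becomes an explicit "false" target.
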
bool SPIRVInstructionSelector::selectPhi(Register ResVReg,
                                         const SPIRVType *ResType,
                                         MachineInstr &I) const {
  auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpPhi))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType));
  const unsigned NumOps = I.getNumOperands();
  for (unsigned i = 1; i < NumOps; i += 2) {
    MIB.addUse(I.getOperand(i + 0).getReg());
    MIB.addMBB(I.getOperand(i + 1).getMBB());
  }
  return MIB.constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectGlobalValue(
    Register ResVReg, MachineInstr &I, const MachineInstr *Init) const {
  // FIXME: don't use MachineIRBuilder here, replace it with BuildMI.
  MachineIRBuilder MIRBuilder(I);
  const GlobalValue *GV = I.getOperand(1).getGlobal();
  SPIRVType *ResType = GR.getOrCreateSPIRVType(
      GV->getType(), MIRBuilder, SPIRV::AccessQualifier::ReadWrite, false);

  std::string GlobalIdent = GV->getGlobalIdentifier();
  // We have functions as operands in tests with blocks of instructions, e.g.
  // in transcoding/global_block.ll. These operands are not used and should be
  // substituted by zero constants. Their type is expected to always be
  // OpTypePointer Function %uchar.
  if (isa<Function>(GV)) {
    const Constant *ConstVal = GV;
    MachineBasicBlock &BB = *I.getParent();
    Register NewReg = GR.find(ConstVal, GR.CurMF);
    if (!NewReg.isValid()) {
      SPIRVType *SpvBaseTy = GR.getOrCreateSPIRVIntegerType(8, I, TII);
      ResType = GR.getOrCreateSPIRVPointerType(SpvBaseTy, I, TII);
      Register NewReg = ResVReg;
      GR.add(ConstVal, GR.CurMF, NewReg);
      return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantNull))
          .addDef(NewReg)
          .addUse(GR.getSPIRVTypeID(ResType))
          .constrainAllUses(TII, TRI, RBI);
    }
    assert(NewReg != ResVReg);
    return BuildMI(BB, I, I.getDebugLoc(), TII.get(TargetOpcode::COPY))
        .addDef(ResVReg)
        .addUse(NewReg)
        .constrainAllUses(TII, TRI, RBI);
  }
  auto GlobalVar = cast<GlobalVariable>(GV);
  assert(GlobalVar->getName() != "llvm.global.annotations");

  bool HasInit = GlobalVar->hasInitializer() &&
                 !isa<UndefValue>(GlobalVar->getInitializer());
  // Skip empty declaration for GVs with initializers till we get the decl with
  // passed initializer.
  if (HasInit && !Init)
    return true;

  unsigned AddrSpace = GV->getAddressSpace();
  SPIRV::StorageClass::StorageClass Storage =
      addressSpaceToStorageClass(AddrSpace);
  bool HasLnkTy = GV->getLinkage() != GlobalValue::InternalLinkage &&
                  Storage != SPIRV::StorageClass::Function;
  SPIRV::LinkageType::LinkageType LnkType =
      (GV->isDeclaration() || GV->hasAvailableExternallyLinkage())
          ? SPIRV::LinkageType::Import
          : SPIRV::LinkageType::Export;

  Register Reg = GR.buildGlobalVariable(ResVReg, ResType, GlobalIdent, GV,
                                        Storage, Init, GlobalVar->isConstant(),
                                        HasLnkTy, LnkType, MIRBuilder, true);
  return Reg.isValid();
}

namespace llvm {
InstructionSelector *
createSPIRVInstructionSelector(const SPIRVTargetMachine &TM,
                               const SPIRVSubtarget &Subtarget,
                               const RegisterBankInfo &RBI) {
  return new SPIRVInstructionSelector(TM, Subtarget, RBI);
}
} // namespace llvm