[AMDGPU][Disassembler][NFCI] Always defer immediate operands. (#138885)
Removes the need to parameterise decoders with OperandSemantics, ImmWidth and MandatoryLiteral, and likely allows further simplification of how _DEFERRED immediates are handled. Tested to work downstream.
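For context, the change amounts to a two-phase scheme: source-operand decoders now keep the raw encoding of anything that is not a register, and a single post-decode pass (decodeImmOperands in the diff below) rewrites those immediates using the operand types recorded in the MCInstrDesc. The following is a minimal standalone sketch of that idea, not the patch's code; the constants and names are illustrative only (the real ranges live in AMDGPU::EncValues):

#include <cstdint>
#include <iostream>

// Illustrative analogue of the inline-constant encoding ranges.
constexpr unsigned INLINE_INT_MIN = 128; // 128..192 encode 0..64, 193..208 encode -1..-16
constexpr unsigned INLINE_INT_MAX = 208;
constexpr unsigned LITERAL_CONST  = 255; // a trailing 32/64-bit literal follows

// Phase 1: the decoder defers interpretation and stores the raw encoding.
int64_t decodeSrcImmDeferred(unsigned Enc) { return Enc; }

// Phase 2: one post-decode pass resolves the value per operand type.
int64_t resolveInlineInt(int64_t Enc) {
  if (Enc <= 192)
    return Enc - INLINE_INT_MIN; // 0..64
  return 192 - Enc;              // -1..-16
}

int main() {
  int64_t Op = decodeSrcImmDeferred(193); // encoding for -1
  if (Op >= INLINE_INT_MIN && Op <= INLINE_INT_MAX)
    Op = resolveInlineInt(Op);
  else if (Op == LITERAL_CONST) {
    // _DEFERRED operand types keep the sentinel; other types would read the
    // trailing literal here.
  }
  std::cout << Op << '\n'; // prints -1
}

Because the interpretation happens after all operands are decoded, the decoder templates no longer need ImmWidth or OperandSemantics parameters, which is exactly what the diff removes.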
@@ -45,6 +45,11 @@ using namespace llvm;

using DecodeStatus = llvm::MCDisassembler::DecodeStatus;

static int64_t getInlineImmValF16(unsigned Imm);
static int64_t getInlineImmValBF16(unsigned Imm);
static int64_t getInlineImmVal32(unsigned Imm);
static int64_t getInlineImmVal64(unsigned Imm);

AMDGPUDisassembler::AMDGPUDisassembler(const MCSubtargetInfo &STI,
MCContext &Ctx, MCInstrInfo const *MCII)
: MCDisassembler(STI, Ctx), MCII(MCII), MRI(*Ctx.getRegisterInfo()),
@@ -151,36 +156,31 @@ static DecodeStatus decodeDpp8FI(MCInst &Inst, unsigned Val, uint64_t Addr,
Inst, DAsm->createRegOperand(AMDGPU::RegClass##RegClassID, Imm)); \
}

#define DECODE_SrcOp(Name, EncSize, OpWidth, EncImm, MandatoryLiteral, \
ImmWidth) \
#define DECODE_SrcOp(Name, EncSize, OpWidth, EncImm) \
static DecodeStatus Name(MCInst &Inst, unsigned Imm, uint64_t /*Addr*/, \
const MCDisassembler *Decoder) { \
assert(Imm < (1 << EncSize) && #EncSize "-bit encoding"); \
auto DAsm = static_cast<const AMDGPUDisassembler *>(Decoder); \
return addOperand(Inst, \
DAsm->decodeSrcOp(AMDGPUDisassembler::OpWidth, EncImm, \
MandatoryLiteral, ImmWidth)); \
DAsm->decodeSrcOp(AMDGPUDisassembler::OpWidth, EncImm)); \
}

static DecodeStatus decodeSrcOp(MCInst &Inst, unsigned EncSize,
AMDGPUDisassembler::OpWidthTy OpWidth,
unsigned Imm, unsigned EncImm,
bool MandatoryLiteral, unsigned ImmWidth,
AMDGPU::OperandSemantics Sema,
const MCDisassembler *Decoder) {
assert(Imm < (1U << EncSize) && "Operand doesn't fit encoding!");
const auto *DAsm = static_cast<const AMDGPUDisassembler *>(Decoder);
return addOperand(Inst, DAsm->decodeSrcOp(OpWidth, EncImm, MandatoryLiteral,
ImmWidth, Sema));
return addOperand(Inst, DAsm->decodeSrcOp(OpWidth, EncImm));
}

// Decoder for registers. Imm(7-bit) is number of register, uses decodeSrcOp to
// get register class. Used by SGPR only operands.
#define DECODE_OPERAND_SREG_7(RegClass, OpWidth) \
DECODE_SrcOp(Decode##RegClass##RegisterClass, 7, OpWidth, Imm, false, 0)
DECODE_SrcOp(Decode##RegClass##RegisterClass, 7, OpWidth, Imm)

#define DECODE_OPERAND_SREG_8(RegClass, OpWidth) \
DECODE_SrcOp(Decode##RegClass##RegisterClass, 8, OpWidth, Imm, false, 0)
DECODE_SrcOp(Decode##RegClass##RegisterClass, 8, OpWidth, Imm)

// Decoder for registers. Imm(10-bit): Imm{7-0} is number of register,
// Imm{9} is acc(agpr or vgpr) Imm{8} should be 0 (see VOP3Pe_SMFMAC).
@@ -190,7 +190,7 @@ template <AMDGPUDisassembler::OpWidthTy OpWidth>
static DecodeStatus decodeAV10(MCInst &Inst, unsigned Imm, uint64_t /* Addr */,
const MCDisassembler *Decoder) {
return decodeSrcOp(Inst, 10, OpWidth, Imm, Imm | AMDGPU::EncValues::IS_VGPR,
false, 0, AMDGPU::OperandSemantics::INT, Decoder);
Decoder);
}

// Decoder for Src(9-bit encoding) registers only.
@@ -198,8 +198,7 @@ template <AMDGPUDisassembler::OpWidthTy OpWidth>
static DecodeStatus decodeSrcReg9(MCInst &Inst, unsigned Imm,
uint64_t /* Addr */,
const MCDisassembler *Decoder) {
return decodeSrcOp(Inst, 9, OpWidth, Imm, Imm, false, 0,
AMDGPU::OperandSemantics::INT, Decoder);
return decodeSrcOp(Inst, 9, OpWidth, Imm, Imm, Decoder);
}

// Decoder for Src(9-bit encoding) AGPR, register number encoded in 9bits, set
@@ -208,8 +207,7 @@ static DecodeStatus decodeSrcReg9(MCInst &Inst, unsigned Imm,
template <AMDGPUDisassembler::OpWidthTy OpWidth>
static DecodeStatus decodeSrcA9(MCInst &Inst, unsigned Imm, uint64_t /* Addr */,
const MCDisassembler *Decoder) {
return decodeSrcOp(Inst, 9, OpWidth, Imm, Imm | 512, false, 0,
AMDGPU::OperandSemantics::INT, Decoder);
return decodeSrcOp(Inst, 9, OpWidth, Imm, Imm | 512, Decoder);
}

// Decoder for 'enum10' from decodeSrcOp, Imm{0-8} is 9-bit Src encoding
@@ -218,42 +216,35 @@ template <AMDGPUDisassembler::OpWidthTy OpWidth>
static DecodeStatus decodeSrcAV10(MCInst &Inst, unsigned Imm,
uint64_t /* Addr */,
const MCDisassembler *Decoder) {
return decodeSrcOp(Inst, 10, OpWidth, Imm, Imm, false, 0,
AMDGPU::OperandSemantics::INT, Decoder);
return decodeSrcOp(Inst, 10, OpWidth, Imm, Imm, Decoder);
}

// Decoder for RegisterOperands using 9-bit Src encoding. Operand can be
// register from RegClass or immediate. Registers that don't belong to RegClass
// will be decoded and InstPrinter will report warning. Immediate will be
// decoded into constant of size ImmWidth, should match width of immediate used
// by OperandType (important for floating point types).
template <AMDGPUDisassembler::OpWidthTy OpWidth, unsigned ImmWidth,
unsigned OperandSemantics>
// decoded into constant matching the OperandType (important for floating point
// types).
template <AMDGPUDisassembler::OpWidthTy OpWidth>
static DecodeStatus decodeSrcRegOrImm9(MCInst &Inst, unsigned Imm,
uint64_t /* Addr */,
const MCDisassembler *Decoder) {
return decodeSrcOp(Inst, 9, OpWidth, Imm, Imm, false, ImmWidth,
(AMDGPU::OperandSemantics)OperandSemantics, Decoder);
return decodeSrcOp(Inst, 9, OpWidth, Imm, Imm, Decoder);
}

// Decoder for Src(9-bit encoding) AGPR or immediate. Set Imm{9} to 1 (set acc)
// and decode using 'enum10' from decodeSrcOp.
template <AMDGPUDisassembler::OpWidthTy OpWidth, unsigned ImmWidth,
unsigned OperandSemantics>
template <AMDGPUDisassembler::OpWidthTy OpWidth>
static DecodeStatus decodeSrcRegOrImmA9(MCInst &Inst, unsigned Imm,
uint64_t /* Addr */,
const MCDisassembler *Decoder) {
return decodeSrcOp(Inst, 9, OpWidth, Imm, Imm | 512, false, ImmWidth,
(AMDGPU::OperandSemantics)OperandSemantics, Decoder);
return decodeSrcOp(Inst, 9, OpWidth, Imm, Imm | 512, Decoder);
}

template <AMDGPUDisassembler::OpWidthTy OpWidth, unsigned ImmWidth,
unsigned OperandSemantics>
template <AMDGPUDisassembler::OpWidthTy OpWidth>
static DecodeStatus decodeSrcRegOrImmDeferred9(MCInst &Inst, unsigned Imm,
uint64_t /* Addr */,
const MCDisassembler *Decoder) {
return decodeSrcOp(Inst, 9, OpWidth, Imm, Imm, true, ImmWidth,
(AMDGPU::OperandSemantics)OperandSemantics, Decoder);
return decodeSrcOp(Inst, 9, OpWidth, Imm, Imm, Decoder);
}

// Default decoders generated by tablegen: 'Decode<RegClass>RegisterClass'
@@ -320,8 +311,7 @@ DecodeVGPR_16_Lo128RegisterClass(MCInst &Inst, unsigned Imm, uint64_t /*Addr*/,
return addOperand(Inst, DAsm->createVGPR16Operand(RegIdx, IsHi));
}

template <AMDGPUDisassembler::OpWidthTy OpWidth, unsigned ImmWidth,
unsigned OperandSemantics>
template <AMDGPUDisassembler::OpWidthTy OpWidth>
static DecodeStatus decodeOperand_VSrcT16_Lo128(MCInst &Inst, unsigned Imm,
uint64_t /*Addr*/,
const MCDisassembler *Decoder) {
@@ -333,13 +323,10 @@ static DecodeStatus decodeOperand_VSrcT16_Lo128(MCInst &Inst, unsigned Imm,
unsigned RegIdx = Imm & 0x7f;
return addOperand(Inst, DAsm->createVGPR16Operand(RegIdx, IsHi));
}
return addOperand(Inst, DAsm->decodeNonVGPRSrcOp(
OpWidth, Imm & 0xFF, false, ImmWidth,
(AMDGPU::OperandSemantics)OperandSemantics));
return addOperand(Inst, DAsm->decodeNonVGPRSrcOp(OpWidth, Imm & 0xFF));
}

template <AMDGPUDisassembler::OpWidthTy OpWidth, unsigned ImmWidth,
unsigned OperandSemantics>
template <AMDGPUDisassembler::OpWidthTy OpWidth>
static DecodeStatus
decodeOperand_VSrcT16_Lo128_Deferred(MCInst &Inst, unsigned Imm,
uint64_t /*Addr*/,
@@ -352,13 +339,10 @@ decodeOperand_VSrcT16_Lo128_Deferred(MCInst &Inst, unsigned Imm,
unsigned RegIdx = Imm & 0x7f;
return addOperand(Inst, DAsm->createVGPR16Operand(RegIdx, IsHi));
}
return addOperand(Inst, DAsm->decodeNonVGPRSrcOp(
OpWidth, Imm & 0xFF, true, ImmWidth,
(AMDGPU::OperandSemantics)OperandSemantics));
return addOperand(Inst, DAsm->decodeNonVGPRSrcOp(OpWidth, Imm & 0xFF));
}

template <AMDGPUDisassembler::OpWidthTy OpWidth, unsigned ImmWidth,
unsigned OperandSemantics>
template <AMDGPUDisassembler::OpWidthTy OpWidth>
static DecodeStatus decodeOperand_VSrcT16(MCInst &Inst, unsigned Imm,
uint64_t /*Addr*/,
const MCDisassembler *Decoder) {
@@ -370,9 +354,7 @@ static DecodeStatus decodeOperand_VSrcT16(MCInst &Inst, unsigned Imm,
unsigned RegIdx = Imm & 0xff;
return addOperand(Inst, DAsm->createVGPR16Operand(RegIdx, IsHi));
}
return addOperand(Inst, DAsm->decodeNonVGPRSrcOp(
OpWidth, Imm & 0xFF, false, ImmWidth,
(AMDGPU::OperandSemantics)OperandSemantics));
return addOperand(Inst, DAsm->decodeNonVGPRSrcOp(OpWidth, Imm & 0xFF));
}

static DecodeStatus decodeOperand_VGPR_16(MCInst &Inst, unsigned Imm,
@@ -462,9 +444,7 @@ static DecodeStatus decodeOperand_VSrc_f64(MCInst &Inst, unsigned Imm,
const MCDisassembler *Decoder) {
assert(Imm < (1 << 9) && "9-bit encoding");
const auto *DAsm = static_cast<const AMDGPUDisassembler *>(Decoder);
return addOperand(Inst,
DAsm->decodeSrcOp(AMDGPUDisassembler::OPW64, Imm, false, 64,
AMDGPU::OperandSemantics::FP64));
return addOperand(Inst, DAsm->decodeSrcOp(AMDGPUDisassembler::OPW64, Imm));
}

#define DECODE_SDWA(DecName) \
@@ -557,6 +537,78 @@ static inline DecoderUInt128 eat16Bytes(ArrayRef<uint8_t> &Bytes) {
return DecoderUInt128(Lo, Hi);
}

void AMDGPUDisassembler::decodeImmOperands(MCInst &MI,
const MCInstrInfo &MCII) const {
const MCInstrDesc &Desc = MCII.get(MI.getOpcode());
for (auto [OpNo, OpDesc] : enumerate(Desc.operands())) {
if (OpNo >= MI.getNumOperands())
continue;

// TODO: Fix V_DUAL_FMAMK_F32_X_FMAAK_F32_gfx12 vsrc operands,
// defined to take VGPR_32, but in reality allowing inline constants.
bool IsSrc = AMDGPU::OPERAND_SRC_FIRST <= OpDesc.OperandType &&
OpDesc.OperandType <= AMDGPU::OPERAND_SRC_LAST;
if (!IsSrc && OpDesc.OperandType != MCOI::OPERAND_REGISTER)
continue;

MCOperand &Op = MI.getOperand(OpNo);
if (!Op.isImm())
continue;
int64_t Imm = Op.getImm();
if (AMDGPU::EncValues::INLINE_INTEGER_C_MIN <= Imm &&
Imm <= AMDGPU::EncValues::INLINE_INTEGER_C_MAX) {
Op = decodeIntImmed(Imm);
continue;
}

if (Imm == AMDGPU::EncValues::LITERAL_CONST) {
switch (OpDesc.OperandType) {
case AMDGPU::OPERAND_REG_IMM_BF16_DEFERRED:
case AMDGPU::OPERAND_REG_IMM_FP16_DEFERRED:
case AMDGPU::OPERAND_REG_IMM_FP32_DEFERRED:
Op = MCOperand::createImm(AMDGPU::EncValues::LITERAL_CONST);
continue;
default:
Op = decodeLiteralConstant(OpDesc.OperandType ==
AMDGPU::OPERAND_REG_IMM_FP64);
continue;
}
}

if (AMDGPU::EncValues::INLINE_FLOATING_C_MIN <= Imm &&
Imm <= AMDGPU::EncValues::INLINE_FLOATING_C_MAX) {
switch (OpDesc.OperandType) {
case AMDGPU::OPERAND_REG_IMM_BF16:
case AMDGPU::OPERAND_REG_IMM_BF16_DEFERRED:
case AMDGPU::OPERAND_REG_IMM_V2BF16:
case AMDGPU::OPERAND_REG_INLINE_C_BF16:
case AMDGPU::OPERAND_REG_INLINE_C_V2BF16:
Imm = getInlineImmValBF16(Imm);
break;
case AMDGPU::OPERAND_REG_IMM_FP16:
case AMDGPU::OPERAND_REG_IMM_FP16_DEFERRED:
case AMDGPU::OPERAND_REG_IMM_INT16:
case AMDGPU::OPERAND_REG_IMM_V2FP16:
case AMDGPU::OPERAND_REG_INLINE_C_FP16:
case AMDGPU::OPERAND_REG_INLINE_C_INT16:
case AMDGPU::OPERAND_REG_INLINE_C_V2FP16:
Imm = getInlineImmValF16(Imm);
break;
case AMDGPU::OPERAND_REG_IMM_FP64:
case AMDGPU::OPERAND_REG_IMM_INT64:
case AMDGPU::OPERAND_REG_INLINE_AC_FP64:
case AMDGPU::OPERAND_REG_INLINE_C_FP64:
case AMDGPU::OPERAND_REG_INLINE_C_INT64:
Imm = getInlineImmVal64(Imm);
break;
default:
Imm = getInlineImmVal32(Imm);
}
Op.setImm(Imm);
}
}
}

DecodeStatus AMDGPUDisassembler::getInstruction(MCInst &MI, uint64_t &Size,
ArrayRef<uint8_t> Bytes_,
uint64_t Address,
@@ -711,6 +763,8 @@ DecodeStatus AMDGPUDisassembler::getInstruction(MCInst &MI, uint64_t &Size,

DecodeStatus Status = MCDisassembler::Success;

decodeImmOperands(MI, *MCII);

if (MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::DPP) {
if (isMacDPP(MI))
convertMacDPPInst(MI);
@@ -1575,33 +1629,6 @@ static int64_t getInlineImmValBF16(unsigned Imm) {
}
}

static int64_t getInlineImmVal16(unsigned Imm, AMDGPU::OperandSemantics Sema) {
return (Sema == AMDGPU::OperandSemantics::BF16) ? getInlineImmValBF16(Imm)
: getInlineImmValF16(Imm);
}

MCOperand AMDGPUDisassembler::decodeFPImmed(unsigned ImmWidth, unsigned Imm,
AMDGPU::OperandSemantics Sema) {
assert(Imm >= AMDGPU::EncValues::INLINE_FLOATING_C_MIN &&
Imm <= AMDGPU::EncValues::INLINE_FLOATING_C_MAX);

// ToDo: case 248: 1/(2*PI) - is allowed only on VI
// ImmWidth 0 is a default case where operand should not allow immediates.
// Imm value is still decoded into 32 bit immediate operand, inst printer will
// use it to print verbose error message.
switch (ImmWidth) {
case 0:
case 32:
return MCOperand::createImm(getInlineImmVal32(Imm));
case 64:
return MCOperand::createImm(getInlineImmVal64(Imm));
case 16:
return MCOperand::createImm(getInlineImmVal16(Imm, Sema));
default:
llvm_unreachable("implement me");
}
}

unsigned AMDGPUDisassembler::getVgprClassId(const OpWidthTy Width) const {
using namespace AMDGPU;

@@ -1709,10 +1736,8 @@ int AMDGPUDisassembler::getTTmpIdx(unsigned Val) const {
return (TTmpMin <= Val && Val <= TTmpMax)? Val - TTmpMin : -1;
}

MCOperand AMDGPUDisassembler::decodeSrcOp(const OpWidthTy Width, unsigned Val,
bool MandatoryLiteral,
unsigned ImmWidth,
AMDGPU::OperandSemantics Sema) const {
MCOperand AMDGPUDisassembler::decodeSrcOp(const OpWidthTy Width,
unsigned Val) const {
using namespace AMDGPU::EncValues;

assert(Val < 1024); // enum10
@@ -1724,14 +1749,11 @@ MCOperand AMDGPUDisassembler::decodeSrcOp(const OpWidthTy Width, unsigned Val,
return createRegOperand(IsAGPR ? getAgprClassId(Width)
: getVgprClassId(Width), Val - VGPR_MIN);
}
return decodeNonVGPRSrcOp(Width, Val & 0xFF, MandatoryLiteral, ImmWidth,
Sema);
return decodeNonVGPRSrcOp(Width, Val & 0xFF);
}

MCOperand
AMDGPUDisassembler::decodeNonVGPRSrcOp(const OpWidthTy Width, unsigned Val,
bool MandatoryLiteral, unsigned ImmWidth,
AMDGPU::OperandSemantics Sema) const {
MCOperand AMDGPUDisassembler::decodeNonVGPRSrcOp(const OpWidthTy Width,
unsigned Val) const {
// Cases when Val{8} is 1 (vgpr, agpr or true 16 vgpr) should have been
// decoded earlier.
assert(Val < (1 << 8) && "9-bit Src encoding when Val{8} is 0");
@@ -1748,18 +1770,10 @@ AMDGPUDisassembler::decodeNonVGPRSrcOp(const OpWidthTy Width, unsigned Val,
return createSRegOperand(getTtmpClassId(Width), TTmpIdx);
}

if (INLINE_INTEGER_C_MIN <= Val && Val <= INLINE_INTEGER_C_MAX)
return decodeIntImmed(Val);

if (INLINE_FLOATING_C_MIN <= Val && Val <= INLINE_FLOATING_C_MAX)
return decodeFPImmed(ImmWidth, Val, Sema);

if (Val == LITERAL_CONST) {
if (MandatoryLiteral)
// Keep a sentinel value for deferred setting
return MCOperand::createImm(LITERAL_CONST);
return decodeLiteralConstant(Sema == AMDGPU::OperandSemantics::FP64);
}
if ((INLINE_INTEGER_C_MIN <= Val && Val <= INLINE_INTEGER_C_MAX) ||
(INLINE_FLOATING_C_MIN <= Val && Val <= INLINE_FLOATING_C_MAX) ||
Val == LITERAL_CONST)
return MCOperand::createImm(Val);

switch (Width) {
case OPW32:
@@ -1878,10 +1892,8 @@ MCOperand AMDGPUDisassembler::decodeSpecialReg96Plus(unsigned Val) const {
return errOperand(Val, "unknown operand encoding " + Twine(Val));
}

MCOperand
AMDGPUDisassembler::decodeSDWASrc(const OpWidthTy Width, const unsigned Val,
unsigned ImmWidth,
AMDGPU::OperandSemantics Sema) const {
MCOperand AMDGPUDisassembler::decodeSDWASrc(const OpWidthTy Width,
const unsigned Val) const {
using namespace AMDGPU::SDWA;
using namespace AMDGPU::EncValues;

@@ -1908,11 +1920,9 @@ AMDGPUDisassembler::decodeSDWASrc(const OpWidthTy Width, const unsigned Val,

const unsigned SVal = Val - SDWA9EncValues::SRC_SGPR_MIN;

if (INLINE_INTEGER_C_MIN <= SVal && SVal <= INLINE_INTEGER_C_MAX)
return decodeIntImmed(SVal);

if (INLINE_FLOATING_C_MIN <= SVal && SVal <= INLINE_FLOATING_C_MAX)
return decodeFPImmed(ImmWidth, SVal, Sema);
if ((INLINE_INTEGER_C_MIN <= SVal && SVal <= INLINE_INTEGER_C_MAX) ||
(INLINE_FLOATING_C_MIN <= SVal && SVal <= INLINE_FLOATING_C_MAX))
return MCOperand::createImm(SVal);

return decodeSpecialReg32(SVal);
}
@@ -1922,11 +1932,11 @@ AMDGPUDisassembler::decodeSDWASrc(const OpWidthTy Width, const unsigned Val,
}

MCOperand AMDGPUDisassembler::decodeSDWASrc16(unsigned Val) const {
return decodeSDWASrc(OPW16, Val, 16, AMDGPU::OperandSemantics::FP16);
return decodeSDWASrc(OPW16, Val);
}

MCOperand AMDGPUDisassembler::decodeSDWASrc32(unsigned Val) const {
return decodeSDWASrc(OPW32, Val, 32, AMDGPU::OperandSemantics::FP32);
return decodeSDWASrc(OPW32, Val);
}

MCOperand AMDGPUDisassembler::decodeSDWAVopcDst(unsigned Val) const {

@@ -108,6 +108,8 @@ private:

const MCExpr *createConstantSymbolExpr(StringRef Id, int64_t Val);

void decodeImmOperands(MCInst &MI, const MCInstrInfo &MCII) const;

public:
AMDGPUDisassembler(const MCSubtargetInfo &STI, MCContext &Ctx,
MCInstrInfo const *MCII);
@@ -211,30 +213,20 @@ public:
unsigned getTtmpClassId(const OpWidthTy Width) const;

static MCOperand decodeIntImmed(unsigned Imm);
static MCOperand decodeFPImmed(unsigned ImmWidth, unsigned Imm,
AMDGPU::OperandSemantics Sema);

MCOperand decodeMandatoryLiteralConstant(unsigned Imm) const;
MCOperand decodeLiteralConstant(bool ExtendFP64) const;

MCOperand decodeSrcOp(
const OpWidthTy Width, unsigned Val, bool MandatoryLiteral = false,
unsigned ImmWidth = 0,
AMDGPU::OperandSemantics Sema = AMDGPU::OperandSemantics::INT) const;
MCOperand decodeSrcOp(const OpWidthTy Width, unsigned Val) const;

MCOperand decodeNonVGPRSrcOp(
const OpWidthTy Width, unsigned Val, bool MandatoryLiteral = false,
unsigned ImmWidth = 0,
AMDGPU::OperandSemantics Sema = AMDGPU::OperandSemantics::INT) const;
MCOperand decodeNonVGPRSrcOp(const OpWidthTy Width, unsigned Val) const;

MCOperand decodeVOPDDstYOp(MCInst &Inst, unsigned Val) const;
MCOperand decodeSpecialReg32(unsigned Val) const;
MCOperand decodeSpecialReg64(unsigned Val) const;
MCOperand decodeSpecialReg96Plus(unsigned Val) const;

MCOperand decodeSDWASrc(const OpWidthTy Width, unsigned Val,
unsigned ImmWidth,
AMDGPU::OperandSemantics Sema) const;
MCOperand decodeSDWASrc(const OpWidthTy Width, unsigned Val) const;
MCOperand decodeSDWASrc16(unsigned Val) const;
MCOperand decodeSDWASrc32(unsigned Val) const;
MCOperand decodeSDWAVopcDst(unsigned Val) const;

@@ -269,15 +269,6 @@ enum OperandType : unsigned {
OPERAND_KIMM_LAST = OPERAND_KIMM16

};

// Should be in sync with the OperandSemantics defined in SIRegisterInfo.td
enum OperandSemantics : unsigned {
INT = 0,
FP16 = 1,
BF16 = 2,
FP32 = 3,
FP64 = 4,
};
}

// Input operand modifiers bit-masks

@@ -1093,41 +1093,30 @@ class RegOrImmOperand <RegisterClass RegClass, string OperandTypeName>
let ParserMatchClass = RegImmMatcher<!subst("_Deferred", "", NAME)>;
}

// Should be in sync with the OperandSemantics defined in SIDefines.h
def OperandSemantics {
int INT = 0;
int FP16 = 1;
int BF16 = 2;
int FP32 = 3;
int FP64 = 4;
}

//===----------------------------------------------------------------------===//
// SSrc_* Operands with an SGPR or a 32-bit immediate
//===----------------------------------------------------------------------===//

class SrcRegOrImm9<RegisterClass regClass, string opWidth, string operandType,
int immWidth, int OperandSemantics>
class SrcRegOrImm9<RegisterClass regClass, string opWidth, string operandType>
: RegOrImmOperand<regClass, operandType> {
string DecoderMethodName = "decodeSrcRegOrImm9";
let DecoderMethod = DecoderMethodName # "<AMDGPUDisassembler::" # opWidth #
", " # immWidth # ", " # OperandSemantics # ">";
let DecoderMethod = DecoderMethodName # "<AMDGPUDisassembler::" # opWidth # ">";
}

class SrcRegOrImm9_t16<string operandType, int OperandSemantics, RegisterClass regClass = VS_16>
: SrcRegOrImm9<regClass, "OPW16", operandType, 16, OperandSemantics> {
class SrcRegOrImm9_t16<string operandType, RegisterClass regClass = VS_16>
: SrcRegOrImm9<regClass, "OPW16", operandType> {
let DecoderMethodName = "decodeOperand_VSrcT16";
let EncoderMethod = "getMachineOpValueT16";
}

def SSrc_b16 : SrcRegOrImm9 <SReg_32, "OPW32", "OPERAND_REG_IMM_INT16", 16, OperandSemantics.INT>;
def SSrc_bf16: SrcRegOrImm9 <SReg_32, "OPW32", "OPERAND_REG_IMM_BF16", 16, OperandSemantics.BF16>;
def SSrc_f16 : SrcRegOrImm9 <SReg_32, "OPW32", "OPERAND_REG_IMM_FP16", 16, OperandSemantics.FP16>;
def SSrc_b32 : SrcRegOrImm9 <SReg_32, "OPW32", "OPERAND_REG_IMM_INT32", 32, OperandSemantics.INT>;
def SSrc_f32 : SrcRegOrImm9 <SReg_32, "OPW32", "OPERAND_REG_IMM_FP32", 32, OperandSemantics.FP32>;
def SSrc_b64 : SrcRegOrImm9 <SReg_64, "OPW64", "OPERAND_REG_IMM_INT64", 64, OperandSemantics.INT>;
def SSrc_b16 : SrcRegOrImm9 <SReg_32, "OPW32", "OPERAND_REG_IMM_INT16">;
def SSrc_bf16: SrcRegOrImm9 <SReg_32, "OPW32", "OPERAND_REG_IMM_BF16">;
def SSrc_f16 : SrcRegOrImm9 <SReg_32, "OPW32", "OPERAND_REG_IMM_FP16">;
def SSrc_b32 : SrcRegOrImm9 <SReg_32, "OPW32", "OPERAND_REG_IMM_INT32">;
def SSrc_f32 : SrcRegOrImm9 <SReg_32, "OPW32", "OPERAND_REG_IMM_FP32">;
def SSrc_b64 : SrcRegOrImm9 <SReg_64, "OPW64", "OPERAND_REG_IMM_INT64">;

def SSrcOrLds_b32 : SrcRegOrImm9 <SRegOrLds_32, "OPW32", "OPERAND_REG_IMM_INT32", 32, OperandSemantics.INT>;
def SSrcOrLds_b32 : SrcRegOrImm9 <SRegOrLds_32, "OPW32", "OPERAND_REG_IMM_INT32">;

//===----------------------------------------------------------------------===//
// SSrc_32_Deferred Operands with an SGPR or a 32-bit immediate for use with
@@ -1135,81 +1124,81 @@ def SSrcOrLds_b32 : SrcRegOrImm9 <SRegOrLds_32, "OPW32", "OPERAND_REG_IMM_INT32"
//===----------------------------------------------------------------------===//

class SrcRegOrImmDeferred9<RegisterClass regClass, string opWidth,
string operandType, int immWidth, int OperandSemantics>
string operandType>
: RegOrImmOperand<regClass, operandType> {
string DecoderMethodName = "decodeSrcRegOrImmDeferred9";
let DecoderMethod = DecoderMethodName # "<AMDGPUDisassembler::" #
opWidth # ", " # immWidth # ", " # OperandSemantics # ">";
opWidth # ">";
}

def SSrc_f32_Deferred : SrcRegOrImmDeferred9<SReg_32, "OPW32", "OPERAND_REG_IMM_FP32_DEFERRED", 32, OperandSemantics.FP32>;
def SSrc_f32_Deferred : SrcRegOrImmDeferred9<SReg_32, "OPW32", "OPERAND_REG_IMM_FP32_DEFERRED">;

//===----------------------------------------------------------------------===//
// SCSrc_* Operands with an SGPR or a inline constant
//===----------------------------------------------------------------------===//

def SCSrc_b32 : SrcRegOrImm9 <SReg_32, "OPW32", "OPERAND_REG_INLINE_C_INT32", 32, OperandSemantics.INT>;
def SCSrc_b64 : SrcRegOrImm9 <SReg_64, "OPW64", "OPERAND_REG_INLINE_C_INT64", 64, OperandSemantics.INT>;
def SCSrc_b32 : SrcRegOrImm9 <SReg_32, "OPW32", "OPERAND_REG_INLINE_C_INT32">;
def SCSrc_b64 : SrcRegOrImm9 <SReg_64, "OPW64", "OPERAND_REG_INLINE_C_INT64">;

//===----------------------------------------------------------------------===//
// VSrc_* Operands with an SGPR, VGPR or a 32-bit immediate
//===----------------------------------------------------------------------===//

// The current and temporary future default used case for VOP3.
def VSrc_b16 : SrcRegOrImm9 <VS_32, "OPW32", "OPERAND_REG_IMM_INT16", 16, OperandSemantics.INT>;
def VSrc_bf16 : SrcRegOrImm9 <VS_32, "OPW32", "OPERAND_REG_IMM_BF16", 16, OperandSemantics.BF16>;
def VSrc_f16 : SrcRegOrImm9 <VS_32, "OPW32", "OPERAND_REG_IMM_FP16", 16, OperandSemantics.FP16>;
def VSrc_b16 : SrcRegOrImm9 <VS_32, "OPW32", "OPERAND_REG_IMM_INT16">;
def VSrc_bf16 : SrcRegOrImm9 <VS_32, "OPW32", "OPERAND_REG_IMM_BF16">;
def VSrc_f16 : SrcRegOrImm9 <VS_32, "OPW32", "OPERAND_REG_IMM_FP16">;

// True16 VOP3 operands.
def VSrcT_b16 : SrcRegOrImm9_t16 <"OPERAND_REG_IMM_INT16", OperandSemantics.INT>;
def VSrcT_bf16 : SrcRegOrImm9_t16 <"OPERAND_REG_IMM_BF16", OperandSemantics.BF16>;
def VSrcT_f16 : SrcRegOrImm9_t16 <"OPERAND_REG_IMM_FP16", OperandSemantics.FP16>;
def VSrcT_b16 : SrcRegOrImm9_t16 <"OPERAND_REG_IMM_INT16">;
def VSrcT_bf16 : SrcRegOrImm9_t16 <"OPERAND_REG_IMM_BF16">;
def VSrcT_f16 : SrcRegOrImm9_t16 <"OPERAND_REG_IMM_FP16">;

// True16 VOP1/2/C operands.
let DecoderMethodName = "decodeOperand_VSrcT16_Lo128", EncoderMethod = "getMachineOpValueT16Lo128" in {
def VSrcT_b16_Lo128 : SrcRegOrImm9_t16 <"OPERAND_REG_IMM_INT16", OperandSemantics.INT, VS_16_Lo128>;
def VSrcT_bf16_Lo128 : SrcRegOrImm9_t16 <"OPERAND_REG_IMM_BF16", OperandSemantics.BF16, VS_16_Lo128>;
def VSrcT_f16_Lo128 : SrcRegOrImm9_t16 <"OPERAND_REG_IMM_FP16",OperandSemantics.FP16, VS_16_Lo128>;
def VSrcT_b16_Lo128 : SrcRegOrImm9_t16 <"OPERAND_REG_IMM_INT16", VS_16_Lo128>;
def VSrcT_bf16_Lo128 : SrcRegOrImm9_t16 <"OPERAND_REG_IMM_BF16", VS_16_Lo128>;
def VSrcT_f16_Lo128 : SrcRegOrImm9_t16 <"OPERAND_REG_IMM_FP16", VS_16_Lo128>;
} // End DecoderMethodName = "decodeOperand_VSrcT16_Lo128", EncoderMethod = "getMachineOpValueT16Lo128"

// The current and temporary future default used case for fake VOP1/2/C.
// For VOP1,2,C True16 instructions. _Lo128 use first 128 32-bit VGPRs only.
def VSrcFake16_b16_Lo128 : SrcRegOrImm9 <VS_32_Lo128, "OPW16", "OPERAND_REG_IMM_INT16", 16, OperandSemantics.INT>;
def VSrcFake16_bf16_Lo128 : SrcRegOrImm9 <VS_32_Lo128, "OPW16", "OPERAND_REG_IMM_BF16", 16, OperandSemantics.BF16>;
def VSrcFake16_f16_Lo128 : SrcRegOrImm9 <VS_32_Lo128, "OPW16", "OPERAND_REG_IMM_FP16", 16, OperandSemantics.FP16>;
def VSrcFake16_b16_Lo128 : SrcRegOrImm9 <VS_32_Lo128, "OPW16", "OPERAND_REG_IMM_INT16">;
def VSrcFake16_bf16_Lo128 : SrcRegOrImm9 <VS_32_Lo128, "OPW16", "OPERAND_REG_IMM_BF16">;
def VSrcFake16_f16_Lo128 : SrcRegOrImm9 <VS_32_Lo128, "OPW16", "OPERAND_REG_IMM_FP16">;

def VSrc_b32 : SrcRegOrImm9 <VS_32, "OPW32", "OPERAND_REG_IMM_INT32", 32, OperandSemantics.INT>;
def VSrc_f32 : SrcRegOrImm9 <VS_32, "OPW32", "OPERAND_REG_IMM_FP32", 32, OperandSemantics.FP32>;
def VSrc_v2b16 : SrcRegOrImm9 <VS_32, "OPW32", "OPERAND_REG_IMM_V2INT16", 32, OperandSemantics.INT>;
def VSrc_v2bf16 : SrcRegOrImm9 <VS_32, "OPW32", "OPERAND_REG_IMM_V2BF16", 16, OperandSemantics.BF16>;
def VSrc_v2f16 : SrcRegOrImm9 <VS_32, "OPW32", "OPERAND_REG_IMM_V2FP16", 16, OperandSemantics.FP16>;
def VSrc_b64 : SrcRegOrImm9 <VS_64, "OPW64", "OPERAND_REG_IMM_INT64", 64, OperandSemantics.INT>;
def VSrc_f64 : SrcRegOrImm9 <VS_64, "OPW64", "OPERAND_REG_IMM_FP64", 64, OperandSemantics.FP64> {
def VSrc_b32 : SrcRegOrImm9 <VS_32, "OPW32", "OPERAND_REG_IMM_INT32">;
def VSrc_f32 : SrcRegOrImm9 <VS_32, "OPW32", "OPERAND_REG_IMM_FP32">;
def VSrc_v2b16 : SrcRegOrImm9 <VS_32, "OPW32", "OPERAND_REG_IMM_V2INT16">;
def VSrc_v2bf16 : SrcRegOrImm9 <VS_32, "OPW32", "OPERAND_REG_IMM_V2BF16">;
def VSrc_v2f16 : SrcRegOrImm9 <VS_32, "OPW32", "OPERAND_REG_IMM_V2FP16">;
def VSrc_b64 : SrcRegOrImm9 <VS_64, "OPW64", "OPERAND_REG_IMM_INT64">;
def VSrc_f64 : SrcRegOrImm9 <VS_64, "OPW64", "OPERAND_REG_IMM_FP64"> {
let DecoderMethod = "decodeOperand_VSrc_f64";
}
def VSrc_v2b32 : SrcRegOrImm9 <VS_64, "OPW64", "OPERAND_REG_IMM_V2INT32", 32, OperandSemantics.INT>;
def VSrc_v2f32 : SrcRegOrImm9 <VS_64, "OPW64", "OPERAND_REG_IMM_V2FP32", 32, OperandSemantics.FP32>;
def VSrc_v2b32 : SrcRegOrImm9 <VS_64, "OPW64", "OPERAND_REG_IMM_V2INT32">;
def VSrc_v2f32 : SrcRegOrImm9 <VS_64, "OPW64", "OPERAND_REG_IMM_V2FP32">;

//===----------------------------------------------------------------------===//
// VSrc_*_Deferred Operands with an SGPR, VGPR or a 32-bit immediate for use
// with FMAMK/FMAAK
//===----------------------------------------------------------------------===//

def VSrc_bf16_Deferred : SrcRegOrImmDeferred9<VS_32, "OPW16", "OPERAND_REG_IMM_BF16_DEFERRED", 16, OperandSemantics.BF16>;
def VSrc_f16_Deferred : SrcRegOrImmDeferred9<VS_32, "OPW16", "OPERAND_REG_IMM_FP16_DEFERRED", 16, OperandSemantics.FP16>;
def VSrc_f32_Deferred : SrcRegOrImmDeferred9<VS_32, "OPW32", "OPERAND_REG_IMM_FP32_DEFERRED", 32, OperandSemantics.FP32>;
def VSrc_bf16_Deferred : SrcRegOrImmDeferred9<VS_32, "OPW16", "OPERAND_REG_IMM_BF16_DEFERRED">;
def VSrc_f16_Deferred : SrcRegOrImmDeferred9<VS_32, "OPW16", "OPERAND_REG_IMM_FP16_DEFERRED">;
def VSrc_f32_Deferred : SrcRegOrImmDeferred9<VS_32, "OPW32", "OPERAND_REG_IMM_FP32_DEFERRED">;

// True 16 Operands
def VSrcT_f16_Lo128_Deferred : SrcRegOrImmDeferred9<VS_16_Lo128, "OPW16",
"OPERAND_REG_IMM_FP16_DEFERRED", 16, OperandSemantics.FP16> {
"OPERAND_REG_IMM_FP16_DEFERRED"> {
let DecoderMethodName = "decodeOperand_VSrcT16_Lo128_Deferred";
let EncoderMethod = "getMachineOpValueT16Lo128";
}

def VSrcFake16_bf16_Lo128_Deferred
: SrcRegOrImmDeferred9<VS_32_Lo128, "OPW16", "OPERAND_REG_IMM_BF16_DEFERRED", 16, OperandSemantics.BF16>;
: SrcRegOrImmDeferred9<VS_32_Lo128, "OPW16", "OPERAND_REG_IMM_BF16_DEFERRED">;
def VSrcFake16_f16_Lo128_Deferred
: SrcRegOrImmDeferred9<VS_32_Lo128, "OPW16", "OPERAND_REG_IMM_FP16_DEFERRED", 16, OperandSemantics.FP16>;
: SrcRegOrImmDeferred9<VS_32_Lo128, "OPW16", "OPERAND_REG_IMM_FP16_DEFERRED">;

//===----------------------------------------------------------------------===//
// VRegSrc_* Operands with a VGPR
@@ -1282,38 +1271,38 @@ def ARegSrc_32 : AVOperand<AGPR_32, "decodeSrcA9", "OPW32">;
// VCSrc_* Operands with an SGPR, VGPR or an inline constant
//===----------------------------------------------------------------------===//

def VCSrc_b16 : SrcRegOrImm9 <VS_32, "OPW32", "OPERAND_REG_INLINE_C_INT16", 16, OperandSemantics.INT>;
def VCSrc_bf16 : SrcRegOrImm9 <VS_32, "OPW32", "OPERAND_REG_INLINE_C_BF16", 16, OperandSemantics.BF16>;
def VCSrc_f16 : SrcRegOrImm9 <VS_32, "OPW32", "OPERAND_REG_INLINE_C_FP16", 16, OperandSemantics.FP16>;
def VCSrc_b32 : SrcRegOrImm9 <VS_32, "OPW32", "OPERAND_REG_INLINE_C_INT32", 32, OperandSemantics.INT>;
def VCSrc_f32 : SrcRegOrImm9 <VS_32, "OPW32", "OPERAND_REG_INLINE_C_FP32", 32, OperandSemantics.FP32>;
def VCSrc_v2b16 : SrcRegOrImm9 <VS_32, "OPW32", "OPERAND_REG_INLINE_C_V2INT16", 32, OperandSemantics.INT>;
def VCSrc_v2bf16: SrcRegOrImm9 <VS_32, "OPW32", "OPERAND_REG_INLINE_C_V2BF16", 16, OperandSemantics.BF16>;
def VCSrc_v2f16 : SrcRegOrImm9 <VS_32, "OPW32", "OPERAND_REG_INLINE_C_V2FP16", 16, OperandSemantics.FP16>;
def VCSrc_b16 : SrcRegOrImm9 <VS_32, "OPW32", "OPERAND_REG_INLINE_C_INT16">;
def VCSrc_bf16 : SrcRegOrImm9 <VS_32, "OPW32", "OPERAND_REG_INLINE_C_BF16">;
def VCSrc_f16 : SrcRegOrImm9 <VS_32, "OPW32", "OPERAND_REG_INLINE_C_FP16">;
def VCSrc_b32 : SrcRegOrImm9 <VS_32, "OPW32", "OPERAND_REG_INLINE_C_INT32">;
def VCSrc_f32 : SrcRegOrImm9 <VS_32, "OPW32", "OPERAND_REG_INLINE_C_FP32">;
def VCSrc_v2b16 : SrcRegOrImm9 <VS_32, "OPW32", "OPERAND_REG_INLINE_C_V2INT16">;
def VCSrc_v2bf16: SrcRegOrImm9 <VS_32, "OPW32", "OPERAND_REG_INLINE_C_V2BF16">;
def VCSrc_v2f16 : SrcRegOrImm9 <VS_32, "OPW32", "OPERAND_REG_INLINE_C_V2FP16">;

// True 16 Operands
def VCSrcT_b16 : SrcRegOrImm9_t16 <"OPERAND_REG_INLINE_C_INT16", OperandSemantics.INT>;
def VCSrcT_bf16 : SrcRegOrImm9_t16 <"OPERAND_REG_INLINE_C_BF16", OperandSemantics.BF16>;
def VCSrcT_f16 : SrcRegOrImm9_t16 <"OPERAND_REG_INLINE_C_FP16", OperandSemantics.FP16>;
def VCSrcT_b16 : SrcRegOrImm9_t16 <"OPERAND_REG_INLINE_C_INT16">;
def VCSrcT_bf16 : SrcRegOrImm9_t16 <"OPERAND_REG_INLINE_C_BF16">;
def VCSrcT_f16 : SrcRegOrImm9_t16 <"OPERAND_REG_INLINE_C_FP16">;
//===----------------------------------------------------------------------===//
// VISrc_* Operands with a VGPR or an inline constant
//===----------------------------------------------------------------------===//

def VISrc_64_bf16 : SrcRegOrImm9 <VReg_64, "OPW64", "OPERAND_REG_INLINE_C_BF16", 16, OperandSemantics.BF16>;
def VISrc_64_f16 : SrcRegOrImm9 <VReg_64, "OPW64", "OPERAND_REG_INLINE_C_FP16", 16, OperandSemantics.FP16>;
def VISrc_64_b32 : SrcRegOrImm9 <VReg_64, "OPW64", "OPERAND_REG_INLINE_C_INT32", 32, OperandSemantics.INT>;
def VISrc_64_f64 : SrcRegOrImm9 <VReg_64, "OPW64", "OPERAND_REG_INLINE_C_FP64", 64, OperandSemantics.FP64>;
def VISrc_128_bf16 : SrcRegOrImm9 <VReg_128, "OPW128", "OPERAND_REG_INLINE_C_BF16", 16, OperandSemantics.BF16>;
def VISrc_128_f16 : SrcRegOrImm9 <VReg_128, "OPW128", "OPERAND_REG_INLINE_C_FP16", 16, OperandSemantics.FP16>;
def VISrc_128_b32 : SrcRegOrImm9 <VReg_128, "OPW128", "OPERAND_REG_INLINE_C_INT32", 32, OperandSemantics.INT>;
def VISrc_128_f32 : SrcRegOrImm9 <VReg_128, "OPW128", "OPERAND_REG_INLINE_C_FP32", 32, OperandSemantics.FP32>;
def VISrc_256_b32 : SrcRegOrImm9 <VReg_256, "OPW256", "OPERAND_REG_INLINE_C_INT32", 32, OperandSemantics.INT>;
def VISrc_256_f32 : SrcRegOrImm9 <VReg_256, "OPW256", "OPERAND_REG_INLINE_C_FP32", 32, OperandSemantics.FP32>;
def VISrc_256_f64 : SrcRegOrImm9 <VReg_256, "OPW256", "OPERAND_REG_INLINE_C_FP64", 64, OperandSemantics.FP64>;
def VISrc_512_b32 : SrcRegOrImm9 <VReg_512, "OPW512", "OPERAND_REG_INLINE_C_INT32", 32, OperandSemantics.INT>;
def VISrc_512_f32 : SrcRegOrImm9 <VReg_512, "OPW512", "OPERAND_REG_INLINE_C_FP32", 32, OperandSemantics.FP32>;
def VISrc_1024_b32 : SrcRegOrImm9 <VReg_1024, "OPW1024", "OPERAND_REG_INLINE_C_INT32", 32, OperandSemantics.INT>;
def VISrc_1024_f32 : SrcRegOrImm9 <VReg_1024, "OPW1024", "OPERAND_REG_INLINE_C_FP32", 32, OperandSemantics.FP32>;
def VISrc_64_bf16 : SrcRegOrImm9 <VReg_64, "OPW64", "OPERAND_REG_INLINE_C_BF16">;
def VISrc_64_f16 : SrcRegOrImm9 <VReg_64, "OPW64", "OPERAND_REG_INLINE_C_FP16">;
def VISrc_64_b32 : SrcRegOrImm9 <VReg_64, "OPW64", "OPERAND_REG_INLINE_C_INT32">;
def VISrc_64_f64 : SrcRegOrImm9 <VReg_64, "OPW64", "OPERAND_REG_INLINE_C_FP64">;
def VISrc_128_bf16 : SrcRegOrImm9 <VReg_128, "OPW128", "OPERAND_REG_INLINE_C_BF16">;
def VISrc_128_f16 : SrcRegOrImm9 <VReg_128, "OPW128", "OPERAND_REG_INLINE_C_FP16">;
def VISrc_128_b32 : SrcRegOrImm9 <VReg_128, "OPW128", "OPERAND_REG_INLINE_C_INT32">;
def VISrc_128_f32 : SrcRegOrImm9 <VReg_128, "OPW128", "OPERAND_REG_INLINE_C_FP32">;
def VISrc_256_b32 : SrcRegOrImm9 <VReg_256, "OPW256", "OPERAND_REG_INLINE_C_INT32">;
def VISrc_256_f32 : SrcRegOrImm9 <VReg_256, "OPW256", "OPERAND_REG_INLINE_C_FP32">;
def VISrc_256_f64 : SrcRegOrImm9 <VReg_256, "OPW256", "OPERAND_REG_INLINE_C_FP64">;
def VISrc_512_b32 : SrcRegOrImm9 <VReg_512, "OPW512", "OPERAND_REG_INLINE_C_INT32">;
def VISrc_512_f32 : SrcRegOrImm9 <VReg_512, "OPW512", "OPERAND_REG_INLINE_C_FP32">;
def VISrc_1024_b32 : SrcRegOrImm9 <VReg_1024, "OPW1024", "OPERAND_REG_INLINE_C_INT32">;
def VISrc_1024_f32 : SrcRegOrImm9 <VReg_1024, "OPW1024", "OPERAND_REG_INLINE_C_FP32">;

//===----------------------------------------------------------------------===//
// AVSrc_*, AVDst_*, AVLdSt_* Operands with an AGPR or VGPR
@@ -1349,18 +1338,16 @@ def AVLdSt_1024 : AVLdStOperand<AV_1024, "OPW1024">;
// ACSrc_* Operands with an AGPR or an inline constant
//===----------------------------------------------------------------------===//

class SrcRegOrImmA9<RegisterClass regClass, string opWidth, string operandType,
int immWidth, int OperandSemantics>
class SrcRegOrImmA9<RegisterClass regClass, string opWidth, string operandType>
: RegOrImmOperand<regClass, operandType> {
let DecoderMethod = "decodeSrcRegOrImmA9<AMDGPUDisassembler::" # opWidth #
", " # immWidth # ", " # OperandSemantics # ">";
let DecoderMethod = "decodeSrcRegOrImmA9<AMDGPUDisassembler::" # opWidth # ">";
}

def AISrc_64_f64 : SrcRegOrImmA9 <AReg_64, "OPW64", "OPERAND_REG_INLINE_AC_FP64", 64, OperandSemantics.FP64>;
def AISrc_128_f32 : SrcRegOrImmA9 <AReg_128, "OPW128", "OPERAND_REG_INLINE_AC_FP32", 32, OperandSemantics.FP32>;
def AISrc_128_b32 : SrcRegOrImmA9 <AReg_128, "OPW128", "OPERAND_REG_INLINE_AC_INT32", 32, OperandSemantics.INT>;
def AISrc_256_f64 : SrcRegOrImmA9 <AReg_256, "OPW256", "OPERAND_REG_INLINE_AC_FP64", 64, OperandSemantics.FP64>;
def AISrc_512_f32 : SrcRegOrImmA9 <AReg_512, "OPW512", "OPERAND_REG_INLINE_AC_FP32", 32, OperandSemantics.FP32>;
def AISrc_512_b32 : SrcRegOrImmA9 <AReg_512, "OPW512", "OPERAND_REG_INLINE_AC_INT32", 32, OperandSemantics.INT>;
def AISrc_1024_f32 : SrcRegOrImmA9 <AReg_1024, "OPW1024", "OPERAND_REG_INLINE_AC_FP32", 32, OperandSemantics.FP32>;
def AISrc_1024_b32 : SrcRegOrImmA9 <AReg_1024, "OPW1024", "OPERAND_REG_INLINE_AC_INT32", 32, OperandSemantics.INT>;
def AISrc_64_f64 : SrcRegOrImmA9 <AReg_64, "OPW64", "OPERAND_REG_INLINE_AC_FP64">;
def AISrc_128_f32 : SrcRegOrImmA9 <AReg_128, "OPW128", "OPERAND_REG_INLINE_AC_FP32">;
def AISrc_128_b32 : SrcRegOrImmA9 <AReg_128, "OPW128", "OPERAND_REG_INLINE_AC_INT32">;
def AISrc_256_f64 : SrcRegOrImmA9 <AReg_256, "OPW256", "OPERAND_REG_INLINE_AC_FP64">;
def AISrc_512_f32 : SrcRegOrImmA9 <AReg_512, "OPW512", "OPERAND_REG_INLINE_AC_FP32">;
def AISrc_512_b32 : SrcRegOrImmA9 <AReg_512, "OPW512", "OPERAND_REG_INLINE_AC_INT32">;
def AISrc_1024_f32 : SrcRegOrImmA9 <AReg_1024, "OPW1024", "OPERAND_REG_INLINE_AC_FP32">;
def AISrc_1024_b32 : SrcRegOrImmA9 <AReg_1024, "OPW1024", "OPERAND_REG_INLINE_AC_INT32">;