diff --git a/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp b/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp
index 9a069b15d8e4..b933193a8b28 100644
--- a/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp
+++ b/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp
@@ -1,2656 +1,2680 @@
//===-- RISCVAsmParser.cpp - Parse RISCV assembly to MCInst instructions --===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/RISCVAsmBackend.h"
#include "MCTargetDesc/RISCVBaseInfo.h"
#include "MCTargetDesc/RISCVInstPrinter.h"
#include "MCTargetDesc/RISCVMCExpr.h"
#include "MCTargetDesc/RISCVMCTargetDesc.h"
#include "MCTargetDesc/RISCVMatInt.h"
#include "MCTargetDesc/RISCVTargetStreamer.h"
#include "TargetInfo/RISCVTargetInfo.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/MC/MCAssembler.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstBuilder.h"
#include "llvm/MC/MCObjectFileInfo.h"
#include "llvm/MC/MCParser/MCAsmLexer.h"
#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
#include "llvm/MC/MCParser/MCTargetAsmParser.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MCValue.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/RISCVAttributes.h"
#include "llvm/Support/TargetRegistry.h"

#include <limits>

using namespace llvm;

#define DEBUG_TYPE "riscv-asm-parser"

// Include the auto-generated portion of the compress emitter.
#define GEN_COMPRESS_INSTR
#include "RISCVGenCompressInstEmitter.inc"

STATISTIC(RISCVNumInstrsCompressed,
          "Number of RISC-V Compressed instructions emitted");

namespace {
struct RISCVOperand;

struct ParserOptionsSet {
  bool IsPicEnabled;
};

class RISCVAsmParser : public MCTargetAsmParser {
  SmallVector<FeatureBitset, 4> FeatureBitStack;

  SmallVector<ParserOptionsSet, 4> ParserOptionsStack;
  ParserOptionsSet ParserOptions;

  SMLoc getLoc() const { return getParser().getTok().getLoc(); }
  bool isRV64() const { return getSTI().hasFeature(RISCV::Feature64Bit); }
  bool isRV32E() const { return getSTI().hasFeature(RISCV::FeatureRV32E); }

  RISCVTargetStreamer &getTargetStreamer() {
    MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
    return static_cast<RISCVTargetStreamer &>(TS);
  }

  unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
                                      unsigned Kind) override;

  bool generateImmOutOfRangeError(OperandVector &Operands, uint64_t ErrorInfo,
                                  int64_t Lower, int64_t Upper, Twine Msg);

  bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                               OperandVector &Operands, MCStreamer &Out,
                               uint64_t &ErrorInfo,
                               bool MatchingInlineAsm) override;

  bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
  OperandMatchResultTy tryParseRegister(unsigned &RegNo, SMLoc &StartLoc,
                                        SMLoc &EndLoc) override;

  bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
                        SMLoc NameLoc, OperandVector &Operands) override;

  bool ParseDirective(AsmToken DirectiveID) override;

  // Helper to actually emit an instruction to the MCStreamer. Also, when
  // possible, compression of the instruction is performed.
  void emitToStreamer(MCStreamer &S, const MCInst &Inst);

  // Helper to emit a combination of LUI, ADDI(W), and SLLI instructions that
  // synthesize the desired immediate value into the destination register.
  void emitLoadImm(MCRegister DestReg, int64_t Value, MCStreamer &Out);

  // Helper to emit a combination of AUIPC and SecondOpcode. Used to implement
  // helpers such as emitLoadLocalAddress and emitLoadAddress.
  void emitAuipcInstPair(MCOperand DestReg, MCOperand TmpReg,
                         const MCExpr *Symbol, RISCVMCExpr::VariantKind VKHi,
                         unsigned SecondOpcode, SMLoc IDLoc, MCStreamer &Out);

  // Helper to emit pseudo instruction "lla" used in PC-rel addressing.
  void emitLoadLocalAddress(MCInst &Inst, SMLoc IDLoc, MCStreamer &Out);

  // Helper to emit pseudo instruction "la" used in GOT/PC-rel addressing.
  void emitLoadAddress(MCInst &Inst, SMLoc IDLoc, MCStreamer &Out);

  // Helper to emit pseudo instruction "la.tls.ie" used in initial-exec TLS
  // addressing.
  void emitLoadTLSIEAddress(MCInst &Inst, SMLoc IDLoc, MCStreamer &Out);

  // Helper to emit pseudo instruction "la.tls.gd" used in global-dynamic TLS
  // addressing.
  void emitLoadTLSGDAddress(MCInst &Inst, SMLoc IDLoc, MCStreamer &Out);

  // Helper to emit pseudo load/store instruction with a symbol.
  void emitLoadStoreSymbol(MCInst &Inst, unsigned Opcode, SMLoc IDLoc,
                           MCStreamer &Out, bool HasTmpReg);

  // Helper to emit pseudo sign/zero extend instruction.
  void emitPseudoExtend(MCInst &Inst, bool SignExtend, int64_t Width,
                        SMLoc IDLoc, MCStreamer &Out);

  // Helper to emit pseudo vmsge{u}.vx instruction.
  void emitVMSGE(MCInst &Inst, unsigned Opcode, SMLoc IDLoc, MCStreamer &Out);

  // Checks that a PseudoAddTPRel is using x4/tp in its second input operand.
  // Enforcing this using a restricted register class for the second input
  // operand of PseudoAddTPRel results in a poor diagnostic due to the fact
  // 'add' is an overloaded mnemonic.
  bool checkPseudoAddTPRel(MCInst &Inst, OperandVector &Operands);

  // Check instruction constraints.
  bool validateInstruction(MCInst &Inst, OperandVector &Operands);

  /// Helper for processing MC instructions that have been successfully matched
  /// by MatchAndEmitInstruction. Modifications to the emitted instructions,
  /// like the expansion of pseudo instructions (e.g., "li"), can be performed
  /// in this method.
  bool processInstruction(MCInst &Inst, SMLoc IDLoc, OperandVector &Operands,
                          MCStreamer &Out);

  // Auto-generated instruction matching functions
#define GET_ASSEMBLER_HEADER
#include "RISCVGenAsmMatcher.inc"

  OperandMatchResultTy parseCSRSystemRegister(OperandVector &Operands);
  OperandMatchResultTy parseImmediate(OperandVector &Operands);
  OperandMatchResultTy parseRegister(OperandVector &Operands,
                                     bool AllowParens = false);
  OperandMatchResultTy parseMemOpBaseReg(OperandVector &Operands);
  OperandMatchResultTy parseAtomicMemOp(OperandVector &Operands);
  OperandMatchResultTy parseOperandWithModifier(OperandVector &Operands);
  OperandMatchResultTy parseBareSymbol(OperandVector &Operands);
  OperandMatchResultTy parseCallSymbol(OperandVector &Operands);
  OperandMatchResultTy parsePseudoJumpSymbol(OperandVector &Operands);
  OperandMatchResultTy parseJALOffset(OperandVector &Operands);
  OperandMatchResultTy parseVTypeI(OperandVector &Operands);
  OperandMatchResultTy parseMaskReg(OperandVector &Operands);

  bool parseOperand(OperandVector &Operands, StringRef Mnemonic);

  bool parseDirectiveOption();
  bool parseDirectiveAttribute();

  void setFeatureBits(uint64_t Feature, StringRef FeatureString) {
    if (!(getSTI().getFeatureBits()[Feature])) {
      MCSubtargetInfo &STI = copySTI();
      setAvailableFeatures(
          ComputeAvailableFeatures(STI.ToggleFeature(FeatureString)));
    }
  }

  bool getFeatureBits(uint64_t Feature) {
    return getSTI().getFeatureBits()[Feature];
  }

  void clearFeatureBits(uint64_t Feature, StringRef FeatureString) {
    if (getSTI().getFeatureBits()[Feature]) {
      MCSubtargetInfo &STI = copySTI();
      setAvailableFeatures(
          ComputeAvailableFeatures(STI.ToggleFeature(FeatureString)));
    }
  }

  void pushFeatureBits() {
    assert(FeatureBitStack.size() == ParserOptionsStack.size() &&
           "These two stacks must be kept synchronized");
    FeatureBitStack.push_back(getSTI().getFeatureBits());
    ParserOptionsStack.push_back(ParserOptions);
  }

  bool popFeatureBits() {
    assert(FeatureBitStack.size() == ParserOptionsStack.size() &&
           "These two stacks must be kept synchronized");
    if (FeatureBitStack.empty())
      return true;

    FeatureBitset FeatureBits = FeatureBitStack.pop_back_val();
    copySTI().setFeatureBits(FeatureBits);
    setAvailableFeatures(ComputeAvailableFeatures(FeatureBits));

    ParserOptions = ParserOptionsStack.pop_back_val();

    return false;
  }

  std::unique_ptr<RISCVOperand> defaultMaskRegOp() const;

public:
  enum RISCVMatchResultTy {
    Match_Dummy = FIRST_TARGET_MATCH_RESULT_TY,
#define GET_OPERAND_DIAGNOSTIC_TYPES
#include "RISCVGenAsmMatcher.inc"
#undef GET_OPERAND_DIAGNOSTIC_TYPES
  };

  static bool classifySymbolRef(const MCExpr *Expr,
                                RISCVMCExpr::VariantKind &Kind);

  RISCVAsmParser(const MCSubtargetInfo &STI, MCAsmParser &Parser,
                 const MCInstrInfo &MII, const MCTargetOptions &Options)
      : MCTargetAsmParser(Options, STI, MII) {
    Parser.addAliasForDirective(".half", ".2byte");
    Parser.addAliasForDirective(".hword", ".2byte");
    Parser.addAliasForDirective(".word", ".4byte");
    Parser.addAliasForDirective(".dword", ".8byte");
    setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));

    auto ABIName = StringRef(Options.ABIName);
    if (ABIName.endswith("f") &&
        !getSTI().getFeatureBits()[RISCV::FeatureStdExtF]) {
      errs() << "Hard-float 'f' ABI can't be used for a target that "
                "doesn't support the F instruction set extension (ignoring "
                "target-abi)\n";
    } else if (ABIName.endswith("d") &&
               !getSTI().getFeatureBits()[RISCV::FeatureStdExtD]) {
      errs() << "Hard-float 'd' ABI can't be used for a target that "
                "doesn't support the D instruction set extension (ignoring "
                "target-abi)\n";
    }

    const MCObjectFileInfo *MOFI = Parser.getContext().getObjectFileInfo();
    ParserOptions.IsPicEnabled = MOFI->isPositionIndependent();
  }
};

/// RISCVOperand - Instances of this class represent a parsed machine
/// instruction.
struct RISCVOperand : public MCParsedAsmOperand {

  enum class KindTy {
    Token,
    Register,
    Immediate,
    SystemRegister,
    VType,
  } Kind;

  bool IsRV64;

  struct RegOp {
    MCRegister RegNum;
  };

  struct ImmOp {
    const MCExpr *Val;
  };

  struct SysRegOp {
    const char *Data;
    unsigned Length;
    unsigned Encoding;
    // FIXME: Add the Encoding parsed fields as needed for checks,
    // e.g.: read/write or user/supervisor/machine privileges.
  };

  struct VTypeOp {
    unsigned Val;
  };

  SMLoc StartLoc, EndLoc;
  union {
    StringRef Tok;
    RegOp Reg;
    ImmOp Imm;
    struct SysRegOp SysReg;
    struct VTypeOp VType;
  };

  RISCVOperand(KindTy K) : MCParsedAsmOperand(), Kind(K) {}

public:
  RISCVOperand(const RISCVOperand &o) : MCParsedAsmOperand() {
    Kind = o.Kind;
    IsRV64 = o.IsRV64;
    StartLoc = o.StartLoc;
    EndLoc = o.EndLoc;
    switch (Kind) {
    case KindTy::Register:
      Reg = o.Reg;
      break;
    case KindTy::Immediate:
      Imm = o.Imm;
      break;
    case KindTy::Token:
      Tok = o.Tok;
      break;
    case KindTy::SystemRegister:
      SysReg = o.SysReg;
      break;
    case KindTy::VType:
      VType = o.VType;
      break;
    }
  }

  bool isToken() const override { return Kind == KindTy::Token; }
  bool isReg() const override { return Kind == KindTy::Register; }
  bool isV0Reg() const {
    return Kind == KindTy::Register && Reg.RegNum == RISCV::V0;
  }
  bool isImm() const override { return Kind == KindTy::Immediate; }
  bool isMem() const override { return false; }
  bool isSystemRegister() const { return Kind == KindTy::SystemRegister; }
  bool isVType() const { return Kind == KindTy::VType; }

  bool isGPR() const {
    return Kind == KindTy::Register &&
           RISCVMCRegisterClasses[RISCV::GPRRegClassID].contains(Reg.RegNum);
  }

  static bool evaluateConstantImm(const MCExpr *Expr, int64_t &Imm,
                                  RISCVMCExpr::VariantKind &VK) {
    if (auto *RE = dyn_cast<RISCVMCExpr>(Expr)) {
      VK = RE->getKind();
      return RE->evaluateAsConstant(Imm);
    }

    if (auto CE = dyn_cast<MCConstantExpr>(Expr)) {
      VK = RISCVMCExpr::VK_RISCV_None;
      Imm = CE->getValue();
      return true;
    }

    return false;
  }

  // True if operand is a symbol with no modifiers, or a constant with no
  // modifiers and isShiftedInt<N-1, 1>(Op).
  template <int N> bool isBareSimmNLsb0() const {
    int64_t Imm;
    RISCVMCExpr::VariantKind VK = RISCVMCExpr::VK_RISCV_None;
    if (!isImm())
      return false;
    bool IsConstantImm = evaluateConstantImm(getImm(), Imm, VK);
    bool IsValid;
    if (!IsConstantImm)
      IsValid = RISCVAsmParser::classifySymbolRef(getImm(), VK);
    else
      IsValid = isShiftedInt<N - 1, 1>(Imm);
    return IsValid && VK == RISCVMCExpr::VK_RISCV_None;
  }
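  // Editor's illustration: for N == 13 (the branch-offset form, isSImm13Lsb0
  // below), this accepts even constants in [-4096, 4094], matching the
  // Match_InvalidSImm13Lsb0 diagnostic emitted further down in this file.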

  // Predicate methods for AsmOperands defined in RISCVInstrInfo.td

  bool isBareSymbol() const {
    int64_t Imm;
    RISCVMCExpr::VariantKind VK = RISCVMCExpr::VK_RISCV_None;
    // Must be of 'immediate' type but not a constant.
    if (!isImm() || evaluateConstantImm(getImm(), Imm, VK))
      return false;
    return RISCVAsmParser::classifySymbolRef(getImm(), VK) &&
           VK == RISCVMCExpr::VK_RISCV_None;
  }

  bool isCallSymbol() const {
    int64_t Imm;
    RISCVMCExpr::VariantKind VK = RISCVMCExpr::VK_RISCV_None;
    // Must be of 'immediate' type but not a constant.
    if (!isImm() || evaluateConstantImm(getImm(), Imm, VK))
      return false;
    return RISCVAsmParser::classifySymbolRef(getImm(), VK) &&
           (VK == RISCVMCExpr::VK_RISCV_CALL ||
            VK == RISCVMCExpr::VK_RISCV_CALL_PLT);
  }

  bool isPseudoJumpSymbol() const {
    int64_t Imm;
    RISCVMCExpr::VariantKind VK = RISCVMCExpr::VK_RISCV_None;
    // Must be of 'immediate' type but not a constant.
    if (!isImm() || evaluateConstantImm(getImm(), Imm, VK))
      return false;
    return RISCVAsmParser::classifySymbolRef(getImm(), VK) &&
           VK == RISCVMCExpr::VK_RISCV_CALL;
  }

  bool isTPRelAddSymbol() const {
    int64_t Imm;
    RISCVMCExpr::VariantKind VK = RISCVMCExpr::VK_RISCV_None;
    // Must be of 'immediate' type but not a constant.
    if (!isImm() || evaluateConstantImm(getImm(), Imm, VK))
      return false;
    return RISCVAsmParser::classifySymbolRef(getImm(), VK) &&
           VK == RISCVMCExpr::VK_RISCV_TPREL_ADD;
  }

  bool isCSRSystemRegister() const { return isSystemRegister(); }

  bool isVTypeI() const { return isVType(); }

  /// Return true if the operand is valid for the fence instruction, e.g.
  /// ('iorw').
  bool isFenceArg() const {
    if (!isImm())
      return false;
    const MCExpr *Val = getImm();
    auto *SVal = dyn_cast<MCSymbolRefExpr>(Val);
    if (!SVal || SVal->getKind() != MCSymbolRefExpr::VK_None)
      return false;

    StringRef Str = SVal->getSymbol().getName();
    // Letters must be unique, taken from 'iorw', and in ascending order. This
    // holds as long as each individual character is one of 'iorw' and is
    // greater than the previous character.
    char Prev = '\0';
    for (char c : Str) {
      if (c != 'i' && c != 'o' && c != 'r' && c != 'w')
        return false;
      if (c <= Prev)
        return false;
      Prev = c;
    }
    return true;
  }
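  // Editor's illustration: 'rw' and 'iorw' pass the check above, while 'wr'
  // (letters out of order) and 'rr' (duplicate letter) are rejected.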

  /// Return true if the operand is a valid floating point rounding mode.
  bool isFRMArg() const {
    if (!isImm())
      return false;
    const MCExpr *Val = getImm();
    auto *SVal = dyn_cast<MCSymbolRefExpr>(Val);
    if (!SVal || SVal->getKind() != MCSymbolRefExpr::VK_None)
      return false;

    StringRef Str = SVal->getSymbol().getName();

    return RISCVFPRndMode::stringToRoundingMode(Str) != RISCVFPRndMode::Invalid;
  }

  bool isImmXLenLI() const {
    int64_t Imm;
    RISCVMCExpr::VariantKind VK = RISCVMCExpr::VK_RISCV_None;
    if (!isImm())
      return false;
    bool IsConstantImm = evaluateConstantImm(getImm(), Imm, VK);
    if (VK == RISCVMCExpr::VK_RISCV_LO || VK == RISCVMCExpr::VK_RISCV_PCREL_LO)
      return true;
    // Given only Imm, ensuring that the actually specified constant is either
    // a signed or unsigned 64-bit number is unfortunately impossible.
    return IsConstantImm && VK == RISCVMCExpr::VK_RISCV_None &&
           (isRV64() || (isInt<32>(Imm) || isUInt<32>(Imm)));
  }

  bool isUImmLog2XLen() const {
    int64_t Imm;
    RISCVMCExpr::VariantKind VK = RISCVMCExpr::VK_RISCV_None;
    if (!isImm())
      return false;
    if (!evaluateConstantImm(getImm(), Imm, VK) ||
        VK != RISCVMCExpr::VK_RISCV_None)
      return false;
    return (isRV64() && isUInt<6>(Imm)) || isUInt<5>(Imm);
  }

  bool isUImmLog2XLenNonZero() const {
    int64_t Imm;
    RISCVMCExpr::VariantKind VK = RISCVMCExpr::VK_RISCV_None;
    if (!isImm())
      return false;
    if (!evaluateConstantImm(getImm(), Imm, VK) ||
        VK != RISCVMCExpr::VK_RISCV_None)
      return false;
    if (Imm == 0)
      return false;
    return (isRV64() && isUInt<6>(Imm)) || isUInt<5>(Imm);
  }

  bool isUImmLog2XLenHalf() const {
    int64_t Imm;
    RISCVMCExpr::VariantKind VK = RISCVMCExpr::VK_RISCV_None;
    if (!isImm())
      return false;
    if (!evaluateConstantImm(getImm(), Imm, VK) ||
        VK != RISCVMCExpr::VK_RISCV_None)
      return false;
    return (isRV64() && isUInt<5>(Imm)) || isUInt<4>(Imm);
  }

  bool isUImm5() const {
    int64_t Imm;
    RISCVMCExpr::VariantKind VK = RISCVMCExpr::VK_RISCV_None;
    if (!isImm())
      return false;
    bool IsConstantImm = evaluateConstantImm(getImm(), Imm, VK);
    return IsConstantImm && isUInt<5>(Imm) && VK == RISCVMCExpr::VK_RISCV_None;
  }

  bool isSImm5() const {
    if (!isImm())
      return false;
    RISCVMCExpr::VariantKind VK = RISCVMCExpr::VK_RISCV_None;
    int64_t Imm;
    bool IsConstantImm = evaluateConstantImm(getImm(), Imm, VK);
    return IsConstantImm && isInt<5>(Imm) && VK == RISCVMCExpr::VK_RISCV_None;
  }

  bool isSImm6() const {
    if (!isImm())
      return false;
    RISCVMCExpr::VariantKind VK = RISCVMCExpr::VK_RISCV_None;
    int64_t Imm;
    bool IsConstantImm = evaluateConstantImm(getImm(), Imm, VK);
    return IsConstantImm && isInt<6>(Imm) &&
           VK == RISCVMCExpr::VK_RISCV_None;
  }

  bool isSImm6NonZero() const {
    if (!isImm())
      return false;
    RISCVMCExpr::VariantKind VK = RISCVMCExpr::VK_RISCV_None;
    int64_t Imm;
    bool IsConstantImm = evaluateConstantImm(getImm(), Imm, VK);
    return IsConstantImm && isInt<6>(Imm) && (Imm != 0) &&
           VK == RISCVMCExpr::VK_RISCV_None;
  }

  bool isCLUIImm() const {
    if (!isImm())
      return false;
    int64_t Imm;
    RISCVMCExpr::VariantKind VK = RISCVMCExpr::VK_RISCV_None;
    bool IsConstantImm = evaluateConstantImm(getImm(), Imm, VK);
    return IsConstantImm && (Imm != 0) &&
           (isUInt<5>(Imm) || (Imm >= 0xfffe0 && Imm <= 0xfffff)) &&
           VK == RISCVMCExpr::VK_RISCV_None;
  }

  bool isUImm7Lsb00() const {
    if (!isImm())
      return false;
    int64_t Imm;
    RISCVMCExpr::VariantKind VK = RISCVMCExpr::VK_RISCV_None;
    bool IsConstantImm = evaluateConstantImm(getImm(), Imm, VK);
    return IsConstantImm && isShiftedUInt<5, 2>(Imm) &&
           VK == RISCVMCExpr::VK_RISCV_None;
  }

  bool isUImm8Lsb00() const {
    if (!isImm())
      return false;
    int64_t Imm;
    RISCVMCExpr::VariantKind VK = RISCVMCExpr::VK_RISCV_None;
    bool IsConstantImm = evaluateConstantImm(getImm(), Imm, VK);
    return IsConstantImm && isShiftedUInt<6, 2>(Imm) &&
           VK == RISCVMCExpr::VK_RISCV_None;
  }

  bool isUImm8Lsb000() const {
    if (!isImm())
      return false;
    int64_t Imm;
    RISCVMCExpr::VariantKind VK = RISCVMCExpr::VK_RISCV_None;
    bool IsConstantImm = evaluateConstantImm(getImm(), Imm, VK);
    return IsConstantImm && isShiftedUInt<5, 3>(Imm) &&
           VK == RISCVMCExpr::VK_RISCV_None;
  }

  bool isSImm9Lsb0() const { return isBareSimmNLsb0<9>(); }

  bool isUImm9Lsb000() const {
    if (!isImm())
      return false;
    int64_t Imm;
    RISCVMCExpr::VariantKind VK = RISCVMCExpr::VK_RISCV_None;
    bool IsConstantImm = evaluateConstantImm(getImm(), Imm, VK);
    return IsConstantImm && isShiftedUInt<6, 3>(Imm) &&
           VK == RISCVMCExpr::VK_RISCV_None;
  }

  bool isUImm10Lsb00NonZero() const {
    if (!isImm())
      return false;
    int64_t Imm;
    RISCVMCExpr::VariantKind VK = RISCVMCExpr::VK_RISCV_None;
    bool IsConstantImm = evaluateConstantImm(getImm(), Imm, VK);
    return IsConstantImm && isShiftedUInt<8, 2>(Imm) && (Imm != 0) &&
           VK == RISCVMCExpr::VK_RISCV_None;
  }

  bool isSImm12() const {
    RISCVMCExpr::VariantKind VK = RISCVMCExpr::VK_RISCV_None;
    int64_t Imm;
    bool IsValid;
    if (!isImm())
      return false;
    bool IsConstantImm = evaluateConstantImm(getImm(), Imm, VK);
    if (!IsConstantImm)
      IsValid = RISCVAsmParser::classifySymbolRef(getImm(), VK);
    else
      IsValid = isInt<12>(Imm);
    return IsValid && ((IsConstantImm && VK == RISCVMCExpr::VK_RISCV_None) ||
                       VK == RISCVMCExpr::VK_RISCV_LO ||
                       VK == RISCVMCExpr::VK_RISCV_PCREL_LO ||
                       VK == RISCVMCExpr::VK_RISCV_TPREL_LO);
  }

  bool isSImm12Lsb0() const { return isBareSimmNLsb0<12>(); }

  bool isSImm13Lsb0() const { return isBareSimmNLsb0<13>(); }

  bool isSImm10Lsb0000NonZero() const {
    if (!isImm())
      return false;
    int64_t Imm;
    RISCVMCExpr::VariantKind VK = RISCVMCExpr::VK_RISCV_None;
    bool IsConstantImm = evaluateConstantImm(getImm(), Imm, VK);
    return IsConstantImm && (Imm != 0) && isShiftedInt<6, 4>(Imm) &&
           VK == RISCVMCExpr::VK_RISCV_None;
  }

  bool isUImm20LUI() const {
    RISCVMCExpr::VariantKind VK = RISCVMCExpr::VK_RISCV_None;
    int64_t Imm;
    bool IsValid;
    if (!isImm())
      return false;
    bool IsConstantImm = evaluateConstantImm(getImm(), Imm, VK);
    if (!IsConstantImm) {
      IsValid = RISCVAsmParser::classifySymbolRef(getImm(), VK);
      return IsValid && (VK == RISCVMCExpr::VK_RISCV_HI ||
                         VK == RISCVMCExpr::VK_RISCV_TPREL_HI);
    } else {
      return isUInt<20>(Imm) && (VK == RISCVMCExpr::VK_RISCV_None ||
                                 VK == RISCVMCExpr::VK_RISCV_HI ||
                                 VK == RISCVMCExpr::VK_RISCV_TPREL_HI);
    }
  }

  bool isUImm20AUIPC() const {
    RISCVMCExpr::VariantKind VK = RISCVMCExpr::VK_RISCV_None;
    int64_t Imm;
    bool IsValid;
    if (!isImm())
      return false;
    bool IsConstantImm = evaluateConstantImm(getImm(), Imm, VK);
    if (!IsConstantImm) {
      IsValid = RISCVAsmParser::classifySymbolRef(getImm(), VK);
      return IsValid && (VK == RISCVMCExpr::VK_RISCV_PCREL_HI ||
                         VK == RISCVMCExpr::VK_RISCV_GOT_HI ||
                         VK == RISCVMCExpr::VK_RISCV_TLS_GOT_HI ||
                         VK == RISCVMCExpr::VK_RISCV_TLS_GD_HI);
    } else {
      return isUInt<20>(Imm) && (VK == RISCVMCExpr::VK_RISCV_None ||
                                 VK == RISCVMCExpr::VK_RISCV_PCREL_HI ||
                                 VK == RISCVMCExpr::VK_RISCV_GOT_HI ||
                                 VK == RISCVMCExpr::VK_RISCV_TLS_GOT_HI ||
                                 VK == RISCVMCExpr::VK_RISCV_TLS_GD_HI);
    }
  }

  bool isSImm21Lsb0JAL() const { return isBareSimmNLsb0<21>(); }

  bool isImmZero() const {
    if (!isImm())
      return false;
    int64_t Imm;
    RISCVMCExpr::VariantKind VK = RISCVMCExpr::VK_RISCV_None;
    bool IsConstantImm = evaluateConstantImm(getImm(), Imm, VK);
    return IsConstantImm && (Imm == 0) && VK == RISCVMCExpr::VK_RISCV_None;
  }

  bool isSImm5Plus1() const {
    if (!isImm())
      return false;
    RISCVMCExpr::VariantKind VK = RISCVMCExpr::VK_RISCV_None;
    int64_t Imm;
    bool IsConstantImm = evaluateConstantImm(getImm(), Imm, VK);
    return IsConstantImm && isInt<5>(Imm - 1) &&
           VK == RISCVMCExpr::VK_RISCV_None;
  }
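  // Editor's illustration: isInt<5>(Imm - 1) shifts the usual simm5 window up
  // by one, so this accepts constants in [-15, 16]; compare the
  // Match_InvalidSImm5Plus1 diagnostic below.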

  /// getStartLoc - Gets location of the first token of this operand
  SMLoc getStartLoc() const override { return StartLoc; }
  /// getEndLoc - Gets location of the last token of this operand
  SMLoc getEndLoc() const override { return EndLoc; }
  /// True if this operand is for an RV64 instruction
  bool isRV64() const { return IsRV64; }

  unsigned getReg() const override {
    assert(Kind == KindTy::Register && "Invalid type access!");
    return Reg.RegNum.id();
  }

  StringRef getSysReg() const {
    assert(Kind == KindTy::SystemRegister && "Invalid type access!");
    return StringRef(SysReg.Data, SysReg.Length);
  }

  const MCExpr *getImm() const {
    assert(Kind == KindTy::Immediate && "Invalid type access!");
    return Imm.Val;
  }

  StringRef getToken() const {
    assert(Kind == KindTy::Token && "Invalid type access!");
    return Tok;
  }

  unsigned getVType() const {
    assert(Kind == KindTy::VType && "Invalid type access!");
    return VType.Val;
  }

  void print(raw_ostream &OS) const override {
    auto RegName = [](unsigned Reg) {
      if (Reg)
        return RISCVInstPrinter::getRegisterName(Reg);
      else
        return "noreg";
    };

    switch (Kind) {
    case KindTy::Immediate:
      OS << *getImm();
      break;
    case KindTy::Register:
      OS << "<register " << RegName(getReg()) << ">";
      break;
    case KindTy::Token:
      OS << "'" << getToken() << "'";
      break;
    case KindTy::SystemRegister:
      OS << "<sysreg: " << getSysReg() << '>';
      break;
    case KindTy::VType:
      OS << "<vtype: ";
      RISCVVType::printVType(getVType(), OS);
      OS << '>';
      break;
    }
  }

  static std::unique_ptr<RISCVOperand> createToken(StringRef Str, SMLoc S,
                                                   bool IsRV64) {
    auto Op = std::make_unique<RISCVOperand>(KindTy::Token);
    Op->Tok = Str;
    Op->StartLoc = S;
    Op->EndLoc = S;
    Op->IsRV64 = IsRV64;
    return Op;
  }

  static std::unique_ptr<RISCVOperand> createReg(unsigned RegNo, SMLoc S,
                                                 SMLoc E, bool IsRV64) {
    auto Op = std::make_unique<RISCVOperand>(KindTy::Register);
    Op->Reg.RegNum = RegNo;
    Op->StartLoc = S;
    Op->EndLoc = E;
    Op->IsRV64 = IsRV64;
    return Op;
  }

  static std::unique_ptr<RISCVOperand> createImm(const MCExpr *Val, SMLoc S,
                                                 SMLoc E, bool IsRV64) {
    auto Op = std::make_unique<RISCVOperand>(KindTy::Immediate);
    Op->Imm.Val = Val;
    Op->StartLoc = S;
    Op->EndLoc = E;
    Op->IsRV64 = IsRV64;
    return Op;
  }

  static std::unique_ptr<RISCVOperand>
  createSysReg(StringRef Str, SMLoc S, unsigned Encoding, bool IsRV64) {
    auto Op = std::make_unique<RISCVOperand>(KindTy::SystemRegister);
    Op->SysReg.Data = Str.data();
    Op->SysReg.Length = Str.size();
    Op->SysReg.Encoding = Encoding;
    Op->StartLoc = S;
    Op->IsRV64 = IsRV64;
    return Op;
  }

  static std::unique_ptr<RISCVOperand> createVType(unsigned VTypeI, SMLoc S,
                                                   bool IsRV64) {
    auto Op = std::make_unique<RISCVOperand>(KindTy::VType);
    Op->VType.Val = VTypeI;
    Op->StartLoc = S;
    Op->IsRV64 = IsRV64;
    return Op;
  }

  void addExpr(MCInst &Inst, const MCExpr *Expr) const {
    assert(Expr && "Expr shouldn't be null!");
    int64_t Imm = 0;
    RISCVMCExpr::VariantKind VK = RISCVMCExpr::VK_RISCV_None;
    bool IsConstant = evaluateConstantImm(Expr, Imm, VK);

    if (IsConstant)
      Inst.addOperand(MCOperand::createImm(Imm));
    else
      Inst.addOperand(MCOperand::createExpr(Expr));
  }

  // Used by the TableGen Code
  void addRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(getReg()));
  }

  void addImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    addExpr(Inst, getImm());
  }

  void addFenceArgOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // isFenceArg has validated the operand, meaning this cast is safe
    auto SE = cast<MCSymbolRefExpr>(getImm());

    unsigned Imm = 0;
    for (char c : SE->getSymbol().getName()) {
      switch (c) {
      default:
        llvm_unreachable("FenceArg must contain only [iorw]");
      case 'i': Imm |= RISCVFenceField::I; break;
      case 'o': Imm |= RISCVFenceField::O; break;
      case 'r': Imm |= RISCVFenceField::R; break;
      case 'w': Imm |= RISCVFenceField::W; break;
      }
    }
    Inst.addOperand(MCOperand::createImm(Imm));
  }

  void addCSRSystemRegisterOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(SysReg.Encoding));
  }

  void addVTypeIOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getVType()));
  }

  // Returns the rounding mode represented by this RISCVOperand. Should only
  // be called after checking isFRMArg.
  RISCVFPRndMode::RoundingMode getRoundingMode() const {
    // isFRMArg has validated the operand, meaning this cast is safe.
    auto SE = cast<MCSymbolRefExpr>(getImm());
    RISCVFPRndMode::RoundingMode FRM =
        RISCVFPRndMode::stringToRoundingMode(SE->getSymbol().getName());
    assert(FRM != RISCVFPRndMode::Invalid && "Invalid rounding mode");
    return FRM;
  }

  void addFRMArgOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getRoundingMode()));
  }
};
} // end anonymous namespace.

#define GET_REGISTER_MATCHER
#define GET_SUBTARGET_FEATURE_NAME
#define GET_MATCHER_IMPLEMENTATION
#define GET_MNEMONIC_SPELL_CHECKER
#include "RISCVGenAsmMatcher.inc"

static MCRegister convertFPR64ToFPR16(MCRegister Reg) {
  assert(Reg >= RISCV::F0_D && Reg <= RISCV::F31_D && "Invalid register");
  return Reg - RISCV::F0_D + RISCV::F0_H;
}

static MCRegister convertFPR64ToFPR32(MCRegister Reg) {
  assert(Reg >= RISCV::F0_D && Reg <= RISCV::F31_D && "Invalid register");
  return Reg - RISCV::F0_D + RISCV::F0_F;
}
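// Editor's illustration: both conversions rely on the F0_D, F0_H, and F0_F
// register blocks being laid out in parallel order, so, e.g., F10_D maps to
// F10_F; the 16-, 32-, and 64-bit FPRs share the same assembly names.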

static MCRegister convertVRToVRMx(const MCRegisterInfo &RI, MCRegister Reg,
                                  unsigned Kind) {
  unsigned RegClassID;
  if (Kind == MCK_VRM2)
    RegClassID = RISCV::VRM2RegClassID;
  else if (Kind == MCK_VRM4)
    RegClassID = RISCV::VRM4RegClassID;
  else if (Kind == MCK_VRM8)
    RegClassID = RISCV::VRM8RegClassID;
  else
    return 0;
  return RI.getMatchingSuperReg(Reg, RISCV::sub_vrm1_0,
                                &RISCVMCRegisterClasses[RegClassID]);
}

unsigned RISCVAsmParser::validateTargetOperandClass(MCParsedAsmOperand &AsmOp,
                                                    unsigned Kind) {
  RISCVOperand &Op = static_cast<RISCVOperand &>(AsmOp);
  if (!Op.isReg())
    return Match_InvalidOperand;

  MCRegister Reg = Op.getReg();
  bool IsRegFPR64 =
      RISCVMCRegisterClasses[RISCV::FPR64RegClassID].contains(Reg);
  bool IsRegFPR64C =
      RISCVMCRegisterClasses[RISCV::FPR64CRegClassID].contains(Reg);
  bool IsRegVR = RISCVMCRegisterClasses[RISCV::VRRegClassID].contains(Reg);

  // As the parser couldn't differentiate an FPR32 from an FPR64, coerce the
  // register from FPR64 to FPR32 or FPR64C to FPR32C if necessary.
  if ((IsRegFPR64 && Kind == MCK_FPR32) ||
      (IsRegFPR64C && Kind == MCK_FPR32C)) {
    Op.Reg.RegNum = convertFPR64ToFPR32(Reg);
    return Match_Success;
  }
  // As the parser couldn't differentiate an FPR16 from an FPR64, coerce the
  // register from FPR64 to FPR16 if necessary.
  if (IsRegFPR64 && Kind == MCK_FPR16) {
    Op.Reg.RegNum = convertFPR64ToFPR16(Reg);
    return Match_Success;
  }
  // As the parser couldn't differentiate a VRM2/VRM4/VRM8 from a VR, coerce
  // the register from VR to VRM2/VRM4/VRM8 if necessary.
  if (IsRegVR && (Kind == MCK_VRM2 || Kind == MCK_VRM4 || Kind == MCK_VRM8)) {
    Op.Reg.RegNum = convertVRToVRMx(*getContext().getRegisterInfo(), Reg, Kind);
    if (Op.Reg.RegNum == 0)
      return Match_InvalidOperand;
    return Match_Success;
  }
  return Match_InvalidOperand;
}

bool RISCVAsmParser::generateImmOutOfRangeError(
    OperandVector &Operands, uint64_t ErrorInfo, int64_t Lower, int64_t Upper,
    Twine Msg = "immediate must be an integer in the range") {
  SMLoc ErrorLoc = ((RISCVOperand &)*Operands[ErrorInfo]).getStartLoc();
  return Error(ErrorLoc, Msg + " [" + Twine(Lower) + ", " + Twine(Upper) + "]");
}

static std::string RISCVMnemonicSpellCheck(StringRef S,
                                           const FeatureBitset &FBS,
                                           unsigned VariantID = 0);

bool RISCVAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                                             OperandVector &Operands,
                                             MCStreamer &Out,
                                             uint64_t &ErrorInfo,
                                             bool MatchingInlineAsm) {
  MCInst Inst;
  FeatureBitset MissingFeatures;

  auto Result =
      MatchInstructionImpl(Operands, Inst, ErrorInfo, MissingFeatures,
                           MatchingInlineAsm);
  switch (Result) {
  default:
    break;
  case Match_Success:
    if (validateInstruction(Inst, Operands))
      return true;
    return processInstruction(Inst, IDLoc, Operands, Out);
  case Match_MissingFeature: {
    assert(MissingFeatures.any() && "Unknown missing features!");
    bool FirstFeature = true;
    std::string Msg = "instruction requires the following:";
    for (unsigned i = 0, e = MissingFeatures.size(); i != e; ++i) {
      if (MissingFeatures[i]) {
        Msg += FirstFeature ? " " : ", ";
        Msg += getSubtargetFeatureName(i);
        FirstFeature = false;
      }
    }
    return Error(IDLoc, Msg);
  }
  case Match_MnemonicFail: {
    FeatureBitset FBS = ComputeAvailableFeatures(getSTI().getFeatureBits());
    std::string Suggestion = RISCVMnemonicSpellCheck(
        ((RISCVOperand &)*Operands[0]).getToken(), FBS);
    return Error(IDLoc, "unrecognized instruction mnemonic" + Suggestion);
  }
  case Match_InvalidOperand: {
    SMLoc ErrorLoc = IDLoc;
    if (ErrorInfo != ~0U) {
      if (ErrorInfo >= Operands.size())
        return Error(ErrorLoc, "too few operands for instruction");

      ErrorLoc = ((RISCVOperand &)*Operands[ErrorInfo]).getStartLoc();
      if (ErrorLoc == SMLoc())
        ErrorLoc = IDLoc;
    }
    return Error(ErrorLoc, "invalid operand for instruction");
  }
  }

  // Handle the case when the error message is of specific type
  // other than the generic Match_InvalidOperand, and the
  // corresponding operand is missing.
  if (Result > FIRST_TARGET_MATCH_RESULT_TY) {
    SMLoc ErrorLoc = IDLoc;
    if (ErrorInfo != ~0U && ErrorInfo >= Operands.size())
      return Error(ErrorLoc, "too few operands for instruction");
  }

  switch (Result) {
  default:
    break;
  case Match_InvalidImmXLenLI:
    if (isRV64()) {
      SMLoc ErrorLoc = ((RISCVOperand &)*Operands[ErrorInfo]).getStartLoc();
      return Error(ErrorLoc, "operand must be a constant 64-bit integer");
    }
    return generateImmOutOfRangeError(Operands, ErrorInfo,
                                      std::numeric_limits<int32_t>::min(),
                                      std::numeric_limits<uint32_t>::max());
  case Match_InvalidImmZero: {
    SMLoc ErrorLoc = ((RISCVOperand &)*Operands[ErrorInfo]).getStartLoc();
    return Error(ErrorLoc, "immediate must be zero");
  }
  case Match_InvalidUImmLog2XLen:
    if (isRV64())
      return generateImmOutOfRangeError(Operands, ErrorInfo, 0, (1 << 6) - 1);
    return generateImmOutOfRangeError(Operands, ErrorInfo, 0, (1 << 5) - 1);
  case Match_InvalidUImmLog2XLenNonZero:
    if (isRV64())
      return generateImmOutOfRangeError(Operands, ErrorInfo, 1, (1 << 6) - 1);
    return generateImmOutOfRangeError(Operands, ErrorInfo, 1, (1 << 5) - 1);
  case Match_InvalidUImmLog2XLenHalf:
    if (isRV64())
      return generateImmOutOfRangeError(Operands, ErrorInfo, 0, (1 << 5) - 1);
    return generateImmOutOfRangeError(Operands, ErrorInfo, 0, (1 << 4) - 1);
  case Match_InvalidUImm5:
    return generateImmOutOfRangeError(Operands, ErrorInfo, 0, (1 << 5) - 1);
  case Match_InvalidSImm5:
    return generateImmOutOfRangeError(Operands, ErrorInfo, -(1 << 4),
                                      (1 << 4) - 1);
  case Match_InvalidSImm6:
    return generateImmOutOfRangeError(Operands, ErrorInfo, -(1 << 5),
                                      (1 << 5) - 1);
  case Match_InvalidSImm6NonZero:
    return generateImmOutOfRangeError(
        Operands, ErrorInfo, -(1 << 5), (1 << 5) - 1,
        "immediate must be non-zero in the range");
  case Match_InvalidCLUIImm:
    return generateImmOutOfRangeError(
        Operands, ErrorInfo, 1, (1 << 5) - 1,
        "immediate must be in [0xfffe0, 0xfffff] or");
  case Match_InvalidUImm7Lsb00:
    return generateImmOutOfRangeError(
        Operands, ErrorInfo, 0, (1 << 7) - 4,
        "immediate must be a multiple of 4 bytes in the range");
  case Match_InvalidUImm8Lsb00:
    return generateImmOutOfRangeError(
        Operands, ErrorInfo, 0, (1 << 8) - 4,
        "immediate must be a multiple of 4 bytes in the range");
  case Match_InvalidUImm8Lsb000:
    return generateImmOutOfRangeError(
        Operands, ErrorInfo, 0, (1 << 8) - 8,
        "immediate must be a multiple of 8 bytes in the range");
  case Match_InvalidSImm9Lsb0:
    return generateImmOutOfRangeError(
        Operands, ErrorInfo, -(1 << 8), (1 << 8) - 2,
        "immediate must be a multiple of 2 bytes in the range");
  case Match_InvalidUImm9Lsb000:
    return generateImmOutOfRangeError(
        Operands, ErrorInfo, 0, (1 << 9) - 8,
        "immediate must be a multiple of 8 bytes in the range");
  case Match_InvalidUImm10Lsb00NonZero:
    return generateImmOutOfRangeError(
        Operands, ErrorInfo, 4, (1 << 10) - 4,
        "immediate must be a multiple of 4 bytes in the range");
  case Match_InvalidSImm10Lsb0000NonZero:
    return generateImmOutOfRangeError(
        Operands, ErrorInfo, -(1 << 9), (1 << 9) - 16,
        "immediate must be a multiple of 16 bytes and non-zero in the range");
  case Match_InvalidSImm12:
    return generateImmOutOfRangeError(
        Operands, ErrorInfo, -(1 << 11), (1 << 11) - 1,
        "operand must be a symbol with %lo/%pcrel_lo/%tprel_lo modifier or an "
        "integer in the range");
  case Match_InvalidSImm12Lsb0:
    return generateImmOutOfRangeError(
        Operands, ErrorInfo, -(1 << 11), (1 << 11) - 2,
        "immediate must be a multiple of 2 bytes in the range");
  case Match_InvalidSImm13Lsb0:
    return generateImmOutOfRangeError(
        Operands, ErrorInfo, -(1 << 12), (1 << 12) - 2,
        "immediate must be a multiple of 2 bytes in the range");
  case Match_InvalidUImm20LUI:
    return generateImmOutOfRangeError(Operands, ErrorInfo, 0, (1 << 20) - 1,
                                      "operand must be a symbol with "
                                      "%hi/%tprel_hi modifier or an integer in "
                                      "the range");
  case Match_InvalidUImm20AUIPC:
    return generateImmOutOfRangeError(
        Operands, ErrorInfo, 0, (1 << 20) - 1,
        "operand must be a symbol with a "
        "%pcrel_hi/%got_pcrel_hi/%tls_ie_pcrel_hi/%tls_gd_pcrel_hi modifier or "
        "an integer in the range");
  case Match_InvalidSImm21Lsb0JAL:
    return generateImmOutOfRangeError(
        Operands, ErrorInfo, -(1 << 20), (1 << 20) - 2,
        "immediate must be a multiple of 2 bytes in the range");
  case Match_InvalidCSRSystemRegister: {
    return generateImmOutOfRangeError(Operands, ErrorInfo, 0, (1 << 12) - 1,
                                      "operand must be a valid system register "
                                      "name or an integer in the range");
  }
  case Match_InvalidFenceArg: {
    SMLoc ErrorLoc = ((RISCVOperand &)*Operands[ErrorInfo]).getStartLoc();
    return Error(
        ErrorLoc,
        "operand must be formed of letters selected in-order from 'iorw'");
  }
  case Match_InvalidFRMArg: {
    SMLoc ErrorLoc = ((RISCVOperand &)*Operands[ErrorInfo]).getStartLoc();
    return Error(
        ErrorLoc,
        "operand must be a valid floating point rounding mode mnemonic");
  }
  case Match_InvalidBareSymbol: {
    SMLoc ErrorLoc = ((RISCVOperand &)*Operands[ErrorInfo]).getStartLoc();
    return Error(ErrorLoc, "operand must be a bare symbol name");
  }
  case Match_InvalidPseudoJumpSymbol: {
    SMLoc ErrorLoc = ((RISCVOperand &)*Operands[ErrorInfo]).getStartLoc();
    return Error(ErrorLoc, "operand must be a valid jump target");
  }
  case Match_InvalidCallSymbol: {
    SMLoc ErrorLoc = ((RISCVOperand &)*Operands[ErrorInfo]).getStartLoc();
    return Error(ErrorLoc, "operand must be a bare symbol name");
  }
  case Match_InvalidTPRelAddSymbol: {
    SMLoc ErrorLoc = ((RISCVOperand &)*Operands[ErrorInfo]).getStartLoc();
    return Error(ErrorLoc, "operand must be a symbol with %tprel_add modifier");
  }
  case Match_InvalidVTypeI: {
    SMLoc ErrorLoc = ((RISCVOperand &)*Operands[ErrorInfo]).getStartLoc();
    return Error(
        ErrorLoc,
        "operand must be "
        "e[8|16|32|64|128|256|512|1024],m[1|2|4|8|f2|f4|f8],[ta|tu],[ma|mu]");
  }
  case Match_InvalidVMaskRegister: {
    SMLoc ErrorLoc = ((RISCVOperand &)*Operands[ErrorInfo]).getStartLoc();
    return Error(ErrorLoc, "operand must be v0.t");
  }
  case Match_InvalidSImm5Plus1: {
    return generateImmOutOfRangeError(Operands, ErrorInfo, -(1 << 4) + 1,
                                      (1 << 4),
                                      "immediate must be in the range");
  }
  }

  llvm_unreachable("Unknown match type detected!");
}

// Attempts to match Name as a register (either using the default name or
// alternative ABI names), setting RegNo to the matching register. Upon
// failure, returns true and sets RegNo to 0. If IsRV32E then registers
// x16-x31 will be rejected.
static bool matchRegisterNameHelper(bool IsRV32E, MCRegister &RegNo,
                                    StringRef Name) {
  RegNo = MatchRegisterName(Name);
  // The 16-/32- and 64-bit FPRs have the same asm name. Check that the initial
  // match always matches the 64-bit variant, and not the 16/32-bit one.
  assert(!(RegNo >= RISCV::F0_H && RegNo <= RISCV::F31_H));
  assert(!(RegNo >= RISCV::F0_F && RegNo <= RISCV::F31_F));
  // The default FPR register class is based on the tablegen enum ordering.
  static_assert(RISCV::F0_D < RISCV::F0_H, "FPR matching must be updated");
  static_assert(RISCV::F0_D < RISCV::F0_F, "FPR matching must be updated");
  if (RegNo == RISCV::NoRegister)
    RegNo = MatchRegisterAltName(Name);
  if (IsRV32E && RegNo >= RISCV::X16 && RegNo <= RISCV::X31)
    RegNo = RISCV::NoRegister;
  return RegNo == RISCV::NoRegister;
}

bool RISCVAsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
                                   SMLoc &EndLoc) {
  if (tryParseRegister(RegNo, StartLoc, EndLoc) != MatchOperand_Success)
    return Error(StartLoc, "invalid register name");
  return false;
}

OperandMatchResultTy RISCVAsmParser::tryParseRegister(unsigned &RegNo,
                                                      SMLoc &StartLoc,
                                                      SMLoc &EndLoc) {
  const AsmToken &Tok = getParser().getTok();
  StartLoc = Tok.getLoc();
  EndLoc = Tok.getEndLoc();
  RegNo = 0;
  StringRef Name = getLexer().getTok().getIdentifier();

  if (matchRegisterNameHelper(isRV32E(), (MCRegister &)RegNo, Name))
    return MatchOperand_NoMatch;

  getParser().Lex(); // Eat identifier token.
  return MatchOperand_Success;
}

OperandMatchResultTy RISCVAsmParser::parseRegister(OperandVector &Operands,
                                                   bool AllowParens) {
  SMLoc FirstS = getLoc();
  bool HadParens = false;
  AsmToken LParen;

  // If this is an LParen and a parenthesised register name is allowed, parse it
  // atomically.
  if (AllowParens && getLexer().is(AsmToken::LParen)) {
    AsmToken Buf[2];
    size_t ReadCount = getLexer().peekTokens(Buf);
    if (ReadCount == 2 && Buf[1].getKind() == AsmToken::RParen) {
      HadParens = true;
      LParen = getParser().getTok();
      getParser().Lex(); // Eat '('
    }
  }

  switch (getLexer().getKind()) {
  default:
    if (HadParens)
      getLexer().UnLex(LParen);
    return MatchOperand_NoMatch;
  case AsmToken::Identifier:
    StringRef Name = getLexer().getTok().getIdentifier();
    MCRegister RegNo;
    matchRegisterNameHelper(isRV32E(), RegNo, Name);

    if (RegNo == RISCV::NoRegister) {
      if (HadParens)
        getLexer().UnLex(LParen);
      return MatchOperand_NoMatch;
    }
    if (HadParens)
      Operands.push_back(RISCVOperand::createToken("(", FirstS, isRV64()));
    SMLoc S = getLoc();
    SMLoc E = SMLoc::getFromPointer(S.getPointer() - 1);
    getLexer().Lex();
    Operands.push_back(RISCVOperand::createReg(RegNo, S, E, isRV64()));
  }

  if (HadParens) {
    getParser().Lex(); // Eat ')'
    Operands.push_back(RISCVOperand::createToken(")", getLoc(), isRV64()));
  }

  return MatchOperand_Success;
}

OperandMatchResultTy
RISCVAsmParser::parseCSRSystemRegister(OperandVector &Operands) {
  SMLoc S = getLoc();
  const MCExpr *Res;

  switch (getLexer().getKind()) {
  default:
    return MatchOperand_NoMatch;
  case AsmToken::LParen:
  case AsmToken::Minus:
  case AsmToken::Plus:
  case AsmToken::Exclaim:
  case AsmToken::Tilde:
  case AsmToken::Integer:
  case AsmToken::String: {
    if (getParser().parseExpression(Res))
      return MatchOperand_ParseFail;

    auto *CE = dyn_cast<MCConstantExpr>(Res);
    if (CE) {
      int64_t Imm = CE->getValue();
      if (isUInt<12>(Imm)) {
        auto SysReg = RISCVSysReg::lookupSysRegByEncoding(Imm);
        // Accept an immediate representing a named or un-named Sys Reg
        // if the range is valid, regardless of the required features.
        Operands.push_back(RISCVOperand::createSysReg(
            SysReg ? SysReg->Name : "", S, Imm, isRV64()));
        return MatchOperand_Success;
      }
    }

    Twine Msg = "immediate must be an integer in the range";
    Error(S, Msg + " [" + Twine(0) + ", " + Twine((1 << 12) - 1) + "]");
    return MatchOperand_ParseFail;
  }
  case AsmToken::Identifier: {
    StringRef Identifier;
    if (getParser().parseIdentifier(Identifier))
      return MatchOperand_ParseFail;

    auto SysReg = RISCVSysReg::lookupSysRegByName(Identifier);
    if (!SysReg)
      SysReg = RISCVSysReg::lookupSysRegByAltName(Identifier);
    // Accept a named Sys Reg if the required features are present.
    if (SysReg) {
      if (!SysReg->haveRequiredFeatures(getSTI().getFeatureBits())) {
        Error(S, "system register use requires an option to be enabled");
        return MatchOperand_ParseFail;
      }
      Operands.push_back(RISCVOperand::createSysReg(
          Identifier, S, SysReg->Encoding, isRV64()));
      return MatchOperand_Success;
    }

    Twine Msg = "operand must be a valid system register name "
                "or an integer in the range";
    Error(S, Msg + " [" + Twine(0) + ", " + Twine((1 << 12) - 1) + "]");
    return MatchOperand_ParseFail;
  }
  case AsmToken::Percent: {
    // Discard operand with modifier.
    Twine Msg = "immediate must be an integer in the range";
    Error(S, Msg + " [" + Twine(0) + ", " + Twine((1 << 12) - 1) + "]");
    return MatchOperand_ParseFail;
  }
  }

  return MatchOperand_NoMatch;
}
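// Editor's illustration: `csrr a0, mstatus` resolves the CSR by name above,
// while `csrr a0, 0x300` is accepted as a raw encoding; any integer in
// [0, 4095] is allowed whether or not it names a known CSR.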

OperandMatchResultTy RISCVAsmParser::parseImmediate(OperandVector &Operands) {
  SMLoc S = getLoc();
  SMLoc E = SMLoc::getFromPointer(S.getPointer() - 1);
  const MCExpr *Res;

  switch (getLexer().getKind()) {
  default:
    return MatchOperand_NoMatch;
  case AsmToken::LParen:
  case AsmToken::Dot:
  case AsmToken::Minus:
  case AsmToken::Plus:
  case AsmToken::Exclaim:
  case AsmToken::Tilde:
  case AsmToken::Integer:
  case AsmToken::String:
  case AsmToken::Identifier:
    if (getParser().parseExpression(Res))
      return MatchOperand_ParseFail;
    break;
  case AsmToken::Percent:
    return parseOperandWithModifier(Operands);
  }

  Operands.push_back(RISCVOperand::createImm(Res, S, E, isRV64()));
  return MatchOperand_Success;
}

OperandMatchResultTy
RISCVAsmParser::parseOperandWithModifier(OperandVector &Operands) {
  SMLoc S = getLoc();
  SMLoc E = SMLoc::getFromPointer(S.getPointer() - 1);

  if (getLexer().getKind() != AsmToken::Percent) {
    Error(getLoc(), "expected '%' for operand modifier");
    return MatchOperand_ParseFail;
  }

  getParser().Lex(); // Eat '%'

  if (getLexer().getKind() != AsmToken::Identifier) {
    Error(getLoc(), "expected valid identifier for operand modifier");
    return MatchOperand_ParseFail;
  }
  StringRef Identifier = getParser().getTok().getIdentifier();
  RISCVMCExpr::VariantKind VK = RISCVMCExpr::getVariantKindForName(Identifier);
  if (VK == RISCVMCExpr::VK_RISCV_Invalid) {
    Error(getLoc(), "unrecognized operand modifier");
    return MatchOperand_ParseFail;
  }

  getParser().Lex(); // Eat the identifier
  if (getLexer().getKind() != AsmToken::LParen) {
    Error(getLoc(), "expected '('");
    return MatchOperand_ParseFail;
  }
  getParser().Lex(); // Eat '('

  const MCExpr *SubExpr;
  if (getParser().parseParenExpression(SubExpr, E)) {
    return MatchOperand_ParseFail;
  }

  const MCExpr *ModExpr = RISCVMCExpr::create(SubExpr, VK, getContext());
  Operands.push_back(RISCVOperand::createImm(ModExpr, S, E, isRV64()));
  return MatchOperand_Success;
}

OperandMatchResultTy RISCVAsmParser::parseBareSymbol(OperandVector &Operands) {
  SMLoc S = getLoc();
  SMLoc E = SMLoc::getFromPointer(S.getPointer() - 1);
  const MCExpr *Res;

  if (getLexer().getKind() != AsmToken::Identifier)
    return MatchOperand_NoMatch;

  StringRef Identifier;
  AsmToken Tok = getLexer().getTok();

  if (getParser().parseIdentifier(Identifier))
    return MatchOperand_ParseFail;

  if (Identifier.consume_back("@plt")) {
    Error(getLoc(), "'@plt' operand not valid for instruction");
    return MatchOperand_ParseFail;
  }

  MCSymbol *Sym = getContext().getOrCreateSymbol(Identifier);

  if (Sym->isVariable()) {
    const MCExpr *V = Sym->getVariableValue(/*SetUsed=*/false);
    if (!isa<MCSymbolRefExpr>(V)) {
      getLexer().UnLex(Tok); // Put back if it's not a bare symbol.
      return MatchOperand_NoMatch;
    }
    Res = V;
  } else
    Res = MCSymbolRefExpr::create(Sym, MCSymbolRefExpr::VK_None, getContext());

  MCBinaryExpr::Opcode Opcode;
  switch (getLexer().getKind()) {
  default:
    Operands.push_back(RISCVOperand::createImm(Res, S, E, isRV64()));
    return MatchOperand_Success;
  case AsmToken::Plus:
    Opcode = MCBinaryExpr::Add;
    break;
  case AsmToken::Minus:
    Opcode = MCBinaryExpr::Sub;
    break;
  }

  const MCExpr *Expr;
  if (getParser().parseExpression(Expr))
    return MatchOperand_ParseFail;
  Res = MCBinaryExpr::create(Opcode, Res, Expr, getContext());
  Operands.push_back(RISCVOperand::createImm(Res, S, E, isRV64()));
  return MatchOperand_Success;
}

OperandMatchResultTy RISCVAsmParser::parseCallSymbol(OperandVector &Operands) {
  SMLoc S = getLoc();
  SMLoc E = SMLoc::getFromPointer(S.getPointer() - 1);
  const MCExpr *Res;

  if (getLexer().getKind() != AsmToken::Identifier)
    return MatchOperand_NoMatch;

  // Avoid parsing the register in `call rd, foo` as a call symbol.
  if (getLexer().peekTok().getKind() != AsmToken::EndOfStatement)
    return MatchOperand_NoMatch;

  StringRef Identifier;
  if (getParser().parseIdentifier(Identifier))
    return MatchOperand_ParseFail;

  RISCVMCExpr::VariantKind Kind = RISCVMCExpr::VK_RISCV_CALL;
  if (Identifier.consume_back("@plt"))
    Kind = RISCVMCExpr::VK_RISCV_CALL_PLT;

  MCSymbol *Sym = getContext().getOrCreateSymbol(Identifier);
  Res = MCSymbolRefExpr::create(Sym, MCSymbolRefExpr::VK_None, getContext());
  Res = RISCVMCExpr::create(Res, Kind, getContext());
  Operands.push_back(RISCVOperand::createImm(Res, S, E, isRV64()));
  return MatchOperand_Success;
}

OperandMatchResultTy
RISCVAsmParser::parsePseudoJumpSymbol(OperandVector &Operands) {
  SMLoc S = getLoc();
  SMLoc E = SMLoc::getFromPointer(S.getPointer() - 1);
  const MCExpr *Res;

  if (getParser().parseExpression(Res))
    return MatchOperand_ParseFail;

  if (Res->getKind() != MCExpr::ExprKind::SymbolRef ||
      cast<MCSymbolRefExpr>(Res)->getKind() ==
          MCSymbolRefExpr::VariantKind::VK_PLT) {
    Error(S, "operand must be a valid jump target");
    return MatchOperand_ParseFail;
  }

  Res = RISCVMCExpr::create(Res, RISCVMCExpr::VK_RISCV_CALL, getContext());
  Operands.push_back(RISCVOperand::createImm(Res, S, E, isRV64()));
  return MatchOperand_Success;
}

OperandMatchResultTy RISCVAsmParser::parseJALOffset(OperandVector &Operands) {
  // Parsing jal operands is fiddly due to the `jal foo` and `jal ra, foo`
  // both being acceptable forms. When parsing `jal ra, foo` this function
  // will be called for the `ra` register operand in an attempt to match the
  // single-operand alias. parseJALOffset must fail for this case. It would
  // seem logical to try parsing the operand using parseImmediate and return
  // NoMatch if the next token is a comma (meaning we must be parsing a jal in
  // the second form rather than the first). We can't do this as there's no
  // way of rewinding the lexer state. Instead, return NoMatch if this operand
  // is an identifier and is followed by a comma.
  if (getLexer().is(AsmToken::Identifier) &&
      getLexer().peekTok().is(AsmToken::Comma))
    return MatchOperand_NoMatch;

  return parseImmediate(Operands);
}

OperandMatchResultTy RISCVAsmParser::parseVTypeI(OperandVector &Operands) {
  SMLoc S = getLoc();
  if (getLexer().isNot(AsmToken::Identifier))
    return MatchOperand_NoMatch;

  SmallVector<AsmToken, 7> VTypeIElements;
  // Put all the tokens for the vtypei operand into the VTypeIElements vector.
  while (getLexer().isNot(AsmToken::EndOfStatement)) {
    VTypeIElements.push_back(getLexer().getTok());
    getLexer().Lex();
    if (getLexer().is(AsmToken::EndOfStatement))
      break;
    if (getLexer().isNot(AsmToken::Comma))
      goto MatchFail;
    AsmToken Comma = getLexer().getTok();
    VTypeIElements.push_back(Comma);
    getLexer().Lex();
  }

  if (VTypeIElements.size() == 7) {
    // The VTypeIElements layout is:
    // SEW comma LMUL comma TA comma MA
    //  0    1    2     3   4    5   6
    StringRef Name = VTypeIElements[0].getIdentifier();
    if (!Name.consume_front("e"))
      goto MatchFail;
    unsigned Sew;
    if (Name.getAsInteger(10, Sew))
      goto MatchFail;
    if (!RISCVVType::isValidSEW(Sew))
      goto MatchFail;

    Name = VTypeIElements[2].getIdentifier();
    if (!Name.consume_front("m"))
      goto MatchFail;
    // "m" or "mf"
    bool Fractional = Name.consume_front("f");
    unsigned Lmul;
    if (Name.getAsInteger(10, Lmul))
      goto MatchFail;
    if (!RISCVVType::isValidLMUL(Lmul, Fractional))
      goto MatchFail;

    // ta or tu
    Name = VTypeIElements[4].getIdentifier();
    bool TailAgnostic;
    if (Name == "ta")
      TailAgnostic = true;
    else if (Name == "tu")
      TailAgnostic = false;
    else
      goto MatchFail;

    // ma or mu
    Name = VTypeIElements[6].getIdentifier();
    bool MaskAgnostic;
    if (Name == "ma")
      MaskAgnostic = true;
    else if (Name == "mu")
      MaskAgnostic = false;
    else
      goto MatchFail;

    unsigned SewLog2 = Log2_32(Sew / 8);
    unsigned LmulLog2 = Log2_32(Lmul);
    RISCVVSEW VSEW = static_cast<RISCVVSEW>(SewLog2);
    RISCVVLMUL VLMUL =
        static_cast<RISCVVLMUL>(Fractional ? 8 - LmulLog2 : LmulLog2);

    unsigned VTypeI =
        RISCVVType::encodeVTYPE(VLMUL, VSEW, TailAgnostic, MaskAgnostic);
    Operands.push_back(RISCVOperand::createVType(VTypeI, S, isRV64()));
    return MatchOperand_Success;
  }

  // If NoMatch, unlex all the tokens that comprise a vtypei operand
MatchFail:
  while (!VTypeIElements.empty())
    getLexer().UnLex(VTypeIElements.pop_back_val());
  return MatchOperand_NoMatch;
}
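// Editor's illustration: the vtypei operand in `vsetvli t0, a0, e32,m2,ta,ma`
// lexes into the seven tokens checked above: SEW, LMUL, and the tail/mask
// policy identifiers, separated by commas.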
|
|
|
|
OperandMatchResultTy RISCVAsmParser::parseMaskReg(OperandVector &Operands) {
|
|
switch (getLexer().getKind()) {
|
|
default:
|
|
return MatchOperand_NoMatch;
|
|
case AsmToken::Identifier:
|
|
StringRef Name = getLexer().getTok().getIdentifier();
|
|
if (!Name.consume_back(".t")) {
|
|
Error(getLoc(), "expected '.t' suffix");
|
|
return MatchOperand_ParseFail;
|
|
}
|
|
MCRegister RegNo;
|
|
matchRegisterNameHelper(isRV32E(), RegNo, Name);
|
|
|
|
if (RegNo == RISCV::NoRegister)
|
|
return MatchOperand_NoMatch;
|
|
if (RegNo != RISCV::V0)
|
|
return MatchOperand_NoMatch;
|
|
SMLoc S = getLoc();
|
|
SMLoc E = SMLoc::getFromPointer(S.getPointer() - 1);
|
|
getLexer().Lex();
|
|
Operands.push_back(RISCVOperand::createReg(RegNo, S, E, isRV64()));
|
|
}
|
|
|
|
return MatchOperand_Success;
|
|
}
|
|
|
|
OperandMatchResultTy
|
|
RISCVAsmParser::parseMemOpBaseReg(OperandVector &Operands) {
|
|
if (getLexer().isNot(AsmToken::LParen)) {
|
|
Error(getLoc(), "expected '('");
|
|
return MatchOperand_ParseFail;
|
|
}
|
|
|
|
getParser().Lex(); // Eat '('
|
|
Operands.push_back(RISCVOperand::createToken("(", getLoc(), isRV64()));
|
|
|
|
if (parseRegister(Operands) != MatchOperand_Success) {
|
|
Error(getLoc(), "expected register");
|
|
return MatchOperand_ParseFail;
|
|
}
|
|
|
|
if (getLexer().isNot(AsmToken::RParen)) {
|
|
Error(getLoc(), "expected ')'");
|
|
return MatchOperand_ParseFail;
|
|
}
|
|
|
|
getParser().Lex(); // Eat ')'
|
|
Operands.push_back(RISCVOperand::createToken(")", getLoc(), isRV64()));
|
|
|
|
return MatchOperand_Success;
|
|
}
|
|
|
|
OperandMatchResultTy RISCVAsmParser::parseAtomicMemOp(OperandVector &Operands) {
|
|
// Atomic operations such as lr.w, sc.w, and amo*.w accept a "memory operand"
|
|
// as one of their register operands, such as `(a0)`. This just denotes that
|
|
// the register (in this case `a0`) contains a memory address.
|
|
//
|
|
// Normally, we would be able to parse these by putting the parens into the
|
|
// instruction string. However, GNU as also accepts a zero-offset memory
|
|
// operand (such as `0(a0)`), and ignores the 0. Normally this would be parsed
|
|
// with parseImmediate followed by parseMemOpBaseReg, but these instructions
|
|
// do not accept an immediate operand, and we do not want to add a "dummy"
|
|
// operand that is silently dropped.
|
|
//
|
|
// Instead, we use this custom parser. This will: allow (and discard) an
|
|
// offset if it is zero; require (and discard) parentheses; and add only the
|
|
// parsed register operand to `Operands`.
|
|
//
|
|
// These operands are printed with RISCVInstPrinter::printAtomicMemOp, which
|
|
// will only print the register surrounded by parentheses (which GNU as also
|
|
// uses as its canonical representation for these operands).
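  // For example, "lr.w a1, (a0)" and "lr.w a1, 0(a0)" are both accepted here
  // and produce the same single register operand (illustrative example).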
  std::unique_ptr<RISCVOperand> OptionalImmOp;

  if (getLexer().isNot(AsmToken::LParen)) {
    // Parse an Integer token. We do not accept arbitrary constant expressions
    // in the offset field (because they may include parens, which complicates
    // parsing a lot).
    int64_t ImmVal;
    SMLoc ImmStart = getLoc();
    if (getParser().parseIntToken(ImmVal,
                                  "expected '(' or optional integer offset"))
      return MatchOperand_ParseFail;

    // Create a RISCVOperand for checking later (so the error messages are
    // nicer), but we don't add it to Operands.
    SMLoc ImmEnd = getLoc();
    OptionalImmOp =
        RISCVOperand::createImm(MCConstantExpr::create(ImmVal, getContext()),
                                ImmStart, ImmEnd, isRV64());
  }

  if (getLexer().isNot(AsmToken::LParen)) {
    Error(getLoc(), OptionalImmOp ? "expected '(' after optional integer offset"
                                  : "expected '(' or optional integer offset");
    return MatchOperand_ParseFail;
  }
  getParser().Lex(); // Eat '('

  if (parseRegister(Operands) != MatchOperand_Success) {
    Error(getLoc(), "expected register");
    return MatchOperand_ParseFail;
  }

  if (getLexer().isNot(AsmToken::RParen)) {
    Error(getLoc(), "expected ')'");
    return MatchOperand_ParseFail;
  }
  getParser().Lex(); // Eat ')'

  // Deferred handling of non-zero offsets. This makes the error messages nicer.
  if (OptionalImmOp && !OptionalImmOp->isImmZero()) {
    Error(OptionalImmOp->getStartLoc(), "optional integer offset must be 0",
          SMRange(OptionalImmOp->getStartLoc(), OptionalImmOp->getEndLoc()));
    return MatchOperand_ParseFail;
  }

  return MatchOperand_Success;
}

/// Looks at a token type and creates the relevant operand from this
/// information, adding to Operands. If operand was parsed, returns false, else
/// true.
bool RISCVAsmParser::parseOperand(OperandVector &Operands, StringRef Mnemonic) {
  // Check if the current operand has a custom associated parser. If so, try to
  // custom parse the operand; otherwise fall back to the general approach.
  OperandMatchResultTy Result =
      MatchOperandParserImpl(Operands, Mnemonic, /*ParseForAllFeatures=*/true);
  if (Result == MatchOperand_Success)
    return false;
  if (Result == MatchOperand_ParseFail)
    return true;

  // Attempt to parse token as a register.
  if (parseRegister(Operands, true) == MatchOperand_Success)
    return false;

  // Attempt to parse token as an immediate.
  if (parseImmediate(Operands) == MatchOperand_Success) {
    // Parse memory base register if present.
    if (getLexer().is(AsmToken::LParen))
      return parseMemOpBaseReg(Operands) != MatchOperand_Success;
    return false;
  }

  // Finally we have exhausted all options and must declare defeat.
  Error(getLoc(), "unknown operand");
  return true;
}

bool RISCVAsmParser::ParseInstruction(ParseInstructionInfo &Info,
                                      StringRef Name, SMLoc NameLoc,
                                      OperandVector &Operands) {
  // Ensure that if the instruction occurs when relaxation is enabled,
  // relocations are forced for the file. Ideally this would be done when there
  // is enough information to reliably determine if the instruction itself may
  // cause relaxations. Unfortunately, the instruction processing stage occurs
  // in the same pass as relocation emission, so it's too late to set a
  // 'sticky bit' for the entire file.
  if (getSTI().getFeatureBits()[RISCV::FeatureRelax]) {
    auto *Assembler = getTargetStreamer().getStreamer().getAssemblerPtr();
    if (Assembler != nullptr) {
      RISCVAsmBackend &MAB =
          static_cast<RISCVAsmBackend &>(Assembler->getBackend());
      MAB.setForceRelocs();
    }
  }

  // First operand is token for instruction.
  Operands.push_back(RISCVOperand::createToken(Name, NameLoc, isRV64()));

  // If there are no more operands, then finish.
  if (getLexer().is(AsmToken::EndOfStatement))
    return false;

  // Parse first operand.
  if (parseOperand(Operands, Name))
    return true;

  // Parse until end of statement, consuming commas between operands.
  unsigned OperandIdx = 1;
  while (getLexer().is(AsmToken::Comma)) {
    // Consume comma token.
    getLexer().Lex();

    // Parse next operand.
    if (parseOperand(Operands, Name))
      return true;

    ++OperandIdx;
  }

  if (getLexer().isNot(AsmToken::EndOfStatement)) {
    SMLoc Loc = getLexer().getLoc();
    getParser().eatToEndOfStatement();
    return Error(Loc, "unexpected token");
  }

  getParser().Lex(); // Consume the EndOfStatement.
  return false;
}

bool RISCVAsmParser::classifySymbolRef(const MCExpr *Expr,
                                       RISCVMCExpr::VariantKind &Kind) {
  Kind = RISCVMCExpr::VK_RISCV_None;

  if (const RISCVMCExpr *RE = dyn_cast<RISCVMCExpr>(Expr)) {
    Kind = RE->getKind();
    Expr = RE->getSubExpr();
  }

  MCValue Res;
  MCFixup Fixup;
  if (Expr->evaluateAsRelocatable(Res, nullptr, &Fixup))
    return Res.getRefKind() == RISCVMCExpr::VK_RISCV_None;
  return false;
}

bool RISCVAsmParser::ParseDirective(AsmToken DirectiveID) {
  // This returns false if this function recognizes the directive
  // regardless of whether it is successfully handled or reports an
  // error. Otherwise it returns true to give the generic parser a
  // chance at recognizing it.
  StringRef IDVal = DirectiveID.getString();

  if (IDVal == ".option")
    return parseDirectiveOption();
  else if (IDVal == ".attribute")
    return parseDirectiveAttribute();

  return true;
}

bool RISCVAsmParser::parseDirectiveOption() {
  MCAsmParser &Parser = getParser();
  // Get the option token.
  AsmToken Tok = Parser.getTok();
  // At the moment only identifiers are supported.
  if (Tok.isNot(AsmToken::Identifier))
    return Error(Parser.getTok().getLoc(),
                 "unexpected token, expected identifier");

  StringRef Option = Tok.getIdentifier();

  if (Option == "push") {
    getTargetStreamer().emitDirectiveOptionPush();

    Parser.Lex();
    if (Parser.getTok().isNot(AsmToken::EndOfStatement))
      return Error(Parser.getTok().getLoc(),
                   "unexpected token, expected end of statement");

    pushFeatureBits();
    return false;
  }

  if (Option == "pop") {
    SMLoc StartLoc = Parser.getTok().getLoc();
    getTargetStreamer().emitDirectiveOptionPop();

    Parser.Lex();
    if (Parser.getTok().isNot(AsmToken::EndOfStatement))
      return Error(Parser.getTok().getLoc(),
                   "unexpected token, expected end of statement");

    if (popFeatureBits())
      return Error(StartLoc, ".option pop with no .option push");

    return false;
  }

  if (Option == "rvc") {
    getTargetStreamer().emitDirectiveOptionRVC();

    Parser.Lex();
    if (Parser.getTok().isNot(AsmToken::EndOfStatement))
      return Error(Parser.getTok().getLoc(),
                   "unexpected token, expected end of statement");

    setFeatureBits(RISCV::FeatureStdExtC, "c");
    return false;
  }

  if (Option == "norvc") {
    getTargetStreamer().emitDirectiveOptionNoRVC();

    Parser.Lex();
    if (Parser.getTok().isNot(AsmToken::EndOfStatement))
      return Error(Parser.getTok().getLoc(),
                   "unexpected token, expected end of statement");

    clearFeatureBits(RISCV::FeatureStdExtC, "c");
    return false;
  }

  if (Option == "pic") {
    getTargetStreamer().emitDirectiveOptionPIC();

    Parser.Lex();
    if (Parser.getTok().isNot(AsmToken::EndOfStatement))
      return Error(Parser.getTok().getLoc(),
                   "unexpected token, expected end of statement");

    ParserOptions.IsPicEnabled = true;
    return false;
  }

  if (Option == "nopic") {
    getTargetStreamer().emitDirectiveOptionNoPIC();

    Parser.Lex();
    if (Parser.getTok().isNot(AsmToken::EndOfStatement))
      return Error(Parser.getTok().getLoc(),
                   "unexpected token, expected end of statement");

    ParserOptions.IsPicEnabled = false;
    return false;
  }

  if (Option == "relax") {
    getTargetStreamer().emitDirectiveOptionRelax();

    Parser.Lex();
    if (Parser.getTok().isNot(AsmToken::EndOfStatement))
      return Error(Parser.getTok().getLoc(),
                   "unexpected token, expected end of statement");

    setFeatureBits(RISCV::FeatureRelax, "relax");
    return false;
  }

  if (Option == "norelax") {
    getTargetStreamer().emitDirectiveOptionNoRelax();

    Parser.Lex();
    if (Parser.getTok().isNot(AsmToken::EndOfStatement))
      return Error(Parser.getTok().getLoc(),
                   "unexpected token, expected end of statement");

    clearFeatureBits(RISCV::FeatureRelax, "relax");
    return false;
  }

  // Unknown option.
  Warning(Parser.getTok().getLoc(),
          "unknown option, expected 'push', 'pop', 'rvc', 'norvc', 'pic', "
          "'nopic', 'relax' or 'norelax'");
  Parser.eatToEndOfStatement();
  return false;
}

/// parseDirectiveAttribute
///  ::= .attribute expression ',' ( expression | "string" )
///  ::= .attribute identifier ',' ( expression | "string" )
bool RISCVAsmParser::parseDirectiveAttribute() {
  MCAsmParser &Parser = getParser();
  int64_t Tag;
  SMLoc TagLoc;
  TagLoc = Parser.getTok().getLoc();
  if (Parser.getTok().is(AsmToken::Identifier)) {
    StringRef Name = Parser.getTok().getIdentifier();
    Optional<unsigned> Ret =
        ELFAttrs::attrTypeFromString(Name, RISCVAttrs::RISCVAttributeTags);
    if (!Ret.hasValue()) {
      Error(TagLoc, "attribute name not recognised: " + Name);
      return false;
    }
    Tag = Ret.getValue();
    Parser.Lex();
  } else {
    const MCExpr *AttrExpr;

    TagLoc = Parser.getTok().getLoc();
    if (Parser.parseExpression(AttrExpr))
      return true;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(AttrExpr);
    if (check(!CE, TagLoc, "expected numeric constant"))
      return true;

    Tag = CE->getValue();
  }

  if (Parser.parseToken(AsmToken::Comma, "comma expected"))
    return true;

  StringRef StringValue;
  int64_t IntegerValue = 0;
  bool IsIntegerValue = true;

  // RISC-V attributes have a string value if the tag number is odd
  // and an integer value if the tag number is even.
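  // Illustrative examples (not from the original patch):
  //   .attribute stack_align, 16       (even tag 4, integer value)
  //   .attribute arch, "rv32i2p0"      (odd tag 5, string value)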
  if (Tag % 2)
    IsIntegerValue = false;

  SMLoc ValueExprLoc = Parser.getTok().getLoc();
  if (IsIntegerValue) {
    const MCExpr *ValueExpr;
    if (Parser.parseExpression(ValueExpr))
      return true;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ValueExpr);
    if (!CE)
      return Error(ValueExprLoc, "expected numeric constant");
    IntegerValue = CE->getValue();
  } else {
    if (Parser.getTok().isNot(AsmToken::String))
      return Error(Parser.getTok().getLoc(), "expected string constant");

    StringValue = Parser.getTok().getStringContents();
    Parser.Lex();
  }

  if (Parser.parseToken(AsmToken::EndOfStatement,
                        "unexpected token in '.attribute' directive"))
    return true;

  if (Tag == RISCVAttrs::ARCH) {
    StringRef Arch = StringValue;
    if (Arch.consume_front("rv32"))
      clearFeatureBits(RISCV::Feature64Bit, "64bit");
    else if (Arch.consume_front("rv64"))
      setFeatureBits(RISCV::Feature64Bit, "64bit");
    else
      return Error(ValueExprLoc, "bad arch string " + Arch);

    // .attribute arch overrides the current architecture, so unset all
    // currently enabled extensions.
    clearFeatureBits(RISCV::FeatureRV32E, "e");
    clearFeatureBits(RISCV::FeatureStdExtM, "m");
    clearFeatureBits(RISCV::FeatureStdExtA, "a");
    clearFeatureBits(RISCV::FeatureStdExtF, "f");
    clearFeatureBits(RISCV::FeatureStdExtD, "d");
    clearFeatureBits(RISCV::FeatureStdExtC, "c");
    clearFeatureBits(RISCV::FeatureStdExtB, "experimental-b");
    clearFeatureBits(RISCV::FeatureStdExtV, "experimental-v");
    clearFeatureBits(RISCV::FeatureExtZfh, "experimental-zfh");
    clearFeatureBits(RISCV::FeatureExtZba, "experimental-zba");
    clearFeatureBits(RISCV::FeatureExtZbb, "experimental-zbb");
    clearFeatureBits(RISCV::FeatureExtZbc, "experimental-zbc");
    clearFeatureBits(RISCV::FeatureExtZbe, "experimental-zbe");
    clearFeatureBits(RISCV::FeatureExtZbf, "experimental-zbf");
    clearFeatureBits(RISCV::FeatureExtZbm, "experimental-zbm");
    clearFeatureBits(RISCV::FeatureExtZbp, "experimental-zbp");
    clearFeatureBits(RISCV::FeatureExtZbproposedc, "experimental-zbproposedc");
    clearFeatureBits(RISCV::FeatureExtZbr, "experimental-zbr");
    clearFeatureBits(RISCV::FeatureExtZbs, "experimental-zbs");
    clearFeatureBits(RISCV::FeatureExtZbt, "experimental-zbt");
    clearFeatureBits(RISCV::FeatureExtZvamo, "experimental-zvamo");
    clearFeatureBits(RISCV::FeatureStdExtZvlsseg, "experimental-zvlsseg");

    while (!Arch.empty()) {
      bool DropFirst = true;
      if (Arch[0] == 'i')
        clearFeatureBits(RISCV::FeatureRV32E, "e");
      else if (Arch[0] == 'e')
        setFeatureBits(RISCV::FeatureRV32E, "e");
      else if (Arch[0] == 'g') {
        clearFeatureBits(RISCV::FeatureRV32E, "e");
        setFeatureBits(RISCV::FeatureStdExtM, "m");
        setFeatureBits(RISCV::FeatureStdExtA, "a");
        setFeatureBits(RISCV::FeatureStdExtF, "f");
        setFeatureBits(RISCV::FeatureStdExtD, "d");
      } else if (Arch[0] == 'm')
        setFeatureBits(RISCV::FeatureStdExtM, "m");
      else if (Arch[0] == 'a')
        setFeatureBits(RISCV::FeatureStdExtA, "a");
      else if (Arch[0] == 'f')
        setFeatureBits(RISCV::FeatureStdExtF, "f");
      else if (Arch[0] == 'd') {
        setFeatureBits(RISCV::FeatureStdExtF, "f");
        setFeatureBits(RISCV::FeatureStdExtD, "d");
      } else if (Arch[0] == 'c') {
        setFeatureBits(RISCV::FeatureStdExtC, "c");
      } else if (Arch[0] == 'b') {
        setFeatureBits(RISCV::FeatureStdExtB, "experimental-b");
      } else if (Arch[0] == 'v') {
        setFeatureBits(RISCV::FeatureStdExtV, "experimental-v");
      } else if (Arch[0] == 's' || Arch[0] == 'x' || Arch[0] == 'z') {
        StringRef Ext =
            Arch.take_until([](char c) { return ::isdigit(c) || c == '_'; });
        if (Ext == "zba")
          setFeatureBits(RISCV::FeatureExtZba, "experimental-zba");
        else if (Ext == "zbb")
          setFeatureBits(RISCV::FeatureExtZbb, "experimental-zbb");
        else if (Ext == "zbc")
          setFeatureBits(RISCV::FeatureExtZbc, "experimental-zbc");
        else if (Ext == "zbe")
          setFeatureBits(RISCV::FeatureExtZbe, "experimental-zbe");
        else if (Ext == "zbf")
          setFeatureBits(RISCV::FeatureExtZbf, "experimental-zbf");
        else if (Ext == "zbm")
          setFeatureBits(RISCV::FeatureExtZbm, "experimental-zbm");
        else if (Ext == "zbp")
          setFeatureBits(RISCV::FeatureExtZbp, "experimental-zbp");
        else if (Ext == "zbproposedc")
          setFeatureBits(RISCV::FeatureExtZbproposedc,
                         "experimental-zbproposedc");
        else if (Ext == "zbr")
          setFeatureBits(RISCV::FeatureExtZbr, "experimental-zbr");
        else if (Ext == "zbs")
          setFeatureBits(RISCV::FeatureExtZbs, "experimental-zbs");
        else if (Ext == "zbt")
          setFeatureBits(RISCV::FeatureExtZbt, "experimental-zbt");
        else if (Ext == "zfh")
          setFeatureBits(RISCV::FeatureExtZfh, "experimental-zfh");
        else if (Ext == "zvamo")
          setFeatureBits(RISCV::FeatureExtZvamo, "experimental-zvamo");
        else if (Ext == "zvlsseg")
          setFeatureBits(RISCV::FeatureStdExtZvlsseg, "experimental-zvlsseg");
        else
          return Error(ValueExprLoc, "bad arch string " + Ext);
        Arch = Arch.drop_until([](char c) { return ::isdigit(c) || c == '_'; });
        DropFirst = false;
      } else
        return Error(ValueExprLoc, "bad arch string " + Arch);

      if (DropFirst)
        Arch = Arch.drop_front(1);
      int major = 0;
      int minor = 0;
      Arch.consumeInteger(10, major);
      Arch.consume_front("p");
      Arch.consumeInteger(10, minor);
      Arch = Arch.drop_while([](char c) { return c == '_'; });
    }
  }

  if (IsIntegerValue)
    getTargetStreamer().emitAttribute(Tag, IntegerValue);
  else {
    if (Tag != RISCVAttrs::ARCH) {
      getTargetStreamer().emitTextAttribute(Tag, StringValue);
    } else {
      std::string formalArchStr = "rv32";
      if (getFeatureBits(RISCV::Feature64Bit))
        formalArchStr = "rv64";
      if (getFeatureBits(RISCV::FeatureRV32E))
        formalArchStr = (Twine(formalArchStr) + "e1p9").str();
      else
        formalArchStr = (Twine(formalArchStr) + "i2p0").str();

      if (getFeatureBits(RISCV::FeatureStdExtM))
        formalArchStr = (Twine(formalArchStr) + "_m2p0").str();
      if (getFeatureBits(RISCV::FeatureStdExtA))
        formalArchStr = (Twine(formalArchStr) + "_a2p0").str();
      if (getFeatureBits(RISCV::FeatureStdExtF))
        formalArchStr = (Twine(formalArchStr) + "_f2p0").str();
      if (getFeatureBits(RISCV::FeatureStdExtD))
        formalArchStr = (Twine(formalArchStr) + "_d2p0").str();
      if (getFeatureBits(RISCV::FeatureStdExtC))
        formalArchStr = (Twine(formalArchStr) + "_c2p0").str();
      if (getFeatureBits(RISCV::FeatureStdExtB))
        formalArchStr = (Twine(formalArchStr) + "_b0p93").str();
      if (getFeatureBits(RISCV::FeatureStdExtV))
        formalArchStr = (Twine(formalArchStr) + "_v0p10").str();
      if (getFeatureBits(RISCV::FeatureExtZfh))
        formalArchStr = (Twine(formalArchStr) + "_zfh0p1").str();
      if (getFeatureBits(RISCV::FeatureExtZba))
        formalArchStr = (Twine(formalArchStr) + "_zba0p93").str();
      if (getFeatureBits(RISCV::FeatureExtZbb))
        formalArchStr = (Twine(formalArchStr) + "_zbb0p93").str();
      if (getFeatureBits(RISCV::FeatureExtZbc))
        formalArchStr = (Twine(formalArchStr) + "_zbc0p93").str();
      if (getFeatureBits(RISCV::FeatureExtZbe))
        formalArchStr = (Twine(formalArchStr) + "_zbe0p93").str();
      if (getFeatureBits(RISCV::FeatureExtZbf))
        formalArchStr = (Twine(formalArchStr) + "_zbf0p93").str();
      if (getFeatureBits(RISCV::FeatureExtZbm))
        formalArchStr = (Twine(formalArchStr) + "_zbm0p93").str();
      if (getFeatureBits(RISCV::FeatureExtZbp))
        formalArchStr = (Twine(formalArchStr) + "_zbp0p93").str();
      if (getFeatureBits(RISCV::FeatureExtZbproposedc))
        formalArchStr = (Twine(formalArchStr) + "_zbproposedc0p93").str();
      if (getFeatureBits(RISCV::FeatureExtZbr))
        formalArchStr = (Twine(formalArchStr) + "_zbr0p93").str();
      if (getFeatureBits(RISCV::FeatureExtZbs))
        formalArchStr = (Twine(formalArchStr) + "_zbs0p93").str();
      if (getFeatureBits(RISCV::FeatureExtZbt))
        formalArchStr = (Twine(formalArchStr) + "_zbt0p93").str();
      if (getFeatureBits(RISCV::FeatureExtZvamo))
        formalArchStr = (Twine(formalArchStr) + "_zvamo0p10").str();
      if (getFeatureBits(RISCV::FeatureStdExtZvlsseg))
        formalArchStr = (Twine(formalArchStr) + "_zvlsseg0p10").str();

      getTargetStreamer().emitTextAttribute(Tag, formalArchStr);
    }
  }

  return false;
}

void RISCVAsmParser::emitToStreamer(MCStreamer &S, const MCInst &Inst) {
  MCInst CInst;
  bool Res = compressInst(CInst, Inst, getSTI(), S.getContext());
  if (Res)
    ++RISCVNumInstrsCompressed;
  S.emitInstruction((Res ? CInst : Inst), getSTI());
}

void RISCVAsmParser::emitLoadImm(MCRegister DestReg, int64_t Value,
                                 MCStreamer &Out) {
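  // Illustrative example (not in the original source): on RV32,
  // "li a0, 74565" (0x12345) materializes as "lui a0, 18" followed by
  // "addi a0, a0, 837", since (18 << 12) + 837 == 74565.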
  RISCVMatInt::InstSeq Seq = RISCVMatInt::generateInstSeq(Value, isRV64());

  MCRegister SrcReg = RISCV::X0;
  for (RISCVMatInt::Inst &Inst : Seq) {
    if (Inst.Opc == RISCV::LUI) {
      emitToStreamer(
          Out, MCInstBuilder(RISCV::LUI).addReg(DestReg).addImm(Inst.Imm));
    } else {
      emitToStreamer(
          Out, MCInstBuilder(Inst.Opc).addReg(DestReg).addReg(SrcReg).addImm(
                   Inst.Imm));
    }

    // Only the first instruction has X0 as its source.
    SrcReg = DestReg;
  }
}

void RISCVAsmParser::emitAuipcInstPair(MCOperand DestReg, MCOperand TmpReg,
                                       const MCExpr *Symbol,
                                       RISCVMCExpr::VariantKind VKHi,
                                       unsigned SecondOpcode, SMLoc IDLoc,
                                       MCStreamer &Out) {
  // A pair of instructions for PC-relative addressing; expands to
  //   TmpLabel: AUIPC TmpReg, VKHi(symbol)
  //             OP DestReg, TmpReg, %pcrel_lo(TmpLabel)
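  // For instance (illustrative), "lla a0, sym" becomes
  //   .Lpcrel_hi0: auipc a0, %pcrel_hi(sym)
  //                addi  a0, a0, %pcrel_lo(.Lpcrel_hi0)
  // where .Lpcrel_hi0 is the temporary label created below.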
  MCContext &Ctx = getContext();

  MCSymbol *TmpLabel = Ctx.createNamedTempSymbol("pcrel_hi");
  Out.emitLabel(TmpLabel);

  const RISCVMCExpr *SymbolHi = RISCVMCExpr::create(Symbol, VKHi, Ctx);
  emitToStreamer(
      Out, MCInstBuilder(RISCV::AUIPC).addOperand(TmpReg).addExpr(SymbolHi));

  const MCExpr *RefToLinkTmpLabel =
      RISCVMCExpr::create(MCSymbolRefExpr::create(TmpLabel, Ctx),
                          RISCVMCExpr::VK_RISCV_PCREL_LO, Ctx);

  emitToStreamer(Out, MCInstBuilder(SecondOpcode)
                          .addOperand(DestReg)
                          .addOperand(TmpReg)
                          .addExpr(RefToLinkTmpLabel));
}

void RISCVAsmParser::emitLoadLocalAddress(MCInst &Inst, SMLoc IDLoc,
                                          MCStreamer &Out) {
  // The load local address pseudo-instruction "lla" is used in PC-relative
  // addressing of local symbols:
  //   lla rdest, symbol
  // expands to
  //   TmpLabel: AUIPC rdest, %pcrel_hi(symbol)
  //             ADDI rdest, rdest, %pcrel_lo(TmpLabel)
  MCOperand DestReg = Inst.getOperand(0);
  const MCExpr *Symbol = Inst.getOperand(1).getExpr();
  emitAuipcInstPair(DestReg, DestReg, Symbol, RISCVMCExpr::VK_RISCV_PCREL_HI,
                    RISCV::ADDI, IDLoc, Out);
}

void RISCVAsmParser::emitLoadAddress(MCInst &Inst, SMLoc IDLoc,
                                     MCStreamer &Out) {
  // The load address pseudo-instruction "la" is used in PC-relative and
  // GOT-indirect addressing of global symbols:
  //   la rdest, symbol
  // expands to either (for non-PIC)
  //   TmpLabel: AUIPC rdest, %pcrel_hi(symbol)
  //             ADDI rdest, rdest, %pcrel_lo(TmpLabel)
  // or (for PIC)
  //   TmpLabel: AUIPC rdest, %got_pcrel_hi(symbol)
  //             Lx rdest, %pcrel_lo(TmpLabel)(rdest)
  MCOperand DestReg = Inst.getOperand(0);
  const MCExpr *Symbol = Inst.getOperand(1).getExpr();
  unsigned SecondOpcode;
  RISCVMCExpr::VariantKind VKHi;
  if (ParserOptions.IsPicEnabled) {
    SecondOpcode = isRV64() ? RISCV::LD : RISCV::LW;
    VKHi = RISCVMCExpr::VK_RISCV_GOT_HI;
  } else {
    SecondOpcode = RISCV::ADDI;
    VKHi = RISCVMCExpr::VK_RISCV_PCREL_HI;
  }
  emitAuipcInstPair(DestReg, DestReg, Symbol, VKHi, SecondOpcode, IDLoc, Out);
}

void RISCVAsmParser::emitLoadTLSIEAddress(MCInst &Inst, SMLoc IDLoc,
                                          MCStreamer &Out) {
  // The load TLS IE address pseudo-instruction "la.tls.ie" is used in
  // initial-exec TLS model addressing of global symbols:
  //   la.tls.ie rdest, symbol
  // expands to
  //   TmpLabel: AUIPC rdest, %tls_ie_pcrel_hi(symbol)
  //             Lx rdest, %pcrel_lo(TmpLabel)(rdest)
  MCOperand DestReg = Inst.getOperand(0);
  const MCExpr *Symbol = Inst.getOperand(1).getExpr();
  unsigned SecondOpcode = isRV64() ? RISCV::LD : RISCV::LW;
  emitAuipcInstPair(DestReg, DestReg, Symbol, RISCVMCExpr::VK_RISCV_TLS_GOT_HI,
                    SecondOpcode, IDLoc, Out);
}

void RISCVAsmParser::emitLoadTLSGDAddress(MCInst &Inst, SMLoc IDLoc,
                                          MCStreamer &Out) {
  // The load TLS GD address pseudo-instruction "la.tls.gd" is used in
  // global-dynamic TLS model addressing of global symbols:
  //   la.tls.gd rdest, symbol
  // expands to
  //   TmpLabel: AUIPC rdest, %tls_gd_pcrel_hi(symbol)
  //             ADDI rdest, rdest, %pcrel_lo(TmpLabel)
  MCOperand DestReg = Inst.getOperand(0);
  const MCExpr *Symbol = Inst.getOperand(1).getExpr();
  emitAuipcInstPair(DestReg, DestReg, Symbol, RISCVMCExpr::VK_RISCV_TLS_GD_HI,
                    RISCV::ADDI, IDLoc, Out);
}

void RISCVAsmParser::emitLoadStoreSymbol(MCInst &Inst, unsigned Opcode,
                                         SMLoc IDLoc, MCStreamer &Out,
                                         bool HasTmpReg) {
  // The load/store pseudo-instruction does a PC-relative load with
  // a symbol.
  //
  // The expansion looks like this
  //
  //   TmpLabel: AUIPC tmp, %pcrel_hi(symbol)
  //             [S|L]X rd, %pcrel_lo(TmpLabel)(tmp)
  MCOperand DestReg = Inst.getOperand(0);
  unsigned SymbolOpIdx = HasTmpReg ? 2 : 1;
  unsigned TmpRegOpIdx = HasTmpReg ? 1 : 0;
  MCOperand TmpReg = Inst.getOperand(TmpRegOpIdx);
  const MCExpr *Symbol = Inst.getOperand(SymbolOpIdx).getExpr();
  emitAuipcInstPair(DestReg, TmpReg, Symbol, RISCVMCExpr::VK_RISCV_PCREL_HI,
                    Opcode, IDLoc, Out);
}

void RISCVAsmParser::emitPseudoExtend(MCInst &Inst, bool SignExtend,
                                      int64_t Width, SMLoc IDLoc,
                                      MCStreamer &Out) {
  // The sign/zero extend pseudo-instruction does two shifts, with the shift
  // amounts dependent on the XLEN.
  //
  // The expansion looks like this
  //
  //   SLLI rd, rs, XLEN - Width
  //   SR[A|L]I rd, rd, XLEN - Width
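  // For example (illustrative), on RV64 "sext.b a0, a1" expands to
  //   slli a0, a1, 56
  //   srai a0, a0, 56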
  MCOperand DestReg = Inst.getOperand(0);
  MCOperand SourceReg = Inst.getOperand(1);

  unsigned SecondOpcode = SignExtend ? RISCV::SRAI : RISCV::SRLI;
  int64_t ShAmt = (isRV64() ? 64 : 32) - Width;

  assert(ShAmt > 0 && "Shift amount must be non-zero.");

  emitToStreamer(Out, MCInstBuilder(RISCV::SLLI)
                          .addOperand(DestReg)
                          .addOperand(SourceReg)
                          .addImm(ShAmt));

  emitToStreamer(Out, MCInstBuilder(SecondOpcode)
                          .addOperand(DestReg)
                          .addOperand(DestReg)
                          .addImm(ShAmt));
}

void RISCVAsmParser::emitVMSGE(MCInst &Inst, unsigned Opcode, SMLoc IDLoc,
                               MCStreamer &Out) {
  if (Inst.getNumOperands() == 3) {
    // unmasked va >= x
    //
    //  pseudoinstruction: vmsge{u}.vx vd, va, x
    //  expansion: vmslt{u}.vx vd, va, x; vmnand.mm vd, vd, vd
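    // e.g., "vmsge.vx v8, v4, a0" becomes "vmslt.vx v8, v4, a0" followed by
    // "vmnand.mm v8, v8, v8" (illustrative example).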
    emitToStreamer(Out, MCInstBuilder(Opcode)
                            .addOperand(Inst.getOperand(0))
                            .addOperand(Inst.getOperand(1))
                            .addOperand(Inst.getOperand(2))
                            .addReg(RISCV::NoRegister));
    emitToStreamer(Out, MCInstBuilder(RISCV::VMNAND_MM)
                            .addOperand(Inst.getOperand(0))
                            .addOperand(Inst.getOperand(0))
                            .addOperand(Inst.getOperand(0)));
  } else if (Inst.getNumOperands() == 4) {
    // masked va >= x, vd != v0
    //
    //  pseudoinstruction: vmsge{u}.vx vd, va, x, v0.t
    //  expansion: vmslt{u}.vx vd, va, x, v0.t; vmxor.mm vd, vd, v0
    assert(Inst.getOperand(0).getReg() != RISCV::V0 &&
           "The destination register should not be V0.");
    emitToStreamer(Out, MCInstBuilder(Opcode)
                            .addOperand(Inst.getOperand(0))
                            .addOperand(Inst.getOperand(1))
                            .addOperand(Inst.getOperand(2))
                            .addOperand(Inst.getOperand(3)));
    emitToStreamer(Out, MCInstBuilder(RISCV::VMXOR_MM)
                            .addOperand(Inst.getOperand(0))
                            .addOperand(Inst.getOperand(0))
                            .addReg(RISCV::V0));
-  } else if (Inst.getNumOperands() == 5) {
+  } else if (Inst.getNumOperands() == 5 &&
+             Inst.getOperand(0).getReg() == RISCV::V0) {
    // masked va >= x, vd == v0
    //
    //  pseudoinstruction: vmsge{u}.vx vd, va, x, v0.t, vt
    //  expansion: vmslt{u}.vx vt, va, x; vmandnot.mm vd, vd, vt
    assert(Inst.getOperand(0).getReg() == RISCV::V0 &&
           "The destination register should be V0.");
    assert(Inst.getOperand(1).getReg() != RISCV::V0 &&
           "The temporary vector register should not be V0.");
    emitToStreamer(Out, MCInstBuilder(Opcode)
                            .addOperand(Inst.getOperand(1))
                            .addOperand(Inst.getOperand(2))
                            .addOperand(Inst.getOperand(3))
                            .addOperand(Inst.getOperand(4)));
    emitToStreamer(Out, MCInstBuilder(RISCV::VMANDNOT_MM)
                            .addOperand(Inst.getOperand(0))
                            .addOperand(Inst.getOperand(0))
                            .addOperand(Inst.getOperand(1)));
+  } else if (Inst.getNumOperands() == 5) {
+    // masked va >= x, any vd
+    //
+    //  pseudoinstruction: vmsge{u}.vx vd, va, x, v0.t, vt
+    //  expansion: vmslt{u}.vx vt, va, x; vmandnot.mm vt, v0, vt;
+    //             vmandnot.mm vd, vd, v0; vmor.mm vd, vt, vd
+    emitToStreamer(Out, MCInstBuilder(Opcode)
+                            .addOperand(Inst.getOperand(1))
+                            .addOperand(Inst.getOperand(2))
+                            .addOperand(Inst.getOperand(3))
+                            .addReg(RISCV::NoRegister));
+    emitToStreamer(Out, MCInstBuilder(RISCV::VMANDNOT_MM)
+                            .addOperand(Inst.getOperand(1))
+                            .addReg(RISCV::V0)
+                            .addOperand(Inst.getOperand(1)));
+    emitToStreamer(Out, MCInstBuilder(RISCV::VMANDNOT_MM)
+                            .addOperand(Inst.getOperand(0))
+                            .addOperand(Inst.getOperand(0))
+                            .addReg(RISCV::V0));
+    emitToStreamer(Out, MCInstBuilder(RISCV::VMOR_MM)
+                            .addOperand(Inst.getOperand(0))
+                            .addOperand(Inst.getOperand(1))
+                            .addOperand(Inst.getOperand(0)));
  }
}

bool RISCVAsmParser::checkPseudoAddTPRel(MCInst &Inst,
                                         OperandVector &Operands) {
  assert(Inst.getOpcode() == RISCV::PseudoAddTPRel && "Invalid instruction");
  assert(Inst.getOperand(2).isReg() && "Unexpected second operand kind");
  if (Inst.getOperand(2).getReg() != RISCV::X4) {
    SMLoc ErrorLoc = ((RISCVOperand &)*Operands[3]).getStartLoc();
    return Error(ErrorLoc, "the second input operand must be tp/x4 when using "
                           "%tprel_add modifier");
  }

  return false;
}

std::unique_ptr<RISCVOperand> RISCVAsmParser::defaultMaskRegOp() const {
  return RISCVOperand::createReg(RISCV::NoRegister, llvm::SMLoc(),
                                 llvm::SMLoc(), isRV64());
}

bool RISCVAsmParser::validateInstruction(MCInst &Inst,
                                         OperandVector &Operands) {
  const MCInstrDesc &MCID = MII.get(Inst.getOpcode());
  unsigned Constraints =
      (MCID.TSFlags & RISCVII::ConstraintMask) >> RISCVII::ConstraintShift;
  if (Constraints == RISCVII::NoConstraint)
    return false;

  unsigned DestReg = Inst.getOperand(0).getReg();
  // Operands[1] will be the first operand, DestReg.
  SMLoc Loc = Operands[1]->getStartLoc();
  if (Constraints & RISCVII::VS2Constraint) {
    unsigned CheckReg = Inst.getOperand(1).getReg();
    if (DestReg == CheckReg)
      return Error(Loc, "The destination vector register group cannot overlap"
                        " the source vector register group.");
  }
  if ((Constraints & RISCVII::VS1Constraint) && (Inst.getOperand(2).isReg())) {
    unsigned CheckReg = Inst.getOperand(2).getReg();
    if (DestReg == CheckReg)
      return Error(Loc, "The destination vector register group cannot overlap"
                        " the source vector register group.");
  }
  if ((Constraints & RISCVII::VMConstraint) && (DestReg == RISCV::V0)) {
    // vadc, vsbc are special cases. These instructions have no mask register.
    // The destination register cannot be V0.
    unsigned Opcode = Inst.getOpcode();
    if (Opcode == RISCV::VADC_VVM || Opcode == RISCV::VADC_VXM ||
        Opcode == RISCV::VADC_VIM || Opcode == RISCV::VSBC_VVM ||
        Opcode == RISCV::VSBC_VXM || Opcode == RISCV::VFMERGE_VFM ||
        Opcode == RISCV::VMERGE_VIM || Opcode == RISCV::VMERGE_VVM ||
        Opcode == RISCV::VMERGE_VXM)
      return Error(Loc, "The destination vector register group cannot be V0.");

    // Regardless of the masked or unmasked version, the number of operands is
    // the same. For example, "viota.m v0, v2" is actually "viota.m v0, v2,
    // NoRegister". We need to check the last operand to see whether it is
    // masked or not.
    unsigned CheckReg = Inst.getOperand(Inst.getNumOperands() - 1).getReg();
    assert((CheckReg == RISCV::V0 || CheckReg == RISCV::NoRegister) &&
           "Unexpected register for mask operand");

    if (DestReg == CheckReg)
      return Error(Loc, "The destination vector register group cannot overlap"
                        " the mask register.");
  }
  return false;
}

bool RISCVAsmParser::processInstruction(MCInst &Inst, SMLoc IDLoc,
                                        OperandVector &Operands,
                                        MCStreamer &Out) {
  Inst.setLoc(IDLoc);

  switch (Inst.getOpcode()) {
  default:
    break;
  case RISCV::PseudoLI: {
    MCRegister Reg = Inst.getOperand(0).getReg();
    const MCOperand &Op1 = Inst.getOperand(1);
    if (Op1.isExpr()) {
      // We must have li reg, %lo(sym) or li reg, %pcrel_lo(sym) or similar.
      // Just convert to an addi. This allows compatibility with gas.
      emitToStreamer(Out, MCInstBuilder(RISCV::ADDI)
                              .addReg(Reg)
                              .addReg(RISCV::X0)
                              .addExpr(Op1.getExpr()));
      return false;
    }
    int64_t Imm = Inst.getOperand(1).getImm();
    // On RV32 the immediate here can either be a signed or an unsigned
    // 32-bit number. Sign extension has to be performed to ensure that Imm
    // represents the expected signed 64-bit number.
    if (!isRV64())
      Imm = SignExtend64<32>(Imm);
    emitLoadImm(Reg, Imm, Out);
    return false;
  }
  case RISCV::PseudoLLA:
    emitLoadLocalAddress(Inst, IDLoc, Out);
    return false;
  case RISCV::PseudoLA:
    emitLoadAddress(Inst, IDLoc, Out);
    return false;
  case RISCV::PseudoLA_TLS_IE:
    emitLoadTLSIEAddress(Inst, IDLoc, Out);
    return false;
  case RISCV::PseudoLA_TLS_GD:
    emitLoadTLSGDAddress(Inst, IDLoc, Out);
    return false;
  case RISCV::PseudoLB:
    emitLoadStoreSymbol(Inst, RISCV::LB, IDLoc, Out, /*HasTmpReg=*/false);
    return false;
  case RISCV::PseudoLBU:
    emitLoadStoreSymbol(Inst, RISCV::LBU, IDLoc, Out, /*HasTmpReg=*/false);
    return false;
  case RISCV::PseudoLH:
    emitLoadStoreSymbol(Inst, RISCV::LH, IDLoc, Out, /*HasTmpReg=*/false);
    return false;
  case RISCV::PseudoLHU:
    emitLoadStoreSymbol(Inst, RISCV::LHU, IDLoc, Out, /*HasTmpReg=*/false);
    return false;
  case RISCV::PseudoLW:
    emitLoadStoreSymbol(Inst, RISCV::LW, IDLoc, Out, /*HasTmpReg=*/false);
    return false;
  case RISCV::PseudoLWU:
    emitLoadStoreSymbol(Inst, RISCV::LWU, IDLoc, Out, /*HasTmpReg=*/false);
    return false;
  case RISCV::PseudoLD:
    emitLoadStoreSymbol(Inst, RISCV::LD, IDLoc, Out, /*HasTmpReg=*/false);
    return false;
  case RISCV::PseudoFLH:
    emitLoadStoreSymbol(Inst, RISCV::FLH, IDLoc, Out, /*HasTmpReg=*/true);
    return false;
  case RISCV::PseudoFLW:
    emitLoadStoreSymbol(Inst, RISCV::FLW, IDLoc, Out, /*HasTmpReg=*/true);
    return false;
  case RISCV::PseudoFLD:
    emitLoadStoreSymbol(Inst, RISCV::FLD, IDLoc, Out, /*HasTmpReg=*/true);
    return false;
  case RISCV::PseudoSB:
    emitLoadStoreSymbol(Inst, RISCV::SB, IDLoc, Out, /*HasTmpReg=*/true);
    return false;
  case RISCV::PseudoSH:
    emitLoadStoreSymbol(Inst, RISCV::SH, IDLoc, Out, /*HasTmpReg=*/true);
    return false;
  case RISCV::PseudoSW:
    emitLoadStoreSymbol(Inst, RISCV::SW, IDLoc, Out, /*HasTmpReg=*/true);
    return false;
  case RISCV::PseudoSD:
    emitLoadStoreSymbol(Inst, RISCV::SD, IDLoc, Out, /*HasTmpReg=*/true);
    return false;
  case RISCV::PseudoFSH:
    emitLoadStoreSymbol(Inst, RISCV::FSH, IDLoc, Out, /*HasTmpReg=*/true);
    return false;
  case RISCV::PseudoFSW:
    emitLoadStoreSymbol(Inst, RISCV::FSW, IDLoc, Out, /*HasTmpReg=*/true);
    return false;
  case RISCV::PseudoFSD:
    emitLoadStoreSymbol(Inst, RISCV::FSD, IDLoc, Out, /*HasTmpReg=*/true);
    return false;
  case RISCV::PseudoAddTPRel:
    if (checkPseudoAddTPRel(Inst, Operands))
      return true;
    break;
  case RISCV::PseudoSEXT_B:
    emitPseudoExtend(Inst, /*SignExtend=*/true, /*Width=*/8, IDLoc, Out);
    return false;
  case RISCV::PseudoSEXT_H:
    emitPseudoExtend(Inst, /*SignExtend=*/true, /*Width=*/16, IDLoc, Out);
    return false;
  case RISCV::PseudoZEXT_H:
    emitPseudoExtend(Inst, /*SignExtend=*/false, /*Width=*/16, IDLoc, Out);
    return false;
  case RISCV::PseudoZEXT_W:
    emitPseudoExtend(Inst, /*SignExtend=*/false, /*Width=*/32, IDLoc, Out);
    return false;
  case RISCV::PseudoVMSGEU_VX:
  case RISCV::PseudoVMSGEU_VX_M:
  case RISCV::PseudoVMSGEU_VX_M_T:
    emitVMSGE(Inst, RISCV::VMSLTU_VX, IDLoc, Out);
    return false;
  case RISCV::PseudoVMSGE_VX:
  case RISCV::PseudoVMSGE_VX_M:
  case RISCV::PseudoVMSGE_VX_M_T:
    emitVMSGE(Inst, RISCV::VMSLT_VX, IDLoc, Out);
    return false;
  case RISCV::PseudoVMSGE_VI:
  case RISCV::PseudoVMSLT_VI: {
    // These instructions are signed, and so is the immediate, so we can
    // subtract one and change the opcode.
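    // e.g., "vmsge.vi v8, v4, 5" is emitted as "vmsgt.vi v8, v4, 4"
    // (illustrative example).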
    int64_t Imm = Inst.getOperand(2).getImm();
    unsigned Opc = Inst.getOpcode() == RISCV::PseudoVMSGE_VI ? RISCV::VMSGT_VI
                                                             : RISCV::VMSLE_VI;
    emitToStreamer(Out, MCInstBuilder(Opc)
                            .addOperand(Inst.getOperand(0))
                            .addOperand(Inst.getOperand(1))
                            .addImm(Imm - 1)
                            .addOperand(Inst.getOperand(3)));
    return false;
  }
  case RISCV::PseudoVMSGEU_VI:
  case RISCV::PseudoVMSLTU_VI: {
    int64_t Imm = Inst.getOperand(2).getImm();
    // Unsigned comparisons are tricky because the immediate is signed. If the
    // immediate is 0 we can't just subtract one. vmsltu.vi v0, v1, 0 is always
    // false, but vmsleu.vi v0, v1, -1 is always true. Instead we use
    // vmsne v0, v1, v1, which is always false.
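    // Conversely, "vmsgeu.vi v8, v4, 0" is emitted as "vmseq.vv v8, v4, v4",
    // which is always true (illustrative example).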
    if (Imm == 0) {
      unsigned Opc = Inst.getOpcode() == RISCV::PseudoVMSGEU_VI
                         ? RISCV::VMSEQ_VV
                         : RISCV::VMSNE_VV;
      emitToStreamer(Out, MCInstBuilder(Opc)
                              .addOperand(Inst.getOperand(0))
                              .addOperand(Inst.getOperand(1))
                              .addOperand(Inst.getOperand(1))
                              .addOperand(Inst.getOperand(3)));
    } else {
      // Other immediate values can subtract one like signed.
      unsigned Opc = Inst.getOpcode() == RISCV::PseudoVMSGEU_VI
                         ? RISCV::VMSGTU_VI
                         : RISCV::VMSLEU_VI;
      emitToStreamer(Out, MCInstBuilder(Opc)
                              .addOperand(Inst.getOperand(0))
                              .addOperand(Inst.getOperand(1))
                              .addImm(Imm - 1)
                              .addOperand(Inst.getOperand(3)));
    }

    return false;
  }
  }

  emitToStreamer(Out, Inst);
  return false;
}

extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeRISCVAsmParser() {
  RegisterMCAsmParser<RISCVAsmParser> X(getTheRISCV32Target());
  RegisterMCAsmParser<RISCVAsmParser> Y(getTheRISCV64Target());
}
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoV.td b/llvm/lib/Target/RISCV/RISCVInstrInfoV.td
index 44208c86faf4..2c0d8f2f5163 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoV.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoV.td
@@ -1,1172 +1,1172 @@
//===-- RISCVInstrInfoV.td - RISC-V 'V' instructions -------*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// This file describes the RISC-V instructions from the standard 'V' Vector
/// extension, version 0.10.
/// This version is still experimental as the 'V' extension hasn't been
/// ratified yet.
///
//===----------------------------------------------------------------------===//

include "RISCVInstrFormatsV.td"

//===----------------------------------------------------------------------===//
// Operand and SDNode transformation definitions.
//===----------------------------------------------------------------------===//

def VTypeIAsmOperand : AsmOperandClass {
  let Name = "VTypeI";
  let ParserMethod = "parseVTypeI";
  let DiagnosticType = "InvalidVTypeI";
}

def VTypeIOp : Operand<XLenVT> {
  let ParserMatchClass = VTypeIAsmOperand;
  let PrintMethod = "printVTypeI";
  let DecoderMethod = "decodeUImmOperand<11>";
}

def VMaskAsmOperand : AsmOperandClass {
  let Name = "RVVMaskRegOpOperand";
  let RenderMethod = "addRegOperands";
  let PredicateMethod = "isV0Reg";
  let ParserMethod = "parseMaskReg";
  let IsOptional = 1;
  let DefaultMethod = "defaultMaskRegOp";
  let DiagnosticType = "InvalidVMaskRegister";
}

def VMaskOp : RegisterOperand<VMV0> {
  let ParserMatchClass = VMaskAsmOperand;
  let PrintMethod = "printVMaskReg";
  let EncoderMethod = "getVMaskReg";
  let DecoderMethod = "decodeVMaskReg";
}

def simm5 : Operand<XLenVT>, ImmLeaf<XLenVT, [{return isInt<5>(Imm);}]> {
  let ParserMatchClass = SImmAsmOperand<5>;
  let EncoderMethod = "getImmOpValue";
  let DecoderMethod = "decodeSImmOperand<5>";
  let MCOperandPredicate = [{
    int64_t Imm;
    if (MCOp.evaluateAsConstantImm(Imm))
      return isInt<5>(Imm);
    return MCOp.isBareSymbolRef();
  }];
}

def SImm5Plus1AsmOperand : AsmOperandClass {
  let Name = "SImm5Plus1";
  let RenderMethod = "addImmOperands";
  let DiagnosticType = "InvalidSImm5Plus1";
}

def simm5_plus1 : Operand<XLenVT>, ImmLeaf<XLenVT,
    [{return (isInt<5>(Imm) && Imm != -16) || Imm == 16;}]> {
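  // Accepts the range [-15, 16]; users of this operand encode Imm - 1 as a
  // simm5 (assumption, based on the vmsge/vmslt handling in the AsmParser).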
  let ParserMatchClass = SImm5Plus1AsmOperand;
  let MCOperandPredicate = [{
    int64_t Imm;
    if (MCOp.evaluateAsConstantImm(Imm))
      return (isInt<5>(Imm) && Imm != -16) || Imm == 16;
    return MCOp.isBareSymbolRef();
  }];
}

//===----------------------------------------------------------------------===//
// Instruction class templates
//===----------------------------------------------------------------------===//

let hasSideEffects = 0, mayLoad = 1, mayStore = 0 in {
// load vd, (rs1)
class VUnitStrideLoadMask<string opcodestr>
    : RVInstVLU<0b000, LSWidth8.Value{3}, LUMOPUnitStrideMask, LSWidth8.Value{2-0},
                (outs VR:$vd),
                (ins GPR:$rs1), opcodestr, "$vd, (${rs1})"> {
  let vm = 1;
}

// load vd, (rs1), vm
class VUnitStrideLoad<RISCVLSUMOP lumop, RISCVWidth width,
                      string opcodestr>
    : RVInstVLU<0b000, width.Value{3}, lumop, width.Value{2-0},
                (outs VR:$vd),
                (ins GPR:$rs1, VMaskOp:$vm), opcodestr, "$vd, (${rs1})$vm">;

// load vd, (rs1), rs2, vm
class VStridedLoad<RISCVWidth width, string opcodestr>
    : RVInstVLS<0b000, width.Value{3}, width.Value{2-0},
                (outs VR:$vd),
                (ins GPR:$rs1, GPR:$rs2, VMaskOp:$vm), opcodestr,
                "$vd, (${rs1}), $rs2$vm">;

// load vd, (rs1), vs2, vm
class VIndexedLoad<RISCVMOP mop, RISCVWidth width, string opcodestr>
    : RVInstVLX<0b000, width.Value{3}, mop, width.Value{2-0},
                (outs VR:$vd),
                (ins GPR:$rs1, VR:$vs2, VMaskOp:$vm), opcodestr,
                "$vd, (${rs1}), $vs2$vm">;

// vl<nf>r.v vd, (rs1)
class VWholeLoad<bits<3> nf, RISCVWidth width, string opcodestr, RegisterClass VRC>
    : RVInstVLU<nf, width.Value{3}, LUMOPUnitStrideWholeReg,
                width.Value{2-0}, (outs VRC:$vd), (ins GPR:$rs1),
                opcodestr, "$vd, (${rs1})"> {
  let vm = 1;
  let Uses = [];
  let RVVConstraint = NoConstraint;
}

// segment load vd, (rs1), vm
class VUnitStrideSegmentLoad<bits<3> nf, RISCVLSUMOP lumop,
                             RISCVWidth width, string opcodestr>
    : RVInstVLU<nf, width.Value{3}, lumop, width.Value{2-0},
                (outs VR:$vd),
                (ins GPR:$rs1, VMaskOp:$vm), opcodestr, "$vd, (${rs1})$vm">;

// segment load vd, (rs1), rs2, vm
class VStridedSegmentLoad<bits<3> nf, RISCVWidth width, string opcodestr>
    : RVInstVLS<nf, width.Value{3}, width.Value{2-0},
                (outs VR:$vd),
                (ins GPR:$rs1, GPR:$rs2, VMaskOp:$vm), opcodestr,
                "$vd, (${rs1}), $rs2$vm">;

// segment load vd, (rs1), vs2, vm
class VIndexedSegmentLoad<bits<3> nf, RISCVMOP mop, RISCVWidth width,
                          string opcodestr>
    : RVInstVLX<nf, width.Value{3}, mop, width.Value{2-0},
                (outs VR:$vd),
                (ins GPR:$rs1, VR:$vs2, VMaskOp:$vm), opcodestr,
                "$vd, (${rs1}), $vs2$vm">;
} // hasSideEffects = 0, mayLoad = 1, mayStore = 0
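// e.g., VUnitStrideLoad with LSWidth32 yields assembly such as
// "vle32.v v8, (a0), v0.t" (illustrative example).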
|
|
|
|
let hasSideEffects = 0, mayLoad = 0, mayStore = 1 in {
|
|
// store vd, vs3, (rs1)
|
|
class VUnitStrideStoreMask<string opcodestr>
|
|
: RVInstVSU<0b000, LSWidth8.Value{3}, SUMOPUnitStrideMask, LSWidth8.Value{2-0},
|
|
(outs), (ins VR:$vs3, GPR:$rs1), opcodestr,
|
|
"$vs3, (${rs1})"> {
|
|
let vm = 1;
|
|
}
|
|
|
|
// store vd, vs3, (rs1), vm
|
|
class VUnitStrideStore<RISCVLSUMOP sumop, RISCVWidth width,
|
|
string opcodestr>
|
|
: RVInstVSU<0b000, width.Value{3}, sumop, width.Value{2-0},
|
|
(outs), (ins VR:$vs3, GPR:$rs1, VMaskOp:$vm), opcodestr,
|
|
"$vs3, (${rs1})$vm">;
|
|
|
|
// store vd, vs3, (rs1), rs2, vm
|
|
class VStridedStore<RISCVWidth width, string opcodestr>
|
|
: RVInstVSS<0b000, width.Value{3}, width.Value{2-0}, (outs),
|
|
(ins VR:$vs3, GPR:$rs1, GPR:$rs2, VMaskOp:$vm),
|
|
opcodestr, "$vs3, (${rs1}), $rs2$vm">;
|
|
|
|
// store vd, vs3, (rs1), vs2, vm
|
|
class VIndexedStore<RISCVMOP mop, RISCVWidth width, string opcodestr>
|
|
: RVInstVSX<0b000, width.Value{3}, mop, width.Value{2-0}, (outs),
|
|
(ins VR:$vs3, GPR:$rs1, VR:$vs2, VMaskOp:$vm),
|
|
opcodestr, "$vs3, (${rs1}), $vs2$vm">;
|
|
|
|
// vs<nf>r.v vd, (rs1)
|
|
class VWholeStore<bits<3> nf, string opcodestr, RegisterClass VRC>
|
|
: RVInstVSU<nf, 0, SUMOPUnitStrideWholeReg,
|
|
0b000, (outs), (ins VRC:$vs3, GPR:$rs1),
|
|
opcodestr, "$vs3, (${rs1})"> {
|
|
let vm = 1;
|
|
let Uses = [];
|
|
}
|
|
|
|
// segment store vd, vs3, (rs1), vm
|
|
class VUnitStrideSegmentStore<bits<3> nf, RISCVWidth width, string opcodestr>
|
|
: RVInstVSU<nf, width.Value{3}, SUMOPUnitStride, width.Value{2-0},
|
|
(outs), (ins VR:$vs3, GPR:$rs1, VMaskOp:$vm), opcodestr,
|
|
"$vs3, (${rs1})$vm">;
|
|
|
|
// segment store vd, vs3, (rs1), rs2, vm
|
|
class VStridedSegmentStore<bits<3> nf, RISCVWidth width, string opcodestr>
|
|
: RVInstVSS<nf, width.Value{3}, width.Value{2-0}, (outs),
|
|
(ins VR:$vs3, GPR:$rs1, GPR:$rs2, VMaskOp:$vm),
|
|
opcodestr, "$vs3, (${rs1}), $rs2$vm">;
|
|
|
|
// segment store vd, vs3, (rs1), vs2, vm
|
|
class VIndexedSegmentStore<bits<3> nf, RISCVMOP mop, RISCVWidth width,
|
|
string opcodestr>
|
|
: RVInstVSX<nf, width.Value{3}, mop, width.Value{2-0}, (outs),
|
|
(ins VR:$vs3, GPR:$rs1, VR:$vs2, VMaskOp:$vm),
|
|
opcodestr, "$vs3, (${rs1}), $vs2$vm">;
|
|
} // hasSideEffects = 0, mayLoad = 0, mayStore = 1
|
|
|
|
let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in {
|
|
// op vd, vs2, vs1, vm
|
|
class VALUVV<bits<6> funct6, RISCVVFormat opv, string opcodestr>
|
|
: RVInstVV<funct6, opv, (outs VR:$vd),
|
|
(ins VR:$vs2, VR:$vs1, VMaskOp:$vm),
|
|
opcodestr, "$vd, $vs2, $vs1$vm">;
|
|
|
|
// op vd, vs2, vs1, v0 (without mask, use v0 as carry input)
|
|
class VALUmVV<bits<6> funct6, RISCVVFormat opv, string opcodestr>
|
|
: RVInstVV<funct6, opv, (outs VR:$vd),
|
|
(ins VR:$vs2, VR:$vs1, VMV0:$v0),
|
|
opcodestr, "$vd, $vs2, $vs1, v0"> {
|
|
let vm = 0;
|
|
}
|
|
|
|
// op vd, vs1, vs2, vm (reverse the order of vs1 and vs2)
|
|
class VALUrVV<bits<6> funct6, RISCVVFormat opv, string opcodestr>
|
|
: RVInstVV<funct6, opv, (outs VR:$vd),
|
|
(ins VR:$vs1, VR:$vs2, VMaskOp:$vm),
|
|
opcodestr, "$vd, $vs1, $vs2$vm">;
|
|
|
|
// op vd, vs2, vs1
|
|
class VALUVVNoVm<bits<6> funct6, RISCVVFormat opv, string opcodestr>
|
|
: RVInstVV<funct6, opv, (outs VR:$vd),
|
|
(ins VR:$vs2, VR:$vs1),
|
|
opcodestr, "$vd, $vs2, $vs1"> {
|
|
let vm = 1;
|
|
}
|
|
|
|
// op vd, vs2, rs1, vm
|
|
class VALUVX<bits<6> funct6, RISCVVFormat opv, string opcodestr>
|
|
: RVInstVX<funct6, opv, (outs VR:$vd),
|
|
(ins VR:$vs2, GPR:$rs1, VMaskOp:$vm),
|
|
opcodestr, "$vd, $vs2, $rs1$vm">;
|
|
|
|
// op vd, vs2, rs1, v0 (without mask, use v0 as carry input)
|
|
class VALUmVX<bits<6> funct6, RISCVVFormat opv, string opcodestr>
|
|
: RVInstVX<funct6, opv, (outs VR:$vd),
|
|
(ins VR:$vs2, GPR:$rs1, VMV0:$v0),
|
|
opcodestr, "$vd, $vs2, $rs1, v0"> {
|
|
let vm = 0;
|
|
}
|
|
|
|
// op vd, rs1, vs2, vm (reverse the order of rs1 and vs2)
|
|
class VALUrVX<bits<6> funct6, RISCVVFormat opv, string opcodestr>
|
|
: RVInstVX<funct6, opv, (outs VR:$vd),
|
|
(ins GPR:$rs1, VR:$vs2, VMaskOp:$vm),
|
|
opcodestr, "$vd, $rs1, $vs2$vm">;
|
|
|
|
// op vd, vs1, vs2
|
|
class VALUVXNoVm<bits<6> funct6, RISCVVFormat opv, string opcodestr>
|
|
: RVInstVX<funct6, opv, (outs VR:$vd),
|
|
(ins VR:$vs2, GPR:$rs1),
|
|
opcodestr, "$vd, $vs2, $rs1"> {
|
|
let vm = 1;
|
|
}
|
|
|
|
// op vd, vs2, imm, vm
|
|
class VALUVI<bits<6> funct6, string opcodestr, Operand optype = simm5>
|
|
: RVInstIVI<funct6, (outs VR:$vd),
|
|
(ins VR:$vs2, optype:$imm, VMaskOp:$vm),
|
|
opcodestr, "$vd, $vs2, $imm$vm">;
|
|
|
|
// op vd, vs2, imm, v0 (without mask, use v0 as carry input)
|
|
class VALUmVI<bits<6> funct6, string opcodestr, Operand optype = simm5>
|
|
: RVInstIVI<funct6, (outs VR:$vd),
|
|
(ins VR:$vs2, optype:$imm, VMV0:$v0),
|
|
opcodestr, "$vd, $vs2, $imm, v0"> {
|
|
let vm = 0;
|
|
}
|
|
|
|
// op vd, vs2, imm, vm
|
|
class VALUVINoVm<bits<6> funct6, string opcodestr, Operand optype = simm5>
|
|
: RVInstIVI<funct6, (outs VR:$vd),
|
|
(ins VR:$vs2, optype:$imm),
|
|
opcodestr, "$vd, $vs2, $imm"> {
|
|
let vm = 1;
|
|
}
|
|
|
|
// op vd, vs2, rs1, vm (Float)
|
|
class VALUVF<bits<6> funct6, RISCVVFormat opv, string opcodestr>
|
|
: RVInstVX<funct6, opv, (outs VR:$vd),
|
|
(ins VR:$vs2, FPR32:$rs1, VMaskOp:$vm),
|
|
opcodestr, "$vd, $vs2, $rs1$vm">;
|
|
|
|
// op vd, rs1, vs2, vm (Float) (with mask, reverse the order of rs1 and vs2)
|
|
class VALUrVF<bits<6> funct6, RISCVVFormat opv, string opcodestr>
|
|
: RVInstVX<funct6, opv, (outs VR:$vd),
|
|
(ins FPR32:$rs1, VR:$vs2, VMaskOp:$vm),
|
|
opcodestr, "$vd, $rs1, $vs2$vm">;
|
|
|
|
// op vd, vs2, vm (use vs1 as instruction encoding)
|
|
class VALUVs2<bits<6> funct6, bits<5> vs1, RISCVVFormat opv, string opcodestr>
|
|
: RVInstV<funct6, vs1, opv, (outs VR:$vd),
|
|
(ins VR:$vs2, VMaskOp:$vm),
|
|
opcodestr, "$vd, $vs2$vm">;
|
|
} // hasSideEffects = 0, mayLoad = 0, mayStore = 0
|
|
|
|
let hasSideEffects = 0, mayLoad = 1, mayStore = 1 in {
|
|
// vamo vd, (rs1), vs2, vd, vm
|
|
class VAMOWd<RISCVAMOOP amoop, RISCVWidth width, string opcodestr>
|
|
: RVInstVAMO<amoop, width.Value{2-0}, (outs VR:$vd_wd),
|
|
(ins GPR:$rs1, VR:$vs2, VR:$vd, VMaskOp:$vm),
|
|
opcodestr, "$vd_wd, (${rs1}), $vs2, $vd$vm"> {
|
|
let Constraints = "$vd_wd = $vd";
|
|
let wd = 1;
|
|
bits<5> vd;
|
|
let Inst{11-7} = vd;
|
|
}
|
|
|
|
// vamo x0, (rs1), vs2, vs3, vm
|
|
class VAMONoWd<RISCVAMOOP amoop, RISCVWidth width, string opcodestr>
|
|
: RVInstVAMO<amoop, width.Value{2-0}, (outs),
|
|
(ins GPR:$rs1, VR:$vs2, VR:$vs3, VMaskOp:$vm),
|
|
opcodestr, "x0, (${rs1}), $vs2, $vs3$vm"> {
|
|
bits<5> vs3;
|
|
let Inst{11-7} = vs3;
|
|
}
|
|
|
|
} // hasSideEffects = 0, mayLoad = 1, mayStore = 1
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
// Combination of instruction classes.
|
|
// Use these multiclasses to define instructions more easily.
|
|
//===----------------------------------------------------------------------===//
|
|
multiclass VALU_IV_V_X_I<string opcodestr, bits<6> funct6, Operand optype = simm5, string vw = "v"> {
|
|
def V : VALUVV<funct6, OPIVV, opcodestr # "." # vw # "v">;
|
|
def X : VALUVX<funct6, OPIVX, opcodestr # "." # vw # "x">;
|
|
def I : VALUVI<funct6, opcodestr # "." # vw # "i", optype>;
|
|
}
|
|
|
|
multiclass VALU_IV_V_X<string opcodestr, bits<6> funct6, string vw = "v"> {
|
|
def V : VALUVV<funct6, OPIVV, opcodestr # "." # vw # "v">;
|
|
def X : VALUVX<funct6, OPIVX, opcodestr # "." # vw # "x">;
|
|
}
|
|
|
|
multiclass VALUr_IV_V_X<string opcodestr, bits<6> funct6, string vw = "v"> {
|
|
def V : VALUrVV<funct6, OPIVV, opcodestr # "." # vw # "v">;
|
|
def X : VALUrVX<funct6, OPIVX, opcodestr # "." # vw # "x">;
|
|
}
|
|
|
|
multiclass VALU_IV_X_I<string opcodestr, bits<6> funct6, Operand optype = simm5, string vw = "v"> {
|
|
def X : VALUVX<funct6, OPIVX, opcodestr # "." # vw # "x">;
|
|
def I : VALUVI<funct6, opcodestr # "." # vw # "i", optype>;
|
|
}
|
|
|
|
multiclass VALU_IV_V<string opcodestr, bits<6> funct6> {
|
|
  def _VS : VALUVV<funct6, OPIVV, opcodestr # ".vs">;
}

multiclass VALUr_IV_X<string opcodestr, bits<6> funct6, string vw = "v"> {
  def X : VALUrVX<funct6, OPIVX, opcodestr # "." # vw # "x">;
}

multiclass VALU_MV_V_X<string opcodestr, bits<6> funct6, string vw = "v"> {
  def V : VALUVV<funct6, OPMVV, opcodestr # "." # vw # "v">;
  def X : VALUVX<funct6, OPMVX, opcodestr # "." # vw # "x">;
}

multiclass VALU_MV_V<string opcodestr, bits<6> funct6> {
  def _VS : VALUVV<funct6, OPMVV, opcodestr # ".vs">;
}

multiclass VALU_MV_Mask<string opcodestr, bits<6> funct6, string vm = "v"> {
  def M : VALUVVNoVm<funct6, OPMVV, opcodestr # "." # vm # "m">;
}

multiclass VALU_MV_X<string opcodestr, bits<6> funct6, string vw = "v"> {
  def X : VALUVX<funct6, OPMVX, opcodestr # "." # vw # "x">;
}

multiclass VALUr_MV_V_X<string opcodestr, bits<6> funct6, string vw = "v"> {
  def V : VALUrVV<funct6, OPMVV, opcodestr # "." # vw # "v">;
  def X : VALUrVX<funct6, OPMVX, opcodestr # "." # vw # "x">;
}

multiclass VALUr_MV_X<string opcodestr, bits<6> funct6, string vw = "v"> {
  def X : VALUrVX<funct6, OPMVX, opcodestr # "." # vw # "x">;
}

multiclass VALU_MV_VS2<string opcodestr, bits<6> funct6, bits<5> vs1> {
  def "" : VALUVs2<funct6, vs1, OPMVV, opcodestr>;
}

multiclass VALUm_IV_V_X_I<string opcodestr, bits<6> funct6> {
  def VM : VALUmVV<funct6, OPIVV, opcodestr # ".vvm">;
  def XM : VALUmVX<funct6, OPIVX, opcodestr # ".vxm">;
  def IM : VALUmVI<funct6, opcodestr # ".vim">;
}

multiclass VALUm_IV_V_X<string opcodestr, bits<6> funct6> {
  def VM : VALUmVV<funct6, OPIVV, opcodestr # ".vvm">;
  def XM : VALUmVX<funct6, OPIVX, opcodestr # ".vxm">;
}

multiclass VALUNoVm_IV_V_X_I<string opcodestr, bits<6> funct6, Operand optype = simm5> {
  def V : VALUVVNoVm<funct6, OPIVV, opcodestr # ".vv">;
  def X : VALUVXNoVm<funct6, OPIVX, opcodestr # ".vx">;
  def I : VALUVINoVm<funct6, opcodestr # ".vi", optype>;
}

multiclass VALUNoVm_IV_V_X<string opcodestr, bits<6> funct6> {
  def V : VALUVVNoVm<funct6, OPIVV, opcodestr # ".vv">;
  def X : VALUVXNoVm<funct6, OPIVX, opcodestr # ".vx">;
}

multiclass VALU_FV_V_F<string opcodestr, bits<6> funct6, string vw = "v"> {
  def V : VALUVV<funct6, OPFVV, opcodestr # "." # vw # "v">;
  def F : VALUVF<funct6, OPFVF, opcodestr # "." # vw # "f">;
}

multiclass VALU_FV_F<string opcodestr, bits<6> funct6, string vw = "v"> {
  def F : VALUVF<funct6, OPFVF, opcodestr # "." # vw # "f">;
}

multiclass VALUr_FV_V_F<string opcodestr, bits<6> funct6, string vw = "v"> {
  def V : VALUrVV<funct6, OPFVV, opcodestr # "." # vw # "v">;
  def F : VALUrVF<funct6, OPFVF, opcodestr # "." # vw # "f">;
}

multiclass VALU_FV_V<string opcodestr, bits<6> funct6> {
  def _VS : VALUVV<funct6, OPFVV, opcodestr # ".vs">;
}

multiclass VALU_FV_VS2<string opcodestr, bits<6> funct6, bits<5> vs1> {
  def "" : VALUVs2<funct6, vs1, OPFVV, opcodestr>;
}

multiclass VAMO<RISCVAMOOP amoop, RISCVWidth width, string opcodestr> {
  def _WD : VAMOWd<amoop, width, opcodestr>;
  def _UNWD : VAMONoWd<amoop, width, opcodestr>;
}

multiclass VWholeLoad<bits<3> nf, string opcodestr, RegisterClass VRC> {
  def E8_V : VWholeLoad<nf, LSWidth8, opcodestr # "e8.v", VRC>;
  def E16_V : VWholeLoad<nf, LSWidth16, opcodestr # "e16.v", VRC>;
  def E32_V : VWholeLoad<nf, LSWidth32, opcodestr # "e32.v", VRC>;
  def E64_V : VWholeLoad<nf, LSWidth64, opcodestr # "e64.v", VRC>;
}
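
// A defm instantiation of one of these multiclasses yields one record per
// inner def; e.g. "defm VMSLT_V : VALU_IV_V_X<"vmslt", 0b011011>;" below
// produces VMSLT_VV and VMSLT_VX ("vmslt.vv"/"vmslt.vx"), which the
// comparison InstAliases further down refer to.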

//===----------------------------------------------------------------------===//
// Instructions
//===----------------------------------------------------------------------===//

let Predicates = [HasStdExtV] in {
let hasSideEffects = 1, mayLoad = 0, mayStore = 0 in {
def VSETVLI : RVInstSetVLi<(outs GPR:$rd), (ins GPR:$rs1, VTypeIOp:$vtypei),
                           "vsetvli", "$rd, $rs1, $vtypei">;

def VSETIVLI : RVInstSetiVLi<(outs GPR:$rd), (ins uimm5:$uimm, VTypeIOp:$vtypei),
                             "vsetivli", "$rd, $uimm, $vtypei">;

def VSETVL : RVInstSetVL<(outs GPR:$rd), (ins GPR:$rs1, GPR:$rs2),
                         "vsetvl", "$rd, $rs1, $rs2">;
} // hasSideEffects = 1, mayLoad = 0, mayStore = 0
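
// A well-formed use would be "vsetvli a2, a0, e8,m1,ta,mu": the $vtypei
// operand takes the e[8|16|32|64|128|256|512|1024],m[1|2|4|8|f2|f4|f8],
// [ta|tu],[ma|mu] form checked by llvm/test/MC/RISCV/rvv/invalid.s below.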

// Vector Unit-Stride Instructions
def VLE8_V : VUnitStrideLoad<LUMOPUnitStride, LSWidth8, "vle8.v">;
def VLE16_V : VUnitStrideLoad<LUMOPUnitStride, LSWidth16, "vle16.v">;
def VLE32_V : VUnitStrideLoad<LUMOPUnitStride, LSWidth32, "vle32.v">;
def VLE64_V : VUnitStrideLoad<LUMOPUnitStride, LSWidth64, "vle64.v">;

def VLE8FF_V : VUnitStrideLoad<LUMOPUnitStrideFF, LSWidth8, "vle8ff.v">;
def VLE16FF_V : VUnitStrideLoad<LUMOPUnitStrideFF, LSWidth16, "vle16ff.v">;
def VLE32FF_V : VUnitStrideLoad<LUMOPUnitStrideFF, LSWidth32, "vle32ff.v">;
def VLE64FF_V : VUnitStrideLoad<LUMOPUnitStrideFF, LSWidth64, "vle64ff.v">;

def VLE1_V : VUnitStrideLoadMask<"vle1.v">;
def VSE1_V : VUnitStrideStoreMask<"vse1.v">;

def VSE8_V : VUnitStrideStore<SUMOPUnitStride, LSWidth8, "vse8.v">;
def VSE16_V : VUnitStrideStore<SUMOPUnitStride, LSWidth16, "vse16.v">;
def VSE32_V : VUnitStrideStore<SUMOPUnitStride, LSWidth32, "vse32.v">;
def VSE64_V : VUnitStrideStore<SUMOPUnitStride, LSWidth64, "vse64.v">;

// Vector Strided Instructions
def VLSE8_V : VStridedLoad<LSWidth8, "vlse8.v">;
def VLSE16_V : VStridedLoad<LSWidth16, "vlse16.v">;
def VLSE32_V : VStridedLoad<LSWidth32, "vlse32.v">;
def VLSE64_V : VStridedLoad<LSWidth64, "vlse64.v">;

def VSSE8_V : VStridedStore<LSWidth8, "vsse8.v">;
def VSSE16_V : VStridedStore<LSWidth16, "vsse16.v">;
def VSSE32_V : VStridedStore<LSWidth32, "vsse32.v">;
def VSSE64_V : VStridedStore<LSWidth64, "vsse64.v">;

// Vector Indexed Instructions
def VLUXEI8_V : VIndexedLoad<MOPLDIndexedUnord, LSWidth8, "vluxei8.v">;
def VLUXEI16_V : VIndexedLoad<MOPLDIndexedUnord, LSWidth16, "vluxei16.v">;
def VLUXEI32_V : VIndexedLoad<MOPLDIndexedUnord, LSWidth32, "vluxei32.v">;
def VLUXEI64_V : VIndexedLoad<MOPLDIndexedUnord, LSWidth64, "vluxei64.v">;

def VLOXEI8_V : VIndexedLoad<MOPLDIndexedOrder, LSWidth8, "vloxei8.v">;
def VLOXEI16_V : VIndexedLoad<MOPLDIndexedOrder, LSWidth16, "vloxei16.v">;
def VLOXEI32_V : VIndexedLoad<MOPLDIndexedOrder, LSWidth32, "vloxei32.v">;
def VLOXEI64_V : VIndexedLoad<MOPLDIndexedOrder, LSWidth64, "vloxei64.v">;

def VSUXEI8_V : VIndexedStore<MOPSTIndexedUnord, LSWidth8, "vsuxei8.v">;
def VSUXEI16_V : VIndexedStore<MOPSTIndexedUnord, LSWidth16, "vsuxei16.v">;
def VSUXEI32_V : VIndexedStore<MOPSTIndexedUnord, LSWidth32, "vsuxei32.v">;
def VSUXEI64_V : VIndexedStore<MOPSTIndexedUnord, LSWidth64, "vsuxei64.v">;

def VSOXEI8_V : VIndexedStore<MOPSTIndexedOrder, LSWidth8, "vsoxei8.v">;
def VSOXEI16_V : VIndexedStore<MOPSTIndexedOrder, LSWidth16, "vsoxei16.v">;
def VSOXEI32_V : VIndexedStore<MOPSTIndexedOrder, LSWidth32, "vsoxei32.v">;
def VSOXEI64_V : VIndexedStore<MOPSTIndexedOrder, LSWidth64, "vsoxei64.v">;

defm VL1R : VWholeLoad<0, "vl1r", VR>;
defm VL2R : VWholeLoad<1, "vl2r", VRM2>;
defm VL4R : VWholeLoad<3, "vl4r", VRM4>;
defm VL8R : VWholeLoad<7, "vl8r", VRM8>;
def : InstAlias<"vl1r.v $vd, (${rs1})", (VL1RE8_V VR:$vd, GPR:$rs1)>;
def : InstAlias<"vl2r.v $vd, (${rs1})", (VL2RE8_V VRM2:$vd, GPR:$rs1)>;
def : InstAlias<"vl4r.v $vd, (${rs1})", (VL4RE8_V VRM4:$vd, GPR:$rs1)>;
def : InstAlias<"vl8r.v $vd, (${rs1})", (VL8RE8_V VRM8:$vd, GPR:$rs1)>;

def VS1R_V : VWholeStore<0, "vs1r.v", VR>;
def VS2R_V : VWholeStore<1, "vs2r.v", VRM2>;
def VS4R_V : VWholeStore<3, "vs4r.v", VRM4>;
def VS8R_V : VWholeStore<7, "vs8r.v", VRM8>;

// Vector Single-Width Integer Add and Subtract
defm VADD_V : VALU_IV_V_X_I<"vadd", 0b000000>;
defm VSUB_V : VALU_IV_V_X<"vsub", 0b000010>;
defm VRSUB_V : VALU_IV_X_I<"vrsub", 0b000011>;

def : InstAlias<"vneg.v $vd, $vs$vm", (VRSUB_VX VR:$vd, VR:$vs, X0, VMaskOp:$vm)>;

// Vector Widening Integer Add/Subtract
// Refer to 11.2 Widening Vector Arithmetic Instructions
// The destination vector register group cannot overlap a source vector
// register group of a different element width (including the mask register
// if masked), otherwise an illegal instruction exception is raised.
let Constraints = "@earlyclobber $vd" in {
let RVVConstraint = WidenV in {
defm VWADDU_V : VALU_MV_V_X<"vwaddu", 0b110000>;
defm VWSUBU_V : VALU_MV_V_X<"vwsubu", 0b110010>;
defm VWADD_V : VALU_MV_V_X<"vwadd", 0b110001>;
defm VWSUB_V : VALU_MV_V_X<"vwsub", 0b110011>;
} // RVVConstraint = WidenV
// Set earlyclobber for following instructions for second and mask operands.
// This has the downside that the earlyclobber constraint is too coarse and
// will impose unnecessary restrictions by not allowing the destination to
// overlap with the first (wide) operand.
let RVVConstraint = WidenW in {
defm VWADDU_W : VALU_MV_V_X<"vwaddu", 0b110100, "w">;
defm VWSUBU_W : VALU_MV_V_X<"vwsubu", 0b110110, "w">;
defm VWADD_W : VALU_MV_V_X<"vwadd", 0b110101, "w">;
defm VWSUB_W : VALU_MV_V_X<"vwsub", 0b110111, "w">;
} // RVVConstraint = WidenW
} // Constraints = "@earlyclobber $vd"

def : InstAlias<"vwcvt.x.x.v $vd, $vs$vm",
                (VWADD_VX VR:$vd, VR:$vs, X0, VMaskOp:$vm)>;
def : InstAlias<"vwcvtu.x.x.v $vd, $vs$vm",
                (VWADDU_VX VR:$vd, VR:$vs, X0, VMaskOp:$vm)>;

// Vector Integer Extension
defm VZEXT_VF8 : VALU_MV_VS2<"vzext.vf8", 0b010010, 0b00010>;
defm VSEXT_VF8 : VALU_MV_VS2<"vsext.vf8", 0b010010, 0b00011>;
defm VZEXT_VF4 : VALU_MV_VS2<"vzext.vf4", 0b010010, 0b00100>;
defm VSEXT_VF4 : VALU_MV_VS2<"vsext.vf4", 0b010010, 0b00101>;
defm VZEXT_VF2 : VALU_MV_VS2<"vzext.vf2", 0b010010, 0b00110>;
defm VSEXT_VF2 : VALU_MV_VS2<"vsext.vf2", 0b010010, 0b00111>;

// Vector Integer Add-with-Carry / Subtract-with-Borrow Instructions
defm VADC_V : VALUm_IV_V_X_I<"vadc", 0b010000>;
let Constraints = "@earlyclobber $vd", RVVConstraint = NoConstraint in {
defm VMADC_V : VALUm_IV_V_X_I<"vmadc", 0b010001>;
defm VMADC_V : VALUNoVm_IV_V_X_I<"vmadc", 0b010001>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = NoConstraint
defm VSBC_V : VALUm_IV_V_X<"vsbc", 0b010010>;
let Constraints = "@earlyclobber $vd", RVVConstraint = NoConstraint in {
defm VMSBC_V : VALUm_IV_V_X<"vmsbc", 0b010011>;
defm VMSBC_V : VALUNoVm_IV_V_X<"vmsbc", 0b010011>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = NoConstraint

// Vector Bitwise Logical Instructions
defm VAND_V : VALU_IV_V_X_I<"vand", 0b001001>;
defm VOR_V : VALU_IV_V_X_I<"vor", 0b001010>;
defm VXOR_V : VALU_IV_V_X_I<"vxor", 0b001011>;

def : InstAlias<"vnot.v $vd, $vs$vm",
                (VXOR_VI VR:$vd, VR:$vs, -1, VMaskOp:$vm)>;

// Vector Single-Width Bit Shift Instructions
defm VSLL_V : VALU_IV_V_X_I<"vsll", 0b100101, uimm5>;
defm VSRL_V : VALU_IV_V_X_I<"vsrl", 0b101000, uimm5>;
defm VSRA_V : VALU_IV_V_X_I<"vsra", 0b101001, uimm5>;

// Vector Narrowing Integer Right Shift Instructions
// Refer to 11.3. Narrowing Vector Arithmetic Instructions
// The destination vector register group cannot overlap the first source
// vector register group (specified by vs2). The destination vector register
// group cannot overlap the mask register if used, unless LMUL=1.
let Constraints = "@earlyclobber $vd" in {
defm VNSRL_W : VALU_IV_V_X_I<"vnsrl", 0b101100, uimm5, "w">;
defm VNSRA_W : VALU_IV_V_X_I<"vnsra", 0b101101, uimm5, "w">;
} // Constraints = "@earlyclobber $vd"

def : InstAlias<"vncvt.x.x.w $vd, $vs$vm",
                (VNSRL_WX VR:$vd, VR:$vs, X0, VMaskOp:$vm)>;

// Vector Integer Comparison Instructions
let RVVConstraint = NoConstraint in {
defm VMSEQ_V : VALU_IV_V_X_I<"vmseq", 0b011000>;
defm VMSNE_V : VALU_IV_V_X_I<"vmsne", 0b011001>;
defm VMSLTU_V : VALU_IV_V_X<"vmsltu", 0b011010>;
defm VMSLT_V : VALU_IV_V_X<"vmslt", 0b011011>;
defm VMSLEU_V : VALU_IV_V_X_I<"vmsleu", 0b011100>;
defm VMSLE_V : VALU_IV_V_X_I<"vmsle", 0b011101>;
defm VMSGTU_V : VALU_IV_X_I<"vmsgtu", 0b011110>;
defm VMSGT_V : VALU_IV_X_I<"vmsgt", 0b011111>;
} // RVVConstraint = NoConstraint

def : InstAlias<"vmsgtu.vv $vd, $va, $vb$vm",
                (VMSLTU_VV VR:$vd, VR:$vb, VR:$va, VMaskOp:$vm), 0>;
def : InstAlias<"vmsgt.vv $vd, $va, $vb$vm",
                (VMSLT_VV VR:$vd, VR:$vb, VR:$va, VMaskOp:$vm), 0>;
def : InstAlias<"vmsgeu.vv $vd, $va, $vb$vm",
                (VMSLEU_VV VR:$vd, VR:$vb, VR:$va, VMaskOp:$vm), 0>;
def : InstAlias<"vmsge.vv $vd, $va, $vb$vm",
                (VMSLE_VV VR:$vd, VR:$vb, VR:$va, VMaskOp:$vm), 0>;

let isCodeGenOnly = 0, isAsmParserOnly = 1, hasSideEffects = 0, mayLoad = 0,
    mayStore = 0 in {
// For unsigned comparisons we need to special case 0 immediate to maintain
// the always true/false semantics we would invert if we just decremented the
// immediate like we do for signed. To match the GNU assembler we will use
// vmseq/vmsne.vv with the same register for both operands which we can't do
// from an InstAlias.
def PseudoVMSGEU_VI : Pseudo<(outs VR:$vd),
                             (ins VR:$vs2, simm5_plus1:$imm, VMaskOp:$vm),
                             [], "vmsgeu.vi", "$vd, $vs2, $imm$vm">;
def PseudoVMSLTU_VI : Pseudo<(outs VR:$vd),
                             (ins VR:$vs2, simm5_plus1:$imm, VMaskOp:$vm),
                             [], "vmsltu.vi", "$vd, $vs2, $imm$vm">;
// Handle signed with pseudos as well for more consistency in the
// implementation.
def PseudoVMSGE_VI : Pseudo<(outs VR:$vd),
                            (ins VR:$vs2, simm5_plus1:$imm, VMaskOp:$vm),
                            [], "vmsge.vi", "$vd, $vs2, $imm$vm">;
def PseudoVMSLT_VI : Pseudo<(outs VR:$vd),
                            (ins VR:$vs2, simm5_plus1:$imm, VMaskOp:$vm),
                            [], "vmslt.vi", "$vd, $vs2, $imm$vm">;
}
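
// Immediate forms are rewritten by the assembler: "vmsltu.vi v8, v4, 16"
// becomes "vmsleu.vi v8, v4, 15", and the 0-immediate special case
// "vmsltu.vi v8, v4, 0" becomes the always-false "vmsne.vv v8, v4, v4"
// (see the compare.s test below).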

let isCodeGenOnly = 0, isAsmParserOnly = 1, hasSideEffects = 0, mayLoad = 0,
    mayStore = 0 in {
def PseudoVMSGEU_VX : Pseudo<(outs VR:$vd),
                             (ins VR:$vs2, GPR:$rs1),
                             [], "vmsgeu.vx", "$vd, $vs2, $rs1">;
def PseudoVMSGE_VX : Pseudo<(outs VR:$vd),
                            (ins VR:$vs2, GPR:$rs1),
                            [], "vmsge.vx", "$vd, $vs2, $rs1">;
def PseudoVMSGEU_VX_M : Pseudo<(outs VRNoV0:$vd),
                               (ins VR:$vs2, GPR:$rs1, VMaskOp:$vm),
                               [], "vmsgeu.vx", "$vd, $vs2, $rs1$vm">;
def PseudoVMSGE_VX_M : Pseudo<(outs VRNoV0:$vd),
                              (ins VR:$vs2, GPR:$rs1, VMaskOp:$vm),
                              [], "vmsge.vx", "$vd, $vs2, $rs1$vm">;
-def PseudoVMSGEU_VX_M_T : Pseudo<(outs VMV0:$vd, VR:$scratch),
+def PseudoVMSGEU_VX_M_T : Pseudo<(outs VR:$vd, VR:$scratch),
                                 (ins VR:$vs2, GPR:$rs1, VMaskOp:$vm),
                                 [], "vmsgeu.vx", "$vd, $vs2, $rs1$vm, $scratch">;
-def PseudoVMSGE_VX_M_T : Pseudo<(outs VMV0:$vd, VR:$scratch),
+def PseudoVMSGE_VX_M_T : Pseudo<(outs VR:$vd, VR:$scratch),
                                (ins VR:$vs2, GPR:$rs1, VMaskOp:$vm),
                                [], "vmsge.vx", "$vd, $vs2, $rs1$vm, $scratch">;
}
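
// These pseudos are expanded when parsed; e.g. unmasked "vmsge.vx v8, v4, a0"
// becomes "vmslt.vx v8, v4, a0" followed by "vmnot.m v8, v8", the masked form
// appends "vmxor.mm $vd, $vd, v0", and the masked form with a scratch
// register uses vmandnot.mm/vmor.mm (see the compare.s test below).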

// Vector Integer Min/Max Instructions
defm VMINU_V : VALU_IV_V_X<"vminu", 0b000100>;
defm VMIN_V : VALU_IV_V_X<"vmin", 0b000101>;
defm VMAXU_V : VALU_IV_V_X<"vmaxu", 0b000110>;
defm VMAX_V : VALU_IV_V_X<"vmax", 0b000111>;

// Vector Single-Width Integer Multiply Instructions
defm VMUL_V : VALU_MV_V_X<"vmul", 0b100101>;
defm VMULH_V : VALU_MV_V_X<"vmulh", 0b100111>;
defm VMULHU_V : VALU_MV_V_X<"vmulhu", 0b100100>;
defm VMULHSU_V : VALU_MV_V_X<"vmulhsu", 0b100110>;

// Vector Integer Divide Instructions
defm VDIVU_V : VALU_MV_V_X<"vdivu", 0b100000>;
defm VDIV_V : VALU_MV_V_X<"vdiv", 0b100001>;
defm VREMU_V : VALU_MV_V_X<"vremu", 0b100010>;
defm VREM_V : VALU_MV_V_X<"vrem", 0b100011>;

// Vector Widening Integer Multiply Instructions
let Constraints = "@earlyclobber $vd", RVVConstraint = WidenV in {
defm VWMUL_V : VALU_MV_V_X<"vwmul", 0b111011>;
defm VWMULU_V : VALU_MV_V_X<"vwmulu", 0b111000>;
defm VWMULSU_V : VALU_MV_V_X<"vwmulsu", 0b111010>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = WidenV

// Vector Single-Width Integer Multiply-Add Instructions
defm VMACC_V : VALUr_MV_V_X<"vmacc", 0b101101>;
defm VNMSAC_V : VALUr_MV_V_X<"vnmsac", 0b101111>;
defm VMADD_V : VALUr_MV_V_X<"vmadd", 0b101001>;
defm VNMSUB_V : VALUr_MV_V_X<"vnmsub", 0b101011>;

// Vector Widening Integer Multiply-Add Instructions
let Constraints = "@earlyclobber $vd", RVVConstraint = WidenV in {
defm VWMACCU_V : VALUr_MV_V_X<"vwmaccu", 0b111100>;
defm VWMACC_V : VALUr_MV_V_X<"vwmacc", 0b111101>;
defm VWMACCSU_V : VALUr_MV_V_X<"vwmaccsu", 0b111111>;
defm VWMACCUS_V : VALUr_MV_X<"vwmaccus", 0b111110>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = WidenV

// Vector Integer Merge Instructions
defm VMERGE_V : VALUm_IV_V_X_I<"vmerge", 0b010111>;

// Vector Integer Move Instructions
let hasSideEffects = 0, mayLoad = 0, mayStore = 0, vs2 = 0, vm = 1,
    RVVConstraint = NoConstraint in {
// op vd, vs1
def VMV_V_V : RVInstVV<0b010111, OPIVV, (outs VR:$vd),
                       (ins VR:$vs1), "vmv.v.v", "$vd, $vs1">;
// op vd, rs1
def VMV_V_X : RVInstVX<0b010111, OPIVX, (outs VR:$vd),
                       (ins GPR:$rs1), "vmv.v.x", "$vd, $rs1">;
// op vd, imm
def VMV_V_I : RVInstIVI<0b010111, (outs VR:$vd),
                        (ins simm5:$imm), "vmv.v.i", "$vd, $imm">;
} // hasSideEffects = 0, mayLoad = 0, mayStore = 0

// Vector Fixed-Point Arithmetic Instructions
defm VSADDU_V : VALU_IV_V_X_I<"vsaddu", 0b100000>;
defm VSADD_V : VALU_IV_V_X_I<"vsadd", 0b100001>;
defm VSSUBU_V : VALU_IV_V_X<"vssubu", 0b100010>;
defm VSSUB_V : VALU_IV_V_X<"vssub", 0b100011>;

// Vector Single-Width Averaging Add and Subtract
defm VAADDU_V : VALU_MV_V_X<"vaaddu", 0b001000>;
defm VAADD_V : VALU_MV_V_X<"vaadd", 0b001001>;
defm VASUBU_V : VALU_MV_V_X<"vasubu", 0b001010>;
defm VASUB_V : VALU_MV_V_X<"vasub", 0b001011>;

// Vector Single-Width Fractional Multiply with Rounding and Saturation
defm VSMUL_V : VALU_IV_V_X<"vsmul", 0b100111>;

// Vector Single-Width Scaling Shift Instructions
defm VSSRL_V : VALU_IV_V_X_I<"vssrl", 0b101010, uimm5>;
defm VSSRA_V : VALU_IV_V_X_I<"vssra", 0b101011, uimm5>;

// Vector Narrowing Fixed-Point Clip Instructions
let Constraints = "@earlyclobber $vd" in {
defm VNCLIPU_W : VALU_IV_V_X_I<"vnclipu", 0b101110, uimm5, "w">;
defm VNCLIP_W : VALU_IV_V_X_I<"vnclip", 0b101111, uimm5, "w">;
} // Constraints = "@earlyclobber $vd"
} // Predicates = [HasStdExtV]

let Predicates = [HasStdExtV, HasStdExtF] in {
// Vector Single-Width Floating-Point Add/Subtract Instructions
defm VFADD_V : VALU_FV_V_F<"vfadd", 0b000000>;
defm VFSUB_V : VALU_FV_V_F<"vfsub", 0b000010>;
defm VFRSUB_V : VALU_FV_F<"vfrsub", 0b100111>;

// Vector Widening Floating-Point Add/Subtract Instructions
let Constraints = "@earlyclobber $vd" in {
let RVVConstraint = WidenV in {
defm VFWADD_V : VALU_FV_V_F<"vfwadd", 0b110000>;
defm VFWSUB_V : VALU_FV_V_F<"vfwsub", 0b110010>;
} // RVVConstraint = WidenV
// Set earlyclobber for following instructions for second and mask operands.
// This has the downside that the earlyclobber constraint is too coarse and
// will impose unnecessary restrictions by not allowing the destination to
// overlap with the first (wide) operand.
let RVVConstraint = WidenW in {
defm VFWADD_W : VALU_FV_V_F<"vfwadd", 0b110100, "w">;
defm VFWSUB_W : VALU_FV_V_F<"vfwsub", 0b110110, "w">;
} // RVVConstraint = WidenW
} // Constraints = "@earlyclobber $vd"

// Vector Single-Width Floating-Point Multiply/Divide Instructions
defm VFMUL_V : VALU_FV_V_F<"vfmul", 0b100100>;
defm VFDIV_V : VALU_FV_V_F<"vfdiv", 0b100000>;
defm VFRDIV_V : VALU_FV_F<"vfrdiv", 0b100001>;

// Vector Widening Floating-Point Multiply
let Constraints = "@earlyclobber $vd", RVVConstraint = WidenV in {
defm VFWMUL_V : VALU_FV_V_F<"vfwmul", 0b111000>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = WidenV

// Vector Single-Width Floating-Point Fused Multiply-Add Instructions
defm VFMACC_V : VALUr_FV_V_F<"vfmacc", 0b101100>;
defm VFNMACC_V : VALUr_FV_V_F<"vfnmacc", 0b101101>;
defm VFMSAC_V : VALUr_FV_V_F<"vfmsac", 0b101110>;
defm VFNMSAC_V : VALUr_FV_V_F<"vfnmsac", 0b101111>;
defm VFMADD_V : VALUr_FV_V_F<"vfmadd", 0b101000>;
defm VFNMADD_V : VALUr_FV_V_F<"vfnmadd", 0b101001>;
defm VFMSUB_V : VALUr_FV_V_F<"vfmsub", 0b101010>;
defm VFNMSUB_V : VALUr_FV_V_F<"vfnmsub", 0b101011>;

// Vector Widening Floating-Point Fused Multiply-Add Instructions
let Constraints = "@earlyclobber $vd", RVVConstraint = WidenV in {
defm VFWMACC_V : VALUr_FV_V_F<"vfwmacc", 0b111100>;
defm VFWNMACC_V : VALUr_FV_V_F<"vfwnmacc", 0b111101>;
defm VFWMSAC_V : VALUr_FV_V_F<"vfwmsac", 0b111110>;
defm VFWNMSAC_V : VALUr_FV_V_F<"vfwnmsac", 0b111111>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = WidenV

// Vector Floating-Point Square-Root Instruction
defm VFSQRT_V : VALU_FV_VS2<"vfsqrt.v", 0b010011, 0b00000>;
defm VFRSQRT7_V : VALU_FV_VS2<"vfrsqrt7.v", 0b010011, 0b00100>;
defm VFREC7_V : VALU_FV_VS2<"vfrec7.v", 0b010011, 0b00101>;

// Vector Floating-Point MIN/MAX Instructions
defm VFMIN_V : VALU_FV_V_F<"vfmin", 0b000100>;
defm VFMAX_V : VALU_FV_V_F<"vfmax", 0b000110>;

// Vector Floating-Point Sign-Injection Instructions
defm VFSGNJ_V : VALU_FV_V_F<"vfsgnj", 0b001000>;
defm VFSGNJN_V : VALU_FV_V_F<"vfsgnjn", 0b001001>;
defm VFSGNJX_V : VALU_FV_V_F<"vfsgnjx", 0b001010>;

def : InstAlias<"vfneg.v $vd, $vs$vm",
                (VFSGNJN_VV VR:$vd, VR:$vs, VR:$vs, VMaskOp:$vm)>;
def : InstAlias<"vfabs.v $vd, $vs$vm",
                (VFSGNJX_VV VR:$vd, VR:$vs, VR:$vs, VMaskOp:$vm)>;

// Vector Floating-Point Compare Instructions
let RVVConstraint = NoConstraint in {
defm VMFEQ_V : VALU_FV_V_F<"vmfeq", 0b011000>;
defm VMFNE_V : VALU_FV_V_F<"vmfne", 0b011100>;
defm VMFLT_V : VALU_FV_V_F<"vmflt", 0b011011>;
defm VMFLE_V : VALU_FV_V_F<"vmfle", 0b011001>;
defm VMFGT_V : VALU_FV_F<"vmfgt", 0b011101>;
defm VMFGE_V : VALU_FV_F<"vmfge", 0b011111>;
} // RVVConstraint = NoConstraint

def : InstAlias<"vmfgt.vv $vd, $va, $vb$vm",
                (VMFLT_VV VR:$vd, VR:$vb, VR:$va, VMaskOp:$vm), 0>;
def : InstAlias<"vmfge.vv $vd, $va, $vb$vm",
                (VMFLE_VV VR:$vd, VR:$vb, VR:$va, VMaskOp:$vm), 0>;

// Vector Floating-Point Classify Instruction
defm VFCLASS_V : VALU_FV_VS2<"vfclass.v", 0b010011, 0b10000>;

let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in {
// Vector Floating-Point Merge Instruction
def VFMERGE_VFM : RVInstVX<0b010111, OPFVF, (outs VR:$vd),
                           (ins VR:$vs2, FPR32:$rs1, VMV0:$v0),
                           "vfmerge.vfm", "$vd, $vs2, $rs1, v0"> {
  let vm = 0;
}

// Vector Floating-Point Move Instruction
let RVVConstraint = NoConstraint in
def VFMV_V_F : RVInstVX<0b010111, OPFVF, (outs VR:$vd),
                        (ins FPR32:$rs1), "vfmv.v.f", "$vd, $rs1"> {
  let vs2 = 0;
  let vm = 1;
}
} // hasSideEffects = 0, mayLoad = 0, mayStore = 0

// Single-Width Floating-Point/Integer Type-Convert Instructions
defm VFCVT_XU_F_V : VALU_FV_VS2<"vfcvt.xu.f.v", 0b010010, 0b00000>;
defm VFCVT_X_F_V : VALU_FV_VS2<"vfcvt.x.f.v", 0b010010, 0b00001>;
defm VFCVT_RTZ_XU_F_V : VALU_FV_VS2<"vfcvt.rtz.xu.f.v", 0b010010, 0b00110>;
defm VFCVT_RTZ_X_F_V : VALU_FV_VS2<"vfcvt.rtz.x.f.v", 0b010010, 0b00111>;
defm VFCVT_F_XU_V : VALU_FV_VS2<"vfcvt.f.xu.v", 0b010010, 0b00010>;
defm VFCVT_F_X_V : VALU_FV_VS2<"vfcvt.f.x.v", 0b010010, 0b00011>;

// Widening Floating-Point/Integer Type-Convert Instructions
let Constraints = "@earlyclobber $vd", RVVConstraint = WidenCvt in {
defm VFWCVT_XU_F_V : VALU_FV_VS2<"vfwcvt.xu.f.v", 0b010010, 0b01000>;
defm VFWCVT_X_F_V : VALU_FV_VS2<"vfwcvt.x.f.v", 0b010010, 0b01001>;
defm VFWCVT_RTZ_XU_F_V : VALU_FV_VS2<"vfwcvt.rtz.xu.f.v", 0b010010, 0b01110>;
defm VFWCVT_RTZ_X_F_V : VALU_FV_VS2<"vfwcvt.rtz.x.f.v", 0b010010, 0b01111>;
defm VFWCVT_F_XU_V : VALU_FV_VS2<"vfwcvt.f.xu.v", 0b010010, 0b01010>;
defm VFWCVT_F_X_V : VALU_FV_VS2<"vfwcvt.f.x.v", 0b010010, 0b01011>;
defm VFWCVT_F_F_V : VALU_FV_VS2<"vfwcvt.f.f.v", 0b010010, 0b01100>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = WidenCvt

// Narrowing Floating-Point/Integer Type-Convert Instructions
let Constraints = "@earlyclobber $vd" in {
defm VFNCVT_XU_F_W : VALU_FV_VS2<"vfncvt.xu.f.w", 0b010010, 0b10000>;
defm VFNCVT_X_F_W : VALU_FV_VS2<"vfncvt.x.f.w", 0b010010, 0b10001>;
defm VFNCVT_RTZ_XU_F_W : VALU_FV_VS2<"vfncvt.rtz.xu.f.w", 0b010010, 0b10110>;
defm VFNCVT_RTZ_X_F_W : VALU_FV_VS2<"vfncvt.rtz.x.f.w", 0b010010, 0b10111>;
defm VFNCVT_F_XU_W : VALU_FV_VS2<"vfncvt.f.xu.w", 0b010010, 0b10010>;
defm VFNCVT_F_X_W : VALU_FV_VS2<"vfncvt.f.x.w", 0b010010, 0b10011>;
defm VFNCVT_F_F_W : VALU_FV_VS2<"vfncvt.f.f.w", 0b010010, 0b10100>;
defm VFNCVT_ROD_F_F_W : VALU_FV_VS2<"vfncvt.rod.f.f.w", 0b010010, 0b10101>;
} // Constraints = "@earlyclobber $vd"
} // Predicates = [HasStdExtV, HasStdExtF]

let Predicates = [HasStdExtV] in {
// Vector Single-Width Integer Reduction Instructions
let RVVConstraint = NoConstraint in {
defm VREDSUM : VALU_MV_V<"vredsum", 0b000000>;
defm VREDMAXU : VALU_MV_V<"vredmaxu", 0b000110>;
defm VREDMAX : VALU_MV_V<"vredmax", 0b000111>;
defm VREDMINU : VALU_MV_V<"vredminu", 0b000100>;
defm VREDMIN : VALU_MV_V<"vredmin", 0b000101>;
defm VREDAND : VALU_MV_V<"vredand", 0b000001>;
defm VREDOR : VALU_MV_V<"vredor", 0b000010>;
defm VREDXOR : VALU_MV_V<"vredxor", 0b000011>;
} // RVVConstraint = NoConstraint

// Vector Widening Integer Reduction Instructions
let Constraints = "@earlyclobber $vd", RVVConstraint = NoConstraint in {
// Set earlyclobber for following instructions for second and mask operands.
// This has the downside that the earlyclobber constraint is too coarse and
// will impose unnecessary restrictions by not allowing the destination to
// overlap with the first (wide) operand.
defm VWREDSUMU : VALU_IV_V<"vwredsumu", 0b110000>;
defm VWREDSUM : VALU_IV_V<"vwredsum", 0b110001>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = NoConstraint
} // Predicates = [HasStdExtV]

let Predicates = [HasStdExtV, HasStdExtF] in {
// Vector Single-Width Floating-Point Reduction Instructions
let RVVConstraint = NoConstraint in {
defm VFREDOSUM : VALU_FV_V<"vfredosum", 0b000011>;
defm VFREDSUM : VALU_FV_V<"vfredsum", 0b000001>;
defm VFREDMAX : VALU_FV_V<"vfredmax", 0b000111>;
defm VFREDMIN : VALU_FV_V<"vfredmin", 0b000101>;
} // RVVConstraint = NoConstraint

// Vector Widening Floating-Point Reduction Instructions
let Constraints = "@earlyclobber $vd", RVVConstraint = NoConstraint in {
// Set earlyclobber for following instructions for second and mask operands.
// This has the downside that the earlyclobber constraint is too coarse and
// will impose unnecessary restrictions by not allowing the destination to
// overlap with the first (wide) operand.
defm VFWREDOSUM : VALU_FV_V<"vfwredosum", 0b110011>;
defm VFWREDSUM : VALU_FV_V<"vfwredsum", 0b110001>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = NoConstraint
} // Predicates = [HasStdExtV, HasStdExtF]

let Predicates = [HasStdExtV] in {
// Vector Mask-Register Logical Instructions
let RVVConstraint = NoConstraint in {
defm VMAND_M : VALU_MV_Mask<"vmand", 0b011001, "m">;
defm VMNAND_M : VALU_MV_Mask<"vmnand", 0b011101, "m">;
defm VMANDNOT_M : VALU_MV_Mask<"vmandnot", 0b011000, "m">;
defm VMXOR_M : VALU_MV_Mask<"vmxor", 0b011011, "m">;
defm VMOR_M : VALU_MV_Mask<"vmor", 0b011010, "m">;
defm VMNOR_M : VALU_MV_Mask<"vmnor", 0b011110, "m">;
defm VMORNOT_M : VALU_MV_Mask<"vmornot", 0b011100, "m">;
defm VMXNOR_M : VALU_MV_Mask<"vmxnor", 0b011111, "m">;
}

def : InstAlias<"vmmv.m $vd, $vs",
                (VMAND_MM VR:$vd, VR:$vs, VR:$vs)>;
def : InstAlias<"vmclr.m $vd",
                (VMXOR_MM VR:$vd, VR:$vd, VR:$vd)>;
def : InstAlias<"vmset.m $vd",
                (VMXNOR_MM VR:$vd, VR:$vd, VR:$vd)>;
def : InstAlias<"vmnot.m $vd, $vs",
                (VMNAND_MM VR:$vd, VR:$vs, VR:$vs)>;

let hasSideEffects = 0, mayLoad = 0, mayStore = 0,
    RVVConstraint = NoConstraint in {
// Vector mask population count vpopc
def VPOPC_M : RVInstV<0b010000, 0b10000, OPMVV, (outs GPR:$vd),
                      (ins VR:$vs2, VMaskOp:$vm),
                      "vpopc.m", "$vd, $vs2$vm">;

// vfirst find-first-set mask bit
def VFIRST_M : RVInstV<0b010000, 0b10001, OPMVV, (outs GPR:$vd),
                       (ins VR:$vs2, VMaskOp:$vm),
                       "vfirst.m", "$vd, $vs2$vm">;
} // hasSideEffects = 0, mayLoad = 0, mayStore = 0

let Constraints = "@earlyclobber $vd", RVVConstraint = Iota in {
// vmsbf.m set-before-first mask bit
defm VMSBF_M : VALU_MV_VS2<"vmsbf.m", 0b010100, 0b00001>;
// vmsif.m set-including-first mask bit
defm VMSIF_M : VALU_MV_VS2<"vmsif.m", 0b010100, 0b00011>;
// vmsof.m set-only-first mask bit
defm VMSOF_M : VALU_MV_VS2<"vmsof.m", 0b010100, 0b00010>;
// Vector Iota Instruction
defm VIOTA_M : VALU_MV_VS2<"viota.m", 0b010100, 0b10000>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = Iota

// Vector Element Index Instruction
let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in {
def VID_V : RVInstV<0b010100, 0b10001, OPMVV, (outs VR:$vd),
                    (ins VMaskOp:$vm), "vid.v", "$vd$vm"> {
  let vs2 = 0;
}

// Integer Scalar Move Instructions
let vm = 1, RVVConstraint = NoConstraint in {
def VMV_X_S : RVInstV<0b010000, 0b00000, OPMVV, (outs GPR:$vd),
                      (ins VR:$vs2), "vmv.x.s", "$vd, $vs2">;
let Constraints = "$vd = $vd_wb" in
def VMV_S_X : RVInstV2<0b010000, 0b00000, OPMVX, (outs VR:$vd_wb),
                       (ins VR:$vd, GPR:$rs1), "vmv.s.x", "$vd, $rs1">;

}
} // hasSideEffects = 0, mayLoad = 0, mayStore = 0
} // Predicates = [HasStdExtV]

let Predicates = [HasStdExtV, HasStdExtF] in {
let hasSideEffects = 0, mayLoad = 0, mayStore = 0, vm = 1,
    RVVConstraint = NoConstraint in {
// Floating-Point Scalar Move Instructions
def VFMV_F_S : RVInstV<0b010000, 0b00000, OPFVV, (outs FPR32:$vd),
                       (ins VR:$vs2), "vfmv.f.s", "$vd, $vs2">;
let Constraints = "$vd = $vd_wb" in
def VFMV_S_F : RVInstV2<0b010000, 0b00000, OPFVF, (outs VR:$vd_wb),
                        (ins VR:$vd, FPR32:$rs1), "vfmv.s.f", "$vd, $rs1">;

} // hasSideEffects = 0, mayLoad = 0, mayStore = 0, vm = 1
} // Predicates = [HasStdExtV, HasStdExtF]

let Predicates = [HasStdExtV] in {
// Vector Slide Instructions
let Constraints = "@earlyclobber $vd", RVVConstraint = SlideUp in {
defm VSLIDEUP_V : VALU_IV_X_I<"vslideup", 0b001110, uimm5>;
defm VSLIDE1UP_V : VALU_MV_X<"vslide1up", 0b001110>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = SlideUp
defm VSLIDEDOWN_V : VALU_IV_X_I<"vslidedown", 0b001111, uimm5>;
defm VSLIDE1DOWN_V : VALU_MV_X<"vslide1down", 0b001111>;
} // Predicates = [HasStdExtV]

let Predicates = [HasStdExtV, HasStdExtF] in {
let Constraints = "@earlyclobber $vd", RVVConstraint = SlideUp in {
defm VFSLIDE1UP_V : VALU_FV_F<"vfslide1up", 0b001110>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = SlideUp
defm VFSLIDE1DOWN_V : VALU_FV_F<"vfslide1down", 0b001111>;
} // Predicates = [HasStdExtV, HasStdExtF]

let Predicates = [HasStdExtV] in {
// Vector Register Gather Instruction
let Constraints = "@earlyclobber $vd", RVVConstraint = Vrgather in {
defm VRGATHER_V : VALU_IV_V_X_I<"vrgather", 0b001100, uimm5>;
def VRGATHEREI16_VV : VALUVV<0b001110, OPIVV, "vrgatherei16.vv">;
} // Constraints = "@earlyclobber $vd", RVVConstraint = Vrgather

// Vector Compress Instruction
let Constraints = "@earlyclobber $vd", RVVConstraint = Vcompress in {
defm VCOMPRESS_V : VALU_MV_Mask<"vcompress", 0b010111>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = Vcompress

let hasSideEffects = 0, mayLoad = 0, mayStore = 0,
    RVVConstraint = NoConstraint in {
foreach nf = [1, 2, 4, 8] in {
  def VMV#nf#R_V : RVInstV<0b100111, !add(nf, -1), OPIVI, (outs VR:$vd),
                           (ins VR:$vs2), "vmv" # nf # "r.v",
                           "$vd, $vs2"> {
    let Uses = [];
    let vm = 1;
  }
}
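// With nf in {1, 2, 4, 8} this foreach yields vmv1r.v, vmv2r.v, vmv4r.v and
// vmv8r.v, passing nf - 1 (via !add(nf, -1)) as the encoded value.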
} // hasSideEffects = 0, mayLoad = 0, mayStore = 0
} // Predicates = [HasStdExtV]

let Predicates = [HasStdExtZvlsseg] in {
  foreach nf=2-8 in {
    def VLSEG#nf#E8_V : VUnitStrideSegmentLoad<!add(nf, -1), LUMOPUnitStride, LSWidth8, "vlseg"#nf#"e8.v">;
    def VLSEG#nf#E16_V : VUnitStrideSegmentLoad<!add(nf, -1), LUMOPUnitStride, LSWidth16, "vlseg"#nf#"e16.v">;
    def VLSEG#nf#E32_V : VUnitStrideSegmentLoad<!add(nf, -1), LUMOPUnitStride, LSWidth32, "vlseg"#nf#"e32.v">;
    def VLSEG#nf#E64_V : VUnitStrideSegmentLoad<!add(nf, -1), LUMOPUnitStride, LSWidth64, "vlseg"#nf#"e64.v">;

    def VLSEG#nf#E8FF_V : VUnitStrideSegmentLoad<!add(nf, -1), LUMOPUnitStrideFF, LSWidth8, "vlseg"#nf#"e8ff.v">;
    def VLSEG#nf#E16FF_V : VUnitStrideSegmentLoad<!add(nf, -1), LUMOPUnitStrideFF, LSWidth16, "vlseg"#nf#"e16ff.v">;
    def VLSEG#nf#E32FF_V : VUnitStrideSegmentLoad<!add(nf, -1), LUMOPUnitStrideFF, LSWidth32, "vlseg"#nf#"e32ff.v">;
    def VLSEG#nf#E64FF_V : VUnitStrideSegmentLoad<!add(nf, -1), LUMOPUnitStrideFF, LSWidth64, "vlseg"#nf#"e64ff.v">;

    def VSSEG#nf#E8_V : VUnitStrideSegmentStore<!add(nf, -1), LSWidth8, "vsseg"#nf#"e8.v">;
    def VSSEG#nf#E16_V : VUnitStrideSegmentStore<!add(nf, -1), LSWidth16, "vsseg"#nf#"e16.v">;
    def VSSEG#nf#E32_V : VUnitStrideSegmentStore<!add(nf, -1), LSWidth32, "vsseg"#nf#"e32.v">;
    def VSSEG#nf#E64_V : VUnitStrideSegmentStore<!add(nf, -1), LSWidth64, "vsseg"#nf#"e64.v">;

    // Vector Strided Instructions
    def VLSSEG#nf#E8_V : VStridedSegmentLoad<!add(nf, -1), LSWidth8, "vlsseg"#nf#"e8.v">;
    def VLSSEG#nf#E16_V : VStridedSegmentLoad<!add(nf, -1), LSWidth16, "vlsseg"#nf#"e16.v">;
    def VLSSEG#nf#E32_V : VStridedSegmentLoad<!add(nf, -1), LSWidth32, "vlsseg"#nf#"e32.v">;
    def VLSSEG#nf#E64_V : VStridedSegmentLoad<!add(nf, -1), LSWidth64, "vlsseg"#nf#"e64.v">;

    def VSSSEG#nf#E8_V : VStridedSegmentStore<!add(nf, -1), LSWidth8, "vssseg"#nf#"e8.v">;
    def VSSSEG#nf#E16_V : VStridedSegmentStore<!add(nf, -1), LSWidth16, "vssseg"#nf#"e16.v">;
    def VSSSEG#nf#E32_V : VStridedSegmentStore<!add(nf, -1), LSWidth32, "vssseg"#nf#"e32.v">;
    def VSSSEG#nf#E64_V : VStridedSegmentStore<!add(nf, -1), LSWidth64, "vssseg"#nf#"e64.v">;

    // Vector Indexed Instructions
    def VLUXSEG#nf#EI8_V : VIndexedSegmentLoad<!add(nf, -1), MOPLDIndexedUnord,
                                               LSWidth8, "vluxseg"#nf#"ei8.v">;
    def VLUXSEG#nf#EI16_V : VIndexedSegmentLoad<!add(nf, -1), MOPLDIndexedUnord,
                                                LSWidth16, "vluxseg"#nf#"ei16.v">;
    def VLUXSEG#nf#EI32_V : VIndexedSegmentLoad<!add(nf, -1), MOPLDIndexedUnord,
                                                LSWidth32, "vluxseg"#nf#"ei32.v">;
    def VLUXSEG#nf#EI64_V : VIndexedSegmentLoad<!add(nf, -1), MOPLDIndexedUnord,
                                                LSWidth64, "vluxseg"#nf#"ei64.v">;

    def VLOXSEG#nf#EI8_V : VIndexedSegmentLoad<!add(nf, -1), MOPLDIndexedOrder,
                                               LSWidth8, "vloxseg"#nf#"ei8.v">;
    def VLOXSEG#nf#EI16_V : VIndexedSegmentLoad<!add(nf, -1), MOPLDIndexedOrder,
                                                LSWidth16, "vloxseg"#nf#"ei16.v">;
    def VLOXSEG#nf#EI32_V : VIndexedSegmentLoad<!add(nf, -1), MOPLDIndexedOrder,
                                                LSWidth32, "vloxseg"#nf#"ei32.v">;
    def VLOXSEG#nf#EI64_V : VIndexedSegmentLoad<!add(nf, -1), MOPLDIndexedOrder,
                                                LSWidth64, "vloxseg"#nf#"ei64.v">;

    def VSUXSEG#nf#EI8_V : VIndexedSegmentStore<!add(nf, -1), MOPSTIndexedUnord,
                                                LSWidth8, "vsuxseg"#nf#"ei8.v">;
    def VSUXSEG#nf#EI16_V : VIndexedSegmentStore<!add(nf, -1), MOPSTIndexedUnord,
                                                 LSWidth16, "vsuxseg"#nf#"ei16.v">;
    def VSUXSEG#nf#EI32_V : VIndexedSegmentStore<!add(nf, -1), MOPSTIndexedUnord,
                                                 LSWidth32, "vsuxseg"#nf#"ei32.v">;
    def VSUXSEG#nf#EI64_V : VIndexedSegmentStore<!add(nf, -1), MOPSTIndexedUnord,
                                                 LSWidth64, "vsuxseg"#nf#"ei64.v">;

    def VSOXSEG#nf#EI8_V : VIndexedSegmentStore<!add(nf, -1), MOPSTIndexedOrder,
                                                LSWidth8, "vsoxseg"#nf#"ei8.v">;
    def VSOXSEG#nf#EI16_V : VIndexedSegmentStore<!add(nf, -1), MOPSTIndexedOrder,
                                                 LSWidth16, "vsoxseg"#nf#"ei16.v">;
    def VSOXSEG#nf#EI32_V : VIndexedSegmentStore<!add(nf, -1), MOPSTIndexedOrder,
                                                 LSWidth32, "vsoxseg"#nf#"ei32.v">;
    def VSOXSEG#nf#EI64_V : VIndexedSegmentStore<!add(nf, -1), MOPSTIndexedOrder,
                                                 LSWidth64, "vsoxseg"#nf#"ei64.v">;
  }
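  // e.g. nf = 2 yields vlseg2e8.v through vsoxseg2ei64.v; each def passes
  // nf - 1 (via !add(nf, -1)) as the value encoded in the nf field.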
} // Predicates = [HasStdExtZvlsseg]

let Predicates = [HasStdExtZvamo, HasStdExtA] in {
  defm VAMOSWAPEI8 : VAMO<AMOOPVamoSwap, LSWidth8, "vamoswapei8.v">;
  defm VAMOSWAPEI16 : VAMO<AMOOPVamoSwap, LSWidth16, "vamoswapei16.v">;
  defm VAMOSWAPEI32 : VAMO<AMOOPVamoSwap, LSWidth32, "vamoswapei32.v">;

  defm VAMOADDEI8 : VAMO<AMOOPVamoAdd, LSWidth8, "vamoaddei8.v">;
  defm VAMOADDEI16 : VAMO<AMOOPVamoAdd, LSWidth16, "vamoaddei16.v">;
  defm VAMOADDEI32 : VAMO<AMOOPVamoAdd, LSWidth32, "vamoaddei32.v">;

  defm VAMOXOREI8 : VAMO<AMOOPVamoXor, LSWidth8, "vamoxorei8.v">;
  defm VAMOXOREI16 : VAMO<AMOOPVamoXor, LSWidth16, "vamoxorei16.v">;
  defm VAMOXOREI32 : VAMO<AMOOPVamoXor, LSWidth32, "vamoxorei32.v">;

  defm VAMOANDEI8 : VAMO<AMOOPVamoAnd, LSWidth8, "vamoandei8.v">;
  defm VAMOANDEI16 : VAMO<AMOOPVamoAnd, LSWidth16, "vamoandei16.v">;
  defm VAMOANDEI32 : VAMO<AMOOPVamoAnd, LSWidth32, "vamoandei32.v">;

  defm VAMOOREI8 : VAMO<AMOOPVamoOr, LSWidth8, "vamoorei8.v">;
  defm VAMOOREI16 : VAMO<AMOOPVamoOr, LSWidth16, "vamoorei16.v">;
  defm VAMOOREI32 : VAMO<AMOOPVamoOr, LSWidth32, "vamoorei32.v">;

  defm VAMOMINEI8 : VAMO<AMOOPVamoMin, LSWidth8, "vamominei8.v">;
  defm VAMOMINEI16 : VAMO<AMOOPVamoMin, LSWidth16, "vamominei16.v">;
  defm VAMOMINEI32 : VAMO<AMOOPVamoMin, LSWidth32, "vamominei32.v">;

  defm VAMOMAXEI8 : VAMO<AMOOPVamoMax, LSWidth8, "vamomaxei8.v">;
  defm VAMOMAXEI16 : VAMO<AMOOPVamoMax, LSWidth16, "vamomaxei16.v">;
  defm VAMOMAXEI32 : VAMO<AMOOPVamoMax, LSWidth32, "vamomaxei32.v">;

  defm VAMOMINUEI8 : VAMO<AMOOPVamoMinu, LSWidth8, "vamominuei8.v">;
  defm VAMOMINUEI16 : VAMO<AMOOPVamoMinu, LSWidth16, "vamominuei16.v">;
  defm VAMOMINUEI32 : VAMO<AMOOPVamoMinu, LSWidth32, "vamominuei32.v">;

  defm VAMOMAXUEI8 : VAMO<AMOOPVamoMaxu, LSWidth8, "vamomaxuei8.v">;
  defm VAMOMAXUEI16 : VAMO<AMOOPVamoMaxu, LSWidth16, "vamomaxuei16.v">;
  defm VAMOMAXUEI32 : VAMO<AMOOPVamoMaxu, LSWidth32, "vamomaxuei32.v">;
} // Predicates = [HasStdExtZvamo, HasStdExtA]

let Predicates = [HasStdExtZvamo, HasStdExtA, IsRV64] in {
  defm VAMOSWAPEI64 : VAMO<AMOOPVamoSwap, LSWidth64, "vamoswapei64.v">;
  defm VAMOADDEI64 : VAMO<AMOOPVamoAdd, LSWidth64, "vamoaddei64.v">;
  defm VAMOXOREI64 : VAMO<AMOOPVamoXor, LSWidth64, "vamoxorei64.v">;
  defm VAMOANDEI64 : VAMO<AMOOPVamoAnd, LSWidth64, "vamoandei64.v">;
  defm VAMOOREI64 : VAMO<AMOOPVamoOr, LSWidth64, "vamoorei64.v">;
  defm VAMOMINEI64 : VAMO<AMOOPVamoMin, LSWidth64, "vamominei64.v">;
  defm VAMOMAXEI64 : VAMO<AMOOPVamoMax, LSWidth64, "vamomaxei64.v">;
  defm VAMOMINUEI64 : VAMO<AMOOPVamoMinu, LSWidth64, "vamominuei64.v">;
  defm VAMOMAXUEI64 : VAMO<AMOOPVamoMaxu, LSWidth64, "vamomaxuei64.v">;
} // Predicates = [HasStdExtZvamo, HasStdExtA, IsRV64]

include "RISCVInstrInfoVPseudos.td"

diff --git a/llvm/test/MC/RISCV/rvv/compare.s b/llvm/test/MC/RISCV/rvv/compare.s
index 00f883f327fc..28bc8b55369a 100644
--- a/llvm/test/MC/RISCV/rvv/compare.s
+++ b/llvm/test/MC/RISCV/rvv/compare.s
@@ -1,438 +1,468 @@
# RUN: llvm-mc -triple=riscv64 -show-encoding --mattr=+experimental-v %s \
# RUN:   | FileCheck %s --check-prefixes=CHECK-ENCODING
# RUN: not llvm-mc -triple=riscv64 -show-encoding %s 2>&1 \
# RUN:   | FileCheck %s --check-prefix=CHECK-ERROR
# RUN: llvm-mc -triple=riscv64 -filetype=obj --mattr=+experimental-v %s \
# RUN:   | llvm-objdump -d --mattr=+experimental-v - \
# RUN:   | FileCheck %s --check-prefix=CHECK-INST
# RUN: llvm-mc -triple=riscv64 -filetype=obj --mattr=+experimental-v %s \
# RUN:   | llvm-objdump -d - | FileCheck %s --check-prefix=CHECK-UNKNOWN
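# The four prefixes cover: CHECK-ENCODING from llvm-mc with +experimental-v,
# CHECK-ERROR from llvm-mc without it, CHECK-INST from llvm-objdump with the
# attribute, and CHECK-UNKNOWN from a plain llvm-objdump of the same object.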
|
|
|
|
vmslt.vv v0, v4, v20, v0.t
|
|
# CHECK-INST: vmslt.vv v0, v4, v20, v0.t
|
|
# CHECK-ENCODING: [0x57,0x00,0x4a,0x6c]
|
|
# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
|
|
# CHECK-UNKNOWN: 57 00 4a 6c <unknown>
|
|
|
|
vmseq.vv v8, v4, v20, v0.t
|
|
# CHECK-INST: vmseq.vv v8, v4, v20, v0.t
|
|
# CHECK-ENCODING: [0x57,0x04,0x4a,0x60]
|
|
# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
|
|
# CHECK-UNKNOWN: 57 04 4a 60 <unknown>
|
|
|
|
vmseq.vv v8, v4, v20
|
|
# CHECK-INST: vmseq.vv v8, v4, v20
|
|
# CHECK-ENCODING: [0x57,0x04,0x4a,0x62]
|
|
# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
|
|
# CHECK-UNKNOWN: 57 04 4a 62 <unknown>
|
|
|
|
vmseq.vx v8, v4, a0, v0.t
|
|
# CHECK-INST: vmseq.vx v8, v4, a0, v0.t
|
|
# CHECK-ENCODING: [0x57,0x44,0x45,0x60]
|
|
# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
|
|
# CHECK-UNKNOWN: 57 44 45 60 <unknown>
|
|
|
|
vmseq.vx v8, v4, a0
|
|
# CHECK-INST: vmseq.vx v8, v4, a0
|
|
# CHECK-ENCODING: [0x57,0x44,0x45,0x62]
|
|
# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
|
|
# CHECK-UNKNOWN: 57 44 45 62 <unknown>
|
|
|
|
vmseq.vi v8, v4, 15, v0.t
|
|
# CHECK-INST: vmseq.vi v8, v4, 15, v0.t
|
|
# CHECK-ENCODING: [0x57,0xb4,0x47,0x60]
|
|
# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
|
|
# CHECK-UNKNOWN: 57 b4 47 60 <unknown>
|
|
|
|
vmseq.vi v8, v4, 15
|
|
# CHECK-INST: vmseq.vi v8, v4, 15
|
|
# CHECK-ENCODING: [0x57,0xb4,0x47,0x62]
|
|
# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
|
|
# CHECK-UNKNOWN: 57 b4 47 62 <unknown>
|
|
|
|
vmsne.vv v8, v4, v20, v0.t
|
|
# CHECK-INST: vmsne.vv v8, v4, v20, v0.t
|
|
# CHECK-ENCODING: [0x57,0x04,0x4a,0x64]
|
|
# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
|
|
# CHECK-UNKNOWN: 57 04 4a 64 <unknown>
|
|
|
|
vmsne.vv v8, v4, v20
|
|
# CHECK-INST: vmsne.vv v8, v4, v20
|
|
# CHECK-ENCODING: [0x57,0x04,0x4a,0x66]
|
|
# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
|
|
# CHECK-UNKNOWN: 57 04 4a 66 <unknown>
|
|
|
|
vmsne.vx v8, v4, a0, v0.t
|
|
# CHECK-INST: vmsne.vx v8, v4, a0, v0.t
|
|
# CHECK-ENCODING: [0x57,0x44,0x45,0x64]
|
|
# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
|
|
# CHECK-UNKNOWN: 57 44 45 64 <unknown>
|
|
|
|
vmsne.vx v8, v4, a0
|
|
# CHECK-INST: vmsne.vx v8, v4, a0
|
|
# CHECK-ENCODING: [0x57,0x44,0x45,0x66]
|
|
# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
|
|
# CHECK-UNKNOWN: 57 44 45 66 <unknown>
|
|
|
|
vmsne.vi v8, v4, 15, v0.t
|
|
# CHECK-INST: vmsne.vi v8, v4, 15, v0.t
|
|
# CHECK-ENCODING: [0x57,0xb4,0x47,0x64]
|
|
# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
|
|
# CHECK-UNKNOWN: 57 b4 47 64 <unknown>
|
|
|
|
vmsne.vi v8, v4, 15
|
|
# CHECK-INST: vmsne.vi v8, v4, 15
|
|
# CHECK-ENCODING: [0x57,0xb4,0x47,0x66]
|
|
# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
|
|
# CHECK-UNKNOWN: 57 b4 47 66 <unknown>
|
|
|
|
vmsltu.vv v8, v4, v20, v0.t
|
|
# CHECK-INST: vmsltu.vv v8, v4, v20, v0.t
|
|
# CHECK-ENCODING: [0x57,0x04,0x4a,0x68]
|
|
# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
|
|
# CHECK-UNKNOWN: 57 04 4a 68 <unknown>
|
|
|
|
vmsltu.vv v8, v4, v20
|
|
# CHECK-INST: vmsltu.vv v8, v4, v20
|
|
# CHECK-ENCODING: [0x57,0x04,0x4a,0x6a]
|
|
# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
|
|
# CHECK-UNKNOWN: 57 04 4a 6a <unknown>
|
|
|
|
vmsltu.vx v8, v4, a0, v0.t
|
|
# CHECK-INST: vmsltu.vx v8, v4, a0, v0.t
|
|
# CHECK-ENCODING: [0x57,0x44,0x45,0x68]
|
|
# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
|
|
# CHECK-UNKNOWN: 57 44 45 68 <unknown>
|
|
|
|
vmsltu.vx v8, v4, a0
|
|
# CHECK-INST: vmsltu.vx v8, v4, a0
|
|
# CHECK-ENCODING: [0x57,0x44,0x45,0x6a]
|
|
# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
|
|
# CHECK-UNKNOWN: 57 44 45 6a <unknown>
|
|
|
|
vmslt.vv v8, v4, v20, v0.t
|
|
# CHECK-INST: vmslt.vv v8, v4, v20, v0.t
|
|
# CHECK-ENCODING: [0x57,0x04,0x4a,0x6c]
|
|
# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
|
|
# CHECK-UNKNOWN: 57 04 4a 6c <unknown>
|
|
|
|
vmslt.vv v8, v4, v20
|
|
# CHECK-INST: vmslt.vv v8, v4, v20
|
|
# CHECK-ENCODING: [0x57,0x04,0x4a,0x6e]
|
|
# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
|
|
# CHECK-UNKNOWN: 57 04 4a 6e <unknown>
|
|
|
|
vmslt.vx v8, v4, a0, v0.t
|
|
# CHECK-INST: vmslt.vx v8, v4, a0, v0.t
|
|
# CHECK-ENCODING: [0x57,0x44,0x45,0x6c]
|
|
# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
|
|
# CHECK-UNKNOWN: 57 44 45 6c <unknown>
|
|
|
|
vmslt.vx v8, v4, a0
|
|
# CHECK-INST: vmslt.vx v8, v4, a0
|
|
# CHECK-ENCODING: [0x57,0x44,0x45,0x6e]
|
|
# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
|
|
# CHECK-UNKNOWN: 57 44 45 6e <unknown>
|
|
|
|
vmsleu.vv v8, v4, v20, v0.t
|
|
# CHECK-INST: vmsleu.vv v8, v4, v20, v0.t
|
|
# CHECK-ENCODING: [0x57,0x04,0x4a,0x70]
|
|
# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
|
|
# CHECK-UNKNOWN: 57 04 4a 70 <unknown>
|
|
|
|
vmsleu.vv v8, v4, v20
|
|
# CHECK-INST: vmsleu.vv v8, v4, v20
|
|
# CHECK-ENCODING: [0x57,0x04,0x4a,0x72]
|
|
# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
|
|
# CHECK-UNKNOWN: 57 04 4a 72 <unknown>
|
|
|
|
vmsleu.vx v8, v4, a0, v0.t
|
|
# CHECK-INST: vmsleu.vx v8, v4, a0, v0.t
|
|
# CHECK-ENCODING: [0x57,0x44,0x45,0x70]
|
|
# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
|
|
# CHECK-UNKNOWN: 57 44 45 70 <unknown>
|
|
|
|
vmsleu.vx v8, v4, a0
|
|
# CHECK-INST: vmsleu.vx v8, v4, a0
|
|
# CHECK-ENCODING: [0x57,0x44,0x45,0x72]
|
|
# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
|
|
# CHECK-UNKNOWN: 57 44 45 72 <unknown>
|
|
|
|
vmsleu.vi v8, v4, 15, v0.t
|
|
# CHECK-INST: vmsleu.vi v8, v4, 15, v0.t
|
|
# CHECK-ENCODING: [0x57,0xb4,0x47,0x70]
|
|
# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
|
|
# CHECK-UNKNOWN: 57 b4 47 70 <unknown>
|
|
|
|
vmsleu.vi v8, v4, 15
|
|
# CHECK-INST: vmsleu.vi v8, v4, 15
|
|
# CHECK-ENCODING: [0x57,0xb4,0x47,0x72]
|
|
# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
|
|
# CHECK-UNKNOWN: 57 b4 47 72 <unknown>
|
|
|
|
vmsle.vv v8, v4, v20, v0.t
|
|
# CHECK-INST: vmsle.vv v8, v4, v20, v0.t
|
|
# CHECK-ENCODING: [0x57,0x04,0x4a,0x74]
|
|
# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
|
|
# CHECK-UNKNOWN: 57 04 4a 74 <unknown>
|
|
|
|
vmsle.vv v8, v4, v20
|
|
# CHECK-INST: vmsle.vv v8, v4, v20
|
|
# CHECK-ENCODING: [0x57,0x04,0x4a,0x76]
|
|
# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
|
|
# CHECK-UNKNOWN: 57 04 4a 76 <unknown>
|
|
|
|
vmsle.vx v8, v4, a0, v0.t
|
|
# CHECK-INST: vmsle.vx v8, v4, a0, v0.t
|
|
# CHECK-ENCODING: [0x57,0x44,0x45,0x74]
|
|
# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
|
|
# CHECK-UNKNOWN: 57 44 45 74 <unknown>
|
|
|
|
vmsle.vx v8, v4, a0
|
|
# CHECK-INST: vmsle.vx v8, v4, a0
|
|
# CHECK-ENCODING: [0x57,0x44,0x45,0x76]
|
|
# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
|
|
# CHECK-UNKNOWN: 57 44 45 76 <unknown>
|
|
|
|
vmsle.vi v8, v4, 15, v0.t
|
|
# CHECK-INST: vmsle.vi v8, v4, 15, v0.t
|
|
# CHECK-ENCODING: [0x57,0xb4,0x47,0x74]
|
|
# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
|
|
# CHECK-UNKNOWN: 57 b4 47 74 <unknown>
|
|
|
|
vmsle.vi v8, v4, 15
|
|
# CHECK-INST: vmsle.vi v8, v4, 15
|
|
# CHECK-ENCODING: [0x57,0xb4,0x47,0x76]
|
|
# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
|
|
# CHECK-UNKNOWN: 57 b4 47 76 <unknown>
|
|
|
|
vmsgtu.vx v8, v4, a0, v0.t
|
|
# CHECK-INST: vmsgtu.vx v8, v4, a0, v0.t
|
|
# CHECK-ENCODING: [0x57,0x44,0x45,0x78]
|
|
# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
|
|
# CHECK-UNKNOWN: 57 44 45 78 <unknown>
|
|
|
|
vmsgtu.vx v8, v4, a0
|
|
# CHECK-INST: vmsgtu.vx v8, v4, a0
|
|
# CHECK-ENCODING: [0x57,0x44,0x45,0x7a]
|
|
# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
|
|
# CHECK-UNKNOWN: 57 44 45 7a <unknown>
|
|
|
|
vmsgtu.vi v8, v4, 15, v0.t
|
|
# CHECK-INST: vmsgtu.vi v8, v4, 15, v0.t
|
|
# CHECK-ENCODING: [0x57,0xb4,0x47,0x78]
|
|
# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
|
|
# CHECK-UNKNOWN: 57 b4 47 78 <unknown>
|
|
|
|
vmsgtu.vi v8, v4, 15
|
|
# CHECK-INST: vmsgtu.vi v8, v4, 15
|
|
# CHECK-ENCODING: [0x57,0xb4,0x47,0x7a]
|
|
# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
|
|
# CHECK-UNKNOWN: 57 b4 47 7a <unknown>
|
|
|
|
vmsgt.vx v8, v4, a0, v0.t
|
|
# CHECK-INST: vmsgt.vx v8, v4, a0, v0.t
|
|
# CHECK-ENCODING: [0x57,0x44,0x45,0x7c]
|
|
# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
|
|
# CHECK-UNKNOWN: 57 44 45 7c <unknown>
|
|
|
|
vmsgt.vx v8, v4, a0
|
|
# CHECK-INST: vmsgt.vx v8, v4, a0
|
|
# CHECK-ENCODING: [0x57,0x44,0x45,0x7e]
|
|
# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
|
|
# CHECK-UNKNOWN: 57 44 45 7e <unknown>
|
|
|
|
vmsgt.vi v8, v4, 15, v0.t
|
|
# CHECK-INST: vmsgt.vi v8, v4, 15, v0.t
|
|
# CHECK-ENCODING: [0x57,0xb4,0x47,0x7c]
|
|
# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
|
|
# CHECK-UNKNOWN: 57 b4 47 7c <unknown>
|
|
|
|
vmsgt.vi v8, v4, 15
|
|
# CHECK-INST: vmsgt.vi v8, v4, 15
|
|
# CHECK-ENCODING: [0x57,0xb4,0x47,0x7e]
|
|
# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
|
|
# CHECK-UNKNOWN: 57 b4 47 7e <unknown>
|
|
|
|
vmsgtu.vv v8, v20, v4, v0.t
|
|
# CHECK-INST: vmsltu.vv v8, v4, v20, v0.t
|
|
# CHECK-ENCODING: [0x57,0x04,0x4a,0x68]
|
|
# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
|
|
# CHECK-UNKNOWN: 57 04 4a 68 <unknown>
|
|
|
|
vmsgtu.vv v8, v20, v4
|
|
# CHECK-INST: vmsltu.vv v8, v4, v20
|
|
# CHECK-ENCODING: [0x57,0x04,0x4a,0x6a]
|
|
# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
|
|
# CHECK-UNKNOWN: 57 04 4a 6a <unknown>
|
|
|
|
vmsgt.vv v8, v20, v4, v0.t
|
|
# CHECK-INST: vmslt.vv v8, v4, v20, v0.t
|
|
# CHECK-ENCODING: [0x57,0x04,0x4a,0x6c]
|
|
# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
|
|
# CHECK-UNKNOWN: 57 04 4a 6c <unknown>
|
|
|
|
vmsgt.vv v8, v20, v4
|
|
# CHECK-INST: vmslt.vv v8, v4, v20
|
|
# CHECK-ENCODING: [0x57,0x04,0x4a,0x6e]
|
|
# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
|
|
# CHECK-UNKNOWN: 57 04 4a 6e <unknown>
|
|
|
|
vmsgeu.vv v8, v20, v4, v0.t
|
|
# CHECK-INST: vmsleu.vv v8, v4, v20, v0.t
|
|
# CHECK-ENCODING: [0x57,0x04,0x4a,0x70]
|
|
# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
|
|
# CHECK-UNKNOWN: 57 04 4a 70 <unknown>
|
|
|
|
vmsgeu.vv v8, v20, v4
|
|
# CHECK-INST: vmsleu.vv v8, v4, v20
|
|
# CHECK-ENCODING: [0x57,0x04,0x4a,0x72]
|
|
# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
|
|
# CHECK-UNKNOWN: 57 04 4a 72 <unknown>
|
|
|
|
vmsge.vv v8, v20, v4, v0.t
|
|
# CHECK-INST: vmsle.vv v8, v4, v20, v0.t
|
|
# CHECK-ENCODING: [0x57,0x04,0x4a,0x74]
|
|
# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
|
|
# CHECK-UNKNOWN: 57 04 4a 74 <unknown>
|
|
|
|
vmsge.vv v8, v20, v4
|
|
# CHECK-INST: vmsle.vv v8, v4, v20
|
|
# CHECK-ENCODING: [0x57,0x04,0x4a,0x76]
|
|
# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
|
|
# CHECK-UNKNOWN: 57 04 4a 76 <unknown>
|
|
|
|
vmsltu.vi v8, v4, 16, v0.t
|
|
# CHECK-INST: vmsleu.vi v8, v4, 15, v0.t
|
|
# CHECK-ENCODING: [0x57,0xb4,0x47,0x70]
|
|
# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
|
|
# CHECK-UNKNOWN: 57 b4 47 70 <unknown>
|
|
|
|
vmsltu.vi v8, v4, 16
|
|
# CHECK-INST: vmsleu.vi v8, v4, 15
|
|
# CHECK-ENCODING: [0x57,0xb4,0x47,0x72]
|
|
# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
|
|
# CHECK-UNKNOWN: 57 b4 47 72 <unknown>
|
|
|
|
vmsltu.vi v8, v4, 0, v0.t
|
|
# CHECK-INST: vmsne.vv v8, v4, v4, v0.t
|
|
# CHECK-ENCODING: [0x57,0x04,0x42,0x64]
|
|
# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
|
|
# CHECK-UNKNOWN: 57 04 42 64 <unknown>
|
|
|
|
vmsltu.vi v8, v4, 0
|
|
# CHECK-INST: vmsne.vv v8, v4, v4
|
|
# CHECK-ENCODING: [0x57,0x04,0x42,0x66]
|
|
# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
|
|
# CHECK-UNKNOWN: 57 04 42 66 <unknown>
|
|
|
|
vmslt.vi v8, v4, 16, v0.t
|
|
# CHECK-INST: vmsle.vi v8, v4, 15, v0.t
|
|
# CHECK-ENCODING: [0x57,0xb4,0x47,0x74]
|
|
# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
|
|
# CHECK-UNKNOWN: 57 b4 47 74 <unknown>
|
|
|
|
vmslt.vi v8, v4, 16
|
|
# CHECK-INST: vmsle.vi v8, v4, 15
|
|
# CHECK-ENCODING: [0x57,0xb4,0x47,0x76]
|
|
# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
|
|
# CHECK-UNKNOWN: 57 b4 47 76 <unknown>
|
|
|
|
vmsgeu.vi v8, v4, 16, v0.t
|
|
# CHECK-INST: vmsgtu.vi v8, v4, 15, v0.t
|
|
# CHECK-ENCODING: [0x57,0xb4,0x47,0x78]
|
|
# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
|
|
# CHECK-UNKNOWN: 57 b4 47 78 <unknown>
|
|
|
|
vmsgeu.vi v8, v4, 16
|
|
# CHECK-INST: vmsgtu.vi v8, v4, 15
|
|
# CHECK-ENCODING: [0x57,0xb4,0x47,0x7a]
|
|
# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
|
|
# CHECK-UNKNOWN: 57 b4 47 7a <unknown>
|
|
|
|
vmsgeu.vi v8, v4, 0, v0.t
|
|
# CHECK-INST: vmseq.vv v8, v4, v4, v0.t
|
|
# CHECK-ENCODING: [0x57,0x04,0x42,0x60]
|
|
# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
|
|
# CHECK-UNKNOWN: 57 04 42 60 <unknown>
|
|
|
|
vmsgeu.vi v8, v4, 0
|
|
# CHECK-INST: vmseq.vv v8, v4, v4
|
|
# CHECK-ENCODING: [0x57,0x04,0x42,0x62]
|
|
# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
|
|
# CHECK-UNKNOWN: 57 04 42 62 <unknown>
|
|
|
|
vmsge.vi v8, v4, 16, v0.t
|
|
# CHECK-INST: vmsgt.vi v8, v4, 15, v0.t
|
|
# CHECK-ENCODING: [0x57,0xb4,0x47,0x7c]
|
|
# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
|
|
# CHECK-UNKNOWN: 57 b4 47 7c <unknown>
|
|
|
|
vmsge.vi v8, v4, 16
|
|
# CHECK-INST: vmsgt.vi v8, v4, 15
|
|
# CHECK-ENCODING: [0x57,0xb4,0x47,0x7e]
|
|
# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
|
|
# CHECK-UNKNOWN: 57 b4 47 7e <unknown>
|
|
|
|
vmsgeu.vx v8, v4, a0
|
|
# CHECK-INST: vmsltu.vx v8, v4, a0
|
|
# CHECK-INST: vmnot.m v8, v8
|
|
# CHECK-ENCODING: [0x57,0x44,0x45,0x6a]
|
|
# CHECK-ENCODING: [0x57,0x24,0x84,0x76]
|
|
# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
|
|
# CHECK-UNKNOWN: 57 44 45 6a <unknown>
|
|
# CHECK-UNKNOWN: 57 24 84 76 <unknown>
|
|
|
|
vmsge.vx v0, v4, a0
|
|
# CHECK-INST: vmslt.vx v0, v4, a0
|
|
# CHECK-INST: vmnot.m v0, v0
|
|
# CHECK-ENCODING: [0x57,0x40,0x45,0x6e]
|
|
# CHECK-ENCODING: [0x57,0x20,0x00,0x76]
|
|
# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
|
|
# CHECK-UNKNOWN: 57 40 45 6e <unknown>
|
|
# CHECK-UNKNOWN: 57 20 00 76 <unknown>
|
|
|
|
vmsge.vx v8, v4, a0
|
|
# CHECK-INST: vmslt.vx v8, v4, a0
|
|
# CHECK-INST: vmnot.m v8, v8
|
|
# CHECK-ENCODING: [0x57,0x44,0x45,0x6e]
|
|
# CHECK-ENCODING: [0x57,0x24,0x84,0x76]
|
|
# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
|
|
# CHECK-UNKNOWN: 57 44 45 6e <unknown>
|
|
# CHECK-UNKNOWN: 57 24 84 76 <unknown>
|
|
|
|
vmsgeu.vx v8, v4, a0, v0.t
|
|
# CHECK-INST: vmsltu.vx v8, v4, a0, v0.t
|
|
# CHECK-INST: vmxor.mm v8, v8, v0
|
|
# CHECK-ENCODING: [0x57,0x44,0x45,0x68]
|
|
# CHECK-ENCODING: [0x57,0x24,0x80,0x6e]
|
|
# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
|
|
# CHECK-UNKNOWN: 57 44 45 68 <unknown>
|
|
# CHECK-UNKNOWN: 57 24 80 6e <unknown>
|
|
|
|
vmsge.vx v8, v4, a0, v0.t
|
|
# CHECK-INST: vmslt.vx v8, v4, a0, v0.t
|
|
# CHECK-INST: vmxor.mm v8, v8, v0
|
|
# CHECK-ENCODING: [0x57,0x44,0x45,0x6c]
|
|
# CHECK-ENCODING: [0x57,0x24,0x80,0x6e]
|
|
# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
|
|
# CHECK-UNKNOWN: 57 44 45 6c <unknown>
|
|
# CHECK-UNKNOWN: 57 24 80 6e <unknown>
|
|
|
|
vmsgeu.vx v0, v4, a0, v0.t, v2
|
|
# CHECK-INST: vmsltu.vx v2, v4, a0, v0.t
|
|
# CHECK-INST: vmandnot.mm v0, v0, v2
|
|
# CHECK-ENCODING: [0x57,0x41,0x45,0x68]
|
|
# CHECK-ENCODING: [0x57,0x20,0x01,0x62]
|
|
# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
|
|
# CHECK-UNKNOWN: 57 41 45 68 <unknown>
|
|
# CHECK-UNKNOWN: 57 20 01 62 <unknown>
|
|
|
|
vmsge.vx v0, v4, a0, v0.t, v2
|
|
# CHECK-INST: vmslt.vx v2, v4, a0, v0.t
|
|
# CHECK-INST: vmandnot.mm v0, v0, v2
|
|
# CHECK-ENCODING: [0x57,0x41,0x45,0x6c]
|
|
# CHECK-ENCODING: [0x57,0x20,0x01,0x62]
|
|
# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
|
|
# CHECK-UNKNOWN: 57 41 45 6c <unknown>
|
|
# CHECK-UNKNOWN: 57 20 01 62 <unknown>
|
|
+
|
|
+vmsgeu.vx v9, v4, a0, v0.t, v2
|
|
+# CHECK-INST: vmsltu.vx v2, v4, a0
|
|
+# CHECK-INST: vmandnot.mm v2, v0, v2
|
|
+# CHECK-INST: vmandnot.mm v9, v9, v0
|
|
+# CHECK-INST: vmor.mm v9, v2, v9
|
|
+# CHECK-ENCODING: [0x57,0x41,0x45,0x6a]
|
|
+# CHECK-ENCODING: [0x57,0x21,0x01,0x62]
|
|
+# CHECK-ENCODING: [0xd7,0x24,0x90,0x62]
|
|
+# CHECK-ENCODING: [0xd7,0xa4,0x24,0x6a]
|
|
+# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
|
|
+# CHECK-UNKNOWN: 57 41 45 6a <unknown>
|
|
+# CHECK-UNKNOWN: 57 21 01 62 <unknown>
|
|
+# CHECK-UNKNOWN: d7 24 90 62 <unknown>
|
|
+# CHECK-UNKNOWN: d7 a4 24 6a <unknown>
|
|
+
|
|
+vmsge.vx v8, v4, a0, v0.t, v2
|
|
+# CHECK-INST: vmslt.vx v2, v4, a0
|
|
+# CHECK-INST: vmandnot.mm v2, v0, v2
|
|
+# CHECK-INST: vmandnot.mm v8, v8, v0
|
|
+# CHECK-INST: vmor.mm v8, v2, v8
|
|
+# CHECK-ENCODING: [0x57,0x41,0x45,0x6e]
|
|
+# CHECK-ENCODING: [0x57,0x21,0x01,0x62]
|
|
+# CHECK-ENCODING: [0x57,0x24,0x80,0x62]
|
|
+# CHECK-ENCODING: [0x57,0x24,0x24,0x6a]
|
|
+# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
|
|
+# CHECK-UNKNOWN: 57 41 45 6e <unknown>
|
|
+# CHECK-UNKNOWN: 57 21 01 62 <unknown>
|
|
+# CHECK-UNKNOWN: 57 24 80 62 <unknown>
|
|
+# CHECK-UNKNOWN: 57 24 24 6a <unknown>
|
|
\ No newline at end of file
|
|
diff --git a/llvm/test/MC/RISCV/rvv/invalid.s b/llvm/test/MC/RISCV/rvv/invalid.s
|
|
index feb4966efd10..6c6cdaf1810e 100644
|
|
--- a/llvm/test/MC/RISCV/rvv/invalid.s
|
|
+++ b/llvm/test/MC/RISCV/rvv/invalid.s
|
|
@@ -1,667 +1,663 @@
|
|
# RUN: not llvm-mc -triple=riscv64 --mattr=+experimental-v --mattr=+f %s 2>&1 \
# RUN: | FileCheck %s --check-prefix=CHECK-ERROR

vsetivli a2, 32, e8,m1
# CHECK-ERROR: immediate must be an integer in the range [0, 31]

vsetivli a2, zero, e8,m1
# CHECK-ERROR: immediate must be an integer in the range [0, 31]

vsetivli a2, 5, e31
# CHECK-ERROR: operand must be e[8|16|32|64|128|256|512|1024],m[1|2|4|8|f2|f4|f8],[ta|tu],[ma|mu]

vsetvli a2, a0, e31
# CHECK-ERROR: operand must be e[8|16|32|64|128|256|512|1024],m[1|2|4|8|f2|f4|f8],[ta|tu],[ma|mu]

vsetvli a2, a0, e32,m3
# CHECK-ERROR: operand must be e[8|16|32|64|128|256|512|1024],m[1|2|4|8|f2|f4|f8],[ta|tu],[ma|mu]

vsetvli a2, a0, m1,e32
# CHECK-ERROR: operand must be e[8|16|32|64|128|256|512|1024],m[1|2|4|8|f2|f4|f8],[ta|tu],[ma|mu]

vsetvli a2, a0, e32,m16
# CHECK-ERROR: operand must be e[8|16|32|64|128|256|512|1024],m[1|2|4|8|f2|f4|f8],[ta|tu],[ma|mu]

vsetvli a2, a0, e2048,m8
# CHECK-ERROR: operand must be e[8|16|32|64|128|256|512|1024],m[1|2|4|8|f2|f4|f8],[ta|tu],[ma|mu]

vsetvli a2, a0, e1,m8
# CHECK-ERROR: operand must be e[8|16|32|64|128|256|512|1024],m[1|2|4|8|f2|f4|f8],[ta|tu],[ma|mu]

vsetvli a2, a0, e8,m1,tx
# CHECK-ERROR: operand must be e[8|16|32|64|128|256|512|1024],m[1|2|4|8|f2|f4|f8],[ta|tu],[ma|mu]

vsetvli a2, a0, e8,m1,ta,mx
# CHECK-ERROR: operand must be e[8|16|32|64|128|256|512|1024],m[1|2|4|8|f2|f4|f8],[ta|tu],[ma|mu]

vsetvli a2, a0, e8,m1,ma
# CHECK-ERROR: operand must be e[8|16|32|64|128|256|512|1024],m[1|2|4|8|f2|f4|f8],[ta|tu],[ma|mu]

vsetvli a2, a0, e8,m1,mu
# CHECK-ERROR: operand must be e[8|16|32|64|128|256|512|1024],m[1|2|4|8|f2|f4|f8],[ta|tu],[ma|mu]

vsetvli a2, a0, e8x,m1,tu,mu
# CHECK-ERROR: operand must be e[8|16|32|64|128|256|512|1024],m[1|2|4|8|f2|f4|f8],[ta|tu],[ma|mu]

vsetvli a2, a0, e8,m1z,tu,mu
# CHECK-ERROR: operand must be e[8|16|32|64|128|256|512|1024],m[1|2|4|8|f2|f4|f8],[ta|tu],[ma|mu]

vsetvli a2, a0, e8,mf1,tu,mu
# CHECK-ERROR: operand must be e[8|16|32|64|128|256|512|1024],m[1|2|4|8|f2|f4|f8],[ta|tu],[ma|mu]

vsetvli a2, a0, e8,m1,tu,mut
# CHECK-ERROR: operand must be e[8|16|32|64|128|256|512|1024],m[1|2|4|8|f2|f4|f8],[ta|tu],[ma|mu]

vsetvli a2, a0, e8,m1,tut,mu
# CHECK-ERROR: operand must be e[8|16|32|64|128|256|512|1024],m[1|2|4|8|f2|f4|f8],[ta|tu],[ma|mu]

vsetvli a2, a0, e8
# CHECK-ERROR: operand must be e[8|16|32|64|128|256|512|1024],m[1|2|4|8|f2|f4|f8],[ta|tu],[ma|mu]

vsetvli a2, a0, e8,m1
# CHECK-ERROR: operand must be e[8|16|32|64|128|256|512|1024],m[1|2|4|8|f2|f4|f8],[ta|tu],[ma|mu]

vsetvli a2, a0, e8,m1,ta
# CHECK-ERROR: operand must be e[8|16|32|64|128|256|512|1024],m[1|2|4|8|f2|f4|f8],[ta|tu],[ma|mu]

vadd.vv v1, v3, v2, v4.t
# CHECK-ERROR: operand must be v0.t

vadd.vv v1, v3, v2, v0
# CHECK-ERROR: expected '.t' suffix

vadd.vv v1, v3, a0
# CHECK-ERROR: invalid operand for instruction

vmslt.vi v1, v2, -16
# CHECK-ERROR: immediate must be in the range [-15, 16]

vmslt.vi v1, v2, 17
# CHECK-ERROR: immediate must be in the range [-15, 16]

viota.m v0, v2, v0.t
# CHECK-ERROR: The destination vector register group cannot overlap the mask register.
# CHECK-ERROR-LABEL: viota.m v0, v2, v0.t

viota.m v2, v2
# CHECK-ERROR: The destination vector register group cannot overlap the source vector register group.
# CHECK-ERROR-LABEL: viota.m v2, v2

vfwcvt.xu.f.v v0, v2, v0.t
# CHECK-ERROR: The destination vector register group cannot overlap the mask register.
# CHECK-ERROR-LABEL: vfwcvt.xu.f.v v0, v2, v0.t

vfwcvt.xu.f.v v2, v2
# CHECK-ERROR: The destination vector register group cannot overlap the source vector register group.
# CHECK-ERROR-LABEL: vfwcvt.xu.f.v v2, v2

vfwcvt.x.f.v v0, v2, v0.t
# CHECK-ERROR: The destination vector register group cannot overlap the mask register.
# CHECK-ERROR-LABEL: vfwcvt.x.f.v v0, v2, v0.t

vfwcvt.x.f.v v2, v2
# CHECK-ERROR: The destination vector register group cannot overlap the source vector register group.
# CHECK-ERROR-LABEL: vfwcvt.x.f.v v2, v2

vfwcvt.f.xu.v v0, v2, v0.t
# CHECK-ERROR: The destination vector register group cannot overlap the mask register.
# CHECK-ERROR-LABEL: vfwcvt.f.xu.v v0, v2, v0.t

vfwcvt.f.xu.v v2, v2
# CHECK-ERROR: The destination vector register group cannot overlap the source vector register group.
# CHECK-ERROR-LABEL: vfwcvt.f.xu.v v2, v2

vfwcvt.f.x.v v0, v2, v0.t
# CHECK-ERROR: The destination vector register group cannot overlap the mask register.
# CHECK-ERROR-LABEL: vfwcvt.f.x.v v0, v2, v0.t

vfwcvt.f.x.v v2, v2
# CHECK-ERROR: The destination vector register group cannot overlap the source vector register group.
# CHECK-ERROR-LABEL: vfwcvt.f.x.v v2, v2

vfwcvt.f.f.v v0, v2, v0.t
# CHECK-ERROR: The destination vector register group cannot overlap the mask register.
# CHECK-ERROR-LABEL: vfwcvt.f.f.v v0, v2, v0.t

vfwcvt.f.f.v v2, v2
# CHECK-ERROR: The destination vector register group cannot overlap the source vector register group.
# CHECK-ERROR-LABEL: vfwcvt.f.f.v v2, v2

vslideup.vx v0, v2, a0, v0.t
# CHECK-ERROR: The destination vector register group cannot overlap the mask register.
# CHECK-ERROR-LABEL: vslideup.vx v0, v2, a0, v0.t

vslideup.vx v2, v2, a0
# CHECK-ERROR: The destination vector register group cannot overlap the source vector register group.
# CHECK-ERROR-LABEL: vslideup.vx v2, v2, a0

vslideup.vi v0, v2, 31, v0.t
# CHECK-ERROR: The destination vector register group cannot overlap the mask register.
# CHECK-ERROR-LABEL: vslideup.vi v0, v2, 31, v0.t

vslideup.vi v2, v2, 31
# CHECK-ERROR: The destination vector register group cannot overlap the source vector register group.
# CHECK-ERROR-LABEL: vslideup.vi v2, v2, 31

vslide1up.vx v0, v2, a0, v0.t
# CHECK-ERROR: The destination vector register group cannot overlap the mask register.
# CHECK-ERROR-LABEL: vslide1up.vx v0, v2, a0, v0.t

vslide1up.vx v2, v2, a0
# CHECK-ERROR: The destination vector register group cannot overlap the source vector register group.
# CHECK-ERROR-LABEL: vslide1up.vx v2, v2, a0

vrgather.vv v0, v2, v4, v0.t
# CHECK-ERROR: The destination vector register group cannot overlap the mask register.
# CHECK-ERROR-LABEL: vrgather.vv v0, v2, v4, v0.t

vrgather.vv v2, v2, v4
# CHECK-ERROR: The destination vector register group cannot overlap the source vector register group.
# CHECK-ERROR-LABEL: vrgather.vv v2, v2, v4

vrgather.vx v0, v2, a0, v0.t
# CHECK-ERROR: The destination vector register group cannot overlap the mask register.
# CHECK-ERROR-LABEL: vrgather.vx v0, v2, a0, v0.t

vrgather.vx v2, v2, a0
# CHECK-ERROR: The destination vector register group cannot overlap the source vector register group.
# CHECK-ERROR-LABEL: vrgather.vx v2, v2, a0

vrgather.vi v0, v2, 31, v0.t
# CHECK-ERROR: The destination vector register group cannot overlap the mask register.
# CHECK-ERROR-LABEL: vrgather.vi v0, v2, 31, v0.t

vrgather.vi v2, v2, 31
# CHECK-ERROR: The destination vector register group cannot overlap the source vector register group.
# CHECK-ERROR-LABEL: vrgather.vi v2, v2, 31

vwaddu.vv v0, v2, v4, v0.t
# CHECK-ERROR: The destination vector register group cannot overlap the mask register.
# CHECK-ERROR-LABEL: vwaddu.vv v0, v2, v4, v0.t

vwaddu.vv v2, v2, v4
# CHECK-ERROR: The destination vector register group cannot overlap the source vector register group.
# CHECK-ERROR-LABEL: vwaddu.vv v2, v2, v4

vwsubu.vv v0, v2, v4, v0.t
# CHECK-ERROR: The destination vector register group cannot overlap the mask register.
# CHECK-ERROR-LABEL: vwsubu.vv v0, v2, v4, v0.t

vwsubu.vv v2, v2, v4
# CHECK-ERROR: The destination vector register group cannot overlap the source vector register group.
# CHECK-ERROR-LABEL: vwsubu.vv v2, v2, v4

vwadd.vv v0, v2, v4, v0.t
# CHECK-ERROR: The destination vector register group cannot overlap the mask register.
# CHECK-ERROR-LABEL: vwadd.vv v0, v2, v4, v0.t

vwadd.vv v2, v2, v4
# CHECK-ERROR: The destination vector register group cannot overlap the source vector register group.
# CHECK-ERROR-LABEL: vwadd.vv v2, v2, v4

vwsub.vv v0, v2, v4, v0.t
# CHECK-ERROR: The destination vector register group cannot overlap the mask register.
# CHECK-ERROR-LABEL: vwsub.vv v0, v2, v4, v0.t

vwsub.vv v2, v2, v4
# CHECK-ERROR: The destination vector register group cannot overlap the source vector register group.
# CHECK-ERROR-LABEL: vwsub.vv v2, v2, v4

vwmul.vv v0, v2, v4, v0.t
# CHECK-ERROR: The destination vector register group cannot overlap the mask register.
# CHECK-ERROR-LABEL: vwmul.vv v0, v2, v4, v0.t

vwmul.vv v2, v2, v4
# CHECK-ERROR: The destination vector register group cannot overlap the source vector register group.
# CHECK-ERROR-LABEL: vwmul.vv v2, v2, v4

vwmulu.vv v0, v2, v4, v0.t
# CHECK-ERROR: The destination vector register group cannot overlap the mask register.
# CHECK-ERROR-LABEL: vwmulu.vv v0, v2, v4, v0.t

vwmulu.vv v2, v2, v4
# CHECK-ERROR: The destination vector register group cannot overlap the source vector register group.
# CHECK-ERROR-LABEL: vwmulu.vv v2, v2, v4

vwmulsu.vv v0, v2, v4, v0.t
# CHECK-ERROR: The destination vector register group cannot overlap the mask register.
# CHECK-ERROR-LABEL: vwmulsu.vv v0, v2, v4, v0.t

vwmulsu.vv v2, v2, v4
# CHECK-ERROR: The destination vector register group cannot overlap the source vector register group.
# CHECK-ERROR-LABEL: vwmulsu.vv v2, v2, v4

vwmaccu.vv v0, v4, v2, v0.t
# CHECK-ERROR: The destination vector register group cannot overlap the mask register.
# CHECK-ERROR-LABEL: vwmaccu.vv v0, v4, v2, v0.t

vwmaccu.vv v2, v4, v2
# CHECK-ERROR: The destination vector register group cannot overlap the source vector register group.
# CHECK-ERROR-LABEL: vwmaccu.vv v2, v4, v2

vwmacc.vv v0, v4, v2, v0.t
# CHECK-ERROR: The destination vector register group cannot overlap the mask register.
# CHECK-ERROR-LABEL: vwmacc.vv v0, v4, v2, v0.t

vwmacc.vv v2, v4, v2
# CHECK-ERROR: The destination vector register group cannot overlap the source vector register group.
# CHECK-ERROR-LABEL: vwmacc.vv v2, v4, v2

vwmaccsu.vv v0, v4, v2, v0.t
# CHECK-ERROR: The destination vector register group cannot overlap the mask register.
# CHECK-ERROR-LABEL: vwmaccsu.vv v0, v4, v2, v0.t

vwmaccsu.vv v2, v4, v2
# CHECK-ERROR: The destination vector register group cannot overlap the source vector register group.
# CHECK-ERROR-LABEL: vwmaccsu.vv v2, v4, v2

vfwadd.vv v0, v2, v4, v0.t
# CHECK-ERROR: The destination vector register group cannot overlap the mask register.
# CHECK-ERROR-LABEL: vfwadd.vv v0, v2, v4, v0.t

vfwadd.vv v2, v2, v4
# CHECK-ERROR: The destination vector register group cannot overlap the source vector register group.
# CHECK-ERROR-LABEL: vfwadd.vv v2, v2, v4

vfwsub.vv v0, v2, v4, v0.t
# CHECK-ERROR: The destination vector register group cannot overlap the mask register.
# CHECK-ERROR-LABEL: vfwsub.vv v0, v2, v4, v0.t

vfwsub.vv v2, v2, v4
# CHECK-ERROR: The destination vector register group cannot overlap the source vector register group.
# CHECK-ERROR-LABEL: vfwsub.vv v2, v2, v4

vfwmul.vv v0, v2, v4, v0.t
# CHECK-ERROR: The destination vector register group cannot overlap the mask register.
# CHECK-ERROR-LABEL: vfwmul.vv v0, v2, v4, v0.t

vfwmul.vv v2, v2, v4
# CHECK-ERROR: The destination vector register group cannot overlap the source vector register group.
# CHECK-ERROR-LABEL: vfwmul.vv v2, v2, v4

vfwmacc.vv v0, v4, v2, v0.t
# CHECK-ERROR: The destination vector register group cannot overlap the mask register.
# CHECK-ERROR-LABEL: vfwmacc.vv v0, v4, v2, v0.t

vfwmacc.vv v2, v4, v2
# CHECK-ERROR: The destination vector register group cannot overlap the source vector register group.
# CHECK-ERROR-LABEL: vfwmacc.vv v2, v4, v2

vfwnmacc.vv v0, v4, v2, v0.t
# CHECK-ERROR: The destination vector register group cannot overlap the mask register.
# CHECK-ERROR-LABEL: vfwnmacc.vv v0, v4, v2, v0.t

vfwnmacc.vv v2, v4, v2
# CHECK-ERROR: The destination vector register group cannot overlap the source vector register group.
# CHECK-ERROR-LABEL: vfwnmacc.vv v2, v4, v2

vfwmsac.vv v0, v4, v2, v0.t
# CHECK-ERROR: The destination vector register group cannot overlap the mask register.
# CHECK-ERROR-LABEL: vfwmsac.vv v0, v4, v2, v0.t

vfwmsac.vv v2, v4, v2
# CHECK-ERROR: The destination vector register group cannot overlap the source vector register group.
# CHECK-ERROR-LABEL: vfwmsac.vv v2, v4, v2

vfwnmsac.vv v0, v4, v2, v0.t
# CHECK-ERROR: The destination vector register group cannot overlap the mask register.
# CHECK-ERROR-LABEL: vfwnmsac.vv v0, v4, v2, v0.t

vfwnmsac.vv v2, v4, v2
# CHECK-ERROR: The destination vector register group cannot overlap the source vector register group.
# CHECK-ERROR-LABEL: vfwnmsac.vv v2, v4, v2

vwaddu.vx v0, v2, a0, v0.t
# CHECK-ERROR: The destination vector register group cannot overlap the mask register.
# CHECK-ERROR-LABEL: vwaddu.vx v0, v2, a0, v0.t

vwaddu.vx v2, v2, a0
# CHECK-ERROR: The destination vector register group cannot overlap the source vector register group.
# CHECK-ERROR-LABEL: vwaddu.vx v2, v2, a0

vwsubu.vx v0, v2, a0, v0.t
# CHECK-ERROR: The destination vector register group cannot overlap the mask register.
# CHECK-ERROR-LABEL: vwsubu.vx v0, v2, a0, v0.t

vwsubu.vx v2, v2, a0
# CHECK-ERROR: The destination vector register group cannot overlap the source vector register group.
# CHECK-ERROR-LABEL: vwsubu.vx v2, v2, a0

vwadd.vx v0, v2, a0, v0.t
# CHECK-ERROR: The destination vector register group cannot overlap the mask register.
# CHECK-ERROR-LABEL: vwadd.vx v0, v2, a0, v0.t

vwadd.vx v2, v2, a0
# CHECK-ERROR: The destination vector register group cannot overlap the source vector register group.
# CHECK-ERROR-LABEL: vwadd.vx v2, v2, a0

vwsub.vx v0, v2, a0, v0.t
# CHECK-ERROR: The destination vector register group cannot overlap the mask register.
# CHECK-ERROR-LABEL: vwsub.vx v0, v2, a0, v0.t

vwsub.vx v2, v2, a0
# CHECK-ERROR: The destination vector register group cannot overlap the source vector register group.
# CHECK-ERROR-LABEL: vwsub.vx v2, v2, a0

vwmul.vx v0, v2, a0, v0.t
# CHECK-ERROR: The destination vector register group cannot overlap the mask register.
# CHECK-ERROR-LABEL: vwmul.vx v0, v2, a0, v0.t

vwmul.vx v2, v2, a0
# CHECK-ERROR: The destination vector register group cannot overlap the source vector register group.
# CHECK-ERROR-LABEL: vwmul.vx v2, v2, a0

vwmulu.vx v0, v2, a0, v0.t
# CHECK-ERROR: The destination vector register group cannot overlap the mask register.
# CHECK-ERROR-LABEL: vwmulu.vx v0, v2, a0, v0.t

vwmulu.vx v2, v2, a0
# CHECK-ERROR: The destination vector register group cannot overlap the source vector register group.
# CHECK-ERROR-LABEL: vwmulu.vx v2, v2, a0

vwmulsu.vx v0, v2, a0, v0.t
# CHECK-ERROR: The destination vector register group cannot overlap the mask register.
# CHECK-ERROR-LABEL: vwmulsu.vx v0, v2, a0, v0.t

vwmulsu.vx v2, v2, a0
# CHECK-ERROR: The destination vector register group cannot overlap the source vector register group.
# CHECK-ERROR-LABEL: vwmulsu.vx v2, v2, a0

vwmaccu.vx v0, a0, v2, v0.t
# CHECK-ERROR: The destination vector register group cannot overlap the mask register.
# CHECK-ERROR-LABEL: vwmaccu.vx v0, a0, v2, v0.t

vwmaccu.vx v2, a0, v2
# CHECK-ERROR: The destination vector register group cannot overlap the source vector register group.
# CHECK-ERROR-LABEL: vwmaccu.vx v2, a0, v2

vwmacc.vx v0, a0, v2, v0.t
# CHECK-ERROR: The destination vector register group cannot overlap the mask register.
# CHECK-ERROR-LABEL: vwmacc.vx v0, a0, v2, v0.t

vwmacc.vx v2, a0, v2
# CHECK-ERROR: The destination vector register group cannot overlap the source vector register group.
# CHECK-ERROR-LABEL: vwmacc.vx v2, a0, v2

vwmaccsu.vx v0, a0, v2, v0.t
# CHECK-ERROR: The destination vector register group cannot overlap the mask register.
# CHECK-ERROR-LABEL: vwmaccsu.vx v0, a0, v2, v0.t

vwmaccsu.vx v2, a0, v2
# CHECK-ERROR: The destination vector register group cannot overlap the source vector register group.
# CHECK-ERROR-LABEL: vwmaccsu.vx v2, a0, v2

vwmaccus.vx v0, a0, v2, v0.t
# CHECK-ERROR: The destination vector register group cannot overlap the mask register.
# CHECK-ERROR-LABEL: vwmaccus.vx v0, a0, v2, v0.t

vwmaccus.vx v2, a0, v2
# CHECK-ERROR: The destination vector register group cannot overlap the source vector register group.
# CHECK-ERROR-LABEL: vwmaccus.vx v2, a0, v2

vfwadd.vf v0, v2, fa0, v0.t
# CHECK-ERROR: The destination vector register group cannot overlap the mask register.
# CHECK-ERROR-LABEL: vfwadd.vf v0, v2, fa0, v0.t

vfwadd.vf v2, v2, fa0
# CHECK-ERROR: The destination vector register group cannot overlap the source vector register group.
# CHECK-ERROR-LABEL: vfwadd.vf v2, v2, fa0

vfwsub.vf v0, v2, fa0, v0.t
# CHECK-ERROR: The destination vector register group cannot overlap the mask register.
# CHECK-ERROR-LABEL: vfwsub.vf v0, v2, fa0, v0.t

vfwsub.vf v2, v2, fa0
# CHECK-ERROR: The destination vector register group cannot overlap the source vector register group.
# CHECK-ERROR-LABEL: vfwsub.vf v2, v2, fa0

vfwmul.vf v0, v2, fa0, v0.t
# CHECK-ERROR: The destination vector register group cannot overlap the mask register.
# CHECK-ERROR-LABEL: vfwmul.vf v0, v2, fa0, v0.t

vfwmul.vf v2, v2, fa0
# CHECK-ERROR: The destination vector register group cannot overlap the source vector register group.
# CHECK-ERROR-LABEL: vfwmul.vf v2, v2, fa0

vfwmacc.vf v0, fa0, v2, v0.t
# CHECK-ERROR: The destination vector register group cannot overlap the mask register.
# CHECK-ERROR-LABEL: vfwmacc.vf v0, fa0, v2, v0.t

vfwmacc.vf v2, fa0, v2
# CHECK-ERROR: The destination vector register group cannot overlap the source vector register group.
# CHECK-ERROR-LABEL: vfwmacc.vf v2, fa0, v2

vfwnmacc.vf v0, fa0, v2, v0.t
# CHECK-ERROR: The destination vector register group cannot overlap the mask register.
# CHECK-ERROR-LABEL: vfwnmacc.vf v0, fa0, v2, v0.t

vfwnmacc.vf v2, fa0, v2
# CHECK-ERROR: The destination vector register group cannot overlap the source vector register group.
# CHECK-ERROR-LABEL: vfwnmacc.vf v2, fa0, v2

vfwmsac.vf v0, fa0, v2, v0.t
# CHECK-ERROR: The destination vector register group cannot overlap the mask register.
# CHECK-ERROR-LABEL: vfwmsac.vf v0, fa0, v2, v0.t

vfwmsac.vf v2, fa0, v2
# CHECK-ERROR: The destination vector register group cannot overlap the source vector register group.
# CHECK-ERROR-LABEL: vfwmsac.vf v2, fa0, v2

vfwnmsac.vf v0, fa0, v2, v0.t
# CHECK-ERROR: The destination vector register group cannot overlap the mask register.
# CHECK-ERROR-LABEL: vfwnmsac.vf v0, fa0, v2, v0.t

vfwnmsac.vf v2, fa0, v2
# CHECK-ERROR: The destination vector register group cannot overlap the source vector register group.
# CHECK-ERROR-LABEL: vfwnmsac.vf v2, fa0, v2

vcompress.vm v2, v2, v4
# CHECK-ERROR: The destination vector register group cannot overlap the source vector register group.
# CHECK-ERROR-LABEL: vcompress.vm v2, v2, v4

vwaddu.wv v0, v2, v4, v0.t
# CHECK-ERROR: The destination vector register group cannot overlap the mask register.
# CHECK-ERROR-LABEL: vwaddu.wv v0, v2, v4, v0.t

vwaddu.wv v2, v4, v2
# CHECK-ERROR: The destination vector register group cannot overlap the source vector register group.
# CHECK-ERROR-LABEL: vwaddu.wv v2, v4, v2

vwsubu.wv v0, v2, v4, v0.t
# CHECK-ERROR: The destination vector register group cannot overlap the mask register.
# CHECK-ERROR-LABEL: vwsubu.wv v0, v2, v4, v0.t

vwsubu.wv v2, v4, v2
# CHECK-ERROR: The destination vector register group cannot overlap the source vector register group.
# CHECK-ERROR-LABEL: vwsubu.wv v2, v4, v2

vwadd.wv v0, v2, v4, v0.t
# CHECK-ERROR: The destination vector register group cannot overlap the mask register.
# CHECK-ERROR-LABEL: vwadd.wv v0, v2, v4, v0.t

vwadd.wv v2, v4, v2
# CHECK-ERROR: The destination vector register group cannot overlap the source vector register group.
# CHECK-ERROR-LABEL: vwadd.wv v2, v4, v2

vwsub.wv v0, v2, v4, v0.t
# CHECK-ERROR: The destination vector register group cannot overlap the mask register.
# CHECK-ERROR-LABEL: vwsub.wv v0, v2, v4, v0.t

vwsub.wv v2, v4, v2
# CHECK-ERROR: The destination vector register group cannot overlap the source vector register group.
# CHECK-ERROR-LABEL: vwsub.wv v2, v4, v2

vfwadd.wv v0, v2, v4, v0.t
# CHECK-ERROR: The destination vector register group cannot overlap the mask register.
# CHECK-ERROR-LABEL: vfwadd.wv v0, v2, v4, v0.t

vfwadd.wv v2, v4, v2
# CHECK-ERROR: The destination vector register group cannot overlap the source vector register group.
# CHECK-ERROR-LABEL: vfwadd.wv v2, v4, v2

vfwsub.wv v0, v2, v4, v0.t
# CHECK-ERROR: The destination vector register group cannot overlap the mask register.
# CHECK-ERROR-LABEL: vfwsub.wv v0, v2, v4, v0.t

vfwsub.wv v2, v4, v2
# CHECK-ERROR: The destination vector register group cannot overlap the source vector register group.
# CHECK-ERROR-LABEL: vfwsub.wv v2, v4, v2

vwaddu.wx v0, v2, a0, v0.t
# CHECK-ERROR: The destination vector register group cannot overlap the mask register.
# CHECK-ERROR-LABEL: vwaddu.wx v0, v2, a0, v0.t

vwsubu.wx v0, v2, a0, v0.t
# CHECK-ERROR: The destination vector register group cannot overlap the mask register.
# CHECK-ERROR-LABEL: vwsubu.wx v0, v2, a0, v0.t

vwadd.wx v0, v2, a0, v0.t
# CHECK-ERROR: The destination vector register group cannot overlap the mask register.
# CHECK-ERROR-LABEL: vwadd.wx v0, v2, a0, v0.t

vwsub.wx v0, v2, a0, v0.t
# CHECK-ERROR: The destination vector register group cannot overlap the mask register.
# CHECK-ERROR-LABEL: vwsub.wx v0, v2, a0, v0.t

vfwadd.wf v0, v2, fa0, v0.t
# CHECK-ERROR: The destination vector register group cannot overlap the mask register.
# CHECK-ERROR-LABEL: vfwadd.wf v0, v2, fa0, v0.t

vfwsub.wf v0, v2, fa0, v0.t
# CHECK-ERROR: The destination vector register group cannot overlap the mask register.
# CHECK-ERROR-LABEL: vfwsub.wf v0, v2, fa0, v0.t

vadc.vvm v0, v2, v4, v0
# CHECK-ERROR: The destination vector register group cannot be V0.
# CHECK-ERROR-LABEL: vadc.vvm v0, v2, v4, v0

vadd.vv v0, v2, v4, v0.t
# CHECK-ERROR: The destination vector register group cannot overlap the mask register.
# CHECK-ERROR-LABEL: vadd.vv v0, v2, v4, v0.t

vadd.vx v0, v2, a0, v0.t
# CHECK-ERROR: The destination vector register group cannot overlap the mask register.
# CHECK-ERROR-LABEL: vadd.vx v0, v2, a0, v0.t

vadd.vi v0, v2, 1, v0.t
# CHECK-ERROR: The destination vector register group cannot overlap the mask register.
# CHECK-ERROR-LABEL: vadd.vi v0, v2, 1, v0.t

vmsge.vx v0, v4, a0, v0.t
# CHECK-ERROR: too few operands for instruction
# CHECK-ERROR-LABEL: vmsge.vx v0, v4, a0, v0.t

-vmsge.vx v8, v4, a0, v0.t, v2
-# CHECK-ERROR: invalid operand for instruction
-# CHECK-ERROR-LABEL: vmsge.vx v8, v4, a0, v0.t, v2
-
vmerge.vim v0, v1, 1, v0
# CHECK-ERROR: The destination vector register group cannot be V0.
# CHECK-ERROR-LABEL: vmerge.vim v0, v1, 1, v0

vmerge.vvm v0, v1, v2, v0
# CHECK-ERROR: The destination vector register group cannot be V0.
# CHECK-ERROR-LABEL: vmerge.vvm v0, v1, v2, v0

vmerge.vxm v0, v1, x1, v0
# CHECK-ERROR: The destination vector register group cannot be V0.
# CHECK-ERROR-LABEL: vmerge.vxm v0, v1, x1, v0

vfmerge.vfm v0, v1, f1, v0
# CHECK-ERROR: The destination vector register group cannot be V0.
# CHECK-ERROR-LABEL: vfmerge.vfm v0, v1, f1, v0

vle8.v v0, (a0), v0.t
# CHECK-ERROR: The destination vector register group cannot overlap the mask register.
# CHECK-ERROR-LABEL: vle8.v v0, (a0), v0.t

vfclass.v v0, v1, v0.t
# CHECK-ERROR: The destination vector register group cannot overlap the mask register.
# CHECK-ERROR-LABEL: vfclass.v v0, v1, v0.t

vfsqrt.v v0, v1, v0.t
# CHECK-ERROR: The destination vector register group cannot overlap the mask register.
# CHECK-ERROR-LABEL: vfsqrt.v v0, v1, v0.t

vzext.vf2 v0, v1, v0.t
# CHECK-ERROR: The destination vector register group cannot overlap the mask register.
# CHECK-ERROR-LABEL: vzext.vf2 v0, v1, v0.t

vid.v v0, v0.t
# CHECK-ERROR: The destination vector register group cannot overlap the mask register.
# CHECK-ERROR-LABEL: vid.v v0, v0.t

vnsrl.wv v0, v4, v20, v0.t
# CHECK-ERROR: The destination vector register group cannot overlap the mask register.
# CHECK-ERROR-LABEL: vnsrl.wv v0, v4, v20, v0.t

vfncvt.xu.f.w v0, v4, v0.t
# CHECK-ERROR: The destination vector register group cannot overlap the mask register.
# CHECK-ERROR-LABEL: vfncvt.xu.f.w v0, v4, v0.t

vl2re8.v v1, (a0)
# CHECK-ERROR: invalid operand for instruction

vl4re8.v v1, (a0)
# CHECK-ERROR: invalid operand for instruction

vl4re8.v v2, (a0)
# CHECK-ERROR: invalid operand for instruction

vl4re8.v v3, (a0)
# CHECK-ERROR: invalid operand for instruction

vl8re8.v v1, (a0)
# CHECK-ERROR: invalid operand for instruction

vl8re8.v v2, (a0)
# CHECK-ERROR: invalid operand for instruction

vl8re8.v v3, (a0)
# CHECK-ERROR: invalid operand for instruction

vl8re8.v v4, (a0)
# CHECK-ERROR: invalid operand for instruction

vl8re8.v v5, (a0)
# CHECK-ERROR: invalid operand for instruction

vl8re8.v v6, (a0)
# CHECK-ERROR: invalid operand for instruction

vl8re8.v v7, (a0)
# CHECK-ERROR: invalid operand for instruction

vs2r.v v1, (a0)
# CHECK-ERROR: invalid operand for instruction

vs4r.v v1, (a0)
# CHECK-ERROR: invalid operand for instruction

vs4r.v v2, (a0)
# CHECK-ERROR: invalid operand for instruction

vs4r.v v3, (a0)
# CHECK-ERROR: invalid operand for instruction

vs8r.v v1, (a0)
# CHECK-ERROR: invalid operand for instruction

vs8r.v v2, (a0)
# CHECK-ERROR: invalid operand for instruction

vs8r.v v3, (a0)
# CHECK-ERROR: invalid operand for instruction

vs8r.v v4, (a0)
# CHECK-ERROR: invalid operand for instruction

vs8r.v v5, (a0)
# CHECK-ERROR: invalid operand for instruction

vs8r.v v6, (a0)
# CHECK-ERROR: invalid operand for instruction

vs8r.v v7, (a0)
# CHECK-ERROR: invalid operand for instruction