[RISCV] Implement intrinsics for XAndesVPackFPH (#140007)
This patch implements Clang intrinsic support for XAndesVPackFPH. Documentation for the intrinsics can be found at:
https://github.com/andestech/andes-vector-intrinsic-doc/blob/ast-v5_4_0-release-v5/auto-generated/andes-v5/intrinsic_funcs.adoc#andes-vector-packed-fp16-extensionxandesvpackfph
and, for the policy variants, at:
https://github.com/andestech/andes-vector-intrinsic-doc/blob/ast-v5_4_0-release-v5/auto-generated/andes-v5/policy_funcs/intrinsic_funcs.adoc#andes-vector-packed-fp16-extensionxandesvpackfph

Co-authored-by: Tony Chuan-Yue Yuan <yuan593@andestech.com>
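
As a usage sketch (the calls mirror the tests added in this patch; the surrounding function names are illustrative only):

    #include <andes_vector.h>

    // Unmasked, explicit type suffix; the rounding mode defaults to
    // frm = 7 (dynamic).
    vfloat16m1_t madd_top(vfloat16m1_t op1, float op2, size_t vl) {
      return __riscv_nds_vfpmadt_vf_f16m1(op1, op2, vl);
    }

    // _rm variant with an explicit static rounding mode.
    vfloat16m1_t madd_bottom_rne(vfloat16m1_t op1, float op2, size_t vl) {
      return __riscv_nds_vfpmadb_vf_f16m1_rm(op1, op2, __RISCV_FRM_RNE, vl);
    }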

@@ -201,3 +201,12 @@ clang_tablegen(riscv_sifive_vector_builtin_cg.inc -gen-riscv-sifive-vector-built
clang_tablegen(riscv_sifive_vector_builtin_sema.inc -gen-riscv-sifive-vector-builtin-sema
  SOURCE riscv_sifive_vector.td
  TARGET ClangRISCVSiFiveVectorBuiltinSema)
clang_tablegen(riscv_andes_vector_builtins.inc -gen-riscv-andes-vector-builtins
  SOURCE riscv_andes_vector.td
  TARGET ClangRISCVAndesVectorBuiltins)
clang_tablegen(riscv_andes_vector_builtin_cg.inc -gen-riscv-andes-vector-builtin-codegen
  SOURCE riscv_andes_vector.td
  TARGET ClangRISCVAndesVectorBuiltinCG)
clang_tablegen(riscv_andes_vector_builtin_sema.inc -gen-riscv-andes-vector-builtin-sema
  SOURCE riscv_andes_vector.td
  TARGET ClangRISCVAndesVectorBuiltinSema)

@@ -197,6 +197,9 @@ namespace clang {
    FirstSiFiveBuiltin,
    LastRVVBuiltin = FirstSiFiveBuiltin - 1,
#include "clang/Basic/riscv_sifive_vector_builtins.inc"
    FirstAndesBuiltin,
    LastSiFiveBuiltin = FirstAndesBuiltin - 1,
#include "clang/Basic/riscv_andes_vector_builtins.inc"
#undef GET_RISCVV_BUILTIN_ENUMERATORS
    FirstTSBuiltin,
  };

clang/include/clang/Basic/riscv_andes_vector.td (new file, 83 lines)
@@ -0,0 +1,83 @@
//==--- riscv_andes_vector.td - RISC-V Andes Builtin function list --------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the builtins for RISC-V Andes Vector Extension. See:
//
//   https://github.com/andestech/andes-vector-intrinsic-doc
//
//===----------------------------------------------------------------------===//

include "riscv_vector_common.td"

//===----------------------------------------------------------------------===//
// Instruction definitions
//===----------------------------------------------------------------------===//

// Andes Vector Packed FP16 Extension (XAndesVPackFPH)

multiclass RVVFPMAD {
  let Log2LMUL = [-2, -1, 0, 1, 2, 3],
      OverloadedName = NAME in {
    defm NAME : RVVOutOp1BuiltinSet<NAME, "x", [["vf", "v", "vvf"]]>;

    let HasFRMRoundModeOp = true in
      defm NAME : RVVOutOp1BuiltinSet<NAME, "x", [["vf", "v", "vvfu"]]>;
  }
}

let RequiredFeatures = ["Xandesvpackfph"],
    UnMaskedPolicyScheme = HasPassthruOperand in {
let ManualCodegen = [{
  {
    // LLVM intrinsic
    // Unmasked: (passthru, op0, op1, round_mode, vl)
    // Masked:   (passthru, vector_in, vector_in/scalar_in, mask, frm, vl, policy)

    SmallVector<llvm::Value*, 7> Operands;
    bool HasMaskedOff = !(
        (IsMasked && (PolicyAttrs & RVV_VTA) && (PolicyAttrs & RVV_VMA)) ||
        (!IsMasked && PolicyAttrs & RVV_VTA));
    bool HasRoundModeOp = IsMasked ?
        (HasMaskedOff ? Ops.size() == 6 : Ops.size() == 5) :
        (HasMaskedOff ? Ops.size() == 5 : Ops.size() == 4);

    unsigned Offset = IsMasked ?
        (HasMaskedOff ? 2 : 1) : (HasMaskedOff ? 1 : 0);

    if (!HasMaskedOff)
      Operands.push_back(llvm::PoisonValue::get(ResultType));
    else
      Operands.push_back(Ops[IsMasked ? 1 : 0]);

    Operands.push_back(Ops[Offset]);     // op0
    Operands.push_back(Ops[Offset + 1]); // op1

    if (IsMasked)
      Operands.push_back(Ops[0]); // mask

    if (HasRoundModeOp) {
      Operands.push_back(Ops[Offset + 2]); // frm
      Operands.push_back(Ops[Offset + 3]); // vl
    } else {
      Operands.push_back(ConstantInt::get(Ops[Offset + 2]->getType(), 7)); // frm
      Operands.push_back(Ops[Offset + 2]); // vl
    }

    if (IsMasked)
      Operands.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));

    IntrinsicTypes = {ResultType, Ops[Offset + 1]->getType(),
                      Operands.back()->getType()};
    llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
    return Builder.CreateCall(F, Operands, "");
  }
}] in {
  defm nds_vfpmadt : RVVFPMAD;
  defm nds_vfpmadb : RVVFPMAD;
}
}
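
For reference, the unmasked form produced by this ManualCodegen is visible in the autogenerated CHECK lines of the tests added below; __riscv_nds_vfpmadt_vf_f16m1(op1, op2, vl), for example, lowers to a single intrinsic call (adapted from those CHECK lines), where i64 7 selects the dynamic rounding mode and the poison passthru reflects the tail-agnostic default:

    %0 = call <vscale x 4 x half> @llvm.riscv.nds.vfpmadt.nxv4f16.f32.i64(<vscale x 4 x half> poison, <vscale x 4 x half> %op1, float %op2, i64 7, i64 %vl)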

@@ -24,7 +24,7 @@ class Preprocessor;
namespace sema {
class RISCVIntrinsicManager {
public:
-  enum class IntrinsicKind : uint8_t { RVV, SIFIVE_VECTOR };
+  enum class IntrinsicKind : uint8_t { RVV, SIFIVE_VECTOR, ANDES_VECTOR };

  virtual ~RISCVIntrinsicManager() = default;

@@ -51,6 +51,9 @@ public:
  /// Indicate RISC-V SiFive vector builtin functions enabled or not.
  bool DeclareSiFiveVectorBuiltins = false;

  /// Indicate RISC-V Andes vector builtin functions enabled or not.
  bool DeclareAndesVectorBuiltins = false;

  std::unique_ptr<sema::RISCVIntrinsicManager> IntrinsicManager;
};

@@ -489,6 +489,7 @@ public:
enum RVVRequire {
  RVV_REQ_RV64,
  RVV_REQ_Zvfhmin,
  RVV_REQ_Xandesvpackfph,
  RVV_REQ_Xsfvcp,
  RVV_REQ_Xsfvfnrclipxfqf,
  RVV_REQ_Xsfvfwmaccqqq,

@@ -265,13 +265,15 @@ void RISCVTargetInfo::getTargetDefines(const LangOptions &Opts,
static constexpr int NumRVVBuiltins =
    RISCVVector::FirstSiFiveBuiltin - Builtin::FirstTSBuiltin;
static constexpr int NumRVVSiFiveBuiltins =
-    RISCVVector::FirstTSBuiltin - RISCVVector::FirstSiFiveBuiltin;
+    RISCVVector::FirstAndesBuiltin - RISCVVector::FirstSiFiveBuiltin;
static constexpr int NumRVVAndesBuiltins =
    RISCVVector::FirstTSBuiltin - RISCVVector::FirstAndesBuiltin;
static constexpr int NumRISCVBuiltins =
    RISCV::LastTSBuiltin - RISCVVector::FirstTSBuiltin;
static constexpr int NumBuiltins =
    RISCV::LastTSBuiltin - Builtin::FirstTSBuiltin;
-static_assert(NumBuiltins ==
-              (NumRVVBuiltins + NumRVVSiFiveBuiltins + NumRISCVBuiltins));
+static_assert(NumBuiltins == (NumRVVBuiltins + NumRVVSiFiveBuiltins +
+                              NumRVVAndesBuiltins + NumRISCVBuiltins));

namespace RVV {
#define GET_RISCVV_BUILTIN_STR_TABLE
@@ -299,6 +301,19 @@ static constexpr std::array<Builtin::Info, NumRVVSiFiveBuiltins> BuiltinInfos =
};
} // namespace RVVSiFive

namespace RVVAndes {
#define GET_RISCVV_BUILTIN_STR_TABLE
#include "clang/Basic/riscv_andes_vector_builtins.inc"
#undef GET_RISCVV_BUILTIN_STR_TABLE

static constexpr std::array<Builtin::Info, NumRVVAndesBuiltins> BuiltinInfos =
    {
#define GET_RISCVV_BUILTIN_INFOS
#include "clang/Basic/riscv_andes_vector_builtins.inc"
#undef GET_RISCVV_BUILTIN_INFOS
};
} // namespace RVVAndes

#define GET_BUILTIN_STR_TABLE
#include "clang/Basic/BuiltinsRISCV.inc"
#undef GET_BUILTIN_STR_TABLE
@@ -315,6 +330,7 @@ RISCVTargetInfo::getTargetBuiltins() const {
  return {
      {&RVV::BuiltinStrings, RVV::BuiltinInfos, "__builtin_rvv_"},
      {&RVVSiFive::BuiltinStrings, RVVSiFive::BuiltinInfos, "__builtin_rvv_"},
      {&RVVAndes::BuiltinStrings, RVVAndes::BuiltinInfos, "__builtin_rvv_"},
      {&BuiltinStrings, BuiltinInfos},
  };
}

@@ -418,6 +418,9 @@ Value *CodeGenFunction::EmitRISCVBuiltinExpr(unsigned BuiltinID,

    // SiFive Vector builtins are handled from here.
#include "clang/Basic/riscv_sifive_vector_builtin_cg.inc"

    // Andes Vector builtins are handled from here.
#include "clang/Basic/riscv_andes_vector_builtin_cg.inc"
  }

  assert(ID != Intrinsic::not_intrinsic);

@@ -127,6 +127,7 @@ set(riscv_files
  riscv_crypto.h
  riscv_ntlh.h
  sifive_vector.h
  andes_vector.h
  )

set(systemz_files

clang/lib/Headers/andes_vector.h (new file, 16 lines)
@@ -0,0 +1,16 @@
//===----- andes_vector.h - Andes Vector definitions ----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef _ANDES_VECTOR_H_
#define _ANDES_VECTOR_H_

#include "riscv_vector.h"

#pragma clang riscv intrinsic andes_vector

#endif //_ANDES_VECTOR_H_
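
Including this header also enables the overloaded (non-suffixed) intrinsic names; a minimal sketch mirroring the overloaded tests below (the function name is illustrative):

    #include <andes_vector.h>

    // Overload resolution picks the f16m1 variant from the operand types.
    vfloat16m1_t madd_overloaded(vfloat16m1_t op1, float op2, size_t vl) {
      return __riscv_nds_vfpmadb(op1, op2, vl);
    }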

@@ -4139,6 +4139,7 @@ void PragmaMaxTokensTotalHandler::HandlePragma(Preprocessor &PP,

// Handle '#pragma clang riscv intrinsic vector'.
//        '#pragma clang riscv intrinsic sifive_vector'.
//        '#pragma clang riscv intrinsic andes_vector'.
void PragmaRISCVHandler::HandlePragma(Preprocessor &PP,
                                      PragmaIntroducer Introducer,
                                      Token &FirstToken) {
@@ -4154,10 +4155,11 @@ void PragmaRISCVHandler::HandlePragma(Preprocessor &PP,

  PP.Lex(Tok);
  II = Tok.getIdentifierInfo();
-  if (!II || !(II->isStr("vector") || II->isStr("sifive_vector"))) {
+  if (!II || !(II->isStr("vector") || II->isStr("sifive_vector") ||
+               II->isStr("andes_vector"))) {
    PP.Diag(Tok.getLocation(), diag::warn_pragma_invalid_argument)
        << PP.getSpelling(Tok) << "riscv" << /*Expected=*/true
-        << "'vector' or 'sifive_vector'";
+        << "'vector', 'sifive_vector' or 'andes_vector'";
    return;
  }

@@ -4172,4 +4174,6 @@ void PragmaRISCVHandler::HandlePragma(Preprocessor &PP,
    Actions.RISCV().DeclareRVVBuiltins = true;
  else if (II->isStr("sifive_vector"))
    Actions.RISCV().DeclareSiFiveVectorBuiltins = true;
  else if (II->isStr("andes_vector"))
    Actions.RISCV().DeclareAndesVectorBuiltins = true;
}

@@ -946,7 +946,8 @@ bool Sema::LookupBuiltin(LookupResult &R) {
    }
  }

-  if (RISCV().DeclareRVVBuiltins || RISCV().DeclareSiFiveVectorBuiltins) {
+  if (RISCV().DeclareRVVBuiltins || RISCV().DeclareSiFiveVectorBuiltins ||
+      RISCV().DeclareAndesVectorBuiltins) {
    if (!RISCV().IntrinsicManager)
      RISCV().IntrinsicManager = CreateRISCVIntrinsicManager(*this);

@@ -69,6 +69,12 @@ static const PrototypeDescriptor RVSiFiveVectorSignatureTable[] = {
#undef DECL_SIGNATURE_TABLE
};

static const PrototypeDescriptor RVAndesVectorSignatureTable[] = {
#define DECL_SIGNATURE_TABLE
#include "clang/Basic/riscv_andes_vector_builtin_sema.inc"
#undef DECL_SIGNATURE_TABLE
};

static const RVVIntrinsicRecord RVVIntrinsicRecords[] = {
#define DECL_INTRINSIC_RECORDS
#include "clang/Basic/riscv_vector_builtin_sema.inc"
@@ -81,6 +87,12 @@ static const RVVIntrinsicRecord RVSiFiveVectorIntrinsicRecords[] = {
#undef DECL_INTRINSIC_RECORDS
};

static const RVVIntrinsicRecord RVAndesVectorIntrinsicRecords[] = {
#define DECL_INTRINSIC_RECORDS
#include "clang/Basic/riscv_andes_vector_builtin_sema.inc"
#undef DECL_INTRINSIC_RECORDS
};

// Get subsequence of signature table.
static ArrayRef<PrototypeDescriptor>
ProtoSeq2ArrayRef(IntrinsicKind K, uint16_t Index, uint8_t Length) {
@@ -89,6 +101,8 @@ ProtoSeq2ArrayRef(IntrinsicKind K, uint16_t Index, uint8_t Length) {
    return ArrayRef(&RVVSignatureTable[Index], Length);
  case IntrinsicKind::SIFIVE_VECTOR:
    return ArrayRef(&RVSiFiveVectorSignatureTable[Index], Length);
  case IntrinsicKind::ANDES_VECTOR:
    return ArrayRef(&RVAndesVectorSignatureTable[Index], Length);
  }
  llvm_unreachable("Unhandled IntrinsicKind");
}
@@ -167,6 +181,7 @@ private:
  RVVTypeCache TypeCache;
  bool ConstructedRISCVVBuiltins;
  bool ConstructedRISCVSiFiveVectorBuiltins;
  bool ConstructedRISCVAndesVectorBuiltins;

  // List of all RVV intrinsic.
  std::vector<RVVIntrinsicDef> IntrinsicList;
@@ -192,6 +207,7 @@ public:
  RISCVIntrinsicManagerImpl(clang::Sema &S) : S(S), Context(S.Context) {
    ConstructedRISCVVBuiltins = false;
    ConstructedRISCVSiFiveVectorBuiltins = false;
    ConstructedRISCVAndesVectorBuiltins = false;
  }

  // Initialize IntrinsicList
@@ -209,6 +225,7 @@ void RISCVIntrinsicManagerImpl::ConstructRVVIntrinsics(
  const TargetInfo &TI = Context.getTargetInfo();
  static const std::pair<const char *, unsigned> FeatureCheckList[] = {
      {"64bit", RVV_REQ_RV64},
      {"xandesvpackfph", RVV_REQ_Xandesvpackfph},
      {"xsfvcp", RVV_REQ_Xsfvcp},
      {"xsfvfnrclipxfqf", RVV_REQ_Xsfvfnrclipxfqf},
      {"xsfvfwmaccqqq", RVV_REQ_Xsfvfwmaccqqq},

@@ -358,6 +375,12 @@ void RISCVIntrinsicManagerImpl::InitIntrinsicList() {
    ConstructRVVIntrinsics(RVSiFiveVectorIntrinsicRecords,
                           IntrinsicKind::SIFIVE_VECTOR);
  }
  if (S.RISCV().DeclareAndesVectorBuiltins &&
      !ConstructedRISCVAndesVectorBuiltins) {
    ConstructedRISCVAndesVectorBuiltins = true;
    ConstructRVVIntrinsics(RVAndesVectorIntrinsicRecords,
                           IntrinsicKind::ANDES_VECTOR);
  }
}

// Compute name and signatures for intrinsic with practical types.

@@ -1214,6 +1214,7 @@ llvm::raw_ostream &operator<<(llvm::raw_ostream &OS, enum RVVRequire Require) {
  switch (Require) {
    STRINGIFY(RVV_REQ_RV64)
    STRINGIFY(RVV_REQ_Zvfhmin)
    STRINGIFY(RVV_REQ_Xandesvpackfph)
    STRINGIFY(RVV_REQ_Xsfvcp)
    STRINGIFY(RVV_REQ_Xsfvfnrclipxfqf)
    STRINGIFY(RVV_REQ_Xsfvfwmaccqqq)

@@ -0,0 +1,225 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
// RUN:   -target-feature +zvfhmin \
// RUN:   -target-feature +xandesvpackfph -disable-O0-optnone \
// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
// RUN:   FileCheck --check-prefix=CHECK-RV64 %s

#include <andes_vector.h>

// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.nds.vfpmadb.nxv1f16.f32.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 7, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_nds_vfpmadb_vf_f16mf4(vfloat16mf4_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadb_vf_f16mf4(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.nds.vfpmadb.nxv2f16.f32.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 7, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_nds_vfpmadb_vf_f16mf2(vfloat16mf2_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadb_vf_f16mf2(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.nds.vfpmadb.nxv4f16.f32.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 7, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_nds_vfpmadb_vf_f16m1(vfloat16m1_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadb_vf_f16m1(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.nds.vfpmadb.nxv8f16.f32.i64(<vscale x 8 x half> poison, <vscale x 8 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 7, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_nds_vfpmadb_vf_f16m2(vfloat16m2_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadb_vf_f16m2(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.nds.vfpmadb.nxv16f16.f32.i64(<vscale x 16 x half> poison, <vscale x 16 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 7, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_nds_vfpmadb_vf_f16m4(vfloat16m4_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadb_vf_f16m4(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.nds.vfpmadb.nxv32f16.f32.i64(<vscale x 32 x half> poison, <vscale x 32 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 7, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_nds_vfpmadb_vf_f16m8(vfloat16m8_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadb_vf_f16m8(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16mf4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.nds.vfpmadb.mask.nxv1f16.f32.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_nds_vfpmadb_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadb_vf_f16mf4_m(mask, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.nds.vfpmadb.mask.nxv2f16.f32.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_nds_vfpmadb_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadb_vf_f16mf2_m(mask, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.nds.vfpmadb.mask.nxv4f16.f32.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_nds_vfpmadb_vf_f16m1_m(vbool16_t mask, vfloat16m1_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadb_vf_f16m1_m(mask, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.nds.vfpmadb.mask.nxv8f16.f32.i64(<vscale x 8 x half> poison, <vscale x 8 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_nds_vfpmadb_vf_f16m2_m(vbool8_t mask, vfloat16m2_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadb_vf_f16m2_m(mask, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.nds.vfpmadb.mask.nxv16f16.f32.i64(<vscale x 16 x half> poison, <vscale x 16 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_nds_vfpmadb_vf_f16m4_m(vbool4_t mask, vfloat16m4_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadb_vf_f16m4_m(mask, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.nds.vfpmadb.mask.nxv32f16.f32.i64(<vscale x 32 x half> poison, <vscale x 32 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_nds_vfpmadb_vf_f16m8_m(vbool2_t mask, vfloat16m8_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadb_vf_f16m8_m(mask, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16mf4_rm(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.nds.vfpmadb.nxv1f16.f32.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_nds_vfpmadb_vf_f16mf4_rm(vfloat16mf4_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadb_vf_f16mf4_rm(op1, op2, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16mf2_rm(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.nds.vfpmadb.nxv2f16.f32.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_nds_vfpmadb_vf_f16mf2_rm(vfloat16mf2_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadb_vf_f16mf2_rm(op1, op2, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m1_rm(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.nds.vfpmadb.nxv4f16.f32.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_nds_vfpmadb_vf_f16m1_rm(vfloat16m1_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadb_vf_f16m1_rm(op1, op2, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m2_rm(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.nds.vfpmadb.nxv8f16.f32.i64(<vscale x 8 x half> poison, <vscale x 8 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_nds_vfpmadb_vf_f16m2_rm(vfloat16m2_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadb_vf_f16m2_rm(op1, op2, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m4_rm(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.nds.vfpmadb.nxv16f16.f32.i64(<vscale x 16 x half> poison, <vscale x 16 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_nds_vfpmadb_vf_f16m4_rm(vfloat16m4_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadb_vf_f16m4_rm(op1, op2, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m8_rm(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.nds.vfpmadb.nxv32f16.f32.i64(<vscale x 32 x half> poison, <vscale x 32 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_nds_vfpmadb_vf_f16m8_rm(vfloat16m8_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadb_vf_f16m8_rm(op1, op2, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16mf4_rm_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.nds.vfpmadb.mask.nxv1f16.f32.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_nds_vfpmadb_vf_f16mf4_rm_m(vbool64_t mask, vfloat16mf4_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadb_vf_f16mf4_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16mf2_rm_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.nds.vfpmadb.mask.nxv2f16.f32.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_nds_vfpmadb_vf_f16mf2_rm_m(vbool32_t mask, vfloat16mf2_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadb_vf_f16mf2_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m1_rm_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.nds.vfpmadb.mask.nxv4f16.f32.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_nds_vfpmadb_vf_f16m1_rm_m(vbool16_t mask, vfloat16m1_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadb_vf_f16m1_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m2_rm_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.nds.vfpmadb.mask.nxv8f16.f32.i64(<vscale x 8 x half> poison, <vscale x 8 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_nds_vfpmadb_vf_f16m2_rm_m(vbool8_t mask, vfloat16m2_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadb_vf_f16m2_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m4_rm_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.nds.vfpmadb.mask.nxv16f16.f32.i64(<vscale x 16 x half> poison, <vscale x 16 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_nds_vfpmadb_vf_f16m4_rm_m(vbool4_t mask, vfloat16m4_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadb_vf_f16m4_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m8_rm_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.nds.vfpmadb.mask.nxv32f16.f32.i64(<vscale x 32 x half> poison, <vscale x 32 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_nds_vfpmadb_vf_f16m8_rm_m(vbool2_t mask, vfloat16m8_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadb_vf_f16m8_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
}

@@ -0,0 +1,225 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
// RUN:   -target-feature +zvfhmin \
// RUN:   -target-feature +xandesvpackfph -disable-O0-optnone \
// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
// RUN:   FileCheck --check-prefix=CHECK-RV64 %s

#include <andes_vector.h>

// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.nds.vfpmadt.nxv1f16.f32.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 7, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_nds_vfpmadt_vf_f16mf4(vfloat16mf4_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadt_vf_f16mf4(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.nds.vfpmadt.nxv2f16.f32.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 7, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_nds_vfpmadt_vf_f16mf2(vfloat16mf2_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadt_vf_f16mf2(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.nds.vfpmadt.nxv4f16.f32.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 7, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_nds_vfpmadt_vf_f16m1(vfloat16m1_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadt_vf_f16m1(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.nds.vfpmadt.nxv8f16.f32.i64(<vscale x 8 x half> poison, <vscale x 8 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 7, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_nds_vfpmadt_vf_f16m2(vfloat16m2_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadt_vf_f16m2(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.nds.vfpmadt.nxv16f16.f32.i64(<vscale x 16 x half> poison, <vscale x 16 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 7, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_nds_vfpmadt_vf_f16m4(vfloat16m4_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadt_vf_f16m4(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.nds.vfpmadt.nxv32f16.f32.i64(<vscale x 32 x half> poison, <vscale x 32 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 7, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_nds_vfpmadt_vf_f16m8(vfloat16m8_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadt_vf_f16m8(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16mf4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.nds.vfpmadt.mask.nxv1f16.f32.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_nds_vfpmadt_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadt_vf_f16mf4_m(mask, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.nds.vfpmadt.mask.nxv2f16.f32.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_nds_vfpmadt_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadt_vf_f16mf2_m(mask, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.nds.vfpmadt.mask.nxv4f16.f32.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_nds_vfpmadt_vf_f16m1_m(vbool16_t mask, vfloat16m1_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadt_vf_f16m1_m(mask, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.nds.vfpmadt.mask.nxv8f16.f32.i64(<vscale x 8 x half> poison, <vscale x 8 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_nds_vfpmadt_vf_f16m2_m(vbool8_t mask, vfloat16m2_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadt_vf_f16m2_m(mask, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.nds.vfpmadt.mask.nxv16f16.f32.i64(<vscale x 16 x half> poison, <vscale x 16 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_nds_vfpmadt_vf_f16m4_m(vbool4_t mask, vfloat16m4_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadt_vf_f16m4_m(mask, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.nds.vfpmadt.mask.nxv32f16.f32.i64(<vscale x 32 x half> poison, <vscale x 32 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_nds_vfpmadt_vf_f16m8_m(vbool2_t mask, vfloat16m8_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadt_vf_f16m8_m(mask, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16mf4_rm(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.nds.vfpmadt.nxv1f16.f32.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_nds_vfpmadt_vf_f16mf4_rm(vfloat16mf4_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadt_vf_f16mf4_rm(op1, op2, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16mf2_rm(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.nds.vfpmadt.nxv2f16.f32.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_nds_vfpmadt_vf_f16mf2_rm(vfloat16mf2_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadt_vf_f16mf2_rm(op1, op2, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m1_rm(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.nds.vfpmadt.nxv4f16.f32.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_nds_vfpmadt_vf_f16m1_rm(vfloat16m1_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadt_vf_f16m1_rm(op1, op2, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m2_rm(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.nds.vfpmadt.nxv8f16.f32.i64(<vscale x 8 x half> poison, <vscale x 8 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_nds_vfpmadt_vf_f16m2_rm(vfloat16m2_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadt_vf_f16m2_rm(op1, op2, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m4_rm(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.nds.vfpmadt.nxv16f16.f32.i64(<vscale x 16 x half> poison, <vscale x 16 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_nds_vfpmadt_vf_f16m4_rm(vfloat16m4_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadt_vf_f16m4_rm(op1, op2, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m8_rm(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.nds.vfpmadt.nxv32f16.f32.i64(<vscale x 32 x half> poison, <vscale x 32 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_nds_vfpmadt_vf_f16m8_rm(vfloat16m8_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadt_vf_f16m8_rm(op1, op2, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16mf4_rm_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.nds.vfpmadt.mask.nxv1f16.f32.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_nds_vfpmadt_vf_f16mf4_rm_m(vbool64_t mask, vfloat16mf4_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadt_vf_f16mf4_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16mf2_rm_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.nds.vfpmadt.mask.nxv2f16.f32.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_nds_vfpmadt_vf_f16mf2_rm_m(vbool32_t mask, vfloat16mf2_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadt_vf_f16mf2_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m1_rm_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.nds.vfpmadt.mask.nxv4f16.f32.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_nds_vfpmadt_vf_f16m1_rm_m(vbool16_t mask, vfloat16m1_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadt_vf_f16m1_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m2_rm_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.nds.vfpmadt.mask.nxv8f16.f32.i64(<vscale x 8 x half> poison, <vscale x 8 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_nds_vfpmadt_vf_f16m2_rm_m(vbool8_t mask, vfloat16m2_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadt_vf_f16m2_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m4_rm_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.nds.vfpmadt.mask.nxv16f16.f32.i64(<vscale x 16 x half> poison, <vscale x 16 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_nds_vfpmadt_vf_f16m4_rm_m(vbool4_t mask, vfloat16m4_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadt_vf_f16m4_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m8_rm_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.nds.vfpmadt.mask.nxv32f16.f32.i64(<vscale x 32 x half> poison, <vscale x 32 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_nds_vfpmadt_vf_f16m8_rm_m(vbool2_t mask, vfloat16m8_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadt_vf_f16m8_rm_m(mask, op1, op2, __RISCV_FRM_RNE, vl);
}

@@ -0,0 +1,225 @@
|
||||
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
|
||||
// REQUIRES: riscv-registered-target
|
||||
// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
|
||||
// RUN: -target-feature +zvfhmin \
|
||||
// RUN: -target-feature +xandesvpackfph -disable-O0-optnone \
|
||||
// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
|
||||
// RUN: FileCheck --check-prefix=CHECK-RV64 %s
|
||||
|
||||
#include <andes_vector.h>
|
||||
|
||||
// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16mf4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.nds.vfpmadb.nxv1f16.f32.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 7, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
|
||||
//
|
||||
vfloat16mf4_t test_nds_vfpmadb_vf_f16mf4(vfloat16mf4_t op1, float op2, size_t vl) {
|
||||
return __riscv_nds_vfpmadb(op1, op2, vl);
|
||||
}
|
||||
|
||||
// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16mf2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.nds.vfpmadb.nxv2f16.f32.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 7, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
|
||||
//
|
||||
vfloat16mf2_t test_nds_vfpmadb_vf_f16mf2(vfloat16mf2_t op1, float op2, size_t vl) {
|
||||
return __riscv_nds_vfpmadb(op1, op2, vl);
|
||||
}
|
||||
|
||||
// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.nds.vfpmadb.nxv4f16.f32.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 7, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
|
||||
//
|
||||
vfloat16m1_t test_nds_vfpmadb_vf_f16m1(vfloat16m1_t op1, float op2, size_t vl) {
|
||||
return __riscv_nds_vfpmadb(op1, op2, vl);
|
||||
}
|
||||
|
||||
// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.nds.vfpmadb.nxv8f16.f32.i64(<vscale x 8 x half> poison, <vscale x 8 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 7, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
|
||||
//
|
||||
vfloat16m2_t test_nds_vfpmadb_vf_f16m2(vfloat16m2_t op1, float op2, size_t vl) {
|
||||
return __riscv_nds_vfpmadb(op1, op2, vl);
|
||||
}
|
||||
|
||||
// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.nds.vfpmadb.nxv16f16.f32.i64(<vscale x 16 x half> poison, <vscale x 16 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 7, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
|
||||
//
|
||||
vfloat16m4_t test_nds_vfpmadb_vf_f16m4(vfloat16m4_t op1, float op2, size_t vl) {
|
||||
return __riscv_nds_vfpmadb(op1, op2, vl);
|
||||
}
|
||||
|
||||
// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.nds.vfpmadb.nxv32f16.f32.i64(<vscale x 32 x half> poison, <vscale x 32 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 7, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
|
||||
//
|
||||
vfloat16m8_t test_nds_vfpmadb_vf_f16m8(vfloat16m8_t op1, float op2, size_t vl) {
|
||||
return __riscv_nds_vfpmadb(op1, op2, vl);
|
||||
}
|
||||
|
||||
// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16mf4_m(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.nds.vfpmadb.mask.nxv1f16.f32.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 3)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
|
||||
//
|
||||
vfloat16mf4_t test_nds_vfpmadb_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, float op2, size_t vl) {
|
||||
return __riscv_nds_vfpmadb(mask, op1, op2, vl);
|
||||
}
|
||||
|
||||
// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16mf2_m(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.nds.vfpmadb.mask.nxv2f16.f32.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 3)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
|
||||
//
|
||||
vfloat16mf2_t test_nds_vfpmadb_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, float op2, size_t vl) {
|
||||
return __riscv_nds_vfpmadb(mask, op1, op2, vl);
|
||||
}
|
||||
|
||||
// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m1_m(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.nds.vfpmadb.mask.nxv4f16.f32.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 3)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
|
||||
//
|
||||
vfloat16m1_t test_nds_vfpmadb_vf_f16m1_m(vbool16_t mask, vfloat16m1_t op1, float op2, size_t vl) {
|
||||
return __riscv_nds_vfpmadb(mask, op1, op2, vl);
|
||||
}
|
||||
|
||||
// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m2_m(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.nds.vfpmadb.mask.nxv8f16.f32.i64(<vscale x 8 x half> poison, <vscale x 8 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 3)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
|
||||
//
|
||||
vfloat16m2_t test_nds_vfpmadb_vf_f16m2_m(vbool8_t mask, vfloat16m2_t op1, float op2, size_t vl) {
|
||||
return __riscv_nds_vfpmadb(mask, op1, op2, vl);
|
||||
}
|
||||
|
||||
// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m4_m(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.nds.vfpmadb.mask.nxv16f16.f32.i64(<vscale x 16 x half> poison, <vscale x 16 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 3)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
|
||||
//
|
||||
vfloat16m4_t test_nds_vfpmadb_vf_f16m4_m(vbool4_t mask, vfloat16m4_t op1, float op2, size_t vl) {
|
||||
return __riscv_nds_vfpmadb(mask, op1, op2, vl);
|
||||
}
|
||||
|
||||
// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m8_m(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.nds.vfpmadb.mask.nxv32f16.f32.i64(<vscale x 32 x half> poison, <vscale x 32 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 3)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
|
||||
//
|
||||
vfloat16m8_t test_nds_vfpmadb_vf_f16m8_m(vbool2_t mask, vfloat16m8_t op1, float op2, size_t vl) {
|
||||
return __riscv_nds_vfpmadb(mask, op1, op2, vl);
|
||||
}
|
||||
|
||||
// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16mf4_rm(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.nds.vfpmadb.nxv1f16.f32.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
|
||||
//
|
||||
vfloat16mf4_t test_nds_vfpmadb_vf_f16mf4_rm(vfloat16mf4_t op1, float op2, size_t vl) {
|
||||
return __riscv_nds_vfpmadb(op1, op2, __RISCV_FRM_RNE, vl);
|
||||
}
|
||||
|
||||
// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16mf2_rm(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.nds.vfpmadb.nxv2f16.f32.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
|
||||
//
|
||||
vfloat16mf2_t test_nds_vfpmadb_vf_f16mf2_rm(vfloat16mf2_t op1, float op2, size_t vl) {
|
||||
return __riscv_nds_vfpmadb(op1, op2, __RISCV_FRM_RNE, vl);
|
||||
}
|
||||
|
||||
// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m1_rm(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.nds.vfpmadb.nxv4f16.f32.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
|
||||
//
|
||||
vfloat16m1_t test_nds_vfpmadb_vf_f16m1_rm(vfloat16m1_t op1, float op2, size_t vl) {
|
||||
return __riscv_nds_vfpmadb(op1, op2, __RISCV_FRM_RNE, vl);
|
||||
}
|
||||
|
||||
// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m2_rm(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.nds.vfpmadb.nxv8f16.f32.i64(<vscale x 8 x half> poison, <vscale x 8 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
|
||||
//
|
||||
vfloat16m2_t test_nds_vfpmadb_vf_f16m2_rm(vfloat16m2_t op1, float op2, size_t vl) {
|
||||
return __riscv_nds_vfpmadb(op1, op2, __RISCV_FRM_RNE, vl);
|
||||
}
|
||||
|
// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m4_rm(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.nds.vfpmadb.nxv16f16.f32.i64(<vscale x 16 x half> poison, <vscale x 16 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_nds_vfpmadb_vf_f16m4_rm(vfloat16m4_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadb(op1, op2, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m8_rm(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.nds.vfpmadb.nxv32f16.f32.i64(<vscale x 32 x half> poison, <vscale x 32 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_nds_vfpmadb_vf_f16m8_rm(vfloat16m8_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadb(op1, op2, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16mf4_rm_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.nds.vfpmadb.mask.nxv1f16.f32.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_nds_vfpmadb_vf_f16mf4_rm_m(vbool64_t mask, vfloat16mf4_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadb(mask, op1, op2, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16mf2_rm_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.nds.vfpmadb.mask.nxv2f16.f32.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_nds_vfpmadb_vf_f16mf2_rm_m(vbool32_t mask, vfloat16mf2_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadb(mask, op1, op2, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m1_rm_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.nds.vfpmadb.mask.nxv4f16.f32.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_nds_vfpmadb_vf_f16m1_rm_m(vbool16_t mask, vfloat16m1_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadb(mask, op1, op2, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m2_rm_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.nds.vfpmadb.mask.nxv8f16.f32.i64(<vscale x 8 x half> poison, <vscale x 8 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_nds_vfpmadb_vf_f16m2_rm_m(vbool8_t mask, vfloat16m2_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadb(mask, op1, op2, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m4_rm_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.nds.vfpmadb.mask.nxv16f16.f32.i64(<vscale x 16 x half> poison, <vscale x 16 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_nds_vfpmadb_vf_f16m4_rm_m(vbool4_t mask, vfloat16m4_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadb(mask, op1, op2, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m8_rm_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.nds.vfpmadb.mask.nxv32f16.f32.i64(<vscale x 32 x half> poison, <vscale x 32 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_nds_vfpmadb_vf_f16m8_rm_m(vbool2_t mask, vfloat16m8_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadb(mask, op1, op2, __RISCV_FRM_RNE, vl);
}
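
// Illustrative sketch (assumed usage; the helper name is hypothetical): a
// caller of the masked, explicit-rounding-mode overload exercised above.
// __RISCV_FRM_RNE selects round-to-nearest-even (lowered as `i64 0` in the
// checked IR) instead of the dynamic frm CSR.
static inline vfloat16m1_t madb_rne_masked(vbool16_t mask, vfloat16m1_t v,
                                           float s, size_t vl) {
  return __riscv_nds_vfpmadb(mask, v, s, __RISCV_FRM_RNE, vl);
}
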
@@ -0,0 +1,225 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
// RUN:   -target-feature +zvfhmin \
// RUN:   -target-feature +xandesvpackfph -disable-O0-optnone \
// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
// RUN:   FileCheck --check-prefix=CHECK-RV64 %s

#include <andes_vector.h>

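// Operand layout in the calls checked below (per the usual RVV intrinsic
// lowering, which these builtins appear to follow): `i64 7` is the
// rounding-mode operand (7 = dynamic, i.e. use the frm CSR), the final
// `i64` operand is the vector length, and masked calls carry one extra
// policy immediate (`i64 3` = tail agnostic, mask agnostic for the plain
// `_m` forms).
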
// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.nds.vfpmadt.nxv1f16.f32.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 7, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_nds_vfpmadt_vf_f16mf4(vfloat16mf4_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadt(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.nds.vfpmadt.nxv2f16.f32.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 7, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_nds_vfpmadt_vf_f16mf2(vfloat16mf2_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadt(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.nds.vfpmadt.nxv4f16.f32.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 7, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_nds_vfpmadt_vf_f16m1(vfloat16m1_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadt(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.nds.vfpmadt.nxv8f16.f32.i64(<vscale x 8 x half> poison, <vscale x 8 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 7, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_nds_vfpmadt_vf_f16m2(vfloat16m2_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadt(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.nds.vfpmadt.nxv16f16.f32.i64(<vscale x 16 x half> poison, <vscale x 16 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 7, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_nds_vfpmadt_vf_f16m4(vfloat16m4_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadt(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.nds.vfpmadt.nxv32f16.f32.i64(<vscale x 32 x half> poison, <vscale x 32 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 7, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_nds_vfpmadt_vf_f16m8(vfloat16m8_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadt(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16mf4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.nds.vfpmadt.mask.nxv1f16.f32.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_nds_vfpmadt_vf_f16mf4_m(vbool64_t mask, vfloat16mf4_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadt(mask, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.nds.vfpmadt.mask.nxv2f16.f32.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_nds_vfpmadt_vf_f16mf2_m(vbool32_t mask, vfloat16mf2_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadt(mask, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.nds.vfpmadt.mask.nxv4f16.f32.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_nds_vfpmadt_vf_f16m1_m(vbool16_t mask, vfloat16m1_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadt(mask, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.nds.vfpmadt.mask.nxv8f16.f32.i64(<vscale x 8 x half> poison, <vscale x 8 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_nds_vfpmadt_vf_f16m2_m(vbool8_t mask, vfloat16m2_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadt(mask, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.nds.vfpmadt.mask.nxv16f16.f32.i64(<vscale x 16 x half> poison, <vscale x 16 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_nds_vfpmadt_vf_f16m4_m(vbool4_t mask, vfloat16m4_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadt(mask, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.nds.vfpmadt.mask.nxv32f16.f32.i64(<vscale x 32 x half> poison, <vscale x 32 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_nds_vfpmadt_vf_f16m8_m(vbool2_t mask, vfloat16m8_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadt(mask, op1, op2, vl);
}

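// The `_rm` variants below pass an explicit rounding mode instead of the
// dynamic default, so the `i64 7` operand above becomes `i64 0`
// (__RISCV_FRM_RNE) in the checked IR.
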
// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16mf4_rm(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.nds.vfpmadt.nxv1f16.f32.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_nds_vfpmadt_vf_f16mf4_rm(vfloat16mf4_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadt(op1, op2, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16mf2_rm(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.nds.vfpmadt.nxv2f16.f32.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_nds_vfpmadt_vf_f16mf2_rm(vfloat16mf2_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadt(op1, op2, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m1_rm(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.nds.vfpmadt.nxv4f16.f32.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_nds_vfpmadt_vf_f16m1_rm(vfloat16m1_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadt(op1, op2, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m2_rm(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.nds.vfpmadt.nxv8f16.f32.i64(<vscale x 8 x half> poison, <vscale x 8 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_nds_vfpmadt_vf_f16m2_rm(vfloat16m2_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadt(op1, op2, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m4_rm(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.nds.vfpmadt.nxv16f16.f32.i64(<vscale x 16 x half> poison, <vscale x 16 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_nds_vfpmadt_vf_f16m4_rm(vfloat16m4_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadt(op1, op2, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m8_rm(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.nds.vfpmadt.nxv32f16.f32.i64(<vscale x 32 x half> poison, <vscale x 32 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_nds_vfpmadt_vf_f16m8_rm(vfloat16m8_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadt(op1, op2, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16mf4_rm_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.nds.vfpmadt.mask.nxv1f16.f32.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_nds_vfpmadt_vf_f16mf4_rm_m(vbool64_t mask, vfloat16mf4_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadt(mask, op1, op2, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16mf2_rm_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.nds.vfpmadt.mask.nxv2f16.f32.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_nds_vfpmadt_vf_f16mf2_rm_m(vbool32_t mask, vfloat16mf2_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadt(mask, op1, op2, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m1_rm_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.nds.vfpmadt.mask.nxv4f16.f32.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_nds_vfpmadt_vf_f16m1_rm_m(vbool16_t mask, vfloat16m1_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadt(mask, op1, op2, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m2_rm_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.nds.vfpmadt.mask.nxv8f16.f32.i64(<vscale x 8 x half> poison, <vscale x 8 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_nds_vfpmadt_vf_f16m2_rm_m(vbool8_t mask, vfloat16m2_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadt(mask, op1, op2, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m4_rm_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.nds.vfpmadt.mask.nxv16f16.f32.i64(<vscale x 16 x half> poison, <vscale x 16 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_nds_vfpmadt_vf_f16m4_rm_m(vbool4_t mask, vfloat16m4_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadt(mask, op1, op2, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m8_rm_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.nds.vfpmadt.mask.nxv32f16.f32.i64(<vscale x 32 x half> poison, <vscale x 32 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 3)
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_nds_vfpmadt_vf_f16m8_rm_m(vbool2_t mask, vfloat16m8_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadt(mask, op1, op2, __RISCV_FRM_RNE, vl);
}
@@ -0,0 +1,441 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
// RUN:   -target-feature +zvfhmin \
// RUN:   -target-feature +xandesvpackfph -disable-O0-optnone \
// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
// RUN:   FileCheck --check-prefix=CHECK-RV64 %s

#include <andes_vector.h>

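// Policy-suffix summary, as reflected in the immediates checked below:
// `_tu` keeps tail elements from `maskedoff` (unmasked, with a passthru
// operand), `_tum` lowers with policy `i64 2` (tail undisturbed, mask
// agnostic), `_tumu` with `i64 0` (tail and mask undisturbed), and `_mu`
// with `i64 1` (tail agnostic, mask undisturbed).
//
// A minimal sketch of the tail-undisturbed form (assumed usage; the helper
// name is hypothetical): elements past `vl` keep the values already in
// `acc`.
static inline vfloat16m1_t madb_accumulate_tu(vfloat16m1_t acc,
                                              vfloat16m1_t x, float s,
                                              size_t vl) {
  return __riscv_nds_vfpmadb_vf_f16m1_tu(acc, x, s, vl);
}
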
// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16mf4_tu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.nds.vfpmadb.nxv1f16.f32.i64(<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 7, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_nds_vfpmadb_vf_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadb_vf_f16mf4_tu(maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16mf2_tu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.nds.vfpmadb.nxv2f16.f32.i64(<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 7, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_nds_vfpmadb_vf_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadb_vf_f16mf2_tu(maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m1_tu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.nds.vfpmadb.nxv4f16.f32.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 7, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_nds_vfpmadb_vf_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadb_vf_f16m1_tu(maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m2_tu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.nds.vfpmadb.nxv8f16.f32.i64(<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 7, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_nds_vfpmadb_vf_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadb_vf_f16m2_tu(maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m4_tu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.nds.vfpmadb.nxv16f16.f32.i64(<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 7, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_nds_vfpmadb_vf_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadb_vf_f16m4_tu(maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m8_tu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.nds.vfpmadb.nxv32f16.f32.i64(<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 7, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_nds_vfpmadb_vf_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadb_vf_f16m8_tu(maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16mf4_tum(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.nds.vfpmadb.mask.nxv1f16.f32.i64(<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_nds_vfpmadb_vf_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadb_vf_f16mf4_tum(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16mf2_tum(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.nds.vfpmadb.mask.nxv2f16.f32.i64(<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_nds_vfpmadb_vf_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadb_vf_f16mf2_tum(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m1_tum(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.nds.vfpmadb.mask.nxv4f16.f32.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_nds_vfpmadb_vf_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadb_vf_f16m1_tum(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m2_tum(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.nds.vfpmadb.mask.nxv8f16.f32.i64(<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_nds_vfpmadb_vf_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadb_vf_f16m2_tum(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m4_tum(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.nds.vfpmadb.mask.nxv16f16.f32.i64(<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_nds_vfpmadb_vf_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadb_vf_f16m4_tum(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m8_tum(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.nds.vfpmadb.mask.nxv32f16.f32.i64(<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_nds_vfpmadb_vf_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadb_vf_f16m8_tum(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16mf4_tumu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.nds.vfpmadb.mask.nxv1f16.f32.i64(<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_nds_vfpmadb_vf_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadb_vf_f16mf4_tumu(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16mf2_tumu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.nds.vfpmadb.mask.nxv2f16.f32.i64(<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_nds_vfpmadb_vf_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadb_vf_f16mf2_tumu(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m1_tumu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.nds.vfpmadb.mask.nxv4f16.f32.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_nds_vfpmadb_vf_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadb_vf_f16m1_tumu(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m2_tumu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.nds.vfpmadb.mask.nxv8f16.f32.i64(<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_nds_vfpmadb_vf_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadb_vf_f16m2_tumu(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m4_tumu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.nds.vfpmadb.mask.nxv16f16.f32.i64(<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_nds_vfpmadb_vf_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadb_vf_f16m4_tumu(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m8_tumu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.nds.vfpmadb.mask.nxv32f16.f32.i64(<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_nds_vfpmadb_vf_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadb_vf_f16m8_tumu(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16mf4_mu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.nds.vfpmadb.mask.nxv1f16.f32.i64(<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_nds_vfpmadb_vf_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadb_vf_f16mf4_mu(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16mf2_mu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.nds.vfpmadb.mask.nxv2f16.f32.i64(<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_nds_vfpmadb_vf_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadb_vf_f16mf2_mu(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m1_mu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.nds.vfpmadb.mask.nxv4f16.f32.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_nds_vfpmadb_vf_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadb_vf_f16m1_mu(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m2_mu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.nds.vfpmadb.mask.nxv8f16.f32.i64(<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_nds_vfpmadb_vf_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadb_vf_f16m2_mu(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m4_mu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.nds.vfpmadb.mask.nxv16f16.f32.i64(<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_nds_vfpmadb_vf_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadb_vf_f16m4_mu(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m8_mu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.nds.vfpmadb.mask.nxv32f16.f32.i64(<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_nds_vfpmadb_vf_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadb_vf_f16m8_mu(mask, maskedoff, op1, op2, vl);
}

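// The `_rm_*` variants below combine an explicit rounding-mode argument
// (__RISCV_FRM_RNE, lowered as `i64 0`) with the same policy immediates
// as the groups above.
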
// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16mf4_rm_tu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.nds.vfpmadb.nxv1f16.f32.i64(<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_nds_vfpmadb_vf_f16mf4_rm_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadb_vf_f16mf4_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16mf2_rm_tu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.nds.vfpmadb.nxv2f16.f32.i64(<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_nds_vfpmadb_vf_f16mf2_rm_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadb_vf_f16mf2_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m1_rm_tu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.nds.vfpmadb.nxv4f16.f32.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_nds_vfpmadb_vf_f16m1_rm_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadb_vf_f16m1_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m2_rm_tu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.nds.vfpmadb.nxv8f16.f32.i64(<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_nds_vfpmadb_vf_f16m2_rm_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadb_vf_f16m2_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m4_rm_tu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.nds.vfpmadb.nxv16f16.f32.i64(<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_nds_vfpmadb_vf_f16m4_rm_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadb_vf_f16m4_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m8_rm_tu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.nds.vfpmadb.nxv32f16.f32.i64(<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_nds_vfpmadb_vf_f16m8_rm_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadb_vf_f16m8_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16mf4_rm_tum(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.nds.vfpmadb.mask.nxv1f16.f32.i64(<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_nds_vfpmadb_vf_f16mf4_rm_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadb_vf_f16mf4_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16mf2_rm_tum(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.nds.vfpmadb.mask.nxv2f16.f32.i64(<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_nds_vfpmadb_vf_f16mf2_rm_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadb_vf_f16mf2_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m1_rm_tum(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.nds.vfpmadb.mask.nxv4f16.f32.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_nds_vfpmadb_vf_f16m1_rm_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadb_vf_f16m1_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m2_rm_tum(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.nds.vfpmadb.mask.nxv8f16.f32.i64(<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_nds_vfpmadb_vf_f16m2_rm_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadb_vf_f16m2_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m4_rm_tum(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.nds.vfpmadb.mask.nxv16f16.f32.i64(<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_nds_vfpmadb_vf_f16m4_rm_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadb_vf_f16m4_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m8_rm_tum(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.nds.vfpmadb.mask.nxv32f16.f32.i64(<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_nds_vfpmadb_vf_f16m8_rm_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadb_vf_f16m8_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16mf4_rm_tumu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.nds.vfpmadb.mask.nxv1f16.f32.i64(<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_nds_vfpmadb_vf_f16mf4_rm_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadb_vf_f16mf4_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16mf2_rm_tumu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.nds.vfpmadb.mask.nxv2f16.f32.i64(<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_nds_vfpmadb_vf_f16mf2_rm_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadb_vf_f16mf2_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m1_rm_tumu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.nds.vfpmadb.mask.nxv4f16.f32.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_nds_vfpmadb_vf_f16m1_rm_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadb_vf_f16m1_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m2_rm_tumu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.nds.vfpmadb.mask.nxv8f16.f32.i64(<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_nds_vfpmadb_vf_f16m2_rm_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadb_vf_f16m2_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m4_rm_tumu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.nds.vfpmadb.mask.nxv16f16.f32.i64(<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_nds_vfpmadb_vf_f16m4_rm_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadb_vf_f16m4_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m8_rm_tumu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.nds.vfpmadb.mask.nxv32f16.f32.i64(<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_nds_vfpmadb_vf_f16m8_rm_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadb_vf_f16m8_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16mf4_rm_mu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.nds.vfpmadb.mask.nxv1f16.f32.i64(<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_nds_vfpmadb_vf_f16mf4_rm_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadb_vf_f16mf4_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16mf2_rm_mu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.nds.vfpmadb.mask.nxv2f16.f32.i64(<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_nds_vfpmadb_vf_f16mf2_rm_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadb_vf_f16mf2_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m1_rm_mu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.nds.vfpmadb.mask.nxv4f16.f32.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_nds_vfpmadb_vf_f16m1_rm_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadb_vf_f16m1_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m2_rm_mu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.nds.vfpmadb.mask.nxv8f16.f32.i64(<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_nds_vfpmadb_vf_f16m2_rm_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadb_vf_f16m2_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m4_rm_mu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.nds.vfpmadb.mask.nxv16f16.f32.i64(<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_nds_vfpmadb_vf_f16m4_rm_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadb_vf_f16m4_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m8_rm_mu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.nds.vfpmadb.mask.nxv32f16.f32.i64(<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_nds_vfpmadb_vf_f16m8_rm_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadb_vf_f16m8_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
}
@@ -0,0 +1,441 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
// RUN:   -target-feature +zvfhmin \
// RUN:   -target-feature +xandesvpackfph -disable-O0-optnone \
// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
// RUN:   FileCheck --check-prefix=CHECK-RV64 %s

#include <andes_vector.h>

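// A minimal sketch of the masked tail-undisturbed form (assumed usage; the
// helper name is hypothetical): active elements are recomputed, tail
// elements keep `acc`, and masked-off elements are agnostic (policy
// `i64 2`).
static inline vfloat16m1_t madt_tum(vbool16_t m, vfloat16m1_t acc,
                                    vfloat16m1_t x, float s, size_t vl) {
  return __riscv_nds_vfpmadt_vf_f16m1_tum(m, acc, x, s, vl);
}
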
// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16mf4_tu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.nds.vfpmadt.nxv1f16.f32.i64(<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 7, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_nds_vfpmadt_vf_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadt_vf_f16mf4_tu(maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16mf2_tu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.nds.vfpmadt.nxv2f16.f32.i64(<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 7, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_nds_vfpmadt_vf_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadt_vf_f16mf2_tu(maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m1_tu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.nds.vfpmadt.nxv4f16.f32.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 7, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_nds_vfpmadt_vf_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadt_vf_f16m1_tu(maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m2_tu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.nds.vfpmadt.nxv8f16.f32.i64(<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 7, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_nds_vfpmadt_vf_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadt_vf_f16m2_tu(maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m4_tu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.nds.vfpmadt.nxv16f16.f32.i64(<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 7, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_nds_vfpmadt_vf_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadt_vf_f16m4_tu(maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m8_tu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.nds.vfpmadt.nxv32f16.f32.i64(<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 7, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_nds_vfpmadt_vf_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadt_vf_f16m8_tu(maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16mf4_tum(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.nds.vfpmadt.mask.nxv1f16.f32.i64(<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_nds_vfpmadt_vf_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadt_vf_f16mf4_tum(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16mf2_tum(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.nds.vfpmadt.mask.nxv2f16.f32.i64(<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_nds_vfpmadt_vf_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadt_vf_f16mf2_tum(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m1_tum(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.nds.vfpmadt.mask.nxv4f16.f32.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_nds_vfpmadt_vf_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadt_vf_f16m1_tum(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m2_tum(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.nds.vfpmadt.mask.nxv8f16.f32.i64(<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_nds_vfpmadt_vf_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadt_vf_f16m2_tum(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m4_tum(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.nds.vfpmadt.mask.nxv16f16.f32.i64(<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_nds_vfpmadt_vf_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadt_vf_f16m4_tum(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m8_tum(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.nds.vfpmadt.mask.nxv32f16.f32.i64(<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_nds_vfpmadt_vf_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadt_vf_f16m8_tum(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16mf4_tumu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.nds.vfpmadt.mask.nxv1f16.f32.i64(<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_nds_vfpmadt_vf_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadt_vf_f16mf4_tumu(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16mf2_tumu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.nds.vfpmadt.mask.nxv2f16.f32.i64(<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_nds_vfpmadt_vf_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadt_vf_f16mf2_tumu(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m1_tumu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.nds.vfpmadt.mask.nxv4f16.f32.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_nds_vfpmadt_vf_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadt_vf_f16m1_tumu(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m2_tumu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.nds.vfpmadt.mask.nxv8f16.f32.i64(<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_nds_vfpmadt_vf_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadt_vf_f16m2_tumu(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m4_tumu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.nds.vfpmadt.mask.nxv16f16.f32.i64(<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_nds_vfpmadt_vf_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadt_vf_f16m4_tumu(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m8_tumu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.nds.vfpmadt.mask.nxv32f16.f32.i64(<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_nds_vfpmadt_vf_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadt_vf_f16m8_tumu(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16mf4_mu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.nds.vfpmadt.mask.nxv1f16.f32.i64(<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_nds_vfpmadt_vf_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadt_vf_f16mf4_mu(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16mf2_mu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.nds.vfpmadt.mask.nxv2f16.f32.i64(<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_nds_vfpmadt_vf_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadt_vf_f16mf2_mu(mask, maskedoff, op1, op2, vl);
|
||||
}
|
||||
|
||||
// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m1_mu(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.nds.vfpmadt.mask.nxv4f16.f32.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 1)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
|
||||
//
|
||||
vfloat16m1_t test_nds_vfpmadt_vf_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float op2, size_t vl) {
|
||||
return __riscv_nds_vfpmadt_vf_f16m1_mu(mask, maskedoff, op1, op2, vl);
|
||||
}
|
||||
|
||||
// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m2_mu(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.nds.vfpmadt.mask.nxv8f16.f32.i64(<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 1)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
|
||||
//
|
||||
vfloat16m2_t test_nds_vfpmadt_vf_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float op2, size_t vl) {
|
||||
return __riscv_nds_vfpmadt_vf_f16m2_mu(mask, maskedoff, op1, op2, vl);
|
||||
}
|
||||
|
||||
// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m4_mu(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.nds.vfpmadt.mask.nxv16f16.f32.i64(<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 1)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
|
||||
//
|
||||
vfloat16m4_t test_nds_vfpmadt_vf_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float op2, size_t vl) {
|
||||
return __riscv_nds_vfpmadt_vf_f16m4_mu(mask, maskedoff, op1, op2, vl);
|
||||
}
|
||||
|
||||
// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m8_mu(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.nds.vfpmadt.mask.nxv32f16.f32.i64(<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 1)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
|
||||
//
|
||||
vfloat16m8_t test_nds_vfpmadt_vf_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float op2, size_t vl) {
|
||||
return __riscv_nds_vfpmadt_vf_f16m8_mu(mask, maskedoff, op1, op2, vl);
|
||||
}
|
||||
|
||||
// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16mf4_rm_tu(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.nds.vfpmadt.nxv1f16.f32.i64(<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
|
||||
//
|
||||
vfloat16mf4_t test_nds_vfpmadt_vf_f16mf4_rm_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float op2, size_t vl) {
|
||||
return __riscv_nds_vfpmadt_vf_f16mf4_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
|
||||
}
|
||||
|
||||
// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16mf2_rm_tu(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.nds.vfpmadt.nxv2f16.f32.i64(<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
|
||||
//
|
||||
vfloat16mf2_t test_nds_vfpmadt_vf_f16mf2_rm_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float op2, size_t vl) {
|
||||
return __riscv_nds_vfpmadt_vf_f16mf2_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
|
||||
}
|
||||
|
||||
// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m1_rm_tu(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.nds.vfpmadt.nxv4f16.f32.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
|
||||
//
|
||||
vfloat16m1_t test_nds_vfpmadt_vf_f16m1_rm_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, float op2, size_t vl) {
|
||||
return __riscv_nds_vfpmadt_vf_f16m1_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
|
||||
}
|
||||
|
||||
// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m2_rm_tu(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.nds.vfpmadt.nxv8f16.f32.i64(<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
|
||||
//
|
||||
vfloat16m2_t test_nds_vfpmadt_vf_f16m2_rm_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, float op2, size_t vl) {
|
||||
return __riscv_nds_vfpmadt_vf_f16m2_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
|
||||
}
|
||||
|
||||
// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m4_rm_tu(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.nds.vfpmadt.nxv16f16.f32.i64(<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
|
||||
//
|
||||
vfloat16m4_t test_nds_vfpmadt_vf_f16m4_rm_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, float op2, size_t vl) {
|
||||
return __riscv_nds_vfpmadt_vf_f16m4_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
|
||||
}
|
||||
|
||||
// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m8_rm_tu(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.nds.vfpmadt.nxv32f16.f32.i64(<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 0, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
|
||||
//
|
||||
vfloat16m8_t test_nds_vfpmadt_vf_f16m8_rm_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, float op2, size_t vl) {
|
||||
return __riscv_nds_vfpmadt_vf_f16m8_rm_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
|
||||
}
|
||||
|
||||
// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16mf4_rm_tum(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.nds.vfpmadt.mask.nxv1f16.f32.i64(<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 2)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
|
||||
//
|
||||
vfloat16mf4_t test_nds_vfpmadt_vf_f16mf4_rm_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float op2, size_t vl) {
|
||||
return __riscv_nds_vfpmadt_vf_f16mf4_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
|
||||
}
|
||||
|
||||
// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16mf2_rm_tum(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.nds.vfpmadt.mask.nxv2f16.f32.i64(<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 2)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
|
||||
//
|
||||
vfloat16mf2_t test_nds_vfpmadt_vf_f16mf2_rm_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float op2, size_t vl) {
|
||||
return __riscv_nds_vfpmadt_vf_f16mf2_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
|
||||
}
|
||||
|
||||
// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m1_rm_tum(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.nds.vfpmadt.mask.nxv4f16.f32.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 2)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
|
||||
//
|
||||
vfloat16m1_t test_nds_vfpmadt_vf_f16m1_rm_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float op2, size_t vl) {
|
||||
return __riscv_nds_vfpmadt_vf_f16m1_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
|
||||
}
|
||||
|
||||
// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m2_rm_tum(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.nds.vfpmadt.mask.nxv8f16.f32.i64(<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 2)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
|
||||
//
|
||||
vfloat16m2_t test_nds_vfpmadt_vf_f16m2_rm_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float op2, size_t vl) {
|
||||
return __riscv_nds_vfpmadt_vf_f16m2_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
|
||||
}
|
||||
|
||||
// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m4_rm_tum(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.nds.vfpmadt.mask.nxv16f16.f32.i64(<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 2)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
|
||||
//
|
||||
vfloat16m4_t test_nds_vfpmadt_vf_f16m4_rm_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float op2, size_t vl) {
|
||||
return __riscv_nds_vfpmadt_vf_f16m4_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
|
||||
}
|
||||
|
||||
// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m8_rm_tum(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.nds.vfpmadt.mask.nxv32f16.f32.i64(<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 2)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
|
||||
//
|
||||
vfloat16m8_t test_nds_vfpmadt_vf_f16m8_rm_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float op2, size_t vl) {
|
||||
return __riscv_nds_vfpmadt_vf_f16m8_rm_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
|
||||
}
|
||||
|
||||
// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16mf4_rm_tumu(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.nds.vfpmadt.mask.nxv1f16.f32.i64(<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
|
||||
//
|
||||
vfloat16mf4_t test_nds_vfpmadt_vf_f16mf4_rm_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float op2, size_t vl) {
|
||||
return __riscv_nds_vfpmadt_vf_f16mf4_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
|
||||
}
|
||||
|
||||
// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16mf2_rm_tumu(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.nds.vfpmadt.mask.nxv2f16.f32.i64(<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
|
||||
//
|
||||
vfloat16mf2_t test_nds_vfpmadt_vf_f16mf2_rm_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float op2, size_t vl) {
|
||||
return __riscv_nds_vfpmadt_vf_f16mf2_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
|
||||
}
|
||||
|
||||
// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m1_rm_tumu(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.nds.vfpmadt.mask.nxv4f16.f32.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
|
||||
//
|
||||
vfloat16m1_t test_nds_vfpmadt_vf_f16m1_rm_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float op2, size_t vl) {
|
||||
return __riscv_nds_vfpmadt_vf_f16m1_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
|
||||
}
|
||||
|
||||
// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m2_rm_tumu(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.nds.vfpmadt.mask.nxv8f16.f32.i64(<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
|
||||
//
|
||||
vfloat16m2_t test_nds_vfpmadt_vf_f16m2_rm_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float op2, size_t vl) {
|
||||
return __riscv_nds_vfpmadt_vf_f16m2_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
|
||||
}
|
||||
|
||||
// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m4_rm_tumu(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.nds.vfpmadt.mask.nxv16f16.f32.i64(<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
|
||||
//
|
||||
vfloat16m4_t test_nds_vfpmadt_vf_f16m4_rm_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float op2, size_t vl) {
|
||||
return __riscv_nds_vfpmadt_vf_f16m4_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
|
||||
}
|
||||
|
||||
// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m8_rm_tumu(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.nds.vfpmadt.mask.nxv32f16.f32.i64(<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
|
||||
//
|
||||
vfloat16m8_t test_nds_vfpmadt_vf_f16m8_rm_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float op2, size_t vl) {
|
||||
return __riscv_nds_vfpmadt_vf_f16m8_rm_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
|
||||
}
|
||||
|
||||
// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16mf4_rm_mu(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.nds.vfpmadt.mask.nxv1f16.f32.i64(<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 1)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
|
||||
//
|
||||
vfloat16mf4_t test_nds_vfpmadt_vf_f16mf4_rm_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float op2, size_t vl) {
|
||||
return __riscv_nds_vfpmadt_vf_f16mf4_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
|
||||
}
|
||||
|
||||
// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16mf2_rm_mu(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.nds.vfpmadt.mask.nxv2f16.f32.i64(<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 1)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
|
||||
//
|
||||
vfloat16mf2_t test_nds_vfpmadt_vf_f16mf2_rm_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float op2, size_t vl) {
|
||||
return __riscv_nds_vfpmadt_vf_f16mf2_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
|
||||
}
|
||||
|
||||
// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m1_rm_mu(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.nds.vfpmadt.mask.nxv4f16.f32.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 1)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
|
||||
//
|
||||
vfloat16m1_t test_nds_vfpmadt_vf_f16m1_rm_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float op2, size_t vl) {
|
||||
return __riscv_nds_vfpmadt_vf_f16m1_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
|
||||
}
|
||||
|
||||
// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m2_rm_mu(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.nds.vfpmadt.mask.nxv8f16.f32.i64(<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 1)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
|
||||
//
|
||||
vfloat16m2_t test_nds_vfpmadt_vf_f16m2_rm_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float op2, size_t vl) {
|
||||
return __riscv_nds_vfpmadt_vf_f16m2_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
|
||||
}
|
||||
|
||||
// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m4_rm_mu(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.nds.vfpmadt.mask.nxv16f16.f32.i64(<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 1)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
|
||||
//
|
||||
vfloat16m4_t test_nds_vfpmadt_vf_f16m4_rm_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float op2, size_t vl) {
|
||||
return __riscv_nds_vfpmadt_vf_f16m4_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
|
||||
}
|
||||
|
||||
// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m8_rm_mu(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.nds.vfpmadt.mask.nxv32f16.f32.i64(<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 1)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
|
||||
//
|
||||
vfloat16m8_t test_nds_vfpmadt_vf_f16m8_rm_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float op2, size_t vl) {
|
||||
return __riscv_nds_vfpmadt_vf_f16m8_rm_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
|
||||
}
|
||||
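
A minimal usage sketch of the type-suffixed policy intrinsics exercised by the checks above, relying only on prototypes that these tests already call; the wrapper names and <stddef.h> include are illustrative, not part of the test file.

#include <stddef.h>
#include <andes_vector.h>

// Masked multiply-add on the top FP16 halves, keeping tail and inactive
// destination elements from `acc` (tumu policy); rounding mode comes from
// the dynamic frm CSR (the i64 7 operand in the checks above).
vfloat16m1_t scale_top_tumu(vbool16_t mask, vfloat16m1_t acc,
                            vfloat16m1_t v, float s, size_t vl) {
  return __riscv_nds_vfpmadt_vf_f16m1_tumu(mask, acc, v, s, vl);
}

// The _rm_ variants take a static rounding-mode operand instead; here
// round-to-nearest-even, which lowers to the i64 0 operand.
vfloat16m1_t scale_top_tumu_rne(vbool16_t mask, vfloat16m1_t acc,
                                vfloat16m1_t v, float s, size_t vl) {
  return __riscv_nds_vfpmadt_vf_f16m1_rm_tumu(mask, acc, v, s,
                                              __RISCV_FRM_RNE, vl);
}
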
@@ -0,0 +1,441 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
// RUN: -target-feature +zvfhmin \
// RUN: -target-feature +xandesvpackfph -disable-O0-optnone \
// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
// RUN: FileCheck --check-prefix=CHECK-RV64 %s

#include <andes_vector.h>

// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16mf4_tu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.nds.vfpmadb.nxv1f16.f32.i64(<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 7, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_nds_vfpmadb_vf_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadb_tu(maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16mf2_tu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.nds.vfpmadb.nxv2f16.f32.i64(<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 7, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_nds_vfpmadb_vf_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadb_tu(maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m1_tu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.nds.vfpmadb.nxv4f16.f32.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 7, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_nds_vfpmadb_vf_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadb_tu(maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m2_tu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.nds.vfpmadb.nxv8f16.f32.i64(<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 7, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_nds_vfpmadb_vf_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadb_tu(maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m4_tu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.nds.vfpmadb.nxv16f16.f32.i64(<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 7, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_nds_vfpmadb_vf_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadb_tu(maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m8_tu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.nds.vfpmadb.nxv32f16.f32.i64(<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 7, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_nds_vfpmadb_vf_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadb_tu(maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16mf4_tum(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.nds.vfpmadb.mask.nxv1f16.f32.i64(<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_nds_vfpmadb_vf_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadb_tum(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16mf2_tum(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.nds.vfpmadb.mask.nxv2f16.f32.i64(<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_nds_vfpmadb_vf_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadb_tum(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m1_tum(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.nds.vfpmadb.mask.nxv4f16.f32.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_nds_vfpmadb_vf_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadb_tum(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m2_tum(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.nds.vfpmadb.mask.nxv8f16.f32.i64(<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_nds_vfpmadb_vf_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadb_tum(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m4_tum(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.nds.vfpmadb.mask.nxv16f16.f32.i64(<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_nds_vfpmadb_vf_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadb_tum(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m8_tum(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.nds.vfpmadb.mask.nxv32f16.f32.i64(<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_nds_vfpmadb_vf_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadb_tum(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16mf4_tumu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.nds.vfpmadb.mask.nxv1f16.f32.i64(<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_nds_vfpmadb_vf_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadb_tumu(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16mf2_tumu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.nds.vfpmadb.mask.nxv2f16.f32.i64(<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_nds_vfpmadb_vf_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadb_tumu(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m1_tumu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.nds.vfpmadb.mask.nxv4f16.f32.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_nds_vfpmadb_vf_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadb_tumu(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m2_tumu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.nds.vfpmadb.mask.nxv8f16.f32.i64(<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_nds_vfpmadb_vf_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadb_tumu(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m4_tumu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.nds.vfpmadb.mask.nxv16f16.f32.i64(<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_nds_vfpmadb_vf_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadb_tumu(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m8_tumu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.nds.vfpmadb.mask.nxv32f16.f32.i64(<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_nds_vfpmadb_vf_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadb_tumu(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16mf4_mu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.nds.vfpmadb.mask.nxv1f16.f32.i64(<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_nds_vfpmadb_vf_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadb_mu(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16mf2_mu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.nds.vfpmadb.mask.nxv2f16.f32.i64(<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_nds_vfpmadb_vf_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadb_mu(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m1_mu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.nds.vfpmadb.mask.nxv4f16.f32.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_nds_vfpmadb_vf_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadb_mu(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m2_mu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.nds.vfpmadb.mask.nxv8f16.f32.i64(<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_nds_vfpmadb_vf_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadb_mu(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m4_mu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.nds.vfpmadb.mask.nxv16f16.f32.i64(<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_nds_vfpmadb_vf_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadb_mu(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m8_mu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.nds.vfpmadb.mask.nxv32f16.f32.i64(<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_nds_vfpmadb_vf_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadb_mu(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16mf4_rm_tu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.nds.vfpmadb.nxv1f16.f32.i64(<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_nds_vfpmadb_vf_f16mf4_rm_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadb_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16mf2_rm_tu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.nds.vfpmadb.nxv2f16.f32.i64(<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_nds_vfpmadb_vf_f16mf2_rm_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadb_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m1_rm_tu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.nds.vfpmadb.nxv4f16.f32.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_nds_vfpmadb_vf_f16m1_rm_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadb_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m2_rm_tu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.nds.vfpmadb.nxv8f16.f32.i64(<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_nds_vfpmadb_vf_f16m2_rm_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadb_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m4_rm_tu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.nds.vfpmadb.nxv16f16.f32.i64(<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_nds_vfpmadb_vf_f16m4_rm_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadb_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m8_rm_tu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.nds.vfpmadb.nxv32f16.f32.i64(<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_nds_vfpmadb_vf_f16m8_rm_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadb_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16mf4_rm_tum(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.nds.vfpmadb.mask.nxv1f16.f32.i64(<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_nds_vfpmadb_vf_f16mf4_rm_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadb_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16mf2_rm_tum(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.nds.vfpmadb.mask.nxv2f16.f32.i64(<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_nds_vfpmadb_vf_f16mf2_rm_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadb_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m1_rm_tum(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.nds.vfpmadb.mask.nxv4f16.f32.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_nds_vfpmadb_vf_f16m1_rm_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadb_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m2_rm_tum(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.nds.vfpmadb.mask.nxv8f16.f32.i64(<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_nds_vfpmadb_vf_f16m2_rm_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadb_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m4_rm_tum(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.nds.vfpmadb.mask.nxv16f16.f32.i64(<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_nds_vfpmadb_vf_f16m4_rm_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadb_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m8_rm_tum(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.nds.vfpmadb.mask.nxv32f16.f32.i64(<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_nds_vfpmadb_vf_f16m8_rm_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadb_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16mf4_rm_tumu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.nds.vfpmadb.mask.nxv1f16.f32.i64(<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_nds_vfpmadb_vf_f16mf4_rm_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadb_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16mf2_rm_tumu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.nds.vfpmadb.mask.nxv2f16.f32.i64(<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_nds_vfpmadb_vf_f16mf2_rm_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadb_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m1_rm_tumu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.nds.vfpmadb.mask.nxv4f16.f32.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_nds_vfpmadb_vf_f16m1_rm_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadb_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m2_rm_tumu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.nds.vfpmadb.mask.nxv8f16.f32.i64(<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_nds_vfpmadb_vf_f16m2_rm_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadb_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m4_rm_tumu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.nds.vfpmadb.mask.nxv16f16.f32.i64(<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_nds_vfpmadb_vf_f16m4_rm_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadb_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m8_rm_tumu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.nds.vfpmadb.mask.nxv32f16.f32.i64(<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_nds_vfpmadb_vf_f16m8_rm_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadb_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16mf4_rm_mu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.nds.vfpmadb.mask.nxv1f16.f32.i64(<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_nds_vfpmadb_vf_f16mf4_rm_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadb_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16mf2_rm_mu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.nds.vfpmadb.mask.nxv2f16.f32.i64(<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_nds_vfpmadb_vf_f16mf2_rm_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadb_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m1_rm_mu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.nds.vfpmadb.mask.nxv4f16.f32.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_nds_vfpmadb_vf_f16m1_rm_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadb_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m2_rm_mu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.nds.vfpmadb.mask.nxv8f16.f32.i64(<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_nds_vfpmadb_vf_f16m2_rm_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadb_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m4_rm_mu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.nds.vfpmadb.mask.nxv16f16.f32.i64(<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_nds_vfpmadb_vf_f16m4_rm_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadb_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadb_vf_f16m8_rm_mu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.nds.vfpmadb.mask.nxv32f16.f32.i64(<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_nds_vfpmadb_vf_f16m8_rm_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadb_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
}
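
For contrast with the type-suffixed names in the first file, a minimal sketch of the overloaded policy API this second test file exercises: the element type and LMUL are inferred from the operand types, so a single __riscv_nds_vfpmadb_tu spelling serves every vfloat16 type. The wrapper name and <stddef.h> include are illustrative.

#include <stddef.h>
#include <andes_vector.h>

// Bottom-half multiply-add with a tail-undisturbed destination; the call
// resolves against the vfloat16m4_t operands without a type suffix.
vfloat16m4_t madb_scale_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1,
                           float op2, size_t vl) {
  return __riscv_nds_vfpmadb_tu(maskedoff, op1, op2, vl);
}
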
@@ -0,0 +1,441 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x \
// RUN: -target-feature +zvfhmin \
// RUN: -target-feature +xandesvpackfph -disable-O0-optnone \
// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
// RUN: FileCheck --check-prefix=CHECK-RV64 %s

#include <andes_vector.h>

// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16mf4_tu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.nds.vfpmadt.nxv1f16.f32.i64(<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 7, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_nds_vfpmadt_vf_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadt_tu(maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16mf2_tu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.nds.vfpmadt.nxv2f16.f32.i64(<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 7, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_nds_vfpmadt_vf_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadt_tu(maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m1_tu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.nds.vfpmadt.nxv4f16.f32.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 7, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_nds_vfpmadt_vf_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadt_tu(maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m2_tu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.nds.vfpmadt.nxv8f16.f32.i64(<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 7, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_nds_vfpmadt_vf_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadt_tu(maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m4_tu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.nds.vfpmadt.nxv16f16.f32.i64(<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 7, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_nds_vfpmadt_vf_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadt_tu(maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m8_tu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.nds.vfpmadt.nxv32f16.f32.i64(<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 7, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_nds_vfpmadt_vf_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadt_tu(maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16mf4_tum(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.nds.vfpmadt.mask.nxv1f16.f32.i64(<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_nds_vfpmadt_vf_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadt_tum(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16mf2_tum(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.nds.vfpmadt.mask.nxv2f16.f32.i64(<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_nds_vfpmadt_vf_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadt_tum(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m1_tum(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.nds.vfpmadt.mask.nxv4f16.f32.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_nds_vfpmadt_vf_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadt_tum(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m2_tum(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.nds.vfpmadt.mask.nxv8f16.f32.i64(<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_nds_vfpmadt_vf_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadt_tum(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m4_tum(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.nds.vfpmadt.mask.nxv16f16.f32.i64(<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_nds_vfpmadt_vf_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadt_tum(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m8_tum(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.nds.vfpmadt.mask.nxv32f16.f32.i64(<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_nds_vfpmadt_vf_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadt_tum(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16mf4_tumu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.nds.vfpmadt.mask.nxv1f16.f32.i64(<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_nds_vfpmadt_vf_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadt_tumu(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16mf2_tumu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.nds.vfpmadt.mask.nxv2f16.f32.i64(<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_nds_vfpmadt_vf_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadt_tumu(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m1_tumu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.nds.vfpmadt.mask.nxv4f16.f32.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_nds_vfpmadt_vf_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadt_tumu(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m2_tumu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.nds.vfpmadt.mask.nxv8f16.f32.i64(<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_nds_vfpmadt_vf_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadt_tumu(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m4_tumu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.nds.vfpmadt.mask.nxv16f16.f32.i64(<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_nds_vfpmadt_vf_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadt_tumu(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m8_tumu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.nds.vfpmadt.mask.nxv32f16.f32.i64(<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_nds_vfpmadt_vf_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadt_tumu(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16mf4_mu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.nds.vfpmadt.mask.nxv1f16.f32.i64(<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_nds_vfpmadt_vf_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadt_mu(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16mf2_mu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.nds.vfpmadt.mask.nxv2f16.f32.i64(<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_nds_vfpmadt_vf_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadt_mu(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m1_mu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.nds.vfpmadt.mask.nxv4f16.f32.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_nds_vfpmadt_vf_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadt_mu(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m2_mu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.nds.vfpmadt.mask.nxv8f16.f32.i64(<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_nds_vfpmadt_vf_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadt_mu(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m4_mu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.nds.vfpmadt.mask.nxv16f16.f32.i64(<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_nds_vfpmadt_vf_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadt_mu(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m8_mu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.nds.vfpmadt.mask.nxv32f16.f32.i64(<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 7, i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_nds_vfpmadt_vf_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadt_mu(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16mf4_rm_tu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.nds.vfpmadt.nxv1f16.f32.i64(<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_nds_vfpmadt_vf_f16mf4_rm_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadt_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16mf2_rm_tu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.nds.vfpmadt.nxv2f16.f32.i64(<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_nds_vfpmadt_vf_f16mf2_rm_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadt_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m1_rm_tu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.nds.vfpmadt.nxv4f16.f32.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_nds_vfpmadt_vf_f16m1_rm_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadt_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m2_rm_tu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.nds.vfpmadt.nxv8f16.f32.i64(<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_nds_vfpmadt_vf_f16m2_rm_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadt_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m4_rm_tu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.nds.vfpmadt.nxv16f16.f32.i64(<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_nds_vfpmadt_vf_f16m4_rm_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadt_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m8_rm_tu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.nds.vfpmadt.nxv32f16.f32.i64(<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], float [[OP2:%.*]], i64 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_nds_vfpmadt_vf_f16m8_rm_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadt_tu(maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16mf4_rm_tum(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.nds.vfpmadt.mask.nxv1f16.f32.i64(<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_nds_vfpmadt_vf_f16mf4_rm_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadt_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16mf2_rm_tum(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.nds.vfpmadt.mask.nxv2f16.f32.i64(<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_nds_vfpmadt_vf_f16mf2_rm_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadt_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m1_rm_tum(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.nds.vfpmadt.mask.nxv4f16.f32.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_nds_vfpmadt_vf_f16m1_rm_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadt_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m2_rm_tum(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.nds.vfpmadt.mask.nxv8f16.f32.i64(<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_nds_vfpmadt_vf_f16m2_rm_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadt_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m4_rm_tum(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.nds.vfpmadt.mask.nxv16f16.f32.i64(<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_nds_vfpmadt_vf_f16m4_rm_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadt_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m8_rm_tum(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.nds.vfpmadt.mask.nxv32f16.f32.i64(<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_nds_vfpmadt_vf_f16m8_rm_tum(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadt_tum(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16mf4_rm_tumu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.nds.vfpmadt.mask.nxv1f16.f32.i64(<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_nds_vfpmadt_vf_f16mf4_rm_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadt_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16mf2_rm_tumu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.nds.vfpmadt.mask.nxv2f16.f32.i64(<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_nds_vfpmadt_vf_f16mf2_rm_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadt_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m1_rm_tumu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.nds.vfpmadt.mask.nxv4f16.f32.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_nds_vfpmadt_vf_f16m1_rm_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadt_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m2_rm_tumu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.nds.vfpmadt.mask.nxv8f16.f32.i64(<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_nds_vfpmadt_vf_f16m2_rm_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadt_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m4_rm_tumu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.nds.vfpmadt.mask.nxv16f16.f32.i64(<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_nds_vfpmadt_vf_f16m4_rm_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadt_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m8_rm_tumu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.nds.vfpmadt.mask.nxv32f16.f32.i64(<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_nds_vfpmadt_vf_f16m8_rm_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadt_tumu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16mf4_rm_mu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.nds.vfpmadt.mask.nxv1f16.f32.i64(<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_nds_vfpmadt_vf_f16mf4_rm_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadt_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16mf2_rm_mu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.nds.vfpmadt.mask.nxv2f16.f32.i64(<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_nds_vfpmadt_vf_f16mf2_rm_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadt_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m1_rm_mu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.nds.vfpmadt.mask.nxv4f16.f32.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_nds_vfpmadt_vf_f16m1_rm_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadt_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m2_rm_mu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.nds.vfpmadt.mask.nxv8f16.f32.i64(<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_nds_vfpmadt_vf_f16m2_rm_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadt_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m4_rm_mu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.nds.vfpmadt.mask.nxv16f16.f32.i64(<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_nds_vfpmadt_vf_f16m4_rm_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadt_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: @test_nds_vfpmadt_vf_f16m8_rm_mu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.nds.vfpmadt.mask.nxv32f16.f32.i64(<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 0, i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_nds_vfpmadt_vf_f16m8_rm_mu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, float op2, size_t vl) {
  return __riscv_nds_vfpmadt_mu(mask, maskedoff, op1, op2, __RISCV_FRM_RNE, vl);
}
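
For reference, a minimal usage sketch (not part of this diff) built from the signatures exercised above. It assumes a toolchain with the xandesvpackfph target feature enabled; the function name scale_top is illustrative, and the exact packed-FP16 multiply-add semantics are as defined in the Andes intrinsic documentation linked in the commit message:

#include <andes_vector.h>
#include <stddef.h>

// Tail-undisturbed variant: lanes at index >= vl keep the values of
// 'passthru' rather than being left undefined.
vfloat16m1_t scale_top(vfloat16m1_t passthru, vfloat16m1_t v, float s, size_t vl) {
  return __riscv_nds_vfpmadt_tu(passthru, v, s, vl);
}
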
@@ -2,7 +2,7 @@
// RUN: 2>&1 | FileCheck %s

#pragma clang riscv intrinsic vvvv
// CHECK: warning: unexpected argument 'vvvv' to '#pragma riscv'; expected 'vector' or 'sifive_vector' [-Wignored-pragmas]
// CHECK: warning: unexpected argument 'vvvv' to '#pragma riscv'; expected 'vector', 'sifive_vector' or 'andes_vector' [-Wignored-pragmas]

#pragma clang riscv what + 3241
// CHECK: warning: unexpected argument 'what' to '#pragma riscv'; expected 'intrinsic' [-Wignored-pragmas]
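
The updated diagnostic above advertises a new accepted pragma argument. A minimal sketch (an assumption, mirroring the existing 'vector' and 'sifive_vector' arguments) of opting into the Andes vector intrinsics via the pragma instead of including <andes_vector.h>:

// Hypothetical usage; the accepted spelling follows the warning text above.
#pragma clang riscv intrinsic andes_vector
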
@@ -774,6 +774,7 @@ void RVVEmitter::createRVVIntrinsics(
      StringSwitch<RVVRequire>(RequiredFeature)
          .Case("RV64", RVV_REQ_RV64)
          .Case("Zvfhmin", RVV_REQ_Zvfhmin)
          .Case("Xandesvpackfph", RVV_REQ_Xandesvpackfph)
          .Case("Xsfvcp", RVV_REQ_Xsfvcp)
          .Case("Xsfvfnrclipxfqf", RVV_REQ_Xsfvfnrclipxfqf)
          .Case("Xsfvfwmaccqqq", RVV_REQ_Xsfvfwmaccqqq)

@@ -109,6 +109,9 @@ enum ActionType {
  GenRISCVSiFiveVectorBuiltins,
  GenRISCVSiFiveVectorBuiltinCG,
  GenRISCVSiFiveVectorBuiltinSema,
  GenRISCVAndesVectorBuiltins,
  GenRISCVAndesVectorBuiltinCG,
  GenRISCVAndesVectorBuiltinSema,
  GenAttrDocs,
  GenDiagDocs,
  GenOptDocs,

@@ -314,6 +317,15 @@ cl::opt<ActionType> Action(
        clEnumValN(GenRISCVSiFiveVectorBuiltinSema,
                   "gen-riscv-sifive-vector-builtin-sema",
                   "Generate riscv_sifive_vector_builtin_sema.inc for clang"),
        clEnumValN(GenRISCVAndesVectorBuiltins,
                   "gen-riscv-andes-vector-builtins",
                   "Generate riscv_andes_vector_builtins.inc for clang"),
        clEnumValN(GenRISCVAndesVectorBuiltinCG,
                   "gen-riscv-andes-vector-builtin-codegen",
                   "Generate riscv_andes_vector_builtin_cg.inc for clang"),
        clEnumValN(GenRISCVAndesVectorBuiltinSema,
                   "gen-riscv-andes-vector-builtin-sema",
                   "Generate riscv_andes_vector_builtin_sema.inc for clang"),
        clEnumValN(GenAttrDocs, "gen-attr-docs",
                   "Generate attribute documentation"),
        clEnumValN(GenDiagDocs, "gen-diag-docs",

@@ -593,6 +605,15 @@ bool ClangTableGenMain(raw_ostream &OS, const RecordKeeper &Records) {
  case GenRISCVSiFiveVectorBuiltinSema:
    EmitRVVBuiltinSema(Records, OS);
    break;
  case GenRISCVAndesVectorBuiltins:
    EmitRVVBuiltins(Records, OS);
    break;
  case GenRISCVAndesVectorBuiltinCG:
    EmitRVVBuiltinCG(Records, OS);
    break;
  case GenRISCVAndesVectorBuiltinSema:
    EmitRVVBuiltinSema(Records, OS);
    break;
  case GenAttrDocs:
    EmitClangAttrDocs(Records, OS);
    break;
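
For illustration, the new actions registered above become clang-tblgen command-line modes, dispatching to the same RVV emitters as the SiFive backends but over riscv_andes_vector.td. A hedged example invocation (the include path and output file name are assumptions based on the option descriptions):

clang-tblgen -gen-riscv-andes-vector-builtins -I clang/include/clang/Basic clang/include/clang/Basic/riscv_andes_vector.td -o riscv_andes_vector_builtins.inc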