ISel: introduce vector ISD::LRINT, ISD::LLRINT; custom RISCV lowering (#66924)

Issue #55208 observed that std::rint is vectorized by the
SLPVectorizer, but a very similar function, std::lrint, is not.
std::lrint corresponds to ISD::LRINT in the SelectionDAG, and
std::llrint is a familiar cousin corresponding to ISD::LLRINT. Now,
neither ISD::LRINT nor ISD::LLRINT has a corresponding vector variant,
and the LangRef makes this clear in the documentation of llvm.lrint.*
and llvm.llrint.*.
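As a concrete illustration, the scalar form and the vector form this
patch introduces look like this in IR (the particular overload types
here are just an example):

; Scalar form: rounds to the nearest integer according to the current
; rounding mode and returns the result as an i64.
declare i64 @llvm.lrint.i64.f32(float)
; Vector form added by this patch: element-wise lrint.
declare <4 x i64> @llvm.lrint.v4i64.v4f32(<4 x float>)
define <4 x i64> @lrint_example(<4 x float> %x) {
%a = call <4 x i64> @llvm.lrint.v4i64.v4f32(<4 x float> %x)
ret <4 x i64> %a
}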

This patch extends the LangRef to include vector variants of
llvm.lrint.* and llvm.llrint.*, and lays the necessary groundwork to
scalarize them for all targets. However, this patch would be devoid of
motivation unless we show the utility of these new vector variants.
Hence, the RISCV target has been chosen to implement a custom lowering
to the vfcvt.x.f.v instruction. The patch also includes a CostModel for
RISCV, and a trivial follow-up can potentially enable the SLPVectorizer
to vectorize std::lrint and std::llrint, fixing #55208.

The patch includes tests, obviously for the RISCV target, but also for
the X86, AArch64, and PowerPC targets to justify the addition of the
vector variants to the LangRef.
Author: Ramkumar Ramachandra
Date: 2023-10-19 13:05:04 +01:00
Committed by: GitHub
Parent: 3d7802d210
Commit: 98c90a13c6
21 changed files with 12200 additions and 15 deletions

@@ -15760,7 +15760,8 @@ Syntax:
"""""""
This is an overloaded intrinsic. You can use ``llvm.lrint`` on any
-floating-point type. Not all targets support all types however.
+floating-point type or vector of floating-point type. Not all targets
+support all types however.
::
@@ -15804,7 +15805,8 @@ Syntax:
"""""""
This is an overloaded intrinsic. You can use ``llvm.llrint`` on any
-floating-point type. Not all targets support all types however.
+floating-point type or vector of floating-point type. Not all targets
+support all types however.
::
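As an illustration of the new wording, the overloads now admit vector
types as well, e.g. (an illustrative pair, not the exact LangRef
listing):

declare i64 @llvm.llrint.i64.f32(float %Val)
declare <2 x i64> @llvm.llrint.v2i64.v2f32(<2 x float> %Val)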

@@ -1847,6 +1847,12 @@ public:
case Intrinsic::rint:
ISD = ISD::FRINT;
break;
case Intrinsic::lrint:
ISD = ISD::LRINT;
break;
case Intrinsic::llrint:
ISD = ISD::LLRINT;
break;
case Intrinsic::round:
ISD = ISD::FROUND;
break;

@@ -505,6 +505,7 @@ namespace {
SDValue visitUINT_TO_FP(SDNode *N);
SDValue visitFP_TO_SINT(SDNode *N);
SDValue visitFP_TO_UINT(SDNode *N);
SDValue visitXRINT(SDNode *N);
SDValue visitFP_ROUND(SDNode *N);
SDValue visitFP_EXTEND(SDNode *N);
SDValue visitFNEG(SDNode *N);
@@ -1911,6 +1912,7 @@ void DAGCombiner::Run(CombineLevel AtLevel) {
}
SDValue DAGCombiner::visit(SDNode *N) {
// clang-format off
switch (N->getOpcode()) {
default: break;
case ISD::TokenFactor: return visitTokenFactor(N);
@@ -2011,6 +2013,8 @@ SDValue DAGCombiner::visit(SDNode *N) {
case ISD::UINT_TO_FP: return visitUINT_TO_FP(N);
case ISD::FP_TO_SINT: return visitFP_TO_SINT(N);
case ISD::FP_TO_UINT: return visitFP_TO_UINT(N);
case ISD::LRINT:
case ISD::LLRINT: return visitXRINT(N);
case ISD::FP_ROUND: return visitFP_ROUND(N);
case ISD::FP_EXTEND: return visitFP_EXTEND(N);
case ISD::FNEG: return visitFNEG(N);
@@ -2065,6 +2069,7 @@ SDValue DAGCombiner::visit(SDNode *N) {
#include "llvm/IR/VPIntrinsics.def"
return visitVPOp(N);
}
// clang-format on
return SDValue();
}
@@ -17480,6 +17485,21 @@ SDValue DAGCombiner::visitFP_TO_UINT(SDNode *N) {
return FoldIntToFPToInt(N, DAG);
}
SDValue DAGCombiner::visitXRINT(SDNode *N) {
SDValue N0 = N->getOperand(0);
EVT VT = N->getValueType(0);
// fold (lrint|llrint undef) -> undef
if (N0.isUndef())
return DAG.getUNDEF(VT);
// fold (lrint|llrint c1fp) -> c1
if (DAG.isConstantFPBuildVectorOrConstantFP(N0))
return DAG.getNode(N->getOpcode(), SDLoc(N), VT, N0);
return SDValue();
}
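In IR terms, the first fold means a degenerate input like the one below
simplifies away instead of reaching instruction selection (a
hypothetical example; for a constant operand, the second fold rebuilds
the node through getNode(), giving the DAG a chance to constant-fold):

define <2 x i64> @xrint_undef() {
%r = call <2 x i64> @llvm.lrint.v2i64.v2f64(<2 x double> undef)
ret <2 x i64> %r
}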
SDValue DAGCombiner::visitFP_ROUND(SDNode *N) {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);

@@ -2198,6 +2198,7 @@ bool DAGTypeLegalizer::PromoteFloatOperand(SDNode *N, unsigned OpNo) {
// to use the promoted float operand. Nodes that produce at least one
// promotion-requiring floating point result have their operands legalized as
// a part of PromoteFloatResult.
// clang-format off
switch (N->getOpcode()) {
default:
#ifndef NDEBUG
@@ -2209,7 +2210,9 @@ bool DAGTypeLegalizer::PromoteFloatOperand(SDNode *N, unsigned OpNo) {
case ISD::BITCAST: R = PromoteFloatOp_BITCAST(N, OpNo); break;
case ISD::FCOPYSIGN: R = PromoteFloatOp_FCOPYSIGN(N, OpNo); break;
case ISD::FP_TO_SINT:
-case ISD::FP_TO_UINT: R = PromoteFloatOp_FP_TO_XINT(N, OpNo); break;
+case ISD::FP_TO_UINT:
+case ISD::LRINT:
+case ISD::LLRINT: R = PromoteFloatOp_UnaryOp(N, OpNo); break;
case ISD::FP_TO_SINT_SAT:
case ISD::FP_TO_UINT_SAT:
R = PromoteFloatOp_FP_TO_XINT_SAT(N, OpNo); break;
@@ -2218,6 +2221,7 @@ bool DAGTypeLegalizer::PromoteFloatOperand(SDNode *N, unsigned OpNo) {
case ISD::SETCC: R = PromoteFloatOp_SETCC(N, OpNo); break;
case ISD::STORE: R = PromoteFloatOp_STORE(N, OpNo); break;
}
// clang-format on
if (R.getNode())
ReplaceValueWith(SDValue(N, 0), R);
@@ -2251,7 +2255,7 @@ SDValue DAGTypeLegalizer::PromoteFloatOp_FCOPYSIGN(SDNode *N, unsigned OpNo) {
}
// Convert the promoted float value to the desired integer type
-SDValue DAGTypeLegalizer::PromoteFloatOp_FP_TO_XINT(SDNode *N, unsigned OpNo) {
+SDValue DAGTypeLegalizer::PromoteFloatOp_UnaryOp(SDNode *N, unsigned OpNo) {
SDValue Op = GetPromotedFloat(N->getOperand(0));
return DAG.getNode(N->getOpcode(), SDLoc(N), N->getValueType(0), Op);
}

@@ -711,7 +711,7 @@ private:
SDValue PromoteFloatOp_BITCAST(SDNode *N, unsigned OpNo);
SDValue PromoteFloatOp_FCOPYSIGN(SDNode *N, unsigned OpNo);
SDValue PromoteFloatOp_FP_EXTEND(SDNode *N, unsigned OpNo);
-SDValue PromoteFloatOp_FP_TO_XINT(SDNode *N, unsigned OpNo);
+SDValue PromoteFloatOp_UnaryOp(SDNode *N, unsigned OpNo);
SDValue PromoteFloatOp_FP_TO_XINT_SAT(SDNode *N, unsigned OpNo);
SDValue PromoteFloatOp_STORE(SDNode *N, unsigned OpNo);
SDValue PromoteFloatOp_SELECT_CC(SDNode *N, unsigned OpNo);

@@ -402,6 +402,8 @@ SDValue VectorLegalizer::LegalizeOp(SDValue Op) {
case ISD::FCEIL:
case ISD::FTRUNC:
case ISD::FRINT:
case ISD::LRINT:
case ISD::LLRINT:
case ISD::FNEARBYINT:
case ISD::FROUND:
case ISD::FROUNDEVEN:

@@ -101,6 +101,8 @@ void DAGTypeLegalizer::ScalarizeVectorResult(SDNode *N, unsigned ResNo) {
case ISD::FP_TO_SINT:
case ISD::FP_TO_UINT:
case ISD::FRINT:
case ISD::LRINT:
case ISD::LLRINT:
case ISD::FROUND:
case ISD::FROUNDEVEN:
case ISD::FSIN:
@@ -681,6 +683,8 @@ bool DAGTypeLegalizer::ScalarizeVectorOperand(SDNode *N, unsigned OpNo) {
case ISD::FP_TO_UINT:
case ISD::SINT_TO_FP:
case ISD::UINT_TO_FP:
case ISD::LRINT:
case ISD::LLRINT:
Res = ScalarizeVecOp_UnaryOp(N);
break;
case ISD::STRICT_SINT_TO_FP:
@@ -1097,6 +1101,8 @@ void DAGTypeLegalizer::SplitVectorResult(SDNode *N, unsigned ResNo) {
case ISD::VP_FP_TO_UINT:
case ISD::FRINT:
case ISD::VP_FRINT:
case ISD::LRINT:
case ISD::LLRINT:
case ISD::FROUND:
case ISD::VP_FROUND:
case ISD::FROUNDEVEN:
@@ -2974,6 +2980,8 @@ bool DAGTypeLegalizer::SplitVectorOperand(SDNode *N, unsigned OpNo) {
case ISD::ZERO_EXTEND:
case ISD::ANY_EXTEND:
case ISD::FTRUNC:
case ISD::LRINT:
case ISD::LLRINT:
Res = SplitVecOp_UnaryOp(N);
break;
case ISD::FLDEXP:
@@ -4209,6 +4217,8 @@ void DAGTypeLegalizer::WidenVectorResult(SDNode *N, unsigned ResNo) {
case ISD::FLOG2:
case ISD::FNEARBYINT:
case ISD::FRINT:
case ISD::LRINT:
case ISD::LLRINT:
case ISD::FROUND:
case ISD::FROUNDEVEN:
case ISD::FSIN:
@@ -5958,7 +5968,11 @@ bool DAGTypeLegalizer::WidenVectorOperand(SDNode *N, unsigned OpNo) {
case ISD::STRICT_FSETCCS: Res = WidenVecOp_STRICT_FSETCC(N); break;
case ISD::VSELECT: Res = WidenVecOp_VSELECT(N); break;
case ISD::FLDEXP:
-case ISD::FCOPYSIGN: Res = WidenVecOp_UnrollVectorOp(N); break;
+case ISD::FCOPYSIGN:
+case ISD::LRINT:
+case ISD::LLRINT:
+Res = WidenVecOp_UnrollVectorOp(N);
+break;
case ISD::IS_FPCLASS: Res = WidenVecOp_IS_FPCLASS(N); break;
case ISD::ANY_EXTEND:
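
Taken together, these hooks let vector lrint/llrint of any element
count survive type legalization; a sketch, assuming <3 x float> is
illegal on the target and gets widened:

; The <3 x i64> result is widened to <4 x i64>; targets without a
; vector lowering then unroll the operation into scalar lrint calls.
define <3 x i64> @lrint_widen(<3 x float> %x) {
%r = call <3 x i64> @llvm.lrint.v3i64.v3f32(<3 x float> %x)
ret <3 x i64> %r
}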

@@ -5135,6 +5135,8 @@ bool SelectionDAG::isKnownNeverNaN(SDValue Op, bool SNaN, unsigned Depth) const
case ISD::FROUND:
case ISD::FROUNDEVEN:
case ISD::FRINT:
case ISD::LRINT:
case ISD::LLRINT:
case ISD::FNEARBYINT:
case ISD::FLDEXP: {
if (SNaN)

@@ -873,13 +873,13 @@ void TargetLoweringBase::initActions() {
// These operations default to expand for vector types.
if (VT.isVector())
-setOperationAction({ISD::FCOPYSIGN, ISD::SIGN_EXTEND_INREG,
-ISD::ANY_EXTEND_VECTOR_INREG,
-ISD::SIGN_EXTEND_VECTOR_INREG,
-ISD::ZERO_EXTEND_VECTOR_INREG, ISD::SPLAT_VECTOR},
-VT, Expand);
+setOperationAction(
+{ISD::FCOPYSIGN, ISD::SIGN_EXTEND_INREG, ISD::ANY_EXTEND_VECTOR_INREG,
+ISD::SIGN_EXTEND_VECTOR_INREG, ISD::ZERO_EXTEND_VECTOR_INREG,
+ISD::SPLAT_VECTOR, ISD::LRINT, ISD::LLRINT},
+VT, Expand);
// Constrained floating-point operations default to expand.
#define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN) \
setOperationAction(ISD::STRICT_##DAGN, VT, Expand);
#include "llvm/IR/ConstrainedOps.def"

@@ -5669,10 +5669,28 @@ void Verifier::visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call) {
}
break;
}
-case Intrinsic::lround:
-case Intrinsic::llround:
case Intrinsic::lrint:
case Intrinsic::llrint: {
+Type *ValTy = Call.getArgOperand(0)->getType();
+Type *ResultTy = Call.getType();
+Check(
+ValTy->isFPOrFPVectorTy() && ResultTy->isIntOrIntVectorTy(),
+"llvm.lrint, llvm.llrint: argument must be floating-point or vector "
+"of floating-points, and result must be integer or vector of integers",
+&Call);
+Check(ValTy->isVectorTy() == ResultTy->isVectorTy(),
+"llvm.lrint, llvm.llrint: argument and result disagree on vector use",
+&Call);
+if (ValTy->isVectorTy()) {
+Check(cast<VectorType>(ValTy)->getElementCount() ==
+cast<VectorType>(ResultTy)->getElementCount(),
+"llvm.lrint, llvm.llrint: argument must be same length as result",
+&Call);
+}
+break;
+}
+case Intrinsic::lround:
+case Intrinsic::llround: {
Type *ValTy = Call.getArgOperand(0)->getType();
Type *ResultTy = Call.getType();
Check(!ValTy->isVectorTy() && !ResultTy->isVectorTy(),

@@ -731,7 +731,7 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
VT, Custom);
setOperationAction({ISD::FP_TO_SINT_SAT, ISD::FP_TO_UINT_SAT}, VT,
Custom);
setOperationAction({ISD::LRINT, ISD::LLRINT}, VT, Custom);
setOperationAction(
{ISD::SADDSAT, ISD::UADDSAT, ISD::SSUBSAT, ISD::USUBSAT}, VT, Legal);
@@ -2950,6 +2950,31 @@ lowerFTRUNC_FCEIL_FFLOOR_FROUND(SDValue Op, SelectionDAG &DAG,
DAG.getTargetConstant(FRM, DL, Subtarget.getXLenVT()));
}
// Expand vector LRINT and LLRINT by converting to the integer domain.
static SDValue lowerVectorXRINT(SDValue Op, SelectionDAG &DAG,
const RISCVSubtarget &Subtarget) {
MVT VT = Op.getSimpleValueType();
assert(VT.isVector() && "Unexpected type");
SDLoc DL(Op);
SDValue Src = Op.getOperand(0);
MVT ContainerVT = VT;
if (VT.isFixedLengthVector()) {
ContainerVT = getContainerForFixedLengthVector(DAG, VT, Subtarget);
Src = convertToScalableVector(ContainerVT, Src, DAG, Subtarget);
}
auto [Mask, VL] = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
SDValue Truncated =
DAG.getNode(RISCVISD::VFCVT_X_F_VL, DL, ContainerVT, Src, Mask, VL);
if (!VT.isFixedLengthVector())
return Truncated;
return convertFromScalableVector(VT, Truncated, DAG, Subtarget);
}
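A rough end-to-end sketch of the custom lowering for a fixed-length
vector with -mtriple=riscv64 -mattr=+v (the assembly shows the
expected shape, not exact llc output):

define <2 x i64> @lrint_rvv(<2 x double> %x) {
%r = call <2 x i64> @llvm.lrint.v2i64.v2f64(<2 x double> %x)
ret <2 x i64> %r
}
; expected codegen, roughly:
; vsetivli zero, 2, e64, m1, ta, ma
; vfcvt.x.f.v v8, v8
; ret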
static SDValue
getVSlidedown(SelectionDAG &DAG, const RISCVSubtarget &Subtarget,
const SDLoc &DL, EVT VT, SDValue Merge, SDValue Op,
@@ -5978,6 +6003,9 @@ SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
case ISD::FROUND:
case ISD::FROUNDEVEN:
return lowerFTRUNC_FCEIL_FFLOOR_FROUND(Op, DAG, Subtarget);
case ISD::LRINT:
case ISD::LLRINT:
return lowerVectorXRINT(Op, DAG, Subtarget);
case ISD::VECREDUCE_ADD:
case ISD::VECREDUCE_UMAX:
case ISD::VECREDUCE_SMAX:

@@ -668,6 +668,31 @@ static const CostTblEntry VectorIntrinsicCostTable[]{
{Intrinsic::rint, MVT::nxv2f64, 7},
{Intrinsic::rint, MVT::nxv4f64, 7},
{Intrinsic::rint, MVT::nxv8f64, 7},
{Intrinsic::lrint, MVT::v2i32, 1},
{Intrinsic::lrint, MVT::v4i32, 1},
{Intrinsic::lrint, MVT::v8i32, 1},
{Intrinsic::lrint, MVT::v16i32, 1},
{Intrinsic::lrint, MVT::nxv1i32, 1},
{Intrinsic::lrint, MVT::nxv2i32, 1},
{Intrinsic::lrint, MVT::nxv4i32, 1},
{Intrinsic::lrint, MVT::nxv8i32, 1},
{Intrinsic::lrint, MVT::nxv16i32, 1},
{Intrinsic::lrint, MVT::v2i64, 1},
{Intrinsic::lrint, MVT::v4i64, 1},
{Intrinsic::lrint, MVT::v8i64, 1},
{Intrinsic::lrint, MVT::v16i64, 1},
{Intrinsic::lrint, MVT::nxv1i64, 1},
{Intrinsic::lrint, MVT::nxv2i64, 1},
{Intrinsic::lrint, MVT::nxv4i64, 1},
{Intrinsic::lrint, MVT::nxv8i64, 1},
{Intrinsic::llrint, MVT::v2i64, 1},
{Intrinsic::llrint, MVT::v4i64, 1},
{Intrinsic::llrint, MVT::v8i64, 1},
{Intrinsic::llrint, MVT::v16i64, 1},
{Intrinsic::llrint, MVT::nxv1i64, 1},
{Intrinsic::llrint, MVT::nxv2i64, 1},
{Intrinsic::llrint, MVT::nxv4i64, 1},
{Intrinsic::llrint, MVT::nxv8i64, 1},
{Intrinsic::nearbyint, MVT::v2f32, 9},
{Intrinsic::nearbyint, MVT::v4f32, 9},
{Intrinsic::nearbyint, MVT::v8f32, 9},
@@ -1051,6 +1076,8 @@ RISCVTTIImpl::getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
case Intrinsic::floor:
case Intrinsic::trunc:
case Intrinsic::rint:
case Intrinsic::lrint:
case Intrinsic::llrint:
case Intrinsic::round:
case Intrinsic::roundeven: {
// These all use the same code.
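These table entries can be exercised with the cost-model printer; a
minimal sketch (the RUN line flags are illustrative):

; RUN: opt -passes='print<cost-model>' -disable-output \
; RUN:   -mtriple=riscv64 -mattr=+v,+d %s
; Per the table above, the call below should be costed at 1.
define <4 x i64> @lrint_cost(<4 x double> %x) {
%r = call <4 x i64> @llvm.lrint.v4i64.v4f64(<4 x double> %x)
ret <4 x i64> %r
}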

@@ -181,6 +181,96 @@ define void @rint() {
ret void
}
define void @lrint() {
; CHECK-LABEL: 'lrint'
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %1 = call i64 @llvm.lrint.i64.f32(float undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %2 = call <2 x i64> @llvm.lrint.v2i64.v2f32(<2 x float> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %3 = call <4 x i64> @llvm.lrint.v4i64.v4f32(<4 x float> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %4 = call <8 x i64> @llvm.lrint.v8i64.v8f32(<8 x float> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %5 = call <16 x i64> @llvm.lrint.v16i64.v16f32(<16 x float> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %6 = call <vscale x 1 x i64> @llvm.lrint.nxv1i64.nxv1f32(<vscale x 1 x float> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %7 = call <vscale x 2 x i64> @llvm.lrint.nxv2i64.nxv2f32(<vscale x 2 x float> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %8 = call <vscale x 4 x i64> @llvm.lrint.nxv4i64.nxv4f32(<vscale x 4 x float> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %9 = call <vscale x 8 x i64> @llvm.lrint.nxv8i64.nxv8f32(<vscale x 8 x float> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %10 = call <vscale x 16 x i64> @llvm.lrint.nxv16i64.nxv16f32(<vscale x 16 x float> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %11 = call i64 @llvm.lrint.i64.f64(double undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %12 = call <2 x i64> @llvm.lrint.v2i64.v2f64(<2 x double> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %13 = call <4 x i64> @llvm.lrint.v4i64.v4f64(<4 x double> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %14 = call <8 x i64> @llvm.lrint.v8i64.v8f64(<8 x double> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %15 = call <16 x i64> @llvm.lrint.v16i64.v16f64(<16 x double> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %16 = call <vscale x 1 x i64> @llvm.lrint.nxv1i64.nxv1f64(<vscale x 1 x double> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %17 = call <vscale x 2 x i64> @llvm.lrint.nxv2i64.nxv2f64(<vscale x 2 x double> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %18 = call <vscale x 4 x i64> @llvm.lrint.nxv4i64.nxv4f64(<vscale x 4 x double> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %19 = call <vscale x 8 x i64> @llvm.lrint.nxv8i64.nxv8f64(<vscale x 8 x double> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
;
call i64 @llvm.lrint.i64.f32(float undef)
call <2 x i64> @llvm.lrint.v2i64.v2f32(<2 x float> undef)
call <4 x i64> @llvm.lrint.v4i64.v4f32(<4 x float> undef)
call <8 x i64> @llvm.lrint.v8i64.v8f32(<8 x float> undef)
call <16 x i64> @llvm.lrint.v16i64.v16f32(<16 x float> undef)
call <vscale x 1 x i64> @llvm.lrint.nvx1i64.nvx1f32(<vscale x 1 x float> undef)
call <vscale x 2 x i64> @llvm.lrint.nvx2i64.nvx2f32(<vscale x 2 x float> undef)
call <vscale x 4 x i64> @llvm.lrint.nvx4i64.nvx4f32(<vscale x 4 x float> undef)
call <vscale x 8 x i64> @llvm.lrint.nvx8i64.nvx8f32(<vscale x 8 x float> undef)
call <vscale x 16 x i64> @llvm.lrint.nvx16i64.nvx16f32(<vscale x 16 x float> undef)
call i64 @llvm.lrint.i64.f64(double undef)
call <2 x i64> @llvm.lrint.v2i64.v2f64(<2 x double> undef)
call <4 x i64> @llvm.lrint.v4i64.v4f64(<4 x double> undef)
call <8 x i64> @llvm.lrint.v8i64.v8f64(<8 x double> undef)
call <16 x i64> @llvm.lrint.v16i64.v16f64(<16 x double> undef)
call <vscale x 1 x i64> @llvm.lrint.nvx1i64.nvx1f64(<vscale x 1 x double> undef)
call <vscale x 2 x i64> @llvm.lrint.nvx2i64.nvx2f64(<vscale x 2 x double> undef)
call <vscale x 4 x i64> @llvm.lrint.nvx4i64.nvx4f64(<vscale x 4 x double> undef)
call <vscale x 8 x i64> @llvm.lrint.nvx8i64.nvx8f64(<vscale x 8 x double> undef)
ret void
}
define void @llrint() {
; CHECK-LABEL: 'llrint'
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %1 = call i64 @llvm.llrint.i64.f32(float undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %2 = call <2 x i64> @llvm.llrint.v2i64.v2f32(<2 x float> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %3 = call <4 x i64> @llvm.llrint.v4i64.v4f32(<4 x float> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %4 = call <8 x i64> @llvm.llrint.v8i64.v8f32(<8 x float> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %5 = call <16 x i64> @llvm.llrint.v16i64.v16f32(<16 x float> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %6 = call <vscale x 1 x i64> @llvm.llrint.nxv1i64.nxv1f32(<vscale x 1 x float> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %7 = call <vscale x 2 x i64> @llvm.llrint.nxv2i64.nxv2f32(<vscale x 2 x float> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %8 = call <vscale x 4 x i64> @llvm.llrint.nxv4i64.nxv4f32(<vscale x 4 x float> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %9 = call <vscale x 8 x i64> @llvm.llrint.nxv8i64.nxv8f32(<vscale x 8 x float> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %10 = call <vscale x 16 x i64> @llvm.llrint.nxv16i64.nxv16f32(<vscale x 16 x float> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %11 = call i64 @llvm.llrint.i64.f64(double undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %12 = call <2 x i64> @llvm.llrint.v2i64.v2f64(<2 x double> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %13 = call <4 x i64> @llvm.llrint.v4i64.v4f64(<4 x double> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %14 = call <8 x i64> @llvm.llrint.v8i64.v8f64(<8 x double> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %15 = call <16 x i64> @llvm.llrint.v16i64.v16f64(<16 x double> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %16 = call <vscale x 1 x i64> @llvm.llrint.nxv1i64.nxv1f64(<vscale x 1 x double> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %17 = call <vscale x 2 x i64> @llvm.llrint.nxv2i64.nxv2f64(<vscale x 2 x double> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %18 = call <vscale x 4 x i64> @llvm.llrint.nxv4i64.nxv4f64(<vscale x 4 x double> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %19 = call <vscale x 8 x i64> @llvm.llrint.nxv8i64.nxv8f64(<vscale x 8 x double> undef)
; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
;
call i64 @llvm.llrint.i64.f32(float undef)
call <2 x i64> @llvm.llrint.v2i64.v2f32(<2 x float> undef)
call <4 x i64> @llvm.llrint.v4i64.v4f32(<4 x float> undef)
call <8 x i64> @llvm.llrint.v8i64.v8f32(<8 x float> undef)
call <16 x i64> @llvm.llrint.v16i64.v16f32(<16 x float> undef)
call <vscale x 1 x i64> @llvm.llrint.nvx1i64.nvx1f32(<vscale x 1 x float> undef)
call <vscale x 2 x i64> @llvm.llrint.nvx2i64.nvx2f32(<vscale x 2 x float> undef)
call <vscale x 4 x i64> @llvm.llrint.nvx4i64.nvx4f32(<vscale x 4 x float> undef)
call <vscale x 8 x i64> @llvm.llrint.nvx8i64.nvx8f32(<vscale x 8 x float> undef)
call <vscale x 16 x i64> @llvm.llrint.nvx16i64.nvx16f32(<vscale x 16 x float> undef)
call i64 @llvm.llrint.i64.f64(double undef)
call <2 x i64> @llvm.llrint.v2i64.v2f64(<2 x double> undef)
call <4 x i64> @llvm.llrint.v4i64.v4f64(<4 x double> undef)
call <8 x i64> @llvm.llrint.v8i64.v8f64(<8 x double> undef)
call <16 x i64> @llvm.llrint.v16i64.v16f64(<16 x double> undef)
call <vscale x 1 x i64> @llvm.llrint.nvx1i64.nvx1f64(<vscale x 1 x double> undef)
call <vscale x 2 x i64> @llvm.llrint.nvx2i64.nvx2f64(<vscale x 2 x double> undef)
call <vscale x 4 x i64> @llvm.llrint.nvx4i64.nvx4f64(<vscale x 4 x double> undef)
call <vscale x 8 x i64> @llvm.llrint.nvx8i64.nvx8f64(<vscale x 8 x double> undef)
ret void
}
define void @nearbyint() {
; CHECK-LABEL: 'nearbyint'
; CHECK-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %1 = call float @llvm.nearbyint.f32(float undef)
@@ -683,6 +773,46 @@ declare <vscale x 2 x double> @llvm.rint.nvx2f64(<vscale x 2 x double>)
declare <vscale x 4 x double> @llvm.rint.nvx4f64(<vscale x 4 x double>)
declare <vscale x 8 x double> @llvm.rint.nvx8f64(<vscale x 8 x double>)
declare i64 @llvm.lrint.i64.f32(float)
declare <2 x i64> @llvm.lrint.v2i64.v2f32(<2 x float>)
declare <4 x i64> @llvm.lrint.v4i64.v4f32(<4 x float>)
declare <8 x i64> @llvm.lrint.v8i64.v8f32(<8 x float>)
declare <16 x i64> @llvm.lrint.v16i64.v16f32(<16 x float>)
declare <vscale x 1 x i64> @llvm.lrint.nvx1i64.nvx1f32(<vscale x 1 x float>)
declare <vscale x 2 x i64> @llvm.lrint.nvx2i64.nvx2f32(<vscale x 2 x float>)
declare <vscale x 4 x i64> @llvm.lrint.nvx4i64.nvx4f32(<vscale x 4 x float>)
declare <vscale x 8 x i64> @llvm.lrint.nvx8i64.nvx8f32(<vscale x 8 x float>)
declare <vscale x 16 x i64> @llvm.lrint.nvx16i64.nvx16f32(<vscale x 16 x float>)
declare i64 @llvm.lrint.i64.f64(double)
declare <2 x i64> @llvm.lrint.v2i64.v2f64(<2 x double>)
declare <4 x i64> @llvm.lrint.v4i64.v4f64(<4 x double>)
declare <8 x i64> @llvm.lrint.v8i64.v8f64(<8 x double>)
declare <16 x i64> @llvm.lrint.v16i64.v16f64(<16 x double>)
declare <vscale x 1 x i64> @llvm.lrint.nvx1i64.nvx1f64(<vscale x 1 x double>)
declare <vscale x 2 x i64> @llvm.lrint.nvx2i64.nvx2f64(<vscale x 2 x double>)
declare <vscale x 4 x i64> @llvm.lrint.nvx4i64.nvx4f64(<vscale x 4 x double>)
declare <vscale x 8 x i64> @llvm.lrint.nvx8i64.nvx8f64(<vscale x 8 x double>)
declare i64 @llvm.llrint.i64.f32(float)
declare <2 x i64> @llvm.llrint.v2i64.v2f32(<2 x float>)
declare <4 x i64> @llvm.llrint.v4i64.v4f32(<4 x float>)
declare <8 x i64> @llvm.llrint.v8i64.v8f32(<8 x float>)
declare <16 x i64> @llvm.llrint.v16i64.v16f32(<16 x float>)
declare <vscale x 1 x i64> @llvm.llrint.nvx1i64.nvx1f32(<vscale x 1 x float>)
declare <vscale x 2 x i64> @llvm.llrint.nvx2i64.nvx2f32(<vscale x 2 x float>)
declare <vscale x 4 x i64> @llvm.llrint.nvx4i64.nvx4f32(<vscale x 4 x float>)
declare <vscale x 8 x i64> @llvm.llrint.nvx8i64.nvx8f32(<vscale x 8 x float>)
declare <vscale x 16 x i64> @llvm.llrint.nvx16i64.nvx16f32(<vscale x 16 x float>)
declare i64 @llvm.llrint.i64.f64(double)
declare <2 x i64> @llvm.llrint.v2i64.v2f64(<2 x double>)
declare <4 x i64> @llvm.llrint.v4i64.v4f64(<4 x double>)
declare <8 x i64> @llvm.llrint.v8i64.v8f64(<8 x double>)
declare <16 x i64> @llvm.llrint.v16i64.v16f64(<16 x double>)
declare <vscale x 1 x i64> @llvm.llrint.nvx1i64.nvx1f64(<vscale x 1 x double>)
declare <vscale x 2 x i64> @llvm.llrint.nvx2i64.nvx2f64(<vscale x 2 x double>)
declare <vscale x 4 x i64> @llvm.llrint.nvx4i64.nvx4f64(<vscale x 4 x double>)
declare <vscale x 8 x i64> @llvm.llrint.nvx8i64.nvx8f64(<vscale x 8 x double>)
declare float @llvm.nearbyint.f32(float)
declare <2 x float> @llvm.nearbyint.v2f32(<2 x float>)
declare <4 x float> @llvm.nearbyint.v4f32(<4 x float>)

@@ -0,0 +1,621 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=aarch64 -mattr=+neon | FileCheck %s
define <1 x i64> @llrint_v1i64_v1f16(<1 x half> %x) {
; CHECK-LABEL: llrint_v1i64_v1f16:
; CHECK: // %bb.0:
; CHECK-NEXT: fcvt s0, h0
; CHECK-NEXT: frintx s0, s0
; CHECK-NEXT: fcvtzs x8, s0
; CHECK-NEXT: fmov d0, x8
; CHECK-NEXT: ret
%a = call <1 x i64> @llvm.llrint.v1i64.v1f16(<1 x half> %x)
ret <1 x i64> %a
}
declare <1 x i64> @llvm.llrint.v1i64.v1f16(<1 x half>)
define <2 x i64> @llrint_v1i64_v2f16(<2 x half> %x) {
; CHECK-LABEL: llrint_v1i64_v2f16:
; CHECK: // %bb.0:
; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
; CHECK-NEXT: mov h1, v0.h[1]
; CHECK-NEXT: fcvt s0, h0
; CHECK-NEXT: fcvt s1, h1
; CHECK-NEXT: frintx s0, s0
; CHECK-NEXT: frintx s1, s1
; CHECK-NEXT: fcvtzs x8, s0
; CHECK-NEXT: fcvtzs x9, s1
; CHECK-NEXT: fmov d0, x8
; CHECK-NEXT: mov v0.d[1], x9
; CHECK-NEXT: ret
%a = call <2 x i64> @llvm.llrint.v2i64.v2f16(<2 x half> %x)
ret <2 x i64> %a
}
declare <2 x i64> @llvm.llrint.v2i64.v2f16(<2 x half>)
define <4 x i64> @llrint_v4i64_v4f16(<4 x half> %x) {
; CHECK-LABEL: llrint_v4i64_v4f16:
; CHECK: // %bb.0:
; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
; CHECK-NEXT: mov h1, v0.h[2]
; CHECK-NEXT: mov h2, v0.h[1]
; CHECK-NEXT: mov h3, v0.h[3]
; CHECK-NEXT: fcvt s0, h0
; CHECK-NEXT: fcvt s1, h1
; CHECK-NEXT: fcvt s2, h2
; CHECK-NEXT: fcvt s3, h3
; CHECK-NEXT: frintx s0, s0
; CHECK-NEXT: frintx s1, s1
; CHECK-NEXT: frintx s2, s2
; CHECK-NEXT: frintx s3, s3
; CHECK-NEXT: fcvtzs x8, s0
; CHECK-NEXT: fcvtzs x9, s1
; CHECK-NEXT: fcvtzs x10, s2
; CHECK-NEXT: fcvtzs x11, s3
; CHECK-NEXT: fmov d0, x8
; CHECK-NEXT: fmov d1, x9
; CHECK-NEXT: mov v0.d[1], x10
; CHECK-NEXT: mov v1.d[1], x11
; CHECK-NEXT: ret
%a = call <4 x i64> @llvm.llrint.v4i64.v4f16(<4 x half> %x)
ret <4 x i64> %a
}
declare <4 x i64> @llvm.llrint.v4i64.v4f16(<4 x half>)
define <8 x i64> @llrint_v8i64_v8f16(<8 x half> %x) {
; CHECK-LABEL: llrint_v8i64_v8f16:
; CHECK: // %bb.0:
; CHECK-NEXT: ext v1.16b, v0.16b, v0.16b, #8
; CHECK-NEXT: mov h4, v0.h[2]
; CHECK-NEXT: mov h3, v0.h[1]
; CHECK-NEXT: mov h7, v0.h[3]
; CHECK-NEXT: fcvt s0, h0
; CHECK-NEXT: mov h2, v1.h[2]
; CHECK-NEXT: mov h5, v1.h[1]
; CHECK-NEXT: mov h6, v1.h[3]
; CHECK-NEXT: fcvt s1, h1
; CHECK-NEXT: fcvt s4, h4
; CHECK-NEXT: fcvt s3, h3
; CHECK-NEXT: fcvt s7, h7
; CHECK-NEXT: frintx s0, s0
; CHECK-NEXT: fcvt s2, h2
; CHECK-NEXT: fcvt s5, h5
; CHECK-NEXT: fcvt s6, h6
; CHECK-NEXT: frintx s1, s1
; CHECK-NEXT: frintx s4, s4
; CHECK-NEXT: frintx s3, s3
; CHECK-NEXT: frintx s7, s7
; CHECK-NEXT: fcvtzs x9, s0
; CHECK-NEXT: frintx s2, s2
; CHECK-NEXT: frintx s5, s5
; CHECK-NEXT: frintx s6, s6
; CHECK-NEXT: fcvtzs x8, s1
; CHECK-NEXT: fcvtzs x12, s4
; CHECK-NEXT: fcvtzs x11, s3
; CHECK-NEXT: fcvtzs x15, s7
; CHECK-NEXT: fmov d0, x9
; CHECK-NEXT: fcvtzs x10, s2
; CHECK-NEXT: fcvtzs x13, s5
; CHECK-NEXT: fcvtzs x14, s6
; CHECK-NEXT: fmov d2, x8
; CHECK-NEXT: fmov d1, x12
; CHECK-NEXT: mov v0.d[1], x11
; CHECK-NEXT: fmov d3, x10
; CHECK-NEXT: mov v2.d[1], x13
; CHECK-NEXT: mov v1.d[1], x15
; CHECK-NEXT: mov v3.d[1], x14
; CHECK-NEXT: ret
%a = call <8 x i64> @llvm.llrint.v8i64.v8f16(<8 x half> %x)
ret <8 x i64> %a
}
declare <8 x i64> @llvm.llrint.v8i64.v8f16(<8 x half>)
define <16 x i64> @llrint_v16i64_v16f16(<16 x half> %x) {
; CHECK-LABEL: llrint_v16i64_v16f16:
; CHECK: // %bb.0:
; CHECK-NEXT: ext v2.16b, v0.16b, v0.16b, #8
; CHECK-NEXT: ext v3.16b, v1.16b, v1.16b, #8
; CHECK-NEXT: mov h17, v0.h[1]
; CHECK-NEXT: mov h19, v0.h[2]
; CHECK-NEXT: fcvt s18, h0
; CHECK-NEXT: mov h0, v0.h[3]
; CHECK-NEXT: mov h4, v2.h[1]
; CHECK-NEXT: mov h5, v2.h[2]
; CHECK-NEXT: fcvt s7, h3
; CHECK-NEXT: fcvt s6, h2
; CHECK-NEXT: mov h16, v3.h[2]
; CHECK-NEXT: mov h2, v2.h[3]
; CHECK-NEXT: fcvt s17, h17
; CHECK-NEXT: fcvt s19, h19
; CHECK-NEXT: frintx s18, s18
; CHECK-NEXT: fcvt s0, h0
; CHECK-NEXT: fcvt s4, h4
; CHECK-NEXT: fcvt s5, h5
; CHECK-NEXT: frintx s7, s7
; CHECK-NEXT: frintx s6, s6
; CHECK-NEXT: fcvt s16, h16
; CHECK-NEXT: fcvt s2, h2
; CHECK-NEXT: frintx s17, s17
; CHECK-NEXT: frintx s19, s19
; CHECK-NEXT: fcvtzs x13, s18
; CHECK-NEXT: frintx s0, s0
; CHECK-NEXT: frintx s4, s4
; CHECK-NEXT: frintx s5, s5
; CHECK-NEXT: fcvtzs x9, s7
; CHECK-NEXT: mov h7, v1.h[2]
; CHECK-NEXT: fcvtzs x8, s6
; CHECK-NEXT: mov h6, v1.h[1]
; CHECK-NEXT: frintx s16, s16
; CHECK-NEXT: fcvtzs x14, s17
; CHECK-NEXT: fcvtzs x15, s19
; CHECK-NEXT: fcvtzs x10, s4
; CHECK-NEXT: mov h4, v3.h[1]
; CHECK-NEXT: fcvtzs x11, s5
; CHECK-NEXT: mov h5, v1.h[3]
; CHECK-NEXT: mov h3, v3.h[3]
; CHECK-NEXT: fcvt s1, h1
; CHECK-NEXT: fcvt s7, h7
; CHECK-NEXT: fcvt s6, h6
; CHECK-NEXT: fcvtzs x12, s16
; CHECK-NEXT: frintx s16, s2
; CHECK-NEXT: fmov d2, x8
; CHECK-NEXT: fcvt s4, h4
; CHECK-NEXT: fcvt s3, h3
; CHECK-NEXT: fcvt s5, h5
; CHECK-NEXT: frintx s1, s1
; CHECK-NEXT: frintx s7, s7
; CHECK-NEXT: frintx s17, s6
; CHECK-NEXT: fmov d6, x9
; CHECK-NEXT: mov v2.d[1], x10
; CHECK-NEXT: frintx s4, s4
; CHECK-NEXT: frintx s18, s3
; CHECK-NEXT: frintx s5, s5
; CHECK-NEXT: fcvtzs x8, s1
; CHECK-NEXT: fcvtzs x9, s7
; CHECK-NEXT: fmov d3, x11
; CHECK-NEXT: fcvtzs x11, s0
; CHECK-NEXT: fmov d7, x12
; CHECK-NEXT: fcvtzs x12, s16
; CHECK-NEXT: fcvtzs x16, s17
; CHECK-NEXT: fcvtzs x17, s4
; CHECK-NEXT: fmov d0, x13
; CHECK-NEXT: fmov d1, x15
; CHECK-NEXT: fcvtzs x18, s18
; CHECK-NEXT: fcvtzs x0, s5
; CHECK-NEXT: fmov d4, x8
; CHECK-NEXT: fmov d5, x9
; CHECK-NEXT: mov v0.d[1], x14
; CHECK-NEXT: mov v1.d[1], x11
; CHECK-NEXT: mov v3.d[1], x12
; CHECK-NEXT: mov v4.d[1], x16
; CHECK-NEXT: mov v6.d[1], x17
; CHECK-NEXT: mov v7.d[1], x18
; CHECK-NEXT: mov v5.d[1], x0
; CHECK-NEXT: ret
%a = call <16 x i64> @llvm.llrint.v16i64.v16f16(<16 x half> %x)
ret <16 x i64> %a
}
declare <16 x i64> @llvm.llrint.v16i64.v16f16(<16 x half>)
define <32 x i64> @llrint_v32i64_v32f16(<32 x half> %x) {
; CHECK-LABEL: llrint_v32i64_v32f16:
; CHECK: // %bb.0:
; CHECK-NEXT: ext v4.16b, v1.16b, v1.16b, #8
; CHECK-NEXT: ext v5.16b, v2.16b, v2.16b, #8
; CHECK-NEXT: ext v6.16b, v3.16b, v3.16b, #8
; CHECK-NEXT: ext v7.16b, v0.16b, v0.16b, #8
; CHECK-NEXT: mov h19, v0.h[1]
; CHECK-NEXT: fcvt s21, h0
; CHECK-NEXT: mov h23, v1.h[2]
; CHECK-NEXT: fcvt s22, h1
; CHECK-NEXT: fcvt s26, h2
; CHECK-NEXT: mov h27, v2.h[1]
; CHECK-NEXT: mov h28, v2.h[2]
; CHECK-NEXT: mov h16, v4.h[2]
; CHECK-NEXT: fcvt s17, h5
; CHECK-NEXT: mov h18, v5.h[2]
; CHECK-NEXT: mov h20, v6.h[2]
; CHECK-NEXT: fcvt s24, h7
; CHECK-NEXT: fcvt s25, h6
; CHECK-NEXT: fcvt s19, h19
; CHECK-NEXT: frintx s22, s22
; CHECK-NEXT: fcvt s16, h16
; CHECK-NEXT: frintx s17, s17
; CHECK-NEXT: fcvt s18, h18
; CHECK-NEXT: fcvt s20, h20
; CHECK-NEXT: frintx s16, s16
; CHECK-NEXT: fcvtzs x12, s17
; CHECK-NEXT: frintx s17, s18
; CHECK-NEXT: frintx s18, s21
; CHECK-NEXT: fcvt s21, h23
; CHECK-NEXT: frintx s23, s24
; CHECK-NEXT: frintx s24, s25
; CHECK-NEXT: frintx s25, s19
; CHECK-NEXT: mov h19, v7.h[1]
; CHECK-NEXT: fcvtzs x13, s16
; CHECK-NEXT: frintx s16, s20
; CHECK-NEXT: frintx s20, s26
; CHECK-NEXT: fcvtzs x9, s23
; CHECK-NEXT: mov h23, v3.h[2]
; CHECK-NEXT: fcvt s26, h27
; CHECK-NEXT: fcvtzs x15, s24
; CHECK-NEXT: fcvtzs x10, s25
; CHECK-NEXT: fcvt s24, h28
; CHECK-NEXT: mov h25, v3.h[3]
; CHECK-NEXT: fcvtzs x14, s17
; CHECK-NEXT: frintx s21, s21
; CHECK-NEXT: fmov d17, x12
; CHECK-NEXT: fcvtzs x12, s16
; CHECK-NEXT: fmov d16, x13
; CHECK-NEXT: fcvtzs x13, s22
; CHECK-NEXT: fcvt s22, h3
; CHECK-NEXT: mov h3, v3.h[1]
; CHECK-NEXT: mov h27, v0.h[2]
; CHECK-NEXT: mov h28, v2.h[3]
; CHECK-NEXT: fcvt s23, h23
; CHECK-NEXT: frintx s26, s26
; CHECK-NEXT: fcvtzs x16, s20
; CHECK-NEXT: frintx s20, s24
; CHECK-NEXT: fcvt s24, h25
; CHECK-NEXT: fcvtzs x11, s18
; CHECK-NEXT: fmov d18, x14
; CHECK-NEXT: fcvtzs x14, s21
; CHECK-NEXT: frintx s22, s22
; CHECK-NEXT: fcvt s3, h3
; CHECK-NEXT: fcvt s25, h27
; CHECK-NEXT: fcvt s27, h28
; CHECK-NEXT: frintx s23, s23
; CHECK-NEXT: mov h21, v1.h[3]
; CHECK-NEXT: fmov d2, x15
; CHECK-NEXT: fcvtzs x15, s26
; CHECK-NEXT: fmov d26, x13
; CHECK-NEXT: mov h1, v1.h[1]
; CHECK-NEXT: fcvtzs x13, s20
; CHECK-NEXT: frintx s20, s24
; CHECK-NEXT: fmov d24, x14
; CHECK-NEXT: fcvtzs x14, s22
; CHECK-NEXT: frintx s3, s3
; CHECK-NEXT: fmov d22, x16
; CHECK-NEXT: frintx s27, s27
; CHECK-NEXT: fcvtzs x16, s23
; CHECK-NEXT: fcvt s21, h21
; CHECK-NEXT: frintx s25, s25
; CHECK-NEXT: fcvt s1, h1
; CHECK-NEXT: mov h0, v0.h[3]
; CHECK-NEXT: mov h23, v7.h[2]
; CHECK-NEXT: mov v22.d[1], x15
; CHECK-NEXT: fcvtzs x15, s20
; CHECK-NEXT: fmov d20, x13
; CHECK-NEXT: fcvtzs x13, s3
; CHECK-NEXT: fmov d3, x14
; CHECK-NEXT: fcvtzs x14, s27
; CHECK-NEXT: fmov d27, x16
; CHECK-NEXT: frintx s21, s21
; CHECK-NEXT: mov h7, v7.h[3]
; CHECK-NEXT: frintx s1, s1
; CHECK-NEXT: fcvt s0, h0
; CHECK-NEXT: fcvt s23, h23
; CHECK-NEXT: fcvt s19, h19
; CHECK-NEXT: mov v27.d[1], x15
; CHECK-NEXT: fcvtzs x15, s25
; CHECK-NEXT: mov h25, v6.h[3]
; CHECK-NEXT: mov h6, v6.h[1]
; CHECK-NEXT: mov v3.d[1], x13
; CHECK-NEXT: fcvtzs x13, s21
; CHECK-NEXT: mov h21, v5.h[1]
; CHECK-NEXT: mov h5, v5.h[3]
; CHECK-NEXT: mov v20.d[1], x14
; CHECK-NEXT: fcvtzs x14, s1
; CHECK-NEXT: mov h1, v4.h[1]
; CHECK-NEXT: frintx s0, s0
; CHECK-NEXT: fcvt s25, h25
; CHECK-NEXT: fcvt s7, h7
; CHECK-NEXT: stp q3, q27, [x8, #192]
; CHECK-NEXT: fcvt s6, h6
; CHECK-NEXT: mov h3, v4.h[3]
; CHECK-NEXT: stp q22, q20, [x8, #128]
; CHECK-NEXT: fcvt s21, h21
; CHECK-NEXT: fcvt s5, h5
; CHECK-NEXT: mov v24.d[1], x13
; CHECK-NEXT: mov v26.d[1], x14
; CHECK-NEXT: fcvt s4, h4
; CHECK-NEXT: frintx s22, s25
; CHECK-NEXT: fmov d20, x12
; CHECK-NEXT: fcvt s1, h1
; CHECK-NEXT: frintx s6, s6
; CHECK-NEXT: fcvt s3, h3
; CHECK-NEXT: fcvtzs x12, s0
; CHECK-NEXT: frintx s5, s5
; CHECK-NEXT: frintx s21, s21
; CHECK-NEXT: fmov d0, x11
; CHECK-NEXT: stp q26, q24, [x8, #64]
; CHECK-NEXT: fmov d24, x15
; CHECK-NEXT: frintx s4, s4
; CHECK-NEXT: fcvtzs x11, s22
; CHECK-NEXT: frintx s22, s23
; CHECK-NEXT: frintx s1, s1
; CHECK-NEXT: fcvtzs x13, s6
; CHECK-NEXT: frintx s3, s3
; CHECK-NEXT: frintx s6, s7
; CHECK-NEXT: fcvtzs x14, s5
; CHECK-NEXT: mov v24.d[1], x12
; CHECK-NEXT: frintx s5, s19
; CHECK-NEXT: fcvtzs x12, s21
; CHECK-NEXT: mov v0.d[1], x10
; CHECK-NEXT: fcvtzs x10, s4
; CHECK-NEXT: mov v20.d[1], x11
; CHECK-NEXT: fcvtzs x11, s22
; CHECK-NEXT: mov v2.d[1], x13
; CHECK-NEXT: fcvtzs x15, s3
; CHECK-NEXT: fcvtzs x13, s1
; CHECK-NEXT: mov v18.d[1], x14
; CHECK-NEXT: fcvtzs x14, s6
; CHECK-NEXT: stp q0, q24, [x8]
; CHECK-NEXT: mov v17.d[1], x12
; CHECK-NEXT: fcvtzs x12, s5
; CHECK-NEXT: fmov d0, x10
; CHECK-NEXT: fmov d1, x11
; CHECK-NEXT: stp q2, q20, [x8, #224]
; CHECK-NEXT: fmov d2, x9
; CHECK-NEXT: mov v16.d[1], x15
; CHECK-NEXT: stp q17, q18, [x8, #160]
; CHECK-NEXT: mov v0.d[1], x13
; CHECK-NEXT: mov v1.d[1], x14
; CHECK-NEXT: mov v2.d[1], x12
; CHECK-NEXT: stp q0, q16, [x8, #96]
; CHECK-NEXT: stp q2, q1, [x8, #32]
; CHECK-NEXT: ret
%a = call <32 x i64> @llvm.llrint.v32i64.v32f16(<32 x half> %x)
ret <32 x i64> %a
}
declare <32 x i64> @llvm.llrint.v32i64.v32f16(<32 x half>)
define <1 x i64> @llrint_v1i64_v1f32(<1 x float> %x) {
; CHECK-LABEL: llrint_v1i64_v1f32:
; CHECK: // %bb.0:
; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
; CHECK-NEXT: frintx s0, s0
; CHECK-NEXT: fcvtzs x8, s0
; CHECK-NEXT: fmov d0, x8
; CHECK-NEXT: ret
%a = call <1 x i64> @llvm.llrint.v1i64.v1f32(<1 x float> %x)
ret <1 x i64> %a
}
declare <1 x i64> @llvm.llrint.v1i64.v1f32(<1 x float>)
define <2 x i64> @llrint_v2i64_v2f32(<2 x float> %x) {
; CHECK-LABEL: llrint_v2i64_v2f32:
; CHECK: // %bb.0:
; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
; CHECK-NEXT: mov s1, v0.s[1]
; CHECK-NEXT: frintx s0, s0
; CHECK-NEXT: frintx s1, s1
; CHECK-NEXT: fcvtzs x8, s0
; CHECK-NEXT: fcvtzs x9, s1
; CHECK-NEXT: fmov d0, x8
; CHECK-NEXT: mov v0.d[1], x9
; CHECK-NEXT: ret
%a = call <2 x i64> @llvm.llrint.v2i64.v2f32(<2 x float> %x)
ret <2 x i64> %a
}
declare <2 x i64> @llvm.llrint.v2i64.v2f32(<2 x float>)
define <4 x i64> @llrint_v4i64_v4f32(<4 x float> %x) {
; CHECK-LABEL: llrint_v4i64_v4f32:
; CHECK: // %bb.0:
; CHECK-NEXT: ext v1.16b, v0.16b, v0.16b, #8
; CHECK-NEXT: mov s3, v0.s[1]
; CHECK-NEXT: frintx s0, s0
; CHECK-NEXT: mov s2, v1.s[1]
; CHECK-NEXT: frintx s1, s1
; CHECK-NEXT: frintx s3, s3
; CHECK-NEXT: fcvtzs x9, s0
; CHECK-NEXT: frintx s2, s2
; CHECK-NEXT: fcvtzs x8, s1
; CHECK-NEXT: fcvtzs x11, s3
; CHECK-NEXT: fmov d0, x9
; CHECK-NEXT: fcvtzs x10, s2
; CHECK-NEXT: fmov d1, x8
; CHECK-NEXT: mov v0.d[1], x11
; CHECK-NEXT: mov v1.d[1], x10
; CHECK-NEXT: ret
%a = call <4 x i64> @llvm.llrint.v4i64.v4f32(<4 x float> %x)
ret <4 x i64> %a
}
declare <4 x i64> @llvm.llrint.v4i64.v4f32(<4 x float>)
define <8 x i64> @llrint_v8i64_v8f32(<8 x float> %x) {
; CHECK-LABEL: llrint_v8i64_v8f32:
; CHECK: // %bb.0:
; CHECK-NEXT: ext v2.16b, v0.16b, v0.16b, #8
; CHECK-NEXT: ext v3.16b, v1.16b, v1.16b, #8
; CHECK-NEXT: mov s4, v0.s[1]
; CHECK-NEXT: mov s7, v1.s[1]
; CHECK-NEXT: frintx s0, s0
; CHECK-NEXT: frintx s1, s1
; CHECK-NEXT: mov s5, v2.s[1]
; CHECK-NEXT: mov s6, v3.s[1]
; CHECK-NEXT: frintx s2, s2
; CHECK-NEXT: frintx s3, s3
; CHECK-NEXT: frintx s4, s4
; CHECK-NEXT: frintx s7, s7
; CHECK-NEXT: fcvtzs x9, s0
; CHECK-NEXT: fcvtzs x12, s1
; CHECK-NEXT: frintx s5, s5
; CHECK-NEXT: frintx s6, s6
; CHECK-NEXT: fcvtzs x8, s2
; CHECK-NEXT: fcvtzs x10, s3
; CHECK-NEXT: fcvtzs x11, s4
; CHECK-NEXT: fcvtzs x15, s7
; CHECK-NEXT: fmov d0, x9
; CHECK-NEXT: fmov d2, x12
; CHECK-NEXT: fcvtzs x13, s5
; CHECK-NEXT: fcvtzs x14, s6
; CHECK-NEXT: fmov d1, x8
; CHECK-NEXT: fmov d3, x10
; CHECK-NEXT: mov v0.d[1], x11
; CHECK-NEXT: mov v2.d[1], x15
; CHECK-NEXT: mov v1.d[1], x13
; CHECK-NEXT: mov v3.d[1], x14
; CHECK-NEXT: ret
%a = call <8 x i64> @llvm.llrint.v8i64.v8f32(<8 x float> %x)
ret <8 x i64> %a
}
declare <8 x i64> @llvm.llrint.v8i64.v8f32(<8 x float>)
define <16 x i64> @llrint_v16i64_v16f32(<16 x float> %x) {
; CHECK-LABEL: llrint_v16i64_v16f32:
; CHECK: // %bb.0:
; CHECK-NEXT: ext v4.16b, v0.16b, v0.16b, #8
; CHECK-NEXT: ext v5.16b, v1.16b, v1.16b, #8
; CHECK-NEXT: ext v6.16b, v2.16b, v2.16b, #8
; CHECK-NEXT: frintx s7, s0
; CHECK-NEXT: ext v16.16b, v3.16b, v3.16b, #8
; CHECK-NEXT: mov s0, v0.s[1]
; CHECK-NEXT: frintx s17, s4
; CHECK-NEXT: mov s4, v4.s[1]
; CHECK-NEXT: mov s18, v5.s[1]
; CHECK-NEXT: frintx s5, s5
; CHECK-NEXT: frintx s19, s6
; CHECK-NEXT: fcvtzs x8, s7
; CHECK-NEXT: frintx s7, s16
; CHECK-NEXT: mov s6, v6.s[1]
; CHECK-NEXT: mov s16, v16.s[1]
; CHECK-NEXT: frintx s0, s0
; CHECK-NEXT: frintx s4, s4
; CHECK-NEXT: fcvtzs x9, s17
; CHECK-NEXT: frintx s17, s1
; CHECK-NEXT: mov s1, v1.s[1]
; CHECK-NEXT: frintx s18, s18
; CHECK-NEXT: fcvtzs x10, s5
; CHECK-NEXT: mov s5, v2.s[1]
; CHECK-NEXT: fcvtzs x11, s19
; CHECK-NEXT: mov s19, v3.s[1]
; CHECK-NEXT: frintx s2, s2
; CHECK-NEXT: fcvtzs x12, s7
; CHECK-NEXT: frintx s6, s6
; CHECK-NEXT: fcvtzs x13, s4
; CHECK-NEXT: frintx s4, s3
; CHECK-NEXT: frintx s16, s16
; CHECK-NEXT: fcvtzs x14, s18
; CHECK-NEXT: frintx s18, s1
; CHECK-NEXT: fcvtzs x15, s17
; CHECK-NEXT: frintx s20, s5
; CHECK-NEXT: frintx s17, s19
; CHECK-NEXT: fmov d1, x9
; CHECK-NEXT: fcvtzs x9, s2
; CHECK-NEXT: fmov d5, x11
; CHECK-NEXT: fmov d3, x10
; CHECK-NEXT: fcvtzs x11, s4
; CHECK-NEXT: fcvtzs x10, s0
; CHECK-NEXT: fmov d7, x12
; CHECK-NEXT: fcvtzs x12, s18
; CHECK-NEXT: fcvtzs x17, s6
; CHECK-NEXT: fcvtzs x18, s16
; CHECK-NEXT: fcvtzs x16, s20
; CHECK-NEXT: fcvtzs x0, s17
; CHECK-NEXT: fmov d0, x8
; CHECK-NEXT: fmov d2, x15
; CHECK-NEXT: fmov d4, x9
; CHECK-NEXT: mov v1.d[1], x13
; CHECK-NEXT: fmov d6, x11
; CHECK-NEXT: mov v3.d[1], x14
; CHECK-NEXT: mov v0.d[1], x10
; CHECK-NEXT: mov v5.d[1], x17
; CHECK-NEXT: mov v7.d[1], x18
; CHECK-NEXT: mov v2.d[1], x12
; CHECK-NEXT: mov v4.d[1], x16
; CHECK-NEXT: mov v6.d[1], x0
; CHECK-NEXT: ret
%a = call <16 x i64> @llvm.llrint.v16i64.v16f32(<16 x float> %x)
ret <16 x i64> %a
}
declare <16 x i64> @llvm.llrint.v16i64.v16f32(<16 x float>)
define <1 x i64> @llrint_v1i64_v1f64(<1 x double> %x) {
; CHECK-LABEL: llrint_v1i64_v1f64:
; CHECK: // %bb.0:
; CHECK-NEXT: frintx d0, d0
; CHECK-NEXT: fcvtzs x8, d0
; CHECK-NEXT: fmov d0, x8
; CHECK-NEXT: ret
%a = call <1 x i64> @llvm.llrint.v1i64.v1f64(<1 x double> %x)
ret <1 x i64> %a
}
declare <1 x i64> @llvm.llrint.v1i64.v1f64(<1 x double>)
define <2 x i64> @llrint_v2i64_v2f64(<2 x double> %x) {
; CHECK-LABEL: llrint_v2i64_v2f64:
; CHECK: // %bb.0:
; CHECK-NEXT: mov d1, v0.d[1]
; CHECK-NEXT: frintx d0, d0
; CHECK-NEXT: frintx d1, d1
; CHECK-NEXT: fcvtzs x8, d0
; CHECK-NEXT: fcvtzs x9, d1
; CHECK-NEXT: fmov d0, x8
; CHECK-NEXT: mov v0.d[1], x9
; CHECK-NEXT: ret
%a = call <2 x i64> @llvm.llrint.v2i64.v2f64(<2 x double> %x)
ret <2 x i64> %a
}
declare <2 x i64> @llvm.llrint.v2i64.v2f64(<2 x double>)
define <4 x i64> @llrint_v4i64_v4f64(<4 x double> %x) {
; CHECK-LABEL: llrint_v4i64_v4f64:
; CHECK: // %bb.0:
; CHECK-NEXT: mov d2, v0.d[1]
; CHECK-NEXT: mov d3, v1.d[1]
; CHECK-NEXT: frintx d0, d0
; CHECK-NEXT: frintx d1, d1
; CHECK-NEXT: frintx d2, d2
; CHECK-NEXT: frintx d3, d3
; CHECK-NEXT: fcvtzs x8, d0
; CHECK-NEXT: fcvtzs x9, d1
; CHECK-NEXT: fcvtzs x10, d2
; CHECK-NEXT: fcvtzs x11, d3
; CHECK-NEXT: fmov d0, x8
; CHECK-NEXT: fmov d1, x9
; CHECK-NEXT: mov v0.d[1], x10
; CHECK-NEXT: mov v1.d[1], x11
; CHECK-NEXT: ret
%a = call <4 x i64> @llvm.llrint.v4i64.v4f64(<4 x double> %x)
ret <4 x i64> %a
}
declare <4 x i64> @llvm.llrint.v4i64.v4f64(<4 x double>)
define <8 x i64> @llrint_v8i64_v8f64(<8 x double> %x) {
; CHECK-LABEL: llrint_v8i64_v8f64:
; CHECK: // %bb.0:
; CHECK-NEXT: mov d4, v0.d[1]
; CHECK-NEXT: mov d5, v1.d[1]
; CHECK-NEXT: mov d6, v2.d[1]
; CHECK-NEXT: mov d7, v3.d[1]
; CHECK-NEXT: frintx d0, d0
; CHECK-NEXT: frintx d1, d1
; CHECK-NEXT: frintx d2, d2
; CHECK-NEXT: frintx d3, d3
; CHECK-NEXT: frintx d4, d4
; CHECK-NEXT: frintx d5, d5
; CHECK-NEXT: frintx d6, d6
; CHECK-NEXT: frintx d7, d7
; CHECK-NEXT: fcvtzs x8, d0
; CHECK-NEXT: fcvtzs x9, d1
; CHECK-NEXT: fcvtzs x10, d2
; CHECK-NEXT: fcvtzs x11, d3
; CHECK-NEXT: fcvtzs x12, d4
; CHECK-NEXT: fcvtzs x13, d5
; CHECK-NEXT: fcvtzs x14, d6
; CHECK-NEXT: fcvtzs x15, d7
; CHECK-NEXT: fmov d0, x8
; CHECK-NEXT: fmov d1, x9
; CHECK-NEXT: fmov d2, x10
; CHECK-NEXT: fmov d3, x11
; CHECK-NEXT: mov v0.d[1], x12
; CHECK-NEXT: mov v1.d[1], x13
; CHECK-NEXT: mov v2.d[1], x14
; CHECK-NEXT: mov v3.d[1], x15
; CHECK-NEXT: ret
%a = call <8 x i64> @llvm.llrint.v8i64.v8f64(<8 x double> %x)
ret <8 x i64> %a
}
declare <8 x i64> @llvm.llrint.v8i64.v8f64(<8 x double>)

@@ -0,0 +1,622 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=aarch64 -mattr=+neon | FileCheck %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=aarch64 -mattr=+neon | FileCheck %s
define <1 x i64> @lrint_v1f16(<1 x half> %x) {
; CHECK-LABEL: lrint_v1f16:
; CHECK: // %bb.0:
; CHECK-NEXT: fcvt s0, h0
; CHECK-NEXT: frintx s0, s0
; CHECK-NEXT: fcvtzs x8, s0
; CHECK-NEXT: fmov d0, x8
; CHECK-NEXT: ret
%a = call <1 x i64> @llvm.lrint.v1i64.v1f16(<1 x half> %x)
ret <1 x i64> %a
}
declare <1 x i64> @llvm.lrint.v1i64.v1f16(<1 x half>)
define <2 x i64> @lrint_v2f16(<2 x half> %x) {
; CHECK-LABEL: lrint_v2f16:
; CHECK: // %bb.0:
; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
; CHECK-NEXT: mov h1, v0.h[1]
; CHECK-NEXT: fcvt s0, h0
; CHECK-NEXT: fcvt s1, h1
; CHECK-NEXT: frintx s0, s0
; CHECK-NEXT: frintx s1, s1
; CHECK-NEXT: fcvtzs x8, s0
; CHECK-NEXT: fcvtzs x9, s1
; CHECK-NEXT: fmov d0, x8
; CHECK-NEXT: mov v0.d[1], x9
; CHECK-NEXT: ret
%a = call <2 x i64> @llvm.lrint.v2i64.v2f16(<2 x half> %x)
ret <2 x i64> %a
}
declare <2 x i64> @llvm.lrint.v2i64.v2f16(<2 x half>)
define <4 x i64> @lrint_v4f16(<4 x half> %x) {
; CHECK-LABEL: lrint_v4f16:
; CHECK: // %bb.0:
; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
; CHECK-NEXT: mov h1, v0.h[2]
; CHECK-NEXT: mov h2, v0.h[1]
; CHECK-NEXT: mov h3, v0.h[3]
; CHECK-NEXT: fcvt s0, h0
; CHECK-NEXT: fcvt s1, h1
; CHECK-NEXT: fcvt s2, h2
; CHECK-NEXT: fcvt s3, h3
; CHECK-NEXT: frintx s0, s0
; CHECK-NEXT: frintx s1, s1
; CHECK-NEXT: frintx s2, s2
; CHECK-NEXT: frintx s3, s3
; CHECK-NEXT: fcvtzs x8, s0
; CHECK-NEXT: fcvtzs x9, s1
; CHECK-NEXT: fcvtzs x10, s2
; CHECK-NEXT: fcvtzs x11, s3
; CHECK-NEXT: fmov d0, x8
; CHECK-NEXT: fmov d1, x9
; CHECK-NEXT: mov v0.d[1], x10
; CHECK-NEXT: mov v1.d[1], x11
; CHECK-NEXT: ret
%a = call <4 x i64> @llvm.lrint.v4i64.v4f16(<4 x half> %x)
ret <4 x i64> %a
}
declare <4 x i64> @llvm.lrint.v4i64.v4f16(<4 x half>)
define <8 x i64> @lrint_v8f16(<8 x half> %x) {
; CHECK-LABEL: lrint_v8f16:
; CHECK: // %bb.0:
; CHECK-NEXT: ext v1.16b, v0.16b, v0.16b, #8
; CHECK-NEXT: mov h4, v0.h[2]
; CHECK-NEXT: mov h3, v0.h[1]
; CHECK-NEXT: mov h7, v0.h[3]
; CHECK-NEXT: fcvt s0, h0
; CHECK-NEXT: mov h2, v1.h[2]
; CHECK-NEXT: mov h5, v1.h[1]
; CHECK-NEXT: mov h6, v1.h[3]
; CHECK-NEXT: fcvt s1, h1
; CHECK-NEXT: fcvt s4, h4
; CHECK-NEXT: fcvt s3, h3
; CHECK-NEXT: fcvt s7, h7
; CHECK-NEXT: frintx s0, s0
; CHECK-NEXT: fcvt s2, h2
; CHECK-NEXT: fcvt s5, h5
; CHECK-NEXT: fcvt s6, h6
; CHECK-NEXT: frintx s1, s1
; CHECK-NEXT: frintx s4, s4
; CHECK-NEXT: frintx s3, s3
; CHECK-NEXT: frintx s7, s7
; CHECK-NEXT: fcvtzs x9, s0
; CHECK-NEXT: frintx s2, s2
; CHECK-NEXT: frintx s5, s5
; CHECK-NEXT: frintx s6, s6
; CHECK-NEXT: fcvtzs x8, s1
; CHECK-NEXT: fcvtzs x12, s4
; CHECK-NEXT: fcvtzs x11, s3
; CHECK-NEXT: fcvtzs x15, s7
; CHECK-NEXT: fmov d0, x9
; CHECK-NEXT: fcvtzs x10, s2
; CHECK-NEXT: fcvtzs x13, s5
; CHECK-NEXT: fcvtzs x14, s6
; CHECK-NEXT: fmov d2, x8
; CHECK-NEXT: fmov d1, x12
; CHECK-NEXT: mov v0.d[1], x11
; CHECK-NEXT: fmov d3, x10
; CHECK-NEXT: mov v2.d[1], x13
; CHECK-NEXT: mov v1.d[1], x15
; CHECK-NEXT: mov v3.d[1], x14
; CHECK-NEXT: ret
%a = call <8 x i64> @llvm.lrint.v8i64.v8f16(<8 x half> %x)
ret <8 x i64> %a
}
declare <8 x i64> @llvm.lrint.v8i64.v8f16(<8 x half>)
define <16 x i64> @lrint_v16i64_v16f16(<16 x half> %x) {
; CHECK-LABEL: lrint_v16i64_v16f16:
; CHECK: // %bb.0:
; CHECK-NEXT: ext v2.16b, v0.16b, v0.16b, #8
; CHECK-NEXT: ext v3.16b, v1.16b, v1.16b, #8
; CHECK-NEXT: mov h17, v0.h[1]
; CHECK-NEXT: mov h19, v0.h[2]
; CHECK-NEXT: fcvt s18, h0
; CHECK-NEXT: mov h0, v0.h[3]
; CHECK-NEXT: mov h4, v2.h[1]
; CHECK-NEXT: mov h5, v2.h[2]
; CHECK-NEXT: fcvt s7, h3
; CHECK-NEXT: fcvt s6, h2
; CHECK-NEXT: mov h16, v3.h[2]
; CHECK-NEXT: mov h2, v2.h[3]
; CHECK-NEXT: fcvt s17, h17
; CHECK-NEXT: fcvt s19, h19
; CHECK-NEXT: frintx s18, s18
; CHECK-NEXT: fcvt s0, h0
; CHECK-NEXT: fcvt s4, h4
; CHECK-NEXT: fcvt s5, h5
; CHECK-NEXT: frintx s7, s7
; CHECK-NEXT: frintx s6, s6
; CHECK-NEXT: fcvt s16, h16
; CHECK-NEXT: fcvt s2, h2
; CHECK-NEXT: frintx s17, s17
; CHECK-NEXT: frintx s19, s19
; CHECK-NEXT: fcvtzs x13, s18
; CHECK-NEXT: frintx s0, s0
; CHECK-NEXT: frintx s4, s4
; CHECK-NEXT: frintx s5, s5
; CHECK-NEXT: fcvtzs x9, s7
; CHECK-NEXT: mov h7, v1.h[2]
; CHECK-NEXT: fcvtzs x8, s6
; CHECK-NEXT: mov h6, v1.h[1]
; CHECK-NEXT: frintx s16, s16
; CHECK-NEXT: fcvtzs x14, s17
; CHECK-NEXT: fcvtzs x15, s19
; CHECK-NEXT: fcvtzs x10, s4
; CHECK-NEXT: mov h4, v3.h[1]
; CHECK-NEXT: fcvtzs x11, s5
; CHECK-NEXT: mov h5, v1.h[3]
; CHECK-NEXT: mov h3, v3.h[3]
; CHECK-NEXT: fcvt s1, h1
; CHECK-NEXT: fcvt s7, h7
; CHECK-NEXT: fcvt s6, h6
; CHECK-NEXT: fcvtzs x12, s16
; CHECK-NEXT: frintx s16, s2
; CHECK-NEXT: fmov d2, x8
; CHECK-NEXT: fcvt s4, h4
; CHECK-NEXT: fcvt s3, h3
; CHECK-NEXT: fcvt s5, h5
; CHECK-NEXT: frintx s1, s1
; CHECK-NEXT: frintx s7, s7
; CHECK-NEXT: frintx s17, s6
; CHECK-NEXT: fmov d6, x9
; CHECK-NEXT: mov v2.d[1], x10
; CHECK-NEXT: frintx s4, s4
; CHECK-NEXT: frintx s18, s3
; CHECK-NEXT: frintx s5, s5
; CHECK-NEXT: fcvtzs x8, s1
; CHECK-NEXT: fcvtzs x9, s7
; CHECK-NEXT: fmov d3, x11
; CHECK-NEXT: fcvtzs x11, s0
; CHECK-NEXT: fmov d7, x12
; CHECK-NEXT: fcvtzs x12, s16
; CHECK-NEXT: fcvtzs x16, s17
; CHECK-NEXT: fcvtzs x17, s4
; CHECK-NEXT: fmov d0, x13
; CHECK-NEXT: fmov d1, x15
; CHECK-NEXT: fcvtzs x18, s18
; CHECK-NEXT: fcvtzs x0, s5
; CHECK-NEXT: fmov d4, x8
; CHECK-NEXT: fmov d5, x9
; CHECK-NEXT: mov v0.d[1], x14
; CHECK-NEXT: mov v1.d[1], x11
; CHECK-NEXT: mov v3.d[1], x12
; CHECK-NEXT: mov v4.d[1], x16
; CHECK-NEXT: mov v6.d[1], x17
; CHECK-NEXT: mov v7.d[1], x18
; CHECK-NEXT: mov v5.d[1], x0
; CHECK-NEXT: ret
%a = call <16 x i64> @llvm.lrint.v16i64.v16f16(<16 x half> %x)
ret <16 x i64> %a
}
declare <16 x i64> @llvm.lrint.v16i64.v16f16(<16 x half>)
define <32 x i64> @lrint_v32i64_v32f16(<32 x half> %x) {
; CHECK-LABEL: lrint_v32i64_v32f16:
; CHECK: // %bb.0:
; CHECK-NEXT: ext v4.16b, v1.16b, v1.16b, #8
; CHECK-NEXT: ext v5.16b, v2.16b, v2.16b, #8
; CHECK-NEXT: ext v6.16b, v3.16b, v3.16b, #8
; CHECK-NEXT: ext v7.16b, v0.16b, v0.16b, #8
; CHECK-NEXT: mov h19, v0.h[1]
; CHECK-NEXT: fcvt s21, h0
; CHECK-NEXT: mov h23, v1.h[2]
; CHECK-NEXT: fcvt s22, h1
; CHECK-NEXT: fcvt s26, h2
; CHECK-NEXT: mov h27, v2.h[1]
; CHECK-NEXT: mov h28, v2.h[2]
; CHECK-NEXT: mov h16, v4.h[2]
; CHECK-NEXT: fcvt s17, h5
; CHECK-NEXT: mov h18, v5.h[2]
; CHECK-NEXT: mov h20, v6.h[2]
; CHECK-NEXT: fcvt s24, h7
; CHECK-NEXT: fcvt s25, h6
; CHECK-NEXT: fcvt s19, h19
; CHECK-NEXT: frintx s22, s22
; CHECK-NEXT: fcvt s16, h16
; CHECK-NEXT: frintx s17, s17
; CHECK-NEXT: fcvt s18, h18
; CHECK-NEXT: fcvt s20, h20
; CHECK-NEXT: frintx s16, s16
; CHECK-NEXT: fcvtzs x12, s17
; CHECK-NEXT: frintx s17, s18
; CHECK-NEXT: frintx s18, s21
; CHECK-NEXT: fcvt s21, h23
; CHECK-NEXT: frintx s23, s24
; CHECK-NEXT: frintx s24, s25
; CHECK-NEXT: frintx s25, s19
; CHECK-NEXT: mov h19, v7.h[1]
; CHECK-NEXT: fcvtzs x13, s16
; CHECK-NEXT: frintx s16, s20
; CHECK-NEXT: frintx s20, s26
; CHECK-NEXT: fcvtzs x9, s23
; CHECK-NEXT: mov h23, v3.h[2]
; CHECK-NEXT: fcvt s26, h27
; CHECK-NEXT: fcvtzs x15, s24
; CHECK-NEXT: fcvtzs x10, s25
; CHECK-NEXT: fcvt s24, h28
; CHECK-NEXT: mov h25, v3.h[3]
; CHECK-NEXT: fcvtzs x14, s17
; CHECK-NEXT: frintx s21, s21
; CHECK-NEXT: fmov d17, x12
; CHECK-NEXT: fcvtzs x12, s16
; CHECK-NEXT: fmov d16, x13
; CHECK-NEXT: fcvtzs x13, s22
; CHECK-NEXT: fcvt s22, h3
; CHECK-NEXT: mov h3, v3.h[1]
; CHECK-NEXT: mov h27, v0.h[2]
; CHECK-NEXT: mov h28, v2.h[3]
; CHECK-NEXT: fcvt s23, h23
; CHECK-NEXT: frintx s26, s26
; CHECK-NEXT: fcvtzs x16, s20
; CHECK-NEXT: frintx s20, s24
; CHECK-NEXT: fcvt s24, h25
; CHECK-NEXT: fcvtzs x11, s18
; CHECK-NEXT: fmov d18, x14
; CHECK-NEXT: fcvtzs x14, s21
; CHECK-NEXT: frintx s22, s22
; CHECK-NEXT: fcvt s3, h3
; CHECK-NEXT: fcvt s25, h27
; CHECK-NEXT: fcvt s27, h28
; CHECK-NEXT: frintx s23, s23
; CHECK-NEXT: mov h21, v1.h[3]
; CHECK-NEXT: fmov d2, x15
; CHECK-NEXT: fcvtzs x15, s26
; CHECK-NEXT: fmov d26, x13
; CHECK-NEXT: mov h1, v1.h[1]
; CHECK-NEXT: fcvtzs x13, s20
; CHECK-NEXT: frintx s20, s24
; CHECK-NEXT: fmov d24, x14
; CHECK-NEXT: fcvtzs x14, s22
; CHECK-NEXT: frintx s3, s3
; CHECK-NEXT: fmov d22, x16
; CHECK-NEXT: frintx s27, s27
; CHECK-NEXT: fcvtzs x16, s23
; CHECK-NEXT: fcvt s21, h21
; CHECK-NEXT: frintx s25, s25
; CHECK-NEXT: fcvt s1, h1
; CHECK-NEXT: mov h0, v0.h[3]
; CHECK-NEXT: mov h23, v7.h[2]
; CHECK-NEXT: mov v22.d[1], x15
; CHECK-NEXT: fcvtzs x15, s20
; CHECK-NEXT: fmov d20, x13
; CHECK-NEXT: fcvtzs x13, s3
; CHECK-NEXT: fmov d3, x14
; CHECK-NEXT: fcvtzs x14, s27
; CHECK-NEXT: fmov d27, x16
; CHECK-NEXT: frintx s21, s21
; CHECK-NEXT: mov h7, v7.h[3]
; CHECK-NEXT: frintx s1, s1
; CHECK-NEXT: fcvt s0, h0
; CHECK-NEXT: fcvt s23, h23
; CHECK-NEXT: fcvt s19, h19
; CHECK-NEXT: mov v27.d[1], x15
; CHECK-NEXT: fcvtzs x15, s25
; CHECK-NEXT: mov h25, v6.h[3]
; CHECK-NEXT: mov h6, v6.h[1]
; CHECK-NEXT: mov v3.d[1], x13
; CHECK-NEXT: fcvtzs x13, s21
; CHECK-NEXT: mov h21, v5.h[1]
; CHECK-NEXT: mov h5, v5.h[3]
; CHECK-NEXT: mov v20.d[1], x14
; CHECK-NEXT: fcvtzs x14, s1
; CHECK-NEXT: mov h1, v4.h[1]
; CHECK-NEXT: frintx s0, s0
; CHECK-NEXT: fcvt s25, h25
; CHECK-NEXT: fcvt s7, h7
; CHECK-NEXT: stp q3, q27, [x8, #192]
; CHECK-NEXT: fcvt s6, h6
; CHECK-NEXT: mov h3, v4.h[3]
; CHECK-NEXT: stp q22, q20, [x8, #128]
; CHECK-NEXT: fcvt s21, h21
; CHECK-NEXT: fcvt s5, h5
; CHECK-NEXT: mov v24.d[1], x13
; CHECK-NEXT: mov v26.d[1], x14
; CHECK-NEXT: fcvt s4, h4
; CHECK-NEXT: frintx s22, s25
; CHECK-NEXT: fmov d20, x12
; CHECK-NEXT: fcvt s1, h1
; CHECK-NEXT: frintx s6, s6
; CHECK-NEXT: fcvt s3, h3
; CHECK-NEXT: fcvtzs x12, s0
; CHECK-NEXT: frintx s5, s5
; CHECK-NEXT: frintx s21, s21
; CHECK-NEXT: fmov d0, x11
; CHECK-NEXT: stp q26, q24, [x8, #64]
; CHECK-NEXT: fmov d24, x15
; CHECK-NEXT: frintx s4, s4
; CHECK-NEXT: fcvtzs x11, s22
; CHECK-NEXT: frintx s22, s23
; CHECK-NEXT: frintx s1, s1
; CHECK-NEXT: fcvtzs x13, s6
; CHECK-NEXT: frintx s3, s3
; CHECK-NEXT: frintx s6, s7
; CHECK-NEXT: fcvtzs x14, s5
; CHECK-NEXT: mov v24.d[1], x12
; CHECK-NEXT: frintx s5, s19
; CHECK-NEXT: fcvtzs x12, s21
; CHECK-NEXT: mov v0.d[1], x10
; CHECK-NEXT: fcvtzs x10, s4
; CHECK-NEXT: mov v20.d[1], x11
; CHECK-NEXT: fcvtzs x11, s22
; CHECK-NEXT: mov v2.d[1], x13
; CHECK-NEXT: fcvtzs x15, s3
; CHECK-NEXT: fcvtzs x13, s1
; CHECK-NEXT: mov v18.d[1], x14
; CHECK-NEXT: fcvtzs x14, s6
; CHECK-NEXT: stp q0, q24, [x8]
; CHECK-NEXT: mov v17.d[1], x12
; CHECK-NEXT: fcvtzs x12, s5
; CHECK-NEXT: fmov d0, x10
; CHECK-NEXT: fmov d1, x11
; CHECK-NEXT: stp q2, q20, [x8, #224]
; CHECK-NEXT: fmov d2, x9
; CHECK-NEXT: mov v16.d[1], x15
; CHECK-NEXT: stp q17, q18, [x8, #160]
; CHECK-NEXT: mov v0.d[1], x13
; CHECK-NEXT: mov v1.d[1], x14
; CHECK-NEXT: mov v2.d[1], x12
; CHECK-NEXT: stp q0, q16, [x8, #96]
; CHECK-NEXT: stp q2, q1, [x8, #32]
; CHECK-NEXT: ret
%a = call <32 x i64> @llvm.lrint.v32i64.v32f16(<32 x half> %x)
ret <32 x i64> %a
}
declare <32 x i64> @llvm.lrint.v32i64.v32f16(<32 x half>)
define <1 x i64> @lrint_v1f32(<1 x float> %x) {
; CHECK-LABEL: lrint_v1f32:
; CHECK: // %bb.0:
; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
; CHECK-NEXT: frintx s0, s0
; CHECK-NEXT: fcvtzs x8, s0
; CHECK-NEXT: fmov d0, x8
; CHECK-NEXT: ret
%a = call <1 x i64> @llvm.lrint.v1i64.v1f32(<1 x float> %x)
ret <1 x i64> %a
}
declare <1 x i64> @llvm.lrint.v1i64.v1f32(<1 x float>)
define <2 x i64> @lrint_v2f32(<2 x float> %x) {
; CHECK-LABEL: lrint_v2f32:
; CHECK: // %bb.0:
; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
; CHECK-NEXT: mov s1, v0.s[1]
; CHECK-NEXT: frintx s0, s0
; CHECK-NEXT: frintx s1, s1
; CHECK-NEXT: fcvtzs x8, s0
; CHECK-NEXT: fcvtzs x9, s1
; CHECK-NEXT: fmov d0, x8
; CHECK-NEXT: mov v0.d[1], x9
; CHECK-NEXT: ret
%a = call <2 x i64> @llvm.lrint.v2i64.v2f32(<2 x float> %x)
ret <2 x i64> %a
}
declare <2 x i64> @llvm.lrint.v2i64.v2f32(<2 x float>)
define <4 x i64> @lrint_v4f32(<4 x float> %x) {
; CHECK-LABEL: lrint_v4f32:
; CHECK: // %bb.0:
; CHECK-NEXT: ext v1.16b, v0.16b, v0.16b, #8
; CHECK-NEXT: mov s3, v0.s[1]
; CHECK-NEXT: frintx s0, s0
; CHECK-NEXT: mov s2, v1.s[1]
; CHECK-NEXT: frintx s1, s1
; CHECK-NEXT: frintx s3, s3
; CHECK-NEXT: fcvtzs x9, s0
; CHECK-NEXT: frintx s2, s2
; CHECK-NEXT: fcvtzs x8, s1
; CHECK-NEXT: fcvtzs x11, s3
; CHECK-NEXT: fmov d0, x9
; CHECK-NEXT: fcvtzs x10, s2
; CHECK-NEXT: fmov d1, x8
; CHECK-NEXT: mov v0.d[1], x11
; CHECK-NEXT: mov v1.d[1], x10
; CHECK-NEXT: ret
%a = call <4 x i64> @llvm.lrint.v4i64.v4f32(<4 x float> %x)
ret <4 x i64> %a
}
declare <4 x i64> @llvm.lrint.v4i64.v4f32(<4 x float>)
define <8 x i64> @lrint_v8f32(<8 x float> %x) {
; CHECK-LABEL: lrint_v8f32:
; CHECK: // %bb.0:
; CHECK-NEXT: ext v2.16b, v0.16b, v0.16b, #8
; CHECK-NEXT: ext v3.16b, v1.16b, v1.16b, #8
; CHECK-NEXT: mov s4, v0.s[1]
; CHECK-NEXT: mov s7, v1.s[1]
; CHECK-NEXT: frintx s0, s0
; CHECK-NEXT: frintx s1, s1
; CHECK-NEXT: mov s5, v2.s[1]
; CHECK-NEXT: mov s6, v3.s[1]
; CHECK-NEXT: frintx s2, s2
; CHECK-NEXT: frintx s3, s3
; CHECK-NEXT: frintx s4, s4
; CHECK-NEXT: frintx s7, s7
; CHECK-NEXT: fcvtzs x9, s0
; CHECK-NEXT: fcvtzs x12, s1
; CHECK-NEXT: frintx s5, s5
; CHECK-NEXT: frintx s6, s6
; CHECK-NEXT: fcvtzs x8, s2
; CHECK-NEXT: fcvtzs x10, s3
; CHECK-NEXT: fcvtzs x11, s4
; CHECK-NEXT: fcvtzs x15, s7
; CHECK-NEXT: fmov d0, x9
; CHECK-NEXT: fmov d2, x12
; CHECK-NEXT: fcvtzs x13, s5
; CHECK-NEXT: fcvtzs x14, s6
; CHECK-NEXT: fmov d1, x8
; CHECK-NEXT: fmov d3, x10
; CHECK-NEXT: mov v0.d[1], x11
; CHECK-NEXT: mov v2.d[1], x15
; CHECK-NEXT: mov v1.d[1], x13
; CHECK-NEXT: mov v3.d[1], x14
; CHECK-NEXT: ret
%a = call <8 x i64> @llvm.lrint.v8i64.v8f32(<8 x float> %x)
ret <8 x i64> %a
}
declare <8 x i64> @llvm.lrint.v8i64.v8f32(<8 x float>)
define <16 x i64> @lrint_v16i64_v16f32(<16 x float> %x) {
; CHECK-LABEL: lrint_v16i64_v16f32:
; CHECK: // %bb.0:
; CHECK-NEXT: ext v4.16b, v0.16b, v0.16b, #8
; CHECK-NEXT: ext v5.16b, v1.16b, v1.16b, #8
; CHECK-NEXT: ext v6.16b, v2.16b, v2.16b, #8
; CHECK-NEXT: frintx s7, s0
; CHECK-NEXT: ext v16.16b, v3.16b, v3.16b, #8
; CHECK-NEXT: mov s0, v0.s[1]
; CHECK-NEXT: frintx s17, s4
; CHECK-NEXT: mov s4, v4.s[1]
; CHECK-NEXT: mov s18, v5.s[1]
; CHECK-NEXT: frintx s5, s5
; CHECK-NEXT: frintx s19, s6
; CHECK-NEXT: fcvtzs x8, s7
; CHECK-NEXT: frintx s7, s16
; CHECK-NEXT: mov s6, v6.s[1]
; CHECK-NEXT: mov s16, v16.s[1]
; CHECK-NEXT: frintx s0, s0
; CHECK-NEXT: frintx s4, s4
; CHECK-NEXT: fcvtzs x9, s17
; CHECK-NEXT: frintx s17, s1
; CHECK-NEXT: mov s1, v1.s[1]
; CHECK-NEXT: frintx s18, s18
; CHECK-NEXT: fcvtzs x10, s5
; CHECK-NEXT: mov s5, v2.s[1]
; CHECK-NEXT: fcvtzs x11, s19
; CHECK-NEXT: mov s19, v3.s[1]
; CHECK-NEXT: frintx s2, s2
; CHECK-NEXT: fcvtzs x12, s7
; CHECK-NEXT: frintx s6, s6
; CHECK-NEXT: fcvtzs x13, s4
; CHECK-NEXT: frintx s4, s3
; CHECK-NEXT: frintx s16, s16
; CHECK-NEXT: fcvtzs x14, s18
; CHECK-NEXT: frintx s18, s1
; CHECK-NEXT: fcvtzs x15, s17
; CHECK-NEXT: frintx s20, s5
; CHECK-NEXT: frintx s17, s19
; CHECK-NEXT: fmov d1, x9
; CHECK-NEXT: fcvtzs x9, s2
; CHECK-NEXT: fmov d5, x11
; CHECK-NEXT: fmov d3, x10
; CHECK-NEXT: fcvtzs x11, s4
; CHECK-NEXT: fcvtzs x10, s0
; CHECK-NEXT: fmov d7, x12
; CHECK-NEXT: fcvtzs x12, s18
; CHECK-NEXT: fcvtzs x17, s6
; CHECK-NEXT: fcvtzs x18, s16
; CHECK-NEXT: fcvtzs x16, s20
; CHECK-NEXT: fcvtzs x0, s17
; CHECK-NEXT: fmov d0, x8
; CHECK-NEXT: fmov d2, x15
; CHECK-NEXT: fmov d4, x9
; CHECK-NEXT: mov v1.d[1], x13
; CHECK-NEXT: fmov d6, x11
; CHECK-NEXT: mov v3.d[1], x14
; CHECK-NEXT: mov v0.d[1], x10
; CHECK-NEXT: mov v5.d[1], x17
; CHECK-NEXT: mov v7.d[1], x18
; CHECK-NEXT: mov v2.d[1], x12
; CHECK-NEXT: mov v4.d[1], x16
; CHECK-NEXT: mov v6.d[1], x0
; CHECK-NEXT: ret
%a = call <16 x i64> @llvm.lrint.v16i64.v16f32(<16 x float> %x)
ret <16 x i64> %a
}
declare <16 x i64> @llvm.lrint.v16i64.v16f32(<16 x float>)
define <1 x i64> @lrint_v1f64(<1 x double> %x) {
; CHECK-LABEL: lrint_v1f64:
; CHECK: // %bb.0:
; CHECK-NEXT: frintx d0, d0
; CHECK-NEXT: fcvtzs x8, d0
; CHECK-NEXT: fmov d0, x8
; CHECK-NEXT: ret
%a = call <1 x i64> @llvm.lrint.v1i64.v1f64(<1 x double> %x)
ret <1 x i64> %a
}
declare <1 x i64> @llvm.lrint.v1i64.v1f64(<1 x double>)
define <2 x i64> @lrint_v2f64(<2 x double> %x) {
; CHECK-LABEL: lrint_v2f64:
; CHECK: // %bb.0:
; CHECK-NEXT: mov d1, v0.d[1]
; CHECK-NEXT: frintx d0, d0
; CHECK-NEXT: frintx d1, d1
; CHECK-NEXT: fcvtzs x8, d0
; CHECK-NEXT: fcvtzs x9, d1
; CHECK-NEXT: fmov d0, x8
; CHECK-NEXT: mov v0.d[1], x9
; CHECK-NEXT: ret
%a = call <2 x i64> @llvm.lrint.v2i64.v2f64(<2 x double> %x)
ret <2 x i64> %a
}
declare <2 x i64> @llvm.lrint.v2i64.v2f64(<2 x double>)
define <4 x i64> @lrint_v4f64(<4 x double> %x) {
; CHECK-LABEL: lrint_v4f64:
; CHECK: // %bb.0:
; CHECK-NEXT: mov d2, v0.d[1]
; CHECK-NEXT: mov d3, v1.d[1]
; CHECK-NEXT: frintx d0, d0
; CHECK-NEXT: frintx d1, d1
; CHECK-NEXT: frintx d2, d2
; CHECK-NEXT: frintx d3, d3
; CHECK-NEXT: fcvtzs x8, d0
; CHECK-NEXT: fcvtzs x9, d1
; CHECK-NEXT: fcvtzs x10, d2
; CHECK-NEXT: fcvtzs x11, d3
; CHECK-NEXT: fmov d0, x8
; CHECK-NEXT: fmov d1, x9
; CHECK-NEXT: mov v0.d[1], x10
; CHECK-NEXT: mov v1.d[1], x11
; CHECK-NEXT: ret
%a = call <4 x i64> @llvm.lrint.v4i64.v4f64(<4 x double> %x)
ret <4 x i64> %a
}
declare <4 x i64> @llvm.lrint.v4i64.v4f64(<4 x double>)
define <8 x i64> @lrint_v8f64(<8 x double> %x) {
; CHECK-LABEL: lrint_v8f64:
; CHECK: // %bb.0:
; CHECK-NEXT: mov d4, v0.d[1]
; CHECK-NEXT: mov d5, v1.d[1]
; CHECK-NEXT: mov d6, v2.d[1]
; CHECK-NEXT: mov d7, v3.d[1]
; CHECK-NEXT: frintx d0, d0
; CHECK-NEXT: frintx d1, d1
; CHECK-NEXT: frintx d2, d2
; CHECK-NEXT: frintx d3, d3
; CHECK-NEXT: frintx d4, d4
; CHECK-NEXT: frintx d5, d5
; CHECK-NEXT: frintx d6, d6
; CHECK-NEXT: frintx d7, d7
; CHECK-NEXT: fcvtzs x8, d0
; CHECK-NEXT: fcvtzs x9, d1
; CHECK-NEXT: fcvtzs x10, d2
; CHECK-NEXT: fcvtzs x11, d3
; CHECK-NEXT: fcvtzs x12, d4
; CHECK-NEXT: fcvtzs x13, d5
; CHECK-NEXT: fcvtzs x14, d6
; CHECK-NEXT: fcvtzs x15, d7
; CHECK-NEXT: fmov d0, x8
; CHECK-NEXT: fmov d1, x9
; CHECK-NEXT: fmov d2, x10
; CHECK-NEXT: fmov d3, x11
; CHECK-NEXT: mov v0.d[1], x12
; CHECK-NEXT: mov v1.d[1], x13
; CHECK-NEXT: mov v2.d[1], x14
; CHECK-NEXT: mov v3.d[1], x15
; CHECK-NEXT: ret
%a = call <8 x i64> @llvm.lrint.v8i64.v8f64(<8 x double> %x)
ret <8 x i64> %a
}
declare <8 x i64> @llvm.lrint.v8i64.v8f64(<8 x double>)
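The AArch64 checks above exercise the generic fallback rather than a dedicated vector instruction: every lane is rounded with frintx, converted with fcvtzs, and the results are reassembled with fmov/mov, so an <N x float> input costs on the order of 2N scalar operations plus lane moves. As a rough sketch (illustrative only, not part of this commit; the function name lrint_scalarized is made up), this is the extract/call/insert shape that the vector intrinsic lets a vectorizer collapse into a single call:

define <2 x i64> @lrint_scalarized(<2 x float> %x) {
  ; per-lane scalar calls: the pre-vectorization shape
  %e0 = extractelement <2 x float> %x, i64 0
  %e1 = extractelement <2 x float> %x, i64 1
  %r0 = call i64 @llvm.lrint.i64.f32(float %e0)
  %r1 = call i64 @llvm.lrint.i64.f32(float %e1)
  %v0 = insertelement <2 x i64> poison, i64 %r0, i64 0
  %v1 = insertelement <2 x i64> %v0, i64 %r1, i64 1
  ret <2 x i64> %v1
}
declare i64 @llvm.lrint.i64.f32(float)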

File diff suppressed because it is too large

File diff suppressed because it is too large


@@ -0,0 +1,108 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv64 -mattr=+v,+f,+d -target-abi=lp64d \
; RUN: -verify-machineinstrs < %s | FileCheck %s
define <vscale x 1 x i64> @llrint_nxv1i64_nxv1f32(<vscale x 1 x float> %x) {
; CHECK-LABEL: llrint_nxv1i64_nxv1f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
; CHECK-NEXT: vfwcvt.x.f.v v9, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
%a = call <vscale x 1 x i64> @llvm.llrint.nxv1i64.nxv1f32(<vscale x 1 x float> %x)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.llrint.nxv1i64.nxv1f32(<vscale x 1 x float>)
define <vscale x 2 x i64> @llrint_nxv2i64_nxv2f32(<vscale x 2 x float> %x) {
; CHECK-LABEL: llrint_nxv2i64_nxv2f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma
; CHECK-NEXT: vfwcvt.x.f.v v10, v8
; CHECK-NEXT: vmv2r.v v8, v10
; CHECK-NEXT: ret
%a = call <vscale x 2 x i64> @llvm.llrint.nxv2i64.nxv2f32(<vscale x 2 x float> %x)
ret <vscale x 2 x i64> %a
}
declare <vscale x 2 x i64> @llvm.llrint.nxv2i64.nxv2f32(<vscale x 2 x float>)
define <vscale x 4 x i64> @llrint_nxv4i64_nxv4f32(<vscale x 4 x float> %x) {
; CHECK-LABEL: llrint_nxv4i64_nxv4f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma
; CHECK-NEXT: vfwcvt.x.f.v v12, v8
; CHECK-NEXT: vmv4r.v v8, v12
; CHECK-NEXT: ret
%a = call <vscale x 4 x i64> @llvm.llrint.nxv4i64.nxv4f32(<vscale x 4 x float> %x)
ret <vscale x 4 x i64> %a
}
declare <vscale x 4 x i64> @llvm.llrint.nxv4i64.nxv4f32(<vscale x 4 x float>)
define <vscale x 8 x i64> @llrint_nxv8i64_nxv8f32(<vscale x 8 x float> %x) {
; CHECK-LABEL: llrint_nxv8i64_nxv8f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
; CHECK-NEXT: vfwcvt.x.f.v v16, v8
; CHECK-NEXT: vmv8r.v v8, v16
; CHECK-NEXT: ret
%a = call <vscale x 8 x i64> @llvm.llrint.nxv8i64.nxv8f32(<vscale x 8 x float> %x)
ret <vscale x 8 x i64> %a
}
declare <vscale x 8 x i64> @llvm.llrint.nxv8i64.nxv8f32(<vscale x 8 x float>)
define <vscale x 16 x i64> @llrint_nxv16i64_nxv16f32(<vscale x 16 x float> %x) {
; CHECK-LABEL: llrint_nxv16i64_nxv16f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
; CHECK-NEXT: vfwcvt.x.f.v v24, v8
; CHECK-NEXT: vfwcvt.x.f.v v16, v12
; CHECK-NEXT: vmv8r.v v8, v24
; CHECK-NEXT: ret
%a = call <vscale x 16 x i64> @llvm.llrint.nxv16i64.nxv16f32(<vscale x 16 x float> %x)
ret <vscale x 16 x i64> %a
}
declare <vscale x 16 x i64> @llvm.llrint.nxv16i64.nxv16f32(<vscale x 16 x float>)
define <vscale x 1 x i64> @llrint_nxv1i64_nxv1f64(<vscale x 1 x double> %x) {
; CHECK-LABEL: llrint_nxv1i64_nxv1f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v8, v8
; CHECK-NEXT: ret
%a = call <vscale x 1 x i64> @llvm.llrint.nxv1i64.nxv1f64(<vscale x 1 x double> %x)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.llrint.nxv1i64.nxv1f64(<vscale x 1 x double>)
define <vscale x 2 x i64> @llrint_nxv2i64_nxv2f64(<vscale x 2 x double> %x) {
; CHECK-LABEL: llrint_nxv2i64_nxv2f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v8, v8
; CHECK-NEXT: ret
%a = call <vscale x 2 x i64> @llvm.llrint.nxv2i64.nxv2f64(<vscale x 2 x double> %x)
ret <vscale x 2 x i64> %a
}
declare <vscale x 2 x i64> @llvm.llrint.nxv2i64.nxv2f64(<vscale x 2 x double>)
define <vscale x 4 x i64> @llrint_nxv4i64_nxv4f64(<vscale x 4 x double> %x) {
; CHECK-LABEL: llrint_nxv4i64_nxv4f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v8, v8
; CHECK-NEXT: ret
%a = call <vscale x 4 x i64> @llvm.llrint.nxv4i64.nxv4f64(<vscale x 4 x double> %x)
ret <vscale x 4 x i64> %a
}
declare <vscale x 4 x i64> @llvm.llrint.nxv4i64.nxv4f64(<vscale x 4 x double>)
define <vscale x 8 x i64> @llrint_nxv8i64_nxv8f64(<vscale x 8 x double> %x) {
; CHECK-LABEL: llrint_nxv8i64_nxv8f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v8, v8
; CHECK-NEXT: ret
%a = call <vscale x 8 x i64> @llvm.llrint.nxv8i64.nxv8f64(<vscale x 8 x double> %x)
ret <vscale x 8 x i64> %a
}
declare <vscale x 8 x i64> @llvm.llrint.nxv8i64.nxv8f64(<vscale x 8 x double>)
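These scalable-vector checks show the custom RISC-V lowering at work: an f32 source widens to i64 with vfwcvt.x.f.v into a fresh, doubled register group, an f64 source converts in place with vfcvt.x.f.v, and the nxv16f32 case splits into two m4 halves because the nxv16i64 result needs two full m8 groups. Both instructions round under the dynamic frm register, which is exactly the current-rounding-mode behaviour llrint requires. For contrast, a hedged sketch (not from this commit) of the truncating conversion, which selects a different instruction and must not be conflated with llrint:

define <vscale x 1 x i64> @trunc_not_llrint(<vscale x 1 x double> %x) {
  ; fptosi always rounds toward zero and selects vfcvt.rtz.x.f.v,
  ; whereas llvm.llrint honours frm and selects vfcvt.x.f.v
  %a = fptosi <vscale x 1 x double> %x to <vscale x 1 x i64>
  ret <vscale x 1 x i64> %a
}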


@@ -0,0 +1,155 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+f,+d \
; RUN: -target-abi=ilp32d -verify-machineinstrs | FileCheck %s --check-prefix=RV32
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+f,+d \
; RUN: -target-abi=lp64d -verify-machineinstrs | FileCheck %s --check-prefix=RV64
define <vscale x 1 x iXLen> @lrint_nxv1f32(<vscale x 1 x float> %x) {
; RV32-LABEL: lrint_nxv1f32:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
; RV32-NEXT: vfcvt.x.f.v v8, v8
; RV32-NEXT: ret
;
; RV64-LABEL: lrint_nxv1f32:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
; RV64-NEXT: vfwcvt.x.f.v v9, v8
; RV64-NEXT: vmv1r.v v8, v9
; RV64-NEXT: ret
%a = call <vscale x 1 x iXLen> @llvm.lrint.nxv1iXLen.nxv1f32(<vscale x 1 x float> %x)
ret <vscale x 1 x iXLen> %a
}
declare <vscale x 1 x iXLen> @llvm.lrint.nxv1iXLen.nxv1f32(<vscale x 1 x float>)
define <vscale x 2 x iXLen> @lrint_nxv2f32(<vscale x 2 x float> %x) {
; RV32-LABEL: lrint_nxv2f32:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a0, zero, e32, m1, ta, ma
; RV32-NEXT: vfcvt.x.f.v v8, v8
; RV32-NEXT: ret
;
; RV64-LABEL: lrint_nxv2f32:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a0, zero, e32, m1, ta, ma
; RV64-NEXT: vfwcvt.x.f.v v10, v8
; RV64-NEXT: vmv2r.v v8, v10
; RV64-NEXT: ret
%a = call <vscale x 2 x iXLen> @llvm.lrint.nxv2iXLen.nxv2f32(<vscale x 2 x float> %x)
ret <vscale x 2 x iXLen> %a
}
declare <vscale x 2 x iXLen> @llvm.lrint.nxv2iXLen.nxv2f32(<vscale x 2 x float>)
define <vscale x 4 x iXLen> @lrint_nxv4f32(<vscale x 4 x float> %x) {
; RV32-LABEL: lrint_nxv4f32:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a0, zero, e32, m2, ta, ma
; RV32-NEXT: vfcvt.x.f.v v8, v8
; RV32-NEXT: ret
;
; RV64-LABEL: lrint_nxv4f32:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a0, zero, e32, m2, ta, ma
; RV64-NEXT: vfwcvt.x.f.v v12, v8
; RV64-NEXT: vmv4r.v v8, v12
; RV64-NEXT: ret
%a = call <vscale x 4 x iXLen> @llvm.lrint.nxv4iXLen.nxv4f32(<vscale x 4 x float> %x)
ret <vscale x 4 x iXLen> %a
}
declare <vscale x 4 x iXLen> @llvm.lrint.nxv4iXLen.nxv4f32(<vscale x 4 x float>)
define <vscale x 8 x iXLen> @lrint_nxv8f32(<vscale x 8 x float> %x) {
; RV32-LABEL: lrint_nxv8f32:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a0, zero, e32, m4, ta, ma
; RV32-NEXT: vfcvt.x.f.v v8, v8
; RV32-NEXT: ret
;
; RV64-LABEL: lrint_nxv8f32:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a0, zero, e32, m4, ta, ma
; RV64-NEXT: vfwcvt.x.f.v v16, v8
; RV64-NEXT: vmv8r.v v8, v16
; RV64-NEXT: ret
%a = call <vscale x 8 x iXLen> @llvm.lrint.nxv8iXLen.nxv8f32(<vscale x 8 x float> %x)
ret <vscale x 8 x iXLen> %a
}
declare <vscale x 8 x iXLen> @llvm.lrint.nxv8iXLen.nxv8f32(<vscale x 8 x float>)
define <vscale x 16 x iXLen> @lrint_nxv16iXLen_nxv16f32(<vscale x 16 x float> %x) {
%a = call <vscale x 16 x iXLen> @llvm.lrint.nxv16iXLen.nxv16f32(<vscale x 16 x float> %x)
ret <vscale x 16 x iXLen> %a
}
declare <vscale x 16 x iXLen> @llvm.lrint.nxv16iXLen.nxv16f32(<vscale x 16 x float>)
define <vscale x 1 x iXLen> @lrint_nxv1f64(<vscale x 1 x double> %x) {
; RV32-LABEL: lrint_nxv1f64:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
; RV32-NEXT: vfncvt.x.f.w v9, v8
; RV32-NEXT: vmv1r.v v8, v9
; RV32-NEXT: ret
;
; RV64-LABEL: lrint_nxv1f64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a0, zero, e64, m1, ta, ma
; RV64-NEXT: vfcvt.x.f.v v8, v8
; RV64-NEXT: ret
%a = call <vscale x 1 x iXLen> @llvm.lrint.nxv1iXLen.nxv1f64(<vscale x 1 x double> %x)
ret <vscale x 1 x iXLen> %a
}
declare <vscale x 1 x iXLen> @llvm.lrint.nxv1iXLen.nxv1f64(<vscale x 1 x double>)
define <vscale x 2 x iXLen> @lrint_nxv2f64(<vscale x 2 x double> %x) {
; RV32-LABEL: lrint_nxv2f64:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a0, zero, e32, m1, ta, ma
; RV32-NEXT: vfncvt.x.f.w v10, v8
; RV32-NEXT: vmv.v.v v8, v10
; RV32-NEXT: ret
;
; RV64-LABEL: lrint_nxv2f64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a0, zero, e64, m2, ta, ma
; RV64-NEXT: vfcvt.x.f.v v8, v8
; RV64-NEXT: ret
%a = call <vscale x 2 x iXLen> @llvm.lrint.nxv2iXLen.nxv2f64(<vscale x 2 x double> %x)
ret <vscale x 2 x iXLen> %a
}
declare <vscale x 2 x iXLen> @llvm.lrint.nxv2iXLen.nxv2f64(<vscale x 2 x double>)
define <vscale x 4 x iXLen> @lrint_nxv4f64(<vscale x 4 x double> %x) {
; RV32-LABEL: lrint_nxv4f64:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a0, zero, e32, m2, ta, ma
; RV32-NEXT: vfncvt.x.f.w v12, v8
; RV32-NEXT: vmv.v.v v8, v12
; RV32-NEXT: ret
;
; RV64-LABEL: lrint_nxv4f64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a0, zero, e64, m4, ta, ma
; RV64-NEXT: vfcvt.x.f.v v8, v8
; RV64-NEXT: ret
%a = call <vscale x 4 x iXLen> @llvm.lrint.nxv4iXLen.nxv4f64(<vscale x 4 x double> %x)
ret <vscale x 4 x iXLen> %a
}
declare <vscale x 4 x iXLen> @llvm.lrint.nxv4iXLen.nxv4f64(<vscale x 4 x double>)
define <vscale x 8 x iXLen> @lrint_nxv8f64(<vscale x 8 x double> %x) {
; RV32-LABEL: lrint_nxv8f64:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a0, zero, e32, m4, ta, ma
; RV32-NEXT: vfncvt.x.f.w v16, v8
; RV32-NEXT: vmv.v.v v8, v16
; RV32-NEXT: ret
;
; RV64-LABEL: lrint_nxv8f64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a0, zero, e64, m8, ta, ma
; RV64-NEXT: vfcvt.x.f.v v8, v8
; RV64-NEXT: ret
%a = call <vscale x 8 x iXLen> @llvm.lrint.nxv8iXLen.nxv8f64(<vscale x 8 x double> %x)
ret <vscale x 8 x iXLen> %a
}
declare <vscale x 8 x iXLen> @llvm.lrint.nxv8iXLen.nxv8f64(<vscale x 8 x double>)
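The iXLen device in the RUN lines keeps one source file serving both XLENs: sed rewrites iXLen before llc runs, so the same function bodies are checked with i32 results on RV32 and i64 results on RV64. That is why the f64 cases narrow on RV32 (vfncvt.x.f.w) but convert in place on RV64, while the f32 cases convert in place on RV32 but widen on RV64. A sketch of what the RV32 substitution expands to (illustrative, assuming the sed rewrite in the RUN lines; not literal file content):

define <vscale x 1 x i32> @lrint_nxv1f64_rv32(<vscale x 1 x double> %x) {
  ; after s/iXLen/i32/, the intrinsic narrows f64 lanes to i32
  %a = call <vscale x 1 x i32> @llvm.lrint.nxv1i32.nxv1f64(<vscale x 1 x double> %x)
  ret <vscale x 1 x i32> %a
}
declare <vscale x 1 x i32> @llvm.lrint.nxv1i32.nxv1f64(<vscale x 1 x double>)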


@@ -0,0 +1,290 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown | FileCheck %s --check-prefix=X64-SSE
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=avx | FileCheck %s --check-prefix=X64-AVX
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=avx512f | FileCheck %s --check-prefix=X64-AVX
define <1 x i64> @llrint_v1i64_v1f32(<1 x float> %x) {
; X64-SSE-LABEL: llrint_v1i64_v1f32:
; X64-SSE: # %bb.0:
; X64-SSE-NEXT: cvtss2si %xmm0, %rax
; X64-SSE-NEXT: retq
;
; X64-AVX-LABEL: llrint_v1i64_v1f32:
; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vcvtss2si %xmm0, %rax
; X64-AVX-NEXT: retq
%a = call <1 x i64> @llvm.llrint.v1i64.v1f32(<1 x float> %x)
ret <1 x i64> %a
}
declare <1 x i64> @llvm.llrint.v1i64.v1f32(<1 x float>)
define <2 x i64> @llrint_v2i64_v2f32(<2 x float> %x) {
; X64-SSE-LABEL: llrint_v2i64_v2f32:
; X64-SSE: # %bb.0:
; X64-SSE-NEXT: cvtss2si %xmm0, %rax
; X64-SSE-NEXT: movq %rax, %xmm1
; X64-SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
; X64-SSE-NEXT: cvtss2si %xmm0, %rax
; X64-SSE-NEXT: movq %rax, %xmm0
; X64-SSE-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
; X64-SSE-NEXT: movdqa %xmm1, %xmm0
; X64-SSE-NEXT: retq
;
; X64-AVX-LABEL: llrint_v2i64_v2f32:
; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vcvtss2si %xmm0, %rax
; X64-AVX-NEXT: vmovq %rax, %xmm1
; X64-AVX-NEXT: vmovshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
; X64-AVX-NEXT: vcvtss2si %xmm0, %rax
; X64-AVX-NEXT: vmovq %rax, %xmm0
; X64-AVX-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; X64-AVX-NEXT: retq
%a = call <2 x i64> @llvm.llrint.v2i64.v2f32(<2 x float> %x)
ret <2 x i64> %a
}
declare <2 x i64> @llvm.llrint.v2i64.v2f32(<2 x float>)
define <4 x i64> @llrint_v4i64_v4f32(<4 x float> %x) {
; X64-SSE-LABEL: llrint_v4i64_v4f32:
; X64-SSE: # %bb.0:
; X64-SSE-NEXT: cvtss2si %xmm0, %rax
; X64-SSE-NEXT: movq %rax, %xmm2
; X64-SSE-NEXT: movaps %xmm0, %xmm1
; X64-SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1],xmm0[1,1]
; X64-SSE-NEXT: cvtss2si %xmm1, %rax
; X64-SSE-NEXT: movq %rax, %xmm1
; X64-SSE-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm1[0]
; X64-SSE-NEXT: movaps %xmm0, %xmm1
; X64-SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,3],xmm0[3,3]
; X64-SSE-NEXT: cvtss2si %xmm1, %rax
; X64-SSE-NEXT: movq %rax, %xmm3
; X64-SSE-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
; X64-SSE-NEXT: cvtss2si %xmm0, %rax
; X64-SSE-NEXT: movq %rax, %xmm1
; X64-SSE-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm3[0]
; X64-SSE-NEXT: movdqa %xmm2, %xmm0
; X64-SSE-NEXT: retq
%a = call <4 x i64> @llvm.llrint.v4i64.v4f32(<4 x float> %x)
ret <4 x i64> %a
}
declare <4 x i64> @llvm.llrint.v4i64.v4f32(<4 x float>)
define <8 x i64> @llrint_v8i64_v8f32(<8 x float> %x) {
; X64-SSE-LABEL: llrint_v8i64_v8f32:
; X64-SSE: # %bb.0:
; X64-SSE-NEXT: movaps %xmm0, %xmm2
; X64-SSE-NEXT: cvtss2si %xmm0, %rax
; X64-SSE-NEXT: movq %rax, %xmm0
; X64-SSE-NEXT: movaps %xmm2, %xmm3
; X64-SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[1,1],xmm2[1,1]
; X64-SSE-NEXT: cvtss2si %xmm3, %rax
; X64-SSE-NEXT: movq %rax, %xmm3
; X64-SSE-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm3[0]
; X64-SSE-NEXT: movaps %xmm2, %xmm3
; X64-SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,3],xmm2[3,3]
; X64-SSE-NEXT: cvtss2si %xmm3, %rax
; X64-SSE-NEXT: movq %rax, %xmm3
; X64-SSE-NEXT: movhlps {{.*#+}} xmm2 = xmm2[1,1]
; X64-SSE-NEXT: cvtss2si %xmm2, %rax
; X64-SSE-NEXT: movq %rax, %xmm4
; X64-SSE-NEXT: punpcklqdq {{.*#+}} xmm4 = xmm4[0],xmm3[0]
; X64-SSE-NEXT: cvtss2si %xmm1, %rax
; X64-SSE-NEXT: movq %rax, %xmm2
; X64-SSE-NEXT: movaps %xmm1, %xmm3
; X64-SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[1,1],xmm1[1,1]
; X64-SSE-NEXT: cvtss2si %xmm3, %rax
; X64-SSE-NEXT: movq %rax, %xmm3
; X64-SSE-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
; X64-SSE-NEXT: movaps %xmm1, %xmm3
; X64-SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,3],xmm1[3,3]
; X64-SSE-NEXT: cvtss2si %xmm3, %rax
; X64-SSE-NEXT: movq %rax, %xmm5
; X64-SSE-NEXT: movhlps {{.*#+}} xmm1 = xmm1[1,1]
; X64-SSE-NEXT: cvtss2si %xmm1, %rax
; X64-SSE-NEXT: movq %rax, %xmm3
; X64-SSE-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm5[0]
; X64-SSE-NEXT: movdqa %xmm4, %xmm1
; X64-SSE-NEXT: retq
%a = call <8 x i64> @llvm.llrint.v8i64.v8f32(<8 x float> %x)
ret <8 x i64> %a
}
declare <8 x i64> @llvm.llrint.v8i64.v8f32(<8 x float>)
define <16 x i64> @llrint_v16i64_v16f32(<16 x float> %x) {
; X64-SSE-LABEL: llrint_v16i64_v16f32:
; X64-SSE: # %bb.0:
; X64-SSE-NEXT: movq %rdi, %rax
; X64-SSE-NEXT: cvtss2si %xmm0, %rcx
; X64-SSE-NEXT: movq %rcx, %xmm4
; X64-SSE-NEXT: movaps %xmm0, %xmm5
; X64-SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[1,1],xmm0[1,1]
; X64-SSE-NEXT: cvtss2si %xmm5, %rcx
; X64-SSE-NEXT: movq %rcx, %xmm5
; X64-SSE-NEXT: punpcklqdq {{.*#+}} xmm4 = xmm4[0],xmm5[0]
; X64-SSE-NEXT: movaps %xmm0, %xmm5
; X64-SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[3,3],xmm0[3,3]
; X64-SSE-NEXT: cvtss2si %xmm5, %rcx
; X64-SSE-NEXT: movq %rcx, %xmm5
; X64-SSE-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
; X64-SSE-NEXT: cvtss2si %xmm0, %rcx
; X64-SSE-NEXT: movq %rcx, %xmm0
; X64-SSE-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm5[0]
; X64-SSE-NEXT: cvtss2si %xmm1, %rcx
; X64-SSE-NEXT: movq %rcx, %xmm5
; X64-SSE-NEXT: movaps %xmm1, %xmm6
; X64-SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[1,1],xmm1[1,1]
; X64-SSE-NEXT: cvtss2si %xmm6, %rcx
; X64-SSE-NEXT: movq %rcx, %xmm6
; X64-SSE-NEXT: punpcklqdq {{.*#+}} xmm5 = xmm5[0],xmm6[0]
; X64-SSE-NEXT: movaps %xmm1, %xmm6
; X64-SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[3,3],xmm1[3,3]
; X64-SSE-NEXT: cvtss2si %xmm6, %rcx
; X64-SSE-NEXT: movq %rcx, %xmm6
; X64-SSE-NEXT: movhlps {{.*#+}} xmm1 = xmm1[1,1]
; X64-SSE-NEXT: cvtss2si %xmm1, %rcx
; X64-SSE-NEXT: movq %rcx, %xmm1
; X64-SSE-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm6[0]
; X64-SSE-NEXT: cvtss2si %xmm2, %rcx
; X64-SSE-NEXT: movq %rcx, %xmm6
; X64-SSE-NEXT: movaps %xmm2, %xmm7
; X64-SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[1,1],xmm2[1,1]
; X64-SSE-NEXT: cvtss2si %xmm7, %rcx
; X64-SSE-NEXT: movq %rcx, %xmm7
; X64-SSE-NEXT: punpcklqdq {{.*#+}} xmm6 = xmm6[0],xmm7[0]
; X64-SSE-NEXT: movaps %xmm2, %xmm7
; X64-SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[3,3],xmm2[3,3]
; X64-SSE-NEXT: cvtss2si %xmm7, %rcx
; X64-SSE-NEXT: movq %rcx, %xmm7
; X64-SSE-NEXT: movhlps {{.*#+}} xmm2 = xmm2[1,1]
; X64-SSE-NEXT: cvtss2si %xmm2, %rcx
; X64-SSE-NEXT: movq %rcx, %xmm2
; X64-SSE-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm7[0]
; X64-SSE-NEXT: cvtss2si %xmm3, %rcx
; X64-SSE-NEXT: movq %rcx, %xmm7
; X64-SSE-NEXT: movaps %xmm3, %xmm8
; X64-SSE-NEXT: shufps {{.*#+}} xmm8 = xmm8[1,1],xmm3[1,1]
; X64-SSE-NEXT: cvtss2si %xmm8, %rcx
; X64-SSE-NEXT: movq %rcx, %xmm8
; X64-SSE-NEXT: punpcklqdq {{.*#+}} xmm7 = xmm7[0],xmm8[0]
; X64-SSE-NEXT: movaps %xmm3, %xmm8
; X64-SSE-NEXT: shufps {{.*#+}} xmm8 = xmm8[3,3],xmm3[3,3]
; X64-SSE-NEXT: cvtss2si %xmm8, %rcx
; X64-SSE-NEXT: movq %rcx, %xmm8
; X64-SSE-NEXT: movhlps {{.*#+}} xmm3 = xmm3[1,1]
; X64-SSE-NEXT: cvtss2si %xmm3, %rcx
; X64-SSE-NEXT: movq %rcx, %xmm3
; X64-SSE-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm8[0]
; X64-SSE-NEXT: movdqa %xmm3, 112(%rdi)
; X64-SSE-NEXT: movdqa %xmm7, 96(%rdi)
; X64-SSE-NEXT: movdqa %xmm2, 80(%rdi)
; X64-SSE-NEXT: movdqa %xmm6, 64(%rdi)
; X64-SSE-NEXT: movdqa %xmm1, 48(%rdi)
; X64-SSE-NEXT: movdqa %xmm5, 32(%rdi)
; X64-SSE-NEXT: movdqa %xmm0, 16(%rdi)
; X64-SSE-NEXT: movdqa %xmm4, (%rdi)
; X64-SSE-NEXT: retq
%a = call <16 x i64> @llvm.llrint.v16i64.v16f32(<16 x float> %x)
ret <16 x i64> %a
}
declare <16 x i64> @llvm.llrint.v16i64.v16f32(<16 x float>)
define <1 x i64> @llrint_v1i64_v1f64(<1 x double> %x) {
; X64-SSE-LABEL: llrint_v1i64_v1f64:
; X64-SSE: # %bb.0:
; X64-SSE-NEXT: cvtsd2si %xmm0, %rax
; X64-SSE-NEXT: retq
;
; X64-AVX-LABEL: llrint_v1i64_v1f64:
; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vcvtsd2si %xmm0, %rax
; X64-AVX-NEXT: retq
%a = call <1 x i64> @llvm.llrint.v1i64.v1f64(<1 x double> %x)
ret <1 x i64> %a
}
declare <1 x i64> @llvm.llrint.v1i64.v1f64(<1 x double>)
define <2 x i64> @llrint_v2i64_v2f64(<2 x double> %x) {
; X64-SSE-LABEL: llrint_v2i64_v2f64:
; X64-SSE: # %bb.0:
; X64-SSE-NEXT: cvtsd2si %xmm0, %rax
; X64-SSE-NEXT: movq %rax, %xmm1
; X64-SSE-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1,1]
; X64-SSE-NEXT: cvtsd2si %xmm0, %rax
; X64-SSE-NEXT: movq %rax, %xmm0
; X64-SSE-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
; X64-SSE-NEXT: movdqa %xmm1, %xmm0
; X64-SSE-NEXT: retq
;
; X64-AVX-LABEL: llrint_v2i64_v2f64:
; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vcvtsd2si %xmm0, %rax
; X64-AVX-NEXT: vmovq %rax, %xmm1
; X64-AVX-NEXT: vshufpd {{.*#+}} xmm0 = xmm0[1,0]
; X64-AVX-NEXT: vcvtsd2si %xmm0, %rax
; X64-AVX-NEXT: vmovq %rax, %xmm0
; X64-AVX-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; X64-AVX-NEXT: retq
%a = call <2 x i64> @llvm.llrint.v2i64.v2f64(<2 x double> %x)
ret <2 x i64> %a
}
declare <2 x i64> @llvm.llrint.v2i64.v2f64(<2 x double>)
define <4 x i64> @llrint_v4i64_v4f64(<4 x double> %x) {
; X64-SSE-LABEL: llrint_v4i64_v4f64:
; X64-SSE: # %bb.0:
; X64-SSE-NEXT: cvtsd2si %xmm0, %rax
; X64-SSE-NEXT: movq %rax, %xmm2
; X64-SSE-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1,1]
; X64-SSE-NEXT: cvtsd2si %xmm0, %rax
; X64-SSE-NEXT: movq %rax, %xmm0
; X64-SSE-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm0[0]
; X64-SSE-NEXT: cvtsd2si %xmm1, %rax
; X64-SSE-NEXT: movq %rax, %xmm3
; X64-SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1,1]
; X64-SSE-NEXT: cvtsd2si %xmm1, %rax
; X64-SSE-NEXT: movq %rax, %xmm0
; X64-SSE-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm0[0]
; X64-SSE-NEXT: movdqa %xmm2, %xmm0
; X64-SSE-NEXT: movdqa %xmm3, %xmm1
; X64-SSE-NEXT: retq
%a = call <4 x i64> @llvm.llrint.v4i64.v4f64(<4 x double> %x)
ret <4 x i64> %a
}
declare <4 x i64> @llvm.llrint.v4i64.v4f64(<4 x double>)
define <8 x i64> @llrint_v8i64_v8f64(<8 x double> %x) {
; X64-SSE-LABEL: llrint_v8i64_v8f64:
; X64-SSE: # %bb.0:
; X64-SSE-NEXT: cvtsd2si %xmm0, %rax
; X64-SSE-NEXT: movq %rax, %xmm4
; X64-SSE-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1,1]
; X64-SSE-NEXT: cvtsd2si %xmm0, %rax
; X64-SSE-NEXT: movq %rax, %xmm0
; X64-SSE-NEXT: punpcklqdq {{.*#+}} xmm4 = xmm4[0],xmm0[0]
; X64-SSE-NEXT: cvtsd2si %xmm1, %rax
; X64-SSE-NEXT: movq %rax, %xmm5
; X64-SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1,1]
; X64-SSE-NEXT: cvtsd2si %xmm1, %rax
; X64-SSE-NEXT: movq %rax, %xmm0
; X64-SSE-NEXT: punpcklqdq {{.*#+}} xmm5 = xmm5[0],xmm0[0]
; X64-SSE-NEXT: cvtsd2si %xmm2, %rax
; X64-SSE-NEXT: movq %rax, %xmm6
; X64-SSE-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1,1]
; X64-SSE-NEXT: cvtsd2si %xmm2, %rax
; X64-SSE-NEXT: movq %rax, %xmm0
; X64-SSE-NEXT: punpcklqdq {{.*#+}} xmm6 = xmm6[0],xmm0[0]
; X64-SSE-NEXT: cvtsd2si %xmm3, %rax
; X64-SSE-NEXT: movq %rax, %xmm7
; X64-SSE-NEXT: unpckhpd {{.*#+}} xmm3 = xmm3[1,1]
; X64-SSE-NEXT: cvtsd2si %xmm3, %rax
; X64-SSE-NEXT: movq %rax, %xmm0
; X64-SSE-NEXT: punpcklqdq {{.*#+}} xmm7 = xmm7[0],xmm0[0]
; X64-SSE-NEXT: movdqa %xmm4, %xmm0
; X64-SSE-NEXT: movdqa %xmm5, %xmm1
; X64-SSE-NEXT: movdqa %xmm6, %xmm2
; X64-SSE-NEXT: movdqa %xmm7, %xmm3
; X64-SSE-NEXT: retq
%a = call <8 x i64> @llvm.llrint.v8i64.v8f64(<8 x double> %x)
ret <8 x i64> %a
}
declare <8 x i64> @llvm.llrint.v8i64.v8f64(<8 x double>)
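X86 has no vector conversion that honours the rounding mode lane-wise, so these checks document scalarization through cvtss2si/cvtsd2si, both of which round under MXCSR as llrint requires. Note also the v16i64 case: the result is too large for registers under the SysV ABI, so it is returned through a hidden sret pointer (hence the stores through %rdi and the movq %rdi, %rax above). A hedged IR equivalent of that calling convention (illustrative only; the wrapper name is made up):

define void @llrint_v16_sret(ptr sret(<16 x i64>) %out, <16 x float> %x) {
  ; the <16 x i64> result is written through the hidden pointer,
  ; matching the stores to (%rdi) in the checks above
  %a = call <16 x i64> @llvm.llrint.v16i64.v16f32(<16 x float> %x)
  store <16 x i64> %a, ptr %out
  ret void
}
declare <16 x i64> @llvm.llrint.v16i64.v16f32(<16 x float>)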


@@ -0,0 +1,429 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=i686-unknown -mattr=sse2 | FileCheck %s --check-prefix=X86-SSE2
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=i686-unknown -mattr=avx | FileCheck %s --check-prefix=X86-AVX
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=i686-unknown -mattr=avx512f | FileCheck %s --check-prefix=X86-AVX
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=x86_64-unknown | FileCheck %s --check-prefix=X64-SSE
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=x86_64-unknown -mattr=avx | FileCheck %s --check-prefix=X64-AVX
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=x86_64-unknown -mattr=avx512f | FileCheck %s --check-prefix=X64-AVX
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=x86_64-unknown | FileCheck %s --check-prefix=X64-SSE
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=x86_64-unknown -mattr=avx | FileCheck %s --check-prefix=X64-AVX
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=x86_64-unknown -mattr=avx512f | FileCheck %s --check-prefix=X64-AVX
define <1 x iXLen> @lrint_v1f32(<1 x float> %x) {
; X86-SSE2-LABEL: lrint_v1f32:
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: cvtss2si {{[0-9]+}}(%esp), %eax
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: lrint_v1f32:
; X86-AVX: # %bb.0:
; X86-AVX-NEXT: vcvtss2si {{[0-9]+}}(%esp), %eax
; X86-AVX-NEXT: retl
%a = call <1 x iXLen> @llvm.lrint.v1iXLen.v1f32(<1 x float> %x)
ret <1 x iXLen> %a
}
declare <1 x iXLen> @llvm.lrint.v1iXLen.v1f32(<1 x float>)
define <2 x iXLen> @lrint_v2f32(<2 x float> %x) {
; X86-SSE2-LABEL: lrint_v2f32:
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: movaps %xmm0, %xmm1
; X86-SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,3],xmm0[3,3]
; X86-SSE2-NEXT: cvtss2si %xmm1, %eax
; X86-SSE2-NEXT: movd %eax, %xmm1
; X86-SSE2-NEXT: movaps %xmm0, %xmm2
; X86-SSE2-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm0[1]
; X86-SSE2-NEXT: cvtss2si %xmm2, %eax
; X86-SSE2-NEXT: movd %eax, %xmm2
; X86-SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; X86-SSE2-NEXT: cvtss2si %xmm0, %eax
; X86-SSE2-NEXT: movd %eax, %xmm1
; X86-SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
; X86-SSE2-NEXT: cvtss2si %xmm0, %eax
; X86-SSE2-NEXT: movd %eax, %xmm0
; X86-SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; X86-SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
; X86-SSE2-NEXT: movdqa %xmm1, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: lrint_v2f32:
; X86-AVX: # %bb.0:
; X86-AVX-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; X86-AVX-NEXT: vcvtss2si %xmm1, %eax
; X86-AVX-NEXT: vcvtss2si %xmm0, %ecx
; X86-AVX-NEXT: vmovd %ecx, %xmm1
; X86-AVX-NEXT: vpinsrd $1, %eax, %xmm1, %xmm1
; X86-AVX-NEXT: vshufpd {{.*#+}} xmm2 = xmm0[1,0]
; X86-AVX-NEXT: vcvtss2si %xmm2, %eax
; X86-AVX-NEXT: vpinsrd $2, %eax, %xmm1, %xmm1
; X86-AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
; X86-AVX-NEXT: vcvtss2si %xmm0, %eax
; X86-AVX-NEXT: vpinsrd $3, %eax, %xmm1, %xmm0
; X86-AVX-NEXT: retl
%a = call <2 x iXLen> @llvm.lrint.v2iXLen.v2f32(<2 x float> %x)
ret <2 x iXLen> %a
}
declare <2 x iXLen> @llvm.lrint.v2iXLen.v2f32(<2 x float>)
define <4 x iXLen> @lrint_v4f32(<4 x float> %x) {
; X86-SSE2-LABEL: lrint_v4f32:
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: movaps %xmm0, %xmm1
; X86-SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,3],xmm0[3,3]
; X86-SSE2-NEXT: cvtss2si %xmm1, %eax
; X86-SSE2-NEXT: movd %eax, %xmm1
; X86-SSE2-NEXT: movaps %xmm0, %xmm2
; X86-SSE2-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm0[1]
; X86-SSE2-NEXT: cvtss2si %xmm2, %eax
; X86-SSE2-NEXT: movd %eax, %xmm2
; X86-SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; X86-SSE2-NEXT: cvtss2si %xmm0, %eax
; X86-SSE2-NEXT: movd %eax, %xmm1
; X86-SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
; X86-SSE2-NEXT: cvtss2si %xmm0, %eax
; X86-SSE2-NEXT: movd %eax, %xmm0
; X86-SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; X86-SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
; X86-SSE2-NEXT: movdqa %xmm1, %xmm0
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: lrint_v4f32:
; X86-AVX: # %bb.0:
; X86-AVX-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; X86-AVX-NEXT: vcvtss2si %xmm1, %eax
; X86-AVX-NEXT: vcvtss2si %xmm0, %ecx
; X86-AVX-NEXT: vmovd %ecx, %xmm1
; X86-AVX-NEXT: vpinsrd $1, %eax, %xmm1, %xmm1
; X86-AVX-NEXT: vshufpd {{.*#+}} xmm2 = xmm0[1,0]
; X86-AVX-NEXT: vcvtss2si %xmm2, %eax
; X86-AVX-NEXT: vpinsrd $2, %eax, %xmm1, %xmm1
; X86-AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
; X86-AVX-NEXT: vcvtss2si %xmm0, %eax
; X86-AVX-NEXT: vpinsrd $3, %eax, %xmm1, %xmm0
; X86-AVX-NEXT: retl
%a = call <4 x iXLen> @llvm.lrint.v4iXLen.v4f32(<4 x float> %x)
ret <4 x iXLen> %a
}
declare <4 x iXLen> @llvm.lrint.v4iXLen.v4f32(<4 x float>)
define <8 x iXLen> @lrint_v8f32(<8 x float> %x) {
; X86-SSE2-LABEL: lrint_v8f32:
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: movaps %xmm0, %xmm2
; X86-SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
; X86-SSE2-NEXT: cvtss2si %xmm0, %eax
; X86-SSE2-NEXT: movd %eax, %xmm0
; X86-SSE2-NEXT: movaps %xmm2, %xmm3
; X86-SSE2-NEXT: unpckhpd {{.*#+}} xmm3 = xmm3[1],xmm2[1]
; X86-SSE2-NEXT: cvtss2si %xmm3, %eax
; X86-SSE2-NEXT: movd %eax, %xmm3
; X86-SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1]
; X86-SSE2-NEXT: cvtss2si %xmm2, %eax
; X86-SSE2-NEXT: movd %eax, %xmm0
; X86-SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,1,1,1]
; X86-SSE2-NEXT: cvtss2si %xmm2, %eax
; X86-SSE2-NEXT: movd %eax, %xmm2
; X86-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; X86-SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm3[0]
; X86-SSE2-NEXT: movaps %xmm1, %xmm2
; X86-SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[3,3],xmm1[3,3]
; X86-SSE2-NEXT: cvtss2si %xmm2, %eax
; X86-SSE2-NEXT: movd %eax, %xmm2
; X86-SSE2-NEXT: movaps %xmm1, %xmm3
; X86-SSE2-NEXT: unpckhpd {{.*#+}} xmm3 = xmm3[1],xmm1[1]
; X86-SSE2-NEXT: cvtss2si %xmm3, %eax
; X86-SSE2-NEXT: movd %eax, %xmm3
; X86-SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
; X86-SSE2-NEXT: cvtss2si %xmm1, %eax
; X86-SSE2-NEXT: movd %eax, %xmm2
; X86-SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1,1,1]
; X86-SSE2-NEXT: cvtss2si %xmm1, %eax
; X86-SSE2-NEXT: movd %eax, %xmm1
; X86-SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; X86-SSE2-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
; X86-SSE2-NEXT: movdqa %xmm2, %xmm1
; X86-SSE2-NEXT: retl
%a = call <8 x iXLen> @llvm.lrint.v8iXLen.v8f32(<8 x float> %x)
ret <8 x iXLen> %a
}
declare <8 x iXLen> @llvm.lrint.v8iXLen.v8f32(<8 x float>)
define <16 x iXLen> @lrint_v16iXLen_v16f32(<16 x float> %x) {
%a = call <16 x iXLen> @llvm.lrint.v16iXLen.v16f32(<16 x float> %x)
ret <16 x iXLen> %a
}
declare <16 x iXLen> @llvm.lrint.v16iXLen.v16f32(<16 x float>)
define <1 x i64> @lrint_v1f64(<1 x double> %x) {
; X86-SSE2-LABEL: lrint_v1f64:
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: pushl %ebp
; X86-SSE2-NEXT: .cfi_def_cfa_offset 8
; X86-SSE2-NEXT: .cfi_offset %ebp, -8
; X86-SSE2-NEXT: movl %esp, %ebp
; X86-SSE2-NEXT: .cfi_def_cfa_register %ebp
; X86-SSE2-NEXT: andl $-8, %esp
; X86-SSE2-NEXT: subl $8, %esp
; X86-SSE2-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; X86-SSE2-NEXT: movsd %xmm0, (%esp)
; X86-SSE2-NEXT: fldl (%esp)
; X86-SSE2-NEXT: fistpll (%esp)
; X86-SSE2-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; X86-SSE2-NEXT: movd %xmm0, %eax
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
; X86-SSE2-NEXT: movd %xmm0, %edx
; X86-SSE2-NEXT: movl %ebp, %esp
; X86-SSE2-NEXT: popl %ebp
; X86-SSE2-NEXT: .cfi_def_cfa %esp, 4
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: lrint_v1f64:
; X86-AVX: # %bb.0:
; X86-AVX-NEXT: pushl %ebp
; X86-AVX-NEXT: .cfi_def_cfa_offset 8
; X86-AVX-NEXT: .cfi_offset %ebp, -8
; X86-AVX-NEXT: movl %esp, %ebp
; X86-AVX-NEXT: .cfi_def_cfa_register %ebp
; X86-AVX-NEXT: andl $-8, %esp
; X86-AVX-NEXT: subl $8, %esp
; X86-AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; X86-AVX-NEXT: vmovsd %xmm0, (%esp)
; X86-AVX-NEXT: fldl (%esp)
; X86-AVX-NEXT: fistpll (%esp)
; X86-AVX-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
; X86-AVX-NEXT: vmovd %xmm0, %eax
; X86-AVX-NEXT: vpextrd $1, %xmm0, %edx
; X86-AVX-NEXT: movl %ebp, %esp
; X86-AVX-NEXT: popl %ebp
; X86-AVX-NEXT: .cfi_def_cfa %esp, 4
; X86-AVX-NEXT: retl
;
; X64-SSE-LABEL: lrint_v1f64:
; X64-SSE: # %bb.0:
; X64-SSE-NEXT: cvtsd2si %xmm0, %rax
; X64-SSE-NEXT: retq
;
; X64-AVX-LABEL: lrint_v1f64:
; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vcvtsd2si %xmm0, %rax
; X64-AVX-NEXT: retq
%a = call <1 x i64> @llvm.lrint.v1i64.v1f64(<1 x double> %x)
ret <1 x i64> %a
}
declare <1 x i64> @llvm.lrint.v1i64.v1f64(<1 x double>)
define <2 x i64> @lrint_v2f64(<2 x double> %x) {
; X86-SSE2-LABEL: lrint_v2f64:
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: pushl %ebp
; X86-SSE2-NEXT: .cfi_def_cfa_offset 8
; X86-SSE2-NEXT: .cfi_offset %ebp, -8
; X86-SSE2-NEXT: movl %esp, %ebp
; X86-SSE2-NEXT: .cfi_def_cfa_register %ebp
; X86-SSE2-NEXT: andl $-8, %esp
; X86-SSE2-NEXT: subl $16, %esp
; X86-SSE2-NEXT: movhps %xmm0, (%esp)
; X86-SSE2-NEXT: movlps %xmm0, {{[0-9]+}}(%esp)
; X86-SSE2-NEXT: fldl (%esp)
; X86-SSE2-NEXT: fistpll (%esp)
; X86-SSE2-NEXT: fldl {{[0-9]+}}(%esp)
; X86-SSE2-NEXT: fistpll {{[0-9]+}}(%esp)
; X86-SSE2-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; X86-SSE2-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; X86-SSE2-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; X86-SSE2-NEXT: movl %ebp, %esp
; X86-SSE2-NEXT: popl %ebp
; X86-SSE2-NEXT: .cfi_def_cfa %esp, 4
; X86-SSE2-NEXT: retl
;
; X86-AVX-LABEL: lrint_v2f64:
; X86-AVX: # %bb.0:
; X86-AVX-NEXT: pushl %ebp
; X86-AVX-NEXT: .cfi_def_cfa_offset 8
; X86-AVX-NEXT: .cfi_offset %ebp, -8
; X86-AVX-NEXT: movl %esp, %ebp
; X86-AVX-NEXT: .cfi_def_cfa_register %ebp
; X86-AVX-NEXT: andl $-8, %esp
; X86-AVX-NEXT: subl $16, %esp
; X86-AVX-NEXT: vmovlps %xmm0, {{[0-9]+}}(%esp)
; X86-AVX-NEXT: vmovhps %xmm0, (%esp)
; X86-AVX-NEXT: fldl {{[0-9]+}}(%esp)
; X86-AVX-NEXT: fistpll {{[0-9]+}}(%esp)
; X86-AVX-NEXT: fldl (%esp)
; X86-AVX-NEXT: fistpll (%esp)
; X86-AVX-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X86-AVX-NEXT: vpinsrd $1, {{[0-9]+}}(%esp), %xmm0, %xmm0
; X86-AVX-NEXT: vpinsrd $2, (%esp), %xmm0, %xmm0
; X86-AVX-NEXT: vpinsrd $3, {{[0-9]+}}(%esp), %xmm0, %xmm0
; X86-AVX-NEXT: movl %ebp, %esp
; X86-AVX-NEXT: popl %ebp
; X86-AVX-NEXT: .cfi_def_cfa %esp, 4
; X86-AVX-NEXT: retl
;
; X64-SSE-LABEL: lrint_v2f64:
; X64-SSE: # %bb.0:
; X64-SSE-NEXT: cvtsd2si %xmm0, %rax
; X64-SSE-NEXT: movq %rax, %xmm1
; X64-SSE-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1,1]
; X64-SSE-NEXT: cvtsd2si %xmm0, %rax
; X64-SSE-NEXT: movq %rax, %xmm0
; X64-SSE-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
; X64-SSE-NEXT: movdqa %xmm1, %xmm0
; X64-SSE-NEXT: retq
;
; X64-AVX-LABEL: lrint_v2f64:
; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vcvtsd2si %xmm0, %rax
; X64-AVX-NEXT: vmovq %rax, %xmm1
; X64-AVX-NEXT: vshufpd {{.*#+}} xmm0 = xmm0[1,0]
; X64-AVX-NEXT: vcvtsd2si %xmm0, %rax
; X64-AVX-NEXT: vmovq %rax, %xmm0
; X64-AVX-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; X64-AVX-NEXT: retq
%a = call <2 x i64> @llvm.lrint.v2i64.v2f64(<2 x double> %x)
ret <2 x i64> %a
}
declare <2 x i64> @llvm.lrint.v2i64.v2f64(<2 x double>)
define <4 x i64> @lrint_v4f64(<4 x double> %x) {
; X86-SSE2-LABEL: lrint_v4f64:
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: pushl %ebp
; X86-SSE2-NEXT: .cfi_def_cfa_offset 8
; X86-SSE2-NEXT: .cfi_offset %ebp, -8
; X86-SSE2-NEXT: movl %esp, %ebp
; X86-SSE2-NEXT: .cfi_def_cfa_register %ebp
; X86-SSE2-NEXT: andl $-8, %esp
; X86-SSE2-NEXT: subl $32, %esp
; X86-SSE2-NEXT: movhps %xmm0, {{[0-9]+}}(%esp)
; X86-SSE2-NEXT: movlps %xmm0, {{[0-9]+}}(%esp)
; X86-SSE2-NEXT: movhps %xmm1, (%esp)
; X86-SSE2-NEXT: movlps %xmm1, {{[0-9]+}}(%esp)
; X86-SSE2-NEXT: fldl {{[0-9]+}}(%esp)
; X86-SSE2-NEXT: fistpll {{[0-9]+}}(%esp)
; X86-SSE2-NEXT: fldl {{[0-9]+}}(%esp)
; X86-SSE2-NEXT: fistpll {{[0-9]+}}(%esp)
; X86-SSE2-NEXT: fldl (%esp)
; X86-SSE2-NEXT: fistpll (%esp)
; X86-SSE2-NEXT: fldl {{[0-9]+}}(%esp)
; X86-SSE2-NEXT: fistpll {{[0-9]+}}(%esp)
; X86-SSE2-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; X86-SSE2-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; X86-SSE2-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; X86-SSE2-NEXT: movsd {{.*#+}} xmm2 = mem[0],zero
; X86-SSE2-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; X86-SSE2-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm2[0]
; X86-SSE2-NEXT: movl %ebp, %esp
; X86-SSE2-NEXT: popl %ebp
; X86-SSE2-NEXT: .cfi_def_cfa %esp, 4
; X86-SSE2-NEXT: retl
;
; X64-SSE-LABEL: lrint_v4f64:
; X64-SSE: # %bb.0:
; X64-SSE-NEXT: cvtsd2si %xmm0, %rax
; X64-SSE-NEXT: movq %rax, %xmm2
; X64-SSE-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1,1]
; X64-SSE-NEXT: cvtsd2si %xmm0, %rax
; X64-SSE-NEXT: movq %rax, %xmm0
; X64-SSE-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm0[0]
; X64-SSE-NEXT: cvtsd2si %xmm1, %rax
; X64-SSE-NEXT: movq %rax, %xmm3
; X64-SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1,1]
; X64-SSE-NEXT: cvtsd2si %xmm1, %rax
; X64-SSE-NEXT: movq %rax, %xmm0
; X64-SSE-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm0[0]
; X64-SSE-NEXT: movdqa %xmm2, %xmm0
; X64-SSE-NEXT: movdqa %xmm3, %xmm1
; X64-SSE-NEXT: retq
%a = call <4 x i64> @llvm.lrint.v4i64.v4f64(<4 x double> %x)
ret <4 x i64> %a
}
declare <4 x i64> @llvm.lrint.v4i64.v4f64(<4 x double>)
define <8 x i64> @lrint_v8f64(<8 x double> %x) {
; X86-SSE2-LABEL: lrint_v8f64:
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: pushl %ebp
; X86-SSE2-NEXT: .cfi_def_cfa_offset 8
; X86-SSE2-NEXT: .cfi_offset %ebp, -8
; X86-SSE2-NEXT: movl %esp, %ebp
; X86-SSE2-NEXT: .cfi_def_cfa_register %ebp
; X86-SSE2-NEXT: andl $-16, %esp
; X86-SSE2-NEXT: subl $80, %esp
; X86-SSE2-NEXT: movaps 8(%ebp), %xmm3
; X86-SSE2-NEXT: movhps %xmm0, {{[0-9]+}}(%esp)
; X86-SSE2-NEXT: movlps %xmm0, {{[0-9]+}}(%esp)
; X86-SSE2-NEXT: movhps %xmm1, {{[0-9]+}}(%esp)
; X86-SSE2-NEXT: movlps %xmm1, {{[0-9]+}}(%esp)
; X86-SSE2-NEXT: movhps %xmm2, {{[0-9]+}}(%esp)
; X86-SSE2-NEXT: movlps %xmm2, {{[0-9]+}}(%esp)
; X86-SSE2-NEXT: movhps %xmm3, {{[0-9]+}}(%esp)
; X86-SSE2-NEXT: movlps %xmm3, {{[0-9]+}}(%esp)
; X86-SSE2-NEXT: fldl {{[0-9]+}}(%esp)
; X86-SSE2-NEXT: fistpll {{[0-9]+}}(%esp)
; X86-SSE2-NEXT: fldl {{[0-9]+}}(%esp)
; X86-SSE2-NEXT: fistpll {{[0-9]+}}(%esp)
; X86-SSE2-NEXT: fldl {{[0-9]+}}(%esp)
; X86-SSE2-NEXT: fistpll {{[0-9]+}}(%esp)
; X86-SSE2-NEXT: fldl {{[0-9]+}}(%esp)
; X86-SSE2-NEXT: fistpll {{[0-9]+}}(%esp)
; X86-SSE2-NEXT: fldl {{[0-9]+}}(%esp)
; X86-SSE2-NEXT: fistpll {{[0-9]+}}(%esp)
; X86-SSE2-NEXT: fldl {{[0-9]+}}(%esp)
; X86-SSE2-NEXT: fistpll {{[0-9]+}}(%esp)
; X86-SSE2-NEXT: fldl {{[0-9]+}}(%esp)
; X86-SSE2-NEXT: fistpll {{[0-9]+}}(%esp)
; X86-SSE2-NEXT: fldl {{[0-9]+}}(%esp)
; X86-SSE2-NEXT: fistpll {{[0-9]+}}(%esp)
; X86-SSE2-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; X86-SSE2-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; X86-SSE2-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; X86-SSE2-NEXT: movsd {{.*#+}} xmm2 = mem[0],zero
; X86-SSE2-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; X86-SSE2-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm2[0]
; X86-SSE2-NEXT: movsd {{.*#+}} xmm3 = mem[0],zero
; X86-SSE2-NEXT: movsd {{.*#+}} xmm2 = mem[0],zero
; X86-SSE2-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm3[0]
; X86-SSE2-NEXT: movsd {{.*#+}} xmm4 = mem[0],zero
; X86-SSE2-NEXT: movsd {{.*#+}} xmm3 = mem[0],zero
; X86-SSE2-NEXT: movlhps {{.*#+}} xmm3 = xmm3[0],xmm4[0]
; X86-SSE2-NEXT: movl %ebp, %esp
; X86-SSE2-NEXT: popl %ebp
; X86-SSE2-NEXT: .cfi_def_cfa %esp, 4
; X86-SSE2-NEXT: retl
;
; X64-SSE-LABEL: lrint_v8f64:
; X64-SSE: # %bb.0:
; X64-SSE-NEXT: cvtsd2si %xmm0, %rax
; X64-SSE-NEXT: movq %rax, %xmm4
; X64-SSE-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1,1]
; X64-SSE-NEXT: cvtsd2si %xmm0, %rax
; X64-SSE-NEXT: movq %rax, %xmm0
; X64-SSE-NEXT: punpcklqdq {{.*#+}} xmm4 = xmm4[0],xmm0[0]
; X64-SSE-NEXT: cvtsd2si %xmm1, %rax
; X64-SSE-NEXT: movq %rax, %xmm5
; X64-SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1,1]
; X64-SSE-NEXT: cvtsd2si %xmm1, %rax
; X64-SSE-NEXT: movq %rax, %xmm0
; X64-SSE-NEXT: punpcklqdq {{.*#+}} xmm5 = xmm5[0],xmm0[0]
; X64-SSE-NEXT: cvtsd2si %xmm2, %rax
; X64-SSE-NEXT: movq %rax, %xmm6
; X64-SSE-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1,1]
; X64-SSE-NEXT: cvtsd2si %xmm2, %rax
; X64-SSE-NEXT: movq %rax, %xmm0
; X64-SSE-NEXT: punpcklqdq {{.*#+}} xmm6 = xmm6[0],xmm0[0]
; X64-SSE-NEXT: cvtsd2si %xmm3, %rax
; X64-SSE-NEXT: movq %rax, %xmm7
; X64-SSE-NEXT: unpckhpd {{.*#+}} xmm3 = xmm3[1,1]
; X64-SSE-NEXT: cvtsd2si %xmm3, %rax
; X64-SSE-NEXT: movq %rax, %xmm0
; X64-SSE-NEXT: punpcklqdq {{.*#+}} xmm7 = xmm7[0],xmm0[0]
; X64-SSE-NEXT: movdqa %xmm4, %xmm0
; X64-SSE-NEXT: movdqa %xmm5, %xmm1
; X64-SSE-NEXT: movdqa %xmm6, %xmm2
; X64-SSE-NEXT: movdqa %xmm7, %xmm3
; X64-SSE-NEXT: retq
%a = call <8 x i64> @llvm.lrint.v8i64.v8f64(<8 x double> %x)
ret <8 x i64> %a
}
declare <8 x i64> @llvm.lrint.v8i64.v8f64(<8 x double>)
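The i686 runs explain the stack traffic above: 32-bit x86 has no SSE conversion that produces a 64-bit integer, so each f64 lane is spilled, loaded into x87, and converted with fistpll, which likewise rounds under the current x87 control word and so preserves lrint semantics. A hedged scalar equivalent (not from this commit) that appears to take the same fldl/fistpll path on i686:

define i64 @lrint_f64_scalar(double %x) {
  ; on 32-bit x86 an i64 result comes back in edx:eax via x87 fistpll
  %r = call i64 @llvm.lrint.i64.f64(double %x)
  ret i64 %r
}
declare i64 @llvm.lrint.i64.f64(double)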