[ARM] Add NEON vector support for round
As per #142559, this marks ISD::FROUND as Legal for NEON vector types (v2f32/v4f32, and v4f16/v8f16 with full FP16) and auto-upgrades the existing llvm.arm.neon.vrinta intrinsics to the generic llvm.round intrinsic.
This commit is contained in:
@@ -839,8 +839,8 @@ static const ARMVectorIntrinsicInfo ARMSIMDIntrinsicMap [] = {
|
||||
NEONMAP2(vrhadd_v, arm_neon_vrhaddu, arm_neon_vrhadds, Add1ArgType | UnsignedAlts),
|
||||
NEONMAP2(vrhaddq_v, arm_neon_vrhaddu, arm_neon_vrhadds, Add1ArgType | UnsignedAlts),
|
||||
NEONMAP1(vrnd_v, arm_neon_vrintz, Add1ArgType),
|
||||
NEONMAP1(vrnda_v, arm_neon_vrinta, Add1ArgType),
|
||||
NEONMAP1(vrndaq_v, arm_neon_vrinta, Add1ArgType),
|
||||
NEONMAP1(vrnda_v, round, Add1ArgType),
|
||||
NEONMAP1(vrndaq_v, round, Add1ArgType),
|
||||
NEONMAP0(vrndi_v),
|
||||
NEONMAP0(vrndiq_v),
|
||||
NEONMAP1(vrndm_v, floor, Add1ArgType),
|
||||
|
||||
@@ -16,7 +16,7 @@
|
||||
// CHECK-A32-NEXT: [[TMP0:%.*]] = bitcast <2 x float> [[A]] to <2 x i32>
|
||||
// CHECK-A32-NEXT: [[TMP1:%.*]] = bitcast <2 x i32> [[TMP0]] to <8 x i8>
|
||||
// CHECK-A32-NEXT: [[VRNDA_V_I:%.*]] = bitcast <8 x i8> [[TMP1]] to <2 x float>
|
||||
// CHECK-A32-NEXT: [[VRNDA_V1_I:%.*]] = call <2 x float> @llvm.arm.neon.vrinta.v2f32(<2 x float> [[VRNDA_V_I]])
|
||||
// CHECK-A32-NEXT: [[VRNDA_V1_I:%.*]] = call <2 x float> @llvm.round.v2f32(<2 x float> [[VRNDA_V_I]])
|
||||
// CHECK-A32-NEXT: [[VRNDA_V2_I:%.*]] = bitcast <2 x float> [[VRNDA_V1_I]] to <8 x i8>
|
||||
// CHECK-A32-NEXT: [[TMP2:%.*]] = bitcast <8 x i8> [[VRNDA_V2_I]] to <2 x i32>
|
||||
// CHECK-A32-NEXT: [[TMP3:%.*]] = bitcast <2 x i32> [[TMP2]] to <2 x float>
|
||||
@@ -41,7 +41,7 @@ float32x2_t test_vrnda_f32(float32x2_t a) {
|
||||
// CHECK-A32-NEXT: [[TMP0:%.*]] = bitcast <4 x float> [[A]] to <4 x i32>
|
||||
// CHECK-A32-NEXT: [[TMP1:%.*]] = bitcast <4 x i32> [[TMP0]] to <16 x i8>
|
||||
// CHECK-A32-NEXT: [[VRNDAQ_V_I:%.*]] = bitcast <16 x i8> [[TMP1]] to <4 x float>
|
||||
// CHECK-A32-NEXT: [[VRNDAQ_V1_I:%.*]] = call <4 x float> @llvm.arm.neon.vrinta.v4f32(<4 x float> [[VRNDAQ_V_I]])
|
||||
// CHECK-A32-NEXT: [[VRNDAQ_V1_I:%.*]] = call <4 x float> @llvm.round.v4f32(<4 x float> [[VRNDAQ_V_I]])
|
||||
// CHECK-A32-NEXT: [[VRNDAQ_V2_I:%.*]] = bitcast <4 x float> [[VRNDAQ_V1_I]] to <16 x i8>
|
||||
// CHECK-A32-NEXT: [[TMP2:%.*]] = bitcast <16 x i8> [[VRNDAQ_V2_I]] to <4 x i32>
|
||||
// CHECK-A32-NEXT: [[TMP3:%.*]] = bitcast <4 x i32> [[TMP2]] to <4 x float>
|
||||
|
||||
@@ -554,7 +554,7 @@ float16x8_t test_vrndq_f16(float16x8_t a) {
|
||||
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x half> [[A]] to <4 x i16>
|
||||
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <4 x i16> [[TMP0]] to <8 x i8>
|
||||
// CHECK-NEXT: [[VRNDA_V_I:%.*]] = bitcast <8 x i8> [[TMP1]] to <4 x half>
|
||||
// CHECK-NEXT: [[VRNDA_V1_I:%.*]] = call <4 x half> @llvm.arm.neon.vrinta.v4f16(<4 x half> [[VRNDA_V_I]])
|
||||
// CHECK-NEXT: [[VRNDA_V1_I:%.*]] = call <4 x half> @llvm.round.v4f16(<4 x half> [[VRNDA_V_I]])
|
||||
// CHECK-NEXT: [[VRNDA_V2_I:%.*]] = bitcast <4 x half> [[VRNDA_V1_I]] to <8 x i8>
|
||||
// CHECK-NEXT: [[TMP2:%.*]] = bitcast <8 x i8> [[VRNDA_V2_I]] to <4 x i16>
|
||||
// CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i16> [[TMP2]] to <4 x half>
|
||||
@@ -570,7 +570,7 @@ float16x4_t test_vrnda_f16(float16x4_t a) {
|
||||
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <8 x half> [[A]] to <8 x i16>
|
||||
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <8 x i16> [[TMP0]] to <16 x i8>
|
||||
// CHECK-NEXT: [[VRNDAQ_V_I:%.*]] = bitcast <16 x i8> [[TMP1]] to <8 x half>
|
||||
// CHECK-NEXT: [[VRNDAQ_V1_I:%.*]] = call <8 x half> @llvm.arm.neon.vrinta.v8f16(<8 x half> [[VRNDAQ_V_I]])
|
||||
// CHECK-NEXT: [[VRNDAQ_V1_I:%.*]] = call <8 x half> @llvm.round.v8f16(<8 x half> [[VRNDAQ_V_I]])
|
||||
// CHECK-NEXT: [[VRNDAQ_V2_I:%.*]] = bitcast <8 x half> [[VRNDAQ_V1_I]] to <16 x i8>
|
||||
// CHECK-NEXT: [[TMP2:%.*]] = bitcast <16 x i8> [[VRNDAQ_V2_I]] to <8 x i16>
|
||||
// CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i16> [[TMP2]] to <8 x half>
|
||||
|
||||
@@ -680,7 +680,6 @@ def int_arm_neon_vtbx4 : Neon_Tbl6Arg_Intrinsic;
|
||||
// Vector and Scalar Rounding.
|
||||
def int_arm_neon_vrintn : Neon_1FloatArg_Intrinsic;
|
||||
def int_arm_neon_vrintx : Neon_1Arg_Intrinsic;
|
||||
def int_arm_neon_vrinta : Neon_1Arg_Intrinsic;
|
||||
def int_arm_neon_vrintz : Neon_1Arg_Intrinsic;
|
||||
def int_arm_neon_vrintp : Neon_1Arg_Intrinsic;
|
||||
|
||||
|
||||
@@ -720,6 +720,7 @@ static bool upgradeArmOrAarch64IntrinsicFunction(bool IsArm, Function *F,
|
||||
.StartsWith("vqsubs.", Intrinsic::ssub_sat)
|
||||
.StartsWith("vqsubu.", Intrinsic::usub_sat)
|
||||
.StartsWith("vrintm.", Intrinsic::floor)
|
||||
.StartsWith("vrinta.", Intrinsic::round)
|
||||
.Default(Intrinsic::not_intrinsic);
|
||||
if (ID != Intrinsic::not_intrinsic) {
|
||||
NewFn = Intrinsic::getOrInsertDeclaration(F->getParent(), ID,
|
||||
|
||||
@@ -1546,6 +1546,8 @@ ARMTargetLowering::ARMTargetLowering(const TargetMachine &TM_,
|
||||
if (Subtarget->hasV8Ops()) {
|
||||
setOperationAction(ISD::FFLOOR, MVT::v2f32, Legal);
|
||||
setOperationAction(ISD::FFLOOR, MVT::v4f32, Legal);
|
||||
setOperationAction(ISD::FROUND, MVT::v2f32, Legal);
|
||||
setOperationAction(ISD::FROUND, MVT::v4f32, Legal);
|
||||
}
|
||||
|
||||
if (Subtarget->hasFullFP16()) {
|
||||
@@ -1561,6 +1563,8 @@ ARMTargetLowering::ARMTargetLowering(const TargetMachine &TM_,
|
||||
|
||||
setOperationAction(ISD::FFLOOR, MVT::v4f16, Legal);
|
||||
setOperationAction(ISD::FFLOOR, MVT::v8f16, Legal);
|
||||
setOperationAction(ISD::FROUND, MVT::v4f16, Legal);
|
||||
setOperationAction(ISD::FROUND, MVT::v8f16, Legal);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -7314,7 +7314,7 @@ multiclass VRINT_FPI<string op, bits<3> op9_7, SDPatternOperator Int> {
|
||||
|
||||
defm VRINTNN : VRINT_FPI<"n", 0b000, int_arm_neon_vrintn>;
|
||||
defm VRINTXN : VRINT_FPI<"x", 0b001, int_arm_neon_vrintx>;
|
||||
defm VRINTAN : VRINT_FPI<"a", 0b010, int_arm_neon_vrinta>;
|
||||
defm VRINTAN : VRINT_FPI<"a", 0b010, fround>;
|
||||
defm VRINTZN : VRINT_FPI<"z", 0b011, int_arm_neon_vrintz>;
|
||||
defm VRINTMN : VRINT_FPI<"m", 0b101, ffloor>;
|
||||
defm VRINTPN : VRINT_FPI<"p", 0b111, int_arm_neon_vrintp>;
|
||||
|
||||
@@ -79,21 +79,7 @@ define <4 x half> @frinta_4h(<4 x half> %A) nounwind {
|
||||
;
|
||||
; CHECK-FP16-LABEL: frinta_4h:
|
||||
; CHECK-FP16: @ %bb.0:
|
||||
; CHECK-FP16-NEXT: vmovx.f16 s2, s0
|
||||
; CHECK-FP16-NEXT: vrinta.f16 s2, s2
|
||||
; CHECK-FP16-NEXT: vmov r0, s2
|
||||
; CHECK-FP16-NEXT: vrinta.f16 s2, s0
|
||||
; CHECK-FP16-NEXT: vmov r1, s2
|
||||
; CHECK-FP16-NEXT: vrinta.f16 s2, s1
|
||||
; CHECK-FP16-NEXT: vmovx.f16 s0, s1
|
||||
; CHECK-FP16-NEXT: vrinta.f16 s0, s0
|
||||
; CHECK-FP16-NEXT: vmov.16 d16[0], r1
|
||||
; CHECK-FP16-NEXT: vmov.16 d16[1], r0
|
||||
; CHECK-FP16-NEXT: vmov r0, s2
|
||||
; CHECK-FP16-NEXT: vmov.16 d16[2], r0
|
||||
; CHECK-FP16-NEXT: vmov r0, s0
|
||||
; CHECK-FP16-NEXT: vmov.16 d16[3], r0
|
||||
; CHECK-FP16-NEXT: vorr d0, d16, d16
|
||||
; CHECK-FP16-NEXT: vrinta.f16 d0, d0
|
||||
; CHECK-FP16-NEXT: bx lr
|
||||
%tmp3 = call <4 x half> @llvm.round.v4f16(<4 x half> %A)
|
||||
ret <4 x half> %tmp3
|
||||
@@ -243,35 +229,7 @@ define <8 x half> @frinta_8h(<8 x half> %A) nounwind {
|
||||
;
|
||||
; CHECK-FP16-LABEL: frinta_8h:
|
||||
; CHECK-FP16: @ %bb.0:
|
||||
; CHECK-FP16-NEXT: vmovx.f16 s4, s2
|
||||
; CHECK-FP16-NEXT: vrinta.f16 s4, s4
|
||||
; CHECK-FP16-NEXT: vmov r0, s4
|
||||
; CHECK-FP16-NEXT: vrinta.f16 s4, s2
|
||||
; CHECK-FP16-NEXT: vmov r1, s4
|
||||
; CHECK-FP16-NEXT: vrinta.f16 s4, s3
|
||||
; CHECK-FP16-NEXT: vmov.16 d17[0], r1
|
||||
; CHECK-FP16-NEXT: vmov.16 d17[1], r0
|
||||
; CHECK-FP16-NEXT: vmov r0, s4
|
||||
; CHECK-FP16-NEXT: vmovx.f16 s4, s3
|
||||
; CHECK-FP16-NEXT: vrinta.f16 s4, s4
|
||||
; CHECK-FP16-NEXT: vmov.16 d17[2], r0
|
||||
; CHECK-FP16-NEXT: vmov r0, s4
|
||||
; CHECK-FP16-NEXT: vmovx.f16 s4, s0
|
||||
; CHECK-FP16-NEXT: vrinta.f16 s4, s4
|
||||
; CHECK-FP16-NEXT: vmov.16 d17[3], r0
|
||||
; CHECK-FP16-NEXT: vmov r0, s4
|
||||
; CHECK-FP16-NEXT: vrinta.f16 s4, s0
|
||||
; CHECK-FP16-NEXT: vmovx.f16 s0, s1
|
||||
; CHECK-FP16-NEXT: vmov r1, s4
|
||||
; CHECK-FP16-NEXT: vrinta.f16 s4, s1
|
||||
; CHECK-FP16-NEXT: vrinta.f16 s0, s0
|
||||
; CHECK-FP16-NEXT: vmov.16 d16[0], r1
|
||||
; CHECK-FP16-NEXT: vmov.16 d16[1], r0
|
||||
; CHECK-FP16-NEXT: vmov r0, s4
|
||||
; CHECK-FP16-NEXT: vmov.16 d16[2], r0
|
||||
; CHECK-FP16-NEXT: vmov r0, s0
|
||||
; CHECK-FP16-NEXT: vmov.16 d16[3], r0
|
||||
; CHECK-FP16-NEXT: vorr q0, q8, q8
|
||||
; CHECK-FP16-NEXT: vrinta.f16 q0, q0
|
||||
; CHECK-FP16-NEXT: bx lr
|
||||
%tmp3 = call <8 x half> @llvm.round.v8f16(<8 x half> %A)
|
||||
ret <8 x half> %tmp3
|
||||
@@ -297,9 +255,7 @@ define <2 x float> @frinta_2s(<2 x float> %A) nounwind {
|
||||
;
|
||||
; CHECK-LABEL: frinta_2s:
|
||||
; CHECK: @ %bb.0:
|
||||
; CHECK-NEXT: vrinta.f32 s3, s1
|
||||
; CHECK-NEXT: vrinta.f32 s2, s0
|
||||
; CHECK-NEXT: vmov.f64 d0, d1
|
||||
; CHECK-NEXT: vrinta.f32 d0, d0
|
||||
; CHECK-NEXT: bx lr
|
||||
%tmp3 = call <2 x float> @llvm.round.v2f32(<2 x float> %A)
|
||||
ret <2 x float> %tmp3
|
||||
@@ -331,11 +287,7 @@ define <4 x float> @frinta_4s(<4 x float> %A) nounwind {
|
||||
;
|
||||
; CHECK-LABEL: frinta_4s:
|
||||
; CHECK: @ %bb.0:
|
||||
; CHECK-NEXT: vrinta.f32 s7, s3
|
||||
; CHECK-NEXT: vrinta.f32 s6, s2
|
||||
; CHECK-NEXT: vrinta.f32 s5, s1
|
||||
; CHECK-NEXT: vrinta.f32 s4, s0
|
||||
; CHECK-NEXT: vorr q0, q1, q1
|
||||
; CHECK-NEXT: vrinta.f32 q0, q0
|
||||
; CHECK-NEXT: bx lr
|
||||
%tmp3 = call <4 x float> @llvm.round.v4f32(<4 x float> %A)
|
||||
ret <4 x float> %tmp3
|
||||
|
||||
Reference in New Issue
Block a user