From 4e3781607cd12eb337298ee6d16ebecde4ce5741 Mon Sep 17 00:00:00 2001
From: David Green
Date: Thu, 19 Sep 2024 08:32:23 +0100
Subject: [PATCH] [ARM][MVE] Add vector tests for ucmp/scmp. NFC

---
 llvm/test/CodeGen/Thumb2/mve-scmp.ll | 344 +++++++++++++++++++++++++++
 llvm/test/CodeGen/Thumb2/mve-ucmp.ll | 343 ++++++++++++++++++++++++++
 2 files changed, 687 insertions(+)
 create mode 100644 llvm/test/CodeGen/Thumb2/mve-scmp.ll
 create mode 100644 llvm/test/CodeGen/Thumb2/mve-ucmp.ll

diff --git a/llvm/test/CodeGen/Thumb2/mve-scmp.ll b/llvm/test/CodeGen/Thumb2/mve-scmp.ll
new file mode 100644
index 000000000000..23462384eca9
--- /dev/null
+++ b/llvm/test/CodeGen/Thumb2/mve-scmp.ll
@@ -0,0 +1,344 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve.fp %s -o - | FileCheck %s
+
+define arm_aapcs_vfpcc <8 x i8> @s_v8i8(<8 x i8> %a, <8 x i8> %b) {
+; CHECK-LABEL: s_v8i8:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmovlb.s8 q1, q1
+; CHECK-NEXT:    vmovlb.s8 q0, q0
+; CHECK-NEXT:    vmov.i32 q2, #0x0
+; CHECK-NEXT:    vmov.i16 q3, #0x1
+; CHECK-NEXT:    vcmp.s16 gt, q0, q1
+; CHECK-NEXT:    vpsel q2, q3, q2
+; CHECK-NEXT:    vmov.i8 q3, #0xff
+; CHECK-NEXT:    vcmp.s16 gt, q1, q0
+; CHECK-NEXT:    vpsel q0, q3, q2
+; CHECK-NEXT:    bx lr
+entry:
+  %c = call <8 x i8> @llvm.scmp(<8 x i8> %a, <8 x i8> %b)
+  ret <8 x i8> %c
+}
+
+define arm_aapcs_vfpcc <16 x i8> @s_v16i8(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-LABEL: s_v16i8:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov.i32 q2, #0x0
+; CHECK-NEXT:    vmov.i8 q3, #0x1
+; CHECK-NEXT:    vcmp.s8 gt, q0, q1
+; CHECK-NEXT:    vpsel q2, q3, q2
+; CHECK-NEXT:    vmov.i8 q3, #0xff
+; CHECK-NEXT:    vcmp.s8 gt, q1, q0
+; CHECK-NEXT:    vpsel q0, q3, q2
+; CHECK-NEXT:    bx lr
+entry:
+  %c = call <16 x i8> @llvm.scmp(<16 x i8> %a, <16 x i8> %b)
+  ret <16 x i8> %c
+}
+
+define arm_aapcs_vfpcc <4 x i16> @s_v4i16(<4 x i16> %a, <4 x i16> %b) {
+; CHECK-LABEL: s_v4i16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmovlb.s16 q1, q1
+; CHECK-NEXT:    vmovlb.s16 q0, q0
+; CHECK-NEXT:    vmov.i32 q2, #0x0
+; CHECK-NEXT:    vmov.i32 q3, #0x1
+; CHECK-NEXT:    vcmp.s32 gt, q0, q1
+; CHECK-NEXT:    vpsel q2, q3, q2
+; CHECK-NEXT:    vmov.i8 q3, #0xff
+; CHECK-NEXT:    vcmp.s32 gt, q1, q0
+; CHECK-NEXT:    vpsel q0, q3, q2
+; CHECK-NEXT:    bx lr
+entry:
+  %c = call <4 x i16> @llvm.scmp(<4 x i16> %a, <4 x i16> %b)
+  ret <4 x i16> %c
+}
+
+define arm_aapcs_vfpcc <8 x i16> @s_v8i16(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: s_v8i16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov.i32 q2, #0x0
+; CHECK-NEXT:    vmov.i16 q3, #0x1
+; CHECK-NEXT:    vcmp.s16 gt, q0, q1
+; CHECK-NEXT:    vpsel q2, q3, q2
+; CHECK-NEXT:    vmov.i8 q3, #0xff
+; CHECK-NEXT:    vcmp.s16 gt, q1, q0
+; CHECK-NEXT:    vpsel q0, q3, q2
+; CHECK-NEXT:    bx lr
+entry:
+  %c = call <8 x i16> @llvm.scmp(<8 x i16> %a, <8 x i16> %b)
+  ret <8 x i16> %c
+}
+
+define arm_aapcs_vfpcc <16 x i16> @s_v16i16(<16 x i16> %a, <16 x i16> %b) {
+; CHECK-LABEL: s_v16i16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT:    vpush {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT:    vmov.i32 q4, #0x0
+; CHECK-NEXT:    vmov.i16 q5, #0x1
+; CHECK-NEXT:    vcmp.s16 gt, q0, q2
+; CHECK-NEXT:    vmov.i8 q7, #0xff
+; CHECK-NEXT:    vpsel q6, q5, q4
+; CHECK-NEXT:    vcmp.s16 gt, q2, q0
+; CHECK-NEXT:    vpsel q0, q7, q6
+; CHECK-NEXT:    vcmp.s16 gt, q1, q3
+; CHECK-NEXT:    vpsel q2, q5, q4
+; CHECK-NEXT:    vcmp.s16 gt, q3, q1
+; CHECK-NEXT:    vpsel q1, q7, q2
+; CHECK-NEXT:    vpop {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT:    bx lr
+entry:
+  %c = call <16 x i16> @llvm.scmp(<16 x i16> %a, <16 x i16> %b)
+  ret <16 x i16> %c
+}
+
+define arm_aapcs_vfpcc <2 x i32> @s_v2i32(<2 x i32> %a, <2 x i32> %b) {
+; CHECK-LABEL: s_v2i32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .save {r4, r5, r6, lr}
+; CHECK-NEXT:    push {r4, r5, r6, lr}
+; CHECK-NEXT:    vmov r2, s4
+; CHECK-NEXT:    adr.w r12, .LCPI5_0
+; CHECK-NEXT:    vmov r1, s0
+; CHECK-NEXT:    vldrw.u32 q3, [r12]
+; CHECK-NEXT:    vmov r0, s6
+; CHECK-NEXT:    movs r6, #0
+; CHECK-NEXT:    vmov r4, s2
+; CHECK-NEXT:    vmov.i32 q2, #0x0
+; CHECK-NEXT:    vmov.i8 q1, #0xff
+; CHECK-NEXT:    subs r3, r2, r1
+; CHECK-NEXT:    asr.w lr, r2, #31
+; CHECK-NEXT:    sbcs.w r3, lr, r1, asr #31
+; CHECK-NEXT:    csetm r12, lt
+; CHECK-NEXT:    movs r3, #0
+; CHECK-NEXT:    subs r5, r0, r4
+; CHECK-NEXT:    bfi r3, r12, #0, #8
+; CHECK-NEXT:    asr.w r12, r0, #31
+; CHECK-NEXT:    sbcs.w r5, r12, r4, asr #31
+; CHECK-NEXT:    csetm r5, lt
+; CHECK-NEXT:    bfi r3, r5, #8, #8
+; CHECK-NEXT:    vmsr p0, r3
+; CHECK-NEXT:    asrs r3, r1, #31
+; CHECK-NEXT:    subs r1, r1, r2
+; CHECK-NEXT:    vpsel q0, q3, q2
+; CHECK-NEXT:    sbcs.w r1, r3, r2, asr #31
+; CHECK-NEXT:    csetm r1, lt
+; CHECK-NEXT:    subs r2, r4, r0
+; CHECK-NEXT:    bfi r6, r1, #0, #8
+; CHECK-NEXT:    asr.w r1, r4, #31
+; CHECK-NEXT:    sbcs.w r0, r1, r0, asr #31
+; CHECK-NEXT:    csetm r0, lt
+; CHECK-NEXT:    bfi r6, r0, #8, #8
+; CHECK-NEXT:    vmsr p0, r6
+; CHECK-NEXT:    vpsel q0, q1, q0
+; CHECK-NEXT:    pop {r4, r5, r6, pc}
+; CHECK-NEXT:    .p2align 4
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI5_0:
+; CHECK-NEXT:    .long 1 @ 0x1
+; CHECK-NEXT:    .long 0 @ 0x0
+; CHECK-NEXT:    .long 1 @ 0x1
+; CHECK-NEXT:    .long 0 @ 0x0
+entry:
+  %c = call <2 x i32> @llvm.scmp(<2 x i32> %a, <2 x i32> %b)
+  ret <2 x i32> %c
+}
+
+define arm_aapcs_vfpcc <4 x i32> @s_v4i32(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: s_v4i32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov.i32 q2, #0x0
+; CHECK-NEXT:    vmov.i32 q3, #0x1
+; CHECK-NEXT:    vcmp.s32 gt, q0, q1
+; CHECK-NEXT:    vpsel q2, q3, q2
+; CHECK-NEXT:    vmov.i8 q3, #0xff
+; CHECK-NEXT:    vcmp.s32 gt, q1, q0
+; CHECK-NEXT:    vpsel q0, q3, q2
+; CHECK-NEXT:    bx lr
+entry:
+  %c = call <4 x i32> @llvm.scmp(<4 x i32> %a, <4 x i32> %b)
+  ret <4 x i32> %c
+}
+
+define arm_aapcs_vfpcc <8 x i32> @s_v8i32(<8 x i32> %a, <8 x i32> %b) {
+; CHECK-LABEL: s_v8i32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT:    vpush {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT:    vmov.i32 q4, #0x0
+; CHECK-NEXT:    vmov.i32 q5, #0x1
+; CHECK-NEXT:    vcmp.s32 gt, q0, q2
+; CHECK-NEXT:    vmov.i8 q7, #0xff
+; CHECK-NEXT:    vpsel q6, q5, q4
+; CHECK-NEXT:    vcmp.s32 gt, q2, q0
+; CHECK-NEXT:    vpsel q0, q7, q6
+; CHECK-NEXT:    vcmp.s32 gt, q1, q3
+; CHECK-NEXT:    vpsel q2, q5, q4
+; CHECK-NEXT:    vcmp.s32 gt, q3, q1
+; CHECK-NEXT:    vpsel q1, q7, q2
+; CHECK-NEXT:    vpop {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT:    bx lr
+entry:
+  %c = call <8 x i32> @llvm.scmp(<8 x i32> %a, <8 x i32> %b)
+  ret <8 x i32> %c
+}
+
+define arm_aapcs_vfpcc <2 x i64> @s_v2i64(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-LABEL: s_v2i64:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .save {r4, r5, r6, r7, r8, lr}
+; CHECK-NEXT:    push.w {r4, r5, r6, r7, r8, lr}
+; CHECK-NEXT:    vmov lr, r12, d0
+; CHECK-NEXT:    movs r4, #0
+; CHECK-NEXT:    vmov r3, r8, d2
+; CHECK-NEXT:    movs r0, #0
+; CHECK-NEXT:    vmov r6, r7, d3
+; CHECK-NEXT:    vmov.i32 q1, #0x0
+; CHECK-NEXT:    subs.w r1, r3, lr
+; CHECK-NEXT:    sbcs.w r1, r8, r12
+; CHECK-NEXT:    csetm r1, lt
+; CHECK-NEXT:    bfi r4, r1, #0, #8
+; CHECK-NEXT:    vmov r1, r5, d1
+; CHECK-NEXT:    subs r2, r6, r1
+; CHECK-NEXT:    sbcs.w r2, r7, r5
+; CHECK-NEXT:    csetm r2, lt
+; CHECK-NEXT:    bfi r4, r2, #8, #8
+; CHECK-NEXT:    adr r2, .LCPI8_0
+; CHECK-NEXT:    vldrw.u32 q0, [r2]
+; CHECK-NEXT:    subs.w r2, lr, r3
+; CHECK-NEXT:    sbcs.w r2, r12, r8
+; CHECK-NEXT:    vmsr p0, r4
+; CHECK-NEXT:    csetm r2, lt
+; CHECK-NEXT:    subs r1, r1, r6
+; CHECK-NEXT:    sbcs.w r1, r5, r7
+; CHECK-NEXT:    bfi r0, r2, #0, #8
+; CHECK-NEXT:    csetm r1, lt
+; CHECK-NEXT:    vpsel q0, q0, q1
+; CHECK-NEXT:    bfi r0, r1, #8, #8
+; CHECK-NEXT:    vmov.i8 q1, #0xff
+; CHECK-NEXT:    vmsr p0, r0
+; CHECK-NEXT:    vpsel q0, q1, q0
+; CHECK-NEXT:    pop.w {r4, r5, r6, r7, r8, pc}
+; CHECK-NEXT:    .p2align 4
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI8_0:
+; CHECK-NEXT:    .long 1 @ 0x1
+; CHECK-NEXT:    .long 0 @ 0x0
+; CHECK-NEXT:    .long 1 @ 0x1
+; CHECK-NEXT:    .long 0 @ 0x0
+entry:
+  %c = call <2 x i64> @llvm.scmp(<2 x i64> %a, <2 x i64> %b)
+  ret <2 x i64> %c
+}
+
+define arm_aapcs_vfpcc <4 x i64> @s_v4i64(<4 x i64> %a, <4 x i64> %b) {
+; CHECK-LABEL: s_v4i64:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .save {r4, r5, r6, r7, r8, r9, lr}
+; CHECK-NEXT:    push.w {r4, r5, r6, r7, r8, r9, lr}
+; CHECK-NEXT:    .pad #4
+; CHECK-NEXT:    sub sp, #4
+; CHECK-NEXT:    .vsave {d8, d9, d10, d11}
+; CHECK-NEXT:    vpush {d8, d9, d10, d11}
+; CHECK-NEXT:    vmov lr, r12, d0
+; CHECK-NEXT:    movs r4, #0
+; CHECK-NEXT:    vmov r3, r8, d4
+; CHECK-NEXT:    vmov.i32 q5, #0x0
+; CHECK-NEXT:    vmov r6, r7, d5
+; CHECK-NEXT:    mov.w r9, #0
+; CHECK-NEXT:    vmov.i8 q2, #0xff
+; CHECK-NEXT:    subs.w r1, r3, lr
+; CHECK-NEXT:    sbcs.w r1, r8, r12
+; CHECK-NEXT:    csetm r1, lt
+; CHECK-NEXT:    bfi r4, r1, #0, #8
+; CHECK-NEXT:    vmov r1, r5, d1
+; CHECK-NEXT:    subs r2, r6, r1
+; CHECK-NEXT:    sbcs.w r2, r7, r5
+; CHECK-NEXT:    csetm r2, lt
+; CHECK-NEXT:    bfi r4, r2, #8, #8
+; CHECK-NEXT:    adr r2, .LCPI9_0
+; CHECK-NEXT:    vldrw.u32 q4, [r2]
+; CHECK-NEXT:    subs.w r2, lr, r3
+; CHECK-NEXT:    sbcs.w r2, r12, r8
+; CHECK-NEXT:    mov.w r3, #0
+; CHECK-NEXT:    csetm r2, lt
+; CHECK-NEXT:    subs r1, r1, r6
+; CHECK-NEXT:    sbcs.w r1, r5, r7
+; CHECK-NEXT:    bfi r3, r2, #0, #8
+; CHECK-NEXT:    csetm r1, lt
+; CHECK-NEXT:    vmsr p0, r4
+; CHECK-NEXT:    bfi r3, r1, #8, #8
+; CHECK-NEXT:    vpsel q0, q4, q5
+; CHECK-NEXT:    vmsr p0, r3
+; CHECK-NEXT:    vmov lr, r12, d2
+; CHECK-NEXT:    vmov r3, r7, d6
+; CHECK-NEXT:    movs r5, #0
+; CHECK-NEXT:    vmov r2, r1, d7
+; CHECK-NEXT:    vpsel q0, q2, q0
+; CHECK-NEXT:    subs.w r6, r3, lr
+; CHECK-NEXT:    sbcs.w r6, r7, r12
+; CHECK-NEXT:    csetm r6, lt
+; CHECK-NEXT:    bfi r5, r6, #0, #8
+; CHECK-NEXT:    vmov r6, r4, d3
+; CHECK-NEXT:    subs r0, r2, r6
+; CHECK-NEXT:    sbcs.w r0, r1, r4
+; CHECK-NEXT:    csetm r0, lt
+; CHECK-NEXT:    bfi r5, r0, #8, #8
+; CHECK-NEXT:    subs.w r0, lr, r3
+; CHECK-NEXT:    sbcs.w r0, r12, r7
+; CHECK-NEXT:    vmsr p0, r5
+; CHECK-NEXT:    csetm r0, lt
+; CHECK-NEXT:    vpsel q1, q4, q5
+; CHECK-NEXT:    bfi r9, r0, #0, #8
+; CHECK-NEXT:    subs r0, r6, r2
+; CHECK-NEXT:    sbcs.w r0, r4, r1
+; CHECK-NEXT:    csetm r0, lt
+; CHECK-NEXT:    bfi r9, r0, #8, #8
+; CHECK-NEXT:    vmsr p0, r9
+; CHECK-NEXT:    vpsel q1, q2, q1
+; CHECK-NEXT:    vpop {d8, d9, d10, d11}
+; CHECK-NEXT:    add sp, #4
+; CHECK-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, pc}
+; CHECK-NEXT:    .p2align 4
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI9_0:
+; CHECK-NEXT:    .long 1 @ 0x1
+; CHECK-NEXT:    .long 0 @ 0x0
+; CHECK-NEXT:    .long 1 @ 0x1
+; CHECK-NEXT:    .long 0 @ 0x0
+entry:
+  %c = call <4 x i64> @llvm.scmp(<4 x i64> %a, <4 x i64> %b)
+  ret <4 x i64> %c
+}
+
+define arm_aapcs_vfpcc <16 x i8> @signOf_neon(<8 x i16> %s0_lo, <8 x i16> %s0_hi, <8 x i16> %s1_lo, <8 x i16> %s1_hi) {
+; CHECK-LABEL: signOf_neon:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT:    vpush {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT:    .pad #16
+; CHECK-NEXT:    sub sp, #16
+; CHECK-NEXT:    vmov.i32 q4, #0x0
+; CHECK-NEXT:    vmov.i16 q5, #0x1
+; CHECK-NEXT:    vcmp.s16 gt, q1, q3
+; CHECK-NEXT:    vmov.i8 q7, #0xff
+; CHECK-NEXT:    vpsel q6, q5, q4
+; CHECK-NEXT:    vcmp.s16 gt, q3, q1
+; CHECK-NEXT:    vpsel q1, q7, q6
+; CHECK-NEXT:    vcmp.s16 gt, q0, q2
+; CHECK-NEXT:    vpsel q3, q5, q4
+; CHECK-NEXT:    vcmp.s16 gt, q2, q0
+; CHECK-NEXT:    mov r0, sp
+; CHECK-NEXT:    vpsel q0, q7, q3
+; CHECK-NEXT:    vstrb.16 q1, [r0, #8]
+; CHECK-NEXT:    vstrb.16 q0, [r0]
+; CHECK-NEXT:    vldrw.u32 q0, [r0]
+; CHECK-NEXT:    add sp, #16
+; CHECK-NEXT:    vpop {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = shufflevector <8 x i16> %s0_lo, <8 x i16> %s0_hi, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+  %1 = shufflevector <8 x i16> %s1_lo, <8 x i16> %s1_hi, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+  %or.i = tail call <16 x i8> @llvm.scmp.v16i8.v16i16(<16 x i16> %0, <16 x i16> %1)
+  ret <16 x i8> %or.i
+}
diff --git a/llvm/test/CodeGen/Thumb2/mve-ucmp.ll b/llvm/test/CodeGen/Thumb2/mve-ucmp.ll
new file mode 100644
index 000000000000..92dc9a01d211
--- /dev/null
+++ b/llvm/test/CodeGen/Thumb2/mve-ucmp.ll
@@ -0,0 +1,343 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve.fp %s -o - | FileCheck %s
+
+define arm_aapcs_vfpcc <8 x i8> @u_v8i8(<8 x i8> %a, <8 x i8> %b) {
+; CHECK-LABEL: u_v8i8:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmovlb.u8 q1, q1
+; CHECK-NEXT:    vmovlb.u8 q0, q0
+; CHECK-NEXT:    vmov.i32 q2, #0x0
+; CHECK-NEXT:    vmov.i16 q3, #0x1
+; CHECK-NEXT:    vcmp.u16 hi, q0, q1
+; CHECK-NEXT:    vpsel q2, q3, q2
+; CHECK-NEXT:    vmov.i8 q3, #0xff
+; CHECK-NEXT:    vcmp.u16 hi, q1, q0
+; CHECK-NEXT:    vpsel q0, q3, q2
+; CHECK-NEXT:    bx lr
+entry:
+  %c = call <8 x i8> @llvm.ucmp(<8 x i8> %a, <8 x i8> %b)
+  ret <8 x i8> %c
+}
+
+define arm_aapcs_vfpcc <16 x i8> @u_v16i8(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-LABEL: u_v16i8:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov.i32 q2, #0x0
+; CHECK-NEXT:    vmov.i8 q3, #0x1
+; CHECK-NEXT:    vcmp.u8 hi, q0, q1
+; CHECK-NEXT:    vpsel q2, q3, q2
+; CHECK-NEXT:    vmov.i8 q3, #0xff
+; CHECK-NEXT:    vcmp.u8 hi, q1, q0
+; CHECK-NEXT:    vpsel q0, q3, q2
+; CHECK-NEXT:    bx lr
+entry:
+  %c = call <16 x i8> @llvm.ucmp(<16 x i8> %a, <16 x i8> %b)
+  ret <16 x i8> %c
+}
+
+define arm_aapcs_vfpcc <4 x i16> @u_v4i16(<4 x i16> %a, <4 x i16> %b) {
+; CHECK-LABEL: u_v4i16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmovlb.u16 q1, q1
+; CHECK-NEXT:    vmovlb.u16 q0, q0
+; CHECK-NEXT:    vmov.i32 q2, #0x0
+; CHECK-NEXT:    vmov.i32 q3, #0x1
+; CHECK-NEXT:    vcmp.u32 hi, q0, q1
+; CHECK-NEXT:    vpsel q2, q3, q2
+; CHECK-NEXT:    vmov.i8 q3, #0xff
+; CHECK-NEXT:    vcmp.u32 hi, q1, q0
+; CHECK-NEXT:    vpsel q0, q3, q2
+; CHECK-NEXT:    bx lr
+entry:
+  %c = call <4 x i16> @llvm.ucmp(<4 x i16> %a, <4 x i16> %b)
+  ret <4 x i16> %c
+}
+
+define arm_aapcs_vfpcc <8 x i16> @u_v8i16(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: u_v8i16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov.i32 q2, #0x0
+; CHECK-NEXT:    vmov.i16 q3, #0x1
+; CHECK-NEXT:    vcmp.u16 hi, q0, q1
+; CHECK-NEXT:    vpsel q2, q3, q2
+; CHECK-NEXT:    vmov.i8 q3, #0xff
+; CHECK-NEXT:    vcmp.u16 hi, q1, q0
+; CHECK-NEXT:    vpsel q0, q3, q2
+; CHECK-NEXT:    bx lr
+entry:
+  %c = call <8 x i16> @llvm.ucmp(<8 x i16> %a, <8 x i16> %b)
+  ret <8 x i16> %c
+}
+
+define arm_aapcs_vfpcc <16 x i16> @u_v16i16(<16 x i16> %a, <16 x i16> %b) {
+; CHECK-LABEL: u_v16i16:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT:    vpush {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT:    vmov.i32 q4, #0x0
+; CHECK-NEXT:    vmov.i16 q5, #0x1
+; CHECK-NEXT:    vcmp.u16 hi, q0, q2
+; CHECK-NEXT:    vmov.i8 q7, #0xff
+; CHECK-NEXT:    vpsel q6, q5, q4
+; CHECK-NEXT:    vcmp.u16 hi, q2, q0
+; CHECK-NEXT:    vpsel q0, q7, q6
+; CHECK-NEXT:    vcmp.u16 hi, q1, q3
+; CHECK-NEXT:    vpsel q2, q5, q4
+; CHECK-NEXT:    vcmp.u16 hi, q3, q1
+; CHECK-NEXT:    vpsel q1, q7, q2
+; CHECK-NEXT:    vpop {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT:    bx lr
+entry:
+  %c = call <16 x i16> @llvm.ucmp(<16 x i16> %a, <16 x i16> %b)
+  ret <16 x i16> %c
+}
+
+define arm_aapcs_vfpcc <2 x i32> @u_v2i32(<2 x i32> %a, <2 x i32> %b) {
+; CHECK-LABEL: u_v2i32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .save {r4, r5, r6, r7, r8, lr}
+; CHECK-NEXT:    push.w {r4, r5, r6, r7, r8, lr}
+; CHECK-NEXT:    vmov.i64 q2, #0xffffffff
+; CHECK-NEXT:    movs r4, #0
+; CHECK-NEXT:    vand q1, q1, q2
+; CHECK-NEXT:    vand q0, q0, q2
+; CHECK-NEXT:    vmov lr, r12, d0
+; CHECK-NEXT:    movs r0, #0
+; CHECK-NEXT:    vmov r3, r8, d2
+; CHECK-NEXT:    vmov r6, r7, d3
+; CHECK-NEXT:    vmov.i32 q1, #0x0
+; CHECK-NEXT:    subs.w r1, r3, lr
+; CHECK-NEXT:    sbcs.w r1, r8, r12
+; CHECK-NEXT:    csetm r1, lo
+; CHECK-NEXT:    bfi r4, r1, #0, #8
+; CHECK-NEXT:    vmov r1, r5, d1
+; CHECK-NEXT:    subs r2, r6, r1
+; CHECK-NEXT:    sbcs.w r2, r7, r5
+; CHECK-NEXT:    csetm r2, lo
+; CHECK-NEXT:    bfi r4, r2, #8, #8
+; CHECK-NEXT:    adr r2, .LCPI5_0
+; CHECK-NEXT:    vldrw.u32 q0, [r2]
+; CHECK-NEXT:    subs.w r2, lr, r3
+; CHECK-NEXT:    sbcs.w r2, r12, r8
+; CHECK-NEXT:    vmsr p0, r4
+; CHECK-NEXT:    csetm r2, lo
+; CHECK-NEXT:    subs r1, r1, r6
+; CHECK-NEXT:    sbcs.w r1, r5, r7
+; CHECK-NEXT:    bfi r0, r2, #0, #8
+; CHECK-NEXT:    csetm r1, lo
+; CHECK-NEXT:    vpsel q0, q0, q1
+; CHECK-NEXT:    bfi r0, r1, #8, #8
+; CHECK-NEXT:    vmov.i8 q1, #0xff
+; CHECK-NEXT:    vmsr p0, r0
+; CHECK-NEXT:    vpsel q0, q1, q0
+; CHECK-NEXT:    pop.w {r4, r5, r6, r7, r8, pc}
+; CHECK-NEXT:    .p2align 4
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI5_0:
+; CHECK-NEXT:    .long 1 @ 0x1
+; CHECK-NEXT:    .long 0 @ 0x0
+; CHECK-NEXT:    .long 1 @ 0x1
+; CHECK-NEXT:    .long 0 @ 0x0
+entry:
+  %c = call <2 x i32> @llvm.ucmp(<2 x i32> %a, <2 x i32> %b)
+  ret <2 x i32> %c
+}
+
+define arm_aapcs_vfpcc <4 x i32> @u_v4i32(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-LABEL: u_v4i32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov.i32 q2, #0x0
+; CHECK-NEXT:    vmov.i32 q3, #0x1
+; CHECK-NEXT:    vcmp.u32 hi, q0, q1
+; CHECK-NEXT:    vpsel q2, q3, q2
+; CHECK-NEXT:    vmov.i8 q3, #0xff
+; CHECK-NEXT:    vcmp.u32 hi, q1, q0
+; CHECK-NEXT:    vpsel q0, q3, q2
+; CHECK-NEXT:    bx lr
+entry:
+  %c = call <4 x i32> @llvm.ucmp(<4 x i32> %a, <4 x i32> %b)
+  ret <4 x i32> %c
+}
+
+define arm_aapcs_vfpcc <8 x i32> @u_v8i32(<8 x i32> %a, <8 x i32> %b) {
+; CHECK-LABEL: u_v8i32:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT:    vpush {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT:    vmov.i32 q4, #0x0
+; CHECK-NEXT:    vmov.i32 q5, #0x1
+; CHECK-NEXT:    vcmp.u32 hi, q0, q2
+; CHECK-NEXT:    vmov.i8 q7, #0xff
+; CHECK-NEXT:    vpsel q6, q5, q4
+; CHECK-NEXT:    vcmp.u32 hi, q2, q0
+; CHECK-NEXT:    vpsel q0, q7, q6
+; CHECK-NEXT:    vcmp.u32 hi, q1, q3
+; CHECK-NEXT:    vpsel q2, q5, q4
+; CHECK-NEXT:    vcmp.u32 hi, q3, q1
+; CHECK-NEXT:    vpsel q1, q7, q2
+; CHECK-NEXT:    vpop {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT:    bx lr
+entry:
+  %c = call <8 x i32> @llvm.ucmp(<8 x i32> %a, <8 x i32> %b)
+  ret <8 x i32> %c
+}
+
+define arm_aapcs_vfpcc <2 x i64> @u_v2i64(<2 x i64> %a, <2 x i64> %b) {
+; CHECK-LABEL: u_v2i64:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .save {r4, r5, r6, r7, r8, lr}
+; CHECK-NEXT:    push.w {r4, r5, r6, r7, r8, lr}
+; CHECK-NEXT:    vmov lr, r12, d0
+; CHECK-NEXT:    movs r4, #0
+; CHECK-NEXT:    vmov r3, r8, d2
+; CHECK-NEXT:    movs r0, #0
+; CHECK-NEXT:    vmov r6, r7, d3
+; CHECK-NEXT:    vmov.i32 q1, #0x0
+; CHECK-NEXT:    subs.w r1, r3, lr
+; CHECK-NEXT:    sbcs.w r1, r8, r12
+; CHECK-NEXT:    csetm r1, lo
+; CHECK-NEXT:    bfi r4, r1, #0, #8
+; CHECK-NEXT:    vmov r1, r5, d1
+; CHECK-NEXT:    subs r2, r6, r1
+; CHECK-NEXT:    sbcs.w r2, r7, r5
+; CHECK-NEXT:    csetm r2, lo
+; CHECK-NEXT:    bfi r4, r2, #8, #8
+; CHECK-NEXT:    adr r2, .LCPI8_0
+; CHECK-NEXT:    vldrw.u32 q0, [r2]
+; CHECK-NEXT:    subs.w r2, lr, r3
+; CHECK-NEXT:    sbcs.w r2, r12, r8
+; CHECK-NEXT:    vmsr p0, r4
+; CHECK-NEXT:    csetm r2, lo
+; CHECK-NEXT:    subs r1, r1, r6
+; CHECK-NEXT:    sbcs.w r1, r5, r7
+; CHECK-NEXT:    bfi r0, r2, #0, #8
+; CHECK-NEXT:    csetm r1, lo
+; CHECK-NEXT:    vpsel q0, q0, q1
+; CHECK-NEXT:    bfi r0, r1, #8, #8
+; CHECK-NEXT:    vmov.i8 q1, #0xff
+; CHECK-NEXT:    vmsr p0, r0
+; CHECK-NEXT:    vpsel q0, q1, q0
+; CHECK-NEXT:    pop.w {r4, r5, r6, r7, r8, pc}
+; CHECK-NEXT:    .p2align 4
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI8_0:
+; CHECK-NEXT:    .long 1 @ 0x1
+; CHECK-NEXT:    .long 0 @ 0x0
+; CHECK-NEXT:    .long 1 @ 0x1
+; CHECK-NEXT:    .long 0 @ 0x0
+entry:
+  %c = call <2 x i64> @llvm.ucmp(<2 x i64> %a, <2 x i64> %b)
+  ret <2 x i64> %c
+}
+
+define arm_aapcs_vfpcc <4 x i64> @u_v4i64(<4 x i64> %a, <4 x i64> %b) {
+; CHECK-LABEL: u_v4i64:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .save {r4, r5, r6, r7, r8, r9, lr}
+; CHECK-NEXT:    push.w {r4, r5, r6, r7, r8, r9, lr}
+; CHECK-NEXT:    .pad #4
+; CHECK-NEXT:    sub sp, #4
+; CHECK-NEXT:    .vsave {d8, d9, d10, d11}
+; CHECK-NEXT:    vpush {d8, d9, d10, d11}
+; CHECK-NEXT:    vmov lr, r12, d0
+; CHECK-NEXT:    movs r4, #0
+; CHECK-NEXT:    vmov r3, r8, d4
+; CHECK-NEXT:    vmov.i32 q5, #0x0
+; CHECK-NEXT:    vmov r6, r7, d5
+; CHECK-NEXT:    mov.w r9, #0
+; CHECK-NEXT:    vmov.i8 q2, #0xff
+; CHECK-NEXT:    subs.w r1, r3, lr
+; CHECK-NEXT:    sbcs.w r1, r8, r12
+; CHECK-NEXT:    csetm r1, lo
+; CHECK-NEXT:    bfi r4, r1, #0, #8
+; CHECK-NEXT:    vmov r1, r5, d1
+; CHECK-NEXT:    subs r2, r6, r1
+; CHECK-NEXT:    sbcs.w r2, r7, r5
+; CHECK-NEXT:    csetm r2, lo
+; CHECK-NEXT:    bfi r4, r2, #8, #8
+; CHECK-NEXT:    adr r2, .LCPI9_0
+; CHECK-NEXT:    vldrw.u32 q4, [r2]
+; CHECK-NEXT:    subs.w r2, lr, r3
+; CHECK-NEXT:    sbcs.w r2, r12, r8
+; CHECK-NEXT:    mov.w r3, #0
+; CHECK-NEXT:    csetm r2, lo
+; CHECK-NEXT:    subs r1, r1, r6
+; CHECK-NEXT:    sbcs.w r1, r5, r7
+; CHECK-NEXT:    bfi r3, r2, #0, #8
+; CHECK-NEXT:    csetm r1, lo
+; CHECK-NEXT:    vmsr p0, r4
+; CHECK-NEXT:    bfi r3, r1, #8, #8
+; CHECK-NEXT:    vpsel q0, q4, q5
+; CHECK-NEXT:    vmsr p0, r3
+; CHECK-NEXT:    vmov lr, r12, d2
+; CHECK-NEXT:    vmov r3, r7, d6
+; CHECK-NEXT:    movs r5, #0
+; CHECK-NEXT:    vmov r2, r1, d7
+; CHECK-NEXT:    vpsel q0, q2, q0
+; CHECK-NEXT:    subs.w r6, r3, lr
+; CHECK-NEXT:    sbcs.w r6, r7, r12
+; CHECK-NEXT:    csetm r6, lo
+; CHECK-NEXT:    bfi r5, r6, #0, #8
+; CHECK-NEXT:    vmov r6, r4, d3
+; CHECK-NEXT:    subs r0, r2, r6
+; CHECK-NEXT:    sbcs.w r0, r1, r4
+; CHECK-NEXT:    csetm r0, lo
+; CHECK-NEXT:    bfi r5, r0, #8, #8
+; CHECK-NEXT:    subs.w r0, lr, r3
+; CHECK-NEXT:    sbcs.w r0, r12, r7
+; CHECK-NEXT:    vmsr p0, r5
+; CHECK-NEXT:    csetm r0, lo
+; CHECK-NEXT:    vpsel q1, q4, q5
+; CHECK-NEXT:    bfi r9, r0, #0, #8
+; CHECK-NEXT:    subs r0, r6, r2
+; CHECK-NEXT:    sbcs.w r0, r4, r1
+; CHECK-NEXT:    csetm r0, lo
+; CHECK-NEXT:    bfi r9, r0, #8, #8
+; CHECK-NEXT:    vmsr p0, r9
+; CHECK-NEXT:    vpsel q1, q2, q1
+; CHECK-NEXT:    vpop {d8, d9, d10, d11}
+; CHECK-NEXT:    add sp, #4
+; CHECK-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, pc}
+; CHECK-NEXT:    .p2align 4
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI9_0:
+; CHECK-NEXT:    .long 1 @ 0x1
+; CHECK-NEXT:    .long 0 @ 0x0
+; CHECK-NEXT:    .long 1 @ 0x1
+; CHECK-NEXT:    .long 0 @ 0x0
+entry:
+  %c = call <4 x i64> @llvm.ucmp(<4 x i64> %a, <4 x i64> %b)
+  ret <4 x i64> %c
+}
+
+define arm_aapcs_vfpcc <16 x i8> @signOf_neon(<8 x i16> %s0_lo, <8 x i16> %s0_hi, <8 x i16> %s1_lo, <8 x i16> %s1_hi) {
+; CHECK-LABEL: signOf_neon:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT:    vpush {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT:    .pad #16
+; CHECK-NEXT:    sub sp, #16
+; CHECK-NEXT:    vmov.i32 q4, #0x0
+; CHECK-NEXT:    vmov.i16 q5, #0x1
+; CHECK-NEXT:    vcmp.u16 hi, q1, q3
+; CHECK-NEXT:    vmov.i8 q7, #0xff
+; CHECK-NEXT:    vpsel q6, q5, q4
+; CHECK-NEXT:    vcmp.u16 hi, q3, q1
+; CHECK-NEXT:    vpsel q1, q7, q6
+; CHECK-NEXT:    vcmp.u16 hi, q0, q2
+; CHECK-NEXT:    vpsel q3, q5, q4
+; CHECK-NEXT:    vcmp.u16 hi, q2, q0
+; CHECK-NEXT:    mov r0, sp
+; CHECK-NEXT:    vpsel q0, q7, q3
+; CHECK-NEXT:    vstrb.16 q1, [r0, #8]
+; CHECK-NEXT:    vstrb.16 q0, [r0]
+; CHECK-NEXT:    vldrw.u32 q0, [r0]
+; CHECK-NEXT:    add sp, #16
+; CHECK-NEXT:    vpop {d8, d9, d10, d11, d12, d13, d14, d15}
+; CHECK-NEXT:    bx lr
+entry:
+  %0 = shufflevector <8 x i16> %s0_lo, <8 x i16> %s0_hi, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+  %1 = shufflevector <8 x i16> %s1_lo, <8 x i16> %s1_hi, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+  %or.i = tail call <16 x i8> @llvm.ucmp.v16i8.v16i16(<16 x i16> %0, <16 x i16> %1)
+  ret <16 x i8> %or.i
+}