diff --git a/llvm/test/CodeGen/RISCV/rvv/vp-splice-fixed-vectors.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vp-splice.ll
similarity index 85%
rename from llvm/test/CodeGen/RISCV/rvv/vp-splice-fixed-vectors.ll
rename to llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vp-splice.ll
index 494bf46050cc..7245d8a11f56 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vp-splice-fixed-vectors.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vp-splice.ll
@@ -1,15 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple riscv64 -mattr=+f,+d,+v -verify-machineinstrs -riscv-v-vector-bits-min=128 \
+; RUN: llc -mtriple riscv64 -mattr=+f,+d,+v,+zvfh -verify-machineinstrs -riscv-v-vector-bits-min=128 \
 ; RUN: < %s | FileCheck %s
 
-declare <2 x i64> @llvm.experimental.vp.splice.v2i64(<2 x i64>, <2 x i64>, i32, <2 x i1>, i32, i32)
-declare <4 x i32> @llvm.experimental.vp.splice.v4i32(<4 x i32>, <4 x i32>, i32, <4 x i1>, i32, i32)
-declare <8 x i16> @llvm.experimental.vp.splice.v8i16(<8 x i16>, <8 x i16>, i32, <8 x i1>, i32, i32)
-declare <16 x i8> @llvm.experimental.vp.splice.v16i8(<16 x i8>, <16 x i8>, i32, <16 x i1>, i32, i32)
-
-declare <2 x double> @llvm.experimental.vp.splice.v2f64(<2 x double>, <2 x double>, i32, <2 x i1>, i32, i32)
-declare <4 x float> @llvm.experimental.vp.splice.v4f32(<4 x float>, <4 x float>, i32, <4 x i1>, i32, i32)
-
 define <2 x i64> @test_vp_splice_v2i64(<2 x i64> %va, <2 x i64> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
 ; CHECK-LABEL: test_vp_splice_v2i64:
 ; CHECK: # %bb.0:
@@ -255,3 +247,44 @@ define <4 x float> @test_vp_splice_v4f32_masked(<4 x float> %va, <4 x float> %vb
   %v = call <4 x float> @llvm.experimental.vp.splice.v4f32(<4 x float> %va, <4 x float> %vb, i32 5, <4 x i1> %mask, i32 %evla, i32 %evlb)
   ret <4 x float> %v
 }
+
+define <8 x half> @test_vp_splice_v8f16(<8 x half> %va, <8 x half> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
+; CHECK-LABEL: test_vp_splice_v8f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, a0, -5
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT: vslidedown.vi v8, v8, 5
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT: vslideup.vx v8, v9, a0
+; CHECK-NEXT: ret
+
+  %v = call <8 x half> @llvm.experimental.vp.splice.v8f16(<8 x half> %va, <8 x half> %vb, i32 5, <8 x i1> splat (i1 1), i32 %evla, i32 %evlb)
+  ret <8 x half> %v
+}
+
+define <8 x half> @test_vp_splice_v8f16_negative_offset(<8 x half> %va, <8 x half> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
+; CHECK-LABEL: test_vp_splice_v8f16_negative_offset:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, a0, -5
+; CHECK-NEXT: vsetivli zero, 5, e16, m1, ta, ma
+; CHECK-NEXT: vslidedown.vx v8, v8, a0
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT: vslideup.vi v8, v9, 5
+; CHECK-NEXT: ret
+
+  %v = call <8 x half> @llvm.experimental.vp.splice.v8f16(<8 x half> %va, <8 x half> %vb, i32 -5, <8 x i1> splat (i1 1), i32 %evla, i32 %evlb)
+  ret <8 x half> %v
+}
+
+define <8 x half> @test_vp_splice_v8f16_masked(<8 x half> %va, <8 x half> %vb, <8 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
+; CHECK-LABEL: test_vp_splice_v8f16_masked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, a0, -5
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT: vslidedown.vi v8, v8, 5, v0.t
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
+; CHECK-NEXT: vslideup.vx v8, v9, a0, v0.t
+; CHECK-NEXT: ret
+  %v = call <8 x half> @llvm.experimental.vp.splice.v8f16(<8 x half> %va, <8 x half> %vb, i32 5, <8 x i1> %mask, i32 %evla, i32 %evlb)
+  ret <8 x half> %v
+}