Files
clang-p2996/llvm/test/CodeGen/RISCV/rvv/vmulh-sdnode.ll
Luke Lau d8d131dfa9 [RISCV] Convert more constant splats in tests to splat shorthand. NFC (#87616)
A handy shorthand for specifying the shufflevector(insertelement(poison,
foo, 0), poison, zeroinitializer) splat pattern was introduced in
#74620.

Some of the RISC-V tests were converted over to use this new form in
dbb65dd330; this patch handles the rest, which didn't have any codegen
diffs.

This not only converts some constant expressions to the new form, but
also instruction sequences that weren't previously constant expressions
to constant expressions as well. In some cases this affects codegen, but
these have been omitted here and will be handled in a separate PR.
2024-04-09 15:46:38 +08:00

265 lines
10 KiB
LLVM

; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s
; Test that the prepareSREMEqFold optimization doesn't crash on scalable
; vector types.
; srem-by-constant followed by an equality compare with zero: this is the
; pattern prepareSREMEqFold rewrites. The expected lowering below (vmacc /
; vsll / vsrl / vor / vmsleu) is the multiply-and-rotate form of the
; divisibility check; the point of the test is that the fold handles
; scalable vectors without crashing.
define <vscale x 4 x i1> @srem_eq_fold_nxv4i8(<vscale x 4 x i8> %va) {
; CHECK-LABEL: srem_eq_fold_nxv4i8:
; CHECK: # %bb.0:
; CHECK-NEXT: li a0, 42
; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma
; CHECK-NEXT: vmv.v.x v9, a0
; CHECK-NEXT: li a1, -85
; CHECK-NEXT: vmacc.vx v9, a1, v8
; CHECK-NEXT: vsll.vi v8, v9, 7
; CHECK-NEXT: vsrl.vi v9, v9, 1
; CHECK-NEXT: vor.vv v8, v9, v8
; CHECK-NEXT: vmsleu.vx v0, v8, a0
; CHECK-NEXT: ret
%rem = srem <vscale x 4 x i8> %va, splat (i8 6)       ; per-lane signed remainder by 6
%cc = icmp eq <vscale x 4 x i8> %rem, zeroinitializer ; true for lanes divisible by 6
ret <vscale x 4 x i1> %cc
}
; High half of a signed 32x32->64 multiply, written as sext/mul/lshr/trunc;
; expected to select a single vmulh.vv.
define <vscale x 1 x i32> @vmulh_vv_nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb) {
; CHECK-LABEL: vmulh_vv_nxv1i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
; CHECK-NEXT: vmulh.vv v8, v9, v8
; CHECK-NEXT: ret
%vc = sext <vscale x 1 x i32> %vb to <vscale x 1 x i64> ; widen both operands so the
%vd = sext <vscale x 1 x i32> %va to <vscale x 1 x i64> ; full 64-bit product is formed
%ve = mul <vscale x 1 x i64> %vc, %vd
%vf = lshr <vscale x 1 x i64> %ve, splat (i64 32)       ; keep the high 32 bits
%vg = trunc <vscale x 1 x i64> %vf to <vscale x 1 x i32>
ret <vscale x 1 x i32> %vg
}
; Same high-half multiply with a splatted scalar operand; expected to select
; vmulh.vx. The splat of %x stays in explicit insertelement/shufflevector
; form because the splat shorthand only applies to constants.
define <vscale x 1 x i32> @vmulh_vx_nxv1i32(<vscale x 1 x i32> %va, i32 %x) {
; CHECK-LABEL: vmulh_vx_nxv1i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, ma
; CHECK-NEXT: vmulh.vx v8, v8, a0
; CHECK-NEXT: ret
%head1 = insertelement <vscale x 1 x i32> poison, i32 %x, i32 0
%splat1 = shufflevector <vscale x 1 x i32> %head1, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
%vb = sext <vscale x 1 x i32> %splat1 to <vscale x 1 x i64>
%vc = sext <vscale x 1 x i32> %va to <vscale x 1 x i64>
%vd = mul <vscale x 1 x i64> %vb, %vc
%ve = lshr <vscale x 1 x i64> %vd, splat (i64 32)       ; keep the high 32 bits
%vf = trunc <vscale x 1 x i64> %ve to <vscale x 1 x i32>
ret <vscale x 1 x i32> %vf
}
; High-half multiply by the constant splat -7; the immediate does not fit
; vmulh.vi, so it is materialized with li and selected as vmulh.vx.
define <vscale x 1 x i32> @vmulh_vi_nxv1i32_0(<vscale x 1 x i32> %va) {
; CHECK-LABEL: vmulh_vi_nxv1i32_0:
; CHECK: # %bb.0:
; CHECK-NEXT: li a0, -7
; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, ma
; CHECK-NEXT: vmulh.vx v8, v8, a0
; CHECK-NEXT: ret
%vb = sext <vscale x 1 x i32> splat (i32 -7) to <vscale x 1 x i64>
%vc = sext <vscale x 1 x i32> %va to <vscale x 1 x i64>
%vd = mul <vscale x 1 x i64> %vb, %vc
%ve = lshr <vscale x 1 x i64> %vd, splat (i64 32)       ; keep the high 32 bits
%vf = trunc <vscale x 1 x i64> %ve to <vscale x 1 x i32>
ret <vscale x 1 x i32> %vf
}
; High-half multiply by the constant splat 16 (a power of two); currently
; materialized with li and selected as vmulh.vx.
define <vscale x 1 x i32> @vmulh_vi_nxv1i32_1(<vscale x 1 x i32> %va) {
; CHECK-LABEL: vmulh_vi_nxv1i32_1:
; CHECK: # %bb.0:
; CHECK-NEXT: li a0, 16
; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, ma
; CHECK-NEXT: vmulh.vx v8, v8, a0
; CHECK-NEXT: ret
%vb = sext <vscale x 1 x i32> splat (i32 16) to <vscale x 1 x i64>
%vc = sext <vscale x 1 x i32> %va to <vscale x 1 x i64>
%vd = mul <vscale x 1 x i64> %vb, %vc
%ve = lshr <vscale x 1 x i64> %vd, splat (i64 32)       ; keep the high 32 bits
%vf = trunc <vscale x 1 x i64> %ve to <vscale x 1 x i32>
ret <vscale x 1 x i32> %vf
}
; Same high-half multiply pattern as vmulh_vv_nxv1i32, at nxv2i32 (LMUL=1).
define <vscale x 2 x i32> @vmulh_vv_nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb) {
; CHECK-LABEL: vmulh_vv_nxv2i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma
; CHECK-NEXT: vmulh.vv v8, v9, v8
; CHECK-NEXT: ret
%vc = sext <vscale x 2 x i32> %vb to <vscale x 2 x i64> ; widen both operands so the
%vd = sext <vscale x 2 x i32> %va to <vscale x 2 x i64> ; full 64-bit product is formed
%ve = mul <vscale x 2 x i64> %vc, %vd
%vf = lshr <vscale x 2 x i64> %ve, splat (i64 32)       ; keep the high 32 bits
%vg = trunc <vscale x 2 x i64> %vf to <vscale x 2 x i32>
ret <vscale x 2 x i32> %vg
}
; Same scalar-splat high-half multiply as vmulh_vx_nxv1i32, at nxv2i32;
; %x is non-constant, so the explicit insertelement/shufflevector splat stays.
define <vscale x 2 x i32> @vmulh_vx_nxv2i32(<vscale x 2 x i32> %va, i32 %x) {
; CHECK-LABEL: vmulh_vx_nxv2i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma
; CHECK-NEXT: vmulh.vx v8, v8, a0
; CHECK-NEXT: ret
%head1 = insertelement <vscale x 2 x i32> poison, i32 %x, i32 0
%splat1 = shufflevector <vscale x 2 x i32> %head1, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
%vb = sext <vscale x 2 x i32> %splat1 to <vscale x 2 x i64>
%vc = sext <vscale x 2 x i32> %va to <vscale x 2 x i64>
%vd = mul <vscale x 2 x i64> %vb, %vc
%ve = lshr <vscale x 2 x i64> %vd, splat (i64 32)       ; keep the high 32 bits
%vf = trunc <vscale x 2 x i64> %ve to <vscale x 2 x i32>
ret <vscale x 2 x i32> %vf
}
; High-half multiply by constant splat -7 at nxv2i32; selected as vmulh.vx
; with the immediate materialized via li.
define <vscale x 2 x i32> @vmulh_vi_nxv2i32_0(<vscale x 2 x i32> %va) {
; CHECK-LABEL: vmulh_vi_nxv2i32_0:
; CHECK: # %bb.0:
; CHECK-NEXT: li a0, -7
; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma
; CHECK-NEXT: vmulh.vx v8, v8, a0
; CHECK-NEXT: ret
%vb = sext <vscale x 2 x i32> splat (i32 -7) to <vscale x 2 x i64>
%vc = sext <vscale x 2 x i32> %va to <vscale x 2 x i64>
%vd = mul <vscale x 2 x i64> %vb, %vc
%ve = lshr <vscale x 2 x i64> %vd, splat (i64 32)       ; keep the high 32 bits
%vf = trunc <vscale x 2 x i64> %ve to <vscale x 2 x i32>
ret <vscale x 2 x i32> %vf
}
; High-half multiply by constant splat 16 at nxv2i32; selected as vmulh.vx.
define <vscale x 2 x i32> @vmulh_vi_nxv2i32_1(<vscale x 2 x i32> %va) {
; CHECK-LABEL: vmulh_vi_nxv2i32_1:
; CHECK: # %bb.0:
; CHECK-NEXT: li a0, 16
; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma
; CHECK-NEXT: vmulh.vx v8, v8, a0
; CHECK-NEXT: ret
%vb = sext <vscale x 2 x i32> splat (i32 16) to <vscale x 2 x i64>
%vc = sext <vscale x 2 x i32> %va to <vscale x 2 x i64>
%vd = mul <vscale x 2 x i64> %vb, %vc
%ve = lshr <vscale x 2 x i64> %vd, splat (i64 32)       ; keep the high 32 bits
%vf = trunc <vscale x 2 x i64> %ve to <vscale x 2 x i32>
ret <vscale x 2 x i32> %vf
}
; Same high-half multiply pattern as vmulh_vv_nxv1i32, at nxv4i32 (LMUL=2).
define <vscale x 4 x i32> @vmulh_vv_nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb) {
; CHECK-LABEL: vmulh_vv_nxv4i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma
; CHECK-NEXT: vmulh.vv v8, v10, v8
; CHECK-NEXT: ret
%vc = sext <vscale x 4 x i32> %vb to <vscale x 4 x i64> ; widen both operands so the
%vd = sext <vscale x 4 x i32> %va to <vscale x 4 x i64> ; full 64-bit product is formed
%ve = mul <vscale x 4 x i64> %vc, %vd
%vf = lshr <vscale x 4 x i64> %ve, splat (i64 32)       ; keep the high 32 bits
%vg = trunc <vscale x 4 x i64> %vf to <vscale x 4 x i32>
ret <vscale x 4 x i32> %vg
}
; Same scalar-splat high-half multiply as vmulh_vx_nxv1i32, at nxv4i32;
; %x is non-constant, so the explicit insertelement/shufflevector splat stays.
define <vscale x 4 x i32> @vmulh_vx_nxv4i32(<vscale x 4 x i32> %va, i32 %x) {
; CHECK-LABEL: vmulh_vx_nxv4i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, ma
; CHECK-NEXT: vmulh.vx v8, v8, a0
; CHECK-NEXT: ret
%head1 = insertelement <vscale x 4 x i32> poison, i32 %x, i32 0
%splat1 = shufflevector <vscale x 4 x i32> %head1, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
%vb = sext <vscale x 4 x i32> %splat1 to <vscale x 4 x i64>
%vc = sext <vscale x 4 x i32> %va to <vscale x 4 x i64>
%vd = mul <vscale x 4 x i64> %vb, %vc
%ve = lshr <vscale x 4 x i64> %vd, splat (i64 32)       ; keep the high 32 bits
%vf = trunc <vscale x 4 x i64> %ve to <vscale x 4 x i32>
ret <vscale x 4 x i32> %vf
}
; High-half multiply by constant splat -7 at nxv4i32; selected as vmulh.vx
; with the immediate materialized via li.
define <vscale x 4 x i32> @vmulh_vi_nxv4i32_0(<vscale x 4 x i32> %va) {
; CHECK-LABEL: vmulh_vi_nxv4i32_0:
; CHECK: # %bb.0:
; CHECK-NEXT: li a0, -7
; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, ma
; CHECK-NEXT: vmulh.vx v8, v8, a0
; CHECK-NEXT: ret
%vb = sext <vscale x 4 x i32> splat (i32 -7) to <vscale x 4 x i64>
%vc = sext <vscale x 4 x i32> %va to <vscale x 4 x i64>
%vd = mul <vscale x 4 x i64> %vb, %vc
%ve = lshr <vscale x 4 x i64> %vd, splat (i64 32)       ; keep the high 32 bits
%vf = trunc <vscale x 4 x i64> %ve to <vscale x 4 x i32>
ret <vscale x 4 x i32> %vf
}
; High-half multiply by constant splat 16 at nxv4i32; selected as vmulh.vx.
define <vscale x 4 x i32> @vmulh_vi_nxv4i32_1(<vscale x 4 x i32> %va) {
; CHECK-LABEL: vmulh_vi_nxv4i32_1:
; CHECK: # %bb.0:
; CHECK-NEXT: li a0, 16
; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, ma
; CHECK-NEXT: vmulh.vx v8, v8, a0
; CHECK-NEXT: ret
%vb = sext <vscale x 4 x i32> splat (i32 16) to <vscale x 4 x i64>
%vc = sext <vscale x 4 x i32> %va to <vscale x 4 x i64>
%vd = mul <vscale x 4 x i64> %vb, %vc
%ve = lshr <vscale x 4 x i64> %vd, splat (i64 32)       ; keep the high 32 bits
%vf = trunc <vscale x 4 x i64> %ve to <vscale x 4 x i32>
ret <vscale x 4 x i32> %vf
}
; Same high-half multiply pattern as vmulh_vv_nxv1i32, at nxv8i32 (LMUL=4).
define <vscale x 8 x i32> @vmulh_vv_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb) {
; CHECK-LABEL: vmulh_vv_nxv8i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
; CHECK-NEXT: vmulh.vv v8, v12, v8
; CHECK-NEXT: ret
%vc = sext <vscale x 8 x i32> %vb to <vscale x 8 x i64> ; widen both operands so the
%vd = sext <vscale x 8 x i32> %va to <vscale x 8 x i64> ; full 64-bit product is formed
%ve = mul <vscale x 8 x i64> %vc, %vd
%vf = lshr <vscale x 8 x i64> %ve, splat (i64 32)       ; keep the high 32 bits
%vg = trunc <vscale x 8 x i64> %vf to <vscale x 8 x i32>
ret <vscale x 8 x i32> %vg
}
; Same scalar-splat high-half multiply as vmulh_vx_nxv1i32, at nxv8i32;
; %x is non-constant, so the explicit insertelement/shufflevector splat stays.
define <vscale x 8 x i32> @vmulh_vx_nxv8i32(<vscale x 8 x i32> %va, i32 %x) {
; CHECK-LABEL: vmulh_vx_nxv8i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma
; CHECK-NEXT: vmulh.vx v8, v8, a0
; CHECK-NEXT: ret
%head1 = insertelement <vscale x 8 x i32> poison, i32 %x, i32 0
%splat1 = shufflevector <vscale x 8 x i32> %head1, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
%vb = sext <vscale x 8 x i32> %splat1 to <vscale x 8 x i64>
%vc = sext <vscale x 8 x i32> %va to <vscale x 8 x i64>
%vd = mul <vscale x 8 x i64> %vb, %vc
%ve = lshr <vscale x 8 x i64> %vd, splat (i64 32)       ; keep the high 32 bits
%vf = trunc <vscale x 8 x i64> %ve to <vscale x 8 x i32>
ret <vscale x 8 x i32> %vf
}
; High-half multiply by constant splat -7 at nxv8i32; selected as vmulh.vx
; with the immediate materialized via li.
define <vscale x 8 x i32> @vmulh_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
; CHECK-LABEL: vmulh_vi_nxv8i32_0:
; CHECK: # %bb.0:
; CHECK-NEXT: li a0, -7
; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma
; CHECK-NEXT: vmulh.vx v8, v8, a0
; CHECK-NEXT: ret
%vb = sext <vscale x 8 x i32> splat (i32 -7) to <vscale x 8 x i64>
%vc = sext <vscale x 8 x i32> %va to <vscale x 8 x i64>
%vd = mul <vscale x 8 x i64> %vb, %vc
%ve = lshr <vscale x 8 x i64> %vd, splat (i64 32)       ; keep the high 32 bits
%vf = trunc <vscale x 8 x i64> %ve to <vscale x 8 x i32>
ret <vscale x 8 x i32> %vf
}
; High-half multiply by constant splat 16 at nxv8i32; selected as vmulh.vx.
define <vscale x 8 x i32> @vmulh_vi_nxv8i32_1(<vscale x 8 x i32> %va) {
; CHECK-LABEL: vmulh_vi_nxv8i32_1:
; CHECK: # %bb.0:
; CHECK-NEXT: li a0, 16
; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma
; CHECK-NEXT: vmulh.vx v8, v8, a0
; CHECK-NEXT: ret
%vb = sext <vscale x 8 x i32> splat (i32 16) to <vscale x 8 x i64>
%vc = sext <vscale x 8 x i32> %va to <vscale x 8 x i64>
%vd = mul <vscale x 8 x i64> %vb, %vc
%ve = lshr <vscale x 8 x i64> %vd, splat (i64 32)       ; keep the high 32 bits
%vf = trunc <vscale x 8 x i64> %ve to <vscale x 8 x i32>
ret <vscale x 8 x i32> %vf
}