; File: clang-p2996/llvm/test/CodeGen/RISCV/rvv/vwsub-mask-sdnode.ll
; Last commit: dbb65dd330 by Paul Walker, 2024-02-27 13:37:23 +00:00
;   [LLVM][tests/CodeGen/RISCV] Convert instances of ConstantExpr based splats to use splat().
;   This is mostly NFC, but some output does change due to consistently inserting
;   into poison rather than undef and using i64 as the index type for inserts.

; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
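; For context, the commit above replaced the old ConstantExpr spelling of vector
; splats with the splat() shorthand used throughout this file. An illustrative
; sketch (not the exact pre-change text of this test):
;   old: icmp slt <vscale x 8 x i32> %x, shufflevector (<vscale x 8 x i32> insertelement (<vscale x 8 x i32> undef, i32 42, i32 0), <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer)
;   new: icmp slt <vscale x 8 x i32> %x, splat (i32 42)
; The commit note about poison vs. undef and the i64 index type presumably refers to
; how that insert/broadcast expansion is now canonicalized, which is why some
; autogenerated CHECK output changed.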
; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK
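; Selecting zero for masked-off lanes folds into the mask itself: a single
; mask/tail-undisturbed vwsub.wv leaves %y untouched in inactive lanes.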
define <vscale x 8 x i64> @vwsub_wv_mask_v8i32(<vscale x 8 x i32> %x, <vscale x 8 x i64> %y) {
; CHECK-LABEL: vwsub_wv_mask_v8i32:
; CHECK: # %bb.0:
; CHECK-NEXT: li a0, 42
; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma
; CHECK-NEXT: vmslt.vx v0, v8, a0
; CHECK-NEXT: vsetvli zero, zero, e32, m4, tu, mu
; CHECK-NEXT: vwsub.wv v16, v16, v8, v0.t
; CHECK-NEXT: vmv8r.v v8, v16
; CHECK-NEXT: ret
%mask = icmp slt <vscale x 8 x i32> %x, splat (i32 42)
%a = select <vscale x 8 x i1> %mask, <vscale x 8 x i32> %x, <vscale x 8 x i32> zeroinitializer
%sa = sext <vscale x 8 x i32> %a to <vscale x 8 x i64>
%ret = sub <vscale x 8 x i64> %y, %sa
ret <vscale x 8 x i64> %ret
}
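; Same pattern as above, but the zero-extended operand selects the unsigned
; widening subtract vwsubu.wv.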
define <vscale x 8 x i64> @vwsubu_wv_mask_v8i32(<vscale x 8 x i32> %x, <vscale x 8 x i64> %y) {
; CHECK-LABEL: vwsubu_wv_mask_v8i32:
; CHECK: # %bb.0:
; CHECK-NEXT: li a0, 42
; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma
; CHECK-NEXT: vmslt.vx v0, v8, a0
; CHECK-NEXT: vsetvli zero, zero, e32, m4, tu, mu
; CHECK-NEXT: vwsubu.wv v16, v16, v8, v0.t
; CHECK-NEXT: vmv8r.v v8, v16
; CHECK-NEXT: ret
%mask = icmp slt <vscale x 8 x i32> %x, splat (i32 42)
%a = select <vscale x 8 x i1> %mask, <vscale x 8 x i32> %x, <vscale x 8 x i32> zeroinitializer
%sa = zext <vscale x 8 x i32> %a to <vscale x 8 x i64>
%ret = sub <vscale x 8 x i64> %y, %sa
ret <vscale x 8 x i64> %ret
}
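; Here both operands are zero-extended, so the select is materialized with
; vmv.v.i/vmerge and an unmasked vwsubu.vv performs the widening subtract.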
define <vscale x 8 x i64> @vwsubu_vv_mask_v8i32(<vscale x 8 x i32> %x, <vscale x 8 x i32> %y) {
; CHECK-LABEL: vwsubu_vv_mask_v8i32:
; CHECK: # %bb.0:
; CHECK-NEXT: li a0, 42
; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma
; CHECK-NEXT: vmslt.vx v0, v8, a0
; CHECK-NEXT: vmv.v.i v16, 0
; CHECK-NEXT: vmerge.vvm v8, v16, v8, v0
; CHECK-NEXT: vwsubu.vv v16, v12, v8
; CHECK-NEXT: vmv8r.v v8, v16
; CHECK-NEXT: ret
%mask = icmp slt <vscale x 8 x i32> %x, splat (i32 42)
%a = select <vscale x 8 x i1> %mask, <vscale x 8 x i32> %x, <vscale x 8 x i32> zeroinitializer
%sa = zext <vscale x 8 x i32> %a to <vscale x 8 x i64>
%sy = zext <vscale x 8 x i32> %y to <vscale x 8 x i64>
%ret = sub <vscale x 8 x i64> %sy, %sa
ret <vscale x 8 x i64> %ret
}
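; The false operand of the select is 1 rather than 0, so the select cannot be
; folded into the mask of the subtract and remains an explicit vmerge.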
define <vscale x 8 x i64> @vwsub_wv_mask_v8i32_nonzero(<vscale x 8 x i32> %x, <vscale x 8 x i64> %y) {
; CHECK-LABEL: vwsub_wv_mask_v8i32_nonzero:
; CHECK: # %bb.0:
; CHECK-NEXT: li a0, 42
; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma
; CHECK-NEXT: vmslt.vx v0, v8, a0
; CHECK-NEXT: vmv.v.i v12, 1
; CHECK-NEXT: vmerge.vvm v24, v12, v8, v0
; CHECK-NEXT: vwsub.wv v8, v16, v24
; CHECK-NEXT: ret
%mask = icmp slt <vscale x 8 x i32> %x, splat (i32 42)
%a = select <vscale x 8 x i1> %mask, <vscale x 8 x i32> %x, <vscale x 8 x i32> splat (i32 1)
%sa = sext <vscale x 8 x i32> %a to <vscale x 8 x i64>
%ret = sub <vscale x 8 x i64> %y, %sa
ret <vscale x 8 x i64> %ret
}