; llvm/test/CodeGen/RISCV/rvv/vfneg-sdnode.ll (from the clang-p2996 tree)
;
; Introduced by "[RISCV] Apply promotion for f16 vector ops when only have
; zvfhmin" (Jianjian GUAN, reviewed by michaelmaitland,
; https://reviews.llvm.org/D153848, 2023-08-23):
;
; For most fp16 vector ops, we can promote to the corresponding fp32 vector
; op when zvfhmin is enabled but zvfh is not. For nxv32f16, however, the
; operation must be split first, since nxv32f32 is not a valid MVT. A
; conceptual sketch of the promotion follows the RUN lines below.

; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+zvfh,+v -target-abi=ilp32d \
; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfh,+v -target-abi=lp64d \
; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
; RUN: llc -mtriple=riscv32 -mattr=+d,+zfhmin,+zvfhmin,+v -target-abi=ilp32d \
; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
; RUN: llc -mtriple=riscv64 -mattr=+d,+zfhmin,+zvfhmin,+v -target-abi=lp64d \
; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
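
; With only Zvfhmin there is no native f16 vector arithmetic, so f16 fneg is
; promoted to f32. Conceptually (a sketch of the promotion strategy, with
; illustrative value names, not IR the backend literally materializes), the
; nxv1f16 case behaves like:
;
;   %w = fpext <vscale x 1 x half> %va to <vscale x 1 x float>
;   %n = fneg <vscale x 1 x float> %w
;   %r = fptrunc <vscale x 1 x float> %n to <vscale x 1 x half>
;
; This is why each ZVFHMIN body below pairs vfneg.v with a vfwcvt.f.f.v widen
; and a vfncvt.f.f.w narrow, and why the EEW/LMUL double around the operation
; (e.g. e16,mf4 -> e32,mf2). For fneg this promotion is exact, since negation
; only flips the sign bit.
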
define <vscale x 1 x half> @vfneg_vv_nxv1f16(<vscale x 1 x half> %va) {
; ZVFH-LABEL: vfneg_vv_nxv1f16:
; ZVFH: # %bb.0:
; ZVFH-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
; ZVFH-NEXT: vfneg.v v8, v8
; ZVFH-NEXT: ret
;
; ZVFHMIN-LABEL: vfneg_vv_nxv1f16:
; ZVFHMIN: # %bb.0:
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
; ZVFHMIN-NEXT: vfneg.v v9, v9
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9
; ZVFHMIN-NEXT: ret
%vb = fneg <vscale x 1 x half> %va
ret <vscale x 1 x half> %vb
}

define <vscale x 2 x half> @vfneg_vv_nxv2f16(<vscale x 2 x half> %va) {
; ZVFH-LABEL: vfneg_vv_nxv2f16:
; ZVFH: # %bb.0:
; ZVFH-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
; ZVFH-NEXT: vfneg.v v8, v8
; ZVFH-NEXT: ret
;
; ZVFHMIN-LABEL: vfneg_vv_nxv2f16:
; ZVFHMIN: # %bb.0:
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v9, v8
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; ZVFHMIN-NEXT: vfneg.v v9, v9
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9
; ZVFHMIN-NEXT: ret
%vb = fneg <vscale x 2 x half> %va
ret <vscale x 2 x half> %vb
}

define <vscale x 4 x half> @vfneg_vv_nxv4f16(<vscale x 4 x half> %va) {
; ZVFH-LABEL: vfneg_vv_nxv4f16:
; ZVFH: # %bb.0:
; ZVFH-NEXT: vsetvli a0, zero, e16, m1, ta, ma
; ZVFH-NEXT: vfneg.v v8, v8
; ZVFH-NEXT: ret
;
; ZVFHMIN-LABEL: vfneg_vv_nxv4f16:
; ZVFHMIN: # %bb.0:
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m1, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; ZVFHMIN-NEXT: vfneg.v v10, v10
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m1, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v10
; ZVFHMIN-NEXT: ret
%vb = fneg <vscale x 4 x half> %va
ret <vscale x 4 x half> %vb
}

define <vscale x 8 x half> @vfneg_vv_nxv8f16(<vscale x 8 x half> %va) {
; ZVFH-LABEL: vfneg_vv_nxv8f16:
; ZVFH: # %bb.0:
; ZVFH-NEXT: vsetvli a0, zero, e16, m2, ta, ma
; ZVFH-NEXT: vfneg.v v8, v8
; ZVFH-NEXT: ret
;
; ZVFHMIN-LABEL: vfneg_vv_nxv8f16:
; ZVFHMIN: # %bb.0:
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; ZVFHMIN-NEXT: vfneg.v v12, v12
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v12
; ZVFHMIN-NEXT: ret
%vb = fneg <vscale x 8 x half> %va
ret <vscale x 8 x half> %vb
}

define <vscale x 16 x half> @vfneg_vv_nxv16f16(<vscale x 16 x half> %va) {
; ZVFH-LABEL: vfneg_vv_nxv16f16:
; ZVFH: # %bb.0:
; ZVFH-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; ZVFH-NEXT: vfneg.v v8, v8
; ZVFH-NEXT: ret
;
; ZVFHMIN-LABEL: vfneg_vv_nxv16f16:
; ZVFHMIN: # %bb.0:
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfneg.v v16, v16
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
; ZVFHMIN-NEXT: ret
%vb = fneg <vscale x 16 x half> %va
ret <vscale x 16 x half> %vb
}
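
; nxv32f16 cannot be promoted in one shot because nxv32f32 is not a valid
; MVT, so the operation is first split into two nxv16f16 halves (v8-v11 and
; v12-v15 below), and each half is then widened, negated at f32, and narrowed
; as in the smaller cases.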
define <vscale x 32 x half> @vfneg_vv_nxv32f16(<vscale x 32 x half> %va) {
; ZVFH-LABEL: vfneg_vv_nxv32f16:
; ZVFH: # %bb.0:
; ZVFH-NEXT: vsetvli a0, zero, e16, m8, ta, ma
; ZVFH-NEXT: vfneg.v v8, v8
; ZVFH-NEXT: ret
;
; ZVFHMIN-LABEL: vfneg_vv_nxv32f16:
; ZVFHMIN: # %bb.0:
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfneg.v v16, v16
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfneg.v v16, v16
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v16
; ZVFHMIN-NEXT: ret
%vb = fneg <vscale x 32 x half> %va
ret <vscale x 32 x half> %vb
}
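
; fneg on f32 and f64 vectors is legal with +v under both RUN configurations,
; so the remaining tests lower identically and share the common CHECK prefix.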
define <vscale x 1 x float> @vfneg_vv_nxv1f32(<vscale x 1 x float> %va) {
; CHECK-LABEL: vfneg_vv_nxv1f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
; CHECK-NEXT: vfneg.v v8, v8
; CHECK-NEXT: ret
%vb = fneg <vscale x 1 x float> %va
ret <vscale x 1 x float> %vb
}

define <vscale x 2 x float> @vfneg_vv_nxv2f32(<vscale x 2 x float> %va) {
; CHECK-LABEL: vfneg_vv_nxv2f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma
; CHECK-NEXT: vfneg.v v8, v8
; CHECK-NEXT: ret
%vb = fneg <vscale x 2 x float> %va
ret <vscale x 2 x float> %vb
}

define <vscale x 4 x float> @vfneg_vv_nxv4f32(<vscale x 4 x float> %va) {
; CHECK-LABEL: vfneg_vv_nxv4f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma
; CHECK-NEXT: vfneg.v v8, v8
; CHECK-NEXT: ret
%vb = fneg <vscale x 4 x float> %va
ret <vscale x 4 x float> %vb
}

define <vscale x 8 x float> @vfneg_vv_nxv8f32(<vscale x 8 x float> %va) {
; CHECK-LABEL: vfneg_vv_nxv8f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
; CHECK-NEXT: vfneg.v v8, v8
; CHECK-NEXT: ret
%vb = fneg <vscale x 8 x float> %va
ret <vscale x 8 x float> %vb
}

define <vscale x 16 x float> @vfneg_vv_nxv16f32(<vscale x 16 x float> %va) {
; CHECK-LABEL: vfneg_vv_nxv16f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma
; CHECK-NEXT: vfneg.v v8, v8
; CHECK-NEXT: ret
%vb = fneg <vscale x 16 x float> %va
ret <vscale x 16 x float> %vb
}

define <vscale x 1 x double> @vfneg_vv_nxv1f64(<vscale x 1 x double> %va) {
; CHECK-LABEL: vfneg_vv_nxv1f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT: vfneg.v v8, v8
; CHECK-NEXT: ret
%vb = fneg <vscale x 1 x double> %va
ret <vscale x 1 x double> %vb
}

define <vscale x 2 x double> @vfneg_vv_nxv2f64(<vscale x 2 x double> %va) {
; CHECK-LABEL: vfneg_vv_nxv2f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma
; CHECK-NEXT: vfneg.v v8, v8
; CHECK-NEXT: ret
%vb = fneg <vscale x 2 x double> %va
ret <vscale x 2 x double> %vb
}

define <vscale x 4 x double> @vfneg_vv_nxv4f64(<vscale x 4 x double> %va) {
; CHECK-LABEL: vfneg_vv_nxv4f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma
; CHECK-NEXT: vfneg.v v8, v8
; CHECK-NEXT: ret
%vb = fneg <vscale x 4 x double> %va
ret <vscale x 4 x double> %vb
}

define <vscale x 8 x double> @vfneg_vv_nxv8f64(<vscale x 8 x double> %va) {
; CHECK-LABEL: vfneg_vv_nxv8f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
; CHECK-NEXT: vfneg.v v8, v8
; CHECK-NEXT: ret
%vb = fneg <vscale x 8 x double> %va
ret <vscale x 8 x double> %vb
}