; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+m,+d,+experimental-zfh,+experimental-v -target-abi=ilp32d \
; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefix=RV32
; RUN: llc -mtriple=riscv64 -mattr=+m,+d,+experimental-zfh,+experimental-v -target-abi=lp64d \
; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefix=RV64
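; Tests masked scatter (llvm.masked.scatter) codegen for scalable vectors of
; integer and floating-point element types. Pointers are XLEN bits wide, so
; the direct-pointer scatters lower to vsoxei32.v on RV32 and vsoxei64.v on
; RV64.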
declare void @llvm.masked.scatter.nxv1i8.nxv1p0i8(<vscale x 1 x i8>, <vscale x 1 x i8*>, i32, <vscale x 1 x i1>)
define void @mscatter_nxv1i8(<vscale x 1 x i8> %val, <vscale x 1 x i8*> %ptrs, <vscale x 1 x i1> %m) {
; RV32-LABEL: mscatter_nxv1i8:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a0, zero, e8, mf8, ta, mu
; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: mscatter_nxv1i8:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a0, zero, e8, mf8, ta, mu
; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t
; RV64-NEXT: ret
call void @llvm.masked.scatter.nxv1i8.nxv1p0i8(<vscale x 1 x i8> %val, <vscale x 1 x i8*> %ptrs, i32 1, <vscale x 1 x i1> %m)
ret void
}
declare void @llvm.masked.scatter.nxv2i8.nxv2p0i8(<vscale x 2 x i8>, <vscale x 2 x i8*>, i32, <vscale x 2 x i1>)
define void @mscatter_nxv2i8(<vscale x 2 x i8> %val, <vscale x 2 x i8*> %ptrs, <vscale x 2 x i1> %m) {
; RV32-LABEL: mscatter_nxv2i8:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a0, zero, e8, mf4, ta, mu
; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: mscatter_nxv2i8:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a0, zero, e8, mf4, ta, mu
; RV64-NEXT: vsoxei64.v v8, (zero), v10, v0.t
; RV64-NEXT: ret
call void @llvm.masked.scatter.nxv2i8.nxv2p0i8(<vscale x 2 x i8> %val, <vscale x 2 x i8*> %ptrs, i32 1, <vscale x 2 x i1> %m)
ret void
}
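; A scatter of a truncated value narrows the source with vnsrl.wi, one step
; per halving of SEW, before the indexed store.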
define void @mscatter_nxv2i16_truncstore_nxv2i8(<vscale x 2 x i16> %val, <vscale x 2 x i8*> %ptrs, <vscale x 2 x i1> %m) {
; RV32-LABEL: mscatter_nxv2i16_truncstore_nxv2i8:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a0, zero, e8, mf4, ta, mu
; RV32-NEXT: vnsrl.wi v8, v8, 0
; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: mscatter_nxv2i16_truncstore_nxv2i8:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a0, zero, e8, mf4, ta, mu
; RV64-NEXT: vnsrl.wi v8, v8, 0
; RV64-NEXT: vsoxei64.v v8, (zero), v10, v0.t
; RV64-NEXT: ret
%tval = trunc <vscale x 2 x i16> %val to <vscale x 2 x i8>
call void @llvm.masked.scatter.nxv2i8.nxv2p0i8(<vscale x 2 x i8> %tval, <vscale x 2 x i8*> %ptrs, i32 1, <vscale x 2 x i1> %m)
ret void
}
define void @mscatter_nxv2i32_truncstore_nxv2i8(<vscale x 2 x i32> %val, <vscale x 2 x i8*> %ptrs, <vscale x 2 x i1> %m) {
; RV32-LABEL: mscatter_nxv2i32_truncstore_nxv2i8:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a0, zero, e16, mf2, ta, mu
; RV32-NEXT: vnsrl.wi v8, v8, 0
; RV32-NEXT: vsetvli zero, zero, e8, mf4, ta, mu
; RV32-NEXT: vnsrl.wi v8, v8, 0
; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: mscatter_nxv2i32_truncstore_nxv2i8:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a0, zero, e16, mf2, ta, mu
; RV64-NEXT: vnsrl.wi v8, v8, 0
; RV64-NEXT: vsetvli zero, zero, e8, mf4, ta, mu
; RV64-NEXT: vnsrl.wi v8, v8, 0
; RV64-NEXT: vsoxei64.v v8, (zero), v10, v0.t
; RV64-NEXT: ret
%tval = trunc <vscale x 2 x i32> %val to <vscale x 2 x i8>
call void @llvm.masked.scatter.nxv2i8.nxv2p0i8(<vscale x 2 x i8> %tval, <vscale x 2 x i8*> %ptrs, i32 1, <vscale x 2 x i1> %m)
ret void
}
define void @mscatter_nxv2i64_truncstore_nxv2i8(<vscale x 2 x i64> %val, <vscale x 2 x i8*> %ptrs, <vscale x 2 x i1> %m) {
; RV32-LABEL: mscatter_nxv2i64_truncstore_nxv2i8:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a0, zero, e32, m1, ta, mu
; RV32-NEXT: vnsrl.wi v11, v8, 0
; RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
; RV32-NEXT: vnsrl.wi v8, v11, 0
; RV32-NEXT: vsetvli zero, zero, e8, mf4, ta, mu
; RV32-NEXT: vnsrl.wi v8, v8, 0
; RV32-NEXT: vsoxei32.v v8, (zero), v10, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: mscatter_nxv2i64_truncstore_nxv2i8:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a0, zero, e32, m1, ta, mu
; RV64-NEXT: vnsrl.wi v12, v8, 0
; RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
; RV64-NEXT: vnsrl.wi v8, v12, 0
; RV64-NEXT: vsetvli zero, zero, e8, mf4, ta, mu
; RV64-NEXT: vnsrl.wi v8, v8, 0
; RV64-NEXT: vsoxei64.v v8, (zero), v10, v0.t
; RV64-NEXT: ret
%tval = trunc <vscale x 2 x i64> %val to <vscale x 2 x i8>
call void @llvm.masked.scatter.nxv2i8.nxv2p0i8(<vscale x 2 x i8> %tval, <vscale x 2 x i8*> %ptrs, i32 1, <vscale x 2 x i1> %m)
ret void
}
declare void @llvm.masked.scatter.nxv4i8.nxv4p0i8(<vscale x 4 x i8>, <vscale x 4 x i8*>, i32, <vscale x 4 x i1>)
define void @mscatter_nxv4i8(<vscale x 4 x i8> %val, <vscale x 4 x i8*> %ptrs, <vscale x 4 x i1> %m) {
; RV32-LABEL: mscatter_nxv4i8:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a0, zero, e8, mf2, ta, mu
; RV32-NEXT: vsoxei32.v v8, (zero), v10, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: mscatter_nxv4i8:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a0, zero, e8, mf2, ta, mu
; RV64-NEXT: vsoxei64.v v8, (zero), v12, v0.t
; RV64-NEXT: ret
call void @llvm.masked.scatter.nxv4i8.nxv4p0i8(<vscale x 4 x i8> %val, <vscale x 4 x i8*> %ptrs, i32 1, <vscale x 4 x i1> %m)
ret void
}
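; An all-true mask selects the unmasked form of the indexed store.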
define void @mscatter_truemask_nxv4i8(<vscale x 4 x i8> %val, <vscale x 4 x i8*> %ptrs) {
; RV32-LABEL: mscatter_truemask_nxv4i8:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a0, zero, e8, mf2, ta, mu
; RV32-NEXT: vsoxei32.v v8, (zero), v10
; RV32-NEXT: ret
;
; RV64-LABEL: mscatter_truemask_nxv4i8:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a0, zero, e8, mf2, ta, mu
; RV64-NEXT: vsoxei64.v v8, (zero), v12
; RV64-NEXT: ret
%mhead = insertelement <vscale x 4 x i1> undef, i1 1, i32 0
%mtrue = shufflevector <vscale x 4 x i1> %mhead, <vscale x 4 x i1> undef, <vscale x 4 x i32> zeroinitializer
call void @llvm.masked.scatter.nxv4i8.nxv4p0i8(<vscale x 4 x i8> %val, <vscale x 4 x i8*> %ptrs, i32 1, <vscale x 4 x i1> %mtrue)
ret void
}
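; An all-false mask makes the scatter a no-op, so it folds away entirely.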
define void @mscatter_falsemask_nxv4i8(<vscale x 4 x i8> %val, <vscale x 4 x i8*> %ptrs) {
; RV32-LABEL: mscatter_falsemask_nxv4i8:
; RV32: # %bb.0:
; RV32-NEXT: ret
;
; RV64-LABEL: mscatter_falsemask_nxv4i8:
; RV64: # %bb.0:
; RV64-NEXT: ret
call void @llvm.masked.scatter.nxv4i8.nxv4p0i8(<vscale x 4 x i8> %val, <vscale x 4 x i8*> %ptrs, i32 1, <vscale x 4 x i1> zeroinitializer)
ret void
}
declare void @llvm.masked.scatter.nxv8i8.nxv8p0i8(<vscale x 8 x i8>, <vscale x 8 x i8*>, i32, <vscale x 8 x i1>)
define void @mscatter_nxv8i8(<vscale x 8 x i8> %val, <vscale x 8 x i8*> %ptrs, <vscale x 8 x i1> %m) {
; RV32-LABEL: mscatter_nxv8i8:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a0, zero, e8, m1, ta, mu
; RV32-NEXT: vsoxei32.v v8, (zero), v12, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: mscatter_nxv8i8:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a0, zero, e8, m1, ta, mu
; RV64-NEXT: vsoxei64.v v8, (zero), v16, v0.t
; RV64-NEXT: ret
call void @llvm.masked.scatter.nxv8i8.nxv8p0i8(<vscale x 8 x i8> %val, <vscale x 8 x i8*> %ptrs, i32 1, <vscale x 8 x i1> %m)
ret void
}
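; Base-plus-index addressing: the i8 offsets are sign-extended to pointer
; width; with 1-byte elements no scaling is needed.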
define void @mscatter_baseidx_nxv8i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i8> %idxs, <vscale x 8 x i1> %m) {
; RV32-LABEL: mscatter_baseidx_nxv8i8:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu
; RV32-NEXT: vsext.vf4 v12, v9
; RV32-NEXT: vsetvli zero, zero, e8, m1, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: mscatter_baseidx_nxv8i8:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV64-NEXT: vsext.vf8 v16, v9
; RV64-NEXT: vsetvli zero, zero, e8, m1, ta, mu
; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t
; RV64-NEXT: ret
%ptrs = getelementptr inbounds i8, i8* %base, <vscale x 8 x i8> %idxs
call void @llvm.masked.scatter.nxv8i8.nxv8p0i8(<vscale x 8 x i8> %val, <vscale x 8 x i8*> %ptrs, i32 1, <vscale x 8 x i1> %m)
ret void
}
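; i16 element scatters. Indexed addressing scales the offsets by 2 using
; vadd.vv (x+x) rather than a left shift.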
declare void @llvm.masked.scatter.nxv1i16.nxv1p0i16(<vscale x 1 x i16>, <vscale x 1 x i16*>, i32, <vscale x 1 x i1>)
define void @mscatter_nxv1i16(<vscale x 1 x i16> %val, <vscale x 1 x i16*> %ptrs, <vscale x 1 x i1> %m) {
; RV32-LABEL: mscatter_nxv1i16:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a0, zero, e16, mf4, ta, mu
; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: mscatter_nxv1i16:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a0, zero, e16, mf4, ta, mu
; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t
; RV64-NEXT: ret
call void @llvm.masked.scatter.nxv1i16.nxv1p0i16(<vscale x 1 x i16> %val, <vscale x 1 x i16*> %ptrs, i32 2, <vscale x 1 x i1> %m)
ret void
}
declare void @llvm.masked.scatter.nxv2i16.nxv2p0i16(<vscale x 2 x i16>, <vscale x 2 x i16*>, i32, <vscale x 2 x i1>)
define void @mscatter_nxv2i16(<vscale x 2 x i16> %val, <vscale x 2 x i16*> %ptrs, <vscale x 2 x i1> %m) {
; RV32-LABEL: mscatter_nxv2i16:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a0, zero, e16, mf2, ta, mu
; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: mscatter_nxv2i16:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a0, zero, e16, mf2, ta, mu
; RV64-NEXT: vsoxei64.v v8, (zero), v10, v0.t
; RV64-NEXT: ret
call void @llvm.masked.scatter.nxv2i16.nxv2p0i16(<vscale x 2 x i16> %val, <vscale x 2 x i16*> %ptrs, i32 2, <vscale x 2 x i1> %m)
ret void
}
define void @mscatter_nxv2i32_truncstore_nxv2i16(<vscale x 2 x i32> %val, <vscale x 2 x i16*> %ptrs, <vscale x 2 x i1> %m) {
; RV32-LABEL: mscatter_nxv2i32_truncstore_nxv2i16:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a0, zero, e16, mf2, ta, mu
; RV32-NEXT: vnsrl.wi v8, v8, 0
; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: mscatter_nxv2i32_truncstore_nxv2i16:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a0, zero, e16, mf2, ta, mu
; RV64-NEXT: vnsrl.wi v8, v8, 0
; RV64-NEXT: vsoxei64.v v8, (zero), v10, v0.t
; RV64-NEXT: ret
%tval = trunc <vscale x 2 x i32> %val to <vscale x 2 x i16>
call void @llvm.masked.scatter.nxv2i16.nxv2p0i16(<vscale x 2 x i16> %tval, <vscale x 2 x i16*> %ptrs, i32 2, <vscale x 2 x i1> %m)
ret void
}
define void @mscatter_nxv2i64_truncstore_nxv2i16(<vscale x 2 x i64> %val, <vscale x 2 x i16*> %ptrs, <vscale x 2 x i1> %m) {
; RV32-LABEL: mscatter_nxv2i64_truncstore_nxv2i16:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a0, zero, e32, m1, ta, mu
; RV32-NEXT: vnsrl.wi v11, v8, 0
; RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
; RV32-NEXT: vnsrl.wi v8, v11, 0
; RV32-NEXT: vsoxei32.v v8, (zero), v10, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: mscatter_nxv2i64_truncstore_nxv2i16:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a0, zero, e32, m1, ta, mu
; RV64-NEXT: vnsrl.wi v12, v8, 0
; RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
; RV64-NEXT: vnsrl.wi v8, v12, 0
; RV64-NEXT: vsoxei64.v v8, (zero), v10, v0.t
; RV64-NEXT: ret
%tval = trunc <vscale x 2 x i64> %val to <vscale x 2 x i16>
call void @llvm.masked.scatter.nxv2i16.nxv2p0i16(<vscale x 2 x i16> %tval, <vscale x 2 x i16*> %ptrs, i32 2, <vscale x 2 x i1> %m)
ret void
}
declare void @llvm.masked.scatter.nxv4i16.nxv4p0i16(<vscale x 4 x i16>, <vscale x 4 x i16*>, i32, <vscale x 4 x i1>)
define void @mscatter_nxv4i16(<vscale x 4 x i16> %val, <vscale x 4 x i16*> %ptrs, <vscale x 4 x i1> %m) {
; RV32-LABEL: mscatter_nxv4i16:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a0, zero, e16, m1, ta, mu
; RV32-NEXT: vsoxei32.v v8, (zero), v10, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: mscatter_nxv4i16:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a0, zero, e16, m1, ta, mu
; RV64-NEXT: vsoxei64.v v8, (zero), v12, v0.t
; RV64-NEXT: ret
call void @llvm.masked.scatter.nxv4i16.nxv4p0i16(<vscale x 4 x i16> %val, <vscale x 4 x i16*> %ptrs, i32 2, <vscale x 4 x i1> %m)
ret void
}
define void @mscatter_truemask_nxv4i16(<vscale x 4 x i16> %val, <vscale x 4 x i16*> %ptrs) {
; RV32-LABEL: mscatter_truemask_nxv4i16:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a0, zero, e16, m1, ta, mu
; RV32-NEXT: vsoxei32.v v8, (zero), v10
; RV32-NEXT: ret
;
; RV64-LABEL: mscatter_truemask_nxv4i16:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a0, zero, e16, m1, ta, mu
; RV64-NEXT: vsoxei64.v v8, (zero), v12
; RV64-NEXT: ret
%mhead = insertelement <vscale x 4 x i1> undef, i1 1, i32 0
%mtrue = shufflevector <vscale x 4 x i1> %mhead, <vscale x 4 x i1> undef, <vscale x 4 x i32> zeroinitializer
call void @llvm.masked.scatter.nxv4i16.nxv4p0i16(<vscale x 4 x i16> %val, <vscale x 4 x i16*> %ptrs, i32 2, <vscale x 4 x i1> %mtrue)
ret void
}
define void @mscatter_falsemask_nxv4i16(<vscale x 4 x i16> %val, <vscale x 4 x i16*> %ptrs) {
; RV32-LABEL: mscatter_falsemask_nxv4i16:
; RV32: # %bb.0:
; RV32-NEXT: ret
;
; RV64-LABEL: mscatter_falsemask_nxv4i16:
; RV64: # %bb.0:
; RV64-NEXT: ret
call void @llvm.masked.scatter.nxv4i16.nxv4p0i16(<vscale x 4 x i16> %val, <vscale x 4 x i16*> %ptrs, i32 2, <vscale x 4 x i1> zeroinitializer)
ret void
}
declare void @llvm.masked.scatter.nxv8i16.nxv8p0i16(<vscale x 8 x i16>, <vscale x 8 x i16*>, i32, <vscale x 8 x i1>)
define void @mscatter_nxv8i16(<vscale x 8 x i16> %val, <vscale x 8 x i16*> %ptrs, <vscale x 8 x i1> %m) {
; RV32-LABEL: mscatter_nxv8i16:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a0, zero, e16, m2, ta, mu
; RV32-NEXT: vsoxei32.v v8, (zero), v12, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: mscatter_nxv8i16:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a0, zero, e16, m2, ta, mu
; RV64-NEXT: vsoxei64.v v8, (zero), v16, v0.t
; RV64-NEXT: ret
call void @llvm.masked.scatter.nxv8i16.nxv8p0i16(<vscale x 8 x i16> %val, <vscale x 8 x i16*> %ptrs, i32 2, <vscale x 8 x i1> %m)
ret void
}
define void @mscatter_baseidx_nxv8i8_nxv8i16(<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i8> %idxs, <vscale x 8 x i1> %m) {
; RV32-LABEL: mscatter_baseidx_nxv8i8_nxv8i16:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu
; RV32-NEXT: vsext.vf4 v12, v10
; RV32-NEXT: vadd.vv v12, v12, v12
; RV32-NEXT: vsetvli zero, zero, e16, m2, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: mscatter_baseidx_nxv8i8_nxv8i16:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV64-NEXT: vsext.vf8 v16, v10
; RV64-NEXT: vadd.vv v16, v16, v16
; RV64-NEXT: vsetvli zero, zero, e16, m2, ta, mu
; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t
; RV64-NEXT: ret
%ptrs = getelementptr inbounds i16, i16* %base, <vscale x 8 x i8> %idxs
call void @llvm.masked.scatter.nxv8i16.nxv8p0i16(<vscale x 8 x i16> %val, <vscale x 8 x i16*> %ptrs, i32 2, <vscale x 8 x i1> %m)
ret void
}
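; Explicitly sign- or zero-extending the offsets before the GEP should fold
; into the vsext/vzext used to widen the index vector.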
define void @mscatter_baseidx_sext_nxv8i8_nxv8i16(<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i8> %idxs, <vscale x 8 x i1> %m) {
; RV32-LABEL: mscatter_baseidx_sext_nxv8i8_nxv8i16:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu
; RV32-NEXT: vsext.vf4 v12, v10
; RV32-NEXT: vadd.vv v12, v12, v12
; RV32-NEXT: vsetvli zero, zero, e16, m2, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: mscatter_baseidx_sext_nxv8i8_nxv8i16:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV64-NEXT: vsext.vf8 v16, v10
; RV64-NEXT: vadd.vv v16, v16, v16
; RV64-NEXT: vsetvli zero, zero, e16, m2, ta, mu
; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t
; RV64-NEXT: ret
%eidxs = sext <vscale x 8 x i8> %idxs to <vscale x 8 x i16>
%ptrs = getelementptr inbounds i16, i16* %base, <vscale x 8 x i16> %eidxs
call void @llvm.masked.scatter.nxv8i16.nxv8p0i16(<vscale x 8 x i16> %val, <vscale x 8 x i16*> %ptrs, i32 2, <vscale x 8 x i1> %m)
ret void
}
define void @mscatter_baseidx_zext_nxv8i8_nxv8i16(<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i8> %idxs, <vscale x 8 x i1> %m) {
; RV32-LABEL: mscatter_baseidx_zext_nxv8i8_nxv8i16:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu
; RV32-NEXT: vzext.vf4 v12, v10
; RV32-NEXT: vadd.vv v12, v12, v12
; RV32-NEXT: vsetvli zero, zero, e16, m2, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: mscatter_baseidx_zext_nxv8i8_nxv8i16:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV64-NEXT: vzext.vf8 v16, v10
; RV64-NEXT: vadd.vv v16, v16, v16
; RV64-NEXT: vsetvli zero, zero, e16, m2, ta, mu
; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t
; RV64-NEXT: ret
%eidxs = zext <vscale x 8 x i8> %idxs to <vscale x 8 x i16>
%ptrs = getelementptr inbounds i16, i16* %base, <vscale x 8 x i16> %eidxs
call void @llvm.masked.scatter.nxv8i16.nxv8p0i16(<vscale x 8 x i16> %val, <vscale x 8 x i16*> %ptrs, i32 2, <vscale x 8 x i1> %m)
ret void
}
define void @mscatter_baseidx_nxv8i16(<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m) {
; RV32-LABEL: mscatter_baseidx_nxv8i16:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu
; RV32-NEXT: vsext.vf2 v12, v10
; RV32-NEXT: vadd.vv v12, v12, v12
; RV32-NEXT: vsetvli zero, zero, e16, m2, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: mscatter_baseidx_nxv8i16:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV64-NEXT: vsext.vf4 v16, v10
; RV64-NEXT: vadd.vv v16, v16, v16
; RV64-NEXT: vsetvli zero, zero, e16, m2, ta, mu
; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t
; RV64-NEXT: ret
%ptrs = getelementptr inbounds i16, i16* %base, <vscale x 8 x i16> %idxs
call void @llvm.masked.scatter.nxv8i16.nxv8p0i16(<vscale x 8 x i16> %val, <vscale x 8 x i16*> %ptrs, i32 2, <vscale x 8 x i1> %m)
ret void
}
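; i32 element scatters; offsets are scaled by 4 with vsll.vi ..., 2.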
declare void @llvm.masked.scatter.nxv1i32.nxv1p0i32(<vscale x 1 x i32>, <vscale x 1 x i32*>, i32, <vscale x 1 x i1>)
define void @mscatter_nxv1i32(<vscale x 1 x i32> %val, <vscale x 1 x i32*> %ptrs, <vscale x 1 x i1> %m) {
; RV32-LABEL: mscatter_nxv1i32:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a0, zero, e32, mf2, ta, mu
; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: mscatter_nxv1i32:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a0, zero, e32, mf2, ta, mu
; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t
; RV64-NEXT: ret
call void @llvm.masked.scatter.nxv1i32.nxv1p0i32(<vscale x 1 x i32> %val, <vscale x 1 x i32*> %ptrs, i32 4, <vscale x 1 x i1> %m)
ret void
}
declare void @llvm.masked.scatter.nxv2i32.nxv2p0i32(<vscale x 2 x i32>, <vscale x 2 x i32*>, i32, <vscale x 2 x i1>)
define void @mscatter_nxv2i32(<vscale x 2 x i32> %val, <vscale x 2 x i32*> %ptrs, <vscale x 2 x i1> %m) {
; RV32-LABEL: mscatter_nxv2i32:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a0, zero, e32, m1, ta, mu
; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: mscatter_nxv2i32:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a0, zero, e32, m1, ta, mu
; RV64-NEXT: vsoxei64.v v8, (zero), v10, v0.t
; RV64-NEXT: ret
call void @llvm.masked.scatter.nxv2i32.nxv2p0i32(<vscale x 2 x i32> %val, <vscale x 2 x i32*> %ptrs, i32 4, <vscale x 2 x i1> %m)
ret void
}
define void @mscatter_nxv2i64_truncstore_nxv2i32(<vscale x 2 x i64> %val, <vscale x 2 x i32*> %ptrs, <vscale x 2 x i1> %m) {
; RV32-LABEL: mscatter_nxv2i64_truncstore_nxv2i32:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a0, zero, e32, m1, ta, mu
; RV32-NEXT: vnsrl.wi v11, v8, 0
; RV32-NEXT: vsoxei32.v v11, (zero), v10, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: mscatter_nxv2i64_truncstore_nxv2i32:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a0, zero, e32, m1, ta, mu
; RV64-NEXT: vnsrl.wi v12, v8, 0
; RV64-NEXT: vsoxei64.v v12, (zero), v10, v0.t
; RV64-NEXT: ret
%tval = trunc <vscale x 2 x i64> %val to <vscale x 2 x i32>
call void @llvm.masked.scatter.nxv2i32.nxv2p0i32(<vscale x 2 x i32> %tval, <vscale x 2 x i32*> %ptrs, i32 4, <vscale x 2 x i1> %m)
ret void
}
declare void @llvm.masked.scatter.nxv4i32.nxv4p0i32(<vscale x 4 x i32>, <vscale x 4 x i32*>, i32, <vscale x 4 x i1>)
define void @mscatter_nxv4i32(<vscale x 4 x i32> %val, <vscale x 4 x i32*> %ptrs, <vscale x 4 x i1> %m) {
; RV32-LABEL: mscatter_nxv4i32:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a0, zero, e32, m2, ta, mu
; RV32-NEXT: vsoxei32.v v8, (zero), v10, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: mscatter_nxv4i32:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a0, zero, e32, m2, ta, mu
; RV64-NEXT: vsoxei64.v v8, (zero), v12, v0.t
; RV64-NEXT: ret
call void @llvm.masked.scatter.nxv4i32.nxv4p0i32(<vscale x 4 x i32> %val, <vscale x 4 x i32*> %ptrs, i32 4, <vscale x 4 x i1> %m)
ret void
}
define void @mscatter_truemask_nxv4i32(<vscale x 4 x i32> %val, <vscale x 4 x i32*> %ptrs) {
; RV32-LABEL: mscatter_truemask_nxv4i32:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a0, zero, e32, m2, ta, mu
; RV32-NEXT: vsoxei32.v v8, (zero), v10
; RV32-NEXT: ret
;
; RV64-LABEL: mscatter_truemask_nxv4i32:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a0, zero, e32, m2, ta, mu
; RV64-NEXT: vsoxei64.v v8, (zero), v12
; RV64-NEXT: ret
%mhead = insertelement <vscale x 4 x i1> undef, i1 1, i32 0
%mtrue = shufflevector <vscale x 4 x i1> %mhead, <vscale x 4 x i1> undef, <vscale x 4 x i32> zeroinitializer
call void @llvm.masked.scatter.nxv4i32.nxv4p0i32(<vscale x 4 x i32> %val, <vscale x 4 x i32*> %ptrs, i32 4, <vscale x 4 x i1> %mtrue)
ret void
}
define void @mscatter_falsemask_nxv4i32(<vscale x 4 x i32> %val, <vscale x 4 x i32*> %ptrs) {
; RV32-LABEL: mscatter_falsemask_nxv4i32:
; RV32: # %bb.0:
; RV32-NEXT: ret
;
; RV64-LABEL: mscatter_falsemask_nxv4i32:
; RV64: # %bb.0:
; RV64-NEXT: ret
call void @llvm.masked.scatter.nxv4i32.nxv4p0i32(<vscale x 4 x i32> %val, <vscale x 4 x i32*> %ptrs, i32 4, <vscale x 4 x i1> zeroinitializer)
ret void
}
declare void @llvm.masked.scatter.nxv8i32.nxv8p0i32(<vscale x 8 x i32>, <vscale x 8 x i32*>, i32, <vscale x 8 x i1>)
define void @mscatter_nxv8i32(<vscale x 8 x i32> %val, <vscale x 8 x i32*> %ptrs, <vscale x 8 x i1> %m) {
; RV32-LABEL: mscatter_nxv8i32:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a0, zero, e32, m4, ta, mu
; RV32-NEXT: vsoxei32.v v8, (zero), v12, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: mscatter_nxv8i32:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a0, zero, e32, m4, ta, mu
; RV64-NEXT: vsoxei64.v v8, (zero), v16, v0.t
; RV64-NEXT: ret
call void @llvm.masked.scatter.nxv8i32.nxv8p0i32(<vscale x 8 x i32> %val, <vscale x 8 x i32*> %ptrs, i32 4, <vscale x 8 x i1> %m)
ret void
}
define void @mscatter_baseidx_nxv8i8_nxv8i32(<vscale x 8 x i32> %val, i32* %base, <vscale x 8 x i8> %idxs, <vscale x 8 x i1> %m) {
; RV32-LABEL: mscatter_baseidx_nxv8i8_nxv8i32:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu
; RV32-NEXT: vsext.vf4 v16, v12
; RV32-NEXT: vsll.vi v12, v16, 2
; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: mscatter_baseidx_nxv8i8_nxv8i32:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV64-NEXT: vsext.vf8 v16, v12
; RV64-NEXT: vsll.vi v16, v16, 2
; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t
; RV64-NEXT: ret
%ptrs = getelementptr inbounds i32, i32* %base, <vscale x 8 x i8> %idxs
call void @llvm.masked.scatter.nxv8i32.nxv8p0i32(<vscale x 8 x i32> %val, <vscale x 8 x i32*> %ptrs, i32 4, <vscale x 8 x i1> %m)
ret void
}
define void @mscatter_baseidx_sext_nxv8i8_nxv8i32(<vscale x 8 x i32> %val, i32* %base, <vscale x 8 x i8> %idxs, <vscale x 8 x i1> %m) {
; RV32-LABEL: mscatter_baseidx_sext_nxv8i8_nxv8i32:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu
; RV32-NEXT: vsext.vf4 v16, v12
; RV32-NEXT: vsll.vi v12, v16, 2
; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: mscatter_baseidx_sext_nxv8i8_nxv8i32:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV64-NEXT: vsext.vf8 v16, v12
; RV64-NEXT: vsll.vi v16, v16, 2
; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t
; RV64-NEXT: ret
%eidxs = sext <vscale x 8 x i8> %idxs to <vscale x 8 x i32>
%ptrs = getelementptr inbounds i32, i32* %base, <vscale x 8 x i32> %eidxs
call void @llvm.masked.scatter.nxv8i32.nxv8p0i32(<vscale x 8 x i32> %val, <vscale x 8 x i32*> %ptrs, i32 4, <vscale x 8 x i1> %m)
ret void
}
define void @mscatter_baseidx_zext_nxv8i8_nxv8i32(<vscale x 8 x i32> %val, i32* %base, <vscale x 8 x i8> %idxs, <vscale x 8 x i1> %m) {
; RV32-LABEL: mscatter_baseidx_zext_nxv8i8_nxv8i32:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu
; RV32-NEXT: vzext.vf4 v16, v12
; RV32-NEXT: vsll.vi v12, v16, 2
; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: mscatter_baseidx_zext_nxv8i8_nxv8i32:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV64-NEXT: vzext.vf8 v16, v12
; RV64-NEXT: vsll.vi v16, v16, 2
; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t
; RV64-NEXT: ret
%eidxs = zext <vscale x 8 x i8> %idxs to <vscale x 8 x i32>
%ptrs = getelementptr inbounds i32, i32* %base, <vscale x 8 x i32> %eidxs
call void @llvm.masked.scatter.nxv8i32.nxv8p0i32(<vscale x 8 x i32> %val, <vscale x 8 x i32*> %ptrs, i32 4, <vscale x 8 x i1> %m)
ret void
}
define void @mscatter_baseidx_nxv8i16_nxv8i32(<vscale x 8 x i32> %val, i32* %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m) {
; RV32-LABEL: mscatter_baseidx_nxv8i16_nxv8i32:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu
; RV32-NEXT: vsext.vf2 v16, v12
; RV32-NEXT: vsll.vi v12, v16, 2
; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: mscatter_baseidx_nxv8i16_nxv8i32:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV64-NEXT: vsext.vf4 v16, v12
; RV64-NEXT: vsll.vi v16, v16, 2
; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t
; RV64-NEXT: ret
%ptrs = getelementptr inbounds i32, i32* %base, <vscale x 8 x i16> %idxs
call void @llvm.masked.scatter.nxv8i32.nxv8p0i32(<vscale x 8 x i32> %val, <vscale x 8 x i32*> %ptrs, i32 4, <vscale x 8 x i1> %m)
ret void
}
define void @mscatter_baseidx_sext_nxv8i16_nxv8i32(<vscale x 8 x i32> %val, i32* %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m) {
; RV32-LABEL: mscatter_baseidx_sext_nxv8i16_nxv8i32:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu
; RV32-NEXT: vsext.vf2 v16, v12
; RV32-NEXT: vsll.vi v12, v16, 2
; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: mscatter_baseidx_sext_nxv8i16_nxv8i32:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV64-NEXT: vsext.vf4 v16, v12
; RV64-NEXT: vsll.vi v16, v16, 2
; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t
; RV64-NEXT: ret
%eidxs = sext <vscale x 8 x i16> %idxs to <vscale x 8 x i32>
%ptrs = getelementptr inbounds i32, i32* %base, <vscale x 8 x i32> %eidxs
call void @llvm.masked.scatter.nxv8i32.nxv8p0i32(<vscale x 8 x i32> %val, <vscale x 8 x i32*> %ptrs, i32 4, <vscale x 8 x i1> %m)
ret void
}
define void @mscatter_baseidx_zext_nxv8i16_nxv8i32(<vscale x 8 x i32> %val, i32* %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m) {
; RV32-LABEL: mscatter_baseidx_zext_nxv8i16_nxv8i32:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu
; RV32-NEXT: vzext.vf2 v16, v12
; RV32-NEXT: vsll.vi v12, v16, 2
; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: mscatter_baseidx_zext_nxv8i16_nxv8i32:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV64-NEXT: vzext.vf4 v16, v12
; RV64-NEXT: vsll.vi v16, v16, 2
; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t
; RV64-NEXT: ret
%eidxs = zext <vscale x 8 x i16> %idxs to <vscale x 8 x i32>
%ptrs = getelementptr inbounds i32, i32* %base, <vscale x 8 x i32> %eidxs
call void @llvm.masked.scatter.nxv8i32.nxv8p0i32(<vscale x 8 x i32> %val, <vscale x 8 x i32*> %ptrs, i32 4, <vscale x 8 x i1> %m)
ret void
}
define void @mscatter_baseidx_nxv8i32(<vscale x 8 x i32> %val, i32* %base, <vscale x 8 x i32> %idxs, <vscale x 8 x i1> %m) {
; RV32-LABEL: mscatter_baseidx_nxv8i32:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu
; RV32-NEXT: vsll.vi v12, v12, 2
; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: mscatter_baseidx_nxv8i32:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV64-NEXT: vsext.vf2 v16, v12
; RV64-NEXT: vsll.vi v16, v16, 2
; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t
; RV64-NEXT: ret
%ptrs = getelementptr inbounds i32, i32* %base, <vscale x 8 x i32> %idxs
call void @llvm.masked.scatter.nxv8i32.nxv8p0i32(<vscale x 8 x i32> %val, <vscale x 8 x i32*> %ptrs, i32 4, <vscale x 8 x i1> %m)
ret void
}
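; i64 element scatters; offsets are scaled by 8 with vsll.vi ..., 3. When the
; offsets are widened to i64 even on RV32, the store uses vsoxei64.v directly.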
declare void @llvm.masked.scatter.nxv1i64.nxv1p0i64(<vscale x 1 x i64>, <vscale x 1 x i64*>, i32, <vscale x 1 x i1>)
define void @mscatter_nxv1i64(<vscale x 1 x i64> %val, <vscale x 1 x i64*> %ptrs, <vscale x 1 x i1> %m) {
; RV32-LABEL: mscatter_nxv1i64:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu
; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: mscatter_nxv1i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a0, zero, e64, m1, ta, mu
; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t
; RV64-NEXT: ret
call void @llvm.masked.scatter.nxv1i64.nxv1p0i64(<vscale x 1 x i64> %val, <vscale x 1 x i64*> %ptrs, i32 8, <vscale x 1 x i1> %m)
ret void
}
declare void @llvm.masked.scatter.nxv2i64.nxv2p0i64(<vscale x 2 x i64>, <vscale x 2 x i64*>, i32, <vscale x 2 x i1>)
define void @mscatter_nxv2i64(<vscale x 2 x i64> %val, <vscale x 2 x i64*> %ptrs, <vscale x 2 x i1> %m) {
; RV32-LABEL: mscatter_nxv2i64:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, mu
; RV32-NEXT: vsoxei32.v v8, (zero), v10, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: mscatter_nxv2i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a0, zero, e64, m2, ta, mu
; RV64-NEXT: vsoxei64.v v8, (zero), v10, v0.t
; RV64-NEXT: ret
call void @llvm.masked.scatter.nxv2i64.nxv2p0i64(<vscale x 2 x i64> %val, <vscale x 2 x i64*> %ptrs, i32 8, <vscale x 2 x i1> %m)
ret void
}
declare void @llvm.masked.scatter.nxv4i64.nxv4p0i64(<vscale x 4 x i64>, <vscale x 4 x i64*>, i32, <vscale x 4 x i1>)
define void @mscatter_nxv4i64(<vscale x 4 x i64> %val, <vscale x 4 x i64*> %ptrs, <vscale x 4 x i1> %m) {
; RV32-LABEL: mscatter_nxv4i64:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, mu
; RV32-NEXT: vsoxei32.v v8, (zero), v12, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: mscatter_nxv4i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a0, zero, e64, m4, ta, mu
; RV64-NEXT: vsoxei64.v v8, (zero), v12, v0.t
; RV64-NEXT: ret
call void @llvm.masked.scatter.nxv4i64.nxv4p0i64(<vscale x 4 x i64> %val, <vscale x 4 x i64*> %ptrs, i32 8, <vscale x 4 x i1> %m)
ret void
}
define void @mscatter_truemask_nxv4i64(<vscale x 4 x i64> %val, <vscale x 4 x i64*> %ptrs) {
; RV32-LABEL: mscatter_truemask_nxv4i64:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, mu
; RV32-NEXT: vsoxei32.v v8, (zero), v12
; RV32-NEXT: ret
;
; RV64-LABEL: mscatter_truemask_nxv4i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a0, zero, e64, m4, ta, mu
; RV64-NEXT: vsoxei64.v v8, (zero), v12
; RV64-NEXT: ret
%mhead = insertelement <vscale x 4 x i1> undef, i1 1, i32 0
%mtrue = shufflevector <vscale x 4 x i1> %mhead, <vscale x 4 x i1> undef, <vscale x 4 x i32> zeroinitializer
call void @llvm.masked.scatter.nxv4i64.nxv4p0i64(<vscale x 4 x i64> %val, <vscale x 4 x i64*> %ptrs, i32 8, <vscale x 4 x i1> %mtrue)
ret void
}
define void @mscatter_falsemask_nxv4i64(<vscale x 4 x i64> %val, <vscale x 4 x i64*> %ptrs) {
; RV32-LABEL: mscatter_falsemask_nxv4i64:
; RV32: # %bb.0:
; RV32-NEXT: ret
;
; RV64-LABEL: mscatter_falsemask_nxv4i64:
; RV64: # %bb.0:
; RV64-NEXT: ret
call void @llvm.masked.scatter.nxv4i64.nxv4p0i64(<vscale x 4 x i64> %val, <vscale x 4 x i64*> %ptrs, i32 8, <vscale x 4 x i1> zeroinitializer)
ret void
}
declare void @llvm.masked.scatter.nxv8i64.nxv8p0i64(<vscale x 8 x i64>, <vscale x 8 x i64*>, i32, <vscale x 8 x i1>)
define void @mscatter_nxv8i64(<vscale x 8 x i64> %val, <vscale x 8 x i64*> %ptrs, <vscale x 8 x i1> %m) {
; RV32-LABEL: mscatter_nxv8i64:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu
; RV32-NEXT: vsoxei32.v v8, (zero), v16, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: mscatter_nxv8i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a0, zero, e64, m8, ta, mu
; RV64-NEXT: vsoxei64.v v8, (zero), v16, v0.t
; RV64-NEXT: ret
call void @llvm.masked.scatter.nxv8i64.nxv8p0i64(<vscale x 8 x i64> %val, <vscale x 8 x i64*> %ptrs, i32 8, <vscale x 8 x i1> %m)
ret void
}
define void @mscatter_baseidx_nxv8i8_nxv8i64(<vscale x 8 x i64> %val, i64* %base, <vscale x 8 x i8> %idxs, <vscale x 8 x i1> %m) {
; RV32-LABEL: mscatter_baseidx_nxv8i8_nxv8i64:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu
; RV32-NEXT: vsext.vf4 v20, v16
; RV32-NEXT: vsll.vi v16, v20, 3
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: mscatter_baseidx_nxv8i8_nxv8i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV64-NEXT: vsext.vf8 v24, v16
; RV64-NEXT: vsll.vi v16, v24, 3
; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t
; RV64-NEXT: ret
%ptrs = getelementptr inbounds i64, i64* %base, <vscale x 8 x i8> %idxs
call void @llvm.masked.scatter.nxv8i64.nxv8p0i64(<vscale x 8 x i64> %val, <vscale x 8 x i64*> %ptrs, i32 8, <vscale x 8 x i1> %m)
ret void
}
define void @mscatter_baseidx_sext_nxv8i8_nxv8i64(<vscale x 8 x i64> %val, i64* %base, <vscale x 8 x i8> %idxs, <vscale x 8 x i1> %m) {
; RV32-LABEL: mscatter_baseidx_sext_nxv8i8_nxv8i64:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV32-NEXT: vsext.vf8 v24, v16
; RV32-NEXT: vsll.vi v16, v24, 3
; RV32-NEXT: vsoxei64.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: mscatter_baseidx_sext_nxv8i8_nxv8i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV64-NEXT: vsext.vf8 v24, v16
; RV64-NEXT: vsll.vi v16, v24, 3
; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t
; RV64-NEXT: ret
%eidxs = sext <vscale x 8 x i8> %idxs to <vscale x 8 x i64>
%ptrs = getelementptr inbounds i64, i64* %base, <vscale x 8 x i64> %eidxs
call void @llvm.masked.scatter.nxv8i64.nxv8p0i64(<vscale x 8 x i64> %val, <vscale x 8 x i64*> %ptrs, i32 8, <vscale x 8 x i1> %m)
ret void
}
define void @mscatter_baseidx_zext_nxv8i8_nxv8i64(<vscale x 8 x i64> %val, i64* %base, <vscale x 8 x i8> %idxs, <vscale x 8 x i1> %m) {
; RV32-LABEL: mscatter_baseidx_zext_nxv8i8_nxv8i64:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV32-NEXT: vzext.vf8 v24, v16
; RV32-NEXT: vsll.vi v16, v24, 3
; RV32-NEXT: vsoxei64.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: mscatter_baseidx_zext_nxv8i8_nxv8i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV64-NEXT: vzext.vf8 v24, v16
; RV64-NEXT: vsll.vi v16, v24, 3
; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t
; RV64-NEXT: ret
%eidxs = zext <vscale x 8 x i8> %idxs to <vscale x 8 x i64>
%ptrs = getelementptr inbounds i64, i64* %base, <vscale x 8 x i64> %eidxs
call void @llvm.masked.scatter.nxv8i64.nxv8p0i64(<vscale x 8 x i64> %val, <vscale x 8 x i64*> %ptrs, i32 8, <vscale x 8 x i1> %m)
ret void
}
define void @mscatter_baseidx_nxv8i16_nxv8i64(<vscale x 8 x i64> %val, i64* %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m) {
; RV32-LABEL: mscatter_baseidx_nxv8i16_nxv8i64:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu
; RV32-NEXT: vsext.vf2 v20, v16
; RV32-NEXT: vsll.vi v16, v20, 3
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: mscatter_baseidx_nxv8i16_nxv8i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV64-NEXT: vsext.vf4 v24, v16
; RV64-NEXT: vsll.vi v16, v24, 3
; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t
; RV64-NEXT: ret
%ptrs = getelementptr inbounds i64, i64* %base, <vscale x 8 x i16> %idxs
call void @llvm.masked.scatter.nxv8i64.nxv8p0i64(<vscale x 8 x i64> %val, <vscale x 8 x i64*> %ptrs, i32 8, <vscale x 8 x i1> %m)
ret void
}
define void @mscatter_baseidx_sext_nxv8i16_nxv8i64(<vscale x 8 x i64> %val, i64* %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m) {
; RV32-LABEL: mscatter_baseidx_sext_nxv8i16_nxv8i64:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV32-NEXT: vsext.vf4 v24, v16
; RV32-NEXT: vsll.vi v16, v24, 3
; RV32-NEXT: vsoxei64.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: mscatter_baseidx_sext_nxv8i16_nxv8i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV64-NEXT: vsext.vf4 v24, v16
; RV64-NEXT: vsll.vi v16, v24, 3
; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t
; RV64-NEXT: ret
%eidxs = sext <vscale x 8 x i16> %idxs to <vscale x 8 x i64>
%ptrs = getelementptr inbounds i64, i64* %base, <vscale x 8 x i64> %eidxs
call void @llvm.masked.scatter.nxv8i64.nxv8p0i64(<vscale x 8 x i64> %val, <vscale x 8 x i64*> %ptrs, i32 8, <vscale x 8 x i1> %m)
ret void
}
define void @mscatter_baseidx_zext_nxv8i16_nxv8i64(<vscale x 8 x i64> %val, i64* %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m) {
; RV32-LABEL: mscatter_baseidx_zext_nxv8i16_nxv8i64:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV32-NEXT: vzext.vf4 v24, v16
; RV32-NEXT: vsll.vi v16, v24, 3
; RV32-NEXT: vsoxei64.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: mscatter_baseidx_zext_nxv8i16_nxv8i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV64-NEXT: vzext.vf4 v24, v16
; RV64-NEXT: vsll.vi v16, v24, 3
; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t
; RV64-NEXT: ret
%eidxs = zext <vscale x 8 x i16> %idxs to <vscale x 8 x i64>
%ptrs = getelementptr inbounds i64, i64* %base, <vscale x 8 x i64> %eidxs
call void @llvm.masked.scatter.nxv8i64.nxv8p0i64(<vscale x 8 x i64> %val, <vscale x 8 x i64*> %ptrs, i32 8, <vscale x 8 x i1> %m)
ret void
}
define void @mscatter_baseidx_nxv8i32_nxv8i64(<vscale x 8 x i64> %val, i64* %base, <vscale x 8 x i32> %idxs, <vscale x 8 x i1> %m) {
; RV32-LABEL: mscatter_baseidx_nxv8i32_nxv8i64:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu
; RV32-NEXT: vsll.vi v16, v16, 3
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: mscatter_baseidx_nxv8i32_nxv8i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV64-NEXT: vsext.vf2 v24, v16
; RV64-NEXT: vsll.vi v16, v24, 3
; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t
; RV64-NEXT: ret
%ptrs = getelementptr inbounds i64, i64* %base, <vscale x 8 x i32> %idxs
call void @llvm.masked.scatter.nxv8i64.nxv8p0i64(<vscale x 8 x i64> %val, <vscale x 8 x i64*> %ptrs, i32 8, <vscale x 8 x i1> %m)
ret void
}
define void @mscatter_baseidx_sext_nxv8i32_nxv8i64(<vscale x 8 x i64> %val, i64* %base, <vscale x 8 x i32> %idxs, <vscale x 8 x i1> %m) {
; RV32-LABEL: mscatter_baseidx_sext_nxv8i32_nxv8i64:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV32-NEXT: vsext.vf2 v24, v16
; RV32-NEXT: vsll.vi v16, v24, 3
; RV32-NEXT: vsoxei64.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: mscatter_baseidx_sext_nxv8i32_nxv8i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV64-NEXT: vsext.vf2 v24, v16
; RV64-NEXT: vsll.vi v16, v24, 3
; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t
; RV64-NEXT: ret
%eidxs = sext <vscale x 8 x i32> %idxs to <vscale x 8 x i64>
%ptrs = getelementptr inbounds i64, i64* %base, <vscale x 8 x i64> %eidxs
call void @llvm.masked.scatter.nxv8i64.nxv8p0i64(<vscale x 8 x i64> %val, <vscale x 8 x i64*> %ptrs, i32 8, <vscale x 8 x i1> %m)
ret void
}
define void @mscatter_baseidx_zext_nxv8i32_nxv8i64(<vscale x 8 x i64> %val, i64* %base, <vscale x 8 x i32> %idxs, <vscale x 8 x i1> %m) {
; RV32-LABEL: mscatter_baseidx_zext_nxv8i32_nxv8i64:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV32-NEXT: vzext.vf2 v24, v16
; RV32-NEXT: vsll.vi v16, v24, 3
; RV32-NEXT: vsoxei64.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: mscatter_baseidx_zext_nxv8i32_nxv8i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV64-NEXT: vzext.vf2 v24, v16
; RV64-NEXT: vsll.vi v16, v24, 3
; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t
; RV64-NEXT: ret
%eidxs = zext <vscale x 8 x i32> %idxs to <vscale x 8 x i64>
%ptrs = getelementptr inbounds i64, i64* %base, <vscale x 8 x i64> %eidxs
call void @llvm.masked.scatter.nxv8i64.nxv8p0i64(<vscale x 8 x i64> %val, <vscale x 8 x i64*> %ptrs, i32 8, <vscale x 8 x i1> %m)
ret void
}
define void @mscatter_baseidx_nxv8i64(<vscale x 8 x i64> %val, i64* %base, <vscale x 8 x i64> %idxs, <vscale x 8 x i1> %m) {
; RV32-LABEL: mscatter_baseidx_nxv8i64:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV32-NEXT: vsll.vi v16, v16, 3
; RV32-NEXT: vsoxei64.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: mscatter_baseidx_nxv8i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV64-NEXT: vsll.vi v16, v16, 3
; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t
; RV64-NEXT: ret
%ptrs = getelementptr inbounds i64, i64* %base, <vscale x 8 x i64> %idxs
call void @llvm.masked.scatter.nxv8i64.nxv8p0i64(<vscale x 8 x i64> %val, <vscale x 8 x i64*> %ptrs, i32 8, <vscale x 8 x i1> %m)
ret void
}
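; f16 element scatters mirror the i16 cases.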
declare void @llvm.masked.scatter.nxv1f16.nxv1p0f16(<vscale x 1 x half>, <vscale x 1 x half*>, i32, <vscale x 1 x i1>)
define void @mscatter_nxv1f16(<vscale x 1 x half> %val, <vscale x 1 x half*> %ptrs, <vscale x 1 x i1> %m) {
; RV32-LABEL: mscatter_nxv1f16:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a0, zero, e16, mf4, ta, mu
; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: mscatter_nxv1f16:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a0, zero, e16, mf4, ta, mu
; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t
; RV64-NEXT: ret
call void @llvm.masked.scatter.nxv1f16.nxv1p0f16(<vscale x 1 x half> %val, <vscale x 1 x half*> %ptrs, i32 2, <vscale x 1 x i1> %m)
ret void
}
declare void @llvm.masked.scatter.nxv2f16.nxv2p0f16(<vscale x 2 x half>, <vscale x 2 x half*>, i32, <vscale x 2 x i1>)
define void @mscatter_nxv2f16(<vscale x 2 x half> %val, <vscale x 2 x half*> %ptrs, <vscale x 2 x i1> %m) {
; RV32-LABEL: mscatter_nxv2f16:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a0, zero, e16, mf2, ta, mu
; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: mscatter_nxv2f16:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a0, zero, e16, mf2, ta, mu
; RV64-NEXT: vsoxei64.v v8, (zero), v10, v0.t
; RV64-NEXT: ret
call void @llvm.masked.scatter.nxv2f16.nxv2p0f16(<vscale x 2 x half> %val, <vscale x 2 x half*> %ptrs, i32 2, <vscale x 2 x i1> %m)
ret void
}
declare void @llvm.masked.scatter.nxv4f16.nxv4p0f16(<vscale x 4 x half>, <vscale x 4 x half*>, i32, <vscale x 4 x i1>)
define void @mscatter_nxv4f16(<vscale x 4 x half> %val, <vscale x 4 x half*> %ptrs, <vscale x 4 x i1> %m) {
; RV32-LABEL: mscatter_nxv4f16:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a0, zero, e16, m1, ta, mu
; RV32-NEXT: vsoxei32.v v8, (zero), v10, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: mscatter_nxv4f16:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a0, zero, e16, m1, ta, mu
; RV64-NEXT: vsoxei64.v v8, (zero), v12, v0.t
; RV64-NEXT: ret
call void @llvm.masked.scatter.nxv4f16.nxv4p0f16(<vscale x 4 x half> %val, <vscale x 4 x half*> %ptrs, i32 2, <vscale x 4 x i1> %m)
ret void
}
define void @mscatter_truemask_nxv4f16(<vscale x 4 x half> %val, <vscale x 4 x half*> %ptrs) {
; RV32-LABEL: mscatter_truemask_nxv4f16:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a0, zero, e16, m1, ta, mu
; RV32-NEXT: vsoxei32.v v8, (zero), v10
; RV32-NEXT: ret
;
; RV64-LABEL: mscatter_truemask_nxv4f16:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a0, zero, e16, m1, ta, mu
; RV64-NEXT: vsoxei64.v v8, (zero), v12
; RV64-NEXT: ret
%mhead = insertelement <vscale x 4 x i1> undef, i1 1, i32 0
%mtrue = shufflevector <vscale x 4 x i1> %mhead, <vscale x 4 x i1> undef, <vscale x 4 x i32> zeroinitializer
call void @llvm.masked.scatter.nxv4f16.nxv4p0f16(<vscale x 4 x half> %val, <vscale x 4 x half*> %ptrs, i32 2, <vscale x 4 x i1> %mtrue)
ret void
}
define void @mscatter_falsemask_nxv4f16(<vscale x 4 x half> %val, <vscale x 4 x half*> %ptrs) {
; RV32-LABEL: mscatter_falsemask_nxv4f16:
; RV32: # %bb.0:
; RV32-NEXT: ret
;
; RV64-LABEL: mscatter_falsemask_nxv4f16:
; RV64: # %bb.0:
; RV64-NEXT: ret
call void @llvm.masked.scatter.nxv4f16.nxv4p0f16(<vscale x 4 x half> %val, <vscale x 4 x half*> %ptrs, i32 2, <vscale x 4 x i1> zeroinitializer)
ret void
}
declare void @llvm.masked.scatter.nxv8f16.nxv8p0f16(<vscale x 8 x half>, <vscale x 8 x half*>, i32, <vscale x 8 x i1>)
define void @mscatter_nxv8f16(<vscale x 8 x half> %val, <vscale x 8 x half*> %ptrs, <vscale x 8 x i1> %m) {
; RV32-LABEL: mscatter_nxv8f16:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a0, zero, e16, m2, ta, mu
; RV32-NEXT: vsoxei32.v v8, (zero), v12, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: mscatter_nxv8f16:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a0, zero, e16, m2, ta, mu
; RV64-NEXT: vsoxei64.v v8, (zero), v16, v0.t
; RV64-NEXT: ret
call void @llvm.masked.scatter.nxv8f16.nxv8p0f16(<vscale x 8 x half> %val, <vscale x 8 x half*> %ptrs, i32 2, <vscale x 8 x i1> %m)
ret void
}
define void @mscatter_baseidx_nxv8i8_nxv8f16(<vscale x 8 x half> %val, half* %base, <vscale x 8 x i8> %idxs, <vscale x 8 x i1> %m) {
; RV32-LABEL: mscatter_baseidx_nxv8i8_nxv8f16:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu
; RV32-NEXT: vsext.vf4 v12, v10
; RV32-NEXT: vadd.vv v12, v12, v12
; RV32-NEXT: vsetvli zero, zero, e16, m2, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: mscatter_baseidx_nxv8i8_nxv8f16:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV64-NEXT: vsext.vf8 v16, v10
; RV64-NEXT: vadd.vv v16, v16, v16
; RV64-NEXT: vsetvli zero, zero, e16, m2, ta, mu
; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t
; RV64-NEXT: ret
%ptrs = getelementptr inbounds half, half* %base, <vscale x 8 x i8> %idxs
call void @llvm.masked.scatter.nxv8f16.nxv8p0f16(<vscale x 8 x half> %val, <vscale x 8 x half*> %ptrs, i32 2, <vscale x 8 x i1> %m)
ret void
}
define void @mscatter_baseidx_sext_nxv8i8_nxv8f16(<vscale x 8 x half> %val, half* %base, <vscale x 8 x i8> %idxs, <vscale x 8 x i1> %m) {
; RV32-LABEL: mscatter_baseidx_sext_nxv8i8_nxv8f16:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu
; RV32-NEXT: vsext.vf4 v12, v10
; RV32-NEXT: vadd.vv v12, v12, v12
; RV32-NEXT: vsetvli zero, zero, e16, m2, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: mscatter_baseidx_sext_nxv8i8_nxv8f16:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV64-NEXT: vsext.vf8 v16, v10
; RV64-NEXT: vadd.vv v16, v16, v16
; RV64-NEXT: vsetvli zero, zero, e16, m2, ta, mu
; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t
; RV64-NEXT: ret
%eidxs = sext <vscale x 8 x i8> %idxs to <vscale x 8 x i16>
%ptrs = getelementptr inbounds half, half* %base, <vscale x 8 x i16> %eidxs
call void @llvm.masked.scatter.nxv8f16.nxv8p0f16(<vscale x 8 x half> %val, <vscale x 8 x half*> %ptrs, i32 2, <vscale x 8 x i1> %m)
ret void
}
define void @mscatter_baseidx_zext_nxv8i8_nxv8f16(<vscale x 8 x half> %val, half* %base, <vscale x 8 x i8> %idxs, <vscale x 8 x i1> %m) {
; RV32-LABEL: mscatter_baseidx_zext_nxv8i8_nxv8f16:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu
; RV32-NEXT: vzext.vf4 v12, v10
; RV32-NEXT: vadd.vv v12, v12, v12
; RV32-NEXT: vsetvli zero, zero, e16, m2, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: mscatter_baseidx_zext_nxv8i8_nxv8f16:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV64-NEXT: vzext.vf8 v16, v10
; RV64-NEXT: vadd.vv v16, v16, v16
; RV64-NEXT: vsetvli zero, zero, e16, m2, ta, mu
; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t
; RV64-NEXT: ret
%eidxs = zext <vscale x 8 x i8> %idxs to <vscale x 8 x i16>
%ptrs = getelementptr inbounds half, half* %base, <vscale x 8 x i16> %eidxs
call void @llvm.masked.scatter.nxv8f16.nxv8p0f16(<vscale x 8 x half> %val, <vscale x 8 x half*> %ptrs, i32 2, <vscale x 8 x i1> %m)
ret void
}
define void @mscatter_baseidx_nxv8f16(<vscale x 8 x half> %val, half* %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m) {
; RV32-LABEL: mscatter_baseidx_nxv8f16:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu
; RV32-NEXT: vsext.vf2 v12, v10
; RV32-NEXT: vadd.vv v12, v12, v12
; RV32-NEXT: vsetvli zero, zero, e16, m2, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: mscatter_baseidx_nxv8f16:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV64-NEXT: vsext.vf4 v16, v10
; RV64-NEXT: vadd.vv v16, v16, v16
; RV64-NEXT: vsetvli zero, zero, e16, m2, ta, mu
; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t
; RV64-NEXT: ret
%ptrs = getelementptr inbounds half, half* %base, <vscale x 8 x i16> %idxs
call void @llvm.masked.scatter.nxv8f16.nxv8p0f16(<vscale x 8 x half> %val, <vscale x 8 x half*> %ptrs, i32 2, <vscale x 8 x i1> %m)
ret void
}
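; f32 element scatters mirror the i32 cases.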
declare void @llvm.masked.scatter.nxv1f32.nxv1p0f32(<vscale x 1 x float>, <vscale x 1 x float*>, i32, <vscale x 1 x i1>)
define void @mscatter_nxv1f32(<vscale x 1 x float> %val, <vscale x 1 x float*> %ptrs, <vscale x 1 x i1> %m) {
; RV32-LABEL: mscatter_nxv1f32:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a0, zero, e32, mf2, ta, mu
; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: mscatter_nxv1f32:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a0, zero, e32, mf2, ta, mu
; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t
; RV64-NEXT: ret
call void @llvm.masked.scatter.nxv1f32.nxv1p0f32(<vscale x 1 x float> %val, <vscale x 1 x float*> %ptrs, i32 4, <vscale x 1 x i1> %m)
ret void
}
declare void @llvm.masked.scatter.nxv2f32.nxv2p0f32(<vscale x 2 x float>, <vscale x 2 x float*>, i32, <vscale x 2 x i1>)
define void @mscatter_nxv2f32(<vscale x 2 x float> %val, <vscale x 2 x float*> %ptrs, <vscale x 2 x i1> %m) {
; RV32-LABEL: mscatter_nxv2f32:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a0, zero, e32, m1, ta, mu
; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: mscatter_nxv2f32:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a0, zero, e32, m1, ta, mu
; RV64-NEXT: vsoxei64.v v8, (zero), v10, v0.t
; RV64-NEXT: ret
call void @llvm.masked.scatter.nxv2f32.nxv2p0f32(<vscale x 2 x float> %val, <vscale x 2 x float*> %ptrs, i32 4, <vscale x 2 x i1> %m)
ret void
}
declare void @llvm.masked.scatter.nxv4f32.nxv4p0f32(<vscale x 4 x float>, <vscale x 4 x float*>, i32, <vscale x 4 x i1>)
define void @mscatter_nxv4f32(<vscale x 4 x float> %val, <vscale x 4 x float*> %ptrs, <vscale x 4 x i1> %m) {
; RV32-LABEL: mscatter_nxv4f32:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a0, zero, e32, m2, ta, mu
; RV32-NEXT: vsoxei32.v v8, (zero), v10, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: mscatter_nxv4f32:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a0, zero, e32, m2, ta, mu
; RV64-NEXT: vsoxei64.v v8, (zero), v12, v0.t
; RV64-NEXT: ret
call void @llvm.masked.scatter.nxv4f32.nxv4p0f32(<vscale x 4 x float> %val, <vscale x 4 x float*> %ptrs, i32 4, <vscale x 4 x i1> %m)
ret void
}
define void @mscatter_truemask_nxv4f32(<vscale x 4 x float> %val, <vscale x 4 x float*> %ptrs) {
; RV32-LABEL: mscatter_truemask_nxv4f32:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a0, zero, e32, m2, ta, mu
; RV32-NEXT: vsoxei32.v v8, (zero), v10
; RV32-NEXT: ret
;
; RV64-LABEL: mscatter_truemask_nxv4f32:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a0, zero, e32, m2, ta, mu
; RV64-NEXT: vsoxei64.v v8, (zero), v12
; RV64-NEXT: ret
%mhead = insertelement <vscale x 4 x i1> undef, i1 1, i32 0
%mtrue = shufflevector <vscale x 4 x i1> %mhead, <vscale x 4 x i1> undef, <vscale x 4 x i32> zeroinitializer
call void @llvm.masked.scatter.nxv4f32.nxv4p0f32(<vscale x 4 x float> %val, <vscale x 4 x float*> %ptrs, i32 4, <vscale x 4 x i1> %mtrue)
ret void
}
define void @mscatter_falsemask_nxv4f32(<vscale x 4 x float> %val, <vscale x 4 x float*> %ptrs) {
; RV32-LABEL: mscatter_falsemask_nxv4f32:
; RV32: # %bb.0:
; RV32-NEXT: ret
;
; RV64-LABEL: mscatter_falsemask_nxv4f32:
; RV64: # %bb.0:
; RV64-NEXT: ret
call void @llvm.masked.scatter.nxv4f32.nxv4p0f32(<vscale x 4 x float> %val, <vscale x 4 x float*> %ptrs, i32 4, <vscale x 4 x i1> zeroinitializer)
ret void
}
declare void @llvm.masked.scatter.nxv8f32.nxv8p0f32(<vscale x 8 x float>, <vscale x 8 x float*>, i32, <vscale x 8 x i1>)
define void @mscatter_nxv8f32(<vscale x 8 x float> %val, <vscale x 8 x float*> %ptrs, <vscale x 8 x i1> %m) {
; RV32-LABEL: mscatter_nxv8f32:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a0, zero, e32, m4, ta, mu
; RV32-NEXT: vsoxei32.v v8, (zero), v12, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: mscatter_nxv8f32:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a0, zero, e32, m4, ta, mu
; RV64-NEXT: vsoxei64.v v8, (zero), v16, v0.t
; RV64-NEXT: ret
call void @llvm.masked.scatter.nxv8f32.nxv8p0f32(<vscale x 8 x float> %val, <vscale x 8 x float*> %ptrs, i32 4, <vscale x 8 x i1> %m)
ret void
}
define void @mscatter_baseidx_nxv8i8_nxv8f32(<vscale x 8 x float> %val, float* %base, <vscale x 8 x i8> %idxs, <vscale x 8 x i1> %m) {
; RV32-LABEL: mscatter_baseidx_nxv8i8_nxv8f32:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu
; RV32-NEXT: vsext.vf4 v16, v12
; RV32-NEXT: vsll.vi v12, v16, 2
; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: mscatter_baseidx_nxv8i8_nxv8f32:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV64-NEXT: vsext.vf8 v16, v12
; RV64-NEXT: vsll.vi v16, v16, 2
; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t
; RV64-NEXT: ret
%ptrs = getelementptr inbounds float, float* %base, <vscale x 8 x i8> %idxs
call void @llvm.masked.scatter.nxv8f32.nxv8p0f32(<vscale x 8 x float> %val, <vscale x 8 x float*> %ptrs, i32 4, <vscale x 8 x i1> %m)
ret void
}
define void @mscatter_baseidx_sext_nxv8i8_nxv8f32(<vscale x 8 x float> %val, float* %base, <vscale x 8 x i8> %idxs, <vscale x 8 x i1> %m) {
; RV32-LABEL: mscatter_baseidx_sext_nxv8i8_nxv8f32:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu
; RV32-NEXT: vsext.vf4 v16, v12
; RV32-NEXT: vsll.vi v12, v16, 2
; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: mscatter_baseidx_sext_nxv8i8_nxv8f32:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV64-NEXT: vsext.vf8 v16, v12
; RV64-NEXT: vsll.vi v16, v16, 2
; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t
; RV64-NEXT: ret
%eidxs = sext <vscale x 8 x i8> %idxs to <vscale x 8 x i32>
%ptrs = getelementptr inbounds float, float* %base, <vscale x 8 x i32> %eidxs
call void @llvm.masked.scatter.nxv8f32.nxv8p0f32(<vscale x 8 x float> %val, <vscale x 8 x float*> %ptrs, i32 4, <vscale x 8 x i1> %m)
ret void
}
define void @mscatter_baseidx_zext_nxv8i8_nxv8f32(<vscale x 8 x float> %val, float* %base, <vscale x 8 x i8> %idxs, <vscale x 8 x i1> %m) {
; RV32-LABEL: mscatter_baseidx_zext_nxv8i8_nxv8f32:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu
; RV32-NEXT: vzext.vf4 v16, v12
; RV32-NEXT: vsll.vi v12, v16, 2
; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: mscatter_baseidx_zext_nxv8i8_nxv8f32:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV64-NEXT: vzext.vf8 v16, v12
; RV64-NEXT: vsll.vi v16, v16, 2
; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t
; RV64-NEXT: ret
%eidxs = zext <vscale x 8 x i8> %idxs to <vscale x 8 x i32>
%ptrs = getelementptr inbounds float, float* %base, <vscale x 8 x i32> %eidxs
call void @llvm.masked.scatter.nxv8f32.nxv8p0f32(<vscale x 8 x float> %val, <vscale x 8 x float*> %ptrs, i32 4, <vscale x 8 x i1> %m)
ret void
}
define void @mscatter_baseidx_nxv8i16_nxv8f32(<vscale x 8 x float> %val, float* %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m) {
; RV32-LABEL: mscatter_baseidx_nxv8i16_nxv8f32:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu
; RV32-NEXT: vsext.vf2 v16, v12
; RV32-NEXT: vsll.vi v12, v16, 2
; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: mscatter_baseidx_nxv8i16_nxv8f32:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV64-NEXT: vsext.vf4 v16, v12
; RV64-NEXT: vsll.vi v16, v16, 2
; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t
; RV64-NEXT: ret
%ptrs = getelementptr inbounds float, float* %base, <vscale x 8 x i16> %idxs
call void @llvm.masked.scatter.nxv8f32.nxv8p0f32(<vscale x 8 x float> %val, <vscale x 8 x float*> %ptrs, i32 4, <vscale x 8 x i1> %m)
ret void
}
define void @mscatter_baseidx_sext_nxv8i16_nxv8f32(<vscale x 8 x float> %val, float* %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m) {
; RV32-LABEL: mscatter_baseidx_sext_nxv8i16_nxv8f32:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu
; RV32-NEXT: vsext.vf2 v16, v12
; RV32-NEXT: vsll.vi v12, v16, 2
; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: mscatter_baseidx_sext_nxv8i16_nxv8f32:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV64-NEXT: vsext.vf4 v16, v12
; RV64-NEXT: vsll.vi v16, v16, 2
; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t
; RV64-NEXT: ret
%eidxs = sext <vscale x 8 x i16> %idxs to <vscale x 8 x i32>
%ptrs = getelementptr inbounds float, float* %base, <vscale x 8 x i32> %eidxs
call void @llvm.masked.scatter.nxv8f32.nxv8p0f32(<vscale x 8 x float> %val, <vscale x 8 x float*> %ptrs, i32 4, <vscale x 8 x i1> %m)
ret void
}

define void @mscatter_baseidx_zext_nxv8i16_nxv8f32(<vscale x 8 x float> %val, float* %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m) {
; RV32-LABEL: mscatter_baseidx_zext_nxv8i16_nxv8f32:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu
; RV32-NEXT: vzext.vf2 v16, v12
; RV32-NEXT: vsll.vi v12, v16, 2
; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: mscatter_baseidx_zext_nxv8i16_nxv8f32:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV64-NEXT: vzext.vf4 v16, v12
; RV64-NEXT: vsll.vi v16, v16, 2
; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t
; RV64-NEXT: ret
%eidxs = zext <vscale x 8 x i16> %idxs to <vscale x 8 x i32>
%ptrs = getelementptr inbounds float, float* %base, <vscale x 8 x i32> %eidxs
call void @llvm.masked.scatter.nxv8f32.nxv8p0f32(<vscale x 8 x float> %val, <vscale x 8 x float*> %ptrs, i32 4, <vscale x 8 x i1> %m)
ret void
}

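; i32 indices already match RV32's pointer width, so RV32 only needs the
; scaling shift; RV64 still sign-extends to 64 bits first.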
define void @mscatter_baseidx_nxv8f32(<vscale x 8 x float> %val, float* %base, <vscale x 8 x i32> %idxs, <vscale x 8 x i1> %m) {
; RV32-LABEL: mscatter_baseidx_nxv8f32:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu
; RV32-NEXT: vsll.vi v12, v12, 2
; RV32-NEXT: vsoxei32.v v8, (a0), v12, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: mscatter_baseidx_nxv8f32:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV64-NEXT: vsext.vf2 v16, v12
; RV64-NEXT: vsll.vi v16, v16, 2
; RV64-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t
; RV64-NEXT: ret
%ptrs = getelementptr inbounds float, float* %base, <vscale x 8 x i32> %idxs
call void @llvm.masked.scatter.nxv8f32.nxv8p0f32(<vscale x 8 x float> %val, <vscale x 8 x float*> %ptrs, i32 4, <vscale x 8 x i1> %m)
ret void
}

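; Scatters of scalable f64 vectors. The pointer operands are 32-bit on RV32
; and 64-bit on RV64, hence vsoxei32 vs. vsoxei64.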
declare void @llvm.masked.scatter.nxv1f64.nxv1p0f64(<vscale x 1 x double>, <vscale x 1 x double*>, i32, <vscale x 1 x i1>)

define void @mscatter_nxv1f64(<vscale x 1 x double> %val, <vscale x 1 x double*> %ptrs, <vscale x 1 x i1> %m) {
; RV32-LABEL: mscatter_nxv1f64:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu
; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: mscatter_nxv1f64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a0, zero, e64, m1, ta, mu
; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t
; RV64-NEXT: ret
call void @llvm.masked.scatter.nxv1f64.nxv1p0f64(<vscale x 1 x double> %val, <vscale x 1 x double*> %ptrs, i32 8, <vscale x 1 x i1> %m)
ret void
}

declare void @llvm.masked.scatter.nxv2f64.nxv2p0f64(<vscale x 2 x double>, <vscale x 2 x double*>, i32, <vscale x 2 x i1>)

define void @mscatter_nxv2f64(<vscale x 2 x double> %val, <vscale x 2 x double*> %ptrs, <vscale x 2 x i1> %m) {
; RV32-LABEL: mscatter_nxv2f64:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, mu
; RV32-NEXT: vsoxei32.v v8, (zero), v10, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: mscatter_nxv2f64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a0, zero, e64, m2, ta, mu
; RV64-NEXT: vsoxei64.v v8, (zero), v10, v0.t
; RV64-NEXT: ret
call void @llvm.masked.scatter.nxv2f64.nxv2p0f64(<vscale x 2 x double> %val, <vscale x 2 x double*> %ptrs, i32 8, <vscale x 2 x i1> %m)
ret void
}

declare void @llvm.masked.scatter.nxv4f64.nxv4p0f64(<vscale x 4 x double>, <vscale x 4 x double*>, i32, <vscale x 4 x i1>)

define void @mscatter_nxv4f64(<vscale x 4 x double> %val, <vscale x 4 x double*> %ptrs, <vscale x 4 x i1> %m) {
; RV32-LABEL: mscatter_nxv4f64:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, mu
; RV32-NEXT: vsoxei32.v v8, (zero), v12, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: mscatter_nxv4f64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a0, zero, e64, m4, ta, mu
; RV64-NEXT: vsoxei64.v v8, (zero), v12, v0.t
; RV64-NEXT: ret
call void @llvm.masked.scatter.nxv4f64.nxv4p0f64(<vscale x 4 x double> %val, <vscale x 4 x double*> %ptrs, i32 8, <vscale x 4 x i1> %m)
ret void
}

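; A known all-true mask should fold to an unmasked indexed store (no v0.t
; operand).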
define void @mscatter_truemask_nxv4f64(<vscale x 4 x double> %val, <vscale x 4 x double*> %ptrs) {
; RV32-LABEL: mscatter_truemask_nxv4f64:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, mu
; RV32-NEXT: vsoxei32.v v8, (zero), v12
; RV32-NEXT: ret
;
; RV64-LABEL: mscatter_truemask_nxv4f64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a0, zero, e64, m4, ta, mu
; RV64-NEXT: vsoxei64.v v8, (zero), v12
; RV64-NEXT: ret
%mhead = insertelement <vscale x 4 x i1> undef, i1 1, i32 0
%mtrue = shufflevector <vscale x 4 x i1> %mhead, <vscale x 4 x i1> undef, <vscale x 4 x i32> zeroinitializer
call void @llvm.masked.scatter.nxv4f64.nxv4p0f64(<vscale x 4 x double> %val, <vscale x 4 x double*> %ptrs, i32 8, <vscale x 4 x i1> %mtrue)
ret void
}

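; A known all-false mask makes the scatter dead: no store is emitted at all.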
define void @mscatter_falsemask_nxv4f64(<vscale x 4 x double> %val, <vscale x 4 x double*> %ptrs) {
; RV32-LABEL: mscatter_falsemask_nxv4f64:
; RV32: # %bb.0:
; RV32-NEXT: ret
;
; RV64-LABEL: mscatter_falsemask_nxv4f64:
; RV64: # %bb.0:
; RV64-NEXT: ret
call void @llvm.masked.scatter.nxv4f64.nxv4p0f64(<vscale x 4 x double> %val, <vscale x 4 x double*> %ptrs, i32 8, <vscale x 4 x i1> zeroinitializer)
ret void
}

declare void @llvm.masked.scatter.nxv8f64.nxv8p0f64(<vscale x 8 x double>, <vscale x 8 x double*>, i32, <vscale x 8 x i1>)

define void @mscatter_nxv8f64(<vscale x 8 x double> %val, <vscale x 8 x double*> %ptrs, <vscale x 8 x i1> %m) {
; RV32-LABEL: mscatter_nxv8f64:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu
; RV32-NEXT: vsoxei32.v v8, (zero), v16, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: mscatter_nxv8f64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a0, zero, e64, m8, ta, mu
; RV64-NEXT: vsoxei64.v v8, (zero), v16, v0.t
; RV64-NEXT: ret
call void @llvm.masked.scatter.nxv8f64.nxv8p0f64(<vscale x 8 x double> %val, <vscale x 8 x double*> %ptrs, i32 8, <vscale x 8 x i1> %m)
ret void
}

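; Base+index f64 scatters: the indices are widened, scaled by the element
; size (vsll.vi ..., 3), and fed to an indexed store. RV32 stays at EEW=32
; indices as long as the IR index type is no wider than i32.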
define void @mscatter_baseidx_nxv8i8_nxv8f64(<vscale x 8 x double> %val, double* %base, <vscale x 8 x i8> %idxs, <vscale x 8 x i1> %m) {
; RV32-LABEL: mscatter_baseidx_nxv8i8_nxv8f64:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu
; RV32-NEXT: vsext.vf4 v20, v16
; RV32-NEXT: vsll.vi v16, v20, 3
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: mscatter_baseidx_nxv8i8_nxv8f64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV64-NEXT: vsext.vf8 v24, v16
; RV64-NEXT: vsll.vi v16, v24, 3
; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t
; RV64-NEXT: ret
%ptrs = getelementptr inbounds double, double* %base, <vscale x 8 x i8> %idxs
call void @llvm.masked.scatter.nxv8f64.nxv8p0f64(<vscale x 8 x double> %val, <vscale x 8 x double*> %ptrs, i32 8, <vscale x 8 x i1> %m)
ret void
}

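; Once the IR itself extends the indices to i64, the 64-bit index vector is
; preserved, so RV32 also ends up using 64-bit index extension and vsoxei64.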
define void @mscatter_baseidx_sext_nxv8i8_nxv8f64(<vscale x 8 x double> %val, double* %base, <vscale x 8 x i8> %idxs, <vscale x 8 x i1> %m) {
; RV32-LABEL: mscatter_baseidx_sext_nxv8i8_nxv8f64:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV32-NEXT: vsext.vf8 v24, v16
; RV32-NEXT: vsll.vi v16, v24, 3
; RV32-NEXT: vsoxei64.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: mscatter_baseidx_sext_nxv8i8_nxv8f64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV64-NEXT: vsext.vf8 v24, v16
; RV64-NEXT: vsll.vi v16, v24, 3
; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t
; RV64-NEXT: ret
%eidxs = sext <vscale x 8 x i8> %idxs to <vscale x 8 x i64>
%ptrs = getelementptr inbounds double, double* %base, <vscale x 8 x i64> %eidxs
call void @llvm.masked.scatter.nxv8f64.nxv8p0f64(<vscale x 8 x double> %val, <vscale x 8 x double*> %ptrs, i32 8, <vscale x 8 x i1> %m)
ret void
}

define void @mscatter_baseidx_zext_nxv8i8_nxv8f64(<vscale x 8 x double> %val, double* %base, <vscale x 8 x i8> %idxs, <vscale x 8 x i1> %m) {
; RV32-LABEL: mscatter_baseidx_zext_nxv8i8_nxv8f64:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV32-NEXT: vzext.vf8 v24, v16
; RV32-NEXT: vsll.vi v16, v24, 3
; RV32-NEXT: vsoxei64.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: mscatter_baseidx_zext_nxv8i8_nxv8f64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV64-NEXT: vzext.vf8 v24, v16
; RV64-NEXT: vsll.vi v16, v24, 3
; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t
; RV64-NEXT: ret
%eidxs = zext <vscale x 8 x i8> %idxs to <vscale x 8 x i64>
%ptrs = getelementptr inbounds double, double* %base, <vscale x 8 x i64> %eidxs
call void @llvm.masked.scatter.nxv8f64.nxv8p0f64(<vscale x 8 x double> %val, <vscale x 8 x double*> %ptrs, i32 8, <vscale x 8 x i1> %m)
ret void
}

define void @mscatter_baseidx_nxv8i16_nxv8f64(<vscale x 8 x double> %val, double* %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m) {
; RV32-LABEL: mscatter_baseidx_nxv8i16_nxv8f64:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu
; RV32-NEXT: vsext.vf2 v20, v16
; RV32-NEXT: vsll.vi v16, v20, 3
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: mscatter_baseidx_nxv8i16_nxv8f64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV64-NEXT: vsext.vf4 v24, v16
; RV64-NEXT: vsll.vi v16, v24, 3
; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t
; RV64-NEXT: ret
%ptrs = getelementptr inbounds double, double* %base, <vscale x 8 x i16> %idxs
call void @llvm.masked.scatter.nxv8f64.nxv8p0f64(<vscale x 8 x double> %val, <vscale x 8 x double*> %ptrs, i32 8, <vscale x 8 x i1> %m)
ret void
}

define void @mscatter_baseidx_sext_nxv8i16_nxv8f64(<vscale x 8 x double> %val, double* %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m) {
; RV32-LABEL: mscatter_baseidx_sext_nxv8i16_nxv8f64:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV32-NEXT: vsext.vf4 v24, v16
; RV32-NEXT: vsll.vi v16, v24, 3
; RV32-NEXT: vsoxei64.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: mscatter_baseidx_sext_nxv8i16_nxv8f64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV64-NEXT: vsext.vf4 v24, v16
; RV64-NEXT: vsll.vi v16, v24, 3
; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t
; RV64-NEXT: ret
%eidxs = sext <vscale x 8 x i16> %idxs to <vscale x 8 x i64>
%ptrs = getelementptr inbounds double, double* %base, <vscale x 8 x i64> %eidxs
call void @llvm.masked.scatter.nxv8f64.nxv8p0f64(<vscale x 8 x double> %val, <vscale x 8 x double*> %ptrs, i32 8, <vscale x 8 x i1> %m)
ret void
}

define void @mscatter_baseidx_zext_nxv8i16_nxv8f64(<vscale x 8 x double> %val, double* %base, <vscale x 8 x i16> %idxs, <vscale x 8 x i1> %m) {
; RV32-LABEL: mscatter_baseidx_zext_nxv8i16_nxv8f64:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV32-NEXT: vzext.vf4 v24, v16
; RV32-NEXT: vsll.vi v16, v24, 3
; RV32-NEXT: vsoxei64.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: mscatter_baseidx_zext_nxv8i16_nxv8f64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV64-NEXT: vzext.vf4 v24, v16
; RV64-NEXT: vsll.vi v16, v24, 3
; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t
; RV64-NEXT: ret
%eidxs = zext <vscale x 8 x i16> %idxs to <vscale x 8 x i64>
%ptrs = getelementptr inbounds double, double* %base, <vscale x 8 x i64> %eidxs
call void @llvm.masked.scatter.nxv8f64.nxv8p0f64(<vscale x 8 x double> %val, <vscale x 8 x double*> %ptrs, i32 8, <vscale x 8 x i1> %m)
ret void
}

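; Unextended i32 indices for f64: RV32 shifts them in place and uses vsoxei32
; under the e64 data type, while RV64 widens to i64 first. The explicitly
; extended i32->i64 variants below keep 64-bit indices on both targets.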
define void @mscatter_baseidx_nxv8i32_nxv8f64(<vscale x 8 x double> %val, double* %base, <vscale x 8 x i32> %idxs, <vscale x 8 x i1> %m) {
; RV32-LABEL: mscatter_baseidx_nxv8i32_nxv8f64:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu
; RV32-NEXT: vsll.vi v16, v16, 3
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: mscatter_baseidx_nxv8i32_nxv8f64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV64-NEXT: vsext.vf2 v24, v16
; RV64-NEXT: vsll.vi v16, v24, 3
; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t
; RV64-NEXT: ret
%ptrs = getelementptr inbounds double, double* %base, <vscale x 8 x i32> %idxs
call void @llvm.masked.scatter.nxv8f64.nxv8p0f64(<vscale x 8 x double> %val, <vscale x 8 x double*> %ptrs, i32 8, <vscale x 8 x i1> %m)
ret void
}

define void @mscatter_baseidx_sext_nxv8i32_nxv8f64(<vscale x 8 x double> %val, double* %base, <vscale x 8 x i32> %idxs, <vscale x 8 x i1> %m) {
; RV32-LABEL: mscatter_baseidx_sext_nxv8i32_nxv8f64:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV32-NEXT: vsext.vf2 v24, v16
; RV32-NEXT: vsll.vi v16, v24, 3
; RV32-NEXT: vsoxei64.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: mscatter_baseidx_sext_nxv8i32_nxv8f64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV64-NEXT: vsext.vf2 v24, v16
; RV64-NEXT: vsll.vi v16, v24, 3
; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t
; RV64-NEXT: ret
%eidxs = sext <vscale x 8 x i32> %idxs to <vscale x 8 x i64>
%ptrs = getelementptr inbounds double, double* %base, <vscale x 8 x i64> %eidxs
call void @llvm.masked.scatter.nxv8f64.nxv8p0f64(<vscale x 8 x double> %val, <vscale x 8 x double*> %ptrs, i32 8, <vscale x 8 x i1> %m)
ret void
}

define void @mscatter_baseidx_zext_nxv8i32_nxv8f64(<vscale x 8 x double> %val, double* %base, <vscale x 8 x i32> %idxs, <vscale x 8 x i1> %m) {
; RV32-LABEL: mscatter_baseidx_zext_nxv8i32_nxv8f64:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV32-NEXT: vzext.vf2 v24, v16
; RV32-NEXT: vsll.vi v16, v24, 3
; RV32-NEXT: vsoxei64.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: mscatter_baseidx_zext_nxv8i32_nxv8f64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV64-NEXT: vzext.vf2 v24, v16
; RV64-NEXT: vsll.vi v16, v24, 3
; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t
; RV64-NEXT: ret
%eidxs = zext <vscale x 8 x i32> %idxs to <vscale x 8 x i64>
%ptrs = getelementptr inbounds double, double* %base, <vscale x 8 x i64> %eidxs
call void @llvm.masked.scatter.nxv8f64.nxv8p0f64(<vscale x 8 x double> %val, <vscale x 8 x double*> %ptrs, i32 8, <vscale x 8 x i1> %m)
ret void
}

define void @mscatter_baseidx_nxv8f64(<vscale x 8 x double> %val, double* %base, <vscale x 8 x i64> %idxs, <vscale x 8 x i1> %m) {
; RV32-LABEL: mscatter_baseidx_nxv8f64:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV32-NEXT: vsll.vi v16, v16, 3
; RV32-NEXT: vsoxei64.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: mscatter_baseidx_nxv8f64:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV64-NEXT: vsll.vi v16, v16, 3
; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t
; RV64-NEXT: ret
%ptrs = getelementptr inbounds double, double* %base, <vscale x 8 x i64> %idxs
call void @llvm.masked.scatter.nxv8f64.nxv8p0f64(<vscale x 8 x double> %val, <vscale x 8 x double*> %ptrs, i32 8, <vscale x 8 x i1> %m)
ret void
}

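; nxv16f64 does not fit in a single LMUL=8 register group, so the scatter is
; split in two: the second half reuses v0 after sliding the packed mask down
; by vlenb/8 bytes, i.e. the 8 x vscale mask bits consumed by the first half.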
declare void @llvm.masked.scatter.nxv16f64.nxv16p0f64(<vscale x 16 x double>, <vscale x 16 x double*>, i32, <vscale x 16 x i1>)

declare <vscale x 16 x double> @llvm.experimental.vector.insert.nxv8f64.nxv16f64(<vscale x 16 x double>, <vscale x 8 x double>, i64)
declare <vscale x 16 x double*> @llvm.experimental.vector.insert.nxv8p0f64.nxv16p0f64(<vscale x 16 x double*>, <vscale x 8 x double*>, i64)

define void @mscatter_nxv16f64(<vscale x 8 x double> %val0, <vscale x 8 x double> %val1, <vscale x 8 x double*> %ptrs0, <vscale x 8 x double*> %ptrs1, <vscale x 16 x i1> %m) {
; RV32-LABEL: mscatter_nxv16f64:
; RV32: # %bb.0:
; RV32-NEXT: vl4re32.v v24, (a0)
; RV32-NEXT: vl4re32.v v28, (a1)
; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu
; RV32-NEXT: vsoxei32.v v8, (zero), v24, v0.t
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: srli a0, a0, 3
; RV32-NEXT: vsetvli a1, zero, e8, mf4, ta, mu
; RV32-NEXT: vslidedown.vx v0, v0, a0
; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu
; RV32-NEXT: vsoxei32.v v16, (zero), v28, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: mscatter_nxv16f64:
; RV64: # %bb.0:
; RV64-NEXT: addi sp, sp, -16
; RV64-NEXT: .cfi_def_cfa_offset 16
; RV64-NEXT: csrr a2, vlenb
; RV64-NEXT: slli a2, a2, 3
; RV64-NEXT: sub sp, sp, a2
; RV64-NEXT: vl8re64.v v24, (a0)
; RV64-NEXT: addi a0, sp, 16
; RV64-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
; RV64-NEXT: vl8re64.v v16, (a1)
; RV64-NEXT: vsetvli a0, zero, e64, m8, ta, mu
; RV64-NEXT: vsoxei64.v v8, (zero), v24, v0.t
; RV64-NEXT: csrr a0, vlenb
; RV64-NEXT: srli a0, a0, 3
; RV64-NEXT: vsetvli a1, zero, e8, mf4, ta, mu
; RV64-NEXT: vslidedown.vx v0, v0, a0
; RV64-NEXT: vsetvli a0, zero, e64, m8, ta, mu
; RV64-NEXT: addi a0, sp, 16
; RV64-NEXT: vl8re8.v v8, (a0) # Unknown-size Folded Reload
; RV64-NEXT: vsoxei64.v v8, (zero), v16, v0.t
; RV64-NEXT: csrr a0, vlenb
; RV64-NEXT: slli a0, a0, 3
; RV64-NEXT: add sp, sp, a0
; RV64-NEXT: addi sp, sp, 16
; RV64-NEXT: ret
%p0 = call <vscale x 16 x double*> @llvm.experimental.vector.insert.nxv8p0f64.nxv16p0f64(<vscale x 16 x double*> undef, <vscale x 8 x double*> %ptrs0, i64 0)
%p1 = call <vscale x 16 x double*> @llvm.experimental.vector.insert.nxv8p0f64.nxv16p0f64(<vscale x 16 x double*> %p0, <vscale x 8 x double*> %ptrs1, i64 8)
%v0 = call <vscale x 16 x double> @llvm.experimental.vector.insert.nxv8f64.nxv16f64(<vscale x 16 x double> undef, <vscale x 8 x double> %val0, i64 0)
%v1 = call <vscale x 16 x double> @llvm.experimental.vector.insert.nxv8f64.nxv16f64(<vscale x 16 x double> %v0, <vscale x 8 x double> %val1, i64 8)
call void @llvm.masked.scatter.nxv16f64.nxv16p0f64(<vscale x 16 x double> %v1, <vscale x 16 x double*> %p1, i32 8, <vscale x 16 x i1> %m)
ret void
}

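; With v8-v23 occupied by the two value operands, the index vector no longer
; fits in vector argument registers and is reloaded from memory; RV64 has to
; extend and scale each half of the indices separately.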
define void @mscatter_baseidx_nxv16i8_nxv16f64(<vscale x 8 x double> %val0, <vscale x 8 x double> %val1, double* %base, <vscale x 16 x i8> %idxs, <vscale x 16 x i1> %m) {
; RV32-LABEL: mscatter_baseidx_nxv16i8_nxv16f64:
; RV32: # %bb.0:
; RV32-NEXT: vl2r.v v2, (a1)
; RV32-NEXT: vsetvli a1, zero, e32, m8, ta, mu
; RV32-NEXT: vsext.vf4 v24, v2
; RV32-NEXT: vsll.vi v24, v24, 3
; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t
; RV32-NEXT: csrr a1, vlenb
; RV32-NEXT: srli a1, a1, 3
; RV32-NEXT: vsetvli a2, zero, e8, mf4, ta, mu
; RV32-NEXT: vslidedown.vx v0, v0, a1
; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV32-NEXT: vsoxei32.v v16, (a0), v28, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: mscatter_baseidx_nxv16i8_nxv16f64:
; RV64: # %bb.0:
; RV64-NEXT: vl2r.v v2, (a1)
; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV64-NEXT: vsext.vf8 v24, v2
; RV64-NEXT: vsll.vi v24, v24, 3
; RV64-NEXT: vsoxei64.v v8, (a0), v24, v0.t
; RV64-NEXT: csrr a1, vlenb
; RV64-NEXT: srli a1, a1, 3
; RV64-NEXT: vsetvli a2, zero, e8, mf4, ta, mu
; RV64-NEXT: vslidedown.vx v0, v0, a1
; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV64-NEXT: vsext.vf8 v8, v3
; RV64-NEXT: vsll.vi v8, v8, 3
; RV64-NEXT: vsoxei64.v v16, (a0), v8, v0.t
; RV64-NEXT: ret
%ptrs = getelementptr inbounds double, double* %base, <vscale x 16 x i8> %idxs
%v0 = call <vscale x 16 x double> @llvm.experimental.vector.insert.nxv8f64.nxv16f64(<vscale x 16 x double> undef, <vscale x 8 x double> %val0, i64 0)
%v1 = call <vscale x 16 x double> @llvm.experimental.vector.insert.nxv8f64.nxv16f64(<vscale x 16 x double> %v0, <vscale x 8 x double> %val1, i64 8)
call void @llvm.masked.scatter.nxv16f64.nxv16p0f64(<vscale x 16 x double> %v1, <vscale x 16 x double*> %ptrs, i32 8, <vscale x 16 x i1> %m)
ret void
}

define void @mscatter_baseidx_nxv16i16_nxv16f64(<vscale x 8 x double> %val0, <vscale x 8 x double> %val1, double* %base, <vscale x 16 x i16> %idxs, <vscale x 16 x i1> %m) {
; RV32-LABEL: mscatter_baseidx_nxv16i16_nxv16f64:
; RV32: # %bb.0:
; RV32-NEXT: vl4re16.v v4, (a1)
; RV32-NEXT: vsetvli a1, zero, e32, m8, ta, mu
; RV32-NEXT: vsext.vf2 v24, v4
; RV32-NEXT: vsll.vi v24, v24, 3
; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t
; RV32-NEXT: csrr a1, vlenb
; RV32-NEXT: srli a1, a1, 3
; RV32-NEXT: vsetvli a2, zero, e8, mf4, ta, mu
; RV32-NEXT: vslidedown.vx v0, v0, a1
; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV32-NEXT: vsoxei32.v v16, (a0), v28, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: mscatter_baseidx_nxv16i16_nxv16f64:
; RV64: # %bb.0:
; RV64-NEXT: vl4re16.v v4, (a1)
; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV64-NEXT: vsext.vf4 v24, v4
; RV64-NEXT: vsll.vi v24, v24, 3
; RV64-NEXT: vsoxei64.v v8, (a0), v24, v0.t
; RV64-NEXT: csrr a1, vlenb
; RV64-NEXT: srli a1, a1, 3
; RV64-NEXT: vsetvli a2, zero, e8, mf4, ta, mu
; RV64-NEXT: vslidedown.vx v0, v0, a1
; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV64-NEXT: vsext.vf4 v8, v6
; RV64-NEXT: vsll.vi v8, v8, 3
; RV64-NEXT: vsoxei64.v v16, (a0), v8, v0.t
; RV64-NEXT: ret
%ptrs = getelementptr inbounds double, double* %base, <vscale x 16 x i16> %idxs
%v0 = call <vscale x 16 x double> @llvm.experimental.vector.insert.nxv8f64.nxv16f64(<vscale x 16 x double> undef, <vscale x 8 x double> %val0, i64 0)
%v1 = call <vscale x 16 x double> @llvm.experimental.vector.insert.nxv8f64.nxv16f64(<vscale x 16 x double> %v0, <vscale x 8 x double> %val1, i64 8)
call void @llvm.masked.scatter.nxv16f64.nxv16p0f64(<vscale x 16 x double> %v1, <vscale x 16 x double*> %ptrs, i32 8, <vscale x 16 x i1> %m)
ret void
}