Add a new VPInstruction::Broadcast opcode and use it to materialize explicit broadcasts of live-ins. The initial patch only materializes the broadcasts if the vector preheader dominates all uses that need it. Later patches will pick the best valid insert point, thus retiring the implicit hoisting of broadcasts from VPTransformState::get(). PR: https://github.com/llvm/llvm-project/pull/124644
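In the generated IR, these explicit broadcasts lower to the usual insertelement/shufflevector splat idiom, which the [[BROADCAST_SPLATINSERT]]/[[BROADCAST_SPLAT]] check variables in the test below match. A minimal sketch of that idiom for a live-in i64 %stride at VF vscale x 4 (names illustrative):

  %broadcast.splatinsert = insertelement <vscale x 4 x i64> poison, i64 %stride, i64 0
  %broadcast.splat = shufflevector <vscale x 4 x i64> %broadcast.splatinsert, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer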
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -passes=loop-vectorize -mtriple=riscv64 -mattr=+v -S | FileCheck --check-prefixes=CHECK,NOSTRIDED %s
; RUN: opt < %s -passes=loop-vectorize -mtriple=riscv64 -mattr=+v -lv-strided-pointer-ivs=true -laa-speculate-unit-stride=false -S | FileCheck --check-prefixes=CHECK,STRIDED %s
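
; Constant stride of 8 x i32 (32 bytes), known at compile time: both run lines
; vectorize this with masked gathers and scatters.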
define void @single_constant_stride_int_scaled(ptr %p) {
; CHECK-LABEL: @single_constant_stride_int_scaled(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP1:%.*]] = mul i64 [[TMP0]], 4
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ule i64 1024, [[TMP1]]
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP3:%.*]] = mul i64 [[TMP2]], 4
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i64 [[N_MOD_VF]], 0
; CHECK-NEXT: [[TMP5:%.*]] = select i1 [[TMP4]], i64 [[TMP3]], i64 [[N_MOD_VF]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[TMP5]]
; CHECK-NEXT: [[TMP6:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP7:%.*]] = mul i64 [[TMP6]], 4
; CHECK-NEXT: [[TMP8:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
; CHECK-NEXT: [[TMP10:%.*]] = mul <vscale x 4 x i64> [[TMP8]], splat (i64 1)
; CHECK-NEXT: [[INDUCTION:%.*]] = add <vscale x 4 x i64> zeroinitializer, [[TMP10]]
; CHECK-NEXT: [[TMP13:%.*]] = mul i64 1, [[TMP7]]
; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP13]], i64 0
; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i64> [[DOTSPLATINSERT]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x i64> [ [[INDUCTION]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP14:%.*]] = mul nuw nsw <vscale x 4 x i64> [[VEC_IND]], splat (i64 8)
; CHECK-NEXT: [[TMP15:%.*]] = getelementptr i32, ptr [[P:%.*]], <vscale x 4 x i64> [[TMP14]]
; CHECK-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> [[TMP15]], i32 4, <vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> poison)
; CHECK-NEXT: [[TMP16:%.*]] = add <vscale x 4 x i32> [[WIDE_MASKED_GATHER]], splat (i32 1)
; CHECK-NEXT: call void @llvm.masked.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> [[TMP16]], <vscale x 4 x ptr> [[TMP15]], i32 4, <vscale x 4 x i1> splat (i1 true))
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP7]]
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i64> [[VEC_IND]], [[DOTSPLAT]]
; CHECK-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: br label [[SCALAR_PH]]
; CHECK: scalar.ph:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
; CHECK-NEXT: [[I:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
; CHECK-NEXT: [[OFFSET:%.*]] = mul nuw nsw i64 [[I]], 8
; CHECK-NEXT: [[Q0:%.*]] = getelementptr i32, ptr [[P]], i64 [[OFFSET]]
; CHECK-NEXT: [[X0:%.*]] = load i32, ptr [[Q0]], align 4
; CHECK-NEXT: [[Y0:%.*]] = add i32 [[X0]], 1
; CHECK-NEXT: store i32 [[Y0]], ptr [[Q0]], align 4
; CHECK-NEXT: [[NEXTI]] = add i64 [[I]], 1
; CHECK-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024
; CHECK-NEXT: br i1 [[DONE]], label [[EXIT:%.*]], label [[LOOP]], !llvm.loop [[LOOP3:![0-9]+]]
; CHECK: exit:
; CHECK-NEXT: ret void
;
entry:
  br label %loop
loop:
  %i = phi i64 [0, %entry], [%nexti, %loop]

  %offset = mul nsw nuw i64 %i, 8
  %q0 = getelementptr i32, ptr %p, i64 %offset
  %x0 = load i32, ptr %q0
  %y0 = add i32 %x0, 1
  store i32 %y0, ptr %q0

  %nexti = add i64 %i, 1
  %done = icmp eq i64 %nexti, 1024
  br i1 %done, label %exit, label %loop
exit:
  ret void
}
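
; The same kind of constant stride, expressed through an integer offset IV that
; steps by 64 each iteration instead of a scaled index; still vectorized with
; gathers and scatters.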
define void @single_constant_stride_int_iv(ptr %p) {
; CHECK-LABEL: @single_constant_stride_int_iv(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP1:%.*]] = mul i64 [[TMP0]], 4
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP1]]
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP3:%.*]] = mul i64 [[TMP2]], 4
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP5:%.*]] = mul i64 [[TMP4]], 4
; CHECK-NEXT: [[IND_END:%.*]] = mul i64 [[N_VEC]], 64
; CHECK-NEXT: [[TMP6:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
; CHECK-NEXT: [[TMP8:%.*]] = mul <vscale x 4 x i64> [[TMP6]], splat (i64 64)
; CHECK-NEXT: [[INDUCTION:%.*]] = add <vscale x 4 x i64> zeroinitializer, [[TMP8]]
; CHECK-NEXT: [[TMP11:%.*]] = mul i64 64, [[TMP5]]
; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP11]], i64 0
; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i64> [[DOTSPLATINSERT]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x i64> [ [[INDUCTION]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP12:%.*]] = getelementptr i32, ptr [[P:%.*]], <vscale x 4 x i64> [[VEC_IND]]
; CHECK-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> [[TMP12]], i32 4, <vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> poison)
; CHECK-NEXT: [[TMP13:%.*]] = add <vscale x 4 x i32> [[WIDE_MASKED_GATHER]], splat (i32 1)
; CHECK-NEXT: call void @llvm.masked.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> [[TMP13]], <vscale x 4 x ptr> [[TMP12]], i32 4, <vscale x 4 x i1> splat (i1 true))
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
; CHECK-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i64> [[VEC_IND]], [[DOTSPLAT]]
; CHECK-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
; CHECK: scalar.ph:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: [[BC_RESUME_VAL1:%.*]] = phi i64 [ [[IND_END]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY]] ]
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
; CHECK-NEXT: [[I:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
; CHECK-NEXT: [[OFFSET:%.*]] = phi i64 [ [[BC_RESUME_VAL1]], [[SCALAR_PH]] ], [ [[OFFSET_NEXT:%.*]], [[LOOP]] ]
; CHECK-NEXT: [[Q0:%.*]] = getelementptr i32, ptr [[P]], i64 [[OFFSET]]
; CHECK-NEXT: [[X0:%.*]] = load i32, ptr [[Q0]], align 4
; CHECK-NEXT: [[Y0:%.*]] = add i32 [[X0]], 1
; CHECK-NEXT: store i32 [[Y0]], ptr [[Q0]], align 4
; CHECK-NEXT: [[OFFSET_NEXT]] = add nuw nsw i64 [[OFFSET]], 64
; CHECK-NEXT: [[NEXTI]] = add i64 [[I]], 1
; CHECK-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024
; CHECK-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK: exit:
; CHECK-NEXT: ret void
;
entry:
  br label %loop
loop:
  %i = phi i64 [0, %entry], [%nexti, %loop]
  %offset = phi i64 [0, %entry], [%offset.next, %loop]

  %q0 = getelementptr i32, ptr %p, i64 %offset
  %x0 = load i32, ptr %q0
  %y0 = add i32 %x0, 1
  store i32 %y0, ptr %q0

  %offset.next = add nsw nuw i64 %offset, 64
  %nexti = add i64 %i, 1
  %done = icmp eq i64 %nexti, 1024
  br i1 %done, label %exit, label %loop
exit:
  ret void
}
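
; A pointer IV advancing by a constant 8 bytes: the stride spans exactly two
; i32s, so the load side becomes a wide load plus deinterleave while the store
; side remains a scatter.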
define void @single_constant_stride_ptr_iv(ptr %p) {
; CHECK-LABEL: @single_constant_stride_ptr_iv(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP1:%.*]] = mul i64 [[TMP0]], 4
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ule i64 1024, [[TMP1]]
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP3:%.*]] = mul i64 [[TMP2]], 4
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP3]]
; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i64 [[N_MOD_VF]], 0
; CHECK-NEXT: [[TMP5:%.*]] = select i1 [[TMP4]], i64 [[TMP3]], i64 [[N_MOD_VF]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[TMP5]]
; CHECK-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP8:%.*]] = mul i64 [[TMP7]], 4
; CHECK-NEXT: [[TMP18:%.*]] = mul i64 [[N_VEC]], 8
; CHECK-NEXT: [[IND_END:%.*]] = getelementptr i8, ptr [[P:%.*]], i64 [[TMP18]]
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[POINTER_PHI:%.*]] = phi ptr [ [[P]], [[VECTOR_PH]] ], [ [[PTR_IND:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP10:%.*]] = mul i64 [[TMP9]], 4
; CHECK-NEXT: [[TMP11:%.*]] = mul i64 [[TMP10]], 1
; CHECK-NEXT: [[TMP12:%.*]] = mul i64 8, [[TMP11]]
; CHECK-NEXT: [[TMP13:%.*]] = mul i64 [[TMP10]], 0
; CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP13]], i64 0
; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i64> [[DOTSPLATINSERT]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
; CHECK-NEXT: [[TMP14:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
; CHECK-NEXT: [[TMP15:%.*]] = add <vscale x 4 x i64> [[DOTSPLAT]], [[TMP14]]
; CHECK-NEXT: [[TMP16:%.*]] = mul <vscale x 4 x i64> [[TMP15]], splat (i64 8)
; CHECK-NEXT: [[VECTOR_GEP:%.*]] = getelementptr i8, ptr [[POINTER_PHI]], <vscale x 4 x i64> [[TMP16]]
; CHECK-NEXT: [[TMP17:%.*]] = extractelement <vscale x 4 x ptr> [[VECTOR_GEP]], i32 0
; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <vscale x 8 x i32>, ptr [[TMP17]], align 4
; CHECK-NEXT: [[STRIDED_VEC:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.vector.deinterleave2.nxv8i32(<vscale x 8 x i32> [[WIDE_VEC]])
; CHECK-NEXT: [[TMP19:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i32> } [[STRIDED_VEC]], 0
; CHECK-NEXT: [[TMP20:%.*]] = add <vscale x 4 x i32> [[TMP19]], splat (i32 1)
; CHECK-NEXT: call void @llvm.masked.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> [[TMP20]], <vscale x 4 x ptr> [[VECTOR_GEP]], i32 4, <vscale x 4 x i1> splat (i1 true))
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP8]]
; CHECK-NEXT: [[PTR_IND]] = getelementptr i8, ptr [[POINTER_PHI]], i64 [[TMP12]]
; CHECK-NEXT: [[TMP21:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP21]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: br label [[SCALAR_PH]]
; CHECK: scalar.ph:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: [[BC_RESUME_VAL1:%.*]] = phi ptr [ [[IND_END]], [[MIDDLE_BLOCK]] ], [ [[P]], [[ENTRY]] ]
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
; CHECK-NEXT: [[I:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
; CHECK-NEXT: [[PTR:%.*]] = phi ptr [ [[BC_RESUME_VAL1]], [[SCALAR_PH]] ], [ [[PTR_NEXT:%.*]], [[LOOP]] ]
; CHECK-NEXT: [[X0:%.*]] = load i32, ptr [[PTR]], align 4
; CHECK-NEXT: [[Y0:%.*]] = add i32 [[X0]], 1
; CHECK-NEXT: store i32 [[Y0]], ptr [[PTR]], align 4
; CHECK-NEXT: [[PTR_NEXT]] = getelementptr inbounds i8, ptr [[PTR]], i64 8
; CHECK-NEXT: [[NEXTI]] = add i64 [[I]], 1
; CHECK-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024
; CHECK-NEXT: br i1 [[DONE]], label [[EXIT:%.*]], label [[LOOP]], !llvm.loop [[LOOP7:![0-9]+]]
; CHECK: exit:
; CHECK-NEXT: ret void
;
entry:
  br label %loop
loop:
  %i = phi i64 [0, %entry], [%nexti, %loop]
  %ptr = phi ptr [%p, %entry], [%ptr.next, %loop]

  %x0 = load i32, ptr %ptr
  %y0 = add i32 %x0, 1
  store i32 %y0, ptr %ptr

  %ptr.next = getelementptr inbounds i8, ptr %ptr, i64 8
  %nexti = add i64 %i, 1
  %done = icmp eq i64 %nexti, 1024
  br i1 %done, label %exit, label %loop
exit:
  ret void
}
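
; Runtime stride: NOSTRIDED versions the loop on stride == 1 (vector.scevcheck)
; and emits unit-stride accesses; STRIDED, with unit-stride speculation
; disabled, leaves the loop scalar.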
define void @single_stride_int_scaled(ptr %p, i64 %stride) {
; NOSTRIDED-LABEL: @single_stride_int_scaled(
; NOSTRIDED-NEXT: entry:
; NOSTRIDED-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; NOSTRIDED-NEXT: [[TMP1:%.*]] = mul i64 [[TMP0]], 4
; NOSTRIDED-NEXT: [[TMP2:%.*]] = call i64 @llvm.umax.i64(i64 8, i64 [[TMP1]])
; NOSTRIDED-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP2]]
; NOSTRIDED-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_SCEVCHECK:%.*]]
; NOSTRIDED: vector.scevcheck:
; NOSTRIDED-NEXT: [[IDENT_CHECK:%.*]] = icmp ne i64 [[STRIDE:%.*]], 1
; NOSTRIDED-NEXT: br i1 [[IDENT_CHECK]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]]
; NOSTRIDED: vector.ph:
; NOSTRIDED-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
; NOSTRIDED-NEXT: [[TMP4:%.*]] = mul i64 [[TMP3]], 4
; NOSTRIDED-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP4]]
; NOSTRIDED-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
; NOSTRIDED-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
; NOSTRIDED-NEXT: [[TMP6:%.*]] = mul i64 [[TMP5]], 4
; NOSTRIDED-NEXT: br label [[VECTOR_BODY:%.*]]
; NOSTRIDED: vector.body:
; NOSTRIDED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; NOSTRIDED-NEXT: [[TMP7:%.*]] = add i64 [[INDEX]], 0
; NOSTRIDED-NEXT: [[TMP8:%.*]] = getelementptr i32, ptr [[P:%.*]], i64 [[TMP7]]
; NOSTRIDED-NEXT: [[TMP9:%.*]] = getelementptr i32, ptr [[TMP8]], i32 0
; NOSTRIDED-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP9]], align 4
; NOSTRIDED-NEXT: [[TMP10:%.*]] = add <vscale x 4 x i32> [[WIDE_LOAD]], splat (i32 1)
; NOSTRIDED-NEXT: store <vscale x 4 x i32> [[TMP10]], ptr [[TMP9]], align 4
; NOSTRIDED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]]
; NOSTRIDED-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NOSTRIDED-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
; NOSTRIDED: middle.block:
; NOSTRIDED-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
; NOSTRIDED-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
; NOSTRIDED: scalar.ph:
; NOSTRIDED-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ], [ 0, [[VECTOR_SCEVCHECK]] ]
; NOSTRIDED-NEXT: br label [[LOOP:%.*]]
; NOSTRIDED: loop:
; NOSTRIDED-NEXT: [[I:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
; NOSTRIDED-NEXT: [[OFFSET:%.*]] = mul nuw nsw i64 [[I]], [[STRIDE]]
; NOSTRIDED-NEXT: [[Q0:%.*]] = getelementptr i32, ptr [[P]], i64 [[OFFSET]]
; NOSTRIDED-NEXT: [[X0:%.*]] = load i32, ptr [[Q0]], align 4
; NOSTRIDED-NEXT: [[Y0:%.*]] = add i32 [[X0]], 1
; NOSTRIDED-NEXT: store i32 [[Y0]], ptr [[Q0]], align 4
; NOSTRIDED-NEXT: [[NEXTI]] = add i64 [[I]], 1
; NOSTRIDED-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024
; NOSTRIDED-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP9:![0-9]+]]
; NOSTRIDED: exit:
; NOSTRIDED-NEXT: ret void
;
; STRIDED-LABEL: @single_stride_int_scaled(
; STRIDED-NEXT: entry:
; STRIDED-NEXT: br label [[LOOP:%.*]]
; STRIDED: loop:
; STRIDED-NEXT: [[I:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
; STRIDED-NEXT: [[OFFSET:%.*]] = mul nuw nsw i64 [[I]], [[STRIDE:%.*]]
; STRIDED-NEXT: [[Q0:%.*]] = getelementptr i32, ptr [[P:%.*]], i64 [[OFFSET]]
; STRIDED-NEXT: [[X0:%.*]] = load i32, ptr [[Q0]], align 4
; STRIDED-NEXT: [[Y0:%.*]] = add i32 [[X0]], 1
; STRIDED-NEXT: store i32 [[Y0]], ptr [[Q0]], align 4
; STRIDED-NEXT: [[NEXTI]] = add i64 [[I]], 1
; STRIDED-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024
; STRIDED-NEXT: br i1 [[DONE]], label [[EXIT:%.*]], label [[LOOP]]
; STRIDED: exit:
; STRIDED-NEXT: ret void
;
entry:
  br label %loop
loop:
  %i = phi i64 [0, %entry], [%nexti, %loop]

  %offset = mul nsw nuw i64 %i, %stride
  %q0 = getelementptr i32, ptr %p, i64 %offset
  %x0 = load i32, ptr %q0
  %y0 = add i32 %x0, 1
  store i32 %y0, ptr %q0

  %nexti = add i64 %i, 1
  %done = icmp eq i64 %nexti, 1024
  br i1 %done, label %exit, label %loop
exit:
  ret void
}
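
; Runtime stride through an integer offset IV: NOSTRIDED again versions on
; stride == 1; STRIDED does not vectorize.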
define void @single_stride_int_iv(ptr %p, i64 %stride) {
; NOSTRIDED-LABEL: @single_stride_int_iv(
; NOSTRIDED-NEXT: entry:
; NOSTRIDED-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; NOSTRIDED-NEXT: [[TMP1:%.*]] = mul i64 [[TMP0]], 4
; NOSTRIDED-NEXT: [[TMP2:%.*]] = call i64 @llvm.umax.i64(i64 8, i64 [[TMP1]])
; NOSTRIDED-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP2]]
; NOSTRIDED-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_SCEVCHECK:%.*]]
; NOSTRIDED: vector.scevcheck:
; NOSTRIDED-NEXT: [[IDENT_CHECK:%.*]] = icmp ne i64 [[STRIDE:%.*]], 1
; NOSTRIDED-NEXT: br i1 [[IDENT_CHECK]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]]
; NOSTRIDED: vector.ph:
; NOSTRIDED-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
; NOSTRIDED-NEXT: [[TMP4:%.*]] = mul i64 [[TMP3]], 4
; NOSTRIDED-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP4]]
; NOSTRIDED-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
; NOSTRIDED-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
; NOSTRIDED-NEXT: [[TMP6:%.*]] = mul i64 [[TMP5]], 4
; NOSTRIDED-NEXT: br label [[VECTOR_BODY:%.*]]
; NOSTRIDED: vector.body:
; NOSTRIDED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; NOSTRIDED-NEXT: [[TMP7:%.*]] = add i64 [[INDEX]], 0
; NOSTRIDED-NEXT: [[TMP8:%.*]] = getelementptr i32, ptr [[P:%.*]], i64 [[TMP7]]
; NOSTRIDED-NEXT: [[TMP9:%.*]] = getelementptr i32, ptr [[TMP8]], i32 0
; NOSTRIDED-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP9]], align 4
; NOSTRIDED-NEXT: [[TMP10:%.*]] = add <vscale x 4 x i32> [[WIDE_LOAD]], splat (i32 1)
; NOSTRIDED-NEXT: store <vscale x 4 x i32> [[TMP10]], ptr [[TMP9]], align 4
; NOSTRIDED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]]
; NOSTRIDED-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NOSTRIDED-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
; NOSTRIDED: middle.block:
; NOSTRIDED-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
; NOSTRIDED-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
; NOSTRIDED: scalar.ph:
; NOSTRIDED-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ], [ 0, [[VECTOR_SCEVCHECK]] ]
; NOSTRIDED-NEXT: [[BC_RESUME_VAL1:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY]] ], [ 0, [[VECTOR_SCEVCHECK]] ]
; NOSTRIDED-NEXT: br label [[LOOP:%.*]]
; NOSTRIDED: loop:
; NOSTRIDED-NEXT: [[I:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
; NOSTRIDED-NEXT: [[OFFSET:%.*]] = phi i64 [ [[BC_RESUME_VAL1]], [[SCALAR_PH]] ], [ [[OFFSET_NEXT:%.*]], [[LOOP]] ]
; NOSTRIDED-NEXT: [[Q0:%.*]] = getelementptr i32, ptr [[P]], i64 [[OFFSET]]
; NOSTRIDED-NEXT: [[X0:%.*]] = load i32, ptr [[Q0]], align 4
; NOSTRIDED-NEXT: [[Y0:%.*]] = add i32 [[X0]], 1
; NOSTRIDED-NEXT: store i32 [[Y0]], ptr [[Q0]], align 4
; NOSTRIDED-NEXT: [[OFFSET_NEXT]] = add nuw nsw i64 [[OFFSET]], [[STRIDE]]
; NOSTRIDED-NEXT: [[NEXTI]] = add i64 [[I]], 1
; NOSTRIDED-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024
; NOSTRIDED-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP11:![0-9]+]]
; NOSTRIDED: exit:
; NOSTRIDED-NEXT: ret void
;
; STRIDED-LABEL: @single_stride_int_iv(
; STRIDED-NEXT: entry:
; STRIDED-NEXT: br label [[LOOP:%.*]]
; STRIDED: loop:
; STRIDED-NEXT: [[I:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
; STRIDED-NEXT: [[OFFSET:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[OFFSET_NEXT:%.*]], [[LOOP]] ]
; STRIDED-NEXT: [[Q0:%.*]] = getelementptr i32, ptr [[P:%.*]], i64 [[OFFSET]]
; STRIDED-NEXT: [[X0:%.*]] = load i32, ptr [[Q0]], align 4
; STRIDED-NEXT: [[Y0:%.*]] = add i32 [[X0]], 1
; STRIDED-NEXT: store i32 [[Y0]], ptr [[Q0]], align 4
; STRIDED-NEXT: [[OFFSET_NEXT]] = add nuw nsw i64 [[OFFSET]], [[STRIDE:%.*]]
; STRIDED-NEXT: [[NEXTI]] = add i64 [[I]], 1
; STRIDED-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024
; STRIDED-NEXT: br i1 [[DONE]], label [[EXIT:%.*]], label [[LOOP]]
; STRIDED: exit:
; STRIDED-NEXT: ret void
;
entry:
  br label %loop
loop:
  %i = phi i64 [0, %entry], [%nexti, %loop]
  %offset = phi i64 [0, %entry], [%offset.next, %loop]

  %q0 = getelementptr i32, ptr %p, i64 %offset
  %x0 = load i32, ptr %q0
  %y0 = add i32 %x0, 1
  store i32 %y0, ptr %q0

  %offset.next = add nsw nuw i64 %offset, %stride
  %nexti = add i64 %i, 1
  %done = icmp eq i64 %nexti, 1024
  br i1 %done, label %exit, label %loop
exit:
  ret void
}
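
; A pointer IV advancing by a runtime stride: not vectorized by either run line.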
define void @single_stride_ptr_iv(ptr %p, i64 %stride) {
; CHECK-LABEL: @single_stride_ptr_iv(
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
; CHECK-NEXT: [[I:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
; CHECK-NEXT: [[PTR:%.*]] = phi ptr [ [[P:%.*]], [[ENTRY]] ], [ [[PTR_NEXT:%.*]], [[LOOP]] ]
; CHECK-NEXT: [[X0:%.*]] = load i32, ptr [[PTR]], align 4
; CHECK-NEXT: [[Y0:%.*]] = add i32 [[X0]], 1
; CHECK-NEXT: store i32 [[Y0]], ptr [[PTR]], align 4
; CHECK-NEXT: [[PTR_NEXT]] = getelementptr inbounds i8, ptr [[PTR]], i64 [[STRIDE:%.*]]
; CHECK-NEXT: [[NEXTI]] = add i64 [[I]], 1
; CHECK-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024
; CHECK-NEXT: br i1 [[DONE]], label [[EXIT:%.*]], label [[LOOP]]
; CHECK: exit:
; CHECK-NEXT: ret void
;
entry:
  br label %loop
loop:
  %i = phi i64 [0, %entry], [%nexti, %loop]
  %ptr = phi ptr [%p, %entry], [%ptr.next, %loop]

  %x0 = load i32, ptr %ptr
  %y0 = add i32 %x0, 1
  store i32 %y0, ptr %ptr

  %ptr.next = getelementptr inbounds i8, ptr %ptr, i64 %stride
  %nexti = add i64 %i, 1
  %done = icmp eq i64 %nexti, 1024
  br i1 %done, label %exit, label %loop
exit:
  ret void
}
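
; Two bases sharing the same runtime scaled stride: NOSTRIDED versions on
; stride == 1 plus a pointer-difference check; STRIDED keeps the strided
; accesses as gathers/scatters behind SCEV wrap and alias checks.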
define void @double_stride_int_scaled(ptr %p, ptr %p2, i64 %stride) {
; NOSTRIDED-LABEL: @double_stride_int_scaled(
; NOSTRIDED-NEXT: entry:
; NOSTRIDED-NEXT: [[P3:%.*]] = ptrtoint ptr [[P:%.*]] to i64
; NOSTRIDED-NEXT: [[P21:%.*]] = ptrtoint ptr [[P2:%.*]] to i64
; NOSTRIDED-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; NOSTRIDED-NEXT: [[TMP1:%.*]] = mul i64 [[TMP0]], 4
; NOSTRIDED-NEXT: [[TMP2:%.*]] = call i64 @llvm.umax.i64(i64 16, i64 [[TMP1]])
; NOSTRIDED-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP2]]
; NOSTRIDED-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_SCEVCHECK:%.*]]
; NOSTRIDED: vector.scevcheck:
; NOSTRIDED-NEXT: [[IDENT_CHECK:%.*]] = icmp ne i64 [[STRIDE:%.*]], 1
; NOSTRIDED-NEXT: br i1 [[IDENT_CHECK]], label [[SCALAR_PH]], label [[VECTOR_MEMCHECK:%.*]]
; NOSTRIDED: vector.memcheck:
; NOSTRIDED-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
; NOSTRIDED-NEXT: [[TMP4:%.*]] = mul i64 [[TMP3]], 4
; NOSTRIDED-NEXT: [[TMP5:%.*]] = mul i64 [[TMP4]], 4
; NOSTRIDED-NEXT: [[TMP6:%.*]] = sub i64 [[P21]], [[P3]]
; NOSTRIDED-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP6]], [[TMP5]]
; NOSTRIDED-NEXT: br i1 [[DIFF_CHECK]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]]
; NOSTRIDED: vector.ph:
; NOSTRIDED-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
; NOSTRIDED-NEXT: [[TMP8:%.*]] = mul i64 [[TMP7]], 4
; NOSTRIDED-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP8]]
; NOSTRIDED-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
; NOSTRIDED-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
; NOSTRIDED-NEXT: [[TMP10:%.*]] = mul i64 [[TMP9]], 4
; NOSTRIDED-NEXT: br label [[VECTOR_BODY:%.*]]
; NOSTRIDED: vector.body:
; NOSTRIDED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; NOSTRIDED-NEXT: [[TMP11:%.*]] = add i64 [[INDEX]], 0
; NOSTRIDED-NEXT: [[TMP12:%.*]] = getelementptr i32, ptr [[P]], i64 [[TMP11]]
; NOSTRIDED-NEXT: [[TMP13:%.*]] = getelementptr i32, ptr [[TMP12]], i32 0
; NOSTRIDED-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP13]], align 4
; NOSTRIDED-NEXT: [[TMP14:%.*]] = add <vscale x 4 x i32> [[WIDE_LOAD]], splat (i32 1)
; NOSTRIDED-NEXT: [[TMP15:%.*]] = getelementptr i32, ptr [[P2]], i64 [[TMP11]]
; NOSTRIDED-NEXT: [[TMP16:%.*]] = getelementptr i32, ptr [[TMP15]], i32 0
; NOSTRIDED-NEXT: store <vscale x 4 x i32> [[TMP14]], ptr [[TMP16]], align 4
; NOSTRIDED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP10]]
; NOSTRIDED-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NOSTRIDED-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
; NOSTRIDED: middle.block:
; NOSTRIDED-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
; NOSTRIDED-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
; NOSTRIDED: scalar.ph:
; NOSTRIDED-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ], [ 0, [[VECTOR_SCEVCHECK]] ], [ 0, [[VECTOR_MEMCHECK]] ]
; NOSTRIDED-NEXT: br label [[LOOP:%.*]]
; NOSTRIDED: loop:
; NOSTRIDED-NEXT: [[I:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
; NOSTRIDED-NEXT: [[OFFSET:%.*]] = mul nuw nsw i64 [[I]], [[STRIDE]]
; NOSTRIDED-NEXT: [[Q0:%.*]] = getelementptr i32, ptr [[P]], i64 [[OFFSET]]
; NOSTRIDED-NEXT: [[X0:%.*]] = load i32, ptr [[Q0]], align 4
; NOSTRIDED-NEXT: [[Y0:%.*]] = add i32 [[X0]], 1
; NOSTRIDED-NEXT: [[Q1:%.*]] = getelementptr i32, ptr [[P2]], i64 [[OFFSET]]
; NOSTRIDED-NEXT: store i32 [[Y0]], ptr [[Q1]], align 4
; NOSTRIDED-NEXT: [[NEXTI]] = add i64 [[I]], 1
; NOSTRIDED-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024
; NOSTRIDED-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP13:![0-9]+]]
; NOSTRIDED: exit:
; NOSTRIDED-NEXT: ret void
;
; STRIDED-LABEL: @double_stride_int_scaled(
; STRIDED-NEXT: entry:
; STRIDED-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; STRIDED-NEXT: [[TMP1:%.*]] = mul i64 [[TMP0]], 4
; STRIDED-NEXT: [[TMP2:%.*]] = call i64 @llvm.umax.i64(i64 80, i64 [[TMP1]])
; STRIDED-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP2]]
; STRIDED-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_MEMCHECK:%.*]]
; STRIDED: vector.scevcheck:
; STRIDED-NEXT: [[TMP24:%.*]] = shl i64 [[STRIDE:%.*]], 2
; STRIDED-NEXT: [[TMP25:%.*]] = mul i64 [[STRIDE]], -4
; STRIDED-NEXT: [[TMP26:%.*]] = icmp slt i64 [[TMP24]], 0
; STRIDED-NEXT: [[TMP27:%.*]] = select i1 [[TMP26]], i64 [[TMP25]], i64 [[TMP24]]
; STRIDED-NEXT: [[MUL:%.*]] = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 [[TMP27]], i64 1023)
; STRIDED-NEXT: [[MUL_RESULT:%.*]] = extractvalue { i64, i1 } [[MUL]], 0
; STRIDED-NEXT: [[MUL_OVERFLOW:%.*]] = extractvalue { i64, i1 } [[MUL]], 1
; STRIDED-NEXT: [[TMP28:%.*]] = sub i64 0, [[MUL_RESULT]]
; STRIDED-NEXT: [[TMP29:%.*]] = getelementptr i8, ptr [[P2:%.*]], i64 [[MUL_RESULT]]
; STRIDED-NEXT: [[TMP30:%.*]] = getelementptr i8, ptr [[P2]], i64 [[TMP28]]
; STRIDED-NEXT: [[TMP31:%.*]] = icmp ult ptr [[TMP29]], [[P2]]
; STRIDED-NEXT: [[TMP32:%.*]] = icmp ugt ptr [[TMP30]], [[P2]]
; STRIDED-NEXT: [[TMP33:%.*]] = select i1 [[TMP26]], i1 [[TMP32]], i1 [[TMP31]]
; STRIDED-NEXT: [[TMP13:%.*]] = or i1 [[TMP33]], [[MUL_OVERFLOW]]
; STRIDED-NEXT: [[TMP34:%.*]] = icmp slt i64 [[TMP24]], 0
; STRIDED-NEXT: [[TMP15:%.*]] = select i1 [[TMP34]], i64 [[TMP25]], i64 [[TMP24]]
; STRIDED-NEXT: [[MUL1:%.*]] = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 [[TMP15]], i64 1023)
; STRIDED-NEXT: [[MUL_RESULT2:%.*]] = extractvalue { i64, i1 } [[MUL1]], 0
; STRIDED-NEXT: [[MUL_OVERFLOW3:%.*]] = extractvalue { i64, i1 } [[MUL1]], 1
; STRIDED-NEXT: [[TMP16:%.*]] = sub i64 0, [[MUL_RESULT2]]
; STRIDED-NEXT: [[TMP35:%.*]] = getelementptr i8, ptr [[P:%.*]], i64 [[MUL_RESULT2]]
; STRIDED-NEXT: [[TMP36:%.*]] = getelementptr i8, ptr [[P]], i64 [[TMP16]]
; STRIDED-NEXT: [[TMP37:%.*]] = icmp ult ptr [[TMP35]], [[P]]
; STRIDED-NEXT: [[TMP38:%.*]] = icmp ugt ptr [[TMP36]], [[P]]
; STRIDED-NEXT: [[TMP39:%.*]] = select i1 [[TMP34]], i1 [[TMP38]], i1 [[TMP37]]
; STRIDED-NEXT: [[TMP40:%.*]] = or i1 [[TMP39]], [[MUL_OVERFLOW3]]
; STRIDED-NEXT: [[TMP23:%.*]] = or i1 [[TMP13]], [[TMP40]]
; STRIDED-NEXT: br i1 [[TMP23]], label [[SCALAR_PH]], label [[VECTOR_MEMCHECK1:%.*]]
; STRIDED: vector.memcheck:
; STRIDED-NEXT: [[TMP3:%.*]] = mul i64 [[STRIDE]], 4092
; STRIDED-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[P2]], i64 [[TMP3]]
; STRIDED-NEXT: [[TMP4:%.*]] = icmp ult ptr [[P2]], [[SCEVGEP]]
; STRIDED-NEXT: [[UMIN:%.*]] = select i1 [[TMP4]], ptr [[P2]], ptr [[SCEVGEP]]
; STRIDED-NEXT: [[TMP5:%.*]] = icmp ugt ptr [[P2]], [[SCEVGEP]]
; STRIDED-NEXT: [[UMAX:%.*]] = select i1 [[TMP5]], ptr [[P2]], ptr [[SCEVGEP]]
; STRIDED-NEXT: [[SCEVGEP1:%.*]] = getelementptr i8, ptr [[UMAX]], i64 4
; STRIDED-NEXT: [[SCEVGEP2:%.*]] = getelementptr i8, ptr [[P]], i64 [[TMP3]]
; STRIDED-NEXT: [[TMP6:%.*]] = icmp ult ptr [[P]], [[SCEVGEP2]]
; STRIDED-NEXT: [[UMIN3:%.*]] = select i1 [[TMP6]], ptr [[P]], ptr [[SCEVGEP2]]
; STRIDED-NEXT: [[TMP7:%.*]] = icmp ugt ptr [[P]], [[SCEVGEP2]]
; STRIDED-NEXT: [[UMAX4:%.*]] = select i1 [[TMP7]], ptr [[P]], ptr [[SCEVGEP2]]
; STRIDED-NEXT: [[SCEVGEP5:%.*]] = getelementptr i8, ptr [[UMAX4]], i64 4
; STRIDED-NEXT: [[BOUND0:%.*]] = icmp ult ptr [[UMIN]], [[SCEVGEP5]]
; STRIDED-NEXT: [[BOUND1:%.*]] = icmp ult ptr [[UMIN3]], [[SCEVGEP1]]
; STRIDED-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
; STRIDED-NEXT: br i1 [[FOUND_CONFLICT]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]]
; STRIDED: vector.ph:
; STRIDED-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64()
; STRIDED-NEXT: [[TMP9:%.*]] = mul i64 [[TMP8]], 4
; STRIDED-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP9]]
; STRIDED-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
; STRIDED-NEXT: [[TMP10:%.*]] = call i64 @llvm.vscale.i64()
; STRIDED-NEXT: [[TMP11:%.*]] = mul i64 [[TMP10]], 4
; STRIDED-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[STRIDE]], i64 0
; STRIDED-NEXT: [[BROADCAST_SPLAT1:%.*]] = shufflevector <vscale x 4 x i64> [[BROADCAST_SPLATINSERT1]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
; STRIDED-NEXT: [[TMP12:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
; STRIDED-NEXT: [[TMP14:%.*]] = mul <vscale x 4 x i64> [[TMP12]], splat (i64 1)
; STRIDED-NEXT: [[INDUCTION:%.*]] = add <vscale x 4 x i64> zeroinitializer, [[TMP14]]
; STRIDED-NEXT: [[TMP17:%.*]] = mul i64 1, [[TMP11]]
; STRIDED-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP17]], i64 0
; STRIDED-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
; STRIDED-NEXT: br label [[VECTOR_BODY:%.*]]
; STRIDED: vector.body:
; STRIDED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; STRIDED-NEXT: [[VEC_IND:%.*]] = phi <vscale x 4 x i64> [ [[INDUCTION]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
; STRIDED-NEXT: [[TMP18:%.*]] = mul nuw nsw <vscale x 4 x i64> [[VEC_IND]], [[BROADCAST_SPLAT1]]
; STRIDED-NEXT: [[TMP19:%.*]] = getelementptr i32, ptr [[P]], <vscale x 4 x i64> [[TMP18]]
; STRIDED-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> [[TMP19]], i32 4, <vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> poison), !alias.scope [[META8:![0-9]+]]
; STRIDED-NEXT: [[TMP20:%.*]] = add <vscale x 4 x i32> [[WIDE_MASKED_GATHER]], splat (i32 1)
; STRIDED-NEXT: [[TMP21:%.*]] = getelementptr i32, ptr [[P2]], <vscale x 4 x i64> [[TMP18]]
; STRIDED-NEXT: call void @llvm.masked.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> [[TMP20]], <vscale x 4 x ptr> [[TMP21]], i32 4, <vscale x 4 x i1> splat (i1 true)), !alias.scope [[META11:![0-9]+]], !noalias [[META8]]
; STRIDED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP11]]
; STRIDED-NEXT: [[VEC_IND_NEXT]] = add <vscale x 4 x i64> [[VEC_IND]], [[BROADCAST_SPLAT]]
; STRIDED-NEXT: [[TMP22:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; STRIDED-NEXT: br i1 [[TMP22]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]]
; STRIDED: middle.block:
; STRIDED-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
; STRIDED-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
; STRIDED: scalar.ph:
; STRIDED-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ], [ 0, [[VECTOR_MEMCHECK]] ], [ 0, [[VECTOR_MEMCHECK1]] ]
; STRIDED-NEXT: br label [[LOOP:%.*]]
; STRIDED: loop:
; STRIDED-NEXT: [[I:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
; STRIDED-NEXT: [[OFFSET:%.*]] = mul nuw nsw i64 [[I]], [[STRIDE]]
; STRIDED-NEXT: [[Q0:%.*]] = getelementptr i32, ptr [[P]], i64 [[OFFSET]]
; STRIDED-NEXT: [[X0:%.*]] = load i32, ptr [[Q0]], align 4
; STRIDED-NEXT: [[Y0:%.*]] = add i32 [[X0]], 1
; STRIDED-NEXT: [[Q1:%.*]] = getelementptr i32, ptr [[P2]], i64 [[OFFSET]]
; STRIDED-NEXT: store i32 [[Y0]], ptr [[Q1]], align 4
; STRIDED-NEXT: [[NEXTI]] = add i64 [[I]], 1
; STRIDED-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024
; STRIDED-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP14:![0-9]+]]
; STRIDED: exit:
; STRIDED-NEXT: ret void
;
entry:
  br label %loop
loop:
  %i = phi i64 [0, %entry], [%nexti, %loop]

  %offset = mul nsw nuw i64 %i, %stride
  %q0 = getelementptr i32, ptr %p, i64 %offset
  %x0 = load i32, ptr %q0
  %y0 = add i32 %x0, 1
  %q1 = getelementptr i32, ptr %p2, i64 %offset
  store i32 %y0, ptr %q1

  %nexti = add i64 %i, 1
  %done = icmp eq i64 %nexti, 1024
  br i1 %done, label %exit, label %loop
exit:
  ret void
}
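
; Offset IV with a runtime stride, loading and storing through the same base
; pointer: NOSTRIDED versions on stride == 1; STRIDED stays scalar.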
define void @double_stride_int_iv(ptr %p, ptr %p2, i64 %stride) {
; NOSTRIDED-LABEL: @double_stride_int_iv(
; NOSTRIDED-NEXT: entry:
; NOSTRIDED-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; NOSTRIDED-NEXT: [[TMP1:%.*]] = mul i64 [[TMP0]], 4
; NOSTRIDED-NEXT: [[TMP2:%.*]] = call i64 @llvm.umax.i64(i64 8, i64 [[TMP1]])
; NOSTRIDED-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP2]]
; NOSTRIDED-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_SCEVCHECK:%.*]]
; NOSTRIDED: vector.scevcheck:
; NOSTRIDED-NEXT: [[IDENT_CHECK:%.*]] = icmp ne i64 [[STRIDE:%.*]], 1
; NOSTRIDED-NEXT: br i1 [[IDENT_CHECK]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]]
; NOSTRIDED: vector.ph:
; NOSTRIDED-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
; NOSTRIDED-NEXT: [[TMP4:%.*]] = mul i64 [[TMP3]], 4
; NOSTRIDED-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP4]]
; NOSTRIDED-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
; NOSTRIDED-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
; NOSTRIDED-NEXT: [[TMP6:%.*]] = mul i64 [[TMP5]], 4
; NOSTRIDED-NEXT: br label [[VECTOR_BODY:%.*]]
; NOSTRIDED: vector.body:
; NOSTRIDED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; NOSTRIDED-NEXT: [[TMP7:%.*]] = add i64 [[INDEX]], 0
; NOSTRIDED-NEXT: [[TMP8:%.*]] = getelementptr i32, ptr [[P:%.*]], i64 [[TMP7]]
; NOSTRIDED-NEXT: [[TMP9:%.*]] = getelementptr i32, ptr [[TMP8]], i32 0
; NOSTRIDED-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP9]], align 4
; NOSTRIDED-NEXT: [[TMP10:%.*]] = add <vscale x 4 x i32> [[WIDE_LOAD]], splat (i32 1)
; NOSTRIDED-NEXT: store <vscale x 4 x i32> [[TMP10]], ptr [[TMP9]], align 4
; NOSTRIDED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]]
; NOSTRIDED-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; NOSTRIDED-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
; NOSTRIDED: middle.block:
; NOSTRIDED-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
; NOSTRIDED-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
; NOSTRIDED: scalar.ph:
; NOSTRIDED-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ], [ 0, [[VECTOR_SCEVCHECK]] ]
; NOSTRIDED-NEXT: [[BC_RESUME_VAL1:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY]] ], [ 0, [[VECTOR_SCEVCHECK]] ]
; NOSTRIDED-NEXT: br label [[LOOP:%.*]]
; NOSTRIDED: loop:
; NOSTRIDED-NEXT: [[I:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
; NOSTRIDED-NEXT: [[OFFSET:%.*]] = phi i64 [ [[BC_RESUME_VAL1]], [[SCALAR_PH]] ], [ [[OFFSET_NEXT:%.*]], [[LOOP]] ]
; NOSTRIDED-NEXT: [[Q0:%.*]] = getelementptr i32, ptr [[P]], i64 [[OFFSET]]
; NOSTRIDED-NEXT: [[X0:%.*]] = load i32, ptr [[Q0]], align 4
; NOSTRIDED-NEXT: [[Y0:%.*]] = add i32 [[X0]], 1
; NOSTRIDED-NEXT: [[Q1:%.*]] = getelementptr i32, ptr [[P]], i64 [[OFFSET]]
; NOSTRIDED-NEXT: store i32 [[Y0]], ptr [[Q1]], align 4
; NOSTRIDED-NEXT: [[OFFSET_NEXT]] = add nuw nsw i64 [[OFFSET]], [[STRIDE]]
; NOSTRIDED-NEXT: [[NEXTI]] = add i64 [[I]], 1
; NOSTRIDED-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024
; NOSTRIDED-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP15:![0-9]+]]
; NOSTRIDED: exit:
; NOSTRIDED-NEXT: ret void
;
; STRIDED-LABEL: @double_stride_int_iv(
; STRIDED-NEXT: entry:
; STRIDED-NEXT: br label [[LOOP:%.*]]
; STRIDED: loop:
; STRIDED-NEXT: [[I:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
; STRIDED-NEXT: [[OFFSET:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[OFFSET_NEXT:%.*]], [[LOOP]] ]
; STRIDED-NEXT: [[Q0:%.*]] = getelementptr i32, ptr [[P:%.*]], i64 [[OFFSET]]
; STRIDED-NEXT: [[X0:%.*]] = load i32, ptr [[Q0]], align 4
; STRIDED-NEXT: [[Y0:%.*]] = add i32 [[X0]], 1
; STRIDED-NEXT: [[Q1:%.*]] = getelementptr i32, ptr [[P]], i64 [[OFFSET]]
; STRIDED-NEXT: store i32 [[Y0]], ptr [[Q1]], align 4
; STRIDED-NEXT: [[OFFSET_NEXT]] = add nuw nsw i64 [[OFFSET]], [[STRIDE:%.*]]
; STRIDED-NEXT: [[NEXTI]] = add i64 [[I]], 1
; STRIDED-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024
; STRIDED-NEXT: br i1 [[DONE]], label [[EXIT:%.*]], label [[LOOP]]
; STRIDED: exit:
; STRIDED-NEXT: ret void
;
entry:
  br label %loop
loop:
  %i = phi i64 [0, %entry], [%nexti, %loop]
  %offset = phi i64 [0, %entry], [%offset.next, %loop]

  %q0 = getelementptr i32, ptr %p, i64 %offset
  %x0 = load i32, ptr %q0
  %y0 = add i32 %x0, 1
  %q1 = getelementptr i32, ptr %p, i64 %offset
  store i32 %y0, ptr %q1

  %offset.next = add nsw nuw i64 %offset, %stride
  %nexti = add i64 %i, 1
  %done = icmp eq i64 %nexti, 1024
  br i1 %done, label %exit, label %loop
exit:
  ret void
}
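
; Two pointer IVs advancing by the same runtime stride: NOSTRIDED stays scalar,
; while STRIDED vectorizes with gathers/scatters behind a runtime alias check.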
define void @double_stride_ptr_iv(ptr %p, ptr %p2, i64 %stride) {
; NOSTRIDED-LABEL: @double_stride_ptr_iv(
; NOSTRIDED-NEXT: entry:
; NOSTRIDED-NEXT: br label [[LOOP:%.*]]
; NOSTRIDED: loop:
; NOSTRIDED-NEXT: [[I:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
; NOSTRIDED-NEXT: [[PTR:%.*]] = phi ptr [ [[P:%.*]], [[ENTRY]] ], [ [[PTR_NEXT:%.*]], [[LOOP]] ]
; NOSTRIDED-NEXT: [[PTR2:%.*]] = phi ptr [ [[P2:%.*]], [[ENTRY]] ], [ [[PTR2_NEXT:%.*]], [[LOOP]] ]
; NOSTRIDED-NEXT: [[X0:%.*]] = load i32, ptr [[PTR]], align 4
; NOSTRIDED-NEXT: [[Y0:%.*]] = add i32 [[X0]], 1
; NOSTRIDED-NEXT: store i32 [[Y0]], ptr [[PTR2]], align 4
; NOSTRIDED-NEXT: [[PTR_NEXT]] = getelementptr inbounds i8, ptr [[PTR]], i64 [[STRIDE:%.*]]
; NOSTRIDED-NEXT: [[PTR2_NEXT]] = getelementptr inbounds i8, ptr [[PTR2]], i64 [[STRIDE]]
; NOSTRIDED-NEXT: [[NEXTI]] = add i64 [[I]], 1
; NOSTRIDED-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024
; NOSTRIDED-NEXT: br i1 [[DONE]], label [[EXIT:%.*]], label [[LOOP]]
; NOSTRIDED: exit:
; NOSTRIDED-NEXT: ret void
;
; STRIDED-LABEL: @double_stride_ptr_iv(
; STRIDED-NEXT: entry:
; STRIDED-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; STRIDED-NEXT: [[TMP1:%.*]] = mul i64 [[TMP0]], 4
; STRIDED-NEXT: [[TMP2:%.*]] = call i64 @llvm.umax.i64(i64 32, i64 [[TMP1]])
; STRIDED-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP2]]
; STRIDED-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_MEMCHECK:%.*]]
; STRIDED: vector.memcheck:
; STRIDED-NEXT: [[TMP3:%.*]] = mul i64 [[STRIDE:%.*]], 1023
; STRIDED-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[P2:%.*]], i64 [[TMP3]]
; STRIDED-NEXT: [[TMP4:%.*]] = icmp ult ptr [[P2]], [[SCEVGEP]]
; STRIDED-NEXT: [[UMIN:%.*]] = select i1 [[TMP4]], ptr [[P2]], ptr [[SCEVGEP]]
; STRIDED-NEXT: [[TMP5:%.*]] = icmp ugt ptr [[P2]], [[SCEVGEP]]
; STRIDED-NEXT: [[UMAX:%.*]] = select i1 [[TMP5]], ptr [[P2]], ptr [[SCEVGEP]]
; STRIDED-NEXT: [[SCEVGEP1:%.*]] = getelementptr i8, ptr [[UMAX]], i64 4
; STRIDED-NEXT: [[SCEVGEP2:%.*]] = getelementptr i8, ptr [[P:%.*]], i64 [[TMP3]]
; STRIDED-NEXT: [[TMP6:%.*]] = icmp ult ptr [[P]], [[SCEVGEP2]]
; STRIDED-NEXT: [[UMIN3:%.*]] = select i1 [[TMP6]], ptr [[P]], ptr [[SCEVGEP2]]
; STRIDED-NEXT: [[TMP7:%.*]] = icmp ugt ptr [[P]], [[SCEVGEP2]]
; STRIDED-NEXT: [[UMAX4:%.*]] = select i1 [[TMP7]], ptr [[P]], ptr [[SCEVGEP2]]
; STRIDED-NEXT: [[SCEVGEP5:%.*]] = getelementptr i8, ptr [[UMAX4]], i64 4
; STRIDED-NEXT: [[BOUND0:%.*]] = icmp ult ptr [[UMIN]], [[SCEVGEP5]]
; STRIDED-NEXT: [[BOUND1:%.*]] = icmp ult ptr [[UMIN3]], [[SCEVGEP1]]
; STRIDED-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
; STRIDED-NEXT: br i1 [[FOUND_CONFLICT]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]]
; STRIDED: vector.ph:
; STRIDED-NEXT: [[TMP8:%.*]] = call i64 @llvm.vscale.i64()
; STRIDED-NEXT: [[TMP9:%.*]] = mul i64 [[TMP8]], 4
; STRIDED-NEXT: [[N_MOD_VF:%.*]] = urem i64 1024, [[TMP9]]
; STRIDED-NEXT: [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
; STRIDED-NEXT: [[TMP12:%.*]] = call i64 @llvm.vscale.i64()
; STRIDED-NEXT: [[TMP13:%.*]] = mul i64 [[TMP12]], 4
; STRIDED-NEXT: [[TMP10:%.*]] = mul i64 [[N_VEC]], [[STRIDE]]
; STRIDED-NEXT: [[IND_END:%.*]] = getelementptr i8, ptr [[P]], i64 [[TMP10]]
; STRIDED-NEXT: [[TMP11:%.*]] = mul i64 [[N_VEC]], [[STRIDE]]
; STRIDED-NEXT: [[IND_END7:%.*]] = getelementptr i8, ptr [[P2]], i64 [[TMP11]]
; STRIDED-NEXT: br label [[VECTOR_BODY:%.*]]
; STRIDED: vector.body:
; STRIDED-NEXT: [[POINTER_PHI:%.*]] = phi ptr [ [[P]], [[VECTOR_PH]] ], [ [[PTR_IND:%.*]], [[VECTOR_BODY]] ]
; STRIDED-NEXT: [[POINTER_PHI11:%.*]] = phi ptr [ [[P2]], [[VECTOR_PH]] ], [ [[PTR_IND12:%.*]], [[VECTOR_BODY]] ]
; STRIDED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; STRIDED-NEXT: [[TMP14:%.*]] = call i64 @llvm.vscale.i64()
; STRIDED-NEXT: [[TMP15:%.*]] = mul i64 [[TMP14]], 4
; STRIDED-NEXT: [[TMP16:%.*]] = mul i64 [[TMP15]], 1
; STRIDED-NEXT: [[TMP17:%.*]] = mul i64 [[STRIDE]], [[TMP16]]
; STRIDED-NEXT: [[TMP18:%.*]] = mul i64 [[TMP15]], 0
; STRIDED-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP18]], i64 0
; STRIDED-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i64> [[DOTSPLATINSERT]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
; STRIDED-NEXT: [[TMP19:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
; STRIDED-NEXT: [[TMP20:%.*]] = add <vscale x 4 x i64> [[DOTSPLAT]], [[TMP19]]
; STRIDED-NEXT: [[DOTSPLATINSERT9:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[STRIDE]], i64 0
; STRIDED-NEXT: [[DOTSPLAT10:%.*]] = shufflevector <vscale x 4 x i64> [[DOTSPLATINSERT9]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
; STRIDED-NEXT: [[TMP21:%.*]] = mul <vscale x 4 x i64> [[TMP20]], [[DOTSPLAT10]]
; STRIDED-NEXT: [[VECTOR_GEP:%.*]] = getelementptr i8, ptr [[POINTER_PHI]], <vscale x 4 x i64> [[TMP21]]
; STRIDED-NEXT: [[TMP22:%.*]] = call i64 @llvm.vscale.i64()
; STRIDED-NEXT: [[TMP23:%.*]] = mul i64 [[TMP22]], 4
; STRIDED-NEXT: [[TMP24:%.*]] = mul i64 [[TMP23]], 1
; STRIDED-NEXT: [[TMP25:%.*]] = mul i64 [[STRIDE]], [[TMP24]]
; STRIDED-NEXT: [[TMP26:%.*]] = mul i64 [[TMP23]], 0
; STRIDED-NEXT: [[DOTSPLATINSERT13:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP26]], i64 0
; STRIDED-NEXT: [[DOTSPLAT14:%.*]] = shufflevector <vscale x 4 x i64> [[DOTSPLATINSERT13]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
; STRIDED-NEXT: [[TMP27:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
; STRIDED-NEXT: [[TMP28:%.*]] = add <vscale x 4 x i64> [[DOTSPLAT14]], [[TMP27]]
; STRIDED-NEXT: [[TMP29:%.*]] = mul <vscale x 4 x i64> [[TMP28]], [[DOTSPLAT10]]
; STRIDED-NEXT: [[VECTOR_GEP17:%.*]] = getelementptr i8, ptr [[POINTER_PHI11]], <vscale x 4 x i64> [[TMP29]]
; STRIDED-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> [[VECTOR_GEP]], i32 4, <vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> poison), !alias.scope [[META15:![0-9]+]]
; STRIDED-NEXT: [[TMP30:%.*]] = add <vscale x 4 x i32> [[WIDE_MASKED_GATHER]], splat (i32 1)
; STRIDED-NEXT: call void @llvm.masked.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> [[TMP30]], <vscale x 4 x ptr> [[VECTOR_GEP17]], i32 4, <vscale x 4 x i1> splat (i1 true)), !alias.scope [[META18:![0-9]+]], !noalias [[META15]]
; STRIDED-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP13]]
; STRIDED-NEXT: [[PTR_IND]] = getelementptr i8, ptr [[POINTER_PHI]], i64 [[TMP17]]
; STRIDED-NEXT: [[PTR_IND12]] = getelementptr i8, ptr [[POINTER_PHI11]], i64 [[TMP25]]
; STRIDED-NEXT: [[TMP31:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; STRIDED-NEXT: br i1 [[TMP31]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]]
; STRIDED: middle.block:
; STRIDED-NEXT: [[CMP_N:%.*]] = icmp eq i64 1024, [[N_VEC]]
; STRIDED-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
; STRIDED: scalar.ph:
; STRIDED-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ], [ 0, [[VECTOR_MEMCHECK]] ]
; STRIDED-NEXT: [[BC_RESUME_VAL6:%.*]] = phi ptr [ [[IND_END]], [[MIDDLE_BLOCK]] ], [ [[P]], [[ENTRY]] ], [ [[P]], [[VECTOR_MEMCHECK]] ]
; STRIDED-NEXT: [[BC_RESUME_VAL8:%.*]] = phi ptr [ [[IND_END7]], [[MIDDLE_BLOCK]] ], [ [[P2]], [[ENTRY]] ], [ [[P2]], [[VECTOR_MEMCHECK]] ]
; STRIDED-NEXT: br label [[LOOP:%.*]]
; STRIDED: loop:
; STRIDED-NEXT: [[I:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[NEXTI:%.*]], [[LOOP]] ]
; STRIDED-NEXT: [[PTR:%.*]] = phi ptr [ [[BC_RESUME_VAL6]], [[SCALAR_PH]] ], [ [[PTR_NEXT:%.*]], [[LOOP]] ]
; STRIDED-NEXT: [[PTR2:%.*]] = phi ptr [ [[BC_RESUME_VAL8]], [[SCALAR_PH]] ], [ [[PTR2_NEXT:%.*]], [[LOOP]] ]
; STRIDED-NEXT: [[X0:%.*]] = load i32, ptr [[PTR]], align 4
; STRIDED-NEXT: [[Y0:%.*]] = add i32 [[X0]], 1
; STRIDED-NEXT: store i32 [[Y0]], ptr [[PTR2]], align 4
; STRIDED-NEXT: [[PTR_NEXT]] = getelementptr inbounds i8, ptr [[PTR]], i64 [[STRIDE]]
; STRIDED-NEXT: [[PTR2_NEXT]] = getelementptr inbounds i8, ptr [[PTR2]], i64 [[STRIDE]]
; STRIDED-NEXT: [[NEXTI]] = add i64 [[I]], 1
; STRIDED-NEXT: [[DONE:%.*]] = icmp eq i64 [[NEXTI]], 1024
; STRIDED-NEXT: br i1 [[DONE]], label [[EXIT]], label [[LOOP]], !llvm.loop [[LOOP21:![0-9]+]]
; STRIDED: exit:
; STRIDED-NEXT: ret void
;
entry:
  br label %loop
loop:
  %i = phi i64 [0, %entry], [%nexti, %loop]
  %ptr = phi ptr [%p, %entry], [%ptr.next, %loop]
  %ptr2 = phi ptr [%p2, %entry], [%ptr2.next, %loop]

  %x0 = load i32, ptr %ptr
  %y0 = add i32 %x0, 1
  store i32 %y0, ptr %ptr2

  %ptr.next = getelementptr inbounds i8, ptr %ptr, i64 %stride
  %ptr2.next = getelementptr inbounds i8, ptr %ptr2, i64 %stride
  %nexti = add i64 %i, 1
  %done = icmp eq i64 %nexti, 1024
  br i1 %done, label %exit, label %loop
exit:
  ret void
}