; NOTE: Assertions have been autogenerated by utils/update_test_checks.py ; RUN: opt < %s -passes=loop-vectorize,instsimplify -force-vector-interleave=1 -S | FileCheck %s --check-prefixes=TFNONE ; RUN: opt < %s -passes=loop-vectorize,instsimplify,simplifycfg -force-vector-interleave=1 -prefer-predicate-over-epilogue=predicate-dont-vectorize -S | FileCheck %s --check-prefixes=TFCOMMON,TFALWAYS ; RUN: opt < %s -passes=loop-vectorize,instsimplify,simplifycfg -force-vector-interleave=1 -prefer-predicate-over-epilogue=predicate-else-scalar-epilogue -S | FileCheck %s --check-prefixes=TFCOMMON,TFFALLBACK ; RUN: opt < %s -passes=loop-vectorize,instsimplify,simplifycfg -force-vector-interleave=2 -prefer-predicate-over-epilogue=predicate-dont-vectorize -S | FileCheck %s --check-prefixes=TFA_INTERLEAVE target triple = "aarch64-unknown-linux-gnu" ; A call whose argument must be widened. We check that tail folding uses the ; primary mask, and that without tail folding we synthesize an all-true mask. define void @test_widen(ptr noalias %a, ptr readnone %b) #4 { ; TFNONE-LABEL: @test_widen( ; TFNONE-NEXT: entry: ; TFNONE-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() ; TFNONE-NEXT: [[TMP1:%.*]] = mul i64 [[TMP0]], 2 ; TFNONE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1025, [[TMP1]] ; TFNONE-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; TFNONE: vector.ph: ; TFNONE-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() ; TFNONE-NEXT: [[TMP3:%.*]] = mul i64 [[TMP2]], 2 ; TFNONE-NEXT: [[N_MOD_VF:%.*]] = urem i64 1025, [[TMP3]] ; TFNONE-NEXT: [[N_VEC:%.*]] = sub i64 1025, [[N_MOD_VF]] ; TFNONE-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() ; TFNONE-NEXT: [[TMP5:%.*]] = mul i64 [[TMP4]], 2 ; TFNONE-NEXT: br label [[VECTOR_BODY:%.*]] ; TFNONE: vector.body: ; TFNONE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] ; TFNONE-NEXT: [[TMP6:%.*]] = getelementptr i64, ptr [[B:%.*]], i64 [[INDEX]] ; TFNONE-NEXT: [[WIDE_LOAD:%.*]] = load , ptr [[TMP6]], align 8 ; TFNONE-NEXT: [[TMP7:%.*]] = call @foo_vector( [[WIDE_LOAD]], shufflevector ( insertelement ( poison, i1 true, i64 0), poison, zeroinitializer)) ; TFNONE-NEXT: [[TMP8:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[INDEX]] ; TFNONE-NEXT: store [[TMP7]], ptr [[TMP8]], align 8 ; TFNONE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] ; TFNONE-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; TFNONE-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; TFNONE: middle.block: ; TFNONE-NEXT: br i1 false, label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]] ; TFNONE: scalar.ph: ; TFNONE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ] ; TFNONE-NEXT: br label [[FOR_BODY:%.*]] ; TFNONE: for.body: ; TFNONE-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ] ; TFNONE-NEXT: [[GEP:%.*]] = getelementptr i64, ptr [[B]], i64 [[INDVARS_IV]] ; TFNONE-NEXT: [[LOAD:%.*]] = load i64, ptr [[GEP]], align 8 ; TFNONE-NEXT: [[CALL:%.*]] = call i64 @foo(i64 [[LOAD]]) #[[ATTR3:[0-9]+]] ; TFNONE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[INDVARS_IV]] ; TFNONE-NEXT: store i64 [[CALL]], ptr [[ARRAYIDX]], align 8 ; TFNONE-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 ; TFNONE-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 1025 ; TFNONE-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; TFNONE: for.cond.cleanup: ; TFNONE-NEXT: ret void ; ; TFCOMMON-LABEL: @test_widen( ; TFCOMMON-NEXT: entry: ; TFCOMMON-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() ; TFCOMMON-NEXT: [[TMP1:%.*]] = mul i64 [[TMP0]], 2 ; TFCOMMON-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() ; TFCOMMON-NEXT: [[TMP3:%.*]] = mul i64 [[TMP2]], 2 ; TFCOMMON-NEXT: [[TMP4:%.*]] = sub i64 [[TMP3]], 1 ; TFCOMMON-NEXT: [[N_RND_UP:%.*]] = add i64 1025, [[TMP4]] ; TFCOMMON-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]] ; TFCOMMON-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] ; TFCOMMON-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64() ; TFCOMMON-NEXT: [[TMP6:%.*]] = mul i64 [[TMP5]], 2 ; TFCOMMON-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call @llvm.get.active.lane.mask.nxv2i1.i64(i64 0, i64 1025) ; TFCOMMON-NEXT: br label [[VECTOR_BODY:%.*]] ; TFCOMMON: vector.body: ; TFCOMMON-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] ; TFCOMMON-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi [ [[ACTIVE_LANE_MASK_ENTRY]], [[ENTRY]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], [[VECTOR_BODY]] ] ; TFCOMMON-NEXT: [[TMP7:%.*]] = getelementptr i64, ptr [[B:%.*]], i64 [[INDEX]] ; TFCOMMON-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call @llvm.masked.load.nxv2i64.p0(ptr [[TMP7]], i32 8, [[ACTIVE_LANE_MASK]], poison) ; TFCOMMON-NEXT: [[TMP8:%.*]] = call @foo_vector( [[WIDE_MASKED_LOAD]], [[ACTIVE_LANE_MASK]]) ; TFCOMMON-NEXT: [[TMP9:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[INDEX]] ; TFCOMMON-NEXT: call void @llvm.masked.store.nxv2i64.p0( [[TMP8]], ptr [[TMP9]], i32 8, [[ACTIVE_LANE_MASK]]) ; TFCOMMON-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP6]] ; TFCOMMON-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call @llvm.get.active.lane.mask.nxv2i1.i64(i64 [[INDEX_NEXT]], i64 1025) ; TFCOMMON-NEXT: [[TMP10:%.*]] = xor [[ACTIVE_LANE_MASK_NEXT]], shufflevector ( insertelement ( poison, i1 true, i64 0), poison, zeroinitializer) ; TFCOMMON-NEXT: [[TMP11:%.*]] = extractelement [[TMP10]], i32 0 ; TFCOMMON-NEXT: br i1 [[TMP11]], label [[FOR_COND_CLEANUP:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; TFCOMMON: for.cond.cleanup: ; TFCOMMON-NEXT: ret void ; ; TFA_INTERLEAVE-LABEL: @test_widen( ; TFA_INTERLEAVE-NEXT: entry: ; TFA_INTERLEAVE-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() ; TFA_INTERLEAVE-NEXT: [[TMP1:%.*]] = mul i64 [[TMP0]], 4 ; TFA_INTERLEAVE-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() ; TFA_INTERLEAVE-NEXT: [[TMP3:%.*]] = mul i64 [[TMP2]], 4 ; TFA_INTERLEAVE-NEXT: [[TMP4:%.*]] = sub i64 [[TMP3]], 1 ; TFA_INTERLEAVE-NEXT: [[N_RND_UP:%.*]] = add i64 1025, [[TMP4]] ; TFA_INTERLEAVE-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]] ; TFA_INTERLEAVE-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] ; TFA_INTERLEAVE-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64() ; TFA_INTERLEAVE-NEXT: [[TMP6:%.*]] = mul i64 [[TMP5]], 4 ; TFA_INTERLEAVE-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64() ; TFA_INTERLEAVE-NEXT: [[TMP8:%.*]] = mul i64 [[TMP7]], 2 ; TFA_INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call @llvm.get.active.lane.mask.nxv2i1.i64(i64 0, i64 1025) ; TFA_INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK_ENTRY1:%.*]] = call @llvm.get.active.lane.mask.nxv2i1.i64(i64 [[TMP8]], i64 1025) ; TFA_INTERLEAVE-NEXT: br label [[VECTOR_BODY:%.*]] ; TFA_INTERLEAVE: vector.body: ; TFA_INTERLEAVE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] ; TFA_INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi [ [[ACTIVE_LANE_MASK_ENTRY]], [[ENTRY]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], [[VECTOR_BODY]] ] ; TFA_INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK2:%.*]] = phi [ [[ACTIVE_LANE_MASK_ENTRY1]], [[ENTRY]] ], [ [[ACTIVE_LANE_MASK_NEXT5:%.*]], [[VECTOR_BODY]] ] ; TFA_INTERLEAVE-NEXT: [[TMP9:%.*]] = getelementptr i64, ptr [[B:%.*]], i64 [[INDEX]] ; TFA_INTERLEAVE-NEXT: [[TMP10:%.*]] = call i64 @llvm.vscale.i64() ; TFA_INTERLEAVE-NEXT: [[TMP11:%.*]] = mul i64 [[TMP10]], 2 ; TFA_INTERLEAVE-NEXT: [[TMP12:%.*]] = getelementptr i64, ptr [[TMP9]], i64 [[TMP11]] ; TFA_INTERLEAVE-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call @llvm.masked.load.nxv2i64.p0(ptr [[TMP9]], i32 8, [[ACTIVE_LANE_MASK]], poison) ; TFA_INTERLEAVE-NEXT: [[WIDE_MASKED_LOAD3:%.*]] = call @llvm.masked.load.nxv2i64.p0(ptr [[TMP12]], i32 8, [[ACTIVE_LANE_MASK2]], poison) ; TFA_INTERLEAVE-NEXT: [[TMP13:%.*]] = call @foo_vector( [[WIDE_MASKED_LOAD]], [[ACTIVE_LANE_MASK]]) ; TFA_INTERLEAVE-NEXT: [[TMP14:%.*]] = call @foo_vector( [[WIDE_MASKED_LOAD3]], [[ACTIVE_LANE_MASK2]]) ; TFA_INTERLEAVE-NEXT: [[TMP15:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[INDEX]] ; TFA_INTERLEAVE-NEXT: [[TMP16:%.*]] = call i64 @llvm.vscale.i64() ; TFA_INTERLEAVE-NEXT: [[TMP17:%.*]] = mul i64 [[TMP16]], 2 ; TFA_INTERLEAVE-NEXT: [[TMP18:%.*]] = getelementptr inbounds i64, ptr [[TMP15]], i64 [[TMP17]] ; TFA_INTERLEAVE-NEXT: call void @llvm.masked.store.nxv2i64.p0( [[TMP13]], ptr [[TMP15]], i32 8, [[ACTIVE_LANE_MASK]]) ; TFA_INTERLEAVE-NEXT: call void @llvm.masked.store.nxv2i64.p0( [[TMP14]], ptr [[TMP18]], i32 8, [[ACTIVE_LANE_MASK2]]) ; TFA_INTERLEAVE-NEXT: [[INDEX_NEXT:%.*]] = add i64 [[INDEX]], [[TMP6]] ; TFA_INTERLEAVE-NEXT: [[TMP19:%.*]] = call i64 @llvm.vscale.i64() ; TFA_INTERLEAVE-NEXT: [[TMP20:%.*]] = mul i64 [[TMP19]], 2 ; TFA_INTERLEAVE-NEXT: [[TMP21:%.*]] = add i64 [[INDEX_NEXT]], [[TMP20]] ; TFA_INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call @llvm.get.active.lane.mask.nxv2i1.i64(i64 [[INDEX_NEXT]], i64 1025) ; TFA_INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK_NEXT5]] = call @llvm.get.active.lane.mask.nxv2i1.i64(i64 [[TMP21]], i64 1025) ; TFA_INTERLEAVE-NEXT: [[TMP22:%.*]] = xor [[ACTIVE_LANE_MASK_NEXT]], shufflevector ( insertelement ( poison, i1 true, i64 0), poison, zeroinitializer) ; TFA_INTERLEAVE-NEXT: [[TMP23:%.*]] = extractelement [[TMP22]], i32 0 ; TFA_INTERLEAVE-NEXT: br i1 [[TMP23]], label [[FOR_COND_CLEANUP:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; TFA_INTERLEAVE: for.cond.cleanup: ; TFA_INTERLEAVE-NEXT: ret void ; entry: br label %for.body for.body: %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ] %gep = getelementptr i64, ptr %b, i64 %indvars.iv %load = load i64, ptr %gep %call = call i64 @foo(i64 %load) #1 %arrayidx = getelementptr inbounds i64, ptr %a, i64 %indvars.iv store i64 %call, ptr %arrayidx %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1 %exitcond = icmp eq i64 %indvars.iv.next, 1025 br i1 %exitcond, label %for.cond.cleanup, label %for.body for.cond.cleanup: ret void } ; Check that a simple conditional call can be vectorized. define void @test_if_then(ptr noalias %a, ptr readnone %b) #4 { ; TFNONE-LABEL: @test_if_then( ; TFNONE-NEXT: entry: ; TFNONE-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() ; TFNONE-NEXT: [[TMP1:%.*]] = mul i64 [[TMP0]], 2 ; TFNONE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1025, [[TMP1]] ; TFNONE-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; TFNONE: vector.ph: ; TFNONE-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() ; TFNONE-NEXT: [[TMP3:%.*]] = mul i64 [[TMP2]], 2 ; TFNONE-NEXT: [[N_MOD_VF:%.*]] = urem i64 1025, [[TMP3]] ; TFNONE-NEXT: [[N_VEC:%.*]] = sub i64 1025, [[N_MOD_VF]] ; TFNONE-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() ; TFNONE-NEXT: [[TMP5:%.*]] = mul i64 [[TMP4]], 2 ; TFNONE-NEXT: br label [[VECTOR_BODY:%.*]] ; TFNONE: vector.body: ; TFNONE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] ; TFNONE-NEXT: [[TMP6:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[INDEX]] ; TFNONE-NEXT: [[WIDE_LOAD:%.*]] = load , ptr [[TMP6]], align 8 ; TFNONE-NEXT: [[TMP7:%.*]] = icmp ugt [[WIDE_LOAD]], shufflevector ( insertelement ( poison, i64 50, i64 0), poison, zeroinitializer) ; TFNONE-NEXT: [[TMP8:%.*]] = call @foo_vector( [[WIDE_LOAD]], [[TMP7]]) ; TFNONE-NEXT: [[TMP9:%.*]] = xor [[TMP7]], shufflevector ( insertelement ( poison, i1 true, i64 0), poison, zeroinitializer) ; TFNONE-NEXT: [[PREDPHI:%.*]] = select [[TMP9]], zeroinitializer, [[TMP8]] ; TFNONE-NEXT: [[TMP10:%.*]] = getelementptr inbounds i64, ptr [[B:%.*]], i64 [[INDEX]] ; TFNONE-NEXT: store [[PREDPHI]], ptr [[TMP10]], align 8 ; TFNONE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] ; TFNONE-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; TFNONE-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; TFNONE: middle.block: ; TFNONE-NEXT: br i1 false, label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]] ; TFNONE: scalar.ph: ; TFNONE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ] ; TFNONE-NEXT: br label [[FOR_BODY:%.*]] ; TFNONE: for.body: ; TFNONE-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], [[IF_END:%.*]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ] ; TFNONE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[INDVARS_IV]] ; TFNONE-NEXT: [[TMP12:%.*]] = load i64, ptr [[ARRAYIDX]], align 8 ; TFNONE-NEXT: [[CMP:%.*]] = icmp ugt i64 [[TMP12]], 50 ; TFNONE-NEXT: br i1 [[CMP]], label [[IF_THEN:%.*]], label [[IF_END]] ; TFNONE: if.then: ; TFNONE-NEXT: [[TMP13:%.*]] = call i64 @foo(i64 [[TMP12]]) #[[ATTR3]] ; TFNONE-NEXT: br label [[IF_END]] ; TFNONE: if.end: ; TFNONE-NEXT: [[TMP14:%.*]] = phi i64 [ [[TMP13]], [[IF_THEN]] ], [ 0, [[FOR_BODY]] ] ; TFNONE-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i64, ptr [[B]], i64 [[INDVARS_IV]] ; TFNONE-NEXT: store i64 [[TMP14]], ptr [[ARRAYIDX1]], align 8 ; TFNONE-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 ; TFNONE-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 1025 ; TFNONE-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; TFNONE: for.cond.cleanup: ; TFNONE-NEXT: ret void ; ; TFCOMMON-LABEL: @test_if_then( ; TFCOMMON-NEXT: entry: ; TFCOMMON-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() ; TFCOMMON-NEXT: [[TMP1:%.*]] = mul i64 [[TMP0]], 2 ; TFCOMMON-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() ; TFCOMMON-NEXT: [[TMP3:%.*]] = mul i64 [[TMP2]], 2 ; TFCOMMON-NEXT: [[TMP4:%.*]] = sub i64 [[TMP3]], 1 ; TFCOMMON-NEXT: [[N_RND_UP:%.*]] = add i64 1025, [[TMP4]] ; TFCOMMON-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]] ; TFCOMMON-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] ; TFCOMMON-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64() ; TFCOMMON-NEXT: [[TMP6:%.*]] = mul i64 [[TMP5]], 2 ; TFCOMMON-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call @llvm.get.active.lane.mask.nxv2i1.i64(i64 0, i64 1025) ; TFCOMMON-NEXT: br label [[VECTOR_BODY:%.*]] ; TFCOMMON: vector.body: ; TFCOMMON-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] ; TFCOMMON-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi [ [[ACTIVE_LANE_MASK_ENTRY]], [[ENTRY]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], [[VECTOR_BODY]] ] ; TFCOMMON-NEXT: [[TMP7:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[INDEX]] ; TFCOMMON-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call @llvm.masked.load.nxv2i64.p0(ptr [[TMP7]], i32 8, [[ACTIVE_LANE_MASK]], poison) ; TFCOMMON-NEXT: [[TMP8:%.*]] = icmp ugt [[WIDE_MASKED_LOAD]], shufflevector ( insertelement ( poison, i64 50, i64 0), poison, zeroinitializer) ; TFCOMMON-NEXT: [[TMP9:%.*]] = select [[ACTIVE_LANE_MASK]], [[TMP8]], zeroinitializer ; TFCOMMON-NEXT: [[TMP10:%.*]] = call @foo_vector( [[WIDE_MASKED_LOAD]], [[TMP9]]) ; TFCOMMON-NEXT: [[TMP11:%.*]] = xor [[TMP8]], shufflevector ( insertelement ( poison, i1 true, i64 0), poison, zeroinitializer) ; TFCOMMON-NEXT: [[TMP12:%.*]] = select [[ACTIVE_LANE_MASK]], [[TMP11]], zeroinitializer ; TFCOMMON-NEXT: [[TMP13:%.*]] = or [[TMP9]], [[TMP12]] ; TFCOMMON-NEXT: [[PREDPHI:%.*]] = select [[TMP12]], zeroinitializer, [[TMP10]] ; TFCOMMON-NEXT: [[TMP14:%.*]] = getelementptr inbounds i64, ptr [[B:%.*]], i64 [[INDEX]] ; TFCOMMON-NEXT: call void @llvm.masked.store.nxv2i64.p0( [[PREDPHI]], ptr [[TMP14]], i32 8, [[TMP13]]) ; TFCOMMON-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP6]] ; TFCOMMON-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call @llvm.get.active.lane.mask.nxv2i1.i64(i64 [[INDEX_NEXT]], i64 1025) ; TFCOMMON-NEXT: [[TMP15:%.*]] = xor [[ACTIVE_LANE_MASK_NEXT]], shufflevector ( insertelement ( poison, i1 true, i64 0), poison, zeroinitializer) ; TFCOMMON-NEXT: [[TMP16:%.*]] = extractelement [[TMP15]], i32 0 ; TFCOMMON-NEXT: br i1 [[TMP16]], label [[FOR_COND_CLEANUP:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; TFCOMMON: for.cond.cleanup: ; TFCOMMON-NEXT: ret void ; ; TFA_INTERLEAVE-LABEL: @test_if_then( ; TFA_INTERLEAVE-NEXT: entry: ; TFA_INTERLEAVE-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() ; TFA_INTERLEAVE-NEXT: [[TMP1:%.*]] = mul i64 [[TMP0]], 4 ; TFA_INTERLEAVE-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() ; TFA_INTERLEAVE-NEXT: [[TMP3:%.*]] = mul i64 [[TMP2]], 4 ; TFA_INTERLEAVE-NEXT: [[TMP4:%.*]] = sub i64 [[TMP3]], 1 ; TFA_INTERLEAVE-NEXT: [[N_RND_UP:%.*]] = add i64 1025, [[TMP4]] ; TFA_INTERLEAVE-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]] ; TFA_INTERLEAVE-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] ; TFA_INTERLEAVE-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64() ; TFA_INTERLEAVE-NEXT: [[TMP6:%.*]] = mul i64 [[TMP5]], 4 ; TFA_INTERLEAVE-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64() ; TFA_INTERLEAVE-NEXT: [[TMP8:%.*]] = mul i64 [[TMP7]], 2 ; TFA_INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call @llvm.get.active.lane.mask.nxv2i1.i64(i64 0, i64 1025) ; TFA_INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK_ENTRY1:%.*]] = call @llvm.get.active.lane.mask.nxv2i1.i64(i64 [[TMP8]], i64 1025) ; TFA_INTERLEAVE-NEXT: br label [[VECTOR_BODY:%.*]] ; TFA_INTERLEAVE: vector.body: ; TFA_INTERLEAVE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] ; TFA_INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi [ [[ACTIVE_LANE_MASK_ENTRY]], [[ENTRY]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], [[VECTOR_BODY]] ] ; TFA_INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK2:%.*]] = phi [ [[ACTIVE_LANE_MASK_ENTRY1]], [[ENTRY]] ], [ [[ACTIVE_LANE_MASK_NEXT6:%.*]], [[VECTOR_BODY]] ] ; TFA_INTERLEAVE-NEXT: [[TMP9:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[INDEX]] ; TFA_INTERLEAVE-NEXT: [[TMP10:%.*]] = call i64 @llvm.vscale.i64() ; TFA_INTERLEAVE-NEXT: [[TMP11:%.*]] = mul i64 [[TMP10]], 2 ; TFA_INTERLEAVE-NEXT: [[TMP12:%.*]] = getelementptr inbounds i64, ptr [[TMP9]], i64 [[TMP11]] ; TFA_INTERLEAVE-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call @llvm.masked.load.nxv2i64.p0(ptr [[TMP9]], i32 8, [[ACTIVE_LANE_MASK]], poison) ; TFA_INTERLEAVE-NEXT: [[WIDE_MASKED_LOAD3:%.*]] = call @llvm.masked.load.nxv2i64.p0(ptr [[TMP12]], i32 8, [[ACTIVE_LANE_MASK2]], poison) ; TFA_INTERLEAVE-NEXT: [[TMP13:%.*]] = icmp ugt [[WIDE_MASKED_LOAD]], shufflevector ( insertelement ( poison, i64 50, i64 0), poison, zeroinitializer) ; TFA_INTERLEAVE-NEXT: [[TMP14:%.*]] = icmp ugt [[WIDE_MASKED_LOAD3]], shufflevector ( insertelement ( poison, i64 50, i64 0), poison, zeroinitializer) ; TFA_INTERLEAVE-NEXT: [[TMP15:%.*]] = select [[ACTIVE_LANE_MASK]], [[TMP13]], zeroinitializer ; TFA_INTERLEAVE-NEXT: [[TMP16:%.*]] = select [[ACTIVE_LANE_MASK2]], [[TMP14]], zeroinitializer ; TFA_INTERLEAVE-NEXT: [[TMP17:%.*]] = call @foo_vector( [[WIDE_MASKED_LOAD]], [[TMP15]]) ; TFA_INTERLEAVE-NEXT: [[TMP18:%.*]] = call @foo_vector( [[WIDE_MASKED_LOAD3]], [[TMP16]]) ; TFA_INTERLEAVE-NEXT: [[TMP19:%.*]] = xor [[TMP13]], shufflevector ( insertelement ( poison, i1 true, i64 0), poison, zeroinitializer) ; TFA_INTERLEAVE-NEXT: [[TMP20:%.*]] = xor [[TMP14]], shufflevector ( insertelement ( poison, i1 true, i64 0), poison, zeroinitializer) ; TFA_INTERLEAVE-NEXT: [[TMP21:%.*]] = select [[ACTIVE_LANE_MASK]], [[TMP19]], zeroinitializer ; TFA_INTERLEAVE-NEXT: [[TMP22:%.*]] = select [[ACTIVE_LANE_MASK2]], [[TMP20]], zeroinitializer ; TFA_INTERLEAVE-NEXT: [[TMP23:%.*]] = or [[TMP15]], [[TMP21]] ; TFA_INTERLEAVE-NEXT: [[TMP24:%.*]] = or [[TMP16]], [[TMP22]] ; TFA_INTERLEAVE-NEXT: [[PREDPHI:%.*]] = select [[TMP21]], zeroinitializer, [[TMP17]] ; TFA_INTERLEAVE-NEXT: [[PREDPHI4:%.*]] = select [[TMP22]], zeroinitializer, [[TMP18]] ; TFA_INTERLEAVE-NEXT: [[TMP25:%.*]] = getelementptr inbounds i64, ptr [[B:%.*]], i64 [[INDEX]] ; TFA_INTERLEAVE-NEXT: [[TMP26:%.*]] = call i64 @llvm.vscale.i64() ; TFA_INTERLEAVE-NEXT: [[TMP27:%.*]] = mul i64 [[TMP26]], 2 ; TFA_INTERLEAVE-NEXT: [[TMP28:%.*]] = getelementptr inbounds i64, ptr [[TMP25]], i64 [[TMP27]] ; TFA_INTERLEAVE-NEXT: call void @llvm.masked.store.nxv2i64.p0( [[PREDPHI]], ptr [[TMP25]], i32 8, [[TMP23]]) ; TFA_INTERLEAVE-NEXT: call void @llvm.masked.store.nxv2i64.p0( [[PREDPHI4]], ptr [[TMP28]], i32 8, [[TMP24]]) ; TFA_INTERLEAVE-NEXT: [[INDEX_NEXT:%.*]] = add i64 [[INDEX]], [[TMP6]] ; TFA_INTERLEAVE-NEXT: [[TMP29:%.*]] = call i64 @llvm.vscale.i64() ; TFA_INTERLEAVE-NEXT: [[TMP30:%.*]] = mul i64 [[TMP29]], 2 ; TFA_INTERLEAVE-NEXT: [[TMP31:%.*]] = add i64 [[INDEX_NEXT]], [[TMP30]] ; TFA_INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call @llvm.get.active.lane.mask.nxv2i1.i64(i64 [[INDEX_NEXT]], i64 1025) ; TFA_INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK_NEXT6]] = call @llvm.get.active.lane.mask.nxv2i1.i64(i64 [[TMP31]], i64 1025) ; TFA_INTERLEAVE-NEXT: [[TMP32:%.*]] = xor [[ACTIVE_LANE_MASK_NEXT]], shufflevector ( insertelement ( poison, i1 true, i64 0), poison, zeroinitializer) ; TFA_INTERLEAVE-NEXT: [[TMP33:%.*]] = extractelement [[TMP32]], i32 0 ; TFA_INTERLEAVE-NEXT: br i1 [[TMP33]], label [[FOR_COND_CLEANUP:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; TFA_INTERLEAVE: for.cond.cleanup: ; TFA_INTERLEAVE-NEXT: ret void ; entry: br label %for.body for.body: %indvars.iv = phi i64 [ %indvars.iv.next, %if.end ], [ 0, %entry ] %arrayidx = getelementptr inbounds i64, ptr %a, i64 %indvars.iv %0 = load i64, ptr %arrayidx, align 8 %cmp = icmp ugt i64 %0, 50 br i1 %cmp, label %if.then, label %if.end if.then: %1 = call i64 @foo(i64 %0) #1 br label %if.end if.end: %2 = phi i64 [%1, %if.then], [0, %for.body] %arrayidx1 = getelementptr inbounds i64, ptr %b, i64 %indvars.iv store i64 %2, ptr %arrayidx1, align 8 %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1 %exitcond = icmp eq i64 %indvars.iv.next, 1025 br i1 %exitcond, label %for.cond.cleanup, label %for.body for.cond.cleanup: ret void } ; This checks the ability to handle masking of an if-then-else CFG with ; calls inside the conditional blocks. Although one of the calls has a ; uniform parameter and the metadata lists a uniform variant, right now ; we just see a splat of the parameter instead. More work needed. define void @test_widen_if_then_else(ptr noalias %a, ptr readnone %b) #4 { ; TFNONE-LABEL: @test_widen_if_then_else( ; TFNONE-NEXT: entry: ; TFNONE-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() ; TFNONE-NEXT: [[TMP1:%.*]] = mul i64 [[TMP0]], 2 ; TFNONE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1025, [[TMP1]] ; TFNONE-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; TFNONE: vector.ph: ; TFNONE-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() ; TFNONE-NEXT: [[TMP3:%.*]] = mul i64 [[TMP2]], 2 ; TFNONE-NEXT: [[N_MOD_VF:%.*]] = urem i64 1025, [[TMP3]] ; TFNONE-NEXT: [[N_VEC:%.*]] = sub i64 1025, [[N_MOD_VF]] ; TFNONE-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() ; TFNONE-NEXT: [[TMP5:%.*]] = mul i64 [[TMP4]], 2 ; TFNONE-NEXT: br label [[VECTOR_BODY:%.*]] ; TFNONE: vector.body: ; TFNONE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] ; TFNONE-NEXT: [[TMP6:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[INDEX]] ; TFNONE-NEXT: [[WIDE_LOAD:%.*]] = load , ptr [[TMP6]], align 8 ; TFNONE-NEXT: [[TMP7:%.*]] = icmp ugt [[WIDE_LOAD]], shufflevector ( insertelement ( poison, i64 50, i64 0), poison, zeroinitializer) ; TFNONE-NEXT: [[TMP8:%.*]] = xor [[TMP7]], shufflevector ( insertelement ( poison, i1 true, i64 0), poison, zeroinitializer) ; TFNONE-NEXT: [[TMP9:%.*]] = call @foo_vector( zeroinitializer, [[TMP8]]) ; TFNONE-NEXT: [[TMP10:%.*]] = call @foo_vector( [[WIDE_LOAD]], [[TMP7]]) ; TFNONE-NEXT: [[PREDPHI:%.*]] = select [[TMP8]], [[TMP9]], [[TMP10]] ; TFNONE-NEXT: [[TMP11:%.*]] = getelementptr inbounds i64, ptr [[B:%.*]], i64 [[INDEX]] ; TFNONE-NEXT: store [[PREDPHI]], ptr [[TMP11]], align 8 ; TFNONE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] ; TFNONE-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; TFNONE-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; TFNONE: middle.block: ; TFNONE-NEXT: br i1 false, label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]] ; TFNONE: scalar.ph: ; TFNONE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ] ; TFNONE-NEXT: br label [[FOR_BODY:%.*]] ; TFNONE: for.body: ; TFNONE-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], [[IF_END:%.*]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ] ; TFNONE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[INDVARS_IV]] ; TFNONE-NEXT: [[TMP13:%.*]] = load i64, ptr [[ARRAYIDX]], align 8 ; TFNONE-NEXT: [[CMP:%.*]] = icmp ugt i64 [[TMP13]], 50 ; TFNONE-NEXT: br i1 [[CMP]], label [[IF_THEN:%.*]], label [[IF_ELSE:%.*]] ; TFNONE: if.then: ; TFNONE-NEXT: [[TMP14:%.*]] = call i64 @foo(i64 [[TMP13]]) #[[ATTR4:[0-9]+]] ; TFNONE-NEXT: br label [[IF_END]] ; TFNONE: if.else: ; TFNONE-NEXT: [[TMP15:%.*]] = call i64 @foo(i64 0) #[[ATTR4]] ; TFNONE-NEXT: br label [[IF_END]] ; TFNONE: if.end: ; TFNONE-NEXT: [[TMP16:%.*]] = phi i64 [ [[TMP14]], [[IF_THEN]] ], [ [[TMP15]], [[IF_ELSE]] ] ; TFNONE-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i64, ptr [[B]], i64 [[INDVARS_IV]] ; TFNONE-NEXT: store i64 [[TMP16]], ptr [[ARRAYIDX1]], align 8 ; TFNONE-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 ; TFNONE-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 1025 ; TFNONE-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] ; TFNONE: for.cond.cleanup: ; TFNONE-NEXT: ret void ; ; TFCOMMON-LABEL: @test_widen_if_then_else( ; TFCOMMON-NEXT: entry: ; TFCOMMON-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() ; TFCOMMON-NEXT: [[TMP1:%.*]] = mul i64 [[TMP0]], 2 ; TFCOMMON-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() ; TFCOMMON-NEXT: [[TMP3:%.*]] = mul i64 [[TMP2]], 2 ; TFCOMMON-NEXT: [[TMP4:%.*]] = sub i64 [[TMP3]], 1 ; TFCOMMON-NEXT: [[N_RND_UP:%.*]] = add i64 1025, [[TMP4]] ; TFCOMMON-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]] ; TFCOMMON-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] ; TFCOMMON-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64() ; TFCOMMON-NEXT: [[TMP6:%.*]] = mul i64 [[TMP5]], 2 ; TFCOMMON-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call @llvm.get.active.lane.mask.nxv2i1.i64(i64 0, i64 1025) ; TFCOMMON-NEXT: br label [[VECTOR_BODY:%.*]] ; TFCOMMON: vector.body: ; TFCOMMON-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] ; TFCOMMON-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi [ [[ACTIVE_LANE_MASK_ENTRY]], [[ENTRY]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], [[VECTOR_BODY]] ] ; TFCOMMON-NEXT: [[TMP7:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[INDEX]] ; TFCOMMON-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call @llvm.masked.load.nxv2i64.p0(ptr [[TMP7]], i32 8, [[ACTIVE_LANE_MASK]], poison) ; TFCOMMON-NEXT: [[TMP8:%.*]] = icmp ugt [[WIDE_MASKED_LOAD]], shufflevector ( insertelement ( poison, i64 50, i64 0), poison, zeroinitializer) ; TFCOMMON-NEXT: [[TMP9:%.*]] = xor [[TMP8]], shufflevector ( insertelement ( poison, i1 true, i64 0), poison, zeroinitializer) ; TFCOMMON-NEXT: [[TMP10:%.*]] = select [[ACTIVE_LANE_MASK]], [[TMP9]], zeroinitializer ; TFCOMMON-NEXT: [[TMP11:%.*]] = call @foo_vector( zeroinitializer, [[TMP10]]) ; TFCOMMON-NEXT: [[TMP12:%.*]] = select [[ACTIVE_LANE_MASK]], [[TMP8]], zeroinitializer ; TFCOMMON-NEXT: [[TMP13:%.*]] = call @foo_vector( [[WIDE_MASKED_LOAD]], [[TMP12]]) ; TFCOMMON-NEXT: [[TMP14:%.*]] = or [[TMP10]], [[TMP12]] ; TFCOMMON-NEXT: [[PREDPHI:%.*]] = select [[TMP10]], [[TMP11]], [[TMP13]] ; TFCOMMON-NEXT: [[TMP15:%.*]] = getelementptr inbounds i64, ptr [[B:%.*]], i64 [[INDEX]] ; TFCOMMON-NEXT: call void @llvm.masked.store.nxv2i64.p0( [[PREDPHI]], ptr [[TMP15]], i32 8, [[TMP14]]) ; TFCOMMON-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP6]] ; TFCOMMON-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call @llvm.get.active.lane.mask.nxv2i1.i64(i64 [[INDEX_NEXT]], i64 1025) ; TFCOMMON-NEXT: [[TMP16:%.*]] = xor [[ACTIVE_LANE_MASK_NEXT]], shufflevector ( insertelement ( poison, i1 true, i64 0), poison, zeroinitializer) ; TFCOMMON-NEXT: [[TMP17:%.*]] = extractelement [[TMP16]], i32 0 ; TFCOMMON-NEXT: br i1 [[TMP17]], label [[FOR_COND_CLEANUP:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; TFCOMMON: for.cond.cleanup: ; TFCOMMON-NEXT: ret void ; ; TFA_INTERLEAVE-LABEL: @test_widen_if_then_else( ; TFA_INTERLEAVE-NEXT: entry: ; TFA_INTERLEAVE-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() ; TFA_INTERLEAVE-NEXT: [[TMP1:%.*]] = mul i64 [[TMP0]], 4 ; TFA_INTERLEAVE-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() ; TFA_INTERLEAVE-NEXT: [[TMP3:%.*]] = mul i64 [[TMP2]], 4 ; TFA_INTERLEAVE-NEXT: [[TMP4:%.*]] = sub i64 [[TMP3]], 1 ; TFA_INTERLEAVE-NEXT: [[N_RND_UP:%.*]] = add i64 1025, [[TMP4]] ; TFA_INTERLEAVE-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]] ; TFA_INTERLEAVE-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] ; TFA_INTERLEAVE-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64() ; TFA_INTERLEAVE-NEXT: [[TMP6:%.*]] = mul i64 [[TMP5]], 4 ; TFA_INTERLEAVE-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64() ; TFA_INTERLEAVE-NEXT: [[TMP8:%.*]] = mul i64 [[TMP7]], 2 ; TFA_INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call @llvm.get.active.lane.mask.nxv2i1.i64(i64 0, i64 1025) ; TFA_INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK_ENTRY1:%.*]] = call @llvm.get.active.lane.mask.nxv2i1.i64(i64 [[TMP8]], i64 1025) ; TFA_INTERLEAVE-NEXT: br label [[VECTOR_BODY:%.*]] ; TFA_INTERLEAVE: vector.body: ; TFA_INTERLEAVE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] ; TFA_INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi [ [[ACTIVE_LANE_MASK_ENTRY]], [[ENTRY]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], [[VECTOR_BODY]] ] ; TFA_INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK2:%.*]] = phi [ [[ACTIVE_LANE_MASK_ENTRY1]], [[ENTRY]] ], [ [[ACTIVE_LANE_MASK_NEXT6:%.*]], [[VECTOR_BODY]] ] ; TFA_INTERLEAVE-NEXT: [[TMP9:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[INDEX]] ; TFA_INTERLEAVE-NEXT: [[TMP10:%.*]] = call i64 @llvm.vscale.i64() ; TFA_INTERLEAVE-NEXT: [[TMP11:%.*]] = mul i64 [[TMP10]], 2 ; TFA_INTERLEAVE-NEXT: [[TMP12:%.*]] = getelementptr inbounds i64, ptr [[TMP9]], i64 [[TMP11]] ; TFA_INTERLEAVE-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call @llvm.masked.load.nxv2i64.p0(ptr [[TMP9]], i32 8, [[ACTIVE_LANE_MASK]], poison) ; TFA_INTERLEAVE-NEXT: [[WIDE_MASKED_LOAD3:%.*]] = call @llvm.masked.load.nxv2i64.p0(ptr [[TMP12]], i32 8, [[ACTIVE_LANE_MASK2]], poison) ; TFA_INTERLEAVE-NEXT: [[TMP13:%.*]] = icmp ugt [[WIDE_MASKED_LOAD]], shufflevector ( insertelement ( poison, i64 50, i64 0), poison, zeroinitializer) ; TFA_INTERLEAVE-NEXT: [[TMP14:%.*]] = icmp ugt [[WIDE_MASKED_LOAD3]], shufflevector ( insertelement ( poison, i64 50, i64 0), poison, zeroinitializer) ; TFA_INTERLEAVE-NEXT: [[TMP15:%.*]] = xor [[TMP13]], shufflevector ( insertelement ( poison, i1 true, i64 0), poison, zeroinitializer) ; TFA_INTERLEAVE-NEXT: [[TMP16:%.*]] = xor [[TMP14]], shufflevector ( insertelement ( poison, i1 true, i64 0), poison, zeroinitializer) ; TFA_INTERLEAVE-NEXT: [[TMP17:%.*]] = select [[ACTIVE_LANE_MASK]], [[TMP15]], zeroinitializer ; TFA_INTERLEAVE-NEXT: [[TMP18:%.*]] = select [[ACTIVE_LANE_MASK2]], [[TMP16]], zeroinitializer ; TFA_INTERLEAVE-NEXT: [[TMP19:%.*]] = call @foo_vector( zeroinitializer, [[TMP17]]) ; TFA_INTERLEAVE-NEXT: [[TMP20:%.*]] = call @foo_vector( zeroinitializer, [[TMP18]]) ; TFA_INTERLEAVE-NEXT: [[TMP21:%.*]] = select [[ACTIVE_LANE_MASK]], [[TMP13]], zeroinitializer ; TFA_INTERLEAVE-NEXT: [[TMP22:%.*]] = select [[ACTIVE_LANE_MASK2]], [[TMP14]], zeroinitializer ; TFA_INTERLEAVE-NEXT: [[TMP23:%.*]] = call @foo_vector( [[WIDE_MASKED_LOAD]], [[TMP21]]) ; TFA_INTERLEAVE-NEXT: [[TMP24:%.*]] = call @foo_vector( [[WIDE_MASKED_LOAD3]], [[TMP22]]) ; TFA_INTERLEAVE-NEXT: [[TMP25:%.*]] = or [[TMP17]], [[TMP21]] ; TFA_INTERLEAVE-NEXT: [[TMP26:%.*]] = or [[TMP18]], [[TMP22]] ; TFA_INTERLEAVE-NEXT: [[PREDPHI:%.*]] = select [[TMP17]], [[TMP19]], [[TMP23]] ; TFA_INTERLEAVE-NEXT: [[PREDPHI4:%.*]] = select [[TMP18]], [[TMP20]], [[TMP24]] ; TFA_INTERLEAVE-NEXT: [[TMP27:%.*]] = getelementptr inbounds i64, ptr [[B:%.*]], i64 [[INDEX]] ; TFA_INTERLEAVE-NEXT: [[TMP28:%.*]] = call i64 @llvm.vscale.i64() ; TFA_INTERLEAVE-NEXT: [[TMP29:%.*]] = mul i64 [[TMP28]], 2 ; TFA_INTERLEAVE-NEXT: [[TMP30:%.*]] = getelementptr inbounds i64, ptr [[TMP27]], i64 [[TMP29]] ; TFA_INTERLEAVE-NEXT: call void @llvm.masked.store.nxv2i64.p0( [[PREDPHI]], ptr [[TMP27]], i32 8, [[TMP25]]) ; TFA_INTERLEAVE-NEXT: call void @llvm.masked.store.nxv2i64.p0( [[PREDPHI4]], ptr [[TMP30]], i32 8, [[TMP26]]) ; TFA_INTERLEAVE-NEXT: [[INDEX_NEXT:%.*]] = add i64 [[INDEX]], [[TMP6]] ; TFA_INTERLEAVE-NEXT: [[TMP31:%.*]] = call i64 @llvm.vscale.i64() ; TFA_INTERLEAVE-NEXT: [[TMP32:%.*]] = mul i64 [[TMP31]], 2 ; TFA_INTERLEAVE-NEXT: [[TMP33:%.*]] = add i64 [[INDEX_NEXT]], [[TMP32]] ; TFA_INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call @llvm.get.active.lane.mask.nxv2i1.i64(i64 [[INDEX_NEXT]], i64 1025) ; TFA_INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK_NEXT6]] = call @llvm.get.active.lane.mask.nxv2i1.i64(i64 [[TMP33]], i64 1025) ; TFA_INTERLEAVE-NEXT: [[TMP34:%.*]] = xor [[ACTIVE_LANE_MASK_NEXT]], shufflevector ( insertelement ( poison, i1 true, i64 0), poison, zeroinitializer) ; TFA_INTERLEAVE-NEXT: [[TMP35:%.*]] = extractelement [[TMP34]], i32 0 ; TFA_INTERLEAVE-NEXT: br i1 [[TMP35]], label [[FOR_COND_CLEANUP:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; TFA_INTERLEAVE: for.cond.cleanup: ; TFA_INTERLEAVE-NEXT: ret void ; entry: br label %for.body for.body: %indvars.iv = phi i64 [ %indvars.iv.next, %if.end ], [ 0, %entry ] %arrayidx = getelementptr inbounds i64, ptr %a, i64 %indvars.iv %0 = load i64, ptr %arrayidx, align 8 %cmp = icmp ugt i64 %0, 50 br i1 %cmp, label %if.then, label %if.else if.then: %1 = call i64 @foo(i64 %0) #0 br label %if.end if.else: %2 = call i64 @foo(i64 0) #0 br label %if.end if.end: %3 = phi i64 [%1, %if.then], [%2, %if.else] %arrayidx1 = getelementptr inbounds i64, ptr %b, i64 %indvars.iv store i64 %3, ptr %arrayidx1, align 8 %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1 %exitcond = icmp eq i64 %indvars.iv.next, 1025 br i1 %exitcond, label %for.cond.cleanup, label %for.body for.cond.cleanup: ret void } ; A call whose argument must be widened, where the vector variant does not have ; a mask. Forcing tail folding results in no vectorized call, whereas an ; unpredicated body with scalar tail can use the unmasked variant. define void @test_widen_nomask(ptr noalias %a, ptr readnone %b) #4 { ; TFNONE-LABEL: @test_widen_nomask( ; TFNONE-NEXT: entry: ; TFNONE-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() ; TFNONE-NEXT: [[TMP1:%.*]] = mul i64 [[TMP0]], 2 ; TFNONE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1025, [[TMP1]] ; TFNONE-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; TFNONE: vector.ph: ; TFNONE-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() ; TFNONE-NEXT: [[TMP3:%.*]] = mul i64 [[TMP2]], 2 ; TFNONE-NEXT: [[N_MOD_VF:%.*]] = urem i64 1025, [[TMP3]] ; TFNONE-NEXT: [[N_VEC:%.*]] = sub i64 1025, [[N_MOD_VF]] ; TFNONE-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() ; TFNONE-NEXT: [[TMP5:%.*]] = mul i64 [[TMP4]], 2 ; TFNONE-NEXT: br label [[VECTOR_BODY:%.*]] ; TFNONE: vector.body: ; TFNONE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] ; TFNONE-NEXT: [[TMP6:%.*]] = getelementptr i64, ptr [[B:%.*]], i64 [[INDEX]] ; TFNONE-NEXT: [[WIDE_LOAD:%.*]] = load , ptr [[TMP6]], align 8 ; TFNONE-NEXT: [[TMP7:%.*]] = call @foo_vector_nomask( [[WIDE_LOAD]]) ; TFNONE-NEXT: [[TMP8:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[INDEX]] ; TFNONE-NEXT: store [[TMP7]], ptr [[TMP8]], align 8 ; TFNONE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] ; TFNONE-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; TFNONE-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] ; TFNONE: middle.block: ; TFNONE-NEXT: br i1 false, label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]] ; TFNONE: scalar.ph: ; TFNONE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ] ; TFNONE-NEXT: br label [[FOR_BODY:%.*]] ; TFNONE: for.body: ; TFNONE-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ] ; TFNONE-NEXT: [[GEP:%.*]] = getelementptr i64, ptr [[B]], i64 [[INDVARS_IV]] ; TFNONE-NEXT: [[LOAD:%.*]] = load i64, ptr [[GEP]], align 8 ; TFNONE-NEXT: [[CALL:%.*]] = call i64 @foo(i64 [[LOAD]]) #[[ATTR5:[0-9]+]] ; TFNONE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[INDVARS_IV]] ; TFNONE-NEXT: store i64 [[CALL]], ptr [[ARRAYIDX]], align 8 ; TFNONE-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 ; TFNONE-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 1025 ; TFNONE-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] ; TFNONE: for.cond.cleanup: ; TFNONE-NEXT: ret void ; ; TFALWAYS-LABEL: @test_widen_nomask( ; TFALWAYS-NEXT: entry: ; TFALWAYS-NEXT: br label [[FOR_BODY:%.*]] ; TFALWAYS: for.body: ; TFALWAYS-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ] ; TFALWAYS-NEXT: [[GEP:%.*]] = getelementptr i64, ptr [[B:%.*]], i64 [[INDVARS_IV]] ; TFALWAYS-NEXT: [[LOAD:%.*]] = load i64, ptr [[GEP]], align 8 ; TFALWAYS-NEXT: [[CALL:%.*]] = call i64 @foo(i64 [[LOAD]]) #[[ATTR5:[0-9]+]] ; TFALWAYS-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[INDVARS_IV]] ; TFALWAYS-NEXT: store i64 [[CALL]], ptr [[ARRAYIDX]], align 8 ; TFALWAYS-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 ; TFALWAYS-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 1025 ; TFALWAYS-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP:%.*]], label [[FOR_BODY]] ; TFALWAYS: for.cond.cleanup: ; TFALWAYS-NEXT: ret void ; ; TFFALLBACK-LABEL: @test_widen_nomask( ; TFFALLBACK-NEXT: entry: ; TFFALLBACK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() ; TFFALLBACK-NEXT: [[TMP1:%.*]] = mul i64 [[TMP0]], 2 ; TFFALLBACK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1025, [[TMP1]] ; TFFALLBACK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; TFFALLBACK: vector.ph: ; TFFALLBACK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() ; TFFALLBACK-NEXT: [[TMP3:%.*]] = mul i64 [[TMP2]], 2 ; TFFALLBACK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1025, [[TMP3]] ; TFFALLBACK-NEXT: [[N_VEC:%.*]] = sub i64 1025, [[N_MOD_VF]] ; TFFALLBACK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() ; TFFALLBACK-NEXT: [[TMP5:%.*]] = mul i64 [[TMP4]], 2 ; TFFALLBACK-NEXT: br label [[VECTOR_BODY:%.*]] ; TFFALLBACK: vector.body: ; TFFALLBACK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] ; TFFALLBACK-NEXT: [[TMP6:%.*]] = getelementptr i64, ptr [[B:%.*]], i64 [[INDEX]] ; TFFALLBACK-NEXT: [[WIDE_LOAD:%.*]] = load , ptr [[TMP6]], align 8 ; TFFALLBACK-NEXT: [[TMP7:%.*]] = call @foo_vector_nomask( [[WIDE_LOAD]]) ; TFFALLBACK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[INDEX]] ; TFFALLBACK-NEXT: store [[TMP7]], ptr [[TMP8]], align 8 ; TFFALLBACK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] ; TFFALLBACK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; TFFALLBACK-NEXT: br i1 [[TMP9]], label [[SCALAR_PH]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; TFFALLBACK: scalar.ph: ; TFFALLBACK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[N_VEC]], [[VECTOR_BODY]] ] ; TFFALLBACK-NEXT: br label [[FOR_BODY:%.*]] ; TFFALLBACK: for.body: ; TFFALLBACK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ] ; TFFALLBACK-NEXT: [[GEP:%.*]] = getelementptr i64, ptr [[B]], i64 [[INDVARS_IV]] ; TFFALLBACK-NEXT: [[LOAD:%.*]] = load i64, ptr [[GEP]], align 8 ; TFFALLBACK-NEXT: [[CALL:%.*]] = call i64 @foo(i64 [[LOAD]]) #[[ATTR5:[0-9]+]] ; TFFALLBACK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[INDVARS_IV]] ; TFFALLBACK-NEXT: store i64 [[CALL]], ptr [[ARRAYIDX]], align 8 ; TFFALLBACK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 ; TFFALLBACK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 1025 ; TFFALLBACK-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; TFFALLBACK: for.cond.cleanup: ; TFFALLBACK-NEXT: ret void ; ; TFA_INTERLEAVE-LABEL: @test_widen_nomask( ; TFA_INTERLEAVE-NEXT: entry: ; TFA_INTERLEAVE-NEXT: br label [[FOR_BODY:%.*]] ; TFA_INTERLEAVE: for.body: ; TFA_INTERLEAVE-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ] ; TFA_INTERLEAVE-NEXT: [[GEP:%.*]] = getelementptr i64, ptr [[B:%.*]], i64 [[INDVARS_IV]] ; TFA_INTERLEAVE-NEXT: [[LOAD:%.*]] = load i64, ptr [[GEP]], align 8 ; TFA_INTERLEAVE-NEXT: [[CALL:%.*]] = call i64 @foo(i64 [[LOAD]]) #[[ATTR5:[0-9]+]] ; TFA_INTERLEAVE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[INDVARS_IV]] ; TFA_INTERLEAVE-NEXT: store i64 [[CALL]], ptr [[ARRAYIDX]], align 8 ; TFA_INTERLEAVE-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 ; TFA_INTERLEAVE-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 1025 ; TFA_INTERLEAVE-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP:%.*]], label [[FOR_BODY]] ; TFA_INTERLEAVE: for.cond.cleanup: ; TFA_INTERLEAVE-NEXT: ret void ; entry: br label %for.body for.body: %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ] %gep = getelementptr i64, ptr %b, i64 %indvars.iv %load = load i64, ptr %gep %call = call i64 @foo(i64 %load) #2 %arrayidx = getelementptr inbounds i64, ptr %a, i64 %indvars.iv store i64 %call, ptr %arrayidx %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1 %exitcond = icmp eq i64 %indvars.iv.next, 1025 br i1 %exitcond, label %for.cond.cleanup, label %for.body for.cond.cleanup: ret void } ; If both masked and unmasked options are present, we expect to see tail folding ; use the masked version and unpredicated body with scalar tail use the unmasked ; version. define void @test_widen_optmask(ptr noalias %a, ptr readnone %b) #4 { ; TFNONE-LABEL: @test_widen_optmask( ; TFNONE-NEXT: entry: ; TFNONE-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() ; TFNONE-NEXT: [[TMP1:%.*]] = mul i64 [[TMP0]], 2 ; TFNONE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1025, [[TMP1]] ; TFNONE-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; TFNONE: vector.ph: ; TFNONE-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() ; TFNONE-NEXT: [[TMP3:%.*]] = mul i64 [[TMP2]], 2 ; TFNONE-NEXT: [[N_MOD_VF:%.*]] = urem i64 1025, [[TMP3]] ; TFNONE-NEXT: [[N_VEC:%.*]] = sub i64 1025, [[N_MOD_VF]] ; TFNONE-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() ; TFNONE-NEXT: [[TMP5:%.*]] = mul i64 [[TMP4]], 2 ; TFNONE-NEXT: br label [[VECTOR_BODY:%.*]] ; TFNONE: vector.body: ; TFNONE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] ; TFNONE-NEXT: [[TMP6:%.*]] = getelementptr i64, ptr [[B:%.*]], i64 [[INDEX]] ; TFNONE-NEXT: [[WIDE_LOAD:%.*]] = load , ptr [[TMP6]], align 8 ; TFNONE-NEXT: [[TMP7:%.*]] = call @foo_vector_nomask( [[WIDE_LOAD]]) ; TFNONE-NEXT: [[TMP8:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[INDEX]] ; TFNONE-NEXT: store [[TMP7]], ptr [[TMP8]], align 8 ; TFNONE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] ; TFNONE-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; TFNONE-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] ; TFNONE: middle.block: ; TFNONE-NEXT: br i1 false, label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]] ; TFNONE: scalar.ph: ; TFNONE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ] ; TFNONE-NEXT: br label [[FOR_BODY:%.*]] ; TFNONE: for.body: ; TFNONE-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ] ; TFNONE-NEXT: [[GEP:%.*]] = getelementptr i64, ptr [[B]], i64 [[INDVARS_IV]] ; TFNONE-NEXT: [[LOAD:%.*]] = load i64, ptr [[GEP]], align 8 ; TFNONE-NEXT: [[CALL:%.*]] = call i64 @foo(i64 [[LOAD]]) #[[ATTR6:[0-9]+]] ; TFNONE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[INDVARS_IV]] ; TFNONE-NEXT: store i64 [[CALL]], ptr [[ARRAYIDX]], align 8 ; TFNONE-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 ; TFNONE-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 1025 ; TFNONE-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] ; TFNONE: for.cond.cleanup: ; TFNONE-NEXT: ret void ; ; TFALWAYS-LABEL: @test_widen_optmask( ; TFALWAYS-NEXT: entry: ; TFALWAYS-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() ; TFALWAYS-NEXT: [[TMP1:%.*]] = mul i64 [[TMP0]], 2 ; TFALWAYS-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() ; TFALWAYS-NEXT: [[TMP3:%.*]] = mul i64 [[TMP2]], 2 ; TFALWAYS-NEXT: [[TMP4:%.*]] = sub i64 [[TMP3]], 1 ; TFALWAYS-NEXT: [[N_RND_UP:%.*]] = add i64 1025, [[TMP4]] ; TFALWAYS-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]] ; TFALWAYS-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] ; TFALWAYS-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64() ; TFALWAYS-NEXT: [[TMP6:%.*]] = mul i64 [[TMP5]], 2 ; TFALWAYS-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call @llvm.get.active.lane.mask.nxv2i1.i64(i64 0, i64 1025) ; TFALWAYS-NEXT: br label [[VECTOR_BODY:%.*]] ; TFALWAYS: vector.body: ; TFALWAYS-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] ; TFALWAYS-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi [ [[ACTIVE_LANE_MASK_ENTRY]], [[ENTRY]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], [[VECTOR_BODY]] ] ; TFALWAYS-NEXT: [[TMP7:%.*]] = getelementptr i64, ptr [[B:%.*]], i64 [[INDEX]] ; TFALWAYS-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call @llvm.masked.load.nxv2i64.p0(ptr [[TMP7]], i32 8, [[ACTIVE_LANE_MASK]], poison) ; TFALWAYS-NEXT: [[TMP8:%.*]] = call @foo_vector( [[WIDE_MASKED_LOAD]], [[ACTIVE_LANE_MASK]]) ; TFALWAYS-NEXT: [[TMP9:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[INDEX]] ; TFALWAYS-NEXT: call void @llvm.masked.store.nxv2i64.p0( [[TMP8]], ptr [[TMP9]], i32 8, [[ACTIVE_LANE_MASK]]) ; TFALWAYS-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP6]] ; TFALWAYS-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call @llvm.get.active.lane.mask.nxv2i1.i64(i64 [[INDEX_NEXT]], i64 1025) ; TFALWAYS-NEXT: [[TMP10:%.*]] = xor [[ACTIVE_LANE_MASK_NEXT]], shufflevector ( insertelement ( poison, i1 true, i64 0), poison, zeroinitializer) ; TFALWAYS-NEXT: [[TMP11:%.*]] = extractelement [[TMP10]], i32 0 ; TFALWAYS-NEXT: br i1 [[TMP11]], label [[FOR_COND_CLEANUP:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; TFALWAYS: for.cond.cleanup: ; TFALWAYS-NEXT: ret void ; ; TFFALLBACK-LABEL: @test_widen_optmask( ; TFFALLBACK-NEXT: entry: ; TFFALLBACK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() ; TFFALLBACK-NEXT: [[TMP1:%.*]] = mul i64 [[TMP0]], 2 ; TFFALLBACK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() ; TFFALLBACK-NEXT: [[TMP3:%.*]] = mul i64 [[TMP2]], 2 ; TFFALLBACK-NEXT: [[TMP4:%.*]] = sub i64 [[TMP3]], 1 ; TFFALLBACK-NEXT: [[N_RND_UP:%.*]] = add i64 1025, [[TMP4]] ; TFFALLBACK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]] ; TFFALLBACK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] ; TFFALLBACK-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64() ; TFFALLBACK-NEXT: [[TMP6:%.*]] = mul i64 [[TMP5]], 2 ; TFFALLBACK-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call @llvm.get.active.lane.mask.nxv2i1.i64(i64 0, i64 1025) ; TFFALLBACK-NEXT: br label [[VECTOR_BODY:%.*]] ; TFFALLBACK: vector.body: ; TFFALLBACK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] ; TFFALLBACK-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi [ [[ACTIVE_LANE_MASK_ENTRY]], [[ENTRY]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], [[VECTOR_BODY]] ] ; TFFALLBACK-NEXT: [[TMP7:%.*]] = getelementptr i64, ptr [[B:%.*]], i64 [[INDEX]] ; TFFALLBACK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call @llvm.masked.load.nxv2i64.p0(ptr [[TMP7]], i32 8, [[ACTIVE_LANE_MASK]], poison) ; TFFALLBACK-NEXT: [[TMP8:%.*]] = call @foo_vector( [[WIDE_MASKED_LOAD]], [[ACTIVE_LANE_MASK]]) ; TFFALLBACK-NEXT: [[TMP9:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[INDEX]] ; TFFALLBACK-NEXT: call void @llvm.masked.store.nxv2i64.p0( [[TMP8]], ptr [[TMP9]], i32 8, [[ACTIVE_LANE_MASK]]) ; TFFALLBACK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP6]] ; TFFALLBACK-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call @llvm.get.active.lane.mask.nxv2i1.i64(i64 [[INDEX_NEXT]], i64 1025) ; TFFALLBACK-NEXT: [[TMP10:%.*]] = xor [[ACTIVE_LANE_MASK_NEXT]], shufflevector ( insertelement ( poison, i1 true, i64 0), poison, zeroinitializer) ; TFFALLBACK-NEXT: [[TMP11:%.*]] = extractelement [[TMP10]], i32 0 ; TFFALLBACK-NEXT: br i1 [[TMP11]], label [[FOR_COND_CLEANUP:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] ; TFFALLBACK: for.cond.cleanup: ; TFFALLBACK-NEXT: ret void ; ; TFA_INTERLEAVE-LABEL: @test_widen_optmask( ; TFA_INTERLEAVE-NEXT: entry: ; TFA_INTERLEAVE-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() ; TFA_INTERLEAVE-NEXT: [[TMP1:%.*]] = mul i64 [[TMP0]], 4 ; TFA_INTERLEAVE-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() ; TFA_INTERLEAVE-NEXT: [[TMP3:%.*]] = mul i64 [[TMP2]], 4 ; TFA_INTERLEAVE-NEXT: [[TMP4:%.*]] = sub i64 [[TMP3]], 1 ; TFA_INTERLEAVE-NEXT: [[N_RND_UP:%.*]] = add i64 1025, [[TMP4]] ; TFA_INTERLEAVE-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]] ; TFA_INTERLEAVE-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] ; TFA_INTERLEAVE-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64() ; TFA_INTERLEAVE-NEXT: [[TMP6:%.*]] = mul i64 [[TMP5]], 4 ; TFA_INTERLEAVE-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64() ; TFA_INTERLEAVE-NEXT: [[TMP8:%.*]] = mul i64 [[TMP7]], 2 ; TFA_INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call @llvm.get.active.lane.mask.nxv2i1.i64(i64 0, i64 1025) ; TFA_INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK_ENTRY1:%.*]] = call @llvm.get.active.lane.mask.nxv2i1.i64(i64 [[TMP8]], i64 1025) ; TFA_INTERLEAVE-NEXT: br label [[VECTOR_BODY:%.*]] ; TFA_INTERLEAVE: vector.body: ; TFA_INTERLEAVE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] ; TFA_INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi [ [[ACTIVE_LANE_MASK_ENTRY]], [[ENTRY]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], [[VECTOR_BODY]] ] ; TFA_INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK2:%.*]] = phi [ [[ACTIVE_LANE_MASK_ENTRY1]], [[ENTRY]] ], [ [[ACTIVE_LANE_MASK_NEXT5:%.*]], [[VECTOR_BODY]] ] ; TFA_INTERLEAVE-NEXT: [[TMP9:%.*]] = getelementptr i64, ptr [[B:%.*]], i64 [[INDEX]] ; TFA_INTERLEAVE-NEXT: [[TMP10:%.*]] = call i64 @llvm.vscale.i64() ; TFA_INTERLEAVE-NEXT: [[TMP11:%.*]] = mul i64 [[TMP10]], 2 ; TFA_INTERLEAVE-NEXT: [[TMP12:%.*]] = getelementptr i64, ptr [[TMP9]], i64 [[TMP11]] ; TFA_INTERLEAVE-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call @llvm.masked.load.nxv2i64.p0(ptr [[TMP9]], i32 8, [[ACTIVE_LANE_MASK]], poison) ; TFA_INTERLEAVE-NEXT: [[WIDE_MASKED_LOAD3:%.*]] = call @llvm.masked.load.nxv2i64.p0(ptr [[TMP12]], i32 8, [[ACTIVE_LANE_MASK2]], poison) ; TFA_INTERLEAVE-NEXT: [[TMP13:%.*]] = call @foo_vector( [[WIDE_MASKED_LOAD]], [[ACTIVE_LANE_MASK]]) ; TFA_INTERLEAVE-NEXT: [[TMP14:%.*]] = call @foo_vector( [[WIDE_MASKED_LOAD3]], [[ACTIVE_LANE_MASK2]]) ; TFA_INTERLEAVE-NEXT: [[TMP15:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[INDEX]] ; TFA_INTERLEAVE-NEXT: [[TMP16:%.*]] = call i64 @llvm.vscale.i64() ; TFA_INTERLEAVE-NEXT: [[TMP17:%.*]] = mul i64 [[TMP16]], 2 ; TFA_INTERLEAVE-NEXT: [[TMP18:%.*]] = getelementptr inbounds i64, ptr [[TMP15]], i64 [[TMP17]] ; TFA_INTERLEAVE-NEXT: call void @llvm.masked.store.nxv2i64.p0( [[TMP13]], ptr [[TMP15]], i32 8, [[ACTIVE_LANE_MASK]]) ; TFA_INTERLEAVE-NEXT: call void @llvm.masked.store.nxv2i64.p0( [[TMP14]], ptr [[TMP18]], i32 8, [[ACTIVE_LANE_MASK2]]) ; TFA_INTERLEAVE-NEXT: [[INDEX_NEXT:%.*]] = add i64 [[INDEX]], [[TMP6]] ; TFA_INTERLEAVE-NEXT: [[TMP19:%.*]] = call i64 @llvm.vscale.i64() ; TFA_INTERLEAVE-NEXT: [[TMP20:%.*]] = mul i64 [[TMP19]], 2 ; TFA_INTERLEAVE-NEXT: [[TMP21:%.*]] = add i64 [[INDEX_NEXT]], [[TMP20]] ; TFA_INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call @llvm.get.active.lane.mask.nxv2i1.i64(i64 [[INDEX_NEXT]], i64 1025) ; TFA_INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK_NEXT5]] = call @llvm.get.active.lane.mask.nxv2i1.i64(i64 [[TMP21]], i64 1025) ; TFA_INTERLEAVE-NEXT: [[TMP22:%.*]] = xor [[ACTIVE_LANE_MASK_NEXT]], shufflevector ( insertelement ( poison, i1 true, i64 0), poison, zeroinitializer) ; TFA_INTERLEAVE-NEXT: [[TMP23:%.*]] = extractelement [[TMP22]], i32 0 ; TFA_INTERLEAVE-NEXT: br i1 [[TMP23]], label [[FOR_COND_CLEANUP:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; TFA_INTERLEAVE: for.cond.cleanup: ; TFA_INTERLEAVE-NEXT: ret void ; entry: br label %for.body for.body: %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ] %gep = getelementptr i64, ptr %b, i64 %indvars.iv %load = load i64, ptr %gep %call = call i64 @foo(i64 %load) #3 %arrayidx = getelementptr inbounds i64, ptr %a, i64 %indvars.iv store i64 %call, ptr %arrayidx %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1 %exitcond = icmp eq i64 %indvars.iv.next, 1025 br i1 %exitcond, label %for.cond.cleanup, label %for.body for.cond.cleanup: ret void } ; An fmuladd intrinsic followed by a call; we want to make sure we correctly ; pick up the second call and assign a vector variant to it. define double @test_widen_fmuladd_and_call(ptr noalias %a, ptr readnone %b, double %m) #4 { ; TFNONE-LABEL: @test_widen_fmuladd_and_call( ; TFNONE-NEXT: entry: ; TFNONE-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() ; TFNONE-NEXT: [[TMP1:%.*]] = mul i64 [[TMP0]], 2 ; TFNONE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1025, [[TMP1]] ; TFNONE-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; TFNONE: vector.ph: ; TFNONE-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() ; TFNONE-NEXT: [[TMP3:%.*]] = mul i64 [[TMP2]], 2 ; TFNONE-NEXT: [[N_MOD_VF:%.*]] = urem i64 1025, [[TMP3]] ; TFNONE-NEXT: [[N_VEC:%.*]] = sub i64 1025, [[N_MOD_VF]] ; TFNONE-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() ; TFNONE-NEXT: [[TMP5:%.*]] = mul i64 [[TMP4]], 2 ; TFNONE-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement poison, double [[M:%.*]], i64 0 ; TFNONE-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector [[BROADCAST_SPLATINSERT]], poison, zeroinitializer ; TFNONE-NEXT: br label [[VECTOR_BODY:%.*]] ; TFNONE: vector.body: ; TFNONE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] ; TFNONE-NEXT: [[VEC_PHI:%.*]] = phi double [ 0.000000e+00, [[VECTOR_PH]] ], [ [[TMP11:%.*]], [[VECTOR_BODY]] ] ; TFNONE-NEXT: [[TMP6:%.*]] = getelementptr double, ptr [[B:%.*]], i64 [[INDEX]] ; TFNONE-NEXT: [[WIDE_LOAD:%.*]] = load , ptr [[TMP6]], align 8 ; TFNONE-NEXT: [[TMP7:%.*]] = fmul [[WIDE_LOAD]], [[BROADCAST_SPLAT]] ; TFNONE-NEXT: [[TMP8:%.*]] = fptoui [[WIDE_LOAD]] to ; TFNONE-NEXT: [[TMP9:%.*]] = call @foo_vector( [[TMP8]], shufflevector ( insertelement ( poison, i1 true, i64 0), poison, zeroinitializer)) ; TFNONE-NEXT: [[TMP10:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[INDEX]] ; TFNONE-NEXT: store [[TMP9]], ptr [[TMP10]], align 8 ; TFNONE-NEXT: [[TMP11]] = call double @llvm.vector.reduce.fadd.nxv2f64(double [[VEC_PHI]], [[TMP7]]) ; TFNONE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] ; TFNONE-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; TFNONE-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] ; TFNONE: middle.block: ; TFNONE-NEXT: br i1 false, label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]] ; TFNONE: scalar.ph: ; TFNONE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ] ; TFNONE-NEXT: [[BC_MERGE_RDX:%.*]] = phi double [ 0.000000e+00, [[ENTRY]] ], [ [[TMP11]], [[MIDDLE_BLOCK]] ] ; TFNONE-NEXT: br label [[FOR_BODY:%.*]] ; TFNONE: for.body: ; TFNONE-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ] ; TFNONE-NEXT: [[FMA_SUM:%.*]] = phi double [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[MULADD:%.*]], [[FOR_BODY]] ] ; TFNONE-NEXT: [[GEP:%.*]] = getelementptr double, ptr [[B]], i64 [[INDVARS_IV]] ; TFNONE-NEXT: [[LOAD:%.*]] = load double, ptr [[GEP]], align 8 ; TFNONE-NEXT: [[MULADD]] = tail call double @llvm.fmuladd.f64(double [[LOAD]], double [[M]], double [[FMA_SUM]]) ; TFNONE-NEXT: [[TOINT:%.*]] = fptoui double [[LOAD]] to i64 ; TFNONE-NEXT: [[CALL:%.*]] = call i64 @foo(i64 [[TOINT]]) #[[ATTR3]] ; TFNONE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[INDVARS_IV]] ; TFNONE-NEXT: store i64 [[CALL]], ptr [[ARRAYIDX]], align 8 ; TFNONE-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 ; TFNONE-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 1025 ; TFNONE-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]] ; TFNONE: for.cond.cleanup: ; TFNONE-NEXT: [[MULADD_LCSSA:%.*]] = phi double [ [[MULADD]], [[FOR_BODY]] ], [ [[TMP11]], [[MIDDLE_BLOCK]] ] ; TFNONE-NEXT: ret double [[MULADD_LCSSA]] ; ; TFALWAYS-LABEL: @test_widen_fmuladd_and_call( ; TFALWAYS-NEXT: entry: ; TFALWAYS-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() ; TFALWAYS-NEXT: [[TMP1:%.*]] = mul i64 [[TMP0]], 2 ; TFALWAYS-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() ; TFALWAYS-NEXT: [[TMP3:%.*]] = mul i64 [[TMP2]], 2 ; TFALWAYS-NEXT: [[TMP4:%.*]] = sub i64 [[TMP3]], 1 ; TFALWAYS-NEXT: [[N_RND_UP:%.*]] = add i64 1025, [[TMP4]] ; TFALWAYS-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]] ; TFALWAYS-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] ; TFALWAYS-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64() ; TFALWAYS-NEXT: [[TMP6:%.*]] = mul i64 [[TMP5]], 2 ; TFALWAYS-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call @llvm.get.active.lane.mask.nxv2i1.i64(i64 0, i64 1025) ; TFALWAYS-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement poison, double [[M:%.*]], i64 0 ; TFALWAYS-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector [[BROADCAST_SPLATINSERT]], poison, zeroinitializer ; TFALWAYS-NEXT: br label [[VECTOR_BODY:%.*]] ; TFALWAYS: vector.body: ; TFALWAYS-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] ; TFALWAYS-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi [ [[ACTIVE_LANE_MASK_ENTRY]], [[ENTRY]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], [[VECTOR_BODY]] ] ; TFALWAYS-NEXT: [[VEC_PHI:%.*]] = phi double [ 0.000000e+00, [[ENTRY]] ], [ [[TMP13:%.*]], [[VECTOR_BODY]] ] ; TFALWAYS-NEXT: [[TMP7:%.*]] = getelementptr double, ptr [[B:%.*]], i64 [[INDEX]] ; TFALWAYS-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call @llvm.masked.load.nxv2f64.p0(ptr [[TMP7]], i32 8, [[ACTIVE_LANE_MASK]], poison) ; TFALWAYS-NEXT: [[TMP8:%.*]] = fmul [[WIDE_MASKED_LOAD]], [[BROADCAST_SPLAT]] ; TFALWAYS-NEXT: [[TMP9:%.*]] = fptoui [[WIDE_MASKED_LOAD]] to ; TFALWAYS-NEXT: [[TMP10:%.*]] = call @foo_vector( [[TMP9]], [[ACTIVE_LANE_MASK]]) ; TFALWAYS-NEXT: [[TMP11:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[INDEX]] ; TFALWAYS-NEXT: call void @llvm.masked.store.nxv2i64.p0( [[TMP10]], ptr [[TMP11]], i32 8, [[ACTIVE_LANE_MASK]]) ; TFALWAYS-NEXT: [[TMP12:%.*]] = select [[ACTIVE_LANE_MASK]], [[TMP8]], shufflevector ( insertelement ( poison, double -0.000000e+00, i64 0), poison, zeroinitializer) ; TFALWAYS-NEXT: [[TMP13]] = call double @llvm.vector.reduce.fadd.nxv2f64(double [[VEC_PHI]], [[TMP12]]) ; TFALWAYS-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP6]] ; TFALWAYS-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call @llvm.get.active.lane.mask.nxv2i1.i64(i64 [[INDEX_NEXT]], i64 1025) ; TFALWAYS-NEXT: [[TMP14:%.*]] = xor [[ACTIVE_LANE_MASK_NEXT]], shufflevector ( insertelement ( poison, i1 true, i64 0), poison, zeroinitializer) ; TFALWAYS-NEXT: [[TMP15:%.*]] = extractelement [[TMP14]], i32 0 ; TFALWAYS-NEXT: br i1 [[TMP15]], label [[FOR_COND_CLEANUP:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; TFALWAYS: for.cond.cleanup: ; TFALWAYS-NEXT: ret double [[TMP13]] ; ; TFFALLBACK-LABEL: @test_widen_fmuladd_and_call( ; TFFALLBACK-NEXT: entry: ; TFFALLBACK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() ; TFFALLBACK-NEXT: [[TMP1:%.*]] = mul i64 [[TMP0]], 2 ; TFFALLBACK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() ; TFFALLBACK-NEXT: [[TMP3:%.*]] = mul i64 [[TMP2]], 2 ; TFFALLBACK-NEXT: [[TMP4:%.*]] = sub i64 [[TMP3]], 1 ; TFFALLBACK-NEXT: [[N_RND_UP:%.*]] = add i64 1025, [[TMP4]] ; TFFALLBACK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]] ; TFFALLBACK-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] ; TFFALLBACK-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64() ; TFFALLBACK-NEXT: [[TMP6:%.*]] = mul i64 [[TMP5]], 2 ; TFFALLBACK-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call @llvm.get.active.lane.mask.nxv2i1.i64(i64 0, i64 1025) ; TFFALLBACK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement poison, double [[M:%.*]], i64 0 ; TFFALLBACK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector [[BROADCAST_SPLATINSERT]], poison, zeroinitializer ; TFFALLBACK-NEXT: br label [[VECTOR_BODY:%.*]] ; TFFALLBACK: vector.body: ; TFFALLBACK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] ; TFFALLBACK-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi [ [[ACTIVE_LANE_MASK_ENTRY]], [[ENTRY]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], [[VECTOR_BODY]] ] ; TFFALLBACK-NEXT: [[VEC_PHI:%.*]] = phi double [ 0.000000e+00, [[ENTRY]] ], [ [[TMP13:%.*]], [[VECTOR_BODY]] ] ; TFFALLBACK-NEXT: [[TMP7:%.*]] = getelementptr double, ptr [[B:%.*]], i64 [[INDEX]] ; TFFALLBACK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call @llvm.masked.load.nxv2f64.p0(ptr [[TMP7]], i32 8, [[ACTIVE_LANE_MASK]], poison) ; TFFALLBACK-NEXT: [[TMP8:%.*]] = fmul [[WIDE_MASKED_LOAD]], [[BROADCAST_SPLAT]] ; TFFALLBACK-NEXT: [[TMP9:%.*]] = fptoui [[WIDE_MASKED_LOAD]] to ; TFFALLBACK-NEXT: [[TMP10:%.*]] = call @foo_vector( [[TMP9]], [[ACTIVE_LANE_MASK]]) ; TFFALLBACK-NEXT: [[TMP11:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[INDEX]] ; TFFALLBACK-NEXT: call void @llvm.masked.store.nxv2i64.p0( [[TMP10]], ptr [[TMP11]], i32 8, [[ACTIVE_LANE_MASK]]) ; TFFALLBACK-NEXT: [[TMP12:%.*]] = select [[ACTIVE_LANE_MASK]], [[TMP8]], shufflevector ( insertelement ( poison, double -0.000000e+00, i64 0), poison, zeroinitializer) ; TFFALLBACK-NEXT: [[TMP13]] = call double @llvm.vector.reduce.fadd.nxv2f64(double [[VEC_PHI]], [[TMP12]]) ; TFFALLBACK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP6]] ; TFFALLBACK-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call @llvm.get.active.lane.mask.nxv2i1.i64(i64 [[INDEX_NEXT]], i64 1025) ; TFFALLBACK-NEXT: [[TMP14:%.*]] = xor [[ACTIVE_LANE_MASK_NEXT]], shufflevector ( insertelement ( poison, i1 true, i64 0), poison, zeroinitializer) ; TFFALLBACK-NEXT: [[TMP15:%.*]] = extractelement [[TMP14]], i32 0 ; TFFALLBACK-NEXT: br i1 [[TMP15]], label [[FOR_COND_CLEANUP:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] ; TFFALLBACK: for.cond.cleanup: ; TFFALLBACK-NEXT: ret double [[TMP13]] ; ; TFA_INTERLEAVE-LABEL: @test_widen_fmuladd_and_call( ; TFA_INTERLEAVE-NEXT: entry: ; TFA_INTERLEAVE-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() ; TFA_INTERLEAVE-NEXT: [[TMP1:%.*]] = mul i64 [[TMP0]], 4 ; TFA_INTERLEAVE-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() ; TFA_INTERLEAVE-NEXT: [[TMP3:%.*]] = mul i64 [[TMP2]], 4 ; TFA_INTERLEAVE-NEXT: [[TMP4:%.*]] = sub i64 [[TMP3]], 1 ; TFA_INTERLEAVE-NEXT: [[N_RND_UP:%.*]] = add i64 1025, [[TMP4]] ; TFA_INTERLEAVE-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], [[TMP1]] ; TFA_INTERLEAVE-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]] ; TFA_INTERLEAVE-NEXT: [[TMP5:%.*]] = call i64 @llvm.vscale.i64() ; TFA_INTERLEAVE-NEXT: [[TMP6:%.*]] = mul i64 [[TMP5]], 4 ; TFA_INTERLEAVE-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64() ; TFA_INTERLEAVE-NEXT: [[TMP8:%.*]] = mul i64 [[TMP7]], 2 ; TFA_INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call @llvm.get.active.lane.mask.nxv2i1.i64(i64 0, i64 1025) ; TFA_INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK_ENTRY1:%.*]] = call @llvm.get.active.lane.mask.nxv2i1.i64(i64 [[TMP8]], i64 1025) ; TFA_INTERLEAVE-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement poison, double [[M:%.*]], i64 0 ; TFA_INTERLEAVE-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector [[BROADCAST_SPLATINSERT]], poison, zeroinitializer ; TFA_INTERLEAVE-NEXT: br label [[VECTOR_BODY:%.*]] ; TFA_INTERLEAVE: vector.body: ; TFA_INTERLEAVE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] ; TFA_INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK:%.*]] = phi [ [[ACTIVE_LANE_MASK_ENTRY]], [[ENTRY]] ], [ [[ACTIVE_LANE_MASK_NEXT:%.*]], [[VECTOR_BODY]] ] ; TFA_INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK2:%.*]] = phi [ [[ACTIVE_LANE_MASK_ENTRY1]], [[ENTRY]] ], [ [[ACTIVE_LANE_MASK_NEXT5:%.*]], [[VECTOR_BODY]] ] ; TFA_INTERLEAVE-NEXT: [[VEC_PHI:%.*]] = phi double [ 0.000000e+00, [[ENTRY]] ], [ [[TMP26:%.*]], [[VECTOR_BODY]] ] ; TFA_INTERLEAVE-NEXT: [[TMP9:%.*]] = getelementptr double, ptr [[B:%.*]], i64 [[INDEX]] ; TFA_INTERLEAVE-NEXT: [[TMP10:%.*]] = call i64 @llvm.vscale.i64() ; TFA_INTERLEAVE-NEXT: [[TMP11:%.*]] = mul i64 [[TMP10]], 2 ; TFA_INTERLEAVE-NEXT: [[TMP12:%.*]] = getelementptr double, ptr [[TMP9]], i64 [[TMP11]] ; TFA_INTERLEAVE-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call @llvm.masked.load.nxv2f64.p0(ptr [[TMP9]], i32 8, [[ACTIVE_LANE_MASK]], poison) ; TFA_INTERLEAVE-NEXT: [[WIDE_MASKED_LOAD3:%.*]] = call @llvm.masked.load.nxv2f64.p0(ptr [[TMP12]], i32 8, [[ACTIVE_LANE_MASK2]], poison) ; TFA_INTERLEAVE-NEXT: [[TMP13:%.*]] = fmul [[WIDE_MASKED_LOAD]], [[BROADCAST_SPLAT]] ; TFA_INTERLEAVE-NEXT: [[TMP14:%.*]] = fmul [[WIDE_MASKED_LOAD3]], [[BROADCAST_SPLAT]] ; TFA_INTERLEAVE-NEXT: [[TMP15:%.*]] = fptoui [[WIDE_MASKED_LOAD]] to ; TFA_INTERLEAVE-NEXT: [[TMP16:%.*]] = fptoui [[WIDE_MASKED_LOAD3]] to ; TFA_INTERLEAVE-NEXT: [[TMP17:%.*]] = call @foo_vector( [[TMP15]], [[ACTIVE_LANE_MASK]]) ; TFA_INTERLEAVE-NEXT: [[TMP18:%.*]] = call @foo_vector( [[TMP16]], [[ACTIVE_LANE_MASK2]]) ; TFA_INTERLEAVE-NEXT: [[TMP19:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[INDEX]] ; TFA_INTERLEAVE-NEXT: [[TMP20:%.*]] = call i64 @llvm.vscale.i64() ; TFA_INTERLEAVE-NEXT: [[TMP21:%.*]] = mul i64 [[TMP20]], 2 ; TFA_INTERLEAVE-NEXT: [[TMP22:%.*]] = getelementptr inbounds i64, ptr [[TMP19]], i64 [[TMP21]] ; TFA_INTERLEAVE-NEXT: call void @llvm.masked.store.nxv2i64.p0( [[TMP17]], ptr [[TMP19]], i32 8, [[ACTIVE_LANE_MASK]]) ; TFA_INTERLEAVE-NEXT: call void @llvm.masked.store.nxv2i64.p0( [[TMP18]], ptr [[TMP22]], i32 8, [[ACTIVE_LANE_MASK2]]) ; TFA_INTERLEAVE-NEXT: [[TMP23:%.*]] = select [[ACTIVE_LANE_MASK]], [[TMP13]], shufflevector ( insertelement ( poison, double -0.000000e+00, i64 0), poison, zeroinitializer) ; TFA_INTERLEAVE-NEXT: [[TMP24:%.*]] = call double @llvm.vector.reduce.fadd.nxv2f64(double [[VEC_PHI]], [[TMP23]]) ; TFA_INTERLEAVE-NEXT: [[TMP25:%.*]] = select [[ACTIVE_LANE_MASK2]], [[TMP14]], shufflevector ( insertelement ( poison, double -0.000000e+00, i64 0), poison, zeroinitializer) ; TFA_INTERLEAVE-NEXT: [[TMP26]] = call double @llvm.vector.reduce.fadd.nxv2f64(double [[TMP24]], [[TMP25]]) ; TFA_INTERLEAVE-NEXT: [[INDEX_NEXT:%.*]] = add i64 [[INDEX]], [[TMP6]] ; TFA_INTERLEAVE-NEXT: [[TMP27:%.*]] = call i64 @llvm.vscale.i64() ; TFA_INTERLEAVE-NEXT: [[TMP28:%.*]] = mul i64 [[TMP27]], 2 ; TFA_INTERLEAVE-NEXT: [[TMP29:%.*]] = add i64 [[INDEX_NEXT]], [[TMP28]] ; TFA_INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call @llvm.get.active.lane.mask.nxv2i1.i64(i64 [[INDEX_NEXT]], i64 1025) ; TFA_INTERLEAVE-NEXT: [[ACTIVE_LANE_MASK_NEXT5]] = call @llvm.get.active.lane.mask.nxv2i1.i64(i64 [[TMP29]], i64 1025) ; TFA_INTERLEAVE-NEXT: [[TMP30:%.*]] = xor [[ACTIVE_LANE_MASK_NEXT]], shufflevector ( insertelement ( poison, i1 true, i64 0), poison, zeroinitializer) ; TFA_INTERLEAVE-NEXT: [[TMP31:%.*]] = extractelement [[TMP30]], i32 0 ; TFA_INTERLEAVE-NEXT: br i1 [[TMP31]], label [[FOR_COND_CLEANUP:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; TFA_INTERLEAVE: for.cond.cleanup: ; TFA_INTERLEAVE-NEXT: ret double [[TMP26]] ; entry: br label %for.body for.body: %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ] %fma_sum = phi double [ 0.000000e+00, %entry ], [ %muladd, %for.body ] %gep = getelementptr double, ptr %b, i64 %indvars.iv %load = load double, ptr %gep %muladd = tail call double @llvm.fmuladd.f64(double %load, double %m, double %fma_sum) %toint = fptoui double %load to i64 %call = call i64 @foo(i64 %toint) #1 %arrayidx = getelementptr inbounds i64, ptr %a, i64 %indvars.iv store i64 %call, ptr %arrayidx %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1 %exitcond = icmp eq i64 %indvars.iv.next, 1025 br i1 %exitcond, label %for.cond.cleanup, label %for.body for.cond.cleanup: ret double %muladd } declare i64 @foo(i64) declare double @llvm.fmuladd.f64(double, double, double) ;; scalable vector variants of foo declare @foo_uniform(i64, ) declare @foo_vector(, ) declare @foo_vector_nomask() attributes #0 = { nounwind "vector-function-abi-variant"="_ZGVsMxv_foo(foo_vector),_ZGVsMxu_foo(foo_uniform)" } attributes #1 = { nounwind "vector-function-abi-variant"="_ZGVsMxv_foo(foo_vector)" } attributes #2 = { nounwind "vector-function-abi-variant"="_ZGVsNxv_foo(foo_vector_nomask)" } attributes #3 = { nounwind "vector-function-abi-variant"="_ZGVsNxv_foo(foo_vector_nomask),_ZGVsMxv_foo(foo_vector)" } attributes #4 = { "target-features"="+sve" vscale_range(2,16) "no-trapping-math"="false" }