; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -loop-vectorize -force-vector-width=2 -force-vector-interleave=2 -S | FileCheck %s

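; The assume in this loop operates on a loop-varying condition. The checks
; below verify that the compare is widened and that the assume is scalarized,
; with one call per extracted vector lane.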
define void @test1(float* noalias nocapture %a, float* noalias nocapture readonly %b) {
; CHECK-LABEL: @test1(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK:       vector.ph:
; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
; CHECK:       vector.body:
; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[TMP0:%.*]] = add i64 [[INDEX]], 0
; CHECK-NEXT:    [[TMP1:%.*]] = add i64 [[INDEX]], 2
; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds float, float* [[B:%.*]], i64 [[TMP0]]
; CHECK-NEXT:    [[TMP3:%.*]] = getelementptr inbounds float, float* [[B]], i64 [[TMP1]]
; CHECK-NEXT:    [[TMP4:%.*]] = getelementptr inbounds float, float* [[TMP2]], i32 0
; CHECK-NEXT:    [[TMP5:%.*]] = bitcast float* [[TMP4]] to <2 x float>*
; CHECK-NEXT:    [[WIDE_LOAD:%.*]] = load <2 x float>, <2 x float>* [[TMP5]], align 4
; CHECK-NEXT:    [[TMP6:%.*]] = getelementptr inbounds float, float* [[TMP2]], i32 2
; CHECK-NEXT:    [[TMP7:%.*]] = bitcast float* [[TMP6]] to <2 x float>*
; CHECK-NEXT:    [[WIDE_LOAD1:%.*]] = load <2 x float>, <2 x float>* [[TMP7]], align 4
; CHECK-NEXT:    [[TMP8:%.*]] = fcmp ogt <2 x float> [[WIDE_LOAD]], <float 1.000000e+02, float 1.000000e+02>
; CHECK-NEXT:    [[TMP9:%.*]] = fcmp ogt <2 x float> [[WIDE_LOAD1]], <float 1.000000e+02, float 1.000000e+02>
; CHECK-NEXT:    [[TMP10:%.*]] = extractelement <2 x i1> [[TMP8]], i32 0
; CHECK-NEXT:    tail call void @llvm.assume(i1 [[TMP10]])
; CHECK-NEXT:    [[TMP11:%.*]] = extractelement <2 x i1> [[TMP8]], i32 1
; CHECK-NEXT:    tail call void @llvm.assume(i1 [[TMP11]])
; CHECK-NEXT:    [[TMP12:%.*]] = extractelement <2 x i1> [[TMP9]], i32 0
; CHECK-NEXT:    tail call void @llvm.assume(i1 [[TMP12]])
; CHECK-NEXT:    [[TMP13:%.*]] = extractelement <2 x i1> [[TMP9]], i32 1
; CHECK-NEXT:    tail call void @llvm.assume(i1 [[TMP13]])
; CHECK-NEXT:    [[TMP14:%.*]] = fadd <2 x float> [[WIDE_LOAD]], <float 1.000000e+00, float 1.000000e+00>
; CHECK-NEXT:    [[TMP15:%.*]] = fadd <2 x float> [[WIDE_LOAD1]], <float 1.000000e+00, float 1.000000e+00>
; CHECK-NEXT:    [[TMP16:%.*]] = getelementptr inbounds float, float* [[A:%.*]], i64 [[TMP0]]
; CHECK-NEXT:    [[TMP17:%.*]] = getelementptr inbounds float, float* [[A]], i64 [[TMP1]]
; CHECK-NEXT:    [[TMP18:%.*]] = getelementptr inbounds float, float* [[TMP16]], i32 0
; CHECK-NEXT:    [[TMP19:%.*]] = bitcast float* [[TMP18]] to <2 x float>*
; CHECK-NEXT:    store <2 x float> [[TMP14]], <2 x float>* [[TMP19]], align 4
; CHECK-NEXT:    [[TMP20:%.*]] = getelementptr inbounds float, float* [[TMP16]], i32 2
; CHECK-NEXT:    [[TMP21:%.*]] = bitcast float* [[TMP20]] to <2 x float>*
; CHECK-NEXT:    store <2 x float> [[TMP15]], <2 x float>* [[TMP21]], align 4
; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT:    [[TMP22:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1600
; CHECK-NEXT:    br i1 [[TMP22]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK:       middle.block:
; CHECK-NEXT:    [[CMP_N:%.*]] = icmp eq i64 1600, 1600
; CHECK-NEXT:    br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
; CHECK:       scalar.ph:
; CHECK-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ 1600, [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
; CHECK:       for.body:
; CHECK-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds float, float* [[B]], i64 [[INDVARS_IV]]
; CHECK-NEXT:    [[TMP23:%.*]] = load float, float* [[ARRAYIDX]], align 4
; CHECK-NEXT:    [[CMP1:%.*]] = fcmp ogt float [[TMP23]], 1.000000e+02
; CHECK-NEXT:    tail call void @llvm.assume(i1 [[CMP1]])
; CHECK-NEXT:    [[ADD:%.*]] = fadd float [[TMP23]], 1.000000e+00
; CHECK-NEXT:    [[ARRAYIDX5:%.*]] = getelementptr inbounds float, float* [[A]], i64 [[INDVARS_IV]]
; CHECK-NEXT:    store float [[ADD]], float* [[ARRAYIDX5]], align 4
; CHECK-NEXT:    [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV]], 1599
; CHECK-NEXT:    br i1 [[EXITCOND]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP2:![0-9]+]]
; CHECK:       for.end:
; CHECK-NEXT:    ret void
;
entry:
  br label %for.body

for.body:                                         ; preds = %for.body, %entry
  %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
  %arrayidx = getelementptr inbounds float, float* %b, i64 %indvars.iv
  %0 = load float, float* %arrayidx, align 4
  %cmp1 = fcmp ogt float %0, 1.000000e+02
  tail call void @llvm.assume(i1 %cmp1)
  %add = fadd float %0, 1.000000e+00
  %arrayidx5 = getelementptr inbounds float, float* %a, i64 %indvars.iv
  store float %add, float* %arrayidx5, align 4
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %exitcond = icmp eq i64 %indvars.iv, 1599
  br i1 %exitcond, label %for.end, label %for.body

for.end:                                          ; preds = %for.body
  ret void
}

declare void @llvm.assume(i1) #0

attributes #0 = { nounwind willreturn }

%struct.data = type { float*, float* }

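; Here the assumes operate on loop-invariant alignment conditions computed in
; the entry block. The checks below verify that they survive vectorization:
; the vector body carries four copies of each assume (VF 2 x interleave 2),
; one per scalar iteration covered by a vector iteration.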
define void @test2(%struct.data* nocapture readonly %d) {
; CHECK-LABEL: @test2(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[B:%.*]] = getelementptr inbounds [[STRUCT_DATA:%.*]], %struct.data* [[D:%.*]], i64 0, i32 1
; CHECK-NEXT:    [[TMP0:%.*]] = load float*, float** [[B]], align 8
; CHECK-NEXT:    [[TMP1:%.*]] = bitcast float* [[TMP0]] to i8*
; CHECK-NEXT:    [[PTRINT:%.*]] = ptrtoint float* [[TMP0]] to i64
; CHECK-NEXT:    [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 31
; CHECK-NEXT:    [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
; CHECK-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_DATA]], %struct.data* [[D]], i64 0, i32 0
; CHECK-NEXT:    [[TMP2:%.*]] = load float*, float** [[A]], align 8
; CHECK-NEXT:    [[TMP3:%.*]] = bitcast float* [[TMP2]] to i8*
; CHECK-NEXT:    [[PTRINT2:%.*]] = ptrtoint float* [[TMP2]] to i64
; CHECK-NEXT:    [[MASKEDPTR3:%.*]] = and i64 [[PTRINT2]], 31
; CHECK-NEXT:    [[MASKCOND4:%.*]] = icmp eq i64 [[MASKEDPTR3]], 0
; CHECK-NEXT:    br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_MEMCHECK:%.*]]
; CHECK:       vector.memcheck:
; CHECK-NEXT:    [[SCEVGEP:%.*]] = getelementptr float, float* [[TMP2]], i64 1600
; CHECK-NEXT:    [[SCEVGEP1:%.*]] = bitcast float* [[SCEVGEP]] to i8*
; CHECK-NEXT:    [[SCEVGEP2:%.*]] = getelementptr float, float* [[TMP0]], i64 1600
; CHECK-NEXT:    [[SCEVGEP23:%.*]] = bitcast float* [[SCEVGEP2]] to i8*
; CHECK-NEXT:    [[BOUND0:%.*]] = icmp ult i8* [[TMP3]], [[SCEVGEP23]]
; CHECK-NEXT:    [[BOUND1:%.*]] = icmp ult i8* [[TMP1]], [[SCEVGEP1]]
; CHECK-NEXT:    [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
; CHECK-NEXT:    [[MEMCHECK_CONFLICT:%.*]] = and i1 [[FOUND_CONFLICT]], true
; CHECK-NEXT:    br i1 [[MEMCHECK_CONFLICT]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]]
; CHECK:       vector.ph:
; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
; CHECK:       vector.body:
; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[TMP4:%.*]] = add i64 [[INDEX]], 0
; CHECK-NEXT:    [[TMP5:%.*]] = add i64 [[INDEX]], 2
; CHECK-NEXT:    tail call void @llvm.assume(i1 [[MASKCOND]])
; CHECK-NEXT:    tail call void @llvm.assume(i1 [[MASKCOND]])
; CHECK-NEXT:    tail call void @llvm.assume(i1 [[MASKCOND]])
; CHECK-NEXT:    tail call void @llvm.assume(i1 [[MASKCOND]])
; CHECK-NEXT:    [[TMP6:%.*]] = getelementptr inbounds float, float* [[TMP0]], i64 [[TMP4]]
; CHECK-NEXT:    [[TMP7:%.*]] = getelementptr inbounds float, float* [[TMP0]], i64 [[TMP5]]
; CHECK-NEXT:    [[TMP8:%.*]] = getelementptr inbounds float, float* [[TMP6]], i32 0
; CHECK-NEXT:    [[TMP9:%.*]] = bitcast float* [[TMP8]] to <2 x float>*
; CHECK-NEXT:    [[WIDE_LOAD:%.*]] = load <2 x float>, <2 x float>* [[TMP9]], align 4, !alias.scope !4
; CHECK-NEXT:    [[TMP10:%.*]] = getelementptr inbounds float, float* [[TMP6]], i32 2
; CHECK-NEXT:    [[TMP11:%.*]] = bitcast float* [[TMP10]] to <2 x float>*
; CHECK-NEXT:    [[WIDE_LOAD4:%.*]] = load <2 x float>, <2 x float>* [[TMP11]], align 4, !alias.scope !4
; CHECK-NEXT:    [[TMP12:%.*]] = fadd <2 x float> [[WIDE_LOAD]], <float 1.000000e+00, float 1.000000e+00>
; CHECK-NEXT:    [[TMP13:%.*]] = fadd <2 x float> [[WIDE_LOAD4]], <float 1.000000e+00, float 1.000000e+00>
; CHECK-NEXT:    tail call void @llvm.assume(i1 [[MASKCOND4]])
; CHECK-NEXT:    tail call void @llvm.assume(i1 [[MASKCOND4]])
; CHECK-NEXT:    tail call void @llvm.assume(i1 [[MASKCOND4]])
; CHECK-NEXT:    tail call void @llvm.assume(i1 [[MASKCOND4]])
; CHECK-NEXT:    [[TMP14:%.*]] = getelementptr inbounds float, float* [[TMP2]], i64 [[TMP4]]
; CHECK-NEXT:    [[TMP15:%.*]] = getelementptr inbounds float, float* [[TMP2]], i64 [[TMP5]]
; CHECK-NEXT:    [[TMP16:%.*]] = getelementptr inbounds float, float* [[TMP14]], i32 0
; CHECK-NEXT:    [[TMP17:%.*]] = bitcast float* [[TMP16]] to <2 x float>*
; CHECK-NEXT:    store <2 x float> [[TMP12]], <2 x float>* [[TMP17]], align 4, !alias.scope !7, !noalias !4
; CHECK-NEXT:    [[TMP18:%.*]] = getelementptr inbounds float, float* [[TMP14]], i32 2
; CHECK-NEXT:    [[TMP19:%.*]] = bitcast float* [[TMP18]] to <2 x float>*
; CHECK-NEXT:    store <2 x float> [[TMP13]], <2 x float>* [[TMP19]], align 4, !alias.scope !7, !noalias !4
; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT:    [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1600
; CHECK-NEXT:    br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
; CHECK:       middle.block:
; CHECK-NEXT:    [[CMP_N:%.*]] = icmp eq i64 1600, 1600
; CHECK-NEXT:    br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
; CHECK:       scalar.ph:
; CHECK-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ 1600, [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ], [ 0, [[VECTOR_MEMCHECK]] ]
; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
; CHECK:       for.body:
; CHECK-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT:    tail call void @llvm.assume(i1 [[MASKCOND]])
; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds float, float* [[TMP0]], i64 [[INDVARS_IV]]
; CHECK-NEXT:    [[TMP21:%.*]] = load float, float* [[ARRAYIDX]], align 4
; CHECK-NEXT:    [[ADD:%.*]] = fadd float [[TMP21]], 1.000000e+00
; CHECK-NEXT:    tail call void @llvm.assume(i1 [[MASKCOND4]])
; CHECK-NEXT:    [[ARRAYIDX5:%.*]] = getelementptr inbounds float, float* [[TMP2]], i64 [[INDVARS_IV]]
; CHECK-NEXT:    store float [[ADD]], float* [[ARRAYIDX5]], align 4
; CHECK-NEXT:    [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV]], 1599
; CHECK-NEXT:    br i1 [[EXITCOND]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
; CHECK:       for.end:
; CHECK-NEXT:    ret void
;
entry:
  %b = getelementptr inbounds %struct.data, %struct.data* %d, i64 0, i32 1
  %0 = load float*, float** %b, align 8
  %ptrint = ptrtoint float* %0 to i64
  %maskedptr = and i64 %ptrint, 31
  %maskcond = icmp eq i64 %maskedptr, 0
  %a = getelementptr inbounds %struct.data, %struct.data* %d, i64 0, i32 0
  %1 = load float*, float** %a, align 8
  %ptrint2 = ptrtoint float* %1 to i64
  %maskedptr3 = and i64 %ptrint2, 31
  %maskcond4 = icmp eq i64 %maskedptr3, 0
  br label %for.body

for.body:                                         ; preds = %for.body, %entry
  %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
  tail call void @llvm.assume(i1 %maskcond)
  %arrayidx = getelementptr inbounds float, float* %0, i64 %indvars.iv
  %2 = load float, float* %arrayidx, align 4
  %add = fadd float %2, 1.000000e+00
  tail call void @llvm.assume(i1 %maskcond4)
  %arrayidx5 = getelementptr inbounds float, float* %1, i64 %indvars.iv
  store float %add, float* %arrayidx5, align 4
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %exitcond = icmp eq i64 %indvars.iv, 1599
  br i1 %exitcond, label %for.end, label %for.body

for.end:                                          ; preds = %for.body
  ret void
}

; Test case for PR43620. Make sure we can vectorize with predication in the
; presence of assume calls. For now, check that we drop all assumes in
; predicated blocks in the vector body.
define void @predicated_assume(float* noalias nocapture readonly %a, float* noalias nocapture %b, i32 %n) {
; Check that the vector.body does not contain any assumes.
; CHECK-LABEL: @predicated_assume(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[CMP15:%.*]] = icmp eq i32 [[N:%.*]], 0
; CHECK-NEXT:    br i1 [[CMP15]], label [[FOR_COND_CLEANUP:%.*]], label [[FOR_BODY_PREHEADER:%.*]]
; CHECK:       for.body.preheader:
; CHECK-NEXT:    [[TMP0:%.*]] = zext i32 [[N]] to i64
; CHECK-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP0]], 4
; CHECK-NEXT:    br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK:       vector.ph:
; CHECK-NEXT:    [[N_MOD_VF:%.*]] = urem i64 [[TMP0]], 4
; CHECK-NEXT:    [[N_VEC:%.*]] = sub i64 [[TMP0]], [[N_MOD_VF]]
; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
; CHECK:       vector.body:
; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[VEC_IND:%.*]] = phi <2 x i64> [ <i64 0, i64 1>, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[STEP_ADD:%.*]] = add <2 x i64> [[VEC_IND]], <i64 2, i64 2>
; CHECK-NEXT:    [[TMP1:%.*]] = add i64 [[INDEX]], 0
; CHECK-NEXT:    [[TMP2:%.*]] = add i64 [[INDEX]], 1
; CHECK-NEXT:    [[TMP3:%.*]] = add i64 [[INDEX]], 2
; CHECK-NEXT:    [[TMP4:%.*]] = add i64 [[INDEX]], 3
; CHECK-NEXT:    [[TMP5:%.*]] = icmp ult <2 x i64> [[VEC_IND]], <i64 495616, i64 495616>
; CHECK-NEXT:    [[TMP6:%.*]] = icmp ult <2 x i64> [[STEP_ADD]], <i64 495616, i64 495616>
; CHECK-NEXT:    [[TMP7:%.*]] = icmp ult <2 x i64> [[VEC_IND]], <i64 991232, i64 991232>
; CHECK-NEXT:    [[TMP8:%.*]] = icmp ult <2 x i64> [[STEP_ADD]], <i64 991232, i64 991232>
; CHECK-NEXT:    [[TMP9:%.*]] = xor <2 x i1> [[TMP5]], <i1 true, i1 true>
; CHECK-NEXT:    [[TMP10:%.*]] = xor <2 x i1> [[TMP6]], <i1 true, i1 true>
; CHECK-NEXT:    [[PREDPHI:%.*]] = select <2 x i1> [[TMP5]], <2 x float> <float 2.300000e+01, float 2.300000e+01>, <2 x float> <float 4.200000e+01, float 4.200000e+01>
; CHECK-NEXT:    [[PREDPHI2:%.*]] = select <2 x i1> [[TMP6]], <2 x float> <float 2.300000e+01, float 2.300000e+01>, <2 x float> <float 4.200000e+01, float 4.200000e+01>
; CHECK-NEXT:    [[TMP11:%.*]] = getelementptr inbounds float, float* [[A:%.*]], i64 [[TMP1]]
; CHECK-NEXT:    [[TMP12:%.*]] = getelementptr inbounds float, float* [[A]], i64 [[TMP3]]
; CHECK-NEXT:    [[TMP13:%.*]] = getelementptr inbounds float, float* [[TMP11]], i32 0
; CHECK-NEXT:    [[TMP14:%.*]] = bitcast float* [[TMP13]] to <2 x float>*
; CHECK-NEXT:    [[WIDE_LOAD:%.*]] = load <2 x float>, <2 x float>* [[TMP14]], align 4
; CHECK-NEXT:    [[TMP15:%.*]] = getelementptr inbounds float, float* [[TMP11]], i32 2
; CHECK-NEXT:    [[TMP16:%.*]] = bitcast float* [[TMP15]] to <2 x float>*
; CHECK-NEXT:    [[WIDE_LOAD3:%.*]] = load <2 x float>, <2 x float>* [[TMP16]], align 4
; CHECK-NEXT:    [[TMP17:%.*]] = fmul <2 x float> [[PREDPHI]], [[WIDE_LOAD]]
; CHECK-NEXT:    [[TMP18:%.*]] = fmul <2 x float> [[PREDPHI2]], [[WIDE_LOAD3]]
; CHECK-NEXT:    [[TMP19:%.*]] = getelementptr inbounds float, float* [[B:%.*]], i64 [[TMP1]]
; CHECK-NEXT:    [[TMP20:%.*]] = getelementptr inbounds float, float* [[B]], i64 [[TMP3]]
; CHECK-NEXT:    [[TMP21:%.*]] = getelementptr inbounds float, float* [[TMP19]], i32 0
; CHECK-NEXT:    [[TMP22:%.*]] = bitcast float* [[TMP21]] to <2 x float>*
; CHECK-NEXT:    store <2 x float> [[TMP17]], <2 x float>* [[TMP22]], align 4
; CHECK-NEXT:    [[TMP23:%.*]] = getelementptr inbounds float, float* [[TMP19]], i32 2
; CHECK-NEXT:    [[TMP24:%.*]] = bitcast float* [[TMP23]] to <2 x float>*
; CHECK-NEXT:    store <2 x float> [[TMP18]], <2 x float>* [[TMP24]], align 4
; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT:    [[VEC_IND_NEXT]] = add <2 x i64> [[STEP_ADD]], <i64 2, i64 2>
; CHECK-NEXT:    [[TMP25:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT:    br i1 [[TMP25]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
; CHECK:       middle.block:
; CHECK-NEXT:    [[CMP_N:%.*]] = icmp eq i64 [[TMP0]], [[N_VEC]]
; CHECK-NEXT:    br i1 [[CMP_N]], label [[FOR_COND_CLEANUP_LOOPEXIT:%.*]], label [[SCALAR_PH]]
; CHECK:       scalar.ph:
; CHECK-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[FOR_BODY_PREHEADER]] ]
; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
; CHECK:       for.cond.cleanup.loopexit:
; CHECK-NEXT:    br label [[FOR_COND_CLEANUP]]
; CHECK:       for.cond.cleanup:
; CHECK-NEXT:    ret void
; CHECK:       for.body:
; CHECK-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[IF_END5:%.*]] ]
; CHECK-NEXT:    [[CMP1:%.*]] = icmp ult i64 [[INDVARS_IV]], 495616
; CHECK-NEXT:    br i1 [[CMP1]], label [[IF_END5]], label [[IF_ELSE:%.*]]
; CHECK:       if.else:
; CHECK-NEXT:    [[CMP2:%.*]] = icmp ult i64 [[INDVARS_IV]], 991232
; CHECK-NEXT:    tail call void @llvm.assume(i1 [[CMP2]])
; CHECK-NEXT:    br label [[IF_END5]]
; CHECK:       if.end5:
; CHECK-NEXT:    [[X_0:%.*]] = phi float [ 4.200000e+01, [[IF_ELSE]] ], [ 2.300000e+01, [[FOR_BODY]] ]
; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds float, float* [[A]], i64 [[INDVARS_IV]]
; CHECK-NEXT:    [[TMP26:%.*]] = load float, float* [[ARRAYIDX]], align 4
; CHECK-NEXT:    [[MUL:%.*]] = fmul float [[X_0]], [[TMP26]]
; CHECK-NEXT:    [[ARRAYIDX7:%.*]] = getelementptr inbounds float, float* [[B]], i64 [[INDVARS_IV]]
; CHECK-NEXT:    store float [[MUL]], float* [[ARRAYIDX7]], align 4
; CHECK-NEXT:    [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[TMP0]]
; CHECK-NEXT:    br i1 [[CMP]], label [[FOR_COND_CLEANUP_LOOPEXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
;
entry:
  %cmp15 = icmp eq i32 %n, 0
  br i1 %cmp15, label %for.cond.cleanup, label %for.body.preheader

for.body.preheader:                               ; preds = %entry
  %0 = zext i32 %n to i64
  br label %for.body

for.cond.cleanup.loopexit:                        ; preds = %if.end5
  br label %for.cond.cleanup

for.cond.cleanup:                                 ; preds = %for.cond.cleanup.loopexit, %entry
  ret void

for.body:                                         ; preds = %for.body.preheader, %if.end5
  %indvars.iv = phi i64 [ 0, %for.body.preheader ], [ %indvars.iv.next, %if.end5 ]
  %cmp1 = icmp ult i64 %indvars.iv, 495616
  br i1 %cmp1, label %if.end5, label %if.else

if.else:                                          ; preds = %for.body
  %cmp2 = icmp ult i64 %indvars.iv, 991232
  tail call void @llvm.assume(i1 %cmp2)
  br label %if.end5

if.end5:                                          ; preds = %for.body, %if.else
  %x.0 = phi float [ 4.200000e+01, %if.else ], [ 2.300000e+01, %for.body ]
  %arrayidx = getelementptr inbounds float, float* %a, i64 %indvars.iv
  %1 = load float, float* %arrayidx, align 4
  %mul = fmul float %x.0, %1
  %arrayidx7 = getelementptr inbounds float, float* %b, i64 %indvars.iv
  store float %mul, float* %arrayidx7, align 4
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %cmp = icmp eq i64 %indvars.iv.next, %0
  br i1 %cmp, label %for.cond.cleanup.loopexit, label %for.body
}