; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -loop-vectorize -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7-avx -S | FileCheck %s

target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-apple-macosx10.8.0"
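
; conversion_cost1: the loop stores its induction variable truncated to i8.
; The CHECK lines verify that on corei7-avx the loop is vectorized with
; VF = 32, i.e. the i64-to-i8 trunc is cheap enough not to inhibit the wide VF.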
define i32 @conversion_cost1(i32 %n, i8* nocapture %A, float* nocapture %B) nounwind uwtable ssp {
; CHECK-LABEL: @conversion_cost1(
; CHECK-NEXT:    [[TMP1:%.*]] = icmp sgt i32 [[N:%.*]], 3
; CHECK-NEXT:    br i1 [[TMP1]], label [[DOTLR_PH_PREHEADER:%.*]], label [[DOT_CRIT_EDGE:%.*]]
; CHECK:       .lr.ph.preheader:
; CHECK-NEXT:    [[TMP2:%.*]] = add i32 [[N]], -4
; CHECK-NEXT:    [[TMP3:%.*]] = zext i32 [[TMP2]] to i64
; CHECK-NEXT:    [[TMP4:%.*]] = add nuw nsw i64 [[TMP3]], 1
; CHECK-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP4]], 32
; CHECK-NEXT:    br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK:       vector.ph:
; CHECK-NEXT:    [[N_MOD_VF:%.*]] = urem i64 [[TMP4]], 32
; CHECK-NEXT:    [[N_VEC:%.*]] = sub i64 [[TMP4]], [[N_MOD_VF]]
; CHECK-NEXT:    [[IND_END:%.*]] = add i64 3, [[N_VEC]]
; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
; CHECK:       vector.body:
; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[VEC_IND:%.*]] = phi <32 x i64> [ <i64 3, i64 4, i64 5, i64 6, i64 7, i64 8, i64 9, i64 10, i64 11, i64 12, i64 13, i64 14, i64 15, i64 16, i64 17, i64 18, i64 19, i64 20, i64 21, i64 22, i64 23, i64 24, i64 25, i64 26, i64 27, i64 28, i64 29, i64 30, i64 31, i64 32, i64 33, i64 34>, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[VEC_IND1:%.*]] = phi <32 x i8> [ <i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16, i8 17, i8 18, i8 19, i8 20, i8 21, i8 22, i8 23, i8 24, i8 25, i8 26, i8 27, i8 28, i8 29, i8 30, i8 31, i8 32, i8 33, i8 34>, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT2:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[OFFSET_IDX:%.*]] = add i64 3, [[INDEX]]
; CHECK-NEXT:    [[TMP5:%.*]] = add i64 [[OFFSET_IDX]], 0
; CHECK-NEXT:    [[TMP6:%.*]] = add i64 [[OFFSET_IDX]], 1
; CHECK-NEXT:    [[TMP7:%.*]] = add i64 [[OFFSET_IDX]], 2
; CHECK-NEXT:    [[TMP8:%.*]] = add i64 [[OFFSET_IDX]], 3
; CHECK-NEXT:    [[TMP9:%.*]] = add i64 [[OFFSET_IDX]], 4
; CHECK-NEXT:    [[TMP10:%.*]] = add i64 [[OFFSET_IDX]], 5
; CHECK-NEXT:    [[TMP11:%.*]] = add i64 [[OFFSET_IDX]], 6
; CHECK-NEXT:    [[TMP12:%.*]] = add i64 [[OFFSET_IDX]], 7
; CHECK-NEXT:    [[TMP13:%.*]] = add i64 [[OFFSET_IDX]], 8
; CHECK-NEXT:    [[TMP14:%.*]] = add i64 [[OFFSET_IDX]], 9
; CHECK-NEXT:    [[TMP15:%.*]] = add i64 [[OFFSET_IDX]], 10
; CHECK-NEXT:    [[TMP16:%.*]] = add i64 [[OFFSET_IDX]], 11
; CHECK-NEXT:    [[TMP17:%.*]] = add i64 [[OFFSET_IDX]], 12
; CHECK-NEXT:    [[TMP18:%.*]] = add i64 [[OFFSET_IDX]], 13
; CHECK-NEXT:    [[TMP19:%.*]] = add i64 [[OFFSET_IDX]], 14
; CHECK-NEXT:    [[TMP20:%.*]] = add i64 [[OFFSET_IDX]], 15
; CHECK-NEXT:    [[TMP21:%.*]] = add i64 [[OFFSET_IDX]], 16
; CHECK-NEXT:    [[TMP22:%.*]] = add i64 [[OFFSET_IDX]], 17
; CHECK-NEXT:    [[TMP23:%.*]] = add i64 [[OFFSET_IDX]], 18
; CHECK-NEXT:    [[TMP24:%.*]] = add i64 [[OFFSET_IDX]], 19
; CHECK-NEXT:    [[TMP25:%.*]] = add i64 [[OFFSET_IDX]], 20
; CHECK-NEXT:    [[TMP26:%.*]] = add i64 [[OFFSET_IDX]], 21
; CHECK-NEXT:    [[TMP27:%.*]] = add i64 [[OFFSET_IDX]], 22
; CHECK-NEXT:    [[TMP28:%.*]] = add i64 [[OFFSET_IDX]], 23
; CHECK-NEXT:    [[TMP29:%.*]] = add i64 [[OFFSET_IDX]], 24
; CHECK-NEXT:    [[TMP30:%.*]] = add i64 [[OFFSET_IDX]], 25
; CHECK-NEXT:    [[TMP31:%.*]] = add i64 [[OFFSET_IDX]], 26
; CHECK-NEXT:    [[TMP32:%.*]] = add i64 [[OFFSET_IDX]], 27
; CHECK-NEXT:    [[TMP33:%.*]] = add i64 [[OFFSET_IDX]], 28
; CHECK-NEXT:    [[TMP34:%.*]] = add i64 [[OFFSET_IDX]], 29
; CHECK-NEXT:    [[TMP35:%.*]] = add i64 [[OFFSET_IDX]], 30
; CHECK-NEXT:    [[TMP36:%.*]] = add i64 [[OFFSET_IDX]], 31
; CHECK-NEXT:    [[TMP37:%.*]] = getelementptr inbounds i8, i8* [[A:%.*]], i64 [[TMP5]]
; CHECK-NEXT:    [[TMP38:%.*]] = getelementptr inbounds i8, i8* [[TMP37]], i32 0
; CHECK-NEXT:    [[TMP39:%.*]] = bitcast i8* [[TMP38]] to <32 x i8>*
; CHECK-NEXT:    store <32 x i8> [[VEC_IND1]], <32 x i8>* [[TMP39]], align 1
; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32
; CHECK-NEXT:    [[VEC_IND_NEXT]] = add <32 x i64> [[VEC_IND]], <i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32>
; CHECK-NEXT:    [[VEC_IND_NEXT2]] = add <32 x i8> [[VEC_IND1]], <i8 32, i8 32, i8 32, i8 32, i8 32, i8 32, i8 32, i8 32, i8 32, i8 32, i8 32, i8 32, i8 32, i8 32, i8 32, i8 32, i8 32, i8 32, i8 32, i8 32, i8 32, i8 32, i8 32, i8 32, i8 32, i8 32, i8 32, i8 32, i8 32, i8 32, i8 32, i8 32>
; CHECK-NEXT:    [[TMP40:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT:    br i1 [[TMP40]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK:       middle.block:
; CHECK-NEXT:    [[CMP_N:%.*]] = icmp eq i64 [[TMP4]], [[N_VEC]]
; CHECK-NEXT:    br i1 [[CMP_N]], label [[DOT_CRIT_EDGE_LOOPEXIT:%.*]], label [[SCALAR_PH]]
; CHECK:       scalar.ph:
; CHECK-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ [[IND_END]], [[MIDDLE_BLOCK]] ], [ 3, [[DOTLR_PH_PREHEADER]] ]
; CHECK-NEXT:    br label [[DOTLR_PH:%.*]]
; CHECK:       .lr.ph:
; CHECK-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], [[DOTLR_PH]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; CHECK-NEXT:    [[TMP41:%.*]] = trunc i64 [[INDVARS_IV]] to i8
; CHECK-NEXT:    [[TMP42:%.*]] = getelementptr inbounds i8, i8* [[A]], i64 [[INDVARS_IV]]
; CHECK-NEXT:    store i8 [[TMP41]], i8* [[TMP42]], align 1
; CHECK-NEXT:    [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1
; CHECK-NEXT:    [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32
; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], [[N]]
; CHECK-NEXT:    br i1 [[EXITCOND]], label [[DOT_CRIT_EDGE_LOOPEXIT]], label [[DOTLR_PH]], !llvm.loop [[LOOP2:![0-9]+]]
; CHECK:       ._crit_edge.loopexit:
; CHECK-NEXT:    br label [[DOT_CRIT_EDGE]]
; CHECK:       ._crit_edge:
; CHECK-NEXT:    ret i32 undef
;
  %1 = icmp sgt i32 %n, 3
  br i1 %1, label %.lr.ph, label %._crit_edge

.lr.ph:                                           ; preds = %0, %.lr.ph
  %indvars.iv = phi i64 [ %indvars.iv.next, %.lr.ph ], [ 3, %0 ]
  %2 = trunc i64 %indvars.iv to i8
  %3 = getelementptr inbounds i8, i8* %A, i64 %indvars.iv
  store i8 %2, i8* %3, align 1
  %indvars.iv.next = add i64 %indvars.iv, 1
  %lftr.wideiv = trunc i64 %indvars.iv.next to i32
  %exitcond = icmp eq i32 %lftr.wideiv, %n
  br i1 %exitcond, label %._crit_edge, label %.lr.ph

._crit_edge:                                      ; preds = %.lr.ph, %0
  ret i32 undef
}
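
; conversion_cost2: here the induction variable is converted with sitofp,
; a costlier operation, and the CHECK lines verify that the narrower
; VF = 2 (interleaved by 4) is chosen instead.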
define i32 @conversion_cost2(i32 %n, i8* nocapture %A, float* nocapture %B) nounwind uwtable ssp {
; CHECK-LABEL: @conversion_cost2(
; CHECK-NEXT:    [[TMP1:%.*]] = icmp sgt i32 [[N:%.*]], 9
; CHECK-NEXT:    br i1 [[TMP1]], label [[DOTLR_PH_PREHEADER:%.*]], label [[DOT_CRIT_EDGE:%.*]]
; CHECK:       .lr.ph.preheader:
; CHECK-NEXT:    [[TMP2:%.*]] = add i32 [[N]], -10
; CHECK-NEXT:    [[TMP3:%.*]] = zext i32 [[TMP2]] to i64
; CHECK-NEXT:    [[TMP4:%.*]] = add nuw nsw i64 [[TMP3]], 1
; CHECK-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP4]], 8
; CHECK-NEXT:    br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK:       vector.ph:
; CHECK-NEXT:    [[N_MOD_VF:%.*]] = urem i64 [[TMP4]], 8
; CHECK-NEXT:    [[N_VEC:%.*]] = sub i64 [[TMP4]], [[N_MOD_VF]]
; CHECK-NEXT:    [[IND_END:%.*]] = add i64 9, [[N_VEC]]
; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
; CHECK:       vector.body:
; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[VEC_IND:%.*]] = phi <2 x i64> [ <i64 9, i64 10>, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[STEP_ADD:%.*]] = add <2 x i64> [[VEC_IND]], <i64 2, i64 2>
; CHECK-NEXT:    [[STEP_ADD1:%.*]] = add <2 x i64> [[STEP_ADD]], <i64 2, i64 2>
; CHECK-NEXT:    [[STEP_ADD2:%.*]] = add <2 x i64> [[STEP_ADD1]], <i64 2, i64 2>
; CHECK-NEXT:    [[OFFSET_IDX:%.*]] = add i64 9, [[INDEX]]
; CHECK-NEXT:    [[TMP5:%.*]] = add i64 [[OFFSET_IDX]], 0
; CHECK-NEXT:    [[TMP6:%.*]] = add i64 [[OFFSET_IDX]], 1
; CHECK-NEXT:    [[TMP7:%.*]] = add i64 [[OFFSET_IDX]], 2
; CHECK-NEXT:    [[TMP8:%.*]] = add i64 [[OFFSET_IDX]], 3
; CHECK-NEXT:    [[TMP9:%.*]] = add i64 [[OFFSET_IDX]], 4
; CHECK-NEXT:    [[TMP10:%.*]] = add i64 [[OFFSET_IDX]], 5
; CHECK-NEXT:    [[TMP11:%.*]] = add i64 [[OFFSET_IDX]], 6
; CHECK-NEXT:    [[TMP12:%.*]] = add i64 [[OFFSET_IDX]], 7
; CHECK-NEXT:    [[TMP13:%.*]] = add nsw <2 x i64> [[VEC_IND]], <i64 3, i64 3>
; CHECK-NEXT:    [[TMP14:%.*]] = add nsw <2 x i64> [[STEP_ADD]], <i64 3, i64 3>
; CHECK-NEXT:    [[TMP15:%.*]] = add nsw <2 x i64> [[STEP_ADD1]], <i64 3, i64 3>
; CHECK-NEXT:    [[TMP16:%.*]] = add nsw <2 x i64> [[STEP_ADD2]], <i64 3, i64 3>
; CHECK-NEXT:    [[TMP17:%.*]] = sitofp <2 x i64> [[TMP13]] to <2 x float>
; CHECK-NEXT:    [[TMP18:%.*]] = sitofp <2 x i64> [[TMP14]] to <2 x float>
; CHECK-NEXT:    [[TMP19:%.*]] = sitofp <2 x i64> [[TMP15]] to <2 x float>
; CHECK-NEXT:    [[TMP20:%.*]] = sitofp <2 x i64> [[TMP16]] to <2 x float>
; CHECK-NEXT:    [[TMP21:%.*]] = getelementptr inbounds float, float* [[B:%.*]], i64 [[TMP5]]
; CHECK-NEXT:    [[TMP22:%.*]] = getelementptr inbounds float, float* [[B]], i64 [[TMP7]]
; CHECK-NEXT:    [[TMP23:%.*]] = getelementptr inbounds float, float* [[B]], i64 [[TMP9]]
; CHECK-NEXT:    [[TMP24:%.*]] = getelementptr inbounds float, float* [[B]], i64 [[TMP11]]
; CHECK-NEXT:    [[TMP25:%.*]] = getelementptr inbounds float, float* [[TMP21]], i32 0
; CHECK-NEXT:    [[TMP26:%.*]] = bitcast float* [[TMP25]] to <2 x float>*
; CHECK-NEXT:    store <2 x float> [[TMP17]], <2 x float>* [[TMP26]], align 4
; CHECK-NEXT:    [[TMP27:%.*]] = getelementptr inbounds float, float* [[TMP21]], i32 2
; CHECK-NEXT:    [[TMP28:%.*]] = bitcast float* [[TMP27]] to <2 x float>*
; CHECK-NEXT:    store <2 x float> [[TMP18]], <2 x float>* [[TMP28]], align 4
; CHECK-NEXT:    [[TMP29:%.*]] = getelementptr inbounds float, float* [[TMP21]], i32 4
; CHECK-NEXT:    [[TMP30:%.*]] = bitcast float* [[TMP29]] to <2 x float>*
; CHECK-NEXT:    store <2 x float> [[TMP19]], <2 x float>* [[TMP30]], align 4
; CHECK-NEXT:    [[TMP31:%.*]] = getelementptr inbounds float, float* [[TMP21]], i32 6
; CHECK-NEXT:    [[TMP32:%.*]] = bitcast float* [[TMP31]] to <2 x float>*
; CHECK-NEXT:    store <2 x float> [[TMP20]], <2 x float>* [[TMP32]], align 4
; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8
; CHECK-NEXT:    [[VEC_IND_NEXT]] = add <2 x i64> [[STEP_ADD2]], <i64 2, i64 2>
; CHECK-NEXT:    [[TMP33:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT:    br i1 [[TMP33]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK:       middle.block:
; CHECK-NEXT:    [[CMP_N:%.*]] = icmp eq i64 [[TMP4]], [[N_VEC]]
; CHECK-NEXT:    br i1 [[CMP_N]], label [[DOT_CRIT_EDGE_LOOPEXIT:%.*]], label [[SCALAR_PH]]
; CHECK:       scalar.ph:
; CHECK-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ [[IND_END]], [[MIDDLE_BLOCK]] ], [ 9, [[DOTLR_PH_PREHEADER]] ]
; CHECK-NEXT:    br label [[DOTLR_PH:%.*]]
; CHECK:       .lr.ph:
; CHECK-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], [[DOTLR_PH]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
; CHECK-NEXT:    [[ADD:%.*]] = add nsw i64 [[INDVARS_IV]], 3
; CHECK-NEXT:    [[TOFP:%.*]] = sitofp i64 [[ADD]] to float
; CHECK-NEXT:    [[GEP:%.*]] = getelementptr inbounds float, float* [[B]], i64 [[INDVARS_IV]]
; CHECK-NEXT:    store float [[TOFP]], float* [[GEP]], align 4
; CHECK-NEXT:    [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1
; CHECK-NEXT:    [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32
; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], [[N]]
; CHECK-NEXT:    br i1 [[EXITCOND]], label [[DOT_CRIT_EDGE_LOOPEXIT]], label [[DOTLR_PH]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK:       ._crit_edge.loopexit:
; CHECK-NEXT:    br label [[DOT_CRIT_EDGE]]
; CHECK:       ._crit_edge:
; CHECK-NEXT:    ret i32 undef
;
  %1 = icmp sgt i32 %n, 9
  br i1 %1, label %.lr.ph, label %._crit_edge

.lr.ph:                                           ; preds = %0, %.lr.ph
  %indvars.iv = phi i64 [ %indvars.iv.next, %.lr.ph ], [ 9, %0 ]
  %add = add nsw i64 %indvars.iv, 3
  %tofp = sitofp i64 %add to float
  %gep = getelementptr inbounds float, float* %B, i64 %indvars.iv
  store float %tofp, float* %gep, align 4
  %indvars.iv.next = add i64 %indvars.iv, 1
  %lftr.wideiv = trunc i64 %indvars.iv.next to i32
  %exitcond = icmp eq i32 %lftr.wideiv, %n
  br i1 %exitcond, label %._crit_edge, label %.lr.ph

._crit_edge:                                      ; preds = %.lr.ph, %0
  ret i32 undef
}