diff --git a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp index 7943f58f0739..79ddb8bf0b09 100644 --- a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp +++ b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp @@ -1632,11 +1632,6 @@ static void licm(VPlan &Plan) { void VPlanTransforms::truncateToMinimalBitwidths( VPlan &Plan, const MapVector &MinBWs) { -#ifndef NDEBUG - // Count the processed recipes and cross check the count later with MinBWs - // size, to make sure all entries in MinBWs have been handled. - unsigned NumProcessedRecipes = 0; -#endif // Keep track of created truncates, so they can be re-used. Note that we - // cannot use RAUW after creating a new truncate, as this would could make + // cannot use RAUW after creating a new truncate, as this could make // other uses have different types for their operands, making them invalidly typed. @@ -1659,38 +1654,12 @@ void VPlanTransforms::truncateToMinimalBitwidths( if (!NewResSizeInBits) continue; -#ifndef NDEBUG - NumProcessedRecipes++; -#endif // If the value wasn't vectorized, we must maintain the original scalar - // type. Skip those here, after incrementing NumProcessedRecipes. Also + // type. Skip those here. Also // skip casts which do not need to be handled explicitly here, as // redundant casts will be removed during recipe simplification. - if (isa(&R)) { -#ifndef NDEBUG - // If any of the operands is a live-in and not used by VPWidenRecipe or - // VPWidenSelectRecipe, but in MinBWs, make sure it is counted as - // processed as well. When MinBWs is currently constructed, there is no - // information about whether recipes are widened or replicated and in - // case they are reciplicated the operands are not truncated. Counting - // them them here ensures we do not miss any recipes in MinBWs. - // TODO: Remove once the analysis is done on VPlan. 
- for (VPValue *Op : R.operands()) { - if (!Op->isLiveIn()) - continue; - auto *UV = dyn_cast_or_null(Op->getUnderlyingValue()); - if (UV && MinBWs.contains(UV) && !ProcessedTruncs.contains(Op) && - none_of(Op->users(), - IsaPred)) { - // Add an entry to ProcessedTruncs to avoid counting the same - // operand multiple times. - ProcessedTruncs[Op] = nullptr; - NumProcessedRecipes += 1; - } - } -#endif + if (isa(&R)) continue; - } Type *OldResTy = TypeInfo.inferScalarType(ResultVPV); unsigned OldResSizeInBits = OldResTy->getScalarSizeInBits(); @@ -1749,19 +1718,11 @@ void VPlanTransforms::truncateToMinimalBitwidths( NewOp->insertBefore(&R); } else { PH->appendRecipe(NewOp); -#ifndef NDEBUG - auto *OpInst = dyn_cast(Op->getLiveInIRValue()); - bool IsContained = MinBWs.contains(OpInst); - NumProcessedRecipes += IsContained; -#endif } } } } - - assert(MinBWs.size() == NumProcessedRecipes && - "some entries in MinBWs haven't been processed"); } /// Remove BranchOnCond recipes with true conditions together with removing diff --git a/llvm/test/Transforms/LoopVectorize/pr125278.ll b/llvm/test/Transforms/LoopVectorize/pr125278.ll new file mode 100644 index 000000000000..2dc657ca447a --- /dev/null +++ b/llvm/test/Transforms/LoopVectorize/pr125278.ll @@ -0,0 +1,58 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals none --version 5 +; RUN: opt -passes=loop-vectorize -force-vector-width=4 -S %s | FileCheck %s + +define void @pr125278(ptr %dst, i64 %n) { +; CHECK-LABEL: define void @pr125278( +; CHECK-SAME: ptr [[DST:%.*]], i64 [[N:%.*]]) { +; CHECK-NEXT: [[ENTRY:.*:]] +; CHECK-NEXT: [[TRUE_EXT:%.*]] = zext i1 true to i32 +; CHECK-NEXT: [[UMAX:%.*]] = call i64 @llvm.umax.i64(i64 [[N]], i64 1) +; CHECK-NEXT: br label %[[COND:.*]] +; CHECK: [[COND_LOOPEXIT:.*]]: +; CHECK-NEXT: br label %[[COND]] +; CHECK: [[COND]]: +; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[UMAX]], 4 +; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label 
%[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]] +; CHECK: [[VECTOR_PH]]: +; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[UMAX]], 4 +; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[UMAX]], [[N_MOD_VF]] +; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] +; CHECK: [[VECTOR_BODY]]: +; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] +; CHECK-NEXT: store i8 1, ptr [[DST]], align 1 +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 +; CHECK-NEXT: [[TMP0:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; CHECK-NEXT: br i1 [[TMP0]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] +; CHECK: [[MIDDLE_BLOCK]]: +; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[UMAX]], [[N_VEC]] +; CHECK-NEXT: br i1 [[CMP_N]], label %[[COND_LOOPEXIT]], label %[[SCALAR_PH]] +; CHECK: [[SCALAR_PH]]: +; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[COND]] ] +; CHECK-NEXT: br label %[[LOOP:.*]] +; CHECK: [[LOOP]]: +; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ] +; CHECK-NEXT: [[FALSE_EXT:%.*]] = zext i1 false to i32 +; CHECK-NEXT: [[XOR:%.*]] = xor i32 [[FALSE_EXT]], [[TRUE_EXT]] +; CHECK-NEXT: [[XOR_TRUNC:%.*]] = trunc i32 [[XOR]] to i8 +; CHECK-NEXT: store i8 [[XOR_TRUNC]], ptr [[DST]], align 1 +; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1 +; CHECK-NEXT: [[CMP:%.*]] = icmp ult i64 [[IV_NEXT]], [[N]] +; CHECK-NEXT: br i1 [[CMP]], label %[[LOOP]], label %[[COND_LOOPEXIT]], !llvm.loop [[LOOP3:![0-9]+]] +; +entry: + %true.ext = zext i1 true to i32 + br label %cond + +cond: + br label %loop + +loop: + %iv = phi i64 [ 0, %cond ], [ %iv.next, %loop ] + %false.ext = zext i1 false to i32 + %xor = xor i32 %false.ext, %true.ext + %xor.trunc = trunc i32 %xor to i8 + store i8 %xor.trunc, ptr %dst, align 1 + %iv.next = add i64 %iv, 1 + %cmp = icmp ult i64 %iv.next, %n + br i1 %cmp, label %loop, label %cond +}