diff --git a/llvm/include/llvm/Analysis/ScalarEvolutionPatternMatch.h b/llvm/include/llvm/Analysis/ScalarEvolutionPatternMatch.h
index cfb1b4c6ea6b..8e9d7e0b7214 100644
--- a/llvm/include/llvm/Analysis/ScalarEvolutionPatternMatch.h
+++ b/llvm/include/llvm/Analysis/ScalarEvolutionPatternMatch.h
@@ -196,11 +196,48 @@ m_scev_UDiv(const Op0_t &Op0, const Op1_t &Op1) {
   return m_scev_Binary<SCEVUDivExpr>(Op0, Op1);
 }
 
+inline class_match<const Loop> m_Loop() { return class_match<const Loop>(); }
+
+/// Match an affine SCEVAddRecExpr.
+template <typename Op0_t, typename Op1_t, typename Loop_t>
+struct SCEVAffineAddRec_match {
+  SCEVBinaryExpr_match<SCEVAddRecExpr, Op0_t, Op1_t> Ops;
+  Loop_t Loop;
+
+  SCEVAffineAddRec_match(Op0_t Op0, Op1_t Op1, Loop_t Loop)
+      : Ops(Op0, Op1), Loop(Loop) {}
+
+  bool match(const SCEV *S) const {
+    return Ops.match(S) && Loop.match(cast<SCEVAddRecExpr>(S)->getLoop());
+  }
+};
+
+/// Match a specified const Loop*.
+struct specificloop_ty {
+  const Loop *L;
+
+  specificloop_ty(const Loop *L) : L(L) {}
+
+  bool match(const Loop *L) const { return L == this->L; }
+};
+
+inline specificloop_ty m_SpecificLoop(const Loop *L) { return L; }
+
+inline bind_ty<const Loop> m_Loop(const Loop *&L) { return L; }
+
 template <typename Op0_t, typename Op1_t>
-inline SCEVBinaryExpr_match<SCEVAddRecExpr, Op0_t, Op1_t>
+inline SCEVAffineAddRec_match<Op0_t, Op1_t, class_match<const Loop>>
 m_scev_AffineAddRec(const Op0_t &Op0, const Op1_t &Op1) {
-  return m_scev_Binary<SCEVAddRecExpr>(Op0, Op1);
+  return SCEVAffineAddRec_match<Op0_t, Op1_t, class_match<const Loop>>(
+      Op0, Op1, m_Loop());
 }
+
+template <typename Op0_t, typename Op1_t, typename Loop_t>
+inline SCEVAffineAddRec_match<Op0_t, Op1_t, Loop_t>
+m_scev_AffineAddRec(const Op0_t &Op0, const Op1_t &Op1, const Loop_t &L) {
+  return SCEVAffineAddRec_match<Op0_t, Op1_t, Loop_t>(Op0, Op1, L);
+}
+
 } // namespace SCEVPatternMatch
 } // namespace llvm
diff --git a/llvm/lib/Analysis/ScalarEvolution.cpp b/llvm/lib/Analysis/ScalarEvolution.cpp
index 0d6447cc2761..4bd5a4c3ab75 100644
--- a/llvm/lib/Analysis/ScalarEvolution.cpp
+++ b/llvm/lib/Analysis/ScalarEvolution.cpp
@@ -12530,14 +12530,14 @@ static bool IsKnownPredicateViaAddRecStart(ScalarEvolution &SE,
     return false;
 
   const SCEV *LStart, *RStart, *Step;
-  if (!match(LHS, m_scev_AffineAddRec(m_SCEV(LStart), m_SCEV(Step))) ||
-      !match(RHS, m_scev_AffineAddRec(m_SCEV(RStart), m_scev_Specific(Step))))
+  const Loop *L;
+  if (!match(LHS,
+             m_scev_AffineAddRec(m_SCEV(LStart), m_SCEV(Step), m_Loop(L))) ||
+      !match(RHS, m_scev_AffineAddRec(m_SCEV(RStart), m_scev_Specific(Step),
+                                      m_SpecificLoop(L))))
     return false;
   const SCEVAddRecExpr *LAR = cast<SCEVAddRecExpr>(LHS);
   const SCEVAddRecExpr *RAR = cast<SCEVAddRecExpr>(RHS);
-  if (LAR->getLoop() != RAR->getLoop())
-    return false;
-
   SCEV::NoWrapFlags NW = ICmpInst::isSigned(Pred) ?
                          SCEV::FlagNSW : SCEV::FlagNUW;
   if (!LAR->getNoWrapFlags(NW) || !RAR->getNoWrapFlags(NW))
diff --git a/llvm/lib/Transforms/Scalar/IndVarSimplify.cpp b/llvm/lib/Transforms/Scalar/IndVarSimplify.cpp
index e774e5fd99cb..95d52b9b4e18 100644
--- a/llvm/lib/Transforms/Scalar/IndVarSimplify.cpp
+++ b/llvm/lib/Transforms/Scalar/IndVarSimplify.cpp
@@ -808,8 +808,7 @@ static bool isLoopCounter(PHINode* Phi, Loop *L,
     return false;
 
   const SCEV *S = SE->getSCEV(Phi);
-  if (!match(S, m_scev_AffineAddRec(m_SCEV(), m_scev_One())) ||
-      cast<SCEVAddRecExpr>(S)->getLoop() != L)
+  if (!match(S, m_scev_AffineAddRec(m_SCEV(), m_scev_One(), m_SpecificLoop(L))))
     return false;
 
   int LatchIdx = Phi->getBasicBlockIndex(L->getLoopLatch());
diff --git a/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp b/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp
index a38c40c54b67..6c18bc9f7729 100644
--- a/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp
@@ -556,13 +556,14 @@ static void DoInitialMatch(const SCEV *S, Loop *L,
 
   // Look at addrec operands.
   const SCEV *Start, *Step;
-  if (match(S, m_scev_AffineAddRec(m_SCEV(Start), m_SCEV(Step))) &&
+  const Loop *ARLoop;
+  if (match(S,
+            m_scev_AffineAddRec(m_SCEV(Start), m_SCEV(Step), m_Loop(ARLoop))) &&
      !Start->isZero()) {
     DoInitialMatch(Start, L, Good, Bad, SE);
     DoInitialMatch(SE.getAddRecExpr(SE.getConstant(S->getType(), 0), Step,
                                     // FIXME: AR->getNoWrapFlags()
-                                    cast<SCEVAddRecExpr>(S)->getLoop(),
-                                    SCEV::FlagAnyWrap),
+                                    ARLoop, SCEV::FlagAnyWrap),
                    L, Good, Bad, SE);
     return;
   }
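
For context, here is a minimal usage sketch (not part of the patch) of the two new matcher overloads added above. The helper names `isAffineAddRecInLoop` and `getAffineAddRecLoop` are hypothetical; only `m_scev_AffineAddRec`, `m_Loop`, `m_SpecificLoop`, `m_SCEV`, and `match` come from the patched header.

```cpp
// Sketch only: hypothetical callers of the new three-operand
// m_scev_AffineAddRec overloads from ScalarEvolutionPatternMatch.h.
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionPatternMatch.h"

using namespace llvm;
using namespace llvm::SCEVPatternMatch;

// Require the affine AddRec to belong to a specific loop L, binding its
// start and step on success (mirrors the IndVarSimplify use above).
static bool isAffineAddRecInLoop(const SCEV *S, const Loop *L,
                                 const SCEV *&Start, const SCEV *&Step) {
  return match(S, m_scev_AffineAddRec(m_SCEV(Start), m_SCEV(Step),
                                      m_SpecificLoop(L)));
}

// Bind the AddRec's loop instead of constraining it (mirrors the
// LoopStrengthReduce use above).
static const Loop *getAffineAddRecLoop(const SCEV *S) {
  const SCEV *Start, *Step;
  const Loop *ARLoop = nullptr;
  if (match(S, m_scev_AffineAddRec(m_SCEV(Start), m_SCEV(Step),
                                   m_Loop(ARLoop))))
    return ARLoop;
  return nullptr;
}
```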