Add recursive decomposition reasoning to isKnownNonEqual

The basic idea is that, by looking through operand instructions which don't change the equality result, we can push the existing known-bits comparison down past instructions which would otherwise obscure it.

We have analogous handling in InstSimplify for most - though weirdly not all - of these cases starting from an icmp root. It's a bit unfortunate to duplicate logic, but since my actual goal is to extend BasicAA, the icmp logic doesn't help. (And just makes it hard to test here.)  The BasicAA change will be posted separately for review.

Differential Revision: https://reviews.llvm.org/D92698
This commit is contained in:
Philip Reames
2020-12-05 15:57:09 -08:00
parent 109e70d357
commit 8f076291be
2 changed files with 42 additions and 18 deletions

View File

@@ -350,13 +350,14 @@ bool llvm::isKnownNegative(const Value *V, const DataLayout &DL, unsigned Depth,
return Known.isNegative();
}
static bool isKnownNonEqual(const Value *V1, const Value *V2, const Query &Q);
static bool isKnownNonEqual(const Value *V1, const Value *V2, unsigned Depth,
const Query &Q);
bool llvm::isKnownNonEqual(const Value *V1, const Value *V2,
const DataLayout &DL, AssumptionCache *AC,
const Instruction *CxtI, const DominatorTree *DT,
bool UseInstrInfo) {
return ::isKnownNonEqual(V1, V2,
return ::isKnownNonEqual(V1, V2, 0,
Query(DL, AC, safeCxtI(V1, safeCxtI(V2, CxtI)), DT,
UseInstrInfo, /*ORE=*/nullptr));
}
@@ -2486,7 +2487,8 @@ bool isKnownNonZero(const Value* V, unsigned Depth, const Query& Q) {
}
/// Return true if V2 == V1 + X, where X is known non-zero.
static bool isAddOfNonZero(const Value *V1, const Value *V2, const Query &Q) {
static bool isAddOfNonZero(const Value *V1, const Value *V2, unsigned Depth,
const Query &Q) {
const BinaryOperator *BO = dyn_cast<BinaryOperator>(V1);
if (!BO || BO->getOpcode() != Instruction::Add)
return false;
@@ -2497,24 +2499,54 @@ static bool isAddOfNonZero(const Value *V1, const Value *V2, const Query &Q) {
Op = BO->getOperand(0);
else
return false;
return isKnownNonZero(Op, 0, Q);
return isKnownNonZero(Op, Depth + 1, Q);
}
/// Return true if it is known that V1 != V2.
static bool isKnownNonEqual(const Value *V1, const Value *V2, const Query &Q) {
static bool isKnownNonEqual(const Value *V1, const Value *V2, unsigned Depth,
const Query &Q) {
if (V1 == V2)
return false;
if (V1->getType() != V2->getType())
// We can't look through casts yet.
return false;
if (isAddOfNonZero(V1, V2, Q) || isAddOfNonZero(V2, V1, Q))
if (Depth >= MaxAnalysisRecursionDepth)
return false;
// See if we can recurse through (exactly one of) our operands.
auto *O1 = dyn_cast<Operator>(V1);
auto *O2 = dyn_cast<Operator>(V2);
if (O1 && O2 && O1->getOpcode() == O2->getOpcode()) {
switch (O1->getOpcode()) {
default: break;
case Instruction::Add:
case Instruction::Sub:
// Assume operand order has been canonicalized
if (O1->getOperand(0) == O2->getOperand(0))
return isKnownNonEqual(O1->getOperand(1), O2->getOperand(1),
Depth + 1, Q);
if (O1->getOperand(1) == O2->getOperand(1))
return isKnownNonEqual(O1->getOperand(0), O2->getOperand(0),
Depth + 1, Q);
break;
case Instruction::SExt:
case Instruction::ZExt:
if (O1->getOperand(0)->getType() == O2->getOperand(0)->getType())
return isKnownNonEqual(O1->getOperand(0), O2->getOperand(0),
Depth + 1, Q);
break;
};
}
if (isAddOfNonZero(V1, V2, Depth, Q) || isAddOfNonZero(V2, V1, Depth, Q))
return true;
if (V1->getType()->isIntOrIntVectorTy()) {
// Are any known bits in V1 contradictory to known bits in V2? If V1
// has a known zero where V2 has a known one, they must not be equal.
KnownBits Known1 = computeKnownBits(V1, 0, Q);
KnownBits Known2 = computeKnownBits(V2, 0, Q);
KnownBits Known1 = computeKnownBits(V1, Depth, Q);
KnownBits Known2 = computeKnownBits(V2, Depth, Q);
if (Known1.Zero.intersects(Known2.One) ||
Known2.Zero.intersects(Known1.One))

View File

@@ -108,11 +108,7 @@ define i1 @add2(i8 %B, i8 %C) {
define i1 @sub1(i8 %B, i8 %C) {
; CHECK-LABEL: @sub1(
; CHECK-NEXT: [[A:%.*]] = add i8 [[B:%.*]], 1
; CHECK-NEXT: [[A_OP:%.*]] = sub i8 [[A]], [[C:%.*]]
; CHECK-NEXT: [[B_OP:%.*]] = sub i8 [[B]], [[C]]
; CHECK-NEXT: [[CMP:%.*]] = icmp eq i8 [[A_OP]], [[B_OP]]
; CHECK-NEXT: ret i1 [[CMP]]
; CHECK-NEXT: ret i1 false
;
%A = add i8 %B, 1
%A.op = sub i8 %A, %C
@@ -124,11 +120,7 @@ define i1 @sub1(i8 %B, i8 %C) {
define i1 @sub2(i8 %B, i8 %C) {
; CHECK-LABEL: @sub2(
; CHECK-NEXT: [[A:%.*]] = add i8 [[B:%.*]], 1
; CHECK-NEXT: [[A_OP:%.*]] = sub i8 [[C:%.*]], [[A]]
; CHECK-NEXT: [[B_OP:%.*]] = sub i8 [[C]], [[B]]
; CHECK-NEXT: [[CMP:%.*]] = icmp eq i8 [[A_OP]], [[B_OP]]
; CHECK-NEXT: ret i1 [[CMP]]
; CHECK-NEXT: ret i1 false
;
%A = add i8 %B, 1
%A.op = sub i8 %C, %A