The previous heuristic rejected a PHI if one of its users was an unbreakable PHI, no matter what the other users were. This worked well in most cases, but there is one case in rocRAND where it doesn't: a PHI node has two PHI users, one breakable and one not. When that PHI node isn't broken, performance falls by 35%. Relaxing the restriction to "require that at least half of the PHI node's users are breakable" fixes the issue and seems like a sensible change.

Solves SWDEV-409648, SWDEV-398393

Reviewed By: #amdgpu, arsenm

Differential Revision: https://reviews.llvm.org/D155184
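A minimal sketch of the shape of the problem (hypothetical IR, all names invented for illustration):

  %big  = phi <8 x float> [ %a, %bb0 ], [ %b, %bb1 ]
  ...
  %use1 = phi <8 x float> [ %big, %bbA ], ...   ; breakable PHI user
  %use2 = phi <8 x float> [ %big, %bbB ], ...   ; unbreakable PHI user

Each PHI user now scores +1 if breakable and -1 if not, and %big is broken when the score is non-negative, so the one-breakable/one-unbreakable case above now passes.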
//===-- AMDGPUCodeGenPrepare.cpp ------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This pass does misc. AMDGPU optimizations on IR before instruction
/// selection.
//
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
|
|
#include "AMDGPUTargetMachine.h"
|
|
#include "SIModeRegisterDefaults.h"
|
|
#include "llvm/Analysis/AssumptionCache.h"
|
|
#include "llvm/Analysis/ConstantFolding.h"
|
|
#include "llvm/Analysis/TargetLibraryInfo.h"
|
|
#include "llvm/Analysis/UniformityAnalysis.h"
|
|
#include "llvm/Analysis/ValueTracking.h"
|
|
#include "llvm/CodeGen/TargetPassConfig.h"
|
|
#include "llvm/IR/Dominators.h"
|
|
#include "llvm/IR/IRBuilder.h"
|
|
#include "llvm/IR/InstVisitor.h"
|
|
#include "llvm/IR/IntrinsicsAMDGPU.h"
|
|
#include "llvm/IR/PatternMatch.h"
|
|
#include "llvm/InitializePasses.h"
|
|
#include "llvm/Pass.h"
|
|
#include "llvm/Support/KnownBits.h"
|
|
#include "llvm/Transforms/Utils/IntegerDivision.h"
|
|
#include "llvm/Transforms/Utils/Local.h"
|
|
|
|
#define DEBUG_TYPE "amdgpu-codegenprepare"
|
|
|
|
using namespace llvm;
|
|
using namespace llvm::PatternMatch;
|
|
|
|
namespace {
|
|
|
|
static cl::opt<bool> WidenLoads(
    "amdgpu-codegenprepare-widen-constant-loads",
    cl::desc("Widen sub-dword constant address space loads in AMDGPUCodeGenPrepare"),
    cl::ReallyHidden,
    cl::init(false));

static cl::opt<bool> Widen16BitOps(
    "amdgpu-codegenprepare-widen-16-bit-ops",
    cl::desc("Widen uniform 16-bit instructions to 32-bit in AMDGPUCodeGenPrepare"),
    cl::ReallyHidden,
    cl::init(true));

static cl::opt<bool>
    ScalarizeLargePHIs("amdgpu-codegenprepare-break-large-phis",
                       cl::desc("Break large PHI nodes for DAGISel"),
                       cl::ReallyHidden, cl::init(true));

static cl::opt<bool>
    ForceScalarizeLargePHIs("amdgpu-codegenprepare-force-break-large-phis",
                            cl::desc("For testing purposes, always break large "
                                     "PHIs even if it isn't profitable."),
                            cl::ReallyHidden, cl::init(false));

static cl::opt<unsigned> ScalarizeLargePHIsThreshold(
    "amdgpu-codegenprepare-break-large-phis-threshold",
    cl::desc("Minimum type size in bits for breaking large PHI nodes"),
    cl::ReallyHidden, cl::init(32));

static cl::opt<bool> UseMul24Intrin(
    "amdgpu-codegenprepare-mul24",
    cl::desc("Introduce mul24 intrinsics in AMDGPUCodeGenPrepare"),
    cl::ReallyHidden,
    cl::init(true));

// Legalize 64-bit division by using the generic IR expansion.
static cl::opt<bool> ExpandDiv64InIR(
    "amdgpu-codegenprepare-expand-div64",
    cl::desc("Expand 64-bit division in AMDGPUCodeGenPrepare"),
    cl::ReallyHidden,
    cl::init(false));

// Leave all division operations as they are. This supersedes ExpandDiv64InIR
// and is used for testing the legalizer.
static cl::opt<bool> DisableIDivExpand(
    "amdgpu-codegenprepare-disable-idiv-expansion",
    cl::desc("Prevent expanding integer division in AMDGPUCodeGenPrepare"),
    cl::ReallyHidden,
    cl::init(false));

class AMDGPUCodeGenPrepareImpl
    : public InstVisitor<AMDGPUCodeGenPrepareImpl, bool> {
public:
  const GCNSubtarget *ST = nullptr;
  const TargetLibraryInfo *TLInfo = nullptr;
  AssumptionCache *AC = nullptr;
  DominatorTree *DT = nullptr;
  UniformityInfo *UA = nullptr;
  Module *Mod = nullptr;
  const DataLayout *DL = nullptr;
  bool HasUnsafeFPMath = false;
  bool HasFP32DenormalFlush = false;
  bool FlowChanged = false;

  DenseMap<const PHINode *, bool> BreakPhiNodesCache;

  bool canBreakPHINode(const PHINode &I);

  /// \returns \p T's base element bit width.
  unsigned getBaseElementBitWidth(const Type *T) const;

  /// \returns Equivalent 32 bit integer type for given type \p T. For example,
  /// if \p T is i7, then i32 is returned; if \p T is <3 x i12>, then <3 x i32>
  /// is returned.
  Type *getI32Ty(IRBuilder<> &B, const Type *T) const;

  /// \returns True if binary operation \p I is a signed binary operation, false
  /// otherwise.
  bool isSigned(const BinaryOperator &I) const;

  /// \returns True if the condition of 'select' operation \p I comes from a
  /// signed 'icmp' operation, false otherwise.
  bool isSigned(const SelectInst &I) const;

  /// \returns True if type \p T needs to be promoted to 32 bit integer type,
  /// false otherwise.
  bool needsPromotionToI32(const Type *T) const;

  /// Return true if \p T is a legal scalar floating point type.
  bool isLegalFloatingTy(const Type *T) const;

  /// Promotes uniform binary operation \p I to equivalent 32 bit binary
  /// operation.
  ///
  /// \details \p I's base element bit width must be greater than 1 and less
  /// than or equal 16. Promotion is done by sign or zero extending operands to
  /// 32 bits, replacing \p I with equivalent 32 bit binary operation, and
  /// truncating the result of 32 bit binary operation back to \p I's original
  /// type. Division operation is not promoted.
  ///
  /// \returns True if \p I is promoted to equivalent 32 bit binary operation,
  /// false otherwise.
  bool promoteUniformOpToI32(BinaryOperator &I) const;

  /// Promotes uniform 'icmp' operation \p I to 32 bit 'icmp' operation.
  ///
  /// \details \p I's base element bit width must be greater than 1 and less
  /// than or equal 16. Promotion is done by sign or zero extending operands to
  /// 32 bits, and replacing \p I with 32 bit 'icmp' operation.
  ///
  /// \returns True.
  bool promoteUniformOpToI32(ICmpInst &I) const;

  /// Promotes uniform 'select' operation \p I to 32 bit 'select'
  /// operation.
  ///
  /// \details \p I's base element bit width must be greater than 1 and less
  /// than or equal 16. Promotion is done by sign or zero extending operands to
  /// 32 bits, replacing \p I with 32 bit 'select' operation, and truncating the
  /// result of 32 bit 'select' operation back to \p I's original type.
  ///
  /// \returns True.
  bool promoteUniformOpToI32(SelectInst &I) const;

  /// Promotes uniform 'bitreverse' intrinsic \p I to 32 bit 'bitreverse'
  /// intrinsic.
  ///
  /// \details \p I's base element bit width must be greater than 1 and less
  /// than or equal 16. Promotion is done by zero extending the operand to 32
  /// bits, replacing \p I with 32 bit 'bitreverse' intrinsic, shifting the
  /// result of 32 bit 'bitreverse' intrinsic to the right with zero fill (the
  /// shift amount is 32 minus \p I's base element bit width), and truncating
  /// the result of the shift operation back to \p I's original type.
  ///
  /// \returns True.
  bool promoteUniformBitreverseToI32(IntrinsicInst &I) const;

  /// \returns The minimum number of bits needed to store the value of \p Op
  /// as an unsigned integer. Truncating to this size and then zero-extending
  /// to the original will not change the value.
  unsigned numBitsUnsigned(Value *Op) const;

  /// \returns The minimum number of bits needed to store the value of \p Op
  /// as a signed integer. Truncating to this size and then sign-extending to
  /// the original size will not change the value.
  unsigned numBitsSigned(Value *Op) const;

  /// Replace mul instructions with llvm.amdgcn.mul.u24 or llvm.amdgcn.mul.s24.
  /// SelectionDAG has an issue where an and asserting the bits are known
  bool replaceMulWithMul24(BinaryOperator &I) const;

  /// Perform the same function as the equivalently named function in
  /// DAGCombiner. Since we expand some divisions here, we need to perform this
  /// before the expansion obscures the pattern.
  bool foldBinOpIntoSelect(BinaryOperator &I) const;

  bool divHasSpecialOptimization(BinaryOperator &I,
                                 Value *Num, Value *Den) const;
  int getDivNumBits(BinaryOperator &I,
                    Value *Num, Value *Den,
                    unsigned AtLeast, bool Signed) const;

  /// Expands 24 bit div or rem.
  Value* expandDivRem24(IRBuilder<> &Builder, BinaryOperator &I,
                        Value *Num, Value *Den,
                        bool IsDiv, bool IsSigned) const;

  Value *expandDivRem24Impl(IRBuilder<> &Builder, BinaryOperator &I,
                            Value *Num, Value *Den, unsigned NumBits,
                            bool IsDiv, bool IsSigned) const;

  /// Expands 32 bit div or rem.
  Value* expandDivRem32(IRBuilder<> &Builder, BinaryOperator &I,
                        Value *Num, Value *Den) const;

  Value *shrinkDivRem64(IRBuilder<> &Builder, BinaryOperator &I,
                        Value *Num, Value *Den) const;
  void expandDivRem64(BinaryOperator &I) const;

  /// Widen a scalar load.
  ///
  /// \details Widen a scalar load for uniform, small-type loads from constant
  /// memory to a full 32 bits, then truncate the result to allow a scalar load
  /// instead of a vector load.
  ///
  /// \returns True.

  bool canWidenScalarExtLoad(LoadInst &I) const;

  Value *matchFractPat(IntrinsicInst &I);
  Value *applyFractPat(IRBuilder<> &Builder, Value *FractArg);

public:
  bool visitFDiv(BinaryOperator &I);

  bool visitInstruction(Instruction &I) { return false; }
  bool visitBinaryOperator(BinaryOperator &I);
  bool visitLoadInst(LoadInst &I);
  bool visitICmpInst(ICmpInst &I);
  bool visitSelectInst(SelectInst &I);
  bool visitPHINode(PHINode &I);

  bool visitIntrinsicInst(IntrinsicInst &I);
  bool visitBitreverseIntrinsicInst(IntrinsicInst &I);
  bool visitMinNum(IntrinsicInst &I);
  bool run(Function &F);
};

class AMDGPUCodeGenPrepare : public FunctionPass {
private:
  AMDGPUCodeGenPrepareImpl Impl;

public:
  static char ID;
  AMDGPUCodeGenPrepare() : FunctionPass(ID) {
    initializeAMDGPUCodeGenPreparePass(*PassRegistry::getPassRegistry());
  }
  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<AssumptionCacheTracker>();
    AU.addRequired<UniformityInfoWrapperPass>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();

    // FIXME: Division expansion needs to preserve the dominator tree.
    if (!ExpandDiv64InIR)
      AU.setPreservesAll();
  }
  bool runOnFunction(Function &F) override;
  bool doInitialization(Module &M) override;
  StringRef getPassName() const override { return "AMDGPU IR optimizations"; }
};

} // end anonymous namespace

bool AMDGPUCodeGenPrepareImpl::run(Function &F) {
  bool MadeChange = false;

  Function::iterator NextBB;
  for (Function::iterator FI = F.begin(), FE = F.end(); FI != FE; FI = NextBB) {
    BasicBlock *BB = &*FI;
    NextBB = std::next(FI);

    BasicBlock::iterator Next;
    for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;
         I = Next) {
      Next = std::next(I);

      MadeChange |= visit(*I);

      if (Next != E) { // Control flow changed
        BasicBlock *NextInstBB = Next->getParent();
        if (NextInstBB != BB) {
          BB = NextInstBB;
          E = BB->end();
          FE = F.end();
        }
      }
    }
  }
  return MadeChange;
}

unsigned AMDGPUCodeGenPrepareImpl::getBaseElementBitWidth(const Type *T) const {
  assert(needsPromotionToI32(T) && "T does not need promotion to i32");

  if (T->isIntegerTy())
    return T->getIntegerBitWidth();
  return cast<VectorType>(T)->getElementType()->getIntegerBitWidth();
}

Type *AMDGPUCodeGenPrepareImpl::getI32Ty(IRBuilder<> &B, const Type *T) const {
  assert(needsPromotionToI32(T) && "T does not need promotion to i32");

  if (T->isIntegerTy())
    return B.getInt32Ty();
  return FixedVectorType::get(B.getInt32Ty(), cast<FixedVectorType>(T));
}

bool AMDGPUCodeGenPrepareImpl::isSigned(const BinaryOperator &I) const {
  return I.getOpcode() == Instruction::AShr ||
      I.getOpcode() == Instruction::SDiv || I.getOpcode() == Instruction::SRem;
}

bool AMDGPUCodeGenPrepareImpl::isSigned(const SelectInst &I) const {
  return isa<ICmpInst>(I.getOperand(0)) ?
      cast<ICmpInst>(I.getOperand(0))->isSigned() : false;
}

bool AMDGPUCodeGenPrepareImpl::needsPromotionToI32(const Type *T) const {
  if (!Widen16BitOps)
    return false;

  const IntegerType *IntTy = dyn_cast<IntegerType>(T);
  if (IntTy && IntTy->getBitWidth() > 1 && IntTy->getBitWidth() <= 16)
    return true;

  if (const VectorType *VT = dyn_cast<VectorType>(T)) {
    // TODO: The set of packed operations is more limited, so may want to
    // promote some anyway.
    if (ST->hasVOP3PInsts())
      return false;

    return needsPromotionToI32(VT->getElementType());
  }

  return false;
}

bool AMDGPUCodeGenPrepareImpl::isLegalFloatingTy(const Type *Ty) const {
  return Ty->isFloatTy() || Ty->isDoubleTy() ||
         (Ty->isHalfTy() && ST->has16BitInsts());
}

// Return true if the op promoted to i32 should have nsw set.
static bool promotedOpIsNSW(const Instruction &I) {
  switch (I.getOpcode()) {
  case Instruction::Shl:
  case Instruction::Add:
  case Instruction::Sub:
    return true;
  case Instruction::Mul:
    return I.hasNoUnsignedWrap();
  default:
    return false;
  }
}

// Return true if the op promoted to i32 should have nuw set.
static bool promotedOpIsNUW(const Instruction &I) {
  switch (I.getOpcode()) {
  case Instruction::Shl:
  case Instruction::Add:
  case Instruction::Mul:
    return true;
  case Instruction::Sub:
    return I.hasNoUnsignedWrap();
  default:
    return false;
  }
}
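
// For instance (an illustration, not from the original source): an i16 add is
// promoted with operands sign- or zero-extended from at most 16 bits, so the
// 32-bit sum can neither signed- nor unsigned-wrap; an i16 sub of
// zero-extended operands can still go negative, so it only keeps nuw when the
// original operation had it.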

bool AMDGPUCodeGenPrepareImpl::canWidenScalarExtLoad(LoadInst &I) const {
  Type *Ty = I.getType();
  const DataLayout &DL = Mod->getDataLayout();
  int TySize = DL.getTypeSizeInBits(Ty);
  Align Alignment = DL.getValueOrABITypeAlignment(I.getAlign(), Ty);

  return I.isSimple() && TySize < 32 && Alignment >= 4 && UA->isUniform(&I);
}

bool AMDGPUCodeGenPrepareImpl::promoteUniformOpToI32(BinaryOperator &I) const {
  assert(needsPromotionToI32(I.getType()) &&
         "I does not need promotion to i32");

  if (I.getOpcode() == Instruction::SDiv ||
      I.getOpcode() == Instruction::UDiv ||
      I.getOpcode() == Instruction::SRem ||
      I.getOpcode() == Instruction::URem)
    return false;

  IRBuilder<> Builder(&I);
  Builder.SetCurrentDebugLocation(I.getDebugLoc());

  Type *I32Ty = getI32Ty(Builder, I.getType());
  Value *ExtOp0 = nullptr;
  Value *ExtOp1 = nullptr;
  Value *ExtRes = nullptr;
  Value *TruncRes = nullptr;

  if (isSigned(I)) {
    ExtOp0 = Builder.CreateSExt(I.getOperand(0), I32Ty);
    ExtOp1 = Builder.CreateSExt(I.getOperand(1), I32Ty);
  } else {
    ExtOp0 = Builder.CreateZExt(I.getOperand(0), I32Ty);
    ExtOp1 = Builder.CreateZExt(I.getOperand(1), I32Ty);
  }

  ExtRes = Builder.CreateBinOp(I.getOpcode(), ExtOp0, ExtOp1);
  if (Instruction *Inst = dyn_cast<Instruction>(ExtRes)) {
    if (promotedOpIsNSW(cast<Instruction>(I)))
      Inst->setHasNoSignedWrap();

    if (promotedOpIsNUW(cast<Instruction>(I)))
      Inst->setHasNoUnsignedWrap();

    if (const auto *ExactOp = dyn_cast<PossiblyExactOperator>(&I))
      Inst->setIsExact(ExactOp->isExact());
  }

  TruncRes = Builder.CreateTrunc(ExtRes, I.getType());

  I.replaceAllUsesWith(TruncRes);
  I.eraseFromParent();

  return true;
}

bool AMDGPUCodeGenPrepareImpl::promoteUniformOpToI32(ICmpInst &I) const {
  assert(needsPromotionToI32(I.getOperand(0)->getType()) &&
         "I does not need promotion to i32");

  IRBuilder<> Builder(&I);
  Builder.SetCurrentDebugLocation(I.getDebugLoc());

  Type *I32Ty = getI32Ty(Builder, I.getOperand(0)->getType());
  Value *ExtOp0 = nullptr;
  Value *ExtOp1 = nullptr;
  Value *NewICmp = nullptr;

  if (I.isSigned()) {
    ExtOp0 = Builder.CreateSExt(I.getOperand(0), I32Ty);
    ExtOp1 = Builder.CreateSExt(I.getOperand(1), I32Ty);
  } else {
    ExtOp0 = Builder.CreateZExt(I.getOperand(0), I32Ty);
    ExtOp1 = Builder.CreateZExt(I.getOperand(1), I32Ty);
  }
  NewICmp = Builder.CreateICmp(I.getPredicate(), ExtOp0, ExtOp1);

  I.replaceAllUsesWith(NewICmp);
  I.eraseFromParent();

  return true;
}

bool AMDGPUCodeGenPrepareImpl::promoteUniformOpToI32(SelectInst &I) const {
  assert(needsPromotionToI32(I.getType()) &&
         "I does not need promotion to i32");

  IRBuilder<> Builder(&I);
  Builder.SetCurrentDebugLocation(I.getDebugLoc());

  Type *I32Ty = getI32Ty(Builder, I.getType());
  Value *ExtOp1 = nullptr;
  Value *ExtOp2 = nullptr;
  Value *ExtRes = nullptr;
  Value *TruncRes = nullptr;

  if (isSigned(I)) {
    ExtOp1 = Builder.CreateSExt(I.getOperand(1), I32Ty);
    ExtOp2 = Builder.CreateSExt(I.getOperand(2), I32Ty);
  } else {
    ExtOp1 = Builder.CreateZExt(I.getOperand(1), I32Ty);
    ExtOp2 = Builder.CreateZExt(I.getOperand(2), I32Ty);
  }
  ExtRes = Builder.CreateSelect(I.getOperand(0), ExtOp1, ExtOp2);
  TruncRes = Builder.CreateTrunc(ExtRes, I.getType());

  I.replaceAllUsesWith(TruncRes);
  I.eraseFromParent();

  return true;
}

bool AMDGPUCodeGenPrepareImpl::promoteUniformBitreverseToI32(
    IntrinsicInst &I) const {
  assert(I.getIntrinsicID() == Intrinsic::bitreverse &&
         "I must be bitreverse intrinsic");
  assert(needsPromotionToI32(I.getType()) &&
         "I does not need promotion to i32");

  IRBuilder<> Builder(&I);
  Builder.SetCurrentDebugLocation(I.getDebugLoc());

  Type *I32Ty = getI32Ty(Builder, I.getType());
  Function *I32 =
      Intrinsic::getDeclaration(Mod, Intrinsic::bitreverse, { I32Ty });
  Value *ExtOp = Builder.CreateZExt(I.getOperand(0), I32Ty);
  Value *ExtRes = Builder.CreateCall(I32, { ExtOp });
  Value *LShrOp =
      Builder.CreateLShr(ExtRes, 32 - getBaseElementBitWidth(I.getType()));
  Value *TruncRes =
      Builder.CreateTrunc(LShrOp, I.getType());

  I.replaceAllUsesWith(TruncRes);
  I.eraseFromParent();

  return true;
}

unsigned AMDGPUCodeGenPrepareImpl::numBitsUnsigned(Value *Op) const {
  return computeKnownBits(Op, *DL, 0, AC).countMaxActiveBits();
}

unsigned AMDGPUCodeGenPrepareImpl::numBitsSigned(Value *Op) const {
  return ComputeMaxSignificantBits(Op, *DL, 0, AC);
}

static void extractValues(IRBuilder<> &Builder,
                          SmallVectorImpl<Value *> &Values, Value *V) {
  auto *VT = dyn_cast<FixedVectorType>(V->getType());
  if (!VT) {
    Values.push_back(V);
    return;
  }

  for (int I = 0, E = VT->getNumElements(); I != E; ++I)
    Values.push_back(Builder.CreateExtractElement(V, I));
}

static Value *insertValues(IRBuilder<> &Builder,
                           Type *Ty,
                           SmallVectorImpl<Value *> &Values) {
  if (!Ty->isVectorTy()) {
    assert(Values.size() == 1);
    return Values[0];
  }

  Value *NewVal = PoisonValue::get(Ty);
  for (int I = 0, E = Values.size(); I != E; ++I)
    NewVal = Builder.CreateInsertElement(NewVal, Values[I], I);

  return NewVal;
}

// Returns a 24-bit or 48-bit (as per `NumBits` and `Size`) mul of `LHS` and
// `RHS`. `NumBits` is the number of known bits of the result and `Size` is
// the width of the original destination.
static Value *getMul24(IRBuilder<> &Builder, Value *LHS, Value *RHS,
                       unsigned Size, unsigned NumBits, bool IsSigned) {
  if (Size <= 32 || NumBits <= 32) {
    Intrinsic::ID ID =
        IsSigned ? Intrinsic::amdgcn_mul_i24 : Intrinsic::amdgcn_mul_u24;
    return Builder.CreateIntrinsic(ID, {}, {LHS, RHS});
  }

  assert(NumBits <= 48);

  Intrinsic::ID LoID =
      IsSigned ? Intrinsic::amdgcn_mul_i24 : Intrinsic::amdgcn_mul_u24;
  Intrinsic::ID HiID =
      IsSigned ? Intrinsic::amdgcn_mulhi_i24 : Intrinsic::amdgcn_mulhi_u24;

  Value *Lo = Builder.CreateIntrinsic(LoID, {}, {LHS, RHS});
  Value *Hi = Builder.CreateIntrinsic(HiID, {}, {LHS, RHS});

  IntegerType *I64Ty = Builder.getInt64Ty();
  Lo = Builder.CreateZExtOrTrunc(Lo, I64Ty);
  Hi = Builder.CreateZExtOrTrunc(Hi, I64Ty);
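
  // Lo holds bits [0..31] of the 48-bit product and Hi holds bits [32..47]
  // (widened to 32 bits), so (Hi << 32) | Lo reassembles the full result.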
  return Builder.CreateOr(Lo, Builder.CreateShl(Hi, 32));
}

bool AMDGPUCodeGenPrepareImpl::replaceMulWithMul24(BinaryOperator &I) const {
  if (I.getOpcode() != Instruction::Mul)
    return false;

  Type *Ty = I.getType();
  unsigned Size = Ty->getScalarSizeInBits();
  if (Size <= 16 && ST->has16BitInsts())
    return false;

  // Prefer scalar if this could be s_mul_i32
  if (UA->isUniform(&I))
    return false;

  Value *LHS = I.getOperand(0);
  Value *RHS = I.getOperand(1);
  IRBuilder<> Builder(&I);
  Builder.SetCurrentDebugLocation(I.getDebugLoc());

  unsigned LHSBits = 0, RHSBits = 0;
  bool IsSigned = false;

  if (ST->hasMulU24() && (LHSBits = numBitsUnsigned(LHS)) <= 24 &&
      (RHSBits = numBitsUnsigned(RHS)) <= 24) {
    IsSigned = false;

  } else if (ST->hasMulI24() && (LHSBits = numBitsSigned(LHS)) <= 24 &&
             (RHSBits = numBitsSigned(RHS)) <= 24) {
    IsSigned = true;

  } else
    return false;

  SmallVector<Value *, 4> LHSVals;
  SmallVector<Value *, 4> RHSVals;
  SmallVector<Value *, 4> ResultVals;
  extractValues(Builder, LHSVals, LHS);
  extractValues(Builder, RHSVals, RHS);

  IntegerType *I32Ty = Builder.getInt32Ty();
  for (int I = 0, E = LHSVals.size(); I != E; ++I) {
    Value *LHS, *RHS;
    if (IsSigned) {
      LHS = Builder.CreateSExtOrTrunc(LHSVals[I], I32Ty);
      RHS = Builder.CreateSExtOrTrunc(RHSVals[I], I32Ty);
    } else {
      LHS = Builder.CreateZExtOrTrunc(LHSVals[I], I32Ty);
      RHS = Builder.CreateZExtOrTrunc(RHSVals[I], I32Ty);
    }
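
    // An m-bit by n-bit multiply needs at most m + n result bits, so
    // LHSBits + RHSBits bounds the product's width and lets getMul24 choose
    // between the 24-bit and 48-bit expansions.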
    Value *Result =
        getMul24(Builder, LHS, RHS, Size, LHSBits + RHSBits, IsSigned);

    if (IsSigned) {
      ResultVals.push_back(
          Builder.CreateSExtOrTrunc(Result, LHSVals[I]->getType()));
    } else {
      ResultVals.push_back(
          Builder.CreateZExtOrTrunc(Result, LHSVals[I]->getType()));
    }
  }

  Value *NewVal = insertValues(Builder, Ty, ResultVals);
  NewVal->takeName(&I);
  I.replaceAllUsesWith(NewVal);
  I.eraseFromParent();

  return true;
}

// Find a select instruction, which may have been cast. This is mostly to deal
// with cases where i16 selects were promoted here to i32.
static SelectInst *findSelectThroughCast(Value *V, CastInst *&Cast) {
  Cast = nullptr;
  if (SelectInst *Sel = dyn_cast<SelectInst>(V))
    return Sel;

  if ((Cast = dyn_cast<CastInst>(V))) {
    if (SelectInst *Sel = dyn_cast<SelectInst>(Cast->getOperand(0)))
      return Sel;
  }

  return nullptr;
}

bool AMDGPUCodeGenPrepareImpl::foldBinOpIntoSelect(BinaryOperator &BO) const {
  // Don't do this unless the old select is going away. We want to eliminate the
  // binary operator, not replace a binop with a select.
  int SelOpNo = 0;

  CastInst *CastOp;

  // TODO: Should probably try to handle some cases with multiple
  // users. Duplicating the select may be profitable for division.
  SelectInst *Sel = findSelectThroughCast(BO.getOperand(0), CastOp);
  if (!Sel || !Sel->hasOneUse()) {
    SelOpNo = 1;
    Sel = findSelectThroughCast(BO.getOperand(1), CastOp);
  }

  if (!Sel || !Sel->hasOneUse())
    return false;

  Constant *CT = dyn_cast<Constant>(Sel->getTrueValue());
  Constant *CF = dyn_cast<Constant>(Sel->getFalseValue());
  Constant *CBO = dyn_cast<Constant>(BO.getOperand(SelOpNo ^ 1));
  if (!CBO || !CT || !CF)
    return false;

  if (CastOp) {
    if (!CastOp->hasOneUse())
      return false;
    CT = ConstantFoldCastOperand(CastOp->getOpcode(), CT, BO.getType(), *DL);
    CF = ConstantFoldCastOperand(CastOp->getOpcode(), CF, BO.getType(), *DL);
  }

  // TODO: Handle special 0/-1 cases DAG combine does, although we only really
  // need to handle divisions here.
  Constant *FoldedT = SelOpNo ?
      ConstantFoldBinaryOpOperands(BO.getOpcode(), CBO, CT, *DL) :
      ConstantFoldBinaryOpOperands(BO.getOpcode(), CT, CBO, *DL);
  if (!FoldedT || isa<ConstantExpr>(FoldedT))
    return false;

  Constant *FoldedF = SelOpNo ?
      ConstantFoldBinaryOpOperands(BO.getOpcode(), CBO, CF, *DL) :
      ConstantFoldBinaryOpOperands(BO.getOpcode(), CF, CBO, *DL);
  if (!FoldedF || isa<ConstantExpr>(FoldedF))
    return false;

  IRBuilder<> Builder(&BO);
  Builder.SetCurrentDebugLocation(BO.getDebugLoc());
  if (const FPMathOperator *FPOp = dyn_cast<const FPMathOperator>(&BO))
    Builder.setFastMathFlags(FPOp->getFastMathFlags());

  Value *NewSelect = Builder.CreateSelect(Sel->getCondition(),
                                          FoldedT, FoldedF);
  NewSelect->takeName(&BO);
  BO.replaceAllUsesWith(NewSelect);
  BO.eraseFromParent();
  if (CastOp)
    CastOp->eraseFromParent();
  Sel->eraseFromParent();
  return true;
}

// Optimize fdiv with rcp:
//
// 1/x -> rcp(x) when rcp is sufficiently accurate or inaccurate rcp is
// allowed with unsafe-fp-math or afn.
//
// a/b -> a*rcp(b) when inaccurate rcp is allowed with unsafe-fp-math or afn.
static Value *optimizeWithRcp(Value *Num, Value *Den, bool AllowInaccurateRcp,
                              bool RcpIsAccurate, IRBuilder<> &Builder,
                              Module *Mod) {

  if (!AllowInaccurateRcp && !RcpIsAccurate)
    return nullptr;

  Type *Ty = Den->getType();
  if (const ConstantFP *CLHS = dyn_cast<ConstantFP>(Num)) {
    if (AllowInaccurateRcp || RcpIsAccurate) {
      if (CLHS->isExactlyValue(1.0)) {
        Function *Decl = Intrinsic::getDeclaration(
          Mod, Intrinsic::amdgcn_rcp, Ty);

        // v_rcp_f32 and v_rsq_f32 do not support denormals, and, according to
        // the CI documentation, have a worst case error of 1 ulp.
        // OpenCL requires <= 2.5 ulp for 1.0 / x, so it should always be OK to
        // use it as long as we aren't trying to use denormals.
        //
        // v_rcp_f16 and v_rsq_f16 DO support denormals.

        // NOTE: v_sqrt and v_rcp will be combined to v_rsq later. So we don't
        //       insert rsq intrinsic here.

        // 1.0 / x -> rcp(x)
        return Builder.CreateCall(Decl, { Den });
      }

      // Same as for 1.0, but expand the sign out of the constant.
      if (CLHS->isExactlyValue(-1.0)) {
        Function *Decl = Intrinsic::getDeclaration(
          Mod, Intrinsic::amdgcn_rcp, Ty);

        // -1.0 / x -> rcp (fneg x)
        Value *FNeg = Builder.CreateFNeg(Den);
        return Builder.CreateCall(Decl, { FNeg });
      }
    }
  }

  if (AllowInaccurateRcp) {
    Function *Decl = Intrinsic::getDeclaration(
      Mod, Intrinsic::amdgcn_rcp, Ty);

    // Turn into multiply by the reciprocal.
    // x / y -> x * (1.0 / y)
    Value *Recip = Builder.CreateCall(Decl, { Den });
    return Builder.CreateFMul(Num, Recip);
  }
  return nullptr;
}

// Optimize with fdiv.fast:
//
// a/b -> fdiv.fast(a, b) when !fpmath >= 2.5ulp with denormals flushed.
//
// 1/x -> fdiv.fast(1,x) when !fpmath >= 2.5ulp.
//
// NOTE: optimizeWithRcp should be tried first because rcp is the preference.
static Value *optimizeWithFDivFast(Value *Num, Value *Den, float ReqdAccuracy,
                                   bool HasFP32DenormalFlush,
                                   IRBuilder<> &Builder, Module *Mod) {
  // fdiv.fast can achieve 2.5 ULP accuracy.
  if (ReqdAccuracy < 2.5f)
    return nullptr;

  // Only have fdiv.fast for f32.
  Type *Ty = Den->getType();
  if (!Ty->isFloatTy())
    return nullptr;

  bool NumIsOne = false;
  if (const ConstantFP *CNum = dyn_cast<ConstantFP>(Num)) {
    if (CNum->isExactlyValue(+1.0) || CNum->isExactlyValue(-1.0))
      NumIsOne = true;
  }

  // fdiv.fast does not support denormals, but it is always fine to use for
  // 1.0/x.
  if (!HasFP32DenormalFlush && !NumIsOne)
    return nullptr;

  Function *Decl = Intrinsic::getDeclaration(Mod, Intrinsic::amdgcn_fdiv_fast);
  return Builder.CreateCall(Decl, { Num, Den });
}

// Optimization is performed based on fpmath, fast math flags, as well as
// denormals to optimize fdiv with either rcp or fdiv.fast.
//
// With rcp:
// 1/x -> rcp(x) when rcp is sufficiently accurate or inaccurate rcp is
// allowed with unsafe-fp-math or afn.
//
// a/b -> a*rcp(b) when inaccurate rcp is allowed with unsafe-fp-math or afn.
//
// With fdiv.fast:
// a/b -> fdiv.fast(a, b) when !fpmath >= 2.5ulp with denormals flushed.
//
// 1/x -> fdiv.fast(1,x) when !fpmath >= 2.5ulp.
//
// NOTE: rcp is the preference in cases that both are legal.
bool AMDGPUCodeGenPrepareImpl::visitFDiv(BinaryOperator &FDiv) {
  Type *Ty = FDiv.getType()->getScalarType();
  if (!Ty->isFloatTy())
    return false;

  // The f64 rcp/rsq approximations are pretty inaccurate. We can do an
  // expansion around them in codegen. f16 is good enough to always use.

  const FPMathOperator *FPOp = cast<const FPMathOperator>(&FDiv);
  const float ReqdAccuracy = FPOp->getFPAccuracy();

  // Inaccurate rcp is allowed with unsafe-fp-math or afn.
  FastMathFlags FMF = FPOp->getFastMathFlags();
  const bool AllowInaccurateRcp = HasUnsafeFPMath || FMF.approxFunc();

  // rcp_f16 is accurate to 0.51 ulp.
  // rcp_f32 is accurate for !fpmath >= 1.0ulp and denormals are flushed.
  // rcp_f64 is never accurate.
  const bool RcpIsAccurate = HasFP32DenormalFlush && ReqdAccuracy >= 1.0f;

  IRBuilder<> Builder(FDiv.getParent(), std::next(FDiv.getIterator()));
  Builder.setFastMathFlags(FMF);
  Builder.SetCurrentDebugLocation(FDiv.getDebugLoc());

  Value *Num = FDiv.getOperand(0);
  Value *Den = FDiv.getOperand(1);

  Value *NewFDiv = nullptr;
  if (auto *VT = dyn_cast<FixedVectorType>(FDiv.getType())) {
    NewFDiv = PoisonValue::get(VT);

    // FIXME: Doesn't do the right thing for cases where the vector is partially
    // constant. This works when the scalarizer pass is run first.
    for (unsigned I = 0, E = VT->getNumElements(); I != E; ++I) {
      Value *NumEltI = Builder.CreateExtractElement(Num, I);
      Value *DenEltI = Builder.CreateExtractElement(Den, I);
      // Try rcp first.
      Value *NewElt = optimizeWithRcp(NumEltI, DenEltI, AllowInaccurateRcp,
                                      RcpIsAccurate, Builder, Mod);
      if (!NewElt) // Try fdiv.fast.
        NewElt = optimizeWithFDivFast(NumEltI, DenEltI, ReqdAccuracy,
                                      HasFP32DenormalFlush, Builder, Mod);
      if (!NewElt) // Keep the original.
        NewElt = Builder.CreateFDiv(NumEltI, DenEltI);

      NewFDiv = Builder.CreateInsertElement(NewFDiv, NewElt, I);
    }
  } else { // Scalar FDiv.
    // Try rcp first.
    NewFDiv = optimizeWithRcp(Num, Den, AllowInaccurateRcp, RcpIsAccurate,
                              Builder, Mod);
    if (!NewFDiv) { // Try fdiv.fast.
      NewFDiv = optimizeWithFDivFast(Num, Den, ReqdAccuracy,
                                     HasFP32DenormalFlush, Builder, Mod);
    }
  }

  if (NewFDiv) {
    FDiv.replaceAllUsesWith(NewFDiv);
    NewFDiv->takeName(&FDiv);
    FDiv.eraseFromParent();
  }

  return !!NewFDiv;
}

static bool hasUnsafeFPMath(const Function &F) {
  Attribute Attr = F.getFnAttribute("unsafe-fp-math");
  return Attr.getValueAsBool();
}
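
// Compute the full 64-bit product of the 32-bit operands and return it as a
// {low half, high half} pair of i32 values.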
static std::pair<Value*, Value*> getMul64(IRBuilder<> &Builder,
                                          Value *LHS, Value *RHS) {
  Type *I32Ty = Builder.getInt32Ty();
  Type *I64Ty = Builder.getInt64Ty();

  Value *LHS_EXT64 = Builder.CreateZExt(LHS, I64Ty);
  Value *RHS_EXT64 = Builder.CreateZExt(RHS, I64Ty);
  Value *MUL64 = Builder.CreateMul(LHS_EXT64, RHS_EXT64);
  Value *Lo = Builder.CreateTrunc(MUL64, I32Ty);
  Value *Hi = Builder.CreateLShr(MUL64, Builder.getInt64(32));
  Hi = Builder.CreateTrunc(Hi, I32Ty);
  return std::pair(Lo, Hi);
}

static Value* getMulHu(IRBuilder<> &Builder, Value *LHS, Value *RHS) {
  return getMul64(Builder, LHS, RHS).second;
}

/// Figure out how many bits are really needed for this division. \p AtLeast is
/// an optimization hint to bypass the second ComputeNumSignBits call if the
/// first one is insufficient. Returns -1 on failure.
int AMDGPUCodeGenPrepareImpl::getDivNumBits(BinaryOperator &I, Value *Num,
                                            Value *Den, unsigned AtLeast,
                                            bool IsSigned) const {
  const DataLayout &DL = Mod->getDataLayout();
  unsigned LHSSignBits = ComputeNumSignBits(Num, DL, 0, AC, &I);
  if (LHSSignBits < AtLeast)
    return -1;

  unsigned RHSSignBits = ComputeNumSignBits(Den, DL, 0, AC, &I);
  if (RHSSignBits < AtLeast)
    return -1;

  unsigned SignBits = std::min(LHSSignBits, RHSSignBits);
  unsigned DivBits = Num->getType()->getScalarSizeInBits() - SignBits;
  if (IsSigned)
    ++DivBits;
  return DivBits;
}

// The fractional part of a float is enough to accurately represent up to
// a 24-bit signed integer.
Value *AMDGPUCodeGenPrepareImpl::expandDivRem24(IRBuilder<> &Builder,
                                                BinaryOperator &I, Value *Num,
                                                Value *Den, bool IsDiv,
                                                bool IsSigned) const {
  int DivBits = getDivNumBits(I, Num, Den, 9, IsSigned);
  if (DivBits == -1)
    return nullptr;
  return expandDivRem24Impl(Builder, I, Num, Den, DivBits, IsDiv, IsSigned);
}

Value *AMDGPUCodeGenPrepareImpl::expandDivRem24Impl(
    IRBuilder<> &Builder, BinaryOperator &I, Value *Num, Value *Den,
    unsigned DivBits, bool IsDiv, bool IsSigned) const {
  Type *I32Ty = Builder.getInt32Ty();
  Num = Builder.CreateTrunc(Num, I32Ty);
  Den = Builder.CreateTrunc(Den, I32Ty);

  Type *F32Ty = Builder.getFloatTy();
  ConstantInt *One = Builder.getInt32(1);
  Value *JQ = One;

  if (IsSigned) {
    // char|short jq = ia ^ ib;
    JQ = Builder.CreateXor(Num, Den);

    // jq = jq >> (bitsize - 2)
    JQ = Builder.CreateAShr(JQ, Builder.getInt32(30));

    // jq = jq | 0x1
    JQ = Builder.CreateOr(JQ, One);
  }

  // int ia = (int)LHS;
  Value *IA = Num;

  // int ib = (int)RHS;
  Value *IB = Den;

  // float fa = (float)ia;
  Value *FA = IsSigned ? Builder.CreateSIToFP(IA, F32Ty)
                       : Builder.CreateUIToFP(IA, F32Ty);

  // float fb = (float)ib;
  Value *FB = IsSigned ? Builder.CreateSIToFP(IB, F32Ty)
                       : Builder.CreateUIToFP(IB, F32Ty);

  Function *RcpDecl = Intrinsic::getDeclaration(Mod, Intrinsic::amdgcn_rcp,
                                                Builder.getFloatTy());
  Value *RCP = Builder.CreateCall(RcpDecl, { FB });
  Value *FQM = Builder.CreateFMul(FA, RCP);

  // fq = trunc(fqm);
  CallInst *FQ = Builder.CreateUnaryIntrinsic(Intrinsic::trunc, FQM);
  FQ->copyFastMathFlags(Builder.getFastMathFlags());

  // float fqneg = -fq;
  Value *FQNeg = Builder.CreateFNeg(FQ);

  // float fr = mad(fqneg, fb, fa);
  auto FMAD = !ST->hasMadMacF32Insts()
                  ? Intrinsic::fma
                  : (Intrinsic::ID)Intrinsic::amdgcn_fmad_ftz;
  Value *FR = Builder.CreateIntrinsic(FMAD,
                                      {FQNeg->getType()}, {FQNeg, FB, FA}, FQ);

  // int iq = (int)fq;
  Value *IQ = IsSigned ? Builder.CreateFPToSI(FQ, I32Ty)
                       : Builder.CreateFPToUI(FQ, I32Ty);

  // fr = fabs(fr);
  FR = Builder.CreateUnaryIntrinsic(Intrinsic::fabs, FR, FQ);

  // fb = fabs(fb);
  FB = Builder.CreateUnaryIntrinsic(Intrinsic::fabs, FB, FQ);

  // int cv = fr >= fb;
  Value *CV = Builder.CreateFCmpOGE(FR, FB);

  // jq = (cv ? jq : 0);
  JQ = Builder.CreateSelect(CV, JQ, Builder.getInt32(0));

  // dst = iq + jq;
  Value *Div = Builder.CreateAdd(IQ, JQ);

  Value *Res = Div;
  if (!IsDiv) {
    // Rem needs compensation, it's easier to recompute it
    Value *Rem = Builder.CreateMul(Div, Den);
    Res = Builder.CreateSub(Num, Rem);
  }

  if (DivBits != 0 && DivBits < 32) {
    // Extend in register from the number of bits this divide really is.
    if (IsSigned) {
      int InRegBits = 32 - DivBits;

      Res = Builder.CreateShl(Res, InRegBits);
      Res = Builder.CreateAShr(Res, InRegBits);
    } else {
      ConstantInt *TruncMask
        = Builder.getInt32((UINT64_C(1) << DivBits) - 1);
      Res = Builder.CreateAnd(Res, TruncMask);
    }
  }

  return Res;
}

// Try to recognize special cases for which the DAG will emit a special, better
// expansion than the general one we do here.

// TODO: It would be better to just directly handle those optimizations here.
bool AMDGPUCodeGenPrepareImpl::divHasSpecialOptimization(BinaryOperator &I,
                                                         Value *Num,
                                                         Value *Den) const {
  if (Constant *C = dyn_cast<Constant>(Den)) {
    // Arbitrary constants get a better expansion as long as a wider mulhi is
    // legal.
    if (C->getType()->getScalarSizeInBits() <= 32)
      return true;

    // TODO: Sdiv check for not exact for some reason.

    // If there's no wider mulhi, there's only a better expansion for powers of
    // two.
    // TODO: Should really know for each vector element.
    if (isKnownToBeAPowerOfTwo(C, *DL, true, 0, AC, &I, DT))
      return true;

    return false;
  }

  if (BinaryOperator *BinOpDen = dyn_cast<BinaryOperator>(Den)) {
    // fold (udiv x, (shl c, y)) -> x >>u (log2(c)+y) iff c is power of 2
    if (BinOpDen->getOpcode() == Instruction::Shl &&
        isa<Constant>(BinOpDen->getOperand(0)) &&
        isKnownToBeAPowerOfTwo(BinOpDen->getOperand(0), *DL, true,
                               0, AC, &I, DT)) {
      return true;
    }
  }

  return false;
}
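
// Return a value that is all ones if V is negative and all zeros otherwise,
// folded to a constant when the sign of V is statically known.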
static Value *getSign32(Value *V, IRBuilder<> &Builder, const DataLayout *DL) {
  // Check whether the sign can be determined statically.
  KnownBits Known = computeKnownBits(V, *DL);
  if (Known.isNegative())
    return Constant::getAllOnesValue(V->getType());
  if (Known.isNonNegative())
    return Constant::getNullValue(V->getType());
  return Builder.CreateAShr(V, Builder.getInt32(31));
}

Value *AMDGPUCodeGenPrepareImpl::expandDivRem32(IRBuilder<> &Builder,
                                                BinaryOperator &I, Value *X,
                                                Value *Y) const {
  Instruction::BinaryOps Opc = I.getOpcode();
  assert(Opc == Instruction::URem || Opc == Instruction::UDiv ||
         Opc == Instruction::SRem || Opc == Instruction::SDiv);

  FastMathFlags FMF;
  FMF.setFast();
  Builder.setFastMathFlags(FMF);

  if (divHasSpecialOptimization(I, X, Y))
    return nullptr; // Keep it for later optimization.

  bool IsDiv = Opc == Instruction::UDiv || Opc == Instruction::SDiv;
  bool IsSigned = Opc == Instruction::SRem || Opc == Instruction::SDiv;

  Type *Ty = X->getType();
  Type *I32Ty = Builder.getInt32Ty();
  Type *F32Ty = Builder.getFloatTy();

  if (Ty->getScalarSizeInBits() < 32) {
    if (IsSigned) {
      X = Builder.CreateSExt(X, I32Ty);
      Y = Builder.CreateSExt(Y, I32Ty);
    } else {
      X = Builder.CreateZExt(X, I32Ty);
      Y = Builder.CreateZExt(Y, I32Ty);
    }
  }

  if (Value *Res = expandDivRem24(Builder, I, X, Y, IsDiv, IsSigned)) {
    return IsSigned ? Builder.CreateSExtOrTrunc(Res, Ty) :
                      Builder.CreateZExtOrTrunc(Res, Ty);
  }

  ConstantInt *Zero = Builder.getInt32(0);
  ConstantInt *One = Builder.getInt32(1);

  Value *Sign = nullptr;
  if (IsSigned) {
    Value *SignX = getSign32(X, Builder, DL);
    Value *SignY = getSign32(Y, Builder, DL);
    // Remainder sign is the same as LHS
    Sign = IsDiv ? Builder.CreateXor(SignX, SignY) : SignX;
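
    // Compute absolute values: (v + sign) ^ sign is the identity when
    // sign == 0 and a two's-complement negation when sign == -1.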
    X = Builder.CreateAdd(X, SignX);
    Y = Builder.CreateAdd(Y, SignY);

    X = Builder.CreateXor(X, SignX);
    Y = Builder.CreateXor(Y, SignY);
  }

  // The algorithm here is based on ideas from "Software Integer Division", Tom
  // Rodeheffer, August 2008.
  //
  // unsigned udiv(unsigned x, unsigned y) {
  //   // Initial estimate of inv(y). The constant is less than 2^32 to ensure
  //   // that this is a lower bound on inv(y), even if some of the calculations
  //   // round up.
  //   unsigned z = (unsigned)((4294967296.0 - 512.0) * v_rcp_f32((float)y));
  //
  //   // One round of UNR (Unsigned integer Newton-Raphson) to improve z.
  //   // Empirically this is guaranteed to give a "two-y" lower bound on
  //   // inv(y).
  //   z += umulh(z, -y * z);
  //
  //   // Quotient/remainder estimate.
  //   unsigned q = umulh(x, z);
  //   unsigned r = x - q * y;
  //
  //   // Two rounds of quotient/remainder refinement.
  //   if (r >= y) {
  //     ++q;
  //     r -= y;
  //   }
  //   if (r >= y) {
  //     ++q;
  //     r -= y;
  //   }
  //
  //   return q;
  // }

  // Initial estimate of inv(y).
  Value *FloatY = Builder.CreateUIToFP(Y, F32Ty);
  Function *Rcp = Intrinsic::getDeclaration(Mod, Intrinsic::amdgcn_rcp, F32Ty);
  Value *RcpY = Builder.CreateCall(Rcp, {FloatY});
  Constant *Scale = ConstantFP::get(F32Ty, llvm::bit_cast<float>(0x4F7FFFFE));
  Value *ScaledY = Builder.CreateFMul(RcpY, Scale);
  Value *Z = Builder.CreateFPToUI(ScaledY, I32Ty);

  // One round of UNR.
  Value *NegY = Builder.CreateSub(Zero, Y);
  Value *NegYZ = Builder.CreateMul(NegY, Z);
  Z = Builder.CreateAdd(Z, getMulHu(Builder, Z, NegYZ));

  // Quotient/remainder estimate.
  Value *Q = getMulHu(Builder, X, Z);
  Value *R = Builder.CreateSub(X, Builder.CreateMul(Q, Y));

  // First quotient/remainder refinement.
  Value *Cond = Builder.CreateICmpUGE(R, Y);
  if (IsDiv)
    Q = Builder.CreateSelect(Cond, Builder.CreateAdd(Q, One), Q);
  R = Builder.CreateSelect(Cond, Builder.CreateSub(R, Y), R);

  // Second quotient/remainder refinement.
  Cond = Builder.CreateICmpUGE(R, Y);
  Value *Res;
  if (IsDiv)
    Res = Builder.CreateSelect(Cond, Builder.CreateAdd(Q, One), Q);
  else
    Res = Builder.CreateSelect(Cond, Builder.CreateSub(R, Y), R);

  if (IsSigned) {
    Res = Builder.CreateXor(Res, Sign);
    Res = Builder.CreateSub(Res, Sign);
  }

  Res = Builder.CreateTrunc(Res, Ty);

  return Res;
}

Value *AMDGPUCodeGenPrepareImpl::shrinkDivRem64(IRBuilder<> &Builder,
                                                BinaryOperator &I, Value *Num,
                                                Value *Den) const {
  if (!ExpandDiv64InIR && divHasSpecialOptimization(I, Num, Den))
    return nullptr; // Keep it for later optimization.

  Instruction::BinaryOps Opc = I.getOpcode();

  bool IsDiv = Opc == Instruction::SDiv || Opc == Instruction::UDiv;
  bool IsSigned = Opc == Instruction::SDiv || Opc == Instruction::SRem;

  int NumDivBits = getDivNumBits(I, Num, Den, 32, IsSigned);
  if (NumDivBits == -1)
    return nullptr;

  Value *Narrowed = nullptr;
  if (NumDivBits <= 24) {
    Narrowed = expandDivRem24Impl(Builder, I, Num, Den, NumDivBits,
                                  IsDiv, IsSigned);
  } else if (NumDivBits <= 32) {
    Narrowed = expandDivRem32(Builder, I, Num, Den);
  }

  if (Narrowed) {
    return IsSigned ? Builder.CreateSExt(Narrowed, Num->getType()) :
                      Builder.CreateZExt(Narrowed, Num->getType());
  }

  return nullptr;
}

void AMDGPUCodeGenPrepareImpl::expandDivRem64(BinaryOperator &I) const {
  Instruction::BinaryOps Opc = I.getOpcode();
  // Do the general expansion.
  if (Opc == Instruction::UDiv || Opc == Instruction::SDiv) {
    expandDivisionUpTo64Bits(&I);
    return;
  }

  if (Opc == Instruction::URem || Opc == Instruction::SRem) {
    expandRemainderUpTo64Bits(&I);
    return;
  }

  llvm_unreachable("not a division");
}

bool AMDGPUCodeGenPrepareImpl::visitBinaryOperator(BinaryOperator &I) {
  if (foldBinOpIntoSelect(I))
    return true;

  if (ST->has16BitInsts() && needsPromotionToI32(I.getType()) &&
      UA->isUniform(&I) && promoteUniformOpToI32(I))
    return true;

  if (UseMul24Intrin && replaceMulWithMul24(I))
    return true;

  bool Changed = false;
  Instruction::BinaryOps Opc = I.getOpcode();
  Type *Ty = I.getType();
  Value *NewDiv = nullptr;
  unsigned ScalarSize = Ty->getScalarSizeInBits();

  SmallVector<BinaryOperator *, 8> Div64ToExpand;

  if ((Opc == Instruction::URem || Opc == Instruction::UDiv ||
       Opc == Instruction::SRem || Opc == Instruction::SDiv) &&
      ScalarSize <= 64 &&
      !DisableIDivExpand) {
    Value *Num = I.getOperand(0);
    Value *Den = I.getOperand(1);
    IRBuilder<> Builder(&I);
    Builder.SetCurrentDebugLocation(I.getDebugLoc());

    if (auto *VT = dyn_cast<FixedVectorType>(Ty)) {
      NewDiv = PoisonValue::get(VT);

      for (unsigned N = 0, E = VT->getNumElements(); N != E; ++N) {
        Value *NumEltN = Builder.CreateExtractElement(Num, N);
        Value *DenEltN = Builder.CreateExtractElement(Den, N);

        Value *NewElt;
        if (ScalarSize <= 32) {
          NewElt = expandDivRem32(Builder, I, NumEltN, DenEltN);
          if (!NewElt)
            NewElt = Builder.CreateBinOp(Opc, NumEltN, DenEltN);
        } else {
          // See if this 64-bit division can be shrunk to 32/24-bits before
          // producing the general expansion.
          NewElt = shrinkDivRem64(Builder, I, NumEltN, DenEltN);
          if (!NewElt) {
            // The general 64-bit expansion introduces control flow and doesn't
            // return the new value. Just insert a scalar copy and defer
            // expanding it.
            NewElt = Builder.CreateBinOp(Opc, NumEltN, DenEltN);
            Div64ToExpand.push_back(cast<BinaryOperator>(NewElt));
          }
        }

        NewDiv = Builder.CreateInsertElement(NewDiv, NewElt, N);
      }
    } else {
      if (ScalarSize <= 32)
        NewDiv = expandDivRem32(Builder, I, Num, Den);
      else {
        NewDiv = shrinkDivRem64(Builder, I, Num, Den);
        if (!NewDiv)
          Div64ToExpand.push_back(&I);
      }
    }

    if (NewDiv) {
      I.replaceAllUsesWith(NewDiv);
      I.eraseFromParent();
      Changed = true;
    }
  }

  if (ExpandDiv64InIR) {
    // TODO: We get much worse code in specially handled constant cases.
    for (BinaryOperator *Div : Div64ToExpand) {
      expandDivRem64(*Div);
      FlowChanged = true;
      Changed = true;
    }
  }

  return Changed;
}

bool AMDGPUCodeGenPrepareImpl::visitLoadInst(LoadInst &I) {
  if (!WidenLoads)
    return false;

  if ((I.getPointerAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS ||
       I.getPointerAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT) &&
      canWidenScalarExtLoad(I)) {
    IRBuilder<> Builder(&I);
    Builder.SetCurrentDebugLocation(I.getDebugLoc());

    Type *I32Ty = Builder.getInt32Ty();
    LoadInst *WidenLoad = Builder.CreateLoad(I32Ty, I.getPointerOperand());
    WidenLoad->copyMetadata(I);

    // If we have range metadata, we need to convert the type, and not make
    // assumptions about the high bits.
    if (auto *Range = WidenLoad->getMetadata(LLVMContext::MD_range)) {
      ConstantInt *Lower =
          mdconst::extract<ConstantInt>(Range->getOperand(0));

      if (Lower->isNullValue()) {
        WidenLoad->setMetadata(LLVMContext::MD_range, nullptr);
      } else {
        Metadata *LowAndHigh[] = {
            ConstantAsMetadata::get(
                ConstantInt::get(I32Ty, Lower->getValue().zext(32))),
            // Don't make assumptions about the high bits.
            ConstantAsMetadata::get(ConstantInt::get(I32Ty, 0))
        };

        WidenLoad->setMetadata(LLVMContext::MD_range,
                               MDNode::get(Mod->getContext(), LowAndHigh));
      }
    }

    int TySize = Mod->getDataLayout().getTypeSizeInBits(I.getType());
    Type *IntNTy = Builder.getIntNTy(TySize);
    Value *ValTrunc = Builder.CreateTrunc(WidenLoad, IntNTy);
    Value *ValOrig = Builder.CreateBitCast(ValTrunc, I.getType());
    I.replaceAllUsesWith(ValOrig);
    I.eraseFromParent();
    return true;
  }

  return false;
}

bool AMDGPUCodeGenPrepareImpl::visitICmpInst(ICmpInst &I) {
  bool Changed = false;

  if (ST->has16BitInsts() && needsPromotionToI32(I.getOperand(0)->getType()) &&
      UA->isUniform(&I))
    Changed |= promoteUniformOpToI32(I);

  return Changed;
}

bool AMDGPUCodeGenPrepareImpl::visitSelectInst(SelectInst &I) {
  Value *Cond = I.getCondition();
  Value *TrueVal = I.getTrueValue();
  Value *FalseVal = I.getFalseValue();
  Value *CmpVal;
  FCmpInst::Predicate Pred;

  if (ST->has16BitInsts() && needsPromotionToI32(I.getType())) {
    if (UA->isUniform(&I))
      return promoteUniformOpToI32(I);
    return false;
  }

  // Match fract pattern with nan check.
  if (!match(Cond, m_FCmp(Pred, m_Value(CmpVal), m_NonNaN())))
    return false;

  FPMathOperator *FPOp = dyn_cast<FPMathOperator>(&I);
  if (!FPOp)
    return false;

  IRBuilder<> Builder(&I);
  Builder.setFastMathFlags(FPOp->getFastMathFlags());

  auto *IITrue = dyn_cast<IntrinsicInst>(TrueVal);
  auto *IIFalse = dyn_cast<IntrinsicInst>(FalseVal);

  Value *Fract = nullptr;
  if (Pred == FCmpInst::FCMP_UNO && TrueVal == CmpVal && IIFalse &&
      CmpVal == matchFractPat(*IIFalse)) {
    // isnan(x) ? x : fract(x)
    Fract = applyFractPat(Builder, CmpVal);
  } else if (Pred == FCmpInst::FCMP_ORD && FalseVal == CmpVal && IITrue &&
             CmpVal == matchFractPat(*IITrue)) {
    // !isnan(x) ? fract(x) : x
    Fract = applyFractPat(Builder, CmpVal);
  } else
    return false;

  Fract->takeName(&I);
  I.replaceAllUsesWith(Fract);
  RecursivelyDeleteTriviallyDeadInstructions(&I, TLInfo);
  return true;
}

static bool areInSameBB(const Value *A, const Value *B) {
  const auto *IA = dyn_cast<Instruction>(A);
  const auto *IB = dyn_cast<Instruction>(B);
  return IA && IB && IA->getParent() == IB->getParent();
}

// Helper for breaking large PHIs that returns true when an extractelement on V
// is likely to be folded away by the DAG combiner.
static bool isInterestingPHIIncomingValue(const Value *V) {
  const auto *FVT = dyn_cast<FixedVectorType>(V->getType());
  if (!FVT)
    return false;

  const Value *CurVal = V;

  // Check for insertelements, keeping track of the elements covered.
  BitVector EltsCovered(FVT->getNumElements());
  while (const auto *IE = dyn_cast<InsertElementInst>(CurVal)) {
    const auto *Idx = dyn_cast<ConstantInt>(IE->getOperand(2));

    // Non constant index/out of bounds index -> folding is unlikely.
    // The latter is more of a sanity check because canonical IR should just
    // have replaced those with poison.
    if (!Idx || Idx->getSExtValue() >= FVT->getNumElements())
      return false;

    const auto *VecSrc = IE->getOperand(0);

    // If the vector source is another instruction, it must be in the same
    // basic block. Otherwise, the DAGCombiner won't see the whole thing and is
    // unlikely to be able to do anything interesting here.
    if (isa<Instruction>(VecSrc) && !areInSameBB(VecSrc, IE))
      return false;

    CurVal = VecSrc;
    EltsCovered.set(Idx->getSExtValue());

    // All elements covered.
    if (EltsCovered.all())
      return true;
  }

  // We either didn't find a single insertelement, or the insertelement chain
  // ended before all elements were covered. Check for other interesting
  // values.

  // Constants are always interesting because we can just constant fold the
  // extractelements.
  if (isa<Constant>(CurVal))
    return true;

  // shufflevector is likely to be profitable if either operand is a constant,
  // or if either source is in the same block.
  // This is because shufflevector is most often lowered as a series of
  // insert/extract elements anyway.
  if (const auto *SV = dyn_cast<ShuffleVectorInst>(CurVal)) {
    return isa<Constant>(SV->getOperand(1)) ||
           areInSameBB(SV, SV->getOperand(0)) ||
           areInSameBB(SV, SV->getOperand(1));
  }

  return false;
}
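
// For instance, an incoming value defined in a predecessor as
//   %v0 = insertelement <2 x i16> poison, i16 %x, i64 0
//   %v1 = insertelement <2 x i16> %v0, i16 %y, i64 1
// covers every element, so extractelements of %v1 are expected to fold away
// (an illustrative sketch; the names are invented).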

bool AMDGPUCodeGenPrepareImpl::canBreakPHINode(const PHINode &I) {
  // Check in the cache, or add an entry for this node.
  //
  // We init with false because we consider all PHI nodes unbreakable until we
  // reach a conclusion. Doing the opposite - assuming they're break-able until
  // proven otherwise - can be harmful in some pathological cases so we're
  // conservative for now.
  const auto [It, DidInsert] = BreakPhiNodesCache.insert({&I, false});
  if (!DidInsert)
    return It->second;

  // This function may recurse, so to guard against infinite looping, this PHI
  // is conservatively considered unbreakable until we reach a conclusion.

  // Don't break PHIs that have no interesting incoming values. That is, where
  // there is no clear opportunity to fold the "extractelement" instructions we
  // would add.
  //
  // Note: IC does not run after this pass, so we're only interested in the
  // foldings that the DAG combiner can do.
  if (none_of(I.incoming_values(),
              [&](Value *V) { return isInterestingPHIIncomingValue(V); }))
    return false;

  // Now, check users for unbreakable PHI nodes. If we have an unbreakable PHI
  // node as user, we don't want to break this PHI either because it's unlikely
  // to be beneficial. We would just explode the vector and reassemble it
  // directly, wasting instructions.
  //
  // In the case where multiple users are PHI nodes, we want at least half of
  // them to be breakable.
  int Score = 0;
  for (const Value *U : I.users()) {
    if (const auto *PU = dyn_cast<PHINode>(U))
      Score += canBreakPHINode(*PU) ? 1 : -1;
  }
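
  // Each PHI-node user scored +1 if breakable and -1 otherwise (non-PHI users
  // are ignored), so a non-negative score means at least half of the PHI-node
  // users are breakable; e.g. one breakable and one unbreakable PHI user
  // scores 0 and still permits breaking this PHI.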
  if (Score < 0)
    return false;

  return BreakPhiNodesCache[&I] = true;
}

/// Helper class for "break large PHIs" (visitPHINode).
///
/// This represents a slice of a PHI's incoming value, which is made up of:
///   - The type of the slice (Ty)
///   - The index in the incoming value's vector where the slice starts (Idx)
///   - The number of elements in the slice (NumElts).
/// It also keeps track of the NewPHI node inserted for this particular slice.
///
/// Slice examples:
///   <4 x i64> -> Split into four i64 slices.
///     -> [i64, 0, 1], [i64, 1, 1], [i64, 2, 1], [i64, 3, 1]
///   <5 x i16> -> Split into 2 <2 x i16> slices + a i16 tail.
///     -> [<2 x i16>, 0, 2], [<2 x i16>, 2, 2], [i16, 4, 1]
class VectorSlice {
public:
  VectorSlice(Type *Ty, unsigned Idx, unsigned NumElts)
      : Ty(Ty), Idx(Idx), NumElts(NumElts) {}

  Type *Ty = nullptr;
  unsigned Idx = 0;
  unsigned NumElts = 0;
  PHINode *NewPHI = nullptr;

  /// Slice \p Inc according to the information contained within this slice.
  /// This is cached, so if called multiple times for the same \p BB & \p Inc
  /// pair, it returns the same Sliced value as well.
  ///
  /// Note this *intentionally* does not return the same value for, say,
  /// [%bb.0, %0] & [%bb.1, %0] as:
  ///   - It could cause issues with dominance (e.g. if bb.1 is seen first, then
  ///     the value in bb.1 may not be reachable from bb.0 if it's its
  ///     predecessor.)
  ///   - We also want to make our extract instructions as local as possible so
  ///     the DAG has better chances of folding them out. Duplicating them like
  ///     that is beneficial in that regard.
  ///
  /// This is both a minor optimization to avoid creating duplicate
  /// instructions and a requirement for correctness. It is not forbidden
  /// for a PHI node to have the same [BB, Val] pair multiple times. If we
  /// returned a new value each time, those previously identical pairs would all
  /// have different incoming values (from the same block) and it'd cause a "PHI
  /// node has multiple entries for the same basic block with different incoming
  /// values!" verifier error.
  Value *getSlicedVal(BasicBlock *BB, Value *Inc, StringRef NewValName) {
    Value *&Res = SlicedVals[{BB, Inc}];
    if (Res)
      return Res;

    IRBuilder<> B(BB->getTerminator());
    if (Instruction *IncInst = dyn_cast<Instruction>(Inc))
      B.SetCurrentDebugLocation(IncInst->getDebugLoc());

    if (NumElts > 1) {
      SmallVector<int, 4> Mask;
      for (unsigned K = Idx; K < (Idx + NumElts); ++K)
        Mask.push_back(K);
      Res = B.CreateShuffleVector(Inc, Mask, NewValName);
    } else
      Res = B.CreateExtractElement(Inc, Idx, NewValName);

    return Res;
  }

private:
  SmallDenseMap<std::pair<BasicBlock *, Value *>, Value *> SlicedVals;
};

bool AMDGPUCodeGenPrepareImpl::visitPHINode(PHINode &I) {
  // Break up fixed-vector PHIs into smaller pieces.
  // The default threshold is 32, so this breaks up any vector that's >32 bits
  // into its elements, or into 32-bit pieces (for 8/16 bit elts).
  //
  // This is only helpful for DAGISel because it doesn't handle large PHIs as
  // well as GlobalISel. DAGISel lowers PHIs by using CopyToReg/CopyFromReg.
  // With large, odd-sized PHIs we may end up needing many `build_vector`
  // operations with most elements being "undef". This inhibits a lot of
  // optimization opportunities and can result in unreasonably high register
  // pressure and the inevitable stack spilling.
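  //
  // For example (illustrative), with the default threshold of 32:
  //   %v = phi <4 x i64> [ %a, %bb0 ], [ %b, %bb1 ]
  // is rewritten as four i64 PHIs whose results are reassembled with
  // insertelement instructions, so ISel only ever sees small PHIs.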
  if (!ScalarizeLargePHIs || getCGPassBuilderOption().EnableGlobalISelOption)
    return false;

  FixedVectorType *FVT = dyn_cast<FixedVectorType>(I.getType());
  if (!FVT || DL->getTypeSizeInBits(FVT) <= ScalarizeLargePHIsThreshold)
    return false;

  if (!ForceScalarizeLargePHIs && !canBreakPHINode(I))
    return false;

  std::vector<VectorSlice> Slices;

  Type *EltTy = FVT->getElementType();
  {
    unsigned Idx = 0;
    // For 8/16 bit types, don't scalarize fully but break the vector into as
    // many 32-bit slices as we can, and scalarize the tail.
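    // E.g. (illustrative) a <7 x i8> PHI yields one <4 x i8> slice followed by
    // three scalar i8 slices.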
    const unsigned EltSize = DL->getTypeSizeInBits(EltTy);
    const unsigned NumElts = FVT->getNumElements();
    if (EltSize == 8 || EltSize == 16) {
      const unsigned SubVecSize = (32 / EltSize);
      Type *SubVecTy = FixedVectorType::get(EltTy, SubVecSize);
      for (unsigned End = alignDown(NumElts, SubVecSize); Idx < End;
           Idx += SubVecSize)
        Slices.emplace_back(SubVecTy, Idx, SubVecSize);
    }

    // Scalarize all remaining elements.
    for (; Idx < NumElts; ++Idx)
      Slices.emplace_back(EltTy, Idx, 1);
  }

  if (Slices.size() == 1)
    return false;

  // Create one PHI per vector piece. The "VectorSlice" class takes care of
  // creating the necessary instructions to extract the relevant slices of each
  // incoming value.
  IRBuilder<> B(I.getParent());
  B.SetCurrentDebugLocation(I.getDebugLoc());

  unsigned IncNameSuffix = 0;
  for (VectorSlice &S : Slices) {
    // We need to reset the builder on each iteration, because getSlicedVal may
    // have inserted something into I's BB.
    B.SetInsertPoint(I.getParent()->getFirstNonPHI());
    S.NewPHI = B.CreatePHI(S.Ty, I.getNumIncomingValues());

    for (const auto &[Idx, BB] : enumerate(I.blocks())) {
      S.NewPHI->addIncoming(S.getSlicedVal(BB, I.getIncomingValue(Idx),
                                           "largephi.extractslice" +
                                               std::to_string(IncNameSuffix++)),
                            BB);
    }
  }

  // And replace this PHI with a vector of all the previous PHI values.
  Value *Vec = PoisonValue::get(FVT);
  unsigned NameSuffix = 0;
  for (VectorSlice &S : Slices) {
    const auto ValName = "largephi.insertslice" + std::to_string(NameSuffix++);
    if (S.NumElts > 1)
      Vec =
          B.CreateInsertVector(FVT, Vec, S.NewPHI, B.getInt64(S.Idx), ValName);
    else
      Vec = B.CreateInsertElement(Vec, S.NewPHI, S.Idx, ValName);
  }

  I.replaceAllUsesWith(Vec);
  I.eraseFromParent();
  return true;
}

bool AMDGPUCodeGenPrepareImpl::visitIntrinsicInst(IntrinsicInst &I) {
  switch (I.getIntrinsicID()) {
  case Intrinsic::bitreverse:
    return visitBitreverseIntrinsicInst(I);
  case Intrinsic::minnum:
    return visitMinNum(I);
  default:
    return false;
  }
}

bool AMDGPUCodeGenPrepareImpl::visitBitreverseIntrinsicInst(IntrinsicInst &I) {
  bool Changed = false;

  if (ST->has16BitInsts() && needsPromotionToI32(I.getType()) &&
      UA->isUniform(&I))
    Changed |= promoteUniformBitreverseToI32(I);

  return Changed;
}

/// Match non-nan fract pattern:
///   minnum(fsub(x, floor(x)), nextafter(1.0, -1.0))
///
/// Only matched if fract is a useful instruction for the subtarget. This does
/// not account for nan handling; the instruction has a nan check on the input
/// value.
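///
/// For example (illustrative), for f32 the matched IR looks like:
///   %floor = call float @llvm.floor.f32(float %x)
///   %sub = fsub float %x, %floor
///   %min = call float @llvm.minnum.f32(float %sub, float 0x3FEFFFFFE0000000)
/// where the constant is nextafter(1.0, -1.0) in the value's semantics.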
Value *AMDGPUCodeGenPrepareImpl::matchFractPat(IntrinsicInst &I) {
  if (ST->hasFractBug())
    return nullptr;

  if (I.getIntrinsicID() != Intrinsic::minnum)
    return nullptr;

  Type *Ty = I.getType();
  if (!isLegalFloatingTy(Ty->getScalarType()))
    return nullptr;

  Value *Arg0 = I.getArgOperand(0);
  Value *Arg1 = I.getArgOperand(1);

  const APFloat *C;
  if (!match(Arg1, m_APFloat(C)))
    return nullptr;

  APFloat One(1.0);
  bool LosesInfo;
  One.convert(C->getSemantics(), APFloat::rmNearestTiesToEven, &LosesInfo);

  // Match nextafter(1.0, -1.0).
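  // (For f32 this is 1.0 - 2^-24 ~= 0.99999994, the largest float below 1.0.)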
  One.next(true);
  if (One != *C)
    return nullptr;

  Value *FloorSrc;
  if (match(Arg0, m_FSub(m_Value(FloorSrc),
                         m_Intrinsic<Intrinsic::floor>(m_Deferred(FloorSrc)))))
    return FloorSrc;
  return nullptr;
}

Value *AMDGPUCodeGenPrepareImpl::applyFractPat(IRBuilder<> &Builder,
                                               Value *FractArg) {
  SmallVector<Value *, 4> FractVals;
  extractValues(Builder, FractVals, FractArg);

  SmallVector<Value *, 4> ResultVals(FractVals.size());

  Type *Ty = FractArg->getType()->getScalarType();
  for (unsigned I = 0, E = FractVals.size(); I != E; ++I) {
    ResultVals[I] =
        Builder.CreateIntrinsic(Intrinsic::amdgcn_fract, {Ty}, {FractVals[I]});
  }

  return insertValues(Builder, FractArg->getType(), ResultVals);
}

bool AMDGPUCodeGenPrepareImpl::visitMinNum(IntrinsicInst &I) {
  Value *FractArg = matchFractPat(I);
  if (!FractArg)
    return false;

  // Match the pattern for the fract intrinsic in contexts where the nan check
  // has been optimized out (and hope the knowledge that the source can't be
  // nan wasn't lost).
  if (!I.hasNoNaNs() && !isKnownNeverNaN(FractArg, *DL, TLInfo))
    return false;

  IRBuilder<> Builder(&I);
  FastMathFlags FMF = I.getFastMathFlags();
  FMF.setNoNaNs();
  Builder.setFastMathFlags(FMF);

  Value *Fract = applyFractPat(Builder, FractArg);
  Fract->takeName(&I);
  I.replaceAllUsesWith(Fract);

  RecursivelyDeleteTriviallyDeadInstructions(&I, TLInfo);
  return true;
}

bool AMDGPUCodeGenPrepare::doInitialization(Module &M) {
  Impl.Mod = &M;
  Impl.DL = &Impl.Mod->getDataLayout();
  return false;
}

bool AMDGPUCodeGenPrepare::runOnFunction(Function &F) {
  if (skipFunction(F))
    return false;

  auto *TPC = getAnalysisIfAvailable<TargetPassConfig>();
  if (!TPC)
    return false;

  const AMDGPUTargetMachine &TM = TPC->getTM<AMDGPUTargetMachine>();
  Impl.TLInfo = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
  Impl.ST = &TM.getSubtarget<GCNSubtarget>(F);
  Impl.AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
  Impl.UA = &getAnalysis<UniformityInfoWrapperPass>().getUniformityInfo();
  auto *DTWP = getAnalysisIfAvailable<DominatorTreeWrapperPass>();
  Impl.DT = DTWP ? &DTWP->getDomTree() : nullptr;
  Impl.HasUnsafeFPMath = hasUnsafeFPMath(F);
  SIModeRegisterDefaults Mode(F);
  Impl.HasFP32DenormalFlush =
      Mode.FP32Denormals == DenormalMode::getPreserveSign();
  return Impl.run(F);
}

PreservedAnalyses AMDGPUCodeGenPreparePass::run(Function &F,
                                                FunctionAnalysisManager &FAM) {
  AMDGPUCodeGenPrepareImpl Impl;
  Impl.Mod = F.getParent();
  Impl.DL = &Impl.Mod->getDataLayout();
  Impl.TLInfo = &FAM.getResult<TargetLibraryAnalysis>(F);
  Impl.ST = &TM.getSubtarget<GCNSubtarget>(F);
  Impl.AC = &FAM.getResult<AssumptionAnalysis>(F);
  Impl.UA = &FAM.getResult<UniformityInfoAnalysis>(F);
  Impl.DT = FAM.getCachedResult<DominatorTreeAnalysis>(F);
  Impl.HasUnsafeFPMath = hasUnsafeFPMath(F);
  SIModeRegisterDefaults Mode(F);
  Impl.HasFP32DenormalFlush =
      Mode.FP32Denormals == DenormalMode::getPreserveSign();
  PreservedAnalyses PA = PreservedAnalyses::none();
  if (!Impl.FlowChanged)
    PA.preserveSet<CFGAnalyses>();
  return Impl.run(F) ? PA : PreservedAnalyses::all();
}

INITIALIZE_PASS_BEGIN(AMDGPUCodeGenPrepare, DEBUG_TYPE,
                      "AMDGPU IR optimizations", false, false)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(UniformityInfoWrapperPass)
INITIALIZE_PASS_END(AMDGPUCodeGenPrepare, DEBUG_TYPE, "AMDGPU IR optimizations",
                    false, false)

char AMDGPUCodeGenPrepare::ID = 0;

FunctionPass *llvm::createAMDGPUCodeGenPreparePass() {
  return new AMDGPUCodeGenPrepare();
}