The MLIR classes Type/Attribute/Operation/Op/Value support cast/dyn_cast/isa/dyn_cast_or_null functionality through llvm's doCast functionality in addition to defining methods with the same name. This change begins the migration from uses of the methods to the corresponding free function calls, which has been decided on as the more consistent style. Note that there still exist classes that only define the methods directly, such as AffineExpr, and this change does not currently include work to support functional cast/isa calls for them.

Caveats include:
- This clang-tidy script probably has more problems.
- This only touches C++ code, so nothing that is being generated.

Context:
- https://mlir.llvm.org/deprecation/ at "Use the free function variants for dyn_cast/cast/isa/…"
- Original discussion at https://discourse.llvm.org/t/preferred-casting-style-going-forward/68443

Implementation:
This first patch was created with the following steps. The intention is to only do automated changes at first, so less time is wasted if it is reverted, and so the first mass change is a clearer example to other teams that will need to follow similar steps. The steps are described one per shell command below, as comments are removed by git:

0. Retrieve the change from the following branch to build clang-tidy with an additional check:
   https://github.com/llvm/llvm-project/compare/main...tpopp:llvm-project:tidy-cast-check
1. Build clang-tidy.
2. Run clang-tidy over the entire codebase with all checks disabled and only the relevant one enabled. Run on all header files as well.
3. Delete .inc files that were also modified, so the next build rebuilds them to a pure state.
4. Revert some of the changes for the following reasons:
   - Some files had a variable also named cast.
   - Some files had not included a header file that defines the cast functions.
   - Some files are definitions of the classes that have the casting methods, so the code still refers to the method instead of the function without adding a prefix or removing the method declaration at the same time.

```
ninja -C $BUILD_DIR clang-tidy

run-clang-tidy -clang-tidy-binary=$BUILD_DIR/bin/clang-tidy -checks='-*,misc-cast-functions'\
  -header-filter=mlir/ mlir/* -fix

rm -rf $BUILD_DIR/tools/mlir/**/*.inc

git restore mlir/lib/IR mlir/lib/Dialect/DLTI/DLTI.cpp\
  mlir/lib/Dialect/Complex/IR/ComplexDialect.cpp\
  mlir/lib/**/IR/\
  mlir/lib/Dialect/SparseTensor/Transforms/SparseVectorization.cpp\
  mlir/lib/Dialect/Vector/Transforms/LowerVectorMultiReduction.cpp\
  mlir/test/lib/Dialect/Test/TestTypes.cpp\
  mlir/test/lib/Dialect/Transform/TestTransformDialectExtension.cpp\
  mlir/test/lib/Dialect/Test/TestAttributes.cpp\
  mlir/unittests/TableGen/EnumsGenTest.cpp\
  mlir/test/python/lib/PythonTestCAPI.cpp\
  mlir/include/mlir/IR/
```

Differential Revision: https://reviews.llvm.org/D150123
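As an illustration of the kind of rewrite the check performs, here is a before/after sketch using casts that appear in this file; the "after" lines match the code below, while the "before" lines show the deprecated member-function spelling and are a sketch rather than verbatim check output:

```
// Before: member-function casting style (deprecated).
auto allocatedPtrTy = allocatedPtr.getType().cast<LLVM::LLVMPointerType>();
auto memRefElementType = elementType.dyn_cast<MemRefType>();

// After: free-function casting style, as applied by the clang-tidy check.
auto allocatedPtrTy = cast<LLVM::LLVMPointerType>(allocatedPtr.getType());
auto memRefElementType = dyn_cast<MemRefType>(elementType);
```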
//===- AllocLikeConversion.cpp - LLVM conversion for alloc operations -----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "mlir/Conversion/MemRefToLLVM/AllocLikeConversion.h"
#include "mlir/Analysis/DataLayoutAnalysis.h"
#include "mlir/Dialect/LLVMIR/FunctionCallUtils.h"
#include "mlir/Dialect/LLVMIR/LLVMDialect.h"

using namespace mlir;

namespace {
// TODO: Fix the LLVM utilities for looking up functions to take Operation*
// with SymbolTable trait instead of ModuleOp and make similar change here. This
// allows call sites to use getParentWithTrait<OpTrait::SymbolTable> instead
// of getParentOfType<ModuleOp> to pass down the operation.
LLVM::LLVMFuncOp getNotalignedAllocFn(LLVMTypeConverter *typeConverter,
                                      ModuleOp module, Type indexType) {
  bool useGenericFn = typeConverter->getOptions().useGenericFunctions;

  if (useGenericFn)
    return LLVM::lookupOrCreateGenericAllocFn(
        module, indexType, typeConverter->useOpaquePointers());

  return LLVM::lookupOrCreateMallocFn(module, indexType,
                                      typeConverter->useOpaquePointers());
}

LLVM::LLVMFuncOp getAlignedAllocFn(LLVMTypeConverter *typeConverter,
                                   ModuleOp module, Type indexType) {
  bool useGenericFn = typeConverter->getOptions().useGenericFunctions;

  if (useGenericFn)
    return LLVM::lookupOrCreateGenericAlignedAllocFn(
        module, indexType, typeConverter->useOpaquePointers());

  return LLVM::lookupOrCreateAlignedAllocFn(module, indexType,
                                            typeConverter->useOpaquePointers());
}

} // end namespace

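/// Rounds `input` up to the nearest multiple of `alignment`.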
Value AllocationOpLLVMLowering::createAligned(
    ConversionPatternRewriter &rewriter, Location loc, Value input,
    Value alignment) {
  Value one = createIndexAttrConstant(rewriter, loc, alignment.getType(), 1);
  Value bump = rewriter.create<LLVM::SubOp>(loc, alignment, one);
  Value bumped = rewriter.create<LLVM::AddOp>(loc, input, bump);
  Value mod = rewriter.create<LLVM::URemOp>(loc, bumped, alignment);
  return rewriter.create<LLVM::SubOp>(loc, bumped, mod);
}

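/// Casts the pointer returned by the allocation call into the element pointer
/// type of the memref: inserts an address space cast if the allocation was
/// performed in a different address space, and a bitcast when typed
/// (non-opaque) pointers are in use.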
static Value castAllocFuncResult(ConversionPatternRewriter &rewriter,
                                 Location loc, Value allocatedPtr,
                                 MemRefType memRefType, Type elementPtrType,
                                 LLVMTypeConverter &typeConverter) {
  auto allocatedPtrTy = cast<LLVM::LLVMPointerType>(allocatedPtr.getType());
  unsigned memrefAddrSpace = *typeConverter.getMemRefAddressSpace(memRefType);
  if (allocatedPtrTy.getAddressSpace() != memrefAddrSpace)
    allocatedPtr = rewriter.create<LLVM::AddrSpaceCastOp>(
        loc,
        typeConverter.getPointerType(allocatedPtrTy.getElementType(),
                                     memrefAddrSpace),
        allocatedPtr);

  if (!typeConverter.useOpaquePointers())
    allocatedPtr =
        rewriter.create<LLVM::BitcastOp>(loc, elementPtrType, allocatedPtr);
  return allocatedPtr;
}

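/// Allocates the underlying buffer with the unaligned allocation function and,
/// if an alignment value is provided, over-allocates by that amount and
/// computes the aligned pointer by rounding the allocated address up to the
/// requested alignment. Returns both the allocated and the aligned pointer.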
std::tuple<Value, Value> AllocationOpLLVMLowering::allocateBufferManuallyAlign(
    ConversionPatternRewriter &rewriter, Location loc, Value sizeBytes,
    Operation *op, Value alignment) const {
  if (alignment) {
    // Adjust the allocation size to consider alignment.
    sizeBytes = rewriter.create<LLVM::AddOp>(loc, sizeBytes, alignment);
  }

  MemRefType memRefType = getMemRefResultType(op);
  // Allocate the underlying buffer.
  Type elementPtrType = this->getElementPtrType(memRefType);
  LLVM::LLVMFuncOp allocFuncOp = getNotalignedAllocFn(
      getTypeConverter(), op->getParentOfType<ModuleOp>(), getIndexType());
  auto results = rewriter.create<LLVM::CallOp>(loc, allocFuncOp, sizeBytes);

  Value allocatedPtr =
      castAllocFuncResult(rewriter, loc, results.getResult(), memRefType,
                          elementPtrType, *getTypeConverter());

  Value alignedPtr = allocatedPtr;
  if (alignment) {
    // Compute the aligned pointer.
    Value allocatedInt =
        rewriter.create<LLVM::PtrToIntOp>(loc, getIndexType(), allocatedPtr);
    Value alignmentInt = createAligned(rewriter, loc, allocatedInt, alignment);
    alignedPtr =
        rewriter.create<LLVM::IntToPtrOp>(loc, elementPtrType, alignmentInt);
  }

  return std::make_tuple(allocatedPtr, alignedPtr);
}

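/// Returns the size of the memref element type in bytes, using the data layout
/// found above `op` when a data layout analysis is available and falling back
/// to `defaultLayout` otherwise. Memref and unranked memref elements are
/// measured by the size of their descriptors.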
unsigned AllocationOpLLVMLowering::getMemRefEltSizeInBytes(
    MemRefType memRefType, Operation *op,
    const DataLayout *defaultLayout) const {
  const DataLayout *layout = defaultLayout;
  if (const DataLayoutAnalysis *analysis =
          getTypeConverter()->getDataLayoutAnalysis()) {
    layout = &analysis->getAbove(op);
  }
  Type elementType = memRefType.getElementType();
  if (auto memRefElementType = dyn_cast<MemRefType>(elementType))
    return getTypeConverter()->getMemRefDescriptorSize(memRefElementType,
                                                       *layout);
  if (auto memRefElementType = dyn_cast<UnrankedMemRefType>(elementType))
    return getTypeConverter()->getUnrankedMemRefDescriptorSize(
        memRefElementType, *layout);
  return layout->getTypeSize(elementType);
}

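/// Returns true if the memref size in bytes is known to be a multiple of
/// `factor`, based on the element size and the statically known dimension
/// sizes (dynamic dimensions can only multiply the size further).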
bool AllocationOpLLVMLowering::isMemRefSizeMultipleOf(
    MemRefType type, uint64_t factor, Operation *op,
    const DataLayout *defaultLayout) const {
  uint64_t sizeDivisor = getMemRefEltSizeInBytes(type, op, defaultLayout);
  for (unsigned i = 0, e = type.getRank(); i < e; i++) {
    if (type.isDynamicDim(i))
      continue;
    sizeDivisor = sizeDivisor * type.getDimSize(i);
  }
  return sizeDivisor % factor == 0;
}

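/// Allocates the underlying buffer with the aligned allocation function,
/// padding the size up to the next multiple of `alignment` when needed since
/// aligned_alloc requires the size to be such a multiple.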
Value AllocationOpLLVMLowering::allocateBufferAutoAlign(
    ConversionPatternRewriter &rewriter, Location loc, Value sizeBytes,
    Operation *op, const DataLayout *defaultLayout, int64_t alignment) const {
  Value allocAlignment = createIndexConstant(rewriter, loc, alignment);

  MemRefType memRefType = getMemRefResultType(op);
  // Function aligned_alloc requires size to be a multiple of alignment; we pad
  // the size to the next multiple if necessary.
  if (!isMemRefSizeMultipleOf(memRefType, alignment, op, defaultLayout))
    sizeBytes = createAligned(rewriter, loc, sizeBytes, allocAlignment);

  Type elementPtrType = this->getElementPtrType(memRefType);
  LLVM::LLVMFuncOp allocFuncOp = getAlignedAllocFn(
      getTypeConverter(), op->getParentOfType<ModuleOp>(), getIndexType());
  auto results = rewriter.create<LLVM::CallOp>(
      loc, allocFuncOp, ValueRange({allocAlignment, sizeBytes}));

  return castAllocFuncResult(rewriter, loc, results.getResult(), memRefType,
                             elementPtrType, *getTypeConverter());
}

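/// Lowers an alloc-like operation: computes the buffer size, allocates the
/// buffer, and replaces the operation with a freshly populated memref
/// descriptor.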
LogicalResult AllocLikeOpLLVMLowering::matchAndRewrite(
    Operation *op, ArrayRef<Value> operands,
    ConversionPatternRewriter &rewriter) const {
  MemRefType memRefType = getMemRefResultType(op);
  if (!isConvertibleAndHasIdentityMaps(memRefType))
    return rewriter.notifyMatchFailure(op, "incompatible memref type");
  auto loc = op->getLoc();

  // Get actual sizes of the memref as values: static sizes are constant
  // values and dynamic sizes are passed to 'alloc' as operands. In case of
  // zero-dimensional memref, assume a scalar (size 1).
  SmallVector<Value, 4> sizes;
  SmallVector<Value, 4> strides;
  Value sizeBytes;
  this->getMemRefDescriptorSizes(loc, memRefType, operands, rewriter, sizes,
                                 strides, sizeBytes);

  // Allocate the underlying buffer.
  auto [allocatedPtr, alignedPtr] =
      this->allocateBuffer(rewriter, loc, sizeBytes, op);

  // Create the MemRef descriptor.
  auto memRefDescriptor = this->createMemRefDescriptor(
      loc, memRefType, allocatedPtr, alignedPtr, sizes, strides, rewriter);

  // Return the final value of the descriptor.
  rewriter.replaceOp(op, {memRefDescriptor});
  return success();
}