This commit restructures how TypeID is implemented to ideally avoid the current problems related to shared libraries. This is done by changing the "implicit" fallback path to use the name of the type, instead of using a static template variable (which breaks shared libraries). The major downside to this is that it adds some additional initialization costs for the implicit path. Given the use of type names for uniqueness in the fallback, we also no longer allow types defined in anonymous namespaces to have an implicit TypeID. To simplify defining an ID for these classes, a new `MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID` macro was added to allow for explicitly defining a TypeID directly on an internal class. To help identify when types are using the fallback, `-debug-only=typeid` can be used to log which types are using implicit ids. This change generally only requires changes to the test passes, which are all defined in anonymous namespaces, and thus can't use the fallback any longer. Differential Revision: https://reviews.llvm.org/D122775
67 lines
2.1 KiB
C++
//===- TestMemRefStrideCalculation.cpp - Pass to test strides computation--===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
|
#include "mlir/Dialect/MemRef/IR/MemRef.h"
|
|
#include "mlir/IR/BuiltinTypes.h"
|
|
#include "mlir/Pass/Pass.h"
|
|
|
|
using namespace mlir;
|
|
|
|
namespace {
|
|
struct TestMemRefStrideCalculation
|
|
: public PassWrapper<TestMemRefStrideCalculation,
|
|
InterfacePass<SymbolOpInterface>> {
|
|
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(TestMemRefStrideCalculation)
|
|
|
|
StringRef getArgument() const final {
|
|
return "test-memref-stride-calculation";
|
|
}
|
|
StringRef getDescription() const final {
|
|
return "Test operation constant folding";
|
|
}
|
|
void runOnOperation() override;
|
|
};
|
|
} // namespace
|
|
|
|
/// Traverse AllocOp and compute strides of each MemRefType independently.
|
|
void TestMemRefStrideCalculation::runOnOperation() {
|
|
llvm::outs() << "Testing: " << getOperation().getName() << "\n";
|
|
getOperation().walk([&](memref::AllocOp allocOp) {
|
|
auto memrefType = allocOp.getResult().getType().cast<MemRefType>();
|
|
int64_t offset;
|
|
SmallVector<int64_t, 4> strides;
|
|
if (failed(getStridesAndOffset(memrefType, strides, offset))) {
|
|
llvm::outs() << "MemRefType " << memrefType << " cannot be converted to "
|
|
<< "strided form\n";
|
|
return;
|
|
}
|
|
llvm::outs() << "MemRefType offset: ";
|
|
if (offset == MemRefType::getDynamicStrideOrOffset())
|
|
llvm::outs() << "?";
|
|
else
|
|
llvm::outs() << offset;
|
|
llvm::outs() << " strides: ";
|
|
llvm::interleaveComma(strides, llvm::outs(), [&](int64_t v) {
|
|
if (v == MemRefType::getDynamicStrideOrOffset())
|
|
llvm::outs() << "?";
|
|
else
|
|
llvm::outs() << v;
|
|
});
|
|
llvm::outs() << "\n";
|
|
});
|
|
llvm::outs().flush();
|
|
}
|
|
|
|
namespace mlir {
namespace test {
/// Registers the test pass with the global pass registry so it can be invoked
/// via `-test-memref-stride-calculation`. Registration happens as a side
/// effect of constructing the (temporary) PassRegistration object.
void registerTestMemRefStrideCalculation() {
  PassRegistration<TestMemRefStrideCalculation>();
}
} // namespace test
} // namespace mlir
|