clang-p2996/mlir/test/lib/Dialect/GPU/TestGpuRewrite.cpp
Jacques Pienaar 09dfc5713d [mlir] Enable decoupling two kinds of greedy behavior. (#104649)
The greedy rewriter is used in many different flows and it provides a lot of
convenience (worklist management, debugging actions, tracing, etc.). But
it combines two kinds of greedy behavior: 1) how ops are matched, and 2)
folding wherever it can.

These are independent forms of greediness, and coupling them leads to
inefficiency. E.g., there are cases where one needs to create different
phases in a lowering and is required to apply patterns in a specific order
split across different passes. Using the driver there, one ends up needlessly
retrying folding / running multiple rounds of folding attempts where one
final run would have sufficed.

Of course, folks can avoid this behavior locally by building their own
driver, but this is also a commonly requested feature that people keep
working around locally in suboptimal ways.

For downstream users, there should be no behavioral change. Updating
from the deprecated API should just be a find-and-replace (e.g., of the `find ./
-type f -exec sed -i
's|applyPatternsAndFoldGreedily|applyPatternsGreedily|g' {} \;` variety),
as the API arguments haven't changed between the two.
2024-12-20 08:15:48 -08:00
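
As a concrete illustration of the rename (a minimal sketch, not code from this patch; the helper below is hypothetical, and the name of the folding toggle on `GreedyRewriteConfig` is an assumption to verify against your checkout):

```cpp
#include "mlir/Transforms/GreedyPatternRewriteDriver.h"

using namespace mlir;

// Hypothetical helper illustrating the rename; the arguments are unchanged.
static void runPatterns(Operation *op,
                        const FrozenRewritePatternSet &patterns) {
  // Before: (void)applyPatternsAndFoldGreedily(op, patterns);
  (void)applyPatternsGreedily(op, patterns);

  // With the two behaviors decoupled, folding can be switched off explicitly
  // via the greedy config (field name assumed; check GreedyRewriteConfig).
  GreedyRewriteConfig config;
  config.fold = false;
  (void)applyPatternsGreedily(op, patterns, config);
}
```

The first call is the mechanical find-and-replace; the second shows the point of the decoupling, i.e. running the greedy pattern driver without paying for repeated folding attempts.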

//===- TestGpuRewrite.cpp - Test passes for GPU rewrite patterns --------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains test passes for GPU rewrite patterns and for lowering
// the gpu.subgroup_reduce op.
//
//===----------------------------------------------------------------------===//

#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/GPU/Transforms/Passes.h"
#include "mlir/Dialect/Index/IR/IndexDialect.h"
#include "mlir/Dialect/MemRef/IR/MemRef.h"
#include "mlir/Dialect/Vector/IR/VectorOps.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Transforms/GreedyPatternRewriteDriver.h"

using namespace mlir;

namespace {
struct TestGpuRewritePass
    : public PassWrapper<TestGpuRewritePass, OperationPass<ModuleOp>> {
  MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(TestGpuRewritePass)

  void getDependentDialects(DialectRegistry &registry) const override {
    registry.insert<arith::ArithDialect, func::FuncDialect, index::IndexDialect,
                    memref::MemRefDialect>();
  }
  StringRef getArgument() const final { return "test-gpu-rewrite"; }
  StringRef getDescription() const final {
    return "Applies all rewrite patterns within the GPU dialect.";
  }
  void runOnOperation() override {
    RewritePatternSet patterns(&getContext());
    populateGpuRewritePatterns(patterns);
    (void)applyPatternsGreedily(getOperation(), std::move(patterns));
  }
};

struct TestGpuSubgroupReduceLoweringPass
    : public PassWrapper<TestGpuSubgroupReduceLoweringPass,
                         OperationPass<ModuleOp>> {
  MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(
      TestGpuSubgroupReduceLoweringPass)

  TestGpuSubgroupReduceLoweringPass() = default;
  TestGpuSubgroupReduceLoweringPass(
      const TestGpuSubgroupReduceLoweringPass &pass)
      : PassWrapper(pass) {}

  void getDependentDialects(DialectRegistry &registry) const override {
    registry.insert<arith::ArithDialect, vector::VectorDialect>();
  }
  StringRef getArgument() const final {
    return "test-gpu-subgroup-reduce-lowering";
  }
  StringRef getDescription() const final {
    return "Applies gpu.subgroup_reduce lowering patterns.";
  }

  Option<bool> expandToShuffles{
      *this, "expand-to-shuffles",
      llvm::cl::desc("Expand subgroup_reduce ops to shuffle ops."),
      llvm::cl::init(false)};

  void runOnOperation() override {
    RewritePatternSet patterns(&getContext());

    // Since both pattern sets match on the same ops, set higher benefit to
    // perform fewer failing matches.
    populateGpuBreakDownSubgroupReducePatterns(patterns,
                                               /*maxShuffleBitwidth=*/32,
                                               PatternBenefit(2));
    if (expandToShuffles) {
      populateGpuLowerSubgroupReduceToShufflePatterns(
          patterns, /*subgroupSize=*/32, /*shuffleBitwidth=*/32);
      populateGpuLowerClusteredSubgroupReduceToShufflePatterns(
          patterns, /*subgroupSize=*/32, /*shuffleBitwidth=*/32);
    }

    (void)applyPatternsGreedily(getOperation(), std::move(patterns));
  }
};
} // namespace

namespace mlir {
void registerTestGpuLoweringPasses() {
  PassRegistration<TestGpuRewritePass>();
  PassRegistration<TestGpuSubgroupReduceLoweringPass>();
}
} // namespace mlir
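
For context on the registration hook above: a test driver along the lines of `mlir-opt` makes these passes reachable from the command line (`-test-gpu-rewrite`, `-test-gpu-subgroup-reduce-lowering`) by calling `registerTestGpuLoweringPasses()` before handing control to the tool's main entry point. A minimal, assumed sketch of such a driver (not the upstream `mlir-opt.cpp`) could look like this:

```cpp
#include "mlir/IR/DialectRegistry.h"
#include "mlir/InitAllDialects.h"
#include "mlir/Tools/mlir-opt/MlirOptMain.h"

namespace mlir {
// Defined in TestGpuRewrite.cpp above.
void registerTestGpuLoweringPasses();
} // namespace mlir

int main(int argc, char **argv) {
  // Make all upstream dialects loadable so the test IR parses.
  mlir::DialectRegistry registry;
  mlir::registerAllDialects(registry);
  // Expose -test-gpu-rewrite and -test-gpu-subgroup-reduce-lowering.
  mlir::registerTestGpuLoweringPasses();
  return mlir::asMainReturnCode(
      mlir::MlirOptMain(argc, argv, "GPU rewrite test driver\n", registry));
}
```

With such a driver built, the two passes can then be exercised on `.mlir` test inputs the same way the in-tree lit tests do.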