This PR adds the UB-to-LLVM/SPIR-V conversion pass to some pipelines and tests. This is in preparation for introducing the generation of `ub.poison` in Vector dialect transformations (the first one is in https://github.com/llvm/llvm-project/pull/125613). It should effectively be NFC at this point.
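For illustration only (not part of this change), here is a minimal sketch of how the UB-to-LLVM conversion is appended near the end of a lowering pipeline. It reuses the same `createUBToLLVMConversionPass()` and `createReconcileUnrealizedCastsPass()` factories that the sparsifier pipeline below already calls; the helper name `appendFinalLowering` is hypothetical.

```cpp
#include "mlir/Conversion/Passes.h"
#include "mlir/Pass/PassManager.h"

// Hypothetical helper: run the UB-to-LLVM conversion just before the final
// cast reconciliation, so any `ub.poison` produced by earlier Vector
// transformations is lowered to `llvm.mlir.poison` rather than leaking an
// unconverted `ub` op into the LLVM dialect module.
static void appendFinalLowering(mlir::OpPassManager &pm) {
  pm.addPass(mlir::createUBToLLVMConversionPass());
  pm.addPass(mlir::createReconcileUnrealizedCastsPass());
}
```

The sparsifier pipeline in the file below uses the same two passes in the same relative order at the end of `buildSparsifier`.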
SparseTensorPipelines.cpp (121 lines, 5.3 KiB, C++)
//===- SparseTensorPipelines.cpp - Pipelines for sparse tensor code -------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "mlir/Conversion/Passes.h"
#include "mlir/Dialect/Arith/Transforms/Passes.h"
#include "mlir/Dialect/Bufferization/Transforms/Passes.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/GPU/IR/GPUDialect.h"
#include "mlir/Dialect/GPU/Transforms/Passes.h"
#include "mlir/Dialect/Linalg/Passes.h"
#include "mlir/Dialect/MemRef/Transforms/Passes.h"
#include "mlir/Dialect/SparseTensor/Pipelines/Passes.h"
#include "mlir/Dialect/SparseTensor/Transforms/Passes.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Transforms/Passes.h"

//===----------------------------------------------------------------------===//
// Pipeline implementation.
//===----------------------------------------------------------------------===//

void mlir::sparse_tensor::buildSparsifier(OpPassManager &pm,
                                          const SparsifierOptions &options) {
  // Rewrite named linalg ops into generic ops and apply fusion.
  pm.addNestedPass<func::FuncOp>(createLinalgGeneralizeNamedOpsPass());
  pm.addNestedPass<func::FuncOp>(createLinalgElementwiseOpFusionPass());

  // Sparsification and bufferization mini-pipeline.
  pm.addPass(createSparsificationAndBufferizationPass(
      getBufferizationOptionsForSparsification(
          options.testBufferizationAnalysisOnly),
      options.sparsificationOptions(), options.createSparseDeallocs,
      options.enableRuntimeLibrary, options.enableBufferInitialization,
      options.vectorLength,
      /*enableVLAVectorization=*/options.armSVE,
      /*enableSIMDIndex32=*/options.force32BitVectorIndices,
      options.enableGPULibgen,
      options.sparsificationOptions().sparseEmitStrategy,
      options.sparsificationOptions().parallelizationStrategy));

  // Bail-early for test setup.
  if (options.testBufferizationAnalysisOnly)
    return;

  // Storage specifier lowering and bufferization wrap-up.
  pm.addPass(createStorageSpecifierToLLVMPass());
  pm.addNestedPass<func::FuncOp>(createCanonicalizerPass());

  // GPU code generation.
  const bool gpuCodegen = options.gpuTriple.hasValue();
  if (gpuCodegen) {
    pm.addPass(createSparseGPUCodegenPass());
    pm.addNestedPass<gpu::GPUModuleOp>(createStripDebugInfoPass());
    pm.addNestedPass<gpu::GPUModuleOp>(createConvertSCFToCFPass());
    pm.addNestedPass<gpu::GPUModuleOp>(createConvertGpuOpsToNVVMOps());
  }

  // Progressively lower to LLVM. Note that the convert-vector-to-llvm
  // pass is repeated on purpose.
  // TODO(springerm): Add sparse support to the BufferDeallocation pass and add
  // it to this pipeline.
  pm.addNestedPass<func::FuncOp>(createConvertLinalgToLoopsPass());
  pm.addNestedPass<func::FuncOp>(createConvertVectorToSCFPass());
  pm.addNestedPass<func::FuncOp>(memref::createExpandReallocPass());
  pm.addNestedPass<func::FuncOp>(createConvertSCFToCFPass());
  pm.addPass(memref::createExpandStridedMetadataPass());
  pm.addPass(createLowerAffinePass());
  pm.addPass(
      createConvertVectorToLLVMPass(options.convertVectorToLLVMOptions()));
  pm.addPass(createFinalizeMemRefToLLVMConversionPass());
  pm.addNestedPass<func::FuncOp>(createConvertComplexToStandardPass());
  pm.addNestedPass<func::FuncOp>(arith::createArithExpandOpsPass());
  pm.addNestedPass<func::FuncOp>(createConvertMathToLLVMPass());
  pm.addPass(createConvertMathToLibmPass());
  pm.addPass(createConvertComplexToLibmPass());
  pm.addPass(
      createConvertVectorToLLVMPass(options.convertVectorToLLVMOptions()));
  pm.addPass(createConvertComplexToLLVMPass());
  pm.addPass(
      createConvertVectorToLLVMPass(options.convertVectorToLLVMOptions()));
  pm.addPass(createConvertFuncToLLVMPass());
  pm.addPass(createArithToLLVMConversionPass());
  pm.addPass(createConvertControlFlowToLLVMPass());

  // Finalize GPU code generation.
  if (gpuCodegen) {
    GpuNVVMAttachTargetOptions nvvmTargetOptions;
    nvvmTargetOptions.triple = options.gpuTriple;
    nvvmTargetOptions.chip = options.gpuChip;
    nvvmTargetOptions.features = options.gpuFeatures;
    pm.addPass(createGpuNVVMAttachTarget(nvvmTargetOptions));
    pm.addPass(createGpuToLLVMConversionPass());
    GpuModuleToBinaryPassOptions gpuModuleToBinaryPassOptions;
    gpuModuleToBinaryPassOptions.compilationTarget = options.gpuFormat;
    pm.addPass(createGpuModuleToBinaryPass(gpuModuleToBinaryPassOptions));
  }

  // Convert poison values.
  pm.addPass(createUBToLLVMConversionPass());

  // Ensure all casts are realized.
  pm.addPass(createReconcileUnrealizedCastsPass());
}

//===----------------------------------------------------------------------===//
// Pipeline registration.
//===----------------------------------------------------------------------===//

void mlir::sparse_tensor::registerSparseTensorPipelines() {
  PassPipelineRegistration<SparsifierOptions>(
      "sparsifier",
      "The standard pipeline for taking sparsity-agnostic IR using the"
      " sparse-tensor type, and lowering it to LLVM IR with concrete"
      " representations and algorithms for sparse tensors.",
      buildSparsifier);
}