llvm/mlir/lib/Conversion/GPUCommon/GPUToLLVMConversion.cpp

//===- GPUToLLVMConversion.cpp - MLIR GPU lowering passes -----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements a pass to convert the gpu.launch_func op into a
// sequence of GPU runtime calls. As most GPU runtimes do not have a stable
// published ABI, this pass uses a slim runtime layer that builds on top of the
// public API from GPU runtime headers.
//
//===----------------------------------------------------------------------===//
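
// Illustrative sketch (an assumption for exposition, not a definitive list):
// the generated `llvm.call` ops bind to C wrapper symbols modeled on MLIR's
// GPU runtime wrappers, roughly of the shape
//
//   extern "C" void *mgpuStreamCreate();
//   extern "C" void mgpuStreamSynchronize(void *stream);
//   extern "C" void mgpuStreamDestroy(void *stream);
//   extern "C" void mgpuLaunchKernel(void *function, intptr_t gridX,
//                                    intptr_t gridY, intptr_t gridZ,
//                                    intptr_t blockX, intptr_t blockY,
//                                    intptr_t blockZ, int32_t smem,
//                                    void *stream, void **params,
//                                    void **extra);
//
// The exact names and signatures are determined by the runtime wrapper
// library, not by this file.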

#include "mlir/Conversion/GPUCommon/GPUCommonPass.h"

#include "mlir/Conversion/ArithToLLVM/ArithToLLVM.h"
#include "mlir/Conversion/AsyncToLLVM/AsyncToLLVM.h"
#include "mlir/Conversion/ControlFlowToLLVM/ControlFlowToLLVM.h"
#include "mlir/Conversion/ConvertToLLVM/ToLLVMInterface.h"
#include "mlir/Conversion/ConvertToLLVM/ToLLVMPass.h"
#include "mlir/Conversion/FuncToLLVM/ConvertFuncToLLVM.h"
#include "mlir/Conversion/FuncToLLVM/ConvertFuncToLLVMPass.h"
#include "mlir/Conversion/LLVMCommon/ConversionTarget.h"
#include "mlir/Conversion/LLVMCommon/Pattern.h"
#include "mlir/Conversion/MemRefToLLVM/MemRefToLLVM.h"
#include "mlir/Conversion/VectorToLLVM/ConvertVectorToLLVM.h"
#include "mlir/Dialect/Async/IR/Async.h"
#include "mlir/Dialect/GPU/IR/GPUDialect.h"
#include "mlir/Dialect/GPU/Transforms/Passes.h"
#include "mlir/Dialect/LLVMIR/LLVMDialect.h"
#include "mlir/Dialect/MemRef/IR/MemRef.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/BuiltinTypes.h"

#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/FormatVariadic.h"

#define DEBUG_TYPE "gpu-to-llvm"

namespace mlir {
#define GEN_PASS_DEF_GPUTOLLVMCONVERSIONPASS
#include "mlir/Conversion/Passes.h.inc"
} // namespace mlir

using namespace mlir;

namespace {
} // namespace

void GpuToLLVMConversionPass::runOnOperation() {}

LLVM::CallOp FunctionCallBuilder::create(Location loc, OpBuilder &builder,
                                         ArrayRef<Value> arguments) const {}

// Corresponding to cusparseIndexType_t defined in cusparse.h.
static int32_t getCuSparseIndexTypeFrom(Type type) {}
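// Illustrative sketch of such a mapping (an assumption, not necessarily the
// implementation above); the numeric codes mirror cusparseIndexType_t
// (CUSPARSE_INDEX_16U = 1, CUSPARSE_INDEX_32I = 2, CUSPARSE_INDEX_64I = 3):
//
//   if (type.isInteger(16))
//     return 1; // CUSPARSE_INDEX_16U
//   if (type.isInteger(32))
//     return 2; // CUSPARSE_INDEX_32I
//   return 3; // CUSPARSE_INDEX_64I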

static int32_t getCuSparseLtDataTypeFrom(Type type) {}

// Corresponding to cudaDataType_t defined in CUDA library_types.h.
static int32_t getCuSparseDataTypeFrom(Type type) {}
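// Illustrative sketch (an assumption, not necessarily the implementation
// above); the numeric codes mirror cudaDataType_t from library_types.h,
// e.g. CUDA_R_32F = 0, CUDA_R_64F = 1, CUDA_R_16F = 2:
//
//   if (type.isF16())
//     return 2; // CUDA_R_16F
//   if (type.isF32())
//     return 0; // CUDA_R_32F
//   if (type.isF64())
//     return 1; // CUDA_R_64F
//   llvm_unreachable("unsupported element type");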

static gpu::Prune2To4SpMatFlag get2To4PruneFlag(Value spMat) {}

// TODO: We may want a disablement/warning at MLIR compile time: cuSPARSELt
// currently does not work for CUDA architectures < 8.0 and triggers a runtime
// error in the CUDA program. At a minimum, we should emit a warning when the
// target architecture is < 8.0 and the user still requests cuSPARSELt, and
// ensure that, when lowering the GPU sparse dialect to LLVM calls, the
// cuSPARSELt calls are disabled for CUDA architectures < 8.0.
static bool is2To4Sparsity(Value spMat) {}

static bool isSpMMCusparseLtOp(Value op) {}

// Returns whether all operands are of LLVM type.
static LogicalResult areAllLLVMTypes(Operation *op, ValueRange operands,
                                     ConversionPatternRewriter &rewriter) {}
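// A minimal sketch of such a check (an assumption, not necessarily the
// implementation above), using LLVM::isCompatibleType on each operand:
//
//   if (!llvm::all_of(operands, [](Value v) {
//         return LLVM::isCompatibleType(v.getType());
//       }))
//     return rewriter.notifyMatchFailure(
//         op, "cannot convert if operands aren't of LLVM type");
//   return success();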

static LogicalResult
isAsyncWithOneDependency(ConversionPatternRewriter &rewriter,
                         gpu::AsyncOpInterface op) {}

LogicalResult ConvertHostRegisterOpToGpuRuntimeCallPattern::matchAndRewrite(
    gpu::HostRegisterOp hostRegisterOp, OpAdaptor adaptor,
    ConversionPatternRewriter &rewriter) const {}

LogicalResult ConvertHostUnregisterOpToGpuRuntimeCallPattern::matchAndRewrite(
    gpu::HostUnregisterOp hostUnregisterOp, OpAdaptor adaptor,
    ConversionPatternRewriter &rewriter) const {}

LogicalResult ConvertAllocOpToGpuRuntimeCallPattern::matchAndRewrite(
    gpu::AllocOp allocOp, OpAdaptor adaptor,
    ConversionPatternRewriter &rewriter) const {}

LogicalResult ConvertDeallocOpToGpuRuntimeCallPattern::matchAndRewrite(
    gpu::DeallocOp deallocOp, OpAdaptor adaptor,
    ConversionPatternRewriter &rewriter) const {}

static bool isGpuAsyncTokenType(Value value) {}

// Converts !gpu.async.token operands of `async.yield` to runtime calls. The
// !gpu.async.token values are lowered to streams within an async.execute
// region, but are passed as events between regions. For each !gpu.async.token
// operand, we create an event and record it on the stream.
LogicalResult ConvertAsyncYieldToGpuRuntimeCallPattern::matchAndRewrite(
    async::YieldOp yieldOp, OpAdaptor adaptor,
    ConversionPatternRewriter &rewriter) const {}
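// Illustrative sketch of the effect (wrapper names modeled on MLIR's GPU
// runtime wrappers and are assumptions here): for a token lowered to `stream`,
// the pattern roughly emits
//
//   event = mgpuEventCreate();
//   mgpuEventRecord(event, stream);
//
// and yields `event` in place of the original !gpu.async.token.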

// Returns whether `value` is the result of an LLVM::CallOp to `functionName`.
static bool isDefinedByCallTo(Value value, StringRef functionName) {}
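// A minimal sketch (an assumption, not necessarily the implementation above):
//
//   if (auto call = value.getDefiningOp<LLVM::CallOp>())
//     return call.getCallee() && *call.getCallee() == functionName;
//   return false;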

// Converts `gpu.wait` to runtime calls. The converted op synchronizes the host
// with the stream/event operands. The operands are destroyed; that is, it is
// assumed that they are not used afterwards or elsewhere. Otherwise we will
// get a runtime error. Eventually, we should guarantee this property.
LogicalResult ConvertWaitOpToGpuRuntimeCallPattern::matchAndRewrite(
    gpu::WaitOp waitOp, OpAdaptor adaptor,
    ConversionPatternRewriter &rewriter) const {}
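// Illustrative sketch of the generated calls (wrapper names modeled on MLIR's
// GPU runtime wrappers and are assumptions here):
//
//   mgpuStreamSynchronize(stream);  // for each stream operand
//   mgpuStreamDestroy(stream);
//   mgpuEventSynchronize(event);    // for each event operand
//   mgpuEventDestroy(event);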

// Converts `gpu.wait async` to runtime calls. The converted op creates a new
// stream that is synchronized with the stream/event operands. The operands are
// destroyed; that is, it is assumed that they are not used afterwards or
// elsewhere. Otherwise we will get a runtime error. Eventually, we should
// guarantee this property.
LogicalResult ConvertWaitAsyncOpToGpuRuntimeCallPattern::matchAndRewrite(
    gpu::WaitOp waitOp, OpAdaptor adaptor,
    ConversionPatternRewriter &rewriter) const {}
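// Illustrative sketch of the generated calls (wrapper names modeled on MLIR's
// GPU runtime wrappers and are assumptions here):
//
//   stream = mgpuStreamCreate();
//   mgpuStreamWaitEvent(stream, event);  // for each event operand
//   mgpuEventDestroy(event);
//
// The new `stream` then stands in for the op's !gpu.async.token result.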

// Legalize the op's operands.
LogicalResult LegalizeLaunchFuncOpPattern::matchAndRewrite(
    gpu::LaunchFuncOp launchOp, OpAdaptor adaptor,
    ConversionPatternRewriter &rewriter) const {}

static Value bitAndAddrspaceCast(Location loc,
                                 ConversionPatternRewriter &rewriter,
                                 LLVM::LLVMPointerType destinationType,
                                 Value sourcePtr,
                                 const LLVMTypeConverter &typeConverter) {}

LogicalResult ConvertMemcpyOpToGpuRuntimeCallPattern::matchAndRewrite(
    gpu::MemcpyOp memcpyOp, OpAdaptor adaptor,
    ConversionPatternRewriter &rewriter) const {}

LogicalResult ConvertMemsetOpToGpuRuntimeCallPattern::matchAndRewrite(
    gpu::MemsetOp memsetOp, OpAdaptor adaptor,
    ConversionPatternRewriter &rewriter) const {}

LogicalResult ConvertSetDefaultDeviceOpToGpuRuntimeCallPattern::matchAndRewrite(
    gpu::SetDefaultDeviceOp op, OpAdaptor adaptor,
    ConversionPatternRewriter &rewriter) const {}

template <typename T>
static Value genConstInt32From(OpBuilder &builder, Location loc, T tValue) {}

template <typename T>
static Value genConstFloat32From(OpBuilder &builder, Location loc, T tValue) {}

LogicalResult ConvertCreateDnTensorOpToGpuRuntimeCallPattern::matchAndRewrite(
    gpu::CreateDnTensorOp op, OpAdaptor adaptor,
    ConversionPatternRewriter &rewriter) const {}

LogicalResult ConvertDestroyDnTensorOpToGpuRuntimeCallPattern::matchAndRewrite(
    gpu::DestroyDnTensorOp op, OpAdaptor adaptor,
    ConversionPatternRewriter &rewriter) const {}

LogicalResult ConvertCreateCooOpToGpuRuntimeCallPattern::matchAndRewrite(
    gpu::CreateCooOp op, OpAdaptor adaptor,
    ConversionPatternRewriter &rewriter) const {}

LogicalResult ConvertCreateCooAoSOpToGpuRuntimeCallPattern::matchAndRewrite(
    gpu::CreateCooAoSOp op, OpAdaptor adaptor,
    ConversionPatternRewriter &rewriter) const {}

LogicalResult ConvertCreateCsrOpToGpuRuntimeCallPattern::matchAndRewrite(
    gpu::CreateCsrOp op, OpAdaptor adaptor,
    ConversionPatternRewriter &rewriter) const {}

LogicalResult ConvertCreate2To4SpMatOpToGpuRuntimeCallPattern::matchAndRewrite(
    gpu::Create2To4SpMatOp op, OpAdaptor adaptor,
    ConversionPatternRewriter &rewriter) const {}

LogicalResult ConvertDestroySpMatOpToGpuRuntimeCallPattern::matchAndRewrite(
    gpu::DestroySpMatOp op, OpAdaptor adaptor,
    ConversionPatternRewriter &rewriter) const {}

LogicalResult ConvertSpMVBufferSizeOpToGpuRuntimeCallPattern::matchAndRewrite(
    gpu::SpMVBufferSizeOp op, OpAdaptor adaptor,
    ConversionPatternRewriter &rewriter) const {}

LogicalResult ConvertSpMVOpToGpuRuntimeCallPattern::matchAndRewrite(
    gpu::SpMVOp op, OpAdaptor adaptor,
    ConversionPatternRewriter &rewriter) const {}

LogicalResult ConvertSpMMBufferSizeOpToGpuRuntimeCallPattern::matchAndRewrite(
    gpu::SpMMBufferSizeOp op, OpAdaptor adaptor,
    ConversionPatternRewriter &rewriter) const {}

LogicalResult ConvertSDDMMBufferSizeOpToGpuRuntimeCallPattern::matchAndRewrite(
    gpu::SDDMMBufferSizeOp op, OpAdaptor adaptor,
    ConversionPatternRewriter &rewriter) const {}

LogicalResult ConvertSpMMOpToGpuRuntimeCallPattern::matchAndRewrite(
    gpu::SpMMOp op, OpAdaptor adaptor,
    ConversionPatternRewriter &rewriter) const {}

template <typename T>
static void addOpaquePointerConversion(LLVMTypeConverter &converter) {}

LogicalResult ConvertSDDMMOpToGpuRuntimeCallPattern::matchAndRewrite(
    gpu::SDDMMOp op, OpAdaptor adaptor,
    ConversionPatternRewriter &rewriter) const {}

LogicalResult
ConvertSpGEMMCreateDescrOpToGpuRuntimeCallPattern::matchAndRewrite(
    gpu::SpGEMMCreateDescrOp op, OpAdaptor adaptor,
    ConversionPatternRewriter &rewriter) const {}

LogicalResult
ConvertSpGEMMDestroyDescrOpToGpuRuntimeCallPattern::matchAndRewrite(
    gpu::SpGEMMDestroyDescrOp op, OpAdaptor adaptor,
    ConversionPatternRewriter &rewriter) const {}

LogicalResult
ConvertSpGEMMWorkEstimationOrComputeOpToGpuRuntimeCallPattern::matchAndRewrite(
    gpu::SpGEMMWorkEstimationOrComputeOp op, OpAdaptor adaptor,
    ConversionPatternRewriter &rewriter) const {}

LogicalResult ConvertSpGEMMCopyOpToGpuRuntimeCallPattern::matchAndRewrite(
    gpu::SpGEMMCopyOp op, OpAdaptor adaptor,
    ConversionPatternRewriter &rewriter) const {}

LogicalResult ConvertSpMatGetSizeOpToGpuRuntimeCallPattern::matchAndRewrite(
    gpu::SpMatGetSizeOp op, OpAdaptor adaptor,
    ConversionPatternRewriter &rewriter) const {}

LogicalResult ConvertSetCsrPointersOpToGpuRuntimeCallPattern::matchAndRewrite(
    gpu::SetCsrPointersOp op, OpAdaptor adaptor,
    ConversionPatternRewriter &rewriter) const {}

LogicalResult ConvertCreateCscOpToGpuRuntimeCallPattern::matchAndRewrite(
    gpu::CreateCscOp op, OpAdaptor adaptor,
    ConversionPatternRewriter &rewriter) const {}

LogicalResult ConvertCreateBsrOpToGpuRuntimeCallPattern::matchAndRewrite(
    gpu::CreateBsrOp op, OpAdaptor adaptor,
    ConversionPatternRewriter &rewriter) const {}

void mlir::populateGpuToLLVMConversionPatterns(LLVMTypeConverter &converter,
                                               RewritePatternSet &patterns,
                                               bool kernelBarePtrCallConv) {}