//===- BufferizationOps.cpp - Bufferization dialect ops ------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/Dialect/Bufferization/IR/BufferizableOpInterface.h"
#include "mlir/Dialect/Bufferization/IR/Bufferization.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/MemRef/IR/MemRef.h"
#include "mlir/Dialect/SparseTensor/IR/SparseTensor.h"
#include "mlir/Dialect/Tensor/IR/Tensor.h"
#include "mlir/IR/Matchers.h"
#include <optional>

using namespace mlir;
using namespace mlir::bufferization;

//===----------------------------------------------------------------------===//
// Helper functions
//===----------------------------------------------------------------------===//

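/// Try to cast the given ranked memref value to the given ranked memref
/// destination type; if a direct `memref.cast` is not valid, fall back to
/// allocating a buffer of the destination type and copying the contents.
/// (Summary inferred from the function name and its callers; the header
/// declaration carries the authoritative contract.)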
FailureOr<Value> mlir::bufferization::castOrReallocMemRefValue(
    OpBuilder &b, Value value, MemRefType destType,
    const BufferizationOptions &options) {}

/// Try to fold to_memref(to_tensor(x)). If x's type and the result type of the
/// to_memref op are different, a memref.cast is needed.
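/// A sketch of the exact-type case (op syntax abbreviated):
/// ```mlir
/// %t = bufferization.to_tensor %x : memref<4xf32>
/// %m = bufferization.to_memref %t : memref<4xf32>
/// ```
/// Here %m folds to a direct use of %x; no cast is needed.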
LogicalResult mlir::bufferization::foldToMemrefToTensorPair(
    RewriterBase &rewriter, ToMemrefOp toMemref,
    const BufferizationOptions &options) {}

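/// Populate `dynamicDims` with one size value per dynamic dimension of
/// `shapedValue`, presumably materialized via `tensor.dim` / `memref.dim` ops
/// at `loc`. (Summary inferred from the signature; see the header declaration
/// for the authoritative contract.)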
void mlir::bufferization::populateDynamicDimSizes(
    OpBuilder &b, Location loc, Value shapedValue,
    SmallVector<Value> &dynamicDims) {}

//===----------------------------------------------------------------------===//
// AllocTensorOp
//===----------------------------------------------------------------------===//

LogicalResult AllocTensorOp::bufferize(RewriterBase &rewriter,
                                       const BufferizationOptions &options) {}

bool AllocTensorOp::resultBufferizesToMemoryWrite(OpResult opResult,
                                                  const AnalysisState &state) {}

bool AllocTensorOp::bufferizesToMemoryRead(OpOperand &opOperand,
                                           const AnalysisState &state) {}

bool AllocTensorOp::bufferizesToMemoryWrite(OpOperand &opOperand,
                                            const AnalysisState &state) {}

AliasingValueList AllocTensorOp::getAliasingValues(OpOperand &opOperand,
                                                   const AnalysisState &state) {}

FailureOr<BaseMemRefType>
AllocTensorOp::getBufferType(Value value, const BufferizationOptions &options,
                             SmallVector<Value> &invocationStack) {}

LogicalResult AllocTensorOp::verify() {}

void AllocTensorOp::build(OpBuilder &builder, OperationState &result,
                          RankedTensorType type, ValueRange dynamicSizes) {}

void AllocTensorOp::build(OpBuilder &builder, OperationState &result,
                          RankedTensorType type, ValueRange dynamicSizes,
                          Value copy) {}

void AllocTensorOp::build(OpBuilder &builder, OperationState &result,
                          TensorType type, ValueRange dynamicSizes, Value copy,
                          IntegerAttr memorySpace) {}

namespace {
/// Change the type of the result of a `bufferization.alloc_tensor` by making
/// the result type statically sized in dimensions that were defined as dynamic
/// in the original operation, but whose size was defined by an `arith.constant`
/// op. For example:
///
///  %c5 = arith.constant 5: index
///  %0 = bufferization.alloc_tensor(%arg0, %c5) : tensor<?x?xf32>
///
///  to
///
///  %0 = bufferization.alloc_tensor(%arg0) : tensor<?x5xf32>
struct ReplaceStaticShapeDims : OpRewritePattern<AllocTensorOp> {};

/// Fold `tensor.dim` of an `alloc_tensor` result to the corresponding static
/// size or dynamic size operand of the `alloc_tensor` op.
struct FoldDimOfAllocTensorOp : public OpRewritePattern<tensor::DimOp> {};
} // namespace

void AllocTensorOp::getCanonicalizationPatterns(RewritePatternSet &results,
                                                MLIRContext *ctx) {}

LogicalResult AllocTensorOp::reifyResultShapes(
    OpBuilder &builder, ReifiedRankedShapedTypeDims &reifiedReturnShapes) {}

ParseResult AllocTensorOp::parse(OpAsmParser &parser, OperationState &result) {}

void AllocTensorOp::print(OpAsmPrinter &p) {}

Value AllocTensorOp::getDynamicSize(OpBuilder &b, unsigned idx) {}

//===----------------------------------------------------------------------===//
// CloneOp
//===----------------------------------------------------------------------===//

OpFoldResult CloneOp::fold(FoldAdaptor adaptor) {}

namespace {

/// Merge the clone and its source (by converting the clone to a cast) when
/// possible.
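/// A sketch of the intended rewrite (the pattern's legality checks, e.g. that
/// no conflicting write or dealloc interferes, are elided here):
/// ```mlir
/// %1 = bufferization.clone %0 : memref<?xf32> to memref<4xf32>
/// ```
/// becomes
/// ```mlir
/// %1 = memref.cast %0 : memref<?xf32> to memref<4xf32>
/// ```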
struct SimplifyClones : public OpRewritePattern<CloneOp> {};

} // namespace

void CloneOp::getCanonicalizationPatterns(RewritePatternSet &results,
                                          MLIRContext *context) {}

//===----------------------------------------------------------------------===//
// DeallocTensorOp
//===----------------------------------------------------------------------===//

LogicalResult DeallocTensorOp::bufferize(RewriterBase &rewriter,
                                         const BufferizationOptions &options) {}

//===----------------------------------------------------------------------===//
// MaterializeInDestinationOp
//===----------------------------------------------------------------------===//

bool MaterializeInDestinationOp::bufferizesToMemoryRead(
    OpOperand &opOperand, const AnalysisState &state) {}

bool MaterializeInDestinationOp::bufferizesToMemoryWrite(
    OpOperand &opOperand, const AnalysisState &state) {}

bool MaterializeInDestinationOp::mustBufferizeInPlace(
    OpOperand &opOperand, const AnalysisState &state) {}

AliasingValueList
MaterializeInDestinationOp::getAliasingValues(OpOperand &opOperand,
                                              const AnalysisState &state) {}

LogicalResult
MaterializeInDestinationOp::bufferize(RewriterBase &rewriter,
                                      const BufferizationOptions &options) {}

bool MaterializeInDestinationOp::bufferizesToElementwiseAccess(
    const AnalysisState &state, ArrayRef<OpOperand *> opOperands) {}

LogicalResult MaterializeInDestinationOp::reifyResultShapes(
    OpBuilder &builder, ReifiedRankedShapedTypeDims &reifiedReturnShapes) {}

Value MaterializeInDestinationOp::buildSubsetExtraction(OpBuilder &builder,
                                                        Location loc) {}

bool MaterializeInDestinationOp::isEquivalentSubset(
    Value candidate, function_ref<bool(Value, Value)> equivalenceFn) {}

SmallVector<Value>
MaterializeInDestinationOp::getValuesNeededToBuildSubsetExtraction() {}

OpOperand &MaterializeInDestinationOp::getSourceOperand() {}

bool MaterializeInDestinationOp::operatesOnEquivalentSubset(
    SubsetOpInterface subsetOp,
    function_ref<bool(Value, Value)> equivalenceFn) {}

bool MaterializeInDestinationOp::operatesOnDisjointSubset(
    SubsetOpInterface subsetOp,
    function_ref<bool(Value, Value)> equivalenceFn) {}

LogicalResult MaterializeInDestinationOp::verify() {}

void MaterializeInDestinationOp::build(OpBuilder &builder,
                                       OperationState &state, Value source,
                                       Value dest) {}

bool MaterializeInDestinationOp::isWritable(Value value,
                                            const AnalysisState &state) {}

MutableOperandRange MaterializeInDestinationOp::getDpsInitsMutable() {}

void MaterializeInDestinationOp::getEffects(
    SmallVectorImpl<SideEffects::EffectInstance<MemoryEffects::Effect>>
        &effects) {}

//===----------------------------------------------------------------------===//
// ToTensorOp
//===----------------------------------------------------------------------===//

bool ToTensorOp::isWritable(Value value, const AnalysisState &state) {}

OpFoldResult ToTensorOp::fold(FoldAdaptor) {}

namespace {
/// Replace `tensor.dim` of a `to_tensor` result with `memref.dim` of the
/// source memref.
struct DimOfToTensorFolder : public OpRewritePattern<tensor::DimOp> {};
} // namespace

void ToTensorOp::getCanonicalizationPatterns(RewritePatternSet &results,
                                             MLIRContext *context) {}

//===----------------------------------------------------------------------===//
// ToMemrefOp
//===----------------------------------------------------------------------===//

OpFoldResult ToMemrefOp::fold(FoldAdaptor) {}

namespace {

/// Replace tensor.cast + to_memref by to_memref + memref.cast.
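/// A sketch (types are illustrative):
/// ```mlir
/// %0 = tensor.cast %t : tensor<?xf32> to tensor<4xf32>
/// %m = bufferization.to_memref %0 : memref<4xf32>
/// ```
/// is rewritten to
/// ```mlir
/// %0 = bufferization.to_memref %t : memref<?xf32>
/// %m = memref.cast %0 : memref<?xf32> to memref<4xf32>
/// ```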
struct ToMemrefOfCast : public OpRewritePattern<ToMemrefOp> {};

/// Canonicalize bufferization.to_tensor + bufferization.to_memref. Insert a
/// cast if necessary.
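/// A sketch of the case where a cast is needed (types are illustrative):
/// ```mlir
/// %t = bufferization.to_tensor %x : memref<4xf32>
/// %m = bufferization.to_memref %t : memref<?xf32>
/// ```
/// becomes
/// ```mlir
/// %m = memref.cast %x : memref<4xf32> to memref<?xf32>
/// ```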
struct ToMemrefToTensorFolding : public OpRewritePattern<ToMemrefOp> {};

/// Fold a load on a to_memref operation into a tensor.extract on the
/// corresponding tensor.
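/// A sketch (types are illustrative):
/// ```mlir
/// %m = bufferization.to_memref %t : memref<?xf32>
/// %v = memref.load %m[%i] : memref<?xf32>
/// ```
/// folds to
/// ```mlir
/// %v = tensor.extract %t[%i] : tensor<?xf32>
/// ```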
struct LoadOfToMemref : public OpRewritePattern<memref::LoadOp> {};

/// Fold dim of a to_memref into the dim of the tensor.
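/// A sketch (types are illustrative):
/// ```mlir
/// %m = bufferization.to_memref %t : memref<?xf32>
/// %d = memref.dim %m, %c0 : memref<?xf32>
/// ```
/// folds to
/// ```mlir
/// %d = tensor.dim %t, %c0 : tensor<?xf32>
/// ```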
struct DimOfCastOp : public OpRewritePattern<memref::DimOp> {};

} // namespace

void ToMemrefOp::getCanonicalizationPatterns(RewritePatternSet &results,
                                             MLIRContext *context) {}

LogicalResult ToMemrefOp::bufferize(RewriterBase &rewriter,
                                    const BufferizationOptions &options) {}

std::optional<Operation *> CloneOp::buildDealloc(OpBuilder &builder,
                                                 Value alloc) {}

std::optional<Value> CloneOp::buildClone(OpBuilder &builder, Value alloc) {}

//===----------------------------------------------------------------------===//
// DeallocOp
//===----------------------------------------------------------------------===//

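/// Infer the result types of a `bufferization.dealloc`: presumably one `i1`
/// (the updated ownership condition) per retained memref. (Inferred from the
/// op's documented semantics; the ODS definition is authoritative.)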
LogicalResult DeallocOp::inferReturnTypes(
    MLIRContext *context, std::optional<::mlir::Location> location,
    ValueRange operands, DictionaryAttr attributes, OpaqueProperties properties,
    RegionRange regions, SmallVectorImpl<Type> &inferredReturnTypes) {}

LogicalResult DeallocOp::verify() {}

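/// Rewrite `deallocOp` to use the given memref and condition operand lists,
/// presumably returning failure when nothing changed so that callers can
/// report whether a rewrite happened. (Inferred from the signature and its
/// uses in the patterns below.)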
static LogicalResult updateDeallocIfChanged(DeallocOp deallocOp,
                                            ValueRange memrefs,
                                            ValueRange conditions,
                                            PatternRewriter &rewriter) {}

namespace {

/// Remove duplicate values in the list of memrefs to be deallocated. The
/// corresponding condition values must be kept in sync: the two conditions
/// might not cover the same set of cases, so in that case they are combined
/// by computing their disjunction.
/// Example:
/// ```mlir
/// bufferization.dealloc (%arg0, %arg0 : ...) if (%arg1, %arg2)
/// ```
/// is canonicalized to
/// ```mlir
/// %0 = arith.ori %arg1, %arg2 : i1
/// bufferization.dealloc (%arg0 : memref<2xi32>) if (%0)
/// ```
struct DeallocRemoveDuplicateDeallocMemrefs
    : public OpRewritePattern<DeallocOp> {};

/// Remove duplicate values in the list of retained memrefs. We need to make
/// sure the corresponding result condition value is replaced properly.
/// Example:
/// ```mlir
/// %0:2 = bufferization.dealloc retain (%arg3, %arg3 : ...)
/// ```
/// is canonicalized to
/// ```mlir
/// %0 = bufferization.dealloc retain (%arg3 : memref<2xi32>)
/// ```
struct DeallocRemoveDuplicateRetainedMemrefs
    : public OpRewritePattern<DeallocOp> {};

/// Erase deallocation operations where the variadic list of memrefs to
/// deallocate is empty. Example:
/// ```mlir
/// %0 = bufferization.dealloc retain (%arg0: memref<2xi32>)
/// ```
struct EraseEmptyDealloc : public OpRewritePattern<DeallocOp> {};

/// Removes memrefs from the deallocation list if their associated condition is
/// always 'false'.
///
/// Example:
/// ```mlir
/// bufferization.dealloc (%arg0, %arg1 : memref<2xi32>, memref<2xi32>)
///                           if (%arg2, %false)
/// ```
/// becomes
/// ```mlir
/// bufferization.dealloc (%arg0 : memref<2xi32>) if (%arg2)
/// ```
struct EraseAlwaysFalseDealloc : public OpRewritePattern<DeallocOp> {};

/// The `memref.extract_strided_metadata` op is often inserted to get the base
/// memref if the operand is not already guaranteed to be the result of a memref
/// allocation operation. This canonicalization pattern removes this extraction
/// operation if the operand is now produced by an allocation operation (e.g.,
/// due to other canonicalizations simplifying the IR).
///
/// Example:
/// ```mlir
/// %alloc = memref.alloc() : memref<2xi32>
/// %base_memref, %offset, %size, %stride = memref.extract_strided_metadata
///   %alloc : memref<2xi32> -> memref<i32>, index, index, index
/// bufferization.dealloc (%base_memref : memref<i32>) if (%cond)
/// ```
/// is canonicalized to
/// ```mlir
/// %alloc = memref.alloc() : memref<2xi32>
/// bufferization.dealloc (%alloc : memref<2xi32>) if (%cond)
/// ```
struct SkipExtractMetadataOfAlloc : public OpRewritePattern<DeallocOp> {};

/// Removes pairs of `bufferization.dealloc` and alloc operations if there is no
/// other user of the allocated value and the allocating operation can be safely
/// removed. If the same value is present multiple times, this pattern relies on
/// other canonicalization patterns to remove the duplicate first.
///
/// Example:
/// ```mlir
/// %alloc = memref.alloc() : memref<2xi32>
/// bufferization.dealloc (%alloc, %arg0 : ...) if (%true, %true)
/// ```
/// is canonicalized to
/// ```mlir
/// bufferization.dealloc (%arg0 : ...) if (%true)
/// ```
struct RemoveAllocDeallocPairWhenNoOtherUsers
    : public OpRewritePattern<DeallocOp> {};

} // anonymous namespace

void DeallocOp::getCanonicalizationPatterns(RewritePatternSet &results,
                                            MLIRContext *context) {}

void bufferization::populateDeallocOpCanonicalizationPatterns(
    RewritePatternSet &patterns, MLIRContext *context) {}

//===----------------------------------------------------------------------===//
// TableGen'd op method definitions
//===----------------------------------------------------------------------===//

#define GET_OP_CLASSES
#include "mlir/Dialect/Bufferization/IR/BufferizationOps.cpp.inc"