#include "mlir/Dialect/Affine/Analysis/LoopAnalysis.h"
#include "mlir/Analysis/SliceAnalysis.h"
#include "mlir/Dialect/Affine/Analysis/AffineAnalysis.h"
#include "mlir/Dialect/Affine/Analysis/AffineStructures.h"
#include "mlir/Dialect/Affine/Analysis/NestedMatcher.h"
#include "mlir/Dialect/Affine/IR/AffineOps.h"
#include "mlir/Dialect/Affine/IR/AffineValueMap.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/Support/Debug.h"
#include <functional>
#include <numeric>
#include <optional>
#include <type_traits>
using namespace mlir;
using namespace mlir::affine;
#define DEBUG_TYPE …
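/// Populates `tripCountMap` and `tripCountOperands` with an affine expression
/// for the trip count of `forOp`, built by composing the loop's lower and
/// upper bound maps (essentially ceildiv(upper bound - lower bound, step)).
/// If the trip count is not expressible as an affine map of the bound
/// operands, `tripCountMap` is set to a null map.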
void mlir::affine::getTripCountMapAndOperands(
AffineForOp forOp, AffineMap *tripCountMap,
SmallVectorImpl<Value> *tripCountOperands) { … }
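/// Returns the trip count of `forOp` if it folds to a compile-time constant,
/// std::nullopt otherwise. Because this relies on affine expression analysis
/// (via getTripCountMapAndOperands), it can handle non-trivial bounds; e.g. a
/// loop from 0 to 128 with step 8 has a constant trip count of 16.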
std::optional<uint64_t> mlir::affine::getConstantTripCount(AffineForOp forOp) { … }
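/// Returns the greatest known integral divisor of `forOp`'s trip count, or 1
/// if nothing better is known. Affine expression analysis allows non-trivial
/// divisors to be found even for non-constant trip counts; e.g. a trip count
/// of the form 4 * %N is known to be divisible by 4.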
uint64_t mlir::affine::getLargestDivisorOfTripCount(AffineForOp forOp) { … }
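/// Returns true if the composed access index `index` does not depend on the
/// induction variable `iv`, i.e. the affine expression feeding `index` is
/// invariant with respect to `iv`.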
static bool isAccessIndexInvariant(Value iv, Value index) { … }
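/// Returns true if the affine load/store `memOp` is invariant with respect to
/// `forOp`'s induction variable, i.e. none of its access map results involve
/// the induction variable.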
template <typename LoadOrStoreOp>
bool mlir::affine::isInvariantAccess(LoadOrStoreOp memOp, AffineForOp forOp) { … }
template bool mlir::affine::isInvariantAccess(AffineReadOpInterface,
AffineForOp);
template bool mlir::affine::isInvariantAccess(AffineWriteOpInterface,
AffineForOp);
template bool mlir::affine::isInvariantAccess(AffineLoadOp, AffineForOp);
template bool mlir::affine::isInvariantAccess(AffineStoreOp, AffineForOp);
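/// Given an induction variable `iv` and a list of index-typed values
/// `indices`, returns the subset of `indices` that are invariant with respect
/// to (i.e. independent of) `iv`.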
DenseSet<Value> mlir::affine::getInvariantAccesses(Value iv,
ArrayRef<Value> indices) { … }
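/// Determines whether `memoryOp` accesses its memref contiguously along `iv`:
/// the access must be either invariant along `iv` or vary along exactly one
/// memref dimension. On success, that dimension is written to `*memRefDim`
/// (-1 when the access is invariant along `iv`).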
template <typename LoadOrStoreOp>
bool mlir::affine::isContiguousAccess(Value iv, LoadOrStoreOp memoryOp,
int *memRefDim) { … }
template bool mlir::affine::isContiguousAccess(Value iv,
AffineReadOpInterface loadOp,
int *memRefDim);
template bool mlir::affine::isContiguousAccess(Value iv,
AffineWriteOpInterface storeOp,
int *memRefDim);
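/// Returns true if the memref accessed by `memoryOp` has an element type that
/// is itself a vector type.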
template <typename LoadOrStoreOp>
static bool isVectorElement(LoadOrStoreOp memoryOp) { … }
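/// Callback used to impose an extra, caller-specified condition on individual
/// operations when checking a loop body for vectorizability.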
using VectorizableOpFun = std::function<bool(AffineForOp, Operation &)>;
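/// Common implementation behind the isVectorizableLoopBody overloads. Returns
/// true if `loop`'s body contains no conditionals, no ops whose operand or
/// result types cannot be vector elements, no ops with unknown regions,
/// nothing matched by `vectorTransferMatcher`, and no load/store of vector
/// elements; when `isVectorizableOp` is non-null, every load/store must also
/// satisfy it.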
static bool
isVectorizableLoopBodyWithOpCond(AffineForOp loop,
const VectorizableOpFun &isVectorizableOp,
NestedPattern &vectorTransferMatcher) { … }
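/// Returns true if `loop`'s body is vectorizable and every affine load/store
/// in it is contiguous along the loop (see isContiguousAccess), all varying
/// along the same memref dimension, which is written to `*memRefDim`.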
bool mlir::affine::isVectorizableLoopBody(
AffineForOp loop, int *memRefDim, NestedPattern &vectorTransferMatcher) { … }
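/// Overload that only checks structural vectorizability of the loop body,
/// without constraining how loads and stores index memory.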
bool mlir::affine::isVectorizableLoopBody(
AffineForOp loop, NestedPattern &vectorTransferMatcher) { … }
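/// Checks whether shifting each operation in `forOp`'s body by the
/// corresponding amount in `shifts` (one entry per body operation) would
/// preserve SSA dominance: an operation defining a value and all of that
/// value's users inside the body must be assigned the same shift.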
bool mlir::affine::isOpwiseShiftValid(AffineForOp forOp,
ArrayRef<uint64_t> shifts) { … }
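/// Checks whether (hyper-rectangular) tiling of the loop nest `loops` is
/// valid, i.e. no two tiles would depend on each other. Following Irigoin and
/// Triolet, this reduces to checking that no dependence between accesses in
/// the nest has a negative direction component at the loop depths being tiled.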
bool mlir::affine::isTilingValid(ArrayRef<AffineForOp> loops) { … }