//===- MatmulOptimizer.cpp -----------------------------------------------===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// #include "polly/MatmulOptimizer.h" #include "polly/DependenceInfo.h" #include "polly/Options.h" #include "polly/ScheduleTreeTransform.h" #include "polly/ScopInfo.h" #include "polly/ScopPass.h" #include "polly/Simplify.h" #include "polly/Support/GICHelper.h" #include "polly/Support/ISLTools.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/DenseSet.h" #include "llvm/ADT/Sequence.h" #include "llvm/ADT/SetOperations.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/StringRef.h" #include "llvm/ADT/iterator_range.h" #include "llvm/Analysis/TargetTransformInfo.h" #include "llvm/IR/DataLayout.h" #include "llvm/IR/Function.h" #include "llvm/IR/Module.h" #include "llvm/Support/CommandLine.h" #include "llvm/Support/Debug.h" #include "llvm/Support/TypeSize.h" #include "llvm/Support/raw_ostream.h" #include "isl/ctx.h" #include "isl/schedule_node.h" #include "isl/schedule_type.h" #include "isl/union_map.h" #include "isl/union_set.h" #include <algorithm> #include <cassert> #include <cmath> #include <cstdint> #include <string> #include <vector> #include "polly/Support/PollyDebug.h" #define DEBUG_TYPE … using namespace llvm; using namespace polly; namespace llvm { class Value; } static cl::opt<int> LatencyVectorFma( "polly-target-latency-vector-fma", cl::desc("The minimal number of cycles between issuing two " "dependent consecutive vector fused multiply-add " "instructions."), cl::Hidden, cl::init(8), cl::cat(PollyCategory)); static cl::opt<int> ThroughputVectorFma( "polly-target-throughput-vector-fma", cl::desc("A throughput of the processor floating-point arithmetic units " "expressed in the number of vector 
fused multiply-add " "instructions per clock cycle."), cl::Hidden, cl::init(1), cl::cat(PollyCategory)); static cl::opt<int> FirstCacheLevelSize( "polly-target-1st-cache-level-size", cl::desc("The size of the first cache level specified in bytes."), cl::Hidden, cl::init(-1), cl::cat(PollyCategory)); static cl::opt<int> FirstCacheLevelDefaultSize( "polly-target-1st-cache-level-default-size", cl::desc("The default size of the first cache level specified in bytes" " (if not enough were provided by the TargetTransformInfo)."), cl::Hidden, cl::init(32768), cl::cat(PollyCategory)); static cl::opt<int> SecondCacheLevelSize( "polly-target-2nd-cache-level-size", cl::desc("The size of the second level specified in bytes."), cl::Hidden, cl::init(-1), cl::cat(PollyCategory)); static cl::opt<int> SecondCacheLevelDefaultSize( "polly-target-2nd-cache-level-default-size", cl::desc("The default size of the second cache level specified in bytes" " (if not enough were provided by the TargetTransformInfo)."), cl::Hidden, cl::init(262144), cl::cat(PollyCategory)); // This option, along with --polly-target-2nd-cache-level-associativity, // --polly-target-1st-cache-level-size, and --polly-target-2nd-cache-level-size // represent the parameters of the target cache, which do not have typical // values that can be used by default. However, to apply the pattern matching // optimizations, we use the values of the parameters of Intel Core i7-3820 // SandyBridge in case the parameters are not specified or not provided by the // TargetTransformInfo. 
static cl::opt<int> FirstCacheLevelAssociativity( "polly-target-1st-cache-level-associativity", cl::desc("The associativity of the first cache level."), cl::Hidden, cl::init(-1), cl::cat(PollyCategory)); static cl::opt<int> FirstCacheLevelDefaultAssociativity( "polly-target-1st-cache-level-default-associativity", cl::desc("The default associativity of the first cache level" " (if not enough were provided by the TargetTransformInfo)."), cl::Hidden, cl::init(8), cl::cat(PollyCategory)); static cl::opt<int> SecondCacheLevelAssociativity( "polly-target-2nd-cache-level-associativity", cl::desc("The associativity of the second cache level."), cl::Hidden, cl::init(-1), cl::cat(PollyCategory)); static cl::opt<int> SecondCacheLevelDefaultAssociativity( "polly-target-2nd-cache-level-default-associativity", cl::desc("The default associativity of the second cache level" " (if not enough were provided by the TargetTransformInfo)."), cl::Hidden, cl::init(8), cl::cat(PollyCategory)); static cl::opt<int> VectorRegisterBitwidth( "polly-target-vector-register-bitwidth", cl::desc("The size in bits of a vector register (if not set, this " "information is taken from LLVM's target information."), cl::Hidden, cl::init(-1), cl::cat(PollyCategory)); static cl::opt<int> PollyPatternMatchingNcQuotient( "polly-pattern-matching-nc-quotient", cl::desc("Quotient that is obtained by dividing Nc, the parameter of the" "macro-kernel, by Nr, the parameter of the micro-kernel"), cl::Hidden, cl::init(256), cl::cat(PollyCategory)); static cl::opt<bool> PMBasedTCOpts("polly-tc-opt", cl::desc("Perform optimizations of tensor contractions based " "on pattern matching"), cl::init(false), cl::ZeroOrMore, cl::cat(PollyCategory)); static cl::opt<bool> PMBasedMMMOpts("polly-matmul-opt", cl::desc("Perform optimizations of matrix multiplications " "based on pattern matching"), cl::init(true), cl::ZeroOrMore, cl::cat(PollyCategory)); static cl::opt<int> OptComputeOut( "polly-tc-dependences-computeout", 
cl::desc("Bound the dependence analysis by a maximal amount of " "computational steps (0 means no bound)"), cl::Hidden, cl::init(500000), cl::ZeroOrMore, cl::cat(PollyCategory)); namespace { /// Parameters of the micro kernel. /// /// Parameters, which determine sizes of rank-1 (i.e., outer product) update /// used in the optimized matrix multiplication. struct MicroKernelParamsTy { … }; /// Parameters of the macro kernel. /// /// Parameters, which determine sizes of blocks of partitioned matrices /// used in the optimized matrix multiplication. struct MacroKernelParamsTy { … }; /// Parameters of the matrix multiplication operands. /// /// Parameters, which describe access relations that represent operands of the /// matrix multiplication. struct MatMulInfoTy { … }; /// Parameters of the tensor contraction operands. /// /// A general d-dimensional tensor T ∈ R ^ Nu0 x ... x Nud−1 can be defined /// as the set of scalar elements indexed by the set of indices u0 ... ud, /// /// T ≡ {Anu0...nud−1 ∈ R | (u0,...,ud−1) ∈ Nu0 x ... x Nud−1}. /// /// Let A, B, and C be dA, dB, and dC-dimensional tensors, respectively. /// Let the free and the contracted indices of the tensor A be grouped into /// two bundles I = i0...ir−1 and P = p0...pt−1, respectively. Similarly, /// the free and the contracted indices of B are grouped into bundles /// J = j0..js−1 and P and the free indices of C are grouped into /// bundles I and J. /// /// Tensor contraction (TC) of tensors A, B into tensor C can be represented as /// C(shuffle(I,J))=∑α·A(shuffle(I,P))·B(shuffle(P,J))+β·C(shuffle(I,J)), /// where ∑ is a summation over all contracted indices of P, /// α, β ∈ R, Npi is the length of the tensor dimension that corresponds /// to the index pi, A(shuffle(I, P)), B(shuffle(P, J)), C(shuffle(I, J)) are /// accesses to tensors A, B, C, respectively, /// shuffle(I, J), shuffle(I, P), and shuffle(P, J) are permutations of /// the enclosed indices. 
/// /// Multiplication of C(shuffle(I,J)) by β can be moved into a different SCoP /// statement by loop distribution, which is done by the isl scheduler. /// If β is not equal to one, the optimization of TC of Polly requires /// such a transformation. /// /// TCInfoTy contains parameters, which describe access relations that represent /// operands of the tensor contraction. struct TCInfoTy { … }; /// Create an isl::union_set, which describes the option of the form /// [isolate[] -> unroll[x]]. /// /// @param Ctx An isl::ctx, which is used to create the isl::union_set. static isl::union_set getUnrollIsolatedSetOptions(isl::ctx Ctx) { … } /// Permute the two dimensions of the isl map. /// /// Permute @p DstPos and @p SrcPos dimensions of the isl map @p Map that /// have type @p DimType. /// /// @param Map The isl map to be modified. /// @param DimType The type of the dimensions. /// @param DstPos The first dimension. /// @param SrcPos The second dimension. /// @return The modified map. static isl::map permuteDimensions(isl::map Map, isl::dim DimType, unsigned DstPos, unsigned SrcPos) { … } /// Check the form of the access relation. /// /// Check that the access relation @p AccMap has the form M[i][j], where i /// is a @p FirstPos and j is a @p SecondPos. /// /// @param AccMap The access relation to be checked. /// @param FirstPos The index of the input dimension that is mapped to /// the first output dimension. /// @param SecondPos The index of the input dimension that is mapped to the /// second output dimension. /// @return True in case @p AccMap has the expected form and false, /// otherwise. static bool isMatMulOperandAcc(isl::set Domain, isl::map AccMap, int &FirstPos, int &SecondPos) { … } /// Does the memory access represent a non-scalar operand of the matrix /// multiplication. /// /// Check that the memory access @p MemAccess is the read access to a non-scalar /// operand of the matrix multiplication or its result. 
/// /// @param MemAccess The memory access to be checked. /// @param MMI Parameters of the matrix multiplication operands. /// @return True in case the memory access represents the read access /// to a non-scalar operand of the matrix multiplication and /// false, otherwise. static bool isMatMulNonScalarReadAccess(MemoryAccess *MemAccess, MatMulInfoTy &MMI) { … } /// Check accesses to operands of the matrix multiplication. /// /// Check that accesses of the SCoP statement, which corresponds to /// the partial schedule @p PartialSchedule, are scalar in terms of loops /// containing the matrix multiplication, in case they do not represent /// accesses to the non-scalar operands of the matrix multiplication or /// its result. /// /// @param PartialSchedule The partial schedule of the SCoP statement. /// @param MMI Parameters of the matrix multiplication operands. /// @return True in case the corresponding SCoP statement /// represents matrix multiplication and false, /// otherwise. static bool containsOnlyMatrMultAcc(isl::map PartialSchedule, MatMulInfoTy &MMI) { … } /// Check for dependencies corresponding to the matrix multiplication. /// /// Check that there is only true dependence of the form /// S(..., k, ...) -> S(..., k + 1, …), where S is the SCoP statement /// represented by @p Schedule and k is @p Pos. Such a dependence corresponds /// to the dependency produced by the matrix multiplication. /// /// @param Schedule The schedule of the SCoP statement. /// @param D The SCoP dependencies. /// @param Pos The parameter to describe an acceptable true dependence. /// In case it has a negative value, try to determine its /// acceptable value. /// @return True in case dependencies correspond to the matrix multiplication /// and false, otherwise. static bool containsOnlyMatMulDep(isl::map Schedule, const Dependences *D, int &Pos) { … } /// Check if the SCoP statement could probably be optimized with analytical /// modeling. 
/// /// containsMatrMult tries to determine whether the following conditions /// are true: /// 1. The last memory access modeling an array, MA1, represents writing to /// memory and has the form S(..., i1, ..., i2, ...) -> M(i1, i2) or /// S(..., i2, ..., i1, ...) -> M(i1, i2), where S is the SCoP statement /// under consideration. /// 2. There is only one loop-carried true dependency, and it has the /// form S(..., i3, ...) -> S(..., i3 + 1, ...), and there are no /// loop-carried or anti dependencies. /// 3. SCoP contains three access relations, MA2, MA3, and MA4 that represent /// reading from memory and have the form S(..., i3, ...) -> M(i1, i3), /// S(..., i3, ...) -> M(i3, i2), S(...) -> M(i1, i2), respectively, /// and all memory accesses of the SCoP that are different from MA1, MA2, /// MA3, and MA4 have stride 0, if the innermost loop is exchanged with any /// of loops i1, i2 and i3. /// /// @param PartialSchedule The PartialSchedule that contains a SCoP statement /// to check. /// @param D The SCoP dependencies. /// @param MMI Parameters of the matrix multiplication operands. static bool containsMatrMult(isl::map PartialSchedule, const Dependences *D, MatMulInfoTy &MMI) { … } /// Permute two dimensions of the band node. /// /// Permute FirstDim and SecondDim dimensions of the Node. /// /// @param Node The band node to be modified. /// @param FirstDim The first dimension to be permuted. /// @param SecondDim The second dimension to be permuted. static isl::schedule_node permuteBandNodeDimensions(isl::schedule_node Node, unsigned FirstDim, unsigned SecondDim) { … } static isl::schedule_node createMicroKernel(isl::schedule_node Node, MicroKernelParamsTy MicroKernelParams) { … } /// Create the BLIS macro-kernel. /// /// We create the BLIS macro-kernel by applying a combination of tiling /// of dimensions of the band node and interchanging of two innermost /// modified dimensions. The values of MacroKernelParams's fields are used /// as tile sizes. 
/// /// @param Node The schedule node to be modified. /// @param MacroKernelParams Parameters of the macro kernel /// to be used as tile sizes. static isl::schedule_node createMacroKernel(isl::schedule_node Node, MacroKernelParamsTy MacroKernelParams) { … } /// Get the size of the widest type of the matrix multiplication operands /// in bytes, including alignment padding. /// /// @param MMI Parameters of the matrix multiplication operands. /// @return The size of the widest type of the matrix multiplication operands /// in bytes, including alignment padding. static uint64_t getMatMulAlignTypeSize(const MatMulInfoTy &MMI) { … } /// Get the size of the widest type of the matrix multiplication operands /// in bits. /// /// @param MMI Parameters of the matrix multiplication operands. /// @return The size of the widest type of the matrix multiplication operands /// in bits. static uint64_t getMatMulTypeSize(const MatMulInfoTy &MMI) { … } /// Get parameters of the BLIS micro kernel. /// /// We choose the Mr and Nr parameters of the micro kernel to be large enough /// such that no stalls caused by the combination of latencies and dependencies /// are introduced during the updates of the resulting matrix of the matrix /// multiplication. However, they should also be as small as possible to /// release more registers for entries of multiplied matrices. /// /// @param TTI Target Transform Info. /// @param MMI Parameters of the matrix multiplication operands. /// @return The structure of type MicroKernelParamsTy. /// @see MicroKernelParamsTy static MicroKernelParamsTy getMicroKernelParams(const TargetTransformInfo *TTI, const MatMulInfoTy &MMI) { … } /// Determine parameters of the target cache. /// /// @param TTI Target Transform Info. static void getTargetCacheParameters(const llvm::TargetTransformInfo *TTI) { … } /// Get parameters of the BLIS macro kernel. 
/// /// During the computation of matrix multiplication, blocks of partitioned /// matrices are mapped to different layers of the memory hierarchy. /// To optimize data reuse, blocks should be ideally kept in cache between /// iterations. Since parameters of the macro kernel determine sizes of these /// blocks, there are upper and lower bounds on these parameters. /// /// @param TTI Target Transform Info. /// @param MicroKernelParams Parameters of the micro-kernel /// to be taken into account. /// @param MMI Parameters of the matrix multiplication operands. /// @return The structure of type MacroKernelParamsTy. /// @see MacroKernelParamsTy /// @see MicroKernelParamsTy static MacroKernelParamsTy getMacroKernelParams(const llvm::TargetTransformInfo *TTI, const MicroKernelParamsTy &MicroKernelParams, const MatMulInfoTy &MMI) { … } /// Create an access relation that is specific to /// the matrix multiplication pattern. /// /// Create an access relation of the following form: /// [O0, O1, O2, O3, O4, O5, O6, O7, O8] -> [OI, O5, OJ] /// where I is @p FirstDim, J is @p SecondDim. /// /// It can be used, for example, to create relations that helps to consequently /// access elements of operands of a matrix multiplication after creation of /// the BLIS micro and macro kernels. /// /// @see ScheduleTreeOptimizer::createMicroKernel /// @see ScheduleTreeOptimizer::createMacroKernel /// /// Subsequently, the described access relation is applied to the range of /// @p MapOldIndVar, that is used to map original induction variables to /// the ones, which are produced by schedule transformations. It helps to /// define relations using a new space and, at the same time, keep them /// in the original one. /// /// @param MapOldIndVar The relation, which maps original induction variables /// to the ones, which are produced by schedule /// transformations. /// @param FirstDim, SecondDim The input dimensions that are used to define /// the specified access relation. 
/// @return The specified access relation. static isl::map getMatMulAccRel(isl::map MapOldIndVar, unsigned FirstDim, unsigned SecondDim) { … } static isl::schedule_node createExtensionNode(isl::schedule_node Node, isl::map ExtensionMap) { … } static isl::schedule_node optimizePackedB(isl::schedule_node Node, ScopStmt *Stmt, isl::map MapOldIndVar, MicroKernelParamsTy MicroParams, MacroKernelParamsTy MacroParams, MatMulInfoTy &MMI) { … } static isl::schedule_node optimizePackedA(isl::schedule_node Node, ScopStmt *, isl::map MapOldIndVar, MicroKernelParamsTy MicroParams, MacroKernelParamsTy MacroParams, MatMulInfoTy &MMI) { … } /// Apply the packing transformation. /// /// The packing transformation can be described as a data-layout /// transformation that requires to introduce a new array, copy data /// to the array, and change memory access locations to reference the array. /// It can be used to ensure that elements of the new array are read in-stride /// access, aligned to cache lines boundaries, and preloaded into certain cache /// levels. /// /// As an example let us consider the packing of the array A that would help /// to read its elements with in-stride access. An access to the array A /// is represented by an access relation that has the form /// S[i, j, k] -> A[i, k]. The scheduling function of the SCoP statement S has /// the form S[i,j, k] -> [floor((j mod Nc) / Nr), floor((i mod Mc) / Mr), /// k mod Kc, j mod Nr, i mod Mr]. /// /// To ensure that elements of the array A are read in-stride access, we add /// a new array Packed_A[Mc/Mr][Kc][Mr] to the SCoP, using /// Scop::createScopArrayInfo, change the access relation /// S[i, j, k] -> A[i, k] to /// S[i, j, k] -> Packed_A[floor((i mod Mc) / Mr), k mod Kc, i mod Mr], using /// MemoryAccess::setNewAccessRelation, and copy the data to the array, using /// the copy statement created by Scop::addScopStmt. /// /// @param Node The schedule node to be optimized. 
/// @param MapOldIndVar The relation, which maps original induction variables /// to the ones, which are produced by schedule /// transformations. /// @param MicroParams, MacroParams Parameters of the BLIS kernel /// to be taken into account. /// @param MMI Parameters of the matrix multiplication operands. /// @return The optimized schedule node. static isl::schedule_node optimizeDataLayoutMatrMulPattern(isl::schedule_node Node, isl::map MapOldIndVar, MicroKernelParamsTy MicroParams, MacroKernelParamsTy MacroParams, MatMulInfoTy &MMI) { … } /// Get a relation mapping induction variables produced by schedule /// transformations to the original ones. /// /// @param Node The schedule node produced as the result of creation /// of the BLIS kernels. /// @param MicroKernelParams, MacroKernelParams Parameters of the BLIS kernel /// to be taken into account. /// @return The relation mapping original induction variables to the ones /// produced by schedule transformation. /// @see ScheduleTreeOptimizer::createMicroKernel /// @see ScheduleTreeOptimizer::createMacroKernel /// @see getMacroKernelParams static isl::map getInductionVariablesSubstitution(isl::schedule_node Node, MicroKernelParamsTy MicroKernelParams, MacroKernelParamsTy MacroKernelParams) { … } /// Isolate a set of partial tile prefixes and unroll the isolated part. /// /// The set should ensure that it contains only partial tile prefixes that have /// exactly Mr x Nr iterations of the two innermost loops produced by /// the optimization of the matrix multiplication. Mr and Nr are parameters of /// the micro-kernel. /// /// In case of parametric bounds, this helps to auto-vectorize the unrolled /// innermost loops, using the SLP vectorizer. /// /// @param Node The schedule node to be modified. /// @param MicroKernelParams Parameters of the micro-kernel /// to be taken into account. /// @return The modified isl_schedule_node. 
static isl::schedule_node isolateAndUnrollMatMulInnerLoops(isl::schedule_node Node, MicroKernelParamsTy MicroKernelParams) { … } /// Insert "Loop Vectorizer Disabled" mark node. /// /// @param Node The child of the mark node to be inserted. /// @return The modified isl_schedule_node. static isl::schedule_node markLoopVectorizerDisabled(isl::schedule_node Node) { … } /// Restore the initial ordering of dimensions of the band node /// /// In case the band node represents all the dimensions of the iteration /// domain, recreate the band node to restore the initial ordering of the /// dimensions. /// /// @param Node The band node to be modified. /// @return The modified schedule node. static isl::schedule_node getBandNodeWithOriginDimOrder(isl::schedule_node Node) { … } static isl::schedule_node optimizeMatMulPattern(isl::schedule_node Node, const TargetTransformInfo *TTI, MatMulInfoTy &MMI) { … } /// Check if this node contains a partial schedule that could /// probably be optimized with analytical modeling. /// /// isMatrMultPattern tries to determine whether the following conditions /// are true: /// 1. the partial schedule contains only one statement. /// 2. there are exactly three input dimensions. /// 3. all memory accesses of the statement will have stride 0 or 1, if we /// interchange loops (switch the variable used in the inner loop to /// the outer loop). /// 4. all memory accesses of the statement except from the last one, are /// read memory access and the last one is write memory access. /// 5. all subscripts of the last memory access of the statement don't /// contain the variable used in the inner loop. /// If this is the case, we could try to use an approach that is similar to /// the one used to get close-to-peak performance of matrix multiplications. /// /// @param Node The node to check. /// @param D The SCoP dependencies. /// @param MMI Parameters of the matrix multiplication operands. 
static bool isMatrMultPattern(isl::schedule_node Node, const Dependences *D, MatMulInfoTy &MMI) { … } /// Get the dimension size. /// /// Return the size of the dimension @p Pos, which is obtained from @p SAI. /// Return -1 in the case of the first dimension of a multi-dimensional array, /// since the ScopArrayInfo class does not carry size information. /// /// @param SAI The information about the array. /// @param Pos The position of the dimension. /// @return The size of the dimension. static int getDimSize(const ScopArrayInfo *SAI, unsigned Pos) { … } /// Check whether the access relation has the specified form. /// /// Check that the access relation @p AccMap has the form T[I0, …, In], where /// indexes I0, …, In are specified by @p Dimensions. /// /// @param Domain The domain of the access relation. /// @param AccMap The access relation to be checked. /// @param Dimensions The permutation of the subset of the input dimensions. /// @return True if @p AccMap has the expected form and false, /// otherwise. static bool isCorrectAccessMap(isl::set Domain, isl::map AccMap, ArrayRef<int> Dimensions) { … } /// Check whether the access represents the tensor contraction operand. /// /// Check that the access relation @p AccMap has the form T[i1, …, in]. /// Obtained indexes i1, …, in, their sizes and their permutation are stored /// into @p IndexSet, @p DimensionSizes, and @p Dimensions, respectively. /// /// @param Domain The domain of the access relation. /// @param AccMap The access relation to be checked. /// @param IndexSet The subset of the input dimensions. /// @param DimensionSizes Sizes of the input dimensions of @p Dimensions. /// @param Dimensions The permutation of the subset of the input dimensions. /// @return True if @p AccMap has the expected form and false, /// otherwise. 
static bool isTCOperandAcc(isl::set Domain, isl::map AccMap, SmallDenseSet<int> &IndexSet, SmallVectorImpl<int> &DimensionSizes, SmallVectorImpl<int> &Dimensions) { … } /// Find the intersection of two sets. /// /// Find the intersection of the set @p A and the set @p B. /// /// @param A, B Sets to intersect. /// @return The set intersection. static SmallDenseSet<int> intersect(const SmallDenseSet<int> &A, const SmallDenseSet<int> &B) { … } /// Check whether the set is a superset. /// /// Check that the set @p A is a superset of @p B. /// /// @param A, B Sets to be checked. /// @return True if the set A is a superset of B. static bool isSuperset(const SmallDenseSet<int> &A, const SmallDenseSet<int> &B) { … } /// Find the union of two sets. /// /// Find the union of the set @p A and the set @p B. /// /// @param A, B Sets to unite. /// @return The set union. static SmallDenseSet<int> unite(const SmallDenseSet<int> &A, const SmallDenseSet<int> &B) { … } /// Determine the access that writes to the tensor, which contains /// the result of the tensor contraction. /// /// @param Domain The domain of the statement. /// @param Stmt The statement, which writes to memory. /// @param TCI The information about the tensor contraction. /// @param IandJIndexSet The set, which contains free indexes of tensors. /// @return The determined MemoryAccess, or nullptr if there is no necessary /// access within the SCoP. static MemoryAccess *getWriteAccess(isl::set Domain, ScopStmt *Stmt, TCInfoTy &TCI, SmallDenseSet<int> &IandJIndexSet) { … } /// Determine an access, which reads elements of an operand of the tensor /// contraction /// /// @param MemAccessPtr The access, which reads elements of the tensor. /// @param IndexSet The set, which contains indexes of the tensors. /// @param IandJIndexSet The set, which contains free indexes of tensors. /// @param Dimensions The permutation of the subset of the input dimensions. /// @param TCI The information about the tensor contraction. 
/// @return True if the memory access @p MemAccessPtr corresponds /// to the tensor contraction. static bool setReadAccess(MemoryAccess *MemAccessPtr, const SmallDenseSet<int> &IndexSet, const SmallDenseSet<int> &IandJIndexSet, ArrayRef<int> Dimensions, TCInfoTy &TCI) { … } /// Check that all memory accesses of the statement, except from the last /// one, are read memory accesses, which read elements of operands of the tensor /// contraction and its result. /// /// @param Domain The domain of the statement. /// @param Stmt The statement, which writes to memory. /// @param TCI The information about the tensor contraction. /// @param IandJIndexSet The set, which contains free indexes of tensors. /// @return True if all read memory accesses of the statement @p Stmt correspond /// to the tensor contraction. static bool setReadAccesses(isl::set Domain, ScopStmt *Stmt, TCInfoTy &TCI, SmallDenseSet<int> &IandJIndexSet) { … } /// Check accesses to operands of the tensor contraction. /// /// Check that accesses of the SCoP statement, which corresponds to /// the partial schedule @p PartialSchedule, represent accesses /// to the non-scalar operands of the tensor contraction. /// /// @param Domain The domain of the SCoP statement. /// @param PartialSchedule The partial schedule of the SCoP statement. /// @param TCI Parameters of the tensor contraction operands. /// @return True if the corresponding SCoP statement /// represents tensor contraction and false, /// otherwise. static bool containsOnlyTCAcc(isl::set Domain, isl::map PartialSchedule, TCInfoTy &TCI) { … } /// Check that dependency corresponds to the tensor contraction carried over /// loop dimension @p Dim. /// /// Check that the dependency has the form /// S(..., ki, max(k(i + 1)), ..., max(kn), ...) -> /// S(..., ki + 1, min(k(i + 1)), ..., min(kn), ...), where S is the SCoP /// statement. 
For this purpose, we analyze the set @p DepDelta, which /// represents the differences between image elements and domain elements of /// the corresponding map. /// /// @param DepDelta The set contains the differences between image elements /// and corresponding domain elements of the map, which /// represents the dependency. /// @param Dim The position of the index ki. /// @param BoundDeltas In the case of indexes of ki, the difference between /// image elements and corresponding domain elements /// corresponds to the difference between lexicographic /// minimum and lexicographic maximum of the corresponding /// dimension of the domain of the statement. /// @param IndexSet Obtained indexes ki, which describe the dependency. /// @return True if dependencies correspond to the tensor contraction /// and false, otherwise. static bool isReductionCarriedOverDim(isl::set DepDelta, unsigned Dim, isl::pw_multi_aff BoundDeltas, const SmallDenseSet<int> &IndexSet) { … } /// Check whether dependencies are over the complete domain. /// /// In the case of the tensor contraction RAW, WAW, WAR dependencies /// have the form /// S(..., ki, max(k(i + 1)), ..., max(kn), ...) -> /// S(..., ki + 1, min(k(i + 1)), ..., min(kn), ...), where S is the SCoP /// statement. Consequently, the domain of the dependencies /// can be described as /// Domain / Domain ∩ S(…, max(kn),…) ∩ S(…, max(k(i + 1)),…), /// where Domain is the domain of the statement S. /// /// For example, in the case of the following tensor contraction, /// corresponding domains will have the following form. 
/// /// An example of the tensor contraction: /// for (i = 0; i < 1024; i++) /// for (j = 0; j < 1024; j++) /// for (l = 0; l < 64; ++l) /// for (w = 0; w < 64; ++w) /// C[i][j] += A[i][l][w] * B[w][j][l]; /// /// The domain of the statement: /// { S[i0, i1, i2, i3] : i0 >= 0 and i0 <= 1023 and /// i1 >= 0 and i1 <= 1023 and /// i2 >= 0 and i2 <= 63 and /// i3 >= 0 and i3 <= 63 } /// /// The domain of the dependencies: /// { S[i0, i1, i2, i3] : (i0 >= 0 and i0 <= 1023 and /// i1 >= 0 and i1 <= 1023 and /// i2 >= 0 and i2 <= 63 and /// i3 >= 0 and i3 <= 62) or /// (i3 = 63 and i0 >= 0 and i0 <= 1023 and /// i1 >= 0 and i1 <= 1023 and /// i2 >= 0 and i2 <= 62) } /// /// @param Domain The domain of the statement. /// @param DepsForStmt RAW and RED dependencies for the statement. /// @param UpperBound The lexicographic maximum of the elements in /// the @p Domain. /// @param IndexSet Obtained indexes ki, which describe the dependencies. /// @return True if dependencies are over the complete domain /// and false, otherwise. static bool areDepsOverCompleteDomain(isl::set Domain, isl::map DepsForStmt, isl::pw_multi_aff UpperBound, SmallDenseSet<int> &IndexSet) { … } /// Check that dependencies correspond to the tensor contraction. /// /// Check that there are only true dependencies of the form /// S(..., ki, max(k(i + 1)), ..., max(kn), ...) -> /// S(..., ki + 1, min(k(i + 1)), ..., min(kn), ...), where S is the SCoP /// statement represented by @p Schedule. Such dependencies are produced by /// the tensor contraction. Obtained indexes ki are stored into @p IndexSet. /// /// The form of anti and output dependencies is specified implicitly by /// the form the SCoP statement, which is checked by subsequent analysis. /// /// @param Schedule The schedule of the SCoP statement. /// @param D The SCoP dependencies. /// @param Domain The domain of the statement. /// @param IndexSet Obtained indexes ki, which describe the dependencies. 
/// @return True if dependencies correspond to the tensor contraction /// and false, otherwise. static bool containsOnlyTcDeps(isl::map Schedule, const Dependences *D, SmallDenseSet<int> &IndexSet, isl::set Domain) { … } /// Check if the SCoP statement could probably be optimized with analytical /// modeling. /// /// containsTCInfoTy tries to determine whether the following conditions /// are true: /// /// 1. The last memory access modeling an array, MA1, represents writing to /// memory and has the form S(..., I, ..., J, ...) -> M(shuffle(I, J)), /// where S is the SCoP statement under consideration and shuffle(I, J) /// is a permutation of indexes of sets I and J. /// 2. There are only true dependencies of the form /// S(..., ki, max(k(i + 1)), ..., max(kn), ...) -> /// S(..., ki + 1, min(k(i + 1)), ..., min(kn), ...), where S is the SCoP /// statement represented by @p Schedule and ki are indexes of the set P. /// 3. SCoP contains an arbitrary number of reads from constants and only three /// access relations, MA2, MA3, and MA4 that represent reading from memory /// and have the form /// S(..., I, ..., P, ...) -> M(shuffle(I, P)), /// S(..., P, ..., J, ...) -> M(shuffle(J, P)), /// S(...) -> M(shuffle(I, J)), respectively. /// /// @param PartialSchedule The PartialSchedule that contains a SCoP statement /// to check. /// @param D The SCoP dependencies. /// @param TCI Parameters of the tensor contraction operands. /// @param Domain The domain of the statement. /// @return True if dependencies and memory accesses correspond to the tensor /// contraction and false, otherwise. static bool containsTCInfoTy(isl::map PartialSchedule, const Dependences *D, TCInfoTy &TCI, isl::set Domain) { … } /// Check if this node contains a partial schedule that could /// probably be optimized with analytical modeling. 
/// /// isTCPattern is used to determine whether the SCoP represents a TC-like /// kernel [1], which is a perfectly nested set of loops, with a data usage /// pattern that is similar to that produced by the tensor contraction. /// /// A TC-like kernel can be defined as follows: /// /// 1. It satisfies the requirements of the polyhedral model. /// 2. Without loss of generality, it contains three nonempty bundles of /// one-dimensional for-loops with induction variables that are grouped into /// bundles I = i0...i(r-1), J = j0..j(s-1), and P = p0...p(t-1), and they /// are incremented by one. /// 3. The innermost loop body can be represented as a statement of the form /// C(shuffle(I, J)) = E(A(shuffle(I, P)), B(shuffle(P, J)), /// C(shuffle(I, J))), where A(shuffle(I, P)), B(shuffle(P, J)), /// C(shuffle(I, J)) are accesses to tensors A, B, C, respectively, /// shuffle(I, J), shuffle(I, P), and shuffle(P, J) are permutations of the /// enclosed indices, and E is an expression that contains reads from /// the tensors A, B, C, and an arbitrary number of reads from constants /// with respect to bundles I, J, and P. /// /// TC can be considered as a particular case of a TC-like kernel. /// /// The order of loops with indexes from P should be preserved. Otherwise, /// isTCPattern should check if a commutative operation is used. /// /// isTCPattern performs the following steps to check whether the SCoP /// corresponds to a definition of a TC-like kernel: /// /// 1. Checks that the node is the innermost band node. /// 2. Checks that the partial schedule contains only one statement. /// 3. Check that all ancestors of the node contain all band nodes for /// the statement and only mark nodes interleave such band nodes. This /// corresponds to a straightforward implementation of TC. /// 4. Analyses the dependencies to determine contraction dimensions. /// 5. Check that the last memory access modeling an array, represents writing /// to the result of the TC-like kernel. /// 6. 
Check that SCoP contains only three access relations that represent /// reading of the operands of the TC-like kernel and an arbitrary number of /// reads from constants. /// /// [1] - Gareev R., Grosser T., Kruse M. High-Performance Generalized Tensor /// Operations: A Compiler-Oriented Approach // ACM Transactions on /// Architecture and Code Optimization (TACO). 2018. /// Vol. 15, no. 3. P. 34:1–34:27. DOI: 10.1145/3235029. /// /// If this is the case, we could logically represent tensors as matrices and /// apply algorithms, which are used to get close-to-peak performance of /// matrix multiplications in manually tuned BLAS libraries (e.g., BLIS). /// /// @param Node The node to check. /// @param D The SCoP dependencies. /// @param TCI Parameters of the tensor contraction operands. static bool isTCPattern(isl::schedule_node Node, const Dependences *D, TCInfoTy &TCI) { … } } // namespace isl::schedule_node polly::tryOptimizeMatMulPattern(isl::schedule_node Node, const llvm::TargetTransformInfo *TTI, const Dependences *D) { … }