//===- InterleavedAccessPass.cpp ------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the Interleaved Access pass, which identifies
// interleaved memory accesses and transforms them into target specific
// intrinsics.
//
// An interleaved load reads data from memory into several vectors,
// DE-interleaving the data by a factor. An interleaved store writes several
// vectors to memory, RE-interleaving the data by a factor.
//
// As interleaved accesses are difficult to identify in CodeGen (mainly
// because the VECTOR_SHUFFLE DAG node is quite different from the
// shufflevector IR), we identify and transform them to intrinsics in this
// pass so the intrinsics can be easily matched into target specific
// instructions later in CodeGen.
//
// E.g. An interleaved load (Factor = 2):
//        %wide.vec = load <8 x i32>, <8 x i32>* %ptr
//        %v0 = shuffle <8 x i32> %wide.vec, <8 x i32> poison, <0, 2, 4, 6>
//        %v1 = shuffle <8 x i32> %wide.vec, <8 x i32> poison, <1, 3, 5, 7>
//
// It could be transformed into a ld2 intrinsic in the AArch64 backend or a
// vld2 intrinsic in the ARM backend.
//
// In X86, this can be further optimized into a set of target specific loads
// followed by an optimized sequence of shuffles.
//
// E.g. An interleaved store (Factor = 3):
//        %i.vec = shuffle <8 x i32> %v0, <8 x i32> %v1,
//                         <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11>
//        store <12 x i32> %i.vec, <12 x i32>* %ptr
//
// It could be transformed into a st3 intrinsic in the AArch64 backend or a
// vst3 intrinsic in the ARM backend.
//
// Similarly, a set of interleaved stores can be transformed into an optimized
// sequence of shuffles followed by a set of target specific stores for X86.
//
//===----------------------------------------------------------------------===//

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/InterleavedAccess.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Transforms/Utils/Local.h"
#include <cassert>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE …

static cl::opt<bool> LowerInterleavedAccesses(
    "lower-interleaved-accesses",
    cl::desc("Enable lowering interleaved accesses to intrinsics"),
    cl::init(true), cl::Hidden);

namespace {

class InterleavedAccessImpl { … };

class InterleavedAccess : public FunctionPass { … };

} // end anonymous namespace.
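
// Illustrative sketch only (not part of the pass): the DE-interleave masks
// shown in the file header are strided index sequences of the form
// <Index, Index+Factor, Index+2*Factor, ...>. A minimal check for that shape,
// assuming Factor and Index are already known, could look like the helper
// below. The real isDeInterleaveMask later in this file also has to discover
// Factor and Index; this helper's name and its treatment of undef (-1) lanes
// are assumptions made for exposition.
static bool matchesStridedMask(ArrayRef<int> Mask, unsigned Factor,
                               unsigned Index) {
  for (unsigned I = 0, E = Mask.size(); I != E; ++I) {
    // Undef lanes (-1) are treated as wildcards; defined lanes must follow
    // the stride exactly.
    if (Mask[I] >= 0 && static_cast<unsigned>(Mask[I]) != Index + I * Factor)
      return false;
  }
  return true;
}
// E.g. matchesStridedMask(<1, 3, 5, 7>, /*Factor=*/2, /*Index=*/1) returns
// true: it is the odd-element mask from the Factor = 2 load example above.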
PreservedAnalyses InterleavedAccessPass::run(Function &F,
                                             FunctionAnalysisManager &FAM) { … }

char InterleavedAccess::ID = …;

bool InterleavedAccess::runOnFunction(Function &F) { … }

INITIALIZE_PASS_BEGIN(InterleavedAccess, DEBUG_TYPE,
                      "Lower interleaved memory accesses to target specific intrinsics",
                      false, false)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_END(InterleavedAccess, DEBUG_TYPE,
                    "Lower interleaved memory accesses to target specific intrinsics",
                    false, false)

FunctionPass *llvm::createInterleavedAccessPass() { … }

/// Check if the mask is a DE-interleave mask for an interleaved load.
///
/// E.g. DE-interleave masks (Factor = 2) could be:
///     <0, 2, 4, 6>    (mask of index 0 to extract even elements)
///     <1, 3, 5, 7>    (mask of index 1 to extract odd elements)
static bool isDeInterleaveMask(ArrayRef<int> Mask, unsigned &Factor,
                               unsigned &Index, unsigned MaxFactor,
                               unsigned NumLoadElements) { … }

/// Check if the mask can be used in an interleaved store.
///
/// It checks for a more general pattern than the RE-interleave mask.
/// I.e. <x, y, ..., z, x+1, y+1, ..., z+1, x+2, y+2, ..., z+2, ...>
/// E.g. For a Factor of 2 (LaneLen=4): <4, 32, 5, 33, 6, 34, 7, 35>
/// E.g. For a Factor of 3 (LaneLen=4): <4, 32, 16, 5, 33, 17, 6, 34, 18, 7, 35, 19>
/// E.g. For a Factor of 4 (LaneLen=2): <8, 2, 12, 4, 9, 3, 13, 5>
///
/// The particular case of an RE-interleave mask is:
/// <0, LaneLen, ..., LaneLen*(Factor - 1), 1, LaneLen + 1, ...>
/// E.g. For a Factor of 2 (LaneLen=4): <0, 4, 1, 5, 2, 6, 3, 7>
///
/// (An illustrative sketch of the defined-lane case of this check appears at
/// the end of this file.)
static bool isReInterleaveMask(ShuffleVectorInst *SVI, unsigned &Factor,
                               unsigned MaxFactor) { … }

bool InterleavedAccessImpl::lowerInterleavedLoad(
    LoadInst *LI, SmallVectorImpl<Instruction *> &DeadInsts) { … }

bool InterleavedAccessImpl::replaceBinOpShuffles(
    ArrayRef<ShuffleVectorInst *> BinOpShuffles,
    SmallVectorImpl<ShuffleVectorInst *> &Shuffles, LoadInst *LI) { … }

bool InterleavedAccessImpl::tryReplaceExtracts(
    ArrayRef<ExtractElementInst *> Extracts,
    ArrayRef<ShuffleVectorInst *> Shuffles) { … }

bool InterleavedAccessImpl::lowerInterleavedStore(
    StoreInst *SI, SmallVectorImpl<Instruction *> &DeadInsts) { … }

bool InterleavedAccessImpl::lowerDeinterleaveIntrinsic(
    IntrinsicInst *DI, SmallVectorImpl<Instruction *> &DeadInsts) { … }

bool InterleavedAccessImpl::lowerInterleaveIntrinsic(
    IntrinsicInst *II, SmallVectorImpl<Instruction *> &DeadInsts) { … }

bool InterleavedAccessImpl::runOnFunction(Function &F) { … }
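
// Illustrative sketch only (not part of the pass): with no undef lanes, the
// generalized RE-interleave shape documented above isReInterleaveMask,
// <x, y, ..., z, x+1, y+1, ..., z+1, ...>, reduces to the check below: for a
// given Factor, every "column" K of the mask must hold LaneLen consecutive
// indices starting at Mask[K]. The real isReInterleaveMask also recovers
// Factor by trying candidate factors and tolerates undef lanes; this helper's
// name is an assumption made for exposition.
static bool matchesReInterleaveShape(ArrayRef<int> Mask, unsigned Factor) {
  if (Factor < 2 || Mask.size() % Factor != 0)
    return false;
  unsigned LaneLen = Mask.size() / Factor;
  for (unsigned K = 0; K != Factor; ++K) {
    int Start = Mask[K];
    if (Start < 0)
      return false; // Keep the sketch simple: reject undef lanes.
    // Lane J of column K must select element Start + J of the concatenated
    // source vectors.
    for (unsigned J = 0; J != LaneLen; ++J)
      if (Mask[J * Factor + K] != Start + static_cast<int>(J))
        return false;
  }
  return true;
}
// E.g. matchesReInterleaveShape(<0, 4, 1, 5, 2, 6, 3, 7>, /*Factor=*/2)
// returns true: column 0 is <0, 1, 2, 3> and column 1 is <4, 5, 6, 7>,
// matching the Factor = 2 (LaneLen = 4) RE-interleave example above.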