#include "llvm/Analysis/VectorUtils.h"
#include "llvm/ADT/EquivalenceClasses.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/DemandedBits.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopIterator.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/MemoryModelRelaxationAnnotations.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/CommandLine.h"
#define DEBUG_TYPE "vectorutils"

using namespace llvm;
using namespace llvm::PatternMatch;
// Command-line knob bounding the factor of an interleaved access group.
// Hidden option; defaults to 8 (per cl::init below).
static cl::opt<unsigned> MaxInterleaveGroupFactor(
"max-interleave-group-factor", cl::Hidden,
cl::desc("Maximum factor for an interleaved access group (default = 8)"),
cl::init(8));
bool llvm::isTriviallyVectorizable(Intrinsic::ID ID) { … }
// Predicate on (intrinsic ID, operand index). NOTE(review): body elided —
// presumably reports whether operand ScalarOpdIdx stays scalar when the
// intrinsic is vectorized; confirm against VectorUtils.h.
bool llvm::isVectorIntrinsicWithScalarOpAtArg(Intrinsic::ID ID,
unsigned ScalarOpdIdx) { … }
// Predicate on (intrinsic ID, operand index). Note the signed index —
// presumably a negative OpdIdx denotes the return type, as in other LLVM
// overload-type queries; body elided here, so confirm against VectorUtils.h.
bool llvm::isVectorIntrinsicWithOverloadTypeAtArg(Intrinsic::ID ID,
int OpdIdx) { … }
// Maps a call instruction to an intrinsic ID, consulting TLI for library
// calls. NOTE(review): body elided — return value on failure (likely
// Intrinsic::not_intrinsic) not visible here; verify before relying on it.
Intrinsic::ID llvm::getVectorIntrinsicIDForCall(const CallInst *CI,
const TargetLibraryInfo *TLI) { … }
Value *llvm::findScalarElement(Value *V, unsigned EltNo) { … }
int llvm::getSplatIndex(ArrayRef<int> Mask) { … }
Value *llvm::getSplatValue(const Value *V) { … }
bool llvm::isSplatValue(const Value *V, int Index, unsigned Depth) { … }
// Splits DemandedElts of a shuffle's result into the lanes demanded from
// each source operand (out-params DemandedLHS/DemandedRHS). Body elided —
// the meaning of the bool result and of AllowUndefElts is not visible here;
// confirm against VectorUtils.h before relying on either.
bool llvm::getShuffleDemandedElts(int SrcWidth, ArrayRef<int> Mask,
const APInt &DemandedElts, APInt &DemandedLHS,
APInt &DemandedRHS, bool AllowUndefElts) { … }
// Rewrites Mask into ScaledMask with Scale-times narrower elements.
// Unconditional (void) — contrast widenShuffleMaskElts below, which can
// fail. Body elided in this view.
void llvm::narrowShuffleMaskElts(int Scale, ArrayRef<int> Mask,
SmallVectorImpl<int> &ScaledMask) { … }
// Attempts the inverse of narrowShuffleMaskElts: widen mask elements by
// Scale. Returns bool, so widening can presumably fail when lanes do not
// group evenly — body elided; confirm against VectorUtils.h.
bool llvm::widenShuffleMaskElts(int Scale, ArrayRef<int> Mask,
SmallVectorImpl<int> &ScaledMask) { … }
// Rescales Mask to exactly NumDstElts elements (narrowing or widening as
// needed); bool result suggests it can fail. Body elided in this view.
bool llvm::scaleShuffleMaskElts(unsigned NumDstElts, ArrayRef<int> Mask,
SmallVectorImpl<int> &ScaledMask) { … }
// Produces the equivalent mask using the widest element size that still
// represents Mask exactly. Body elided in this view.
void llvm::getShuffleMaskWithWidestElts(ArrayRef<int> Mask,
SmallVectorImpl<int> &ScaledMask) { … }
// Decomposes Mask over NumOfSrcRegs source registers / NumOfDestRegs
// destination registers and dispatches one of three callbacks per
// destination register: NoInputAction (no source lanes used),
// SingleInputAction (one source register), ManyInputsAction (several).
// NOTE(review): exact callback argument semantics (sub-mask, register
// indices) are not visible here — body elided; see VectorUtils.h.
void llvm::processShuffleMasks(
ArrayRef<int> Mask, unsigned NumOfSrcRegs, unsigned NumOfDestRegs,
unsigned NumOfUsedRegs, function_ref<void()> NoInputAction,
function_ref<void(ArrayRef<int>, unsigned, unsigned)> SingleInputAction,
function_ref<void(ArrayRef<int>, unsigned, unsigned)> ManyInputsAction) { … }
// For a horizontal-op style instruction, splits DemandedElts of the result
// into lanes demanded from each half of the first operand (out-params
// DemandedLHS/DemandedRHS). Body elided in this view.
void llvm::getHorizDemandedEltsForFirstOperand(unsigned VectorBitWidth,
const APInt &DemandedElts,
APInt &DemandedLHS,
APInt &DemandedRHS) { … }
// Computes, per instruction in Blocks, the minimum bit width it can be
// represented with, using DemandedBits analysis (and TTI, which may be
// null given it is a pointer). Body elided in this view.
MapVector<Instruction *, uint64_t>
llvm::computeMinimumValueSizes(ArrayRef<BasicBlock *> Blocks, DemandedBits &DB,
const TargetTransformInfo *TTI) { … }
// File-local helper: appends the access group(s) in AccGroups to List.
// Templated on the list type so it serves both set- and vector-like
// accumulators used by the unite/intersect functions below. Body elided.
template <typename ListT>
static void addToAccessGroupList(ListT &List, MDNode *AccGroups) { … }
MDNode *llvm::uniteAccessGroups(MDNode *AccGroups1, MDNode *AccGroups2) { … }
// Returns the access-group metadata common to both instructions, i.e. the
// groups a combined instruction may legally claim. Body elided in this view.
MDNode *llvm::intersectAccessGroups(const Instruction *Inst1,
const Instruction *Inst2) { … }
Instruction *llvm::propagateMetadata(Instruction *Inst, ArrayRef<Value *> VL) { … }
// Builds a constant mask for VF lanes that deactivates the gap members of
// an interleave Group. Body elided in this view.
Constant *
llvm::createBitMaskForGaps(IRBuilderBase &Builder, unsigned VF,
const InterleaveGroup<Instruction> &Group) { … }
// Shuffle mask that repeats each of VF lanes ReplicationFactor times
// (e.g. <0,0,1,1,...>). Body elided — confirm ordering in VectorUtils.h.
llvm::SmallVector<int, 16>
llvm::createReplicatedMask(unsigned ReplicationFactor, unsigned VF) { … }
// Shuffle mask interleaving NumVecs vectors of VF lanes each. Body elided
// in this view; see VectorUtils.h for the exact lane order.
llvm::SmallVector<int, 16> llvm::createInterleaveMask(unsigned VF,
unsigned NumVecs) { … }
// Shuffle mask selecting VF lanes starting at Start with step Stride —
// the de-interleaving counterpart of createInterleaveMask. Body elided.
llvm::SmallVector<int, 16>
llvm::createStrideMask(unsigned Start, unsigned Stride, unsigned VF) { … }
// Shuffle mask of NumInts consecutive lanes beginning at Start, padded
// with NumUndefs undef lanes. Body elided in this view.
llvm::SmallVector<int, 16> llvm::createSequentialMask(unsigned Start,
unsigned NumInts,
unsigned NumUndefs) { … }
// Rewrites a two-operand shuffle Mask (over NumElts-wide sources) into an
// equivalent single-operand ("unary") mask. Body elided in this view.
llvm::SmallVector<int, 16> llvm::createUnaryMask(ArrayRef<int> Mask,
unsigned NumElts) { … }
// File-local helper for concatenateVectors: concatenates exactly two
// vector values via Builder. Body elided in this view.
static Value *concatenateTwoVectors(IRBuilderBase &Builder, Value *V1,
Value *V2) { … }
// Concatenates an arbitrary list of vector values into one, presumably by
// pairwise reduction through concatenateTwoVectors above. Body elided —
// behavior for an empty or single-element Vecs is not visible here.
Value *llvm::concatenateVectors(IRBuilderBase &Builder,
ArrayRef<Value *> Vecs) { … }
bool llvm::maskIsAllZeroOrUndef(Value *Mask) { … }
bool llvm::maskIsAllOneOrUndef(Value *Mask) { … }
bool llvm::maskContainsAllOneOrUndef(Value *Mask) { … }
APInt llvm::possiblyDemandedEltsInMask(Value *Mask) { … }
bool InterleavedAccessInfo::isStrided(int Stride) { … }
// Populates AccessStrideInfo with a StrideDescriptor for each memory
// access whose stride is constant, using the symbolic Strides map.
// Body elided in this view.
void InterleavedAccessInfo::collectConstStrideAccesses(
MapVector<Instruction *, StrideDescriptor> &AccessStrideInfo,
const DenseMap<Value*, const SCEV*> &Strides) { … }
// Main driver: groups the collected strided accesses into interleave
// groups; the flag admits accesses in predicated blocks. Body elided —
// see the class documentation in VectorUtils.h for the grouping rules.
void InterleavedAccessInfo::analyzeInterleaving(
bool EnablePredicatedInterleavedMemAccesses) { … }
void InterleavedAccessInfo::invalidateGroupsRequiringScalarEpilogue() { … }
// Generic addMetadata is not defined for arbitrary InstT; only the
// Instruction specialization below is provided in this file. Body elided —
// presumably this primary template aborts/unreachable; confirm.
template <typename InstT>
void InterleaveGroup<InstT>::addMetadata(InstT *NewInst) const { … }
namespace llvm {
// Explicit specialization for IR Instructions: attaches metadata derived
// from the group's members to NewInst (likely via propagateMetadata above —
// body elided; confirm). Must live in namespace llvm per [temp.expl.spec].
template <>
void InterleaveGroup<Instruction>::addMetadata(Instruction *NewInst) const { … }
}