#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/LoopIterator.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfoImpl.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/InitializePasses.h"
#include "llvm/Support/CommandLine.h"
#include <optional>
#include <utility>
using namespace llvm;
using namespace PatternMatch;
#define DEBUG_TYPE …
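// Command-line knobs used below: -costmodel-reduxcost opts the cost model in
// to recognizing reduction patterns, while -cache-line-size, -min-page-size
// and -predictable-branch-threshold override the corresponding target-reported
// values whenever they are specified (e.g. passed directly to opt/llc, or via
// -mllvm from clang).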
static cl::opt<bool> EnableReduxCost("costmodel-reduxcost", cl::init(false),
cl::Hidden,
cl::desc("Recognize reduction patterns."));
static cl::opt<unsigned> CacheLineSize(
"cache-line-size", cl::init(0), cl::Hidden,
cl::desc("Use this to override the target cache line size when "
"specified by the user."));
static cl::opt<unsigned> MinPageSize(
"min-page-size", cl::init(0), cl::Hidden,
cl::desc("Use this to override the target's minimum page size."));
static cl::opt<unsigned> PredictableBranchThreshold(
"predictable-branch-threshold", cl::init(99), cl::Hidden,
cl::desc(
"Use this to override the target's predictable branch threshold (%)."));
namespace {
struct NoTTIImpl : TargetTransformInfoImplCRTPBase<NoTTIImpl> { … };
} // namespace
bool HardwareLoopInfo::canAnalyze(LoopInfo &LI) { … }
IntrinsicCostAttributes::IntrinsicCostAttributes(
Intrinsic::ID Id, const CallBase &CI, InstructionCost ScalarizationCost,
bool TypeBasedOnly)
: … { … }
IntrinsicCostAttributes::IntrinsicCostAttributes(Intrinsic::ID Id, Type *RTy,
ArrayRef<Type *> Tys,
FastMathFlags Flags,
const IntrinsicInst *I,
InstructionCost ScalarCost)
: … { … }
IntrinsicCostAttributes::IntrinsicCostAttributes(Intrinsic::ID Id, Type *Ty,
ArrayRef<const Value *> Args)
: … { … }
IntrinsicCostAttributes::IntrinsicCostAttributes(Intrinsic::ID Id, Type *RTy,
ArrayRef<const Value *> Args,
ArrayRef<Type *> Tys,
FastMathFlags Flags,
const IntrinsicInst *I,
InstructionCost ScalarCost)
: … { … }
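// Example (hedged sketch, caller-side): a typical intrinsic cost query bundles
// its inputs into an IntrinsicCostAttributes object, built with one of the
// constructors above, and hands it to getIntrinsicInstrCost:
//   IntrinsicCostAttributes Attrs(Intrinsic::fmuladd, RetTy, ArgTys, FMF);
//   InstructionCost Cost =
//       TTI.getIntrinsicInstrCost(Attrs, TTI::TCK_RecipThroughput);
// RetTy, ArgTys, FMF and TTI are illustrative names supplied by the caller.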
HardwareLoopInfo::HardwareLoopInfo(Loop *L) : … { … }
bool HardwareLoopInfo::isHardwareLoopCandidate(ScalarEvolution &SE,
LoopInfo &LI, DominatorTree &DT,
bool ForceNestedLoop,
bool ForceHardwareLoopPHI) { … }
TargetTransformInfo::TargetTransformInfo(const DataLayout &DL)
: … { … }
TargetTransformInfo::~TargetTransformInfo() = default;
TargetTransformInfo::TargetTransformInfo(TargetTransformInfo &&Arg)
: … { … }
TargetTransformInfo &TargetTransformInfo::operator=(TargetTransformInfo &&RHS) { … }
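// The query methods below are thin wrappers that delegate to the underlying
// polymorphic target implementation (TTIImpl) held by TargetTransformInfo.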
unsigned TargetTransformInfo::getInliningThresholdMultiplier() const { … }
unsigned
TargetTransformInfo::getInliningCostBenefitAnalysisSavingsMultiplier() const { … }
unsigned
TargetTransformInfo::getInliningCostBenefitAnalysisProfitableMultiplier()
const { … }
unsigned
TargetTransformInfo::adjustInliningThreshold(const CallBase *CB) const { … }
unsigned TargetTransformInfo::getCallerAllocaCost(const CallBase *CB,
const AllocaInst *AI) const { … }
int TargetTransformInfo::getInlinerVectorBonusPercent() const { … }
InstructionCost TargetTransformInfo::getGEPCost(
Type *PointeeType, const Value *Ptr, ArrayRef<const Value *> Operands,
Type *AccessType, TTI::TargetCostKind CostKind) const { … }
InstructionCost TargetTransformInfo::getPointersChainCost(
ArrayRef<const Value *> Ptrs, const Value *Base,
const TTI::PointersChainInfo &Info, Type *AccessTy,
TTI::TargetCostKind CostKind) const { … }
unsigned TargetTransformInfo::getEstimatedNumberOfCaseClusters(
const SwitchInst &SI, unsigned &JTSize, ProfileSummaryInfo *PSI,
BlockFrequencyInfo *BFI) const { … }
InstructionCost
TargetTransformInfo::getInstructionCost(const User *U,
ArrayRef<const Value *> Operands,
enum TargetCostKind CostKind) const { … }
BranchProbability TargetTransformInfo::getPredictableBranchThreshold() const { … }
InstructionCost TargetTransformInfo::getBranchMispredictPenalty() const { … }
bool TargetTransformInfo::hasBranchDivergence(const Function *F) const { … }
bool TargetTransformInfo::isSourceOfDivergence(const Value *V) const { … }
bool llvm::TargetTransformInfo::isAlwaysUniform(const Value *V) const { … }
bool llvm::TargetTransformInfo::isValidAddrSpaceCast(unsigned FromAS,
unsigned ToAS) const { … }
bool llvm::TargetTransformInfo::addrspacesMayAlias(unsigned FromAS,
unsigned ToAS) const { … }
unsigned TargetTransformInfo::getFlatAddressSpace() const { … }
bool TargetTransformInfo::collectFlatAddressOperands(
SmallVectorImpl<int> &OpIndexes, Intrinsic::ID IID) const { … }
bool TargetTransformInfo::isNoopAddrSpaceCast(unsigned FromAS,
unsigned ToAS) const { … }
bool TargetTransformInfo::canHaveNonUndefGlobalInitializerInAddressSpace(
unsigned AS) const { … }
unsigned TargetTransformInfo::getAssumedAddrSpace(const Value *V) const { … }
bool TargetTransformInfo::isSingleThreaded() const { … }
std::pair<const Value *, unsigned>
TargetTransformInfo::getPredicatedAddrSpace(const Value *V) const { … }
Value *TargetTransformInfo::rewriteIntrinsicWithAddressSpace(
IntrinsicInst *II, Value *OldV, Value *NewV) const { … }
bool TargetTransformInfo::isLoweredToCall(const Function *F) const { … }
bool TargetTransformInfo::isHardwareLoopProfitable(
Loop *L, ScalarEvolution &SE, AssumptionCache &AC,
TargetLibraryInfo *LibInfo, HardwareLoopInfo &HWLoopInfo) const { … }
bool TargetTransformInfo::preferPredicateOverEpilogue(
TailFoldingInfo *TFI) const { … }
TailFoldingStyle TargetTransformInfo::getPreferredTailFoldingStyle(
bool IVUpdateMayOverflow) const { … }
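// The following hooks let a target participate in InstCombine by folding or
// simplifying its own intrinsics, including demanded-bits and
// demanded-elements driven simplification.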
std::optional<Instruction *>
TargetTransformInfo::instCombineIntrinsic(InstCombiner &IC,
IntrinsicInst &II) const { … }
std::optional<Value *> TargetTransformInfo::simplifyDemandedUseBitsIntrinsic(
InstCombiner &IC, IntrinsicInst &II, APInt DemandedMask, KnownBits &Known,
bool &KnownBitsComputed) const { … }
std::optional<Value *> TargetTransformInfo::simplifyDemandedVectorEltsIntrinsic(
InstCombiner &IC, IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts,
APInt &UndefElts2, APInt &UndefElts3,
std::function<void(Instruction *, unsigned, APInt, APInt &)>
SimplifyAndSetOp) const { … }
void TargetTransformInfo::getUnrollingPreferences(
Loop *L, ScalarEvolution &SE, UnrollingPreferences &UP,
OptimizationRemarkEmitter *ORE) const { … }
void TargetTransformInfo::getPeelingPreferences(Loop *L, ScalarEvolution &SE,
PeelingPreferences &PP) const { … }
bool TargetTransformInfo::isLegalAddImmediate(int64_t Imm) const { … }
bool TargetTransformInfo::isLegalAddScalableImmediate(int64_t Imm) const { … }
bool TargetTransformInfo::isLegalICmpImmediate(int64_t Imm) const { … }
bool TargetTransformInfo::isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV,
int64_t BaseOffset,
bool HasBaseReg, int64_t Scale,
unsigned AddrSpace,
Instruction *I,
int64_t ScalableOffset) const { … }
bool TargetTransformInfo::isLSRCostLess(const LSRCost &C1,
const LSRCost &C2) const { … }
bool TargetTransformInfo::isNumRegsMajorCostOfLSR() const { … }
bool TargetTransformInfo::shouldDropLSRSolutionIfLessProfitable() const { … }
bool TargetTransformInfo::isProfitableLSRChainElement(Instruction *I) const { … }
bool TargetTransformInfo::canMacroFuseCmp() const { … }
bool TargetTransformInfo::canSaveCmp(Loop *L, BranchInst **BI,
ScalarEvolution *SE, LoopInfo *LI,
DominatorTree *DT, AssumptionCache *AC,
TargetLibraryInfo *LibInfo) const { … }
TTI::AddressingModeKind
TargetTransformInfo::getPreferredAddressingMode(const Loop *L,
ScalarEvolution *SE) const { … }
bool TargetTransformInfo::isLegalMaskedStore(Type *DataType,
Align Alignment) const { … }
bool TargetTransformInfo::isLegalMaskedLoad(Type *DataType,
Align Alignment) const { … }
bool TargetTransformInfo::isLegalNTStore(Type *DataType,
Align Alignment) const { … }
bool TargetTransformInfo::isLegalNTLoad(Type *DataType, Align Alignment) const { … }
bool TargetTransformInfo::isLegalBroadcastLoad(Type *ElementTy,
ElementCount NumElements) const { … }
bool TargetTransformInfo::isLegalMaskedGather(Type *DataType,
Align Alignment) const { … }
bool TargetTransformInfo::isLegalAltInstr(
VectorType *VecTy, unsigned Opcode0, unsigned Opcode1,
const SmallBitVector &OpcodeMask) const { … }
bool TargetTransformInfo::isLegalMaskedScatter(Type *DataType,
Align Alignment) const { … }
bool TargetTransformInfo::forceScalarizeMaskedGather(VectorType *DataType,
Align Alignment) const { … }
bool TargetTransformInfo::forceScalarizeMaskedScatter(VectorType *DataType,
Align Alignment) const { … }
bool TargetTransformInfo::isLegalMaskedCompressStore(Type *DataType,
Align Alignment) const { … }
bool TargetTransformInfo::isLegalMaskedExpandLoad(Type *DataType,
Align Alignment) const { … }
bool TargetTransformInfo::isLegalStridedLoadStore(Type *DataType,
Align Alignment) const { … }
bool TargetTransformInfo::isLegalMaskedVectorHistogram(Type *AddrType,
Type *DataType) const { … }
bool TargetTransformInfo::enableOrderedReductions() const { … }
bool TargetTransformInfo::hasDivRemOp(Type *DataType, bool IsSigned) const { … }
bool TargetTransformInfo::hasVolatileVariant(Instruction *I,
unsigned AddrSpace) const { … }
bool TargetTransformInfo::prefersVectorizedAddressing() const { … }
InstructionCost TargetTransformInfo::getScalingFactorCost(
Type *Ty, GlobalValue *BaseGV, StackOffset BaseOffset, bool HasBaseReg,
int64_t Scale, unsigned AddrSpace) const { … }
bool TargetTransformInfo::LSRWithInstrQueries() const { … }
bool TargetTransformInfo::isTruncateFree(Type *Ty1, Type *Ty2) const { … }
bool TargetTransformInfo::isProfitableToHoist(Instruction *I) const { … }
bool TargetTransformInfo::useAA() const { … }
bool TargetTransformInfo::isTypeLegal(Type *Ty) const { … }
unsigned TargetTransformInfo::getRegUsageForType(Type *Ty) const { … }
bool TargetTransformInfo::shouldBuildLookupTables() const { … }
bool TargetTransformInfo::shouldBuildLookupTablesForConstant(
Constant *C) const { … }
bool TargetTransformInfo::shouldBuildRelLookupTables() const { … }
bool TargetTransformInfo::useColdCCForColdCall(Function &F) const { … }
InstructionCost TargetTransformInfo::getScalarizationOverhead(
VectorType *Ty, const APInt &DemandedElts, bool Insert, bool Extract,
TTI::TargetCostKind CostKind) const { … }
InstructionCost TargetTransformInfo::getOperandsScalarizationOverhead(
ArrayRef<const Value *> Args, ArrayRef<Type *> Tys,
TTI::TargetCostKind CostKind) const { … }
bool TargetTransformInfo::supportsEfficientVectorElementLoadStore() const { … }
bool TargetTransformInfo::supportsTailCalls() const { … }
bool TargetTransformInfo::supportsTailCallFor(const CallBase *CB) const { … }
bool TargetTransformInfo::enableAggressiveInterleaving(
bool LoopHasReductions) const { … }
TargetTransformInfo::MemCmpExpansionOptions
TargetTransformInfo::enableMemCmpExpansion(bool OptSize, bool IsZeroCmp) const { … }
bool TargetTransformInfo::enableSelectOptimize() const { … }
bool TargetTransformInfo::shouldTreatInstructionLikeSelect(
const Instruction *I) const { … }
bool TargetTransformInfo::enableInterleavedAccessVectorization() const { … }
bool TargetTransformInfo::enableMaskedInterleavedAccessVectorization() const { … }
bool TargetTransformInfo::isFPVectorizationPotentiallyUnsafe() const { … }
bool
TargetTransformInfo::allowsMisalignedMemoryAccesses(LLVMContext &Context,
unsigned BitWidth,
unsigned AddressSpace,
Align Alignment,
unsigned *Fast) const { … }
TargetTransformInfo::PopcntSupportKind
TargetTransformInfo::getPopcntSupport(unsigned IntTyWidthInBit) const { … }
bool TargetTransformInfo::haveFastSqrt(Type *Ty) const { … }
bool TargetTransformInfo::isExpensiveToSpeculativelyExecute(
const Instruction *I) const { … }
bool TargetTransformInfo::isFCmpOrdCheaperThanFCmpZero(Type *Ty) const { … }
InstructionCost TargetTransformInfo::getFPOpCost(Type *Ty) const { … }
InstructionCost TargetTransformInfo::getIntImmCodeSizeCost(unsigned Opcode,
unsigned Idx,
const APInt &Imm,
Type *Ty) const { … }
InstructionCost
TargetTransformInfo::getIntImmCost(const APInt &Imm, Type *Ty,
TTI::TargetCostKind CostKind) const { … }
InstructionCost TargetTransformInfo::getIntImmCostInst(
unsigned Opcode, unsigned Idx, const APInt &Imm, Type *Ty,
TTI::TargetCostKind CostKind, Instruction *Inst) const { … }
InstructionCost
TargetTransformInfo::getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx,
const APInt &Imm, Type *Ty,
TTI::TargetCostKind CostKind) const { … }
bool TargetTransformInfo::preferToKeepConstantsAttached(
const Instruction &Inst, const Function &Fn) const { … }
unsigned TargetTransformInfo::getNumberOfRegisters(unsigned ClassID) const { … }
bool TargetTransformInfo::hasConditionalLoadStoreForType(Type *Ty) const { … }
unsigned TargetTransformInfo::getRegisterClassForType(bool Vector,
Type *Ty) const { … }
const char *TargetTransformInfo::getRegisterClassName(unsigned ClassID) const { … }
TypeSize TargetTransformInfo::getRegisterBitWidth(
TargetTransformInfo::RegisterKind K) const { … }
unsigned TargetTransformInfo::getMinVectorRegisterBitWidth() const { … }
std::optional<unsigned> TargetTransformInfo::getMaxVScale() const { … }
std::optional<unsigned> TargetTransformInfo::getVScaleForTuning() const { … }
bool TargetTransformInfo::isVScaleKnownToBeAPowerOfTwo() const { … }
bool TargetTransformInfo::shouldMaximizeVectorBandwidth(
TargetTransformInfo::RegisterKind K) const { … }
ElementCount TargetTransformInfo::getMinimumVF(unsigned ElemWidth,
bool IsScalable) const { … }
unsigned TargetTransformInfo::getMaximumVF(unsigned ElemWidth,
unsigned Opcode) const { … }
unsigned TargetTransformInfo::getStoreMinimumVF(unsigned VF, Type *ScalarMemTy,
Type *ScalarValTy) const { … }
bool TargetTransformInfo::shouldConsiderAddressTypePromotion(
const Instruction &I, bool &AllowPromotionWithoutCommonHeader) const { … }
unsigned TargetTransformInfo::getCacheLineSize() const { … }
std::optional<unsigned>
TargetTransformInfo::getCacheSize(CacheLevel Level) const { … }
std::optional<unsigned>
TargetTransformInfo::getCacheAssociativity(CacheLevel Level) const { … }
std::optional<unsigned> TargetTransformInfo::getMinPageSize() const { … }
unsigned TargetTransformInfo::getPrefetchDistance() const { … }
unsigned TargetTransformInfo::getMinPrefetchStride(
unsigned NumMemAccesses, unsigned NumStridedMemAccesses,
unsigned NumPrefetches, bool HasCall) const { … }
unsigned TargetTransformInfo::getMaxPrefetchIterationsAhead() const { … }
bool TargetTransformInfo::enableWritePrefetching() const { … }
bool TargetTransformInfo::shouldPrefetchAddressSpace(unsigned AS) const { … }
unsigned TargetTransformInfo::getMaxInterleaveFactor(ElementCount VF) const { … }
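// getOperandInfo classifies an operand value (e.g. uniform value, uniform or
// non-uniform constant, power-of-two constant) so targets can compute more
// precise operand-dependent costs.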
TargetTransformInfo::OperandValueInfo
TargetTransformInfo::getOperandInfo(const Value *V) { … }
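// Example (hedged sketch): a vectorizer-style client might query the
// reciprocal-throughput cost of a vector add as
//   InstructionCost C = TTI.getArithmeticInstrCost(
//       Instruction::Add, VecTy, TTI::TCK_RecipThroughput);
// where VecTy is an illustrative caller-provided vector type.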
InstructionCost TargetTransformInfo::getArithmeticInstrCost(
unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
OperandValueInfo Op1Info, OperandValueInfo Op2Info,
ArrayRef<const Value *> Args, const Instruction *CxtI,
const TargetLibraryInfo *TLibInfo) const { … }
InstructionCost TargetTransformInfo::getAltInstrCost(
VectorType *VecTy, unsigned Opcode0, unsigned Opcode1,
const SmallBitVector &OpcodeMask, TTI::TargetCostKind CostKind) const { … }
InstructionCost TargetTransformInfo::getShuffleCost(
ShuffleKind Kind, VectorType *Ty, ArrayRef<int> Mask,
TTI::TargetCostKind CostKind, int Index, VectorType *SubTp,
ArrayRef<const Value *> Args, const Instruction *CxtI) const { … }
TTI::CastContextHint
TargetTransformInfo::getCastContextHint(const Instruction *I) { … }
InstructionCost TargetTransformInfo::getCastInstrCost(
unsigned Opcode, Type *Dst, Type *Src, CastContextHint CCH,
TTI::TargetCostKind CostKind, const Instruction *I) const { … }
InstructionCost TargetTransformInfo::getExtractWithExtendCost(
unsigned Opcode, Type *Dst, VectorType *VecTy, unsigned Index) const { … }
InstructionCost TargetTransformInfo::getCFInstrCost(
unsigned Opcode, TTI::TargetCostKind CostKind, const Instruction *I) const { … }
InstructionCost TargetTransformInfo::getCmpSelInstrCost(
unsigned Opcode, Type *ValTy, Type *CondTy, CmpInst::Predicate VecPred,
TTI::TargetCostKind CostKind, const Instruction *I) const { … }
InstructionCost TargetTransformInfo::getVectorInstrCost(
unsigned Opcode, Type *Val, TTI::TargetCostKind CostKind, unsigned Index,
Value *Op0, Value *Op1) const { … }
InstructionCost
TargetTransformInfo::getVectorInstrCost(const Instruction &I, Type *Val,
TTI::TargetCostKind CostKind,
unsigned Index) const { … }
InstructionCost TargetTransformInfo::getReplicationShuffleCost(
Type *EltTy, int ReplicationFactor, int VF, const APInt &DemandedDstElts,
TTI::TargetCostKind CostKind) const { … }
InstructionCost TargetTransformInfo::getMemoryOpCost(
unsigned Opcode, Type *Src, Align Alignment, unsigned AddressSpace,
TTI::TargetCostKind CostKind, TTI::OperandValueInfo OpInfo,
const Instruction *I) const { … }
InstructionCost TargetTransformInfo::getMaskedMemoryOpCost(
unsigned Opcode, Type *Src, Align Alignment, unsigned AddressSpace,
TTI::TargetCostKind CostKind) const { … }
InstructionCost TargetTransformInfo::getGatherScatterOpCost(
unsigned Opcode, Type *DataTy, const Value *Ptr, bool VariableMask,
Align Alignment, TTI::TargetCostKind CostKind, const Instruction *I) const { … }
InstructionCost TargetTransformInfo::getStridedMemoryOpCost(
unsigned Opcode, Type *DataTy, const Value *Ptr, bool VariableMask,
Align Alignment, TTI::TargetCostKind CostKind, const Instruction *I) const { … }
InstructionCost TargetTransformInfo::getInterleavedMemoryOpCost(
unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
bool UseMaskForCond, bool UseMaskForGaps) const { … }
InstructionCost
TargetTransformInfo::getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
TTI::TargetCostKind CostKind) const { … }
InstructionCost
TargetTransformInfo::getCallInstrCost(Function *F, Type *RetTy,
ArrayRef<Type *> Tys,
TTI::TargetCostKind CostKind) const { … }
unsigned TargetTransformInfo::getNumberOfParts(Type *Tp) const { … }
InstructionCost
TargetTransformInfo::getAddressComputationCost(Type *Tp, ScalarEvolution *SE,
const SCEV *Ptr) const { … }
InstructionCost TargetTransformInfo::getMemcpyCost(const Instruction *I) const { … }
uint64_t TargetTransformInfo::getMaxMemIntrinsicInlineSizeThreshold() const { … }
InstructionCost TargetTransformInfo::getArithmeticReductionCost(
unsigned Opcode, VectorType *Ty, std::optional<FastMathFlags> FMF,
TTI::TargetCostKind CostKind) const { … }
InstructionCost TargetTransformInfo::getMinMaxReductionCost(
Intrinsic::ID IID, VectorType *Ty, FastMathFlags FMF,
TTI::TargetCostKind CostKind) const { … }
InstructionCost TargetTransformInfo::getExtendedReductionCost(
unsigned Opcode, bool IsUnsigned, Type *ResTy, VectorType *Ty,
FastMathFlags FMF, TTI::TargetCostKind CostKind) const { … }
InstructionCost TargetTransformInfo::getMulAccReductionCost(
bool IsUnsigned, Type *ResTy, VectorType *Ty,
TTI::TargetCostKind CostKind) const { … }
InstructionCost
TargetTransformInfo::getCostOfKeepingLiveOverCall(ArrayRef<Type *> Tys) const { … }
bool TargetTransformInfo::getTgtMemIntrinsic(IntrinsicInst *Inst,
MemIntrinsicInfo &Info) const { … }
unsigned TargetTransformInfo::getAtomicMemIntrinsicMaxElementSize() const { … }
Value *TargetTransformInfo::getOrCreateResultFromMemIntrinsic(
IntrinsicInst *Inst, Type *ExpectedType) const { … }
Type *TargetTransformInfo::getMemcpyLoopLoweringType(
LLVMContext &Context, Value *Length, unsigned SrcAddrSpace,
unsigned DestAddrSpace, Align SrcAlign, Align DestAlign,
std::optional<uint32_t> AtomicElementSize) const { … }
void TargetTransformInfo::getMemcpyLoopResidualLoweringType(
SmallVectorImpl<Type *> &OpsOut, LLVMContext &Context,
unsigned RemainingBytes, unsigned SrcAddrSpace, unsigned DestAddrSpace,
Align SrcAlign, Align DestAlign,
std::optional<uint32_t> AtomicCpySize) const { … }
bool TargetTransformInfo::areInlineCompatible(const Function *Caller,
const Function *Callee) const { … }
unsigned
TargetTransformInfo::getInlineCallPenalty(const Function *F,
const CallBase &Call,
unsigned DefaultCallPenalty) const { … }
bool TargetTransformInfo::areTypesABICompatible(
const Function *Caller, const Function *Callee,
const ArrayRef<Type *> &Types) const { … }
bool TargetTransformInfo::isIndexedLoadLegal(MemIndexedMode Mode,
Type *Ty) const { … }
bool TargetTransformInfo::isIndexedStoreLegal(MemIndexedMode Mode,
Type *Ty) const { … }
unsigned TargetTransformInfo::getLoadStoreVecRegBitWidth(unsigned AS) const { … }
bool TargetTransformInfo::isLegalToVectorizeLoad(LoadInst *LI) const { … }
bool TargetTransformInfo::isLegalToVectorizeStore(StoreInst *SI) const { … }
bool TargetTransformInfo::isLegalToVectorizeLoadChain(
unsigned ChainSizeInBytes, Align Alignment, unsigned AddrSpace) const { … }
bool TargetTransformInfo::isLegalToVectorizeStoreChain(
unsigned ChainSizeInBytes, Align Alignment, unsigned AddrSpace) const { … }
bool TargetTransformInfo::isLegalToVectorizeReduction(
const RecurrenceDescriptor &RdxDesc, ElementCount VF) const { … }
bool TargetTransformInfo::isElementTypeLegalForScalableVector(Type *Ty) const { … }
unsigned TargetTransformInfo::getLoadVectorFactor(unsigned VF,
unsigned LoadSize,
unsigned ChainSizeInBytes,
VectorType *VecTy) const { … }
unsigned TargetTransformInfo::getStoreVectorFactor(unsigned VF,
unsigned StoreSize,
unsigned ChainSizeInBytes,
VectorType *VecTy) const { … }
bool TargetTransformInfo::preferFixedOverScalableIfEqualCost() const { … }
bool TargetTransformInfo::preferInLoopReduction(unsigned Opcode, Type *Ty,
ReductionFlags Flags) const { … }
bool TargetTransformInfo::preferPredicatedReductionSelect(
unsigned Opcode, Type *Ty, ReductionFlags Flags) const { … }
bool TargetTransformInfo::preferEpilogueVectorization() const { … }
TargetTransformInfo::VPLegalization
TargetTransformInfo::getVPLegalizationStrategy(const VPIntrinsic &VPI) const { … }
bool TargetTransformInfo::hasArmWideBranch(bool Thumb) const { … }
unsigned TargetTransformInfo::getMaxNumArgs() const { … }
bool TargetTransformInfo::shouldExpandReduction(const IntrinsicInst *II) const { … }
TargetTransformInfo::ReductionShuffle
TargetTransformInfo::getPreferredExpandedReductionShuffle(
const IntrinsicInst *II) const { … }
unsigned TargetTransformInfo::getGISelRematGlobalCost() const { … }
unsigned TargetTransformInfo::getMinTripCountTailFoldingThreshold() const { … }
bool TargetTransformInfo::supportsScalableVectors() const { … }
bool TargetTransformInfo::enableScalableVectorization() const { … }
bool TargetTransformInfo::hasActiveVectorLength(unsigned Opcode, Type *DataType,
Align Alignment) const { … }
TargetTransformInfo::Concept::~Concept() = default;
TargetIRAnalysis::TargetIRAnalysis() : … { … }
TargetIRAnalysis::TargetIRAnalysis(
std::function<Result(const Function &)> TTICallback)
: … { … }
TargetIRAnalysis::Result TargetIRAnalysis::run(const Function &F,
FunctionAnalysisManager &) { … }
AnalysisKey TargetIRAnalysis::Key;
TargetIRAnalysis::Result TargetIRAnalysis::getDefaultTTI(const Function &F) { … }
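// Example (hedged sketch): clients normally obtain a TargetTransformInfo
// through the analysis framework rather than constructing one directly, e.g.
// in a new-PM function pass:
//   TargetTransformInfo &TTI = FAM.getResult<TargetIRAnalysis>(F);
// or from a legacy pass that declared the dependency:
//   TargetTransformInfo &TTI =
//       getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);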
INITIALIZE_PASS(…)
char TargetTransformInfoWrapperPass::ID = …;
void TargetTransformInfoWrapperPass::anchor() { … }
TargetTransformInfoWrapperPass::TargetTransformInfoWrapperPass()
: … { … }
TargetTransformInfoWrapperPass::TargetTransformInfoWrapperPass(
TargetIRAnalysis TIRA)
: … { … }
TargetTransformInfo &TargetTransformInfoWrapperPass::getTTI(const Function &F) { … }
ImmutablePass *
llvm::createTargetTransformInfoWrapperPass(TargetIRAnalysis TIRA) { … }