#include "X86TargetTransformInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/BasicTTIImpl.h"
#include "llvm/CodeGen/CostTable.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Support/Debug.h"
#include <optional>
using namespace llvm;
#define DEBUG_TYPE …
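// Costs for one table entry across all TTI cost kinds (reciprocal
// throughput, latency, code size, size-and-latency). The cost tables below
// are keyed on ISD opcode and legalized MVT.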
struct CostKindCosts { … };
using CostKindTblEntry = CostTblEntryT<CostKindCosts>;
using TypeConversionCostKindTblEntry = TypeConversionCostTblEntryT<CostKindCosts>;
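// How well the target supports population count for a TyWidth-bit integer:
// fast hardware POPCNT when the subtarget has it, a software sequence
// otherwise.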
TargetTransformInfo::PopcntSupportKind
X86TTIImpl::getPopcntSupport(unsigned TyWidth) { … }
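// Per-level cache size and associativity, consumed by cache-aware loop
// transformations.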
std::optional<unsigned> X86TTIImpl::getCacheSize(
TargetTransformInfo::CacheLevel Level) const { … }
std::optional<unsigned> X86TTIImpl::getCacheAssociativity(
TargetTransformInfo::CacheLevel Level) const { … }
unsigned X86TTIImpl::getNumberOfRegisters(unsigned ClassID) const { … }
bool X86TTIImpl::hasConditionalLoadStoreForType(Type *Ty) const { … }
TypeSize
X86TTIImpl::getRegisterBitWidth(TargetTransformInfo::RegisterKind K) const { … }
unsigned X86TTIImpl::getLoadStoreVecRegBitWidth(unsigned) const { … }
unsigned X86TTIImpl::getMaxInterleaveFactor(ElementCount VF) { … }
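// Arithmetic costs are looked up in per-feature-level tables (AVX-512,
// AVX2, AVX, SSE4.2, ...) keyed on ISD opcode and the legalized MVT,
// falling back to the base implementation when no entry matches.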
InstructionCost X86TTIImpl::getArithmeticInstrCost(
unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
TTI::OperandValueInfo Op1Info, TTI::OperandValueInfo Op2Info,
ArrayRef<const Value *> Args,
const Instruction *CxtI) { … }
InstructionCost
X86TTIImpl::getAltInstrCost(VectorType *VecTy, unsigned Opcode0,
unsigned Opcode1, const SmallBitVector &OpcodeMask,
TTI::TargetCostKind CostKind) const { … }
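// Shuffle cost depends on the shuffle kind (broadcast, reverse, select,
// transpose, splice, single- or two-source permute) and on how BaseTp
// legalizes.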
InstructionCost X86TTIImpl::getShuffleCost(
TTI::ShuffleKind Kind, VectorType *BaseTp, ArrayRef<int> Mask,
TTI::TargetCostKind CostKind, int Index, VectorType *SubTp,
ArrayRef<const Value *> Args, const Instruction *CxtI) { … }
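// Conversion costs (truncate, extend, int<->fp, fp rounding) are
// table-driven per subtarget feature level.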
InstructionCost X86TTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst,
Type *Src,
TTI::CastContextHint CCH,
TTI::TargetCostKind CostKind,
const Instruction *I) { … }
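// Compare and select costs, with predicate-specific entries for vector
// compares.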
InstructionCost X86TTIImpl::getCmpSelInstrCost(
unsigned Opcode, Type *ValTy, Type *CondTy, CmpInst::Predicate VecPred,
TTI::TargetCostKind CostKind, TTI::OperandValueInfo Op1Info,
TTI::OperandValueInfo Op2Info, const Instruction *I) { … }
unsigned X86TTIImpl::getAtomicMemIntrinsicMaxElementSize() const { … }
InstructionCost
X86TTIImpl::getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
TTI::TargetCostKind CostKind) { … }
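// Cost of a single insertelement/extractelement, cheaper for a constant
// lane than for a variable one.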
InstructionCost X86TTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val,
TTI::TargetCostKind CostKind,
unsigned Index, Value *Op0,
Value *Op1) { … }
InstructionCost
X86TTIImpl::getScalarizationOverhead(VectorType *Ty, const APInt &DemandedElts,
bool Insert, bool Extract,
TTI::TargetCostKind CostKind) { … }
InstructionCost
X86TTIImpl::getReplicationShuffleCost(Type *EltTy, int ReplicationFactor,
int VF, const APInt &DemandedDstElts,
TTI::TargetCostKind CostKind) { … }
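// Plain load/store cost, accounting for types that legalize to multiple
// registers and for misaligned accesses.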
InstructionCost X86TTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src,
MaybeAlign Alignment,
unsigned AddressSpace,
TTI::TargetCostKind CostKind,
TTI::OperandValueInfo OpInfo,
const Instruction *I) { … }
InstructionCost
X86TTIImpl::getMaskedMemoryOpCost(unsigned Opcode, Type *SrcTy, Align Alignment,
unsigned AddressSpace,
TTI::TargetCostKind CostKind) { … }
InstructionCost
X86TTIImpl::getPointersChainCost(ArrayRef<const Value *> Ptrs,
const Value *Base,
const TTI::PointersChainInfo &Info,
Type *AccessTy, TTI::TargetCostKind CostKind) { … }
InstructionCost X86TTIImpl::getAddressComputationCost(Type *Ty,
ScalarEvolution *SE,
const SCEV *Ptr) { … }
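// Horizontal reduction costs: arithmetic reductions first, then min/max
// reductions built on the corresponding intrinsics.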
InstructionCost
X86TTIImpl::getArithmeticReductionCost(unsigned Opcode, VectorType *ValTy,
std::optional<FastMathFlags> FMF,
TTI::TargetCostKind CostKind) { … }
InstructionCost X86TTIImpl::getMinMaxCost(Intrinsic::ID IID, Type *Ty,
TTI::TargetCostKind CostKind,
FastMathFlags FMF) { … }
InstructionCost
X86TTIImpl::getMinMaxReductionCost(Intrinsic::ID IID, VectorType *ValTy,
FastMathFlags FMF,
TTI::TargetCostKind CostKind) { … }
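// Immediate costs. getIntImmCostInst returns TCC_Free for immediates that
// fold directly into the instruction's encoding at operand index Idx.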
InstructionCost X86TTIImpl::getIntImmCost(int64_t Val) { … }
InstructionCost X86TTIImpl::getIntImmCost(const APInt &Imm, Type *Ty,
TTI::TargetCostKind CostKind) { … }
InstructionCost X86TTIImpl::getIntImmCostInst(unsigned Opcode, unsigned Idx,
const APInt &Imm, Type *Ty,
TTI::TargetCostKind CostKind,
Instruction *Inst) { … }
InstructionCost X86TTIImpl::getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx,
const APInt &Imm, Type *Ty,
TTI::TargetCostKind CostKind) { … }
InstructionCost X86TTIImpl::getCFInstrCost(unsigned Opcode,
TTI::TargetCostKind CostKind,
const Instruction *I) { … }
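// Gather/scatter modelling: per-element overhead for subtargets with slow
// gathers, the cost of a native gather/scatter (getGSVectorCost), and the
// choice between native and scalarized forms (getGatherScatterOpCost).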
int X86TTIImpl::getGatherOverhead() const { … }
int X86TTIImpl::getScatterOverhead() const { … }
InstructionCost X86TTIImpl::getGSVectorCost(unsigned Opcode,
TTI::TargetCostKind CostKind,
Type *SrcVTy, const Value *Ptr,
Align Alignment,
unsigned AddressSpace) { … }
InstructionCost X86TTIImpl::getGatherScatterOpCost(
unsigned Opcode, Type *SrcVTy, const Value *Ptr, bool VariableMask,
Align Alignment, TTI::TargetCostKind CostKind,
const Instruction *I = nullptr) { … }
bool X86TTIImpl::isLSRCostLess(const TargetTransformInfo::LSRCost &C1,
const TargetTransformInfo::LSRCost &C2) { … }
bool X86TTIImpl::canMacroFuseCmp() { … }
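// Legality predicates for masked and non-temporal memory operations.
// Masked loads/stores of 32/64-bit elements need AVX, 8/16-bit elements
// need AVX-512 BW; expand/compress and scatter need AVX-512.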
bool X86TTIImpl::isLegalMaskedLoad(Type *DataTy, Align Alignment) { … }
bool X86TTIImpl::isLegalMaskedStore(Type *DataType, Align Alignment) { … }
bool X86TTIImpl::isLegalNTLoad(Type *DataType, Align Alignment) { … }
bool X86TTIImpl::isLegalNTStore(Type *DataType, Align Alignment) { … }
bool X86TTIImpl::isLegalBroadcastLoad(Type *ElementTy,
ElementCount NumElements) const { … }
bool X86TTIImpl::isLegalMaskedExpandLoad(Type *DataTy, Align Alignment) { … }
bool X86TTIImpl::isLegalMaskedCompressStore(Type *DataTy, Align Alignment) { … }
bool X86TTIImpl::supportsGather() const { … }
bool X86TTIImpl::forceScalarizeMaskedGather(VectorType *VTy, Align Alignment) { … }
bool X86TTIImpl::isLegalMaskedGatherScatter(Type *DataTy, Align Alignment) { … }
bool X86TTIImpl::isLegalMaskedGather(Type *DataTy, Align Alignment) { … }
bool X86TTIImpl::isLegalAltInstr(VectorType *VecTy, unsigned Opcode0,
unsigned Opcode1,
const SmallBitVector &OpcodeMask) const { … }
bool X86TTIImpl::isLegalMaskedScatter(Type *DataType, Align Alignment) { … }
bool X86TTIImpl::hasDivRemOp(Type *DataType, bool IsSigned) { … }
bool X86TTIImpl::isExpensiveToSpeculativelyExecute(const Instruction *I) { … }
bool X86TTIImpl::isFCmpOrdCheaperThanFCmpZero(Type *Ty) { … }
bool X86TTIImpl::areInlineCompatible(const Function *Caller,
const Function *Callee) const { … }
bool X86TTIImpl::areTypesABICompatible(const Function *Caller,
const Function *Callee,
const ArrayRef<Type *> &Types) const { … }
X86TTIImpl::TTI::MemCmpExpansionOptions
X86TTIImpl::enableMemCmpExpansion(bool OptSize, bool IsZeroCmp) const { … }
bool X86TTIImpl::prefersVectorizedAddressing() const { … }
bool X86TTIImpl::supportsEfficientVectorElementLoadStore() const { … }
bool X86TTIImpl::enableInterleavedAccessVectorization() { … }
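// Interleaved (strided) access costs: a dedicated AVX-512 model built from
// shuffle and memory-op costs, plus a table-driven path for earlier feature
// levels.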
InstructionCost X86TTIImpl::getInterleavedMemoryOpCostAVX512(
unsigned Opcode, FixedVectorType *VecTy, unsigned Factor,
ArrayRef<unsigned> Indices, Align Alignment, unsigned AddressSpace,
TTI::TargetCostKind CostKind, bool UseMaskForCond, bool UseMaskForGaps) { … }
InstructionCost X86TTIImpl::getInterleavedMemoryOpCost(
unsigned Opcode, Type *BaseTy, unsigned Factor, ArrayRef<unsigned> Indices,
Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
bool UseMaskForCond, bool UseMaskForGaps) { … }
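// Cost of folding a scaled-index addressing mode (base + scale*index +
// offset); essentially free when the mode is legal for the access type.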
InstructionCost X86TTIImpl::getScalingFactorCost(Type *Ty, GlobalValue *BaseGV,
StackOffset BaseOffset,
bool HasBaseReg, int64_t Scale,
unsigned AddrSpace) const { … }
InstructionCost X86TTIImpl::getBranchMispredictPenalty() const { … }