#include "X86InstrInfo.h"
#include "X86.h"
#include "X86InstrBuilder.h"
#include "X86InstrFoldTables.h"
#include "X86MachineFunctionInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/Sequence.h"
#include "llvm/CodeGen/LiveIntervals.h"
#include "llvm/CodeGen/LivePhysRegs.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineCombinerPattern.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Module.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetOptions.h"
#include <optional>
using namespace llvm;
#define DEBUG_TYPE …
#define GET_INSTRINFO_CTOR_DTOR
#include "X86GenInstrInfo.inc"
static cl::opt<bool>
NoFusing("disable-spill-fusing",
cl::desc("Disable fusing of spill code into instructions"),
cl::Hidden);
static cl::opt<bool>
PrintFailedFusing("print-failed-fuse-candidates",
cl::desc("Print instructions that the allocator wants to"
" fuse, but the X86 backend currently can't"),
cl::Hidden);
static cl::opt<bool>
ReMatPICStubLoad("remat-pic-stub-load",
cl::desc("Re-materialize load from stub in PIC mode"),
cl::init(false), cl::Hidden);
static cl::opt<unsigned>
PartialRegUpdateClearance("partial-reg-update-clearance",
cl::desc("Clearance between two register writes "
"for inserting XOR to avoid partial "
"register update"),
cl::init(64), cl::Hidden);
static cl::opt<unsigned> UndefRegClearance(
"undef-reg-clearance",
cl::desc("How many idle instructions we would like before "
"certain undef register reads"),
cl::init(128), cl::Hidden);
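// The two clearance thresholds above are consulted by
// getPartialRegUpdateClearance() and getUndefRegClearance() below when
// deciding whether a dependency-breaking instruction is worth inserting.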
void X86InstrInfo::anchor() { … }
X86InstrInfo::X86InstrInfo(X86Subtarget &STI)
: … { … }
const TargetRegisterClass *
X86InstrInfo::getRegClass(const MCInstrDesc &MCID, unsigned OpNum,
const TargetRegisterInfo *TRI,
const MachineFunction &MF) const { … }
bool X86InstrInfo::isCoalescableExtInstr(const MachineInstr &MI,
Register &SrcReg, Register &DstReg,
unsigned &SubIdx) const { … }
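// isDataInvariant/isDataInvariantLoad report, roughly, whether an
// instruction's timing and side effects are independent of the values in its
// register operands. Speculative load hardening relies on this to know which
// instructions it may leave on a misspeculated path without leaking data
// through timing.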
bool X86InstrInfo::isDataInvariant(MachineInstr &MI) { … }
bool X86InstrInfo::isDataInvariantLoad(MachineInstr &MI) { … }
int X86InstrInfo::getSPAdjust(const MachineInstr &MI) const { … }
bool X86InstrInfo::isFrameOperand(const MachineInstr &MI, unsigned int Op,
int &FrameIndex) const { … }
static bool isFrameLoadOpcode(int Opcode, unsigned &MemBytes) { … }
static bool isFrameStoreOpcode(int Opcode, unsigned &MemBytes) { … }
Register X86InstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
int &FrameIndex) const { … }
Register X86InstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
int &FrameIndex,
unsigned &MemBytes) const { … }
Register X86InstrInfo::isLoadFromStackSlotPostFE(const MachineInstr &MI,
int &FrameIndex) const { … }
Register X86InstrInfo::isStoreToStackSlot(const MachineInstr &MI,
int &FrameIndex) const { … }
Register X86InstrInfo::isStoreToStackSlot(const MachineInstr &MI,
int &FrameIndex,
unsigned &MemBytes) const { … }
Register X86InstrInfo::isStoreToStackSlotPostFE(const MachineInstr &MI,
int &FrameIndex) const { … }
static bool regIsPICBase(Register BaseReg, const MachineRegisterInfo &MRI) { … }
bool X86InstrInfo::isReallyTriviallyReMaterializable(
const MachineInstr &MI) const { … }
void X86InstrInfo::reMaterialize(MachineBasicBlock &MBB,
MachineBasicBlock::iterator I,
Register DestReg, unsigned SubIdx,
const MachineInstr &Orig,
const TargetRegisterInfo &TRI) const { … }
bool X86InstrInfo::hasLiveCondCodeDef(MachineInstr &MI) const { … }
inline static unsigned getTruncatedShiftCount(const MachineInstr &MI,
unsigned ShiftAmtOperandIdx) { … }
inline static bool isTruncatedShiftCountForLEA(unsigned ShAmt) { … }
static bool findRedundantFlagInstr(MachineInstr &CmpInstr,
MachineInstr &CmpValDefInstr,
const MachineRegisterInfo *MRI,
MachineInstr **AndInstr,
const TargetRegisterInfo *TRI,
bool &NoSignFlag, bool &ClearsOverflowFlag) { … }
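// Two-address to three-address conversion. classifyLEAReg() checks whether a
// source register can legally feed an LEA (possibly through a super-register),
// and the convertToThreeAddress*() routines rewrite instructions such as
// register-register ADD/SHL/INC into LEA so the register allocator gets a
// non-destructive form. Illustrative effect (assembly, not taken from this
// file):
//   addl $8, %ecx        ->   leal 8(%ecx), %edx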
bool X86InstrInfo::classifyLEAReg(MachineInstr &MI, const MachineOperand &Src,
unsigned Opc, bool AllowSP, Register &NewSrc,
bool &isKill, MachineOperand &ImplicitOp,
LiveVariables *LV, LiveIntervals *LIS) const { … }
MachineInstr *X86InstrInfo::convertToThreeAddressWithLEA(unsigned MIOpc,
MachineInstr &MI,
LiveVariables *LV,
LiveIntervals *LIS,
bool Is8BitOp) const { … }
MachineInstr *X86InstrInfo::convertToThreeAddress(MachineInstr &MI,
LiveVariables *LV,
LiveIntervals *LIS) const { … }
static unsigned getThreeSrcCommuteCase(uint64_t TSFlags, unsigned SrcOpIdx1,
unsigned SrcOpIdx2) { … }
unsigned X86InstrInfo::getFMA3OpcodeToCommuteOperands(
const MachineInstr &MI, unsigned SrcOpIdx1, unsigned SrcOpIdx2,
const X86InstrFMA3Group &FMA3Group) const { … }
static void commuteVPTERNLOG(MachineInstr &MI, unsigned SrcOpIdx1,
unsigned SrcOpIdx2) { … }
static bool isCommutableVPERMV3Instruction(unsigned Opcode) { … }
static unsigned getCommutedVPERMV3Opcode(unsigned Opcode) { … }
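// Commuting operands on X86 frequently requires switching to a different
// opcode rather than just swapping operand indices: FMA3 rotates between its
// 132/213/231 variants, VPTERNLOG needs its immediate rewritten, and
// VPERMI2/VPERMT2 swap into each other. The helpers above compute the
// replacement opcode or immediate; commuteInstructionImpl() applies it.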
MachineInstr *X86InstrInfo::commuteInstructionImpl(MachineInstr &MI, bool NewMI,
unsigned OpIdx1,
unsigned OpIdx2) const { … }
bool X86InstrInfo::findThreeSrcCommutedOpIndices(const MachineInstr &MI,
unsigned &SrcOpIdx1,
unsigned &SrcOpIdx2,
bool IsIntrinsic) const { … }
bool X86InstrInfo::findCommutedOpIndices(const MachineInstr &MI,
unsigned &SrcOpIdx1,
unsigned &SrcOpIdx2) const { … }
static bool isConvertibleLEA(MachineInstr *MI) { … }
bool X86InstrInfo::hasCommutePreference(MachineInstr &MI, bool &Commute) const { … }
int X86::getCondSrcNoFromDesc(const MCInstrDesc &MCID) { … }
X86::CondCode X86::getCondFromMI(const MachineInstr &MI) { … }
X86::CondCode X86::getCondFromBranch(const MachineInstr &MI) { … }
X86::CondCode X86::getCondFromSETCC(const MachineInstr &MI) { … }
X86::CondCode X86::getCondFromCMov(const MachineInstr &MI) { … }
X86::CondCode X86::getCondFromCFCMov(const MachineInstr &MI) { … }
X86::CondCode X86::getCondFromCCMP(const MachineInstr &MI) { … }
int X86::getCCMPCondFlagsFromCondCode(X86::CondCode CC) { … }
#define GET_X86_NF_TRANSFORM_TABLE
#define GET_X86_ND2NONND_TABLE
#include "X86GenInstrMapping.inc"
static unsigned getNewOpcFromTable(ArrayRef<X86TableEntry> Table,
unsigned Opc) { … }
unsigned X86::getNFVariant(unsigned Opc) { … }
unsigned X86::getNonNDVariant(unsigned Opc) { … }
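// GetOppositeBranchCondition() inverts a condition code, e.g. COND_E <-> COND_NE
// and COND_L <-> COND_GE, while getSwappedCondition() returns the condition to
// use after the compared operands have been swapped (e.g. COND_L -> COND_G).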
X86::CondCode X86::GetOppositeBranchCondition(X86::CondCode CC) { … }
static X86::CondCode getSwappedCondition(X86::CondCode CC) { … }
std::pair<X86::CondCode, bool>
X86::getX86ConditionCode(CmpInst::Predicate Predicate) { … }
unsigned X86::getCMovOpcode(unsigned RegBytes, bool HasMemoryOperand,
bool HasNDD) { … }
unsigned X86::getVPCMPImmForCond(ISD::CondCode CC) { … }
unsigned X86::getSwappedVPCMPImm(unsigned Imm) { … }
unsigned X86::getSwappedVPCOMImm(unsigned Imm) { … }
unsigned X86::getSwappedVCMPImm(unsigned Imm) { … }
unsigned X86::getVectorRegisterWidth(const MCOperandInfo &Info) { … }
static bool isX87Reg(unsigned Reg) { … }
bool X86::isX87Instruction(MachineInstr &MI) { … }
int X86::getFirstAddrOperandIdx(const MachineInstr &MI) { … }
const Constant *X86::getConstantFromPool(const MachineInstr &MI,
unsigned OpNo) { … }
bool X86InstrInfo::isUnconditionalTailCall(const MachineInstr &MI) const { … }
bool X86InstrInfo::canMakeTailCallConditional(
SmallVectorImpl<MachineOperand> &BranchCond,
const MachineInstr &TailCall) const { … }
void X86InstrInfo::replaceBranchWithTailCall(
MachineBasicBlock &MBB, SmallVectorImpl<MachineOperand> &BranchCond,
const MachineInstr &TailCall) const { … }
static MachineBasicBlock *getFallThroughMBB(MachineBasicBlock *MBB,
MachineBasicBlock *TBB) { … }
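// Branch analysis follows the generic TargetInstrInfo contract: on success TBB
// is the taken destination, FBB the false destination (null when the block
// falls through implicitly), and Cond carries the X86 condition code as a
// single immediate operand. With AllowModify set, dead terminators may be
// erased while analyzing.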
bool X86InstrInfo::analyzeBranchImpl(
MachineBasicBlock &MBB, MachineBasicBlock *&TBB, MachineBasicBlock *&FBB,
SmallVectorImpl<MachineOperand> &Cond,
SmallVectorImpl<MachineInstr *> &CondBranches, bool AllowModify) const { … }
bool X86InstrInfo::analyzeBranch(MachineBasicBlock &MBB,
MachineBasicBlock *&TBB,
MachineBasicBlock *&FBB,
SmallVectorImpl<MachineOperand> &Cond,
bool AllowModify) const { … }
static int getJumpTableIndexFromAddr(const MachineInstr &MI) { … }
static int getJumpTableIndexFromReg(const MachineRegisterInfo &MRI,
Register Reg) { … }
int X86InstrInfo::getJumpTableIndex(const MachineInstr &MI) const { … }
bool X86InstrInfo::analyzeBranchPredicate(MachineBasicBlock &MBB,
MachineBranchPredicate &MBP,
bool AllowModify) const { … }
unsigned X86InstrInfo::removeBranch(MachineBasicBlock &MBB,
int *BytesRemoved) const { … }
unsigned X86InstrInfo::insertBranch(MachineBasicBlock &MBB,
MachineBasicBlock *TBB,
MachineBasicBlock *FBB,
ArrayRef<MachineOperand> Cond,
const DebugLoc &DL, int *BytesAdded) const { … }
bool X86InstrInfo::canInsertSelect(const MachineBasicBlock &MBB,
ArrayRef<MachineOperand> Cond,
Register DstReg, Register TrueReg,
Register FalseReg, int &CondCycles,
int &TrueCycles, int &FalseCycles) const { … }
void X86InstrInfo::insertSelect(MachineBasicBlock &MBB,
MachineBasicBlock::iterator I,
const DebugLoc &DL, Register DstReg,
ArrayRef<MachineOperand> Cond, Register TrueReg,
Register FalseReg) const { … }
static bool isHReg(unsigned Reg) { … }
static unsigned CopyToFromAsymmetricReg(unsigned DestReg, unsigned SrcReg,
const X86Subtarget &Subtarget) { … }
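// copyPhysReg() selects the natural move for the register class: MOV8rr/
// MOV16rr/MOV32rr/MOV64rr for GPRs, MOVAPS/VMOVAPS-style moves for vector
// registers, and KMOV* for AVX-512 mask registers. CopyToFromAsymmetricReg()
// handles cross-class cases (e.g. GPR <-> MMX or mask registers) where no
// single register class applies.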
void X86InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI,
const DebugLoc &DL, MCRegister DestReg,
MCRegister SrcReg, bool KillSrc,
bool RenamableDest, bool RenamableSrc) const { … }
std::optional<DestSourcePair>
X86InstrInfo::isCopyInstrImpl(const MachineInstr &MI) const { … }
static unsigned getLoadStoreOpcodeForFP16(bool Load, const X86Subtarget &STI) { … }
static unsigned getLoadStoreRegOpcode(Register Reg,
const TargetRegisterClass *RC,
bool IsStackAligned,
const X86Subtarget &STI, bool Load) { … }
std::optional<ExtAddrMode>
X86InstrInfo::getAddrModeFromMemoryOp(const MachineInstr &MemI,
const TargetRegisterInfo *TRI) const { … }
bool X86InstrInfo::verifyInstruction(const MachineInstr &MI,
StringRef &ErrInfo) const { … }
bool X86InstrInfo::getConstValDefinedInReg(const MachineInstr &MI,
const Register Reg,
int64_t &ImmVal) const { … }
bool X86InstrInfo::preservesZeroValueInReg(
const MachineInstr *MI, const Register NullValueReg,
const TargetRegisterInfo *TRI) const { … }
bool X86InstrInfo::getMemOperandsWithOffsetWidth(
const MachineInstr &MemOp, SmallVectorImpl<const MachineOperand *> &BaseOps,
int64_t &Offset, bool &OffsetIsScalable, LocationSize &Width,
const TargetRegisterInfo *TRI) const { … }
static unsigned getStoreRegOpcode(Register SrcReg,
const TargetRegisterClass *RC,
bool IsStackAligned,
const X86Subtarget &STI) { … }
static unsigned getLoadRegOpcode(Register DestReg,
const TargetRegisterClass *RC,
bool IsStackAligned, const X86Subtarget &STI) { … }
static bool isAMXOpcode(unsigned Opc) { … }
void X86InstrInfo::loadStoreTileReg(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI,
unsigned Opc, Register Reg, int FrameIdx,
bool isKill) const { … }
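// Spill and reload: storeRegToStackSlot()/loadRegFromStackSlot() pick an
// opcode via getStoreRegOpcode()/getLoadRegOpcode() based on register class,
// stack alignment and subtarget features, then append the frame-index address.
// AMX tile registers are routed through loadStoreTileReg() above, since tiles
// need dedicated tile load/store instructions.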
void X86InstrInfo::storeRegToStackSlot(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register SrcReg,
bool isKill, int FrameIdx, const TargetRegisterClass *RC,
const TargetRegisterInfo *TRI, Register VReg) const { … }
void X86InstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI,
Register DestReg, int FrameIdx,
const TargetRegisterClass *RC,
const TargetRegisterInfo *TRI,
Register VReg) const { … }
bool X86InstrInfo::analyzeCompare(const MachineInstr &MI, Register &SrcReg,
Register &SrcReg2, int64_t &CmpMask,
int64_t &CmpValue) const { … }
bool X86InstrInfo::isRedundantFlagInstr(const MachineInstr &FlagI,
Register SrcReg, Register SrcReg2,
int64_t ImmMask, int64_t ImmValue,
const MachineInstr &OI, bool *IsSwapped,
int64_t *ImmDelta) const { … }
inline static bool isDefConvertible(const MachineInstr &MI, bool &NoSignFlag,
bool &ClearsOverflowFlag) { … }
static X86::CondCode isUseDefConvertible(const MachineInstr &MI) { … }
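// optimizeCompareInstr() removes a CMP/TEST whose result is already available
// in EFLAGS, typically because an earlier arithmetic instruction defined the
// same flags. Illustrative effect (assembly, not taken from this file):
//   subl %esi, %edi          subl %esi, %edi
//   testl %edi, %edi    ->   jne .LBB0_2
//   jne .LBB0_2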
bool X86InstrInfo::optimizeCompareInstr(MachineInstr &CmpInstr, Register SrcReg,
Register SrcReg2, int64_t CmpMask,
int64_t CmpValue,
const MachineRegisterInfo *MRI) const { … }
MachineInstr *X86InstrInfo::optimizeLoadInstr(MachineInstr &MI,
const MachineRegisterInfo *MRI,
Register &FoldAsLoadDefReg,
MachineInstr *&DefMI) const { … }
static bool canConvert2Copy(unsigned Opc) { … }
static unsigned convertALUrr2ALUri(unsigned Opc) { … }
bool X86InstrInfo::foldImmediateImpl(MachineInstr &UseMI, MachineInstr *DefMI,
Register Reg, int64_t ImmVal,
MachineRegisterInfo *MRI,
bool MakeChange) const { … }
bool X86InstrInfo::foldImmediate(MachineInstr &UseMI, MachineInstr &DefMI,
Register Reg, MachineRegisterInfo *MRI) const { … }
static bool Expand2AddrUndef(MachineInstrBuilder &MIB,
const MCInstrDesc &Desc) { … }
static bool Expand2AddrKreg(MachineInstrBuilder &MIB, const MCInstrDesc &Desc,
Register Reg) { … }
static bool expandMOV32r1(MachineInstrBuilder &MIB, const TargetInstrInfo &TII,
bool MinusOne) { … }
static bool ExpandMOVImmSExti8(MachineInstrBuilder &MIB,
const TargetInstrInfo &TII,
const X86Subtarget &Subtarget) { … }
static void expandLoadStackGuard(MachineInstrBuilder &MIB,
const TargetInstrInfo &TII) { … }
static bool expandXorFP(MachineInstrBuilder &MIB, const TargetInstrInfo &TII) { … }
static bool expandNOVLXLoad(MachineInstrBuilder &MIB,
const TargetRegisterInfo *TRI,
const MCInstrDesc &LoadDesc,
const MCInstrDesc &BroadcastDesc, unsigned SubIdx) { … }
static bool expandNOVLXStore(MachineInstrBuilder &MIB,
const TargetRegisterInfo *TRI,
const MCInstrDesc &StoreDesc,
const MCInstrDesc &ExtractDesc, unsigned SubIdx) { … }
static bool expandSHXDROT(MachineInstrBuilder &MIB, const MCInstrDesc &Desc) { … }
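// expandPostRAPseudo() lowers X86 pseudos that survive register allocation
// into real machine instructions, using the expand* helpers above: for example
// MOV32r0 becomes a self-XOR, the setcc-carry pseudos become SBB, and AVX-512
// loads/stores on targets without VLX are widened to ZMM operations.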
bool X86InstrInfo::expandPostRAPseudo(MachineInstr &MI) const { … }
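// Partial register update handling: instructions such as CVTSI2SS write only
// the low element of an XMM register, creating a false dependence on its
// previous value. If no other write to the register is found within
// PartialRegUpdateClearance instructions, breakPartialRegDependency() inserts
// a dependency-breaking idiom, roughly:
//   xorps %xmm0, %xmm0
//   cvtsi2ss %eax, %xmm0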
static bool hasPartialRegUpdate(unsigned Opcode, const X86Subtarget &Subtarget,
bool ForLoadFold = false) { … }
unsigned X86InstrInfo::getPartialRegUpdateClearance(
const MachineInstr &MI, unsigned OpNum,
const TargetRegisterInfo *TRI) const { … }
static bool hasUndefRegUpdate(unsigned Opcode, unsigned OpNum,
bool ForLoadFold = false) { … }
unsigned
X86InstrInfo::getUndefRegClearance(const MachineInstr &MI, unsigned OpNum,
const TargetRegisterInfo *TRI) const { … }
void X86InstrInfo::breakPartialRegDependency(
MachineInstr &MI, unsigned OpNum, const TargetRegisterInfo *TRI) const { … }
static void addOperands(MachineInstrBuilder &MIB, ArrayRef<MachineOperand> MOs,
int PtrOffset = 0) { … }
static void updateOperandRegConstraints(MachineFunction &MF,
MachineInstr &NewMI,
const TargetInstrInfo &TII) { … }
static MachineInstr *fuseTwoAddrInst(MachineFunction &MF, unsigned Opcode,
ArrayRef<MachineOperand> MOs,
MachineBasicBlock::iterator InsertPt,
MachineInstr &MI,
const TargetInstrInfo &TII) { … }
static MachineInstr *fuseInst(MachineFunction &MF, unsigned Opcode,
unsigned OpNo, ArrayRef<MachineOperand> MOs,
MachineBasicBlock::iterator InsertPt,
MachineInstr &MI, const TargetInstrInfo &TII,
int PtrOffset = 0) { … }
static MachineInstr *makeM0Inst(const TargetInstrInfo &TII, unsigned Opcode,
ArrayRef<MachineOperand> MOs,
MachineBasicBlock::iterator InsertPt,
MachineInstr &MI) { … }
MachineInstr *X86InstrInfo::foldMemoryOperandCustom(
MachineFunction &MF, MachineInstr &MI, unsigned OpNum,
ArrayRef<MachineOperand> MOs, MachineBasicBlock::iterator InsertPt,
unsigned Size, Align Alignment) const { … }
static bool shouldPreventUndefRegUpdateMemFold(MachineFunction &MF,
MachineInstr &MI) { … }
unsigned X86InstrInfo::commuteOperandsForFold(MachineInstr &MI,
unsigned Idx1) const { … }
static void printFailMsgforFold(const MachineInstr &MI, unsigned Idx) { … }
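// Memory-operand folding. The foldMemoryOperandImpl() overloads consult the
// generated fold tables (X86InstrFoldTables) to merge a stack-slot access or
// an adjacent load into the instruction itself, e.g. turning a reload plus
// ADD32rr into a single ADD32rm. AllowCommute lets the fold try the commuted
// operand order, and the NoFusing/PrintFailedFusing options above disable or
// diagnose the process.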
MachineInstr *X86InstrInfo::foldMemoryOperandImpl(
MachineFunction &MF, MachineInstr &MI, unsigned OpNum,
ArrayRef<MachineOperand> MOs, MachineBasicBlock::iterator InsertPt,
unsigned Size, Align Alignment, bool AllowCommute) const { … }
MachineInstr *X86InstrInfo::foldMemoryOperandImpl(
MachineFunction &MF, MachineInstr &MI, ArrayRef<unsigned> Ops,
MachineBasicBlock::iterator InsertPt, int FrameIndex, LiveIntervals *LIS,
VirtRegMap *VRM) const { … }
static bool isNonFoldablePartialRegisterLoad(const MachineInstr &LoadMI,
const MachineInstr &UserMI,
const MachineFunction &MF) { … }
MachineInstr *X86InstrInfo::foldMemoryOperandImpl(
MachineFunction &MF, MachineInstr &MI, ArrayRef<unsigned> Ops,
MachineBasicBlock::iterator InsertPt, MachineInstr &LoadMI,
LiveIntervals *LIS) const { … }
MachineInstr *
X86InstrInfo::foldMemoryBroadcast(MachineFunction &MF, MachineInstr &MI,
unsigned OpNum, ArrayRef<MachineOperand> MOs,
MachineBasicBlock::iterator InsertPt,
unsigned BitsSize, bool AllowCommute) const { … }
static SmallVector<MachineMemOperand *, 2>
extractLoadMMOs(ArrayRef<MachineMemOperand *> MMOs, MachineFunction &MF) { … }
static SmallVector<MachineMemOperand *, 2>
extractStoreMMOs(ArrayRef<MachineMemOperand *> MMOs, MachineFunction &MF) { … }
static unsigned getBroadcastOpcode(const X86FoldTableEntry *I,
const TargetRegisterClass *RC,
const X86Subtarget &STI) { … }
bool X86InstrInfo::unfoldMemoryOperand(
MachineFunction &MF, MachineInstr &MI, unsigned Reg, bool UnfoldLoad,
bool UnfoldStore, SmallVectorImpl<MachineInstr *> &NewMIs) const { … }
bool X86InstrInfo::unfoldMemoryOperand(
SelectionDAG &DAG, SDNode *N, SmallVectorImpl<SDNode *> &NewNodes) const { … }
unsigned
X86InstrInfo::getOpcodeAfterMemoryUnfold(unsigned Opc, bool UnfoldLoad,
bool UnfoldStore,
unsigned *LoadRegIndex) const { … }
bool X86InstrInfo::areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2,
int64_t &Offset1,
int64_t &Offset2) const { … }
bool X86InstrInfo::shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2,
int64_t Offset1, int64_t Offset2,
unsigned NumLoads) const { … }
bool X86InstrInfo::isSchedulingBoundary(const MachineInstr &MI,
const MachineBasicBlock *MBB,
const MachineFunction &MF) const { … }
bool X86InstrInfo::reverseBranchCondition(
SmallVectorImpl<MachineOperand> &Cond) const { … }
bool X86InstrInfo::isSafeToMoveRegClassDefs(
const TargetRegisterClass *RC) const { … }
unsigned X86InstrInfo::getGlobalBaseReg(MachineFunction *MF) const { … }
static const uint16_t *lookup(unsigned opcode, unsigned domain,
ArrayRef<uint16_t[3]> Table) { … }
static const uint16_t *lookupAVX512(unsigned opcode, unsigned domain,
ArrayRef<uint16_t[4]> Table) { … }
static bool AdjustBlendMask(unsigned OldMask, unsigned OldWidth,
unsigned NewWidth, unsigned *pNewMask = nullptr) { … }
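// Execution-domain fixing. SSE/AVX instructions come in integer, packed-single
// and packed-double flavors that encode the same operation (e.g. PXOR / XORPS /
// XORPD); moving a value between domains costs a bypass delay on many cores.
// getExecutionDomain()/setExecutionDomain() report and rewrite the domain using
// the tables in X86ReplaceableInstrs.def.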
uint16_t X86InstrInfo::getExecutionDomainCustom(const MachineInstr &MI) const { … }
#include "X86ReplaceableInstrs.def"
bool X86InstrInfo::setExecutionDomainCustom(MachineInstr &MI,
unsigned Domain) const { … }
std::pair<uint16_t, uint16_t>
X86InstrInfo::getExecutionDomain(const MachineInstr &MI) const { … }
void X86InstrInfo::setExecutionDomain(MachineInstr &MI, unsigned Domain) const { … }
void X86InstrInfo::insertNoop(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI) const { … }
MCInst X86InstrInfo::getNop() const { … }
bool X86InstrInfo::isHighLatencyDef(int opc) const { … }
bool X86InstrInfo::hasHighOperandLatency(const TargetSchedModel &SchedModel,
const MachineRegisterInfo *MRI,
const MachineInstr &DefMI,
unsigned DefIdx,
const MachineInstr &UseMI,
unsigned UseIdx) const { … }
bool X86InstrInfo::hasReassociableOperands(const MachineInstr &Inst,
const MachineBasicBlock *MBB) const { … }
bool X86InstrInfo::isAssociativeAndCommutative(const MachineInstr &Inst,
bool Invert) const { … }
static std::optional<ParamLoadedValue>
describeMOVrrLoadedValue(const MachineInstr &MI, Register DescribedReg,
const TargetRegisterInfo *TRI) { … }
std::optional<ParamLoadedValue>
X86InstrInfo::describeLoadedValue(const MachineInstr &MI, Register Reg) const { … }
void X86InstrInfo::setSpecialOperandAttr(MachineInstr &OldMI1,
MachineInstr &OldMI2,
MachineInstr &NewMI1,
MachineInstr &NewMI2) const { … }
std::pair<unsigned, unsigned>
X86InstrInfo::decomposeMachineOperandsTargetFlags(unsigned TF) const { … }
ArrayRef<std::pair<unsigned, const char *>>
X86InstrInfo::getSerializableDirectMachineOperandTargetFlags() const { … }
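// CGBR ("create global base reg") materializes the PIC base register in the
// function entry block when getGlobalBaseReg() has been requested, and
// LDTLSCleanup merges repeated local-dynamic TLS base-address computations so
// __tls_get_addr is called only once per function.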
namespace {
struct CGBR : public MachineFunctionPass { … };
}
char CGBR::ID = …;
FunctionPass *llvm::createX86GlobalBaseRegPass() { … }
namespace {
struct LDTLSCleanup : public MachineFunctionPass { … };
}
char LDTLSCleanup::ID = …;
FunctionPass *llvm::createCleanupLocalDynamicTLSPass() { … }
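// Machine outliner hooks. Candidates are classified (MachineOutlinerClass) by
// whether the outlined sequence can be reached with a tail call or needs a real
// call plus return; the hooks below compute the cost model, build the outlined
// function's frame, and insert the call at each occurrence.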
enum MachineOutlinerClass { … };
std::optional<std::unique_ptr<outliner::OutlinedFunction>>
X86InstrInfo::getOutliningCandidateInfo(
const MachineModuleInfo &MMI,
std::vector<outliner::Candidate> &RepeatedSequenceLocs,
unsigned MinRepeats) const { … }
bool X86InstrInfo::isFunctionSafeToOutlineFrom(
MachineFunction &MF, bool OutlineFromLinkOnceODRs) const { … }
outliner::InstrType
X86InstrInfo::getOutliningTypeImpl(const MachineModuleInfo &MMI,
MachineBasicBlock::iterator &MIT,
unsigned Flags) const { … }
void X86InstrInfo::buildOutlinedFrame(
MachineBasicBlock &MBB, MachineFunction &MF,
const outliner::OutlinedFunction &OF) const { … }
MachineBasicBlock::iterator X86InstrInfo::insertOutlinedCall(
Module &M, MachineBasicBlock &MBB, MachineBasicBlock::iterator &It,
MachineFunction &MF, outliner::Candidate &C) const { … }
void X86InstrInfo::buildClearRegister(Register Reg, MachineBasicBlock &MBB,
MachineBasicBlock::iterator Iter,
DebugLoc &DL,
bool AllowSideEffects) const { … }
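// Machine-combiner hooks. Besides the generic reassociation patterns handled
// by TargetInstrInfo, X86 can evaluate an alternative expansion of dot-product
// style instructions (genAlternativeDpCodeSequence) and keep whichever
// sequence the scheduling model rates as cheaper.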
bool X86InstrInfo::getMachineCombinerPatterns(
MachineInstr &Root, SmallVectorImpl<unsigned> &Patterns,
bool DoRegPressureReduce) const { … }
static void
genAlternativeDpCodeSequence(MachineInstr &Root, const TargetInstrInfo &TII,
SmallVectorImpl<MachineInstr *> &InsInstrs,
SmallVectorImpl<MachineInstr *> &DelInstrs,
DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) { … }
void X86InstrInfo::genAlternativeCodeSequence(
MachineInstr &Root, unsigned Pattern,
SmallVectorImpl<MachineInstr *> &InsInstrs,
SmallVectorImpl<MachineInstr *> &DelInstrs,
DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) const { … }
void X86InstrInfo::getFrameIndexOperands(SmallVectorImpl<MachineOperand> &Ops,
int FI) const { … }
#define GET_INSTRINFO_HELPERS
#include "X86GenInstrInfo.inc"