llvm/lib/Target/X86/X86InstrInfo.cpp

//===-- X86InstrInfo.cpp - X86 Instruction Information --------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the X86 implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#include "X86InstrInfo.h"
#include "X86.h"
#include "X86InstrBuilder.h"
#include "X86InstrFoldTables.h"
#include "X86MachineFunctionInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/Sequence.h"
#include "llvm/CodeGen/LiveIntervals.h"
#include "llvm/CodeGen/LivePhysRegs.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineCombinerPattern.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Module.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetOptions.h"
#include <optional>

using namespace llvm;

#define DEBUG_TYPE "x86-instr-info"

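// Pull in the TableGen-generated constructor and destructor implementations.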
#define GET_INSTRINFO_CTOR_DTOR
#include "X86GenInstrInfo.inc"

static cl::opt<bool>
    NoFusing("disable-spill-fusing",
             cl::desc("Disable fusing of spill code into instructions"),
             cl::Hidden);
static cl::opt<bool>
    PrintFailedFusing("print-failed-fuse-candidates",
                      cl::desc("Print instructions that the allocator wants to"
                               " fuse, but the X86 backend currently can't"),
                      cl::Hidden);
static cl::opt<bool>
    ReMatPICStubLoad("remat-pic-stub-load",
                     cl::desc("Re-materialize load from stub in PIC mode"),
                     cl::init(false), cl::Hidden);
static cl::opt<unsigned>
    PartialRegUpdateClearance("partial-reg-update-clearance",
                              cl::desc("Clearance between two register writes "
                                       "for inserting XOR to avoid partial "
                                       "register update"),
                              cl::init(64), cl::Hidden);
static cl::opt<unsigned> UndefRegClearance(
    "undef-reg-clearance",
    cl::desc("How many idle instructions we would like before "
             "certain undef register reads"),
    cl::init(128), cl::Hidden);
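
// The options above are hidden developer knobs: cl::Hidden only omits them
// from -help, so they can still be set explicitly, e.g.
// "llc -partial-reg-update-clearance=32".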

// Pin the vtable to this file.
void X86InstrInfo::anchor() {}

X86InstrInfo::X86InstrInfo(X86Subtarget &STI) {}

const TargetRegisterClass *
X86InstrInfo::getRegClass(const MCInstrDesc &MCID, unsigned OpNum,
                          const TargetRegisterInfo *TRI,
                          const MachineFunction &MF) const {}

bool X86InstrInfo::isCoalescableExtInstr(const MachineInstr &MI,
                                         Register &SrcReg, Register &DstReg,
                                         unsigned &SubIdx) const {}

bool X86InstrInfo::isDataInvariant(MachineInstr &MI) {}

bool X86InstrInfo::isDataInvariantLoad(MachineInstr &MI) {}

int X86InstrInfo::getSPAdjust(const MachineInstr &MI) const {}

/// Return true and the FrameIndex if the specified
/// operand and following operands form a reference to the stack frame.
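/// (An X86 memory reference spans five operands: base, scale, index,
/// displacement, and segment. A simple frame reference is a FrameIndex base
/// with scale 1, no index register, and zero displacement.)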
bool X86InstrInfo::isFrameOperand(const MachineInstr &MI, unsigned int Op,
                                  int &FrameIndex) const {}

static bool isFrameLoadOpcode(int Opcode, unsigned &MemBytes) {}

static bool isFrameStoreOpcode(int Opcode, unsigned &MemBytes) {}

Register X86InstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
                                           int &FrameIndex) const {}

Register X86InstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
                                           int &FrameIndex,
                                           unsigned &MemBytes) const {}

Register X86InstrInfo::isLoadFromStackSlotPostFE(const MachineInstr &MI,
                                                 int &FrameIndex) const {}

Register X86InstrInfo::isStoreToStackSlot(const MachineInstr &MI,
                                          int &FrameIndex) const {}

Register X86InstrInfo::isStoreToStackSlot(const MachineInstr &MI,
                                          int &FrameIndex,
                                          unsigned &MemBytes) const {}

Register X86InstrInfo::isStoreToStackSlotPostFE(const MachineInstr &MI,
                                                int &FrameIndex) const {}

/// Return true if the register is a PIC base, i.e. defined by X86::MOVPC32r.
static bool regIsPICBase(Register BaseReg, const MachineRegisterInfo &MRI) {}

bool X86InstrInfo::isReallyTriviallyReMaterializable(
    const MachineInstr &MI) const {}

void X86InstrInfo::reMaterialize(MachineBasicBlock &MBB,
                                 MachineBasicBlock::iterator I,
                                 Register DestReg, unsigned SubIdx,
                                 const MachineInstr &Orig,
                                 const TargetRegisterInfo &TRI) const {}

/// True if MI has a condition code def, e.g. EFLAGS, that is not marked dead.
bool X86InstrInfo::hasLiveCondCodeDef(MachineInstr &MI) const {}

/// Return the shift count of a machine operand, truncated to the width the
/// hardware actually honors (6 bits with REX.W, 5 bits otherwise).
inline static unsigned getTruncatedShiftCount(const MachineInstr &MI,
                                              unsigned ShiftAmtOperandIdx) {}

/// Check whether the given shift count can be represented by a LEA
/// instruction (LEA index scales are 2, 4, or 8, i.e. shift amounts 1-3).
inline static bool isTruncatedShiftCountForLEA(unsigned ShAmt) {}

static bool findRedundantFlagInstr(MachineInstr &CmpInstr,
                                   MachineInstr &CmpValDefInstr,
                                   const MachineRegisterInfo *MRI,
                                   MachineInstr **AndInstr,
                                   const TargetRegisterInfo *TRI,
                                   bool &NoSignFlag, bool &ClearsOverflowFlag) {}

bool X86InstrInfo::classifyLEAReg(MachineInstr &MI, const MachineOperand &Src,
                                  unsigned Opc, bool AllowSP, Register &NewSrc,
                                  bool &isKill, MachineOperand &ImplicitOp,
                                  LiveVariables *LV, LiveIntervals *LIS) const {}

MachineInstr *X86InstrInfo::convertToThreeAddressWithLEA(unsigned MIOpc,
                                                         MachineInstr &MI,
                                                         LiveVariables *LV,
                                                         LiveIntervals *LIS,
                                                         bool Is8BitOp) const {}

/// This method must be implemented by targets that
/// set the M_CONVERTIBLE_TO_3_ADDR flag.  When this flag is set, the target
/// may be able to convert a two-address instruction into a true
/// three-address instruction on demand.  This allows the X86 target (for
/// example) to convert ADD and SHL instructions into LEA instructions if they
/// would require register copies due to two-addressness.
///
/// This method returns a null pointer if the transformation cannot be
/// performed, otherwise it returns the new instruction.
///
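/// A sketch of the intended transform (exact opcodes depend on operand size
/// and subtarget; e.g. 64-bit mode uses LEA64_32r for 32-bit results):
///
///   %2:gr32 = ADD32rr %0:gr32, %1:gr32  ; tied def: would force %2 = COPY %0
///   -->
///   %2:gr32 = LEA32r %0:gr32, 1, %1:gr32, 0, $noreg
///
/// leaving both source registers unclobbered.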
MachineInstr *X86InstrInfo::convertToThreeAddress(MachineInstr &MI,
                                                  LiveVariables *LV,
                                                  LiveIntervals *LIS) const {}

/// Determine which of the three possible cases of a three-source commute the
/// given source indexes correspond to, taking into account any mask operands.
/// Commuting a passthru operand is never allowed. Returns -1 if the commute
/// isn't possible.
/// Case 0 - Possible to commute the first and second operands.
/// Case 1 - Possible to commute the first and third operands.
/// Case 2 - Possible to commute the second and third operands.
static unsigned getThreeSrcCommuteCase(uint64_t TSFlags, unsigned SrcOpIdx1,
                                       unsigned SrcOpIdx2) {}

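// Commuting FMA3 sources changes which operands serve as multiplicands and
// which as the addend, so the opcode must be remapped between the 132/213/231
// forms as well; e.g. commuting the tied operand of a 213 form with the last
// source yields the matching 231 form.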
unsigned X86InstrInfo::getFMA3OpcodeToCommuteOperands(
    const MachineInstr &MI, unsigned SrcOpIdx1, unsigned SrcOpIdx2,
    const X86InstrFMA3Group &FMA3Group) const {}

static void commuteVPTERNLOG(MachineInstr &MI, unsigned SrcOpIdx1,
                             unsigned SrcOpIdx2) {}

// Returns true if this is a VPERMI2 or VPERMT2 instruction that can be
// commuted.
static bool isCommutableVPERMV3Instruction(unsigned Opcode) {}

// Returns commuted opcode for VPERMI2 and VPERMT2 instructions by switching
// from the I opcode to the T opcode and vice versa.
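// (The commute exchanges the role of the tied operand: in VPERMI2 it holds
// the indices, in VPERMT2 it holds a table input, so e.g. a vpermi2ps becomes
// a vpermt2ps.)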
static unsigned getCommutedVPERMV3Opcode(unsigned Opcode) {}

MachineInstr *X86InstrInfo::commuteInstructionImpl(MachineInstr &MI, bool NewMI,
                                                   unsigned OpIdx1,
                                                   unsigned OpIdx2) const {}

bool X86InstrInfo::findThreeSrcCommutedOpIndices(const MachineInstr &MI,
                                                 unsigned &SrcOpIdx1,
                                                 unsigned &SrcOpIdx2,
                                                 bool IsIntrinsic) const {}

bool X86InstrInfo::findCommutedOpIndices(const MachineInstr &MI,
                                         unsigned &SrcOpIdx1,
                                         unsigned &SrcOpIdx2) const {}

static bool isConvertibleLEA(MachineInstr *MI) {}

bool X86InstrInfo::hasCommutePreference(MachineInstr &MI, bool &Commute) const {}

int X86::getCondSrcNoFromDesc(const MCInstrDesc &MCID) {}

X86::CondCode X86::getCondFromMI(const MachineInstr &MI) {}

X86::CondCode X86::getCondFromBranch(const MachineInstr &MI) {}

X86::CondCode X86::getCondFromSETCC(const MachineInstr &MI) {}

X86::CondCode X86::getCondFromCMov(const MachineInstr &MI) {}

X86::CondCode X86::getCondFromCFCMov(const MachineInstr &MI) {}

X86::CondCode X86::getCondFromCCMP(const MachineInstr &MI) {}

int X86::getCCMPCondFlagsFromCondCode(X86::CondCode CC) {}

#define GET_X86_NF_TRANSFORM_TABLE
#define GET_X86_ND2NONND_TABLE
#include "X86GenInstrMapping.inc"

static unsigned getNewOpcFromTable(ArrayRef<X86TableEntry> Table,
                                   unsigned Opc) {}
unsigned X86::getNFVariant(unsigned Opc) {}

unsigned X86::getNonNDVariant(unsigned Opc) {}

/// Return the inverse of the specified condition,
/// e.g. turning COND_E to COND_NE.
X86::CondCode X86::GetOppositeBranchCondition(X86::CondCode CC) {}

/// Assuming the flags are set by MI(a,b), return the condition code if we
/// modify the instructions such that flags are set by MI(b,a).
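/// For example, COND_L becomes COND_G and COND_B becomes COND_A, while
/// symmetric codes such as COND_E and COND_NE are unchanged.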
static X86::CondCode getSwappedCondition(X86::CondCode CC) {}

std::pair<X86::CondCode, bool>
X86::getX86ConditionCode(CmpInst::Predicate Predicate) {}

/// Return a cmov opcode for the given register size in bytes, and operand type.
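/// (For example, a 4-byte register-only cmov is CMOV32rr.)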
unsigned X86::getCMovOpcode(unsigned RegBytes, bool HasMemoryOperand,
                            bool HasNDD) {}

/// Get the VPCMP immediate for the given condition.
unsigned X86::getVPCMPImmForCond(ISD::CondCode CC) {}

/// Get the VPCMP immediate if the operands are swapped.
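/// For example, LT (0x01) becomes NLE (0x06), since a < b is the same test as
/// b > a with the operands reversed; symmetric predicates such as EQ and NE
/// keep their immediate.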
unsigned X86::getSwappedVPCMPImm(unsigned Imm) {}

/// Get the VPCOM immediate if the operands are swapped.
unsigned X86::getSwappedVPCOMImm(unsigned Imm) {}

/// Get the VCMP immediate if the operands are swapped.
unsigned X86::getSwappedVCMPImm(unsigned Imm) {}

unsigned X86::getVectorRegisterWidth(const MCOperandInfo &Info) {}

/// Return true if Reg is an X87 register.
static bool isX87Reg(unsigned Reg) {}

/// Check if the instruction is an X87 instruction.
bool X86::isX87Instruction(MachineInstr &MI) {}

int X86::getFirstAddrOperandIdx(const MachineInstr &MI) {}

const Constant *X86::getConstantFromPool(const MachineInstr &MI,
                                         unsigned OpNo) {}

bool X86InstrInfo::isUnconditionalTailCall(const MachineInstr &MI) const {}

bool X86InstrInfo::canMakeTailCallConditional(
    SmallVectorImpl<MachineOperand> &BranchCond,
    const MachineInstr &TailCall) const {}

void X86InstrInfo::replaceBranchWithTailCall(
    MachineBasicBlock &MBB, SmallVectorImpl<MachineOperand> &BranchCond,
    const MachineInstr &TailCall) const {}

// Given an MBB and its TBB, find the FBB which was a fallthrough MBB (it may
// not be a fallthrough MBB now due to layout changes). Return nullptr if the
// fallthrough MBB cannot be identified.
static MachineBasicBlock *getFallThroughMBB(MachineBasicBlock *MBB,
                                            MachineBasicBlock *TBB) {}

bool X86InstrInfo::analyzeBranchImpl(
    MachineBasicBlock &MBB, MachineBasicBlock *&TBB, MachineBasicBlock *&FBB,
    SmallVectorImpl<MachineOperand> &Cond,
    SmallVectorImpl<MachineInstr *> &CondBranches, bool AllowModify) const {}

bool X86InstrInfo::analyzeBranch(MachineBasicBlock &MBB,
                                 MachineBasicBlock *&TBB,
                                 MachineBasicBlock *&FBB,
                                 SmallVectorImpl<MachineOperand> &Cond,
                                 bool AllowModify) const {}

static int getJumpTableIndexFromAddr(const MachineInstr &MI) {}

static int getJumpTableIndexFromReg(const MachineRegisterInfo &MRI,
                                    Register Reg) {}

int X86InstrInfo::getJumpTableIndex(const MachineInstr &MI) const {}

bool X86InstrInfo::analyzeBranchPredicate(MachineBasicBlock &MBB,
                                          MachineBranchPredicate &MBP,
                                          bool AllowModify) const {}

unsigned X86InstrInfo::removeBranch(MachineBasicBlock &MBB,
                                    int *BytesRemoved) const {}

unsigned X86InstrInfo::insertBranch(MachineBasicBlock &MBB,
                                    MachineBasicBlock *TBB,
                                    MachineBasicBlock *FBB,
                                    ArrayRef<MachineOperand> Cond,
                                    const DebugLoc &DL, int *BytesAdded) const {}

bool X86InstrInfo::canInsertSelect(const MachineBasicBlock &MBB,
                                   ArrayRef<MachineOperand> Cond,
                                   Register DstReg, Register TrueReg,
                                   Register FalseReg, int &CondCycles,
                                   int &TrueCycles, int &FalseCycles) const {}

void X86InstrInfo::insertSelect(MachineBasicBlock &MBB,
                                MachineBasicBlock::iterator I,
                                const DebugLoc &DL, Register DstReg,
                                ArrayRef<MachineOperand> Cond, Register TrueReg,
                                Register FalseReg) const {}

/// Test if the given register is a physical h register (AH, BH, CH, or DH).
static bool isHReg(unsigned Reg) {}

// Try to copy between VR128/VR64 and GR64 registers.
static unsigned CopyToFromAsymmetricReg(unsigned DestReg, unsigned SrcReg,
                                        const X86Subtarget &Subtarget) {}

void X86InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                               MachineBasicBlock::iterator MI,
                               const DebugLoc &DL, MCRegister DestReg,
                               MCRegister SrcReg, bool KillSrc,
                               bool RenamableDest, bool RenamableSrc) const {}

std::optional<DestSourcePair>
X86InstrInfo::isCopyInstrImpl(const MachineInstr &MI) const {}

static unsigned getLoadStoreOpcodeForFP16(bool Load, const X86Subtarget &STI) {}

static unsigned getLoadStoreRegOpcode(Register Reg,
                                      const TargetRegisterClass *RC,
                                      bool IsStackAligned,
                                      const X86Subtarget &STI, bool Load) {}

std::optional<ExtAddrMode>
X86InstrInfo::getAddrModeFromMemoryOp(const MachineInstr &MemI,
                                      const TargetRegisterInfo *TRI) const {}

bool X86InstrInfo::verifyInstruction(const MachineInstr &MI,
                                     StringRef &ErrInfo) const {}

bool X86InstrInfo::getConstValDefinedInReg(const MachineInstr &MI,
                                           const Register Reg,
                                           int64_t &ImmVal) const {}

bool X86InstrInfo::preservesZeroValueInReg(
    const MachineInstr *MI, const Register NullValueReg,
    const TargetRegisterInfo *TRI) const {}

bool X86InstrInfo::getMemOperandsWithOffsetWidth(
    const MachineInstr &MemOp, SmallVectorImpl<const MachineOperand *> &BaseOps,
    int64_t &Offset, bool &OffsetIsScalable, LocationSize &Width,
    const TargetRegisterInfo *TRI) const {}

static unsigned getStoreRegOpcode(Register SrcReg,
                                  const TargetRegisterClass *RC,
                                  bool IsStackAligned,
                                  const X86Subtarget &STI) {}

static unsigned getLoadRegOpcode(Register DestReg,
                                 const TargetRegisterClass *RC,
                                 bool IsStackAligned, const X86Subtarget &STI) {}

static bool isAMXOpcode(unsigned Opc) {}

void X86InstrInfo::loadStoreTileReg(MachineBasicBlock &MBB,
                                    MachineBasicBlock::iterator MI,
                                    unsigned Opc, Register Reg, int FrameIdx,
                                    bool isKill) const {}

void X86InstrInfo::storeRegToStackSlot(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register SrcReg,
    bool isKill, int FrameIdx, const TargetRegisterClass *RC,
    const TargetRegisterInfo *TRI, Register VReg) const {}

void X86InstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
                                        MachineBasicBlock::iterator MI,
                                        Register DestReg, int FrameIdx,
                                        const TargetRegisterClass *RC,
                                        const TargetRegisterInfo *TRI,
                                        Register VReg) const {}

bool X86InstrInfo::analyzeCompare(const MachineInstr &MI, Register &SrcReg,
                                  Register &SrcReg2, int64_t &CmpMask,
                                  int64_t &CmpValue) const {}

bool X86InstrInfo::isRedundantFlagInstr(const MachineInstr &FlagI,
                                        Register SrcReg, Register SrcReg2,
                                        int64_t ImmMask, int64_t ImmValue,
                                        const MachineInstr &OI, bool *IsSwapped,
                                        int64_t *ImmDelta) const {}

/// Check whether the definition can be converted
/// to remove a comparison against zero.
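/// For example, after "subl %esi, %edi" a trailing "cmpl $0, %edi" is
/// redundant: SUB already sets ZF and SF according to its result.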
inline static bool isDefConvertible(const MachineInstr &MI, bool &NoSignFlag,
                                    bool &ClearsOverflowFlag) {}

/// Check whether the use can be converted to remove a comparison against zero.
static X86::CondCode isUseDefConvertible(const MachineInstr &MI) {}

/// Check if there exists an earlier instruction that
/// operates on the same source operands and sets flags in the same way as
/// Compare; remove Compare if possible.
bool X86InstrInfo::optimizeCompareInstr(MachineInstr &CmpInstr, Register SrcReg,
                                        Register SrcReg2, int64_t CmpMask,
                                        int64_t CmpValue,
                                        const MachineRegisterInfo *MRI) const {}

/// Try to remove the load by folding it into a register operand at the use.
/// We fold the load if it defines a virtual register, the virtual register is
/// used exactly once in the same BB, and the instructions in between do not
/// load or store and have no side effects.
MachineInstr *X86InstrInfo::optimizeLoadInstr(MachineInstr &MI,
                                              const MachineRegisterInfo *MRI,
                                              Register &FoldAsLoadDefReg,
                                              MachineInstr *&DefMI) const {}

/// \returns true if the instruction can be changed to COPY when imm is 0.
static bool canConvert2Copy(unsigned Opc) {}

/// Convert an ALUrr opcode to the corresponding ALUri opcode. Such as
///     ADD32rr  ==>  ADD32ri
static unsigned convertALUrr2ALUri(unsigned Opc) {}

/// Reg is assigned ImmVal in DefMI, and is used in UseMI.
/// If MakeChange is true, this function tries to replace Reg by ImmVal in
/// UseMI. If MakeChange is false, just check if folding is possible.
///
/// \returns true if folding is successful or possible.
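/// For example, given "%1 = MOV32ri 8", a use "ADD32rr %0, %1" can be
/// rewritten as "ADD32ri %0, 8".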
bool X86InstrInfo::foldImmediateImpl(MachineInstr &UseMI, MachineInstr *DefMI,
                                     Register Reg, int64_t ImmVal,
                                     MachineRegisterInfo *MRI,
                                     bool MakeChange) const {}

/// foldImmediate - 'Reg' is known to be defined by a move immediate
/// instruction, try to fold the immediate into the use instruction.
bool X86InstrInfo::foldImmediate(MachineInstr &UseMI, MachineInstr &DefMI,
                                 Register Reg, MachineRegisterInfo *MRI) const {}

/// Expand a single-def pseudo instruction to a two-addr
/// instruction with two undef reads of the register being defined.
/// This is used for mapping:
///   %xmm4 = V_SET0
/// to:
///   %xmm4 = PXORrr undef %xmm4, undef %xmm4
///
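/// The undef reads matter: they record that the xor zero-idiom does not
/// really depend on the previous value of %xmm4, so no false dependency is
/// introduced.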
static bool Expand2AddrUndef(MachineInstrBuilder &MIB,
                             const MCInstrDesc &Desc) {}

/// Expand a single-def pseudo instruction to a two-addr
/// instruction with two %k0 reads.
/// This is used for mapping:
///   %k4 = K_SET1
/// to:
///   %k4 = KXNORrr %k0, %k0
static bool Expand2AddrKreg(MachineInstrBuilder &MIB, const MCInstrDesc &Desc,
                            Register Reg) {}

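// MOV32r1/MOV32r_1 materialize +1/-1 without a 4-byte immediate: zero the
// register with a xor idiom, then turn the pseudo into an INC or DEC.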
static bool expandMOV32r1(MachineInstrBuilder &MIB, const TargetInstrInfo &TII,
                          bool MinusOne) {}

static bool ExpandMOVImmSExti8(MachineInstrBuilder &MIB,
                               const TargetInstrInfo &TII,
                               const X86Subtarget &Subtarget) {}

// LoadStackGuard has so far only been implemented for 64-bit MachO. A
// different code sequence is needed for other targets.
static void expandLoadStackGuard(MachineInstrBuilder &MIB,
                                 const TargetInstrInfo &TII) {}

static bool expandXorFP(MachineInstrBuilder &MIB, const TargetInstrInfo &TII) {}

// This is used to handle spills for 128/256-bit registers when we have AVX512,
// but not VLX. If the instruction uses an extended register, we need an
// instruction that loads the lower 128/256 bits but is available with only
// AVX512F.
static bool expandNOVLXLoad(MachineInstrBuilder &MIB,
                            const TargetRegisterInfo *TRI,
                            const MCInstrDesc &LoadDesc,
                            const MCInstrDesc &BroadcastDesc, unsigned SubIdx) {}

// This is used to handle spills for 128/256-bit registers when we have AVX512,
// but not VLX. If the instruction uses an extended register, we need an
// instruction that stores the lower 128/256 bits but is available with only
// AVX512F.
static bool expandNOVLXStore(MachineInstrBuilder &MIB,
                             const TargetRegisterInfo *TRI,
                             const MCInstrDesc &StoreDesc,
                             const MCInstrDesc &ExtractDesc, unsigned SubIdx) {}

static bool expandSHXDROT(MachineInstrBuilder &MIB, const MCInstrDesc &Desc) {}

bool X86InstrInfo::expandPostRAPseudo(MachineInstr &MI) const {}

/// Return true for all instructions that only update
/// the first 32 or 64 bits of the destination register and leave the rest
/// unmodified. This can be used to avoid folding loads if the instructions
/// only update part of the destination register, and the non-updated part is
/// not needed. e.g. cvtss2sd, sqrtss. Unfolding the load from these
/// instructions breaks the partial register dependency and it can improve
/// performance. e.g.:
///
///   movss (%rdi), %xmm0
///   cvtss2sd %xmm0, %xmm0
///
/// Instead of
///   cvtss2sd (%rdi), %xmm0
///
/// FIXME: This should be turned into a TSFlags.
///
static bool hasPartialRegUpdate(unsigned Opcode, const X86Subtarget &Subtarget,
                                bool ForLoadFold = false) {}

/// Inform the BreakFalseDeps pass how many idle
/// instructions we would like before a partial register update.
unsigned X86InstrInfo::getPartialRegUpdateClearance(
    const MachineInstr &MI, unsigned OpNum,
    const TargetRegisterInfo *TRI) const {}

// Return true for any instruction that copies the high bits of the first source
// operand into the unused high bits of the destination operand.
// Also returns true for instructions that have two inputs where one may
// be undef and we want it to use the same register as the other input.
static bool hasUndefRegUpdate(unsigned Opcode, unsigned OpNum,
                              bool ForLoadFold = false) {}

/// Inform the BreakFalseDeps pass how many idle instructions we would like
/// before certain undef register reads.
///
/// This catches the VCVTSI2SD family of instructions:
///
/// vcvtsi2sdq %rax, undef %xmm0, %xmm14
///
/// We should be careful *not* to catch VXOR idioms which are presumably
/// handled specially in the pipeline:
///
/// vxorps undef %xmm1, undef %xmm1, %xmm1
///
/// Like getPartialRegUpdateClearance, this makes a strong assumption that the
/// high bits that are passed-through are not live.
unsigned
X86InstrInfo::getUndefRegClearance(const MachineInstr &MI, unsigned OpNum,
                                   const TargetRegisterInfo *TRI) const {}

void X86InstrInfo::breakPartialRegDependency(
    MachineInstr &MI, unsigned OpNum, const TargetRegisterInfo *TRI) const {}

static void addOperands(MachineInstrBuilder &MIB, ArrayRef<MachineOperand> MOs,
                        int PtrOffset = 0) {}

static void updateOperandRegConstraints(MachineFunction &MF,
                                        MachineInstr &NewMI,
                                        const TargetInstrInfo &TII) {}

static MachineInstr *fuseTwoAddrInst(MachineFunction &MF, unsigned Opcode,
                                     ArrayRef<MachineOperand> MOs,
                                     MachineBasicBlock::iterator InsertPt,
                                     MachineInstr &MI,
                                     const TargetInstrInfo &TII) {}

static MachineInstr *fuseInst(MachineFunction &MF, unsigned Opcode,
                              unsigned OpNo, ArrayRef<MachineOperand> MOs,
                              MachineBasicBlock::iterator InsertPt,
                              MachineInstr &MI, const TargetInstrInfo &TII,
                              int PtrOffset = 0) {}

static MachineInstr *makeM0Inst(const TargetInstrInfo &TII, unsigned Opcode,
                                ArrayRef<MachineOperand> MOs,
                                MachineBasicBlock::iterator InsertPt,
                                MachineInstr &MI) {}

MachineInstr *X86InstrInfo::foldMemoryOperandCustom(
    MachineFunction &MF, MachineInstr &MI, unsigned OpNum,
    ArrayRef<MachineOperand> MOs, MachineBasicBlock::iterator InsertPt,
    unsigned Size, Align Alignment) const {}

static bool shouldPreventUndefRegUpdateMemFold(MachineFunction &MF,
                                               MachineInstr &MI) {}

unsigned X86InstrInfo::commuteOperandsForFold(MachineInstr &MI,
                                              unsigned Idx1) const {}

static void printFailMsgforFold(const MachineInstr &MI, unsigned Idx) {}

MachineInstr *X86InstrInfo::foldMemoryOperandImpl(
    MachineFunction &MF, MachineInstr &MI, unsigned OpNum,
    ArrayRef<MachineOperand> MOs, MachineBasicBlock::iterator InsertPt,
    unsigned Size, Align Alignment, bool AllowCommute) const {}

MachineInstr *X86InstrInfo::foldMemoryOperandImpl(
    MachineFunction &MF, MachineInstr &MI, ArrayRef<unsigned> Ops,
    MachineBasicBlock::iterator InsertPt, int FrameIndex, LiveIntervals *LIS,
    VirtRegMap *VRM) const {}

/// Check if \p LoadMI is a partial register load that we can't fold into \p MI
/// because the latter uses contents that wouldn't be defined in the folded
/// version.  For instance, this transformation isn't legal:
///   movss (%rdi), %xmm0
///   addps %xmm0, %xmm0
/// ->
///   addps (%rdi), %xmm0
///
/// But this one is:
///   movss (%rdi), %xmm0
///   addss %xmm0, %xmm0
/// ->
///   addss (%rdi), %xmm0
///
static bool isNonFoldablePartialRegisterLoad(const MachineInstr &LoadMI,
                                             const MachineInstr &UserMI,
                                             const MachineFunction &MF) {}

MachineInstr *X86InstrInfo::foldMemoryOperandImpl(
    MachineFunction &MF, MachineInstr &MI, ArrayRef<unsigned> Ops,
    MachineBasicBlock::iterator InsertPt, MachineInstr &LoadMI,
    LiveIntervals *LIS) const {}

MachineInstr *
X86InstrInfo::foldMemoryBroadcast(MachineFunction &MF, MachineInstr &MI,
                                  unsigned OpNum, ArrayRef<MachineOperand> MOs,
                                  MachineBasicBlock::iterator InsertPt,
                                  unsigned BitsSize, bool AllowCommute) const {}

static SmallVector<MachineMemOperand *, 2>
extractLoadMMOs(ArrayRef<MachineMemOperand *> MMOs, MachineFunction &MF) {}

static SmallVector<MachineMemOperand *, 2>
extractStoreMMOs(ArrayRef<MachineMemOperand *> MMOs, MachineFunction &MF) {}

static unsigned getBroadcastOpcode(const X86FoldTableEntry *I,
                                   const TargetRegisterClass *RC,
                                   const X86Subtarget &STI) {}

bool X86InstrInfo::unfoldMemoryOperand(
    MachineFunction &MF, MachineInstr &MI, unsigned Reg, bool UnfoldLoad,
    bool UnfoldStore, SmallVectorImpl<MachineInstr *> &NewMIs) const {}

bool X86InstrInfo::unfoldMemoryOperand(
    SelectionDAG &DAG, SDNode *N, SmallVectorImpl<SDNode *> &NewNodes) const {}

unsigned
X86InstrInfo::getOpcodeAfterMemoryUnfold(unsigned Opc, bool UnfoldLoad,
                                         bool UnfoldStore,
                                         unsigned *LoadRegIndex) const {}

bool X86InstrInfo::areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2,
                                           int64_t &Offset1,
                                           int64_t &Offset2) const {}

bool X86InstrInfo::shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2,
                                           int64_t Offset1, int64_t Offset2,
                                           unsigned NumLoads) const {}

bool X86InstrInfo::isSchedulingBoundary(const MachineInstr &MI,
                                        const MachineBasicBlock *MBB,
                                        const MachineFunction &MF) const {}

bool X86InstrInfo::reverseBranchCondition(
    SmallVectorImpl<MachineOperand> &Cond) const {}

bool X86InstrInfo::isSafeToMoveRegClassDefs(
    const TargetRegisterClass *RC) const {}

/// Return a virtual register initialized with the
/// global base register value. Output instructions required to
/// initialize the register in the function entry block, if necessary.
///
/// TODO: Eliminate this and move the code to X86MachineFunctionInfo.
///
unsigned X86InstrInfo::getGlobalBaseReg(MachineFunction *MF) const {}

// FIXME: Some shuffle and unpack instructions have equivalents in different
// domains, but they require a bit more work than just switching opcodes.

static const uint16_t *lookup(unsigned opcode, unsigned domain,
                              ArrayRef<uint16_t[3]> Table) {}

static const uint16_t *lookupAVX512(unsigned opcode, unsigned domain,
                                    ArrayRef<uint16_t[4]> Table) {}

// Helper to attempt to widen/narrow blend masks.
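// For example, widening mask 0b01 from width 2 to width 4 yields 0b0011;
// narrowing only succeeds when each group of adjacent bits is uniformly
// ones or zeros.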
static bool AdjustBlendMask(unsigned OldMask, unsigned OldWidth,
                            unsigned NewWidth, unsigned *pNewMask = nullptr) {}

uint16_t X86InstrInfo::getExecutionDomainCustom(const MachineInstr &MI) const {}

#include "X86ReplaceableInstrs.def"

bool X86InstrInfo::setExecutionDomainCustom(MachineInstr &MI,
                                            unsigned Domain) const {}

std::pair<uint16_t, uint16_t>
X86InstrInfo::getExecutionDomain(const MachineInstr &MI) const {}

void X86InstrInfo::setExecutionDomain(MachineInstr &MI, unsigned Domain) const {}

void X86InstrInfo::insertNoop(MachineBasicBlock &MBB,
                              MachineBasicBlock::iterator MI) const {}

/// Return the noop instruction to use.
MCInst X86InstrInfo::getNop() const {}

bool X86InstrInfo::isHighLatencyDef(int opc) const {}

bool X86InstrInfo::hasHighOperandLatency(const TargetSchedModel &SchedModel,
                                         const MachineRegisterInfo *MRI,
                                         const MachineInstr &DefMI,
                                         unsigned DefIdx,
                                         const MachineInstr &UseMI,
                                         unsigned UseIdx) const {}

bool X86InstrInfo::hasReassociableOperands(const MachineInstr &Inst,
                                           const MachineBasicBlock *MBB) const {}

// TODO: There are many more machine instruction opcodes to match:
//       1. Other data types (integer, vectors)
//       2. Other math / logic operations (xor, or)
//       3. Other forms of the same operation (intrinsics and other variants)
bool X86InstrInfo::isAssociativeAndCommutative(const MachineInstr &Inst,
                                               bool Invert) const {}

/// If \p DescribedReg overlaps with the MOVrr instruction's destination
/// register then, if possible, describe the value in terms of the source
/// register.
static std::optional<ParamLoadedValue>
describeMOVrrLoadedValue(const MachineInstr &MI, Register DescribedReg,
                         const TargetRegisterInfo *TRI) {}

std::optional<ParamLoadedValue>
X86InstrInfo::describeLoadedValue(const MachineInstr &MI, Register Reg) const {}

/// This is an architecture-specific helper function of reassociateOps.
/// Set special operand attributes for new instructions after reassociation.
void X86InstrInfo::setSpecialOperandAttr(MachineInstr &OldMI1,
                                         MachineInstr &OldMI2,
                                         MachineInstr &NewMI1,
                                         MachineInstr &NewMI2) const {}

std::pair<unsigned, unsigned>
X86InstrInfo::decomposeMachineOperandsTargetFlags(unsigned TF) const {}

ArrayRef<std::pair<unsigned, const char *>>
X86InstrInfo::getSerializableDirectMachineOperandTargetFlags() const {}

namespace {
/// Create Global Base Reg pass. This initializes the PIC
/// global base register for x86-32.
struct CGBR : public MachineFunctionPass {};
} // namespace

char CGBR::ID = 0;
FunctionPass *llvm::createX86GlobalBaseRegPass() {}

namespace {
struct LDTLSCleanup : public MachineFunctionPass {};
} // namespace

char LDTLSCleanup::ID = 0;
FunctionPass *llvm::createCleanupLocalDynamicTLSPass() {}

/// Constants defining how certain sequences should be outlined.
///
/// \p MachineOutlinerDefault implies that the function is called with a call
/// instruction, and a return must be emitted for the outlined function frame.
///
/// That is,
///
/// I1                                 OUTLINED_FUNCTION:
/// I2 --> call OUTLINED_FUNCTION       I1
/// I3                                  I2
///                                     I3
///                                     ret
///
/// * Call construction overhead: 1 (call instruction)
/// * Frame construction overhead: 1 (return instruction)
///
/// \p MachineOutlinerTailCall implies that the function is being tail called.
/// A jump is emitted instead of a call, and the return is already present in
/// the outlined sequence. That is,
///
/// I1                                 OUTLINED_FUNCTION:
/// I2 --> jmp OUTLINED_FUNCTION       I1
/// ret                                I2
///                                    ret
///
/// * Call construction overhead: 1 (jump instruction)
/// * Frame construction overhead: 0 (don't need to return)
///
enum MachineOutlinerClass { MachineOutlinerDefault, MachineOutlinerTailCall };

std::optional<std::unique_ptr<outliner::OutlinedFunction>>
X86InstrInfo::getOutliningCandidateInfo(
    const MachineModuleInfo &MMI,
    std::vector<outliner::Candidate> &RepeatedSequenceLocs,
    unsigned MinRepeats) const {}

bool X86InstrInfo::isFunctionSafeToOutlineFrom(
    MachineFunction &MF, bool OutlineFromLinkOnceODRs) const {}

outliner::InstrType
X86InstrInfo::getOutliningTypeImpl(const MachineModuleInfo &MMI,
                                   MachineBasicBlock::iterator &MIT,
                                   unsigned Flags) const {}

void X86InstrInfo::buildOutlinedFrame(
    MachineBasicBlock &MBB, MachineFunction &MF,
    const outliner::OutlinedFunction &OF) const {}

MachineBasicBlock::iterator X86InstrInfo::insertOutlinedCall(
    Module &M, MachineBasicBlock &MBB, MachineBasicBlock::iterator &It,
    MachineFunction &MF, outliner::Candidate &C) const {}

void X86InstrInfo::buildClearRegister(Register Reg, MachineBasicBlock &MBB,
                                      MachineBasicBlock::iterator Iter,
                                      DebugLoc &DL,
                                      bool AllowSideEffects) const {}

bool X86InstrInfo::getMachineCombinerPatterns(
    MachineInstr &Root, SmallVectorImpl<unsigned> &Patterns,
    bool DoRegPressureReduce) const {}

static void
genAlternativeDpCodeSequence(MachineInstr &Root, const TargetInstrInfo &TII,
                             SmallVectorImpl<MachineInstr *> &InsInstrs,
                             SmallVectorImpl<MachineInstr *> &DelInstrs,
                             DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) {}

void X86InstrInfo::genAlternativeCodeSequence(
    MachineInstr &Root, unsigned Pattern,
    SmallVectorImpl<MachineInstr *> &InsInstrs,
    SmallVectorImpl<MachineInstr *> &DelInstrs,
    DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) const {}

// See also: X86DAGToDAGISel::SelectInlineAsmMemoryOperand().
void X86InstrInfo::getFrameIndexOperands(SmallVectorImpl<MachineOperand> &Ops,
                                         int FI) const {}

#define GET_INSTRINFO_HELPERS
#include "X86GenInstrInfo.inc"