llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp

//===-- ARMBaseInstrInfo.cpp - ARM Instruction Information ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the Base ARM implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#include "ARMBaseInstrInfo.h"
#include "ARMBaseRegisterInfo.h"
#include "ARMConstantPoolValue.h"
#include "ARMFeatures.h"
#include "ARMHazardRecognizer.h"
#include "ARMMachineFunctionInfo.h"
#include "ARMSubtarget.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "MCTargetDesc/ARMBaseInfo.h"
#include "MVETailPredUtils.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/DFAPacketizer.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachinePipeliner.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/MachineScheduler.h"
#include "llvm/CodeGen/MultiHazardRecognizer.h"
#include "llvm/CodeGen/ScoreboardHazardRecognizer.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSchedule.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/Module.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCInstrItineraries.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/TargetParser/Triple.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <new>
#include <utility>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "arm-instrinfo"

#define GET_INSTRINFO_CTOR_DTOR
#include "ARMGenInstrInfo.inc"

static cl::opt<bool>
EnableARM3Addr("enable-arm-3-addr-conv", cl::Hidden,
               cl::desc("Enable ARM 2-addr to 3-addr conv"));

/// ARM_MLxEntry - Record information about MLA / MLS instructions.
struct ARM_MLxEntry {};

static const ARM_MLxEntry ARM_MLxTable[] = {};
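
// The table contents are elided above. For illustration only -- judging from
// the isFpMLxInstruction() query further below, each entry plausibly carries:
//
//   struct ARM_MLxEntry {
//     uint16_t MLxOpc;    // MLA / MLS opcode
//     uint16_t MulOpc;    // expanded multiply opcode
//     uint16_t AddSubOpc; // expanded add / sub opcode
//     bool NegAcc;        // accumulator is negated (the MLS forms)
//     bool HasLane;       // instruction has an extra "lane" operand
//   };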

ARMBaseInstrInfo::ARMBaseInstrInfo(const ARMSubtarget& STI)
  :{}

// Use a ScoreboardHazardRecognizer for prepass ARM scheduling. TargetInstrInfo
// currently defaults to no prepass hazard recognizer.
ScheduleHazardRecognizer *
ARMBaseInstrInfo::CreateTargetHazardRecognizer(const TargetSubtargetInfo *STI,
                                               const ScheduleDAG *DAG) const {}

// Called during:
// - pre-RA scheduling
// - post-RA scheduling when FeatureUseMISched is set
ScheduleHazardRecognizer *ARMBaseInstrInfo::CreateTargetMIHazardRecognizer(
    const InstrItineraryData *II, const ScheduleDAGMI *DAG) const {}

// Called during post-RA scheduling when FeatureUseMISched is not set
ScheduleHazardRecognizer *ARMBaseInstrInfo::
CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II,
                                   const ScheduleDAG *DAG) const {}

MachineInstr *
ARMBaseInstrInfo::convertToThreeAddress(MachineInstr &MI, LiveVariables *LV,
                                        LiveIntervals *LIS) const {}

// Branch analysis.
// Cond vector output format:
//   0 elements indicates an unconditional branch
//   2 elements indicates a conditional branch; the elements are
//     the condition to check and the CPSR.
//   3 elements indicates a hardware loop end; the elements
//     are the opcode, the operand value to test, and a dummy
//     operand used to pad out to 3 operands.
bool ARMBaseInstrInfo::analyzeBranch(MachineBasicBlock &MBB,
                                     MachineBasicBlock *&TBB,
                                     MachineBasicBlock *&FBB,
                                     SmallVectorImpl<MachineOperand> &Cond,
                                     bool AllowModify) const {}
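
// Illustrative only: a hypothetical helper (not part of the real interface)
// showing how a client would decode the Cond vector format documented above.
enum class CondBranchKind { Unconditional, Conditional, HardwareLoopEnd };
static CondBranchKind classifyBranchCond(ArrayRef<MachineOperand> Cond) {
  if (Cond.empty())
    return CondBranchKind::Unconditional;
  if (Cond.size() == 2) // [condition to check, CPSR]
    return CondBranchKind::Conditional;
  return CondBranchKind::HardwareLoopEnd; // [opcode, test operand, padding]
}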

unsigned ARMBaseInstrInfo::removeBranch(MachineBasicBlock &MBB,
                                        int *BytesRemoved) const {}

unsigned ARMBaseInstrInfo::insertBranch(MachineBasicBlock &MBB,
                                        MachineBasicBlock *TBB,
                                        MachineBasicBlock *FBB,
                                        ArrayRef<MachineOperand> Cond,
                                        const DebugLoc &DL,
                                        int *BytesAdded) const {}

bool ARMBaseInstrInfo::
reverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {}

bool ARMBaseInstrInfo::isPredicated(const MachineInstr &MI) const {}

std::string ARMBaseInstrInfo::createMIROperandComment(
    const MachineInstr &MI, const MachineOperand &Op, unsigned OpIdx,
    const TargetRegisterInfo *TRI) const {}

bool ARMBaseInstrInfo::PredicateInstruction(
    MachineInstr &MI, ArrayRef<MachineOperand> Pred) const {}

bool ARMBaseInstrInfo::SubsumesPredicate(ArrayRef<MachineOperand> Pred1,
                                         ArrayRef<MachineOperand> Pred2) const {}

bool ARMBaseInstrInfo::ClobbersPredicate(MachineInstr &MI,
                                         std::vector<MachineOperand> &Pred,
                                         bool SkipDead) const {}

bool ARMBaseInstrInfo::isCPSRDefined(const MachineInstr &MI) {}

static bool isEligibleForITBlock(const MachineInstr *MI) {}

/// isPredicable - Return true if the specified instruction can be predicated.
/// By default, this returns true for every instruction with a
/// PredicateOperand.
bool ARMBaseInstrInfo::isPredicable(const MachineInstr &MI) const {}

namespace llvm {

template <> bool IsCPSRDead<MachineInstr>(const MachineInstr *MI) {}

} // end namespace llvm

/// GetInstSize - Return the size of the specified MachineInstr.
///
unsigned ARMBaseInstrInfo::getInstSizeInBytes(const MachineInstr &MI) const {}

unsigned ARMBaseInstrInfo::getInstBundleLength(const MachineInstr &MI) const {}

void ARMBaseInstrInfo::copyFromCPSR(MachineBasicBlock &MBB,
                                    MachineBasicBlock::iterator I,
                                    unsigned DestReg, bool KillSrc,
                                    const ARMSubtarget &Subtarget) const {}

void ARMBaseInstrInfo::copyToCPSR(MachineBasicBlock &MBB,
                                  MachineBasicBlock::iterator I,
                                  unsigned SrcReg, bool KillSrc,
                                  const ARMSubtarget &Subtarget) const {}

void llvm::addUnpredicatedMveVpredNOp(MachineInstrBuilder &MIB) {}

void llvm::addUnpredicatedMveVpredROp(MachineInstrBuilder &MIB,
                                      Register DestReg) {}

void llvm::addPredicatedMveVpredNOp(MachineInstrBuilder &MIB, unsigned Cond) {}

void llvm::addPredicatedMveVpredROp(MachineInstrBuilder &MIB,
                                    unsigned Cond, unsigned Inactive) {}

void ARMBaseInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                                   MachineBasicBlock::iterator I,
                                   const DebugLoc &DL, MCRegister DestReg,
                                   MCRegister SrcReg, bool KillSrc,
                                   bool RenamableDest,
                                   bool RenamableSrc) const {}

std::optional<DestSourcePair>
ARMBaseInstrInfo::isCopyInstrImpl(const MachineInstr &MI) const {}

std::optional<ParamLoadedValue>
ARMBaseInstrInfo::describeLoadedValue(const MachineInstr &MI,
                                      Register Reg) const {}

const MachineInstrBuilder &
ARMBaseInstrInfo::AddDReg(MachineInstrBuilder &MIB, unsigned Reg,
                          unsigned SubIdx, unsigned State,
                          const TargetRegisterInfo *TRI) const {}

void ARMBaseInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
                                           MachineBasicBlock::iterator I,
                                           Register SrcReg, bool isKill, int FI,
                                           const TargetRegisterClass *RC,
                                           const TargetRegisterInfo *TRI,
                                           Register VReg) const {}

Register ARMBaseInstrInfo::isStoreToStackSlot(const MachineInstr &MI,
                                              int &FrameIndex) const {}

Register ARMBaseInstrInfo::isStoreToStackSlotPostFE(const MachineInstr &MI,
                                                    int &FrameIndex) const {}

void ARMBaseInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
                                            MachineBasicBlock::iterator I,
                                            Register DestReg, int FI,
                                            const TargetRegisterClass *RC,
                                            const TargetRegisterInfo *TRI,
                                            Register VReg) const {}

Register ARMBaseInstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
                                               int &FrameIndex) const {}

Register ARMBaseInstrInfo::isLoadFromStackSlotPostFE(const MachineInstr &MI,
                                                     int &FrameIndex) const {}

/// Expands MEMCPY to either LDMIA/STMIA or LDMIA_UPD/STMIA_UPD
/// depending on whether the result is used.
void ARMBaseInstrInfo::expandMEMCPY(MachineBasicBlock::iterator MI) const {}

bool ARMBaseInstrInfo::expandPostRAPseudo(MachineInstr &MI) const {}

/// Create a copy of a const pool value. Update CPI to the new index and return
/// the label UID.
static unsigned duplicateCPV(MachineFunction &MF, unsigned &CPI) {}

void ARMBaseInstrInfo::reMaterialize(MachineBasicBlock &MBB,
                                     MachineBasicBlock::iterator I,
                                     Register DestReg, unsigned SubIdx,
                                     const MachineInstr &Orig,
                                     const TargetRegisterInfo &TRI) const {}

MachineInstr &
ARMBaseInstrInfo::duplicate(MachineBasicBlock &MBB,
    MachineBasicBlock::iterator InsertBefore,
    const MachineInstr &Orig) const {}

bool ARMBaseInstrInfo::produceSameValue(const MachineInstr &MI0,
                                        const MachineInstr &MI1,
                                        const MachineRegisterInfo *MRI) const {}

/// areLoadsFromSameBasePtr - This is used by the pre-regalloc scheduler to
/// determine if two loads are loading from the same base address. It should
/// only return true if the base pointers are the same and the only difference
/// between the two addresses is the offset. It also returns the offsets by
/// reference.
///
/// FIXME: remove this in favor of the MachineInstr interface once pre-RA-sched
/// is permanently disabled.
bool ARMBaseInstrInfo::areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2,
                                               int64_t &Offset1,
                                               int64_t &Offset2) const {}

/// shouldScheduleLoadsNear - This is used by the pre-regalloc scheduler to
/// determine (in conjunction with areLoadsFromSameBasePtr) if two loads should
/// be scheduled together. On some targets if two loads are loading from
/// addresses in the same cache line, it's better if they are scheduled
/// together. This function takes two integers that represent the load offsets
/// from the common base address. It returns true if it decides it's desirable
/// to schedule the two loads together. "NumLoads" is the number of loads that
/// have already been scheduled after Load1.
///
/// FIXME: remove this in favor of the MachineInstr interface once pre-RA-sched
/// is permanently disabled.
bool ARMBaseInstrInfo::shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2,
                                               int64_t Offset1, int64_t Offset2,
                                               unsigned NumLoads) const {}
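
// Illustrative only: how a hypothetical pre-regalloc scheduler client would
// combine the two hooks above -- first prove a common base pointer, then ask
// whether the offsets are close enough to schedule the loads together.
static bool shouldClusterLoads(const ARMBaseInstrInfo &TII, SDNode *Load1,
                               SDNode *Load2, unsigned NumLoads) {
  int64_t Off1 = 0, Off2 = 0;
  return TII.areLoadsFromSameBasePtr(Load1, Load2, Off1, Off2) &&
         TII.shouldScheduleLoadsNear(Load1, Load2, Off1, Off2, NumLoads);
}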

bool ARMBaseInstrInfo::isSchedulingBoundary(const MachineInstr &MI,
                                            const MachineBasicBlock *MBB,
                                            const MachineFunction &MF) const {}

bool ARMBaseInstrInfo::
isProfitableToIfCvt(MachineBasicBlock &MBB,
                    unsigned NumCycles, unsigned ExtraPredCycles,
                    BranchProbability Probability) const {}

bool ARMBaseInstrInfo::
isProfitableToIfCvt(MachineBasicBlock &TBB,
                    unsigned TCycles, unsigned TExtra,
                    MachineBasicBlock &FBB,
                    unsigned FCycles, unsigned FExtra,
                    BranchProbability Probability) const {}

unsigned
ARMBaseInstrInfo::extraSizeToPredicateInstructions(const MachineFunction &MF,
                                                   unsigned NumInsts) const {}

unsigned
ARMBaseInstrInfo::predictBranchSizeForIfCvt(MachineInstr &MI) const {}

bool
ARMBaseInstrInfo::isProfitableToUnpredicate(MachineBasicBlock &TMBB,
                                            MachineBasicBlock &FMBB) const {}

/// getInstrPredicate - If instruction is predicated, returns its predicate
/// condition, otherwise returns AL. It also returns the condition code
/// register by reference.
ARMCC::CondCodes llvm::getInstrPredicate(const MachineInstr &MI,
                                         Register &PredReg) {}
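
// Illustrative only: a hypothetical wrapper showing the common query built on
// getInstrPredicate -- "is MI predicated on anything other than AL?".
static bool hasNonTrivialPredicate(const MachineInstr &MI) {
  Register PredReg;
  return getInstrPredicate(MI, PredReg) != ARMCC::AL;
}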

unsigned llvm::getMatchingCondBranchOpcode(unsigned Opc) {}
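
// (For illustration, the mapping one would expect from this helper:
// ARM::B -> ARM::Bcc, ARM::tB -> ARM::tBcc, ARM::t2B -> ARM::t2Bcc.)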

MachineInstr *ARMBaseInstrInfo::commuteInstructionImpl(MachineInstr &MI,
                                                       bool NewMI,
                                                       unsigned OpIdx1,
                                                       unsigned OpIdx2) const {}

/// Identify instructions that can be folded into a MOVCC instruction, and
/// return the defining instruction.
MachineInstr *
ARMBaseInstrInfo::canFoldIntoMOVCC(Register Reg, const MachineRegisterInfo &MRI,
                                   const TargetInstrInfo *TII) const {}

bool ARMBaseInstrInfo::analyzeSelect(const MachineInstr &MI,
                                     SmallVectorImpl<MachineOperand> &Cond,
                                     unsigned &TrueOp, unsigned &FalseOp,
                                     bool &Optimizable) const {}

MachineInstr *
ARMBaseInstrInfo::optimizeSelect(MachineInstr &MI,
                                 SmallPtrSetImpl<MachineInstr *> &SeenMIs,
                                 bool PreferFalse) const {}

/// Map pseudo instructions that imply an 'S' bit onto real opcodes. Whether the
/// instruction is encoded with an 'S' bit is determined by the optional CPSR
/// def operand.
///
/// This will go away once we can teach tblgen how to set the optional CPSR def
/// operand itself.
struct AddSubFlagsOpcodePair {};

static const AddSubFlagsOpcodePair AddSubFlagsOpcodeMap[] = {};
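
// For illustration: each pair plausibly holds {PseudoOpc, MachineOpc}, mapping
// a flag-setting pseudo to the real opcode chosen once the optional CPSR def
// has been attached, e.g. {ARM::ADDSri, ARM::ADDri}.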

unsigned llvm::convertAddSubFlagsOpcode(unsigned OldOpc) {}

void llvm::emitARMRegPlusImmediate(MachineBasicBlock &MBB,
                                   MachineBasicBlock::iterator &MBBI,
                                   const DebugLoc &dl, Register DestReg,
                                   Register BaseReg, int NumBytes,
                                   ARMCC::CondCodes Pred, Register PredReg,
                                   const ARMBaseInstrInfo &TII,
                                   unsigned MIFlags) {}

bool llvm::tryFoldSPUpdateIntoPushPop(const ARMSubtarget &Subtarget,
                                      MachineFunction &MF, MachineInstr *MI,
                                      unsigned NumBytes) {}

bool llvm::rewriteARMFrameIndex(MachineInstr &MI, unsigned FrameRegIdx,
                                Register FrameReg, int &Offset,
                                const ARMBaseInstrInfo &TII) {}

/// analyzeCompare - For a comparison instruction, return the source registers
/// in SrcReg and SrcReg2 if it has two register operands, and the value it
/// compares against in CmpValue. Return true if the comparison instruction
/// can be analyzed.
bool ARMBaseInstrInfo::analyzeCompare(const MachineInstr &MI, Register &SrcReg,
                                      Register &SrcReg2, int64_t &CmpMask,
                                      int64_t &CmpValue) const {}

/// isSuitableForMask - Identify a suitable 'and' instruction that
/// operates on the given source register and applies the same mask
/// as a 'tst' instruction. Provide a limited look-through for copies.
/// When successful, MI will hold the found instruction.
static bool isSuitableForMask(MachineInstr *&MI, Register SrcReg,
                              int CmpMask, bool CommonUse) {}

/// getCmpToAddCondition - assume the flags are set by CMP(a,b), return
/// the condition code if we modify the instructions such that flags are
/// set by ADD(a,b,X).
inline static ARMCC::CondCodes getCmpToAddCondition(ARMCC::CondCodes CC) {}

/// isRedundantFlagInstr - check whether the first instruction, whose only
/// purpose is to update flags, can be made redundant.
/// CMPrr can be made redundant by SUBrr if the operands are the same.
/// CMPri can be made redundant by SUBri if the operands are the same.
/// CMPrr(r0, r1) can be made redundant by ADDr[ri](r0, r1, X).
/// This function can be extended later on.
inline static bool isRedundantFlagInstr(const MachineInstr *CmpI,
                                        Register SrcReg, Register SrcReg2,
                                        int64_t ImmValue,
                                        const MachineInstr *OI,
                                        bool &IsThumb1) {}

static bool isOptimizeCompareCandidate(MachineInstr *MI, bool &IsThumb1) {}

/// optimizeCompareInstr - Convert the instruction supplying the argument to the
/// comparison into one that sets the zero bit in the flags register.
/// Remove a redundant Compare instruction if an earlier instruction can set the
/// flags in the same way as Compare.
/// E.g. SUBrr(r1,r2) and CMPrr(r1,r2). We also handle the case where two
/// operands are swapped: SUBrr(r1,r2) and CMPrr(r2,r1), by updating the
/// condition code of instructions which use the flags.
bool ARMBaseInstrInfo::optimizeCompareInstr(
    MachineInstr &CmpInstr, Register SrcReg, Register SrcReg2, int64_t CmpMask,
    int64_t CmpValue, const MachineRegisterInfo *MRI) const {}
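
// Concrete shape of the transformation, for illustration:
//
//   sub r0, r1, r2             subs r0, r1, r2
//   cmp r1, r2          -->    ; compare removed
//   beq .L                     beq .L
//
// In the swapped-operand case (CMPrr(r2, r1) against SUBrr(r1, r2)), the
// users' condition codes are swapped as well, e.g. GT becomes LT, because the
// flags now reflect r1 - r2 rather than r2 - r1.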

bool ARMBaseInstrInfo::shouldSink(const MachineInstr &MI) const {}

bool ARMBaseInstrInfo::foldImmediate(MachineInstr &UseMI, MachineInstr &DefMI,
                                     Register Reg,
                                     MachineRegisterInfo *MRI) const {}

static unsigned getNumMicroOpsSwiftLdSt(const InstrItineraryData *ItinData,
                                        const MachineInstr &MI) {}

// Return the number of 32-bit words loaded by LDM or stored by STM. If this
// can't be easily determined, return 0 (missing MachineMemOperand).
//
// FIXME: The current MachineInstr design does not support relying on machine
// mem operands to determine the width of a memory access. Instead, we expect
// the target to provide this information based on the instruction opcode and
// operands. However, using MachineMemOperand is the best solution now for
// two reasons:
//
// 1) getNumMicroOps tries to infer LDM memory width from the total number of MI
// operands. This is much more dangerous than using the MachineMemOperand
// sizes because CodeGen passes can insert/remove optional machine operands. In
// fact, it's totally incorrect for preRA passes and appears to be wrong for
// postRA passes as well.
//
// 2) getNumLDMAddresses is only used by the scheduling machine model and any
// machine model that calls this should handle the unknown (zero size) case.
//
// Long term, we should require a target hook that verifies MachineMemOperand
// sizes during MC lowering. That target hook should be local to MC lowering
// because we can't ensure that it is aware of other MI forms. Doing this will
// ensure that MachineMemOperands are correctly propagated through all passes.
unsigned ARMBaseInstrInfo::getNumLDMAddresses(const MachineInstr &MI) const {}
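
// A minimal sketch (not the real implementation) of the MachineMemOperand
// based counting described above; it assumes the newer LocationSize-returning
// MachineMemOperand::getSize() API.
static unsigned countLDMWordsFromMMOs(const MachineInstr &MI) {
  unsigned Bytes = 0;
  for (const MachineMemOperand *MMO : MI.memoperands()) {
    if (!MMO->getSize().hasValue())
      return 0; // Unknown access width -> report "can't tell".
    Bytes += MMO->getSize().getValue();
  }
  return Bytes / 4; // Number of 32-bit words transferred.
}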

static unsigned getNumMicroOpsSingleIssuePlusExtras(unsigned Opc,
                                                    unsigned NumRegs) {}

unsigned ARMBaseInstrInfo::getNumMicroOps(const InstrItineraryData *ItinData,
                                          const MachineInstr &MI) const {}

std::optional<unsigned>
ARMBaseInstrInfo::getVLDMDefCycle(const InstrItineraryData *ItinData,
                                  const MCInstrDesc &DefMCID, unsigned DefClass,
                                  unsigned DefIdx, unsigned DefAlign) const {}

std::optional<unsigned>
ARMBaseInstrInfo::getLDMDefCycle(const InstrItineraryData *ItinData,
                                 const MCInstrDesc &DefMCID, unsigned DefClass,
                                 unsigned DefIdx, unsigned DefAlign) const {}

std::optional<unsigned>
ARMBaseInstrInfo::getVSTMUseCycle(const InstrItineraryData *ItinData,
                                  const MCInstrDesc &UseMCID, unsigned UseClass,
                                  unsigned UseIdx, unsigned UseAlign) const {}

std::optional<unsigned>
ARMBaseInstrInfo::getSTMUseCycle(const InstrItineraryData *ItinData,
                                 const MCInstrDesc &UseMCID, unsigned UseClass,
                                 unsigned UseIdx, unsigned UseAlign) const {}

std::optional<unsigned> ARMBaseInstrInfo::getOperandLatency(
    const InstrItineraryData *ItinData, const MCInstrDesc &DefMCID,
    unsigned DefIdx, unsigned DefAlign, const MCInstrDesc &UseMCID,
    unsigned UseIdx, unsigned UseAlign) const {}

static const MachineInstr *getBundledDefMI(const TargetRegisterInfo *TRI,
                                           const MachineInstr *MI, unsigned Reg,
                                           unsigned &DefIdx, unsigned &Dist) {}

static const MachineInstr *getBundledUseMI(const TargetRegisterInfo *TRI,
                                           const MachineInstr &MI, unsigned Reg,
                                           unsigned &UseIdx, unsigned &Dist) {}

/// Return the number of cycles to add to (or subtract from) the static
/// itinerary based on the def opcode and alignment. The caller will ensure that
/// adjusted latency is at least one cycle.
static int adjustDefLatency(const ARMSubtarget &Subtarget,
                            const MachineInstr &DefMI,
                            const MCInstrDesc &DefMCID, unsigned DefAlign) {}
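
// Caller-side contract, sketched for illustration (hypothetical use):
//
//   int Adj = adjustDefLatency(Subtarget, DefMI, DefMCID, DefAlign);
//   Latency = std::max(1, int(Latency) + Adj);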

std::optional<unsigned> ARMBaseInstrInfo::getOperandLatency(
    const InstrItineraryData *ItinData, const MachineInstr &DefMI,
    unsigned DefIdx, const MachineInstr &UseMI, unsigned UseIdx) const {}

std::optional<unsigned> ARMBaseInstrInfo::getOperandLatencyImpl(
    const InstrItineraryData *ItinData, const MachineInstr &DefMI,
    unsigned DefIdx, const MCInstrDesc &DefMCID, unsigned DefAdj,
    const MachineOperand &DefMO, unsigned Reg, const MachineInstr &UseMI,
    unsigned UseIdx, const MCInstrDesc &UseMCID, unsigned UseAdj) const {}

std::optional<unsigned>
ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
                                    SDNode *DefNode, unsigned DefIdx,
                                    SDNode *UseNode, unsigned UseIdx) const {}

unsigned ARMBaseInstrInfo::getPredicationCost(const MachineInstr &MI) const {}

unsigned ARMBaseInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
                                           const MachineInstr &MI,
                                           unsigned *PredCost) const {}

unsigned ARMBaseInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
                                           SDNode *Node) const {}

bool ARMBaseInstrInfo::hasHighOperandLatency(const TargetSchedModel &SchedModel,
                                             const MachineRegisterInfo *MRI,
                                             const MachineInstr &DefMI,
                                             unsigned DefIdx,
                                             const MachineInstr &UseMI,
                                             unsigned UseIdx) const {}

bool ARMBaseInstrInfo::hasLowDefLatency(const TargetSchedModel &SchedModel,
                                        const MachineInstr &DefMI,
                                        unsigned DefIdx) const {}

bool ARMBaseInstrInfo::verifyInstruction(const MachineInstr &MI,
                                         StringRef &ErrInfo) const {}

void ARMBaseInstrInfo::expandLoadStackGuardBase(MachineBasicBlock::iterator MI,
                                                unsigned LoadImmOpc,
                                                unsigned LoadOpc) const {}

bool
ARMBaseInstrInfo::isFpMLxInstruction(unsigned Opcode, unsigned &MulOpc,
                                     unsigned &AddSubOpc,
                                     bool &NegAcc, bool &HasLane) const {}

//===----------------------------------------------------------------------===//
// Execution domains.
//===----------------------------------------------------------------------===//
//
// Some instructions go down the NEON pipeline, some go down the VFP pipeline,
// and some can go down both.  The vmov instructions go down the VFP pipeline,
// but they can be changed to vorr equivalents that are executed by the NEON
// pipeline.
//
// We use the following execution domain numbering:
//
enum ARMExeDomain {};

//
// Also see ARMInstrFormats.td and Domain* enums in ARMBaseInfo.h
//
std::pair<uint16_t, uint16_t>
ARMBaseInstrInfo::getExecutionDomain(const MachineInstr &MI) const {}

static unsigned getCorrespondingDRegAndLane(const TargetRegisterInfo *TRI,
                                            unsigned SReg, unsigned &Lane) {}

/// getImplicitSPRUseForDPRUse - Given a use of a DPR register and lane,
/// set ImplicitSReg to the SPR that must be marked as an implicit use, or to
/// zero if no implicit-use register is needed.
///
/// If the function cannot determine if an SPR should be marked implicit use or
/// not, it returns false.
///
/// This function handles cases where an instruction is being modified from
/// taking an SPR to a DPR[Lane]. A use of the DPR is being added, which may
/// conflict with an earlier def of an SPR corresponding to DPR[Lane^1] (i.e.
/// the other lane of the DPR).
///
/// If the other SPR is defined, an implicit-use of it should be added.
/// Otherwise (including the case where the DPR itself is defined), it should
/// not be.
///
static bool getImplicitSPRUseForDPRUse(const TargetRegisterInfo *TRI,
                                       MachineInstr &MI, unsigned DReg,
                                       unsigned Lane, unsigned &ImplicitSReg) {}
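
// Worked example, for illustration: suppose a use of S1 is rewritten as a use
// of D0[1] (Lane = 1). The sibling lane D0[0] (Lane ^ 1) is S0. If S0 has a
// reaching definition, the rewritten instruction must list S0 as an implicit
// use so liveness stays correct (ImplicitSReg = S0); if instead all of D0 is
// defined, no implicit operand is needed (ImplicitSReg = 0).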

void ARMBaseInstrInfo::setExecutionDomain(MachineInstr &MI,
                                          unsigned Domain) const {}

//===----------------------------------------------------------------------===//
// Partial register updates
//===----------------------------------------------------------------------===//
//
// Swift renames NEON registers with 64-bit granularity.  That means any
// instruction writing an S-reg implicitly reads the containing D-reg.  The
// problem is mostly avoided by translating f32 operations to v2f32 operations
// on D-registers, but f32 loads are still a problem.
//
// These instructions can load an f32 into a NEON register:
//
// VLDRS - Only writes S, partial D update.
// VLD1LNd32 - Writes all D-regs, explicit partial D update, 2 uops.
// VLD1DUPd32 - Writes all D-regs, no partial reg update, 2 uops.
//
// FCONSTD can be used as a dependency-breaking instruction.
unsigned ARMBaseInstrInfo::getPartialRegUpdateClearance(
    const MachineInstr &MI, unsigned OpNum,
    const TargetRegisterInfo *TRI) const {}
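
// Illustrative scenario (assumed typical on Swift): "vldr s0, [...]" only
// partially writes d0, so an older in-flight writer of d0 creates a false
// dependence. When the clearance is too small, breakPartialRegDependency()
// below inserts a dependency-breaking full-width write of d0 (e.g. FCONSTD,
// as noted above) ahead of the load.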

// Break a partial register dependency after getPartialRegUpdateClearance
// returned non-zero.
void ARMBaseInstrInfo::breakPartialRegDependency(
    MachineInstr &MI, unsigned OpNum, const TargetRegisterInfo *TRI) const {}

bool ARMBaseInstrInfo::hasNOP() const {}

bool ARMBaseInstrInfo::isSwiftFastImmShift(const MachineInstr *MI) const {}

bool ARMBaseInstrInfo::getRegSequenceLikeInputs(
    const MachineInstr &MI, unsigned DefIdx,
    SmallVectorImpl<RegSubRegPairAndIdx> &InputRegs) const {}

bool ARMBaseInstrInfo::getExtractSubregLikeInputs(
    const MachineInstr &MI, unsigned DefIdx,
    RegSubRegPairAndIdx &InputReg) const {}

bool ARMBaseInstrInfo::getInsertSubregLikeInputs(
    const MachineInstr &MI, unsigned DefIdx, RegSubRegPair &BaseReg,
    RegSubRegPairAndIdx &InsertedReg) const {}

std::pair<unsigned, unsigned>
ARMBaseInstrInfo::decomposeMachineOperandsTargetFlags(unsigned TF) const {}

ArrayRef<std::pair<unsigned, const char *>>
ARMBaseInstrInfo::getSerializableDirectMachineOperandTargetFlags() const {}

ArrayRef<std::pair<unsigned, const char *>>
ARMBaseInstrInfo::getSerializableBitmaskMachineOperandTargetFlags() const {}

std::optional<RegImmPair>
ARMBaseInstrInfo::isAddImmediate(const MachineInstr &MI, Register Reg) const {}

bool llvm::registerDefinedBetween(unsigned Reg,
                                  MachineBasicBlock::iterator From,
                                  MachineBasicBlock::iterator To,
                                  const TargetRegisterInfo *TRI) {}

MachineInstr *llvm::findCMPToFoldIntoCBZ(MachineInstr *Br,
                                         const TargetRegisterInfo *TRI) {}

unsigned llvm::ConstantMaterializationCost(unsigned Val,
                                           const ARMSubtarget *Subtarget,
                                           bool ForCodesize) {}

bool llvm::HasLowerConstantMaterializationCost(unsigned Val1, unsigned Val2,
                                               const ARMSubtarget *Subtarget,
                                               bool ForCodesize) {}
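
// Example, for illustration: on a target with MOVW/MOVT (v6T2 and later),
// Val = 0x00FF00FF needs a MOVW/MOVT pair (two instructions), while
// Val = 0x000000FF fits a single MOV, so
// HasLowerConstantMaterializationCost(0xFF, 0x00FF00FF, ...) would be
// expected to return true.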

/// Constants defining how certain sequences should be outlined.
/// This encompasses how an outlined function should be called, and what kind of
/// frame should be emitted for that outlined function.
///
/// \p MachineOutlinerTailCall implies that the function is being created from
/// a sequence of instructions ending in a return.
///
/// That is,
///
/// I1                                OUTLINED_FUNCTION:
/// I2    --> B OUTLINED_FUNCTION     I1
/// BX LR                             I2
///                                   BX LR
///
/// +-------------------------+--------+-----+
/// |                         | Thumb2 | ARM |
/// +-------------------------+--------+-----+
/// | Call overhead in Bytes  |      4 |   4 |
/// | Frame overhead in Bytes |      0 |   0 |
/// | Stack fixup required    |     No |  No |
/// +-------------------------+--------+-----+
///
/// \p MachineOutlinerThunk implies that the function is being created from
/// a sequence of instructions ending in a call. The outlined function is
/// called with a BL instruction, and the outlined function tail-calls the
/// original call destination.
///
/// That is,
///
/// I1                                OUTLINED_FUNCTION:
/// I2   --> BL OUTLINED_FUNCTION     I1
/// BL f                              I2
///                                   B f
///
/// +-------------------------+--------+-----+
/// |                         | Thumb2 | ARM |
/// +-------------------------+--------+-----+
/// | Call overhead in Bytes  |      4 |   4 |
/// | Frame overhead in Bytes |      0 |   0 |
/// | Stack fixup required    |     No |  No |
/// +-------------------------+--------+-----+
///
/// \p MachineOutlinerNoLRSave implies that the function should be called using
/// a BL instruction, but doesn't require LR to be saved and restored. This
/// happens when LR is known to be dead.
///
/// That is,
///
/// I1                                OUTLINED_FUNCTION:
/// I2 --> BL OUTLINED_FUNCTION       I1
/// I3                                I2
///                                   I3
///                                   BX LR
///
/// +-------------------------+--------+-----+
/// |                         | Thumb2 | ARM |
/// +-------------------------+--------+-----+
/// | Call overhead in Bytes  |      4 |   4 |
/// | Frame overhead in Bytes |      2 |   4 |
/// | Stack fixup required    |     No |  No |
/// +-------------------------+--------+-----+
///
/// \p MachineOutlinerRegSave implies that the function should be called with a
/// save and restore of LR to an available register. This allows us to avoid
/// stack fixups. Note that this outlining variant is compatible with the
/// NoLRSave case.
///
/// That is,
///
/// I1     Save LR                    OUTLINED_FUNCTION:
/// I2 --> BL OUTLINED_FUNCTION       I1
/// I3     Restore LR                 I2
///                                   I3
///                                   BX LR
///
/// +-------------------------+--------+-----+
/// |                         | Thumb2 | ARM |
/// +-------------------------+--------+-----+
/// | Call overhead in Bytes  |      8 |  12 |
/// | Frame overhead in Bytes |      2 |   4 |
/// | Stack fixup required    |     No |  No |
/// +-------------------------+--------+-----+
///
/// \p MachineOutlinerDefault implies that the function should be called with
/// a save and restore of LR to the stack.
///
/// That is,
///
/// I1     Save LR                    OUTLINED_FUNCTION:
/// I2 --> BL OUTLINED_FUNCTION       I1
/// I3     Restore LR                 I2
///                                   I3
///                                   BX LR
///
/// +-------------------------+--------+-----+
/// |                         | Thumb2 | ARM |
/// +-------------------------+--------+-----+
/// | Call overhead in Bytes  |      8 |  12 |
/// | Frame overhead in Bytes |      2 |   4 |
/// | Stack fixup required    |    Yes | Yes |
/// +-------------------------+--------+-----+

enum MachineOutlinerClass {};
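
// The enumerators are elided; from the documentation above they are expected
// to be MachineOutlinerTailCall, MachineOutlinerThunk, MachineOutlinerNoLRSave,
// MachineOutlinerRegSave, and MachineOutlinerDefault.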

enum MachineOutlinerMBBFlags {};

struct OutlinerCosts {};

Register
ARMBaseInstrInfo::findRegisterToSaveLRTo(outliner::Candidate &C) const {}

// Compute liveness of LR at the point after the interval [I, E), which
// denotes a *backward* iteration through instructions. Used only for return
// basic blocks, which do not end with a tail call.
static bool isLRAvailable(const TargetRegisterInfo &TRI,
                          MachineBasicBlock::reverse_iterator I,
                          MachineBasicBlock::reverse_iterator E) {}

std::optional<std::unique_ptr<outliner::OutlinedFunction>>
ARMBaseInstrInfo::getOutliningCandidateInfo(
    const MachineModuleInfo &MMI,
    std::vector<outliner::Candidate> &RepeatedSequenceLocs,
    unsigned MinRepeats) const {}

bool ARMBaseInstrInfo::checkAndUpdateStackOffset(MachineInstr *MI,
                                                 int64_t Fixup,
                                                 bool Updt) const {}

void ARMBaseInstrInfo::mergeOutliningCandidateAttributes(
    Function &F, std::vector<outliner::Candidate> &Candidates) const {}

bool ARMBaseInstrInfo::isFunctionSafeToOutlineFrom(
    MachineFunction &MF, bool OutlineFromLinkOnceODRs) const {}

bool ARMBaseInstrInfo::isMBBSafeToOutlineFrom(MachineBasicBlock &MBB,
                                              unsigned &Flags) const {}

outliner::InstrType
ARMBaseInstrInfo::getOutliningTypeImpl(const MachineModuleInfo &MMI,
                                       MachineBasicBlock::iterator &MIT,
                                       unsigned Flags) const {}

void ARMBaseInstrInfo::fixupPostOutline(MachineBasicBlock &MBB) const {}

void ARMBaseInstrInfo::saveLROnStack(MachineBasicBlock &MBB,
                                     MachineBasicBlock::iterator It, bool CFI,
                                     bool Auth) const {}

void ARMBaseInstrInfo::emitCFIForLRSaveToReg(MachineBasicBlock &MBB,
                                             MachineBasicBlock::iterator It,
                                             Register Reg) const {}

void ARMBaseInstrInfo::restoreLRFromStack(MachineBasicBlock &MBB,
                                          MachineBasicBlock::iterator It,
                                          bool CFI, bool Auth) const {}

void ARMBaseInstrInfo::emitCFIForLRRestoreFromReg(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator It) const {}

void ARMBaseInstrInfo::buildOutlinedFrame(
    MachineBasicBlock &MBB, MachineFunction &MF,
    const outliner::OutlinedFunction &OF) const {}

MachineBasicBlock::iterator ARMBaseInstrInfo::insertOutlinedCall(
    Module &M, MachineBasicBlock &MBB, MachineBasicBlock::iterator &It,
    MachineFunction &MF, outliner::Candidate &C) const {}

bool ARMBaseInstrInfo::shouldOutlineFromFunctionByDefault(
    MachineFunction &MF) const {}

bool ARMBaseInstrInfo::isReallyTriviallyReMaterializable(
    const MachineInstr &MI) const {}

unsigned llvm::getBLXOpcode(const MachineFunction &MF) {}

unsigned llvm::gettBLXrOpcode(const MachineFunction &MF) {}

unsigned llvm::getBLXpredOpcode(const MachineFunction &MF) {}

namespace {
class ARMPipelinerLoopInfo : public TargetInstrInfo::PipelinerLoopInfo {};

void ARMPipelinerLoopInfo::bumpCrossIterationPressure(RegPressureTracker &RPT,
                                                      const IterNeeds &CIN) {}

bool ARMPipelinerLoopInfo::tooMuchRegisterPressure(SwingSchedulerDAG &SSD,
                                                   SMSchedule &SMS) {}

} // namespace

std::unique_ptr<TargetInstrInfo::PipelinerLoopInfo>
ARMBaseInstrInfo::analyzeLoopForPipelining(MachineBasicBlock *LoopBB) const {}