llvm/lib/Target/AArch64/AArch64InstrInfo.cpp

//===- AArch64InstrInfo.cpp - AArch64 Instruction Information -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the AArch64 implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#include "AArch64InstrInfo.h"
#include "AArch64ExpandImm.h"
#include "AArch64FrameLowering.h"
#include "AArch64MachineFunctionInfo.h"
#include "AArch64PointerAuth.h"
#include "AArch64Subtarget.h"
#include "MCTargetDesc/AArch64AddressingModes.h"
#include "MCTargetDesc/AArch64MCTargetDesc.h"
#include "Utils/AArch64BaseInfo.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/LivePhysRegs.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineCombinerPattern.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/Module.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstBuilder.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/LEB128.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include <cassert>
#include <cstdint>
#include <iterator>
#include <utility>

using namespace llvm;

#define GET_INSTRINFO_CTOR_DTOR
#include "AArch64GenInstrInfo.inc"

static cl::opt<unsigned> TBZDisplacementBits(
    "aarch64-tbz-offset-bits", cl::Hidden, cl::init(14),
    cl::desc("Restrict range of TB[N]Z instructions (DEBUG)"));

static cl::opt<unsigned> CBZDisplacementBits(
    "aarch64-cbz-offset-bits", cl::Hidden, cl::init(19),
    cl::desc("Restrict range of CB[N]Z instructions (DEBUG)"));

static cl::opt<unsigned>
    BCCDisplacementBits("aarch64-bcc-offset-bits", cl::Hidden, cl::init(19),
                        cl::desc("Restrict range of Bcc instructions (DEBUG)"));

static cl::opt<unsigned>
    BDisplacementBits("aarch64-b-offset-bits", cl::Hidden, cl::init(26),
                      cl::desc("Restrict range of B instructions (DEBUG)"));

AArch64InstrInfo::AArch64InstrInfo(const AArch64Subtarget &STI)
    :{}

/// GetInstSize - Return the number of bytes of code the specified
/// instruction may occupy. This returns the maximum number of bytes.
unsigned AArch64InstrInfo::getInstSizeInBytes(const MachineInstr &MI) const {}

unsigned AArch64InstrInfo::getInstBundleLength(const MachineInstr &MI) const {}

static void parseCondBranch(MachineInstr *LastInst, MachineBasicBlock *&Target,
                            SmallVectorImpl<MachineOperand> &Cond) {}

static unsigned getBranchDisplacementBits(unsigned Opc) {}

bool AArch64InstrInfo::isBranchOffsetInRange(unsigned BranchOp,
                                             int64_t BrOffset) const {}
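
// A minimal sketch of the range check, assuming getBranchDisplacementBits()
// above reports the encodable displacement bits for the opcode (the helper
// name below is illustrative only). AArch64 branch displacements are encoded
// in 4-byte units, so the byte offset is divided by 4 before the signed-fits
// test.
static bool branchOffsetFitsSketch(unsigned DisplacementBits,
                                   int64_t ByteOffset) {
  return isIntN(DisplacementBits, ByteOffset / 4);
}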

MachineBasicBlock *
AArch64InstrInfo::getBranchDestBlock(const MachineInstr &MI) const {}

void AArch64InstrInfo::insertIndirectBranch(MachineBasicBlock &MBB,
                                            MachineBasicBlock &NewDestBB,
                                            MachineBasicBlock &RestoreBB,
                                            const DebugLoc &DL,
                                            int64_t BrOffset,
                                            RegScavenger *RS) const {}

// Branch analysis.
bool AArch64InstrInfo::analyzeBranch(MachineBasicBlock &MBB,
                                     MachineBasicBlock *&TBB,
                                     MachineBasicBlock *&FBB,
                                     SmallVectorImpl<MachineOperand> &Cond,
                                     bool AllowModify) const {}

bool AArch64InstrInfo::analyzeBranchPredicate(MachineBasicBlock &MBB,
                                              MachineBranchPredicate &MBP,
                                              bool AllowModify) const {}

bool AArch64InstrInfo::reverseBranchCondition(
    SmallVectorImpl<MachineOperand> &Cond) const {}

unsigned AArch64InstrInfo::removeBranch(MachineBasicBlock &MBB,
                                        int *BytesRemoved) const {}

void AArch64InstrInfo::instantiateCondBranch(
    MachineBasicBlock &MBB, const DebugLoc &DL, MachineBasicBlock *TBB,
    ArrayRef<MachineOperand> Cond) const {}

unsigned AArch64InstrInfo::insertBranch(
    MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB,
    ArrayRef<MachineOperand> Cond, const DebugLoc &DL, int *BytesAdded) const {}

// Find the original register that VReg is copied from.
static unsigned removeCopies(const MachineRegisterInfo &MRI, unsigned VReg) {}

// Determine if VReg is defined by an instruction that can be folded into a
// csel instruction. If so, return the folded opcode, and the replacement
// register.
static unsigned canFoldIntoCSel(const MachineRegisterInfo &MRI, unsigned VReg,
                                unsigned *NewVReg = nullptr) {}

bool AArch64InstrInfo::canInsertSelect(const MachineBasicBlock &MBB,
                                       ArrayRef<MachineOperand> Cond,
                                       Register DstReg, Register TrueReg,
                                       Register FalseReg, int &CondCycles,
                                       int &TrueCycles,
                                       int &FalseCycles) const {}

void AArch64InstrInfo::insertSelect(MachineBasicBlock &MBB,
                                    MachineBasicBlock::iterator I,
                                    const DebugLoc &DL, Register DstReg,
                                    ArrayRef<MachineOperand> Cond,
                                    Register TrueReg, Register FalseReg) const {}

// Return true if Imm can be loaded into a register by a "cheap" sequence of
// instructions. For now, "cheap" means at most two instructions.
static bool isCheapImmediate(const MachineInstr &MI, unsigned BitSize) {}
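
// A sketch of the "cheap" test, assuming AArch64_IMM::expandMOVImm (declared
// in AArch64ExpandImm.h, included above) reports the MOVZ/MOVK/ORR sequence
// that would materialize the immediate; the helper name is illustrative only.
static bool isCheapImmediateSketch(uint64_t Imm, unsigned BitSize) {
  SmallVector<AArch64_IMM::ImmInsnModel, 4> Insn;
  AArch64_IMM::expandMOVImm(Imm, BitSize, Insn);
  return Insn.size() <= 2; // "cheap" == at most two instructions
}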

// FIXME: this implementation should be micro-architecture dependent, so a
// micro-architecture target hook should be introduced here in future.
bool AArch64InstrInfo::isAsCheapAsAMove(const MachineInstr &MI) const {}

bool AArch64InstrInfo::isFalkorShiftExtFast(const MachineInstr &MI) {}

bool AArch64InstrInfo::isSEHInstruction(const MachineInstr &MI) {}

bool AArch64InstrInfo::isCoalescableExtInstr(const MachineInstr &MI,
                                             Register &SrcReg, Register &DstReg,
                                             unsigned &SubIdx) const {}

bool AArch64InstrInfo::areMemAccessesTriviallyDisjoint(
    const MachineInstr &MIa, const MachineInstr &MIb) const {}

bool AArch64InstrInfo::isSchedulingBoundary(const MachineInstr &MI,
                                            const MachineBasicBlock *MBB,
                                            const MachineFunction &MF) const {}

/// analyzeCompare - For a comparison instruction, return the source registers
/// in SrcReg and SrcReg2, and the value it compares against in CmpValue.
/// Return true if the comparison instruction can be analyzed.
bool AArch64InstrInfo::analyzeCompare(const MachineInstr &MI, Register &SrcReg,
                                      Register &SrcReg2, int64_t &CmpMask,
                                      int64_t &CmpValue) const {}

static bool UpdateOperandRegClass(MachineInstr &Instr) {}

/// Return the opcode that does not set flags when possible - otherwise
/// return the original opcode. The caller is responsible for doing the actual
/// substitution and legality checking.
static unsigned convertToNonFlagSettingOpc(const MachineInstr &MI) {}
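
// A cut-down illustration of the mapping: flag-setting opcodes map to their
// non-flag-setting counterparts, anything unhandled maps to itself. The
// in-tree table covers many more opcodes than shown here.
static unsigned dropFlagSettingSketch(unsigned Opc) {
  switch (Opc) {
  default:               return Opc;
  case AArch64::ADDSWrr: return AArch64::ADDWrr;
  case AArch64::ADDSXrr: return AArch64::ADDXrr;
  case AArch64::SUBSWri: return AArch64::SUBWri;
  case AArch64::SUBSXri: return AArch64::SUBXri;
  case AArch64::ANDSWri: return AArch64::ANDWri;
  }
}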

enum AccessKind {};

/// True when condition flags are accessed (either by writing or reading)
/// on the instruction trace starting at From and ending at To.
///
/// Note: If From and To are from different blocks it's assumed the flags are
///       accessed on the path.
static bool areCFlagsAccessedBetweenInstrs(
    MachineBasicBlock::iterator From, MachineBasicBlock::iterator To,
    const TargetRegisterInfo *TRI, const AccessKind AccessToCheck = AK_All) {}
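
// The per-instruction test applied by such a walk, sketched: an instruction
// "accesses" the flags when it reads or clobbers NZCV, filtered by the
// requested access kind. The helper name and parameters are illustrative.
static bool instrTouchesNZCVSketch(const MachineInstr &MI,
                                   const TargetRegisterInfo *TRI,
                                   bool CheckWrites, bool CheckReads) {
  return (CheckWrites && MI.modifiesRegister(AArch64::NZCV, TRI)) ||
         (CheckReads && MI.readsRegister(AArch64::NZCV, TRI));
}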

std::optional<unsigned>
AArch64InstrInfo::canRemovePTestInstr(MachineInstr *PTest, MachineInstr *Mask,
                                      MachineInstr *Pred,
                                      const MachineRegisterInfo *MRI) const {}

/// optimizePTestInstr - Attempt to remove a ptest of a predicate-generating
/// operation which could set the flags in an identical manner
bool AArch64InstrInfo::optimizePTestInstr(
    MachineInstr *PTest, unsigned MaskReg, unsigned PredReg,
    const MachineRegisterInfo *MRI) const {}

/// Try to optimize a compare instruction. A compare instruction is an
/// instruction which produces AArch64::NZCV. It can be a pure compare
/// instruction when there are no uses of its destination register.
///
/// The following steps are tried in order:
/// 1. Convert CmpInstr into an unconditional version.
/// 2. Remove CmpInstr if an earlier instruction already produces the needed
///    condition code or can be converted into an instruction that does.
///    Only comparison with zero is supported.
bool AArch64InstrInfo::optimizeCompareInstr(
    MachineInstr &CmpInstr, Register SrcReg, Register SrcReg2, int64_t CmpMask,
    int64_t CmpValue, const MachineRegisterInfo *MRI) const {}

/// Get the opcode of the S version of Instr.
/// If Instr is already an S version, its opcode is returned.
/// AArch64::INSTRUCTION_LIST_END is returned if Instr does not have an S
/// version or we are not interested in it.
static unsigned sForm(MachineInstr &Instr) {}

/// Check if AArch64::NZCV should be alive in successors of MBB.
static bool areCFlagsAliveInSuccessors(const MachineBasicBlock *MBB) {}
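
// Sketch of the liveness test: NZCV is treated as alive when any successor
// block lists it as a live-in (illustrative helper, not the in-tree body).
static bool nzcvAliveInSuccessorsSketch(const MachineBasicBlock *MBB) {
  for (const MachineBasicBlock *Succ : MBB->successors())
    if (Succ->isLiveIn(AArch64::NZCV))
      return true;
  return false;
}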

/// \returns The condition code operand index for \p Instr if it is a branch
/// or select and -1 otherwise.
static int
findCondCodeUseOperandIdxForBranchOrSelect(const MachineInstr &Instr) {}

/// Find a condition code used by the instruction.
/// Returns AArch64CC::Invalid if either the instruction does not use condition
/// codes or we don't optimize CmpInstr in the presence of such instructions.
static AArch64CC::CondCode findCondCodeUsedByInstr(const MachineInstr &Instr) {}

static UsedNZCV getUsedNZCV(AArch64CC::CondCode CC) {}
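
// A few representative rows of the condition-code -> flags mapping, assuming
// UsedNZCV (from AArch64InstrInfo.h) carries one bool per flag. For example,
// EQ/NE only read Z, while GT/LE read N, V and Z.
static UsedNZCV usedNZCVSketch(AArch64CC::CondCode CC) {
  UsedNZCV Used;
  switch (CC) {
  case AArch64CC::EQ: case AArch64CC::NE: Used.Z = true; break;
  case AArch64CC::HS: case AArch64CC::LO: Used.C = true; break;
  case AArch64CC::MI: case AArch64CC::PL: Used.N = true; break;
  case AArch64CC::VS: case AArch64CC::VC: Used.V = true; break;
  case AArch64CC::HI: case AArch64CC::LS: Used.C = Used.Z = true; break;
  case AArch64CC::GE: case AArch64CC::LT: Used.N = Used.V = true; break;
  case AArch64CC::GT: case AArch64CC::LE:
    Used.N = Used.V = Used.Z = true;
    break;
  default: break;
  }
  return Used;
}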

/// \returns Condition flags used after \p CmpInstr in its MachineBB if NZCV
/// flags are not alive in successors of the same \p CmpInstr and \p MI parent.
/// \returns std::nullopt otherwise.
///
/// Collects instructions using those flags in \p CCUseInstrs if provided.
std::optional<UsedNZCV>
llvm::examineCFlagsUse(MachineInstr &MI, MachineInstr &CmpInstr,
                       const TargetRegisterInfo &TRI,
                       SmallVectorImpl<MachineInstr *> *CCUseInstrs) {}

static bool isADDSRegImm(unsigned Opcode) {}

static bool isSUBSRegImm(unsigned Opcode) {}

/// Check if CmpInstr can be substituted by MI.
///
/// CmpInstr can be substituted:
/// - CmpInstr is either 'ADDS %vreg, 0' or 'SUBS %vreg, 0'
/// - and, MI and CmpInstr are from the same MachineBB
/// - and, condition flags are not alive in successors of the CmpInstr parent
/// - and, if MI opcode is the S form there must be no defs of flags between
///        MI and CmpInstr
///        or if MI opcode is not the S form there must be neither defs of flags
///        nor uses of flags between MI and CmpInstr.
/// - and, C/V flags are not used after CmpInstr,
///        or the N flag is used but MI produces a poison value if signed
///        overflow occurs.
static bool canInstrSubstituteCmpInstr(MachineInstr &MI, MachineInstr &CmpInstr,
                                       const TargetRegisterInfo &TRI) {}

/// Substitute an instruction comparing to zero with another instruction
/// which produces needed condition flags.
///
/// Return true on success.
bool AArch64InstrInfo::substituteCmpToZero(
    MachineInstr &CmpInstr, unsigned SrcReg,
    const MachineRegisterInfo &MRI) const {}

/// \returns True if \p CmpInstr can be removed.
///
/// \p IsInvertCC is true if, after removing \p CmpInstr, condition
/// codes used in \p CCUseInstrs must be inverted.
static bool canCmpInstrBeRemoved(MachineInstr &MI, MachineInstr &CmpInstr,
                                 int CmpValue, const TargetRegisterInfo &TRI,
                                 SmallVectorImpl<MachineInstr *> &CCUseInstrs,
                                 bool &IsInvertCC) {}

/// Remove comparison in csinc-cmp sequence
///
/// Examples:
/// 1. \code
///   csinc w9, wzr, wzr, ne
///   cmp   w9, #0
///   b.eq
///    \endcode
/// to
///    \code
///   csinc w9, wzr, wzr, ne
///   b.ne
///    \endcode
///
/// 2. \code
///   csinc x2, xzr, xzr, mi
///   cmp   x2, #1
///   b.pl
///    \endcode
/// to
///    \code
///   csinc x2, xzr, xzr, mi
///   b.pl
///    \endcode
///
/// \param  CmpInstr comparison instruction
/// \return True when comparison removed
bool AArch64InstrInfo::removeCmpToZeroOrOne(
    MachineInstr &CmpInstr, unsigned SrcReg, int CmpValue,
    const MachineRegisterInfo &MRI) const {}

bool AArch64InstrInfo::expandPostRAPseudo(MachineInstr &MI) const {}

// Return true if this instruction simply sets its single destination register
// to zero. This is equivalent to a register rename of the zero-register.
bool AArch64InstrInfo::isGPRZero(const MachineInstr &MI) {}

// Return true if this instruction simply renames a general register without
// modifying bits.
bool AArch64InstrInfo::isGPRCopy(const MachineInstr &MI) {}

// Return true if this instruction simply renames a general register without
// modifying bits.
bool AArch64InstrInfo::isFPRCopy(const MachineInstr &MI) {}

Register AArch64InstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
                                               int &FrameIndex) const {}

Register AArch64InstrInfo::isStoreToStackSlot(const MachineInstr &MI,
                                              int &FrameIndex) const {}

/// Check all MachineMemOperands for a hint to suppress pairing.
bool AArch64InstrInfo::isLdStPairSuppressed(const MachineInstr &MI) {}

/// Set a flag on the first MachineMemOperand to suppress pairing.
void AArch64InstrInfo::suppressLdStPair(MachineInstr &MI) {}

/// Check all MachineMemOperands for a hint that the load/store is strided.
bool AArch64InstrInfo::isStridedAccess(const MachineInstr &MI) {}

bool AArch64InstrInfo::hasUnscaledLdStOffset(unsigned Opc) {}

std::optional<unsigned> AArch64InstrInfo::getUnscaledLdSt(unsigned Opc) {}

unsigned AArch64InstrInfo::getLoadStoreImmIdx(unsigned Opc) {}

bool AArch64InstrInfo::isPairableLdStInst(const MachineInstr &MI) {}

bool AArch64InstrInfo::isTailCallReturnInst(const MachineInstr &MI) {}

unsigned AArch64InstrInfo::convertToFlagSettingOpc(unsigned Opc) {}

// Is this a candidate for ld/st merging or pairing?  For example, we don't
// touch volatiles or load/stores that have a hint to avoid pair formation.
bool AArch64InstrInfo::isCandidateToMergeOrPair(const MachineInstr &MI) const {}

bool AArch64InstrInfo::getMemOperandsWithOffsetWidth(
    const MachineInstr &LdSt, SmallVectorImpl<const MachineOperand *> &BaseOps,
    int64_t &Offset, bool &OffsetIsScalable, LocationSize &Width,
    const TargetRegisterInfo *TRI) const {}

std::optional<ExtAddrMode>
AArch64InstrInfo::getAddrModeFromMemoryOp(const MachineInstr &MemI,
                                          const TargetRegisterInfo *TRI) const {}

bool AArch64InstrInfo::canFoldIntoAddrMode(const MachineInstr &MemI,
                                           Register Reg,
                                           const MachineInstr &AddrI,
                                           ExtAddrMode &AM) const {}

// Given an opcode for an instruction with a [Reg, #Imm] addressing mode,
// return the opcode of an instruction performing the same operation, but using
// the [Reg, Reg] addressing mode.
static unsigned regOffsetOpcode(unsigned Opcode) {}

// Given an opcode for an instruction with a [Reg, #Imm] addressing mode, return
// the opcode of an instruction performing the same operation, but using the
// [Reg, #Imm] addressing mode with scaled offset.
unsigned scaledOffsetOpcode(unsigned Opcode, unsigned &Scale) {}

// Given an opcode for an instruction with a [Reg, #Imm] addressing mode, return
// the opcode of an instruction performing the same operation, but using the
// [Reg, #Imm] addressing mode with unscaled offset.
unsigned unscaledOffsetOpcode(unsigned Opcode) {}
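
// A cut-down illustration of the scaled -> unscaled opcode mapping (the real
// table covers all load/store widths and register classes):
static unsigned toUnscaledOpcodeSketch(unsigned Opcode) {
  switch (Opcode) {
  case AArch64::LDRWui: return AArch64::LDURWi;
  case AArch64::LDRXui: return AArch64::LDURXi;
  case AArch64::STRWui: return AArch64::STURWi;
  case AArch64::STRXui: return AArch64::STURXi;
  default:              return Opcode;
  }
}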

// Given the opcode of a memory load/store instruction, return the opcode of an
// instruction performing the same operation, but using
// the [Reg, Reg, {s,u}xtw #N] addressing mode with sign-/zero-extend of the
// offset register.
static unsigned offsetExtendOpcode(unsigned Opcode) {}

MachineInstr *AArch64InstrInfo::emitLdStWithAddr(MachineInstr &MemI,
                                                 const ExtAddrMode &AM) const {}

/// Return true if the opcode is a post-index ld/st instruction, which really
/// loads from base+0.
static bool isPostIndexLdStOpcode(unsigned Opcode) {}

bool AArch64InstrInfo::getMemOperandWithOffsetWidth(
    const MachineInstr &LdSt, const MachineOperand *&BaseOp, int64_t &Offset,
    bool &OffsetIsScalable, TypeSize &Width,
    const TargetRegisterInfo *TRI) const {}

MachineOperand &
AArch64InstrInfo::getMemOpBaseRegImmOfsOffsetOperand(MachineInstr &LdSt) const {}

bool AArch64InstrInfo::getMemOpInfo(unsigned Opcode, TypeSize &Scale,
                                    TypeSize &Width, int64_t &MinOffset,
                                    int64_t &MaxOffset) {}
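
// Two representative rows of the table this query fills in, as a sketch with
// plain integers (the real interface uses TypeSize for Scale and Width):
//   LDRXui: scaled,   offsets 0..4095 in units of 8 bytes
//   LDURXi: unscaled, offsets -256..255 in bytes
static void memOpInfoSketch(unsigned Opc, int64_t &Scale, int64_t &Width,
                            int64_t &MinOffset, int64_t &MaxOffset) {
  switch (Opc) {
  case AArch64::LDRXui:
    Scale = 8; Width = 8; MinOffset = 0; MaxOffset = 4095; break;
  case AArch64::LDURXi:
    Scale = 1; Width = 8; MinOffset = -256; MaxOffset = 255; break;
  default:
    Scale = 1; Width = 1; MinOffset = 0; MaxOffset = 0; break;
  }
}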

// Scaling factor for unscaled load or store.
int AArch64InstrInfo::getMemScale(unsigned Opc) {}
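
// Representative scale factors, for illustration: the scale is simply the
// access size in bytes of the corresponding unscaled form.
static int memScaleSketch(unsigned Opc) {
  switch (Opc) {
  case AArch64::LDURBBi: case AArch64::STURBBi: return 1;
  case AArch64::LDURHHi: case AArch64::STURHHi: return 2;
  case AArch64::LDURWi:  case AArch64::STURWi:  return 4;
  case AArch64::LDURXi:  case AArch64::STURXi:  return 8;
  case AArch64::LDURQi:  case AArch64::STURQi:  return 16;
  default:                                      return 1;
  }
}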

bool AArch64InstrInfo::isPreLd(const MachineInstr &MI) {}

bool AArch64InstrInfo::isPreSt(const MachineInstr &MI) {}

bool AArch64InstrInfo::isPreLdSt(const MachineInstr &MI) {}

bool AArch64InstrInfo::isPairedLdSt(const MachineInstr &MI) {}

const MachineOperand &AArch64InstrInfo::getLdStBaseOp(const MachineInstr &MI) {}

const MachineOperand &
AArch64InstrInfo::getLdStOffsetOp(const MachineInstr &MI) {}

const MachineOperand &
AArch64InstrInfo::getLdStAmountOp(const MachineInstr &MI) {}

static const TargetRegisterClass *getRegClass(const MachineInstr &MI,
                                              Register Reg) {}

bool AArch64InstrInfo::isHForm(const MachineInstr &MI) {}

bool AArch64InstrInfo::isQForm(const MachineInstr &MI) {}

bool AArch64InstrInfo::hasBTISemantics(const MachineInstr &MI) {}

bool AArch64InstrInfo::isFpOrNEON(Register Reg) {}

bool AArch64InstrInfo::isFpOrNEON(const MachineInstr &MI) {}

// Scale the unscaled offsets.  Returns false if the unscaled offset can't be
// scaled.
static bool scaleOffset(unsigned Opc, int64_t &Offset) {}
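
// The core of the scaling step, sketched: an unscaled byte offset can only be
// converted when it is an exact multiple of the access size.
static bool scaleOffsetSketch(int64_t &Offset, int Scale) {
  if (Offset % Scale != 0)
    return false;
  Offset /= Scale;
  return true;
}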

static bool canPairLdStOpc(unsigned FirstOpc, unsigned SecondOpc) {}

static bool shouldClusterFI(const MachineFrameInfo &MFI, int FI1,
                            int64_t Offset1, unsigned Opcode1, int FI2,
                            int64_t Offset2, unsigned Opcode2) {}

/// Detect opportunities for ldp/stp formation.
///
/// Only called for LdSt for which getMemOperandWithOffset returns true.
bool AArch64InstrInfo::shouldClusterMemOps(
    ArrayRef<const MachineOperand *> BaseOps1, int64_t OpOffset1,
    bool OffsetIsScalable1, ArrayRef<const MachineOperand *> BaseOps2,
    int64_t OpOffset2, bool OffsetIsScalable2, unsigned ClusterSize,
    unsigned NumBytes) const {}

static const MachineInstrBuilder &AddSubReg(const MachineInstrBuilder &MIB,
                                            MCRegister Reg, unsigned SubIdx,
                                            unsigned State,
                                            const TargetRegisterInfo *TRI) {}

static bool forwardCopyWillClobberTuple(unsigned DestReg, unsigned SrcReg,
                                        unsigned NumRegs) {}
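
// The idea behind the clobber check, sketched with plain indices: copying a
// register tuple forward, sub-register by sub-register, clobbers the source
// when the destination starts inside the source's run of NumRegs consecutive
// registers (the in-tree check also handles wrap-around of the register bank).
static bool tupleForwardCopyClobbersSketch(unsigned DestIdx, unsigned SrcIdx,
                                           unsigned NumRegs) {
  return DestIdx >= SrcIdx && DestIdx - SrcIdx < NumRegs;
}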

void AArch64InstrInfo::copyPhysRegTuple(MachineBasicBlock &MBB,
                                        MachineBasicBlock::iterator I,
                                        const DebugLoc &DL, MCRegister DestReg,
                                        MCRegister SrcReg, bool KillSrc,
                                        unsigned Opcode,
                                        ArrayRef<unsigned> Indices) const {}

void AArch64InstrInfo::copyGPRRegTuple(MachineBasicBlock &MBB,
                                       MachineBasicBlock::iterator I,
                                       const DebugLoc &DL, MCRegister DestReg,
                                       MCRegister SrcReg, bool KillSrc,
                                       unsigned Opcode, unsigned ZeroReg,
                                       llvm::ArrayRef<unsigned> Indices) const {}

void AArch64InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                                   MachineBasicBlock::iterator I,
                                   const DebugLoc &DL, MCRegister DestReg,
                                   MCRegister SrcReg, bool KillSrc,
                                   bool RenamableDest,
                                   bool RenamableSrc) const {}

static void storeRegPairToStackSlot(const TargetRegisterInfo &TRI,
                                    MachineBasicBlock &MBB,
                                    MachineBasicBlock::iterator InsertBefore,
                                    const MCInstrDesc &MCID,
                                    Register SrcReg, bool IsKill,
                                    unsigned SubIdx0, unsigned SubIdx1, int FI,
                                    MachineMemOperand *MMO) {}

void AArch64InstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
                                           MachineBasicBlock::iterator MBBI,
                                           Register SrcReg, bool isKill, int FI,
                                           const TargetRegisterClass *RC,
                                           const TargetRegisterInfo *TRI,
                                           Register VReg) const {}

static void loadRegPairFromStackSlot(const TargetRegisterInfo &TRI,
                                     MachineBasicBlock &MBB,
                                     MachineBasicBlock::iterator InsertBefore,
                                     const MCInstrDesc &MCID,
                                     Register DestReg, unsigned SubIdx0,
                                     unsigned SubIdx1, int FI,
                                     MachineMemOperand *MMO) {}

void AArch64InstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
                                            MachineBasicBlock::iterator MBBI,
                                            Register DestReg, int FI,
                                            const TargetRegisterClass *RC,
                                            const TargetRegisterInfo *TRI,
                                            Register VReg) const {}

bool llvm::isNZCVTouchedInInstructionRange(const MachineInstr &DefMI,
                                           const MachineInstr &UseMI,
                                           const TargetRegisterInfo *TRI) {}

void AArch64InstrInfo::decomposeStackOffsetForDwarfOffsets(
    const StackOffset &Offset, int64_t &ByteSized, int64_t &VGSized) {}

/// Returns the offset in parts to which this frame offset can be
/// decomposed for the purpose of describing a frame offset.
/// For non-scalable offsets this is simply its byte size.
void AArch64InstrInfo::decomposeStackOffsetForFrameOffsets(
    const StackOffset &Offset, int64_t &NumBytes, int64_t &NumPredicateVectors,
    int64_t &NumDataVectors) {}
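
// A simplified sketch of the decomposition: the fixed part stays in bytes,
// and the scalable part is expressed in SVE predicate-vector units (2
// scalable bytes each, addressed with ADDPL) plus data-vector units (16
// scalable bytes each, addressed with ADDVL). Folding groups of 8 predicate
// units into one data-vector unit keeps the ADDPL immediate small; the
// in-tree version only performs this folding when it is actually profitable.
static void decomposeScalableOffsetSketch(int64_t FixedBytes,
                                          int64_t ScalableBytes,
                                          int64_t &NumBytes,
                                          int64_t &NumPredicateVectors,
                                          int64_t &NumDataVectors) {
  NumBytes = FixedBytes;
  NumPredicateVectors = ScalableBytes / 2;  // predicate vector = 2 scalable bytes
  NumDataVectors = NumPredicateVectors / 8; // data vector = 8 predicate vectors
  NumPredicateVectors -= NumDataVectors * 8;
}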

// Convenience function to create a DWARF expression for
//   Expr + NumBytes + NumVGScaledBytes * AArch64::VG
static void appendVGScaledOffsetExpr(SmallVectorImpl<char> &Expr, int NumBytes,
                                     int NumVGScaledBytes, unsigned VG,
                                     llvm::raw_string_ostream &Comment) {}

// Creates an MCCFIInstruction:
//    { DW_CFA_def_cfa_expression, ULEB128 (sizeof expr), expr }
static MCCFIInstruction createDefCFAExpression(const TargetRegisterInfo &TRI,
                                               unsigned Reg,
                                               const StackOffset &Offset) {}

MCCFIInstruction llvm::createDefCFA(const TargetRegisterInfo &TRI,
                                    unsigned FrameReg, unsigned Reg,
                                    const StackOffset &Offset,
                                    bool LastAdjustmentWasScalable) {}

MCCFIInstruction llvm::createCFAOffset(const TargetRegisterInfo &TRI,
                                       unsigned Reg,
                                       const StackOffset &OffsetFromDefCFA) {}

// Helper function to emit a frame offset adjustment from a given
// pointer (SrcReg), stored into DestReg. This function is explicit
// in that it requires the opcode.
static void emitFrameOffsetAdj(MachineBasicBlock &MBB,
                               MachineBasicBlock::iterator MBBI,
                               const DebugLoc &DL, unsigned DestReg,
                               unsigned SrcReg, int64_t Offset, unsigned Opc,
                               const TargetInstrInfo *TII,
                               MachineInstr::MIFlag Flag, bool NeedsWinCFI,
                               bool *HasWinCFI, bool EmitCFAOffset,
                               StackOffset CFAOffset, unsigned FrameReg) {}

void llvm::emitFrameOffset(MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator MBBI, const DebugLoc &DL,
                           unsigned DestReg, unsigned SrcReg,
                           StackOffset Offset, const TargetInstrInfo *TII,
                           MachineInstr::MIFlag Flag, bool SetNZCV,
                           bool NeedsWinCFI, bool *HasWinCFI,
                           bool EmitCFAOffset, StackOffset CFAOffset,
                           unsigned FrameReg) {}

MachineInstr *AArch64InstrInfo::foldMemoryOperandImpl(
    MachineFunction &MF, MachineInstr &MI, ArrayRef<unsigned> Ops,
    MachineBasicBlock::iterator InsertPt, int FrameIndex,
    LiveIntervals *LIS, VirtRegMap *VRM) const {}

int llvm::isAArch64FrameOffsetLegal(const MachineInstr &MI,
                                    StackOffset &SOffset,
                                    bool *OutUseUnscaledOp,
                                    unsigned *OutUnscaledOp,
                                    int64_t *EmittableOffset) {}

bool llvm::rewriteAArch64FrameIndex(MachineInstr &MI, unsigned FrameRegIdx,
                                    unsigned FrameReg, StackOffset &Offset,
                                    const AArch64InstrInfo *TII) {}

void AArch64InstrInfo::insertNoop(MachineBasicBlock &MBB,
                                  MachineBasicBlock::iterator MI) const {}

MCInst AArch64InstrInfo::getNop() const {}

// AArch64 supports MachineCombiner.
bool AArch64InstrInfo::useMachineCombiner() const {}

// True when Opc sets the flags
static bool isCombineInstrSettingFlag(unsigned Opc) {}

// 32b Opcodes that can be combined with a MUL
static bool isCombineInstrCandidate32(unsigned Opc) {}

// 64b Opcodes that can be combined with a MUL
static bool isCombineInstrCandidate64(unsigned Opc) {}

// FP Opcodes that can be combined with a FMUL.
static bool isCombineInstrCandidateFP(const MachineInstr &Inst) {}

// Opcodes that can be combined with a MUL
static bool isCombineInstrCandidate(unsigned Opc) {}

//
// Utility routine that checks if \param MO is defined by an
// \param CombineOpc instruction in the basic block \param MBB
static bool canCombine(MachineBasicBlock &MBB, MachineOperand &MO,
                       unsigned CombineOpc, unsigned ZeroReg = 0,
                       bool CheckZeroReg = false) {}

//
// Is \param MO defined by an integer multiply and can be combined?
static bool canCombineWithMUL(MachineBasicBlock &MBB, MachineOperand &MO,
                              unsigned MulOpc, unsigned ZeroReg) {}

//
// Is \param MO defined by a floating-point multiply and can be combined?
static bool canCombineWithFMUL(MachineBasicBlock &MBB, MachineOperand &MO,
                               unsigned MulOpc) {}

// TODO: There are many more machine instruction opcodes to match:
//       1. Other data types (integer, vectors)
//       2. Other math / logic operations (xor, or)
//       3. Other forms of the same operation (intrinsics and other variants)
bool AArch64InstrInfo::isAssociativeAndCommutative(const MachineInstr &Inst,
                                                   bool Invert) const {}

/// Find instructions that can be turned into madd.
static bool getMaddPatterns(MachineInstr &Root,
                            SmallVectorImpl<unsigned> &Patterns) {}

/// Floating-Point Support

/// Find instructions that can be turned into madd.
static bool getFMAPatterns(MachineInstr &Root,
                           SmallVectorImpl<unsigned> &Patterns) {}

static bool getFMULPatterns(MachineInstr &Root,
                            SmallVectorImpl<unsigned> &Patterns) {}

static bool getFNEGPatterns(MachineInstr &Root,
                            SmallVectorImpl<unsigned> &Patterns) {}

/// Return true when a code sequence can improve throughput. It
/// should be called only for instructions in loops.
/// \param Pattern - combiner pattern
bool AArch64InstrInfo::isThroughputPattern(unsigned Pattern) const {}

/// Find other MI combine patterns.
static bool getMiscPatterns(MachineInstr &Root,
                            SmallVectorImpl<unsigned> &Patterns) {}

CombinerObjective
AArch64InstrInfo::getCombinerObjective(unsigned Pattern) const {}

/// Return true when there is potentially a faster code sequence for an
/// instruction chain ending in \p Root. All potential patterns are listed in
/// the \p Pattern vector. Pattern should be sorted in priority order since the
/// pattern evaluator stops checking as soon as it finds a faster sequence.
bool AArch64InstrInfo::getMachineCombinerPatterns(
    MachineInstr &Root, SmallVectorImpl<unsigned> &Patterns,
    bool DoRegPressureReduce) const {}

enum class FMAInstKind {};
/// genFusedMultiply - Generate fused multiply instructions.
/// This function supports both integer and floating point instructions.
/// A typical example:
///  F|MUL I=A,B,0
///  F|ADD R,I,C
///  ==> F|MADD R,A,B,C
/// \param MF Containing MachineFunction
/// \param MRI Register information
/// \param TII Target information
/// \param Root is the F|ADD instruction
/// \param [out] InsInstrs is a vector of machine instructions and will
/// contain the generated madd instruction
/// \param IdxMulOpd is index of operand in Root that is the result of
/// the F|MUL. In the example above IdxMulOpd is 1.
/// \param MaddOpc the opcode of the f|madd instruction
/// \param RC Register class of operands
/// \param kind The kind of fma instruction (addressing mode) to be generated
/// \param ReplacedAddend is the result register from the instruction
/// replacing the non-combined operand, if any.
static MachineInstr *
genFusedMultiply(MachineFunction &MF, MachineRegisterInfo &MRI,
                 const TargetInstrInfo *TII, MachineInstr &Root,
                 SmallVectorImpl<MachineInstr *> &InsInstrs, unsigned IdxMulOpd,
                 unsigned MaddOpc, const TargetRegisterClass *RC,
                 FMAInstKind kind = FMAInstKind::Default,
                 const Register *ReplacedAddend = nullptr) {}

static MachineInstr *
genFNegatedMAD(MachineFunction &MF, MachineRegisterInfo &MRI,
               const TargetInstrInfo *TII, MachineInstr &Root,
               SmallVectorImpl<MachineInstr *> &InsInstrs) {}

/// Fold (FMUL x (DUP y lane)) into (FMUL_indexed x y lane)
static MachineInstr *
genIndexedMultiply(MachineInstr &Root,
                   SmallVectorImpl<MachineInstr *> &InsInstrs,
                   unsigned IdxDupOp, unsigned MulOpc,
                   const TargetRegisterClass *RC, MachineRegisterInfo &MRI) {}

/// genFusedMultiplyAcc - Helper to generate fused multiply accumulate
/// instructions.
///
/// \see genFusedMultiply
static MachineInstr *genFusedMultiplyAcc(
    MachineFunction &MF, MachineRegisterInfo &MRI, const TargetInstrInfo *TII,
    MachineInstr &Root, SmallVectorImpl<MachineInstr *> &InsInstrs,
    unsigned IdxMulOpd, unsigned MaddOpc, const TargetRegisterClass *RC) {}

/// genNeg - Helper to generate an intermediate negation of the second operand
/// of Root
static Register genNeg(MachineFunction &MF, MachineRegisterInfo &MRI,
                       const TargetInstrInfo *TII, MachineInstr &Root,
                       SmallVectorImpl<MachineInstr *> &InsInstrs,
                       DenseMap<unsigned, unsigned> &InstrIdxForVirtReg,
                       unsigned MnegOpc, const TargetRegisterClass *RC) {}

/// genFusedMultiplyAccNeg - Helper to generate fused multiply accumulate
/// instructions with an additional negation of the accumulator
static MachineInstr *genFusedMultiplyAccNeg(
    MachineFunction &MF, MachineRegisterInfo &MRI, const TargetInstrInfo *TII,
    MachineInstr &Root, SmallVectorImpl<MachineInstr *> &InsInstrs,
    DenseMap<unsigned, unsigned> &InstrIdxForVirtReg, unsigned IdxMulOpd,
    unsigned MaddOpc, unsigned MnegOpc, const TargetRegisterClass *RC) {}

/// genFusedMultiplyIdx - Helper to generate fused multiply accumulate
/// instructions.
///
/// \see genFusedMultiply
static MachineInstr *genFusedMultiplyIdx(
    MachineFunction &MF, MachineRegisterInfo &MRI, const TargetInstrInfo *TII,
    MachineInstr &Root, SmallVectorImpl<MachineInstr *> &InsInstrs,
    unsigned IdxMulOpd, unsigned MaddOpc, const TargetRegisterClass *RC) {}

/// genFusedMultiplyIdxNeg - Helper to generate fused multiply accumulate
/// instructions with an additional negation of the accumulator
static MachineInstr *genFusedMultiplyIdxNeg(
    MachineFunction &MF, MachineRegisterInfo &MRI, const TargetInstrInfo *TII,
    MachineInstr &Root, SmallVectorImpl<MachineInstr *> &InsInstrs,
    DenseMap<unsigned, unsigned> &InstrIdxForVirtReg, unsigned IdxMulOpd,
    unsigned MaddOpc, unsigned MnegOpc, const TargetRegisterClass *RC) {}

/// genMaddR - Generate madd instruction and combine mul and add using
/// an extra virtual register
/// Example - an ADD intermediate needs to be stored in a register:
///   MUL I=A,B,0
///   ADD R,I,Imm
///   ==> ORR  V, ZR, Imm
///   ==> MADD R,A,B,V
/// \param MF Containing MachineFunction
/// \param MRI Register information
/// \param TII Target information
/// \param Root is the ADD instruction
/// \param [out] InsInstrs is a vector of machine instructions and will
/// contain the generated madd instruction
/// \param IdxMulOpd is index of operand in Root that is the result of
/// the MUL. In the example above IdxMulOpd is 1.
/// \param MaddOpc the opcode of the madd instruction
/// \param VR is a virtual register that holds the value of an ADD operand
/// (V in the example above).
/// \param RC Register class of operands
static MachineInstr *genMaddR(MachineFunction &MF, MachineRegisterInfo &MRI,
                              const TargetInstrInfo *TII, MachineInstr &Root,
                              SmallVectorImpl<MachineInstr *> &InsInstrs,
                              unsigned IdxMulOpd, unsigned MaddOpc, unsigned VR,
                              const TargetRegisterClass *RC) {}

/// Do the following transformation
/// A - (B + C)  ==>   (A - B) - C
/// A - (B + C)  ==>   (A - C) - B
static void
genSubAdd2SubSub(MachineFunction &MF, MachineRegisterInfo &MRI,
                 const TargetInstrInfo *TII, MachineInstr &Root,
                 SmallVectorImpl<MachineInstr *> &InsInstrs,
                 SmallVectorImpl<MachineInstr *> &DelInstrs,
                 unsigned IdxOpd1,
                 DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) {}

/// When getMachineCombinerPatterns() finds potential patterns, this function
/// generates the instructions that could replace the original code sequence.
void AArch64InstrInfo::genAlternativeCodeSequence(
    MachineInstr &Root, unsigned Pattern,
    SmallVectorImpl<MachineInstr *> &InsInstrs,
    SmallVectorImpl<MachineInstr *> &DelInstrs,
    DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) const {}

/// Replace a csinc-branch sequence with a simple conditional branch
///
/// Examples:
/// 1. \code
///   csinc  w9, wzr, wzr, <condition code>
///   tbnz   w9, #0, 0x44
///    \endcode
/// to
///    \code
///   b.<inverted condition code>
///    \endcode
///
/// 2. \code
///   csinc w9, wzr, wzr, <condition code>
///   tbz   w9, #0, 0x44
///    \endcode
/// to
///    \code
///   b.<condition code>
///    \endcode
///
/// Replace a compare-and-branch sequence with a TBZ/TBNZ instruction when the
/// compare's constant operand is a power of 2.
///
/// Examples:
///    \code
///   and  w8, w8, #0x400
///   cbnz w8, L1
///    \endcode
/// to
///    \code
///   tbnz w8, #10, L1
///    \endcode
///
/// \param  MI Conditional Branch
/// \return True when the simple conditional branch is generated
///
bool AArch64InstrInfo::optimizeCondBranch(MachineInstr &MI) const {}
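
// The bit-index computation behind the and+cbnz -> tbnz rewrite above,
// sketched: when the AND mask is a power of two, the branch depends on a
// single bit, and TB[N]Z can test that bit directly.
static std::optional<unsigned> tbnzBitFromMaskSketch(uint64_t Mask) {
  if (!isPowerOf2_64(Mask))
    return std::nullopt;
  return Log2_64(Mask); // e.g. 0x400 -> bit 10
}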

std::pair<unsigned, unsigned>
AArch64InstrInfo::decomposeMachineOperandsTargetFlags(unsigned TF) const {}

ArrayRef<std::pair<unsigned, const char *>>
AArch64InstrInfo::getSerializableDirectMachineOperandTargetFlags() const {}

ArrayRef<std::pair<unsigned, const char *>>
AArch64InstrInfo::getSerializableBitmaskMachineOperandTargetFlags() const {}

ArrayRef<std::pair<MachineMemOperand::Flags, const char *>>
AArch64InstrInfo::getSerializableMachineMemOperandTargetFlags() const {}

/// Constants defining how certain sequences should be outlined.
/// This encompasses how an outlined function should be called, and what kind of
/// frame should be emitted for that outlined function.
///
/// \p MachineOutlinerDefault implies that the function should be called with
/// a save and restore of LR to the stack.
///
/// That is,
///
/// I1     Save LR                    OUTLINED_FUNCTION:
/// I2 --> BL OUTLINED_FUNCTION       I1
/// I3     Restore LR                 I2
///                                   I3
///                                   RET
///
/// * Call construction overhead: 3 (save + BL + restore)
/// * Frame construction overhead: 1 (ret)
/// * Requires stack fixups? Yes
///
/// \p MachineOutlinerTailCall implies that the function is being created from
/// a sequence of instructions ending in a return.
///
/// That is,
///
/// I1                             OUTLINED_FUNCTION:
/// I2 --> B OUTLINED_FUNCTION     I1
/// RET                            I2
///                                RET
///
/// * Call construction overhead: 1 (B)
/// * Frame construction overhead: 0 (Return included in sequence)
/// * Requires stack fixups? No
///
/// \p MachineOutlinerNoLRSave implies that the function should be called using
/// a BL instruction, but doesn't require LR to be saved and restored. This
/// happens when LR is known to be dead.
///
/// That is,
///
/// I1                                OUTLINED_FUNCTION:
/// I2 --> BL OUTLINED_FUNCTION       I1
/// I3                                I2
///                                   I3
///                                   RET
///
/// * Call construction overhead: 1 (BL)
/// * Frame construction overhead: 1 (RET)
/// * Requires stack fixups? No
///
/// \p MachineOutlinerThunk implies that the function is being created from
/// a sequence of instructions ending in a call. The outlined function is
/// called with a BL instruction, and the outlined function tail-calls the
/// original call destination.
///
/// That is,
///
/// I1                                OUTLINED_FUNCTION:
/// I2 --> BL OUTLINED_FUNCTION       I1
/// BL f                              I2
///                                   B f
/// * Call construction overhead: 1 (BL)
/// * Frame construction overhead: 0
/// * Requires stack fixups? No
///
/// \p MachineOutlinerRegSave implies that the function should be called with a
/// save and restore of LR to an available register. This allows us to avoid
/// stack fixups. Note that this outlining variant is compatible with the
/// NoLRSave case.
///
/// That is,
///
/// I1     Save LR                    OUTLINED_FUNCTION:
/// I2 --> BL OUTLINED_FUNCTION       I1
/// I3     Restore LR                 I2
///                                   I3
///                                   RET
///
/// * Call construction overhead: 3 (save + BL + restore)
/// * Frame construction overhead: 1 (ret)
/// * Requires stack fixups? No
enum MachineOutlinerClass {};

enum MachineOutlinerMBBFlags {};

Register
AArch64InstrInfo::findRegisterToSaveLRTo(outliner::Candidate &C) const {}

static bool
outliningCandidatesSigningScopeConsensus(const outliner::Candidate &a,
                                         const outliner::Candidate &b) {}

static bool
outliningCandidatesSigningKeyConsensus(const outliner::Candidate &a,
                                       const outliner::Candidate &b) {}

static bool outliningCandidatesV8_3OpsConsensus(const outliner::Candidate &a,
                                                const outliner::Candidate &b) {}

std::optional<std::unique_ptr<outliner::OutlinedFunction>>
AArch64InstrInfo::getOutliningCandidateInfo(
    const MachineModuleInfo &MMI,
    std::vector<outliner::Candidate> &RepeatedSequenceLocs,
    unsigned MinRepeats) const {}

void AArch64InstrInfo::mergeOutliningCandidateAttributes(
    Function &F, std::vector<outliner::Candidate> &Candidates) const {}

bool AArch64InstrInfo::isFunctionSafeToOutlineFrom(
    MachineFunction &MF, bool OutlineFromLinkOnceODRs) const {}

SmallVector<std::pair<MachineBasicBlock::iterator, MachineBasicBlock::iterator>>
AArch64InstrInfo::getOutlinableRanges(MachineBasicBlock &MBB,
                                      unsigned &Flags) const {}

outliner::InstrType
AArch64InstrInfo::getOutliningTypeImpl(const MachineModuleInfo &MMI,
                                       MachineBasicBlock::iterator &MIT,
                                       unsigned Flags) const {}

void AArch64InstrInfo::fixupPostOutline(MachineBasicBlock &MBB) const {}

static void signOutlinedFunction(MachineFunction &MF, MachineBasicBlock &MBB,
                                 const AArch64InstrInfo *TII,
                                 bool ShouldSignReturnAddr) {}

void AArch64InstrInfo::buildOutlinedFrame(
    MachineBasicBlock &MBB, MachineFunction &MF,
    const outliner::OutlinedFunction &OF) const {}

MachineBasicBlock::iterator AArch64InstrInfo::insertOutlinedCall(
    Module &M, MachineBasicBlock &MBB, MachineBasicBlock::iterator &It,
    MachineFunction &MF, outliner::Candidate &C) const {}

bool AArch64InstrInfo::shouldOutlineFromFunctionByDefault(
  MachineFunction &MF) const {}

void AArch64InstrInfo::buildClearRegister(Register Reg, MachineBasicBlock &MBB,
                                          MachineBasicBlock::iterator Iter,
                                          DebugLoc &DL,
                                          bool AllowSideEffects) const {}

std::optional<DestSourcePair>
AArch64InstrInfo::isCopyInstrImpl(const MachineInstr &MI) const {}

std::optional<DestSourcePair>
AArch64InstrInfo::isCopyLikeInstrImpl(const MachineInstr &MI) const {}

std::optional<RegImmPair>
AArch64InstrInfo::isAddImmediate(const MachineInstr &MI, Register Reg) const {}

/// If the given ORR instruction is a copy, and \p DescribedReg overlaps with
/// the destination register then, if possible, describe the value in terms of
/// the source register.
static std::optional<ParamLoadedValue>
describeORRLoadedValue(const MachineInstr &MI, Register DescribedReg,
                       const TargetInstrInfo *TII,
                       const TargetRegisterInfo *TRI) {}

bool AArch64InstrInfo::isFunctionSafeToSplit(const MachineFunction &MF) const {}

bool AArch64InstrInfo::isMBBSafeToSplitToCold(
    const MachineBasicBlock &MBB) const {}

std::optional<ParamLoadedValue>
AArch64InstrInfo::describeLoadedValue(const MachineInstr &MI,
                                      Register Reg) const {}

bool AArch64InstrInfo::isExtendLikelyToBeFolded(
    MachineInstr &ExtMI, MachineRegisterInfo &MRI) const {}

uint64_t AArch64InstrInfo::getElementSizeForOpcode(unsigned Opc) const {}

bool AArch64InstrInfo::isPTestLikeOpcode(unsigned Opc) const {}

bool AArch64InstrInfo::isWhileOpcode(unsigned Opc) const {}

unsigned int
AArch64InstrInfo::getTailDuplicateSize(CodeGenOptLevel OptLevel) const {}

bool AArch64InstrInfo::isLegalAddressingMode(unsigned NumBytes, int64_t Offset,
                                             unsigned Scale) const {}
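
// A sketch of the two immediate forms this check reduces to for ordinary
// loads and stores: a scaled unsigned 12-bit offset (LDR/STR [Xn, #imm]) or
// an unscaled signed 9-bit offset (LDUR/STUR [Xn, #imm]).
static bool legalLoadStoreOffsetSketch(unsigned AccessBytes, int64_t Offset) {
  bool ScaledForm = Offset >= 0 && Offset % AccessBytes == 0 &&
                    Offset / AccessBytes <= 4095;
  bool UnscaledForm = Offset >= -256 && Offset <= 255;
  return ScaledForm || UnscaledForm;
}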

unsigned llvm::getBLRCallOpcode(const MachineFunction &MF) {}

MachineBasicBlock::iterator
AArch64InstrInfo::probedStackAlloc(MachineBasicBlock::iterator MBBI,
                                   Register TargetReg, bool FrameSetup) const {}

namespace {
class AArch64PipelinerLoopInfo : public TargetInstrInfo::PipelinerLoopInfo {};
} // namespace

/// Clone an instruction from MI. The register of the ReplaceOprNum-th operand
/// is replaced by ReplaceReg. The output register is newly created.
/// The other operands are unchanged from MI.
static Register cloneInstr(const MachineInstr *MI, unsigned ReplaceOprNum,
                           Register ReplaceReg, MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator InsertTo) {}

void AArch64PipelinerLoopInfo::createRemainingIterationsGreaterCondition(
    int TC, MachineBasicBlock &MBB, SmallVectorImpl<MachineOperand> &Cond,
    DenseMap<MachineInstr *, MachineInstr *> &LastStage0Insts) {}

static void extractPhiReg(const MachineInstr &Phi, const MachineBasicBlock *MBB,
                          Register &RegMBB, Register &RegOther) {}

static bool isDefinedOutside(Register Reg, const MachineBasicBlock *BB) {}

/// If Reg is an induction variable, return true and set the output parameters.
static bool getIndVarInfo(Register Reg, const MachineBasicBlock *LoopBB,
                          MachineInstr *&UpdateInst,
                          unsigned &UpdateCounterOprNum, Register &InitReg,
                          bool &IsUpdatePriorComp) {}

std::unique_ptr<TargetInstrInfo::PipelinerLoopInfo>
AArch64InstrInfo::analyzeLoopForPipelining(MachineBasicBlock *LoopBB) const {}

/// verifyInstruction - Perform target specific instruction verification.
bool AArch64InstrInfo::verifyInstruction(const MachineInstr &MI,
                                         StringRef &ErrInfo) const {}

#define GET_INSTRINFO_HELPERS
#define GET_INSTRMAP_INFO
#include "AArch64GenInstrInfo.inc"