#include "AArch64InstrInfo.h"
#include "AArch64ExpandImm.h"
#include "AArch64FrameLowering.h"
#include "AArch64MachineFunctionInfo.h"
#include "AArch64PointerAuth.h"
#include "AArch64Subtarget.h"
#include "MCTargetDesc/AArch64AddressingModes.h"
#include "MCTargetDesc/AArch64MCTargetDesc.h"
#include "Utils/AArch64BaseInfo.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/LivePhysRegs.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineCombinerPattern.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/Module.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstBuilder.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/LEB128.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include <cassert>
#include <cstdint>
#include <iterator>
#include <utility>
using namespace llvm;
#define GET_INSTRINFO_CTOR_DTOR
#include "AArch64GenInstrInfo.inc"
// Debug-only command-line knobs that artificially restrict the assumed
// branch displacement range (in bits of signed offset) for each AArch64
// branch flavor. Lowering these below the architectural defaults forces
// the branch-relaxation machinery to kick in on shorter branches, which
// is useful for testing; the cl::init values are the architectural widths.

// TB[N]Z: test-bit-and-branch, architecturally a 14-bit signed offset.
static cl::opt<unsigned> TBZDisplacementBits(
    "aarch64-tbz-offset-bits", cl::Hidden, cl::init(14),
    cl::desc("Restrict range of TB[N]Z instructions (DEBUG)"));

// CB[N]Z: compare-and-branch, architecturally a 19-bit signed offset.
static cl::opt<unsigned> CBZDisplacementBits(
    "aarch64-cbz-offset-bits", cl::Hidden, cl::init(19),
    cl::desc("Restrict range of CB[N]Z instructions (DEBUG)"));

// Bcc: conditional branch, architecturally a 19-bit signed offset.
static cl::opt<unsigned>
    BCCDisplacementBits("aarch64-bcc-offset-bits", cl::Hidden, cl::init(19),
                        cl::desc("Restrict range of Bcc instructions (DEBUG)"));

// B: unconditional branch, architecturally a 26-bit signed offset.
static cl::opt<unsigned>
    BDisplacementBits("aarch64-b-offset-bits", cl::Hidden, cl::init(26),
                      cl::desc("Restrict range of B instructions (DEBUG)"));
AArch64InstrInfo::AArch64InstrInfo(const AArch64Subtarget &STI)
: … { … }
unsigned AArch64InstrInfo::getInstSizeInBytes(const MachineInstr &MI) const { … }
unsigned AArch64InstrInfo::getInstBundleLength(const MachineInstr &MI) const { … }
static void parseCondBranch(MachineInstr *LastInst, MachineBasicBlock *&Target,
SmallVectorImpl<MachineOperand> &Cond) { … }
static unsigned getBranchDisplacementBits(unsigned Opc) { … }
bool AArch64InstrInfo::isBranchOffsetInRange(unsigned BranchOp,
int64_t BrOffset) const { … }
MachineBasicBlock *
AArch64InstrInfo::getBranchDestBlock(const MachineInstr &MI) const { … }
void AArch64InstrInfo::insertIndirectBranch(MachineBasicBlock &MBB,
MachineBasicBlock &NewDestBB,
MachineBasicBlock &RestoreBB,
const DebugLoc &DL,
int64_t BrOffset,
RegScavenger *RS) const { … }
bool AArch64InstrInfo::analyzeBranch(MachineBasicBlock &MBB,
MachineBasicBlock *&TBB,
MachineBasicBlock *&FBB,
SmallVectorImpl<MachineOperand> &Cond,
bool AllowModify) const { … }
bool AArch64InstrInfo::analyzeBranchPredicate(MachineBasicBlock &MBB,
MachineBranchPredicate &MBP,
bool AllowModify) const { … }
bool AArch64InstrInfo::reverseBranchCondition(
SmallVectorImpl<MachineOperand> &Cond) const { … }
unsigned AArch64InstrInfo::removeBranch(MachineBasicBlock &MBB,
int *BytesRemoved) const { … }
void AArch64InstrInfo::instantiateCondBranch(
MachineBasicBlock &MBB, const DebugLoc &DL, MachineBasicBlock *TBB,
ArrayRef<MachineOperand> Cond) const { … }
unsigned AArch64InstrInfo::insertBranch(
MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB,
ArrayRef<MachineOperand> Cond, const DebugLoc &DL, int *BytesAdded) const { … }
static unsigned removeCopies(const MachineRegisterInfo &MRI, unsigned VReg) { … }
static unsigned canFoldIntoCSel(const MachineRegisterInfo &MRI, unsigned VReg,
unsigned *NewVReg = nullptr) { … }
bool AArch64InstrInfo::canInsertSelect(const MachineBasicBlock &MBB,
ArrayRef<MachineOperand> Cond,
Register DstReg, Register TrueReg,
Register FalseReg, int &CondCycles,
int &TrueCycles,
int &FalseCycles) const { … }
void AArch64InstrInfo::insertSelect(MachineBasicBlock &MBB,
MachineBasicBlock::iterator I,
const DebugLoc &DL, Register DstReg,
ArrayRef<MachineOperand> Cond,
Register TrueReg, Register FalseReg) const { … }
static bool isCheapImmediate(const MachineInstr &MI, unsigned BitSize) { … }
bool AArch64InstrInfo::isAsCheapAsAMove(const MachineInstr &MI) const { … }
bool AArch64InstrInfo::isFalkorShiftExtFast(const MachineInstr &MI) { … }
bool AArch64InstrInfo::isSEHInstruction(const MachineInstr &MI) { … }
bool AArch64InstrInfo::isCoalescableExtInstr(const MachineInstr &MI,
Register &SrcReg, Register &DstReg,
unsigned &SubIdx) const { … }
bool AArch64InstrInfo::areMemAccessesTriviallyDisjoint(
const MachineInstr &MIa, const MachineInstr &MIb) const { … }
bool AArch64InstrInfo::isSchedulingBoundary(const MachineInstr &MI,
const MachineBasicBlock *MBB,
const MachineFunction &MF) const { … }
bool AArch64InstrInfo::analyzeCompare(const MachineInstr &MI, Register &SrcReg,
Register &SrcReg2, int64_t &CmpMask,
int64_t &CmpValue) const { … }
static bool UpdateOperandRegClass(MachineInstr &Instr) { … }
static unsigned convertToNonFlagSettingOpc(const MachineInstr &MI) { … }
enum AccessKind { … };
static bool areCFlagsAccessedBetweenInstrs(
MachineBasicBlock::iterator From, MachineBasicBlock::iterator To,
const TargetRegisterInfo *TRI, const AccessKind AccessToCheck = AK_All) { … }
std::optional<unsigned>
AArch64InstrInfo::canRemovePTestInstr(MachineInstr *PTest, MachineInstr *Mask,
MachineInstr *Pred,
const MachineRegisterInfo *MRI) const { … }
bool AArch64InstrInfo::optimizePTestInstr(
MachineInstr *PTest, unsigned MaskReg, unsigned PredReg,
const MachineRegisterInfo *MRI) const { … }
bool AArch64InstrInfo::optimizeCompareInstr(
MachineInstr &CmpInstr, Register SrcReg, Register SrcReg2, int64_t CmpMask,
int64_t CmpValue, const MachineRegisterInfo *MRI) const { … }
static unsigned sForm(MachineInstr &Instr) { … }
static bool areCFlagsAliveInSuccessors(const MachineBasicBlock *MBB) { … }
static int
findCondCodeUseOperandIdxForBranchOrSelect(const MachineInstr &Instr) { … }
static AArch64CC::CondCode findCondCodeUsedByInstr(const MachineInstr &Instr) { … }
static UsedNZCV getUsedNZCV(AArch64CC::CondCode CC) { … }
std::optional<UsedNZCV>
llvm::examineCFlagsUse(MachineInstr &MI, MachineInstr &CmpInstr,
const TargetRegisterInfo &TRI,
SmallVectorImpl<MachineInstr *> *CCUseInstrs) { … }
static bool isADDSRegImm(unsigned Opcode) { … }
static bool isSUBSRegImm(unsigned Opcode) { … }
static bool canInstrSubstituteCmpInstr(MachineInstr &MI, MachineInstr &CmpInstr,
const TargetRegisterInfo &TRI) { … }
bool AArch64InstrInfo::substituteCmpToZero(
MachineInstr &CmpInstr, unsigned SrcReg,
const MachineRegisterInfo &MRI) const { … }
static bool canCmpInstrBeRemoved(MachineInstr &MI, MachineInstr &CmpInstr,
int CmpValue, const TargetRegisterInfo &TRI,
SmallVectorImpl<MachineInstr *> &CCUseInstrs,
bool &IsInvertCC) { … }
bool AArch64InstrInfo::removeCmpToZeroOrOne(
MachineInstr &CmpInstr, unsigned SrcReg, int CmpValue,
const MachineRegisterInfo &MRI) const { … }
bool AArch64InstrInfo::expandPostRAPseudo(MachineInstr &MI) const { … }
bool AArch64InstrInfo::isGPRZero(const MachineInstr &MI) { … }
bool AArch64InstrInfo::isGPRCopy(const MachineInstr &MI) { … }
bool AArch64InstrInfo::isFPRCopy(const MachineInstr &MI) { … }
Register AArch64InstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
int &FrameIndex) const { … }
Register AArch64InstrInfo::isStoreToStackSlot(const MachineInstr &MI,
int &FrameIndex) const { … }
bool AArch64InstrInfo::isLdStPairSuppressed(const MachineInstr &MI) { … }
void AArch64InstrInfo::suppressLdStPair(MachineInstr &MI) { … }
bool AArch64InstrInfo::isStridedAccess(const MachineInstr &MI) { … }
bool AArch64InstrInfo::hasUnscaledLdStOffset(unsigned Opc) { … }
std::optional<unsigned> AArch64InstrInfo::getUnscaledLdSt(unsigned Opc) { … }
unsigned AArch64InstrInfo::getLoadStoreImmIdx(unsigned Opc) { … }
bool AArch64InstrInfo::isPairableLdStInst(const MachineInstr &MI) { … }
bool AArch64InstrInfo::isTailCallReturnInst(const MachineInstr &MI) { … }
unsigned AArch64InstrInfo::convertToFlagSettingOpc(unsigned Opc) { … }
bool AArch64InstrInfo::isCandidateToMergeOrPair(const MachineInstr &MI) const { … }
bool AArch64InstrInfo::getMemOperandsWithOffsetWidth(
const MachineInstr &LdSt, SmallVectorImpl<const MachineOperand *> &BaseOps,
int64_t &Offset, bool &OffsetIsScalable, LocationSize &Width,
const TargetRegisterInfo *TRI) const { … }
std::optional<ExtAddrMode>
AArch64InstrInfo::getAddrModeFromMemoryOp(const MachineInstr &MemI,
const TargetRegisterInfo *TRI) const { … }
bool AArch64InstrInfo::canFoldIntoAddrMode(const MachineInstr &MemI,
Register Reg,
const MachineInstr &AddrI,
ExtAddrMode &AM) const { … }
static unsigned regOffsetOpcode(unsigned Opcode) { … }
unsigned scaledOffsetOpcode(unsigned Opcode, unsigned &Scale) { … }
unsigned unscaledOffsetOpcode(unsigned Opcode) { … }
static unsigned offsetExtendOpcode(unsigned Opcode) { … }
MachineInstr *AArch64InstrInfo::emitLdStWithAddr(MachineInstr &MemI,
const ExtAddrMode &AM) const { … }
static bool isPostIndexLdStOpcode(unsigned Opcode) { … }
bool AArch64InstrInfo::getMemOperandWithOffsetWidth(
const MachineInstr &LdSt, const MachineOperand *&BaseOp, int64_t &Offset,
bool &OffsetIsScalable, TypeSize &Width,
const TargetRegisterInfo *TRI) const { … }
MachineOperand &
AArch64InstrInfo::getMemOpBaseRegImmOfsOffsetOperand(MachineInstr &LdSt) const { … }
bool AArch64InstrInfo::getMemOpInfo(unsigned Opcode, TypeSize &Scale,
TypeSize &Width, int64_t &MinOffset,
int64_t &MaxOffset) { … }
int AArch64InstrInfo::getMemScale(unsigned Opc) { … }
bool AArch64InstrInfo::isPreLd(const MachineInstr &MI) { … }
bool AArch64InstrInfo::isPreSt(const MachineInstr &MI) { … }
bool AArch64InstrInfo::isPreLdSt(const MachineInstr &MI) { … }
bool AArch64InstrInfo::isPairedLdSt(const MachineInstr &MI) { … }
const MachineOperand &AArch64InstrInfo::getLdStBaseOp(const MachineInstr &MI) { … }
const MachineOperand &
AArch64InstrInfo::getLdStOffsetOp(const MachineInstr &MI) { … }
const MachineOperand &
AArch64InstrInfo::getLdStAmountOp(const MachineInstr &MI) { … }
static const TargetRegisterClass *getRegClass(const MachineInstr &MI,
Register Reg) { … }
bool AArch64InstrInfo::isHForm(const MachineInstr &MI) { … }
bool AArch64InstrInfo::isQForm(const MachineInstr &MI) { … }
bool AArch64InstrInfo::hasBTISemantics(const MachineInstr &MI) { … }
bool AArch64InstrInfo::isFpOrNEON(Register Reg) { … }
bool AArch64InstrInfo::isFpOrNEON(const MachineInstr &MI) { … }
static bool scaleOffset(unsigned Opc, int64_t &Offset) { … }
static bool canPairLdStOpc(unsigned FirstOpc, unsigned SecondOpc) { … }
static bool shouldClusterFI(const MachineFrameInfo &MFI, int FI1,
int64_t Offset1, unsigned Opcode1, int FI2,
int64_t Offset2, unsigned Opcode2) { … }
bool AArch64InstrInfo::shouldClusterMemOps(
ArrayRef<const MachineOperand *> BaseOps1, int64_t OpOffset1,
bool OffsetIsScalable1, ArrayRef<const MachineOperand *> BaseOps2,
int64_t OpOffset2, bool OffsetIsScalable2, unsigned ClusterSize,
unsigned NumBytes) const { … }
static const MachineInstrBuilder &AddSubReg(const MachineInstrBuilder &MIB,
MCRegister Reg, unsigned SubIdx,
unsigned State,
const TargetRegisterInfo *TRI) { … }
static bool forwardCopyWillClobberTuple(unsigned DestReg, unsigned SrcReg,
unsigned NumRegs) { … }
void AArch64InstrInfo::copyPhysRegTuple(MachineBasicBlock &MBB,
MachineBasicBlock::iterator I,
const DebugLoc &DL, MCRegister DestReg,
MCRegister SrcReg, bool KillSrc,
unsigned Opcode,
ArrayRef<unsigned> Indices) const { … }
void AArch64InstrInfo::copyGPRRegTuple(MachineBasicBlock &MBB,
MachineBasicBlock::iterator I,
const DebugLoc &DL, MCRegister DestReg,
MCRegister SrcReg, bool KillSrc,
unsigned Opcode, unsigned ZeroReg,
llvm::ArrayRef<unsigned> Indices) const { … }
void AArch64InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
MachineBasicBlock::iterator I,
const DebugLoc &DL, MCRegister DestReg,
MCRegister SrcReg, bool KillSrc,
bool RenamableDest,
bool RenamableSrc) const { … }
static void storeRegPairToStackSlot(const TargetRegisterInfo &TRI,
MachineBasicBlock &MBB,
MachineBasicBlock::iterator InsertBefore,
const MCInstrDesc &MCID,
Register SrcReg, bool IsKill,
unsigned SubIdx0, unsigned SubIdx1, int FI,
MachineMemOperand *MMO) { … }
void AArch64InstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI,
Register SrcReg, bool isKill, int FI,
const TargetRegisterClass *RC,
const TargetRegisterInfo *TRI,
Register VReg) const { … }
static void loadRegPairFromStackSlot(const TargetRegisterInfo &TRI,
MachineBasicBlock &MBB,
MachineBasicBlock::iterator InsertBefore,
const MCInstrDesc &MCID,
Register DestReg, unsigned SubIdx0,
unsigned SubIdx1, int FI,
MachineMemOperand *MMO) { … }
void AArch64InstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI,
Register DestReg, int FI,
const TargetRegisterClass *RC,
const TargetRegisterInfo *TRI,
Register VReg) const { … }
bool llvm::isNZCVTouchedInInstructionRange(const MachineInstr &DefMI,
const MachineInstr &UseMI,
const TargetRegisterInfo *TRI) { … }
void AArch64InstrInfo::decomposeStackOffsetForDwarfOffsets(
const StackOffset &Offset, int64_t &ByteSized, int64_t &VGSized) { … }
void AArch64InstrInfo::decomposeStackOffsetForFrameOffsets(
const StackOffset &Offset, int64_t &NumBytes, int64_t &NumPredicateVectors,
int64_t &NumDataVectors) { … }
static void appendVGScaledOffsetExpr(SmallVectorImpl<char> &Expr, int NumBytes,
int NumVGScaledBytes, unsigned VG,
llvm::raw_string_ostream &Comment) { … }
static MCCFIInstruction createDefCFAExpression(const TargetRegisterInfo &TRI,
unsigned Reg,
const StackOffset &Offset) { … }
MCCFIInstruction llvm::createDefCFA(const TargetRegisterInfo &TRI,
unsigned FrameReg, unsigned Reg,
const StackOffset &Offset,
bool LastAdjustmentWasScalable) { … }
MCCFIInstruction llvm::createCFAOffset(const TargetRegisterInfo &TRI,
unsigned Reg,
const StackOffset &OffsetFromDefCFA) { … }
static void emitFrameOffsetAdj(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI,
const DebugLoc &DL, unsigned DestReg,
unsigned SrcReg, int64_t Offset, unsigned Opc,
const TargetInstrInfo *TII,
MachineInstr::MIFlag Flag, bool NeedsWinCFI,
bool *HasWinCFI, bool EmitCFAOffset,
StackOffset CFAOffset, unsigned FrameReg) { … }
void llvm::emitFrameOffset(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI, const DebugLoc &DL,
unsigned DestReg, unsigned SrcReg,
StackOffset Offset, const TargetInstrInfo *TII,
MachineInstr::MIFlag Flag, bool SetNZCV,
bool NeedsWinCFI, bool *HasWinCFI,
bool EmitCFAOffset, StackOffset CFAOffset,
unsigned FrameReg) { … }
MachineInstr *AArch64InstrInfo::foldMemoryOperandImpl(
MachineFunction &MF, MachineInstr &MI, ArrayRef<unsigned> Ops,
MachineBasicBlock::iterator InsertPt, int FrameIndex,
LiveIntervals *LIS, VirtRegMap *VRM) const { … }
int llvm::isAArch64FrameOffsetLegal(const MachineInstr &MI,
StackOffset &SOffset,
bool *OutUseUnscaledOp,
unsigned *OutUnscaledOp,
int64_t *EmittableOffset) { … }
bool llvm::rewriteAArch64FrameIndex(MachineInstr &MI, unsigned FrameRegIdx,
unsigned FrameReg, StackOffset &Offset,
const AArch64InstrInfo *TII) { … }
void AArch64InstrInfo::insertNoop(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI) const { … }
MCInst AArch64InstrInfo::getNop() const { … }
bool AArch64InstrInfo::useMachineCombiner() const { … }
static bool isCombineInstrSettingFlag(unsigned Opc) { … }
static bool isCombineInstrCandidate32(unsigned Opc) { … }
static bool isCombineInstrCandidate64(unsigned Opc) { … }
static bool isCombineInstrCandidateFP(const MachineInstr &Inst) { … }
static bool isCombineInstrCandidate(unsigned Opc) { … }
static bool canCombine(MachineBasicBlock &MBB, MachineOperand &MO,
unsigned CombineOpc, unsigned ZeroReg = 0,
bool CheckZeroReg = false) { … }
static bool canCombineWithMUL(MachineBasicBlock &MBB, MachineOperand &MO,
unsigned MulOpc, unsigned ZeroReg) { … }
static bool canCombineWithFMUL(MachineBasicBlock &MBB, MachineOperand &MO,
unsigned MulOpc) { … }
bool AArch64InstrInfo::isAssociativeAndCommutative(const MachineInstr &Inst,
bool Invert) const { … }
static bool getMaddPatterns(MachineInstr &Root,
SmallVectorImpl<unsigned> &Patterns) { … }
static bool getFMAPatterns(MachineInstr &Root,
SmallVectorImpl<unsigned> &Patterns) { … }
static bool getFMULPatterns(MachineInstr &Root,
SmallVectorImpl<unsigned> &Patterns) { … }
static bool getFNEGPatterns(MachineInstr &Root,
SmallVectorImpl<unsigned> &Patterns) { … }
bool AArch64InstrInfo::isThroughputPattern(unsigned Pattern) const { … }
static bool getMiscPatterns(MachineInstr &Root,
SmallVectorImpl<unsigned> &Patterns) { … }
CombinerObjective
AArch64InstrInfo::getCombinerObjective(unsigned Pattern) const { … }
bool AArch64InstrInfo::getMachineCombinerPatterns(
MachineInstr &Root, SmallVectorImpl<unsigned> &Patterns,
bool DoRegPressureReduce) const { … }
enum class FMAInstKind { … };
static MachineInstr *
genFusedMultiply(MachineFunction &MF, MachineRegisterInfo &MRI,
const TargetInstrInfo *TII, MachineInstr &Root,
SmallVectorImpl<MachineInstr *> &InsInstrs, unsigned IdxMulOpd,
unsigned MaddOpc, const TargetRegisterClass *RC,
FMAInstKind kind = FMAInstKind::Default,
const Register *ReplacedAddend = nullptr) { … }
static MachineInstr *
genFNegatedMAD(MachineFunction &MF, MachineRegisterInfo &MRI,
const TargetInstrInfo *TII, MachineInstr &Root,
SmallVectorImpl<MachineInstr *> &InsInstrs) { … }
static MachineInstr *
genIndexedMultiply(MachineInstr &Root,
SmallVectorImpl<MachineInstr *> &InsInstrs,
unsigned IdxDupOp, unsigned MulOpc,
const TargetRegisterClass *RC, MachineRegisterInfo &MRI) { … }
static MachineInstr *genFusedMultiplyAcc(
MachineFunction &MF, MachineRegisterInfo &MRI, const TargetInstrInfo *TII,
MachineInstr &Root, SmallVectorImpl<MachineInstr *> &InsInstrs,
unsigned IdxMulOpd, unsigned MaddOpc, const TargetRegisterClass *RC) { … }
static Register genNeg(MachineFunction &MF, MachineRegisterInfo &MRI,
const TargetInstrInfo *TII, MachineInstr &Root,
SmallVectorImpl<MachineInstr *> &InsInstrs,
DenseMap<unsigned, unsigned> &InstrIdxForVirtReg,
unsigned MnegOpc, const TargetRegisterClass *RC) { … }
static MachineInstr *genFusedMultiplyAccNeg(
MachineFunction &MF, MachineRegisterInfo &MRI, const TargetInstrInfo *TII,
MachineInstr &Root, SmallVectorImpl<MachineInstr *> &InsInstrs,
DenseMap<unsigned, unsigned> &InstrIdxForVirtReg, unsigned IdxMulOpd,
unsigned MaddOpc, unsigned MnegOpc, const TargetRegisterClass *RC) { … }
static MachineInstr *genFusedMultiplyIdx(
MachineFunction &MF, MachineRegisterInfo &MRI, const TargetInstrInfo *TII,
MachineInstr &Root, SmallVectorImpl<MachineInstr *> &InsInstrs,
unsigned IdxMulOpd, unsigned MaddOpc, const TargetRegisterClass *RC) { … }
static MachineInstr *genFusedMultiplyIdxNeg(
MachineFunction &MF, MachineRegisterInfo &MRI, const TargetInstrInfo *TII,
MachineInstr &Root, SmallVectorImpl<MachineInstr *> &InsInstrs,
DenseMap<unsigned, unsigned> &InstrIdxForVirtReg, unsigned IdxMulOpd,
unsigned MaddOpc, unsigned MnegOpc, const TargetRegisterClass *RC) { … }
static MachineInstr *genMaddR(MachineFunction &MF, MachineRegisterInfo &MRI,
const TargetInstrInfo *TII, MachineInstr &Root,
SmallVectorImpl<MachineInstr *> &InsInstrs,
unsigned IdxMulOpd, unsigned MaddOpc, unsigned VR,
const TargetRegisterClass *RC) { … }
static void
genSubAdd2SubSub(MachineFunction &MF, MachineRegisterInfo &MRI,
const TargetInstrInfo *TII, MachineInstr &Root,
SmallVectorImpl<MachineInstr *> &InsInstrs,
SmallVectorImpl<MachineInstr *> &DelInstrs,
unsigned IdxOpd1,
DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) { … }
void AArch64InstrInfo::genAlternativeCodeSequence(
MachineInstr &Root, unsigned Pattern,
SmallVectorImpl<MachineInstr *> &InsInstrs,
SmallVectorImpl<MachineInstr *> &DelInstrs,
DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) const { … }
bool AArch64InstrInfo::optimizeCondBranch(MachineInstr &MI) const { … }
std::pair<unsigned, unsigned>
AArch64InstrInfo::decomposeMachineOperandsTargetFlags(unsigned TF) const { … }
ArrayRef<std::pair<unsigned, const char *>>
AArch64InstrInfo::getSerializableDirectMachineOperandTargetFlags() const { … }
ArrayRef<std::pair<unsigned, const char *>>
AArch64InstrInfo::getSerializableBitmaskMachineOperandTargetFlags() const { … }
ArrayRef<std::pair<MachineMemOperand::Flags, const char *>>
AArch64InstrInfo::getSerializableMachineMemOperandTargetFlags() const { … }
enum MachineOutlinerClass { … };
enum MachineOutlinerMBBFlags { … };
Register
AArch64InstrInfo::findRegisterToSaveLRTo(outliner::Candidate &C) const { … }
static bool
outliningCandidatesSigningScopeConsensus(const outliner::Candidate &a,
const outliner::Candidate &b) { … }
static bool
outliningCandidatesSigningKeyConsensus(const outliner::Candidate &a,
const outliner::Candidate &b) { … }
static bool outliningCandidatesV8_3OpsConsensus(const outliner::Candidate &a,
const outliner::Candidate &b) { … }
std::optional<std::unique_ptr<outliner::OutlinedFunction>>
AArch64InstrInfo::getOutliningCandidateInfo(
const MachineModuleInfo &MMI,
std::vector<outliner::Candidate> &RepeatedSequenceLocs,
unsigned MinRepeats) const { … }
void AArch64InstrInfo::mergeOutliningCandidateAttributes(
Function &F, std::vector<outliner::Candidate> &Candidates) const { … }
bool AArch64InstrInfo::isFunctionSafeToOutlineFrom(
MachineFunction &MF, bool OutlineFromLinkOnceODRs) const { … }
SmallVector<std::pair<MachineBasicBlock::iterator, MachineBasicBlock::iterator>>
AArch64InstrInfo::getOutlinableRanges(MachineBasicBlock &MBB,
unsigned &Flags) const { … }
outliner::InstrType
AArch64InstrInfo::getOutliningTypeImpl(const MachineModuleInfo &MMI,
MachineBasicBlock::iterator &MIT,
unsigned Flags) const { … }
void AArch64InstrInfo::fixupPostOutline(MachineBasicBlock &MBB) const { … }
static void signOutlinedFunction(MachineFunction &MF, MachineBasicBlock &MBB,
const AArch64InstrInfo *TII,
bool ShouldSignReturnAddr) { … }
void AArch64InstrInfo::buildOutlinedFrame(
MachineBasicBlock &MBB, MachineFunction &MF,
const outliner::OutlinedFunction &OF) const { … }
MachineBasicBlock::iterator AArch64InstrInfo::insertOutlinedCall(
Module &M, MachineBasicBlock &MBB, MachineBasicBlock::iterator &It,
MachineFunction &MF, outliner::Candidate &C) const { … }
bool AArch64InstrInfo::shouldOutlineFromFunctionByDefault(
MachineFunction &MF) const { … }
void AArch64InstrInfo::buildClearRegister(Register Reg, MachineBasicBlock &MBB,
MachineBasicBlock::iterator Iter,
DebugLoc &DL,
bool AllowSideEffects) const { … }
std::optional<DestSourcePair>
AArch64InstrInfo::isCopyInstrImpl(const MachineInstr &MI) const { … }
std::optional<DestSourcePair>
AArch64InstrInfo::isCopyLikeInstrImpl(const MachineInstr &MI) const { … }
std::optional<RegImmPair>
AArch64InstrInfo::isAddImmediate(const MachineInstr &MI, Register Reg) const { … }
static std::optional<ParamLoadedValue>
describeORRLoadedValue(const MachineInstr &MI, Register DescribedReg,
const TargetInstrInfo *TII,
const TargetRegisterInfo *TRI) { … }
bool AArch64InstrInfo::isFunctionSafeToSplit(const MachineFunction &MF) const { … }
bool AArch64InstrInfo::isMBBSafeToSplitToCold(
const MachineBasicBlock &MBB) const { … }
std::optional<ParamLoadedValue>
AArch64InstrInfo::describeLoadedValue(const MachineInstr &MI,
Register Reg) const { … }
bool AArch64InstrInfo::isExtendLikelyToBeFolded(
MachineInstr &ExtMI, MachineRegisterInfo &MRI) const { … }
uint64_t AArch64InstrInfo::getElementSizeForOpcode(unsigned Opc) const { … }
bool AArch64InstrInfo::isPTestLikeOpcode(unsigned Opc) const { … }
bool AArch64InstrInfo::isWhileOpcode(unsigned Opc) const { … }
unsigned int
AArch64InstrInfo::getTailDuplicateSize(CodeGenOptLevel OptLevel) const { … }
bool AArch64InstrInfo::isLegalAddressingMode(unsigned NumBytes, int64_t Offset,
unsigned Scale) const { … }
unsigned llvm::getBLRCallOpcode(const MachineFunction &MF) { … }
MachineBasicBlock::iterator
AArch64InstrInfo::probedStackAlloc(MachineBasicBlock::iterator MBBI,
Register TargetReg, bool FrameSetup) const { … }
namespace {
class AArch64PipelinerLoopInfo : public TargetInstrInfo::PipelinerLoopInfo { … };
}
static Register cloneInstr(const MachineInstr *MI, unsigned ReplaceOprNum,
Register ReplaceReg, MachineBasicBlock &MBB,
MachineBasicBlock::iterator InsertTo) { … }
void AArch64PipelinerLoopInfo::createRemainingIterationsGreaterCondition(
int TC, MachineBasicBlock &MBB, SmallVectorImpl<MachineOperand> &Cond,
DenseMap<MachineInstr *, MachineInstr *> &LastStage0Insts) { … }
static void extractPhiReg(const MachineInstr &Phi, const MachineBasicBlock *MBB,
Register &RegMBB, Register &RegOther) { … }
static bool isDefinedOutside(Register Reg, const MachineBasicBlock *BB) { … }
static bool getIndVarInfo(Register Reg, const MachineBasicBlock *LoopBB,
MachineInstr *&UpdateInst,
unsigned &UpdateCounterOprNum, Register &InitReg,
bool &IsUpdatePriorComp) { … }
std::unique_ptr<TargetInstrInfo::PipelinerLoopInfo>
AArch64InstrInfo::analyzeLoopForPipelining(MachineBasicBlock *LoopBB) const { … }
bool AArch64InstrInfo::verifyInstruction(const MachineInstr &MI,
StringRef &ErrInfo) const { … }
#define GET_INSTRINFO_HELPERS
#define GET_INSTRMAP_INFO
#include "AArch64GenInstrInfo.inc"