#include "RISCVInstrInfo.h"
#include "MCTargetDesc/RISCVMatInt.h"
#include "RISCV.h"
#include "RISCVMachineFunctionInfo.h"
#include "RISCVSubtarget.h"
#include "RISCVTargetMachine.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/LiveIntervals.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineCombinerPattern.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/MachineTraceMetrics.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/Module.h"
#include "llvm/MC/MCInstBuilder.h"
#include "llvm/MC/TargetRegistry.h"
#include "llvm/Support/ErrorHandling.h"
usingnamespacellvm;
#define GEN_CHECK_COMPRESS_INSTR
#include "RISCVGenCompressInstEmitter.inc"
#define GET_INSTRINFO_CTOR_DTOR
#define GET_INSTRINFO_NAMED_OPS
#include "RISCVGenInstrInfo.inc"
// Hidden command-line flag (default: false). When set, prefer emitting a
// whole-register move when copying vector registers, rather than the
// alternative copy strategy otherwise chosen (see the cl::desc below; the
// exact consumer of this flag is elsewhere in this file).
static cl::opt<bool> PreferWholeRegisterMove(
"riscv-prefer-whole-register-move", cl::init(false), cl::Hidden,
cl::desc("Prefer whole register move for vector registers."));
// Hidden command-line flag that forces the machine combiner to use one
// specific MachineTraceStrategy ("local" or "min-instr") when evaluating
// machine trace metrics. The default value TS_NumStrategies is not a real
// strategy — presumably it acts as an "unset" sentinel meaning no strategy
// is forced; confirm against getMachineCombinerTraceStrategy().
static cl::opt<MachineTraceStrategy> ForceMachineCombinerStrategy(
"riscv-force-machine-combiner-strategy", cl::Hidden,
cl::desc("Force machine combiner to use a specific strategy for machine "
"trace metrics evaluation."),
cl::init(MachineTraceStrategy::TS_NumStrategies),
cl::values(clEnumValN(MachineTraceStrategy::TS_Local, "local",
"Local strategy."),
clEnumValN(MachineTraceStrategy::TS_MinInstrCount, "min-instr",
"MinInstrCount strategy.")));
// Instantiate the TableGen-generated implementation of the RISCVVPseudosTable
// searchable table (the table's schema/contents live in the generated
// RISCVGenSearchableTables.inc).
namespace llvm::RISCVVPseudosTable {
using namespace RISCV;
#define GET_RISCVVPseudosTable_IMPL
#include "RISCVGenSearchableTables.inc"
} // namespace llvm::RISCVVPseudosTable
// Instantiate the TableGen-generated implementation of the masked-pseudos
// searchable table — presumably a lookup from masked vector pseudo opcodes
// to related records; see the generated RISCVGenSearchableTables.inc for
// the actual table definition.
namespace llvm::RISCV {
#define GET_RISCVMaskedPseudosTable_IMPL
#include "RISCVGenSearchableTables.inc"
} // namespace llvm::RISCV
RISCVInstrInfo::RISCVInstrInfo(RISCVSubtarget &STI)
: … { … }
MCInst RISCVInstrInfo::getNop() const { … }
Register RISCVInstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
int &FrameIndex) const { … }
Register RISCVInstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
int &FrameIndex,
unsigned &MemBytes) const { … }
Register RISCVInstrInfo::isStoreToStackSlot(const MachineInstr &MI,
int &FrameIndex) const { … }
Register RISCVInstrInfo::isStoreToStackSlot(const MachineInstr &MI,
int &FrameIndex,
unsigned &MemBytes) const { … }
bool RISCVInstrInfo::isReallyTriviallyReMaterializable(
const MachineInstr &MI) const { … }
static bool forwardCopyWillClobberTuple(unsigned DstReg, unsigned SrcReg,
unsigned NumRegs) { … }
static bool isConvertibleToVMV_V_V(const RISCVSubtarget &STI,
const MachineBasicBlock &MBB,
MachineBasicBlock::const_iterator MBBI,
MachineBasicBlock::const_iterator &DefMBBI,
RISCVII::VLMUL LMul) { … }
void RISCVInstrInfo::copyPhysRegVector(
MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
const DebugLoc &DL, MCRegister DstReg, MCRegister SrcReg, bool KillSrc,
const TargetRegisterClass *RegClass) const { … }
void RISCVInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI,
const DebugLoc &DL, MCRegister DstReg,
MCRegister SrcReg, bool KillSrc,
bool RenamableDest, bool RenamableSrc) const { … }
void RISCVInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
MachineBasicBlock::iterator I,
Register SrcReg, bool IsKill, int FI,
const TargetRegisterClass *RC,
const TargetRegisterInfo *TRI,
Register VReg) const { … }
void RISCVInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
MachineBasicBlock::iterator I,
Register DstReg, int FI,
const TargetRegisterClass *RC,
const TargetRegisterInfo *TRI,
Register VReg) const { … }
MachineInstr *RISCVInstrInfo::foldMemoryOperandImpl(
MachineFunction &MF, MachineInstr &MI, ArrayRef<unsigned> Ops,
MachineBasicBlock::iterator InsertPt, int FrameIndex, LiveIntervals *LIS,
VirtRegMap *VRM) const { … }
void RISCVInstrInfo::movImm(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MBBI,
const DebugLoc &DL, Register DstReg, uint64_t Val,
MachineInstr::MIFlag Flag, bool DstRenamable,
bool DstIsDead) const { … }
static RISCVCC::CondCode getCondFromBranchOpc(unsigned Opc) { … }
static void parseCondBranch(MachineInstr &LastInst, MachineBasicBlock *&Target,
SmallVectorImpl<MachineOperand> &Cond) { … }
unsigned RISCVCC::getBrCond(RISCVCC::CondCode CC, bool Imm) { … }
const MCInstrDesc &RISCVInstrInfo::getBrCond(RISCVCC::CondCode CC,
bool Imm) const { … }
RISCVCC::CondCode RISCVCC::getOppositeBranchCondition(RISCVCC::CondCode CC) { … }
bool RISCVInstrInfo::analyzeBranch(MachineBasicBlock &MBB,
MachineBasicBlock *&TBB,
MachineBasicBlock *&FBB,
SmallVectorImpl<MachineOperand> &Cond,
bool AllowModify) const { … }
unsigned RISCVInstrInfo::removeBranch(MachineBasicBlock &MBB,
int *BytesRemoved) const { … }
unsigned RISCVInstrInfo::insertBranch(
MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB,
ArrayRef<MachineOperand> Cond, const DebugLoc &DL, int *BytesAdded) const { … }
void RISCVInstrInfo::insertIndirectBranch(MachineBasicBlock &MBB,
MachineBasicBlock &DestBB,
MachineBasicBlock &RestoreBB,
const DebugLoc &DL, int64_t BrOffset,
RegScavenger *RS) const { … }
bool RISCVInstrInfo::reverseBranchCondition(
SmallVectorImpl<MachineOperand> &Cond) const { … }
bool RISCVInstrInfo::optimizeCondBranch(MachineInstr &MI) const { … }
MachineBasicBlock *
RISCVInstrInfo::getBranchDestBlock(const MachineInstr &MI) const { … }
bool RISCVInstrInfo::isBranchOffsetInRange(unsigned BranchOp,
int64_t BrOffset) const { … }
unsigned getPredicatedOpcode(unsigned Opcode) { … }
static MachineInstr *canFoldAsPredicatedOp(Register Reg,
const MachineRegisterInfo &MRI,
const TargetInstrInfo *TII) { … }
bool RISCVInstrInfo::analyzeSelect(const MachineInstr &MI,
SmallVectorImpl<MachineOperand> &Cond,
unsigned &TrueOp, unsigned &FalseOp,
bool &Optimizable) const { … }
MachineInstr *
RISCVInstrInfo::optimizeSelect(MachineInstr &MI,
SmallPtrSetImpl<MachineInstr *> &SeenMIs,
bool PreferFalse) const { … }
unsigned RISCVInstrInfo::getInstSizeInBytes(const MachineInstr &MI) const { … }
unsigned RISCVInstrInfo::getInstBundleLength(const MachineInstr &MI) const { … }
bool RISCVInstrInfo::isAsCheapAsAMove(const MachineInstr &MI) const { … }
std::optional<DestSourcePair>
RISCVInstrInfo::isCopyInstrImpl(const MachineInstr &MI) const { … }
MachineTraceStrategy RISCVInstrInfo::getMachineCombinerTraceStrategy() const { … }
void RISCVInstrInfo::finalizeInsInstrs(
MachineInstr &Root, unsigned &Pattern,
SmallVectorImpl<MachineInstr *> &InsInstrs) const { … }
static bool isFADD(unsigned Opc) { … }
static bool isFSUB(unsigned Opc) { … }
static bool isFMUL(unsigned Opc) { … }
bool RISCVInstrInfo::isVectorAssociativeAndCommutative(const MachineInstr &Inst,
bool Invert) const { … }
bool RISCVInstrInfo::areRVVInstsReassociable(const MachineInstr &Root,
const MachineInstr &Prev) const { … }
bool RISCVInstrInfo::hasReassociableVectorSibling(const MachineInstr &Inst,
bool &Commuted) const { … }
bool RISCVInstrInfo::hasReassociableOperands(
const MachineInstr &Inst, const MachineBasicBlock *MBB) const { … }
void RISCVInstrInfo::getReassociateOperandIndices(
const MachineInstr &Root, unsigned Pattern,
std::array<unsigned, 5> &OperandIndices) const { … }
bool RISCVInstrInfo::hasReassociableSibling(const MachineInstr &Inst,
bool &Commuted) const { … }
bool RISCVInstrInfo::isAssociativeAndCommutative(const MachineInstr &Inst,
bool Invert) const { … }
std::optional<unsigned>
RISCVInstrInfo::getInverseOpcode(unsigned Opcode) const { … }
static bool canCombineFPFusedMultiply(const MachineInstr &Root,
const MachineOperand &MO,
bool DoRegPressureReduce) { … }
static bool getFPFusedMultiplyPatterns(MachineInstr &Root,
SmallVectorImpl<unsigned> &Patterns,
bool DoRegPressureReduce) { … }
static bool getFPPatterns(MachineInstr &Root,
SmallVectorImpl<unsigned> &Patterns,
bool DoRegPressureReduce) { … }
static const MachineInstr *canCombine(const MachineBasicBlock &MBB,
const MachineOperand &MO,
unsigned CombineOpc) { … }
static bool canCombineShiftIntoShXAdd(const MachineBasicBlock &MBB,
const MachineOperand &MO,
unsigned OuterShiftAmt) { … }
static unsigned getSHXADDShiftAmount(unsigned Opc) { … }
static bool getSHXADDPatterns(const MachineInstr &Root,
SmallVectorImpl<unsigned> &Patterns) { … }
CombinerObjective RISCVInstrInfo::getCombinerObjective(unsigned Pattern) const { … }
bool RISCVInstrInfo::getMachineCombinerPatterns(
MachineInstr &Root, SmallVectorImpl<unsigned> &Patterns,
bool DoRegPressureReduce) const { … }
static unsigned getFPFusedMultiplyOpcode(unsigned RootOpc, unsigned Pattern) { … }
static unsigned getAddendOperandIdx(unsigned Pattern) { … }
static void combineFPFusedMultiply(MachineInstr &Root, MachineInstr &Prev,
unsigned Pattern,
SmallVectorImpl<MachineInstr *> &InsInstrs,
SmallVectorImpl<MachineInstr *> &DelInstrs) { … }
static void
genShXAddAddShift(MachineInstr &Root, unsigned AddOpIdx,
SmallVectorImpl<MachineInstr *> &InsInstrs,
SmallVectorImpl<MachineInstr *> &DelInstrs,
DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) { … }
void RISCVInstrInfo::genAlternativeCodeSequence(
MachineInstr &Root, unsigned Pattern,
SmallVectorImpl<MachineInstr *> &InsInstrs,
SmallVectorImpl<MachineInstr *> &DelInstrs,
DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) const { … }
bool RISCVInstrInfo::verifyInstruction(const MachineInstr &MI,
StringRef &ErrInfo) const { … }
bool RISCVInstrInfo::canFoldIntoAddrMode(const MachineInstr &MemI, Register Reg,
const MachineInstr &AddrI,
ExtAddrMode &AM) const { … }
MachineInstr *RISCVInstrInfo::emitLdStWithAddr(MachineInstr &MemI,
const ExtAddrMode &AM) const { … }
bool RISCVInstrInfo::getMemOperandsWithOffsetWidth(
const MachineInstr &LdSt, SmallVectorImpl<const MachineOperand *> &BaseOps,
int64_t &Offset, bool &OffsetIsScalable, LocationSize &Width,
const TargetRegisterInfo *TRI) const { … }
static bool memOpsHaveSameBasePtr(const MachineInstr &MI1,
ArrayRef<const MachineOperand *> BaseOps1,
const MachineInstr &MI2,
ArrayRef<const MachineOperand *> BaseOps2) { … }
bool RISCVInstrInfo::shouldClusterMemOps(
ArrayRef<const MachineOperand *> BaseOps1, int64_t Offset1,
bool OffsetIsScalable1, ArrayRef<const MachineOperand *> BaseOps2,
int64_t Offset2, bool OffsetIsScalable2, unsigned ClusterSize,
unsigned NumBytes) const { … }
bool RISCVInstrInfo::getMemOperandWithOffsetWidth(
const MachineInstr &LdSt, const MachineOperand *&BaseReg, int64_t &Offset,
LocationSize &Width, const TargetRegisterInfo *TRI) const { … }
bool RISCVInstrInfo::areMemAccessesTriviallyDisjoint(
const MachineInstr &MIa, const MachineInstr &MIb) const { … }
std::pair<unsigned, unsigned>
RISCVInstrInfo::decomposeMachineOperandsTargetFlags(unsigned TF) const { … }
ArrayRef<std::pair<unsigned, const char *>>
RISCVInstrInfo::getSerializableDirectMachineOperandTargetFlags() const { … }
bool RISCVInstrInfo::isFunctionSafeToOutlineFrom(
MachineFunction &MF, bool OutlineFromLinkOnceODRs) const { … }
bool RISCVInstrInfo::isMBBSafeToOutlineFrom(MachineBasicBlock &MBB,
unsigned &Flags) const { … }
enum MachineOutlinerConstructionID { … };
bool RISCVInstrInfo::shouldOutlineFromFunctionByDefault(
MachineFunction &MF) const { … }
std::optional<std::unique_ptr<outliner::OutlinedFunction>>
RISCVInstrInfo::getOutliningCandidateInfo(
const MachineModuleInfo &MMI,
std::vector<outliner::Candidate> &RepeatedSequenceLocs,
unsigned MinRepeats) const { … }
outliner::InstrType
RISCVInstrInfo::getOutliningTypeImpl(const MachineModuleInfo &MMI,
MachineBasicBlock::iterator &MBBI,
unsigned Flags) const { … }
void RISCVInstrInfo::buildOutlinedFrame(
MachineBasicBlock &MBB, MachineFunction &MF,
const outliner::OutlinedFunction &OF) const { … }
MachineBasicBlock::iterator RISCVInstrInfo::insertOutlinedCall(
Module &M, MachineBasicBlock &MBB, MachineBasicBlock::iterator &It,
MachineFunction &MF, outliner::Candidate &C) const { … }
std::optional<RegImmPair> RISCVInstrInfo::isAddImmediate(const MachineInstr &MI,
Register Reg) const { … }
std::string RISCVInstrInfo::createMIROperandComment(
const MachineInstr &MI, const MachineOperand &Op, unsigned OpIdx,
const TargetRegisterInfo *TRI) const { … }
#define CASE_RVV_OPCODE_UNMASK_LMUL …
#define CASE_RVV_OPCODE_MASK_LMUL …
#define CASE_RVV_OPCODE_LMUL …
#define CASE_RVV_OPCODE_UNMASK_WIDEN …
#define CASE_RVV_OPCODE_UNMASK …
#define CASE_RVV_OPCODE_MASK_WIDEN …
#define CASE_RVV_OPCODE_MASK …
#define CASE_RVV_OPCODE_WIDEN …
#define CASE_RVV_OPCODE …
#define CASE_VMA_OPCODE_COMMON …
#define CASE_VMA_OPCODE_LMULS_M1 …
#define CASE_VMA_OPCODE_LMULS_MF2 …
#define CASE_VMA_OPCODE_LMULS_MF4 …
#define CASE_VMA_OPCODE_LMULS …
#define CASE_VFMA_OPCODE_COMMON …
#define CASE_VFMA_OPCODE_LMULS_M1 …
#define CASE_VFMA_OPCODE_LMULS_MF2 …
#define CASE_VFMA_OPCODE_LMULS_MF4 …
#define CASE_VFMA_OPCODE_VV …
#define CASE_VFMA_SPLATS …
bool RISCVInstrInfo::findCommutedOpIndices(const MachineInstr &MI,
unsigned &SrcOpIdx1,
unsigned &SrcOpIdx2) const { … }
#define CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, LMUL) …
#define CASE_VMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, TYPE) …
#define CASE_VMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, TYPE) …
#define CASE_VMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, TYPE) …
#define CASE_VMA_CHANGE_OPCODE_LMULS(OLDOP, NEWOP, TYPE) …
#define CASE_VMA_CHANGE_OPCODE_SPLATS(OLDOP, NEWOP) …
#define CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, LMUL, SEW) …
#define CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, TYPE, SEW) …
#define CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, TYPE, SEW) …
#define CASE_VFMA_CHANGE_OPCODE_VV(OLDOP, NEWOP) …
#define CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, TYPE, SEW) …
#define CASE_VFMA_CHANGE_OPCODE_LMULS(OLDOP, NEWOP, TYPE, SEW) …
#define CASE_VFMA_CHANGE_OPCODE_SPLATS(OLDOP, NEWOP) …
MachineInstr *RISCVInstrInfo::commuteInstructionImpl(MachineInstr &MI,
bool NewMI,
unsigned OpIdx1,
unsigned OpIdx2) const { … }
#undef CASE_RVV_OPCODE_UNMASK_LMUL
#undef CASE_RVV_OPCODE_MASK_LMUL
#undef CASE_RVV_OPCODE_LMUL
#undef CASE_RVV_OPCODE_UNMASK_WIDEN
#undef CASE_RVV_OPCODE_UNMASK
#undef CASE_RVV_OPCODE_MASK_WIDEN
#undef CASE_RVV_OPCODE_MASK
#undef CASE_RVV_OPCODE_WIDEN
#undef CASE_RVV_OPCODE
#undef CASE_VMA_OPCODE_COMMON
#undef CASE_VMA_OPCODE_LMULS_M1
#undef CASE_VMA_OPCODE_LMULS_MF2
#undef CASE_VMA_OPCODE_LMULS_MF4
#undef CASE_VMA_OPCODE_LMULS
#undef CASE_VFMA_OPCODE_COMMON
#undef CASE_VFMA_OPCODE_LMULS_M1
#undef CASE_VFMA_OPCODE_LMULS_MF2
#undef CASE_VFMA_OPCODE_LMULS_MF4
#undef CASE_VFMA_OPCODE_VV
#undef CASE_VFMA_SPLATS
#define CASE_WIDEOP_OPCODE_COMMON …
#define CASE_WIDEOP_OPCODE_LMULS_MF4 …
#define CASE_WIDEOP_OPCODE_LMULS …
#define CASE_WIDEOP_CHANGE_OPCODE_COMMON …
#define CASE_WIDEOP_CHANGE_OPCODE_LMULS_MF4 …
#define CASE_WIDEOP_CHANGE_OPCODE_LMULS …
#define CASE_FP_WIDEOP_OPCODE_COMMON …
// NOTE(review): removed a dangling line-continuation backslash that made the
// following #define a continuation of this macro; every sibling macro in this
// group terminates without one.
#define CASE_FP_WIDEOP_OPCODE_LMULS_MF4 …
#define CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON …
// NOTE(review): removed a dangling line-continuation backslash that made the
// following #define a continuation of this macro; every sibling macro in this
// group terminates without one.
#define CASE_FP_WIDEOP_CHANGE_OPCODE_LMULS_MF4 …
#define CASE_FP_WIDEOP_CHANGE_OPCODE_LMULS …
MachineInstr *RISCVInstrInfo::convertToThreeAddress(MachineInstr &MI,
LiveVariables *LV,
LiveIntervals *LIS) const { … }
#undef CASE_WIDEOP_OPCODE_COMMON
#undef CASE_WIDEOP_OPCODE_LMULS_MF4
#undef CASE_WIDEOP_OPCODE_LMULS
#undef CASE_WIDEOP_CHANGE_OPCODE_COMMON
#undef CASE_WIDEOP_CHANGE_OPCODE_LMULS_MF4
#undef CASE_WIDEOP_CHANGE_OPCODE_LMULS
#undef CASE_FP_WIDEOP_OPCODE_COMMON
#undef CASE_FP_WIDEOP_OPCODE_LMULS_MF4
#undef CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON
#undef CASE_FP_WIDEOP_CHANGE_OPCODE_LMULS_MF4
#undef CASE_FP_WIDEOP_CHANGE_OPCODE_LMULS
void RISCVInstrInfo::mulImm(MachineFunction &MF, MachineBasicBlock &MBB,
MachineBasicBlock::iterator II, const DebugLoc &DL,
Register DestReg, uint32_t Amount,
MachineInstr::MIFlag Flag) const { … }
ArrayRef<std::pair<MachineMemOperand::Flags, const char *>>
RISCVInstrInfo::getSerializableMachineMemOperandTargetFlags() const { … }
unsigned RISCVInstrInfo::getTailDuplicateSize(CodeGenOptLevel OptLevel) const { … }
bool RISCV::isSEXT_W(const MachineInstr &MI) { … }
bool RISCV::isZEXT_W(const MachineInstr &MI) { … }
bool RISCV::isZEXT_B(const MachineInstr &MI) { … }
static bool isRVVWholeLoadStore(unsigned Opcode) { … }
bool RISCV::isRVVSpill(const MachineInstr &MI) { … }
std::optional<std::pair<unsigned, unsigned>>
RISCV::isRVVSpillForZvlsseg(unsigned Opcode) { … }
bool RISCV::isFaultFirstLoad(const MachineInstr &MI) { … }
bool RISCV::hasEqualFRM(const MachineInstr &MI1, const MachineInstr &MI2) { … }
std::optional<unsigned>
RISCV::getVectorLowDemandedScalarBits(uint16_t Opcode, unsigned Log2SEW) { … }
unsigned RISCV::getRVVMCOpcode(unsigned RVVPseudoOpcode) { … }
unsigned RISCV::getDestLog2EEW(const MCInstrDesc &Desc, unsigned Log2SEW) { … }