//===- InlineSpiller.cpp - Insert spills and restores inline --------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// The inline spiller modifies the machine function directly instead of
// inserting spills and restores in VirtRegMap.
//
//===----------------------------------------------------------------------===//

#include "SplitKit.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveInterval.h"
#include "llvm/CodeGen/LiveIntervals.h"
#include "llvm/CodeGen/LiveRangeEdit.h"
#include "llvm/CodeGen/LiveStacks.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineBlockFrequencyInfo.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineInstrBundle.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SlotIndexes.h"
#include "llvm/CodeGen/Spiller.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/VirtRegMap.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/Support/BlockFrequency.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <iterator>
#include <tuple>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE …

STATISTIC(NumSpilledRanges,   "Number of spilled live ranges");
STATISTIC(NumSnippets,        "Number of spilled snippets");
STATISTIC(NumSpills,          "Number of spills inserted");
STATISTIC(NumSpillsRemoved,   "Number of spills removed");
STATISTIC(NumReloads,         "Number of reloads inserted");
STATISTIC(NumReloadsRemoved,  "Number of reloads removed");
STATISTIC(NumFolded,          "Number of folded stack accesses");
STATISTIC(NumFoldedLoads,     "Number of folded loads");
STATISTIC(NumRemats,          "Number of rematerialized defs for spilling");

static cl::opt<bool>
    RestrictStatepointRemat("restrict-statepoint-remat", cl::init(false),
                            cl::Hidden,
                            cl::desc("Restrict remat for statepoint operands"));

namespace {

class HoistSpillHelper : private LiveRangeEdit::Delegate { … };

class InlineSpiller : public Spiller { … };

} // end anonymous namespace

Spiller::~Spiller() = default;

void Spiller::anchor() { … }

Spiller *llvm::createInlineSpiller(MachineFunctionPass &Pass,
                                   MachineFunction &MF, VirtRegMap &VRM,
                                   VirtRegAuxInfo &VRAI) { … }
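// A minimal usage sketch (illustrative only; it assumes the caller is a
// register-allocator pass that already owns LIS, VRM, and VRAI, and the
// LiveRangeEdit arguments shown are not necessarily the exact ones RAGreedy
// passes):
//
//   std::unique_ptr<Spiller> SpillerInstance(
//       createInlineSpiller(Pass, MF, VRM, VRAI));
//   SmallVector<Register, 4> NewVRegs;
//   LiveRangeEdit LRE(&VirtReg, NewVRegs, MF, LIS, &VRM);
//   SpillerInstance->spill(LRE); // VirtReg is replaced by spills/reloads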
//===----------------------------------------------------------------------===//
//                                 Snippets
//===----------------------------------------------------------------------===//

// When spilling a virtual register, we also spill any snippets it is connected
// to. The snippets are small live ranges that only have a single real use,
// leftovers from live range splitting. Spilling them enables memory operand
// folding or tightens the live range around the single use.
//
// This minimizes register pressure and maximizes the store-to-load distance
// for spill slots, which can be important in tight loops.

/// isCopyOf - If MI is a COPY to or from Reg, return the other register,
/// otherwise return 0.
static Register isCopyOf(const MachineInstr &MI, Register Reg,
                         const TargetInstrInfo &TII) { … }

/// Check for a copy bundle as formed by SplitKit.
static Register isCopyOfBundle(const MachineInstr &FirstMI, Register Reg,
                               const TargetInstrInfo &TII) { … }

static void getVDefInterval(const MachineInstr &MI, LiveIntervals &LIS) { … }

/// isSnippet - Identify if a live interval is a snippet that should be
/// spilled. It is assumed that SnipLI is a virtual register with the same
/// original as Edit->getReg().
bool InlineSpiller::isSnippet(const LiveInterval &SnipLI) { … }

/// collectRegsToSpill - Collect live range snippets that only have a single
/// real use.
void InlineSpiller::collectRegsToSpill() { … }

bool InlineSpiller::isSibling(Register Reg) { … }

/// It is beneficial to spill at an earlier point in the same BB when there is
/// an alternative def earlier in the same MBB. Hoist the spill as far as
/// possible in SpillMBB; this can ease register pressure:
///
///   x = def
///   y = use x
///   s = copy x
///
/// Hoisting the spill of s to immediately after the def removes the
/// interference between x and y:
///
///   x = def
///   spill x
///   y = use killed x
///
/// This hoist only helps when the copy kills its source.
bool InlineSpiller::hoistSpillInsideBB(LiveInterval &SpillLI,
                                       MachineInstr &CopyMI) { … }

/// eliminateRedundantSpills - SLI:VNI is known to be on the stack. Remove any
/// redundant spills of this value in SLI.reg and sibling copies.
void InlineSpiller::eliminateRedundantSpills(LiveInterval &SLI,
                                             VNInfo *VNI) { … }

//===----------------------------------------------------------------------===//
//                            Rematerialization
//===----------------------------------------------------------------------===//

/// markValueUsed - Remember that VNI failed to rematerialize, so its defining
/// instruction cannot be eliminated. See through snippet copies.
void InlineSpiller::markValueUsed(LiveInterval *LI, VNInfo *VNI) { … }

bool InlineSpiller::canGuaranteeAssignmentAfterRemat(Register VReg,
                                                     MachineInstr &MI) { … }

/// reMaterializeFor - Attempt to rematerialize before MI instead of reloading.
bool InlineSpiller::reMaterializeFor(LiveInterval &VirtReg,
                                     MachineInstr &MI) { … }

/// reMaterializeAll - Try to rematerialize as many uses as possible, and trim
/// the live ranges after.
void InlineSpiller::reMaterializeAll() { … }

//===----------------------------------------------------------------------===//
//                                 Spilling
//===----------------------------------------------------------------------===//

/// If MI is a load or store of StackSlot, it can be removed.
bool InlineSpiller::coalesceStackAccess(MachineInstr *MI, Register Reg) { … }
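// Illustration (hypothetical MIR with target-independent pseudo-opcodes):
// once Reg lives in %stack.0, an instruction that merely moves the value
// between Reg and that same slot is a no-op and can be erased:
//
//   %x = LOAD %stack.0     ; reload from the slot being spilled -> removable
//   STORE %x, %stack.0     ; store straight back to the same slot -> removable
//
// NumReloadsRemoved and NumSpillsRemoved count the erased accesses.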
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD
// Dump the range of instructions from B to E with their slot indexes.
static void dumpMachineInstrRangeWithSlotIndex(MachineBasicBlock::iterator B,
                                               MachineBasicBlock::iterator E,
                                               LiveIntervals const &LIS,
                                               const char *const header,
                                               Register VReg = Register()) {
  char NextLine = '\n';
  char SlotIndent = '\t';

  if (std::next(B) == E) {
    NextLine = ' ';
    SlotIndent = ' ';
  }

  dbgs() << '\t' << header << ": " << NextLine;

  for (MachineBasicBlock::iterator I = B; I != E; ++I) {
    SlotIndex Idx = LIS.getInstructionIndex(*I).getRegSlot();

    // If a register was passed in and this instruction has it as a
    // destination that is marked as an early clobber, print the
    // early-clobber slot index.
    if (VReg) {
      MachineOperand *MO = I->findRegisterDefOperand(VReg, /*TRI=*/nullptr);
      if (MO && MO->isEarlyClobber())
        Idx = Idx.getRegSlot(true);
    }

    dbgs() << SlotIndent << Idx << '\t' << *I;
  }
}
#endif

/// foldMemoryOperand - Try folding stack slot references in Ops into their
/// instructions.
///
/// @param Ops    Operand indices from AnalyzeVirtRegInBundle().
/// @param LoadMI Load instruction to use instead of stack slot when non-null.
/// @return       True on success.
bool InlineSpiller::
foldMemoryOperand(ArrayRef<std::pair<MachineInstr *, unsigned>> Ops,
                  MachineInstr *LoadMI) { … }

void InlineSpiller::insertReload(Register NewVReg, SlotIndex Idx,
                                 MachineBasicBlock::iterator MI) { … }

/// Check if \p Def fully defines a VReg with an undefined value.
/// If that's the case, that means the value of VReg is actually
/// not relevant.
static bool isRealSpill(const MachineInstr &Def) { … }

/// insertSpill - Insert a spill of NewVReg after MI.
void InlineSpiller::insertSpill(Register NewVReg, bool isKill,
                                MachineBasicBlock::iterator MI) { … }

/// spillAroundUses - Insert spill code around each use of Reg.
void InlineSpiller::spillAroundUses(Register Reg) { … }

/// spillAll - Spill all registers remaining after rematerialization.
void InlineSpiller::spillAll() { … }

void InlineSpiller::spill(LiveRangeEdit &edit) { … }

/// Optimizations after all the reg selections and spills are done.
void InlineSpiller::postOptimization() { … }

/// When a spill is inserted, add the spill to the MergeableSpills map.
void HoistSpillHelper::addToMergeableSpills(MachineInstr &Spill, int StackSlot,
                                            unsigned Original) { … }

/// When a spill is removed, remove the spill from the MergeableSpills map.
/// Return true if the spill is removed successfully.
bool HoistSpillHelper::rmFromMergeableSpills(MachineInstr &Spill,
                                             int StackSlot) { … }

/// Check BB to see if it is a possible target BB to place a hoisted spill,
/// i.e., there should be a live sibling of OrigReg at the insert point.
bool HoistSpillHelper::isSpillCandBB(LiveInterval &OrigLI, VNInfo &OrigVNI,
                                     MachineBasicBlock &BB,
                                     Register &LiveReg) { … }

/// Remove redundant spills in the same BB. Save those redundant spills in
/// SpillsToRm, and save the spill to keep and its BB in the SpillBBToSpill
/// map.
void HoistSpillHelper::rmRedundantSpills(
    SmallPtrSet<MachineInstr *, 16> &Spills,
    SmallVectorImpl<MachineInstr *> &SpillsToRm,
    DenseMap<MachineDomTreeNode *, MachineInstr *> &SpillBBToSpill) { … }
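// Illustration (hypothetical MIR): with two spills of the same value in one
// block,
//
//   bb.7:
//     SPILL %2, %stack.0   ; first spill of the value in bb.7; recorded in
//                          ; SpillBBToSpill for this dom-tree node and kept
//     ...
//     SPILL %5, %stack.0   ; %5 is a sibling carrying the same value, so
//                          ; this later spill is pushed onto SpillsToRm
//
// only the earliest spill in each block survives.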
/// Starting from \p Root, find a top-down traversal order of the dominator
/// tree to visit all basic blocks containing the elements of \p Spills.
/// Redundant spills will be found and put into \p SpillsToRm at the same
/// time. \p SpillBBToSpill will be populated as part of the process and
/// maps a basic block to the first store occurring in the basic block.
/// \post SpillsToRm.union(Spills\@post) == Spills\@pre
void HoistSpillHelper::getVisitOrders(
    MachineBasicBlock *Root, SmallPtrSet<MachineInstr *, 16> &Spills,
    SmallVectorImpl<MachineDomTreeNode *> &Orders,
    SmallVectorImpl<MachineInstr *> &SpillsToRm,
    DenseMap<MachineDomTreeNode *, unsigned> &SpillsToKeep,
    DenseMap<MachineDomTreeNode *, MachineInstr *> &SpillBBToSpill) { … }

/// Try to hoist spills according to BB hotness. The spills to be removed will
/// be saved in \p SpillsToRm. The spills to be inserted will be saved in
/// \p SpillsToIns.
void HoistSpillHelper::runHoistSpills(
    LiveInterval &OrigLI, VNInfo &OrigVNI,
    SmallPtrSet<MachineInstr *, 16> &Spills,
    SmallVectorImpl<MachineInstr *> &SpillsToRm,
    DenseMap<MachineBasicBlock *, unsigned> &SpillsToIns) { … }

/// For spills with equal values, remove redundant spills and hoist those left
/// to less hot spots.
///
/// Spills with equal values are collected into the same set in
/// MergeableSpills when the spill is inserted. These equal spills originate
/// from the same defining instruction and are dominated by that instruction.
/// Before hoisting all the equal spills, redundant spills inside the same
/// BB are first marked to be deleted. Then, starting from the spills left,
/// walk up the dominator tree towards the Root node where the defining
/// instruction is located, mark the dominated spills to be deleted along the
/// way, and collect the BB nodes on the path from non-dominated spills to the
/// defining instruction into a WorkSet. The nodes in WorkSet are the
/// candidate places where we consider hoisting the spills. We iterate the
/// WorkSet in bottom-up order, and for each node we decide whether to hoist
/// the spills inside its subtree to that node. In this way, we can benefit
/// locally even if hoisting all the equal spills to one cold place is
/// impossible.
void HoistSpillHelper::hoistAllSpills() { … }

/// When a virtual register is cloned, the \p New register should have the
/// same physreg or stackslot as the \p Old register.
void HoistSpillHelper::LRE_DidCloneVirtReg(Register New, Register Old) { … }
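// A sketch of the invariant this callback maintains (assuming Old was spilled
// rather than assigned a physreg; the exact bookkeeping lives in the elided
// body above):
//
//   if (VRM.getStackSlot(Old) != VirtRegMap::NO_STACK_SLOT)
//     VRM.assignVirt2StackSlot(New, VRM.getStackSlot(Old));
//
// Keeping New on the same slot as Old lets MergeableSpills continue to treat
// spills of the clone and of the original as candidates for merging.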