llvm/lib/Transforms/Scalar/LICM.cpp

//===-- LICM.cpp - Loop Invariant Code Motion Pass ------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass performs loop invariant code motion, attempting to remove as much
// code from the body of a loop as possible.  It does this by either hoisting
// code into the preheader block, or by sinking code to the exit blocks if it is
// safe.  This pass also promotes must-aliased memory locations in the loop to
// live in registers, thus hoisting and sinking "invariant" loads and stores.
//
// Hoisting operations out of loops is a canonicalization transform.  It
// enables and simplifies subsequent optimizations in the middle-end.
// Rematerialization of hoisted instructions to reduce register pressure is the
// responsibility of the back-end, which has more accurate information about
// register pressure and also handles optimizations other than LICM that
// increase live ranges.
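//
// For illustration only (a hypothetical source-level sketch; the pass itself
// operates on LLVM IR):
//
//   for (i = 0; i < n; ++i)
//     a[i] += x * y;        // x * y is loop invariant
//
// is turned into
//
//   t = x * y;              // computed once in the preheader
//   for (i = 0; i < n; ++i)
//     a[i] += t;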
//
// This pass uses alias analysis for two purposes:
//
//  1. Moving loop invariant loads and calls out of loops.  If we can determine
//     that a load or call inside of a loop never aliases anything stored to,
//     we can hoist it or sink it like any other instruction.
//  2. Scalar Promotion of Memory - If there is a store instruction inside of
//     the loop, we try to move the store to happen AFTER the loop instead of
//     inside of the loop.  This can only happen if a few conditions are true:
//       A. The pointer stored through is loop invariant
//       B. There are no stores or loads in the loop which _may_ alias the
//          pointer.  There are no calls in the loop which mod/ref the pointer.
//     If these conditions are true, we can promote the loads and stores in the
//     loop of the pointer to use a temporary alloca'd variable.  We then use
//     the SSAUpdater to construct the appropriate SSA form for the value.
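//
//     For illustration only (a hypothetical source-level sketch; the actual
//     transformation is done on LLVM IR):
//
//       for (i = 0; i < n; ++i)
//         *p += a[i];          // p is loop invariant and not otherwise
//                              // aliased in the loop
//
//     is turned into
//
//       tmp = *p;              // load hoisted into the preheader
//       for (i = 0; i < n; ++i)
//         tmp += a[i];         // the value lives in a register
//       *p = tmp;              // a single store sunk after the loop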
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar/LICM.h"
#include "llvm/ADT/PriorityWorklist.h"
#include "llvm/ADT/SetOperations.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AliasSetTracker.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/DomTreeUpdater.h"
#include "llvm/Analysis/GuardUtils.h"
#include "llvm/Analysis/LazyBlockFrequencyInfo.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopIterator.h"
#include "llvm/Analysis/LoopNestAnalysis.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/MemorySSA.h"
#include "llvm/Analysis/MemorySSAUpdater.h"
#include "llvm/Analysis/MustExecute.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/PredIteratorCache.h"
#include "llvm/InitializePasses.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/AssumeBundleBuilder.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include "llvm/Transforms/Utils/SSAUpdater.h"
#include <algorithm>
#include <utility>
using namespace llvm;

namespace llvm {
class LPMUpdater;
} // namespace llvm

#define DEBUG_TYPE "licm"

STATISTIC(NumCreatedBlocks, "Number of blocks created");
STATISTIC(NumClonedBranches, "Number of branches cloned");
STATISTIC(NumSunk, "Number of instructions sunk out of loop");
STATISTIC(NumHoisted, "Number of instructions hoisted out of loop");
STATISTIC(NumMovedLoads, "Number of load insts hoisted or sunk");
STATISTIC(NumMovedCalls, "Number of call insts hoisted or sunk");
STATISTIC(NumPromotionCandidates, "Number of promotion candidates");
STATISTIC(NumLoadPromoted, "Number of load-only promotions");
STATISTIC(NumLoadStorePromoted, "Number of load and store promotions");
STATISTIC(NumMinMaxHoisted,
          "Number of min/max expressions hoisted out of the loop");
STATISTIC(NumGEPsHoisted,
          "Number of geps reassociated and hoisted out of the loop");
STATISTIC(NumAddSubHoisted, "Number of add/subtract expressions reassociated "
                            "and hoisted out of the loop");
STATISTIC(NumFPAssociationsHoisted, "Number of invariant FP expressions "
                                    "reassociated and hoisted out of the loop");
STATISTIC(NumIntAssociationsHoisted,
          "Number of invariant int expressions "
          "reassociated and hoisted out of the loop");
STATISTIC(NumBOAssociationsHoisted, "Number of invariant BinaryOp expressions "
                                    "reassociated and hoisted out of the loop");

/// Memory promotion is enabled by default.
static cl::opt<bool>
    DisablePromotion("disable-licm-promotion", cl::Hidden, cl::init(false),
                     cl::desc("Disable memory promotion in LICM pass"));

static cl::opt<bool> ControlFlowHoisting(
    "licm-control-flow-hoisting", cl::Hidden, cl::init(false),
    cl::desc("Enable control flow (and PHI) hoisting in LICM"));

static cl::opt<bool>
    SingleThread("licm-force-thread-model-single", cl::Hidden, cl::init(false),
                 cl::desc("Force thread model single in LICM pass"));

static cl::opt<uint32_t> MaxNumUsesTraversed(
    "licm-max-num-uses-traversed", cl::Hidden, cl::init(8),
    cl::desc("Max num uses visited for identifying load "
             "invariance in loop using invariant start (default = 8)"));

static cl::opt<unsigned> FPAssociationUpperLimit(
    "licm-max-num-fp-reassociations", cl::init(5U), cl::Hidden,
    cl::desc(
        "Set upper limit for the number of transformations performed "
        "during a single round of hoisting the reassociated expressions."));

cl::opt<unsigned> IntAssociationUpperLimit(
    "licm-max-num-int-reassociations", cl::init(5U), cl::Hidden,
    cl::desc(
        "Set upper limit for the number of transformations performed "
        "during a single round of hoisting the reassociated expressions."));

// Experimental option to allow imprecision in LICM in pathological cases, in
// exchange for faster compile times. This is to be removed if MemorySSA starts
// to address the same issue. LICM calls MemorySSAWalker's
// getClobberingMemoryAccess, up to the value of the Cap, getting perfect
// accuracy. Afterwards, LICM will call into MemorySSA's getDefiningAccess,
// which may not be precise, since optimizeUses is capped. The result is
// correct, but we may not get as "far up" as possible to get which access is
// clobbering the one queried.
cl::opt<unsigned> llvm::SetLicmMssaOptCap(
    "licm-mssa-optimization-cap", cl::init(100), cl::Hidden,
    cl::desc("Enable imprecision in LICM in pathological cases, in exchange "
             "for faster compile. Caps the MemorySSA clobbering calls."));

// Experimentally, memory promotion carries less importance than sinking and
// hoisting. Limit when we do promotion when using MemorySSA, in order to save
// compile time.
cl::opt<unsigned> llvm::SetLicmMssaNoAccForPromotionCap(
    "licm-mssa-max-acc-promotion", cl::init(250), cl::Hidden,
    cl::desc("[LICM & MemorySSA] When MSSA in LICM is disabled, this has no "
             "effect. When MSSA in LICM is enabled, then this is the maximum "
             "number of accesses allowed to be present in a loop in order to "
             "enable memory promotion."));

static bool inSubLoop(BasicBlock *BB, Loop *CurLoop, LoopInfo *LI);
static bool isNotUsedOrFoldableInLoop(const Instruction &I, const Loop *CurLoop,
                                      const LoopSafetyInfo *SafetyInfo,
                                      TargetTransformInfo *TTI,
                                      bool &FoldableInLoop, bool LoopNestMode);
static void hoist(Instruction &I, const DominatorTree *DT, const Loop *CurLoop,
                  BasicBlock *Dest, ICFLoopSafetyInfo *SafetyInfo,
                  MemorySSAUpdater &MSSAU, ScalarEvolution *SE,
                  OptimizationRemarkEmitter *ORE);
static bool sink(Instruction &I, LoopInfo *LI, DominatorTree *DT,
                 const Loop *CurLoop, ICFLoopSafetyInfo *SafetyInfo,
                 MemorySSAUpdater &MSSAU, OptimizationRemarkEmitter *ORE);
static bool isSafeToExecuteUnconditionally(
    Instruction &Inst, const DominatorTree *DT, const TargetLibraryInfo *TLI,
    const Loop *CurLoop, const LoopSafetyInfo *SafetyInfo,
    OptimizationRemarkEmitter *ORE, const Instruction *CtxI,
    AssumptionCache *AC, bool AllowSpeculation);
static bool pointerInvalidatedByLoop(MemorySSA *MSSA, MemoryUse *MU,
                                     Loop *CurLoop, Instruction &I,
                                     SinkAndHoistLICMFlags &Flags,
                                     bool InvariantGroup);
static bool pointerInvalidatedByBlock(BasicBlock &BB, MemorySSA &MSSA,
                                      MemoryUse &MU);
/// Aggregates various functions for hoisting computations out of the loop.
static bool hoistArithmetics(Instruction &I, Loop &L,
                             ICFLoopSafetyInfo &SafetyInfo,
                             MemorySSAUpdater &MSSAU, AssumptionCache *AC,
                             DominatorTree *DT);
static Instruction *cloneInstructionInExitBlock(
    Instruction &I, BasicBlock &ExitBlock, PHINode &PN, const LoopInfo *LI,
    const LoopSafetyInfo *SafetyInfo, MemorySSAUpdater &MSSAU);

static void eraseInstruction(Instruction &I, ICFLoopSafetyInfo &SafetyInfo,
                             MemorySSAUpdater &MSSAU);

static void moveInstructionBefore(Instruction &I, BasicBlock::iterator Dest,
                                  ICFLoopSafetyInfo &SafetyInfo,
                                  MemorySSAUpdater &MSSAU, ScalarEvolution *SE);

static void foreachMemoryAccess(MemorySSA *MSSA, Loop *L,
                                function_ref<void(Instruction *)> Fn);
// Pair of a set of must-alias pointers and a flag for whether there might be
// reads outside the set (in which case only loads may be promoted).
using PointersAndHasReadsOutsideSet =
    std::pair<SmallSetVector<Value *, 8>, bool>;
static SmallVector<PointersAndHasReadsOutsideSet, 0>
collectPromotionCandidates(MemorySSA *MSSA, AliasAnalysis *AA, Loop *L);

namespace {
struct LoopInvariantCodeMotion {};

struct LegacyLICMPass : public LoopPass {};
} // namespace

PreservedAnalyses LICMPass::run(Loop &L, LoopAnalysisManager &AM,
                                LoopStandardAnalysisResults &AR, LPMUpdater &) {}

void LICMPass::printPipeline(
    raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {}

PreservedAnalyses LNICMPass::run(LoopNest &LN, LoopAnalysisManager &AM,
                                 LoopStandardAnalysisResults &AR,
                                 LPMUpdater &) {}

void LNICMPass::printPipeline(
    raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {}

char LegacyLICMPass::ID = 0;
INITIALIZE_PASS_BEGIN(LegacyLICMPass, "licm", "Loop Invariant Code Motion",
                      false, false)
INITIALIZE_PASS_DEPENDENCY(LoopPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(MemorySSAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LazyBFIPass)
INITIALIZE_PASS_END(LegacyLICMPass, "licm", "Loop Invariant Code Motion", false,
                    false)

Pass *llvm::createLICMPass() {}

llvm::SinkAndHoistLICMFlags::SinkAndHoistLICMFlags(bool IsSink, Loop &L,
                                                   MemorySSA &MSSA)
    :{}

llvm::SinkAndHoistLICMFlags::SinkAndHoistLICMFlags(
    unsigned LicmMssaOptCap, unsigned LicmMssaNoAccForPromotionCap, bool IsSink,
    Loop &L, MemorySSA &MSSA)
    :{}

/// Hoist expressions out of the specified loop. Note that alias info for the
/// inner loop is not preserved, so it is not a good idea to run LICM multiple
/// times on one loop.
bool LoopInvariantCodeMotion::runOnLoop(Loop *L, AAResults *AA, LoopInfo *LI,
                                        DominatorTree *DT, AssumptionCache *AC,
                                        TargetLibraryInfo *TLI,
                                        TargetTransformInfo *TTI,
                                        ScalarEvolution *SE, MemorySSA *MSSA,
                                        OptimizationRemarkEmitter *ORE,
                                        bool LoopNestMode) {}

/// Walk the specified region of the CFG (defined by all blocks dominated by
/// the specified block, and that are in the current loop) in reverse depth
/// first order w.r.t the DominatorTree.  This allows us to visit uses before
/// definitions, allowing us to sink a loop body in one pass without iteration.
///
bool llvm::sinkRegion(DomTreeNode *N, AAResults *AA, LoopInfo *LI,
                      DominatorTree *DT, TargetLibraryInfo *TLI,
                      TargetTransformInfo *TTI, Loop *CurLoop,
                      MemorySSAUpdater &MSSAU, ICFLoopSafetyInfo *SafetyInfo,
                      SinkAndHoistLICMFlags &Flags,
                      OptimizationRemarkEmitter *ORE, Loop *OutermostLoop) {}

bool llvm::sinkRegionForLoopNest(DomTreeNode *N, AAResults *AA, LoopInfo *LI,
                                 DominatorTree *DT, TargetLibraryInfo *TLI,
                                 TargetTransformInfo *TTI, Loop *CurLoop,
                                 MemorySSAUpdater &MSSAU,
                                 ICFLoopSafetyInfo *SafetyInfo,
                                 SinkAndHoistLICMFlags &Flags,
                                 OptimizationRemarkEmitter *ORE) {}

namespace {
// This is a helper class for hoistRegion, allowing it to hoist control flow in
// order to hoist phis. The way this works is that we initially start hoisting
// to the loop preheader, and when we see a loop invariant branch we make note
// of this. When we then come to hoist an instruction that's conditional on
// such a branch we duplicate the branch and the relevant control flow, then
// hoist the instruction into the block corresponding to its original block in
// the duplicated control flow.
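//
// For illustration only (hypothetical IR; block and value names are made up):
// given a loop-invariant branch inside the loop such as
//
//   body:
//     br i1 %inv.cond, label %then, label %else
//   then:
//     br label %merge
//   else:
//     br label %merge
//   merge:
//     %p = phi i32 [ %a, %then ], [ %b, %else ]
//
// the branch and the then/else/merge blocks are duplicated between the
// preheader and the loop header, and %p (if %a and %b are loop invariant) can
// then be hoisted into the duplicated merge block.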
class ControlFlowHoister {};
} // namespace

/// Walk the specified region of the CFG (defined by all blocks dominated by
/// the specified block, and that are in the current loop) in depth first
/// order w.r.t the DominatorTree.  This allows us to visit definitions before
/// uses, allowing us to hoist a loop body in one pass without iteration.
///
bool llvm::hoistRegion(DomTreeNode *N, AAResults *AA, LoopInfo *LI,
                       DominatorTree *DT, AssumptionCache *AC,
                       TargetLibraryInfo *TLI, Loop *CurLoop,
                       MemorySSAUpdater &MSSAU, ScalarEvolution *SE,
                       ICFLoopSafetyInfo *SafetyInfo,
                       SinkAndHoistLICMFlags &Flags,
                       OptimizationRemarkEmitter *ORE, bool LoopNestMode,
                       bool AllowSpeculation) {}

// Return true if LI is invariant within scope of the loop. LI is invariant if
// CurLoop is dominated by an invariant.start representing the same memory
// location and size as the memory location LI loads from, and also the
// invariant.start has no uses.
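//
// For illustration only (hypothetical IR; names are made up):
//
//   entry:
//     %a = alloca i32
//     %inv = call ptr @llvm.invariant.start.p0(i64 4, ptr %a)
//     ...                         ; %inv has no uses, so no invariant.end
//   loop:
//     %v = load i32, ptr %a       ; same location and size as the
//                                 ; invariant.start -> treated as invariant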
static bool isLoadInvariantInLoop(LoadInst *LI, DominatorTree *DT,
                                  Loop *CurLoop) {}

namespace {
/// Return true if-and-only-if we know how to (mechanically) both hoist and
/// sink a given instruction out of a loop.  Does not address legality
/// concerns such as aliasing or speculation safety.
bool isHoistableAndSinkableInst(Instruction &I) {}
/// Return true if MSSA knows there are no MemoryDefs in the loop.
bool isReadOnly(const MemorySSAUpdater &MSSAU, const Loop *L) {}

/// Return true if I is the only Instruction with a MemoryAccess in L.
bool isOnlyMemoryAccess(const Instruction *I, const Loop *L,
                        const MemorySSAUpdater &MSSAU) {}
} // namespace

static MemoryAccess *getClobberingMemoryAccess(MemorySSA &MSSA,
                                               BatchAAResults &BAA,
                                               SinkAndHoistLICMFlags &Flags,
                                               MemoryUseOrDef *MA) {}

bool llvm::canSinkOrHoistInst(Instruction &I, AAResults *AA, DominatorTree *DT,
                              Loop *CurLoop, MemorySSAUpdater &MSSAU,
                              bool TargetExecutesOncePerLoop,
                              SinkAndHoistLICMFlags &Flags,
                              OptimizationRemarkEmitter *ORE) {}

/// Returns true if a PHINode is trivially replaceable with an Instruction.
/// This is true when all incoming values are that instruction.
/// This pattern occurs most often with LCSSA PHI nodes.
///
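/// For illustration only (hypothetical IR; names are made up): the LCSSA phi
/// below is trivially replaceable with %inst, because %inst is its only
/// incoming value:
///
///   exit:
///     %p = phi i32 [ %inst, %bb1 ], [ %inst, %bb2 ]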
static bool isTriviallyReplaceablePHI(const PHINode &PN, const Instruction &I) {}

/// Return true if the instruction is foldable in the loop.
static bool isFoldableInLoop(const Instruction &I, const Loop *CurLoop,
                             const TargetTransformInfo *TTI) {}

/// Return true if the only users of this instruction are outside of
/// the loop. If this is true, we can sink the instruction to the exit
/// blocks of the loop.
///
/// We also return true if the instruction could be folded away in lowering.
/// (e.g., a GEP can be folded into a load as an addressing mode in the loop).
static bool isNotUsedOrFoldableInLoop(const Instruction &I, const Loop *CurLoop,
                                      const LoopSafetyInfo *SafetyInfo,
                                      TargetTransformInfo *TTI,
                                      bool &FoldableInLoop, bool LoopNestMode) {}

static Instruction *cloneInstructionInExitBlock(
    Instruction &I, BasicBlock &ExitBlock, PHINode &PN, const LoopInfo *LI,
    const LoopSafetyInfo *SafetyInfo, MemorySSAUpdater &MSSAU) {}

static void eraseInstruction(Instruction &I, ICFLoopSafetyInfo &SafetyInfo,
                             MemorySSAUpdater &MSSAU) {}

static void moveInstructionBefore(Instruction &I, BasicBlock::iterator Dest,
                                  ICFLoopSafetyInfo &SafetyInfo,
                                  MemorySSAUpdater &MSSAU,
                                  ScalarEvolution *SE) {}

static Instruction *sinkThroughTriviallyReplaceablePHI(
    PHINode *TPN, Instruction *I, LoopInfo *LI,
    SmallDenseMap<BasicBlock *, Instruction *, 32> &SunkCopies,
    const LoopSafetyInfo *SafetyInfo, const Loop *CurLoop,
    MemorySSAUpdater &MSSAU) {}

static bool canSplitPredecessors(PHINode *PN, LoopSafetyInfo *SafetyInfo) {}

static void splitPredecessorsOfLoopExit(PHINode *PN, DominatorTree *DT,
                                        LoopInfo *LI, const Loop *CurLoop,
                                        LoopSafetyInfo *SafetyInfo,
                                        MemorySSAUpdater *MSSAU) {}

/// When an instruction is found to only be used outside of the loop, this
/// function moves it to the exit blocks and patches up SSA form as needed.
/// This method is guaranteed to remove the original instruction from its
/// position, and may either delete it or move it to outside of the loop.
///
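/// For illustration only (hypothetical IR; names are made up): %a below is
/// used only outside the loop, so it can be sunk into the exit block and the
/// LCSSA phi replaced:
///
///   loop:
///     %a = add i32 %x, %y
///     ...
///   exit:
///     %a.lcssa = phi i32 [ %a, %loop ]
///     call void @use(i32 %a.lcssa)
///
/// is turned into
///
///   exit:
///     %a.sunk = add i32 %x, %y
///     call void @use(i32 %a.sunk)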
static bool sink(Instruction &I, LoopInfo *LI, DominatorTree *DT,
                 const Loop *CurLoop, ICFLoopSafetyInfo *SafetyInfo,
                 MemorySSAUpdater &MSSAU, OptimizationRemarkEmitter *ORE) {}

/// When an instruction is found to use only loop-invariant operands and is
/// safe to hoist, this function is called to do the dirty work.
///
static void hoist(Instruction &I, const DominatorTree *DT, const Loop *CurLoop,
                  BasicBlock *Dest, ICFLoopSafetyInfo *SafetyInfo,
                  MemorySSAUpdater &MSSAU, ScalarEvolution *SE,
                  OptimizationRemarkEmitter *ORE) {}

/// Only sink or hoist an instruction if it is not a trapping instruction,
/// or if the instruction is known not to trap when moved to the preheader,
/// or if it is a trapping instruction and is guaranteed to execute.
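///
/// For illustration only (hypothetical IR; names are made up): even with both
/// operands loop invariant, the division below is not safe to hoist into the
/// preheader, because it executes only when %c is true and could trap
/// (division by zero) if executed unconditionally:
///
///   body:
///     br i1 %c, label %do.div, label %latch
///   do.div:
///     %q = sdiv i32 %a, %b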
static bool isSafeToExecuteUnconditionally(
    Instruction &Inst, const DominatorTree *DT, const TargetLibraryInfo *TLI,
    const Loop *CurLoop, const LoopSafetyInfo *SafetyInfo,
    OptimizationRemarkEmitter *ORE, const Instruction *CtxI,
    AssumptionCache *AC, bool AllowSpeculation) {}

namespace {
class LoopPromoter : public LoadAndStorePromoter {};

bool isNotCapturedBeforeOrInLoop(const Value *V, const Loop *L,
                                 DominatorTree *DT) {}

/// Return true if we can prove that a caller cannot inspect the object if an
/// unwind occurs inside the loop.
bool isNotVisibleOnUnwindInLoop(const Value *Object, const Loop *L,
                                DominatorTree *DT) {}

bool isThreadLocalObject(const Value *Object, const Loop *L, DominatorTree *DT,
                         TargetTransformInfo *TTI) {}

} // namespace

/// Try to promote memory values to scalars by sinking stores out of the
/// loop and moving loads to before the loop.  We do this by looping over
/// the stores in the loop, looking for stores to Must pointers which are
/// loop invariant.
///
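/// For illustration only (hypothetical IR; names are made up): with %p loop
/// invariant and nothing else in the loop reading or writing that location,
///
///   loop:
///     %v = load i32, ptr %p
///     %inc = add i32 %v, 1
///     store i32 %inc, ptr %p
///
/// the load is moved to the preheader, the value is carried around the loop in
/// registers (SSAUpdater inserts the needed phis), and a single store is
/// placed at the insertion point in each exit block.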
bool llvm::promoteLoopAccessesToScalars(
    const SmallSetVector<Value *, 8> &PointerMustAliases,
    SmallVectorImpl<BasicBlock *> &ExitBlocks,
    SmallVectorImpl<BasicBlock::iterator> &InsertPts,
    SmallVectorImpl<MemoryAccess *> &MSSAInsertPts, PredIteratorCache &PIC,
    LoopInfo *LI, DominatorTree *DT, AssumptionCache *AC,
    const TargetLibraryInfo *TLI, TargetTransformInfo *TTI, Loop *CurLoop,
    MemorySSAUpdater &MSSAU, ICFLoopSafetyInfo *SafetyInfo,
    OptimizationRemarkEmitter *ORE, bool AllowSpeculation,
    bool HasReadsOutsideSet) {}

static void foreachMemoryAccess(MemorySSA *MSSA, Loop *L,
                                function_ref<void(Instruction *)> Fn) {}

// The bool indicates whether there might be reads outside the set, in which
// case only loads may be promoted.
static SmallVector<PointersAndHasReadsOutsideSet, 0>
collectPromotionCandidates(MemorySSA *MSSA, AliasAnalysis *AA, Loop *L) {}

static bool pointerInvalidatedByLoop(MemorySSA *MSSA, MemoryUse *MU,
                                     Loop *CurLoop, Instruction &I,
                                     SinkAndHoistLICMFlags &Flags,
                                     bool InvariantGroup) {}

static bool pointerInvalidatedByBlock(BasicBlock &BB, MemorySSA &MSSA,
                                      MemoryUse &MU) {}

/// Try to simplify things like (A < INV_1 AND A < INV_2) into
/// (A < min(INV_1, INV_2)), if INV_1 and INV_2 are both loop invariants, their
/// minimum can be computed outside of the loop, and A is not loop-invariant.
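///
/// For illustration only (hypothetical IR; names are made up):
///
///   %c1 = icmp slt i32 %a, %inv1
///   %c2 = icmp slt i32 %a, %inv2
///   %and = and i1 %c1, %c2
///
/// is turned into, with the min computed in the preheader:
///
///   %min = call i32 @llvm.smin.i32(i32 %inv1, i32 %inv2)
///   %cmp = icmp slt i32 %a, %min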
static bool hoistMinMax(Instruction &I, Loop &L, ICFLoopSafetyInfo &SafetyInfo,
                        MemorySSAUpdater &MSSAU) {}

/// Reassociate gep (gep ptr, idx1), idx2 to gep (gep ptr, idx2), idx1 if
/// this allows hoisting the inner GEP.
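///
/// For illustration only (hypothetical IR; names are made up), with %ptr and
/// %idx2 loop invariant but %idx1 loop-variant:
///
///   %gep  = getelementptr i32, ptr %ptr, i64 %idx1
///   %gep2 = getelementptr i32, ptr %gep, i64 %idx2
///
/// is turned into
///
///   %inv  = getelementptr i32, ptr %ptr, i64 %idx2   ; hoisted to preheader
///   %gep2 = getelementptr i32, ptr %inv, i64 %idx1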
static bool hoistGEP(Instruction &I, Loop &L, ICFLoopSafetyInfo &SafetyInfo,
                     MemorySSAUpdater &MSSAU, AssumptionCache *AC,
                     DominatorTree *DT) {}

/// Try to turn things like "LV + C1 < C2" into "LV < C2 - C1". Here
/// C1 and C2 are loop invariants and LV is loop-variant.
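///
/// For illustration only (hypothetical IR; names are made up, and the sketch
/// assumes the arithmetic is known not to overflow):
///
///   %add = add nsw i32 %lv, %c1
///   %cmp = icmp slt i32 %add, %c2
///
/// is turned into, with the subtraction hoisted to the preheader:
///
///   %inv = sub nsw i32 %c2, %c1
///   %cmp = icmp slt i32 %lv, %inv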
static bool hoistAdd(ICmpInst::Predicate Pred, Value *VariantLHS,
                     Value *InvariantRHS, ICmpInst &ICmp, Loop &L,
                     ICFLoopSafetyInfo &SafetyInfo, MemorySSAUpdater &MSSAU,
                     AssumptionCache *AC, DominatorTree *DT) {}

/// Try to reassociate and hoist the following two patterns:
/// LV - C1 < C2 --> LV < C1 + C2,
/// C1 - LV < C2 --> LV > C1 - C2.
static bool hoistSub(ICmpInst::Predicate Pred, Value *VariantLHS,
                     Value *InvariantRHS, ICmpInst &ICmp, Loop &L,
                     ICFLoopSafetyInfo &SafetyInfo, MemorySSAUpdater &MSSAU,
                     AssumptionCache *AC, DominatorTree *DT) {}

/// Reassociate and hoist add/sub expressions.
static bool hoistAddSub(Instruction &I, Loop &L, ICFLoopSafetyInfo &SafetyInfo,
                        MemorySSAUpdater &MSSAU, AssumptionCache *AC,
                        DominatorTree *DT) {}

static bool isReassociableOp(Instruction *I, unsigned IntOpcode,
                             unsigned FPOpcode) {}

/// Try to reassociate expressions like ((A1 * B1) + (A2 * B2) + ...) * C where
/// A1, A2, ... and C are loop invariants into expressions like
/// ((A1 * C * B1) + (A2 * C * B2) + ...) and hoist the (A1 * C), (A2 * C), ...
/// invariant expressions. This function returns true only if hoisting has
/// actually occurred.
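///
/// For illustration only (hypothetical IR; names are made up, and the sketch
/// assumes reassociation-safe fast-math flags and loop-invariant %a1, %a2,
/// %c):
///
///   %m1  = fmul reassoc float %a1, %b1
///   %m2  = fmul reassoc float %a2, %b2
///   %s   = fadd reassoc float %m1, %m2
///   %res = fmul reassoc float %s, %c
///
/// is turned into, with %a1 * %c and %a2 * %c hoisted to the preheader:
///
///   %h1  = fmul reassoc float %a1, %c
///   %h2  = fmul reassoc float %a2, %c
///   %m1  = fmul reassoc float %h1, %b1
///   %m2  = fmul reassoc float %h2, %b2
///   %res = fadd reassoc float %m1, %m2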
static bool hoistMulAddAssociation(Instruction &I, Loop &L,
                                   ICFLoopSafetyInfo &SafetyInfo,
                                   MemorySSAUpdater &MSSAU, AssumptionCache *AC,
                                   DominatorTree *DT) {}

/// Reassociate associative binary expressions of the form
///
/// 1. "(LV op C1) op C2" ==> "LV op (C1 op C2)"
/// 2. "(C1 op LV) op C2" ==> "LV op (C1 op C2)"
/// 3. "C2 op (C1 op LV)" ==> "LV op (C1 op C2)"
/// 4. "C2 op (LV op C1)" ==> "LV op (C1 op C2)"
///
/// where op is an associative BinOp, LV is a loop variant, and C1 and C2 are
/// loop invariants that we want to hoist, noting that associativity implies
/// commutativity.
static bool hoistBOAssociation(Instruction &I, Loop &L,
                               ICFLoopSafetyInfo &SafetyInfo,
                               MemorySSAUpdater &MSSAU, AssumptionCache *AC,
                               DominatorTree *DT) {}

static bool hoistArithmetics(Instruction &I, Loop &L,
                             ICFLoopSafetyInfo &SafetyInfo,
                             MemorySSAUpdater &MSSAU, AssumptionCache *AC,
                             DominatorTree *DT) {}

/// Little predicate that returns true if the specified basic block is in
/// a subloop of the current one, not the current one itself.
///
static bool inSubLoop(BasicBlock *BB, Loop *CurLoop, LoopInfo *LI) {}