llvm/lib/Transforms/Utils/ScalarEvolutionExpander.cpp

//===- ScalarEvolutionExpander.cpp - Scalar Evolution Analysis ------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the scalar evolution expander,
// which is used to generate the code corresponding to a given scalar evolution
// expression.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/LoopUtils.h"

#ifdef LLVM_ENABLE_ABI_BREAKING_CHECKS
#define SCEV_DEBUG_WITH_TYPE(TYPE, X) DEBUG_WITH_TYPE(TYPE, X)
#else
#define SCEV_DEBUG_WITH_TYPE(TYPE, X)
#endif

using namespace llvm;

cl::opt<unsigned> llvm::SCEVCheapExpansionBudget(
    "scev-cheap-expansion-budget", cl::Hidden, cl::init(4),
    cl::desc("When performing SCEV expansion only if it is cheap to do, this "
             "controls the budget that is considered cheap (default = 4)"));

using namespace PatternMatch;

PoisonFlags::PoisonFlags(const Instruction *I) {}

void PoisonFlags::apply(Instruction *I) {}

/// ReuseOrCreateCast - Arrange for there to be a cast of V to Ty at IP,
/// reusing an existing cast if a suitable one (i.e., one that dominates IP)
/// exists, or creating a new one otherwise.
Value *SCEVExpander::ReuseOrCreateCast(Value *V, Type *Ty,
                                       Instruction::CastOps Op,
                                       BasicBlock::iterator IP) {}
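
// An illustrative sketch of the reuse strategy described above. The helper
// name is hypothetical and this is not the in-tree implementation: walk the
// existing users of V looking for a cast with the same opcode and type that
// dominates IP, and only materialize a fresh CastInst when none exists.
static Value *reuseOrCreateCastSketch(Value *V, Type *Ty,
                                      Instruction::CastOps Op,
                                      BasicBlock::iterator IP,
                                      DominatorTree &DT) {
  for (User *U : V->users())
    if (auto *CI = dyn_cast<CastInst>(U))
      if (CI->getOpcode() == Op && CI->getType() == Ty &&
          DT.dominates(CI, &*IP))
        return CI; // A suitable dominating cast already exists; reuse it.
  // No reusable cast: create a new one at IP.
  Instruction *NewCast = CastInst::Create(Op, V, Ty, V->getName() + ".cast");
  NewCast->insertBefore(*IP->getParent(), IP);
  return NewCast;
}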

BasicBlock::iterator
SCEVExpander::findInsertPointAfter(Instruction *I,
                                   Instruction *MustDominate) const {}

BasicBlock::iterator
SCEVExpander::GetOptimalInsertionPointForCastOf(Value *V) const {}

/// InsertNoopCastOfTo - Insert a cast of V to the specified type,
/// which must be possible with a noop cast, doing what we can to share
/// the casts.
Value *SCEVExpander::InsertNoopCastOfTo(Value *V, Type *Ty) {}

/// InsertBinop - Insert the specified binary operator, doing a small amount
/// of work to avoid inserting an obviously redundant operation, and hoisting
/// to an outer loop when the opportunity is there and it is safe.
Value *SCEVExpander::InsertBinop(Instruction::BinaryOps Opcode,
                                 Value *LHS, Value *RHS,
                                 SCEV::NoWrapFlags Flags, bool IsSafeToHoist) {}
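
// A sketch of the redundancy check mentioned above; the helper name and scan
// limit are illustrative, not the in-tree code. Before emitting a new binop,
// look a few instructions back from the insertion point for an identical one
// to reuse. (A faithful version would also reconcile nuw/nsw/exact flags.)
static Value *findExistingBinopSketch(Instruction::BinaryOps Opcode,
                                      Value *LHS, Value *RHS,
                                      BasicBlock::iterator IP,
                                      unsigned ScanLimit = 6) {
  BasicBlock::iterator It = IP, Begin = IP->getParent()->begin();
  for (unsigned I = 0; I != ScanLimit && It != Begin; ++I) {
    --It;
    if (It->getOpcode() == Opcode && It->getNumOperands() == 2 &&
        It->getOperand(0) == LHS && It->getOperand(1) == RHS)
      return &*It; // Reuse the earlier, obviously equivalent operation.
  }
  return nullptr; // Caller falls back to creating the binop at IP.
}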

/// expandAddToGEP - Expand an addition expression with a pointer type into
/// a GEP instead of using ptrtoint+arithmetic+inttoptr. This helps
/// BasicAliasAnalysis and other passes analyze the result. See the rules
/// for getelementptr vs. inttoptr in
/// http://llvm.org/docs/LangRef.html#pointeraliasing
/// for details.
///
/// Design note: The correctness of using getelementptr here depends on
/// ScalarEvolution not recognizing inttoptr and ptrtoint operators, as
/// they may introduce pointer arithmetic which may not be safely converted
/// into getelementptr.
///
/// Design note: It might seem desirable for this function to be more
/// loop-aware. If some of the indices are loop-invariant while others
/// aren't, it might seem desirable to emit multiple GEPs, keeping the
/// loop-invariant portions of the overall computation outside the loop.
/// However, there are a few reasons this is not done here. Hoisting simple
/// arithmetic is a low-level optimization that often isn't very
/// important until late in the optimization process. In fact, passes
/// like InstructionCombining will combine GEPs, even if it means
/// pushing loop-invariant computation down into loops, so even if the
/// GEPs were split here, the work would quickly be undone. The
/// LoopStrengthReduction pass, which is usually run quite late (and
/// after the last InstructionCombining pass), takes care of hoisting
/// loop-invariant portions of expressions, after considering what
/// can be folded using target addressing modes.
///
Value *SCEVExpander::expandAddToGEP(const SCEV *Offset, Value *V,
                                    SCEV::NoWrapFlags Flags) {}
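
// Given the design notes above, the emitted IR itself is small: the byte
// offset is applied with a single i8 GEP rather than ptrtoint+add+inttoptr.
// A minimal sketch, assuming the offset has already been expanded to a Value
// and that the IRBuilder in use provides CreatePtrAdd (the helper name is
// hypothetical):
static Value *emitPtrAddSketch(IRBuilderBase &Builder, Value *BasePtr,
                               Value *ByteOffset) {
  // Same arithmetic, but in a form BasicAliasAnalysis can reason about.
  return Builder.CreatePtrAdd(BasePtr, ByteOffset, "scevgep");
}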

/// PickMostRelevantLoop - Given two loops pick the one that's most relevant for
/// SCEV expansion. If they are nested, this is the most nested. If they are
/// neighboring, pick the later.
static const Loop *PickMostRelevantLoop(const Loop *A, const Loop *B,
                                        DominatorTree &DT) {}

/// getRelevantLoop - Get the most relevant loop associated with the given
/// expression, according to PickMostRelevantLoop.
const Loop *SCEVExpander::getRelevantLoop(const SCEV *S) {}

namespace {

/// LoopCompare - Compare loops by PickMostRelevantLoop.
class LoopCompare {};

} // namespace

Value *SCEVExpander::visitAddExpr(const SCEVAddExpr *S) {}

Value *SCEVExpander::visitMulExpr(const SCEVMulExpr *S) {}

Value *SCEVExpander::visitUDivExpr(const SCEVUDivExpr *S) {}

/// Determine if this is a well-behaved chain of instructions leading back to
/// the PHI. If so, it may be reused by expanded expressions.
bool SCEVExpander::isNormalAddRecExprPHI(PHINode *PN, Instruction *IncV,
                                         const Loop *L) {}

/// getIVIncOperand returns an induction variable increment's induction
/// variable operand.
///
/// If allowScale is set, any type of GEP is allowed as long as the nonIV
/// operands dominate InsertPos.
///
/// If allowScale is not set, ensure that a GEP increment conforms to one of the
/// simple patterns generated by getAddRecExprPHILiterally and
/// expandAddToGEP. If the pattern isn't recognized, return nullptr.
Instruction *SCEVExpander::getIVIncOperand(Instruction *IncV,
                                           Instruction *InsertPos,
                                           bool allowScale) {}
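
// The simple patterns referred to above, sketched (hypothetical helper, and
// simplified: the real routine also looks through casts and checks that the
// non-IV operands dominate InsertPos). In each recognized shape, operand 0
// is the induction variable.
static Instruction *ivIncOperandSketch(Instruction *IncV) {
  switch (IncV->getOpcode()) {
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::GetElementPtr:
    return dyn_cast<Instruction>(IncV->getOperand(0));
  default:
    return nullptr; // Not one of the recognized increment patterns.
  }
}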

/// If the insert point of the current builder or any of the builders on the
/// stack of saved builders has 'I' as its insert point, update it to point to
/// the instruction after 'I'.  This is intended to be used when the instruction
/// 'I' is being moved.  If this fixup is not done and 'I' is moved to a
/// different block, the inconsistent insert point (with a mismatched
/// Instruction and Block) can lead to an instruction being inserted in a block
/// other than its parent.
void SCEVExpander::fixupInsertPoints(Instruction *I) {}

/// hoistIVInc - Attempt to hoist a simple IV increment above InsertPos to make
/// it available to other uses in this loop. Recursively hoist any operands,
/// until we reach a value that dominates InsertPos.
bool SCEVExpander::hoistIVInc(Instruction *IncV, Instruction *InsertPos,
                              bool RecomputePoisonFlags) {}

bool SCEVExpander::canReuseFlagsFromOriginalIVInc(PHINode *OrigPhi,
                                                  PHINode *WidePhi,
                                                  Instruction *OrigInc,
                                                  Instruction *WideInc) {}

/// Determine if this cyclic phi is in a form that would have been generated by
/// LSR. We don't care whether the phi was actually expanded in this pass, as
/// long as it is in a low-cost form, for example with no implied
/// multiplication. This should match any patterns generated by
/// getAddRecExprPHILiterally and expandAddToGEP.
bool SCEVExpander::isExpandedAddRecExprPHI(PHINode *PN, Instruction *IncV,
                                           const Loop *L) {}

/// expandIVInc - Expand an IV increment at Builder's current InsertPos.
/// Typically this is the LatchBlock terminator or IVIncInsertPos, but we may
/// need to materialize IV increments elsewhere to handle difficult situations.
Value *SCEVExpander::expandIVInc(PHINode *PN, Value *StepV, const Loop *L,
                                 bool useSubtract) {}
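
// The two increment shapes described above, sketched (hypothetical helper;
// wrap-flag and insert-point handling omitted): pointer IVs step via a GEP,
// integer IVs via an add, or a sub when the step had to be negated. The
// useSubtract case only arises for integer IVs.
static Value *expandIVIncSketch(IRBuilderBase &Builder, PHINode *PN,
                                Value *StepV, bool UseSubtract) {
  if (PN->getType()->isPointerTy())
    return Builder.CreatePtrAdd(PN, StepV, "scevgep");
  return UseSubtract ? Builder.CreateSub(PN, StepV, "ivdec")
                     : Builder.CreateAdd(PN, StepV, "ivinc");
}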

/// Check whether we can cheaply express the requested SCEV in terms of
/// the available PHI SCEV by truncation and/or inversion of the step.
static bool canBeCheaplyTransformed(ScalarEvolution &SE,
                                    const SCEVAddRecExpr *Phi,
                                    const SCEVAddRecExpr *Requested,
                                    bool &InvertStep) {}

static bool IsIncrementNSW(ScalarEvolution &SE, const SCEVAddRecExpr *AR) {}

static bool IsIncrementNUW(ScalarEvolution &SE, const SCEVAddRecExpr *AR) {}
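
// Both checks above can use the same trick: the increment is NSW (resp. NUW)
// exactly when sign- (resp. zero-) extending commutes with the add, which
// SCEV's uniquing makes a cheap pointer comparison. A sketch for the signed
// case (hypothetical name; it mirrors rather than reproduces the real code):
static bool incrementIsNSWSketch(ScalarEvolution &SE,
                                 const SCEVAddRecExpr *AR) {
  auto *ITy = dyn_cast<IntegerType>(AR->getType());
  if (!ITy)
    return false;
  Type *WideTy = IntegerType::get(ITy->getContext(), ITy->getBitWidth() * 2);
  const SCEV *Step = AR->getStepRecurrence(SE);
  const SCEV *OpAfterExtend = SE.getAddExpr(
      SE.getSignExtendExpr(Step, WideTy), SE.getSignExtendExpr(AR, WideTy));
  const SCEV *ExtendAfterOp =
      SE.getSignExtendExpr(SE.getAddExpr(AR, Step), WideTy);
  return OpAfterExtend == ExtendAfterOp; // No overflow iff the two agree.
}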

/// getAddRecExprPHILiterally - Helper for expandAddRecExprLiterally. Expand
/// the base addrec, which is the addrec without any non-loop-dominating
/// values, and return the PHI.
PHINode *
SCEVExpander::getAddRecExprPHILiterally(const SCEVAddRecExpr *Normalized,
                                        const Loop *L, Type *&TruncTy,
                                        bool &InvertStep) {}

Value *SCEVExpander::expandAddRecExprLiterally(const SCEVAddRecExpr *S) {}

Value *SCEVExpander::visitAddRecExpr(const SCEVAddRecExpr *S) {}

Value *SCEVExpander::visitPtrToIntExpr(const SCEVPtrToIntExpr *S) {}

Value *SCEVExpander::visitTruncateExpr(const SCEVTruncateExpr *S) {}

Value *SCEVExpander::visitZeroExtendExpr(const SCEVZeroExtendExpr *S) {}

Value *SCEVExpander::visitSignExtendExpr(const SCEVSignExtendExpr *S) {}

Value *SCEVExpander::expandMinMaxExpr(const SCEVNAryExpr *S,
                                      Intrinsic::ID IntrinID, Twine Name,
                                      bool IsSequential) {}
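
// A sketch of the non-sequential expansion (hypothetical helper): left-fold
// the operands through the requested intrinsic, so smax(a, b, c) becomes
// llvm.smax(llvm.smax(a, b), c). The sequential form (IsSequential) needs
// extra care so that later operands cannot propagate poison.
static Value *foldMinMaxSketch(IRBuilderBase &Builder, Intrinsic::ID IntrinID,
                               ArrayRef<Value *> Ops, const Twine &Name) {
  Value *Acc = Ops.front();
  for (Value *Op : Ops.drop_front())
    Acc = Builder.CreateBinaryIntrinsic(IntrinID, Acc, Op,
                                        /*FMFSource=*/nullptr, Name);
  return Acc;
}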

Value *SCEVExpander::visitSMaxExpr(const SCEVSMaxExpr *S) {}

Value *SCEVExpander::visitUMaxExpr(const SCEVUMaxExpr *S) {}

Value *SCEVExpander::visitSMinExpr(const SCEVSMinExpr *S) {}

Value *SCEVExpander::visitUMinExpr(const SCEVUMinExpr *S) {}

Value *SCEVExpander::visitSequentialUMinExpr(const SCEVSequentialUMinExpr *S) {}

Value *SCEVExpander::visitVScale(const SCEVVScale *S) {}

Value *SCEVExpander::expandCodeFor(const SCEV *SH, Type *Ty,
                                   BasicBlock::iterator IP) {}

Value *SCEVExpander::expandCodeFor(const SCEV *SH, Type *Ty) {}

Value *SCEVExpander::FindValueInExprValueMap(
    const SCEV *S, const Instruction *InsertPt,
    SmallVectorImpl<Instruction *> &DropPoisonGeneratingInsts) {}

// The expansion of SCEV will either reuse a previous Value in ExprValueMap,
// or expand the SCEV literally. Specifically, if the expansion is in LSRMode
// and the SCEV contains any scAddRecExpr subexpression, it will be expanded
// literally, to prevent LSR's transformed SCEV from being reverted. Otherwise,
// the expansion will try to reuse Value from ExprValueMap, and only when it
// fails, expand the SCEV literally.
Value *SCEVExpander::expand(const SCEV *S) {}
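
// In outline, the decision described above looks roughly like this
// (illustrative pseudocode, not the exact control flow):
//
//   bool CanReuse = !LSRMode || !SCEVExprContains(S, [](const SCEV *X) {
//                     return isa<SCEVAddRecExpr>(X);
//                   });
//   if (CanReuse)
//     if (Value *V = FindValueInExprValueMap(S, InsertPt, DropPoisonInsts))
//       return V;
//   return visit(S); // literal expansion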

void SCEVExpander::rememberInstruction(Value *I) {}

void SCEVExpander::rememberFlags(Instruction *I) {}

void SCEVExpander::replaceCongruentIVInc(
    PHINode *&Phi, PHINode *&OrigPhi, Loop *L, const DominatorTree *DT,
    SmallVectorImpl<WeakTrackingVH> &DeadInsts) {}

/// replaceCongruentIVs - Check for congruent phis in this loop header and
/// replace them with their most canonical representative. Return the number of
/// phis eliminated.
///
/// This does not depend on any SCEVExpander state but should be used in
/// the same context that SCEVExpander is used.
unsigned
SCEVExpander::replaceCongruentIVs(Loop *L, const DominatorTree *DT,
                                  SmallVectorImpl<WeakTrackingVH> &DeadInsts,
                                  const TargetTransformInfo *TTI) {}

bool SCEVExpander::hasRelatedExistingExpansion(const SCEV *S,
                                               const Instruction *At,
                                               Loop *L) {}

template<typename T> static InstructionCost costAndCollectOperands(
  const SCEVOperand &WorkItem, const TargetTransformInfo &TTI,
  TargetTransformInfo::TargetCostKind CostKind,
  SmallVectorImpl<SCEVOperand> &Worklist) {}

bool SCEVExpander::isHighCostExpansionHelper(
    const SCEVOperand &WorkItem, Loop *L, const Instruction &At,
    InstructionCost &Cost, unsigned Budget, const TargetTransformInfo &TTI,
    SmallPtrSetImpl<const SCEV *> &Processed,
    SmallVectorImpl<SCEVOperand> &Worklist) {}

Value *SCEVExpander::expandCodeForPredicate(const SCEVPredicate *Pred,
                                            Instruction *IP) {}

Value *SCEVExpander::expandComparePredicate(const SCEVComparePredicate *Pred,
                                            Instruction *IP) {}

Value *SCEVExpander::generateOverflowCheck(const SCEVAddRecExpr *AR,
                                           Instruction *Loc, bool Signed) {}

Value *SCEVExpander::expandWrapPredicate(const SCEVWrapPredicate *Pred,
                                         Instruction *IP) {}

Value *SCEVExpander::expandUnionPredicate(const SCEVUnionPredicate *Union,
                                          Instruction *IP) {}
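
// A sketch of the union expansion (hypothetical helper): a union predicate
// fails if any member fails, so its runtime check is simply the OR of the
// member checks.
static Value *orPredicateChecksSketch(IRBuilderBase &Builder,
                                      ArrayRef<Value *> Checks) {
  Value *Result = Builder.getFalse(); // An empty union trivially holds.
  for (Value *Check : Checks)
    Result = Builder.CreateOr(Result, Check);
  return Result;
}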

Value *SCEVExpander::fixupLCSSAFormFor(Value *V) {}

namespace {
// Search for a SCEV subexpression that is not safe to expand.  Any expression
// that may expand to a !isSafeToSpeculativelyExecute value is unsafe, namely
// UDiv expressions. We don't know if the UDiv is derived from an IR divide
// instruction, but the important thing is that we prove the denominator is
// nonzero before expansion.
//
// IVUsers already checks that IV-derived expressions are safe. So this check is
// only needed when the expression includes some subexpression that is not IV
// derived.
//
// Currently, we only allow division by a value provably non-zero here.
//
// We cannot generally expand recurrences unless the step dominates the loop
// header. The expander handles the special case of affine recurrences by
// scaling the recurrence outside the loop, but this technique isn't generally
// applicable. Expanding a nested recurrence outside a loop requires computing
// binomial coefficients. This could be done, but the recurrence has to be in a
// perfectly reduced form, which can't be guaranteed.
struct SCEVFindUnsafe {};
} // namespace
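
// The UDiv half of the check above can be phrased as a single SCEV scan; a
// sketch (hypothetical helper, and simplified: the real visitor also rejects
// addrecs whose step is not available above the loop):
static bool containsUnsafeUDivSketch(ScalarEvolution &SE, const SCEV *S) {
  return SCEVExprContains(S, [&](const SCEV *X) {
    auto *D = dyn_cast<SCEVUDivExpr>(X);
    return D && !SE.isKnownNonZero(D->getRHS());
  });
}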

bool SCEVExpander::isSafeToExpand(const SCEV *S) const {}

bool SCEVExpander::isSafeToExpandAt(const SCEV *S,
                                    const Instruction *InsertionPoint) const {}

void SCEVExpanderCleaner::cleanup() {}