llvm/lib/Transforms/InstCombine/InstructionCombining.cpp

//===- InstructionCombining.cpp - Combine multiple instructions -----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// InstructionCombining - Combine instructions to form fewer, simple
// instructions.  This pass does not modify the CFG.  This pass is where
// algebraic simplification happens.
//
// This pass combines things like:
//    %Y = add i32 %X, 1
//    %Z = add i32 %Y, 1
// into:
//    %Z = add i32 %X, 2
//
// This is a simple worklist-driven algorithm.
//
// This pass guarantees that the following canonicalizations are performed on
// the program:
//    1. If a binary operator has a constant operand, it is moved to the RHS
//    2. Bitwise operators with constant operands are always grouped so that
//       shifts are performed first, then or's, then and's, then xor's.
//    3. Compare instructions are converted from <,>,<=,>= to ==,!= if possible
//    4. All cmp instructions on boolean values are replaced with logical ops
//    5. add X, X is represented as (X*2) => (X << 1)
//    6. Multiplies with a power-of-two constant argument are transformed into
//       shifts.
//   ... etc.
//
//===----------------------------------------------------------------------===//

#include "InstCombineInternal.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LazyBlockFrequencyInfo.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/Analysis/TargetFolder.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/Utils/Local.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DIBuilder.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/EHPersonalities.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/InitializePasses.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/DebugCounter.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/InstCombine/InstCombine.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <utility>

#define DEBUG_TYPE "instcombine"
#include "llvm/Transforms/Utils/InstructionWorklist.h"

using namespace llvm;
using namespace llvm::PatternMatch;

STATISTIC(NumWorklistIterations,
          "Number of instruction combining iterations performed");
STATISTIC(NumOneIteration, "Number of functions with one iteration");
STATISTIC(NumTwoIterations, "Number of functions with two iterations");
STATISTIC(NumThreeIterations, "Number of functions with three iterations");
STATISTIC(NumFourOrMoreIterations,
          "Number of functions with four or more iterations");

STATISTIC(NumCombined , "Number of insts combined");
STATISTIC(NumConstProp, "Number of constant folds");
STATISTIC(NumDeadInst , "Number of dead inst eliminated");
STATISTIC(NumSunkInst , "Number of instructions sunk");
STATISTIC(NumExpand,    "Number of expansions");
STATISTIC(NumFactor   , "Number of factorizations");
STATISTIC(NumReassoc  , "Number of reassociations");
DEBUG_COUNTER(VisitCounter, "instcombine-visit",
              "Controls which instructions are visited");

static cl::opt<bool>
EnableCodeSinking("instcombine-code-sinking", cl::desc("Enable code sinking"),
                                              cl::init(true));

static cl::opt<unsigned> MaxSinkNumUsers(
    "instcombine-max-sink-users", cl::init(32),
    cl::desc("Maximum number of undroppable users for instruction sinking"));

static cl::opt<unsigned>
MaxArraySize("instcombine-maxarray-size", cl::init(1024),
             cl::desc("Maximum array size considered when doing a combine"));

// FIXME: Remove this flag when it is no longer necessary to convert
// llvm.dbg.declare to avoid inaccurate debug info. Setting this to false
// increases variable availability at the cost of accuracy. Variables that
// cannot be promoted by mem2reg or SROA will be described as living in memory
// for their entire lifetime. However, passes like DSE and instcombine can
// delete stores to the alloca, leading to misleading and inaccurate debug
// information. This flag can be removed when those passes are fixed.
static cl::opt<unsigned> ShouldLowerDbgDeclare("instcombine-lower-dbg-declare",
                                               cl::Hidden, cl::init(true));

std::optional<Instruction *>
InstCombiner::targetInstCombineIntrinsic(IntrinsicInst &II) {}

std::optional<Value *> InstCombiner::targetSimplifyDemandedUseBitsIntrinsic(
    IntrinsicInst &II, APInt DemandedMask, KnownBits &Known,
    bool &KnownBitsComputed) {}

std::optional<Value *> InstCombiner::targetSimplifyDemandedVectorEltsIntrinsic(
    IntrinsicInst &II, APInt DemandedElts, APInt &PoisonElts,
    APInt &PoisonElts2, APInt &PoisonElts3,
    std::function<void(Instruction *, unsigned, APInt, APInt &)>
        SimplifyAndSetOp) {}

bool InstCombiner::isValidAddrSpaceCast(unsigned FromAS, unsigned ToAS) const {}

Value *InstCombinerImpl::EmitGEPOffset(GEPOperator *GEP, bool RewriteGEP) {}

/// Legal integers and common types are considered desirable. This is used to
/// avoid creating instructions with types that may not be supported well by
/// the backend.
/// NOTE: This treats i8, i16 and i32 specially because they are common
///       types in frontend languages.
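///
/// For example (illustrative): i8, i16, and i32 are treated as desirable even
/// if the target's only legal integer type is i64, while an unusual width
/// such as i13 is not.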
bool InstCombinerImpl::isDesirableIntType(unsigned BitWidth) const {}

/// Return true if it is desirable to convert an integer computation from a
/// given bit width to a new bit width.
/// We don't want to convert from a legal or desirable type (like i8) to an
/// illegal type or from a smaller to a larger illegal type. A width of '1'
/// is always treated as a desirable type because i1 is a fundamental type in
/// IR, and there are many specialized optimizations for i1 types.
/// Common/desirable widths are equally treated as legal to convert to, in
/// order to open up more combining opportunities.
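///
/// For example (illustrative, on a target where i32 is legal): converting
/// from width 39 to 32 is desirable, while converting from 32 to 39 is not.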
bool InstCombinerImpl::shouldChangeType(unsigned FromWidth,
                                        unsigned ToWidth) const {}

/// Return true if it is desirable to convert a computation from 'From' to 'To'.
/// We don't want to convert from a legal to an illegal type or from a smaller
/// to a larger illegal type. i1 is always treated as a legal type because it is
/// a fundamental type in IR, and there are many specialized optimizations for
/// i1 types.
bool InstCombinerImpl::shouldChangeType(Type *From, Type *To) const {}

// Return true if No Signed Wrap should be maintained for I.
// The No Signed Wrap flag can be kept if the operation "B (I.getOpcode) C",
// where both B and C should be ConstantInts, results in a constant that does
// not overflow. This function only handles the Add and Sub opcodes. For
// all other opcodes, the function conservatively returns false.
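//
// For example (illustrative, i8): when reassociating (X +nsw 100) + 20, the
// folded constant 100 + 20 == 120 fits in i8, so nsw can be kept; for
// (X +nsw 100) + 50, 100 + 50 == 150 overflows i8, so nsw must be dropped.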
static bool maintainNoSignedWrap(BinaryOperator &I, Value *B, Value *C) {}

static bool hasNoUnsignedWrap(BinaryOperator &I) {}

static bool hasNoSignedWrap(BinaryOperator &I) {}

/// Conservatively clears subclassOptionalData after a reassociation or
/// commutation. We preserve fast-math flags when applicable.
static void ClearSubclassDataAfterReassociation(BinaryOperator &I) {}

/// Combine constant operands of associative operations either before or after a
/// cast to eliminate one of the associative operations:
/// (op (cast (op X, C2)), C1) --> (cast (op X, op (C1, C2)))
/// (op (cast (op X, C2)), C1) --> (op (cast X), op (C1, C2))
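///
/// For example (illustrative), with 'op' = and and a trunc cast:
///   and (trunc (and i64 %x, 255) to i32), 15
///     --> and (trunc %x to i32), 15    ; because 255 & 15 == 15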
static bool simplifyAssocCastAssoc(BinaryOperator *BinOp1,
                                   InstCombinerImpl &IC) {}

// Simplifies IntToPtr/PtrToInt RoundTrip Cast.
// inttoptr ( ptrtoint (x) ) --> x
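//
// For example (illustrative, assuming the integer type is as wide as a
// pointer):
//   %i = ptrtoint ptr %p to i64
//   %q = inttoptr i64 %i to ptr
// allows all uses of %q to be replaced by %p.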
Value *InstCombinerImpl::simplifyIntToPtrRoundTripCast(Value *Val) {}

/// This performs a few simplifications for operators that are associative or
/// commutative:
///
///  Commutative operators:
///
///  1. Order operands such that they are listed from right (least complex) to
///     left (most complex).  This puts constants before unary operators before
///     binary operators.
///
///  Associative operators:
///
///  2. Transform: "(A op B) op C" ==> "A op (B op C)" if "B op C" simplifies.
///  3. Transform: "A op (B op C)" ==> "(A op B) op C" if "A op B" simplifies.
///
///  Associative and commutative operators:
///
///  4. Transform: "(A op B) op C" ==> "(C op A) op B" if "C op A" simplifies.
///  5. Transform: "A op (B op C)" ==> "B op (C op A)" if "C op A" simplifies.
///  6. Transform: "(A op C1) op (B op C2)" ==> "(A op B) op (C1 op C2)"
///     if C1 and C2 are constants.
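///
///  For example (illustrative), transform 6 with 'op' = add:
///     "(X + 7) + (Y + 5)"  ==>  "(X + Y) + 12"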
bool InstCombinerImpl::SimplifyAssociativeOrCommutative(BinaryOperator &I) {}

/// Return whether "X LOp (Y ROp Z)" is always equal to
/// "(X LOp Y) ROp (X LOp Z)".
static bool leftDistributesOverRight(Instruction::BinaryOps LOp,
                                     Instruction::BinaryOps ROp) {}

/// Return whether "(X LOp Y) ROp Z" is always equal to
/// "(X ROp Z) LOp (Y ROp Z)".
static bool rightDistributesOverLeft(Instruction::BinaryOps LOp,
                                     Instruction::BinaryOps ROp) {}

/// This function returns the identity value for the given opcode, which can
/// be used to factor patterns like:
///   (X * 2) + X ==> (X * 2) + (X * 1) ==> X * (2 + 1)
static Value *getIdentityValue(Instruction::BinaryOps Opcode, Value *V) {}

/// This function predicates factorization using distributive laws. By default,
/// it just returns the 'Op' inputs. But for special-cases like
/// 'add(shl(X, 5), ...)', this function will have TopOpcode == Instruction::Add
/// and Op = shl(X, 5). The 'shl' is treated as the more general 'mul X, 32' to
/// allow more factorization opportunities.
static Instruction::BinaryOps
getBinOpsForFactorization(Instruction::BinaryOps TopOpcode, BinaryOperator *Op,
                          Value *&LHS, Value *&RHS, BinaryOperator *OtherOp) {}

/// This tries to simplify binary operations by factorizing out common terms
/// (e.g., "(A*B)+(A*C)" -> "A*(B+C)").
static Value *tryFactorization(BinaryOperator &I, const SimplifyQuery &SQ,
                               InstCombiner::BuilderTy &Builder,
                               Instruction::BinaryOps InnerOpcode, Value *A,
                               Value *B, Value *C, Value *D) {}

// If `I` has one Const operand and the other matches `(ctpop (not x))`,
// replace `(ctpop (not x))` with `(sub nuw nsw BitWidth(x), (ctpop x))`.
// This is only useful if the new subtract can fold, so we only handle the
// following cases:
//    1) (add/sub/disjoint_or C, (ctpop (not x)))
//        -> (add/sub/disjoint_or C', (ctpop x))
//    2) (cmp pred C, (ctpop (not x)))
//        -> (cmp pred C', (ctpop x))
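//
// For example (illustrative, i32): since ctpop(not x) == 32 - ctpop(x),
//    (add 10, (ctpop (not x)))  ->  (sub 42, (ctpop x))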
Instruction *InstCombinerImpl::tryFoldInstWithCtpopWithNot(Instruction *I) {}

// (Binop1 (Binop2 (logic_shift X, C), C1), (logic_shift Y, C))
//   IFF
//    1) the logic_shifts match
//    2) either BinOp1 is `and`, or
//       BinOp1 == BinOp2 and the mask survives the shift round-trip:
//       (logic_shift (inv_logic_shift C1, C), C) == C1
//
//    -> (logic_shift (Binop1 (Binop2 X, inv_logic_shift(C1, C)), Y), C)
//
// (Binop1 (Binop2 (logic_shift X, Amt), Mask), (logic_shift Y, Amt))
//   IFF
//    1) the logic_shifts match
//    2) BinOp1 == BinOp2 (if BinOp == `add`, the shift must be `shl`)
//
//    -> (BinOp (logic_shift (BinOp X, Y), Amt), Mask)
//
// (Binop1 (Binop2 (arithmetic_shift X, Amt), Mask), (arithmetic_shift Y, Amt))
//   IFF
//   1) Binop1 is bitwise logical operator `and`, `or` or `xor`
//   2) Binop2 is `not`
//
//   -> (arithmetic_shift Binop1((not X), Y), Amt)
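//
// For example (illustrative), the second pattern with Binop1 == Binop2 == xor
// and matching lshr shifts:
//   (xor (xor (lshr X, 4), Mask), (lshr Y, 4))
//     -> (xor (lshr (xor X, Y), 4), Mask)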

Instruction *InstCombinerImpl::foldBinOpShiftWithShift(BinaryOperator &I) {}

// (Binop (zext C), (select C, T, F))
//    -> (select C, (binop 1, T), (binop 0, F))
//
// (Binop (sext C), (select C, T, F))
//    -> (select C, (binop -1, T), (binop 0, F))
//
// Attempt to simplify binary operations into a select with folded args, when
// one operand of the binop is a select instruction and the other operand is a
// zext/sext extension whose value is the select condition.
Instruction *
InstCombinerImpl::foldBinOpOfSelectAndCastOfSelectCondition(BinaryOperator &I) {}

Value *InstCombinerImpl::tryFactorizationFolds(BinaryOperator &I) {}

/// This tries to simplify binary operations which some other binary operation
/// distributes over, either by factorizing out common terms
/// (e.g., "(A*B)+(A*C)" -> "A*(B+C)") or by expanding if that results in
/// simplifications (e.g., "A & (B | C) -> (A&B) | (A&C)" if this is a win).
/// Returns the simplified value, or null if it didn't simplify.
Value *InstCombinerImpl::foldUsingDistributiveLaws(BinaryOperator &I) {}

static std::optional<std::pair<Value *, Value *>>
matchSymmetricPhiNodesPair(PHINode *LHS, PHINode *RHS) {}

std::optional<std::pair<Value *, Value *>>
InstCombinerImpl::matchSymmetricPair(Value *LHS, Value *RHS) {}

Value *InstCombinerImpl::SimplifySelectsFeedingBinaryOp(BinaryOperator &I,
                                                        Value *LHS,
                                                        Value *RHS) {}

/// Freely adapt every user of V as if V were changed to !V.
/// WARNING: only if canFreelyInvertAllUsersOf() said this can be done.
void InstCombinerImpl::freelyInvertAllUsersOf(Value *I, Value *IgnoredUser) {}

/// Given a 'sub' instruction, return the RHS of the instruction if the LHS is a
/// constant zero (which is the 'negate' form).
Value *InstCombinerImpl::dyn_castNegVal(Value *V) const {}

// Try to fold:
//    1) (fp_binop ({s|u}itofp x), ({s|u}itofp y))
//        -> ({s|u}itofp (int_binop x, y))
//    2) (fp_binop ({s|u}itofp x), FpC)
//        -> ({s|u}itofp (int_binop x, (fpto{s|u}i FpC)))
//
// Assuming the sign of the cast for x/y is `OpsFromSigned`.
Instruction *InstCombinerImpl::foldFBinOpOfIntCastsFromSign(
    BinaryOperator &BO, bool OpsFromSigned, std::array<Value *, 2> IntOps,
    Constant *Op1FpC, SmallVectorImpl<WithCache<const Value *>> &OpsKnown) {}

// Try to fold:
//    1) (fp_binop ({s|u}itofp x), ({s|u}itofp y))
//        -> ({s|u}itofp (int_binop x, y))
//    2) (fp_binop ({s|u}itofp x), FpC)
//        -> ({s|u}itofp (int_binop x, (fpto{s|u}i FpC)))
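//
// For example (illustrative, when the narrow add is known not to overflow and
// every value is exactly representable in the FP type):
//    (fadd (sitofp i8 %x to float), (sitofp i8 %y to float))
//      -> (sitofp (add i8 %x, %y) to float)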
Instruction *InstCombinerImpl::foldFBinOpOfIntCasts(BinaryOperator &BO) {}

/// A binop with a constant operand and a sign-extended boolean operand may be
/// converted into a select of constants by applying the binary operation to
/// the constant with the two possible values of the extended boolean (0 or -1).
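///
/// For example (illustrative): "add (sext i1 %b), 5" becomes
/// "select %b, 4, 5", since the sext yields -1 or 0 and 5 + (-1) == 4.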
Instruction *InstCombinerImpl::foldBinopOfSextBoolToSelect(BinaryOperator &BO) {}

static Constant *constantFoldOperationIntoSelectOperand(Instruction &I,
                                                        SelectInst *SI,
                                                        bool IsTrueArm) {}

static Value *foldOperationIntoSelectOperand(Instruction &I, SelectInst *SI,
                                             Value *NewOp, InstCombiner &IC) {}

Instruction *InstCombinerImpl::FoldOpIntoSelect(Instruction &Op, SelectInst *SI,
                                                bool FoldWithMultiUse) {}

static Value *simplifyInstructionWithPHI(Instruction &I, PHINode *PN,
                                         Value *InValue, BasicBlock *InBB,
                                         const DataLayout &DL,
                                         const SimplifyQuery SQ) {}

Instruction *InstCombinerImpl::foldOpIntoPhi(Instruction &I, PHINode *PN) {}

Instruction *InstCombinerImpl::foldBinopWithPhiOperands(BinaryOperator &BO) {}

Instruction *InstCombinerImpl::foldBinOpIntoSelectOrPhi(BinaryOperator &I) {}

static bool shouldMergeGEPs(GEPOperator &GEP, GEPOperator &Src) {}

Instruction *InstCombinerImpl::foldVectorBinop(BinaryOperator &Inst) {}

/// Try to narrow the width of a binop if at least one operand is an extend of
/// a value. This requires a potentially expensive known bits check to make
/// sure the narrow op does not overflow.
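///
/// For example (illustrative), when known bits prove the narrow add cannot
/// overflow:
///   add (zext i8 %a to i32), (zext i8 %b to i32)
///     --> zext (add i8 %a, %b) to i32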
Instruction *InstCombinerImpl::narrowMathIfNoOverflow(BinaryOperator &BO) {}

/// Determine nowrap flags for (gep (gep p, x), y) to (gep p, (x + y))
/// transform.
static GEPNoWrapFlags getMergedGEPNoWrapFlags(GEPOperator &GEP1,
                                              GEPOperator &GEP2) {}

/// Thread a GEP operation with constant indices through the constant true/false
/// arms of a select.
static Instruction *foldSelectGEP(GetElementPtrInst &GEP,
                                  InstCombiner::BuilderTy &Builder) {}

// Canonicalization:
// gep T, (gep i8, base, C1), (Index + C2) into
// gep T, (gep i8, base, C1 + C2 * sizeof(T)), Index
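//
// For example (illustrative, with T = i32 so sizeof(T) == 4):
//   gep i32, (gep i8, %base, 4), (add %idx, 2)
// becomes
//   gep i32, (gep i8, %base, 12), %idx    ; 4 + 2 * 4 == 12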
static Instruction *canonicalizeGEPOfConstGEPI8(GetElementPtrInst &GEP,
                                                GEPOperator *Src,
                                                InstCombinerImpl &IC) {}

Instruction *InstCombinerImpl::visitGEPOfGEP(GetElementPtrInst &GEP,
                                             GEPOperator *Src) {}

Value *InstCombiner::getFreelyInvertedImpl(Value *V, bool WillInvertAllUses,
                                           BuilderTy *Builder,
                                           bool &DoesConsume, unsigned Depth) {}

Instruction *InstCombinerImpl::visitGetElementPtrInst(GetElementPtrInst &GEP) {}

static bool isNeverEqualToUnescapedAlloc(Value *V, const TargetLibraryInfo &TLI,
                                         Instruction *AI) {}

/// Given a call CB which uses an address UsedV, return true if we can prove
/// the call's only possible effect is storing to UsedV.
static bool isRemovableWrite(CallBase &CB, Value *UsedV,
                             const TargetLibraryInfo &TLI) {}

static bool isAllocSiteRemovable(Instruction *AI,
                                 SmallVectorImpl<WeakTrackingVH> &Users,
                                 const TargetLibraryInfo &TLI) {}

Instruction *InstCombinerImpl::visitAllocSite(Instruction &MI) {}

/// Move the call to free before a NULL test.
///
/// Check if this free is accessed after its argument has been tested
/// against NULL (property 0).
/// If yes, it is legal to move this call into its predecessor block.
///
/// The move is performed only if the block containing the call to free
/// will be removed, i.e.:
/// 1. it has only one predecessor P, and P has two successors
/// 2. it contains the call, noops, and an unconditional branch
/// 3. its successor is the same as its predecessor's successor
///
/// Profitability is not a concern here; this function should be called only
/// if the caller knows the transformation would be profitable (e.g., for
/// code size).
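///
/// For example (illustrative C), this turns
///   if (p != NULL) free(p);
/// into an unconditional free(p), which is legal because free(NULL) is a
/// no-op.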
static Instruction *tryToMoveFreeBeforeNullTest(CallInst &FI,
                                                const DataLayout &DL) {}

Instruction *InstCombinerImpl::visitFree(CallInst &FI, Value *Op) {}

Instruction *InstCombinerImpl::visitReturnInst(ReturnInst &RI) {}

// WARNING: keep in sync with SimplifyCFGOpt::simplifyUnreachable()!
bool InstCombinerImpl::removeInstructionsBeforeUnreachable(Instruction &I) {}

Instruction *InstCombinerImpl::visitUnreachableInst(UnreachableInst &I) {}

Instruction *InstCombinerImpl::visitUnconditionalBranchInst(BranchInst &BI) {}

void InstCombinerImpl::addDeadEdge(BasicBlock *From, BasicBlock *To,
                                   SmallVectorImpl<BasicBlock *> &Worklist) {}

// Under the assumption that I is unreachable, remove it and following
// instructions. Changes are reported directly to MadeIRChange.
void InstCombinerImpl::handleUnreachableFrom(
    Instruction *I, SmallVectorImpl<BasicBlock *> &Worklist) {}

void InstCombinerImpl::handlePotentiallyDeadBlocks(
    SmallVectorImpl<BasicBlock *> &Worklist) {}

void InstCombinerImpl::handlePotentiallyDeadSuccessors(BasicBlock *BB,
                                                       BasicBlock *LiveSucc) {}

Instruction *InstCombinerImpl::visitBranchInst(BranchInst &BI) {}

// Replaces (switch (select cond, X, C)/(select cond, C, X)) with (switch X) if
// we can prove that both (switch C) and (switch X) go to the default when cond
// is false/true.
static Value *simplifySwitchOnSelectUsingRanges(SwitchInst &SI,
                                                SelectInst *Select,
                                                bool IsTrueArm) {}

Instruction *InstCombinerImpl::visitSwitchInst(SwitchInst &SI) {}

Instruction *
InstCombinerImpl::foldExtractOfOverflowIntrinsic(ExtractValueInst &EV) {}

Instruction *InstCombinerImpl::visitExtractValueInst(ExtractValueInst &EV) {}

/// Return 'true' if the given typeinfo will match anything.
static bool isCatchAll(EHPersonality Personality, Constant *TypeInfo) {}

static bool shorter_filter(const Value *LHS, const Value *RHS) {}

Instruction *InstCombinerImpl::visitLandingPadInst(LandingPadInst &LI) {}

Value *
InstCombinerImpl::pushFreezeToPreventPoisonFromPropagating(FreezeInst &OrigFI) {}

Instruction *InstCombinerImpl::foldFreezeIntoRecurrence(FreezeInst &FI,
                                                        PHINode *PN) {}

bool InstCombinerImpl::freezeOtherUses(FreezeInst &FI) {}

// Check if any direct or bitcast user of this value is a shuffle instruction.
static bool isUsedWithinShuffleVector(Value *V) {}

Instruction *InstCombinerImpl::visitFreeze(FreezeInst &I) {}

/// Check for the case where the call writes to an otherwise dead alloca. This
/// shows up for unused out-params in idiomatic C/C++ code. Note that this
/// helper *only* analyzes the write; it doesn't check any other legality
/// aspects.
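///
/// For example (illustrative C, with a hypothetical helper that only writes
/// through its pointer argument):
///   int status;          // never read afterwards
///   getStatus(&status);  // the call's only effect is this dead write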
static bool SoleWriteToDeadLocal(Instruction *I, TargetLibraryInfo &TLI) {}

/// Try to move the specified instruction from its current block into the
/// beginning of DestBlock, which can only happen if it's safe to move the
/// instruction past all of the instructions between it and the end of its
/// block.
bool InstCombinerImpl::tryToSinkInstruction(Instruction *I,
                                            BasicBlock *DestBlock) {}

void InstCombinerImpl::tryToSinkInstructionDbgValues(
    Instruction *I, BasicBlock::iterator InsertPos, BasicBlock *SrcBlock,
    BasicBlock *DestBlock, SmallVectorImpl<DbgVariableIntrinsic *> &DbgUsers) {}

void InstCombinerImpl::tryToSinkInstructionDbgVariableRecords(
    Instruction *I, BasicBlock::iterator InsertPos, BasicBlock *SrcBlock,
    BasicBlock *DestBlock,
    SmallVectorImpl<DbgVariableRecord *> &DbgVariableRecords) {}

bool InstCombinerImpl::run() {}

// Track the scopes used by !alias.scope and !noalias. In a function, a
// @llvm.experimental.noalias.scope.decl is only useful if that scope is used
// by both sets. If not, the declaration of the scope can be safely omitted.
// The MDNode of the scope can be omitted as well for the instructions that are
// part of this function. We do not do that at this point, as this might become
// too time consuming to do.
class AliasScopeTracker {};

/// Populate the IC worklist from a function, by walking it in reverse
/// post-order and adding all reachable code to the worklist.
///
/// This has a couple of tricks to make the code faster and more powerful.  In
/// particular, we constant fold and DCE instructions as we go, to avoid adding
/// them to the worklist (this significantly speeds up instcombine on code where
/// many instructions are dead or constant).  Additionally, if we find a branch
/// whose condition is a known constant, we only visit the reachable successors.
bool InstCombinerImpl::prepareWorklist(Function &F) {}

void InstCombiner::computeBackEdges() {}

static bool combineInstructionsOverFunction(
    Function &F, InstructionWorklist &Worklist, AliasAnalysis *AA,
    AssumptionCache &AC, TargetLibraryInfo &TLI, TargetTransformInfo &TTI,
    DominatorTree &DT, OptimizationRemarkEmitter &ORE, BlockFrequencyInfo *BFI,
    BranchProbabilityInfo *BPI, ProfileSummaryInfo *PSI,
    const InstCombineOptions &Opts) {}

InstCombinePass::InstCombinePass(InstCombineOptions Opts) : Options(Opts) {}

void InstCombinePass::printPipeline(
    raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {}

PreservedAnalyses InstCombinePass::run(Function &F,
                                       FunctionAnalysisManager &AM) {}

void InstructionCombiningPass::getAnalysisUsage(AnalysisUsage &AU) const {}

bool InstructionCombiningPass::runOnFunction(Function &F) {}

char InstructionCombiningPass::ID = 0;

InstructionCombiningPass::InstructionCombiningPass() : FunctionPass(ID) {}

INITIALIZE_PASS_BEGIN(InstructionCombiningPass, "instcombine",
                      "Combine redundant instructions", false, false)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LazyBlockFrequencyInfoPass)
INITIALIZE_PASS_DEPENDENCY(ProfileSummaryInfoWrapperPass)
INITIALIZE_PASS_END(InstructionCombiningPass, "instcombine",
                    "Combine redundant instructions", false, false)

// Initialization Routines
void llvm::initializeInstCombine(PassRegistry &Registry) {}

FunctionPass *llvm::createInstructionCombiningPass() {}