llvm/llvm/lib/Analysis/ValueTracking.cpp

//===- ValueTracking.cpp - Walk computations to compute properties --------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains routines that help analyze properties that chains of
// computations have.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/ValueTracking.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumeBundleQueries.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/DomConditionCache.h"
#include "llvm/Analysis/GuardUtils.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/Analysis/WithCache.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/EHPersonalities.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsAArch64.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/IR/IntrinsicsRISCV.h"
#include "llvm/IR/IntrinsicsX86.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/TargetParser/RISCVTargetParser.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <optional>
#include <utility>

using namespace llvm;
using namespace llvm::PatternMatch;

// Controls the number of uses of the value searched for possible
// dominating comparisons.
static cl::opt<unsigned> DomConditionsMaxUses("dom-conditions-max-uses",
                                              cl::Hidden, cl::init(20));


/// Returns the bitwidth of the given scalar or pointer type. For vector types,
/// returns the element type's bitwidth.
static unsigned getBitWidth(Type *Ty, const DataLayout &DL) {}

// Given the provided Value and, potentially, a context instruction, return
// the preferred context instruction (if any).
static const Instruction *safeCxtI(const Value *V, const Instruction *CxtI) {}

static const Instruction *safeCxtI(const Value *V1, const Value *V2,
                                   const Instruction *CxtI) {}

static bool getShuffleDemandedElts(const ShuffleVectorInst *Shuf,
                                   const APInt &DemandedElts,
                                   APInt &DemandedLHS, APInt &DemandedRHS) {}

static void computeKnownBits(const Value *V, const APInt &DemandedElts,
                             KnownBits &Known, unsigned Depth,
                             const SimplifyQuery &Q);

void llvm::computeKnownBits(const Value *V, KnownBits &Known, unsigned Depth,
                            const SimplifyQuery &Q) {}

void llvm::computeKnownBits(const Value *V, KnownBits &Known,
                            const DataLayout &DL, unsigned Depth,
                            AssumptionCache *AC, const Instruction *CxtI,
                            const DominatorTree *DT, bool UseInstrInfo) {}

KnownBits llvm::computeKnownBits(const Value *V, const DataLayout &DL,
                                 unsigned Depth, AssumptionCache *AC,
                                 const Instruction *CxtI,
                                 const DominatorTree *DT, bool UseInstrInfo) {}

KnownBits llvm::computeKnownBits(const Value *V, const APInt &DemandedElts,
                                 const DataLayout &DL, unsigned Depth,
                                 AssumptionCache *AC, const Instruction *CxtI,
                                 const DominatorTree *DT, bool UseInstrInfo) {}

static bool haveNoCommonBitsSetSpecialCases(const Value *LHS, const Value *RHS,
                                            const SimplifyQuery &SQ) {}

bool llvm::haveNoCommonBitsSet(const WithCache<const Value *> &LHSCache,
                               const WithCache<const Value *> &RHSCache,
                               const SimplifyQuery &SQ) {}

bool llvm::isOnlyUsedInZeroComparison(const Instruction *I) {}

bool llvm::isOnlyUsedInZeroEqualityComparison(const Instruction *I) {}

bool llvm::isKnownToBeAPowerOfTwo(const Value *V, const DataLayout &DL,
                                  bool OrZero, unsigned Depth,
                                  AssumptionCache *AC, const Instruction *CxtI,
                                  const DominatorTree *DT, bool UseInstrInfo) {}

static bool isKnownNonZero(const Value *V, const APInt &DemandedElts,
                           const SimplifyQuery &Q, unsigned Depth);

bool llvm::isKnownNonNegative(const Value *V, const SimplifyQuery &SQ,
                              unsigned Depth) {}

bool llvm::isKnownPositive(const Value *V, const SimplifyQuery &SQ,
                           unsigned Depth) {}

bool llvm::isKnownNegative(const Value *V, const SimplifyQuery &SQ,
                           unsigned Depth) {}

static bool isKnownNonEqual(const Value *V1, const Value *V2,
                            const APInt &DemandedElts, unsigned Depth,
                            const SimplifyQuery &Q);

bool llvm::isKnownNonEqual(const Value *V1, const Value *V2,
                           const DataLayout &DL, AssumptionCache *AC,
                           const Instruction *CxtI, const DominatorTree *DT,
                           bool UseInstrInfo) {}

bool llvm::MaskedValueIsZero(const Value *V, const APInt &Mask,
                             const SimplifyQuery &SQ, unsigned Depth) {}

static unsigned ComputeNumSignBits(const Value *V, const APInt &DemandedElts,
                                   unsigned Depth, const SimplifyQuery &Q);

static unsigned ComputeNumSignBits(const Value *V, unsigned Depth,
                                   const SimplifyQuery &Q) {}

unsigned llvm::ComputeNumSignBits(const Value *V, const DataLayout &DL,
                                  unsigned Depth, AssumptionCache *AC,
                                  const Instruction *CxtI,
                                  const DominatorTree *DT, bool UseInstrInfo) {}

unsigned llvm::ComputeMaxSignificantBits(const Value *V, const DataLayout &DL,
                                         unsigned Depth, AssumptionCache *AC,
                                         const Instruction *CxtI,
                                         const DominatorTree *DT) {}

static void computeKnownBitsAddSub(bool Add, const Value *Op0, const Value *Op1,
                                   bool NSW, bool NUW,
                                   const APInt &DemandedElts,
                                   KnownBits &KnownOut, KnownBits &Known2,
                                   unsigned Depth, const SimplifyQuery &Q) {}

static void computeKnownBitsMul(const Value *Op0, const Value *Op1, bool NSW,
                                bool NUW, const APInt &DemandedElts,
                                KnownBits &Known, KnownBits &Known2,
                                unsigned Depth, const SimplifyQuery &Q) {}

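// Illustrative example (not from the original file): for a load annotated
// with `!range !{i64 0, i64 256}`, the values lie in [0, 256), so every bit
// above bit 7 becomes known zero.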
void llvm::computeKnownBitsFromRangeMetadata(const MDNode &Ranges,
                                             KnownBits &Known) {}

static bool isEphemeralValueOf(const Instruction *I, const Value *E) {}

// Is this an intrinsic that cannot be speculated but also cannot trap?
bool llvm::isAssumeLikeIntrinsic(const Instruction *I) {}

bool llvm::isValidAssumeForContext(const Instruction *Inv,
                                   const Instruction *CxtI,
                                   const DominatorTree *DT,
                                   bool AllowEphemerals) {}

// TODO: cmpExcludesZero misses many cases where `RHS` is non-constant but
// we still have enough information about `RHS` to conclude non-zero. For
// example Pred=EQ, RHS=isKnownNonZero. cmpExcludesZero is called in loops
// so the extra compile time may not be worth it, but possibly a second API
// should be created for use outside of loops.
static bool cmpExcludesZero(CmpInst::Predicate Pred, const Value *RHS) {}

static bool isKnownNonZeroFromAssume(const Value *V, const SimplifyQuery &Q) {}

static void computeKnownBitsFromCmp(const Value *V, CmpInst::Predicate Pred,
                                    Value *LHS, Value *RHS, KnownBits &Known,
                                    const SimplifyQuery &Q) {}

static void computeKnownBitsFromICmpCond(const Value *V, ICmpInst *Cmp,
                                         KnownBits &Known,
                                         const SimplifyQuery &SQ, bool Invert) {}

static void computeKnownBitsFromCond(const Value *V, Value *Cond,
                                     KnownBits &Known, unsigned Depth,
                                     const SimplifyQuery &SQ, bool Invert) {}

void llvm::computeKnownBitsFromContext(const Value *V, KnownBits &Known,
                                       unsigned Depth, const SimplifyQuery &Q) {}

/// Compute known bits from a shift operator, including those with a
/// non-constant shift amount. Known is the output of this function. Known2 is
/// a pre-allocated temporary with the same bit width as Known and on return
/// contains the known bits of the shift value source. KF is an
/// operator-specific function that, given the known-bits and a shift amount,
/// computes the implied known-bits of the shift operator's result for that
/// shift amount. The results from calling KF are conservatively combined for
/// all permitted shift amounts.
static void computeKnownBitsFromShiftOperator(
    const Operator *I, const APInt &DemandedElts, KnownBits &Known,
    KnownBits &Known2, unsigned Depth, const SimplifyQuery &Q,
    function_ref<KnownBits(const KnownBits &, const KnownBits &, bool)> KF) {}

static KnownBits
getKnownBitsFromAndXorOr(const Operator *I, const APInt &DemandedElts,
                         const KnownBits &KnownLHS, const KnownBits &KnownRHS,
                         unsigned Depth, const SimplifyQuery &Q) {}

static KnownBits computeKnownBitsForHorizontalOperation(
    const Operator *I, const APInt &DemandedElts, unsigned Depth,
    const SimplifyQuery &Q,
    const function_ref<KnownBits(const KnownBits &, const KnownBits &)>
        KnownBitsFunc) {}

// Public so this can be used in `SimplifyDemandedUseBits`.
KnownBits llvm::analyzeKnownBitsFromAndXorOr(const Operator *I,
                                             const KnownBits &KnownLHS,
                                             const KnownBits &KnownRHS,
                                             unsigned Depth,
                                             const SimplifyQuery &SQ) {}

ConstantRange llvm::getVScaleRange(const Function *F, unsigned BitWidth) {}

void llvm::adjustKnownBitsForSelectArm(KnownBits &Known, Value *Cond,
                                       Value *Arm, bool Invert, unsigned Depth,
                                       const SimplifyQuery &Q) {}

static void computeKnownBitsFromOperator(const Operator *I,
                                         const APInt &DemandedElts,
                                         KnownBits &Known, unsigned Depth,
                                         const SimplifyQuery &Q) {}

/// Determine which bits of V are known to be either zero or one and return
/// them.
KnownBits llvm::computeKnownBits(const Value *V, const APInt &DemandedElts,
                                 unsigned Depth, const SimplifyQuery &Q) {}

/// Determine which bits of V are known to be either zero or one and return
/// them.
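///
/// A minimal usage sketch (illustrative; assumes a SimplifyQuery Q built
/// elsewhere):
/// \code
///   KnownBits Known = computeKnownBits(V, /*Depth=*/0, Q);
///   if (Known.isNonNegative())
///     ...; // The sign bit of V is known to be clear.
/// \endcode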
KnownBits llvm::computeKnownBits(const Value *V, unsigned Depth,
                                 const SimplifyQuery &Q) {}

/// Determine which bits of V are known to be either zero or one and return
/// them in the Known bit set.
///
/// NOTE: we cannot consider 'undef' to be "IsZero" here.  The problem is that
/// we cannot optimize based on the assumption that it is zero without changing
/// it to be an explicit zero.  If we don't change it to zero, other code could
/// be optimized based on the contradictory assumption that it is non-zero.
/// Because instcombine aggressively folds operations with undef args anyway,
/// this won't lose us code quality.
///
/// This function is defined on values with integer type, values with pointer
/// type, and vectors of integers.  In the case where V is a vector, the known
/// zero and known one values are the same width as the vector element, and the
/// bit is set only if it is true for all of the demanded elements in the
/// vector specified by DemandedElts.
void computeKnownBits(const Value *V, const APInt &DemandedElts,
                      KnownBits &Known, unsigned Depth,
                      const SimplifyQuery &Q) {}

/// Try to detect a recurrence in which the value of the induction variable is
/// always a power of two (or zero).
static bool isPowerOfTwoRecurrence(const PHINode *PN, bool OrZero,
                                   unsigned Depth, SimplifyQuery &Q) {}

/// Return true if we can infer that \p V is known to be a power of 2 from
/// dominating condition \p Cond (e.g., ctpop(V) == 1).
static bool isImpliedToBeAPowerOfTwoFromCond(const Value *V, bool OrZero,
                                             const Value *Cond,
                                             bool CondIsTrue) {}

/// Return true if the given value is known to have exactly one
/// bit set when defined. For vectors return true if every element is known to
/// be a power of two when defined. Supports values with integer or pointer
/// types and vectors of integers.
bool llvm::isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth,
                                  const SimplifyQuery &Q) {}

/// Test whether a GEP's result is known to be non-null.
///
/// Uses properties inherent in a GEP to try to determine whether it is known
/// to be non-null.
///
/// Currently this routine does not support vector GEPs.
static bool isGEPKnownNonNull(const GEPOperator *GEP, unsigned Depth,
                              const SimplifyQuery &Q) {}

static bool isKnownNonNullFromDominatingCondition(const Value *V,
                                                  const Instruction *CtxI,
                                                  const DominatorTree *DT) {}

/// Does the 'Range' metadata (which must be a valid MD_range operand list)
/// ensure that the value it's attached to is never Value?  'RangeType' is
/// the type of the value described by the range.
static bool rangeMetadataExcludesValue(const MDNode *Ranges,
                                       const APInt &Value) {}

/// Try to detect a recurrence that monotonically increases/decreases from a
/// non-zero starting value. These are common as induction variables.
static bool isNonZeroRecurrence(const PHINode *PN) {}

static bool matchOpWithOpEqZero(Value *Op0, Value *Op1) {}

static bool isNonZeroAdd(const APInt &DemandedElts, unsigned Depth,
                         const SimplifyQuery &Q, unsigned BitWidth, Value *X,
                         Value *Y, bool NSW, bool NUW) {}

static bool isNonZeroSub(const APInt &DemandedElts, unsigned Depth,
                         const SimplifyQuery &Q, unsigned BitWidth, Value *X,
                         Value *Y) {}

static bool isNonZeroMul(const APInt &DemandedElts, unsigned Depth,
                         const SimplifyQuery &Q, unsigned BitWidth, Value *X,
                         Value *Y, bool NSW, bool NUW) {}

static bool isNonZeroShift(const Operator *I, const APInt &DemandedElts,
                           unsigned Depth, const SimplifyQuery &Q,
                           const KnownBits &KnownVal) {}

static bool isKnownNonZeroFromOperator(const Operator *I,
                                       const APInt &DemandedElts,
                                       unsigned Depth, const SimplifyQuery &Q) {}

/// Return true if the given value is known to be non-zero when defined. For
/// vectors, return true if every demanded element is known to be non-zero when
/// defined. For pointers, if the context instruction and dominator tree are
/// specified, perform context-sensitive analysis and return true if the
/// pointer couldn't possibly be null at the specified instruction.
/// Supports values with integer or pointer type and vectors of integers.
bool isKnownNonZero(const Value *V, const APInt &DemandedElts,
                    const SimplifyQuery &Q, unsigned Depth) {}

bool llvm::isKnownNonZero(const Value *V, const SimplifyQuery &Q,
                          unsigned Depth) {}

/// If the pair of operators are the same invertible function, return the
/// operands of the function corresponding to each input. Otherwise, return
/// std::nullopt.  An invertible function is one that is 1-to-1 and maps every
/// input value to exactly one output value.  This is equivalent to saying that
/// Op1 and Op2 are equal exactly when the specified pair of operands are equal
/// (except that Op1 and Op2 may be poison more often).
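///
/// For example (illustrative): `x + c` and `y + c` for a common `c` are such a
/// pair, since adding a fixed value is 1-to-1; the operands returned for that
/// pair would be (x, y).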
static std::optional<std::pair<Value*, Value*>>
getInvertibleOperands(const Operator *Op1,
                      const Operator *Op2) {}

/// Return true if V1 == (binop V2, X), where X is known non-zero.
/// Only handle a small subset of binops where (binop V2, X) with non-zero X
/// implies V2 != V1.
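/// For example (illustrative), xor has this property: if V1 == (xor V2, X) and
/// X is non-zero, then V1 != V2, because xor with a non-zero value always
/// flips at least one bit.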
static bool isModifyingBinopOfNonZero(const Value *V1, const Value *V2,
                                      const APInt &DemandedElts, unsigned Depth,
                                      const SimplifyQuery &Q) {}

/// Return true if V2 == V1 * C, where V1 is known non-zero, C is not 0/1 and
/// the multiplication is nuw or nsw.
static bool isNonEqualMul(const Value *V1, const Value *V2,
                          const APInt &DemandedElts, unsigned Depth,
                          const SimplifyQuery &Q) {}

/// Return true if V2 == V1 << C, where V1 is known non-zero, C is not 0 and
/// the shift is nuw or nsw.
static bool isNonEqualShl(const Value *V1, const Value *V2,
                          const APInt &DemandedElts, unsigned Depth,
                          const SimplifyQuery &Q) {}

static bool isNonEqualPHIs(const PHINode *PN1, const PHINode *PN2,
                           const APInt &DemandedElts, unsigned Depth,
                           const SimplifyQuery &Q) {}

static bool isNonEqualSelect(const Value *V1, const Value *V2,
                             const APInt &DemandedElts, unsigned Depth,
                             const SimplifyQuery &Q) {}

// Check to see if A is both a GEP and the incoming value for a PHI in the
// loop, and B is either a ptr or another GEP. If the PHI has 2 incoming
// values, one of them being the recursive GEP A and the other a ptr at the
// same base and at the same/higher offset than B, then we are only
// incrementing the pointer further in the loop if the offset of the recursive
// GEP is greater than 0.
static bool isNonEqualPointersWithRecursiveGEP(const Value *A, const Value *B,
                                               const SimplifyQuery &Q) {}

/// Return true if it is known that V1 != V2.
static bool isKnownNonEqual(const Value *V1, const Value *V2,
                            const APInt &DemandedElts, unsigned Depth,
                            const SimplifyQuery &Q) {}

// Match a signed min+max clamp pattern like smax(smin(In, CHigh), CLow).
// Returns the input and lower/upper bounds.
static bool isSignedMinMaxClamp(const Value *Select, const Value *&In,
                                const APInt *&CLow, const APInt *&CHigh) {}

static bool isSignedMinMaxIntrinsicClamp(const IntrinsicInst *II,
                                         const APInt *&CLow,
                                         const APInt *&CHigh) {}

/// For vector constants, loop over the elements and find the constant with the
/// minimum number of sign bits. Return 0 if the value is not a vector constant
/// or if any element was not analyzed; otherwise, return the count for the
/// element with the minimum number of sign bits.
static unsigned computeNumSignBitsVectorConstant(const Value *V,
                                                 const APInt &DemandedElts,
                                                 unsigned TyBits) {}

static unsigned ComputeNumSignBitsImpl(const Value *V,
                                       const APInt &DemandedElts,
                                       unsigned Depth, const SimplifyQuery &Q);

static unsigned ComputeNumSignBits(const Value *V, const APInt &DemandedElts,
                                   unsigned Depth, const SimplifyQuery &Q) {}

/// Return the number of times the sign bit of the register is replicated into
/// the other bits. We know that at least 1 bit is always equal to the sign bit
/// (itself), but other cases can give us information. For example, immediately
/// after an "ashr X, 2", we know that the top 3 bits are all equal to each
/// other, so we return 3. For vectors, return the number of sign bits for the
/// vector element with the minimum number of known sign bits of the demanded
/// elements in the vector specified by DemandedElts.
static unsigned ComputeNumSignBitsImpl(const Value *V,
                                       const APInt &DemandedElts,
                                       unsigned Depth, const SimplifyQuery &Q) {}

Intrinsic::ID llvm::getIntrinsicForCallSite(const CallBase &CB,
                                            const TargetLibraryInfo *TLI) {}

/// Return true if it's possible to assume IEEE treatment of input denormals in
/// \p F for \p Val.
static bool inputDenormalIsIEEE(const Function &F, const Type *Ty) {}

static bool inputDenormalIsIEEEOrPosZero(const Function &F, const Type *Ty) {}

static bool outputDenormalIsIEEEOrPosZero(const Function &F, const Type *Ty) {}

bool KnownFPClass::isKnownNeverLogicalZero(const Function &F, Type *Ty) const {}

bool KnownFPClass::isKnownNeverLogicalNegZero(const Function &F,
                                              Type *Ty) const {}

bool KnownFPClass::isKnownNeverLogicalPosZero(const Function &F,
                                              Type *Ty) const {}

void KnownFPClass::propagateDenormal(const KnownFPClass &Src, const Function &F,
                                     Type *Ty) {}

void KnownFPClass::propagateCanonicalizingSrc(const KnownFPClass &Src,
                                              const Function &F, Type *Ty) {}

/// Given an exploded icmp instruction, return true if the comparison only
/// checks the sign bit. If it only checks the sign bit, set TrueIfSigned if
/// the result of the comparison is true when the input value is signed.
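///
/// For example (illustrative): `icmp slt X, 0` checks only the sign bit and is
/// true when X is negative (TrueIfSigned == true), while `icmp sgt X, -1`
/// checks only the sign bit with TrueIfSigned == false.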
bool llvm::isSignBitCheck(ICmpInst::Predicate Pred, const APInt &RHS,
                          bool &TrueIfSigned) {}

/// Returns a pair of values, which if passed to llvm.is.fpclass, returns the
/// same result as an fcmp with the given operands.
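/// For example (illustrative): `fcmp oeq X, 0.0` matches exactly the values
/// for which `llvm.is.fpclass(X, fcZero)` is true, since +0.0 and -0.0 compare
/// equal and NaN compares unordered.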
std::pair<Value *, FPClassTest> llvm::fcmpToClassTest(FCmpInst::Predicate Pred,
                                                      const Function &F,
                                                      Value *LHS, Value *RHS,
                                                      bool LookThroughSrc) {}

std::pair<Value *, FPClassTest>
llvm::fcmpToClassTest(FCmpInst::Predicate Pred, const Function &F, Value *LHS,
                      const APFloat *ConstRHS, bool LookThroughSrc) {}

/// Return the return value for fcmpImpliesClass for a compare that produces an
/// exact class test.
static std::tuple<Value *, FPClassTest, FPClassTest> exactClass(Value *V,
                                                                FPClassTest M) {}

std::tuple<Value *, FPClassTest, FPClassTest>
llvm::fcmpImpliesClass(CmpInst::Predicate Pred, const Function &F, Value *LHS,
                       FPClassTest RHSClass, bool LookThroughSrc) {}

std::tuple<Value *, FPClassTest, FPClassTest>
llvm::fcmpImpliesClass(CmpInst::Predicate Pred, const Function &F, Value *LHS,
                       const APFloat &ConstRHS, bool LookThroughSrc) {}

std::tuple<Value *, FPClassTest, FPClassTest>
llvm::fcmpImpliesClass(CmpInst::Predicate Pred, const Function &F, Value *LHS,
                       Value *RHS, bool LookThroughSrc) {}

static void computeKnownFPClassFromCond(const Value *V, Value *Cond,
                                        bool CondIsTrue,
                                        const Instruction *CxtI,
                                        KnownFPClass &KnownFromContext) {}

static KnownFPClass computeKnownFPClassFromContext(const Value *V,
                                                   const SimplifyQuery &Q) {}

void computeKnownFPClass(const Value *V, const APInt &DemandedElts,
                         FPClassTest InterestedClasses, KnownFPClass &Known,
                         unsigned Depth, const SimplifyQuery &Q);

static void computeKnownFPClass(const Value *V, KnownFPClass &Known,
                                FPClassTest InterestedClasses, unsigned Depth,
                                const SimplifyQuery &Q) {}

static void computeKnownFPClassForFPTrunc(const Operator *Op,
                                          const APInt &DemandedElts,
                                          FPClassTest InterestedClasses,
                                          KnownFPClass &Known, unsigned Depth,
                                          const SimplifyQuery &Q) {}

void computeKnownFPClass(const Value *V, const APInt &DemandedElts,
                         FPClassTest InterestedClasses, KnownFPClass &Known,
                         unsigned Depth, const SimplifyQuery &Q) {}

KnownFPClass llvm::computeKnownFPClass(const Value *V,
                                       const APInt &DemandedElts,
                                       FPClassTest InterestedClasses,
                                       unsigned Depth,
                                       const SimplifyQuery &SQ) {}

KnownFPClass llvm::computeKnownFPClass(const Value *V,
                                       FPClassTest InterestedClasses,
                                       unsigned Depth,
                                       const SimplifyQuery &SQ) {}

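// Illustrative example (not from the original file): a value whose memory
// representation repeats a single byte, such as i32 0 or i32 0xAAAAAAAA,
// yields that byte (i8 0 and i8 0xAA respectively); otherwise nullptr is
// returned.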
Value *llvm::isBytewiseValue(Value *V, const DataLayout &DL) {}

// This is the recursive version of BuildSubAggregate. It takes a few different
// arguments. Idxs is the index within the nested struct From that we are
// looking at now (which is of type IndexedType). IdxSkip is the number of
// indices from Idxs that should be left out when inserting into the resulting
// struct. To is the result struct built so far; new insertvalue instructions
// build on that.
static Value *BuildSubAggregate(Value *From, Value *To, Type *IndexedType,
                                SmallVectorImpl<unsigned> &Idxs,
                                unsigned IdxSkip,
                                BasicBlock::iterator InsertBefore) {}

// This helper takes a nested struct and extracts a part of it (which is again a
// struct) into a new value. For example, given the struct:
// { a, { b, { c, d }, e } }
// and the indices "1, 1" this returns
// { c, d }.
//
// It does this by inserting an insertvalue for each element in the resulting
// struct, as opposed to just inserting a single struct. This will only work if
// each of the elements of the substruct is known (i.e., inserted into From
// by an insertvalue instruction somewhere).
//
// All inserted insertvalue instructions are inserted before InsertBefore.
static Value *BuildSubAggregate(Value *From, ArrayRef<unsigned> idx_range,
                                BasicBlock::iterator InsertBefore) {}

/// Given an aggregate and a sequence of indices, see if the scalar value
/// indexed is already around as a register, for example if it was inserted
/// directly into the aggregate.
///
/// If InsertBefore is not null, this function will duplicate (modified)
/// insertvalues when a part of a nested struct is extracted.
Value *
llvm::FindInsertedValue(Value *V, ArrayRef<unsigned> idx_range,
                        std::optional<BasicBlock::iterator> InsertBefore) {}

bool llvm::isGEPBasedOnPointerToString(const GEPOperator *GEP,
                                       unsigned CharSize) {}

// If V refers to an initialized global constant, set Slice either to
// its initializer if the size of its elements equals ElementSize, or,
// for ElementSize == 8, to its representation as an array of unsigned
// char. Return true on success.
// Offset is measured in units of ElementSize-sized elements.
bool llvm::getConstantDataArrayInfo(const Value *V,
                                    ConstantDataArraySlice &Slice,
                                    unsigned ElementSize, uint64_t Offset) {}

/// Extract bytes from the initializer of the constant array V, which need
/// not be a nul-terminated string.  On success, store the bytes in Str and
/// return true.  When TrimAtNul is set, Str will contain only the bytes up
/// to but not including the first nul.  Return false on failure.
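///
/// A minimal usage sketch (illustrative):
/// \code
///   StringRef Str;
///   if (getConstantStringInfo(V, Str, /*TrimAtNul=*/true))
///     ...; // Str now holds the string's bytes up to the first nul.
/// \endcode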
bool llvm::getConstantStringInfo(const Value *V, StringRef &Str,
                                 bool TrimAtNul) {}

// These next two are very similar to the above, but also look through PHI
// nodes.
// TODO: See if we can integrate these two together.

/// If we can compute the length of the string pointed to by
/// the specified pointer, return 'len+1'.  If we can't, return 0.
static uint64_t GetStringLengthH(const Value *V,
                                 SmallPtrSetImpl<const PHINode*> &PHIs,
                                 unsigned CharSize) {}

/// If we can compute the length of the string pointed to by
/// the specified pointer, return 'len+1'.  If we can't, return 0.
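/// For example (illustrative): for a pointer known to reference the constant
/// string "abc", this returns 4 (three characters plus the nul terminator);
/// if the length cannot be determined, it returns 0.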
uint64_t llvm::GetStringLength(const Value *V, unsigned CharSize) {}

const Value *
llvm::getArgumentAliasingToReturnedPointer(const CallBase *Call,
                                           bool MustPreserveNullness) {}

bool llvm::isIntrinsicReturningPointerAliasingArgumentWithoutCapturing(
    const CallBase *Call, bool MustPreserveNullness) {}

/// \p PN defines a loop-variant pointer to an object.  Check if the
/// previous iteration of the loop was referring to the same object as \p PN.
static bool isSameUnderlyingObjectInLoop(const PHINode *PN,
                                         const LoopInfo *LI) {}

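// Illustrative example (not from the original file): for
//   %p = getelementptr i8, ptr %base, i64 %i
// getUnderlyingObject(%p) looks through the GEP (and any casts, up to
// MaxLookup steps) and returns %base.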
const Value *llvm::getUnderlyingObject(const Value *V, unsigned MaxLookup) {}

void llvm::getUnderlyingObjects(const Value *V,
                                SmallVectorImpl<const Value *> &Objects,
                                const LoopInfo *LI, unsigned MaxLookup) {}

const Value *llvm::getUnderlyingObjectAggressive(const Value *V) {}

/// This is the function that does the work of looking through basic
/// ptrtoint+arithmetic+inttoptr sequences.
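///
/// An illustrative sequence it is meant to look through:
/// \code
///   %i = ptrtoint ptr %obj to i64
///   %j = add i64 %i, 16
///   %p = inttoptr i64 %j to ptr
/// \endcode
/// Starting from the integer %j, the walk steps over the add and the ptrtoint
/// and arrives at %obj.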
static const Value *getUnderlyingObjectFromInt(const Value *V) {}

/// This is a wrapper around getUnderlyingObjects and adds support for basic
/// ptrtoint+arithmetic+inttoptr sequences.
/// It returns false if an unidentified object is found by
/// getUnderlyingObjects.
bool llvm::getUnderlyingObjectsForCodeGen(const Value *V,
                                          SmallVectorImpl<Value *> &Objects) {}

AllocaInst *llvm::findAllocaForValue(Value *V, bool OffsetZero) {}

static bool onlyUsedByLifetimeMarkersOrDroppableInstsHelper(
    const Value *V, bool AllowLifetime, bool AllowDroppable) {}

bool llvm::onlyUsedByLifetimeMarkers(const Value *V) {}
bool llvm::onlyUsedByLifetimeMarkersOrDroppableInsts(const Value *V) {}

bool llvm::isSafeToSpeculativelyExecute(const Instruction *Inst,
                                        const Instruction *CtxI,
                                        AssumptionCache *AC,
                                        const DominatorTree *DT,
                                        const TargetLibraryInfo *TLI,
                                        bool UseVariableInfo) {}

bool llvm::isSafeToSpeculativelyExecuteWithOpcode(
    unsigned Opcode, const Instruction *Inst, const Instruction *CtxI,
    AssumptionCache *AC, const DominatorTree *DT, const TargetLibraryInfo *TLI,
    bool UseVariableInfo) {}

bool llvm::mayHaveNonDefUseDependency(const Instruction &I) {}

/// Convert ConstantRange OverflowResult into ValueTracking OverflowResult.
static OverflowResult mapOverflowResult(ConstantRange::OverflowResult OR) {}

/// Combine constant ranges from computeConstantRange() and computeKnownBits().
ConstantRange
llvm::computeConstantRangeIncludingKnownBits(const WithCache<const Value *> &V,
                                             bool ForSigned,
                                             const SimplifyQuery &SQ) {}

OverflowResult llvm::computeOverflowForUnsignedMul(const Value *LHS,
                                                   const Value *RHS,
                                                   const SimplifyQuery &SQ,
                                                   bool IsNSW) {}

OverflowResult llvm::computeOverflowForSignedMul(const Value *LHS,
                                                 const Value *RHS,
                                                 const SimplifyQuery &SQ) {}

OverflowResult
llvm::computeOverflowForUnsignedAdd(const WithCache<const Value *> &LHS,
                                    const WithCache<const Value *> &RHS,
                                    const SimplifyQuery &SQ) {}

static OverflowResult
computeOverflowForSignedAdd(const WithCache<const Value *> &LHS,
                            const WithCache<const Value *> &RHS,
                            const AddOperator *Add, const SimplifyQuery &SQ) {}

OverflowResult llvm::computeOverflowForUnsignedSub(const Value *LHS,
                                                   const Value *RHS,
                                                   const SimplifyQuery &SQ) {}

OverflowResult llvm::computeOverflowForSignedSub(const Value *LHS,
                                                 const Value *RHS,
                                                 const SimplifyQuery &SQ) {}

bool llvm::isOverflowIntrinsicNoWrap(const WithOverflowInst *WO,
                                     const DominatorTree &DT) {}

/// Shifts return poison if the shift amount is equal to or larger than the
/// bitwidth.
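/// For example (illustrative): a 32-bit shift whose amount is `and %y, 31` has
/// an amount provably in [0, 31], so it cannot produce poison on that account.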
static bool shiftAmountKnownInRange(const Value *ShiftAmount) {}

enum class UndefPoisonKind {};

static bool includesPoison(UndefPoisonKind Kind) {}

static bool includesUndef(UndefPoisonKind Kind) {}

static bool canCreateUndefOrPoison(const Operator *Op, UndefPoisonKind Kind,
                                   bool ConsiderFlagsAndMetadata) {}

bool llvm::canCreateUndefOrPoison(const Operator *Op,
                                  bool ConsiderFlagsAndMetadata) {}

bool llvm::canCreatePoison(const Operator *Op, bool ConsiderFlagsAndMetadata) {}

static bool directlyImpliesPoison(const Value *ValAssumedPoison, const Value *V,
                                  unsigned Depth) {}

static bool impliesPoison(const Value *ValAssumedPoison, const Value *V,
                          unsigned Depth) {}

bool llvm::impliesPoison(const Value *ValAssumedPoison, const Value *V) {}

static bool programUndefinedIfUndefOrPoison(const Value *V, bool PoisonOnly);

static bool isGuaranteedNotToBeUndefOrPoison(
    const Value *V, AssumptionCache *AC, const Instruction *CtxI,
    const DominatorTree *DT, unsigned Depth, UndefPoisonKind Kind) {}

bool llvm::isGuaranteedNotToBeUndefOrPoison(const Value *V, AssumptionCache *AC,
                                            const Instruction *CtxI,
                                            const DominatorTree *DT,
                                            unsigned Depth) {}

bool llvm::isGuaranteedNotToBePoison(const Value *V, AssumptionCache *AC,
                                     const Instruction *CtxI,
                                     const DominatorTree *DT, unsigned Depth) {}

bool llvm::isGuaranteedNotToBeUndef(const Value *V, AssumptionCache *AC,
                                    const Instruction *CtxI,
                                    const DominatorTree *DT, unsigned Depth) {}

/// Return true if undefined behavior would provably be executed on the path to
/// OnPathTo if Root produced a poison result.  Note that this doesn't say
/// anything about whether OnPathTo is actually executed or whether Root is
/// actually poison.  This can be used to assess whether a new use of Root can
/// be added at a location which is control equivalent with OnPathTo (such as
/// immediately before it) without introducing UB which didn't previously
/// exist.  Note that a false result conveys no information.
bool llvm::mustExecuteUBIfPoisonOnPathTo(Instruction *Root,
                                         Instruction *OnPathTo,
                                         DominatorTree *DT) {}

OverflowResult llvm::computeOverflowForSignedAdd(const AddOperator *Add,
                                                 const SimplifyQuery &SQ) {}

OverflowResult
llvm::computeOverflowForSignedAdd(const WithCache<const Value *> &LHS,
                                  const WithCache<const Value *> &RHS,
                                  const SimplifyQuery &SQ) {}

bool llvm::isGuaranteedToTransferExecutionToSuccessor(const Instruction *I) {}

bool llvm::isGuaranteedToTransferExecutionToSuccessor(const BasicBlock *BB) {}

bool llvm::isGuaranteedToTransferExecutionToSuccessor(
   BasicBlock::const_iterator Begin, BasicBlock::const_iterator End,
   unsigned ScanLimit) {}

bool llvm::isGuaranteedToTransferExecutionToSuccessor(
   iterator_range<BasicBlock::const_iterator> Range, unsigned ScanLimit) {}

bool llvm::isGuaranteedToExecuteForEveryIteration(const Instruction *I,
                                                  const Loop *L) {}

bool llvm::propagatesPoison(const Use &PoisonOp) {}

/// Enumerates all operands of \p I that are guaranteed to not be undef or
/// poison. If the callback \p Handle returns true, stop processing and return
/// true. Otherwise, return false.
template <typename CallableT>
static bool handleGuaranteedWellDefinedOps(const Instruction *I,
                                           const CallableT &Handle) {}

void llvm::getGuaranteedWellDefinedOps(
    const Instruction *I, SmallVectorImpl<const Value *> &Operands) {}

/// Enumerates all operands of \p I that are guaranteed to not be poison.
template <typename CallableT>
static bool handleGuaranteedNonPoisonOps(const Instruction *I,
                                         const CallableT &Handle) {}

void llvm::getGuaranteedNonPoisonOps(const Instruction *I,
                                     SmallVectorImpl<const Value *> &Operands) {}

bool llvm::mustTriggerUB(const Instruction *I,
                         const SmallPtrSetImpl<const Value *> &KnownPoison) {}

static bool programUndefinedIfUndefOrPoison(const Value *V,
                                            bool PoisonOnly) {}

bool llvm::programUndefinedIfUndefOrPoison(const Instruction *Inst) {}

bool llvm::programUndefinedIfPoison(const Instruction *Inst) {}

static bool isKnownNonNaN(const Value *V, FastMathFlags FMF) {}

static bool isKnownNonZero(const Value *V) {}

/// Match clamp pattern for float types without caring about NaNs or signed
/// zeros. Given a non-min/max outer cmp/select from the clamp pattern, this
/// function recognizes if it can be substituted by a "canonical" min/max
/// pattern.
static SelectPatternResult matchFastFloatClamp(CmpInst::Predicate Pred,
                                               Value *CmpLHS, Value *CmpRHS,
                                               Value *TrueVal, Value *FalseVal,
                                               Value *&LHS, Value *&RHS) {}

/// Recognize variations of:
///   CLAMP(v,l,h) ==> ((v) < (l) ? (l) : ((v) > (h) ? (h) : (v)))
static SelectPatternResult matchClamp(CmpInst::Predicate Pred,
                                      Value *CmpLHS, Value *CmpRHS,
                                      Value *TrueVal, Value *FalseVal) {}

/// Recognize variations of:
///   a < c ? min(a,b) : min(b,c) ==> min(min(a,b),min(b,c))
static SelectPatternResult matchMinMaxOfMinMax(CmpInst::Predicate Pred,
                                               Value *CmpLHS, Value *CmpRHS,
                                               Value *TVal, Value *FVal,
                                               unsigned Depth) {}

/// If the input value is the result of a 'not' op, constant integer, or vector
/// splat of a constant integer, return the bitwise-not source value.
/// TODO: This could be extended to handle non-splat vector integer constants.
static Value *getNotValue(Value *V) {}

/// Match non-obvious integer minimum and maximum sequences.
static SelectPatternResult matchMinMax(CmpInst::Predicate Pred,
                                       Value *CmpLHS, Value *CmpRHS,
                                       Value *TrueVal, Value *FalseVal,
                                       Value *&LHS, Value *&RHS,
                                       unsigned Depth) {}

bool llvm::isKnownNegation(const Value *X, const Value *Y, bool NeedNSW,
                           bool AllowPoison) {}

bool llvm::isKnownInversion(const Value *X, const Value *Y) {}

static SelectPatternResult matchSelectPattern(CmpInst::Predicate Pred,
                                              FastMathFlags FMF,
                                              Value *CmpLHS, Value *CmpRHS,
                                              Value *TrueVal, Value *FalseVal,
                                              Value *&LHS, Value *&RHS,
                                              unsigned Depth) {}

/// Helps to match a select pattern in case of a type mismatch.
///
/// The function processes the case when the type of the true and false values
/// of a select instruction differs from the type of the cmp instruction
/// operands because of a cast instruction. The function checks if it is legal
/// to move the cast operation after the "select". If yes, it returns the new
/// second value of the "select" (with the assumption that the cast is moved):
/// 1. As operand of cast instruction when both values of "select" are same cast
/// instructions.
/// 2. As restored constant (by applying reverse cast operation) when the first
/// value of the "select" is a cast operation and the second value is a
/// constant.
/// NOTE: We return only the new second value because the first value could be
/// accessed as operand of cast instruction.
static Value *lookThroughCast(CmpInst *CmpI, Value *V1, Value *V2,
                              Instruction::CastOps *CastOp) {}

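// A minimal usage sketch for the public entry point below (illustrative;
// relies on the default CastOp/Depth arguments declared in the header):
//   Value *LHS, *RHS;
//   SelectPatternResult R = matchSelectPattern(Sel, LHS, RHS);
//   if (R.Flavor == SPF_SMAX)
//     ...; // Sel computes smax(LHS, RHS).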
SelectPatternResult llvm::matchSelectPattern(Value *V, Value *&LHS, Value *&RHS,
                                             Instruction::CastOps *CastOp,
                                             unsigned Depth) {}

SelectPatternResult llvm::matchDecomposedSelectPattern(
    CmpInst *CmpI, Value *TrueVal, Value *FalseVal, Value *&LHS, Value *&RHS,
    Instruction::CastOps *CastOp, unsigned Depth) {}

CmpInst::Predicate llvm::getMinMaxPred(SelectPatternFlavor SPF, bool Ordered) {}

SelectPatternFlavor llvm::getInverseMinMaxFlavor(SelectPatternFlavor SPF) {}

Intrinsic::ID llvm::getInverseMinMaxIntrinsic(Intrinsic::ID MinMaxID) {}

APInt llvm::getMinMaxLimit(SelectPatternFlavor SPF, unsigned BitWidth) {}

std::pair<Intrinsic::ID, bool>
llvm::canConvertToMinOrMaxIntrinsic(ArrayRef<Value *> VL) {}

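// Illustrative example (not from the original file): for the recurrence
//   %iv = phi i32 [ %start, %entry ], [ %iv.next, %loop ]
//   %iv.next = add i32 %iv, %step
// the match succeeds with BO = %iv.next, Start = %start and Step = %step.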
bool llvm::matchSimpleRecurrence(const PHINode *P, BinaryOperator *&BO,
                                 Value *&Start, Value *&Step) {}

bool llvm::matchSimpleRecurrence(const BinaryOperator *I, PHINode *&P,
                                 Value *&Start, Value *&Step) {}

/// Return true if "icmp Pred LHS RHS" is always true.
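/// For example (illustrative), "icmp ule X, (add nuw X, Y)" is always true,
/// because a no-unsigned-wrap add can never make the result smaller than X.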
static bool isTruePredicate(CmpInst::Predicate Pred, const Value *LHS,
                            const Value *RHS) {}

/// Return true if "icmp Pred BLHS BRHS" is true whenever "icmp Pred
/// ALHS ARHS" is true.  Otherwise, return std::nullopt.
static std::optional<bool>
isImpliedCondOperands(CmpInst::Predicate Pred, const Value *ALHS,
                      const Value *ARHS, const Value *BLHS, const Value *BRHS) {}

/// Return true if "icmp1 LPred X, Y" implies "icmp2 RPred X, Y" is true.
/// Return false if "icmp1 LPred X, Y" implies "icmp2 RPred X, Y" is false.
/// Otherwise, return std::nullopt if we can't infer anything.
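///
/// For example (illustrative): with identical operands, "icmp sgt X, Y" being
/// true implies "icmp sge X, Y" is true and implies "icmp slt X, Y" is false.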
static std::optional<bool>
isImpliedCondMatchingOperands(CmpInst::Predicate LPred,
                              CmpInst::Predicate RPred) {}

/// Return true if "icmp LPred X, LCR" implies "icmp RPred X, RCR" is true.
/// Return false if "icmp LPred X, LCR" implies "icmp RPred X, RCR" is false.
/// Otherwise, return std::nullopt if we can't infer anything.
static std::optional<bool> isImpliedCondCommonOperandWithCR(
    CmpInst::Predicate LPred, const ConstantRange &LCR,
    CmpInst::Predicate RPred, const ConstantRange &RCR) {}

/// Return true if LHS implies RHS (expanded to its components as "R0 RPred R1")
/// is true.  Return false if LHS implies RHS is false. Otherwise, return
/// std::nullopt if we can't infer anything.
static std::optional<bool> isImpliedCondICmps(const ICmpInst *LHS,
                                              CmpInst::Predicate RPred,
                                              const Value *R0, const Value *R1,
                                              const DataLayout &DL,
                                              bool LHSIsTrue) {}

/// Return true if LHS implies RHS is true.  Return false if LHS implies RHS is
/// false.  Otherwise, return std::nullopt if we can't infer anything.  We
/// expect the RHS to be an icmp and the LHS to be an 'and', 'or', or a 'select'
/// instruction.
static std::optional<bool>
isImpliedCondAndOr(const Instruction *LHS, CmpInst::Predicate RHSPred,
                   const Value *RHSOp0, const Value *RHSOp1,
                   const DataLayout &DL, bool LHSIsTrue, unsigned Depth) {}

std::optional<bool>
llvm::isImpliedCondition(const Value *LHS, CmpInst::Predicate RHSPred,
                         const Value *RHSOp0, const Value *RHSOp1,
                         const DataLayout &DL, bool LHSIsTrue, unsigned Depth) {}

std::optional<bool> llvm::isImpliedCondition(const Value *LHS, const Value *RHS,
                                             const DataLayout &DL,
                                             bool LHSIsTrue, unsigned Depth) {}

// Returns a pair (Condition, ConditionIsTrue), where Condition is a branch
// condition dominating ContextI, or nullptr if no condition is found.
static std::pair<Value *, bool>
getDomPredecessorCondition(const Instruction *ContextI) {}

std::optional<bool> llvm::isImpliedByDomCondition(const Value *Cond,
                                                  const Instruction *ContextI,
                                                  const DataLayout &DL) {}

std::optional<bool> llvm::isImpliedByDomCondition(CmpInst::Predicate Pred,
                                                  const Value *LHS,
                                                  const Value *RHS,
                                                  const Instruction *ContextI,
                                                  const DataLayout &DL) {}

static void setLimitsForBinOp(const BinaryOperator &BO, APInt &Lower,
                              APInt &Upper, const InstrInfoQuery &IIQ,
                              bool PreferSignedRange) {}

static ConstantRange getRangeForIntrinsic(const IntrinsicInst &II) {}

static ConstantRange getRangeForSelectPattern(const SelectInst &SI,
                                              const InstrInfoQuery &IIQ) {}

static void setLimitForFPToI(const Instruction *I, APInt &Lower, APInt &Upper) {}

ConstantRange llvm::computeConstantRange(const Value *V, bool ForSigned,
                                         bool UseInstrInfo, AssumptionCache *AC,
                                         const Instruction *CtxI,
                                         const DominatorTree *DT,
                                         unsigned Depth) {}

static void
addValueAffectedByCondition(Value *V,
                            function_ref<void(Value *)> InsertAffected) {}

void llvm::findValuesAffectedByCondition(
    Value *Cond, bool IsAssume, function_ref<void(Value *)> InsertAffected) {}