llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp

//===- DAGCombiner.cpp - Implement a DAG node combiner --------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass combines dag nodes to form fewer, simpler DAG nodes.  It can be run
// both before and after the DAG is legalized.
//
// This pass is not a substitute for the LLVM IR instcombine pass. This pass is
// primarily intended to handle simplification opportunities that are implicit
// in the LLVM IR and exposed by the various codegen lowering phases.
//
//===----------------------------------------------------------------------===//

#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/IntervalMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/CodeGen/ByteProvider.h"
#include "llvm/CodeGen/DAGCombine.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/RuntimeLibcallUtil.h"
#include "llvm/CodeGen/SDPatternMatch.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGAddressAnalysis.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/SelectionDAGTargetInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/CodeGenTypes/MachineValueType.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Metadata.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/DebugCounter.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <functional>
#include <iterator>
#include <optional>
#include <string>
#include <tuple>
#include <utility>
#include <variant>

#include "MatchContext.h"

using namespace llvm;
using namespace llvm::SDPatternMatch;

#define DEBUG_TYPE "dagcombine"

STATISTIC(NodesCombined   , "Number of dag nodes combined");
STATISTIC(PreIndexedNodes , "Number of pre-indexed nodes created");
STATISTIC(PostIndexedNodes, "Number of post-indexed nodes created");
STATISTIC(OpsNarrowed     , "Number of load/op/store sequences narrowed");
STATISTIC(LdStFP2Int      , "Number of fp load/store pairs transformed to int");
STATISTIC(SlicedLoads, "Number of loads sliced");
STATISTIC(NumFPLogicOpsConv, "Number of logic ops converted to fp ops");

DEBUG_COUNTER(DAGCombineCounter, "dagcombine",
              "Controls whether a DAG combine is performed for a node");

static cl::opt<bool>
CombinerGlobalAA("combiner-global-alias-analysis", cl::Hidden,
                 cl::desc("Enable DAG combiner's use of IR alias analysis"));

static cl::opt<bool>
UseTBAA("combiner-use-tbaa", cl::Hidden, cl::init(true),
        cl::desc("Enable DAG combiner's use of TBAA"));

#ifndef NDEBUG
static cl::opt<std::string>
CombinerAAOnlyFunc("combiner-aa-only-func", cl::Hidden,
                   cl::desc("Only use DAG-combiner alias analysis in this"
                            " function"));
#endif

/// Hidden option to stress test load slicing, i.e., when this option
/// is enabled, load slicing bypasses most of its profitability guards.
static cl::opt<bool>
StressLoadSlicing("combiner-stress-load-slicing", cl::Hidden,
                  cl::desc("Bypass the profitability model of load slicing"),
                  cl::init(false));

static cl::opt<bool>
  MaySplitLoadIndex("combiner-split-load-index", cl::Hidden, cl::init(true),
                    cl::desc("DAG combiner may split indexing from loads"));

static cl::opt<bool>
    EnableStoreMerging("combiner-store-merging", cl::Hidden, cl::init(true),
                       cl::desc("DAG combiner enable merging multiple stores "
                                "into a wider store"));

static cl::opt<unsigned> TokenFactorInlineLimit(
    "combiner-tokenfactor-inline-limit", cl::Hidden, cl::init(2048),
    cl::desc("Limit the number of operands to inline for Token Factors"));

static cl::opt<unsigned> StoreMergeDependenceLimit(
    "combiner-store-merge-dependence-limit", cl::Hidden, cl::init(10),
    cl::desc("Limit the number of times for the same StoreNode and RootNode "
             "to bail out in store merging dependence check"));

static cl::opt<bool> EnableReduceLoadOpStoreWidth(
    "combiner-reduce-load-op-store-width", cl::Hidden, cl::init(true),
    cl::desc("DAG combiner enable reducing the width of load/op/store "
             "sequence"));

static cl::opt<bool> EnableShrinkLoadReplaceStoreWithStore(
    "combiner-shrink-load-replace-store-with-store", cl::Hidden, cl::init(true),
    cl::desc("DAG combiner enable load/<replace bytes>/store with "
             "a narrower store"));

static cl::opt<bool> EnableVectorFCopySignExtendRound(
    "combiner-vector-fcopysign-extend-round", cl::Hidden, cl::init(false),
    cl::desc(
        "Enable merging extends and rounds into FCOPYSIGN on vector types"));

namespace {

  class DAGCombiner {};

/// This class is a DAGUpdateListener that removes any deleted
/// nodes from the worklist.
class WorklistRemover : public SelectionDAG::DAGUpdateListener {};

class WorklistInserter : public SelectionDAG::DAGUpdateListener {};

} // end anonymous namespace

//===----------------------------------------------------------------------===//
//  TargetLowering::DAGCombinerInfo implementation
//===----------------------------------------------------------------------===//

void TargetLowering::DAGCombinerInfo::AddToWorklist(SDNode *N) {}

SDValue TargetLowering::DAGCombinerInfo::
CombineTo(SDNode *N, ArrayRef<SDValue> To, bool AddTo) {}

SDValue TargetLowering::DAGCombinerInfo::
CombineTo(SDNode *N, SDValue Res, bool AddTo) {}

SDValue TargetLowering::DAGCombinerInfo::
CombineTo(SDNode *N, SDValue Res0, SDValue Res1, bool AddTo) {}

bool TargetLowering::DAGCombinerInfo::
recursivelyDeleteUnusedNodes(SDNode *N) {}

void TargetLowering::DAGCombinerInfo::
CommitTargetLoweringOpt(const TargetLowering::TargetLoweringOpt &TLO) {}

//===----------------------------------------------------------------------===//
// Helper Functions
//===----------------------------------------------------------------------===//

void DAGCombiner::deleteAndRecombine(SDNode *N) {}

// APInts must be the same size for most operations; this helper
// function zero extends the shorter of the pair so that they match.
// We provide an Offset so that we can create bitwidths that won't overflow.
static void zeroExtendToMatch(APInt &LHS, APInt &RHS, unsigned Offset = 0) {}
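
// A plausible sketch (assumed; the body is elided here) of what this helper
// does with the APInt API:
//
//   unsigned Bits = Offset + std::max(LHS.getBitWidth(), RHS.getBitWidth());
//   LHS = LHS.zext(Bits);
//   RHS = RHS.zext(Bits);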

// Return true if this node is a setcc, or is a select_cc
// that selects between the target values used for true and false, making it
// equivalent to a setcc. Also, set the incoming LHS, RHS, and CC references to
// the appropriate nodes based on the type of node we are checking. This
// simplifies life a bit for the callers.
bool DAGCombiner::isSetCCEquivalent(SDValue N, SDValue &LHS, SDValue &RHS,
                                    SDValue &CC, bool MatchStrict) const {}

/// Return true if this is a SetCC-equivalent operation with only one use.
/// If this is true, it allows the users to invert the operation for free when
/// it is profitable to do so.
bool DAGCombiner::isOneUseSetCC(SDValue N) const {}

static bool isConstantSplatVectorMaskForType(SDNode *N, EVT ScalarTy) {}

// Determines if it is a constant integer or a splat/build vector of constant
// integers (and undefs).
// Do not permit build vector implicit truncation.
static bool isConstantOrConstantVector(SDValue N, bool NoOpaques = false) {}

// Determines if a BUILD_VECTOR is composed of all-constants possibly mixed with
// undefs.
static bool isAnyConstantBuildVector(SDValue V, bool NoOpaques = false) {}

// Determine if this is an indexed load with an opaque target constant index.
static bool canSplitIdx(LoadSDNode *LD) {}

bool DAGCombiner::reassociationCanBreakAddressingModePattern(unsigned Opc,
                                                             const SDLoc &DL,
                                                             SDNode *N,
                                                             SDValue N0,
                                                             SDValue N1) {}

/// Helper for DAGCombiner::reassociateOps. Try to reassociate (Opc N0, N1) if
/// \p N0 is the same kind of operation as \p Opc.
SDValue DAGCombiner::reassociateOpsCommutative(unsigned Opc, const SDLoc &DL,
                                               SDValue N0, SDValue N1,
                                               SDNodeFlags Flags) {}

/// Try to reassociate commutative (Opc N0, N1) if either \p N0 or \p N1 is the
/// same kind of operation as \p Opc.
SDValue DAGCombiner::reassociateOps(unsigned Opc, const SDLoc &DL, SDValue N0,
                                    SDValue N1, SDNodeFlags Flags) {}

// Try to fold Opc(vecreduce(x), vecreduce(y)) -> vecreduce(Opc(x, y))
// Note that we only expect Flags to be passed from FP operations. For integer
// operations they need to be dropped.
SDValue DAGCombiner::reassociateReduction(unsigned RedOpc, unsigned Opc,
                                          const SDLoc &DL, EVT VT, SDValue N0,
                                          SDValue N1, SDNodeFlags Flags) {}

SDValue DAGCombiner::CombineTo(SDNode *N, const SDValue *To, unsigned NumTo,
                               bool AddTo) {}

void DAGCombiner::
CommitTargetLoweringOpt(const TargetLowering::TargetLoweringOpt &TLO) {}

/// Check the specified integer node value to see if it can be simplified or if
/// things it uses can be simplified by bit propagation. If so, return true.
bool DAGCombiner::SimplifyDemandedBits(SDValue Op, const APInt &DemandedBits,
                                       const APInt &DemandedElts,
                                       bool AssumeSingleUse) {}

/// Check the specified vector node value to see if it can be simplified or
/// if things it uses can be simplified as it only uses some of the elements.
/// If so, return true.
bool DAGCombiner::SimplifyDemandedVectorElts(SDValue Op,
                                             const APInt &DemandedElts,
                                             bool AssumeSingleUse) {}

void DAGCombiner::ReplaceLoadWithPromotedLoad(SDNode *Load, SDNode *ExtLoad) {}

SDValue DAGCombiner::PromoteOperand(SDValue Op, EVT PVT, bool &Replace) {}

SDValue DAGCombiner::SExtPromoteOperand(SDValue Op, EVT PVT) {}

SDValue DAGCombiner::ZExtPromoteOperand(SDValue Op, EVT PVT) {}

/// Promote the specified integer binary operation if the target indicates it is
/// beneficial. e.g. On x86, it's usually better to promote i16 operations to
/// i32 since i16 instructions are longer.
SDValue DAGCombiner::PromoteIntBinOp(SDValue Op) {}

/// Promote the specified integer shift operation if the target indicates it is
/// beneficial. e.g. On x86, it's usually better to promote i16 operations to
/// i32 since i16 instructions are longer.
SDValue DAGCombiner::PromoteIntShiftOp(SDValue Op) {}

SDValue DAGCombiner::PromoteExtend(SDValue Op) {}

bool DAGCombiner::PromoteLoad(SDValue Op) {}

/// Recursively delete a node which has no uses and any operands for
/// which it is the only use.
///
/// Note that this both deletes the nodes and removes them from the worklist.
/// It also adds any nodes that have had a user deleted to the worklist, as
/// they may now have only one use and be subject to other combines.
bool DAGCombiner::recursivelyDeleteUnusedNodes(SDNode *N) {}

//===----------------------------------------------------------------------===//
//  Main DAG Combiner implementation
//===----------------------------------------------------------------------===//

void DAGCombiner::Run(CombineLevel AtLevel) {}

SDValue DAGCombiner::visit(SDNode *N) {}

SDValue DAGCombiner::combine(SDNode *N) {}

/// Given a node, return its input chain if it has one, otherwise return a null
/// SDValue.
static SDValue getInputChainForNode(SDNode *N) {}

SDValue DAGCombiner::visitFCANONICALIZE(SDNode *N) {}

SDValue DAGCombiner::visitTokenFactor(SDNode *N) {}

/// MERGE_VALUES can always be eliminated.
SDValue DAGCombiner::visitMERGE_VALUES(SDNode *N) {}

/// If \p N is a ConstantSDNode with isOpaque() == false return it casted to a
/// ConstantSDNode pointer else nullptr.
static ConstantSDNode *getAsNonOpaqueConstant(SDValue N) {}

// isTruncateOf - If N is a truncate of some other value, return true, record
// the value being truncated in Op and which of Op's bits are zero/one in Known.
// This function computes KnownBits to avoid a duplicated call to
// computeKnownBits in the caller.
static bool isTruncateOf(SelectionDAG &DAG, SDValue N, SDValue &Op,
                         KnownBits &Known) {}

/// Return true if 'Use' is a load or a store that uses N as its base pointer
/// and that N may be folded in the load / store addressing mode.
static bool canFoldInAddressingMode(SDNode *N, SDNode *Use, SelectionDAG &DAG,
                                    const TargetLowering &TLI) {}

/// This inverts a canonicalization in IR that replaces a variable select arm
/// with an identity constant. Codegen improves if we re-use the variable
/// operand rather than load a constant. This can also be converted into a
/// masked vector operation if the target supports it.
static SDValue foldSelectWithIdentityConstant(SDNode *N, SelectionDAG &DAG,
                                              bool ShouldCommuteOperands) {}

SDValue DAGCombiner::foldBinOpIntoSelect(SDNode *BO) {}

static SDValue foldAddSubBoolOfMaskedVal(SDNode *N, const SDLoc &DL,
                                         SelectionDAG &DAG) {}

// Attempt to form avgceil(A, B) from (A | B) - ((A ^ B) >> 1)
SDValue DAGCombiner::foldSubToAvg(SDNode *N, const SDLoc &DL) {}
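
// A minimal illustrative sketch (not from this file) of the identity behind
// this fold, on scalar unsigned integers:
//
//   uint8_t avgceil_u8(uint8_t a, uint8_t b) {   // hypothetical helper name
//     return (a | b) - ((a ^ b) >> 1);           // == ceil((a + b) / 2)
//   }
//
// e.g. a = 3, b = 6: (3 | 6) - ((3 ^ 6) >> 1) = 7 - 2 = 5 = ceil(9 / 2),
// computed without the potentially overflowing intermediate a + b.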

/// Try to fold a 'not' shifted sign-bit with add/sub with constant operand into
/// a shift and add with a different constant.
static SDValue foldAddSubOfSignBit(SDNode *N, const SDLoc &DL,
                                   SelectionDAG &DAG) {}

static bool
areBitwiseNotOfEachother(SDValue Op0, SDValue Op1) {}

/// Try to fold a node that behaves like an ADD (note that N isn't necessarily
/// an ISD::ADD here, it could for example be an ISD::OR if we know that there
/// are no common bits set in the operands).
SDValue DAGCombiner::visitADDLike(SDNode *N) {}

// Attempt to form avgfloor(A, B) from (A & B) + ((A ^ B) >> 1)
SDValue DAGCombiner::foldAddToAvg(SDNode *N, const SDLoc &DL) {}
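
// Likewise illustrative: avgfloor via (a & b) + ((a ^ b) >> 1), e.g.
// a = 3, b = 6: (3 & 6) + ((3 ^ 6) >> 1) = 2 + 2 = 4 = floor(9 / 2),
// again without computing the overflowing sum a + b.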

SDValue DAGCombiner::visitADD(SDNode *N) {}

SDValue DAGCombiner::visitADDSAT(SDNode *N) {}

static SDValue getAsCarry(const TargetLowering &TLI, SDValue V,
                          bool ForceCarryReconstruction = false) {}

/// Given the operands of an add/sub operation, see if the 2nd operand is a
/// masked 0/1 whose source operand is actually known to be 0/-1. If so, invert
/// the opcode and bypass the mask operation.
static SDValue foldAddSubMasked1(bool IsAdd, SDValue N0, SDValue N1,
                                 SelectionDAG &DAG, const SDLoc &DL) {}

/// Helper for doing combines based on N0 and N1 being added to each other.
SDValue DAGCombiner::visitADDLikeCommutative(SDValue N0, SDValue N1,
                                             SDNode *LocReference) {}

SDValue DAGCombiner::visitADDC(SDNode *N) {}

/**
 * Flips a boolean if it is cheaper to compute. If the Force parameter is set,
 * then the flip also occurs if computing the inverse is the same cost.
 * This function returns an empty SDValue in case it cannot flip the boolean
 * without increasing the cost of the computation. If you want to flip a boolean
 * no matter what, use DAG.getLogicalNOT.
 */
static SDValue extractBooleanFlip(SDValue V, SelectionDAG &DAG,
                                  const TargetLowering &TLI,
                                  bool Force) {}

SDValue DAGCombiner::visitADDO(SDNode *N) {}

SDValue DAGCombiner::visitUADDOLike(SDValue N0, SDValue N1, SDNode *N) {}

SDValue DAGCombiner::visitADDE(SDNode *N) {}

SDValue DAGCombiner::visitUADDO_CARRY(SDNode *N) {}

/**
 * If we are facing some sort of diamond carry propagation pattern try to
 * break it up to generate something like:
 *   (uaddo_carry X, 0, (uaddo_carry A, B, Z):Carry)
 *
 * The end result is usually an increase in the number of operations required,
 * but because the carry is now linearized, other transforms can kick in and
 * optimize the DAG.
 *
 * Patterns typically look something like
 *                (uaddo A, B)
 *                /          \
 *             Carry         Sum
 *               |             \
 *               | (uaddo_carry *, 0, Z)
 *               |       /
 *                \   Carry
 *                 |   /
 * (uaddo_carry X, *, *)
 *
 * But numerous variations exist. Our goal is to identify A, B, X and Z and
 * produce a combine with a single path for carry propagation.
 */
static SDValue combineUADDO_CARRYDiamond(DAGCombiner &Combiner,
                                         SelectionDAG &DAG, SDValue X,
                                         SDValue Carry0, SDValue Carry1,
                                         SDNode *N) {}

// If we are facing some sort of diamond carry/borrow in/out pattern try to
// match patterns like:
//
//          (uaddo A, B)            CarryIn
//            |  \                     |
//            |   \                    |
//    PartialSum   PartialCarryOutX   /
//            |        |             /
//            |    ____|____________/
//            |   /    |
//     (uaddo *, *)    \________
//       |  \                   \
//       |   \                   |
//       |    PartialCarryOutY   |
//       |        \              |
//       |         \            /
//   AddCarrySum    |    ______/
//                  |   /
//   CarryOut = (or *, *)
//
// And generate UADDO_CARRY (or USUBO_CARRY) with two result values:
//
//    {AddCarrySum, CarryOut} = (uaddo_carry A, B, CarryIn)
//
// Our goal is to identify A, B, and CarryIn and produce UADDO_CARRY/USUBO_CARRY
// with a single path for carry/borrow out propagation.
static SDValue combineCarryDiamond(SelectionDAG &DAG, const TargetLowering &TLI,
                                   SDValue N0, SDValue N1, SDNode *N) {}

SDValue DAGCombiner::visitUADDO_CARRYLike(SDValue N0, SDValue N1,
                                          SDValue CarryIn, SDNode *N) {}

SDValue DAGCombiner::visitSADDO_CARRYLike(SDValue N0, SDValue N1,
                                          SDValue CarryIn, SDNode *N) {}

SDValue DAGCombiner::visitSADDO_CARRY(SDNode *N) {}

// Attempt to create a USUBSAT(LHS, RHS) node with DstVT, performing a
// clamp/truncation if necessary.
static SDValue getTruncatedUSUBSAT(EVT DstVT, EVT SrcVT, SDValue LHS,
                                   SDValue RHS, SelectionDAG &DAG,
                                   const SDLoc &DL) {}

// Try to find umax(a,b) - b or a - umin(a,b) patterns that may be converted to
// usubsat(a,b), optionally as a truncated type.
SDValue DAGCombiner::foldSubToUSubSat(EVT DstVT, SDNode *N, const SDLoc &DL) {}

// Refinement of DAG/Type Legalisation (promotion) when CTLZ is used for
// counting leading ones. Broadly, it replaces the subtraction with a left
// shift.
//
// * DAG Legalisation Pattern:
//
//     (sub (ctlz (zeroextend (not Src)))
//          BitWidthDiff)
//
//       if BitWidthDiff == BitWidth(Node) - BitWidth(Src)
//       -->
//
//     (ctlz_zero_undef (not (shl (anyextend Src)
//                                BitWidthDiff)))
//
// * Type Legalisation Pattern:
//
//     (sub (ctlz (and (xor Src XorMask)
//                     AndMask))
//          BitWidthDiff)
//
//       if AndMask has only trailing ones
//       and MaskBitWidth(AndMask) == BitWidth(Node) - BitWidthDiff
//       and XorMask has more trailing ones than AndMask
//       -->
//
//     (ctlz_zero_undef (not (shl Src BitWidthDiff)))
template <class MatchContextClass>
static SDValue foldSubCtlzNot(SDNode *N, SelectionDAG &DAG) {}
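
// Worked example (illustrative) for the DAG legalisation pattern with an i8
// source counted through an i32 ctlz, so BitWidthDiff = 32 - 8 = 24:
//   Src = 0b1110'0001 (three leading ones)
//   Before: ctlz(zext i32 (not Src)) - 24 = ctlz(0x0000001E) - 24 = 27 - 24 = 3
//   After:  ctlz_zero_undef(not (anyext Src << 24))
//         = ctlz_zero_undef(~0xE1000000) = ctlz_zero_undef(0x1EFFFFFF) = 3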

// Since it may not be valid to emit a fold to zero for vector initializers
// check if we can before folding.
static SDValue tryFoldToZero(const SDLoc &DL, const TargetLowering &TLI, EVT VT,
                             SelectionDAG &DAG, bool LegalOperations) {}

SDValue DAGCombiner::visitSUB(SDNode *N) {}

SDValue DAGCombiner::visitSUBSAT(SDNode *N) {}

SDValue DAGCombiner::visitSUBC(SDNode *N) {}

SDValue DAGCombiner::visitSUBO(SDNode *N) {}

SDValue DAGCombiner::visitSUBE(SDNode *N) {}

SDValue DAGCombiner::visitUSUBO_CARRY(SDNode *N) {}

SDValue DAGCombiner::visitSSUBO_CARRY(SDNode *N) {}

// Notice that "mulfix" can be any of SMULFIX, SMULFIXSAT, UMULFIX and
// UMULFIXSAT here.
SDValue DAGCombiner::visitMULFIX(SDNode *N) {}

template <class MatchContextClass> SDValue DAGCombiner::visitMUL(SDNode *N) {}

/// Return true if a divmod libcall is available.
static bool isDivRemLibcallAvailable(SDNode *Node, bool isSigned,
                                     const TargetLowering &TLI) {}

/// Issue divrem if both quotient and remainder are needed.
SDValue DAGCombiner::useDivRem(SDNode *Node) {}

static SDValue simplifyDivRem(SDNode *N, SelectionDAG &DAG) {}

SDValue DAGCombiner::visitSDIV(SDNode *N) {}

static bool isDivisorPowerOfTwo(SDValue Divisor) {}

SDValue DAGCombiner::visitSDIVLike(SDValue N0, SDValue N1, SDNode *N) {}

SDValue DAGCombiner::visitUDIV(SDNode *N) {}

SDValue DAGCombiner::visitUDIVLike(SDValue N0, SDValue N1, SDNode *N) {}

SDValue DAGCombiner::buildOptimizedSREM(SDValue N0, SDValue N1, SDNode *N) {}

// handles ISD::SREM and ISD::UREM
SDValue DAGCombiner::visitREM(SDNode *N) {}

SDValue DAGCombiner::visitMULHS(SDNode *N) {}

SDValue DAGCombiner::visitMULHU(SDNode *N) {}

SDValue DAGCombiner::visitAVG(SDNode *N) {}

SDValue DAGCombiner::visitABD(SDNode *N) {}

/// Perform optimizations common to nodes that compute two values. LoOp and HiOp
/// give the opcodes for the two computations that are being performed. Return
/// true if a simplification was made.
SDValue DAGCombiner::SimplifyNodeWithTwoResults(SDNode *N, unsigned LoOp,
                                                unsigned HiOp) {}

SDValue DAGCombiner::visitSMUL_LOHI(SDNode *N) {}

SDValue DAGCombiner::visitUMUL_LOHI(SDNode *N) {}

SDValue DAGCombiner::visitMULO(SDNode *N) {}

// Function to calculate whether the Min/Max pair of SDNodes (potentially
// swapped around) makes a signed saturate pattern, clamping the value between
// a signed saturate of -2^(BW-1) and 2^(BW-1)-1, or an unsigned saturate of 0
// and 2^BW.
// Returns the node being clamped and the bitwidth of the clamp in BW. Should
// work with both SMIN/SMAX nodes and setcc/select combo. The operands are the
// same as SimplifySelectCC. N0<N1 ? N2 : N3.
static SDValue isSaturatingMinMax(SDValue N0, SDValue N1, SDValue N2,
                                  SDValue N3, ISD::CondCode CC, unsigned &BW,
                                  bool &Unsigned, SelectionDAG &DAG) {}

static SDValue PerformMinMaxFpToSatCombine(SDValue N0, SDValue N1, SDValue N2,
                                           SDValue N3, ISD::CondCode CC,
                                           SelectionDAG &DAG) {}

static SDValue PerformUMinFpToSatCombine(SDValue N0, SDValue N1, SDValue N2,
                                         SDValue N3, ISD::CondCode CC,
                                         SelectionDAG &DAG) {}

SDValue DAGCombiner::visitIMINMAX(SDNode *N) {}

/// If this is a bitwise logic instruction and both operands have the same
/// opcode, try to sink the other opcode after the logic instruction.
SDValue DAGCombiner::hoistLogicOpWithSameOpcodeHands(SDNode *N) {}

/// Try to make (and/or setcc (LL, LR), setcc (RL, RR)) more efficient.
SDValue DAGCombiner::foldLogicOfSetCCs(bool IsAnd, SDValue N0, SDValue N1,
                                       const SDLoc &DL) {}

static bool arebothOperandsNotSNan(SDValue Operand1, SDValue Operand2,
                                   SelectionDAG &DAG) {}

static bool arebothOperandsNotNan(SDValue Operand1, SDValue Operand2,
                                  SelectionDAG &DAG) {}

// FIXME: use FMINIMUMNUM if possible, such as for RISC-V.
static unsigned getMinMaxOpcodeForFP(SDValue Operand1, SDValue Operand2,
                                     ISD::CondCode CC, unsigned OrAndOpcode,
                                     SelectionDAG &DAG,
                                     bool isFMAXNUMFMINNUM_IEEE,
                                     bool isFMAXNUMFMINNUM) {}

static SDValue foldAndOrOfSETCC(SDNode *LogicOp, SelectionDAG &DAG) {}

// Combine `(select c, (X & 1), 0)` -> `(and (zext c), X)`.
// We canonicalize to the `select` form in the middle end, but the `and` form
// gets better codegen on all tested targets (arm, x86, riscv).
static SDValue combineSelectAsExtAnd(SDValue Cond, SDValue T, SDValue F,
                                     const SDLoc &DL, SelectionDAG &DAG) {}

/// This contains all DAGCombine rules which reduce two values combined by
/// an And operation to a single value. This makes them reusable in the context
/// of visitSELECT(). Rules involving constants are not included as
/// visitSELECT() already handles those cases.
SDValue DAGCombiner::visitANDLike(SDValue N0, SDValue N1, SDNode *N) {}

bool DAGCombiner::isAndLoadExtLoad(ConstantSDNode *AndC, LoadSDNode *LoadN,
                                   EVT LoadResultTy, EVT &ExtVT) {}

bool DAGCombiner::isLegalNarrowLdSt(LSBaseSDNode *LDST,
                                    ISD::LoadExtType ExtType, EVT &MemVT,
                                    unsigned ShAmt) {}

bool DAGCombiner::SearchForAndLoads(SDNode *N,
                                    SmallVectorImpl<LoadSDNode*> &Loads,
                                    SmallPtrSetImpl<SDNode*> &NodesWithConsts,
                                    ConstantSDNode *Mask,
                                    SDNode *&NodeToMask) {}

bool DAGCombiner::BackwardsPropagateMask(SDNode *N) {}

// Unfold
//    x &  (-1 'logical shift' y)
// To
//    (x 'opposite logical shift' y) 'logical shift' y
// if it is better for performance.
SDValue DAGCombiner::unfoldExtremeBitClearingToShifts(SDNode *N) {}
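
// Illustrative scalar form of the unfold, assuming unsigned (logical) shifts:
//
//   x & (~0u << y)  ==  (x >> y) << y   // clear the y lowest bits
//   x & (~0u >> y)  ==  (x << y) >> y   // clear the y highest bits
//
// The shift form avoids materializing the mask when that is cheaper.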

/// Try to replace shift/logic that tests if a bit is clear with mask + setcc.
/// For a target with a bit test, this is expected to become test + set and save
/// at least 1 instruction.
static SDValue combineShiftAnd1ToBitTest(SDNode *And, SelectionDAG &DAG) {}
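
// The underlying equivalence (illustrative, scalar form): testing that bit c
// of x is clear via shift + logic
//
//   ((~x >> c) & 1) != 0
//
// is the same as the mask + compare form
//
//   (x & (1u << c)) == 0
//
// which a target with a bit-test instruction can lower more cheaply.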

/// For targets that support usubsat, match a bit-hack form of that operation
/// that ends in 'and' and convert it.
static SDValue foldAndToUsubsat(SDNode *N, SelectionDAG &DAG, const SDLoc &DL) {}

/// Given a bitwise logic operation N with a matching bitwise logic operand,
/// fold a pattern where 2 of the source operands are identically shifted
/// values. For example:
/// ((X0 << Y) | Z) | (X1 << Y) --> ((X0 | X1) << Y) | Z
static SDValue foldLogicOfShifts(SDNode *N, SDValue LogicOp, SDValue ShiftOp,
                                 SelectionDAG &DAG) {}

/// Given a tree of logic operations with shape like
/// (LOGIC (LOGIC (X, Y), LOGIC (Z, Y)))
/// try to match and fold shift operations with the same shift amount.
/// For example:
/// LOGIC (LOGIC (SH X0, Y), Z), (LOGIC (SH X1, Y), W) -->
/// --> LOGIC (SH (LOGIC X0, X1), Y), (LOGIC Z, W)
static SDValue foldLogicTreeOfShifts(SDNode *N, SDValue LeftHand,
                                     SDValue RightHand, SelectionDAG &DAG) {}

SDValue DAGCombiner::visitAND(SDNode *N) {}

/// Match (a >> 8) | (a << 8) as (bswap a) >> 16.
SDValue DAGCombiner::MatchBSwapHWordLow(SDNode *N, SDValue N0, SDValue N1,
                                        bool DemandHighBits) {}

/// Return true if the specified node is an element that makes up a 32-bit
/// packed halfword byteswap.
/// ((x & 0x000000ff) << 8) |
/// ((x & 0x0000ff00) >> 8) |
/// ((x & 0x00ff0000) << 8) |
/// ((x & 0xff000000) >> 8)
static bool isBSwapHWordElement(SDValue N, MutableArrayRef<SDNode *> Parts) {}

// Match 2 elements of a packed halfword bswap.
static bool isBSwapHWordPair(SDValue N, MutableArrayRef<SDNode *> Parts) {}

// Match this pattern:
//   (or (and (shl (A, 8)), 0xff00ff00), (and (srl (A, 8)), 0x00ff00ff))
// And rewrite this to:
//   (rotr (bswap A), 16)
static SDValue matchBSwapHWordOrAndAnd(const TargetLowering &TLI,
                                       SelectionDAG &DAG, SDNode *N, SDValue N0,
                                       SDValue N1, EVT VT) {}

/// Match a 32-bit packed halfword bswap. That is
/// ((x & 0x000000ff) << 8) |
/// ((x & 0x0000ff00) >> 8) |
/// ((x & 0x00ff0000) << 8) |
/// ((x & 0xff000000) >> 8)
/// => (rotl (bswap x), 16)
SDValue DAGCombiner::MatchBSwapHWord(SDNode *N, SDValue N0, SDValue N1) {}
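
// Worked example: x = 0xAABBCCDD. The four masked terms above give
//   0x0000DD00 | 0x000000CC | 0xBB000000 | 0x00AA0000 = 0xBBAADDCC,
// and (rotl (bswap 0xAABBCCDD), 16) = (rotl 0xDDCCBBAA, 16) = 0xBBAADDCC.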

/// This contains all DAGCombine rules which reduce two values combined by
/// an Or operation to a single value \see visitANDLike().
SDValue DAGCombiner::visitORLike(SDValue N0, SDValue N1, const SDLoc &DL) {}

/// OR combines for which the commuted variant will be tried as well.
static SDValue visitORCommutative(SelectionDAG &DAG, SDValue N0, SDValue N1,
                                  SDNode *N) {}

SDValue DAGCombiner::visitOR(SDNode *N) {}

static SDValue stripConstantMask(const SelectionDAG &DAG, SDValue Op,
                                 SDValue &Mask) {}

/// Match "(X shl/srl V1) & V2" where V2 may not be present.
static bool matchRotateHalf(const SelectionDAG &DAG, SDValue Op, SDValue &Shift,
                            SDValue &Mask) {}

/// Helper function for visitOR to extract the needed side of a rotate idiom
/// from a shl/srl/mul/udiv.  This is meant to handle cases where
/// InstCombine merged some outside op with one of the shifts from
/// the rotate pattern.
/// \returns An empty \c SDValue if the needed shift couldn't be extracted.
/// Otherwise, returns an expansion of \p ExtractFrom based on the following
/// patterns:
///
///   (or (add v v) (shrl v bitwidth-1)):
///     expands (add v v) -> (shl v 1)
///
///   (or (mul v c0) (shrl (mul v c1) c2)):
///     expands (mul v c0) -> (shl (mul v c1) c3)
///
///   (or (udiv v c0) (shl (udiv v c1) c2)):
///     expands (udiv v c0) -> (shrl (udiv v c1) c3)
///
///   (or (shl v c0) (shrl (shl v c1) c2)):
///     expands (shl v c0) -> (shl (shl v c1) c3)
///
///   (or (shrl v c0) (shl (shrl v c1) c2)):
///     expands (shrl v c0) -> (shrl (shrl v c1) c3)
///
/// Such that in all cases, c3+c2==bitwidth(op v c1).
static SDValue extractShiftForRotate(SelectionDAG &DAG, SDValue OppShift,
                                     SDValue ExtractFrom, SDValue &Mask,
                                     const SDLoc &DL) {}

// Return true if we can prove that, whenever Neg and Pos are both in the
// range [0, EltSize), Neg == (Pos == 0 ? 0 : EltSize - Pos).  This means that
// for two opposing shifts shift1 and shift2 and a value X with OpBits bits:
//
//     (or (shift1 X, Neg), (shift2 X, Pos))
//
// reduces to a rotate in direction shift2 by Pos or (equivalently) a rotate
// in direction shift1 by Neg.  The range [0, EltSize) means that we only need
// to consider shift amounts with defined behavior.
//
// The IsRotate flag should be set when the LHS of both shifts is the same.
// Otherwise if matching a general funnel shift, it should be clear.
static bool matchRotateSub(SDValue Pos, SDValue Neg, unsigned EltSize,
                           SelectionDAG &DAG, bool IsRotate) {}

// A subroutine of MatchRotate used once we have found an OR of two opposite
// shifts of Shifted.  If Neg == <operand size> - Pos then the OR reduces
// to both (PosOpcode Shifted, Pos) and (NegOpcode Shifted, Neg), with the
// former being preferred if supported.  InnerPos and InnerNeg are Pos and
// Neg with outer conversions stripped away.
SDValue DAGCombiner::MatchRotatePosNeg(SDValue Shifted, SDValue Pos,
                                       SDValue Neg, SDValue InnerPos,
                                       SDValue InnerNeg, bool HasPos,
                                       unsigned PosOpcode, unsigned NegOpcode,
                                       const SDLoc &DL) {}

// A subroutine of MatchRotate used once we have found an OR of two opposite
// shifts of N0 + N1.  If Neg == <operand size> - Pos then the OR reduces
// to both (PosOpcode N0, N1, Pos) and (NegOpcode N0, N1, Neg), with the
// former being preferred if supported.  InnerPos and InnerNeg are Pos and
// Neg with outer conversions stripped away.
// TODO: Merge with MatchRotatePosNeg.
SDValue DAGCombiner::MatchFunnelPosNeg(SDValue N0, SDValue N1, SDValue Pos,
                                       SDValue Neg, SDValue InnerPos,
                                       SDValue InnerNeg, bool HasPos,
                                       unsigned PosOpcode, unsigned NegOpcode,
                                       const SDLoc &DL) {}

// MatchRotate - Handle an 'or' of two operands.  If this is one of the many
// idioms for rotate, and if the target supports rotation instructions, generate
// a rot[lr]. This also matches funnel shift patterns, similar to rotation but
// with different shifted sources.
SDValue DAGCombiner::MatchRotate(SDValue LHS, SDValue RHS, const SDLoc &DL) {}
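
// Canonical example of an idiom this matches, in scalar C (illustrative):
//
//   uint32_t rotl32(uint32_t x, uint32_t c) {  // hypothetical helper
//     return (x << c) | (x >> (32 - c));       // rot[lr] candidate, 0 < c < 32
//   }
//
// With different sources feeding the two shifts, the same shape matches the
// funnel-shift (fshl/fshr) patterns instead.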

/// Recursively traverses the expression calculating the origin of the requested
/// byte of the given value. Returns std::nullopt if the provider can't be
/// calculated.
///
/// For all the values except the root of the expression, we verify that the
/// value has exactly one use and if not then return std::nullopt. This way if
/// the origin of the byte is returned it's guaranteed that the values which
/// contribute to the byte are not used outside of this expression.
///
/// However, there is a special case when dealing with vector loads -- we allow
/// more than one use if the load is a vector type.  Since the values that
/// contribute to the byte ultimately come from the ExtractVectorElements of the
/// Load, we don't care if the Load has uses other than ExtractVectorElements,
/// because those operations are independent from the pattern to be combined.
/// For vector loads, we simply care that the ByteProviders are adjacent
/// positions of the same vector, and their index matches the byte that is being
/// provided. This is captured by the \p VectorIndex algorithm. \p VectorIndex
/// is the index used in an ExtractVectorElement, and \p StartingIndex is the
/// byte position we are trying to provide for the LoadCombine. If these do
/// not match, then we can not combine the vector loads. \p Index uses the
/// byte position we are trying to provide for and is matched against the
/// shl and load size. The \p Index algorithm ensures the requested byte is
/// provided for by the pattern, and the pattern does not over provide bytes.
///
///
/// The supported LoadCombine pattern for vector loads is as follows
///                              or
///                          /        \
///                         or        shl
///                       /     \      |
///                     or      shl   zext
///                   /    \     |     |
///                 shl   zext  zext  EVE*
///                  |     |     |     |
///                 zext  EVE*  EVE*  LOAD
///                  |     |     |
///                 EVE*  LOAD  LOAD
///                  |
///                 LOAD
///
/// *ExtractVectorElement
using SDByteProvider = ByteProvider<SDNode *>;

static std::optional<SDByteProvider>
calculateByteProvider(SDValue Op, unsigned Index, unsigned Depth,
                      std::optional<uint64_t> VectorIndex,
                      unsigned StartingIndex = 0) {}

static unsigned littleEndianByteAt(unsigned BW, unsigned i) {}

static unsigned bigEndianByteAt(unsigned BW, unsigned i) {}
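
// Presumably these map a byte index i within a BW-byte value to its memory
// position: little endian stores byte i at offset i, big endian at
// BW - i - 1. e.g. BW = 4: little {0,1,2,3} -> {0,1,2,3}, big -> {3,2,1,0}.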

// Check if the byte offsets we are looking at match either a big or a little
// endian value being loaded. Return true for big endian, false for little
// endian, and std::nullopt if the match failed.
static std::optional<bool> isBigEndian(const ArrayRef<int64_t> ByteOffsets,
                                       int64_t FirstOffset) {}

// Look through one layer of truncate or extend.
static SDValue stripTruncAndExt(SDValue Value) {}

/// Match a pattern where a wide type scalar value is stored by several narrow
/// stores. Fold it into a single store or a BSWAP and a store if the targets
/// supports it.
///
/// Assuming little endian target:
///  i8 *p = ...
///  i32 val = ...
///  p[0] = (val >> 0) & 0xFF;
///  p[1] = (val >> 8) & 0xFF;
///  p[2] = (val >> 16) & 0xFF;
///  p[3] = (val >> 24) & 0xFF;
/// =>
///  *((i32)p) = val;
///
///  i8 *p = ...
///  i32 val = ...
///  p[0] = (val >> 24) & 0xFF;
///  p[1] = (val >> 16) & 0xFF;
///  p[2] = (val >> 8) & 0xFF;
///  p[3] = (val >> 0) & 0xFF;
/// =>
///  *((i32)p) = BSWAP(val);
SDValue DAGCombiner::mergeTruncStores(StoreSDNode *N) {}

/// Match a pattern where a wide type scalar value is loaded by several narrow
/// loads and combined by shifts and ors. Fold it into a single load or a load
/// and a BSWAP if the target supports it.
///
/// Assuming little endian target:
///  i8 *a = ...
///  i32 val = a[0] | (a[1] << 8) | (a[2] << 16) | (a[3] << 24)
/// =>
///  i32 val = *((i32)a)
///
///  i8 *a = ...
///  i32 val = (a[0] << 24) | (a[1] << 16) | (a[2] << 8) | a[3]
/// =>
///  i32 val = BSWAP(*((i32)a))
///
/// TODO: This rule matches complex patterns with OR node roots and doesn't
/// interact well with the worklist mechanism. When a part of the pattern is
/// updated (e.g. one of the loads) its direct users are put into the worklist,
/// but the root node of the pattern which triggers the load combine is not
/// necessarily a direct user of the changed node. For example, once the address
/// of t28 load is reassociated load combine won't be triggered:
///             t25: i32 = add t4, Constant:i32<2>
///           t26: i64 = sign_extend t25
///        t27: i64 = add t2, t26
///       t28: i8,ch = load<LD1[%tmp9]> t0, t27, undef:i64
///     t29: i32 = zero_extend t28
///   t32: i32 = shl t29, Constant:i8<8>
/// t33: i32 = or t23, t32
/// As a possible fix visitLoad can check if the load can be a part of a load
/// combine pattern and add corresponding OR roots to the worklist.
SDValue DAGCombiner::MatchLoadCombine(SDNode *N) {}

// If the target has andn, bsl, or a similar bit-select instruction,
// we want to unfold masked merge, with canonical pattern of:
//   |        A  |  |B|
//   ((x ^ y) & m) ^ y
//    |  D  |
// Into:
//   (x & m) | (y & ~m)
// If y is a constant, m is not a 'not', and the 'andn' does not work with
// immediates, we unfold into a different pattern:
//   ~(~x & m) & (m | y)
// If x is a constant, m is a 'not', and the 'andn' does not work with
// immediates, we unfold into a different pattern:
//   (x | ~m) & ~(~m & ~y)
// NOTE: we don't unfold the pattern if 'xor' is actually a 'not', because at
//       the very least that breaks andnpd / andnps patterns, and because those
//       patterns are simplified in IR and shouldn't be created in the DAG
SDValue DAGCombiner::unfoldMaskedMerge(SDNode *N) {}
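
// Per-bit check of the canonical identity (illustrative): for each bit,
//   ((x ^ y) & m) ^ y  ==  (x & m) | (y & ~m)
// m = 1 selects x: (x ^ y) ^ y = x;  m = 0 selects y: 0 ^ y = y.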

SDValue DAGCombiner::visitXOR(SDNode *N) {}

/// If we have a shift-by-constant of a bitwise logic op that itself has a
/// shift-by-constant operand with identical opcode, we may be able to convert
/// that into 2 independent shifts followed by the logic op. This is a
/// throughput improvement.
static SDValue combineShiftOfShiftedLogic(SDNode *Shift, SelectionDAG &DAG) {}
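
// Illustrative instance, using that shl distributes over and/or/xor:
//   (shl (or (shl x, c0), y), c1) --> (or (shl x, c0 + c1), (shl y, c1))
// The two resulting shifts are independent, so they can execute in parallel.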

/// Handle transforms common to the three shifts, when the shift amount is a
/// constant.
/// We are looking for: (shift being one of shl/sra/srl)
///   shift (binop X, C0), C1
/// And want to transform into:
///   binop (shift X, C1), (shift C0, C1)
SDValue DAGCombiner::visitShiftByConstant(SDNode *N) {}

SDValue DAGCombiner::distributeTruncateThroughAnd(SDNode *N) {}

SDValue DAGCombiner::visitRotate(SDNode *N) {}

SDValue DAGCombiner::visitSHL(SDNode *N) {}

// Transform a right shift of a multiply into a multiply-high.
// Examples:
// (srl (mul (zext i32:$a to i64), (zext i32:$b to i64)), 32) -> (mulhu $a, $b)
// (sra (mul (sext i32:$a to i64), (sext i32:$b to i64)), 32) -> (mulhs $a, $b)
static SDValue combineShiftToMULH(SDNode *N, const SDLoc &DL, SelectionDAG &DAG,
                                  const TargetLowering &TLI) {}
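
// Numeric check (illustrative): a = 0x80000000, b = 2. The widened product is
// 0x1'0000'0000; (srl ... 32) leaves 1, which is exactly mulhu(a, b) -- the
// high 32 bits of the 64-bit product.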

// fold (bswap (logic_op(bswap(x),y))) -> logic_op(x,bswap(y))
// This helper function accepts SDNodes with opcode ISD::BSWAP or ISD::BITREVERSE.
static SDValue foldBitOrderCrossLogicOp(SDNode *N, SelectionDAG &DAG) {}

SDValue DAGCombiner::visitSRA(SDNode *N) {}

SDValue DAGCombiner::visitSRL(SDNode *N) {}

SDValue DAGCombiner::visitFunnelShift(SDNode *N) {}

SDValue DAGCombiner::visitSHLSAT(SDNode *N) {}

// Given an ABS node, detect the following patterns:
// (ABS (SUB (EXTEND a), (EXTEND b))).
// (TRUNC (ABS (SUB (EXTEND a), (EXTEND b)))).
// Generates a UABD/SABD instruction.
SDValue DAGCombiner::foldABSToABD(SDNode *N, const SDLoc &DL) {}

SDValue DAGCombiner::visitABS(SDNode *N) {}

SDValue DAGCombiner::visitBSWAP(SDNode *N) {}

SDValue DAGCombiner::visitBITREVERSE(SDNode *N) {}

SDValue DAGCombiner::visitCTLZ(SDNode *N) {}

SDValue DAGCombiner::visitCTLZ_ZERO_UNDEF(SDNode *N) {}

SDValue DAGCombiner::visitCTTZ(SDNode *N) {}

SDValue DAGCombiner::visitCTTZ_ZERO_UNDEF(SDNode *N) {}

SDValue DAGCombiner::visitCTPOP(SDNode *N) {}

static bool isLegalToCombineMinNumMaxNum(SelectionDAG &DAG, SDValue LHS,
                                         SDValue RHS, const SDNodeFlags Flags,
                                         const TargetLowering &TLI) {}

static SDValue combineMinNumMaxNumImpl(const SDLoc &DL, EVT VT, SDValue LHS,
                                       SDValue RHS, SDValue True, SDValue False,
                                       ISD::CondCode CC,
                                       const TargetLowering &TLI,
                                       SelectionDAG &DAG) {}

/// Generate Min/Max node
SDValue DAGCombiner::combineMinNumMaxNum(const SDLoc &DL, EVT VT, SDValue LHS,
                                         SDValue RHS, SDValue True,
                                         SDValue False, ISD::CondCode CC) {}

/// If a (v)select has a condition value that is a sign-bit test, try to smear
/// the condition operand sign-bit across the value width and use it as a mask.
static SDValue foldSelectOfConstantsUsingSra(SDNode *N, const SDLoc &DL,
                                             SelectionDAG &DAG) {}
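
// Illustrative scalar instance of the smear: with a sign-bit test condition,
//   (select (setlt x, 0), -1, 0)  -->  (sra x, bitwidth - 1)
// since the arithmetic shift replicates the sign bit across the whole value,
// yielding an all-ones or all-zero mask to fold into the constant math.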

static bool shouldConvertSelectOfConstantsToMath(const SDValue &Cond, EVT VT,
                                                 const TargetLowering &TLI) {}

SDValue DAGCombiner::foldSelectOfConstants(SDNode *N) {}

template <class MatchContextClass>
static SDValue foldBoolSelectToLogic(SDNode *N, const SDLoc &DL,
                                     SelectionDAG &DAG) {}

static SDValue foldVSelectToSignBitSplatMask(SDNode *N, SelectionDAG &DAG) {}

// Match SELECTs with absolute difference patterns.
// (select (setcc a, b, set?gt), (sub a, b), (sub b, a)) --> (abd? a, b)
// (select (setcc a, b, set?ge), (sub a, b), (sub b, a)) --> (abd? a, b)
// (select (setcc a, b, set?lt), (sub b, a), (sub a, b)) --> (abd? a, b)
// (select (setcc a, b, set?le), (sub b, a), (sub a, b)) --> (abd? a, b)
SDValue DAGCombiner::foldSelectToABD(SDValue LHS, SDValue RHS, SDValue True,
                                     SDValue False, ISD::CondCode CC,
                                     const SDLoc &DL) {}

SDValue DAGCombiner::visitSELECT(SDNode *N) {}

// This function assumes all the vselect's arguments are CONCAT_VECTORS
// nodes and that the condition is a BV of ConstantSDNodes (or undefs).
static SDValue ConvertSelectToConcatVector(SDNode *N, SelectionDAG &DAG) {}

bool refineUniformBase(SDValue &BasePtr, SDValue &Index, bool IndexIsScaled,
                       SelectionDAG &DAG, const SDLoc &DL) {}

// Fold sext/zext of index into index type.
bool refineIndexType(SDValue &Index, ISD::MemIndexType &IndexType, EVT DataVT,
                     SelectionDAG &DAG) {}

SDValue DAGCombiner::visitVPSCATTER(SDNode *N) {}

SDValue DAGCombiner::visitMSCATTER(SDNode *N) {}

SDValue DAGCombiner::visitMSTORE(SDNode *N) {}

SDValue DAGCombiner::visitVP_STRIDED_STORE(SDNode *N) {}

SDValue DAGCombiner::visitVECTOR_COMPRESS(SDNode *N) {}

SDValue DAGCombiner::visitVPGATHER(SDNode *N) {}

SDValue DAGCombiner::visitMGATHER(SDNode *N) {}

SDValue DAGCombiner::visitMLOAD(SDNode *N) {}

SDValue DAGCombiner::visitVP_STRIDED_LOAD(SDNode *N) {}

/// A vector select of 2 constant vectors can be simplified to math/logic to
/// avoid a variable select instruction and possibly avoid constant loads.
SDValue DAGCombiner::foldVSelectOfConstants(SDNode *N) {}

SDValue DAGCombiner::visitVP_SELECT(SDNode *N) {}

SDValue DAGCombiner::visitVSELECT(SDNode *N) {}

SDValue DAGCombiner::visitSELECT_CC(SDNode *N) {}

SDValue DAGCombiner::visitSETCC(SDNode *N) {}

SDValue DAGCombiner::visitSETCCCARRY(SDNode *N) {}

/// Check if N satisfies:
///   N is used once.
///   N is a Load.
///   The load is compatible with ExtOpcode. That means:
///     If the load has explicit zero/sign extension, ExtOpcode must have the
///     same extension.
///     Otherwise, the load is compatible.
static bool isCompatibleLoad(SDValue N, unsigned ExtOpcode) {}

/// Fold
///   (sext (select c, load x, load y)) -> (select c, sextload x, sextload y)
///   (zext (select c, load x, load y)) -> (select c, zextload x, zextload y)
///   (aext (select c, load x, load y)) -> (select c, extload x, extload y)
/// This function is called by the DAGCombiner when visiting sext/zext/aext
/// dag nodes (see for example method DAGCombiner::visitSIGN_EXTEND).
static SDValue tryToFoldExtendSelectLoad(SDNode *N, const TargetLowering &TLI,
                                         SelectionDAG &DAG, const SDLoc &DL,
                                         CombineLevel Level) {}

/// Try to fold a sext/zext/aext dag node into a ConstantSDNode or
/// a build_vector of constants.
/// This function is called by the DAGCombiner when visiting sext/zext/aext
/// dag nodes (see for example method DAGCombiner::visitSIGN_EXTEND).
/// Vector extends are not folded if operations are legal; this is to
/// avoid introducing illegal build_vector dag nodes.
static SDValue tryToFoldExtendOfConstant(SDNode *N, const SDLoc &DL,
                                         const TargetLowering &TLI,
                                         SelectionDAG &DAG, bool LegalTypes) {}

// ExtendUsesToFormExtLoad - Trying to extend uses of a load to enable this:
// "fold ({s|z|a}ext (load x)) -> ({s|z|a}ext (truncate ({s|z|a}extload x)))"
// transformation. Returns true if the extension is possible and the
// above-mentioned transformation is profitable.
static bool ExtendUsesToFormExtLoad(EVT VT, SDNode *N, SDValue N0,
                                    unsigned ExtOpc,
                                    SmallVectorImpl<SDNode *> &ExtendNodes,
                                    const TargetLowering &TLI) {}

void DAGCombiner::ExtendSetCCUses(const SmallVectorImpl<SDNode *> &SetCCs,
                                  SDValue OrigLoad, SDValue ExtLoad,
                                  ISD::NodeType ExtType) {}

// FIXME: Bring more similar combines here, common to sext/zext (maybe aext?).
SDValue DAGCombiner::CombineExtLoad(SDNode *N) {}

// fold (zext (and/or/xor (shl/shr (load x), cst), cst)) ->
//      (and/or/xor (shl/shr (zextload x), (zext cst)), (zext cst))
SDValue DAGCombiner::CombineZExtLogicopShiftLoad(SDNode *N) {}

/// If we're narrowing or widening the result of a vector select and the final
/// size is the same size as a setcc (compare) feeding the select, then try to
/// apply the cast operation to the select's operands because matching vector
/// sizes for a select condition and other operands should be more efficient.
SDValue DAGCombiner::matchVSelectOpSizesWithSetCC(SDNode *Cast) {}

// fold ([s|z]ext ([s|z]extload x)) -> ([s|z]ext (truncate ([s|z]extload x)))
// fold ([s|z]ext (     extload x)) -> ([s|z]ext (truncate ([s|z]extload x)))
static SDValue tryToFoldExtOfExtload(SelectionDAG &DAG, DAGCombiner &Combiner,
                                     const TargetLowering &TLI, EVT VT,
                                     bool LegalOperations, SDNode *N,
                                     SDValue N0, ISD::LoadExtType ExtLoadType) {}

// fold ([s|z]ext (load x)) -> ([s|z]ext (truncate ([s|z]extload x)))
// Only generate vector extloads when 1) they're legal, and 2) they are
// deemed desirable by the target. NonNegZExt can be set to true if a zero
// extend has the nonneg flag to allow use of sextload if profitable.
static SDValue tryToFoldExtOfLoad(SelectionDAG &DAG, DAGCombiner &Combiner,
                                  const TargetLowering &TLI, EVT VT,
                                  bool LegalOperations, SDNode *N, SDValue N0,
                                  ISD::LoadExtType ExtLoadType,
                                  ISD::NodeType ExtOpc,
                                  bool NonNegZExt = false) {}

static SDValue
tryToFoldExtOfMaskedLoad(SelectionDAG &DAG, const TargetLowering &TLI, EVT VT,
                         bool LegalOperations, SDNode *N, SDValue N0,
                         ISD::LoadExtType ExtLoadType, ISD::NodeType ExtOpc) {}

// fold ([s|z]ext (atomic_load)) -> ([s|z]ext (truncate ([s|z]ext atomic_load)))
static SDValue tryToFoldExtOfAtomicLoad(SelectionDAG &DAG,
                                        const TargetLowering &TLI, EVT VT,
                                        SDValue N0,
                                        ISD::LoadExtType ExtLoadType) {}

static SDValue foldExtendedSignBitTest(SDNode *N, SelectionDAG &DAG,
                                       bool LegalOperations) {}

SDValue DAGCombiner::foldSextSetcc(SDNode *N) {}

SDValue DAGCombiner::visitSIGN_EXTEND(SDNode *N) {}

/// Given an extending node with a pop-count operand, if the target does not
/// support a pop-count in the narrow source type but does support it in the
/// destination type, widen the pop-count to the destination type.
static SDValue widenCtPop(SDNode *Extend, SelectionDAG &DAG, const SDLoc &DL) {}

// If we have (zext (abs X)) where X is a type that will be promoted by type
// legalization, convert to (abs (sext X)). But don't extend past a legal type.
static SDValue widenAbs(SDNode *Extend, SelectionDAG &DAG) {}

SDValue DAGCombiner::visitZERO_EXTEND(SDNode *N) {}

SDValue DAGCombiner::visitANY_EXTEND(SDNode *N) {}

SDValue DAGCombiner::visitAssertExt(SDNode *N) {}

SDValue DAGCombiner::visitAssertAlign(SDNode *N) {}

/// If the result of a load is shifted/masked/truncated to an effectively
/// narrower type, try to transform the load to a narrower type and/or
/// use an extending load.
SDValue DAGCombiner::reduceLoadWidth(SDNode *N) {}SDValue DAGCombiner::visitSIGN_EXTEND_INREG(SDNode *N) {}static SDValue foldExtendVectorInregToExtendOfSubvector(
    SDNode *N, const SDLoc &DL, const TargetLowering &TLI, SelectionDAG &DAG,
    bool LegalOperations) {}SDValue DAGCombiner::visitEXTEND_VECTOR_INREG(SDNode *N) {}SDValue DAGCombiner::visitTRUNCATE_USAT_U(SDNode *N) {}static SDValue detectUSatUPattern(SDValue In, EVT VT) {}static SDValue detectSSatSPattern(SDValue In, EVT VT) {}static SDValue detectSSatUPattern(SDValue In, EVT VT, SelectionDAG &DAG,
                                  const SDLoc &DL) {}static SDValue foldToSaturated(SDNode *N, EVT &VT, SDValue &Src, EVT &SrcVT,
                               SDLoc &DL, const TargetLowering &TLI,
                               SelectionDAG &DAG) {}SDValue DAGCombiner::visitTRUNCATE(SDNode *N) {}static SDNode *getBuildPairElt(SDNode *N, unsigned i) {}SDValue DAGCombiner::CombineConsecutiveLoads(SDNode *N, EVT VT) {}static unsigned getPPCf128HiElementSelector(const SelectionDAG &DAG) {}SDValue DAGCombiner::foldBitcastedFPLogic(SDNode *N, SelectionDAG &DAG,
                                          const TargetLowering &TLI) {}SDValue DAGCombiner::visitBITCAST(SDNode *N) {}SDValue DAGCombiner::visitBUILD_PAIR(SDNode *N) {}SDValue DAGCombiner::visitFREEZE(SDNode *N) {}SDValue DAGCombiner::
ConstantFoldBITCASTofBUILD_VECTOR(SDNode *BV, EVT DstEltVT) {}static bool isContractableFMUL(const TargetOptions &Options, SDValue N) {}static bool hasNoInfs(const TargetOptions &Options, SDValue N) {}template <class MatchContextClass>
SDValue DAGCombiner::visitFADDForFMACombine(SDNode *N) {}template <class MatchContextClass>
SDValue DAGCombiner::visitFSUBForFMACombine(SDNode *N) {}SDValue DAGCombiner::visitFMULForFMADistributiveCombine(SDNode *N) {}SDValue DAGCombiner::visitVP_FADD(SDNode *N) {}SDValue DAGCombiner::visitFADD(SDNode *N) {}SDValue DAGCombiner::visitSTRICT_FADD(SDNode *N) {}SDValue DAGCombiner::visitFSUB(SDNode *N) {}SDValue DAGCombiner::combineFMulOrFDivWithIntPow2(SDNode *N) {}SDValue DAGCombiner::visitFMUL(SDNode *N) {}template <class MatchContextClass> SDValue DAGCombiner::visitFMA(SDNode *N) {}SDValue DAGCombiner::visitFMAD(SDNode *N) {}SDValue DAGCombiner::combineRepeatedFPDivisors(SDNode *N) {}SDValue DAGCombiner::visitFDIV(SDNode *N) {}SDValue DAGCombiner::visitFREM(SDNode *N) {}SDValue DAGCombiner::visitFSQRT(SDNode *N) {}static inline bool CanCombineFCOPYSIGN_EXTEND_ROUND(EVT XTy, EVT YTy) {}static inline bool CanCombineFCOPYSIGN_EXTEND_ROUND(SDNode *N) {}SDValue DAGCombiner::visitFCOPYSIGN(SDNode *N) {}SDValue DAGCombiner::visitFPOW(SDNode *N) {}static SDValue foldFPToIntToFP(SDNode *N, SelectionDAG &DAG,
                               const TargetLowering &TLI) {}SDValue DAGCombiner::visitSINT_TO_FP(SDNode *N) {}SDValue DAGCombiner::visitUINT_TO_FP(SDNode *N) {}static SDValue FoldIntToFPToInt(SDNode *N, SelectionDAG &DAG) {}SDValue DAGCombiner::visitFP_TO_SINT(SDNode *N) {}SDValue DAGCombiner::visitFP_TO_UINT(SDNode *N) {}SDValue DAGCombiner::visitXROUND(SDNode *N) {}SDValue DAGCombiner::visitFP_ROUND(SDNode *N) {}SDValue DAGCombiner::visitFP_EXTEND(SDNode *N) {}SDValue DAGCombiner::visitFCEIL(SDNode *N) {}SDValue DAGCombiner::visitFTRUNC(SDNode *N) {}SDValue DAGCombiner::visitFFREXP(SDNode *N) {}SDValue DAGCombiner::visitFFLOOR(SDNode *N) {}SDValue DAGCombiner::visitFNEG(SDNode *N) {}SDValue DAGCombiner::visitFMinMax(SDNode *N) {}SDValue DAGCombiner::visitFABS(SDNode *N) {}SDValue DAGCombiner::visitBRCOND(SDNode *N) {}SDValue DAGCombiner::rebuildSetCC(SDValue N) {}SDValue DAGCombiner::visitBR_CC(SDNode *N) {}static bool getCombineLoadStoreParts(SDNode *N, unsigned Inc, unsigned Dec,
                                     bool &IsLoad, bool &IsMasked, SDValue &Ptr,
                                     const TargetLowering &TLI) {}bool DAGCombiner::CombineToPreIndexedLoadStore(SDNode *N) {}static bool shouldCombineToPostInc(SDNode *N, SDValue Ptr, SDNode *PtrUse,
                                   SDValue &BasePtr, SDValue &Offset,
                                   ISD::MemIndexedMode &AM,
                                   SelectionDAG &DAG,
                                   const TargetLowering &TLI) {}static SDNode *getPostIndexedLoadStoreOp(SDNode *N, bool &IsLoad,
                                         bool &IsMasked, SDValue &Ptr,
                                         SDValue &BasePtr, SDValue &Offset,
                                         ISD::MemIndexedMode &AM,
                                         SelectionDAG &DAG,
                                         const TargetLowering &TLI) {}bool DAGCombiner::CombineToPostIndexedLoadStore(SDNode *N) {}SDValue DAGCombiner::SplitIndexingFromLoad(LoadSDNode *LD) {}static inline ElementCount numVectorEltsOrZero(EVT T) {}bool DAGCombiner::getTruncatedStoreValue(StoreSDNode *ST, SDValue &Val) {}bool DAGCombiner::extendLoadedValueToExtension(LoadSDNode *LD, SDValue &Val) {}StoreSDNode *DAGCombiner::getUniqueStoreFeeding(LoadSDNode *LD,
SDValue DAGCombiner::SplitIndexingFromLoad(LoadSDNode *LD) {}
static inline ElementCount numVectorEltsOrZero(EVT T) {}
bool DAGCombiner::getTruncatedStoreValue(StoreSDNode *ST, SDValue &Val) {}
bool DAGCombiner::extendLoadedValueToExtension(LoadSDNode *LD, SDValue &Val) {}
StoreSDNode *DAGCombiner::getUniqueStoreFeeding(LoadSDNode *LD,
                                                int64_t &Offset) {}
SDValue DAGCombiner::ForwardStoreValueToDirectLoad(LoadSDNode *LD) {}
SDValue DAGCombiner::visitLOAD(SDNode *N) {}
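
//===----------------------------------------------------------------------===//
// Load slicing. SliceUpLoad and the LoadedSlice machinery below split one
// wide load whose value is only consumed piecewise (through truncates and
// shifts) into several independent narrow loads, when the cost model
// (isSlicingProfitable, adjustCostForPairing) finds the used bits dense
// enough and the resulting slices pair well.
//===----------------------------------------------------------------------===//
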
struct LoadedSlice {};
static bool areUsedBitsDense(const APInt &UsedBits) {}
static bool areSlicesNextToEachOther(const LoadedSlice &First,
                                     const LoadedSlice &Second) {}
static void adjustCostForPairing(SmallVectorImpl<LoadedSlice> &LoadedSlices,
                                 LoadedSlice::Cost &GlobalLSCost) {}
static bool isSlicingProfitable(SmallVectorImpl<LoadedSlice> &LoadedSlices,
                                const APInt &UsedBits, bool ForCodeSize) {}
bool DAGCombiner::SliceUpLoad(SDNode *N) {}
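
//===----------------------------------------------------------------------===//
// Narrowing load/op/store sequences. CheckForMaskedLoad and
// ShrinkLoadReplaceStoreWithStore below appear to implement the classic
// "change only the touched bytes" combine used by ReduceLoadOpStoreWidth:
//   store (or/and/xor (load p), C), p
//     -> narrower store of just the bytes C actually modifies
// For example, an OR with 0x00FF0000 can become a one-byte store at the
// corresponding byte offset instead of a full-width load/or/store.
//===----------------------------------------------------------------------===//
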
static std::pair<unsigned, unsigned>
CheckForMaskedLoad(SDValue V, SDValue Ptr, SDValue Chain) {}
static SDValue
ShrinkLoadReplaceStoreWithStore(const std::pair<unsigned, unsigned> &MaskInfo,
                                SDValue IVal, StoreSDNode *St,
                                DAGCombiner *DC) {}
SDValue DAGCombiner::ReduceLoadOpStoreWidth(SDNode *N) {}
SDValue DAGCombiner::TransformFPLoadStorePair(SDNode *N) {}
bool DAGCombiner::isMulAddWithConstProfitable(SDNode *MulNode, SDValue AddNode,
                                              SDValue ConstNode) {}
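
//===----------------------------------------------------------------------===//
// Store merging. The MemOpLink-based routines that follow collect candidate
// stores (getStoreMergeCandidates), verify that merging would not create a
// chain cycle (checkMergeStoreCandidatesForDependencies), group candidates
// into consecutive runs (getConsecutiveStores), and then merge runs of
// constants, extracted vector elements, or loads into fewer, wider stores
// (tryStoreMergeOfConstants/Extracts/Loads), all driven by
// mergeConsecutiveStores from visitSTORE.
//===----------------------------------------------------------------------===//
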
SDValue DAGCombiner::getMergeStoreChains(SmallVectorImpl<MemOpLink> &StoreNodes,
                                         unsigned NumStores) {}
bool DAGCombiner::hasSameUnderlyingObj(ArrayRef<MemOpLink> StoreNodes) {}
bool DAGCombiner::mergeStoresOfConstantsOrVecElts(
    SmallVectorImpl<MemOpLink> &StoreNodes, EVT MemVT, unsigned NumStores,
    bool IsConstantSrc, bool UseVector, bool UseTrunc) {}
SDNode *
DAGCombiner::getStoreMergeCandidates(StoreSDNode *St,
                                     SmallVectorImpl<MemOpLink> &StoreNodes) {}
bool DAGCombiner::checkMergeStoreCandidatesForDependencies(
    SmallVectorImpl<MemOpLink> &StoreNodes, unsigned NumStores,
    SDNode *RootNode) {}
unsigned
DAGCombiner::getConsecutiveStores(SmallVectorImpl<MemOpLink> &StoreNodes,
                                  int64_t ElementSizeBytes) const {}
bool DAGCombiner::tryStoreMergeOfConstants(
    SmallVectorImpl<MemOpLink> &StoreNodes, unsigned NumConsecutiveStores,
    EVT MemVT, SDNode *RootNode, bool AllowVectors) {}
bool DAGCombiner::tryStoreMergeOfExtracts(
    SmallVectorImpl<MemOpLink> &StoreNodes, unsigned NumConsecutiveStores,
    EVT MemVT, SDNode *RootNode) {}
bool DAGCombiner::tryStoreMergeOfLoads(SmallVectorImpl<MemOpLink> &StoreNodes,
                                       unsigned NumConsecutiveStores, EVT MemVT,
                                       SDNode *RootNode, bool AllowVectors,
                                       bool IsNonTemporalStore,
                                       bool IsNonTemporalLoad) {}
bool DAGCombiner::mergeConsecutiveStores(StoreSDNode *St) {}
SDValue DAGCombiner::replaceStoreChain(StoreSDNode *ST, SDValue BetterChain) {}
SDValue DAGCombiner::replaceStoreOfFPConstant(StoreSDNode *ST) {}
SDValue DAGCombiner::replaceStoreOfInsertLoad(StoreSDNode *ST) {}
SDValue DAGCombiner::visitATOMIC_STORE(SDNode *N) {}
SDValue DAGCombiner::visitSTORE(SDNode *N) {}
SDValue DAGCombiner::visitLIFETIME_END(SDNode *N) {}
SDValue DAGCombiner::splitMergedValStore(StoreSDNode *ST) {}
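
//===----------------------------------------------------------------------===//
// INSERT_VECTOR_ELT combines. The helpers below turn element inserts into
// cheaper forms: folding an insert into an existing shuffle mask
// (mergeEltWithShuffle / mergeInsertEltWithShuffle), rewriting an insert
// pattern as a shuffle of two vectors (combineInsertEltToShuffle), or
// absorbing an inserted loaded element into a wider load
// (combineInsertEltToLoad).
//===----------------------------------------------------------------------===//
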
static bool mergeEltWithShuffle(SDValue &X, SDValue &Y, ArrayRef<int> Mask,
                                SmallVectorImpl<int> &NewMask, SDValue Elt,
                                unsigned InsIndex) {}
SDValue DAGCombiner::mergeInsertEltWithShuffle(SDNode *N, unsigned InsIndex) {}
SDValue DAGCombiner::combineInsertEltToShuffle(SDNode *N, unsigned InsIndex) {}
SDValue DAGCombiner::combineInsertEltToLoad(SDNode *N, unsigned InsIndex) {}
SDValue DAGCombiner::visitINSERT_VECTOR_ELT(SDNode *N) {}
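
//===----------------------------------------------------------------------===//
// EXTRACT_VECTOR_ELT combines. scalarizeExtractedVectorLoad narrows a vector
// load feeding a single-element extract into a scalar load, and
// scalarizeExtractedBinop distributes an extract over a vector binop:
//   fold (extract_vector_elt (add X, Y), I)
//     -> (add (extract_vector_elt X, I), (extract_vector_elt Y, I))
// A minimal sketch for a single opcode; tryScalarizeExtractOfAdd is a
// hypothetical helper, while the real code is generic over binops and also
// checks legality and profitability:
//
//   SDValue tryScalarizeExtractOfAdd(SDNode *Ext, SelectionDAG &DAG) {
//     SDValue Vec = Ext->getOperand(0), Idx = Ext->getOperand(1);
//     if (Vec.getOpcode() != ISD::ADD)
//       return SDValue();
//     SDLoc DL(Ext);
//     EVT EltVT = Ext->getValueType(0);
//     // Extract each operand's element, then add the scalars.
//     SDValue L = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT,
//                             Vec.getOperand(0), Idx);
//     SDValue R = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT,
//                             Vec.getOperand(1), Idx);
//     return DAG.getNode(ISD::ADD, DL, EltVT, L, R);
//   }
//===----------------------------------------------------------------------===//
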
SDValue DAGCombiner::scalarizeExtractedVectorLoad(SDNode *EVE, EVT InVecVT,
                                                  SDValue EltNo,
                                                  LoadSDNode *OriginalLoad) {}
static SDValue scalarizeExtractedBinop(SDNode *ExtElt, SelectionDAG &DAG,
                                       const SDLoc &DL, bool LegalOperations) {}
bool DAGCombiner::refineExtractVectorEltIntoMultipleNarrowExtractVectorElts(
    SDNode *N) {}
SDValue DAGCombiner::visitEXTRACT_VECTOR_ELT(SDNode *N) {}
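
//===----------------------------------------------------------------------===//
// BUILD_VECTOR combines. reduceBuildVecToShuffle recognizes a BUILD_VECTOR
// whose operands are extract_vector_elt from at most two source vectors and
// rebuilds it as a VECTOR_SHUFFLE; reduceBuildVecExtToExtBuildVec and
// reduceBuildVecTruncToBitCast likely fold per-element extends or truncates
// into a single extend or bitcast of a narrower BUILD_VECTOR.
//===----------------------------------------------------------------------===//
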
SDValue DAGCombiner::reduceBuildVecExtToExtBuildVec(SDNode *N) {}
SDValue DAGCombiner::reduceBuildVecTruncToBitCast(SDNode *N) {}
SDValue DAGCombiner::createBuildVecShuffle(const SDLoc &DL, SDNode *N,
                                           ArrayRef<int> VectorMask,
                                           SDValue VecIn1, SDValue VecIn2,
                                           unsigned LeftIdx, bool DidSplitVec) {}
static SDValue reduceBuildVecToShuffleWithZero(SDNode *BV, SelectionDAG &DAG) {}
template <typename R, typename T>
static auto getFirstIndexOf(R &&Range, const T &Val) {}
SDValue DAGCombiner::reduceBuildVecToShuffle(SDNode *N) {}
SDValue DAGCombiner::convertBuildVecZextToZext(SDNode *N) {}
SDValue DAGCombiner::convertBuildVecZextToBuildVecWithZeros(SDNode *N) {}
SDValue DAGCombiner::visitBUILD_VECTOR(SDNode *N) {}
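
//===----------------------------------------------------------------------===//
// CONCAT_VECTORS combines. The helpers that follow flatten concats of
// concats, merge concats of extracts from a common source back into that
// source (or a shuffle of it), and hoist casts and shuffles out of concats,
// so visitCONCAT_VECTORS can emit one wider operation instead of several
// narrow ones.
//===----------------------------------------------------------------------===//
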
static SDValue combineConcatVectorOfScalars(SDNode *N, SelectionDAG &DAG) {}
static SDValue combineConcatVectorOfConcatVectors(SDNode *N,
                                                  SelectionDAG &DAG) {}
static SDValue combineConcatVectorOfExtracts(SDNode *N, SelectionDAG &DAG) {}
static SDValue combineConcatVectorOfCasts(SDNode *N, SelectionDAG &DAG) {}
static SDValue combineConcatVectorOfShuffleAndItsOperands(
    SDNode *N, SelectionDAG &DAG, const TargetLowering &TLI, bool LegalTypes,
    bool LegalOperations) {}
SDValue DAGCombiner::visitCONCAT_VECTORS(SDNode *N) {}
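
//===----------------------------------------------------------------------===//
// EXTRACT_SUBVECTOR narrowing. These routines push a subvector extract into
// its operand, e.g.:
//   fold (extract_subvector (binop X, Y), I)
//     -> (binop (extract_subvector X, I), (extract_subvector Y, I))
// and narrowExtractedVectorLoad turns an extract of a wide load into a
// narrower load at the corresponding offset, presumably when that is legal
// and the wide load has no other uses.
//===----------------------------------------------------------------------===//
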
static SDValue getSubVectorSrc(SDValue V, SDValue Index, EVT SubVT) {}
static SDValue narrowInsertExtractVectorBinOp(SDNode *Extract,
                                              SelectionDAG &DAG,
                                              bool LegalOperations) {}
static SDValue narrowExtractedVectorBinOp(SDNode *Extract, SelectionDAG &DAG,
                                          bool LegalOperations) {}
static SDValue narrowExtractedVectorLoad(SDNode *Extract, SelectionDAG &DAG) {}
static SDValue foldExtractSubvectorFromShuffleVector(SDNode *N,
                                                     SelectionDAG &DAG,
                                                     const TargetLowering &TLI,
                                                     bool LegalOperations) {}
SDValue DAGCombiner::visitEXTRACT_SUBVECTOR(SDNode *N) {}
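
//===----------------------------------------------------------------------===//
// VECTOR_SHUFFLE combines. Highlights of the group below: shuffles of splats
// fold back to a splat (combineShuffleOfSplatVal); shuffles whose mask forms
// an extending pattern over the low elements become *_EXTEND_VECTOR_INREG
// nodes (combineShuffleToAnyExtendVectorInreg and the zero-extend variant);
// and shuffles of bitcasts or of other shuffles are recomposed into a single
// shuffle when the masks compose (combineShuffleOfBitcast,
// simplifyShuffleOfShuffle).
//===----------------------------------------------------------------------===//
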
static SDValue foldShuffleOfConcatUndefs(ShuffleVectorSDNode *Shuf,
                                         SelectionDAG &DAG) {}
static SDValue partitionShuffleOfConcats(SDNode *N, SelectionDAG &DAG) {}
static SDValue combineShuffleOfScalars(ShuffleVectorSDNode *SVN,
                                       SelectionDAG &DAG,
                                       const TargetLowering &TLI) {}
static std::optional<EVT> canCombineShuffleToExtendVectorInreg(
    unsigned Opcode, EVT VT, std::function<bool(unsigned)> Match,
    SelectionDAG &DAG, const TargetLowering &TLI, bool LegalTypes,
    bool LegalOperations) {}
static SDValue combineShuffleToAnyExtendVectorInreg(ShuffleVectorSDNode *SVN,
                                                    SelectionDAG &DAG,
                                                    const TargetLowering &TLI,
                                                    bool LegalOperations) {}
static SDValue combineShuffleToZeroExtendVectorInReg(ShuffleVectorSDNode *SVN,
                                                     SelectionDAG &DAG,
                                                     const TargetLowering &TLI,
                                                     bool LegalOperations) {}
static SDValue combineTruncationShuffle(ShuffleVectorSDNode *SVN,
                                        SelectionDAG &DAG) {}
static SDValue combineShuffleOfSplatVal(ShuffleVectorSDNode *Shuf,
                                        SelectionDAG &DAG) {}
static SDValue combineShuffleOfBitcast(ShuffleVectorSDNode *SVN,
                                       SelectionDAG &DAG,
                                       const TargetLowering &TLI,
                                       bool LegalOperations) {}
static SDValue formSplatFromShuffles(ShuffleVectorSDNode *OuterShuf,
                                     SelectionDAG &DAG) {}
static int getShuffleMaskIndexOfOneElementFromOp0IntoOp1(ArrayRef<int> Mask) {}
static SDValue replaceShuffleOfInsert(ShuffleVectorSDNode *Shuf,
                                      SelectionDAG &DAG) {}
static SDValue simplifyShuffleOfShuffle(ShuffleVectorSDNode *Shuf) {}
SDValue DAGCombiner::visitVECTOR_SHUFFLE(SDNode *N) {}
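
//===----------------------------------------------------------------------===//
// Remaining scalar<->vector visitors: SCALAR_TO_VECTOR, INSERT_SUBVECTOR, the
// FP16/BF16 conversion nodes, VECREDUCE, and the VP (vector-predicated)
// dispatch in visitVPOp. scalarizeBinOpOfSplats and SimplifyVBinOp then apply
// the generic "operate on the scalar, re-splat the result" rule:
//   fold (binop (splat X), (splat Y)) -> (splat (binop X, Y))
//===----------------------------------------------------------------------===//
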
SDValue DAGCombiner::visitSCALAR_TO_VECTOR(SDNode *N) {}
SDValue DAGCombiner::visitINSERT_SUBVECTOR(SDNode *N) {}
SDValue DAGCombiner::visitFP_TO_FP16(SDNode *N) {}
SDValue DAGCombiner::visitFP16_TO_FP(SDNode *N) {}
SDValue DAGCombiner::visitFP_TO_BF16(SDNode *N) {}
SDValue DAGCombiner::visitBF16_TO_FP(SDNode *N) {}
SDValue DAGCombiner::visitVECREDUCE(SDNode *N) {}
SDValue DAGCombiner::visitVP_FSUB(SDNode *N) {}
SDValue DAGCombiner::visitVPOp(SDNode *N) {}
SDValue DAGCombiner::visitGET_FPENV_MEM(SDNode *N) {}
SDValue DAGCombiner::visitSET_FPENV_MEM(SDNode *N) {}
SDValue DAGCombiner::XformToShuffleWithZero(SDNode *N) {}
static SDValue scalarizeBinOpOfSplats(SDNode *N, SelectionDAG &DAG,
                                      const SDLoc &DL, bool LegalTypes) {}
SDValue DAGCombiner::SimplifyVCastOp(SDNode *N, const SDLoc &DL) {}
SDValue DAGCombiner::SimplifyVBinOp(SDNode *N, const SDLoc &DL) {}
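
//===----------------------------------------------------------------------===//
// SELECT / SELECT_CC / SETCC simplification. One representative fold,
// foldSelectCCToShiftAnd, replaces a sign-test select with shift+and:
//   fold (select_cc setlt X, 0, A, 0) -> (and (sra X, BW-1), A)
// where BW is the bit width of X: (sra X, BW-1) is all-ones exactly when X is
// negative, so the AND yields A for X < 0 and 0 otherwise, with no branch or
// conditional move.
//===----------------------------------------------------------------------===//
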
SDValue DAGCombiner::SimplifySelect(const SDLoc &DL, SDValue N0, SDValue N1,
                                    SDValue N2) {}
bool DAGCombiner::SimplifySelectOps(SDNode *TheSelect, SDValue LHS,
                                    SDValue RHS) {}
SDValue DAGCombiner::foldSelectCCToShiftAnd(const SDLoc &DL, SDValue N0,
                                            SDValue N1, SDValue N2, SDValue N3,
                                            ISD::CondCode CC) {}
SDValue DAGCombiner::foldSelectOfBinops(SDNode *N) {}
SDValue DAGCombiner::foldSignChangeInBitcast(SDNode *N) {}
SDValue DAGCombiner::convertSelectOfFPConstantsToLoadOffset(
    const SDLoc &DL, SDValue N0, SDValue N1, SDValue N2, SDValue N3,
    ISD::CondCode CC) {}
SDValue DAGCombiner::SimplifySelectCC(const SDLoc &DL, SDValue N0, SDValue N1,
                                      SDValue N2, SDValue N3, ISD::CondCode CC,
                                      bool NotExtCompare) {}
SDValue DAGCombiner::SimplifySetCC(EVT VT, SDValue N0, SDValue N1,
                                   ISD::CondCode Cond, const SDLoc &DL,
                                   bool foldBooleans) {}
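
//===----------------------------------------------------------------------===//
// Division, remainder, and reciprocal/sqrt estimates. BuildSDIV/BuildUDIV
// expand division by a constant into multiply-high plus shifts using the
// classic magic-number construction (Granlund-Montgomery, also in Hacker's
// Delight), while BuildSDIVPow2/BuildSREMPow2 handle power-of-two divisors
// with shifts. The buildSqrtNR* routines refine a hardware estimate E0 of
// 1/sqrt(A) with Newton-Raphson iterations:
//   E_{n+1} = E_n * (1.5 - 0.5 * A * E_n * E_n)
// buildRsqrtEstimate/buildSqrtEstimate wrap this, and BuildDivEstimate does
// the analogous reciprocal refinement so an FDIV can become FMULs.
//===----------------------------------------------------------------------===//
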
SDValue DAGCombiner::BuildSDIV(SDNode *N) {}
SDValue DAGCombiner::BuildSDIVPow2(SDNode *N) {}
SDValue DAGCombiner::BuildUDIV(SDNode *N) {}
SDValue DAGCombiner::BuildSREMPow2(SDNode *N) {}
static SDValue takeInexpensiveLog2(SelectionDAG &DAG, const SDLoc &DL, EVT VT,
                                   SDValue Op, unsigned Depth,
                                   bool AssumeNonZero) {}
SDValue DAGCombiner::BuildLogBase2(SDValue V, const SDLoc &DL,
                                   bool KnownNonZero, bool InexpensiveOnly,
                                   std::optional<EVT> OutVT) {}
SDValue DAGCombiner::BuildDivEstimate(SDValue N, SDValue Op,
                                      SDNodeFlags Flags) {}
SDValue DAGCombiner::buildSqrtNROneConst(SDValue Arg, SDValue Est,
                                         unsigned Iterations,
                                         SDNodeFlags Flags, bool Reciprocal) {}
SDValue DAGCombiner::buildSqrtNRTwoConst(SDValue Arg, SDValue Est,
                                         unsigned Iterations,
                                         SDNodeFlags Flags, bool Reciprocal) {}
SDValue DAGCombiner::buildSqrtEstimateImpl(SDValue Op, SDNodeFlags Flags,
                                           bool Reciprocal) {}
SDValue DAGCombiner::buildRsqrtEstimate(SDValue Op, SDNodeFlags Flags) {}
SDValue DAGCombiner::buildSqrtEstimate(SDValue Op, SDNodeFlags Flags) {}
bool DAGCombiner::mayAlias(SDNode *Op0, SDNode *Op1) const {}
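
//===----------------------------------------------------------------------===//
// Chain and alias analysis. GatherAllAliases walks the token-chain graph from
// a memory node, collecting the operations it may alias (using mayAlias
// above, backed by AA when available); FindBetterChain then re-attaches the
// node to the closest non-aliasing chain so independent memory operations can
// execute in parallel, and parallelizeChainedStores/findBetterNeighborChains
// apply the same idea across runs of neighboring stores. SelectionDAG::Combine
// is the entry point that drives the whole pass.
//===----------------------------------------------------------------------===//
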
void DAGCombiner::GatherAllAliases(SDNode *N, SDValue OriginalChain,
                                   SmallVectorImpl<SDValue> &Aliases) {}
SDValue DAGCombiner::FindBetterChain(SDNode *N, SDValue OldChain) {}
bool DAGCombiner::parallelizeChainedStores(StoreSDNode *St) {}
bool DAGCombiner::findBetterNeighborChains(StoreSDNode *St) {}
void SelectionDAG::Combine(CombineLevel Level, AliasAnalysis *AA,
                           CodeGenOptLevel OptLevel) {}