llvm/lib/Target/X86/X86ISelLowering.cpp

//===-- X86ISelLowering.cpp - X86 DAG Lowering Implementation -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that X86 uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#include "X86ISelLowering.h"
#include "MCTargetDesc/X86ShuffleDecode.h"
#include "X86.h"
#include "X86CallingConv.h"
#include "X86FrameLowering.h"
#include "X86InstrBuilder.h"
#include "X86IntrinsicsInfo.h"
#include "X86MachineFunctionInfo.h"
#include "X86TargetMachine.h"
#include "X86TargetObjectFile.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/ObjCARCUtil.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/CodeGen/IntrinsicLowering.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SDPatternMatch.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/WinEHFuncInfo.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/EHPersonalities.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetOptions.h"
#include <algorithm>
#include <bitset>
#include <cctype>
#include <numeric>
using namespace llvm;

#define DEBUG_TYPE "x86-isel"

static cl::opt<int> ExperimentalPrefInnermostLoopAlignment(
    "x86-experimental-pref-innermost-loop-alignment", cl::init(4),
    cl::desc(
        "Sets the preferable loop alignment for experiments (as log2 bytes) "
        "for innermost loops only. If specified, this option overrides "
        "alignment set by x86-experimental-pref-loop-alignment."),
    cl::Hidden);

static cl::opt<int> BrMergingBaseCostThresh(
    "x86-br-merging-base-cost", cl::init(2),
    cl::desc(
        "Sets the cost threshold for when multiple conditionals will be merged "
        "into one branch versus be split in multiple branches. Merging "
        "conditionals saves branches at the cost of additional instructions. "
        "This value sets the instruction cost limit, below which conditionals "
        "will be merged, and above which conditionals will be split. Set to -1 "
        "to never merge branches."),
    cl::Hidden);

static cl::opt<int> BrMergingCcmpBias(
    "x86-br-merging-ccmp-bias", cl::init(6),
    cl::desc("Increases 'x86-br-merging-base-cost' in cases that the target "
             "supports conditional compare instructions."),
    cl::Hidden);

static cl::opt<bool>
    WidenShift("x86-widen-shift", cl::init(true),
               cl::desc("Replacte narrow shifts with wider shifts."),
               cl::Hidden);

static cl::opt<int> BrMergingLikelyBias(
    "x86-br-merging-likely-bias", cl::init(0),
    cl::desc("Increases 'x86-br-merging-base-cost' in cases that it is likely "
             "that all conditionals will be executed. For example for merging "
             "the conditionals (a == b && c > d), if its known that a == b is "
             "likely, then it is likely that if the conditionals are split "
             "both sides will be executed, so it may be desirable to increase "
             "the instruction cost threshold. Set to -1 to never merge likely "
             "branches."),
    cl::Hidden);

static cl::opt<int> BrMergingUnlikelyBias(
    "x86-br-merging-unlikely-bias", cl::init(-1),
    cl::desc(
        "Decreases 'x86-br-merging-base-cost' in cases that it is unlikely "
        "that all conditionals will be executed. For example for merging "
        "the conditionals (a == b && c > d), if its known that a == b is "
        "unlikely, then it is unlikely that if the conditionals are split "
        "both sides will be executed, so it may be desirable to decrease "
        "the instruction cost threshold. Set to -1 to never merge unlikely "
        "branches."),
    cl::Hidden);

static cl::opt<bool> MulConstantOptimization(
    "mul-constant-optimization", cl::init(true),
    cl::desc("Replace 'mul x, Const' with more effective instructions like "
             "SHIFT, LEA, etc."),
    cl::Hidden);

X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
                                     const X86Subtarget &STI)
    {}

// This has so far only been implemented for 64-bit MachO.
bool X86TargetLowering::useLoadStackGuardNode() const {}

bool X86TargetLowering::useStackGuardXorFP() const {}

SDValue X86TargetLowering::emitStackGuardXorFP(SelectionDAG &DAG, SDValue Val,
                                               const SDLoc &DL) const {}

TargetLoweringBase::LegalizeTypeAction
X86TargetLowering::getPreferredVectorAction(MVT VT) const {}

FastISel *
X86TargetLowering::createFastISel(FunctionLoweringInfo &funcInfo,
                                  const TargetLibraryInfo *libInfo) const {}

//===----------------------------------------------------------------------===//
//                           Other Lowering Hooks
//===----------------------------------------------------------------------===//

bool X86::mayFoldLoad(SDValue Op, const X86Subtarget &Subtarget,
                      bool AssumeSingleUse) {}

bool X86::mayFoldLoadIntoBroadcastFromMem(SDValue Op, MVT EltVT,
                                          const X86Subtarget &Subtarget,
                                          bool AssumeSingleUse) {}

bool X86::mayFoldIntoStore(SDValue Op) {}

bool X86::mayFoldIntoZeroExtend(SDValue Op) {}

static bool isLogicOp(unsigned Opcode) {}

static bool isTargetShuffle(unsigned Opcode) {}

static bool isTargetShuffleVariableMask(unsigned Opcode) {}

SDValue X86TargetLowering::getReturnAddressFrameIndex(SelectionDAG &DAG) const {}

bool X86::isOffsetSuitableForCodeModel(int64_t Offset, CodeModel::Model CM,
                                       bool HasSymbolicDisplacement) {}

/// Return true if the condition is a signed comparison operation (e.g.
/// COND_G/COND_L rather than their unsigned counterparts COND_A/COND_B).
static bool isX86CCSigned(unsigned X86CC) {}

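// Sample of the expected one-to-one translation (standard x86 integer
// condition codes):
//   ISD::SETEQ  -> X86::COND_E
//   ISD::SETGT  -> X86::COND_G  (signed greater)
//   ISD::SETULT -> X86::COND_B  (unsigned below)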
static X86::CondCode TranslateIntegerX86CC(ISD::CondCode SetCCOpcode) {}

/// Do a one-to-one translation of an ISD::CondCode to the X86-specific
/// condition code, returning the condition code and the LHS/RHS of the
/// comparison to make.
static X86::CondCode TranslateX86CC(ISD::CondCode SetCCOpcode, const SDLoc &DL,
                                    bool isFP, SDValue &LHS, SDValue &RHS,
                                    SelectionDAG &DAG) {}

/// Is there a floating point cmov for the specific X86 condition code?
/// Current x86 isa includes the following FP cmov instructions:
/// fcmovb, fcmovbe, fcmove, fcmovu, fcmovae, fcmova, fcmovne, fcmovnu.
static bool hasFPCMov(unsigned X86CC) {}

static bool useVPTERNLOG(const X86Subtarget &Subtarget, MVT VT) {}

bool X86TargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
                                           const CallInst &I,
                                           MachineFunction &MF,
                                           unsigned Intrinsic) const {}

/// Returns true if the target can instruction select the
/// specified FP immediate natively. If false, the legalizer will
/// materialize the FP immediate as a load from a constant pool.
bool X86TargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
                                     bool ForCodeSize) const {}

bool X86TargetLowering::shouldReduceLoadWidth(SDNode *Load,
                                              ISD::LoadExtType ExtTy,
                                              EVT NewVT) const {}

/// Returns true if it is beneficial to convert a load of a constant
/// to just the constant itself.
bool X86TargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
                                                          Type *Ty) const {}

bool X86TargetLowering::reduceSelectOfFPConstantLoads(EVT CmpOpVT) const {}

bool X86TargetLowering::convertSelectOfConstantsToMath(EVT VT) const {}

bool X86TargetLowering::decomposeMulByConstant(LLVMContext &Context, EVT VT,
                                               SDValue C) const {}

bool X86TargetLowering::isExtractSubvectorCheap(EVT ResVT, EVT SrcVT,
                                                unsigned Index) const {}

bool X86TargetLowering::shouldScalarizeBinop(SDValue VecOp) const {}

bool X86TargetLowering::shouldFormOverflowOp(unsigned Opcode, EVT VT,
                                             bool) const {}

bool X86TargetLowering::isCheapToSpeculateCttz(Type *Ty) const {}

bool X86TargetLowering::isCheapToSpeculateCtlz(Type *Ty) const {}

bool X86TargetLowering::ShouldShrinkFPConstant(EVT VT) const {}

bool X86TargetLowering::isScalarFPTypeInSSEReg(EVT VT) const {}

bool X86TargetLowering::isLoadBitCastBeneficial(EVT LoadVT, EVT BitcastVT,
                                                const SelectionDAG &DAG,
                                                const MachineMemOperand &MMO) const {}

bool X86TargetLowering::canMergeStoresTo(unsigned AddressSpace, EVT MemVT,
                                         const MachineFunction &MF) const {}

bool X86TargetLowering::isCtlzFast() const {}

bool X86TargetLowering::isMaskAndCmp0FoldingBeneficial(
    const Instruction &AndI) const {}

bool X86TargetLowering::hasAndNotCompare(SDValue Y) const {}

bool X86TargetLowering::hasAndNot(SDValue Y) const {}

bool X86TargetLowering::hasBitTest(SDValue X, SDValue Y) const {}

bool X86TargetLowering::
    shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd(
        SDValue X, ConstantSDNode *XC, ConstantSDNode *CC, SDValue Y,
        unsigned OldShiftOpcode, unsigned NewShiftOpcode,
        SelectionDAG &DAG) const {}

unsigned X86TargetLowering::preferedOpcodeForCmpEqPiecesOfOperand(
    EVT VT, unsigned ShiftOpc, bool MayTransformRotate,
    const APInt &ShiftOrRotateAmt, const std::optional<APInt> &AndMask) const {}

TargetLoweringBase::CondMergingParams
X86TargetLowering::getJumpConditionMergingParams(Instruction::BinaryOps Opc,
                                                 const Value *Lhs,
                                                 const Value *Rhs) const {}

bool X86TargetLowering::preferScalarizeSplat(SDNode *N) const {}

bool X86TargetLowering::shouldFoldConstantShiftPairToMask(
    const SDNode *N, CombineLevel Level) const {}

bool X86TargetLowering::shouldFoldMaskToVariableShiftPair(SDValue Y) const {}

TargetLowering::ShiftLegalizationStrategy
X86TargetLowering::preferredShiftLegalizationStrategy(
    SelectionDAG &DAG, SDNode *N, unsigned ExpansionFactor) const {}

bool X86TargetLowering::shouldSplatInsEltVarIndex(EVT VT) const {}

MVT X86TargetLowering::hasFastEqualityCompare(unsigned NumBits) const {}

/// Val is the undef sentinel value or equal to the specified value.
static bool isUndefOrEqual(int Val, int CmpVal) {}

/// Return true if every element in Mask is the undef sentinel value or equal to
/// the specified value.
static bool isUndefOrEqual(ArrayRef<int> Mask, int CmpVal) {}

/// Return true if every element in Mask, beginning from position Pos and ending
/// in Pos+Size is the undef sentinel value or equal to the specified value.
static bool isUndefOrEqualInRange(ArrayRef<int> Mask, int CmpVal, unsigned Pos,
                                  unsigned Size) {}

/// Val is either the undef or zero sentinel value.
static bool isUndefOrZero(int Val) {}

/// Return true if every element in Mask, beginning from position Pos and ending
/// in Pos+Size is the undef sentinel value.
static bool isUndefInRange(ArrayRef<int> Mask, unsigned Pos, unsigned Size) {}

/// Return true if the mask creates a vector whose lower half is undefined.
static bool isUndefLowerHalf(ArrayRef<int> Mask) {}

/// Return true if the mask creates a vector whose upper half is undefined.
static bool isUndefUpperHalf(ArrayRef<int> Mask) {}

/// Return true if Val falls within the specified half-open range [Low, Hi).
static bool isInRange(int Val, int Low, int Hi) {}

/// Return true if the value of any element in Mask falls within the
/// specified half-open range [Low, Hi).
static bool isAnyInRange(ArrayRef<int> Mask, int Low, int Hi) {}

/// Return true if the value of any element in Mask is the zero sentinel value.
static bool isAnyZero(ArrayRef<int> Mask) {}

/// Return true if Val is undef or if its value falls within the
/// specified half-open range [Low, Hi).
static bool isUndefOrInRange(int Val, int Low, int Hi) {}

/// Return true if every element in Mask is undef or if its value
/// falls within the specified half-open range [Low, Hi).
static bool isUndefOrInRange(ArrayRef<int> Mask, int Low, int Hi) {}

/// Return true if Val is undef, zero or if its value falls within the
/// specified half-open range [Low, Hi).
static bool isUndefOrZeroOrInRange(int Val, int Low, int Hi) {}

/// Return true if every element in Mask is undef, zero or if its value
/// falls within the specified half-open range [Low, Hi).
static bool isUndefOrZeroOrInRange(ArrayRef<int> Mask, int Low, int Hi) {}

/// Return true if every element in Mask is an in-place blend/select mask
/// element or is undef.
LLVM_ATTRIBUTE_UNUSED static bool isBlendOrUndef(ArrayRef<int> Mask) {}

/// Return true if every element in Mask, beginning
/// from position Pos and ending in Pos + Size, falls within the specified
/// sequence (Low, Low + Step, ..., Low + (Size - 1) * Step) or is undef.
static bool isSequentialOrUndefInRange(ArrayRef<int> Mask, unsigned Pos,
                                       unsigned Size, int Low, int Step = 1) {}
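
// Example: isSequentialOrUndefInRange(<4,-1,6,7>, /*Pos=*/0, /*Size=*/4,
// /*Low=*/4) returns true, as undef (-1) entries may stand in for any value
// of the sequence 4, 5, 6, 7.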

/// Return true if every element in Mask, beginning
/// from position Pos and ending in Pos+Size, falls within the specified
/// sequence (Low, Low + Step, ..., Low + (Size - 1) * Step), or is undef or
/// is zero.
static bool isSequentialOrUndefOrZeroInRange(ArrayRef<int> Mask, unsigned Pos,
                                             unsigned Size, int Low,
                                             int Step = 1) {}

/// Return true if every element in Mask, beginning
/// from position Pos and ending in Pos+Size is undef or is zero.
static bool isUndefOrZeroInRange(ArrayRef<int> Mask, unsigned Pos,
                                 unsigned Size) {}

/// Return true if every element of a single input is referenced by the
/// shuffle mask, i.e. the mask just permutes all the elements.
static bool isCompletePermute(ArrayRef<int> Mask) {}

/// Helper function to test whether a shuffle mask could be
/// simplified by widening the elements being shuffled.
///
/// Appends the mask for wider elements in WidenedMask if valid. Otherwise
/// leaves it in an unspecified state.
///
/// NOTE: This must handle normal vector shuffle masks and *target* vector
/// shuffle masks. The latter have the special property of a '-2' representing
/// a zero-ed lane of a vector.
static bool canWidenShuffleElements(ArrayRef<int> Mask,
                                    SmallVectorImpl<int> &WidenedMask) {}

static bool canWidenShuffleElements(ArrayRef<int> Mask,
                                    const APInt &Zeroable,
                                    bool V2IsZero,
                                    SmallVectorImpl<int> &WidenedMask) {}

static bool canWidenShuffleElements(ArrayRef<int> Mask) {}

// Attempt to narrow/widen a shuffle mask until it matches the target number
// of elements.
static bool scaleShuffleElements(ArrayRef<int> Mask, unsigned NumDstElts,
                                 SmallVectorImpl<int> &ScaledMask) {}

static bool canScaleShuffleElements(ArrayRef<int> Mask, unsigned NumDstElts) {}

/// Returns true if Elt is a constant zero or a floating point constant +0.0.
bool X86::isZeroNode(SDValue Elt) {}

// Build a vector of constants.
// Use an UNDEF node if MaskElt == -1.
// Split 64-bit constants in 32-bit mode.
static SDValue getConstVector(ArrayRef<int> Values, MVT VT, SelectionDAG &DAG,
                              const SDLoc &dl, bool IsMask = false) {}

static SDValue getConstVector(ArrayRef<APInt> Bits, const APInt &Undefs,
                              MVT VT, SelectionDAG &DAG, const SDLoc &dl) {}

static SDValue getConstVector(ArrayRef<APInt> Bits, MVT VT,
                              SelectionDAG &DAG, const SDLoc &dl) {}

/// Returns a vector of specified type with all zero elements.
static SDValue getZeroVector(MVT VT, const X86Subtarget &Subtarget,
                             SelectionDAG &DAG, const SDLoc &dl) {}

// Helper to determine if the ops are all extracted subvectors that come from
// a single source. If commuting is allowed, they don't have to be in (Lo/Hi)
// order.
static SDValue getSplitVectorSrc(SDValue LHS, SDValue RHS, bool AllowCommute) {}

static SDValue extractSubVector(SDValue Vec, unsigned IdxVal, SelectionDAG &DAG,
                                const SDLoc &dl, unsigned vectorWidth) {}

/// Generate a DAG to grab 128-bits from a vector > 128 bits.  This
/// sets things up to match to an AVX VEXTRACTF128 / VEXTRACTI128
/// or AVX-512 VEXTRACTF32x4 / VEXTRACTI32x4
/// instructions or a simple subregister reference. Idx is an index in the
/// 128 bits we want.  It need not be aligned to a 128-bit boundary.  That makes
/// lowering EXTRACT_VECTOR_ELT operations easier.
static SDValue extract128BitVector(SDValue Vec, unsigned IdxVal,
                                   SelectionDAG &DAG, const SDLoc &dl) {}

/// Generate a DAG to grab 256-bits from a 512-bit vector.
static SDValue extract256BitVector(SDValue Vec, unsigned IdxVal,
                                   SelectionDAG &DAG, const SDLoc &dl) {}

static SDValue insertSubVector(SDValue Result, SDValue Vec, unsigned IdxVal,
                               SelectionDAG &DAG, const SDLoc &dl,
                               unsigned vectorWidth) {}

/// Generate a DAG to put 128-bits into a vector > 128 bits.  This
/// sets things up to match to an AVX VINSERTF128/VINSERTI128 or
/// AVX-512 VINSERTF32x4/VINSERTI32x4 instructions or a
/// simple superregister reference.  Idx is an index in the 128 bits
/// we want.  It need not be aligned to a 128-bit boundary.  That makes
/// lowering INSERT_VECTOR_ELT operations easier.
static SDValue insert128BitVector(SDValue Result, SDValue Vec, unsigned IdxVal,
                                  SelectionDAG &DAG, const SDLoc &dl) {}

/// Widen a vector to a larger size with the same scalar type, with the new
/// elements either zero or undef.
static SDValue widenSubVector(MVT VT, SDValue Vec, bool ZeroNewElements,
                              const X86Subtarget &Subtarget, SelectionDAG &DAG,
                              const SDLoc &dl) {}

/// Widen a vector to a larger size with the same scalar type, with the new
/// elements either zero or undef.
static SDValue widenSubVector(SDValue Vec, bool ZeroNewElements,
                              const X86Subtarget &Subtarget, SelectionDAG &DAG,
                              const SDLoc &dl, unsigned WideSizeInBits) {}

/// Widen a mask vector type to a minimum of v8i1/v16i1 to allow use of KSHIFT
/// and bitcast with integer types.
static MVT widenMaskVectorType(MVT VT, const X86Subtarget &Subtarget) {}

/// Widen a mask vector to a minimum of v8i1/v16i1 to allow use of KSHIFT and
/// bitcast with integer types.
static SDValue widenMaskVector(SDValue Vec, bool ZeroNewElements,
                               const X86Subtarget &Subtarget, SelectionDAG &DAG,
                               const SDLoc &dl) {}

// Helper function to collect subvector ops that are concatenated together,
// either by ISD::CONCAT_VECTORS or an ISD::INSERT_SUBVECTOR series.
// The subvectors in Ops are guaranteed to be the same type.
static bool collectConcatOps(SDNode *N, SmallVectorImpl<SDValue> &Ops,
                             SelectionDAG &DAG) {}

// Helper to check if \p V can be split into subvectors whose upper subvectors
// are all undef, in which case return the lower subvector.
static SDValue isUpperSubvectorUndef(SDValue V, const SDLoc &DL,
                                     SelectionDAG &DAG) {}

// Helper to check if we can access all the constituent subvectors without any
// extract ops.
static bool isFreeToSplitVector(SDNode *N, SelectionDAG &DAG) {}

static std::pair<SDValue, SDValue> splitVector(SDValue Op, SelectionDAG &DAG,
                                               const SDLoc &dl) {}

/// Break an operation into 2 half sized ops and then concatenate the results.
static SDValue splitVectorOp(SDValue Op, SelectionDAG &DAG, const SDLoc &dl) {}

/// Break an unary integer operation into 2 half sized ops and then
/// concatenate the result back.
static SDValue splitVectorIntUnary(SDValue Op, SelectionDAG &DAG,
                                   const SDLoc &dl) {}

/// Break a binary integer operation into 2 half sized ops and then
/// concatenate the result back.
static SDValue splitVectorIntBinary(SDValue Op, SelectionDAG &DAG,
                                    const SDLoc &dl) {}

// Helper for splitting operands of an operation to legal target size and
// apply a function on each part.
// Useful for operations that are available on SSE2 in 128-bit, on AVX2 in
// 256-bit and on AVX512BW in 512-bit. The argument VT is the type used for
// deciding if/how to split Ops. Ops elements do *not* have to be of type VT.
// The argument Builder is a function that will be applied on each split part:
// SDValue Builder(SelectionDAG&G, SDLoc, ArrayRef<SDValue>)
template <typename F>
SDValue SplitOpsAndApply(SelectionDAG &DAG, const X86Subtarget &Subtarget,
                         const SDLoc &DL, EVT VT, ArrayRef<SDValue> Ops,
                         F Builder, bool CheckBWI = true) {}
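
// Hypothetical usage sketch: build VPMADDWD at whatever width is legal and
// let SplitOpsAndApply break wider inputs into target-sized pieces. The
// wrapper name is illustrative; the Builder shape matches the comment above.
LLVM_ATTRIBUTE_UNUSED static SDValue
sketchSplitPMADDWD(SelectionDAG &DAG, const X86Subtarget &Subtarget,
                   const SDLoc &DL, EVT ResVT, SDValue LHS, SDValue RHS) {
  auto PMADDWDBuilder = [](SelectionDAG &G, const SDLoc &DL,
                           ArrayRef<SDValue> Ops) {
    // VPMADDWD halves the element count: vXi16 x vXi16 -> v(X/2)i32.
    MVT OpVT = Ops[0].getSimpleValueType();
    MVT MADVT = MVT::getVectorVT(MVT::i32, OpVT.getVectorNumElements() / 2);
    return G.getNode(X86ISD::VPMADDWD, DL, MADVT, Ops);
  };
  return SplitOpsAndApply(DAG, Subtarget, DL, ResVT, {LHS, RHS},
                          PMADDWDBuilder);
}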

// Helper function that extends a non-512-bit vector op to 512-bits on non-VLX
// targets.
static SDValue getAVX512Node(unsigned Opcode, const SDLoc &DL, MVT VT,
                             ArrayRef<SDValue> Ops, SelectionDAG &DAG,
                             const X86Subtarget &Subtarget) {}

/// Insert i1-subvector to i1-vector.
static SDValue insert1BitVector(SDValue Op, SelectionDAG &DAG,
                                const X86Subtarget &Subtarget) {}

static SDValue concatSubVectors(SDValue V1, SDValue V2, SelectionDAG &DAG,
                                const SDLoc &dl) {}

/// Returns a vector of specified type with all bits set.
/// Always build ones vectors as <4 x i32>, <8 x i32> or <16 x i32>.
/// Then bitcast to their original type, ensuring they get CSE'd.
static SDValue getOnesVector(EVT VT, SelectionDAG &DAG, const SDLoc &dl) {}

static SDValue getEXTEND_VECTOR_INREG(unsigned Opcode, const SDLoc &DL, EVT VT,
                                      SDValue In, SelectionDAG &DAG) {}

// Create OR(AND(LHS,MASK),AND(RHS,~MASK)) bit select pattern
static SDValue getBitSelect(const SDLoc &DL, MVT VT, SDValue LHS, SDValue RHS,
                            SDValue Mask, SelectionDAG &DAG) {}

void llvm::createUnpackShuffleMask(EVT VT, SmallVectorImpl<int> &Mask,
                                   bool Lo, bool Unary) {}
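
// Expected masks for VT = v4i32, matching the SSE unpack semantics: the
// non-unary Lo mask is <0,4,1,5> and the Hi mask is <2,6,3,7>; the unary
// forms reference only the first input: <0,0,1,1> and <2,2,3,3>.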

/// Similar to unpacklo/unpackhi, but without the 128-bit lane limitation
/// imposed by AVX and specific to the unary pattern. Example:
/// v8iX Lo --> <0, 0, 1, 1, 2, 2, 3, 3>
/// v8iX Hi --> <4, 4, 5, 5, 6, 6, 7, 7>
void llvm::createSplat2ShuffleMask(MVT VT, SmallVectorImpl<int> &Mask,
                                   bool Lo) {}

// Attempt to constant fold, else just create a VECTOR_SHUFFLE.
static SDValue getVectorShuffle(SelectionDAG &DAG, EVT VT, const SDLoc &dl,
                                SDValue V1, SDValue V2, ArrayRef<int> Mask) {}

/// Returns a vector_shuffle node for an unpackl operation.
static SDValue getUnpackl(SelectionDAG &DAG, const SDLoc &dl, EVT VT,
                          SDValue V1, SDValue V2) {}

/// Returns a vector_shuffle node for an unpackh operation.
static SDValue getUnpackh(SelectionDAG &DAG, const SDLoc &dl, EVT VT,
                          SDValue V1, SDValue V2) {}

/// Returns a node that packs the LHS + RHS nodes together at half width.
/// May return X86ISD::PACKSS/PACKUS, packing the top/bottom half.
/// TODO: Add subvector splitting if/when we have a need for it.
static SDValue getPack(SelectionDAG &DAG, const X86Subtarget &Subtarget,
                       const SDLoc &dl, MVT VT, SDValue LHS, SDValue RHS,
                       bool PackHiHalf = false) {}

/// Return a vector_shuffle of the specified vector and a zero or undef vector.
/// This produces a shuffle where the low element of V2 is swizzled into the
/// zero/undef vector, landing at element Idx.
/// This produces a shuffle mask like 4,1,2,3 (idx=0) or 0,1,2,4 (idx=3).
static SDValue getShuffleVectorZeroOrUndef(SDValue V2, int Idx,
                                           bool IsZero,
                                           const X86Subtarget &Subtarget,
                                           SelectionDAG &DAG) {}

static ConstantPoolSDNode *getTargetConstantPoolFromBasePtr(SDValue Ptr) {}

// TODO: Add support for non-zero offsets.
static const Constant *getTargetConstantFromBasePtr(SDValue Ptr) {}

static const Constant *getTargetConstantFromNode(LoadSDNode *Load) {}

static const Constant *getTargetConstantFromNode(SDValue Op) {}

const Constant *
X86TargetLowering::getTargetConstantFromLoad(LoadSDNode *LD) const {}

// Extract raw constant bits from constant pools.
static bool getTargetConstantBitsFromNode(SDValue Op, unsigned EltSizeInBits,
                                          APInt &UndefElts,
                                          SmallVectorImpl<APInt> &EltBits,
                                          bool AllowWholeUndefs = true,
                                          bool AllowPartialUndefs = false) {}

namespace llvm {
namespace X86 {
bool isConstantSplat(SDValue Op, APInt &SplatVal, bool AllowPartialUndefs) {}
} // namespace X86
} // namespace llvm

static bool getTargetShuffleMaskIndices(SDValue MaskNode,
                                        unsigned MaskEltSizeInBits,
                                        SmallVectorImpl<uint64_t> &RawMask,
                                        APInt &UndefElts) {}

// Match not(xor X, -1) -> X.
// Match not(pcmpgt(C, X)) -> pcmpgt(X, C - 1).
// Match not(extract_subvector(xor X, -1)) -> extract_subvector(X).
// Match not(concat_vectors(xor X, -1, xor Y, -1)) -> concat_vectors(X, Y).
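// (For the pcmpgt case: NOT(C > X) == (X >= C) == (X > C - 1) under signed
// compares, so the fold is only safe when C - 1 cannot wrap around the
// signed minimum.)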
static SDValue IsNOT(SDValue V, SelectionDAG &DAG) {}

/// Create a shuffle mask that matches the PACKSS/PACKUS truncation.
/// A multi-stage pack shuffle mask is created by specifying NumStages > 1.
/// Note: This ignores saturation, so inputs must be checked first.
static void createPackShuffleMask(MVT VT, SmallVectorImpl<int> &Mask,
                                  bool Unary, unsigned NumStages = 1) {}
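
// Example (single stage, little-endian byte order, saturation ignored): a
// v16i8 PACK of two v8i16 inputs corresponds to the byte shuffle mask
// <0,2,4,6,8,10,12,14,16,18,20,22,24,26,28,30>, i.e. the low byte of every
// i16 element of the LHS followed by those of the RHS.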

// Split the demanded elts of a PACKSS/PACKUS node between its operands.
static void getPackDemandedElts(EVT VT, const APInt &DemandedElts,
                                APInt &DemandedLHS, APInt &DemandedRHS) {}

// Split the demanded elts of a HADD/HSUB node between its operands.
static void getHorizDemandedElts(EVT VT, const APInt &DemandedElts,
                                 APInt &DemandedLHS, APInt &DemandedRHS) {}

/// Calculates the shuffle mask corresponding to the target-specific opcode.
/// If the mask could be calculated, returns it in \p Mask, returns the shuffle
/// operands in \p Ops, and returns true.
/// Sets \p IsUnary to true if only one source is used. Note that this will set
/// IsUnary for shuffles which use a single input multiple times, and in those
/// cases it will adjust the mask to only have indices within that single input.
/// It is an error to call this with non-empty Mask/Ops vectors.
static bool getTargetShuffleMask(SDValue N, bool AllowSentinelZero,
                                 SmallVectorImpl<SDValue> &Ops,
                                 SmallVectorImpl<int> &Mask, bool &IsUnary) {}

// Wrapper for getTargetShuffleMask that leaves out the IsUnary output.
static bool getTargetShuffleMask(SDValue N, bool AllowSentinelZero,
                                 SmallVectorImpl<SDValue> &Ops,
                                 SmallVectorImpl<int> &Mask) {}

/// Compute whether each element of a shuffle is zeroable.
///
/// A "zeroable" vector shuffle element is one which can be lowered to zero.
/// Either it is an undef element in the shuffle mask, the element of the input
/// referenced is undef, or the element of the input referenced is known to be
/// zero. Many x86 shuffles can zero lanes cheaply and we often want to handle
/// as many lanes with this technique as possible to simplify the remaining
/// shuffle.
static void computeZeroableShuffleElements(ArrayRef<int> Mask,
                                           SDValue V1, SDValue V2,
                                           APInt &KnownUndef, APInt &KnownZero) {}

/// Decode a target shuffle mask and inputs and see if any values are
/// known to be undef or zero from their inputs.
/// Returns true if the target shuffle mask was decoded.
/// FIXME: Merge this with computeZeroableShuffleElements?
static bool getTargetShuffleAndZeroables(SDValue N, SmallVectorImpl<int> &Mask,
                                         SmallVectorImpl<SDValue> &Ops,
                                         APInt &KnownUndef, APInt &KnownZero) {}

// Replace target shuffle mask elements with known undef/zero sentinels.
static void resolveTargetShuffleFromZeroables(SmallVectorImpl<int> &Mask,
                                              const APInt &KnownUndef,
                                              const APInt &KnownZero,
                                              bool ResolveKnownZeros = true) {}

// Extract target shuffle mask sentinel elements to known undef/zero bitmasks.
static void resolveZeroablesFromTargetShuffle(const SmallVectorImpl<int> &Mask,
                                              APInt &KnownUndef,
                                              APInt &KnownZero) {}

// Attempt to create a shuffle mask from a VSELECT/BLENDV condition mask.
static bool createShuffleMaskFromVSELECT(SmallVectorImpl<int> &Mask,
                                         SDValue Cond, bool IsBLENDV = false) {}

// Forward declaration (for getFauxShuffleMask recursive check).
static bool getTargetShuffleInputs(SDValue Op, const APInt &DemandedElts,
                                   SmallVectorImpl<SDValue> &Inputs,
                                   SmallVectorImpl<int> &Mask,
                                   const SelectionDAG &DAG, unsigned Depth,
                                   bool ResolveKnownElts);

// Attempt to decode ops that could be represented as a shuffle mask.
// The decoded shuffle mask may contain a different number of elements to the
// destination value type.
// TODO: Merge into getTargetShuffleInputs()
static bool getFauxShuffleMask(SDValue N, const APInt &DemandedElts,
                               SmallVectorImpl<int> &Mask,
                               SmallVectorImpl<SDValue> &Ops,
                               const SelectionDAG &DAG, unsigned Depth,
                               bool ResolveKnownElts) {}

/// Removes unused/repeated shuffle source inputs and adjusts the shuffle mask.
static void resolveTargetShuffleInputsAndMask(SmallVectorImpl<SDValue> &Inputs,
                                              SmallVectorImpl<int> &Mask) {}

/// Calls getTargetShuffleAndZeroables to resolve a target shuffle mask's inputs
/// and then sets the SM_SentinelUndef and SM_SentinelZero values.
/// Returns true if the target shuffle mask was decoded.
static bool getTargetShuffleInputs(SDValue Op, const APInt &DemandedElts,
                                   SmallVectorImpl<SDValue> &Inputs,
                                   SmallVectorImpl<int> &Mask,
                                   APInt &KnownUndef, APInt &KnownZero,
                                   const SelectionDAG &DAG, unsigned Depth,
                                   bool ResolveKnownElts) {}

static bool getTargetShuffleInputs(SDValue Op, const APInt &DemandedElts,
                                   SmallVectorImpl<SDValue> &Inputs,
                                   SmallVectorImpl<int> &Mask,
                                   const SelectionDAG &DAG, unsigned Depth,
                                   bool ResolveKnownElts) {}

static bool getTargetShuffleInputs(SDValue Op, SmallVectorImpl<SDValue> &Inputs,
                                   SmallVectorImpl<int> &Mask,
                                   const SelectionDAG &DAG, unsigned Depth = 0,
                                   bool ResolveKnownElts = true) {}

// Attempt to create a scalar/subvector broadcast from the base MemSDNode.
static SDValue getBROADCAST_LOAD(unsigned Opcode, const SDLoc &DL, EVT VT,
                                 EVT MemVT, MemSDNode *Mem, unsigned Offset,
                                 SelectionDAG &DAG) {}

/// Returns the scalar element that will make up the i'th
/// element of the result of the vector shuffle.
static SDValue getShuffleScalarElt(SDValue Op, unsigned Index,
                                   SelectionDAG &DAG, unsigned Depth) {}

// Use PINSRB/PINSRW/PINSRD to create a build vector.
static SDValue LowerBuildVectorAsInsert(SDValue Op, const SDLoc &DL,
                                        const APInt &NonZeroMask,
                                        unsigned NumNonZero, unsigned NumZero,
                                        SelectionDAG &DAG,
                                        const X86Subtarget &Subtarget) {}

/// Custom lower build_vector of v16i8.
static SDValue LowerBuildVectorv16i8(SDValue Op, const SDLoc &DL,
                                     const APInt &NonZeroMask,
                                     unsigned NumNonZero, unsigned NumZero,
                                     SelectionDAG &DAG,
                                     const X86Subtarget &Subtarget) {}

/// Custom lower build_vector of v8i16.
static SDValue LowerBuildVectorv8i16(SDValue Op, const SDLoc &DL,
                                     const APInt &NonZeroMask,
                                     unsigned NumNonZero, unsigned NumZero,
                                     SelectionDAG &DAG,
                                     const X86Subtarget &Subtarget) {}

/// Custom lower build_vector of v4i32 or v4f32.
static SDValue LowerBuildVectorv4x32(SDValue Op, const SDLoc &DL,
                                     SelectionDAG &DAG,
                                     const X86Subtarget &Subtarget) {}

/// Return a vector logical shift node.
static SDValue getVShift(bool isLeft, EVT VT, SDValue SrcOp, unsigned NumBits,
                         SelectionDAG &DAG, const TargetLowering &TLI,
                         const SDLoc &dl) {}

static SDValue LowerAsSplatVectorLoad(SDValue SrcOp, MVT VT, const SDLoc &dl,
                                      SelectionDAG &DAG) {}

// Recurse to find a LoadSDNode source and the accumulated ByteOffset.
static bool findEltLoadSrc(SDValue Elt, LoadSDNode *&Ld, int64_t &ByteOffset) {}

/// Given the initializing elements 'Elts' of a vector of type 'VT', see if the
/// elements can be replaced by a single large load which has the same value as
/// a build_vector or insert_subvector whose loaded operands are 'Elts'.
///
/// Example: <load i32 *a, load i32 *a+4, zero, undef> -> zextload a
static SDValue EltsFromConsecutiveLoads(EVT VT, ArrayRef<SDValue> Elts,
                                        const SDLoc &DL, SelectionDAG &DAG,
                                        const X86Subtarget &Subtarget,
                                        bool IsAfterLegalize) {}

// Combine a vector op (shuffle, etc.) that is equal to build_vector load1,
// load2, load3, load4, <0, 1, 2, 3> into a vector load if the load addresses
// are consecutive, non-overlapping, and in the right order.
static SDValue combineToConsecutiveLoads(EVT VT, SDValue Op, const SDLoc &DL,
                                         SelectionDAG &DAG,
                                         const X86Subtarget &Subtarget,
                                         bool IsAfterLegalize) {}

static Constant *getConstantVector(MVT VT, ArrayRef<APInt> Bits,
                                   const APInt &Undefs, LLVMContext &C) {}

static Constant *getConstantVector(MVT VT, const APInt &SplatValue,
                                   unsigned SplatBitSize, LLVMContext &C) {}

static bool isFoldableUseOfShuffle(SDNode *N) {}

/// Attempt to use the vbroadcast instruction to generate a splat value
/// from a splat BUILD_VECTOR which uses:
///  a. A single scalar load, or a constant.
///  b. Repeated pattern of constants (e.g. <0,1,0,1> or <0,1,2,3,0,1,2,3>).
///
/// The VBROADCAST node is returned when a pattern is found,
/// or SDValue() otherwise.
static SDValue lowerBuildVectorAsBroadcast(BuildVectorSDNode *BVOp,
                                           const SDLoc &dl,
                                           const X86Subtarget &Subtarget,
                                           SelectionDAG &DAG) {}

/// For an EXTRACT_VECTOR_ELT with a constant index return the real
/// underlying vector and index.
///
/// Modifies \p ExtractedFromVec to the real vector and returns the real
/// index.
static int getUnderlyingExtractedFromVec(SDValue &ExtractedFromVec,
                                         SDValue ExtIdx) {}

static SDValue buildFromShuffleMostly(SDValue Op, const SDLoc &DL,
                                      SelectionDAG &DAG) {}

// Lower BUILD_VECTOR operation for v8bf16, v16bf16 and v32bf16 types.
static SDValue LowerBUILD_VECTORvXbf16(SDValue Op, SelectionDAG &DAG,
                                       const X86Subtarget &Subtarget) {}

// Lower BUILD_VECTOR operation for v8i1 and v16i1 types.
static SDValue LowerBUILD_VECTORvXi1(SDValue Op, const SDLoc &dl,
                                     SelectionDAG &DAG,
                                     const X86Subtarget &Subtarget) {}

LLVM_ATTRIBUTE_UNUSED static bool isHorizOp(unsigned Opcode) {}

/// This is a helper function of LowerToHorizontalOp().
/// This function checks whether the input build_vector \p N implements a
/// 128-bit partial horizontal operation on a 256-bit vector, but that operation
/// may not match the layout of an x86 256-bit horizontal instruction.
/// In other words, if this returns true, then some extraction/insertion will
/// be required to produce a valid horizontal instruction.
///
/// Parameter \p Opcode defines the kind of horizontal operation to match.
/// For example, if \p Opcode is equal to ISD::ADD, then this function
/// checks if \p N implements a horizontal arithmetic add; if instead \p Opcode
/// is equal to ISD::SUB, then this function checks if this is a horizontal
/// arithmetic sub.
///
/// This function only analyzes elements of \p N whose indices are
/// in range [BaseIdx, LastIdx).
///
/// TODO: This function was originally used to match both real and fake partial
/// horizontal operations, but the index-matching logic is incorrect for that.
/// See the corrected implementation in isHopBuildVector(). Can we reduce this
/// code because it is only used for partial h-op matching now?
static bool isHorizontalBinOpPart(const BuildVectorSDNode *N, unsigned Opcode,
                                  const SDLoc &DL, SelectionDAG &DAG,
                                  unsigned BaseIdx, unsigned LastIdx,
                                  SDValue &V0, SDValue &V1) {}

/// Emit a sequence of two 128-bit horizontal add/sub followed by
/// a concat_vector.
///
/// This is a helper function of LowerToHorizontalOp().
/// This function expects two 256-bit vectors called V0 and V1.
/// At first, each vector is split into two separate 128-bit vectors.
/// Then, the resulting 128-bit vectors are used to implement two
/// horizontal binary operations.
///
/// The kind of horizontal binary operation is defined by \p X86Opcode.
///
/// \p Mode specifies how the 128-bit parts of V0 and V1 are passed as input
/// to the two new horizontal binops.
/// When Mode is set, the first horizontal binop dag node would take as input
/// the lower 128-bit of V0 and the upper 128-bit of V0. The second
/// horizontal binop dag node would take as input the lower 128-bit of V1
/// and the upper 128-bit of V1.
///   Example:
///     HADD V0_LO, V0_HI
///     HADD V1_LO, V1_HI
///
/// Otherwise, the first horizontal binop dag node takes as input the lower
/// 128-bit of V0 and the lower 128-bit of V1, and the second horizontal binop
/// dag node takes the upper 128-bit of V0 and the upper 128-bit of V1.
///   Example:
///     HADD V0_LO, V1_LO
///     HADD V0_HI, V1_HI
///
/// If \p isUndefLO is set, then the algorithm propagates UNDEF to the lower
/// 128-bits of the result. If \p isUndefHI is set, then UNDEF is propagated to
/// the upper 128-bits of the result.
static SDValue ExpandHorizontalBinOp(const SDValue &V0, const SDValue &V1,
                                     const SDLoc &DL, SelectionDAG &DAG,
                                     unsigned X86Opcode, bool Mode,
                                     bool isUndefLO, bool isUndefHI) {}

/// Returns true iff \p BV builds a vector with the result equivalent to
/// the result of an ADDSUB/SUBADD operation.
/// If true is returned then the operands of ADDSUB = Opnd0 +- Opnd1
/// (SUBADD = Opnd0 -+ Opnd1) operation are written to the parameters
/// \p Opnd0 and \p Opnd1.
static bool isAddSubOrSubAdd(const BuildVectorSDNode *BV,
                             const X86Subtarget &Subtarget, SelectionDAG &DAG,
                             SDValue &Opnd0, SDValue &Opnd1,
                             unsigned &NumExtracts,
                             bool &IsSubAdd) {}

/// Returns true if it is possible to fold MUL and an idiom that has already
/// been
/// recognized as ADDSUB/SUBADD(\p Opnd0, \p Opnd1) into
/// FMADDSUB/FMSUBADD(x, y, \p Opnd1). If (and only if) true is returned, the
/// operands of FMADDSUB/FMSUBADD are written to parameters \p Opnd0, \p Opnd1, \p Opnd2.
///
/// Prior to calling this function it should be known that there is some
/// SDNode that potentially can be replaced with an X86ISD::ADDSUB operation
/// using \p Opnd0 and \p Opnd1 as operands. Also, this method is called
/// before replacement of such SDNode with ADDSUB operation. Thus the number
/// of \p Opnd0 uses is expected to be equal to 2.
/// For example, this function may be called for the following IR:
///    %AB = fmul fast <2 x double> %A, %B
///    %Sub = fsub fast <2 x double> %AB, %C
///    %Add = fadd fast <2 x double> %AB, %C
///    %Addsub = shufflevector <2 x double> %Sub, <2 x double> %Add,
///                            <2 x i32> <i32 0, i32 3>
/// There is a def for %Addsub here, which potentially can be replaced by
/// X86ISD::ADDSUB operation:
///    %Addsub = X86ISD::ADDSUB %AB, %C
/// and such ADDSUB can further be replaced with FMADDSUB:
///    %Addsub = FMADDSUB %A, %B, %C.
///
/// The main reason why this method is called before the replacement of the
/// recognized ADDSUB idiom with ADDSUB operation is that such replacement
/// is illegal sometimes. E.g. 512-bit ADDSUB is not available, while 512-bit
/// FMADDSUB is.
static bool isFMAddSubOrFMSubAdd(const X86Subtarget &Subtarget,
                                 SelectionDAG &DAG,
                                 SDValue &Opnd0, SDValue &Opnd1, SDValue &Opnd2,
                                 unsigned ExpectedUses) {}

/// Try to fold a build_vector that performs an 'addsub', 'fmaddsub' or
/// 'fsubadd' operation into an X86ISD::ADDSUB, X86ISD::FMADDSUB or
/// X86ISD::FMSUBADD node.
static SDValue lowerToAddSubOrFMAddSub(const BuildVectorSDNode *BV,
                                       const SDLoc &DL,
                                       const X86Subtarget &Subtarget,
                                       SelectionDAG &DAG) {}

static bool isHopBuildVector(const BuildVectorSDNode *BV, SelectionDAG &DAG,
                             unsigned &HOpcode, SDValue &V0, SDValue &V1) {}

static SDValue getHopForBuildVector(const BuildVectorSDNode *BV,
                                    const SDLoc &DL, SelectionDAG &DAG,
                                    unsigned HOpcode, SDValue V0, SDValue V1) {}

/// Lower BUILD_VECTOR to a horizontal add/sub operation if possible.
static SDValue LowerToHorizontalOp(const BuildVectorSDNode *BV, const SDLoc &DL,
                                   const X86Subtarget &Subtarget,
                                   SelectionDAG &DAG) {}

static SDValue LowerShift(SDValue Op, const X86Subtarget &Subtarget,
                          SelectionDAG &DAG);

/// If a BUILD_VECTOR's source elements all apply the same bit operation and
/// one of their operands is constant, lower to a pair of BUILD_VECTORs and
/// just apply the bit operation to the vectors.
/// NOTE: It's not in our interest to start making a general-purpose
/// vectorizer from this, but enough scalar bit operations are created by the
/// later legalization + scalarization stages to need basic support.
static SDValue lowerBuildVectorToBitOp(BuildVectorSDNode *Op, const SDLoc &DL,
                                       const X86Subtarget &Subtarget,
                                       SelectionDAG &DAG) {}

/// Create a vector constant without a load. SSE/AVX provide the bare minimum
/// functionality to do this, so it's all zeros, all ones, or some derivation
/// that is cheap to calculate.
static SDValue materializeVectorConstant(SDValue Op, const SDLoc &DL,
                                         SelectionDAG &DAG,
                                         const X86Subtarget &Subtarget) {}

/// Look for opportunities to create a VPERMV/VPERMILPV/PSHUFB variable permute
/// from a vector of source values and a vector of extraction indices.
/// The vectors might be manipulated to match the type of the permute op.
static SDValue createVariablePermute(MVT VT, SDValue SrcVec, SDValue IndicesVec,
                                     const SDLoc &DL, SelectionDAG &DAG,
                                     const X86Subtarget &Subtarget) {}

// Tries to lower a BUILD_VECTOR composed of extract-extract chains that can be
// reasoned to be a permutation of a vector by indices in a non-constant vector.
// (build_vector (extract_elt V, (extract_elt I, 0)),
//               (extract_elt V, (extract_elt I, 1)),
//                    ...
// ->
// (vpermv I, V)
//
// TODO: Handle undefs
// TODO: Utilize pshufb and zero mask blending to support more efficient
// construction of vectors with constant-0 elements.
static SDValue
LowerBUILD_VECTORAsVariablePermute(SDValue V, const SDLoc &DL,
                                   SelectionDAG &DAG,
                                   const X86Subtarget &Subtarget) {}

SDValue
X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {}

// 256-bit AVX can use the vinsertf128 instruction
// to create 256-bit vectors from two other 128-bit ones.
// TODO: Detect subvector broadcast here instead of DAG combine?
static SDValue LowerAVXCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG,
                                      const X86Subtarget &Subtarget) {}

// Lower vXi1 CONCAT_VECTORS, including the case where the concatenation is a
// type promotion (by concatenating i1 zeros) of the result of a node that
// already zeros all upper bits of a k-register.
// TODO: Merge this with LowerAVXCONCAT_VECTORS?
static SDValue LowerCONCAT_VECTORSvXi1(SDValue Op,
                                       const X86Subtarget &Subtarget,
                                       SelectionDAG &DAG) {}

static SDValue LowerCONCAT_VECTORS(SDValue Op,
                                   const X86Subtarget &Subtarget,
                                   SelectionDAG &DAG) {}

//===----------------------------------------------------------------------===//
// Vector shuffle lowering
//
// This is an experimental code path for lowering vector shuffles on x86. It is
// designed to handle arbitrary vector shuffles and blends, gracefully
// degrading performance as necessary. It works hard to recognize idiomatic
// shuffles and lower them to optimal instruction patterns without leaving
// a framework that allows reasonably efficient handling of all vector shuffle
// patterns.
//===----------------------------------------------------------------------===//

/// Tiny helper function to identify a no-op mask.
///
/// This is a somewhat boring predicate function. It checks whether the mask
/// array input, which is assumed to be a single-input shuffle mask of the kind
/// used by the X86 shuffle instructions (not a fully general
/// ShuffleVectorSDNode mask) requires any shuffles to occur. Both undef and an
/// in-place shuffle are 'no-op's.
static bool isNoopShuffleMask(ArrayRef<int> Mask) {}

/// Test whether there are elements crossing LaneSizeInBits lanes in this
/// shuffle mask.
///
/// X86 divides up its shuffles into in-lane and cross-lane shuffle operations
/// and we routinely test for these.
static bool isLaneCrossingShuffleMask(unsigned LaneSizeInBits,
                                      unsigned ScalarSizeInBits,
                                      ArrayRef<int> Mask) {}

/// Test whether there are elements crossing 128-bit lanes in this
/// shuffle mask.
static bool is128BitLaneCrossingShuffleMask(MVT VT, ArrayRef<int> Mask) {}

/// Test whether elements in each LaneSizeInBits lane in this shuffle mask come
/// from multiple lanes - this is different to isLaneCrossingShuffleMask to
/// better support 'repeated mask + lane permute' style shuffles.
static bool isMultiLaneShuffleMask(unsigned LaneSizeInBits,
                                   unsigned ScalarSizeInBits,
                                   ArrayRef<int> Mask) {}

/// Test whether a shuffle mask is equivalent within each sub-lane.
///
/// This checks a shuffle mask to see if it is performing the same
/// lane-relative shuffle in each sub-lane. This trivially implies
/// that it is also not lane-crossing. It may however involve a blend from the
/// same lane of a second vector.
///
/// The specific repeated shuffle mask is populated in \p RepeatedMask, as it is
/// non-trivial to compute in the face of undef lanes. The representation is
/// suitable for use with existing 128-bit shuffles as entries from the second
/// vector have been remapped to [LaneSize, 2*LaneSize).
static bool isRepeatedShuffleMask(unsigned LaneSizeInBits, MVT VT,
                                  ArrayRef<int> Mask,
                                  SmallVectorImpl<int> &RepeatedMask) {}

/// Test whether a shuffle mask is equivalent within each 128-bit lane.
static bool
is128BitLaneRepeatedShuffleMask(MVT VT, ArrayRef<int> Mask,
                                SmallVectorImpl<int> &RepeatedMask) {}

static bool
is128BitLaneRepeatedShuffleMask(MVT VT, ArrayRef<int> Mask) {}

/// Test whether a shuffle mask is equivalent within each 256-bit lane.
static bool
is256BitLaneRepeatedShuffleMask(MVT VT, ArrayRef<int> Mask,
                                SmallVectorImpl<int> &RepeatedMask) {}

/// Test whether a target shuffle mask is equivalent within each sub-lane.
/// Unlike isRepeatedShuffleMask we must respect SM_SentinelZero.
static bool isRepeatedTargetShuffleMask(unsigned LaneSizeInBits,
                                        unsigned EltSizeInBits,
                                        ArrayRef<int> Mask,
                                        SmallVectorImpl<int> &RepeatedMask) {}

/// Test whether a target shuffle mask is equivalent within each sub-lane.
/// Unlike isRepeatedShuffleMask we must respect SM_SentinelZero.
static bool isRepeatedTargetShuffleMask(unsigned LaneSizeInBits, MVT VT,
                                        ArrayRef<int> Mask,
                                        SmallVectorImpl<int> &RepeatedMask) {}

/// Checks whether the vector elements referenced by two shuffle masks are
/// equivalent.
static bool IsElementEquivalent(int MaskSize, SDValue Op, SDValue ExpectedOp,
                                int Idx, int ExpectedIdx) {}

/// Checks whether a shuffle mask is equivalent to an explicit list of
/// arguments.
///
/// This is a fast way to test a shuffle mask against a fixed pattern:
///
///   if (isShuffleEquivalent(Mask, {3, 2, 1, 0})) { ... }
///
/// It returns true if the mask is exactly as wide as the argument list, and
/// each element of the mask is either -1 (signifying undef) or the value given
/// in the argument.
static bool isShuffleEquivalent(ArrayRef<int> Mask, ArrayRef<int> ExpectedMask,
                                SDValue V1 = SDValue(),
                                SDValue V2 = SDValue()) {}

/// Checks whether a target shuffle mask is equivalent to an explicit pattern.
///
/// The masks must be exactly the same width.
///
/// If an element in Mask matches SM_SentinelUndef (-1) then the corresponding
/// value in ExpectedMask is always accepted. Otherwise the indices must match.
///
/// SM_SentinelZero is accepted as a valid negative index but must match in
/// both, or via a known bits test.
static bool isTargetShuffleEquivalent(MVT VT, ArrayRef<int> Mask,
                                      ArrayRef<int> ExpectedMask,
                                      const SelectionDAG &DAG,
                                      SDValue V1 = SDValue(),
                                      SDValue V2 = SDValue()) {}

// Check if the shuffle mask is suitable for the AVX vpunpcklwd or vpunpckhwd
// instructions.
static bool isUnpackWdShuffleMask(ArrayRef<int> Mask, MVT VT,
                                  const SelectionDAG &DAG) {}

static bool is128BitUnpackShuffleMask(ArrayRef<int> Mask,
                                      const SelectionDAG &DAG) {}

/// Return true if a shuffle mask chooses elements identically in its top and
/// bottom halves. For example, any splat mask has the same top and bottom
/// halves. If an element is undefined in only one half of the mask, the halves
/// are not considered identical.
static bool hasIdenticalHalvesShuffleMask(ArrayRef<int> Mask) {}

/// Get a 4-lane 8-bit shuffle immediate for a mask.
///
/// This helper function produces an 8-bit shuffle immediate corresponding to
/// the ubiquitous shuffle encoding scheme used in x86 instructions for
/// shuffling 4 lanes. It can be used with most of the PSHUF instructions for
/// example.
///
/// NB: We rely heavily on "undef" masks preserving the input lane.
static unsigned getV4X86ShuffleImm(ArrayRef<int> Mask) {}

static SDValue getV4X86ShuffleImm8ForMask(ArrayRef<int> Mask, const SDLoc &DL,
                                          SelectionDAG &DAG) {}

// Checks whether the shuffle result has the form 0*a[0] 0*a[1] ... 0*a[n]
// (n >= 0), i.e. each nonzero element a[i] may be preceded by any number of
// zeroable elements, and the a[i] appear in ascending order. Each element of
// Zeroable corresponds to a particular element of Mask, as described in the
// computeZeroableShuffleElements function.
//
// The function looks for a sub-mask whose nonzero elements are in increasing
// order. If such a sub-mask exists, the function returns true.
static bool isNonZeroElementsInOrder(const APInt &Zeroable,
                                     ArrayRef<int> Mask, const EVT &VectorType,
                                     bool &IsZeroSideLeft) {}

/// Try to lower a shuffle with a single PSHUFB of V1 or V2.
static SDValue lowerShuffleWithPSHUFB(const SDLoc &DL, MVT VT,
                                      ArrayRef<int> Mask, SDValue V1,
                                      SDValue V2, const APInt &Zeroable,
                                      const X86Subtarget &Subtarget,
                                      SelectionDAG &DAG) {}

static SDValue getMaskNode(SDValue Mask, MVT MaskVT,
                           const X86Subtarget &Subtarget, SelectionDAG &DAG,
                           const SDLoc &dl);

// X86 has a dedicated shuffle pattern that can be lowered to VEXPAND.
static SDValue lowerShuffleWithEXPAND(const SDLoc &DL, MVT VT, SDValue V1,
                                      SDValue V2, ArrayRef<int> Mask,
                                      const APInt &Zeroable,
                                      const X86Subtarget &Subtarget,
                                      SelectionDAG &DAG) {}

static bool matchShuffleWithUNPCK(MVT VT, SDValue &V1, SDValue &V2,
                                  unsigned &UnpackOpcode, bool IsUnary,
                                  ArrayRef<int> TargetMask, const SDLoc &DL,
                                  SelectionDAG &DAG,
                                  const X86Subtarget &Subtarget) {}

// X86 has dedicated unpack instructions that can handle specific blend
// operations: UNPCKH and UNPCKL.
static SDValue lowerShuffleWithUNPCK(const SDLoc &DL, MVT VT, SDValue V1,
                                     SDValue V2, ArrayRef<int> Mask,
                                     SelectionDAG &DAG) {}

/// Check if the mask can be mapped to a preliminary 64-bit shuffle (vperm)
/// followed by a 256-bit unpack.
static SDValue lowerShuffleWithUNPCK256(const SDLoc &DL, MVT VT, SDValue V1,
                                        SDValue V2, ArrayRef<int> Mask,
                                        SelectionDAG &DAG) {}

// Check if the mask can be mapped to a TRUNCATE or VTRUNC, truncating the
// source into the lower elements and zeroing the upper elements.
static bool matchShuffleAsVTRUNC(MVT &SrcVT, MVT &DstVT, MVT VT,
                                 ArrayRef<int> Mask, const APInt &Zeroable,
                                 const X86Subtarget &Subtarget) {}

// Helper to create TRUNCATE/VTRUNC nodes, optionally with zero/undef upper
// element padding to the final DstVT.
static SDValue getAVX512TruncNode(const SDLoc &DL, MVT DstVT, SDValue Src,
                                  const X86Subtarget &Subtarget,
                                  SelectionDAG &DAG, bool ZeroUppers) {}

// Try to lower trunc+vector_shuffle to a vpmovdb or a vpmovdw instruction.
//
// An example is the following:
//
// t0: ch = EntryToken
//           t2: v4i64,ch = CopyFromReg t0, Register:v4i64 %0
//         t25: v4i32 = truncate t2
//       t41: v8i16 = bitcast t25
//       t21: v8i16 = BUILD_VECTOR undef:i16, undef:i16, undef:i16, undef:i16,
//       Constant:i16<0>, Constant:i16<0>, Constant:i16<0>, Constant:i16<0>
//     t51: v8i16 = vector_shuffle<0,2,4,6,12,13,14,15> t41, t21
//   t18: v2i64 = bitcast t51
//
// One can just use a single vpmovdw instruction; without avx512vl we need to
// use the zmm variant and extract the lower subvector, padding with zeroes.
// TODO: Merge with lowerShuffleAsVTRUNC.
static SDValue lowerShuffleWithVPMOV(const SDLoc &DL, MVT VT, SDValue V1,
                                     SDValue V2, ArrayRef<int> Mask,
                                     const APInt &Zeroable,
                                     const X86Subtarget &Subtarget,
                                     SelectionDAG &DAG) {}

// Attempt to match binary shuffle patterns as a truncate.
static SDValue lowerShuffleAsVTRUNC(const SDLoc &DL, MVT VT, SDValue V1,
                                    SDValue V2, ArrayRef<int> Mask,
                                    const APInt &Zeroable,
                                    const X86Subtarget &Subtarget,
                                    SelectionDAG &DAG) {}

/// Check whether a compaction lowering can be done by dropping even/odd
/// elements and compute how many times even/odd elements must be dropped.
///
/// This handles shuffles which take every Nth element where N is a power of
/// two. Example shuffle masks:
///
/// (even)
///  N = 1:  0,  2,  4,  6,  8, 10, 12, 14,  0,  2,  4,  6,  8, 10, 12, 14
///  N = 1:  0,  2,  4,  6,  8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30
///  N = 2:  0,  4,  8, 12,  0,  4,  8, 12,  0,  4,  8, 12,  0,  4,  8, 12
///  N = 2:  0,  4,  8, 12, 16, 20, 24, 28,  0,  4,  8, 12, 16, 20, 24, 28
///  N = 3:  0,  8,  0,  8,  0,  8,  0,  8,  0,  8,  0,  8,  0,  8,  0,  8
///  N = 3:  0,  8, 16, 24,  0,  8, 16, 24,  0,  8, 16, 24,  0,  8, 16, 24
///
/// (odd)
///  N = 1:  1,  3,  5,  7,  9, 11, 13, 15,  0,  2,  4,  6,  8, 10, 12, 14
///  N = 1:  1,  3,  5,  7,  9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31
///
/// Any of these lanes can of course be undef.
///
/// This routine only supports N <= 3.
/// FIXME: Evaluate whether either AVX or AVX-512 has any opportunities here
/// for larger N.
///
/// \returns N above, i.e. the number of times even/odd elements must be
/// dropped, if such a number exists; otherwise returns zero.
static int canLowerByDroppingElements(ArrayRef<int> Mask, bool MatchEven,
                                      bool IsSingleInput) {}

// X86 has dedicated pack instructions that can handle specific truncation
// operations: PACKSS and PACKUS.
// Checks for compaction shuffle masks if MaxStages > 1.
// TODO: Add support for matching multiple PACKSS/PACKUS stages.
static bool matchShuffleWithPACK(MVT VT, MVT &SrcVT, SDValue &V1, SDValue &V2,
                                 unsigned &PackOpcode, ArrayRef<int> TargetMask,
                                 const SelectionDAG &DAG,
                                 const X86Subtarget &Subtarget,
                                 unsigned MaxStages = 1) {}

static SDValue lowerShuffleWithPACK(const SDLoc &DL, MVT VT, SDValue V1,
                                    SDValue V2, ArrayRef<int> Mask,
                                    const X86Subtarget &Subtarget,
                                    SelectionDAG &DAG) {}

/// Try to emit a bitmask instruction for a shuffle.
///
/// This handles cases where we can model a blend exactly as a bitmask due to
/// one of the inputs being zeroable.
static SDValue lowerShuffleAsBitMask(const SDLoc &DL, MVT VT, SDValue V1,
                                     SDValue V2, ArrayRef<int> Mask,
                                     const APInt &Zeroable,
                                     const X86Subtarget &Subtarget,
                                     SelectionDAG &DAG) {}

/// Try to emit a blend instruction for a shuffle using bit math.
///
/// This is used as a fallback approach when first class blend instructions are
/// unavailable. Currently it is only suitable for integer vectors, but could
/// be generalized for floating point vectors if desirable.
static SDValue lowerShuffleAsBitBlend(const SDLoc &DL, MVT VT, SDValue V1,
                                      SDValue V2, ArrayRef<int> Mask,
                                      SelectionDAG &DAG) {}

static SDValue getVectorMaskingNode(SDValue Op, SDValue Mask,
                                    SDValue PreservedSrc,
                                    const X86Subtarget &Subtarget,
                                    SelectionDAG &DAG);

static bool matchShuffleAsBlend(MVT VT, SDValue V1, SDValue V2,
                                MutableArrayRef<int> Mask,
                                const APInt &Zeroable, bool &ForceV1Zero,
                                bool &ForceV2Zero, uint64_t &BlendMask) {}
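
// The per-element test of matchShuffleAsBlend, in simplified form (ignoring
// the Zeroable/ForceV1Zero/ForceV2Zero handling): each lane must be undef,
// come from V1 in place, or come from V2 in place:
//   for (int i = 0, Size = Mask.size(); i != Size; ++i) {
//     int M = Mask[i];
//     if (M < 0)
//       continue;               // Undef: either source works.
//     if (M == i)
//       continue;               // Taken from V1, already in place.
//     if (M == i + Size) {
//       BlendMask |= 1ull << i; // Taken from V2, already in place.
//       continue;
//     }
//     return false;             // Needs a permute, not a blend.
//   }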

/// Try to emit a blend instruction for a shuffle.
///
/// This doesn't do any checks for the availability of instructions for blending
/// these values. It relies on the availability of the X86ISD::BLENDI pattern to
/// be matched in the backend with the type given. What it does check for is
/// that the shuffle mask is a blend, or convertible into a blend with zero.
static SDValue lowerShuffleAsBlend(const SDLoc &DL, MVT VT, SDValue V1,
                                   SDValue V2, ArrayRef<int> Original,
                                   const APInt &Zeroable,
                                   const X86Subtarget &Subtarget,
                                   SelectionDAG &DAG) {}

/// Try to lower as a blend of elements from two inputs followed by
/// a single-input permutation.
///
/// This matches the pattern where we can blend elements from two inputs and
/// then reduce the shuffle to a single-input permutation.
static SDValue lowerShuffleAsBlendAndPermute(const SDLoc &DL, MVT VT,
                                             SDValue V1, SDValue V2,
                                             ArrayRef<int> Mask,
                                             SelectionDAG &DAG,
                                             bool ImmBlends = false) {}

/// Try to lower as an unpack of elements from two inputs followed by
/// a single-input permutation.
///
/// This matches the pattern where we can unpack elements from two inputs and
/// then reduce the shuffle to a single-input (wider) permutation.
static SDValue lowerShuffleAsUNPCKAndPermute(const SDLoc &DL, MVT VT,
                                             SDValue V1, SDValue V2,
                                             ArrayRef<int> Mask,
                                             SelectionDAG &DAG) {}

/// Try to lower a shuffle as a permute of the inputs followed by an
/// UNPCK instruction.
///
/// This specifically targets cases where we end up with alternating between
/// the two inputs, and so can permute them into something that feeds a single
/// UNPCK instruction. Note that this routine only targets integer vectors
/// because for floating point vectors we have a generalized SHUFPS lowering
/// strategy that handles everything that doesn't *exactly* match an unpack,
/// making this clever lowering unnecessary.
static SDValue lowerShuffleAsPermuteAndUnpack(const SDLoc &DL, MVT VT,
                                              SDValue V1, SDValue V2,
                                              ArrayRef<int> Mask,
                                              const X86Subtarget &Subtarget,
                                              SelectionDAG &DAG) {}

/// Helper to form a PALIGNR-based rotate+permute, merging 2 inputs and then
/// permuting the elements of the result in place.
static SDValue lowerShuffleAsByteRotateAndPermute(
    const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
    const X86Subtarget &Subtarget, SelectionDAG &DAG) {}

static bool isBroadcastShuffleMask(ArrayRef<int> Mask) {}

static bool isNoopOrBroadcastShuffleMask(ArrayRef<int> Mask) {}

/// Check if the Mask consists of the same element repeated multiple times.
static bool isSingleElementRepeatedMask(ArrayRef<int> Mask) {}
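
// For isSingleElementRepeatedMask, a minimal sketch (whether an all-undef
// mask should count is an assumption here): all non-undef elements must be
// identical:
//   int RepeatedElt = -1;
//   for (int M : Mask) {
//     if (M < 0)
//       continue;
//     if (RepeatedElt < 0)
//       RepeatedElt = M;
//     else if (M != RepeatedElt)
//       return false;
//   }
//   return RepeatedElt >= 0;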

/// Generic routine to decompose a shuffle and blend into independent
/// blends and permutes.
///
/// This matches the extremely common pattern for handling combined
/// shuffle+blend operations on newer X86 ISAs where we have very fast blend
/// operations. It will try to pick the best arrangement of shuffles and
/// blends. For vXi8/vXi16 shuffles we may use unpack instead of blend.
static SDValue lowerShuffleAsDecomposedShuffleMerge(
    const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
    const APInt &Zeroable, const X86Subtarget &Subtarget, SelectionDAG &DAG) {}

static int matchShuffleAsBitRotate(MVT &RotateVT, int EltSizeInBits,
                                   const X86Subtarget &Subtarget,
                                   ArrayRef<int> Mask) {}

/// Lower shuffle using X86ISD::VROTLI rotations.
static SDValue lowerShuffleAsBitRotate(const SDLoc &DL, MVT VT, SDValue V1,
                                       ArrayRef<int> Mask,
                                       const X86Subtarget &Subtarget,
                                       SelectionDAG &DAG) {}

/// Try to match a vector shuffle as an element rotation.
///
/// This is used to support PALIGNR for SSSE3 and VALIGND/Q for AVX512.
static int matchShuffleAsElementRotate(SDValue &V1, SDValue &V2,
                                       ArrayRef<int> Mask) {}

/// Try to lower a vector shuffle as a byte rotation.
///
/// SSSE3 has a generic PALIGNR instruction that will do an arbitrary
/// byte-rotation of the concatenation of two vectors; pre-SSSE3 can use a
/// PSRLDQ/PSLLDQ/POR pattern to get a similar effect. This routine will
/// try to generically lower a vector shuffle through such a pattern. It
/// does not check for the profitability of lowering either as PALIGNR or
/// PSRLDQ/PSLLDQ/POR, only whether the mask is valid to lower in that form.
/// This matches shuffle vectors that look like:
///
///   v8i16 [11, 12, 13, 14, 15, 0, 1, 2]
///
/// Essentially it concatenates V1 and V2, shifts right by some number of
/// elements, and takes the low elements as the result. Note that while this is
/// specified as a *right shift* because x86 is little-endian, it is a *left
/// rotate* of the vector lanes.
static int matchShuffleAsByteRotate(MVT VT, SDValue &V1, SDValue &V2,
                                    ArrayRef<int> Mask) {}
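
// Sketch of the matchShuffleAsByteRotate idea, simplified to a fixed operand
// order (the real matcher also handles the commuted case and tracks which
// source each half comes from): find a rotation R such that every non-undef
// Mask[i] equals (R + i) % (2 * NumElts); R == 11 for the v8i16 example
// above:
//   int NumElts = Mask.size();
//   for (int R = 0; R != NumElts; ++R) {
//     bool Match = true;
//     for (int i = 0; i != NumElts && Match; ++i)
//       if (Mask[i] >= 0 && Mask[i] != (R + i) % (2 * NumElts))
//         Match = false;
//     if (Match)
//       return R;
//   }
//   return -1;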

static SDValue lowerShuffleAsByteRotate(const SDLoc &DL, MVT VT, SDValue V1,
                                        SDValue V2, ArrayRef<int> Mask,
                                        const X86Subtarget &Subtarget,
                                        SelectionDAG &DAG) {}

/// Try to lower a vector shuffle as a dword/qword rotation.
///
/// AVX512 has VALIGND/VALIGNQ instructions that will do an arbitrary
/// rotation of the concatenation of two vectors; this routine will
/// try to generically lower a vector shuffle through such a pattern.
///
/// Essentially it concatenates V1 and V2, shifts right by some number of
/// elements, and takes the low elements as the result. Note that while this is
/// specified as a *right shift* because x86 is little-endian, it is a *left
/// rotate* of the vector lanes.
static SDValue lowerShuffleAsVALIGN(const SDLoc &DL, MVT VT, SDValue V1,
                                    SDValue V2, ArrayRef<int> Mask,
                                    const APInt &Zeroable,
                                    const X86Subtarget &Subtarget,
                                    SelectionDAG &DAG) {}

/// Try to lower a vector shuffle as a byte shift sequence.
static SDValue lowerShuffleAsByteShiftMask(const SDLoc &DL, MVT VT, SDValue V1,
                                           SDValue V2, ArrayRef<int> Mask,
                                           const APInt &Zeroable,
                                           const X86Subtarget &Subtarget,
                                           SelectionDAG &DAG) {}

/// Try to lower a vector shuffle as a bit shift (shifts in zeros).
///
/// Attempts to match a shuffle mask against the PSLL(W/D/Q/DQ) and
/// PSRL(W/D/Q/DQ) SSE2 and AVX2 logical bit-shift instructions. The function
/// matches elements from one of the input vectors shuffled to the left or
/// right with zeroable elements 'shifted in'. It handles both the strictly
/// bit-wise element shifts and the byte shift across an entire 128-bit double
/// quad word lane.
///
/// PSLL : (little-endian) left bit shift.
/// [ zz, 0, zz,  2 ]
/// [ -1, 4, zz, -1 ]
/// PSRL : (little-endian) right bit shift.
/// [  1, zz,  3, zz]
/// [ -1, -1,  7, zz]
/// PSLLDQ : (little-endian) left byte shift
/// [ zz,  0,  1,  2,  3,  4,  5,  6]
/// [ zz, zz, -1, -1,  2,  3,  4, -1]
/// [ zz, zz, zz, zz, zz, zz, -1,  1]
/// PSRLDQ : (little-endian) right byte shift
/// [  5, 6,  7, zz, zz, zz, zz, zz]
/// [ -1, 5,  6,  7, zz, zz, zz, zz]
/// [  1, 2, -1, -1, -1, -1, zz, zz]
static int matchShuffleAsShift(MVT &ShiftVT, unsigned &Opcode,
                               unsigned ScalarSizeInBits, ArrayRef<int> Mask,
                               int MaskOffset, const APInt &Zeroable,
                               const X86Subtarget &Subtarget) {}
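
// A simplified sketch of matchShuffleAsShift for the whole-register PSRLDQ
// case (the real matcher also walks narrower per-element scales for the bit
// shifts and handles the left-shift mirror image): the top Shift lanes must
// be zeroable, and every other lane must be undef, zeroable, or the element
// Shift positions above it:
//   auto MatchesRightShift = [&](int Shift) {
//     int Size = Mask.size();
//     for (int i = Size - Shift; i != Size; ++i)
//       if (!Zeroable[i])
//         return false; // Shifted-in lanes must be zeros.
//     for (int i = 0; i != Size - Shift; ++i)
//       if (!Zeroable[i] && Mask[i] >= 0 &&
//           Mask[i] != MaskOffset + i + Shift)
//         return false; // Other lanes slide down by Shift.
//     return true;
//   };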

static SDValue lowerShuffleAsShift(const SDLoc &DL, MVT VT, SDValue V1,
                                   SDValue V2, ArrayRef<int> Mask,
                                   const APInt &Zeroable,
                                   const X86Subtarget &Subtarget,
                                   SelectionDAG &DAG, bool BitwiseOnly) {}

// EXTRQ: Extract Len elements from lower half of source, starting at Idx.
// Remainder of lower half result is zero and upper half is all undef.
static bool matchShuffleAsEXTRQ(MVT VT, SDValue &V1, SDValue &V2,
                                ArrayRef<int> Mask, uint64_t &BitLen,
                                uint64_t &BitIdx, const APInt &Zeroable) {}
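
// e.g. for v8i16, a mask that keeps source elements [2,3,4] in the low lanes
// with everything else zeroable would give BitIdx = 2 * 16 = 32 and
// BitLen = 3 * 16 = 48.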

// INSERTQ: Extract lowest Len elements from lower half of second source and
// insert over first source, starting at Idx.
// { A[0], .., A[Idx-1], B[0], .., B[Len-1], A[Idx+Len], .., UNDEF, ... }
static bool matchShuffleAsINSERTQ(MVT VT, SDValue &V1, SDValue &V2,
                                  ArrayRef<int> Mask, uint64_t &BitLen,
                                  uint64_t &BitIdx) {}

/// Try to lower a vector shuffle using SSE4a EXTRQ/INSERTQ.
static SDValue lowerShuffleWithSSE4A(const SDLoc &DL, MVT VT, SDValue V1,
                                     SDValue V2, ArrayRef<int> Mask,
                                     const APInt &Zeroable, SelectionDAG &DAG) {}

/// Lower a vector shuffle as a zero or any extension.
///
/// Given a specific number of elements, element bit width, and extension
/// stride, produce either a zero or any extension based on the available
/// features of the subtarget. The extended elements are consecutive and can
/// start from an offset element index in the input; to avoid excess shuffling,
/// the offset must either be in the bottom lane or at the start of a higher
/// lane. All extended elements must come from the same lane.
static SDValue lowerShuffleAsSpecificZeroOrAnyExtend(
    const SDLoc &DL, MVT VT, int Scale, int Offset, bool AnyExt, SDValue InputV,
    ArrayRef<int> Mask, const X86Subtarget &Subtarget, SelectionDAG &DAG) {}

/// Try to lower a vector shuffle as a zero extension on any microarch.
///
/// This routine will try to do everything in its power to cleverly lower
/// a shuffle which happens to match the pattern of a zero extend. It doesn't
/// check for the profitability of this lowering; it tries to aggressively
/// match this pattern. It will use all of the micro-architectural details it
/// can to emit an efficient lowering. It handles both blends with all-zero
/// inputs to explicitly zero-extend and undef-lanes (sometimes undef due to
/// masking out later).
///
/// The reason we have dedicated lowering for zext-style shuffles is that they
/// are both incredibly common and often quite performance sensitive.
static SDValue lowerShuffleAsZeroOrAnyExtend(
    const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
    const APInt &Zeroable, const X86Subtarget &Subtarget,
    SelectionDAG &DAG) {}

/// Try to get a scalar value for a specific element of a vector.
///
/// Looks through BUILD_VECTOR and SCALAR_TO_VECTOR nodes to find a scalar.
static SDValue getScalarValueForVectorElement(SDValue V, int Idx,
                                              SelectionDAG &DAG) {}
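
// The core of the getScalarValueForVectorElement lookup, sketched (the real
// helper also peers through bitcasts and checks the scalar type):
//   if (V.getOpcode() == ISD::SCALAR_TO_VECTOR && Idx == 0)
//     return V.getOperand(0);
//   if (V.getOpcode() == ISD::BUILD_VECTOR)
//     return V.getOperand(Idx);
//   return SDValue();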

/// Helper to test for a load that can be folded with x86 shuffles.
///
/// This is particularly important because the set of instructions varies
/// significantly based on whether the operand is a load or not.
static bool isShuffleFoldableLoad(SDValue V) {}

template <typename T>
static bool isSoftF16(T VT, const X86Subtarget &Subtarget) {}

/// Try to lower insertion of a single element into a zero vector.
///
/// This is a common pattern that we have especially efficient patterns to lower
/// across all subtarget feature sets.
static SDValue lowerShuffleAsElementInsertion(
    const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
    const APInt &Zeroable, const X86Subtarget &Subtarget,
    SelectionDAG &DAG) {}

/// Try to lower a broadcast of a single, truncated, integer element coming
/// from a scalar_to_vector/build_vector node \p V0 with larger elements.
///
/// This assumes we have AVX2.
static SDValue lowerShuffleAsTruncBroadcast(const SDLoc &DL, MVT VT, SDValue V0,
                                            int BroadcastIdx,
                                            const X86Subtarget &Subtarget,
                                            SelectionDAG &DAG) {}

/// Test whether this can be lowered with a single SHUFPS instruction.
///
/// This is used to disable more specialized lowerings when the shufps lowering
/// will happen to be efficient.
static bool isSingleSHUFPSMask(ArrayRef<int> Mask) {}
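
// SHUFPS selects its low two result lanes from one operand and its high two
// from the other, so (sketching the test) each half of the mask must draw
// from a single input:
//   if (Mask[0] >= 0 && Mask[1] >= 0 && (Mask[0] < 4) != (Mask[1] < 4))
//     return false;
//   if (Mask[2] >= 0 && Mask[3] >= 0 && (Mask[2] < 4) != (Mask[3] < 4))
//     return false;
//   return true;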

/// Test whether the specified input (0 or 1) is in-place blended by the
/// given mask.
///
/// This returns true if the elements from a particular input are already in the
/// slot required by the given mask and require no permutation.
static bool isShuffleMaskInputInPlace(int Input, ArrayRef<int> Mask) {}
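
// A minimal sketch of isShuffleMaskInputInPlace: every element taken from
// the given input must already sit in its own slot:
//   int Size = Mask.size();
//   for (int i = 0; i != Size; ++i)
//     if (Mask[i] >= 0 && Mask[i] / Size == Input && Mask[i] % Size != i)
//       return false;
//   return true;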

/// If we are extracting two 128-bit halves of a vector and shuffling the
/// result, match that to a 256-bit AVX2 vperm* instruction to avoid a
/// multi-shuffle lowering.
static SDValue lowerShuffleOfExtractsAsVperm(const SDLoc &DL, SDValue N0,
                                             SDValue N1, ArrayRef<int> Mask,
                                             SelectionDAG &DAG) {}

/// Try to lower broadcast of a single element.
///
/// For convenience, this code also bundles all of the subtarget feature set
/// filtering. While a little annoying to re-dispatch on type here, there isn't
/// a convenient way to factor it out.
static SDValue lowerShuffleAsBroadcast(const SDLoc &DL, MVT VT, SDValue V1,
                                       SDValue V2, ArrayRef<int> Mask,
                                       const X86Subtarget &Subtarget,
                                       SelectionDAG &DAG) {}

// Check for whether we can use INSERTPS to perform the shuffle. We only use
// INSERTPS when the V1 elements are already in the correct locations
// because otherwise we can just always use two SHUFPS instructions which
// are much smaller to encode than a SHUFPS and an INSERTPS. We can also
// perform INSERTPS if a single V1 element is out of place and all V2
// elements are zeroable.
static bool matchShuffleAsInsertPS(SDValue &V1, SDValue &V2,
                                   unsigned &InsertPSMask,
                                   const APInt &Zeroable,
                                   ArrayRef<int> Mask, SelectionDAG &DAG) {}
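
// For reference, the INSERTPS immediate packs three fields: bits [7:6]
// select the source element of V2, bits [5:4] select the destination slot
// in V1, and bits [3:0] form a zero mask applied to the result lanes.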

static SDValue lowerShuffleAsInsertPS(const SDLoc &DL, SDValue V1, SDValue V2,
                                      ArrayRef<int> Mask, const APInt &Zeroable,
                                      SelectionDAG &DAG) {}

/// Handle lowering of 2-lane 64-bit floating point shuffles.
///
/// This is the basis function for the 2-lane 64-bit shuffles as we have full
/// support for floating point shuffles but not integer shuffles. These
/// instructions will incur a domain crossing penalty on some chips though so
/// it is better to avoid lowering through this for integer vectors where
/// possible.
static SDValue lowerV2F64Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
                                 const APInt &Zeroable, SDValue V1, SDValue V2,
                                 const X86Subtarget &Subtarget,
                                 SelectionDAG &DAG) {}

/// Handle lowering of 2-lane 64-bit integer shuffles.
///
/// Tries to lower a 2-lane 64-bit shuffle using shuffle operations provided by
/// the integer unit to minimize domain crossing penalties. However, for blends
/// it falls back to the floating point shuffle operation with appropriate bit
/// casting.
static SDValue lowerV2I64Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
                                 const APInt &Zeroable, SDValue V1, SDValue V2,
                                 const X86Subtarget &Subtarget,
                                 SelectionDAG &DAG) {}

/// Lower a vector shuffle using the SHUFPS instruction.
///
/// This is a helper routine dedicated to lowering vector shuffles using SHUFPS.
/// It makes no assumptions about whether this is the *best* lowering, it simply
/// uses it.
static SDValue lowerShuffleWithSHUFPS(const SDLoc &DL, MVT VT,
                                      ArrayRef<int> Mask, SDValue V1,
                                      SDValue V2, SelectionDAG &DAG) {}

/// Lower 4-lane 32-bit floating point shuffles.
///
/// Uses instructions exclusively from the floating point unit to minimize
/// domain crossing penalties, as these are sufficient to implement all v4f32
/// shuffles.
static SDValue lowerV4F32Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
                                 const APInt &Zeroable, SDValue V1, SDValue V2,
                                 const X86Subtarget &Subtarget,
                                 SelectionDAG &DAG) {}

/// Lower 4-lane i32 vector shuffles.
///
/// We try to handle these with integer-domain shuffles where we can, but for
/// blends we use the floating point domain blend instructions.
static SDValue lowerV4I32Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
                                 const APInt &Zeroable, SDValue V1, SDValue V2,
                                 const X86Subtarget &Subtarget,
                                 SelectionDAG &DAG) {}

/// Lowering of single-input v8i16 shuffles is the cornerstone of SSE2
/// shuffle lowering, and the most complex part.
///
/// The lowering strategy is to try to form pairs of input lanes which are
/// targeted at the same half of the final vector, and then use a dword shuffle
/// to place them onto the right half, and finally unpack the paired lanes into
/// their final position.
///
/// The exact breakdown of how to form these dword pairs and align them on the
/// correct sides is really tricky. See the comments within the function for
/// more of the details.
///
/// This code also handles repeated 128-bit lanes of v8i16 shuffles, but each
/// lane must shuffle the *exact* same way. In fact, you must pass a v8 Mask to
/// this routine for it to work correctly. To shuffle a 256-bit or 512-bit i16
/// vector, form the analogous 128-bit 8-element Mask.
static SDValue lowerV8I16GeneralSingleInputShuffle(
    const SDLoc &DL, MVT VT, SDValue V, MutableArrayRef<int> Mask,
    const X86Subtarget &Subtarget, SelectionDAG &DAG) {}

/// Helper to form a PSHUFB-based shuffle+blend, opportunistically avoiding the
/// blend if only one input is used.
static SDValue lowerShuffleAsBlendOfPSHUFBs(
    const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
    const APInt &Zeroable, SelectionDAG &DAG, bool &V1InUse, bool &V2InUse) {}

/// Generic lowering of 8-lane i16 shuffles.
///
/// This handles both single-input shuffles and combined shuffle/blends with
/// two inputs. The single input shuffles are immediately delegated to
/// a dedicated lowering routine.
///
/// The blends are lowered in one of three fundamental ways. If there are few
/// enough inputs, it delegates to a basic UNPCK-based strategy. If the shuffle
/// of the input is significantly cheaper when lowered as an interleaving of
/// the two inputs, try to interleave them. Otherwise, blend the low and high
/// halves of the inputs separately (making them have relatively few inputs)
/// and then concatenate them.
static SDValue lowerV8I16Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
                                 const APInt &Zeroable, SDValue V1, SDValue V2,
                                 const X86Subtarget &Subtarget,
                                 SelectionDAG &DAG) {}static SDValue lowerV8F16Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
                                 const APInt &Zeroable, SDValue V1, SDValue V2,
                                 const X86Subtarget &Subtarget,
                                 SelectionDAG &DAG) {}static SDValue lowerShuffleWithPERMV(const SDLoc &DL, MVT VT,
                                     ArrayRef<int> Mask, SDValue V1, SDValue V2,
                                     const X86Subtarget &Subtarget,
                                     SelectionDAG &DAG) {}static SDValue lowerV16I8Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
                                 const APInt &Zeroable, SDValue V1, SDValue V2,
                                 const X86Subtarget &Subtarget,
                                 SelectionDAG &DAG) {}static SDValue lower128BitShuffle(const SDLoc &DL, ArrayRef<int> Mask,
                                  MVT VT, SDValue V1, SDValue V2,
                                  const APInt &Zeroable,
                                  const X86Subtarget &Subtarget,
                                  SelectionDAG &DAG) {}static SDValue splitAndLowerShuffle(const SDLoc &DL, MVT VT, SDValue V1,
                                    SDValue V2, ArrayRef<int> Mask,
                                    SelectionDAG &DAG, bool SimpleOnly) {}static SDValue lowerShuffleAsSplitOrBlend(const SDLoc &DL, MVT VT, SDValue V1,
                                          SDValue V2, ArrayRef<int> Mask,
                                          const APInt &Zeroable,
                                          const X86Subtarget &Subtarget,
                                          SelectionDAG &DAG) {}static SDValue lowerShuffleAsLanePermuteAndSHUFP(const SDLoc &DL, MVT VT,
                                                 SDValue V1, SDValue V2,
                                                 ArrayRef<int> Mask,
                                                 SelectionDAG &DAG) {}static SDValue lowerShuffleAsLanePermuteAndPermute(
    const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
    SelectionDAG &DAG, const X86Subtarget &Subtarget) {}static void computeInLaneShuffleMask(const ArrayRef<int> &Mask, int LaneSize,
                                     SmallVector<int> &InLaneMask) {}static SDValue lowerShuffleAsLanePermuteAndShuffle(
    const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
    SelectionDAG &DAG, const X86Subtarget &Subtarget) {}static SDValue lowerV2X128Shuffle(const SDLoc &DL, MVT VT, SDValue V1,
                                  SDValue V2, ArrayRef<int> Mask,
                                  const APInt &Zeroable,
                                  const X86Subtarget &Subtarget,
                                  SelectionDAG &DAG) {}static SDValue lowerShuffleAsLanePermuteAndRepeatedMask(
    const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
    const X86Subtarget &Subtarget, SelectionDAG &DAG) {}static bool
getHalfShuffleMask(ArrayRef<int> Mask, MutableArrayRef<int> HalfMask,
                   int &HalfIdx1, int &HalfIdx2) {}static SDValue getShuffleHalfVectors(const SDLoc &DL, SDValue V1, SDValue V2,
                                     ArrayRef<int> HalfMask, int HalfIdx1,
                                     int HalfIdx2, bool UndefLower,
                                     SelectionDAG &DAG, bool UseConcat = false) {}static SDValue lowerShuffleWithUndefHalf(const SDLoc &DL, MVT VT, SDValue V1,
                                         SDValue V2, ArrayRef<int> Mask,
                                         const X86Subtarget &Subtarget,
                                         SelectionDAG &DAG) {}static SDValue lowerShuffleAsRepeatedMaskAndLanePermute(
    const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
    const X86Subtarget &Subtarget, SelectionDAG &DAG) {}static bool matchShuffleWithSHUFPD(MVT VT, SDValue &V1, SDValue &V2,
                                   bool &ForceV1Zero, bool &ForceV2Zero,
                                   unsigned &ShuffleImm, ArrayRef<int> Mask,
                                   const APInt &Zeroable) {}static SDValue lowerShuffleWithSHUFPD(const SDLoc &DL, MVT VT, SDValue V1,
                                      SDValue V2, ArrayRef<int> Mask,
                                      const APInt &Zeroable,
                                      const X86Subtarget &Subtarget,
                                      SelectionDAG &DAG) {}static SDValue lowerShuffleAsVTRUNCAndUnpack(const SDLoc &DL, MVT VT,
                                             SDValue V1, SDValue V2,
                                             ArrayRef<int> Mask,
                                             const APInt &Zeroable,
                                             SelectionDAG &DAG) {}static SDValue lowerShufflePairAsUNPCKAndPermute(const SDLoc &DL, MVT VT,
                                                 SDValue V1, SDValue V2,
                                                 ArrayRef<int> Mask,
                                                 SelectionDAG &DAG) {}static SDValue lowerV4F64Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
                                 const APInt &Zeroable, SDValue V1, SDValue V2,
                                 const X86Subtarget &Subtarget,
                                 SelectionDAG &DAG) {}static SDValue lowerV4I64Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
                                 const APInt &Zeroable, SDValue V1, SDValue V2,
                                 const X86Subtarget &Subtarget,
                                 SelectionDAG &DAG) {}static SDValue lowerV8F32Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
                                 const APInt &Zeroable, SDValue V1, SDValue V2,
                                 const X86Subtarget &Subtarget,
                                 SelectionDAG &DAG) {}static SDValue lowerV8I32Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
                                 const APInt &Zeroable, SDValue V1, SDValue V2,
                                 const X86Subtarget &Subtarget,
                                 SelectionDAG &DAG) {}static SDValue lowerV16I16Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
                                  const APInt &Zeroable, SDValue V1, SDValue V2,
                                  const X86Subtarget &Subtarget,
                                  SelectionDAG &DAG) {}static SDValue lowerV32I8Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
                                 const APInt &Zeroable, SDValue V1, SDValue V2,
                                 const X86Subtarget &Subtarget,
                                 SelectionDAG &DAG) {}static SDValue lower256BitShuffle(const SDLoc &DL, ArrayRef<int> Mask, MVT VT,
                                  SDValue V1, SDValue V2, const APInt &Zeroable,
                                  const X86Subtarget &Subtarget,
                                  SelectionDAG &DAG) {}static SDValue lowerV4X128Shuffle(const SDLoc &DL, MVT VT, ArrayRef<int> Mask,
                                  const APInt &Zeroable, SDValue V1, SDValue V2,
                                  const X86Subtarget &Subtarget,
                                  SelectionDAG &DAG) {}static SDValue lowerV8F64Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
                                 const APInt &Zeroable, SDValue V1, SDValue V2,
                                 const X86Subtarget &Subtarget,
                                 SelectionDAG &DAG) {}static SDValue lowerV16F32Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
                                  const APInt &Zeroable, SDValue V1, SDValue V2,
                                  const X86Subtarget &Subtarget,
                                  SelectionDAG &DAG) {}static SDValue lowerV8I64Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
                                 const APInt &Zeroable, SDValue V1, SDValue V2,
                                 const X86Subtarget &Subtarget,
                                 SelectionDAG &DAG) {}static SDValue lowerV16I32Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
                                  const APInt &Zeroable, SDValue V1, SDValue V2,
                                  const X86Subtarget &Subtarget,
                                  SelectionDAG &DAG) {}static SDValue lowerV32I16Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
                                  const APInt &Zeroable, SDValue V1, SDValue V2,
                                  const X86Subtarget &Subtarget,
                                  SelectionDAG &DAG) {}static SDValue lowerV64I8Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
                                 const APInt &Zeroable, SDValue V1, SDValue V2,
                                 const X86Subtarget &Subtarget,
                                 SelectionDAG &DAG) {}static SDValue lower512BitShuffle(const SDLoc &DL, ArrayRef<int> Mask,
                                  MVT VT, SDValue V1, SDValue V2,
                                  const APInt &Zeroable,
                                  const X86Subtarget &Subtarget,
                                  SelectionDAG &DAG) {}static SDValue lower1BitShuffleAsKSHIFTR(const SDLoc &DL, ArrayRef<int> Mask,
                                         MVT VT, SDValue V1, SDValue V2,
                                         const X86Subtarget &Subtarget,
                                         SelectionDAG &DAG) {}static int match1BitShuffleAsKSHIFT(unsigned &Opcode, ArrayRef<int> Mask,
                                    int MaskOffset, const APInt &Zeroable) {}static SDValue lower1BitShuffle(const SDLoc &DL, ArrayRef<int> Mask,
                                MVT VT, SDValue V1, SDValue V2,
                                const APInt &Zeroable,
                                const X86Subtarget &Subtarget,
                                SelectionDAG &DAG) {}static bool canonicalizeShuffleMaskWithCommute(ArrayRef<int> Mask) {}static bool canCombineAsMaskOperation(SDValue V,
                                      const X86Subtarget &Subtarget) {}static SDValue canonicalizeShuffleMaskWithHorizOp(
    MutableArrayRef<SDValue> Ops, MutableArrayRef<int> Mask,
    unsigned RootSizeInBits, const SDLoc &DL, SelectionDAG &DAG,
    const X86Subtarget &Subtarget)static SDValue lowerVECTOR_SHUFFLE(SDValue Op, const X86Subtarget &Subtarget,
                                   SelectionDAG &DAG) {}static SDValue lowerVECTOR_COMPRESS(SDValue Op, const X86Subtarget &Subtarget,
                                    SelectionDAG &DAG) {}static SDValue lowerVSELECTtoVectorShuffle(SDValue Op,
                                           const X86Subtarget &Subtarget,
                                           SelectionDAG &DAG) {}SDValue X86TargetLowering::LowerVSELECT(SDValue Op, SelectionDAG &DAG) const {}static SDValue LowerEXTRACT_VECTOR_ELT_SSE4(SDValue Op, SelectionDAG &DAG) {}static SDValue ExtractBitFromMaskVector(SDValue Op, SelectionDAG &DAG,
                                        const X86Subtarget &Subtarget) {}static APInt getExtractedDemandedElts(SDNode *N) {}SDValue
X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
                                           SelectionDAG &DAG) const {}static SDValue InsertBitToMaskVector(SDValue Op, SelectionDAG &DAG,
                                     const X86Subtarget &Subtarget) {}SDValue X86TargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op,
                                                  SelectionDAG &DAG) const {}static SDValue LowerSCALAR_TO_VECTOR(SDValue Op, const X86Subtarget &Subtarget,
                                     SelectionDAG &DAG) {}static SDValue LowerINSERT_SUBVECTOR(SDValue Op, const X86Subtarget &Subtarget,
                                     SelectionDAG &DAG) {}static SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, const X86Subtarget &Subtarget,
                                      SelectionDAG &DAG) {}unsigned X86TargetLowering::getGlobalWrapperKind(
    const GlobalValue *GV, const unsigned char OpFlags) const {}SDValue
X86TargetLowering::LowerConstantPool(SDValue Op, SelectionDAG &DAG) const {}SDValue X86TargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const {}SDValue X86TargetLowering::LowerExternalSymbol(SDValue Op,
                                               SelectionDAG &DAG) const {}SDValue
X86TargetLowering::LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const {}SDValue X86TargetLowering::LowerGlobalOrExternal(SDValue Op, SelectionDAG &DAG,
                                                 bool ForCall) const {}SDValue
X86TargetLowering::LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const {}static SDValue
GetTLSADDR(SelectionDAG &DAG, SDValue Chain, GlobalAddressSDNode *GA,
           SDValue *InGlue, const EVT PtrVT, unsigned ReturnReg,
           unsigned char OperandFlags, bool LocalDynamic = false) {}static SDValue
LowerToTLSGeneralDynamicModel32(GlobalAddressSDNode *GA, SelectionDAG &DAG,
                                const EVT PtrVT) {}static SDValue
LowerToTLSGeneralDynamicModel64(GlobalAddressSDNode *GA, SelectionDAG &DAG,
                                const EVT PtrVT) {}static SDValue
LowerToTLSGeneralDynamicModelX32(GlobalAddressSDNode *GA, SelectionDAG &DAG,
                                 const EVT PtrVT) {}static SDValue LowerToTLSLocalDynamicModel(GlobalAddressSDNode *GA,
                                           SelectionDAG &DAG, const EVT PtrVT,
                                           bool Is64Bit, bool Is64BitLP64) {}static SDValue LowerToTLSExecModel(GlobalAddressSDNode *GA, SelectionDAG &DAG,
                                   const EVT PtrVT, TLSModel::Model model,
                                   bool is64Bit, bool isPIC) {}SDValue
X86TargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const {}bool X86TargetLowering::addressingModeSupportsTLS(const GlobalValue &GV) const {}static SDValue LowerShiftParts(SDValue Op, SelectionDAG &DAG) {}static SDValue LowerI64IntToFP_AVX512DQ(SDValue Op, const SDLoc &dl,
                                        SelectionDAG &DAG,
                                        const X86Subtarget &Subtarget) {}static SDValue LowerI64IntToFP16(SDValue Op, const SDLoc &dl, SelectionDAG &DAG,
                                 const X86Subtarget &Subtarget) {}static bool useVectorCast(unsigned Opcode, MVT FromVT, MVT ToVT,
                          const X86Subtarget &Subtarget) {}static SDValue vectorizeExtractedCast(SDValue Cast, const SDLoc &DL,
                                      SelectionDAG &DAG,
                                      const X86Subtarget &Subtarget) {}static SDValue lowerFPToIntToFP(SDValue CastToFP, const SDLoc &DL,
                                SelectionDAG &DAG,
                                const X86Subtarget &Subtarget) {}static SDValue lowerINT_TO_FP_vXi64(SDValue Op, const SDLoc &DL,
                                    SelectionDAG &DAG,
                                    const X86Subtarget &Subtarget) {}static SDValue promoteXINT_TO_FP(SDValue Op, const SDLoc &dl,
                                 SelectionDAG &DAG) {}static bool isLegalConversion(MVT VT, bool IsSigned,
                              const X86Subtarget &Subtarget) {}SDValue X86TargetLowering::LowerSINT_TO_FP(SDValue Op,
                                           SelectionDAG &DAG) const {}std::pair<SDValue, SDValue> X86TargetLowering::BuildFILD(
    EVT DstVT, EVT SrcVT, const SDLoc &DL, SDValue Chain, SDValue Pointer,
    MachinePointerInfo PtrInfo, Align Alignment, SelectionDAG &DAG) const {}static bool shouldUseHorizontalOp(bool IsSingleSource, SelectionDAG &DAG,
                                  const X86Subtarget &Subtarget) {}static SDValue LowerUINT_TO_FP_i64(SDValue Op, const SDLoc &dl,
                                   SelectionDAG &DAG,
                                   const X86Subtarget &Subtarget) {}static SDValue LowerUINT_TO_FP_i32(SDValue Op, const SDLoc &dl,
                                   SelectionDAG &DAG,
                                   const X86Subtarget &Subtarget) {}static SDValue lowerUINT_TO_FP_v2i32(SDValue Op, const SDLoc &DL,
                                     SelectionDAG &DAG,
                                     const X86Subtarget &Subtarget) {}static SDValue lowerUINT_TO_FP_vXi32(SDValue Op, const SDLoc &DL,
                                     SelectionDAG &DAG,
                                     const X86Subtarget &Subtarget) {}static SDValue lowerUINT_TO_FP_vec(SDValue Op, const SDLoc &dl, SelectionDAG &DAG,
                                   const X86Subtarget &Subtarget) {}SDValue X86TargetLowering::LowerUINT_TO_FP(SDValue Op,
                                           SelectionDAG &DAG) const {}SDValue X86TargetLowering::FP_TO_INTHelper(SDValue Op, SelectionDAG &DAG,
                                           bool IsSigned,
                                           SDValue &Chain) const {}static SDValue LowerAVXExtend(SDValue Op, const SDLoc &dl, SelectionDAG &DAG,
                              const X86Subtarget &Subtarget) {}static SDValue SplitAndExtendv16i1(unsigned ExtOpc, MVT VT, SDValue In,
                                   const SDLoc &dl, SelectionDAG &DAG) {}static SDValue LowerZERO_EXTEND_Mask(SDValue Op, const SDLoc &DL,
                                     const X86Subtarget &Subtarget,
                                     SelectionDAG &DAG) {}static SDValue LowerZERO_EXTEND(SDValue Op, const X86Subtarget &Subtarget,
                                SelectionDAG &DAG) {}static SDValue truncateVectorWithPACK(unsigned Opcode, EVT DstVT, SDValue In,
                                      const SDLoc &DL, SelectionDAG &DAG,
                                      const X86Subtarget &Subtarget) {}static SDValue truncateVectorWithPACKUS(EVT DstVT, SDValue In, const SDLoc &DL,
                                        const X86Subtarget &Subtarget,
                                        SelectionDAG &DAG) {}static SDValue truncateVectorWithPACKSS(EVT DstVT, SDValue In, const SDLoc &DL,
                                        const X86Subtarget &Subtarget,
                                        SelectionDAG &DAG) {}static SDValue matchTruncateWithPACK(unsigned &PackOpcode, EVT DstVT,
                                     SDValue In, const SDLoc &DL,
                                     SelectionDAG &DAG,
                                     const X86Subtarget &Subtarget) {}static SDValue LowerTruncateVecPackWithSignBits(MVT DstVT, SDValue In,
                                                const SDLoc &DL,
                                                const X86Subtarget &Subtarget,
                                                SelectionDAG &DAG) {}static SDValue LowerTruncateVecPack(MVT DstVT, SDValue In, const SDLoc &DL,
                                    const X86Subtarget &Subtarget,
                                    SelectionDAG &DAG) {}static SDValue LowerTruncateVecI1(SDValue Op, const SDLoc &DL,
                                  SelectionDAG &DAG,
                                  const X86Subtarget &Subtarget) {}SDValue X86TargetLowering::LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const {}static SDValue expandFP_TO_UINT_SSE(MVT VT, SDValue Src, const SDLoc &dl,
                                    SelectionDAG &DAG,
                                    const X86Subtarget &Subtarget) {}SDValue X86TargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const {}SDValue X86TargetLowering::LowerLRINT_LLRINT(SDValue Op,
                                             SelectionDAG &DAG) const {}SDValue X86TargetLowering::LRINT_LLRINTHelper(SDNode *N,
                                              SelectionDAG &DAG) const {}SDValue
X86TargetLowering::LowerFP_TO_INT_SAT(SDValue Op, SelectionDAG &DAG) const {}SDValue X86TargetLowering::LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const {}SDValue X86TargetLowering::LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const {}static SDValue LowerFP16_TO_FP(SDValue Op, SelectionDAG &DAG) {}static SDValue LowerFP_TO_FP16(SDValue Op, SelectionDAG &DAG) {}SDValue X86TargetLowering::LowerFP_TO_BF16(SDValue Op,
                                           SelectionDAG &DAG) const {}static SDValue lowerAddSubToHorizontalOp(SDValue Op, const SDLoc &DL,
                                         SelectionDAG &DAG,
                                         const X86Subtarget &Subtarget) {}SDValue X86TargetLowering::lowerFaddFsub(SDValue Op, SelectionDAG &DAG) const {}static SDValue LowerFROUND(SDValue Op, SelectionDAG &DAG) {}static SDValue LowerFABSorFNEG(SDValue Op, SelectionDAG &DAG) {}static SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) {}static SDValue LowerFGETSIGN(SDValue Op, SelectionDAG &DAG) {}static SDValue getBT(SDValue Src, SDValue BitNo, const SDLoc &DL, SelectionDAG &DAG) {}static SDValue getSETCC(X86::CondCode Cond, SDValue EFLAGS, const SDLoc &dl,
                        SelectionDAG &DAG) {}static bool isOrXorXorTree(SDValue X, bool Root = true) {}template <typename F>
static SDValue emitOrXorXorTree(SDValue X, const SDLoc &DL, SelectionDAG &DAG,
                                EVT VecVT, EVT CmpVT, bool HasPT, F SToV) {}static SDValue combineVectorSizedSetCCEquality(EVT VT, SDValue X, SDValue Y,
                                               ISD::CondCode CC,
                                               const SDLoc &DL,
                                               SelectionDAG &DAG,
                                               const X86Subtarget &Subtarget) {}static bool matchScalarReduction(SDValue Op, ISD::NodeType BinOp,
                                 SmallVectorImpl<SDValue> &SrcOps,
                                 SmallVectorImpl<APInt> *SrcMask = nullptr) {}static SDValue LowerVectorAllEqual(const SDLoc &DL, SDValue LHS, SDValue RHS,
                                   ISD::CondCode CC, const APInt &OriginalMask,
                                   const X86Subtarget &Subtarget,
                                   SelectionDAG &DAG, X86::CondCode &X86CC) {}static SDValue MatchVectorAllEqualTest(SDValue LHS, SDValue RHS,
                                       ISD::CondCode CC, const SDLoc &DL,
                                       const X86Subtarget &Subtarget,
                                       SelectionDAG &DAG,
                                       X86::CondCode &X86CC) {}static bool hasNonFlagsUse(SDValue Op) {}static bool isProfitableToUseFlagOp(SDValue Op) {}static SDValue EmitTest(SDValue Op, unsigned X86CC, const SDLoc &dl,
                        SelectionDAG &DAG, const X86Subtarget &Subtarget) {}static SDValue EmitCmp(SDValue Op0, SDValue Op1, unsigned X86CC,
                       const SDLoc &dl, SelectionDAG &DAG,
                       const X86Subtarget &Subtarget) {}bool X86TargetLowering::isXAndYEqZeroPreferableToXAndYEqY(ISD::CondCode Cond,
                                                          EVT VT) const {}bool X86TargetLowering::optimizeFMulOrFDivAsShiftAddBitcast(
    SDNode *N, SDValue, SDValue IntPow2) const {}bool X86TargetLowering::isFsqrtCheap(SDValue Op, SelectionDAG &DAG) const {}SDValue X86TargetLowering::getSqrtEstimate(SDValue Op,
                                           SelectionDAG &DAG, int Enabled,
                                           int &RefinementSteps,
                                           bool &UseOneConstNR,
                                           bool Reciprocal) const {}SDValue X86TargetLowering::getRecipEstimate(SDValue Op, SelectionDAG &DAG,
                                            int Enabled,
                                            int &RefinementSteps) const {}unsigned X86TargetLowering::combineRepeatedFPDivisors() const {}SDValue
X86TargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor,
                                 SelectionDAG &DAG,
                                 SmallVectorImpl<SDNode *> &Created) const {}static SDValue LowerAndToBT(SDValue And, ISD::CondCode CC, const SDLoc &dl,
                            SelectionDAG &DAG, X86::CondCode &X86CC) {}static bool cheapX86FSETCC_SSE(ISD::CondCode SetCCOpcode) {}static unsigned translateX86FSETCC(ISD::CondCode SetCCOpcode, SDValue &Op0,
                                   SDValue &Op1, bool &IsAlwaysSignaling) {}static SDValue splitIntVSETCC(EVT VT, SDValue LHS, SDValue RHS,
                              ISD::CondCode Cond, SelectionDAG &DAG,
                              const SDLoc &dl) {}static SDValue LowerIntVSETCC_AVX512(SDValue Op, const SDLoc &dl,
                                     SelectionDAG &DAG) {}static SDValue incDecVectorConstant(SDValue V, SelectionDAG &DAG, bool IsInc,
                                    bool NSW) {}static SDValue LowerVSETCCWithSUBUS(SDValue Op0, SDValue Op1, MVT VT,
                                    ISD::CondCode Cond, const SDLoc &dl,
                                    const X86Subtarget &Subtarget,
                                    SelectionDAG &DAG) {}static SDValue LowerVSETCC(SDValue Op, const X86Subtarget &Subtarget,
                           SelectionDAG &DAG) {}static SDValue EmitAVX512Test(SDValue Op0, SDValue Op1, ISD::CondCode CC,
                              const SDLoc &dl, SelectionDAG &DAG,
                              const X86Subtarget &Subtarget,
                              SDValue &X86CC) {}SDValue X86TargetLowering::emitFlagsForSetcc(SDValue Op0, SDValue Op1,
                                             ISD::CondCode CC, const SDLoc &dl,
                                             SelectionDAG &DAG,
                                             SDValue &X86CC) const {}SDValue X86TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {}SDValue X86TargetLowering::LowerSETCCCARRY(SDValue Op, SelectionDAG &DAG) const {}static std::pair<SDValue, SDValue>
getX86XALUOOp(X86::CondCode &Cond, SDValue Op, SelectionDAG &DAG) {}static SDValue LowerXALUO(SDValue Op, SelectionDAG &DAG) {}static bool isX86LogicalCmp(SDValue Op) {}static bool isTruncWithZeroHighBitsInput(SDValue V, SelectionDAG &DAG) {}static SDValue LowerSELECTWithCmpZero(SDValue CmpVal, SDValue LHS, SDValue RHS,
                                      unsigned X86CC, const SDLoc &DL,
                                      SelectionDAG &DAG,
                                      const X86Subtarget &Subtarget) {}SDValue X86TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {}static SDValue LowerSIGN_EXTEND_Mask(SDValue Op, const SDLoc &dl,
                                     const X86Subtarget &Subtarget,
                                     SelectionDAG &DAG) {}static SDValue LowerANY_EXTEND(SDValue Op, const X86Subtarget &Subtarget,
                               SelectionDAG &DAG) {}static SDValue LowerEXTEND_VECTOR_INREG(SDValue Op,
                                        const X86Subtarget &Subtarget,
                                        SelectionDAG &DAG) {}static SDValue LowerSIGN_EXTEND(SDValue Op, const X86Subtarget &Subtarget,
                                SelectionDAG &DAG) {}static SDValue splitVectorStore(StoreSDNode *Store, SelectionDAG &DAG) {}static SDValue scalarizeVectorStore(StoreSDNode *Store, MVT StoreVT,
                                    SelectionDAG &DAG) {}static SDValue LowerStore(SDValue Op, const X86Subtarget &Subtarget,
                          SelectionDAG &DAG) {}static SDValue LowerLoad(SDValue Op, const X86Subtarget &Subtarget,
                                 SelectionDAG &DAG) {}static bool isAndOrOfSetCCs(SDValue Op, unsigned &Opc) {}SDValue X86TargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const {}SDValue
X86TargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
                                           SelectionDAG &DAG) const {}SDValue X86TargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {}SDValue X86TargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const {}static SDValue LowerVACOPY(SDValue Op, const X86Subtarget &Subtarget,
                           SelectionDAG &DAG) {}static unsigned getTargetVShiftUniformOpcode(unsigned Opc, bool IsVariable) {}static SDValue getTargetVShiftByConstNode(unsigned Opc, const SDLoc &dl, MVT VT,
                                          SDValue SrcOp, uint64_t ShiftAmt,
                                          SelectionDAG &DAG) {}static SDValue getTargetVShiftNode(unsigned Opc, const SDLoc &dl, MVT VT,
                                   SDValue SrcOp, SDValue ShAmt, int ShAmtIdx,
                                   const X86Subtarget &Subtarget,
                                   SelectionDAG &DAG) {}static SDValue getMaskNode(SDValue Mask, MVT MaskVT,
                           const X86Subtarget &Subtarget, SelectionDAG &DAG,
                           const SDLoc &dl) {}static SDValue getVectorMaskingNode(SDValue Op, SDValue Mask,
                                    SDValue PreservedSrc,
                                    const X86Subtarget &Subtarget,
                                    SelectionDAG &DAG) {}static SDValue getScalarMaskingNode(SDValue Op, SDValue Mask,
                                    SDValue PreservedSrc,
                                    const X86Subtarget &Subtarget,
                                    SelectionDAG &DAG) {}static int getSEHRegistrationNodeSize(const Function *Fn) {}static SDValue recoverFramePointer(SelectionDAG &DAG, const Function *Fn,
SDValue X86TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const {}
static SDValue getAVX2GatherNode(unsigned Opc, SDValue Op, SelectionDAG &DAG, SDValue Src, SDValue Mask, SDValue Base, SDValue Index, SDValue ScaleOp, SDValue Chain, const X86Subtarget &Subtarget) {}
static SDValue getGatherNode(SDValue Op, SelectionDAG &DAG, SDValue Src, SDValue Mask, SDValue Base, SDValue Index, SDValue ScaleOp, SDValue Chain, const X86Subtarget &Subtarget) {}
static SDValue getScatterNode(unsigned Opc, SDValue Op, SelectionDAG &DAG, SDValue Src, SDValue Mask, SDValue Base, SDValue Index, SDValue ScaleOp, SDValue Chain, const X86Subtarget &Subtarget) {}
static SDValue getPrefetchNode(unsigned Opc, SDValue Op, SelectionDAG &DAG, SDValue Mask, SDValue Base, SDValue Index, SDValue ScaleOp, SDValue Chain, const X86Subtarget &Subtarget) {}
static SDValue expandIntrinsicWChainHelper(SDNode *N, const SDLoc &DL, SelectionDAG &DAG, unsigned TargetOpcode, unsigned SrcReg, const X86Subtarget &Subtarget, SmallVectorImpl<SDValue> &Results) {}
static void getReadTimeStampCounter(SDNode *N, const SDLoc &DL, unsigned Opcode, SelectionDAG &DAG, const X86Subtarget &Subtarget, SmallVectorImpl<SDValue> &Results) {}
static SDValue LowerREADCYCLECOUNTER(SDValue Op, const X86Subtarget &Subtarget, SelectionDAG &DAG) {}
static SDValue MarkEHRegistrationNode(SDValue Op, SelectionDAG &DAG) {}
static SDValue MarkEHGuard(SDValue Op, SelectionDAG &DAG) {}
static SDValue EmitTruncSStore(bool SignedSat, SDValue Chain, const SDLoc &DL, SDValue Val, SDValue Ptr, EVT MemVT, MachineMemOperand *MMO, SelectionDAG &DAG) {}
static SDValue EmitMaskedTruncSStore(bool SignedSat, SDValue Chain, const SDLoc &DL, SDValue Val, SDValue Ptr, SDValue Mask, EVT MemVT, MachineMemOperand *MMO, SelectionDAG &DAG) {}
bool X86::isExtendedSwiftAsyncFrameSupported(const X86Subtarget &Subtarget, const MachineFunction &MF) {}
static SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, const X86Subtarget &Subtarget, SelectionDAG &DAG) {}
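
// Return/frame address queries, exception handling (EH_RETURN, SjLj),
// trampolines, and rounding-mode / FP-environment lowering.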
SDValue X86TargetLowering::LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const {}
SDValue X86TargetLowering::LowerADDROFRETURNADDR(SDValue Op, SelectionDAG &DAG) const {}
SDValue X86TargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {}
Register X86TargetLowering::getRegisterByName(const char *RegName, LLT VT, const MachineFunction &MF) const {}
SDValue X86TargetLowering::LowerFRAME_TO_ARGS_OFFSET(SDValue Op, SelectionDAG &DAG) const {}
Register X86TargetLowering::getExceptionPointerRegister(const Constant *PersonalityFn) const {}
Register X86TargetLowering::getExceptionSelectorRegister(const Constant *PersonalityFn) const {}
bool X86TargetLowering::needsFixedCatchObjects() const {}
SDValue X86TargetLowering::LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const {}
SDValue X86TargetLowering::lowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const {}
SDValue X86TargetLowering::lowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG) const {}
SDValue X86TargetLowering::lowerEH_SJLJ_SETUP_DISPATCH(SDValue Op, SelectionDAG &DAG) const {}
static SDValue LowerADJUST_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) {}
SDValue X86TargetLowering::LowerINIT_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const {}
SDValue X86TargetLowering::LowerGET_ROUNDING(SDValue Op, SelectionDAG &DAG) const {}
SDValue X86TargetLowering::LowerSET_ROUNDING(SDValue Op, SelectionDAG &DAG) const {}
const unsigned X87StateSize = 28;
const unsigned FPStateSize = 32;
const unsigned FPStateSizeInBits = FPStateSize * 8;
SDValue X86TargetLowering::LowerGET_FPENV_MEM(SDValue Op, SelectionDAG &DAG) const {}
static SDValue createSetFPEnvNodes(SDValue Ptr, SDValue Chain, const SDLoc &DL, EVT MemVT, MachineMemOperand *MMO, SelectionDAG &DAG, const X86Subtarget &Subtarget) {}
SDValue X86TargetLowering::LowerSET_FPENV_MEM(SDValue Op, SelectionDAG &DAG) const {}
SDValue X86TargetLowering::LowerRESET_FPENV(SDValue Op, SelectionDAG &DAG) const {}
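
// Bit-count lowering (CTLZ/CTTZ) and scalar/vector integer arithmetic:
// add/sub (including saturating forms), abs, avg, min/max, abd and
// multiplication, plus the Win64 i128 libcall paths.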
static SDValue LowerVectorCTLZ_AVX512CDI(SDValue Op, SelectionDAG &DAG, const X86Subtarget &Subtarget) {}
static SDValue LowerVectorCTLZInRegLUT(SDValue Op, const SDLoc &DL, const X86Subtarget &Subtarget, SelectionDAG &DAG) {}
static SDValue LowerVectorCTLZ(SDValue Op, const SDLoc &DL, const X86Subtarget &Subtarget, SelectionDAG &DAG) {}
static SDValue LowerCTLZ(SDValue Op, const X86Subtarget &Subtarget, SelectionDAG &DAG) {}
static SDValue LowerCTTZ(SDValue Op, const X86Subtarget &Subtarget, SelectionDAG &DAG) {}
static SDValue lowerAddSub(SDValue Op, SelectionDAG &DAG, const X86Subtarget &Subtarget) {}
static SDValue LowerADDSAT_SUBSAT(SDValue Op, SelectionDAG &DAG, const X86Subtarget &Subtarget) {}
static SDValue LowerABS(SDValue Op, const X86Subtarget &Subtarget, SelectionDAG &DAG) {}
static SDValue LowerAVG(SDValue Op, const X86Subtarget &Subtarget, SelectionDAG &DAG) {}
static SDValue LowerMINMAX(SDValue Op, const X86Subtarget &Subtarget, SelectionDAG &DAG) {}
static SDValue LowerFMINIMUM_FMAXIMUM(SDValue Op, const X86Subtarget &Subtarget, SelectionDAG &DAG) {}
static SDValue LowerABD(SDValue Op, const X86Subtarget &Subtarget, SelectionDAG &DAG) {}
static SDValue LowerMUL(SDValue Op, const X86Subtarget &Subtarget, SelectionDAG &DAG) {}
static SDValue LowervXi8MulWithUNPCK(SDValue A, SDValue B, const SDLoc &dl, MVT VT, bool IsSigned, const X86Subtarget &Subtarget, SelectionDAG &DAG, SDValue *Low = nullptr) {}
static SDValue LowerMULH(SDValue Op, const X86Subtarget &Subtarget, SelectionDAG &DAG) {}
static SDValue LowerMULO(SDValue Op, const X86Subtarget &Subtarget, SelectionDAG &DAG) {}
SDValue X86TargetLowering::LowerWin64_i128OP(SDValue Op, SelectionDAG &DAG) const {}
SDValue X86TargetLowering::LowerWin64_FP_TO_INT128(SDValue Op, SelectionDAG &DAG, SDValue &Chain) const {}
SDValue X86TargetLowering::LowerWin64_INT128_TO_FP(SDValue Op, SelectionDAG &DAG) const {}
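
// GFNI control-immediate helpers and lowering of vector shifts, funnel
// shifts and rotates.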
uint64_t getGFNICtrlImm(unsigned Opcode, unsigned Amt = 0) {}
SDValue getGFNICtrlMask(unsigned Opcode, SelectionDAG &DAG, const SDLoc &DL, MVT VT, unsigned Amt = 0) {}
static bool supportedVectorShiftWithImm(EVT VT, const X86Subtarget &Subtarget, unsigned Opcode) {}
static bool supportedVectorShiftWithBaseAmnt(EVT VT, const X86Subtarget &Subtarget, unsigned Opcode) {}
static bool supportedVectorVarShift(EVT VT, const X86Subtarget &Subtarget, unsigned Opcode) {}
static SDValue LowerShiftByScalarImmediate(SDValue Op, SelectionDAG &DAG, const X86Subtarget &Subtarget) {}
static SDValue LowerShiftByScalarVariable(SDValue Op, SelectionDAG &DAG, const X86Subtarget &Subtarget) {}
static SDValue convertShiftLeftToScale(SDValue Amt, const SDLoc &dl, const X86Subtarget &Subtarget, SelectionDAG &DAG) {}
static SDValue LowerShift(SDValue Op, const X86Subtarget &Subtarget, SelectionDAG &DAG) {}
static SDValue LowerFunnelShift(SDValue Op, const X86Subtarget &Subtarget, SelectionDAG &DAG) {}
static SDValue LowerRotate(SDValue Op, const X86Subtarget &Subtarget, SelectionDAG &DAG) {}
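
// Atomic operations: expansion-kind decisions for atomic loads, stores and
// RMW sequences, plus fence and CMPXCHG lowering.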
bool X86TargetLowering::needsCmpXchgNb(Type *MemType) const {}
TargetLoweringBase::AtomicExpansionKind X86TargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const {}
TargetLowering::AtomicExpansionKind X86TargetLowering::shouldExpandAtomicLoadInIR(LoadInst *LI) const {}
enum BitTestKind : unsigned {}
static std::pair<Value *, BitTestKind> FindSingleBitChange(Value *V) {}
TargetLowering::AtomicExpansionKind X86TargetLowering::shouldExpandLogicAtomicRMWInIR(AtomicRMWInst *AI) const {}
void X86TargetLowering::emitBitTestAtomicRMWIntrinsic(AtomicRMWInst *AI) const {}
static bool shouldExpandCmpArithRMWInIR(AtomicRMWInst *AI) {}
void X86TargetLowering::emitCmpArithAtomicRMWIntrinsic(AtomicRMWInst *AI) const {}
TargetLowering::AtomicExpansionKind X86TargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {}
LoadInst *X86TargetLowering::lowerIdempotentRMWIntoFencedLoad(AtomicRMWInst *AI) const {}
static SDValue emitLockedStackOp(SelectionDAG &DAG, const X86Subtarget &Subtarget, SDValue Chain, const SDLoc &DL) {}
static SDValue LowerATOMIC_FENCE(SDValue Op, const X86Subtarget &Subtarget, SelectionDAG &DAG) {}
static SDValue LowerCMP_SWAP(SDValue Op, const X86Subtarget &Subtarget, SelectionDAG &DAG) {}
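
// Bitcast, CTPOP, BITREVERSE and PARITY lowering, atomic arithmetic via
// LOCK-prefixed instructions, masked load/store/gather/scatter, and the
// top-level LowerOperation / ReplaceNodeResults dispatchers.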
static SDValue getPMOVMSKB(const SDLoc &DL, SDValue V, SelectionDAG &DAG, const X86Subtarget &Subtarget) {}
static SDValue LowerBITCAST(SDValue Op, const X86Subtarget &Subtarget, SelectionDAG &DAG) {}
static SDValue LowerHorizontalByteSum(SDValue V, MVT VT, const X86Subtarget &Subtarget, SelectionDAG &DAG) {}
static SDValue LowerVectorCTPOPInRegLUT(SDValue Op, const SDLoc &DL, const X86Subtarget &Subtarget, SelectionDAG &DAG) {}
static SDValue LowerVectorCTPOP(SDValue Op, const SDLoc &DL, const X86Subtarget &Subtarget, SelectionDAG &DAG) {}
static SDValue LowerCTPOP(SDValue N, const X86Subtarget &Subtarget, SelectionDAG &DAG) {}
static SDValue LowerBITREVERSE_XOP(SDValue Op, SelectionDAG &DAG) {}
static SDValue LowerBITREVERSE(SDValue Op, const X86Subtarget &Subtarget, SelectionDAG &DAG) {}
static SDValue LowerPARITY(SDValue Op, const X86Subtarget &Subtarget, SelectionDAG &DAG) {}
static SDValue lowerAtomicArithWithLOCK(SDValue N, SelectionDAG &DAG, const X86Subtarget &Subtarget) {}
static SDValue lowerAtomicArith(SDValue N, SelectionDAG &DAG, const X86Subtarget &Subtarget) {}
static SDValue LowerATOMIC_STORE(SDValue Op, SelectionDAG &DAG, const X86Subtarget &Subtarget) {}
static SDValue LowerADDSUBO_CARRY(SDValue Op, SelectionDAG &DAG) {}
static SDValue LowerFSINCOS(SDValue Op, const X86Subtarget &Subtarget, SelectionDAG &DAG) {}
static SDValue ExtendToType(SDValue InOp, MVT NVT, SelectionDAG &DAG, bool FillWithZeroes = false) {}
static SDValue LowerMSCATTER(SDValue Op, const X86Subtarget &Subtarget, SelectionDAG &DAG) {}
static SDValue LowerMLOAD(SDValue Op, const X86Subtarget &Subtarget, SelectionDAG &DAG) {}
static SDValue LowerMSTORE(SDValue Op, const X86Subtarget &Subtarget, SelectionDAG &DAG) {}
static SDValue LowerMGATHER(SDValue Op, const X86Subtarget &Subtarget, SelectionDAG &DAG) {}
static SDValue LowerADDRSPACECAST(SDValue Op, SelectionDAG &DAG) {}
SDValue X86TargetLowering::LowerGC_TRANSITION(SDValue Op, SelectionDAG &DAG) const {}
static SDValue LowerCVTPS2PH(SDValue Op, SelectionDAG &DAG) {}
static SDValue LowerPREFETCH(SDValue Op, const X86Subtarget &Subtarget, SelectionDAG &DAG) {}
static SDValue LowerFCanonicalize(SDValue Op, SelectionDAG &DAG) {}
static StringRef getInstrStrFromOpNo(const SmallVectorImpl<StringRef> &AsmStrs, unsigned OpNo) {}
bool X86TargetLowering::isInlineAsmTargetBranch(const SmallVectorImpl<StringRef> &AsmStrs, unsigned OpNo) const {}
static SDValue getFlagsOfCmpZeroFori1(SelectionDAG &DAG, const SDLoc &DL, SDValue Mask) {}
SDValue X86TargetLowering::visitMaskedLoad(SelectionDAG &DAG, const SDLoc &DL, SDValue Chain, MachineMemOperand *MMO, SDValue &NewLoad, SDValue Ptr, SDValue PassThru, SDValue Mask) const {}
SDValue X86TargetLowering::visitMaskedStore(SelectionDAG &DAG, const SDLoc &DL, SDValue Chain, MachineMemOperand *MMO, SDValue Ptr, SDValue Val, SDValue Mask) const {}
SDValue X86TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {}
void X86TargetLowering::ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue> &Results, SelectionDAG &DAG) const {}
const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const {}
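
// TargetLowering cost and legality queries: addressing modes, free
// truncations/extensions, legal immediates, shuffle legality and jump table
// preferences.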
bool X86TargetLowering::isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty, unsigned AS, Instruction *I) const {}
bool X86TargetLowering::isVectorShiftByScalarCheap(Type *Ty) const {}
bool X86TargetLowering::isBinOp(unsigned Opcode) const {}
bool X86TargetLowering::isCommutativeBinOp(unsigned Opcode) const {}
bool X86TargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const {}
bool X86TargetLowering::allowTruncateForTailCall(Type *Ty1, Type *Ty2) const {}
bool X86TargetLowering::isLegalICmpImmediate(int64_t Imm) const {}
bool X86TargetLowering::isLegalAddImmediate(int64_t Imm) const {}
bool X86TargetLowering::isLegalStoreImmediate(int64_t Imm) const {}
bool X86TargetLowering::isTruncateFree(EVT VT1, EVT VT2) const {}
bool X86TargetLowering::isZExtFree(Type *Ty1, Type *Ty2) const {}
bool X86TargetLowering::isZExtFree(EVT VT1, EVT VT2) const {}
bool X86TargetLowering::isZExtFree(SDValue Val, EVT VT2) const {}
bool X86TargetLowering::shouldSinkOperands(Instruction *I, SmallVectorImpl<Use *> &Ops) const {}
bool X86TargetLowering::shouldConvertPhiType(Type *From, Type *To) const {}
bool X86TargetLowering::isVectorLoadExtDesirable(SDValue ExtVal) const {}
bool X86TargetLowering::isFMAFasterThanFMulAndFAdd(const MachineFunction &MF, EVT VT) const {}
bool X86TargetLowering::isNarrowingProfitable(SDNode *N, EVT SrcVT, EVT DestVT) const {}
bool X86TargetLowering::shouldFoldSelectWithIdentityConstant(unsigned Opcode, EVT VT) const {}
bool X86TargetLowering::isShuffleMaskLegal(ArrayRef<int> Mask, EVT VT) const {}
bool X86TargetLowering::isVectorClearMaskLegal(ArrayRef<int> Mask, EVT VT) const {}
bool X86TargetLowering::areJTsAllowed(const Function *Fn) const {}
MVT X86TargetLowering::getPreferredSwitchConditionType(LLVMContext &Context, EVT ConditionVT) const {}
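
//===----------------------------------------------------------------------===//
//                           X86 Scheduler Hooks
//===----------------------------------------------------------------------===//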
static bool isEFLAGSLiveAfter(MachineBasicBlock::iterator Itr, MachineBasicBlock *BB) {}
static MachineBasicBlock *emitXBegin(MachineInstr &MI, MachineBasicBlock *MBB, const TargetInstrInfo *TII) {}
MachineBasicBlock *X86TargetLowering::EmitVAARGWithCustomInserter(MachineInstr &MI, MachineBasicBlock *MBB) const {}
static bool checkAndUpdateEFLAGSKill(MachineBasicBlock::iterator SelectItr, MachineBasicBlock *BB, const TargetRegisterInfo *TRI) {}
static bool isCMOVPseudo(MachineInstr &MI) {}
static MachineInstrBuilder createPHIsForCMOVsInSinkBB(MachineBasicBlock::iterator MIItBegin, MachineBasicBlock::iterator MIItEnd, MachineBasicBlock *TrueMBB, MachineBasicBlock *FalseMBB, MachineBasicBlock *SinkMBB) {}
MachineBasicBlock *X86TargetLowering::EmitLoweredCascadedSelect(MachineInstr &FirstCMOV, MachineInstr &SecondCascadedCMOV, MachineBasicBlock *ThisMBB) const {}
MachineBasicBlock *X86TargetLowering::EmitLoweredSelect(MachineInstr &MI, MachineBasicBlock *ThisMBB) const {}
static unsigned getSUBriOpcode(bool IsLP64) {}
MachineBasicBlock *X86TargetLowering::EmitLoweredProbedAlloca(MachineInstr &MI, MachineBasicBlock *MBB) const {}
MachineBasicBlock *X86TargetLowering::EmitLoweredSegAlloca(MachineInstr &MI, MachineBasicBlock *BB) const {}
MachineBasicBlock *X86TargetLowering::EmitLoweredCatchRet(MachineInstr &MI, MachineBasicBlock *BB) const {}
MachineBasicBlock *X86TargetLowering::EmitLoweredTLSAddr(MachineInstr &MI, MachineBasicBlock *BB) const {}
MachineBasicBlock *X86TargetLowering::EmitLoweredTLSCall(MachineInstr &MI, MachineBasicBlock *BB) const {}
static unsigned getOpcodeForIndirectThunk(unsigned RPOpc) {}
static const char *getIndirectThunkSymbol(const X86Subtarget &Subtarget, unsigned Reg) {}
MachineBasicBlock *X86TargetLowering::EmitLoweredIndirectThunk(MachineInstr &MI, MachineBasicBlock *BB) const {}
void X86TargetLowering::emitSetJmpShadowStackFix(MachineInstr &MI, MachineBasicBlock *MBB) const {}
MachineBasicBlock *X86TargetLowering::emitEHSjLjSetJmp(MachineInstr &MI, MachineBasicBlock *MBB) const {}
MachineBasicBlock *X86TargetLowering::emitLongJmpShadowStackFix(MachineInstr &MI, MachineBasicBlock *MBB) const {}
MachineBasicBlock *X86TargetLowering::emitEHSjLjLongJmp(MachineInstr &MI, MachineBasicBlock *MBB) const {}
void X86TargetLowering::SetupEntryBlockForSjLj(MachineInstr &MI, MachineBasicBlock *MBB, MachineBasicBlock *DispatchBB, int FI) const {}
MachineBasicBlock *X86TargetLowering::EmitSjLjDispatchBlock(MachineInstr &MI, MachineBasicBlock *BB) const {}
MachineBasicBlock *X86TargetLowering::emitPatchableEventCall(MachineInstr &MI, MachineBasicBlock *BB) const {}
MachineBasicBlock *X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI, MachineBasicBlock *BB) const {}
bool X86TargetLowering::targetShrinkDemandedConstant(SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, TargetLoweringOpt &TLO) const {}
static void computeKnownBitsForPSADBW(SDValue LHS, SDValue RHS, KnownBits &Known, const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth) {}
static void computeKnownBitsForPMADDWD(SDValue LHS, SDValue RHS, KnownBits &Known, const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth) {}
static void computeKnownBitsForPMADDUBSW(SDValue LHS, SDValue RHS, KnownBits &Known, const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth) {}
static KnownBits computeKnownBitsForHorizontalOperation(const SDValue Op, const APInt &DemandedElts, unsigned Depth, const SelectionDAG &DAG, const function_ref<KnownBits(const KnownBits &, const KnownBits &)> KnownBitsFunc) {}
void X86TargetLowering::computeKnownBitsForTargetNode(const SDValue Op, KnownBits &Known, const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth) const {}
unsigned X86TargetLowering::ComputeNumSignBitsForTargetNode(SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth) const {}
SDValue X86TargetLowering::unwrapAddress(SDValue N) const {}
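
// Shuffle combining: match target shuffle masks onto single instructions
// (unary/binary, with or without a permute immediate), then combine whole
// shuffle chains.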
static SDValue narrowLoadToVZLoad(LoadSDNode *LN, MVT MemVT, MVT VT, SelectionDAG &DAG) {}
static bool matchUnaryShuffle(MVT MaskVT, ArrayRef<int> Mask, bool AllowFloatDomain, bool AllowIntDomain, SDValue V1, const SelectionDAG &DAG, const X86Subtarget &Subtarget, unsigned &Shuffle, MVT &SrcVT, MVT &DstVT) {}
static bool matchUnaryPermuteShuffle(MVT MaskVT, ArrayRef<int> Mask, const APInt &Zeroable, bool AllowFloatDomain, bool AllowIntDomain, const SelectionDAG &DAG, const X86Subtarget &Subtarget, unsigned &Shuffle, MVT &ShuffleVT, unsigned &PermuteImm) {}
static bool matchBinaryShuffle(MVT MaskVT, ArrayRef<int> Mask, bool AllowFloatDomain, bool AllowIntDomain, SDValue &V1, SDValue &V2, const SDLoc &DL, SelectionDAG &DAG, const X86Subtarget &Subtarget, unsigned &Shuffle, MVT &SrcVT, MVT &DstVT, bool IsUnary) {}
static bool matchBinaryPermuteShuffle(MVT MaskVT, ArrayRef<int> Mask, const APInt &Zeroable, bool AllowFloatDomain, bool AllowIntDomain, SDValue &V1, SDValue &V2, const SDLoc &DL, SelectionDAG &DAG, const X86Subtarget &Subtarget, unsigned &Shuffle, MVT &ShuffleVT, unsigned &PermuteImm) {}
// Forward declaration; the definition follows combineX86ShuffleChain below.
static SDValue combineX86ShuffleChainWithExtract(ArrayRef<SDValue> Inputs, SDValue Root, ArrayRef<int> BaseMask, int Depth, bool HasVariableMask, bool AllowVariableCrossLaneMask, bool AllowVariablePerLaneMask, SelectionDAG &DAG, const X86Subtarget &Subtarget);
static SDValue combineX86ShuffleChain(ArrayRef<SDValue> Inputs, SDValue Root, ArrayRef<int> BaseMask, int Depth, bool HasVariableMask, bool AllowVariableCrossLaneMask, bool AllowVariablePerLaneMask, SelectionDAG &DAG, const X86Subtarget &Subtarget) {}
static SDValue combineX86ShuffleChainWithExtract(ArrayRef<SDValue> Inputs, SDValue Root, ArrayRef<int> BaseMask, int Depth, bool HasVariableMask, bool AllowVariableCrossLaneMask, bool AllowVariablePerLaneMask, SelectionDAG &DAG, const X86Subtarget &Subtarget) {}
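
// Canonicalize shuffles of horizontal ops, fold constant shuffle operands,
// then recursively combine through chains of target shuffles.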
static SDValue canonicalizeShuffleMaskWithHorizOp(MutableArrayRef<SDValue> Ops, MutableArrayRef<int> Mask, unsigned RootSizeInBits, const SDLoc &DL, SelectionDAG &DAG, const X86Subtarget &Subtarget) {}
static SDValue combineX86ShufflesConstants(MVT VT, ArrayRef<SDValue> Ops, ArrayRef<int> Mask, bool HasVariableMask, SelectionDAG &DAG, const SDLoc &DL, const X86Subtarget &Subtarget) {}
enum {}
static SDValue combineX86ShufflesRecursively(ArrayRef<SDValue> SrcOps, int SrcOpIndex, SDValue Root, ArrayRef<int> RootMask, ArrayRef<const SDNode *> SrcNodes, unsigned Depth, unsigned MaxDepth, bool HasVariableMask, bool AllowVariableCrossLaneMask, bool AllowVariablePerLaneMask, SelectionDAG &DAG, const X86Subtarget &Subtarget) {}
static SDValue combineX86ShufflesRecursively(SDValue Op, SelectionDAG &DAG, const X86Subtarget &Subtarget) {}
static SmallVector<int, 4> getPSHUFShuffleMask(SDValue N) {}
static SDValue combineRedundantDWordShuffle(SDValue N, MutableArrayRef<int> Mask, const SDLoc &DL, SelectionDAG &DAG) {}
static SDValue combineCommutableSHUFP(SDValue N, MVT VT, const SDLoc &DL, SelectionDAG &DAG) {}
static SDValue combineBlendOfPermutes(MVT VT, SDValue N0, SDValue N1, ArrayRef<int> BlendMask, const APInt &DemandedElts, SelectionDAG &DAG, const X86Subtarget &Subtarget, const SDLoc &DL) {}
static bool isUnaryOp(unsigned Opcode) {}
static SDValue canonicalizeShuffleWithOp(SDValue N, SelectionDAG &DAG, const SDLoc &DL) {}
static SDValue canonicalizeLaneShuffleWithRepeatedOps(SDValue V, SelectionDAG &DAG, const SDLoc &DL) {}
static SDValue combineTargetShuffle(SDValue N, const SDLoc &DL, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const X86Subtarget &Subtarget) {}
static bool isAddSubOrSubAddMask(ArrayRef<int> Mask, bool &Op0Even) {}
static bool isAddSubOrSubAdd(SDNode *N, const X86Subtarget &Subtarget, SelectionDAG &DAG, SDValue &Opnd0, SDValue &Opnd1, bool &IsSubAdd) {}
static SDValue combineShuffleToFMAddSub(SDNode *N, const SDLoc &DL, const X86Subtarget &Subtarget, SelectionDAG &DAG) {}
static SDValue combineShuffleToAddSubOrFMAddSub(SDNode *N, const SDLoc &DL, const X86Subtarget &Subtarget, SelectionDAG &DAG) {}
static SDValue combineShuffleOfConcatUndef(SDNode *N, const SDLoc &DL, SelectionDAG &DAG, const X86Subtarget &Subtarget) {}
static SDValue narrowShuffle(ShuffleVectorSDNode *Shuf, SelectionDAG &DAG) {}
static SDValue combineShuffle(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const X86Subtarget &Subtarget) {}
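
// Target hooks for the generic SimplifyDemandedBits/VectorElts framework,
// plus undef/poison reasoning for X86-specific nodes.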
bool X86TargetLowering::SimplifyDemandedVectorEltsForTargetShuffle(SDValue Op, const APInt &DemandedElts, unsigned MaskIndex, TargetLowering::TargetLoweringOpt &TLO, unsigned Depth) const {}
bool X86TargetLowering::SimplifyDemandedVectorEltsForTargetNode(SDValue Op, const APInt &DemandedElts, APInt &KnownUndef, APInt &KnownZero, TargetLoweringOpt &TLO, unsigned Depth) const {}
bool X86TargetLowering::SimplifyDemandedBitsForTargetNode(SDValue Op, const APInt &OriginalDemandedBits, const APInt &OriginalDemandedElts, KnownBits &Known, TargetLoweringOpt &TLO, unsigned Depth) const {}
SDValue X86TargetLowering::SimplifyMultipleUseDemandedBitsForTargetNode(SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, SelectionDAG &DAG, unsigned Depth) const {}
bool X86TargetLowering::isGuaranteedNotToBeUndefOrPoisonForTargetNode(SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG, bool PoisonOnly, unsigned Depth) const {}
bool X86TargetLowering::canCreateUndefOrPoisonForTargetNode(SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG, bool PoisonOnly, bool ConsiderFlags, unsigned Depth) const {}
bool X86TargetLowering::isSplatValueForTargetNode(SDValue Op, const APInt &DemandedElts, APInt &UndefElts, const SelectionDAG &DAG, unsigned Depth) const {}
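
// DAG combines for bitcasts of vXi1 masks and MMX build-vectors, and
// pattern detection for PSADBW / VPDPBUSD reductions and extract-element
// folds.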
static bool checkBitcastSrcVectorSize(SDValue Src, unsigned Size, bool AllowTruncate) {}
static unsigned getAltBitOpcode(unsigned Opcode) {}
static SDValue adjustBitcastSrcVectorSSE1(SelectionDAG &DAG, SDValue Src, const SDLoc &DL) {}
static SDValue signExtendBitcastSrcVector(SelectionDAG &DAG, EVT SExtVT, SDValue Src, const SDLoc &DL) {}
static SDValue combineBitcastvxi1(SelectionDAG &DAG, EVT VT, SDValue Src, const SDLoc &DL, const X86Subtarget &Subtarget) {}
static SDValue combinevXi1ConstantToInteger(SDValue Op, SelectionDAG &DAG) {}
static SDValue combineCastedMaskArithmetic(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const X86Subtarget &Subtarget) {}
static SDValue createMMXBuildVector(BuildVectorSDNode *BV, SelectionDAG &DAG, const X86Subtarget &Subtarget) {}
static SDValue combineBitcastToBoolVector(EVT VT, SDValue V, const SDLoc &DL, SelectionDAG &DAG, const X86Subtarget &Subtarget, unsigned Depth = 0) {}
static SDValue combineBitcast(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const X86Subtarget &Subtarget) {}
static bool detectExtMul(SelectionDAG &DAG, const SDValue &Mul, SDValue &Op0, SDValue &Op1) {}
static bool detectZextAbsDiff(const SDValue &Abs, SDValue &Op0, SDValue &Op1) {}
static SDValue createVPDPBUSD(SelectionDAG &DAG, SDValue LHS, SDValue RHS, unsigned &LogBias, const SDLoc &DL, const X86Subtarget &Subtarget) {}
static SDValue createPSADBW(SelectionDAG &DAG, const SDValue &Zext0, const SDValue &Zext1, const SDLoc &DL, const X86Subtarget &Subtarget) {}
static SDValue combineMinMaxReduction(SDNode *Extract, SelectionDAG &DAG, const X86Subtarget &Subtarget) {}
static SDValue combinePredicateReduction(SDNode *Extract, SelectionDAG &DAG, const X86Subtarget &Subtarget) {}
static SDValue combineVPDPBUSDPattern(SDNode *Extract, SelectionDAG &DAG, const X86Subtarget &Subtarget) {}
static SDValue combineBasicSADPattern(SDNode *Extract, SelectionDAG &DAG, const X86Subtarget &Subtarget) {}
static SDValue combineExtractFromVectorLoad(SDNode *N, EVT VecVT, SDValue SrcVec, uint64_t Idx, const SDLoc &dl, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI) {}
static SDValue combineExtractWithShuffle(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const X86Subtarget &Subtarget) {}
static SDValue scalarizeExtEltFP(SDNode *ExtElt, SelectionDAG &DAG, const X86Subtarget &Subtarget) {}
static SDValue combineArithReduction(SDNode *ExtElt, SelectionDAG &DAG, const X86Subtarget &Subtarget) {}
static SDValue combineExtractVectorElt(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const X86Subtarget &Subtarget) {}
static SDValue combineToExtendBoolVectorInReg(unsigned Opcode, const SDLoc &DL, EVT VT, SDValue N0, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const X86Subtarget &Subtarget) {}
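
// DAG combines for SELECT/VSELECT: fold with all-ones/zero operands, narrow
// wide vector selects, and form BLENDV.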
static SDValue combineVSelectWithAllOnesOrZeros(SDNode *N, SelectionDAG &DAG, const SDLoc &DL, TargetLowering::DAGCombinerInfo &DCI, const X86Subtarget &Subtarget) {}
static SDValue narrowVectorSelect(SDNode *N, SelectionDAG &DAG, const SDLoc &DL, const X86Subtarget &Subtarget) {}
static SDValue combineSelectOfTwoConstants(SDNode *N, SelectionDAG &DAG, const SDLoc &DL) {}
static SDValue combineVSelectToBLENDV(SDNode *N, SelectionDAG &DAG, const SDLoc &DL, TargetLowering::DAGCombinerInfo &DCI, const X86Subtarget &Subtarget) {}
static SDValue combineLogicBlendIntoConditionalNegate(EVT VT, SDValue Mask, SDValue X, SDValue Y, const SDLoc &DL, SelectionDAG &DAG, const X86Subtarget &Subtarget) {}
static SDValue commuteSelect(SDNode *N, SelectionDAG &DAG, const SDLoc &DL, const X86Subtarget &Subtarget) {}
static SDValue combineSelect(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const X86Subtarget &Subtarget) {}
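
// EFLAGS-producing nodes: rewrite SETCC/CMOV condition codes based on the
// flag-setting operation (atomic arith, PTEST, MOVMSK) that feeds them.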
static SDValue combineSetCCAtomicArith(SDValue Cmp, X86::CondCode &CC, SelectionDAG &DAG, const X86Subtarget &Subtarget) {}
static SDValue checkSignTestSetCCCombine(SDValue Cmp, X86::CondCode &CC, SelectionDAG &DAG) {}
static SDValue checkBoolTestSetCCCombine(SDValue Cmp, X86::CondCode &CC) {}
static bool checkBoolTestAndOrSetCCCombine(SDValue Cond, X86::CondCode &CC0, X86::CondCode &CC1, SDValue &Flags, bool &isAnd) {}
static SDValue combineCarryThroughADD(SDValue EFLAGS, SelectionDAG &DAG) {}
static SDValue combinePTESTCC(SDValue EFLAGS, X86::CondCode &CC, SelectionDAG &DAG, const X86Subtarget &Subtarget) {}
static SDValue combineSetCCMOVMSK(SDValue EFLAGS, X86::CondCode &CC, SelectionDAG &DAG, const X86Subtarget &Subtarget) {}
static SDValue combineSetCCEFLAGS(SDValue EFLAGS, X86::CondCode &CC, SelectionDAG &DAG, const X86Subtarget &Subtarget) {}
static SDValue combineCMov(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const X86Subtarget &Subtarget) {}
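
// Multiplication combines (narrowing to PMADDWD/PMULDQ, strength reduction
// of constant multiplies) and shift combines.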
enum class ShrinkMode {}
static bool canReduceVMulWidth(SDNode *N, SelectionDAG &DAG, ShrinkMode &Mode) {}
static SDValue reduceVMULWidth(SDNode *N, const SDLoc &DL, SelectionDAG &DAG, const X86Subtarget &Subtarget) {}
static SDValue combineMulSpecial(uint64_t MulAmt, SDNode *N, SelectionDAG &DAG, EVT VT, const SDLoc &DL) {}
static SDValue combineMulToPMADDWD(SDNode *N, const SDLoc &DL, SelectionDAG &DAG, const X86Subtarget &Subtarget) {}
static SDValue combineMulToPMULDQ(SDNode *N, const SDLoc &DL, SelectionDAG &DAG, const X86Subtarget &Subtarget) {}
static SDValue combineMul(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const X86Subtarget &Subtarget) {}
static SDValue combineShiftToPMULH(SDNode *N, SelectionDAG &DAG, const SDLoc &DL, const X86Subtarget &Subtarget) {}
static SDValue combineShiftLeft(SDNode *N, SelectionDAG &DAG, const X86Subtarget &Subtarget) {}
static SDValue combineShiftRightArithmetic(SDNode *N, SelectionDAG &DAG, const X86Subtarget &Subtarget) {}
static SDValue combineShiftRightLogical(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const X86Subtarget &Subtarget) {}
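
// Vector pack, horizontal add/sub, vector shift, insert and compare-equal
// combines.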
static SDValue combineHorizOpWithShuffle(SDNode *N, SelectionDAG &DAG, const X86Subtarget &Subtarget) {}
static SDValue combineVectorPack(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const X86Subtarget &Subtarget) {}
static SDValue combineVectorHADDSUB(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const X86Subtarget &Subtarget) {}
static SDValue combineVectorShiftVar(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const X86Subtarget &Subtarget) {}
static SDValue combineVectorShiftImm(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const X86Subtarget &Subtarget) {}
static SDValue combineVectorInsert(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const X86Subtarget &Subtarget) {}
static SDValue combineCompareEqual(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const X86Subtarget &Subtarget) {}
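
// Logic-op combines: AND/OR folds (ANDNP, bit-select, masked merges,
// BZHI/BMI patterns) and ctlz/srl tricks for cmp-eq-zero chains.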
static SDValue combineAndNotIntoANDNP(SDNode *N, SelectionDAG &DAG) {}
static SDValue combineAndShuffleNot(SDNode *N, SelectionDAG &DAG, const X86Subtarget &Subtarget) {}
static SDValue PromoteMaskArithmetic(SDValue N, const SDLoc &DL, EVT VT, SelectionDAG &DAG, unsigned Depth) {}
static SDValue PromoteMaskArithmetic(SDValue N, const SDLoc &DL, SelectionDAG &DAG, const X86Subtarget &Subtarget) {}
static unsigned convertIntLogicToFPLogicOpcode(unsigned Opcode) {}
static SDValue convertIntLogicToFPLogic(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const X86Subtarget &Subtarget) {}
static SDValue combineBitOpWithMOVMSK(SDNode *N, SelectionDAG &DAG) {}
static SDValue combineBitOpWithShift(SDNode *N, SelectionDAG &DAG) {}
static SDValue combineBitOpWithPACK(SDNode *N, SelectionDAG &DAG) {}
static SDValue combineAndMaskToShift(SDNode *N, SelectionDAG &DAG, const X86Subtarget &Subtarget) {}
static SDValue getIndexFromUnindexedLoad(LoadSDNode *Ld) {}
static bool hasBZHI(const X86Subtarget &Subtarget, MVT VT) {}
static SDValue combineAndLoadToBZHI(SDNode *Node, SelectionDAG &DAG, const X86Subtarget &Subtarget) {}
static SDValue combineScalarAndWithMaskSetcc(SDNode *N, SelectionDAG &DAG, const X86Subtarget &Subtarget) {}
static SDValue getBMIMatchingOp(unsigned Opc, SelectionDAG &DAG, SDValue OpMustEq, SDValue Op, unsigned Depth) {}
static SDValue combineBMILogicOp(SDNode *N, SelectionDAG &DAG, const X86Subtarget &Subtarget) {}
static SDValue combineX86SubCmpForFlags(SDNode *N, SDValue Flag, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const X86Subtarget &ST) {}
static SDValue combineAndOrForCcmpCtest(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const X86Subtarget &ST) {}
static SDValue combineAnd(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const X86Subtarget &Subtarget) {}
static SDValue canonicalizeBitSelect(SDNode *N, SelectionDAG &DAG, const X86Subtarget &Subtarget) {}
static bool matchLogicBlend(SDNode *N, SDValue &X, SDValue &Y, SDValue &Mask) {}
static SDValue combineLogicBlendIntoPBLENDV(SDNode *N, SelectionDAG &DAG, const X86Subtarget &Subtarget) {}
static SDValue lowerX86CmpEqZeroToCtlzSrl(SDValue Op, SelectionDAG &DAG) {}
static SDValue combineOrCmpEqZeroToCtlzSrl(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const X86Subtarget &Subtarget) {}
static SDValue foldMaskedMergeImpl(SDValue And0_L, SDValue And0_R, SDValue And1_L, SDValue And1_R, const SDLoc &DL, SelectionDAG &DAG) {}
static SDValue foldMaskedMerge(SDNode *Node, SelectionDAG &DAG) {}
static SDValue combineAddOrSubToADCOrSBB(bool IsSub, const SDLoc &DL, EVT VT, SDValue X, SDValue Y, SelectionDAG &DAG, bool ZeroSecondOpOnly = false) {}
static SDValue combineAddOrSubToADCOrSBB(SDNode *N, const SDLoc &DL, SelectionDAG &DAG) {}
static SDValue combineOrXorWithSETCC(SDNode *N, SDValue N0, SDValue N1, SelectionDAG &DAG) {}
static SDValue combineOr(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const X86Subtarget &Subtarget) {}
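
// XOR-to-compare folds, saturation pattern detection, and combines for
// (masked) loads and stores.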
static SDValue foldXorTruncShiftIntoCmp(SDNode *N, SelectionDAG &DAG) {}
static SDValue foldVectorXorShiftIntoCmp(SDNode *N, SelectionDAG &DAG, const X86Subtarget &Subtarget) {}
static SDValue detectUSatPattern(SDValue In, EVT VT, SelectionDAG &DAG, const SDLoc &DL) {}
static SDValue detectSSatPattern(SDValue In, EVT VT, bool MatchPackUS = false) {}
static SDValue combineTruncateWithSat(SDValue In, EVT VT, const SDLoc &DL, SelectionDAG &DAG, const X86Subtarget &Subtarget) {}
static SDValue combineConstantPoolLoads(SDNode *N, const SDLoc &dl, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const X86Subtarget &Subtarget) {}
static SDValue combineLoad(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const X86Subtarget &Subtarget) {}
static int getOneTrueElt(SDValue V) {}
static bool getParamsForOneTrueMaskedElt(MaskedLoadStoreSDNode *MaskedOp, SelectionDAG &DAG, SDValue &Addr, SDValue &Index, Align &Alignment, unsigned &Offset) {}
static SDValue reduceMaskedLoadToScalarLoad(MaskedLoadSDNode *ML, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const X86Subtarget &Subtarget) {}
static SDValue combineMaskedLoadConstantMask(MaskedLoadSDNode *ML, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI) {}
static SDValue combineMaskedLoad(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const X86Subtarget &Subtarget) {}
static SDValue reduceMaskedStoreToScalarStore(MaskedStoreSDNode *MS, SelectionDAG &DAG, const X86Subtarget &Subtarget) {}
static SDValue combineMaskedStore(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const X86Subtarget &Subtarget) {}
static SDValue combineStore(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const X86Subtarget &Subtarget) {}
static SDValue combineVEXTRACT_STORE(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const X86Subtarget &Subtarget) {}
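
// Horizontal add/sub formation and floating-point combines (FADD/FSUB,
// complex FMA, LRINT/LLRINT, truncation-friendly arithmetic, FNEG).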
static bool isHorizontalBinOp(unsigned HOpcode, SDValue &LHS, SDValue &RHS, SelectionDAG &DAG, const X86Subtarget &Subtarget, bool IsCommutative, SmallVectorImpl<int> &PostShuffleMask, bool ForceHorizOp) {}
static SDValue combineToHorizontalAddSub(SDNode *N, SelectionDAG &DAG, const X86Subtarget &Subtarget) {}
static SDValue combineFMulcFCMulc(SDNode *N, SelectionDAG &DAG, const X86Subtarget &Subtarget) {}
static SDValue combineFaddCFmul(SDNode *N, SelectionDAG &DAG, const X86Subtarget &Subtarget) {}
static SDValue combineFaddFsub(SDNode *N, SelectionDAG &DAG, const X86Subtarget &Subtarget) {}
static SDValue combineLRINT_LLRINT(SDNode *N, SelectionDAG &DAG, const X86Subtarget &Subtarget) {}
static SDValue combineTruncatedArithmetic(SDNode *N, SelectionDAG &DAG, const X86Subtarget &Subtarget, const SDLoc &DL) {}
static SDValue combinePMULH(SDValue Src, EVT VT, const SDLoc &DL, SelectionDAG &DAG, const X86Subtarget &Subtarget) {}
static SDValue detectPMADDUBSW(SDValue In, EVT VT, SelectionDAG &DAG, const X86Subtarget &Subtarget, const SDLoc &DL) {}
static SDValue combineTruncate(SDNode *N, SelectionDAG &DAG, const X86Subtarget &Subtarget) {}
static SDValue combineVTRUNC(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI) {}
static SDValue isFNEG(SelectionDAG &DAG, SDNode *N, unsigned Depth = 0) {}
static unsigned negateFMAOpcode(unsigned Opcode, bool NegMul, bool NegAcc, bool NegRes) {}
static SDValue combineFneg(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const X86Subtarget &Subtarget) {}
SDValue X86TargetLowering::getNegatedExpression(SDValue Op, SelectionDAG &DAG, bool LegalOperations, bool ForCodeSize, NegatibleCost &Cost, unsigned Depth) const {}
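
// XOR and FP-logic combines, plus sign/zero-extension combines and FMA
// negation folds.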
static SDValue lowerX86FPLogicOp(SDNode *N, SelectionDAG &DAG, const X86Subtarget &Subtarget) {}
static SDValue foldXor1SetCC(SDNode *N, SelectionDAG &DAG) {}
static SDValue combineXorSubCTLZ(SDNode *N, const SDLoc &DL, SelectionDAG &DAG, const X86Subtarget &Subtarget) {}
static SDValue combineXor(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const X86Subtarget &Subtarget) {}
static SDValue combineBITREVERSE(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const X86Subtarget &Subtarget) {}
static SDValue combineAVG(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const X86Subtarget &Subtarget) {}
static SDValue combineBEXTR(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const X86Subtarget &Subtarget) {}
static bool isNullFPScalarOrVectorConst(SDValue V) {}
static SDValue getNullFPConstForNullVal(SDValue V, SelectionDAG &DAG, const X86Subtarget &Subtarget) {}
static SDValue combineFAndFNotToFAndn(SDNode *N, SelectionDAG &DAG, const X86Subtarget &Subtarget) {}
static SDValue combineFAnd(SDNode *N, SelectionDAG &DAG, const X86Subtarget &Subtarget) {}
static SDValue combineFAndn(SDNode *N, SelectionDAG &DAG, const X86Subtarget &Subtarget) {}
static SDValue combineFOr(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const X86Subtarget &Subtarget) {}
static SDValue combineFMinFMax(SDNode *N, SelectionDAG &DAG) {}
static SDValue combineFMinNumFMaxNum(SDNode *N, SelectionDAG &DAG, const X86Subtarget &Subtarget) {}
static SDValue combineX86INT_TO_FP(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI) {}
static SDValue combineCVTP2I_CVTTP2I(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI) {}
static SDValue combineAndnp(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const X86Subtarget &Subtarget) {}
static SDValue combineBT(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI) {}
static SDValue combineCVTPH2PS(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI) {}
static SDValue combineSextInRegCmov(SDNode *N, SelectionDAG &DAG) {}
static SDValue combineSignExtendInReg(SDNode *N, SelectionDAG &DAG, const X86Subtarget &Subtarget) {}
static SDValue promoteExtBeforeAdd(SDNode *Ext, SelectionDAG &DAG, const X86Subtarget &Subtarget) {}
static SDValue combineToExtendCMOV(SDNode *Extend, SelectionDAG &DAG) {}
static SDValue combineExtSetcc(SDNode *N, SelectionDAG &DAG, const X86Subtarget &Subtarget) {}
static SDValue combineSext(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const X86Subtarget &Subtarget) {}
static SDValue getInvertedVectorForFMA(SDValue V, SelectionDAG &DAG) {}
static SDValue combineFMA(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const X86Subtarget &Subtarget) {}
static SDValue combineFMADDSUB(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI) {}
static SDValue combineZext(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const X86Subtarget &Subtarget) {}
static SDValue truncateAVX512SetCCNoBWI(EVT VT, EVT OpVT, SDValue LHS, SDValue RHS, ISD::CondCode CC, const SDLoc &DL, SelectionDAG &DAG, const X86Subtarget &Subtarget) {}
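
// SETCC/MOVMSK/TESTP combines, gather-scatter index rewriting, branch
// condition folds, int<->fp conversion combines, and ADD/SUB/ADC/SBB
// combines (including PMADDWD matching).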
                                        const X86Subtarget &Subtarget) {}static SDValue combineSetCC(SDNode *N, SelectionDAG &DAG,
                            TargetLowering::DAGCombinerInfo &DCI,
                            const X86Subtarget &Subtarget) {}static SDValue combineMOVMSK(SDNode *N, SelectionDAG &DAG,
                             TargetLowering::DAGCombinerInfo &DCI,
                             const X86Subtarget &Subtarget) {}static SDValue combineTESTP(SDNode *N, SelectionDAG &DAG,
                            TargetLowering::DAGCombinerInfo &DCI,
                            const X86Subtarget &Subtarget) {}static SDValue combineX86GatherScatter(SDNode *N, SelectionDAG &DAG,
                                       TargetLowering::DAGCombinerInfo &DCI) {}static SDValue rebuildGatherScatter(MaskedGatherScatterSDNode *GorS,
                                    SDValue Index, SDValue Base, SDValue Scale,
                                    SelectionDAG &DAG) {}static SDValue combineGatherScatter(SDNode *N, SelectionDAG &DAG,
                                    TargetLowering::DAGCombinerInfo &DCI) {}static SDValue combineX86SetCC(SDNode *N, SelectionDAG &DAG,
                               const X86Subtarget &Subtarget) {}static SDValue combineBrCond(SDNode *N, SelectionDAG &DAG,
                             const X86Subtarget &Subtarget) {}static SDValue combineVectorCompareAndMaskUnaryOp(SDNode *N,
                                                  SelectionDAG &DAG) {}static SDValue combineToFPTruncExtElt(SDNode *N, SelectionDAG &DAG) {}static SDValue combineUIntToFP(SDNode *N, SelectionDAG &DAG,
                               const X86Subtarget &Subtarget) {}static SDValue combineSIntToFP(SDNode *N, SelectionDAG &DAG,
                               TargetLowering::DAGCombinerInfo &DCI,
                               const X86Subtarget &Subtarget) {}static bool needCarryOrOverflowFlag(SDValue Flags) {}static bool onlyZeroFlagUsed(SDValue Flags) {}static SDValue combineCMP(SDNode *N, SelectionDAG &DAG,
static SDValue combineCMP(SDNode *N, SelectionDAG &DAG,
                          TargetLowering::DAGCombinerInfo &DCI,
                          const X86Subtarget &Subtarget) {}

static SDValue combineX86AddSub(SDNode *N, SelectionDAG &DAG,
                                TargetLowering::DAGCombinerInfo &DCI,
                                const X86Subtarget &ST) {}

static SDValue combineSBB(SDNode *N, SelectionDAG &DAG) {}

static SDValue combineADC(SDNode *N, SelectionDAG &DAG,
                          TargetLowering::DAGCombinerInfo &DCI) {}

static SDValue matchPMADDWD(SelectionDAG &DAG, SDValue Op0, SDValue Op1,
                            const SDLoc &DL, EVT VT,
                            const X86Subtarget &Subtarget) {}

static SDValue matchPMADDWD_2(SelectionDAG &DAG, SDValue N0, SDValue N1,
                              const SDLoc &DL, EVT VT,
                              const X86Subtarget &Subtarget) {}

static SDValue combineAddOfPMADDWD(SelectionDAG &DAG, SDValue N0, SDValue N1,
                                   const SDLoc &DL, EVT VT) {}
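// The PMADDWD matchers above plausibly target the classic multiply-add
// reduction shape (hedged; the exact shuffle and legality checks live in the
// elided bodies). In DAG terms the canonical pattern is roughly:
//   (add (mul (sext vXi16 a), (sext vXi16 b)),
//        (mul (sext vXi16 c), (sext vXi16 d)))
//     -> X86ISD::VPMADDWD on suitably interleaved i16 inputs,
// since one VPMADDWD computes a[2i]*b[2i] + a[2i+1]*b[2i+1] per i32 lane.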
static SDValue pushAddIntoCmovOfConsts(SDNode *N, const SDLoc &DL,
                                       SelectionDAG &DAG,
                                       const X86Subtarget &Subtarget) {}

static SDValue combineAdd(SDNode *N, SelectionDAG &DAG,
                          TargetLowering::DAGCombinerInfo &DCI,
                          const X86Subtarget &Subtarget) {}

static SDValue combineSubABS(SDNode *N, SelectionDAG &DAG) {}

static SDValue combineSubSetcc(SDNode *N, SelectionDAG &DAG) {}

static SDValue combineX86CloadCstore(SDNode *N, SelectionDAG &DAG) {}

static SDValue combineSub(SDNode *N, SelectionDAG &DAG,
                          TargetLowering::DAGCombinerInfo &DCI,
                          const X86Subtarget &Subtarget) {}

static SDValue combineVectorCompare(SDNode *N, SelectionDAG &DAG,
                                    const X86Subtarget &Subtarget) {}

static std::optional<unsigned>
CastIntSETCCtoFP(MVT VT, ISD::CondCode CC, unsigned NumSignificantBitsLHS,
                 unsigned NumSignificantBitsRHS) {}
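// A hedged reading of CastIntSETCCtoFP: when both integer operands have few
// enough significant bits to be exactly representable in the FP format (for
// f32, at most 24 bits), an integer vector compare can instead be done in
// the FP domain; the helper presumably returns the int-to-FP cast opcode to
// apply (ISD::SINT_TO_FP or ISD::UINT_TO_FP), or std::nullopt when the
// rewrite is not safe or not profitable.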
static SDValue combineConcatVectorOps(const SDLoc &DL, MVT VT,
                                      ArrayRef<SDValue> Ops, SelectionDAG &DAG,
                                      TargetLowering::DAGCombinerInfo &DCI,
                                      const X86Subtarget &Subtarget) {}

static SDValue combineCONCAT_VECTORS(SDNode *N, SelectionDAG &DAG,
                                     TargetLowering::DAGCombinerInfo &DCI,
                                     const X86Subtarget &Subtarget) {}

static SDValue combineINSERT_SUBVECTOR(SDNode *N, SelectionDAG &DAG,
                                       TargetLowering::DAGCombinerInfo &DCI,
                                       const X86Subtarget &Subtarget) {}

static SDValue narrowExtractedVectorSelect(SDNode *Ext, const SDLoc &DL,
                                           SelectionDAG &DAG) {}

static SDValue combineEXTRACT_SUBVECTOR(SDNode *N, SelectionDAG &DAG,
                                        TargetLowering::DAGCombinerInfo &DCI,
                                        const X86Subtarget &Subtarget) {}

static SDValue combineScalarToVector(SDNode *N, SelectionDAG &DAG,
                                     const X86Subtarget &Subtarget) {}

static SDValue combinePMULDQ(SDNode *N, SelectionDAG &DAG,
                             TargetLowering::DAGCombinerInfo &DCI,
                             const X86Subtarget &Subtarget) {}

static SDValue combineVPMADD(SDNode *N, SelectionDAG &DAG,
                             TargetLowering::DAGCombinerInfo &DCI) {}

static SDValue combineEXTEND_VECTOR_INREG(SDNode *N, SelectionDAG &DAG,
                                          TargetLowering::DAGCombinerInfo &DCI,
                                          const X86Subtarget &Subtarget) {}

static SDValue combineKSHIFT(SDNode *N, SelectionDAG &DAG,
                             TargetLowering::DAGCombinerInfo &DCI) {}

static SDValue combineFP16_TO_FP(SDNode *N, SelectionDAG &DAG,
                                 const X86Subtarget &Subtarget) {}

static SDValue combineFP_EXTEND(SDNode *N, SelectionDAG &DAG,
                                TargetLowering::DAGCombinerInfo &DCI,
                                const X86Subtarget &Subtarget) {}

static SDValue combineBROADCAST_LOAD(SDNode *N, SelectionDAG &DAG,
                                     TargetLowering::DAGCombinerInfo &DCI) {}

static SDValue combineFP_ROUND(SDNode *N, SelectionDAG &DAG,
                               const X86Subtarget &Subtarget) {}

static SDValue combineMOVDQ2Q(SDNode *N, SelectionDAG &DAG) {}

static SDValue combinePDEP(SDNode *N, SelectionDAG &DAG,
                           TargetLowering::DAGCombinerInfo &DCI) {}

static SDValue FixupMMXIntrinsicTypes(SDNode *N, SelectionDAG &DAG) {}

static SDValue combineINTRINSIC_WO_CHAIN(SDNode *N, SelectionDAG &DAG,
                                         TargetLowering::DAGCombinerInfo &DCI) {}

static SDValue combineINTRINSIC_W_CHAIN(SDNode *N, SelectionDAG &DAG,
                                        TargetLowering::DAGCombinerInfo &DCI) {}

static SDValue combineINTRINSIC_VOID(SDNode *N, SelectionDAG &DAG,
                                     TargetLowering::DAGCombinerInfo &DCI) {}

SDValue X86TargetLowering::PerformDAGCombine(SDNode *N,
                                             DAGCombinerInfo &DCI) const {}
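// PerformDAGCombine is the target hook the generic DAG combiner invokes for
// every node; its elided body is, in this file's style, a large switch that
// dispatches on the node's opcode to the static helpers above. A minimal
// sketch of that shape (case list illustrative, not exhaustive):
//   SelectionDAG &DAG = DCI.DAG;
//   switch (N->getOpcode()) {
//   default: break;
//   case ISD::SETCC:   return combineSetCC(N, DAG, DCI, Subtarget);
//   case ISD::ADD:     return combineAdd(N, DAG, DCI, Subtarget);
//   case ISD::SUB:     return combineSub(N, DAG, DCI, Subtarget);
//   case X86ISD::PDEP: return combinePDEP(N, DAG, DCI);
//   }
//   return SDValue();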
bool X86TargetLowering::preferABDSToABSWithNSW(EVT VT) const {}

bool X86TargetLowering::preferSextInRegOfTruncate(EVT TruncVT, EVT VT,
                                                  EVT ExtVT) const {}

bool X86TargetLowering::isTypeDesirableForOp(unsigned Opc, EVT VT) const {}

SDValue X86TargetLowering::expandIndirectJTBranch(const SDLoc &dl,
                                                  SDValue Value, SDValue Addr,
                                                  int JTI,
                                                  SelectionDAG &DAG) const {}

TargetLowering::AndOrSETCCFoldKind
X86TargetLowering::isDesirableToCombineLogicOpOfSETCC(
    const SDNode *LogicOp, const SDNode *SETCC0, const SDNode *SETCC1) const {}

bool X86TargetLowering::IsDesirableToPromoteOp(SDValue Op, EVT &PVT) const {}

static bool matchAsm(StringRef S, ArrayRef<const char *> Pieces) {}

static bool clobbersFlagRegisters(const SmallVector<StringRef, 4> &AsmPieces) {}

bool X86TargetLowering::ExpandInlineAsm(CallInst *CI) const {}

static X86::CondCode parseConstraintCode(llvm::StringRef Constraint) {}
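// ExpandInlineAsm rewrites known inline-asm idioms (classically "bswap $0"
// style sequences) into intrinsic calls, with matchAsm as its string
// matcher. A minimal sketch of such a matcher, assuming current StringRef
// helpers (the sketch name is an assumption, not the elided body):
static bool matchAsmSketch(StringRef S, ArrayRef<const char *> Pieces) {
  // Consume each expected piece in order, allowing blanks between pieces.
  for (const char *Piece : Pieces) {
    S = S.ltrim(" \t");
    if (!S.consume_front(Piece))
      return false;
  }
  return S.trim().empty();
}

// parseConstraintCode likely maps "@cc<cond>" flag-output constraints onto
// X86 condition codes; a hedged sketch of that mapping (subset only, and
// again a separately named illustration):
static X86::CondCode parseConstraintCodeSketch(llvm::StringRef Constraint) {
  return StringSwitch<X86::CondCode>(Constraint)
      .Case("{@cca}", X86::COND_A)
      .Case("{@ccb}", X86::COND_B)
      .Case("{@cce}", X86::COND_E)
      .Case("{@ccne}", X86::COND_NE)
      .Default(X86::COND_INVALID);
}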
X86TargetLowering::ConstraintType
X86TargetLowering::getConstraintType(StringRef Constraint) const {}

TargetLowering::ConstraintWeight
X86TargetLowering::getSingleConstraintMatchWeight(
    AsmOperandInfo &Info, const char *Constraint) const {}

const char *X86TargetLowering::LowerXConstraint(EVT ConstraintVT) const {}

SDValue X86TargetLowering::LowerAsmOutputForConstraint(
    SDValue &Chain, SDValue &Glue, const SDLoc &DL,
    const AsmOperandInfo &OpInfo, SelectionDAG &DAG) const {}

void X86TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
                                                     StringRef Constraint,
                                                     std::vector<SDValue> &Ops,
                                                     SelectionDAG &DAG) const {}

static bool isGRClass(const TargetRegisterClass &RC) {}

static bool isFRClass(const TargetRegisterClass &RC) {}

static bool isVKClass(const TargetRegisterClass &RC) {}

static bool useEGPRInlineAsm(const X86Subtarget &Subtarget) {}
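// The register-class predicates above feed getRegForInlineAsmConstraint. A
// hedged sketch of the mask-register case, assuming the usual VK* register
// classes (sketch name illustrative, not the elided body):
static bool isVKClassSketch(const TargetRegisterClass &RC) {
  // All AVX-512 mask registers live in one of the VK* classes.
  return RC.hasSuperClassEq(&X86::VK1RegClass) ||
         RC.hasSuperClassEq(&X86::VK2RegClass) ||
         RC.hasSuperClassEq(&X86::VK4RegClass) ||
         RC.hasSuperClassEq(&X86::VK8RegClass) ||
         RC.hasSuperClassEq(&X86::VK16RegClass) ||
         RC.hasSuperClassEq(&X86::VK32RegClass) ||
         RC.hasSuperClassEq(&X86::VK64RegClass);
}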
std::pair<unsigned, const TargetRegisterClass *>
X86TargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                                                StringRef Constraint,
                                                MVT VT) const {}

bool X86TargetLowering::isIntDivCheap(EVT VT, AttributeList Attr) const {}

void X86TargetLowering::initializeSplitCSR(MachineBasicBlock *Entry) const {}

void X86TargetLowering::insertCopiesSplitCSR(
    MachineBasicBlock *Entry,
    const SmallVectorImpl<MachineBasicBlock *> &Exits) const {}

bool X86TargetLowering::supportSwiftError() const {}

MachineInstr *
X86TargetLowering::EmitKCFICheck(MachineBasicBlock &MBB,
                                 MachineBasicBlock::instr_iterator &MBBI,
                                 const TargetInstrInfo *TII) const {}

bool X86TargetLowering::hasStackProbeSymbol(const MachineFunction &MF) const {}

bool X86TargetLowering::hasInlineStackProbe(const MachineFunction &MF) const {}

StringRef
X86TargetLowering::getStackProbeSymbolName(const MachineFunction &MF) const {}

unsigned
X86TargetLowering::getStackProbeSize(const MachineFunction &MF) const {}

Align X86TargetLowering::getPrefLoopAlignment(MachineLoop *ML) const {}
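// getPrefLoopAlignment ties back to the experimental innermost-loop
// alignment option declared near the top of this file. A hedged sketch of
// the plausible logic, in comment form since the real body is elided:
//   if (ML && ML->isInnermost() &&
//       ExperimentalPrefInnermostLoopAlignment.getNumOccurrences() > 0)
//     return Align(1ULL << ExperimentalPrefInnermostLoopAlignment);
//   return TargetLowering::getPrefLoopAlignment(ML);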