llvm/lib/Target/AArch64/AArch64ISelLowering.cpp

//===-- AArch64ISelLowering.cpp - AArch64 DAG Lowering Implementation  ----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the AArch64TargetLowering class.
//
//===----------------------------------------------------------------------===//

#include "AArch64ISelLowering.h"
#include "AArch64CallingConvention.h"
#include "AArch64ExpandImm.h"
#include "AArch64MachineFunctionInfo.h"
#include "AArch64PerfectShuffle.h"
#include "AArch64RegisterInfo.h"
#include "AArch64Subtarget.h"
#include "MCTargetDesc/AArch64AddressingModes.h"
#include "Utils/AArch64BaseInfo.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/ObjCARCUtil.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/ComplexDeinterleavingPass.h"
#include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RuntimeLibcallUtil.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/TargetCallingConv.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/CodeGenTypes/MachineValueType.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsAArch64.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/Value.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/InstructionCost.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/SipHash.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/TargetParser/Triple.h"
#include <algorithm>
#include <bitset>
#include <cassert>
#include <cctype>
#include <cstdint>
#include <cstdlib>
#include <iterator>
#include <limits>
#include <optional>
#include <tuple>
#include <utility>
#include <vector>

using namespace llvm;
using namespace llvm::PatternMatch;

#define DEBUG_TYPE

STATISTIC(NumTailCalls, "Number of tail calls");
STATISTIC(NumShiftInserts, "Number of vector shift inserts");
STATISTIC(NumOptimizedImms, "Number of times immediates were optimized");

// FIXME: The necessary dtprel relocations don't seem to be supported
// well in the GNU bfd and gold linkers at the moment. Therefore, by
// default, for now, fall back to GeneralDynamic code generation.
cl::opt<bool> EnableAArch64ELFLocalDynamicTLSGeneration(
    "aarch64-elf-ldtls-generation", cl::Hidden,
    cl::desc("Allow AArch64 Local Dynamic TLS code generation"),
    cl::init(false));

static cl::opt<bool>
EnableOptimizeLogicalImm("aarch64-enable-logical-imm", cl::Hidden,
                         cl::desc("Enable AArch64 logical imm instruction "
                                  "optimization"),
                         cl::init(true));

// Temporary option added for the purpose of testing functionality added
// to DAGCombiner.cpp in D92230. It is expected that this can be removed
// in the future, when both implementations are based on MGATHER rather
// than the GLD1 nodes added for the SVE gather load intrinsics.
static cl::opt<bool>
EnableCombineMGatherIntrinsics("aarch64-enable-mgather-combine", cl::Hidden,
                                cl::desc("Combine extends of AArch64 masked "
                                         "gather intrinsics"),
                                cl::init(true));

static cl::opt<bool> EnableExtToTBL("aarch64-enable-ext-to-tbl", cl::Hidden,
                                    cl::desc("Combine ext and trunc to TBL"),
                                    cl::init(true));

// XOR, OR and CMP all use ALU ports, and data dependencies become the
// bottleneck after this transform on high-end CPUs. This maximum leaf-node
// limit guards that the cmp+ccmp conversion remains profitable.
static cl::opt<unsigned> MaxXors("aarch64-max-xors", cl::init(16), cl::Hidden,
                                 cl::desc("Maximum of xors"));

// By turning this on, we will not fall back to DAG ISel when encountering
// scalable vector types for any instruction, even if SVE is not yet supported
// for some instructions.
// See [AArch64TargetLowering::fallbackToDAGISel] for implementation details.
cl::opt<bool> EnableSVEGISel(
    "aarch64-enable-gisel-sve", cl::Hidden,
    cl::desc("Enable / disable SVE scalable vectors in Global ISel"),
    cl::init(false));

/// Value type used for condition codes.
static const MVT MVT_CC =;

static const MCPhysReg GPRArgRegs[] =;
static const MCPhysReg FPRArgRegs[] =;

ArrayRef<MCPhysReg> llvm::AArch64::getGPRArgRegs() {}

ArrayRef<MCPhysReg> llvm::AArch64::getFPRArgRegs() {}

static inline EVT getPackedSVEVectorVT(EVT VT) {}

// NOTE: Currently there's only a need to return integer vector types. If this
// changes then just add an extra "type" parameter.
static inline EVT getPackedSVEVectorVT(ElementCount EC) {}

static inline EVT getPromotedVTForPredicate(EVT VT) {}

/// Returns true if VT's elements occupy the lowest bit positions of its
/// associated register class without any intervening space.
///
/// For example, nxv2f16, nxv4f16 and nxv8f16 are legal types that belong to the
/// same register class, but only nxv8f16 can be treated as a packed vector.
static inline bool isPackedVectorType(EVT VT, SelectionDAG &DAG) {}

// Returns true for ####_MERGE_PASSTHRU opcodes, whose operands have a leading
// predicate and end with a passthru value matching the result type.
static bool isMergePassthruOpcode(unsigned Opc) {}

// Returns true if inactive lanes are known to be zeroed by construction.
static bool isZeroingInactiveLanes(SDValue Op) {}

static std::tuple<SDValue, SDValue>
extractPtrauthBlendDiscriminators(SDValue Disc, SelectionDAG *DAG) {}

AArch64TargetLowering::AArch64TargetLowering(const TargetMachine &TM,
                                             const AArch64Subtarget &STI)
    :{}

void AArch64TargetLowering::addTypeForNEON(MVT VT) {}

bool AArch64TargetLowering::shouldExpandGetActiveLaneMask(EVT ResVT,
                                                          EVT OpVT) const {}

bool AArch64TargetLowering::shouldExpandPartialReductionIntrinsic(
    const IntrinsicInst *I) const {}

bool AArch64TargetLowering::shouldExpandCttzElements(EVT VT) const {}

void AArch64TargetLowering::addTypeForFixedLengthSVE(MVT VT) {}

void AArch64TargetLowering::addDRType(MVT VT) {}

void AArch64TargetLowering::addQRType(MVT VT) {}

EVT AArch64TargetLowering::getSetCCResultType(const DataLayout &,
                                              LLVMContext &C, EVT VT) const {}

// isIntImmediate - This method tests to see if the node is a constant
// operand. If so, Imm will receive the value.
static bool isIntImmediate(const SDNode *N, uint64_t &Imm) {}

// isOpcWithIntImmediate - This method tests to see if the node is a specific
// opcode and that it has an immediate integer right operand.
// If so, Imm will receive the value.
static bool isOpcWithIntImmediate(const SDNode *N, unsigned Opc,
                                  uint64_t &Imm) {}

static bool optimizeLogicalImm(SDValue Op, unsigned Size, uint64_t Imm,
                               const APInt &Demanded,
                               TargetLowering::TargetLoweringOpt &TLO,
                               unsigned NewOpc) {}

bool AArch64TargetLowering::targetShrinkDemandedConstant(
    SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts,
    TargetLoweringOpt &TLO) const {}

/// computeKnownBitsForTargetNode - Determine which of the bits specified in
/// Mask are known to be either zero or one and return them in Known.
void AArch64TargetLowering::computeKnownBitsForTargetNode(
    const SDValue Op, KnownBits &Known, const APInt &DemandedElts,
    const SelectionDAG &DAG, unsigned Depth) const {}

unsigned AArch64TargetLowering::ComputeNumSignBitsForTargetNode(
    SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG,
    unsigned Depth) const {}

MVT AArch64TargetLowering::getScalarShiftAmountTy(const DataLayout &DL,
                                                  EVT) const {}

bool AArch64TargetLowering::allowsMisalignedMemoryAccesses(
    EVT VT, unsigned AddrSpace, Align Alignment, MachineMemOperand::Flags Flags,
    unsigned *Fast) const {}

// Same as above but handling LLTs instead.
bool AArch64TargetLowering::allowsMisalignedMemoryAccesses(
    LLT Ty, unsigned AddrSpace, Align Alignment, MachineMemOperand::Flags Flags,
    unsigned *Fast) const {}

FastISel *
AArch64TargetLowering::createFastISel(FunctionLoweringInfo &funcInfo,
                                      const TargetLibraryInfo *libInfo) const {}

const char *AArch64TargetLowering::getTargetNodeName(unsigned Opcode) const {}

MachineBasicBlock *
AArch64TargetLowering::EmitF128CSEL(MachineInstr &MI,
                                    MachineBasicBlock *MBB) const {}

MachineBasicBlock *AArch64TargetLowering::EmitLoweredCatchRet(
       MachineInstr &MI, MachineBasicBlock *BB) const {}

MachineBasicBlock *
AArch64TargetLowering::EmitDynamicProbedAlloc(MachineInstr &MI,
                                              MachineBasicBlock *MBB) const {}

MachineBasicBlock *
AArch64TargetLowering::EmitTileLoad(unsigned Opc, unsigned BaseReg,
                                    MachineInstr &MI,
                                    MachineBasicBlock *BB) const {}

MachineBasicBlock *
AArch64TargetLowering::EmitFill(MachineInstr &MI, MachineBasicBlock *BB) const {}

MachineBasicBlock *AArch64TargetLowering::EmitZTInstr(MachineInstr &MI,
                                                      MachineBasicBlock *BB,
                                                      unsigned Opcode,
                                                      bool Op0IsDef) const {}

MachineBasicBlock *
AArch64TargetLowering::EmitZAInstr(unsigned Opc, unsigned BaseReg,
                                   MachineInstr &MI,
                                   MachineBasicBlock *BB) const {}

MachineBasicBlock *
AArch64TargetLowering::EmitZero(MachineInstr &MI, MachineBasicBlock *BB) const {}

MachineBasicBlock *
AArch64TargetLowering::EmitInitTPIDR2Object(MachineInstr &MI,
                                            MachineBasicBlock *BB) const {}

MachineBasicBlock *
AArch64TargetLowering::EmitAllocateZABuffer(MachineInstr &MI,
                                            MachineBasicBlock *BB) const {}

MachineBasicBlock *AArch64TargetLowering::EmitInstrWithCustomInserter(
    MachineInstr &MI, MachineBasicBlock *BB) const {}

//===----------------------------------------------------------------------===//
// AArch64 Lowering private implementation.
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Lowering Code
//===----------------------------------------------------------------------===//

// Forward declarations of SVE fixed length lowering helpers
static EVT getContainerForFixedLengthVector(SelectionDAG &DAG, EVT VT);
static SDValue convertToScalableVector(SelectionDAG &DAG, EVT VT, SDValue V);
static SDValue convertFromScalableVector(SelectionDAG &DAG, EVT VT, SDValue V);
static SDValue convertFixedMaskToScalableVector(SDValue Mask,
                                                SelectionDAG &DAG);
static SDValue getPredicateForVector(SelectionDAG &DAG, SDLoc &DL, EVT VT);
static SDValue getPredicateForScalableVector(SelectionDAG &DAG, SDLoc &DL,
                                             EVT VT);

/// isZerosVector - Check whether SDNode N is a zero-filled vector.
static bool isZerosVector(const SDNode *N) {}

/// changeIntCCToAArch64CC - Convert a DAG integer condition code to an AArch64
/// CC
static AArch64CC::CondCode changeIntCCToAArch64CC(ISD::CondCode CC) {}

/// changeFPCCToAArch64CC - Convert a DAG fp condition code to an AArch64 CC.
static void changeFPCCToAArch64CC(ISD::CondCode CC,
                                  AArch64CC::CondCode &CondCode,
                                  AArch64CC::CondCode &CondCode2) {}

/// Convert a DAG fp condition code to an AArch64 CC.
/// This differs from changeFPCCToAArch64CC in that it returns cond codes that
/// should be AND'ed instead of OR'ed.
static void changeFPCCToANDAArch64CC(ISD::CondCode CC,
                                     AArch64CC::CondCode &CondCode,
                                     AArch64CC::CondCode &CondCode2) {}

/// changeVectorFPCCToAArch64CC - Convert a DAG fp condition code to an AArch64
/// CC usable with the vector instructions. Fewer operations are available
/// without a real NZCV register, so we have to use less efficient combinations
/// to get the same effect.
static void changeVectorFPCCToAArch64CC(ISD::CondCode CC,
                                        AArch64CC::CondCode &CondCode,
                                        AArch64CC::CondCode &CondCode2,
                                        bool &Invert) {}

static bool isLegalArithImmed(uint64_t C) {}

static bool cannotBeIntMin(SDValue CheckedVal, SelectionDAG &DAG) {}

// Can a (CMP op1, (sub 0, op2)) be turned into a CMN instruction on
// the grounds that "op1 - (-op2) == op1 + op2"? Not always: the C and V flags
// can be set differently by this operation. It comes down to whether
// "SInt(~op2)+1 == SInt(~op2+1)" (and the same for UInt). If they are equal
// then everything is fine. If not, the optimization is wrong. Thus general
// comparisons are only valid if op2 != 0.
//
// So, finally, the only LLVM-native comparisons that don't mention C or V
// are the ones that aren't unsigned comparisons. They're the only ones we can
// safely use CMN for in the absence of information about op2.
static bool isCMN(SDValue Op, ISD::CondCode CC, SelectionDAG &DAG) {}
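// A minimal worked illustration of the caveat above: take op2 == 0, so
// (sub 0, op2) is also 0.
//   CMP  x0, #0    computes x0 - 0 and always sets C (the subtraction never
//                  borrows),
//   CMN  x0, #0    computes x0 + 0 and always leaves C clear.
// An unsigned comparison such as SETUGE reads C and would flip its answer
// under the rewrite, while EQ/NE only read Z and remain correct.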

static SDValue emitStrictFPComparison(SDValue LHS, SDValue RHS, const SDLoc &dl,
                                      SelectionDAG &DAG, SDValue Chain,
                                      bool IsSignaling) {}

static SDValue emitComparison(SDValue LHS, SDValue RHS, ISD::CondCode CC,
                              const SDLoc &dl, SelectionDAG &DAG) {}

/// \defgroup AArch64CCMP CMP;CCMP matching
///
/// These functions deal with the formation of CMP;CCMP;... sequences.
/// The CCMP/CCMN/FCCMP/FCCMPE instructions allow the conditional execution of
/// a comparison. They set the NZCV flags to a predefined value if their
/// predicate is false. This allows us to express arbitrary conjunctions, for
/// example "cmp 0 (and (setCA (cmp A)) (setCB (cmp B)))"
/// expressed as:
///   cmp A
///   ccmp B, inv(CB), CA
///   check for CB flags
///
/// This naturally lets us implement chains of AND operations with SETCC
/// operands. And we can even implement some other situations by transforming
/// them:
///   - We can implement (NEG SETCC) i.e. negating a single comparison by
///     negating the flags used in a CCMP/FCCMP operations.
///   - We can negate the result of a whole chain of CMP/CCMP/FCCMP operations
///     by negating the flags we test for afterwards. i.e.
///     NEG (CMP CCMP CCMP ...) can be implemented.
///   - Note that we can only ever negate all previously processed results.
///     What we can not implement by flipping the flags to test is a negation
///     of two sub-trees (because the negation affects all sub-trees emitted so
///     far, so the 2nd sub-tree we emit would also affect the first).
/// With those tools we can implement some OR operations:
///   - (OR (SETCC A) (SETCC B)) can be implemented via:
///     NEG (AND (NEG (SETCC A)) (NEG (SETCC B)))
///   - After transforming OR to NEG/AND combinations we may be able to use NEG
///     elimination rules from earlier to implement the whole thing as a
///     CCMP/FCCMP chain.
///
/// As a complete example:
///     or (or (setCA (cmp A)) (setCB (cmp B)))
///        (and (setCC (cmp C)) (setCD (cmp D)))
/// can be reassociated to:
///     or (and (setCC (cmp C)) (setCD (cmp D)))
///        (or (setCA (cmp A)) (setCB (cmp B)))
/// can be transformed to:
///     not (and (not (and (setCC (cmp C)) (setCD (cmp D))))
///              (and (not (setCA (cmp A))) (not (setCB (cmp B))))))
/// which can be implemented as:
///   cmp C
///   ccmp D, inv(CD), CC
///   ccmp A, CA, inv(CD)
///   ccmp B, CB, inv(CA)
///   check for CB flags
///
/// A counterexample is "or (and A B) (and C D)" which translates to
/// not (and (not (and (not A) (not B))) (not (and (not C) (not D)))); we
/// can only implement 1 of the inner (not) operations, but not both!
/// @{

/// Create a conditional comparison; Use CCMP, CCMN or FCCMP as appropriate.
static SDValue emitConditionalComparison(SDValue LHS, SDValue RHS,
                                         ISD::CondCode CC, SDValue CCOp,
                                         AArch64CC::CondCode Predicate,
                                         AArch64CC::CondCode OutCC,
                                         const SDLoc &DL, SelectionDAG &DAG) {}

/// Returns true if @p Val is a tree of AND/OR/SETCC operations that can be
/// expressed as a conjunction. See \ref AArch64CCMP.
/// \param CanNegate    Set to true if we can negate the whole sub-tree just by
///                     changing the conditions on the SETCC tests.
///                     (this means we can call emitConjunctionRec() with
///                      Negate==true on this sub-tree)
/// \param MustBeFirst  Set to true if this subtree needs to be negated and we
///                     cannot do the negation naturally. We are required to
///                     emit the subtree first in this case.
/// \param WillNegate   Is true if we are called when the result of this
///                     subexpression must be negated. This happens when the
///                     outer expression is an OR. We can use this fact to know
///                     that we have a double negation (or (or ...) ...) that
///                     can be implemented for free.
static bool canEmitConjunction(const SDValue Val, bool &CanNegate,
                               bool &MustBeFirst, bool WillNegate,
                               unsigned Depth = 0) {}

/// Emit conjunction or disjunction tree with the CMP/FCMP followed by a chain
/// of CCMP/FCCMP ops. See @ref AArch64CCMP.
/// Tries to transform the given i1 producing node @p Val to a series of
/// compare and conditional compare operations. @returns an NZCV flags
/// producing node and sets @p OutCC to the flags that should be tested, or
/// returns SDValue() if the transformation was not possible.
/// \p Negate is true if we want this sub-tree to be negated just by changing
/// SETCC conditions.
static SDValue emitConjunctionRec(SelectionDAG &DAG, SDValue Val,
    AArch64CC::CondCode &OutCC, bool Negate, SDValue CCOp,
    AArch64CC::CondCode Predicate) {}

/// Emit expression as a conjunction (a series of CCMP/FCCMP ops).
/// In some cases this is even possible with OR operations in the expression.
/// See \ref AArch64CCMP.
/// \see emitConjunctionRec().
static SDValue emitConjunction(SelectionDAG &DAG, SDValue Val,
                               AArch64CC::CondCode &OutCC) {}

/// @}

/// Returns how profitable it is to fold a comparison's operand's shift and/or
/// extension operations.
static unsigned getCmpOperandFoldingProfit(SDValue Op) {}

static SDValue getAArch64Cmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
                             SDValue &AArch64cc, SelectionDAG &DAG,
                             const SDLoc &dl) {}

static std::pair<SDValue, SDValue>
getAArch64XALUOOp(AArch64CC::CondCode &CC, SDValue Op, SelectionDAG &DAG) {}

SDValue AArch64TargetLowering::LowerXOR(SDValue Op, SelectionDAG &DAG) const {}

// If Invert is false, sets 'C' bit of NZCV to 0 if value is 0, else sets 'C'
// bit to 1. If Invert is true, sets 'C' bit of NZCV to 1 if value is 0, else
// sets 'C' bit to 0.
static SDValue valueToCarryFlag(SDValue Value, SelectionDAG &DAG, bool Invert) {}

// If Invert is false, value is 1 if 'C' bit of NZCV is 1, else 0.
// If Invert is true, value is 0 if 'C' bit of NZCV is 1, else 1.
static SDValue carryFlagToValue(SDValue Glue, EVT VT, SelectionDAG &DAG,
                                bool Invert) {}

// Value is 1 if 'V' bit of NZCV is 1, else 0
static SDValue overflowFlagToValue(SDValue Glue, EVT VT, SelectionDAG &DAG) {}

// This lowering is inefficient, but it will get cleaned up by
// `foldOverflowCheck`
static SDValue lowerADDSUBO_CARRY(SDValue Op, SelectionDAG &DAG,
                                  unsigned Opcode, bool IsSigned) {}

static SDValue LowerXALUO(SDValue Op, SelectionDAG &DAG) {}

// Prefetch operands are:
// 1: Address to prefetch
// 2: bool isWrite
// 3: int locality (0 = no locality ... 3 = extreme locality)
// 4: bool isDataCache
static SDValue LowerPREFETCH(SDValue Op, SelectionDAG &DAG) {}
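// For reference, the IR this lowering starts from is the generic prefetch
// intrinsic; an illustrative call (operand values assumed) looks like:
//   call void @llvm.prefetch.p0(ptr %addr, i32 1 /*isWrite*/,
//                               i32 3 /*locality*/, i32 1 /*isDataCache*/)
// which is typically selected to an AArch64 PRFM with a matching prfop.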

SDValue AArch64TargetLowering::LowerFP_EXTEND(SDValue Op,
                                              SelectionDAG &DAG) const {}

SDValue AArch64TargetLowering::LowerFP_ROUND(SDValue Op,
                                             SelectionDAG &DAG) const {}

SDValue AArch64TargetLowering::LowerVectorFP_TO_INT(SDValue Op,
                                                    SelectionDAG &DAG) const {}

SDValue AArch64TargetLowering::LowerFP_TO_INT(SDValue Op,
                                              SelectionDAG &DAG) const {}

SDValue
AArch64TargetLowering::LowerVectorFP_TO_INT_SAT(SDValue Op,
                                                SelectionDAG &DAG) const {}

SDValue AArch64TargetLowering::LowerFP_TO_INT_SAT(SDValue Op,
                                                  SelectionDAG &DAG) const {}

SDValue AArch64TargetLowering::LowerVectorXRINT(SDValue Op,
                                                SelectionDAG &DAG) const {}

SDValue AArch64TargetLowering::LowerVectorINT_TO_FP(SDValue Op,
                                                    SelectionDAG &DAG) const {}

SDValue AArch64TargetLowering::LowerINT_TO_FP(SDValue Op,
                                            SelectionDAG &DAG) const {}

SDValue AArch64TargetLowering::LowerFSINCOS(SDValue Op,
                                            SelectionDAG &DAG) const {}

static MVT getSVEContainerType(EVT ContentTy);

SDValue AArch64TargetLowering::LowerBITCAST(SDValue Op,
                                            SelectionDAG &DAG) const {}

static EVT getExtensionTo64Bits(const EVT &OrigVT) {}

static SDValue addRequiredExtensionForVectorMULL(SDValue N, SelectionDAG &DAG,
                                                 const EVT &OrigTy,
                                                 const EVT &ExtTy,
                                                 unsigned ExtOpcode) {}

// Returns lane if Op extracts from a two-element vector and lane is constant
// (i.e., extractelt(<2 x Ty> %v, ConstantLane)), and std::nullopt otherwise.
static std::optional<uint64_t>
getConstantLaneNumOfExtractHalfOperand(SDValue &Op) {}

static bool isExtendedBUILD_VECTOR(SDValue N, SelectionDAG &DAG,
                                   bool isSigned) {}

static SDValue skipExtensionForVectorMULL(SDValue N, SelectionDAG &DAG) {}

static bool isSignExtended(SDValue N, SelectionDAG &DAG) {}

static bool isZeroExtended(SDValue N, SelectionDAG &DAG) {}

static bool isAddSubSExt(SDValue N, SelectionDAG &DAG) {}

static bool isAddSubZExt(SDValue N, SelectionDAG &DAG) {}

SDValue AArch64TargetLowering::LowerGET_ROUNDING(SDValue Op,
                                                 SelectionDAG &DAG) const {}

SDValue AArch64TargetLowering::LowerSET_ROUNDING(SDValue Op,
                                                 SelectionDAG &DAG) const {}

SDValue AArch64TargetLowering::LowerGET_FPMODE(SDValue Op,
                                               SelectionDAG &DAG) const {}

SDValue AArch64TargetLowering::LowerSET_FPMODE(SDValue Op,
                                               SelectionDAG &DAG) const {}

SDValue AArch64TargetLowering::LowerRESET_FPMODE(SDValue Op,
                                                 SelectionDAG &DAG) const {}

static unsigned selectUmullSmull(SDValue &N0, SDValue &N1, SelectionDAG &DAG,
                                 SDLoc DL, bool &IsMLA) {}

SDValue AArch64TargetLowering::LowerMUL(SDValue Op, SelectionDAG &DAG) const {}

static inline SDValue getPTrue(SelectionDAG &DAG, SDLoc DL, EVT VT,
                               int Pattern) {}

static SDValue optimizeIncrementingWhile(SDValue Op, SelectionDAG &DAG,
                                         bool IsSigned, bool IsEqual) {}

// Returns a safe bitcast between two scalable vector predicates, where
// any newly created lanes from a widening bitcast are defined as zero.
static SDValue getSVEPredicateBitCast(EVT VT, SDValue Op, SelectionDAG &DAG) {}

SDValue AArch64TargetLowering::getRuntimePStateSM(SelectionDAG &DAG,
                                                  SDValue Chain, SDLoc DL,
                                                  EVT VT) const {}

// Lower an SME LDR/STR ZA intrinsic
// Case 1: If the vector number (vecnum) is an immediate in range, it gets
// folded into the instruction
//    ldr(%tileslice, %ptr, 11) -> ldr [%tileslice, 11], [%ptr, 11]
// Case 2: If the vecnum is not an immediate, then it is used to modify the base
// and tile slice registers
//    ldr(%tileslice, %ptr, %vecnum)
//    ->
//    %svl = rdsvl
//    %ptr2 = %ptr + %svl * %vecnum
//    %tileslice2 = %tileslice + %vecnum
//    ldr [%tileslice2, 0], [%ptr2, 0]
// Case 3: If the vecnum is an immediate out of range, then the same is done as
// case 2, but the base and slice registers are modified by the greatest
// multiple of 15 lower than the vecnum and the remainder is folded into the
// instruction. This means that successive loads and stores that are offset from
// each other can share the same base and slice register updates.
//    ldr(%tileslice, %ptr, 22)
//    ldr(%tileslice, %ptr, 23)
//    ->
//    %svl = rdsvl
//    %ptr2 = %ptr + %svl * 15
//    %tileslice2 = %tileslice + 15
//    ldr [%tileslice2, 7], [%ptr2, 7]
//    ldr [%tileslice2, 8], [%ptr2, 8]
// Case 4: If the vecnum is an add of an immediate, then the non-immediate
// operand and the immediate can be folded into the instruction, like case 2.
//    ldr(%tileslice, %ptr, %vecnum + 7)
//    ldr(%tileslice, %ptr, %vecnum + 8)
//    ->
//    %svl = rdsvl
//    %ptr2 = %ptr + %svl * %vecnum
//    %tileslice2 = %tileslice + %vecnum
//    ldr [%tileslice2, 7], [%ptr2, 7]
//    ldr [%tileslice2, 8], [%ptr2, 8]
// Case 5: The vecnum being an add of an immediate out of range is also handled,
// in which case the same remainder logic as case 3 is used.
SDValue LowerSMELdrStr(SDValue N, SelectionDAG &DAG, bool IsLoad) {}

SDValue AArch64TargetLowering::LowerINTRINSIC_VOID(SDValue Op,
                                                   SelectionDAG &DAG) const {}

SDValue AArch64TargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
                                                      SelectionDAG &DAG) const {}

SDValue AArch64TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
                                                     SelectionDAG &DAG) const {}

bool AArch64TargetLowering::shouldExtendGSIndex(EVT VT, EVT &EltTy) const {}

bool AArch64TargetLowering::shouldRemoveExtendFromGSIndex(SDValue Extend,
                                                          EVT DataVT) const {}

bool AArch64TargetLowering::isVectorLoadExtDesirable(SDValue ExtVal) const {}

unsigned getGatherVecOpcode(bool IsScaled, bool IsSigned, bool NeedsExtend) {}

unsigned getSignExtendedGatherOpcode(unsigned Opcode) {}

SDValue AArch64TargetLowering::LowerMGATHER(SDValue Op,
                                            SelectionDAG &DAG) const {}

SDValue AArch64TargetLowering::LowerMSCATTER(SDValue Op,
                                             SelectionDAG &DAG) const {}

SDValue AArch64TargetLowering::LowerMLOAD(SDValue Op, SelectionDAG &DAG) const {}

// Custom lower trunc store for v4i8 vectors, since it is promoted to v4i16.
static SDValue LowerTruncateVectorStore(SDLoc DL, StoreSDNode *ST,
                                        EVT VT, EVT MemVT,
                                        SelectionDAG &DAG) {}

// Custom lowering for any store, vector or scalar, with or without a
// truncate operation. Currently we only custom-lower the truncating store
// from vector v4i16 to v4i8 and volatile stores of i128.
SDValue AArch64TargetLowering::LowerSTORE(SDValue Op,
                                          SelectionDAG &DAG) const {}

/// Lower atomic or volatile 128-bit stores to a single STP instruction.
SDValue AArch64TargetLowering::LowerStore128(SDValue Op,
                                             SelectionDAG &DAG) const {}

SDValue AArch64TargetLowering::LowerLOAD(SDValue Op,
                                         SelectionDAG &DAG) const {}

SDValue AArch64TargetLowering::LowerVECTOR_COMPRESS(SDValue Op,
                                                    SelectionDAG &DAG) const {}

// Generate SUBS and CSEL for integer abs.
SDValue AArch64TargetLowering::LowerABS(SDValue Op, SelectionDAG &DAG) const {}

static SDValue LowerBRCOND(SDValue Op, SelectionDAG &DAG) {}

// Treat FSHR with constant shifts as a legal operation; otherwise it is
// expanded. FSHL is converted to FSHR before deciding what to do with it.
static SDValue LowerFunnelShift(SDValue Op, SelectionDAG &DAG) {}

static SDValue LowerFLDEXP(SDValue Op, SelectionDAG &DAG) {}

SDValue AArch64TargetLowering::LowerADJUST_TRAMPOLINE(SDValue Op,
                                                      SelectionDAG &DAG) const {}

SDValue AArch64TargetLowering::LowerINIT_TRAMPOLINE(SDValue Op,
                                                    SelectionDAG &DAG) const {}

SDValue AArch64TargetLowering::LowerOperation(SDValue Op,
                                              SelectionDAG &DAG) const {}

bool AArch64TargetLowering::mergeStoresAfterLegalization(EVT VT) const {}

bool AArch64TargetLowering::useSVEForFixedLengthVectorVT(
    EVT VT, bool OverrideNEON) const {}

//===----------------------------------------------------------------------===//
//                      Calling Convention Implementation
//===----------------------------------------------------------------------===//

static unsigned getIntrinsicID(const SDNode *N) {}

bool AArch64TargetLowering::isReassocProfitable(SelectionDAG &DAG, SDValue N0,
                                                SDValue N1) const {}

/// Selects the correct CCAssignFn for a given CallingConvention value.
CCAssignFn *AArch64TargetLowering::CCAssignFnForCall(CallingConv::ID CC,
                                                     bool IsVarArg) const {}

CCAssignFn *
AArch64TargetLowering::CCAssignFnForReturn(CallingConv::ID CC) const {}

static bool isPassedInFPR(EVT VT) {}

SDValue AArch64TargetLowering::LowerFormalArguments(
    SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {}

void AArch64TargetLowering::saveVarArgRegisters(CCState &CCInfo,
                                                SelectionDAG &DAG,
                                                const SDLoc &DL,
                                                SDValue &Chain) const {}

/// LowerCallResult - Lower the result values of a call into the
/// appropriate copies out of appropriate physical registers.
SDValue AArch64TargetLowering::LowerCallResult(
    SDValue Chain, SDValue InGlue, CallingConv::ID CallConv, bool isVarArg,
    const SmallVectorImpl<CCValAssign> &RVLocs, const SDLoc &DL,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals, bool isThisReturn,
    SDValue ThisVal, bool RequiresSMChange) const {}

/// Return true if the calling convention is one that we can guarantee TCO for.
static bool canGuaranteeTCO(CallingConv::ID CC, bool GuaranteeTailCalls) {}

/// Return true if we might ever do TCO for calls with this calling convention.
static bool mayTailCallThisCC(CallingConv::ID CC) {}

/// Return true if the calling convention supports varargs.
/// Currently only conventions that pass varargs the way the C calling
/// convention does are eligible.
/// Calling conventions listed in this function must also be properly handled
/// in AArch64Subtarget::isCallingConvWin64.
static bool callConvSupportsVarArgs(CallingConv::ID CC) {}

static void analyzeCallOperands(const AArch64TargetLowering &TLI,
                                const AArch64Subtarget *Subtarget,
                                const TargetLowering::CallLoweringInfo &CLI,
                                CCState &CCInfo) {}

bool AArch64TargetLowering::isEligibleForTailCallOptimization(
    const CallLoweringInfo &CLI) const {}

SDValue AArch64TargetLowering::addTokenForArgument(SDValue Chain,
                                                   SelectionDAG &DAG,
                                                   MachineFrameInfo &MFI,
                                                   int ClobberedFI) const {}

bool AArch64TargetLowering::DoesCalleeRestoreStack(CallingConv::ID CallCC,
                                                   bool TailCallOpt) const {}

// Check if the value is zero-extended from i1 to i8
static bool checkZExtBool(SDValue Arg, const SelectionDAG &DAG) {}

void AArch64TargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI,
                                                          SDNode *Node) const {}

SDValue AArch64TargetLowering::changeStreamingMode(SelectionDAG &DAG, SDLoc DL,
                                                   bool Enable, SDValue Chain,
                                                   SDValue InGlue,
                                                   unsigned Condition,
                                                   SDValue PStateSM) const {}

static unsigned getSMCondition(const SMEAttrs &CallerAttrs,
                               const SMEAttrs &CalleeAttrs) {}

/// LowerCall - Lower a call to a callseq_start + CALL + callseq_end chain,
/// and add input and output parameter nodes.
SDValue
AArch64TargetLowering::LowerCall(CallLoweringInfo &CLI,
                                 SmallVectorImpl<SDValue> &InVals) const {}

bool AArch64TargetLowering::CanLowerReturn(
    CallingConv::ID CallConv, MachineFunction &MF, bool isVarArg,
    const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context) const {}

SDValue
AArch64TargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
                                   bool isVarArg,
                                   const SmallVectorImpl<ISD::OutputArg> &Outs,
                                   const SmallVectorImpl<SDValue> &OutVals,
                                   const SDLoc &DL, SelectionDAG &DAG) const {}

//===----------------------------------------------------------------------===//
//  Other Lowering Code
//===----------------------------------------------------------------------===//

SDValue AArch64TargetLowering::getTargetNode(GlobalAddressSDNode *N, EVT Ty,
                                             SelectionDAG &DAG,
                                             unsigned Flag) const {}

SDValue AArch64TargetLowering::getTargetNode(JumpTableSDNode *N, EVT Ty,
                                             SelectionDAG &DAG,
                                             unsigned Flag) const {}

SDValue AArch64TargetLowering::getTargetNode(ConstantPoolSDNode *N, EVT Ty,
                                             SelectionDAG &DAG,
                                             unsigned Flag) const {}

SDValue AArch64TargetLowering::getTargetNode(BlockAddressSDNode* N, EVT Ty,
                                             SelectionDAG &DAG,
                                             unsigned Flag) const {}

SDValue AArch64TargetLowering::getTargetNode(ExternalSymbolSDNode *N, EVT Ty,
                                             SelectionDAG &DAG,
                                             unsigned Flag) const {}

// (loadGOT sym)
template <class NodeTy>
SDValue AArch64TargetLowering::getGOT(NodeTy *N, SelectionDAG &DAG,
                                      unsigned Flags) const {}

// (wrapper %highest(sym), %higher(sym), %hi(sym), %lo(sym))
template <class NodeTy>
SDValue AArch64TargetLowering::getAddrLarge(NodeTy *N, SelectionDAG &DAG,
                                            unsigned Flags) const {}

// (addlow (adrp %hi(sym)) %lo(sym))
template <class NodeTy>
SDValue AArch64TargetLowering::getAddr(NodeTy *N, SelectionDAG &DAG,
                                       unsigned Flags) const {}

// (adr sym)
template <class NodeTy>
SDValue AArch64TargetLowering::getAddrTiny(NodeTy *N, SelectionDAG &DAG,
                                           unsigned Flags) const {}

SDValue AArch64TargetLowering::LowerGlobalAddress(SDValue Op,
                                                  SelectionDAG &DAG) const {}

/// Convert a TLS address reference into the correct sequence of loads
/// and calls to compute the variable's address (for Darwin, currently) and
/// return an SDValue containing the final node.

/// Darwin only has one TLS scheme which must be capable of dealing with the
/// fully general situation, in the worst case. This means:
///     + "extern __thread" declaration.
///     + Defined in a possibly unknown dynamic library.
///
/// The general system is that each __thread variable has a [3 x i64] descriptor
/// which contains information used by the runtime to calculate the address. The
/// only part of this the compiler needs to know about is the first xword, which
/// contains a function pointer that must be called with the address of the
/// entire descriptor in "x0".
///
/// Since this descriptor may be in a different unit, in general even the
/// descriptor must be accessed via an indirect load. The "ideal" code sequence
/// is:
///     adrp x0, _var@TLVPPAGE
///     ldr x0, [x0, _var@TLVPPAGEOFF]   ; x0 now contains address of descriptor
///     ldr x1, [x0]                     ; x1 contains 1st entry of descriptor,
///                                      ; the function pointer
///     blr x1                           ; Uses descriptor address in x0
///     ; Address of _var is now in x0.
///
/// If the address of _var's descriptor *is* known to the linker, then it can
/// change the first "ldr" instruction to an appropriate "add x0, x0, #imm" for
/// a slight efficiency gain.
SDValue
AArch64TargetLowering::LowerDarwinGlobalTLSAddress(SDValue Op,
                                                   SelectionDAG &DAG) const {}

/// Convert a thread-local variable reference into a sequence of instructions to
/// compute the variable's address for the local exec TLS model of ELF targets.
/// The sequence depends on the maximum TLS area size.
SDValue AArch64TargetLowering::LowerELFTLSLocalExec(const GlobalValue *GV,
                                                    SDValue ThreadBase,
                                                    const SDLoc &DL,
                                                    SelectionDAG &DAG) const {}

/// When accessing thread-local variables under either the general-dynamic or
/// local-dynamic system, we make a "TLS-descriptor" call. The variable will
/// have a descriptor, accessible via a PC-relative ADRP, and whose first entry
/// is a function pointer to carry out the resolution.
///
/// The sequence is:
///    adrp  x0, :tlsdesc:var
///    ldr   x1, [x0, #:tlsdesc_lo12:var]
///    add   x0, x0, #:tlsdesc_lo12:var
///    .tlsdesccall var
///    blr   x1
///    (TPIDR_EL0 offset now in x0)
///
///  The above sequence must be produced unscheduled, to enable the linker to
///  optimize/relax this sequence.
///  Therefore, a pseudo-instruction (TLSDESC_CALLSEQ) is used to represent the
///  above sequence, and expanded really late in the compilation flow, to ensure
///  the sequence is produced as per above.
SDValue AArch64TargetLowering::LowerELFTLSDescCallSeq(SDValue SymAddr,
                                                      const SDLoc &DL,
                                                      SelectionDAG &DAG) const {}

SDValue
AArch64TargetLowering::LowerELFGlobalTLSAddress(SDValue Op,
                                                SelectionDAG &DAG) const {}

SDValue
AArch64TargetLowering::LowerWindowsGlobalTLSAddress(SDValue Op,
                                                    SelectionDAG &DAG) const {}

SDValue AArch64TargetLowering::LowerGlobalTLSAddress(SDValue Op,
                                                     SelectionDAG &DAG) const {}

//===----------------------------------------------------------------------===//
//                      PtrAuthGlobalAddress lowering
//
// We have 3 lowering alternatives to choose from:
// - MOVaddrPAC: similar to MOVaddr, with added PAC.
//   If the GV doesn't need a GOT load (i.e., is locally defined)
//   materialize the pointer using adrp+add+pac. See LowerMOVaddrPAC.
//
// - LOADgotPAC: similar to LOADgot, with added PAC.
//   If the GV needs a GOT load, materialize the pointer using the usual
//   GOT adrp+ldr, +pac. Pointers in GOT are assumed to be not signed, the GOT
//   section is assumed to be read-only (for example, via relro mechanism). See
//   LowerMOVaddrPAC.
//
// - LOADauthptrstatic: similar to LOADgot, but use a
//   special stub slot instead of a GOT slot.
//   Load a signed pointer for symbol 'sym' from a stub slot named
//   'sym$auth_ptr$key$disc' filled by dynamic linker during relocation
//   resolving. This usually lowers to adrp+ldr, but also emits an entry into
//   .data with an @AUTH relocation. See LowerLOADauthptrstatic.
//
// All 3 are pseudos that are expanded late into longer sequences: this lets us
// provide integrity guarantees on the to-be-signed intermediate values.
//
// LOADauthptrstatic is undesirable because it requires a large section filled
// with often similarly-signed pointers, making it a good harvesting target.
// Thus, it's only used for ptrauth references to extern_weak to avoid null
// checks.
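//
// As an illustrative (hypothetical) example, a signed reference such as
//   @ref = global ptr ptrauth (ptr @weak_sym, i32 2)
// where @weak_sym is declared extern_weak would be lowered via
// LOADauthptrstatic, while the same reference to a locally defined global
// would be materialized with MOVaddrPAC.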

SDValue AArch64TargetLowering::LowerPtrAuthGlobalAddressStatically(
    SDValue TGA, SDLoc DL, EVT VT, AArch64PACKey::ID KeyC,
    SDValue Discriminator, SDValue AddrDiscriminator, SelectionDAG &DAG) const {}

SDValue
AArch64TargetLowering::LowerPtrAuthGlobalAddress(SDValue Op,
                                                 SelectionDAG &DAG) const {}

// Looks through \param Val to determine the bit that can be used to
// check the sign of the value. It returns the unextended value and
// the sign bit position.
std::pair<SDValue, uint64_t> lookThroughSignExtension(SDValue Val) {}

SDValue AArch64TargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const {}

SDValue AArch64TargetLowering::LowerFCOPYSIGN(SDValue Op,
                                              SelectionDAG &DAG) const {}

SDValue AArch64TargetLowering::LowerCTPOP_PARITY(SDValue Op,
                                                 SelectionDAG &DAG) const {}

SDValue AArch64TargetLowering::LowerCTTZ(SDValue Op, SelectionDAG &DAG) const {}

SDValue AArch64TargetLowering::LowerMinMax(SDValue Op,
                                           SelectionDAG &DAG) const {}

SDValue AArch64TargetLowering::LowerBitreverse(SDValue Op,
                                               SelectionDAG &DAG) const {}

// Check whether N forms a continuous comparison sequence, i.e. a chain of ORs
// of XORs of the compared values, collecting each compared pair into WorkList.
static bool
isOrXorChain(SDValue N, unsigned &Num,
             SmallVector<std::pair<SDValue, SDValue>, 16> &WorkList) {}

// Transform chains of ORs and XORs, which are usually produced by expanded
// memcmp/bcmp calls.
static SDValue performOrXorChainCombine(SDNode *N, SelectionDAG &DAG) {}
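// As an illustrative sketch, a memcmp/bcmp-style equality test such as
//   (seteq (or (xor a, b), (xor c, d)), 0)
// is the kind of chain recognised here; it can then be emitted as a CMP/CCMP
// sequence instead of materialising the OR/XOR results.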

SDValue AArch64TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {}

SDValue AArch64TargetLowering::LowerSETCCCARRY(SDValue Op,
                                               SelectionDAG &DAG) const {}

SDValue AArch64TargetLowering::LowerSELECT_CC(ISD::CondCode CC, SDValue LHS,
                                              SDValue RHS, SDValue TVal,
                                              SDValue FVal, const SDLoc &dl,
                                              SelectionDAG &DAG) const {}

SDValue AArch64TargetLowering::LowerVECTOR_SPLICE(SDValue Op,
                                                  SelectionDAG &DAG) const {}

SDValue AArch64TargetLowering::LowerSELECT_CC(SDValue Op,
                                              SelectionDAG &DAG) const {}

SDValue AArch64TargetLowering::LowerSELECT(SDValue Op,
                                           SelectionDAG &DAG) const {}

SDValue AArch64TargetLowering::LowerJumpTable(SDValue Op,
                                              SelectionDAG &DAG) const {}

SDValue AArch64TargetLowering::LowerBR_JT(SDValue Op,
                                          SelectionDAG &DAG) const {}

SDValue AArch64TargetLowering::LowerBRIND(SDValue Op, SelectionDAG &DAG) const {}

SDValue AArch64TargetLowering::LowerConstantPool(SDValue Op,
                                                 SelectionDAG &DAG) const {}

SDValue AArch64TargetLowering::LowerBlockAddress(SDValue Op,
                                               SelectionDAG &DAG) const {}

SDValue AArch64TargetLowering::LowerDarwin_VASTART(SDValue Op,
                                                 SelectionDAG &DAG) const {}

SDValue AArch64TargetLowering::LowerWin64_VASTART(SDValue Op,
                                                  SelectionDAG &DAG) const {}

SDValue AArch64TargetLowering::LowerAAPCS_VASTART(SDValue Op,
                                                  SelectionDAG &DAG) const {}

SDValue AArch64TargetLowering::LowerVASTART(SDValue Op,
                                            SelectionDAG &DAG) const {}

SDValue AArch64TargetLowering::LowerVACOPY(SDValue Op,
                                           SelectionDAG &DAG) const {}

SDValue AArch64TargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const {}

SDValue AArch64TargetLowering::LowerFRAMEADDR(SDValue Op,
                                              SelectionDAG &DAG) const {}

SDValue AArch64TargetLowering::LowerSPONENTRY(SDValue Op,
                                              SelectionDAG &DAG) const {}

#define GET_REGISTER_MATCHER
#include "AArch64GenAsmMatcher.inc"

// FIXME? Maybe this could be a TableGen attribute on some registers and
// this table could be generated automatically from RegInfo.
Register AArch64TargetLowering::
getRegisterByName(const char* RegName, LLT VT, const MachineFunction &MF) const {}

SDValue AArch64TargetLowering::LowerADDROFRETURNADDR(SDValue Op,
                                                     SelectionDAG &DAG) const {}

SDValue AArch64TargetLowering::LowerRETURNADDR(SDValue Op,
                                               SelectionDAG &DAG) const {}

/// LowerShiftParts - Lower SHL_PARTS/SRA_PARTS/SRL_PARTS, which return two
/// i64 values and take a 2 x i64 value to shift plus a shift amount.
SDValue AArch64TargetLowering::LowerShiftParts(SDValue Op,
                                               SelectionDAG &DAG) const {}

bool AArch64TargetLowering::isOffsetFoldingLegal(
    const GlobalAddressSDNode *GA) const {}

bool AArch64TargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
                                         bool OptForSize) const {}

//===----------------------------------------------------------------------===//
//                          AArch64 Optimization Hooks
//===----------------------------------------------------------------------===//

static SDValue getEstimate(const AArch64Subtarget *ST, unsigned Opcode,
                           SDValue Operand, SelectionDAG &DAG,
                           int &ExtraSteps) {}

SDValue
AArch64TargetLowering::getSqrtInputTest(SDValue Op, SelectionDAG &DAG,
                                        const DenormalMode &Mode) const {}

SDValue
AArch64TargetLowering::getSqrtResultForDenormInput(SDValue Op,
                                                   SelectionDAG &DAG) const {}

SDValue AArch64TargetLowering::getSqrtEstimate(SDValue Operand,
                                               SelectionDAG &DAG, int Enabled,
                                               int &ExtraSteps,
                                               bool &UseOneConst,
                                               bool Reciprocal) const {}

SDValue AArch64TargetLowering::getRecipEstimate(SDValue Operand,
                                                SelectionDAG &DAG, int Enabled,
                                                int &ExtraSteps) const {}

//===----------------------------------------------------------------------===//
//                          AArch64 Inline Assembly Support
//===----------------------------------------------------------------------===//

// Table of Constraints
// TODO: This is the current set of constraints supported by ARM for the
// compiler; not all of them may make sense.
//
// r - A general register
// w - An FP/SIMD register of some size in the range v0-v31
// x - An FP/SIMD register of some size in the range v0-v15
// I - Constant that can be used with an ADD instruction
// J - Constant that can be used with a SUB instruction
// K - Constant that can be used with a 32-bit logical instruction
// L - Constant that can be used with a 64-bit logical instruction
// M - Constant that can be used as a 32-bit MOV immediate
// N - Constant that can be used as a 64-bit MOV immediate
// Q - A memory reference with base register and no offset
// S - A symbolic address
// Y - Floating point constant zero
// Z - Integer constant zero
//
//   Note that general register operands will be output using their 64-bit x
// register name, whatever the size of the variable, unless the asm operand
// is prefixed by the %w modifier. Floating-point and SIMD register operands
// will be output with the v prefix unless prefixed by the %b, %h, %s, %d or
// %q modifier.
const char *AArch64TargetLowering::LowerXConstraint(EVT ConstraintVT) const {}
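// Example use of the constraints documented above from C inline assembly
// (an illustrative sketch; the variable names are assumed):
//   int res;
//   asm("add %w0, %w1, %2" : "=r"(res) : "r"(lhs), "I"(16));
// Here 'r' binds general registers (printed as w-registers via the %w
// modifier) and 'I' accepts an immediate usable with an ADD instruction.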

enum class PredicateConstraint {};

static std::optional<PredicateConstraint>
parsePredicateConstraint(StringRef Constraint) {}

static const TargetRegisterClass *
getPredicateRegisterClass(PredicateConstraint Constraint, EVT VT) {}

enum class ReducedGprConstraint {};

static std::optional<ReducedGprConstraint>
parseReducedGprConstraint(StringRef Constraint) {}

static const TargetRegisterClass *
getReducedGprRegisterClass(ReducedGprConstraint Constraint, EVT VT) {}

// The set of cc codes supported is taken from
// https://gcc.gnu.org/onlinedocs/gcc/Extended-Asm.html#Flag-Output-Operands
static AArch64CC::CondCode parseConstraintCode(llvm::StringRef Constraint) {}
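// Illustrative use of a flag-output constraint handled here (variable names
// assumed):
//   int is_eq;
//   asm("cmp %w1, %w2" : "=@cceq"(is_eq) : "r"(a), "r"(b));
// The "eq" suffix maps to AArch64CC::EQ and the result is materialized via
// getSETCC below.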

/// Helper function to create 'CSET', which is equivalent to 'CSINC <Wd>, WZR,
/// WZR, invert(<cond>)'.
static SDValue getSETCC(AArch64CC::CondCode CC, SDValue NZCV, const SDLoc &DL,
                        SelectionDAG &DAG) {}

// Lower @cc flag output via getSETCC.
SDValue AArch64TargetLowering::LowerAsmOutputForConstraint(
    SDValue &Chain, SDValue &Glue, const SDLoc &DL,
    const AsmOperandInfo &OpInfo, SelectionDAG &DAG) const {}

/// getConstraintType - Given a constraint letter, return the type of
/// constraint it is for this target.
AArch64TargetLowering::ConstraintType
AArch64TargetLowering::getConstraintType(StringRef Constraint) const {}

/// Examine constraint type and operand type and determine a weight value.
/// This object must already have been set up with the operand type
/// and the current alternative constraint selected.
TargetLowering::ConstraintWeight
AArch64TargetLowering::getSingleConstraintMatchWeight(
    AsmOperandInfo &info, const char *constraint) const {}

std::pair<unsigned, const TargetRegisterClass *>
AArch64TargetLowering::getRegForInlineAsmConstraint(
    const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const {}

EVT AArch64TargetLowering::getAsmOperandValueType(const DataLayout &DL,
                                                  llvm::Type *Ty,
                                                  bool AllowUnknown) const {}

/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
/// vector.  If it is invalid, don't add anything to Ops.
void AArch64TargetLowering::LowerAsmOperandForConstraint(
    SDValue Op, StringRef Constraint, std::vector<SDValue> &Ops,
    SelectionDAG &DAG) const {}

//===----------------------------------------------------------------------===//
//                     AArch64 Advanced SIMD Support
//===----------------------------------------------------------------------===//

/// WidenVector - Given a value in the V64 register class, produce the
/// equivalent value in the V128 register class.
static SDValue WidenVector(SDValue V64Reg, SelectionDAG &DAG) {}

/// getExtFactor - Determine the adjustment factor for the position when
/// generating an "extract from vector registers" instruction.
static unsigned getExtFactor(SDValue &V) {}

// Check if a vector is built from one vector via extracted elements of
// another together with an AND mask, ensuring that all elements fit
// within range. This can be reconstructed using AND and NEON's TBL1.
SDValue ReconstructShuffleWithRuntimeMask(SDValue Op, SelectionDAG &DAG) {}

// Gather data to see if the operation can be modelled as a
// shuffle in combination with VEXTs.
SDValue AArch64TargetLowering::ReconstructShuffle(SDValue Op,
                                                  SelectionDAG &DAG) const {}

// Check if an EXT instruction can handle the shuffle mask when the
// vector sources of the shuffle are the same.
static bool isSingletonEXTMask(ArrayRef<int> M, EVT VT, unsigned &Imm) {}

// Detect patterns of a0,a1,a2,a3,b0,b1,b2,b3,c0,c1,c2,c3,d0,d1,d2,d3 from
// v4i32s. This is really a truncate, which we can construct out of (legal)
// concats and truncate nodes.
static SDValue ReconstructTruncateFromBuildVector(SDValue V, SelectionDAG &DAG) {}

/// Check if a vector shuffle corresponds to a DUP instruction with a larger
/// element width than the vector lane type. If that is the case, the function
/// returns true and writes the value of the DUP instruction lane operand into
/// DupLaneOp.
static bool isWideDUPMask(ArrayRef<int> M, EVT VT, unsigned BlockSize,
                          unsigned &DupLaneOp) {}
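// An illustrative example of the check above: for a v8i8 shuffle with mask
//   <0, 1, 2, 3, 0, 1, 2, 3>
// and BlockSize == 32, every 32-bit block repeats the first one, so this is a
// DUP of 32-bit lane 0 and DupLaneOp is set to 0.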

// Check if an EXT instruction can handle the shuffle mask when the
// vector sources of the shuffle are different.
static bool isEXTMask(ArrayRef<int> M, EVT VT, bool &ReverseEXT,
                      unsigned &Imm) {}

/// isZIP_v_undef_Mask - Special case of isZIPMask for canonical form of
/// "vector_shuffle v, v", i.e., "vector_shuffle v, undef".
/// Mask is e.g., <0, 0, 1, 1> instead of <0, 4, 1, 5>.
static bool isZIP_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) {}

/// isUZP_v_undef_Mask - Special case of isUZPMask for canonical form of
/// "vector_shuffle v, v", i.e., "vector_shuffle v, undef".
/// Mask is e.g., <0, 2, 0, 2> instead of <0, 2, 4, 6>.
static bool isUZP_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) {}

/// isTRN_v_undef_Mask - Special case of isTRNMask for canonical form of
/// "vector_shuffle v, v", i.e., "vector_shuffle v, undef".
/// Mask is e.g., <0, 0, 2, 2> instead of <0, 4, 2, 6>.
static bool isTRN_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) {}

static bool isINSMask(ArrayRef<int> M, int NumInputElements,
                      bool &DstIsLeft, int &Anomaly) {}

static bool isConcatMask(ArrayRef<int> Mask, EVT VT, bool SplitLHS) {}

static SDValue tryFormConcatFromShuffle(SDValue Op, SelectionDAG &DAG) {}

/// GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit
/// the specified operations to build the shuffle. ID is the perfect-shuffle
/// ID, V1 and V2 are the original shuffle inputs. PFEntry is the Perfect shuffle
/// table entry and LHS/RHS are the immediate inputs for this stage of the
/// shuffle.
static SDValue GeneratePerfectShuffle(unsigned ID, SDValue V1,
                                      SDValue V2, unsigned PFEntry, SDValue LHS,
                                      SDValue RHS, SelectionDAG &DAG,
                                      const SDLoc &dl) {}

static SDValue GenerateTBL(SDValue Op, ArrayRef<int> ShuffleMask,
                           SelectionDAG &DAG) {}

static unsigned getDUPLANEOp(EVT EltType) {}

static SDValue constructDup(SDValue V, int Lane, SDLoc dl, EVT VT,
                            unsigned Opcode, SelectionDAG &DAG) {}

// Return true if we can derive a new shuffle mask from the parameter mask by
// checking that every pair of adjacent mask values is consecutive and starts
// at an even index.
static bool isWideTypeMask(ArrayRef<int> M, EVT VT,
                           SmallVectorImpl<int> &NewMask) {}

// Try to widen element type to get a new mask value for a better permutation
// sequence, so that we can use NEON shuffle instructions, such as zip1/2,
// UZP1/2, TRN1/2, REV, INS, etc.
// For example:
//  shufflevector <4 x i32> %a, <4 x i32> %b,
//                <4 x i32> <i32 6, i32 7, i32 2, i32 3>
// is equivalent to:
//  shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 3, i32 1>
// Finally, we can get:
//  mov     v0.d[0], v1.d[1]
static SDValue tryWidenMaskForShuffle(SDValue Op, SelectionDAG &DAG) {}
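
// Illustrative sketch of the mask-widening step described above: each pair of
// adjacent mask values must be consecutive and start at an even index, in
// which case the pair collapses to a single lane of twice the width. For the
// example in the comment, {6, 7, 2, 3} widens to {3, 1}. Standalone example
// only; the in-tree code additionally handles undef lanes and checks that the
// widened type is legal.
static bool widenShuffleMaskByTwo(ArrayRef<int> M,
                                  SmallVectorImpl<int> &NewMask) {
  if (M.size() % 2 != 0)
    return false;
  NewMask.clear();
  for (unsigned I = 0; I < M.size(); I += 2) {
    if (M[I] < 0 || M[I + 1] != M[I] + 1 || M[I] % 2 != 0)
      return false;
    NewMask.push_back(M[I] / 2);
  }
  return true;
}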

// Try to fold shuffle (tbl2, tbl2) into a single tbl4.
static SDValue tryToConvertShuffleOfTbl2ToTbl4(SDValue Op,
                                               ArrayRef<int> ShuffleMask,
                                               SelectionDAG &DAG) {}

// Baseline legalization for ZERO_EXTEND_VECTOR_INREG will blend-in zeros,
// but we don't have an appropriate instruction,
// so custom-lower it as ZIP1-with-zeros.
SDValue
AArch64TargetLowering::LowerZERO_EXTEND_VECTOR_INREG(SDValue Op,
                                                     SelectionDAG &DAG) const {}

SDValue AArch64TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
                                                   SelectionDAG &DAG) const {}

SDValue AArch64TargetLowering::LowerSPLAT_VECTOR(SDValue Op,
                                                 SelectionDAG &DAG) const {}

SDValue AArch64TargetLowering::LowerDUPQLane(SDValue Op,
                                             SelectionDAG &DAG) const {}


static bool resolveBuildVector(BuildVectorSDNode *BVN, APInt &CnstBits,
                               APInt &UndefBits) {}

// Try 64-bit splatted SIMD immediate.
static SDValue tryAdvSIMDModImm64(unsigned NewOp, SDValue Op, SelectionDAG &DAG,
                                 const APInt &Bits) {}

// Try 32-bit splatted SIMD immediate.
static SDValue tryAdvSIMDModImm32(unsigned NewOp, SDValue Op, SelectionDAG &DAG,
                                  const APInt &Bits,
                                  const SDValue *LHS = nullptr) {}

// Try 16-bit splatted SIMD immediate.
static SDValue tryAdvSIMDModImm16(unsigned NewOp, SDValue Op, SelectionDAG &DAG,
                                  const APInt &Bits,
                                  const SDValue *LHS = nullptr) {}

// Try 32-bit splatted SIMD immediate with shifted ones.
static SDValue tryAdvSIMDModImm321s(unsigned NewOp, SDValue Op,
                                    SelectionDAG &DAG, const APInt &Bits) {}

// Try 8-bit splatted SIMD immediate.
static SDValue tryAdvSIMDModImm8(unsigned NewOp, SDValue Op, SelectionDAG &DAG,
                                 const APInt &Bits) {}

// Try FP splatted SIMD immediate.
static SDValue tryAdvSIMDModImmFP(unsigned NewOp, SDValue Op, SelectionDAG &DAG,
                                  const APInt &Bits) {}

// Specialized code to quickly find if PotentialBVec is a BuildVector that
// consists of only the same constant int value, returned in reference arg
// ConstVal.
static bool isAllConstantBuildVector(const SDValue &PotentialBVec,
                                     uint64_t &ConstVal) {}

static bool isAllInactivePredicate(SDValue N) {}

static bool isAllActivePredicate(SelectionDAG &DAG, SDValue N) {}

// Attempt to form a vector S[LR]I from (or (and X, BvecC1), (lsl Y, C2)),
// to (SLI X, Y, C2), where X and Y have matching vector types, BvecC1 is a
// BUILD_VECTOR with constant element C1, C2 is a constant, and:
//   - for the SLI case: C1 == ~(Ones(ElemSizeInBits) << C2)
//   - for the SRI case: C1 == ~(Ones(ElemSizeInBits) >> C2)
// The (or (lsl Y, C2), (and X, BvecC1)) case is also handled.
static SDValue tryLowerToSLI(SDNode *N, SelectionDAG &DAG) {}
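
// Illustrative sketch of the constant relationship stated above, evaluated on
// plain 64-bit integers: Ones(N) is the all-ones value of an N-bit element,
// and the required AND mask C1 keeps exactly the bits that the shifted operand
// cannot touch. Helper names are this sketch's own; the in-tree code works on
// APInt splat constants instead.
static uint64_t expectedSLIAndMask(unsigned ElemSizeInBits, unsigned C2) {
  uint64_t Ones =
      ElemSizeInBits >= 64 ? ~0ULL : ((1ULL << ElemSizeInBits) - 1);
  // SLI case: C1 == ~(Ones(ElemSizeInBits) << C2), truncated to the element.
  return ~(Ones << C2) & Ones;
}

static uint64_t expectedSRIAndMask(unsigned ElemSizeInBits, unsigned C2) {
  uint64_t Ones =
      ElemSizeInBits >= 64 ? ~0ULL : ((1ULL << ElemSizeInBits) - 1);
  // SRI case: C1 == ~(Ones(ElemSizeInBits) >> C2), truncated to the element.
  return ~(Ones >> C2) & Ones;
}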

/// Try to lower the construction of a pointer alias mask to a WHILEWR.
/// The mask's enabled lanes represent the elements that will not overlap across
/// one loop iteration. This tries to match:
/// or (splat (setcc_lt (sub ptrA, ptrB), -(element_size - 1))),
///    (get_active_lane_mask 0, (div (sub ptrA, ptrB), element_size))
SDValue tryWhileWRFromOR(SDValue Op, SelectionDAG &DAG,
                         const AArch64Subtarget &Subtarget) {}

SDValue AArch64TargetLowering::LowerVectorOR(SDValue Op,
                                             SelectionDAG &DAG) const {}

// Normalize the operands of BUILD_VECTOR. The value of constant operands will
// be truncated to fit element width.
static SDValue NormalizeBuildVector(SDValue Op,
                                    SelectionDAG &DAG) {}
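
// Illustrative sketch of the normalisation rule above: a constant operand is
// simply truncated to the element width before being re-used, so a 32-bit
// value of 0x1FF placed into a v8i8 BUILD_VECTOR becomes 0xFF. Standalone
// example; the in-tree code performs this on APInt constants while rebuilding
// the node.
static uint64_t truncateConstantToEltWidth(uint64_t Val, unsigned EltBits) {
  if (EltBits >= 64)
    return Val;
  return Val & ((1ULL << EltBits) - 1);
}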

static SDValue ConstantBuildVector(SDValue Op, SelectionDAG &DAG,
                                   const AArch64Subtarget *ST) {}

SDValue AArch64TargetLowering::LowerBUILD_VECTOR(SDValue Op,
                                                 SelectionDAG &DAG) const {}

SDValue AArch64TargetLowering::LowerCONCAT_VECTORS(SDValue Op,
                                                   SelectionDAG &DAG) const {}

SDValue AArch64TargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op,
                                                      SelectionDAG &DAG) const {}

SDValue
AArch64TargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
                                               SelectionDAG &DAG) const {}

SDValue AArch64TargetLowering::LowerEXTRACT_SUBVECTOR(SDValue Op,
                                                      SelectionDAG &DAG) const {}

SDValue AArch64TargetLowering::LowerINSERT_SUBVECTOR(SDValue Op,
                                                     SelectionDAG &DAG) const {}

static bool isPow2Splat(SDValue Op, uint64_t &SplatVal, bool &Negated) {}

SDValue AArch64TargetLowering::LowerDIV(SDValue Op, SelectionDAG &DAG) const {}

bool AArch64TargetLowering::shouldExpandBuildVectorWithShuffles(
    EVT VT, unsigned DefinedValues) const {}

bool AArch64TargetLowering::isShuffleMaskLegal(ArrayRef<int> M, EVT VT) const {}

bool AArch64TargetLowering::isVectorClearMaskLegal(ArrayRef<int> M,
                                                   EVT VT) const {}

static bool getVShiftImm(SDValue Op, unsigned ElementBits, int64_t &Cnt) {}

static bool isVShiftLImm(SDValue Op, EVT VT, bool isLong, int64_t &Cnt) {}

static bool isVShiftRImm(SDValue Op, EVT VT, bool isNarrow, int64_t &Cnt) {}

SDValue AArch64TargetLowering::LowerTRUNCATE(SDValue Op,
                                             SelectionDAG &DAG) const {}

static bool canLowerSRLToRoundingShiftForVT(SDValue Shift, EVT ResVT,
                                            SelectionDAG &DAG,
                                            unsigned &ShiftValue,
                                            SDValue &RShOperand) {}

SDValue AArch64TargetLowering::LowerVectorSRA_SRL_SHL(SDValue Op,
                                                      SelectionDAG &DAG) const {}

static SDValue EmitVectorComparison(SDValue LHS, SDValue RHS,
                                    AArch64CC::CondCode CC, bool NoNans, EVT VT,
                                    const SDLoc &dl, SelectionDAG &DAG) {}

SDValue AArch64TargetLowering::LowerVSETCC(SDValue Op,
                                           SelectionDAG &DAG) const {}

static SDValue getReductionSDNode(unsigned Op, SDLoc DL, SDValue ScalarOp,
                                  SelectionDAG &DAG) {}

static SDValue getVectorBitwiseReduce(unsigned Opcode, SDValue Vec, EVT VT,
                                      SDLoc DL, SelectionDAG &DAG) {}

SDValue AArch64TargetLowering::LowerVECREDUCE(SDValue Op,
                                              SelectionDAG &DAG) const {}

SDValue AArch64TargetLowering::LowerATOMIC_LOAD_AND(SDValue Op,
                                                    SelectionDAG &DAG) const {}

SDValue
AArch64TargetLowering::LowerWindowsDYNAMIC_STACKALLOC(SDValue Op,
                                                      SelectionDAG &DAG) const {}

SDValue
AArch64TargetLowering::LowerInlineDYNAMIC_STACKALLOC(SDValue Op,
                                                     SelectionDAG &DAG) const {}

SDValue
AArch64TargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
                                               SelectionDAG &DAG) const {}

SDValue AArch64TargetLowering::LowerAVG(SDValue Op, SelectionDAG &DAG,
                                        unsigned NewOp) const {}

SDValue AArch64TargetLowering::LowerVSCALE(SDValue Op,
                                           SelectionDAG &DAG) const {}

template <unsigned NumVecs>
static bool
setInfoSVEStN(const AArch64TargetLowering &TLI, const DataLayout &DL,
              AArch64TargetLowering::IntrinsicInfo &Info, const CallInst &CI) {}

bool AArch64TargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
                                               const CallInst &I,
                                               MachineFunction &MF,
                                               unsigned Intrinsic) const {}

bool AArch64TargetLowering::shouldReduceLoadWidth(SDNode *Load,
                                                  ISD::LoadExtType ExtTy,
                                                  EVT NewVT) const {}

bool AArch64TargetLowering::shouldRemoveRedundantExtend(SDValue Extend) const {}

bool AArch64TargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const {}

bool AArch64TargetLowering::isTruncateFree(EVT VT1, EVT VT2) const {}

bool AArch64TargetLowering::isProfitableToHoist(Instruction *I) const {}

bool AArch64TargetLowering::isZExtFree(Type *Ty1, Type *Ty2) const {}

bool AArch64TargetLowering::isZExtFree(EVT VT1, EVT VT2) const {}

bool AArch64TargetLowering::isZExtFree(SDValue Val, EVT VT2) const {}

bool AArch64TargetLowering::isExtFreeImpl(const Instruction *Ext) const {}

static bool isSplatShuffle(Value *V) {}

static bool areExtractShuffleVectors(Value *Op1, Value *Op2,
                                     bool AllowSplat = false) {}

static bool areExtractExts(Value *Ext1, Value *Ext2) {}

static bool isOperandOfVmullHighP64(Value *Op) {}

static bool areOperandsOfVmullHighP64(Value *Op1, Value *Op2) {}

static bool shouldSinkVectorOfPtrs(Value *Ptrs, SmallVectorImpl<Use *> &Ops) {}

static bool shouldSinkVScale(Value *Op, SmallVectorImpl<Use *> &Ops) {}

bool AArch64TargetLowering::shouldSinkOperands(
    Instruction *I, SmallVectorImpl<Use *> &Ops) const {}

static bool createTblShuffleMask(unsigned SrcWidth, unsigned DstWidth,
                                 unsigned NumElts, bool IsLittleEndian,
                                 SmallVectorImpl<int> &Mask) {}

static Value *createTblShuffleForZExt(IRBuilderBase &Builder, Value *Op,
                                      FixedVectorType *ZExtTy,
                                      FixedVectorType *DstTy,
                                      bool IsLittleEndian) {}

static Value *createTblShuffleForSExt(IRBuilderBase &Builder, Value *Op,
                                      FixedVectorType *DstTy,
                                      bool IsLittleEndian) {}

static void createTblForTrunc(TruncInst *TI, bool IsLittleEndian) {}

bool AArch64TargetLowering::optimizeExtendOrTruncateConversion(
    Instruction *I, Loop *L, const TargetTransformInfo &TTI) const {}

bool AArch64TargetLowering::hasPairedLoad(EVT LoadedType,
                                          Align &RequiredAligment) const {}

unsigned AArch64TargetLowering::getNumInterleavedAccesses(
    VectorType *VecTy, const DataLayout &DL, bool UseScalable) const {}

MachineMemOperand::Flags
AArch64TargetLowering::getTargetMMOFlags(const Instruction &I) const {}

bool AArch64TargetLowering::isLegalInterleavedAccessType(
    VectorType *VecTy, const DataLayout &DL, bool &UseScalable) const {}

static ScalableVectorType *getSVEContainerIRType(FixedVectorType *VTy) {}

static Function *getStructuredLoadFunction(Module *M, unsigned Factor,
                                           bool Scalable, Type *LDVTy,
                                           Type *PtrTy) {}

static Function *getStructuredStoreFunction(Module *M, unsigned Factor,
                                            bool Scalable, Type *STVTy,
                                            Type *PtrTy) {}

bool AArch64TargetLowering::lowerInterleavedLoad(
    LoadInst *LI, ArrayRef<ShuffleVectorInst *> Shuffles,
    ArrayRef<unsigned> Indices, unsigned Factor) const {}

template <typename Iter>
bool hasNearbyPairedStore(Iter It, Iter End, Value *Ptr, const DataLayout &DL) {}

bool AArch64TargetLowering::lowerInterleavedStore(StoreInst *SI,
                                                  ShuffleVectorInst *SVI,
                                                  unsigned Factor) const {}

bool getDeinterleave2Values(
    Value *DI, SmallVectorImpl<Instruction *> &DeinterleavedValues,
    SmallVectorImpl<Instruction *> &DeInterleaveDeadInsts) {}

bool getDeinterleave4Values(
    Value *DI, SmallVectorImpl<Instruction *> &DeinterleavedValues,
    SmallVectorImpl<Instruction *> &DeInterleaveDeadInsts) {}

bool getDeinterleavedValues(
    Value *DI, SmallVectorImpl<Instruction *> &DeinterleavedValues,
    SmallVectorImpl<Instruction *> &DeInterleaveDeadInsts) {}

bool AArch64TargetLowering::lowerDeinterleaveIntrinsicToLoad(
    IntrinsicInst *DI, LoadInst *LI,
    SmallVectorImpl<Instruction *> &DeadInsts) const {}

bool getValuesToInterleave(
    Value *II, SmallVectorImpl<Value *> &InterleavedValues,
    SmallVectorImpl<Instruction *> &InterleaveDeadInsts) {}

bool AArch64TargetLowering::lowerInterleaveIntrinsicToStore(
    IntrinsicInst *II, StoreInst *SI,
    SmallVectorImpl<Instruction *> &DeadInsts) const {}

EVT AArch64TargetLowering::getOptimalMemOpType(
    const MemOp &Op, const AttributeList &FuncAttributes) const {}

LLT AArch64TargetLowering::getOptimalMemOpLLT(
    const MemOp &Op, const AttributeList &FuncAttributes) const {}

bool AArch64TargetLowering::isLegalAddImmediate(int64_t Immed) const {}

bool AArch64TargetLowering::isLegalAddScalableImmediate(int64_t Imm) const {}

bool AArch64TargetLowering::isMulAddWithConstProfitable(
    SDValue AddNode, SDValue ConstNode) const {}

bool AArch64TargetLowering::isLegalICmpImmediate(int64_t Immed) const {}

bool AArch64TargetLowering::isLegalAddressingMode(const DataLayout &DL,
                                                  const AddrMode &AMode, Type *Ty,
                                                  unsigned AS, Instruction *I) const {}

int64_t
AArch64TargetLowering::getPreferredLargeGEPBaseOffset(int64_t MinOffset,
                                                      int64_t MaxOffset) const {}

bool AArch64TargetLowering::shouldConsiderGEPOffsetSplit() const {}

bool AArch64TargetLowering::isFMAFasterThanFMulAndFAdd(
    const MachineFunction &MF, EVT VT) const {}

bool AArch64TargetLowering::isFMAFasterThanFMulAndFAdd(const Function &F,
                                                       Type *Ty) const {}

bool AArch64TargetLowering::generateFMAsInMachineCombiner(
    EVT VT, CodeGenOptLevel OptLevel) const {}

const MCPhysReg *
AArch64TargetLowering::getScratchRegisters(CallingConv::ID) const {}

ArrayRef<MCPhysReg> AArch64TargetLowering::getRoundingControlRegisters() const {}

bool
AArch64TargetLowering::isDesirableToCommuteWithShift(const SDNode *N,
                                                     CombineLevel Level) const {}

bool AArch64TargetLowering::isDesirableToCommuteXorWithShift(
    const SDNode *N) const {}

bool AArch64TargetLowering::shouldFoldConstantShiftPairToMask(
    const SDNode *N, CombineLevel Level) const {}

bool AArch64TargetLowering::shouldFoldSelectWithIdentityConstant(
    unsigned BinOpcode, EVT VT) const {}

bool AArch64TargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
                                                              Type *Ty) const {}

bool AArch64TargetLowering::isExtractSubvectorCheap(EVT ResVT, EVT SrcVT,
                                                    unsigned Index) const {}

static SDValue foldVectorXorShiftIntoCmp(SDNode *N, SelectionDAG &DAG,
                                         const AArch64Subtarget *Subtarget) {}

static SDValue performVecReduceAddCombineWithUADDLP(SDNode *N,
                                                    SelectionDAG &DAG) {}

static SDValue performVecReduceAddCombine(SDNode *N, SelectionDAG &DAG,
                                          const AArch64Subtarget *ST) {}

static SDValue performUADDVAddCombine(SDValue A, SelectionDAG &DAG) {}

static SDValue performUADDVZextCombine(SDValue A, SelectionDAG &DAG) {}

static SDValue performUADDVCombine(SDNode *N, SelectionDAG &DAG) {}

static SDValue performXorCombine(SDNode *N, SelectionDAG &DAG,
                                 TargetLowering::DAGCombinerInfo &DCI,
                                 const AArch64Subtarget *Subtarget) {}

SDValue
AArch64TargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor,
                                     SelectionDAG &DAG,
                                     SmallVectorImpl<SDNode *> &Created) const {}

SDValue
AArch64TargetLowering::BuildSREMPow2(SDNode *N, const APInt &Divisor,
                                     SelectionDAG &DAG,
                                     SmallVectorImpl<SDNode *> &Created) const {}

static std::optional<unsigned> IsSVECntIntrinsic(SDValue S) {}

static EVT calculatePreExtendType(SDValue Extend) {}

static SDValue performBuildShuffleExtendCombine(SDValue BV, SelectionDAG &DAG) {}

static SDValue performMulVectorExtendCombine(SDNode *Mul, SelectionDAG &DAG) {}

static SDValue performMulVectorCmpZeroCombine(SDNode *N, SelectionDAG &DAG) {}

static SDValue performVectorExtCombine(SDNode *N, SelectionDAG &DAG) {}

static SDValue performMulCombine(SDNode *N, SelectionDAG &DAG,
                                 TargetLowering::DAGCombinerInfo &DCI,
                                 const AArch64Subtarget *Subtarget) {}

static SDValue performVectorCompareAndMaskUnaryOpCombine(SDNode *N,
                                                         SelectionDAG &DAG) {}

static SDValue performIntToFpCombine(SDNode *N, SelectionDAG &DAG,
                                     const AArch64Subtarget *Subtarget) {}

static SDValue performFpToIntCombine(SDNode *N, SelectionDAG &DAG,
                                     TargetLowering::DAGCombinerInfo &DCI,
                                     const AArch64Subtarget *Subtarget) {}

static SDValue tryCombineToBSL(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
                               const AArch64TargetLowering &TLI) {}

static SDValue performANDORCSELCombine(SDNode *N, SelectionDAG &DAG) {}

static SDValue performORCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
                                const AArch64Subtarget *Subtarget,
                                const AArch64TargetLowering &TLI) {}

static bool isConstantSplatVectorMaskForType(SDNode *N, EVT MemVT) {}

static SDValue performReinterpretCastCombine(SDNode *N) {}

static SDValue performSVEAndCombine(SDNode *N,
                                    TargetLowering::DAGCombinerInfo &DCI) {}

static SDValue performANDSETCCCombine(SDNode *N,
                                      TargetLowering::DAGCombinerInfo &DCI) {}

static SDValue performANDCombine(SDNode *N,
                                 TargetLowering::DAGCombinerInfo &DCI) {}

static SDValue performFADDCombine(SDNode *N,
                                  TargetLowering::DAGCombinerInfo &DCI) {}

static bool hasPairwiseAdd(unsigned Opcode, EVT VT, bool FullFP16) {}

// Forward declaration; the definition appears further down, next to the SVE
// reduction combines.
static SDValue getPTest(SelectionDAG &DAG, EVT VT, SDValue Pg, SDValue Op,
                        AArch64CC::CondCode Cond);

static bool isPredicateCCSettingOp(SDValue N) {}

static SDValue
performFirstTrueTestVectorCombine(SDNode *N,
                                  TargetLowering::DAGCombinerInfo &DCI,
                                  const AArch64Subtarget *Subtarget) {}

static SDValue
performLastTrueTestVectorCombine(SDNode *N,
                                 TargetLowering::DAGCombinerInfo &DCI,
                                 const AArch64Subtarget *Subtarget) {}

static SDValue
performExtractVectorEltCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
                               const AArch64Subtarget *Subtarget) {}

static SDValue performConcatVectorsCombine(SDNode *N,
                                           TargetLowering::DAGCombinerInfo &DCI,
                                           SelectionDAG &DAG) {}

static SDValue
performExtractSubvectorCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
                               SelectionDAG &DAG) {}

static SDValue
performInsertSubvectorCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
                              SelectionDAG &DAG) {}

static SDValue tryCombineFixedPointConvert(SDNode *N,
                                           TargetLowering::DAGCombinerInfo &DCI,
                                           SelectionDAG &DAG) {}

static SDValue tryExtendDUPToExtractHigh(SDValue N, SelectionDAG &DAG) {}

static bool isEssentiallyExtractHighSubvector(SDValue N) {}

struct GenericSetCCInfo {}

struct AArch64SetCCInfo {}

union SetCCInfo {}

struct SetCCInfoAndKind {}

static bool isSetCC(SDValue Op, SetCCInfoAndKind &SetCCInfo) {}

static bool isSetCCOrZExtSetCC(const SDValue& Op, SetCCInfoAndKind &Info) {}

static SDValue performSetccAddFolding(SDNode *Op, SelectionDAG &DAG) {}

static SDValue performAddUADDVCombine(SDNode *N, SelectionDAG &DAG) {}

static SDValue performAddCSelIntoCSinc(SDNode *N, SelectionDAG &DAG) {}

static SDValue performAddDotCombine(SDNode *N, SelectionDAG &DAG) {}

static bool isNegatedInteger(SDValue Op) {}

static SDValue getNegatedInteger(SDValue Op, SelectionDAG &DAG) {}

static SDValue performNegCSelCombine(SDNode *N, SelectionDAG &DAG) {}

static SDValue performAddSubLongCombine(SDNode *N,
                                        TargetLowering::DAGCombinerInfo &DCI) {}

static bool isCMP(SDValue Op) {}

static std::optional<AArch64CC::CondCode> getCSETCondCode(SDValue Op) {}

static SDValue foldOverflowCheck(SDNode *Op, SelectionDAG &DAG, bool IsAdd) {}

static SDValue foldADCToCINC(SDNode *N, SelectionDAG &DAG) {}

static SDValue performBuildVectorCombine(SDNode *N,
                                         TargetLowering::DAGCombinerInfo &DCI,
                                         SelectionDAG &DAG) {}

static SDValue performTruncateCombine(SDNode *N,
                                      SelectionDAG &DAG) {}

static bool isExtendOrShiftOperand(SDValue N) {}

static SDValue performAddCombineSubShift(SDNode *N, SDValue SUB, SDValue Z,
                                         SelectionDAG &DAG) {}

static SDValue performAddCombineForShiftedOperands(SDNode *N,
                                                   SelectionDAG &DAG) {}

static SDValue performSubAddMULCombine(SDNode *N, SelectionDAG &DAG) {}

static SDValue
performSVEMulAddSubCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {}

static SDValue performAddSubIntoVectorOp(SDNode *N, SelectionDAG &DAG) {}

static bool isLoadOrMultipleLoads(SDValue B, SmallVector<LoadSDNode *> &Loads) {}

static bool areLoadedOffsetButOtherwiseSame(SDValue Op0, SDValue Op1,
                                            SelectionDAG &DAG,
                                            unsigned &NumSubLoads) {}

static SDValue performExtBinopLoadFold(SDNode *N, SelectionDAG &DAG) {}

static SDValue performAddSubCombine(SDNode *N,
                                    TargetLowering::DAGCombinerInfo &DCI) {}

static SDValue tryCombineLongOpWithDup(unsigned IID, SDNode *N,
                                       TargetLowering::DAGCombinerInfo &DCI,
                                       SelectionDAG &DAG) {}

static SDValue tryCombineShiftImm(unsigned IID, SDNode *N, SelectionDAG &DAG) {}

static SDValue tryCombineCRC32(unsigned Mask, SDNode *N, SelectionDAG &DAG) {}

static SDValue combineAcrossLanesIntrinsic(unsigned Opc, SDNode *N,
                                           SelectionDAG &DAG) {}

static SDValue LowerSVEIntrinsicIndex(SDNode *N, SelectionDAG &DAG) {}

static SDValue LowerSVEIntrinsicDUP(SDNode *N, SelectionDAG &DAG) {}

static SDValue LowerSVEIntrinsicEXT(SDNode *N, SelectionDAG &DAG) {}

static SDValue tryConvertSVEWideCompare(SDNode *N, ISD::CondCode CC,
                                        TargetLowering::DAGCombinerInfo &DCI,
                                        SelectionDAG &DAG) {}

static SDValue getPTest(SelectionDAG &DAG, EVT VT, SDValue Pg, SDValue Op,
                        AArch64CC::CondCode Cond) {}

static SDValue combineSVEReductionInt(SDNode *N, unsigned Opc,
                                      SelectionDAG &DAG) {}

static SDValue combineSVEReductionFP(SDNode *N, unsigned Opc,
                                     SelectionDAG &DAG) {}

static SDValue combineSVEReductionOrderedFP(SDNode *N, unsigned Opc,
                                            SelectionDAG &DAG) {}

static SDValue convertMergedOpToPredOp(SDNode *N, unsigned Opc,
                                       SelectionDAG &DAG, bool UnpredOp = false,
                                       bool SwapOperands = false) {}

static SDValue tryCombineWhileLo(SDNode *N,
                                 TargetLowering::DAGCombinerInfo &DCI,
                                 const AArch64Subtarget *Subtarget) {}

SDValue tryLowerPartialReductionToDot(SDNode *N,
                                      const AArch64Subtarget *Subtarget,
                                      SelectionDAG &DAG) {}

static SDValue performIntrinsicCombine(SDNode *N,
                                       TargetLowering::DAGCombinerInfo &DCI,
                                       const AArch64Subtarget *Subtarget) {}

static bool isCheapToExtend(const SDValue &N) {}

static SDValue
performSignExtendSetCCCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
                              SelectionDAG &DAG) {}

static SDValue performZExtDeinterleaveShuffleCombine(SDNode *N,
                                                     SelectionDAG &DAG) {}

static SDValue performZExtUZPCombine(SDNode *N, SelectionDAG &DAG) {}

static SDValue performExtendCombine(SDNode *N,
                                    TargetLowering::DAGCombinerInfo &DCI,
                                    SelectionDAG &DAG) {}

static SDValue splitStoreSplat(SelectionDAG &DAG, StoreSDNode &St,
                               SDValue SplatVal, unsigned NumVecElts) {}

static MVT getSVEContainerType(EVT ContentTy) {}

static SDValue performLD1Combine(SDNode *N, SelectionDAG &DAG, unsigned Opc) {}

static SDValue performLDNT1Combine(SDNode *N, SelectionDAG &DAG) {}

template <unsigned Opcode>
static SDValue performLD1ReplicateCombine(SDNode *N, SelectionDAG &DAG) {}

static SDValue performST1Combine(SDNode *N, SelectionDAG &DAG) {}

static SDValue performSTNT1Combine(SDNode *N, SelectionDAG &DAG) {}

static SDValue replaceZeroVectorStore(SelectionDAG &DAG, StoreSDNode &St) {}

static SDValue replaceSplatVectorStore(SelectionDAG &DAG, StoreSDNode &St) {}

static SDValue splitStores(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
                           SelectionDAG &DAG,
                           const AArch64Subtarget *Subtarget) {}

static SDValue performSpliceCombine(SDNode *N, SelectionDAG &DAG) {}

static SDValue performUnpackCombine(SDNode *N, SelectionDAG &DAG,
                                    const AArch64Subtarget *Subtarget) {}

static bool isHalvingTruncateAndConcatOfLegalIntScalableType(SDNode *N) {}

static SDValue tryCombineExtendRShTrunc(SDNode *N, SelectionDAG &DAG) {}

static SDValue trySimplifySrlAddToRshrnb(SDValue Srl, SelectionDAG &DAG,
                                         const AArch64Subtarget *Subtarget) {}

static SDValue isNVCastToHalfWidthElements(SDValue V) {}

static SDValue performUzpCombine(SDNode *N, SelectionDAG &DAG,
                                 const AArch64Subtarget *Subtarget) {}

static SDValue performGLD1Combine(SDNode *N, SelectionDAG &DAG) {}

static SDValue performVectorShiftCombine(SDNode *N,
                                         const AArch64TargetLowering &TLI,
                                         TargetLowering::DAGCombinerInfo &DCI) {}

static SDValue performSunpkloCombine(SDNode *N, SelectionDAG &DAG) {}

static SDValue performPostLD1Combine(SDNode *N,
                                     TargetLowering::DAGCombinerInfo &DCI,
                                     bool IsLaneOp) {}

static bool performTBISimplification(SDValue Addr,
                                     TargetLowering::DAGCombinerInfo &DCI,
                                     SelectionDAG &DAG) {}

static SDValue foldTruncStoreOfExt(SelectionDAG &DAG, SDNode *N) {}

static SDValue combineV3I8LoadExt(LoadSDNode *LD, SelectionDAG &DAG) {}

static SDValue performLOADCombine(SDNode *N,
                                  TargetLowering::DAGCombinerInfo &DCI,
                                  SelectionDAG &DAG,
                                  const AArch64Subtarget *Subtarget) {}

static EVT tryGetOriginalBoolVectorType(SDValue Op, int Depth = 0) {}

static SDValue vectorToScalarBitmask(SDNode *N, SelectionDAG &DAG) {}

static SDValue combineBoolVectorAndTruncateStore(SelectionDAG &DAG,
                                                 StoreSDNode *Store) {}

bool isHalvingTruncateOfLegalScalableType(EVT SrcVT, EVT DstVT) {}

static SDValue combineI8TruncStore(StoreSDNode *ST, SelectionDAG &DAG,
                                   const AArch64Subtarget *Subtarget) {}

static SDValue performSTORECombine(SDNode *N,
                                   TargetLowering::DAGCombinerInfo &DCI,
                                   SelectionDAG &DAG,
                                   const AArch64Subtarget *Subtarget) {}

static SDValue performMSTORECombine(SDNode *N,
                                    TargetLowering::DAGCombinerInfo &DCI,
                                    SelectionDAG &DAG,
                                    const AArch64Subtarget *Subtarget) {}

static bool foldIndexIntoBase(SDValue &BasePtr, SDValue &Index, SDValue Scale,
                              SDLoc DL, SelectionDAG &DAG) {}

static bool findMoreOptimalIndexType(const MaskedGatherScatterSDNode *N,
                                     SDValue &BasePtr, SDValue &Index,
                                     SelectionDAG &DAG) {}

static SDValue performMaskedGatherScatterCombine(
    SDNode *N, TargetLowering::DAGCombinerInfo &DCI, SelectionDAG &DAG) {}

static SDValue performNEONPostLDSTCombine(SDNode *N,
                                          TargetLowering::DAGCombinerInfo &DCI,
                                          SelectionDAG &DAG) {}

static
bool checkValueWidth(SDValue V, unsigned width, ISD::LoadExtType &ExtType) {}

static bool isEquivalentMaskless(unsigned CC, unsigned width,
                                 ISD::LoadExtType ExtType, int AddConstant,
                                 int CompConstant) {}

static SDValue performSubsToAndsCombine(SDNode *N, SDNode *SubsNode,
                                        SDNode *AndNode, SelectionDAG &DAG,
                                        unsigned CCIndex, unsigned CmpIndex,
                                        unsigned CC) {}

static
SDValue performCONDCombine(SDNode *N,
                           TargetLowering::DAGCombinerInfo &DCI,
                           SelectionDAG &DAG, unsigned CCIndex,
                           unsigned CmpIndex) {}

static SDValue performBRCONDCombine(SDNode *N,
                                    TargetLowering::DAGCombinerInfo &DCI,
                                    SelectionDAG &DAG) {}

static SDValue foldCSELofCTTZ(SDNode *N, SelectionDAG &DAG) {}

static SDValue foldCSELOfCSEL(SDNode *Op, SelectionDAG &DAG) {}

static SDValue performCSELCombine(SDNode *N,
                                  TargetLowering::DAGCombinerInfo &DCI,
                                  SelectionDAG &DAG) {}

static SDValue tryToWidenSetCCOperands(SDNode *Op, SelectionDAG &DAG) {}

static SDValue
performVecReduceBitwiseCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
                               SelectionDAG &DAG) {}

static SDValue performSETCCCombine(SDNode *N,
                                   TargetLowering::DAGCombinerInfo &DCI,
                                   SelectionDAG &DAG) {}

static SDValue performFlagSettingCombine(SDNode *N,
                                         TargetLowering::DAGCombinerInfo &DCI,
                                         unsigned GenericOpcode) {}

static SDValue performSetCCPunpkCombine(SDNode *N, SelectionDAG &DAG) {}

static SDValue
performSetccMergeZeroCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {}

static SDValue getTestBitOperand(SDValue Op, unsigned &Bit, bool &Invert,
                                 SelectionDAG &DAG) {}

static SDValue performTBZCombine(SDNode *N,
                                 TargetLowering::DAGCombinerInfo &DCI,
                                 SelectionDAG &DAG) {}

static SDValue trySwapVSelectOperands(SDNode *N, SelectionDAG &DAG) {}

static SDValue performVSelectCombine(SDNode *N, SelectionDAG &DAG) {}

static SDValue performSelectCombine(SDNode *N,
                                    TargetLowering::DAGCombinerInfo &DCI) {}

static SDValue performDUPCombine(SDNode *N,
                                 TargetLowering::DAGCombinerInfo &DCI) {}

static SDValue performNVCASTCombine(SDNode *N, SelectionDAG &DAG) {}

static SDValue performGlobalAddressCombine(SDNode *N, SelectionDAG &DAG,
                                           const AArch64Subtarget *Subtarget,
                                           const TargetMachine &TM) {}

static SDValue performCTLZCombine(SDNode *N, SelectionDAG &DAG,
                                  const AArch64Subtarget *Subtarget) {}

static SDValue getScaledOffsetForBitWidth(SelectionDAG &DAG, SDValue Offset,
                                          SDLoc DL, unsigned BitWidth) {}

inline static bool isValidImmForSVEVecImmAddrMode(unsigned OffsetInBytes,
                                                  unsigned ScalarSizeInBytes) {}

static bool isValidImmForSVEVecImmAddrMode(SDValue Offset,
                                           unsigned ScalarSizeInBytes) {}

static SDValue performScatterStoreCombine(SDNode *N, SelectionDAG &DAG,
                                          unsigned Opcode,
                                          bool OnlyPackedOffsets = true) {}

static SDValue performGatherLoadCombine(SDNode *N, SelectionDAG &DAG,
                                        unsigned Opcode,
                                        bool OnlyPackedOffsets = true) {}

static SDValue
performSignExtendInRegCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
                              SelectionDAG &DAG) {}

static SDValue legalizeSVEGatherPrefetchOffsVec(SDNode *N, SelectionDAG &DAG) {}

static SDValue combineSVEPrefetchVecBaseImmOff(SDNode *N, SelectionDAG &DAG,
                                               unsigned ScalarSizeInBytes) {}

static bool isLanes1toNKnownZero(SDValue Op) {}

static SDValue removeRedundantInsertVectorElt(SDNode *N) {}

static SDValue
performInsertVectorEltCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {}

static SDValue performFPExtendCombine(SDNode *N, SelectionDAG &DAG,
                                      TargetLowering::DAGCombinerInfo &DCI,
                                      const AArch64Subtarget *Subtarget) {}

static SDValue performBSPExpandForSVE(SDNode *N, SelectionDAG &DAG,
                                      const AArch64Subtarget *Subtarget) {}

static SDValue performDupLane128Combine(SDNode *N, SelectionDAG &DAG) {}

static SDValue tryCombineMULLWithUZP1(SDNode *N,
                                      TargetLowering::DAGCombinerInfo &DCI,
                                      SelectionDAG &DAG) {}

static SDValue performMULLCombine(SDNode *N,
                                  TargetLowering::DAGCombinerInfo &DCI,
                                  SelectionDAG &DAG) {}

static SDValue
performScalarToVectorCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
                             SelectionDAG &DAG) {}

SDValue AArch64TargetLowering::PerformDAGCombine(SDNode *N,
                                                 DAGCombinerInfo &DCI) const {}

bool AArch64TargetLowering::isUsedByReturnOnly(SDNode *N,
                                               SDValue &Chain) const {}

bool AArch64TargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {}

bool AArch64TargetLowering::isIndexingLegal(MachineInstr &MI, Register Base,
                                            Register Offset, bool IsPre,
                                            MachineRegisterInfo &MRI) const {}

bool AArch64TargetLowering::getIndexedAddressParts(SDNode *N, SDNode *Op,
                                                   SDValue &Base,
                                                   SDValue &Offset,
                                                   SelectionDAG &DAG) const {}

bool AArch64TargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base,
                                                      SDValue &Offset,
                                                      ISD::MemIndexedMode &AM,
                                                      SelectionDAG &DAG) const {}

bool AArch64TargetLowering::getPostIndexedAddressParts(
    SDNode *N, SDNode *Op, SDValue &Base, SDValue &Offset,
    ISD::MemIndexedMode &AM, SelectionDAG &DAG) const {}

static void replaceBoolVectorBitcast(SDNode *N,
                                     SmallVectorImpl<SDValue> &Results,
                                     SelectionDAG &DAG) {}

static void CustomNonLegalBITCASTResults(SDNode *N,
                                         SmallVectorImpl<SDValue> &Results,
                                         SelectionDAG &DAG, EVT ExtendVT,
                                         EVT CastVT) {}

void AArch64TargetLowering::ReplaceBITCASTResults(
    SDNode *N, SmallVectorImpl<SDValue> &Results, SelectionDAG &DAG) const {}

static void ReplaceAddWithADDP(SDNode *N, SmallVectorImpl<SDValue> &Results,
                               SelectionDAG &DAG,
                               const AArch64Subtarget *Subtarget) {}

static void ReplaceReductionResults(SDNode *N,
                                    SmallVectorImpl<SDValue> &Results,
                                    SelectionDAG &DAG, unsigned InterOp,
                                    unsigned AcrossOp) {}

void AArch64TargetLowering::ReplaceExtractSubVectorResults(
    SDNode *N, SmallVectorImpl<SDValue> &Results, SelectionDAG &DAG) const {}

static SDValue createGPRPairNode(SelectionDAG &DAG, SDValue V) {}

static void ReplaceCMP_SWAP_128Results(SDNode *N,
                                       SmallVectorImpl<SDValue> &Results,
                                       SelectionDAG &DAG,
                                       const AArch64Subtarget *Subtarget) {}

static unsigned getAtomicLoad128Opcode(unsigned ISDOpcode,
                                       AtomicOrdering Ordering) {}

static void ReplaceATOMIC_LOAD_128Results(SDNode *N,
                                          SmallVectorImpl<SDValue> &Results,
                                          SelectionDAG &DAG,
                                          const AArch64Subtarget *Subtarget) {}

void AArch64TargetLowering::ReplaceNodeResults(
    SDNode *N, SmallVectorImpl<SDValue> &Results, SelectionDAG &DAG) const {}

bool AArch64TargetLowering::useLoadStackGuardNode() const {}

unsigned AArch64TargetLowering::combineRepeatedFPDivisors() const {}

TargetLoweringBase::LegalizeTypeAction
AArch64TargetLowering::getPreferredVectorAction(MVT VT) const {}

bool AArch64TargetLowering::isOpSuitableForLDPSTP(const Instruction *I) const {}

bool AArch64TargetLowering::isOpSuitableForLSE128(const Instruction *I) const {}

bool AArch64TargetLowering::isOpSuitableForRCPC3(const Instruction *I) const {}

bool AArch64TargetLowering::shouldInsertFencesForAtomic(
    const Instruction *I) const {}

bool AArch64TargetLowering::shouldInsertTrailingFenceForAtomicStore(
    const Instruction *I) const {}

TargetLoweringBase::AtomicExpansionKind
AArch64TargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const {}

TargetLowering::AtomicExpansionKind
AArch64TargetLowering::shouldExpandAtomicLoadInIR(LoadInst *LI) const {}

static bool rmwOpMayLowerToLibcall(const AArch64Subtarget &Subtarget,
                                   const AtomicRMWInst *RMW) {}

TargetLowering::AtomicExpansionKind
AArch64TargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {}

TargetLowering::AtomicExpansionKind
AArch64TargetLowering::shouldExpandAtomicCmpXchgInIR(
    AtomicCmpXchgInst *AI) const {}

Value *AArch64TargetLowering::emitLoadLinked(IRBuilderBase &Builder,
                                             Type *ValueTy, Value *Addr,
                                             AtomicOrdering Ord) const {}

void AArch64TargetLowering::emitAtomicCmpXchgNoStoreLLBalance(
    IRBuilderBase &Builder) const {}

Value *AArch64TargetLowering::emitStoreConditional(IRBuilderBase &Builder,
                                                   Value *Val, Value *Addr,
                                                   AtomicOrdering Ord) const {}

bool AArch64TargetLowering::functionArgumentNeedsConsecutiveRegisters(
    Type *Ty, CallingConv::ID CallConv, bool isVarArg,
    const DataLayout &DL) const {}

bool AArch64TargetLowering::shouldNormalizeToSelectSequence(LLVMContext &,
                                                            EVT) const {}

static Value *UseTlsOffset(IRBuilderBase &IRB, unsigned Offset) {}

Value *AArch64TargetLowering::getIRStackGuard(IRBuilderBase &IRB) const {}

void AArch64TargetLowering::insertSSPDeclarations(Module &M) const {}

Value *AArch64TargetLowering::getSDagStackGuard(const Module &M) const {}

Function *AArch64TargetLowering::getSSPStackGuardCheck(const Module &M) const {}

Value *
AArch64TargetLowering::getSafeStackPointerLocation(IRBuilderBase &IRB) const {}

bool AArch64TargetLowering::isMaskAndCmp0FoldingBeneficial(
    const Instruction &AndI) const {}

bool AArch64TargetLowering::
    shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd(
        SDValue X, ConstantSDNode *XC, ConstantSDNode *CC, SDValue Y,
        unsigned OldShiftOpcode, unsigned NewShiftOpcode,
        SelectionDAG &DAG) const {}

TargetLowering::ShiftLegalizationStrategy
AArch64TargetLowering::preferredShiftLegalizationStrategy(
    SelectionDAG &DAG, SDNode *N, unsigned int ExpansionFactor) const {}

void AArch64TargetLowering::initializeSplitCSR(MachineBasicBlock *Entry) const {}

void AArch64TargetLowering::insertCopiesSplitCSR(
    MachineBasicBlock *Entry,
    const SmallVectorImpl<MachineBasicBlock *> &Exits) const {}

bool AArch64TargetLowering::isIntDivCheap(EVT VT, AttributeList Attr) const {}

bool AArch64TargetLowering::preferIncOfAddToSubOfNot(EVT VT) const {}

bool AArch64TargetLowering::shouldConvertFpToSat(unsigned Op, EVT FPVT,
                                                 EVT VT) const {}

bool AArch64TargetLowering::shouldExpandCmpUsingSelects(EVT VT) const {}

MachineInstr *
AArch64TargetLowering::EmitKCFICheck(MachineBasicBlock &MBB,
                                     MachineBasicBlock::instr_iterator &MBBI,
                                     const TargetInstrInfo *TII) const {}

bool AArch64TargetLowering::enableAggressiveFMAFusion(EVT VT) const {}

unsigned
AArch64TargetLowering::getVaListSizeInBits(const DataLayout &DL) const {}

void AArch64TargetLowering::finalizeLowering(MachineFunction &MF) const {}

bool AArch64TargetLowering::needsFixedCatchObjects() const {}

bool AArch64TargetLowering::shouldLocalize(
    const MachineInstr &MI, const TargetTransformInfo *TTI) const {}

bool AArch64TargetLowering::fallBackToDAGISel(const Instruction &Inst) const {}

static EVT getContainerForFixedLengthVector(SelectionDAG &DAG, EVT VT) {}

static SDValue getPredicateForFixedLengthVector(SelectionDAG &DAG, SDLoc &DL,
                                                EVT VT) {}

static SDValue getPredicateForScalableVector(SelectionDAG &DAG, SDLoc &DL,
                                             EVT VT) {}

static SDValue getPredicateForVector(SelectionDAG &DAG, SDLoc &DL, EVT VT) {}

static SDValue convertToScalableVector(SelectionDAG &DAG, EVT VT, SDValue V) {}

static SDValue convertFromScalableVector(SelectionDAG &DAG, EVT VT, SDValue V) {}

SDValue AArch64TargetLowering::LowerFixedLengthVectorLoadToSVE(
    SDValue Op, SelectionDAG &DAG) const {}

static SDValue convertFixedMaskToScalableVector(SDValue Mask,
                                                SelectionDAG &DAG) {}

SDValue AArch64TargetLowering::LowerFixedLengthVectorMLoadToSVE(
    SDValue Op, SelectionDAG &DAG) const {}

SDValue AArch64TargetLowering::LowerFixedLengthVectorStoreToSVE(
    SDValue Op, SelectionDAG &DAG) const {}

SDValue AArch64TargetLowering::LowerFixedLengthVectorMStoreToSVE(
    SDValue Op, SelectionDAG &DAG) const {}

SDValue AArch64TargetLowering::LowerFixedLengthVectorIntDivideToSVE(
    SDValue Op, SelectionDAG &DAG) const {}

SDValue AArch64TargetLowering::LowerFixedLengthVectorIntExtendToSVE(
    SDValue Op, SelectionDAG &DAG) const {}

SDValue AArch64TargetLowering::LowerFixedLengthVectorTruncateToSVE(
    SDValue Op, SelectionDAG &DAG) const {}

SDValue AArch64TargetLowering::LowerFixedLengthExtractVectorElt(
    SDValue Op, SelectionDAG &DAG) const {}

SDValue AArch64TargetLowering::LowerFixedLengthInsertVectorElt(
    SDValue Op, SelectionDAG &DAG) const {}

SDValue AArch64TargetLowering::LowerToPredicatedOp(SDValue Op,
                                                   SelectionDAG &DAG,
                                                   unsigned NewOp) const {}

SDValue AArch64TargetLowering::LowerToScalableOp(SDValue Op,
                                                 SelectionDAG &DAG) const {}

SDValue AArch64TargetLowering::LowerVECREDUCE_SEQ_FADD(SDValue ScalarOp,
    SelectionDAG &DAG) const {}

SDValue AArch64TargetLowering::LowerPredReductionToSVE(SDValue ReduceOp,
                                                       SelectionDAG &DAG) const {}

SDValue AArch64TargetLowering::LowerReductionToSVE(unsigned Opcode,
                                                   SDValue ScalarOp,
                                                   SelectionDAG &DAG) const {}

SDValue
AArch64TargetLowering::LowerFixedLengthVectorSelectToSVE(SDValue Op,
    SelectionDAG &DAG) const {}

SDValue AArch64TargetLowering::LowerFixedLengthVectorSetccToSVE(
    SDValue Op, SelectionDAG &DAG) const {}

SDValue
AArch64TargetLowering::LowerFixedLengthBitcastToSVE(SDValue Op,
                                                    SelectionDAG &DAG) const {}

SDValue AArch64TargetLowering::LowerFixedLengthConcatVectorsToSVE(
    SDValue Op, SelectionDAG &DAG) const {}

SDValue
AArch64TargetLowering::LowerFixedLengthFPExtendToSVE(SDValue Op,
                                                     SelectionDAG &DAG) const {}

SDValue
AArch64TargetLowering::LowerFixedLengthFPRoundToSVE(SDValue Op,
                                                    SelectionDAG &DAG) const {}

SDValue
AArch64TargetLowering::LowerFixedLengthIntToFPToSVE(SDValue Op,
                                                    SelectionDAG &DAG) const {}

SDValue
AArch64TargetLowering::LowerVECTOR_DEINTERLEAVE(SDValue Op,
                                                SelectionDAG &DAG) const {}

SDValue AArch64TargetLowering::LowerVECTOR_INTERLEAVE(SDValue Op,
                                                      SelectionDAG &DAG) const {}

SDValue AArch64TargetLowering::LowerVECTOR_HISTOGRAM(SDValue Op,
                                                     SelectionDAG &DAG) const {}

SDValue
AArch64TargetLowering::LowerFixedLengthFPToIntToSVE(SDValue Op,
                                                    SelectionDAG &DAG) const {}

static SDValue GenerateFixedLengthSVETBL(SDValue Op, SDValue Op1, SDValue Op2,
                                         ArrayRef<int> ShuffleMask, EVT VT,
                                         EVT ContainerVT, SelectionDAG &DAG) {}

SDValue AArch64TargetLowering::LowerFixedLengthVECTOR_SHUFFLEToSVE(
    SDValue Op, SelectionDAG &DAG) const {}

SDValue AArch64TargetLowering::getSVESafeBitCast(EVT VT, SDValue Op,
                                                 SelectionDAG &DAG) const {}

bool AArch64TargetLowering::isAllActivePredicate(SelectionDAG &DAG,
                                                 SDValue N) const {}

EVT AArch64TargetLowering::getPromotedVTForPredicate(EVT VT) const {}

bool AArch64TargetLowering::SimplifyDemandedBitsForTargetNode(
    SDValue Op, const APInt &OriginalDemandedBits,
    const APInt &OriginalDemandedElts, KnownBits &Known, TargetLoweringOpt &TLO,
    unsigned Depth) const {}

bool AArch64TargetLowering::isTargetCanonicalConstantNode(SDValue Op) const {}

bool AArch64TargetLowering::isComplexDeinterleavingSupported() const {}

bool AArch64TargetLowering::isComplexDeinterleavingOperationSupported(
    ComplexDeinterleavingOperation Operation, Type *Ty) const {}

Value *AArch64TargetLowering::createComplexDeinterleavingIR(
    IRBuilderBase &B, ComplexDeinterleavingOperation OperationType,
    ComplexDeinterleavingRotation Rotation, Value *InputA, Value *InputB,
    Value *Accumulator) const {}

bool AArch64TargetLowering::preferScalarizeSplat(SDNode *N) const {}

unsigned AArch64TargetLowering::getMinimumJumpTableEntries() const {}

MVT AArch64TargetLowering::getRegisterTypeForCallingConv(LLVMContext &Context,
                                                         CallingConv::ID CC,
                                                         EVT VT) const {}

unsigned AArch64TargetLowering::getNumRegistersForCallingConv(
    LLVMContext &Context, CallingConv::ID CC, EVT VT) const {}

unsigned AArch64TargetLowering::getVectorTypeBreakdownForCallingConv(
    LLVMContext &Context, CallingConv::ID CC, EVT VT, EVT &IntermediateVT,
    unsigned &NumIntermediates, MVT &RegisterVT) const {}

bool AArch64TargetLowering::hasInlineStackProbe(
    const MachineFunction &MF) const {}

bool AArch64TargetLowering::isTypeDesirableForOp(unsigned Opc, EVT VT) const {}

#ifndef NDEBUG
#endif