llvm/lib/Target/ARM/ARMISelLowering.cpp

//===- ARMISelLowering.cpp - ARM DAG Lowering Implementation --------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that ARM uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#include "ARMISelLowering.h"
#include "ARMBaseInstrInfo.h"
#include "ARMBaseRegisterInfo.h"
#include "ARMCallingConv.h"
#include "ARMConstantPoolValue.h"
#include "ARMMachineFunctionInfo.h"
#include "ARMPerfectShuffle.h"
#include "ARMRegisterInfo.h"
#include "ARMSelectionDAGInfo.h"
#include "ARMSubtarget.h"
#include "ARMTargetTransformInfo.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "MCTargetDesc/ARMBaseInfo.h"
#include "Utils/ARMBaseInfo.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/ComplexDeinterleavingPass.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/IntrinsicLowering.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RuntimeLibcallUtil.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGAddressAnalysis.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/CodeGenTypes/MachineValueType.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsARM.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCInstrItineraries.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSchedule.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/TargetParser/Triple.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstdlib>
#include <iterator>
#include <limits>
#include <optional>
#include <tuple>
#include <utility>
#include <vector>

using namespace llvm;
using namespace llvm::PatternMatch;

#define DEBUG_TYPE "arm-isel"

STATISTIC(NumTailCalls, "Number of tail calls");
STATISTIC(NumMovwMovt, "Number of GAs materialized with movw + movt");
STATISTIC(NumLoopByVals, "Number of loops generated for byval arguments");
STATISTIC(NumConstpoolPromoted,
  "Number of constants with their storage promoted into constant pools");

static cl::opt<bool>
ARMInterworking("arm-interworking", cl::Hidden,
  cl::desc("Enable / disable ARM interworking (for debugging only)"),
  cl::init(true));

static cl::opt<bool> EnableConstpoolPromotion(
    "arm-promote-constant", cl::Hidden,
    cl::desc("Enable / disable promotion of unnamed_addr constants into "
             "constant pools"),
    cl::init(false)); // FIXME: set to true by default once PR32780 is fixed
static cl::opt<unsigned> ConstpoolPromotionMaxSize(
    "arm-promote-constant-max-size", cl::Hidden,
    cl::desc("Maximum size of constant to promote into a constant pool"),
    cl::init(64));
static cl::opt<unsigned> ConstpoolPromotionMaxTotal(
    "arm-promote-constant-max-total", cl::Hidden,
    cl::desc("Maximum size of ALL constants to promote into a constant pool"),
    cl::init(128));

cl::opt<unsigned>
MVEMaxSupportedInterleaveFactor("mve-max-interleave-factor", cl::Hidden,
  cl::desc("Maximum interleave factor for MVE VLDn to generate."),
  cl::init(2));

// The APCS parameter registers.
static const MCPhysReg GPRArgRegs[] = {ARM::R0, ARM::R1, ARM::R2, ARM::R3};

static SDValue handleCMSEValue(const SDValue &Value, const ISD::InputArg &Arg,
                               SelectionDAG &DAG, const SDLoc &DL) {}

void ARMTargetLowering::addTypeForNEON(MVT VT, MVT PromotedLdStVT) {}

void ARMTargetLowering::addDRTypeForNEON(MVT VT) {}

void ARMTargetLowering::addQRTypeForNEON(MVT VT) {}

void ARMTargetLowering::setAllExpand(MVT VT) {}

void ARMTargetLowering::addAllExtLoads(const MVT From, const MVT To,
                                       LegalizeAction Action) {}

void ARMTargetLowering::addMVEVectorTypes(bool HasMVEFP) {}

ARMTargetLowering::ARMTargetLowering(const TargetMachine &TM,
                                     const ARMSubtarget &STI)
    : TargetLowering(TM), Subtarget(&STI) {}

bool ARMTargetLowering::useSoftFloat() const {}

// FIXME: It might make sense to define the representative register class as the
// nearest super-register that has a non-null superset. For example, DPR_VFP2 is
// a super-register of SPR, and DPR is a superset of DPR_VFP2. Consequently,
// SPR's representative would be DPR_VFP2. This should work well if register
// pressure tracking were modified such that a register use would increment the
// pressure of the register class's representative and all of its super
// classes' representatives transitively. We have not implemented this because
// of the difficulty prior to coalescing of modeling operand register classes
// due to the common occurrence of cross class copies and subregister insertions
// and extractions.
std::pair<const TargetRegisterClass *, uint8_t>
ARMTargetLowering::findRepresentativeClass(const TargetRegisterInfo *TRI,
                                           MVT VT) const {}

const char *ARMTargetLowering::getTargetNodeName(unsigned Opcode) const {}

EVT ARMTargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &,
                                          EVT VT) const {}

/// getRegClassFor - Return the register class that should be used for the
/// specified value type.
const TargetRegisterClass *
ARMTargetLowering::getRegClassFor(MVT VT, bool isDivergent) const {}

// memcpy and other memory intrinsics typically try to use LDM/STM if the
// source/dest is aligned and the copy size is large enough. We therefore want
// to align such objects passed to memory intrinsics.
bool ARMTargetLowering::shouldAlignPointerArgs(CallInst *CI, unsigned &MinSize,
                                               Align &PrefAlign) const {}

// Create a fast isel object.
FastISel *
ARMTargetLowering::createFastISel(FunctionLoweringInfo &funcInfo,
                                  const TargetLibraryInfo *libInfo) const {}

Sched::Preference ARMTargetLowering::getSchedulingPreference(SDNode *N) const {}

//===----------------------------------------------------------------------===//
// Lowering Code
//===----------------------------------------------------------------------===//

static bool isSRL16(const SDValue &Op) {}

static bool isSRA16(const SDValue &Op) {}

static bool isSHL16(const SDValue &Op) {}

// Check for a signed 16-bit value. We special-case SRA because it makes it
// simpler when also looking for SRAs that aren't sign extending a
// smaller value. Without the check, we'd need to take extra care with
// checking order for some operations.
static bool isS16(const SDValue &Op, SelectionDAG &DAG) {}

/// IntCCToARMCC - Convert a DAG integer condition code to an ARM CC
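/// For illustration, a sketch of the usual mapping (the full table is elided
/// in this file):
///   SETEQ  -> ARMCC::EQ     SETNE  -> ARMCC::NE
///   SETLT  -> ARMCC::LT     SETGE  -> ARMCC::GE
///   SETULT -> ARMCC::LO     SETUGE -> ARMCC::HS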
static ARMCC::CondCodes IntCCToARMCC(ISD::CondCode CC) {}

/// FPCCToARMCC - Convert a DAG fp condition code to an ARM CC.
static void FPCCToARMCC(ISD::CondCode CC, ARMCC::CondCodes &CondCode,
                        ARMCC::CondCodes &CondCode2) {}

//===----------------------------------------------------------------------===//
//                      Calling Convention Implementation
//===----------------------------------------------------------------------===//

/// getEffectiveCallingConv - Get the effective calling convention, taking into
/// account presence of floating point hardware and calling convention
/// limitations, such as support for variadic functions.
CallingConv::ID
ARMTargetLowering::getEffectiveCallingConv(CallingConv::ID CC,
                                           bool isVarArg) const {}

CCAssignFn *ARMTargetLowering::CCAssignFnForCall(CallingConv::ID CC,
                                                 bool isVarArg) const {}

CCAssignFn *ARMTargetLowering::CCAssignFnForReturn(CallingConv::ID CC,
                                                   bool isVarArg) const {}

/// CCAssignFnForNode - Selects the correct CCAssignFn for the given
/// CallingConvention.
CCAssignFn *ARMTargetLowering::CCAssignFnForNode(CallingConv::ID CC,
                                                 bool Return,
                                                 bool isVarArg) const {}

SDValue ARMTargetLowering::MoveToHPR(const SDLoc &dl, SelectionDAG &DAG,
                                     MVT LocVT, MVT ValVT, SDValue Val) const {}

SDValue ARMTargetLowering::MoveFromHPR(const SDLoc &dl, SelectionDAG &DAG,
                                       MVT LocVT, MVT ValVT,
                                       SDValue Val) const {}

/// LowerCallResult - Lower the result values of a call into the
/// appropriate copies out of appropriate physical registers.
SDValue ARMTargetLowering::LowerCallResult(
    SDValue Chain, SDValue InGlue, CallingConv::ID CallConv, bool isVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals, bool isThisReturn,
    SDValue ThisVal, bool isCmseNSCall) const {}

std::pair<SDValue, MachinePointerInfo> ARMTargetLowering::computeAddrForCallArg(
    const SDLoc &dl, SelectionDAG &DAG, const CCValAssign &VA, SDValue StackPtr,
    bool IsTailCall, int SPDiff) const {}

void ARMTargetLowering::PassF64ArgInRegs(const SDLoc &dl, SelectionDAG &DAG,
                                         SDValue Chain, SDValue &Arg,
                                         RegsToPassVector &RegsToPass,
                                         CCValAssign &VA, CCValAssign &NextVA,
                                         SDValue &StackPtr,
                                         SmallVectorImpl<SDValue> &MemOpChains,
                                         bool IsTailCall,
                                         int SPDiff) const {}

static bool canGuaranteeTCO(CallingConv::ID CC, bool GuaranteeTailCalls) {}

/// LowerCall - Lower a call into a callseq_start <-
/// ARMISD::CALL <- callseq_end chain. Also add input and output parameter
/// nodes.
SDValue
ARMTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
                             SmallVectorImpl<SDValue> &InVals) const {}

/// HandleByVal - Every parameter *after* a byval parameter is passed
/// on the stack.  Remember the next parameter register to allocate,
/// and then confiscate the rest of the parameter registers to ensure
/// this.
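/// For example (a sketch): if a byval aggregate begins in r2, then r3 is
/// confiscated as well, and every parameter after the byval one is assigned
/// a stack slot.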
void ARMTargetLowering::HandleByVal(CCState *State, unsigned &Size,
                                    Align Alignment) const {}

/// MatchingStackOffset - Return true if the given stack call argument is
/// already available in the same position (relatively) of the caller's
/// incoming argument stack.
static
bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags,
                         MachineFrameInfo &MFI, const MachineRegisterInfo *MRI,
                         const TargetInstrInfo *TII) {}

/// IsEligibleForTailCallOptimization - Check whether the call is eligible
/// for tail call optimization. Targets which want to do tail call
/// optimization should implement this function. Note that this function also
/// processes musttail calls, so when this function returns false on a valid
/// musttail call, a fatal backend error occurs.
bool ARMTargetLowering::IsEligibleForTailCallOptimization(
    TargetLowering::CallLoweringInfo &CLI, CCState &CCInfo,
    SmallVectorImpl<CCValAssign> &ArgLocs, const bool isIndirect) const {}

bool
ARMTargetLowering::CanLowerReturn(CallingConv::ID CallConv,
                                  MachineFunction &MF, bool isVarArg,
                                  const SmallVectorImpl<ISD::OutputArg> &Outs,
                                  LLVMContext &Context) const {}

static SDValue LowerInterruptReturn(SmallVectorImpl<SDValue> &RetOps,
                                    const SDLoc &DL, SelectionDAG &DAG) {}

SDValue
ARMTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
                               bool isVarArg,
                               const SmallVectorImpl<ISD::OutputArg> &Outs,
                               const SmallVectorImpl<SDValue> &OutVals,
                               const SDLoc &dl, SelectionDAG &DAG) const {}

bool ARMTargetLowering::isUsedByReturnOnly(SDNode *N, SDValue &Chain) const {}

bool ARMTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {}

// Writing a 64-bit value requires splitting it into two 32-bit values first,
// then passing the low and high parts through.
static SDValue LowerWRITE_REGISTER(SDValue Op, SelectionDAG &DAG) {}

// ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as
// their target counterpart wrapped in the ARMISD::Wrapper node. Suppose N is
// one of the above-mentioned nodes. It has to be wrapped because otherwise
// Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only
// be used to form addressing modes. These wrapped nodes will be selected
// into MOVi.
SDValue ARMTargetLowering::LowerConstantPool(SDValue Op,
                                             SelectionDAG &DAG) const {}

unsigned ARMTargetLowering::getJumpTableEncoding() const {}

SDValue ARMTargetLowering::LowerBlockAddress(SDValue Op,
                                             SelectionDAG &DAG) const {}

/// Convert a TLS address reference into the correct sequence of loads
/// and calls to compute the variable's address for Darwin, and return an
/// SDValue containing the final node.

/// Darwin only has one TLS scheme which must be capable of dealing with the
/// fully general situation, in the worst case. This means:
///     + "extern __thread" declaration.
///     + Defined in a possibly unknown dynamic library.
///
/// The general system is that each __thread variable has a [3 x i32] descriptor
/// which contains information used by the runtime to calculate the address. The
/// only part of this the compiler needs to know about is the first word, which
/// contains a function pointer that must be called with the address of the
/// entire descriptor in "r0".
///
/// Since this descriptor may be in a different unit, in general access must
/// proceed along the usual ARM rules. A common sequence to produce is:
///
///     movw rT1, :lower16:_var$non_lazy_ptr
///     movt rT1, :upper16:_var$non_lazy_ptr
///     ldr r0, [rT1]
///     ldr rT2, [r0]
///     blx rT2
///     [...address now in r0...]
SDValue
ARMTargetLowering::LowerGlobalTLSAddressDarwin(SDValue Op,
                                               SelectionDAG &DAG) const {}

SDValue
ARMTargetLowering::LowerGlobalTLSAddressWindows(SDValue Op,
                                                SelectionDAG &DAG) const {}

// Lower ISD::GlobalTLSAddress using the "general dynamic" model
SDValue
ARMTargetLowering::LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA,
                                                 SelectionDAG &DAG) const {}

// Lower ISD::GlobalTLSAddress using the "initial exec" or
// "local exec" model.
SDValue
ARMTargetLowering::LowerToTLSExecModels(GlobalAddressSDNode *GA,
                                        SelectionDAG &DAG,
                                        TLSModel::Model model) const {}

SDValue
ARMTargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const {}

/// Return true if all users of V are within function F, looking through
/// ConstantExprs.
static bool allUsersAreInFunction(const Value *V, const Function *F) {}

static SDValue promoteToConstantPool(const ARMTargetLowering *TLI,
                                     const GlobalValue *GV, SelectionDAG &DAG,
                                     EVT PtrVT, const SDLoc &dl) {}

bool ARMTargetLowering::isReadOnly(const GlobalValue *GV) const {}

SDValue ARMTargetLowering::LowerGlobalAddress(SDValue Op,
                                              SelectionDAG &DAG) const {}

SDValue ARMTargetLowering::LowerGlobalAddressELF(SDValue Op,
                                                 SelectionDAG &DAG) const {}

SDValue ARMTargetLowering::LowerGlobalAddressDarwin(SDValue Op,
                                                    SelectionDAG &DAG) const {}

SDValue ARMTargetLowering::LowerGlobalAddressWindows(SDValue Op,
                                                     SelectionDAG &DAG) const {}

SDValue
ARMTargetLowering::LowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const {}

SDValue
ARMTargetLowering::LowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG) const {}

SDValue ARMTargetLowering::LowerEH_SJLJ_SETUP_DISPATCH(SDValue Op,
                                                      SelectionDAG &DAG) const {}

SDValue ARMTargetLowering::LowerINTRINSIC_VOID(
    SDValue Op, SelectionDAG &DAG, const ARMSubtarget *Subtarget) const {}

SDValue
ARMTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG,
                                          const ARMSubtarget *Subtarget) const {}

static SDValue LowerATOMIC_FENCE(SDValue Op, SelectionDAG &DAG,
                                 const ARMSubtarget *Subtarget) {}
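
// For reference (a hedged sketch): on ARMv7 and later, a seq_cst fence is
// typically lowered to a data memory barrier such as:
//   dmb ish
// while older targets may fall back to the CP15 equivalent:
//   mcr p15, #0, rX, c7, c10, #5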

static SDValue LowerPREFETCH(SDValue Op, SelectionDAG &DAG,
                             const ARMSubtarget *Subtarget) {}

static SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) {}

SDValue ARMTargetLowering::GetF64FormalArgument(CCValAssign &VA,
                                                CCValAssign &NextVA,
                                                SDValue &Root,
                                                SelectionDAG &DAG,
                                                const SDLoc &dl) const {}

// The remaining GPRs hold either the beginning of variable-argument
// data, or the beginning of an aggregate passed by value (usually
// byval).  Either way, we allocate stack slots adjacent to the data
// provided by our caller, and store the unallocated registers there.
// If this is a variadic function, the va_list pointer will begin with
// these values; otherwise, this reassembles a (byval) structure that
// was split between registers and memory.
// Return: The frame index that the registers were stored into.
int ARMTargetLowering::StoreByValRegs(CCState &CCInfo, SelectionDAG &DAG,
                                      const SDLoc &dl, SDValue &Chain,
                                      const Value *OrigArg,
                                      unsigned InRegsParamRecordIdx,
                                      int ArgOffset, unsigned ArgSize) const {}

// Set up the stack frame that the va_list pointer will start from.
void ARMTargetLowering::VarArgStyleRegisters(CCState &CCInfo, SelectionDAG &DAG,
                                             const SDLoc &dl, SDValue &Chain,
                                             unsigned ArgOffset,
                                             unsigned TotalArgRegsSaveSize,
                                             bool ForceMutable) const {}

bool ARMTargetLowering::splitValueIntoRegisterParts(
    SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts,
    unsigned NumParts, MVT PartVT, std::optional<CallingConv::ID> CC) const {}

SDValue ARMTargetLowering::joinRegisterPartsIntoValue(
    SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts, unsigned NumParts,
    MVT PartVT, EVT ValueVT, std::optional<CallingConv::ID> CC) const {}

SDValue ARMTargetLowering::LowerFormalArguments(
    SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {}

/// isFloatingPointZero - Return true if this is +0.0.
static bool isFloatingPointZero(SDValue Op) {}

/// Returns the appropriate ARM CMP (cmp) and corresponding condition code for
/// the given operands.
SDValue ARMTargetLowering::getARMCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
                                     SDValue &ARMcc, SelectionDAG &DAG,
                                     const SDLoc &dl) const {}

/// Returns an appropriate VFP CMP (fcmp{s|d}+fmstat) for the given operands.
SDValue ARMTargetLowering::getVFPCmp(SDValue LHS, SDValue RHS,
                                     SelectionDAG &DAG, const SDLoc &dl,
                                     bool Signaling) const {}

/// duplicateCmp - Glue values can have only one use, so this function
/// duplicates a comparison node.
SDValue
ARMTargetLowering::duplicateCmp(SDValue Cmp, SelectionDAG &DAG) const {}

// This function returns three things: the arithmetic computation itself
// (Value), a comparison (OverflowCmp), and a condition code (ARMcc).  The
// comparison and the condition code define the case in which the arithmetic
// computation *does not* overflow.
std::pair<SDValue, SDValue>
ARMTargetLowering::getARMXALUOOp(SDValue Op, SelectionDAG &DAG,
                                 SDValue &ARMcc) const {}

SDValue
ARMTargetLowering::LowerSignedALUO(SDValue Op, SelectionDAG &DAG) const {}

static SDValue ConvertBooleanCarryToCarryFlag(SDValue BoolCarry,
                                              SelectionDAG &DAG) {}

static SDValue ConvertCarryFlagToBooleanCarry(SDValue Flags, EVT VT,
                                              SelectionDAG &DAG) {}

SDValue ARMTargetLowering::LowerUnsignedALUO(SDValue Op,
                                             SelectionDAG &DAG) const {}

static SDValue LowerADDSUBSAT(SDValue Op, SelectionDAG &DAG,
                              const ARMSubtarget *Subtarget) {}

SDValue ARMTargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {}

static void checkVSELConstraints(ISD::CondCode CC, ARMCC::CondCodes &CondCode,
                                 bool &swpCmpOps, bool &swpVselOps) {}

SDValue ARMTargetLowering::getCMOV(const SDLoc &dl, EVT VT, SDValue FalseVal,
                                   SDValue TrueVal, SDValue ARMcc, SDValue CCR,
                                   SDValue Cmp, SelectionDAG &DAG) const {}

static bool isGTorGE(ISD::CondCode CC) {}

static bool isLTorLE(ISD::CondCode CC) {}

// See if a conditional (LHS CC RHS ? TrueVal : FalseVal) is lower-saturating.
// All of these conditions (and their <= and >= counterparts) will do:
//          x < k ? k : x
//          x > k ? x : k
//          k < x ? x : k
//          k > x ? k : x
static bool isLowerSaturate(const SDValue LHS, const SDValue RHS,
                            const SDValue TrueVal, const SDValue FalseVal,
                            const ISD::CondCode CC, const SDValue K) {}

// Check if two chained conditionals could be converted into SSAT or USAT.
//
// SSAT can replace a set of two conditional selectors that bound a number to an
// interval of type [k, ~k] when k + 1 is a power of 2. Here are some examples:
//
//     x < -k ? -k : (x > k ? k : x)
//     x < -k ? -k : (x < k ? x : k)
//     x > -k ? (x > k ? k : x) : -k
//     x < k ? (x < -k ? -k : x) : k
//     etc.
//
// LLVM canonicalizes these to either a min(max()) or a max(min())
// pattern. This function tries to match one of these and will return a SSAT
// node if successful.
//
// USAT works similarly to SSAT but bounds the value to the interval [0, k],
// where k + 1 is a power of 2.
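//
// For example (a sketch, assuming the input value arrives in r1):
//   x < -128 ? -128 : (x > 127 ? 127 : x)  -->  ssat r0, #8, r1
//   x < 0 ? 0 : (x > 255 ? 255 : x)        -->  usat r0, #8, r1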
static SDValue LowerSaturatingConditional(SDValue Op, SelectionDAG &DAG) {}

// Check if a condition of the type x < k ? k : x can be converted into a
// bit operation instead of conditional moves.
// Currently this is allowed given:
// - The conditions and values match up
// - k is 0 or -1 (all ones)
// This function will not check the last condition; that's up to the caller.
// It returns true if the transformation can be made, and in that case
// returns x in V and k in SatK.
static bool isLowerSaturatingConditional(const SDValue &Op, SDValue &V,
                                         SDValue &SatK) {}

bool ARMTargetLowering::isUnsupportedFloatingType(EVT VT) const {}

SDValue ARMTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {}

/// canChangeToInt - Given the fp compare operand, return true if it is suitable
/// to morph to an integer compare sequence.
static bool canChangeToInt(SDValue Op, bool &SeenZero,
                           const ARMSubtarget *Subtarget) {}

static SDValue bitcastf32Toi32(SDValue Op, SelectionDAG &DAG) {}

static void expandf64Toi32(SDValue Op, SelectionDAG &DAG,
                           SDValue &RetVal1, SDValue &RetVal2) {}

/// OptimizeVFPBrcond - With -enable-unsafe-fp-math, it's legal to optimize some
/// f32 and even f64 comparisons to integer ones.
SDValue
ARMTargetLowering::OptimizeVFPBrcond(SDValue Op, SelectionDAG &DAG) const {}

SDValue ARMTargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const {}

SDValue ARMTargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const {}

SDValue ARMTargetLowering::LowerBR_JT(SDValue Op, SelectionDAG &DAG) const {}

static SDValue LowerVectorFP_TO_INT(SDValue Op, SelectionDAG &DAG) {}

SDValue ARMTargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const {}

static SDValue LowerFP_TO_INT_SAT(SDValue Op, SelectionDAG &DAG,
                                  const ARMSubtarget *Subtarget) {}

static SDValue LowerVectorINT_TO_FP(SDValue Op, SelectionDAG &DAG) {}

SDValue ARMTargetLowering::LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) const {}

SDValue ARMTargetLowering::LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const {}

SDValue ARMTargetLowering::LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const{}

SDValue ARMTargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {}

// FIXME? Maybe this could be a TableGen attribute on some registers and
// this table could be generated automatically from RegInfo.
Register ARMTargetLowering::getRegisterByName(const char* RegName, LLT VT,
                                              const MachineFunction &MF) const {}

// The result is a 64-bit value, so split it into two 32-bit values and
// return them as a pair of values.
static void ExpandREAD_REGISTER(SDNode *N, SmallVectorImpl<SDValue> &Results,
                                SelectionDAG &DAG) {}

/// \p BC is a bitcast that is about to be turned into a VMOVDRR.
/// When \p DstVT, the destination type of \p BC, is on the vector
/// register bank and the source of bitcast, \p Op, operates on the same bank,
/// it might be possible to combine them, such that everything stays on the
/// vector register bank.
/// \return The node that would replace \p BC, if the combine
/// is possible.
static SDValue CombineVMOVDRRCandidateWithVecOp(const SDNode *BC,
                                                SelectionDAG &DAG) {}

/// ExpandBITCAST - If the target supports VFP, this function is called to
/// expand a bit convert where either the source or destination type is i64 to
/// use a VMOVDRR or VMOVRRD node.  This should not be done when the non-i64
/// operand type is illegal (e.g., v2f32 for a target that doesn't support
/// vectors), since the legalizer won't know what to do with that.
SDValue ARMTargetLowering::ExpandBITCAST(SDNode *N, SelectionDAG &DAG,
                                         const ARMSubtarget *Subtarget) const {}

/// getZeroVector - Returns a vector of specified type with all zero elements.
/// Zero vectors are used to represent vector negation and in those cases
/// will be implemented with the NEON VNEG instruction.  However, VNEG does
/// not support i64 elements, so sometimes the zero vectors will need to be
/// explicitly constructed.  Regardless, use a canonical VMOV to create the
/// zero vector.
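/// For example, the canonical zero vector is typically emitted as:
///   vmov.i32 q0, #0x0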
static SDValue getZeroVector(EVT VT, SelectionDAG &DAG, const SDLoc &dl) {}

/// LowerShiftRightParts - Lower SRA_PARTS, which returns two
/// i32 values and takes a 2 x i32 value to shift plus a shift amount.
SDValue ARMTargetLowering::LowerShiftRightParts(SDValue Op,
                                                SelectionDAG &DAG) const {}

/// LowerShiftLeftParts - Lower SHL_PARTS, which returns two
/// i32 values and takes a 2 x i32 value to shift plus a shift amount.
SDValue ARMTargetLowering::LowerShiftLeftParts(SDValue Op,
                                               SelectionDAG &DAG) const {}
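
// For reference (a sketch, ignoring the amt >= 32 case), the i32 pieces of a
// 64-bit logical shift right by a variable amount amt combine as:
//   Lo = (Lo lsr amt) | (Hi lsl (32 - amt))
//   Hi =  Hi lsr amt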

SDValue ARMTargetLowering::LowerGET_ROUNDING(SDValue Op,
                                             SelectionDAG &DAG) const {}

SDValue ARMTargetLowering::LowerSET_ROUNDING(SDValue Op,
                                             SelectionDAG &DAG) const {}

SDValue ARMTargetLowering::LowerSET_FPMODE(SDValue Op,
                                           SelectionDAG &DAG) const {}

SDValue ARMTargetLowering::LowerRESET_FPMODE(SDValue Op,
                                             SelectionDAG &DAG) const {}

static SDValue LowerCTTZ(SDNode *N, SelectionDAG &DAG,
                         const ARMSubtarget *ST) {}

static SDValue LowerCTPOP(SDNode *N, SelectionDAG &DAG,
                          const ARMSubtarget *ST) {}

/// getVShiftImm - Check if this is a valid build_vector for the immediate
/// operand of a vector shift operation, where all the elements of the
/// build_vector must have the same constant integer value.
static bool getVShiftImm(SDValue Op, unsigned ElementBits, int64_t &Cnt) {}

/// isVShiftLImm - Check if this is a valid build_vector for the immediate
/// operand of a vector shift left operation.  That value must be in the range:
///   0 <= Value < ElementBits for a left shift; or
///   0 <= Value <= ElementBits for a long left shift.
static bool isVShiftLImm(SDValue Op, EVT VT, bool isLong, int64_t &Cnt) {}

/// isVShiftRImm - Check if this is a valid build_vector for the immediate
/// operand of a vector shift right operation.  For a shift opcode, the value
/// is positive, but for an intrinsic the count must be negative. The
/// absolute value must be in the range:
///   1 <= |Value| <= ElementBits for a right shift; or
///   1 <= |Value| <= ElementBits/2 for a narrow right shift.
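/// For example, with ElementBits == 16 (v8i16), a vector right-shift count
/// must satisfy 1 <= |Cnt| <= 16, or 1 <= |Cnt| <= 8 for a narrowing shift
/// such as VSHRN.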
static bool isVShiftRImm(SDValue Op, EVT VT, bool isNarrow, bool isIntrinsic,
                         int64_t &Cnt) {}

static SDValue LowerShift(SDNode *N, SelectionDAG &DAG,
                          const ARMSubtarget *ST) {}

static SDValue Expand64BitShift(SDNode *N, SelectionDAG &DAG,
                                const ARMSubtarget *ST) {}

static SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG,
                           const ARMSubtarget *ST) {}

static SDValue LowerSETCCCARRY(SDValue Op, SelectionDAG &DAG) {}

/// isVMOVModifiedImm - Check if the specified splat value corresponds to a
/// valid vector constant for a NEON or MVE instruction with a "modified
/// immediate" operand (e.g., VMOV).  If so, return the encoded value.
static SDValue isVMOVModifiedImm(uint64_t SplatBits, uint64_t SplatUndef,
                                 unsigned SplatBitSize, SelectionDAG &DAG,
                                 const SDLoc &dl, EVT &VT, EVT VectorVT,
                                 VMOVModImmType type) {}

SDValue ARMTargetLowering::LowerConstantFP(SDValue Op, SelectionDAG &DAG,
                                           const ARMSubtarget *ST) const {}

// Check if a VEXT instruction can handle the shuffle mask when the vector
// sources of the shuffle are the same.
static bool isSingletonVEXTMask(ArrayRef<int> M, EVT VT, unsigned &Imm) {}

static bool isVEXTMask(ArrayRef<int> M, EVT VT,
                       bool &ReverseVEXT, unsigned &Imm) {}

static bool isVTBLMask(ArrayRef<int> M, EVT VT) {}

static unsigned SelectPairHalf(unsigned Elements, ArrayRef<int> Mask,
                               unsigned Index) {}

// Checks whether the shuffle mask represents a vector transpose (VTRN) by
// checking that pairs of elements in the shuffle mask represent the same index
// in each vector, incrementing the expected index by 2 at each step.
// e.g. For v1,v2 of type v4i32 a valid shuffle mask is: [0, 4, 2, 6]
//  v1={a,b,c,d} => x=shufflevector v1, v2 shufflemask => x={a,e,c,g}
//  v2={e,f,g,h}
// WhichResult gives the offset for each element in the mask based on which
// of the two results it belongs to.
//
// The transpose can be represented either as:
// result1 = shufflevector v1, v2, result1_shuffle_mask
// result2 = shufflevector v1, v2, result2_shuffle_mask
// where v1/v2 and the shuffle masks have the same number of elements
// (here WhichResult (see below) indicates which result is being checked)
//
// or as:
// results = shufflevector v1, v2, shuffle_mask
// where both results are returned in one vector and the shuffle mask has twice
// as many elements as v1/v2 (here WhichResult will always be 0 if true) here we
// want to check the low half and high half of the shuffle mask as if it were
// the other case
static bool isVTRNMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) {}

/// isVTRN_v_undef_Mask - Special case of isVTRNMask for canonical form of
/// "vector_shuffle v, v", i.e., "vector_shuffle v, undef".
/// Mask is e.g., <0, 0, 2, 2> instead of <0, 4, 2, 6>.
static bool isVTRN_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult){}

// Checks whether the shuffle mask represents a vector unzip (VUZP) by checking
// that the mask elements are either all even and in steps of size 2 or all odd
// and in steps of size 2.
// e.g. For v1,v2 of type v4i32 a valid shuffle mask is: [0, 2, 4, 6]
//  v1={a,b,c,d} => x=shufflevector v1, v2 shufflemask => x={a,c,e,g}
//  v2={e,f,g,h}
// Requires checks similar to those of isVTRNMask with respect to how the
// results are returned.
static bool isVUZPMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) {}

/// isVUZP_v_undef_Mask - Special case of isVUZPMask for canonical form of
/// "vector_shuffle v, v", i.e., "vector_shuffle v, undef".
/// Mask is e.g., <0, 2, 0, 2> instead of <0, 2, 4, 6>.
static bool isVUZP_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult){}

// Checks whether the shuffle mask represents a vector zip (VZIP) by checking
// that pairs of elements of the shufflemask represent the same index in each
// vector incrementing sequentially through the vectors.
// e.g. For v1,v2 of type v4i32 a valid shuffle mask is: [0, 4, 1, 5]
//  v1={a,b,c,d} => x=shufflevector v1, v2 shufflemask => x={a,e,b,f}
//  v2={e,f,g,h}
// Requires checks similar to those of isVTRNMask with respect to how the
// results are returned.
static bool isVZIPMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) {}

/// isVZIP_v_undef_Mask - Special case of isVZIPMask for canonical form of
/// "vector_shuffle v, v", i.e., "vector_shuffle v, undef".
/// Mask is e.g., <0, 0, 1, 1> instead of <0, 4, 1, 5>.
static bool isVZIP_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult){}

/// Check if \p ShuffleMask is a NEON two-result shuffle (VZIP, VUZP, VTRN),
/// and return the corresponding ARMISD opcode if it is, or 0 if it isn't.
static unsigned isNEONTwoResultShuffleMask(ArrayRef<int> ShuffleMask, EVT VT,
                                           unsigned &WhichResult,
                                           bool &isV_UNDEF) {}

/// \return true if this is a reverse operation on a vector.
static bool isReverseMask(ArrayRef<int> M, EVT VT) {}

static bool isTruncMask(ArrayRef<int> M, EVT VT, bool Top, bool SingleSource) {}

static bool isVMOVNMask(ArrayRef<int> M, EVT VT, bool Top, bool SingleSource) {}

static bool isVMOVNTruncMask(ArrayRef<int> M, EVT ToVT, bool rev) {}

// Reconstruct an MVE VCVT from a BuildVector of scalar fptrunc, all extracted
// from a pair of inputs. For example:
// BUILDVECTOR(FP_ROUND(EXTRACT_ELT(X, 0),
//             FP_ROUND(EXTRACT_ELT(Y, 0),
//             FP_ROUND(EXTRACT_ELT(X, 1),
//             FP_ROUND(EXTRACT_ELT(Y, 1), ...)
static SDValue LowerBuildVectorOfFPTrunc(SDValue BV, SelectionDAG &DAG,
                                         const ARMSubtarget *ST) {}

// Reconstruct an MVE VCVT from a BuildVector of scalar fpext, all extracted
// from a single input on alternating lanes. For example:
// BUILDVECTOR(FP_ROUND(EXTRACT_ELT(X, 0),
//             FP_ROUND(EXTRACT_ELT(X, 2),
//             FP_ROUND(EXTRACT_ELT(X, 4), ...)
static SDValue LowerBuildVectorOfFPExt(SDValue BV, SelectionDAG &DAG,
                                       const ARMSubtarget *ST) {}

// If N is an integer constant that can be moved into a register in one
// instruction, return an SDValue of such a constant (will become a MOV
// instruction).  Otherwise return null.
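// For example (roughly): an ARM-mode MOV immediate is an 8-bit value rotated
// right by an even amount, so 0x00FF0000 can be a single MOV while
// 0x00FF00FF cannot (ignoring MVN and Thumb2/MOVW alternatives).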
static SDValue IsSingleInstrConstant(SDValue N, SelectionDAG &DAG,
                                     const ARMSubtarget *ST, const SDLoc &dl) {}

static SDValue LowerBUILD_VECTOR_i1(SDValue Op, SelectionDAG &DAG,
                                    const ARMSubtarget *ST) {}

static SDValue LowerBUILD_VECTORToVIDUP(SDValue Op, SelectionDAG &DAG,
                                        const ARMSubtarget *ST) {}

// Returns true if the operation N can be treated as a qr instruction variant
// at operand Op.
static bool IsQRMVEInstruction(const SDNode *N, const SDNode *Op) {}

// If this is a case we can't handle, return null and let the default
// expansion code take care of it.
SDValue ARMTargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
                                             const ARMSubtarget *ST) const {}

// Gather data to see if the operation can be modelled as a
// shuffle in combination with VEXTs.
SDValue ARMTargetLowering::ReconstructShuffle(SDValue Op,
                                              SelectionDAG &DAG) const {}

enum ShuffleOpCodes {};

static bool isLegalMVEShuffleOp(unsigned PFEntry) {}

/// isShuffleMaskLegal - Targets can use this to indicate that they only
/// support *some* VECTOR_SHUFFLE operations, those with specific masks.
/// By default, if a target supports the VECTOR_SHUFFLE node, all mask values
/// are assumed to be legal.
bool ARMTargetLowering::isShuffleMaskLegal(ArrayRef<int> M, EVT VT) const {}

/// GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit
/// the specified operations to build the shuffle.
static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS,
                                      SDValue RHS, SelectionDAG &DAG,
                                      const SDLoc &dl) {}

static SDValue LowerVECTOR_SHUFFLEv8i8(SDValue Op,
                                       ArrayRef<int> ShuffleMask,
                                       SelectionDAG &DAG) {}

static SDValue LowerReverse_VECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) {}

static EVT getVectorTyFromPredicateVector(EVT VT) {}

static SDValue PromoteMVEPredVector(SDLoc dl, SDValue Pred, EVT VT,
                                    SelectionDAG &DAG) {}

static SDValue LowerVECTOR_SHUFFLE_i1(SDValue Op, SelectionDAG &DAG,
                                      const ARMSubtarget *ST) {}

static SDValue LowerVECTOR_SHUFFLEUsingMovs(SDValue Op,
                                            ArrayRef<int> ShuffleMask,
                                            SelectionDAG &DAG) {}

static SDValue LowerVECTOR_SHUFFLEUsingOneOff(SDValue Op,
                                              ArrayRef<int> ShuffleMask,
                                              SelectionDAG &DAG) {}

static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG,
                                   const ARMSubtarget *ST) {}

static SDValue LowerINSERT_VECTOR_ELT_i1(SDValue Op, SelectionDAG &DAG,
                                         const ARMSubtarget *ST) {}

SDValue ARMTargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op,
                                                  SelectionDAG &DAG) const {}

static SDValue LowerEXTRACT_VECTOR_ELT_i1(SDValue Op, SelectionDAG &DAG,
                                          const ARMSubtarget *ST) {}

static SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG,
                                       const ARMSubtarget *ST) {}

static SDValue LowerCONCAT_VECTORS_i1(SDValue Op, SelectionDAG &DAG,
                                      const ARMSubtarget *ST) {}

static SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG,
                                   const ARMSubtarget *ST) {}

static SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, SelectionDAG &DAG,
                                      const ARMSubtarget *ST) {}

// Turn a truncate to a predicate (an i1 vector) into icmp(and(x, 1), 0).
static SDValue LowerTruncatei1(SDNode *N, SelectionDAG &DAG,
                               const ARMSubtarget *ST) {}

static SDValue LowerTruncate(SDNode *N, SelectionDAG &DAG,
                             const ARMSubtarget *Subtarget) {}

static SDValue LowerVectorExtend(SDNode *N, SelectionDAG &DAG,
                                 const ARMSubtarget *Subtarget) {}

/// isExtendedBUILD_VECTOR - Check if N is a constant BUILD_VECTOR where each
/// element has been zero/sign-extended, depending on the isSigned parameter,
/// from an integer type half its size.
static bool isExtendedBUILD_VECTOR(SDNode *N, SelectionDAG &DAG,
                                   bool isSigned) {}

/// isSignExtended - Check if a node is a vector value that is sign-extended
/// or a constant BUILD_VECTOR with sign-extended elements.
static bool isSignExtended(SDNode *N, SelectionDAG &DAG) {}

/// isZeroExtended - Check if a node is a vector value that is zero-extended (or
/// any-extended) or a constant BUILD_VECTOR with zero-extended elements.
static bool isZeroExtended(SDNode *N, SelectionDAG &DAG) {}

static EVT getExtensionTo64Bits(const EVT &OrigVT) {}

/// AddRequiredExtensionForVMULL - Add a sign/zero extension to extend the total
/// value size to 64 bits. We need a 64-bit D register as an operand to VMULL.
/// We insert the required extension here to get the vector to fill a D register.
static SDValue AddRequiredExtensionForVMULL(SDValue N, SelectionDAG &DAG,
                                            const EVT &OrigTy,
                                            const EVT &ExtTy,
                                            unsigned ExtOpcode) {}

/// SkipLoadExtensionForVMULL - return a load of the original vector size that
/// does not do any sign/zero extension. If the original vector is less
/// than 64 bits, an appropriate extension will be added after the load to
/// reach a total size of 64 bits. We have to add the extension separately
/// because ARM does not have a sign/zero extending load for vectors.
static SDValue SkipLoadExtensionForVMULL(LoadSDNode *LD, SelectionDAG& DAG) {}

/// SkipExtensionForVMULL - For a node that is a SIGN_EXTEND, ZERO_EXTEND,
/// ANY_EXTEND, extending load, or BUILD_VECTOR with extended elements, return
/// the unextended value. The unextended vector should be 64 bits so that it can
/// be used as an operand to a VMULL instruction. If the original vector size
/// before extension is less than 64 bits, we add an extension to resize
/// the vector to 64 bits.
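/// For example, the pattern
///   mul (sext <8 x i8> %x to <8 x i16>), (sext <8 x i8> %y to <8 x i16>)
/// can then be selected as a single widening multiply:
///   vmull.s8 q0, d0, d1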
static SDValue SkipExtensionForVMULL(SDNode *N, SelectionDAG &DAG) {}

static bool isAddSubSExt(SDNode *N, SelectionDAG &DAG) {}

static bool isAddSubZExt(SDNode *N, SelectionDAG &DAG) {}

static SDValue LowerMUL(SDValue Op, SelectionDAG &DAG) {}

static SDValue LowerSDIV_v4i8(SDValue X, SDValue Y, const SDLoc &dl,
                              SelectionDAG &DAG) {}

static SDValue LowerSDIV_v4i16(SDValue N0, SDValue N1, const SDLoc &dl,
                               SelectionDAG &DAG) {}

static SDValue LowerSDIV(SDValue Op, SelectionDAG &DAG,
                         const ARMSubtarget *ST) {}

static SDValue LowerUDIV(SDValue Op, SelectionDAG &DAG,
                         const ARMSubtarget *ST) {}

static SDValue LowerUADDSUBO_CARRY(SDValue Op, SelectionDAG &DAG) {}

SDValue ARMTargetLowering::LowerFSINCOS(SDValue Op, SelectionDAG &DAG) const {}

SDValue ARMTargetLowering::LowerWindowsDIVLibCall(SDValue Op, SelectionDAG &DAG,
                                                  bool Signed,
                                                  SDValue &Chain) const {}

// This is a code size optimisation: return the original SDIV node to
// DAGCombiner when we don't want to expand SDIV into a sequence of
// instructions, and an empty node otherwise, which will cause the SDIV to
// be expanded in DAGCombine.
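//
// For example, when expansion is chosen, a signed divide by 8 typically
// becomes (a sketch, value in r0):
//   asr r1, r0, #31
//   add r0, r0, r1, lsr #29
//   asr r0, r0, #3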
SDValue
ARMTargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor,
                                 SelectionDAG &DAG,
                                 SmallVectorImpl<SDNode *> &Created) const {}

SDValue ARMTargetLowering::LowerDIV_Windows(SDValue Op, SelectionDAG &DAG,
                                            bool Signed) const {}

static SDValue WinDBZCheckDenominator(SelectionDAG &DAG, SDNode *N,
                                      SDValue InChain) {}

void ARMTargetLowering::ExpandDIV_Windows(
    SDValue Op, SelectionDAG &DAG, bool Signed,
    SmallVectorImpl<SDValue> &Results) const {}

static SDValue LowerPredicateLoad(SDValue Op, SelectionDAG &DAG) {}

void ARMTargetLowering::LowerLOAD(SDNode *N, SmallVectorImpl<SDValue> &Results,
                                  SelectionDAG &DAG) const {}

static SDValue LowerPredicateStore(SDValue Op, SelectionDAG &DAG) {}

static SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG,
                          const ARMSubtarget *Subtarget) {}

static bool isZeroVector(SDValue N) {}

static SDValue LowerMLOAD(SDValue Op, SelectionDAG &DAG) {}

static SDValue LowerVecReduce(SDValue Op, SelectionDAG &DAG,
                              const ARMSubtarget *ST) {}

static SDValue LowerVecReduceF(SDValue Op, SelectionDAG &DAG,
                               const ARMSubtarget *ST) {}

static SDValue LowerVecReduceMinMax(SDValue Op, SelectionDAG &DAG,
                                    const ARMSubtarget *ST) {}

static SDValue LowerAtomicLoadStore(SDValue Op, SelectionDAG &DAG) {}

static void ReplaceREADCYCLECOUNTER(SDNode *N,
                                    SmallVectorImpl<SDValue> &Results,
                                    SelectionDAG &DAG,
                                    const ARMSubtarget *Subtarget) {}

static SDValue createGPRPairNode2xi32(SelectionDAG &DAG, SDValue V0,
                                      SDValue V1) {}

static SDValue createGPRPairNodei64(SelectionDAG &DAG, SDValue V) {}

static void ReplaceCMP_SWAP_64Results(SDNode *N,
                                      SmallVectorImpl<SDValue> &Results,
                                      SelectionDAG &DAG) {}

SDValue ARMTargetLowering::LowerFSETCC(SDValue Op, SelectionDAG &DAG) const {}

SDValue ARMTargetLowering::LowerSPONENTRY(SDValue Op, SelectionDAG &DAG) const {}

SDValue ARMTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {}

static void ReplaceLongIntrinsic(SDNode *N, SmallVectorImpl<SDValue> &Results,
                                 SelectionDAG &DAG) {}

/// ReplaceNodeResults - Replace the results of a node with an illegal result
/// type with new values built out of custom code.
void ARMTargetLowering::ReplaceNodeResults(SDNode *N,
                                           SmallVectorImpl<SDValue> &Results,
                                           SelectionDAG &DAG) const {}

//===----------------------------------------------------------------------===//
//                           ARM Scheduler Hooks
//===----------------------------------------------------------------------===//

/// SetupEntryBlockForSjLj - Insert code into the entry block that creates and
/// registers the function context.
void ARMTargetLowering::SetupEntryBlockForSjLj(MachineInstr &MI,
                                               MachineBasicBlock *MBB,
                                               MachineBasicBlock *DispatchBB,
                                               int FI) const {}

void ARMTargetLowering::EmitSjLjDispatchBlock(MachineInstr &MI,
                                              MachineBasicBlock *MBB) const {}

static
MachineBasicBlock *OtherSucc(MachineBasicBlock *MBB, MachineBasicBlock *Succ) {}

/// Return the load opcode for a given load size. If the load size is >= 8, a
/// NEON opcode will be returned.
static unsigned getLdOpcode(unsigned LdSize, bool IsThumb1, bool IsThumb2) {}

/// Return the store opcode for a given store size. If the store size is >= 8,
/// a NEON opcode will be returned.
static unsigned getStOpcode(unsigned StSize, bool IsThumb1, bool IsThumb2) {}

/// Emit a post-increment load operation with given size. The instructions
/// will be added to BB at Pos.
static void emitPostLd(MachineBasicBlock *BB, MachineBasicBlock::iterator Pos,
                       const TargetInstrInfo *TII, const DebugLoc &dl,
                       unsigned LdSize, unsigned Data, unsigned AddrIn,
                       unsigned AddrOut, bool IsThumb1, bool IsThumb2) {}

/// Emit a post-increment store operation with given size. The instructions
/// will be added to BB at Pos.
static void emitPostSt(MachineBasicBlock *BB, MachineBasicBlock::iterator Pos,
                       const TargetInstrInfo *TII, const DebugLoc &dl,
                       unsigned StSize, unsigned Data, unsigned AddrIn,
                       unsigned AddrOut, bool IsThumb1, bool IsThumb2) {}

MachineBasicBlock *
ARMTargetLowering::EmitStructByval(MachineInstr &MI,
                                   MachineBasicBlock *BB) const {}

MachineBasicBlock *
ARMTargetLowering::EmitLowered__chkstk(MachineInstr &MI,
                                       MachineBasicBlock *MBB) const {}

MachineBasicBlock *
ARMTargetLowering::EmitLowered__dbzchk(MachineInstr &MI,
                                       MachineBasicBlock *MBB) const {}

// The CPSR operand of SelectItr might be missing a kill marker
// because there were multiple uses of CPSR, and ISel didn't know
// which to mark. Figure out whether SelectItr should have had a
// kill marker, and set it if it should. Returns the correct kill
// marker value.
static bool checkAndUpdateCPSRKill(MachineBasicBlock::iterator SelectItr,
                                   MachineBasicBlock* BB,
                                   const TargetRegisterInfo* TRI) {}

/// Adds logic in the loop entry MBB to calculate the loop iteration count,
/// and adds t2WhileLoopSetup and t2WhileLoopStart to generate a WLS loop.
static Register genTPEntry(MachineBasicBlock *TpEntry,
                           MachineBasicBlock *TpLoopBody,
                           MachineBasicBlock *TpExit, Register OpSizeReg,
                           const TargetInstrInfo *TII, DebugLoc Dl,
                           MachineRegisterInfo &MRI) {}

/// Adds logic in the loopBody MBB to generate MVE_VCTP, t2DoLoopDec and
/// t2DoLoopEnd. These are used by later passes to generate tail predicated
/// loops.
static void genTPLoopBody(MachineBasicBlock *TpLoopBody,
                          MachineBasicBlock *TpEntry, MachineBasicBlock *TpExit,
                          const TargetInstrInfo *TII, DebugLoc Dl,
                          MachineRegisterInfo &MRI, Register OpSrcReg,
                          Register OpDestReg, Register ElementCountReg,
                          Register TotalIterationsReg, bool IsMemcpy) {}

MachineBasicBlock *
ARMTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
                                               MachineBasicBlock *BB) const {}

/// Attaches vregs to MEMCPY that it will use as scratch registers
/// when it is expanded into LDM/STM. This is done as a post-isel lowering
/// instead of as a custom inserter because we need the use list from the SDNode.
static void attachMEMCPYScratchRegs(const ARMSubtarget *Subtarget,
                                    MachineInstr &MI, const SDNode *Node) {}

void ARMTargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI,
                                                      SDNode *Node) const {}

//===----------------------------------------------------------------------===//
//                           ARM Optimization Hooks
//===----------------------------------------------------------------------===//

// Helper function that checks if N is a null or all ones constant.
static inline bool isZeroOrAllOnes(SDValue N, bool AllOnes) {}

// Return true if N is conditionally 0 or all ones.
// Detects these expressions where cc is an i1 value:
//
//   (select cc 0, y)   [AllOnes=0]
//   (select cc y, 0)   [AllOnes=0]
//   (zext cc)          [AllOnes=0]
//   (sext cc)          [AllOnes=0/1]
//   (select cc -1, y)  [AllOnes=1]
//   (select cc y, -1)  [AllOnes=1]
//
// Invert is set when N is the null/all ones constant when CC is false.
// OtherOp is set to the alternative value of N.
static bool isConditionalZeroOrAllOnes(SDNode *N, bool AllOnes,
                                       SDValue &CC, bool &Invert,
                                       SDValue &OtherOp,
                                       SelectionDAG &DAG) {}

// Combine a constant select operand into its use:
//
//   (add (select cc, 0, c), x)  -> (select cc, x, (add, x, c))
//   (sub x, (select cc, 0, c))  -> (select cc, x, (sub, x, c))
//   (and (select cc, -1, c), x) -> (select cc, x, (and, x, c))  [AllOnes=1]
//   (or  (select cc, 0, c), x)  -> (select cc, x, (or, x, c))
//   (xor (select cc, 0, c), x)  -> (select cc, x, (xor, x, c))
//
// The transform is rejected if the select doesn't have a constant operand that
// is null, or all ones when AllOnes is set.
//
// Also recognize sext/zext from i1:
//
//   (add (zext cc), x) -> (select cc (add x, 1), x)
//   (add (sext cc), x) -> (select cc (add x, -1), x)
//
// These transformations eventually create predicated instructions.
//
// @param N       The node to transform.
// @param Slct    The N operand that is a select.
// @param OtherOp The other N operand (x above).
// @param DCI     Context.
// @param AllOnes Require the select constant to be all ones instead of null.
// @returns The new node, or SDValue() on failure.
static
SDValue combineSelectAndUse(SDNode *N, SDValue Slct, SDValue OtherOp,
                            TargetLowering::DAGCombinerInfo &DCI,
                            bool AllOnes = false) {}

// Attempt combineSelectAndUse on each operand of a commutative operator N.
static
SDValue combineSelectAndUseCommutative(SDNode *N, bool AllOnes,
                                       TargetLowering::DAGCombinerInfo &DCI) {}

static bool IsVUZPShuffleNode(SDNode *N) {}

static SDValue AddCombineToVPADD(SDNode *N, SDValue N0, SDValue N1,
                                 TargetLowering::DAGCombinerInfo &DCI,
                                 const ARMSubtarget *Subtarget) {}

static SDValue AddCombineVUZPToVPADDL(SDNode *N, SDValue N0, SDValue N1,
                                      TargetLowering::DAGCombinerInfo &DCI,
                                      const ARMSubtarget *Subtarget) {}

// FIXME: This function shouldn't be necessary; if we lower BUILD_VECTOR in
// an appropriate manner, we end up with ADD(VUZP(ZEXT(N))), which is
// much easier to match.
static SDValue
AddCombineBUILD_VECTORToVPADDL(SDNode *N, SDValue N0, SDValue N1,
                               TargetLowering::DAGCombinerInfo &DCI,
                               const ARMSubtarget *Subtarget) {}

static SDValue findMUL_LOHI(SDValue V) {}

static SDValue AddCombineTo64BitSMLAL16(SDNode *AddcNode, SDNode *AddeNode,
                                        TargetLowering::DAGCombinerInfo &DCI,
                                        const ARMSubtarget *Subtarget) {}

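// A sketch of the pattern behind the 64-bit MLAL combines below: an i64 add
// is legalized into an ADDC of the low halves followed by an ADDE of the high
// halves. When the value being added in is the result of a [SU]MUL_LOHI, the
// whole ADDC/ADDE/MUL_LOHI cluster can be replaced by a single SMLAL/UMLAL,
// which multiplies and accumulates into a 64-bit register pair in one
// instruction.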
static SDValue AddCombineTo64bitMLAL(SDNode *AddeSubeNode,
                                     TargetLowering::DAGCombinerInfo &DCI,
                                     const ARMSubtarget *Subtarget) {}

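// UMAAL goes one step further: it computes Rn * Rm + RdLo + RdHi as a full
// 64-bit result, so an UMLAL whose accumulator is itself formed by an extra
// 32-bit add can be folded again.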
static SDValue AddCombineTo64bitUMAAL(SDNode *AddeNode,
                                      TargetLowering::DAGCombinerInfo &DCI,
                                      const ARMSubtarget *Subtarget) {}

static SDValue PerformUMLALCombine(SDNode *N, SelectionDAG &DAG,
                                   const ARMSubtarget *Subtarget) {}

static SDValue PerformAddcSubcCombine(SDNode *N,
                                      TargetLowering::DAGCombinerInfo &DCI,
                                      const ARMSubtarget *Subtarget) {}

static SDValue PerformAddeSubeCombine(SDNode *N,
                                      TargetLowering::DAGCombinerInfo &DCI,
                                      const ARMSubtarget *Subtarget) {}

static SDValue PerformSELECTCombine(SDNode *N,
                                    TargetLowering::DAGCombinerInfo &DCI,
                                    const ARMSubtarget *Subtarget) {}

// A special combine for the vqdmulh family of instructions. This is one of
// several potential patterns that could match this instruction. The base
// pattern you would expect is min(max(ashr(mul(mul(sext(x), 2), sext(y)), 16))).
// This matches the variant min(max(ashr(mul(mul(sext(x), sext(y)), 2), 16))),
// which LLVM will have optimized to min(ashr(mul(sext(x), sext(y)), 15)) as
// the max is unnecessary.
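// For a concrete instance (illustrative): with v8i16 lanes the matched form
// is smin(ashr(mul(sext(x), sext(y)), 15), 32767), which is the saturating
// "doubling multiply, keep the high half" semantics of VQDMULH.S16.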
static SDValue PerformVQDMULHCombine(SDNode *N, SelectionDAG &DAG) {}

static SDValue PerformVSELECTCombine(SDNode *N,
                                     TargetLowering::DAGCombinerInfo &DCI,
                                     const ARMSubtarget *Subtarget) {}

// Convert vsetcc([0,1,2,..], splat(n), ult) -> vctp n
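// (On MVE, VCTP n sets exactly the first n lanes of the predicate, which is
// the same result as comparing the lane indices 0,1,2,... against splat(n)
// with ult.)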
static SDValue PerformVSetCCToVCTPCombine(SDNode *N,
                                          TargetLowering::DAGCombinerInfo &DCI,
                                          const ARMSubtarget *Subtarget) {}

/// PerformADDECombine - Target-specific DAG combine transform from
/// ARMISD::ADDC, ARMISD::ADDE, and ISD::MUL_LOHI to MLAL, or from
/// ARMISD::ADDC, ARMISD::ADDE, and ARMISD::UMLAL to ARMISD::UMAAL.
static SDValue PerformADDECombine(SDNode *N,
                                  TargetLowering::DAGCombinerInfo &DCI,
                                  const ARMSubtarget *Subtarget) {}

/// PerformADDCombineWithOperands - Try DAG combinations for an ADD with
/// operands N0 and N1.  This is a helper for PerformADDCombine that is
/// called with the default operands, and if that fails, with commuted
/// operands.
static SDValue PerformADDCombineWithOperands(SDNode *N, SDValue N0, SDValue N1,
                                             TargetLowering::DAGCombinerInfo &DCI,
                                             const ARMSubtarget *Subtarget) {}

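// Reassociate adds over VECREDUCE so that scalar accumulating reductions
// (e.g. MVE's VADDVA, which adds a vector reduction into a scalar
// accumulator) can be selected; schematically:
//
//   add(X, add(vecreduce(Y), vecreduce(Z)))
//     -> add(add(X, vecreduce(Y)), vecreduce(Z))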
static SDValue TryDistrubutionADDVecReduce(SDNode *N, SelectionDAG &DAG) {}

static SDValue PerformADDVecReduce(SDNode *N, SelectionDAG &DAG,
                                   const ARMSubtarget *Subtarget) {}

bool
ARMTargetLowering::isDesirableToCommuteWithShift(const SDNode *N,
                                                 CombineLevel Level) const {}

bool ARMTargetLowering::isDesirableToCommuteXorWithShift(
    const SDNode *N) const {}

bool ARMTargetLowering::shouldFoldConstantShiftPairToMask(
    const SDNode *N, CombineLevel Level) const {}

bool ARMTargetLowering::shouldFoldSelectWithIdentityConstant(unsigned BinOpcode,
                                                             EVT VT) const {}

bool ARMTargetLowering::preferIncOfAddToSubOfNot(EVT VT) const {}

bool ARMTargetLowering::shouldConvertFpToSat(unsigned Op, EVT FPVT,
                                             EVT VT) const {}

static SDValue PerformSHLSimplify(SDNode *N,
                                  TargetLowering::DAGCombinerInfo &DCI,
                                  const ARMSubtarget *ST) {}

static SDValue PerformADDCombine(SDNode *N,
                                 TargetLowering::DAGCombinerInfo &DCI,
                                 const ARMSubtarget *Subtarget) {}

static SDValue PerformSubCSINCCombine(SDNode *N, SelectionDAG &DAG) {}

static SDValue PerformSUBCombine(SDNode *N,
                                 TargetLowering::DAGCombinerInfo &DCI,
                                 const ARMSubtarget *Subtarget) {}

static SDValue PerformVMULCombine(SDNode *N,
                                  TargetLowering::DAGCombinerInfo &DCI,
                                  const ARMSubtarget *Subtarget) {}

static SDValue PerformMVEVMULLCombine(SDNode *N, SelectionDAG &DAG,
                                      const ARMSubtarget *Subtarget) {}

static SDValue PerformMULCombine(SDNode *N,
                                 TargetLowering::DAGCombinerInfo &DCI,
                                 const ARMSubtarget *Subtarget) {}

static SDValue CombineANDShift(SDNode *N,
                               TargetLowering::DAGCombinerInfo &DCI,
                               const ARMSubtarget *Subtarget) {}

static SDValue PerformANDCombine(SDNode *N,
                                 TargetLowering::DAGCombinerInfo &DCI,
                                 const ARMSubtarget *Subtarget) {}

static SDValue PerformORCombineToSMULWBT(SDNode *OR,
                                         TargetLowering::DAGCombinerInfo &DCI,
                                         const ARMSubtarget *Subtarget) {}

static SDValue PerformORCombineToBFI(SDNode *N,
                                     TargetLowering::DAGCombinerInfo &DCI,
                                     const ARMSubtarget *Subtarget) {}

static bool isValidMVECond(unsigned CC, bool IsFloat) {}

static ARMCC::CondCodes getVCMPCondCode(SDValue N) {}

static bool CanInvertMVEVCMP(SDValue N) {}

static SDValue PerformORCombine_i1(SDNode *N, SelectionDAG &DAG,
                                   const ARMSubtarget *Subtarget) {}

static SDValue PerformORCombine(SDNode *N,
                                TargetLowering::DAGCombinerInfo &DCI,
                                const ARMSubtarget *Subtarget) {}

static SDValue PerformXORCombine(SDNode *N,
                                 TargetLowering::DAGCombinerInfo &DCI,
                                 const ARMSubtarget *Subtarget) {}

static SDValue ParseBFI(SDNode *N, APInt &ToMask, APInt &FromMask) {}

static bool BitsProperlyConcatenate(const APInt &A, const APInt &B) {}

static SDValue FindBFIToCombineWith(SDNode *N) {}

static SDValue PerformBFICombine(SDNode *N, SelectionDAG &DAG) {}

static SDValue IsCMPZCSINC(SDNode *Cmp, ARMCC::CondCodes &CC) {}

static SDValue PerformCMPZCombine(SDNode *N, SelectionDAG &DAG) {}

static SDValue PerformCSETCombine(SDNode *N, SelectionDAG &DAG) {}

static SDValue PerformVMOVRRDCombine(SDNode *N,
                                     TargetLowering::DAGCombinerInfo &DCI,
                                     const ARMSubtarget *Subtarget) {}

static SDValue PerformVMOVDRRCombine(SDNode *N, SelectionDAG &DAG) {}

static SDValue PerformVMOVhrCombine(SDNode *N,
                                    TargetLowering::DAGCombinerInfo &DCI) {}

static SDValue PerformVMOVrhCombine(SDNode *N, SelectionDAG &DAG) {}

static bool hasNormalLoadOperand(SDNode *N) {}

static SDValue PerformBUILD_VECTORCombine(SDNode *N,
                                          TargetLowering::DAGCombinerInfo &DCI,
                                          const ARMSubtarget *Subtarget) {}

static SDValue
PerformARMBUILD_VECTORCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {}

static SDValue
PerformPREDICATE_CASTCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {}

static SDValue PerformVECTOR_REG_CASTCombine(SDNode *N, SelectionDAG &DAG,
                                             const ARMSubtarget *ST) {}

static SDValue PerformVCMPCombine(SDNode *N, SelectionDAG &DAG,
                                  const ARMSubtarget *Subtarget) {}

static SDValue PerformInsertEltCombine(SDNode *N,
                                       TargetLowering::DAGCombinerInfo &DCI) {}

static SDValue
PerformExtractEltToVMOVRRD(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {}

static SDValue PerformExtractEltCombine(SDNode *N,
                                        TargetLowering::DAGCombinerInfo &DCI,
                                        const ARMSubtarget *ST) {}

static SDValue PerformSignExtendInregCombine(SDNode *N, SelectionDAG &DAG) {}

static SDValue
PerformInsertSubvectorCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {}

static SDValue PerformShuffleVMOVNCombine(ShuffleVectorSDNode *N,
                                          SelectionDAG &DAG) {}

static SDValue PerformVECTOR_SHUFFLECombine(SDNode *N, SelectionDAG &DAG) {}

struct BaseUpdateTarget {};

struct BaseUpdateUser {};

static bool TryCombineBaseUpdate(struct BaseUpdateTarget &Target,
                                 struct BaseUpdateUser &User,
                                 bool SimpleConstIncOnly,
                                 TargetLowering::DAGCombinerInfo &DCI) {}

static unsigned getPointerConstIncrement(unsigned Opcode, SDValue Ptr,
                                         SDValue Inc, const SelectionDAG &DAG) {}

static bool findPointerConstIncrement(SDNode *N, SDValue *Ptr, SDValue *CInc) {}

static bool isValidBaseUpdate(SDNode *N, SDNode *User) {}

static SDValue CombineBaseUpdate(SDNode *N,
                                 TargetLowering::DAGCombinerInfo &DCI) {}

static SDValue PerformVLDCombine(SDNode *N,
                                 TargetLowering::DAGCombinerInfo &DCI) {}

static SDValue PerformMVEVLDCombine(SDNode *N,
                                    TargetLowering::DAGCombinerInfo &DCI) {}

static bool CombineVLDDUP(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {}

static SDValue PerformVDUPLANECombine(SDNode *N,
                                      TargetLowering::DAGCombinerInfo &DCI,
                                      const ARMSubtarget *Subtarget) {}

static SDValue PerformVDUPCombine(SDNode *N, SelectionDAG &DAG,
                                  const ARMSubtarget *Subtarget) {}

static SDValue PerformLOADCombine(SDNode *N,
                                  TargetLowering::DAGCombinerInfo &DCI,
                                  const ARMSubtarget *Subtarget) {}

static SDValue PerformTruncatingStoreCombine(StoreSDNode *St,
                                             SelectionDAG &DAG) {}

static SDValue PerformSplittingToNarrowingStores(StoreSDNode *St,
                                                 SelectionDAG &DAG) {}

static SDValue PerformSplittingMVETruncToNarrowingStores(StoreSDNode *St,
                                                         SelectionDAG &DAG) {}

static SDValue PerformExtractFpToIntStores(StoreSDNode *St, SelectionDAG &DAG) {}

static SDValue PerformSTORECombine(SDNode *N,
                                   TargetLowering::DAGCombinerInfo &DCI,
                                   const ARMSubtarget *Subtarget) {}

static SDValue PerformVCVTCombine(SDNode *N, SelectionDAG &DAG,
                                  const ARMSubtarget *Subtarget) {}

static SDValue PerformFAddVSelectCombine(SDNode *N, SelectionDAG &DAG,
                                         const ARMSubtarget *Subtarget) {}

static SDValue PerformFADDVCMLACombine(SDNode *N, SelectionDAG &DAG) {}

static SDValue PerformFADDCombine(SDNode *N, SelectionDAG &DAG,
                                  const ARMSubtarget *Subtarget) {}

static SDValue PerformVMulVCTPCombine(SDNode *N, SelectionDAG &DAG,
                                      const ARMSubtarget *Subtarget) {}

static SDValue PerformVECREDUCE_ADDCombine(SDNode *N, SelectionDAG &DAG,
                                           const ARMSubtarget *ST) {}

static SDValue PerformReduceShuffleCombine(SDNode *N, SelectionDAG &DAG) {}

static SDValue PerformVMOVNCombine(SDNode *N,
                                   TargetLowering::DAGCombinerInfo &DCI) {}

static SDValue PerformVQMOVNCombine(SDNode *N,
                                    TargetLowering::DAGCombinerInfo &DCI) {}

static SDValue PerformVQDMULHCombine(SDNode *N,
                                     TargetLowering::DAGCombinerInfo &DCI) {}

static SDValue PerformLongShiftCombine(SDNode *N, SelectionDAG &DAG) {}

SDValue ARMTargetLowering::PerformIntrinsicCombine(SDNode *N,
                                                   DAGCombinerInfo &DCI) const {}

static SDValue PerformShiftCombine(SDNode *N,
                                   TargetLowering::DAGCombinerInfo &DCI,
                                   const ARMSubtarget *ST) {}

static SDValue PerformSplittingToWideningLoad(SDNode *N, SelectionDAG &DAG) {}

static SDValue PerformExtendCombine(SDNode *N, SelectionDAG &DAG,
                                    const ARMSubtarget *ST) {}

static SDValue PerformFPExtendCombine(SDNode *N, SelectionDAG &DAG,
                                      const ARMSubtarget *ST) {}

static SDValue PerformMinMaxToSatCombine(SDValue Op, SelectionDAG &DAG,
                                         const ARMSubtarget *Subtarget) {}

static SDValue PerformMinMaxCombine(SDNode *N, SelectionDAG &DAG,
                                    const ARMSubtarget *ST) {}

static const APInt *isPowerOf2Constant(SDValue V) {}

SDValue ARMTargetLowering::PerformCMOVToBFICombine(SDNode *CMOV,
                                                   SelectionDAG &DAG) const {}

static SDValue SearchLoopIntrinsic(SDValue N, ISD::CondCode &CC, int &Imm,
                                   bool &Negate) {}

static SDValue PerformHWLoopCombine(SDNode *N,
                                    TargetLowering::DAGCombinerInfo &DCI,
                                    const ARMSubtarget *ST) {}

SDValue
ARMTargetLowering::PerformBRCONDCombine(SDNode *N, SelectionDAG &DAG) const {}

SDValue
ARMTargetLowering::PerformCMOVCombine(SDNode *N, SelectionDAG &DAG) const {}

static SDValue PerformBITCASTCombine(SDNode *N,
                                     TargetLowering::DAGCombinerInfo &DCI,
                                     const ARMSubtarget *ST) {}

SDValue ARMTargetLowering::PerformMVETruncCombine(
    SDNode *N, TargetLowering::DAGCombinerInfo &DCI) const {}

static SDValue PerformSplittingMVEEXTToWideningLoad(SDNode *N,
                                                    SelectionDAG &DAG) {}

SDValue ARMTargetLowering::PerformMVEExtCombine(
    SDNode *N, TargetLowering::DAGCombinerInfo &DCI) const {}

SDValue ARMTargetLowering::PerformDAGCombine(SDNode *N,
                                             DAGCombinerInfo &DCI) const {}

bool ARMTargetLowering::isDesirableToTransformToIntegerOp(unsigned Opc,
                                                          EVT VT) const {}

bool ARMTargetLowering::allowsMisalignedMemoryAccesses(EVT VT, unsigned,
                                                       Align Alignment,
                                                       MachineMemOperand::Flags,
                                                       unsigned *Fast) const {}

EVT ARMTargetLowering::getOptimalMemOpType(
    const MemOp &Op, const AttributeList &FuncAttributes) const {}

bool ARMTargetLowering::isTruncateFree(Type *SrcTy, Type *DstTy) const {}

bool ARMTargetLowering::isTruncateFree(EVT SrcVT, EVT DstVT) const {}

bool ARMTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {}

bool ARMTargetLowering::isFNegFree(EVT VT) const {}

static bool areExtractExts(Value *Ext1, Value *Ext2) {}

bool ARMTargetLowering::shouldSinkOperands(Instruction *I,
                                           SmallVectorImpl<Use *> &Ops) const {}

Type *ARMTargetLowering::shouldConvertSplatType(ShuffleVectorInst *SVI) const {}

bool ARMTargetLowering::isVectorLoadExtDesirable(SDValue ExtVal) const {}

bool ARMTargetLowering::allowTruncateForTailCall(Type *Ty1, Type *Ty2) const {}

bool ARMTargetLowering::isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
                                                   EVT VT) const {}

static bool isLegalT1AddressImmediate(int64_t V, EVT VT) {}

static bool isLegalT2AddressImmediate(int64_t V, EVT VT,
                                      const ARMSubtarget *Subtarget) {}

static bool isLegalAddressImmediate(int64_t V, EVT VT,
                                    const ARMSubtarget *Subtarget) {}

bool ARMTargetLowering::isLegalT2ScaledAddressingMode(const AddrMode &AM,
                                                      EVT VT) const {}

bool ARMTargetLowering::isLegalT1ScaledAddressingMode(const AddrMode &AM,
                                                      EVT VT) const {}

bool ARMTargetLowering::isLegalAddressingMode(const DataLayout &DL,
                                              const AddrMode &AM, Type *Ty,
                                              unsigned AS,
                                              Instruction *I) const {}

bool ARMTargetLowering::isLegalICmpImmediate(int64_t Imm) const {}

bool ARMTargetLowering::isLegalAddImmediate(int64_t Imm) const {}

bool ARMTargetLowering::isMulAddWithConstProfitable(SDValue AddNode,
                                                    SDValue ConstNode) const {}

static bool getARMIndexedAddressParts(SDNode *Ptr, EVT VT,
                                      bool isSEXTLoad, SDValue &Base,
                                      SDValue &Offset, bool &isInc,
                                      SelectionDAG &DAG) {}

static bool getT2IndexedAddressParts(SDNode *Ptr, EVT VT,
                                     bool isSEXTLoad, SDValue &Base,
                                     SDValue &Offset, bool &isInc,
                                     SelectionDAG &DAG) {}

static bool getMVEIndexedAddressParts(SDNode *Ptr, EVT VT, Align Alignment,
                                      bool isSEXTLoad, bool IsMasked, bool isLE,
                                      SDValue &Base, SDValue &Offset,
                                      bool &isInc, SelectionDAG &DAG) {}

bool
ARMTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base,
                                             SDValue &Offset,
                                             ISD::MemIndexedMode &AM,
                                             SelectionDAG &DAG) const {}

bool ARMTargetLowering::getPostIndexedAddressParts(SDNode *N, SDNode *Op,
                                                   SDValue &Base,
                                                   SDValue &Offset,
                                                   ISD::MemIndexedMode &AM,
                                                   SelectionDAG &DAG) const {}

void ARMTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
                                                      KnownBits &Known,
                                                      const APInt &DemandedElts,
                                                      const SelectionDAG &DAG,
                                                      unsigned Depth) const {}

bool ARMTargetLowering::targetShrinkDemandedConstant(
    SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts,
    TargetLoweringOpt &TLO) const {}

bool ARMTargetLowering::SimplifyDemandedBitsForTargetNode(
    SDValue Op, const APInt &OriginalDemandedBits,
    const APInt &OriginalDemandedElts, KnownBits &Known, TargetLoweringOpt &TLO,
    unsigned Depth) const {}

bool ARMTargetLowering::ExpandInlineAsm(CallInst *CI) const {}

const char *ARMTargetLowering::LowerXConstraint(EVT ConstraintVT) const {}

ARMTargetLowering::ConstraintType
ARMTargetLowering::getConstraintType(StringRef Constraint) const {}

TargetLowering::ConstraintWeight
ARMTargetLowering::getSingleConstraintMatchWeight(
    AsmOperandInfo &info, const char *constraint) const {}

using RCPair = std::pair<unsigned, const TargetRegisterClass *>;

RCPair ARMTargetLowering::getRegForInlineAsmConstraint(
    const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const {}

void ARMTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
                                                     StringRef Constraint,
                                                     std::vector<SDValue> &Ops,
                                                     SelectionDAG &DAG) const {}

static RTLIB::Libcall getDivRemLibcall(
    const SDNode *N, MVT::SimpleValueType SVT) {}

static TargetLowering::ArgListTy getDivRemArgList(
    const SDNode *N, LLVMContext *Context, const ARMSubtarget *Subtarget) {}

SDValue ARMTargetLowering::LowerDivRem(SDValue Op, SelectionDAG &DAG) const {}

SDValue ARMTargetLowering::LowerREM(SDNode *N, SelectionDAG &DAG) const {}

SDValue
ARMTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const {}

SDValue ARMTargetLowering::LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const {}

SDValue ARMTargetLowering::LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const {}

bool
ARMTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {}

bool ARM::isBitFieldInvertedMask(unsigned v) {}

bool ARMTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
                                     bool ForCodeSize) const {}

bool ARMTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
                                           const CallInst &I,
                                           MachineFunction &MF,
                                           unsigned Intrinsic) const {}

bool ARMTargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
                                                          Type *Ty) const {}

bool ARMTargetLowering::isExtractSubvectorCheap(EVT ResVT, EVT SrcVT,
                                                unsigned Index) const {}

Instruction *ARMTargetLowering::makeDMB(IRBuilderBase &Builder,
                                        ARM_MB::MemBOpt Domain) const {}

Instruction *ARMTargetLowering::emitLeadingFence(IRBuilderBase &Builder,
                                                 Instruction *Inst,
                                                 AtomicOrdering Ord) const {}

Instruction *ARMTargetLowering::emitTrailingFence(IRBuilderBase &Builder,
                                                  Instruction *Inst,
                                                  AtomicOrdering Ord) const {}

TargetLoweringBase::AtomicExpansionKind
ARMTargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const {}

TargetLowering::AtomicExpansionKind
ARMTargetLowering::shouldExpandAtomicLoadInIR(LoadInst *LI) const {}

TargetLowering::AtomicExpansionKind
ARMTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {}

TargetLowering::AtomicExpansionKind
ARMTargetLowering::shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const {}

bool ARMTargetLowering::shouldInsertFencesForAtomic(
    const Instruction *I) const {}

bool ARMTargetLowering::useLoadStackGuardNode() const {}

void ARMTargetLowering::insertSSPDeclarations(Module &M) const {}

Value *ARMTargetLowering::getSDagStackGuard(const Module &M) const {}

Function *ARMTargetLowering::getSSPStackGuardCheck(const Module &M) const {}

bool ARMTargetLowering::canCombineStoreAndExtract(Type *VectorTy, Value *Idx,
                                                  unsigned &Cost) const {}

bool ARMTargetLowering::isCheapToSpeculateCttz(Type *Ty) const {}

bool ARMTargetLowering::isCheapToSpeculateCtlz(Type *Ty) const {}

bool ARMTargetLowering::isMaskAndCmp0FoldingBeneficial(
    const Instruction &AndI) const {}

TargetLowering::ShiftLegalizationStrategy
ARMTargetLowering::preferredShiftLegalizationStrategy(
    SelectionDAG &DAG, SDNode *N, unsigned ExpansionFactor) const {}

Value *ARMTargetLowering::emitLoadLinked(IRBuilderBase &Builder, Type *ValueTy,
                                         Value *Addr,
                                         AtomicOrdering Ord) const {}

void ARMTargetLowering::emitAtomicCmpXchgNoStoreLLBalance(
    IRBuilderBase &Builder) const {}

Value *ARMTargetLowering::emitStoreConditional(IRBuilderBase &Builder,
                                               Value *Val, Value *Addr,
                                               AtomicOrdering Ord) const {}

bool ARMTargetLowering::alignLoopsWithOptSize() const {}

unsigned
ARMTargetLowering::getNumInterleavedAccesses(VectorType *VecTy,
                                             const DataLayout &DL) const {}

bool ARMTargetLowering::isLegalInterleavedAccessType(
    unsigned Factor, FixedVectorType *VecTy, Align Alignment,
    const DataLayout &DL) const {}

unsigned ARMTargetLowering::getMaxSupportedInterleaveFactor() const {}

bool ARMTargetLowering::lowerInterleavedLoad(
    LoadInst *LI, ArrayRef<ShuffleVectorInst *> Shuffles,
    ArrayRef<unsigned> Indices, unsigned Factor) const {}

bool ARMTargetLowering::lowerInterleavedStore(StoreInst *SI,
                                              ShuffleVectorInst *SVI,
                                              unsigned Factor) const {}

enum HABaseType {};

static bool isHomogeneousAggregate(Type *Ty, HABaseType &Base,
                                   uint64_t &Members) {}

Align ARMTargetLowering::getABIAlignmentForCallingConv(
    Type *ArgTy, const DataLayout &DL) const {}

bool ARMTargetLowering::functionArgumentNeedsConsecutiveRegisters(
    Type *Ty, CallingConv::ID CallConv, bool isVarArg,
    const DataLayout &DL) const {}

Register ARMTargetLowering::getExceptionPointerRegister(
    const Constant *PersonalityFn) const {}

Register ARMTargetLowering::getExceptionSelectorRegister(
    const Constant *PersonalityFn) const {}

void ARMTargetLowering::initializeSplitCSR(MachineBasicBlock *Entry) const {}

void ARMTargetLowering::insertCopiesSplitCSR(
    MachineBasicBlock *Entry,
    const SmallVectorImpl<MachineBasicBlock *> &Exits) const {}

void ARMTargetLowering::finalizeLowering(MachineFunction &MF) const {}

bool ARMTargetLowering::isComplexDeinterleavingSupported() const {}

bool ARMTargetLowering::isComplexDeinterleavingOperationSupported(
    ComplexDeinterleavingOperation Operation, Type *Ty) const {}

Value *ARMTargetLowering::createComplexDeinterleavingIR(
    IRBuilderBase &B, ComplexDeinterleavingOperation OperationType,
    ComplexDeinterleavingRotation Rotation, Value *InputA, Value *InputB,
    Value *Accumulator) const {}