//===-- AArch64ISelDAGToDAG.cpp - A dag to dag inst selector for AArch64 --===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines an instruction selector for the AArch64 target.
//
//===----------------------------------------------------------------------===//

#include "AArch64MachineFunctionInfo.h"
#include "AArch64TargetMachine.h"
#include "MCTargetDesc/AArch64AddressingModes.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/IR/Function.h" // To access function attributes.
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsAArch64.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

#define DEBUG_TYPE "aarch64-isel"
#define PASS_NAME "AArch64 Instruction Selection"

//===--------------------------------------------------------------------===//
/// AArch64DAGToDAGISel - AArch64 specific code to select AArch64 machine
/// instructions for SelectionDAG operations.
///
namespace {

class AArch64DAGToDAGISel : public SelectionDAGISel {};

class AArch64DAGToDAGISelLegacy : public SelectionDAGISelLegacy {};
} // end anonymous namespace

char AArch64DAGToDAGISelLegacy::ID = 0;

INITIALIZE_PASS(AArch64DAGToDAGISelLegacy, DEBUG_TYPE, PASS_NAME, false, false)

/// isIntImmediate - This method tests to see if the node is a constant
/// operand. If so, Imm will receive the value.
static bool isIntImmediate(const SDNode *N, uint64_t &Imm) {}

// isIntImmediate - This method tests to see if the operand is a constant.
// If so, Imm will receive the value.
static bool isIntImmediate(SDValue N, uint64_t &Imm) {}

// isOpcWithIntImmediate - This method tests to see if the node has the given
// opcode and an immediate integer right operand.
// If so, Imm will receive the immediate value.
static bool isOpcWithIntImmediate(const SDNode *N, unsigned Opc,
                                  uint64_t &Imm) {}
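// E.g., for N = (add x, 7), isOpcWithIntImmediate(N, ISD::ADD, Imm) returns
// true and sets Imm to 7.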

// isIntImmediateEq - This method tests to see if N is a constant operand
// equal to 'ImmExpected'.
#ifndef NDEBUG
static bool isIntImmediateEq(SDValue N, const uint64_t ImmExpected) {
  uint64_t Imm;
  if (!isIntImmediate(N.getNode(), Imm))
    return false;
  return Imm == ImmExpected;
}
#endif

bool AArch64DAGToDAGISel::SelectInlineAsmMemoryOperand(
    const SDValue &Op, const InlineAsm::ConstraintCode ConstraintID,
    std::vector<SDValue> &OutOps) {}

/// SelectArithImmed - Select an immediate value that can be represented as
/// a 12-bit value shifted left by either 0 or 12.  If so, return true with
/// Val set to the 12-bit value and Shift set to the shifter operand.
bool AArch64DAGToDAGISel::SelectArithImmed(SDValue N, SDValue &Val,
                                           SDValue &Shift) {}
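// E.g., #0xabc is selectable with Shift == 0 and #0xabc000 with Shift == 12,
// while #0xabc00 fits neither form and is rejected.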

/// SelectNegArithImmed - As above, but negates the value before trying to
/// select it.
bool AArch64DAGToDAGISel::SelectNegArithImmed(SDValue N, SDValue &Val,
                                              SDValue &Shift) {}
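// E.g., (add x, #-5) has no valid arithmetic immediate, but the negated value
// #5 does, allowing the operation to be selected as a SUB.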

/// getShiftTypeForNode - Translate a shift node to the corresponding
/// ShiftType value.
static AArch64_AM::ShiftExtendType getShiftTypeForNode(SDValue N) {}

/// Determine whether it is worth it to fold SHL into the addressing
/// mode.
static bool isWorthFoldingSHL(SDValue V) {}

/// Determine whether it is worth folding V into an extended register
/// addressing mode.
bool AArch64DAGToDAGISel::isWorthFoldingAddr(SDValue V, unsigned Size) const {}

/// and (shl/srl/sra, x, c), mask --> shl (srl/sra, x, c1), c2
/// to enable selection of additional shifted-register operands
bool AArch64DAGToDAGISel::SelectShiftedRegisterFromAnd(SDValue N, SDValue &Reg,
                                                       SDValue &Shift) {}
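// E.g., for an i32 value x, (and (srl x, 2), 0xFFFFFFFC) is equivalent to
// (shl (srl x, 4), 2); the outer shl then folds away as an "LSL #2"
// shifted-register operand.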

/// getExtendTypeForNode - Translate an extend node to the corresponding
/// ExtendType value.
static AArch64_AM::ShiftExtendType
getExtendTypeForNode(SDValue N, bool IsLoadStore = false) {}
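// E.g., (sext_inreg x, i8) corresponds to SXTB and (and x, 0xffff) to UXTH.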

/// Determine whether it is worth folding V into an extended register of an
/// Add/Sub. LSL means we are folding into an `add w0, w1, w2, lsl #N`
/// instruction, and the shift should be treated as worth folding even if it
/// has multiple uses.
bool AArch64DAGToDAGISel::isWorthFoldingALU(SDValue V, bool LSL) const {}

/// SelectShiftedRegister - Select a "shifted register" operand.  If the value
/// is not shifted, set the Shift operand to default of "LSL 0".  The logical
/// instructions allow the shifted register to be rotated, but the arithmetic
/// instructions do not.  The AllowROR parameter specifies whether ROR is
/// supported.
bool AArch64DAGToDAGISel::SelectShiftedRegister(SDValue N, bool AllowROR,
                                                SDValue &Reg, SDValue &Shift) {}
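// E.g., "orr w0, w1, w2, ror #8" is a valid logical form, but ADD/SUB accept
// only LSL/LSR/ASR, so arithmetic callers pass AllowROR == false.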

/// Instructions that accept extend modifiers like UXTW expect the register
/// being extended to be a GPR32, but the incoming DAG might be acting on a
/// GPR64 (either via SEXT_INREG or AND). Extract the appropriate low bits if
/// this is the case.
static SDValue narrowIfNeeded(SelectionDAG *CurDAG, SDValue N) {}

// Returns a suitable CNT/INC/DEC/RDVL multiplier to calculate VSCALE*N.
template<signed Low, signed High, signed Scale>
bool AArch64DAGToDAGISel::SelectRDVLImm(SDValue N, SDValue &Imm) {}

/// SelectArithExtendedRegister - Select an "extended register" operand.  This
/// operand folds in an extend followed by an optional left shift.
bool AArch64DAGToDAGISel::SelectArithExtendedRegister(SDValue N, SDValue &Reg,
                                                      SDValue &Shift) {}

/// SelectArithUXTXRegister - Select a "UXTX register" operand. This operand
/// is used by instructions that take an SP operand.
bool AArch64DAGToDAGISel::SelectArithUXTXRegister(SDValue N, SDValue &Reg,
                                                  SDValue &Shift) {}

/// If there's a use of this ADDlow that's not itself a load/store then we'll
/// need to create a real ADD instruction from it anyway and there's no point in
/// folding it into the mem op. Theoretically, it shouldn't matter, but there's
/// a single pseudo-instruction for an ADRP/ADD pair so over-aggressive folding
/// leads to duplicated ADRP instructions.
static bool isWorthFoldingADDlow(SDValue N) {}

/// Check if the immediate offset is valid as a scaled immediate.
static bool isValidAsScaledImmediate(int64_t Offset, unsigned Range,
                                     unsigned Size) {}
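// E.g., for a 4-byte access with a 12-bit unsigned range, offsets 0, 4, ...,
// 16380 are valid; 6 is rejected as misaligned and 16384 as out of range.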

/// SelectAddrModeIndexedBitWidth - Select a "register plus scaled (un)signed BW-bit
/// immediate" address.  The "Size" argument is the size in bytes of the memory
/// reference, which determines the scale.
bool AArch64DAGToDAGISel::SelectAddrModeIndexedBitWidth(SDValue N, bool IsSignedImm,
                                                        unsigned BW, unsigned Size,
                                                        SDValue &Base,
                                                        SDValue &OffImm) {}

/// SelectAddrModeIndexed - Select a "register plus scaled unsigned 12-bit
/// immediate" address.  The "Size" argument is the size in bytes of the memory
/// reference, which determines the scale.
bool AArch64DAGToDAGISel::SelectAddrModeIndexed(SDValue N, unsigned Size,
                                              SDValue &Base, SDValue &OffImm) {}

/// SelectAddrModeUnscaled - Select a "register plus unscaled signed 9-bit
/// immediate" address.  This should only match when there is an offset that
/// is not valid for a scaled immediate addressing mode.  The "Size" argument
/// is the size in bytes of the memory reference, which is needed here to know
/// what is valid for a scaled immediate.
bool AArch64DAGToDAGISel::SelectAddrModeUnscaled(SDValue N, unsigned Size,
                                                 SDValue &Base,
                                                 SDValue &OffImm) {}
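// E.g., a 4-byte load at offset -3 cannot use the scaled form, but -3 fits
// the signed 9-bit range [-256, 255] of the unscaled (LDUR-style) encoding.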

static SDValue Widen(SelectionDAG *CurDAG, SDValue N) {}

/// Check if the given SHL node (\p N) can be used to form an
/// extended register for an addressing mode.
bool AArch64DAGToDAGISel::SelectExtendedSHL(SDValue N, unsigned Size,
                                            bool WantExtend, SDValue &Offset,
                                            SDValue &SignExtend) {}

bool AArch64DAGToDAGISel::SelectAddrModeWRO(SDValue N, unsigned Size,
                                            SDValue &Base, SDValue &Offset,
                                            SDValue &SignExtend,
                                            SDValue &DoShift) {}

// Check if the given immediate is preferred by ADD. If an immediate can be
// encoded in an ADD, or if it can be encoded in an "ADD LSL #12" and cannot
// be encoded by a single MOVZ, return true.
static bool isPreferredADD(int64_t ImmOff) {}
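// E.g., 0xabc000 is encodable as "ADD ..., LSL #12" and needs more than one
// MOV-family instruction otherwise, so it is preferred; 0x20000 is a single
// "MOVZ #2, LSL #16", so it is not.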

bool AArch64DAGToDAGISel::SelectAddrModeXRO(SDValue N, unsigned Size,
                                            SDValue &Base, SDValue &Offset,
                                            SDValue &SignExtend,
                                            SDValue &DoShift) {}

SDValue AArch64DAGToDAGISel::createDTuple(ArrayRef<SDValue> Regs) {}

SDValue AArch64DAGToDAGISel::createQTuple(ArrayRef<SDValue> Regs) {}

SDValue AArch64DAGToDAGISel::createZTuple(ArrayRef<SDValue> Regs) {}

SDValue AArch64DAGToDAGISel::createZMulTuple(ArrayRef<SDValue> Regs) {}

SDValue AArch64DAGToDAGISel::createTuple(ArrayRef<SDValue> Regs,
                                         const unsigned RegClassIDs[],
                                         const unsigned SubRegs[]) {}

void AArch64DAGToDAGISel::SelectTable(SDNode *N, unsigned NumVecs, unsigned Opc,
                                      bool isExt) {}

static std::tuple<SDValue, SDValue>
extractPtrauthBlendDiscriminators(SDValue Disc, SelectionDAG *DAG) {}

void AArch64DAGToDAGISel::SelectPtrauthAuth(SDNode *N) {}

void AArch64DAGToDAGISel::SelectPtrauthResign(SDNode *N) {}

bool AArch64DAGToDAGISel::tryIndexedLoad(SDNode *N) {}

void AArch64DAGToDAGISel::SelectLoad(SDNode *N, unsigned NumVecs, unsigned Opc,
                                     unsigned SubRegIdx) {}

void AArch64DAGToDAGISel::SelectPostLoad(SDNode *N, unsigned NumVecs,
                                         unsigned Opc, unsigned SubRegIdx) {}

/// Optimize \p OldBase and \p OldOffset by selecting the best addressing
/// mode. Returns a tuple consisting of an Opcode, an SDValue representing the
/// new Base and an SDValue representing the new offset.
std::tuple<unsigned, SDValue, SDValue>
AArch64DAGToDAGISel::findAddrModeSVELoadStore(SDNode *N, unsigned Opc_rr,
                                              unsigned Opc_ri,
                                              const SDValue &OldBase,
                                              const SDValue &OldOffset,
                                              unsigned Scale) {}

enum class SelectTypeKind {};

/// This function selects an opcode from a list of opcodes, which is
/// expected to contain the opcodes for { 8-bit, 16-bit, 32-bit, 64-bit }
/// element types, in this order.
template <SelectTypeKind Kind>
static unsigned SelectOpcodeFromVT(EVT VT, ArrayRef<unsigned> Opcodes) {}
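// E.g., given Opcodes == { Op8, Op16, Op32, Op64 }, a vector with i16
// elements selects Op16.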

// This function is almost identical to SelectWhilePair, but has an
// extra check on the range of the immediate operand.
// TODO: Merge these two functions together at some point?
void AArch64DAGToDAGISel::SelectPExtPair(SDNode *N, unsigned Opc) {}

void AArch64DAGToDAGISel::SelectWhilePair(SDNode *N, unsigned Opc) {}

void AArch64DAGToDAGISel::SelectCVTIntrinsic(SDNode *N, unsigned NumVecs,
                                             unsigned Opcode) {}

void AArch64DAGToDAGISel::SelectDestructiveMultiIntrinsic(SDNode *N,
                                                          unsigned NumVecs,
                                                          bool IsZmMulti,
                                                          unsigned Opcode,
                                                          bool HasPred) {}

void AArch64DAGToDAGISel::SelectPredicatedLoad(SDNode *N, unsigned NumVecs,
                                               unsigned Scale, unsigned Opc_ri,
                                               unsigned Opc_rr, bool IsIntr) {}

void AArch64DAGToDAGISel::SelectContiguousMultiVectorLoad(SDNode *N,
                                                          unsigned NumVecs,
                                                          unsigned Scale,
                                                          unsigned Opc_ri,
                                                          unsigned Opc_rr) {}

void AArch64DAGToDAGISel::SelectFrintFromVT(SDNode *N, unsigned NumVecs,
                                            unsigned Opcode) {}

void AArch64DAGToDAGISel::SelectMultiVectorLuti(SDNode *Node,
                                                unsigned NumOutVecs,
                                                unsigned Opc, uint32_t MaxImm) {}

void AArch64DAGToDAGISel::SelectClamp(SDNode *N, unsigned NumVecs,
                                      unsigned Op) {}

static bool SelectSMETile(unsigned &BaseReg, unsigned TileNum) {}

template <unsigned MaxIdx, unsigned Scale>
void AArch64DAGToDAGISel::SelectMultiVectorMove(SDNode *N, unsigned NumVecs,
                                                unsigned BaseReg, unsigned Op) {}

void AArch64DAGToDAGISel::SelectMultiVectorMoveZ(SDNode *N, unsigned NumVecs,
                                                 unsigned Op, unsigned MaxIdx,
                                                 unsigned Scale, unsigned BaseReg) {}

void AArch64DAGToDAGISel::SelectUnaryMultiIntrinsic(SDNode *N,
                                                    unsigned NumOutVecs,
                                                    bool IsTupleInput,
                                                    unsigned Opc) {}

void AArch64DAGToDAGISel::SelectStore(SDNode *N, unsigned NumVecs,
                                      unsigned Opc) {}

void AArch64DAGToDAGISel::SelectPredicatedStore(SDNode *N, unsigned NumVecs,
                                                unsigned Scale, unsigned Opc_rr,
                                                unsigned Opc_ri) {}

bool AArch64DAGToDAGISel::SelectAddrModeFrameIndexSVE(SDValue N, SDValue &Base,
                                                      SDValue &OffImm) {}

void AArch64DAGToDAGISel::SelectPostStore(SDNode *N, unsigned NumVecs,
                                          unsigned Opc) {}

namespace {
/// WidenVector - Given a value in the V64 register class, produce the
/// equivalent value in the V128 register class.
class WidenVector {};
} // namespace

/// NarrowVector - Given a value in the V128 register class, produce the
/// equivalent value in the V64 register class.
static SDValue NarrowVector(SDValue V128Reg, SelectionDAG &DAG) {}

void AArch64DAGToDAGISel::SelectLoadLane(SDNode *N, unsigned NumVecs,
                                         unsigned Opc) {}

void AArch64DAGToDAGISel::SelectPostLoadLane(SDNode *N, unsigned NumVecs,
                                             unsigned Opc) {}

void AArch64DAGToDAGISel::SelectStoreLane(SDNode *N, unsigned NumVecs,
                                          unsigned Opc) {}

void AArch64DAGToDAGISel::SelectPostStoreLane(SDNode *N, unsigned NumVecs,
                                              unsigned Opc) {}

static bool isBitfieldExtractOpFromAnd(SelectionDAG *CurDAG, SDNode *N,
                                       unsigned &Opc, SDValue &Opd0,
                                       unsigned &LSB, unsigned &MSB,
                                       unsigned NumberOfIgnoredLowBits,
                                       bool BiggerPattern) {}

static bool isBitfieldExtractOpFromSExtInReg(SDNode *N, unsigned &Opc,
                                             SDValue &Opd0, unsigned &Immr,
                                             unsigned &Imms) {}

static bool isSeveralBitsExtractOpFromShr(SDNode *N, unsigned &Opc,
                                          SDValue &Opd0, unsigned &LSB,
                                          unsigned &MSB) {}

static bool isBitfieldExtractOpFromShr(SDNode *N, unsigned &Opc, SDValue &Opd0,
                                       unsigned &Immr, unsigned &Imms,
                                       bool BiggerPattern) {}

bool AArch64DAGToDAGISel::tryBitfieldExtractOpFromSExt(SDNode *N) {}

static bool isBitfieldExtractOp(SelectionDAG *CurDAG, SDNode *N, unsigned &Opc,
                                SDValue &Opd0, unsigned &Immr, unsigned &Imms,
                                unsigned NumberOfIgnoredLowBits = 0,
                                bool BiggerPattern = false) {}

bool AArch64DAGToDAGISel::tryBitfieldExtractOp(SDNode *N) {}

/// Does DstMask form a complementary pair with the mask provided by
/// BitsToBeInserted, suitable for use in a BFI instruction? Roughly speaking,
/// this asks whether DstMask zeroes precisely those bits that will be set by
/// the other half.
static bool isBitfieldDstMask(uint64_t DstMask, const APInt &BitsToBeInserted,
                              unsigned NumberOfIgnoredHighBits, EVT VT) {}
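// E.g., for a 32-bit VT, DstMask == 0xffff0000 is complementary to an
// insertion that writes exactly bits [15:0].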

// Look for bits that will be useful for later uses.
// A bit is considered useless as soon as it is dropped and never used
// before it has been dropped.
// E.g., looking for the useful bits of x:
// 1. y = x & 0x7
// 2. z = y >> 2
// After #1, the useful bits of x are 0x7, and they live through y.
// After #2, the useful bits of x are 0x4.
// However, if x is used by an unpredictable instruction, then all of its
// bits are useful.
// E.g.
// 1. y = x & 0x7
// 2. z = y >> 2
// 3. str x, [@x]
static void getUsefulBits(SDValue Op, APInt &UsefulBits, unsigned Depth = 0);

static void getUsefulBitsFromAndWithImmediate(SDValue Op, APInt &UsefulBits,
                                              unsigned Depth) {}

static void getUsefulBitsFromBitfieldMoveOpd(SDValue Op, APInt &UsefulBits,
                                             uint64_t Imm, uint64_t MSB,
                                             unsigned Depth) {}

static void getUsefulBitsFromUBFM(SDValue Op, APInt &UsefulBits,
                                  unsigned Depth) {}

static void getUsefulBitsFromOrWithShiftedReg(SDValue Op, APInt &UsefulBits,
                                              unsigned Depth) {}

static void getUsefulBitsFromBFM(SDValue Op, SDValue Orig, APInt &UsefulBits,
                                 unsigned Depth) {}

static void getUsefulBitsForUse(SDNode *UserNode, APInt &UsefulBits,
                                SDValue Orig, unsigned Depth) {}

static void getUsefulBits(SDValue Op, APInt &UsefulBits, unsigned Depth) {}

/// Create a machine node performing a notional SHL of Op by ShlAmount. If
/// ShlAmount is negative, do a (logical) right-shift instead. If ShlAmount is
/// 0, return Op unchanged.
static SDValue getLeftShift(SelectionDAG *CurDAG, SDValue Op, int ShlAmount) {}

// For bit-field-positioning pattern "(and (shl VAL, N), ShiftedMask)".
static bool isBitfieldPositioningOpFromAnd(SelectionDAG *CurDAG, SDValue Op,
                                           bool BiggerPattern,
                                           const uint64_t NonZeroBits,
                                           SDValue &Src, int &DstLSB,
                                           int &Width);

// For bit-field-positioning pattern "shl VAL, N)".
static bool isBitfieldPositioningOpFromShl(SelectionDAG *CurDAG, SDValue Op,
                                           bool BiggerPattern,
                                           const uint64_t NonZeroBits,
                                           SDValue &Src, int &DstLSB,
                                           int &Width);

/// Does this tree qualify as an attempt to move a bitfield into position,
/// essentially "(and (shl VAL, N), Mask)" or "(shl VAL, N)"?
static bool isBitfieldPositioningOp(SelectionDAG *CurDAG, SDValue Op,
                                    bool BiggerPattern, SDValue &Src,
                                    int &DstLSB, int &Width) {}

static bool isBitfieldPositioningOpFromAnd(SelectionDAG *CurDAG, SDValue Op,
                                           bool BiggerPattern,
                                           const uint64_t NonZeroBits,
                                           SDValue &Src, int &DstLSB,
                                           int &Width) {}

// For the node (shl (and val, mask), N), returns true if the node is
// equivalent to UBFIZ.
static bool isSeveralBitsPositioningOpFromShl(const uint64_t ShlImm, SDValue Op,
                                              SDValue &Src, int &DstLSB,
                                              int &Width) {}
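// E.g., (shl (and x, 0xff), 8) moves an 8-bit field to bits [15:8] and is
// equivalent to "UBFIZ dst, x, #8, #8" (Src = x, DstLSB = 8, Width = 8).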

static bool isBitfieldPositioningOpFromShl(SelectionDAG *CurDAG, SDValue Op,
                                           bool BiggerPattern,
                                           const uint64_t NonZeroBits,
                                           SDValue &Src, int &DstLSB,
                                           int &Width) {}

static bool isShiftedMask(uint64_t Mask, EVT VT) {}

// Generate a BFI/BFXIL from 'or (and X, MaskImm), OrImm' iff the value being
// inserted only sets known zero bits.
static bool tryBitfieldInsertOpFromOrAndImm(SDNode *N, SelectionDAG *CurDAG) {}

static bool isWorthFoldingIntoOrrWithShift(SDValue Dst, SelectionDAG *CurDAG,
                                           SDValue &ShiftedOperand,
                                           uint64_t &EncodedShiftImm) {}

// Given an 'ISD::OR' node that is going to be selected as BFM, analyze
// the operands and select it to AArch64::ORR with shifted registers if
// that's more efficient. Returns true iff selection to AArch64::ORR happens.
static bool tryOrrWithShift(SDNode *N, SDValue OrOpd0, SDValue OrOpd1,
                            SDValue Src, SDValue Dst, SelectionDAG *CurDAG,
                            const bool BiggerPattern) {}

static bool tryBitfieldInsertOpFromOr(SDNode *N, const APInt &UsefulBits,
                                      SelectionDAG *CurDAG) {}

bool AArch64DAGToDAGISel::tryBitfieldInsertOp(SDNode *N) {}

/// tryBitfieldInsertInZeroOp - Match a UBFIZ instruction that is the
/// equivalent of a left shift by a constant amount followed by an and masking
/// out a contiguous set of bits.
bool AArch64DAGToDAGISel::tryBitfieldInsertInZeroOp(SDNode *N) {}

/// tryShiftAmountMod - Take advantage of built-in mod of shift amount in
/// variable shift/rotate instructions.
bool AArch64DAGToDAGISel::tryShiftAmountMod(SDNode *N) {}

static bool checkCVTFixedPointOperandWithFBits(SelectionDAG *CurDAG, SDValue N,
                                               SDValue &FixedPos,
                                               unsigned RegWidth,
                                               bool isReciprocal) {}
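// E.g., (fp_to_sint (fmul X, 16.0)) can use a fixed-point FCVTZS with #4
// fractional bits, since scaling by 2^4 is implicit in the conversion; the
// reciprocal form instead matches constants like 1/16 for the int-to-FP
// direction.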

bool AArch64DAGToDAGISel::SelectCVTFixedPosOperand(SDValue N, SDValue &FixedPos,
                                                   unsigned RegWidth) {}

bool AArch64DAGToDAGISel::SelectCVTFixedPosRecipOperand(SDValue N,
                                                        SDValue &FixedPos,
                                                        unsigned RegWidth) {}

// Inspects a register string of the form op0:op1:CRn:CRm:op2, extracts the
// fields of the string, obtains the integer values from them, and combines
// these into a single value to be used in the MRS/MSR instruction.
static int getIntOperandFromRegisterString(StringRef RegString) {}
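// E.g., the string "1:2:7:14:0" yields op0 = 1, op1 = 2, CRn = 7, CRm = 14,
// op2 = 0; these are packed into the 16-bit system-register encoding as
// (op0 << 14) | (op1 << 11) | (CRn << 7) | (CRm << 3) | op2 (bit layout
// shown for illustration).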

// Lower the read_register intrinsic to an MRS instruction node if the special
// register string argument is either of the form detailed in the ACLE (the
// form described in getIntOperandFromRegisterString) or is a named register
// known by the MRS SysReg mapper.
bool AArch64DAGToDAGISel::tryReadRegister(SDNode *N) {}

// Lower the write_register intrinsic to an MSR instruction node if the special
// register string argument is either of the form detailed in the ACLE (the
// form described in getIntOperandFromRegisterString) or is a named register
// known by the MSR SysReg mapper.
bool AArch64DAGToDAGISel::tryWriteRegister(SDNode *N) {}

/// We've got special pseudo-instructions for these compare-and-swap
/// operations.
bool AArch64DAGToDAGISel::SelectCMP_SWAP(SDNode *N) {}

bool AArch64DAGToDAGISel::SelectSVEAddSubImm(SDValue N, MVT VT, SDValue &Imm,
                                             SDValue &Shift) {}

bool AArch64DAGToDAGISel::SelectSVEAddSubSSatImm(SDValue N, MVT VT,
                                                 SDValue &Imm, SDValue &Shift,
                                                 bool Negate) {}

bool AArch64DAGToDAGISel::SelectSVECpyDupImm(SDValue N, MVT VT, SDValue &Imm,
                                             SDValue &Shift) {}

bool AArch64DAGToDAGISel::SelectSVESignedArithImm(SDValue N, SDValue &Imm) {}

bool AArch64DAGToDAGISel::SelectSVEArithImm(SDValue N, MVT VT, SDValue &Imm) {}

bool AArch64DAGToDAGISel::SelectSVELogicalImm(SDValue N, MVT VT, SDValue &Imm,
                                              bool Invert) {}

// SVE shift intrinsics allow shift amounts larger than the element's bitwidth.
// Rather than attempt to normalise everything, we can sometimes saturate the
// shift amount during selection. This function also allows for consistent
// isel patterns by ensuring the resulting "Imm" node is of the i32 type
// required by the instructions.
bool AArch64DAGToDAGISel::SelectSVEShiftImm(SDValue N, uint64_t Low,
                                            uint64_t High, bool AllowSaturation,
                                            SDValue &Imm) {}
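// E.g., with Low == 1 and High == 8 (an i8 element shift), a requested shift
// of 200 is clamped to 8 when AllowSaturation is true; without saturation the
// match simply fails.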

bool AArch64DAGToDAGISel::trySelectStackSlotTagP(SDNode *N) {}

void AArch64DAGToDAGISel::SelectTagP(SDNode *N) {}

bool AArch64DAGToDAGISel::trySelectCastFixedLengthToScalableVector(SDNode *N) {}

bool AArch64DAGToDAGISel::trySelectCastScalableToFixedLengthVector(SDNode *N) {}

bool AArch64DAGToDAGISel::trySelectXAR(SDNode *N) {}

void AArch64DAGToDAGISel::Select(SDNode *Node) {}

/// createAArch64ISelDag - This pass converts a legalized DAG into an
/// AArch64-specific DAG, ready for instruction scheduling.
FunctionPass *llvm::createAArch64ISelDag(AArch64TargetMachine &TM,
                                         CodeGenOptLevel OptLevel) {}

/// When \p PredVT is a scalable vector predicate in the form
/// MVT::nx<M>xi1, it builds the corresponding scalable vector of
/// integers MVT::nx<M>xi<bits> s.t. M x bits = 128. When targeting
/// structured vectors (NumVec > 1), the output data type is
/// MVT::nx<M*NumVec>xi<bits> s.t. M x bits = 128. If the input
/// PredVT is not in the form MVT::nx<M>xi1, it returns an invalid
/// EVT.
static EVT getPackedVectorTypeFromPredicateType(LLVMContext &Ctx, EVT PredVT,
                                                unsigned NumVec) {}

/// Return the EVT of the data associated with a memory operation in \p
/// Root. If such an EVT cannot be retrieved, it returns an invalid EVT.
static EVT getMemVTFromNode(LLVMContext &Ctx, SDNode *Root) {}

/// SelectAddrModeIndexedSVE - Attempt selection of the addressing mode:
/// Base + OffImm * sizeof(MemVT) for Min <= OffImm <= Max
/// where Root is the memory access using N for its address.
template <int64_t Min, int64_t Max>
bool AArch64DAGToDAGISel::SelectAddrModeIndexedSVE(SDNode *Root, SDValue N,
                                                   SDValue &Base,
                                                   SDValue &OffImm) {}
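// E.g., instantiated with Min = -8 and Max = 7 this matches the SVE
// "[Base, #imm, MUL VL]" forms, whose signed 4-bit immediate is scaled by
// the vector length.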

/// Select register plus register addressing mode for SVE, with scaled
/// offset.
bool AArch64DAGToDAGISel::SelectSVERegRegAddrMode(SDValue N, unsigned Scale,
                                                  SDValue &Base,
                                                  SDValue &Offset) {}

bool AArch64DAGToDAGISel::SelectAllActivePredicate(SDValue N) {}

bool AArch64DAGToDAGISel::SelectAnyPredicate(SDValue N) {}

bool AArch64DAGToDAGISel::SelectSMETileSlice(SDValue N, unsigned MaxSize,
                                             SDValue &Base, SDValue &Offset,
                                             unsigned Scale) {}