#include "AArch64ISelLowering.h"
#include "AArch64CallingConvention.h"
#include "AArch64ExpandImm.h"
#include "AArch64MachineFunctionInfo.h"
#include "AArch64PerfectShuffle.h"
#include "AArch64RegisterInfo.h"
#include "AArch64Subtarget.h"
#include "MCTargetDesc/AArch64AddressingModes.h"
#include "Utils/AArch64BaseInfo.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/ObjCARCUtil.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/ComplexDeinterleavingPass.h"
#include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RuntimeLibcallUtil.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/TargetCallingConv.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/CodeGenTypes/MachineValueType.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsAArch64.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/Value.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/InstructionCost.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/SipHash.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/TargetParser/Triple.h"
#include <algorithm>
#include <bitset>
#include <cassert>
#include <cctype>
#include <cstdint>
#include <cstdlib>
#include <iterator>
#include <limits>
#include <optional>
#include <tuple>
#include <utility>
#include <vector>
using namespace llvm;
using namespace llvm::PatternMatch;
#define DEBUG_TYPE …
STATISTIC(NumTailCalls, "Number of tail calls");
STATISTIC(NumShiftInserts, "Number of vector shift inserts");
STATISTIC(NumOptimizedImms, "Number of times immediates were optimized");
cl::opt<bool> EnableAArch64ELFLocalDynamicTLSGeneration(
"aarch64-elf-ldtls-generation", cl::Hidden,
cl::desc("Allow AArch64 Local Dynamic TLS code generation"),
cl::init(false));
static cl::opt<bool>
EnableOptimizeLogicalImm("aarch64-enable-logical-imm", cl::Hidden,
cl::desc("Enable AArch64 logical imm instruction "
"optimization"),
cl::init(true));
static cl::opt<bool>
EnableCombineMGatherIntrinsics("aarch64-enable-mgather-combine", cl::Hidden,
cl::desc("Combine extends of AArch64 masked "
"gather intrinsics"),
cl::init(true));
static cl::opt<bool> EnableExtToTBL("aarch64-enable-ext-to-tbl", cl::Hidden,
cl::desc("Combine ext and trunc to TBL"),
cl::init(true));
static cl::opt<unsigned> MaxXors("aarch64-max-xors", cl::init(16), cl::Hidden,
cl::desc("Maximum of xors"));
cl::opt<bool> EnableSVEGISel(
"aarch64-enable-gisel-sve", cl::Hidden,
cl::desc("Enable / disable SVE scalable vectors in Global ISel"),
cl::init(false));
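/// Value type used for condition codes.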
static const MVT MVT_CC = …;
static const MCPhysReg GPRArgRegs[] = …;
static const MCPhysReg FPRArgRegs[] = …;
ArrayRef<MCPhysReg> llvm::AArch64::getGPRArgRegs() { … }
ArrayRef<MCPhysReg> llvm::AArch64::getFPRArgRegs() { … }
static inline EVT getPackedSVEVectorVT(EVT VT) { … }
static inline EVT getPackedSVEVectorVT(ElementCount EC) { … }
static inline EVT getPromotedVTForPredicate(EVT VT) { … }
static inline bool isPackedVectorType(EVT VT, SelectionDAG &DAG) { … }
static bool isMergePassthruOpcode(unsigned Opc) { … }
static bool isZeroingInactiveLanes(SDValue Op) { … }
static std::tuple<SDValue, SDValue>
extractPtrauthBlendDiscriminators(SDValue Disc, SelectionDAG *DAG) { … }
AArch64TargetLowering::AArch64TargetLowering(const TargetMachine &TM,
const AArch64Subtarget &STI)
: … { … }
void AArch64TargetLowering::addTypeForNEON(MVT VT) { … }
bool AArch64TargetLowering::shouldExpandGetActiveLaneMask(EVT ResVT,
EVT OpVT) const { … }
bool AArch64TargetLowering::shouldExpandPartialReductionIntrinsic(
const IntrinsicInst *I) const { … }
bool AArch64TargetLowering::shouldExpandCttzElements(EVT VT) const { … }
void AArch64TargetLowering::addTypeForFixedLengthSVE(MVT VT) { … }
void AArch64TargetLowering::addDRType(MVT VT) { … }
void AArch64TargetLowering::addQRType(MVT VT) { … }
EVT AArch64TargetLowering::getSetCCResultType(const DataLayout &,
LLVMContext &C, EVT VT) const { … }
static bool isIntImmediate(const SDNode *N, uint64_t &Imm) { … }
static bool isOpcWithIntImmediate(const SDNode *N, unsigned Opc,
uint64_t &Imm) { … }
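// AArch64 logical instructions (AND/ORR/EOR) can only encode immediates that
// form a repeating pattern of rotated runs of set bits. optimizeLogicalImm
// tries to flip bits outside the demanded mask so that an otherwise
// unencodable constant becomes a legal logical immediate, avoiding the extra
// MOV needed to materialize the constant in a register.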
static bool optimizeLogicalImm(SDValue Op, unsigned Size, uint64_t Imm,
const APInt &Demanded,
TargetLowering::TargetLoweringOpt &TLO,
unsigned NewOpc) { … }
bool AArch64TargetLowering::targetShrinkDemandedConstant(
SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts,
TargetLoweringOpt &TLO) const { … }
void AArch64TargetLowering::computeKnownBitsForTargetNode(
const SDValue Op, KnownBits &Known, const APInt &DemandedElts,
const SelectionDAG &DAG, unsigned Depth) const { … }
unsigned AArch64TargetLowering::ComputeNumSignBitsForTargetNode(
SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG,
unsigned Depth) const { … }
MVT AArch64TargetLowering::getScalarShiftAmountTy(const DataLayout &DL,
EVT) const { … }
bool AArch64TargetLowering::allowsMisalignedMemoryAccesses(
EVT VT, unsigned AddrSpace, Align Alignment, MachineMemOperand::Flags Flags,
unsigned *Fast) const { … }
bool AArch64TargetLowering::allowsMisalignedMemoryAccesses(
LLT Ty, unsigned AddrSpace, Align Alignment, MachineMemOperand::Flags Flags,
unsigned *Fast) const { … }
FastISel *
AArch64TargetLowering::createFastISel(FunctionLoweringInfo &funcInfo,
const TargetLibraryInfo *libInfo) const { … }
const char *AArch64TargetLowering::getTargetNodeName(unsigned Opcode) const { … }
MachineBasicBlock *
AArch64TargetLowering::EmitF128CSEL(MachineInstr &MI,
MachineBasicBlock *MBB) const { … }
MachineBasicBlock *AArch64TargetLowering::EmitLoweredCatchRet(
MachineInstr &MI, MachineBasicBlock *BB) const { … }
MachineBasicBlock *
AArch64TargetLowering::EmitDynamicProbedAlloc(MachineInstr &MI,
MachineBasicBlock *MBB) const { … }
MachineBasicBlock *
AArch64TargetLowering::EmitTileLoad(unsigned Opc, unsigned BaseReg,
MachineInstr &MI,
MachineBasicBlock *BB) const { … }
MachineBasicBlock *
AArch64TargetLowering::EmitFill(MachineInstr &MI, MachineBasicBlock *BB) const { … }
MachineBasicBlock *AArch64TargetLowering::EmitZTInstr(MachineInstr &MI,
MachineBasicBlock *BB,
unsigned Opcode,
bool Op0IsDef) const { … }
MachineBasicBlock *
AArch64TargetLowering::EmitZAInstr(unsigned Opc, unsigned BaseReg,
MachineInstr &MI,
MachineBasicBlock *BB) const { … }
MachineBasicBlock *
AArch64TargetLowering::EmitZero(MachineInstr &MI, MachineBasicBlock *BB) const { … }
MachineBasicBlock *
AArch64TargetLowering::EmitInitTPIDR2Object(MachineInstr &MI,
MachineBasicBlock *BB) const { … }
MachineBasicBlock *
AArch64TargetLowering::EmitAllocateZABuffer(MachineInstr &MI,
MachineBasicBlock *BB) const { … }
MachineBasicBlock *AArch64TargetLowering::EmitInstrWithCustomInserter(
MachineInstr &MI, MachineBasicBlock *BB) const { … }
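//===----------------------------------------------------------------------===//
// AArch64 Lowering private implementation.
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Lowering Code
//===----------------------------------------------------------------------===//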
static EVT getContainerForFixedLengthVector(SelectionDAG &DAG, EVT VT);
static SDValue convertToScalableVector(SelectionDAG &DAG, EVT VT, SDValue V);
static SDValue convertFromScalableVector(SelectionDAG &DAG, EVT VT, SDValue V);
static SDValue convertFixedMaskToScalableVector(SDValue Mask,
SelectionDAG &DAG);
static SDValue getPredicateForVector(SelectionDAG &DAG, SDLoc &DL, EVT VT);
static SDValue getPredicateForScalableVector(SelectionDAG &DAG, SDLoc &DL,
EVT VT);
static bool isZerosVector(const SDNode *N) { … }
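/// changeIntCCToAArch64CC - Convert a DAG integer condition code to an
/// AArch64 CC. The FP variants below may also set a second condition code
/// (CondCode2) for predicates, such as ONE, that need two AArch64 checks.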
static AArch64CC::CondCode changeIntCCToAArch64CC(ISD::CondCode CC) { … }
static void changeFPCCToAArch64CC(ISD::CondCode CC,
AArch64CC::CondCode &CondCode,
AArch64CC::CondCode &CondCode2) { … }
static void changeFPCCToANDAArch64CC(ISD::CondCode CC,
AArch64CC::CondCode &CondCode,
AArch64CC::CondCode &CondCode2) { … }
static void changeVectorFPCCToAArch64CC(ISD::CondCode CC,
AArch64CC::CondCode &CondCode,
AArch64CC::CondCode &CondCode2,
bool &Invert) { … }
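// A legal arithmetic immediate for ADD/SUB is an unsigned 12-bit value,
// optionally shifted left by 12 bits.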
static bool isLegalArithImmed(uint64_t C) { … }
static bool cannotBeIntMin(SDValue CheckedVal, SelectionDAG &DAG) { … }
static bool isCMN(SDValue Op, ISD::CondCode CC, SelectionDAG &DAG) { … }
static SDValue emitStrictFPComparison(SDValue LHS, SDValue RHS, const SDLoc &dl,
SelectionDAG &DAG, SDValue Chain,
bool IsSignaling) { … }
static SDValue emitComparison(SDValue LHS, SDValue RHS, ISD::CondCode CC,
const SDLoc &dl, SelectionDAG &DAG) { … }
static SDValue emitConditionalComparison(SDValue LHS, SDValue RHS,
ISD::CondCode CC, SDValue CCOp,
AArch64CC::CondCode Predicate,
AArch64CC::CondCode OutCC,
const SDLoc &DL, SelectionDAG &DAG) { … }
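// Chains of comparisons joined by AND/OR can be lowered to a single
// flag-setting sequence: the first compare sets NZCV, and each following
// conditional compare (CCMP/CCMN/FCCMP) either performs its comparison or
// forces the flags to a "failed" value, so the final NZCV encodes the whole
// conjunction. The helpers below build such sequences.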
static bool canEmitConjunction(const SDValue Val, bool &CanNegate,
bool &MustBeFirst, bool WillNegate,
unsigned Depth = 0) { … }
static SDValue emitConjunctionRec(SelectionDAG &DAG, SDValue Val,
AArch64CC::CondCode &OutCC, bool Negate, SDValue CCOp,
AArch64CC::CondCode Predicate) { … }
static SDValue emitConjunction(SelectionDAG &DAG, SDValue Val,
AArch64CC::CondCode &OutCC) { … }
static unsigned getCmpOperandFoldingProfit(SDValue Op) { … }
static SDValue getAArch64Cmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
SDValue &AArch64cc, SelectionDAG &DAG,
const SDLoc &dl) { … }
static std::pair<SDValue, SDValue>
getAArch64XALUOOp(AArch64CC::CondCode &CC, SDValue Op, SelectionDAG &DAG) { … }
SDValue AArch64TargetLowering::LowerXOR(SDValue Op, SelectionDAG &DAG) const { … }
static SDValue valueToCarryFlag(SDValue Value, SelectionDAG &DAG, bool Invert) { … }
static SDValue carryFlagToValue(SDValue Glue, EVT VT, SelectionDAG &DAG,
bool Invert) { … }
static SDValue overflowFlagToValue(SDValue Glue, EVT VT, SelectionDAG &DAG) { … }
static SDValue lowerADDSUBO_CARRY(SDValue Op, SelectionDAG &DAG,
unsigned Opcode, bool IsSigned) { … }
static SDValue LowerXALUO(SDValue Op, SelectionDAG &DAG) { … }
static SDValue LowerPREFETCH(SDValue Op, SelectionDAG &DAG) { … }
static void simplifySetCCIntoEq(ISD::CondCode &CC, SDValue &LHS, SDValue &RHS,
SelectionDAG &DAG, const SDLoc dl) { … }
SDValue AArch64TargetLowering::LowerFP_EXTEND(SDValue Op,
SelectionDAG &DAG) const { … }
SDValue AArch64TargetLowering::LowerFP_ROUND(SDValue Op,
SelectionDAG &DAG) const { … }
SDValue AArch64TargetLowering::LowerVectorFP_TO_INT(SDValue Op,
SelectionDAG &DAG) const { … }
SDValue AArch64TargetLowering::LowerFP_TO_INT(SDValue Op,
SelectionDAG &DAG) const { … }
SDValue
AArch64TargetLowering::LowerVectorFP_TO_INT_SAT(SDValue Op,
SelectionDAG &DAG) const { … }
SDValue AArch64TargetLowering::LowerFP_TO_INT_SAT(SDValue Op,
SelectionDAG &DAG) const { … }
SDValue AArch64TargetLowering::LowerVectorXRINT(SDValue Op,
SelectionDAG &DAG) const { … }
SDValue AArch64TargetLowering::LowerVectorINT_TO_FP(SDValue Op,
SelectionDAG &DAG) const { … }
SDValue AArch64TargetLowering::LowerINT_TO_FP(SDValue Op,
SelectionDAG &DAG) const { … }
SDValue AArch64TargetLowering::LowerFSINCOS(SDValue Op,
SelectionDAG &DAG) const { … }
static MVT getSVEContainerType(EVT ContentTy);
SDValue AArch64TargetLowering::LowerBITCAST(SDValue Op,
SelectionDAG &DAG) const { … }
static EVT getExtensionTo64Bits(const EVT &OrigVT) { … }
static SDValue addRequiredExtensionForVectorMULL(SDValue N, SelectionDAG &DAG,
const EVT &OrigTy,
const EVT &ExtTy,
unsigned ExtOpcode) { … }
static std::optional<uint64_t>
getConstantLaneNumOfExtractHalfOperand(SDValue &Op) { … }
static bool isExtendedBUILD_VECTOR(SDValue N, SelectionDAG &DAG,
bool isSigned) { … }
static SDValue skipExtensionForVectorMULL(SDValue N, SelectionDAG &DAG) { … }
static bool isSignExtended(SDValue N, SelectionDAG &DAG) { … }
static bool isZeroExtended(SDValue N, SelectionDAG &DAG) { … }
static bool isAddSubSExt(SDValue N, SelectionDAG &DAG) { … }
static bool isAddSubZExt(SDValue N, SelectionDAG &DAG) { … }
SDValue AArch64TargetLowering::LowerGET_ROUNDING(SDValue Op,
SelectionDAG &DAG) const { … }
SDValue AArch64TargetLowering::LowerSET_ROUNDING(SDValue Op,
SelectionDAG &DAG) const { … }
SDValue AArch64TargetLowering::LowerGET_FPMODE(SDValue Op,
SelectionDAG &DAG) const { … }
SDValue AArch64TargetLowering::LowerSET_FPMODE(SDValue Op,
SelectionDAG &DAG) const { … }
SDValue AArch64TargetLowering::LowerRESET_FPMODE(SDValue Op,
SelectionDAG &DAG) const { … }
static unsigned selectUmullSmull(SDValue &N0, SDValue &N1, SelectionDAG &DAG,
SDLoc DL, bool &IsMLA) { … }
SDValue AArch64TargetLowering::LowerMUL(SDValue Op, SelectionDAG &DAG) const { … }
static inline SDValue getPTrue(SelectionDAG &DAG, SDLoc DL, EVT VT,
int Pattern) { … }
static SDValue optimizeIncrementingWhile(SDValue Op, SelectionDAG &DAG,
bool IsSigned, bool IsEqual) { … }
static SDValue getSVEPredicateBitCast(EVT VT, SDValue Op, SelectionDAG &DAG) { … }
SDValue AArch64TargetLowering::getRuntimePStateSM(SelectionDAG &DAG,
SDValue Chain, SDLoc DL,
EVT VT) const { … }
SDValue LowerSMELdrStr(SDValue N, SelectionDAG &DAG, bool IsLoad) { … }
SDValue AArch64TargetLowering::LowerINTRINSIC_VOID(SDValue Op,
SelectionDAG &DAG) const { … }
SDValue AArch64TargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
SelectionDAG &DAG) const { … }
SDValue AArch64TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
SelectionDAG &DAG) const { … }
bool AArch64TargetLowering::shouldExtendGSIndex(EVT VT, EVT &EltTy) const { … }
bool AArch64TargetLowering::shouldRemoveExtendFromGSIndex(SDValue Extend,
EVT DataVT) const { … }
bool AArch64TargetLowering::isVectorLoadExtDesirable(SDValue ExtVal) const { … }
unsigned getGatherVecOpcode(bool IsScaled, bool IsSigned, bool NeedsExtend) { … }
unsigned getSignExtendedGatherOpcode(unsigned Opcode) { … }
SDValue AArch64TargetLowering::LowerMGATHER(SDValue Op,
SelectionDAG &DAG) const { … }
SDValue AArch64TargetLowering::LowerMSCATTER(SDValue Op,
SelectionDAG &DAG) const { … }
SDValue AArch64TargetLowering::LowerMLOAD(SDValue Op, SelectionDAG &DAG) const { … }
static SDValue LowerTruncateVectorStore(SDLoc DL, StoreSDNode *ST,
EVT VT, EVT MemVT,
SelectionDAG &DAG) { … }
SDValue AArch64TargetLowering::LowerSTORE(SDValue Op,
SelectionDAG &DAG) const { … }
SDValue AArch64TargetLowering::LowerStore128(SDValue Op,
SelectionDAG &DAG) const { … }
SDValue AArch64TargetLowering::LowerLOAD(SDValue Op,
SelectionDAG &DAG) const { … }
SDValue AArch64TargetLowering::LowerVECTOR_COMPRESS(SDValue Op,
SelectionDAG &DAG) const { … }
SDValue AArch64TargetLowering::LowerABS(SDValue Op, SelectionDAG &DAG) const { … }
static SDValue LowerBRCOND(SDValue Op, SelectionDAG &DAG) { … }
static SDValue LowerFunnelShift(SDValue Op, SelectionDAG &DAG) { … }
static SDValue LowerFLDEXP(SDValue Op, SelectionDAG &DAG) { … }
SDValue AArch64TargetLowering::LowerADJUST_TRAMPOLINE(SDValue Op,
SelectionDAG &DAG) const { … }
SDValue AArch64TargetLowering::LowerINIT_TRAMPOLINE(SDValue Op,
SelectionDAG &DAG) const { … }
SDValue AArch64TargetLowering::LowerOperation(SDValue Op,
SelectionDAG &DAG) const { … }
bool AArch64TargetLowering::mergeStoresAfterLegalization(EVT VT) const { … }
bool AArch64TargetLowering::useSVEForFixedLengthVectorVT(
EVT VT, bool OverrideNEON) const { … }
static unsigned getIntrinsicID(const SDNode *N) { … }
bool AArch64TargetLowering::isReassocProfitable(SelectionDAG &DAG, SDValue N0,
SDValue N1) const { … }
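//===----------------------------------------------------------------------===//
//                      Calling Convention Implementation
//===----------------------------------------------------------------------===//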
CCAssignFn *AArch64TargetLowering::CCAssignFnForCall(CallingConv::ID CC,
bool IsVarArg) const { … }
CCAssignFn *
AArch64TargetLowering::CCAssignFnForReturn(CallingConv::ID CC) const { … }
static bool isPassedInFPR(EVT VT) { … }
SDValue AArch64TargetLowering::LowerFormalArguments(
SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const { … }
void AArch64TargetLowering::saveVarArgRegisters(CCState &CCInfo,
SelectionDAG &DAG,
const SDLoc &DL,
SDValue &Chain) const { … }
SDValue AArch64TargetLowering::LowerCallResult(
SDValue Chain, SDValue InGlue, CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<CCValAssign> &RVLocs, const SDLoc &DL,
SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals, bool isThisReturn,
SDValue ThisVal, bool RequiresSMChange) const { … }
static bool canGuaranteeTCO(CallingConv::ID CC, bool GuaranteeTailCalls) { … }
static bool mayTailCallThisCC(CallingConv::ID CC) { … }
static bool callConvSupportsVarArgs(CallingConv::ID CC) { … }
static void analyzeCallOperands(const AArch64TargetLowering &TLI,
const AArch64Subtarget *Subtarget,
const TargetLowering::CallLoweringInfo &CLI,
CCState &CCInfo) { … }
bool AArch64TargetLowering::isEligibleForTailCallOptimization(
const CallLoweringInfo &CLI) const { … }
SDValue AArch64TargetLowering::addTokenForArgument(SDValue Chain,
SelectionDAG &DAG,
MachineFrameInfo &MFI,
int ClobberedFI) const { … }
bool AArch64TargetLowering::DoesCalleeRestoreStack(CallingConv::ID CallCC,
bool TailCallOpt) const { … }
static bool checkZExtBool(SDValue Arg, const SelectionDAG &DAG) { … }
void AArch64TargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI,
SDNode *Node) const { … }
SDValue AArch64TargetLowering::changeStreamingMode(SelectionDAG &DAG, SDLoc DL,
bool Enable, SDValue Chain,
SDValue InGlue,
unsigned Condition,
SDValue PStateSM) const { … }
static unsigned getSMCondition(const SMEAttrs &CallerAttrs,
const SMEAttrs &CalleeAttrs) { … }
SDValue
AArch64TargetLowering::LowerCall(CallLoweringInfo &CLI,
SmallVectorImpl<SDValue> &InVals) const { … }
bool AArch64TargetLowering::CanLowerReturn(
CallingConv::ID CallConv, MachineFunction &MF, bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context) const { … }
SDValue
AArch64TargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
const SDLoc &DL, SelectionDAG &DAG) const { … }
SDValue AArch64TargetLowering::getTargetNode(GlobalAddressSDNode *N, EVT Ty,
SelectionDAG &DAG,
unsigned Flag) const { … }
SDValue AArch64TargetLowering::getTargetNode(JumpTableSDNode *N, EVT Ty,
SelectionDAG &DAG,
unsigned Flag) const { … }
SDValue AArch64TargetLowering::getTargetNode(ConstantPoolSDNode *N, EVT Ty,
SelectionDAG &DAG,
unsigned Flag) const { … }
SDValue AArch64TargetLowering::getTargetNode(BlockAddressSDNode* N, EVT Ty,
SelectionDAG &DAG,
unsigned Flag) const { … }
SDValue AArch64TargetLowering::getTargetNode(ExternalSymbolSDNode *N, EVT Ty,
SelectionDAG &DAG,
unsigned Flag) const { … }
template <class NodeTy>
SDValue AArch64TargetLowering::getGOT(NodeTy *N, SelectionDAG &DAG,
unsigned Flags) const { … }
template <class NodeTy>
SDValue AArch64TargetLowering::getAddrLarge(NodeTy *N, SelectionDAG &DAG,
unsigned Flags) const { … }
template <class NodeTy>
SDValue AArch64TargetLowering::getAddr(NodeTy *N, SelectionDAG &DAG,
unsigned Flags) const { … }
template <class NodeTy>
SDValue AArch64TargetLowering::getAddrTiny(NodeTy *N, SelectionDAG &DAG,
unsigned Flags) const { … }
SDValue AArch64TargetLowering::LowerGlobalAddress(SDValue Op,
SelectionDAG &DAG) const { … }
SDValue
AArch64TargetLowering::LowerDarwinGlobalTLSAddress(SDValue Op,
SelectionDAG &DAG) const { … }
SDValue AArch64TargetLowering::LowerELFTLSLocalExec(const GlobalValue *GV,
SDValue ThreadBase,
const SDLoc &DL,
SelectionDAG &DAG) const { … }
SDValue AArch64TargetLowering::LowerELFTLSDescCallSeq(SDValue SymAddr,
const SDLoc &DL,
SelectionDAG &DAG) const { … }
SDValue
AArch64TargetLowering::LowerELFGlobalTLSAddress(SDValue Op,
SelectionDAG &DAG) const { … }
SDValue
AArch64TargetLowering::LowerWindowsGlobalTLSAddress(SDValue Op,
SelectionDAG &DAG) const { … }
SDValue AArch64TargetLowering::LowerGlobalTLSAddress(SDValue Op,
SelectionDAG &DAG) const { … }
SDValue AArch64TargetLowering::LowerPtrAuthGlobalAddressStatically(
SDValue TGA, SDLoc DL, EVT VT, AArch64PACKey::ID KeyC,
SDValue Discriminator, SDValue AddrDiscriminator, SelectionDAG &DAG) const { … }
SDValue
AArch64TargetLowering::LowerPtrAuthGlobalAddress(SDValue Op,
SelectionDAG &DAG) const { … }
std::pair<SDValue, uint64_t> lookThroughSignExtension(SDValue Val) { … }
SDValue AArch64TargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const { … }
SDValue AArch64TargetLowering::LowerFCOPYSIGN(SDValue Op,
SelectionDAG &DAG) const { … }
SDValue AArch64TargetLowering::LowerCTPOP_PARITY(SDValue Op,
SelectionDAG &DAG) const { … }
SDValue AArch64TargetLowering::LowerCTTZ(SDValue Op, SelectionDAG &DAG) const { … }
SDValue AArch64TargetLowering::LowerMinMax(SDValue Op,
SelectionDAG &DAG) const { … }
SDValue AArch64TargetLowering::LowerBitreverse(SDValue Op,
SelectionDAG &DAG) const { … }
static bool
isOrXorChain(SDValue N, unsigned &Num,
SmallVector<std::pair<SDValue, SDValue>, 16> &WorkList) { … }
static SDValue performOrXorChainCombine(SDNode *N, SelectionDAG &DAG) { … }
SDValue AArch64TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const { … }
SDValue AArch64TargetLowering::LowerSETCCCARRY(SDValue Op,
SelectionDAG &DAG) const { … }
SDValue AArch64TargetLowering::LowerSELECT_CC(ISD::CondCode CC, SDValue LHS,
SDValue RHS, SDValue TVal,
SDValue FVal, const SDLoc &dl,
SelectionDAG &DAG) const { … }
SDValue AArch64TargetLowering::LowerVECTOR_SPLICE(SDValue Op,
SelectionDAG &DAG) const { … }
SDValue AArch64TargetLowering::LowerSELECT_CC(SDValue Op,
SelectionDAG &DAG) const { … }
SDValue AArch64TargetLowering::LowerSELECT(SDValue Op,
SelectionDAG &DAG) const { … }
SDValue AArch64TargetLowering::LowerJumpTable(SDValue Op,
SelectionDAG &DAG) const { … }
SDValue AArch64TargetLowering::LowerBR_JT(SDValue Op,
SelectionDAG &DAG) const { … }
SDValue AArch64TargetLowering::LowerBRIND(SDValue Op, SelectionDAG &DAG) const { … }
SDValue AArch64TargetLowering::LowerConstantPool(SDValue Op,
SelectionDAG &DAG) const { … }
SDValue AArch64TargetLowering::LowerBlockAddress(SDValue Op,
SelectionDAG &DAG) const { … }
SDValue AArch64TargetLowering::LowerDarwin_VASTART(SDValue Op,
SelectionDAG &DAG) const { … }
SDValue AArch64TargetLowering::LowerWin64_VASTART(SDValue Op,
SelectionDAG &DAG) const { … }
SDValue AArch64TargetLowering::LowerAAPCS_VASTART(SDValue Op,
SelectionDAG &DAG) const { … }
SDValue AArch64TargetLowering::LowerVASTART(SDValue Op,
SelectionDAG &DAG) const { … }
SDValue AArch64TargetLowering::LowerVACOPY(SDValue Op,
SelectionDAG &DAG) const { … }
SDValue AArch64TargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const { … }
SDValue AArch64TargetLowering::LowerFRAMEADDR(SDValue Op,
SelectionDAG &DAG) const { … }
SDValue AArch64TargetLowering::LowerSPONENTRY(SDValue Op,
SelectionDAG &DAG) const { … }
#define GET_REGISTER_MATCHER
#include "AArch64GenAsmMatcher.inc"
Register AArch64TargetLowering::
getRegisterByName(const char* RegName, LLT VT, const MachineFunction &MF) const { … }
SDValue AArch64TargetLowering::LowerADDROFRETURNADDR(SDValue Op,
SelectionDAG &DAG) const { … }
SDValue AArch64TargetLowering::LowerRETURNADDR(SDValue Op,
SelectionDAG &DAG) const { … }
SDValue AArch64TargetLowering::LowerShiftParts(SDValue Op,
SelectionDAG &DAG) const { … }
bool AArch64TargetLowering::isOffsetFoldingLegal(
const GlobalAddressSDNode *GA) const { … }
bool AArch64TargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
bool OptForSize) const { … }
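//===----------------------------------------------------------------------===//
//                          AArch64 Optimization Hooks
//===----------------------------------------------------------------------===//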
static SDValue getEstimate(const AArch64Subtarget *ST, unsigned Opcode,
SDValue Operand, SelectionDAG &DAG,
int &ExtraSteps) { … }
SDValue
AArch64TargetLowering::getSqrtInputTest(SDValue Op, SelectionDAG &DAG,
const DenormalMode &Mode) const { … }
SDValue
AArch64TargetLowering::getSqrtResultForDenormInput(SDValue Op,
SelectionDAG &DAG) const { … }
SDValue AArch64TargetLowering::getSqrtEstimate(SDValue Operand,
SelectionDAG &DAG, int Enabled,
int &ExtraSteps,
bool &UseOneConst,
bool Reciprocal) const { … }
SDValue AArch64TargetLowering::getRecipEstimate(SDValue Operand,
SelectionDAG &DAG, int Enabled,
int &ExtraSteps) const { … }
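//===----------------------------------------------------------------------===//
//                      AArch64 Inline Assembly Support
//===----------------------------------------------------------------------===//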
const char *AArch64TargetLowering::LowerXConstraint(EVT ConstraintVT) const { … }
enum class PredicateConstraint { … };
static std::optional<PredicateConstraint>
parsePredicateConstraint(StringRef Constraint) { … }
static const TargetRegisterClass *
getPredicateRegisterClass(PredicateConstraint Constraint, EVT VT) { … }
enum class ReducedGprConstraint { … };
static std::optional<ReducedGprConstraint>
parseReducedGprConstraint(StringRef Constraint) { … }
static const TargetRegisterClass *
getReducedGprRegisterClass(ReducedGprConstraint Constraint, EVT VT) { … }
static AArch64CC::CondCode parseConstraintCode(llvm::StringRef Constraint) { … }
static SDValue getSETCC(AArch64CC::CondCode CC, SDValue NZCV, const SDLoc &DL,
SelectionDAG &DAG) { … }
SDValue AArch64TargetLowering::LowerAsmOutputForConstraint(
SDValue &Chain, SDValue &Glue, const SDLoc &DL,
const AsmOperandInfo &OpInfo, SelectionDAG &DAG) const { … }
AArch64TargetLowering::ConstraintType
AArch64TargetLowering::getConstraintType(StringRef Constraint) const { … }
TargetLowering::ConstraintWeight
AArch64TargetLowering::getSingleConstraintMatchWeight(
AsmOperandInfo &info, const char *constraint) const { … }
std::pair<unsigned, const TargetRegisterClass *>
AArch64TargetLowering::getRegForInlineAsmConstraint(
const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const { … }
EVT AArch64TargetLowering::getAsmOperandValueType(const DataLayout &DL,
llvm::Type *Ty,
bool AllowUnknown) const { … }
void AArch64TargetLowering::LowerAsmOperandForConstraint(
SDValue Op, StringRef Constraint, std::vector<SDValue> &Ops,
SelectionDAG &DAG) const { … }
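//===----------------------------------------------------------------------===//
//                     AArch64 Advanced SIMD Support
//===----------------------------------------------------------------------===//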
static SDValue WidenVector(SDValue V64Reg, SelectionDAG &DAG) { … }
static unsigned getExtFactor(SDValue &V) { … }
SDValue ReconstructShuffleWithRuntimeMask(SDValue Op, SelectionDAG &DAG) { … }
SDValue AArch64TargetLowering::ReconstructShuffle(SDValue Op,
SelectionDAG &DAG) const { … }
static bool isSingletonEXTMask(ArrayRef<int> M, EVT VT, unsigned &Imm) { … }
static SDValue ReconstructTruncateFromBuildVector(SDValue V, SelectionDAG &DAG) { … }
static bool isWideDUPMask(ArrayRef<int> M, EVT VT, unsigned BlockSize,
unsigned &DupLaneOp) { … }
static bool isEXTMask(ArrayRef<int> M, EVT VT, bool &ReverseEXT,
unsigned &Imm) { … }
static bool isZIP_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) { … }
static bool isUZP_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) { … }
static bool isTRN_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) { … }
static bool isINSMask(ArrayRef<int> M, int NumInputElements,
bool &DstIsLeft, int &Anomaly) { … }
static bool isConcatMask(ArrayRef<int> Mask, EVT VT, bool SplitLHS) { … }
static SDValue tryFormConcatFromShuffle(SDValue Op, SelectionDAG &DAG) { … }
static SDValue GeneratePerfectShuffle(unsigned ID, SDValue V1,
SDValue V2, unsigned PFEntry, SDValue LHS,
SDValue RHS, SelectionDAG &DAG,
const SDLoc &dl) { … }
static SDValue GenerateTBL(SDValue Op, ArrayRef<int> ShuffleMask,
SelectionDAG &DAG) { … }
static unsigned getDUPLANEOp(EVT EltType) { … }
static SDValue constructDup(SDValue V, int Lane, SDLoc dl, EVT VT,
unsigned Opcode, SelectionDAG &DAG) { … }
static bool isWideTypeMask(ArrayRef<int> M, EVT VT,
SmallVectorImpl<int> &NewMask) { … }
static SDValue tryWidenMaskForShuffle(SDValue Op, SelectionDAG &DAG) { … }
static SDValue tryToConvertShuffleOfTbl2ToTbl4(SDValue Op,
ArrayRef<int> ShuffleMask,
SelectionDAG &DAG) { … }
SDValue
AArch64TargetLowering::LowerZERO_EXTEND_VECTOR_INREG(SDValue Op,
SelectionDAG &DAG) const { … }
SDValue AArch64TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
SelectionDAG &DAG) const { … }
SDValue AArch64TargetLowering::LowerSPLAT_VECTOR(SDValue Op,
SelectionDAG &DAG) const { … }
SDValue AArch64TargetLowering::LowerDUPQLane(SDValue Op,
SelectionDAG &DAG) const { … }
static bool resolveBuildVector(BuildVectorSDNode *BVN, APInt &CnstBits,
APInt &UndefBits) { … }
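// The tryAdvSIMDModImm* helpers attempt to materialize a splatted constant
// with a single MOVI/MVNI/FMOV of the given element width instead of loading
// it from a constant pool.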
static SDValue tryAdvSIMDModImm64(unsigned NewOp, SDValue Op, SelectionDAG &DAG,
const APInt &Bits) { … }
static SDValue tryAdvSIMDModImm32(unsigned NewOp, SDValue Op, SelectionDAG &DAG,
const APInt &Bits,
const SDValue *LHS = nullptr) { … }
static SDValue tryAdvSIMDModImm16(unsigned NewOp, SDValue Op, SelectionDAG &DAG,
const APInt &Bits,
const SDValue *LHS = nullptr) { … }
static SDValue tryAdvSIMDModImm321s(unsigned NewOp, SDValue Op,
SelectionDAG &DAG, const APInt &Bits) { … }
static SDValue tryAdvSIMDModImm8(unsigned NewOp, SDValue Op, SelectionDAG &DAG,
const APInt &Bits) { … }
static SDValue tryAdvSIMDModImmFP(unsigned NewOp, SDValue Op, SelectionDAG &DAG,
const APInt &Bits) { … }
static bool isAllConstantBuildVector(const SDValue &PotentialBVec,
uint64_t &ConstVal) { … }
static bool isAllInactivePredicate(SDValue N) { … }
static bool isAllActivePredicate(SelectionDAG &DAG, SDValue N) { … }
static SDValue tryLowerToSLI(SDNode *N, SelectionDAG &DAG) { … }
SDValue tryWhileWRFromOR(SDValue Op, SelectionDAG &DAG,
const AArch64Subtarget &Subtarget) { … }
SDValue AArch64TargetLowering::LowerVectorOR(SDValue Op,
SelectionDAG &DAG) const { … }
static SDValue NormalizeBuildVector(SDValue Op,
SelectionDAG &DAG) { … }
static SDValue ConstantBuildVector(SDValue Op, SelectionDAG &DAG,
const AArch64Subtarget *ST) { … }
SDValue AArch64TargetLowering::LowerBUILD_VECTOR(SDValue Op,
SelectionDAG &DAG) const { … }
SDValue AArch64TargetLowering::LowerCONCAT_VECTORS(SDValue Op,
SelectionDAG &DAG) const { … }
SDValue AArch64TargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op,
SelectionDAG &DAG) const { … }
SDValue
AArch64TargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
SelectionDAG &DAG) const { … }
SDValue AArch64TargetLowering::LowerEXTRACT_SUBVECTOR(SDValue Op,
SelectionDAG &DAG) const { … }
SDValue AArch64TargetLowering::LowerINSERT_SUBVECTOR(SDValue Op,
SelectionDAG &DAG) const { … }
static bool isPow2Splat(SDValue Op, uint64_t &SplatVal, bool &Negated) { … }
SDValue AArch64TargetLowering::LowerDIV(SDValue Op, SelectionDAG &DAG) const { … }
bool AArch64TargetLowering::shouldExpandBuildVectorWithShuffles(
EVT VT, unsigned DefinedValues) const { … }
bool AArch64TargetLowering::isShuffleMaskLegal(ArrayRef<int> M, EVT VT) const { … }
bool AArch64TargetLowering::isVectorClearMaskLegal(ArrayRef<int> M,
EVT VT) const { … }
static bool getVShiftImm(SDValue Op, unsigned ElementBits, int64_t &Cnt) { … }
static bool isVShiftLImm(SDValue Op, EVT VT, bool isLong, int64_t &Cnt) { … }
static bool isVShiftRImm(SDValue Op, EVT VT, bool isNarrow, int64_t &Cnt) { … }
SDValue AArch64TargetLowering::LowerTRUNCATE(SDValue Op,
SelectionDAG &DAG) const { … }
static bool canLowerSRLToRoundingShiftForVT(SDValue Shift, EVT ResVT,
SelectionDAG &DAG,
unsigned &ShiftValue,
SDValue &RShOperand) { … }
SelectionDAG &DAG) const { … }
static SDValue EmitVectorComparison(SDValue LHS, SDValue RHS,
AArch64CC::CondCode CC, bool NoNans, EVT VT,
const SDLoc &dl, SelectionDAG &DAG) { … }
SDValue AArch64TargetLowering::LowerVSETCC(SDValue Op,
SelectionDAG &DAG) const { … }
static SDValue getReductionSDNode(unsigned Op, SDLoc DL, SDValue ScalarOp,
SelectionDAG &DAG) { … }
static SDValue getVectorBitwiseReduce(unsigned Opcode, SDValue Vec, EVT VT,
SDLoc DL, SelectionDAG &DAG) { … }
SDValue AArch64TargetLowering::LowerVECREDUCE(SDValue Op,
SelectionDAG &DAG) const { … }
SDValue AArch64TargetLowering::LowerATOMIC_LOAD_AND(SDValue Op,
SelectionDAG &DAG) const { … }
SDValue
AArch64TargetLowering::LowerWindowsDYNAMIC_STACKALLOC(SDValue Op,
SelectionDAG &DAG) const { … }
SDValue
AArch64TargetLowering::LowerInlineDYNAMIC_STACKALLOC(SDValue Op,
SelectionDAG &DAG) const { … }
SDValue
AArch64TargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
SelectionDAG &DAG) const { … }
SDValue AArch64TargetLowering::LowerAVG(SDValue Op, SelectionDAG &DAG,
unsigned NewOp) const { … }
SDValue AArch64TargetLowering::LowerVSCALE(SDValue Op,
SelectionDAG &DAG) const { … }
template <unsigned NumVecs>
static bool
setInfoSVEStN(const AArch64TargetLowering &TLI, const DataLayout &DL,
AArch64TargetLowering::IntrinsicInfo &Info, const CallInst &CI) { … }
bool AArch64TargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
const CallInst &I,
MachineFunction &MF,
unsigned Intrinsic) const { … }
bool AArch64TargetLowering::shouldReduceLoadWidth(SDNode *Load,
ISD::LoadExtType ExtTy,
EVT NewVT) const { … }
bool AArch64TargetLowering::shouldRemoveRedundantExtend(SDValue Extend) const { … }
bool AArch64TargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const { … }
bool AArch64TargetLowering::isTruncateFree(EVT VT1, EVT VT2) const { … }
bool AArch64TargetLowering::isProfitableToHoist(Instruction *I) const { … }
bool AArch64TargetLowering::isZExtFree(Type *Ty1, Type *Ty2) const { … }
bool AArch64TargetLowering::isZExtFree(EVT VT1, EVT VT2) const { … }
bool AArch64TargetLowering::isZExtFree(SDValue Val, EVT VT2) const { … }
bool AArch64TargetLowering::isExtFreeImpl(const Instruction *Ext) const { … }
static bool isSplatShuffle(Value *V) { … }
static bool areExtractShuffleVectors(Value *Op1, Value *Op2,
bool AllowSplat = false) { … }
static bool areExtractExts(Value *Ext1, Value *Ext2) { … }
static bool isOperandOfVmullHighP64(Value *Op) { … }
static bool areOperandsOfVmullHighP64(Value *Op1, Value *Op2) { … }
static bool shouldSinkVectorOfPtrs(Value *Ptrs, SmallVectorImpl<Use *> &Ops) { … }
static bool shouldSinkVScale(Value *Op, SmallVectorImpl<Use *> &Ops) { … }
bool AArch64TargetLowering::shouldSinkOperands(
Instruction *I, SmallVectorImpl<Use *> &Ops) const { … }
static bool createTblShuffleMask(unsigned SrcWidth, unsigned DstWidth,
unsigned NumElts, bool IsLittleEndian,
SmallVectorImpl<int> &Mask) { … }
static Value *createTblShuffleForZExt(IRBuilderBase &Builder, Value *Op,
FixedVectorType *ZExtTy,
FixedVectorType *DstTy,
bool IsLittleEndian) { … }
static Value *createTblShuffleForSExt(IRBuilderBase &Builder, Value *Op,
FixedVectorType *DstTy,
bool IsLittleEndian) { … }
static void createTblForTrunc(TruncInst *TI, bool IsLittleEndian) { … }
bool AArch64TargetLowering::optimizeExtendOrTruncateConversion(
Instruction *I, Loop *L, const TargetTransformInfo &TTI) const { … }
bool AArch64TargetLowering::hasPairedLoad(EVT LoadedType,
Align &RequiredAligment) const { … }
unsigned AArch64TargetLowering::getNumInterleavedAccesses(
VectorType *VecTy, const DataLayout &DL, bool UseScalable) const { … }
MachineMemOperand::Flags
AArch64TargetLowering::getTargetMMOFlags(const Instruction &I) const { … }
bool AArch64TargetLowering::isLegalInterleavedAccessType(
VectorType *VecTy, const DataLayout &DL, bool &UseScalable) const { … }
static ScalableVectorType *getSVEContainerIRType(FixedVectorType *VTy) { … }
static Function *getStructuredLoadFunction(Module *M, unsigned Factor,
bool Scalable, Type *LDVTy,
Type *PtrTy) { … }
static Function *getStructuredStoreFunction(Module *M, unsigned Factor,
bool Scalable, Type *STVTy,
Type *PtrTy) { … }
bool AArch64TargetLowering::lowerInterleavedLoad(
LoadInst *LI, ArrayRef<ShuffleVectorInst *> Shuffles,
ArrayRef<unsigned> Indices, unsigned Factor) const { … }
template <typename Iter>
bool hasNearbyPairedStore(Iter It, Iter End, Value *Ptr, const DataLayout &DL) { … }
bool AArch64TargetLowering::lowerInterleavedStore(StoreInst *SI,
ShuffleVectorInst *SVI,
unsigned Factor) const { … }
bool getDeinterleave2Values(
Value *DI, SmallVectorImpl<Instruction *> &DeinterleavedValues,
SmallVectorImpl<Instruction *> &DeInterleaveDeadInsts) { … }
bool getDeinterleave4Values(
Value *DI, SmallVectorImpl<Instruction *> &DeinterleavedValues,
SmallVectorImpl<Instruction *> &DeInterleaveDeadInsts) { … }
bool getDeinterleavedValues(
Value *DI, SmallVectorImpl<Instruction *> &DeinterleavedValues,
SmallVectorImpl<Instruction *> &DeInterleaveDeadInsts) { … }
bool AArch64TargetLowering::lowerDeinterleaveIntrinsicToLoad(
IntrinsicInst *DI, LoadInst *LI,
SmallVectorImpl<Instruction *> &DeadInsts) const { … }
bool getValuesToInterleave(
Value *II, SmallVectorImpl<Value *> &InterleavedValues,
SmallVectorImpl<Instruction *> &InterleaveDeadInsts) { … }
bool AArch64TargetLowering::lowerInterleaveIntrinsicToStore(
IntrinsicInst *II, StoreInst *SI,
SmallVectorImpl<Instruction *> &DeadInsts) const { … }
EVT AArch64TargetLowering::getOptimalMemOpType(
const MemOp &Op, const AttributeList &FuncAttributes) const { … }
LLT AArch64TargetLowering::getOptimalMemOpLLT(
const MemOp &Op, const AttributeList &FuncAttributes) const { … }
bool AArch64TargetLowering::isLegalAddImmediate(int64_t Immed) const { … }
bool AArch64TargetLowering::isLegalAddScalableImmediate(int64_t Imm) const { … }
bool AArch64TargetLowering::isMulAddWithConstProfitable(
SDValue AddNode, SDValue ConstNode) const { … }
bool AArch64TargetLowering::isLegalICmpImmediate(int64_t Immed) const { … }
bool AArch64TargetLowering::isLegalAddressingMode(const DataLayout &DL,
const AddrMode &AMode, Type *Ty,
unsigned AS, Instruction *I) const { … }
int64_t
AArch64TargetLowering::getPreferredLargeGEPBaseOffset(int64_t MinOffset,
int64_t MaxOffset) const { … }
bool AArch64TargetLowering::shouldConsiderGEPOffsetSplit() const { … }
bool AArch64TargetLowering::isFMAFasterThanFMulAndFAdd(
const MachineFunction &MF, EVT VT) const { … }
bool AArch64TargetLowering::isFMAFasterThanFMulAndFAdd(const Function &F,
Type *Ty) const { … }
bool AArch64TargetLowering::generateFMAsInMachineCombiner(
EVT VT, CodeGenOptLevel OptLevel) const { … }
const MCPhysReg *
AArch64TargetLowering::getScratchRegisters(CallingConv::ID) const { … }
ArrayRef<MCPhysReg> AArch64TargetLowering::getRoundingControlRegisters() const { … }
bool
AArch64TargetLowering::isDesirableToCommuteWithShift(const SDNode *N,
CombineLevel Level) const { … }
bool AArch64TargetLowering::isDesirableToCommuteXorWithShift(
const SDNode *N) const { … }
bool AArch64TargetLowering::shouldFoldConstantShiftPairToMask(
const SDNode *N, CombineLevel Level) const { … }
bool AArch64TargetLowering::shouldFoldSelectWithIdentityConstant(
unsigned BinOpcode, EVT VT) const { … }
bool AArch64TargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
Type *Ty) const { … }
bool AArch64TargetLowering::isExtractSubvectorCheap(EVT ResVT, EVT SrcVT,
unsigned Index) const { … }
static SDValue foldVectorXorShiftIntoCmp(SDNode *N, SelectionDAG &DAG,
const AArch64Subtarget *Subtarget) { … }
static SDValue performVecReduceAddCombineWithUADDLP(SDNode *N,
SelectionDAG &DAG) { … }
static SDValue performVecReduceAddCombine(SDNode *N, SelectionDAG &DAG,
const AArch64Subtarget *ST) { … }
static SDValue performUADDVAddCombine(SDValue A, SelectionDAG &DAG) { … }
static SDValue performUADDVZextCombine(SDValue A, SelectionDAG &DAG) { … }
static SDValue performUADDVCombine(SDNode *N, SelectionDAG &DAG) { … }
static SDValue performXorCombine(SDNode *N, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI,
const AArch64Subtarget *Subtarget) { … }
SDValue
AArch64TargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor,
SelectionDAG &DAG,
SmallVectorImpl<SDNode *> &Created) const { … }
SDValue
AArch64TargetLowering::BuildSREMPow2(SDNode *N, const APInt &Divisor,
SelectionDAG &DAG,
SmallVectorImpl<SDNode *> &Created) const { … }
static std::optional<unsigned> IsSVECntIntrinsic(SDValue S) { … }
static EVT calculatePreExtendType(SDValue Extend) { … }
static SDValue performBuildShuffleExtendCombine(SDValue BV, SelectionDAG &DAG) { … }
static SDValue performMulVectorExtendCombine(SDNode *Mul, SelectionDAG &DAG) { … }
static SDValue performMulVectorCmpZeroCombine(SDNode *N, SelectionDAG &DAG) { … }
static SDValue performVectorExtCombine(SDNode *N, SelectionDAG &DAG) { … }
static SDValue performMulCombine(SDNode *N, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI,
const AArch64Subtarget *Subtarget) { … }
static SDValue performVectorCompareAndMaskUnaryOpCombine(SDNode *N,
SelectionDAG &DAG) { … }
static SDValue performIntToFpCombine(SDNode *N, SelectionDAG &DAG,
const AArch64Subtarget *Subtarget) { … }
static SDValue performFpToIntCombine(SDNode *N, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI,
const AArch64Subtarget *Subtarget) { … }
static SDValue tryCombineToBSL(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
const AArch64TargetLowering &TLI) { … }
static SDValue performANDORCSELCombine(SDNode *N, SelectionDAG &DAG) { … }
static SDValue performORCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
const AArch64Subtarget *Subtarget,
const AArch64TargetLowering &TLI) { … }
static bool isConstantSplatVectorMaskForType(SDNode *N, EVT MemVT) { … }
static SDValue performReinterpretCastCombine(SDNode *N) { … }
static SDValue performSVEAndCombine(SDNode *N,
TargetLowering::DAGCombinerInfo &DCI) { … }
static SDValue performANDSETCCCombine(SDNode *N,
TargetLowering::DAGCombinerInfo &DCI) { … }
static SDValue performANDCombine(SDNode *N,
TargetLowering::DAGCombinerInfo &DCI) { … }
static SDValue performFADDCombine(SDNode *N,
TargetLowering::DAGCombinerInfo &DCI) { … }
static bool hasPairwiseAdd(unsigned Opcode, EVT VT, bool FullFP16) { … }
static SDValue getPTest(SelectionDAG &DAG, EVT VT, SDValue Pg, SDValue Op,
AArch64CC::CondCode Cond);
static bool isPredicateCCSettingOp(SDValue N) { … }
static SDValue
performFirstTrueTestVectorCombine(SDNode *N,
TargetLowering::DAGCombinerInfo &DCI,
const AArch64Subtarget *Subtarget) { … }
static SDValue
performLastTrueTestVectorCombine(SDNode *N,
TargetLowering::DAGCombinerInfo &DCI,
const AArch64Subtarget *Subtarget) { … }
static SDValue
performExtractVectorEltCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
const AArch64Subtarget *Subtarget) { … }
static SDValue performConcatVectorsCombine(SDNode *N,
TargetLowering::DAGCombinerInfo &DCI,
SelectionDAG &DAG) { … }
static SDValue
performExtractSubvectorCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
SelectionDAG &DAG) { … }
static SDValue
performInsertSubvectorCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
SelectionDAG &DAG) { … }
static SDValue tryCombineFixedPointConvert(SDNode *N,
TargetLowering::DAGCombinerInfo &DCI,
SelectionDAG &DAG) { … }
static SDValue tryExtendDUPToExtractHigh(SDValue N, SelectionDAG &DAG) { … }
static bool isEssentiallyExtractHighSubvector(SDValue N) { … }
struct GenericSetCCInfo { … };
struct AArch64SetCCInfo { … };
union SetCCInfo { … };
struct SetCCInfoAndKind { … };
static bool isSetCC(SDValue Op, SetCCInfoAndKind &SetCCInfo) { … }
static bool isSetCCOrZExtSetCC(const SDValue& Op, SetCCInfoAndKind &Info) { … }
static SDValue performSetccAddFolding(SDNode *Op, SelectionDAG &DAG) { … }
static SDValue performAddUADDVCombine(SDNode *N, SelectionDAG &DAG) { … }
static SDValue performAddCSelIntoCSinc(SDNode *N, SelectionDAG &DAG) { … }
static SDValue performAddDotCombine(SDNode *N, SelectionDAG &DAG) { … }
static bool isNegatedInteger(SDValue Op) { … }
static SDValue getNegatedInteger(SDValue Op, SelectionDAG &DAG) { … }
static SDValue performNegCSelCombine(SDNode *N, SelectionDAG &DAG) { … }
static SDValue performAddSubLongCombine(SDNode *N,
TargetLowering::DAGCombinerInfo &DCI) { … }
static bool isCMP(SDValue Op) { … }
static std::optional<AArch64CC::CondCode> getCSETCondCode(SDValue Op) { … }
static SDValue foldOverflowCheck(SDNode *Op, SelectionDAG &DAG, bool IsAdd) { … }
static SDValue foldADCToCINC(SDNode *N, SelectionDAG &DAG) { … }
static SDValue performBuildVectorCombine(SDNode *N,
TargetLowering::DAGCombinerInfo &DCI,
SelectionDAG &DAG) { … }
static SDValue performTruncateCombine(SDNode *N,
SelectionDAG &DAG) { … }
static bool isExtendOrShiftOperand(SDValue N) { … }
static SDValue performAddCombineSubShift(SDNode *N, SDValue SUB, SDValue Z,
SelectionDAG &DAG) { … }
static SDValue performAddCombineForShiftedOperands(SDNode *N,
SelectionDAG &DAG) { … }
static SDValue performSubAddMULCombine(SDNode *N, SelectionDAG &DAG) { … }
static SDValue
performSVEMulAddSubCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) { … }
static SDValue performAddSubIntoVectorOp(SDNode *N, SelectionDAG &DAG) { … }
static bool isLoadOrMultipleLoads(SDValue B, SmallVector<LoadSDNode *> &Loads) { … }
static bool areLoadedOffsetButOtherwiseSame(SDValue Op0, SDValue Op1,
SelectionDAG &DAG,
unsigned &NumSubLoads) { … }
static SDValue performExtBinopLoadFold(SDNode *N, SelectionDAG &DAG) { … }
static SDValue performAddSubCombine(SDNode *N,
TargetLowering::DAGCombinerInfo &DCI) { … }
static SDValue tryCombineLongOpWithDup(unsigned IID, SDNode *N,
TargetLowering::DAGCombinerInfo &DCI,
SelectionDAG &DAG) { … }
static SDValue tryCombineShiftImm(unsigned IID, SDNode *N, SelectionDAG &DAG) { … }
static SDValue tryCombineCRC32(unsigned Mask, SDNode *N, SelectionDAG &DAG) { … }
static SDValue combineAcrossLanesIntrinsic(unsigned Opc, SDNode *N,
SelectionDAG &DAG) { … }
static SDValue LowerSVEIntrinsicIndex(SDNode *N, SelectionDAG &DAG) { … }
static SDValue LowerSVEIntrinsicDUP(SDNode *N, SelectionDAG &DAG) { … }
static SDValue LowerSVEIntrinsicEXT(SDNode *N, SelectionDAG &DAG) { … }
static SDValue tryConvertSVEWideCompare(SDNode *N, ISD::CondCode CC,
TargetLowering::DAGCombinerInfo &DCI,
SelectionDAG &DAG) { … }
static SDValue getPTest(SelectionDAG &DAG, EVT VT, SDValue Pg, SDValue Op,
AArch64CC::CondCode Cond) { … }
static SDValue combineSVEReductionInt(SDNode *N, unsigned Opc,
SelectionDAG &DAG) { … }
static SDValue combineSVEReductionFP(SDNode *N, unsigned Opc,
SelectionDAG &DAG) { … }
static SDValue combineSVEReductionOrderedFP(SDNode *N, unsigned Opc,
SelectionDAG &DAG) { … }
static SDValue convertMergedOpToPredOp(SDNode *N, unsigned Opc,
SelectionDAG &DAG, bool UnpredOp = false,
bool SwapOperands = false) { … }
static SDValue tryCombineWhileLo(SDNode *N,
TargetLowering::DAGCombinerInfo &DCI,
const AArch64Subtarget *Subtarget) { … }
SDValue tryLowerPartialReductionToDot(SDNode *N,
const AArch64Subtarget *Subtarget,
SelectionDAG &DAG) { … }
static SDValue performIntrinsicCombine(SDNode *N,
TargetLowering::DAGCombinerInfo &DCI,
const AArch64Subtarget *Subtarget) { … }
static bool isCheapToExtend(const SDValue &N) { … }
static SDValue
performSignExtendSetCCCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
SelectionDAG &DAG) { … }
static SDValue performZExtDeinterleaveShuffleCombine(SDNode *N,
SelectionDAG &DAG) { … }
static SDValue performZExtUZPCombine(SDNode *N, SelectionDAG &DAG) { … }
static SDValue performExtendCombine(SDNode *N,
TargetLowering::DAGCombinerInfo &DCI,
SelectionDAG &DAG) { … }
static SDValue splitStoreSplat(SelectionDAG &DAG, StoreSDNode &St,
SDValue SplatVal, unsigned NumVecElts) { … }
static MVT getSVEContainerType(EVT ContentTy) { … }
static SDValue performLD1Combine(SDNode *N, SelectionDAG &DAG, unsigned Opc) { … }
static SDValue performLDNT1Combine(SDNode *N, SelectionDAG &DAG) { … }
template <unsigned Opcode>
static SDValue performLD1ReplicateCombine(SDNode *N, SelectionDAG &DAG) { … }
static SDValue performST1Combine(SDNode *N, SelectionDAG &DAG) { … }
static SDValue performSTNT1Combine(SDNode *N, SelectionDAG &DAG) { … }
static SDValue replaceZeroVectorStore(SelectionDAG &DAG, StoreSDNode &St) { … }
static SDValue replaceSplatVectorStore(SelectionDAG &DAG, StoreSDNode &St) { … }
static SDValue splitStores(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
SelectionDAG &DAG,
const AArch64Subtarget *Subtarget) { … }
static SDValue performSpliceCombine(SDNode *N, SelectionDAG &DAG) { … }
static SDValue performUnpackCombine(SDNode *N, SelectionDAG &DAG,
const AArch64Subtarget *Subtarget) { … }
static bool isHalvingTruncateAndConcatOfLegalIntScalableType(SDNode *N) { … }
static SDValue tryCombineExtendRShTrunc(SDNode *N, SelectionDAG &DAG) { … }
static SDValue trySimplifySrlAddToRshrnb(SDValue Srl, SelectionDAG &DAG,
const AArch64Subtarget *Subtarget) { … }
static SDValue isNVCastToHalfWidthElements(SDValue V) { … }
static SDValue performUzpCombine(SDNode *N, SelectionDAG &DAG,
const AArch64Subtarget *Subtarget) { … }
static SDValue performGLD1Combine(SDNode *N, SelectionDAG &DAG) { … }
static SDValue performVectorShiftCombine(SDNode *N,
const AArch64TargetLowering &TLI,
TargetLowering::DAGCombinerInfo &DCI) { … }
static SDValue performSunpkloCombine(SDNode *N, SelectionDAG &DAG) { … }
static SDValue performPostLD1Combine(SDNode *N,
TargetLowering::DAGCombinerInfo &DCI,
bool IsLaneOp) { … }
static bool performTBISimplification(SDValue Addr,
TargetLowering::DAGCombinerInfo &DCI,
SelectionDAG &DAG) { … }
static SDValue foldTruncStoreOfExt(SelectionDAG &DAG, SDNode *N) { … }
static SDValue combineV3I8LoadExt(LoadSDNode *LD, SelectionDAG &DAG) { … }
static SDValue performLOADCombine(SDNode *N,
TargetLowering::DAGCombinerInfo &DCI,
SelectionDAG &DAG,
const AArch64Subtarget *Subtarget) { … }
static EVT tryGetOriginalBoolVectorType(SDValue Op, int Depth = 0) { … }
static SDValue vectorToScalarBitmask(SDNode *N, SelectionDAG &DAG) { … }
static SDValue combineBoolVectorAndTruncateStore(SelectionDAG &DAG,
StoreSDNode *Store) { … }
bool isHalvingTruncateOfLegalScalableType(EVT SrcVT, EVT DstVT) { … }
static SDValue combineI8TruncStore(StoreSDNode *ST, SelectionDAG &DAG,
const AArch64Subtarget *Subtarget) { … }
static SDValue performSTORECombine(SDNode *N,
TargetLowering::DAGCombinerInfo &DCI,
SelectionDAG &DAG,
const AArch64Subtarget *Subtarget) { … }
static SDValue performMSTORECombine(SDNode *N,
TargetLowering::DAGCombinerInfo &DCI,
SelectionDAG &DAG,
const AArch64Subtarget *Subtarget) { … }
static bool foldIndexIntoBase(SDValue &BasePtr, SDValue &Index, SDValue Scale,
SDLoc DL, SelectionDAG &DAG) { … }
static bool findMoreOptimalIndexType(const MaskedGatherScatterSDNode *N,
SDValue &BasePtr, SDValue &Index,
SelectionDAG &DAG) { … }
static SDValue performMaskedGatherScatterCombine(
SDNode *N, TargetLowering::DAGCombinerInfo &DCI, SelectionDAG &DAG) { … }
static SDValue performNEONPostLDSTCombine(SDNode *N,
TargetLowering::DAGCombinerInfo &DCI,
SelectionDAG &DAG) { … }
static
bool checkValueWidth(SDValue V, unsigned width, ISD::LoadExtType &ExtType) { … }
static bool isEquivalentMaskless(unsigned CC, unsigned width,
ISD::LoadExtType ExtType, int AddConstant,
int CompConstant) { … }
static SDValue performSubsToAndsCombine(SDNode *N, SDNode *SubsNode,
SDNode *AndNode, SelectionDAG &DAG,
unsigned CCIndex, unsigned CmpIndex,
unsigned CC) { … }
static
SDValue performCONDCombine(SDNode *N,
TargetLowering::DAGCombinerInfo &DCI,
SelectionDAG &DAG, unsigned CCIndex,
unsigned CmpIndex) { … }
static SDValue performBRCONDCombine(SDNode *N,
TargetLowering::DAGCombinerInfo &DCI,
SelectionDAG &DAG) { … }
static SDValue foldCSELofCTTZ(SDNode *N, SelectionDAG &DAG) { … }
static SDValue foldCSELOfCSEL(SDNode *Op, SelectionDAG &DAG) { … }
static SDValue performCSELCombine(SDNode *N,
TargetLowering::DAGCombinerInfo &DCI,
SelectionDAG &DAG) { … }
static SDValue tryToWidenSetCCOperands(SDNode *Op, SelectionDAG &DAG) { … }
static SDValue
performVecReduceBitwiseCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
SelectionDAG &DAG) { … }
static SDValue performSETCCCombine(SDNode *N,
TargetLowering::DAGCombinerInfo &DCI,
SelectionDAG &DAG) { … }
static SDValue performFlagSettingCombine(SDNode *N,
TargetLowering::DAGCombinerInfo &DCI,
unsigned GenericOpcode) { … }
static SDValue performSetCCPunpkCombine(SDNode *N, SelectionDAG &DAG) { … }
static SDValue
performSetccMergeZeroCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) { … }
static SDValue getTestBitOperand(SDValue Op, unsigned &Bit, bool &Invert,
SelectionDAG &DAG) { … }
static SDValue performTBZCombine(SDNode *N,
TargetLowering::DAGCombinerInfo &DCI,
SelectionDAG &DAG) { … }
static SDValue trySwapVSelectOperands(SDNode *N, SelectionDAG &DAG) { … }
static SDValue performVSelectCombine(SDNode *N, SelectionDAG &DAG) { … }
static SDValue performSelectCombine(SDNode *N,
TargetLowering::DAGCombinerInfo &DCI) { … }
static SDValue performDUPCombine(SDNode *N,
TargetLowering::DAGCombinerInfo &DCI) { … }
static SDValue performNVCASTCombine(SDNode *N, SelectionDAG &DAG) { … }
static SDValue performGlobalAddressCombine(SDNode *N, SelectionDAG &DAG,
const AArch64Subtarget *Subtarget,
const TargetMachine &TM) { … }
static SDValue performCTLZCombine(SDNode *N, SelectionDAG &DAG,
const AArch64Subtarget *Subtarget) { … }
static SDValue getScaledOffsetForBitWidth(SelectionDAG &DAG, SDValue Offset,
SDLoc DL, unsigned BitWidth) { … }
inline static bool isValidImmForSVEVecImmAddrMode(unsigned OffsetInBytes,
unsigned ScalarSizeInBytes) { … }
static bool isValidImmForSVEVecImmAddrMode(SDValue Offset,
unsigned ScalarSizeInBytes) { … }
static SDValue performScatterStoreCombine(SDNode *N, SelectionDAG &DAG,
unsigned Opcode,
bool OnlyPackedOffsets = true) { … }
static SDValue performGatherLoadCombine(SDNode *N, SelectionDAG &DAG,
unsigned Opcode,
bool OnlyPackedOffsets = true) { … }
static SDValue
performSignExtendInRegCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
SelectionDAG &DAG) { … }
static SDValue legalizeSVEGatherPrefetchOffsVec(SDNode *N, SelectionDAG &DAG) { … }
static SDValue combineSVEPrefetchVecBaseImmOff(SDNode *N, SelectionDAG &DAG,
unsigned ScalarSizeInBytes) { … }
static bool isLanes1toNKnownZero(SDValue Op) { … }
static SDValue removeRedundantInsertVectorElt(SDNode *N) { … }
static SDValue
performInsertVectorEltCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) { … }
static SDValue performFPExtendCombine(SDNode *N, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI,
const AArch64Subtarget *Subtarget) { … }
static SDValue performBSPExpandForSVE(SDNode *N, SelectionDAG &DAG,
const AArch64Subtarget *Subtarget) { … }
static SDValue performDupLane128Combine(SDNode *N, SelectionDAG &DAG) { … }
static SDValue tryCombineMULLWithUZP1(SDNode *N,
TargetLowering::DAGCombinerInfo &DCI,
SelectionDAG &DAG) { … }
static SDValue performMULLCombine(SDNode *N,
TargetLowering::DAGCombinerInfo &DCI,
SelectionDAG &DAG) { … }
static SDValue
performScalarToVectorCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
SelectionDAG &DAG) { … }
SDValue AArch64TargetLowering::PerformDAGCombine(SDNode *N,
DAGCombinerInfo &DCI) const { … }bool AArch64TargetLowering::isUsedByReturnOnly(SDNode *N,
SDValue &Chain) const { … }bool AArch64TargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const { … }bool AArch64TargetLowering::isIndexingLegal(MachineInstr &MI, Register Base,
Register Offset, bool IsPre,
MachineRegisterInfo &MRI) const { … }bool AArch64TargetLowering::getIndexedAddressParts(SDNode *N, SDNode *Op,
SDValue &Base,
SDValue &Offset,
SelectionDAG &DAG) const { … }bool AArch64TargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base,
SDValue &Offset,
ISD::MemIndexedMode &AM,
SelectionDAG &DAG) const { … }bool AArch64TargetLowering::getPostIndexedAddressParts(
SDNode *N, SDNode *Op, SDValue &Base, SDValue &Offset,
ISD::MemIndexedMode &AM, SelectionDAG &DAG) const { … }static void replaceBoolVectorBitcast(SDNode *N,
static void replaceBoolVectorBitcast(SDNode *N,
                                     SmallVectorImpl<SDValue> &Results,
                                     SelectionDAG &DAG) { … }

static void CustomNonLegalBITCASTResults(SDNode *N,
                                         SmallVectorImpl<SDValue> &Results,
                                         SelectionDAG &DAG, EVT ExtendVT,
                                         EVT CastVT) { … }

void AArch64TargetLowering::ReplaceBITCASTResults(
    SDNode *N, SmallVectorImpl<SDValue> &Results, SelectionDAG &DAG) const { … }

static void ReplaceAddWithADDP(SDNode *N, SmallVectorImpl<SDValue> &Results,
                               SelectionDAG &DAG,
                               const AArch64Subtarget *Subtarget) { … }

static void ReplaceReductionResults(SDNode *N,
                                    SmallVectorImpl<SDValue> &Results,
                                    SelectionDAG &DAG, unsigned InterOp,
                                    unsigned AcrossOp) { … }

void AArch64TargetLowering::ReplaceExtractSubVectorResults(
    SDNode *N, SmallVectorImpl<SDValue> &Results, SelectionDAG &DAG) const { … }

static SDValue createGPRPairNode(SelectionDAG &DAG, SDValue V) { … }

static void ReplaceCMP_SWAP_128Results(SDNode *N,
                                       SmallVectorImpl<SDValue> &Results,
                                       SelectionDAG &DAG,
                                       const AArch64Subtarget *Subtarget) { … }

static unsigned getAtomicLoad128Opcode(unsigned ISDOpcode,
                                       AtomicOrdering Ordering) { … }

static void ReplaceATOMIC_LOAD_128Results(SDNode *N,
                                          SmallVectorImpl<SDValue> &Results,
                                          SelectionDAG &DAG,
                                          const AArch64Subtarget *Subtarget) { … }
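// Background note (expected strategy, hedged): 128-bit atomic operands are
// modelled as a GPR pair, which createGPRPairNode builds via REG_SEQUENCE.
// With LSE available, a 128-bit compare-and-swap can select to CASP;
// otherwise the expansion falls back to an exclusive-pair (LDXP/STXP) loop.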
void AArch64TargetLowering::ReplaceNodeResults(
    SDNode *N, SmallVectorImpl<SDValue> &Results, SelectionDAG &DAG) const { … }

bool AArch64TargetLowering::useLoadStackGuardNode() const { … }

unsigned AArch64TargetLowering::combineRepeatedFPDivisors() const { … }

TargetLoweringBase::LegalizeTypeAction
AArch64TargetLowering::getPreferredVectorAction(MVT VT) const { … }

bool AArch64TargetLowering::isOpSuitableForLDPSTP(const Instruction *I) const { … }

bool AArch64TargetLowering::isOpSuitableForLSE128(const Instruction *I) const { … }

bool AArch64TargetLowering::isOpSuitableForRCPC3(const Instruction *I) const { … }

bool AArch64TargetLowering::shouldInsertFencesForAtomic(
    const Instruction *I) const { … }

bool AArch64TargetLowering::shouldInsertTrailingFenceForAtomicStore(
    const Instruction *I) const { … }

TargetLoweringBase::AtomicExpansionKind
AArch64TargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const { … }

TargetLowering::AtomicExpansionKind
AArch64TargetLowering::shouldExpandAtomicLoadInIR(LoadInst *LI) const { … }

static bool rmwOpMayLowerToLibcall(const AArch64Subtarget &Subtarget,
                                   const AtomicRMWInst *RMW) { … }

TargetLowering::AtomicExpansionKind
AArch64TargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const { … }
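// Decision sketch (an assumption, matching the surrounding hooks): with LSE,
// common read-modify-write operations map to native atomics (LDADD, SWP,
// ...) and need no IR expansion; without LSE they expand to an LL/SC loop,
// unless outline atomics route them to libcalls, which is the case
// rmwOpMayLowerToLibcall above appears to detect.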
TargetLowering::AtomicExpansionKind
AArch64TargetLowering::shouldExpandAtomicCmpXchgInIR(
    AtomicCmpXchgInst *AI) const { … }

Value *AArch64TargetLowering::emitLoadLinked(IRBuilderBase &Builder,
                                             Type *ValueTy, Value *Addr,
                                             AtomicOrdering Ord) const { … }

void AArch64TargetLowering::emitAtomicCmpXchgNoStoreLLBalance(
    IRBuilderBase &Builder) const { … }

Value *AArch64TargetLowering::emitStoreConditional(IRBuilderBase &Builder,
                                                   Value *Val, Value *Addr,
                                                   AtomicOrdering Ord) const { … }
bool AArch64TargetLowering::functionArgumentNeedsConsecutiveRegisters(
    Type *Ty, CallingConv::ID CallConv, bool isVarArg,
    const DataLayout &DL) const { … }

bool AArch64TargetLowering::shouldNormalizeToSelectSequence(LLVMContext &,
                                                            EVT) const { … }

static Value *UseTlsOffset(IRBuilderBase &IRB, unsigned Offset) { … }

Value *AArch64TargetLowering::getIRStackGuard(IRBuilderBase &IRB) const { … }

void AArch64TargetLowering::insertSSPDeclarations(Module &M) const { … }

Value *AArch64TargetLowering::getSDagStackGuard(const Module &M) const { … }

Function *AArch64TargetLowering::getSSPStackGuardCheck(const Module &M) const { … }
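// Sketch of the TLS-slot pattern UseTlsOffset presumably builds (hedged: the
// helper name and GEP shape below are illustrative assumptions). Some
// targets, e.g. Android, keep the stack guard at a fixed offset from the
// thread pointer.
static Value *useTlsOffsetSketch(IRBuilderBase &IRB, unsigned Offset) {
  Module *M = IRB.GetInsertBlock()->getModule();
  // %tp = call ptr @llvm.thread.pointer()
  Function *ThreadPointerFn =
      Intrinsic::getDeclaration(M, Intrinsic::thread_pointer);
  Value *TP = IRB.CreateCall(ThreadPointerFn);
  // Address the guard slot at a fixed byte offset from the thread pointer.
  return IRB.CreateConstGEP1_32(IRB.getInt8Ty(), TP, Offset);
}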
Value *
AArch64TargetLowering::getSafeStackPointerLocation(IRBuilderBase &IRB) const { … }

bool AArch64TargetLowering::isMaskAndCmp0FoldingBeneficial(
    const Instruction &AndI) const { … }

bool AArch64TargetLowering::
    shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd(
        SDValue X, ConstantSDNode *XC, ConstantSDNode *CC, SDValue Y,
        unsigned OldShiftOpcode, unsigned NewShiftOpcode,
        SelectionDAG &DAG) const { … }

TargetLowering::ShiftLegalizationStrategy
AArch64TargetLowering::preferredShiftLegalizationStrategy(
    SelectionDAG &DAG, SDNode *N, unsigned int ExpansionFactor) const { … }

void AArch64TargetLowering::initializeSplitCSR(MachineBasicBlock *Entry) const { … }

void AArch64TargetLowering::insertCopiesSplitCSR(
    MachineBasicBlock *Entry,
    const SmallVectorImpl<MachineBasicBlock *> &Exits) const { … }

bool AArch64TargetLowering::isIntDivCheap(EVT VT, AttributeList Attr) const { … }

bool AArch64TargetLowering::preferIncOfAddToSubOfNot(EVT VT) const { … }

bool AArch64TargetLowering::shouldConvertFpToSat(unsigned Op, EVT FPVT,
                                                 EVT VT) const { … }

bool AArch64TargetLowering::shouldExpandCmpUsingSelects(EVT VT) const { … }

MachineInstr *
AArch64TargetLowering::EmitKCFICheck(MachineBasicBlock &MBB,
                                     MachineBasicBlock::instr_iterator &MBBI,
                                     const TargetInstrInfo *TII) const { … }

bool AArch64TargetLowering::enableAggressiveFMAFusion(EVT VT) const { … }
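// Reference shape of the emitted KCFI check (assumed; exact registers and
// trap encoding live in the elided body): the callee's type hash, stored
// just before its entry point, is compared against the expected constant at
// the call site:
//
//   ldur w16, [xN, #-4]   ; load the type hash ahead of the call target
//   movk w17, #...        ; materialize the expected hash
//   cmp  w16, w17
//   b.eq .Lpass
//   brk  #...             ; mismatch: trap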
unsigned
AArch64TargetLowering::getVaListSizeInBits(const DataLayout &DL) const { … }

void AArch64TargetLowering::finalizeLowering(MachineFunction &MF) const { … }

bool AArch64TargetLowering::needsFixedCatchObjects() const { … }

bool AArch64TargetLowering::shouldLocalize(
    const MachineInstr &MI, const TargetTransformInfo *TTI) const { … }

bool AArch64TargetLowering::fallBackToDAGISel(const Instruction &Inst) const { … }

static EVT getContainerForFixedLengthVector(SelectionDAG &DAG, EVT VT) { … }
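// For illustration (assumed mapping, following the usual SVE convention):
// fixed-length vectors are given a scalable "container" type with the same
// element type, e.g. v16i8 -> nxv16i8, v4i32 -> nxv4i32, v2f64 -> nxv2f64,
// and the predicate helpers below build a governing predicate covering just
// the fixed number of lanes.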
static SDValue getPredicateForFixedLengthVector(SelectionDAG &DAG, SDLoc &DL,
                                                EVT VT) { … }

static SDValue getPredicateForScalableVector(SelectionDAG &DAG, SDLoc &DL,
                                             EVT VT) { … }

static SDValue getPredicateForVector(SelectionDAG &DAG, SDLoc &DL, EVT VT) { … }

static SDValue convertToScalableVector(SelectionDAG &DAG, EVT VT, SDValue V) { … }

static SDValue convertFromScalableVector(SelectionDAG &DAG, EVT VT, SDValue V) { … }
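// Sketch of the fixed<->scalable bridging idiom (an assumed shape for the
// two conversion helpers above; the Sketch suffix marks it as illustrative):
// a fixed-length value lives in lanes [0, N) of a wider scalable container.
static SDValue convertToScalableVectorSketch(SelectionDAG &DAG,
                                             EVT ContainerVT, SDValue V) {
  SDLoc DL(V);
  // Insert the fixed-length vector at lane 0 of an undef scalable vector,
  // e.g. v4i32 -> nxv4i32; EXTRACT_SUBVECTOR at index 0 reverses this.
  return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, ContainerVT,
                     DAG.getUNDEF(ContainerVT), V,
                     DAG.getVectorIdxConstant(0, DL));
}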
SDValue AArch64TargetLowering::LowerFixedLengthVectorLoadToSVE(
    SDValue Op, SelectionDAG &DAG) const { … }

static SDValue convertFixedMaskToScalableVector(SDValue Mask,
                                                SelectionDAG &DAG) { … }

SDValue AArch64TargetLowering::LowerFixedLengthVectorMLoadToSVE(
    SDValue Op, SelectionDAG &DAG) const { … }

SDValue AArch64TargetLowering::LowerFixedLengthVectorStoreToSVE(
    SDValue Op, SelectionDAG &DAG) const { … }

SDValue AArch64TargetLowering::LowerFixedLengthVectorMStoreToSVE(
    SDValue Op, SelectionDAG &DAG) const { … }

SDValue AArch64TargetLowering::LowerFixedLengthVectorIntDivideToSVE(
    SDValue Op, SelectionDAG &DAG) const { … }

SDValue AArch64TargetLowering::LowerFixedLengthVectorIntExtendToSVE(
    SDValue Op, SelectionDAG &DAG) const { … }

SDValue AArch64TargetLowering::LowerFixedLengthVectorTruncateToSVE(
    SDValue Op, SelectionDAG &DAG) const { … }

SDValue AArch64TargetLowering::LowerFixedLengthExtractVectorElt(
    SDValue Op, SelectionDAG &DAG) const { … }

SDValue AArch64TargetLowering::LowerFixedLengthInsertVectorElt(
    SDValue Op, SelectionDAG &DAG) const { … }

SDValue AArch64TargetLowering::LowerToPredicatedOp(SDValue Op,
                                                   SelectionDAG &DAG,
                                                   unsigned NewOp) const { … }

SDValue AArch64TargetLowering::LowerToScalableOp(SDValue Op,
                                                 SelectionDAG &DAG) const { … }
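// Shape sketch for the generic fixed-length fallbacks (assumed, built on
// convertToScalableVector above): operands are widened into their scalable
// containers, the operation is re-emitted on the container type, and the
// result is narrowed back, e.g. for an unpredicated binary operation:
//
//   SDValue Op0 = convertToScalableVector(DAG, ContainerVT, Op.getOperand(0));
//   SDValue Op1 = convertToScalableVector(DAG, ContainerVT, Op.getOperand(1));
//   SDValue Res = DAG.getNode(Op.getOpcode(), DL, ContainerVT, Op0, Op1);
//   return convertFromScalableVector(DAG, VT, Res);
//
// LowerToPredicatedOp additionally prepends a governing predicate operand
// when switching to the predicated AArch64ISD opcode NewOp.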
SDValue AArch64TargetLowering::LowerVECREDUCE_SEQ_FADD(SDValue ScalarOp,
                                                       SelectionDAG &DAG) const { … }

SDValue AArch64TargetLowering::LowerPredReductionToSVE(SDValue ReduceOp,
                                                       SelectionDAG &DAG) const { … }

SDValue AArch64TargetLowering::LowerReductionToSVE(unsigned Opcode,
                                                   SDValue ScalarOp,
                                                   SelectionDAG &DAG) const { … }

SDValue
AArch64TargetLowering::LowerFixedLengthVectorSelectToSVE(SDValue Op,
                                                         SelectionDAG &DAG) const { … }

SDValue AArch64TargetLowering::LowerFixedLengthVectorSetccToSVE(
    SDValue Op, SelectionDAG &DAG) const { … }

SDValue
AArch64TargetLowering::LowerFixedLengthBitcastToSVE(SDValue Op,
                                                    SelectionDAG &DAG) const { … }

SDValue AArch64TargetLowering::LowerFixedLengthConcatVectorsToSVE(
    SDValue Op, SelectionDAG &DAG) const { … }

SDValue
AArch64TargetLowering::LowerFixedLengthFPExtendToSVE(SDValue Op,
                                                     SelectionDAG &DAG) const { … }

SDValue
AArch64TargetLowering::LowerFixedLengthFPRoundToSVE(SDValue Op,
                                                    SelectionDAG &DAG) const { … }

SDValue
AArch64TargetLowering::LowerFixedLengthIntToFPToSVE(SDValue Op,
                                                    SelectionDAG &DAG) const { … }

SDValue
AArch64TargetLowering::LowerVECTOR_DEINTERLEAVE(SDValue Op,
                                                SelectionDAG &DAG) const { … }

SDValue AArch64TargetLowering::LowerVECTOR_INTERLEAVE(SDValue Op,
                                                      SelectionDAG &DAG) const { … }

SDValue AArch64TargetLowering::LowerVECTOR_HISTOGRAM(SDValue Op,
                                                     SelectionDAG &DAG) const { … }

SDValue
AArch64TargetLowering::LowerFixedLengthFPToIntToSVE(SDValue Op,
                                                    SelectionDAG &DAG) const { … }

static SDValue GenerateFixedLengthSVETBL(SDValue Op, SDValue Op1, SDValue Op2,
                                         ArrayRef<int> ShuffleMask, EVT VT,
                                         EVT ContainerVT, SelectionDAG &DAG) { … }

SDValue AArch64TargetLowering::LowerFixedLengthVECTOR_SHUFFLEToSVE(
    SDValue Op, SelectionDAG &DAG) const { … }

SDValue AArch64TargetLowering::getSVESafeBitCast(EVT VT, SDValue Op,
                                                 SelectionDAG &DAG) const { … }

bool AArch64TargetLowering::isAllActivePredicate(SelectionDAG &DAG,
                                                 SDValue N) const { … }

EVT AArch64TargetLowering::getPromotedVTForPredicate(EVT VT) const { … }

bool AArch64TargetLowering::SimplifyDemandedBitsForTargetNode(
    SDValue Op, const APInt &OriginalDemandedBits,
    const APInt &OriginalDemandedElts, KnownBits &Known, TargetLoweringOpt &TLO,
    unsigned Depth) const { … }

bool AArch64TargetLowering::isTargetCanonicalConstantNode(SDValue Op) const { … }

bool AArch64TargetLowering::isComplexDeinterleavingSupported() const { … }

bool AArch64TargetLowering::isComplexDeinterleavingOperationSupported(
    ComplexDeinterleavingOperation Operation, Type *Ty) const { … }

Value *AArch64TargetLowering::createComplexDeinterleavingIR(
    IRBuilderBase &B, ComplexDeinterleavingOperation OperationType,
    ComplexDeinterleavingRotation Rotation, Value *InputA, Value *InputB,
    Value *Accumulator) const { … }
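// Context note (an assumption about scope): complex deinterleaving targets
// the FCMLA/FCADD family, so a complex multiply such as (a + bi) * (c + di)
// can lower to a pair of FCMLA instructions at rotations 0 and 90 degrees
// instead of shuffles plus separate multiplies and adds.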
bool AArch64TargetLowering::preferScalarizeSplat(SDNode *N) const { … }

unsigned AArch64TargetLowering::getMinimumJumpTableEntries() const { … }

MVT AArch64TargetLowering::getRegisterTypeForCallingConv(LLVMContext &Context,
                                                         CallingConv::ID CC,
                                                         EVT VT) const { … }

unsigned AArch64TargetLowering::getNumRegistersForCallingConv(
    LLVMContext &Context, CallingConv::ID CC, EVT VT) const { … }

unsigned AArch64TargetLowering::getVectorTypeBreakdownForCallingConv(
    LLVMContext &Context, CallingConv::ID CC, EVT VT, EVT &IntermediateVT,
    unsigned &NumIntermediates, MVT &RegisterVT) const { … }

bool AArch64TargetLowering::hasInlineStackProbe(
    const MachineFunction &MF) const { … }

bool AArch64TargetLowering::isTypeDesirableForOp(unsigned Opc, EVT VT) const { … }

#ifndef NDEBUG
…
#endif