#include "ARMISelLowering.h"
#include "ARMBaseInstrInfo.h"
#include "ARMBaseRegisterInfo.h"
#include "ARMCallingConv.h"
#include "ARMConstantPoolValue.h"
#include "ARMMachineFunctionInfo.h"
#include "ARMPerfectShuffle.h"
#include "ARMRegisterInfo.h"
#include "ARMSelectionDAGInfo.h"
#include "ARMSubtarget.h"
#include "ARMTargetTransformInfo.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "MCTargetDesc/ARMBaseInfo.h"
#include "Utils/ARMBaseInfo.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/ComplexDeinterleavingPass.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/IntrinsicLowering.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RuntimeLibcallUtil.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGAddressAnalysis.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/CodeGenTypes/MachineValueType.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsARM.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCInstrItineraries.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSchedule.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/TargetParser/Triple.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstdlib>
#include <iterator>
#include <limits>
#include <optional>
#include <tuple>
#include <utility>
#include <vector>
using namespace llvm;
using namespace llvm::PatternMatch;
#define DEBUG_TYPE …
STATISTIC(NumTailCalls, "Number of tail calls");
STATISTIC(NumMovwMovt, "Number of GAs materialized with movw + movt");
STATISTIC(NumLoopByVals, "Number of loops generated for byval arguments");
STATISTIC(NumConstpoolPromoted,
"Number of constants with their storage promoted into constant pools");
static cl::opt<bool>
ARMInterworking("arm-interworking", cl::Hidden,
cl::desc("Enable / disable ARM interworking (for debugging only)"),
cl::init(true));
static cl::opt<bool> EnableConstpoolPromotion(
"arm-promote-constant", cl::Hidden,
cl::desc("Enable / disable promotion of unnamed_addr constants into "
"constant pools"),
cl::init(false));
static cl::opt<unsigned> ConstpoolPromotionMaxSize(
"arm-promote-constant-max-size", cl::Hidden,
cl::desc("Maximum size of constant to promote into a constant pool"),
cl::init(64));
static cl::opt<unsigned> ConstpoolPromotionMaxTotal(
"arm-promote-constant-max-total", cl::Hidden,
cl::desc("Maximum size of ALL constants to promote into a constant pool"),
cl::init(128));
cl::opt<unsigned>
MVEMaxSupportedInterleaveFactor("mve-max-interleave-factor", cl::Hidden,
cl::desc("Maximum interleave factor for MVE VLDn to generate."),
cl::init(2));
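// Illustrative usage (assumed, not taken from this file): the STATISTIC
// counters above are reported when running with -stats, and the cl::opt flags
// are ordinary LLVM command-line options, e.g.
//   llc -mtriple=armv7-none-eabi -arm-promote-constant \
//       -arm-promote-constant-max-size=32 -stats input.ll
// The triple and the value 32 are arbitrary example choices.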
static const MCPhysReg GPRArgRegs[] = …;
static SDValue handleCMSEValue(const SDValue &Value, const ISD::InputArg &Arg,
SelectionDAG &DAG, const SDLoc &DL) { … }
void ARMTargetLowering::addTypeForNEON(MVT VT, MVT PromotedLdStVT) { … }
void ARMTargetLowering::addDRTypeForNEON(MVT VT) { … }
void ARMTargetLowering::addQRTypeForNEON(MVT VT) { … }
void ARMTargetLowering::setAllExpand(MVT VT) { … }
void ARMTargetLowering::addAllExtLoads(const MVT From, const MVT To,
LegalizeAction Action) { … }
void ARMTargetLowering::addMVEVectorTypes(bool HasMVEFP) { … }
ARMTargetLowering::ARMTargetLowering(const TargetMachine &TM,
const ARMSubtarget &STI)
: … { … }
bool ARMTargetLowering::useSoftFloat() const { … }
std::pair<const TargetRegisterClass *, uint8_t>
ARMTargetLowering::findRepresentativeClass(const TargetRegisterInfo *TRI,
MVT VT) const { … }
const char *ARMTargetLowering::getTargetNodeName(unsigned Opcode) const { … }
EVT ARMTargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &,
EVT VT) const { … }
const TargetRegisterClass *
ARMTargetLowering::getRegClassFor(MVT VT, bool isDivergent) const { … }
bool ARMTargetLowering::shouldAlignPointerArgs(CallInst *CI, unsigned &MinSize,
Align &PrefAlign) const { … }
FastISel *
ARMTargetLowering::createFastISel(FunctionLoweringInfo &funcInfo,
const TargetLibraryInfo *libInfo) const { … }
Sched::Preference ARMTargetLowering::getSchedulingPreference(SDNode *N) const { … }
static bool isSRL16(const SDValue &Op) { … }
static bool isSRA16(const SDValue &Op) { … }
static bool isSHL16(const SDValue &Op) { … }
static bool isS16(const SDValue &Op, SelectionDAG &DAG) { … }
static ARMCC::CondCodes IntCCToARMCC(ISD::CondCode CC) { … }
static void FPCCToARMCC(ISD::CondCode CC, ARMCC::CondCodes &CondCode,
ARMCC::CondCodes &CondCode2) { … }
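// Calling-convention selection and call/argument/return lowering
// (CCAssignFnForCall/CCAssignFnForReturn, LowerCall, LowerReturn,
// LowerFormalArguments and their helpers).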
CallingConv::ID
ARMTargetLowering::getEffectiveCallingConv(CallingConv::ID CC,
bool isVarArg) const { … }
CCAssignFn *ARMTargetLowering::CCAssignFnForCall(CallingConv::ID CC,
bool isVarArg) const { … }
CCAssignFn *ARMTargetLowering::CCAssignFnForReturn(CallingConv::ID CC,
bool isVarArg) const { … }
CCAssignFn *ARMTargetLowering::CCAssignFnForNode(CallingConv::ID CC,
bool Return,
bool isVarArg) const { … }
SDValue ARMTargetLowering::MoveToHPR(const SDLoc &dl, SelectionDAG &DAG,
MVT LocVT, MVT ValVT, SDValue Val) const { … }
SDValue ARMTargetLowering::MoveFromHPR(const SDLoc &dl, SelectionDAG &DAG,
MVT LocVT, MVT ValVT,
SDValue Val) const { … }
SDValue ARMTargetLowering::LowerCallResult(
SDValue Chain, SDValue InGlue, CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals, bool isThisReturn,
SDValue ThisVal, bool isCmseNSCall) const { … }
std::pair<SDValue, MachinePointerInfo> ARMTargetLowering::computeAddrForCallArg(
const SDLoc &dl, SelectionDAG &DAG, const CCValAssign &VA, SDValue StackPtr,
bool IsTailCall, int SPDiff) const { … }
void ARMTargetLowering::PassF64ArgInRegs(const SDLoc &dl, SelectionDAG &DAG,
SDValue Chain, SDValue &Arg,
RegsToPassVector &RegsToPass,
CCValAssign &VA, CCValAssign &NextVA,
SDValue &StackPtr,
SmallVectorImpl<SDValue> &MemOpChains,
bool IsTailCall,
int SPDiff) const { … }
static bool canGuaranteeTCO(CallingConv::ID CC, bool GuaranteeTailCalls) { … }
SDValue
ARMTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
SmallVectorImpl<SDValue> &InVals) const { … }
void ARMTargetLowering::HandleByVal(CCState *State, unsigned &Size,
Align Alignment) const { … }
static
bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags,
MachineFrameInfo &MFI, const MachineRegisterInfo *MRI,
const TargetInstrInfo *TII) { … }
bool ARMTargetLowering::IsEligibleForTailCallOptimization(
TargetLowering::CallLoweringInfo &CLI, CCState &CCInfo,
SmallVectorImpl<CCValAssign> &ArgLocs, const bool isIndirect) const { … }
bool
ARMTargetLowering::CanLowerReturn(CallingConv::ID CallConv,
MachineFunction &MF, bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
LLVMContext &Context) const { … }
static SDValue LowerInterruptReturn(SmallVectorImpl<SDValue> &RetOps,
const SDLoc &DL, SelectionDAG &DAG) { … }
SDValue
ARMTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
const SDLoc &dl, SelectionDAG &DAG) const { … }
bool ARMTargetLowering::isUsedByReturnOnly(SDNode *N, SDValue &Chain) const { … }
bool ARMTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const { … }
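// Lowering of constant pools, jump tables, block/global addresses, TLS
// accesses, and the EH SjLj setjmp/longjmp intrinsics.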
static SDValue LowerWRITE_REGISTER(SDValue Op, SelectionDAG &DAG) { … }
SDValue ARMTargetLowering::LowerConstantPool(SDValue Op,
SelectionDAG &DAG) const { … }
unsigned ARMTargetLowering::getJumpTableEncoding() const { … }
SDValue ARMTargetLowering::LowerBlockAddress(SDValue Op,
SelectionDAG &DAG) const { … }
SDValue
ARMTargetLowering::LowerGlobalTLSAddressDarwin(SDValue Op,
SelectionDAG &DAG) const { … }
SDValue
ARMTargetLowering::LowerGlobalTLSAddressWindows(SDValue Op,
SelectionDAG &DAG) const { … }
SDValue
ARMTargetLowering::LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA,
SelectionDAG &DAG) const { … }
SDValue
ARMTargetLowering::LowerToTLSExecModels(GlobalAddressSDNode *GA,
SelectionDAG &DAG,
TLSModel::Model model) const { … }
SDValue
ARMTargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const { … }
static bool allUsersAreInFunction(const Value *V, const Function *F) { … }
static SDValue promoteToConstantPool(const ARMTargetLowering *TLI,
const GlobalValue *GV, SelectionDAG &DAG,
EVT PtrVT, const SDLoc &dl) { … }
bool ARMTargetLowering::isReadOnly(const GlobalValue *GV) const { … }
SDValue ARMTargetLowering::LowerGlobalAddress(SDValue Op,
SelectionDAG &DAG) const { … }
SDValue ARMTargetLowering::LowerGlobalAddressELF(SDValue Op,
SelectionDAG &DAG) const { … }
SDValue ARMTargetLowering::LowerGlobalAddressDarwin(SDValue Op,
SelectionDAG &DAG) const { … }
SDValue ARMTargetLowering::LowerGlobalAddressWindows(SDValue Op,
SelectionDAG &DAG) const { … }
SDValue
ARMTargetLowering::LowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const { … }
SDValue
ARMTargetLowering::LowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG) const { … }
SDValue ARMTargetLowering::LowerEH_SJLJ_SETUP_DISPATCH(SDValue Op,
SelectionDAG &DAG) const { … }
SDValue ARMTargetLowering::LowerINTRINSIC_VOID(
SDValue Op, SelectionDAG &DAG, const ARMSubtarget *Subtarget) const { … }
SDValue
ARMTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG,
const ARMSubtarget *Subtarget) const { … }
static SDValue LowerATOMIC_FENCE(SDValue Op, SelectionDAG &DAG,
const ARMSubtarget *Subtarget) { … }
static SDValue LowerPREFETCH(SDValue Op, SelectionDAG &DAG,
const ARMSubtarget *Subtarget) { … }
static SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) { … }
SDValue ARMTargetLowering::GetF64FormalArgument(CCValAssign &VA,
CCValAssign &NextVA,
SDValue &Root,
SelectionDAG &DAG,
const SDLoc &dl) const { … }
int ARMTargetLowering::StoreByValRegs(CCState &CCInfo, SelectionDAG &DAG,
const SDLoc &dl, SDValue &Chain,
const Value *OrigArg,
unsigned InRegsParamRecordIdx,
int ArgOffset, unsigned ArgSize) const { … }
void ARMTargetLowering::VarArgStyleRegisters(CCState &CCInfo, SelectionDAG &DAG,
const SDLoc &dl, SDValue &Chain,
unsigned ArgOffset,
unsigned TotalArgRegsSaveSize,
bool ForceMutable) const { … }
bool ARMTargetLowering::splitValueIntoRegisterParts(
SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts,
unsigned NumParts, MVT PartVT, std::optional<CallingConv::ID> CC) const { … }
SDValue ARMTargetLowering::joinRegisterPartsIntoValue(
SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts, unsigned NumParts,
MVT PartVT, EVT ValueVT, std::optional<CallingConv::ID> CC) const { … }
SDValue ARMTargetLowering::LowerFormalArguments(
SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const { … }
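// Lowering of comparisons, conditional moves/selects and branches
// (getARMCmp, getVFPCmp, LowerSELECT, LowerSELECT_CC, LowerBRCOND, LowerBR_CC,
// LowerBR_JT).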
static bool isFloatingPointZero(SDValue Op) { … }
SDValue ARMTargetLowering::getARMCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
SDValue &ARMcc, SelectionDAG &DAG,
const SDLoc &dl) const { … }
SDValue ARMTargetLowering::getVFPCmp(SDValue LHS, SDValue RHS,
SelectionDAG &DAG, const SDLoc &dl,
bool Signaling) const { … }
SDValue
ARMTargetLowering::duplicateCmp(SDValue Cmp, SelectionDAG &DAG) const { … }
std::pair<SDValue, SDValue>
ARMTargetLowering::getARMXALUOOp(SDValue Op, SelectionDAG &DAG,
SDValue &ARMcc) const { … }
SDValue
ARMTargetLowering::LowerSignedALUO(SDValue Op, SelectionDAG &DAG) const { … }
static SDValue ConvertBooleanCarryToCarryFlag(SDValue BoolCarry,
SelectionDAG &DAG) { … }
static SDValue ConvertCarryFlagToBooleanCarry(SDValue Flags, EVT VT,
SelectionDAG &DAG) { … }
SDValue ARMTargetLowering::LowerUnsignedALUO(SDValue Op,
SelectionDAG &DAG) const { … }
static SDValue LowerADDSUBSAT(SDValue Op, SelectionDAG &DAG,
const ARMSubtarget *Subtarget) { … }
SDValue ARMTargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const { … }
static void checkVSELConstraints(ISD::CondCode CC, ARMCC::CondCodes &CondCode,
bool &swpCmpOps, bool &swpVselOps) { … }
SDValue ARMTargetLowering::getCMOV(const SDLoc &dl, EVT VT, SDValue FalseVal,
SDValue TrueVal, SDValue ARMcc, SDValue CCR,
SDValue Cmp, SelectionDAG &DAG) const { … }
static bool isGTorGE(ISD::CondCode CC) { … }
static bool isLTorLE(ISD::CondCode CC) { … }
static bool isLowerSaturate(const SDValue LHS, const SDValue RHS,
const SDValue TrueVal, const SDValue FalseVal,
const ISD::CondCode CC, const SDValue K) { … }
static SDValue LowerSaturatingConditional(SDValue Op, SelectionDAG &DAG) { … }
static bool isLowerSaturatingConditional(const SDValue &Op, SDValue &V,
SDValue &SatK)
{ … }
bool ARMTargetLowering::isUnsupportedFloatingType(EVT VT) const { … }
SDValue ARMTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const { … }
static bool canChangeToInt(SDValue Op, bool &SeenZero,
const ARMSubtarget *Subtarget) { … }
static SDValue bitcastf32Toi32(SDValue Op, SelectionDAG &DAG) { … }
static void expandf64Toi32(SDValue Op, SelectionDAG &DAG,
SDValue &RetVal1, SDValue &RetVal2) { … }
SDValue
ARMTargetLowering::OptimizeVFPBrcond(SDValue Op, SelectionDAG &DAG) const { … }
SDValue ARMTargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const { … }
SDValue ARMTargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const { … }
SDValue ARMTargetLowering::LowerBR_JT(SDValue Op, SelectionDAG &DAG) const { … }
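// Scalar lowering of FP/integer conversions, copysign, return/frame address,
// long shifts, bit-counting nodes, and floating-point environment
// (rounding-mode and FP-mode) operations.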
static SDValue LowerVectorFP_TO_INT(SDValue Op, SelectionDAG &DAG) { … }
SDValue ARMTargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const { … }
static SDValue LowerFP_TO_INT_SAT(SDValue Op, SelectionDAG &DAG,
const ARMSubtarget *Subtarget) { … }
static SDValue LowerVectorINT_TO_FP(SDValue Op, SelectionDAG &DAG) { … }
SDValue ARMTargetLowering::LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) const { … }
SDValue ARMTargetLowering::LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const { … }
SDValue ARMTargetLowering::LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const{ … }
SDValue ARMTargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const { … }
Register ARMTargetLowering::getRegisterByName(const char* RegName, LLT VT,
const MachineFunction &MF) const { … }
static void ExpandREAD_REGISTER(SDNode *N, SmallVectorImpl<SDValue> &Results,
SelectionDAG &DAG) { … }
static SDValue CombineVMOVDRRCandidateWithVecOp(const SDNode *BC,
SelectionDAG &DAG) { … }
SDValue ARMTargetLowering::ExpandBITCAST(SDNode *N, SelectionDAG &DAG,
const ARMSubtarget *Subtarget) const { … }
static SDValue getZeroVector(EVT VT, SelectionDAG &DAG, const SDLoc &dl) { … }
SDValue ARMTargetLowering::LowerShiftRightParts(SDValue Op,
SelectionDAG &DAG) const { … }
SDValue ARMTargetLowering::LowerShiftLeftParts(SDValue Op,
SelectionDAG &DAG) const { … }
SDValue ARMTargetLowering::LowerGET_ROUNDING(SDValue Op,
SelectionDAG &DAG) const { … }
SDValue ARMTargetLowering::LowerSET_ROUNDING(SDValue Op,
SelectionDAG &DAG) const { … }
SDValue ARMTargetLowering::LowerSET_FPMODE(SDValue Op,
SelectionDAG &DAG) const { … }
SDValue ARMTargetLowering::LowerRESET_FPMODE(SDValue Op,
SelectionDAG &DAG) const { … }
static SDValue LowerCTTZ(SDNode *N, SelectionDAG &DAG,
const ARMSubtarget *ST) { … }
static SDValue LowerCTPOP(SDNode *N, SelectionDAG &DAG,
const ARMSubtarget *ST) { … }
static bool getVShiftImm(SDValue Op, unsigned ElementBits, int64_t &Cnt) { … }
static bool isVShiftLImm(SDValue Op, EVT VT, bool isLong, int64_t &Cnt) { … }
static bool isVShiftRImm(SDValue Op, EVT VT, bool isNarrow, bool isIntrinsic,
int64_t &Cnt) { … }
static SDValue LowerShift(SDNode *N, SelectionDAG &DAG,
const ARMSubtarget *ST) { … }
static SDValue Expand64BitShift(SDNode *N, SelectionDAG &DAG,
const ARMSubtarget *ST) { … }
static SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG,
const ARMSubtarget *ST) { … }
static SDValue LowerSETCCCARRY(SDValue Op, SelectionDAG &DAG) { … }
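// NEON/MVE vector lowering: VMOV immediate materialization, shuffle mask
// classification (VEXT/VTRN/VUZP/VZIP/VMOVN), BUILD_VECTOR, VECTOR_SHUFFLE,
// element insert/extract, concat/extract-subvector, and vector mul/div.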
static SDValue isVMOVModifiedImm(uint64_t SplatBits, uint64_t SplatUndef,
unsigned SplatBitSize, SelectionDAG &DAG,
const SDLoc &dl, EVT &VT, EVT VectorVT,
VMOVModImmType type) { … }
SDValue ARMTargetLowering::LowerConstantFP(SDValue Op, SelectionDAG &DAG,
const ARMSubtarget *ST) const { … }
static bool isSingletonVEXTMask(ArrayRef<int> M, EVT VT, unsigned &Imm) { … }
static bool isVEXTMask(ArrayRef<int> M, EVT VT,
bool &ReverseVEXT, unsigned &Imm) { … }
static bool isVTBLMask(ArrayRef<int> M, EVT VT) { … }
static unsigned SelectPairHalf(unsigned Elements, ArrayRef<int> Mask,
unsigned Index) { … }
static bool isVTRNMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) { … }
static bool isVTRN_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult){ … }
static bool isVUZPMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) { … }
static bool isVUZP_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult){ … }
static bool isVZIPMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) { … }
static bool isVZIP_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult){ … }
static unsigned isNEONTwoResultShuffleMask(ArrayRef<int> ShuffleMask, EVT VT,
unsigned &WhichResult,
bool &isV_UNDEF) { … }
static bool isReverseMask(ArrayRef<int> M, EVT VT) { … }
static bool isTruncMask(ArrayRef<int> M, EVT VT, bool Top, bool SingleSource) { … }
static bool isVMOVNMask(ArrayRef<int> M, EVT VT, bool Top, bool SingleSource) { … }
static bool isVMOVNTruncMask(ArrayRef<int> M, EVT ToVT, bool rev) { … }
static SDValue LowerBuildVectorOfFPTrunc(SDValue BV, SelectionDAG &DAG,
const ARMSubtarget *ST) { … }
static SDValue LowerBuildVectorOfFPExt(SDValue BV, SelectionDAG &DAG,
const ARMSubtarget *ST) { … }
static SDValue IsSingleInstrConstant(SDValue N, SelectionDAG &DAG,
const ARMSubtarget *ST, const SDLoc &dl) { … }
static SDValue LowerBUILD_VECTOR_i1(SDValue Op, SelectionDAG &DAG,
const ARMSubtarget *ST) { … }
static SDValue LowerBUILD_VECTORToVIDUP(SDValue Op, SelectionDAG &DAG,
const ARMSubtarget *ST) { … }
static bool IsQRMVEInstruction(const SDNode *N, const SDNode *Op) { … }
SDValue ARMTargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
const ARMSubtarget *ST) const { … }
SDValue ARMTargetLowering::ReconstructShuffle(SDValue Op,
SelectionDAG &DAG) const { … }
enum ShuffleOpCodes { … };
static bool isLegalMVEShuffleOp(unsigned PFEntry) { … }
bool ARMTargetLowering::isShuffleMaskLegal(ArrayRef<int> M, EVT VT) const { … }
static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS,
SDValue RHS, SelectionDAG &DAG,
const SDLoc &dl) { … }
static SDValue LowerVECTOR_SHUFFLEv8i8(SDValue Op,
ArrayRef<int> ShuffleMask,
SelectionDAG &DAG) { … }
static SDValue LowerReverse_VECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) { … }
static EVT getVectorTyFromPredicateVector(EVT VT) { … }
static SDValue PromoteMVEPredVector(SDLoc dl, SDValue Pred, EVT VT,
SelectionDAG &DAG) { … }
static SDValue LowerVECTOR_SHUFFLE_i1(SDValue Op, SelectionDAG &DAG,
const ARMSubtarget *ST) { … }
static SDValue LowerVECTOR_SHUFFLEUsingMovs(SDValue Op,
ArrayRef<int> ShuffleMask,
SelectionDAG &DAG) { … }
static SDValue LowerVECTOR_SHUFFLEUsingOneOff(SDValue Op,
ArrayRef<int> ShuffleMask,
SelectionDAG &DAG) { … }
static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG,
const ARMSubtarget *ST) { … }
static SDValue LowerINSERT_VECTOR_ELT_i1(SDValue Op, SelectionDAG &DAG,
const ARMSubtarget *ST) { … }
SDValue ARMTargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op,
SelectionDAG &DAG) const { … }
static SDValue LowerEXTRACT_VECTOR_ELT_i1(SDValue Op, SelectionDAG &DAG,
const ARMSubtarget *ST) { … }
static SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG,
const ARMSubtarget *ST) { … }
static SDValue LowerCONCAT_VECTORS_i1(SDValue Op, SelectionDAG &DAG,
const ARMSubtarget *ST) { … }
static SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG,
const ARMSubtarget *ST) { … }
static SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, SelectionDAG &DAG,
const ARMSubtarget *ST) { … }
static SDValue LowerTruncatei1(SDNode *N, SelectionDAG &DAG,
const ARMSubtarget *ST) { … }
static SDValue LowerTruncate(SDNode *N, SelectionDAG &DAG,
const ARMSubtarget *Subtarget) { … }
static SDValue LowerVectorExtend(SDNode *N, SelectionDAG &DAG,
const ARMSubtarget *Subtarget) { … }
static bool isExtendedBUILD_VECTOR(SDNode *N, SelectionDAG &DAG,
bool isSigned) { … }
static bool isSignExtended(SDNode *N, SelectionDAG &DAG) { … }
static bool isZeroExtended(SDNode *N, SelectionDAG &DAG) { … }
static EVT getExtensionTo64Bits(const EVT &OrigVT) { … }
static SDValue AddRequiredExtensionForVMULL(SDValue N, SelectionDAG &DAG,
const EVT &OrigTy,
const EVT &ExtTy,
unsigned ExtOpcode) { … }
static SDValue SkipLoadExtensionForVMULL(LoadSDNode *LD, SelectionDAG& DAG) { … }
static SDValue SkipExtensionForVMULL(SDNode *N, SelectionDAG &DAG) { … }
static bool isAddSubSExt(SDNode *N, SelectionDAG &DAG) { … }
static bool isAddSubZExt(SDNode *N, SelectionDAG &DAG) { … }
static SDValue LowerMUL(SDValue Op, SelectionDAG &DAG) { … }
static SDValue LowerSDIV_v4i8(SDValue X, SDValue Y, const SDLoc &dl,
SelectionDAG &DAG) { … }
static SDValue LowerSDIV_v4i16(SDValue N0, SDValue N1, const SDLoc &dl,
SelectionDAG &DAG) { … }
static SDValue LowerSDIV(SDValue Op, SelectionDAG &DAG,
const ARMSubtarget *ST) { … }
static SDValue LowerUDIV(SDValue Op, SelectionDAG &DAG,
const ARMSubtarget *ST) { … }
static SDValue LowerUADDSUBO_CARRY(SDValue Op, SelectionDAG &DAG) { … }
SDValue ARMTargetLowering::LowerFSINCOS(SDValue Op, SelectionDAG &DAG) const { … }
SDValue ARMTargetLowering::LowerWindowsDIVLibCall(SDValue Op, SelectionDAG &DAG,
bool Signed,
SDValue &Chain) const { … }
SDValue
ARMTargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor,
SelectionDAG &DAG,
SmallVectorImpl<SDNode *> &Created) const { … }
SDValue ARMTargetLowering::LowerDIV_Windows(SDValue Op, SelectionDAG &DAG,
bool Signed) const { … }
static SDValue WinDBZCheckDenominator(SelectionDAG &DAG, SDNode *N, SDValue InChain) { … }
void ARMTargetLowering::ExpandDIV_Windows(
SDValue Op, SelectionDAG &DAG, bool Signed,
SmallVectorImpl<SDValue> &Results) const { … }
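// MVE predicate (i1 vector) loads/stores, masked loads, vector reductions,
// atomic load/store lowering, and custom result-type legalization
// (LowerOperation / ReplaceNodeResults).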
static SDValue LowerPredicateLoad(SDValue Op, SelectionDAG &DAG) { … }
void ARMTargetLowering::LowerLOAD(SDNode *N, SmallVectorImpl<SDValue> &Results,
SelectionDAG &DAG) const { … }
static SDValue LowerPredicateStore(SDValue Op, SelectionDAG &DAG) { … }
static SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG,
const ARMSubtarget *Subtarget) { … }
static bool isZeroVector(SDValue N) { … }
static SDValue LowerMLOAD(SDValue Op, SelectionDAG &DAG) { … }
static SDValue LowerVecReduce(SDValue Op, SelectionDAG &DAG,
const ARMSubtarget *ST) { … }
static SDValue LowerVecReduceF(SDValue Op, SelectionDAG &DAG,
const ARMSubtarget *ST) { … }
static SDValue LowerVecReduceMinMax(SDValue Op, SelectionDAG &DAG,
const ARMSubtarget *ST) { … }
static SDValue LowerAtomicLoadStore(SDValue Op, SelectionDAG &DAG) { … }
static void ReplaceREADCYCLECOUNTER(SDNode *N,
SmallVectorImpl<SDValue> &Results,
SelectionDAG &DAG,
const ARMSubtarget *Subtarget) { … }
static SDValue createGPRPairNode2xi32(SelectionDAG &DAG, SDValue V0,
SDValue V1) { … }
static SDValue createGPRPairNodei64(SelectionDAG &DAG, SDValue V) { … }
static void ReplaceCMP_SWAP_64Results(SDNode *N,
SmallVectorImpl<SDValue> &Results,
SelectionDAG &DAG) { … }
SDValue ARMTargetLowering::LowerFSETCC(SDValue Op, SelectionDAG &DAG) const { … }
SDValue ARMTargetLowering::LowerSPONENTRY(SDValue Op, SelectionDAG &DAG) const { … }
SDValue ARMTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const { … }
static void ReplaceLongIntrinsic(SDNode *N, SmallVectorImpl<SDValue> &Results,
SelectionDAG &DAG) { … }
void ARMTargetLowering::ReplaceNodeResults(SDNode *N,
SmallVectorImpl<SDValue> &Results,
SelectionDAG &DAG) const { … }
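// Custom MachineBasicBlock expansion: SjLj dispatch blocks, struct byval copy
// loops, Windows __chkstk / __dbzchk, and MVE tail-predicated memcpy/memset
// loops (EmitInstrWithCustomInserter and helpers).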
void ARMTargetLowering::SetupEntryBlockForSjLj(MachineInstr &MI,
MachineBasicBlock *MBB,
MachineBasicBlock *DispatchBB,
int FI) const { … }
void ARMTargetLowering::EmitSjLjDispatchBlock(MachineInstr &MI,
MachineBasicBlock *MBB) const { … }
static
MachineBasicBlock *OtherSucc(MachineBasicBlock *MBB, MachineBasicBlock *Succ) { … }
static unsigned getLdOpcode(unsigned LdSize, bool IsThumb1, bool IsThumb2) { … }
static unsigned getStOpcode(unsigned StSize, bool IsThumb1, bool IsThumb2) { … }
static void emitPostLd(MachineBasicBlock *BB, MachineBasicBlock::iterator Pos,
const TargetInstrInfo *TII, const DebugLoc &dl,
unsigned LdSize, unsigned Data, unsigned AddrIn,
unsigned AddrOut, bool IsThumb1, bool IsThumb2) { … }
static void emitPostSt(MachineBasicBlock *BB, MachineBasicBlock::iterator Pos,
const TargetInstrInfo *TII, const DebugLoc &dl,
unsigned StSize, unsigned Data, unsigned AddrIn,
unsigned AddrOut, bool IsThumb1, bool IsThumb2) { … }
MachineBasicBlock *
ARMTargetLowering::EmitStructByval(MachineInstr &MI,
MachineBasicBlock *BB) const { … }
MachineBasicBlock *
ARMTargetLowering::EmitLowered__chkstk(MachineInstr &MI,
MachineBasicBlock *MBB) const { … }
MachineBasicBlock *
ARMTargetLowering::EmitLowered__dbzchk(MachineInstr &MI,
MachineBasicBlock *MBB) const { … }
static bool checkAndUpdateCPSRKill(MachineBasicBlock::iterator SelectItr,
MachineBasicBlock* BB,
const TargetRegisterInfo* TRI) { … }
static Register genTPEntry(MachineBasicBlock *TpEntry,
MachineBasicBlock *TpLoopBody,
MachineBasicBlock *TpExit, Register OpSizeReg,
const TargetInstrInfo *TII, DebugLoc Dl,
MachineRegisterInfo &MRI) { … }
static void genTPLoopBody(MachineBasicBlock *TpLoopBody,
MachineBasicBlock *TpEntry, MachineBasicBlock *TpExit,
const TargetInstrInfo *TII, DebugLoc Dl,
MachineRegisterInfo &MRI, Register OpSrcReg,
Register OpDestReg, Register ElementCountReg,
Register TotalIterationsReg, bool IsMemcpy) { … }
MachineBasicBlock *
ARMTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
MachineBasicBlock *BB) const { … }
static void attachMEMCPYScratchRegs(const ARMSubtarget *Subtarget,
MachineInstr &MI, const SDNode *Node) { … }
void ARMTargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI,
SDNode *Node) const { … }
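// Target-specific DAG combines (PerformDAGCombine and the Perform*Combine
// helpers below), followed by legality and addressing-mode queries, inline-asm
// constraint support, atomic expansion hooks, and interleaved-access lowering.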
static inline bool isZeroOrAllOnes(SDValue N, bool AllOnes) { … }
static bool isConditionalZeroOrAllOnes(SDNode *N, bool AllOnes,
SDValue &CC, bool &Invert,
SDValue &OtherOp,
SelectionDAG &DAG) { … }
static
SDValue combineSelectAndUse(SDNode *N, SDValue Slct, SDValue OtherOp,
TargetLowering::DAGCombinerInfo &DCI,
bool AllOnes = false) { … }
static
SDValue combineSelectAndUseCommutative(SDNode *N, bool AllOnes,
TargetLowering::DAGCombinerInfo &DCI) { … }
static bool IsVUZPShuffleNode(SDNode *N) { … }
static SDValue AddCombineToVPADD(SDNode *N, SDValue N0, SDValue N1,
TargetLowering::DAGCombinerInfo &DCI,
const ARMSubtarget *Subtarget) { … }
static SDValue AddCombineVUZPToVPADDL(SDNode *N, SDValue N0, SDValue N1,
TargetLowering::DAGCombinerInfo &DCI,
const ARMSubtarget *Subtarget) { … }
static SDValue
AddCombineBUILD_VECTORToVPADDL(SDNode *N, SDValue N0, SDValue N1,
TargetLowering::DAGCombinerInfo &DCI,
const ARMSubtarget *Subtarget) { … }
static SDValue findMUL_LOHI(SDValue V) { … }
static SDValue AddCombineTo64BitSMLAL16(SDNode *AddcNode, SDNode *AddeNode,
TargetLowering::DAGCombinerInfo &DCI,
const ARMSubtarget *Subtarget) { … }
static SDValue AddCombineTo64bitMLAL(SDNode *AddeSubeNode,
TargetLowering::DAGCombinerInfo &DCI,
const ARMSubtarget *Subtarget) { … }
static SDValue AddCombineTo64bitUMAAL(SDNode *AddeNode,
TargetLowering::DAGCombinerInfo &DCI,
const ARMSubtarget *Subtarget) { … }
static SDValue PerformUMLALCombine(SDNode *N, SelectionDAG &DAG,
const ARMSubtarget *Subtarget) { … }
static SDValue PerformAddcSubcCombine(SDNode *N,
TargetLowering::DAGCombinerInfo &DCI,
const ARMSubtarget *Subtarget) { … }
static SDValue PerformAddeSubeCombine(SDNode *N,
TargetLowering::DAGCombinerInfo &DCI,
const ARMSubtarget *Subtarget) { … }
static SDValue PerformSELECTCombine(SDNode *N,
TargetLowering::DAGCombinerInfo &DCI,
const ARMSubtarget *Subtarget) { … }
static SDValue PerformVQDMULHCombine(SDNode *N, SelectionDAG &DAG) { … }
static SDValue PerformVSELECTCombine(SDNode *N,
TargetLowering::DAGCombinerInfo &DCI,
const ARMSubtarget *Subtarget) { … }
static SDValue PerformVSetCCToVCTPCombine(SDNode *N,
TargetLowering::DAGCombinerInfo &DCI,
const ARMSubtarget *Subtarget) { … }
static SDValue PerformADDECombine(SDNode *N,
TargetLowering::DAGCombinerInfo &DCI,
const ARMSubtarget *Subtarget) { … }
static SDValue PerformADDCombineWithOperands(SDNode *N, SDValue N0, SDValue N1,
TargetLowering::DAGCombinerInfo &DCI,
const ARMSubtarget *Subtarget){ … }
static SDValue TryDistrubutionADDVecReduce(SDNode *N, SelectionDAG &DAG) { … }
static SDValue PerformADDVecReduce(SDNode *N, SelectionDAG &DAG,
const ARMSubtarget *Subtarget) { … }
bool
ARMTargetLowering::isDesirableToCommuteWithShift(const SDNode *N,
CombineLevel Level) const { … }
bool ARMTargetLowering::isDesirableToCommuteXorWithShift(
    const SDNode *N) const { … }
bool ARMTargetLowering::shouldFoldConstantShiftPairToMask(
    const SDNode *N, CombineLevel Level) const { … }
bool ARMTargetLowering::shouldFoldSelectWithIdentityConstant(unsigned BinOpcode,
                                                             EVT VT) const { … }
bool ARMTargetLowering::preferIncOfAddToSubOfNot(EVT VT) const { … }
bool ARMTargetLowering::shouldConvertFpToSat(unsigned Op, EVT FPVT,
                                             EVT VT) const { … }
static SDValue PerformSHLSimplify(SDNode *N,
                                  TargetLowering::DAGCombinerInfo &DCI,
                                  const ARMSubtarget *ST) { … }
static SDValue PerformADDCombine(SDNode *N,
                                 TargetLowering::DAGCombinerInfo &DCI,
                                 const ARMSubtarget *Subtarget) { … }
static SDValue PerformSubCSINCCombine(SDNode *N, SelectionDAG &DAG) { … }
static SDValue PerformSUBCombine(SDNode *N,
                                 TargetLowering::DAGCombinerInfo &DCI,
                                 const ARMSubtarget *Subtarget) { … }
static SDValue PerformVMULCombine(SDNode *N,
                                  TargetLowering::DAGCombinerInfo &DCI,
                                  const ARMSubtarget *Subtarget) { … }
static SDValue PerformMVEVMULLCombine(SDNode *N, SelectionDAG &DAG,
                                      const ARMSubtarget *Subtarget) { … }
static SDValue PerformMULCombine(SDNode *N,
                                 TargetLowering::DAGCombinerInfo &DCI,
                                 const ARMSubtarget *Subtarget) { … }
static SDValue CombineANDShift(SDNode *N,
                               TargetLowering::DAGCombinerInfo &DCI,
                               const ARMSubtarget *Subtarget) { … }
static SDValue PerformANDCombine(SDNode *N,
                                 TargetLowering::DAGCombinerInfo &DCI,
                                 const ARMSubtarget *Subtarget) { … }
static SDValue PerformORCombineToSMULWBT(SDNode *OR,
                                         TargetLowering::DAGCombinerInfo &DCI,
                                         const ARMSubtarget *Subtarget) { … }
static SDValue PerformORCombineToBFI(SDNode *N,
                                     TargetLowering::DAGCombinerInfo &DCI,
                                     const ARMSubtarget *Subtarget) { … }
static bool isValidMVECond(unsigned CC, bool IsFloat) { … }
static ARMCC::CondCodes getVCMPCondCode(SDValue N) { … }
static bool CanInvertMVEVCMP(SDValue N) { … }
static SDValue PerformORCombine_i1(SDNode *N, SelectionDAG &DAG,
                                   const ARMSubtarget *Subtarget) { … }
static SDValue PerformORCombine(SDNode *N,
                                TargetLowering::DAGCombinerInfo &DCI,
                                const ARMSubtarget *Subtarget) { … }
static SDValue PerformXORCombine(SDNode *N,
                                 TargetLowering::DAGCombinerInfo &DCI,
                                 const ARMSubtarget *Subtarget) { … }
static SDValue ParseBFI(SDNode *N, APInt &ToMask, APInt &FromMask) { … }
static bool BitsProperlyConcatenate(const APInt &A, const APInt &B) { … }
static SDValue FindBFIToCombineWith(SDNode *N) { … }
static SDValue PerformBFICombine(SDNode *N, SelectionDAG &DAG) { … }
static SDValue IsCMPZCSINC(SDNode *Cmp, ARMCC::CondCodes &CC) { … }
static SDValue PerformCMPZCombine(SDNode *N, SelectionDAG &DAG) { … }
static SDValue PerformCSETCombine(SDNode *N, SelectionDAG &DAG) { … }
static SDValue PerformVMOVRRDCombine(SDNode *N,
                                     TargetLowering::DAGCombinerInfo &DCI,
                                     const ARMSubtarget *Subtarget) { … }
static SDValue PerformVMOVDRRCombine(SDNode *N, SelectionDAG &DAG) { … }
static SDValue PerformVMOVhrCombine(SDNode *N,
                                    TargetLowering::DAGCombinerInfo &DCI) { … }
static SDValue PerformVMOVrhCombine(SDNode *N, SelectionDAG &DAG) { … }
static bool hasNormalLoadOperand(SDNode *N) { … }
static SDValue PerformBUILD_VECTORCombine(SDNode *N,
                                          TargetLowering::DAGCombinerInfo &DCI,
                                          const ARMSubtarget *Subtarget) { … }
static SDValue
PerformARMBUILD_VECTORCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) { … }
static SDValue
PerformPREDICATE_CASTCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) { … }
static SDValue PerformVECTOR_REG_CASTCombine(SDNode *N, SelectionDAG &DAG,
                                             const ARMSubtarget *ST) { … }
static SDValue PerformVCMPCombine(SDNode *N, SelectionDAG &DAG,
                                  const ARMSubtarget *Subtarget) { … }
static SDValue PerformInsertEltCombine(SDNode *N,
                                       TargetLowering::DAGCombinerInfo &DCI) { … }
static SDValue
PerformExtractEltToVMOVRRD(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) { … }
static SDValue PerformExtractEltCombine(SDNode *N,
                                        TargetLowering::DAGCombinerInfo &DCI,
                                        const ARMSubtarget *ST) { … }
static SDValue PerformSignExtendInregCombine(SDNode *N, SelectionDAG &DAG) { … }
static SDValue
PerformInsertSubvectorCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) { … }
static SDValue PerformShuffleVMOVNCombine(ShuffleVectorSDNode *N,
                                          SelectionDAG &DAG) { … }
static SDValue PerformVECTOR_SHUFFLECombine(SDNode *N, SelectionDAG &DAG) { … }
struct BaseUpdateTarget { … };
struct BaseUpdateUser { … };
static bool TryCombineBaseUpdate(struct BaseUpdateTarget &Target,
                                 struct BaseUpdateUser &User,
                                 bool SimpleConstIncOnly,
                                 TargetLowering::DAGCombinerInfo &DCI) { … }
static unsigned getPointerConstIncrement(unsigned Opcode, SDValue Ptr,
                                         SDValue Inc, const SelectionDAG &DAG) { … }
static bool findPointerConstIncrement(SDNode *N, SDValue *Ptr, SDValue *CInc) { … }
static bool isValidBaseUpdate(SDNode *N, SDNode *User) { … }
static SDValue CombineBaseUpdate(SDNode *N,
                                 TargetLowering::DAGCombinerInfo &DCI) { … }
static SDValue PerformVLDCombine(SDNode *N,
                                 TargetLowering::DAGCombinerInfo &DCI) { … }
static SDValue PerformMVEVLDCombine(SDNode *N,
                                    TargetLowering::DAGCombinerInfo &DCI) { … }
static bool CombineVLDDUP(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) { … }
static SDValue PerformVDUPLANECombine(SDNode *N,
                                      TargetLowering::DAGCombinerInfo &DCI,
                                      const ARMSubtarget *Subtarget) { … }
static SDValue PerformVDUPCombine(SDNode *N, SelectionDAG &DAG,
                                  const ARMSubtarget *Subtarget) { … }
static SDValue PerformLOADCombine(SDNode *N,
                                  TargetLowering::DAGCombinerInfo &DCI,
                                  const ARMSubtarget *Subtarget) { … }
static SDValue PerformTruncatingStoreCombine(StoreSDNode *St,
                                             SelectionDAG &DAG) { … }
static SDValue PerformSplittingToNarrowingStores(StoreSDNode *St,
                                                 SelectionDAG &DAG) { … }
static SDValue PerformSplittingMVETruncToNarrowingStores(StoreSDNode *St,
                                                         SelectionDAG &DAG) { … }
static SDValue PerformExtractFpToIntStores(StoreSDNode *St, SelectionDAG &DAG) { … }
static SDValue PerformSTORECombine(SDNode *N,
                                   TargetLowering::DAGCombinerInfo &DCI,
                                   const ARMSubtarget *Subtarget) { … }
static SDValue PerformVCVTCombine(SDNode *N, SelectionDAG &DAG,
                                  const ARMSubtarget *Subtarget) { … }
static SDValue PerformFAddVSelectCombine(SDNode *N, SelectionDAG &DAG,
                                         const ARMSubtarget *Subtarget) { … }
static SDValue PerformFADDVCMLACombine(SDNode *N, SelectionDAG &DAG) { … }
static SDValue PerformFADDCombine(SDNode *N, SelectionDAG &DAG,
                                  const ARMSubtarget *Subtarget) { … }
static SDValue PerformVMulVCTPCombine(SDNode *N, SelectionDAG &DAG,
                                      const ARMSubtarget *Subtarget) { … }
static SDValue PerformVECREDUCE_ADDCombine(SDNode *N, SelectionDAG &DAG,
                                           const ARMSubtarget *ST) { … }
static SDValue PerformReduceShuffleCombine(SDNode *N, SelectionDAG &DAG) { … }
static SDValue PerformVMOVNCombine(SDNode *N,
                                   TargetLowering::DAGCombinerInfo &DCI) { … }
static SDValue PerformVQMOVNCombine(SDNode *N,
                                    TargetLowering::DAGCombinerInfo &DCI) { … }
static SDValue PerformVQDMULHCombine(SDNode *N,
                                     TargetLowering::DAGCombinerInfo &DCI) { … }
static SDValue PerformLongShiftCombine(SDNode *N, SelectionDAG &DAG) { … }
SDValue ARMTargetLowering::PerformIntrinsicCombine(SDNode *N,
                                                   DAGCombinerInfo &DCI) const { … }
static SDValue PerformShiftCombine(SDNode *N,
                                   TargetLowering::DAGCombinerInfo &DCI,
                                   const ARMSubtarget *ST) { … }
static SDValue PerformSplittingToWideningLoad(SDNode *N, SelectionDAG &DAG) { … }
static SDValue PerformExtendCombine(SDNode *N, SelectionDAG &DAG,
                                    const ARMSubtarget *ST) { … }
static SDValue PerformFPExtendCombine(SDNode *N, SelectionDAG &DAG,
                                      const ARMSubtarget *ST) { … }
static SDValue PerformMinMaxToSatCombine(SDValue Op, SelectionDAG &DAG,
                                         const ARMSubtarget *Subtarget) { … }
static SDValue PerformMinMaxCombine(SDNode *N, SelectionDAG &DAG,
                                    const ARMSubtarget *ST) { … }
static const APInt *isPowerOf2Constant(SDValue V) { … }
SDValue ARMTargetLowering::PerformCMOVToBFICombine(SDNode *CMOV, SelectionDAG &DAG) const { … }
static SDValue SearchLoopIntrinsic(SDValue N, ISD::CondCode &CC, int &Imm,
                                   bool &Negate) { … }
static SDValue PerformHWLoopCombine(SDNode *N,
                                    TargetLowering::DAGCombinerInfo &DCI,
                                    const ARMSubtarget *ST) { … }
SDValue
ARMTargetLowering::PerformBRCONDCombine(SDNode *N, SelectionDAG &DAG) const { … }
SDValue
ARMTargetLowering::PerformCMOVCombine(SDNode *N, SelectionDAG &DAG) const { … }
static SDValue PerformBITCASTCombine(SDNode *N,
                                     TargetLowering::DAGCombinerInfo &DCI,
                                     const ARMSubtarget *ST) { … }
SDValue ARMTargetLowering::PerformMVETruncCombine(
    SDNode *N, TargetLowering::DAGCombinerInfo &DCI) const { … }
static SDValue PerformSplittingMVEEXTToWideningLoad(SDNode *N,
                                                    SelectionDAG &DAG) { … }
SDValue ARMTargetLowering::PerformMVEExtCombine(
    SDNode *N, TargetLowering::DAGCombinerInfo &DCI) const { … }
SDValue ARMTargetLowering::PerformDAGCombine(SDNode *N,
                                             DAGCombinerInfo &DCI) const { … }
bool ARMTargetLowering::isDesirableToTransformToIntegerOp(unsigned Opc,
                                                          EVT VT) const { … }
bool ARMTargetLowering::allowsMisalignedMemoryAccesses(EVT VT, unsigned,
                                                       Align Alignment,
                                                       MachineMemOperand::Flags,
                                                       unsigned *Fast) const { … }
EVT ARMTargetLowering::getOptimalMemOpType(
    const MemOp &Op, const AttributeList &FuncAttributes) const { … }
bool ARMTargetLowering::isTruncateFree(Type *SrcTy, Type *DstTy) const { … }
bool ARMTargetLowering::isTruncateFree(EVT SrcVT, EVT DstVT) const { … }
bool ARMTargetLowering::isZExtFree(SDValue Val, EVT VT2) const { … }
bool ARMTargetLowering::isFNegFree(EVT VT) const { … }
static bool areExtractExts(Value *Ext1, Value *Ext2) { … }
bool ARMTargetLowering::shouldSinkOperands(Instruction *I,
                                           SmallVectorImpl<Use *> &Ops) const { … }
Type *ARMTargetLowering::shouldConvertSplatType(ShuffleVectorInst *SVI) const { … }
bool ARMTargetLowering::isVectorLoadExtDesirable(SDValue ExtVal) const { … }
bool ARMTargetLowering::allowTruncateForTailCall(Type *Ty1, Type *Ty2) const { … }
bool ARMTargetLowering::isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
                                                   EVT VT) const { … }
static bool isLegalT1AddressImmediate(int64_t V, EVT VT) { … }
static bool isLegalT2AddressImmediate(int64_t V, EVT VT,
                                      const ARMSubtarget *Subtarget) { … }
static bool isLegalAddressImmediate(int64_t V, EVT VT,
                                    const ARMSubtarget *Subtarget) { … }
bool ARMTargetLowering::isLegalT2ScaledAddressingMode(const AddrMode &AM,
                                                      EVT VT) const { … }
bool ARMTargetLowering::isLegalT1ScaledAddressingMode(const AddrMode &AM,
                                                      EVT VT) const { … }
bool ARMTargetLowering::isLegalAddressingMode(const DataLayout &DL,
                                              const AddrMode &AM, Type *Ty,
                                              unsigned AS, Instruction *I) const { … }
bool ARMTargetLowering::isLegalICmpImmediate(int64_t Imm) const { … }
bool ARMTargetLowering::isLegalAddImmediate(int64_t Imm) const { … }
bool ARMTargetLowering::isMulAddWithConstProfitable(SDValue AddNode,
                                                    SDValue ConstNode) const { … }
static bool getARMIndexedAddressParts(SDNode *Ptr, EVT VT,
                                      bool isSEXTLoad, SDValue &Base,
                                      SDValue &Offset, bool &isInc,
                                      SelectionDAG &DAG) { … }
static bool getT2IndexedAddressParts(SDNode *Ptr, EVT VT,
                                     bool isSEXTLoad, SDValue &Base,
                                     SDValue &Offset, bool &isInc,
                                     SelectionDAG &DAG) { … }
static bool getMVEIndexedAddressParts(SDNode *Ptr, EVT VT, Align Alignment,
                                      bool isSEXTLoad, bool IsMasked, bool isLE,
                                      SDValue &Base, SDValue &Offset,
                                      bool &isInc, SelectionDAG &DAG) { … }
bool
ARMTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base,
                                             SDValue &Offset,
                                             ISD::MemIndexedMode &AM,
                                             SelectionDAG &DAG) const { … }
bool ARMTargetLowering::getPostIndexedAddressParts(SDNode *N, SDNode *Op,
                                                   SDValue &Base,
                                                   SDValue &Offset,
                                                   ISD::MemIndexedMode &AM,
                                                   SelectionDAG &DAG) const { … }
void ARMTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
                                                      KnownBits &Known,
                                                      const APInt &DemandedElts,
                                                      const SelectionDAG &DAG,
                                                      unsigned Depth) const { … }
bool ARMTargetLowering::targetShrinkDemandedConstant(
    SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts,
    TargetLoweringOpt &TLO) const { … }
bool ARMTargetLowering::SimplifyDemandedBitsForTargetNode(
    SDValue Op, const APInt &OriginalDemandedBits,
    const APInt &OriginalDemandedElts, KnownBits &Known, TargetLoweringOpt &TLO,
    unsigned Depth) const { … }
bool ARMTargetLowering::ExpandInlineAsm(CallInst *CI) const { … }
const char *ARMTargetLowering::LowerXConstraint(EVT ConstraintVT) const { … }
ARMTargetLowering::ConstraintType
ARMTargetLowering::getConstraintType(StringRef Constraint) const { … }
TargetLowering::ConstraintWeight
ARMTargetLowering::getSingleConstraintMatchWeight(
    AsmOperandInfo &info, const char *constraint) const { … }
RCPair ARMTargetLowering::getRegForInlineAsmConstraint(
    const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const { … }
void ARMTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
                                                     StringRef Constraint,
                                                     std::vector<SDValue> &Ops,
                                                     SelectionDAG &DAG) const { … }
static RTLIB::Libcall getDivRemLibcall(
    const SDNode *N, MVT::SimpleValueType SVT) { … }
static TargetLowering::ArgListTy getDivRemArgList(
    const SDNode *N, LLVMContext *Context, const ARMSubtarget *Subtarget) { … }
SDValue ARMTargetLowering::LowerDivRem(SDValue Op, SelectionDAG &DAG) const { … }
SDValue ARMTargetLowering::LowerREM(SDNode *N, SelectionDAG &DAG) const { … }
SDValue
ARMTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const { … }
SDValue ARMTargetLowering::LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const { … }
SDValue ARMTargetLowering::LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const { … }
bool
ARMTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const { … }
bool ARM::isBitFieldInvertedMask(unsigned v) { … }
bool ARMTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
                                     bool ForCodeSize) const { … }
bool ARMTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
                                           const CallInst &I,
                                           MachineFunction &MF,
                                           unsigned Intrinsic) const { … }
bool ARMTargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
                                                          Type *Ty) const { … }
bool ARMTargetLowering::isExtractSubvectorCheap(EVT ResVT, EVT SrcVT,
                                                unsigned Index) const { … }
Instruction *ARMTargetLowering::makeDMB(IRBuilderBase &Builder,
                                        ARM_MB::MemBOpt Domain) const { … }
Instruction *ARMTargetLowering::emitLeadingFence(IRBuilderBase &Builder,
                                                 Instruction *Inst,
                                                 AtomicOrdering Ord) const { … }
Instruction *ARMTargetLowering::emitTrailingFence(IRBuilderBase &Builder,
                                                  Instruction *Inst,
                                                  AtomicOrdering Ord) const { … }
TargetLoweringBase::AtomicExpansionKind
ARMTargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const { … }
TargetLowering::AtomicExpansionKind
ARMTargetLowering::shouldExpandAtomicLoadInIR(LoadInst *LI) const { … }
TargetLowering::AtomicExpansionKind
ARMTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const { … }
TargetLowering::AtomicExpansionKind
ARMTargetLowering::shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const { … }
bool ARMTargetLowering::shouldInsertFencesForAtomic(
    const Instruction *I) const { … }
bool ARMTargetLowering::useLoadStackGuardNode() const { … }
void ARMTargetLowering::insertSSPDeclarations(Module &M) const { … }
Value *ARMTargetLowering::getSDagStackGuard(const Module &M) const { … }
Function *ARMTargetLowering::getSSPStackGuardCheck(const Module &M) const { … }
bool ARMTargetLowering::canCombineStoreAndExtract(Type *VectorTy, Value *Idx,
                                                  unsigned &Cost) const { … }
bool ARMTargetLowering::isCheapToSpeculateCttz(Type *Ty) const { … }
bool ARMTargetLowering::isCheapToSpeculateCtlz(Type *Ty) const { … }
bool ARMTargetLowering::isMaskAndCmp0FoldingBeneficial(
    const Instruction &AndI) const { … }
TargetLowering::ShiftLegalizationStrategy
ARMTargetLowering::preferredShiftLegalizationStrategy(
    SelectionDAG &DAG, SDNode *N, unsigned ExpansionFactor) const { … }
Value *ARMTargetLowering::emitLoadLinked(IRBuilderBase &Builder, Type *ValueTy,
                                         Value *Addr,
                                         AtomicOrdering Ord) const { … }
void ARMTargetLowering::emitAtomicCmpXchgNoStoreLLBalance(
    IRBuilderBase &Builder) const { … }
Value *ARMTargetLowering::emitStoreConditional(IRBuilderBase &Builder,
                                               Value *Val, Value *Addr,
                                               AtomicOrdering Ord) const { … }
bool ARMTargetLowering::alignLoopsWithOptSize() const { … }
unsigned
ARMTargetLowering::getNumInterleavedAccesses(VectorType *VecTy,
                                             const DataLayout &DL) const { … }
bool ARMTargetLowering::isLegalInterleavedAccessType(
    unsigned Factor, FixedVectorType *VecTy, Align Alignment,
    const DataLayout &DL) const { … }
unsigned ARMTargetLowering::getMaxSupportedInterleaveFactor() const { … }
bool ARMTargetLowering::lowerInterleavedLoad(
    LoadInst *LI, ArrayRef<ShuffleVectorInst *> Shuffles,
    ArrayRef<unsigned> Indices, unsigned Factor) const { … }
bool ARMTargetLowering::lowerInterleavedStore(StoreInst *SI,
                                              ShuffleVectorInst *SVI,
                                              unsigned Factor) const { … }
enum HABaseType { … };
static bool isHomogeneousAggregate(Type *Ty, HABaseType &Base,
                                   uint64_t &Members) { … }
Align ARMTargetLowering::getABIAlignmentForCallingConv(
    Type *ArgTy, const DataLayout &DL) const { … }
bool ARMTargetLowering::functionArgumentNeedsConsecutiveRegisters(
    Type *Ty, CallingConv::ID CallConv, bool isVarArg,
    const DataLayout &DL) const { … }
Register ARMTargetLowering::getExceptionPointerRegister(
    const Constant *PersonalityFn) const { … }
Register ARMTargetLowering::getExceptionSelectorRegister(
    const Constant *PersonalityFn) const { … }
void ARMTargetLowering::initializeSplitCSR(MachineBasicBlock *Entry) const { … }
void ARMTargetLowering::insertCopiesSplitCSR(
    MachineBasicBlock *Entry,
    const SmallVectorImpl<MachineBasicBlock *> &Exits) const { … }
void ARMTargetLowering::finalizeLowering(MachineFunction &MF) const { … }
bool ARMTargetLowering::isComplexDeinterleavingSupported() const { … }
bool ARMTargetLowering::isComplexDeinterleavingOperationSupported(
    ComplexDeinterleavingOperation Operation, Type *Ty) const { … }
Value *ARMTargetLowering::createComplexDeinterleavingIR(
    IRBuilderBase &B, ComplexDeinterleavingOperation OperationType,
    ComplexDeinterleavingRotation Rotation, Value *InputA, Value *InputB,
    Value *Accumulator) const { … }