#include "SIISelLowering.h"
#include "AMDGPU.h"
#include "AMDGPUInstrInfo.h"
#include "AMDGPUTargetMachine.h"
#include "GCNSubtarget.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "SIMachineFunctionInfo.h"
#include "SIRegisterInfo.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/FloatingPointMode.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/UniformityAnalysis.h"
#include "llvm/BinaryFormat/ELF.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/ByteProvider.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
#include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
#include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/IR/IntrinsicsR600.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/ModRef.h"
#include "llvm/Transforms/Utils/LowerAtomic.h"
#include <optional>
usingnamespacellvm;
#define DEBUG_TYPE …
// Pass statistic: counts the number of tail calls emitted by this lowering
// (reported with -stats).
STATISTIC(NumTailCalls, "Number of tail calls");
// Debug/tuning flag: when set, skip the loop alignment (and associated
// prefetch) adjustments this target otherwise applies. Defaults to off.
static cl::opt<bool> DisableLoopAlignment(
  "amdgpu-disable-loop-alignment",
  cl::desc("Do not align and prefetch loops"),
  cl::init(false));
// Hidden tuning flag: force indirect register addressing when a vector index
// is divergent (instead of the default lowering strategy). Defaults to off.
static cl::opt<bool> UseDivergentRegisterIndexing(
  "amdgpu-use-divergent-register-indexing",
  cl::Hidden,
  cl::desc("Use indirect register addressing for divergent indexes"),
  cl::init(false));
static bool denormalModeIsFlushAllF32(const MachineFunction &MF) { … }
static bool denormalModeIsFlushAllF64F16(const MachineFunction &MF) { … }
static unsigned findFirstFreeSGPR(CCState &CCInfo) { … }
SITargetLowering::SITargetLowering(const TargetMachine &TM,
const GCNSubtarget &STI)
: … { … }
const GCNSubtarget *SITargetLowering::getSubtarget() const { … }
ArrayRef<MCPhysReg> SITargetLowering::getRoundingControlRegisters() const { … }
bool SITargetLowering::isFPExtFoldable(const SelectionDAG &DAG, unsigned Opcode,
EVT DestVT, EVT SrcVT) const { … }
bool SITargetLowering::isFPExtFoldable(const MachineInstr &MI, unsigned Opcode,
LLT DestTy, LLT SrcTy) const { … }
bool SITargetLowering::isShuffleMaskLegal(ArrayRef<int>, EVT) const { … }
MVT SITargetLowering::getRegisterTypeForCallingConv(LLVMContext &Context,
CallingConv::ID CC,
EVT VT) const { … }
unsigned SITargetLowering::getNumRegistersForCallingConv(LLVMContext &Context,
CallingConv::ID CC,
EVT VT) const { … }
unsigned SITargetLowering::getVectorTypeBreakdownForCallingConv(
LLVMContext &Context, CallingConv::ID CC,
EVT VT, EVT &IntermediateVT,
unsigned &NumIntermediates, MVT &RegisterVT) const { … }
static EVT memVTFromLoadIntrData(const SITargetLowering &TLI,
const DataLayout &DL, Type *Ty,
unsigned MaxNumLanes) { … }
static EVT memVTFromLoadIntrReturn(const SITargetLowering &TLI,
const DataLayout &DL, Type *Ty,
unsigned MaxNumLanes) { … }
MVT SITargetLowering::getPointerTy(const DataLayout &DL, unsigned AS) const { … }
MVT SITargetLowering::getPointerMemTy(const DataLayout &DL, unsigned AS) const { … }
bool SITargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
const CallInst &CI,
MachineFunction &MF,
unsigned IntrID) const { … }
void SITargetLowering::CollectTargetIntrinsicOperands(
const CallInst &I, SmallVectorImpl<SDValue> &Ops, SelectionDAG &DAG) const { … }
bool SITargetLowering::getAddrModeArguments(IntrinsicInst *II,
SmallVectorImpl<Value*> &Ops,
Type *&AccessTy) const { … }
bool SITargetLowering::isLegalFlatAddressingMode(const AddrMode &AM,
unsigned AddrSpace) const { … }
bool SITargetLowering::isLegalGlobalAddressingMode(const AddrMode &AM) const { … }
bool SITargetLowering::isLegalMUBUFAddressingMode(const AddrMode &AM) const { … }
bool SITargetLowering::isLegalAddressingMode(const DataLayout &DL,
const AddrMode &AM, Type *Ty,
unsigned AS, Instruction *I) const { … }
bool SITargetLowering::canMergeStoresTo(unsigned AS, EVT MemVT,
const MachineFunction &MF) const { … }
bool SITargetLowering::allowsMisalignedMemoryAccessesImpl(
unsigned Size, unsigned AddrSpace, Align Alignment,
MachineMemOperand::Flags Flags, unsigned *IsFast) const { … }
bool SITargetLowering::allowsMisalignedMemoryAccesses(
EVT VT, unsigned AddrSpace, Align Alignment, MachineMemOperand::Flags Flags,
unsigned *IsFast) const { … }
EVT SITargetLowering::getOptimalMemOpType(
const MemOp &Op, const AttributeList &FuncAttributes) const { … }
bool SITargetLowering::isMemOpHasNoClobberedMemOperand(const SDNode *N) const { … }
bool SITargetLowering::isNonGlobalAddrSpace(unsigned AS) { … }
bool SITargetLowering::isFreeAddrSpaceCast(unsigned SrcAS,
unsigned DestAS) const { … }
TargetLoweringBase::LegalizeTypeAction
SITargetLowering::getPreferredVectorAction(MVT VT) const { … }
bool SITargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
Type *Ty) const { … }
bool SITargetLowering::isExtractSubvectorCheap(EVT ResVT, EVT SrcVT,
unsigned Index) const { … }
bool SITargetLowering::isTypeDesirableForOp(unsigned Op, EVT VT) const { … }
SDValue SITargetLowering::lowerKernArgParameterPtr(SelectionDAG &DAG,
const SDLoc &SL,
SDValue Chain,
uint64_t Offset) const { … }
SDValue SITargetLowering::getImplicitArgPtr(SelectionDAG &DAG,
const SDLoc &SL) const { … }
SDValue SITargetLowering::getLDSKernelId(SelectionDAG &DAG,
const SDLoc &SL) const { … }
SDValue SITargetLowering::convertArgType(SelectionDAG &DAG, EVT VT, EVT MemVT,
const SDLoc &SL, SDValue Val,
bool Signed,
const ISD::InputArg *Arg) const { … }
SDValue SITargetLowering::lowerKernargMemParameter(
SelectionDAG &DAG, EVT VT, EVT MemVT, const SDLoc &SL, SDValue Chain,
uint64_t Offset, Align Alignment, bool Signed,
const ISD::InputArg *Arg) const { … }
SDValue SITargetLowering::lowerStackParameter(SelectionDAG &DAG, CCValAssign &VA,
const SDLoc &SL, SDValue Chain,
const ISD::InputArg &Arg) const { … }
SDValue SITargetLowering::getPreloadedValue(SelectionDAG &DAG,
const SIMachineFunctionInfo &MFI,
EVT VT,
AMDGPUFunctionArgInfo::PreloadedValue PVID) const { … }
static void processPSInputArgs(SmallVectorImpl<ISD::InputArg> &Splits,
CallingConv::ID CallConv,
ArrayRef<ISD::InputArg> Ins, BitVector &Skipped,
FunctionType *FType,
SIMachineFunctionInfo *Info) { … }
void SITargetLowering::allocateSpecialEntryInputVGPRs(CCState &CCInfo,
MachineFunction &MF,
const SIRegisterInfo &TRI,
SIMachineFunctionInfo &Info) const { … }
static ArgDescriptor allocateVGPR32Input(CCState &CCInfo, unsigned Mask = ~0u,
ArgDescriptor Arg = ArgDescriptor()) { … }
static ArgDescriptor allocateSGPR32InputImpl(CCState &CCInfo,
const TargetRegisterClass *RC,
unsigned NumArgRegs) { … }
static void allocateFixedSGPRInputImpl(CCState &CCInfo,
const TargetRegisterClass *RC,
MCRegister Reg) { … }
static void allocateSGPR32Input(CCState &CCInfo, ArgDescriptor &Arg) { … }
static void allocateSGPR64Input(CCState &CCInfo, ArgDescriptor &Arg) { … }
void SITargetLowering::allocateSpecialInputVGPRs(
CCState &CCInfo, MachineFunction &MF,
const SIRegisterInfo &TRI, SIMachineFunctionInfo &Info) const { … }
void SITargetLowering::allocateSpecialInputVGPRsFixed(
CCState &CCInfo, MachineFunction &MF,
const SIRegisterInfo &TRI, SIMachineFunctionInfo &Info) const { … }
void SITargetLowering::allocateSpecialInputSGPRs(
CCState &CCInfo,
MachineFunction &MF,
const SIRegisterInfo &TRI,
SIMachineFunctionInfo &Info) const { … }
void SITargetLowering::allocateHSAUserSGPRs(CCState &CCInfo,
MachineFunction &MF,
const SIRegisterInfo &TRI,
SIMachineFunctionInfo &Info) const { … }
void SITargetLowering::allocatePreloadKernArgSGPRs(
CCState &CCInfo, SmallVectorImpl<CCValAssign> &ArgLocs,
const SmallVectorImpl<ISD::InputArg> &Ins, MachineFunction &MF,
const SIRegisterInfo &TRI, SIMachineFunctionInfo &Info) const { … }
void SITargetLowering::allocateLDSKernelId(CCState &CCInfo, MachineFunction &MF,
const SIRegisterInfo &TRI,
SIMachineFunctionInfo &Info) const { … }
void SITargetLowering::allocateSystemSGPRs(CCState &CCInfo,
MachineFunction &MF,
SIMachineFunctionInfo &Info,
CallingConv::ID CallConv,
bool IsShader) const { … }
static void reservePrivateMemoryRegs(const TargetMachine &TM,
MachineFunction &MF,
const SIRegisterInfo &TRI,
SIMachineFunctionInfo &Info) { … }
bool SITargetLowering::supportSplitCSR(MachineFunction *MF) const { … }
void SITargetLowering::initializeSplitCSR(MachineBasicBlock *Entry) const { … }
void SITargetLowering::insertCopiesSplitCSR(
MachineBasicBlock *Entry,
const SmallVectorImpl<MachineBasicBlock *> &Exits) const { … }
SDValue SITargetLowering::LowerFormalArguments(
SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const { … }
bool SITargetLowering::CanLowerReturn(
CallingConv::ID CallConv,
MachineFunction &MF, bool IsVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
LLVMContext &Context) const { … }
SDValue
SITargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
const SDLoc &DL, SelectionDAG &DAG) const { … }
SDValue SITargetLowering::LowerCallResult(
SDValue Chain, SDValue InGlue, CallingConv::ID CallConv, bool IsVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals, bool IsThisReturn,
SDValue ThisVal) const { … }
void SITargetLowering::passSpecialInputs(
CallLoweringInfo &CLI,
CCState &CCInfo,
const SIMachineFunctionInfo &Info,
SmallVectorImpl<std::pair<unsigned, SDValue>> &RegsToPass,
SmallVectorImpl<SDValue> &MemOpChains,
SDValue Chain) const { … }
static bool canGuaranteeTCO(CallingConv::ID CC) { … }
static bool mayTailCallThisCC(CallingConv::ID CC) { … }
bool SITargetLowering::isEligibleForTailCallOptimization(
SDValue Callee, CallingConv::ID CalleeCC, bool IsVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG) const { … }
bool SITargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const { … }
SDValue SITargetLowering::LowerCall(CallLoweringInfo &CLI,
SmallVectorImpl<SDValue> &InVals) const { … }
SDValue SITargetLowering::lowerDYNAMIC_STACKALLOCImpl(
SDValue Op, SelectionDAG &DAG) const { … }
SDValue SITargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
SelectionDAG &DAG) const { … }
SDValue SITargetLowering::LowerSTACKSAVE(SDValue Op, SelectionDAG &DAG) const { … }
SDValue SITargetLowering::lowerGET_ROUNDING(SDValue Op,
SelectionDAG &DAG) const { … }
SDValue SITargetLowering::lowerSET_ROUNDING(SDValue Op,
SelectionDAG &DAG) const { … }
SDValue SITargetLowering::lowerPREFETCH(SDValue Op, SelectionDAG &DAG) const { … }
SDValue SITargetLowering::lowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const { … }
SDValue SITargetLowering::lowerGET_FPENV(SDValue Op, SelectionDAG &DAG) const { … }
SDValue SITargetLowering::lowerSET_FPENV(SDValue Op, SelectionDAG &DAG) const { … }
Register SITargetLowering::getRegisterByName(const char* RegName, LLT VT,
const MachineFunction &MF) const { … }
MachineBasicBlock *
SITargetLowering::splitKillBlock(MachineInstr &MI,
MachineBasicBlock *BB) const { … }
static std::pair<MachineBasicBlock *, MachineBasicBlock *>
splitBlockForLoop(MachineInstr &MI, MachineBasicBlock &MBB, bool InstInLoop) { … }
void SITargetLowering::bundleInstWithWaitcnt(MachineInstr &MI) const { … }
MachineBasicBlock *
SITargetLowering::emitGWSMemViolTestLoop(MachineInstr &MI,
MachineBasicBlock *BB) const { … }
static MachineBasicBlock::iterator
emitLoadM0FromVGPRLoop(const SIInstrInfo *TII, MachineRegisterInfo &MRI,
MachineBasicBlock &OrigBB, MachineBasicBlock &LoopBB,
const DebugLoc &DL, const MachineOperand &Idx,
unsigned InitReg, unsigned ResultReg, unsigned PhiReg,
unsigned InitSaveExecReg, int Offset, bool UseGPRIdxMode,
Register &SGPRIdxReg) { … }
static MachineBasicBlock::iterator
loadM0FromVGPR(const SIInstrInfo *TII, MachineBasicBlock &MBB, MachineInstr &MI,
unsigned InitResultReg, unsigned PhiReg, int Offset,
bool UseGPRIdxMode, Register &SGPRIdxReg) { … }
static std::pair<unsigned, int>
computeIndirectRegAndOffset(const SIRegisterInfo &TRI,
const TargetRegisterClass *SuperRC,
unsigned VecReg,
int Offset) { … }
static void setM0ToIndexFromSGPR(const SIInstrInfo *TII,
MachineRegisterInfo &MRI, MachineInstr &MI,
int Offset) { … }
static Register getIndirectSGPRIdx(const SIInstrInfo *TII,
MachineRegisterInfo &MRI, MachineInstr &MI,
int Offset) { … }
static MachineBasicBlock *emitIndirectSrc(MachineInstr &MI,
MachineBasicBlock &MBB,
const GCNSubtarget &ST) { … }
static MachineBasicBlock *emitIndirectDst(MachineInstr &MI,
MachineBasicBlock &MBB,
const GCNSubtarget &ST) { … }
static MachineBasicBlock *lowerWaveReduce(MachineInstr &MI,
MachineBasicBlock &BB,
const GCNSubtarget &ST,
unsigned Opc) { … }
MachineBasicBlock *SITargetLowering::EmitInstrWithCustomInserter(
MachineInstr &MI, MachineBasicBlock *BB) const { … }
bool SITargetLowering::enableAggressiveFMAFusion(EVT VT) const { … }
bool SITargetLowering::enableAggressiveFMAFusion(LLT Ty) const { … }
EVT SITargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &Ctx,
EVT VT) const { … }
MVT SITargetLowering::getScalarShiftAmountTy(const DataLayout &, EVT VT) const { … }
LLT SITargetLowering::getPreferredShiftAmountTy(LLT Ty) const { … }
bool SITargetLowering::isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
EVT VT) const { … }
bool SITargetLowering::isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
LLT Ty) const { … }
bool SITargetLowering::isFMADLegal(const MachineInstr &MI, LLT Ty) const { … }
bool SITargetLowering::isFMADLegal(const SelectionDAG &DAG,
const SDNode *N) const { … }
SDValue SITargetLowering::splitUnaryVectorOp(SDValue Op,
SelectionDAG &DAG) const { … }
SDValue SITargetLowering::splitBinaryVectorOp(SDValue Op,
SelectionDAG &DAG) const { … }
SDValue SITargetLowering::splitTernaryVectorOp(SDValue Op,
SelectionDAG &DAG) const { … }
SDValue SITargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const { … }
static SDValue adjustLoadValueTypeImpl(SDValue Result, EVT LoadVT,
const SDLoc &DL,
SelectionDAG &DAG, bool Unpacked) { … }
SDValue SITargetLowering::adjustLoadValueType(unsigned Opcode,
MemSDNode *M,
SelectionDAG &DAG,
ArrayRef<SDValue> Ops,
bool IsIntrinsic) const { … }
SDValue SITargetLowering::lowerIntrinsicLoad(MemSDNode *M, bool IsFormat,
SelectionDAG &DAG,
ArrayRef<SDValue> Ops) const { … }
static SDValue lowerICMPIntrinsic(const SITargetLowering &TLI,
SDNode *N, SelectionDAG &DAG) { … }
static SDValue lowerFCMPIntrinsic(const SITargetLowering &TLI,
SDNode *N, SelectionDAG &DAG) { … }
static SDValue lowerBALLOTIntrinsic(const SITargetLowering &TLI, SDNode *N,
SelectionDAG &DAG) { … }
static SDValue lowerLaneOp(const SITargetLowering &TLI, SDNode *N,
SelectionDAG &DAG) { … }
void SITargetLowering::ReplaceNodeResults(SDNode *N,
SmallVectorImpl<SDValue> &Results,
SelectionDAG &DAG) const { … }
static SDNode *findUser(SDValue Value, unsigned Opcode) { … }
unsigned SITargetLowering::isCFIntrinsic(const SDNode *Intr) const { … }
bool SITargetLowering::shouldEmitFixup(const GlobalValue *GV) const { … }
bool SITargetLowering::shouldEmitGOTReloc(const GlobalValue *GV) const { … }
bool SITargetLowering::shouldEmitPCReloc(const GlobalValue *GV) const { … }
bool SITargetLowering::shouldUseLDSConstAddress(const GlobalValue *GV) const { … }
SDValue SITargetLowering::LowerBRCOND(SDValue BRCOND,
SelectionDAG &DAG) const { … }
SDValue SITargetLowering::LowerRETURNADDR(SDValue Op,
SelectionDAG &DAG) const { … }
SDValue SITargetLowering::getFPExtOrFPRound(SelectionDAG &DAG,
SDValue Op,
const SDLoc &DL,
EVT VT) const { … }
SDValue SITargetLowering::lowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const { … }
SDValue SITargetLowering::lowerFMINNUM_FMAXNUM(SDValue Op,
SelectionDAG &DAG) const { … }
SDValue SITargetLowering::lowerFLDEXP(SDValue Op, SelectionDAG &DAG) const { … }
static unsigned getExtOpcodeForPromotedOp(SDValue Op) { … }
SDValue SITargetLowering::promoteUniformOpToI32(SDValue Op,
DAGCombinerInfo &DCI) const { … }
SDValue SITargetLowering::lowerMUL(SDValue Op, SelectionDAG &DAG) const { … }
SDValue SITargetLowering::lowerXMULO(SDValue Op, SelectionDAG &DAG) const { … }
SDValue SITargetLowering::lowerXMUL_LOHI(SDValue Op, SelectionDAG &DAG) const { … }
SDValue SITargetLowering::lowerTRAP(SDValue Op, SelectionDAG &DAG) const { … }
SDValue SITargetLowering::lowerTrapEndpgm(
SDValue Op, SelectionDAG &DAG) const { … }
SDValue SITargetLowering::loadImplicitKernelArgument(SelectionDAG &DAG, MVT VT,
const SDLoc &DL, Align Alignment, ImplicitParameter Param) const { … }
SDValue SITargetLowering::lowerTrapHsaQueuePtr(
SDValue Op, SelectionDAG &DAG) const { … }
SDValue SITargetLowering::lowerTrapHsa(
SDValue Op, SelectionDAG &DAG) const { … }
SDValue SITargetLowering::lowerDEBUGTRAP(SDValue Op, SelectionDAG &DAG) const { … }
SDValue SITargetLowering::getSegmentAperture(unsigned AS, const SDLoc &DL,
SelectionDAG &DAG) const { … }
static bool isKnownNonNull(SDValue Val, SelectionDAG &DAG,
const AMDGPUTargetMachine &TM, unsigned AddrSpace) { … }
SDValue SITargetLowering::lowerADDRSPACECAST(SDValue Op,
SelectionDAG &DAG) const { … }
SDValue SITargetLowering::lowerINSERT_SUBVECTOR(SDValue Op,
SelectionDAG &DAG) const { … }
SDValue SITargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op,
SelectionDAG &DAG) const { … }
SDValue SITargetLowering::lowerEXTRACT_VECTOR_ELT(SDValue Op,
SelectionDAG &DAG) const { … }
static bool elementPairIsContiguous(ArrayRef<int> Mask, int Elt) { … }
SDValue SITargetLowering::lowerVECTOR_SHUFFLE(SDValue Op,
SelectionDAG &DAG) const { … }
SDValue SITargetLowering::lowerSCALAR_TO_VECTOR(SDValue Op,
SelectionDAG &DAG) const { … }
SDValue SITargetLowering::lowerBUILD_VECTOR(SDValue Op,
SelectionDAG &DAG) const { … }
bool
SITargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const { … }
static SDValue
buildPCRelGlobalAddress(SelectionDAG &DAG, const GlobalValue *GV,
const SDLoc &DL, int64_t Offset, EVT PtrVT,
unsigned GAFlags = SIInstrInfo::MO_NONE) { … }
SDValue SITargetLowering::LowerGlobalAddress(AMDGPUMachineFunction *MFI,
SDValue Op,
SelectionDAG &DAG) const { … }
SDValue SITargetLowering::copyToM0(SelectionDAG &DAG, SDValue Chain,
const SDLoc &DL, SDValue V) const { … }
SDValue SITargetLowering::lowerImplicitZextParam(SelectionDAG &DAG,
SDValue Op,
MVT VT,
unsigned Offset) const { … }
static SDValue emitNonHSAIntrinsicError(SelectionDAG &DAG, const SDLoc &DL,
EVT VT) { … }
static SDValue emitRemovedIntrinsicError(SelectionDAG &DAG, const SDLoc &DL,
EVT VT) { … }
static SDValue getBuildDwordsVector(SelectionDAG &DAG, SDLoc DL,
ArrayRef<SDValue> Elts) { … }
static SDValue padEltsToUndef(SelectionDAG &DAG, const SDLoc &DL, EVT CastVT,
SDValue Src, int ExtraElts) { … }
static SDValue constructRetValue(SelectionDAG &DAG, MachineSDNode *Result,
ArrayRef<EVT> ResultTypes, bool IsTexFail,
bool Unpacked, bool IsD16, int DMaskPop,
int NumVDataDwords, bool IsAtomicPacked16Bit,
const SDLoc &DL) { … }
static bool parseTexFail(SDValue TexFailCtrl, SelectionDAG &DAG, SDValue *TFE,
SDValue *LWE, bool &IsTexFail) { … }
static void packImage16bitOpsToDwords(SelectionDAG &DAG, SDValue Op,
MVT PackVectorVT,
SmallVectorImpl<SDValue> &PackedAddrs,
unsigned DimIdx, unsigned EndIdx,
unsigned NumGradients) { … }
SDValue SITargetLowering::lowerImage(SDValue Op,
const AMDGPU::ImageDimIntrinsicInfo *Intr,
SelectionDAG &DAG, bool WithChain) const { … }
SDValue SITargetLowering::lowerSBuffer(EVT VT, SDLoc DL, SDValue Rsrc,
SDValue Offset, SDValue CachePolicy,
SelectionDAG &DAG) const { … }
SDValue SITargetLowering::lowerWaveID(SelectionDAG &DAG, SDValue Op) const { … }
SDValue SITargetLowering::lowerWorkitemID(SelectionDAG &DAG, SDValue Op,
unsigned Dim,
const ArgDescriptor &Arg) const { … }
SDValue SITargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
SelectionDAG &DAG) const { … }
static SDValue selectSOffset(SDValue SOffset, SelectionDAG &DAG,
const GCNSubtarget *Subtarget) { … }
SDValue SITargetLowering::lowerRawBufferAtomicIntrin(SDValue Op,
SelectionDAG &DAG,
unsigned NewOpcode) const { … }
SDValue
SITargetLowering::lowerStructBufferAtomicIntrin(SDValue Op, SelectionDAG &DAG,
unsigned NewOpcode) const { … }
SDValue SITargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
SelectionDAG &DAG) const { … }
SDValue SITargetLowering::getMemIntrinsicNode(unsigned Opcode, const SDLoc &DL,
SDVTList VTList,
ArrayRef<SDValue> Ops, EVT MemVT,
MachineMemOperand *MMO,
SelectionDAG &DAG) const { … }
SDValue SITargetLowering::handleD16VData(SDValue VData, SelectionDAG &DAG,
bool ImageStore) const { … }
SDValue SITargetLowering::LowerINTRINSIC_VOID(SDValue Op,
SelectionDAG &DAG) const { … }
std::pair<SDValue, SDValue> SITargetLowering::splitBufferOffsets(
SDValue Offset, SelectionDAG &DAG) const { … }
void SITargetLowering::setBufferOffsets(SDValue CombinedOffset,
SelectionDAG &DAG, SDValue *Offsets,
Align Alignment) const { … }
SDValue SITargetLowering::bufferRsrcPtrToVector(SDValue MaybePointer,
SelectionDAG &DAG) const { … }
SDValue SITargetLowering::lowerPointerAsRsrcIntrin(SDNode *Op,
SelectionDAG &DAG) const { … }
SDValue SITargetLowering::handleByteShortBufferLoads(SelectionDAG &DAG,
EVT LoadVT, SDLoc DL,
ArrayRef<SDValue> Ops,
MachineMemOperand *MMO,
bool IsTFE) const { … }
SDValue SITargetLowering::handleByteShortBufferStores(SelectionDAG &DAG,
EVT VDataType, SDLoc DL,
SDValue Ops[],
MemSDNode *M) const { … }
static SDValue getLoadExtOrTrunc(SelectionDAG &DAG,
ISD::LoadExtType ExtType, SDValue Op,
const SDLoc &SL, EVT VT) { … }
SDValue SITargetLowering::widenLoad(LoadSDNode *Ld, DAGCombinerInfo &DCI) const { … }
static bool addressMayBeAccessedAsPrivate(const MachineMemOperand *MMO,
const SIMachineFunctionInfo &Info) { … }
SDValue SITargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const { … }
SDValue SITargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const { … }
SDValue SITargetLowering::lowerFastUnsafeFDIV(SDValue Op,
SelectionDAG &DAG) const { … }
SDValue SITargetLowering::lowerFastUnsafeFDIV64(SDValue Op,
SelectionDAG &DAG) const { … }
static SDValue getFPBinOp(SelectionDAG &DAG, unsigned Opcode, const SDLoc &SL,
EVT VT, SDValue A, SDValue B, SDValue GlueChain,
SDNodeFlags Flags) { … }
static SDValue getFPTernOp(SelectionDAG &DAG, unsigned Opcode, const SDLoc &SL,
EVT VT, SDValue A, SDValue B, SDValue C,
SDValue GlueChain, SDNodeFlags Flags) { … }
SDValue SITargetLowering::LowerFDIV16(SDValue Op, SelectionDAG &DAG) const { … }
SDValue SITargetLowering::lowerFDIV_FAST(SDValue Op, SelectionDAG &DAG) const { … }
static SDValue getSPDenormModeValue(uint32_t SPDenormMode, SelectionDAG &DAG,
const SIMachineFunctionInfo *Info,
const GCNSubtarget *ST) { … }
SDValue SITargetLowering::LowerFDIV32(SDValue Op, SelectionDAG &DAG) const { … }
SDValue SITargetLowering::LowerFDIV64(SDValue Op, SelectionDAG &DAG) const { … }
SDValue SITargetLowering::LowerFDIV(SDValue Op, SelectionDAG &DAG) const { … }
SDValue SITargetLowering::LowerFFREXP(SDValue Op, SelectionDAG &DAG) const { … }
SDValue SITargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const { … }
SDValue SITargetLowering::lowerFSQRTF16(SDValue Op, SelectionDAG &DAG) const { … }
SDValue SITargetLowering::lowerFSQRTF32(SDValue Op, SelectionDAG &DAG) const { … }
SDValue SITargetLowering::lowerFSQRTF64(SDValue Op, SelectionDAG &DAG) const { … }
SDValue SITargetLowering::LowerTrig(SDValue Op, SelectionDAG &DAG) const { … }
SDValue SITargetLowering::LowerATOMIC_CMP_SWAP(SDValue Op, SelectionDAG &DAG) const { … }
SDValue SITargetLowering::performUCharToFloatCombine(SDNode *N,
DAGCombinerInfo &DCI) const { … }
SDValue SITargetLowering::performFCopySignCombine(SDNode *N,
DAGCombinerInfo &DCI) const { … }
SDValue SITargetLowering::performSHLPtrCombine(SDNode *N,
unsigned AddrSpace,
EVT MemVT,
DAGCombinerInfo &DCI) const { … }
static unsigned getBasePtrIndex(const MemSDNode *N) { … }
SDValue SITargetLowering::performMemSDNodeCombine(MemSDNode *N,
DAGCombinerInfo &DCI) const { … }
static bool bitOpWithConstantIsReducible(unsigned Opc, uint32_t Val) { … }
SDValue SITargetLowering::splitBinaryBitConstantOp(
DAGCombinerInfo &DCI,
const SDLoc &SL,
unsigned Opc, SDValue LHS,
const ConstantSDNode *CRHS) const { … }
bool llvm::isBoolSGPR(SDValue V) { … }
static uint32_t getConstantPermuteMask(uint32_t C) { … }
static uint32_t getPermuteMask(SDValue V) { … }
SDValue SITargetLowering::performAndCombine(SDNode *N,
DAGCombinerInfo &DCI) const { … }
static const std::optional<ByteProvider<SDValue>>
calculateSrcByte(const SDValue Op, uint64_t DestByte, uint64_t SrcIndex = 0,
unsigned Depth = 0) { … }
static const std::optional<ByteProvider<SDValue>>
calculateByteProvider(const SDValue &Op, unsigned Index, unsigned Depth,
unsigned StartingIndex = 0) { … }
static bool isExtendedFrom16Bits(SDValue &Operand) { … }
static bool addresses16Bits(int Mask) { … }
static bool hasNon16BitAccesses(uint64_t PermMask, SDValue &Op,
SDValue &OtherOp) { … }
static SDValue getDWordFromOffset(SelectionDAG &DAG, SDLoc SL, SDValue Src,
unsigned DWordOffset) { … }
static SDValue matchPERM(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) { … }
SDValue SITargetLowering::performOrCombine(SDNode *N,
DAGCombinerInfo &DCI) const { … }
SDValue SITargetLowering::performXorCombine(SDNode *N,
DAGCombinerInfo &DCI) const { … }
SDValue SITargetLowering::performZeroExtendCombine(SDNode *N,
DAGCombinerInfo &DCI) const { … }
SDValue
SITargetLowering::performSignExtendInRegCombine(SDNode *N,
DAGCombinerInfo &DCI) const { … }
SDValue SITargetLowering::performClassCombine(SDNode *N,
DAGCombinerInfo &DCI) const { … }
SDValue SITargetLowering::performRcpCombine(SDNode *N,
DAGCombinerInfo &DCI) const { … }
bool SITargetLowering::isCanonicalized(SelectionDAG &DAG, SDValue Op,
unsigned MaxDepth) const { … }
bool SITargetLowering::isCanonicalized(Register Reg, const MachineFunction &MF,
unsigned MaxDepth) const { … }
SDValue SITargetLowering::getCanonicalConstantFP(
SelectionDAG &DAG, const SDLoc &SL, EVT VT, const APFloat &C) const { … }
static bool vectorEltWillFoldAway(SDValue Op) { … }
SDValue SITargetLowering::performFCanonicalizeCombine(
SDNode *N,
DAGCombinerInfo &DCI) const { … }
static unsigned minMaxOpcToMin3Max3Opc(unsigned Opc) { … }
SDValue SITargetLowering::performIntMed3ImmCombine(SelectionDAG &DAG,
const SDLoc &SL, SDValue Src,
SDValue MinVal,
SDValue MaxVal,
bool Signed) const { … }
static ConstantFPSDNode *getSplatConstantFP(SDValue Op) { … }
SDValue SITargetLowering::performFPMed3ImmCombine(SelectionDAG &DAG,
const SDLoc &SL,
SDValue Op0,
SDValue Op1) const { … }
static bool supportsMin3Max3(const GCNSubtarget &Subtarget, unsigned Opc,
EVT VT) { … }
SDValue SITargetLowering::performMinMaxCombine(SDNode *N,
DAGCombinerInfo &DCI) const { … }
static bool isClampZeroToOne(SDValue A, SDValue B) { … }
SDValue SITargetLowering::performFMed3Combine(SDNode *N,
DAGCombinerInfo &DCI) const { … }
SDValue SITargetLowering::performCvtPkRTZCombine(SDNode *N,
DAGCombinerInfo &DCI) const { … }
bool SITargetLowering::shouldExpandVectorDynExt(unsigned EltSize,
unsigned NumElem,
bool IsDivergentIdx,
const GCNSubtarget *Subtarget) { … }
bool SITargetLowering::shouldExpandVectorDynExt(SDNode *N) const { … }
SDValue SITargetLowering::performExtractVectorEltCombine(
SDNode *N, DAGCombinerInfo &DCI) const { … }
SDValue
SITargetLowering::performInsertVectorEltCombine(SDNode *N,
DAGCombinerInfo &DCI) const { … }
static SDValue strictFPExtFromF16(SelectionDAG &DAG, SDValue Src) { … }
SDValue SITargetLowering::performFPRoundCombine(SDNode *N,
DAGCombinerInfo &DCI) const { … }
unsigned SITargetLowering::getFusedOpcode(const SelectionDAG &DAG,
const SDNode *N0,
const SDNode *N1) const { … }
SDValue SITargetLowering::reassociateScalarOps(SDNode *N,
SelectionDAG &DAG) const { … }
static SDValue getMad64_32(SelectionDAG &DAG, const SDLoc &SL,
EVT VT,
SDValue N0, SDValue N1, SDValue N2,
bool Signed) { … }
SDValue SITargetLowering::tryFoldToMad64_32(SDNode *N,
DAGCombinerInfo &DCI) const { … }
static std::optional<ByteProvider<SDValue>>
handleMulOperand(const SDValue &MulOperand) { … }
static unsigned addPermMasks(unsigned First, unsigned Second) { … }
struct DotSrc { … };
static void placeSources(ByteProvider<SDValue> &Src0,
ByteProvider<SDValue> &Src1,
SmallVectorImpl<DotSrc> &Src0s,
SmallVectorImpl<DotSrc> &Src1s, int Step) { … }
static SDValue resolveSources(SelectionDAG &DAG, SDLoc SL,
SmallVectorImpl<DotSrc> &Srcs, bool IsSigned,
bool IsAny) { … }
static void fixMasks(SmallVectorImpl<DotSrc> &Srcs, unsigned ChainLength) { … }
static bool isMul(const SDValue Op) { … }
static std::optional<bool>
checkDot4MulSignedness(const SDValue &N, ByteProvider<SDValue> &Src0,
ByteProvider<SDValue> &Src1, const SDValue &S0Op,
const SDValue &S1Op, const SelectionDAG &DAG) { … }
SDValue SITargetLowering::performAddCombine(SDNode *N,
DAGCombinerInfo &DCI) const { … }
SDValue SITargetLowering::performSubCombine(SDNode *N,
DAGCombinerInfo &DCI) const { … }
SDValue SITargetLowering::performAddCarrySubCarryCombine(SDNode *N,
DAGCombinerInfo &DCI) const { … }
SDValue SITargetLowering::performFAddCombine(SDNode *N,
DAGCombinerInfo &DCI) const { … }
SDValue SITargetLowering::performFSubCombine(SDNode *N,
DAGCombinerInfo &DCI) const { … }
SDValue SITargetLowering::performFDivCombine(SDNode *N,
DAGCombinerInfo &DCI) const { … }
SDValue SITargetLowering::performFMACombine(SDNode *N,
DAGCombinerInfo &DCI) const { … }
SDValue SITargetLowering::performSetCCCombine(SDNode *N,
DAGCombinerInfo &DCI) const { … }
SDValue SITargetLowering::performCvtF32UByteNCombine(SDNode *N,
DAGCombinerInfo &DCI) const { … }SDValue SITargetLowering::performClampCombine(SDNode *N,
DAGCombinerInfo &DCI) const { … }SDValue SITargetLowering::PerformDAGCombine(SDNode *N,
DAGCombinerInfo &DCI) const { … }static unsigned SubIdx2Lane(unsigned Idx) { … }SDNode *SITargetLowering::adjustWritemask(MachineSDNode *&Node,
SelectionDAG &DAG) const { … }static bool isFrameIndexOp(SDValue Op) { … }SDNode *SITargetLowering::legalizeTargetIndependentNode(SDNode *Node,
SelectionDAG &DAG) const { … }SDNode *SITargetLowering::PostISelFolding(MachineSDNode *Node,
SelectionDAG &DAG) const { … }void SITargetLowering::AddMemOpInit(MachineInstr &MI) const { … }void SITargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI,
SDNode *Node) const { … }static SDValue buildSMovImm32(SelectionDAG &DAG, const SDLoc &DL,
uint64_t Val) { … }MachineSDNode *SITargetLowering::wrapAddr64Rsrc(SelectionDAG &DAG,
const SDLoc &DL,
SDValue Ptr) const { … }MachineSDNode *SITargetLowering::buildRSRC(SelectionDAG &DAG, const SDLoc &DL,
SDValue Ptr, uint32_t RsrcDword1,
uint64_t RsrcDword2And3) const { … }std::pair<unsigned, const TargetRegisterClass *>
SITargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI_,
StringRef Constraint,
MVT VT) const { … }static bool isImmConstraint(StringRef Constraint) { … }SITargetLowering::ConstraintType
SITargetLowering::getConstraintType(StringRef Constraint) const { … }static uint64_t clearUnusedBits(uint64_t Val, unsigned Size) { … }void SITargetLowering::LowerAsmOperandForConstraint(SDValue Op,
StringRef Constraint,
std::vector<SDValue> &Ops,
SelectionDAG &DAG) const { … }bool SITargetLowering::getAsmOperandConstVal(SDValue Op, uint64_t &Val) const { … }bool SITargetLowering::checkAsmConstraintVal(SDValue Op, StringRef Constraint,
uint64_t Val) const { … }bool SITargetLowering::checkAsmConstraintValA(SDValue Op, uint64_t Val,
unsigned MaxSize) const { … }static int getAlignedAGPRClassID(unsigned UnalignedClassID) { … }void SITargetLowering::finalizeLowering(MachineFunction &MF) const { … }void SITargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
KnownBits &Known,
const APInt &DemandedElts,
const SelectionDAG &DAG,
unsigned Depth) const { … }void SITargetLowering::computeKnownBitsForFrameIndex(
const int FI, KnownBits &Known, const MachineFunction &MF) const { … }static void knownBitsForWorkitemID(const GCNSubtarget &ST, GISelKnownBits &KB,
KnownBits &Known, unsigned Dim) { … }void SITargetLowering::computeKnownBitsForTargetInstr(
GISelKnownBits &KB, Register R, KnownBits &Known, const APInt &DemandedElts,
const MachineRegisterInfo &MRI, unsigned Depth) const { … }Align SITargetLowering::computeKnownAlignForTargetInstr(
GISelKnownBits &KB, Register R, const MachineRegisterInfo &MRI,
unsigned Depth) const { … }Align SITargetLowering::getPrefLoopAlignment(MachineLoop *ML) const { … }LLVM_ATTRIBUTE_UNUSED
static bool isCopyFromRegOfInlineAsm(const SDNode *N) { … }bool SITargetLowering::isSDNodeSourceOfDivergence(const SDNode *N,
FunctionLoweringInfo *FLI,
UniformityInfo *UA) const { … }bool SITargetLowering::denormalsEnabledForType(const SelectionDAG &DAG,
EVT VT) const { … }bool SITargetLowering::denormalsEnabledForType(
LLT Ty, const MachineFunction &MF) const { … }bool SITargetLowering::isKnownNeverNaNForTargetNode(SDValue Op,
const SelectionDAG &DAG,
bool SNaN,
unsigned Depth) const { … }static bool atomicIgnoresDenormalModeOrFPModeIsFTZ(const AtomicRMWInst *RMW) { … }static OptimizationRemark emitAtomicRMWLegalRemark(const AtomicRMWInst *RMW) { … }static bool isV2F16OrV2BF16(Type *Ty) { … }static bool isV2F16(Type *Ty) { … }static bool isV2BF16(Type *Ty) { … }static bool isAtomicRMWLegalIntTy(Type *Ty) { … }static bool isAtomicRMWLegalXChgTy(const AtomicRMWInst *RMW) { … }static bool globalMemoryFPAtomicIsLegal(const GCNSubtarget &Subtarget,
const AtomicRMWInst *RMW,
bool HasSystemScope) { … }static TargetLowering::AtomicExpansionKind
atomicSupportedIfLegalIntType(const AtomicRMWInst *RMW) { … }TargetLowering::AtomicExpansionKind
SITargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *RMW) const { … }TargetLowering::AtomicExpansionKind
SITargetLowering::shouldExpandAtomicLoadInIR(LoadInst *LI) const { … }TargetLowering::AtomicExpansionKind
SITargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const { … }TargetLowering::AtomicExpansionKind
SITargetLowering::shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *CmpX) const { … }const TargetRegisterClass *
SITargetLowering::getRegClassFor(MVT VT, bool isDivergent) const { … }static bool hasCFUser(const Value *V, SmallPtrSet<const Value *, 16> &Visited,
unsigned WaveSize) { … }bool SITargetLowering::requiresUniformRegister(MachineFunction &MF,
const Value *V) const { … }bool SITargetLowering::hasMemSDNodeUser(SDNode *N) const { … }bool SITargetLowering::isReassocProfitable(SelectionDAG &DAG, SDValue N0,
SDValue N1) const { … }bool SITargetLowering::isReassocProfitable(MachineRegisterInfo &MRI,
Register N0, Register N1) const { … }MachineMemOperand::Flags
SITargetLowering::getTargetMMOFlags(const Instruction &I) const { … }bool SITargetLowering::checkForPhysRegDependency(
SDNode *Def, SDNode *User, unsigned Op, const TargetRegisterInfo *TRI,
const TargetInstrInfo *TII, unsigned &PhysReg, int &Cost) const { … }void SITargetLowering::emitExpandAtomicRMW(AtomicRMWInst *AI) const { … }LoadInst *
SITargetLowering::lowerIdempotentRMWIntoFencedLoad(AtomicRMWInst *AI) const { … }