llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp

//===- AMDGPUInstructionSelector.cpp ----------------------------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the targeting of the InstructionSelector class for
/// AMDGPU.
/// \todo This should be generated by TableGen.
//===----------------------------------------------------------------------===//

#include "AMDGPUInstructionSelector.h"
#include "AMDGPU.h"
#include "AMDGPUGlobalISelUtils.h"
#include "AMDGPUInstrInfo.h"
#include "AMDGPURegisterBankInfo.h"
#include "AMDGPUTargetMachine.h"
#include "SIMachineFunctionInfo.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/CodeGen/GlobalISel/GIMatchTableExecutorImpl.h"
#include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
#include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
#include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
#include <optional>

#define DEBUG_TYPE "amdgpu-isel"

using namespace llvm;
using namespace MIPatternMatch;

#define GET_GLOBALISEL_IMPL
#define AMDGPUSubtarget GCNSubtarget
#include "AMDGPUGenGlobalISel.inc"
#undef GET_GLOBALISEL_IMPL
#undef AMDGPUSubtarget

AMDGPUInstructionSelector::AMDGPUInstructionSelector(
    const GCNSubtarget &STI, const AMDGPURegisterBankInfo &RBI,
    const AMDGPUTargetMachine &TM) {}

const char *AMDGPUInstructionSelector::getName() {}

void AMDGPUInstructionSelector::setupMF(MachineFunction &MF, GISelKnownBits *KB,
                                        CodeGenCoverage *CoverageInfo,
                                        ProfileSummaryInfo *PSI,
                                        BlockFrequencyInfo *BFI) {}

// Return the wave level SGPR base address if this is a wave address.
static Register getWaveAddress(const MachineInstr *Def) {}
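
// A minimal sketch of the check described above (assumed shape, not
// necessarily the in-tree body): peek at the defining instruction and peel
// the G_AMDGPU_WAVE_ADDRESS pseudo if present.
static Register getWaveAddressSketch(const MachineInstr *Def) {
  return Def->getOpcode() == AMDGPU::G_AMDGPU_WAVE_ADDRESS
             ? Def->getOperand(1).getReg()
             : Register();
}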

bool AMDGPUInstructionSelector::isVCC(Register Reg,
                                      const MachineRegisterInfo &MRI) const {}

bool AMDGPUInstructionSelector::constrainCopyLikeIntrin(MachineInstr &MI,
                                                        unsigned NewOpc) const {}

bool AMDGPUInstructionSelector::selectCOPY(MachineInstr &I) const {}

bool AMDGPUInstructionSelector::selectPHI(MachineInstr &I) const {}

MachineOperand
AMDGPUInstructionSelector::getSubOperand64(MachineOperand &MO,
                                           const TargetRegisterClass &SubRC,
                                           unsigned SubIdx) const {}

static unsigned getLogicalBitOpcode(unsigned Opc, bool Is64) {}

bool AMDGPUInstructionSelector::selectG_AND_OR_XOR(MachineInstr &I) const {}

bool AMDGPUInstructionSelector::selectG_ADD_SUB(MachineInstr &I) const {}

bool AMDGPUInstructionSelector::selectG_UADDO_USUBO_UADDE_USUBE(
  MachineInstr &I) const {}

bool AMDGPUInstructionSelector::selectG_AMDGPU_MAD_64_32(
    MachineInstr &I) const {}

// TODO: We should probably legalize these to use only 32-bit results.
bool AMDGPUInstructionSelector::selectG_EXTRACT(MachineInstr &I) const {}

bool AMDGPUInstructionSelector::selectG_MERGE_VALUES(MachineInstr &MI) const {}

bool AMDGPUInstructionSelector::selectG_UNMERGE_VALUES(MachineInstr &MI) const {}

bool AMDGPUInstructionSelector::selectG_BUILD_VECTOR(MachineInstr &MI) const {}

bool AMDGPUInstructionSelector::selectG_IMPLICIT_DEF(MachineInstr &I) const {}

bool AMDGPUInstructionSelector::selectG_INSERT(MachineInstr &I) const {}

bool AMDGPUInstructionSelector::selectG_SBFX_UBFX(MachineInstr &MI) const {}

bool AMDGPUInstructionSelector::selectInterpP1F16(MachineInstr &MI) const {}

// Writelane is special in that it can use SGPR and M0 (which would normally
// count as using the constant bus twice - but in this case it is allowed since
// the lane selector doesn't count as a use of the constant bus). However, it is
// still required to abide by the 1 SGPR rule. Fix this up if we might have
// multiple SGPRs.
bool AMDGPUInstructionSelector::selectWritelane(MachineInstr &MI) const {}
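
// A minimal sketch of the M0 fixup mentioned above (helper name and operand
// handling are assumptions for illustration, not the in-tree selection code):
// when the lane selector is a non-constant SGPR, route it through M0 so the
// written value remains the only ordinary SGPR read on the constant bus.
static void routeLaneSelectThroughM0Sketch(MachineBasicBlock &MBB,
                                           MachineInstr &InsertBefore,
                                           const DebugLoc &DL,
                                           const SIInstrInfo &TII,
                                           MachineInstrBuilder &Writelane,
                                           Register LaneSelect) {
  // Copy the SGPR lane selector into M0, then use M0 as the lane operand.
  BuildMI(MBB, InsertBefore, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
      .addReg(LaneSelect);
  Writelane.addReg(AMDGPU::M0);
}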

// We need to handle this here because tablegen doesn't support matching
// instructions with multiple outputs.
bool AMDGPUInstructionSelector::selectDivScale(MachineInstr &MI) const {}

bool AMDGPUInstructionSelector::selectG_INTRINSIC(MachineInstr &I) const {}

static int getV_CMPOpcode(CmpInst::Predicate P, unsigned Size,
                          const GCNSubtarget &ST) {}

int AMDGPUInstructionSelector::getS_CMPOpcode(CmpInst::Predicate P,
                                              unsigned Size) const {}

bool AMDGPUInstructionSelector::selectG_ICMP_or_FCMP(MachineInstr &I) const {}

bool AMDGPUInstructionSelector::selectIntrinsicCmp(MachineInstr &I) const {}

bool AMDGPUInstructionSelector::selectBallot(MachineInstr &I) const {}

bool AMDGPUInstructionSelector::selectRelocConstant(MachineInstr &I) const {}

bool AMDGPUInstructionSelector::selectGroupStaticSize(MachineInstr &I) const {}

bool AMDGPUInstructionSelector::selectReturnAddress(MachineInstr &I) const {}

bool AMDGPUInstructionSelector::selectEndCfIntrinsic(MachineInstr &MI) const {}

bool AMDGPUInstructionSelector::selectDSOrderedIntrinsic(
  MachineInstr &MI, Intrinsic::ID IntrID) const {}

static unsigned gwsIntrinToOpcode(unsigned IntrID) {}

bool AMDGPUInstructionSelector::selectDSGWSIntrinsic(MachineInstr &MI,
                                                     Intrinsic::ID IID) const {}

bool AMDGPUInstructionSelector::selectDSAppendConsume(MachineInstr &MI,
                                                      bool IsAppend) const {}

bool AMDGPUInstructionSelector::selectInitWholeWave(MachineInstr &MI) const {}

bool AMDGPUInstructionSelector::selectSBarrier(MachineInstr &MI) const {}

static bool parseTexFail(uint64_t TexFailCtrl, bool &TFE, bool &LWE,
                         bool &IsTexFail) {}

bool AMDGPUInstructionSelector::selectImageIntrinsic(
  MachineInstr &MI, const AMDGPU::ImageDimIntrinsicInfo *Intr) const {}

// We need to handle this here because tablegen doesn't support matching
// instructions with multiple outputs.
bool AMDGPUInstructionSelector::selectDSBvhStackIntrinsic(
    MachineInstr &MI) const {}

bool AMDGPUInstructionSelector::selectG_INTRINSIC_W_SIDE_EFFECTS(
    MachineInstr &I) const {}

bool AMDGPUInstructionSelector::selectG_SELECT(MachineInstr &I) const {}

bool AMDGPUInstructionSelector::selectG_TRUNC(MachineInstr &I) const {}

/// \returns true if a bitmask for \p Size bits will be an inline immediate.
static bool shouldUseAndMask(unsigned Size, unsigned &Mask) {}
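
// A minimal sketch of the check above, assuming the integer inline-immediate
// range of [-16, 64] (not necessarily the in-tree body): a trailing-ones mask
// of \p Size bits is free exactly when its signed 32-bit value lands in that
// range.
static bool shouldUseAndMaskSketch(unsigned Size, unsigned &Mask) {
  Mask = maskTrailingOnes<unsigned>(Size); // e.g. Size = 4 -> 0xf
  int SignedMask = static_cast<int>(Mask); // Size = 32 -> -1, still inline.
  return SignedMask >= -16 && SignedMask <= 64;
}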

// Like RegisterBankInfo::getRegBank, but don't assume vcc for s1.
const RegisterBank *AMDGPUInstructionSelector::getArtifactRegBank(
  Register Reg, const MachineRegisterInfo &MRI,
  const TargetRegisterInfo &TRI) const {}

bool AMDGPUInstructionSelector::selectG_SZA_EXT(MachineInstr &I) const {}

static Register stripCopy(Register Reg, MachineRegisterInfo &MRI) {}

static Register stripBitCast(Register Reg, MachineRegisterInfo &MRI) {}

static bool isExtractHiElt(MachineRegisterInfo &MRI, Register In,
                           Register &Out) {}

bool AMDGPUInstructionSelector::selectG_FPEXT(MachineInstr &I) const {}

bool AMDGPUInstructionSelector::selectG_FNEG(MachineInstr &MI) const {}

// FIXME: This is a workaround for the same tablegen problems as G_FNEG
bool AMDGPUInstructionSelector::selectG_FABS(MachineInstr &MI) const {}

static bool isConstant(const MachineInstr &MI) {}

void AMDGPUInstructionSelector::getAddrModeInfo(const MachineInstr &Load,
    const MachineRegisterInfo &MRI, SmallVectorImpl<GEPInfo> &AddrInfo) const {}

bool AMDGPUInstructionSelector::isSGPR(Register Reg) const {}

bool AMDGPUInstructionSelector::isInstrUniform(const MachineInstr &MI) const {}

bool AMDGPUInstructionSelector::hasVgprParts(ArrayRef<GEPInfo> AddrInfo) const {}

void AMDGPUInstructionSelector::initM0(MachineInstr &I) const {}

bool AMDGPUInstructionSelector::selectG_LOAD_STORE_ATOMICRMW(
  MachineInstr &I) const {}

static bool isVCmpResult(Register Reg, MachineRegisterInfo &MRI) {}

bool AMDGPUInstructionSelector::selectG_BRCOND(MachineInstr &I) const {}

bool AMDGPUInstructionSelector::selectG_GLOBAL_VALUE(
  MachineInstr &I) const {}

bool AMDGPUInstructionSelector::selectG_PTRMASK(MachineInstr &I) const {}

/// Return the register to use for the index value, and the subregister to use
/// for the indirectly accessed register.
static std::pair<Register, unsigned>
computeIndirectRegIndex(MachineRegisterInfo &MRI, const SIRegisterInfo &TRI,
                        const TargetRegisterClass *SuperRC, Register IdxReg,
                        unsigned EltSize, GISelKnownBits &KnownBits) {}

bool AMDGPUInstructionSelector::selectG_EXTRACT_VECTOR_ELT(
  MachineInstr &MI) const {}

// TODO: Fold insert_vector_elt (extract_vector_elt) into movrelsd
bool AMDGPUInstructionSelector::selectG_INSERT_VECTOR_ELT(
  MachineInstr &MI) const {}

bool AMDGPUInstructionSelector::selectBufferLoadLds(MachineInstr &MI) const {}

/// Match a zero extend from a 32-bit value to 64 bits.
static Register matchZeroExtendFromS32(MachineRegisterInfo &MRI, Register Reg) {}
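
// A minimal sketch of the two shapes such a match typically covers (assumed,
// not necessarily the in-tree body): an explicit G_ZEXT from s32, or the
// legalized G_MERGE_VALUES of an s32 low half with a zero high half.
static Register matchZeroExtendFromS32Sketch(MachineRegisterInfo &MRI,
                                             Register Reg) {
  Register ZExtSrc;
  if (mi_match(Reg, MRI, m_GZExt(m_Reg(ZExtSrc))))
    return MRI.getType(ZExtSrc) == LLT::scalar(32) ? ZExtSrc : Register();

  // %reg:_(s64) = G_MERGE_VALUES %lo:_(s32), 0
  const MachineInstr *Def = getDefIgnoringCopies(Reg, MRI);
  if (Def && Def->getOpcode() == AMDGPU::G_MERGE_VALUES &&
      mi_match(Def->getOperand(2).getReg(), MRI, m_ZeroInt()))
    return Def->getOperand(1).getReg();

  return Register();
}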

bool AMDGPUInstructionSelector::selectGlobalLoadLds(MachineInstr &MI) const {}

bool AMDGPUInstructionSelector::selectBVHIntrinsic(MachineInstr &MI) const {}

bool AMDGPUInstructionSelector::selectSMFMACIntrin(MachineInstr &MI) const {}

bool AMDGPUInstructionSelector::selectWaveAddress(MachineInstr &MI) const {}

bool AMDGPUInstructionSelector::selectStackRestore(MachineInstr &MI) const {}

bool AMDGPUInstructionSelector::select(MachineInstr &I) {}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectVCSRC(MachineOperand &Root) const {}

std::pair<Register, unsigned> AMDGPUInstructionSelector::selectVOP3ModsImpl(
    Register Src, bool IsCanonicalizing, bool AllowAbs, bool OpSel) const {}

Register AMDGPUInstructionSelector::copyToVGPRIfSrcFolded(
    Register Src, unsigned Mods, MachineOperand Root, MachineInstr *InsertPt,
    bool ForceVGPR) const {}

///
/// This will select either an SGPR or VGPR operand and will save us from
/// having to write an extra tablegen pattern.
InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectVSRC0(MachineOperand &Root) const {}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectVOP3Mods0(MachineOperand &Root) const {}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectVOP3BMods0(MachineOperand &Root) const {}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectVOP3OMods(MachineOperand &Root) const {}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectVOP3Mods(MachineOperand &Root) const {}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectVOP3ModsNonCanonicalizing(
    MachineOperand &Root) const {}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectVOP3BMods(MachineOperand &Root) const {}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectVOP3NoMods(MachineOperand &Root) const {}

std::pair<Register, unsigned>
AMDGPUInstructionSelector::selectVOP3PModsImpl(
  Register Src, const MachineRegisterInfo &MRI, bool IsDOT) const {}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectVOP3PMods(MachineOperand &Root) const {}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectVOP3PModsDOT(MachineOperand &Root) const {}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectVOP3PModsNeg(MachineOperand &Root) const {}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectWMMAOpSelVOP3PMods(
    MachineOperand &Root) const {}

static Register buildRegSequence(SmallVectorImpl<Register> &Elts,
                                 MachineInstr *InsertPt,
                                 MachineRegisterInfo &MRI) {}

static void selectWMMAModsNegAbs(unsigned ModOpcode, unsigned &Mods,
                                 SmallVectorImpl<Register> &Elts, Register &Src,
                                 MachineInstr *InsertPt,
                                 MachineRegisterInfo &MRI) {}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectWMMAModsF32NegAbs(MachineOperand &Root) const {}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectWMMAModsF16Neg(MachineOperand &Root) const {}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectWMMAModsF16NegAbs(MachineOperand &Root) const {}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectWMMAVISrc(MachineOperand &Root) const {}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectSWMMACIndex8(MachineOperand &Root) const {}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectSWMMACIndex16(MachineOperand &Root) const {}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectVOP3OpSelMods(MachineOperand &Root) const {}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectVINTERPMods(MachineOperand &Root) const {}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectVINTERPModsHi(MachineOperand &Root) const {}

bool AMDGPUInstructionSelector::selectSmrdOffset(MachineOperand &Root,
                                                 Register &Base,
                                                 Register *SOffset,
                                                 int64_t *Offset) const {}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectSmrdImm(MachineOperand &Root) const {}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectSmrdImm32(MachineOperand &Root) const {}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectSmrdSgpr(MachineOperand &Root) const {}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectSmrdSgprImm(MachineOperand &Root) const {}

std::pair<Register, int>
AMDGPUInstructionSelector::selectFlatOffsetImpl(MachineOperand &Root,
                                                uint64_t FlatVariant) const {}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectFlatOffset(MachineOperand &Root) const {}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectGlobalOffset(MachineOperand &Root) const {}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectScratchOffset(MachineOperand &Root) const {}

// Match (64-bit SGPR base) + (zext vgpr offset) + sext(imm offset)
InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectGlobalSAddr(MachineOperand &Root) const {}
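
// Illustrative MIR shape for the pattern above (register names and types are
// assumed, not taken from a test):
//   %off:vgpr(s32)  = ...
//   %zext:vgpr(s64) = G_ZEXT %off(s32)
//   %addr:vgpr(p1)  = G_PTR_ADD %sgpr_base, %zext
// which can select to a GLOBAL_*_SADDR form with saddr = %sgpr_base,
// vaddr = %off, and a small signed constant folded into the offset field.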

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectScratchSAddr(MachineOperand &Root) const {}

// Check whether the flat scratch SVS swizzle bug affects this access.
bool AMDGPUInstructionSelector::checkFlatScratchSVSSwizzleBug(
    Register VAddr, Register SAddr, uint64_t ImmOffset) const {}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectScratchSVAddr(MachineOperand &Root) const {}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectMUBUFScratchOffen(MachineOperand &Root) const {}

bool AMDGPUInstructionSelector::isDSOffsetLegal(Register Base,
                                                int64_t Offset) const {}

bool AMDGPUInstructionSelector::isDSOffset2Legal(Register Base, int64_t Offset0,
                                                 int64_t Offset1,
                                                 unsigned Size) const {}

// Return whether the operation has the NoUnsignedWrap property.
static bool isNoUnsignedWrap(MachineInstr *Addr) {}

// Check that the base address of a flat scratch load/store in the form
// `base + offset` is legal to be put in an SGPR/VGPR (i.e. unsigned, per the
// hardware requirement). We always treat the first operand as the base
// address here.
bool AMDGPUInstructionSelector::isFlatScratchBaseLegal(Register Addr) const {}
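
// A minimal sketch of one sufficient condition (assumed; the full in-tree
// check may also consult known bits): a G_PTR_ADD carrying the
// no-unsigned-wrap flag cannot wrap `base + offset`, so its base operand is a
// legal unsigned scratch base.
//
//   MachineInstr *AddrDef = getDefIgnoringCopies(Addr, *MRI);
//   if (AddrDef->getOpcode() == TargetOpcode::G_PTR_ADD &&
//       isNoUnsignedWrap(AddrDef))
//     return true;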

// Check that the address values in the SGPR and VGPR are legal for a flat
// scratch access in the form: SGPR + VGPR.
bool AMDGPUInstructionSelector::isFlatScratchBaseLegalSV(Register Addr) const {}

// Check that the address values in the SGPR and VGPR are legal for a flat
// scratch access in the form: SGPR + VGPR + Imm.
bool AMDGPUInstructionSelector::isFlatScratchBaseLegalSVImm(
    Register Addr) const {}

bool AMDGPUInstructionSelector::isUnneededShiftMask(const MachineInstr &MI,
                                                    unsigned ShAmtBits) const {}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectMUBUFScratchOffset(
    MachineOperand &Root) const {}

std::pair<Register, unsigned>
AMDGPUInstructionSelector::selectDS1Addr1OffsetImpl(MachineOperand &Root) const {}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectDS1Addr1Offset(MachineOperand &Root) const {}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectDS64Bit4ByteAligned(MachineOperand &Root) const {}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectDS128Bit8ByteAligned(MachineOperand &Root) const {}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectDSReadWrite2(MachineOperand &Root,
                                              unsigned Size) const {}

std::pair<Register, unsigned>
AMDGPUInstructionSelector::selectDSReadWrite2Impl(MachineOperand &Root,
                                                  unsigned Size) const {}

/// If \p Root is a G_PTR_ADD with a G_CONSTANT on the right hand side, return
/// the base value with the constant offset. There may be intervening copies
/// between \p Root and the identified constant. Returns \p Root, 0 if this does
/// not match the pattern.
std::pair<Register, int64_t>
AMDGPUInstructionSelector::getPtrBaseWithConstantOffset(
  Register Root, const MachineRegisterInfo &MRI) const {}
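
// A minimal sketch of the lookup described above (assumed helper name and
// shape, not the in-tree body): walk through copies to a defining G_PTR_ADD
// and pull a constant right-hand side out of it, falling back to {Root, 0}.
static std::pair<Register, int64_t>
getPtrBaseWithConstantOffsetSketch(Register Root,
                                   const MachineRegisterInfo &MRI) {
  MachineInstr *RootDef = getDefIgnoringCopies(Root, MRI);
  if (!RootDef || RootDef->getOpcode() != TargetOpcode::G_PTR_ADD)
    return {Root, 0};

  std::optional<ValueAndVReg> Off = getIConstantVRegValWithLookThrough(
      RootDef->getOperand(2).getReg(), MRI);
  if (!Off)
    return {Root, 0};

  return {RootDef->getOperand(1).getReg(), Off->Value.getSExtValue()};
}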

static void addZeroImm(MachineInstrBuilder &MIB) {}

/// Return a resource descriptor for use with an arbitrary 64-bit pointer. If \p
/// BasePtr is not valid, a null base pointer will be used.
static Register buildRSRC(MachineIRBuilder &B, MachineRegisterInfo &MRI,
                          uint32_t FormatLo, uint32_t FormatHi,
                          Register BasePtr) {}
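
// A minimal sketch of the assembly described above (assumed, not the in-tree
// body): the 128-bit descriptor is a REG_SEQUENCE whose low 64 bits hold the
// base pointer (zero when \p BasePtr is invalid) and whose high 64 bits hold
// the FormatLo/FormatHi constants.
//
//   Register W2 = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
//   Register W3 = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
//   B.buildInstr(AMDGPU::S_MOV_B32).addDef(W2).addImm(FormatLo);
//   B.buildInstr(AMDGPU::S_MOV_B32).addDef(W3).addImm(FormatHi);
//   // ... REG_SEQUENCE BasePtr:sub0_sub1, W2:sub2, W3:sub3 -> SGPR_128 ...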

static Register buildAddr64RSrc(MachineIRBuilder &B, MachineRegisterInfo &MRI,
                                const SIInstrInfo &TII, Register BasePtr) {}

static Register buildOffsetSrc(MachineIRBuilder &B, MachineRegisterInfo &MRI,
                               const SIInstrInfo &TII, Register BasePtr) {}

AMDGPUInstructionSelector::MUBUFAddressData
AMDGPUInstructionSelector::parseMUBUFAddress(Register Src) const {}

/// Return whether the addr64 MUBUF mode should be used for the given address.
bool AMDGPUInstructionSelector::shouldUseAddr64(MUBUFAddressData Addr) const {}

/// Split an immediate offset \p ImmOffset depending on whether it fits in the
/// immediate field. Modifies \p ImmOffset and sets \p SOffset to the variable
/// component.
void AMDGPUInstructionSelector::splitIllegalMUBUFOffset(
  MachineIRBuilder &B, Register &SOffset, int64_t &ImmOffset) const {}
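
// A minimal sketch of the split described above, written as a standalone
// helper with an assumed 12-bit unsigned immediate field (the real limit is
// subtarget dependent; not the in-tree body): an offset that does not fit is
// moved wholesale into an SGPR soffset and the immediate is cleared.
static void splitIllegalMUBUFOffsetSketch(MachineIRBuilder &B,
                                          MachineRegisterInfo &MRI,
                                          Register &SOffset,
                                          int64_t &ImmOffset) {
  if (ImmOffset >= 0 && isUInt<12>(ImmOffset))
    return; // Fits in the immediate field; nothing to split.

  SOffset = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
  B.buildInstr(AMDGPU::S_MOV_B32).addDef(SOffset).addImm(ImmOffset);
  ImmOffset = 0;
}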

bool AMDGPUInstructionSelector::selectMUBUFAddr64Impl(
  MachineOperand &Root, Register &VAddr, Register &RSrcReg,
  Register &SOffset, int64_t &Offset) const {}

bool AMDGPUInstructionSelector::selectMUBUFOffsetImpl(
  MachineOperand &Root, Register &RSrcReg, Register &SOffset,
  int64_t &Offset) const {}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectMUBUFAddr64(MachineOperand &Root) const {}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectMUBUFOffset(MachineOperand &Root) const {}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectBUFSOffset(MachineOperand &Root) const {}

/// Get an immediate that must be 32 bits, treated as zero extended.
static std::optional<uint64_t>
getConstantZext32Val(Register Reg, const MachineRegisterInfo &MRI) {}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectSMRDBufferImm(MachineOperand &Root) const {}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectSMRDBufferImm32(MachineOperand &Root) const {}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectSMRDBufferSgprImm(MachineOperand &Root) const {}

std::pair<Register, unsigned>
AMDGPUInstructionSelector::selectVOP3PMadMixModsImpl(MachineOperand &Root,
                                                     bool &Matched) const {}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectVOP3PMadMixModsExt(
    MachineOperand &Root) const {}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectVOP3PMadMixMods(MachineOperand &Root) const {}

bool AMDGPUInstructionSelector::selectSBarrierSignalIsfirst(
    MachineInstr &I, Intrinsic::ID IntrID) const {}

unsigned getNamedBarrierOp(bool HasInlineConst, Intrinsic::ID IntrID) {}

bool AMDGPUInstructionSelector::selectNamedBarrierInst(
    MachineInstr &I, Intrinsic::ID IntrID) const {}

bool AMDGPUInstructionSelector::selectSBarrierLeave(MachineInstr &I) const {}

void AMDGPUInstructionSelector::renderTruncImm32(MachineInstrBuilder &MIB,
                                                 const MachineInstr &MI,
                                                 int OpIdx) const {}

void AMDGPUInstructionSelector::renderNegateImm(MachineInstrBuilder &MIB,
                                                const MachineInstr &MI,
                                                int OpIdx) const {}

void AMDGPUInstructionSelector::renderBitcastFPImm(MachineInstrBuilder &MIB,
                                                   const MachineInstr &MI,
                                                   int OpIdx) const {}

void AMDGPUInstructionSelector::renderPopcntImm(MachineInstrBuilder &MIB,
                                                const MachineInstr &MI,
                                                int OpIdx) const {}

/// This only really exists to satisfy DAG type checking machinery, so is a
/// no-op here.
void AMDGPUInstructionSelector::renderTruncTImm(MachineInstrBuilder &MIB,
                                                const MachineInstr &MI,
                                                int OpIdx) const {}

void AMDGPUInstructionSelector::renderOpSelTImm(MachineInstrBuilder &MIB,
                                                const MachineInstr &MI,
                                                int OpIdx) const {}

void AMDGPUInstructionSelector::renderExtractCPol(MachineInstrBuilder &MIB,
                                                  const MachineInstr &MI,
                                                  int OpIdx) const {}

void AMDGPUInstructionSelector::renderExtractSWZ(MachineInstrBuilder &MIB,
                                                 const MachineInstr &MI,
                                                 int OpIdx) const {}

void AMDGPUInstructionSelector::renderExtractCpolSetGLC(
    MachineInstrBuilder &MIB, const MachineInstr &MI, int OpIdx) const {}

void AMDGPUInstructionSelector::renderFrameIndex(MachineInstrBuilder &MIB,
                                                 const MachineInstr &MI,
                                                 int OpIdx) const {}

void AMDGPUInstructionSelector::renderFPPow2ToExponent(MachineInstrBuilder &MIB,
                                                       const MachineInstr &MI,
                                                       int OpIdx) const {}

void AMDGPUInstructionSelector::renderRoundMode(MachineInstrBuilder &MIB,
                                                const MachineInstr &MI,
                                                int OpIdx) const {}

bool AMDGPUInstructionSelector::isInlineImmediate(const APInt &Imm) const {}

bool AMDGPUInstructionSelector::isInlineImmediate(const APFloat &Imm) const {}