//===-- RISCVISelLowering.cpp - RISC-V DAG Lowering Implementation  -------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that RISC-V uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#include "RISCVISelLowering.h"
#include "MCTargetDesc/RISCVMatInt.h"
#include "RISCV.h"
#include "RISCVConstantPoolValue.h"
#include "RISCVMachineFunctionInfo.h"
#include "RISCVRegisterInfo.h"
#include "RISCVSubtarget.h"
#include "RISCVTargetMachine.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAGAddressAnalysis.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/DiagnosticPrinter.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicsRISCV.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/InstructionCost.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include <optional>

using namespace llvm;

#define DEBUG_TYPE "riscv-lower"

STATISTIC(NumTailCalls, "Number of tail calls");

static cl::opt<unsigned> ExtensionMaxWebSize(
    DEBUG_TYPE "-ext-max-web-size", cl::Hidden,
    cl::desc("Give the maximum size (in number of nodes) of the web of "
             "instructions that we will consider for VW expansion"),
    cl::init(18));

static cl::opt<bool>
    AllowSplatInVW_W(DEBUG_TYPE "-form-vw-w-with-splat", cl::Hidden,
                     cl::desc("Allow the formation of VW_W operations (e.g., "
                              "VWADD_W) with splat constants"),
                     cl::init(false));

static cl::opt<unsigned> NumRepeatedDivisors(
    DEBUG_TYPE "-fp-repeated-divisors", cl::Hidden,
    cl::desc("Set the minimum number of repetitions of a divisor to allow "
             "transformation to multiplications by the reciprocal"),
    cl::init(2));

static cl::opt<int>
    FPImmCost(DEBUG_TYPE "-fpimm-cost", cl::Hidden,
              cl::desc("Give the maximum number of instructions that we will "
                       "use for creating a floating-point immediate value"),
              cl::init(2));

RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
                                         const RISCVSubtarget &STI)
    : TargetLowering(TM), Subtarget(STI) {}

EVT RISCVTargetLowering::getSetCCResultType(const DataLayout &DL,
                                            LLVMContext &Context,
                                            EVT VT) const {}

MVT RISCVTargetLowering::getVPExplicitVectorLengthTy() const {}

// Return false if we can lower get_vector_length to a vsetvli intrinsic.
bool RISCVTargetLowering::shouldExpandGetVectorLength(EVT TripCountVT,
                                                      unsigned VF,
                                                      bool IsScalable) const {}

bool RISCVTargetLowering::shouldExpandCttzElements(EVT VT) const {}

bool RISCVTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
                                             const CallInst &I,
                                             MachineFunction &MF,
                                             unsigned Intrinsic) const {}

bool RISCVTargetLowering::isLegalAddressingMode(const DataLayout &DL,
                                                const AddrMode &AM, Type *Ty,
                                                unsigned AS,
                                                Instruction *I) const {}

bool RISCVTargetLowering::isLegalICmpImmediate(int64_t Imm) const {}

bool RISCVTargetLowering::isLegalAddImmediate(int64_t Imm) const {}

// On RV32, 64-bit integers are split into their high and low parts and held
// in two different registers, so the trunc is free since the low register can
// just be used.
// FIXME: Should we consider i64->i32 free on RV64 to match the EVT version of
// isTruncateFree?
bool RISCVTargetLowering::isTruncateFree(Type *SrcTy, Type *DstTy) const {}
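
// For example, on RV32 an i64 value %x lives in a {hi, lo} register pair, so
//   %t = trunc i64 %x to i32
// needs no instruction at all: the low register is simply used directly.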

bool RISCVTargetLowering::isTruncateFree(EVT SrcVT, EVT DstVT) const {}

bool RISCVTargetLowering::isTruncateFree(SDValue Val, EVT VT2) const {}

bool RISCVTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {}

bool RISCVTargetLowering::isSExtCheaperThanZExt(EVT SrcVT, EVT DstVT) const {}

bool RISCVTargetLowering::signExtendConstant(const ConstantInt *CI) const {}

bool RISCVTargetLowering::isCheapToSpeculateCttz(Type *Ty) const {}

bool RISCVTargetLowering::isCheapToSpeculateCtlz(Type *Ty) const {}

bool RISCVTargetLowering::isMaskAndCmp0FoldingBeneficial(
    const Instruction &AndI) const {}

bool RISCVTargetLowering::hasAndNotCompare(SDValue Y) const {}

bool RISCVTargetLowering::hasBitTest(SDValue X, SDValue Y) const {}

bool RISCVTargetLowering::shouldFoldSelectWithIdentityConstant(unsigned Opcode,
                                                               EVT VT) const {}

bool RISCVTargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
                                                            Type *Ty) const {}

bool RISCVTargetLowering::
    shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd(
        SDValue X, ConstantSDNode *XC, ConstantSDNode *CC, SDValue Y,
        unsigned OldShiftOpcode, unsigned NewShiftOpcode,
        SelectionDAG &DAG) const {}

bool RISCVTargetLowering::canSplatOperand(unsigned Opcode, int Operand) const {}

bool RISCVTargetLowering::canSplatOperand(Instruction *I, int Operand) const {}

/// Check if sinking \p I's operands to I's basic block is profitable, because
/// the operands can be folded into a target instruction, e.g.
/// splats of scalars can fold into vector instructions.
bool RISCVTargetLowering::shouldSinkOperands(
    Instruction *I, SmallVectorImpl<Use *> &Ops) const {}

bool RISCVTargetLowering::shouldScalarizeBinop(SDValue VecOp) const {}

bool RISCVTargetLowering::isOffsetFoldingLegal(
    const GlobalAddressSDNode *GA) const {}

// Returns 0-31 if the fli instruction is available for the type and this is a
// legal FP immediate for the type. Returns -1 otherwise.
int RISCVTargetLowering::getLegalZfaFPImm(const APFloat &Imm, EVT VT) const {}

bool RISCVTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
                                       bool ForCodeSize) const {}

// TODO: This is very conservative.
bool RISCVTargetLowering::isExtractSubvectorCheap(EVT ResVT, EVT SrcVT,
                                                  unsigned Index) const {}

MVT RISCVTargetLowering::getRegisterTypeForCallingConv(LLVMContext &Context,
                                                       CallingConv::ID CC,
                                                       EVT VT) const {}

unsigned RISCVTargetLowering::getNumRegistersForCallingConv(
    LLVMContext &Context, CallingConv::ID CC, EVT VT) const {}

unsigned RISCVTargetLowering::getVectorTypeBreakdownForCallingConv(
    LLVMContext &Context, CallingConv::ID CC, EVT VT, EVT &IntermediateVT,
    unsigned &NumIntermediates, MVT &RegisterVT) const {}

// Changes the condition code and swaps operands if necessary, so the SetCC
// operation matches one of the comparisons supported directly by branches
// in the RISC-V ISA. May adjust compares to favor compare with 0 over compare
// with 1/-1.
static void translateSetCCForBranch(const SDLoc &DL, SDValue &LHS, SDValue &RHS,
                                    ISD::CondCode &CC, SelectionDAG &DAG) {}
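
// For example, RISC-V branches directly support eq, ne, lt(u) and ge(u), so a
// greater-than compare is handled by swapping the operands:
//   (setcc x, y, setgt) -> (setcc y, x, setlt)
// and a compare with -1 can instead use x0 by comparing with 0:
//   (setcc x, -1, setgt) -> (setcc x, 0, setge)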

RISCVII::VLMUL RISCVTargetLowering::getLMUL(MVT VT) {}

unsigned RISCVTargetLowering::getRegClassIDForLMUL(RISCVII::VLMUL LMul) {}

unsigned RISCVTargetLowering::getSubregIndexByMVT(MVT VT, unsigned Index) {}

unsigned RISCVTargetLowering::getRegClassIDForVecVT(MVT VT) {}

// Attempt to decompose a subvector insert/extract between VecVT and
// SubVecVT via subregister indices. Returns the subregister index that
// can perform the subvector insert/extract with the given element index, as
// well as the index corresponding to any leftover subvectors that must be
// further inserted/extracted within the register class for SubVecVT.
std::pair<unsigned, unsigned>
RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
    MVT VecVT, MVT SubVecVT, unsigned InsertExtractIdx,
    const RISCVRegisterInfo *TRI) {}

// Permit combining of mask vectors as BUILD_VECTOR never expands to scalar
// stores for those types.
bool RISCVTargetLowering::mergeStoresAfterLegalization(EVT VT) const {}

bool RISCVTargetLowering::isLegalElementTypeForRVV(EVT ScalarTy) const {}

unsigned RISCVTargetLowering::combineRepeatedFPDivisors() const {}

static SDValue getVLOperand(SDValue Op) {}

static bool useRVVForFixedLengthVectorVT(MVT VT,
                                         const RISCVSubtarget &Subtarget) {}

bool RISCVTargetLowering::useRVVForFixedLengthVectorVT(MVT VT) const {}

// Return the largest legal scalable vector type that matches VT's element type.
static MVT getContainerForFixedLengthVector(const TargetLowering &TLI, MVT VT,
                                            const RISCVSubtarget &Subtarget) {}

static MVT getContainerForFixedLengthVector(SelectionDAG &DAG, MVT VT,
                                            const RISCVSubtarget &Subtarget) {}

MVT RISCVTargetLowering::getContainerForFixedLengthVector(MVT VT) const {}

// Grow V to consume an entire RVV register.
static SDValue convertToScalableVector(EVT VT, SDValue V, SelectionDAG &DAG,
                                       const RISCVSubtarget &Subtarget) {}

// Shrink V so it's just big enough to maintain a VT's worth of data.
static SDValue convertFromScalableVector(EVT VT, SDValue V, SelectionDAG &DAG,
                                         const RISCVSubtarget &Subtarget) {}

/// Return the mask type suitable for masking the provided vector type.  This
/// is simply an i1 element type vector of the same (possibly scalable) length.
static MVT getMaskTypeFor(MVT VecVT) {}

/// Creates an all ones mask suitable for masking a vector of type VecTy with
/// vector length VL.
static SDValue getAllOnesMask(MVT VecVT, SDValue VL, const SDLoc &DL,
                              SelectionDAG &DAG) {}

static std::pair<SDValue, SDValue>
getDefaultScalableVLOps(MVT VecVT, const SDLoc &DL, SelectionDAG &DAG,
                        const RISCVSubtarget &Subtarget) {}

static std::pair<SDValue, SDValue>
getDefaultVLOps(uint64_t NumElts, MVT ContainerVT, const SDLoc &DL,
                SelectionDAG &DAG, const RISCVSubtarget &Subtarget) {}

// Gets the two common "VL" operands: an all-ones mask and the vector length.
// VecVT is a vector type, either fixed-length or scalable, and ContainerVT is
// the vector type that the fixed-length vector is contained in. If VecVT is
// scalable, then ContainerVT should be the same as VecVT.
static std::pair<SDValue, SDValue>
getDefaultVLOps(MVT VecVT, MVT ContainerVT, const SDLoc &DL, SelectionDAG &DAG,
                const RISCVSubtarget &Subtarget) {}

SDValue RISCVTargetLowering::computeVLMax(MVT VecVT, const SDLoc &DL,
                                          SelectionDAG &DAG) const {}

std::pair<unsigned, unsigned>
RISCVTargetLowering::computeVLMAXBounds(MVT VecVT,
                                        const RISCVSubtarget &Subtarget) {}

// The state of RVV BUILD_VECTOR and VECTOR_SHUFFLE lowering is that very few
// of either is (currently) supported. This can get us into an infinite loop
// where we try to lower a BUILD_VECTOR as a VECTOR_SHUFFLE as a BUILD_VECTOR
// as a ..., etc.
// Until either (or both) of these can reliably lower any node, reporting that
// we don't want to expand BUILD_VECTORs via VECTOR_SHUFFLEs at least breaks
// the infinite loop. Note that this lowers BUILD_VECTOR through the stack,
// which is not desirable.
bool RISCVTargetLowering::shouldExpandBuildVectorWithShuffles(
    EVT VT, unsigned DefinedValues) const {}

InstructionCost RISCVTargetLowering::getLMULCost(MVT VT) const {}

/// Return the cost of a vrgather.vv instruction for the type VT.  vrgather.vv
/// is generally quadratic in the number of vregs implied by LMUL.  Note that
/// the operands (index and possibly mask) are handled separately.
InstructionCost RISCVTargetLowering::getVRGatherVVCost(MVT VT) const {}

/// Return the cost of a vrgather.vi (or vx) instruction for the type VT.
/// vrgather.vi/vx may be linear in the number of vregs implied by LMUL,
/// or may track the vrgather.vv cost. It is implementation-dependent.
InstructionCost RISCVTargetLowering::getVRGatherVICost(MVT VT) const {}

/// Return the cost of a vslidedown.vx or vslideup.vx instruction
/// for the type VT.  (This does not cover the vslide1up or vslide1down
/// variants.)  Slides may be linear in the number of vregs implied by LMUL,
/// or may track the vrgather.vv cost. It is implementation-dependent.
InstructionCost RISCVTargetLowering::getVSlideVXCost(MVT VT) const {}

/// Return the cost of a vslidedown.vi or vslideup.vi instruction
/// for the type VT.  (This does not cover the vslide1up or vslide1down
/// variants.)  Slides may be linear in the number of vregs implied by LMUL,
/// or may track the vrgather.vv cost. It is implementation-dependent.
InstructionCost RISCVTargetLowering::getVSlideVICost(MVT VT) const {}

static SDValue lowerINT_TO_FP(SDValue Op, SelectionDAG &DAG,
                              const RISCVSubtarget &Subtarget) {}

static SDValue lowerFP_TO_INT_SAT(SDValue Op, SelectionDAG &DAG,
                                  const RISCVSubtarget &Subtarget) {}

static SDValue lowerFP_TO_INT(SDValue Op, SelectionDAG &DAG,
                              const RISCVSubtarget &Subtarget) {}

static RISCVFPRndMode::RoundingMode matchRoundingOp(unsigned Opc) {}

// Expand vector FTRUNC, FCEIL, FFLOOR, FROUND, VP_FCEIL, VP_FFLOOR, VP_FROUND,
// VP_FROUNDEVEN, VP_FROUNDTOZERO, VP_FRINT and VP_FNEARBYINT by converting to
// the integer domain and back, taking care to avoid converting values that
// are NaN or already correct.
static SDValue
lowerVectorFTRUNC_FCEIL_FFLOOR_FROUND(SDValue Op, SelectionDAG &DAG,
                                      const RISCVSubtarget &Subtarget) {}
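
// A sketch of the usual idiom (not the exact emitted sequence): values whose
// magnitude is at least 2^(mantissa bits) are already integral, and NaNs fail
// the ordered compare, so both are left untouched.
//   %mask = fcmp olt (fabs %x), 2^(mantissa bits)
//   %i    = fcvt.x.f %x     ; with the static rounding mode for the operation
//   %f    = copysign (fcvt.f.x %i), %x
//   merge %f into %x under %mask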

// Expand vector STRICT_FTRUNC, STRICT_FCEIL, STRICT_FFLOOR, STRICT_FROUND,
// STRICT_FROUNDEVEN and STRICT_FNEARBYINT by converting any sNaN in the source
// to a qNaN and converting the new source to integer and back to FP.
static SDValue
lowerVectorStrictFTRUNC_FCEIL_FFLOOR_FROUND(SDValue Op, SelectionDAG &DAG,
                                            const RISCVSubtarget &Subtarget) {}

static SDValue
lowerFTRUNC_FCEIL_FFLOOR_FROUND(SDValue Op, SelectionDAG &DAG,
                                const RISCVSubtarget &Subtarget) {}

// Expand vector LRINT and LLRINT by converting to the integer domain.
static SDValue lowerVectorXRINT(SDValue Op, SelectionDAG &DAG,
                                const RISCVSubtarget &Subtarget) {}

static SDValue
getVSlidedown(SelectionDAG &DAG, const RISCVSubtarget &Subtarget,
              const SDLoc &DL, EVT VT, SDValue Passthru, SDValue Op,
              SDValue Offset, SDValue Mask, SDValue VL,
              unsigned Policy = RISCVII::TAIL_UNDISTURBED_MASK_UNDISTURBED) {}

static SDValue
getVSlideup(SelectionDAG &DAG, const RISCVSubtarget &Subtarget, const SDLoc &DL,
            EVT VT, SDValue Passthru, SDValue Op, SDValue Offset, SDValue Mask,
            SDValue VL,
            unsigned Policy = RISCVII::TAIL_UNDISTURBED_MASK_UNDISTURBED) {}

static MVT getLMUL1VT(MVT VT) {}

struct VIDSequence {};

static std::optional<APInt> getExactInteger(const APFloat &APF,
                                            uint32_t BitWidth) {}

// Try to match an arithmetic-sequence BUILD_VECTOR [X,X+S,X+2*S,...,X+(N-1)*S]
// to the (non-zero) step S and start value X. This can then be lowered as the
// RVV sequence (VID * S) + X, for example.
// The step S is represented as an integer numerator divided by a positive
// denominator. Note that the implementation currently only identifies
// sequences in which either the numerator is +/- 1 or the denominator is 1. It
// cannot detect 2/3, for example.
// Note that this method will also match potentially unappealing index
// sequences, like <i32 0, i32 50939494>; however, it is left to the caller to
// determine whether this is worth generating code for.
//
// EltSizeInBits is the size of the type that the sequence will be calculated
// in, i.e. SEW for build_vectors or XLEN for address calculations.
static std::optional<VIDSequence> isSimpleVIDSequence(SDValue Op,
                                                      unsigned EltSizeInBits) {}
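
// For example, the v4i32 BUILD_VECTOR <2, 4, 6, 8> matches with step 2 and
// start value 2, lowerable as ((vid.v * 2) + 2), and <0, 0, 1, 1> matches
// with the fractional step 1/2 (numerator 1, denominator 2).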

// Match a splatted value (SPLAT_VECTOR/BUILD_VECTOR) of an EXTRACT_VECTOR_ELT
// and lower it as a VRGATHER_VX_VL from the source vector.
static SDValue matchSplatAsGather(SDValue SplatVal, MVT VT, const SDLoc &DL,
                                  SelectionDAG &DAG,
                                  const RISCVSubtarget &Subtarget) {}

/// Try and optimize BUILD_VECTORs with "dominant values" - these are values
/// which constitute a large proportion of the elements. In such cases we can
/// splat a vector with the dominant element and make up the shortfall with
/// INSERT_VECTOR_ELTs.  Returns SDValue() if not profitable.
/// Note that this includes vectors of 2 elements by association. The
/// upper-most element is the "dominant" one, allowing us to use a splat to
/// "insert" the upper element, and an insert of the lower element at position
/// 0, which improves codegen.
static SDValue lowerBuildVectorViaDominantValues(SDValue Op, SelectionDAG &DAG,
                                                 const RISCVSubtarget &Subtarget) {}

static SDValue lowerBuildVectorOfConstants(SDValue Op, SelectionDAG &DAG,
                                           const RISCVSubtarget &Subtarget) {}

static unsigned getPACKOpcode(unsigned DestBW,
                              const RISCVSubtarget &Subtarget) {}

/// Double the element size of the build vector to reduce the number
/// of vslide1down in the build vector chain.  In the worst case, this
/// trades three scalar operations for 1 vector operation.  Scalar
/// operations are generally lower latency, and for out-of-order cores
/// we also benefit from additional parallelism.
static SDValue lowerBuildVectorViaPacking(SDValue Op, SelectionDAG &DAG,
                                          const RISCVSubtarget &Subtarget) {}
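
// For example (a sketch, assuming Zbkb on RV64): a v8i8 build vector can be
// emitted as a v4i16 build vector whose elements are formed by packing
// adjacent i8 pairs with packh, halving the length of the vslide1down chain.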

static SDValue lowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
                                 const RISCVSubtarget &Subtarget) {}

static SDValue splatPartsI64WithVL(const SDLoc &DL, MVT VT, SDValue Passthru,
                                   SDValue Lo, SDValue Hi, SDValue VL,
                                   SelectionDAG &DAG) {}

// Called by type legalization to handle splat of i64 on RV32.
// FIXME: We can optimize this when the type has sign or zero bits in one
// of the halves.
static SDValue splatSplitI64WithVL(const SDLoc &DL, MVT VT, SDValue Passthru,
                                   SDValue Scalar, SDValue VL,
                                   SelectionDAG &DAG) {}

// This function lowers a splat of a scalar operand Splat with the vector
// length VL. It ensures the final sequence is type legal, which is useful when
// lowering a splat after type legalization.
static SDValue lowerScalarSplat(SDValue Passthru, SDValue Scalar, SDValue VL,
                                MVT VT, const SDLoc &DL, SelectionDAG &DAG,
                                const RISCVSubtarget &Subtarget) {}

// This function lowers an insert of a scalar operand Scalar into lane
// 0 of the vector regardless of the value of VL.  The contents of the
// remaining lanes of the result vector are unspecified.  VL is assumed
// to be non-zero.
static SDValue lowerScalarInsert(SDValue Scalar, SDValue VL, MVT VT,
                                 const SDLoc &DL, SelectionDAG &DAG,
                                 const RISCVSubtarget &Subtarget) {}

// Is this a shuffle that extracts either the even or odd elements of a vector?
// That is, specifically, either (a) or (b) below.
// t34: v8i8 = extract_subvector t11, Constant:i64<0>
// t33: v8i8 = extract_subvector t11, Constant:i64<8>
// a) t35: v8i8 = vector_shuffle<0,2,4,6,8,10,12,14> t34, t33
// b) t35: v8i8 = vector_shuffle<1,3,5,7,9,11,13,15> t34, t33
// Returns {Src Vector, Even Elements} on success
static bool isDeinterleaveShuffle(MVT VT, MVT ContainerVT, SDValue V1,
                                  SDValue V2, ArrayRef<int> Mask,
                                  const RISCVSubtarget &Subtarget) {}

/// Is this a shuffle interleaving contiguous elements from one vector into the
/// even elements and contiguous elements from another vector into the odd
/// elements? \p EvenSrc will contain the element that should be in the first
/// even element. \p OddSrc will contain the element that should be in the first
/// odd element. These can be the first element in a source or the element
/// halfway through the source.
static bool isInterleaveShuffle(ArrayRef<int> Mask, MVT VT, int &EvenSrc,
                                int &OddSrc, const RISCVSubtarget &Subtarget) {}
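
// For example, for two v8i8 sources the mask <0, 8, 1, 9, 2, 10, 3, 11> puts
// the low half of V1 in the even elements and the low half of V2 in the odd
// elements, giving EvenSrc = 0 and OddSrc = 8 (indices into the concatenation
// of the two sources).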

/// Match shuffles that concatenate two vectors, rotate the concatenation,
/// and then extract the original number of elements from the rotated result.
/// This is equivalent to vector.splice or X86's PALIGNR instruction. The
/// returned rotation amount is for a rotate right, where elements move from
/// higher elements to lower elements. \p LoSrc indicates the first source
/// vector of the rotate or -1 for undef. \p HiSrc indicates the second vector
/// of the rotate or -1 for undef. At least one of \p LoSrc and \p HiSrc will be
/// 0 or 1 if a rotation is found.
///
/// NOTE: We talk about rotate to the right which matches how bit shift and
/// rotate instructions are described where LSBs are on the right, but LLVM IR
/// and the table below write vectors with the lowest elements on the left.
static int isElementRotate(int &LoSrc, int &HiSrc, ArrayRef<int> Mask) {}

// Lower a deinterleave shuffle to vnsrl.
// [a, p, b, q, c, r, d, s] -> [a, b, c, d] (EvenElts == true)
//                          -> [p, q, r, s] (EvenElts == false)
// VT is the type of the vector to return, <[vscale x ]n x ty>
// Src is the vector to deinterleave of type <[vscale x ]n*2 x ty>
static SDValue getDeinterleaveViaVNSRL(const SDLoc &DL, MVT VT, SDValue Src,
                                       bool EvenElts,
                                       const RISCVSubtarget &Subtarget,
                                       SelectionDAG &DAG) {}
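
// For example, a v8i8 source <a, p, b, q, c, r, d, s> is reinterpreted as
// v4i16 and narrowed with vnsrl: a shift amount of 0 keeps the low bytes
// <a, b, c, d>, while a shift amount of 8 keeps the high bytes <p, q, r, s>
// (elements are little-endian within each i16).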

// Lower the following shuffle to vslidedown.
// a)
// t49: v8i8 = extract_subvector t13, Constant:i64<0>
// t109: v8i8 = extract_subvector t13, Constant:i64<8>
// t108: v8i8 = vector_shuffle<1,2,3,4,5,6,7,8> t49, t106
// b)
// t69: v16i16 = extract_subvector t68, Constant:i64<0>
// t23: v8i16 = extract_subvector t69, Constant:i64<0>
// t29: v4i16 = extract_subvector t23, Constant:i64<4>
// t26: v8i16 = extract_subvector t69, Constant:i64<8>
// t30: v4i16 = extract_subvector t26, Constant:i64<0>
// t54: v4i16 = vector_shuffle<1,2,3,4> t29, t30
static SDValue lowerVECTOR_SHUFFLEAsVSlidedown(const SDLoc &DL, MVT VT,
                                               SDValue V1, SDValue V2,
                                               ArrayRef<int> Mask,
                                               const RISCVSubtarget &Subtarget,
                                               SelectionDAG &DAG) {}

// Because vslideup leaves the destination elements at the start intact, we can
// use it to perform shuffles that insert subvectors:
//
// vector_shuffle v8:v8i8, v9:v8i8, <0, 1, 2, 3, 8, 9, 10, 11>
// ->
// vsetvli zero, 8, e8, mf2, ta, ma
// vslideup.vi v8, v9, 4
//
// vector_shuffle v8:v8i8, v9:v8i8 <0, 1, 8, 9, 10, 5, 6, 7>
// ->
// vsetvli zero, 5, e8, mf2, tu, ma
// vslideup.vi v8, v9, 2
static SDValue lowerVECTOR_SHUFFLEAsVSlideup(const SDLoc &DL, MVT VT,
                                             SDValue V1, SDValue V2,
                                             ArrayRef<int> Mask,
                                             const RISCVSubtarget &Subtarget,
                                             SelectionDAG &DAG) {}

/// Match v(f)slide1up/down idioms.  These operations involve sliding
/// N-1 elements to make room for an inserted scalar at one end.
static SDValue lowerVECTOR_SHUFFLEAsVSlide1(const SDLoc &DL, MVT VT,
                                            SDValue V1, SDValue V2,
                                            ArrayRef<int> Mask,
                                            const RISCVSubtarget &Subtarget,
                                            SelectionDAG &DAG) {}

// Given two input vectors of <[vscale x ]n x ty>, use vwaddu.vv and vwmaccu.vx
// to create an interleaved vector of <[vscale x] n*2 x ty>.
// This requires that the size of ty is less than the subtarget's maximum ELEN.
static SDValue getWideningInterleave(SDValue EvenV, SDValue OddV,
                                     const SDLoc &DL, SelectionDAG &DAG,
                                     const RISCVSubtarget &Subtarget) {}
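
// Arithmetically, the interleave is zext(Even) + 2^SEW * zext(Odd) computed
// in 2*SEW-wide lanes, e.g. (a sketch; t0 holds 2^SEW - 1):
//   vwaddu.vv  v8, EvenV, OddV    ; v8 = zext(Even) + zext(Odd)
//   vwmaccu.vx v8, t0, OddV       ; v8 += (2^SEW - 1) * zext(Odd)
// which sums to Even + 2^SEW * Odd: Odd lands in the high half and Even in
// the low half of each widened element.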

// If we have a vector of bits that we want to reverse, we can use a vbrev on a
// larger element type, e.g. v32i1 can be reversed with a v1i32 bitreverse.
static SDValue lowerBitreverseShuffle(ShuffleVectorSDNode *SVN,
                                      SelectionDAG &DAG,
                                      const RISCVSubtarget &Subtarget) {}

static bool isLegalBitRotate(ShuffleVectorSDNode *SVN,
                             SelectionDAG &DAG,
                             const RISCVSubtarget &Subtarget,
                             MVT &RotateVT, unsigned &RotateAmt) {}

// Given a shuffle mask like <3, 0, 1, 2, 7, 4, 5, 6> for v8i8, we can
// reinterpret it as a v2i32 and rotate it right by 8 instead. We can lower this
// as a vror.vi if we have Zvkb, or otherwise as a vsll, vsrl and vor.
static SDValue lowerVECTOR_SHUFFLEAsRotate(ShuffleVectorSDNode *SVN,
                                           SelectionDAG &DAG,
                                           const RISCVSubtarget &Subtarget) {}

// If compiling with an exactly known VLEN, see if we can split a
// shuffle on m2 or larger into a small number of m1 sized shuffles
// which write each destination register exactly once.
static SDValue lowerShuffleViaVRegSplitting(ShuffleVectorSDNode *SVN,
                                            SelectionDAG &DAG,
                                            const RISCVSubtarget &Subtarget) {}

static SDValue lowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG,
                                   const RISCVSubtarget &Subtarget) {}

bool RISCVTargetLowering::isShuffleMaskLegal(ArrayRef<int> M, EVT VT) const {}

// Lower CTLZ_ZERO_UNDEF or CTTZ_ZERO_UNDEF by converting to FP and extracting
// the exponent.
SDValue
RISCVTargetLowering::lowerCTLZ_CTTZ_ZERO_UNDEF(SDValue Op,
                                               SelectionDAG &DAG) const {}
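
// For example, for i32 cttz_zero_undef(x): isolate the lowest set bit with
// (x & -x), convert to float, and read the index out of the biased exponent.
// For x = 8, float 8.0 has exponent field 130, and 130 - 127 = 3 = cttz(8).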

SDValue RISCVTargetLowering::lowerVPCttzElements(SDValue Op,
                                                 SelectionDAG &DAG) const {}

// While RVV has alignment restrictions, we should always be able to load as a
// legal equivalently-sized byte-typed vector instead. This method is
// responsible for re-expressing an ISD::LOAD via a correctly-aligned type. If
// the load is already correctly-aligned, it returns SDValue().
SDValue RISCVTargetLowering::expandUnalignedRVVLoad(SDValue Op,
                                                    SelectionDAG &DAG) const {}

// While RVV has alignment restrictions, we should always be able to store as a
// legal equivalently-sized byte-typed vector instead. This method is
// responsible for re-expressing an ISD::STORE via a correctly-aligned type. It
// returns SDValue() if the store is already correctly aligned.
SDValue RISCVTargetLowering::expandUnalignedRVVStore(SDValue Op,
                                                     SelectionDAG &DAG) const {}

static SDValue lowerConstant(SDValue Op, SelectionDAG &DAG,
                             const RISCVSubtarget &Subtarget) {}

SDValue RISCVTargetLowering::lowerConstantFP(SDValue Op,
                                             SelectionDAG &DAG) const {}

static SDValue LowerATOMIC_FENCE(SDValue Op, SelectionDAG &DAG,
                                 const RISCVSubtarget &Subtarget) {}

SDValue RISCVTargetLowering::LowerIS_FPCLASS(SDValue Op,
                                             SelectionDAG &DAG) const {}

// Lower fmaximum and fminimum. Unlike our fmax and fmin instructions, these
// operations propagate NaNs.
static SDValue lowerFMAXIMUM_FMINIMUM(SDValue Op, SelectionDAG &DAG,
                                      const RISCVSubtarget &Subtarget) {}
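
// A sketch of the idiom for fmaximum (fminimum is analogous): force both
// operands to NaN if either one is NaN, since fmax(NaN, NaN) yields a qNaN.
//   %xnotnan = fcmp oeq %x, %x    ; false iff x is NaN
//   %ynotnan = fcmp oeq %y, %y    ; false iff y is NaN
//   %nx = select %ynotnan, %x, %y
//   %ny = select %xnotnan, %y, %x
//   %r  = fmax %nx, %ny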

static SDValue lowerFABSorFNEG(SDValue Op, SelectionDAG &DAG,
                               const RISCVSubtarget &Subtarget) {}

static SDValue lowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG,
                              const RISCVSubtarget &Subtarget) {}

/// Get a RISC-V target-specific VL op for a given SDNode.
static unsigned getRISCVVLOp(SDValue Op) {}

/// Return true if a RISC-V target-specific op has a passthru operand.
static bool hasPassthruOp(unsigned Opcode) {}

/// Return true if a RISC-V target-specific op has a mask operand.
static bool hasMaskOp(unsigned Opcode) {}

static SDValue SplitVectorOp(SDValue Op, SelectionDAG &DAG) {}

static SDValue SplitVPOp(SDValue Op, SelectionDAG &DAG) {}

static SDValue SplitVectorReductionOp(SDValue Op, SelectionDAG &DAG) {}

static SDValue SplitStrictFPVectorOp(SDValue Op, SelectionDAG &DAG) {}

SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
                                            SelectionDAG &DAG) const {}

SDValue RISCVTargetLowering::emitFlushICache(SelectionDAG &DAG, SDValue InChain,
                                             SDValue Start, SDValue End,
                                             SDValue Flags, SDLoc DL) const {}

static SDValue getTargetNode(GlobalAddressSDNode *N, const SDLoc &DL, EVT Ty,
                             SelectionDAG &DAG, unsigned Flags) {}

static SDValue getTargetNode(BlockAddressSDNode *N, const SDLoc &DL, EVT Ty,
                             SelectionDAG &DAG, unsigned Flags) {}

static SDValue getTargetNode(ConstantPoolSDNode *N, const SDLoc &DL, EVT Ty,
                             SelectionDAG &DAG, unsigned Flags) {}

static SDValue getTargetNode(JumpTableSDNode *N, const SDLoc &DL, EVT Ty,
                             SelectionDAG &DAG, unsigned Flags) {}

static SDValue getLargeGlobalAddress(GlobalAddressSDNode *N, const SDLoc &DL,
                                     EVT Ty, SelectionDAG &DAG) {}

static SDValue getLargeExternalSymbol(ExternalSymbolSDNode *N, const SDLoc &DL,
                                      EVT Ty, SelectionDAG &DAG) {}

template <class NodeTy>
SDValue RISCVTargetLowering::getAddr(NodeTy *N, SelectionDAG &DAG,
                                     bool IsLocal, bool IsExternWeak) const {}

SDValue RISCVTargetLowering::lowerGlobalAddress(SDValue Op,
                                                SelectionDAG &DAG) const {}

SDValue RISCVTargetLowering::lowerBlockAddress(SDValue Op,
                                               SelectionDAG &DAG) const {}

SDValue RISCVTargetLowering::lowerConstantPool(SDValue Op,
                                               SelectionDAG &DAG) const {}

SDValue RISCVTargetLowering::lowerJumpTable(SDValue Op,
                                            SelectionDAG &DAG) const {}

SDValue RISCVTargetLowering::getStaticTLSAddr(GlobalAddressSDNode *N,
                                              SelectionDAG &DAG,
                                              bool UseGOT) const {}

SDValue RISCVTargetLowering::getDynamicTLSAddr(GlobalAddressSDNode *N,
                                               SelectionDAG &DAG) const {}

SDValue RISCVTargetLowering::getTLSDescAddr(GlobalAddressSDNode *N,
                                            SelectionDAG &DAG) const {}

SDValue RISCVTargetLowering::lowerGlobalTLSAddress(SDValue Op,
                                                   SelectionDAG &DAG) const {}

// Return true if Val is equal to (setcc LHS, RHS, CC).
// Return false if Val is the inverse of (setcc LHS, RHS, CC).
// Otherwise, return std::nullopt.
static std::optional<bool> matchSetCC(SDValue LHS, SDValue RHS,
                                      ISD::CondCode CC, SDValue Val) {}

static SDValue combineSelectToBinOp(SDNode *N, SelectionDAG &DAG,
                                    const RISCVSubtarget &Subtarget) {}

// Transform `binOp (select cond, x, c0), c1` where `c0` and `c1` are constants
// into `select cond, binOp(x, c1), binOp(c0, c1)` if profitable.
// For now we only consider transformation profitable if `binOp(c0, c1)` ends up
// being `0` or `-1`. In such cases we can replace `select` with `and`.
// TODO: Should we also do this if `binOp(c0, c1)` is cheaper to materialize
// than `c0`?
static SDValue
foldBinOpIntoSelectIfProfitable(SDNode *BO, SelectionDAG &DAG,
                                const RISCVSubtarget &Subtarget) {}

SDValue RISCVTargetLowering::lowerSELECT(SDValue Op, SelectionDAG &DAG) const {}

SDValue RISCVTargetLowering::lowerBRCOND(SDValue Op, SelectionDAG &DAG) const {}

SDValue RISCVTargetLowering::lowerVASTART(SDValue Op, SelectionDAG &DAG) const {}

SDValue RISCVTargetLowering::lowerFRAMEADDR(SDValue Op,
                                            SelectionDAG &DAG) const {}

SDValue RISCVTargetLowering::lowerRETURNADDR(SDValue Op,
                                             SelectionDAG &DAG) const {}

SDValue RISCVTargetLowering::lowerShiftLeftParts(SDValue Op,
                                                 SelectionDAG &DAG) const {}

SDValue RISCVTargetLowering::lowerShiftRightParts(SDValue Op, SelectionDAG &DAG,
                                                  bool IsSRA) const {}

// Lower splats of i1 types to SETCC. For each mask vector type, we have a
// legal equivalently-sized i8 type, so we can use that as a go-between.
SDValue RISCVTargetLowering::lowerVectorMaskSplat(SDValue Op,
                                                  SelectionDAG &DAG) const {}
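
// For example, splatting a non-constant i1 %b into v8i1 becomes, roughly:
//   (v8i1 (setcc (v8i8 (splat (and %b, 1))), (v8i8 (splat 0)), ne))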

// Custom-lower a SPLAT_VECTOR_PARTS where XLEN<SEW, as the SEW element type is
// illegal (currently only vXi64 RV32).
// FIXME: We could also catch non-constant sign-extended i32 values and lower
// them to VMV_V_X_VL.
SDValue RISCVTargetLowering::lowerSPLAT_VECTOR_PARTS(SDValue Op,
                                                     SelectionDAG &DAG) const {}

// Custom-lower extensions from mask vectors by using a vselect either with 1
// for zero/any-extension or -1 for sign-extension:
//   (vXiN = (s|z)ext vXi1:vmask) -> (vXiN = vselect vmask, (-1 or 1), 0)
// Note that any-extension is lowered identically to zero-extension.
SDValue RISCVTargetLowering::lowerVectorMaskExt(SDValue Op, SelectionDAG &DAG,
                                                int64_t ExtTrueVal) const {}

SDValue RISCVTargetLowering::lowerFixedLengthVectorExtendToRVV(
    SDValue Op, SelectionDAG &DAG, unsigned ExtendOpc) const {}

// Custom-lower truncations from vectors to mask vectors by using a mask and a
// setcc operation:
//   (vXi1 = trunc vXiN vec) -> (vXi1 = setcc (and vec, 1), 0, ne)
SDValue RISCVTargetLowering::lowerVectorMaskTruncLike(SDValue Op,
                                                      SelectionDAG &DAG) const {}

SDValue RISCVTargetLowering::lowerVectorTruncLike(SDValue Op,
                                                  SelectionDAG &DAG) const {}

SDValue
RISCVTargetLowering::lowerStrictFPExtendOrRoundLike(SDValue Op,
                                                    SelectionDAG &DAG) const {}

SDValue
RISCVTargetLowering::lowerVectorFPExtendOrRoundLike(SDValue Op,
                                                    SelectionDAG &DAG) const {}

// Given a scalable vector type and an index into it, returns the type for the
// smallest subvector that the index fits in. This can be used to reduce LMUL
// for operations like vslidedown.
//
// E.g. with Zvl128b, index 3 in an nxv4i32 fits within the first nxv2i32.
static std::optional<MVT>
getSmallestVTForIndex(MVT VecVT, unsigned MaxIdx, SDLoc DL, SelectionDAG &DAG,
                      const RISCVSubtarget &Subtarget) {}

// Custom-legalize INSERT_VECTOR_ELT so that the value is inserted into the
// first position of a vector, and that vector is slid up to the insert index.
// By limiting the active vector length to index+1 and merging with the
// original vector (with an undisturbed tail policy for elements >= VL), we
// achieve the desired result of leaving all elements untouched except the one
// at VL-1, which is replaced with the desired value.
SDValue RISCVTargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op,
                                                    SelectionDAG &DAG) const {}
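
// For example, inserting a0 at index 2 of a v8i32 held in v8 (a sketch of the
// resulting sequence):
//   vsetivli    zero, 3, e32, m1, tu, ma  ; VL = idx + 1, tail undisturbed
//   vmv.s.x     v9, a0                    ; value into lane 0
//   vslideup.vi v8, v9, 2                 ; slide it up to lane idx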

// Custom-lower EXTRACT_VECTOR_ELT operations to slide the vector down, then
// extract the first element: (extractelt (slidedown vec, idx), 0). For integer
// types this is done using VMV_X_S to allow us to glean information about the
// sign bits of the result.
SDValue RISCVTargetLowering::lowerEXTRACT_VECTOR_ELT(SDValue Op,
                                                     SelectionDAG &DAG) const {}
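
// For example, extracting the element at index a0 from a v8i32 in v8 (a
// sketch of the resulting sequence):
//   vslidedown.vx v8, v8, a0
//   vmv.x.s       a0, v8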

// Some RVV intrinsics may claim that they want an integer operand to be
// promoted or expanded.
static SDValue lowerVectorIntrinsicScalars(SDValue Op, SelectionDAG &DAG,
                                           const RISCVSubtarget &Subtarget) {}

// Lower the llvm.get.vector.length intrinsic to vsetvli. We only support
// scalable vector llvm.get.vector.length for now.
//
// We need to convert from a scalable VF to a vsetvli with VLMax equal to
// (vscale * VF). The vscale and VF are independent of element width. We use
// SEW=8 for the vsetvli because it is the only element width that supports all
// fractional LMULs. The LMUL is chosen so that with SEW=8 the VLMax is
// (vscale * VF), where vscale is defined as VLEN/RVVBitsPerBlock. The
// InsertVSETVLI pass can fix up the vtype of the vsetvli if a different
// SEW and LMUL are better for the surrounding vector instructions.
static SDValue lowerGetVectorLength(SDNode *N, SelectionDAG &DAG,
                                    const RISCVSubtarget &Subtarget) {}
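
// For example (assuming RVVBitsPerBlock = 64): for a scalable VF of 4, VLMax
// must be vscale * 4 = (VLEN / 64) * 4 = VLEN / 16. With SEW=8 this
// corresponds to LMUL = 1/2, so the intrinsic becomes a vsetvli with e8, mf2.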

static SDValue lowerCttzElts(SDNode *N, SelectionDAG &DAG,
                             const RISCVSubtarget &Subtarget) {}

static inline void promoteVCIXScalar(const SDValue &Op,
                                     SmallVectorImpl<SDValue> &Operands,
                                     SelectionDAG &DAG) {}

static void processVCIXOperands(SDValue &OrigOp,
                                SmallVectorImpl<SDValue> &Operands,
                                SelectionDAG &DAG) {}

// LMUL * VLEN should be greater than or equal to EGS * SEW
static inline bool isValidEGW(int EGS, EVT VT,
                              const RISCVSubtarget &Subtarget) {}

SDValue RISCVTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
                                                     SelectionDAG &DAG) const {}

static inline SDValue getVCIXISDNodeWCHAIN(SDValue &Op, SelectionDAG &DAG,
                                           unsigned Type) {}

static inline SDValue getVCIXISDNodeVOID(SDValue &Op, SelectionDAG &DAG,
                                         unsigned Type) {}

SDValue RISCVTargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
                                                    SelectionDAG &DAG) const {}

SDValue RISCVTargetLowering::LowerINTRINSIC_VOID(SDValue Op,
                                                 SelectionDAG &DAG) const {}

static unsigned getRVVReductionOp(unsigned ISDOpcode) {}

SDValue RISCVTargetLowering::lowerVectorMaskVecReduction(SDValue Op,
                                                         SelectionDAG &DAG,
                                                         bool IsVP) const {}

static bool isNonZeroAVL(SDValue AVL) {}

/// Helper to lower a reduction sequence of the form:
/// scalar = reduce_op vec, scalar_start
static SDValue lowerReductionSeq(unsigned RVVOpcode, MVT ResVT,
                                 SDValue StartValue, SDValue Vec, SDValue Mask,
                                 SDValue VL, const SDLoc &DL, SelectionDAG &DAG,
                                 const RISCVSubtarget &Subtarget) {}

SDValue RISCVTargetLowering::lowerVECREDUCE(SDValue Op,
                                            SelectionDAG &DAG) const {}

// Given a reduction op, this function returns the matching reduction opcode,
// the vector SDValue and the scalar SDValue required to lower this to a
// RISCVISD node.
static std::tuple<unsigned, SDValue, SDValue>
getRVVFPReductionOpAndOperands(SDValue Op, SelectionDAG &DAG, EVT EltVT,
                               const RISCVSubtarget &Subtarget) {}

SDValue RISCVTargetLowering::lowerFPVECREDUCE(SDValue Op,
                                              SelectionDAG &DAG) const {}

SDValue RISCVTargetLowering::lowerVPREDUCE(SDValue Op,
                                           SelectionDAG &DAG) const {}

SDValue RISCVTargetLowering::lowerINSERT_SUBVECTOR(SDValue Op,
                                                   SelectionDAG &DAG) const {}

SDValue RISCVTargetLowering::lowerEXTRACT_SUBVECTOR(SDValue Op,
                                                    SelectionDAG &DAG) const {}

// Widen a vector's operands to i8, then truncate its results back to the
// original type, typically i1.  All operand and result types must be the same.
static SDValue widenVectorOpsToi8(SDValue N, const SDLoc &DL,
                                  SelectionDAG &DAG) {}

SDValue RISCVTargetLowering::lowerVECTOR_DEINTERLEAVE(SDValue Op,
                                                      SelectionDAG &DAG) const {}

SDValue RISCVTargetLowering::lowerVECTOR_INTERLEAVE(SDValue Op,
                                                    SelectionDAG &DAG) const {}

// Lower step_vector to the vid instruction. Any non-identity step value must
// be accounted for by manual expansion.
SDValue RISCVTargetLowering::lowerSTEP_VECTOR(SDValue Op,
                                              SelectionDAG &DAG) const {}

// Implement vector_reverse using vrgather.vv with indices determined by
// subtracting the id of each element from (VLMAX-1). This will convert
// the indices like so:
// (0, 1,..., VLMAX-2, VLMAX-1) -> (VLMAX-1, VLMAX-2,..., 1, 0).
// TODO: This code assumes VLMAX <= 65536 for LMUL=8 SEW=16.
SDValue RISCVTargetLowering::lowerVECTOR_REVERSE(SDValue Op,
                                                 SelectionDAG &DAG) const {}
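
// A sketch of the resulting sequence, with a0 holding VLMAX - 1:
//   vid.v       v12              ; v12 = <0, 1, 2, ...>
//   vrsub.vx    v12, v12, a0     ; v12 = <VLMAX-1, VLMAX-2, ...>
//   vrgather.vv v8, v10, v12     ; v8 = reverse of v10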

SDValue RISCVTargetLowering::lowerVECTOR_SPLICE(SDValue Op,
                                                SelectionDAG &DAG) const {}

SDValue
RISCVTargetLowering::lowerFixedLengthVectorLoadToRVV(SDValue Op,
                                                     SelectionDAG &DAG) const {}

SDValue
RISCVTargetLowering::lowerFixedLengthVectorStoreToRVV(SDValue Op,
                                                      SelectionDAG &DAG) const {}

SDValue RISCVTargetLowering::lowerMaskedLoad(SDValue Op,
                                             SelectionDAG &DAG) const {}

SDValue RISCVTargetLowering::lowerMaskedStore(SDValue Op,
                                              SelectionDAG &DAG) const {}

SDValue
RISCVTargetLowering::lowerFixedLengthVectorSetccToRVV(SDValue Op,
                                                      SelectionDAG &DAG) const {}

SDValue RISCVTargetLowering::lowerVectorStrictFSetcc(SDValue Op,
                                                     SelectionDAG &DAG) const {}

// Lower vector ABS to smax(X, sub(0, X)).
SDValue RISCVTargetLowering::lowerABS(SDValue Op, SelectionDAG &DAG) const {}

SDValue RISCVTargetLowering::lowerFixedLengthVectorFCOPYSIGNToRVV(
    SDValue Op, SelectionDAG &DAG) const {}

SDValue RISCVTargetLowering::lowerFixedLengthVectorSelectToRVV(
    SDValue Op, SelectionDAG &DAG) const {}

SDValue RISCVTargetLowering::lowerToScalableOp(SDValue Op,
                                               SelectionDAG &DAG) const {}

// Lower a VP_* ISD node to the corresponding RISCVISD::*_VL node:
// * Operands of each node are assumed to be in the same order.
// * The EVL operand is promoted from i32 to i64 on RV64.
// * Fixed-length vectors are converted to their scalable-vector container
//   types.
SDValue RISCVTargetLowering::lowerVPOp(SDValue Op, SelectionDAG &DAG) const {}

SDValue RISCVTargetLowering::lowerVPExtMaskOp(SDValue Op,
                                              SelectionDAG &DAG) const {}

SDValue RISCVTargetLowering::lowerVPSetCCMaskOp(SDValue Op,
                                                SelectionDAG &DAG) const {}

// Lower Floating-Point/Integer Type-Convert VP SDNodes
SDValue RISCVTargetLowering::lowerVPFPIntConvOp(SDValue Op,
                                                SelectionDAG &DAG) const {}

SDValue
RISCVTargetLowering::lowerVPSpliceExperimental(SDValue Op,
                                               SelectionDAG &DAG) const {}

SDValue RISCVTargetLowering::lowerVPSplatExperimental(SDValue Op,
                                                      SelectionDAG &DAG) const {}

SDValue
RISCVTargetLowering::lowerVPReverseExperimental(SDValue Op,
                                                SelectionDAG &DAG) const {}

SDValue RISCVTargetLowering::lowerLogicVPOp(SDValue Op,
                                            SelectionDAG &DAG) const {}

SDValue RISCVTargetLowering::lowerVPStridedLoad(SDValue Op,
                                                SelectionDAG &DAG) const {}

SDValue RISCVTargetLowering::lowerVPStridedStore(SDValue Op,
                                                 SelectionDAG &DAG) const {}

// Custom lower MGATHER/VP_GATHER to a legalized form for RVV. It will then be
// matched to a RVV indexed load. The RVV indexed load instructions only
// support the "unsigned unscaled" addressing mode; indices are implicitly
// zero-extended or truncated to XLEN and are treated as byte offsets. Any
// signed or scaled indexing is extended to the XLEN value type and scaled
// accordingly.
SDValue RISCVTargetLowering::lowerMaskedGather(SDValue Op,
                                               SelectionDAG &DAG) const {}

// Custom lower MSCATTER/VP_SCATTER to a legalized form for RVV. It will then be
// matched to a RVV indexed store. The RVV indexed store instructions only
// support the "unsigned unscaled" addressing mode; indices are implicitly
// zero-extended or truncated to XLEN and are treated as byte offsets. Any
// signed or scaled indexing is extended to the XLEN value type and scaled
// accordingly.
SDValue RISCVTargetLowering::lowerMaskedScatter(SDValue Op,
                                                SelectionDAG &DAG) const {}

SDValue RISCVTargetLowering::lowerGET_ROUNDING(SDValue Op,
                                               SelectionDAG &DAG) const {}

SDValue RISCVTargetLowering::lowerSET_ROUNDING(SDValue Op,
                                               SelectionDAG &DAG) const {}

SDValue RISCVTargetLowering::lowerEH_DWARF_CFA(SDValue Op,
                                               SelectionDAG &DAG) const {}

// Returns the opcode of the target-specific SDNode that implements the 32-bit
// form of the given Opcode.
static RISCVISD::NodeType getRISCVWOpcode(unsigned Opcode) {}

// Converts the given i8/i16/i32 operation to a target-specific SelectionDAG
// node. Because i8/i16/i32 isn't a legal type for RV64, these operations would
// otherwise be promoted to i64, making it difficult to select the
// SLLW/DIVUW/.../*W later on because the fact that the operation was
// originally of type i8/i16/i32 is lost.
static SDValue customLegalizeToWOp(SDNode *N, SelectionDAG &DAG,
                                   unsigned ExtOpc = ISD::ANY_EXTEND) {}
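
// For example, an i32 shift left on RV64:
//   (i32 (shl x, y)) -> (i64 (RISCVISD::SLLW (any_extend x), (any_extend y)))
// which selects to sllw, instead of being promoted to a plain i64 shift that
// no longer looks like a 32-bit operation.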

// Converts the given 32-bit operation to an i64 operation with sign extension
// semantics to reduce the number of sign extension instructions.
static SDValue customLegalizeToWOpWithSExt(SDNode *N, SelectionDAG &DAG) {}

void RISCVTargetLowering::ReplaceNodeResults(SDNode *N,
                                             SmallVectorImpl<SDValue> &Results,
                                             SelectionDAG &DAG) const {}

/// Given a binary operator, return the *associative* generic ISD::VECREDUCE_OP
/// which corresponds to it.
static unsigned getVecReduceOpcode(unsigned Opc) {}

/// Perform two related transforms whose purpose is to incrementally recognize
/// an explode_vector followed by scalar reduction as a vector reduction node.
/// This exists to recover from a deficiency in SLP which can't handle
/// forests with multiple roots sharing common nodes.  In some cases, one
/// of the trees will be vectorized, and the other will remain (unprofitably)
/// scalarized.
static SDValue
combineBinOpOfExtractToReduceTree(SDNode *N, SelectionDAG &DAG,
                                  const RISCVSubtarget &Subtarget) {}

// Try to fold (<bop> x, (reduction.<bop> vec, start))
static SDValue combineBinOpToReduce(SDNode *N, SelectionDAG &DAG,
                                    const RISCVSubtarget &Subtarget) {}

// Optimize (add (shl x, c0), (shl y, c1)) ->
//          (SLLI (SH*ADD x, y), c0), if c1-c0 equals 1, 2, or 3.
static SDValue transformAddShlImm(SDNode *N, SelectionDAG &DAG,
                                  const RISCVSubtarget &Subtarget) {}
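
// For example, with c0 = 1 and c1 = 3 (difference 2):
//   (add (shl x, 1), (shl y, 3)) = ((x + (y << 2)) << 1)
// i.e. a single sh2add followed by an slli by 1.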

// Combine a constant select operand into its use:
//
// (and (select cond, -1, c), x)
//   -> (select cond, x, (and x, c))  [AllOnes=1]
// (or  (select cond, 0, c), x)
//   -> (select cond, x, (or x, c))  [AllOnes=0]
// (xor (select cond, 0, c), x)
//   -> (select cond, x, (xor x, c))  [AllOnes=0]
// (add (select cond, 0, c), x)
//   -> (select cond, x, (add x, c))  [AllOnes=0]
// (sub x, (select cond, 0, c))
//   -> (select cond, x, (sub x, c))  [AllOnes=0]
static SDValue combineSelectAndUse(SDNode *N, SDValue Slct, SDValue OtherOp,
                                   SelectionDAG &DAG, bool AllOnes,
                                   const RISCVSubtarget &Subtarget) {}

// Attempt combineSelectAndUse on each operand of a commutative operator N.
static SDValue combineSelectAndUseCommutative(SDNode *N, SelectionDAG &DAG,
                                              bool AllOnes,
                                              const RISCVSubtarget &Subtarget) {}

// Transform (add (mul x, c0), c1) ->
//           (add (mul (add x, c1/c0), c0), c1%c0).
// if c1/c0 and c1%c0 are simm12, while c1 is not. A special corner case
// that should be excluded is when c0*(c1/c0) is simm12, which will lead
// to an infinite loop in DAGCombine if transformed.
// Or transform (add (mul x, c0), c1) ->
//              (add (mul (add x, c1/c0+1), c0), c1%c0-c0),
// if c1/c0+1 and c1%c0-c0 are simm12, while c1 is not. A special corner
// case that should be excluded is when c0*(c1/c0+1) is simm12, which will
// lead to an infinite loop in DAGCombine if transformed.
// Or transform (add (mul x, c0), c1) ->
//              (add (mul (add x, c1/c0-1), c0), c1%c0+c0),
// if c1/c0-1 and c1%c0+c0 are simm12, while c1 is not. A special corner
// case that should be excluded is when c0*(c1/c0-1) is simm12, which will
// lead to an infinite loop in DAGCombine if transformed.
// Or transform (add (mul x, c0), c1) ->
//              (mul (add x, c1/c0), c0).
// if c1%c0 is zero, and c1/c0 is simm12 while c1 is not.
static SDValue transformAddImmMulImm(SDNode *N, SelectionDAG &DAG,
                                     const RISCVSubtarget &Subtarget) {}
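
// For example, with c0 = 100 and c1 = 4100 (not a simm12): c1/c0 = 41 and
// c1%c0 = 0, so
//   (add (mul x, 100), 4100) -> (mul (add x, 41), 100)
// where both new constants are simm12.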

// add (zext, zext) -> zext (add (zext, zext))
// sub (zext, zext) -> sext (sub (zext, zext))
// mul (zext, zext) -> zext (mul (zext, zext))
// sdiv (zext, zext) -> zext (sdiv (zext, zext))
// udiv (zext, zext) -> zext (udiv (zext, zext))
// srem (zext, zext) -> zext (srem (zext, zext))
// urem (zext, zext) -> zext (urem (zext, zext))
//
// where the sum of the extend widths match, and the range of the bin op
// fits inside the width of the narrower bin op. (For profitability on rvv, we
// use a power of two for both inner and outer extend.)
static SDValue combineBinOpOfZExt(SDNode *N, SelectionDAG &DAG) {}

// Try to turn (add (xor bool, 1), -1) into (neg bool).
static SDValue combineAddOfBooleanXor(SDNode *N, SelectionDAG &DAG) {}

static SDValue performADDCombine(SDNode *N,
                                 TargetLowering::DAGCombinerInfo &DCI,
                                 const RISCVSubtarget &Subtarget) {}

// Try to turn a sub boolean RHS and constant LHS into an addi.
static SDValue combineSubOfBoolean(SDNode *N, SelectionDAG &DAG) {}

// Looks for (sub (shl X, 8), X) where only bits 8, 16, 24, 32, etc. of X are
// non-zero. Replace with orc.b.
static SDValue combineSubShiftToOrcB(SDNode *N, SelectionDAG &DAG,
                                     const RISCVSubtarget &Subtarget) {}
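
// For example, if only bit 8 of X is set (X = 0x100):
//   (X << 8) - X = 0x10000 - 0x100 = 0xFF00
// which matches orc.b X: byte 1 of X is non-zero, so it becomes 0xFF, and
// every other byte stays 0.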

static SDValue performSUBCombine(SDNode *N, SelectionDAG &DAG,
                                 const RISCVSubtarget &Subtarget) {}

// Apply DeMorgan's law to (and/or (xor X, 1), (xor Y, 1)) if X and Y are 0/1.
// Legalizing setcc can introduce xors like this. Doing this transform reduces
// the number of xors and may allow the xor to fold into a branch condition.
static SDValue combineDeMorganOfBoolean(SDNode *N, SelectionDAG &DAG) {}

// Fold (vXi8 (trunc (vselect (setltu, X, 256), X, (sext (setgt X, 0))))) to
// (vXi8 (trunc (smin (smax X, 0), 255))). This represents saturating a signed
// value to an unsigned value. This will be lowered to vmax and a series of
// vnclipu instructions later. This can be extended to truncated types other
// than i8 by replacing 256 and 255 with the equivalent constants for the
// type.
static SDValue combineTruncSelectToSMaxUSat(SDNode *N, SelectionDAG &DAG) {}

static SDValue performTRUNCATECombine(SDNode *N, SelectionDAG &DAG,
                                      const RISCVSubtarget &Subtarget) {}

// Combines two comparison operations and a logic operation into one selection
// operation (min, max) and a logic operation. Returns the newly constructed
// node if the conditions for the optimization are satisfied.
static SDValue performANDCombine(SDNode *N,
                                 TargetLowering::DAGCombinerInfo &DCI,
                                 const RISCVSubtarget &Subtarget) {}

// Try to pull an xor with 1 through a select idiom that uses czero_eqz/nez.
// FIXME: Generalize to other binary operators with same operand.
static SDValue combineOrOfCZERO(SDNode *N, SDValue N0, SDValue N1,
                                SelectionDAG &DAG) {}

static SDValue performORCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
                                const RISCVSubtarget &Subtarget) {}

static SDValue performXORCombine(SDNode *N, SelectionDAG &DAG,
                                 const RISCVSubtarget &Subtarget) {}

// Try to expand a scalar multiply to a faster sequence.
static SDValue expandMul(SDNode *N, SelectionDAG &DAG,
                         TargetLowering::DAGCombinerInfo &DCI,
                         const RISCVSubtarget &Subtarget) {}

// Combine vXi32 (mul (and (lshr X, 15), 0x10001), 0xffff) ->
// (bitcast (sra (v2Xi16 (bitcast X)), 15))
// Same for other equivalent types with other equivalent constants.
static SDValue combineVectorMulToSraBitcast(SDNode *N, SelectionDAG &DAG) {}
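
// This works because (and (lshr X, 15), 0x10001) extracts the sign bit of
// each i16 half into bit 0 and bit 16, and multiplying by 0xffff replicates
// each of those bits across its own 16-bit half, which is exactly an
// arithmetic shift right by 15 of each i16 lane.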

static SDValue performMULCombine(SDNode *N, SelectionDAG &DAG,
                                 TargetLowering::DAGCombinerInfo &DCI,
                                 const RISCVSubtarget &Subtarget) {}

/// Because indexed load/store instructions zero-extend their indices, try to
/// narrow the type of the index operand.
static bool narrowIndex(SDValue &N, ISD::MemIndexType IndexType,
                        SelectionDAG &DAG) {}

static SDValue performSETCCCombine(SDNode *N, SelectionDAG &DAG,
                                   const RISCVSubtarget &Subtarget) {}

static SDValue
performSIGN_EXTEND_INREGCombine(SDNode *N, SelectionDAG &DAG,
                                const RISCVSubtarget &Subtarget) {}

struct CombineResult;

enum ExtKind : uint8_t {};

struct NodeExtensionHelper {};

struct CombineResult {};

static std::optional<CombineResult>
canFoldToVWWithSameExtensionImpl(SDNode *Root, const NodeExtensionHelper &LHS,
                                 const NodeExtensionHelper &RHS,
                                 uint8_t AllowExtMask, SelectionDAG &DAG,
                                 const RISCVSubtarget &Subtarget) {}

static std::optional<CombineResult>
canFoldToVWWithSameExtension(SDNode *Root, const NodeExtensionHelper &LHS,
                             const NodeExtensionHelper &RHS, SelectionDAG &DAG,
                             const RISCVSubtarget &Subtarget) {}

static std::optional<CombineResult>
canFoldToVW_W(SDNode *Root, const NodeExtensionHelper &LHS,
              const NodeExtensionHelper &RHS, SelectionDAG &DAG,
              const RISCVSubtarget &Subtarget) {}

static std::optional<CombineResult>
canFoldToVWWithSEXT(SDNode *Root, const NodeExtensionHelper &LHS,
                    const NodeExtensionHelper &RHS, SelectionDAG &DAG,
                    const RISCVSubtarget &Subtarget) {}

static std::optional<CombineResult>
canFoldToVWWithZEXT(SDNode *Root, const NodeExtensionHelper &LHS,
                    const NodeExtensionHelper &RHS, SelectionDAG &DAG,
                    const RISCVSubtarget &Subtarget) {}

static std::optional<CombineResult>
canFoldToVWWithFPEXT(SDNode *Root, const NodeExtensionHelper &LHS,
                     const NodeExtensionHelper &RHS, SelectionDAG &DAG,
                     const RISCVSubtarget &Subtarget) {}

static std::optional<CombineResult>
canFoldToVW_SU(SDNode *Root, const NodeExtensionHelper &LHS,
               const NodeExtensionHelper &RHS, SelectionDAG &DAG,
               const RISCVSubtarget &Subtarget) {}

SmallVector<NodeExtensionHelper::CombineToTry>
NodeExtensionHelper::getSupportedFoldings(const SDNode *Root) {}

static SDValue combineOp_VLToVWOp_VL(SDNode *N,
                                     TargetLowering::DAGCombinerInfo &DCI,
                                     const RISCVSubtarget &Subtarget) {}

static SDValue combineVWADDSUBWSelect(SDNode *N, SelectionDAG &DAG) {}

static SDValue performVWADDSUBW_VLCombine(SDNode *N,
                                          TargetLowering::DAGCombinerInfo &DCI,
                                          const RISCVSubtarget &Subtarget) {}

static SDValue tryMemPairCombine(SelectionDAG &DAG, LSBaseSDNode *LSNode1,
                                 LSBaseSDNode *LSNode2, SDValue BasePtr,
                                 uint64_t Imm) {}

static SDValue performMemPairCombine(SDNode *N,
                                     TargetLowering::DAGCombinerInfo &DCI) {}

static SDValue performFP_TO_INTCombine(SDNode *N,
                                       TargetLowering::DAGCombinerInfo &DCI,
                                       const RISCVSubtarget &Subtarget) {}

static SDValue performFP_TO_INT_SATCombine(SDNode *N,
                                       TargetLowering::DAGCombinerInfo &DCI,
                                       const RISCVSubtarget &Subtarget) {}

static SDValue performBITREVERSECombine(SDNode *N, SelectionDAG &DAG,
                                        const RISCVSubtarget &Subtarget) {}

static unsigned negateFMAOpcode(unsigned Opcode, bool NegMul, bool NegAcc) {}

static SDValue combineVFMADD_VLWithVFNEG_VL(SDNode *N, SelectionDAG &DAG) {}

static SDValue performVFMADD_VLCombine(SDNode *N,
                                       TargetLowering::DAGCombinerInfo &DCI,
                                       const RISCVSubtarget &Subtarget) {}

static SDValue performSRACombine(SDNode *N, SelectionDAG &DAG,
                                 const RISCVSubtarget &Subtarget) {}

static SDValue tryDemorganOfBooleanCondition(SDValue Cond, SelectionDAG &DAG) {}

static bool combine_CC(SDValue &LHS, SDValue &RHS, SDValue &CC, const SDLoc &DL,
                       SelectionDAG &DAG, const RISCVSubtarget &Subtarget) {}

static SDValue tryFoldSelectIntoOp(SDNode *N, SelectionDAG &DAG,
                                   SDValue TrueVal, SDValue FalseVal,
                                   bool Swapped) {}

static SDValue foldSelectOfCTTZOrCTLZ(SDNode *N, SelectionDAG &DAG) {}

static SDValue useInversedSetcc(SDNode *N, SelectionDAG &DAG,
                                const RISCVSubtarget &Subtarget) {}

static SDValue performSELECTCombine(SDNode *N, SelectionDAG &DAG,
                                    const RISCVSubtarget &Subtarget) {}

static SDValue performBUILD_VECTORCombine(SDNode *N, SelectionDAG &DAG,
                                          const RISCVSubtarget &Subtarget,
                                          const RISCVTargetLowering &TLI) {}

static SDValue performINSERT_VECTOR_ELTCombine(SDNode *N, SelectionDAG &DAG,
                                               const RISCVSubtarget &Subtarget,
                                               const RISCVTargetLowering &TLI) {}

static SDValue performCONCAT_VECTORSCombine(SDNode *N, SelectionDAG &DAG,
                                            const RISCVSubtarget &Subtarget,
                                            const RISCVTargetLowering &TLI) {}

static SDValue combineToVWMACC(SDNode *N, SelectionDAG &DAG,
                               const RISCVSubtarget &Subtarget) {}

static bool
legalizeScatterGatherIndexType(SDLoc DL, SDValue &Index,
                               ISD::MemIndexType &IndexType,
                               RISCVTargetLowering::DAGCombinerInfo &DCI) {}

static bool matchIndexAsShuffle(EVT VT, SDValue Index, SDValue Mask,
                                SmallVector<int> &ShuffleMask) {}

static bool matchIndexAsWiderOp(EVT VT, SDValue Index, SDValue Mask,
                                Align BaseAlign, const RISCVSubtarget &ST) {}

static SDValue combineTruncOfSraSext(SDNode *N, SelectionDAG &DAG) {}

static SDValue combineTruncToVnclip(SDNode *N, SelectionDAG &DAG,
                                    const RISCVSubtarget &Subtarget) {}

SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
                                               DAGCombinerInfo &DCI) const {}

bool RISCVTargetLowering::shouldTransformSignedTruncationCheck(
    EVT XVT, unsigned KeptBits) const {}

bool RISCVTargetLowering::isDesirableToCommuteWithShift(
    const SDNode *N, CombineLevel Level) const {}

bool RISCVTargetLowering::targetShrinkDemandedConstant(
    SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts,
    TargetLoweringOpt &TLO) const {}

static uint64_t computeGREVOrGORC(uint64_t x, unsigned ShAmt, bool IsGORC) {}

void RISCVTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
                                                        KnownBits &Known,
                                                        const APInt &DemandedElts,
                                                        const SelectionDAG &DAG,
                                                        unsigned Depth) const {}

unsigned RISCVTargetLowering::ComputeNumSignBitsForTargetNode(
    SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG,
    unsigned Depth) const {}

bool RISCVTargetLowering::canCreateUndefOrPoisonForTargetNode(
    SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG,
    bool PoisonOnly, bool ConsiderFlags, unsigned Depth) const {}

const Constant *
RISCVTargetLowering::getTargetConstantFromLoad(LoadSDNode *Ld) const {}

static MachineBasicBlock *emitReadCounterWidePseudo(MachineInstr &MI,
                                                    MachineBasicBlock *BB) {}

static MachineBasicBlock *emitSplitF64Pseudo(MachineInstr &MI,
                                             MachineBasicBlock *BB,
                                             const RISCVSubtarget &Subtarget) {}

static MachineBasicBlock *emitBuildPairF64Pseudo(MachineInstr &MI,
                                                 MachineBasicBlock *BB,
                                                 const RISCVSubtarget &Subtarget) {}

static bool isSelectPseudo(MachineInstr &MI) {}

static MachineBasicBlock *emitQuietFCMP(MachineInstr &MI, MachineBasicBlock *BB,
                                        unsigned RelOpcode, unsigned EqOpcode,
                                        const RISCVSubtarget &Subtarget) {}

static MachineBasicBlock *
EmitLoweredCascadedSelect(MachineInstr &First, MachineInstr &Second,
                          MachineBasicBlock *ThisMBB,
                          const RISCVSubtarget &Subtarget) {}

static MachineBasicBlock *emitSelectPseudo(MachineInstr &MI,
                                           MachineBasicBlock *BB,
                                           const RISCVSubtarget &Subtarget) {}

static const RISCV::RISCVMaskedPseudoInfo *
lookupMaskedIntrinsic(uint16_t MCOpcode, RISCVII::VLMUL LMul, unsigned SEW) {}

static MachineBasicBlock *emitVFROUND_NOEXCEPT_MASK(MachineInstr &MI,
                                                    MachineBasicBlock *BB,
                                                    unsigned CVTXOpc) {}

static MachineBasicBlock *emitFROUND(MachineInstr &MI, MachineBasicBlock *MBB,
                                     const RISCVSubtarget &Subtarget) {}

MachineBasicBlock *
RISCVTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
                                                 MachineBasicBlock *BB) const {}

void RISCVTargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI,
                                                        SDNode *Node) const {}

void RISCVTargetLowering::analyzeInputArgs(
    MachineFunction &MF, CCState &CCInfo,
    const SmallVectorImpl<ISD::InputArg> &Ins, bool IsRet,
    RISCVCCAssignFn Fn) const {}

void RISCVTargetLowering::analyzeOutputArgs(
    MachineFunction &MF, CCState &CCInfo,
    const SmallVectorImpl<ISD::OutputArg> &Outs, bool IsRet,
    CallLoweringInfo *CLI, RISCVCCAssignFn Fn) const {}

static SDValue convertLocVTToValVT(SelectionDAG &DAG, SDValue Val,
                                   const CCValAssign &VA, const SDLoc &DL,
                                   const RISCVSubtarget &Subtarget) {}

static SDValue unpackFromRegLoc(SelectionDAG &DAG, SDValue Chain,
                                const CCValAssign &VA, const SDLoc &DL,
                                const ISD::InputArg &In,
                                const RISCVTargetLowering &TLI) {}

static SDValue convertValVTToLocVT(SelectionDAG &DAG, SDValue Val,
                                   const CCValAssign &VA, const SDLoc &DL,
                                   const RISCVSubtarget &Subtarget) {}

static SDValue unpackFromMemLoc(SelectionDAG &DAG, SDValue Chain,
                                const CCValAssign &VA, const SDLoc &DL) {}

static SDValue unpackF64OnRV32DSoftABI(SelectionDAG &DAG, SDValue Chain,
                                       const CCValAssign &VA,
                                       const CCValAssign &HiVA,
                                       const SDLoc &DL) {}

SDValue RISCVTargetLowering::LowerFormalArguments(
    SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {}

bool RISCVTargetLowering::isEligibleForTailCallOptimization(
    CCState &CCInfo, CallLoweringInfo &CLI, MachineFunction &MF,
    const SmallVector<CCValAssign, 16> &ArgLocs) const {}

static Align getPrefTypeAlign(EVT VT, SelectionDAG &DAG) {}

SDValue RISCVTargetLowering::LowerCall(CallLoweringInfo &CLI,
                                       SmallVectorImpl<SDValue> &InVals) const {}

bool RISCVTargetLowering::CanLowerReturn(
    CallingConv::ID CallConv, MachineFunction &MF, bool IsVarArg,
    const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context) const {}

SDValue
RISCVTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
                                 bool IsVarArg,
                                 const SmallVectorImpl<ISD::OutputArg> &Outs,
                                 const SmallVectorImpl<SDValue> &OutVals,
                                 const SDLoc &DL, SelectionDAG &DAG) const {}

void RISCVTargetLowering::validateCCReservedRegs(
    const SmallVectorImpl<std::pair<llvm::Register, llvm::SDValue>> &Regs,
    MachineFunction &MF) const {}

bool RISCVTargetLowering::isUsedByReturnOnly(SDNode *N, SDValue &Chain) const {}

bool RISCVTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {}

const char *RISCVTargetLowering::getTargetNodeName(unsigned Opcode) const {}

RISCVTargetLowering::ConstraintType
RISCVTargetLowering::getConstraintType(StringRef Constraint) const {}

std::pair<unsigned, const TargetRegisterClass *>
RISCVTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                                                  StringRef Constraint,
                                                  MVT VT) const {}

InlineAsm::ConstraintCode
RISCVTargetLowering::getInlineAsmMemConstraint(StringRef ConstraintCode) const {}

void RISCVTargetLowering::LowerAsmOperandForConstraint(
    SDValue Op, StringRef Constraint, std::vector<SDValue> &Ops,
    SelectionDAG &DAG) const {}

Instruction *RISCVTargetLowering::emitLeadingFence(IRBuilderBase &Builder,
                                                   Instruction *Inst,
                                                   AtomicOrdering Ord) const {}

Instruction *RISCVTargetLowering::emitTrailingFence(IRBuilderBase &Builder,
                                                    Instruction *Inst,
                                                    AtomicOrdering Ord) const {}

TargetLowering::AtomicExpansionKind
RISCVTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {}

static Intrinsic::ID
getIntrinsicForMaskedAtomicRMWBinOp(unsigned XLen, AtomicRMWInst::BinOp BinOp) {}

Value *RISCVTargetLowering::emitMaskedAtomicRMWIntrinsic(
    IRBuilderBase &Builder, AtomicRMWInst *AI, Value *AlignedAddr, Value *Incr,
    Value *Mask, Value *ShiftAmt, AtomicOrdering Ord) const {}

TargetLowering::AtomicExpansionKind
RISCVTargetLowering::shouldExpandAtomicCmpXchgInIR(
    AtomicCmpXchgInst *CI) const {}

Value *RISCVTargetLowering::emitMaskedAtomicCmpXchgIntrinsic(
    IRBuilderBase &Builder, AtomicCmpXchgInst *CI, Value *AlignedAddr,
    Value *CmpVal, Value *NewVal, Value *Mask, AtomicOrdering Ord) const {}

bool RISCVTargetLowering::shouldRemoveExtendFromGSIndex(SDValue Extend,
                                                        EVT DataVT) const {}

bool RISCVTargetLowering::shouldConvertFpToSat(unsigned Op, EVT FPVT,
                                               EVT VT) const {}

unsigned RISCVTargetLowering::getJumpTableEncoding() const {}

const MCExpr *RISCVTargetLowering::LowerCustomJumpTableEntry(
    const MachineJumpTableInfo *MJTI, const MachineBasicBlock *MBB,
    unsigned uid, MCContext &Ctx) const {}

bool RISCVTargetLowering::isVScaleKnownToBeAPowerOfTwo() const {}

bool RISCVTargetLowering::getIndexedAddressParts(SDNode *Op, SDValue &Base,
                                                 SDValue &Offset,
                                                 ISD::MemIndexedMode &AM,
                                                 SelectionDAG &DAG) const {}

bool RISCVTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base,
                                                    SDValue &Offset,
                                                    ISD::MemIndexedMode &AM,
                                                    SelectionDAG &DAG) const {}

bool RISCVTargetLowering::getPostIndexedAddressParts(SDNode *N, SDNode *Op,
                                                     SDValue &Base,
                                                     SDValue &Offset,
                                                     ISD::MemIndexedMode &AM,
                                                     SelectionDAG &DAG) const {}

bool RISCVTargetLowering::isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
                                                     EVT VT) const {}

ISD::NodeType RISCVTargetLowering::getExtendForAtomicCmpSwapArg() const {}

Register RISCVTargetLowering::getExceptionPointerRegister(
    const Constant *PersonalityFn) const {}

Register RISCVTargetLowering::getExceptionSelectorRegister(
    const Constant *PersonalityFn) const {}

bool RISCVTargetLowering::shouldExtendTypeInLibCall(EVT Type) const {}

bool RISCVTargetLowering::shouldSignExtendTypeInLibCall(EVT Type,
                                                        bool IsSigned) const {}

bool RISCVTargetLowering::decomposeMulByConstant(LLVMContext &Context, EVT VT,
                                                 SDValue C) const {}

bool RISCVTargetLowering::isMulAddWithConstProfitable(SDValue AddNode,
                                                      SDValue ConstNode) const {}

bool RISCVTargetLowering::allowsMisalignedMemoryAccesses(
    EVT VT, unsigned AddrSpace, Align Alignment, MachineMemOperand::Flags Flags,
    unsigned *Fast) const {}

EVT RISCVTargetLowering::getOptimalMemOpType(
    const MemOp &Op, const AttributeList &FuncAttributes) const {}

bool RISCVTargetLowering::splitValueIntoRegisterParts(
    SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts,
    unsigned NumParts, MVT PartVT, std::optional<CallingConv::ID> CC) const {}

SDValue RISCVTargetLowering::joinRegisterPartsIntoValue(
    SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts, unsigned NumParts,
    MVT PartVT, EVT ValueVT, std::optional<CallingConv::ID> CC) const {}

bool RISCVTargetLowering::isIntDivCheap(EVT VT, AttributeList Attr) const {}

bool RISCVTargetLowering::preferScalarizeSplat(SDNode *N) const {}

static Value *useTpOffset(IRBuilderBase &IRB, unsigned Offset) {}

Value *RISCVTargetLowering::getIRStackGuard(IRBuilderBase &IRB) const {}

bool RISCVTargetLowering::isLegalInterleavedAccessType(
    VectorType *VTy, unsigned Factor, Align Alignment, unsigned AddrSpace,
    const DataLayout &DL) const {}

bool RISCVTargetLowering::isLegalStridedLoadStore(EVT DataType,
                                                  Align Alignment) const {}

static const Intrinsic::ID FixedVlsegIntrIds[] = {};

bool RISCVTargetLowering::lowerInterleavedLoad(
    LoadInst *LI, ArrayRef<ShuffleVectorInst *> Shuffles,
    ArrayRef<unsigned> Indices, unsigned Factor) const {}

static const Intrinsic::ID FixedVssegIntrIds[] = {};

bool RISCVTargetLowering::lowerInterleavedStore(StoreInst *SI,
                                                ShuffleVectorInst *SVI,
                                                unsigned Factor) const {}

bool RISCVTargetLowering::lowerDeinterleaveIntrinsicToLoad(
    IntrinsicInst *DI, LoadInst *LI,
    SmallVectorImpl<Instruction *> &DeadInsts) const {}

bool RISCVTargetLowering::lowerInterleaveIntrinsicToStore(
    IntrinsicInst *II, StoreInst *SI,
    SmallVectorImpl<Instruction *> &DeadInsts) const {}

MachineInstr *
RISCVTargetLowering::EmitKCFICheck(MachineBasicBlock &MBB,
                                   MachineBasicBlock::instr_iterator &MBBI,
                                   const TargetInstrInfo *TII) const {}

#define GET_REGISTER_MATCHER
#include "RISCVGenAsmMatcher.inc"

Register
RISCVTargetLowering::getRegisterByName(const char *RegName, LLT VT,
                                       const MachineFunction &MF) const {}

MachineMemOperand::Flags
RISCVTargetLowering::getTargetMMOFlags(const Instruction &I) const {}

MachineMemOperand::Flags
RISCVTargetLowering::getTargetMMOFlags(const MemSDNode &Node) const {}

bool RISCVTargetLowering::areTwoSDNodeTargetMMOFlagsMergeable(
    const MemSDNode &NodeX, const MemSDNode &NodeY) const {}

bool RISCVTargetLowering::isCtpopFast(EVT VT) const {}

unsigned RISCVTargetLowering::getCustomCtpopCost(EVT VT,
                                                 ISD::CondCode Cond) const {}

bool RISCVTargetLowering::fallBackToDAGISel(const Instruction &Inst) const {}

SDValue
RISCVTargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor,
                                   SelectionDAG &DAG,
                                   SmallVectorImpl<SDNode *> &Created) const {}

bool RISCVTargetLowering::shouldFoldSelectWithSingleBitTest(
    EVT VT, const APInt &AndMask) const {}

unsigned RISCVTargetLowering::getMinimumJumpTableEntries() const {}

SDValue RISCVTargetLowering::expandIndirectJTBranch(const SDLoc &dl,
                                                    SDValue Value, SDValue Addr,
                                                    int JTI,
                                                    SelectionDAG &DAG) const {}

#define GET_RISCVVIntrinsicsTable_IMPL
#include "RISCVGenSearchableTables.inc"