llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp

//=- LoongArchISelLowering.cpp - LoongArch DAG Lowering Implementation  ---===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that LoongArch uses to lower LLVM code into
// a selection DAG.
//
//===----------------------------------------------------------------------===//

#include "LoongArchISelLowering.h"
#include "LoongArch.h"
#include "LoongArchMachineFunctionInfo.h"
#include "LoongArchRegisterInfo.h"
#include "LoongArchSubtarget.h"
#include "LoongArchTargetMachine.h"
#include "MCTargetDesc/LoongArchBaseInfo.h"
#include "MCTargetDesc/LoongArchMCTargetDesc.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/RuntimeLibcallUtil.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/IntrinsicsLoongArch.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"

using namespace llvm;

#define DEBUG_TYPE "loongarch-isel-lowering"

STATISTIC(NumTailCalls, "Number of tail calls");

static cl::opt<bool> ZeroDivCheck("loongarch-check-zero-division", cl::Hidden,
                                  cl::desc("Trap on integer division by zero."),
                                  cl::init(false));

LoongArchTargetLowering::LoongArchTargetLowering(const TargetMachine &TM,
                                                 const LoongArchSubtarget &STI)
    :{}

bool LoongArchTargetLowering::isOffsetFoldingLegal(
    const GlobalAddressSDNode *GA) const {}

SDValue LoongArchTargetLowering::LowerOperation(SDValue Op,
                                                SelectionDAG &DAG) const {}

/// Determine whether a range fits a regular pattern of values.
/// This function accounts for the possibility of jumping over the End iterator.
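///
/// For example (illustrative), a call along the lines of
///   fitsRegularPattern<int>(Mask.begin(), 2, Mask.end(), 0, 2)
/// would check that every second mask element starting at Mask.begin() is
/// 0, 2, 4, ... (i.e. starts at ExpectedIndex and advances by
/// ExpectedIndexStride), with undef (-1) elements accepted anywhere.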
template <typename ValType>
static bool
fitsRegularPattern(typename SmallVectorImpl<ValType>::const_iterator Begin,
                   unsigned CheckStride,
                   typename SmallVectorImpl<ValType>::const_iterator End,
                   ValType ExpectedIndex, unsigned ExpectedIndexStride) {}

/// Lower VECTOR_SHUFFLE into VREPLVEI (if possible).
///
/// VREPLVEI performs vector broadcast based on an element specified by an
/// integer immediate, with its mask being similar to:
///   <x, x, x, ...>
/// where x is any valid index.
///
/// When undef's appear in the mask they are treated as if they were whatever
/// value is necessary in order to fit the above form.
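///
/// For example (an illustrative case, not taken from a test):
///   %2 = shufflevector <4 x i32> %0, <4 x i32> undef,
///                      <4 x i32> <i32 1, i32 1, i32 1, i32 1>
/// has the mask <1, 1, 1, 1> and is a candidate for something like
///   (VREPLVEI_W $v0, 1)
/// which broadcasts element 1 of $v0 to all lanes.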
static SDValue lowerVECTOR_SHUFFLE_VREPLVEI(const SDLoc &DL, ArrayRef<int> Mask,
                                            MVT VT, SDValue V1, SDValue V2,
                                            SelectionDAG &DAG) {}

/// Lower VECTOR_SHUFFLE into VSHUF4I (if possible).
///
/// VSHUF4I splits the vector into blocks of four elements, then shuffles these
/// elements according to a <4 x i2> constant (encoded as an integer immediate).
///
/// It is therefore possible to lower into VSHUF4I when the mask takes the form:
///   <a, b, c, d, a+4, b+4, c+4, d+4, a+8, b+8, c+8, d+8, ...>
/// When undef's appear they are treated as if they were whatever value is
/// necessary in order to fit the above forms.
///
/// For example:
///   %2 = shufflevector <8 x i16> %0, <8 x i16> undef,
///                      <8 x i32> <i32 3, i32 2, i32 1, i32 0,
///                                 i32 7, i32 6, i32 5, i32 4>
/// is lowered to:
///   (VSHUF4I_H $v0, $v1, 27)
/// where the 27 comes from:
///   3 + (2 << 2) + (1 << 4) + (0 << 6)
static SDValue lowerVECTOR_SHUFFLE_VSHUF4I(const SDLoc &DL, ArrayRef<int> Mask,
                                           MVT VT, SDValue V1, SDValue V2,
                                           SelectionDAG &DAG) {}

/// Lower VECTOR_SHUFFLE into VPACKEV (if possible).
///
/// VPACKEV interleaves the even elements from each vector.
///
/// It is possible to lower into VPACKEV when the mask consists of two of the
/// following forms interleaved:
///   <0, 2, 4, ...>
///   <n, n+2, n+4, ...>
/// where n is the number of elements in the vector.
/// For example:
///   <0, 0, 2, 2, 4, 4, ...>
///   <0, n, 2, n+2, 4, n+4, ...>
///
/// When undef's appear in the mask they are treated as if they were whatever
/// value is necessary in order to fit the above forms.
static SDValue lowerVECTOR_SHUFFLE_VPACKEV(const SDLoc &DL, ArrayRef<int> Mask,
                                           MVT VT, SDValue V1, SDValue V2,
                                           SelectionDAG &DAG) {}

/// Lower VECTOR_SHUFFLE into VPACKOD (if possible).
///
/// VPACKOD interleaves the odd elements from each vector.
///
/// It is possible to lower into VPACKOD when the mask consists of two of the
/// following forms interleaved:
///   <1, 3, 5, ...>
///   <n+1, n+3, n+5, ...>
/// where n is the number of elements in the vector.
/// For example:
///   <1, 1, 3, 3, 5, 5, ...>
///   <1, n+1, 3, n+3, 5, n+5, ...>
///
/// When undef's appear in the mask they are treated as if they were whatever
/// value is necessary in order to fit the above forms.
static SDValue lowerVECTOR_SHUFFLE_VPACKOD(const SDLoc &DL, ArrayRef<int> Mask,
                                           MVT VT, SDValue V1, SDValue V2,
                                           SelectionDAG &DAG) {}

/// Lower VECTOR_SHUFFLE into VILVH (if possible).
///
/// VILVH interleaves consecutive elements from the left (highest-indexed) half
/// of each vector.
///
/// It is possible to lower into VILVH when the mask consists of two of the
/// following forms interleaved:
///   <x, x+1, x+2, ...>
///   <n+x, n+x+1, n+x+2, ...>
/// where n is the number of elements in the vector and x is half n.
/// For example:
///   <x, x, x+1, x+1, x+2, x+2, ...>
///   <x, n+x, x+1, n+x+1, x+2, n+x+2, ...>
///
/// When undef's appear in the mask they are treated as if they were whatever
/// value is necessary in order to fit the above forms.
static SDValue lowerVECTOR_SHUFFLE_VILVH(const SDLoc &DL, ArrayRef<int> Mask,
                                         MVT VT, SDValue V1, SDValue V2,
                                         SelectionDAG &DAG) {}

/// Lower VECTOR_SHUFFLE into VILVL (if possible).
///
/// VILVL interleaves consecutive elements from the right (lowest-indexed) half
/// of each vector.
///
/// It is possible to lower into VILVL when the mask consists of two of the
/// following forms interleaved:
///   <0, 1, 2, ...>
///   <n, n+1, n+2, ...>
/// where n is the number of elements in the vector.
/// For example:
///   <0, 0, 1, 1, 2, 2, ...>
///   <0, n, 1, n+1, 2, n+2, ...>
///
/// When undef's appear in the mask they are treated as if they were whatever
/// value is necessary in order to fit the above forms.
static SDValue lowerVECTOR_SHUFFLE_VILVL(const SDLoc &DL, ArrayRef<int> Mask,
                                         MVT VT, SDValue V1, SDValue V2,
                                         SelectionDAG &DAG) {}

/// Lower VECTOR_SHUFFLE into VPICKEV (if possible).
///
/// VPICKEV copies the even elements of each vector into the result vector.
///
/// It is possible to lower into VPICKEV when the mask consists of two of the
/// following forms concatenated:
///   <0, 2, 4, ...>
///   <n, n+2, n+4, ...>
/// where n is the number of elements in the vector.
/// For example:
///   <0, 2, 4, ..., 0, 2, 4, ...>
///   <0, 2, 4, ..., n, n+2, n+4, ...>
///
/// When undef's appear in the mask they are treated as if they were whatever
/// value is necessary in order to fit the above forms.
static SDValue lowerVECTOR_SHUFFLE_VPICKEV(const SDLoc &DL, ArrayRef<int> Mask,
                                           MVT VT, SDValue V1, SDValue V2,
                                           SelectionDAG &DAG) {}

/// Lower VECTOR_SHUFFLE into VPICKOD (if possible).
///
/// VPICKOD copies the odd elements of each vector into the result vector.
///
/// It is possible to lower into VPICKOD when the mask consists of two of the
/// following forms concatenated:
///   <1, 3, 5, ...>
///   <n+1, n+3, n+5, ...>
/// where n is the number of elements in the vector.
/// For example:
///   <1, 3, 5, ..., 1, 3, 5, ...>
///   <1, 3, 5, ..., n+1, n+3, n+5, ...>
///
/// When undef's appear in the mask they are treated as if they were whatever
/// value is necessary in order to fit the above forms.
static SDValue lowerVECTOR_SHUFFLE_VPICKOD(const SDLoc &DL, ArrayRef<int> Mask,
                                           MVT VT, SDValue V1, SDValue V2,
                                           SelectionDAG &DAG) {}

/// Lower VECTOR_SHUFFLE into VSHUF.
///
/// This mostly consists of converting the shuffle mask into a BUILD_VECTOR and
/// adding it as an operand to the resulting VSHUF.
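///
/// As a rough sketch: for a mask such as <0, 9, 2, 11, 4, 13, 6, 15>, the
/// indices are materialized into a BUILD_VECTOR that becomes the control
/// operand of the VSHUF node; the order of V1/V2 in the node may need to be
/// swapped so that the index space matches the instruction's view of the
/// concatenated inputs.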
static SDValue lowerVECTOR_SHUFFLE_VSHUF(const SDLoc &DL, ArrayRef<int> Mask,
                                         MVT VT, SDValue V1, SDValue V2,
                                         SelectionDAG &DAG) {}

/// Dispatching routine to lower various 128-bit LoongArch vector shuffles.
///
/// This routine breaks down the specific type of 128-bit shuffle and
/// dispatches to the lowering routines accordingly.
static SDValue lower128BitShuffle(const SDLoc &DL, ArrayRef<int> Mask, MVT VT,
                                  SDValue V1, SDValue V2, SelectionDAG &DAG) {}

/// Lower VECTOR_SHUFFLE into XVREPLVEI (if possible).
///
/// It is an XVREPLVEI when the mask is:
///   <x, x, x, ..., x+n, x+n, x+n, ...>
/// where x appears n times and n is half the length of the vector.
///
/// When undef's appear in the mask they are treated as if they were whatever
/// value is necessary in order to fit the above form.
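///
/// For example (illustrative), for an 8-element shuffle with n = 4, the mask
///   <3, 3, 3, 3, 7, 7, 7, 7>
/// fits this form with x = 3: element 3 is broadcast within the low 128-bit
/// lane and element 7 (= x + n) within the high lane.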
static SDValue lowerVECTOR_SHUFFLE_XVREPLVEI(const SDLoc &DL,
                                             ArrayRef<int> Mask, MVT VT,
                                             SDValue V1, SDValue V2,
                                             SelectionDAG &DAG) {}

/// Lower VECTOR_SHUFFLE into XVSHUF4I (if possible).
static SDValue lowerVECTOR_SHUFFLE_XVSHUF4I(const SDLoc &DL, ArrayRef<int> Mask,
                                            MVT VT, SDValue V1, SDValue V2,
                                            SelectionDAG &DAG) {}

/// Lower VECTOR_SHUFFLE into XVPACKEV (if possible).
static SDValue lowerVECTOR_SHUFFLE_XVPACKEV(const SDLoc &DL, ArrayRef<int> Mask,
                                            MVT VT, SDValue V1, SDValue V2,
                                            SelectionDAG &DAG) {}

/// Lower VECTOR_SHUFFLE into XVPACKOD (if possible).
static SDValue lowerVECTOR_SHUFFLE_XVPACKOD(const SDLoc &DL, ArrayRef<int> Mask,
                                            MVT VT, SDValue V1, SDValue V2,
                                            SelectionDAG &DAG) {}

/// Lower VECTOR_SHUFFLE into XVILVH (if possible).
static SDValue lowerVECTOR_SHUFFLE_XVILVH(const SDLoc &DL, ArrayRef<int> Mask,
                                          MVT VT, SDValue V1, SDValue V2,
                                          SelectionDAG &DAG) {}

/// Lower VECTOR_SHUFFLE into XVILVL (if possible).
static SDValue lowerVECTOR_SHUFFLE_XVILVL(const SDLoc &DL, ArrayRef<int> Mask,
                                          MVT VT, SDValue V1, SDValue V2,
                                          SelectionDAG &DAG) {}

/// Lower VECTOR_SHUFFLE into XVPICKEV (if possible).
static SDValue lowerVECTOR_SHUFFLE_XVPICKEV(const SDLoc &DL, ArrayRef<int> Mask,
                                            MVT VT, SDValue V1, SDValue V2,
                                            SelectionDAG &DAG) {}

/// Lower VECTOR_SHUFFLE into XVPICKOD (if possible).
static SDValue lowerVECTOR_SHUFFLE_XVPICKOD(const SDLoc &DL, ArrayRef<int> Mask,
                                            MVT VT, SDValue V1, SDValue V2,
                                            SelectionDAG &DAG) {}

/// Lower VECTOR_SHUFFLE into XVSHUF (if possible).
static SDValue lowerVECTOR_SHUFFLE_XVSHUF(const SDLoc &DL, ArrayRef<int> Mask,
                                          MVT VT, SDValue V1, SDValue V2,
                                          SelectionDAG &DAG) {}

/// Shuffle vectors by lane to generate more optimized instructions.
/// 256-bit shuffles are always considered as 2-lane 128-bit shuffles.
///
/// Therefore, except for the following four cases, all other cases are
/// regarded as cross-lane shuffles, for which optimization is relatively
/// limited.
///
/// - Shuffle high, low lanes of two input vectors
///   <0, 1, 2, 3> + <4, 5, 6, 7> --- <0, 5, 3, 6>
/// - Shuffle low, high lanes of two input vectors
///   <0, 1, 2, 3> + <4, 5, 6, 7> --- <3, 6, 0, 5>
/// - Shuffle low, low lanes of two input vectors
///   <0, 1, 2, 3> + <4, 5, 6, 7> --- <3, 6, 3, 6>
/// - Shuffle high, high lanes of two input vectors
///   <0, 1, 2, 3> + <4, 5, 6, 7> --- <0, 5, 0, 5>
///
/// The first case is the closest to LoongArch instructions and the other
/// cases need to be converted to it for processing.
///
/// This function may modify V1, V2 and Mask.
static void canonicalizeShuffleVectorByLane(const SDLoc &DL,
                                            MutableArrayRef<int> Mask, MVT VT,
                                            SDValue &V1, SDValue &V2,
                                            SelectionDAG &DAG) {}

/// Dispatching routine to lower various 256-bit LoongArch vector shuffles.
///
/// This routine breaks down the specific type of 256-bit shuffle and
/// dispatches to the lowering routines accordingly.
static SDValue lower256BitShuffle(const SDLoc &DL, ArrayRef<int> Mask, MVT VT,
                                  SDValue V1, SDValue V2, SelectionDAG &DAG) {}

SDValue LoongArchTargetLowering::lowerVECTOR_SHUFFLE(SDValue Op,
                                                     SelectionDAG &DAG) const {}

static bool isConstantOrUndef(const SDValue Op) {}

static bool isConstantOrUndefBUILD_VECTOR(const BuildVectorSDNode *Op) {}

SDValue LoongArchTargetLowering::lowerBUILD_VECTOR(SDValue Op,
                                                   SelectionDAG &DAG) const {}

SDValue
LoongArchTargetLowering::lowerEXTRACT_VECTOR_ELT(SDValue Op,
                                                 SelectionDAG &DAG) const {}

SDValue
LoongArchTargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op,
                                                SelectionDAG &DAG) const {}

SDValue LoongArchTargetLowering::lowerATOMIC_FENCE(SDValue Op,
                                                   SelectionDAG &DAG) const {}

SDValue LoongArchTargetLowering::lowerWRITE_REGISTER(SDValue Op,
                                                     SelectionDAG &DAG) const {}

SDValue LoongArchTargetLowering::lowerFRAMEADDR(SDValue Op,
                                                SelectionDAG &DAG) const {}

SDValue LoongArchTargetLowering::lowerRETURNADDR(SDValue Op,
                                                 SelectionDAG &DAG) const {}

SDValue LoongArchTargetLowering::lowerEH_DWARF_CFA(SDValue Op,
                                                   SelectionDAG &DAG) const {}

SDValue LoongArchTargetLowering::lowerVASTART(SDValue Op,
                                              SelectionDAG &DAG) const {}

SDValue LoongArchTargetLowering::lowerUINT_TO_FP(SDValue Op,
                                                 SelectionDAG &DAG) const {}

SDValue LoongArchTargetLowering::lowerSINT_TO_FP(SDValue Op,
                                                 SelectionDAG &DAG) const {}

SDValue LoongArchTargetLowering::lowerBITCAST(SDValue Op,
                                              SelectionDAG &DAG) const {}

SDValue LoongArchTargetLowering::lowerFP_TO_SINT(SDValue Op,
                                                 SelectionDAG &DAG) const {}

static SDValue getTargetNode(GlobalAddressSDNode *N, SDLoc DL, EVT Ty,
                             SelectionDAG &DAG, unsigned Flags) {}

static SDValue getTargetNode(BlockAddressSDNode *N, SDLoc DL, EVT Ty,
                             SelectionDAG &DAG, unsigned Flags) {}

static SDValue getTargetNode(ConstantPoolSDNode *N, SDLoc DL, EVT Ty,
                             SelectionDAG &DAG, unsigned Flags) {}

static SDValue getTargetNode(JumpTableSDNode *N, SDLoc DL, EVT Ty,
                             SelectionDAG &DAG, unsigned Flags) {}

template <class NodeTy>
SDValue LoongArchTargetLowering::getAddr(NodeTy *N, SelectionDAG &DAG,
                                         CodeModel::Model M,
                                         bool IsLocal) const {}

SDValue LoongArchTargetLowering::lowerBlockAddress(SDValue Op,
                                                   SelectionDAG &DAG) const {}

SDValue LoongArchTargetLowering::lowerJumpTable(SDValue Op,
                                                SelectionDAG &DAG) const {}

SDValue LoongArchTargetLowering::lowerConstantPool(SDValue Op,
                                                   SelectionDAG &DAG) const {}

SDValue LoongArchTargetLowering::lowerGlobalAddress(SDValue Op,
                                                    SelectionDAG &DAG) const {}

SDValue LoongArchTargetLowering::getStaticTLSAddr(GlobalAddressSDNode *N,
                                                  SelectionDAG &DAG,
                                                  unsigned Opc, bool UseGOT,
                                                  bool Large) const {}

SDValue LoongArchTargetLowering::getDynamicTLSAddr(GlobalAddressSDNode *N,
                                                   SelectionDAG &DAG,
                                                   unsigned Opc,
                                                   bool Large) const {}

SDValue LoongArchTargetLowering::getTLSDescAddr(GlobalAddressSDNode *N,
                                                SelectionDAG &DAG, unsigned Opc,
                                                bool Large) const {}

SDValue
LoongArchTargetLowering::lowerGlobalTLSAddress(SDValue Op,
                                               SelectionDAG &DAG) const {}

template <unsigned N>
static SDValue checkIntrinsicImmArg(SDValue Op, unsigned ImmOp,
                                    SelectionDAG &DAG, bool IsSigned = false) {}

SDValue
LoongArchTargetLowering::lowerINTRINSIC_WO_CHAIN(SDValue Op,
                                                 SelectionDAG &DAG) const {}

// Helper function that emits an error message for intrinsics with a chain,
// and returns the merge of a UNDEF value and the chain.
static SDValue emitIntrinsicWithChainErrorMessage(SDValue Op,
                                                  StringRef ErrorMsg,
                                                  SelectionDAG &DAG) {}

SDValue
LoongArchTargetLowering::lowerINTRINSIC_W_CHAIN(SDValue Op,
                                                SelectionDAG &DAG) const {}

// Helper function that emits an error message for intrinsics with a void
// return value, and returns the chain.
static SDValue emitIntrinsicErrorMessage(SDValue Op, StringRef ErrorMsg,
                                         SelectionDAG &DAG) {}

SDValue LoongArchTargetLowering::lowerINTRINSIC_VOID(SDValue Op,
                                                     SelectionDAG &DAG) const {}

SDValue LoongArchTargetLowering::lowerShiftLeftParts(SDValue Op,
                                                     SelectionDAG &DAG) const {}

SDValue LoongArchTargetLowering::lowerShiftRightParts(SDValue Op,
                                                      SelectionDAG &DAG,
                                                      bool IsSRA) const {}

// Returns the opcode of the target-specific SDNode that implements the 32-bit
// form of the given Opcode.
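// For example, ISD::SHL maps to LoongArchISD::SLL_W, which is eventually
// selected to the 32-bit sll.w instruction.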
static LoongArchISD::NodeType getLoongArchWOpcode(unsigned Opcode) {}

// Converts the given i8/i16/i32 operation to a target-specific SelectionDAG
// node. Because i8/i16/i32 isn't a legal type for LA64, these operations would
// otherwise be promoted to i64, making it difficult to select the
// SLL_W/.../*W instructions later on, because the fact that the operation was
// originally of type i8/i16/i32 is lost.
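//
// As an illustrative sketch (not the literal implementation), an
// (i32 (shl x, y)) node on LA64 is rebuilt roughly as:
//   t1 = any_extend x to i64
//   t2 = any_extend y to i64
//   t3 = LoongArchISD::SLL_W t1, t2   ; result is sign-extended into i64
//   res = truncate t3 to i32
// so that sll.w can still be selected even though the operands have been
// widened to i64.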
static SDValue customLegalizeToWOp(SDNode *N, SelectionDAG &DAG, int NumOp,
                                   unsigned ExtOpc = ISD::ANY_EXTEND) {}

// Converts the given 32-bit operation to an i64 operation with sign-extension
// semantics, in order to reduce the number of sign-extension instructions.
static SDValue customLegalizeToWOpWithSExt(SDNode *N, SelectionDAG &DAG) {}

// Helper function that emits an error message for intrinsics with or without a
// chain, and returns a UNDEF (and the chain, if present) as the results.
static void emitErrorAndReplaceIntrinsicResults(
    SDNode *N, SmallVectorImpl<SDValue> &Results, SelectionDAG &DAG,
    StringRef ErrorMsg, bool WithChain = true) {}

template <unsigned N>
static void
replaceVPICKVE2GRResults(SDNode *Node, SmallVectorImpl<SDValue> &Results,
                         SelectionDAG &DAG, const LoongArchSubtarget &Subtarget,
                         unsigned ResOp) {}

static void replaceVecCondBranchResults(SDNode *N,
                                        SmallVectorImpl<SDValue> &Results,
                                        SelectionDAG &DAG,
                                        const LoongArchSubtarget &Subtarget,
                                        unsigned ResOp) {}

static void
replaceINTRINSIC_WO_CHAINResults(SDNode *N, SmallVectorImpl<SDValue> &Results,
                                 SelectionDAG &DAG,
                                 const LoongArchSubtarget &Subtarget) {}

void LoongArchTargetLowering::ReplaceNodeResults(
    SDNode *N, SmallVectorImpl<SDValue> &Results, SelectionDAG &DAG) const {}

static SDValue performANDCombine(SDNode *N, SelectionDAG &DAG,
                                 TargetLowering::DAGCombinerInfo &DCI,
                                 const LoongArchSubtarget &Subtarget) {}

static SDValue performSRLCombine(SDNode *N, SelectionDAG &DAG,
                                 TargetLowering::DAGCombinerInfo &DCI,
                                 const LoongArchSubtarget &Subtarget) {}

static SDValue performORCombine(SDNode *N, SelectionDAG &DAG,
                                TargetLowering::DAGCombinerInfo &DCI,
                                const LoongArchSubtarget &Subtarget) {}

static bool checkValueWidth(SDValue V, ISD::LoadExtType &ExtType) {}

// Eliminate redundant truncation and zero-extension nodes.
// * Case 1:
//  +------------+ +------------+ +------------+
//  |   Input1   | |   Input2   | |     CC     |
//  +------------+ +------------+ +------------+
//         |              |              |
//         V              V              +----+
//  +------------+ +------------+             |
//  |  TRUNCATE  | |  TRUNCATE  |             |
//  +------------+ +------------+             |
//         |              |                   |
//         V              V                   |
//  +------------+ +------------+             |
//  |  ZERO_EXT  | |  ZERO_EXT  |             |
//  +------------+ +------------+             |
//         |              |                   |
//         |              +-------------+     |
//         V              V             |     |
//        +----------------+            |     |
//        |      AND       |            |     |
//        +----------------+            |     |
//                |                     |     |
//                +---------------+     |     |
//                                |     |     |
//                                V     V     V
//                               +-------------+
//                               |     CMP     |
//                               +-------------+
// * Case 2:
//  +------------+ +------------+ +-------------+ +------------+ +------------+
//  |   Input1   | |   Input2   | | Constant -1 | | Constant 0 | |     CC     |
//  +------------+ +------------+ +-------------+ +------------+ +------------+
//         |              |             |               |               |
//         V              |             |               |               |
//  +------------+        |             |               |               |
//  |     XOR    |<---------------------+               |               |
//  +------------+        |                             |               |
//         |              |                             |               |
//         V              V             +---------------+               |
//  +------------+ +------------+       |                               |
//  |  TRUNCATE  | |  TRUNCATE  |       |     +-------------------------+
//  +------------+ +------------+       |     |
//         |              |             |     |
//         V              V             |     |
//  +------------+ +------------+       |     |
//  |  ZERO_EXT  | |  ZERO_EXT  |       |     |
//  +------------+ +------------+       |     |
//         |              |             |     |
//         V              V             |     |
//        +----------------+            |     |
//        |      AND       |            |     |
//        +----------------+            |     |
//                |                     |     |
//                +---------------+     |     |
//                                |     |     |
//                                V     V     V
//                               +-------------+
//                               |     CMP     |
//                               +-------------+
static SDValue performSETCCCombine(SDNode *N, SelectionDAG &DAG,
                                   TargetLowering::DAGCombinerInfo &DCI,
                                   const LoongArchSubtarget &Subtarget) {}

// Combine (loongarch_bitrev_w (loongarch_revb_2w X)) to loongarch_bitrev_4b.
static SDValue performBITREV_WCombine(SDNode *N, SelectionDAG &DAG,
                                      TargetLowering::DAGCombinerInfo &DCI,
                                      const LoongArchSubtarget &Subtarget) {}

template <unsigned N>
static SDValue legalizeIntrinsicImmArg(SDNode *Node, unsigned ImmOp,
                                       SelectionDAG &DAG,
                                       const LoongArchSubtarget &Subtarget,
                                       bool IsSigned = false) {}

template <unsigned N>
static SDValue lowerVectorSplatImm(SDNode *Node, unsigned ImmOp,
                                   SelectionDAG &DAG, bool IsSigned = false) {}

static SDValue truncateVecElts(SDNode *Node, SelectionDAG &DAG) {}

static SDValue lowerVectorBitClear(SDNode *Node, SelectionDAG &DAG) {}

template <unsigned N>
static SDValue lowerVectorBitClearImm(SDNode *Node, SelectionDAG &DAG) {}

template <unsigned N>
static SDValue lowerVectorBitSetImm(SDNode *Node, SelectionDAG &DAG) {}

template <unsigned N>
static SDValue lowerVectorBitRevImm(SDNode *Node, SelectionDAG &DAG) {}

static SDValue
performINTRINSIC_WO_CHAINCombine(SDNode *N, SelectionDAG &DAG,
                                 TargetLowering::DAGCombinerInfo &DCI,
                                 const LoongArchSubtarget &Subtarget) {}

SDValue LoongArchTargetLowering::PerformDAGCombine(SDNode *N,
                                                   DAGCombinerInfo &DCI) const {}

static MachineBasicBlock *insertDivByZeroTrap(MachineInstr &MI,
                                              MachineBasicBlock *MBB) {}

static MachineBasicBlock *
emitVecCondBranchPseudo(MachineInstr &MI, MachineBasicBlock *BB,
                        const LoongArchSubtarget &Subtarget) {}

static MachineBasicBlock *
emitPseudoXVINSGR2VR(MachineInstr &MI, MachineBasicBlock *BB,
                     const LoongArchSubtarget &Subtarget) {}

static MachineBasicBlock *emitPseudoCTPOP(MachineInstr &MI,
                                          MachineBasicBlock *BB,
                                          const LoongArchSubtarget &Subtarget) {}

MachineBasicBlock *LoongArchTargetLowering::EmitInstrWithCustomInserter(
    MachineInstr &MI, MachineBasicBlock *BB) const {}

bool LoongArchTargetLowering::allowsMisalignedMemoryAccesses(
    EVT VT, unsigned AddrSpace, Align Alignment, MachineMemOperand::Flags Flags,
    unsigned *Fast) const {}

const char *LoongArchTargetLowering::getTargetNodeName(unsigned Opcode) const {}

//===----------------------------------------------------------------------===//
//                     Calling Convention Implementation
//===----------------------------------------------------------------------===//

// Eight general-purpose registers a0-a7 are used for passing integer
// arguments, with a0-a1 reused to return values. Generally, the GPRs are used
// to pass fixed-point arguments, and also floating-point arguments when no FPR
// is available or when the soft-float ABI is in use.
const MCPhysReg ArgGPRs[] =;
// Eight floating-point registers fa0-fa7 are used for passing floating-point
// arguments, and fa0-fa1 are also used to return values.
const MCPhysReg ArgFPR32s[] =;
// FPR32 and FPR64 alias each other.
const MCPhysReg ArgFPR64s[] =;

const MCPhysReg ArgVRs[] =;

const MCPhysReg ArgXRs[] =;

// Pass a 2*GRLen argument that has been split into two GRLen values through
// registers or the stack as necessary.
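//
// For example (illustrative): on LA64 (GRLen = 64) an i128 argument is split
// into two i64 halves. Depending on how many argument GPRs remain, both halves
// are passed in registers, the first half is passed in the last free register
// and the second on the stack, or both halves are passed on the stack.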
static bool CC_LoongArchAssign2GRLen(unsigned GRLen, CCState &State,
                                     CCValAssign VA1, ISD::ArgFlagsTy ArgFlags1,
                                     unsigned ValNo2, MVT ValVT2, MVT LocVT2,
                                     ISD::ArgFlagsTy ArgFlags2) {}

// Implements the LoongArch calling convention. Returns true upon failure.
static bool CC_LoongArch(const DataLayout &DL, LoongArchABI::ABI ABI,
                         unsigned ValNo, MVT ValVT,
                         CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
                         CCState &State, bool IsFixed, bool IsRet,
                         Type *OrigTy) {}

void LoongArchTargetLowering::analyzeInputArgs(
    MachineFunction &MF, CCState &CCInfo,
    const SmallVectorImpl<ISD::InputArg> &Ins, bool IsRet,
    LoongArchCCAssignFn Fn) const {}

void LoongArchTargetLowering::analyzeOutputArgs(
    MachineFunction &MF, CCState &CCInfo,
    const SmallVectorImpl<ISD::OutputArg> &Outs, bool IsRet,
    CallLoweringInfo *CLI, LoongArchCCAssignFn Fn) const {}

// Convert Val to a ValVT. Should not be called for CCValAssign::Indirect
// values.
static SDValue convertLocVTToValVT(SelectionDAG &DAG, SDValue Val,
                                   const CCValAssign &VA, const SDLoc &DL) {}

static SDValue unpackFromRegLoc(SelectionDAG &DAG, SDValue Chain,
                                const CCValAssign &VA, const SDLoc &DL,
                                const ISD::InputArg &In,
                                const LoongArchTargetLowering &TLI) {}

// The caller is responsible for loading the full value if the argument is
// passed with CCValAssign::Indirect.
static SDValue unpackFromMemLoc(SelectionDAG &DAG, SDValue Chain,
                                const CCValAssign &VA, const SDLoc &DL) {}

static SDValue convertValVTToLocVT(SelectionDAG &DAG, SDValue Val,
                                   const CCValAssign &VA, const SDLoc &DL) {}

static bool CC_LoongArch_GHC(unsigned ValNo, MVT ValVT, MVT LocVT,
                             CCValAssign::LocInfo LocInfo,
                             ISD::ArgFlagsTy ArgFlags, CCState &State) {}

// Transform physical registers into virtual registers.
SDValue LoongArchTargetLowering::LowerFormalArguments(
    SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {}

bool LoongArchTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {}

// Check if the return value is used as only a return value, as otherwise
// we can't perform a tail-call.
bool LoongArchTargetLowering::isUsedByReturnOnly(SDNode *N,
                                                 SDValue &Chain) const {}

// Check whether the call is eligible for tail call optimization.
bool LoongArchTargetLowering::isEligibleForTailCallOptimization(
    CCState &CCInfo, CallLoweringInfo &CLI, MachineFunction &MF,
    const SmallVectorImpl<CCValAssign> &ArgLocs) const {}

static Align getPrefTypeAlign(EVT VT, SelectionDAG &DAG) {}

// Lower a call to a callseq_start + CALL + callseq_end chain, and add input
// and output parameter nodes.
SDValue
LoongArchTargetLowering::LowerCall(CallLoweringInfo &CLI,
                                   SmallVectorImpl<SDValue> &InVals) const {}

bool LoongArchTargetLowering::CanLowerReturn(
    CallingConv::ID CallConv, MachineFunction &MF, bool IsVarArg,
    const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context) const {}

SDValue LoongArchTargetLowering::LowerReturn(
    SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
    const SmallVectorImpl<ISD::OutputArg> &Outs,
    const SmallVectorImpl<SDValue> &OutVals, const SDLoc &DL,
    SelectionDAG &DAG) const {}

bool LoongArchTargetLowering::isFPImmVLDILegal(const APFloat &Imm,
                                               EVT VT) const {}

bool LoongArchTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
                                           bool ForCodeSize) const {}

bool LoongArchTargetLowering::isCheapToSpeculateCttz(Type *) const {}

bool LoongArchTargetLowering::isCheapToSpeculateCtlz(Type *) const {}

bool LoongArchTargetLowering::shouldInsertFencesForAtomic(
    const Instruction *I) const {}

EVT LoongArchTargetLowering::getSetCCResultType(const DataLayout &DL,
                                                LLVMContext &Context,
                                                EVT VT) const {}

bool LoongArchTargetLowering::hasAndNot(SDValue Y) const {}

bool LoongArchTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
                                                 const CallInst &I,
                                                 MachineFunction &MF,
                                                 unsigned Intrinsic) const {}

TargetLowering::AtomicExpansionKind
LoongArchTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {}

static Intrinsic::ID
getIntrinsicForMaskedAtomicRMWBinOp(unsigned GRLen,
                                    AtomicRMWInst::BinOp BinOp) {}

TargetLowering::AtomicExpansionKind
LoongArchTargetLowering::shouldExpandAtomicCmpXchgInIR(
    AtomicCmpXchgInst *CI) const {}

Value *LoongArchTargetLowering::emitMaskedAtomicCmpXchgIntrinsic(
    IRBuilderBase &Builder, AtomicCmpXchgInst *CI, Value *AlignedAddr,
    Value *CmpVal, Value *NewVal, Value *Mask, AtomicOrdering Ord) const {}

Value *LoongArchTargetLowering::emitMaskedAtomicRMWIntrinsic(
    IRBuilderBase &Builder, AtomicRMWInst *AI, Value *AlignedAddr, Value *Incr,
    Value *Mask, Value *ShiftAmt, AtomicOrdering Ord) const {}

bool LoongArchTargetLowering::isFMAFasterThanFMulAndFAdd(
    const MachineFunction &MF, EVT VT) const {}

Register LoongArchTargetLowering::getExceptionPointerRegister(
    const Constant *PersonalityFn) const {}

Register LoongArchTargetLowering::getExceptionSelectorRegister(
    const Constant *PersonalityFn) const {}

//===----------------------------------------------------------------------===//
//                           LoongArch Inline Assembly Support
//===----------------------------------------------------------------------===//

LoongArchTargetLowering::ConstraintType
LoongArchTargetLowering::getConstraintType(StringRef Constraint) const {}

InlineAsm::ConstraintCode LoongArchTargetLowering::getInlineAsmMemConstraint(
    StringRef ConstraintCode) const {}

std::pair<unsigned, const TargetRegisterClass *>
LoongArchTargetLowering::getRegForInlineAsmConstraint(
    const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const {}

void LoongArchTargetLowering::LowerAsmOperandForConstraint(
    SDValue Op, StringRef Constraint, std::vector<SDValue> &Ops,
    SelectionDAG &DAG) const {}

#define GET_REGISTER_MATCHER
#include "LoongArchGenAsmMatcher.inc"

Register
LoongArchTargetLowering::getRegisterByName(const char *RegName, LLT VT,
                                           const MachineFunction &MF) const {}

bool LoongArchTargetLowering::decomposeMulByConstant(LLVMContext &Context,
                                                     EVT VT, SDValue C) const {}

bool LoongArchTargetLowering::isLegalAddressingMode(const DataLayout &DL,
                                                    const AddrMode &AM,
                                                    Type *Ty, unsigned AS,
                                                    Instruction *I) const {}

bool LoongArchTargetLowering::isLegalICmpImmediate(int64_t Imm) const {}

bool LoongArchTargetLowering::isLegalAddImmediate(int64_t Imm) const {}

bool LoongArchTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {}

bool LoongArchTargetLowering::isSExtCheaperThanZExt(EVT SrcVT,
                                                    EVT DstVT) const {}

bool LoongArchTargetLowering::signExtendConstant(const ConstantInt *CI) const {}

bool LoongArchTargetLowering::hasAndNotCompare(SDValue Y) const {}

ISD::NodeType LoongArchTargetLowering::getExtendForAtomicCmpSwapArg() const {}

bool LoongArchTargetLowering::shouldSignExtendTypeInLibCall(
    EVT Type, bool IsSigned) const {}

bool LoongArchTargetLowering::shouldExtendTypeInLibCall(EVT Type) const {}

// memcpy and other memory intrinsics typically try to use wider loads/stores
// if the source/dest is aligned and the copy size is large enough. We
// therefore want to align such objects passed to memory intrinsics.
bool LoongArchTargetLowering::shouldAlignPointerArgs(CallInst *CI,
                                                     unsigned &MinSize,
                                                     Align &PrefAlign) const {}