llvm/lib/Target/Hexagon/HexagonISelLowering.cpp

//===-- HexagonISelLowering.cpp - Hexagon DAG Lowering Implementation -----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the interfaces that Hexagon uses to lower LLVM code
// into a selection DAG.
//
//===----------------------------------------------------------------------===//

#include "HexagonISelLowering.h"
#include "Hexagon.h"
#include "HexagonMachineFunctionInfo.h"
#include "HexagonRegisterInfo.h"
#include "HexagonSubtarget.h"
#include "HexagonTargetMachine.h"
#include "HexagonTargetObjectFile.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RuntimeLibcallUtil.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetCallingConv.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/DiagnosticPrinter.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsHexagon.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <limits>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "hexagon-lowering"

static cl::opt<bool> EmitJumpTables("hexagon-emit-jump-tables",
  cl::init(true), cl::Hidden,
  cl::desc("Control jump table emission on Hexagon target"));

static cl::opt<bool>
    EnableHexSDNodeSched("enable-hexagon-sdnode-sched", cl::Hidden,
                         cl::desc("Enable Hexagon SDNode scheduling"));

static cl::opt<bool> EnableFastMath("ffast-math", cl::Hidden,
                                    cl::desc("Enable Fast Math processing"));

static cl::opt<int> MinimumJumpTables("minimum-jump-tables", cl::Hidden,
                                      cl::init(5),
                                      cl::desc("Set minimum jump tables"));

static cl::opt<int>
    MaxStoresPerMemcpyCL("max-store-memcpy", cl::Hidden, cl::init(6),
                         cl::desc("Max #stores to inline memcpy"));

static cl::opt<int>
    MaxStoresPerMemcpyOptSizeCL("max-store-memcpy-Os", cl::Hidden, cl::init(4),
                                cl::desc("Max #stores to inline memcpy"));

static cl::opt<int>
    MaxStoresPerMemmoveCL("max-store-memmove", cl::Hidden, cl::init(6),
                          cl::desc("Max #stores to inline memmove"));

static cl::opt<int>
    MaxStoresPerMemmoveOptSizeCL("max-store-memmove-Os", cl::Hidden,
                                 cl::init(4),
                                 cl::desc("Max #stores to inline memmove"));

static cl::opt<int>
    MaxStoresPerMemsetCL("max-store-memset", cl::Hidden, cl::init(8),
                         cl::desc("Max #stores to inline memset"));

static cl::opt<int>
    MaxStoresPerMemsetOptSizeCL("max-store-memset-Os", cl::Hidden, cl::init(4),
                                cl::desc("Max #stores to inline memset"));

static cl::opt<bool> AlignLoads("hexagon-align-loads",
  cl::Hidden, cl::init(false),
  cl::desc("Rewrite unaligned loads as a pair of aligned loads"));

static cl::opt<bool>
    DisableArgsMinAlignment("hexagon-disable-args-min-alignment", cl::Hidden,
                            cl::init(false),
                            cl::desc("Disable minimum alignment of 1 for "
                                     "arguments passed by value on stack"));

namespace {

  class HexagonCCState : public CCState {};

} // end anonymous namespace


// Implement calling convention for Hexagon.

static bool CC_SkipOdd(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
                       CCValAssign::LocInfo &LocInfo,
                       ISD::ArgFlagsTy &ArgFlags, CCState &State) {}
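
// A minimal sketch of the register-skipping idea behind CC_SkipOdd, written
// with the generic CCState helpers. This is illustrative only (the helper name
// and register array are assumptions, not the generated Hexagon tables): a
// 64-bit value must start in an even-numbered register so that it can live in
// an even/odd register pair.
[[maybe_unused]] static bool sketchSkipToEvenReg(CCState &State,
                                                 ArrayRef<MCPhysReg> ArgRegs) {
  unsigned Next = State.getFirstUnallocated(ArgRegs);
  if (Next != ArgRegs.size() && (Next % 2) == 1)
    State.AllocateReg(ArgRegs[Next]); // Burn the odd register to realign.
  return false; // Not handled here; let the remaining CC rules run.
}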

#include "HexagonGenCallingConv.inc"


SDValue
HexagonTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG)
      const {}

/// CreateCopyOfByValArgument - Make a copy of an aggregate at the address
/// specified by "Src" to the address "Dst" of size "Size".  Alignment
/// information is specified by the specific parameter attribute. The copy will
/// be passed as a byval function parameter.  Sometimes what we are copying is
/// the end of a larger object, the part that does not fit in registers.
static SDValue CreateCopyOfByValArgument(SDValue Src, SDValue Dst,
                                         SDValue Chain, ISD::ArgFlagsTy Flags,
                                         SelectionDAG &DAG, const SDLoc &dl) {}

bool
HexagonTargetLowering::CanLowerReturn(
    CallingConv::ID CallConv, MachineFunction &MF, bool IsVarArg,
    const SmallVectorImpl<ISD::OutputArg> &Outs,
    LLVMContext &Context) const {}

// LowerReturn - Lower ISD::RET. If a struct is larger than 8 bytes and is
// passed by value, the function prototype is modified to return void and
// the value is stored in memory pointed to by a pointer passed by the caller.
SDValue
HexagonTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
                                   bool IsVarArg,
                                   const SmallVectorImpl<ISD::OutputArg> &Outs,
                                   const SmallVectorImpl<SDValue> &OutVals,
                                   const SDLoc &dl, SelectionDAG &DAG) const {}
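
// A generic sketch of the register-return half of the lowering described
// above, with the target return opcode passed in by the caller; the
// struct-return-in-memory path and the Hexagon-specific opcode are omitted.
// This illustrates the common SelectionDAG pattern, not the actual body.
[[maybe_unused]] static SDValue
sketchEmitRegisterReturn(SelectionDAG &DAG, SDValue Chain,
                         ArrayRef<CCValAssign> RVLocs, ArrayRef<SDValue> OutVals,
                         unsigned RetOpcode, const SDLoc &dl) {
  SDValue Glue;
  SmallVector<SDValue, 8> RetOps(1, Chain);
  for (unsigned I = 0, E = RVLocs.size(); I != E; ++I) {
    // Copy each return value into its assigned physical register, gluing the
    // copies together so they stay adjacent to the return node.
    Chain = DAG.getCopyToReg(Chain, dl, RVLocs[I].getLocReg(), OutVals[I], Glue);
    Glue = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(RVLocs[I].getLocReg(), RVLocs[I].getLocVT()));
  }
  RetOps[0] = Chain; // Use the final chain.
  if (Glue.getNode())
    RetOps.push_back(Glue);
  return DAG.getNode(RetOpcode, dl, MVT::Other, RetOps);
}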

bool HexagonTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {}

Register HexagonTargetLowering::getRegisterByName(
      const char* RegName, LLT VT, const MachineFunction &) const {}

/// LowerCallResult - Lower the result values of an ISD::CALL into the
/// appropriate copies out of appropriate physical registers.  This assumes that
/// Chain/Glue are the input chain/glue to use, and that TheCall is the call
/// being lowered. Returns an SDNode with the same number of values as the
/// ISD::CALL.
SDValue HexagonTargetLowering::LowerCallResult(
    SDValue Chain, SDValue Glue, CallingConv::ID CallConv, bool IsVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
    const SmallVectorImpl<SDValue> &OutVals, SDValue Callee) const {}
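
// A minimal sketch of the copy-out pattern described above (illustrative only;
// it omits any target-specific handling of special register classes): each
// result is copied out of the physical register chosen by the calling
// convention, threading chain and glue through consecutive copies.
[[maybe_unused]] static SDValue
sketchCopyCallResults(SelectionDAG &DAG, SDValue Chain, SDValue Glue,
                      ArrayRef<CCValAssign> RVLocs, const SDLoc &dl,
                      SmallVectorImpl<SDValue> &InVals) {
  for (const CCValAssign &VA : RVLocs) {
    SDValue Val =
        DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), VA.getLocVT(), Glue);
    Chain = Val.getValue(1); // Updated chain.
    Glue = Val.getValue(2);  // Glue keeps the copies contiguous.
    InVals.push_back(Val);
  }
  return Chain;
}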

/// LowerCall - Function arguments are copied from virtual registers to
/// physical registers or the stack frame; CALLSEQ_START and CALLSEQ_END are
/// emitted.
SDValue
HexagonTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
                                 SmallVectorImpl<SDValue> &InVals) const {}
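
// A skeletal sketch of the call-sequence bracketing mentioned above, assuming
// the outgoing stack size has already been computed by the calling-convention
// analysis. The register copies, stack stores, and the call node itself are
// elided; this is not the actual LowerCall body.
[[maybe_unused]] static SDValue sketchCallSequence(SelectionDAG &DAG,
                                                   SDValue Chain,
                                                   unsigned NumBytes,
                                                   const SDLoc &dl) {
  // Open the call frame, reserving NumBytes of outgoing argument space.
  Chain = DAG.getCALLSEQ_START(Chain, NumBytes, /*OutSize=*/0, dl);
  // ... CopyToReg nodes for register arguments and stores for stack arguments
  // ... would be emitted here, followed by the glued target call node.
  // Close the call frame; the second size is the callee-popped byte count.
  Chain = DAG.getCALLSEQ_END(Chain, NumBytes, /*Size2=*/0, SDValue(), dl);
  return Chain;
}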

/// Returns true if this node can be combined with a load/store to form a
/// post-indexed load/store; the base pointer, offset pointer, and addressing
/// mode are returned by reference.
bool HexagonTargetLowering::getPostIndexedAddressParts(SDNode *N, SDNode *Op,
      SDValue &Base, SDValue &Offset, ISD::MemIndexedMode &AM,
      SelectionDAG &DAG) const {}
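
// An illustrative sketch of the shape of a post-increment match (a real
// implementation would also validate the increment against the legal
// auto-increment range for the access size): PtrUpdate is the pointer-update
// node, and Base, Offset, and AM are the by-reference results mentioned above.
[[maybe_unused]] static bool sketchMatchPostInc(SDNode *PtrUpdate, SDValue &Base,
                                                SDValue &Offset,
                                                ISD::MemIndexedMode &AM) {
  if (PtrUpdate->getOpcode() != ISD::ADD)
    return false;
  Base = PtrUpdate->getOperand(0);
  Offset = PtrUpdate->getOperand(1);
  if (!isa<ConstantSDNode>(Offset)) // Require an immediate increment.
    return false;
  AM = ISD::POST_INC;
  return true;
}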

SDValue HexagonTargetLowering::LowerFDIV(SDValue Op, SelectionDAG &DAG) const {}

SDValue
HexagonTargetLowering::LowerINLINEASM(SDValue Op, SelectionDAG &DAG) const {}

// Need to transform ISD::PREFETCH into something that doesn't inherit
// all of the properties of ISD::PREFETCH, specifically SDNPMayLoad and
// SDNPMayStore.
SDValue HexagonTargetLowering::LowerPREFETCH(SDValue Op,
                                             SelectionDAG &DAG) const {}

// Custom-handle ISD::READCYCLECOUNTER because the target-independent SDNode
// is marked as having side-effects, while the register read on Hexagon does
// not have any. TableGen refuses to accept the direct pattern from that node
// to the A4_tfrcpp.
SDValue HexagonTargetLowering::LowerREADCYCLECOUNTER(SDValue Op,
                                                     SelectionDAG &DAG) const {}

// Custom-handle ISD::READSTEADYCOUNTER because the target-independent SDNode
// is marked as having side-effects, while the register read on Hexagon does
// not have any. TableGen refuses to accept the direct pattern from that node
// to the A4_tfrcpp.
SDValue HexagonTargetLowering::LowerREADSTEADYCOUNTER(SDValue Op,
                                                      SelectionDAG &DAG) const {}
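
// A minimal sketch of the replacement both lowerings above perform, assuming a
// side-effect-free target opcode (e.g. something like HexagonISD::READCYCLE)
// that TableGen can match: keep only the chain operand and produce the 64-bit
// counter value together with an output chain.
[[maybe_unused]] static SDValue sketchLowerCounterRead(SDValue Op,
                                                       unsigned TargetOpc,
                                                       SelectionDAG &DAG) {
  SDLoc dl(Op);
  SDValue Chain = Op.getOperand(0);                   // Incoming chain only.
  SDVTList VTs = DAG.getVTList(MVT::i64, MVT::Other); // Counter value + chain.
  return DAG.getNode(TargetOpc, dl, VTs, Chain);
}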

SDValue HexagonTargetLowering::LowerINTRINSIC_VOID(SDValue Op,
      SelectionDAG &DAG) const {}

SDValue
HexagonTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
                                               SelectionDAG &DAG) const {}

SDValue HexagonTargetLowering::LowerFormalArguments(
    SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {}

SDValue
HexagonTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {}

SDValue
HexagonTargetLowering::LowerVACOPY(SDValue Op, SelectionDAG &DAG) const {}

SDValue HexagonTargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {}

SDValue
HexagonTargetLowering::LowerVSELECT(SDValue Op, SelectionDAG &DAG) const {}

SDValue
HexagonTargetLowering::LowerConstantPool(SDValue Op, SelectionDAG &DAG) const {}

SDValue
HexagonTargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const {}

SDValue
HexagonTargetLowering::LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const {}

SDValue
HexagonTargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {}

SDValue
HexagonTargetLowering::LowerATOMIC_FENCE(SDValue Op, SelectionDAG& DAG) const {}

SDValue
HexagonTargetLowering::LowerGLOBALADDRESS(SDValue Op, SelectionDAG &DAG) const {}

SDValue
HexagonTargetLowering::LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const {}

SDValue
HexagonTargetLowering::LowerGLOBAL_OFFSET_TABLE(SDValue Op, SelectionDAG &DAG)
      const {}

SDValue
HexagonTargetLowering::GetDynamicTLSAddr(SelectionDAG &DAG, SDValue Chain,
      GlobalAddressSDNode *GA, SDValue Glue, EVT PtrVT, unsigned ReturnReg,
      unsigned char OperandFlags) const {}

//
// Lower using the initial executable model for TLS addresses
//
SDValue
HexagonTargetLowering::LowerToTLSInitialExecModel(GlobalAddressSDNode *GA,
      SelectionDAG &DAG) const {}

//
// Lower using the local executable model for TLS addresses
//
SDValue
HexagonTargetLowering::LowerToTLSLocalExecModel(GlobalAddressSDNode *GA,
      SelectionDAG &DAG) const {}
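
// A sketch of the local-exec computation referred to above: the variable's
// address is the thread pointer plus a link-time constant offset. The thread
// pointer value and the TPREL relocation flag are taken as parameters because
// the exact Hexagon encodings are not spelled out in this outline.
[[maybe_unused]] static SDValue sketchLocalExecAddress(GlobalAddressSDNode *GA,
                                                       SDValue ThreadPointer,
                                                       unsigned TprelFlag,
                                                       SelectionDAG &DAG) {
  SDLoc dl(GA);
  EVT PtrVT = ThreadPointer.getValueType();
  // Offset of the variable within the thread-local block, resolved statically.
  SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl, PtrVT,
                                           GA->getOffset(), TprelFlag);
  return DAG.getNode(ISD::ADD, dl, PtrVT, ThreadPointer, TGA);
}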

//
// Lower using the general dynamic model for TLS addresses
//
SDValue
HexagonTargetLowering::LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA,
      SelectionDAG &DAG) const {}

//
// Lower TLS addresses.
//
// For now, for dynamic models, we only support the general dynamic model.
//
SDValue
HexagonTargetLowering::LowerGlobalTLSAddress(SDValue Op,
      SelectionDAG &DAG) const {}

//===----------------------------------------------------------------------===//
// TargetLowering Implementation
//===----------------------------------------------------------------------===//

HexagonTargetLowering::HexagonTargetLowering(const TargetMachine &TM,
                                             const HexagonSubtarget &ST)
    :{}

const char* HexagonTargetLowering::getTargetNodeName(unsigned Opcode) const {}

bool
HexagonTargetLowering::validateConstPtrAlignment(SDValue Ptr, Align NeedAlign,
      const SDLoc &dl, SelectionDAG &DAG) const {}

SDValue
HexagonTargetLowering::replaceMemWithUndef(SDValue Op, SelectionDAG &DAG)
      const {}

// Bit-reverse Load Intrinsic: Check if the instruction is a bit reverse load
// intrinsic.
static bool isBrevLdIntrinsic(const Value *Inst) {}

// Bit-reverse Load Intrinsic: Crawl up and figure out the object from the
// previous instruction. So far we only handle bitcast, extractvalue, and
// bit-reverse load intrinsic instructions. Should we handle CGEP?
static Value *getBrevLdObject(Value *V) {}
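
// An illustrative sketch of the "crawl up" step described above (the helper
// name is hypothetical): peel bitcasts and extractvalue instructions until
// either the bit-reverse load intrinsic or the underlying object is reached.
[[maybe_unused]] static Value *sketchCrawlToBrevLdObject(Value *V) {
  if (isa<IntrinsicInst>(V))
    return V; // Reached the intrinsic itself.
  if (auto *BC = dyn_cast<BitCastInst>(V))
    return sketchCrawlToBrevLdObject(BC->getOperand(0));
  if (auto *EV = dyn_cast<ExtractValueInst>(V))
    return sketchCrawlToBrevLdObject(EV->getAggregateOperand());
  return V; // Anything else is treated as the object.
}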

// Bit-reverse Load Intrinsic: For a PHI Node return either an incoming edge or
// a back edge. If the back edge comes from the intrinsic itself, the incoming
// edge is returned.
static Value *returnEdge(const PHINode *PN, Value *IntrBaseVal) {}

// Bit-reverse Load Intrinsic: Figure out the underlying object the base
// pointer points to, for the bit-reverse load intrinsic. Setting this on the
// memoperand might help alias analysis figure out the dependencies.
static Value *getUnderLyingObjectForBrevLdIntr(Value *V) {}

/// Given an intrinsic, checks if on the target the intrinsic will need to map
/// to a MemIntrinsicNode (touches memory). If this is the case, it returns
/// true and stores the intrinsic information into the IntrinsicInfo that was
/// passed to the function.
bool HexagonTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
                                               const CallInst &I,
                                               MachineFunction &MF,
                                               unsigned Intrinsic) const {}
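
// A hedged sketch of what "storing the intrinsic information" looks like for a
// load-like intrinsic. The opcode, access width, alignment, and operand index
// below are illustrative assumptions, not the values Hexagon uses for its
// circular or bit-reverse loads.
[[maybe_unused]] static void
sketchDescribeMemIntrinsic(TargetLowering::IntrinsicInfo &Info,
                           const CallInst &I) {
  Info.opc = ISD::INTRINSIC_W_CHAIN; // Produces a value and touches memory.
  Info.memVT = MVT::i64;             // Width of the memory access.
  Info.ptrVal = I.getArgOperand(0);  // Pointer used for alias information.
  Info.offset = 0;
  Info.align = Align(8);
  Info.flags = MachineMemOperand::MOLoad; // Behaves like a load.
}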

bool HexagonTargetLowering::hasBitTest(SDValue X, SDValue Y) const {}

bool HexagonTargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const {}

bool HexagonTargetLowering::isTruncateFree(EVT VT1, EVT VT2) const {}

bool HexagonTargetLowering::isFMAFasterThanFMulAndFAdd(
    const MachineFunction &MF, EVT VT) const {}

// Should we expand the build vector with shuffles?
bool HexagonTargetLowering::shouldExpandBuildVectorWithShuffles(EVT VT,
      unsigned DefinedValues) const {}

bool HexagonTargetLowering::isExtractSubvectorCheap(EVT ResVT, EVT SrcVT,
      unsigned Index) const {}

bool HexagonTargetLowering::isTargetCanonicalConstantNode(SDValue Op) const {}

bool HexagonTargetLowering::isShuffleMaskLegal(ArrayRef<int> Mask,
                                               EVT VT) const {}

TargetLoweringBase::LegalizeTypeAction
HexagonTargetLowering::getPreferredVectorAction(MVT VT) const {}

TargetLoweringBase::LegalizeAction
HexagonTargetLowering::getCustomOperationAction(SDNode &Op) const {}

std::pair<SDValue, int>
HexagonTargetLowering::getBaseAndOffset(SDValue Addr) const {}

// Lower a vector shuffle (V1, V2, V3).  V1 and V2 are the two vectors
// to select data from, V3 is the permutation.
SDValue
HexagonTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG)
      const {}
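
// A small sketch of reading the pieces named above, using the generic
// SelectionDAG idiom rather than anything Hexagon-specific: the shuffle node
// carries a constant permutation mask in which -1 entries mean "don't care".
[[maybe_unused]] static bool sketchIsIdentityShuffle(SDValue Op) {
  auto *SVN = cast<ShuffleVectorSDNode>(Op.getNode());
  ArrayRef<int> Mask = SVN->getMask();
  for (unsigned I = 0, E = Mask.size(); I != E; ++I)
    if (Mask[I] != -1 && Mask[I] != int(I))
      return false; // Element does not come from the same lane of V1.
  return true;      // The shuffle simply forwards its first operand.
}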

SDValue
HexagonTargetLowering::getSplatValue(SDValue Op, SelectionDAG &DAG) const {}

// Create a Hexagon-specific node for shifting a vector by an integer.
SDValue
HexagonTargetLowering::getVectorShiftByInt(SDValue Op, SelectionDAG &DAG)
      const {}

SDValue
HexagonTargetLowering::LowerVECTOR_SHIFT(SDValue Op, SelectionDAG &DAG) const {}

SDValue
HexagonTargetLowering::LowerROTL(SDValue Op, SelectionDAG &DAG) const {}

SDValue
HexagonTargetLowering::LowerBITCAST(SDValue Op, SelectionDAG &DAG) const {}

bool
HexagonTargetLowering::getBuildVectorConstInts(ArrayRef<SDValue> Values,
      MVT VecTy, SelectionDAG &DAG,
      MutableArrayRef<ConstantInt*> Consts) const {}

SDValue
HexagonTargetLowering::buildVector32(ArrayRef<SDValue> Elem, const SDLoc &dl,
                                     MVT VecTy, SelectionDAG &DAG) const {}

SDValue
HexagonTargetLowering::buildVector64(ArrayRef<SDValue> Elem, const SDLoc &dl,
                                     MVT VecTy, SelectionDAG &DAG) const {}

SDValue
HexagonTargetLowering::extractVector(SDValue VecV, SDValue IdxV,
                                     const SDLoc &dl, MVT ValTy, MVT ResTy,
                                     SelectionDAG &DAG) const {}

SDValue
HexagonTargetLowering::extractVectorPred(SDValue VecV, SDValue IdxV,
                                         const SDLoc &dl, MVT ValTy, MVT ResTy,
                                         SelectionDAG &DAG) const {}

SDValue
HexagonTargetLowering::insertVector(SDValue VecV, SDValue ValV, SDValue IdxV,
                                    const SDLoc &dl, MVT ValTy,
                                    SelectionDAG &DAG) const {}

SDValue
HexagonTargetLowering::insertVectorPred(SDValue VecV, SDValue ValV,
                                        SDValue IdxV, const SDLoc &dl,
                                        MVT ValTy, SelectionDAG &DAG) const {}

SDValue
HexagonTargetLowering::expandPredicate(SDValue Vec32, const SDLoc &dl,
                                       SelectionDAG &DAG) const {}

SDValue
HexagonTargetLowering::contractPredicate(SDValue Vec64, const SDLoc &dl,
                                         SelectionDAG &DAG) const {}

SDValue
HexagonTargetLowering::getZero(const SDLoc &dl, MVT Ty, SelectionDAG &DAG)
      const {}

SDValue
HexagonTargetLowering::appendUndef(SDValue Val, MVT ResTy, SelectionDAG &DAG)
      const {}

SDValue
HexagonTargetLowering::getCombine(SDValue Hi, SDValue Lo, const SDLoc &dl,
                                  MVT ResTy, SelectionDAG &DAG) const {}

SDValue
HexagonTargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {}

SDValue
HexagonTargetLowering::LowerCONCAT_VECTORS(SDValue Op,
                                           SelectionDAG &DAG) const {}

SDValue
HexagonTargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
                                               SelectionDAG &DAG) const {}

SDValue
HexagonTargetLowering::LowerEXTRACT_SUBVECTOR(SDValue Op,
                                              SelectionDAG &DAG) const {}

SDValue
HexagonTargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op,
                                              SelectionDAG &DAG) const {}

SDValue
HexagonTargetLowering::LowerINSERT_SUBVECTOR(SDValue Op,
                                             SelectionDAG &DAG) const {}

bool
HexagonTargetLowering::allowTruncateForTailCall(Type *Ty1, Type *Ty2) const {}

SDValue
HexagonTargetLowering::LowerLoad(SDValue Op, SelectionDAG &DAG) const {}

SDValue
HexagonTargetLowering::LowerStore(SDValue Op, SelectionDAG &DAG) const {}

SDValue
HexagonTargetLowering::LowerUnalignedLoad(SDValue Op, SelectionDAG &DAG)
      const {}

SDValue
HexagonTargetLowering::LowerUAddSubO(SDValue Op, SelectionDAG &DAG) const {}

SDValue HexagonTargetLowering::LowerUAddSubOCarry(SDValue Op,
                                                  SelectionDAG &DAG) const {}

SDValue
HexagonTargetLowering::LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const {}

SDValue
HexagonTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {}

void
HexagonTargetLowering::LowerOperationWrapper(SDNode *N,
                                             SmallVectorImpl<SDValue> &Results,
                                             SelectionDAG &DAG) const {}

void
HexagonTargetLowering::ReplaceNodeResults(SDNode *N,
                                          SmallVectorImpl<SDValue> &Results,
                                          SelectionDAG &DAG) const {}

SDValue
HexagonTargetLowering::PerformDAGCombine(SDNode *N,
                                         DAGCombinerInfo &DCI) const {}

/// Returns relocation base for the given PIC jumptable.
SDValue
HexagonTargetLowering::getPICJumpTableRelocBase(SDValue Table,
                                                SelectionDAG &DAG) const {}

//===----------------------------------------------------------------------===//
// Inline Assembly Support
//===----------------------------------------------------------------------===//

TargetLowering::ConstraintType
HexagonTargetLowering::getConstraintType(StringRef Constraint) const {}

std::pair<unsigned, const TargetRegisterClass*>
HexagonTargetLowering::getRegForInlineAsmConstraint(
    const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const {}

/// isFPImmLegal - Returns true if the target can instruction select the
/// specified FP immediate natively. If false, the legalizer will
/// materialize the FP immediate as a load from a constant pool.
bool HexagonTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
                                         bool ForCodeSize) const {}

/// isLegalAddressingMode - Return true if the addressing mode represented by
/// AM is legal for this target, for a load/store of the specified type.
bool HexagonTargetLowering::isLegalAddressingMode(const DataLayout &DL,
                                                  const AddrMode &AM, Type *Ty,
                                                  unsigned AS, Instruction *I) const {}
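
// An illustrative check over the AddrMode fields this hook receives; the
// accepted combinations below are assumptions made for the sketch, not
// Hexagon's actual addressing-mode rules.
[[maybe_unused]] static bool
sketchIsLegalAddrMode(const TargetLowering::AddrMode &AM) {
  if (AM.BaseGV && (AM.BaseOffs || AM.HasBaseReg))
    return false; // Global base combined with extra terms.
  if (AM.Scale != 0 && AM.Scale != 1)
    return false; // No scaled-index forms in this sketch.
  return true;    // Base register plus immediate offset is accepted.
}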

/// Return true if folding a constant offset with the given GlobalAddress is
/// legal.  It is frequently not legal in PIC relocation models.
bool HexagonTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA)
      const {}

/// isLegalICmpImmediate - Return true if the specified immediate is a legal
/// icmp immediate, that is, the target has icmp instructions which can compare
/// a register against the immediate without having to materialize the
/// immediate into a register.
bool HexagonTargetLowering::isLegalICmpImmediate(int64_t Imm) const {}
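
// A sketch of the kind of range test this hook performs; the windows below are
// illustrative assumptions, not the exact Hexagon compare-immediate ranges.
[[maybe_unused]] static bool sketchFitsCompareImmediate(int64_t Imm) {
  return isInt<8>(Imm) || isUInt<9>(Imm); // Small signed or unsigned forms.
}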

/// IsEligibleForTailCallOptimization - Check whether the call is eligible
/// for tail call optimization. Targets which want to do tail call
/// optimization should implement this function.
bool HexagonTargetLowering::IsEligibleForTailCallOptimization(
                                 SDValue Callee,
                                 CallingConv::ID CalleeCC,
                                 bool IsVarArg,
                                 bool IsCalleeStructRet,
                                 bool IsCallerStructRet,
                                 const SmallVectorImpl<ISD::OutputArg> &Outs,
                                 const SmallVectorImpl<SDValue> &OutVals,
                                 const SmallVectorImpl<ISD::InputArg> &Ins,
                                 SelectionDAG& DAG) const {}

/// Returns the target specific optimal type for load and store operations as
/// a result of memset, memcpy, and memmove lowering.
///
/// If DstAlign is zero, the destination alignment can satisfy any constraint.
/// Similarly, if SrcAlign is zero there is no need to check it against an
/// alignment requirement, probably because the source does not need to be
/// loaded. If 'IsMemset' is true, that means it's
/// expanding a memset. If 'ZeroMemset' is true, that means it's a memset of
/// zero. 'MemcpyStrSrc' indicates whether the memcpy source is constant so it
/// does not need to be loaded.  It returns EVT::Other if the type should be
/// determined using generic target-independent logic.
EVT HexagonTargetLowering::getOptimalMemOpType(
    const MemOp &Op, const AttributeList &FuncAttributes) const {}
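
// A minimal sketch of the decision this hook makes; the size and alignment
// thresholds below are illustrative assumptions, not the actual Hexagon
// policy.
[[maybe_unused]] static EVT sketchPickMemOpType(const MemOp &Op) {
  if (Op.size() >= 8 && Op.isAligned(Align(8)))
    return MVT::i64; // Doubleword accesses when everything is 8-byte aligned.
  if (Op.size() >= 4 && Op.isAligned(Align(4)))
    return MVT::i32;
  return MVT::Other; // Defer to the generic target-independent choice.
}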

bool HexagonTargetLowering::allowsMemoryAccess(
    LLVMContext &Context, const DataLayout &DL, EVT VT, unsigned AddrSpace,
    Align Alignment, MachineMemOperand::Flags Flags, unsigned *Fast) const {}

bool HexagonTargetLowering::allowsMisalignedMemoryAccesses(
    EVT VT, unsigned AddrSpace, Align Alignment, MachineMemOperand::Flags Flags,
    unsigned *Fast) const {}

std::pair<const TargetRegisterClass*, uint8_t>
HexagonTargetLowering::findRepresentativeClass(const TargetRegisterInfo *TRI,
      MVT VT) const {}

bool HexagonTargetLowering::shouldReduceLoadWidth(SDNode *Load,
      ISD::LoadExtType ExtTy, EVT NewVT) const {}

void HexagonTargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI,
      SDNode *Node) const {}

Value *HexagonTargetLowering::emitLoadLinked(IRBuilderBase &Builder,
                                             Type *ValueTy, Value *Addr,
                                             AtomicOrdering Ord) const {}

/// Perform a store-conditional operation to Addr. Return the status of the
/// store. This should be 0 if the store succeeded, non-zero otherwise.
Value *HexagonTargetLowering::emitStoreConditional(IRBuilderBase &Builder,
                                                   Value *Val, Value *Addr,
                                                   AtomicOrdering Ord) const {}

TargetLowering::AtomicExpansionKind
HexagonTargetLowering::shouldExpandAtomicLoadInIR(LoadInst *LI) const {}

TargetLowering::AtomicExpansionKind
HexagonTargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const {}

TargetLowering::AtomicExpansionKind
HexagonTargetLowering::shouldExpandAtomicCmpXchgInIR(
    AtomicCmpXchgInst *AI) const {}