llvm/lib/Target/PowerPC/PPCISelLowering.cpp

//===-- PPCISelLowering.cpp - PPC DAG Lowering Implementation -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the PPCISelLowering class.
//
//===----------------------------------------------------------------------===//

#include "PPCISelLowering.h"
#include "MCTargetDesc/PPCMCTargetDesc.h"
#include "MCTargetDesc/PPCPredicates.h"
#include "PPC.h"
#include "PPCCCState.h"
#include "PPCCallingConv.h"
#include "PPCFrameLowering.h"
#include "PPCInstrInfo.h"
#include "PPCMachineFunctionInfo.h"
#include "PPCPerfectShuffle.h"
#include "PPCRegisterInfo.h"
#include "PPCSubtarget.h"
#include "PPCTargetMachine.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RuntimeLibcallUtil.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/CodeGenTypes/MachineValueType.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsPowerPC.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/Value.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSectionXCOFF.h"
#include "llvm/MC/MCSymbolXCOFF.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <list>
#include <optional>
#include <utility>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE

static cl::opt<bool> DisableP10StoreForward(
    "disable-p10-store-forward",
    cl::desc("disable P10 store forward-friendly conversion"), cl::Hidden,
    cl::init(false));

static cl::opt<bool> DisablePPCPreinc(
    "disable-ppc-preinc",
    cl::desc("disable preincrement load/store generation on PPC"), cl::Hidden);

static cl::opt<bool> DisableILPPref(
    "disable-ppc-ilp-pref",
    cl::desc("disable setting the node scheduling preference to ILP on PPC"),
    cl::Hidden);

static cl::opt<bool> DisablePPCUnaligned(
    "disable-ppc-unaligned",
    cl::desc("disable unaligned load/store generation on PPC"), cl::Hidden);

static cl::opt<bool> DisableSCO(
    "disable-ppc-sco", cl::desc("disable sibling call optimization on ppc"),
    cl::Hidden);

static cl::opt<bool> DisableInnermostLoopAlign32(
    "disable-ppc-innermost-loop-align32",
    cl::desc("don't always align innermost loop to 32 bytes on ppc"),
    cl::Hidden);

static cl::opt<bool> UseAbsoluteJumpTables(
    "ppc-use-absolute-jumptables",
    cl::desc("use absolute jump tables on ppc"), cl::Hidden);

static cl::opt<bool>
    DisablePerfectShuffle("ppc-disable-perfect-shuffle",
                          cl::desc("disable vector permute decomposition"),
                          cl::init(true), cl::Hidden);

cl::opt<bool> DisableAutoPairedVecSt(
    "disable-auto-paired-vec-st",
    cl::desc("disable automatically generated 32byte paired vector stores"),
    cl::init(true), cl::Hidden);

static cl::opt<unsigned> PPCMinimumJumpTableEntries(
    "ppc-min-jump-table-entries", cl::init(64), cl::Hidden,
    cl::desc("Set minimum number of entries to use a jump table on PPC"));

static cl::opt<unsigned> PPCGatherAllAliasesMaxDepth(
    "ppc-gather-alias-max-depth", cl::init(18), cl::Hidden,
    cl::desc("max depth when checking alias info in GatherAllAliases()"));

static cl::opt<unsigned> PPCAIXTLSModelOptUseIEForLDLimit(
    "ppc-aix-shared-lib-tls-model-opt-limit", cl::init(1), cl::Hidden,
    cl::desc("Set inclusive limit count of TLS local-dynamic access(es) in a "
             "function to use initial-exec"));

STATISTIC(NumTailCalls, "Number of tail calls");
STATISTIC(NumSiblingCalls, "Number of sibling calls");
STATISTIC(ShufflesHandledWithVPERM,
          "Number of shuffles lowered to a VPERM or XXPERM");
STATISTIC(NumDynamicAllocaProbed, "Number of dynamic stack allocation probed");

static bool isNByteElemShuffleMask(ShuffleVectorSDNode *, unsigned, int);

static SDValue widenVec(SelectionDAG &DAG, SDValue Vec, const SDLoc &dl);

static const char AIXSSPCanaryWordName[] =;

// A faster local-[exec|dynamic] TLS access sequence (enabled with the
// -maix-small-local-[exec|dynamic]-tls option) can be produced for TLS
// variables; consistent with the IBM XL compiler, we apply a max size of
// slightly under 32KB.
constexpr uint64_t AIXSmallTlsPolicySizeLimit =;

// FIXME: Remove this once the bug has been fixed!
extern cl::opt<bool> ANDIGlueBug;

PPCTargetLowering::PPCTargetLowering(const PPCTargetMachine &TM,
                                     const PPCSubtarget &STI)
    :{}

// *********************************** NOTE ************************************
// For selecting load and store instructions, the addressing modes are defined
// as ComplexPatterns in PPCInstrInfo.td, which are then utilized in the TD
// patterns to match the load and store instructions.
//
// The TD definitions for the addressing modes correspond to their respective
// Select<AddrMode>Form() function in PPCISelDAGToDAG.cpp. These functions rely
// on SelectOptimalAddrMode(), which calls computeMOFlags() to compute the
// address mode flags of a particular node. Afterwards, the computed address
// flags are passed into getAddrModeForFlags() in order to retrieve the optimal
// addressing mode. SelectOptimalAddrMode() then sets the Base and Displacement
// accordingly, based on the preferred addressing mode.
//
// Within PPCISelLowering.h, there are two enums: MemOpFlags and AddrMode.
// MemOpFlags contains all the possible flags that can be used to compute the
// optimal addressing mode for load and store instructions.
// AddrMode contains all the possible load and store addressing modes available
// on Power (such as DForm, DSForm, DQForm, XForm, etc.)
//
// When adding new load and store instructions, it is possible that new address
// flags may need to be added into MemOpFlags, and a new addressing mode will
// need to be added to AddrMode. An entry of the new addressing mode (consisting
// of the minimal and main distinguishing address flags for the new load/store
// instructions) will need to be added into initializeAddrModeMap() below.
// Finally, when adding new addressing modes, getAddrModeForFlags() will need
// to be updated to account for the new mode when selecting the optimal
// addressing mode.
// *****************************************************************************
/// Initialize the map that relates the different addressing modes of the load
/// and store instructions to a set of flags. This ensures the load/store
/// instruction is correctly matched during instruction selection.
void PPCTargetLowering::initializeAddrModeMap() {}
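// For illustration only (a hedged sketch; the exact flag and mode names live
// in PPCISelLowering.h and the elided body above): each AddrMode maps to the
// flag combinations that select it, e.g.
//
//   AddrModesMap[PPC::AM_DForm] = {PPC::MOF_ZExt | PPC::MOF_RPlusSImm16, ...};
//
// getAddrModeForFlags() then returns the first addressing mode whose recorded
// flag set matches the flags computeMOFlags() derived for the memory access.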

/// getMaxByValAlign - Helper for getByValTypeAlignment to determine
/// the desired ByVal argument alignment.
static void getMaxByValAlign(Type *Ty, Align &MaxAlign, Align MaxMaxAlign) {}

/// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
/// function arguments in the caller parameter area.
uint64_t PPCTargetLowering::getByValTypeAlignment(Type *Ty,
                                                  const DataLayout &DL) const {}

bool PPCTargetLowering::useSoftFloat() const {}

bool PPCTargetLowering::hasSPE() const {}

bool PPCTargetLowering::preferIncOfAddToSubOfNot(EVT VT) const {}

bool PPCTargetLowering::shallExtractConstSplatVectorElementToStore(
    Type *VectorTy, unsigned ElemSizeInBits, unsigned &Index) const {}

const char *PPCTargetLowering::getTargetNodeName(unsigned Opcode) const {}

EVT PPCTargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &C,
                                          EVT VT) const {}

bool PPCTargetLowering::enableAggressiveFMAFusion(EVT VT) const {}

//===----------------------------------------------------------------------===//
// Node matching predicates, for use by the tblgen matching code.
//===----------------------------------------------------------------------===//

/// isFloatingPointZero - Return true if this is 0.0 or -0.0.
static bool isFloatingPointZero(SDValue Op) {}

/// isConstantOrUndef - Op is either an undef node or a ConstantSDNode.  Return
/// true if Op is undef or if it matches the specified value.
static bool isConstantOrUndef(int Op, int Val) {}
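// Conceptually (a sketch; undef shuffle-mask elements are encoded as negative
// values):
//
//   return Op < 0 || Op == Val;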

/// isVPKUHUMShuffleMask - Return true if this is the shuffle mask for a
/// VPKUHUM instruction.
/// The ShuffleKind distinguishes between big-endian operations with
/// two different inputs (0), either-endian operations with two identical
/// inputs (1), and little-endian operations with two different inputs (2).
/// For the latter, the input operands are swapped (see PPCInstrAltivec.td).
bool PPC::isVPKUHUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
                               SelectionDAG &DAG) {}
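// Worked example (not from the source; the mask follows from the vpkuhum
// semantics): for ShuffleKind 0 (big-endian, two inputs) the expected mask is
//
//   <1,3,5,7,9,11,13,15,17,19,21,23,25,27,29,31>
//
// i.e. the low-order byte of each halfword across both inputs, which is
// exactly what vpkuhum produces.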

/// isVPKUWUMShuffleMask - Return true if this is the shuffle mask for a
/// VPKUWUM instruction.
/// The ShuffleKind distinguishes between big-endian operations with
/// two different inputs (0), either-endian operations with two identical
/// inputs (1), and little-endian operations with two different inputs (2).
/// For the latter, the input operands are swapped (see PPCInstrAltivec.td).
bool PPC::isVPKUWUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
                               SelectionDAG &DAG) {}

/// isVPKUDUMShuffleMask - Return true if this is the shuffle mask for a
/// VPKUDUM instruction, AND the VPKUDUM instruction exists for the
/// current subtarget.
///
/// The ShuffleKind distinguishes between big-endian operations with
/// two different inputs (0), either-endian operations with two identical
/// inputs (1), and little-endian operations with two different inputs (2).
/// For the latter, the input operands are swapped (see PPCInstrAltivec.td).
bool PPC::isVPKUDUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
                               SelectionDAG &DAG) {}

/// isVMerge - Common function, used to match vmrg* shuffles.
///
static bool isVMerge(ShuffleVectorSDNode *N, unsigned UnitSize,
                     unsigned LHSStart, unsigned RHSStart) {}

/// isVMRGLShuffleMask - Return true if this is a shuffle mask suitable for
/// a VMRGL* instruction with the specified unit size (1,2 or 4 bytes).
/// The ShuffleKind distinguishes between big-endian merges with two
/// different inputs (0), either-endian merges with two identical inputs (1),
/// and little-endian merges with two different inputs (2).  For the latter,
/// the input operands are swapped (see PPCInstrAltivec.td).
bool PPC::isVMRGLShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
                             unsigned ShuffleKind, SelectionDAG &DAG) {}

/// isVMRGHShuffleMask - Return true if this is a shuffle mask suitable for
/// a VMRGH* instruction with the specified unit size (1,2 or 4 bytes).
/// The ShuffleKind distinguishes between big-endian merges with two
/// different inputs (0), either-endian merges with two identical inputs (1),
/// and little-endian merges with two different inputs (2).  For the latter,
/// the input operands are swapped (see PPCInstrAltivec.td).
bool PPC::isVMRGHShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
                             unsigned ShuffleKind, SelectionDAG &DAG) {}

/**
 * Common function used to match vmrgew and vmrgow shuffles
 *
 * The indexOffset determines whether to look for even or odd words in
 * the shuffle mask. This is based on the of the endianness of the target
 * machine.
 *   - Little Endian:
 *     - Use offset of 0 to check for odd elements
 *     - Use offset of 4 to check for even elements
 *   - Big Endian:
 *     - Use offset of 0 to check for even elements
 *     - Use offset of 4 to check for odd elements
 * A detailed description of the vector element ordering for little endian and
 * big endian can be found at
 * http://www.ibm.com/developerworks/library/l-ibm-xl-c-cpp-compiler/index.html
 * Targeting your applications - what little endian and big endian IBM XL C/C++
 * compiler differences mean to you
 *
 * The mask to the shuffle vector instruction specifies the indices of the
 * elements from the two input vectors to place in the result. The elements are
 * numbered in array-access order, starting with the first vector. These vectors
 * are always of type v16i8, thus each vector will contain 16 elements of size
 * 8 bits. More info on the shufflevector instruction can be found in the
 * Language Reference:
 * http://llvm.org/docs/LangRef.html#shufflevector-instruction
 *
 * The RHSStartValue indicates whether the same input vectors are used (unary)
 * or two different input vectors are used, based on the following:
 *   - If the instruction uses the same vector for both inputs, the range of the
 *     indices will be 0 to 15. In this case, the RHSStart value passed should
 *     be 0.
 *   - If the instruction has two different vectors then the range of the
 *     indices will be 0 to 31. In this case, the RHSStart value passed should
 *     be 16 (indices 0-15 specify elements in the first vector while indices 16
 *     to 31 specify elements in the second vector).
 *
 * \param[in] N The shuffle vector SD Node to analyze
 * \param[in] IndexOffset Specifies whether to look for even or odd elements
 * \param[in] RHSStartValue Specifies the starting index for the righthand input
 * vector to the shuffle_vector instruction
 * \return true iff this shuffle vector represents an even or odd word merge
 */
static bool isVMerge(ShuffleVectorSDNode *N, unsigned IndexOffset,
                     unsigned RHSStartValue) {}
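// Worked example (illustrative, derived from the rules above): on a
// big-endian target, vmrgew produces the words A0,B0,A2,B2, so the byte-level
// mask that isVMerge(N, /*IndexOffset=*/0, /*RHSStartValue=*/16) accepts is
//
//   <0,1,2,3, 16,17,18,19, 8,9,10,11, 24,25,26,27>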

/**
 * Determine if the specified shuffle mask is suitable for the vmrgew or
 * vmrgow instructions.
 *
 * \param[in] N The shuffle vector SD Node to analyze
 * \param[in] CheckEven Check for an even merge (true) or an odd merge (false)
 * \param[in] ShuffleKind Identify the type of merge:
 *   - 0 = big-endian merge with two different inputs;
 *   - 1 = either-endian merge with two identical inputs;
 *   - 2 = little-endian merge with two different inputs (inputs are swapped for
 *     little-endian merges).
 * \param[in] DAG The current SelectionDAG
 * \return true iff this shuffle mask is suitable for the vmrgew or vmrgow
 * instruction selected by \p CheckEven
 */
bool PPC::isVMRGEOShuffleMask(ShuffleVectorSDNode *N, bool CheckEven,
                              unsigned ShuffleKind, SelectionDAG &DAG) {}

/// isVSLDOIShuffleMask - If this is a vsldoi shuffle mask, return the shift
/// amount, otherwise return -1.
/// The ShuffleKind distinguishes between big-endian operations with two
/// different inputs (0), either-endian operations with two identical inputs
/// (1), and little-endian operations with two different inputs (2).  For the
/// latter, the input operands are swapped (see PPCInstrAltivec.td).
int PPC::isVSLDOIShuffleMask(SDNode *N, unsigned ShuffleKind,
                             SelectionDAG &DAG) {}

/// isSplatShuffleMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a splat of a single element that is suitable for input to
/// one of the splat operations (VSPLTB/VSPLTH/VSPLTW/XXSPLTW/LXVDSX/etc.).
bool PPC::isSplatShuffleMask(ShuffleVectorSDNode *N, unsigned EltSize) {}
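// Worked example (illustrative): with EltSize == 4, a splat of word element 1
// corresponds to the v16i8 mask
//
//   <4,5,6,7, 4,5,6,7, 4,5,6,7, 4,5,6,7>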

/// Check that the mask is shuffling N byte elements. Within each N byte
/// element of the mask, the indices could be either in increasing or
/// decreasing order as long as they are consecutive.
/// \param[in] N the shuffle vector SD Node to analyze
/// \param[in] Width the element width in bytes, could be 2/4/8/16 (HalfWord/
/// Word/DoubleWord/QuadWord).
/// \param[in] StepLen the index delta between adjacent mask indices within an
/// N byte element: 1 if the mask is in increasing order, -1 if decreasing.
/// \return true iff the mask is shuffling N byte elements.
static bool isNByteElemShuffleMask(ShuffleVectorSDNode *N, unsigned Width,
                                   int StepLen) {}

bool PPC::isXXINSERTWMask(ShuffleVectorSDNode *N, unsigned &ShiftElts,
                          unsigned &InsertAtByte, bool &Swap, bool IsLE) {}

bool PPC::isXXSLDWIShuffleMask(ShuffleVectorSDNode *N, unsigned &ShiftElts,
                               bool &Swap, bool IsLE) {}

bool static isXXBRShuffleMaskHelper(ShuffleVectorSDNode *N, int Width) {}

bool PPC::isXXBRHShuffleMask(ShuffleVectorSDNode *N) {}

bool PPC::isXXBRWShuffleMask(ShuffleVectorSDNode *N) {}

bool PPC::isXXBRDShuffleMask(ShuffleVectorSDNode *N) {}

bool PPC::isXXBRQShuffleMask(ShuffleVectorSDNode *N) {}

/// Can node \p N be lowered to an XXPERMDI instruction? If so, set \p Swap
/// if the inputs to the instruction should be swapped and set \p DM to the
/// value for the immediate.
/// Specifically, set \p Swap to true only if \p N can be lowered to XXPERMDI
/// AND element 0 of the result comes from the first input (LE) or second input
/// (BE). Set \p DM to the calculated result (0-3) only if \p N can be lowered.
/// \return true iff the given mask of shuffle node \p N is an XXPERMDI shuffle
/// mask.
bool PPC::isXXPERMDIShuffleMask(ShuffleVectorSDNode *N, unsigned &DM,
                                bool &Swap, bool IsLE) {}
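// Worked example (illustrative; per the ISA, the high DM bit selects the
// doubleword taken from the first input and the low bit the doubleword taken
// from the second): a big-endian v2i64 mask of <1,2> takes doubleword 1 of
// the first input and doubleword 0 of the second, so DM = 0b10 = 2. With
// both inputs equal this is the xxswapd idiom.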


/// getSplatIdxForPPCMnemonics - Return the splat index as a value that is
/// appropriate for PPC mnemonics (which have a big endian bias - namely
/// elements are counted from the left of the vector register).
unsigned PPC::getSplatIdxForPPCMnemonics(SDNode *N, unsigned EltSize,
                                         SelectionDAG &DAG) {}

/// get_VSPLTI_elt - If this is a build_vector of constants which can be formed
/// by using a vspltis[bhw] instruction of the specified element size, return
/// the constant being splatted.  The ByteSize field indicates the number of
/// bytes of each element [124] -> [bhw].
SDValue PPC::get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG) {}

//===----------------------------------------------------------------------===//
//  Addressing Mode Selection
//===----------------------------------------------------------------------===//

/// isIntS16Immediate - This method tests to see if the node is either a 32-bit
/// or 64-bit immediate, and if the value can be accurately represented as a
/// sign extension from a 16-bit value.  If so, this returns true and the
/// immediate.
bool llvm::isIntS16Immediate(SDNode *N, int16_t &Imm) {}
bool llvm::isIntS16Immediate(SDValue Op, int16_t &Imm) {}
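// A minimal sketch of the test (the in-tree bodies are elided above; this
// assumes the node is a ConstantSDNode): the value qualifies iff truncating
// it to 16 bits and sign-extending back is lossless, e.g.
//
//   auto *C = dyn_cast<ConstantSDNode>(N);
//   if (!C) return false;
//   Imm = (int16_t)C->getSExtValue();
//   return isInt<16>(C->getSExtValue());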

/// Used when computing address flags for selecting loads and stores.
/// If we have an OR, check if the LHS and RHS are provably disjoint.
/// An OR of two provably disjoint values is equivalent to an ADD.
/// Most PPC load/store instructions compute the effective address as a sum,
/// so doing this conversion is useful.
static bool provablyDisjointOr(SelectionDAG &DAG, const SDValue &N) {}
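// A sketch of the disjointness test, assuming the standard KnownBits
// interface (llvm/Support/KnownBits.h, included above); the elided body may
// differ in details:
//
//   if (N.getOpcode() != ISD::OR)
//     return false;
//   KnownBits LHS = DAG.computeKnownBits(N.getOperand(0));
//   KnownBits RHS = DAG.computeKnownBits(N.getOperand(1));
//   // OR behaves like ADD iff no bit can possibly be set in both operands.
//   return (~(LHS.Zero | RHS.Zero)).isZero();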

/// SelectAddressEVXRegReg - Given the specified address, check to see if it can
/// be represented as an indexed [r+r] operation.
bool PPCTargetLowering::SelectAddressEVXRegReg(SDValue N, SDValue &Base,
                                               SDValue &Index,
                                               SelectionDAG &DAG) const {}

/// isIntS34Immediate - This method tests whether the value of the given node
/// can be accurately represented as a sign extension from a 34-bit value.  If
/// so, this returns true and the immediate.
bool llvm::isIntS34Immediate(SDNode *N, int64_t &Imm) {}
bool llvm::isIntS34Immediate(SDValue Op, int64_t &Imm) {}

/// SelectAddressRegReg - Given the specified address, check to see if it
/// can be represented as an indexed [r+r] operation.  Returns false if it
/// can be more efficiently represented as [r+imm]. If \p EncodingAlignment is
/// non-zero and N can be represented by a base register plus a signed 16-bit
/// displacement, make a more precise judgement by checking (displacement % \p
/// EncodingAlignment).
bool PPCTargetLowering::SelectAddressRegReg(
    SDValue N, SDValue &Base, SDValue &Index, SelectionDAG &DAG,
    MaybeAlign EncodingAlignment) const {}

// If we happen to be doing an i64 load or store into a stack slot that has
// less than a 4-byte alignment, then the frame-index elimination may need to
// use an indexed load or store instruction (because the offset may not be a
// multiple of 4). The extra register needed to hold the offset comes from the
// register scavenger, and it is possible that the scavenger will need to use
// an emergency spill slot. As a result, we need to make sure that a spill slot
// is allocated when doing an i64 load/store into a less-than-4-byte-aligned
// stack slot.
static void fixupFuncForFI(SelectionDAG &DAG, int FrameIdx, EVT VT) {}

/// Returns true if the address N can be represented by a base register plus
/// a signed 16-bit displacement [r+imm], and if it is not better
/// represented as reg+reg.  If \p EncodingAlignment is non-zero, only accept
/// displacements that are multiples of that value.
bool PPCTargetLowering::SelectAddressRegImm(
    SDValue N, SDValue &Disp, SDValue &Base, SelectionDAG &DAG,
    MaybeAlign EncodingAlignment) const {}

/// Similar to the 16-bit case but for instructions that take a 34-bit
/// displacement field (prefixed loads/stores).
bool PPCTargetLowering::SelectAddressRegImm34(SDValue N, SDValue &Disp,
                                              SDValue &Base,
                                              SelectionDAG &DAG) const {}

/// SelectAddressRegRegOnly - Given the specified address, force it to be
/// represented as an indexed [r+r] operation.
bool PPCTargetLowering::SelectAddressRegRegOnly(SDValue N, SDValue &Base,
                                                SDValue &Index,
                                                SelectionDAG &DAG) const {}

template <typename Ty> static bool isValidPCRelNode(SDValue N) {}

/// Returns true if this address is a PC Relative address.
/// PC Relative addresses are marked with the flag PPCII::MO_PCREL_FLAG
/// or if the node opcode is PPCISD::MAT_PCREL_ADDR.
bool PPCTargetLowering::SelectAddressPCRel(SDValue N, SDValue &Base) const {}

/// Returns true if we should use a direct load into vector instruction
/// (such as lxsd or lfd), instead of a load into gpr + direct move sequence.
static bool usePartialVectorLoads(SDNode *N, const PPCSubtarget& ST) {}

/// getPreIndexedAddressParts - Returns true, and sets the base pointer, offset
/// pointer, and addressing mode by reference, if the node's address can be
/// legally represented as a pre-indexed load / store address.
bool PPCTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base,
                                                  SDValue &Offset,
                                                  ISD::MemIndexedMode &AM,
                                                  SelectionDAG &DAG) const {}

//===----------------------------------------------------------------------===//
//  LowerOperation implementation
//===----------------------------------------------------------------------===//

/// Set HiOpFlags and LoOpFlags to the target MO flags to use when referencing
/// a label, based on whether the reference is PIC (via a PICBase) or not.
static void getLabelAccessInfo(bool IsPIC, const PPCSubtarget &Subtarget,
                               unsigned &HiOpFlags, unsigned &LoOpFlags,
                               const GlobalValue *GV = nullptr) {}

static SDValue LowerLabelRef(SDValue HiPart, SDValue LoPart, bool isPIC,
                             SelectionDAG &DAG) {}

static void setUsesTOCBasePtr(MachineFunction &MF) {}

static void setUsesTOCBasePtr(SelectionDAG &DAG) {}

SDValue PPCTargetLowering::getTOCEntry(SelectionDAG &DAG, const SDLoc &dl,
                                       SDValue GA) const {}

SDValue PPCTargetLowering::LowerConstantPool(SDValue Op,
                                             SelectionDAG &DAG) const {}

// For 64-bit PowerPC, prefer the more compact relative encodings.
// This trades 32 bits per jump table entry for one or two instructions
// at the jump site.
unsigned PPCTargetLowering::getJumpTableEncoding() const {}

bool PPCTargetLowering::isJumpTableRelative() const {}

SDValue PPCTargetLowering::getPICJumpTableRelocBase(SDValue Table,
                                                    SelectionDAG &DAG) const {}

const MCExpr *
PPCTargetLowering::getPICJumpTableRelocBaseExpr(const MachineFunction *MF,
                                                unsigned JTI,
                                                MCContext &Ctx) const {}

SDValue PPCTargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const {}

SDValue PPCTargetLowering::LowerBlockAddress(SDValue Op,
                                             SelectionDAG &DAG) const {}

SDValue PPCTargetLowering::LowerGlobalTLSAddress(SDValue Op,
                                              SelectionDAG &DAG) const {}

/// updateForAIXShLibTLSModelOpt - Helper to initialize TLS model opt settings,
/// and then apply the update.
static void updateForAIXShLibTLSModelOpt(TLSModel::Model &Model,
                                         SelectionDAG &DAG,
                                         const TargetMachine &TM) {}

SDValue PPCTargetLowering::LowerGlobalTLSAddressAIX(SDValue Op,
                                                    SelectionDAG &DAG) const {}

SDValue PPCTargetLowering::LowerGlobalTLSAddressLinux(SDValue Op,
                                                      SelectionDAG &DAG) const {}

SDValue PPCTargetLowering::LowerGlobalAddress(SDValue Op,
                                              SelectionDAG &DAG) const {}

SDValue PPCTargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {}

SDValue PPCTargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const {}

SDValue PPCTargetLowering::LowerVACOPY(SDValue Op, SelectionDAG &DAG) const {}

SDValue PPCTargetLowering::LowerADJUST_TRAMPOLINE(SDValue Op,
                                                  SelectionDAG &DAG) const {}

SDValue PPCTargetLowering::LowerINLINEASM(SDValue Op, SelectionDAG &DAG) const {}

SDValue PPCTargetLowering::LowerINIT_TRAMPOLINE(SDValue Op,
                                                SelectionDAG &DAG) const {}

SDValue PPCTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {}

/// FPR - The set of FP registers that should be allocated for arguments
/// on Darwin and AIX.
static const MCPhysReg FPR[] =;

/// CalculateStackSlotSize - Calculates the size reserved for this argument on
/// the stack.
static unsigned CalculateStackSlotSize(EVT ArgVT, ISD::ArgFlagsTy Flags,
                                       unsigned PtrByteSize) {}

/// CalculateStackSlotAlignment - Calculates the alignment of this argument
/// on the stack.
static Align CalculateStackSlotAlignment(EVT ArgVT, EVT OrigVT,
                                         ISD::ArgFlagsTy Flags,
                                         unsigned PtrByteSize) {}

/// CalculateStackSlotUsed - Return whether this argument will use its
/// stack slot (instead of being passed in registers).  ArgOffset,
/// AvailableFPRs, and AvailableVRs must hold the current argument
/// position, and will be updated to account for this argument.
static bool CalculateStackSlotUsed(EVT ArgVT, EVT OrigVT, ISD::ArgFlagsTy Flags,
                                   unsigned PtrByteSize, unsigned LinkageSize,
                                   unsigned ParamAreaSize, unsigned &ArgOffset,
                                   unsigned &AvailableFPRs,
                                   unsigned &AvailableVRs) {}

/// EnsureStackAlignment - Round stack frame size up from NumBytes to
/// ensure minimum alignment required for target.
static unsigned EnsureStackAlignment(const PPCFrameLowering *Lowering,
                                     unsigned NumBytes) {}
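// Conceptually this is just a round-up to the target stack alignment
// (a sketch):
//
//   return alignTo(NumBytes, Lowering->getStackAlign());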

SDValue PPCTargetLowering::LowerFormalArguments(
    SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {}

SDValue PPCTargetLowering::LowerFormalArguments_32SVR4(
    SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {}

// PPC64 passes i8, i16, and i32 values in i64 registers. Promote
// value to MVT::i64 and then truncate to the correct register size.
SDValue PPCTargetLowering::extendArgForPPC64(ISD::ArgFlagsTy Flags,
                                             EVT ObjectVT, SelectionDAG &DAG,
                                             SDValue ArgVal,
                                             const SDLoc &dl) const {}

SDValue PPCTargetLowering::LowerFormalArguments_64SVR4(
    SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {}

/// CalculateTailCallSPDiff - Get the amount the stack pointer has to be
/// adjusted to accommodate the arguments for the tailcall.
static int CalculateTailCallSPDiff(SelectionDAG& DAG, bool isTailCall,
                                   unsigned ParamSize) {}

static bool isFunctionGlobalAddress(const GlobalValue *CalleeGV);

static bool callsShareTOCBase(const Function *Caller,
                              const GlobalValue *CalleeGV,
                              const TargetMachine &TM) {}

static bool
needStackSlotPassParameters(const PPCSubtarget &Subtarget,
                            const SmallVectorImpl<ISD::OutputArg> &Outs) {}

static bool hasSameArgumentList(const Function *CallerFn, const CallBase &CB) {}

// Returns true if TCO is possible between the caller's and callee's
// calling conventions.
static bool
areCallingConvEligibleForTCO_64SVR4(CallingConv::ID CallerCC,
                                    CallingConv::ID CalleeCC) {}

bool PPCTargetLowering::IsEligibleForTailCallOptimization_64SVR4(
    const GlobalValue *CalleeGV, CallingConv::ID CalleeCC,
    CallingConv::ID CallerCC, const CallBase *CB, bool isVarArg,
    const SmallVectorImpl<ISD::OutputArg> &Outs,
    const SmallVectorImpl<ISD::InputArg> &Ins, const Function *CallerFunc,
    bool isCalleeExternalSymbol) const {}

/// IsEligibleForTailCallOptimization - Check whether the call is eligible
/// for tail call optimization. Targets which want to do tail call
/// optimization should implement this function.
bool PPCTargetLowering::IsEligibleForTailCallOptimization(
    const GlobalValue *CalleeGV, CallingConv::ID CalleeCC,
    CallingConv::ID CallerCC, bool isVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins) const {}

/// isBLACompatibleAddress - Return the immediate to use if the specified
/// 32-bit value is representable in the immediate field of a BxA instruction.
static SDNode *isBLACompatibleAddress(SDValue Op, SelectionDAG &DAG) {}

namespace {

struct TailCallArgumentInfo {};

} // end anonymous namespace

/// StoreTailCallArgumentsToStackSlot - Stores arguments to their stack slot.
static void StoreTailCallArgumentsToStackSlot(
    SelectionDAG &DAG, SDValue Chain,
    const SmallVectorImpl<TailCallArgumentInfo> &TailCallArgs,
    SmallVectorImpl<SDValue> &MemOpChains, const SDLoc &dl) {}

/// EmitTailCallStoreFPAndRetAddr - Move the frame pointer and return address to
/// the appropriate stack slot for the tail call optimized function call.
static SDValue EmitTailCallStoreFPAndRetAddr(SelectionDAG &DAG, SDValue Chain,
                                             SDValue OldRetAddr, SDValue OldFP,
                                             int SPDiff, const SDLoc &dl) {}

/// CalculateTailCallArgDest - Remember the argument for later processing, and
/// calculate its stack position.
static void
CalculateTailCallArgDest(SelectionDAG &DAG, MachineFunction &MF, bool isPPC64,
                         SDValue Arg, int SPDiff, unsigned ArgOffset,
                     SmallVectorImpl<TailCallArgumentInfo>& TailCallArguments) {}

/// EmitTailCallLoadFPAndRetAddr - Emit loads of the frame pointer and return
/// address from their stack slots. Returns the chain as result and the loaded
/// values in LROpOut/FPOpOut. Used when tail calling.
SDValue PPCTargetLowering::EmitTailCallLoadFPAndRetAddr(
    SelectionDAG &DAG, int SPDiff, SDValue Chain, SDValue &LROpOut,
    SDValue &FPOpOut, const SDLoc &dl) const {}

/// CreateCopyOfByValArgument - Make a copy of an aggregate at address specified
/// by "Src" to address "Dst" of size "Size".  Alignment information is
/// specified by the specific parameter attribute. The copy will be passed as
/// a byval function parameter.
/// Sometimes what we are copying is the end of a larger object, the part that
/// does not fit in registers.
static SDValue CreateCopyOfByValArgument(SDValue Src, SDValue Dst,
                                         SDValue Chain, ISD::ArgFlagsTy Flags,
                                         SelectionDAG &DAG, const SDLoc &dl) {}

/// LowerMemOpCallTo - Store the argument to the stack or remember it in case of
/// tail calls.
static void LowerMemOpCallTo(
    SelectionDAG &DAG, MachineFunction &MF, SDValue Chain, SDValue Arg,
    SDValue PtrOff, int SPDiff, unsigned ArgOffset, bool isPPC64,
    bool isTailCall, bool isVector, SmallVectorImpl<SDValue> &MemOpChains,
    SmallVectorImpl<TailCallArgumentInfo> &TailCallArguments, const SDLoc &dl) {}

static void
PrepareTailCall(SelectionDAG &DAG, SDValue &InGlue, SDValue &Chain,
                const SDLoc &dl, int SPDiff, unsigned NumBytes, SDValue LROp,
                SDValue FPOp,
                SmallVectorImpl<TailCallArgumentInfo> &TailCallArguments) {}

// Is this global address that of a function that can be called by name? (as
// opposed to something that must hold a descriptor for an indirect call).
static bool isFunctionGlobalAddress(const GlobalValue *GV) {}

SDValue PPCTargetLowering::LowerCallResult(
    SDValue Chain, SDValue InGlue, CallingConv::ID CallConv, bool isVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {}

static bool isIndirectCall(const SDValue &Callee, SelectionDAG &DAG,
                           const PPCSubtarget &Subtarget, bool isPatchPoint) {}

// AIX and 64-bit ELF ABIs w/o PCRel require a TOC save/restore around calls.
static inline bool isTOCSaveRestoreRequired(const PPCSubtarget &Subtarget) {}

static unsigned getCallOpcode(PPCTargetLowering::CallFlags CFlags,
                              const Function &Caller, const SDValue &Callee,
                              const PPCSubtarget &Subtarget,
                              const TargetMachine &TM,
                              bool IsStrictFPCall = false) {}

static SDValue transformCallee(const SDValue &Callee, SelectionDAG &DAG,
                               const SDLoc &dl, const PPCSubtarget &Subtarget) {}

static SDValue getOutputChainFromCallSeq(SDValue CallSeqStart) {}

// Creates the node that moves a function's address into the count register
// to prepare for an indirect call instruction.
static void prepareIndirectCall(SelectionDAG &DAG, SDValue &Callee,
                                SDValue &Glue, SDValue &Chain,
                                const SDLoc &dl) {}

static void prepareDescriptorIndirectCall(SelectionDAG &DAG, SDValue &Callee,
                                          SDValue &Glue, SDValue &Chain,
                                          SDValue CallSeqStart,
                                          const CallBase *CB, const SDLoc &dl,
                                          bool hasNest,
                                          const PPCSubtarget &Subtarget) {}

static void
buildCallOperands(SmallVectorImpl<SDValue> &Ops,
                  PPCTargetLowering::CallFlags CFlags, const SDLoc &dl,
                  SelectionDAG &DAG,
                  SmallVector<std::pair<unsigned, SDValue>, 8> &RegsToPass,
                  SDValue Glue, SDValue Chain, SDValue &Callee, int SPDiff,
                  const PPCSubtarget &Subtarget) {}

SDValue PPCTargetLowering::FinishCall(
    CallFlags CFlags, const SDLoc &dl, SelectionDAG &DAG,
    SmallVector<std::pair<unsigned, SDValue>, 8> &RegsToPass, SDValue Glue,
    SDValue Chain, SDValue CallSeqStart, SDValue &Callee, int SPDiff,
    unsigned NumBytes, const SmallVectorImpl<ISD::InputArg> &Ins,
    SmallVectorImpl<SDValue> &InVals, const CallBase *CB) const {}

bool PPCTargetLowering::supportsTailCallFor(const CallBase *CB) const {}

bool PPCTargetLowering::isEligibleForTCO(
    const GlobalValue *CalleeGV, CallingConv::ID CalleeCC,
    CallingConv::ID CallerCC, const CallBase *CB, bool isVarArg,
    const SmallVectorImpl<ISD::OutputArg> &Outs,
    const SmallVectorImpl<ISD::InputArg> &Ins, const Function *CallerFunc,
    bool isCalleeExternalSymbol) const {}

SDValue
PPCTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
                             SmallVectorImpl<SDValue> &InVals) const {}

SDValue PPCTargetLowering::LowerCall_32SVR4(
    SDValue Chain, SDValue Callee, CallFlags CFlags,
    const SmallVectorImpl<ISD::OutputArg> &Outs,
    const SmallVectorImpl<SDValue> &OutVals,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
    const CallBase *CB) const {}

// Copy an argument into memory, being careful to do this outside the
// call sequence for the call to which the argument belongs.
SDValue PPCTargetLowering::createMemcpyOutsideCallSeq(
    SDValue Arg, SDValue PtrOff, SDValue CallSeqStart, ISD::ArgFlagsTy Flags,
    SelectionDAG &DAG, const SDLoc &dl) const {}

SDValue PPCTargetLowering::LowerCall_64SVR4(
    SDValue Chain, SDValue Callee, CallFlags CFlags,
    const SmallVectorImpl<ISD::OutputArg> &Outs,
    const SmallVectorImpl<SDValue> &OutVals,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
    const CallBase *CB) const {}

// Returns true when the shadow of a general purpose argument register
// in the parameter save area is aligned to at least 'RequiredAlign'.
static bool isGPRShadowAligned(MCPhysReg Reg, Align RequiredAlign) {}

static bool CC_AIX(unsigned ValNo, MVT ValVT, MVT LocVT,
                   CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
                   CCState &S) {}

// So far, this function is only used by LowerFormalArguments_AIX()
static const TargetRegisterClass *getRegClassForSVT(MVT::SimpleValueType SVT,
                                                    bool IsPPC64,
                                                    bool HasP8Vector,
                                                    bool HasVSX) {}

static SDValue truncateScalarIntegerArg(ISD::ArgFlagsTy Flags, EVT ValVT,
                                        SelectionDAG &DAG, SDValue ArgValue,
                                        MVT LocVT, const SDLoc &dl) {}

static unsigned mapArgRegToOffsetAIX(unsigned Reg, const PPCFrameLowering *FL) {}

//   AIX ABI Stack Frame Layout:
//
//   Low Memory +--------------------------------------------+
//   SP   +---> | Back chain                                 | ---+
//        |     +--------------------------------------------+    |
//        |     | Saved Condition Register                   |    |
//        |     +--------------------------------------------+    |
//        |     | Saved Linkage Register                     |    |
//        |     +--------------------------------------------+    | Linkage Area
//        |     | Reserved for compilers                     |    |
//        |     +--------------------------------------------+    |
//        |     | Reserved for binders                       |    |
//        |     +--------------------------------------------+    |
//        |     | Saved TOC pointer                          | ---+
//        |     +--------------------------------------------+
//        |     | Parameter save area                        |
//        |     +--------------------------------------------+
//        |     | Alloca space                               |
//        |     +--------------------------------------------+
//        |     | Local variable space                       |
//        |     +--------------------------------------------+
//        |     | Float/int conversion temporary             |
//        |     +--------------------------------------------+
//        |     | Save area for AltiVec registers            |
//        |     +--------------------------------------------+
//        |     | AltiVec alignment padding                  |
//        |     +--------------------------------------------+
//        |     | Save area for VRSAVE register              |
//        |     +--------------------------------------------+
//        |     | Save area for General Purpose registers    |
//        |     +--------------------------------------------+
//        |     | Save area for Floating Point registers     |
//        |     +--------------------------------------------+
//        +---- | Back chain                                 |
// High Memory  +--------------------------------------------+
//
//  Specifications:
//  AIX 7.2 Assembler Language Reference
//  Subroutine linkage convention

SDValue PPCTargetLowering::LowerFormalArguments_AIX(
    SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {}

SDValue PPCTargetLowering::LowerCall_AIX(
    SDValue Chain, SDValue Callee, CallFlags CFlags,
    const SmallVectorImpl<ISD::OutputArg> &Outs,
    const SmallVectorImpl<SDValue> &OutVals,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
    const CallBase *CB) const {}

bool
PPCTargetLowering::CanLowerReturn(CallingConv::ID CallConv,
                                  MachineFunction &MF, bool isVarArg,
                                  const SmallVectorImpl<ISD::OutputArg> &Outs,
                                  LLVMContext &Context) const {}

SDValue
PPCTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
                               bool isVarArg,
                               const SmallVectorImpl<ISD::OutputArg> &Outs,
                               const SmallVectorImpl<SDValue> &OutVals,
                               const SDLoc &dl, SelectionDAG &DAG) const {}

SDValue
PPCTargetLowering::LowerGET_DYNAMIC_AREA_OFFSET(SDValue Op,
                                                SelectionDAG &DAG) const {}

SDValue PPCTargetLowering::LowerSTACKRESTORE(SDValue Op,
                                             SelectionDAG &DAG) const {}

SDValue PPCTargetLowering::getReturnAddrFrameIndex(SelectionDAG &DAG) const {}

SDValue
PPCTargetLowering::getFramePointerFrameIndex(SelectionDAG & DAG) const {}

SDValue PPCTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
                                                   SelectionDAG &DAG) const {}

SDValue PPCTargetLowering::LowerEH_DWARF_CFA(SDValue Op,
                                                     SelectionDAG &DAG) const {}

SDValue PPCTargetLowering::lowerEH_SJLJ_SETJMP(SDValue Op,
                                               SelectionDAG &DAG) const {}

SDValue PPCTargetLowering::lowerEH_SJLJ_LONGJMP(SDValue Op,
                                                SelectionDAG &DAG) const {}

SDValue PPCTargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {}

SDValue PPCTargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {}

// FIXME: Remove this once the ANDI glue bug is fixed:
SDValue PPCTargetLowering::LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const {}

SDValue PPCTargetLowering::LowerTRUNCATEVector(SDValue Op,
                                               SelectionDAG &DAG) const {}

/// LowerSELECT_CC - Lower floating-point select_cc's into the fsel instruction
/// when possible.
SDValue PPCTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {}

static unsigned getPPCStrictOpcode(unsigned Opc) {}

static SDValue convertFPToInt(SDValue Op, SelectionDAG &DAG,
                              const PPCSubtarget &Subtarget) {}

void PPCTargetLowering::LowerFP_TO_INTForReuse(SDValue Op, ReuseLoadInfo &RLI,
                                               SelectionDAG &DAG,
                                               const SDLoc &dl) const {}

/// Custom lowers floating point to integer conversions to use
/// the direct move instructions available in ISA 2.07 to avoid the
/// need for load/store combinations.
SDValue PPCTargetLowering::LowerFP_TO_INTDirectMove(SDValue Op,
                                                    SelectionDAG &DAG,
                                                    const SDLoc &dl) const {}

SDValue PPCTargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG,
                                          const SDLoc &dl) const {}

// We're trying to insert a regular store, S, and then a load, L. If the
// incoming value, O, is a load, we might just be able to have our load use the
// address used by O. However, we don't know if anything else will store to
// that address before we can load from it. To prevent this situation, we need
// to insert our load, L, into the chain as a peer of O. To do this, we give L
// the same chain operand as O, we create a token factor from the chain results
// of O and L, and we replace all uses of O's chain result with that token
// factor (see spliceIntoChain below for this last part).
bool PPCTargetLowering::canReuseLoadAddress(SDValue Op, EVT MemVT,
                                            ReuseLoadInfo &RLI,
                                            SelectionDAG &DAG,
                                            ISD::LoadExtType ET) const {}

// Given the head of the old chain, ResChain, insert a token factor containing
// it and NewResChain, and make users of ResChain now be users of that token
// factor.
// TODO: Remove and use DAG::makeEquivalentMemoryOrdering() instead.
void PPCTargetLowering::spliceIntoChain(SDValue ResChain,
                                        SDValue NewResChain,
                                        SelectionDAG &DAG) const {}
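// Rough sketch of the splice (hedged; see the TODO above for the preferred
// replacement): build TF = TokenFactor(ResChain, NewResChain), then call
// DAG.ReplaceAllUsesOfValueWith(ResChain, TF), and finally restore ResChain
// as an operand of TF itself, since the wholesale replacement also rewrote
// the token factor's own operand.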

/// Analyze the profitability of a direct move: prefer a float load over an
/// int load plus direct move when the int load has no integer uses.
bool PPCTargetLowering::directMoveIsProfitable(const SDValue &Op) const {}

static SDValue convertIntToFP(SDValue Op, SDValue Src, SelectionDAG &DAG,
                              const PPCSubtarget &Subtarget,
                              SDValue Chain = SDValue()) {}

/// Custom lowers integer to floating point conversions to use
/// the direct move instructions available in ISA 2.07 to avoid the
/// need for load/store combinations.
SDValue PPCTargetLowering::LowerINT_TO_FPDirectMove(SDValue Op,
                                                    SelectionDAG &DAG,
                                                    const SDLoc &dl) const {}

static SDValue widenVec(SelectionDAG &DAG, SDValue Vec, const SDLoc &dl) {}

SDValue PPCTargetLowering::LowerINT_TO_FPVector(SDValue Op, SelectionDAG &DAG,
                                                const SDLoc &dl) const {}

SDValue PPCTargetLowering::LowerINT_TO_FP(SDValue Op,
                                          SelectionDAG &DAG) const {}

SDValue PPCTargetLowering::LowerSET_ROUNDING(SDValue Op,
                                             SelectionDAG &DAG) const {}

SDValue PPCTargetLowering::LowerGET_ROUNDING(SDValue Op,
                                             SelectionDAG &DAG) const {}

SDValue PPCTargetLowering::LowerSHL_PARTS(SDValue Op, SelectionDAG &DAG) const {}

SDValue PPCTargetLowering::LowerSRL_PARTS(SDValue Op, SelectionDAG &DAG) const {}

SDValue PPCTargetLowering::LowerSRA_PARTS(SDValue Op, SelectionDAG &DAG) const {}

SDValue PPCTargetLowering::LowerFunnelShift(SDValue Op,
                                            SelectionDAG &DAG) const {}

//===----------------------------------------------------------------------===//
// Vector related lowering.
//

/// getCanonicalConstSplat - Build a canonical splat immediate of Val with an
/// element size of SplatSize. Cast the result to VT.
static SDValue getCanonicalConstSplat(uint64_t Val, unsigned SplatSize, EVT VT,
                                      SelectionDAG &DAG, const SDLoc &dl) {}

/// BuildIntrinsicOp - Return a unary operator intrinsic node with the
/// specified intrinsic ID.
static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op, SelectionDAG &DAG,
                                const SDLoc &dl, EVT DestVT = MVT::Other) {}

/// BuildIntrinsicOp - Return a binary operator intrinsic node with the
/// specified intrinsic ID.
static SDValue BuildIntrinsicOp(unsigned IID, SDValue LHS, SDValue RHS,
                                SelectionDAG &DAG, const SDLoc &dl,
                                EVT DestVT = MVT::Other) {}

/// BuildIntrinsicOp - Return a ternary operator intrinsic node with the
/// specified intrinsic ID.
static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op0, SDValue Op1,
                                SDValue Op2, SelectionDAG &DAG, const SDLoc &dl,
                                EVT DestVT = MVT::Other) {}
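// All three overloads reduce to the same pattern (a sketch; the operand count
// varies with the arity):
//
//   return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT,
//                      DAG.getConstant(IID, dl, MVT::i32), Op0, Op1, Op2);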

/// BuildVSLDOI - Return a VECTOR_SHUFFLE that is a vsldoi of the specified
/// amount.  The result has the specified value type.
static SDValue BuildVSLDOI(SDValue LHS, SDValue RHS, unsigned Amt, EVT VT,
                           SelectionDAG &DAG, const SDLoc &dl) {}
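// Sketch of the construction: bitcast both inputs to v16i8, shuffle with the
// mask <Amt, Amt+1, ..., Amt+15>, and bitcast the result back to VT, e.g.
//
//   int Ops[16];
//   for (unsigned i = 0; i != 16; ++i)
//     Ops[i] = i + Amt;
//   SDValue T = DAG.getVectorShuffle(MVT::v16i8, dl, LHS, RHS, Ops);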

/// Do we have an efficient pattern in a .td file for this node?
///
/// \param V - pointer to the BuildVectorSDNode being matched
/// \param HasDirectMove - does this subtarget have VSR <-> GPR direct moves?
///
/// There are some patterns where it is beneficial to keep a BUILD_VECTOR
/// node as a BUILD_VECTOR node rather than expanding it. The patterns where
/// the opposite is true (expansion is beneficial) are:
/// - The node builds a vector out of integers that are not 32 or 64-bits
/// - The node builds a vector out of constants
/// - The node is a "load-and-splat"
/// In all other cases, we will choose to keep the BUILD_VECTOR.
static bool haveEfficientBuildVectorPattern(BuildVectorSDNode *V,
                                            bool HasDirectMove,
                                            bool HasP8Vector) {}

// Lower BITCAST(f128, (build_pair i64, i64)) to BUILD_FP128.
SDValue PPCTargetLowering::LowerBITCAST(SDValue Op, SelectionDAG &DAG) const {}

static const SDValue *getNormalLoadInput(const SDValue &Op, bool &IsPermuted) {}

// Convert the argument APFloat to a single precision APFloat if no
// information is lost in the conversion and the result is not a denormal
// number. Return true if successful.
bool llvm::convertToNonDenormSingle(APFloat &ArgAPFloat) {}
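// Sketch of the check using the APFloat API (the elided body may differ):
//
//   APFloat Tmp = ArgAPFloat;
//   bool LosesInfo;
//   Tmp.convert(APFloat::IEEEsingle(), APFloat::rmNearestTiesToEven,
//               &LosesInfo);
//   if (LosesInfo || Tmp.isDenormal())
//     return false;
//   ArgAPFloat = Tmp;
//   return true;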

// Bitcast the argument APInt to a double and convert it to a single precision
// APFloat; bitcast that APFloat back to an APInt and assign it to the original
// argument, provided no information is lost in the conversion from double to
// single precision and the result is not a denormal number. Return true if
// successful.
bool llvm::convertToNonDenormSingle(APInt &ArgAPInt) {}

// Nondestructive check for convertToNonDenormSingle.
bool llvm::checkConvertToNonDenormSingle(APFloat &ArgAPFloat) {}

static bool isValidSplatLoad(const PPCSubtarget &Subtarget, const SDValue &Op,
                             unsigned &Opcode) {}

// If this is a case we can't handle, return null and let the default
// expansion code take care of it.  If we CAN select this case, and if it
// selects to a single instruction, return Op.  Otherwise, if we can codegen
// this case more efficiently than a constant pool load, lower it to the
// sequence of ops that should be used.
SDValue PPCTargetLowering::LowerBUILD_VECTOR(SDValue Op,
                                             SelectionDAG &DAG) const {}

/// GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit
/// the specified operations to build the shuffle.
static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS,
                                      SDValue RHS, SelectionDAG &DAG,
                                      const SDLoc &dl) {}

/// lowerToVINSERTB - Return the SDValue if this VECTOR_SHUFFLE can be handled
/// by the VINSERTB instruction introduced in ISA 3.0; otherwise just return
/// the default SDValue.
SDValue PPCTargetLowering::lowerToVINSERTB(ShuffleVectorSDNode *N,
                                           SelectionDAG &DAG) const {}

/// lowerToVINSERTH - Return the SDValue if this VECTOR_SHUFFLE can be handled
/// by the VINSERTH instruction introduced in ISA 3.0; otherwise just return
/// the default SDValue.
SDValue PPCTargetLowering::lowerToVINSERTH(ShuffleVectorSDNode *N,
                                           SelectionDAG &DAG) const {}

/// lowerToXXSPLTI32DX - Return the SDValue if this VECTOR_SHUFFLE can be
/// handled by the XXSPLTI32DX instruction introduced in ISA 3.1, otherwise
/// return the default SDValue.
SDValue PPCTargetLowering::lowerToXXSPLTI32DX(ShuffleVectorSDNode *SVN,
                                              SelectionDAG &DAG) const {}

/// LowerROTL - Custom lowering for ROTL(v1i128) to vector_shuffle(v16i8).
/// We lower ROTL(v1i128) to vector_shuffle(v16i8) only if the shift amount is
/// a multiple of 8. Otherwise we convert it to a scalar rotate (i128),
/// i.e. (or (shl x, C1), (srl x, 128-C1)).
SDValue PPCTargetLowering::LowerROTL(SDValue Op, SelectionDAG &DAG) const {}

/// LowerVECTOR_SHUFFLE - Return the code we lower for VECTOR_SHUFFLE.  If this
/// is a shuffle we can handle in a single instruction, return it.  Otherwise,
/// return the code it can be lowered into.  Worst case, it can always be
/// lowered into a vperm.
SDValue PPCTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
                                               SelectionDAG &DAG) const {}

SDValue PPCTargetLowering::LowerVPERM(SDValue Op, SelectionDAG &DAG,
                                      ArrayRef<int> PermMask, EVT VT,
                                      SDValue V1, SDValue V2) const {}

/// getVectorCompareInfo - Given an intrinsic, return false if it is not a
/// vector comparison.  If it is, return true and fill in CompareOpc/isDot with
/// information about the intrinsic.
static bool getVectorCompareInfo(SDValue Intrin, int &CompareOpc,
                                 bool &isDot, const PPCSubtarget &Subtarget) {}

/// LowerINTRINSIC_WO_CHAIN - If this is an intrinsic that we want to custom
/// lower, do it, otherwise return null.
SDValue PPCTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
                                                   SelectionDAG &DAG) const {}

SDValue PPCTargetLowering::LowerINTRINSIC_VOID(SDValue Op,
                                               SelectionDAG &DAG) const {}

// Lower scalar BSWAP64 to xxbrd.
SDValue PPCTargetLowering::LowerBSWAP(SDValue Op, SelectionDAG &DAG) const {}
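
// A sketch of the idea behind the xxbrd path (assumed details, hypothetical
// helper): move the i64 into a vector register, byte-reverse it there
// (ISD::BSWAP on v2i64 selects to xxbrd), and extract the result.
static SDValue lowerBSWAP64Sketch(SDValue Scalar, bool IsLittleEndian,
                                  const SDLoc &dl, SelectionDAG &DAG) {
  SDValue Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2i64, Scalar, Scalar);
  SDValue Swapped = DAG.getNode(ISD::BSWAP, dl, MVT::v2i64, Vec);
  // Which doubleword to read back depends on the endianness.
  return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64, Swapped,
                     DAG.getVectorIdxConstant(IsLittleEndian ? 1 : 0, dl));
}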

// ATOMIC_CMP_SWAP for i8/i16 needs to zero-extend its input since it will be
// compared to a value that is atomically loaded (atomic loads zero-extend).
SDValue PPCTargetLowering::LowerATOMIC_CMP_SWAP(SDValue Op,
                                                SelectionDAG &DAG) const {}
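
// A minimal sketch of the zero-extension described above (assumed details,
// hypothetical helper): mask the compare operand of a sub-word cmpxchg so it
// matches what the zero-extending atomic load will produce.
static SDValue zeroExtendCmpXchgCmpOpSketch(SDValue Op, SelectionDAG &DAG) {
  AtomicSDNode *AN = cast<AtomicSDNode>(Op.getNode());
  EVT MemVT = AN->getMemoryVT();
  if (MemVT.getSizeInBits() >= 32)
    return Op; // Only i8/i16 need the fixup.

  // Operands are (chain, ptr, cmp, swap); if the compare value's high bits
  // are already known zero, there is nothing to do.
  SDValue Cmp = Op.getOperand(2);
  APInt HighBits = APInt::getHighBitsSet(32, 32 - MemVT.getSizeInBits());
  if (DAG.MaskedValueIsZero(Cmp, HighBits))
    return Op;

  SDLoc dl(Op);
  uint64_t MaskVal = (1ULL << MemVT.getSizeInBits()) - 1;
  SDValue NewCmp =
      DAG.getNode(ISD::AND, dl, Cmp.getValueType(), Cmp,
                  DAG.getConstant(MaskVal, dl, Cmp.getValueType()));
  // Rebuild the node with the masked compare operand.
  SmallVector<SDValue, 4> Ops(AN->op_begin(), AN->op_end());
  Ops[2] = NewCmp;
  return SDValue(DAG.UpdateNodeOperands(AN, Ops), Op.getResNo());
}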

SDValue PPCTargetLowering::LowerATOMIC_LOAD_STORE(SDValue Op,
                                                  SelectionDAG &DAG) const {}

static SDValue getDataClassTest(SDValue Op, FPClassTest Mask, const SDLoc &Dl,
                                SelectionDAG &DAG,
                                const PPCSubtarget &Subtarget) {}

SDValue PPCTargetLowering::LowerIS_FPCLASS(SDValue Op,
                                           SelectionDAG &DAG) const {}

SDValue PPCTargetLowering::LowerSCALAR_TO_VECTOR(SDValue Op,
                                                 SelectionDAG &DAG) const {}

SDValue PPCTargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op,
                                                  SelectionDAG &DAG) const {}

SDValue PPCTargetLowering::LowerVectorLoad(SDValue Op,
                                           SelectionDAG &DAG) const {}

SDValue PPCTargetLowering::LowerVectorStore(SDValue Op,
                                            SelectionDAG &DAG) const {}

SDValue PPCTargetLowering::LowerMUL(SDValue Op, SelectionDAG &DAG) const {}

SDValue PPCTargetLowering::LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const {}

// Custom lowering for fpext v2f32 to v2f64
SDValue PPCTargetLowering::LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const {}

/// LowerOperation - Provide custom lowering hooks for some operations.
///
SDValue PPCTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {}

void PPCTargetLowering::ReplaceNodeResults(SDNode *N,
                                           SmallVectorImpl<SDValue> &Results,
                                           SelectionDAG &DAG) const {}

//===----------------------------------------------------------------------===//
//  Other Lowering Code
//===----------------------------------------------------------------------===//

static Instruction *callIntrinsic(IRBuilderBase &Builder, Intrinsic::ID Id) {}

// The mappings for emitLeading/TrailingFence are taken from
// http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
Instruction *PPCTargetLowering::emitLeadingFence(IRBuilderBase &Builder,
                                                 Instruction *Inst,
                                                 AtomicOrdering Ord) const {}

Instruction *PPCTargetLowering::emitTrailingFence(IRBuilderBase &Builder,
                                                  Instruction *Inst,
                                                  AtomicOrdering Ord) const {}
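
// A sketch of that mapping under assumed details (hwsync before a seq_cst
// access, lwsync before weaker releases and after acquire-or-stronger
// loads); the helper names are hypothetical.
static Instruction *emitLeadingFenceSketch(IRBuilderBase &Builder,
                                           AtomicOrdering Ord) {
  if (Ord == AtomicOrdering::SequentiallyConsistent)
    return callIntrinsic(Builder, Intrinsic::ppc_sync);   // full hwsync
  if (isReleaseOrStronger(Ord))
    return callIntrinsic(Builder, Intrinsic::ppc_lwsync); // lightweight sync
  return nullptr; // relaxed/acquire need no leading fence
}

static Instruction *emitTrailingFenceSketch(IRBuilderBase &Builder,
                                            AtomicOrdering Ord) {
  if (isAcquireOrStronger(Ord))
    return callIntrinsic(Builder, Intrinsic::ppc_lwsync);
  return nullptr; // relaxed/release need no trailing fence
}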

MachineBasicBlock *
PPCTargetLowering::EmitAtomicBinary(MachineInstr &MI, MachineBasicBlock *BB,
                                    unsigned AtomicSize,
                                    unsigned BinOpcode,
                                    unsigned CmpOpcode,
                                    unsigned CmpPred) const {}
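
// For reference, a sketch of the shape of the emitted expansion (the 32-bit
// case; 64-bit uses ldarx/stdcx.), not actual generated output:
//
//   loopMBB:
//     lwarx   dest, 0, ptr      ; load and take a reservation
//     <binop> tmp, dest, incr   ; BinOpcode (guarded by CmpOpcode if set)
//     stwcx.  tmp, 0, ptr       ; store only if the reservation still holds
//     bne-    loopMBB           ; reservation lost: retry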

static bool isSignExtended(MachineInstr &MI, const PPCInstrInfo *TII) {}

MachineBasicBlock *PPCTargetLowering::EmitPartwordAtomicBinary(
    MachineInstr &MI, MachineBasicBlock *BB,
    bool is8bit, // operation width: 8-bit if true, 16-bit otherwise
    unsigned BinOpcode, unsigned CmpOpcode, unsigned CmpPred) const {}

llvm::MachineBasicBlock *
PPCTargetLowering::emitEHSjLjSetJmp(MachineInstr &MI,
                                    MachineBasicBlock *MBB) const {}

MachineBasicBlock *
PPCTargetLowering::emitEHSjLjLongJmp(MachineInstr &MI,
                                     MachineBasicBlock *MBB) const {}

bool PPCTargetLowering::hasInlineStackProbe(const MachineFunction &MF) const {}

unsigned PPCTargetLowering::getStackProbeSize(const MachineFunction &MF) const {}

// Lower dynamic stack allocation with probing. `emitProbedAlloca` is split
// into three phases. In the first phase, it uses the pseudo instruction
// PREPARE_PROBED_ALLOCA to get the future results of the actual FramePointer
// and FinalStackPtr. In the second phase, it generates a loop that probes
// blocks. In the last phase, it uses the pseudo instruction DYNAREAOFFSET to
// get the future result of MaxCallFrameSize so that it can calculate the
// correct data area pointer.
MachineBasicBlock *
PPCTargetLowering::emitProbedAlloca(MachineInstr &MI,
                                    MachineBasicBlock *MBB) const {}
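
// For illustration, a sketch of the probing loop's shape (assumed details,
// not actual generated output): the stack pointer is walked down one
// probe-size block at a time, storing to each block so the guard page is
// always touched:
//
//   probeLoopMBB:
//     stdu  1, -ProbeSize(1)    ; bump r1 down and touch the new block
//     cmpd  1, FinalStackPtr
//     bgt   probeLoopMBB        ; keep probing until FinalStackPtr is reached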

static bool IsSelectCC(MachineInstr &MI) {}

static bool IsSelect(MachineInstr &MI) {}

MachineBasicBlock *
PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
                                               MachineBasicBlock *BB) const {}

//===----------------------------------------------------------------------===//
// Target Optimization Hooks
//===----------------------------------------------------------------------===//

static int getEstimateRefinementSteps(EVT VT, const PPCSubtarget &Subtarget) {}

SDValue PPCTargetLowering::getSqrtInputTest(SDValue Op, SelectionDAG &DAG,
                                            const DenormalMode &Mode) const {}

SDValue
PPCTargetLowering::getSqrtResultForDenormInput(SDValue Op,
                                               SelectionDAG &DAG) const {}

SDValue PPCTargetLowering::getSqrtEstimate(SDValue Operand, SelectionDAG &DAG,
                                           int Enabled, int &RefinementSteps,
                                           bool &UseOneConstNR,
                                           bool Reciprocal) const {}

SDValue PPCTargetLowering::getRecipEstimate(SDValue Operand, SelectionDAG &DAG,
                                            int Enabled,
                                            int &RefinementSteps) const {}

unsigned PPCTargetLowering::combineRepeatedFPDivisors() const {}

// isConsecutiveLSLoc needs to work even if all adds have not yet been
// collapsed, and so we need to look through chains of them.
static void getBaseWithConstantOffset(SDValue Loc, SDValue &Base,
                                      int64_t &Offset, SelectionDAG &DAG) {}
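
// A minimal sketch of the look-through described above (hypothetical name):
// peel nested (add base, constant) nodes while accumulating the
// displacement, so (add (add X, 8), 16) yields Base = X and Offset += 24.
static void peelConstantOffsetsSketch(SDValue Loc, SDValue &Base,
                                      int64_t &Offset, SelectionDAG &DAG) {
  Base = Loc;
  while (DAG.isBaseWithConstantOffset(Base)) {
    Offset += cast<ConstantSDNode>(Base.getOperand(1))->getSExtValue();
    Base = Base.getOperand(0);
  }
}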

static bool isConsecutiveLSLoc(SDValue Loc, EVT VT, LSBaseSDNode *Base,
                               unsigned Bytes, int Dist,
                               SelectionDAG &DAG) {}

// Like SelectionDAG::isConsecutiveLoad, but also works for stores, and does
// not enforce equality of the chain operands.
static bool isConsecutiveLS(SDNode *N, LSBaseSDNode *Base,
                            unsigned Bytes, int Dist,
                            SelectionDAG &DAG) {}

// Return true if there is a nearby consecutive load to the one provided
// (regardless of alignment). We search up and down the chain, looking through
// token factors and other loads (but nothing else). As a result, a true result
// indicates that it is safe to create a new consecutive load adjacent to the
// load provided.
static bool findConsecutiveLoad(LoadSDNode *LD, SelectionDAG &DAG) {}

/// This function is called when we have proved that a SETCC node can be
/// replaced by subtraction (and other supporting instructions) so that the
/// result of the comparison is kept in a GPR instead of a CR. This function is
/// purely for codegen purposes and has some flags to guide the codegen process.
static SDValue generateEquivalentSub(SDNode *N, int Size, bool Complement,
                                     bool Swap, SDLoc &DL, SelectionDAG &DAG) {}
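
// The core trick, sketched for a 32-bit unsigned less-than (assumed details;
// the real routine also handles the other sizes and the Complement/Swap
// flags): widen, subtract, and read the borrow out of the sign bit so the
// boolean lives in a GPR rather than a CR field.
static SDValue setccULTAsSubSketch(SDValue A, SDValue B, const SDLoc &DL,
                                   SelectionDAG &DAG) {
  SDValue ExtA = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, A);
  SDValue ExtB = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, B);
  SDValue Sub = DAG.getNode(ISD::SUB, DL, MVT::i64, ExtA, ExtB);
  // A u< B iff the 64-bit difference is negative, i.e. bit 63 is set.
  return DAG.getNode(ISD::SRL, DL, MVT::i64, Sub,
                     DAG.getConstant(63, DL, MVT::i64));
}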

SDValue PPCTargetLowering::ConvertSETCCToSubtract(SDNode *N,
                                                  DAGCombinerInfo &DCI) const {}

SDValue PPCTargetLowering::DAGCombineTruncBoolExt(SDNode *N,
                                                  DAGCombinerInfo &DCI) const {}

SDValue PPCTargetLowering::DAGCombineExtBoolTrunc(SDNode *N,
                                                  DAGCombinerInfo &DCI) const {}

SDValue PPCTargetLowering::combineSetCC(SDNode *N,
                                        DAGCombinerInfo &DCI) const {}

static bool isFPExtLoad(SDValue Op) {}

SDValue PPCTargetLowering::
combineElementTruncationToVectorTruncation(SDNode *N,
                                           DAGCombinerInfo &DCI) const {}

static SDValue combineBVOfConsecutiveLoads(SDNode *N, SelectionDAG &DAG) {}

static SDValue addShuffleForVecExtend(SDNode *N, SelectionDAG &DAG,
                                      SDValue Input, uint64_t Elems,
                                      uint64_t CorrectElems) {}

static SDValue combineBVOfVecSExt(SDNode *N, SelectionDAG &DAG) {}

static SDValue combineBVZEXTLOAD(SDNode *N, SelectionDAG &DAG) {}

SDValue PPCTargetLowering::DAGCombineBuildVector(SDNode *N,
                                                 DAGCombinerInfo &DCI) const {}

SDValue PPCTargetLowering::combineFPToIntToFP(SDNode *N,
                                              DAGCombinerInfo &DCI) const {}

SDValue PPCTargetLowering::expandVSXLoadForLE(SDNode *N,
                                              DAGCombinerInfo &DCI) const {}

SDValue PPCTargetLowering::expandVSXStoreForLE(SDNode *N,
                                               DAGCombinerInfo &DCI) const {}

SDValue PPCTargetLowering::combineStoreFPToInt(SDNode *N,
                                               DAGCombinerInfo &DCI) const {}

static bool isAlternatingShuffMask(const ArrayRef<int> &Mask, int NumElts) {}

static bool isSplatBV(SDValue Op) {}

static SDValue isScalarToVec(SDValue Op) {}

static void fixupShuffleMaskForPermutedSToV(SmallVectorImpl<int> &ShuffV,
                                            int LHSMaxIdx, int RHSMinIdx,
                                            int RHSMaxIdx, int HalfVec,
                                            unsigned ValidLaneWidth,
                                            const PPCSubtarget &Subtarget) {}

static SDValue getSToVPermuted(SDValue OrigSToV, SelectionDAG &DAG,
                               const PPCSubtarget &Subtarget) {}

SDValue PPCTargetLowering::combineVectorShuffle(ShuffleVectorSDNode *SVN,
                                                SelectionDAG &DAG) const {}

SDValue PPCTargetLowering::combineVReverseMemOP(ShuffleVectorSDNode *SVN,
                                                LSBaseSDNode *LSBase,
                                                DAGCombinerInfo &DCI) const {}

static bool isStoreConditional(SDValue Intrin, unsigned &StoreWidth) {}

SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N,
                                             DAGCombinerInfo &DCI) const {}

SDValue
PPCTargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor,
                                 SelectionDAG &DAG,
                                 SmallVectorImpl<SDNode *> &Created) const {}

void PPCTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
                                                      KnownBits &Known,
                                                      const APInt &DemandedElts,
                                                      const SelectionDAG &DAG,
                                                      unsigned Depth) const {}

Align PPCTargetLowering::getPrefLoopAlignment(MachineLoop *ML) const {}

PPCTargetLowering::ConstraintType
PPCTargetLowering::getConstraintType(StringRef Constraint) const {}

TargetLowering::ConstraintWeight
PPCTargetLowering::getSingleConstraintMatchWeight(
    AsmOperandInfo &info, const char *constraint) const {}

std::pair<unsigned, const TargetRegisterClass *>
PPCTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                                                StringRef Constraint,
                                                MVT VT) const {}

void PPCTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
                                                     StringRef Constraint,
                                                     std::vector<SDValue> &Ops,
                                                     SelectionDAG &DAG) const {}

void PPCTargetLowering::CollectTargetIntrinsicOperands(const CallInst &I,
                                              SmallVectorImpl<SDValue> &Ops,
                                              SelectionDAG &DAG) const {}

bool PPCTargetLowering::isLegalAddressingMode(const DataLayout &DL,
                                              const AddrMode &AM, Type *Ty,
                                              unsigned AS,
                                              Instruction *I) const {}

SDValue PPCTargetLowering::LowerRETURNADDR(SDValue Op,
                                           SelectionDAG &DAG) const {}

SDValue PPCTargetLowering::LowerFRAMEADDR(SDValue Op,
                                          SelectionDAG &DAG) const {}

Register PPCTargetLowering::getRegisterByName(const char* RegName, LLT VT,
                                              const MachineFunction &MF) const {}

bool PPCTargetLowering::isAccessedAsGotIndirect(SDValue GA) const {}

bool
PPCTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {}

bool PPCTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
                                           const CallInst &I,
                                           MachineFunction &MF,
                                           unsigned Intrinsic) const {}

EVT PPCTargetLowering::getOptimalMemOpType(
    const MemOp &Op, const AttributeList &FuncAttributes) const {}

bool PPCTargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
                                                          Type *Ty) const {}

bool PPCTargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const {}

bool PPCTargetLowering::isTruncateFree(EVT VT1, EVT VT2) const {}

bool PPCTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {}

bool PPCTargetLowering::isFPExtFree(EVT DestVT, EVT SrcVT) const {}

bool PPCTargetLowering::isLegalICmpImmediate(int64_t Imm) const {}

bool PPCTargetLowering::isLegalAddImmediate(int64_t Imm) const {}

bool PPCTargetLowering::allowsMisalignedMemoryAccesses(EVT VT, unsigned, Align,
                                                       MachineMemOperand::Flags,
                                                       unsigned *Fast) const {}

bool PPCTargetLowering::decomposeMulByConstant(LLVMContext &Context, EVT VT,
                                               SDValue C) const {}

bool PPCTargetLowering::isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
                                                   EVT VT) const {}

bool PPCTargetLowering::isFMAFasterThanFMulAndFAdd(const Function &F,
                                                   Type *Ty) const {}

bool PPCTargetLowering::isProfitableToHoist(Instruction *I) const {}

const MCPhysReg *
PPCTargetLowering::getScratchRegisters(CallingConv::ID) const {}

Register PPCTargetLowering::getExceptionPointerRegister(
    const Constant *PersonalityFn) const {}

Register PPCTargetLowering::getExceptionSelectorRegister(
    const Constant *PersonalityFn) const {}

bool
PPCTargetLowering::shouldExpandBuildVectorWithShuffles(
                     EVT VT, unsigned DefinedValues) const {}

Sched::Preference PPCTargetLowering::getSchedulingPreference(SDNode *N) const {}

FastISel *
PPCTargetLowering::createFastISel(FunctionLoweringInfo &FuncInfo,
                                  const TargetLibraryInfo *LibInfo) const {}

static unsigned invertFMAOpcode(unsigned Opc) {}

SDValue PPCTargetLowering::getNegatedExpression(SDValue Op, SelectionDAG &DAG,
                                                bool LegalOps, bool OptForSize,
                                                NegatibleCost &Cost,
                                                unsigned Depth) const {}

bool PPCTargetLowering::useLoadStackGuardNode() const {}

void PPCTargetLowering::insertSSPDeclarations(Module &M) const {}

Value *PPCTargetLowering::getSDagStackGuard(const Module &M) const {}

bool PPCTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
                                     bool ForCodeSize) const {}

static SDValue stripModuloOnShift(const TargetLowering &TLI, SDNode *N,
                                  SelectionDAG &DAG) {}

SDValue PPCTargetLowering::combineSHL(SDNode *N, DAGCombinerInfo &DCI) const {}

SDValue PPCTargetLowering::combineSRA(SDNode *N, DAGCombinerInfo &DCI) const {}

SDValue PPCTargetLowering::combineSRL(SDNode *N, DAGCombinerInfo &DCI) const {}

static SDValue combineADDToADDZE(SDNode *N, SelectionDAG &DAG,
                                 const PPCSubtarget &Subtarget) {}

static SDValue combineADDToMAT_PCREL_ADDR(SDNode *N, SelectionDAG &DAG,
                                          const PPCSubtarget &Subtarget) {}

SDValue PPCTargetLowering::combineADD(SDNode *N, DAGCombinerInfo &DCI) const {}

SDValue PPCTargetLowering::combineTRUNCATE(SDNode *N,
                                           DAGCombinerInfo &DCI) const {}

SDValue PPCTargetLowering::combineMUL(SDNode *N, DAGCombinerInfo &DCI) const {}

SDValue PPCTargetLowering::combineFMALike(SDNode *N,
                                          DAGCombinerInfo &DCI) const {}

bool PPCTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {}

bool PPCTargetLowering::
isMaskAndCmp0FoldingBeneficial(const Instruction &AndI) const {}

PPC::AddrMode PPCTargetLowering::getAddrModeForFlags(unsigned Flags) const {}

static void setAlignFlagsForFI(SDValue N, unsigned &FlagSet,
                               SelectionDAG &DAG) {}

static void computeFlagsForAddressComputation(SDValue N, unsigned &FlagSet,
                                              SelectionDAG &DAG) {}

static bool isPCRelNode(SDValue N) {}

unsigned PPCTargetLowering::computeMOFlags(const SDNode *Parent, SDValue N,
                                           SelectionDAG &DAG) const {}

PPC::AddrMode PPCTargetLowering::SelectForceXFormMode(SDValue N, SDValue &Disp,
                                                      SDValue &Base,
                                                      SelectionDAG &DAG) const {}

bool PPCTargetLowering::splitValueIntoRegisterParts(
    SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts,
    unsigned NumParts, MVT PartVT, std::optional<CallingConv::ID> CC) const {}

SDValue PPCTargetLowering::lowerToLibCall(const char *LibCallName, SDValue Op,
                                          SelectionDAG &DAG) const {}

SDValue PPCTargetLowering::lowerLibCallBasedOnType(
    const char *LibCallFloatName, const char *LibCallDoubleName, SDValue Op,
    SelectionDAG &DAG) const {}

bool PPCTargetLowering::isLowringToMASSFiniteSafe(SDValue Op) const {}

bool PPCTargetLowering::isLowringToMASSSafe(SDValue Op) const {}

bool PPCTargetLowering::isScalarMASSConversionEnabled() const {}

SDValue PPCTargetLowering::lowerLibCallBase(const char *LibCallDoubleName,
                                            const char *LibCallFloatName,
                                            const char *LibCallDoubleNameFinite,
                                            const char *LibCallFloatNameFinite,
                                            SDValue Op,
                                            SelectionDAG &DAG) const {}

SDValue PPCTargetLowering::lowerPow(SDValue Op, SelectionDAG &DAG) const {}

SDValue PPCTargetLowering::lowerSin(SDValue Op, SelectionDAG &DAG) const {}

SDValue PPCTargetLowering::lowerCos(SDValue Op, SelectionDAG &DAG) const {}

SDValue PPCTargetLowering::lowerLog(SDValue Op, SelectionDAG &DAG) const {}

SDValue PPCTargetLowering::lowerLog10(SDValue Op, SelectionDAG &DAG) const {}

SDValue PPCTargetLowering::lowerExp(SDValue Op, SelectionDAG &DAG) const {}

static void setXFormForUnalignedFI(SDValue N, unsigned Flags,
                                   PPC::AddrMode &Mode) {}

PPC::AddrMode PPCTargetLowering::SelectOptimalAddrMode(const SDNode *Parent,
                                                       SDValue N, SDValue &Disp,
                                                       SDValue &Base,
                                                       SelectionDAG &DAG,
                                                       MaybeAlign Align) const {}

CCAssignFn *PPCTargetLowering::ccAssignFnForCall(CallingConv::ID CC,
                                                 bool Return,
                                                 bool IsVarArg) const {}

bool PPCTargetLowering::shouldInlineQuadwordAtomics() const {}

TargetLowering::AtomicExpansionKind
PPCTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {}

TargetLowering::AtomicExpansionKind
PPCTargetLowering::shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const {}

static Intrinsic::ID
getIntrinsicForAtomicRMWBinOp128(AtomicRMWInst::BinOp BinOp) {}

Value *PPCTargetLowering::emitMaskedAtomicRMWIntrinsic(
    IRBuilderBase &Builder, AtomicRMWInst *AI, Value *AlignedAddr, Value *Incr,
    Value *Mask, Value *ShiftAmt, AtomicOrdering Ord) const {}

Value *PPCTargetLowering::emitMaskedAtomicCmpXchgIntrinsic(
    IRBuilderBase &Builder, AtomicCmpXchgInst *CI, Value *AlignedAddr,
    Value *CmpVal, Value *NewVal, Value *Mask, AtomicOrdering Ord) const {}