llvm/lib/Target/SystemZ/SystemZISelLowering.cpp

//===-- SystemZISelLowering.cpp - SystemZ DAG lowering implementation -----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the SystemZTargetLowering class.
//
//===----------------------------------------------------------------------===//

#include "SystemZISelLowering.h"
#include "SystemZCallingConv.h"
#include "SystemZConstantPoolValue.h"
#include "SystemZMachineFunctionInfo.h"
#include "SystemZTargetMachine.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsS390.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include <cctype>
#include <optional>

using namespace llvm;

#define DEBUG_TYPE "systemz-lower"

// Temporarily disabled by default until all known problems related to
// argument extensions are fixed.
static cl::opt<bool> EnableIntArgExtCheck(
    "argext-abi-check", cl::init(false),
    cl::desc("Verify that narrow int args are properly extended per the "
             "SystemZ ABI."));

namespace {
// Represents information about a comparison.
struct Comparison {};
} // end anonymous namespace

// Classify VT as either 32 or 64 bit.
static bool is32Bit(EVT VT) {}
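
// A minimal sketch of how such a classification can look, assuming only i32
// and i64 ever reach the query (illustrative only; the "Sketch" suffix marks
// it as hypothetical rather than the checked-in body):
static bool is32BitSketch(EVT VT) {
  switch (VT.getSimpleVT().SimpleTy) {
  case MVT::i32:
    return true;
  case MVT::i64:
    return false;
  default:
    llvm_unreachable("Unsupported type");
  }
}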

// Return a version of MachineOperand that can be safely used before the
// final use.
static MachineOperand earlyUseOperand(MachineOperand Op) {}
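
// Hedged sketch of the idea: clearing the kill flag on a register operand
// makes the copy safe to use before the operand's final use (hypothetical
// body, shown for illustration):
static MachineOperand earlyUseOperandSketch(MachineOperand Op) {
  if (Op.isReg())
    Op.setIsKill(false); // The value stays live past this earlier use.
  return Op;
}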

SystemZTargetLowering::SystemZTargetLowering(const TargetMachine &TM,
                                             const SystemZSubtarget &STI)
    :{}

bool SystemZTargetLowering::useSoftFloat() const {}

EVT SystemZTargetLowering::getSetCCResultType(const DataLayout &DL,
                                              LLVMContext &, EVT VT) const {}

bool SystemZTargetLowering::isFMAFasterThanFMulAndFAdd(
    const MachineFunction &MF, EVT VT) const {}

// Return true if the constant can be generated with a vector instruction,
// such as VGM, VGMB or VREPI.
bool SystemZVectorConstantInfo::isVectorConstantLegal(
    const SystemZSubtarget &Subtarget) {}

SystemZVectorConstantInfo::SystemZVectorConstantInfo(APInt IntImm) {}

SystemZVectorConstantInfo::SystemZVectorConstantInfo(BuildVectorSDNode *BVN) {}

bool SystemZTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
                                         bool ForCodeSize) const {}

/// Returns true if stack probing through inline assembly is requested.
bool SystemZTargetLowering::hasInlineStackProbe(const MachineFunction &MF) const {}

TargetLowering::AtomicExpansionKind
SystemZTargetLowering::shouldCastAtomicLoadInIR(LoadInst *LI) const {}

TargetLowering::AtomicExpansionKind
SystemZTargetLowering::shouldCastAtomicStoreInIR(StoreInst *SI) const {}

TargetLowering::AtomicExpansionKind
SystemZTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *RMW) const {}

bool SystemZTargetLowering::isLegalICmpImmediate(int64_t Imm) const {}

bool SystemZTargetLowering::isLegalAddImmediate(int64_t Imm) const {}
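
// Plausible sketches of the two immediate-legality queries, assuming the
// usual SystemZ 32-bit immediate instruction forms (e.g. CGFI/CLGFI for
// compares, ALGFI/SLGFI for add/subtract logical).  isInt<>/isUInt<> come
// from llvm/Support/MathExtras.h; the *Sketch names are hypothetical.
static bool isLegalICmpImmediateSketch(int64_t Imm) {
  // Signed or unsigned 32-bit comparison immediates are representable.
  return isInt<32>(Imm) || isUInt<32>(Imm);
}
static bool isLegalAddImmediateSketch(int64_t Imm) {
  // Either the value or its negation must fit an unsigned 32-bit field.
  return isUInt<32>(Imm) || isUInt<32>(-Imm);
}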

bool SystemZTargetLowering::allowsMisalignedMemoryAccesses(
    EVT VT, unsigned, Align, MachineMemOperand::Flags, unsigned *Fast) const {}

// Information about the addressing mode for a memory access.
struct AddressingMode {};

// Return the desired addressing mode for a Load whose only use (in the
// same block) is a Store.
static AddressingMode getLoadStoreAddrMode(bool HasVector,
                                           Type *Ty) {}

// Return the addressing mode which seems most desirable given an LLVM
// Instruction pointer.
static AddressingMode
supportedAddressingMode(Instruction *I, bool HasVector) {}

bool SystemZTargetLowering::isLegalAddressingMode(const DataLayout &DL,
       const AddrMode &AM, Type *Ty, unsigned AS, Instruction *I) const {}

bool SystemZTargetLowering::findOptimalMemOpLowering(
    std::vector<EVT> &MemOps, unsigned Limit, const MemOp &Op, unsigned DstAS,
    unsigned SrcAS, const AttributeList &FuncAttributes) const {}

EVT SystemZTargetLowering::getOptimalMemOpType(const MemOp &Op,
                                   const AttributeList &FuncAttributes) const {}

bool SystemZTargetLowering::isTruncateFree(Type *FromType, Type *ToType) const {}

bool SystemZTargetLowering::isTruncateFree(EVT FromVT, EVT ToVT) const {}

//===----------------------------------------------------------------------===//
// Inline asm support
//===----------------------------------------------------------------------===//

TargetLowering::ConstraintType
SystemZTargetLowering::getConstraintType(StringRef Constraint) const {}

TargetLowering::ConstraintWeight SystemZTargetLowering::
getSingleConstraintMatchWeight(AsmOperandInfo &info,
                               const char *constraint) const {}

// Parse a "{tNNN}" register constraint for which the register type "t"
// has already been verified.  RC is the class associated with "t" and
// Map maps 0-based register numbers to LLVM register numbers.
static std::pair<unsigned, const TargetRegisterClass *>
parseRegisterNumber(StringRef Constraint, const TargetRegisterClass *RC,
                    const unsigned *Map, unsigned Size) {}

std::pair<unsigned, const TargetRegisterClass *>
SystemZTargetLowering::getRegForInlineAsmConstraint(
    const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const {}

// FIXME? Maybe this could be a TableGen attribute on some registers and
// this table could be generated automatically from RegInfo.
Register
SystemZTargetLowering::getRegisterByName(const char *RegName, LLT VT,
                                         const MachineFunction &MF) const {}

Register SystemZTargetLowering::getExceptionPointerRegister(
    const Constant *PersonalityFn) const {}

Register SystemZTargetLowering::getExceptionSelectorRegister(
    const Constant *PersonalityFn) const {}

void SystemZTargetLowering::LowerAsmOperandForConstraint(
    SDValue Op, StringRef Constraint, std::vector<SDValue> &Ops,
    SelectionDAG &DAG) const {}

//===----------------------------------------------------------------------===//
// Calling conventions
//===----------------------------------------------------------------------===//

#include "SystemZGenCallingConv.inc"

const MCPhysReg *SystemZTargetLowering::getScratchRegisters(
  CallingConv::ID) const {}

bool SystemZTargetLowering::allowTruncateForTailCall(Type *FromType,
                                                     Type *ToType) const {}

bool SystemZTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {}

// Value is a value that has been passed to us in the location described by VA
// (and so has type VA.getLocVT()).  Convert Value to VA.getValVT(), chaining
// any loads onto Chain.
static SDValue convertLocVTToValVT(SelectionDAG &DAG, const SDLoc &DL,
                                   CCValAssign &VA, SDValue Chain,
                                   SDValue Value) {}

// Value is a value of type VA.getValVT() that we need to copy into
// the location described by VA.  Return a copy of Value converted to
// VA.getLocVT().  The caller is responsible for handling indirect values.
static SDValue convertValVTToLocVT(SelectionDAG &DAG, const SDLoc &DL,
                                   CCValAssign &VA, SDValue Value) {}

static SDValue lowerI128ToGR128(SelectionDAG &DAG, SDValue In) {}

static SDValue lowerGR128ToI128(SelectionDAG &DAG, SDValue In) {}

bool SystemZTargetLowering::splitValueIntoRegisterParts(
    SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts,
    unsigned NumParts, MVT PartVT, std::optional<CallingConv::ID> CC) const {}

SDValue SystemZTargetLowering::joinRegisterPartsIntoValue(
    SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts, unsigned NumParts,
    MVT PartVT, EVT ValueVT, std::optional<CallingConv::ID> CC) const {}

SDValue SystemZTargetLowering::LowerFormalArguments(
    SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {}

static bool canUseSiblingCall(const CCState &ArgCCInfo,
                              SmallVectorImpl<CCValAssign> &ArgLocs,
                              SmallVectorImpl<ISD::OutputArg> &Outs) {}

static SDValue getADAEntry(SelectionDAG &DAG, SDValue Val, SDLoc DL,
                           unsigned Offset, bool LoadAdr = false) {}

// ADA access using a GlobalValue.
// Note: for functions, the address of the descriptor is returned.
static SDValue getADAEntry(SelectionDAG &DAG, const GlobalValue *GV, SDLoc DL,
                           EVT PtrVT) {}

static bool getzOSCalleeAndADA(SelectionDAG &DAG, SDValue &Callee, SDValue &ADA,
                               SDLoc &DL, SDValue &Chain) {}

SDValue
SystemZTargetLowering::LowerCall(CallLoweringInfo &CLI,
                                 SmallVectorImpl<SDValue> &InVals) const {}

// Generate a call taking the given operands as arguments and returning a
// result of type RetVT.
std::pair<SDValue, SDValue> SystemZTargetLowering::makeExternalCall(
    SDValue Chain, SelectionDAG &DAG, const char *CalleeName, EVT RetVT,
    ArrayRef<SDValue> Ops, CallingConv::ID CallConv, bool IsSigned, SDLoc DL,
    bool DoesNotReturn, bool IsReturnValueUsed) const {}

bool SystemZTargetLowering::
CanLowerReturn(CallingConv::ID CallConv,
               MachineFunction &MF, bool isVarArg,
               const SmallVectorImpl<ISD::OutputArg> &Outs,
               LLVMContext &Context) const {}

SDValue
SystemZTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
                                   bool IsVarArg,
                                   const SmallVectorImpl<ISD::OutputArg> &Outs,
                                   const SmallVectorImpl<SDValue> &OutVals,
                                   const SDLoc &DL, SelectionDAG &DAG) const {}

// Return true if Op is an intrinsic node with chain that returns the CC value
// as its only (other) argument.  Provide the associated SystemZISD opcode and
// the mask of valid CC values if so.
static bool isIntrinsicWithCCAndChain(SDValue Op, unsigned &Opcode,
                                      unsigned &CCValid) {}

// Return true if Op is an intrinsic node without chain that returns the
// CC value as its final argument.  Provide the associated SystemZISD
// opcode and the mask of valid CC values if so.
static bool isIntrinsicWithCC(SDValue Op, unsigned &Opcode, unsigned &CCValid) {}

// Emit an intrinsic with chain and an explicit CC register result.
static SDNode *emitIntrinsicWithCCAndChain(SelectionDAG &DAG, SDValue Op,
                                           unsigned Opcode) {}

// Emit an intrinsic with an explicit CC register result.
static SDNode *emitIntrinsicWithCC(SelectionDAG &DAG, SDValue Op,
                                   unsigned Opcode) {}

// CC is a comparison that will be implemented using an integer or
// floating-point comparison.  Return the condition code mask for
// a branch on true.  In the integer case, CCMASK_CMP_UO is set for
// unsigned comparisons and clear for signed ones.  In the floating-point
// case, CCMASK_CMP_UO has its normal mask meaning (unordered).
static unsigned CCMaskForCondCode(ISD::CondCode CC) {}
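
// Partial sketch of the mapping (representative integer cases only, with the
// CCMASK_CMP_UO bit marking unsignedness as described above; illustrative,
// not the full function):
static unsigned ccMaskForCondCodeSketch(ISD::CondCode CC) {
  switch (CC) {
  case ISD::SETEQ:
    return SystemZ::CCMASK_CMP_EQ;
  case ISD::SETNE:
    return SystemZ::CCMASK_CMP_NE;
  case ISD::SETLT:
    return SystemZ::CCMASK_CMP_LT;
  case ISD::SETULT:
    return SystemZ::CCMASK_CMP_UO | SystemZ::CCMASK_CMP_LT;
  default:
    llvm_unreachable("Condition code not handled in this sketch");
  }
}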

// If C can be converted to a comparison against zero, adjust the operands
// as necessary.
static void adjustZeroCmp(SelectionDAG &DAG, const SDLoc &DL, Comparison &C) {}

// If a comparison described by C is suitable for CLI(Y), CHHSI or CLHHSI,
// adjust the operands as necessary.
static void adjustSubwordCmp(SelectionDAG &DAG, const SDLoc &DL,
                             Comparison &C) {}

// Return true if Op is either an unextended load, or a load suitable
// for integer register-memory comparisons of type ICmpType.
static bool isNaturalMemoryOperand(SDValue Op, unsigned ICmpType) {}

// Return true if it is better to swap the operands of C.
static bool shouldSwapCmpOperands(const Comparison &C) {}

// Check whether C tests for equality between X and Y and whether X - Y
// or Y - X is also computed.  In that case it's better to compare the
// result of the subtraction against zero.
static void adjustForSubtraction(SelectionDAG &DAG, const SDLoc &DL,
                                 Comparison &C) {}

// Check whether C compares a floating-point value with zero and if that
// floating-point value is also negated.  In this case we can use the
// negation to set CC, so avoiding separate LOAD AND TEST and
// LOAD (NEGATIVE/COMPLEMENT) instructions.
static void adjustForFNeg(Comparison &C) {}

// Check whether C compares (shl X, 32) with 0 and whether X is
// also sign-extended.  In that case it is better to test the result
// of the sign extension using LTGFR.
//
// This case is important because InstCombine transforms a comparison
// with (sext (trunc X)) into a comparison with (shl X, 32).
static void adjustForLTGFR(Comparison &C) {}

// If C compares the truncation of an extending load, try to compare
// the untruncated value instead.  This exposes more opportunities to
// reuse CC.
static void adjustICmpTruncate(SelectionDAG &DAG, const SDLoc &DL,
                               Comparison &C) {}

// Return true if shift operation N has an in-range constant shift value.
// Store it in ShiftVal if so.
static bool isSimpleShift(SDValue N, unsigned &ShiftVal) {}
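
// Sketch of the check: the shift amount must be a constant that is strictly
// smaller than the bit width of the shifted value (assumed form, shown for
// illustration):
static bool isSimpleShiftSketch(SDValue N, unsigned &ShiftVal) {
  auto *Shift = dyn_cast<ConstantSDNode>(N.getOperand(1));
  if (!Shift)
    return false;
  uint64_t Amount = Shift->getZExtValue();
  if (Amount >= N.getValueSizeInBits())
    return false; // Out-of-range shifts have undefined results.
  ShiftVal = Amount;
  return true;
}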

// Check whether an AND with Mask is suitable for a TEST UNDER MASK
// instruction and whether the CC value is descriptive enough to handle
// a comparison of type Opcode between the AND result and CmpVal.
// CCMask says which comparison result is being tested and BitSize is
// the number of bits in the operands.  If TEST UNDER MASK can be used,
// return the corresponding CC mask, otherwise return 0.
static unsigned getTestUnderMaskCond(unsigned BitSize, unsigned CCMask,
                                     uint64_t Mask, uint64_t CmpVal,
                                     unsigned ICmpType) {}

// See whether C can be implemented as a TEST UNDER MASK instruction.
// Update the arguments with the TM version if so.
static void adjustForTestUnderMask(SelectionDAG &DAG, const SDLoc &DL,
                                   Comparison &C) {}

// Implement i128 comparison in vector registers.
static void adjustICmp128(SelectionDAG &DAG, const SDLoc &DL,
                          Comparison &C) {}

// See whether the comparison argument contains a redundant AND
// and remove it if so.  This sometimes happens due to the generic
// BRCOND expansion.
static void adjustForRedundantAnd(SelectionDAG &DAG, const SDLoc &DL,
                                  Comparison &C) {}

// Return a Comparison that tests the condition-code result of intrinsic
// node Call against constant integer CC using comparison code Cond.
// Opcode is the opcode of the SystemZISD operation for the intrinsic
// and CCValid is the set of possible condition-code results.
static Comparison getIntrinsicCmp(SelectionDAG &DAG, unsigned Opcode,
                                  SDValue Call, unsigned CCValid, uint64_t CC,
                                  ISD::CondCode Cond) {}

// Decide how to implement a comparison of type Cond between CmpOp0 and CmpOp1.
static Comparison getCmp(SelectionDAG &DAG, SDValue CmpOp0, SDValue CmpOp1,
                         ISD::CondCode Cond, const SDLoc &DL,
                         SDValue Chain = SDValue(),
                         bool IsSignaling = false) {}

// Emit the comparison instruction described by C.
static SDValue emitCmp(SelectionDAG &DAG, const SDLoc &DL, Comparison &C) {}

// Implement a 32-bit *MUL_LOHI operation by extending both operands to
// 64 bits.  Extend is the extension type to use.  Store the high part
// in Hi and the low part in Lo.
static void lowerMUL_LOHI32(SelectionDAG &DAG, const SDLoc &DL, unsigned Extend,
                            SDValue Op0, SDValue Op1, SDValue &Hi,
                            SDValue &Lo) {}
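
// Sketch of the widening strategy the comment describes: extend both
// operands to i64, multiply once, and split the 64-bit product into its
// halves (illustrative; Extend would be ISD::SIGN_EXTEND or
// ISD::ZERO_EXTEND):
static void lowerMUL_LOHI32Sketch(SelectionDAG &DAG, const SDLoc &DL,
                                  unsigned Extend, SDValue Op0, SDValue Op1,
                                  SDValue &Hi, SDValue &Lo) {
  Op0 = DAG.getNode(Extend, DL, MVT::i64, Op0);
  Op1 = DAG.getNode(Extend, DL, MVT::i64, Op1);
  SDValue Mul = DAG.getNode(ISD::MUL, DL, MVT::i64, Op0, Op1);
  Hi = DAG.getNode(ISD::SRL, DL, MVT::i64, Mul,
                   DAG.getConstant(32, DL, MVT::i64));
  Hi = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Hi);
  Lo = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Mul);
}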

// Lower a binary operation that produces two VT results, one in each
// half of a GR128 pair.  Op0 and Op1 are the VT operands to the operation,
// and Opcode performs the GR128 operation.  Store the even register result
// in Even and the odd register result in Odd.
static void lowerGR128Binary(SelectionDAG &DAG, const SDLoc &DL, EVT VT,
                             unsigned Opcode, SDValue Op0, SDValue Op1,
                             SDValue &Even, SDValue &Odd) {}

// Return an i32 value that is 1 if the CC value produced by CCReg is
// in the mask CCMask and 0 otherwise.  CC is known to have a value
// in CCValid, so other values can be ignored.
static SDValue emitSETCC(SelectionDAG &DAG, const SDLoc &DL, SDValue CCReg,
                         unsigned CCValid, unsigned CCMask) {}

// Return the SystemZISD vector comparison operation for CC, or 0 if it cannot
// be done directly.  Mode is CmpMode::Int for integer comparisons, CmpMode::FP
// for regular floating-point comparisons, CmpMode::StrictFP for strict (quiet)
// floating-point comparisons, and CmpMode::SignalingFP for strict signaling
// floating-point comparisons.
enum class CmpMode {};
static unsigned getVectorComparison(ISD::CondCode CC, CmpMode Mode) {}

// Return the SystemZISD vector comparison operation for CC or its inverse,
// or 0 if neither can be done directly.  Indicate in Invert whether the
// result is for the inverse of CC.  Mode is as above.
static unsigned getVectorComparisonOrInvert(ISD::CondCode CC, CmpMode Mode,
                                            bool &Invert) {}

// Return a v2f64 that contains the extended form of elements Start and Start+1
// of v4f32 value Op.  If Chain is nonnull, return the strict form.
static SDValue expandV4F32ToV2F64(SelectionDAG &DAG, int Start, const SDLoc &DL,
                                  SDValue Op, SDValue Chain) {}

// Build a comparison of vectors CmpOp0 and CmpOp1 using opcode Opcode,
// producing a result of type VT.  If Chain is nonnull, return the strict form.
SDValue SystemZTargetLowering::getVectorCmp(SelectionDAG &DAG, unsigned Opcode,
                                            const SDLoc &DL, EVT VT,
                                            SDValue CmpOp0,
                                            SDValue CmpOp1,
                                            SDValue Chain) const {}

// Lower a vector comparison of type CC between CmpOp0 and CmpOp1, producing
// an integer mask of type VT.  If Chain is nonnull, we have a strict
// floating-point comparison.  If in addition IsSignaling is true, we have
// a strict signaling floating-point comparison.
SDValue SystemZTargetLowering::lowerVectorSETCC(SelectionDAG &DAG,
                                                const SDLoc &DL, EVT VT,
                                                ISD::CondCode CC,
                                                SDValue CmpOp0,
                                                SDValue CmpOp1,
                                                SDValue Chain,
                                                bool IsSignaling) const {}

SDValue SystemZTargetLowering::lowerSETCC(SDValue Op,
                                          SelectionDAG &DAG) const {}

SDValue SystemZTargetLowering::lowerSTRICT_FSETCC(SDValue Op,
                                                  SelectionDAG &DAG,
                                                  bool IsSignaling) const {}

SDValue SystemZTargetLowering::lowerBR_CC(SDValue Op, SelectionDAG &DAG) const {}

// Return true if Pos is CmpOp and Neg is the negative of CmpOp,
// allowing Pos and Neg to be wider than CmpOp.
static bool isAbsolute(SDValue CmpOp, SDValue Pos, SDValue Neg) {}

// Return the absolute or negative absolute of Op; IsNegative decides which.
static SDValue getAbsolute(SelectionDAG &DAG, const SDLoc &DL, SDValue Op,
                           bool IsNegative) {}
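
// Sketch: take |Op| with ISD::ABS and subtract it from zero when IsNegative
// is set (assumed shape, shown for illustration):
static SDValue getAbsoluteSketch(SelectionDAG &DAG, const SDLoc &DL,
                                 SDValue Op, bool IsNegative) {
  EVT VT = Op.getValueType();
  Op = DAG.getNode(ISD::ABS, DL, VT, Op);
  if (IsNegative)
    Op = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Op);
  return Op;
}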

SDValue SystemZTargetLowering::lowerSELECT_CC(SDValue Op,
                                              SelectionDAG &DAG) const {}

SDValue SystemZTargetLowering::lowerGlobalAddress(GlobalAddressSDNode *Node,
                                                  SelectionDAG &DAG) const {}

SDValue SystemZTargetLowering::lowerTLSGetOffset(GlobalAddressSDNode *Node,
                                                 SelectionDAG &DAG,
                                                 unsigned Opcode,
                                                 SDValue GOTOffset) const {}

SDValue SystemZTargetLowering::lowerThreadPointer(const SDLoc &DL,
                                                  SelectionDAG &DAG) const {}

SDValue SystemZTargetLowering::lowerGlobalTLSAddress(GlobalAddressSDNode *Node,
                                                     SelectionDAG &DAG) const {}

SDValue SystemZTargetLowering::lowerBlockAddress(BlockAddressSDNode *Node,
                                                 SelectionDAG &DAG) const {}

SDValue SystemZTargetLowering::lowerJumpTable(JumpTableSDNode *JT,
                                              SelectionDAG &DAG) const {}

SDValue SystemZTargetLowering::lowerConstantPool(ConstantPoolSDNode *CP,
                                                 SelectionDAG &DAG) const {}

SDValue SystemZTargetLowering::lowerFRAMEADDR(SDValue Op,
                                              SelectionDAG &DAG) const {}

SDValue SystemZTargetLowering::lowerRETURNADDR(SDValue Op,
                                               SelectionDAG &DAG) const {}

SDValue SystemZTargetLowering::lowerBITCAST(SDValue Op,
                                            SelectionDAG &DAG) const {}

SDValue SystemZTargetLowering::lowerVASTART(SDValue Op,
                                            SelectionDAG &DAG) const {}

SDValue SystemZTargetLowering::lowerVASTART_XPLINK(SDValue Op,
                                                   SelectionDAG &DAG) const {}

SDValue SystemZTargetLowering::lowerVASTART_ELF(SDValue Op,
                                                SelectionDAG &DAG) const {}

SDValue SystemZTargetLowering::lowerVACOPY(SDValue Op,
                                           SelectionDAG &DAG) const {}

SDValue
SystemZTargetLowering::lowerDYNAMIC_STACKALLOC(SDValue Op,
                                               SelectionDAG &DAG) const {}

SDValue
SystemZTargetLowering::lowerDYNAMIC_STACKALLOC_XPLINK(SDValue Op,
                                                      SelectionDAG &DAG) const {}

SDValue
SystemZTargetLowering::lowerDYNAMIC_STACKALLOC_ELF(SDValue Op,
                                                   SelectionDAG &DAG) const {}

SDValue SystemZTargetLowering::lowerGET_DYNAMIC_AREA_OFFSET(
    SDValue Op, SelectionDAG &DAG) const {}

SDValue SystemZTargetLowering::lowerSMUL_LOHI(SDValue Op,
                                              SelectionDAG &DAG) const {}

SDValue SystemZTargetLowering::lowerUMUL_LOHI(SDValue Op,
                                              SelectionDAG &DAG) const {}

SDValue SystemZTargetLowering::lowerSDIVREM(SDValue Op,
                                            SelectionDAG &DAG) const {}

SDValue SystemZTargetLowering::lowerUDIVREM(SDValue Op,
                                            SelectionDAG &DAG) const {}

SDValue SystemZTargetLowering::lowerOR(SDValue Op, SelectionDAG &DAG) const {}

// Lower SADDO/SSUBO/UADDO/USUBO nodes.
SDValue SystemZTargetLowering::lowerXALUO(SDValue Op,
                                          SelectionDAG &DAG) const {}

static bool isAddCarryChain(SDValue Carry) {}

static bool isSubBorrowChain(SDValue Carry) {}
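
// Sketch of the carry-chain walk: follow the carry operand through any
// number of UADDO_CARRY nodes and require the chain to bottom out in a
// UADDO (assumed form; the borrow case would be symmetric with
// USUBO_CARRY/USUBO):
static bool isAddCarryChainSketch(SDValue Carry) {
  while (Carry.getOpcode() == ISD::UADDO_CARRY)
    Carry = Carry.getOperand(2); // Operand 2 is the incoming carry.
  return Carry.getOpcode() == ISD::UADDO;
}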

// Lower UADDO_CARRY/USUBO_CARRY nodes.
SDValue SystemZTargetLowering::lowerUADDSUBO_CARRY(SDValue Op,
                                                   SelectionDAG &DAG) const {}

SDValue SystemZTargetLowering::lowerCTPOP(SDValue Op,
                                          SelectionDAG &DAG) const {}

SDValue SystemZTargetLowering::lowerATOMIC_FENCE(SDValue Op,
                                                 SelectionDAG &DAG) const {}

SDValue SystemZTargetLowering::lowerATOMIC_LDST_I128(SDValue Op,
                                                     SelectionDAG &DAG) const {}

// Prepare for a Compare And Swap for a subword operation.  This has to be
// performed on a naturally aligned 4-byte word in memory.
static void getCSAddressAndShifts(SDValue Addr, SelectionDAG &DAG, SDLoc DL,
                                  SDValue &AlignedAddr, SDValue &BitShift,
                                  SDValue &NegBitShift) {}

// Op is an 8-, 16- or 32-bit ATOMIC_LOAD_* operation.  Lower the 8- and
// 16-bit cases into the fullword ATOMIC_LOADW_* operation given by Opcode.
SDValue SystemZTargetLowering::lowerATOMIC_LOAD_OP(SDValue Op,
                                                   SelectionDAG &DAG,
                                                   unsigned Opcode) const {}

// Op is an ATOMIC_LOAD_SUB operation.  Lower 8- and 16-bit operations into
// ATOMIC_LOADW_SUBs and convert 32- and 64-bit operations into additions.
SDValue SystemZTargetLowering::lowerATOMIC_LOAD_SUB(SDValue Op,
                                                    SelectionDAG &DAG) const {}

// Lower 8/16/32/64-bit ATOMIC_CMP_SWAP_WITH_SUCCESS node.
SDValue SystemZTargetLowering::lowerATOMIC_CMP_SWAP(SDValue Op,
                                                    SelectionDAG &DAG) const {}

MachineMemOperand::Flags
SystemZTargetLowering::getTargetMMOFlags(const Instruction &I) const {}

SDValue SystemZTargetLowering::lowerSTACKSAVE(SDValue Op,
                                              SelectionDAG &DAG) const {}

SDValue SystemZTargetLowering::lowerSTACKRESTORE(SDValue Op,
                                                 SelectionDAG &DAG) const {}

SDValue SystemZTargetLowering::lowerPREFETCH(SDValue Op,
                                             SelectionDAG &DAG) const {}

// Convert condition code in CCReg to an i32 value.
static SDValue getCCResult(SelectionDAG &DAG, SDValue CCReg) {}
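
// Sketch of the conversion: INSERT PROGRAM MASK places CC in bits 29:28 of
// the result, so shifting right by SystemZ::IPM_CC leaves CC in the low two
// bits (assumed shape, shown for illustration):
static SDValue getCCResultSketch(SelectionDAG &DAG, SDValue CCReg) {
  SDLoc DL(CCReg);
  SDValue IPM = DAG.getNode(SystemZISD::IPM, DL, MVT::i32, CCReg);
  return DAG.getNode(ISD::SRL, DL, MVT::i32, IPM,
                     DAG.getConstant(SystemZ::IPM_CC, DL, MVT::i32));
}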

SDValue
SystemZTargetLowering::lowerINTRINSIC_W_CHAIN(SDValue Op,
                                              SelectionDAG &DAG) const {}

SDValue
SystemZTargetLowering::lowerINTRINSIC_WO_CHAIN(SDValue Op,
                                               SelectionDAG &DAG) const {}

namespace {
// Says that SystemZISD operation Opcode can be used to perform the equivalent
// of a VPERM with permute vector Bytes.  If Opcode takes three operands,
// Operand is the constant third operand, otherwise it is the number of
// bytes in each element of the result.
struct Permute {};
} // end anonymous namespace

static const Permute PermuteForms[] =;

// Called after matching a vector shuffle against a particular pattern.
// Both the original shuffle and the pattern have two vector operands.
// OpNos[0] is the operand of the original shuffle that should be used for
// operand 0 of the pattern, or -1 if operand 0 of the pattern can be anything.
// OpNos[1] is the same for operand 1 of the pattern.  Resolve these -1s and
// set OpNo0 and OpNo1 to the shuffle operands that should actually be used
// for operands 0 and 1 of the pattern.
static bool chooseShuffleOpNos(int *OpNos, unsigned &OpNo0, unsigned &OpNo1) {}
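
// Sketch of the resolution rule just described: a -1 inherits the other
// operand's choice, and two -1s mean the pattern was never actually
// constrained, so there is nothing to resolve (illustrative form):
static bool chooseShuffleOpNosSketch(int *OpNos, unsigned &OpNo0,
                                     unsigned &OpNo1) {
  if (OpNos[0] < 0) {
    if (OpNos[1] < 0)
      return false;
    OpNo0 = OpNo1 = OpNos[1];
  } else if (OpNos[1] < 0) {
    OpNo0 = OpNo1 = OpNos[0];
  } else {
    OpNo0 = OpNos[0];
    OpNo1 = OpNos[1];
  }
  return true;
}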

// Bytes is a VPERM-like permute vector, except that -1 is used for
// undefined bytes.  Return true if the VPERM can be implemented using P.
// When returning true set OpNo0 to the VPERM operand that should be
// used for operand 0 of P and likewise OpNo1 for operand 1 of P.
//
// For example, if swapping the VPERM operands allows P to match, OpNo0
// will be 1 and OpNo1 will be 0.  If instead Bytes only refers to one
// operand, but rewriting it to use two duplicated operands allows it to
// match P, then OpNo0 and OpNo1 will be the same.
static bool matchPermute(const SmallVectorImpl<int> &Bytes, const Permute &P,
                         unsigned &OpNo0, unsigned &OpNo1) {}

// As above, but search for a matching permute.
static const Permute *matchPermute(const SmallVectorImpl<int> &Bytes,
                                   unsigned &OpNo0, unsigned &OpNo1) {}

// Bytes is a VPERM-like permute vector, except that -1 is used for
// undefined bytes.  This permute is an operand of an outer permute.
// See whether redistributing the -1 bytes gives a shuffle that can be
// implemented using P.  If so, set Transform to a VPERM-like permute vector
// that, when applied to the result of P, gives the original permute in Bytes.
static bool matchDoublePermute(const SmallVectorImpl<int> &Bytes,
                               const Permute &P,
                               SmallVectorImpl<int> &Transform) {}

// As above, but search for a matching permute.
static const Permute *matchDoublePermute(const SmallVectorImpl<int> &Bytes,
                                         SmallVectorImpl<int> &Transform) {}

// Convert the mask of the given shuffle op into a byte-level mask,
// as if it had type vNi8.
static bool getVPermMask(SDValue ShuffleOp,
                         SmallVectorImpl<int> &Bytes) {}

// Bytes is a VPERM-like permute vector, except that -1 is used for
// undefined bytes.  See whether bytes [Start, Start + BytesPerElement) of
// the result come from a contiguous sequence of bytes from one input.
// Set Base to the selector for the first byte if so.
static bool getShuffleInput(const SmallVectorImpl<int> &Bytes, unsigned Start,
                            unsigned BytesPerElement, int &Base) {}

// Bytes is a VPERM-like permute vector, except that -1 is used for
// undefined bytes.  Return true if it can be performed using VSLDB.
// When returning true, set StartIndex to the shift amount and OpNo0
// and OpNo1 to the VPERM operands that should be used as the first
// and second shift operand respectively.
static bool isShlDoublePermute(const SmallVectorImpl<int> &Bytes,
                               unsigned &StartIndex, unsigned &OpNo0,
                               unsigned &OpNo1) {}

// Create a node that performs P on operands Op0 and Op1, casting the
// operands to the appropriate type.  The type of the result is determined by P.
static SDValue getPermuteNode(SelectionDAG &DAG, const SDLoc &DL,
                              const Permute &P, SDValue Op0, SDValue Op1) {}

static bool isZeroVector(SDValue N) {}

// Return the index of the zero/undef vector, or UINT32_MAX if not found.
static uint32_t findZeroVectorIdx(SDValue *Ops, unsigned Num) {}
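
// Sketches of the two helpers above (assumed forms, shown for illustration):
// a zero vector is recognized through bitcasts, and the index search returns
// the first operand that matches.
static bool isZeroVectorSketch(SDValue N) {
  if (N->getOpcode() == ISD::BITCAST)
    N = N->getOperand(0);
  return ISD::isBuildVectorAllZeros(N.getNode());
}
static uint32_t findZeroVectorIdxSketch(SDValue *Ops, unsigned Num) {
  for (unsigned I = 0; I < Num; ++I)
    if (isZeroVectorSketch(Ops[I]))
      return I;
  return UINT32_MAX;
}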

// Bytes is a VPERM-like permute vector, except that -1 is used for
// undefined bytes.  Implement it on operands Ops[0] and Ops[1] using
// VSLDB or VPERM.
static SDValue getGeneralPermuteNode(SelectionDAG &DAG, const SDLoc &DL,
                                     SDValue *Ops,
                                     const SmallVectorImpl<int> &Bytes) {}

namespace {
// Describes a general N-operand vector shuffle.
struct GeneralShuffle {};
} // end anonymous namespace

// Add an extra undefined element to the shuffle.
void GeneralShuffle::addUndef() {}

// Add an extra element to the shuffle, taking it from element Elem of Op.
// A null Op indicates a vector input whose value will be calculated later;
// there is at most one such input per shuffle and it always has the same
// type as the result.  Bails out and returns false if the source vector
// elements of an EXTRACT_VECTOR_ELT are smaller than the destination
// elements; per LLVM semantics they would be implicitly extended, but that
// case is rare and not optimized here.
bool GeneralShuffle::add(SDValue Op, unsigned Elem) {}

// Return SDNodes for the completed shuffle.
SDValue GeneralShuffle::getNode(SelectionDAG &DAG, const SDLoc &DL) {}

#ifndef NDEBUG
// Debug helper: print the contents of a byte-level permute vector, prefixed
// by the caption Msg.
static void dumpBytes(const SmallVectorImpl<int> &Bytes, std::string Msg) {
  dbgs() << Msg << " { ";
  for (int B : Bytes)
    dbgs() << B << " ";
  dbgs() << "}\n";
}
#endif

// If the Bytes vector matches an unpack operation, prepare to do the unpack
// after all else by removing the zero vector and the effect of the unpack on
// Bytes.
void GeneralShuffle::tryPrepareForUnpack() {}

SDValue GeneralShuffle::insertUnpackIfPrepared(SelectionDAG &DAG,
                                               const SDLoc &DL,
                                               SDValue Op) {}

// Return true if the given BUILD_VECTOR is a scalar-to-vector conversion.
static bool isScalarToVector(SDValue Op) {}
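
// Sketch: a BUILD_VECTOR is a scalar-to-vector conversion when every element
// other than the first is undefined (assumed form, shown for illustration):
static bool isScalarToVectorSketch(SDValue Op) {
  for (unsigned I = 1, E = Op.getNumOperands(); I != E; ++I)
    if (!Op.getOperand(I).isUndef())
      return false;
  return true;
}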

// Return a vector of type VT that contains Value in the first element.
// The other elements don't matter.
static SDValue buildScalarToVector(SelectionDAG &DAG, const SDLoc &DL, EVT VT,
                                   SDValue Value) {}

// Return a vector of type VT in which Op0 is in element 0 and Op1 is in
// element 1.  Used for cases in which replication is cheap.
static SDValue buildMergeScalars(SelectionDAG &DAG, const SDLoc &DL, EVT VT,
                                 SDValue Op0, SDValue Op1) {}

// Extend GPR scalars Op0 and Op1 to doublewords and return a v2i64
// vector for them.
static SDValue joinDwords(SelectionDAG &DAG, const SDLoc &DL, SDValue Op0,
                          SDValue Op1) {}

// If a BUILD_VECTOR contains some EXTRACT_VECTOR_ELTs, it's usually
// better to use VECTOR_SHUFFLEs on them, only using BUILD_VECTOR for
// the non-EXTRACT_VECTOR_ELT elements.  See if the given BUILD_VECTOR
// would benefit from this representation and return it if so.
static SDValue tryBuildVectorShuffle(SelectionDAG &DAG,
                                     BuildVectorSDNode *BVN) {}

bool SystemZTargetLowering::isVectorElementLoad(SDValue Op) const {}

// Combine GPR scalar values Elems into a vector of type VT.
SDValue
SystemZTargetLowering::buildVector(SelectionDAG &DAG, const SDLoc &DL, EVT VT,
                                   SmallVectorImpl<SDValue> &Elems) const {}

SDValue SystemZTargetLowering::lowerBUILD_VECTOR(SDValue Op,
                                                 SelectionDAG &DAG) const {}

SDValue SystemZTargetLowering::lowerVECTOR_SHUFFLE(SDValue Op,
                                                   SelectionDAG &DAG) const {}

SDValue SystemZTargetLowering::lowerSCALAR_TO_VECTOR(SDValue Op,
                                                     SelectionDAG &DAG) const {}

SDValue SystemZTargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op,
                                                      SelectionDAG &DAG) const {}

SDValue
SystemZTargetLowering::lowerEXTRACT_VECTOR_ELT(SDValue Op,
                                               SelectionDAG &DAG) const {}

SDValue SystemZTargetLowering::
lowerSIGN_EXTEND_VECTOR_INREG(SDValue Op, SelectionDAG &DAG) const {}

// Lower a ZERO_EXTEND_VECTOR_INREG to a vector shuffle with a zero vector.
SDValue SystemZTargetLowering::
lowerZERO_EXTEND_VECTOR_INREG(SDValue Op, SelectionDAG &DAG) const {}

SDValue SystemZTargetLowering::lowerShift(SDValue Op, SelectionDAG &DAG,
                                          unsigned ByScalar) const {}

static SDValue lowerAddrSpaceCast(SDValue Op, SelectionDAG &DAG) {}

SDValue SystemZTargetLowering::lowerIS_FPCLASS(SDValue Op,
                                               SelectionDAG &DAG) const {}

SDValue SystemZTargetLowering::lowerREADCYCLECOUNTER(SDValue Op,
                                                     SelectionDAG &DAG) const {}

SDValue SystemZTargetLowering::LowerOperation(SDValue Op,
                                              SelectionDAG &DAG) const {}

static SDValue expandBitCastI128ToF128(SelectionDAG &DAG, SDValue Src,
                                       const SDLoc &SL) {}

static SDValue expandBitCastF128ToI128(SelectionDAG &DAG, SDValue Src,
                                       const SDLoc &SL) {}

// Lower operations with invalid operand or result types (currently used
// only for 128-bit integer types).
void
SystemZTargetLowering::LowerOperationWrapper(SDNode *N,
                                             SmallVectorImpl<SDValue> &Results,
                                             SelectionDAG &DAG) const {}

void
SystemZTargetLowering::ReplaceNodeResults(SDNode *N,
                                          SmallVectorImpl<SDValue> &Results,
                                          SelectionDAG &DAG) const {}

const char *SystemZTargetLowering::getTargetNodeName(unsigned Opcode) const {}

// Return true if VT is a vector whose elements are a whole number of bytes
// in width.  Also check that vector support is present.
bool SystemZTargetLowering::canTreatAsByteVector(EVT VT) const {}

// Try to simplify an EXTRACT_VECTOR_ELT from a vector of type VecVT
// producing a result of type ResVT.  Op is a possibly bitcast version
// of the input vector and Index is the index (based on type VecVT) that
// should be extracted.  Return the new extraction if a simplification
// was possible or if Force is true.
SDValue SystemZTargetLowering::combineExtract(const SDLoc &DL, EVT ResVT,
                                              EVT VecVT, SDValue Op,
                                              unsigned Index,
                                              DAGCombinerInfo &DCI,
                                              bool Force) const {}

// Optimize vector operations in scalar value Op on the basis that Op
// is truncated to TruncVT.
SDValue SystemZTargetLowering::combineTruncateExtract(
    const SDLoc &DL, EVT TruncVT, SDValue Op, DAGCombinerInfo &DCI) const {}

SDValue SystemZTargetLowering::combineZERO_EXTEND(
    SDNode *N, DAGCombinerInfo &DCI) const {}

SDValue SystemZTargetLowering::combineSIGN_EXTEND_INREG(
    SDNode *N, DAGCombinerInfo &DCI) const {}

SDValue SystemZTargetLowering::combineSIGN_EXTEND(
    SDNode *N, DAGCombinerInfo &DCI) const {}

SDValue SystemZTargetLowering::combineMERGE(
    SDNode *N, DAGCombinerInfo &DCI) const {}

static bool isI128MovedToParts(LoadSDNode *LD, SDNode *&LoPart,
                               SDNode *&HiPart) {}

static bool isF128MovedToParts(LoadSDNode *LD, SDNode *&LoPart,
                               SDNode *&HiPart) {}

SDValue SystemZTargetLowering::combineLOAD(
    SDNode *N, DAGCombinerInfo &DCI) const {}

bool SystemZTargetLowering::canLoadStoreByteSwapped(EVT VT) const {}

static bool isVectorElementSwap(ArrayRef<int> M, EVT VT) {}

static bool isOnlyUsedByStores(SDValue StoredVal, SelectionDAG &DAG) {}

static bool isI128MovedFromParts(SDValue Val, SDValue &LoPart,
                                 SDValue &HiPart) {}

static bool isF128MovedFromParts(SDValue Val, SDValue &LoPart,
                                 SDValue &HiPart) {}

SDValue SystemZTargetLowering::combineSTORE(
    SDNode *N, DAGCombinerInfo &DCI) const {}

SDValue SystemZTargetLowering::combineVECTOR_SHUFFLE(
    SDNode *N, DAGCombinerInfo &DCI) const {}

SDValue SystemZTargetLowering::combineEXTRACT_VECTOR_ELT(
    SDNode *N, DAGCombinerInfo &DCI) const {}

SDValue SystemZTargetLowering::combineJOIN_DWORDS(
    SDNode *N, DAGCombinerInfo &DCI) const {}

static SDValue MergeInputChains(SDNode *N1, SDNode *N2) {}

SDValue SystemZTargetLowering::combineFP_ROUND(
    SDNode *N, DAGCombinerInfo &DCI) const {}

SDValue SystemZTargetLowering::combineFP_EXTEND(
    SDNode *N, DAGCombinerInfo &DCI) const {}

SDValue SystemZTargetLowering::combineINT_TO_FP(
    SDNode *N, DAGCombinerInfo &DCI) const {}

SDValue SystemZTargetLowering::combineBSWAP(
    SDNode *N, DAGCombinerInfo &DCI) const {}

static bool combineCCMask(SDValue &CCReg, int &CCValid, int &CCMask) {}

SDValue SystemZTargetLowering::combineBR_CCMASK(
    SDNode *N, DAGCombinerInfo &DCI) const {}

SDValue SystemZTargetLowering::combineSELECT_CCMASK(
    SDNode *N, DAGCombinerInfo &DCI) const {}


SDValue SystemZTargetLowering::combineGET_CCMASK(
    SDNode *N, DAGCombinerInfo &DCI) const {}

SDValue SystemZTargetLowering::combineIntDIVREM(
    SDNode *N, DAGCombinerInfo &DCI) const {}

SDValue SystemZTargetLowering::combineINTRINSIC(
    SDNode *N, DAGCombinerInfo &DCI) const {}

SDValue SystemZTargetLowering::unwrapAddress(SDValue N) const {}

SDValue SystemZTargetLowering::PerformDAGCombine(SDNode *N,
                                                 DAGCombinerInfo &DCI) const {}

// Return the demanded elements for the OpNo source operand of Op. DemandedElts
// are for Op.
static APInt getDemandedSrcElements(SDValue Op, const APInt &DemandedElts,
                                    unsigned OpNo) {}

static void computeKnownBitsBinOp(const SDValue Op, KnownBits &Known,
                                  const APInt &DemandedElts,
                                  const SelectionDAG &DAG, unsigned Depth,
                                  unsigned OpNo) {}

void
SystemZTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
                                                     KnownBits &Known,
                                                     const APInt &DemandedElts,
                                                     const SelectionDAG &DAG,
                                                     unsigned Depth) const {}

static unsigned computeNumSignBitsBinOp(SDValue Op, const APInt &DemandedElts,
                                        const SelectionDAG &DAG, unsigned Depth,
                                        unsigned OpNo) {}

unsigned
SystemZTargetLowering::ComputeNumSignBitsForTargetNode(
    SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG,
    unsigned Depth) const {}

bool SystemZTargetLowering::
isGuaranteedNotToBeUndefOrPoisonForTargetNode(SDValue Op,
         const APInt &DemandedElts, const SelectionDAG &DAG,
         bool PoisonOnly, unsigned Depth) const {}

unsigned
SystemZTargetLowering::getStackProbeSize(const MachineFunction &MF) const {}

//===----------------------------------------------------------------------===//
// Custom insertion
//===----------------------------------------------------------------------===//

// Force base value Base into a register before MI.  Return the register.
static Register forceReg(MachineInstr &MI, MachineOperand &Base,
                         const SystemZInstrInfo *TII) {}
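
// Sketch of the idea: reuse Base if it is already a register, otherwise
// materialize the address into a fresh virtual register with a LOAD ADDRESS
// (the LA opcode and ADDR64 register class are assumptions of this
// illustration):
static Register forceRegSketch(MachineInstr &MI, MachineOperand &Base,
                               const SystemZInstrInfo *TII) {
  if (Base.isReg())
    return Base.getReg();

  MachineBasicBlock *MBB = MI.getParent();
  MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
  Register Reg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass);
  BuildMI(*MBB, MI, MI.getDebugLoc(), TII->get(SystemZ::LA), Reg)
      .add(Base)
      .addImm(0)
      .addReg(0);
  return Reg;
}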

// The CC operand of MI might be missing a kill marker because there
// were multiple uses of CC, and ISel didn't know which to mark.
// Figure out whether MI should have had a kill marker.
static bool checkCCKill(MachineInstr &MI, MachineBasicBlock *MBB) {}

// Return true if it is OK for this Select pseudo-opcode to be cascaded
// together with other Select pseudo-opcodes into a single basic-block with
// a conditional jump around it.
static bool isSelectPseudo(MachineInstr &MI) {}

// Helper function, which inserts PHI functions into SinkMBB:
//   %Result(i) = phi [ %FalseValue(i), FalseMBB ], [ %TrueValue(i), TrueMBB ],
// where %FalseValue(i) and %TrueValue(i) are taken from Selects.
static void createPHIsForSelects(SmallVector<MachineInstr*, 8> &Selects,
                                 MachineBasicBlock *TrueMBB,
                                 MachineBasicBlock *FalseMBB,
                                 MachineBasicBlock *SinkMBB) {}

MachineBasicBlock *
SystemZTargetLowering::emitAdjCallStack(MachineInstr &MI,
                                        MachineBasicBlock *BB) const {}

// Implement EmitInstrWithCustomInserter for pseudo Select* instruction MI.
MachineBasicBlock *
SystemZTargetLowering::emitSelect(MachineInstr &MI,
                                  MachineBasicBlock *MBB) const {}

// Implement EmitInstrWithCustomInserter for pseudo CondStore* instruction MI.
// StoreOpcode is the store to use and Invert says whether the store should
// happen when the condition is false rather than true.  If a STORE ON
// CONDITION is available, STOCOpcode is its opcode, otherwise it is 0.
MachineBasicBlock *SystemZTargetLowering::emitCondStore(MachineInstr &MI,
                                                        MachineBasicBlock *MBB,
                                                        unsigned StoreOpcode,
                                                        unsigned STOCOpcode,
                                                        bool Invert) const {}

// Implement EmitInstrWithCustomInserter for pseudo [SU]Cmp128Hi instruction MI.
MachineBasicBlock *
SystemZTargetLowering::emitICmp128Hi(MachineInstr &MI,
                                     MachineBasicBlock *MBB,
                                     bool Unsigned) const {}

// Implement EmitInstrWithCustomInserter for subword pseudo ATOMIC_LOADW_* or
// ATOMIC_SWAPW instruction MI.  BinOpcode is the instruction that performs
// the binary operation elided by "*", or 0 for ATOMIC_SWAPW.  Invert says
// whether the field should be inverted after performing BinOpcode (e.g. for
// NAND).
MachineBasicBlock *SystemZTargetLowering::emitAtomicLoadBinary(
    MachineInstr &MI, MachineBasicBlock *MBB, unsigned BinOpcode,
    bool Invert) const {}

// Implement EmitInstrWithCustomInserter for subword pseudo
// ATOMIC_LOADW_{,U}{MIN,MAX} instruction MI.  CompareOpcode is the
// instruction that should be used to compare the current field with the
// minimum or maximum value.  KeepOldMask is the BRC condition-code mask
// for when the current field should be kept.
MachineBasicBlock *SystemZTargetLowering::emitAtomicLoadMinMax(
    MachineInstr &MI, MachineBasicBlock *MBB, unsigned CompareOpcode,
    unsigned KeepOldMask) const {}

// Implement EmitInstrWithCustomInserter for subword pseudo ATOMIC_CMP_SWAPW
// instruction MI.
MachineBasicBlock *
SystemZTargetLowering::emitAtomicCmpSwapW(MachineInstr &MI,
                                          MachineBasicBlock *MBB) const {}

// Emit a move from two GR64s to a GR128.
MachineBasicBlock *
SystemZTargetLowering::emitPair128(MachineInstr &MI,
                                   MachineBasicBlock *MBB) const {}

// Emit an extension from a GR64 to a GR128.  ClearEven is true
// if the high register of the GR128 value must be cleared or false if
// it's "don't care".
MachineBasicBlock *SystemZTargetLowering::emitExt128(MachineInstr &MI,
                                                     MachineBasicBlock *MBB,
                                                     bool ClearEven) const {}

MachineBasicBlock *
SystemZTargetLowering::emitMemMemWrapper(MachineInstr &MI,
                                         MachineBasicBlock *MBB,
                                         unsigned Opcode, bool IsMemset) const {}

// Decompose string pseudo-instruction MI into a loop that continually performs
// Opcode until CC != 3.
MachineBasicBlock *SystemZTargetLowering::emitStringWrapper(
    MachineInstr &MI, MachineBasicBlock *MBB, unsigned Opcode) const {}

// Update TBEGIN instruction with final opcode and register clobbers.
MachineBasicBlock *SystemZTargetLowering::emitTransactionBegin(
    MachineInstr &MI, MachineBasicBlock *MBB, unsigned Opcode,
    bool NoFloat) const {}

MachineBasicBlock *SystemZTargetLowering::emitLoadAndTestCmp0(
    MachineInstr &MI, MachineBasicBlock *MBB, unsigned Opcode) const {}

MachineBasicBlock *SystemZTargetLowering::emitProbedAlloca(
    MachineInstr &MI, MachineBasicBlock *MBB) const {}

SDValue SystemZTargetLowering::
getBackchainAddress(SDValue SP, SelectionDAG &DAG) const {}

MachineBasicBlock *SystemZTargetLowering::EmitInstrWithCustomInserter(
    MachineInstr &MI, MachineBasicBlock *MBB) const {}

// This is only used by the isel schedulers, and is needed only to prevent
// the compiler from crashing when list-ilp is used.
const TargetRegisterClass *
SystemZTargetLowering::getRepRegClassFor(MVT VT) const {}

SDValue SystemZTargetLowering::lowerGET_ROUNDING(SDValue Op,
                                                 SelectionDAG &DAG) const {}

SDValue SystemZTargetLowering::lowerVECREDUCE_ADD(SDValue Op,
                                                  SelectionDAG &DAG) const {}

// Consider a function fully internal only if it has local linkage and is
// never used in any way other than as the called function at call sites.
bool SystemZTargetLowering::isFullyInternal(const Function *Fn) const {}

static void printFunctionArgExts(const Function *F, raw_fd_ostream &OS) {}

void SystemZTargetLowering::
verifyNarrowIntegerArgs_Call(const SmallVectorImpl<ISD::OutputArg> &Outs,
                             const Function *F, SDValue Callee) const {}

void SystemZTargetLowering::
verifyNarrowIntegerArgs_Ret(const SmallVectorImpl<ISD::OutputArg> &Outs,
                            const Function *F) const {}

// Verify that narrow integer arguments are extended as required by the ABI.
// Return false if an error is found.
bool SystemZTargetLowering::
verifyNarrowIntegerArgs(const SmallVectorImpl<ISD::OutputArg> &Outs,
                        bool IsInternal) const {}