llvm/lib/Target/ARM/ARMFastISel.cpp

//===- ARMFastISel.cpp - ARM FastISel implementation ----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the ARM-specific support for the FastISel class. Some
// of the target-specific code is generated by tablegen in the file
// ARMGenFastISel.inc, which is #included here.
//
//===----------------------------------------------------------------------===//

#include "ARM.h"
#include "ARMBaseInstrInfo.h"
#include "ARMBaseRegisterInfo.h"
#include "ARMCallingConv.h"
#include "ARMConstantPoolValue.h"
#include "ARMISelLowering.h"
#include "ARMMachineFunctionInfo.h"
#include "ARMSubtarget.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "MCTargetDesc/ARMBaseInfo.h"
#include "Utils/ARMBaseInfo.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RuntimeLibcallUtil.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/CodeGenTypes/MachineValueType.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include <cassert>
#include <cstdint>
#include <utility>

using namespace llvm;

namespace {

  // All possible address modes, plus some.
  struct Address {};
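
  // Illustrative sketch only (an assumption, not the elided definition above):
  // such an address typically records either a register or a frame-index base
  // together with a constant integer offset. The name SketchAddress and its
  // exact layout are hypothetical.
  struct SketchAddress {
    enum { RegBase, FrameIndexBase } BaseType = RegBase;
    union {
      unsigned Reg; // Base register when BaseType == RegBase.
      int FI;       // Frame index when BaseType == FrameIndexBase.
    } Base;
    int Offset = 0; // Constant offset added to the base.
  };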

class ARMFastISel final : public FastISel {};

} // end anonymous namespace

// DefinesOptionalPredicate - This is different from DefinesPredicate in that
// we don't care about implicit defs here, just places where we'll need to add
// a default CCReg argument. Sets *CPSR if the instruction defines CPSR rather
// than CCR.
bool ARMFastISel::DefinesOptionalPredicate(MachineInstr *MI, bool *CPSR) {}
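
// Illustrative sketch only (an assumption, not the elided body above): the
// question this answers is whether the instruction carries an optional def
// and, if so, whether that def is CPSR. A hypothetical standalone check could
// look like this:
static bool sketchDefinesCPSR(const MachineInstr &MI) {
  if (!MI.getDesc().hasOptionalDef())
    return false;
  // Scan the register defs for CPSR.
  for (const MachineOperand &MO : MI.operands())
    if (MO.isReg() && MO.isDef() && MO.getReg() == ARM::CPSR)
      return true;
  return false;
}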

bool ARMFastISel::isARMNEONPred(const MachineInstr *MI) {}

// If the machine instruction is predicable, add the predicate operands; if it
// needs default CC operands, add those as well.
// TODO: If we want to support thumb1 then we'll need to deal with optional
// CPSR defs that need to be added before the remaining operands. See s_cc_out
// for descriptions why.
const MachineInstrBuilder &
ARMFastISel::AddOptionalDefs(const MachineInstrBuilder &MIB) {}
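
// Illustrative sketch only (an assumption, not the elided body above): in
// current ARM backend style the default operands are appended with the
// predOps()/condCodeOp() helpers from ARMBaseInstrInfo.h -- an always-true
// predicate for predicable instructions and a dead condition-code operand for
// instructions with an optional CPSR def:
static void sketchAddDefaultOperands(const MachineInstrBuilder &MIB) {
  MIB.add(predOps(ARMCC::AL)); // Predicate: AL (always), no predicate register.
  MIB.add(condCodeOp());       // Optional CPSR def left as "no register".
}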

unsigned ARMFastISel::fastEmitInst_r(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     unsigned Op0) {}

unsigned ARMFastISel::fastEmitInst_rr(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, unsigned Op1) {}

unsigned ARMFastISel::fastEmitInst_ri(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, uint64_t Imm) {}

unsigned ARMFastISel::fastEmitInst_i(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     uint64_t Imm) {}

// TODO: Don't worry about 64-bit now, but when this is fixed remove the
// checks from the various callers.
unsigned ARMFastISel::ARMMoveToFPReg(MVT VT, unsigned SrcReg) {}

unsigned ARMFastISel::ARMMoveToIntReg(MVT VT, unsigned SrcReg) {}

// For double-width floating point we need to materialize two constants
// (the high and the low halves) into integer registers and then use a move
// to get the combined constant into an FP reg.
unsigned ARMFastISel::ARMMaterializeFP(const ConstantFP *CFP, MVT VT) {}
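
// Illustrative sketch only (an assumption, not the elided body above): the
// "move" the comment above refers to is typically a VMOVDRR, which packs two
// core registers into a single D register. The helper name and parameter list
// here are hypothetical.
static Register sketchCombineHalvesToDouble(MachineBasicBlock &MBB,
                                            MachineBasicBlock::iterator InsertPt,
                                            const TargetInstrInfo &TII,
                                            MachineRegisterInfo &MRI,
                                            const DebugLoc &DL, Register Lo,
                                            Register Hi) {
  // VMOVDRR Dd, Rt, Rt2: Dd[31:0] = Rt, Dd[63:32] = Rt2.
  Register ResultReg = MRI.createVirtualRegister(&ARM::DPRRegClass);
  BuildMI(MBB, InsertPt, DL, TII.get(ARM::VMOVDRR), ResultReg)
      .addReg(Lo)
      .addReg(Hi)
      .add(predOps(ARMCC::AL));
  return ResultReg;
}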

unsigned ARMFastISel::ARMMaterializeInt(const Constant *C, MVT VT) {}

bool ARMFastISel::isPositionIndependent() const {}

unsigned ARMFastISel::ARMMaterializeGV(const GlobalValue *GV, MVT VT) {}

unsigned ARMFastISel::fastMaterializeConstant(const Constant *C) {}

// TODO: unsigned ARMFastISel::TargetMaterializeFloatZero(const ConstantFP *CF);

unsigned ARMFastISel::fastMaterializeAlloca(const AllocaInst *AI) {}

bool ARMFastISel::isTypeLegal(Type *Ty, MVT &VT) {}

bool ARMFastISel::isLoadTypeLegal(Type *Ty, MVT &VT) {}

// Computes the address to get to an object.
bool ARMFastISel::ARMComputeAddress(const Value *Obj, Address &Addr) {}
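
// Illustrative sketch only (an assumption, not the elided body above): one
// common fast path is recognizing a static alloca and using its frame index
// as the address base, e.g.:
static bool sketchAllocaToFrameIndex(const FunctionLoweringInfo &FuncInfo,
                                     const Value *Obj, int &FrameIndex) {
  const auto *AI = dyn_cast<AllocaInst>(Obj);
  if (!AI)
    return false;
  auto SI = FuncInfo.StaticAllocaMap.find(AI);
  if (SI == FuncInfo.StaticAllocaMap.end())
    return false;
  FrameIndex = SI->second;
  return true;
}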

void ARMFastISel::ARMSimplifyAddress(Address &Addr, MVT VT, bool useAM3) {}

void ARMFastISel::AddLoadStoreOperands(MVT VT, Address &Addr,
                                       const MachineInstrBuilder &MIB,
                                       MachineMemOperand::Flags Flags,
                                       bool useAM3) {}

bool ARMFastISel::ARMEmitLoad(MVT VT, Register &ResultReg, Address &Addr,
                              MaybeAlign Alignment, bool isZExt,
                              bool allocReg) {}

bool ARMFastISel::SelectLoad(const Instruction *I) {}

bool ARMFastISel::ARMEmitStore(MVT VT, unsigned SrcReg, Address &Addr,
                               MaybeAlign Alignment) {}

bool ARMFastISel::SelectStore(const Instruction *I) {}

static ARMCC::CondCodes getComparePred(CmpInst::Predicate Pred) {}
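
// Illustrative sketch only (an assumption, not the elided body above): the
// mapping pairs IR predicates with ARM condition codes; a few representative
// cases, with ARMCC::AL used here as a hypothetical "unhandled" sentinel:
static ARMCC::CondCodes sketchComparePred(CmpInst::Predicate Pred) {
  switch (Pred) {
  case CmpInst::ICMP_EQ:  return ARMCC::EQ;
  case CmpInst::ICMP_NE:  return ARMCC::NE;
  case CmpInst::ICMP_SGT: return ARMCC::GT;
  case CmpInst::ICMP_SLT: return ARMCC::LT;
  case CmpInst::ICMP_UGT: return ARMCC::HI;
  case CmpInst::ICMP_ULT: return ARMCC::LO;
  default:                return ARMCC::AL;
  }
}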

bool ARMFastISel::SelectBranch(const Instruction *I) {}

bool ARMFastISel::SelectIndirectBr(const Instruction *I) {}

bool ARMFastISel::ARMEmitCmp(const Value *Src1Value, const Value *Src2Value,
                             bool isZExt) {}

bool ARMFastISel::SelectCmp(const Instruction *I) {}

bool ARMFastISel::SelectFPExt(const Instruction *I) {}

bool ARMFastISel::SelectFPTrunc(const Instruction *I) {}

bool ARMFastISel::SelectIToFP(const Instruction *I, bool isSigned) {}

bool ARMFastISel::SelectFPToI(const Instruction *I, bool isSigned) {}

bool ARMFastISel::SelectSelect(const Instruction *I) {}

bool ARMFastISel::SelectDiv(const Instruction *I, bool isSigned) {}

bool ARMFastISel::SelectRem(const Instruction *I, bool isSigned) {}

bool ARMFastISel::SelectBinaryIntOp(const Instruction *I, unsigned ISDOpcode) {}

bool ARMFastISel::SelectBinaryFPOp(const Instruction *I, unsigned ISDOpcode) {}

// Call Handling Code

// This is largely taken from CCAssignFnForNode.
// TODO: We may not support all of this.
CCAssignFn *ARMFastISel::CCAssignFnForCall(CallingConv::ID CC,
                                           bool Return,
                                           bool isVarArg) {}
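
// Illustrative sketch only (an assumption, not the elided body above): for the
// AAPCS-based conventions the choice reduces to base AAPCS vs. hard-float
// AAPCS-VFP, with variadic calls falling back to the base AAPCS rules:
static CCAssignFn *sketchAAPCSAssignFn(bool Return, bool UseHardFloatABI,
                                       bool isVarArg) {
  if (UseHardFloatABI && !isVarArg)
    return Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP;
  return Return ? RetCC_ARM_AAPCS : CC_ARM_AAPCS;
}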

bool ARMFastISel::ProcessCallArgs(SmallVectorImpl<Value*> &Args,
                                  SmallVectorImpl<Register> &ArgRegs,
                                  SmallVectorImpl<MVT> &ArgVTs,
                                  SmallVectorImpl<ISD::ArgFlagsTy> &ArgFlags,
                                  SmallVectorImpl<Register> &RegArgs,
                                  CallingConv::ID CC,
                                  unsigned &NumBytes,
                                  bool isVarArg) {}

bool ARMFastISel::FinishCall(MVT RetVT, SmallVectorImpl<Register> &UsedRegs,
                             const Instruction *I, CallingConv::ID CC,
                             unsigned &NumBytes, bool isVarArg) {}

bool ARMFastISel::SelectRet(const Instruction *I) {}

unsigned ARMFastISel::ARMSelectCallOp(bool UseReg) {}

unsigned ARMFastISel::getLibcallReg(const Twine &Name) {}

// A helper that emits a call to the named libcall Call, passing the operands
// of the Instruction I as arguments. We can assume that we can emit a call
// for any libcall we can produce. This is an abridged version of the full
// call infrastructure since we won't need to worry about things like computed
// function pointers or strange arguments at call sites.
// TODO: Try to unify this and the normal call bits for ARM, then try to unify
// with X86.
bool ARMFastISel::ARMEmitLibcall(const Instruction *I, RTLIB::Libcall Call) {}
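
// Illustrative sketch only (an assumption, not the elided body above): the
// callee for a libcall is an external symbol whose name comes from
// TargetLowering rather than from the IR call site, e.g.:
static const char *sketchLibcallSymbolName(const TargetLowering &TLI,
                                           RTLIB::Libcall Call) {
  const char *Name = TLI.getLibcallName(Call);
  assert(Name && "libcall is expected to be available on this target");
  return Name;
}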

bool ARMFastISel::SelectCall(const Instruction *I,
                             const char *IntrMemName = nullptr) {}

bool ARMFastISel::ARMIsMemCpySmall(uint64_t Len) {}

bool ARMFastISel::ARMTryEmitSmallMemCpy(Address Dest, Address Src, uint64_t Len,
                                        MaybeAlign Alignment) {}

bool ARMFastISel::SelectIntrinsicCall(const IntrinsicInst &I) {}

bool ARMFastISel::SelectTrunc(const Instruction *I) {}

unsigned ARMFastISel::ARMEmitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT,
                                    bool isZExt) {}

bool ARMFastISel::SelectIntExt(const Instruction *I) {}

bool ARMFastISel::SelectShift(const Instruction *I,
                              ARM_AM::ShiftOpc ShiftTy) {}

// TODO: SoftFP support.
bool ARMFastISel::fastSelectInstruction(const Instruction *I) {}

// This table describes sign- and zero-extend instructions which can be
// folded into a preceding load. All of these extends have an immediate
// (sometimes a mask and sometimes a shift) that's applied after
// extension.
static const struct FoldableLoadExtendsStruct {} FoldableLoadExtends[] = {};
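
// Illustrative sketch only (an assumption about the elided table's shape, with
// hypothetical names): one plausible layout keys an {ARM, Thumb-2} opcode pair
// to the immediate the extend must carry, the extension kind, and the memory
// type it folds with:
struct SketchLoadExtendEntry {
  uint16_t Opc[2];                 // ARM opcode, Thumb-2 opcode.
  uint8_t ExpectedImm;             // Immediate the extend must use.
  bool IsZExt;                     // Zero- vs. sign-extension.
  MVT::SimpleValueType ExpectedVT; // Type of the load being folded.
};
static const SketchLoadExtendEntry SketchLoadExtends[] = {
    {{ARM::SXTH, ARM::t2SXTH}, 0, false, MVT::i16},   // sext i16 -> i32
    {{ARM::UXTH, ARM::t2UXTH}, 0, true, MVT::i16},    // zext i16 -> i32
    {{ARM::ANDri, ARM::t2ANDri}, 255, true, MVT::i8}, // zext i8 via AND #255
};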

/// The specified machine instr operand is a vreg, and that
/// vreg is being provided by the specified load instruction.  If possible,
/// try to fold the load as an operand to the instruction, returning true if
/// successful.
bool ARMFastISel::tryToFoldLoadIntoMI(MachineInstr *MI, unsigned OpNo,
                                      const LoadInst *LI) {}

unsigned ARMFastISel::ARMLowerPICELF(const GlobalValue *GV, MVT VT) {}

bool ARMFastISel::fastLowerArguments() {}

namespace llvm {

  FastISel *ARM::createFastISel(FunctionLoweringInfo &funcInfo,
                                const TargetLibraryInfo *libInfo) {}

} // end namespace llvm