llvm/lib/Target/AArch64/AArch64FastISel.cpp

//===- AArch64FastISel.cpp - AArch64 FastISel implementation ---------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the AArch64-specific support for the FastISel class. Some
// of the target-specific code is generated by tablegen in the file
// AArch64GenFastISel.inc, which is #included here.
//
//===----------------------------------------------------------------------===//

#include "AArch64.h"
#include "AArch64CallingConvention.h"
#include "AArch64MachineFunctionInfo.h"
#include "AArch64RegisterInfo.h"
#include "AArch64Subtarget.h"
#include "MCTargetDesc/AArch64AddressingModes.h"
#include "Utils/AArch64BaseInfo.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RuntimeLibcallUtil.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/CodeGenTypes/MachineValueType.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsAArch64.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <utility>

using namespace llvm;

namespace {

class AArch64FastISel final : public FastISel {};

} // end anonymous namespace

/// Check if the sign-/zero-extend will be a noop.
static bool isIntExtFree(const Instruction *I) {}
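
// A minimal sketch of the two cases that make an extend free (the *Sketch
// helper name is hypothetical, for illustration; the in-tree check asserts
// its preconditions and inspects a few more details): the extend folds into
// a single-use load, since AArch64 loads can extend for free, or it is a
// noop because the ABI already extended the argument (zeroext/signext).
[[maybe_unused]] static bool isIntExtFreeSketch(const Instruction *I) {
  bool IsZExt = isa<ZExtInst>(I);
  // Case 1: the extend can fold into the load feeding it.
  if (const auto *LI = dyn_cast<LoadInst>(I->getOperand(0)))
    if (LI->hasOneUse())
      return true;
  // Case 2: the calling convention already performed the extension.
  if (const auto *Arg = dyn_cast<Argument>(I->getOperand(0)))
    return IsZExt ? Arg->hasZExtAttr() : Arg->hasSExtAttr();
  return false;
}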

/// Determine the implicit scale factor that is applied by a memory
/// operation for a given value type.
static unsigned getImplicitScaleFactor(MVT VT) {}
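
// A minimal sketch of the mapping (hypothetical *Sketch name, covering the
// scalar types this file loads and stores): the scale factor is simply the
// access size in bytes, because AArch64's unsigned-offset addressing mode
// encodes its immediate as a multiple of that size (e.g. the 12-bit offset
// of a 64-bit LDR is stored as the byte offset divided by 8).
[[maybe_unused]] static unsigned getImplicitScaleFactorSketch(MVT VT) {
  switch (VT.SimpleTy) {
  case MVT::i1: // i1 is stored as a byte.
  case MVT::i8:
    return 1;
  case MVT::i16:
    return 2;
  case MVT::i32:
  case MVT::f32:
    return 4;
  case MVT::i64:
  case MVT::f64:
    return 8;
  default:
    return 0; // Unsupported type; callers treat 0 as "cannot scale".
  }
}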

CCAssignFn *AArch64FastISel::CCAssignFnForCall(CallingConv::ID CC) const {}

unsigned AArch64FastISel::fastMaterializeAlloca(const AllocaInst *AI) {}

unsigned AArch64FastISel::materializeInt(const ConstantInt *CI, MVT VT) {}

unsigned AArch64FastISel::materializeFP(const ConstantFP *CFP, MVT VT) {}

unsigned AArch64FastISel::materializeGV(const GlobalValue *GV) {}

unsigned AArch64FastISel::fastMaterializeConstant(const Constant *C) {}

unsigned AArch64FastISel::fastMaterializeFloatZero(const ConstantFP *CFP) {}

/// Check if the multiply is by a power-of-2 constant.
static bool isMulPowOf2(const Value *I) {}
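
// A minimal sketch of the check (hypothetical *Sketch name; the real helper
// matches the same pattern): a multiply qualifies when either operand is a
// ConstantInt with exactly one bit set, so it can be turned into a shift.
[[maybe_unused]] static bool isMulPowOf2Sketch(const Value *I) {
  if (const auto *MI = dyn_cast<MulOperator>(I)) {
    if (const auto *C = dyn_cast<ConstantInt>(MI->getOperand(0)))
      if (C->getValue().isPowerOf2())
        return true;
    if (const auto *C = dyn_cast<ConstantInt>(MI->getOperand(1)))
      if (C->getValue().isPowerOf2())
        return true;
  }
  return false;
}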

/// Computes the address to get to an object.
bool AArch64FastISel::computeAddress(const Value *Obj, Address &Addr,
                                     Type *Ty) {}

bool AArch64FastISel::computeCallAddress(const Value *V, Address &Addr) {}

bool AArch64FastISel::isTypeLegal(Type *Ty, MVT &VT) {}

/// Determine if the value type is supported by FastISel.
///
/// FastISel for AArch64 can handle more value types than are legal. This adds
/// simple value types such as i1, i8, and i16.
bool AArch64FastISel::isTypeSupported(Type *Ty, MVT &VT, bool IsVectorAllowed) {}
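
// A sketch of the extra types accepted beyond the legal ones (hypothetical
// helper name): i1, i8, and i16 have no AArch64 register class of their
// own, but FastISel widens them into W registers, so this file reports
// them as supported.
[[maybe_unused]] static bool isWidenedIntTypeSketch(MVT VT) {
  return VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16;
}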

bool AArch64FastISel::isValueAvailable(const Value *V) const {}

bool AArch64FastISel::simplifyAddress(Address &Addr, MVT VT) {}

void AArch64FastISel::addLoadStoreOperands(Address &Addr,
                                           const MachineInstrBuilder &MIB,
                                           MachineMemOperand::Flags Flags,
                                           unsigned ScaleFactor,
                                           MachineMemOperand *MMO) {}

unsigned AArch64FastISel::emitAddSub(bool UseAdd, MVT RetVT, const Value *LHS,
                                     const Value *RHS, bool SetFlags,
                                     bool WantResult, bool IsZExt) {}

unsigned AArch64FastISel::emitAddSub_rr(bool UseAdd, MVT RetVT, unsigned LHSReg,
                                        unsigned RHSReg, bool SetFlags,
                                        bool WantResult) {}

unsigned AArch64FastISel::emitAddSub_ri(bool UseAdd, MVT RetVT, unsigned LHSReg,
                                        uint64_t Imm, bool SetFlags,
                                        bool WantResult) {}

unsigned AArch64FastISel::emitAddSub_rs(bool UseAdd, MVT RetVT, unsigned LHSReg,
                                        unsigned RHSReg,
                                        AArch64_AM::ShiftExtendType ShiftType,
                                        uint64_t ShiftImm, bool SetFlags,
                                        bool WantResult) {}

unsigned AArch64FastISel::emitAddSub_rx(bool UseAdd, MVT RetVT, unsigned LHSReg,
                                        unsigned RHSReg,
                                        AArch64_AM::ShiftExtendType ExtType,
                                        uint64_t ShiftImm, bool SetFlags,
                                        bool WantResult) {}

bool AArch64FastISel::emitCmp(const Value *LHS, const Value *RHS, bool IsZExt) {}

bool AArch64FastISel::emitICmp(MVT RetVT, const Value *LHS, const Value *RHS,
                               bool IsZExt) {}

bool AArch64FastISel::emitICmp_ri(MVT RetVT, unsigned LHSReg, uint64_t Imm) {}

bool AArch64FastISel::emitFCmp(MVT RetVT, const Value *LHS, const Value *RHS) {}

unsigned AArch64FastISel::emitAdd(MVT RetVT, const Value *LHS, const Value *RHS,
                                  bool SetFlags, bool WantResult, bool IsZExt) {}

/// This method is a wrapper to simplify add emission.
///
/// First try to emit an add with an immediate operand using emitAddSub_ri. If
/// that fails, then try to materialize the immediate into a register and use
/// emitAddSub_rr instead.
unsigned AArch64FastISel::emitAdd_ri_(MVT VT, unsigned Op0, int64_t Imm) {}
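
// The strategy above as a hedged sketch (the in-tree body also folds a
// negative immediate into a subtract instead of an add):
//
//   if (unsigned ResultReg = emitAddSub_ri(/*UseAdd=*/true, VT, Op0, Imm))
//     return ResultReg;
//   // The immediate did not fit the 12-bit arithmetic-immediate encoding,
//   // so materialize it into a register and use the rr form instead.
//   unsigned CReg = fastEmit_i(VT, VT, ISD::Constant, Imm);
//   return CReg ? emitAddSub_rr(/*UseAdd=*/true, VT, Op0, CReg) : 0;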

unsigned AArch64FastISel::emitSub(MVT RetVT, const Value *LHS, const Value *RHS,
                                  bool SetFlags, bool WantResult, bool IsZExt) {}

unsigned AArch64FastISel::emitSubs_rr(MVT RetVT, unsigned LHSReg,
                                      unsigned RHSReg, bool WantResult) {}

unsigned AArch64FastISel::emitSubs_rs(MVT RetVT, unsigned LHSReg,
                                      unsigned RHSReg,
                                      AArch64_AM::ShiftExtendType ShiftType,
                                      uint64_t ShiftImm, bool WantResult) {}

unsigned AArch64FastISel::emitLogicalOp(unsigned ISDOpc, MVT RetVT,
                                        const Value *LHS, const Value *RHS) {}

unsigned AArch64FastISel::emitLogicalOp_ri(unsigned ISDOpc, MVT RetVT,
                                           unsigned LHSReg, uint64_t Imm) {}

unsigned AArch64FastISel::emitLogicalOp_rs(unsigned ISDOpc, MVT RetVT,
                                           unsigned LHSReg, unsigned RHSReg,
                                           uint64_t ShiftImm) {}

unsigned AArch64FastISel::emitAnd_ri(MVT RetVT, unsigned LHSReg,
                                     uint64_t Imm) {}

unsigned AArch64FastISel::emitLoad(MVT VT, MVT RetVT, Address Addr,
                                   bool WantZExt, MachineMemOperand *MMO) {}

bool AArch64FastISel::selectAddSub(const Instruction *I) {}

bool AArch64FastISel::selectLogicalOp(const Instruction *I) {}

bool AArch64FastISel::selectLoad(const Instruction *I) {}

bool AArch64FastISel::emitStoreRelease(MVT VT, unsigned SrcReg,
                                       unsigned AddrReg,
                                       MachineMemOperand *MMO) {}

bool AArch64FastISel::emitStore(MVT VT, unsigned SrcReg, Address Addr,
                                MachineMemOperand *MMO) {}

bool AArch64FastISel::selectStore(const Instruction *I) {}

static AArch64CC::CondCode getCompareCC(CmpInst::Predicate Pred) {}

/// Try to emit a combined compare-and-branch instruction.
bool AArch64FastISel::emitCompareAndBranch(const BranchInst *BI) {}
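
// A minimal sketch of the fold this aims for (hypothetical helper name; the
// in-tree version also turns single-bit tests into TBZ/TBNZ): a conditional
// branch on "icmp eq/ne X, 0" collapses into one CBZ/CBNZ rather than a
// SUBS-plus-B.cc pair.
[[maybe_unused]] static unsigned getCmpZeroBranchOpcodeSketch(bool Is64Bit,
                                                              bool IsNotZero) {
  if (Is64Bit)
    return IsNotZero ? AArch64::CBNZX : AArch64::CBZX;
  return IsNotZero ? AArch64::CBNZW : AArch64::CBZW;
}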

bool AArch64FastISel::selectBranch(const Instruction *I) {}

bool AArch64FastISel::selectIndirectBr(const Instruction *I) {}

bool AArch64FastISel::selectCmp(const Instruction *I) {}

/// Optimize selects of i1 if one of the operands has a 'true' or 'false'
/// value.
bool AArch64FastISel::optimizeSelect(const SelectInst *SI) {}
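
// The rewrites performed here, sketched at the IR level (hedged: the code
// emits the equivalent AArch64 logical instructions directly rather than
// building new IR):
//
//   select i1 %c, i1 true,  i1 %b   -->  or  i1 %c, %b
//   select i1 %c, i1 %a, i1 false   -->  and i1 %c, %a
//   select i1 %c, i1 false, i1 %b   -->  and i1 (xor i1 %c, true), %b
//   select i1 %c, i1 %a, i1 true    -->  or  i1 (xor i1 %c, true), %a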

bool AArch64FastISel::selectSelect(const Instruction *I) {}

bool AArch64FastISel::selectFPExt(const Instruction *I) {}

bool AArch64FastISel::selectFPTrunc(const Instruction *I) {}

// FPToUI and FPToSI
bool AArch64FastISel::selectFPToInt(const Instruction *I, bool Signed) {}

bool AArch64FastISel::selectIntToFP(const Instruction *I, bool Signed) {}

bool AArch64FastISel::fastLowerArguments() {}

bool AArch64FastISel::processCallArgs(CallLoweringInfo &CLI,
                                      SmallVectorImpl<MVT> &OutVTs,
                                      unsigned &NumBytes) {}

bool AArch64FastISel::finishCall(CallLoweringInfo &CLI, unsigned NumBytes) {}

bool AArch64FastISel::fastLowerCall(CallLoweringInfo &CLI) {}

bool AArch64FastISel::isMemCpySmall(uint64_t Len, MaybeAlign Alignment) {}

bool AArch64FastISel::tryEmitSmallMemCpy(Address Dest, Address Src,
                                         uint64_t Len, MaybeAlign Alignment) {}

/// Check if it is possible to fold the condition from the XALU intrinsic
/// into the user. The condition code will only be updated on success.
bool AArch64FastISel::foldXALUIntrinsic(AArch64CC::CondCode &CC,
                                        const Instruction *I,
                                        const Value *Cond) {}
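
// The pattern being folded, sketched at the IR level: when the condition of
// a branch or select is the overflow bit of an *.with.overflow intrinsic,
// the flags set by the overflow arithmetic are reused directly.
//
//   %res = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %a, i32 %b)
//   %ovf = extractvalue { i32, i1 } %res, 1
//   br i1 %ovf, label %overflow, label %cont
//
// lowers to ADDS Wd, Wa, Wb followed by B.vs, with no intermediate CSET.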

bool AArch64FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {}

bool AArch64FastISel::selectRet(const Instruction *I) {}

bool AArch64FastISel::selectTrunc(const Instruction *I) {}

unsigned AArch64FastISel::emiti1Ext(unsigned SrcReg, MVT DestVT, bool IsZExt) {}

unsigned AArch64FastISel::emitMul_rr(MVT RetVT, unsigned Op0, unsigned Op1) {}

unsigned AArch64FastISel::emitSMULL_rr(MVT RetVT, unsigned Op0, unsigned Op1) {}

unsigned AArch64FastISel::emitUMULL_rr(MVT RetVT, unsigned Op0, unsigned Op1) {}

unsigned AArch64FastISel::emitLSL_rr(MVT RetVT, unsigned Op0Reg,
                                     unsigned Op1Reg) {}

unsigned AArch64FastISel::emitLSL_ri(MVT RetVT, MVT SrcVT, unsigned Op0,
                                     uint64_t Shift, bool IsZExt) {}

unsigned AArch64FastISel::emitLSR_rr(MVT RetVT, unsigned Op0Reg,
                                     unsigned Op1Reg) {}

unsigned AArch64FastISel::emitLSR_ri(MVT RetVT, MVT SrcVT, unsigned Op0,
                                     uint64_t Shift, bool IsZExt) {}

unsigned AArch64FastISel::emitASR_rr(MVT RetVT, unsigned Op0Reg,
                                     unsigned Op1Reg) {}

unsigned AArch64FastISel::emitASR_ri(MVT RetVT, MVT SrcVT, unsigned Op0,
                                     uint64_t Shift, bool IsZExt) {}

unsigned AArch64FastISel::emitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT,
                                     bool IsZExt) {}

static bool isZExtLoad(const MachineInstr *LI) {}

static bool isSExtLoad(const MachineInstr *LI) {}

bool AArch64FastISel::optimizeIntExtLoad(const Instruction *I, MVT RetVT,
                                         MVT SrcVT) {}

bool AArch64FastISel::selectIntExt(const Instruction *I) {}

bool AArch64FastISel::selectRem(const Instruction *I, unsigned ISDOpcode) {}

bool AArch64FastISel::selectMul(const Instruction *I) {}

bool AArch64FastISel::selectShift(const Instruction *I) {}

bool AArch64FastISel::selectBitCast(const Instruction *I) {}

bool AArch64FastISel::selectFRem(const Instruction *I) {}

bool AArch64FastISel::selectSDiv(const Instruction *I) {}

/// This is mostly a copy of the existing FastISel getRegForGEPIndex code. We
/// have to duplicate it for AArch64, because otherwise we would fail during the
/// sign-extend emission.
unsigned AArch64FastISel::getRegForGEPIndex(const Value *Idx) {}
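
// A hedged sketch of where this diverges from the generic helper: GEP
// indices must be pointer-width (i64 on AArch64), so a narrower index is
// sign-extended through this file's emitIntExt, which understands how
// i1/i8/i16 values were widened.
//
//   unsigned IdxN = getRegForValue(Idx);
//   MVT PtrVT = TLI.getPointerTy(DL); // i64
//   EVT IdxVT = EVT::getEVT(Idx->getType());
//   if (IdxVT.bitsLT(PtrVT))
//     IdxN = emitIntExt(IdxVT.getSimpleVT(), IdxN, PtrVT, /*IsZExt=*/false);
//   return IdxN;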

/// This is mostly a copy of the existing FastISel GEP code, but we have to
/// duplicate it for AArch64, because otherwise we would bail out even for
/// simple cases. This is because the standard fastEmit functions don't cover
/// MUL at all and ADD is lowered very inefficiently.
bool AArch64FastISel::selectGetElementPtr(const Instruction *I) {}
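
// The address arithmetic emitted here, as a sketch: a GEP decomposes into
//
//   addr = base + TotalConstOffset + sum_i(Index_i * ElementSize_i)
//
// All constant offsets are folded into a single immediate added with
// emitAdd_ri_ above; each variable index is scaled by its element size via
// the local multiply/shift emitters and accumulated with register adds,
// which is exactly what the generic fastEmit hooks fail to provide.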

bool AArch64FastISel::selectAtomicCmpXchg(const AtomicCmpXchgInst *I) {}

bool AArch64FastISel::fastSelectInstruction(const Instruction *I) {}

FastISel *AArch64::createFastISel(FunctionLoweringInfo &FuncInfo,
                                  const TargetLibraryInfo *LibInfo) {}