llvm/lib/Target/SystemZ/SystemZTargetTransformInfo.cpp

//===-- SystemZTargetTransformInfo.cpp - SystemZ-specific TTI -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements a TargetTransformInfo analysis pass specific to the
// SystemZ target machine. It uses the target's detailed information to provide
// more precise answers to certain TTI queries, while letting the target
// independent and default TTI implementations handle the rest.
//
//===----------------------------------------------------------------------===//

#include "SystemZTargetTransformInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/BasicTTIImpl.h"
#include "llvm/CodeGen/CostTable.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"

using namespace llvm;

#define DEBUG_TYPE "systemztti"

//===----------------------------------------------------------------------===//
//
// SystemZ cost model.
//
//===----------------------------------------------------------------------===//

static bool isUsedAsMemCpySource(const Value *V, bool &OtherUse) {}

unsigned SystemZTTIImpl::adjustInliningThreshold(const CallBase *CB) const {}

InstructionCost SystemZTTIImpl::getIntImmCost(const APInt &Imm, Type *Ty,
                                              TTI::TargetCostKind CostKind) {}

InstructionCost SystemZTTIImpl::getIntImmCostInst(unsigned Opcode, unsigned Idx,
                                                  const APInt &Imm, Type *Ty,
                                                  TTI::TargetCostKind CostKind,
                                                  Instruction *Inst) {}

InstructionCost
SystemZTTIImpl::getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx,
                                    const APInt &Imm, Type *Ty,
                                    TTI::TargetCostKind CostKind) {}

TargetTransformInfo::PopcntSupportKind
SystemZTTIImpl::getPopcntSupport(unsigned TyWidth) {
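  // Sketch of the expected logic (assumed, not verbatim upstream): population
  // count is fast in hardware when the subtarget provides the
  // population-count facility and the type fits in a 64-bit register.
  assert(isPowerOf2_32(TyWidth) && "Type width must be power of 2");
  if (ST->hasPopulationCount() && TyWidth <= 64)
    return TTI::PSK_FastHardware;
  return TTI::PSK_Software;
}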

void SystemZTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
                                             TTI::UnrollingPreferences &UP,
                                             OptimizationRemarkEmitter *ORE) {}

void SystemZTTIImpl::getPeelingPreferences(Loop *L, ScalarEvolution &SE,
                                           TTI::PeelingPreferences &PP) {
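  // Sketch: no SystemZ-specific peeling tuning; defer to the generic defaults.
  BaseT::getPeelingPreferences(L, SE, PP);
}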

bool SystemZTTIImpl::isLSRCostLess(const TargetTransformInfo::LSRCost &C1,
                                   const TargetTransformInfo::LSRCost &C2) {
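  // Sketch (assumed): compare instruction count first, and ignore ImmCost,
  // since immediate offsets are checked explicitly elsewhere.
  return std::tie(C1.Insns, C1.NumRegs, C1.AddRecCost, C1.NumIVMuls,
                  C1.NumBaseAdds, C1.ScaleCost, C1.SetupCost) <
         std::tie(C2.Insns, C2.NumRegs, C2.AddRecCost, C2.NumIVMuls,
                  C2.NumBaseAdds, C2.ScaleCost, C2.SetupCost);
}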

unsigned SystemZTTIImpl::getNumberOfRegisters(unsigned ClassID) const {
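  // Sketch: 16 GPRs minus the stack pointer and %r0 (which cannot be used in
  // an address); 32 vector registers when the vector facility is available.
  bool Vector = (ClassID == 1);
  if (!Vector)
    return 14;
  if (ST->hasVector())
    return 32;
  return 0;
}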

TypeSize
SystemZTTIImpl::getRegisterBitWidth(TargetTransformInfo::RegisterKind K) const {
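  // Sketch: 64-bit GPRs; 128-bit vector registers when the vector facility is
  // present; no scalable vectors on SystemZ.
  switch (K) {
  case TargetTransformInfo::RGK_Scalar:
    return TypeSize::getFixed(64);
  case TargetTransformInfo::RGK_FixedWidthVector:
    return TypeSize::getFixed(ST->hasVector() ? 128 : 0);
  case TargetTransformInfo::RGK_ScalableVector:
    return TypeSize::getScalable(0);
  }
  llvm_unreachable("Unsupported register kind");
}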

unsigned SystemZTTIImpl::getMinPrefetchStride(unsigned NumMemAccesses,
                                              unsigned NumStridedMemAccesses,
                                              unsigned NumPrefetches,
                                              bool HasCall) const {
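  // Sketch of the heuristic; the constants are assumptions, not verbatim
  // upstream values: skip prefetching for loops with many prefetch
  // candidates, prefetch every stride when many strided accesses might
  // overwhelm the hardware prefetcher, and otherwise use a large minimum
  // stride.
  if (NumPrefetches > 16)
    return UINT_MAX;
  if (NumStridedMemAccesses > 32 && !HasCall &&
      (NumMemAccesses - NumStridedMemAccesses) * 32 <= NumStridedMemAccesses)
    return 1;
  return ST->hasMiscellaneousExtensions3() ? 8192 : 2048;
}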

bool SystemZTTIImpl::hasDivRemOp(Type *DataType, bool IsSigned) {
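  // Sketch: scalar integer division on SystemZ produces both quotient and
  // remainder, so report a combined divrem for legal scalar integer types.
  EVT VT = TLI->getValueType(DL, DataType);
  return (VT.isScalarInteger() && TLI->isTypeLegal(VT));
}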

// Return the bit size for the scalar type or vector element
// type. getScalarSizeInBits() returns 0 for a pointer type.
static unsigned getScalarSizeInBits(Type *Ty) {
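  // Sketch: treat pointers (and pointer vectors) as 64 bits wide, since
  // getScalarSizeInBits() returns 0 for pointer types.
  unsigned Size =
      (Ty->isPtrOrPtrVectorTy() ? 64U : Ty->getScalarSizeInBits());
  assert(Size > 0 && "Element must be of nonzero size.");
  return Size;
}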

// getNumberOfParts() calls getTypeLegalizationCost() which splits the vector
// type until it is legal. This would e.g. return 4 for <6 x i64>, instead of
// 3.
static unsigned getNumVectorRegs(Type *Ty) {
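  // Sketch: count 128-bit vector registers directly from the total bit width,
  // so that <6 x i64> yields 3 rather than the 4 parts implied by type
  // legalization.
  auto *VTy = cast<FixedVectorType>(Ty);
  unsigned WideBits = getScalarSizeInBits(Ty) * VTy->getNumElements();
  assert(WideBits > 0 && "Could not compute size of vector");
  return ((WideBits % 128U) ? ((WideBits / 128U) + 1) : (WideBits / 128U));
}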

InstructionCost SystemZTTIImpl::getArithmeticInstrCost(
    unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
    TTI::OperandValueInfo Op1Info, TTI::OperandValueInfo Op2Info,
    ArrayRef<const Value *> Args,
    const Instruction *CxtI) {}

InstructionCost SystemZTTIImpl::getShuffleCost(
    TTI::ShuffleKind Kind, VectorType *Tp, ArrayRef<int> Mask,
    TTI::TargetCostKind CostKind, int Index, VectorType *SubTp,
    ArrayRef<const Value *> Args, const Instruction *CxtI) {}

// Return the log2 difference of the element sizes of the two vector types.
static unsigned getElSizeLog2Diff(Type *Ty0, Type *Ty1) {
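  // Sketch: absolute difference of the log2 element sizes, independent of
  // which type has the wider elements.
  unsigned Bits0 = Ty0->getScalarSizeInBits();
  unsigned Bits1 = Ty1->getScalarSizeInBits();

  if (Bits1 > Bits0)
    return (Log2_32(Bits1) - Log2_32(Bits0));

  return (Log2_32(Bits0) - Log2_32(Bits1));
}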

// Return the number of instructions needed to truncate SrcTy to DstTy.
unsigned SystemZTTIImpl::
getVectorTruncCost(Type *SrcTy, Type *DstTy) {
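  // Simplified sketch of the packing cost model (the in-tree version has
  // additional special cases): up to two source vector registers can be
  // truncated cheaply with pack / permute instructions; wider sources need
  // one pack step per halving of the element size, applied to a shrinking
  // number of register parts.
  assert(SrcTy->isVectorTy() && DstTy->isVectorTy());
  unsigned NumParts = getNumVectorRegs(SrcTy);
  if (NumParts <= 2)
    return 0;

  unsigned Cost = 0;
  unsigned Log2Diff = getElSizeLog2Diff(SrcTy, DstTy);
  for (unsigned P = 0; P < Log2Diff; ++P) {
    if (NumParts > 1)
      NumParts /= 2;
    Cost += NumParts;
  }
  return Cost;
}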

// Return the cost of converting a vector bitmask produced by a compare
// (SrcTy), to the type of the select or extend instruction (DstTy).
unsigned SystemZTTIImpl::
getVectorBitmaskConversionCost(Type *SrcTy, Type *DstTy) {
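  // Sketch (assumed): a compare produces a bitmask with the source element
  // size. If the destination elements are narrower, the mask must be packed
  // (truncated); if they are wider, each destination part needs its slice of
  // the mask unpacked, plus a small cost for distributing the mask across
  // parts.
  assert(SrcTy->isVectorTy() && DstTy->isVectorTy() &&
         "Should only be called with vector types.");
  unsigned PackCost = 0;
  unsigned SrcScalarBits = SrcTy->getScalarSizeInBits();
  unsigned DstScalarBits = DstTy->getScalarSizeInBits();
  unsigned Log2Diff = getElSizeLog2Diff(SrcTy, DstTy);
  if (SrcScalarBits > DstScalarBits)
    PackCost = getVectorTruncCost(SrcTy, DstTy);
  else if (SrcScalarBits < DstScalarBits) {
    unsigned DstNumParts = getNumVectorRegs(DstTy);
    PackCost = Log2Diff * DstNumParts;
    PackCost += DstNumParts - 1;
  }
  return PackCost;
}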

// Return the type of the compared operands. This is needed to compute the
// cost for a Select / ZExt or SExt instruction.
static Type *getCmpOpsType(const Instruction *I, unsigned VF = 1) {
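  // Sketch (assumed): look through the condition operand of I for a compare
  // (possibly behind a two-operand logic instruction combining two compares)
  // and return the type of the compared operands, widened to VF elements if
  // needed.
  Type *OpTy = nullptr;
  if (CmpInst *CI = dyn_cast<CmpInst>(I->getOperand(0)))
    OpTy = CI->getOperand(0)->getType();
  else if (Instruction *LogicI = dyn_cast<Instruction>(I->getOperand(0)))
    if (LogicI->getNumOperands() == 2)
      if (CmpInst *CI0 = dyn_cast<CmpInst>(LogicI->getOperand(0)))
        if (isa<CmpInst>(LogicI->getOperand(1)))
          OpTy = CI0->getOperand(0)->getType();

  if (OpTy != nullptr) {
    if (VF == 1) {
      assert(!OpTy->isVectorTy() && "Expected scalar type");
      return OpTy;
    }
    // I may be scalar or already vectorized; return the type widened to VF.
    Type *ElTy = OpTy->getScalarType();
    return FixedVectorType::get(ElTy, VF);
  }

  return nullptr;
}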

// Get the cost of converting a boolean vector to a vector with the same
// width and element size as Dst, plus the cost of zero extending if needed.
unsigned SystemZTTIImpl::
getBoolVecToIntConversionCost(unsigned Opcode, Type *Dst,
                              const Instruction *I) {
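  // Sketch (assumed): find the width of the compared operands via
  // getCmpOpsType() and charge the cost of converting the compare bitmask to
  // Dst's element size; zero extension additionally needs one AND with an
  // immediate mask per destination vector register.
  auto *DstVTy = cast<FixedVectorType>(Dst);
  unsigned VF = DstVTy->getNumElements();
  unsigned Cost = 0;
  Type *CmpOpTy = I != nullptr ? getCmpOpsType(I, VF) : nullptr;
  if (CmpOpTy != nullptr)
    Cost = getVectorBitmaskConversionCost(CmpOpTy, Dst);
  if (Opcode == Instruction::ZExt || Opcode == Instruction::UIToFP)
    Cost += getNumVectorRegs(Dst);
  return Cost;
}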

InstructionCost SystemZTTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst,
                                                 Type *Src,
                                                 TTI::CastContextHint CCH,
                                                 TTI::TargetCostKind CostKind,
                                                 const Instruction *I) {}

// Scalar i8 / i16 operations are typically performed after first extending
// the operands to i32.
static unsigned getOperandsExtensionCost(const Instruction *I) {
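  // Sketch: loads of i8/i16 can extend for free and constants can be
  // materialized directly; any other operand needs an explicit extension.
  unsigned ExtCost = 0;
  for (Value *Op : I->operands())
    if (!isa<LoadInst>(Op) && !isa<ConstantInt>(Op))
      ExtCost++;
  return ExtCost;
}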

InstructionCost SystemZTTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
                                                   Type *CondTy,
                                                   CmpInst::Predicate VecPred,
                                                   TTI::TargetCostKind CostKind,
                                                   const Instruction *I) {}

InstructionCost SystemZTTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val,
                                                   TTI::TargetCostKind CostKind,
                                                   unsigned Index, Value *Op0,
                                                   Value *Op1) {}

// Check if a load may be folded as a memory operand in its user.
bool SystemZTTIImpl::
isFoldableLoad(const LoadInst *Ld, const Instruction *&FoldedValue) {}

static bool isBswapIntrinsicCall(const Value *V) {
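  // Sketch: true if V is a direct call to the llvm.bswap intrinsic.
  if (const Instruction *I = dyn_cast<Instruction>(V))
    if (auto *CI = dyn_cast<CallInst>(I))
      if (auto *Fn = CI->getCalledFunction())
        if (Fn->getIntrinsicID() == Intrinsic::bswap)
          return true;
  return false;
}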

InstructionCost SystemZTTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src,
                                                MaybeAlign Alignment,
                                                unsigned AddressSpace,
                                                TTI::TargetCostKind CostKind,
                                                TTI::OperandValueInfo OpInfo,
                                                const Instruction *I) {}

// The generic implementation of getInterleavedMemoryOpCost() is based on
// adding costs of the memory operations plus all the extracts and inserts
// needed for using / defining the vector operands. The SystemZ version does
// roughly the same but bases the computations on vector permutations
// instead.
InstructionCost SystemZTTIImpl::getInterleavedMemoryOpCost(
    unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
    Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
    bool UseMaskForCond, bool UseMaskForGaps) {}

static int
getVectorIntrinsicInstrCost(Intrinsic::ID ID, Type *RetTy,
                            const SmallVectorImpl<Type *> &ParamTys) {
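  // Sketch (assumed, not exhaustive): a vector bswap becomes one VPERM per
  // vector register; anything else returns -1 so the caller can fall back to
  // the generic cost.
  if (RetTy->isVectorTy() && ID == Intrinsic::bswap)
    return getNumVectorRegs(RetTy); // VPERM
  return -1;
}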

InstructionCost
SystemZTTIImpl::getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
                                      TTI::TargetCostKind CostKind) {
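  // Sketch: use the SystemZ-specific vector intrinsic cost when available,
  // otherwise defer to the generic implementation.
  InstructionCost Cost = getVectorIntrinsicInstrCost(
      ICA.getID(), ICA.getReturnType(), ICA.getArgTypes());
  if (Cost != -1)
    return Cost;
  return BaseT::getIntrinsicInstrCost(ICA, CostKind);
}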

bool SystemZTTIImpl::shouldExpandReduction(const IntrinsicInst *II) const {
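  // Sketch: no reduction intrinsics need to be expanded by the generic pass
  // on SystemZ.
  return false;
}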