llvm/lib/Target/AMDGPU/AMDGPULibCalls.cpp

//===- AMDGPULibCalls.cpp -------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file implements AMD library function optimizations.
//
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDGPULibFunc.h"
#include "GCNSubtarget.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/AttributeMask.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/InitializePasses.h"
#include <cmath>

#define DEBUG_TYPE "amdgpu-simplifylib"

using namespace llvm;
using namespace llvm::PatternMatch;

static cl::opt<bool> EnablePreLink("amdgpu-prelink",
  cl::desc("Enable pre-link mode optimizations"),
  cl::init(false),
  cl::Hidden);

static cl::list<std::string> UseNative("amdgpu-use-native",
  cl::desc("Comma separated list of functions to replace with native, or all"),
  cl::CommaSeparated, cl::ValueOptional,
  cl::Hidden);
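
// Example usage (illustrative; these backend options are normally reached
// through the driver with -mllvm):
//   -mllvm -amdgpu-use-native=sin,cos   switch only sin and cos to native_*
//   -mllvm -amdgpu-use-native           (or =all) switch every supported call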

#define MATH_PI      3.14159265358979323846
#define MATH_E       2.71828182845904523536
#define MATH_SQRT2   1.41421356237309504880
#define MATH_SQRT1_2 0.70710678118654752440

namespace llvm {

class AMDGPULibCalls {};

} // end namespace llvm

template <typename IRB>
static CallInst *CreateCallEx(IRB &B, FunctionCallee Callee, Value *Arg,
                              const Twine &Name = "") {}

template <typename IRB>
static CallInst *CreateCallEx2(IRB &B, FunctionCallee Callee, Value *Arg1,
                               Value *Arg2, const Twine &Name = "") {}

static FunctionType *getPownType(FunctionType *FT) {}

//  Data structures for table-driven optimizations.
//  FuncTbl works for both f32 and f64 functions with 1 input argument

struct TableEntry { double result, input; };

/* a list of {result, input} */
static const TableEntry tbl_acos[] =;
static const TableEntry tbl_acosh[] =;
static const TableEntry tbl_acospi[] =;
static const TableEntry tbl_asin[] =;
static const TableEntry tbl_asinh[] =;
static const TableEntry tbl_asinpi[] =;
static const TableEntry tbl_atan[] =;
static const TableEntry tbl_atanh[] =;
static const TableEntry tbl_atanpi[] =;
static const TableEntry tbl_cbrt[] =;
static const TableEntry tbl_cos[] =;
static const TableEntry tbl_cosh[] =;
static const TableEntry tbl_cospi[] =;
static const TableEntry tbl_erfc[] =;
static const TableEntry tbl_erf[] =;
static const TableEntry tbl_exp[] =;
static const TableEntry tbl_exp2[] =;
static const TableEntry tbl_exp10[] =;
static const TableEntry tbl_expm1[] =;
static const TableEntry tbl_log[] =;
static const TableEntry tbl_log2[] =;
static const TableEntry tbl_log10[] =;
static const TableEntry tbl_rsqrt[] =;
static const TableEntry tbl_sin[] =;
static const TableEntry tbl_sinh[] =;
static const TableEntry tbl_sinpi[] =;
static const TableEntry tbl_sqrt[] =;
static const TableEntry tbl_tan[] =;
static const TableEntry tbl_tanh[] =;
static const TableEntry tbl_tanpi[] =;
static const TableEntry tbl_tgamma[] =;
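
// Illustrative only: a hypothetical table in the same {result, input} form as
// those above, plus a linear lookup over it. The two entries are the exact
// points sin(0) == 0 and sin(pi/2) == 1; the real tables hold more entries.
static const TableEntry tbl_sin_example[] = {
    {0.0, 0.0},
    {1.0, MATH_PI / 2.0},
};

// Hypothetical helper (not used by the pass): report the exact result if the
// constant input matches a table entry.
static bool lookupExactResult(ArrayRef<TableEntry> Table, double In,
                              double &Out) {
  for (const TableEntry &E : Table) {
    if (E.input == In) {
      Out = E.result;
      return true;
    }
  }
  return false;
}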

static bool HasNative(AMDGPULibFunc::EFuncId id) {}

using TableRef = ArrayRef<TableEntry>;

static TableRef getOptTable(AMDGPULibFunc::EFuncId id) {}

static inline int getVecSize(const AMDGPULibFunc& FInfo) {}

static inline AMDGPULibFunc::EType getArgType(const AMDGPULibFunc& FInfo) {}

FunctionCallee AMDGPULibCalls::getFunction(Module *M, const FuncInfo &fInfo) {}

bool AMDGPULibCalls::parseFunctionName(const StringRef &FMangledName,
                                       FuncInfo &FInfo) {}

bool AMDGPULibCalls::isUnsafeMath(const FPMathOperator *FPOp) const {}

bool AMDGPULibCalls::isUnsafeFiniteOnlyMath(const FPMathOperator *FPOp) const {}

bool AMDGPULibCalls::canIncreasePrecisionOfConstantFold(
    const FPMathOperator *FPOp) const {}

void AMDGPULibCalls::initFunction(Function &F, FunctionAnalysisManager &FAM) {}

bool AMDGPULibCalls::useNativeFunc(const StringRef F) const {}

void AMDGPULibCalls::initNativeFuncs() {}

bool AMDGPULibCalls::sincosUseNative(CallInst *aCI, const FuncInfo &FInfo) {}

bool AMDGPULibCalls::useNative(CallInst *aCI) {}

// Clang emits a call to __read_pipe_2 or __read_pipe_4 for the OpenCL read_pipe
// builtin, with the type size and alignment appended as arguments, where 2 or 4
// indicates the original number of arguments. The library has optimized versions
// of __read_pipe_2/__read_pipe_4 for the case where the type size and alignment
// are the same power-of-2 value. This function transforms __read_pipe_2 into
// __read_pipe_2_N for such cases, where N is the size in bytes of the type
// (N = 1, 2, 4, 8, ..., 128). The same applies to __read_pipe_4, __write_pipe_2,
// and __write_pipe_4.
bool AMDGPULibCalls::fold_read_write_pipe(CallInst *CI, IRBuilder<> &B,
                                          const FuncInfo &FInfo) {}
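
// A minimal sketch of the check described above (hypothetical helper, not the
// implementation of fold_read_write_pipe): the packet size and alignment are
// assumed to be the two trailing constant arguments of the call.
static bool canUseSizedPipeVariant(const CallInst *CI, uint64_t &PacketSize) {
  unsigned NumArgs = CI->arg_size();
  if (NumArgs < 2)
    return false;
  const auto *Size = dyn_cast<ConstantInt>(CI->getArgOperand(NumArgs - 2));
  const auto *Align = dyn_cast<ConstantInt>(CI->getArgOperand(NumArgs - 1));
  if (!Size || !Align)
    return false;
  PacketSize = Size->getZExtValue();
  // The specialized __read_pipe_2_N/__write_pipe_2_N entry points only cover
  // the case where size == alignment and both are a power of two up to 128.
  return PacketSize == Align->getZExtValue() && isPowerOf2_64(PacketSize) &&
         PacketSize <= 128;
}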

static bool isKnownIntegral(const Value *V, const DataLayout &DL,
                            FastMathFlags FMF) {}

// This function returns false if it made no change and true otherwise.
bool AMDGPULibCalls::fold(CallInst *CI) {}

bool AMDGPULibCalls::TDOFold(CallInst *CI, const FuncInfo &FInfo) {}

namespace llvm {
static double log2(double V) {}
} // namespace llvm

bool AMDGPULibCalls::fold_pow(FPMathOperator *FPOp, IRBuilder<> &B,
                              const FuncInfo &FInfo) {}

bool AMDGPULibCalls::fold_rootn(FPMathOperator *FPOp, IRBuilder<> &B,
                                const FuncInfo &FInfo) {}

// Get the scalar native builtin for a single-argument FP library function.
FunctionCallee AMDGPULibCalls::getNativeFunction(Module *M,
                                                 const FuncInfo &FInfo) {}
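
// Hedged sketch of the lookup this helper performs (hypothetical function, not
// used by the pass): the native counterpart of a builtin is the same descriptor
// re-mangled with the NATIVE prefix, e.g. sin -> native_sin. The setPrefix()
// and mangle() usage here is an assumption about the AMDGPULibFunc interface.
static std::string getNativeVariantName(const AMDGPULibFunc &FInfo) {
  AMDGPULibFunc Native = FInfo;             // copy the parsed descriptor
  Native.setPrefix(AMDGPULibFunc::NATIVE);  // switch the name prefix
  return Native.mangle();                   // mangled name of native_<func>
}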

// Some library calls are just wrappers around llvm intrinsics, but are
// compiled conservatively. Preserve the flags from the original call site by
// replacing such calls with direct intrinsic calls that carry all of those
// flags.
bool AMDGPULibCalls::shouldReplaceLibcallWithIntrinsic(const CallInst *CI,
                                                       bool AllowMinSizeF32,
                                                       bool AllowF64,
                                                       bool AllowStrictFP) {}
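
// A minimal sketch of the substitution described above (hypothetical helper;
// the member functions that follow handle more cases, e.g. strictfp and call
// attributes): rebuild the call as the matching LLVM intrinsic on the same
// operands, taking the fast-math flags from the original call site.
static CallInst *emitIntrinsicReplacement(IRBuilder<> &B, CallInst *CI,
                                          Intrinsic::ID IntrID) {
  SmallVector<Value *, 4> Args(CI->args());
  // Overload the intrinsic on the call's return type; good enough for the
  // simple single-type FP intrinsics this pass cares about.
  CallInst *NewCall =
      B.CreateIntrinsic(IntrID, {CI->getType()}, Args, /*FMFSource=*/CI);
  NewCall->copyMetadata(*CI);
  return NewCall;
}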

void AMDGPULibCalls::replaceLibCallWithSimpleIntrinsic(IRBuilder<> &B,
                                                       CallInst *CI,
                                                       Intrinsic::ID IntrID) {}

bool AMDGPULibCalls::tryReplaceLibcallWithSimpleIntrinsic(
    IRBuilder<> &B, CallInst *CI, Intrinsic::ID IntrID, bool AllowMinSizeF32,
    bool AllowF64, bool AllowStrictFP) {}

std::tuple<Value *, Value *, Value *>
AMDGPULibCalls::insertSinCos(Value *Arg, FastMathFlags FMF, IRBuilder<> &B,
                             FunctionCallee Fsincos) {}

// fold sin, cos -> sincos.
bool AMDGPULibCalls::fold_sincos(FPMathOperator *FPOp, IRBuilder<> &B,
                                 const FuncInfo &fInfo) {}
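
// Hedged sketch of the combined call that insertSinCos builds (hypothetical
// helper; the real code also picks the right address space and insertion point
// for the cos slot): sincos returns sin(x) and stores cos(x) through the
// pointer argument.
static std::pair<Value *, Value *> emitSinCosSketch(IRBuilder<> &B,
                                                    FunctionCallee Fsincos,
                                                    Value *Arg) {
  Value *CosPtr = B.CreateAlloca(Arg->getType(), nullptr, "__sincos_cos_slot");
  CallInst *SinVal = B.CreateCall(Fsincos, {Arg, CosPtr}, "__sincos_sin");
  Value *CosVal = B.CreateLoad(Arg->getType(), CosPtr, "__sincos_cos");
  return {SinVal, CosVal};
}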

bool AMDGPULibCalls::evaluateScalarMathFunc(const FuncInfo &FInfo, double &Res0,
                                            double &Res1, Constant *copr0,
                                            Constant *copr1) {}

bool AMDGPULibCalls::evaluateCall(CallInst *aCI, const FuncInfo &FInfo) {}

PreservedAnalyses AMDGPUSimplifyLibCallsPass::run(Function &F,
                                                  FunctionAnalysisManager &AM) {}

PreservedAnalyses AMDGPUUseNativeCallsPass::run(Function &F,
                                                FunctionAnalysisManager &AM) {}