llvm/lib/Analysis/MLInlineAdvisor.cpp

//===- MLInlineAdvisor.cpp - machine learned InlineAdvisor ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the interface between the inliner and a learned model.
// It delegates model evaluation to either the AOT compiled model (the
// 'release' mode) or a runtime-loaded model (the 'development' case).
//
//===----------------------------------------------------------------------===//
#include "llvm/Analysis/MLInlineAdvisor.h"
#include "llvm/ADT/SCCIterator.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/Analysis/FunctionPropertiesAnalysis.h"
#include "llvm/Analysis/InlineCost.h"
#include "llvm/Analysis/InlineModelFeatureMaps.h"
#include "llvm/Analysis/InteractiveModelRunner.h"
#include "llvm/Analysis/LazyCallGraph.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/MLModelRunner.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/Analysis/ReleaseModeModelRunner.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PassManager.h"
#include "llvm/Support/CommandLine.h"

using namespace llvm;

static cl::opt<std::string> InteractiveChannelBaseName(
    "inliner-interactive-channel-base", cl::Hidden,
    cl::desc(
        "Base file path for the interactive mode. The incoming filename should "
        "have the name <inliner-interactive-channel-base>.in, while the "
        "outgoing name should be <inliner-interactive-channel-base>.out"));
static const std::string InclDefaultMsg =
    (Twine("In interactive mode, also send the default policy decision: ") +
     DefaultDecisionName + ".")
        .str();
static cl::opt<bool>
    InteractiveIncludeDefault("inliner-interactive-include-default", cl::Hidden,
                              cl::desc(InclDefaultMsg));

enum class SkipMLPolicyCriteria { Never, IfCallerIsNotCold };

static cl::opt<SkipMLPolicyCriteria> SkipPolicy(
    "ml-inliner-skip-policy", cl::Hidden, cl::init(SkipMLPolicyCriteria::Never),
    cl::values(clEnumValN(SkipMLPolicyCriteria::Never, "never", "never"),
               clEnumValN(SkipMLPolicyCriteria::IfCallerIsNotCold,
                          "if-caller-not-cold", "if the caller is not cold")));

static cl::opt<std::string> ModelSelector("ml-inliner-model-selector",
                                          cl::Hidden, cl::init(""));

#if defined(LLVM_HAVE_TF_AOT_INLINERSIZEMODEL)
// codegen-ed file
#include "InlinerSizeModel.h" // NOLINT
using CompiledModelType = llvm::InlinerSizeModel;
#else
using CompiledModelType = NoopSavedModelImpl;
#endif
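// With no AOT-compiled model linked in, NoopSavedModelImpl acts as a stand-in
// that is never evaluated; getReleaseModeAdvisor below is expected to produce
// an advisor only when an embedded model or an interactive channel is available.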

std::unique_ptr<InlineAdvisor>
llvm::getReleaseModeAdvisor(Module &M, ModuleAnalysisManager &MAM,
                            std::function<bool(CallBase &)> GetDefaultAdvice) {}
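// A minimal sketch (not the verbatim implementation) of the mode selection the
// header comment describes, assuming the runner constructors declared in
// ReleaseModeModelRunner.h and InteractiveModelRunner.h:
//
//   std::unique_ptr<MLModelRunner> Runner;
//   if (InteractiveChannelBaseName.empty())
//     // 'release' mode: evaluate the AOT-compiled model in-process.
//     Runner = std::make_unique<ReleaseModeModelRunner<CompiledModelType>>(
//         M.getContext(), FeatureMap, DecisionName);
//   else
//     // interactive mode: exchange tensors with an external model over pipes
//     // (plus the default-decision spec when -inliner-interactive-include-default is on).
//     Runner = std::make_unique<InteractiveModelRunner>(
//         M.getContext(), FeatureMap, InlineDecisionSpec,
//         InteractiveChannelBaseName + ".out", InteractiveChannelBaseName + ".in");
//   // The runner is then handed to the MLInlineAdvisor constructor.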

#define DEBUG_TYPE "inline-ml"

static cl::opt<float> SizeIncreaseThreshold(
    "ml-advisor-size-increase-threshold", cl::Hidden,
    cl::desc("Maximum factor by which expected native size may increase before "
             "blocking any further inlining."),
    cl::init(2.0));
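// For example, at the default of 2.0, a module whose initial size estimate is
// 10,000 (a made-up figure) stops receiving positive inlining advice once the
// advisor's running estimate exceeds 20,000.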

static cl::opt<bool> KeepFPICache(
    "ml-advisor-keep-fpi-cache", cl::Hidden,
    cl::desc(
        "For test - keep the ML Inline advisor's FunctionPropertiesInfo cache"),
    cl::init(false));

// clang-format off
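// FeatureMap lists the TensorSpec of every model input, in the order defined by
// the feature iterator macros in InlineModelFeatureMaps.h (the InlineCost-derived
// features first); FeatureIndex values are used to index into this vector.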
const std::vector<TensorSpec> llvm::FeatureMap{};
// clang-format on

const char *const llvm::DecisionName = "inlining_decision";
const TensorSpec llvm::InlineDecisionSpec =
    TensorSpec::createSpec<int64_t>(DecisionName, {1});
const char *const llvm::DefaultDecisionName = "inlining_default";
const TensorSpec llvm::DefaultDecisionSpec =
    TensorSpec::createSpec<int64_t>(DefaultDecisionName, {1});
const char *const llvm::RewardName = "delta_size";

CallBase *getInlinableCS(Instruction &I) {}

MLInlineAdvisor::MLInlineAdvisor(
    Module &M, ModuleAnalysisManager &MAM,
    std::unique_ptr<MLModelRunner> Runner,
    std::function<bool(CallBase &)> GetDefaultAdvice)
    :{}

unsigned MLInlineAdvisor::getInitialFunctionLevel(const Function &F) const {}

void MLInlineAdvisor::onPassEntry(LazyCallGraph::SCC *CurSCC) {}

void MLInlineAdvisor::onPassExit(LazyCallGraph::SCC *CurSCC) {}

int64_t MLInlineAdvisor::getLocalCalls(Function &F) {}

// Update the internal state of the advisor, and force invalidate feature
// analysis. Currently, we maintain minimal (and very simple) global state - the
// number of functions and the number of static calls. We also keep track of the
// total IR size in this module, to stop misbehaving policies at a certain bloat
// factor (SizeIncreaseThreshold)
void MLInlineAdvisor::onSuccessfulInlining(const MLInlineAdvice &Advice,
                                           bool CalleeWasDeleted) {}
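// A rough sketch of that bookkeeping (CurrentIRSize, InitialIRSize and
// ForceStop are the advisor's counters; treat the exact arithmetic as
// illustrative):
//
//   int64_t IRSizeAfter =
//       getIRSize(*Advice.getCaller()) + (CalleeWasDeleted ? 0 : Advice.CalleeIRSize);
//   CurrentIRSize += IRSizeAfter - (Advice.CallerIRSize + Advice.CalleeIRSize);
//   if (CurrentIRSize > SizeIncreaseThreshold * InitialIRSize)
//     ForceStop = true; // stop recommending inlining for the rest of the module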

int64_t MLInlineAdvisor::getModuleIRSize() const {}

FunctionPropertiesInfo &MLInlineAdvisor::getCachedFPI(Function &F) const {}

std::unique_ptr<InlineAdvice> MLInlineAdvisor::getAdviceImpl(CallBase &CB) {}
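// A hedged sketch of how a model query proceeds for a non-mandatory call site.
// The feature name and field below are just one illustrative pair (the full set
// is defined by FeatureMap), and CalleeFPI stands for the callee's cached
// FunctionPropertiesInfo:
//
//   *ModelRunner->getTensor<int64_t>(FeatureIndex::callee_basic_block_count) =
//       CalleeFPI.BasicBlockCount;
//   // ... populate the remaining feature tensors the same way ...
//   return getAdviceFromModel(CB, ORE); // reads ModelRunner->evaluate<int64_t>()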

std::unique_ptr<MLInlineAdvice>
MLInlineAdvisor::getAdviceFromModel(CallBase &CB,
                                    OptimizationRemarkEmitter &ORE) {}

std::unique_ptr<InlineAdvice>
MLInlineAdvisor::getSkipAdviceIfUnreachableCallsite(CallBase &CB) {}

std::unique_ptr<InlineAdvice> MLInlineAdvisor::getMandatoryAdvice(CallBase &CB,
                                                                  bool Advice) {}

std::unique_ptr<MLInlineAdvice>
MLInlineAdvisor::getMandatoryAdviceImpl(CallBase &CB) {}

void MLInlineAdvisor::print(raw_ostream &OS) const {}

MLInlineAdvice::MLInlineAdvice(MLInlineAdvisor *Advisor, CallBase &CB,
                               OptimizationRemarkEmitter &ORE,
                               bool Recommendation)
    :{}

void MLInlineAdvice::reportContextForRemark(
    DiagnosticInfoOptimizationBase &OR) {}

void MLInlineAdvice::updateCachedCallerFPI(FunctionAnalysisManager &FAM) const {}

void MLInlineAdvice::recordInliningImpl() {}

void MLInlineAdvice::recordInliningWithCalleeDeletedImpl() {}

void MLInlineAdvice::recordUnsuccessfulInliningImpl(
    const InlineResult &Result) {}
void MLInlineAdvice::recordUnattemptedInliningImpl() {}