#include "tensorflow/lite/tools/benchmark/benchmark_tflite_model.h"
#include <algorithm>
#include <cstdarg>
#include <cstdint>
#include <cstdlib>
#include <cstring>
#include <fstream>
#include <functional>
#include <iostream>
#include <memory>
#include <random>
#include <sstream>
#include <string>
#include <unordered_set>
#include <utility>
#include <vector>
#include "absl/base/attributes.h"
#include "absl/strings/numbers.h"
#include "absl/strings/str_replace.h"
#include "absl/strings/str_split.h"
#include "absl/strings/string_view.h"
#include "ruy/profiler/profiler.h"
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/core/kernels/register.h"
#include "tensorflow/lite/core/model.h"
#include "tensorflow/lite/core/model_builder.h"
#include "tensorflow/lite/core/signature_runner.h"
#include "tensorflow/lite/core/subgraph.h"
#include "tensorflow/lite/interpreter.h"
#include "tensorflow/lite/kernels/cpu_backend_context.h"
#include "tensorflow/lite/op_resolver.h"
#include "tensorflow/lite/optional_debug_tools.h"
#include "tensorflow/lite/profiling/profile_summary_formatter.h"
#include "tensorflow/lite/string_util.h"
#include "tensorflow/lite/tools/benchmark/benchmark_utils.h"
#include "tensorflow/lite/tools/benchmark/profiling_listener.h"
#include "tensorflow/lite/tools/delegates/delegate_provider.h"
#include "tensorflow/lite/tools/logging.h"
#include "tensorflow/lite/tools/model_loader.h"
#include "tensorflow/lite/tools/utils.h"
// Hook for registering custom or selectively built ops with the benchmark's
// op resolver.
void RegisterSelectedOps(::tflite::MutableOpResolver* resolver);

// Weak default definition; a strong definition linked in from another
// translation unit takes precedence and can register additional ops.
void ABSL_ATTRIBUTE_WEAK
RegisterSelectedOps(::tflite::MutableOpResolver* resolver) { … }
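// Illustrative only: a strong (non-weak) definition like the commented sketch
// below, linked in from another translation unit, would take precedence over
// the weak default above. The specific op registered here is a hypothetical
// example, not part of this file.
//
//   void RegisterSelectedOps(::tflite::MutableOpResolver* resolver) {
//     resolver->AddBuiltin(::tflite::BuiltinOperator_CONV_2D,
//                          ::tflite::ops::builtin::Register_CONV_2D());
//   }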
namespace tflite {
namespace benchmark {
namespace {
using utils::InputTensorData;
using utils::VoidUniquePtr;
#if defined(TFLITE_PROFILING_ENABLED)
constexpr bool kOpProfilingEnabledDefault = true;
#else
constexpr bool kOpProfilingEnabledDefault = false;
#endif
// Op profiling output modes.
constexpr char kOpProfilingOutputModeStdout[] = "stdout";
constexpr char kOpProfilingOutputModeCsv[] = "csv";
constexpr char kOpProfilingOutputModeProto[] = "proto";
const char* kOpProfilingOutputModes[] = {kOpProfilingOutputModeStdout,
                                         kOpProfilingOutputModeCsv,
                                         kOpProfilingOutputModeProto};
// Scopes a Ruy profiler session around the benchmark run.
class RuyProfileListener : public BenchmarkListener { … };
void RuyProfileListener::OnBenchmarkStart(const BenchmarkParams& params) { … }
void RuyProfileListener::OnBenchmarkEnd(const BenchmarkResults& results) { … }
// Prints the interpreter's internal state before and/or after invocation when
// the corresponding parameters are enabled.
class InterpreterStatePrinter : public BenchmarkListener { … };

// Saves model output tensor contents to a file when an output file path is
// provided.
class OutputSaver : public BenchmarkListener { … };
std::vector<std::string> Split(const std::string& str, const char delim) { … }
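// A minimal sketch (hypothetical name, not the elided body above) of splitting
// a delimiter-separated flag value such as "input_1,input_2" into its parts.
std::vector<std::string> SplitSketch(const std::string& str, char delim) {
  std::vector<std::string> results;
  std::istringstream stream(str);
  std::string part;
  while (std::getline(stream, part, delim)) {
    results.push_back(part);
  }
  return results;
}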
int GetNumElements(const TfLiteIntArray* dim_array) { … }
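// Sketch (hypothetical helper): the flat element count of a tensor is the
// product of the entries of its TfLiteIntArray dims, which is what
// GetNumElements above is expected to compute.
int GetNumElementsSketch(const TfLiteIntArray* dim_array) {
  int num_elements = 1;
  for (int i = 0; i < dim_array->size; ++i) {
    num_elements *= dim_array->data[i];
  }
  return num_elements;
}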
void FillRandomString(tflite::DynamicBuffer* buffer,
const TfLiteIntArray* dim_array,
const std::function<std::string()>& random_func) { … }
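// Sketch (hypothetical helper): filling a string tensor amounts to appending
// one generated string per element to a tflite::DynamicBuffer, which is later
// written into the tensor; names and structure here are illustrative.
void FillRandomStringSketch(tflite::DynamicBuffer* buffer,
                            const TfLiteIntArray* dim_array,
                            const std::function<std::string()>& random_func) {
  const int num_elements = GetNumElementsSketch(dim_array);
  for (int i = 0; i < num_elements; ++i) {
    const std::string str = random_func();
    buffer->AddString(str.data(), str.length());
  }
}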
int FindLayerInfoIndex(std::vector<BenchmarkTfLiteModel::InputLayerInfo>* info,
                       const std::string& input_name,
                       const std::string& names_string) { … }
TfLiteStatus PopulateInputValueRanges(
const std::string& names_string, const std::string& value_ranges_string,
std::vector<BenchmarkTfLiteModel::InputLayerInfo>* info) { … }
TfLiteStatus PopulateInputValueFiles(
const std::string& names_string, const std::string& value_files_string,
std::vector<BenchmarkTfLiteModel::InputLayerInfo>* info) { … }
TfLiteStatus PopulateInputLayerInfo(
const std::string& names_string, const std::string& shapes_string,
const std::string& value_ranges_string,
const std::string& value_files_string,
std::vector<BenchmarkTfLiteModel::InputLayerInfo>* info) { … }
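// For reference, the four strings above come from the benchmark flags and are
// expected to line up per input layer, e.g. (values are illustrative only):
//   --input_layer=input_1,input_2
//   --input_layer_shape=1,224,224,3:1,20
//   --input_layer_value_range=input_1,0,255
//   --input_layer_value_files=input_2:/tmp/input_2.bin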
std::shared_ptr<profiling::ProfileSummaryFormatter>
CreateProfileSummaryFormatter(const std::string& output_mode) { … }
}  // namespace
TfLiteStatus SplitInputLayerNameAndValueFile(
const std::string& name_and_value_file,
std::pair<std::string, std::string>& name_file_pair) { … }
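// A simplified sketch (hypothetical helper) of the "name:file" split performed
// above; note that real tensor names can themselves contain ':' (e.g.
// "input:0"), which this sketch does not attempt to handle.
TfLiteStatus SplitNameAndFileSketch(const std::string& name_and_value_file,
                                    std::pair<std::string, std::string>& out) {
  const std::string::size_type pos = name_and_value_file.find(':');
  if (pos == std::string::npos) return kTfLiteError;
  out.first = name_and_value_file.substr(0, pos);
  out.second = name_and_value_file.substr(pos + 1);
  return kTfLiteOk;
}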
std::pair<TfLiteStatus, std::unique_ptr<BenchmarkInterpreterRunner>>
BenchmarkInterpreterRunner::Create(tflite::Interpreter* const interpreter,
std::string signature_key) { … }
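// Usage sketch (illustrative; "serving_default" is a hypothetical signature
// key): callers create a runner for either the whole model or one signature
// and then drive it like a plain interpreter, e.g.
//
//   auto [status, runner] =
//       BenchmarkInterpreterRunner::Create(interpreter.get(), "serving_default");
//   if (status == kTfLiteOk) {
//     runner->AllocateTensors();
//     runner->Invoke();
//   }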
TfLiteStatus BenchmarkInterpreterRunner::AllocateTensors() { … }
TfLiteStatus BenchmarkInterpreterRunner::Invoke() { … }
const std::vector<int>& BenchmarkInterpreterRunner::execution_plan() const { … }
const std::vector<int>& BenchmarkInterpreterRunner::inputs() const { … }
const std::vector<int>& BenchmarkInterpreterRunner::outputs() const { … }
TfLiteTensor* BenchmarkInterpreterRunner::tensor(int tensor_index) { … }
const std::pair<TfLiteNode, TfLiteRegistration>*
BenchmarkInterpreterRunner::node_and_registration(int node_index) const { … }
TfLiteStatus BenchmarkInterpreterRunner::ResizeInputTensor(
int tensor_index, const std::vector<int>& new_size) { … }
BenchmarkParams BenchmarkTfLiteModel::DefaultParams() { … }
BenchmarkTfLiteModel::BenchmarkTfLiteModel(BenchmarkParams params)
: … { … }
void BenchmarkTfLiteModel::CleanUp() { … }
BenchmarkTfLiteModel::~BenchmarkTfLiteModel() { … }
std::vector<Flag> BenchmarkTfLiteModel::GetFlags() { … }
void BenchmarkTfLiteModel::LogParams() { … }
TfLiteStatus BenchmarkTfLiteModel::ValidateParams() { … }
uint64_t BenchmarkTfLiteModel::ComputeInputBytes() { … }
int64_t BenchmarkTfLiteModel::MayGetModelFileSize() { … }
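// Sketch (hypothetical helper): one common way to obtain a model's file size,
// roughly what MayGetModelFileSize is expected to do for file-backed models,
// is to open the file at its end and read the stream position.
int64_t GetFileSizeSketch(const std::string& path) {
  std::ifstream in(path, std::ifstream::ate | std::ifstream::binary);
  if (!in.good()) return -1;
  return static_cast<int64_t>(in.tellg());
}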
InputTensorData BenchmarkTfLiteModel::LoadInputTensorData(
const TfLiteTensor& t, const std::string& input_file_path) { … }
InputTensorData BenchmarkTfLiteModel::CreateRandomTensorData(
const TfLiteTensor& t, const InputLayerInfo* layer_info) { … }
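// Sketch (hypothetical helper): for a float tensor, random input data is
// essentially a buffer of uniformly distributed values within the configured
// range; integer and string tensor types need their own handling.
std::vector<float> RandomFloatBufferSketch(int num_elements, float low,
                                           float high) {
  std::vector<float> data(num_elements);
  std::mt19937 rng(std::random_device{}());
  std::uniform_real_distribution<float> dist(low, high);
  for (float& v : data) v = dist(rng);
  return data;
}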
TfLiteStatus BenchmarkTfLiteModel::PrepareInputData() { … }
TfLiteStatus BenchmarkTfLiteModel::ResetInputsAndOutputs() { … }
TfLiteStatus BenchmarkTfLiteModel::InitInterpreter() { … }
TfLiteStatus BenchmarkTfLiteModel::Init() { … }
TfLiteStatus BenchmarkTfLiteModel::LoadModel() { … }
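// Usage sketch (illustrative; the path is a placeholder): loading boils down
// to reading or memory-mapping the FlatBuffer and keeping it alive for the
// lifetime of the interpreter, e.g.
//
//   std::unique_ptr<tflite::FlatBufferModel> model =
//       tflite::FlatBufferModel::BuildFromFile("/path/to/model.tflite");
//   if (model == nullptr) return kTfLiteError;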
std::unique_ptr<tflite::OpResolver> BenchmarkTfLiteModel::GetOpResolver()
const { … }
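// Usage sketch (illustrative): the resolver is typically a
// tflite::ops::builtin::BuiltinOpResolver, optionally extended through the
// weak RegisterSelectedOps hook declared at the top of this file, e.g.
//
//   auto resolver = std::make_unique<tflite::ops::builtin::BuiltinOpResolver>();
//   RegisterSelectedOps(resolver.get());
//   return resolver;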
std::unique_ptr<BenchmarkListener>
BenchmarkTfLiteModel::MayCreateProfilingListener() const { … }
TfLiteStatus BenchmarkTfLiteModel::RunImpl() { … }
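// Usage sketch (illustrative flag values): this class is normally driven from
// the benchmark_model binary via command-line flags, e.g.
//
//   benchmark_model --graph=/tmp/model.tflite --num_threads=4 \
//       --enable_op_profiling=true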
}  // namespace benchmark
}  // namespace tflite