// chromium/third_party/tflite/src/tensorflow/lite/optional_debug_tools.cc

/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/optional_debug_tools.h"

#include <cassert>
#include <cinttypes>
#include <cstddef>
#include <cstdio>
#include <functional>
#include <limits>
#include <set>
#include <sstream>
#include <string>
#include <utility>
#include <vector>

#include "tensorflow/lite/context_util.h"
#include "tensorflow/lite/core/interpreter.h"
#include "tensorflow/lite/core/subgraph.h"
#include "tensorflow/lite/schema/schema_generated.h"

namespace tflite {

namespace {
// Just forward declarations.
const char* AllocTypeName(TfLiteAllocationType type);

// Forward declaration so earlier helpers can call it; the default arguments
// live on this declaration only (the definition below must not repeat them).
void PrintIntVector(const std::vector<int>& v,
                    bool collapse_consecutives = true,
                    bool add_newline = false);

// A class to represent the information of a memory arena that's used in TfLite
// runtime for holding allocated memory of tensors. The information includes
// the following:
// 1. The memory allocation type.
// 2. The tensor id of the tensor that has the most amount of memory allocated,
// and the memory size.
// 3. The estimated memory boundary and size of the arena.
// NOTE(review): the class body is empty in this copy — none of the bookkeeping
// described above is implemented here; confirm against the upstream source.
class MemoryArenaInfo {};

// NOTE(review): empty stub — the name suggests it tracks dynamically allocated
// (non-arena) tensor memory, paralleling MemoryArenaInfo above; the
// implementation has been stripped in this copy — confirm upstream.
class DynamicMemoryInfo {};

// NOTE(review): empty stub — the name suggests it aggregates per-tensor memory
// information across the whole model (arena plus dynamic allocations); the
// implementation has been stripped in this copy — confirm upstream.
class ModelTensorMemoryInfo {};

// Intended to print the total byte size of the tensors identified by
// `tensor_ids` within `subgraph`, preceded by `prefix`. `T` is presumably any
// iterable container of int tensor ids — TODO confirm against callers.
// NOTE(review): the body is empty in this copy, so calling it is a no-op.
template <typename T>
void PrintTotalBytesOfTensors(const Subgraph& subgraph, const T& tensor_ids,
                              const std::string& prefix = " -> ") {}

// Prints `v` to stdout in the form "[a,b,c]". When `collapse_consecutives` is
// true, runs of consecutive integers are shown as a range, e.g. "[1-4,7]"
// (a run of exactly two is printed as "a,b", not "a-b"). An empty vector is
// printed as "(null)". Appends '\n' when `add_newline` is true.
// Note: default argument values live on the forward declaration above.
void PrintIntVector(const std::vector<int>& v, bool collapse_consecutives,
                    bool add_newline) {
  if (v.empty()) {
    printf("(null)");
    if (add_newline) printf("\n");
    return;
  }
  printf("[");
  for (size_t i = 0; i < v.size(); ++i) {
    if (i > 0) printf(",");
    const int range_start = v[i];
    int range_end = range_start;
    if (collapse_consecutives) {
      // Swallow the whole consecutive run starting at v[i].
      while (i + 1 < v.size() && v[i + 1] == range_end + 1) {
        ++i;
        ++range_end;
      }
    }
    if (range_end == range_start) {
      printf("%d", range_start);
    } else if (range_end == range_start + 1) {
      // A two-element run: "a,b" reads better than "a-b".
      printf("%d,%d", range_start, range_end);
    } else {
      printf("%d-%d", range_start, range_end);
    }
  }
  printf("]");
  if (add_newline) printf("\n");
}

void PrintTfLiteIntVector(const TfLiteIntArray* v,
                          bool collapse_consecutives = true,
                          bool add_newline = false) {}

// Returns a human-readable name for a TfLiteType value. Falls back to
// "(invalid)" for out-of-range values or enumerators added after this list.
// Fixes UB in the stub: a non-void function must not fall off the end.
const char* TensorTypeName(TfLiteType type) {
  switch (type) {
    case kTfLiteNoType:
      return "kTfLiteNoType";
    case kTfLiteFloat32:
      return "kTfLiteFloat32";
    case kTfLiteInt32:
      return "kTfLiteInt32";
    case kTfLiteUInt32:
      return "kTfLiteUInt32";
    case kTfLiteUInt8:
      return "kTfLiteUInt8";
    case kTfLiteInt8:
      return "kTfLiteInt8";
    case kTfLiteInt64:
      return "kTfLiteInt64";
    case kTfLiteUInt64:
      return "kTfLiteUInt64";
    case kTfLiteString:
      return "kTfLiteString";
    case kTfLiteBool:
      return "kTfLiteBool";
    case kTfLiteUInt16:
      return "kTfLiteUInt16";
    case kTfLiteInt16:
      return "kTfLiteInt16";
    case kTfLiteComplex64:
      return "kTfLiteComplex64";
    case kTfLiteComplex128:
      return "kTfLiteComplex128";
    case kTfLiteFloat16:
      return "kTfLiteFloat16";
    case kTfLiteFloat64:
      return "kTfLiteFloat64";
    case kTfLiteResource:
      return "kTfLiteResource";
    case kTfLiteVariant:
      return "kTfLiteVariant";
    default:
      return "(invalid)";
  }
}

// Returns a human-readable name for a TfLiteAllocationType value; "(invalid)"
// for anything unrecognized. Fixes UB in the stub: a non-void function must
// not fall off the end.
const char* AllocTypeName(TfLiteAllocationType type) {
  switch (type) {
    case kTfLiteMemNone:
      return "kTfLiteMemNone";
    case kTfLiteMmapRo:
      return "kTfLiteMmapRo";
    case kTfLiteArenaRw:
      return "kTfLiteArenaRw";
    case kTfLiteArenaRwPersistent:
      return "kTfLiteArenaRwPersistent";
    case kTfLiteDynamic:
      return "kTfLiteDynamic";
    case kTfLitePersistentRo:
      return "kTfLitePersistentRo";
    case kTfLiteCustom:
      return "kTfLiteCustom";
    default:
      return "(invalid)";
  }
}

// Returns `str` truncated to at most `size_limit` characters, with "..."
// marking the removed portion. By default the *front* of the string is
// dropped (keeping the tail, which is usually the distinguishing part of a
// tensor name); pass truncate_at_end=true to drop the tail instead.
// A null `str` yields "(nil)"; size_limit <= 0 yields ""; a limit too small
// to fit the ellipsis yields just dots. Fixes UB in the stub: a non-void
// function must not fall off the end.
std::string TruncateString(const char* str, int size_limit,
                           bool truncate_at_end = false) {
  if (str == nullptr) return "(nil)";
  std::string truncated(str);
  const size_t length = truncated.size();
  if (length <= static_cast<size_t>(size_limit)) return truncated;
  if (size_limit <= 0) return std::string();
  // Not enough room for content plus "...": emit only dots.
  if (size_limit <= 3) return std::string(static_cast<size_t>(size_limit), '.');
  if (truncate_at_end) {
    truncated.resize(static_cast<size_t>(size_limit));
    truncated.replace(static_cast<size_t>(size_limit - 3), 3, "...");
  } else {
    truncated.erase(0, length - static_cast<size_t>(size_limit));
    truncated.replace(0, 3, "...");
  }
  return truncated;
}

}  // namespace

// Prints a dump of what tensors and what nodes are in the interpreter.
// The *_display_length parameters presumably bound the column widths used for
// tensor names, type names, and allocation-type names — TODO confirm against
// the header's declared defaults.
// NOTE(review): the body is empty in this copy, so calling it prints nothing;
// the real implementation lives upstream.
void PrintInterpreterState(const Interpreter* interpreter,
                           const int32_t tensor_name_display_length,
                           const int32_t tensor_type_display_length,
                           const int32_t alloc_type_display_length) {}

}  // namespace tflite