chromium/third_party/tflite/src/tensorflow/lite/core/subgraph.cc

/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#include "tensorflow/lite/core/subgraph.h"

#include <algorithm>
#include <atomic>
#include <cstdarg>
#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <cstring>
#include <iterator>
#include <memory>
#include <string>
#include <unordered_set>
#include <utility>
#include <vector>

#include "tensorflow/lite/allocation.h"
#include "tensorflow/lite/array.h"
#include "tensorflow/lite/builtin_ops.h"
#include "tensorflow/lite/c/common_internal.h"
#include "tensorflow/lite/context_util.h"
#include "tensorflow/lite/core/api/error_reporter.h"
#include "tensorflow/lite/core/api/op_resolver.h"
#include "tensorflow/lite/core/api/profiler.h"
#include "tensorflow/lite/core/api/tensor_utils.h"
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/experimental/resource/initialization_status.h"
#include "tensorflow/lite/experimental/resource/resource_base.h"
#include "tensorflow/lite/graph_info.h"
#include "tensorflow/lite/logger.h"
#include "tensorflow/lite/memory_planner.h"
#include "tensorflow/lite/minimal_logging.h"
#include "tensorflow/lite/profiling/telemetry/telemetry.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/util.h"
#ifdef TFLITE_USE_SIMPLE_MEMORY_PLANNER
#include "tensorflow/lite/simple_planner.h"
#else
#include "tensorflow/lite/arena_planner.h"
#endif
#ifdef TF_LITE_TENSORFLOW_PROFILER
#include "tensorflow/lite/tensorflow_profiler_logger.h"
#endif  // TF_LITE_TENSORFLOW_PROFILER

namespace tflite {

namespace {

struct TfLiteQuantizationDeleter {
  void operator()(TfLiteQuantization* q) { TfLiteQuantizationFree(q); }
};

using ScopedTfLiteQuantization =
    std::unique_ptr<TfLiteQuantization, TfLiteQuantizationDeleter>;

struct TfLiteSparsityDeleter {
  void operator()(TfLiteSparsity* s) { TfLiteSparsityFree(s); }
};

using ScopedTfLiteSparsity =
    std::unique_ptr<TfLiteSparsity, TfLiteSparsityDeleter>;

TfLiteStatus ReportOpError(TfLiteContext* context, const TfLiteNode& node,
                           const TfLiteRegistration& registration,
                           int node_index, const char* message) {}

// Stub function that always returns kTfLiteError. It is registered for
// several different forbidden context functions to save compiled binary size.
// Please note the restrictions:
// * The type of the first parameter has to be `TfLiteContext*`.
// * All parameters must be trivially destructible. (E.g. no C++ classes.)
TfLiteStatus ForbiddenContextFunction(TfLiteContext* context, ...) {}

// Sets the given function pointer to ForbiddenContextFunction, cast to a
// compatible function pointer type.
template <typename FunctionType>
void SetForbiddenContextFunction(FunctionType* func) {}
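// A minimal sketch of the assignment elided above, assuming the upstream
// TF Lite implementation: the variadic stub is cast to the target function
// pointer type, which is only workable under the restrictions listed before
// `ForbiddenContextFunction`.
//
//   *func = reinterpret_cast<FunctionType>(ForbiddenContextFunction);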

// Returns true if at least one tensor in the given list is kTfLiteDynamic.
template <typename TensorIntArray>
bool HasDynamicTensorImpl(const TfLiteContext& context,
                          const TensorIntArray& int_array,
                          int* dynamic_tensor_index) {}

bool HasDynamicTensor(const TfLiteContext& context,
                      const TfLiteIntArray* int_array,
                      int* dynamic_tensor_index) {}
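// A minimal sketch of the scan elided in the two functions above, assuming
// upstream TF Lite behavior (kTfLiteOptionalTensor marks an absent optional
// tensor):
//
//   for (int i : int_array) {
//     if (i == kTfLiteOptionalTensor) continue;
//     if (context.tensors[i].allocation_type == kTfLiteDynamic) {
//       if (dynamic_tensor_index) *dynamic_tensor_index = i;
//       return true;
//     }
//   }
//   return false;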

// Gets the legacy TfLiteQuantizationParams from the current TfLiteQuantization.
TfLiteQuantizationParams GetLegacyQuantization(
    const TfLiteQuantization& quantization) {}
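// A minimal sketch of the conversion, assuming upstream behavior: only
// per-tensor (single-element) affine quantization maps onto the legacy
// (scale, zero_point) pair; anything else yields zeroed legacy parameters.
//
//   TfLiteQuantizationParams legacy = {0.0f, 0};
//   if (quantization.type == kTfLiteAffineQuantization) {
//     auto* affine =
//         static_cast<TfLiteAffineQuantization*>(quantization.params);
//     if (affine && affine->scale && affine->zero_point &&
//         affine->scale->size == 1 && affine->zero_point->size == 1) {
//       legacy.scale = affine->scale->data[0];
//       legacy.zero_point = affine->zero_point->data[0];
//     }
//   }
//   return legacy;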

static constexpr const char kUnknownCustomOpName[] = "UnknownCustomOp";
const char* GetTFLiteOpName(const TfLiteRegistration& op_reg) {}

// Verifies custom allocation for tensor, if applicable.
TfLiteStatus VerifyCustomAllocationForTensor(
    TfLiteContext* context,
    const std::map<int, TfLiteCustomAllocation>& tensor_idx_to_alloc,
    const int tensor_idx) {}
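// A minimal sketch of the verification, assuming upstream behavior: a tensor
// with allocation_type kTfLiteCustom must have a registered allocation that
// is at least as large as the tensor's byte size.
//
//   auto& tensor = context->tensors[tensor_idx];
//   if (tensor.allocation_type != kTfLiteCustom) return kTfLiteOk;
//   const auto it = tensor_idx_to_alloc.find(tensor_idx);
//   if (it == tensor_idx_to_alloc.end() || it->second.bytes < tensor.bytes) {
//     return kTfLiteError;
//   }
//   return kTfLiteOk;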

}  // namespace

// A trivial implementation of GraphInfo around the Interpreter.
// NOTE: this interpreter info represents the subset of the
// graph that is executed according to execution plan. Thus,
// the indices are execution plan indices rather than raw node
// indices.
class InterpreterInfo : public GraphInfo {};

Subgraph::Subgraph(ErrorReporter* error_reporter,
                   TfLiteExternalContext** external_contexts,
                   std::vector<std::unique_ptr<Subgraph>>* subgraphs,
                   resource::ResourceMap* resources,
                   resource::ResourceIDMap* resource_ids,
                   resource::InitializationStatusMap* initialization_status_map,
                   int subgraph_index)
    {}

Subgraph::~Subgraph() {}

void Subgraph::CleanupNode(int node_index) {}

TfLiteStatus Subgraph::ReplaceNodeSubsetsWithDelegateKernels(
    TfLiteContext* context, TfLiteRegistration registration,
    const TfLiteIntArray* nodes_to_replace, TfLiteDelegate* delegate) {}

namespace {

// Copies a std::vector<int> to an existing TfLiteIntArray.
// This is a low-level data manipulation function; it is the caller's
// responsibility to ensure the TfLiteIntArray has enough capacity.
void CopyVectorToTfLiteIntArray(const std::vector<int>& vec,
                                TfLiteIntArray* arr) {}
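// A minimal sketch of the copy, assuming the destination was allocated with
// at least vec.size() elements (per the caller contract noted above):
//
//   arr->size = static_cast<int>(vec.size());
//   std::memcpy(arr->data, vec.data(), sizeof(int) * vec.size());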

// This function template allocates a contiguous block of memory that contains
// a TfLiteDelegateParams followed by several TfLiteIntArrays.
// Calling `free` on the returned TfLiteDelegateParams* frees all of the
// allocated space at once.
//
// +-----------------------------------+
// | TfLiteDelegateParams              |
// | TfLiteDelegate* delegate;         |
// | TfLiteIntArray* nodes_to_replace; |--\
// | TfLiteIntArray* input_tensors;    |--+--\
// | TfLiteIntArray* output_tensors;   |--+--+--\
// +-----------------------------------+  |  |  |
// | TfLiteIntArray (variable size)    |<-/  |  |
// +-----------------------------------+     |  |
// | TfLiteIntArray (variable size)    |<----/  |
// +-----------------------------------+        |
// | TfLiteIntArray (variable size)    |<-------/
// +-----------------------------------+
//
// Note that the 'delegate' field has to be set by the caller of this function
// template.
//
// This function can also be used with TfLiteOpaqueDelegateParams as a template
// parameter instead of TfLiteDelegateParams, in which case the layout looks
// as follows:
//
// +----------------------------------------------+
// | TfLiteOpaqueDelegateParams                   |
// | struct TfLiteOpaqueDelegate* delegate;       |
// | void* delegate_data;                         |
// | TfLiteIntArray* nodes_to_replace;            |--\
// | TfLiteIntArray* input_tensors;               |--+--\
// | TfLiteIntArray* output_tensors;              |--+--+--\
// +----------------------------------------------+  |  |  |
// | TfLiteIntArray (variable size)               |<-/  |  |
// +----------------------------------------------+     |  |
// | TfLiteIntArray (variable size)               |<----/  |
// +----------------------------------------------+        |
// | TfLiteIntArray (variable size)               |<-------/
// +----------------------------------------------+
//
// Note that the 'delegate' and 'delegate_data' fields have to be set by the
// caller of this function template.
template <typename Params>
Params* CreateDelegateParamsImpl(TfLiteDelegate* delegate,
                                 const NodeSubset& node_subset) {}
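// A minimal sketch of the single-allocation scheme pictured above, assuming
// the upstream TF Lite approach: sum up the sizes, malloc once, then place
// each TfLiteIntArray behind the Params struct and wire up the pointers.
// `TfLiteIntArrayGetSizeInBytes` is the existing helper from core/c/common.h.
//
//   size_t allocation_size = sizeof(Params);
//   allocation_size += TfLiteIntArrayGetSizeInBytes(node_subset.nodes.size());
//   allocation_size +=
//       TfLiteIntArrayGetSizeInBytes(node_subset.input_tensors.size());
//   allocation_size +=
//       TfLiteIntArrayGetSizeInBytes(node_subset.output_tensors.size());
//
//   char* allocation = static_cast<char*>(malloc(allocation_size));
//   Params* params = reinterpret_cast<Params*>(allocation);
//   char* cursor = allocation + sizeof(Params);
//
//   params->nodes_to_replace = reinterpret_cast<TfLiteIntArray*>(cursor);
//   CopyVectorToTfLiteIntArray(node_subset.nodes, params->nodes_to_replace);
//   cursor += TfLiteIntArrayGetSizeInBytes(node_subset.nodes.size());
//   // ... likewise for input_tensors and output_tensors; the caller fills
//   // in 'delegate' (and 'delegate_data' for the opaque variant).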

TfLiteDelegateParams* CreateDelegateParams(TfLiteDelegate* delegate,
                                           const NodeSubset& node_subset) {}

TfLiteOpaqueDelegateParams* CreateOpaqueDelegateParams(
    TfLiteDelegate* delegate, const NodeSubset& node_subset) {}

// Assumes that params is not nullptr.
void PopulatePreviewDelegateParams(const NodeSubset& node_subset,
                                   TfLiteDelegateParams* params) {}

// Returns the 'custom_name' associated with the provided 'registration', or
// kUnknownCustomOpName if the registration does not have a custom name.
//
// Note that 'TfLiteRegistration' has a top-level 'custom_name' field and also
// a nested 'custom_name' field defined inside the optionally set
// 'registration_external' structure.  The top-level field takes precedence
// over the nested field.  'TfLiteRegistration' objects can optionally carry a
// 'TfLiteOperator' pointer in their 'registration_external' field.  In that
// case the 'TfLiteRegistration' object is merely a wrapper over a
// 'TfLiteOperator', with all fields except 'registration_external' being
// null; the 'TfLiteOperator' then contains the actual logic that the
// registration represents.
// See also the comment inside
// 'TfLiteOpaqueContextReplaceNodeSubsetsWithDelegateKernels'.
const char* GetDelegateKernalName(const TfLiteRegistration& registration) {}
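// A minimal sketch of the precedence described above, assuming upstream
// behavior:
//
//   if (registration.custom_name) return registration.custom_name;
//   if (registration.registration_external &&
//       registration.registration_external->custom_name) {
//     return registration.registration_external->custom_name;
//   }
//   return kUnknownCustomOpName;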

}  // namespace

TfLiteStatus Subgraph::PartitionGraph(const TfLiteIntArray* nodes_to_replace,
                                      std::vector<NodeSubset>* node_subsets) {}

TfLiteStatus Subgraph::ReplaceNodeSubsetsWithDelegateKernels(
    TfLiteRegistration registration, const TfLiteIntArray* nodes_to_replace,
    TfLiteDelegate* delegate) {}

TfLiteExternalContext* Subgraph::GetExternalContext(
    TfLiteExternalContextType type) {}

TfLiteExternalContext* Subgraph::GetExternalContext(
    struct TfLiteContext* context, TfLiteExternalContextType type) {}

void Subgraph::SetExternalContext(TfLiteExternalContextType type,
                                  TfLiteExternalContext* ctx) {}

void Subgraph::SetExternalContext(struct TfLiteContext* context,
                                  TfLiteExternalContextType type,
                                  TfLiteExternalContext* ctx) {}

// Gets a TfLiteIntArray* representing the execution plan. The interpreter
// owns this memory and it is only guaranteed to exist during the invocation
// of the delegate's Prepare.
TfLiteStatus Subgraph::GetExecutionPlan(TfLiteIntArray** execution_plan) {}
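// A minimal sketch of the caching that backs the ownership guarantee above,
// assuming the upstream member names `plan_cache_` and `execution_plan_`:
//
//   plan_cache_.reset(TfLiteIntArrayCreate(execution_plan_.size()));
//   std::memcpy(plan_cache_->data, execution_plan_.data(),
//               sizeof(int) * execution_plan_.size());
//   *execution_plan = plan_cache_.get();
//   return kTfLiteOk;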

// WARNING: This is an experimental interface that is subject to change.
// Entry point for C node plugin API to get the execution plan.
TfLiteStatus Subgraph::GetExecutionPlan(struct TfLiteContext* context,
                                        TfLiteIntArray** execution_plan) {}

void Subgraph::FreeDelegatePartitioningData() {}

TfLiteStatus Subgraph::GetModelMetadata(const char* name, const char** ptr,
                                        size_t* bytes) {}

TfLiteStatus Subgraph::GetModelMetadata(const struct TfLiteContext* context,
                                        const char* name, const char** ptr,
                                        size_t* bytes) {}

TfLiteStatus Subgraph::AcquireSubgraphContext(
    int subgraph_index, TfLiteContext** acquired_context) {}

TfLiteStatus Subgraph::AcquireSubgraphContext(
    struct TfLiteContext* context, int subgraph_index,
    TfLiteContext** acquired_context) {}

TfLiteStatus Subgraph::ReleaseSubgraphContext(int subgraph_index) {}

TfLiteStatus Subgraph::ReleaseSubgraphContext(struct TfLiteContext* context,
                                              int subgraph_index) {}

TfLiteStatus Subgraph::MarkSubgraphAsDelegationSkippable(int subgraph_index) {}

TfLiteStatus Subgraph::GetNodeInitDataMmapInfo(
    const TfLiteNode* node, int* fd,
    int64_t* custom_initial_data_offset_in_file,
    int64_t* custom_initial_data_size) const {}

TfLiteStatus Subgraph::PreviewDelegatePartitioning(
    const TfLiteIntArray* nodes_to_replace,
    TfLiteDelegateParams** partition_params_array, int* num_partitions) {}

TfLiteStatus Subgraph::PreviewDelegatePartitioning(
    struct TfLiteContext* context, const TfLiteIntArray* nodes_to_replace,
    TfLiteDelegateParams** partition_params_array, int* num_partitions) {}

TfLiteStatus Subgraph::SetInputs(std::vector<int> inputs) {}

TfLiteStatus Subgraph::SetOutputs(std::vector<int> outputs) {}

TfLiteStatus Subgraph::SetVariables(std::vector<int> variables) {}

TfLiteStatus Subgraph::SetMetadata(
    const std::map<std::string, std::string>* metadata,
    const ControlEdges* control_edges) {}

void Subgraph::SetCancellationFunction(void* data,
                                       bool (*check_cancelled_func)(void*)) {}

TfLiteStatus Subgraph::EnsureTensorDataIsReadable(int tensor_index) {}

TfLiteStatus Subgraph::EnableCancellation(std::atomic_flag* flag) {}

TfLiteStatus Subgraph::Cancel() {}

bool Subgraph::IsCancelled() {}

void Subgraph::ReserveNodes(int count) {}

TfLiteStatus Subgraph::CheckTensorIndices(const char* label, const int* indices,
                                          int length) {}

// We have two arrays and we need to check that elements from one array don't
// show up in the other. We could sort both arrays and then iterate with two
// pointers from start to finish, always advancing the smaller one, but since
// these arrays are usually short (<25 elements for inputs, usually <3 for
// outputs), this might be slower than the naive approach: for arrays of size
// n and m, with n >> m and m ~ O(1), the sort-based approach is O(n log n)
// whereas the naive one is O(n*m) = O(n). Plus, sorting the input and output
// arrays might not be something we want, as it destroys the ordering of
// their elements.
//
// If it turns out that this is an issue, we can switch to the other algorithm.
TfLiteStatus Subgraph::CheckInputAndOutputForOverlap(const int* input_indices,
                                                     int num_inputs,
                                                     const int* output_indices,
                                                     int num_outputs) {}
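// A minimal sketch of the naive check described above:
//
//   for (int i = 0; i < num_inputs; i++) {
//     for (int j = 0; j < num_outputs; j++) {
//       if (input_indices[i] == output_indices[j]) {
//         ReportError("Tensor %d is both input %d and output %d\n",
//                     input_indices[i], i, j);
//         return kTfLiteError;
//       }
//     }
//   }
//   return kTfLiteOk;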

std::vector<int> Subgraph::GetInputTensorsCount() {}

TfLiteStatus Subgraph::AllocateTensors() {}

// TODO(b/115961645): Support non-zero default values.
TfLiteStatus Subgraph::ResetVariableTensors() {}

TfLiteStatus Subgraph::AddNodeWithParameters(
    const std::vector<int>& inputs, const std::vector<int>& outputs,
    const std::vector<int>& intermediates, const char* init_data,
    size_t init_data_size, void* builtin_data,
    const TfLiteRegistration* registration, int* node_index) {}

namespace {
// Returns true if any tensor identified by the indices in 'tensor_indexes'
// is of type 'kTfLiteResource', false otherwise.
bool AnyTensorOfTypeResource(const std::vector<TfLiteTensor>& tensors,
                             const TfLiteIntArray* tensor_indexes) {}
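// A minimal sketch of the scan:
//
//   for (int i = 0; i < tensor_indexes->size; ++i) {
//     const int idx = tensor_indexes->data[i];
//     if (idx >= 0 && idx < static_cast<int>(tensors.size()) &&
//         tensors[idx].type == kTfLiteResource) {
//       return true;
//     }
//   }
//   return false;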

}  // namespace

bool Subgraph::OpMightHaveSideEffect(
    const TfLiteNode* node, const TfLiteRegistration* registration) const {}

TfLiteStatus Subgraph::ResizeInputTensor(int tensor_index,
                                         const int* const dims_data,
                                         const int rank) {}

TfLiteStatus Subgraph::ResizeInputTensor(int tensor_index,
                                         const std::vector<int>& dims) {}

TfLiteStatus Subgraph::ResizeInputTensorStrict(int tensor_index,
                                               const std::vector<int>& dims) {}

TfLiteStatus Subgraph::ReleaseNonPersistentMemory() {}

TfLiteStatus Subgraph::ReleaseMemory() {}

// Give 'op_reg' a chance to initialize itself using the contents of
// 'buffer'. If 'registration_external' is valid, use its 'init' callback
// instead.
void* Subgraph::OpInit(const TfLiteRegistration& op_reg, const char* buffer,
                       size_t length) {}
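// A minimal sketch of the dispatch described above, assuming the upstream
// member `context_` (the subgraph's TfLiteContext):
//
//   if (op_reg.registration_external && op_reg.registration_external->init) {
//     return op_reg.registration_external->init(
//         reinterpret_cast<TfLiteOpaqueContext*>(&context_), buffer, length);
//   }
//   if (op_reg.init == nullptr) return nullptr;
//   return op_reg.init(&context_, buffer, length);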

TfLiteStatus Subgraph::OpPrepare(const TfLiteRegistration& op_reg,
                                 TfLiteNode* node) {}

// Invoke the operator represented by 'node'.
TfLiteStatus Subgraph::OpInvoke(const TfLiteRegistration& op_reg,
                                TfLiteNode* node) {}

// Let 'op_reg' release any memory it might have allocated via 'OpInit'.
// If 'registration_external' is valid, use its 'free' callback instead.
void Subgraph::OpFree(const TfLiteRegistration& op_reg, void* buffer) {}

TfLiteStatus Subgraph::MayAllocateOpOutput(TfLiteNode* node) {}

TfLiteStatus Subgraph::PrepareOpsStartingAt(
    int first_execution_plan_index, const std::vector<int>& execution_plan,
    int* last_execution_plan_index_prepared) {}

TfLiteStatus Subgraph::PrepareOpsAndTensors() {}

TfLiteStatus Subgraph::RemoveUnusedInputs() {}

TfLiteStatus Subgraph::Invoke() {}
TfLiteStatus Subgraph::InvokeImpl() {}

TfLiteStatus Subgraph::ResizeTensor(TfLiteContext* context,
                                    TfLiteTensor* tensor,
                                    TfLiteIntArray* new_size) {}

void Subgraph::ReportErrorImpl(const char* format, va_list args) {}

void Subgraph::ReportErrorC(TfLiteContext* context, const char* format, ...) {}

// Entry point for C node plugin API to report an error.
void Subgraph::ReportError(const char* format, ...) {}

TfLiteStatus Subgraph::AddTensors(int tensors_to_add,
                                  int* first_new_tensor_index) {}

TfLiteStatus Subgraph::AddTensors(TfLiteContext* context, int tensors_to_add,
                                  int* first_new_tensor_index) {}

TfLiteStatus Subgraph::GetNodeAndRegistration(
    int node_index, TfLiteNode** node, TfLiteRegistration** registration) {}

TfLiteStatus Subgraph::GetNodeAndRegistration(
    struct TfLiteContext* context, int node_index, TfLiteNode** node,
    TfLiteRegistration** registration) {}

TfLiteStatus Subgraph::SetTensorParametersReadOnly(
    int tensor_index, TfLiteType type, const char* name, const size_t ndims,
    const int* dims, TfLiteQuantization quantization, const char* buffer,
    size_t bytes, const Allocation* allocation, TfLiteSparsity* sparsity,
    const size_t buffer_identifier) {}

// Sets the type, name, dimensions, and quantization of the tensor at
// `tensor_index`. Unlike the read-only variant, this one takes no external
// buffer: the tensor's storage is allocated and managed by the interpreter.
TfLiteStatus Subgraph::SetTensorParametersReadWrite(
    int tensor_index, TfLiteType type, const char* name, const size_t ndims,
    const int* dims, TfLiteQuantization quantization, bool is_variable,
    const size_t ndims_signature, const int* dims_signature) {}

TfLiteStatus Subgraph::SetExecutionPlan(const std::vector<int>& new_plan) {}

TfLiteStatus Subgraph::ResizeTensorImpl(TfLiteTensor* tensor,
                                        TfLiteIntArray* new_size) {}

void Subgraph::OptimizeMemoryForLargeTensors(
    int large_tensors_thresholds_in_bytes) {}

TfLiteStatus Subgraph::SwitchToDelegateContext() {}

TfLiteStatus Subgraph::SwitchToKernelContext() {}

TfLiteStatus Subgraph::UndoAllDelegates() {}

TfLiteStatus Subgraph::RedoAllDelegates() {}

TfLiteStatus Subgraph::RemoveAllDelegates() {}

bool Subgraph::HasDelegates() {}

bool Subgraph::IsFullyDelegated() const {}

void Subgraph::EnsureTensorsVectorCapacity() {}

TfLiteStatus Subgraph::EnsureMemoryAllocations() {}

TfLiteStatus Subgraph::ModifyGraphWithDelegate(TfLiteDelegate* delegate) {}

TfLiteStatus Subgraph::ModifyGraphWithDelegateImpl(TfLiteDelegate* delegate) {}

TfLiteStatus Subgraph::SetCustomAllocationForTensor(
    int tensor_index, const TfLiteCustomAllocation& allocation, int64_t flags) {}

void Subgraph::SetName(const char* name) {}

const std::string& Subgraph::GetName() const {}

void Subgraph::DumpMemoryPlannerDebugInfo() const {}

void Subgraph::GetMemoryAllocInfo(SubgraphAllocInfo* alloc_info) const {}

std::unique_ptr<GraphInfo> Subgraph::CreateGraphInfo() {}

void Subgraph::InitializeTensorReleaseMap() {}

void Subgraph::MaybeReleaseDynamicTensors(const TfLiteNode& node,
                                          size_t node_index) {}

TfLiteStatus Subgraph::SetBufferHandleImpl(
    TfLiteContext* context, TfLiteTensor* tensor,
    TfLiteBufferHandle buffer_handle, TfLiteDelegate* delegate,
    bool release_existing_buffer_handle) {}

}  // namespace tflite