// chromium/third_party/tflite/src/tensorflow/lite/arena_planner.cc

/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/arena_planner.h"

#include <stddef.h>

#include <algorithm>
#include <cstdint>
#include <limits>
#include <memory>
#include <utility>
#include <vector>

#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/graph_info.h"
#include "tensorflow/lite/simple_memory_arena.h"

namespace tflite {

// Sentinel meaning "the planner has not yet planned/executed up to any node".
// NOTE(review): initializers were missing ("=;") in this revision; restored
// from the upstream TFLite implementation.
constexpr int32_t kLastActiveNodeUndefined = std::numeric_limits<int32_t>::max();
// Sentinel meaning "no allocation/deallocation node assigned to this tensor".
constexpr int32_t kNodeNotAssigned = std::numeric_limits<int32_t>::max();
// Size in bytes of a scalar tensor; tensors at or below this size are not
// worth sharing in-place (see InputTensorCanBeShared).
constexpr int32_t kScalarTensorBytes = 4;

// Constructs a planner over `graph_info` for subgraph `subgraph_index`.
// `preserve_all_tensors` disables deallocation/sharing so every tensor stays
// live; `tensor_alignment` is the arena alignment for tensor allocations.
// NOTE(review): the mem-initializer list was missing (":{}") in this
// revision; restored from upstream -- verify member names/order against
// arena_planner.h.
ArenaPlanner::ArenaPlanner(TfLiteContext* context,
                           std::unique_ptr<GraphInfo> graph_info,
                           bool preserve_all_tensors, int tensor_alignment,
                           int subgraph_index)
    : context_(context),
      graph_info_(std::move(graph_info)),
      arena_(kDefaultArenaAlignment, subgraph_index),
      has_nonpersistent_memory_(false),
      persistent_arena_(kDefaultArenaAlignment, subgraph_index),
      preserve_all_tensors_(preserve_all_tensors),
      tensor_alignment_(tensor_alignment),
      last_active_node_(kLastActiveNodeUndefined) {}

// Releases both arena buffers so the underlying memory is returned promptly.
// NOTE(review): body was empty in this revision; restored from upstream.
ArenaPlanner::~ArenaPlanner() {
  arena_.ReleaseBuffer();
  persistent_arena_.ReleaseBuffer();
}

// Returns the base pointer of the arena that backs allocations of `type`,
// or 0 for allocation types the planner does not manage. The original empty
// body fell off the end of a non-void function (undefined behavior).
std::intptr_t ArenaPlanner::BasePointer(TfLiteAllocationType type) {
  if (type == kTfLiteArenaRwPersistent) {
    return persistent_arena_.BasePointer();
  }
  if (type == kTfLiteArenaRw) {
    return arena_.BasePointer();
  }
  return 0;
}

// Invalidates all existing allocation plans in both arenas and clears the
// per-tensor allocation records. Returns kTfLiteOk on success.
// NOTE(review): body was empty (UB for a non-void function); restored from
// upstream.
TfLiteStatus ArenaPlanner::ResetAllocations() {
  TF_LITE_ENSURE_STATUS(arena_.ClearPlan());
  TF_LITE_ENSURE_STATUS(persistent_arena_.ClearPlan());
  // One record per tensor, value-initialized (no allocation assigned).
  allocs_.clear();
  allocs_.resize(graph_info_->num_tensors());
  return kTfLiteOk;
}

// Invalidates allocations of all non-persistent tensors first allocated
// strictly after `node`, so a subsequent ExecuteAllocations can replan from
// that point. NOTE(review): body was empty (UB); restored from upstream.
TfLiteStatus ArenaPlanner::ResetAllocationsAfter(int node) {
  TfLiteTensor* tensors = graph_info_->tensors();
  for (int i = 0; i < static_cast<int>(allocs_.size()); ++i) {
    if (allocs_[i].first_node > node && allocs_[i].size > 0) {
      TfLiteTensor& tensor = tensors[i];
      if (tensor.allocation_type == kTfLiteArenaRw) {
        allocs_[i].reset();
        tensor.data.raw = nullptr;
      }
    }
  }
  if (last_active_node_ > node) {
    // Rolling back: recompute the arena's active-alloc set up to `node`.
    arena_.CalculateActiveAllocs(allocs_, node);
  } else {
    // Moving forward: drop allocations that end after `node`.
    arena_.PurgeAfter(node);
  }
  last_active_node_ = node;
  return kTfLiteOk;
}

int ArenaPlanner::FindSharedTensor(int tensor_index) {}

bool ArenaPlanner::InputTensorCanBeShared(const TfLiteTensor& input_tensor,
                                          const TfLiteTensor& output_tensor,
                                          int input_id, int output_id,
                                          bool tensor_changed) {}

// An op can reuse one of the input tensors if:
// The sizes are equal (broadcast is an example where this may not be true)
// The tensors are allocated within the same arena.
// The number of references to the shared input is one in the case of ops which
// modify the contents.
// Subgraph inputs and outputs cannot be shared.
void ArenaPlanner::IdentifyInPlaceTensors() {}

// Computes, for every tensor, the first node that needs it allocated and the
// last node after which it may be deallocated, using per-tensor reference
// counts over the execution order. Graph inputs/outputs and variables get an
// extra reference so they are never deallocated. NOTE(review): body was
// empty (UB); reconstructed from upstream -- verify against arena_planner.h.
TfLiteStatus ArenaPlanner::PlanAllocations() {
  // Invalidate any existing data.
  const size_t num_tensors = graph_info_->num_tensors();
  TF_LITE_ENSURE_STATUS(ResetAllocations());
  alloc_node_.assign(num_tensors, kNodeNotAssigned);
  dealloc_node_.assign(num_tensors, kNodeNotAssigned);
  nodes_to_tensors_.clear();
  nodes_to_tensors_.resize(
      std::max(graph_info_->num_execution_nodes(), (size_t)1), {});

  // Keeps track of references to each tensor.
  refcounts_.assign(num_tensors, 0);

  // Records that `tensor` must be allocated no later than `node`.
  auto allocate = [this](int node, int tensor) -> TfLiteStatus {
    if (alloc_node_[tensor] != kNodeNotAssigned) {
      // Tensor has already been allocated.
      return kTfLiteOk;
    }
    TF_LITE_ENSURE(context_, dealloc_node_[tensor] == kNodeNotAssigned);
    alloc_node_[tensor] = node;
    return kTfLiteOk;
  };

  // Records that `tensor` may be deallocated after `node`.
  auto deallocate = [this](int node, int tensor) -> TfLiteStatus {
    if (alloc_node_[tensor] == kNodeNotAssigned) {
      // Never allocated (e.g. constant tensors); nothing to deallocate.
      return kTfLiteOk;
    }
    TF_LITE_ENSURE(context_, dealloc_node_[tensor] == kNodeNotAssigned);
    dealloc_node_[tensor] = node;
    return kTfLiteOk;
  };

  // Graph outputs must never be overwritten: pin them with an extra ref.
  for (int tensor_index : graph_info_->outputs()) {
    if (tensor_index != kTfLiteOptionalTensor) {
      ++refcounts_[tensor_index];
    }
  }

  // Variable tensors stay alive for the whole inference.
  for (int tensor_index : graph_info_->variables()) {
    ++refcounts_[tensor_index];
    TF_LITE_ENSURE(context_, tensor_index != kTfLiteOptionalTensor);
    // Variables are allocated at the very beginning.
    TF_LITE_ENSURE_STATUS(allocate(0, tensor_index));
    nodes_to_tensors_[0].insert(tensor_index);
  }

  // Graph inputs are allocated up front and never overwritten.
  for (int tensor_index : graph_info_->inputs()) {
    if (tensor_index != kTfLiteOptionalTensor) {
      ++refcounts_[tensor_index];
      TF_LITE_ENSURE_STATUS(allocate(0, tensor_index));
      nodes_to_tensors_[0].insert(tensor_index);
    }
  }

  // Copy reference counts before resolving sharing, so sharing decisions see
  // pre-sharing counts.
  std::vector<int> refcounts = refcounts_;
  const int num_execution_nodes = graph_info_->num_execution_nodes();
  // Count references to every node input.
  for (int i = 0; i < num_execution_nodes; ++i) {
    const TfLiteNode& node = graph_info_->node(i);
    TfLiteIntArray* node_inputs = node.inputs;
    for (int j = 0; j < node_inputs->size; ++j) {
      int tensor_index = node_inputs->data[j];
      if (tensor_index != kTfLiteOptionalTensor) {
        ++refcounts_[tensor_index];
      }
    }
  }

  IdentifyInPlaceTensors();

  // Re-count references, this time attributing shared buffers to their root.
  for (int i = 0; i < num_execution_nodes; ++i) {
    const TfLiteNode& node = graph_info_->node(i);
    TfLiteIntArray* node_inputs = node.inputs;
    for (int j = 0; j < node_inputs->size; ++j) {
      int tensor_index = node_inputs->data[j];
      if (tensor_index != kTfLiteOptionalTensor) {
        tensor_index = FindSharedTensor(tensor_index);
        ++refcounts[tensor_index];
      }
    }
  }

  // Walk the graph in execution order, scheduling allocations and, when a
  // refcount drops to zero, deallocations.
  for (int i = 0; i < num_execution_nodes; ++i) {
    const TfLiteNode& node = graph_info_->node(i);

    // First queue output tensors for allocation.
    TfLiteIntArray* node_outputs = node.outputs;
    for (int j = 0; j < node_outputs->size; ++j) {
      int tensor_index = node_outputs->data[j];
      if (tensor_index == kTfLiteOptionalTensor) continue;
      nodes_to_tensors_[i].insert(tensor_index);
      TF_LITE_ENSURE_STATUS(allocate(i, tensor_index));
    }

    // Then release inputs whose last consumer is this node.
    if (!preserve_all_tensors_) {
      TfLiteIntArray* node_inputs = node.inputs;
      for (int j = 0; j < node_inputs->size; ++j) {
        int tensor_index = node_inputs->data[j];
        if (tensor_index != kTfLiteOptionalTensor) {
          tensor_index = FindSharedTensor(tensor_index);
          --refcounts[tensor_index];
          if (refcounts[tensor_index] == 0) {
            TF_LITE_ENSURE_STATUS(deallocate(i, tensor_index));
          }
        }
      }
    }
  }
  // Graph outputs (and variables/inputs) keep a positive refcount, so they
  // are never scheduled for deallocation here.
  return kTfLiteOk;
}

// Plans and commits allocations for nodes [first_node, last_node], including
// temporaries created during op Prepare, then resolves tensor data pointers.
// NOTE(review): body was empty (UB); reconstructed from upstream.
TfLiteStatus ArenaPlanner::ExecuteAllocations(int first_node, int last_node) {
  // Grow bookkeeping if tensors were added since PlanAllocations (ops may
  // create temporary tensors in their `prepare`).
  const size_t num_tensors = graph_info_->num_tensors();
  TF_LITE_ENSURE(context_, num_tensors >= allocs_.size());
  alloc_node_.resize(num_tensors, kNodeNotAssigned);
  dealloc_node_.resize(num_tensors, kNodeNotAssigned);
  allocs_.resize(num_tensors);
  // Temporaries live only for the node that creates them (unless preserving).
  const int num_execution_nodes = graph_info_->num_execution_nodes();
  for (int i = first_node; i <= last_node && i < num_execution_nodes; ++i) {
    const TfLiteNode& node = graph_info_->node(i);
    TfLiteIntArray* node_temporaries = node.temporaries;
    for (int j = 0; j < node_temporaries->size; ++j) {
      int tensor_index = node_temporaries->data[j];
      alloc_node_[tensor_index] = i;
      nodes_to_tensors_[i].insert(tensor_index);
      if (!preserve_all_tensors_) {
        dealloc_node_[tensor_index] = i;
      }
    }
  }

  std::vector<int32_t> tensors_allocated;
  TF_LITE_ENSURE_STATUS(
      CalculateAllocations(first_node, last_node, &tensors_allocated));
  bool arena_reallocated = false;
  TF_LITE_ENSURE_STATUS(Commit(&arena_reallocated));

  TfLiteTensor* tensors = graph_info_->tensors();
  if (arena_reallocated) {
    // The arena moved: every tensor pointer is stale and must be re-resolved.
    for (int i = 0; i < static_cast<int>(num_tensors); ++i) {
      TF_LITE_ENSURE_STATUS(ResolveTensorAllocation(i, tensors));
    }
  } else {
    // Only freshly (re)allocated tensors need new pointers.
    for (int tensor_index : tensors_allocated) {
      TF_LITE_ENSURE_STATUS(ResolveTensorAllocation(tensor_index, tensors));
    }
  }
  return kTfLiteOk;
}

// Frees the non-persistent arena's backing buffer and nulls out the data
// pointers of all kTfLiteArenaRw tensors so stale pointers cannot be used.
// NOTE(review): body was empty (UB); restored from upstream.
TfLiteStatus ArenaPlanner::ReleaseNonPersistentMemory() {
  TF_LITE_ENSURE_STATUS(arena_.ReleaseBuffer());
  has_nonpersistent_memory_ = false;
  TfLiteTensor* tensors = graph_info_->tensors();
  for (int i = 0; i < static_cast<int>(graph_info_->num_tensors()); ++i) {
    TfLiteTensor& tensor = tensors[i];
    if (tensor.allocation_type == kTfLiteArenaRw) {
      tensor.data.raw = nullptr;
    }
  }
  return kTfLiteOk;
}

// Re-materializes the non-persistent arena buffer and re-resolves the data
// pointers of all kTfLiteArenaRw tensors against it.
// NOTE(review): body was empty (UB); restored from upstream.
TfLiteStatus ArenaPlanner::AcquireNonPersistentMemory() {
  // Commit allocates (or re-allocates) the underlying buffer.
  bool reallocated;
  TF_LITE_ENSURE_STATUS(arena_.Commit(&reallocated));
  has_nonpersistent_memory_ = true;
  TfLiteTensor* tensors = graph_info_->tensors();
  for (int i = 0; i < static_cast<int>(graph_info_->num_tensors()); ++i) {
    TfLiteTensor& tensor = tensors[i];
    if (tensor.allocation_type == kTfLiteArenaRw) {
      TF_LITE_ENSURE_STATUS(ResolveTensorAllocation(i, tensors));
    }
  }
  return kTfLiteOk;
}

bool ArenaPlanner::HasNonPersistentMemory() {}

void ArenaPlanner::DumpDebugInfo(const std::vector<int>& execution_plan) const {}

void ArenaPlanner::GetAllocInfo(size_t* arena_size,
                                size_t* arena_persist_size) const {}

// Commits both arenas; sets `*reallocated` if either arena's buffer moved
// (meaning all resolved tensor pointers are stale). The original empty body
// never wrote `*reallocated` and fell off the end (UB).
TfLiteStatus ArenaPlanner::Commit(bool* reallocated) {
  bool arena_reallocated, persistent_arena_reallocated;
  TF_LITE_ENSURE_STATUS(arena_.Commit(&arena_reallocated));
  TF_LITE_ENSURE_STATUS(
      persistent_arena_.Commit(&persistent_arena_reallocated));
  *reallocated = arena_reallocated;
  *reallocated |= persistent_arena_reallocated;
  has_nonpersistent_memory_ = true;
  return kTfLiteOk;
}

void ArenaPlanner::CreateTensorAllocationVector(
    std::vector<int32_t>* tensors_to_allocate) {}

std::vector<int32_t> ArenaPlanner::GetTensorsToAllocate(int first_node,
                                                        int last_node) {}

// Computes arena offsets for every tensor needed by [first_node, last_node]
// that doesn't already have a large-enough allocation, appending those tensor
// ids to `tensors_allocated`. NOTE(review): body was empty (UB);
// reconstructed from upstream -- verify against simple_memory_arena.h.
TfLiteStatus ArenaPlanner::CalculateAllocations(
    int first_node, int last_node, std::vector<int32_t>* tensors_allocated) {
  // Candidate tensors, in planning order.
  const std::vector<int32_t> tensors_to_allocate =
      GetTensorsToAllocate(first_node, last_node);

  tensors_allocated->reserve(tensors_to_allocate.size());
  TfLiteTensor* tensors = graph_info_->tensors();
  for (const auto& tensor_index : tensors_to_allocate) {
    TfLiteTensor& tensor = tensors[tensor_index];
    if (tensor.allocation_type == kTfLiteArenaRw) {
      // Only (re)allocate if the existing allocation is too small.
      if (allocs_[tensor_index].size < tensor.bytes) {
        tensors_allocated->push_back(tensor_index);
      }
    } else if (tensor.allocation_type == kTfLiteArenaRwPersistent) {
      tensors_allocated->push_back(tensor_index);
    }
  }

  if (tensors_allocated->empty()) {
    last_active_node_ = last_node;
    return kTfLiteOk;
  }
  if (first_node < last_active_node_) {
    // Replanning an earlier region invalidates the whole active-alloc set.
    arena_.ResetAllocs();
    last_active_node_ = first_node;
  } else {
    // Moving forward: drop allocations that are no longer active.
    arena_.PurgeActiveAllocs(first_node);
  }
  CreateTensorAllocationVector(tensors_allocated);
  for (const auto& tensor_index : *tensors_allocated) {
    TfLiteTensor& tensor = tensors[tensor_index];
    if (tensor.allocation_type == kTfLiteArenaRw) {
      TF_LITE_ENSURE_STATUS(
          arena_.Allocate(context_, tensor_alignment_, tensor.bytes,
                          tensor_index, alloc_node_[tensor_index],
                          dealloc_node_[tensor_index], &allocs_[tensor_index]));
    }
    // Persistent tensors are allocated once and never resized/freed;
    // the `size == 0` check prevents re-allocation.
    if (tensor.allocation_type == kTfLiteArenaRwPersistent &&
        allocs_[tensor_index].size == 0) {
      if (allocs_[tensor_index].size < tensor.bytes) {
        TF_LITE_ENSURE_STATUS(persistent_arena_.Allocate(
            context_, tensor_alignment_, tensor.bytes, tensor_index,
            /*first_node=*/alloc_node_[tensor_index],
            /*last_node=*/std::numeric_limits<int32_t>::max(),
            &allocs_[tensor_index]));
      }
    }
  }
  last_active_node_ = last_node;
  return kTfLiteOk;
}

bool AreTensorsAllocatedInSameArena(int32_t root_tensor_index,
                                    int32_t tensor_index,
                                    const TfLiteTensor* tensors) {}

// Writes the final data pointer for `tensor_index`: shared tensors alias
// their root tensor's pointer; otherwise the pointer is resolved against the
// owning arena. Zero-sized RW tensors keep a null pointer.
// NOTE(review): body was empty (UB); reconstructed from upstream.
TfLiteStatus ArenaPlanner::ResolveTensorAllocation(int32_t tensor_index,
                                                   TfLiteTensor* tensors) {
  // Tensors that share memory point at their root tensor's buffer.
  auto actual_tensor_it = actual_tensor_id_.find(tensor_index);
  TfLiteTensor& tensor = tensors[tensor_index];
  int32_t root_tensor_index = actual_tensor_it == actual_tensor_id_.end()
                                  ? tensor_index
                                  : actual_tensor_it->second;
  const TfLiteTensor& root_tensor = tensors[root_tensor_index];
  if (root_tensor_index != tensor_index) {
    if (AreTensorsAllocatedInSameArena(root_tensor_index, tensor_index,
                                       tensors)) {
      tensor.data.data = root_tensor.data.data;
      return kTfLiteOk;
    }
  }
  if (tensor.allocation_type == kTfLiteArenaRw) {
    // Skip resolution for zero-sized tensors, leaving data as nullptr.
    if (allocs_[tensor_index].size != 0) {
      return arena_.ResolveAlloc(context_, allocs_[tensor_index],
                                 &tensor.data.raw);
    }
  }
  if (tensor.allocation_type == kTfLiteArenaRwPersistent) {
    return persistent_arena_.ResolveAlloc(context_, allocs_[tensor_index],
                                          &tensor.data.raw);
  }
  return kTfLiteOk;
}

}  // namespace tflite