// chromium/third_party/tflite/src/tensorflow/lite/core/c/common.cc

/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#include "tensorflow/lite/core/c/common.h"

#ifndef TF_LITE_STATIC_MEMORY
#include <cstdlib>
#endif  // TF_LITE_STATIC_MEMORY

#include <cstring>
#include <type_traits>
#include <utility>

#include "tensorflow/lite/core/c/c_api_types.h"
#ifdef TF_LITE_TENSORFLOW_PROFILER
#include "tensorflow/lite/tensorflow_profiler_logger.h"
#endif

namespace {

template <class T>
size_t TfLiteVarArrayGetSizeInBytes(const int size) {}

template <class T, class U>
int TfLiteVarArrayEqualsArray(const T* const a, const int b_size,
                              const U* const b_data) {}

template <class T>
int TfLiteVarArrayEqual(const T* const a, const T* const b) {}
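
// Illustrative sketch only (hypothetical Sketch* names, not TFLite's
// implementation): for a flexible-array-member type such as TfLiteIntArray,
// the storage size and an element-wise comparison can be expressed as below.
// The real helpers above may differ, e.g. in compiler-specific size handling.
template <class T>
size_t SketchVarArraySizeInBytes(const int size) {
  // sizeof(T) covers the leading `size` field; each element of the trailing
  // flexible `data[]` member adds sizeof(element) bytes.
  constexpr size_t element_size = sizeof(std::declval<T>().data[0]);
  return sizeof(T) + element_size * static_cast<size_t>(size);
}

template <class T, class U>
int SketchVarArrayEqualsArray(const T* const a, const int b_size,
                              const U* const b_data) {
  if (a == nullptr) return b_size == 0;
  if (a->size != b_size) return 0;
  return std::memcmp(a->data, b_data, a->size * sizeof(a->data[0])) == 0;
}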

#ifndef TF_LITE_STATIC_MEMORY

template <class T>
T* TfLiteVarArrayCreate(const int size) {}

template <class T>
T* TfLiteVarArrayCopy(const T* const src) {}
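
// Illustrative sketch only: a plausible allocation/copy pattern for these
// variable-size arrays, again under hypothetical Sketch* names. The real
// helpers above may treat corner cases (negative sizes, allocation failure)
// differently.
template <class T>
T* SketchVarArrayCreate(const int size) {
  T* result = static_cast<T*>(std::malloc(SketchVarArraySizeInBytes<T>(size)));
  if (result != nullptr) result->size = size;
  return result;
}

template <class T>
T* SketchVarArrayCopy(const T* const src) {
  if (src == nullptr) return nullptr;
  T* copy = SketchVarArrayCreate<T>(src->size);
  if (copy != nullptr) {
    std::memcpy(copy->data, src->data, src->size * sizeof(src->data[0]));
  }
  return copy;
}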

#endif  // TF_LITE_STATIC_MEMORY

template <class T>
void TfLiteVarArrayFree(T* a) {}

}  // namespace

extern "C" {

size_t TfLiteIntArrayGetSizeInBytes(int size) {}

int TfLiteIntArrayEqual(const TfLiteIntArray* a, const TfLiteIntArray* b) {}

int TfLiteIntArrayEqualsArray(const TfLiteIntArray* a, int b_size,
                              const int b_data[]) {}
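
// Illustrative only: a hypothetical helper showing how the comparison entry
// points above are typically used, e.g. to check whether a tensor shape
// matches an expected list of dimensions. A non-zero return means "equal".
static bool SketchShapeMatches(const TfLiteIntArray* shape, int expected_rank,
                               const int* expected_dims) {
  return TfLiteIntArrayEqualsArray(shape, expected_rank, expected_dims) != 0;
}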

#ifndef TF_LITE_STATIC_MEMORY

TfLiteIntArray* TfLiteIntArrayCreate(int size) {}

TfLiteIntArray* TfLiteIntArrayCopy(const TfLiteIntArray* src) {}

void TfLiteIntArrayFree(TfLiteIntArray* a) {}
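
// Illustrative only: the intended lifecycle of a TfLiteIntArray is
// create -> fill `data` -> (optionally copy) -> free. The Sketch* helpers
// below are hypothetical and exist purely as a usage example of the public
// functions above.
static TfLiteIntArray* SketchMakeShape2x3() {
  TfLiteIntArray* shape = TfLiteIntArrayCreate(2);
  if (shape == nullptr) return nullptr;
  shape->data[0] = 2;
  shape->data[1] = 3;
  return shape;
}

static void SketchCopyAndReleaseShape(const TfLiteIntArray* shape) {
  TfLiteIntArray* copy = TfLiteIntArrayCopy(shape);  // Deep copy (or null).
  TfLiteIntArrayFree(copy);
  // The original `shape` stays owned by the caller and must eventually be
  // released with TfLiteIntArrayFree as well.
}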

#endif  // TF_LITE_STATIC_MEMORY

int TfLiteFloatArrayGetSizeInBytes(int size) {}

#ifndef TF_LITE_STATIC_MEMORY

TfLiteFloatArray* TfLiteFloatArrayCreate(int size) {}

TfLiteFloatArray* TfLiteFloatArrayCopy(const TfLiteFloatArray* src) {}

void TfLiteFloatArrayFree(TfLiteFloatArray* a) {}

void TfLiteTensorDataFree(TfLiteTensor* t) {}

void TfLiteQuantizationFree(TfLiteQuantization* quantization) {}
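
// Illustrative only: building a per-tensor affine quantization and handing it
// to TfLiteQuantizationFree. The sketch assumes, as the function name
// suggests, that TfLiteQuantizationFree releases the nested scale/zero_point
// arrays and the params struct itself; allocation-failure handling is elided
// for brevity.
static void SketchBuildAndFreeAffineQuantization() {
  TfLiteAffineQuantization* affine = static_cast<TfLiteAffineQuantization*>(
      std::malloc(sizeof(TfLiteAffineQuantization)));
  if (affine == nullptr) return;
  affine->scale = TfLiteFloatArrayCreate(1);
  affine->scale->data[0] = 0.5f;
  affine->zero_point = TfLiteIntArrayCreate(1);
  affine->zero_point->data[0] = 0;
  affine->quantized_dimension = 0;

  TfLiteQuantization quantization;
  quantization.type = kTfLiteAffineQuantization;
  quantization.params = affine;
  TfLiteQuantizationFree(&quantization);
}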

void TfLiteSparsityFree(TfLiteSparsity* sparsity) {}

void TfLiteTensorFree(TfLiteTensor* t) {}

void TfLiteTensorReset(TfLiteType type, const char* name, TfLiteIntArray* dims,
                       TfLiteQuantizationParams quantization, char* buffer,
                       size_t size, TfLiteAllocationType allocation_type,
                       const void* allocation, bool is_variable,
                       TfLiteTensor* tensor) {}
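
// Illustrative only: initializing a caller-owned tensor as a dynamically
// allocated float scalar. This sketch assumes TfLiteTensorReset stores `dims`
// in the tensor (i.e. ownership of `dims` passes to the tensor and it is
// released together with the tensor later); that convention is an assumption
// here, not something this example verifies.
static void SketchInitScalarTensor(TfLiteTensor* tensor) {
  TfLiteIntArray* dims = TfLiteIntArrayCreate(1);
  if (dims == nullptr) return;
  dims->data[0] = 1;
  TfLiteQuantizationParams params;
  params.scale = 0.0f;
  params.zero_point = 0;
  TfLiteTensorReset(kTfLiteFloat32, /*name=*/"sketch_scalar", dims, params,
                    /*buffer=*/nullptr, /*size=*/0, kTfLiteDynamic,
                    /*allocation=*/nullptr, /*is_variable=*/false, tensor);
}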

TfLiteStatus TfLiteTensorCopy(const TfLiteTensor* src, TfLiteTensor* dst) {}
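
// Illustrative only: the usual calling pattern for TfLiteTensorCopy is to
// check the returned TfLiteStatus; the exact preconditions on `dst` are
// defined by the implementation, not by this hypothetical helper.
static bool SketchTryCopyTensor(const TfLiteTensor* src, TfLiteTensor* dst) {
  return TfLiteTensorCopy(src, dst) == kTfLiteOk;
}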

TfLiteStatus TfLiteTensorResizeMaybeCopy(size_t num_bytes, TfLiteTensor* tensor,
                                         bool preserve_data) {}

TfLiteStatus TfLiteTensorRealloc(size_t num_bytes, TfLiteTensor* tensor) {}
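
// Illustrative only: growing a tensor's buffer through TfLiteTensorRealloc and
// zero-filling it. The sketch assumes reallocation is meant for tensors whose
// allocation_type is kTfLiteDynamic and that a successful call leaves
// `tensor->data.f` pointing at at least `count` floats; both are assumptions
// stated here, not guarantees derived from this file.
static TfLiteStatus SketchZeroFillDynamicFloats(TfLiteTensor* tensor,
                                                int count) {
  const size_t bytes = sizeof(float) * static_cast<size_t>(count);
  const TfLiteStatus status = TfLiteTensorRealloc(bytes, tensor);
  if (status != kTfLiteOk) return status;
  for (int i = 0; i < count; ++i) tensor->data.f[i] = 0.0f;
  return kTfLiteOk;
}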
#endif  // TF_LITE_STATIC_MEMORY

const char* TfLiteTypeGetName(TfLiteType type) {}
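
// Illustrative only: TfLiteTypeGetName maps a TfLiteType enumerator to a
// short, statically allocated, human-readable name. The exact spellings are
// defined by the implementation, so this hypothetical helper just collects a
// few of them without asserting their values.
static void SketchCollectTypeNames(const char* names[3]) {
  names[0] = TfLiteTypeGetName(kTfLiteFloat32);
  names[1] = TfLiteTypeGetName(kTfLiteInt8);
  names[2] = TfLiteTypeGetName(kTfLiteNoType);
}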

TfLiteDelegate TfLiteDelegateCreate() {}
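
// Illustrative only: a minimal no-op delegate built on top of
// TfLiteDelegateCreate(), which is expected to hand back a default-initialized
// TfLiteDelegate. The Sketch* names are hypothetical; only the fields needed
// for the example are filled in.
static TfLiteStatus SketchDelegatePrepare(TfLiteContext* context,
                                          TfLiteDelegate* delegate) {
  (void)context;
  (void)delegate;
  return kTfLiteOk;
}

static TfLiteDelegate SketchMakeNoopDelegate() {
  TfLiteDelegate delegate = TfLiteDelegateCreate();
  delegate.Prepare = SketchDelegatePrepare;
  delegate.flags = kTfLiteDelegateFlagsNone;
  return delegate;
}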

// Returns a tensor data allocation strategy.
TfLiteAllocationStrategy TfLiteTensorGetAllocationStrategy(
    const TfLiteTensor* const t) {}

// Returns how stable a tensor data buffer address is across runs.
TfLiteRunStability TfLiteTensorGetBufferAddressStability(
    const TfLiteTensor* const t) {}

// Returns how stable a tensor's data values are across runs.
TfLiteRunStability TfLiteTensorGetDataStability(const TfLiteTensor* const t) {}

// Returns the operation step when the data of a tensor is populated.
//
// Some operations can precompute their results before the evaluation step. This
// makes the data available earlier for subsequent operations.
TfLiteRunStep TfLiteTensorGetDataKnownStep(const TfLiteTensor* t) {}

// Returns the operation step when the shape of a tensor is computed.
//
// Some operations can precompute the shape of their results before the
// evaluation step. This makes the shape available earlier for subsequent
// operations.
TfLiteRunStep TfLiteTensorGetShapeKnownStep(const TfLiteTensor* t) {}

}  // extern "C"
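
// Illustrative only: a hypothetical aggregation of the introspection getters
// defined above, useful e.g. when logging how a tensor is allocated and when
// its data and shape become known. The SketchTensorReport type is not part of
// TFLite.
namespace {

struct SketchTensorReport {
  TfLiteAllocationStrategy allocation_strategy;
  TfLiteRunStability buffer_address_stability;
  TfLiteRunStability data_stability;
  TfLiteRunStep data_known_step;
  TfLiteRunStep shape_known_step;
};

SketchTensorReport SketchDescribeTensor(const TfLiteTensor* t) {
  SketchTensorReport report;
  report.allocation_strategy = TfLiteTensorGetAllocationStrategy(t);
  report.buffer_address_stability = TfLiteTensorGetBufferAddressStability(t);
  report.data_stability = TfLiteTensorGetDataStability(t);
  report.data_known_step = TfLiteTensorGetDataKnownStep(t);
  report.shape_known_step = TfLiteTensorGetShapeKnownStep(t);
  return report;
}

}  // namespace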