// chromium/third_party/tflite/src/tensorflow/lite/kernels/internal/types.h

/* Copyright 2023 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_TYPES_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_TYPES_H_

#include <algorithm>
#include <cstdint>
#include <cstring>
#include <initializer_list>
#include <type_traits>

#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/kernels/internal/runtime_shape.h"

namespace tflite {

// Activation function fused into the producing op. NOTE(review): the
// enumerator list is elided in this stripped copy -- confirm against the full
// tensorflow/lite sources before relying on specific values.
enum class FusedActivationFunctionType : uint8_t {};
// Padding scheme selector for conv/pool-style ops. Enumerators elided in this
// copy.
enum class PaddingType : uint8_t {};

// Per-edge padding amounts for 2D ops. Fields elided in this copy.
struct PaddingValues {};

// Per-edge padding amounts for 3D ops. Fields elided in this copy.
struct Padding3DValues {};

// This enumeration allows for non-default formats for the weights array
// of a fully-connected operator, allowing the use of special optimized
// runtime paths. Enumerators elided in this copy.
enum class FullyConnectedWeightsFormat : uint8_t {};

// Quantization parameters, determining the mapping of quantized values
// to real values (i.e. determining how quantized values are mathematically
// interpreted).
//
// The correspondence is as follows:
//
//   real_value = scale * (quantized_value - zero_point);
//
// In other words, zero_point designates which quantized value corresponds to
// the real 0 value, and scale designates the difference between the real values
// corresponding to consecutive quantized values differing by 1.
//
// NOTE(review): the member fields are elided in this stripped copy -- confirm
// the zero_point/scale members against the full tensorflow/lite sources.
struct QuantizationParams {};

// Equality comparison for QuantizationParams. Body elided in this copy;
// presumably compares all member fields -- verify against the full source.
inline bool operator==(const QuantizationParams& qp1,
                       const QuantizationParams& qp2) {}

// Quantization parameters for each channel, determining the mapping of
// quantized values to real values. See QuantizationParams for a single set of
// parameters per tensor. This has one parameter set per channel.
//
// The correspondence is as follows:
//
//   real_value = scale[channel] * (quantized_value - zero_point[channel]);
//
// Fields elided in this copy.
struct PerChannelQuantizationParams {};

// Gets next index to iterate through a multidimensional array.
// NOTE(review): implementation elided in this stripped copy; presumably
// advances `current` in row-major order and reports exhaustion via the return
// value -- confirm against the full tensorflow/lite sources.
template <typename IndexType = int>
inline bool NextIndex(const int num_dims, const int* dims, IndexType* current) {}

// Gets offset of index if reducing on axis. When reducing, the flattened offset
// will not change, if the input index changes on the given axis. For example,
// if you have a 3D tensor and you are reducing to 2D by eliminating axis 0,
// then index (0, 1, 2) and index (1, 1, 2) will map to the same flattened
// offset.
// TODO(kanlig): uses Dims to represent dimensions.
// Implementation elided in this copy.
inline size_t ReducedOutputOffset(const int num_dims, const int* dims,
                                  const int* index, const int num_axis,
                                  const int* axis) {}

// Since tensors with '0' in their shape are valid in TF, these offset functions
// allow that as long as the corresponding index is also 0. It is up to the
// calling ops to ensure that they perform verification checks on tensor shapes
// if they don't support a particular behavior.

// Row-major flat offset into a 4D array. Body elided in this copy.
inline int Offset(const Dims<4>& dims, int i0, int i1, int i2, int i3) {}

// Overload taking the four indices as an array. Body elided in this copy.
inline int Offset(const Dims<4>& dims, int* index) {}

// Get array size, DCHECKing that the dim index is in range.
//
// Note that this will be phased out with Dims<4>, since RuntimeShape::Dims()
// already performs this check.
// NOTE(review): bodies in this section are elided in this stripped copy --
// confirm behavior against the full tensorflow/lite sources.
template <int N>
int ArraySize(const Dims<N>& array, int index) {}

// Get common array size, DCHECKing that they all agree.
template <typename ArrayType1, typename ArrayType2>
int MatchingArraySize(const ArrayType1& array1, int index1,
                      const ArrayType2& array2, int index2) {}

// Variadic overload: checks an arbitrary number of (array, index) pairs.
template <typename ArrayType1, typename ArrayType2, typename... Args>
int MatchingArraySize(const ArrayType1& array1, int index1,
                      const ArrayType2& array2, int index2, Args... args) {}

// Get common shape dim, DCHECKing that they all agree.
inline int MatchingDim(const RuntimeShape& shape1, int index1,
                       const RuntimeShape& shape2, int index2) {}

// Variadic overload: checks an arbitrary number of (shape, index) pairs.
template <typename... Args>
int MatchingDim(const RuntimeShape& shape1, int index1,
                const RuntimeShape& shape2, int index2, Args... args) {}

// Will be phased out with Dims<4>, replaced by RuntimeShape::FlatSize().
template <int N>
inline int FlatSize(const Dims<N>& dims) {}

TFLITE_DEPRECATED("Prefer FlatSize.")
inline int RequiredBufferSizeForDims(const Dims<4>& dims) {}

// Element-count calculation, checking agreement with one or more other shapes.
// NOTE(review): bodies elided in this stripped copy.
inline int MatchingElementsSize(const RuntimeShape& shape,
                                const RuntimeShape& check_shape_0) {}

inline int MatchingElementsSize(const RuntimeShape& shape,
                                const RuntimeShape& check_shape_0,
                                const RuntimeShape& check_shape_1) {}

// Flat size calculation, checking that dimensions match with one or more other
// arrays. Bodies elided in this copy.
inline int MatchingFlatSize(const RuntimeShape& shape,
                            const RuntimeShape& check_shape_0) {}

inline int MatchingFlatSize(const RuntimeShape& shape,
                            const RuntimeShape& check_shape_0,
                            const RuntimeShape& check_shape_1) {}

inline int MatchingFlatSize(const RuntimeShape& shape,
                            const RuntimeShape& check_shape_0,
                            const RuntimeShape& check_shape_1,
                            const RuntimeShape& check_shape_2) {}

inline int MatchingFlatSize(const RuntimeShape& shape,
                            const RuntimeShape& check_shape_0,
                            const RuntimeShape& check_shape_1,
                            const RuntimeShape& check_shape_2,
                            const RuntimeShape& check_shape_3) {}

// Flat size calculation, checking that dimensions match with one or more other
// arrays. Legacy Dims<N> overloads; bodies elided in this stripped copy.
template <int N>
inline int MatchingFlatSize(const Dims<N>& dims, const Dims<N>& check_dims_0) {}

template <int N>
inline int MatchingFlatSize(const Dims<N>& dims, const Dims<N>& check_dims_0,
                            const Dims<N>& check_dims_1) {}

template <int N>
inline int MatchingFlatSize(const Dims<N>& dims, const Dims<N>& check_dims_0,
                            const Dims<N>& check_dims_1,
                            const Dims<N>& check_dims_2) {}

template <int N>
inline int MatchingFlatSize(const Dims<N>& dims, const Dims<N>& check_dims_0,
                            const Dims<N>& check_dims_1,
                            const Dims<N>& check_dims_2,
                            const Dims<N>& check_dims_3) {}

// Flat size calculation, checking if their extended shapes match. Bodies
// elided in this copy.
inline int MatchingExtendedShapeFlatSize(const RuntimeShape& shape,
                                         const RuntimeShape& check_shape_0) {}

inline int MatchingExtendedShapeFlatSize(const RuntimeShape& shape,
                                         const RuntimeShape& check_shape_0,
                                         const RuntimeShape& check_shape_1) {}

inline int MatchingExtendedShapeFlatSize(const RuntimeShape& shape,
                                         const RuntimeShape& check_shape_0,
                                         const RuntimeShape& check_shape_1,
                                         const RuntimeShape& check_shape_2) {}

inline int MatchingExtendedShapeFlatSize(const RuntimeShape& shape,
                                         const RuntimeShape& check_shape_0,
                                         const RuntimeShape& check_shape_1,
                                         const RuntimeShape& check_shape_2,
                                         const RuntimeShape& check_shape_3) {}

// Data is required to be contiguous, and so many operators can use either the
// full array flat size or the flat size with one dimension skipped (commonly
// the depth). NOTE(review): bodies in this section are elided in this
// stripped copy -- confirm behavior against the full tensorflow/lite sources.
template <int N>
inline int FlatSizeSkipDim(const Dims<N>& dims, int skip_dim) {}

// A combination of MatchingFlatSize() and FlatSizeSkipDim().
template <int N>
inline int MatchingFlatSizeSkipDim(const Dims<N>& dims, int skip_dim,
                                   const Dims<N>& check_dims_0) {}

template <int N>
inline int MatchingFlatSizeSkipDim(const Dims<N>& dims, int skip_dim,
                                   const Dims<N>& check_dims_0,
                                   const Dims<N>& check_dims_1) {}

template <int N>
inline int MatchingFlatSizeSkipDim(const Dims<N>& dims, int skip_dim,
                                   const Dims<N>& check_dims_0,
                                   const Dims<N>& check_dims_1,
                                   const Dims<N>& check_dims_2) {}

template <int N>
inline int MatchingFlatSizeSkipDim(const Dims<N>& dims, int skip_dim,
                                   const Dims<N>& check_dims_0,
                                   const Dims<N>& check_dims_1,
                                   const Dims<N>& check_dims_2,
                                   const Dims<N>& check_dims_3) {}

// Data is required to be contiguous, and so many operators can use either the
// full array flat size or the flat size with one dimension skipped (commonly
// the depth). RuntimeShape overloads of the Dims<N> family above.
inline int FlatSizeSkipDim(const RuntimeShape& shape, int skip_dim) {}

// A combination of MatchingFlatSize() and FlatSizeSkipDim().
inline int MatchingFlatSizeSkipDim(const RuntimeShape& shape, int skip_dim,
                                   const RuntimeShape& check_shape_0) {}

inline int MatchingFlatSizeSkipDim(const RuntimeShape& shape, int skip_dim,
                                   const RuntimeShape& check_shape_0,
                                   const RuntimeShape& check_shape_1) {}

inline int MatchingFlatSizeSkipDim(const RuntimeShape& shape, int skip_dim,
                                   const RuntimeShape& check_shape_0,
                                   const RuntimeShape& check_shape_1,
                                   const RuntimeShape& check_shape_2) {}

inline int MatchingFlatSizeSkipDim(const RuntimeShape& shape, int skip_dim,
                                   const RuntimeShape& check_shape_0,
                                   const RuntimeShape& check_shape_1,
                                   const RuntimeShape& check_shape_2,
                                   const RuntimeShape& check_shape_3) {}

// True when the Dims strides describe a densely packed layout. Body elided in
// this copy.
template <int N>
bool IsPackedWithoutStrides(const Dims<N>& dims) {}

// Fills in the strides field of `dims` from its sizes. Body elided in this
// copy.
template <int N>
void ComputeStrides(Dims<N>* dims) {}

// Classifies how a binary broadcastable op's two input shapes relate, used to
// pick a fast path. Enumerators elided in this stripped copy.
enum class BroadcastableOpCategory : uint8_t {};

struct MinMax {};
static_assert;

// Parameters for a standalone activation op. Fields elided in this stripped
// copy.
struct ActivationParams {};

// Relu-family activation parameters; extends ActivationParams. Fields elided
// in this copy.
struct ReluParams : public ActivationParams {};

// Styles of resizing op usages. For example, kImageStyle can be used with a Pad
// op for pattern-specific optimization. Enumerators elided in this copy.
enum class ResizingCategory : uint8_t {};

// For Add, Sub, Mul ops. Fields elided in this copy.
struct ArithmeticParams {};

struct ConcatenationParams {};

struct ComparisonParams {};

struct ConvParams {};

struct Conv3DParams {};

Conv3DTransposeParams;

// ---------------------------------------------------------------------------
// Per-op parameter structs consumed by the reference and optimized kernels.
// NOTE(review): all member fields are elided in this stripped copy -- see the
// full tensorflow/lite sources for the members of each struct.
// ---------------------------------------------------------------------------
struct DepthToSpaceParams {};

struct DepthwiseParams {};

struct DequantizationParams {};

struct PerChannelDequantizationParams {};

struct FakeQuantParams {};

struct FullyConnectedParams {};

struct GatherParams {};

struct L2NormalizationParams {};

struct LocalResponseNormalizationParams {};

struct HardSwishParams {};

struct LogisticParams {};

struct LstmCellParams {};

struct MeanParams {};

struct PackParams {};

struct PadParams {};

struct PreluParams {};

struct PoolParams {};

struct ReshapeParams {};

struct ResizeBilinearParams {};

struct ResizeNearestNeighborParams {};

struct SliceParams {};

struct SoftmaxParams {};

struct SpaceToBatchParams {};

struct SpaceToDepthParams {};

struct SplitParams {};

struct SqueezeParams {};

struct StridedSliceParams {};

struct TanhParams {};

// Maximum rank supported by the Transpose op; sizes the permutation arrays
// inside TransposeParams. The stripped line had its initializer removed
// (`constexpr int kTransposeMaxDimensions =;`) and did not compile; the value
// 6 is restored from the upstream TensorFlow Lite sources.
constexpr int kTransposeMaxDimensions = 6;

// Parameters for the Transpose op (permutation and rank upstream). Fields
// elided in this stripped copy.
struct TransposeParams {};

// Parameters for the Unpack op. Fields elided in this copy.
struct UnpackParams {};

// Parameters for the LeakyRelu op. Fields elided in this copy.
struct LeakyReluParams {};

// Writes a fused-activation [min, max] clamp range into `params`, one overload
// per element type. NOTE(review): bodies are elided in this stripped copy;
// presumably each overload stores into the matching typed min/max fields of P
// -- confirm against the full tensorflow/lite sources.
template <typename P>
inline void SetActivationParams(float min, float max, P* params) {}

template <typename P>
inline void SetActivationParams(int32_t min, int32_t max, P* params) {}

template <typename P>
inline void SetActivationParams(uint32_t min, uint32_t max, P* params) {}

template <typename P>
inline void SetActivationParams(int16_t min, int16_t max, P* params) {}

template <typename P>
inline void SetActivationParams(int64_t min, int64_t max, P* params) {}

// Reads the fused-activation clamp range back out of `params`, one overload
// per element type. Bodies elided in this copy.
template <typename P>
inline void GetActivationParams(const P& params, int32_t* min, int32_t* max) {}

template <typename P>
inline void GetActivationParams(const P& params, uint32_t* min, uint32_t* max) {}

template <typename P>
inline void GetActivationParams(const P& params, int16_t* min, int16_t* max) {}

template <typename P>
inline void GetActivationParams(const P& params, float* min, float* max) {}

template <typename P>
inline void GetActivationParams(const P& params, int64_t* min, int64_t* max) {}

// Type trait to check whether the given type is an integer type narrower than
// 4 bytes, i.e. exactly (u)int8_t or (u)int16_t. (Deliberately NOT a
// sizeof-based check: bool/char would wrongly qualify.) Logic unchanged; the
// comment typo ("check of") is fixed and <type_traits> is now included
// explicitly rather than relied on transitively.
template <typename T>
struct is_small_integer
    : public std::integral_constant<bool,
                                    std::is_same<T, int8_t>::value ||
                                        std::is_same<T, uint8_t>::value ||
                                        std::is_same<T, int16_t>::value ||
                                        std::is_same<T, uint16_t>::value> {};

// Type trait to check whether the given type is exactly int32_t or int64_t.
// Logic unchanged; the comment typo ("check of") is fixed and <type_traits>
// is now included explicitly rather than relied on transitively.
template <typename T>
struct is_int32_or_int64
    : public std::integral_constant<bool, std::is_same<T, int32_t>::value ||
                                              std::is_same<T, int64_t>::value> {};

}  // namespace tflite

#endif  // TENSORFLOW_LITE_KERNELS_INTERNAL_TYPES_H_