chromium/third_party/tflite/src/tensorflow/lite/kernels/reduce.cc

/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/kernels/internal/reference/reduce.h"

#include <stddef.h>

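#include <cmath>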
#include <cstdint>
#include <limits>

#include "ruy/profiler/instrumentation.h"  // from @ruy
#include "tensorflow/lite/core/c/builtin_op_data.h"
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/cpu_backend_context.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/kernels/internal/optimized/integer_ops/mean.h"
#include "tensorflow/lite/kernels/internal/optimized/neon_check.h"
#include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h"
#include "tensorflow/lite/kernels/internal/optimized/reduce.h"
#include "tensorflow/lite/kernels/internal/quantization_util.h"
#include "tensorflow/lite/kernels/internal/reference/reference_ops.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/internal/types.h"
#include "tensorflow/lite/kernels/kernel_util.h"

namespace tflite {
namespace ops {
namespace builtin {
namespace reduce {

const int kMaxConstantOutputTensorSize = 8;
// This file has the reference implementations of the reduce_* operators.
enum KernelType { kReference, kGenericOptimized };

struct OpData {};

struct OpContext {};
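
// A minimal sketch (hypothetical `ExampleOpContext`, not the upstream struct
// above) of the per-invocation data a reduce kernel typically gathers from
// the node: the builtin reducer params plus the input, axis, and output
// tensors.
struct ExampleOpContext {
  ExampleOpContext(TfLiteContext* context, TfLiteNode* node) {
    params = reinterpret_cast<TfLiteReducerParams*>(node->builtin_data);
    input = GetInput(context, node, 0);
    axis = GetInput(context, node, 1);
    output = GetOutput(context, node, 0);
  }
  TfLiteReducerParams* params;
  const TfLiteTensor* input;
  const TfLiteTensor* axis;
  TfLiteTensor* output;
};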

void* Init(TfLiteContext* context, const char* buffer, size_t length) {}

void Free(TfLiteContext* context, void* buffer) {}
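
// A minimal sketch (hypothetical names) of the usual Init/Free pairing in a
// TFLite kernel: Init heap-allocates per-node state and reserves scratch
// tensor slots via TfLiteContext::AddTensors, and Free deletes that state.
// The three-slot count is an assumption for illustration.
struct ExampleOpData {
  int scratch_tensor_index = 0;  // First of the reserved scratch slots.
};

void* ExampleInit(TfLiteContext* context, const char* buffer, size_t length) {
  auto* op_data = new ExampleOpData();
  context->AddTensors(context, /*tensors_to_add=*/3,
                      &op_data->scratch_tensor_index);
  return op_data;
}

void ExampleFree(TfLiteContext* context, void* buffer) {
  delete reinterpret_cast<ExampleOpData*>(buffer);
}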

template <KernelType kernel_type>
TfLiteStatus EvalImpl(TfLiteContext* context, TfLiteNode* node);

// Resizes the temp tensor that stores the resolved axis.
TfLiteStatus ResizeTempAxis(TfLiteContext* context, OpContext* op_context,
                            TfLiteTensor* resolved_axis) {}

// Resizes the temp tensor that stores the temporary accumulation of reduced
// elements.
TfLiteStatus ResizeTempAccum(TfLiteContext* context, OpContext* op_context,
                             TfLiteTensor* temp_accum) {}

// Returns the output shape.
TfLiteStatus GetOutputShape(TfLiteContext* context, OpContext* op_context,
                            TfLiteIntArray** output_shape) {}

// Resizes the output tensor based on the input size and the resolved axis.
TfLiteStatus ResizeOutputTensor(TfLiteContext* context,
                                OpContext* op_context) {}

// Resizes the temp tensor that stores the normalized dimensions.
TfLiteStatus ResizeTempDims(TfLiteContext* context, OpContext* op_context,
                            TfLiteTensor* normalized_dims) {}

// Initializes the temp tensors that store the index and the resolved axis.
TfLiteStatus InitializeTemporaries(TfLiteContext* context, TfLiteNode* node,
                                   OpContext* op_context) {}
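
// A minimal sketch (hypothetical helper) of the resize pattern the helpers
// above share: build a 1-D TfLiteIntArray describing the new shape and pass
// ownership of it to TfLiteContext::ResizeTensor.
TfLiteStatus ExampleResizeTemp(TfLiteContext* context, int num_elements,
                               TfLiteTensor* temp) {
  TfLiteIntArray* shape = TfLiteIntArrayCreate(1);
  shape->data[0] = num_elements;
  // ResizeTensor takes ownership of `shape`.
  return context->ResizeTensor(context, temp, shape);
}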

TfLiteStatus PrepareSimple(TfLiteContext* context, TfLiteNode* node) {}

TfLiteStatus PrepareAllOrAny(TfLiteContext* context, TfLiteNode* node) {}

TfLiteStatus PrepareMeanOrSum(TfLiteContext* context, TfLiteNode* node) {}

double GetQuantProdScaling(double input_scale, double output_scale,
                           int reduced_axis_size) {}
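
// A sketch of the scaling math (assumed, hypothetical name): multiplying
// `reduced_axis_size` quantized values composes a total scale of
// input_scale^n / output_scale, whose multiplier overflows quickly. Scaling
// each of the n multiplications by input_scale / output_scale^(1/n) instead
// composes to the same total while keeping every step's multiplier small.
double ExampleProdScaling(double input_scale, double output_scale,
                          int reduced_axis_size) {
  return input_scale / std::pow(output_scale, 1.0 / reduced_axis_size);
}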

TfLiteStatus PrepareProd(TfLiteContext* context, TfLiteNode* node) {}

void ResolveAxis(const int* axis_data, int axis_count,
                 tflite::MeanParams* op_params) {}

template <typename T, typename U, KernelType kernel_type>
TfLiteStatus Mean(TfLiteContext* context, const OpContext* op_context,
                  int* temp_index, int* resolved_axis, U* temp_sum) {}

template <typename T, KernelType kernel_type>
TfLiteStatus QuantizedMeanOrSum(TfLiteContext* context,
                                const OpContext& op_context,
                                const OpData* op_data, TfLiteTensor* temp_index,
                                TfLiteTensor* resolved_axis,
                                TfLiteTensor* temp_sum, bool compute_sum) {}

template <typename integer_type>
TfLiteStatus EvalQuantizedMean(TfLiteContext* context,
                               const OpContext& op_context, int num_axis,
                               OpData* data, TfLiteTensor* temp_index,
                               TfLiteTensor* resolved_axis,
                               TfLiteTensor* temp_sum) {}
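
// A sketch (assumed math, hypothetical name) of the requantization behind a
// quantized MEAN: with affine quantization real = scale * (q - zero_point),
// dequantize the wide accumulator, divide by the reduced element count, and
// requantize against the output scale and zero point. The production kernels
// do this with integer multipliers rather than floating point.
int32_t ExampleRequantizeMean(int64_t acc, int num_reduced,
                              double input_scale, int input_zero_point,
                              double output_scale, int output_zero_point) {
  const double real_mean =
      input_scale *
      (acc - static_cast<int64_t>(num_reduced) * input_zero_point) /
      num_reduced;
  return static_cast<int32_t>(std::round(real_mean / output_scale)) +
         output_zero_point;
}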

template <typename T>
void InitializeMeanOutputTyped(TfLiteTensor* output) {}

TfLiteStatus InitializeMeanOutput(TfLiteTensor* output) {}

template <KernelType kernel_type>
TfLiteStatus EvalMean(TfLiteContext* context, TfLiteNode* node) {}

template <typename T>
struct EvalData {};

// Returns true if 'axis' holds all dims [0 ... N-1] where N is num_dims.
bool IsReduceAllDims(const TfLiteTensor* axis, int num_axis, int num_dims) {}
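
// A minimal sketch (hypothetical name) of one way to implement the predicate
// above: mark each reduced axis in a bitmask and compare it against the mask
// covering all `num_dims` axes. Assumes the axis values are non-negative.
bool ExampleIsReduceAllDims(const TfLiteTensor* axis, int num_axis,
                            int num_dims) {
  int dims_mask = 0;
  for (int i = 0; i < num_axis; ++i) {
    dims_mask |= 1 << axis->data.i32[i];
  }
  return num_dims == 0 ? dims_mask == 0 : dims_mask == (1 << num_dims) - 1;
}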

// Worker task for reducing a single interval, identified by the index range
// [start, end).
template <typename T>
struct ReduceWorkerTask : cpu_backend_threadpool::Task {};
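
// A minimal sketch (hypothetical) of the worker pattern: each task owns one
// [start, end) slice and folds it into a private accumulator in Run(), so
// slices can run in parallel on the cpu_backend_threadpool and be combined
// afterwards.
template <typename T>
struct ExampleReduceTask : cpu_backend_threadpool::Task {
  ExampleReduceTask(const T* input, int64_t start, int64_t end, T init_value,
                    T (*reducer)(const T, const T))
      : input(input), start(start), end(end), value(init_value),
        reducer(reducer) {}
  void Run() override {
    for (int64_t i = start; i < end; ++i) value = reducer(value, input[i]);
  }
  const T* input;
  int64_t start;
  int64_t end;
  T value;
  T (*reducer)(const T, const T);
};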

// Applies the reduce operation to all of 'input_data' using the 'reducer'
// function, reducing everything down to a single element.
template <typename T>
void ReduceAllDims(const T* input_data, const int* input_dims,
                   const int input_num_dims, T* output_data, T init_value,
                   T reducer(const T current, const T in),
                   TfLiteContext* context) {}
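
// A minimal sketch (hypothetical, single-threaded) of the fold described
// above, without the thread-pool splitting: every element is folded into one
// accumulator seeded with init_value.
template <typename T>
T ExampleReduceAll(const T* input_data, int64_t num_elements, T init_value,
                   T reducer(const T current, const T in)) {
  T result = init_value;
  for (int64_t i = 0; i < num_elements; ++i) {
    result = reducer(result, input_data[i]);
  }
  return result;
}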

// The underlying logic for Reduce Sum/Prod/Max/Min/Any/All.
template <typename T, KernelType kernel_type>
TfLiteStatus EvalType(TfLiteContext* context, TfLiteNode* node,
                      OpContext* op_context, ReduceType reduce_type) {}

// The entry point that handles input types and then calls template functions to
// handle ReduceType.
template <KernelType kernel_type, ReduceType reduce_type>
TfLiteStatus EvalGeneric(TfLiteContext* context, TfLiteNode* node) {}

template <KernelType kernel_type>
TfLiteStatus EvalSum(TfLiteContext* context, TfLiteNode* node) {}

template <KernelType kernel_type, typename T>
TfLiteStatus EvalQuantizedProd(TfLiteContext* context, TfLiteNode* node,
                               OpContext* op_context) {}

template <KernelType kernel_type>
TfLiteStatus EvalProd(TfLiteContext* context, TfLiteNode* node) {}

template <KernelType kernel_type>
TfLiteStatus EvalImpl(TfLiteContext* context, TfLiteNode* node) {}
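
// A minimal sketch (hypothetical name) of the wiring used by the Register_*
// functions below: a function-local static TfLiteRegistration whose leading
// members point at this file's Init, Free, Prepare*, and Eval* entry points.
TfLiteRegistration* ExampleRegisterMeanRef() {
  static TfLiteRegistration r = {Init, Free, PrepareMeanOrSum,
                                 EvalMean<kReference>};
  return &r;
}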

}  // namespace reduce

TfLiteRegistration* Register_MEAN_OPT() {}

TfLiteRegistration* Register_MEAN_REF() {}

TfLiteRegistration* Register_SUM_REF() {}

TfLiteRegistration* Register_SUM_OPT() {}

TfLiteRegistration* Register_REDUCE_PROD_REF() {}

TfLiteRegistration* Register_REDUCE_PROD_OPT() {}

TfLiteRegistration* Register_REDUCE_MAX_REF() {}

TfLiteRegistration* Register_REDUCE_MAX_OPT() {}

TfLiteRegistration* Register_REDUCE_MIN_REF() {}

TfLiteRegistration* Register_REDUCE_MIN_OPT() {}

TfLiteRegistration* Register_REDUCE_ANY_REF() {}

TfLiteRegistration* Register_REDUCE_ANY_OPT() {}

TfLiteRegistration* Register_REDUCE_ALL_REF() {}

TfLiteRegistration* Register_REDUCE_ALL_OPT() {}

TfLiteRegistration* Register_MEAN() {}

TfLiteRegistration* Register_SUM() {}
TfLiteRegistration* Register_REDUCE_PROD() {}
TfLiteRegistration* Register_REDUCE_MAX() {}
TfLiteRegistration* Register_REDUCE_MIN() {}
TfLiteRegistration* Register_REDUCE_ANY() {}
TfLiteRegistration* Register_REDUCE_ALL() {}
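
// A minimal sketch (hypothetical name) of the REF/OPT dispatch the plain
// Register_* functions typically perform: pick the optimized kernel when
// NEON is available and fall back to the reference kernel otherwise.
TfLiteRegistration* ExampleRegisterMean() {
#ifdef USE_NEON
  return Register_MEAN_OPT();
#else
  return Register_MEAN_REF();
#endif
}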

}  // namespace builtin
}  // namespace ops
}  // namespace tflite