#include "tensorflow/lite/kernels/internal/reference/reduce.h"
#include <stddef.h>
#include <cstdint>
#include <limits>
#include "ruy/profiler/instrumentation.h"
#include "tensorflow/lite/core/c/builtin_op_data.h"
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/cpu_backend_context.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/kernels/internal/optimized/integer_ops/mean.h"
#include "tensorflow/lite/kernels/internal/optimized/neon_check.h"
#include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h"
#include "tensorflow/lite/kernels/internal/optimized/reduce.h"
#include "tensorflow/lite/kernels/internal/quantization_util.h"
#include "tensorflow/lite/kernels/internal/reference/reference_ops.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/internal/types.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace reduce {
// Upper bound on output size for which the output may be precomputed as a
// constant/persistent tensor (value elided in this view).
const int kMaxConstantOutputTensorSize = …;
// Selects between the reference and optimized kernel implementations
// (enumerators elided in this view).
enum KernelType { … };
// Per-node state — presumably allocated by Init and released by Free below;
// fields elided in this view, TODO confirm against the full source.
struct OpData { … };
// Convenience bundle of the node's params and input/axis/output tensors
// (fields elided in this view — confirm against the full source).
struct OpContext { … };
// TfLiteRegistration `init` hook: constructs the per-node state from the
// serialized options in `buffer` (body elided in this view).
void* Init(TfLiteContext* context, const char* buffer, size_t length) { … }
// TfLiteRegistration `free` hook: releases whatever Init returned
// (body elided in this view).
void Free(TfLiteContext* context, void* buffer) { … }
// Forward declaration so earlier helpers can call the templated evaluator
// that is defined near the end of this namespace.
template <KernelType kernel_type>
TfLiteStatus EvalImpl(TfLiteContext* context, TfLiteNode* node);
// Resizes the temporary tensor holding the resolved (normalized) reduction
// axes (body elided in this view).
TfLiteStatus ResizeTempAxis(TfLiteContext* context, OpContext* op_context,
TfLiteTensor* resolved_axis) { … }
// Resizes the temporary accumulator tensor used by quantized/mean paths
// (body elided in this view).
TfLiteStatus ResizeTempAccum(TfLiteContext* context, OpContext* op_context,
TfLiteTensor* temp_accum) { … }
// Computes the output shape implied by the input shape, the axis tensor and
// keep_dims; reports it via `output_shape` (body elided in this view —
// ownership of the returned TfLiteIntArray is not visible here, confirm).
TfLiteStatus GetOutputShape(TfLiteContext* context, OpContext* op_context,
TfLiteIntArray** output_shape) { … }
// Resizes the node's output tensor to the reduced shape (body elided).
TfLiteStatus ResizeOutputTensor(TfLiteContext* context, OpContext* op_context) { … }
// Resizes the temporary tensor holding normalized input dimensions
// (body elided in this view).
TfLiteStatus ResizeTempDims(TfLiteContext* context, OpContext* op_context,
TfLiteTensor* normalized_dims) { … }
// Allocates/registers the scratch tensors the reduce kernels need
// (body elided in this view).
TfLiteStatus InitializeTemporaries(TfLiteContext* context, TfLiteNode* node,
OpContext* op_context) { … }
// Shared Prepare logic: validates tensors and sets up temporaries/output
// shape for the generic reduce ops (body elided in this view).
TfLiteStatus PrepareSimple(TfLiteContext* context, TfLiteNode* node) { … }
// Prepare hook for REDUCE_ALL / REDUCE_ANY (body elided in this view).
TfLiteStatus PrepareAllOrAny(TfLiteContext* context, TfLiteNode* node) { … }
// Prepare hook for MEAN / SUM — presumably also precomputes quantization
// multipliers; body elided, confirm against the full source.
TfLiteStatus PrepareMeanOrSum(TfLiteContext* context, TfLiteNode* node) { … }
// Derives the rescaling factor used by the quantized PROD kernel from the
// input/output scales and the number of reduced elements (body elided).
double GetQuantProdScaling(double input_scale, double output_scale,
int reduced_axis_size) { … }
// Prepare hook for REDUCE_PROD (body elided in this view).
TfLiteStatus PrepareProd(TfLiteContext* context, TfLiteNode* node) { … }
// Copies/normalizes the requested reduction axes into the MeanParams used by
// the optimized mean path (body elided in this view).
void ResolveAxis(const int* axis_data, int axis_count,
tflite::MeanParams* op_params) { … }
// Computes MEAN over the resolved axes. T is the element type, U the
// accumulator type; `kernel_type` selects reference vs optimized code
// (body elided in this view).
template <typename T, typename U, KernelType kernel_type>
TfLiteStatus Mean(TfLiteContext* context, const OpContext* op_context,
int* temp_index, int* resolved_axis, U* temp_sum) { … }
// Quantized MEAN/SUM evaluator; `compute_sum` selects SUM (true) vs MEAN
// (false). Accumulates into `temp_sum` before requantizing — body elided in
// this view, confirm details against the full source.
template <typename T, KernelType kernel_type>
TfLiteStatus QuantizedMeanOrSum(TfLiteContext* context,
const OpContext& op_context,
const OpData* op_data, TfLiteTensor* temp_index,
TfLiteTensor* resolved_axis,
TfLiteTensor* temp_sum, bool compute_sum) { … }
// Quantized MEAN evaluator for a specific integer element type
// (body elided in this view).
template <typename integer_type>
TfLiteStatus EvalQuantizedMean(TfLiteContext* context,
const OpContext& op_context, int num_axis,
OpData* data, TfLiteTensor* temp_index,
TfLiteTensor* resolved_axis,
TfLiteTensor* temp_sum) { … }
// Fills the mean output tensor with its initial value for element type T
// (body elided in this view).
template <typename T>
void InitializeMeanOutputTyped(TfLiteTensor* output) { … }
// Type-dispatching wrapper over InitializeMeanOutputTyped (body elided).
TfLiteStatus InitializeMeanOutput(TfLiteTensor* output) { … }
// Eval hook for MEAN: dispatches on input type and kernel_type
// (body elided in this view).
template <KernelType kernel_type>
TfLiteStatus EvalMean(TfLiteContext* context, TfLiteNode* node) { … }
// Per-invocation data shared with the threaded reduce workers
// (fields elided in this view).
template <typename T>
struct EvalData { … };
// Returns whether the axis tensor selects every dimension, i.e. the whole
// input reduces to a single value (body elided in this view).
bool IsReduceAllDims(const TfLiteTensor* axis, int num_axis, int num_dims) { … }
// Threadpool task that reduces one slice of the input; run on the
// cpu_backend threadpool (members elided in this view).
template <typename T>
struct ReduceWorkerTask : cpu_backend_threadpool::Task { … };
// Reduces the entire input to a single output element using `reducer`,
// starting from `init_value`; presumably splits work across the context's
// threadpool via ReduceWorkerTask — body elided, confirm in full source.
template <typename T>
void ReduceAllDims(const T* input_data, const int* input_dims,
const int input_num_dims, T* output_data, T init_value,
T reducer(const T current, const T in),
TfLiteContext* context) { … }
// Evaluates a generic reduction (min/max/any/all/…) for element type T,
// selected by `reduce_type` (body elided in this view).
template <typename T, KernelType kernel_type>
TfLiteStatus EvalType(TfLiteContext* context, TfLiteNode* node,
OpContext* op_context, ReduceType reduce_type) { … }
// Type-dispatching entry point for the generic reductions; forwards to
// EvalType with the concrete element type (body elided in this view).
template <KernelType kernel_type, ReduceType reduce_type>
TfLiteStatus EvalGeneric(TfLiteContext* context, TfLiteNode* node) { … }
// Eval hook for SUM (body elided in this view).
template <KernelType kernel_type>
TfLiteStatus EvalSum(TfLiteContext* context, TfLiteNode* node) { … }
// Quantized PROD evaluator for element type T (body elided in this view).
template <KernelType kernel_type, typename T>
TfLiteStatus EvalQuantizedProd(TfLiteContext* context, TfLiteNode* node,
OpContext* op_context) { … }
// Eval hook for REDUCE_PROD: dispatches float vs quantized paths
// (body elided in this view — confirm dispatch in full source).
template <KernelType kernel_type>
TfLiteStatus EvalProd(TfLiteContext* context, TfLiteNode* node) { … }
// Definition of the evaluator forward-declared above (body elided).
template <KernelType kernel_type>
TfLiteStatus EvalImpl(TfLiteContext* context, TfLiteNode* node) { … }
}  // namespace reduce
// NOTE(review): the original had a stray `ReduceType;` statement at this
// point. A bare type name followed by a semicolon is not a valid C++
// declaration at namespace scope ("declaration does not declare anything");
// it appears to be an artifact of how this outline was generated, so it has
// been removed. Confirm against the full source that nothing meaningful
// (e.g. a `using` declaration) was lost here.
// Registration factories. For each op, the _REF variant returns the
// TfLiteRegistration wired to the reference kernel and the _OPT variant the
// one wired to the optimized kernel (bodies elided in this view).
TfLiteRegistration* Register_MEAN_OPT() { … }
TfLiteRegistration* Register_MEAN_REF() { … }
TfLiteRegistration* Register_SUM_REF() { … }
TfLiteRegistration* Register_SUM_OPT() { … }
TfLiteRegistration* Register_REDUCE_PROD_REF() { … }
TfLiteRegistration* Register_REDUCE_PROD_OPT() { … }
TfLiteRegistration* Register_REDUCE_MAX_REF() { … }
TfLiteRegistration* Register_REDUCE_MAX_OPT() { … }
TfLiteRegistration* Register_REDUCE_MIN_REF() { … }
TfLiteRegistration* Register_REDUCE_MIN_OPT() { … }
TfLiteRegistration* Register_REDUCE_ANY_REF() { … }
TfLiteRegistration* Register_REDUCE_ANY_OPT() { … }
TfLiteRegistration* Register_REDUCE_ALL_REF() { … }
TfLiteRegistration* Register_REDUCE_ALL_OPT() { … }
// Default registrations used by the op resolver — presumably each forwards
// to one of the _REF/_OPT factories above; bodies elided, confirm which
// variant is the default against the full source.
TfLiteRegistration* Register_MEAN() { … }
TfLiteRegistration* Register_SUM() { … }
TfLiteRegistration* Register_REDUCE_PROD() { … }
TfLiteRegistration* Register_REDUCE_MAX() { … }
TfLiteRegistration* Register_REDUCE_MIN() { … }
TfLiteRegistration* Register_REDUCE_ANY() { … }
TfLiteRegistration* Register_REDUCE_ALL() { … }
}
}
}