chromium/third_party/tflite/src/tensorflow/lite/kernels/pooling.cc

/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/kernels/internal/optimized/integer_ops/pooling.h"

#include <stddef.h>
#include <stdint.h>

#include <cstdlib>

#include "tensorflow/lite/core/c/builtin_op_data.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h"
#include "tensorflow/lite/kernels/internal/reference/integer_ops/pooling.h"
#include "tensorflow/lite/kernels/internal/reference/pooling.h"
#include "tensorflow/lite/kernels/internal/reference/reference_ops.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/internal/types.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/kernels/padding.h"

namespace tflite {
namespace ops {
namespace builtin {
namespace pooling {

// This file has two implementations of each pooling op: a portable reference
// kernel and a generic optimized kernel, selected through the KernelType
// template parameter of the eval functions below.
enum KernelType {
  kReference,
  kGenericOptimized,
};

enum PoolType {
  kAverage,
  kMax,
  kL2,
};

struct OpData {
  // Padding computed once in GenericPrepare() and reused at eval time.
  TfLitePaddingValues padding;
};

void* Init(TfLiteContext* context, const char* buffer, size_t length) {
  // This is a builtin op, so we don't use the contents in 'buffer', if any.
  // Instead, we allocate a new object to carry information from Prepare() to
  // Eval().
  return new OpData;
}

void Free(TfLiteContext* context, void* buffer) {
  delete reinterpret_cast<OpData*>(buffer);
}

template <PoolType pool_type>
TfLiteStatus GenericPrepare(TfLiteContext* context, TfLiteNode* node) {
  auto* params = reinterpret_cast<TfLitePoolParams*>(node->builtin_data);
  OpData* data = reinterpret_cast<OpData*>(node->user_data);

  TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
  TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
  TfLiteTensor* output;
  TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
  const TfLiteTensor* input;
  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));
  TF_LITE_ENSURE_EQ(context, NumDimensions(input), 4);
  TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type);

  int batches = input->dims->data[0];
  int height = input->dims->data[1];
  int width = input->dims->data[2];
  int channels_out = input->dims->data[3];

  // Matching GetWindowedOutputSize in TensorFlow.
  int out_width, out_height;
  data->padding = ComputePaddingHeightWidth(
      params->stride_height, params->stride_width, /*dilation_rate_height=*/1,
      /*dilation_rate_width=*/1, height, width, params->filter_height,
      params->filter_width, params->padding, &out_height, &out_width);

  if (input->type == kTfLiteUInt8 || input->type == kTfLiteInt8 ||
      input->type == kTfLiteInt16) {
    if (pool_type == kAverage || pool_type == kMax) {
      // Average and max pooling reuse the input quantization on the output.
      TFLITE_DCHECK_LE(std::abs(input->params.scale - output->params.scale),
                       1.0e-6);
      TFLITE_DCHECK_EQ(input->params.zero_point, output->params.zero_point);
    }
    if (pool_type == kL2) {
      // We currently don't have a quantized implementation of L2Pool.
      TF_LITE_ENSURE_TYPES_EQ(context, input->type, kTfLiteFloat32);
    }
  }

  TfLiteIntArray* output_size = TfLiteIntArrayCreate(4);
  output_size->data[0] = batches;
  output_size->data[1] = out_height;
  output_size->data[2] = out_width;
  output_size->data[3] = channels_out;
  return context->ResizeTensor(context, output, output_size);
}
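
// Worked example for GenericPrepare's output sizing: with a 2x2 filter at
// stride 2, SAME padding maps a 224x224 input to ceil(224 / 2) = 112x112,
// and VALID padding maps it to ceil((224 - 2 + 1) / 2) = 112x112 as well.
// The two schemes diverge once the window no longer tiles the input exactly,
// e.g. a 3x3 filter at stride 2 yields SAME: 112 but VALID: 111.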

template <KernelType kernel_type>
TfLiteStatus AverageEvalFloat(TfLiteContext* context, TfLiteNode* node,
                              TfLitePoolParams* params, OpData* data,
                              const TfLiteTensor* input, TfLiteTensor* output) {
  // Condensed sketch of the float path; assumes the bool-returning
  // AveragePool of current TFLite, whose result is checked via
  // TF_LITE_ENSURE (an empty, all-padding window is reported as failure).
  float activation_min, activation_max;
  CalculateActivationRange(params->activation, &activation_min,
                           &activation_max);
  tflite::PoolParams op_params;
  op_params.stride_height = params->stride_height;
  op_params.stride_width = params->stride_width;
  op_params.filter_height = params->filter_height;
  op_params.filter_width = params->filter_width;
  op_params.padding_values.height = data->padding.height;
  op_params.padding_values.width = data->padding.width;
  op_params.float_activation_min = activation_min;
  op_params.float_activation_max = activation_max;
  if (kernel_type == kReference) {
    TF_LITE_ENSURE(context,
                   reference_ops::AveragePool(op_params, GetTensorShape(input),
                                              GetTensorData<float>(input),
                                              GetTensorShape(output),
                                              GetTensorData<float>(output)));
  } else {
    TF_LITE_ENSURE(context,
                   optimized_ops::AveragePool(op_params, GetTensorShape(input),
                                              GetTensorData<float>(input),
                                              GetTensorShape(output),
                                              GetTensorData<float>(output)));
  }
  return kTfLiteOk;
}

template <KernelType kernel_type>
TfLiteStatus AverageEvalQuantizedUint8(TfLiteContext* context, TfLiteNode* node,
                                       TfLitePoolParams* params, OpData* data,
                                       const TfLiteTensor* input,
                                       TfLiteTensor* output) {
  // Same sketch as the float path, with a quantized activation range.
  int32_t activation_min;
  int32_t activation_max;
  TF_LITE_ENSURE_OK(context, CalculateActivationRangeQuantized(
                                 context, params->activation, output,
                                 &activation_min, &activation_max));
  tflite::PoolParams op_params;
  op_params.stride_height = params->stride_height;
  op_params.stride_width = params->stride_width;
  op_params.filter_height = params->filter_height;
  op_params.filter_width = params->filter_width;
  op_params.padding_values.height = data->padding.height;
  op_params.padding_values.width = data->padding.width;
  op_params.quantized_activation_min = activation_min;
  op_params.quantized_activation_max = activation_max;
  if (kernel_type == kReference) {
    TF_LITE_ENSURE(context,
                   reference_ops::AveragePool(op_params, GetTensorShape(input),
                                              GetTensorData<uint8_t>(input),
                                              GetTensorShape(output),
                                              GetTensorData<uint8_t>(output)));
  } else {
    TF_LITE_ENSURE(context,
                   optimized_ops::AveragePool(op_params, GetTensorShape(input),
                                              GetTensorData<uint8_t>(input),
                                              GetTensorShape(output),
                                              GetTensorData<uint8_t>(output)));
  }
  return kTfLiteOk;
}

template <KernelType kernel_type>
TfLiteStatus AverageEvalQuantizedInt8(TfLiteContext* context, TfLiteNode* node,
                                      TfLitePoolParams* params, OpData* data,
                                      const TfLiteTensor* input,
                                      TfLiteTensor* output) {
  // int8 kernels live in the *_integer_ops namespaces pulled in by the
  // includes above.
  int32_t activation_min;
  int32_t activation_max;
  TF_LITE_ENSURE_OK(context, CalculateActivationRangeQuantized(
                                 context, params->activation, output,
                                 &activation_min, &activation_max));
  tflite::PoolParams op_params;
  op_params.stride_height = params->stride_height;
  op_params.stride_width = params->stride_width;
  op_params.filter_height = params->filter_height;
  op_params.filter_width = params->filter_width;
  op_params.padding_values.height = data->padding.height;
  op_params.padding_values.width = data->padding.width;
  op_params.quantized_activation_min = activation_min;
  op_params.quantized_activation_max = activation_max;
  if (kernel_type == kReference) {
    TF_LITE_ENSURE(context, reference_integer_ops::AveragePool(
                                op_params, GetTensorShape(input),
                                GetTensorData<int8_t>(input),
                                GetTensorShape(output),
                                GetTensorData<int8_t>(output)));
  } else {
    TF_LITE_ENSURE(context, optimized_integer_ops::AveragePool(
                                op_params, GetTensorShape(input),
                                GetTensorData<int8_t>(input),
                                GetTensorShape(output),
                                GetTensorData<int8_t>(output)));
  }
  return kTfLiteOk;
}

template <KernelType kernel_type>
TfLiteStatus AverageEvalQuantizedInt16(TfLiteContext* context, TfLiteNode* node,
                                       TfLitePoolParams* params, OpData* data,
                                       const TfLiteTensor* input,
                                       TfLiteTensor* output) {
  // Sketch of the int16 path. Assumption: only a reference int16 kernel is
  // available among the includes above, so both kernel types use it.
  int32_t activation_min;
  int32_t activation_max;
  TF_LITE_ENSURE_OK(context, CalculateActivationRangeQuantized(
                                 context, params->activation, output,
                                 &activation_min, &activation_max));
  tflite::PoolParams op_params;
  op_params.stride_height = params->stride_height;
  op_params.stride_width = params->stride_width;
  op_params.filter_height = params->filter_height;
  op_params.filter_width = params->filter_width;
  op_params.padding_values.height = data->padding.height;
  op_params.padding_values.width = data->padding.width;
  op_params.quantized_activation_min = activation_min;
  op_params.quantized_activation_max = activation_max;
  TF_LITE_ENSURE(context,
                 reference_integer_ops::AveragePool(
                     op_params, GetTensorShape(input),
                     GetTensorData<int16_t>(input), GetTensorShape(output),
                     GetTensorData<int16_t>(output)));
  return kTfLiteOk;
}
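
// Dispatches a pooling call to the reference or optimized kernel namespace
// based on the kernel_type template parameter of the enclosing function.
// This definition is a reconstruction to match the #undef further below; the
// exact upstream macro may differ.
#define TF_LITE_KERNEL_TYPE_DISPATCH(ref_ns, opt_ns, name, ...) \
  if (kernel_type == kReference) {                              \
    ref_ns::name(__VA_ARGS__);                                  \
  } else {                                                      \
    opt_ns::name(__VA_ARGS__);                                  \
  }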

template <KernelType kernel_type>
void MaxEvalFloat(TfLiteContext* context, TfLiteNode* node,
                  TfLitePoolParams* params, OpData* data,
                  const TfLiteTensor* input, TfLiteTensor* output) {
  float activation_min, activation_max;
  CalculateActivationRange(params->activation, &activation_min,
                           &activation_max);
  tflite::PoolParams op_params;
  op_params.stride_height = params->stride_height;
  op_params.stride_width = params->stride_width;
  op_params.filter_height = params->filter_height;
  op_params.filter_width = params->filter_width;
  op_params.padding_values.height = data->padding.height;
  op_params.padding_values.width = data->padding.width;
  op_params.float_activation_min = activation_min;
  op_params.float_activation_max = activation_max;
  TF_LITE_KERNEL_TYPE_DISPATCH(reference_ops, optimized_ops, MaxPool,
                               op_params, GetTensorShape(input),
                               GetTensorData<float>(input),
                               GetTensorShape(output),
                               GetTensorData<float>(output));
}

template <KernelType kernel_type>
void MaxEvalQuantizedUInt8(TfLiteContext* context, TfLiteNode* node,
                           TfLitePoolParams* params, OpData* data,
                           const TfLiteTensor* input, TfLiteTensor* output) {
  int32_t activation_min;
  int32_t activation_max;
  // The (void) cast discards the status; this helper returns void.
  (void)CalculateActivationRangeQuantized(context, params->activation, output,
                                          &activation_min, &activation_max);
  tflite::PoolParams op_params;
  op_params.stride_height = params->stride_height;
  op_params.stride_width = params->stride_width;
  op_params.filter_height = params->filter_height;
  op_params.filter_width = params->filter_width;
  op_params.padding_values.height = data->padding.height;
  op_params.padding_values.width = data->padding.width;
  op_params.quantized_activation_min = activation_min;
  op_params.quantized_activation_max = activation_max;
  TF_LITE_KERNEL_TYPE_DISPATCH(reference_ops, optimized_ops, MaxPool,
                               op_params, GetTensorShape(input),
                               GetTensorData<uint8_t>(input),
                               GetTensorShape(output),
                               GetTensorData<uint8_t>(output));
}

template <KernelType kernel_type>
void MaxEvalQuantizedInt8(TfLiteContext* context, TfLiteNode* node,
                          TfLitePoolParams* params, OpData* data,
                          const TfLiteTensor* input, TfLiteTensor* output) {
  int32_t activation_min;
  int32_t activation_max;
  (void)CalculateActivationRangeQuantized(context, params->activation, output,
                                          &activation_min, &activation_max);
  tflite::PoolParams op_params;
  op_params.stride_height = params->stride_height;
  op_params.stride_width = params->stride_width;
  op_params.filter_height = params->filter_height;
  op_params.filter_width = params->filter_width;
  op_params.padding_values.height = data->padding.height;
  op_params.padding_values.width = data->padding.width;
  op_params.quantized_activation_min = activation_min;
  op_params.quantized_activation_max = activation_max;
  TF_LITE_KERNEL_TYPE_DISPATCH(reference_integer_ops, optimized_integer_ops,
                               MaxPool, op_params, GetTensorShape(input),
                               GetTensorData<int8_t>(input),
                               GetTensorShape(output),
                               GetTensorData<int8_t>(output));
}

template <KernelType kernel_type>
void MaxEvalQuantizedInt16(TfLiteContext* context, TfLiteNode* node,
                           TfLitePoolParams* params, OpData* data,
                           const TfLiteTensor* input, TfLiteTensor* output) {
  // Assumption: no optimized int16 max pool is available among the includes
  // above, so both kernel types fall back to the reference implementation.
  int32_t activation_min;
  int32_t activation_max;
  (void)CalculateActivationRangeQuantized(context, params->activation, output,
                                          &activation_min, &activation_max);
  tflite::PoolParams op_params;
  op_params.stride_height = params->stride_height;
  op_params.stride_width = params->stride_width;
  op_params.filter_height = params->filter_height;
  op_params.filter_width = params->filter_width;
  op_params.padding_values.height = data->padding.height;
  op_params.padding_values.width = data->padding.width;
  op_params.quantized_activation_min = activation_min;
  op_params.quantized_activation_max = activation_max;
  reference_integer_ops::MaxPool(op_params, GetTensorShape(input),
                                 GetTensorData<int16_t>(input),
                                 GetTensorShape(output),
                                 GetTensorData<int16_t>(output));
}

template <KernelType kernel_type>
void L2EvalFloat(TfLiteContext* context, TfLiteNode* node,
                 TfLitePoolParams* params, OpData* data,
                 const TfLiteTensor* input, TfLiteTensor* output) {
  // L2 pooling only has a float implementation; see the type check in
  // GenericPrepare above.
  float activation_min, activation_max;
  CalculateActivationRange(params->activation, &activation_min,
                           &activation_max);
  tflite::PoolParams op_params;
  op_params.stride_height = params->stride_height;
  op_params.stride_width = params->stride_width;
  op_params.filter_height = params->filter_height;
  op_params.filter_width = params->filter_width;
  op_params.padding_values.height = data->padding.height;
  op_params.padding_values.width = data->padding.width;
  op_params.float_activation_min = activation_min;
  op_params.float_activation_max = activation_max;
  TF_LITE_KERNEL_TYPE_DISPATCH(reference_ops, optimized_ops, L2Pool, op_params,
                               GetTensorShape(input),
                               GetTensorData<float>(input),
                               GetTensorShape(output),
                               GetTensorData<float>(output));
}

#undef TF_LITE_KERNEL_TYPE_DISPATCH

template <KernelType kernel_type>
TfLiteStatus AverageEval(TfLiteContext* context, TfLiteNode* node) {
  auto* params = reinterpret_cast<TfLitePoolParams*>(node->builtin_data);
  OpData* data = reinterpret_cast<OpData*>(node->user_data);

  TfLiteTensor* output;
  TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
  const TfLiteTensor* input;
  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));
  switch (input->type) {  // Already know in/out types are same.
    case kTfLiteFloat32:
      return AverageEvalFloat<kernel_type>(context, node, params, data, input,
                                           output);
    case kTfLiteUInt8:
      return AverageEvalQuantizedUint8<kernel_type>(context, node, params,
                                                    data, input, output);
    case kTfLiteInt8:
      return AverageEvalQuantizedInt8<kernel_type>(context, node, params, data,
                                                   input, output);
    case kTfLiteInt16:
      return AverageEvalQuantizedInt16<kernel_type>(context, node, params,
                                                    data, input, output);
    default:
      TF_LITE_KERNEL_LOG(context, "Type %s not currently supported.",
                         TfLiteTypeGetName(input->type));
      return kTfLiteError;
  }
}

template <KernelType kernel_type>
TfLiteStatus MaxEval(TfLiteContext* context, TfLiteNode* node) {
  auto* params = reinterpret_cast<TfLitePoolParams*>(node->builtin_data);
  OpData* data = reinterpret_cast<OpData*>(node->user_data);

  TfLiteTensor* output;
  TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
  const TfLiteTensor* input;
  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));
  switch (input->type) {  // Already know in/out types are same.
    case kTfLiteFloat32:
      MaxEvalFloat<kernel_type>(context, node, params, data, input, output);
      break;
    case kTfLiteUInt8:
      MaxEvalQuantizedUInt8<kernel_type>(context, node, params, data, input,
                                         output);
      break;
    case kTfLiteInt8:
      MaxEvalQuantizedInt8<kernel_type>(context, node, params, data, input,
                                        output);
      break;
    case kTfLiteInt16:
      MaxEvalQuantizedInt16<kernel_type>(context, node, params, data, input,
                                         output);
      break;
    default:
      TF_LITE_KERNEL_LOG(context, "Type %s not currently supported.",
                         TfLiteTypeGetName(input->type));
      return kTfLiteError;
  }
  return kTfLiteOk;
}

template <KernelType kernel_type>
TfLiteStatus L2Eval(TfLiteContext* context, TfLiteNode* node) {
  auto* params = reinterpret_cast<TfLitePoolParams*>(node->builtin_data);
  OpData* data = reinterpret_cast<OpData*>(node->user_data);

  TfLiteTensor* output;
  TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
  const TfLiteTensor* input;
  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));
  switch (input->type) {  // Already know in/out types are same.
    case kTfLiteFloat32:
      L2EvalFloat<kernel_type>(context, node, params, data, input, output);
      break;
    // We don't have a quantized implementation, so just fall through to the
    // 'default' case.
    default:
      TF_LITE_KERNEL_LOG(context, "Type %s not currently supported.",
                         TfLiteTypeGetName(input->type));
      return kTfLiteError;
  }
  return kTfLiteOk;
}

}  // namespace pooling

TfLiteRegistration* Register_AVERAGE_POOL_REF() {
  static TfLiteRegistration r = {pooling::Init, pooling::Free,
                                 pooling::GenericPrepare<pooling::kAverage>,
                                 pooling::AverageEval<pooling::kReference>};
  return &r;
}

TfLiteRegistration* Register_MAX_POOL_REF() {
  static TfLiteRegistration r = {pooling::Init, pooling::Free,
                                 pooling::GenericPrepare<pooling::kMax>,
                                 pooling::MaxEval<pooling::kReference>};
  return &r;
}

TfLiteRegistration* Register_L2_POOL_REF() {
  static TfLiteRegistration r = {pooling::Init, pooling::Free,
                                 pooling::GenericPrepare<pooling::kL2>,
                                 pooling::L2Eval<pooling::kReference>};
  return &r;
}

TfLiteRegistration* Register_AVERAGE_POOL_GENERIC_OPT() {
  static TfLiteRegistration r = {
      pooling::Init, pooling::Free,
      pooling::GenericPrepare<pooling::kAverage>,
      pooling::AverageEval<pooling::kGenericOptimized>};
  return &r;
}

TfLiteRegistration* Register_MAX_POOL_GENERIC_OPT() {
  static TfLiteRegistration r = {pooling::Init, pooling::Free,
                                 pooling::GenericPrepare<pooling::kMax>,
                                 pooling::MaxEval<pooling::kGenericOptimized>};
  return &r;
}

TfLiteRegistration* Register_L2_POOL_GENERIC_OPT() {
  static TfLiteRegistration r = {pooling::Init, pooling::Free,
                                 pooling::GenericPrepare<pooling::kL2>,
                                 pooling::L2Eval<pooling::kGenericOptimized>};
  return &r;
}

TfLiteRegistration* Register_AVERAGE_POOL_2D() {
  return Register_AVERAGE_POOL_GENERIC_OPT();
}

TfLiteRegistration* Register_MAX_POOL_2D() {
  return Register_MAX_POOL_GENERIC_OPT();
}

TfLiteRegistration* Register_L2_POOL_2D() {
  return Register_L2_POOL_GENERIC_OPT();
}
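
// Usage sketch (mirroring tensorflow/lite/kernels/register.cc): the builtin
// op resolver wires each builtin operator to one of the registrations above,
// e.g.
//
//   AddBuiltin(BuiltinOperator_AVERAGE_POOL_2D, Register_AVERAGE_POOL_2D(),
//              /* min_version */ 1, /* max_version */ 3);
//
// The exact version range is the resolver's concern, not this kernel's.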

}  // namespace builtin
}  // namespace ops
}  // namespace tflite