chromium/third_party/tflite/src/tensorflow/lite/kernels/transpose_conv.cc

/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#include <stddef.h>
#include <stdint.h>

#include <vector>

#include "tensorflow/lite/core/c/builtin_op_data.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/cpu_backend_context.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
// NOLINTNEXTLINE - This header file shouldn't go to the top.
#include "tensorflow/lite/kernels/internal/optimized/integer_ops/transpose_conv.h"
#include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h"
// NOLINTNEXTLINE - This header file shouldn't go to the top.
#include "tensorflow/lite/kernels/internal/portable_tensor_utils.h"
#include "tensorflow/lite/kernels/internal/reference/integer_ops/transpose_conv.h"
#include "tensorflow/lite/kernels/internal/reference/reference_ops.h"
#include "tensorflow/lite/kernels/internal/reference/transpose_conv.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/internal/types.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/kernels/padding.h"

namespace tflite {
namespace ops {
namespace builtin {
namespace transpose_conv {

// This file has two implementations of TransposeConv.
enum KernelType {
  kReference,
  kGenericOptimized,  // Neon-free
};

constexpr int kOutputShapeTensor = 0;
constexpr int kWeightsTensor = 1;
constexpr int kDataInputTensor = 2;
constexpr int kBiasTensor = 3;
constexpr int kOutputTensor = 0;

const int kTensorNotAllocated = -1;

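// Per-node state: indices of the temporary tensors, the computed padding,
// and the quantized output multipliers/shifts plus the fused-activation
// output range.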
struct OpData {};

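// Allocates the per-node OpData; the interpreter hands the returned pointer
// back to Prepare and Eval via node->user_data.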
void* Init(TfLiteContext* context, const char* buffer, size_t length) {}

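// Releases the OpData allocated in Init.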
void Free(TfLiteContext* context, void* buffer) {}

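// Resizes `tensor_to_resize` to the shape carried as the values of
// `shape_tensor`.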
TfLiteStatus ResizeTensor(TfLiteContext* context,
                          const TfLiteTensor* shape_tensor,
                          TfLiteTensor* tensor_to_resize) {}

// Allocates the temporary tensors (col2im buffer, transposed weights, and
// quantization scratch) that the chosen kernel/type combination requires.
template <KernelType kernel_type>
static TfLiteStatus AllocateTemporaryTensorsIfRequired(TfLiteContext* context,
                                                       TfLiteType input_type,
                                                       TfLiteType weights_type,
                                                       TfLiteNode* node) {}

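// Sizes the col2im intermediate used by the optimized (GEMM-based) path,
// deriving its shape from the output shape, weights, and input dims.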
TfLiteStatus ResizeCol2ImTensor(TfLiteContext* context,
                                const TfLiteTensor* output_shape,
                                const TfLiteTensor* weights,
                                const TfLiteTensor* input,
                                TfLiteTensor* col2im) {}

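// The optimized kernel consumes weights in a transposed layout, so this
// resizes `transposed_weights` and performs the permutation once up front.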
TfLiteStatus ResizeAndTransposeWeights(TfLiteContext* context,
                                       const TfLiteTensor* weights,
                                       TfLiteTensor* transposed_weights) {}

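// Validates input/output counts and types, computes padding and quantized
// multipliers, and resizes the output and temporaries (output resizing is
// deferred to Eval when the output-shape tensor is not constant).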
template <KernelType kernel_type>
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {}

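// Float path: runs the reference or optimized TransposeConv depending on
// `kernel_type`.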
template <KernelType kernel_type>
void EvalFloat(TfLiteContext* context, const TfLiteTransposeConvParams* params,
               const OpData* data, const TfLiteTensor* input,
               const TfLiteTensor* weights, const TfLiteTensor* bias,
               const TfLiteTensor* transposed_weights, TfLiteTensor* col2im,
               TfLiteTensor* output) {}

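// uint8 path with a single per-tensor scale; accumulation goes through the
// int32 `scratch_buffer`.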
template <KernelType kernel_type>
void EvalQuantized(TfLiteContext* context,
                   const TfLiteTransposeConvParams* params, OpData* data,
                   const TfLiteTensor* input, const TfLiteTensor* weights,
                   const TfLiteTensor* transposed_weights,
                   const TfLiteTensor* bias, TfLiteTensor* col2im,
                   TfLiteTensor* output, TfLiteTensor* scratch_buffer) {}

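// int8 path with per-channel weight quantization.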
template <KernelType kernel_type>
void EvalQuantizedPerChannel(
    TfLiteContext* context, const TfLiteTransposeConvParams* params,
    OpData* data, const TfLiteTensor* input, const TfLiteTensor* weights,
    const TfLiteTensor* transposed_weights, const TfLiteTensor* bias,
    TfLiteTensor* col2im, TfLiteTensor* output, TfLiteTensor* scratch_buffer) {}

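// 16-bit activation / 8-bit weight path; accumulation uses a wider
// scratch buffer than the int8 path.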
template <KernelType kernel_type>
void EvalQuantizedPerChannel16x8(
    TfLiteContext* context, const TfLiteTransposeConvParams* params,
    OpData* data, const TfLiteTensor* input, const TfLiteTensor* weights,
    const TfLiteTensor* transposed_weights, const TfLiteTensor* bias,
    TfLiteTensor* col2im, TfLiteTensor* output, TfLiteTensor* scratch_buffer) {}

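// Hybrid path: float activations with int8 weights; the inputs are
// quantized on the fly before the kernel runs.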
TfLiteStatus EvalHybrid(TfLiteContext* context, TfLiteNode* node,
                        const TfLiteTransposeConvParams* params, OpData* data,
                        const TfLiteTensor* input, const TfLiteTensor* weights,
                        const TfLiteTensor* bias, TfLiteTensor* output) {}

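// Entry point: fetches the node's tensors, then dispatches on the input
// (and weights) type to one of the Eval* helpers above.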
template <KernelType kernel_type>
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {}

}  // namespace transpose_conv

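// Registrations: _REF wires up the reference kernel, _GENERIC_OPT the
// Neon-free optimized kernel; Register_TRANSPOSE_CONV is the default used
// by the builtin op resolver and returns the generic optimized variant.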
TfLiteRegistration* Register_TRANSPOSECONV_REF() {}

TfLiteRegistration* Register_TRANSPOSECONV_GENERIC_OPT() {}

TfLiteRegistration* Register_TRANSPOSE_CONV() {}

}  // namespace builtin
}  // namespace ops
}  // namespace tflite