#include <stddef.h>
#include <stdint.h>
#include <vector>
#include "tensorflow/lite/core/c/builtin_op_data.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/cpu_backend_context.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/kernels/internal/optimized/integer_ops/transpose_conv.h"
#include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h"
#include "tensorflow/lite/kernels/internal/portable_tensor_utils.h"
#include "tensorflow/lite/kernels/internal/reference/integer_ops/transpose_conv.h"
#include "tensorflow/lite/kernels/internal/reference/reference_ops.h"
#include "tensorflow/lite/kernels/internal/reference/transpose_conv.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/internal/types.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/kernels/padding.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace transpose_conv {
// Selects which implementation variant a registered kernel uses (the file
// registers both a reference and a generic-optimized variant below); enum
// body elided in this view.
enum KernelType { … };
// Node tensor indices. Values are elided here; the names indicate the
// expected input order: output-shape tensor, weights, data input, optional
// bias, and the single output. TODO(review): confirm concrete indices
// against the TRANSPOSE_CONV schema.
constexpr int kOutputShapeTensor = …;
constexpr int kWeightsTensor = …;
constexpr int kDataInputTensor = …;
constexpr int kBiasTensor = …;
constexpr int kOutputTensor = …;
// Sentinel for "temporary tensor not yet allocated"; value elided.
const int kTensorNotAllocated = …;
// Per-node state created in Init() and destroyed in Free(); fields elided.
struct OpData { … };
void* Init(TfLiteContext* context, const char* buffer, size_t length) { … }
void Free(TfLiteContext* context, void* buffer) { … }
// Resizes `tensor_to_resize` to the shape described by the contents of
// `shape_tensor` (body elided). NOTE(review): presumably `shape_tensor` is a
// 1-D integer shape vector, as with other shape-driven resizes — confirm.
TfLiteStatus ResizeTensor(TfLiteContext* context,
const TfLiteTensor* shape_tensor,
TfLiteTensor* tensor_to_resize) { … }
// Requests whatever temporary tensors this node needs, as a function of the
// input/weights element types and the kernel variant. Body elided —
// presumably covers the col2im buffer, transposed-weights copy, and the
// quantized scratch buffer that the Eval* signatures below take; verify.
template <KernelType kernel_type>
static TfLiteStatus AllocateTemporaryTensorsIfRequired(TfLiteContext* context,
TfLiteType input_type,
TfLiteType weights_type,
TfLiteNode* node) { … }
// Computes the shape required for the `col2im` temporary from the requested
// output shape, the weights, and the input, and resizes it accordingly.
// Body elided; exact shape formula not visible here.
TfLiteStatus ResizeCol2ImTensor(TfLiteContext* context,
const TfLiteTensor* output_shape,
const TfLiteTensor* weights,
const TfLiteTensor* input,
TfLiteTensor* col2im) { … }
// Resizes `transposed_weights` and fills it with a transposed copy of
// `weights` (body elided). NOTE(review): presumably the layout needed by the
// optimized (GEMM-based) path — confirm against the optimized eval.
TfLiteStatus ResizeAndTransposeWeights(TfLiteContext* context,
const TfLiteTensor* weights,
TfLiteTensor* transposed_weights) { … }
// TfLiteRegistration::prepare hook: validates tensor types/shapes and sizes
// the output and any temporaries before the first Eval. Templated on the
// kernel variant, so the reference and optimized registrations get distinct
// instantiations. Body elided.
template <KernelType kernel_type>
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { … }
// Float32 execution path. Takes the optional pre-transposed weights and the
// col2im temporary produced during Prepare; writes the result into `output`.
// Body elided — presumably dispatches to reference vs optimized transpose
// conv based on `kernel_type`; verify.
template <KernelType kernel_type>
void EvalFloat(TfLiteContext* context, const TfLiteTransposeConvParams* params,
const OpData* data, const TfLiteTensor* input,
const TfLiteTensor* weights, const TfLiteTensor* bias,
const TfLiteTensor* transposed_weights, TfLiteTensor* col2im,
TfLiteTensor* output) { … }
// Per-tensor quantized (uint8/int8-style) execution path. `scratch_buffer`
// holds intermediate accumulators; `data` is non-const, so quantization
// multipliers are presumably (re)computed or cached here — confirm. Body
// elided.
template <KernelType kernel_type>
void EvalQuantized(TfLiteContext* context,
const TfLiteTransposeConvParams* params, OpData* data,
const TfLiteTensor* input, const TfLiteTensor* weights,
const TfLiteTensor* transposed_weights,
const TfLiteTensor* bias, TfLiteTensor* col2im,
TfLiteTensor* output, TfLiteTensor* scratch_buffer) { … }
// Per-channel quantized execution path (per-output-channel scales on the
// weights, per the integer_ops transpose_conv headers included above). Same
// parameter contract as EvalQuantized. Body elided.
template <KernelType kernel_type>
void EvalQuantizedPerChannel(
TfLiteContext* context, const TfLiteTransposeConvParams* params,
OpData* data, const TfLiteTensor* input, const TfLiteTensor* weights,
const TfLiteTensor* transposed_weights, const TfLiteTensor* bias,
TfLiteTensor* col2im, TfLiteTensor* output, TfLiteTensor* scratch_buffer) { … }
// Per-channel quantized path for the 16-bit-activation / 8-bit-weight
// ("16x8") variant. Same parameter contract as EvalQuantizedPerChannel.
// Body elided.
template <KernelType kernel_type>
void EvalQuantizedPerChannel16x8(
TfLiteContext* context, const TfLiteTransposeConvParams* params,
OpData* data, const TfLiteTensor* input, const TfLiteTensor* weights,
const TfLiteTensor* transposed_weights, const TfLiteTensor* bias,
TfLiteTensor* col2im, TfLiteTensor* output, TfLiteTensor* scratch_buffer) { … }
// Hybrid path: float input with quantized weights (the portable_tensor_utils
// include above suggests on-the-fly input quantization — confirm). Not
// templated on kernel_type, unlike the other Eval* routines. Body elided.
TfLiteStatus EvalHybrid(TfLiteContext* context, TfLiteNode* node,
const TfLiteTransposeConvParams* params, OpData* data,
const TfLiteTensor* input, const TfLiteTensor* weights,
const TfLiteTensor* bias, TfLiteTensor* output) { … }
// TfLiteRegistration::invoke hook: fetches the node's tensors and dispatches
// to the appropriate type-specific Eval* routine above. Body elided.
template <KernelType kernel_type>
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { … }
}
TfLiteRegistration* Register_TRANSPOSECONV_REF() { … }
TfLiteRegistration* Register_TRANSPOSECONV_GENERIC_OPT() { … }
TfLiteRegistration* Register_TRANSPOSE_CONV() { … }
}
}
}