#include "tensorflow/lite/kernels/internal/optimized/integer_ops/conv.h"
#include <stddef.h>
#include <cstdint>
#include <memory>
#include <vector>
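// Only use multi-threaded Eigen if ruy is disabled.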
#if !defined(TFLITE_WITH_RUY)
#define TFLITE_WITH_MULTITHREADED_EIGEN
#endif
#include "tensorflow/lite/core/c/builtin_op_data.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/cpu_backend_context.h"
#if defined(TFLITE_WITH_MULTITHREADED_EIGEN)
#include "tensorflow/lite/kernels/eigen_support.h"
#endif
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/kernels/internal/types.h"
#if defined(TFLITE_WITH_MULTITHREADED_EIGEN)
#include "tensorflow/lite/kernels/internal/optimized/multithreaded_conv.h"
#endif
#include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h"
#include "tensorflow/lite/kernels/internal/quantization_util.h"
#include "tensorflow/lite/kernels/internal/reference/conv.h"
#include "tensorflow/lite/kernels/internal/reference/integer_ops/conv.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/internal/tensor_utils.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/kernels/padding.h"
#include "tensorflow/lite/util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace conv {
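// This file provides several Conv kernel variants: a reference kernel, a
// generic optimized kernel, a multithreaded Eigen-based kernel, and a
// CBLAS-backed kernel (see the Register_CONVOLUTION_* functions below).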
enum KernelType { … };
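// Sentinel index meaning a temporary tensor has not been allocated yet.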
const int kTensorNotAllocated = …;
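// Cap on the im2col scratch buffer on mobile platforms; convolutions that
// would need a larger buffer fall back to an execution path that does not
// use im2col.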
static constexpr size_t kMaxIm2colBufferSizeMobile = …;
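// Per-node scratch state: computed padding, quantized multipliers/shifts, and
// the indices of the temporary tensors (im2col, hwcn_weights, and the hybrid
// quantization buffers) owned by this op.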
struct OpData { … };
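// Maps the public TfLitePadding enum onto the PaddingType used by the
// internal kernel implementations.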
inline PaddingType RuntimePaddingType(TfLitePadding padding) { … }
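// Init/Free manage the lifetime of the OpData instance attached to each node.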
void* Init(TfLiteContext* context, const char* buffer, size_t length) { … }
void Free(TfLiteContext* context, void* buffer) { … }
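// Naive float transpose, used to produce the reordered (HWCN) weights
// consumed by the multithreaded Eigen kernel.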
void TransposeFloatTensor(const TfLiteTensor* input, TfLiteTensor* output) { … }
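// Returns whether the chosen kernel variant needs an im2col buffer for this
// combination of filter shape, strides, and dilations; some optimized paths
// compute the convolution directly.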
bool IsIm2ColRequired(const TfLiteTensor* input, TfLiteConvParams* params,
const TfLiteTensor* filter, OpData* data, bool is_hybrid,
KernelType kernel_type) { … }
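// Requests the temporary tensors (im2col, transposed weights, and the scratch
// buffers needed by the hybrid kernels) that Prepare will later resize.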
static TfLiteStatus AllocateTemporaryTensorsIfRequired(
TfLiteContext* context, TfLiteNode* node, bool is_hybrid,
bool is_per_channel, KernelType kernel_type, size_t im2col_bytes) { … }
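// Validates input/filter/bias types and shapes, computes padding and the
// output shape, derives quantization parameters, and sizes all temporaries.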
TfLiteStatus Prepare(KernelType kernel_type, TfLiteContext* context,
TfLiteNode* node) { … }
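// Thin wrapper so each registered kernel variant instantiates its own
// Prepare.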
template <KernelType kernel_type>
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { … }
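// Per-tensor quantized (uint8) convolution.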
template <KernelType kernel_type>
void EvalQuantized(TfLiteContext* context, TfLiteNode* node,
TfLiteConvParams* params, OpData* data,
const TfLiteTensor* input, const TfLiteTensor* filter,
const TfLiteTensor* bias, TfLiteTensor* im2col,
TfLiteTensor* output) { … }
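// Per-channel quantized (int8) convolution.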
template <KernelType kernel_type>
void EvalQuantizedPerChannel(TfLiteContext* context, TfLiteNode* node,
TfLiteConvParams* params, OpData* data,
const TfLiteTensor* input,
const TfLiteTensor* filter,
const TfLiteTensor* bias, TfLiteTensor* output,
TfLiteTensor* im2col) { … }
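// 16x8 quantized convolution: 16-bit activations with 8-bit per-channel
// weights.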
template <KernelType kernel_type>
void EvalQuantizedPerChannel16x8(TfLiteContext* context, TfLiteNode* node,
TfLiteConvParams* params, OpData* data,
const TfLiteTensor* input,
const TfLiteTensor* filter,
const TfLiteTensor* bias, TfLiteTensor* output,
TfLiteTensor* im2col) { … }
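// Float convolution; dispatches to the reference, generic optimized,
// multithreaded Eigen, or CBLAS implementation chosen at registration time.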
template <KernelType kernel_type>
void EvalFloat(TfLiteContext* context, TfLiteNode* node,
TfLiteConvParams* params, OpData* data,
const TfLiteTensor* input, const TfLiteTensor* filter,
const TfLiteTensor* bias, TfLiteTensor* im2col,
TfLiteTensor* hwcn_weights, TfLiteTensor* output) { … }
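// Hybrid convolution with per-channel weight scales: float inputs are
// quantized to int8 on the fly and convolved against the int8 weights.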
template <KernelType kernel_type>
TfLiteStatus EvalHybridPerChannel(TfLiteContext* context, TfLiteNode* node,
TfLiteConvParams* params, OpData* data,
const TfLiteTensor* input,
const TfLiteTensor* filter,
const TfLiteTensor* bias,
TfLiteTensor* im2col, TfLiteTensor* output) { … }
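// Hybrid convolution using a single per-tensor weight scale.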
template <KernelType kernel_type>
TfLiteStatus EvalHybrid(TfLiteContext* context, TfLiteNode* node,
TfLiteConvParams* params, OpData* data,
const TfLiteTensor* input, const TfLiteTensor* filter,
const TfLiteTensor* bias, TfLiteTensor* im2col,
TfLiteTensor* accum_scratch, TfLiteTensor* output) { … }
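// Dispatches to the eval routine matching the statically selected input type.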
template <KernelType kernel_type, TfLiteType input_type>
TfLiteStatus EvalImpl(TfLiteContext* context, TfLiteNode* node) { … }
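// Runtime entry point: inspects the input tensor type and forwards to the
// matching EvalImpl instantiation.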
template <KernelType kernel_type>
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { … }
}  // namespace conv
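// Each Register_* function below returns a TfLiteRegistration that wires up
// Init/Free/Prepare/Eval for one kernel variant.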
TfLiteRegistration* Register_CONVOLUTION_REF() { … }
TfLiteRegistration* Register_CONVOLUTION_GENERIC_OPT() { … }
TfLiteRegistration* Register_CONVOLUTION_GENERIC_OPT_UINT8() { … }
TfLiteRegistration* Register_CONVOLUTION_MULTITHREADED_OPT() { … }
TfLiteRegistration* Register_CONVOLUTION_CBLAS_OPT() { … }
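// Default CONV_2D registration: the multithreaded Eigen kernel when
// TFLITE_WITH_MULTITHREADED_EIGEN is defined, otherwise the generic
// optimized kernel.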
TfLiteRegistration* Register_CONV_2D() { … }
TfLiteRegistration* Register_CONV_2D_UINT8() { … }
}  // namespace builtin
}  // namespace ops
}  // namespace tflite