#include "tensorflow/lite/kernels/internal/reference/quantize.h"
#include <cstddef>
#include <cstdint>
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h"
#include "tensorflow/lite/kernels/internal/quantization_util.h"
#include "tensorflow/lite/kernels/internal/reference/reference_ops.h"
#include "tensorflow/lite/kernels/internal/reference/requantize.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/internal/types.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace quantize {
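
// Two kernel flavors are compiled from the templates below; the choice is
// made at registration time (see Register_QUANTIZE_REF /
// Register_QUANTIZE_OPT at the bottom of the file).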
enum KernelType { … };
struct OpData { … };
inline bool IsQuantizedPerChannel(const TfLiteTensor* input) { … }
namespace {
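// Float -> quantized conversion. Elementwise affine mapping, conceptually
//   q = zero_point + round(x / scale)
// clamped to the range of output_type; kernel_type selects between the
// reference_ops:: and optimized_ops:: implementations included above.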
template <KernelType kernel_type, typename output_type>
static inline void AffineQuantize(const tflite::QuantizationParams& op_params,
const RuntimeShape& input_shape,
const float* input_data,
const RuntimeShape& output_shape,
output_type* output_data) { … }
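
// Quantized -> quantized rescaling, used when input and output quantization
// parameters differ. The effective scale (input_scale / output_scale) is
// given as a fixed-point multiplier plus a power-of-two shift; per element,
// conceptually:
//   out = output_zeropoint +
//         MultiplyByQuantizedMultiplier(in - input_zeropoint,
//                                       effective_scale_multiplier,
//                                       effective_scale_shift)
// where MultiplyByQuantizedMultiplier is TFLite's rounding fixed-point
// multiply helper.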
template <KernelType kernel_type, typename input_type, typename output_type>
static inline void Requantize(const input_type* input_data, int32_t size,
int32_t effective_scale_multiplier,
int32_t effective_scale_shift,
int32_t input_zeropoint, int32_t output_zeropoint,
output_type* output_data) { … }
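
// Reports an unsupported input-type -> output-type combination via `context`.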
void ReportError(TfLiteContext* context, TfLiteType input_type,
TfLiteType output_type) { … }
}  // namespace
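
// Allocates the per-node OpData (standard Init/Free pairing).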
void* Init(TfLiteContext* context, const char* buffer, size_t length) { … }
void Free(TfLiteContext* context, void* buffer) { … }
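
// Validates the input/output type pairing and sizes the output. Given the
// elided body, this is also the natural place to precompute the effective
// requantization scale (e.g. via QuantizeMultiplier() from
// quantization_util.h) into OpData so Eval() stays cheap.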
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { … }
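
// Dispatches on the runtime input/output types: float inputs go through
// AffineQuantize(), quantized inputs through Requantize(), and unsupported
// combinations through ReportError().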
template <KernelType kernel_type>
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { … }
}  // namespace quantize
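
// Registration entry points: the _OPT and _REF variants bind Eval<> to the
// optimized and reference kernel types respectively; Register_QUANTIZE()
// returns the default (typically the optimized variant).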
TfLiteRegistration* Register_QUANTIZE_OPT() { … }
TfLiteRegistration* Register_QUANTIZE_REF() { … }
TfLiteRegistration* Register_QUANTIZE() { … }
}  // namespace builtin
}  // namespace ops
}  // namespace tflite