#include "tensorflow/lite/delegates/xnnpack/quantization_util.h"
#include <algorithm>
#include "fp16.h"
#include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h"
#include "tensorflow/lite/kernels/internal/reference/dequantize.h"
#include "tensorflow/lite/kernels/internal/runtime_shape.h"
#include "tensorflow/lite/kernels/internal/types.h"
namespace tflite {
namespace xnnpack {

// Unpacks IEEE half-precision (fp16) data into fp32.
void DequantizeFloat16(const uint16_t* packed_fp16_data,
                       float* unpacked_fp32_data, size_t tensor_elements) {
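  // Sketch of the elided body: convert each packed element with
  // fp16_ieee_to_fp32_value() from the FP16 library ("fp16.h" above).
  std::transform(packed_fp16_data, packed_fp16_data + tensor_elements,
                 unpacked_fp32_data, fp16_ieee_to_fp32_value);
}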

// Dequantizes int8 data with a single per-tensor zero point and scale:
// output = scale * (input - zero_point).
void DequantizeInt8(const int8_t* packed_s8_data, float* unpacked_fp32_data,
                    const RuntimeShape& tensor_shape, int32_t zero_point,
                    double scale) {
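  // Sketch of the elided body, assuming TFLite's internal
  // DequantizationParams (types.h) and optimized_ops::Dequantize()
  // (optimized_ops.h) from the headers included above.
  DequantizationParams op_params;
  op_params.zero_point = zero_point;
  op_params.scale = scale;
  optimized_ops::Dequantize(op_params, tensor_shape, packed_s8_data,
                            tensor_shape, unpacked_fp32_data);
}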

// Dequantizes int8 data with one (zero_point, scale) pair per slice along
// quantized_dimension, as used for per-channel quantized weights.
void PerChannelDequantizeInt8(const int8_t* packed_s8_data,
                              float* unpacked_fp32_data,
                              const RuntimeShape& tensor_shape,
                              const int32_t* zero_points, const float* scales,
                              int32_t quantized_dimension) {
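  // Sketch of the elided body, assuming TFLite's internal
  // PerChannelDequantizationParams (types.h) and
  // reference_ops::PerChannelDequantize() (reference/dequantize.h) from the
  // headers included above.
  PerChannelDequantizationParams op_params;
  op_params.zero_point = zero_points;
  op_params.scale = scales;
  op_params.quantized_dimension = quantized_dimension;
  reference_ops::PerChannelDequantize(op_params, tensor_shape, packed_s8_data,
                                      tensor_shape, unpacked_fp32_data);
}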

}  // namespace xnnpack
}  // namespace tflite
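
// Usage sketch (illustrative, not part of the original file); the tensor
// shape and quantization parameters below are hypothetical:
//
//   const int8_t quantized[4] = {-128, -64, 0, 127};
//   float dequantized[4];
//   tflite::xnnpack::DequantizeInt8(quantized, dequantized,
//                                   tflite::RuntimeShape({1, 2, 2, 1}),
//                                   /*zero_point=*/-128, /*scale=*/0.5);
//   // dequantized is now {0.0f, 32.0f, 64.0f, 127.5f}, since
//   // output = scale * (input - zero_point).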