#ifndef TENSORFLOW_LITE_KERNELS_DEQUANTIZE_H_
#define TENSORFLOW_LITE_KERNELS_DEQUANTIZE_H_
#include <stdint.h>
#include <memory>
#include "Eigen/Core"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h"
#include "tensorflow/lite/kernels/internal/portable_tensor_utils.h"
#include "tensorflow/lite/kernels/internal/reference/dequantize.h"
#include "tensorflow/lite/kernels/internal/reference/integer_ops/dequantize.h"
#include "tensorflow/lite/kernels/internal/reference/reference_ops.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/internal/types.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace dequantize {
// Selects the kernel implementation variant; used as the template parameter
// of DequantizeImpl below. (Enumerators not shown in this view — typically
// reference vs. optimized in TFLite kernels; confirm against the definition.)
enum KernelType { … };
// Predicate on `input`'s quantization scheme. By name, returns true when the
// tensor carries per-channel (per-axis) quantization parameters rather than a
// single per-tensor scale/zero-point — body elided here; verify in the
// implementation. Does not modify `input`.
inline bool IsQuantizedPerChannel(const TfLiteTensor* input) { … }
// Dequantizes a per-channel-quantized `input` tensor into `output`.
// `context`/`node` follow the standard TFLite kernel calling convention and
// are available for error reporting. Returns kTfLiteOk on success, an error
// status otherwise (body elided in this view — see the definition for the
// supported input types).
inline TfLiteStatus PerChannelDequantizeImpl(TfLiteContext* context,
TfLiteNode* node,
const TfLiteTensor* input,
TfLiteTensor* output) { … }
// Main dequantize entry point, statically specialized on `kernel_type`
// (see the KernelType enum above) so the reference or optimized code path is
// chosen at compile time. Writes the dequantized values of `input` into
// `output` and returns a TfLiteStatus (body elided in this view — confirm the
// per-type dispatch against the definition).
template <KernelType kernel_type>
TfLiteStatus DequantizeImpl(TfLiteContext* context, TfLiteNode* node,
const TfLiteTensor* input, TfLiteTensor* output) { … }
}
}
}
}
#endif