#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_INTEGER_OPS_FULLY_CONNECTED_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_INTEGER_OPS_FULLY_CONNECTED_H_

#include "ruy/profiler/instrumentation.h"
#include "tensorflow/lite/kernels/cpu_backend_context.h"
#include "tensorflow/lite/kernels/cpu_backend_gemm.h"
#include "tensorflow/lite/kernels/cpu_backend_gemm_params.h"
#include "tensorflow/lite/kernels/internal/common.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/kernels/internal/reference/integer_ops/fully_connected.h"
#include "tensorflow/lite/kernels/internal/types.h"
namespace tflite {
namespace optimized_integer_ops {
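
// Optimized per-channel quantized FullyConnected. `output_multiplier` and
// `output_shift` hold one requantization multiplier/shift pair per output
// channel; the filter is int8, while the input and output scalar types are
// template parameters so the same kernel covers int8 and int16 activation
// paths. The matrix multiplication is dispatched through cpu_backend_gemm
// using the provided CpuBackendContext.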
template <typename InputScalar, typename DstScalar>
inline void FullyConnectedPerChannel(
    const FullyConnectedParams& params, const int32_t* output_multiplier,
    const int* output_shift, const RuntimeShape& input_shape,
    const InputScalar* input_data, const RuntimeShape& filter_shape,
    const int8_t* filter_data, const RuntimeShape& bias_shape,
    const int32_t* bias_data, const RuntimeShape& output_shape,
    DstScalar* output_data, CpuBackendContext* cpu_backend_context) { … }
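
// Optimized per-tensor quantized FullyConnected: a single output multiplier
// and shift, carried in `params`, apply to all output channels. Otherwise it
// has the same shape and type requirements as the per-channel variant above.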
template <typename InputScalar, typename DstScalar>
inline void FullyConnected(
    const FullyConnectedParams& params, const RuntimeShape& input_shape,
    const InputScalar* input_data, const RuntimeShape& filter_shape,
    const int8_t* filter_data, const RuntimeShape& bias_shape,
    const int32_t* bias_data, const RuntimeShape& output_shape,
    DstScalar* output_data, CpuBackendContext* cpu_backend_context) { … }
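
// Usage sketch for the per-tensor variant with int8 activations, assuming the
// caller has already derived the requantization parameters and follows the
// usual TFLite convention of negating the input/filter zero points:
//
//   FullyConnectedParams op_params;
//   op_params.input_offset = -input_zero_point;
//   op_params.weights_offset = -filter_zero_point;
//   op_params.output_offset = output_zero_point;
//   op_params.output_multiplier = output_multiplier;
//   op_params.output_shift = output_shift;
//   op_params.quantized_activation_min = std::numeric_limits<int8_t>::min();
//   op_params.quantized_activation_max = std::numeric_limits<int8_t>::max();
//   FullyConnected(op_params, input_shape, input_data, filter_shape,
//                  filter_data, bias_shape, bias_data, output_shape,
//                  output_data, cpu_backend_context);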

}  // namespace optimized_integer_ops
}  // namespace tflite

#endif  // TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_INTEGER_OPS_FULLY_CONNECTED_H_