#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_INTEGER_OPS_ADD_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_INTEGER_OPS_ADD_H_

#include <algorithm>

#include "fixedpoint/fixedpoint.h"
#include "ruy/profiler/instrumentation.h"
#include "tensorflow/lite/kernels/internal/common.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/kernels/internal/optimized/avx2_quantization_utils.h"
#include "tensorflow/lite/kernels/internal/optimized/cpu_check.h"
#include "tensorflow/lite/kernels/internal/optimized/neon_check.h"
#include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h"
#include "tensorflow/lite/kernels/internal/reference/integer_ops/add.h"
#include "tensorflow/lite/kernels/internal/types.h"
namespace tflite {
namespace optimized_integer_ops {
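
// Element-wise quantized addition of two int8 buffers of length `size`.
// Each input is recentered by its offset in `params`, rescaled with the
// per-input fixed-point multiplier/shift pair, summed, requantized to the
// output scale, and clamped to [quantized_activation_min,
// quantized_activation_max]. The optimized path may vectorize with NEON
// where available (see neon_check.h).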
inline void AddElementwiseInt8(int size, const ArithmeticParams& params,
const int8* input1_data, const int8* input2_data,
int8* output_data) { … }
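
// int16 variant of the element-wise quantized add above. On x86, the
// optimized path may use the AVX2 helpers pulled in through
// avx2_quantization_utils.h.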
inline void AddElementwiseInt16(int size, const ArithmeticParams& params,
const int16* input1_data,
const int16* input2_data, int16* output_data) { … }
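
// Scalar-broadcast variant: adds the single quantized value `input1_data`
// to each of the `size` elements of `input2_data`, applying the same
// offset / rescale / clamp scheme as the element-wise kernels.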
inline void AddScalarBroadcast(int size, const ArithmeticParams& params,
int8 input1_data, const int8* input2_data,
int8* output_data) { … }
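
// int8 Add entry point for the non-broadcast case: all three shapes must
// have the same number of elements; the inner loop is AddElementwiseInt8.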
inline void Add(const ArithmeticParams& params,
const RuntimeShape& input1_shape, const int8* input1_data,
const RuntimeShape& input2_shape, const int8* input2_data,
const RuntimeShape& output_shape, int8* output_data) { … }
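
// int16 overload of Add with the same contract as the int8 overload; the
// inner loop is AddElementwiseInt16.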
inline void Add(const ArithmeticParams& params,
const RuntimeShape& input1_shape, const int16* input1_data,
const RuntimeShape& input2_shape, const int16* input2_data,
const RuntimeShape& output_shape, int16* output_data) { … }
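
// Broadcasting int8 add. Dispatches on params.broadcast_category: the
// scalar-broadcast fast path reuses AddScalarBroadcast/AddElementwiseInt8,
// while generic broadcasts fall back to the reference implementation
// included above (reference/integer_ops/add.h).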
inline void BroadcastAddDispatch(const ArithmeticParams& params,
const RuntimeShape& input1_shape,
const int8* input1_data,
const RuntimeShape& input2_shape,
const int8* input2_data,
const RuntimeShape& output_shape,
int8* output_data) { … }
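
// Illustrative parameter setup for the int8 Add() above. This is a sketch
// only: the variable names (input1_zero_point, shape, input1_data, ...) and
// the concrete left_shift value are assumptions for illustration, not
// requirements of this header. Field names are those of
// tflite::ArithmeticParams.
//
//   ArithmeticParams op_params;
//   op_params.left_shift = 20;                      // headroom before rescale
//   op_params.input1_offset = -input1_zero_point;   // offsets are the negated
//   op_params.input2_offset = -input2_zero_point;   //   input zero points
//   op_params.output_offset = output_zero_point;
//   op_params.input1_multiplier = input1_multiplier;  // fixed-point rescale
//   op_params.input1_shift = input1_shift;            //   factors, e.g. from
//   op_params.input2_multiplier = input2_multiplier;  //   QuantizeMultiplier()
//   op_params.input2_shift = input2_shift;
//   op_params.output_multiplier = output_multiplier;
//   op_params.output_shift = output_shift;
//   op_params.quantized_activation_min = -128;      // int8 output range
//   op_params.quantized_activation_max = 127;
//   Add(op_params, shape, input1_data, shape, input2_data, shape, output_data);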

}  // namespace optimized_integer_ops
}  // namespace tflite

#endif  // TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_INTEGER_OPS_ADD_H_