#include "tensorflow/lite/kernels/internal/reference/sub.h"
#include <stddef.h>
#include <stdint.h>
#include <algorithm>
#include <limits>
#include "tensorflow/lite/core/c/builtin_op_data.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/kernels/internal/optimized/cpu_check.h"
#include "tensorflow/lite/kernels/internal/optimized/integer_ops/sub.h"
#include "tensorflow/lite/kernels/internal/optimized/neon_check.h"
#include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h"
#include "tensorflow/lite/kernels/internal/quantization_util.h"
#include "tensorflow/lite/kernels/internal/reference/add.h"
#include "tensorflow/lite/kernels/internal/reference/integer_ops/add.h"
#include "tensorflow/lite/kernels/internal/reference/process_broadcast_shapes.h"
#include "tensorflow/lite/kernels/internal/reference/reference_ops.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/internal/types.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace sub {
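
// Which implementation Eval<kernel_type> dispatches to. In the upstream
// TFLite kernel the variants are a portable reference path, a generic
// optimized path, and a NEON-accelerated path.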
enum KernelType { … };
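
// Indices of this op's tensors within the node: SUB consumes two inputs
// (minuend, then subtrahend) and produces a single output.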
constexpr int kInputTensor1 = …;
constexpr int kInputTensor2 = …;
constexpr int kOutputTensor = …;
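
// Per-node state computed once in Prepare and reused across Eval calls:
// whether broadcasting is required and, for quantized types, the fixed-point
// multipliers, shifts, and zero-point offsets used to rescale the inputs.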
struct OpData { … };
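
// Allocates the per-node OpData; the interpreter passes it back to every
// Prepare/Eval call and releases it through Free.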
void* Init(TfLiteContext* context, const char* buffer, size_t length) { … }
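
// Releases the OpData allocated in Init.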
void Free(TfLiteContext* context, void* buffer) { … }
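
// Derives quantization parameters for the general quantized path (uint8,
// int8, and int16 with arbitrary scales): both inputs are rescaled to a
// shared fixed-point representation, and the output multiplier, shift, and
// activation range are computed here.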
TfLiteStatus PrepareGeneralSubOp(TfLiteContext* context,
const TfLiteTensor* input_1,
const TfLiteTensor* input_2,
TfLiteTensor* output, TfLiteSubParams* params,
OpData* op_params) { … }
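
// Prepares the restricted int16 path used when all scales are powers of two
// (POT) and zero points are zero; rescaling then reduces to bit shifts,
// which are validated and recorded here.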
TfLiteStatus PrepareInt16SubOpPOT(TfLiteContext* context,
const TfLiteTensor* input1,
const TfLiteTensor* input2,
TfLiteTensor* output, TfLiteSubParams* params,
OpData* data) { … }
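
// Checks tensor counts, types, and shapes, decides whether broadcasting is
// required, runs the quantized preparation above when applicable, and
// resizes the output tensor.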
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { … }
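
// Core evaluation, templated on kernel variant and element type: picks the
// broadcast or plain Sub routine from the reference or optimized ops.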
template <KernelType kernel_type, typename data_type>
void EvalSubImpl(TfLiteContext* context, TfLiteNode* node,
TfLiteSubParams* params, const OpData* data,
const TfLiteTensor* input1, const TfLiteTensor* input2,
bool requires_broadcast, TfLiteTensor* output) { … }
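
// Dispatches EvalSubImpl on the output element type for the non-quantized
// cases (floating-point and plain integer tensors).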
template <KernelType kernel_type>
void EvalSub(TfLiteContext* context, TfLiteNode* node, TfLiteSubParams* params,
const OpData* data, const TfLiteTensor* input1,
const TfLiteTensor* input2, TfLiteTensor* output) { … }
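
// Evaluates SUB on quantized tensors, applying the rescaling parameters
// precomputed in Prepare.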
template <KernelType kernel_type>
void EvalQuantized(TfLiteContext* context, TfLiteNode* node,
TfLiteSubParams* params, const OpData* data,
const TfLiteTensor* input1, const TfLiteTensor* input2,
TfLiteTensor* output) { … }
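
// Interpreter entry point: fetches the tensors and OpData, then routes to
// EvalSub or EvalQuantized based on the output type.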
template <KernelType kernel_type>
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { … }
}  // namespace sub
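
// Registration hooks for the three kernel variants; Register_SUB selects
// one for the current build (in the upstream kernel, the NEON variant when
// NEON is available, otherwise the generic optimized one).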
TfLiteRegistration* Register_SUB_REF() { … }
TfLiteRegistration* Register_SUB_GENERIC_OPT() { … }
TfLiteRegistration* Register_SUB_NEON_OPT() { … }
TfLiteRegistration* Register_SUB() { … }
}  // namespace builtin
}  // namespace ops
}  // namespace tflite