#include <stddef.h>
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <functional>
#include <limits>
#include "tensorflow/lite/core/c/builtin_op_data.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/cpu_backend_context.h"
#include "tensorflow/lite/kernels/internal/common.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/kernels/internal/cppmath.h"
#include "tensorflow/lite/kernels/internal/optimized/integer_ops/leaky_relu.h"
#include "tensorflow/lite/kernels/internal/optimized/integer_ops/lut.h"
#include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h"
#include "tensorflow/lite/kernels/internal/quantization_util.h"
#include "tensorflow/lite/kernels/internal/reference/binary_function.h"
#include "tensorflow/lite/kernels/internal/reference/gelu.h"
#include "tensorflow/lite/kernels/internal/reference/integer_ops/log_softmax.h"
#include "tensorflow/lite/kernels/internal/reference/integer_ops/logistic.h"
#include "tensorflow/lite/kernels/internal/reference/integer_ops/lut.h"
#include "tensorflow/lite/kernels/internal/reference/integer_ops/tanh.h"
#include "tensorflow/lite/kernels/internal/reference/logistic.h"
#include "tensorflow/lite/kernels/internal/reference/prelu.h"
#include "tensorflow/lite/kernels/internal/reference/reference_ops.h"
#include "tensorflow/lite/kernels/internal/reference/softmax.h"
#include "tensorflow/lite/kernels/internal/reference/tanh.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/internal/types.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#ifdef TFLITE_KERNEL_USE_XNNPACK
#include "xnnpack.h"
#include "tensorflow/lite/logger.h"
#include "tensorflow/lite/minimal_logging.h"
#include "pthreadpool.h"
#endif  // TFLITE_KERNEL_USE_XNNPACK
namespace tflite {
namespace ops {
namespace builtin {
namespace activations {
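// Selects which implementation an op instance runs. Judging from the
// Register_*_REF / _GENERIC_OPT / _FIXED_POINT_OPT variants at the bottom of
// this file, this presumably enumerates reference, generic-optimized, and
// fixed-point-optimized kernels.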
enum KernelType { … };
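// Per-op scratch data, allocated in the Init hooks and released in the Free
// hooks below. For the quantized paths these structs typically cache values
// computed once in Prepare (quantized multipliers/shifts, lookup tables,
// clamped activation ranges) so that Eval stays allocation-free.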
struct OpData { … };
struct SoftmaxOpData { … };
struct LogSoftmaxOpData : public OpData { … };
struct LeakyReluOpData : public OpData { … };
struct PreluOpData : public OpData { … };
struct HardSwishData { … };
struct ReluOpData : public OpData { … };
namespace {
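// Clamps a quantized tensor to the real-valued interval [act_min, act_max].
// A minimal sketch of the usual bound conversion, assuming the standard
// affine quantization q = zero_point + round(x / scale):
//
//   quantized_min = max(numeric_limits<T>::min(),
//                       zero_point + round(act_min / scale));
//   quantized_max = min(numeric_limits<T>::max(),
//                       zero_point + round(act_max / scale));
//
// Every element is then clipped to [quantized_min, quantized_max].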
template <typename T>
void QuantizedReluX(float act_min, float act_max, const TfLiteTensor* input,
TfLiteTensor* output, const ReluOpData* data) { … }
}  // namespace
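// The Init/Free pairs below follow the standard TfLiteRegistration
// lifecycle: Init returns a heap-allocated per-node struct and the matching
// Free deletes it. A sketch of the canonical pattern:
//
//   void* Init(TfLiteContext*, const char*, size_t) { return new OpData; }
//   void Free(TfLiteContext*, void* buffer) {
//     delete reinterpret_cast<OpData*>(buffer);
//   }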
void* Init(TfLiteContext* context, const char* buffer, size_t length) { … }
void* SoftmaxInit(TfLiteContext* context, const char* buffer, size_t length) { … }
void SoftmaxFree(TfLiteContext* context, void* buffer) { … }
void* LogSoftmaxInit(TfLiteContext* context, const char* buffer,
size_t length) { … }
void* PreluInit(TfLiteContext* context, const char* buffer, size_t length) { … }
void Free(TfLiteContext* context, void* buffer) { … }
void LogSoftmaxFree(TfLiteContext* context, void* buffer) { … }
void PreluFree(TfLiteContext* context, void* buffer) { … }
void* HardSwishInit(TfLiteContext* context, const char* buffer, size_t length) { … }
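// Shared Prepare for the simple element-wise activations: it presumably
// checks for a single input/output pair and resizes the output to the
// input's shape, since these ops are shape-preserving.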
TfLiteStatus GenericPrepare(TfLiteContext* context, TfLiteNode* node) { … }
void* ReluInit(TfLiteContext* context, const char* buffer, size_t length) { … }
void ReluFree(TfLiteContext* context, void* buffer) { … }
TfLiteStatus ReluPrepare(TfLiteContext* context, TfLiteNode* node) { … }
void* LeakyReluInit(TfLiteContext* context, const char* buffer, size_t length) { … }
void LeakyReluFree(TfLiteContext* context, void* buffer) { … }
void HardSwishFree(TfLiteContext* context, void* buffer) { … }
TfLiteStatus HardSwishPrepare(TfLiteContext* context, TfLiteNode* node) { … }
TfLiteStatus LeakyReluPrepare(TfLiteContext* context, TfLiteNode* node) { … }
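// The kernel_type-templated Prepare functions below specialize their setup
// per implementation: the fixed-point and LUT-backed variants typically
// validate the input scale/zero-point and precompute multipliers or lookup
// tables in OpData, while the reference/float paths only propagate shapes.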
template <KernelType kernel_type>
TfLiteStatus TanhPrepare(TfLiteContext* context, TfLiteNode* node) { … }
template <KernelType kernel_type>
TfLiteStatus SigmoidPrepare(TfLiteContext* context, TfLiteNode* node) { … }
template <KernelType kernel_type>
TfLiteStatus SoftmaxPrepare(TfLiteContext* context, TfLiteNode* node) { … }
template <KernelType kernel_type>
TfLiteStatus LogSoftmaxPrepare(TfLiteContext* context, TfLiteNode* node) { … }
TfLiteStatus PreluPrepare(TfLiteContext* context, TfLiteNode* node) { … }
TfLiteStatus ReluEval(TfLiteContext* context, TfLiteNode* node) { … }
TfLiteStatus Relu1Eval(TfLiteContext* context, TfLiteNode* node) { … }
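// Hard-swish computes f(x) = x * relu6(x + 3) / 6, a piecewise-linear
// approximation of swish that is cheap in fixed-point arithmetic.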
template <KernelType kernel_type>
TfLiteStatus HardSwishEval(TfLiteContext* context, TfLiteNode* node) { … }
TfLiteStatus Relu0to1Eval(TfLiteContext* context, TfLiteNode* node) { … }
TfLiteStatus Relu6Eval(TfLiteContext* context, TfLiteNode* node) { … }
template <KernelType kernel_type>
TfLiteStatus TanhEval(TfLiteContext* context, TfLiteNode* node) { … }
template <KernelType kernel_type>
TfLiteStatus SigmoidEval(TfLiteContext* context, TfLiteNode* node) { … }
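// Float softmax: softmax(x)_i = exp(beta * (x_i - max_j x_j)) /
// sum_k exp(beta * (x_k - max_j x_j)), with beta taken from
// TfLiteSoftmaxParams; subtracting the row max keeps exp() from overflowing.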
TfLiteStatus SoftmaxFloat(TfLiteContext* context, const TfLiteTensor* input,
TfLiteTensor* output, TfLiteSoftmaxParams* params,
KernelType kernel_type = kGenericOptimized) { … }
template <typename In, typename Out>
TfLiteStatus SoftmaxQuantized(TfLiteContext* context, const TfLiteTensor* input,
TfLiteTensor* output, SoftmaxOpData* data,
KernelType kernel_type = kGenericOptimized) { … }
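// Explicit specializations for same-typed input/output pairs. These are
// presumably where the int8/uint8 cases route to the LUT-based kernels and
// int16 to the fixed-point implementation, per the headers included above.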
template <>
TfLiteStatus SoftmaxQuantized<int8_t, int8_t>(TfLiteContext* context,
const TfLiteTensor* input,
TfLiteTensor* output,
SoftmaxOpData* data,
KernelType kernel_type) { … }
template <>
TfLiteStatus SoftmaxQuantized<uint8_t, uint8_t>(TfLiteContext* context,
const TfLiteTensor* input,
TfLiteTensor* output,
SoftmaxOpData* data,
KernelType kernel_type) { … }
template <>
TfLiteStatus SoftmaxQuantized<int16_t, int16_t>(TfLiteContext* context,
const TfLiteTensor* input,
TfLiteTensor* output,
SoftmaxOpData* data,
KernelType kernel_type) { … }
template <KernelType kernel_type>
TfLiteStatus SoftmaxEval(TfLiteContext* context, TfLiteNode* node) { … }
template <KernelType kernel_type>
TfLiteStatus LogSoftmaxEval(TfLiteContext* context, TfLiteNode* node) { … }
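// PReLU: f(x) = x for x >= 0, and f(x) = alpha * x otherwise, with alpha
// broadcast from the op's second input tensor.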
template <typename T>
T ApplyPrelu(T input, T alpha) { … }
template <KernelType kernel_type>
TfLiteStatus PreluEval(TfLiteContext* context, TfLiteNode* node) { … }
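// Quantized leaky ReLU needs two rescale paths: an identity multiplier for
// x >= 0 and an alpha-scaled multiplier for x < 0. LeakyReluOpData
// presumably holds both quantized multiplier/shift pairs, computed in
// LeakyReluPrepare from the input scale, output scale, and params->alpha.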
template <KernelType kernel_type, typename T>
void QuantizeLeakyRelu(const TfLiteTensor* input, TfLiteTensor* output,
const LeakyReluOpData* data) { … }
template <KernelType kernel_type>
TfLiteStatus LeakyReluEval(TfLiteContext* context, TfLiteNode* node) { … }
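// ELU: f(x) = x for x > 0, and f(x) = exp(x) - 1 otherwise (the TFLite
// builtin takes no alpha parameter, i.e. alpha = 1).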
TfLiteStatus EluPrepare(TfLiteContext* context, TfLiteNode* node) { … }
TfLiteStatus EluEval(TfLiteContext* context, TfLiteNode* node) { … }
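// GELU: f(x) = x * Phi(x), where Phi is the standard normal CDF;
// TfLiteGeluParams::approximate selects the cheaper tanh-based
// approximation over the exact erf form.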
TfLiteStatus GeluPrepare(TfLiteContext* context, TfLiteNode* node) { … }
TfLiteStatus GeluEval(TfLiteContext* context, TfLiteNode* node) { … }
}  // namespace activations
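// Each Register_* function below wires the hooks above into a static
// TfLiteRegistration. A sketch of the canonical pattern (illustrative, not
// copied from this file):
//
//   TfLiteRegistration* Register_RELU() {
//     static TfLiteRegistration r = {activations::ReluInit,
//                                    activations::ReluFree,
//                                    activations::ReluPrepare,
//                                    activations::ReluEval};
//     return &r;
//   }
//
// The un-suffixed registrars presumably pick the optimized variant, with
// _REF selecting the reference kernel.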
TfLiteRegistration* Register_ELU() { … }
TfLiteRegistration* Register_RELU() { … }
TfLiteRegistration* Register_RELU_N1_TO_1() { … }
TfLiteRegistration* Register_RELU6() { … }
TfLiteRegistration* Register_RELU_0_TO_1() { … }
TfLiteRegistration* Register_TANH_REF() { … }
TfLiteRegistration* Register_TANH_GENERIC_OPT() { … }
TfLiteRegistration* Register_TANH_FIXED_POINT_OPT() { … }
TfLiteRegistration* Register_TANH() { … }
TfLiteRegistration* Register_LOGISTIC_REF() { … }
TfLiteRegistration* Register_LOGISTIC_GENERIC_OPT() { … }
TfLiteRegistration* Register_LOGISTIC_FIXED_POINT_OPT() { … }
TfLiteRegistration* Register_LOGISTIC() { … }
TfLiteRegistration* Register_SOFTMAX_REF() { … }
TfLiteRegistration* Register_SOFTMAX() { … }
TfLiteRegistration* Register_LOG_SOFTMAX_REF() { … }
TfLiteRegistration* Register_LOG_SOFTMAX() { … }
TfLiteRegistration* Register_PRELU_REF() { … }
TfLiteRegistration* Register_PRELU() { … }
TfLiteRegistration* Register_LEAKY_RELU_REF() { … }
TfLiteRegistration* Register_LEAKY_RELU() { … }
TfLiteRegistration* Register_HARD_SWISH() { … }
TfLiteRegistration* Register_HARD_SWISH_REF() { … }
TfLiteRegistration* Register_GELU() { … }
}  // namespace builtin
}  // namespace ops
}  // namespace tflite