#include <algorithm>
#include <cmath>
#include <complex>
#include <cstddef>
#include <cstdint>
#include <limits>
#include "Eigen/Core"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/core/subgraph.h"
#include "tensorflow/lite/interpreter_options.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/kernels/op_macros.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace cast {
namespace {
constexpr int kInputTensor = …;
constexpr int kOutputTensor = …;
void copyCast(const float* in, int32_t* out, int num_elements) { … }
void copyCast(const float* in, int16_t* out, int num_elements) { … }
void copyCast(const float* in, uint8_t* out, int num_elements) { … }
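// The dedicated float -> integer overloads above presumably exist so that
// floating-point values are rounded (and kept in range) rather than truncated
// by a plain static_cast. A minimal sketch of that idea, assuming
// round-to-nearest with saturation; the name CopyCastFloatToIntSketch and the
// exact NaN/overflow behaviour are illustrative assumptions, not the upstream
// implementation.
template <typename IntT>
void CopyCastFloatToIntSketch(const float* in, IntT* out, int num_elements) {
  const double lo = static_cast<double>(std::numeric_limits<IntT>::min());
  const double hi = static_cast<double>(std::numeric_limits<IntT>::max());
  for (int i = 0; i < num_elements; ++i) {
    const double rounded = std::round(static_cast<double>(in[i]));
    out[i] = static_cast<IntT>(std::max(lo, std::min(rounded, hi)));
  }
}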
template <typename FromT, typename ToT>
void copyCast(const FromT* in, ToT* out, int num_elements) { … }
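// A minimal sketch of the generic element-wise conversion the template above
// declares: each element is converted with static_cast. The name
// CopyCastGenericSketch is hypothetical; it only illustrates the pattern.
template <typename FromT, typename ToT>
void CopyCastGenericSketch(const FromT* in, ToT* out, int num_elements) {
  std::transform(in, in + num_elements, out,
                 [](FromT a) { return static_cast<ToT>(a); });
}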
template <typename ToT>
void copyCast(const std::complex<float>* in, ToT* out, int num_elements) { … }
template <>
void copyCast(const std::complex<float>* in, std::complex<float>* out,
int num_elements) { … }
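// Sketch of how the complex-valued source overloads above might behave,
// assuming real-valued targets take the real part and the
// complex -> complex specialization is a straight copy. Names are
// hypothetical.
template <typename ToT>
void CopyCastFromComplexSketch(const std::complex<float>* in, ToT* out,
                               int num_elements) {
  std::transform(in, in + num_elements, out, [](std::complex<float> a) {
    return static_cast<ToT>(std::real(a));
  });
}

void CopyCastComplexToComplexSketch(const std::complex<float>* in,
                                    std::complex<float>* out,
                                    int num_elements) {
  std::copy(in, in + num_elements, out);
}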
template <typename ToT>
void copyCast(const Eigen::half* in, ToT* out, int num_elements) { … }
template <>
void copyCast(const Eigen::half* in, std::complex<float>* out,
int num_elements) { … }
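// Sketch of the Eigen::half source overloads, assuming the half value is
// widened to float before the final conversion and that the complex target
// gets a zero imaginary part. Names are hypothetical.
template <typename ToT>
void CopyCastFromHalfSketch(const Eigen::half* in, ToT* out,
                            int num_elements) {
  std::transform(in, in + num_elements, out, [](Eigen::half a) {
    return static_cast<ToT>(static_cast<float>(a));
  });
}

void CopyCastHalfToComplexSketch(const Eigen::half* in,
                                 std::complex<float>* out, int num_elements) {
  std::transform(in, in + num_elements, out, [](Eigen::half a) {
    return std::complex<float>(static_cast<float>(a), 0.0f);
  });
}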
template <typename FromT>
void copyCastToFloat16(const FromT* in, Eigen::half* out, int num_elements) { … }
template <>
void copyCastToFloat16(const std::complex<float>* in, Eigen::half* out,
int num_elements) { … }
template <>
void copyCastToFloat16(const Eigen::half* in, Eigen::half* out,
int num_elements) { … }
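// Sketch of the float16 destination family, assuming the source is first
// widened to float and then converted to Eigen::half (with the complex
// specialization presumably taking the real part and half -> half being a
// straight copy). The name CopyCastToHalfSketch is hypothetical.
template <typename FromT>
void CopyCastToHalfSketch(const FromT* in, Eigen::half* out,
                          int num_elements) {
  std::transform(in, in + num_elements, out, [](FromT a) {
    return Eigen::half(static_cast<float>(a));
  });
}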
TfLiteStatus castInt4ToFloat(TfLiteContext* context, const TfLiteTensor* in,
TfLiteTensor* out, int num_elements) { … }
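// Hypothetical sketch of the packed-int4 path: two signed 4-bit values per
// byte are sign-extended and written out as floats. The low-nibble-first
// packing order and the helper name are assumptions, not the upstream code.
TfLiteStatus CastInt4ToFloatSketch(TfLiteContext* context,
                                   const TfLiteTensor* in, TfLiteTensor* out,
                                   int num_elements) {
  TF_LITE_ENSURE_TYPES_EQ(context, out->type, kTfLiteFloat32);
  const int8_t* packed = GetTensorData<int8_t>(in);
  float* unpacked = GetTensorData<float>(out);
  for (int i = 0; i < num_elements; ++i) {
    const int8_t byte = packed[i / 2];
    const int nibble = (i % 2 == 0) ? (byte & 0x0F) : ((byte >> 4) & 0x0F);
    // Sign-extend the 4-bit value into [-8, 7].
    unpacked[i] = static_cast<float>(nibble >= 8 ? nibble - 16 : nibble);
  }
  return kTfLiteOk;
}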
template <typename FromT>
TfLiteStatus copyToTensor(TfLiteContext* context, const FromT* in,
TfLiteTensor* out, int num_elements) { … }
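// Sketch of the per-output-type dispatch copyToTensor declares, assuming it
// switches on out->type and forwards to the copyCast helpers above (only a
// few representative cases are shown). The name is hypothetical.
template <typename FromT>
TfLiteStatus CopyToTensorSketch(TfLiteContext* context, const FromT* in,
                                TfLiteTensor* out, int num_elements) {
  switch (out->type) {
    case kTfLiteFloat32:
      copyCast(in, GetTensorData<float>(out), num_elements);
      return kTfLiteOk;
    case kTfLiteInt32:
      copyCast(in, GetTensorData<int32_t>(out), num_elements);
      return kTfLiteOk;
    case kTfLiteFloat16:
      copyCastToFloat16(in, GetTensorData<Eigen::half>(out), num_elements);
      return kTfLiteOk;
    default:
      TF_LITE_KERNEL_LOG(context, "Unsupported output type %s in Cast.",
                         TfLiteTypeGetName(out->type));
      return kTfLiteError;
  }
}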
TfLiteStatus EvalImpl(TfLiteContext* context, const TfLiteTensor* input,
TfLiteTensor* output, const int num_elements) { … }
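// Sketch of the per-input-type dispatch in EvalImpl, assuming it switches on
// input->type and instantiates copyToTensor for the matching element type
// (only a few representative cases are shown). The name is hypothetical.
TfLiteStatus EvalImplSketch(TfLiteContext* context, const TfLiteTensor* input,
                            TfLiteTensor* output, const int num_elements) {
  switch (input->type) {
    case kTfLiteFloat32:
      return copyToTensor(context, GetTensorData<float>(input), output,
                          num_elements);
    case kTfLiteInt32:
      return copyToTensor(context, GetTensorData<int32_t>(input), output,
                          num_elements);
    case kTfLiteBool:
      return copyToTensor(context, GetTensorData<bool>(input), output,
                          num_elements);
    default:
      TF_LITE_KERNEL_LOG(context, "Unsupported input type %s in Cast.",
                         TfLiteTypeGetName(input->type));
      return kTfLiteError;
  }
}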
struct OpData { … };
void* Init(TfLiteContext* context, const char* /*buffer*/, size_t /*length*/) { … }
void Free(TfLiteContext* context, void* op_data) { … }
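// Hypothetical illustration of the usual Init/Free pairing for per-node
// state: Init allocates the OpData instance the interpreter hands back to
// every other entry point, and Free releases it. The OpDataSketch member is
// an assumption about what the real OpData tracks, not its actual contents.
struct OpDataSketch {
  bool cached_output = false;
};

void* InitSketch(TfLiteContext* /*context*/, const char* /*buffer*/,
                 size_t /*length*/) {
  return new OpDataSketch();
}

void FreeSketch(TfLiteContext* /*context*/, void* op_data) {
  delete static_cast<OpDataSketch*>(op_data);
}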
bool OutputCachingEnabled(const TfLiteContext* context) { … }
bool ShouldCacheOutput(const TfLiteContext* context,
const TfLiteTensor* input) { … }
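// Hypothetical sketch of the caching predicate: caching the cast result only
// makes sense when the interpreter opts in and the input is a constant
// tensor, so the output cannot change between invocations. The
// IsConstantTensor gating is an assumption about what the real
// ShouldCacheOutput checks.
bool ShouldCacheOutputSketch(const TfLiteContext* context,
                             const TfLiteTensor* input) {
  return OutputCachingEnabled(context) && IsConstantTensor(input);
}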
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { … }
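// A minimal sketch of what Prepare typically does for this op, assuming the
// usual single-input/single-output contract: validate the tensor counts and
// give the output the input's shape (the element type is already fixed by
// the output tensor). Caching-related bookkeeping is omitted here.
TfLiteStatus PrepareSketch(TfLiteContext* context, TfLiteNode* node) {
  TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
  TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
  const TfLiteTensor* input;
  TF_LITE_ENSURE_OK(context,
                    GetInputSafe(context, node, kInputTensor, &input));
  TfLiteTensor* output;
  TF_LITE_ENSURE_OK(context,
                    GetOutputSafe(context, node, kOutputTensor, &output));
  return context->ResizeTensor(context, output,
                               TfLiteIntArrayCopy(input->dims));
}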
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { … }
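// A minimal sketch of Eval under the same assumptions: fetch the tensors,
// check that the element counts agree, and hand off to EvalImpl. The real
// Eval presumably also consults the output-caching helpers above; that path
// is omitted from this sketch.
TfLiteStatus EvalSketch(TfLiteContext* context, TfLiteNode* node) {
  const TfLiteTensor* input;
  TF_LITE_ENSURE_OK(context,
                    GetInputSafe(context, node, kInputTensor, &input));
  TfLiteTensor* output;
  TF_LITE_ENSURE_OK(context,
                    GetOutputSafe(context, node, kOutputTensor, &output));
  const int num_elements = NumElements(input);
  TF_LITE_ENSURE_EQ(context, num_elements, NumElements(output));
  return EvalImpl(context, input, output, num_elements);
}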
}  // namespace
}  // namespace cast
TfLiteRegistration* Register_CAST() { … }
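// A sketch of the registration boilerplate, assuming the standard pattern of
// returning a function-local static TfLiteRegistration wired to the entry
// points in the cast namespace. The name Register_CAST_Sketch is
// hypothetical.
TfLiteRegistration* Register_CAST_Sketch() {
  static TfLiteRegistration r = {cast::Init, cast::Free, cast::Prepare,
                                 cast::Eval};
  return &r;
}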
}  // namespace builtin
}  // namespace ops
}  // namespace tflite