#include "mediapipe/calculators/tensor/inference_calculator_utils.h"
#include <cstdint>
#include <cstring>
#include <ostream>
#include <string>
#include <vector>

#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "absl/types/span.h"
#include "mediapipe/calculators/tensor/inference_calculator.pb.h"
#include "mediapipe/framework/formats/tensor.h"
#include "mediapipe/framework/memory_manager.h"
#include "mediapipe/framework/port.h"
#include "mediapipe/framework/port/ret_check.h"
#include "mediapipe/framework/port/status_macros.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/interpreter.h"
#include "tensorflow/lite/portable_type_to_tflitetype.h"
#include "tensorflow/lite/string_util.h"
#if !defined(__EMSCRIPTEN__) || defined(__EMSCRIPTEN_PTHREADS__)
#include "mediapipe/util/cpu_util.h"
#endif

namespace mediapipe {
namespace {

int GetXnnpackDefaultNumThreads() { … }
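
// GetXnnpackDefaultNumThreads: the body is elided above, but the conditional
// cpu_util.h include suggests the default is derived from the detected core
// count when threading is available. A plausible sketch (an assumption, not
// the actual implementation):
//
//   #if !defined(__EMSCRIPTEN__) || defined(__EMSCRIPTEN_PTHREADS__)
//     return std::max(NumCPUCores() / 2, 1);  // Leave headroom for other work.
//   #else
//     return 1;  // No worker threads without Emscripten pthread support.
//   #endif
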
bool operator==(Tensor::ElementType tensor_type, TfLiteType tflite_type) { … }
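
// Intended semantics of the mixed-type operator== above (a sketch; the actual
// mapping table is elided):
//
//   Tensor::ElementType::kFloat32 == kTfLiteFloat32  // true
//   Tensor::ElementType::kUInt8   == kTfLiteUInt8    // true
//   Tensor::ElementType::kFloat32 == kTfLiteInt32    // false
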
template <typename T>
absl::Status CopyTensorToTfLiteTensor(const Tensor& input_tensor,
                                      TfLiteTensor& tflite_tensor) { … }

template <>
absl::Status CopyTensorToTfLiteTensor<char>(const Tensor& input_tensor,
                                            TfLiteTensor& tflite_tensor) { … }
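
// Rough shape of CopyTensorToTfLiteTensor<T>, assuming the elided body is a
// size-checked raw copy out of a CPU read view (a sketch, not the verbatim
// implementation):
//
//   auto view = input_tensor.GetCpuReadView();
//   RET_CHECK_EQ(input_tensor.bytes(), tflite_tensor.bytes);
//   std::memcpy(tflite_tensor.data.raw, view.buffer<T>(), tflite_tensor.bytes);
//
// The <char> specialization exists because TFLite string tensors are not flat
// byte buffers; they are normally written via tflite::DynamicBuffer from
// string_util.h (AddString() + WriteToTensor()).
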
bool operator==(const TfLiteIntArray& lhs, const std::vector<int>& rhs) { … }

std::ostream& operator<<(std::ostream& os, const TfLiteIntArray& array) { … }
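
// These two helpers let the copy routines validate a tensor's dims against
// expected dimensions and report mismatches readably, e.g. (illustrative;
// `expected_dims` is a hypothetical std::vector<int>):
//
//   RET_CHECK(*tflite_tensor.dims == expected_dims)
//       << "Unexpected dims: " << *tflite_tensor.dims;
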
template <typename T>
absl::Status CopyTfLiteTensorToTensor(const TfLiteTensor& tflite_tensor,
                                      Tensor& output_tensor) { … }

template <>
absl::Status CopyTfLiteTensorToTensor<char>(const TfLiteTensor& tflite_tensor,
                                            Tensor& output_tensor) { … }
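
// Mirror of the copy-in path above: presumably a size check followed by a raw
// copy into a CPU write view (sketch only):
//
//   auto view = output_tensor.GetCpuWriteView();
//   RET_CHECK_EQ(output_tensor.bytes(), tflite_tensor.bytes);
//   std::memcpy(view.buffer<T>(), tflite_tensor.data.raw, tflite_tensor.bytes);
//
// The <char> specialization again covers TFLite string tensors, which are read
// back via tflite::GetStringCount()/GetString() from string_util.h.
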
}  // namespace

int GetXnnpackNumThreads(
    const bool opts_has_delegate,
    const mediapipe::InferenceCalculatorOptions::Delegate& opts_delegate) { … }
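
// Expected call pattern (sketch; `cc` is a hypothetical CalculatorContext, and
// falling back to GetXnnpackDefaultNumThreads() when num_threads is unset is
// an assumption based on the helper above):
//
//   const auto& opts = cc->Options<mediapipe::InferenceCalculatorOptions>();
//   const int n = GetXnnpackNumThreads(opts.has_delegate(), opts.delegate());
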
absl::Status CopyCpuInputIntoInterpreterTensor(const Tensor& input_tensor,
                                               tflite::Interpreter& interpreter,
                                               int input_tensor_index) { … }
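
// Typical usage when feeding a CPU tensor to a TFLite model (sketch;
// `interpreter` is assumed to already have allocated tensors):
//
//   Tensor input(Tensor::ElementType::kFloat32,
//                Tensor::Shape({1, 224, 224, 3}));
//   auto write_view = input.GetCpuWriteView();
//   float* dst = write_view.buffer<float>();
//   // ... fill dst ...
//   MP_RETURN_IF_ERROR(CopyCpuInputIntoInterpreterTensor(
//       input, *interpreter, /*input_tensor_index=*/0));
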
absl::Status CopyCpuInputIntoTfLiteTensor(const Tensor& input_tensor,
                                          TfLiteTensor& tflite_tensor) { … }
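
// Interpreter-free variant for callers that already hold a TfLiteTensor,
// e.g. (illustrative):
//
//   TfLiteTensor* t = interpreter->tensor(interpreter->inputs()[0]);
//   MP_RETURN_IF_ERROR(CopyCpuInputIntoTfLiteTensor(input, *t));
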
absl::Status CopyInterpreterTensorIntoCpuOutput(
    const tflite::Interpreter& interpreter, int output_tensor_index,
    Tensor& output_tensor) { … }
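
// Reads a model output into a pre-allocated MediaPipe tensor whose element
// type and byte size must match (sketch):
//
//   Tensor output(Tensor::ElementType::kFloat32, Tensor::Shape({1, 1001}));
//   MP_RETURN_IF_ERROR(CopyInterpreterTensorIntoCpuOutput(
//       *interpreter, /*output_tensor_index=*/0, output));
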
absl::Status CopyTfLiteTensorIntoCpuOutput(const TfLiteTensor& tflite_tensor,
                                           Tensor& output_tensor) { … }
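
// Output-direction counterpart of CopyCpuInputIntoTfLiteTensor (illustrative):
//
//   const TfLiteTensor* t = interpreter->tensor(interpreter->outputs()[0]);
//   MP_RETURN_IF_ERROR(CopyTfLiteTensorIntoCpuOutput(*t, output));
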
absl::StatusOr<Tensor> ConvertTfLiteTensorToTensor(
    const TfLiteTensor& tflite_tensor) { … }
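
// Allocating variant: returns a new Tensor sized and typed after the source
// instead of filling a caller-provided one (sketch):
//
//   MP_ASSIGN_OR_RETURN(Tensor copy, ConvertTfLiteTensorToTensor(*t));
//   auto read_view = copy.GetCpuReadView();
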
absl::StatusOr<Tensor> CreateTensorWithTfLiteTensorSpecs(
    const TfLiteTensor& reference_tflite_tensor, MemoryManager* memory_manager,
    int alignment) { … }
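
// Creates an empty Tensor matching the reference tensor's type and shape
// without copying data. Sketch; treating a null MemoryManager as "use default
// allocation" and 64 as a typical buffer alignment are assumptions:
//
//   MP_ASSIGN_OR_RETURN(
//       Tensor out,
//       CreateTensorWithTfLiteTensorSpecs(*reference,
//                                         /*memory_manager=*/nullptr,
//                                         /*alignment=*/64));
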
}  // namespace mediapipe