#include "mediapipe/calculators/tensor/inference_interpreter_delegate_runner.h"
#include <cstdint>
#include <cstring>
#include <memory>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "mediapipe/calculators/tensor/inference_calculator_utils.h"
#include "mediapipe/calculators/tensor/inference_feedback_manager.h"
#include "mediapipe/calculators/tensor/inference_io_mapper.h"
#include "mediapipe/calculators/tensor/tensor_span.h"
#include "mediapipe/calculators/tensor/tflite_delegate_ptr.h"
#include "mediapipe/framework/api2/packet.h"
#include "mediapipe/framework/calculator_framework.h"
#include "mediapipe/framework/formats/tensor.h"
#include "mediapipe/framework/mediapipe_profiling.h"
#include "mediapipe/framework/port/ret_check.h"
#include "mediapipe/framework/port/status_macros.h"
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/interpreter_builder.h"
#include "tensorflow/lite/string_util.h"
#include "tensorflow/lite/util.h"
namespace mediapipe {
namespace {
using ::tflite::Interpreter;
using ::tflite::InterpreterBuilder;
template <typename T>
void CopyTensorBufferToInterpreter(const Tensor& input_tensor,
                                   Interpreter* interpreter,
                                   int input_tensor_index) { … }
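
// Illustrative sketch only (hypothetical name, not part of this file): for
// plain numeric types, a copy like the one above is conventionally a flat
// memcpy from the MediaPipe Tensor's CPU read view into the interpreter's
// typed input buffer.
template <typename T>
void CopyTensorBufferToInterpreterSketch(const Tensor& input_tensor,
                                         Interpreter* interpreter,
                                         int input_tensor_index) {
  auto input_view = input_tensor.GetCpuReadView();
  const T* input_buffer = input_view.buffer<T>();
  T* interpreter_buffer =
      interpreter->typed_input_tensor<T>(input_tensor_index);
  std::memcpy(interpreter_buffer, input_buffer, input_tensor.bytes());
}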
template <>
void CopyTensorBufferToInterpreter<char>(const Tensor& input_tensor,
                                         Interpreter* interpreter,
                                         int input_tensor_index) { … }
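
// Illustrative sketch only (hypothetical name): TFLite string tensors cannot
// be filled with memcpy; they are serialized through tflite::DynamicBuffer,
// roughly as below, treating the Tensor's char buffer as a single string.
void CopyStringTensorToInterpreterSketch(const Tensor& input_tensor,
                                         Interpreter* interpreter,
                                         int input_tensor_index) {
  auto input_view = input_tensor.GetCpuReadView();
  const char* input_buffer = input_view.buffer<char>();
  tflite::DynamicBuffer dynamic_buffer;
  dynamic_buffer.AddString(input_buffer,
                           input_tensor.shape().num_elements());
  dynamic_buffer.WriteToTensorAsVector(
      interpreter->tensor(interpreter->inputs()[input_tensor_index]));
}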
template <typename T>
void CopyTensorBufferFromInterpreter(Interpreter* interpreter,
                                     int output_tensor_index,
                                     Tensor* output_tensor) { … }
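
// Illustrative sketch only (hypothetical name): the reverse copy, from the
// interpreter's typed output buffer into the MediaPipe Tensor's CPU write
// view.
template <typename T>
void CopyTensorBufferFromInterpreterSketch(Interpreter* interpreter,
                                           int output_tensor_index,
                                           Tensor* output_tensor) {
  auto output_view = output_tensor->GetCpuWriteView();
  T* output_buffer = output_view.buffer<T>();
  const T* interpreter_buffer =
      interpreter->typed_output_tensor<T>(output_tensor_index);
  std::memcpy(output_buffer, interpreter_buffer, output_tensor->bytes());
}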
absl::Status VerifyModelTensorsForCustomAllocation(
    const Interpreter& interpreter) { … }
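
// Hedged reconstruction (hypothetical name): zero-copy I/O relies on
// Interpreter::SetCustomAllocationForTensor, which does not work with
// dynamically (re)allocated tensors, so a check along these lines is the
// likely intent of the function above.
absl::Status VerifyModelTensorsForCustomAllocationSketch(
    const Interpreter& interpreter) {
  for (const int input_index : interpreter.inputs()) {
    RET_CHECK_NE(interpreter.tensor(input_index)->allocation_type,
                 kTfLiteDynamic)
        << "Dynamic input tensors do not support custom allocation.";
  }
  for (const int output_index : interpreter.outputs()) {
    RET_CHECK_NE(interpreter.tensor(output_index)->allocation_type,
                 kTfLiteDynamic)
        << "Dynamic output tensors do not support custom allocation.";
  }
  return absl::OkStatus();
}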
absl::StatusOr<Tensor> AllocateOutputTensor(const int tensor_index,
                                            const Interpreter& interpreter) { … }
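
// Illustrative sketch only (hypothetical name): builds a MediaPipe Tensor
// whose element type and shape mirror the interpreter's output tensor. Only
// the float32 mapping is shown; the elided body covers all supported types.
absl::StatusOr<Tensor> AllocateOutputTensorSketch(
    const int tensor_index, const Interpreter& interpreter) {
  const TfLiteTensor* reference_tensor = interpreter.tensor(tensor_index);
  RET_CHECK(reference_tensor != nullptr);
  RET_CHECK_EQ(reference_tensor->type, kTfLiteFloat32);
  const std::vector<int> dims(
      reference_tensor->dims->data,
      reference_tensor->dims->data + reference_tensor->dims->size);
  return Tensor(Tensor::ElementType::kFloat32, Tensor::Shape(dims));
}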
absl::Status CopyTensorToInterpreter(const Tensor& input_tensor,
                                     Interpreter* interpreter,
                                     int model_input_index) { … }
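
// Illustrative sketch only (hypothetical name): the typed copy helpers above
// are typically selected by dispatching on the MediaPipe Tensor's element
// type; only two cases are shown here.
absl::Status CopyTensorToInterpreterSketch(const Tensor& input_tensor,
                                           Interpreter* interpreter,
                                           int model_input_index) {
  switch (input_tensor.element_type()) {
    case Tensor::ElementType::kFloat32:
      CopyTensorBufferToInterpreter<float>(input_tensor, interpreter,
                                           model_input_index);
      break;
    case Tensor::ElementType::kChar:
      CopyTensorBufferToInterpreter<char>(input_tensor, interpreter,
                                          model_input_index);
      break;
    default:
      return absl::InvalidArgumentError(
          absl::StrCat("Unsupported input tensor type: ",
                       static_cast<int>(input_tensor.element_type())));
  }
  return absl::OkStatus();
}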
absl::Status CopyTensorFromInterpreter(Interpreter* interpreter,
                                       int output_model_index,
                                       Tensor* output_tensor) { … }
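
// The function above presumably mirrors CopyTensorToInterpreter in the
// opposite direction, dispatching on output_tensor->element_type() and
// forwarding to the typed CopyTensorBufferFromInterpreter<T> helpers
// (hedged description; the body is elided).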
absl::StatusOr<std::vector<Tensor>> AllocateOutputTensors(
    const std::vector<int>& model_output_indexes,
    const Interpreter& interpreter) { … }
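
// Illustrative sketch only (hypothetical name): allocates one output Tensor
// per model output index via AllocateOutputTensor above.
absl::StatusOr<std::vector<Tensor>> AllocateOutputTensorsSketch(
    const std::vector<int>& model_output_indexes,
    const Interpreter& interpreter) {
  std::vector<Tensor> output_tensors;
  output_tensors.reserve(model_output_indexes.size());
  for (const int output_index : model_output_indexes) {
    MP_ASSIGN_OR_RETURN(Tensor output_tensor,
                        AllocateOutputTensor(output_index, interpreter));
    output_tensors.push_back(std::move(output_tensor));
  }
  return output_tensors;
}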
}  // namespace
class InferenceInterpreterDelegateRunner : public InferenceRunner { … };
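
// Hedged note (members elided above): the runner plausibly owns the state the
// interpreter depends on for its whole lifetime: the model and op-resolver
// packets, the TfLiteDelegatePtr, the tflite::Interpreter itself, and the
// optional feedback-manager / I/O-mapping state implied by the factory
// parameters below.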
absl::StatusOr<std::vector<Tensor>> InferenceInterpreterDelegateRunner::Run(
    CalculatorContext* cc, const TensorSpan& tensor_span) { … }
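
// Hedged outline of the elided Run() above: it presumably follows the
// canonical TFLite invocation sequence: feed each input via
// CopyTensorToInterpreter (or via pre-registered custom allocations when
// zero-copy I/O is enabled), call Interpreter::Invoke() under a
// MEDIAPIPE_PROFILING scope, then produce the outputs with
// AllocateOutputTensors and CopyTensorFromInterpreter.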
absl::StatusOr<std::unique_ptr<InferenceRunner>>
CreateInferenceInterpreterDelegateRunner(
    api2::Packet<TfLiteModelPtr> model,
    api2::Packet<tflite::OpResolver> op_resolver, TfLiteDelegatePtr delegate,
    int interpreter_num_threads,
    const mediapipe::InferenceCalculatorOptions::InputOutputConfig*
        input_output_config,
    bool enable_zero_copy_tensor_io) { … }
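
// Illustrative sketch only (hypothetical helper, standard TFLite setup): the
// core of the factory above is the InterpreterBuilder flow below; the elided
// body additionally wires the I/O mapping and feedback-tensor configuration.
// The delegate is passed as a raw pointer because
// Interpreter::ModifyGraphWithDelegate does not take ownership, so the runner
// must keep the delegate alive for the interpreter's lifetime.
absl::StatusOr<std::unique_ptr<Interpreter>> BuildInterpreterSketch(
    const tflite::FlatBufferModel& model,
    const tflite::OpResolver& op_resolver, TfLiteDelegate* delegate,
    int num_threads) {
  std::unique_ptr<Interpreter> interpreter;
  InterpreterBuilder builder(model, op_resolver);
  RET_CHECK_EQ(builder(&interpreter), kTfLiteOk);
  RET_CHECK(interpreter != nullptr);
  interpreter->SetNumThreads(num_threads > 0 ? num_threads : 1);
  if (delegate != nullptr) {
    RET_CHECK_EQ(interpreter->ModifyGraphWithDelegate(delegate), kTfLiteOk);
  }
  RET_CHECK_EQ(interpreter->AllocateTensors(), kTfLiteOk);
  return interpreter;
}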
}  // namespace mediapipe