/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_SUPPORT_CC_PORT_DEFAULT_TFLITE_WRAPPER_H_
#define TENSORFLOW_LITE_SUPPORT_CC_PORT_DEFAULT_TFLITE_WRAPPER_H_

#include <memory>
#include <string>
#include <utility>

#include "absl/status/status.h"  // from @com_google_absl
#include "flatbuffers/flatbuffers.h"  // from @flatbuffers
#include "tensorflow/lite/acceleration/configuration/configuration.pb.h"
#include "tensorflow/lite/acceleration/configuration/delegate_registry.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/mini_benchmark.h"
#include "tensorflow/lite/interpreter.h"
#include "tensorflow/lite/interpreter_builder.h"

namespace tflite {
namespace support {

// Options that are created by `TfLiteInterpreterWrapper` and will help to
// initialize the Interpreter in the callback function.
// `TfLiteInterpreterWrapper` retains ownership of the included options, and
// will ensure that they remain valid for the duration of the created
// interpreter's lifetime.
struct InterpreterCreationResources { … };

// Wrapper for a TfLiteInterpreter that may be accelerated [1]. Meant to be
// substituted for `unique_ptr<tflite::Interpreter>` class members.
//
// This class is in charge of:
// * Picking, instantiating and configuring the right delegate for the provided
//   ComputeSettings [2],
// * Providing methods to initialize and invoke the Interpreter with optional
//   (controlled through the ComputeSettings) automatic fallback to CPU if any
//   acceleration-related error occurs at compilation or runtime.
// * TODO(b/169474250) Cache interpreters for multiple input sizes to enable
//   performant acceleration for the case where input size changes frequently.
//
// IMPORTANT: The only supported delegates are (as defined in [1]) NONE, GPU,
// HEXAGON, NNAPI, EDGETPU (Google internal), and EDGETPU_CORAL. Specifying
// another delegate type may cause an UnimplementedError to be returned.
//
// Like TfLiteInterpreter, this class is thread-compatible. Use from multiple
// threads must be guarded by synchronization outside this class.
//
// [1]:
// https://github.com/tensorflow/tensorflow/blob/master/tensorflow/lite/acceleration/configuration/configuration.proto
class TfLiteInterpreterWrapper { … };

}  // namespace support
}  // namespace tflite

#endif  // TENSORFLOW_LITE_SUPPORT_CC_PORT_DEFAULT_TFLITE_WRAPPER_H_
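// ---------------------------------------------------------------------------
// Illustrative usage sketch, kept entirely in comments so it does not affect
// the build. The member names used below (`InitializeWithFallback`,
// `InvokeWithFallback`, `InterpreterCreationResources::ApplyDelegateIfAvailable`)
// and the locals `model` / `resolver` are assumptions about the elided
// declarations above, not a guaranteed API; check the actual class body
// before relying on them.
//
//   tflite::proto::ComputeSettings compute_settings;
//   compute_settings.mutable_tflite_settings()->set_delegate(
//       tflite::proto::Delegate::NNAPI);
//
//   tflite::support::TfLiteInterpreterWrapper wrapper;
//   absl::Status init_status = wrapper.InitializeWithFallback(
//       [&model, &resolver](
//           const tflite::support::InterpreterCreationResources& resources,
//           std::unique_ptr<tflite::Interpreter>* interpreter_out)
//           -> absl::Status {
//         // Build the interpreter as usual; `resources` is assumed to carry
//         // the delegate picked from the ComputeSettings, if any.
//         tflite::InterpreterBuilder builder(*model, resolver);
//         resources.ApplyDelegateIfAvailable(&builder);
//         return builder(interpreter_out) == kTfLiteOk
//                    ? absl::OkStatus()
//                    : absl::InternalError("Interpreter creation failed.");
//       },
//       compute_settings);
//
//   // Inputs are (re)set inside the callback so that, on an
//   // acceleration-related failure, the wrapper can replay them on the CPU
//   // fallback interpreter before invoking again.
//   absl::Status run_status = wrapper.InvokeWithFallback(
//       [](tflite::Interpreter* interpreter) -> absl::Status {
//         // e.g. fill interpreter->typed_input_tensor<float>(0) here.
//         return absl::OkStatus();
//       });
// ---------------------------------------------------------------------------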