/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
/// \file
///
/// Deserialization infrastructure for tflite. Provides functionality
/// to go from a serialized tflite model in flatbuffer format to an
/// in-memory representation of the model.
///
/// WARNING: Users of TensorFlow Lite should not include this file directly,
/// but should instead include "third_party/tensorflow/lite/model_builder.h".
/// Only the TensorFlow Lite implementation itself should include this
/// file directly.
// IWYU pragma: private, include "third_party/tensorflow/lite/model_builder.h"

#ifndef TENSORFLOW_COMPILER_MLIR_LITE_CORE_MODEL_BUILDER_BASE_H_
#define TENSORFLOW_COMPILER_MLIR_LITE_CORE_MODEL_BUILDER_BASE_H_

#include <stddef.h>

#include <algorithm>
#include <cstdint>
#include <map>
#include <memory>
#include <string>
#include <utility>

#include "flatbuffers/base.h"  // from @flatbuffers
#include "flatbuffers/buffer.h"  // from @flatbuffers
#include "flatbuffers/vector.h"  // from @flatbuffers
#include "flatbuffers/verifier.h"  // from @flatbuffers
#include "tensorflow/compiler/mlir/lite/core/macros.h"
#include "tensorflow/compiler/mlir/lite/schema/schema_generated.h"
#include "tensorflow/lite/allocation.h"
#include "tensorflow/lite/core/api/error_reporter.h"
#include "tensorflow/lite/core/api/verifier.h"
#include "tensorflow/lite/string_type.h"

namespace tflite {

std::unique_ptr<Allocation> GetAllocationFromFile(
    const char* filename, ErrorReporter* error_reporter);

std::unique_ptr<Allocation> GetAllocationFromFile(
    int fd, ErrorReporter* error_reporter);

/// An RAII object that represents a read-only tflite model, copied from disk,
/// or mmapped. This uses flatbuffers as the serialization format.
///
/// NOTE: The current API requires that a FlatBufferModelBase instance be kept
/// alive by the client as long as it is in use by any dependent Interpreter
/// instances. As the FlatBufferModelBase instance is effectively immutable
/// after creation, the client may safely use a single model with multiple
/// dependent Interpreter instances, even across multiple threads (though note
/// that each Interpreter instance is *not* thread-safe).
///
/// <pre><code>
/// using namespace tflite;
/// StderrReporter error_reporter;
/// auto model = FlatBufferModelBase::BuildFromFile("interesting_model.tflite",
///                                                 &error_reporter);
/// MyOpResolver resolver;  // You need to subclass OpResolver to provide
///                         // implementations.
/// InterpreterBuilder builder(*model, resolver);
/// std::unique_ptr<Interpreter> interpreter;
/// if (builder(&interpreter) == kTfLiteOk) {
///   .. run model inference with interpreter
/// }
/// </code></pre>
///
/// OpResolver must be defined to provide your kernel implementations to the
/// interpreter. This is environment specific and may consist of just the
/// builtin ops, or some custom operators you defined to extend tflite.
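///
/// As an illustrative sketch of providing those kernels, one common pattern
/// is to start from the builtin resolver and register custom operators on
/// top of it. This assumes BuiltinOpResolver (declared in
/// "tensorflow/lite/kernels/register.h") and MutableOpResolver::AddCustom
/// are available in your build; Register_MY_CUSTOM_OP() is a hypothetical
/// registration function you would supply for your own kernel:
///
/// <pre><code>
/// // Builtin kernels plus one custom op.
/// tflite::ops::builtin::BuiltinOpResolver resolver;
/// resolver.AddCustom("MyCustomOp", Register_MY_CUSTOM_OP());
///
/// InterpreterBuilder builder(*model, resolver);
/// std::unique_ptr<Interpreter> interpreter;
/// builder(&interpreter);
/// </code></pre>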
namespace impl {

template <typename T>
class FlatBufferModelBase {
  …
};

}  // namespace impl
}  // namespace tflite

#endif  // TENSORFLOW_COMPILER_MLIR_LITE_CORE_MODEL_BUILDER_BASE_H_