// Copyright 2019 The MediaPipe Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

syntax = "proto2";

package mediapipe;

import "mediapipe/framework/packet_generator.proto";
import "tensorflow/core/protobuf/config.proto";

message TensorFlowSessionFromFrozenGraphGeneratorOptions {
  extend mediapipe.PacketGeneratorOptions {
    optional TensorFlowSessionFromFrozenGraphGeneratorOptions ext = 160666123;
  }

  // Path to a file containing a serialized proto of type
  // tensorflow::GraphDef.
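  // For example (a sketch; the path is a placeholder for wherever your frozen
  // graph lives on disk):
  //   graph_proto_path: "/tmp/frozen_inference_graph.pb"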
  optional string graph_proto_path = 1;

  // To run inference with MediaPipe inputs, MediaPipe streams need to be
  // mapped to TensorFlow tensors. This map defines which streams are fed into
  // which tensors in the model. The MediaPipe tag of the stream is the map
  // key. Tags must be capitalized, matching the regex [A-Z0-9_]+. Examples:
  // "JPG_STRING" and "SOFTMAX". Those tags can then be used as the MediaPipe
  // tags of the input_stream or output_stream of the
  // TensorflowInferenceCalculator consuming the packet produced by this
  // generator. The tensor names must match the names of the tensors in the
  // graph that you want to feed or fetch. Examples: "DecodeJpeg/contents:0"
  // or "softmax:0". For example, a MediaPipe graph can include the nodes:
  //
  //   packet_generator {
  //     packet_generator: "TensorFlowSessionFromFrozenGraphGenerator"
  //     output_side_packet: "SESSION:session"
  //     options {
  //       [mediapipe.TensorFlowSessionFromFrozenGraphGeneratorOptions.ext]: {
  //         graph_proto_path: "[PATH]"
  //         tag_to_tensor_names {
  //           key: "JPG_STRING"
  //           value: "input:0"
  //         }
  //         tag_to_tensor_names {
  //           key: "SOFTMAX"
  //           value: "softmax:0"
  //         }
  //       }
  //     }
  //   }
  //   node {
  //     calculator: "TensorflowInferenceCalculator"
  //     input_side_packet: "SESSION:graph_with_bindings"
  //     input_stream: "JPG_STRING:jpg_string_tensor"
  //     output_stream: "SOFTMAX:softmax_tensor"
  //   }
  map<string, string> tag_to_tensor_names = 2;

  // TensorFlow session config options.
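  // For example, to tolerate a missing device and let GPU memory grow on
  // demand (a sketch; both fields are standard tensorflow::ConfigProto
  // options):
  //   config {
  //     allow_soft_placement: true
  //     gpu_options { allow_growth: true }
  //   }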
  optional tensorflow.ConfigProto config = 3;

  // Graph nodes to run to initialize the model. Any output of these ops is
  // ignored.
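  // For example, a graph with lookup tables might run its table initializer
  // here (the op name is hypothetical and depends on your graph):
  //   initialization_op_names: "init_all_tables"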
  repeated string initialization_op_names = 4;

  // The id of the device on which you would prefer to execute the graph
  // nodes. If set, all graph nodes without a previously specified device will
  // be assigned to run on preferred_device_id. Example values include
  // "/device:GPU:0" and "/device:CPU:0".
  // NOTE: If config.allow_soft_placement = false and the device is not found,
  // an error will be returned.
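  // For example, to prefer the first GPU but fall back to other devices when
  // it is absent (a sketch; pairs with config.allow_soft_placement above):
  //   preferred_device_id: "/device:GPU:0"
  //   config { allow_soft_placement: true }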
  optional string preferred_device_id = 5;
}