chromium/third_party/mediapipe/src/mediapipe/graphs/iris_tracking/iris_tracking_gpu.pbtxt

# MediaPipe graph that performs iris tracking with TensorFlow Lite on GPU.
# Used in the examples in
# mediapipe/examples/android/src/java/com/mediapipe/apps/iristrackinggpu and
# mediapipe/examples/ios/iristrackinggpu.

# GPU buffer. (GpuBuffer)
input_stream: "input_video"

# GPU buffer. (GpuBuffer)
output_stream: "output_video"
# Face landmarks with iris. (NormalizedLandmarkList)
output_stream: "face_landmarks_with_iris"

# Throttles the images flowing downstream for flow control. It passes through
# the very first incoming image unaltered, and waits for downstream nodes
# (calculators and subgraphs) in the graph to finish their tasks before it
# passes through another image. All images that come in while waiting are
# dropped, limiting the number of in-flight images in most parts of the graph
# to 1. This prevents the downstream nodes from queuing up incoming images and
# data excessively, which leads to increased latency and memory usage, unwanted
# in real-time mobile applications. It also eliminates unnecessary computation,
# e.g., the output produced by a node may get dropped downstream if the
# subsequent nodes are still busy processing previous inputs.
node {
  calculator: "FlowLimiterCalculator"
  input_stream: "input_video"
  input_stream: "FINISHED:output_video"
  input_stream_info: {
    tag_index: "FINISHED"
    back_edge: true
  }
  output_stream: "throttled_input_video"
}
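
# A minimal, commented-out sketch of how the in-flight limit could be raised,
# assuming a MediaPipe version whose FlowLimiterCalculator exposes an options
# proto (the "max_in_flight" field is assumed from
# flow_limiter_calculator.proto; verify it exists in your checkout). It is
# left commented out so this graph keeps the default limit of 1:
#
# node {
#   calculator: "FlowLimiterCalculator"
#   input_stream: "input_video"
#   input_stream: "FINISHED:output_video"
#   input_stream_info: { tag_index: "FINISHED" back_edge: true }
#   output_stream: "throttled_input_video"
#   node_options: {
#     [type.googleapis.com/mediapipe.FlowLimiterCalculatorOptions] {
#       max_in_flight: 2  # assumed field; allows two frames in flight
#     }
#   }
# }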

# Defines how many faces to detect. Iris tracking currently only handles one
# face (left and right eye), and therefore this should always be set to 1.
node {
  calculator: "ConstantSidePacketCalculator"
  output_side_packet: "PACKET:num_faces"
  node_options: {
    [type.googleapis.com/mediapipe.ConstantSidePacketCalculatorOptions]: {
      packet { int_value: 1 }
    }
  }
}
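
# A hedged aside: ConstantSidePacketCalculator can also emit several constants
# from one node by pairing indexed "PACKET:<i>" tags with one packet entry
# each. A commented-out sketch (the second side packet is purely illustrative
# and not used by this graph):
#
# node {
#   calculator: "ConstantSidePacketCalculator"
#   output_side_packet: "PACKET:0:num_faces"
#   output_side_packet: "PACKET:1:some_flag"  # hypothetical extra constant
#   node_options: {
#     [type.googleapis.com/mediapipe.ConstantSidePacketCalculatorOptions]: {
#       packet { int_value: 1 }
#       packet { bool_value: true }
#     }
#   }
# }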

# Detects faces and corresponding landmarks.
node {
  calculator: "FaceLandmarkFrontGpu"
  input_stream: "IMAGE:throttled_input_video"
  input_side_packet: "NUM_FACES:num_faces"
  output_stream: "LANDMARKS:multi_face_landmarks"
  output_stream: "ROIS_FROM_LANDMARKS:face_rects_from_landmarks"
  output_stream: "DETECTIONS:face_detections"
  output_stream: "ROIS_FROM_DETECTIONS:face_rects_from_detections"
}
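
# Note: all four outputs above are vectors with one entry per detected face,
# which is why the split calculators below extract element 0 before further
# processing.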

# Gets the very first and only face from the "multi_face_landmarks" vector.
node {
  calculator: "SplitNormalizedLandmarkListVectorCalculator"
  input_stream: "multi_face_landmarks"
  output_stream: "face_landmarks"
  node_options: {
    [type.googleapis.com/mediapipe.SplitVectorCalculatorOptions] {
      ranges: { begin: 0 end: 1 }
      element_only: true
    }
  }
}
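
# With "element_only" set, the output above is the NormalizedLandmarkList
# itself rather than a one-element vector; the face rect below is unpacked in
# the same way.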

# Gets the very first and only face rect from the "face_rects_from_landmarks"
# vector.
node {
  calculator: "SplitNormalizedRectVectorCalculator"
  input_stream: "face_rects_from_landmarks"
  output_stream: "face_rect"
  node_options: {
    [type.googleapis.com/mediapipe.SplitVectorCalculatorOptions] {
      ranges: { begin: 0 end: 1 }
      element_only: true
    }
  }
}

# Gets the two landmarks that define the left eye boundary.
node {
  calculator: "SplitNormalizedLandmarkListCalculator"
  input_stream: "face_landmarks"
  output_stream: "left_eye_boundary_landmarks"
  node_options: {
    [type.googleapis.com/mediapipe.SplitVectorCalculatorOptions] {
      ranges: { begin: 33 end: 34 }
      ranges: { begin: 133 end: 134 }
      combine_outputs: true
    }
  }
}

# Gets the two landmarks that define the right eye boundary.
node {
  calculator: "SplitNormalizedLandmarkListCalculator"
  input_stream: "face_landmarks"
  output_stream: "right_eye_boundary_landmarks"
  node_options: {
    [type.googleapis.com/mediapipe.SplitVectorCalculatorOptions] {
      ranges: { begin: 362 end: 363 }
      ranges: { begin: 263 end: 264 }
      combine_outputs: true
    }
  }
}
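
# In both nodes above, the ranges select individual face mesh landmarks by
# index (33/133 and 362/263 are the eye corner indices), and "combine_outputs"
# merges the two single-landmark ranges into one two-landmark list per eye.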

# Detects iris landmarks, eye contour landmarks, and the corresponding rects
# (ROIs) for the left and right eyes.
node {
  calculator: "IrisLandmarkLeftAndRightGpu"
  input_stream: "IMAGE:throttled_input_video"
  input_stream: "LEFT_EYE_BOUNDARY_LANDMARKS:left_eye_boundary_landmarks"
  input_stream: "RIGHT_EYE_BOUNDARY_LANDMARKS:right_eye_boundary_landmarks"
  output_stream: "LEFT_EYE_CONTOUR_LANDMARKS:left_eye_contour_landmarks"
  output_stream: "LEFT_EYE_IRIS_LANDMARKS:left_iris_landmarks"
  output_stream: "LEFT_EYE_ROI:left_eye_rect_from_landmarks"
  output_stream: "RIGHT_EYE_CONTOUR_LANDMARKS:right_eye_contour_landmarks"
  output_stream: "RIGHT_EYE_IRIS_LANDMARKS:right_iris_landmarks"
  output_stream: "RIGHT_EYE_ROI:right_eye_rect_from_landmarks"
}

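# Concatenates the refined left and right eye contour landmarks into a single
# landmark list.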
node {
  calculator: "ConcatenateNormalizedLandmarkListCalculator"
  input_stream: "left_eye_contour_landmarks"
  input_stream: "right_eye_contour_landmarks"
  output_stream: "refined_eye_landmarks"
}

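# Updates the face landmarks by replacing the original eye contour landmarks
# with the refined ones.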
node {
  calculator: "UpdateFaceLandmarksCalculator"
  input_stream: "NEW_EYE_LANDMARKS:refined_eye_landmarks"
  input_stream: "FACE_LANDMARKS:face_landmarks"
  output_stream: "UPDATED_FACE_LANDMARKS:updated_face_landmarks"
}

# Renders annotations and overlays them on top of the input images. The
# "focal_length_pixel" side packet is not produced inside this graph; the host
# application must supply it (e.g., derived from the camera intrinsics), and
# the subgraph uses it to estimate iris depth from the iris size.
node {
  calculator: "IrisAndDepthRendererGpu"
  input_stream: "IMAGE:throttled_input_video"
  input_stream: "FACE_LANDMARKS:updated_face_landmarks"
  input_stream: "EYE_LANDMARKS_LEFT:left_eye_contour_landmarks"
  input_stream: "EYE_LANDMARKS_RIGHT:right_eye_contour_landmarks"
  input_stream: "IRIS_LANDMARKS_LEFT:left_iris_landmarks"
  input_stream: "IRIS_LANDMARKS_RIGHT:right_iris_landmarks"
  input_stream: "NORM_RECT:face_rect"
  input_stream: "LEFT_EYE_RECT:left_eye_rect_from_landmarks"
  input_stream: "RIGHT_EYE_RECT:right_eye_rect_from_landmarks"
  input_stream: "DETECTIONS:face_detections"
  input_side_packet: "FOCAL_LENGTH:focal_length_pixel"
  output_stream: "IRIS_LANDMARKS:iris_landmarks"
  output_stream: "IMAGE:output_video"
}

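# Concatenates the updated face landmarks and the iris landmarks into the
# final "face_landmarks_with_iris" output. With the standard 468-point face
# mesh this yields 478 landmarks: 468 face landmarks followed by 10 iris
# landmarks (5 per eye).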
node {
  calculator: "ConcatenateNormalizedLandmarkListCalculator"
  input_stream: "updated_face_landmarks"
  input_stream: "iris_landmarks"
  output_stream: "face_landmarks_with_iris"
}