chromium/third_party/mediapipe/src/mediapipe/modules/face_detection/face_detection.pbtxt

# MediaPipe graph to detect faces.
#
# EXAMPLE:
#   node {
#     calculator: "FaceDetectionFrontCpu"
#     input_stream: "IMAGE:image"
#     input_stream: "ROI:roi"
#     output_stream: "DETECTIONS:face_detections"
#   }

type: "FaceDetection"

# The input image, one of ImageFrame, GpuBuffer, or (multi-backend) Image.
input_stream: "IMAGE:image"

# ROI (region of interest) within the given image where faces should be
# detected. (NormalizedRect)
input_stream: "ROI:roi"

# Detected faces. (std::vector<Detection>)
# NOTE: there will not be an output packet in the DETECTIONS stream for a
# particular timestamp if no faces are detected. However, the MediaPipe
# framework will internally inform the downstream calculators of the absence of
# this packet so that they don't wait for it unnecessarily.
output_stream: "DETECTIONS:detections"

graph_options: {
  [type.googleapis.com/mediapipe.FaceDetectionOptions] {}
}
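
# For illustration only: an enclosing graph could supply these options when
# instantiating this subgraph via node_options. The field names below are the
# ones consumed by the option_value mappings in this file; the concrete values
# (128x128 tensor, 896 boxes, the short-range model) are one plausible
# configuration, not the only supported one.
#
#   node {
#     calculator: "FaceDetection"
#     input_stream: "IMAGE:image"
#     input_stream: "ROI:roi"
#     output_stream: "DETECTIONS:face_detections"
#     node_options: {
#       [type.googleapis.com/mediapipe.FaceDetectionOptions] {
#         tensor_width: 128
#         tensor_height: 128
#         num_boxes: 896
#         min_score_thresh: 0.5
#         model_path: "mediapipe/modules/face_detection/face_detection_short_range.tflite"
#       }
#     }
#   }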

# Converts the input CPU or GPU image to the multi-backend image type (Image).
node: {
  calculator: "ToImageCalculator"
  input_stream: "IMAGE:image"
  output_stream: "IMAGE:multi_backend_image"
}

# Transforms the input image into a tensor of the size specified in the graph
# options (e.g., 128x128, as expected by the corresponding face detection
# model) while keeping the aspect ratio, which can result in letterboxing in
# the transformed image.
node: {
  calculator: "ImageToTensorCalculator"
  input_stream: "IMAGE:multi_backend_image"
  input_stream: "NORM_RECT:roi"
  output_stream: "TENSORS:input_tensors"
  output_stream: "MATRIX:transform_matrix"
  options: {
    [mediapipe.ImageToTensorCalculatorOptions.ext] {
      keep_aspect_ratio: true
      output_tensor_float_range {
        min: -1.0
        max: 1.0
      }
      border_mode: BORDER_ZERO
    }
  }
  option_value: "gpu_origin:options/gpu_origin"
  option_value: "output_tensor_width:options/tensor_width"
  option_value: "output_tensor_height:options/tensor_height"
}
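
# Illustrative worked example of the letterboxing above (assuming a 640x480
# input and a 128x128 output tensor): scale = min(128/640, 128/480) = 0.2, so
# the image content becomes 128x96 and (128 - 96) / 2 = 16 rows of zero
# padding (BORDER_ZERO) are added at the top and bottom. Pixel values are then
# mapped into [-1.0, 1.0] per output_tensor_float_range.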

# Runs a TensorFlow Lite model that takes an image tensor and outputs a vector
# of tensors representing, for instance, detection boxes/keypoints and scores.
# The inference backend is determined by the delegate supplied through the
# graph options.
node {
  calculator: "InferenceCalculator"
  input_stream: "TENSORS:input_tensors"
  output_stream: "TENSORS:detection_tensors"
  options: {
    [mediapipe.InferenceCalculatorOptions.ext] {}
  }
  option_value: "delegate:options/delegate"
  option_value: "model_path:options/model_path"
}
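
# For illustration only, the delegate supplied through the graph options might
# be configured as one of the following (example settings; see
# InferenceCalculatorOptions.Delegate for the available variants):
#
#   delegate { gpu {} }                      # inference on GPU
#   delegate { xnnpack { num_threads: 4 } }  # inference on CPU via XNNPACK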

# Internal streams produced by the nodes above and consumed below:
#
# Detection tensors. (std::vector<Tensor>)
#   "TENSORS:detection_tensors"
#
# A 4x4 row-major-order matrix that maps a point represented in the detection
# tensors to a desired coordinate system, e.g., in the original input image
# before scaling/cropping. (std::array<float, 16>)
#   "MATRIX:transform_matrix"

# Generates a single side packet containing a vector of SSD anchors based on
# the specification in the options.
node {
  calculator: "SsdAnchorsCalculator"
  output_side_packet: "anchors"
  options: {
    [mediapipe.SsdAnchorsCalculatorOptions.ext] {
      num_layers: 1
      min_scale: 0.1484375
      max_scale: 0.75
      anchor_offset_x: 0.5
      anchor_offset_y: 0.5
      aspect_ratios: 1.0
      fixed_anchor_size: true
    }
  }
  option_value: "input_size_width:tensor_width"
  option_value: "input_size_height:tensor_height"
  option_value: "num_layers:num_layers"
  option_value: "strides:strides"
  option_value: "interpolated_scale_aspect_ratio:interpolated_scale_aspect_ratio"
}
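
# Illustrative worked example, assuming the short-range configuration (128x128
# input, 4 layers with strides 8,16,16,16): the stride-8 layer produces a
# 16x16 grid with 2 anchors per cell, and the three stride-16 layers share an
# 8x8 grid with 6 anchors per cell in total, i.e. 16*16*2 + 8*8*6 = 896
# anchors. This count must match the num_boxes option of the decoding node
# below.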

# Decodes the detection tensors generated by the TensorFlow Lite model, based on
# the SSD anchors and the specification in the options, into a vector of
# detections. Each detection describes a detected object.
node {
  calculator: "TensorsToDetectionsCalculator"
  input_stream: "TENSORS:detection_tensors"
  input_side_packet: "ANCHORS:anchors"
  output_stream: "DETECTIONS:unfiltered_detections"
  options: {
    [mediapipe.TensorsToDetectionsCalculatorOptions.ext] {
      num_classes: 1
      num_coords: 16
      box_coord_offset: 0
      keypoint_coord_offset: 4
      num_keypoints: 6
      num_values_per_keypoint: 2
      sigmoid_score: true
      score_clipping_thresh: 100.0
      reverse_output_order: true
    }
  }
  option_value: "num_boxes:num_boxes"
  option_value: "x_scale:x_scale"
  option_value: "y_scale:y_scale"
  option_value: "h_scale:h_scale"
  option_value: "w_scale:w_scale"
  option_value: "min_score_thresh:min_score_thresh"
}
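
# For reference, the tensor layout implied by the options above: each of the
# num_boxes rows carries num_coords = 16 values, namely 4 box coordinates
# starting at box_coord_offset 0, followed by 6 keypoints of 2 values (x, y)
# each starting at keypoint_coord_offset 4: 4 + 6 * 2 = 16.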

# Performs non-max suppression to remove redundant, overlapping detections.
node {
  calculator: "NonMaxSuppressionCalculator"
  input_stream: "unfiltered_detections"
  output_stream: "filtered_detections"
  options: {
    [mediapipe.NonMaxSuppressionCalculatorOptions.ext] {
      min_suppression_threshold: 0.3
      overlap_type: INTERSECTION_OVER_UNION
      algorithm: WEIGHTED
    }
  }
}
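
# With the WEIGHTED algorithm, detections whose intersection-over-union
# exceeds min_suppression_threshold (0.3 above) are blended into a single
# detection by score-weighted averaging of their boxes and keypoints, rather
# than all but the highest-scoring one being discarded.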

# Projects the detections from the letterboxed tensor coordinates back to the
# corresponding locations in the original image (the input to the graph),
# using the transform matrix produced by ImageToTensorCalculator above.
node {
  calculator: "DetectionProjectionCalculator"
  input_stream: "DETECTIONS:filtered_detections"
  input_stream: "PROJECTION_MATRIX:transform_matrix"
  output_stream: "DETECTIONS:detections"
}
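
# Continuing the illustrative 640x480 example: with 16 rows of letterboxing on
# a 128x128 tensor, the image content occupies the normalized y range
# [16/128, 112/128] = [0.125, 0.875], so the matrix maps a normalized point
# (x, y) in tensor space to roughly (x, (y - 0.125) / 0.75) in original-image
# coordinates.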