# MediaPipe graph to detect faces. (CPU input, and inference is executed on
# CPU.)
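#
# EXAMPLE (a minimal usage sketch; the output stream name "face_detections"
# is illustrative):
#   node {
#     calculator: "FaceDetectionFullRangeCpu"
#     input_stream: "IMAGE:image"
#     output_stream: "DETECTIONS:face_detections"
#   }
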
type: "FaceDetectionFullRangeCpu"
# The input image, either ImageFrame or (multi-backend) Image.
input_stream: "IMAGE:image"
# Detected faces. (std::vector<Detection>)
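# NOTE: there will not be an output packet in the DETECTIONS stream for a
# particular timestamp if no faces are detected. However, the MediaPipe
# framework will internally inform downstream calculators of the absence of
# this packet so that they don't wait for it unnecessarily.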
output_stream: "DETECTIONS:detections"

# Options for this subgraph. When the subgraph is instantiated by a parent
# graph, the parent-supplied mediapipe.FaceDetectionOptions are received here
# and forwarded to the node below via option_value.
graph_options: {
  [type.googleapis.com/mediapipe.FaceDetectionOptions] {}
}
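
# For example, a parent graph could set the detection score threshold when
# instantiating this subgraph (an illustrative sketch; it assumes
# mediapipe.FaceDetectionOptions exposes a min_score_thresh field):
#
#   node {
#     calculator: "FaceDetectionFullRangeCpu"
#     input_stream: "IMAGE:image"
#     output_stream: "DETECTIONS:detections"
#     node_options: {
#       [type.googleapis.com/mediapipe.FaceDetectionOptions] {
#         min_score_thresh: 0.6
#       }
#     }
#   }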

node {
  calculator: "FaceDetectionFullRange"
  input_stream: "IMAGE:image"
  output_stream: "DETECTIONS:detections"
  node_options: {
    [type.googleapis.com/mediapipe.FaceDetectionOptions] {
      # Default to the XNNPACK delegate so TfLite inference runs on CPU.
      delegate { xnnpack {} }
    }
  }
  # Copy the options supplied to this subgraph (graph_options above) into this
  # node's options, so values set by the parent graph take effect here.
  option_value: "OPTIONS:options"
}