chromium/third_party/mediapipe/src/mediapipe/tasks/cc/core/proto/acceleration.proto

/* Copyright 2022 The MediaPipe Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

syntax = "proto2";

package mediapipe.tasks.core.proto;

import "mediapipe/calculators/tensor/inference_calculator.proto";

option java_package = "com.google.mediapipe.tasks.core.proto";
option java_outer_classname = "AccelerationProto";

message Acceleration {
  // TODO: Enable Automatic mode once it is ready.

  // Chooses which delegate to use. The detailed delegate configuration is
  // set by MediaPipe Tasks by default.
  // With the GPU delegate, MediaPipe Tasks tries to run the whole pipeline
  // on GPU, and falls back to CPU for any calculators that do not support
  // GPU execution.
  oneof delegate {
    mediapipe.InferenceCalculatorOptions.Delegate.Xnnpack xnnpack = 1;
    mediapipe.InferenceCalculatorOptions.Delegate.Gpu gpu = 2;
    mediapipe.InferenceCalculatorOptions.Delegate.TfLite tflite = 4;
    mediapipe.InferenceCalculatorOptions.Delegate.Nnapi nnapi = 5;
  }
}
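
// Illustrative usage sketch (not part of the schema): a task configuration
// selects exactly one delegate by setting the corresponding oneof field. For
// example, a textproto that requests the GPU delegate might look like the
// following; the field body is left empty so that MediaPipe Tasks applies its
// default delegate configuration:
//
//   acceleration {
//     gpu {}
//   }
//
// Because `delegate` is a oneof, setting a different field (e.g. `xnnpack`
// or `tflite`) clears any previously selected delegate.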