chromium/components/assist_ranker/proto/quantized_nn_classifier.proto

// Copyright 2018 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

syntax = "proto2";

option optimize_for = LITE_RUNTIME;

package assist_ranker;

// The weights and biases for a single quantized neural-network layer. Weight
// and bias values are quantized to 8 bits; each stored byte x is converted
// back to a float using
//   value = x * (high - low) / 256 + low.
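// For example, with low = -2.0 and high = 2.0, a stored byte of 128
// dequantizes to 128 * 4.0 / 256 + (-2.0) = 0.0, and a byte of 255 to
// 255 * 4.0 / 256 + (-2.0) = 1.984375 (just short of the high value).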
message QuantizedNNLayer {
  // The quantized 8-bit weights for the layer.
  repeated bytes weights = 1;

  // The bias vector for the layer.
  optional bytes biases = 2;

  // The low end of the dequantization range for the weights and biases
  // (the value a stored byte of 0 maps to).
  optional float low = 3;

  // The high end of the dequantization range for the weights and biases.
  optional float high = 4;
}

// Defines the quantized weights and biases for a neural-network classifier
// with a single hidden layer.
message QuantizedNNClassifierModel {
  // The fully connected hidden layer applied to the input feature vector.
  optional QuantizedNNLayer hidden_layer = 1;

  // The output layer producing the classification logits.
  optional QuantizedNNLayer logits_layer = 2;
}
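
A minimal sketch (not the actual Chromium implementation) of how a
QuantizedNNLayer defined above might be expanded back into floats, assuming
the C++ classes generated from this file. The Dequantize and DequantizeLayer
helpers and the std::vector output representation are illustrative only; each
repeated weights entry is treated here as one row of the weight matrix.

#include <cstdint>
#include <string>
#include <utility>
#include <vector>

#include "components/assist_ranker/proto/quantized_nn_classifier.pb.h"

namespace {

// Applies the dequantization formula documented in the proto:
//   value = x * (high - low) / 256 + low.
float Dequantize(uint8_t x, float low, float high) {
  return static_cast<float>(x) * (high - low) / 256.0f + low;
}

// Expands a quantized layer into float weight rows and a float bias vector.
void DequantizeLayer(const assist_ranker::QuantizedNNLayer& layer,
                     std::vector<std::vector<float>>* weights,
                     std::vector<float>* biases) {
  const float low = layer.low();
  const float high = layer.high();
  for (const std::string& row : layer.weights()) {
    std::vector<float> float_row;
    for (unsigned char byte : row)
      float_row.push_back(Dequantize(byte, low, high));
    weights->push_back(std::move(float_row));
  }
  for (unsigned char byte : layer.biases())
    biases->push_back(Dequantize(byte, low, high));
}

}  // namespace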