// Generated by the protocol buffer compiler.  DO NOT EDIT!
// source: quantized_nn_classifier.proto

#ifndef GOOGLE_PROTOBUF_INCLUDED_quantized_5fnn_5fclassifier_2eproto
#define GOOGLE_PROTOBUF_INCLUDED_quantized_5fnn_5fclassifier_2eproto
#include <limits>
#include <string>
#include <google/protobuf/port_def.inc>
#if PROTOBUF_VERSION < 3021000
#error This file was generated by a newer version of protoc which is
#error incompatible with your Protocol Buffer headers. Please update
#error your headers.
#endif
#if 3021012 < PROTOBUF_MIN_PROTOC_VERSION
#error This file was generated by an older version of protoc which is
#error incompatible with your Protocol Buffer headers. Please
#error regenerate this file with a newer version of protoc.
#endif
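// The two checks above tie this generated header to a compatible Protocol
// Buffers runtime: the headers must be at least 3.21.0, and they must not
// require a protoc newer than the 3.21.12 release that produced this file.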
#include <google/protobuf/port_undef.inc>
#include <google/protobuf/io/coded_stream.h>
#include <google/protobuf/arena.h>
#include <google/protobuf/arenastring.h>
#include <google/protobuf/generated_message_util.h>
#include <google/protobuf/metadata_lite.h>
#include <google/protobuf/message_lite.h>
#include <google/protobuf/repeated_field.h>
#include <google/protobuf/extension_set.h>
#include <google/protobuf/port_def.inc>
#define PROTOBUF_INTERNAL_EXPORT_quantized_5fnn_5fclassifier_2eproto
PROTOBUF_NAMESPACE_OPEN
namespace internal {
class AnyMetadata;
}  // namespace internal
PROTOBUF_NAMESPACE_CLOSE
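// Internal implementation detail -- do not use this struct directly.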
struct TableStruct_quantized_5fnn_5fclassifier_2eproto { … };
namespace assist_ranker {
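// Forward declarations of the generated message classes and their
// default-instance storage.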
class QuantizedNNClassifierModel;
struct QuantizedNNClassifierModelDefaultTypeInternal;
extern QuantizedNNClassifierModelDefaultTypeInternal _QuantizedNNClassifierModel_default_instance_;
class QuantizedNNLayer;
struct QuantizedNNLayerDefaultTypeInternal;
extern QuantizedNNLayerDefaultTypeInternal _QuantizedNNLayer_default_instance_;
}  // namespace assist_ranker
PROTOBUF_NAMESPACE_OPEN
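// Template specializations that let these messages be created on a
// ::PROTOBUF_NAMESPACE_ID::Arena.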
template<> ::assist_ranker::QuantizedNNClassifierModel* Arena::CreateMaybeMessage<::assist_ranker::QuantizedNNClassifierModel>(Arena*);
template<> ::assist_ranker::QuantizedNNLayer* Arena::CreateMaybeMessage<::assist_ranker::QuantizedNNLayer>(Arena*);
PROTOBUF_NAMESPACE_CLOSE
namespace assist_ranker {
class QuantizedNNLayer final :
public ::PROTOBUF_NAMESPACE_ID::MessageLite { … };
class QuantizedNNClassifierModel final :
public ::PROTOBUF_NAMESPACE_ID::MessageLite { … };
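// Illustrative usage sketch (not part of the generated API; `serialized_proto`
// is a placeholder for whatever byte string the model is loaded from, and the
// dequantization formula is only an assumption -- the real scheme is defined
// by the code that consumes this proto, not by this header):
//
//   assist_ranker::QuantizedNNClassifierModel model;
//   if (model.ParseFromString(serialized_proto)) {
//     const assist_ranker::QuantizedNNLayer& hidden = model.hidden_layer();
//     const float low = hidden.low();
//     const float high = hidden.high();
//     for (const std::string& row : hidden.weights()) {
//       for (unsigned char byte : row) {
//         // e.g. value = low + (high - low) * byte / 255.0f;  (assumed)
//       }
//     }
//   }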
#ifdef __GNUC__
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#endif  // __GNUC__
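// QuantizedNNLayer accessors.

// Repeated string field `weights`.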
inline int QuantizedNNLayer::_internal_weights_size() const { … }
inline int QuantizedNNLayer::weights_size() const { … }
inline void QuantizedNNLayer::clear_weights() { … }
inline std::string* QuantizedNNLayer::add_weights() { … }
inline const std::string& QuantizedNNLayer::_internal_weights(int index) const { … }
inline const std::string& QuantizedNNLayer::weights(int index) const { … }
inline std::string* QuantizedNNLayer::mutable_weights(int index) { … }
inline void QuantizedNNLayer::set_weights(int index, const std::string& value) { … }
inline void QuantizedNNLayer::set_weights(int index, std::string&& value) { … }
inline void QuantizedNNLayer::set_weights(int index, const char* value) { … }
inline void QuantizedNNLayer::set_weights(int index, const void* value, size_t size) { … }
inline std::string* QuantizedNNLayer::_internal_add_weights() { … }
inline void QuantizedNNLayer::add_weights(const std::string& value) { … }
inline void QuantizedNNLayer::add_weights(std::string&& value) { … }
inline void QuantizedNNLayer::add_weights(const char* value) { … }
inline void QuantizedNNLayer::add_weights(const void* value, size_t size) { … }
inline const ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField<std::string>&
QuantizedNNLayer::weights() const { … }
inline ::PROTOBUF_NAMESPACE_ID::RepeatedPtrField<std::string>*
QuantizedNNLayer::mutable_weights() { … }
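// Singular string field `biases` with explicit presence (has_/clear_).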
inline bool QuantizedNNLayer::_internal_has_biases() const { … }
inline bool QuantizedNNLayer::has_biases() const { … }
inline void QuantizedNNLayer::clear_biases() { … }
inline const std::string& QuantizedNNLayer::biases() const { … }
template <typename ArgT0, typename... ArgT>
inline PROTOBUF_ALWAYS_INLINE
void QuantizedNNLayer::set_biases(ArgT0&& arg0, ArgT... args) { … }
inline std::string* QuantizedNNLayer::mutable_biases() { … }
inline const std::string& QuantizedNNLayer::_internal_biases() const { … }
inline void QuantizedNNLayer::_internal_set_biases(const std::string& value) { … }
inline std::string* QuantizedNNLayer::_internal_mutable_biases() { … }
inline std::string* QuantizedNNLayer::release_biases() { … }
inline void QuantizedNNLayer::set_allocated_biases(std::string* biases) { … }
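// Singular float field `low` with explicit presence.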
inline bool QuantizedNNLayer::_internal_has_low() const { … }
inline bool QuantizedNNLayer::has_low() const { … }
inline void QuantizedNNLayer::clear_low() { … }
inline float QuantizedNNLayer::_internal_low() const { … }
inline float QuantizedNNLayer::low() const { … }
inline void QuantizedNNLayer::_internal_set_low(float value) { … }
inline void QuantizedNNLayer::set_low(float value) { … }
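// Singular float field `high` with explicit presence.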
inline bool QuantizedNNLayer::_internal_has_high() const { … }
inline bool QuantizedNNLayer::has_high() const { … }
inline void QuantizedNNLayer::clear_high() { … }
inline float QuantizedNNLayer::_internal_high() const { … }
inline float QuantizedNNLayer::high() const { … }
inline void QuantizedNNLayer::_internal_set_high(float value) { … }
inline void QuantizedNNLayer::set_high(float value) { … }
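// QuantizedNNClassifierModel accessors.

// Singular message field `hidden_layer` of type QuantizedNNLayer.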
inline bool QuantizedNNClassifierModel::_internal_has_hidden_layer() const { … }
inline bool QuantizedNNClassifierModel::has_hidden_layer() const { … }
inline void QuantizedNNClassifierModel::clear_hidden_layer() { … }
inline const ::assist_ranker::QuantizedNNLayer& QuantizedNNClassifierModel::_internal_hidden_layer() const { … }
inline const ::assist_ranker::QuantizedNNLayer& QuantizedNNClassifierModel::hidden_layer() const { … }
inline void QuantizedNNClassifierModel::unsafe_arena_set_allocated_hidden_layer(
::assist_ranker::QuantizedNNLayer* hidden_layer) { … }
inline ::assist_ranker::QuantizedNNLayer* QuantizedNNClassifierModel::release_hidden_layer() { … }
inline ::assist_ranker::QuantizedNNLayer* QuantizedNNClassifierModel::unsafe_arena_release_hidden_layer() { … }
inline ::assist_ranker::QuantizedNNLayer* QuantizedNNClassifierModel::_internal_mutable_hidden_layer() { … }
inline ::assist_ranker::QuantizedNNLayer* QuantizedNNClassifierModel::mutable_hidden_layer() { … }
inline void QuantizedNNClassifierModel::set_allocated_hidden_layer(::assist_ranker::QuantizedNNLayer* hidden_layer) { … }
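// Singular message field `logits_layer` of type QuantizedNNLayer.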
inline bool QuantizedNNClassifierModel::_internal_has_logits_layer() const { … }
inline bool QuantizedNNClassifierModel::has_logits_layer() const { … }
inline void QuantizedNNClassifierModel::clear_logits_layer() { … }
inline const ::assist_ranker::QuantizedNNLayer& QuantizedNNClassifierModel::_internal_logits_layer() const { … }
inline const ::assist_ranker::QuantizedNNLayer& QuantizedNNClassifierModel::logits_layer() const { … }
inline void QuantizedNNClassifierModel::unsafe_arena_set_allocated_logits_layer(
::assist_ranker::QuantizedNNLayer* logits_layer) { … }
inline ::assist_ranker::QuantizedNNLayer* QuantizedNNClassifierModel::release_logits_layer() { … }
inline ::assist_ranker::QuantizedNNLayer* QuantizedNNClassifierModel::unsafe_arena_release_logits_layer() { … }
inline ::assist_ranker::QuantizedNNLayer* QuantizedNNClassifierModel::_internal_mutable_logits_layer() { … }
inline ::assist_ranker::QuantizedNNLayer* QuantizedNNClassifierModel::mutable_logits_layer() { … }
inline void QuantizedNNClassifierModel::set_allocated_logits_layer(::assist_ranker::QuantizedNNLayer* logits_layer) { … }
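// Note on the message-typed setters above (standard protobuf ownership
// semantics, shown only as an illustrative sketch; `model` stands for a
// heap-allocated QuantizedNNClassifierModel):
//
//   auto* layer = new assist_ranker::QuantizedNNLayer;
//   model.set_allocated_hidden_layer(layer);      // `model` takes ownership
//   assist_ranker::QuantizedNNLayer* released =
//       model.release_hidden_layer();             // caller owns it again
//   delete released;
//
// The unsafe_arena_* variants skip the ownership transfer and should only be
// used when both objects live on the same arena.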
#ifdef __GNUC__
#pragma GCC diagnostic pop
#endif  // __GNUC__
}  // namespace assist_ranker
#include <google/protobuf/port_undef.inc>
#endif  // GOOGLE_PROTOBUF_INCLUDED_quantized_5fnn_5fclassifier_2eproto