chromium/out/Default/gen/media/mojo/mojom/speech_recognition.mojom-blink.cc

// media/mojo/mojom/speech_recognition.mojom-blink.cc is auto-generated by mojom_bindings_generator.py; do not edit

// Copyright 2013 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#if defined(__clang__)
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wunused-private-field"
#endif

#include "media/mojo/mojom/speech_recognition.mojom-blink.h"

#include <math.h>
#include <stdint.h>
#include <utility>

#include "base/debug/alias.h"
#include "base/hash/md5_constexpr.h"
#include "base/run_loop.h"
#include "base/strings/string_number_conversions.h"
#include "base/task/thread_pool/thread_pool_instance.h"
#include "base/trace_event/trace_event.h"
#include "base/trace_event/typed_macros.h"
#include "mojo/public/cpp/bindings/features.h"
#include "mojo/public/cpp/bindings/lib/default_construct_tag_internal.h"
#include "mojo/public/cpp/bindings/lib/generated_code_util.h"
#include "mojo/public/cpp/bindings/lib/message_internal.h"
#include "mojo/public/cpp/bindings/lib/proxy_to_responder.h"
#include "mojo/public/cpp/bindings/lib/send_message_helper.h"
#include "mojo/public/cpp/bindings/lib/serialization_util.h"
#include "mojo/public/cpp/bindings/lib/unserialized_message_context.h"
#include "mojo/public/cpp/bindings/lib/validate_params.h"
#include "mojo/public/cpp/bindings/lib/validation_errors.h"
#include "mojo/public/cpp/bindings/mojo_buildflags.h"
#include "mojo/public/cpp/bindings/urgent_message_scope.h"
#include "mojo/public/interfaces/bindings/interface_control_messages.mojom.h"
#include "third_party/perfetto/include/perfetto/tracing/traced_value.h"

#include "media/mojo/mojom/speech_recognition.mojom-params-data.h"
#include "media/mojo/mojom/speech_recognition.mojom-shared-message-ids.h"

#include "media/mojo/mojom/speech_recognition.mojom-blink-import-headers.h"
#include "media/mojo/mojom/speech_recognition.mojom-blink-test-utils.h"
#include "mojo/public/cpp/bindings/lib/wtf_serialization.h"


namespace media::mojom::blink {
HypothesisParts::HypothesisParts()
    :{}

HypothesisParts::HypothesisParts(
    WTF::Vector<WTF::String> text_in,
    ::base::TimeDelta hypothesis_part_offset_in)
    :{}

HypothesisParts::~HypothesisParts() = default;

void HypothesisParts::WriteIntoTrace(
    perfetto::TracedValue traced_context) const {}

bool HypothesisParts::Validate(
    const void* data,
    mojo::internal::ValidationContext* validation_context) {}
TimingInformation::TimingInformation()
    :{}

TimingInformation::TimingInformation(
    ::base::TimeDelta audio_start_time_in,
    ::base::TimeDelta audio_end_time_in,
    std::optional<WTF::Vector<HypothesisPartsPtr>> hypothesis_parts_in)
    :{}

TimingInformation::~TimingInformation() = default;

void TimingInformation::WriteIntoTrace(
    perfetto::TracedValue traced_context) const {}

bool TimingInformation::Validate(
    const void* data,
    mojo::internal::ValidationContext* validation_context) {}
SpeechRecognitionResult::SpeechRecognitionResult()
    :{}

SpeechRecognitionResult::SpeechRecognitionResult(
    const WTF::String& transcription_in,
    bool is_final_in,
    TimingInformationPtr timing_information_in)
    :{}

SpeechRecognitionResult::~SpeechRecognitionResult() = default;

void SpeechRecognitionResult::WriteIntoTrace(
    perfetto::TracedValue traced_context) const {}

bool SpeechRecognitionResult::Validate(
    const void* data,
    mojo::internal::ValidationContext* validation_context) {}
LanguageIdentificationEvent::LanguageIdentificationEvent()
    :{}

LanguageIdentificationEvent::LanguageIdentificationEvent(
    const WTF::String& language_in,
    ConfidenceLevel confidence_level_in)
    :{}

LanguageIdentificationEvent::LanguageIdentificationEvent(
    const WTF::String& language_in,
    ConfidenceLevel confidence_level_in,
    std::optional<AsrSwitchResult> asr_switch_result_in)
    :{}

LanguageIdentificationEvent::~LanguageIdentificationEvent() = default;

void LanguageIdentificationEvent::WriteIntoTrace(
    perfetto::TracedValue traced_context) const {}

bool LanguageIdentificationEvent::Validate(
    const void* data,
    mojo::internal::ValidationContext* validation_context) {}
SpeechRecognitionSurfaceMetadata::SpeechRecognitionSurfaceMetadata()
    :{}

SpeechRecognitionSurfaceMetadata::SpeechRecognitionSurfaceMetadata(
    const ::base::UnguessableToken& session_id_in)
    :{}

SpeechRecognitionSurfaceMetadata::~SpeechRecognitionSurfaceMetadata() = default;

void SpeechRecognitionSurfaceMetadata::WriteIntoTrace(
    perfetto::TracedValue traced_context) const {}

bool SpeechRecognitionSurfaceMetadata::Validate(
    const void* data,
    mojo::internal::ValidationContext* validation_context) {}
SpeechRecognitionOptions::SpeechRecognitionOptions()
    :{}

SpeechRecognitionOptions::SpeechRecognitionOptions(
    SpeechRecognitionMode recognition_mode_in,
    bool enable_formatting_in,
    const WTF::String& language_in)
    :{}

SpeechRecognitionOptions::SpeechRecognitionOptions(
    SpeechRecognitionMode recognition_mode_in,
    bool enable_formatting_in,
    const WTF::String& language_in,
    bool is_server_based_in,
    RecognizerClientType recognizer_client_type_in)
    :{}

SpeechRecognitionOptions::SpeechRecognitionOptions(
    SpeechRecognitionMode recognition_mode_in,
    bool enable_formatting_in,
    const WTF::String& language_in,
    bool is_server_based_in,
    RecognizerClientType recognizer_client_type_in,
    bool skip_continuously_empty_audio_in)
    :{}

SpeechRecognitionOptions::SpeechRecognitionOptions(
    SpeechRecognitionMode recognition_mode_in,
    bool enable_formatting_in,
    const WTF::String& language_in,
    bool is_server_based_in,
    RecognizerClientType recognizer_client_type_in,
    bool skip_continuously_empty_audio_in,
    const WTF::String& experiment_recognizer_routing_key_in)
    :{}

SpeechRecognitionOptions::SpeechRecognitionOptions(
    SpeechRecognitionMode recognition_mode_in,
    bool enable_formatting_in,
    const WTF::String& language_in,
    bool is_server_based_in,
    RecognizerClientType recognizer_client_type_in,
    bool skip_continuously_empty_audio_in,
    const WTF::String& experiment_recognizer_routing_key_in,
    int32_t channel_count_in,
    int32_t sample_rate_in)
    :{}

SpeechRecognitionOptions::~SpeechRecognitionOptions() = default;

void SpeechRecognitionOptions::WriteIntoTrace(
    perfetto::TracedValue traced_context) const {}

bool SpeechRecognitionOptions::Validate(
    const void* data,
    mojo::internal::ValidationContext* validation_context) {}
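// Illustration (not part of the generated bindings): callers typically build a
// SpeechRecognitionOptions with the generated New() factory and populate the
// fields that mirror the constructor parameters above, for example:
//
//   auto options = SpeechRecognitionOptions::New();
//   options->language = "en-US";        // assumed value, for illustration only
//   options->enable_formatting = true;  // assumed value, for illustration only
//   options->channel_count = 1;         // assumed value, for illustration only
//   options->sample_rate = 16000;       // assumed value, for illustration only
//
// Field names follow the constructor parameters (recognition_mode,
// enable_formatting, language, is_server_based, recognizer_client_type,
// skip_continuously_empty_audio, experiment_recognizer_routing_key,
// channel_count, sample_rate); concrete enum values such as the recognition
// mode are project-specific and are not shown here.
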
const char SpeechRecognitionContext::Name_[] =;

SpeechRecognitionContext::IPCStableHashFunction SpeechRecognitionContext::MessageToMethodInfo_(mojo::Message& message) {}


const char* SpeechRecognitionContext::MessageToMethodName_(mojo::Message& message) {}

#if !BUILDFLAG(IS_FUCHSIA)
uint32_t SpeechRecognitionContext::BindRecognizer_Sym::IPCStableHash() {}
uint32_t SpeechRecognitionContext::BindWebSpeechRecognizer_Sym::IPCStableHash() {}
#endif  // !BUILDFLAG(IS_FUCHSIA)

class SpeechRecognitionContext_BindRecognizer_ForwardToCallback
    : public mojo::MessageReceiver {};

SpeechRecognitionContextProxy::SpeechRecognitionContextProxy(mojo::MessageReceiverWithResponder* receiver)
    :{}

void SpeechRecognitionContextProxy::BindRecognizer(
    ::mojo::PendingReceiver<SpeechRecognitionRecognizer> in_receiver, ::mojo::PendingRemote<SpeechRecognitionRecognizerClient> in_client, SpeechRecognitionOptionsPtr in_options, BindRecognizerCallback callback) {}
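// Illustration (not part of the generated bindings): this proxy method is
// normally reached through a connected mojo::Remote, roughly as sketched
// below. `pending_context`, `recognizer` (a
// mojo::Remote<SpeechRecognitionRecognizer>), `client_remote`, and `options`
// are assumed to exist in the caller.
//
//   mojo::Remote<SpeechRecognitionContext> context(std::move(pending_context));
//   context->BindRecognizer(
//       recognizer.BindNewPipeAndPassReceiver(), std::move(client_remote),
//       std::move(options),
//       base::BindOnce([](bool is_multichannel_supported) { /* ... */ }));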

void SpeechRecognitionContextProxy::BindWebSpeechRecognizer(
    ::mojo::PendingReceiver<::media::mojom::blink::SpeechRecognitionSession> in_session_receiver, ::mojo::PendingRemote<::media::mojom::blink::SpeechRecognitionSessionClient> in_session_client, ::mojo::PendingReceiver<::media::mojom::blink::SpeechRecognitionAudioForwarder> in_audio_forwarder, int32_t in_channel_count, int32_t in_sample_rate, SpeechRecognitionOptionsPtr in_options, bool in_continuous) {}
class SpeechRecognitionContext_BindRecognizer_ProxyToResponder : public ::mojo::internal::ProxyToResponder {};

bool SpeechRecognitionContext_BindRecognizer_ForwardToCallback::Accept(
    mojo::Message* message) {}

void SpeechRecognitionContext_BindRecognizer_ProxyToResponder::Run(
    bool in_is_multichannel_supported) {}

// static
bool SpeechRecognitionContextStubDispatch::Accept(
    SpeechRecognitionContext* impl,
    mojo::Message* message) {}

// static
bool SpeechRecognitionContextStubDispatch::AcceptWithResponder(
    SpeechRecognitionContext* impl,
    mojo::Message* message,
    std::unique_ptr<mojo::MessageReceiverWithStatus> responder) {}
namespace {
}  // namespace
static const mojo::internal::GenericValidationInfo kSpeechRecognitionContextValidationInfo[] =;

bool SpeechRecognitionContextRequestValidator::Accept(mojo::Message* message) {}

bool SpeechRecognitionContextResponseValidator::Accept(mojo::Message* message) {}
const char SpeechRecognitionRecognizer::Name_[] =;

SpeechRecognitionRecognizer::IPCStableHashFunction SpeechRecognitionRecognizer::MessageToMethodInfo_(mojo::Message& message) {}


const char* SpeechRecognitionRecognizer::MessageToMethodName_(mojo::Message& message) {}

#if !BUILDFLAG(IS_FUCHSIA)
uint32_t SpeechRecognitionRecognizer::SendAudioToSpeechRecognitionService_Sym::IPCStableHash() {}
uint32_t SpeechRecognitionRecognizer::MarkDone_Sym::IPCStableHash() {}
uint32_t SpeechRecognitionRecognizer::OnLanguageChanged_Sym::IPCStableHash() {}
uint32_t SpeechRecognitionRecognizer::OnMaskOffensiveWordsChanged_Sym::IPCStableHash() {}
#endif  // !BUILDFLAG(IS_FUCHSIA)

SpeechRecognitionRecognizerProxy::SpeechRecognitionRecognizerProxy(mojo::MessageReceiverWithResponder* receiver)
    :{}

void SpeechRecognitionRecognizerProxy::SendAudioToSpeechRecognitionService(
    ::media::mojom::blink::AudioDataS16Ptr in_buffer) {}

void SpeechRecognitionRecognizerProxy::MarkDone(
    ) {}

void SpeechRecognitionRecognizerProxy::OnLanguageChanged(
    const WTF::String& in_language) {}

void SpeechRecognitionRecognizerProxy::OnMaskOffensiveWordsChanged(
    bool in_mask_offensive_words) {}

// static
bool SpeechRecognitionRecognizerStubDispatch::Accept(
    SpeechRecognitionRecognizer* impl,
    mojo::Message* message) {}

// static
bool SpeechRecognitionRecognizerStubDispatch::AcceptWithResponder(
    SpeechRecognitionRecognizer* impl,
    mojo::Message* message,
    std::unique_ptr<mojo::MessageReceiverWithStatus> responder) {}
namespace {
}  // namespace
static const mojo::internal::GenericValidationInfo kSpeechRecognitionRecognizerValidationInfo[] =;

bool SpeechRecognitionRecognizerRequestValidator::Accept(mojo::Message* message) {}

const char SpeechRecognitionRecognizerClient::Name_[] =;

SpeechRecognitionRecognizerClient::IPCStableHashFunction SpeechRecognitionRecognizerClient::MessageToMethodInfo_(mojo::Message& message) {}


const char* SpeechRecognitionRecognizerClient::MessageToMethodName_(mojo::Message& message) {}

#if !BUILDFLAG(IS_FUCHSIA)
uint32_t SpeechRecognitionRecognizerClient::OnSpeechRecognitionRecognitionEvent_Sym::IPCStableHash() {}
uint32_t SpeechRecognitionRecognizerClient::OnSpeechRecognitionStopped_Sym::IPCStableHash() {}
uint32_t SpeechRecognitionRecognizerClient::OnSpeechRecognitionError_Sym::IPCStableHash() {}
uint32_t SpeechRecognitionRecognizerClient::OnLanguageIdentificationEvent_Sym::IPCStableHash() {}
#endif  // !BUILDFLAG(IS_FUCHSIA)

class SpeechRecognitionRecognizerClient_OnSpeechRecognitionRecognitionEvent_ForwardToCallback
    : public mojo::MessageReceiver {};

SpeechRecognitionRecognizerClientProxy::SpeechRecognitionRecognizerClientProxy(mojo::MessageReceiverWithResponder* receiver)
    :{}

void SpeechRecognitionRecognizerClientProxy::OnSpeechRecognitionRecognitionEvent(
    SpeechRecognitionResultPtr in_result, OnSpeechRecognitionRecognitionEventCallback callback) {}

void SpeechRecognitionRecognizerClientProxy::OnSpeechRecognitionStopped(
    ) {}

void SpeechRecognitionRecognizerClientProxy::OnSpeechRecognitionError(
    ) {}

void SpeechRecognitionRecognizerClientProxy::OnLanguageIdentificationEvent(
    LanguageIdentificationEventPtr in_event) {}
class SpeechRecognitionRecognizerClient_OnSpeechRecognitionRecognitionEvent_ProxyToResponder : public ::mojo::internal::ProxyToResponder {};

bool SpeechRecognitionRecognizerClient_OnSpeechRecognitionRecognitionEvent_ForwardToCallback::Accept(
    mojo::Message* message) {}

void SpeechRecognitionRecognizerClient_OnSpeechRecognitionRecognitionEvent_ProxyToResponder::Run(
    bool in_continue_recognition) {}
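// Illustration (not part of the generated bindings): an implementation of
// SpeechRecognitionRecognizerClient answers each recognition event through the
// supplied callback, which carries the decision whether to keep recognizing.
// `MyClient` is a hypothetical implementation used only for illustration.
//
//   void MyClient::OnSpeechRecognitionRecognitionEvent(
//       SpeechRecognitionResultPtr result,
//       OnSpeechRecognitionRecognitionEventCallback callback) {
//     // Consume |result|, then report back to the sender.
//     std::move(callback).Run(/*continue_recognition=*/true);
//   }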

// static
bool SpeechRecognitionRecognizerClientStubDispatch::Accept(
    SpeechRecognitionRecognizerClient* impl,
    mojo::Message* message) {}

// static
bool SpeechRecognitionRecognizerClientStubDispatch::AcceptWithResponder(
    SpeechRecognitionRecognizerClient* impl,
    mojo::Message* message,
    std::unique_ptr<mojo::MessageReceiverWithStatus> responder) {}
namespace {
}  // namespace
static const mojo::internal::GenericValidationInfo kSpeechRecognitionRecognizerClientValidationInfo[] =;

bool SpeechRecognitionRecognizerClientRequestValidator::Accept(mojo::Message* message) {}

bool SpeechRecognitionRecognizerClientResponseValidator::Accept(mojo::Message* message) {}
const char SpeechRecognitionBrowserObserver::Name_[] =;

SpeechRecognitionBrowserObserver::IPCStableHashFunction SpeechRecognitionBrowserObserver::MessageToMethodInfo_(mojo::Message& message) {}


const char* SpeechRecognitionBrowserObserver::MessageToMethodName_(mojo::Message& message) {}

#if !BUILDFLAG(IS_FUCHSIA)
uint32_t SpeechRecognitionBrowserObserver::SpeechRecognitionAvailabilityChanged_Sym::IPCStableHash() {}
uint32_t SpeechRecognitionBrowserObserver::SpeechRecognitionLanguageChanged_Sym::IPCStableHash() {}
uint32_t SpeechRecognitionBrowserObserver::SpeechRecognitionMaskOffensiveWordsChanged_Sym::IPCStableHash() {}
#endif  // !BUILDFLAG(IS_FUCHSIA)

SpeechRecognitionBrowserObserverProxy::SpeechRecognitionBrowserObserverProxy(mojo::MessageReceiverWithResponder* receiver)
    :{}

void SpeechRecognitionBrowserObserverProxy::SpeechRecognitionAvailabilityChanged(
    bool in_is_speech_recognition_available) {}

void SpeechRecognitionBrowserObserverProxy::SpeechRecognitionLanguageChanged(
    const WTF::String& in_language) {}

void SpeechRecognitionBrowserObserverProxy::SpeechRecognitionMaskOffensiveWordsChanged(
    bool in_mask_offensive_words) {}

// static
bool SpeechRecognitionBrowserObserverStubDispatch::Accept(
    SpeechRecognitionBrowserObserver* impl,
    mojo::Message* message) {}

// static
bool SpeechRecognitionBrowserObserverStubDispatch::AcceptWithResponder(
    SpeechRecognitionBrowserObserver* impl,
    mojo::Message* message,
    std::unique_ptr<mojo::MessageReceiverWithStatus> responder) {}
namespace {
}  // namespace
static const mojo::internal::GenericValidationInfo kSpeechRecognitionBrowserObserverValidationInfo[] =;

bool SpeechRecognitionBrowserObserverRequestValidator::Accept(mojo::Message* message) {}

const char SpeechRecognitionSurface::Name_[] =;

SpeechRecognitionSurface::IPCStableHashFunction SpeechRecognitionSurface::MessageToMethodInfo_(mojo::Message& message) {}


const char* SpeechRecognitionSurface::MessageToMethodName_(mojo::Message& message) {}

#if !BUILDFLAG(IS_FUCHSIA)
uint32_t SpeechRecognitionSurface::Activate_Sym::IPCStableHash() {}
uint32_t SpeechRecognitionSurface::GetBounds_Sym::IPCStableHash() {}
#endif  // !BUILDFLAG(IS_FUCHSIA)

class SpeechRecognitionSurface_GetBounds_ForwardToCallback
    : public mojo::MessageReceiver {};

SpeechRecognitionSurfaceProxy::SpeechRecognitionSurfaceProxy(mojo::MessageReceiverWithResponder* receiver)
    :{}

void SpeechRecognitionSurfaceProxy::Activate(
    ) {}

void SpeechRecognitionSurfaceProxy::GetBounds(
    GetBoundsCallback callback) {}
class SpeechRecognitionSurface_GetBounds_ProxyToResponder : public ::mojo::internal::ProxyToResponder {};

bool SpeechRecognitionSurface_GetBounds_ForwardToCallback::Accept(
    mojo::Message* message) {}

void SpeechRecognitionSurface_GetBounds_ProxyToResponder::Run(
    const std::optional<::gfx::Rect>& in_bounds) {}

// static
bool SpeechRecognitionSurfaceStubDispatch::Accept(
    SpeechRecognitionSurface* impl,
    mojo::Message* message) {}

// static
bool SpeechRecognitionSurfaceStubDispatch::AcceptWithResponder(
    SpeechRecognitionSurface* impl,
    mojo::Message* message,
    std::unique_ptr<mojo::MessageReceiverWithStatus> responder) {}
namespace {
}  // namespace
static const mojo::internal::GenericValidationInfo kSpeechRecognitionSurfaceValidationInfo[] =;

bool SpeechRecognitionSurfaceRequestValidator::Accept(mojo::Message* message) {}

bool SpeechRecognitionSurfaceResponseValidator::Accept(mojo::Message* message) {}
const char SpeechRecognitionSurfaceClient::Name_[] =;

SpeechRecognitionSurfaceClient::IPCStableHashFunction SpeechRecognitionSurfaceClient::MessageToMethodInfo_(mojo::Message& message) {}


const char* SpeechRecognitionSurfaceClient::MessageToMethodName_(mojo::Message& message) {}

#if !BUILDFLAG(IS_FUCHSIA)
uint32_t SpeechRecognitionSurfaceClient::OnSessionEnded_Sym::IPCStableHash() {}
uint32_t SpeechRecognitionSurfaceClient::OnFullscreenToggled_Sym::IPCStableHash() {}
#endif  // !BUILDFLAG(IS_FUCHSIA)

SpeechRecognitionSurfaceClientProxy::SpeechRecognitionSurfaceClientProxy(mojo::MessageReceiverWithResponder* receiver)
    :{}

void SpeechRecognitionSurfaceClientProxy::OnSessionEnded(
    ) {}

void SpeechRecognitionSurfaceClientProxy::OnFullscreenToggled(
    ) {}

// static
bool SpeechRecognitionSurfaceClientStubDispatch::Accept(
    SpeechRecognitionSurfaceClient* impl,
    mojo::Message* message) {}

// static
bool SpeechRecognitionSurfaceClientStubDispatch::AcceptWithResponder(
    SpeechRecognitionSurfaceClient* impl,
    mojo::Message* message,
    std::unique_ptr<mojo::MessageReceiverWithStatus> responder) {}
namespace {
}  // namespace
static const mojo::internal::GenericValidationInfo kSpeechRecognitionSurfaceClientValidationInfo[] =;

bool SpeechRecognitionSurfaceClientRequestValidator::Accept(mojo::Message* message) {}

const char SpeechRecognitionClientBrowserInterface::Name_[] =;

SpeechRecognitionClientBrowserInterface::IPCStableHashFunction SpeechRecognitionClientBrowserInterface::MessageToMethodInfo_(mojo::Message& message) {}


const char* SpeechRecognitionClientBrowserInterface::MessageToMethodName_(mojo::Message& message) {}

#if !BUILDFLAG(IS_FUCHSIA)
uint32_t SpeechRecognitionClientBrowserInterface::BindSpeechRecognitionBrowserObserver_Sym::IPCStableHash() {}
uint32_t SpeechRecognitionClientBrowserInterface::BindRecognizerToRemoteClient_Sym::IPCStableHash() {}
#endif  // !BUILDFLAG(IS_FUCHSIA)

SpeechRecognitionClientBrowserInterfaceProxy::SpeechRecognitionClientBrowserInterfaceProxy(mojo::MessageReceiverWithResponder* receiver)
    :{}

void SpeechRecognitionClientBrowserInterfaceProxy::BindSpeechRecognitionBrowserObserver(
    ::mojo::PendingRemote<SpeechRecognitionBrowserObserver> in_observer) {}

void SpeechRecognitionClientBrowserInterfaceProxy::BindRecognizerToRemoteClient(
    ::mojo::PendingReceiver<SpeechRecognitionRecognizerClient> in_client, ::mojo::PendingReceiver<SpeechRecognitionSurfaceClient> in_surface_client, ::mojo::PendingRemote<SpeechRecognitionSurface> in_surface, SpeechRecognitionSurfaceMetadataPtr in_metadata) {}

// static
bool SpeechRecognitionClientBrowserInterfaceStubDispatch::Accept(
    SpeechRecognitionClientBrowserInterface* impl,
    mojo::Message* message) {}

// static
bool SpeechRecognitionClientBrowserInterfaceStubDispatch::AcceptWithResponder(
    SpeechRecognitionClientBrowserInterface* impl,
    mojo::Message* message,
    std::unique_ptr<mojo::MessageReceiverWithStatus> responder) {}
namespace {
}  // namespace
static const mojo::internal::GenericValidationInfo kSpeechRecognitionClientBrowserInterfaceValidationInfo[] =;

bool SpeechRecognitionClientBrowserInterfaceRequestValidator::Accept(mojo::Message* message) {}



}  // media::mojom::blink


namespace mojo {
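// The StructTraits<...>::Read() specializations below deserialize each
// struct's wire-format DataView into its Blink-variant C++ type; returning
// false reports a validation failure to the bindings layer.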


// static
bool StructTraits<::media::mojom::blink::HypothesisParts::DataView, ::media::mojom::blink::HypothesisPartsPtr>::Read(
    ::media::mojom::blink::HypothesisParts::DataView input,
    ::media::mojom::blink::HypothesisPartsPtr* output) {}


// static
bool StructTraits<::media::mojom::blink::TimingInformation::DataView, ::media::mojom::blink::TimingInformationPtr>::Read(
    ::media::mojom::blink::TimingInformation::DataView input,
    ::media::mojom::blink::TimingInformationPtr* output) {}


// static
bool StructTraits<::media::mojom::blink::SpeechRecognitionResult::DataView, ::media::mojom::blink::SpeechRecognitionResultPtr>::Read(
    ::media::mojom::blink::SpeechRecognitionResult::DataView input,
    ::media::mojom::blink::SpeechRecognitionResultPtr* output) {}


// static
bool StructTraits<::media::mojom::blink::LanguageIdentificationEvent::DataView, ::media::mojom::blink::LanguageIdentificationEventPtr>::Read(
    ::media::mojom::blink::LanguageIdentificationEvent::DataView input,
    ::media::mojom::blink::LanguageIdentificationEventPtr* output) {}


// static
bool StructTraits<::media::mojom::blink::SpeechRecognitionSurfaceMetadata::DataView, ::media::mojom::blink::SpeechRecognitionSurfaceMetadataPtr>::Read(
    ::media::mojom::blink::SpeechRecognitionSurfaceMetadata::DataView input,
    ::media::mojom::blink::SpeechRecognitionSurfaceMetadataPtr* output) {}


// static
bool StructTraits<::media::mojom::blink::SpeechRecognitionOptions::DataView, ::media::mojom::blink::SpeechRecognitionOptionsPtr>::Read(
    ::media::mojom::blink::SpeechRecognitionOptions::DataView input,
    ::media::mojom::blink::SpeechRecognitionOptionsPtr* output) {}

}  // namespace mojo


// Symbols declared in the -test-utils.h header are defined here instead of a
// separate .cc file to save compile time.


namespace media::mojom::blink {
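// The *InterceptorForTesting and *AsyncWaiter helpers below are test-only
// conveniences: the interceptors let a test override individual interface
// methods while forwarding the rest, and the async waiters wrap the
// callback-based calls in a blocking, synchronous API.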


void SpeechRecognitionContextInterceptorForTesting::BindRecognizer(::mojo::PendingReceiver<SpeechRecognitionRecognizer> receiver, ::mojo::PendingRemote<SpeechRecognitionRecognizerClient> client, SpeechRecognitionOptionsPtr options, BindRecognizerCallback callback) {}
void SpeechRecognitionContextInterceptorForTesting::BindWebSpeechRecognizer(::mojo::PendingReceiver<::media::mojom::blink::SpeechRecognitionSession> session_receiver, ::mojo::PendingRemote<::media::mojom::blink::SpeechRecognitionSessionClient> session_client, ::mojo::PendingReceiver<::media::mojom::blink::SpeechRecognitionAudioForwarder> audio_forwarder, int32_t channel_count, int32_t sample_rate, SpeechRecognitionOptionsPtr options, bool continuous) {}
SpeechRecognitionContextAsyncWaiter::SpeechRecognitionContextAsyncWaiter(
    SpeechRecognitionContext* proxy) :{}

SpeechRecognitionContextAsyncWaiter::~SpeechRecognitionContextAsyncWaiter() = default;

void SpeechRecognitionContextAsyncWaiter::BindRecognizer(
    ::mojo::PendingReceiver<SpeechRecognitionRecognizer> receiver, ::mojo::PendingRemote<SpeechRecognitionRecognizerClient> client, SpeechRecognitionOptionsPtr options, bool* out_is_multichannel_supported) {}

bool SpeechRecognitionContextAsyncWaiter::BindRecognizer(
    ::mojo::PendingReceiver<SpeechRecognitionRecognizer> receiver, ::mojo::PendingRemote<SpeechRecognitionRecognizerClient> client, SpeechRecognitionOptionsPtr options) {}
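// Illustration (not part of the generated bindings): in a test, the waiter
// turns the BindRecognizer callback into a synchronous return value.
// `context_remote` is an assumed mojo::Remote<SpeechRecognitionContext>;
// `receiver`, `client`, and `options` are assumed to be prepared by the test.
//
//   SpeechRecognitionContextAsyncWaiter waiter(context_remote.get());
//   bool is_multichannel_supported = waiter.BindRecognizer(
//       std::move(receiver), std::move(client), std::move(options));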




void SpeechRecognitionRecognizerInterceptorForTesting::SendAudioToSpeechRecognitionService(::media::mojom::blink::AudioDataS16Ptr buffer) {}
void SpeechRecognitionRecognizerInterceptorForTesting::MarkDone() {}
void SpeechRecognitionRecognizerInterceptorForTesting::OnLanguageChanged(const WTF::String& language) {}
void SpeechRecognitionRecognizerInterceptorForTesting::OnMaskOffensiveWordsChanged(bool mask_offensive_words) {}
SpeechRecognitionRecognizerAsyncWaiter::SpeechRecognitionRecognizerAsyncWaiter(
    SpeechRecognitionRecognizer* proxy) :{}

SpeechRecognitionRecognizerAsyncWaiter::~SpeechRecognitionRecognizerAsyncWaiter() = default;




void SpeechRecognitionRecognizerClientInterceptorForTesting::OnSpeechRecognitionRecognitionEvent(SpeechRecognitionResultPtr result, OnSpeechRecognitionRecognitionEventCallback callback) {}
void SpeechRecognitionRecognizerClientInterceptorForTesting::OnSpeechRecognitionStopped() {}
void SpeechRecognitionRecognizerClientInterceptorForTesting::OnSpeechRecognitionError() {}
void SpeechRecognitionRecognizerClientInterceptorForTesting::OnLanguageIdentificationEvent(LanguageIdentificationEventPtr event) {}
SpeechRecognitionRecognizerClientAsyncWaiter::SpeechRecognitionRecognizerClientAsyncWaiter(
    SpeechRecognitionRecognizerClient* proxy) :{}

SpeechRecognitionRecognizerClientAsyncWaiter::~SpeechRecognitionRecognizerClientAsyncWaiter() = default;

void SpeechRecognitionRecognizerClientAsyncWaiter::OnSpeechRecognitionRecognitionEvent(
    SpeechRecognitionResultPtr result, bool* out_continue_recognition) {}

bool SpeechRecognitionRecognizerClientAsyncWaiter::OnSpeechRecognitionRecognitionEvent(
    SpeechRecognitionResultPtr result) {}




void SpeechRecognitionBrowserObserverInterceptorForTesting::SpeechRecognitionAvailabilityChanged(bool is_speech_recognition_available) {}
void SpeechRecognitionBrowserObserverInterceptorForTesting::SpeechRecognitionLanguageChanged(const WTF::String& language) {}
void SpeechRecognitionBrowserObserverInterceptorForTesting::SpeechRecognitionMaskOffensiveWordsChanged(bool mask_offensive_words) {}
SpeechRecognitionBrowserObserverAsyncWaiter::SpeechRecognitionBrowserObserverAsyncWaiter(
    SpeechRecognitionBrowserObserver* proxy) :{}

SpeechRecognitionBrowserObserverAsyncWaiter::~SpeechRecognitionBrowserObserverAsyncWaiter() = default;




void SpeechRecognitionSurfaceInterceptorForTesting::Activate() {}
void SpeechRecognitionSurfaceInterceptorForTesting::GetBounds(GetBoundsCallback callback) {}
SpeechRecognitionSurfaceAsyncWaiter::SpeechRecognitionSurfaceAsyncWaiter(
    SpeechRecognitionSurface* proxy) :{}

SpeechRecognitionSurfaceAsyncWaiter::~SpeechRecognitionSurfaceAsyncWaiter() = default;

void SpeechRecognitionSurfaceAsyncWaiter::GetBounds(
    std::optional<::gfx::Rect>* out_bounds) {}

std::optional<::gfx::Rect> SpeechRecognitionSurfaceAsyncWaiter::GetBounds(
    ) {}




void SpeechRecognitionSurfaceClientInterceptorForTesting::OnSessionEnded() {}
void SpeechRecognitionSurfaceClientInterceptorForTesting::OnFullscreenToggled() {}
SpeechRecognitionSurfaceClientAsyncWaiter::SpeechRecognitionSurfaceClientAsyncWaiter(
    SpeechRecognitionSurfaceClient* proxy) :{}

SpeechRecognitionSurfaceClientAsyncWaiter::~SpeechRecognitionSurfaceClientAsyncWaiter() = default;




void SpeechRecognitionClientBrowserInterfaceInterceptorForTesting::BindSpeechRecognitionBrowserObserver(::mojo::PendingRemote<SpeechRecognitionBrowserObserver> observer) {}
void SpeechRecognitionClientBrowserInterfaceInterceptorForTesting::BindRecognizerToRemoteClient(::mojo::PendingReceiver<SpeechRecognitionRecognizerClient> client, ::mojo::PendingReceiver<SpeechRecognitionSurfaceClient> surface_client, ::mojo::PendingRemote<SpeechRecognitionSurface> surface, SpeechRecognitionSurfaceMetadataPtr metadata) {}
SpeechRecognitionClientBrowserInterfaceAsyncWaiter::SpeechRecognitionClientBrowserInterfaceAsyncWaiter(
    SpeechRecognitionClientBrowserInterface* proxy) :{}

SpeechRecognitionClientBrowserInterfaceAsyncWaiter::~SpeechRecognitionClientBrowserInterfaceAsyncWaiter() = default;






}  // media::mojom::blink


#if defined(__clang__)
#pragma clang diagnostic pop
#endif