#if defined(__clang__)
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wunused-private-field"
#endif
#include "media/mojo/mojom/speech_recognition.mojom.h"
#include <math.h>
#include <stdint.h>
#include <utility>
#include "base/debug/alias.h"
#include "base/hash/md5_constexpr.h"
#include "base/run_loop.h"
#include "base/strings/string_number_conversions.h"
#include "base/task/thread_pool/thread_pool_instance.h"
#include "base/trace_event/trace_event.h"
#include "base/trace_event/typed_macros.h"
#include "mojo/public/cpp/bindings/features.h"
#include "mojo/public/cpp/bindings/lib/default_construct_tag_internal.h"
#include "mojo/public/cpp/bindings/lib/generated_code_util.h"
#include "mojo/public/cpp/bindings/lib/message_internal.h"
#include "mojo/public/cpp/bindings/lib/proxy_to_responder.h"
#include "mojo/public/cpp/bindings/lib/send_message_helper.h"
#include "mojo/public/cpp/bindings/lib/serialization_util.h"
#include "mojo/public/cpp/bindings/lib/unserialized_message_context.h"
#include "mojo/public/cpp/bindings/lib/validate_params.h"
#include "mojo/public/cpp/bindings/lib/validation_errors.h"
#include "mojo/public/cpp/bindings/mojo_buildflags.h"
#include "mojo/public/cpp/bindings/urgent_message_scope.h"
#include "mojo/public/interfaces/bindings/interface_control_messages.mojom.h"
#include "third_party/perfetto/include/perfetto/tracing/traced_value.h"
#include "media/mojo/mojom/speech_recognition.mojom-params-data.h"
#include "media/mojo/mojom/speech_recognition.mojom-shared-message-ids.h"
#include "media/mojo/mojom/speech_recognition.mojom-import-headers.h"
#include "media/mojo/mojom/speech_recognition.mojom-test-utils.h"
namespace media::mojom {
HypothesisParts::HypothesisParts()
: … { … }
HypothesisParts::HypothesisParts(
std::vector<std::string> text_in,
::base::TimeDelta hypothesis_part_offset_in)
: … { … }
HypothesisParts::~HypothesisParts() = default;
void HypothesisParts::WriteIntoTrace(
perfetto::TracedValue traced_context) const { … }
bool HypothesisParts::Validate(
const void* data,
mojo::internal::ValidationContext* validation_context) { … }
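// Each generated struct wrapper below follows the same pattern: a default
// constructor, an all-field constructor (one overload per struct version), a
// defaulted destructor, WriteIntoTrace() to emit the fields into a
// perfetto::TracedValue for tracing, and a static Validate() that checks the
// serialized wire data before deserialization.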
TimingInformation::TimingInformation()
: … { … }
TimingInformation::TimingInformation(
::base::TimeDelta audio_start_time_in,
::base::TimeDelta audio_end_time_in,
std::optional<std::vector<::media::HypothesisParts>> hypothesis_parts_in)
: … { … }
TimingInformation::~TimingInformation() = default;
void TimingInformation::WriteIntoTrace(
perfetto::TracedValue traced_context) const { … }
bool TimingInformation::Validate(
const void* data,
mojo::internal::ValidationContext* validation_context) { … }
SpeechRecognitionResult::SpeechRecognitionResult()
: … { … }
SpeechRecognitionResult::SpeechRecognitionResult(
const std::string& transcription_in,
bool is_final_in,
const std::optional<::media::TimingInformation>& timing_information_in)
: … { … }
SpeechRecognitionResult::~SpeechRecognitionResult() = default;
void SpeechRecognitionResult::WriteIntoTrace(
perfetto::TracedValue traced_context) const { … }
bool SpeechRecognitionResult::Validate(
const void* data,
mojo::internal::ValidationContext* validation_context) { … }
LanguageIdentificationEvent::LanguageIdentificationEvent()
: … { … }
LanguageIdentificationEvent::LanguageIdentificationEvent(
const std::string& language_in,
ConfidenceLevel confidence_level_in)
: … { … }
LanguageIdentificationEvent::LanguageIdentificationEvent(
const std::string& language_in,
ConfidenceLevel confidence_level_in,
std::optional<AsrSwitchResult> asr_switch_result_in)
: … { … }
LanguageIdentificationEvent::~LanguageIdentificationEvent() = default;
void LanguageIdentificationEvent::WriteIntoTrace(
perfetto::TracedValue traced_context) const { … }
bool LanguageIdentificationEvent::Validate(
const void* data,
mojo::internal::ValidationContext* validation_context) { … }
SpeechRecognitionSurfaceMetadata::SpeechRecognitionSurfaceMetadata()
: … { … }
SpeechRecognitionSurfaceMetadata::SpeechRecognitionSurfaceMetadata(
const ::base::UnguessableToken& session_id_in)
: … { … }
SpeechRecognitionSurfaceMetadata::~SpeechRecognitionSurfaceMetadata() = default;
void SpeechRecognitionSurfaceMetadata::WriteIntoTrace(
perfetto::TracedValue traced_context) const { … }
bool SpeechRecognitionSurfaceMetadata::Validate(
const void* data,
mojo::internal::ValidationContext* validation_context) { … }
SpeechRecognitionOptions::SpeechRecognitionOptions()
: … { … }
SpeechRecognitionOptions::SpeechRecognitionOptions(
SpeechRecognitionMode recognition_mode_in,
bool enable_formatting_in,
const std::optional<std::string>& language_in)
: … { … }
SpeechRecognitionOptions::SpeechRecognitionOptions(
SpeechRecognitionMode recognition_mode_in,
bool enable_formatting_in,
const std::optional<std::string>& language_in,
bool is_server_based_in,
RecognizerClientType recognizer_client_type_in)
: … { … }
SpeechRecognitionOptions::SpeechRecognitionOptions(
SpeechRecognitionMode recognition_mode_in,
bool enable_formatting_in,
const std::optional<std::string>& language_in,
bool is_server_based_in,
RecognizerClientType recognizer_client_type_in,
bool skip_continuously_empty_audio_in)
: … { … }
SpeechRecognitionOptions::SpeechRecognitionOptions(
SpeechRecognitionMode recognition_mode_in,
bool enable_formatting_in,
const std::optional<std::string>& language_in,
bool is_server_based_in,
RecognizerClientType recognizer_client_type_in,
bool skip_continuously_empty_audio_in,
const std::optional<std::string>& experiment_recognizer_routing_key_in)
: … { … }
SpeechRecognitionOptions::SpeechRecognitionOptions(
SpeechRecognitionMode recognition_mode_in,
bool enable_formatting_in,
const std::optional<std::string>& language_in,
bool is_server_based_in,
RecognizerClientType recognizer_client_type_in,
bool skip_continuously_empty_audio_in,
const std::optional<std::string>& experiment_recognizer_routing_key_in,
int32_t channel_count_in,
int32_t sample_rate_in)
: … { … }
SpeechRecognitionOptions::~SpeechRecognitionOptions() = default;
void SpeechRecognitionOptions::WriteIntoTrace(
perfetto::TracedValue traced_context) const { … }
bool SpeechRecognitionOptions::Validate(
const void* data,
mojo::internal::ValidationContext* validation_context) { … }
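// Illustrative only (not part of the generated code): callers typically build
// the options through the generated New() factory and assign fields directly.
// The enumerator and field values below are hypothetical.
//
//   auto options = media::mojom::SpeechRecognitionOptions::New();
//   options->recognition_mode = media::mojom::SpeechRecognitionMode::kCaption;
//   options->enable_formatting = true;
//   options->language = "en-US";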
const char SpeechRecognitionContext::Name_[] = …;
SpeechRecognitionContext::IPCStableHashFunction SpeechRecognitionContext::MessageToMethodInfo_(mojo::Message& message) { … }
const char* SpeechRecognitionContext::MessageToMethodName_(mojo::Message& message) { … }
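// MessageToMethodInfo_() and MessageToMethodName_() map an incoming message
// to per-method metadata used for tracing and diagnostics; the
// *_Sym::IPCStableHash() helpers below provide a stable per-method hash and
// are compiled out on Fuchsia.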
#if !BUILDFLAG(IS_FUCHSIA)
uint32_t SpeechRecognitionContext::BindRecognizer_Sym::IPCStableHash() { … }
uint32_t SpeechRecognitionContext::BindWebSpeechRecognizer_Sym::IPCStableHash() { … }
#endif  // !BUILDFLAG(IS_FUCHSIA)
class SpeechRecognitionContext_BindRecognizer_ForwardToCallback
: public mojo::MessageReceiver { … };
SpeechRecognitionContextProxy::SpeechRecognitionContextProxy(mojo::MessageReceiverWithResponder* receiver)
: … { … }
void SpeechRecognitionContextProxy::BindRecognizer(
::mojo::PendingReceiver<SpeechRecognitionRecognizer> in_receiver, ::mojo::PendingRemote<SpeechRecognitionRecognizerClient> in_client, SpeechRecognitionOptionsPtr in_options, BindRecognizerCallback callback) { … }
void SpeechRecognitionContextProxy::BindWebSpeechRecognizer(
::mojo::PendingReceiver<::media::mojom::SpeechRecognitionSession> in_session_receiver, ::mojo::PendingRemote<::media::mojom::SpeechRecognitionSessionClient> in_session_client, ::mojo::PendingReceiver<::media::mojom::SpeechRecognitionAudioForwarder> in_audio_forwarder, int32_t in_channel_count, int32_t in_sample_rate, SpeechRecognitionOptionsPtr in_options, bool in_continuous) { … }
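// Caller-side sketch (hypothetical names, not part of the generated code):
// the proxy is normally reached through a mojo::Remote, e.g.
//
//   mojo::Remote<media::mojom::SpeechRecognitionContext> context(
//       std::move(pending_context));
//   context->BindRecognizer(std::move(recognizer_receiver),
//                           std::move(recognizer_client),
//                           std::move(options),
//                           base::BindOnce(&OnRecognizerBound));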
class SpeechRecognitionContext_BindRecognizer_ProxyToResponder : public ::mojo::internal::ProxyToResponder { … };
bool SpeechRecognitionContext_BindRecognizer_ForwardToCallback::Accept(
mojo::Message* message) { … }
void SpeechRecognitionContext_BindRecognizer_ProxyToResponder::Run(
bool in_is_multichannel_supported) { … }
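// Request/response plumbing for BindRecognizer: the proxy serializes the call
// and attaches a *_ForwardToCallback receiver, which deserializes the reply
// and runs the caller's BindRecognizerCallback; on the implementation side,
// the stub hands the impl a callback backed by the *_ProxyToResponder, whose
// Run() serializes is_multichannel_supported back over the pipe.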
bool SpeechRecognitionContextStubDispatch::Accept(
SpeechRecognitionContext* impl,
mojo::Message* message) { … }
bool SpeechRecognitionContextStubDispatch::AcceptWithResponder(
SpeechRecognitionContext* impl,
mojo::Message* message,
std::unique_ptr<mojo::MessageReceiverWithStatus> responder) { … }
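// Stub dispatch: Accept() handles messages that expect no reply, while
// AcceptWithResponder() handles messages that carry a response, wiring the
// responder into the callback passed to the implementation.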
namespace {
}
static const mojo::internal::GenericValidationInfo kSpeechRecognitionContextValidationInfo[] = …;
bool SpeechRecognitionContextRequestValidator::Accept(mojo::Message* message) { … }
bool SpeechRecognitionContextResponseValidator::Accept(mojo::Message* message) { … }
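// The request/response validators run before dispatch, checking each
// message's header and payload layout against
// kSpeechRecognitionContextValidationInfo; a validation failure is reported
// and the message is rejected rather than dispatched.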
const char SpeechRecognitionRecognizer::Name_[] = …;
SpeechRecognitionRecognizer::IPCStableHashFunction SpeechRecognitionRecognizer::MessageToMethodInfo_(mojo::Message& message) { … }
const char* SpeechRecognitionRecognizer::MessageToMethodName_(mojo::Message& message) { … }
#if !BUILDFLAG(IS_FUCHSIA)
uint32_t SpeechRecognitionRecognizer::SendAudioToSpeechRecognitionService_Sym::IPCStableHash() { … }
uint32_t SpeechRecognitionRecognizer::MarkDone_Sym::IPCStableHash() { … }
uint32_t SpeechRecognitionRecognizer::OnLanguageChanged_Sym::IPCStableHash() { … }
uint32_t SpeechRecognitionRecognizer::OnMaskOffensiveWordsChanged_Sym::IPCStableHash() { … }
#endif  // !BUILDFLAG(IS_FUCHSIA)
SpeechRecognitionRecognizerProxy::SpeechRecognitionRecognizerProxy(mojo::MessageReceiverWithResponder* receiver)
: … { … }
void SpeechRecognitionRecognizerProxy::SendAudioToSpeechRecognitionService(
::media::mojom::AudioDataS16Ptr in_buffer) { … }
void SpeechRecognitionRecognizerProxy::MarkDone(
) { … }
void SpeechRecognitionRecognizerProxy::OnLanguageChanged(
const std::string& in_language) { … }
void SpeechRecognitionRecognizerProxy::OnMaskOffensiveWordsChanged(
bool in_mask_offensive_words) { … }
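// All SpeechRecognitionRecognizer methods are fire-and-forget (no reply), so
// this interface gets no ForwardToCallback/ProxyToResponder helpers and, below,
// only a request validator.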
bool SpeechRecognitionRecognizerStubDispatch::Accept(
SpeechRecognitionRecognizer* impl,
mojo::Message* message) { … }
bool SpeechRecognitionRecognizerStubDispatch::AcceptWithResponder(
SpeechRecognitionRecognizer* impl,
mojo::Message* message,
std::unique_ptr<mojo::MessageReceiverWithStatus> responder) { … }
namespace {
}
static const mojo::internal::GenericValidationInfo kSpeechRecognitionRecognizerValidationInfo[] = …;
bool SpeechRecognitionRecognizerRequestValidator::Accept(mojo::Message* message) { … }
const char SpeechRecognitionRecognizerClient::Name_[] = …;
SpeechRecognitionRecognizerClient::IPCStableHashFunction SpeechRecognitionRecognizerClient::MessageToMethodInfo_(mojo::Message& message) { … }
const char* SpeechRecognitionRecognizerClient::MessageToMethodName_(mojo::Message& message) { … }
#if !BUILDFLAG(IS_FUCHSIA)
uint32_t SpeechRecognitionRecognizerClient::OnSpeechRecognitionRecognitionEvent_Sym::IPCStableHash() { … }
uint32_t SpeechRecognitionRecognizerClient::OnSpeechRecognitionStopped_Sym::IPCStableHash() { … }
uint32_t SpeechRecognitionRecognizerClient::OnSpeechRecognitionError_Sym::IPCStableHash() { … }
uint32_t SpeechRecognitionRecognizerClient::OnLanguageIdentificationEvent_Sym::IPCStableHash() { … }
#endif  // !BUILDFLAG(IS_FUCHSIA)
class SpeechRecognitionRecognizerClient_OnSpeechRecognitionRecognitionEvent_ForwardToCallback
: public mojo::MessageReceiver { … };
SpeechRecognitionRecognizerClientProxy::SpeechRecognitionRecognizerClientProxy(mojo::MessageReceiverWithResponder* receiver)
: … { … }
void SpeechRecognitionRecognizerClientProxy::OnSpeechRecognitionRecognitionEvent(
const ::media::SpeechRecognitionResult& in_result, OnSpeechRecognitionRecognitionEventCallback callback) { … }
void SpeechRecognitionRecognizerClientProxy::OnSpeechRecognitionStopped(
) { … }
void SpeechRecognitionRecognizerClientProxy::OnSpeechRecognitionError(
) { … }
void SpeechRecognitionRecognizerClientProxy::OnLanguageIdentificationEvent(
LanguageIdentificationEventPtr in_event) { … }
class SpeechRecognitionRecognizerClient_OnSpeechRecognitionRecognitionEvent_ProxyToResponder : public ::mojo::internal::ProxyToResponder { … };
bool SpeechRecognitionRecognizerClient_OnSpeechRecognitionRecognitionEvent_ForwardToCallback::Accept(
mojo::Message* message) { … }
void SpeechRecognitionRecognizerClient_OnSpeechRecognitionRecognitionEvent_ProxyToResponder::Run(
bool in_continue_recognition) { … }
bool SpeechRecognitionRecognizerClientStubDispatch::Accept(
SpeechRecognitionRecognizerClient* impl,
mojo::Message* message) { … }
bool SpeechRecognitionRecognizerClientStubDispatch::AcceptWithResponder(
SpeechRecognitionRecognizerClient* impl,
mojo::Message* message,
std::unique_ptr<mojo::MessageReceiverWithStatus> responder) { … }
namespace {
}
static const mojo::internal::GenericValidationInfo kSpeechRecognitionRecognizerClientValidationInfo[] = …;
bool SpeechRecognitionRecognizerClientRequestValidator::Accept(mojo::Message* message) { … }
bool SpeechRecognitionRecognizerClientResponseValidator::Accept(mojo::Message* message) { … }
const char SpeechRecognitionBrowserObserver::Name_[] = …;
SpeechRecognitionBrowserObserver::IPCStableHashFunction SpeechRecognitionBrowserObserver::MessageToMethodInfo_(mojo::Message& message) { … }
const char* SpeechRecognitionBrowserObserver::MessageToMethodName_(mojo::Message& message) { … }
#if !BUILDFLAG(IS_FUCHSIA)
uint32_t SpeechRecognitionBrowserObserver::SpeechRecognitionAvailabilityChanged_Sym::IPCStableHash() { … }
uint32_t SpeechRecognitionBrowserObserver::SpeechRecognitionLanguageChanged_Sym::IPCStableHash() { … }
uint32_t SpeechRecognitionBrowserObserver::SpeechRecognitionMaskOffensiveWordsChanged_Sym::IPCStableHash() { … }
#endif  // !BUILDFLAG(IS_FUCHSIA)
SpeechRecognitionBrowserObserverProxy::SpeechRecognitionBrowserObserverProxy(mojo::MessageReceiverWithResponder* receiver)
: … { … }
void SpeechRecognitionBrowserObserverProxy::SpeechRecognitionAvailabilityChanged(
bool in_is_speech_recognition_available) { … }
void SpeechRecognitionBrowserObserverProxy::SpeechRecognitionLanguageChanged(
const std::string& in_language) { … }
void SpeechRecognitionBrowserObserverProxy::SpeechRecognitionMaskOffensiveWordsChanged(
bool in_mask_offensive_words) { … }
bool SpeechRecognitionBrowserObserverStubDispatch::Accept(
SpeechRecognitionBrowserObserver* impl,
mojo::Message* message) { … }
bool SpeechRecognitionBrowserObserverStubDispatch::AcceptWithResponder(
SpeechRecognitionBrowserObserver* impl,
mojo::Message* message,
std::unique_ptr<mojo::MessageReceiverWithStatus> responder) { … }
namespace {
}
static const mojo::internal::GenericValidationInfo kSpeechRecognitionBrowserObserverValidationInfo[] = …;
bool SpeechRecognitionBrowserObserverRequestValidator::Accept(mojo::Message* message) { … }
const char SpeechRecognitionSurface::Name_[] = …;
SpeechRecognitionSurface::IPCStableHashFunction SpeechRecognitionSurface::MessageToMethodInfo_(mojo::Message& message) { … }
const char* SpeechRecognitionSurface::MessageToMethodName_(mojo::Message& message) { … }
#if !BUILDFLAG(IS_FUCHSIA)
uint32_t SpeechRecognitionSurface::Activate_Sym::IPCStableHash() { … }
uint32_t SpeechRecognitionSurface::GetBounds_Sym::IPCStableHash() { … }
#endif  // !BUILDFLAG(IS_FUCHSIA)
class SpeechRecognitionSurface_GetBounds_ForwardToCallback
: public mojo::MessageReceiver { … };
SpeechRecognitionSurfaceProxy::SpeechRecognitionSurfaceProxy(mojo::MessageReceiverWithResponder* receiver)
: … { … }
void SpeechRecognitionSurfaceProxy::Activate(
) { … }
void SpeechRecognitionSurfaceProxy::GetBounds(
GetBoundsCallback callback) { … }
class SpeechRecognitionSurface_GetBounds_ProxyToResponder : public ::mojo::internal::ProxyToResponder { … };
bool SpeechRecognitionSurface_GetBounds_ForwardToCallback::Accept(
mojo::Message* message) { … }
void SpeechRecognitionSurface_GetBounds_ProxyToResponder::Run(
const std::optional<::gfx::Rect>& in_bounds) { … }
bool SpeechRecognitionSurfaceStubDispatch::Accept(
SpeechRecognitionSurface* impl,
mojo::Message* message) { … }
bool SpeechRecognitionSurfaceStubDispatch::AcceptWithResponder(
SpeechRecognitionSurface* impl,
mojo::Message* message,
std::unique_ptr<mojo::MessageReceiverWithStatus> responder) { … }
namespace {
}
static const mojo::internal::GenericValidationInfo kSpeechRecognitionSurfaceValidationInfo[] = …;
bool SpeechRecognitionSurfaceRequestValidator::Accept(mojo::Message* message) { … }
bool SpeechRecognitionSurfaceResponseValidator::Accept(mojo::Message* message) { … }
const char SpeechRecognitionSurfaceClient::Name_[] = …;
SpeechRecognitionSurfaceClient::IPCStableHashFunction SpeechRecognitionSurfaceClient::MessageToMethodInfo_(mojo::Message& message) { … }
const char* SpeechRecognitionSurfaceClient::MessageToMethodName_(mojo::Message& message) { … }
#if !BUILDFLAG(IS_FUCHSIA)
uint32_t SpeechRecognitionSurfaceClient::OnSessionEnded_Sym::IPCStableHash() { … }
uint32_t SpeechRecognitionSurfaceClient::OnFullscreenToggled_Sym::IPCStableHash() { … }
#endif  // !BUILDFLAG(IS_FUCHSIA)
SpeechRecognitionSurfaceClientProxy::SpeechRecognitionSurfaceClientProxy(mojo::MessageReceiverWithResponder* receiver)
: … { … }
void SpeechRecognitionSurfaceClientProxy::OnSessionEnded(
) { … }
void SpeechRecognitionSurfaceClientProxy::OnFullscreenToggled(
) { … }
bool SpeechRecognitionSurfaceClientStubDispatch::Accept(
SpeechRecognitionSurfaceClient* impl,
mojo::Message* message) { … }
bool SpeechRecognitionSurfaceClientStubDispatch::AcceptWithResponder(
SpeechRecognitionSurfaceClient* impl,
mojo::Message* message,
std::unique_ptr<mojo::MessageReceiverWithStatus> responder) { … }
namespace {
}
static const mojo::internal::GenericValidationInfo kSpeechRecognitionSurfaceClientValidationInfo[] = …;
bool SpeechRecognitionSurfaceClientRequestValidator::Accept(mojo::Message* message) { … }
const char SpeechRecognitionClientBrowserInterface::Name_[] = …;
SpeechRecognitionClientBrowserInterface::IPCStableHashFunction SpeechRecognitionClientBrowserInterface::MessageToMethodInfo_(mojo::Message& message) { … }
const char* SpeechRecognitionClientBrowserInterface::MessageToMethodName_(mojo::Message& message) { … }
#if !BUILDFLAG(IS_FUCHSIA)
uint32_t SpeechRecognitionClientBrowserInterface::BindSpeechRecognitionBrowserObserver_Sym::IPCStableHash() { … }
uint32_t SpeechRecognitionClientBrowserInterface::BindRecognizerToRemoteClient_Sym::IPCStableHash() { … }
#endif  // !BUILDFLAG(IS_FUCHSIA)
SpeechRecognitionClientBrowserInterfaceProxy::SpeechRecognitionClientBrowserInterfaceProxy(mojo::MessageReceiverWithResponder* receiver)
: … { … }
void SpeechRecognitionClientBrowserInterfaceProxy::BindSpeechRecognitionBrowserObserver(
::mojo::PendingRemote<SpeechRecognitionBrowserObserver> in_observer) { … }
void SpeechRecognitionClientBrowserInterfaceProxy::BindRecognizerToRemoteClient(
::mojo::PendingReceiver<SpeechRecognitionRecognizerClient> in_client, ::mojo::PendingReceiver<SpeechRecognitionSurfaceClient> in_surface_client, ::mojo::PendingRemote<SpeechRecognitionSurface> in_surface, SpeechRecognitionSurfaceMetadataPtr in_metadata) { … }
bool SpeechRecognitionClientBrowserInterfaceStubDispatch::Accept(
SpeechRecognitionClientBrowserInterface* impl,
mojo::Message* message) { … }
bool SpeechRecognitionClientBrowserInterfaceStubDispatch::AcceptWithResponder(
SpeechRecognitionClientBrowserInterface* impl,
mojo::Message* message,
std::unique_ptr<mojo::MessageReceiverWithStatus> responder) { … }
namespace {
}
static const mojo::internal::GenericValidationInfo kSpeechRecognitionClientBrowserInterfaceValidationInfo[] = …;
bool SpeechRecognitionClientBrowserInterfaceRequestValidator::Accept(mojo::Message* message) { … }
}  // namespace media::mojom
namespace mojo {
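// Default StructTraits for the generated struct wrappers: each Read() pulls
// the fields out of the wire-format DataView into the output StructPtr,
// returning false if any field fails to deserialize, which the bindings treat
// as a validation failure.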
bool StructTraits<::media::mojom::HypothesisParts::DataView, ::media::mojom::HypothesisPartsPtr>::Read(
::media::mojom::HypothesisParts::DataView input,
::media::mojom::HypothesisPartsPtr* output) { … }
bool StructTraits<::media::mojom::TimingInformation::DataView, ::media::mojom::TimingInformationPtr>::Read(
::media::mojom::TimingInformation::DataView input,
::media::mojom::TimingInformationPtr* output) { … }
bool StructTraits<::media::mojom::SpeechRecognitionResult::DataView, ::media::mojom::SpeechRecognitionResultPtr>::Read(
::media::mojom::SpeechRecognitionResult::DataView input,
::media::mojom::SpeechRecognitionResultPtr* output) { … }
bool StructTraits<::media::mojom::LanguageIdentificationEvent::DataView, ::media::mojom::LanguageIdentificationEventPtr>::Read(
::media::mojom::LanguageIdentificationEvent::DataView input,
::media::mojom::LanguageIdentificationEventPtr* output) { … }
bool StructTraits<::media::mojom::SpeechRecognitionSurfaceMetadata::DataView, ::media::mojom::SpeechRecognitionSurfaceMetadataPtr>::Read(
::media::mojom::SpeechRecognitionSurfaceMetadata::DataView input,
::media::mojom::SpeechRecognitionSurfaceMetadataPtr* output) { … }
bool StructTraits<::media::mojom::SpeechRecognitionOptions::DataView, ::media::mojom::SpeechRecognitionOptionsPtr>::Read(
::media::mojom::SpeechRecognitionOptions::DataView input,
::media::mojom::SpeechRecognitionOptionsPtr* output) { … }
}  // namespace mojo
namespace media::mojom {
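// Test-only helpers. Each *InterceptorForTesting method forwards to the
// interface returned by GetForwardingInterface() unless a test overrides it,
// and each *AsyncWaiter wraps a proxy so a test can issue a call and block
// (via base::RunLoop) until the response arrives.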
void SpeechRecognitionContextInterceptorForTesting::BindRecognizer(::mojo::PendingReceiver<SpeechRecognitionRecognizer> receiver, ::mojo::PendingRemote<SpeechRecognitionRecognizerClient> client, SpeechRecognitionOptionsPtr options, BindRecognizerCallback callback) { … }
void SpeechRecognitionContextInterceptorForTesting::BindWebSpeechRecognizer(::mojo::PendingReceiver<::media::mojom::SpeechRecognitionSession> session_receiver, ::mojo::PendingRemote<::media::mojom::SpeechRecognitionSessionClient> session_client, ::mojo::PendingReceiver<::media::mojom::SpeechRecognitionAudioForwarder> audio_forwarder, int32_t channel_count, int32_t sample_rate, SpeechRecognitionOptionsPtr options, bool continuous) { … }
SpeechRecognitionContextAsyncWaiter::SpeechRecognitionContextAsyncWaiter(
SpeechRecognitionContext* proxy) : … { … }
SpeechRecognitionContextAsyncWaiter::~SpeechRecognitionContextAsyncWaiter() = default;
void SpeechRecognitionContextAsyncWaiter::BindRecognizer(
::mojo::PendingReceiver<SpeechRecognitionRecognizer> receiver, ::mojo::PendingRemote<SpeechRecognitionRecognizerClient> client, SpeechRecognitionOptionsPtr options, bool* out_is_multichannel_supported) { … }
bool SpeechRecognitionContextAsyncWaiter::BindRecognizer(
::mojo::PendingReceiver<SpeechRecognitionRecognizer> receiver, ::mojo::PendingRemote<SpeechRecognitionRecognizerClient> client, SpeechRecognitionOptionsPtr options) { … }
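// Test-side sketch (hypothetical names): the waiter turns the asynchronous
// BindRecognizer call into a blocking one.
//
//   SpeechRecognitionContextAsyncWaiter waiter(context_remote.get());
//   bool is_multichannel_supported = waiter.BindRecognizer(
//       std::move(recognizer_receiver), std::move(recognizer_client),
//       std::move(options));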
void SpeechRecognitionRecognizerInterceptorForTesting::SendAudioToSpeechRecognitionService(::media::mojom::AudioDataS16Ptr buffer) { … }
void SpeechRecognitionRecognizerInterceptorForTesting::MarkDone() { … }
void SpeechRecognitionRecognizerInterceptorForTesting::OnLanguageChanged(const std::string& language) { … }
void SpeechRecognitionRecognizerInterceptorForTesting::OnMaskOffensiveWordsChanged(bool mask_offensive_words) { … }
SpeechRecognitionRecognizerAsyncWaiter::SpeechRecognitionRecognizerAsyncWaiter(
SpeechRecognitionRecognizer* proxy) : … { … }
SpeechRecognitionRecognizerAsyncWaiter::~SpeechRecognitionRecognizerAsyncWaiter() = default;
void SpeechRecognitionRecognizerClientInterceptorForTesting::OnSpeechRecognitionRecognitionEvent(const ::media::SpeechRecognitionResult& result, OnSpeechRecognitionRecognitionEventCallback callback) { … }
void SpeechRecognitionRecognizerClientInterceptorForTesting::OnSpeechRecognitionStopped() { … }
void SpeechRecognitionRecognizerClientInterceptorForTesting::OnSpeechRecognitionError() { … }
void SpeechRecognitionRecognizerClientInterceptorForTesting::OnLanguageIdentificationEvent(LanguageIdentificationEventPtr event) { … }
SpeechRecognitionRecognizerClientAsyncWaiter::SpeechRecognitionRecognizerClientAsyncWaiter(
SpeechRecognitionRecognizerClient* proxy) : … { … }
SpeechRecognitionRecognizerClientAsyncWaiter::~SpeechRecognitionRecognizerClientAsyncWaiter() = default;
void SpeechRecognitionRecognizerClientAsyncWaiter::OnSpeechRecognitionRecognitionEvent(
const ::media::SpeechRecognitionResult& result, bool* out_continue_recognition) { … }
bool SpeechRecognitionRecognizerClientAsyncWaiter::OnSpeechRecognitionRecognitionEvent(
const ::media::SpeechRecognitionResult& result) { … }
void SpeechRecognitionBrowserObserverInterceptorForTesting::SpeechRecognitionAvailabilityChanged(bool is_speech_recognition_available) { … }
void SpeechRecognitionBrowserObserverInterceptorForTesting::SpeechRecognitionLanguageChanged(const std::string& language) { … }
void SpeechRecognitionBrowserObserverInterceptorForTesting::SpeechRecognitionMaskOffensiveWordsChanged(bool mask_offensive_words) { … }
SpeechRecognitionBrowserObserverAsyncWaiter::SpeechRecognitionBrowserObserverAsyncWaiter(
SpeechRecognitionBrowserObserver* proxy) : … { … }
SpeechRecognitionBrowserObserverAsyncWaiter::~SpeechRecognitionBrowserObserverAsyncWaiter() = default;
void SpeechRecognitionSurfaceInterceptorForTesting::Activate() { … }
void SpeechRecognitionSurfaceInterceptorForTesting::GetBounds(GetBoundsCallback callback) { … }
SpeechRecognitionSurfaceAsyncWaiter::SpeechRecognitionSurfaceAsyncWaiter(
SpeechRecognitionSurface* proxy) : … { … }
SpeechRecognitionSurfaceAsyncWaiter::~SpeechRecognitionSurfaceAsyncWaiter() = default;
void SpeechRecognitionSurfaceAsyncWaiter::GetBounds(
std::optional<::gfx::Rect>* out_bounds) { … }
std::optional<::gfx::Rect> SpeechRecognitionSurfaceAsyncWaiter::GetBounds(
) { … }
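// Test-side sketch (hypothetical names): both GetBounds() overloads block
// until the reply arrives; the second returns the bounds directly.
//
//   SpeechRecognitionSurfaceAsyncWaiter waiter(surface_remote.get());
//   std::optional<gfx::Rect> bounds = waiter.GetBounds();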
void SpeechRecognitionSurfaceClientInterceptorForTesting::OnSessionEnded() { … }
void SpeechRecognitionSurfaceClientInterceptorForTesting::OnFullscreenToggled() { … }
SpeechRecognitionSurfaceClientAsyncWaiter::SpeechRecognitionSurfaceClientAsyncWaiter(
SpeechRecognitionSurfaceClient* proxy) : … { … }
SpeechRecognitionSurfaceClientAsyncWaiter::~SpeechRecognitionSurfaceClientAsyncWaiter() = default;
void SpeechRecognitionClientBrowserInterfaceInterceptorForTesting::BindSpeechRecognitionBrowserObserver(::mojo::PendingRemote<SpeechRecognitionBrowserObserver> observer) { … }
void SpeechRecognitionClientBrowserInterfaceInterceptorForTesting::BindRecognizerToRemoteClient(::mojo::PendingReceiver<SpeechRecognitionRecognizerClient> client, ::mojo::PendingReceiver<SpeechRecognitionSurfaceClient> surface_client, ::mojo::PendingRemote<SpeechRecognitionSurface> surface, SpeechRecognitionSurfaceMetadataPtr metadata) { … }
SpeechRecognitionClientBrowserInterfaceAsyncWaiter::SpeechRecognitionClientBrowserInterfaceAsyncWaiter(
SpeechRecognitionClientBrowserInterface* proxy) : … { … }
SpeechRecognitionClientBrowserInterfaceAsyncWaiter::~SpeechRecognitionClientBrowserInterfaceAsyncWaiter() = default;
}  // namespace media::mojom
#if defined(__clang__)
#pragma clang diagnostic pop
#endif