#if defined(__clang__)
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wunused-private-field"
#endif
#include "media/mojo/mojom/speech_recognition_service.mojom-blink.h"
#include <math.h>
#include <stdint.h>
#include <utility>
#include "base/debug/alias.h"
#include "base/hash/md5_constexpr.h"
#include "base/run_loop.h"
#include "base/strings/string_number_conversions.h"
#include "base/task/thread_pool/thread_pool_instance.h"
#include "base/trace_event/trace_event.h"
#include "base/trace_event/typed_macros.h"
#include "mojo/public/cpp/bindings/features.h"
#include "mojo/public/cpp/bindings/lib/default_construct_tag_internal.h"
#include "mojo/public/cpp/bindings/lib/generated_code_util.h"
#include "mojo/public/cpp/bindings/lib/message_internal.h"
#include "mojo/public/cpp/bindings/lib/proxy_to_responder.h"
#include "mojo/public/cpp/bindings/lib/send_message_helper.h"
#include "mojo/public/cpp/bindings/lib/serialization_util.h"
#include "mojo/public/cpp/bindings/lib/unserialized_message_context.h"
#include "mojo/public/cpp/bindings/lib/validate_params.h"
#include "mojo/public/cpp/bindings/lib/validation_errors.h"
#include "mojo/public/cpp/bindings/mojo_buildflags.h"
#include "mojo/public/cpp/bindings/urgent_message_scope.h"
#include "mojo/public/interfaces/bindings/interface_control_messages.mojom.h"
#include "third_party/perfetto/include/perfetto/tracing/traced_value.h"
#include "media/mojo/mojom/speech_recognition_service.mojom-params-data.h"
#include "media/mojo/mojom/speech_recognition_service.mojom-shared-message-ids.h"
#include "media/mojo/mojom/speech_recognition_service.mojom-blink-import-headers.h"
#include "media/mojo/mojom/speech_recognition_service.mojom-blink-test-utils.h"
#include "mojo/public/cpp/bindings/lib/wtf_serialization.h"
namespace media::mojom::blink {
// Wire-level interface name for AudioSourceSpeechRecognitionContext, used by
// the bindings runtime to identify this interface. (Body elided in this view.)
const char AudioSourceSpeechRecognitionContext::Name_[] = …;
// Maps an incoming message to the function returning the method's stable IPC
// hash — presumably for tracing/metrics; confirm against the generated body.
AudioSourceSpeechRecognitionContext::IPCStableHashFunction AudioSourceSpeechRecognitionContext::MessageToMethodInfo_(mojo::Message& message) { … }
// Maps an incoming message to a human-readable method name (tracing aid).
const char* AudioSourceSpeechRecognitionContext::MessageToMethodName_(mojo::Message& message) { … }
// Stable hash identifying BindAudioSourceFetcher across builds; excluded on
// Fuchsia by the guard below.
#if !BUILDFLAG(IS_FUCHSIA)
uint32_t AudioSourceSpeechRecognitionContext::BindAudioSourceFetcher_Sym::IPCStableHash() { … }
# endif
// MessageReceiver that accepts the BindAudioSourceFetcher response message and
// forwards the deserialized result to the caller's callback (definition
// elided; see Accept() implementation further down in this file).
class AudioSourceSpeechRecognitionContext_BindAudioSourceFetcher_ForwardToCallback
: public mojo::MessageReceiver { … };
// Client-side proxy: serializes calls into mojo::Messages and hands them to
// |receiver| for transmission over the message pipe.
AudioSourceSpeechRecognitionContextProxy::AudioSourceSpeechRecognitionContextProxy(mojo::MessageReceiverWithResponder* receiver)
: … { … }
// Serializes a BindAudioSourceFetcher request carrying the fetcher receiver,
// the recognizer-client remote, and the recognition options; |callback| is
// invoked with the response (bool is_multichannel_supported, per the
// ProxyToResponder::Run signature below).
void AudioSourceSpeechRecognitionContextProxy::BindAudioSourceFetcher(
::mojo::PendingReceiver<AudioSourceFetcher> in_fetcher_receiver, ::mojo::PendingRemote<::media::mojom::blink::SpeechRecognitionRecognizerClient> in_client, ::media::mojom::blink::SpeechRecognitionOptionsPtr in_options, BindAudioSourceFetcherCallback callback) { … }
class AudioSourceSpeechRecognitionContext_BindAudioSourceFetcher_ProxyToResponder : public ::mojo::internal::ProxyToResponder { … };
// Deserializes the BindAudioSourceFetcher response message and invokes the
// stored callback; returns whether the message was accepted/valid.
bool AudioSourceSpeechRecognitionContext_BindAudioSourceFetcher_ForwardToCallback::Accept(
mojo::Message* message) { … }
// Serializes the response payload (is_multichannel_supported) and sends it
// back through the responder.
void AudioSourceSpeechRecognitionContext_BindAudioSourceFetcher_ProxyToResponder::Run(
bool in_is_multichannel_supported) { … }
// Dispatches incoming messages that expect no response to |impl|. Since this
// interface's only method (BindAudioSourceFetcher) has a reply, this overload
// presumably rejects/ignores it — confirm against the generated body.
bool AudioSourceSpeechRecognitionContextStubDispatch::Accept(
AudioSourceSpeechRecognitionContext* impl,
mojo::Message* message) { … }
// Dispatches incoming messages that carry a responder: deserializes the
// request, calls |impl|, and wires the reply through |responder|.
bool AudioSourceSpeechRecognitionContextStubDispatch::AcceptWithResponder(
AudioSourceSpeechRecognitionContext* impl,
mojo::Message* message,
std::unique_ptr<mojo::MessageReceiverWithStatus> responder) { … }
namespace {
}
// Per-method table of payload validation functions, indexed by message id
// (contents elided).
static const mojo::internal::GenericValidationInfo kAudioSourceSpeechRecognitionContextValidationInfo[] = …;
// Validates incoming request messages against the table above before dispatch.
bool AudioSourceSpeechRecognitionContextRequestValidator::Accept(mojo::Message* message) { … }
// Validates incoming response messages before they reach the callback.
bool AudioSourceSpeechRecognitionContextResponseValidator::Accept(mojo::Message* message) { … }
// Wire-level interface name for SpeechRecognitionService (body elided).
const char SpeechRecognitionService::Name_[] = …;
// Message-id -> stable-hash-function lookup for tracing (see the *_Sym
// IPCStableHash definitions below).
SpeechRecognitionService::IPCStableHashFunction SpeechRecognitionService::MessageToMethodInfo_(mojo::Message& message) { … }
// Message-id -> human-readable method name, for tracing.
const char* SpeechRecognitionService::MessageToMethodName_(mojo::Message& message) { … }
// Stable IPC hashes for each SpeechRecognitionService method; excluded on
// Fuchsia by the guard below.
#if !BUILDFLAG(IS_FUCHSIA)
uint32_t SpeechRecognitionService::BindSpeechRecognitionContext_Sym::IPCStableHash() { … }
uint32_t SpeechRecognitionService::BindAudioSourceSpeechRecognitionContext_Sym::IPCStableHash() { … }
uint32_t SpeechRecognitionService::SetSodaPaths_Sym::IPCStableHash() { … }
uint32_t SpeechRecognitionService::SetSodaParams_Sym::IPCStableHash() { … }
uint32_t SpeechRecognitionService::SetSodaConfigPaths_Sym::IPCStableHash() { … }
# endif
// Client-side proxy: serializes SpeechRecognitionService calls into
// mojo::Messages delivered via |receiver|. All five methods below are
// fire-and-forget (no response callback parameters).
SpeechRecognitionServiceProxy::SpeechRecognitionServiceProxy(mojo::MessageReceiverWithResponder* receiver)
: … { … }
// Forwards a receiver to be bound to a SpeechRecognitionContext impl.
void SpeechRecognitionServiceProxy::BindSpeechRecognitionContext(
::mojo::PendingReceiver<::media::mojom::blink::SpeechRecognitionContext> in_context) { … }
// Forwards a receiver to be bound to an AudioSourceSpeechRecognitionContext.
void SpeechRecognitionServiceProxy::BindAudioSourceSpeechRecognitionContext(
::mojo::PendingReceiver<AudioSourceSpeechRecognitionContext> in_context) { … }
// Sends SODA library/config file paths plus the primary language name.
void SpeechRecognitionServiceProxy::SetSodaPaths(
const ::base::FilePath& in_binary_path, const WTF::HashMap<WTF::String, ::base::FilePath>& in_config_paths, const WTF::String& in_primary_language_name) { … }
// Sends the offensive-word masking flag.
void SpeechRecognitionServiceProxy::SetSodaParams(
bool in_mask_offensive_words) { … }
// Sends per-language SODA config paths only (no binary path).
void SpeechRecognitionServiceProxy::SetSodaConfigPaths(
const WTF::HashMap<WTF::String, ::base::FilePath>& in_config_paths) { … }
// Dispatches no-response messages to |impl| — all SpeechRecognitionService
// methods visible here are fire-and-forget, so dispatch happens in this
// overload.
bool SpeechRecognitionServiceStubDispatch::Accept(
SpeechRecognitionService* impl,
mojo::Message* message) { … }
// Dispatches messages carrying a responder; with no reply-bearing methods in
// view, presumably rejects such messages — confirm against the generated body.
bool SpeechRecognitionServiceStubDispatch::AcceptWithResponder(
SpeechRecognitionService* impl,
mojo::Message* message,
std::unique_ptr<mojo::MessageReceiverWithStatus> responder) { … }
namespace {
}
// Per-method payload validation table for SpeechRecognitionService (contents
// elided). Note: no ResponseValidator exists for this interface, consistent
// with it having no reply-bearing methods in view.
static const mojo::internal::GenericValidationInfo kSpeechRecognitionServiceValidationInfo[] = …;
// Validates incoming request messages before dispatch to the stub.
bool SpeechRecognitionServiceRequestValidator::Accept(mojo::Message* message) { … }
// Wire-level interface name for AudioSourceFetcher (body elided).
const char AudioSourceFetcher::Name_[] = …;
// Message-id -> stable-hash-function lookup for tracing.
AudioSourceFetcher::IPCStableHashFunction AudioSourceFetcher::MessageToMethodInfo_(mojo::Message& message) { … }
// Message-id -> human-readable method name, for tracing.
const char* AudioSourceFetcher::MessageToMethodName_(mojo::Message& message) { … }
// Stable IPC hashes for AudioSourceFetcher::Start/Stop; excluded on Fuchsia.
#if !BUILDFLAG(IS_FUCHSIA)
uint32_t AudioSourceFetcher::Start_Sym::IPCStableHash() { … }
uint32_t AudioSourceFetcher::Stop_Sym::IPCStableHash() { … }
# endif
// Client-side proxy for AudioSourceFetcher; both methods are fire-and-forget.
AudioSourceFetcherProxy::AudioSourceFetcherProxy(mojo::MessageReceiverWithResponder* receiver)
: … { … }
// Starts audio capture: sends the stream factory remote, the capture device
// id, and the audio parameters to use.
void AudioSourceFetcherProxy::Start(
::mojo::PendingRemote<::media::mojom::blink::AudioStreamFactory> in_factory, const WTF::String& in_device_id, const ::media::AudioParameters& in_audio_parameters) { … }
// Stops audio capture; takes no arguments.
void AudioSourceFetcherProxy::Stop(
) { … }
// Dispatches no-response messages (Start/Stop) to |impl|.
bool AudioSourceFetcherStubDispatch::Accept(
AudioSourceFetcher* impl,
mojo::Message* message) { … }
// Dispatches messages carrying a responder; with no reply-bearing methods in
// view, presumably rejects them — confirm against the generated body.
bool AudioSourceFetcherStubDispatch::AcceptWithResponder(
AudioSourceFetcher* impl,
mojo::Message* message,
std::unique_ptr<mojo::MessageReceiverWithStatus> responder) { … }
namespace {
}
// Per-method payload validation table for AudioSourceFetcher (contents
// elided); no ResponseValidator, matching the absence of reply-bearing
// methods in view.
static const mojo::internal::GenericValidationInfo kAudioSourceFetcherValidationInfo[] = …;
// Validates incoming request messages before dispatch to the stub.
bool AudioSourceFetcherRequestValidator::Accept(mojo::Message* message) { … }
}
// Intentionally empty: the serialization (StructTraits) section generated for
// this mojom has no content in this variant — presumably handled in the shared
// (*-shared.cc) target; confirm against the generator output.
namespace mojo {
}
namespace media::mojom::blink {
// Test interceptor: forwards BindAudioSourceFetcher to the wrapped
// implementation unless overridden in a test subclass (body elided).
void AudioSourceSpeechRecognitionContextInterceptorForTesting::BindAudioSourceFetcher(::mojo::PendingReceiver<AudioSourceFetcher> fetcher_receiver, ::mojo::PendingRemote<::media::mojom::blink::SpeechRecognitionRecognizerClient> client, ::media::mojom::blink::SpeechRecognitionOptionsPtr options, BindAudioSourceFetcherCallback callback) { … }
// Synchronous test helper wrapping an async proxy; blocks (via a RunLoop,
// presumably) until the reply arrives.
AudioSourceSpeechRecognitionContextAsyncWaiter::AudioSourceSpeechRecognitionContextAsyncWaiter(
AudioSourceSpeechRecognitionContext* proxy) : … { … }
AudioSourceSpeechRecognitionContextAsyncWaiter::~AudioSourceSpeechRecognitionContextAsyncWaiter() = default;
// Blocking call variant returning the reply through |out_is_multichannel_supported|.
void AudioSourceSpeechRecognitionContextAsyncWaiter::BindAudioSourceFetcher(
::mojo::PendingReceiver<AudioSourceFetcher> fetcher_receiver, ::mojo::PendingRemote<::media::mojom::blink::SpeechRecognitionRecognizerClient> client, ::media::mojom::blink::SpeechRecognitionOptionsPtr options, bool* out_is_multichannel_supported) { … }
// Convenience overload returning the single bool reply directly.
bool AudioSourceSpeechRecognitionContextAsyncWaiter::BindAudioSourceFetcher(
::mojo::PendingReceiver<AudioSourceFetcher> fetcher_receiver, ::mojo::PendingRemote<::media::mojom::blink::SpeechRecognitionRecognizerClient> client, ::media::mojom::blink::SpeechRecognitionOptionsPtr options) { … }
// Test interceptors: each forwards to the wrapped SpeechRecognitionService
// implementation unless overridden in a test subclass (bodies elided).
void SpeechRecognitionServiceInterceptorForTesting::BindSpeechRecognitionContext(::mojo::PendingReceiver<::media::mojom::blink::SpeechRecognitionContext> context) { … }
void SpeechRecognitionServiceInterceptorForTesting::BindAudioSourceSpeechRecognitionContext(::mojo::PendingReceiver<AudioSourceSpeechRecognitionContext> context) { … }
void SpeechRecognitionServiceInterceptorForTesting::SetSodaPaths(const ::base::FilePath& binary_path, const WTF::HashMap<WTF::String, ::base::FilePath>& config_paths, const WTF::String& primary_language_name) { … }
void SpeechRecognitionServiceInterceptorForTesting::SetSodaParams(bool mask_offensive_words) { … }
void SpeechRecognitionServiceInterceptorForTesting::SetSodaConfigPaths(const WTF::HashMap<WTF::String, ::base::FilePath>& config_paths) { … }
// Sync test wrapper; exposes no blocking methods here since all visible
// SpeechRecognitionService methods are fire-and-forget.
SpeechRecognitionServiceAsyncWaiter::SpeechRecognitionServiceAsyncWaiter(
SpeechRecognitionService* proxy) : … { … }
SpeechRecognitionServiceAsyncWaiter::~SpeechRecognitionServiceAsyncWaiter() = default;
// Test interceptors for AudioSourceFetcher::Start/Stop: forward to the
// wrapped implementation unless overridden in a test subclass (bodies elided).
void AudioSourceFetcherInterceptorForTesting::Start(::mojo::PendingRemote<::media::mojom::blink::AudioStreamFactory> factory, const WTF::String& device_id, const ::media::AudioParameters& audio_parameters) { … }
void AudioSourceFetcherInterceptorForTesting::Stop() { … }
// Sync test wrapper; no blocking methods here since Start/Stop have no
// replies.
AudioSourceFetcherAsyncWaiter::AudioSourceFetcherAsyncWaiter(
AudioSourceFetcher* proxy) : … { … }
AudioSourceFetcherAsyncWaiter::~AudioSourceFetcherAsyncWaiter() = default;
}
#if defined(__clang__)
#pragma clang diagnostic pop
#endif