// chromium/media/webrtc/audio_processor.cc

// Copyright 2021 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifdef UNSAFE_BUFFERS_BUILD
// TODO(crbug.com/40285824): Remove this and convert code to safer constructs.
#pragma allow_unsafe_buffers
#endif

#include "media/webrtc/audio_processor.h"

#include <stddef.h>
#include <stdint.h>

#include <algorithm>
#include <array>
#include <limits>
#include <memory>
#include <optional>
#include <utility>

#include "base/feature_list.h"
#include "base/functional/callback_helpers.h"
#include "base/logging.h"
#include "base/strings/stringprintf.h"
#include "base/task/thread_pool.h"
#include "base/trace_event/trace_event.h"
#include "build/build_config.h"
#include "build/chromecast_buildflags.h"
#include "build/chromeos_buildflags.h"
#include "media/base/audio_fifo.h"
#include "media/base/audio_parameters.h"
#include "media/base/audio_timestamp_helper.h"
#include "media/base/channel_layout.h"
#include "media/base/limits.h"
#include "media/webrtc/constants.h"
#include "media/webrtc/helpers.h"
#include "media/webrtc/webrtc_features.h"
#include "third_party/webrtc/modules/audio_processing/include/audio_processing.h"
#include "third_party/webrtc_overrides/task_queue_factory.h"

namespace media {
namespace {
// WebRTC's AudioProcessing operates on 10 ms chunks, i.e. 100 buffers per
// second. The original line read "= ;" (no value) — a syntax error; the
// trailing comment fixes the intended value at 100.
constexpr int kBuffersPerSecond = 100;  // 10 ms per buffer.

// Returns the capture buffer size (in frames) to use for |device_format|,
// presumably taking |need_webrtc_processing| into account to align with the
// 10 ms WebRTC processing chunk — TODO(review): confirm; the body is empty in
// this copy (implementation appears stripped). Flowing off the end of a
// non-void function is UB; restore the implementation before building.
int GetCaptureBufferSize(bool need_webrtc_processing,
                         const AudioParameters device_format) {}

// Presumably decides whether |apm| requires playout (far-end) audio as a
// reference signal given |settings| — TODO(review): confirm; the body is
// empty in this copy (implementation appears stripped). Flowing off the end
// of a non-void function is UB; restore the implementation before building.
bool ApmNeedsPlayoutReference(const webrtc::AudioProcessing* apm,
                              const AudioProcessingSettings& settings) {}
}  // namespace

// Wraps AudioBus to provide access to the array of channel pointers, since this
// is the type webrtc::AudioProcessing deals in. The array is refreshed on every
// channel_ptrs() call, and will be valid until the underlying AudioBus pointers
// are changed, e.g. through calls to SetChannelData() or SwapChannels().
// NOTE(review): class body is empty in this copy — the members the comment
// above describes (e.g. channel_ptrs()) appear stripped; restore before use.
class AudioProcessorCaptureBus {};

// Wraps AudioFifo to provide a cleaner interface to AudioProcessor.
// It avoids the FIFO when the source and destination frames match. If
// |source_channels| is larger than |destination_channels|, only the first
// |destination_channels| are kept from the source.
// Does not support concurrent access.
// NOTE(review): class body is empty in this copy — the FIFO-wrapping members
// the comment above describes appear stripped; restore before use.
class AudioProcessorCaptureFifo {};

// static
// Factory for AudioProcessor instances: callers supply the processed-audio
// delivery callback, a log callback, processing settings, and input/output
// formats, and receive an owning unique_ptr.
// NOTE(review): body is empty in this copy — implementation appears stripped.
// Flowing off the end of a non-void function is UB; restore before building.
std::unique_ptr<AudioProcessor> AudioProcessor::Create(
    DeliverProcessedAudioCallback deliver_processed_audio_callback,
    LogCallback log_callback,
    const AudioProcessingSettings& settings,
    const media::AudioParameters& input_format,
    const media::AudioParameters& output_format) {}

// Constructor. Takes the delivery/log callbacks, the input/output formats,
// the (possibly null — TODO confirm) WebRTC audio-processing module, and the
// stereo-mirroring / playout-reference flags.
AudioProcessor::AudioProcessor(
    DeliverProcessedAudioCallback deliver_processed_audio_callback,
    LogCallback log_callback,
    const media::AudioParameters& input_format,
    const media::AudioParameters& output_format,
    rtc::scoped_refptr<webrtc::AudioProcessing> webrtc_audio_processing,
    bool stereo_mirroring,
    bool needs_playout_reference)
    // NOTE(review): the member-initializer list appears stripped in this
    // copy; the bare ":{}" here was a syntax error (":" must be followed by
    // at least one initializer). The stray ":" is dropped so the skeleton
    // parses; restore the real initializers before use.
    {}

// Destructor. NOTE(review): body is empty in this copy — any teardown the
// real implementation performs (e.g. stopping an AEC dump) appears stripped.
AudioProcessor::~AudioProcessor() {}

// Entry point for captured (near-end) audio: |audio_source| with its capture
// timestamp, the sink's preferred channel count, the current |volume|, and a
// key-press flag (presumably for typing-noise detection — TODO confirm).
// NOTE(review): body is empty in this copy — implementation appears stripped.
void AudioProcessor::ProcessCapturedAudio(const media::AudioBus& audio_source,
                                          base::TimeTicks audio_capture_time,
                                          int num_preferred_channels,
                                          double volume,
                                          bool key_pressed) {}

// Informs the processor whether its output will be muted downstream.
// NOTE(review): body is empty in this copy — implementation appears stripped.
void AudioProcessor::SetOutputWillBeMuted(bool muted) {}

// Begins writing a diagnostic (presumably aecdump — TODO confirm) recording
// into |dump_file|; the file is passed by value, transferring ownership.
// NOTE(review): body is empty in this copy — implementation appears stripped.
void AudioProcessor::OnStartDump(base::File dump_file) {}

// Stops the diagnostic recording started by OnStartDump().
// NOTE(review): body is empty in this copy — implementation appears stripped.
void AudioProcessor::OnStopDump() {}

// Receives playout (far-end) audio — presumably the echo-cancellation
// reference path (TODO confirm) — with its sample rate and playout delay.
// NOTE(review): body is empty in this copy — implementation appears stripped.
void AudioProcessor::OnPlayoutData(const AudioBus& audio_bus,
                                   int sample_rate,
                                   base::TimeDelta audio_delay) {}

// Analyzes a buffer of playout audio at a given |frame_delay| — presumably
// feeding the reverse stream of the APM (TODO confirm).
// NOTE(review): body is empty in this copy — implementation appears stripped.
void AudioProcessor::AnalyzePlayoutData(const AudioBus& audio_bus,
                                        int frame_delay) {}

// Returns current audio-processing statistics.
// NOTE(review): body is empty in this copy — implementation appears stripped.
// Flowing off the end of a non-void function is UB; restore before building.
webrtc::AudioProcessingStats AudioProcessor::GetStats() {}

// Core processing step over raw channel-pointer arrays: consumes
// |process_frames| frames from |process_ptrs|, writes results to
// |output_ptrs|, and returns an optional value — presumably a new recommended
// mic volume from AGC, nullopt when unchanged (TODO confirm).
// NOTE(review): body is empty in this copy — implementation appears stripped.
// Flowing off the end of a non-void function is UB; restore before building.
std::optional<double> AudioProcessor::ProcessData(
    const float* const* process_ptrs,
    int process_frames,
    base::TimeDelta capture_delay,
    double volume,
    bool key_pressed,
    int num_preferred_channels,
    float* const* output_ptrs) {}

// Called on the owning sequence.
// Forwards |message| to the log callback supplied at construction —
// TODO(review): confirm; the body is empty in this copy (stripped).
void AudioProcessor::SendLogMessage(const std::string& message) {}

// Derives the processor's input format from the capture device's format and
// the processing settings; nullopt presumably signals an unsupported format
// (TODO confirm).
// NOTE(review): body is empty in this copy — implementation appears stripped.
// Flowing off the end of a non-void function is UB; restore before building.
std::optional<AudioParameters> AudioProcessor::ComputeInputFormat(
    const AudioParameters& device_format,
    const AudioProcessingSettings& audio_processing_settings) {}

// If WebRTC audio processing is used, the default output format is fixed to the
// native WebRTC processing format in order to avoid rebuffering and resampling.
// If not, then the input format is essentially preserved.
// static
// See the comment above: with WebRTC processing the output is fixed to the
// native processing format; otherwise the input format is preserved.
// NOTE(review): body is empty in this copy — implementation appears stripped.
// Flowing off the end of a non-void function is UB; restore before building.
AudioParameters AudioProcessor::GetDefaultOutputFormat(
    const AudioParameters& input_format,
    const AudioProcessingSettings& settings) {}

}  // namespace media