chromium/media/gpu/vaapi/av1_vaapi_video_encoder_delegate.cc

// Copyright 2023 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifdef UNSAFE_BUFFERS_BUILD
// TODO(crbug.com/40285824): Remove this and convert code to safer constructs.
#pragma allow_unsafe_buffers
#endif

#include "media/gpu/vaapi/av1_vaapi_video_encoder_delegate.h"

#include <bit>
#include <utility>

#include "base/bits.h"
#include "base/logging.h"
#include "media/gpu/macros.h"
#include "media/gpu/vaapi/vaapi_common.h"
#include "media/gpu/vaapi/vaapi_wrapper.h"
#include "third_party/libaom/source/libaom/av1/ratectrl_rtc.h"
#include "third_party/libgav1/src/src/utils/constants.h"

namespace media {
namespace {

// Values from
// third_party/webrtc/modules/video_coding/codecs/av1/libaom_av1_encoder.cc
constexpr int kKFPeriod =;

// Quantization parameters. They are AV1 ac/dc quantizer indices (qindex) and
// their range is 0-255. These are based on WebRTC's defaults.
constexpr uint8_t kMinQP =;
constexpr uint8_t kMaxQP =;

// This needs to be 64, not 16, because of superblocks.
// TODO: Look into whether or not we can reduce alignment to 16.
constexpr gfx::Size kAV1AlignmentSize(64, 64);
constexpr int kCDEFStrengthDivisor =;
constexpr int kPrimaryReferenceNone = 7;  // PRIMARY_REF_NONE in the AV1 spec.
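
// Example of how the 64x64 alignment above is typically applied: the visible
// size is rounded up to whole superblocks to obtain a coded size. A minimal
// sketch only; the name and exact rounding below are illustrative, and the
// delegate's real coded size is whatever GetCodedSize() reports.
gfx::Size AlignSizeToSuperblocksSketch(const gfx::Size& visible_size) {
  auto align_up = [](int value, int alignment) {
    return (value + alignment - 1) / alignment * alignment;
  };
  return gfx::Size(align_up(visible_size.width(), kAV1AlignmentSize.width()),
                   align_up(visible_size.height(), kAV1AlignmentSize.height()));
}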

#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))

// Converts a qindex, whose range is 0-255, to the quantizer parameter used in
// libaom AV1 rate control, whose range is 0-63.
// The table is generated from the table in
// //third_party/libaom/source/libaom/av1/encoder/av1_quantize.c.
uint8_t QindexToQuantizer(uint8_t q_index) {}
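
// Illustrative sketch of the lookup described above: given the 64-entry,
// monotonically increasing quantizer-to-qindex table from av1_quantize.c (not
// reproduced here), return the smallest AV1 quantizer (0-63) whose qindex
// reaches |q_index|. The name and signature are hypothetical; only the search
// structure is the point.
uint8_t QindexToQuantizerSketch(uint8_t q_index,
                                const uint8_t (&quantizer_to_qindex)[64]) {
  for (size_t q = 0; q < 64; ++q) {
    if (quantizer_to_qindex[q] >= q_index)
      return static_cast<uint8_t>(q);
  }
  return 63;
}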

// TODO: Do we need other reference modes?
enum AV1ReferenceMode {};

struct {} kAV1LevelSpecs[] =;

// Computes the "level" of the bitstream based on resolution and framerate.
// In the AV1 specifications, Annex A section A.3 provides a table for computing
// the appropriate "level" based on the samples (pixels) per second and
// resolution.
// Returns -1 when the given resolution and framerate are invalid.
int ComputeLevel(const gfx::Size& coded_size, uint32_t framerate) {}
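
// Illustrative sketch of the Annex A search described above: walk a table of
// per-level limits and return the first level whose picture size, dimensions,
// and display rate (samples per second) accommodate the stream. The two
// entries below are recalled from Table A.1 (levels 2.0 and 5.1) and should be
// verified against the spec; the authoritative table is kAV1LevelSpecs above.
struct AV1LevelLimitsSketch {
  int level_idx;              // seq_level_idx signalled in the sequence header.
  uint32_t max_pic_size;      // Maximum picture size in samples.
  uint32_t max_h_size;        // Maximum width.
  uint32_t max_v_size;        // Maximum height.
  uint64_t max_display_rate;  // Maximum displayed samples per second.
};

int ComputeLevelSketch(const gfx::Size& coded_size, uint32_t framerate) {
  constexpr AV1LevelLimitsSketch kLimits[] = {
      {0, 147456, 2048, 1152, 4423680},      // Level 2.0.
      {13, 8912896, 8192, 4352, 534773760},  // Level 5.1.
  };
  const uint64_t pic_size =
      static_cast<uint64_t>(coded_size.width()) * coded_size.height();
  const uint64_t display_rate = pic_size * framerate;
  for (const auto& limits : kLimits) {
    if (pic_size <= limits.max_pic_size &&
        coded_size.width() <= static_cast<int>(limits.max_h_size) &&
        coded_size.height() <= static_cast<int>(limits.max_v_size) &&
        display_rate <= limits.max_display_rate) {
      return limits.level_idx;
    }
  }
  return -1;  // No defined level covers this resolution/framerate.
}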

scoped_refptr<AV1Picture> GetAV1Picture(
    const VaapiVideoEncoderDelegate::EncodeJob& job) {}

void DownscaleSegmentMap(const uint8_t* src_seg_map,
                         uint32_t src_seg_size,
                         size_t num_segments,
                         uint8_t* dst_seg_map,
                         uint32_t dst_seg_size,
                         const gfx::Size& coded_size) {}

AV1BitstreamBuilder::SequenceHeader FillAV1BuilderSequenceHeader(
    const gfx::Size& visible_size,
    int level_idx) {}

AV1BitstreamBuilder::FrameHeader FillAV1BuilderFrameHeader(
    const VAEncPictureParameterBufferAV1& pic_param,
    const AV1VaapiVideoEncoderDelegate::EncodeParams& current_params) {}

}  // namespace

AV1VaapiVideoEncoderDelegate::EncodeParams::EncodeParams()
    :{}

AV1VaapiVideoEncoderDelegate::AV1VaapiVideoEncoderDelegate(
    scoped_refptr<VaapiWrapper> vaapi_wrapper,
    base::RepeatingClosure error_cb)
    :{}

bool AV1VaapiVideoEncoderDelegate::Initialize(
    const VideoEncodeAccelerator::Config& config,
    const VaapiVideoEncoderDelegate::Config& ave_config) {}

AV1VaapiVideoEncoderDelegate::~AV1VaapiVideoEncoderDelegate() = default;

bool AV1VaapiVideoEncoderDelegate::UpdateRates(
    const VideoBitrateAllocation& bitrate_allocation,
    uint32_t framerate) {}

gfx::Size AV1VaapiVideoEncoderDelegate::GetCodedSize() const {}

size_t AV1VaapiVideoEncoderDelegate::GetMaxNumOfRefFrames() const {}

std::vector<gfx::Size> AV1VaapiVideoEncoderDelegate::GetSVCLayerResolutions() {}

BitstreamBufferMetadata AV1VaapiVideoEncoderDelegate::GetMetadata(
    const EncodeJob& encode_job,
    size_t payload_size) {}

// We produce a bitstream with the following OBUs, in order:
// 1. Temporal Delimiter OBU (section 5.6) to signal a new frame.
// 2. If we're transmitting a keyframe, a Sequence Header OBU (section 5.5).
// 3. Frame OBU (section 5.10), which consists of a frame header (section 5.9)
//    and the compressed frame data.
VaapiVideoEncoderDelegate::PrepareEncodeJobResult
AV1VaapiVideoEncoderDelegate::PrepareEncodeJob(EncodeJob& encode_job) {}
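
// Illustrative sketch of the OBU ordering documented above. The packed OBU
// byte vectors are produced elsewhere (see the Submit* helpers below); this
// hypothetical helper only shows how they are concatenated per temporal unit.
std::vector<uint8_t> AssembleTemporalUnitSketch(
    bool is_keyframe,
    const std::vector<uint8_t>& temporal_delimiter_obu,
    const std::vector<uint8_t>& sequence_header_obu,
    const std::vector<uint8_t>& frame_obu) {
  std::vector<uint8_t> bitstream;
  // 1. The temporal delimiter always opens a new temporal unit.
  bitstream.insert(bitstream.end(), temporal_delimiter_obu.begin(),
                   temporal_delimiter_obu.end());
  // 2. The sequence header is only transmitted alongside keyframes.
  if (is_keyframe) {
    bitstream.insert(bitstream.end(), sequence_header_obu.begin(),
                     sequence_header_obu.end());
  }
  // 3. The frame OBU (frame header plus compressed data) closes the unit.
  bitstream.insert(bitstream.end(), frame_obu.begin(), frame_obu.end());
  return bitstream;
}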

void AV1VaapiVideoEncoderDelegate::BitrateControlUpdate(
    const BitstreamBufferMetadata& metadata) {}
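
// Minimal sketch of the post-encode feedback step, assuming the delegate owns
// a libaom realtime rate controller (aom::AV1RateControlRTC from
// ratectrl_rtc.h, included above). The free-function form and |rate_ctrl|
// parameter are hypothetical stand-ins for the delegate's own member.
void BitrateControlUpdateSketch(aom::AV1RateControlRTC& rate_ctrl,
                                const BitstreamBufferMetadata& metadata) {
  // Report the actual encoded size so the controller can adapt its QP and
  // buffer model for subsequent frames.
  rate_ctrl.PostEncodeUpdate(metadata.payload_size_bytes);
}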

// See section 5.6 of the AV1 specification.
bool AV1VaapiVideoEncoderDelegate::SubmitTemporalDelimiter(
    size_t& temporal_delimiter_obu_size) {}
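
// For reference, a packed temporal delimiter OBU is just two bytes: an OBU
// header with obu_type = OBU_TEMPORAL_DELIMITER (2) and obu_has_size_field =
// 1, followed by a zero leb128 payload size. A sketch of those bytes, not the
// delegate's actual packing code:
std::vector<uint8_t> BuildTemporalDelimiterObuSketch() {
  // obu_forbidden_bit = 0, obu_type = 2, obu_extension_flag = 0,
  // obu_has_size_field = 1, obu_reserved_1bit = 0  ->  0b00010010.
  constexpr uint8_t kObuHeader = 0x12;
  constexpr uint8_t kZeroPayloadSize = 0x00;  // leb128(0).
  return {kObuHeader, kZeroPayloadSize};
}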

bool AV1VaapiVideoEncoderDelegate::SubmitSequenceHeader(
    size_t& sequence_header_obu_size) {}

// TODO(b:274756117): Consider tuning these parameters.
bool AV1VaapiVideoEncoderDelegate::SubmitSequenceParam() {}

bool AV1VaapiVideoEncoderDelegate::SubmitSequenceHeaderOBU(
    size_t& sequence_header_obu_size) {}
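
// Illustrative sketch of the generic OBU wrapping used for packed headers like
// the sequence header (AV1 spec sections 5.3.2 and 4.10.5): one header byte
// with obu_has_size_field set, the payload size as unsigned leb128, then the
// payload. OBU_SEQUENCE_HEADER has obu_type 1. A sketch under those spec
// definitions, not the delegate's actual packing code.
std::vector<uint8_t> PackObuSketch(uint8_t obu_type,
                                   const std::vector<uint8_t>& payload) {
  std::vector<uint8_t> obu;
  // obu_forbidden_bit(1) | obu_type(4) | obu_extension_flag(1) |
  // obu_has_size_field(1) | obu_reserved_1bit(1).
  obu.push_back(static_cast<uint8_t>((obu_type << 3) | (1 << 1)));
  // Unsigned leb128: 7 payload bits per byte, MSB set on all but the last.
  uint64_t size = payload.size();
  do {
    uint8_t byte = size & 0x7f;
    size >>= 7;
    if (size != 0)
      byte |= 0x80;
    obu.push_back(byte);
  } while (size != 0);
  obu.insert(obu.end(), payload.begin(), payload.end());
  return obu;
}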

bool AV1VaapiVideoEncoderDelegate::SubmitFrame(const EncodeJob& job,
                                               size_t frame_header_obu_offset) {}

// Fills the picture parameter struct.
// Sensible default values for most parameters are taken from
// https://github.com/intel/libva-utils/blob/master/encode/av1encode.c
// TODO(b:274756117): Tune these parameters.
bool AV1VaapiVideoEncoderDelegate::FillPictureParam(
    VAEncPictureParameterBufferAV1& pic_param,
    VAEncSegMapBufferAV1& segment_map_param,
    const EncodeJob& job,
    const AV1Picture& pic) {}

// See section 5.9 of the AV1 specification.
// AV1 is somewhat confusing in that there is both a standalone Frame Header
// OBU and a "sub-OBU" frame header that is part of the Frame OBU. The former
// appears to be optional, while the latter is not.
bool AV1VaapiVideoEncoderDelegate::SubmitFrameOBU(
    const VAEncPictureParameterBufferAV1& pic_param,
    size_t& frame_header_obu_size_offset) {}

bool AV1VaapiVideoEncoderDelegate::SubmitPictureParam(
    const VAEncPictureParameterBufferAV1& pic_param) {}

bool AV1VaapiVideoEncoderDelegate::SubmitSegmentMap(
    const VAEncSegMapBufferAV1& segment_map_param) {}

bool AV1VaapiVideoEncoderDelegate::SubmitTileGroup() {}

bool AV1VaapiVideoEncoderDelegate::SubmitPackedData(
    const std::vector<uint8_t>& data) {}
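
// For reference, a minimal sketch of how packed header bytes reach the driver
// at the libva level: a VAEncPackedHeaderParameterBuffer describing the data,
// followed by the raw bytes in a data buffer. The delegate itself goes through
// VaapiWrapper rather than calling libva directly; |dpy| and |context| are
// hypothetical here, and error/cleanup handling is omitted.
bool SubmitPackedDataRawSketch(VADisplay dpy,
                               VAContextID context,
                               const std::vector<uint8_t>& data) {
  VAEncPackedHeaderParameterBuffer param{};
  param.type = VAEncPackedHeaderRawData;
  param.bit_length = static_cast<uint32_t>(data.size() * 8);
  param.has_emulation_bytes = 0;

  VABufferID param_buffer = VA_INVALID_ID;
  if (vaCreateBuffer(dpy, context, VAEncPackedHeaderParameterBufferType,
                     sizeof(param), 1, &param,
                     &param_buffer) != VA_STATUS_SUCCESS) {
    return false;
  }
  VABufferID data_buffer = VA_INVALID_ID;
  return vaCreateBuffer(dpy, context, VAEncPackedHeaderDataBufferType,
                        static_cast<unsigned int>(data.size()), 1,
                        const_cast<uint8_t*>(data.data()),
                        &data_buffer) == VA_STATUS_SUCCESS;
}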

}  // namespace media