chromium/third_party/blink/renderer/platform/peerconnection/rtc_video_encoder_test.cc

// Copyright 2016 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifdef UNSAFE_BUFFERS_BUILD
// TODO(crbug.com/351564777): Remove this and convert code to safer constructs.
#pragma allow_unsafe_buffers
#endif

#include "third_party/blink/renderer/platform/peerconnection/rtc_video_encoder.h"

#include <stdint.h>

#include "base/functional/bind.h"
#include "base/logging.h"
#include "base/memory/raw_ptr.h"
#include "base/memory/scoped_refptr.h"
#include "base/notreached.h"
#include "base/synchronization/waitable_event.h"
#include "base/task/sequenced_task_runner.h"
#include "base/task/single_thread_task_runner.h"
#include "base/test/scoped_feature_list.h"
#include "base/test/task_environment.h"
#include "base/threading/thread.h"
#include "base/time/time.h"
#include "build/build_config.h"
#include "build/chromeos_buildflags.h"
#include "media/base/media_log.h"
#include "media/base/media_switches.h"
#include "media/base/mock_filters.h"
#include "media/base/video_encoder_metrics_provider.h"
#include "media/capture/capture_switches.h"
#include "media/mojo/clients/mock_mojo_video_encoder_metrics_provider_factory.h"
#include "media/mojo/clients/mojo_video_encoder_metrics_provider.h"
#include "media/video/fake_gpu_memory_buffer.h"
#include "media/video/mock_gpu_video_accelerator_factories.h"
#include "media/video/mock_video_encode_accelerator.h"
#include "testing/gtest/include/gtest/gtest.h"
#include "third_party/blink/public/common/features.h"
#include "third_party/blink/renderer/platform/webrtc/webrtc_video_frame_adapter.h"
#include "third_party/blink/renderer/platform/wtf/cross_thread_functional.h"
#include "third_party/libyuv/include/libyuv/planar_functions.h"
#include "third_party/webrtc/api/video/i420_buffer.h"
#include "third_party/webrtc/api/video_codecs/video_encoder.h"
#include "third_party/webrtc/modules/video_coding/include/video_codec_interface.h"
#include "third_party/webrtc/rtc_base/ref_counted_object.h"
#include "third_party/webrtc/rtc_base/time_utils.h"
#if BUILDFLAG(RTC_USE_H265)
#include "third_party/blink/renderer/platform/peerconnection/h265_parameter_sets_tracker.h"
#endif

using ::testing::_;
using ::testing::AllOf;
using ::testing::AtLeast;
using ::testing::ByMove;
using ::testing::DoAll;
using ::testing::ElementsAre;
using ::testing::Field;
using ::testing::Invoke;
using ::testing::IsEmpty;
using ::testing::NotNull;
using ::testing::Property;
using ::testing::Return;
using ::testing::SaveArg;
using ::testing::SizeIs;
using ::testing::Values;
using ::testing::ValuesIn;
using ::testing::WithArgs;

using SpatialLayer = media::VideoEncodeAccelerator::Config::SpatialLayer;

namespace blink {

namespace {

const int kInputFrameFillY = 12;
const int kInputFrameFillU = 23;
const int kInputFrameFillV = 34;
// 360p is a valid HW resolution (unless `kForcingSoftwareIncludes360` is
// enabled).
const uint16_t kInputFrameWidth = 640;
const uint16_t kInputFrameHeight = 360;
const uint16_t kStartBitrate = 100;

#if !BUILDFLAG(IS_ANDROID)
// Less than 360p should result in SW fallback.
const uint16_t kSoftwareFallbackInputFrameWidth = 319;
const uint16_t kSoftwareFallbackInputFrameHeight = 179;
#endif

constexpr size_t kDefaultEncodedPayloadSize = 100;

const webrtc::VideoEncoder::Capabilities kVideoEncoderCapabilities(
    /*loss_notification=*/false);
const webrtc::VideoEncoder::Settings
    kVideoEncoderSettings(kVideoEncoderCapabilities, 1, 12345);
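// Settings() is (capabilities, number_of_cores, max_payload_size): the tests
// run single-core with an arbitrary 12345-byte payload cap.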

class EncodedImageCallbackWrapper : public webrtc::EncodedImageCallback {};

class RTCVideoEncoderWrapper : public webrtc::VideoEncoder {};
}  // anonymous namespace

MATCHER_P3(CheckConfig,
           pixel_format,
           storage_type,
           drop_frame,
           "Check pixel format, storage type and drop frame in VEAConfig") {}

MATCHER_P(CheckStatusCode, code, "Check the code of media::EncoderStatusCode") {}
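// Conceptually, CheckStatusCode reduces to comparing `arg.code()` against the
// expected media::EncoderStatus code.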

class RTCVideoEncoderTest {};

class RTCVideoEncoderInitTest
    : public RTCVideoEncoderTest,
      public ::testing::TestWithParam<webrtc::VideoCodecType> {};

TEST_P(RTCVideoEncoderInitTest, CreateAndInitSucceeds) {}

TEST_P(RTCVideoEncoderInitTest, RepeatedInitSucceeds) {}

// Software fallback for low resolution is not applicable on Android.
#if !BUILDFLAG(IS_ANDROID)

TEST_P(RTCVideoEncoderInitTest, SoftwareFallbackForLowResolution) {}

TEST_P(RTCVideoEncoderInitTest, SoftwareFallbackForLowResolutionIncludes360p) {}

#endif

TEST_P(RTCVideoEncoderInitTest, CreateAndInitSucceedsForTemporalLayer) {}

const webrtc::VideoCodecType kInitTestCases[] = {
    webrtc::kVideoCodecH264,
    webrtc::kVideoCodecVP8,
    webrtc::kVideoCodecVP9,
};

INSTANTIATE_TEST_SUITE_P(CodecProfiles,
                         RTCVideoEncoderInitTest,
                         ValuesIn(kInitTestCases));

class RTCVideoEncoderEncodeTest : public RTCVideoEncoderTest,
                                  public ::testing::Test {};

class RTCVideoEncoderFrameSizeChangeTest : public RTCVideoEncoderEncodeTest {};

TEST_F(RTCVideoEncoderEncodeTest, H264SoftwareFallbackForOddSize) {}

TEST_F(RTCVideoEncoderEncodeTest, VP8CreateAndInitSucceedsForOddSize) {}

TEST_F(RTCVideoEncoderEncodeTest, VP9CreateAndInitSucceedsForOddSize) {}

TEST_F(RTCVideoEncoderEncodeTest, VP9SoftwareFallbackForVEANotSupport) {}

TEST_F(RTCVideoEncoderEncodeTest, ClearSetErrorRequestWhenInitNewEncoder) {}

// Checks that WEBRTC_VIDEO_CODEC_FALLBACK_SOFTWARE is returned when there is
// platform error.
TEST_F(RTCVideoEncoderEncodeTest, SoftwareFallbackAfterError) {}

TEST_F(RTCVideoEncoderEncodeTest, SoftwareFallbackOnBadEncodeInput) {}

TEST_F(RTCVideoEncoderEncodeTest, ZeroCopyEncodingIfFirstFrameisGMB) {}

TEST_F(RTCVideoEncoderEncodeTest, NonZeroCopyEncodingIfFirstFrameisShmem) {}

TEST_F(RTCVideoEncoderEncodeTest, EncodeScaledFrame) {}

TEST_F(RTCVideoEncoderEncodeTest, PreserveTimestamps) {}

TEST_F(RTCVideoEncoderEncodeTest, AcceptsRepeatedWrappedMediaVideoFrame) {}

TEST_F(RTCVideoEncoderEncodeTest, EncodeVP9TemporalLayer) {}

TEST_F(RTCVideoEncoderEncodeTest, EncodeWithDropFrame) {}

TEST_F(RTCVideoEncoderEncodeTest, InitializeWithTooHighBitrateFails) {}

#if defined(ARCH_CPU_X86_FAMILY) && BUILDFLAG(IS_CHROMEOS_ASH)
// Currently we only test spatial SVC encoding on CrOS, since CrOS is the only
// platform that supports spatial SVC encoding.

// http://crbug.com/1226875
TEST_F(RTCVideoEncoderEncodeTest, EncodeSpatialLayer) {
  const webrtc::VideoCodecType codec_type = webrtc::kVideoCodecVP9;
  CreateEncoder(codec_type);
  constexpr size_t kNumSpatialLayers = 3;
  webrtc::VideoCodec sl_codec =
      GetSVCLayerCodec(webrtc::kVideoCodecVP9, kNumSpatialLayers);
  if (!InitializeOnFirstFrameEnabled()) {
    ExpectCreateInitAndDestroyVEA();
  }
  EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
            rtc_encoder_->InitEncode(&sl_codec, kVideoEncoderSettings));

  constexpr size_t kNumEncodeFrames = 5u;
  class CodecSpecificVerifier : public webrtc::EncodedImageCallback {
   public:
    explicit CodecSpecificVerifier(const webrtc::VideoCodec& codec)
        : codec_(codec) {}
    webrtc::EncodedImageCallback::Result OnEncodedImage(
        const webrtc::EncodedImage& encoded_image,
        const webrtc::CodecSpecificInfo* codec_specific_info) override {
      if (encoded_image._frameType == webrtc::VideoFrameType::kVideoFrameKey) {
        EXPECT_TRUE(codec_specific_info->codecSpecific.VP9.ss_data_available);
        const size_t num_spatial_layers = codec_->VP9().numberOfSpatialLayers;
        const auto& vp9_specific = codec_specific_info->codecSpecific.VP9;
        EXPECT_EQ(vp9_specific.num_spatial_layers, num_spatial_layers);
        for (size_t i = 0; i < num_spatial_layers; ++i) {
          EXPECT_EQ(vp9_specific.width[i], codec_->spatialLayers[i].width);
          EXPECT_EQ(vp9_specific.height[i], codec_->spatialLayers[i].height);
        }
      }

      if (encoded_image.RtpTimestamp() == kNumEncodeFrames - 1 &&
          codec_specific_info->end_of_picture) {
        waiter_.Signal();
      }

      if (encoded_image.TemporalIndex().has_value()) {
        EXPECT_EQ(encoded_image.TemporalIndex(),
                  codec_specific_info->codecSpecific.VP9.temporal_idx);
      }

      return Result(Result::OK);
    }

    void Wait() { waiter_.Wait(); }

   private:
    const raw_ref<const webrtc::VideoCodec> codec_;
    base::WaitableEvent waiter_;
  };
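  // ReturnSVCLayerFrameWithVp9Metadata (a fixture helper) simulates the VEA
  // delivering one bitstream buffer per spatial layer, which is why every
  // Encode() after the first expects kNumSpatialLayers
  // UseOutputBitstreamBuffer() calls as the buffers are recycled.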
  CodecSpecificVerifier sl_verifier(sl_codec);
  rtc_encoder_->RegisterEncodeCompleteCallback(&sl_verifier);
  if (InitializeOnFirstFrameEnabled()) {
    ExpectCreateInitAndDestroyVEA();
  }
  for (size_t i = 0; i < kNumEncodeFrames; i++) {
    const rtc::scoped_refptr<webrtc::I420Buffer> buffer =
        webrtc::I420Buffer::Create(kInputFrameWidth, kInputFrameHeight);
    FillFrameBuffer(buffer);
    std::vector<webrtc::VideoFrameType> frame_types;
    if (i == 0)
      frame_types.emplace_back(webrtc::VideoFrameType::kVideoFrameKey);
    base::WaitableEvent event;
    if (i > 0) {
      EXPECT_CALL(*mock_vea_, UseOutputBitstreamBuffer(_))
          .Times(kNumSpatialLayers);
    }
    EXPECT_CALL(*mock_vea_, Encode)
        .WillOnce(DoAll(
            Invoke(this,
                   &RTCVideoEncoderTest::ReturnSVCLayerFrameWithVp9Metadata),
            [&event]() { event.Signal(); }));
    EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
              rtc_encoder_->Encode(webrtc::VideoFrame::Builder()
                                       .set_video_frame_buffer(buffer)
                                       .set_rtp_timestamp(i)
                                       .set_timestamp_us(i)
                                       .set_rotation(webrtc::kVideoRotation_0)
                                       .build(),
                                   &frame_types));
    event.Wait();
  }
  sl_verifier.Wait();
  RunUntilIdle();
}

TEST_F(RTCVideoEncoderEncodeTest, EncodeSpatialLayerWithDropFrame) {
  const webrtc::VideoCodecType codec_type = webrtc::kVideoCodecVP9;
  CreateEncoder(codec_type);
  constexpr size_t kNumSpatialLayers = 3;
  webrtc::VideoCodec sl_codec =
      GetSVCLayerCodec(webrtc::kVideoCodecVP9, kNumSpatialLayers);
  if (!InitializeOnFirstFrameEnabled()) {
    ExpectCreateInitAndDestroyVEA();
  }
  EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
            rtc_encoder_->InitEncode(&sl_codec, kVideoEncoderSettings));
  if (InitializeOnFirstFrameEnabled()) {
    ExpectCreateInitAndDestroyVEA();
  }

  constexpr static size_t kNumEncodeFrames = 5u;
  constexpr static size_t kDropIndices[] = {1, 3};
  class DropFrameVerifier : public webrtc::EncodedImageCallback {
   public:
    DropFrameVerifier() = default;
    ~DropFrameVerifier() override = default;

    void OnDroppedFrame(DropReason reason) override {
      AddResult(EncodeResult::kDropped);
    }

    webrtc::EncodedImageCallback::Result OnEncodedImage(
        const webrtc::EncodedImage& encoded_image,
        const webrtc::CodecSpecificInfo* codec_specific_info) override {
      if (codec_specific_info->end_of_picture) {
        AddResult(EncodeResult::kEncoded);
      }
      return Result(Result::OK);
    }

    void Verify() {
      base::AutoLock auto_lock(lock_);
      ASSERT_EQ(encode_results_.size(), kNumEncodeFrames);
      for (size_t i = 0; i < kNumEncodeFrames; ++i) {
        EncodeResult expected = EncodeResult::kEncoded;
        if (base::Contains(kDropIndices, i)) {
          expected = EncodeResult::kDropped;
        }
        EXPECT_EQ(encode_results_[i], expected);
      }
    }

   private:
    enum class EncodeResult {
      kEncoded,
      kDropped,
    };

    void AddResult(EncodeResult result) {
      base::AutoLock auto_lock(lock_);
      encode_results_.push_back(result);
    }

    base::Lock lock_;
    std::vector<EncodeResult> encode_results_ GUARDED_BY(lock_);
  };
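  // OnDroppedFrame()/OnEncodedImage() fire on the encoder thread while
  // Verify() runs on the test thread, hence the lock around encode_results_.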
  DropFrameVerifier dropframe_verifier;
  rtc_encoder_->RegisterEncodeCompleteCallback(&dropframe_verifier);
  for (size_t i = 0; i < kNumEncodeFrames; i++) {
    const rtc::scoped_refptr<webrtc::I420Buffer> buffer =
        webrtc::I420Buffer::Create(kInputFrameWidth, kInputFrameHeight);
    FillFrameBuffer(buffer);
    std::vector<webrtc::VideoFrameType> frame_types;
    if (i == 0) {
      frame_types.emplace_back(webrtc::VideoFrameType::kVideoFrameKey);
    }
    base::WaitableEvent event;
    if (i > 0) {
      EXPECT_CALL(*mock_vea_, UseOutputBitstreamBuffer(_))
          .Times(kNumSpatialLayers);
    }
    if (base::Contains(kDropIndices, i)) {
      EXPECT_CALL(*mock_vea_, Encode)
          .WillOnce(DoAll(
              Invoke(this,
                     &RTCVideoEncoderTest::ReturnSvcFramesThatShouldBeDropped),
              [&event]() { event.Signal(); }));
    } else {
      EXPECT_CALL(*mock_vea_, Encode)
          .WillOnce(DoAll(
              Invoke(this,
                     &RTCVideoEncoderTest::ReturnSVCLayerFrameWithVp9Metadata),
              [&event]() { event.Signal(); }));
    }
    EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
              rtc_encoder_->Encode(webrtc::VideoFrame::Builder()
                                       .set_video_frame_buffer(buffer)
                                       .set_rtp_timestamp(i)
                                       .set_timestamp_us(i)
                                       .set_rotation(webrtc::kVideoRotation_0)
                                       .build(),
                                   &frame_types));
    event.Wait();
  }
  RunUntilIdle();
  dropframe_verifier.Verify();
  rtc_encoder_.reset();
}

TEST_F(RTCVideoEncoderEncodeTest, CreateAndInitVP9ThreeLayerSvc) {
  webrtc::VideoCodec tl_codec = GetSVCLayerCodec(webrtc::kVideoCodecVP9,
                                                 /*num_spatial_layers=*/3);
  CreateEncoder(tl_codec.codecType);

  if (InitializeOnFirstFrameEnabled()) {
    EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
              rtc_encoder_->InitEncode(&tl_codec, kVideoEncoderSettings));
    const rtc::scoped_refptr<webrtc::I420Buffer> buffer =
        webrtc::I420Buffer::Create(kInputFrameWidth, kInputFrameHeight);
    FillFrameBuffer(buffer);
    std::vector<webrtc::VideoFrameType> frame_types;
    ExpectCreateInitAndDestroyVEA();
    EXPECT_CALL(*mock_vea_, Encode(_, _)).WillOnce(Return());
    EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
              rtc_encoder_->Encode(webrtc::VideoFrame::Builder()
                                       .set_video_frame_buffer(buffer)
                                       .set_rtp_timestamp(0)
                                       .set_timestamp_us(0)
                                       .set_rotation(webrtc::kVideoRotation_0)
                                       .build(),
                                   &frame_types));
    EXPECT_THAT(
        *config_,
        Field(&media::VideoEncodeAccelerator::Config::spatial_layers,
              ElementsAre(
                  AllOf(Field(&SpatialLayer::width, kInputFrameWidth / 4),
                        Field(&SpatialLayer::height, kInputFrameHeight / 4)),
                  AllOf(Field(&SpatialLayer::width, kInputFrameWidth / 2),
                        Field(&SpatialLayer::height, kInputFrameHeight / 2)),
                  AllOf(Field(&SpatialLayer::width, kInputFrameWidth),
                        Field(&SpatialLayer::height, kInputFrameHeight)))));
  } else {
    ExpectCreateInitAndDestroyVEA();
    EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
              rtc_encoder_->InitEncode(&tl_codec, kVideoEncoderSettings));
    EXPECT_THAT(
        *config_,
        Field(&media::VideoEncodeAccelerator::Config::spatial_layers,
              ElementsAre(
                  AllOf(Field(&SpatialLayer::width, kInputFrameWidth / 4),
                        Field(&SpatialLayer::height, kInputFrameHeight / 4)),
                  AllOf(Field(&SpatialLayer::width, kInputFrameWidth / 2),
                        Field(&SpatialLayer::height, kInputFrameHeight / 2)),
                  AllOf(Field(&SpatialLayer::width, kInputFrameWidth),
                        Field(&SpatialLayer::height, kInputFrameHeight)))));
  }
}

TEST_F(RTCVideoEncoderEncodeTest, CreateAndInitVP9SvcSinglecast) {
  webrtc::VideoCodec tl_codec = GetSVCLayerCodec(webrtc::kVideoCodecVP9,
                                                 /*num_spatial_layers=*/3);
  tl_codec.spatialLayers[1].active = false;
  tl_codec.spatialLayers[2].active = false;
  CreateEncoder(tl_codec.codecType);

  if (InitializeOnFirstFrameEnabled()) {
    EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
              rtc_encoder_->InitEncode(&tl_codec, kVideoEncoderSettings));
    const rtc::scoped_refptr<webrtc::I420Buffer> buffer =
        webrtc::I420Buffer::Create(kInputFrameWidth, kInputFrameHeight);
    FillFrameBuffer(buffer);
    std::vector<webrtc::VideoFrameType> frame_types;
    ExpectCreateInitAndDestroyVEA();
    EXPECT_CALL(*mock_vea_, Encode(_, _)).WillOnce(Return());
    EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
              rtc_encoder_->Encode(webrtc::VideoFrame::Builder()
                                       .set_video_frame_buffer(buffer)
                                       .set_rtp_timestamp(0)
                                       .set_timestamp_us(0)
                                       .set_rotation(webrtc::kVideoRotation_0)
                                       .build(),
                                   &frame_types));
    EXPECT_THAT(
        *config_,
        Field(&media::VideoEncodeAccelerator::Config::spatial_layers,
              ElementsAre(
                  AllOf(Field(&SpatialLayer::width, kInputFrameWidth / 4),
                        Field(&SpatialLayer::height, kInputFrameHeight / 4)))));
  } else {
    ExpectCreateInitAndDestroyVEA();
    EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
              rtc_encoder_->InitEncode(&tl_codec, kVideoEncoderSettings));
    EXPECT_THAT(
        *config_,
        Field(&media::VideoEncodeAccelerator::Config::spatial_layers,
              ElementsAre(
                  AllOf(Field(&SpatialLayer::width, kInputFrameWidth / 4),
                        Field(&SpatialLayer::height, kInputFrameHeight / 4)))));
  }
}

TEST_F(RTCVideoEncoderEncodeTest,
       CreateAndInitVP9SvcSinglecastWithoutTemporalLayers) {
  webrtc::VideoCodec tl_codec = GetSVCLayerCodec(webrtc::kVideoCodecVP9,
                                                 /*num_spatial_layers=*/3);
  tl_codec.spatialLayers[1].active = false;
  tl_codec.spatialLayers[2].active = false;
  tl_codec.spatialLayers[0].numberOfTemporalLayers = 1;
  tl_codec.VP9()->numberOfTemporalLayers = 1;
  CreateEncoder(tl_codec.codecType);

  if (InitializeOnFirstFrameEnabled()) {
    EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
              rtc_encoder_->InitEncode(&tl_codec, kVideoEncoderSettings));
    const rtc::scoped_refptr<webrtc::I420Buffer> buffer =
        webrtc::I420Buffer::Create(kInputFrameWidth, kInputFrameHeight);
    FillFrameBuffer(buffer);
    std::vector<webrtc::VideoFrameType> frame_types;
    ExpectCreateInitAndDestroyVEA();
    EXPECT_CALL(*mock_vea_, Encode(_, _)).WillOnce(Return());
    EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
              rtc_encoder_->Encode(webrtc::VideoFrame::Builder()
                                       .set_video_frame_buffer(buffer)
                                       .set_rtp_timestamp(0)
                                       .set_timestamp_us(0)
                                       .set_rotation(webrtc::kVideoRotation_0)
                                       .build(),
                                   &frame_types));
    EXPECT_THAT(config_->spatial_layers, IsEmpty());
  } else {
    ExpectCreateInitAndDestroyVEA();
    EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
              rtc_encoder_->InitEncode(&tl_codec, kVideoEncoderSettings));
    EXPECT_THAT(config_->spatial_layers, IsEmpty());
  }
}

TEST_F(RTCVideoEncoderEncodeTest,
       CreateAndInitVP9ThreeLayerSvcWithTopLayerInactive) {
  webrtc::VideoCodec tl_codec = GetSVCLayerCodec(webrtc::kVideoCodecVP9,
                                                 /*num_spatial_layers=*/3);
  tl_codec.spatialLayers[2].active = false;
  CreateEncoder(tl_codec.codecType);

  if (InitializeOnFirstFrameEnabled()) {
    EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
              rtc_encoder_->InitEncode(&tl_codec, kVideoEncoderSettings));
    const rtc::scoped_refptr<webrtc::I420Buffer> buffer =
        webrtc::I420Buffer::Create(kInputFrameWidth, kInputFrameHeight);
    FillFrameBuffer(buffer);
    std::vector<webrtc::VideoFrameType> frame_types;
    ExpectCreateInitAndDestroyVEA();
    EXPECT_CALL(*mock_vea_, Encode).WillOnce(Return());
    EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
              rtc_encoder_->Encode(webrtc::VideoFrame::Builder()
                                       .set_video_frame_buffer(buffer)
                                       .set_rtp_timestamp(0)
                                       .set_timestamp_us(0)
                                       .set_rotation(webrtc::kVideoRotation_0)
                                       .build(),
                                   &frame_types));
    EXPECT_THAT(
        *config_,
        Field(&media::VideoEncodeAccelerator::Config::spatial_layers,
              ElementsAre(
                  AllOf(Field(&SpatialLayer::width, kInputFrameWidth / 4),
                        Field(&SpatialLayer::height, kInputFrameHeight / 4)),
                  AllOf(Field(&SpatialLayer::width, kInputFrameWidth / 2),
                        Field(&SpatialLayer::height, kInputFrameHeight / 2)))));
    EXPECT_THAT(
        *config_,
        Field(&media::VideoEncodeAccelerator::Config::input_visible_size,
              AllOf(Property(&gfx::Size::width, kInputFrameWidth / 2),
                    Property(&gfx::Size::height, kInputFrameHeight / 2))));
  } else {
    ExpectCreateInitAndDestroyVEA();
    EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
              rtc_encoder_->InitEncode(&tl_codec, kVideoEncoderSettings));
    EXPECT_THAT(
        *config_,
        Field(&media::VideoEncodeAccelerator::Config::spatial_layers,
              ElementsAre(
                  AllOf(Field(&SpatialLayer::width, kInputFrameWidth / 4),
                        Field(&SpatialLayer::height, kInputFrameHeight / 4)),
                  AllOf(Field(&SpatialLayer::width, kInputFrameWidth / 2),
                        Field(&SpatialLayer::height, kInputFrameHeight / 2)))));
    EXPECT_THAT(
        *config_,
        Field(&media::VideoEncodeAccelerator::Config::input_visible_size,
              AllOf(Property(&gfx::Size::width, kInputFrameWidth / 2),
                    Property(&gfx::Size::height, kInputFrameHeight / 2))));
  }
}

TEST_F(RTCVideoEncoderEncodeTest, RaiseErrorOnMissingEndOfPicture) {
  webrtc::VideoCodec tl_codec = GetSVCLayerCodec(webrtc::kVideoCodecVP9,
                                                 /*num_spatial_layers=*/2);
  tl_codec.VP9()->numberOfTemporalLayers = 1;
  tl_codec.spatialLayers[0].numberOfTemporalLayers = 1;
  tl_codec.spatialLayers[1].numberOfTemporalLayers = 1;
  CreateEncoder(tl_codec.codecType);
  if (!InitializeOnFirstFrameEnabled()) {
    ExpectCreateInitAndDestroyVEA();
  }
  EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
            rtc_encoder_->InitEncode(&tl_codec, kVideoEncoderSettings));

  if (InitializeOnFirstFrameEnabled()) {
    ExpectCreateInitAndDestroyVEA();
  }
  EXPECT_CALL(*mock_vea_, Encode).WillOnce([&] {
    media::BitstreamBufferMetadata metadata(
        /*payload_size_bytes=*/100u,
        /*keyframe=*/true,
        /*timestamp=*/base::Milliseconds(0));
    metadata.key_frame = true;
    metadata.vp9.emplace();
    metadata.vp9->end_of_picture = false;
    metadata.vp9->spatial_idx = 0;
    metadata.vp9->spatial_layer_resolutions = ToResolutionList(tl_codec);
    ASSERT_EQ(metadata.vp9->spatial_layer_resolutions.size(), 2u);
    client_->BitstreamBufferReady(/*buffer_id=*/0, metadata);

    metadata.key_frame = false;

    metadata.vp9.emplace();
    // Incorrectly mark last spatial layer with eop = false.
    metadata.vp9->end_of_picture = false;
    metadata.vp9->spatial_idx = 1;
    metadata.vp9->reference_lower_spatial_layers = true;
    client_->BitstreamBufferReady(/*buffer_id=*/1, metadata);
  });
  const rtc::scoped_refptr<webrtc::I420Buffer> buffer =
      webrtc::I420Buffer::Create(kInputFrameWidth, kInputFrameHeight);
  FillFrameBuffer(buffer);
  std::vector<webrtc::VideoFrameType> frame_types{
      webrtc::VideoFrameType::kVideoFrameKey};

  // BitstreamBufferReady() is called after the first Encode() returns.
  // The error is reported on the second call.
  base::WaitableEvent error_waiter(
      base::WaitableEvent::ResetPolicy::MANUAL,
      base::WaitableEvent::InitialState::NOT_SIGNALED);
  rtc_encoder_->SetErrorWaiter(&error_waiter);

  EXPECT_EQ(rtc_encoder_->Encode(webrtc::VideoFrame::Builder()
                                     .set_video_frame_buffer(buffer)
                                     .set_rtp_timestamp(0)
                                     .set_timestamp_us(0)
                                     .set_rotation(webrtc::kVideoRotation_0)
                                     .build(),
                                 &frame_types),
            WEBRTC_VIDEO_CODEC_OK);
  error_waiter.Wait();
  EXPECT_EQ(rtc_encoder_->Encode(webrtc::VideoFrame::Builder()
                                     .set_video_frame_buffer(buffer)
                                     .set_rtp_timestamp(0)
                                     .set_timestamp_us(0)
                                     .set_rotation(webrtc::kVideoRotation_0)
                                     .build(),
                                 &frame_types),
            WEBRTC_VIDEO_CODEC_FALLBACK_SOFTWARE);
}

TEST_F(RTCVideoEncoderEncodeTest, RaiseErrorOnMismatchingResolutions) {
  webrtc::VideoCodec tl_codec = GetSVCLayerCodec(webrtc::kVideoCodecVP9,
                                                 /*num_spatial_layers=*/2);
  tl_codec.VP9()->numberOfTemporalLayers = 1;
  tl_codec.spatialLayers[0].numberOfTemporalLayers = 1;
  tl_codec.spatialLayers[1].numberOfTemporalLayers = 1;
  CreateEncoder(tl_codec.codecType);
  if (!InitializeOnFirstFrameEnabled()) {
    ExpectCreateInitAndDestroyVEA();
  }
  EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
            rtc_encoder_->InitEncode(&tl_codec, kVideoEncoderSettings));

  if (InitializeOnFirstFrameEnabled()) {
    ExpectCreateInitAndDestroyVEA();
  }
  EXPECT_CALL(*mock_vea_, Encode).WillOnce([&] {
    media::BitstreamBufferMetadata metadata(
        /*payload_size_bytes=*/100u,
        /*keyframe=*/true,
        /*timestamp=*/base::Milliseconds(0));
    metadata.key_frame = true;
    metadata.vp9.emplace();
    metadata.vp9->end_of_picture = false;
    metadata.vp9->spatial_layer_resolutions = {gfx::Size(
        tl_codec.spatialLayers[0].width, tl_codec.spatialLayers[0].height)};
    client_->BitstreamBufferReady(/*buffer_id=*/0, metadata);
  });

  const rtc::scoped_refptr<webrtc::I420Buffer> buffer =
      webrtc::I420Buffer::Create(kInputFrameWidth, kInputFrameHeight);
  FillFrameBuffer(buffer);
  std::vector<webrtc::VideoFrameType> frame_types{
      webrtc::VideoFrameType::kVideoFrameKey};

  // BitstreamBufferReady() is called after the first Encode() returns.
  // The error is reported on the second call.
  base::WaitableEvent error_waiter(
      base::WaitableEvent::ResetPolicy::MANUAL,
      base::WaitableEvent::InitialState::NOT_SIGNALED);
  rtc_encoder_->SetErrorWaiter(&error_waiter);
  EXPECT_EQ(rtc_encoder_->Encode(webrtc::VideoFrame::Builder()
                                     .set_video_frame_buffer(buffer)
                                     .set_rtp_timestamp(0)
                                     .set_timestamp_us(0)
                                     .set_rotation(webrtc::kVideoRotation_0)
                                     .build(),
                                 &frame_types),
            WEBRTC_VIDEO_CODEC_OK);
  error_waiter.Wait();
  EXPECT_EQ(rtc_encoder_->Encode(webrtc::VideoFrame::Builder()
                                     .set_video_frame_buffer(buffer)
                                     .set_rtp_timestamp(0)
                                     .set_timestamp_us(0)
                                     .set_rotation(webrtc::kVideoRotation_0)
                                     .build(),
                                 &frame_types),
            WEBRTC_VIDEO_CODEC_FALLBACK_SOFTWARE);
}

TEST_F(RTCVideoEncoderEncodeTest, SpatialLayerTurnedOffAndOnAgain) {
  webrtc::VideoCodec tl_codec = GetSVCLayerCodec(webrtc::kVideoCodecVP9,
                                                 /*num_spatial_layers=*/2);
  tl_codec.VP9()->numberOfTemporalLayers = 1;
  tl_codec.spatialLayers[0].numberOfTemporalLayers = 1;
  tl_codec.spatialLayers[1].numberOfTemporalLayers = 1;
  CreateEncoder(tl_codec.codecType);
  if (!InitializeOnFirstFrameEnabled()) {
    ExpectCreateInitAndDestroyVEA();
  }
  EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
            rtc_encoder_->InitEncode(&tl_codec, kVideoEncoderSettings));

  if (InitializeOnFirstFrameEnabled()) {
    ExpectCreateInitAndDestroyVEA();
  }
  // Start with two active spatial layers.
  EXPECT_CALL(*mock_vea_, Encode).WillOnce([&] {
    media::BitstreamBufferMetadata metadata(
        /*payload_size_bytes=*/100u,
        /*keyframe=*/true,
        /*timestamp=*/base::Milliseconds(0));
    metadata.key_frame = true;
    metadata.vp9.emplace();
    metadata.vp9->end_of_picture = false;
    metadata.vp9->spatial_idx = 0;
    metadata.vp9->spatial_layer_resolutions = ToResolutionList(tl_codec);
    ASSERT_EQ(metadata.vp9->spatial_layer_resolutions.size(), 2u);
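    // The active-layer indices form a half-open range: [begin, end) == [0, 2)
    // marks both spatial layers active.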
    metadata.vp9->begin_active_spatial_layer_index = 0;
    metadata.vp9->end_active_spatial_layer_index = 2;
    client_->BitstreamBufferReady(/*buffer_id=*/0, metadata);

    metadata.key_frame = false;
    metadata.vp9.emplace();
    metadata.vp9->end_of_picture = true;
    metadata.vp9->spatial_idx = 1;
    metadata.vp9->reference_lower_spatial_layers = true;
    client_->BitstreamBufferReady(/*buffer_id=*/1, metadata);
  });
  const rtc::scoped_refptr<webrtc::I420Buffer> buffer =
      webrtc::I420Buffer::Create(kInputFrameWidth, kInputFrameHeight);
  FillFrameBuffer(buffer);
  std::vector<webrtc::VideoFrameType> frame_types{
      webrtc::VideoFrameType::kVideoFrameKey};
  EXPECT_EQ(rtc_encoder_->Encode(webrtc::VideoFrame::Builder()
                                     .set_video_frame_buffer(buffer)
                                     .set_rtp_timestamp(0)
                                     .set_timestamp_us(0)
                                     .set_rotation(webrtc::kVideoRotation_0)
                                     .build(),
                                 &frame_types),
            WEBRTC_VIDEO_CODEC_OK);
  RunUntilIdle();

  // Send a bitrate allocation that disables the top spatial layer.
  webrtc::VideoBitrateAllocation bitrate_allocation;
  bitrate_allocation.SetBitrate(0, 0, 100000);
  EXPECT_CALL(*mock_vea_, RequestEncodingParametersChange);
  rtc_encoder_->SetRates(webrtc::VideoEncoder::RateControlParameters(
      bitrate_allocation, tl_codec.maxFramerate));
  EXPECT_CALL(*mock_vea_, Encode).WillOnce([&] {
    media::BitstreamBufferMetadata metadata(
        /*payload_size_bytes=*/100u,
        /*keyframe=*/true,
        /*timestamp=*/base::Microseconds(1));
    metadata.vp9.emplace();
    metadata.vp9->spatial_idx = 0;
    metadata.vp9->inter_pic_predicted = true;
    metadata.vp9->spatial_layer_resolutions = {
        gfx::Size(tl_codec.spatialLayers[0].width,
                  tl_codec.spatialLayers[0].height),
    };
    metadata.vp9->begin_active_spatial_layer_index = 0;
    metadata.vp9->end_active_spatial_layer_index = 1;
    client_->BitstreamBufferReady(/*buffer_id=*/0, metadata);
  });
  frame_types[0] = webrtc::VideoFrameType::kVideoFrameDelta;
  EXPECT_EQ(rtc_encoder_->Encode(webrtc::VideoFrame::Builder()
                                     .set_video_frame_buffer(buffer)
                                     .set_rtp_timestamp(1)
                                     .set_timestamp_us(1)
                                     .set_rotation(webrtc::kVideoRotation_0)
                                     .build(),
                                 &frame_types),
            WEBRTC_VIDEO_CODEC_OK);
  RunUntilIdle();

  // Re-enable the top layer.
  bitrate_allocation.SetBitrate(1, 0, 500000);
  EXPECT_CALL(*mock_vea_, RequestEncodingParametersChange);
  rtc_encoder_->SetRates(webrtc::VideoEncoder::RateControlParameters(
      bitrate_allocation, tl_codec.maxFramerate));
  EXPECT_CALL(*mock_vea_, Encode).WillOnce([&] {
    media::BitstreamBufferMetadata metadata(
        /*payload_size_bytes=*/100u,
        /*keyframe=*/true,
        /*timestamp=*/base::Microseconds(2));
    metadata.vp9.emplace();
    metadata.vp9->end_of_picture = false;
    metadata.vp9->spatial_idx = 0;
    metadata.vp9->inter_pic_predicted = true;
    metadata.vp9->spatial_layer_resolutions = {
        gfx::Size(tl_codec.spatialLayers[0].width,
                  tl_codec.spatialLayers[0].height),
        gfx::Size(tl_codec.spatialLayers[1].width,
                  tl_codec.spatialLayers[1].height),
    };
    metadata.vp9->begin_active_spatial_layer_index = 0;
    metadata.vp9->end_active_spatial_layer_index = 2;
    client_->BitstreamBufferReady(/*buffer_id=*/0, metadata);

    metadata.key_frame = false;
    metadata.vp9.emplace();
    metadata.vp9->end_of_picture = true;
    metadata.vp9->spatial_idx = 1;
    metadata.vp9->inter_pic_predicted = true;
    client_->BitstreamBufferReady(/*buffer_id=*/1, metadata);
  });
  EXPECT_EQ(rtc_encoder_->Encode(webrtc::VideoFrame::Builder()
                                     .set_video_frame_buffer(buffer)
                                     .set_rtp_timestamp(2)
                                     .set_timestamp_us(2)
                                     .set_rotation(webrtc::kVideoRotation_0)
                                     .build(),
                                 &frame_types),
            WEBRTC_VIDEO_CODEC_OK);
  RunUntilIdle();
}

TEST_F(RTCVideoEncoderEncodeTest, LowerSpatialLayerTurnedOffAndOnAgain) {
  // This test generates 6 layer frames with the following dependencies:
  // disable S0 and S2 layers
  //       |
  //       V
  // S2  O
  //     |
  // S1  O---O---O
  //     |       |
  // S0  O       O
  //           ^
  //           |
  // re-enable S0 layer
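  // Columns are successive encoded frames, rows are spatial layers; vertical
  // bars mark inter-layer references and horizontal dashes temporal
  // (inter-picture) references.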

  class Vp9CodecSpecificInfoContainer : public webrtc::EncodedImageCallback {
   public:
    webrtc::EncodedImageCallback::Result OnEncodedImage(
        const webrtc::EncodedImage& encoded_image,
        const webrtc::CodecSpecificInfo* codec_specific_info) override {
      EXPECT_THAT(codec_specific_info, NotNull());
      if (codec_specific_info != nullptr) {
        EXPECT_EQ(codec_specific_info->codecType, webrtc::kVideoCodecVP9);
        infos_.push_back(codec_specific_info->codecSpecific.VP9);
      }
      if (encoded_image.TemporalIndex().has_value()) {
        EXPECT_EQ(encoded_image.TemporalIndex(),
                  codec_specific_info->codecSpecific.VP9.temporal_idx);
      }

      return Result(Result::OK);
    }

    const std::vector<webrtc::CodecSpecificInfoVP9>& infos() { return infos_; }

   private:
    std::vector<webrtc::CodecSpecificInfoVP9> infos_;
  };
  Vp9CodecSpecificInfoContainer encoded_callback;

  webrtc::VideoCodec tl_codec = GetSVCLayerCodec(webrtc::kVideoCodecVP9,
                                                 /*num_spatial_layers=*/3);
  tl_codec.VP9()->numberOfTemporalLayers = 1;
  tl_codec.spatialLayers[0].numberOfTemporalLayers = 1;
  tl_codec.spatialLayers[1].numberOfTemporalLayers = 1;
  tl_codec.spatialLayers[2].numberOfTemporalLayers = 1;
  CreateEncoder(tl_codec.codecType);
  if (!InitializeOnFirstFrameEnabled()) {
    ExpectCreateInitAndDestroyVEA();
  }
  EXPECT_EQ(rtc_encoder_->InitEncode(&tl_codec, kVideoEncoderSettings),
            WEBRTC_VIDEO_CODEC_OK);

  rtc_encoder_->RegisterEncodeCompleteCallback(&encoded_callback);

  if (InitializeOnFirstFrameEnabled()) {
    ExpectCreateInitAndDestroyVEA();
  }
  // Start with all three active spatial layers.
  EXPECT_CALL(*mock_vea_, Encode).WillOnce([&] {
    media::BitstreamBufferMetadata metadata(
        /*payload_size_bytes=*/100u,
        /*keyframe=*/true, /*timestamp=*/base::Milliseconds(0));
    metadata.key_frame = true;
    metadata.vp9.emplace();
    metadata.vp9->end_of_picture = false;
    metadata.vp9->spatial_idx = 0;
    metadata.vp9->spatial_layer_resolutions = ToResolutionList(tl_codec);
    ASSERT_THAT(metadata.vp9->spatial_layer_resolutions, SizeIs(3));
    metadata.vp9->begin_active_spatial_layer_index = 0;
    metadata.vp9->end_active_spatial_layer_index = 3;
    client_->BitstreamBufferReady(/*buffer_id=*/0, metadata);

    metadata.key_frame = false;
    metadata.vp9.emplace();
    metadata.vp9->end_of_picture = false;
    metadata.vp9->spatial_idx = 1;
    metadata.vp9->reference_lower_spatial_layers = true;
    client_->BitstreamBufferReady(/*buffer_id=*/1, metadata);

    metadata.key_frame = false;
    metadata.vp9.emplace();
    metadata.vp9->end_of_picture = true;
    metadata.vp9->spatial_idx = 2;
    metadata.vp9->reference_lower_spatial_layers = true;
    client_->BitstreamBufferReady(/*buffer_id=*/2, metadata);
  });
  const rtc::scoped_refptr<webrtc::I420Buffer> buffer =
      webrtc::I420Buffer::Create(kInputFrameWidth, kInputFrameHeight);
  FillFrameBuffer(buffer);
  std::vector<webrtc::VideoFrameType> frame_types{
      webrtc::VideoFrameType::kVideoFrameKey};
  EXPECT_EQ(rtc_encoder_->Encode(webrtc::VideoFrame::Builder()
                                     .set_video_frame_buffer(buffer)
                                     .set_rtp_timestamp(0)
                                     .set_timestamp_us(0)
                                     .set_rotation(webrtc::kVideoRotation_0)
                                     .build(),
                                 &frame_types),
            WEBRTC_VIDEO_CODEC_OK);
  RunUntilIdle();
  ASSERT_THAT(encoded_callback.infos(), SizeIs(3));
  EXPECT_EQ(encoded_callback.infos()[0].first_active_layer, 0u);
  EXPECT_EQ(encoded_callback.infos()[0].num_spatial_layers, 3u);
  EXPECT_EQ(encoded_callback.infos()[1].first_active_layer, 0u);
  EXPECT_EQ(encoded_callback.infos()[1].num_spatial_layers, 3u);
  EXPECT_EQ(encoded_callback.infos()[2].first_active_layer, 0u);
  EXPECT_EQ(encoded_callback.infos()[2].num_spatial_layers, 3u);

  // Send bitrate allocation disabling the first and the last spatial layers.
  webrtc::VideoBitrateAllocation bitrate_allocation;
  bitrate_allocation.SetBitrate(/*spatial_index=*/1, 0, 500'000);
  EXPECT_CALL(*mock_vea_, RequestEncodingParametersChange);
  rtc_encoder_->SetRates(webrtc::VideoEncoder::RateControlParameters(
      bitrate_allocation, tl_codec.maxFramerate));
  EXPECT_CALL(*mock_vea_, Encode).WillOnce([&] {
    media::BitstreamBufferMetadata metadata(
        /*payload_size_bytes=*/100u,
        /*keyframe=*/true, /*timestamp=*/base::Microseconds(1));
    metadata.vp9.emplace();
    metadata.vp9->spatial_idx = 0;
    metadata.vp9->reference_lower_spatial_layers = false;
    metadata.vp9->inter_pic_predicted = true;
    metadata.vp9->spatial_layer_resolutions = {
        gfx::Size(tl_codec.spatialLayers[1].width,
                  tl_codec.spatialLayers[1].height),
    };
    metadata.vp9->begin_active_spatial_layer_index = 1;
    metadata.vp9->end_active_spatial_layer_index = 2;
    client_->BitstreamBufferReady(/*buffer_id=*/1, metadata);
  });
  frame_types[0] = webrtc::VideoFrameType::kVideoFrameDelta;
  EXPECT_EQ(rtc_encoder_->Encode(webrtc::VideoFrame::Builder()
                                     .set_video_frame_buffer(buffer)
                                     .set_rtp_timestamp(1)
                                     .set_timestamp_us(1)
                                     .set_rotation(webrtc::kVideoRotation_0)
                                     .build(),
                                 &frame_types),
            WEBRTC_VIDEO_CODEC_OK);
  RunUntilIdle();
  ASSERT_THAT(encoded_callback.infos(), SizeIs(4));
  EXPECT_EQ(encoded_callback.infos()[3].first_active_layer, 1u);
  EXPECT_EQ(encoded_callback.infos()[3].num_spatial_layers, 2u);

  // Re-enable the bottom layer.
  bitrate_allocation.SetBitrate(0, 0, 100'000);
  EXPECT_CALL(*mock_vea_, RequestEncodingParametersChange);
  rtc_encoder_->SetRates(webrtc::VideoEncoder::RateControlParameters(
      bitrate_allocation, tl_codec.maxFramerate));
  EXPECT_CALL(*mock_vea_, Encode).WillOnce([&] {
    media::BitstreamBufferMetadata metadata(
        /*payload_size_bytes=*/100u,
        /*keyframe=*/true, /*timestamp=*/base::Microseconds(2));
    metadata.vp9.emplace();
    metadata.vp9->end_of_picture = false;
    metadata.vp9->spatial_idx = 0;
    metadata.vp9->inter_pic_predicted = false;
    metadata.vp9->spatial_layer_resolutions = {
        gfx::Size(tl_codec.spatialLayers[0].width,
                  tl_codec.spatialLayers[0].height),
        gfx::Size(tl_codec.spatialLayers[1].width,
                  tl_codec.spatialLayers[1].height),
    };
    metadata.vp9->begin_active_spatial_layer_index = 0;
    metadata.vp9->end_active_spatial_layer_index = 2;
    client_->BitstreamBufferReady(/*buffer_id=*/0, metadata);

    metadata.key_frame = false;
    metadata.vp9.emplace();
    metadata.vp9->end_of_picture = true;
    metadata.vp9->spatial_idx = 1;
    metadata.vp9->inter_pic_predicted = true;
    metadata.vp9->reference_lower_spatial_layers = true;
    client_->BitstreamBufferReady(/*buffer_id=*/1, metadata);
  });
  EXPECT_EQ(rtc_encoder_->Encode(webrtc::VideoFrame::Builder()
                                     .set_video_frame_buffer(buffer)
                                     .set_rtp_timestamp(2)
                                     .set_timestamp_us(2)
                                     .set_rotation(webrtc::kVideoRotation_0)
                                     .build(),
                                 &frame_types),
            WEBRTC_VIDEO_CODEC_OK);
  RunUntilIdle();
  ASSERT_THAT(encoded_callback.infos(), SizeIs(6));
  EXPECT_EQ(encoded_callback.infos()[4].first_active_layer, 0u);
  EXPECT_EQ(encoded_callback.infos()[4].num_spatial_layers, 2u);
  EXPECT_EQ(encoded_callback.infos()[5].first_active_layer, 0u);
  EXPECT_EQ(encoded_callback.infos()[5].num_spatial_layers, 2u);
}

TEST_F(RTCVideoEncoderFrameSizeChangeTest,
       FrameSizeChangeSupportedVP9SpatialLayer) {
  webrtc::VideoCodec tl_codec = GetSVCLayerCodec(webrtc::kVideoCodecVP9,
                                                 /*num_spatial_layers=*/3);
  CreateEncoder(tl_codec.codecType);
  ExpectCreateInitAndDestroyVEA();
  EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
            rtc_encoder_->InitEncode(&tl_codec, kVideoEncoderSettings));

  // Report frame size change support. VEA client notifications must arrive on
  // the encoder thread, so post the call there.
  media::VideoEncoderInfo info;
  info.supports_frame_size_change = true;
  encoder_thread_.task_runner()->PostTask(
      FROM_HERE,
      base::BindOnce(
          &media::VideoEncodeAccelerator::Client::NotifyEncoderInfoChange,
          base::Unretained(client_), info));

  constexpr size_t kNumEncodeFrames = 3u;
  for (size_t i = 0; i < kNumEncodeFrames; i++) {
    const rtc::scoped_refptr<webrtc::I420Buffer> buffer =
        webrtc::I420Buffer::Create(kInputFrameWidth, kInputFrameHeight);
    FillFrameBuffer(buffer);
    std::vector<webrtc::VideoFrameType> frame_types;
    if (i == 0) {
      frame_types.emplace_back(webrtc::VideoFrameType::kVideoFrameKey);
    }
    base::WaitableEvent event;
    EXPECT_CALL(*mock_vea_, Encode(_, _))
        .WillOnce(DoAll(
            Invoke(this,
                   &RTCVideoEncoderTest::ReturnSVCLayerFrameWithVp9Metadata),
            [&event]() { event.Signal(); }));

    EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
              rtc_encoder_->Encode(webrtc::VideoFrame::Builder()
                                       .set_video_frame_buffer(buffer)
                                       .set_rtp_timestamp(0)
                                       .set_timestamp_us(i)
                                       .set_rotation(webrtc::kVideoRotation_0)
                                       .build(),
                                   &frame_types));
    event.Wait();
  }
  EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK, rtc_encoder_->Release());

  // Change frame size.
  tl_codec.width *= 2;
  tl_codec.height *= 2;
  for (auto& sl : tl_codec.spatialLayers) {
    sl.width *= 2;
    sl.height *= 2;
  }
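  // Since frame size change support was reported above, re-initializing at
  // the doubled size should Flush() pending work and reconfigure the existing
  // VEA rather than destroy and recreate it.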
  EXPECT_CALL(*mock_vea_, Flush)
      .WillOnce(Invoke(this, &RTCVideoEncoderTest::FlushComplete));
  ExpectFrameSizeChange(gfx::Size(tl_codec.width, tl_codec.height));
  EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
            rtc_encoder_->InitEncode(&tl_codec, kVideoEncoderSettings));

  ResetSVCLayerFrameTimes();
  for (size_t i = 0; i < kNumEncodeFrames; i++) {
    const rtc::scoped_refptr<webrtc::I420Buffer> buffer =
        webrtc::I420Buffer::Create(tl_codec.width, tl_codec.height);
    FillFrameBuffer(buffer);
    std::vector<webrtc::VideoFrameType> frame_types;
    base::WaitableEvent event;
    if (i == 0) {
      frame_types.emplace_back(webrtc::VideoFrameType::kVideoFrameKey);
    }
    EXPECT_CALL(*mock_vea_, Encode(_, _))
        .WillOnce(DoAll(
            Invoke(this,
                   &RTCVideoEncoderTest::ReturnSVCLayerFrameWithVp9Metadata),
            [&event]() { event.Signal(); }));

    EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
              rtc_encoder_->Encode(webrtc::VideoFrame::Builder()
                                       .set_video_frame_buffer(buffer)
                                       .set_rtp_timestamp(0)
                                       .set_timestamp_us(i + 3)
                                       .set_rotation(webrtc::kVideoRotation_0)
                                       .build(),
                                   &frame_types));
    event.Wait();
  }
}

#endif  // defined(ARCH_CPU_X86_FAMILY) && BUILDFLAG(IS_CHROMEOS_ASH)

TEST_F(RTCVideoEncoderEncodeTest, MetricsProviderSetErrorIsCalledOnError) {}

TEST_F(RTCVideoEncoderEncodeTest, EncodeVp9FrameWithMetricsProvider) {}

TEST_F(RTCVideoEncoderEncodeTest, EncodeFrameWithAdapter) {}

TEST_F(RTCVideoEncoderEncodeTest, EncodedBufferLifetimeExceedsEncoderLifetime) {}

TEST_F(RTCVideoEncoderEncodeTest, EncodeAndDropWhenTooManyFramesInEncoder) {}

#if BUILDFLAG(RTC_USE_H265)
class FakeH265ParameterSetsTracker : public H265ParameterSetsTracker {
 public:
  FakeH265ParameterSetsTracker() = delete;
  explicit FakeH265ParameterSetsTracker(
      H265ParameterSetsTracker::PacketAction action)
      : action_(action) {}
  explicit FakeH265ParameterSetsTracker(rtc::ArrayView<const uint8_t> prefix)
      : action_(H265ParameterSetsTracker::PacketAction::kInsert),
        prefix_(prefix) {
    EXPECT_GT(prefix.size(), 0u);
  }

  FixedBitstream MaybeFixBitstream(
      rtc::ArrayView<const uint8_t> bitstream) override {
    FixedBitstream fixed;
    fixed.action = action_;
    if (prefix_.size() > 0) {
      fixed.bitstream =
          webrtc::EncodedImageBuffer::Create(bitstream.size() + prefix_.size());
      memcpy(fixed.bitstream->data(), prefix_.data(), prefix_.size());
      memcpy(fixed.bitstream->data() + prefix_.size(), bitstream.data(),
             bitstream.size());
    }
    return fixed;
  }

 private:
  H265ParameterSetsTracker::PacketAction action_;
  rtc::ArrayView<const uint8_t> prefix_;
};
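// The production H265ParameterSetsTracker reinserts missing parameter sets
// (VPS/SPS/PPS) into the bitstream; this fake skips the parsing and simply
// prepends the configured prefix.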

TEST_F(RTCVideoEncoderEncodeTest, EncodeH265WithBitstreamFix) {
  class FixedBitstreamVerifier : public webrtc::EncodedImageCallback {
   public:
    explicit FixedBitstreamVerifier(rtc::ArrayView<const uint8_t> prefix,
                                    size_t encoded_image_size)
        : prefix_(prefix), encoded_image_size_(encoded_image_size) {}

    webrtc::EncodedImageCallback::Result OnEncodedImage(
        const webrtc::EncodedImage& encoded_image,
        const webrtc::CodecSpecificInfo* codec_specific_info) override {
      EXPECT_EQ(encoded_image.size(), encoded_image_size_ + prefix_.size());
      EXPECT_THAT(
          rtc::ArrayView<const uint8_t>(encoded_image.data(), prefix_.size()),
          ::testing::ElementsAreArray(prefix_));
      waiter_.Signal();
      return Result(Result::OK);
    }

    void Wait() { waiter_.Wait(); }

   private:
    base::WaitableEvent waiter_;
    rtc::ArrayView<const uint8_t> prefix_;
    size_t encoded_image_size_;
  };

  const webrtc::VideoCodecType codec_type = webrtc::kVideoCodecH265;
  CreateEncoder(codec_type);
  webrtc::VideoCodec codec = GetDefaultCodec();
  codec.codecType = codec_type;
  if (!InitializeOnFirstFrameEnabled()) {
    ExpectCreateInitAndDestroyVEA();
  }

  EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
            rtc_encoder_->InitEncode(&codec, kVideoEncoderSettings));

  uint8_t prefix[] = {0x90, 0x91, 0x92, 0x93};
  rtc::ArrayView<uint8_t> prefix_view =
      rtc::ArrayView<uint8_t>(prefix, sizeof(prefix));
  rtc_encoder_->SetH265ParameterSetsTracker(
      std::make_unique<FakeH265ParameterSetsTracker>(prefix_view));
  FixedBitstreamVerifier bitstream_verifier(prefix_view,
                                            kDefaultEncodedPayloadSize);
  rtc_encoder_->RegisterEncodeCompleteCallback(&bitstream_verifier);

  if (InitializeOnFirstFrameEnabled()) {
    ExpectCreateInitAndDestroyVEA();
  }
  EXPECT_CALL(*mock_vea_, Encode(_, _))
      .WillOnce(Invoke(this, &RTCVideoEncoderTest::ReturnFrameWithTimeStamp));

  const rtc::scoped_refptr<webrtc::I420Buffer> buffer =
      webrtc::I420Buffer::Create(kInputFrameWidth, kInputFrameHeight);
  FillFrameBuffer(buffer);
  std::vector<webrtc::VideoFrameType> frame_types;
  EXPECT_EQ(WEBRTC_VIDEO_CODEC_OK,
            rtc_encoder_->Encode(webrtc::VideoFrame::Builder()
                                     .set_video_frame_buffer(buffer)
                                     .set_rtp_timestamp(0)
                                     .set_timestamp_us(0)
                                     .set_rotation(webrtc::kVideoRotation_0)
                                     .build(),
                                 &frame_types));
  RunUntilIdle();
}
#endif

TEST_F(RTCVideoEncoderFrameSizeChangeTest,
       FrameSizeChangeSupportedReCreateEncoder) {}

TEST_F(RTCVideoEncoderFrameSizeChangeTest, FrameSizeChangeSupportedVP9) {}

TEST_F(RTCVideoEncoderFrameSizeChangeTest,
       FrameSizeChangeSupportedVP9TemporalLayer) {}

TEST_F(RTCVideoEncoderFrameSizeChangeTest, FrameSizeChangeSupported) {}

TEST_F(RTCVideoEncoderFrameSizeChangeTest,
       FrameSizeChangeSameSizeAfterSoftwareFallback) {}

TEST_F(RTCVideoEncoderFrameSizeChangeTest, FrameSizeChangeFlushFailure) {}

TEST_F(RTCVideoEncoderFrameSizeChangeTest, FrameSizeChangeFailure) {}

}  // namespace blink