// chromium/media/gpu/chromeos/image_processor_test.cc

// Copyright 2019 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "media/gpu/chromeos/image_processor.h"

#include <sys/mman.h>
#include <sys/poll.h>

#include <memory>
#include <string>
#include <tuple>
#include <vector>

#include "base/bits.h"
#include "base/command_line.h"
#include "base/files/file_path.h"
#include "base/files/file_util.h"
#include "base/hash/md5.h"
#include "base/rand_util.h"
#include "base/run_loop.h"
#include "base/test/launcher/unit_test_launcher.h"
#include "base/test/test_suite.h"
#include "build/build_config.h"
#include "build/chromeos_buildflags.h"
#include "components/viz/common/resources/shared_image_format.h"
#include "gpu/command_buffer/common/mailbox.h"
#include "gpu/command_buffer/common/shared_image_usage.h"
#include "gpu/command_buffer/service/shared_context_state.h"
#include "gpu/command_buffer/service/shared_image/shared_image_backing.h"
#include "gpu/command_buffer/service/shared_image/shared_image_factory.h"
#include "gpu/command_buffer/service/shared_image/shared_image_manager.h"
#include "gpu/command_buffer/service/shared_image/shared_image_representation.h"
#include "gpu/config/gpu_feature_info.h"
#include "media/base/video_frame.h"
#include "media/base/video_frame_layout.h"
#include "media/base/video_types.h"
#include "media/gpu/chromeos/chromeos_compressed_gpu_memory_buffer_video_frame_utils.h"
#include "media/gpu/chromeos/fourcc.h"
#include "media/gpu/chromeos/gl_image_processor_backend.h"
#include "media/gpu/chromeos/image_processor_backend.h"
#include "media/gpu/chromeos/image_processor_factory.h"
#include "media/gpu/chromeos/libyuv_image_processor_backend.h"
#include "media/gpu/chromeos/platform_video_frame_utils.h"
#include "media/gpu/chromeos/vulkan_overlay_adaptor.h"
#include "media/gpu/test/image.h"
#include "media/gpu/test/image_processor/image_processor_client.h"
#include "media/gpu/test/image_quality_metrics.h"
#include "media/gpu/test/video_frame_file_writer.h"
#include "media/gpu/test/video_frame_helpers.h"
#include "media/gpu/test/video_frame_validator.h"
#include "media/gpu/test/video_test_environment.h"
#if BUILDFLAG(USE_V4L2_CODEC)
#include "media/gpu/v4l2/v4l2_device.h"
#include "media/gpu/v4l2/v4l2_image_processor_backend.h"
#endif
#if BUILDFLAG(USE_VAAPI)
#include "media/gpu/vaapi/vaapi_image_processor_backend.h"
#endif
#include "media/gpu/video_frame_mapper.h"
#include "media/gpu/video_frame_mapper_factory.h"
#include "mojo/core/embedder/embedder.h"
#include "testing/gtest/include/gtest/gtest.h"
#include "third_party/libyuv/include/libyuv.h"
#include "ui/gfx/geometry/rect.h"
#include "ui/gfx/geometry/rect_f.h"
#include "ui/gfx/geometry/size.h"
#include "ui/gfx/overlay_transform.h"
#include "ui/gl/gl_bindings.h"
#include "ui/gl/gl_context.h"
#include "ui/gl/gl_surface.h"
#include "ui/gl/gl_utils.h"
#include "ui/gl/init/gl_factory.h"
#include "ui/gl/test/gl_surface_test_support.h"

#if BUILDFLAG(IS_OZONE)
#include "ui/ozone/public/ozone_platform.h"
#endif

// MM21 is a MediaTek tiled NV12-like pixel format whose luma plane is laid
// out in 16x32 tiles. These dimensions are used to DCHECK that test frame
// sizes are tile-aligned (see CreateRandomMM21Frame). The original lines
// defined the macros with no replacement value, which makes the
// base::bits::AlignUp() call sites fail to compile.
#define MM21_TILE_WIDTH 16
#define MM21_TILE_HEIGHT 32

namespace media {
namespace {

// NOTE(review): the initializers and bodies in this span appear elided or
// garbled in this view (e.g. `=;`, `{}` and a stray `#endif` inside a
// declaration). Bytes are preserved below; restore from upstream before
// building.

// Usage banner printed when the test binary is invoked with bad arguments.
const char* usage_msg =;

// Help text describing the supported command-line switches.
// NOTE(review): this `#endif` is orphaned — it sits inside a declaration and
// has no matching `#if` in view; likely an extraction artifact.
const char* help_msg =
#endif  // BUILDFLAG(USE_VAAPI)
    ;

// When true, processed images are written to disk for debugging.
bool g_save_images =;
// Directory the test images are loaded from.
base::FilePath g_source_directory =;

// BackendType defines an enum for specifying a particular backend.
enum class BackendType {};

// Human-readable name for a BackendType, used in skip/log messages.
const char* ToString(BackendType backend) {}

// Creates a CreateBackendCB for the specified BackendType. If backend is not
// set, then returns std::nullopt.
std::optional<ImageProcessor::CreateBackendCB> GetCreateBackendCB(
    std::optional<BackendType> backend) {}

// Backend forced via the command line; nullopt means "auto-select".
std::optional<BackendType> g_backend_type;

// Resolves |filename| against g_source_directory.
base::FilePath BuildSourceFilePath(const base::FilePath& filename) {}

// NOTE(review): the string initializers below are truncated in this view
// (`=);`) — each should name a FILE_PATH_LITERAL test image. Bytes are
// preserved; restore from upstream before building.

// Files for pixel format conversion test.
const base::FilePath::CharType* kNV12Image =);
const base::FilePath::CharType* kYV12Image =);
const base::FilePath::CharType* kI420Image =);
const base::FilePath::CharType* kI422Image =);
const base::FilePath::CharType* kYUYVImage =);

// Files for scaling test.
const base::FilePath::CharType* kNV12Image720P =);
const base::FilePath::CharType* kNV12Image360P =);
const base::FilePath::CharType* kNV12Image270P =);
const base::FilePath::CharType* kNV12Image180P =);
const base::FilePath::CharType* kNV12Image360PIn480P =);
const base::FilePath::CharType* kI422Image360P =);
const base::FilePath::CharType* kYUYVImage360P =);
const base::FilePath::CharType* kI420Image360P =);
const base::FilePath::CharType* kI420Image270P =);

// Chroma subsampling classification used to decide which conversions a
// backend can perform. NOTE(review): enumerators elided in this view.
enum class YuvSubsampling {};

// Maps a VideoPixelFormat to its YuvSubsampling class.
YuvSubsampling ToYuvSubsampling(VideoPixelFormat format) {}

#if BUILDFLAG(IS_CHROMEOS_ASH)
// Returns whether |format| is one of the pixel formats this suite exercises
// for Dmabuf- and GpuMemoryBuffer-backed frames (NV12 and YV12 only).
bool IsFormatTestedForDmabufAndGbm(VideoPixelFormat format) {
  return format == PIXEL_FORMAT_NV12 || format == PIXEL_FORMAT_YV12;
}
#endif  // BUILDFLAG(IS_CHROMEOS_ASH)

#if BUILDFLAG(USE_V4L2_CODEC)
bool SupportsNecessaryGLExtension() {
  bool ret;

  scoped_refptr<gl::GLSurface> gl_surface =
      gl::init::CreateOffscreenGLSurface(gl::GetDefaultDisplay(), gfx::Size());
  if (!gl_surface) {
    LOG(ERROR) << "Error creating GL surface";
    return false;
  }
  scoped_refptr<gl::GLContext> gl_context = gl::init::CreateGLContext(
      nullptr, gl_surface.get(), gl::GLContextAttribs());
  if (!gl_context) {
    LOG(ERROR) << "Error creating GL context";
    return false;
  }
  if (!gl_context->MakeCurrent(gl_surface.get())) {
    LOG(ERROR) << "Error making GL context current";
    return false;
  }
  ret = gl_context->HasExtension("GL_EXT_YUV_target");
  gl_context->ReleaseCurrent(gl_surface.get());

  return ret;
}

// Allocates an NV12 frame of |size| (visible rect == coded size) with a null
// timestamp, backed either by a GpuMemoryBuffer or by dmabufs depending on
// |type|. Any other storage type is a caller error (DCHECKed).
scoped_refptr<VideoFrame> CreateNV12Frame(const gfx::Size& size,
                                          VideoFrame::StorageType type) {
  constexpr base::TimeDelta kNullTimestamp;
  const gfx::Rect visible_rect(size);
  if (type != VideoFrame::STORAGE_GPU_MEMORY_BUFFER) {
    DCHECK(type == VideoFrame::STORAGE_DMABUFS);
    return CreatePlatformVideoFrame(VideoPixelFormat::PIXEL_FORMAT_NV12, size,
                                    visible_rect, size, kNullTimestamp,
                                    gfx::BufferUsage::SCANOUT_CPU_READ_WRITE);
  }
  return CreateGpuMemoryBufferVideoFrame(
      VideoPixelFormat::PIXEL_FORMAT_NV12, size, visible_rect, size,
      kNullTimestamp, gfx::BufferUsage::SCANOUT_CPU_READ_WRITE);
}

// Allocates an NV12 frame of |size| with |type| storage and fills its visible
// Y and UV planes with random bytes, standing in for arbitrary MM21 tiled
// content. |size| must already be MM21-tile aligned. Returns nullptr if the
// frame cannot be allocated or mapped.
scoped_refptr<VideoFrame> CreateRandomMM21Frame(const gfx::Size& size,
                                                VideoFrame::StorageType type) {
  DCHECK_EQ(static_cast<unsigned int>(size.width()),
            base::bits::AlignUp(static_cast<unsigned int>(size.width()),
                                MM21_TILE_WIDTH));
  DCHECK_EQ(static_cast<unsigned int>(size.height()),
            base::bits::AlignUp(static_cast<unsigned int>(size.height()),
                                MM21_TILE_HEIGHT));

  scoped_refptr<VideoFrame> frame = CreateNV12Frame(size, type);
  if (!frame) {
    LOG(ERROR) << "Failed to create MM21 frame";
    return nullptr;
  }

  // The MM21 path only makes sense for V4L2, so we should never get an Intel
  // media compressed buffer here.
  CHECK(!IsIntelMediaCompressedModifier(frame->layout().modifier()));
  std::unique_ptr<VideoFrameMapper> mapper =
      VideoFrameMapperFactory::CreateMapper(
          VideoPixelFormat::PIXEL_FORMAT_NV12, type,
          /*force_linear_buffer_mapper=*/true,
          /*must_support_intel_media_compressed_buffers=*/false);
  if (!mapper) {
    LOG(ERROR) << "Unable to create a VideoFrameMapper";
    return nullptr;
  }
  scoped_refptr<VideoFrame> mapped_frame =
      mapper->Map(frame, PROT_READ | PROT_WRITE);
  if (!mapped_frame) {
    LOG(ERROR) << "Unable to map MM21 frame";
    return nullptr;
  }

  // Fill the visible region row by row. The UV plane is vertically
  // subsampled (one UV row per two Y rows), so it is written to and advanced
  // only on even rows.
  uint8_t* y_row = mapped_frame->GetWritableVisibleData(VideoFrame::Plane::kY);
  uint8_t* uv_row =
      mapped_frame->GetWritableVisibleData(VideoFrame::Plane::kUV);
  for (int y = 0; y < size.height(); ++y) {
    const bool even_row = (y % 2 == 0);
    for (int x = 0; x < size.width(); ++x) {
      y_row[x] = base::RandInt(/*min=*/0, /*max=*/255);
      if (even_row) {
        uv_row[x] = base::RandInt(/*min=*/0, /*max=*/255);
      }
    }
    y_row += mapped_frame->stride(VideoFrame::Plane::kY);
    if (even_row) {
      uv_row += mapped_frame->stride(VideoFrame::Plane::kUV);
    }
  }

  return frame;
}

bool CompareNV12VideoFrames(scoped_refptr<VideoFrame> test_frame,
                            scoped_refptr<VideoFrame> golden_frame) {
  if (test_frame->coded_size() != golden_frame->coded_size() ||
      test_frame->visible_rect() != golden_frame->visible_rect() ||
      test_frame->format() != VideoPixelFormat::PIXEL_FORMAT_NV12 ||
      golden_frame->format() != VideoPixelFormat::PIXEL_FORMAT_NV12) {
    return false;
  }

  // We run this test for the V4L2 path only, so we should never get Intel media
  // compressed frames here.
  CHECK(!IsIntelMediaCompressedModifier(test_frame->layout().modifier()));
  CHECK(!IsIntelMediaCompressedModifier(golden_frame->layout().modifier()));

  std::unique_ptr<VideoFrameMapper> test_frame_mapper =
      VideoFrameMapperFactory::CreateMapper(
          VideoPixelFormat::PIXEL_FORMAT_NV12, test_frame->storage_type(),
          /*force_linear_buffer_mapper=*/true,
          /*must_support_intel_media_compressed_buffers=*/false);
  if (!test_frame_mapper) {
    return false;
  }
  std::unique_ptr<VideoFrameMapper> golden_frame_mapper =
      VideoFrameMapperFactory::CreateMapper(
          VideoPixelFormat::PIXEL_FORMAT_NV12, golden_frame->storage_type(),
          /*force_linear_buffer_mapper=*/true,
          /*must_support_intel_media_compressed_buffers=*/false);
  if (!golden_frame_mapper) {
    return false;
  }
  scoped_refptr<VideoFrame> mapped_test_frame =
      test_frame_mapper->Map(test_frame, PROT_READ | PROT_WRITE);
  if (!mapped_test_frame) {
    LOG(ERROR) << "Unable to map test frame";
    return false;
  }
  scoped_refptr<VideoFrame> mapped_golden_frame =
      golden_frame_mapper->Map(golden_frame, PROT_READ | PROT_WRITE);
  if (!mapped_golden_frame) {
    LOG(ERROR) << "Unable to map golden frame";
    return false;
  }

  const uint8_t* test_y_plane =
      mapped_test_frame->visible_data(VideoFrame::Plane::kY);
  const uint8_t* test_uv_plane =
      mapped_test_frame->visible_data(VideoFrame::Plane::kUV);
  const uint8_t* golden_y_plane =
      mapped_golden_frame->visible_data(VideoFrame::Plane::kY);
  const uint8_t* golden_uv_plane =
      mapped_golden_frame->visible_data(VideoFrame::Plane::kUV);
  for (int y = 0; y < test_frame->coded_size().height(); y++) {
    for (int x = 0; x < test_frame->coded_size().width(); x++) {
      if (test_y_plane[x] != golden_y_plane[x]) {
        return false;
      }

      if (y % 2 == 0) {
        if (test_uv_plane[x] != golden_uv_plane[x]) {
          return false;
        }
      }
    }
    test_y_plane += mapped_test_frame->stride(VideoFrame::Plane::kY);
    golden_y_plane += mapped_golden_frame->stride(VideoFrame::Plane::kY);
    if (y % 2 == 0) {
      test_uv_plane += mapped_test_frame->stride(VideoFrame::Plane::kUV);
      golden_uv_plane += mapped_golden_frame->stride(VideoFrame::Plane::kUV);
    }
  }

  return true;
}
#endif

// Parameterized fixture for conversion/scaling tests. The tuple parameter
// holds (input image path, expected output image path), both resolved via
// BuildSourceFilePath(). NOTE(review): the fixture body (e.g. the
// CreateImageProcessorClient helper used by the tests) appears elided in
// this view.
class ImageProcessorParamTest
    : public ::testing::Test,
      public ::testing::WithParamInterface<
          std::tuple<base::FilePath, base::FilePath>> {};

// Converts one frame between CPU-memory-backed frames.
// NOTE(review): test body elided in this view.
TEST_P(ImageProcessorParamTest, ConvertOneTime_MemToMem) {}

#if BUILDFLAG(IS_CHROMEOS_ASH)
// We don't yet have the function to create Dmabuf-backed VideoFrame on
// platforms except ChromeOS. So MemToDmabuf test is limited on ChromeOS.
TEST_P(ImageProcessorParamTest, ConvertOneTime_DmabufToMem) {
  // Fully load the input image; only the metadata of the output image is
  // required so its checksum can be compared against the processed result.
  test::Image src_image(BuildSourceFilePath(std::get<0>(GetParam())));
  test::Image dst_image(BuildSourceFilePath(std::get<1>(GetParam())));
  ASSERT_TRUE(src_image.Load());
  ASSERT_TRUE(dst_image.LoadMetadata());
  if (!IsFormatTestedForDmabufAndGbm(src_image.PixelFormat()))
    GTEST_SKIP() << "Skipping Dmabuf format " << src_image.PixelFormat();
  // NV12->NV12 cases are scaling; those run on GpuMemoryBuffer-backed
  // frames, while format conversions use CPU-owned memory.
  const bool is_scaling = (src_image.PixelFormat() == PIXEL_FORMAT_NV12 &&
                           dst_image.PixelFormat() == PIXEL_FORMAT_NV12);
  const auto frame_storage = is_scaling ? VideoFrame::STORAGE_GPU_MEMORY_BUFFER
                                        : VideoFrame::STORAGE_OWNED_MEMORY;
  auto client = CreateImageProcessorClient(src_image, frame_storage, &dst_image,
                                           frame_storage);
  if (!client && g_backend_type.has_value()) {
    GTEST_SKIP() << "Forced backend " << ToString(*g_backend_type)
                 << " does not support this test";
  }
  ASSERT_TRUE(client);

  client->Process(src_image, dst_image);

  EXPECT_TRUE(client->WaitUntilNumImageProcessed(1u));
  EXPECT_EQ(client->GetErrorCount(), 0u);
  EXPECT_EQ(client->GetNumOfProcessedImages(), 1u);
  EXPECT_TRUE(client->WaitForFrameProcessors());
}

TEST_P(ImageProcessorParamTest, ConvertOneTime_DmabufToDmabuf) {
  // Fully load the input image; only the metadata of the output image is
  // required so its checksum can be compared against the processed result.
  test::Image src_image(BuildSourceFilePath(std::get<0>(GetParam())));
  test::Image dst_image(BuildSourceFilePath(std::get<1>(GetParam())));
  ASSERT_TRUE(src_image.Load());
  ASSERT_TRUE(dst_image.LoadMetadata());
  // Both endpoints must be formats we exercise on dmabuf-backed frames.
  if (!IsFormatTestedForDmabufAndGbm(src_image.PixelFormat()))
    GTEST_SKIP() << "Skipping Dmabuf format " << src_image.PixelFormat();
  if (!IsFormatTestedForDmabufAndGbm(dst_image.PixelFormat()))
    GTEST_SKIP() << "Skipping Dmabuf format " << dst_image.PixelFormat();

  auto client =
      CreateImageProcessorClient(src_image, VideoFrame::STORAGE_DMABUFS,
                                 &dst_image, VideoFrame::STORAGE_DMABUFS);
  if (!client && g_backend_type.has_value()) {
    GTEST_SKIP() << "Forced backend " << ToString(*g_backend_type)
                 << " does not support this test";
  }
  ASSERT_TRUE(client);
  client->Process(src_image, dst_image);

  EXPECT_TRUE(client->WaitUntilNumImageProcessed(1u));
  EXPECT_EQ(client->GetErrorCount(), 0u);
  EXPECT_EQ(client->GetNumOfProcessedImages(), 1u);
  EXPECT_TRUE(client->WaitForFrameProcessors());
}

// Although GpuMemoryBuffer is a cross platform class, code for image processor
// test is designed only for ChromeOS. So this test runs on ChromeOS only.
// Although GpuMemoryBuffer is a cross platform class, code for image processor
// test is designed only for ChromeOS. So this test runs on ChromeOS only.
TEST_P(ImageProcessorParamTest, ConvertOneTime_GmbToGmb) {
  // Fully load the input image; only the metadata of the output image is
  // required so its checksum can be compared against the processed result.
  test::Image src_image(BuildSourceFilePath(std::get<0>(GetParam())));
  test::Image dst_image(BuildSourceFilePath(std::get<1>(GetParam())));
  ASSERT_TRUE(src_image.Load());
  ASSERT_TRUE(dst_image.LoadMetadata());
  // Both endpoints must be formats we exercise on GpuMemoryBuffer frames.
  if (!IsFormatTestedForDmabufAndGbm(src_image.PixelFormat())) {
    GTEST_SKIP() << "Skipping GpuMemoryBuffer format "
                 << src_image.PixelFormat();
  }
  if (!IsFormatTestedForDmabufAndGbm(dst_image.PixelFormat())) {
    GTEST_SKIP() << "Skipping GpuMemoryBuffer format "
                 << dst_image.PixelFormat();
  }

  auto client = CreateImageProcessorClient(
      src_image, VideoFrame::STORAGE_GPU_MEMORY_BUFFER, &dst_image,
      VideoFrame::STORAGE_GPU_MEMORY_BUFFER);
  if (!client && g_backend_type.has_value()) {
    GTEST_SKIP() << "Forced backend " << ToString(*g_backend_type)
                 << " does not support this test";
  }
  ASSERT_TRUE(client);
  client->Process(src_image, dst_image);

  EXPECT_TRUE(client->WaitUntilNumImageProcessed(1u));
  EXPECT_EQ(client->GetErrorCount(), 0u);
  EXPECT_EQ(client->GetNumOfProcessedImages(), 1u);
  EXPECT_TRUE(client->WaitForFrameProcessors());
}
#endif  // BUILDFLAG(IS_CHROMEOS_ASH)

// NOTE(review): all INSTANTIATE_TEST_SUITE_P argument lists are elided in
// this view — each should supply a suite prefix, the fixture name, and a
// ::testing::Values(...) list of (input, output) image-path tuples built
// from the k*Image constants above. Bytes preserved; restore from upstream.
INSTANTIATE_TEST_SUITE_P();

INSTANTIATE_TEST_SUITE_P();

INSTANTIATE_TEST_SUITE_P();

INSTANTIATE_TEST_SUITE_P();

INSTANTIATE_TEST_SUITE_P();

INSTANTIATE_TEST_SUITE_P();

// Crop 360P frame from 480P.
INSTANTIATE_TEST_SUITE_P();
// Crop 360p frame from 480P and scale the area to 270P.
INSTANTIATE_TEST_SUITE_P();

#if BUILDFLAG(IS_CHROMEOS_ASH)
// TODO(hiroh): Add more tests.
// MEM->DMABUF (V4L2VideoEncodeAccelerator),
#endif

#if BUILDFLAG(USE_V4L2_CODEC)
// Detiles the same random MM21 frame with both the LibYUV backend and the GL
// backend and asserts the two NV12 outputs are byte-identical, using LibYUV
// as the reference implementation for the GL path.
TEST(ImageProcessorBackendTest, CompareLibYUVAndGLBackendsForMM21Image) {
  if (!SupportsNecessaryGLExtension()) {
    GTEST_SKIP() << "Skipping GL Backend test, unsupported platform.";
  }
  // This test constructs both backends directly, so it cannot honor a
  // command-line-forced backend.
  if (g_backend_type.has_value()) {
    GTEST_SKIP() << "Skipping test since a particular backend was specified in "
                    "the command line arguments.";
  }

  // 1920x1088 (not 1080) so the height is MM21-tile aligned.
  constexpr gfx::Size kTestImageSize(1920, 1088);
  constexpr gfx::Rect kTestImageVisibleRect(kTestImageSize);
  const ImageProcessor::PixelLayoutCandidate candidate = {Fourcc(Fourcc::MM21),
                                                          kTestImageSize};
  std::vector<ImageProcessor::PixelLayoutCandidate> candidates = {candidate};

  auto client_task_runner = base::SequencedTaskRunner::GetCurrentDefault();
  base::RunLoop run_loop;
  base::RepeatingClosure quit_closure = run_loop.QuitClosure();
  // Set by |error_cb| if either backend reports an error; also quits the run
  // loop so the test fails fast instead of hanging.
  bool image_processor_error = false;
  ImageProcessor::ErrorCB error_cb = base::BindRepeating(
      [](scoped_refptr<base::SequencedTaskRunner> client_task_runner,
         base::RepeatingClosure quit_closure, bool* image_processor_error) {
        CHECK(client_task_runner->RunsTasksInCurrentSequence());
        *image_processor_error = true;
        quit_closure.Run();
      },
      client_task_runner, quit_closure, &image_processor_error);
  // Both backends are forced to produce NV12 regardless of the candidates
  // they advertise.
  ImageProcessorFactory::PickFormatCB pick_format_cb = base::BindRepeating(
      [](const std::vector<Fourcc>&, std::optional<Fourcc>) {
        return std::make_optional<Fourcc>(Fourcc::NV12);
      });

  std::unique_ptr<ImageProcessor> libyuv_image_processor =
      ImageProcessorFactory::
          CreateLibYUVImageProcessorWithInputCandidatesForTesting(
              candidates, kTestImageVisibleRect, kTestImageSize,
              /*num_buffers=*/1, client_task_runner, pick_format_cb, error_cb);
  ASSERT_TRUE(libyuv_image_processor)
      << "Error creating LibYUV image processor";
  std::unique_ptr<ImageProcessor> gl_image_processor = ImageProcessorFactory::
      CreateGLImageProcessorWithInputCandidatesForTesting(
          candidates, kTestImageVisibleRect, kTestImageSize, /*num_buffers=*/1,
          client_task_runner, pick_format_cb, error_cb);
  ASSERT_TRUE(gl_image_processor) << "Error creating GLImageProcessor";

  // One shared input frame, one output frame per backend.
  scoped_refptr<VideoFrame> input_frame =
      CreateRandomMM21Frame(kTestImageSize, VideoFrame::STORAGE_DMABUFS);
  ASSERT_TRUE(input_frame) << "Error creating input frame";
  scoped_refptr<VideoFrame> gl_output_frame =
      CreateNV12Frame(kTestImageSize, VideoFrame::STORAGE_GPU_MEMORY_BUFFER);
  ASSERT_TRUE(gl_output_frame) << "Error creating GL output frame";
  scoped_refptr<VideoFrame> libyuv_output_frame =
      CreateNV12Frame(kTestImageSize, VideoFrame::STORAGE_GPU_MEMORY_BUFFER);
  ASSERT_TRUE(libyuv_output_frame) << "Error creating LibYUV output frame";

  // Each ready callback rebinds its output frame and decrements this; the
  // run loop quits when both backends have delivered a frame.
  int outstanding_processors = 2;
  ImageProcessor::FrameReadyCB libyuv_callback = base::BindOnce(
      [](scoped_refptr<base::SequencedTaskRunner> client_task_runner,
         base::RepeatingClosure quit_closure, int* outstanding_processors,
         scoped_refptr<VideoFrame>* libyuv_output_frame,
         scoped_refptr<VideoFrame> frame) {
        CHECK(client_task_runner->RunsTasksInCurrentSequence());
        *libyuv_output_frame = std::move(frame);
        if (!(--*outstanding_processors)) {
          quit_closure.Run();
        }
      },
      client_task_runner, quit_closure, &outstanding_processors,
      &libyuv_output_frame);

  ImageProcessor::FrameReadyCB gl_callback = base::BindOnce(
      [](scoped_refptr<base::SequencedTaskRunner> client_task_runner,
         base::RepeatingClosure quit_closure, int* outstanding_processors,
         scoped_refptr<VideoFrame>* gl_output_frame,
         scoped_refptr<VideoFrame> frame) {
        CHECK(client_task_runner->RunsTasksInCurrentSequence());
        *gl_output_frame = std::move(frame);
        if (!(--*outstanding_processors)) {
          quit_closure.Run();
        }
      },
      client_task_runner, quit_closure, &outstanding_processors,
      &gl_output_frame);

  libyuv_image_processor->Process(input_frame, libyuv_output_frame,
                                  std::move(libyuv_callback));
  gl_image_processor->Process(input_frame, gl_output_frame,
                              std::move(gl_callback));

  run_loop.Run();

  ASSERT_FALSE(image_processor_error);
  ASSERT_TRUE(libyuv_output_frame);
  ASSERT_TRUE(gl_output_frame);
  ASSERT_TRUE(CompareNV12VideoFrames(gl_output_frame, libyuv_output_frame));
}
#endif

}  // namespace
}  // namespace media

// Argument handler for setting a forced ImageProcessor backend
// NOTE(review): implementation elided in this view — presumably it matches
// |arg| against a switch name and stores |type| into g_backend_type.
static int HandleForcedBackendArgument(const std::string& arg,
                                       media::BackendType type) {}

// Test entry point. NOTE(review): implementation elided in this view —
// presumably it parses command-line switches, initializes the test
// environment (g_env), and runs the test suite.
int main(int argc, char** argv) {}