// Copyright 2017 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "gpu/command_buffer/service/service_transfer_cache.h"

#include <inttypes.h>

#include <utility>

#include "base/auto_reset.h"
#include "base/feature_list.h"
#include "base/functional/bind.h"
#include "base/metrics/histogram_macros.h"
#include "base/strings/stringprintf.h"
#include "base/system/sys_info.h"
#include "base/task/single_thread_task_runner.h"
#include "base/trace_event/memory_dump_manager.h"
#include "cc/paint/image_transfer_cache_entry.h"
#include "gpu/command_buffer/service/service_discardable_manager.h"
#include "gpu/config/gpu_finch_features.h"
#include "third_party/skia/include/core/SkImage.h"
#include "third_party/skia/include/gpu/GrBackendSurface.h"
#include "third_party/skia/include/gpu/ganesh/SkImageGanesh.h"
#include "third_party/skia/include/gpu/ganesh/gl/GrGLBackendSurface.h"
#include "third_party/skia/include/gpu/gl/GrGLTypes.h"
#include "ui/gl/trace_util.h"

namespace gpu {
namespace {

// Put an arbitrary (high) limit on the number of cache entries to prevent
// unbounded handle growth with tiny entries. The value below is an assumed
// default.
static size_t kMaxCacheEntries = 2000;

// Assumed defaults: entries idle for longer than the cutoff are considered
// old, and a prune pass is scheduled at the given interval.
constexpr base::TimeDelta kOldEntryCutoffTimeDelta = base::Seconds(5);
constexpr base::TimeDelta kOldEntryPruneInterval = base::Seconds(30);
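
// A minimal sketch of how the limits above are enforced, for illustration
// only: walk the cache from its least recently used end and drop unlocked
// entries until both the byte budget and kMaxCacheEntries are satisfied. The
// shipped cache is keyed by EntryKey and stores CacheEntryInternal values;
// |SketchEntry| and |EvictUntilWithinBudgetSketch| are hypothetical names
// used purely for exposition.
struct SketchEntry {
  size_t size = 0;
  bool locked = false;
};

// |entries| is assumed to be ordered least recently used first.
[[maybe_unused]] void EvictUntilWithinBudgetSketch(
    std::vector<SketchEntry>* entries,
    size_t* total_size,
    size_t size_limit,
    size_t entry_limit) {
  for (auto it = entries->begin(); it != entries->end();) {
    // Stop as soon as both budgets are satisfied.
    if (*total_size <= size_limit && entries->size() <= entry_limit)
      return;
    // Locked entries are in use by a client and cannot be evicted.
    if (it->locked) {
      ++it;
      continue;
    }
    *total_size -= it->size;
    it = entries->erase(it);
  }
}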

// Alias the image entry to its skia counterpart, taking ownership of the
// memory and preventing double counting.
//
// TODO(ericrk): Move this into ServiceImageTransferCacheEntry - here for now
// due to ui/gl dependency.
void DumpMemoryForImageTransferCacheEntry(
    base::trace_event::ProcessMemoryDump* pmd,
    const std::string& dump_name,
    const cc::ServiceImageTransferCacheEntry* entry) {}
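
// A minimal sketch of the aliasing pattern described above, for illustration
// only: create an allocator dump sized to the entry and add an ownership
// edge to a shared dump representing the backing GL texture so the bytes are
// attributed exactly once. |DumpAliasedGLTextureSketch| and the GUID string
// below are hypothetical; the shipped code derives the texture from the
// entry's SkImage via the SkImageGanesh/GrGLBackendSurface helpers included
// above and uses the GUID helpers in ui/gl/trace_util.h.
[[maybe_unused]] void DumpAliasedGLTextureSketch(
    base::trace_event::ProcessMemoryDump* pmd,
    const std::string& dump_name,
    size_t size_in_bytes,
    uint32_t gl_texture_id) {
  base::trace_event::MemoryAllocatorDump* dump =
      pmd->CreateAllocatorDump(dump_name);
  dump->AddScalar(base::trace_event::MemoryAllocatorDump::kNameSize,
                  base::trace_event::MemoryAllocatorDump::kUnitsBytes,
                  size_in_bytes);

  // Shared dump that identifies the GL texture across dump providers.
  base::trace_event::MemoryAllocatorDumpGuid texture_guid(
      base::StringPrintf("gl-texture-sketch/%u", gl_texture_id));
  pmd->CreateSharedGlobalAllocatorDump(texture_guid);

  // The higher importance makes this dump the owner of the bytes, so the
  // texture's own dump does not count them again.
  constexpr int kImportance = 2;
  pmd->AddOwnershipEdge(dump->guid(), texture_guid, kImportance);
}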

// Alias each texture of the YUV image entry to its Skia texture counterpart,
// taking ownership of the memory and preventing double counting.
//
// Because hardware-decoded images do not have knowledge of the individual plane
// sizes, we allow |plane_sizes| to be empty and report the aggregate size for
// plane_0 and give plane_1 and plane_2 size 0.
//
// TODO(ericrk): Move this into ServiceImageTransferCacheEntry - here for now
// due to ui/gl dependency.
void DumpMemoryForYUVImageTransferCacheEntry(
    base::trace_event::ProcessMemoryDump* pmd,
    const std::string& dump_base_name,
    const cc::ServiceImageTransferCacheEntry* entry) {}
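
// Illustrative sketch of the plane-size attribution described above (a
// hypothetical helper, not the shipped code): when per-plane sizes are
// unavailable (the hardware-decode case), the aggregate size is reported for
// plane 0 and the remaining planes report 0, so the total is still counted
// exactly once.
[[maybe_unused]] size_t PlaneDumpSizeSketch(
    const std::vector<size_t>& plane_sizes,
    size_t plane_index,
    size_t total_size) {
  if (!plane_sizes.empty())
    return plane_sizes[plane_index];
  return plane_index == 0 ? total_size : 0;
}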

}  // namespace

ServiceTransferCache::CacheEntryInternal::CacheEntryInternal(
    std::optional<ServiceDiscardableHandle> handle,
    std::unique_ptr<cc::ServiceTransferCacheEntry> entry)
    : handle(handle), entry(std::move(entry)) {}

ServiceTransferCache::CacheEntryInternal::~CacheEntryInternal() {}

ServiceTransferCache::CacheEntryInternal::CacheEntryInternal(
    CacheEntryInternal&& other) = default;

ServiceTransferCache::CacheEntryInternal&
ServiceTransferCache::CacheEntryInternal::operator=(
    CacheEntryInternal&& other) = default;

ServiceTransferCache::ServiceTransferCache(
    const GpuPreferences& preferences,
    base::RepeatingClosure flush_callback) {}

ServiceTransferCache::~ServiceTransferCache() {}

bool ServiceTransferCache::CreateLockedEntry(
    const EntryKey& key,
    ServiceDiscardableHandle handle,
    GrDirectContext* context,
    skgpu::graphite::Recorder* graphite_recorder,
    base::span<uint8_t> data) {}

void ServiceTransferCache::CreateLocalEntry(
    const EntryKey& key,
    std::unique_ptr<cc::ServiceTransferCacheEntry> entry) {}

bool ServiceTransferCache::UnlockEntry(const EntryKey& key) {}

template <typename Iterator>
Iterator ServiceTransferCache::ForceDeleteEntry(Iterator it) {}

bool ServiceTransferCache::DeleteEntry(const EntryKey& key) {}

cc::ServiceTransferCacheEntry* ServiceTransferCache::GetEntry(
    const EntryKey& key) {}

void ServiceTransferCache::EnforceLimits() {}

void ServiceTransferCache::MaybePostPruneOldEntries() {}

void ServiceTransferCache::PruneOldEntries() {}

int ServiceTransferCache::RemoveOldEntriesUntil(
    base::FunctionRef<bool(EntryCache::reverse_iterator)> should_stop) {}

void ServiceTransferCache::PurgeMemory(
    base::MemoryPressureListener::MemoryPressureLevel memory_pressure_level) {}

void ServiceTransferCache::DeleteAllEntriesForDecoder(int decoder_id) {}

bool ServiceTransferCache::CreateLockedHardwareDecodedImageEntry(
    int decoder_id,
    uint32_t entry_id,
    ServiceDiscardableHandle handle,
    GrDirectContext* context,
    std::vector<sk_sp<SkImage>> plane_images,
    SkYUVAInfo::PlaneConfig plane_config,
    SkYUVAInfo::Subsampling subsampling,
    SkYUVColorSpace yuv_color_space,
    size_t buffer_byte_size,
    bool needs_mips) {}

bool ServiceTransferCache::OnMemoryDump(
    const base::trace_event::MemoryDumpArgs& args,
    base::trace_event::ProcessMemoryDump* pmd) {}

ServiceTransferCache::EntryKey::EntryKey(int decoder_id,
                                         cc::TransferCacheEntryType entry_type,
                                         uint32_t entry_id)
    : decoder_id(decoder_id), entry_type(entry_type), entry_id(entry_id) {}

}  // namespace gpu