chromium/gpu/command_buffer/client/fenced_allocator.cc

// Copyright 2011 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// This file contains the implementation of the FencedAllocator class.

#include "gpu/command_buffer/client/fenced_allocator.h"

#include <stdint.h>

#include <algorithm>

#include "base/not_fatal_until.h"
#include "base/numerics/clamped_math.h"
#include "gpu/command_buffer/client/cmd_buffer_helper.h"

namespace gpu {

namespace {

// Round down to the largest multiple of kAllocAlignment no greater than |size|.
uint32_t RoundDown(uint32_t size) {
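  // Sketch: relies on kAllocAlignment being a power of two, so masking off
  // the low bits rounds down.
  return size & ~(FencedAllocator::kAllocAlignment - 1);
}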

// Round up to the smallest multiple of kAllocAlignment no smaller than |size|.
base::CheckedNumeric<uint32_t> RoundUp(uint32_t size) {
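  // Sketch: CheckedNumeric propagates any overflow from the addition to the
  // caller instead of silently wrapping.
  return (base::CheckedNumeric<uint32_t>(size) +
          (FencedAllocator::kAllocAlignment - 1)) &
         ~(FencedAllocator::kAllocAlignment - 1);
}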

}  // namespace

FencedAllocator::FencedAllocator(uint32_t size, CommandBufferHelper* helper)
    : helper_(helper) {
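  // Sketch: the allocatable region starts as a single FREE block covering the
  // whole buffer, rounded down to the allocation alignment.
  Block block = {FREE, 0, RoundDown(size), kUnusedToken};
  blocks_.push_back(block);
}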

FencedAllocator::~FencedAllocator() {
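  // Sketch: wait on any pending tokens so the buffer is quiescent before it
  // goes away; by then everything should have collapsed into one FREE block.
  for (uint32_t i = 0; i < blocks_.size(); ++i) {
    if (blocks_[i].state == FREE_PENDING_TOKEN)
      i = WaitForTokenAndFreeBlock(i);
  }
  DCHECK_EQ(blocks_.size(), 1u);
  DCHECK_EQ(blocks_[0].state, FREE);
}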

// Looks for a non-allocated block that is big enough. Searches the FREE
// blocks first (for direct usage), first-fit, then the FREE_PENDING_TOKEN
// blocks, waiting on their tokens. The current implementation isn't smart
// about optimizing what to wait for: it just walks the blocks in order
// (first-fit as well).
FencedAllocator::Offset FencedAllocator::Alloc(uint32_t size) {
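  // Sketch of the two-pass search described above.
  // A size of 0 is not allowed, and would otherwise round up to 0.
  if (size == 0)
    return kInvalidOffset;

  // Round up to the allocation alignment, failing on overflow.
  uint32_t aligned_size = 0;
  if (!RoundUp(size).AssignIfValid(&aligned_size))
    return kInvalidOffset;

  // First pass: try to allocate directly from a FREE block.
  for (uint32_t i = 0; i < blocks_.size(); ++i) {
    Block& block = blocks_[i];
    if (block.state == FREE && block.size >= aligned_size)
      return AllocInBlock(i, aligned_size);
  }

  // Second pass: wait on FREE_PENDING_TOKEN blocks, in order, until one
  // (possibly after collapsing with its neighbors) is big enough.
  for (uint32_t i = 0; i < blocks_.size(); ++i) {
    if (blocks_[i].state != FREE_PENDING_TOKEN)
      continue;
    i = WaitForTokenAndFreeBlock(i);
    if (blocks_[i].size >= aligned_size)
      return AllocInBlock(i, aligned_size);
  }
  return kInvalidOffset;
}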

// Looks for the corresponding block, marks it FREE, and collapses it with
// neighboring FREE blocks if necessary.
void FencedAllocator::Free(FencedAllocator::Offset offset) {
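  BlockIndex index = GetBlockByOffset(offset);
  DCHECK_NE(blocks_[index].state, FREE);
  blocks_[index].state = FREE;
  CollapseFreeBlock(index);
}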

// Looks for the corresponding block and marks it FREE_PENDING_TOKEN,
// recording the token to wait for.
void FencedAllocator::FreePendingToken(FencedAllocator::Offset offset,
                                       int32_t token) {
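  BlockIndex index = GetBlockByOffset(offset);
  Block& block = blocks_[index];
  block.state = FREE_PENDING_TOKEN;
  block.token = token;
}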

// Gets the size of the largest block marked as FREE.
uint32_t FencedAllocator::GetLargestFreeSize() {
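  // Sketch: reclaim any pending blocks whose tokens have passed first, so
  // the answer reflects everything currently reusable.
  FreeUnused();
  uint32_t max_size = 0;
  for (const Block& block : blocks_) {
    if (block.state == FREE)
      max_size = std::max(max_size, block.size);
  }
  return max_size;
}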

// Gets the size of the largest segment of blocks that are either FREE or
// FREE_PENDING_TOKEN.
uint32_t FencedAllocator::GetLargestFreeOrPendingSize() {
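  // Sketch: sum contiguous runs of non-IN_USE blocks; ClampedNumeric
  // defensively saturates instead of wrapping on addition.
  uint32_t max_size = 0;
  base::ClampedNumeric<uint32_t> current_size = 0;
  for (const Block& block : blocks_) {
    if (block.state == IN_USE) {
      max_size = std::max(max_size, static_cast<uint32_t>(current_size));
      current_size = 0;
    } else {
      DCHECK(block.state == FREE || block.state == FREE_PENDING_TOKEN);
      current_size += block.size;
    }
  }
  return std::max(max_size, static_cast<uint32_t>(current_size));
}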

// Gets the total size of all blocks marked as free.
uint32_t FencedAllocator::GetFreeSize() {
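  FreeUnused();
  uint32_t size = 0;
  for (const Block& block : blocks_) {
    if (block.state == FREE)
      size += block.size;
  }
  return size;
}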

// Makes sure that:
// - there is at least one block.
// - there are no contiguous FREE blocks (they should have been collapsed).
// - the successive offsets match the block sizes, and they are in order.
bool FencedAllocator::CheckConsistency() {
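  if (blocks_.empty())
    return false;
  for (uint32_t i = 0; i < blocks_.size() - 1; ++i) {
    const Block& current = blocks_[i];
    const Block& next = blocks_[i + 1];
    // This check is not subsumed by the next one: offset + size can wrap
    // around as a uint32_t.
    if (next.offset <= current.offset)
      return false;
    if (next.offset != current.offset + current.size)
      return false;
    // Adjacent FREE blocks should have been collapsed.
    if (current.state == FREE && next.state == FREE)
      return false;
  }
  return true;
}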

// Returns false if all blocks are FREE (in which case they would have been
// coalesced into a single FREE block), true otherwise.
bool FencedAllocator::InUseOrFreePending() {
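  // If everything were FREE, CollapseFreeBlock would have merged it all into
  // one FREE block, so checking that single-block case suffices.
  return blocks_.size() != 1 || blocks_[0].state != FREE;
}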

FencedAllocator::State FencedAllocator::GetBlockStatusForTest(
    Offset offset,
    int32_t* token_if_pending) {
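  // Sketch: reports the block's state and, if it is pending, its token.
  BlockIndex index = GetBlockByOffset(offset);
  const Block& block = blocks_[index];
  if (block.state == FREE_PENDING_TOKEN && token_if_pending)
    *token_if_pending = block.token;
  return block.state;
}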

// Collapses the block with the next one, then with the previous one. Provided
// the structure is consistent, those are the only blocks eligible for
// collapse.
FencedAllocator::BlockIndex FencedAllocator::CollapseFreeBlock(
    BlockIndex index) {
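  // Merge the next block into this one if it is FREE.
  if (index + 1 < blocks_.size() && blocks_[index + 1].state == FREE) {
    blocks_[index].size += blocks_[index + 1].size;
    blocks_.erase(blocks_.begin() + index + 1);
  }
  // Merge this block into the previous one if that one is FREE.
  if (index > 0 && blocks_[index - 1].state == FREE) {
    blocks_[index - 1].size += blocks_[index].size;
    blocks_.erase(blocks_.begin() + index);
    --index;
  }
  return index;
}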

// Waits for the block's token, then marks the block as FREE and collapses it.
FencedAllocator::BlockIndex FencedAllocator::WaitForTokenAndFreeBlock(
    BlockIndex index) {
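  Block& block = blocks_[index];
  DCHECK_EQ(block.state, FREE_PENDING_TOKEN);
  // Block until the service side has consumed the token.
  helper_->WaitForToken(block.token);
  block.state = FREE;
  return CollapseFreeBlock(index);
}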

// Frees any blocks pending a token for which the token has been read.
void FencedAllocator::FreeUnused() {
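  // Sketch: no waiting here; only reclaim blocks whose tokens have already
  // passed. CollapseFreeBlock returns the merged block's index, and the FREE
  // result fails the condition on the next iteration, so the loop advances.
  for (uint32_t i = 0; i < blocks_.size();) {
    Block& block = blocks_[i];
    if (block.state == FREE_PENDING_TOKEN &&
        helper_->HasTokenPassed(block.token)) {
      block.state = FREE;
      i = CollapseFreeBlock(i);
    } else {
      ++i;
    }
  }
}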

// If the block is exactly the requested size, simply marks it IN_USE;
// otherwise splits it and marks the first part (of the requested size)
// IN_USE.
FencedAllocator::Offset FencedAllocator::AllocInBlock(BlockIndex index,
                                                      uint32_t size) {
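  Block& block = blocks_[index];
  DCHECK_GE(block.size, size);
  DCHECK_EQ(block.state, FREE);
  Offset offset = block.offset;
  block.token = kUnusedToken;
  if (block.size == size) {
    block.state = IN_USE;
    return offset;
  }
  // Split: the first part becomes the allocation, the remainder stays FREE.
  Block newblock = {FREE, offset + size, block.size - size, kUnusedToken};
  block.state = IN_USE;
  block.size = size;
  // Insert last: it may reallocate and invalidate the |block| reference.
  blocks_.insert(blocks_.begin() + index + 1, newblock);
  return offset;
}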

// The blocks are in offset order, so we can do a binary search.
FencedAllocator::BlockIndex FencedAllocator::GetBlockByOffset(Offset offset) {
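  // |blocks_| is sorted by offset, so binary-search for an exact match.
  auto it = std::lower_bound(
      blocks_.begin(), blocks_.end(), offset,
      [](const Block& block, Offset off) { return block.offset < off; });
  DCHECK(it != blocks_.end() && it->offset == offset);
  return static_cast<BlockIndex>(it - blocks_.begin());
}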

}  // namespace gpu