chromium/third_party/vulkan-validation-layers/src/layers/sync/sync_access_state.h

/* Copyright (c) 2019-2024 The Khronos Group Inc.
 * Copyright (c) 2019-2024 Valve Corporation
 * Copyright (c) 2019-2024 LunarG, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once
#include "sync/sync_common.h"

class ResourceAccessState;
class ResourceAccessWriteState;
struct ResourceFirstAccess;

// NOTE: the attachment read flag is put *only* in the access scope and not in the exec scope, since the ordering
//       rules apply only to this specific access for this stage, and not to the stage as a whole. The ordering detection
//       also reflects this special case for read hazard detection (using the access scope instead of the exec scope).
constexpr VkPipelineStageFlags2KHR kColorAttachmentExecScope = VK_PIPELINE_STAGE_2_COLOR_ATTACHMENT_OUTPUT_BIT_KHR;
const SyncStageAccessFlags kColorAttachmentAccessScope =
    SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_READ_BIT | SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_READ_NONCOHERENT_BIT_EXT |
    SYNC_COLOR_ATTACHMENT_OUTPUT_COLOR_ATTACHMENT_WRITE_BIT;  // Note: this is intentionally not in the exec scope
constexpr VkPipelineStageFlags2KHR kDepthStencilAttachmentExecScope =
    VK_PIPELINE_STAGE_2_EARLY_FRAGMENT_TESTS_BIT_KHR | VK_PIPELINE_STAGE_2_LATE_FRAGMENT_TESTS_BIT_KHR;
const SyncStageAccessFlags kDepthStencilAttachmentAccessScope =
    SYNC_EARLY_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_READ_BIT | SYNC_EARLY_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
    SYNC_LATE_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_READ_BIT |
    SYNC_LATE_FRAGMENT_TESTS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;  // Note: this is intentionally not in the exec scope
constexpr VkPipelineStageFlags2KHR kRasterAttachmentExecScope = kDepthStencilAttachmentExecScope | kColorAttachmentExecScope;
const SyncStageAccessFlags kRasterAttachmentAccessScope = kDepthStencilAttachmentAccessScope | kColorAttachmentAccessScope;
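
// Illustrative sketch (an exposition-only assumption, not code from this header): hazard detection can be
// thought of as pairing each exec scope above with its matching access scope as an ordering rule. Using the
// OrderingBarrier type declared further down, that pairing would look roughly like:
//
//     const OrderingBarrier kColorAttachmentOrder(kColorAttachmentExecScope, kColorAttachmentAccessScope);
//     const OrderingBarrier kRasterOrder(kRasterAttachmentExecScope, kRasterAttachmentAccessScope);
//
// The attachment read bits appear only in the access-scope half of each pair, which is the special case the
// NOTE above describes.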

enum SyncHazard {};

enum class SyncOrdering : uint8_t {};
const char *string_SyncHazard(SyncHazard hazard);
const char *string_SyncHazardVUID(SyncHazard hazard);

class HazardResult {};

struct SyncExecScope {};

struct SemaphoreScope : SyncExecScope {};

struct SyncBarrier {};

struct ResourceFirstAccess {};

QueueId;
struct OrderingBarrier {};

ResourceUsageTagSet;

class ResourceAccessWriteState {};

class ResourceAccessState : public SyncStageAccess {};
ResourceAccessStateFunction;
ResourceAccessRangeMap;
ResourceRangeMergeIterator;

// Apply the memory barrier without updating the existing barriers.  The execution barrier
// changes the "chaining" state, but to keep barriers independent, we defer this until all barriers
// of the batch have been processed. Also, depending on whether a layout transition happens, we'll either
// replace the current write barriers or add to them, so accumulate to the pending state as well.
template <typename ScopeOps>
void ResourceAccessState::ApplyBarrier(ScopeOps &&scope, const SyncBarrier &barrier, bool layout_transition) {}
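
// Illustrative usage sketch (assumed caller pattern and names, not code from this file): because ApplyBarrier
// only accumulates into pending state, a caller processing a batch of barriers applies each one first and then
// commits the accumulated result in a separate step, e.g.
//
//     for (const SyncBarrier &barrier : batch) {
//         access_state.ApplyBarrier(scope_ops, barrier, layout_transition);
//     }
//     access_state.ApplyPendingBarriers(tag);  // commit step; method name assumed here
//
// Deferring the commit is what keeps the barriers within a batch independent of one another.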

// Return whether the resulting state is "empty"
template <typename Predicate>
bool ResourceAccessState::ApplyPredicatedWait(Predicate &predicate) {}
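
// Illustrative usage sketch (assumed caller pattern and names, not code from this file): the predicate selects
// which recorded accesses the wait retires, and the boolean result lets the caller prune map entries that have
// become empty, e.g.
//
//     if (access_state.ApplyPredicatedWait(predicate)) {
//         access_map.erase(range_it);  // hypothetical map/iterator names
//     }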

template <typename Barrier>
SyncBarrier::SyncBarrier(const Barrier &barrier, const SyncExecScope &src, const SyncExecScope &dst)
    : src_exec_scope(src),
      src_access_scope(SyncStageAccess::AccessScope(src.valid_accesses, barrier.srcAccessMask)),
      dst_exec_scope(dst),
      dst_access_scope(SyncStageAccess::AccessScope(dst.valid_accesses, barrier.dstAccessMask)) {}
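
// Illustrative usage sketch (assumption): Barrier can be any Vulkan barrier struct exposing
// srcAccessMask/dstAccessMask (VkMemoryBarrier, VkMemoryBarrier2, VkImageMemoryBarrier, ...); the masks are
// filtered against the accesses valid for the supplied exec scopes, e.g.
//
//     const SyncExecScope src = SyncExecScope::MakeSrc(queue_flags, dep.srcStageMask);  // factory names assumed
//     const SyncExecScope dst = SyncExecScope::MakeDst(queue_flags, dep.dstStageMask);
//     const SyncBarrier sync_barrier(memory_barrier, src, dst);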

template <typename Barrier>
SyncBarrier::SyncBarrier(VkQueueFlags queue_flags, const Barrier &barrier) {}