chromium/third_party/vulkan-validation-layers/src/layers/utils/vk_layer_utils.h

/* Copyright (c) 2015-2017, 2019-2024 The Khronos Group Inc.
 * Copyright (c) 2015-2017, 2019-2024 Valve Corporation
 * Copyright (c) 2015-2017, 2019-2024 LunarG, Inc.
 * Modifications Copyright (C) 2022 RasterGrid Kft.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

#include <algorithm>
#include <atomic>
#include <bitset>
#include <cassert>
#include <cctype>
#include <cmath>
#include <cstring>
#include <memory>
#include <mutex>
#include <optional>
#include <shared_mutex>
#include <string>
#include <type_traits>
#include <utility>
#include <vector>

#include <vulkan/utility/vk_format_utils.h>
#include <vulkan/utility/vk_concurrent_unordered_map.hpp>

#include "generated/vk_extension_helper.h"
#include "error_message/logging.h"

#ifndef WIN32
#include <strings.h>  // For ffs()
#else
#include <intrin.h>  // For _BitScanForward() and _BitScanReverse()
#endif

#define STRINGIFY(s) STRINGIFY_HELPER(s)
#define STRINGIFY_HELPER(s) #s

#if defined __PRETTY_FUNCTION__
#define VVL_PRETTY_FUNCTION __PRETTY_FUNCTION__
#else
// For MSVC
#if defined(__FUNCSIG__)
#define VVL_PRETTY_FUNCTION __FUNCSIG__
#else
#define VVL_PRETTY_FUNCTION __func__
#endif
#endif

// There are many times we want to assert, but it is also highly important to not crash on release builds.
// These macros also make it more obvious whether we are returning early because of a known situation or
// just guarding against something actually going wrong.
#define ASSERT_AND_RETURN(cond) \
    do {                        \
        assert(cond);           \
        if (!(cond)) return;    \
    } while (0)

// For validation functions that accumulate and return a `bool skip`
#define ASSERT_AND_RETURN_SKIP(cond) \
    do {                             \
        assert(cond);                \
        if (!(cond)) return skip;    \
    } while (0)

// Not wrapped in do/while because `continue` must reach the enclosing loop
#define ASSERT_AND_CONTINUE(cond) \
    assert(cond);                 \
    if (!(cond)) continue
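// Usage sketch (illustrative only; PreCallValidateFoo and Bar are hypothetical names
// following the validation-function convention of accumulating a `bool skip`):
//
//     bool PreCallValidateFoo(const Bar *bar) {
//         bool skip = false;
//         ASSERT_AND_RETURN_SKIP(bar);  // debug builds assert; release builds return skip early
//         // ... further validation ...
//         return skip;
//     }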

static inline VkExtent3D CastTo3D(const VkExtent2D &d2) {
    VkExtent3D d3 = {d2.width, d2.height, 1};
    return d3;
}

static inline VkOffset3D CastTo3D(const VkOffset2D &d2) {
    VkOffset3D d3 = {d2.x, d2.y, 0};
    return d3;
}

// It is very rare to have more than 3 stages (really only geo/tess), so it is better to save memory/time for the 99% use case
static const uint32_t kCommonMaxGraphicsShaderStages = 3;

typedef void *dispatch_key;

// Every dispatchable handle (VkInstance, VkDevice, VkCommandBuffer, ...) points to an object whose first
// field is a pointer to the dispatch table; that pointer uniquely identifies the underlying instance/device
static inline dispatch_key GetDispatchKey(const void *object) { return (dispatch_key) * (VkLayerDispatchTable **)object; }

VkLayerInstanceCreateInfo *GetChainInfo(const VkInstanceCreateInfo *pCreateInfo, VkLayerFunction func);
VkLayerDeviceCreateInfo *GetChainInfo(const VkDeviceCreateInfo *pCreateInfo, VkLayerFunction func);

template <typename T>
constexpr bool IsPowerOfTwo(T x) {
    return x && !(x & (x - 1));
}

// Returns the 0-based index of the MSB, like the x86 bit scan reverse (bsr) instruction
// Note: an input mask of 0 yields -1
static inline int MostSignificantBit(uint32_t mask) {
#if defined __GNUC__
    return mask ? __builtin_clz(mask) ^ 31 : -1;
#elif defined _MSC_VER
    unsigned long bit_pos;
    return _BitScanReverse(&bit_pos, mask) ? static_cast<int>(bit_pos) : -1;
#else
    int msb = -1;
    while (mask) {
        ++msb;
        mask >>= 1;
    }
    return msb;
#endif
}
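// For example (illustrative): MostSignificantBit(0x0) == -1, MostSignificantBit(0x1) == 0,
// and MostSignificantBit(0x00010002) == 16 (the highest set bit wins)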

// 1-based index of the least significant set bit (like POSIX ffs()); returns 0 when val == 0
static inline int u_ffs(int val) {
#ifdef WIN32
    unsigned long bit_pos = 0;
    if (_BitScanForward(&bit_pos, static_cast<unsigned long>(val)) != 0) {
        bit_pos += 1;
    }
    return static_cast<int>(bit_pos);
#else
    return ffs(val);
#endif
}

// Given p2 a power of two, returns the smallest multiple of p2 greater than or equal to x
// Different from std::align in that it simply aligns an unsigned integer, while std::align aligns a virtual address and does the
// necessary bookkeeping to be able to correctly free memory at the new address
template <typename T>
constexpr T Align(T x, T p2) {
    static_assert(std::is_unsigned<T>::value, "Unsigned integer required.");
    assert(IsPowerOfTwo(p2));
    return (x + p2 - 1) & ~(p2 - 1);
}
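// For example (illustrative): Align<uint32_t>(13, 8) == 16 and Align<uint32_t>(16, 8) == 16,
// since (13 + 7) & ~7 == 16 and (16 + 7) & ~7 == 16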

// Returns the 0-based index of the LSB. An input mask of 0 yields -1
static inline int LeastSignificantBit(uint32_t mask) { return u_ffs(static_cast<int>(mask)) - 1; }

template <typename FlagBits, typename Flags>
FlagBits LeastSignificantFlag(Flags flags) {
    const int bit_shift = LeastSignificantBit(static_cast<uint32_t>(flags));
    assert(bit_shift != -1);
    return static_cast<FlagBits>(1ull << bit_shift);
}

// Iterates over all set bits and calls the callback with a bit mask corresponding to each flag.
// FlagBits and Flags follow Vulkan naming conventions for flag types.
// An example of a more efficient implementation: https://lemire.me/blog/2018/02/21/iterating-over-set-bits-quickly/
template <typename FlagBits, typename Flags, typename Callback>
void IterateFlags(Flags flags, Callback callback) {
    uint32_t bit_shift = 0;
    while (flags) {
        if (flags & 1) {
            callback(static_cast<FlagBits>(1ull << bit_shift));
        }
        flags = flags >> 1;
        ++bit_shift;
    }
}
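// Usage sketch (illustrative; stage_mask is a hypothetical VkShaderStageFlags value):
//
//     IterateFlags<VkShaderStageFlagBits>(stage_mask, [](VkShaderStageFlagBits bit) {
//         // called once per set bit, e.g. VK_SHADER_STAGE_VERTEX_BIT then VK_SHADER_STAGE_FRAGMENT_BIT
//     });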

static inline uint32_t SampleCountSize(VkSampleCountFlagBits sample_count) {
    // Each VkSampleCountFlagBits value is numerically equal to the sample count it names (1, 2, 4, ..., 64);
    // anything else is not a valid single sample count and yields 0
    const uint32_t count = static_cast<uint32_t>(sample_count);
    return (IsPowerOfTwo(count) && count <= 64u) ? count : 0;
}

static inline bool IsImageLayoutReadOnly(VkImageLayout layout) {
    return layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL || layout == VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL ||
           layout == VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL ||
           layout == VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL;
}

static inline bool IsImageLayoutDepthOnly(VkImageLayout layout) {
    return layout == VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL || layout == VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL;
}

static inline bool IsImageLayoutDepthReadOnly(VkImageLayout layout) {
    return layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL ||
           layout == VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL ||
           layout == VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL || layout == VK_IMAGE_LAYOUT_READ_ONLY_OPTIMAL;
}

static inline bool IsImageLayoutStencilOnly(VkImageLayout layout) {
    return layout == VK_IMAGE_LAYOUT_STENCIL_ATTACHMENT_OPTIMAL || layout == VK_IMAGE_LAYOUT_STENCIL_READ_ONLY_OPTIMAL;
}

static inline bool IsImageLayoutStencilReadOnly(VkImageLayout layout) {
    return layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL ||
           layout == VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL ||
           layout == VK_IMAGE_LAYOUT_STENCIL_READ_ONLY_OPTIMAL || layout == VK_IMAGE_LAYOUT_READ_ONLY_OPTIMAL;
}

static inline bool IsIdentitySwizzle(VkComponentMapping components) {
    // clang-format off
    return (
        ((components.r == VK_COMPONENT_SWIZZLE_IDENTITY) || (components.r == VK_COMPONENT_SWIZZLE_R)) &&
        ((components.g == VK_COMPONENT_SWIZZLE_IDENTITY) || (components.g == VK_COMPONENT_SWIZZLE_G)) &&
        ((components.b == VK_COMPONENT_SWIZZLE_IDENTITY) || (components.b == VK_COMPONENT_SWIZZLE_B)) &&
        ((components.a == VK_COMPONENT_SWIZZLE_IDENTITY) || (components.a == VK_COMPONENT_SWIZZLE_A))
    );
    // clang-format on
}

static inline uint32_t GetIndexAlignment(VkIndexType indexType) {
    switch (indexType) {
        case VK_INDEX_TYPE_UINT16:
            return 2;
        case VK_INDEX_TYPE_UINT32:
            return 4;
        case VK_INDEX_TYPE_UINT8_KHR:
            return 1;
        case VK_INDEX_TYPE_NONE_KHR:
            return 0;
        default:
            // Not a real index type. Express no alignment requirement here; an alignment error will be generated at draw time
            return 1;
    }
}

// vkspec.html#formats-planes-image-aspect
static inline bool IsValidPlaneAspect(VkFormat format, VkImageAspectFlags aspect_mask) {
    const uint32_t planes = vkuFormatPlaneCount(format);
    constexpr VkImageAspectFlags valid_planes =
        VK_IMAGE_ASPECT_PLANE_0_BIT | VK_IMAGE_ASPECT_PLANE_1_BIT | VK_IMAGE_ASPECT_PLANE_2_BIT;

    if ((aspect_mask & valid_planes) == aspect_mask) {
        if ((planes == 3) || ((planes == 2) && ((aspect_mask & VK_IMAGE_ASPECT_PLANE_2_BIT) == 0))) {
            return true;
        }
    }
    return false;  // Expects the caller to make sure it is a multi-planar format
}

static inline bool IsOnlyOneValidPlaneAspect(VkFormat format, VkImageAspectFlags aspect_mask) {
    const bool multiple_bits = aspect_mask != 0 && !IsPowerOfTwo(aspect_mask);
    return !multiple_bits && IsValidPlaneAspect(format, aspect_mask);
}

static inline bool IsMultiplePlaneAspect(VkImageAspectFlags aspect_mask) {
    constexpr VkImageAspectFlags valid_planes =
        VK_IMAGE_ASPECT_PLANE_0_BIT | VK_IMAGE_ASPECT_PLANE_1_BIT | VK_IMAGE_ASPECT_PLANE_2_BIT;
    const VkImageAspectFlags planes = aspect_mask & valid_planes;
    return planes != 0 && !IsPowerOfTwo(planes);
}

static inline bool IsAnyPlaneAspect(VkImageAspectFlags aspect_mask) {
    constexpr VkImageAspectFlags valid_planes =
        VK_IMAGE_ASPECT_PLANE_0_BIT | VK_IMAGE_ASPECT_PLANE_1_BIT | VK_IMAGE_ASPECT_PLANE_2_BIT;
    return (aspect_mask & valid_planes) != 0;
}
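// For example (illustrative): for a 2-plane format such as VK_FORMAT_G8_B8R8_2PLANE_420_UNORM,
// IsValidPlaneAspect() accepts PLANE_0/PLANE_1 masks but rejects anything containing PLANE_2, and
// IsMultiplePlaneAspect(VK_IMAGE_ASPECT_PLANE_0_BIT | VK_IMAGE_ASPECT_PLANE_1_BIT) is true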

static const VkShaderStageFlags kShaderStageAllGraphics =
    VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT | VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT |
    VK_SHADER_STAGE_GEOMETRY_BIT | VK_SHADER_STAGE_FRAGMENT_BIT | VK_SHADER_STAGE_TASK_BIT_EXT | VK_SHADER_STAGE_MESH_BIT_EXT;

static const VkShaderStageFlags kShaderStageAllRayTracing =
    VK_SHADER_STAGE_RAYGEN_BIT_KHR | VK_SHADER_STAGE_ANY_HIT_BIT_KHR | VK_SHADER_STAGE_CLOSEST_HIT_BIT_KHR |
    VK_SHADER_STAGE_MISS_BIT_KHR | VK_SHADER_STAGE_INTERSECTION_BIT_KHR | VK_SHADER_STAGE_CALLABLE_BIT_KHR;

static inline bool IsStageInPipelineBindPoint(VkShaderStageFlags stages, VkPipelineBindPoint bind_point) {
    switch (bind_point) {
        case VK_PIPELINE_BIND_POINT_GRAPHICS:
            return (stages & kShaderStageAllGraphics) != 0;
        case VK_PIPELINE_BIND_POINT_COMPUTE:
            return (stages & VK_SHADER_STAGE_COMPUTE_BIT) != 0;
        case VK_PIPELINE_BIND_POINT_RAY_TRACING_KHR:
            return (stages & kShaderStageAllRayTracing) != 0;
        default:
            return false;
    }
}

// all "advanced blend operation" found in spec
static inline bool IsAdvanceBlendOperation(const VkBlendOp blend_op) {}

// Helper for Dual-Source Blending
static inline bool IsSecondaryColorInputBlendFactor(VkBlendFactor blend_factor) {
    return (blend_factor == VK_BLEND_FACTOR_SRC1_COLOR || blend_factor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR ||
            blend_factor == VK_BLEND_FACTOR_SRC1_ALPHA || blend_factor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA);
}

// Check if size is in range
static inline bool IsBetweenInclusive(VkDeviceSize value, VkDeviceSize min, VkDeviceSize max) {
    return (value >= min) && (value <= max);
}

static inline bool IsBetweenInclusive(const VkExtent2D &value, const VkExtent2D &min, const VkExtent2D &max) {
    return IsBetweenInclusive(value.width, min.width, max.width) && IsBetweenInclusive(value.height, min.height, max.height);
}

static inline bool IsBetweenInclusive(float value, float min, float max) { return (value >= min) && (value <= max); }

// Check if value is integer multiple of granularity
static inline bool IsIntegerMultipleOf(VkDeviceSize value, VkDeviceSize granularity) {
    if (granularity == 0) {
        return value == 0;
    }
    return (value % granularity) == 0;
}

static inline bool IsIntegerMultipleOf(const VkOffset2D &value, const VkOffset2D &granularity) {
    return IsIntegerMultipleOf(value.x, granularity.x) && IsIntegerMultipleOf(value.y, granularity.y);
}

// Perform a zero-tolerant modulo operation
static inline VkDeviceSize SafeModulo(VkDeviceSize dividend, VkDeviceSize divisor) {
    VkDeviceSize result = 0;
    if (divisor != 0) {
        result = dividend % divisor;
    }
    return result;
}

static inline VkDeviceSize SafeDivision(VkDeviceSize dividend, VkDeviceSize divisor) {
    VkDeviceSize result = 0;
    if (divisor != 0) {
        result = dividend / divisor;
    }
    return result;
}

// Resolves a (offset, size) range against a resource of whole_size bytes; VK_WHOLE_SIZE means
// "the rest of the resource". Returns no value if the range does not fit.
inline std::optional<VkDeviceSize> ComputeValidSize(VkDeviceSize offset, VkDeviceSize size, VkDeviceSize whole_size) {
    std::optional<VkDeviceSize> valid_size;
    if (offset < whole_size) {
        if (size == VK_WHOLE_SIZE) {
            valid_size.emplace(whole_size - offset);
        } else if ((offset + size) <= whole_size) {
            valid_size.emplace(size);
        }
    }
    return valid_size;
}
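// For example (illustrative): with whole_size == 16, ComputeValidSize(4, VK_WHOLE_SIZE, 16) yields 12,
// ComputeValidSize(4, 8, 16) yields 8, and ComputeValidSize(4, 16, 16) yields no value (range overflows)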

// Only 32 bit fields should need a bit count
static inline uint32_t GetBitSetCount(uint32_t field) {
    std::bitset<32> view_bits(field);
    return static_cast<uint32_t>(view_bits.count());
}

static inline uint32_t FullMipChainLevels(VkExtent3D extent) {
    // The uint cast applies a floor, which gives the correct rounding behavior for mip chain levels
    return 1u + static_cast<uint32_t>(log2(std::max({extent.height, extent.width, extent.depth})));
}
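// For example (illustrative): a 16 x 8 x 1 image has FullMipChainLevels() == 5
// (16x8, 8x4, 4x2, 2x1, 1x1), since 1 + floor(log2(16)) == 5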

// Returns the effective extent of an image subresource, adjusted for mip level and array depth.
VkExtent3D GetEffectiveExtent(const VkImageCreateInfo &ci, const VkImageAspectFlags aspect_mask, const uint32_t mip_level);

// Used to get the VkExternalFormatANDROID without having to use ifdefs in the calling logic
// A result of zero is the same as not having the struct in the pNext chain
constexpr uint64_t GetExternalFormat(const void *pNext) {
#if defined(VK_USE_PLATFORM_ANDROID_KHR)
    if (pNext) {
        const auto *external_format = vku::FindStructInPNextChain<VkExternalFormatANDROID>(pNext);
        if (external_format) {
            return external_format->externalFormat;
        }
    }
#endif
    (void)pNext;
    return 0;
}
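// Usage sketch (illustrative; image_create_info is a hypothetical VkImageCreateInfo):
//
//     const uint64_t external_format = GetExternalFormat(image_create_info.pNext);
//     if (external_format != 0) { /* AHardwareBuffer-backed external format */ }
//
// On non-Android builds the call simply returns 0, so no ifdef is needed at the call site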

// Find whether or not an element is in list
// Two definitions, to be able to do the following calls:
// IsValueIn(1, {1, 2, 3});
// std::array arr {1, 2, 3};
// IsValueIn(1, arr);
template <typename T, typename RANGE>
bool IsValueIn(const T &v, const RANGE &range) {
    return std::find(std::begin(range), std::end(range), v) != std::end(range);
}

template <typename T>
bool IsValueIn(const T &v, const std::initializer_list<T> &list) {
    return IsValueIn<T, decltype(list)>(v, list);
}

#define VK_LAYER_API_VERSION VK_HEADER_VERSION_COMPLETE

typedef enum VkStringErrorFlagBits {
    VK_STRING_ERROR_NONE = 0x00000000,
    VK_STRING_ERROR_LENGTH = 0x00000001,
    VK_STRING_ERROR_BAD_DATA = 0x00000002,
} VkStringErrorFlagBits;
typedef VkFlags VkStringErrorFlags;

void LayerDebugMessengerActions(DebugReport *debug_report, const char *layer_identifier);

std::string GetTempFilePath();

// Aliases to avoid excessive typing. We can't easily auto these away because
// there are virtual methods in ValidationObject which return lock guards
// and those cannot use return type deduction.
typedef std::shared_lock<std::shared_mutex> ReadLockGuard;
typedef std::unique_lock<std::shared_mutex> WriteLockGuard;

// helper class for the very common case of getting and then locking a command buffer (or other state object)
template <typename T, typename Guard>
class LockedSharedPtr : public std::shared_ptr<T> {
  public:
    LockedSharedPtr(std::shared_ptr<T> &&ptr, Guard &&guard) : std::shared_ptr<T>(std::move(ptr)), guard_(std::move(guard)) {}
    LockedSharedPtr() : std::shared_ptr<T>(), guard_() {}

  private:
    Guard guard_;
};
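// Usage sketch (illustrative; CommandBufferState and its `lock` mutex member are hypothetical):
//
//     LockedSharedPtr<CommandBufferState, WriteLockGuard> GetWrite(std::shared_ptr<CommandBufferState> cb) {
//         WriteLockGuard guard(cb->lock);  // lock while the shared_ptr keeps the object alive
//         return LockedSharedPtr<CommandBufferState, WriteLockGuard>(std::move(cb), std::move(guard));
//     }
//
// The guard is released when the LockedSharedPtr goes out of scope, bundling lifetime and locking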

static constexpr VkPipelineStageFlags2KHR kFramebufferStagePipelineStageFlags =
    (VK_PIPELINE_STAGE_2_FRAGMENT_SHADER_BIT_KHR | VK_PIPELINE_STAGE_2_EARLY_FRAGMENT_TESTS_BIT_KHR |
     VK_PIPELINE_STAGE_2_LATE_FRAGMENT_TESTS_BIT_KHR | VK_PIPELINE_STAGE_2_COLOR_ATTACHMENT_OUTPUT_BIT_KHR);

// The attachment access flags permitted for VK_EXT_shader_tile_image barriers
static constexpr VkAccessFlags2 kShaderTileImageAllowedAccessFlags =
    VK_ACCESS_2_COLOR_ATTACHMENT_READ_BIT_KHR | VK_ACCESS_2_COLOR_ATTACHMENT_WRITE_BIT_KHR |
    VK_ACCESS_2_DEPTH_STENCIL_ATTACHMENT_READ_BIT_KHR | VK_ACCESS_2_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT_KHR;

static constexpr bool HasNonFramebufferStagePipelineStageFlags(VkPipelineStageFlags2KHR inflags) {
    return (inflags & ~kFramebufferStagePipelineStageFlags) != 0;
}

static constexpr bool HasFramebufferStagePipelineStageFlags(VkPipelineStageFlags2KHR inflags) {
    return (inflags & kFramebufferStagePipelineStageFlags) != 0;
}

static constexpr bool HasNonShaderTileImageAccessFlags(VkAccessFlags2 in_flags) {
    return (in_flags & ~kShaderTileImageAllowedAccessFlags) != 0;
}

bool RangesIntersect(int64_t x, uint64_t x_size, int64_t y, uint64_t y_size);

namespace vvl {

static inline void ToLower(std::string &str) {
    // std::tolower() returns an int which can cause compiler warnings
    std::transform(str.begin(), str.end(), str.begin(), [](char c) { return static_cast<char>(std::tolower(c)); });
}

static inline void ToUpper(std::string &str) {
    std::transform(str.begin(), str.end(), str.begin(), [](char c) { return static_cast<char>(std::toupper(c)); });
}

// The standard does not specify whether data() of a zero-sized container is null or non-null,
// only that it is not dereferenceable.
//
// Vulkan VUIDs, OTOH, frequently require NULL for zero-sized entries, or for optional entries with non-zero counts
template <typename T>
const typename T::value_type *DataOrNull(const T &container) {
    if (!container.empty()) {
        return container.data();
    }
    return nullptr;
}
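// Usage sketch (illustrative):
//
//     std::vector<VkDynamicState> dynamic_states;  // possibly empty
//     dynamic_state_ci.pDynamicStates = vvl::DataOrNull(dynamic_states);  // guaranteed NULL when empty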

// Workaround for static_assert(false) before C++ 23 arrives
// https://en.cppreference.com/w/cpp/language/static_assert
// https://cplusplus.github.io/CWG/issues/2518.html
template <typename>
inline constexpr bool dependent_false_v = false;
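// Usage sketch (illustrative): makes the unhandled branch of an `if constexpr` chain a compile
// error only when it is actually instantiated:
//
//     template <typename T>
//     void Handle(const T &) {
//         if constexpr (std::is_integral_v<T>) {
//             // ...
//         } else {
//             static_assert(dependent_false_v<T>, "unsupported type");
//         }
//     }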

// Until C++ 26 std::atomic<T>::fetch_max arrives
// https://en.cppreference.com/w/cpp/atomic/atomic/fetch_max
// https://www.open-std.org/jtc1/sc22/wg21/docs/papers/2024/p0493r5.pdf
template <typename T>
inline T atomic_fetch_max(std::atomic<T> &current_max, const T &value) noexcept {
    T t = current_max.load();
    // On CAS failure, t is reloaded with the current value and the max is recomputed
    while (!current_max.compare_exchange_weak(t, std::max(t, value))) {
    }
    return t;  // previous value, matching std::atomic fetch_* semantics
}
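// Usage sketch (illustrative; peak_descriptor_count and current_count are hypothetical):
//
//     std::atomic<uint32_t> peak_descriptor_count{0};
//     vvl::atomic_fetch_max(peak_descriptor_count, current_count);  // track a running max without a mutex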

}  // namespace vvl