chromium/third_party/abseil-cpp/absl/container/internal/raw_hash_set.cc

// Copyright 2018 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "absl/container/internal/raw_hash_set.h"

#include <atomic>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstring>

#include "absl/base/attributes.h"
#include "absl/base/config.h"
#include "absl/base/dynamic_annotations.h"
#include "absl/base/internal/endian.h"
#include "absl/base/optimization.h"
#include "absl/container/internal/container_memory.h"
#include "absl/container/internal/hashtablez_sampler.h"
#include "absl/hash/hash.h"

namespace absl {
ABSL_NAMESPACE_BEGIN
namespace container_internal {

// Represents a control byte corresponding to a full slot with arbitrary hash.
constexpr ctrl_t ZeroCtrlT() { return static_cast<ctrl_t>(0); }

// We have space for `growth_info` before a single block of control bytes. A
// single block of empty control bytes is used by tables without any slots
// allocated; this enables removing a branch in the hot path of find(). In
// order to ensure that the control bytes are aligned to 16, we have 16 bytes
// before the control bytes even though growth_info only needs 8.
alignas(16) ABSL_CONST_INIT ABSL_DLL const ctrl_t kEmptyGroup[32] = {
    ZeroCtrlT(),       ZeroCtrlT(),    ZeroCtrlT(),    ZeroCtrlT(),
    ZeroCtrlT(),       ZeroCtrlT(),    ZeroCtrlT(),    ZeroCtrlT(),
    ZeroCtrlT(),       ZeroCtrlT(),    ZeroCtrlT(),    ZeroCtrlT(),
    ZeroCtrlT(),       ZeroCtrlT(),    ZeroCtrlT(),    ZeroCtrlT(),
    ctrl_t::kSentinel, ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty,
    ctrl_t::kEmpty,    ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty,
    ctrl_t::kEmpty,    ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty,
    ctrl_t::kEmpty,    ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty};
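
// Illustrative sketch (based on the layout described above; a sketch, not the
// normative definition): raw_hash_set.h derives an empty table's control
// pointer by skipping the 16 bytes reserved for `growth_info`, so the control
// bytes themselves start 16-aligned:
//
//   inline ctrl_t* EmptyGroup() {
//     // Const must be cast away here; no caller ever writes control bytes
//     // of an empty table, so this is safe.
//     return const_cast<ctrl_t*>(kEmptyGroup + 16);
//   }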

// We need one full byte followed by a sentinel byte for iterator::operator++ to
// work. We have a full group after kSentinel to be safe (in case operator++ is
// changed to read a full group).
ABSL_CONST_INIT ABSL_DLL const ctrl_t kSooControl[17] = {
    ZeroCtrlT(),    ctrl_t::kSentinel, ZeroCtrlT(),    ctrl_t::kEmpty,
    ctrl_t::kEmpty, ctrl_t::kEmpty,    ctrl_t::kEmpty, ctrl_t::kEmpty,
    ctrl_t::kEmpty, ctrl_t::kEmpty,    ctrl_t::kEmpty, ctrl_t::kEmpty,
    ctrl_t::kEmpty, ctrl_t::kEmpty,    ctrl_t::kEmpty, ctrl_t::kEmpty,
    ctrl_t::kEmpty};
static_assert(NumControlBytes(SooCapacity()) <= 17,
              "kSooControl capacity too small");
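
// Illustrative reading of kSooControl for a capacity-1 (SOO) table, assuming
// the usual cloned-control-byte convention (a sketch, not normative):
//
//   index 0     : ZeroCtrlT()        // the single full slot
//   index 1     : ctrl_t::kSentinel  // stops iterator::operator++
//   index 2     : ZeroCtrlT()        // mirror (clone) of index 0
//   index 3..16 : ctrl_t::kEmpty     // a full group's worth of safe bytes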

#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
constexpr size_t Group::kWidth;
#endif

namespace {

// Returns "random" seed.
inline size_t RandomSeed() {}

bool ShouldRehashForBugDetection(const ctrl_t* ctrl, size_t capacity) {
  // Reconstructed sketch (assumption, not the shipped body): we can't use the
  // abseil-random library here because it depends on swisstable, so derive
  // randomness from RandomSeed() mixed with a per-table salt, returning true
  // with probability `min(1, RehashProbabilityConstant() / capacity)`.
  return (RandomSeed() ^ PerTableSalt(ctrl)) % capacity <
         RehashProbabilityConstant();
}

}  // namespace

GenerationType* EmptyGeneration() {
  if (SwisstableGenerationsEnabled()) {
    constexpr size_t kNumEmptyGenerations = 1024;
    static constexpr GenerationType kEmptyGenerations[kNumEmptyGenerations]{};
    return const_cast<GenerationType*>(
        &kEmptyGenerations[RandomSeed() % kNumEmptyGenerations]);
  }
  return nullptr;
}

bool CommonFieldsGenerationInfoEnabled::
    should_rehash_for_bug_detection_on_insert(const ctrl_t* ctrl,
                                              size_t capacity) const {
  if (reserved_growth_ == kReservedGrowthJustRanOut) return true;
  if (reserved_growth_ > 0) return false;
  return ShouldRehashForBugDetection(ctrl, capacity);
}

bool CommonFieldsGenerationInfoEnabled::should_rehash_for_bug_detection_on_move(
    const ctrl_t* ctrl, size_t capacity) const {
  return ShouldRehashForBugDetection(ctrl, capacity);
}

bool ShouldInsertBackwardsForDebug(size_t capacity, size_t hash,
                                   const ctrl_t* ctrl) {
  // To avoid problems with weak hashes and single bit tests, we use % 13.
  return !is_small(capacity) && (H1(hash, ctrl) ^ RandomSeed()) % 13 > 6;
}

size_t PrepareInsertAfterSoo(size_t hash, size_t slot_size,
                             CommonFields& common) {}

void ConvertDeletedToEmptyAndFullToDeleted(ctrl_t* ctrl, size_t capacity) {
  assert(ctrl[capacity] == ctrl_t::kSentinel);
  assert(IsValidCapacity(capacity));
  for (ctrl_t* pos = ctrl; pos < ctrl + capacity; pos += Group::kWidth) {
    Group{pos}.ConvertSpecialToEmptyAndFullToDeleted(pos);
  }
  // Copy the cloned ctrl bytes.
  std::memcpy(ctrl + capacity + 1, ctrl, NumClonedBytes());
  ctrl[capacity] = ctrl_t::kSentinel;
}

// Explicit instantiation for the inline function; the header declares it
// `extern template` so other translation units share this definition.
template FindInfo find_first_non_full(const CommonFields&, size_t);
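
// Illustrative sketch of the probe loop that find_first_non_full performs
// (the real inline definition lives in raw_hash_set.h; the details below are
// assumptions, not the shipped code):
//
//   FindInfo find_first_non_full(const CommonFields& common, size_t hash) {
//     auto seq = probe(common, hash);
//     while (true) {
//       Group g{common.control() + seq.offset()};
//       auto mask = g.MaskEmptyOrDeleted();
//       if (mask) {
//         return {seq.offset(mask.LowestBitSet()), seq.index()};
//       }
//       seq.next();
//       assert(seq.index() <= common.capacity() && "full table!");
//     }
//   }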

FindInfo find_first_non_full_outofline(const CommonFields& common,
                                       size_t hash) {
  return find_first_non_full(common, hash);
}

namespace {

// Returns the address of the slot just after `slot`, assuming each slot has
// the specified size.
static inline void* NextSlot(void* slot, size_t slot_size) {
  return reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(slot) +
                                 slot_size);
}

// Returns the address of the slot just before `slot`, assuming each slot has
// the specified size.
static inline void* PrevSlot(void* slot, size_t slot_size) {
  return reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(slot) -
                                 slot_size);
}

// Finds an empty slot, which is guaranteed to exist, starting from the given
// position.
// NOTE: this function is almost never triggered inside of
// DropDeletesWithoutResize, so we keep it simple. The table is rather sparse,
// so an empty slot will be found very quickly.
size_t FindEmptySlot(size_t start, size_t end, const ctrl_t* ctrl) {
  for (size_t i = start; i < end; ++i) {
    if (IsEmpty(ctrl[i])) return i;
  }
  assert(false && "no empty slot");
  return ~size_t{};
}

void DropDeletesWithoutResize(CommonFields& common,
                              const PolicyFunctions& policy) {}
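
// For reference, the in-place rehash that DropDeletesWithoutResize performs
// is documented in raw_hash_set.h; in outline (a summary, not the elided
// body):
//   1. Mark all DELETED slots EMPTY and all FULL slots DELETED
//      (ConvertDeletedToEmptyAndFullToDeleted above).
//   2. For each slot now marked DELETED, rehash its element and probe for a
//      target slot:
//      - target is in the same probe group: unmark the slot, the element
//        stays put;
//      - target is EMPTY: transfer the element there and mark the old slot
//        EMPTY;
//      - target is DELETED: swap the two elements and repeat with the
//        evicted one.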

static bool WasNeverFull(CommonFields& c, size_t index) {
  if (is_single_group(c.capacity())) return true;
  const size_t index_before = (index - Group::kWidth) & c.capacity();
  const auto empty_after = Group(c.control() + index).MaskEmpty();
  const auto empty_before = Group(c.control() + index_before).MaskEmpty();

  // We count how many consecutive non-empties we have to the right and to the
  // left of `index`. If the sum is >= kWidth then there is at least one probe
  // window that might have seen a full group.
  return empty_before && empty_after &&
         static_cast<size_t>(empty_after.TrailingZeros()) +
                 empty_before.LeadingZeros() < Group::kWidth;
}

}  // namespace

void EraseMetaOnly(CommonFields& c, size_t index, size_t slot_size) {
  assert(IsFull(c.control()[index]) && "erasing a dangling iterator");
  c.decrement_size();
  c.infoz().RecordErase();

  if (WasNeverFull(c, index)) {
    SetCtrl(c, index, ctrl_t::kEmpty, slot_size);
    c.growth_info().OverwriteFullAsEmpty();
    return;
  }

  c.growth_info().OverwriteFullAsDeleted();
  SetCtrl(c, index, ctrl_t::kDeleted, slot_size);
}
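
// Worked example of the WasNeverFull() distinction above (illustrative):
// suppose element X probed the group containing `index`, found it full, and
// settled further along its probe sequence. Erasing the element at `index`
// by writing ctrl_t::kEmpty would make a later find(X) stop at the now-empty
// byte and miss X entirely. Writing ctrl_t::kDeleted (a tombstone) keeps the
// probe walking, so X stays reachable; kEmpty is only safe when the
// neighborhood around `index` provably never formed a full probe window.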

void ClearBackingArray(CommonFields& c, const PolicyFunctions& policy,
                       bool reuse, bool soo_enabled) {}

void HashSetResizeHelper::GrowIntoSingleGroupShuffleControlBytes(
    ctrl_t* __restrict new_ctrl, size_t new_capacity) const {}

void HashSetResizeHelper::InitControlBytesAfterSoo(ctrl_t* new_ctrl, ctrl_t h2,
                                                   size_t new_capacity) {}

void HashSetResizeHelper::GrowIntoSingleGroupShuffleTransferableSlots(
    void* new_slots, size_t slot_size) const {}

void HashSetResizeHelper::GrowSizeIntoSingleGroupTransferable(
    CommonFields& c, size_t slot_size) {}

void HashSetResizeHelper::TransferSlotAfterSoo(CommonFields& c,
                                               size_t slot_size) {}

namespace {

// Called whenever the table needs to make room for new elements: either by
// removing tombstones via an in-place rehash or by growing to the next
// capacity.
ABSL_ATTRIBUTE_NOINLINE
FindInfo FindInsertPositionWithGrowthOrRehash(CommonFields& common, size_t hash,
                                              const PolicyFunctions& policy) {}

}  // namespace

const void* GetHashRefForEmptyHasher(const CommonFields& common) {
  // The hasher is stateless when this is called, so any valid, stable
  // address works; the CommonFields object itself is convenient.
  return &common;
}

size_t PrepareInsertNonSoo(CommonFields& common, size_t hash, FindInfo target,
                           const PolicyFunctions& policy) {}
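
// Illustrative outline of the non-SOO insert preparation (an assumption-level
// sketch; the body above is elided). When growth capacity is exhausted, fall
// back to FindInsertPositionWithGrowthOrRehash above, then stamp the chosen
// slot, roughly:
//
//   // Not the shipped code; names follow the declarations in this file.
//   if (/* out of growth capacity */) {
//     target = FindInsertPositionWithGrowthOrRehash(common, hash, policy);
//   }
//   common.increment_size();
//   SetCtrl(common, target.offset, H2(hash), policy.slot_size);
//   return target.offset;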

}  // namespace container_internal
ABSL_NAMESPACE_END
}  // namespace absl