chromium/third_party/abseil-cpp/absl/container/internal/raw_hash_set.h

// Copyright 2018 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// An open-addressing hashtable with quadratic probing.
//
// This is a low level hashtable on top of which different interfaces can be
// implemented, like flat_hash_set, node_hash_set, string_hash_set, etc.
//
// The table interface is similar to that of std::unordered_set. Notable
// differences are that most member functions support heterogeneous keys when
// BOTH the hash and eq functions are marked as transparent. They do so by
// providing a typedef called `is_transparent`.
//
// When heterogeneous lookup is enabled, functions that take key_type act as if
// they have an overload set like:
//
//   iterator find(const key_type& key);
//   template <class K>
//   iterator find(const K& key);
//
//   size_type erase(const key_type& key);
//   template <class K>
//   size_type erase(const K& key);
//
//   std::pair<iterator, iterator> equal_range(const key_type& key);
//   template <class K>
//   std::pair<iterator, iterator> equal_range(const K& key);
//
// When heterogeneous lookup is disabled, only the explicit `key_type` overloads
// exist.
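//
// For example, a transparent hasher/equality pair enabling heterogeneous
// string lookup might look like this (a sketch, not part of this header):
//
//   struct StringHash {
//     using is_transparent = void;
//     size_t operator()(absl::string_view v) const {
//       return absl::Hash<absl::string_view>{}(v);
//     }
//   };
//   struct StringEq {
//     using is_transparent = void;
//     bool operator()(absl::string_view a, absl::string_view b) const {
//       return a == b;
//     }
//   };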
//
// find() also supports passing the hash explicitly:
//
//   iterator find(const key_type& key, size_t hash);
//   template <class U>
//   iterator find(const U& key, size_t hash);
//
// In addition, the pointer-to-element and iterator stability guarantees are
// weaker: all iterators and pointers are invalidated after a new element is
// inserted.
//
// IMPLEMENTATION DETAILS
//
// # Table Layout
//
// A raw_hash_set's backing array consists of control bytes followed by slots
// that may or may not contain objects.
//
// The layout of the backing array, for `capacity` slots, is thus, as a
// pseudo-struct:
//
//   struct BackingArray {
//     // Sampling handler. This field isn't present when sampling is
//     // disabled or this allocation hasn't been selected for sampling.
//     HashtablezInfoHandle infoz_;
//     // The number of elements we can insert before growing the capacity.
//     size_t growth_left;
//     // Control bytes for the "real" slots.
//     ctrl_t ctrl[capacity];
//     // Always `ctrl_t::kSentinel`. This is used by iterators to find when to
//     // stop and serves no other purpose.
//     ctrl_t sentinel;
//     // A copy of the first `kWidth - 1` elements of `ctrl`. This is used so
//     // that if a probe sequence picks a value near the end of `ctrl`,
//     // `Group` will have valid control bytes to look at.
//     ctrl_t clones[kWidth - 1];
//     // The actual slot data.
//     slot_type slots[capacity];
//   };
//
// The length of this array is computed by `RawHashSetLayout::alloc_size` below.
//
// Control bytes (`ctrl_t`) are bytes (collected into groups of a
// platform-specific size) that define the state of the corresponding slot in
// the slot array. Group manipulation is tightly optimized to be as efficient
// as possible: SSE and friends on x86, clever bit operations on other arches.
//
//      Group 1         Group 2        Group 3
// +---------------+---------------+---------------+
// | | | | | | | | | | | | | | | | | | | | | | | | |
// +---------------+---------------+---------------+
//
// Each control byte is either a special value (marking an empty slot, a
// deleted slot (sometimes called a *tombstone*), or the end-of-table sentinel
// used by iterators) or, if the slot is occupied, seven bits (H2) from the
// hash of the value in the corresponding slot.
//
// Storing control bytes in a separate array also has beneficial cache effects,
// since more logical slots will fit into a cache line.
//
// # Small Object Optimization (SOO)
//
// When the size/alignment of the value_type and the capacity of the table are
// small, we enable small object optimization and store the values inline in
// the raw_hash_set object. This optimization allows us to avoid
// allocation/deallocation as well as cache/dTLB misses.
//
// # Hashing
//
// We compute two separate hashes, `H1` and `H2`, from the hash of an object.
// `H1(hash(x))` is an index into `slots`, and essentially the starting point
// for the probe sequence. `H2(hash(x))` is a 7-bit value used to filter out
// objects that cannot possibly be the one we are looking for.
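//
// As a sketch (the real helpers below also mix a per-table salt into H1):
//
//   size_t H1(size_t hash) { return hash >> 7; }    // probe starting point
//   h2_t   H2(size_t hash) { return hash & 0x7F; }  // 7-bit tag kept in ctrl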
//
// # Table operations.
//
// The key operations are `insert`, `find`, and `erase`.
//
// Since `insert` and `erase` are implemented in terms of `find`, we describe
// `find` first. To `find` a value `x`, we compute `hash(x)`. From
// `H1(hash(x))` and the capacity, we construct a `probe_seq` that visits every
// group of slots in some interesting order.
//
// We now walk through these indices. At each index, we select the entire group
// starting with that index and extract potential candidates: occupied slots
// with a control byte equal to `H2(hash(x))`. If we find an empty slot in the
// group, we stop: `x` is not in the table. Each candidate slot `y` is compared with
// `x`; if `x == y`, we are done and return `&y`; otherwise we continue to the
// next probe index. Tombstones effectively behave like full slots that never
// match the value we're looking for.
//
// The `H2` bits ensure that when we compare a slot to an object with `==`, we
// are likely to have actually found the object. That is, the chance is low
// that `==` is called and returns `false`. Thus, when we search for an object,
// we are unlikely to call `==` many times. This likelihood can be analyzed as
// follows (assuming that H2 is a random enough hash function).
//
// Let's assume that there are `k` "wrong" objects that must be examined in a
// probe sequence.  For example, when doing a `find` on an object that is in the
// table, `k` is the number of objects between the start of the probe sequence
// and the final found object (not including the final found object).  The
// expected number of objects with an H2 match is then `k/128`.  Measurements
// and analysis indicate that even at high load factors, `k` is less than 32,
// meaning that the number of "false positive" comparisons we must perform is
// less than 1/8 per `find`.
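//
// In pseudocode, with hypothetical helper names (the real code works on whole
// groups and is heavily branch-optimized):
//
//   iterator find(const T& x) {
//     size_t hash = hash_of(x);
//     for (probe_seq seq(H1(hash), capacity); ; seq.next()) {
//       Group g(ctrl + seq.offset());
//       for (uint32_t i : g.Match(H2(hash))) {
//         size_t idx = seq.offset(i);
//         if (eq(x, slots[idx])) return iterator_at(idx);
//       }
//       if (g.MaskEmpty()) return end();  // hit an empty slot: not present
//     }
//   }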
//
// `insert` is implemented in terms of `unchecked_insert`, which inserts a
// value presumed to not be in the table (violating this requirement will cause
// the table to behave erratically). Given `x` and its hash `hash(x)`, to insert
// it, we construct a `probe_seq` once again, and use it to find the first
// group with an unoccupied (empty *or* deleted) slot. We place `x` into the
// first such slot in the group and mark it as full with `x`'s H2.
//
// To `insert`, we compose `unchecked_insert` with `find`. We compute `h(x)` and
// perform a `find` to see if it's already present; if it is, we're done. If
// it's not, we may decide the table is getting overcrowded (i.e. the load
// factor is greater than 7/8 for big tables; `is_small()` tables use a max load
// factor of 1); in this case, we allocate a bigger array, `unchecked_insert`
// each element of the table into the new array (we know that no insertion here
// will insert an already-present value), and discard the old backing array. At
// this point, we may `unchecked_insert` the value `x`.
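//
// A sketch of the insert path, again with hypothetical helpers:
//
//   std::pair<iterator, bool> insert(const T& x) {
//     size_t hash = hash_of(x);
//     if (iterator it = find(x, hash); it != end()) return {it, false};
//     if (growth_left() == 0) resize_and_reinsert_all();  // grow the array
//     return {unchecked_insert(x, hash), true};  // writes H2 into ctrl
//   }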
//
// Below, `unchecked_insert` is partly implemented by `prepare_insert`, which
// presents a viable, initialized slot pointee to the caller.
//
// `erase` is implemented in terms of `erase_at`, which takes an index to a
// slot. Given an offset, we simply create a tombstone and destroy its contents.
// If we can prove that the slot would not appear in a probe sequence, we can
// mark the slot as empty, instead. We can prove this by observing that if a
// group has any empty slots, it has never been full (assuming we never create
// an empty slot in a group with no empties, which this heuristic guarantees we
// never do), and that find would stop at this group anyway (since it does not
// probe beyond groups with empties).
//
// `erase` is `erase_at` composed with `find`: if we
// have a value `x`, we can perform a `find`, and then `erase_at` the resulting
// slot.
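//
// Roughly, as a sketch with hypothetical helpers:
//
//   void erase(const T& x) {
//     iterator it = find(x);
//     if (it == end()) return;
//     destroy(slot_of(it));
//     // The control byte can be reset to kEmpty when its group still has an
//     // empty byte (no probe sequence could have passed over it); otherwise
//     // it becomes a kDeleted tombstone.
//     ctrl_of(it) = group_has_empty(it) ? ctrl_t::kEmpty : ctrl_t::kDeleted;
//   }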
//
// To iterate, we simply traverse the array, skipping empty and deleted slots
// and stopping when we hit a `kSentinel`.
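//
// A sketch of the iterator advance step (the real code skips a whole group at
// a time):
//
//   void skip_empty_or_deleted() {
//     while (IsEmptyOrDeleted(*ctrl_)) { ++ctrl_; ++slot_; }
//     // Stops at a full slot or at the trailing kSentinel byte.
//   }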

#ifndef ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_H_
#define ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_H_

#include <algorithm>
#include <cassert>
#include <cmath>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <functional>
#include <initializer_list>
#include <iterator>
#include <limits>
#include <memory>
#include <tuple>
#include <type_traits>
#include <utility>

#include "absl/base/attributes.h"
#include "absl/base/config.h"
#include "absl/base/internal/endian.h"
#include "absl/base/internal/raw_logging.h"
#include "absl/base/macros.h"
#include "absl/base/optimization.h"
#include "absl/base/options.h"
#include "absl/base/port.h"
#include "absl/base/prefetch.h"
#include "absl/container/internal/common.h"  // IWYU pragma: export // for node_handle
#include "absl/container/internal/compressed_tuple.h"
#include "absl/container/internal/container_memory.h"
#include "absl/container/internal/hash_function_defaults.h"
#include "absl/container/internal/hash_policy_traits.h"
#include "absl/container/internal/hashtable_debug_hooks.h"
#include "absl/container/internal/hashtablez_sampler.h"
#include "absl/hash/hash.h"
#include "absl/memory/memory.h"
#include "absl/meta/type_traits.h"
#include "absl/numeric/bits.h"
#include "absl/utility/utility.h"

#ifdef ABSL_INTERNAL_HAVE_SSE2
#include <emmintrin.h>
#endif

#ifdef ABSL_INTERNAL_HAVE_SSSE3
#include <tmmintrin.h>
#endif

#ifdef _MSC_VER
#include <intrin.h>
#endif

#ifdef ABSL_INTERNAL_HAVE_ARM_NEON
#include <arm_neon.h>
#endif

namespace absl {
ABSL_NAMESPACE_BEGIN
namespace container_internal {

#ifdef ABSL_SWISSTABLE_ENABLE_GENERATIONS
#error ABSL_SWISSTABLE_ENABLE_GENERATIONS cannot be directly set
#elif (defined(ABSL_HAVE_ADDRESS_SANITIZER) ||   \
       defined(ABSL_HAVE_HWADDRESS_SANITIZER) || \
       defined(ABSL_HAVE_MEMORY_SANITIZER)) &&   \
    !defined(NDEBUG_SANITIZER)  // If defined, performance is important.
// When compiled in sanitizer mode, we add generation integers to the backing
// array and iterators. In the backing array, we store the generation between
// the control bytes and the slots. When iterators are dereferenced, we assert
// that the container has not been mutated in a way that could cause iterator
// invalidation since the iterator was initialized.
#define ABSL_SWISSTABLE_ENABLE_GENERATIONS
#endif

// We use uint8_t so we don't need to worry about padding.
using GenerationType = uint8_t;

// A sentinel value for empty generations. Using 0 makes it easy to constexpr
// initialize an array of this value.
constexpr GenerationType SentinelEmptyGeneration() {}

constexpr GenerationType NextGeneration(GenerationType generation) {}

#ifdef ABSL_SWISSTABLE_ENABLE_GENERATIONS
constexpr bool SwisstableGenerationsEnabled() { return true; }
constexpr size_t NumGenerationBytes() { return sizeof(GenerationType); }
#else
constexpr bool SwisstableGenerationsEnabled() {}
constexpr size_t NumGenerationBytes() {}
#endif

template <typename AllocType>
void SwapAlloc(AllocType& lhs, AllocType& rhs,
               std::true_type /* propagate_on_container_swap */) {}
template <typename AllocType>
void SwapAlloc(AllocType& lhs, AllocType& rhs,
               std::false_type /* propagate_on_container_swap */) {}

template <typename AllocType>
void CopyAlloc(AllocType& lhs, AllocType& rhs,
               std::true_type /* propagate_alloc */) {}
template <typename AllocType>
void CopyAlloc(AllocType&, AllocType&, std::false_type /* propagate_alloc */) {}

// The state for a probe sequence.
//
// Currently, the sequence is a triangular progression of the form
//
//   p(i) := Width * (i^2 + i)/2 + hash (mod mask + 1)
//
// The use of `Width` ensures that each probe step does not overlap groups;
// the sequence effectively outputs the addresses of *groups* (although not
// necessarily aligned to any boundary). The `Group` machinery allows us
// to check an entire group with minimal branching.
//
// Wrapping around at `mask + 1` is important, but not for the obvious reason.
// As described above, the first few entries of the control byte array
// are mirrored at the end of the array, which `Group` will find and use
// for selecting candidates. However, when those candidates' slots are
// actually inspected, there are no corresponding slots for the cloned bytes,
// so we need to make sure we've treated those offsets as "wrapping around".
//
// It turns out that this probe sequence visits every group exactly once if the
// number of groups is a power of two, since (i^2+i)/2 is a bijection in
// Z/(2^m). See https://en.wikipedia.org/wiki/Quadratic_probing
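//
// As a worked example, with `Width == 16`, `mask == 63` and `hash == 5` the
// sequence of group offsets is
//
//   p(0) = 5, p(1) = 21, p(2) = 53, p(3) = (5 + 96) mod 64 = 37, ...
//
// i.e. the stride grows by `Width` at every step and wraps at `mask + 1`.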
template <size_t Width>
class probe_seq {};

template <class ContainerKey, class Hash, class Eq>
struct RequireUsableKey {};

template <class E, class Policy, class Hash, class Eq, class... Ts>
struct IsDecomposable : std::false_type {};

template <class Policy, class Hash, class Eq, class... Ts>
struct IsDecomposable<
    absl::void_t<decltype(Policy::apply(
        RequireUsableKey<typename Policy::key_type, Hash, Eq>(),
        std::declval<Ts>()...))>,
    Policy, Hash, Eq, Ts...> : std::true_type {};

// TODO(alkis): Switch to std::is_nothrow_swappable when gcc/clang supports it.
template <class T>
constexpr bool IsNoThrowSwappable(std::true_type = {} /* is_swappable */) {}
template <class T>
constexpr bool IsNoThrowSwappable(std::false_type /* is_swappable */) {}

template <typename T>
uint32_t TrailingZeros(T x) {}

// An 8-byte bitmask with the most significant bit set in every byte.
constexpr uint64_t kMsbs8Bytes = 0x8080808080808080ULL;

// An abstract bitmask, such as that emitted by a SIMD instruction.
//
// Specifically, this type implements a simple bitset whose representation is
// controlled by `SignificantBits` and `Shift`. `SignificantBits` is the number
// of abstract bits in the bitset, while `Shift` is the log-base-two of the
// width of an abstract bit in the representation.
// This mask provides operations for any number of real bits set in an abstract
// bit. To add iteration on top of that, the implementation must guarantee that
// no real bit other than the most significant one is set in a set abstract
// bit.
template <class T, int SignificantBits, int Shift = 0>
class NonIterableBitMask {};

// A mask that supports iteration.
//
// For example, when `SignificantBits` is 16 and `Shift` is zero, this is just
// an ordinary 16-bit bitset occupying the low 16 bits of `mask`. When
// `SignificantBits` is 8 and `Shift` is 3, abstract bits are represented as
// the bytes `0x00` and `0x80`, and it occupies all 64 bits of the bitmask.
// If NullifyBitsOnIteration is true (only allowed when Shift == 3), a
// non-zero abstract bit is allowed to have additional bits set
// (e.g., `0xff`, `0x83` and `0x9c` are ok, but `0x6f` is not).
//
// For example:
//   for (int i : BitMask<uint32_t, 16>(0b101)) -> yields 0, 2
//   for (int i : BitMask<uint64_t, 8, 3>(0x0000000080800000)) -> yields 2, 3
template <class T, int SignificantBits, int Shift = 0,
          bool NullifyBitsOnIteration = false>
class BitMask : public NonIterableBitMask<T, SignificantBits, Shift> {};

using h2_t = uint8_t;

// The values here are selected for maximum performance. See the static asserts
// below for details.

// A `ctrl_t` is a single control byte, which can have one of four
// states: empty, deleted, full (which has an associated seven-bit h2_t value)
// and the sentinel. They have the following bit patterns:
//
//      empty: 1 0 0 0 0 0 0 0
//    deleted: 1 1 1 1 1 1 1 0
//       full: 0 h h h h h h h  // h represents the hash bits.
//   sentinel: 1 1 1 1 1 1 1 1
//
// These values are specifically tuned for SSE-flavored SIMD.
// The static_asserts below detail the source of these choices.
//
// We use an enum class so that when strict aliasing is enabled, the compiler
// knows ctrl_t doesn't alias other types.
enum class ctrl_t : int8_t {};
static_assert;
static_assert;
static_assert;
static_assert;
static_assert;
static_assert;

// See definition comment for why this is size 32.
ABSL_DLL extern const ctrl_t kEmptyGroup[32];

// We use these sentinel capacity values in debug mode to indicate different
// classes of bugs.
enum InvalidCapacity : size_t {};

// Returns a pointer to a control byte group that can be used by empty tables.
inline ctrl_t* EmptyGroup() {}

// For use in SOO iterators.
// TODO(b/289225379): we could potentially get rid of this by adding an is_soo
// bit in iterators. This would add branches but reduce cache misses.
ABSL_DLL extern const ctrl_t kSooControl[17];

// Returns a pointer to a full byte followed by a sentinel byte.
inline ctrl_t* SooControl() {}
// Whether ctrl is from the SooControl array.
inline bool IsSooControl(const ctrl_t* ctrl) {}

// Returns a pointer to a generation to use for an empty hashtable.
GenerationType* EmptyGeneration();

// Returns whether `generation` is a generation for an empty hashtable that
// could be returned by EmptyGeneration().
inline bool IsEmptyGeneration(const GenerationType* generation) {}

// Mixes a randomly generated per-process seed with `hash` and `ctrl` to
// randomize insertion order within groups.
bool ShouldInsertBackwardsForDebug(size_t capacity, size_t hash,
                                   const ctrl_t* ctrl);

ABSL_ATTRIBUTE_ALWAYS_INLINE inline bool ShouldInsertBackwards(
    ABSL_ATTRIBUTE_UNUSED size_t capacity, ABSL_ATTRIBUTE_UNUSED size_t hash,
    ABSL_ATTRIBUTE_UNUSED const ctrl_t* ctrl) {}

// Returns the insert position for the given mask.
// We want to add entropy even when ASLR is not enabled.
// In debug builds we will randomly insert in either the front or back of
// the group.
// TODO(kfm,sbenza): revisit after we do unconditional mixing
template <class Mask>
ABSL_ATTRIBUTE_ALWAYS_INLINE inline auto GetInsertionOffset(
    Mask mask, ABSL_ATTRIBUTE_UNUSED size_t capacity,
    ABSL_ATTRIBUTE_UNUSED size_t hash,
    ABSL_ATTRIBUTE_UNUSED const ctrl_t* ctrl) {}

// Returns a per-table hash salt, which changes on resize. This gets mixed into
// H1 to randomize iteration order per-table.
//
// The seed consists of the ctrl_ pointer, which adds enough entropy to ensure
// non-determinism of iteration order in most cases.
inline size_t PerTableSalt(const ctrl_t* ctrl) {}
// Extracts the H1 portion of a hash: 57 bits mixed with a per-table salt.
inline size_t H1(size_t hash, const ctrl_t* ctrl) {}

// Extracts the H2 portion of a hash: the 7 bits not used for H1.
//
// These are used as an occupied control byte.
inline h2_t H2(size_t hash) {}

// Helpers for checking the state of a control byte.
inline bool IsEmpty(ctrl_t c) {}
inline bool IsFull(ctrl_t c) {}
inline bool IsDeleted(ctrl_t c) {}
inline bool IsEmptyOrDeleted(ctrl_t c) {}
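// Given the bit patterns listed above (in two's complement: kEmpty == -128,
// kDeleted == -2, kSentinel == -1), these checks reduce to cheap comparisons;
// as a sketch:
//
//   IsEmpty(c)           ==  (c == ctrl_t::kEmpty)
//   IsDeleted(c)         ==  (c == ctrl_t::kDeleted)
//   IsFull(c)            ==  (static_cast<int8_t>(c) >= 0)  // MSB clear
//   IsEmptyOrDeleted(c)  ==  (c < ctrl_t::kSentinel)        // i.e. c < -1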

#ifdef ABSL_INTERNAL_HAVE_SSE2
// Quick reference guide for intrinsics used below:
//
// * __m128i: An XMM (128-bit) word.
//
// * _mm_setzero_si128: Returns a zero vector.
// * _mm_set1_epi8:     Returns a vector with the same i8 in each lane.
//
// * _mm_subs_epi8:    Saturating-subtracts two i8 vectors.
// * _mm_and_si128:    Ands two i128s together.
// * _mm_or_si128:     Ors two i128s together.
// * _mm_andnot_si128: And-nots two i128s together.
//
// * _mm_cmpeq_epi8: Component-wise compares two i8 vectors for equality,
//                   filling each lane with 0x00 or 0xff.
// * _mm_cmpgt_epi8: Same as above, but using > rather than ==.
//
// * _mm_loadu_si128:  Performs an unaligned load of an i128.
// * _mm_storeu_si128: Performs an unaligned store of an i128.
//
// * _mm_sign_epi8:     Retains, negates, or zeroes each i8 lane of the first
//                      argument if the corresponding lane of the second
//                      argument is positive, negative, or zero, respectively.
// * _mm_movemask_epi8: Selects the sign bit out of each i8 lane and produces a
//                      bitmask consisting of those bits.
// * _mm_shuffle_epi8:  Selects i8s from the first argument, using the low
//                      four bits of each i8 lane in the second argument as
//                      indices.

// https://github.com/abseil/abseil-cpp/issues/209
// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=87853
// _mm_cmpgt_epi8 is broken under GCC with -funsigned-char. Work around this
// by using the portable implementation of Group when using -funsigned-char
// under GCC.
inline __m128i _mm_cmpgt_epi8_fixed(__m128i a, __m128i b) {}

struct GroupSse2Impl {};
#endif  // ABSL_INTERNAL_HAVE_SSE2

#if defined(ABSL_INTERNAL_HAVE_ARM_NEON) && defined(ABSL_IS_LITTLE_ENDIAN)
struct GroupAArch64Impl {
  static constexpr size_t kWidth = 8;

  explicit GroupAArch64Impl(const ctrl_t* pos) {
    ctrl = vld1_u8(reinterpret_cast<const uint8_t*>(pos));
  }

  auto Match(h2_t hash) const {
    uint8x8_t dup = vdup_n_u8(hash);
    auto mask = vceq_u8(ctrl, dup);
    return BitMask<uint64_t, kWidth, /*Shift=*/3,
                   /*NullifyBitsOnIteration=*/true>(
        vget_lane_u64(vreinterpret_u64_u8(mask), 0));
  }

  NonIterableBitMask<uint64_t, kWidth, 3> MaskEmpty() const {
    uint64_t mask =
        vget_lane_u64(vreinterpret_u64_u8(vceq_s8(
                          vdup_n_s8(static_cast<int8_t>(ctrl_t::kEmpty)),
                          vreinterpret_s8_u8(ctrl))),
                      0);
    return NonIterableBitMask<uint64_t, kWidth, 3>(mask);
  }

  // Returns a bitmask representing the positions of full slots.
  // Note: for `is_small()` tables, the group may contain the "same" slot
  // twice: original and mirrored.
  auto MaskFull() const {
    uint64_t mask = vget_lane_u64(
        vreinterpret_u64_u8(vcge_s8(vreinterpret_s8_u8(ctrl),
                                    vdup_n_s8(static_cast<int8_t>(0)))),
        0);
    return BitMask<uint64_t, kWidth, /*Shift=*/3,
                   /*NullifyBitsOnIteration=*/true>(mask);
  }

  // Returns a bitmask representing the positions of non-full slots.
  // Note: this includes kEmpty, kDeleted, and kSentinel.
  // It is useful in contexts where kSentinel is not present.
  auto MaskNonFull() const {
    uint64_t mask = vget_lane_u64(
        vreinterpret_u64_u8(vclt_s8(vreinterpret_s8_u8(ctrl),
                                    vdup_n_s8(static_cast<int8_t>(0)))),
        0);
    return BitMask<uint64_t, kWidth, /*Shift=*/3,
                   /*NullifyBitsOnIteration=*/true>(mask);
  }

  NonIterableBitMask<uint64_t, kWidth, 3> MaskEmptyOrDeleted() const {
    uint64_t mask =
        vget_lane_u64(vreinterpret_u64_u8(vcgt_s8(
                          vdup_n_s8(static_cast<int8_t>(ctrl_t::kSentinel)),
                          vreinterpret_s8_u8(ctrl))),
                      0);
    return NonIterableBitMask<uint64_t, kWidth, 3>(mask);
  }

  uint32_t CountLeadingEmptyOrDeleted() const {
    uint64_t mask =
        vget_lane_u64(vreinterpret_u64_u8(vcle_s8(
                          vdup_n_s8(static_cast<int8_t>(ctrl_t::kSentinel)),
                          vreinterpret_s8_u8(ctrl))),
                      0);
    // Similar to MaskEmptyOrDeleted() but we invert the logic to invert the
    // produced bitfield. We then count the number of trailing zeros.
    // Clang and GCC optimize countr_zero to rbit+clz without any check for 0,
    // so we should be fine.
    return static_cast<uint32_t>(countr_zero(mask)) >> 3;
  }

  void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const {
    uint64_t mask = vget_lane_u64(vreinterpret_u64_u8(ctrl), 0);
    constexpr uint64_t slsbs = 0x0202020202020202ULL;
    constexpr uint64_t midbs = 0x7e7e7e7e7e7e7e7eULL;
    auto x = slsbs & (mask >> 6);
    auto res = (x + midbs) | kMsbs8Bytes;
    little_endian::Store64(dst, res);
  }

  uint8x8_t ctrl;
};
#endif  // ABSL_INTERNAL_HAVE_ARM_NEON && ABSL_IS_LITTLE_ENDIAN

struct GroupPortableImpl {};

#ifdef ABSL_INTERNAL_HAVE_SSE2
using Group = GroupSse2Impl;
using GroupFullEmptyOrDeleted = GroupSse2Impl;
#elif defined(ABSL_INTERNAL_HAVE_ARM_NEON) && defined(ABSL_IS_LITTLE_ENDIAN)
using Group = GroupAArch64Impl;
// For Aarch64, we use the portable implementation for counting and masking
// full, empty or deleted group elements. This is to avoid the latency of moving
// between data GPRs and Neon registers when it does not provide a benefit.
// Using Neon is profitable when we call Match(), but is not when we don't,
// which is the case when we do *EmptyOrDeleted and MaskFull operations.
// It is difficult to make a similar approach beneficial on other architectures
// such as x86 since they have much lower GPR <-> vector register transfer
// latency and 16-wide Groups.
using GroupFullEmptyOrDeleted = GroupPortableImpl;
#else
using Group = GroupPortableImpl;
using GroupFullEmptyOrDeleted = GroupPortableImpl;
#endif

// When there is an insertion with no reserved growth, we rehash with
// probability `min(1, RehashProbabilityConstant() / capacity())`. Using a
// constant divided by capacity ensures that inserting N elements is still O(N)
// in the average case. Using the constant 16 means that we expect to rehash ~8
// times more often than when generations are disabled. Per capacity growth, we
// add an expected rehash_probability * #insertions = (16 / capacity) *
// ((7/8 - 7/16) * capacity) = ~7 extra rehashes.
inline size_t RehashProbabilityConstant() {}

class CommonFieldsGenerationInfoEnabled {};

class CommonFieldsGenerationInfoDisabled {};

class HashSetIteratorGenerationInfoEnabled {};

class HashSetIteratorGenerationInfoDisabled {};

#ifdef ABSL_SWISSTABLE_ENABLE_GENERATIONS
using CommonFieldsGenerationInfo = CommonFieldsGenerationInfoEnabled;
using HashSetIteratorGenerationInfo = HashSetIteratorGenerationInfoEnabled;
#else
using CommonFieldsGenerationInfo = CommonFieldsGenerationInfoDisabled;
using HashSetIteratorGenerationInfo = HashSetIteratorGenerationInfoDisabled;
#endif

// Stores the information regarding the number of slots we can still fill
// without needing to rehash.
//
// We want to ensure a sufficient number of empty slots in the table in order
// to keep probe sequences relatively short. An empty slot in the probe group
// is required to stop probing.
//
// Tombstones (kDeleted slots) are not included in the growth capacity,
// because we'd like to rehash when the table is filled with tombstones and/or
// full slots.
//
// GrowthInfo also stores a bit that encodes whether the table may have any
// deleted slots.
// Most tables (>95%) have no deleted slots, so some functions can
// be more efficient with this information.
//
// Callers can also force a rehash via the standard `rehash(0)`,
// which will recompute this value as a side-effect.
//
// See also `CapacityToGrowth()`.
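//
// One possible single-word encoding, as a sketch with hypothetical member
// names (not necessarily the exact representation used below):
//
//   static constexpr size_t kDeletedBit = ~(~size_t{} >> 1);  // MSB
//   bool may_have_deleted() const { return (info_ & kDeletedBit) != 0; }
//   size_t growth_left() const { return info_ & ~kDeletedBit; }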
class GrowthInfo {};

static_assert;
static_assert;

// Returns whether `n` is a valid capacity (i.e., number of slots).
//
// A valid capacity is a non-zero integer `2^m - 1`.
inline bool IsValidCapacity(size_t n) {}

// Returns the number of "cloned control bytes".
//
// This is the number of control bytes that are present both at the beginning
// of the control byte array and at the end, such that we can create a
// `Group::kWidth`-width probe window starting from any control byte.
constexpr size_t NumClonedBytes() {}

// Returns the number of control bytes including cloned.
constexpr size_t NumControlBytes(size_t capacity) {}
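//
// As a sketch, for valid capacities (of the form 2^m - 1) these amount to:
//
//   IsValidCapacity(n)    : n != 0 && ((n + 1) & n) == 0
//   NumClonedBytes()      : Group::kWidth - 1
//   NumControlBytes(cap)  : cap + 1 /* sentinel */ + NumClonedBytes()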

// Computes the offset of the control bytes from the start of the backing
// allocation. `infoz` and `growth_info` are stored at the beginning of the
// backing array.
inline static size_t ControlOffset(bool has_infoz) {}

// Helper class for computing offsets and allocation size of hash set fields.
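// Given the BackingArray layout described at the top of this file, the total
// allocation size is roughly (a sketch; the real computation also rounds the
// slot offset up to the slot alignment):
//
//   alloc_size = ControlOffset(has_infoz)   // infoz handle + growth_left
//              + NumControlBytes(capacity)  // ctrl, sentinel, clones
//              + NumGenerationBytes()       // only in generation builds
//              + capacity * slot_size;      // the slots themselves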
class RawHashSetLayout {};

struct HashtableFreeFunctionsAccess;

// We only allow a maximum of 1 SOO element, which makes the implementation
// much simpler. Complications with multiple SOO elements include:
// - Satisfying the guarantee that erasing one element doesn't invalidate
//   iterators to other elements means we would probably need actual SOO
//   control bytes.
// - In order to prevent user code from depending on iteration order for small
//   tables, we would need to randomize the iteration order somehow.
constexpr size_t SooCapacity() {}
// Sentinel type to indicate SOO CommonFields construction.
struct soo_tag_t {};
// Sentinel type to indicate SOO CommonFields construction with full size.
struct full_soo_tag_t {};

// Suppress erroneous uninitialized memory errors on GCC. For example, GCC
// thinks that the call to slot_array() in find_or_prepare_insert() is reading
// uninitialized memory, but slot_array is only called there when the table is
// non-empty and this memory is initialized when the table is non-empty.
#if !defined(__clang__) && defined(__GNUC__)
#define ABSL_SWISSTABLE_IGNORE_UNINITIALIZED
#define ABSL_SWISSTABLE_IGNORE_UNINITIALIZED_RETURN
#else
#define ABSL_SWISSTABLE_IGNORE_UNINITIALIZED
#define ABSL_SWISSTABLE_IGNORE_UNINITIALIZED_RETURN
#endif

// This allows us to work around an uninitialized memory warning when
// constructing begin() iterators in empty hashtables.
union MaybeInitializedPtr {};

struct HeapPtrs {};

// Manages the backing array pointers or the SOO slot. When raw_hash_set::is_soo
// is true, the SOO slot is stored in `soo_data`. Otherwise, we use `heap`.
union HeapOrSoo {};

// CommonFields hold the fields in raw_hash_set that do not depend
// on template parameters. This allows us to conveniently pass all
// of this state to helper functions as a single argument.
class CommonFields : public CommonFieldsGenerationInfo {};

template <class Policy, class Hash, class Eq, class Alloc>
class raw_hash_set;

// Returns the next valid capacity after `n`.
inline size_t NextCapacity(size_t n) {}

// Applies the following mapping to every byte in the control array:
//   * kDeleted -> kEmpty
//   * kEmpty -> kEmpty
//   * _ -> kDeleted
// PRECONDITION:
//   IsValidCapacity(capacity)
//   ctrl[capacity] == ctrl_t::kSentinel
//   ctrl[i] != ctrl_t::kSentinel for all i < capacity
void ConvertDeletedToEmptyAndFullToDeleted(ctrl_t* ctrl, size_t capacity);

// Converts `n` into the next valid capacity, per `IsValidCapacity`.
inline size_t NormalizeCapacity(size_t n) {}

// General notes on capacity/growth methods below:
// - We use 7/8th as maximum load factor. For 16-wide groups, that gives an
//   average of two empty slots per group.
// - For (capacity+1) >= Group::kWidth, growth is 7/8*capacity.
// - For (capacity+1) < Group::kWidth, growth == capacity. In this case, we
//   never need to probe (the whole table fits in one group) so we don't need a
//   load factor less than 1.

// Given `capacity`, applies the load factor; i.e., it returns the maximum
// number of values we should put into the table before a resizing rehash.
inline size_t CapacityToGrowth(size_t capacity) {}

// Given `growth`, "unapplies" the load factor to find how large the capacity
// should be to stay within the load factor.
//
// This might not be a valid capacity and `NormalizeCapacity()` should be
// called on this.
inline size_t GrowthToLowerboundCapacity(size_t growth) {}
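//
// As a sketch, for (capacity + 1) >= Group::kWidth these are approximately:
//
//   CapacityToGrowth(capacity)          ~=  capacity - capacity / 8
//   GrowthToLowerboundCapacity(growth)  ~=  growth + (growth - 1) / 7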

template <class InputIter>
size_t SelectBucketCountForIterRange(InputIter first, InputIter last,
                                     size_t bucket_count) {}

constexpr bool SwisstableDebugEnabled() {}

inline void AssertIsFull(const ctrl_t* ctrl, GenerationType generation,
                         const GenerationType* generation_ptr,
                         const char* operation) {}

// Note that for comparisons, null/end iterators are valid.
inline void AssertIsValidForComparison(const ctrl_t* ctrl,
                                       GenerationType generation,
                                       const GenerationType* generation_ptr) {}

// If the two iterators come from the same container, then their pointers will
// interleave such that ctrl_a <= ctrl_b < slot_a <= slot_b or vice versa.
// Note: we take slots by reference so that it's not UB if they're uninitialized
// as long as we don't read them (when ctrl is null).
inline bool AreItersFromSameContainer(const ctrl_t* ctrl_a,
                                      const ctrl_t* ctrl_b,
                                      const void* const& slot_a,
                                      const void* const& slot_b) {}

// Asserts that two iterators come from the same container.
// Note: we take slots by reference so that it's not UB if they're uninitialized
// as long as we don't read them (when ctrl is null).
inline void AssertSameContainer(const ctrl_t* ctrl_a, const ctrl_t* ctrl_b,
                                const void* const& slot_a,
                                const void* const& slot_b,
                                const GenerationType* generation_ptr_a,
                                const GenerationType* generation_ptr_b) {}

struct FindInfo {};

// Whether a table is "small". A small table fits entirely into a probing
// group, i.e., has a capacity < `Group::kWidth`.
//
// In small mode we are able to use the whole capacity. The extra control
// bytes give us at least one "empty" control byte to stop the iteration.
// This is important to make 1 a valid capacity.
//
// In small mode only the first `capacity` control bytes after the sentinel
// are valid. The rest contain dummy ctrl_t::kEmpty values that do not
// represent a real slot. This is important to take into account on
// `find_first_non_full()`, where we never try
// `ShouldInsertBackwards()` for small tables.
inline bool is_small(size_t capacity) {}

// Whether a table fits entirely into a probing group.
// Arbitrary order of elements in such tables is correct.
inline bool is_single_group(size_t capacity) {}

// Begins a probing operation on `common.control`, using `hash`.
inline probe_seq<Group::kWidth> probe(const ctrl_t* ctrl, const size_t capacity,
                                      size_t hash) {}
inline probe_seq<Group::kWidth> probe(const CommonFields& common, size_t hash) {}

// Probes an array of control bits using a probe sequence derived from `hash`,
// and returns the offset corresponding to the first deleted or empty slot.
//
// Behavior when the entire table is full is undefined.
//
// NOTE: this function must work with tables having both empty and deleted
// slots in the same group. Such tables appear during `erase()`.
template <typename = void>
inline FindInfo find_first_non_full(const CommonFields& common, size_t hash) {}

// The extern template declaration for this inline function keeps the
// possibility of inlining. When the compiler decides not to inline, no symbols
// are added to the corresponding translation unit.
extern template FindInfo find_first_non_full(const CommonFields&, size_t);

// Non-inlined version of find_first_non_full for use in less
// performance critical routines.
FindInfo find_first_non_full_outofline(const CommonFields&, size_t);

inline void ResetGrowthLeft(CommonFields& common) {}

// Sets `ctrl` to `{kEmpty, kSentinel, ..., kEmpty}`, marking the entire
// array as empty.
inline void ResetCtrl(CommonFields& common, size_t slot_size) {}

// Sets sanitizer poisoning for slot corresponding to control byte being set.
inline void DoSanitizeOnSetCtrl(const CommonFields& c, size_t i, ctrl_t h,
                                size_t slot_size) {}

// Sets `ctrl[i]` to `h`.
//
// Unlike setting it directly, this function will perform bounds checks and
// mirror the value to the cloned tail if necessary.
inline void SetCtrl(const CommonFields& c, size_t i, ctrl_t h,
                    size_t slot_size) {}
// Overload for setting to an occupied `h2_t` rather than a special `ctrl_t`.
inline void SetCtrl(const CommonFields& c, size_t i, h2_t h, size_t slot_size) {}
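// Conceptually, the mirroring performed by SetCtrl is equivalent to the
// following branchy sketch (the real code is branchless):
//
//   ctrl[i] = h;
//   if (i < NumClonedBytes()) ctrl[capacity + 1 + i] = h;  // mirror into the
//                                                          // cloned tail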

// Like SetCtrl, but in a single group table, we can save some operations when
// setting the cloned control byte.
inline void SetCtrlInSingleGroupTable(const CommonFields& c, size_t i, ctrl_t h,
                                      size_t slot_size) {}
// Overload for setting to an occupied `h2_t` rather than a special `ctrl_t`.
inline void SetCtrlInSingleGroupTable(const CommonFields& c, size_t i, h2_t h,
                                      size_t slot_size) {}

// growth_info (which is a size_t) is stored with the backing array.
constexpr size_t BackingArrayAlignment(size_t align_of_slot) {}

// Returns the address of the ith slot in slots where each slot occupies
// slot_size.
inline void* SlotAddress(void* slot_array, size_t slot, size_t slot_size) {}

// Iterates over all full slots and calls `cb(const ctrl_t*, SlotType*)`.
// No insertion into the table is allowed during the callback.
// Erasure is allowed only for the element passed to the callback.
template <class SlotType, class Callback>
ABSL_ATTRIBUTE_ALWAYS_INLINE inline void IterateOverFullSlots(
    const CommonFields& c, SlotType* slot, Callback cb) {}

template <typename CharAlloc>
constexpr bool ShouldSampleHashtablezInfo() {}

template <bool kSooEnabled>
HashtablezInfoHandle SampleHashtablezInfo(size_t sizeof_slot, size_t sizeof_key,
                                          size_t sizeof_value,
                                          size_t old_capacity, bool was_soo,
                                          HashtablezInfoHandle forced_infoz,
                                          CommonFields& c) {}

// Helper class to perform resize of the hash set.
//
// It contains special optimizations for small group resizes.
// See GrowIntoSingleGroupShuffleControlBytes for details.
class HashSetResizeHelper {};

inline void PrepareInsertCommon(CommonFields& common) {}

// Like prepare_insert, but for the case of inserting into a full SOO table.
size_t PrepareInsertAfterSoo(size_t hash, size_t slot_size,
                             CommonFields& common);

// PolicyFunctions bundles together some information for a particular
// raw_hash_set<T, ...> instantiation. This information is passed to
// type-erased functions that want to do small amounts of type-specific
// work.
struct PolicyFunctions {};

// ClearBackingArray clears the backing array, either modifying it in place,
// or creating a new one based on the value of "reuse".
// REQUIRES: c.capacity > 0
void ClearBackingArray(CommonFields& c, const PolicyFunctions& policy,
                       bool reuse, bool soo_enabled);

// Type-erased version of raw_hash_set::erase_meta_only.
void EraseMetaOnly(CommonFields& c, size_t index, size_t slot_size);

// Function to place in PolicyFunctions::dealloc for raw_hash_sets
// that are using std::allocator. This allows us to share the same
// function body for raw_hash_set instantiations that have the
// same slot alignment.
template <size_t AlignOfSlot>
ABSL_ATTRIBUTE_NOINLINE void DeallocateStandard(CommonFields& common,
                                                const PolicyFunctions& policy) {}

// For trivially relocatable types we use memcpy directly. This allows us to
// share the same function body for raw_hash_set instantiations that have the
// same slot size as long as they are relocatable.
template <size_t SizeOfSlot>
ABSL_ATTRIBUTE_NOINLINE void TransferRelocatable(void*, void* dst, void* src) {}

// Type erased raw_hash_set::get_hash_ref_fn for the empty hash function case.
const void* GetHashRefForEmptyHasher(const CommonFields& common);

// Given the hash of a value not currently in the table and the first empty
// slot in the probe sequence, finds a viable slot index to insert it at.
//
// In case there's no space left, the table can be resized or rehashed
// (for tables with deleted slots, see FindInsertPositionWithGrowthOrRehash).
//
// If there are no deleted slots and growth_left is positive, the element can
// be inserted at the provided `target` position.
//
// When the table has deleted slots (according to GrowthInfo), the target
// position will be searched one more time using `find_first_non_full`.
//
// REQUIRES: Table is not SOO.
// REQUIRES: At least one non-full slot available.
// REQUIRES: `target` is a valid empty position to insert.
size_t PrepareInsertNonSoo(CommonFields& common, size_t hash, FindInfo target,
                           const PolicyFunctions& policy);

// A SwissTable.
//
// Policy: a policy defines how to perform different operations on
// the slots of the hashtable (see hash_policy_traits.h for the full interface
// of policy).
//
// Hash: a (possibly polymorphic) functor that hashes keys of the hashtable. The
// functor should accept a key and return a size_t hash. For best performance,
// it is important that the hash function provides high entropy across all bits
// of the hash.
//
// Eq: a (possibly polymorphic) functor that compares two keys for equality. It
// should accept two keys (possibly of different types) and return a bool: true
// if they are equal, false if they are not. If two keys compare equal, then
// their hash values as defined by Hash MUST be equal.
//
// Allocator: an Allocator
// [https://en.cppreference.com/w/cpp/named_req/Allocator] with which
// the storage of the hashtable will be allocated and the elements will be
// constructed and destroyed.
template <class Policy, class Hash, class Eq, class Alloc>
class raw_hash_set {};

// Friend access for free functions in raw_hash_set.h.
struct HashtableFreeFunctionsAccess {};

// Erases all elements that satisfy the predicate `pred` from the container `c`.
template <typename P, typename H, typename E, typename A, typename Predicate>
typename raw_hash_set<P, H, E, A>::size_type EraseIf(
    Predicate& pred, raw_hash_set<P, H, E, A>* c) {}

// Calls `cb` for all elements in the container `c`.
template <typename P, typename H, typename E, typename A, typename Callback>
void ForEach(Callback& cb, raw_hash_set<P, H, E, A>* c) {}
template <typename P, typename H, typename E, typename A, typename Callback>
void ForEach(Callback& cb, const raw_hash_set<P, H, E, A>* c) {}
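// For example, as a usage sketch:
//
//   auto is_negative = [](const auto& v) { return v < 0; };
//   size_t removed = container_internal::EraseIf(is_negative, &set);
//
//   auto log_value = [](const auto& v) { /* ... */ };
//   container_internal::ForEach(log_value, &set);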

namespace hashtable_debug_internal {
template <typename Set>
struct HashtableDebugAccess<Set, absl::void_t<typename Set::raw_hash_set>> {};

}  // namespace hashtable_debug_internal
}  // namespace container_internal
ABSL_NAMESPACE_END
}  // namespace absl

#undef ABSL_SWISSTABLE_ENABLE_GENERATIONS
#undef ABSL_SWISSTABLE_IGNORE_UNINITIALIZED
#undef ABSL_SWISSTABLE_IGNORE_UNINITIALIZED_RETURN

#endif  // ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_H_