cpython/Objects/dictobject.c

/* Dictionary object implementation using a hash table */

/* The distribution includes a separate file, Objects/dictnotes.txt,
   describing explorations into dictionary design and optimization.
   It covers typical dictionary use patterns, the parameters for
   tuning dictionaries, and several ideas for possible optimizations.
*/

/* PyDictKeysObject

This implements the dictionary's hashtable.

As of Python 3.6, this is compact and ordered. Basic idea is described here:
* https://mail.python.org/pipermail/python-dev/2012-December/123028.html
* https://morepypy.blogspot.com/2015/01/faster-more-memory-efficient-and-more.html

layout:

+---------------------+
| dk_refcnt           |
| dk_log2_size        |
| dk_log2_index_bytes |
| dk_kind             |
| dk_version          |
| dk_usable           |
| dk_nentries         |
+---------------------+
| dk_indices[]        |
|                     |
+---------------------+
| dk_entries[]        |
|                     |
+---------------------+

dk_indices is the actual hashtable.  It holds indices into dk_entries, or
DKIX_EMPTY(-1) or DKIX_DUMMY(-2).
The size of dk_indices is dk_size (== 2**dk_log2_size).  The type of each
index in dk_indices varies with dk_size:

* int8  for          dk_size <= 128
* int16 for 256   <= dk_size <= 2**15
* int32 for 2**16 <= dk_size <= 2**31
* int64 for 2**32 <= dk_size

dk_entries is an array of PyDictKeyEntry when dk_kind == DICT_KEYS_GENERAL,
or of PyDictUnicodeEntry otherwise.  Its length is USABLE_FRACTION(dk_size).

NOTE: Since negative values are used for DKIX_EMPTY and DKIX_DUMMY, the type
of each dk_indices entry is a signed integer, and int16 is used for a table
with dk_size == 256.
*/
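
/*
For illustration only (a hedged sketch, not the CPython implementation; the
helper name example_index_size is hypothetical): the per-slot index width in
the table above follows from dk_log2_size like this:

    #include <stdint.h>
    #include <stddef.h>

    static size_t
    example_index_size(uint8_t log2_size)
    {
        size_t dk_size = (size_t)1 << log2_size;
        if (dk_size <= 128) {
            return 1;                       // int8_t per slot
        }
        if (dk_size <= ((size_t)1 << 15)) {
            return 2;                       // int16_t per slot
        }
        if (dk_size <= ((size_t)1 << 31)) {
            return 4;                       // int32_t per slot
        }
        return 8;                           // int64_t per slot
    }

dk_log2_index_bytes then caches the log2 of dk_size * example_index_size(...),
i.e. the total size of dk_indices in bytes.
*/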


/*
The DictObject can be in one of two forms.

Either:
  A combined table:
    ma_values == NULL, dk_refcnt == 1.
    Values are stored in the me_value field of the PyDictKeyEntry.
Or:
  A split table:
    ma_values != NULL, dk_refcnt >= 1
    Values are stored in the ma_values array.
    Only string (unicode) keys are allowed.

There are four kinds of slots in the table (a slot is an index in dk_indices,
plus DK_ENTRIES(keys)[index] when index >= 0):

1. Unused.  index == DKIX_EMPTY
   Does not hold an active (key, value) pair now and never did.  Unused can
   transition to Active upon key insertion.  This is each slot's initial state.

2. Active.  index >= 0, me_key != NULL and me_value != NULL
   Holds an active (key, value) pair.  Active can transition to Dummy or
   Pending upon key deletion (for combined and split tables respectively).
   This is the only case in which me_value != NULL.

3. Dummy.  index == DKIX_DUMMY  (combined only)
   Previously held an active (key, value) pair, but that was deleted and an
   active pair has not yet overwritten the slot.  Dummy can transition to
   Active upon key insertion.  Dummy slots cannot be made Unused again;
   otherwise the probe sequence, in case of collision, would have no way to
   know they were once active.
   In free-threaded builds, dummy slots are not re-used, so that lock-free
   lookups can proceed safely.

4. Pending. index >= 0, key != NULL, and value == NULL  (split only)
   Not yet inserted into the split table.
*/
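
/*
For illustration only (a hedged sketch, not code CPython runs; SlotKind and
example_classify_slot are hypothetical names, DKIX_* come from pycore_dict.h):
the four states above can be read off the index value plus the entry fields:

    typedef enum { SLOT_UNUSED, SLOT_ACTIVE, SLOT_DUMMY, SLOT_PENDING } SlotKind;

    static SlotKind
    example_classify_slot(Py_ssize_t ix, PyObject *key, PyObject *value)
    {
        if (ix == DKIX_EMPTY) {
            return SLOT_UNUSED;
        }
        if (ix == DKIX_DUMMY) {
            return SLOT_DUMMY;              // combined tables only
        }
        if (key != NULL && value != NULL) {
            return SLOT_ACTIVE;             // ix >= 0
        }
        return SLOT_PENDING;                // key set, value not yet (split)
    }
*/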

/*
Preserving insertion order

This is simple for a combined table.  Since dk_entries is mostly append-only,
we can recover the insertion order just by iterating over dk_entries.

One exception is .popitem().  It removes the last item in dk_entries and
decrements dk_nentries to achieve amortized O(1).  Since a DKIX_DUMMY remains
in dk_indices, we can't increment dk_usable even though dk_nentries is
decremented.

To preserve the order in a split table, a bit vector is used to record the
insertion order.  When a key is inserted, the bit vector is shifted up by 4
bits and the index of the key is stored in the low 4 bits.
As a consequence of this, split keys have a maximum size of 16.
*/
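
/*
A hedged sketch of the bit-vector idea above (CPython keeps this state inside
PyDictValues rather than in a bare integer; the helper names are hypothetical):

    #include <stdint.h>

    // Record that the split key at slot `ix` (0..15) was just inserted.
    static uint64_t
    example_order_append(uint64_t order, unsigned ix)
    {
        return (order << 4) | (ix & 0xF);
    }

    // Slot of the i-th most recently inserted key (i == 0 is the newest).
    static unsigned
    example_order_get(uint64_t order, unsigned i)
    {
        return (unsigned)(order >> (4 * i)) & 0xF;
    }

With 4 bits per key, a 64-bit vector can describe at most 16 keys, which is
where the size limit mentioned above comes from.
*/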

/* PyDict_MINSIZE is the starting size for any new dict.
 * 8 allows dicts with no more than 5 active entries; experiments suggested
 * this suffices for the majority of dicts (consisting mostly of usually-small
 * dicts created to pass keyword arguments).
 * Making this 8, rather than 4, reduces the number of resizes for most
 * dictionaries, without any significant extra memory use.
 */
#define PyDict_LOG_MINSIZE 3
#define PyDict_MINSIZE 8

#include "Python.h"
#include "pycore_bitutils.h"             // _Py_bit_length
#include "pycore_call.h"                 // _PyObject_CallNoArgs()
#include "pycore_ceval.h"                // _PyEval_GetBuiltin()
#include "pycore_code.h"                 // stats
#include "pycore_critical_section.h"     // Py_BEGIN_CRITICAL_SECTION, Py_END_CRITICAL_SECTION
#include "pycore_dict.h"                 // export _PyDict_SizeOf()
#include "pycore_freelist.h"             // _PyFreeListState_GET()
#include "pycore_gc.h"                   // _PyObject_GC_IS_TRACKED()
#include "pycore_object.h"               // _PyObject_GC_TRACK(), _PyDebugAllocatorStats()
#include "pycore_pyatomic_ft_wrappers.h" // FT_ATOMIC_LOAD_SSIZE_RELAXED
#include "pycore_pyerrors.h"             // _PyErr_GetRaisedException()
#include "pycore_pystate.h"              // _PyThreadState_GET()
#include "pycore_setobject.h"            // _PySet_NextEntry()
#include "stringlib/eq.h"                // unicode_eq()

#include <stdbool.h>

/*[clinic input]
class dict "PyDictObject *" "&PyDict_Type"
[clinic start generated code]*/
/*[clinic end generated code: output=da39a3ee5e6b4b0d input=f157a5a0ce9589d6]*/


/*
To ensure the lookup algorithm terminates, there must be at least one Unused
slot (NULL key) in the table.
To avoid slowing down lookups on a near-full table, we resize the table when
it's USABLE_FRACTION (currently two-thirds) full.
*/

#ifdef Py_GIL_DISABLED

static inline void
ASSERT_DICT_LOCKED(PyObject *op)
{
    _Py_CRITICAL_SECTION_ASSERT_OBJECT_LOCKED(op);
}
#define ASSERT_DICT_LOCKED
#define ASSERT_WORLD_STOPPED_OR_DICT_LOCKED
#define ASSERT_WORLD_STOPPED_OR_OBJ_LOCKED

#define IS_DICT_SHARED
#define SET_DICT_SHARED
#define LOAD_INDEX
#define STORE_INDEX
#define ASSERT_OWNED_OR_SHARED

#define LOCK_KEYS_IF_SPLIT

#define UNLOCK_KEYS_IF_SPLIT

static inline Py_ssize_t
load_keys_nentries(PyDictObject *mp)
{
    PyDictKeysObject *keys = _Py_atomic_load_ptr(&mp->ma_keys);
    return _Py_atomic_load_ssize(&keys->dk_nentries);
}

static inline void
set_keys(PyDictObject *mp, PyDictKeysObject *keys)
{
    ASSERT_OWNED_OR_SHARED(mp);
    _Py_atomic_store_ptr_release(&mp->ma_keys, keys);
}

static inline void
set_values(PyDictObject *mp, PyDictValues *values)
{
    ASSERT_OWNED_OR_SHARED(mp);
    _Py_atomic_store_ptr_release(&mp->ma_values, values);
}

#define LOCK_KEYS
#define UNLOCK_KEYS

#define ASSERT_KEYS_LOCKED
#define LOAD_SHARED_KEY
#define STORE_SHARED_KEY
// Inc refs the keys object, giving the previous value
#define INCREF_KEYS
// Dec refs the keys object, giving the previous value
#define DECREF_KEYS
#define LOAD_KEYS_NENTRIES

#define INCREF_KEYS_FT
#define DECREF_KEYS_FT

static inline void split_keys_entry_added(PyDictKeysObject *keys)
{
    ASSERT_KEYS_LOCKED(keys);

    // We increase before we decrease so we never get too small of a value
    // when we're racing with reads
    _Py_atomic_store_ssize_relaxed(&keys->dk_nentries, keys->dk_nentries + 1);
    _Py_atomic_store_ssize_release(&keys->dk_usable, keys->dk_usable - 1);
}

#else /* Py_GIL_DISABLED */

#define ASSERT_DICT_LOCKED(op)
#define ASSERT_WORLD_STOPPED_OR_DICT_LOCKED(op)
#define ASSERT_WORLD_STOPPED_OR_OBJ_LOCKED(op)
#define LOCK_KEYS(keys)
#define UNLOCK_KEYS(keys)
#define ASSERT_KEYS_LOCKED(keys)
#define LOAD_SHARED_KEY(key)
#define STORE_SHARED_KEY(key, value)
#define INCREF_KEYS(dk)
#define DECREF_KEYS(dk)
#define LOAD_KEYS_NENTRIES(keys)
#define INCREF_KEYS_FT(dk)
#define DECREF_KEYS_FT(dk, shared)
#define LOCK_KEYS_IF_SPLIT(keys, kind)
#define UNLOCK_KEYS_IF_SPLIT(keys, kind)
#define IS_DICT_SHARED(mp)
#define SET_DICT_SHARED(mp)
#define LOAD_INDEX(keys, size, idx)
#define STORE_INDEX(keys, size, idx, value)

static inline void split_keys_entry_added(PyDictKeysObject *keys)
{}

static inline void
set_keys(PyDictObject *mp, PyDictKeysObject *keys)
{}

static inline void
set_values(PyDictObject *mp, PyDictValues *values)
{}

static inline Py_ssize_t
load_keys_nentries(PyDictObject *mp)
{}


#endif

#define STORE_KEY(ep, key)
#define STORE_VALUE(ep, value)
#define STORE_SPLIT_VALUE(mp, idx, value)
#define STORE_HASH(ep, hash)
#define STORE_KEYS_USABLE(keys, usable)
#define STORE_KEYS_NENTRIES(keys, nentries)
#define STORE_USED(mp, used)

#define PERTURB_SHIFT 5

/*
Major subtleties ahead:  Most hash schemes depend on having a "good" hash
function, in the sense of simulating randomness.  Python doesn't:  its most
important hash functions (for ints) are very regular in common
cases:

  >>> [hash(i) for i in range(4)]
  [0, 1, 2, 3]

This isn't necessarily bad!  To the contrary, in a table of size 2**i, taking
the low-order i bits as the initial table index is extremely fast, and there
are no collisions at all for dicts indexed by a contiguous range of ints. So
this gives better-than-random behavior in common cases, and that's very
desirable.

OTOH, when collisions occur, the tendency to fill contiguous slices of the
hash table makes a good collision resolution strategy crucial.  Taking only
the last i bits of the hash code is also vulnerable:  for example, consider
the list [i << 16 for i in range(20000)] as a set of keys.  Since ints are
their own hash codes, and this fits in a dict of size 2**15, the last 15 bits
of every hash code are all 0:  they *all* map to the same table index.

But catering to unusual cases should not slow the usual ones, so we just take
the last i bits anyway.  It's up to collision resolution to do the rest.  If
we *usually* find the key we're looking for on the first try (and, it turns
out, we usually do -- the table load factor is kept under 2/3, so the odds
are solidly in our favor), then it makes best sense to keep the initial index
computation dirt cheap.

The first half of collision resolution is to visit table indices via this
recurrence:

    j = ((5*j) + 1) mod 2**i

For any initial j in range(2**i), repeating that 2**i times generates each
int in range(2**i) exactly once (see any text on random-number generation for
proof).  By itself, this doesn't help much:  like linear probing (setting
j += 1, or j -= 1, on each loop trip), it scans the table entries in a fixed
order.  This would be bad, except that's not the only thing we do, and it's
actually *good* in the common cases where hash keys are consecutive.  In an
example that's really too small to make this entirely clear, for a table of
size 2**3 the order of indices is:

    0 -> 1 -> 6 -> 7 -> 4 -> 5 -> 2 -> 3 -> 0 [and here it's repeating]

If two things come in at index 5, the first place we look after is index 2,
not 6, so if another comes in at index 6 the collision at 5 didn't hurt it.
Linear probing is deadly in this case because there the fixed probe order
is the *same* as the order consecutive keys are likely to arrive.  But it's
extremely unlikely hash codes will follow a 5*j+1 recurrence by accident,
and certain that consecutive hash codes do not.

The other half of the strategy is to get the other bits of the hash code
into play.  This is done by initializing an unsigned variable "perturb" to the
full hash code, and changing the recurrence to:

    perturb >>= PERTURB_SHIFT;
    j = (5*j) + 1 + perturb;
    use j % 2**i as the next table index;

Now the probe sequence depends (eventually) on every bit in the hash code,
and the pseudo-scrambling property of recurring on 5*j+1 is more valuable,
because it quickly magnifies small differences in the bits that didn't affect
the initial index.  Note that because perturb is unsigned, if the recurrence
is executed often enough perturb eventually becomes and remains 0.  At that
point (very rarely reached) the recurrence is on (just) 5*j+1 again, and
that's certain to find an empty slot eventually (since it generates every int
in range(2**i), and we make sure there's always at least one empty slot).

Selecting a good value for PERTURB_SHIFT is a balancing act.  You want it
small so that the high bits of the hash code continue to affect the probe
sequence across iterations; but you want it large so that in really bad cases
the high-order hash bits have an effect on early iterations.  5 was "the
best" in minimizing total collisions across experiments Tim Peters ran (on
both normal and pathological cases), but 4 and 6 weren't significantly worse.

Historical: Reimer Behrends contributed the idea of using a polynomial-based
approach, using repeated multiplication by x in GF(2**n) where an irreducible
polynomial for each table size was chosen such that x was a primitive root.
Christian Tismer later extended that to use division by x instead, as an
efficient way to get the high bits of the hash code into play.  This scheme
also gave excellent collision statistics, but was more expensive:  two
if-tests were required inside the loop; computing "the next" index took about
the same number of operations but without as much potential parallelism
(e.g., computing 5*j can go on at the same time as computing 1+perturb in the
above, and then shifting perturb can be done while the table index is being
masked); and the PyDictObject struct required a member to hold the table's
polynomial.  In Tim's experiments the current scheme ran faster, produced
equally good collision statistics, needed less code & used less memory.

*/
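
/*
A self-contained sketch of the probe sequence described above (hedged
illustration; example_probe_first_empty is a hypothetical name, and the real
lookup additionally consults dk_indices and compares keys):

    #include <stddef.h>
    #include <stdint.h>

    #define EXAMPLE_PERTURB_SHIFT 5

    // Return the first free slot visited for `hash` in a table of
    // 2**log2_size slots, where used[i] != 0 marks an occupied slot.
    // Terminates because the table always keeps at least one free slot.
    static size_t
    example_probe_first_empty(const unsigned char *used, uint8_t log2_size,
                              size_t hash)
    {
        size_t mask = ((size_t)1 << log2_size) - 1;
        size_t perturb = hash;
        size_t i = hash & mask;
        while (used[i]) {
            perturb >>= EXAMPLE_PERTURB_SHIFT;
            i = (i*5 + perturb + 1) & mask;
        }
        return i;
    }
*/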

static int dictresize(PyInterpreterState *interp, PyDictObject *mp,
                      uint8_t log_newsize, int unicode);

static PyObject* dict_iter(PyObject *dict);

static int
setitem_lock_held(PyDictObject *mp, PyObject *key, PyObject *value);
static int
dict_setdefault_ref_lock_held(PyObject *d, PyObject *key, PyObject *default_value,
                    PyObject **result, int incref_result);

#ifndef NDEBUG
static int _PyObject_InlineValuesConsistencyCheck(PyObject *obj);
#endif

#include "clinic/dictobject.c.h"


static inline Py_hash_t
unicode_get_hash(PyObject *o)
{}

/* Print summary info about the state of the optimized allocator */
void
_PyDict_DebugMallocStats(FILE *out)
{}

#define DK_MASK(dk)

#define _Py_DICT_IMMORTAL_INITIAL_REFCNT

static void free_keys_object(PyDictKeysObject *keys, bool use_qsbr);

/* PyDictKeysObject has refcounts like PyObject does, so we have the
   following two functions to mirror what Py_INCREF() and Py_DECREF() do.
   (Keep in mind that PyDictKeysObject isn't actually a PyObject.)
   Likewise a PyDictKeysObject can be immortal (e.g. Py_EMPTY_KEYS),
   so we apply a naive version of what Py_INCREF() and Py_DECREF() do
   for immortal objects. */

static inline void
dictkeys_incref(PyDictKeysObject *dk)
{}

static inline void
dictkeys_decref(PyInterpreterState *interp, PyDictKeysObject *dk, bool use_qsbr)
{}

/* lookup indices.  returns DKIX_EMPTY, DKIX_DUMMY, or ix >= 0 */
static inline Py_ssize_t
dictkeys_get_index(const PyDictKeysObject *keys, Py_ssize_t i)
{}

/* write to indices. */
static inline void
dictkeys_set_index(PyDictKeysObject *keys, Py_ssize_t i, Py_ssize_t ix)
{}


/* USABLE_FRACTION is the maximum dictionary load.
 * Increasing this ratio makes dictionaries more dense, resulting in more
 * collisions.  Decreasing it improves sparseness at the expense of spreading
 * indices over more cache lines and at the cost of total memory consumed.
 *
 * USABLE_FRACTION must obey the following:
 *     (0 < USABLE_FRACTION(n) < n) for all n >= 2
 *
 * USABLE_FRACTION should be quick to calculate.
 * Fractions around 1/2 to 2/3 seem to work well in practice.
 */
#define USABLE_FRACTION(n)
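
/* As an illustration (an assumption for the sketch, not necessarily the exact
 * CPython definition), a two-thirds usable fraction can be written as:
 *
 *     #define EXAMPLE_USABLE_FRACTION(n)  (((n) << 1) / 3)
 *
 * e.g. EXAMPLE_USABLE_FRACTION(8) == 5, matching the PyDict_MINSIZE comment
 * above: a size-8 table admits at most 5 entries before a resize.
 */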

/* Find the log2 of the smallest dk_size >= minsize. */
static inline uint8_t
calculate_log2_keysize(Py_ssize_t minsize)
{}

/* estimate_log2_keysize() is the approximate inverse of USABLE_FRACTION().
 *
 * It can be used to reserve enough size to insert n entries without
 * resizing.
 */
static inline uint8_t
estimate_log2_keysize(Py_ssize_t n)
{}


/* GROWTH_RATE. Growth rate upon hitting maximum load.
 * Currently set to used*3.
 * This means that dicts double in size when growing without deletions,
 * but have more head room when the number of deletions is on a par with the
 * number of insertions.  See also bpo-17563 and bpo-33205.
 *
 * GROWTH_RATE was set to used*4 up to version 3.2.
 * GROWTH_RATE was set to used*2 in version 3.3.0
 * GROWTH_RATE was set to used*2 + capacity/2 in 3.4.0-3.6.0.
 */
#define GROWTH_RATE(d)
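
/* Worked example (a sketch of the arithmetic, assuming the two-thirds usable
 * fraction above): a table of size 16 admits at most 10 entries.  With no
 * deletions, the 11th key insertion resizes with used == 10, GROWTH_RATE
 * gives 30, and the smallest power of two >= 30 is 32, so the table doubles.
 * If 5 of those 10 entries had been deleted first, used == 5, GROWTH_RATE
 * gives 15, and the table is rebuilt at size 16, reclaiming the dummy slots
 * instead of growing.
 */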

/* This immutable, empty PyDictKeysObject is used for PyDict_Clear()
 * (which cannot fail and thus can do no allocation).
 */
static PyDictKeysObject empty_keys_struct =;

#define Py_EMPTY_KEYS

/* Uncomment to check the dict content in _PyDict_CheckConsistency() */
// #define DEBUG_PYDICT

#ifdef DEBUG_PYDICT
#define ASSERT_CONSISTENT
#else
#define ASSERT_CONSISTENT(op)
#endif

static inline int
get_index_from_order(PyDictObject *mp, Py_ssize_t i)
{}

#ifdef DEBUG_PYDICT
static void
dump_entries(PyDictKeysObject *dk)
{
    for (Py_ssize_t i = 0; i < dk->dk_nentries; i++) {
        if (DK_IS_UNICODE(dk)) {
            PyDictUnicodeEntry *ep = &DK_UNICODE_ENTRIES(dk)[i];
            printf("key=%p value=%p\n", ep->me_key, ep->me_value);
        }
        else {
            PyDictKeyEntry *ep = &DK_ENTRIES(dk)[i];
            printf("key=%p hash=%lx value=%p\n", ep->me_key, ep->me_hash, ep->me_value);
        }
    }
}
#endif

int
_PyDict_CheckConsistency(PyObject *op, int check_content)
{}


static PyDictKeysObject*
new_keys_object(PyInterpreterState *interp, uint8_t log2_size, bool unicode)
{}

static void
free_keys_object(PyDictKeysObject *keys, bool use_qsbr)
{}

static size_t
values_size_from_count(size_t count)
{}

#define CACHED_KEYS(tp)

static inline PyDictValues*
new_values(size_t size)
{}

static inline void
free_values(PyDictValues *values, bool use_qsbr)
{}

/* Consumes a reference to the keys object */
static PyObject *
new_dict(PyInterpreterState *interp,
         PyDictKeysObject *keys, PyDictValues *values,
         Py_ssize_t used, int free_values_on_failure)
{}

static PyObject *
new_dict_with_shared_keys(PyInterpreterState *interp, PyDictKeysObject *keys)
{}


static PyDictKeysObject *
clone_combined_dict_keys(PyDictObject *orig)
{}

PyObject *
PyDict_New(void)
{}

/* Find the hash-table slot (in dk_indices) that refers to the given entry-table offset */
static Py_ssize_t
lookdict_index(PyDictKeysObject *k, Py_hash_t hash, Py_ssize_t index)
{}
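
/* In outline (a hedged sketch, not the exact body): the search starts at
   hash & mask and follows the same probe recurrence as insertion until
   dk_indices yields the entry offset we are looking for:

       size_t mask = DK_MASK(k);
       size_t perturb = (size_t)hash;
       size_t i = (size_t)hash & mask;
       for (;;) {
           Py_ssize_t ix = dictkeys_get_index(k, i);
           if (ix == index) {
               return (Py_ssize_t)i;       // slot that refers to this entry
           }
           if (ix == DKIX_EMPTY) {
               return DKIX_EMPTY;          // entry is not in the table
           }
           perturb >>= PERTURB_SHIFT;
           i = mask & (i*5 + perturb + 1);
       }
*/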

static inline Py_ALWAYS_INLINE Py_ssize_t
do_lookup(PyDictObject *mp, PyDictKeysObject *dk, PyObject *key, Py_hash_t hash,
          int (*check_lookup)(PyDictObject *, PyDictKeysObject *, void *, Py_ssize_t ix, PyObject *key, Py_hash_t))
{}

static inline int
compare_unicode_generic(PyDictObject *mp, PyDictKeysObject *dk,
                        void *ep0, Py_ssize_t ix, PyObject *key, Py_hash_t hash)
{}

// Search for a non-Unicode key in a Unicode-keys table
static Py_ssize_t
unicodekeys_lookup_generic(PyDictObject *mp, PyDictKeysObject* dk, PyObject *key, Py_hash_t hash)
{}

static inline int
compare_unicode_unicode(PyDictObject *mp, PyDictKeysObject *dk,
                        void *ep0, Py_ssize_t ix, PyObject *key, Py_hash_t hash)
{}

static Py_ssize_t _Py_HOT_FUNCTION
unicodekeys_lookup_unicode(PyDictKeysObject* dk, PyObject *key, Py_hash_t hash)
{}

static inline int
compare_generic(PyDictObject *mp, PyDictKeysObject *dk,
                void *ep0, Py_ssize_t ix, PyObject *key, Py_hash_t hash)
{}

static Py_ssize_t
dictkeys_generic_lookup(PyDictObject *mp, PyDictKeysObject* dk, PyObject *key, Py_hash_t hash)
{}

/* Lookup a string in an all-unicode dict keys.
 * Returns DKIX_ERROR if the key is not a string,
 * or if the dict keys are not all strings.
 * If the key is present then return its index.
 * If the key is not present then return DKIX_EMPTY.
 */
Py_ssize_t
_PyDictKeys_StringLookup(PyDictKeysObject* dk, PyObject *key)
{}

#ifdef Py_GIL_DISABLED

static Py_ssize_t
unicodekeys_lookup_unicode_threadsafe(PyDictKeysObject* dk, PyObject *key,
                                      Py_hash_t hash);

#endif

/*
The basic lookup function used by all operations.
This is based on Algorithm D from Knuth Vol. 3, Sec. 6.4.
Open addressing is preferred over chaining since the link overhead for
chaining would be substantial (100% with typical malloc overhead).

The initial probe index is computed as hash mod the table size. Subsequent
probe indices are computed as explained earlier.

All arithmetic on hash should ignore overflow.

_Py_dict_lookup() is general-purpose, and may return DKIX_ERROR if (and only
if) a comparison raises an exception.
When the key isn't found, DKIX_EMPTY is returned.
*/
Py_ssize_t
_Py_dict_lookup(PyDictObject *mp, PyObject *key, Py_hash_t hash, PyObject **value_addr)
{}
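
/* A hedged sketch of the typical caller pattern (in the default build the
   value written to *value_addr is a borrowed reference):

       PyObject *value;
       Py_ssize_t ix = _Py_dict_lookup(mp, key, hash, &value);
       if (ix == DKIX_ERROR) {
           return NULL;    // an exception was raised during key comparison
       }
       if (value == NULL) {
           // key not present (ix == DKIX_EMPTY, or a Pending split-table slot)
       }
       else {
           // value is borrowed; valid only while the dict is left unchanged
       }
*/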

#ifdef Py_GIL_DISABLED
static inline void
ensure_shared_on_read(PyDictObject *mp)
{
    if (!_Py_IsOwnedByCurrentThread((PyObject *)mp) && !IS_DICT_SHARED(mp)) {
        // The first time we access a dict from a non-owning thread we mark it
        // as shared. This ensures that a concurrent resize operation will
        // delay freeing the old keys or values using QSBR, which is necessary
        // to safely allow concurrent reads without locking...
        Py_BEGIN_CRITICAL_SECTION(mp);
        if (!IS_DICT_SHARED(mp)) {
            SET_DICT_SHARED(mp);
        }
        Py_END_CRITICAL_SECTION();
    }
}
#endif

static inline void
ensure_shared_on_resize(PyDictObject *mp)
{}

static inline void
ensure_shared_on_keys_version_assignment(PyDictObject *mp)
{}

#ifdef Py_GIL_DISABLED

static inline Py_ALWAYS_INLINE int
compare_unicode_generic_threadsafe(PyDictObject *mp, PyDictKeysObject *dk,
                                   void *ep0, Py_ssize_t ix, PyObject *key, Py_hash_t hash)
{
    PyDictUnicodeEntry *ep = &((PyDictUnicodeEntry *)ep0)[ix];
    PyObject *startkey = _Py_atomic_load_ptr_relaxed(&ep->me_key);
    assert(startkey == NULL || PyUnicode_CheckExact(ep->me_key));
    assert(!PyUnicode_CheckExact(key));

    if (startkey != NULL) {
        if (!_Py_TryIncrefCompare(&ep->me_key, startkey)) {
            return DKIX_KEY_CHANGED;
        }

        if (unicode_get_hash(startkey) == hash) {
            int cmp = PyObject_RichCompareBool(startkey, key, Py_EQ);
            Py_DECREF(startkey);
            if (cmp < 0) {
                return DKIX_ERROR;
            }
            if (dk == _Py_atomic_load_ptr_relaxed(&mp->ma_keys) &&
                startkey == _Py_atomic_load_ptr_relaxed(&ep->me_key)) {
                return cmp;
            }
            else {
                /* The dict was mutated, restart */
                return DKIX_KEY_CHANGED;
            }
        }
        else {
            Py_DECREF(startkey);
        }
    }
    return 0;
}

// Search for a non-Unicode key in a Unicode-keys table
static Py_ssize_t
unicodekeys_lookup_generic_threadsafe(PyDictObject *mp, PyDictKeysObject* dk, PyObject *key, Py_hash_t hash)
{
    return do_lookup(mp, dk, key, hash, compare_unicode_generic_threadsafe);
}

static inline Py_ALWAYS_INLINE int
compare_unicode_unicode_threadsafe(PyDictObject *mp, PyDictKeysObject *dk,
                                   void *ep0, Py_ssize_t ix, PyObject *key, Py_hash_t hash)
{
    PyDictUnicodeEntry *ep = &((PyDictUnicodeEntry *)ep0)[ix];
    PyObject *startkey = _Py_atomic_load_ptr_relaxed(&ep->me_key);
    assert(startkey == NULL || PyUnicode_CheckExact(startkey));
    if (startkey == key) {
        return 1;
    }
    if (startkey != NULL) {
        if (_Py_IsImmortal(startkey)) {
            return unicode_get_hash(startkey) == hash && unicode_eq(startkey, key);
        }
        else {
            if (!_Py_TryIncrefCompare(&ep->me_key, startkey)) {
                return DKIX_KEY_CHANGED;
            }
            if (unicode_get_hash(startkey) == hash && unicode_eq(startkey, key)) {
                Py_DECREF(startkey);
                return 1;
            }
            Py_DECREF(startkey);
        }
    }
    return 0;
}

static Py_ssize_t _Py_HOT_FUNCTION
unicodekeys_lookup_unicode_threadsafe(PyDictKeysObject* dk, PyObject *key, Py_hash_t hash)
{
    return do_lookup(NULL, dk, key, hash, compare_unicode_unicode_threadsafe);
}

static inline Py_ALWAYS_INLINE int
compare_generic_threadsafe(PyDictObject *mp, PyDictKeysObject *dk,
                           void *ep0, Py_ssize_t ix, PyObject *key, Py_hash_t hash)
{
    PyDictKeyEntry *ep = &((PyDictKeyEntry *)ep0)[ix];
    PyObject *startkey = _Py_atomic_load_ptr_relaxed(&ep->me_key);
    if (startkey == key) {
        return 1;
    }
    Py_ssize_t ep_hash = _Py_atomic_load_ssize_relaxed(&ep->me_hash);
    if (ep_hash == hash) {
        if (startkey == NULL || !_Py_TryIncrefCompare(&ep->me_key, startkey)) {
            return DKIX_KEY_CHANGED;
        }
        int cmp = PyObject_RichCompareBool(startkey, key, Py_EQ);
        Py_DECREF(startkey);
        if (cmp < 0) {
            return DKIX_ERROR;
        }
        if (dk == _Py_atomic_load_ptr_relaxed(&mp->ma_keys) &&
            startkey == _Py_atomic_load_ptr_relaxed(&ep->me_key)) {
            return cmp;
        }
        else {
            /* The dict was mutated, restart */
            return DKIX_KEY_CHANGED;
        }
    }
    return 0;
}

static Py_ssize_t
dictkeys_generic_lookup_threadsafe(PyDictObject *mp, PyDictKeysObject* dk, PyObject *key, Py_hash_t hash)
{
    return do_lookup(mp, dk, key, hash, compare_generic_threadsafe);
}

Py_ssize_t
_Py_dict_lookup_threadsafe(PyDictObject *mp, PyObject *key, Py_hash_t hash, PyObject **value_addr)
{
    PyDictKeysObject *dk;
    DictKeysKind kind;
    Py_ssize_t ix;
    PyObject *value;

    ensure_shared_on_read(mp);

    dk = _Py_atomic_load_ptr(&mp->ma_keys);
    kind = dk->dk_kind;

    if (kind != DICT_KEYS_GENERAL) {
        if (PyUnicode_CheckExact(key)) {
            ix = unicodekeys_lookup_unicode_threadsafe(dk, key, hash);
        }
        else {
            ix = unicodekeys_lookup_generic_threadsafe(mp, dk, key, hash);
        }
        if (ix == DKIX_KEY_CHANGED) {
            goto read_failed;
        }

        if (ix >= 0) {
            if (kind == DICT_KEYS_SPLIT) {
                PyDictValues *values = _Py_atomic_load_ptr(&mp->ma_values);
                if (values == NULL)
                    goto read_failed;

                uint8_t capacity = _Py_atomic_load_uint8_relaxed(&values->capacity);
                if (ix >= (Py_ssize_t)capacity)
                    goto read_failed;

                value = _Py_TryXGetRef(&values->values[ix]);
                if (value == NULL)
                    goto read_failed;

                if (values != _Py_atomic_load_ptr(&mp->ma_values)) {
                    Py_DECREF(value);
                    goto read_failed;
                }
            }
            else {
                value = _Py_TryXGetRef(&DK_UNICODE_ENTRIES(dk)[ix].me_value);
                if (value == NULL) {
                    goto read_failed;
                }

                if (dk != _Py_atomic_load_ptr(&mp->ma_keys)) {
                    Py_DECREF(value);
                    goto read_failed;
                }
            }
        }
        else {
            value = NULL;
        }
    }
    else {
        ix = dictkeys_generic_lookup_threadsafe(mp, dk, key, hash);
        if (ix == DKIX_KEY_CHANGED) {
            goto read_failed;
        }
        if (ix >= 0) {
            value = _Py_TryXGetRef(&DK_ENTRIES(dk)[ix].me_value);
            if (value == NULL)
                goto read_failed;

            if (dk != _Py_atomic_load_ptr(&mp->ma_keys)) {
                Py_DECREF(value);
                goto read_failed;
            }
        }
        else {
            value = NULL;
        }
    }

    *value_addr = value;
    return ix;

read_failed:
    // In addition to the normal races from the dict being modified, the
    // _Py_TryXGetRef calls can fail if the objects don't yet have a shared
    // ref count.  That can happen here or in the *_lookup_* helpers.  In that
    // case we need to take the lock to avoid mutation and do a normal incref,
    // which will make them shared.
    Py_BEGIN_CRITICAL_SECTION(mp);
    ix = _Py_dict_lookup(mp, key, hash, &value);
    *value_addr = value;
    if (value != NULL) {
        assert(ix >= 0);
        _Py_NewRefWithLock(value);
    }
    Py_END_CRITICAL_SECTION();
    return ix;
}

Py_ssize_t
_Py_dict_lookup_threadsafe_stackref(PyDictObject *mp, PyObject *key, Py_hash_t hash, _PyStackRef *value_addr)
{
    PyDictKeysObject *dk = _Py_atomic_load_ptr(&mp->ma_keys);
    if (dk->dk_kind == DICT_KEYS_UNICODE && PyUnicode_CheckExact(key)) {
        Py_ssize_t ix = unicodekeys_lookup_unicode_threadsafe(dk, key, hash);
        if (ix == DKIX_EMPTY) {
            *value_addr = PyStackRef_NULL;
            return ix;
        }
        else if (ix >= 0) {
            PyObject **addr_of_value = &DK_UNICODE_ENTRIES(dk)[ix].me_value;
            PyObject *value = _Py_atomic_load_ptr(addr_of_value);
            if (value == NULL) {
                *value_addr = PyStackRef_NULL;
                return DKIX_EMPTY;
            }
            if (_Py_IsImmortal(value) || _PyObject_HasDeferredRefcount(value)) {
                *value_addr =  (_PyStackRef){ .bits = (uintptr_t)value | Py_TAG_DEFERRED };
                return ix;
            }
            if (_Py_TryIncrefCompare(addr_of_value, value)) {
                *value_addr = PyStackRef_FromPyObjectSteal(value);
                return ix;
            }
        }
    }

    PyObject *obj;
    Py_ssize_t ix = _Py_dict_lookup_threadsafe(mp, key, hash, &obj);
    if (ix >= 0 && obj != NULL) {
        *value_addr = PyStackRef_FromPyObjectSteal(obj);
    }
    else {
        *value_addr = PyStackRef_NULL;
    }
    return ix;
}

#else   // Py_GIL_DISABLED

Py_ssize_t
_Py_dict_lookup_threadsafe(PyDictObject *mp, PyObject *key, Py_hash_t hash, PyObject **value_addr)
{}

Py_ssize_t
_Py_dict_lookup_threadsafe_stackref(PyDictObject *mp, PyObject *key, Py_hash_t hash, _PyStackRef *value_addr)
{}

#endif

int
_PyDict_HasOnlyStringKeys(PyObject *dict)
{}

void
_PyDict_EnablePerThreadRefcounting(PyObject *op)
{}

static inline int
is_unusable_slot(Py_ssize_t ix)
{}

/* Internal function to find slot for an item from its hash
   when it is known that the key is not present in the dict.
 */
static Py_ssize_t
find_empty_slot(PyDictKeysObject *keys, Py_hash_t hash)
{}

static int
insertion_resize(PyInterpreterState *interp, PyDictObject *mp, int unicode)
{}

static inline int
insert_combined_dict(PyInterpreterState *interp, PyDictObject *mp,
                     Py_hash_t hash, PyObject *key, PyObject *value)
{}

static Py_ssize_t
insert_split_key(PyDictKeysObject *keys, PyObject *key, Py_hash_t hash)
{}

static void
insert_split_value(PyInterpreterState *interp, PyDictObject *mp, PyObject *key, PyObject *value, Py_ssize_t ix)
{}

/*
Internal routine to insert a new item into the table.
Used both by the internal resize routine and by the public insert routine.
Returns -1 if an error occurred, or 0 on success.
Consumes key and value references.
*/
static int
insertdict(PyInterpreterState *interp, PyDictObject *mp,
           PyObject *key, Py_hash_t hash, PyObject *value)
{}

// Same as insertdict but specialized for ma_keys == Py_EMPTY_KEYS.
// Consumes key and value references.
static int
insert_to_emptydict(PyInterpreterState *interp, PyDictObject *mp,
                    PyObject *key, Py_hash_t hash, PyObject *value)
{}

/*
Internal routine used by dictresize() to build a hashtable of entries.
*/
static void
build_indices_generic(PyDictKeysObject *keys, PyDictKeyEntry *ep, Py_ssize_t n)
{}

static void
build_indices_unicode(PyDictKeysObject *keys, PyDictUnicodeEntry *ep, Py_ssize_t n)
{}

/*
Restructure the table by allocating a new table and reinserting all
items again.  When entries have been deleted, the new table may
actually be smaller than the old one.
If a table is split (its keys and hashes are shared, its values are not),
then the values are temporarily copied into the table, it is resized as
a combined table, then the me_value slots in the old table are NULLed out.
After resizing, a table is always combined.

This function supports:
 - Unicode split -> Unicode combined or Generic
 - Unicode combined -> Unicode combined or Generic
 - Generic -> Generic
*/
static int
dictresize(PyInterpreterState *interp, PyDictObject *mp,
           uint8_t log2_newsize, int unicode)
{}

static PyObject *
dict_new_presized(PyInterpreterState *interp, Py_ssize_t minused, bool unicode)
{}

PyObject *
_PyDict_NewPresized(Py_ssize_t minused)
{}

PyObject *
_PyDict_FromItems(PyObject *const *keys, Py_ssize_t keys_offset,
                  PyObject *const *values, Py_ssize_t values_offset,
                  Py_ssize_t length)
{}

/* Note that, for historical reasons, PyDict_GetItem() suppresses all errors
 * that may occur (originally dicts supported only string keys, and exceptions
 * weren't possible).  So, while the original intent was that a NULL return
 * meant the key wasn't present, in reality it can mean that, or that an error
 * (suppressed) occurred while computing the key's hash, or that some error
 * (suppressed) occurred when comparing keys in the dict's internal probe
 * sequence.  A nasty example of the latter is when a Python-coded comparison
 * function hits a stack-depth error, which can cause this to return NULL
 * even if the key is present.
 */
static PyObject *
dict_getitem(PyObject *op, PyObject *key, const char *warnmsg)
{}

PyObject *
PyDict_GetItem(PyObject *op, PyObject *key)
{}
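
/* Because of the error suppression described above, new code usually prefers
   a variant that reports errors; a hedged usage sketch of PyDict_GetItemRef()
   (illustrative, not code from this file):

       PyObject *value;
       int rc = PyDict_GetItemRef(dict, key, &value);
       if (rc < 0) {
           return NULL;    // error (e.g. unhashable key); exception is set
       }
       if (rc == 0) {
           // key genuinely missing; value is NULL and no exception is set
       }
       else {
           // rc == 1: value holds a strong reference
           Py_DECREF(value);
       }
*/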

Py_ssize_t
_PyDict_LookupIndex(PyDictObject *mp, PyObject *key)
{}

/* Same as PyDict_GetItemWithError() but with hash supplied by caller.
   This returns NULL *with* an exception set if an exception occurred.
   It returns NULL *without* an exception set if the key wasn't present.
*/
PyObject *
_PyDict_GetItem_KnownHash(PyObject *op, PyObject *key, Py_hash_t hash)
{}

/* Gets an item and provides a new reference if the value is present.
 * Returns 1 if the key is present, 0 if the key is missing, and -1 if an
 * exception occurred.
*/
int
_PyDict_GetItemRef_KnownHash_LockHeld(PyDictObject *op, PyObject *key,
                                      Py_hash_t hash, PyObject **result)
{}

/* Gets an item and provides a new reference if the value is present.
 * Returns 1 if the key is present, 0 if the key is missing, and -1 if an
 * exception occurred.
*/
int
_PyDict_GetItemRef_KnownHash(PyDictObject *op, PyObject *key, Py_hash_t hash, PyObject **result)
{}

int
PyDict_GetItemRef(PyObject *op, PyObject *key, PyObject **result)
{}

int
_PyDict_GetItemRef_Unicode_LockHeld(PyDictObject *op, PyObject *key, PyObject **result)
{}

/* Variant of PyDict_GetItem() that doesn't suppress exceptions.
   This returns NULL *with* an exception set if an exception occurred.
   It returns NULL *without* an exception set if the key wasn't present.
*/
PyObject *
PyDict_GetItemWithError(PyObject *op, PyObject *key)
{}

PyObject *
_PyDict_GetItemWithError(PyObject *dp, PyObject *kv)
{}

PyObject *
_PyDict_GetItemIdWithError(PyObject *dp, _Py_Identifier *key)
{}

PyObject *
_PyDict_GetItemStringWithError(PyObject *v, const char *key)
{}

/* Fast version of global value lookup (LOAD_GLOBAL).
 * Lookup in globals, then builtins.
 *
 * Raise an exception and return NULL if an error occurred (ex: computing the
 * key hash failed, key comparison failed, ...). Return NULL if the key doesn't
 * exist. Return the value if the key exists.
 *
 * Returns a new reference.
 */
PyObject *
_PyDict_LoadGlobal(PyDictObject *globals, PyDictObject *builtins, PyObject *key)
{}

void
_PyDict_LoadGlobalStackRef(PyDictObject *globals, PyDictObject *builtins, PyObject *key, _PyStackRef *res)
{}

PyObject *
_PyDict_LoadBuiltinsFromGlobals(PyObject *globals)
{}

/* Consumes references to key and value */
static int
setitem_take2_lock_held(PyDictObject *mp, PyObject *key, PyObject *value)
{}

int
_PyDict_SetItem_Take2(PyDictObject *mp, PyObject *key, PyObject *value)
{}

/* CAUTION: PyDict_SetItem() must guarantee that it won't resize the
 * dictionary if it's merely replacing the value for an existing key.
 * This means that it's safe to loop over a dictionary with PyDict_Next()
 * and occasionally replace a value -- but you can't insert new keys or
 * remove them.
 */
int
PyDict_SetItem(PyObject *op, PyObject *key, PyObject *value)
{}

static int
setitem_lock_held(PyDictObject *mp, PyObject *key, PyObject *value)
{}


int
_PyDict_SetItem_KnownHash_LockHeld(PyDictObject *mp, PyObject *key, PyObject *value,
                                   Py_hash_t hash)
{}

int
_PyDict_SetItem_KnownHash(PyObject *op, PyObject *key, PyObject *value,
                          Py_hash_t hash)
{}

static void
delete_index_from_values(PyDictValues *values, Py_ssize_t ix)
{}

static void
delitem_common(PyDictObject *mp, Py_hash_t hash, Py_ssize_t ix,
               PyObject *old_value)
{}

int
PyDict_DelItem(PyObject *op, PyObject *key)
{}

static int
delitem_knownhash_lock_held(PyObject *op, PyObject *key, Py_hash_t hash)
{}

int
_PyDict_DelItem_KnownHash(PyObject *op, PyObject *key, Py_hash_t hash)
{}

static int
delitemif_lock_held(PyObject *op, PyObject *key,
                    int (*predicate)(PyObject *value, void *arg),
                    void *arg)
{}
/* This function promises that the predicate -> deletion sequence is atomic
 * (i.e. protected by the GIL or the per-dict mutex in free threaded builds),
 * assuming the predicate itself doesn't release the GIL (or cause re-entrancy
 * which would release the per-dict mutex)
 */
int
_PyDict_DelItemIf(PyObject *op, PyObject *key,
                  int (*predicate)(PyObject *value, void *arg),
                  void *arg)
{}

static void
clear_lock_held(PyObject *op)
{}

void
PyDict_Clear(PyObject *op)
{}

/* Internal version of PyDict_Next that returns a hash value in addition
 * to the key and value.
 * Return 1 on success; return 0 when the end of the dictionary is reached
 * (or if op is not a dictionary).
 */
int
_PyDict_Next(PyObject *op, Py_ssize_t *ppos, PyObject **pkey,
             PyObject **pvalue, Py_hash_t *phash)
{}

/*
 * Iterate over a dict.  Use like so:
 *
 *     Py_ssize_t i;
 *     PyObject *key, *value;
 *     i = 0;   // important!  i should not otherwise be changed by you
 *     while (PyDict_Next(yourdict, &i, &key, &value)) {
 *         Refer to borrowed references in key and value.
 *     }
 *
 * Return 1 on success; return 0 when the end of the dictionary is reached
 * (or if op is not a dictionary).
 *
 * CAUTION:  In general, it isn't safe to use PyDict_Next in a loop that
 * mutates the dict.  One exception:  it is safe if the loop merely changes
 * the values associated with the keys (but doesn't insert new keys or
 * delete keys), via PyDict_SetItem().
 */
int
PyDict_Next(PyObject *op, Py_ssize_t *ppos, PyObject **pkey, PyObject **pvalue)
{}


/* Internal version of dict.pop(). */
int
_PyDict_Pop_KnownHash(PyDictObject *mp, PyObject *key, Py_hash_t hash,
                      PyObject **result)
{}

static int
pop_lock_held(PyObject *op, PyObject *key, PyObject **result)
{}

int
PyDict_Pop(PyObject *op, PyObject *key, PyObject **result)
{}


int
PyDict_PopString(PyObject *op, const char *key, PyObject **result)
{}


PyObject *
_PyDict_Pop(PyObject *dict, PyObject *key, PyObject *default_value)
{}

static PyDictObject *
dict_dict_fromkeys(PyInterpreterState *interp, PyDictObject *mp,
                   PyObject *iterable, PyObject *value)
{}

static PyDictObject *
dict_set_fromkeys(PyInterpreterState *interp, PyDictObject *mp,
                  PyObject *iterable, PyObject *value)
{}

/* Internal version of dict.from_keys().  It is subclass-friendly. */
PyObject *
_PyDict_FromKeys(PyObject *cls, PyObject *iterable, PyObject *value)
{}

/* Methods */

static void
dict_dealloc(PyObject *self)
{}


static PyObject *
dict_repr_lock_held(PyObject *self)
{}

static PyObject *
dict_repr(PyObject *self)
{}

static Py_ssize_t
dict_length(PyObject *self)
{}

static PyObject *
dict_subscript(PyObject *self, PyObject *key)
{}

static int
dict_ass_sub(PyObject *mp, PyObject *v, PyObject *w)
{}

static PyMappingMethods dict_as_mapping =;

static PyObject *
keys_lock_held(PyObject *dict)
{}

PyObject *
PyDict_Keys(PyObject *dict)
{}

static PyObject *
values_lock_held(PyObject *dict)
{}

PyObject *
PyDict_Values(PyObject *dict)
{}

static PyObject *
items_lock_held(PyObject *dict)
{}

PyObject *
PyDict_Items(PyObject *dict)
{}

/*[clinic input]
@classmethod
dict.fromkeys
    iterable: object
    value: object=None
    /

Create a new dictionary with keys from iterable and values set to value.
[clinic start generated code]*/

static PyObject *
dict_fromkeys_impl(PyTypeObject *type, PyObject *iterable, PyObject *value)
/*[clinic end generated code: output=8fb98e4b10384999 input=382ba4855d0f74c3]*/
{}

/* Single-arg dict update; used by dict_update_common and operators. */
static int
dict_update_arg(PyObject *self, PyObject *arg)
{}

static int
dict_update_common(PyObject *self, PyObject *args, PyObject *kwds,
                   const char *methname)
{}

/* Note: dict.update() uses the METH_VARARGS|METH_KEYWORDS calling convention.
   Using METH_FASTCALL|METH_KEYWORDS would make dict.update(**dict2) calls
   slower; see issue #29312. */
static PyObject *
dict_update(PyObject *self, PyObject *args, PyObject *kwds)
{}

/* Update unconditionally replaces existing items.
   Merge has a 3rd argument 'override'; if set, it acts like Update,
   otherwise it leaves existing items unchanged.

   PyDict_{Update,Merge} update/merge from a mapping object.

   PyDict_MergeFromSeq2 updates/merges from any iterable object
   producing iterable objects of length 2.
*/

static int
merge_from_seq2_lock_held(PyObject *d, PyObject *seq2, int override)
{}

int
PyDict_MergeFromSeq2(PyObject *d, PyObject *seq2, int override)
{}

static int
dict_dict_merge(PyInterpreterState *interp, PyDictObject *mp, PyDictObject *other, int override)
{}

static int
dict_merge(PyInterpreterState *interp, PyObject *a, PyObject *b, int override)
{}

int
PyDict_Update(PyObject *a, PyObject *b)
{}

int
PyDict_Merge(PyObject *a, PyObject *b, int override)
{}

int
_PyDict_MergeEx(PyObject *a, PyObject *b, int override)
{}

/*[clinic input]
dict.copy

Return a shallow copy of the dict.
[clinic start generated code]*/

static PyObject *
dict_copy_impl(PyDictObject *self)
/*[clinic end generated code: output=ffb782cf970a5c39 input=73935f042b639de4]*/
{}

/* Copies the values, but does not change the reference
 * counts of the objects in the array.
 * Returns NULL on failure, but does *not* set an exception. */
static PyDictValues *
copy_values(PyDictValues *values)
{}

static PyObject *
copy_lock_held(PyObject *o)
{}

PyObject *
PyDict_Copy(PyObject *o)
{}

Py_ssize_t
PyDict_Size(PyObject *mp)
{}

/* Return 1 if the dicts are equal, 0 if not, -1 on error.
 * Gets out as soon as any difference is detected.
 * Uses only Py_EQ comparison.
 */
static int
dict_equal_lock_held(PyDictObject *a, PyDictObject *b)
{}

static int
dict_equal(PyDictObject *a, PyDictObject *b)
{}

static PyObject *
dict_richcompare(PyObject *v, PyObject *w, int op)
{}

/*[clinic input]

@coexist
dict.__contains__

  key: object
  /

True if the dictionary has the specified key, else False.
[clinic start generated code]*/

static PyObject *
dict___contains__(PyDictObject *self, PyObject *key)
/*[clinic end generated code: output=a3d03db709ed6e6b input=fe1cb42ad831e820]*/
{}

/*[clinic input]
@critical_section
dict.get

    key: object
    default: object = None
    /

Return the value for key if key is in the dictionary, else default.
[clinic start generated code]*/

static PyObject *
dict_get_impl(PyDictObject *self, PyObject *key, PyObject *default_value)
/*[clinic end generated code: output=bba707729dee05bf input=a631d3f18f584c60]*/
{}

static int
dict_setdefault_ref_lock_held(PyObject *d, PyObject *key, PyObject *default_value,
                    PyObject **result, int incref_result)
{}

int
PyDict_SetDefaultRef(PyObject *d, PyObject *key, PyObject *default_value,
                     PyObject **result)
{}
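
/* A hedged usage sketch (illustrative, not code from this file):
   PyDict_SetDefaultRef() returns 1 if the key was already present, 0 if the
   default was inserted, and -1 on error; on success it stores a strong
   reference to the resulting value in *result:

       PyObject *value;
       if (PyDict_SetDefaultRef(d, key, default_value, &value) < 0) {
           return NULL;                // exception is set
       }
       // value holds a strong reference whether it was inserted or found
       Py_DECREF(value);
*/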

PyObject *
PyDict_SetDefault(PyObject *d, PyObject *key, PyObject *defaultobj)
{}

/*[clinic input]
@critical_section
dict.setdefault

    key: object
    default: object = None
    /

Insert key with a value of default if key is not in the dictionary.

Return the value for key if key is in the dictionary, else default.
[clinic start generated code]*/

static PyObject *
dict_setdefault_impl(PyDictObject *self, PyObject *key,
                     PyObject *default_value)
/*[clinic end generated code: output=f8c1101ebf69e220 input=9237af9a0a224302]*/
{}


/*[clinic input]
dict.clear

Remove all items from the dict.
[clinic start generated code]*/

static PyObject *
dict_clear_impl(PyDictObject *self)
/*[clinic end generated code: output=5139a830df00830a input=0bf729baba97a4c2]*/
{}

/*[clinic input]
dict.pop

    key: object
    default: object = NULL
    /

D.pop(k[,d]) -> v, remove specified key and return the corresponding value.

If the key is not found, return the default if given; otherwise,
raise a KeyError.
[clinic start generated code]*/

static PyObject *
dict_pop_impl(PyDictObject *self, PyObject *key, PyObject *default_value)
/*[clinic end generated code: output=3abb47b89f24c21c input=e221baa01044c44c]*/
{}

/*[clinic input]
@critical_section
dict.popitem

Remove and return a (key, value) pair as a 2-tuple.

Pairs are returned in LIFO (last-in, first-out) order.
Raises KeyError if the dict is empty.
[clinic start generated code]*/

static PyObject *
dict_popitem_impl(PyDictObject *self)
/*[clinic end generated code: output=e65fcb04420d230d input=ef28b4da5f0f762e]*/
{}

static int
dict_traverse(PyObject *op, visitproc visit, void *arg)
{}

static int
dict_tp_clear(PyObject *op)
{}

static PyObject *dictiter_new(PyDictObject *, PyTypeObject *);

static Py_ssize_t
sizeof_lock_held(PyDictObject *mp)
{}

Py_ssize_t
_PyDict_SizeOf(PyDictObject *mp)
{}

size_t
_PyDict_KeysSize(PyDictKeysObject *keys)
{}

/*[clinic input]
dict.__sizeof__

Return the size of the dict in memory, in bytes.
[clinic start generated code]*/

static PyObject *
dict___sizeof___impl(PyDictObject *self)
/*[clinic end generated code: output=44279379b3824bda input=4fec4ddfc44a4d1a]*/
{}

static PyObject *
dict_or(PyObject *self, PyObject *other)
{}

static PyObject *
dict_ior(PyObject *self, PyObject *other)
{}

PyDoc_STRVAR(getitem__doc__,
"__getitem__($self, key, /)\n--\n\nReturn self[key].");

PyDoc_STRVAR(update__doc__,
"D.update([E, ]**F) -> None.  Update D from mapping/iterable E and F.\n\
If E is present and has a .keys() method, then does:  for k in E.keys(): D[k] = E[k]\n\
If E is present and lacks a .keys() method, then does:  for k, v in E: D[k] = v\n\
In either case, this is followed by: for k in F:  D[k] = F[k]");

/* Forward */

static PyMethodDef mapp_methods[] =;

/* Return 1 if `key` is in dict `op`, 0 if not, and -1 on error. */
int
PyDict_Contains(PyObject *op, PyObject *key)
{}

int
PyDict_ContainsString(PyObject *op, const char *key)
{}

/* Internal version of PyDict_Contains used when the hash value is already known */
int
_PyDict_Contains_KnownHash(PyObject *op, PyObject *key, Py_hash_t hash)
{}

int
_PyDict_ContainsId(PyObject *op, _Py_Identifier *key)
{}

/* Hack to implement "key in dict" */
static PySequenceMethods dict_as_sequence =;

static PyNumberMethods dict_as_number =;

static PyObject *
dict_new(PyTypeObject *type, PyObject *args, PyObject *kwds)
{}

static int
dict_init(PyObject *self, PyObject *args, PyObject *kwds)
{}

static PyObject *
dict_vectorcall(PyObject *type, PyObject * const*args,
                size_t nargsf, PyObject *kwnames)
{}

static PyObject *
dict_iter(PyObject *self)
{}

PyDoc_STRVAR(dictionary_doc,
"dict() -> new empty dictionary\n"
"dict(mapping) -> new dictionary initialized from a mapping object's\n"
"    (key, value) pairs\n"
"dict(iterable) -> new dictionary initialized as if via:\n"
"    d = {}\n"
"    for k, v in iterable:\n"
"        d[k] = v\n"
"dict(**kwargs) -> new dictionary initialized with the name=value pairs\n"
"    in the keyword argument list.  For example:  dict(one=1, two=2)");

PyTypeObject PyDict_Type =;

/* For backward compatibility with old dictionary interface */

PyObject *
PyDict_GetItemString(PyObject *v, const char *key)
{}

int
PyDict_GetItemStringRef(PyObject *v, const char *key, PyObject **result)
{}

int
_PyDict_SetItemId(PyObject *v, _Py_Identifier *key, PyObject *item)
{}

int
PyDict_SetItemString(PyObject *v, const char *key, PyObject *item)
{}

int
_PyDict_DelItemId(PyObject *v, _Py_Identifier *key)
{}

int
PyDict_DelItemString(PyObject *v, const char *key)
{}

/* Dictionary iterator types */

dictiterobject;

static PyObject *
dictiter_new(PyDictObject *dict, PyTypeObject *itertype)
{}

static void
dictiter_dealloc(PyObject *self)
{}

static int
dictiter_traverse(PyObject *self, visitproc visit, void *arg)
{}

static PyObject *
dictiter_len(PyObject *self, PyObject *Py_UNUSED(ignored))
{}

PyDoc_STRVAR(length_hint_doc,
             "Private method returning an estimate of len(list(it)).");

static PyObject *
dictiter_reduce(PyObject *di, PyObject *Py_UNUSED(ignored));

PyDoc_STRVAR(reduce_doc, "Return state information for pickling.");

static PyMethodDef dictiter_methods[] =;

#ifdef Py_GIL_DISABLED

static int
dictiter_iternext_threadsafe(PyDictObject *d, PyObject *self,
                             PyObject **out_key, PyObject **out_value);

#else /* Py_GIL_DISABLED */

static PyObject*
dictiter_iternextkey_lock_held(PyDictObject *d, PyObject *self)
{}

#endif  /* Py_GIL_DISABLED */

static PyObject*
dictiter_iternextkey(PyObject *self)
{}

PyTypeObject PyDictIterKey_Type =;

#ifndef Py_GIL_DISABLED

static PyObject *
dictiter_iternextvalue_lock_held(PyDictObject *d, PyObject *self)
{}

#endif  /* Py_GIL_DISABLED */

static PyObject *
dictiter_iternextvalue(PyObject *self)
{}

PyTypeObject PyDictIterValue_Type =;

static int
dictiter_iternextitem_lock_held(PyDictObject *d, PyObject *self,
                                PyObject **out_key, PyObject **out_value)
{}

#ifdef Py_GIL_DISABLED

// Grabs the key and/or value from the provided locations and, if successful,
// returns them with an increased reference count.  If either one is
// unsuccessful, nothing is incref'd and -1 is returned.
static int
acquire_key_value(PyObject **key_loc, PyObject *value, PyObject **value_loc,
                  PyObject **out_key, PyObject **out_value)
{
    if (out_key) {
        *out_key = _Py_TryXGetRef(key_loc);
        if (*out_key == NULL) {
            return -1;
        }
    }

    if (out_value) {
        if (!_Py_TryIncrefCompare(value_loc, value)) {
            if (out_key) {
                Py_DECREF(*out_key);
            }
            return -1;
        }
        *out_value = value;
    }

    return 0;
}

static int
dictiter_iternext_threadsafe(PyDictObject *d, PyObject *self,
                             PyObject **out_key, PyObject **out_value)
{
    int res;
    dictiterobject *di = (dictiterobject *)self;
    Py_ssize_t i;
    PyDictKeysObject *k;

    assert (PyDict_Check(d));

    if (di->di_used != _Py_atomic_load_ssize_relaxed(&d->ma_used)) {
        PyErr_SetString(PyExc_RuntimeError,
                        "dictionary changed size during iteration");
        di->di_used = -1; /* Make this state sticky */
        return -1;
    }

    ensure_shared_on_read(d);

    i = _Py_atomic_load_ssize_relaxed(&di->di_pos);
    k = _Py_atomic_load_ptr_relaxed(&d->ma_keys);
    assert(i >= 0);
    if (_PyDict_HasSplitTable(d)) {
        PyDictValues *values = _Py_atomic_load_ptr_relaxed(&d->ma_values);
        if (values == NULL) {
            goto concurrent_modification;
        }

        Py_ssize_t used = (Py_ssize_t)_Py_atomic_load_uint8(&values->size);
        if (i >= used) {
            goto fail;
        }

        // We're racing against writes to the order from delete_index_from_values,
        // but single-threaded code can suffer from concurrent modification to the
        // order as well, and can see either duplicated or skipped attributes, so
        // we don't try to do any better here.
        int index = get_index_from_order(d, i);
        PyObject *value = _Py_atomic_load_ptr(&values->values[index]);
        if (acquire_key_value(&DK_UNICODE_ENTRIES(k)[index].me_key, value,
                               &values->values[index], out_key, out_value) < 0) {
            goto try_locked;
        }
    }
    else {
        Py_ssize_t n = _Py_atomic_load_ssize_relaxed(&k->dk_nentries);
        if (DK_IS_UNICODE(k)) {
            PyDictUnicodeEntry *entry_ptr = &DK_UNICODE_ENTRIES(k)[i];
            PyObject *value;
            while (i < n &&
                  (value = _Py_atomic_load_ptr(&entry_ptr->me_value)) == NULL) {
                entry_ptr++;
                i++;
            }
            if (i >= n)
                goto fail;

            if (acquire_key_value(&entry_ptr->me_key, value,
                                   &entry_ptr->me_value, out_key, out_value) < 0) {
                goto try_locked;
            }
        }
        else {
            PyDictKeyEntry *entry_ptr = &DK_ENTRIES(k)[i];
            PyObject *value;
            while (i < n &&
                  (value = _Py_atomic_load_ptr(&entry_ptr->me_value)) == NULL) {
                entry_ptr++;
                i++;
            }

            if (i >= n)
                goto fail;

            if (acquire_key_value(&entry_ptr->me_key, value,
                                   &entry_ptr->me_value, out_key, out_value) < 0) {
                goto try_locked;
            }
        }
    }
    // We found an element (key); if the iterator's remaining length is already
    // 0 we did not expect it, which means the dict was modified concurrently.
    Py_ssize_t len;
    if ((len = _Py_atomic_load_ssize_relaxed(&di->len)) == 0) {
        goto concurrent_modification;
    }

    _Py_atomic_store_ssize_relaxed(&di->di_pos, i + 1);
    _Py_atomic_store_ssize_relaxed(&di->len, len - 1);
    return 0;

concurrent_modification:
    PyErr_SetString(PyExc_RuntimeError,
                    "dictionary keys changed during iteration");

fail:
    di->di_dict = NULL;
    Py_DECREF(d);
    return -1;

try_locked:
    Py_BEGIN_CRITICAL_SECTION(d);
    res = dictiter_iternextitem_lock_held(d, self, out_key, out_value);
    Py_END_CRITICAL_SECTION();
    return res;
}

#endif

static bool
has_unique_reference(PyObject *op)
{}

static bool
acquire_iter_result(PyObject *result)
{}

static PyObject *
dictiter_iternextitem(PyObject *self)
{}

PyTypeObject PyDictIterItem_Type =;


/* dictreviter */

static PyObject *
dictreviter_iter_lock_held(PyDictObject *d, PyObject *self)
{}

static PyObject *
dictreviter_iternext(PyObject *self)
{}

PyTypeObject PyDictRevIterKey_Type =;


/*[clinic input]
dict.__reversed__

Return a reverse iterator over the dict keys.
[clinic start generated code]*/

static PyObject *
dict___reversed___impl(PyDictObject *self)
/*[clinic end generated code: output=e674483336d1ed51 input=23210ef3477d8c4d]*/
{}

static PyObject *
dictiter_reduce(PyObject *self, PyObject *Py_UNUSED(ignored))
{}

PyTypeObject PyDictRevIterItem_Type =;

PyTypeObject PyDictRevIterValue_Type =;

/***********************************************/
/* View objects for keys(), items(), values(). */
/***********************************************/

/* The instance lay-out is the same for all three; but the type differs. */

static void
dictview_dealloc(PyObject *self)
{}

static int
dictview_traverse(PyObject *self, visitproc visit, void *arg)
{}

static Py_ssize_t
dictview_len(PyObject *self)
{}

PyObject *
_PyDictView_New(PyObject *dict, PyTypeObject *type)
{}

static PyObject *
dictview_mapping(PyObject *view, void *Py_UNUSED(ignored)) {}

static PyGetSetDef dictview_getset[] =;

/* TODO(guido): The view objects are not complete:

 * support more set operations
 * support arbitrary mappings?
   - either these should be static or exported in dictobject.h
   - if public then they should probably be in builtins
*/

/* Return 1 if self is a subset of other, iterating over self;
   0 if not; -1 if an error occurred. */
static int
all_contained_in(PyObject *self, PyObject *other)
{}

static PyObject *
dictview_richcompare(PyObject *self, PyObject *other, int op)
{}

static PyObject *
dictview_repr(PyObject *self)
{}

/*** dict_keys ***/

static PyObject *
dictkeys_iter(PyObject *self)
{}

static int
dictkeys_contains(PyObject *self, PyObject *obj)
{}

static PySequenceMethods dictkeys_as_sequence =;

// Create a set object from a dict view object.
// Returns a new reference.
// This utility function is used by set operations.
static PyObject*
dictviews_to_set(PyObject *self)
{}

static PyObject*
dictviews_sub(PyObject *self, PyObject *other)
{}

static int
dictitems_contains(PyObject *dv, PyObject *obj);

PyObject *
_PyDictView_Intersect(PyObject* self, PyObject *other)
{}

static PyObject*
dictviews_or(PyObject* self, PyObject *other)
{}

static PyObject *
dictitems_xor_lock_held(PyObject *d1, PyObject *d2)
{}

static PyObject *
dictitems_xor(PyObject *self, PyObject *other)
{}

static PyObject*
dictviews_xor(PyObject* self, PyObject *other)
{}

static PyNumberMethods dictviews_as_number =;

static PyObject*
dictviews_isdisjoint(PyObject *self, PyObject *other)
{}

PyDoc_STRVAR(isdisjoint_doc,
"Return True if the view and the given iterable have a null intersection.");

static PyObject* dictkeys_reversed(PyObject *dv, PyObject *Py_UNUSED(ignored));

PyDoc_STRVAR(reversed_keys_doc,
"Return a reverse iterator over the dict keys.");

static PyMethodDef dictkeys_methods[] =;

PyTypeObject PyDictKeys_Type =;

/*[clinic input]
dict.keys

Return a set-like object providing a view on the dict's keys.
[clinic start generated code]*/

static PyObject *
dict_keys_impl(PyDictObject *self)
/*[clinic end generated code: output=aac2830c62990358 input=42f48a7a771212a7]*/
{}

static PyObject *
dictkeys_reversed(PyObject *self, PyObject *Py_UNUSED(ignored))
{}

/*** dict_items ***/

static PyObject *
dictitems_iter(PyObject *self)
{}

static int
dictitems_contains(PyObject *self, PyObject *obj)
{}

static PySequenceMethods dictitems_as_sequence =;

static PyObject* dictitems_reversed(PyObject *dv, PyObject *Py_UNUSED(ignored));

PyDoc_STRVAR(reversed_items_doc,
"Return a reverse iterator over the dict items.");

static PyMethodDef dictitems_methods[] =;

PyTypeObject PyDictItems_Type =;

/*[clinic input]
dict.items

Return a set-like object providing a view on the dict's items.
[clinic start generated code]*/

static PyObject *
dict_items_impl(PyDictObject *self)
/*[clinic end generated code: output=88c7db7150c7909a input=87c822872eb71f5a]*/
{}

static PyObject *
dictitems_reversed(PyObject *self, PyObject *Py_UNUSED(ignored))
{}

/*** dict_values ***/

static PyObject *
dictvalues_iter(PyObject *self)
{}

static PySequenceMethods dictvalues_as_sequence =;

static PyObject* dictvalues_reversed(PyObject *dv, PyObject *Py_UNUSED(ignored));

PyDoc_STRVAR(reversed_values_doc,
"Return a reverse iterator over the dict values.");

static PyMethodDef dictvalues_methods[] =;

PyTypeObject PyDictValues_Type =;

/*[clinic input]
dict.values

Return an object providing a view on the dict's values.
[clinic start generated code]*/

static PyObject *
dict_values_impl(PyDictObject *self)
/*[clinic end generated code: output=ce9f2e9e8a959dd4 input=b46944f85493b230]*/
{}

static PyObject *
dictvalues_reversed(PyObject *self, PyObject *Py_UNUSED(ignored))
{}


/* Returns NULL if it cannot allocate a new PyDictKeysObject,
   but does not set an error. */
PyDictKeysObject *
_PyDict_NewKeysForClass(PyHeapTypeObject *cls)
{}
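
/* Illustrative only: because the function above reports allocation failure
   by returning NULL *without* setting an exception, a caller that wants to
   raise must set one itself.  init_cached_keys_example is a hypothetical
   caller. */
static int
init_cached_keys_example(PyHeapTypeObject *cls)
{
    PyDictKeysObject *keys = _PyDict_NewKeysForClass(cls);
    if (keys == NULL) {
        /* No exception was set by the callee; report the failure here. */
        PyErr_NoMemory();
        return -1;
    }
    cls->ht_cached_keys = keys;
    return 0;
}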

void
_PyObject_InitInlineValues(PyObject *obj, PyTypeObject *tp)
{}

static PyDictObject *
make_dict_from_instance_attributes(PyInterpreterState *interp,
                                   PyDictKeysObject *keys, PyDictValues *values)
{}

PyDictObject *
_PyObject_MaterializeManagedDict_LockHeld(PyObject *obj)
{}

PyDictObject *
_PyObject_MaterializeManagedDict(PyObject *obj)
{}

int
_PyDict_SetItem_LockHeld(PyDictObject *dict, PyObject *name, PyObject *value)
{}

// Called with either the object's lock or the dict's lock held
// depending on whether or not a dict has been materialized for
// the object.
static int
store_instance_attr_lock_held(PyObject *obj, PyDictValues *values,
                              PyObject *name, PyObject *value)
{}
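
/* Illustrative only: a hedged sketch of how a caller on free-threaded builds
   might satisfy the locking contract stated above.  When no dict has been
   materialized, the inline values are protected by locking the object; once
   a dict exists, that dict is locked instead.  store_attr_example is
   hypothetical, and a production caller would also have to re-check for a
   dict materialized between the lookup and taking the lock. */
#ifdef Py_GIL_DISABLED
static int
store_attr_example(PyObject *obj, PyDictValues *values,
                   PyObject *name, PyObject *value)
{
    int err;
    PyDictObject *dict = _PyObject_GetManagedDict(obj);
    if (dict == NULL) {
        /* No materialized dict: the object's lock guards the inline values. */
        Py_BEGIN_CRITICAL_SECTION(obj);
        err = store_instance_attr_lock_held(obj, values, name, value);
        Py_END_CRITICAL_SECTION();
    }
    else {
        /* A dict has been materialized: lock the dict instead. */
        Py_BEGIN_CRITICAL_SECTION(dict);
        err = store_instance_attr_lock_held(obj, values, name, value);
        Py_END_CRITICAL_SECTION();
    }
    return err;
}
#endif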

static inline int
store_instance_attr_dict(PyObject *obj, PyDictObject *dict, PyObject *name, PyObject *value)
{}

int
_PyObject_StoreInstanceAttribute(PyObject *obj, PyObject *name, PyObject *value)
{}

/* Sanity check for managed dicts */
#if 0
#define CHECK(val) assert(val)

int
_PyObject_ManagedDictValidityCheck(PyObject *obj)
{
    PyTypeObject *tp = Py_TYPE(obj);
    CHECK(tp->tp_flags & Py_TPFLAGS_MANAGED_DICT);
    PyManagedDictPointer *managed_dict = _PyObject_ManagedDictPointer(obj);
    if (_PyManagedDictPointer_IsValues(*managed_dict)) {
        PyDictValues *values = _PyManagedDictPointer_GetValues(*managed_dict);
        int size = ((uint8_t *)values)[-2];
        int count = 0;
        PyDictKeysObject *keys = CACHED_KEYS(tp);
        for (Py_ssize_t i = 0; i < keys->dk_nentries; i++) {
            if (values->values[i] != NULL) {
                count++;
            }
        }
        CHECK(size == count);
    }
    else {
        if (managed_dict->dict != NULL) {
            CHECK(PyDict_Check(managed_dict->dict));
        }
    }
    return 1;
}
#endif

// Attempts to get an instance attribute from the inline values. Returns true
// if successful, or false if the caller needs to look it up in the dictionary.
bool
_PyObject_TryGetInstanceAttribute(PyObject *obj, PyObject *name, PyObject **attr)
{}
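
/* Illustrative only: a hedged sketch of the calling pattern implied by the
   contract above, assuming *attr is returned as a strong reference (or NULL
   when the attribute is absent).  lookup_attr_example and its use of
   PyObject_GenericGetDict() for the fallback are hypothetical. */
static PyObject *
lookup_attr_example(PyObject *obj, PyObject *name)
{
    PyObject *attr;
    if (_PyObject_TryGetInstanceAttribute(obj, name, &attr)) {
        /* The inline values answered the question; attr may be NULL if the
           attribute does not exist. */
        return attr;
    }
    /* Inline values could not answer: consult the materialized dict. */
    PyObject *dict = PyObject_GenericGetDict(obj, NULL);
    if (dict == NULL) {
        return NULL;
    }
    int found = PyDict_GetItemRef(dict, name, &attr);
    Py_DECREF(dict);
    if (found < 0) {
        return NULL;
    }
    return attr;   /* NULL without an exception when the key is missing. */
}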

int
_PyObject_IsInstanceDictEmpty(PyObject *obj)
{}

int
PyObject_VisitManagedDict(PyObject *obj, visitproc visit, void *arg)
{}

static void
set_dict_inline_values(PyObject *obj, PyDictObject *new_dict)
{}

#ifdef Py_GIL_DISABLED

// Tries to set the dictionary for an object in the easy case: when the current
// dictionary is either not materialized at all, or is a dictionary which
// does not point at the inline values.
static bool
try_set_dict_inline_only_or_other_dict(PyObject *obj, PyObject *new_dict, PyDictObject **cur_dict)
{
    bool replaced = false;
    Py_BEGIN_CRITICAL_SECTION(obj);

    PyDictObject *dict = *cur_dict = _PyObject_GetManagedDict(obj);
    if (dict == NULL) {
        // We only have inline values, we can just completely replace them.
        set_dict_inline_values(obj, (PyDictObject *)new_dict);
        replaced = true;
        goto exit_lock;
    }

    if (FT_ATOMIC_LOAD_PTR_RELAXED(dict->ma_values) != _PyObject_InlineValues(obj)) {
        // We have a materialized dict which doesn't point at the inline values,
        // so we can simply swap dictionaries and free the old dictionary.
        FT_ATOMIC_STORE_PTR(_PyObject_ManagedDictPointer(obj)->dict,
                            (PyDictObject *)Py_XNewRef(new_dict));
        replaced = true;
        goto exit_lock;
    }
    else {
        // We have inline values, so we need to lock the dict and the object
        // at the same time to safely dematerialize them. To do that while releasing
        // the object lock we need a strong reference to the current dictionary.
        Py_INCREF(dict);
    }
exit_lock:
    Py_END_CRITICAL_SECTION();
    return replaced;
}

// Replaces a dictionary that is probably the one which was materialized and
// points at the inline values.  Another thread could have raced with us and
// already replaced it with a different dictionary, though.
static int
replace_dict_probably_inline_materialized(PyObject *obj, PyDictObject *inline_dict,
                                          PyDictObject *cur_dict, PyObject *new_dict)
{
    _Py_CRITICAL_SECTION_ASSERT_OBJECT_LOCKED(obj);

    if (cur_dict == inline_dict) {
        assert(FT_ATOMIC_LOAD_PTR_RELAXED(inline_dict->ma_values) == _PyObject_InlineValues(obj));

        int err = _PyDict_DetachFromObject(inline_dict, obj);
        if (err != 0) {
            assert(new_dict == NULL);
            return err;
        }
    }

    FT_ATOMIC_STORE_PTR(_PyObject_ManagedDictPointer(obj)->dict,
                        (PyDictObject *)Py_XNewRef(new_dict));
    return 0;
}

#endif

static void
decref_maybe_delay(PyObject *obj, bool delay)
{}

static int
set_or_clear_managed_dict(PyObject *obj, PyObject *new_dict, bool clear)
{}

int
_PyObject_SetManagedDict(PyObject *obj, PyObject *new_dict)
{}

void
PyObject_ClearManagedDict(PyObject *obj)
{}

int
_PyDict_DetachFromObject(PyDictObject *mp, PyObject *obj)
{}

static inline PyObject *
ensure_managed_dict(PyObject *obj)
{}

static inline PyObject *
ensure_nonmanaged_dict(PyObject *obj, PyObject **dictptr)
{}

PyObject *
PyObject_GenericGetDict(PyObject *obj, void *context)
{}

int
_PyObjectDict_SetItem(PyTypeObject *tp, PyObject *obj, PyObject **dictptr,
                      PyObject *key, PyObject *value)
{}

void
_PyDictKeys_DecRef(PyDictKeysObject *keys)
{}

static inline uint32_t
get_next_dict_keys_version(PyInterpreterState *interp)
{}

// In free-threaded builds the caller must ensure that the keys object is not
// being mutated concurrently by another thread.
uint32_t
_PyDictKeys_GetVersionForCurrentState(PyInterpreterState *interp,
                                      PyDictKeysObject *dictkeys)
{}
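
/* Illustrative only: one hedged way for a caller to meet the requirement
   above on free-threaded builds is to hold the owning dict's lock while the
   version is read, so the keys cannot be resized or mutated through that
   dict in the meantime.  get_keys_version_example is hypothetical; shared
   (split) keys reachable from other objects may need extra care. */
static uint32_t
get_keys_version_example(PyInterpreterState *interp, PyDictObject *dict)
{
    uint32_t version;
    Py_BEGIN_CRITICAL_SECTION(dict);
    version = _PyDictKeys_GetVersionForCurrentState(interp, dict->ma_keys);
    Py_END_CRITICAL_SECTION();
    return version;
}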

uint32_t
_PyDict_GetKeysVersionForCurrentState(PyInterpreterState *interp,
                                      PyDictObject *dict)
{}

static inline int
validate_watcher_id(PyInterpreterState *interp, int watcher_id)
{}

int
PyDict_Watch(int watcher_id, PyObject* dict)
{}

int
PyDict_Unwatch(int watcher_id, PyObject* dict)
{}

int
PyDict_AddWatcher(PyDict_WatchCallback callback)
{}

int
PyDict_ClearWatcher(int watcher_id)
{}

static const char *
dict_event_name(PyDict_WatchEvent event) {}

void
_PyDict_SendEvent(int watcher_bits,
                  PyDict_WatchEvent event,
                  PyDictObject *mp,
                  PyObject *key,
                  PyObject *value)
{}

#ifndef NDEBUG
static int
_PyObject_InlineValuesConsistencyCheck(PyObject *obj)
{
    if ((Py_TYPE(obj)->tp_flags & Py_TPFLAGS_INLINE_VALUES) == 0) {
        return 1;
    }
    assert(Py_TYPE(obj)->tp_flags & Py_TPFLAGS_MANAGED_DICT);
    PyDictObject *dict = _PyObject_GetManagedDict(obj);
    if (dict == NULL) {
        return 1;
    }
    if (dict->ma_values == _PyObject_InlineValues(obj) ||
        _PyObject_InlineValues(obj)->valid == 0) {
        return 1;
    }
    assert(0);
    return 0;
}
#endif