cpython/Objects/object.c


/* Generic object operations; and implementation of None */

#include "Python.h"
#include "pycore_brc.h"           // _Py_brc_queue_object()
#include "pycore_call.h"          // _PyObject_CallNoArgs()
#include "pycore_ceval.h"         // _Py_EnterRecursiveCallTstate()
#include "pycore_context.h"       // _PyContextTokenMissing_Type
#include "pycore_critical_section.h"     // Py_BEGIN_CRITICAL_SECTION, Py_END_CRITICAL_SECTION
#include "pycore_descrobject.h"   // _PyMethodWrapper_Type
#include "pycore_dict.h"          // _PyObject_MakeDictFromInstanceAttributes()
#include "pycore_floatobject.h"   // _PyFloat_DebugMallocStats()
#include "pycore_freelist.h"      // _PyObject_ClearFreeLists()
#include "pycore_initconfig.h"    // _PyStatus_EXCEPTION()
#include "pycore_instruction_sequence.h" // _PyInstructionSequence_Type
#include "pycore_hashtable.h"     // _Py_hashtable_new()
#include "pycore_memoryobject.h"  // _PyManagedBuffer_Type
#include "pycore_namespace.h"     // _PyNamespace_Type
#include "pycore_object.h"        // PyAPI_DATA() _Py_SwappedOp definition
#include "pycore_long.h"          // _PyLong_GetZero()
#include "pycore_optimizer.h"     // _PyUOpExecutor_Type, _PyUOpOptimizer_Type, ...
#include "pycore_pyerrors.h"      // _PyErr_Occurred()
#include "pycore_pymem.h"         // _PyMem_IsPtrFreed()
#include "pycore_pystate.h"       // _PyThreadState_GET()
#include "pycore_symtable.h"      // PySTEntry_Type
#include "pycore_typeobject.h"    // _PyBufferWrapper_Type
#include "pycore_typevarobject.h" // _PyTypeAlias_Type, _Py_initialize_generic
#include "pycore_unionobject.h"   // _PyUnion_Type


#ifdef Py_LIMITED_API
   // Prevent mutually recursive calls: _Py_IncRef() <=> Py_INCREF()
#  error "Py_LIMITED_API macro must not be defined"
#endif

/* Defined in tracemalloc.c */
extern void _PyMem_DumpTraceback(int fd, const void *ptr);


int
_PyObject_CheckConsistency(PyObject *op, int check_content)
{}


#ifdef Py_REF_DEBUG
/* We keep the legacy symbol around for backward compatibility. */
Py_ssize_t _Py_RefTotal;

static inline Py_ssize_t
get_legacy_reftotal(void)
{
    return _Py_RefTotal;
}
#endif

#ifdef Py_REF_DEBUG

#define REFTOTAL(interp) \
    interp->object_state.reftotal

static inline void
reftotal_add(PyThreadState *tstate, Py_ssize_t n)
{
#ifdef Py_GIL_DISABLED
    _PyThreadStateImpl *tstate_impl = (_PyThreadStateImpl *)tstate;
    // relaxed store to avoid data race with read in get_reftotal()
    Py_ssize_t reftotal = tstate_impl->reftotal + n;
    _Py_atomic_store_ssize_relaxed(&tstate_impl->reftotal, reftotal);
#else
    REFTOTAL(tstate->interp) += n;
#endif
}

static inline Py_ssize_t get_global_reftotal(_PyRuntimeState *);

/* We preserve the number of refs leaked during runtime finalization,
   so they can be reported if the runtime is initialized again. */
// XXX We don't lose any information by dropping this,
// so we should consider doing so.
static Py_ssize_t last_final_reftotal = 0;

void
_Py_FinalizeRefTotal(_PyRuntimeState *runtime)
{
    last_final_reftotal = get_global_reftotal(runtime);
    runtime->object_state.interpreter_leaks = 0;
}

void
_PyInterpreterState_FinalizeRefTotal(PyInterpreterState *interp)
{
    interp->runtime->object_state.interpreter_leaks += REFTOTAL(interp);
    REFTOTAL(interp) = 0;
}

static inline Py_ssize_t
get_reftotal(PyInterpreterState *interp)
{
    /* For a single interpreter, we ignore the legacy _Py_RefTotal,
       since we can't determine which interpreter updated it. */
    Py_ssize_t total = REFTOTAL(interp);
#ifdef Py_GIL_DISABLED
    for (PyThreadState *p = interp->threads.head; p != NULL; p = p->next) {
        /* This may race with other threads' modifications to their reftotal */
        _PyThreadStateImpl *tstate_impl = (_PyThreadStateImpl *)p;
        total += _Py_atomic_load_ssize_relaxed(&tstate_impl->reftotal);
    }
#endif
    return total;
}

static inline Py_ssize_t
get_global_reftotal(_PyRuntimeState *runtime)
{
    Py_ssize_t total = 0;

    /* Add up the total from each interpreter. */
    HEAD_LOCK(&_PyRuntime);
    PyInterpreterState *interp = PyInterpreterState_Head();
    for (; interp != NULL; interp = PyInterpreterState_Next(interp)) {
        total += get_reftotal(interp);
    }
    HEAD_UNLOCK(&_PyRuntime);

    /* Add in the updated value from the legacy _Py_RefTotal. */
    total += get_legacy_reftotal();
    total += last_final_reftotal;
    total += runtime->object_state.interpreter_leaks;

    return total;
}

#undef REFTOTAL

void
_PyDebug_PrintTotalRefs(void) {
    _PyRuntimeState *runtime = &_PyRuntime;
    fprintf(stderr,
            "[%zd refs, %zd blocks]\n",
            get_global_reftotal(runtime), _Py_GetGlobalAllocatedBlocks());
    /* It may be helpful to also print the "legacy" reftotal separately.
       Likewise for the total for each interpreter. */
}
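
/* Example: on a Py_REF_DEBUG build, `python -X showrefcount` triggers this
 * report after each interactive statement and at interpreter exit; the
 * output looks like "[54893 refs, 22190 blocks]" (numbers vary by build). */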
#endif /* Py_REF_DEBUG */

/* Object allocation routines used by NEWOBJ and NEWVAROBJ macros.
   These are used by the individual routines for object creation.
   Do not call them otherwise, they do not initialize the object! */

#ifdef Py_TRACE_REFS

#define REFCHAIN(interp) interp->object_state.refchain
#define REFCHAIN_VALUE ((void*)(uintptr_t)1)

bool
_PyRefchain_IsTraced(PyInterpreterState *interp, PyObject *obj)
{
    return (_Py_hashtable_get(REFCHAIN(interp), obj) == REFCHAIN_VALUE);
}


static void
_PyRefchain_Trace(PyInterpreterState *interp, PyObject *obj)
{
    if (_Py_hashtable_set(REFCHAIN(interp), obj, REFCHAIN_VALUE) < 0) {
        // Use a fatal error because _Py_NewReference() cannot report
        // the error to the caller.
        Py_FatalError("_Py_hashtable_set() memory allocation failed");
    }
}


static void
_PyRefchain_Remove(PyInterpreterState *interp, PyObject *obj)
{
    void *value = _Py_hashtable_steal(REFCHAIN(interp), obj);
#ifndef NDEBUG
    assert(value == REFCHAIN_VALUE);
#else
    (void)value;
#endif
}


/* Add an object to the refchain hash table.
 *
 * Note that objects are normally added to the list by PyObject_Init()
 * indirectly.  Not all objects are initialized that way, though; exceptions
 * include statically allocated type objects, and statically allocated
 * singletons (like Py_True and Py_None). */
void
_Py_AddToAllObjects(PyObject *op)
{
    PyInterpreterState *interp = _PyInterpreterState_GET();
    if (!_PyRefchain_IsTraced(interp, op)) {
        _PyRefchain_Trace(interp, op);
    }
}
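
/* Illustrative sketch (hypothetical helper, not part of this file): how the
 * statically allocated singletons mentioned above would be registered, since
 * they never pass through PyObject_Init().  _Py_AddToAllObjects() checks
 * first, so registering an object twice is harmless. */
#if 0
static void
register_static_singletons_example(void)
{
    _Py_AddToAllObjects(Py_None);                   // static singleton
    _Py_AddToAllObjects(Py_NotImplemented);         // static singleton
    _Py_AddToAllObjects((PyObject *)&PyType_Type);  // static type object
}
#endif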
#endif  /* Py_TRACE_REFS */

#ifdef Py_REF_DEBUG
/* Log a fatal error; doesn't return. */
void
_Py_NegativeRefcount(const char *filename, int lineno, PyObject *op)
{
    _PyObject_AssertFailed(op, NULL, "object has negative ref count",
                           filename, lineno, __func__);
}

/* This is used strictly by Py_INCREF(). */
void
_Py_INCREF_IncRefTotal(void)
{
    reftotal_add(_PyThreadState_GET(), 1);
}

/* This is used strictly by Py_DECREF(). */
void
_Py_DECREF_DecRefTotal(void)
{
    reftotal_add(_PyThreadState_GET(), -1);
}

void
_Py_IncRefTotal(PyThreadState *tstate)
{
    reftotal_add(tstate, 1);
}

void
_Py_DecRefTotal(PyThreadState *tstate)
{
    reftotal_add(tstate, -1);
}

void
_Py_AddRefTotal(PyThreadState *tstate, Py_ssize_t n)
{
    reftotal_add(tstate, n);
}

/* This includes the legacy total
   and any carried over from the last runtime init/fini cycle. */
Py_ssize_t
_Py_GetGlobalRefTotal(void)
{
    return get_global_reftotal(&_PyRuntime);
}

Py_ssize_t
_Py_GetLegacyRefTotal(void)
{
    return get_legacy_reftotal();
}

Py_ssize_t
_PyInterpreterState_GetRefTotal(PyInterpreterState *interp)
{
    HEAD_LOCK(&_PyRuntime);
    Py_ssize_t total = get_reftotal(interp);
    HEAD_UNLOCK(&_PyRuntime);
    return total;
}

#endif /* Py_REF_DEBUG */

void
Py_IncRef(PyObject *o)
{
    Py_XINCREF(o);
}

void
Py_DecRef(PyObject *o)
{
    Py_XDECREF(o);
}

void
_Py_IncRef(PyObject *o)
{
    Py_INCREF(o);
}

void
_Py_DecRef(PyObject *o)
{
    Py_DECREF(o);
}

#ifdef Py_GIL_DISABLED
# ifdef Py_REF_DEBUG
static int
is_dead(PyObject *o)
{
#  if SIZEOF_SIZE_T == 8
    return (uintptr_t)o->ob_type == 0xDDDDDDDDDDDDDDDD;
#  else
    return (uintptr_t)o->ob_type == 0xDDDDDDDD;
#  endif
}
# endif
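
/* For orientation (summarized from pycore_object.h): in free-threaded
 * builds, `ob_ref_shared` packs the shared refcount in the upper bits
 * (shifted left by _Py_REF_SHARED_SHIFT == 2) and a 2-bit state in the
 * low bits: 0 (default), _Py_REF_MAYBE_WEAKREF, _Py_REF_QUEUED, or
 * _Py_REF_MERGED. */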

void
_Py_DecRefSharedDebug(PyObject *o, const char *filename, int lineno)
{
    // Should we queue the object for the owning thread to merge?
    int should_queue;

    Py_ssize_t new_shared;
    Py_ssize_t shared = _Py_atomic_load_ssize_relaxed(&o->ob_ref_shared);
    do {
        should_queue = (shared == 0 || shared == _Py_REF_MAYBE_WEAKREF);

        if (should_queue) {
            // If the shared refcount was zero and the object was neither
            // queued nor merged, we enqueue the object to be merged by the
            // owning thread.
            // In this case, we don't subtract one from the reference count
            // because the queue holds a reference.
            new_shared = _Py_REF_QUEUED;
        }
        else {
            // Otherwise, subtract one from the reference count. This might
            // be negative!
            new_shared = shared - (1 << _Py_REF_SHARED_SHIFT);
        }

#ifdef Py_REF_DEBUG
        if ((new_shared < 0 && _Py_REF_IS_MERGED(new_shared)) ||
            (should_queue && is_dead(o)))
        {
            _Py_NegativeRefcount(filename, lineno, o);
        }
#endif
    } while (!_Py_atomic_compare_exchange_ssize(&o->ob_ref_shared,
                                                &shared, new_shared));

    if (should_queue) {
#ifdef Py_REF_DEBUG
        _Py_IncRefTotal(_PyThreadState_GET());
#endif
        _Py_brc_queue_object(o);
    }
    else if (new_shared == _Py_REF_MERGED) {
        // refcount is zero AND merged
        _Py_Dealloc(o);
    }
}

void
_Py_DecRefShared(PyObject *o)
{
    _Py_DecRefSharedDebug(o, NULL, 0);
}

void
_Py_MergeZeroLocalRefcount(PyObject *op)
{
    assert(op->ob_ref_local == 0);

    Py_ssize_t shared = _Py_atomic_load_ssize_acquire(&op->ob_ref_shared);
    if (shared == 0) {
        // Fast-path: shared refcount is zero (including flags)
        _Py_Dealloc(op);
        return;
    }

    // gh-121794: This must happen before the store to `ob_ref_shared`
    // (gh-119999), but must stay outside the fast-path above to maintain
    // the invariant that a zero `ob_tid` implies a merged refcount.
    _Py_atomic_store_uintptr_relaxed(&op->ob_tid, 0);

    // Slow-path: atomically set the flags (low two bits) to _Py_REF_MERGED.
    Py_ssize_t new_shared;
    do {
        new_shared = (shared & ~_Py_REF_SHARED_FLAG_MASK) | _Py_REF_MERGED;
    } while (!_Py_atomic_compare_exchange_ssize(&op->ob_ref_shared,
                                                &shared, new_shared));

    if (new_shared == _Py_REF_MERGED) {
        // i.e., the shared refcount is zero (only the flags are set) so we
        // deallocate the object.
        _Py_Dealloc(op);
    }
}

Py_ssize_t
_Py_ExplicitMergeRefcount(PyObject *op, Py_ssize_t extra)
{
    assert(!_Py_IsImmortal(op));

#ifdef Py_REF_DEBUG
    _Py_AddRefTotal(_PyThreadState_GET(), extra);
#endif

    // gh-119999: Write to ob_ref_local and ob_tid before merging the refcount.
    Py_ssize_t local = (Py_ssize_t)op->ob_ref_local;
    _Py_atomic_store_uint32_relaxed(&op->ob_ref_local, 0);
    _Py_atomic_store_uintptr_relaxed(&op->ob_tid, 0);

    Py_ssize_t refcnt;
    Py_ssize_t new_shared;
    Py_ssize_t shared = _Py_atomic_load_ssize_relaxed(&op->ob_ref_shared);
    do {
        refcnt = Py_ARITHMETIC_RIGHT_SHIFT(Py_ssize_t, shared, _Py_REF_SHARED_SHIFT);
        refcnt += local;
        refcnt += extra;

        new_shared = _Py_REF_SHARED(refcnt, _Py_REF_MERGED);
    } while (!_Py_atomic_compare_exchange_ssize(&op->ob_ref_shared,
                                                &shared, new_shared));
    return refcnt;
}
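
/* Worked example for the merge above, assuming these sample values: with
 * local == 3, extra == -1, and
 * ob_ref_shared == (2 << _Py_REF_SHARED_SHIFT) | _Py_REF_MAYBE_WEAKREF,
 * the merged refcount is 2 + 3 - 1 == 4, and ob_ref_shared becomes
 * (4 << _Py_REF_SHARED_SHIFT) | _Py_REF_MERGED. */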
#endif  /* Py_GIL_DISABLED */


/**************************************/

PyObject *
PyObject_Init(PyObject *op, PyTypeObject *tp)
{}

PyVarObject *
PyObject_InitVar(PyVarObject *op, PyTypeObject *tp, Py_ssize_t size)
{}

PyObject *
_PyObject_New(PyTypeObject *tp)
{}

PyVarObject *
_PyObject_NewVar(PyTypeObject *tp, Py_ssize_t nitems)
{}

void
PyObject_CallFinalizer(PyObject *self)
{}

int
PyObject_CallFinalizerFromDealloc(PyObject *self)
{}

int
PyObject_Print(PyObject *op, FILE *fp, int flags)
{}

/* For debugging convenience.  Set a breakpoint here and call it from your DLL */
void
_Py_BreakPoint(void)
{}


/* Heuristically check whether the object memory is uninitialized or has
   already been deallocated.  This relies on the debug hooks of the Python
   memory allocators: see _PyMem_IsPtrFreed().

   The function can be used to prevent a segmentation fault when
   dereferencing poisoned pointers like 0xDDDDDDDDDDDDDDDD. */
int
_PyObject_IsFreed(PyObject *op)
{}


/* For debugging convenience.  See Misc/gdbinit for some useful gdb hooks */
void
_PyObject_Dump(PyObject* op)
{}

PyObject *
PyObject_Repr(PyObject *v)
{}

PyObject *
PyObject_Str(PyObject *v)
{}

PyObject *
PyObject_ASCII(PyObject *v)
{}

PyObject *
PyObject_Bytes(PyObject *v)
{}

static void
clear_freelist(struct _Py_freelist *freelist, int is_finalization,
               freefunc dofree)
{}

static void
free_object(void *obj)
{}

void
_PyObject_ClearFreeLists(struct _Py_freelists *freelists, int is_finalization)
{}

/*
def _PyObject_FunctionStr(x):
    try:
        qualname = x.__qualname__
    except AttributeError:
        return str(x)
    try:
        mod = x.__module__
        if mod is not None and mod != 'builtins':
            return f"{x.__module__}.{qualname}()"
    except AttributeError:
        pass
    return qualname
*/
PyObject *
_PyObject_FunctionStr(PyObject *x)
{}

/* For Python 3.0.1 and later, the old three-way comparison has been
   completely removed in favour of rich comparisons.  PyObject_Compare() and
   PyObject_Cmp() are gone, and the builtin cmp function no longer exists.
   The old tp_compare slot has been renamed to tp_as_async, and should no
   longer be used.  Use tp_richcompare instead.

   See (*) below for practical amendments.

   tp_richcompare gets called with a first argument of the appropriate type
   and a second object of an arbitrary type.  We never do any kind of
   coercion.

   The tp_richcompare slot should return an object, as follows:

    NULL if an exception occurred
    NotImplemented if the requested comparison is not implemented
    any other false value if the requested comparison is false
    any other true value if the requested comparison is true

  The PyObject_RichCompare[Bool]() wrappers raise TypeError when they get
  NotImplemented.

  (*) Practical amendments:

  - If rich comparison returns NotImplemented, == and != are decided by
    comparing the object pointer (i.e. falling back to the base object
    implementation).

*/
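
/* A minimal sketch of the protocol described above (hypothetical type:
 * "MyObject" and "MyObject_Type" are illustrative names, not part of this
 * file): */
#if 0
typedef struct {
    PyObject_HEAD
    double value;
} MyObject;

extern PyTypeObject MyObject_Type;  /* hypothetical */

static PyObject *
myobject_richcompare(PyObject *self, PyObject *other, int op)
{
    if (!PyObject_TypeCheck(other, &MyObject_Type)) {
        /* Unknown right-hand type: never coerce; give the reflected
           operation on `other` a chance instead. */
        Py_RETURN_NOTIMPLEMENTED;
    }
    double a = ((MyObject *)self)->value;
    double b = ((MyObject *)other)->value;
    int r;
    switch (op) {
        case Py_LT: r = (a < b); break;
        case Py_LE: r = (a <= b); break;
        case Py_EQ: r = (a == b); break;
        case Py_NE: r = (a != b); break;
        case Py_GT: r = (a > b); break;
        case Py_GE: r = (a >= b); break;
        default: Py_UNREACHABLE();
    }
    return PyBool_FromLong(r);
}
#endif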

/* Map rich comparison operators to their swapped version, e.g. LT <--> GT */
int _Py_SwappedOp[] = {Py_GT, Py_GE, Py_EQ, Py_NE, Py_LT, Py_LE};

static const char * const opstrings[] = {"<", "<=", "==", "!=", ">", ">="};

/* Perform a rich comparison, raising TypeError when the requested comparison
   operator is not supported. */
static PyObject *
do_richcompare(PyThreadState *tstate, PyObject *v, PyObject *w, int op)
{}

/* Perform a rich comparison with object result.  This wraps do_richcompare()
   with a check for NULL arguments and a recursion check. */

PyObject *
PyObject_RichCompare(PyObject *v, PyObject *w, int op)
{}

/* Perform a rich comparison with integer result.  This wraps
   PyObject_RichCompare(), returning -1 for error, 0 for false, 1 for true. */
int
PyObject_RichCompareBool(PyObject *v, PyObject *w, int op)
{}

Py_hash_t
PyObject_HashNotImplemented(PyObject *v)
{}

Py_hash_t
PyObject_Hash(PyObject *v)
{}

PyObject *
PyObject_GetAttrString(PyObject *v, const char *name)
{}

int
PyObject_HasAttrStringWithError(PyObject *obj, const char *name)
{}


int
PyObject_HasAttrString(PyObject *obj, const char *name)
{}

int
PyObject_SetAttrString(PyObject *v, const char *name, PyObject *w)
{}

int
PyObject_DelAttrString(PyObject *v, const char *name)
{}

int
_PyObject_IsAbstract(PyObject *obj)
{}

PyObject *
_PyObject_GetAttrId(PyObject *v, _Py_Identifier *name)
{}

int
_PyObject_SetAttributeErrorContext(PyObject* v, PyObject* name)
{}

PyObject *
PyObject_GetAttr(PyObject *v, PyObject *name)
{}

int
PyObject_GetOptionalAttr(PyObject *v, PyObject *name, PyObject **result)
{}

int
PyObject_GetOptionalAttrString(PyObject *obj, const char *name, PyObject **result)
{}

int
PyObject_HasAttrWithError(PyObject *obj, PyObject *name)
{}

int
PyObject_HasAttr(PyObject *obj, PyObject *name)
{}

int
PyObject_SetAttr(PyObject *v, PyObject *name, PyObject *value)
{}

int
PyObject_DelAttr(PyObject *v, PyObject *name)
{}

PyObject **
_PyObject_ComputedDictPointer(PyObject *obj)
{}

/* Helper to get a pointer to an object's __dict__ slot, if any.
 * Creates the dict from inline attributes if necessary.
 * Does not set an exception.
 *
 * Note that the tp_dictoffset docs used to recommend this function,
 * so it should be treated as part of the public API.
 */
PyObject **
_PyObject_GetDictPtr(PyObject *obj)
{}
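
/* Illustrative caller (hypothetical helper): a NULL slot pointer and an
 * empty slot are distinct conditions. */
#if 0
static int
has_instance_dict_example(PyObject *obj)
{
    PyObject **dictptr = _PyObject_GetDictPtr(obj);
    /* dictptr == NULL: the type has no __dict__ slot at all;
       *dictptr == NULL: the slot exists but no dict has been created yet. */
    return (dictptr != NULL && *dictptr != NULL);
}
#endif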

PyObject *
PyObject_SelfIter(PyObject *obj)
{}

/* Helper used when the __next__ method is removed from a type:
   tp_iternext is never NULL and can be safely called without checking
   on every iteration.
 */

PyObject *
_PyObject_NextNotImplemented(PyObject *self)
{}


/* Specialized version of _PyObject_GenericGetAttrWithDict
   specifically for the LOAD_METHOD opcode.

   Return 1 if a method is found, 0 if it's a regular attribute
   from __dict__ or something returned via the descriptor protocol.

   `method` will point to the resolved attribute or NULL.  In the
   latter case, an error will be set.
*/
int
_PyObject_GetMethod(PyObject *obj, PyObject *name, PyObject **method)
{}
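
/* Hedged sketch of the calling convention described above (hypothetical
 * caller; the real consumers live in the interpreter's call machinery): */
#if 0
static PyObject *
call_method_example(PyObject *obj, PyObject *name)
{
    PyObject *method = NULL;
    int is_meth = _PyObject_GetMethod(obj, name, &method);
    if (method == NULL) {
        return NULL;  /* error already set */
    }
    PyObject *result;
    if (is_meth) {
        /* Unbound method: pass obj explicitly as the first argument. */
        PyObject *callargs[] = {obj};
        result = PyObject_Vectorcall(method, callargs, 1, NULL);
    }
    else {
        /* Bound object (or plain attribute): call it directly. */
        result = PyObject_CallNoArgs(method);
    }
    Py_DECREF(method);
    return result;
}
#endif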

/* Generic GetAttr functions - put these in your tp_[gs]etattro slot. */

PyObject *
_PyObject_GenericGetAttrWithDict(PyObject *obj, PyObject *name,
                                 PyObject *dict, int suppress)
{}

PyObject *
PyObject_GenericGetAttr(PyObject *obj, PyObject *name)
{}

int
_PyObject_GenericSetAttrWithDict(PyObject *obj, PyObject *name,
                                 PyObject *value, PyObject *dict)
{}

int
PyObject_GenericSetAttr(PyObject *obj, PyObject *name, PyObject *value)
{}

int
PyObject_GenericSetDict(PyObject *obj, PyObject *value, void *context)
{}


/* Test a value used as condition, e.g., in a while or if statement.
   Return -1 if an error occurred */

int
PyObject_IsTrue(PyObject *v)
{}
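
/* Typical calling pattern (illustrative fragment): the -1 error case must
 * be checked before the truth value is used. */
#if 0
    int t = PyObject_IsTrue(v);
    if (t < 0) {
        return NULL;  /* exception set */
    }
    if (t) {
        /* v is true */
    }
#endif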

/* equivalent of 'not v'
   Return -1 if an error occurred */

int
PyObject_Not(PyObject *v)
{}

/* Test whether an object can be called */

int
PyCallable_Check(PyObject *x)
{}


/* Helper for PyObject_Dir without arguments: returns the local scope. */
static PyObject *
_dir_locals(void)
{}

/* Helper for PyObject_Dir: object introspection. */
static PyObject *
_dir_object(PyObject *obj)
{}

/* Implementation of dir() -- if obj is NULL, returns the names in the current
   (local) scope.  Otherwise, performs introspection of the object: returns a
   sorted list of attribute names (supposedly) accessible from the object
*/
PyObject *
PyObject_Dir(PyObject *obj)
{}

/*
None is a non-NULL undefined value.
There is (and should be!) no way to create other objects of this type,
so there is exactly one (which is indestructible, by the way).
*/

/* ARGSUSED */
static PyObject *
none_repr(PyObject *op)
{}

static void
none_dealloc(PyObject* none)
{}

static PyObject *
none_new(PyTypeObject *type, PyObject *args, PyObject *kwargs)
{}

static int
none_bool(PyObject *v)
{}

static Py_hash_t
none_hash(PyObject *v)
{}

static PyNumberMethods none_as_number =;

PyDoc_STRVAR(none_doc,
"NoneType()\n"
"--\n\n"
"The type of the None singleton.");

PyTypeObject _PyNone_Type =;

PyObject _Py_NoneStruct =;

/* NotImplemented is an object that can be used to signal that an
   operation is not implemented for the given type combination. */

static PyObject *
NotImplemented_repr(PyObject *op)
{}

static PyObject *
NotImplemented_reduce(PyObject *op, PyObject *Py_UNUSED(ignored))
{}

static PyMethodDef notimplemented_methods[] =;

static PyObject *
notimplemented_new(PyTypeObject *type, PyObject *args, PyObject *kwargs)
{}

static void
notimplemented_dealloc(PyObject *notimplemented)
{}

static int
notimplemented_bool(PyObject *v)
{}

static PyNumberMethods notimplemented_as_number =;

PyDoc_STRVAR(notimplemented_doc,
"NotImplementedType()\n"
"--\n\n"
"The type of the NotImplemented singleton.");

PyTypeObject _PyNotImplemented_Type =;

PyObject _Py_NotImplementedStruct =;


PyStatus
_PyObject_InitState(PyInterpreterState *interp)
{}

void
_PyObject_FiniState(PyInterpreterState *interp)
{}


extern PyTypeObject _PyAnextAwaitable_Type;
extern PyTypeObject _PyLegacyEventHandler_Type;
extern PyTypeObject _PyLineIterator;
extern PyTypeObject _PyMemoryIter_Type;
extern PyTypeObject _PyPositionsIterator;
extern PyTypeObject _Py_GenericAliasIterType;

static PyTypeObject* static_types[] =;


PyStatus
_PyTypes_InitTypes(PyInterpreterState *interp)
{}


// Best-effort function clearing static types.
//
// Don't deallocate a type if it still has subclasses. If a Py_Finalize()
// sub-function is interrupted by CTRL+C or fails with MemoryError, some
// subclasses are not cleared properly. Leave the static type unchanged in this
// case.
void
_PyTypes_FiniTypes(PyInterpreterState *interp)
{}


static inline void
new_reference(PyObject *op)
{}

void
_Py_NewReference(PyObject *op)
{}

void
_Py_NewReferenceNoTotal(PyObject *op)
{}

void
_Py_SetImmortalUntracked(PyObject *op)
{}

void
_Py_SetImmortal(PyObject *op)
{}

void
_PyObject_SetDeferredRefcount(PyObject *op)
{}

void
_Py_ResurrectReference(PyObject *op)
{}


#ifdef Py_TRACE_REFS
/* Make sure the ref is associated with the right interpreter.
 * This only needs special attention for heap-allocated objects
 * that have been immortalized, and only when the object might
 * outlive the interpreter where it was created.  That means the
 * object was necessarily created using a global allocator
 * (i.e. from the main interpreter).  Thus in that specific case
 * we move the object over to the main interpreter's refchain.
 *
 * This was added for the sake of the immortal interned strings,
 * where legacy subinterpreters share the main interpreter's
 * interned dict (and allocator), and therefore the strings can
 * outlive the subinterpreter.
 *
 * It may make sense to fold this into _Py_SetImmortalUntracked(),
 * but that requires further investigation.  In the meantime, it is
 * up to the caller to know if this is needed.  There should be
 * very few cases.
 */
void
_Py_NormalizeImmortalReference(PyObject *op)
{
    assert(_Py_IsImmortal(op));
    PyInterpreterState *interp = _PyInterpreterState_GET();
    if (!_PyRefchain_IsTraced(interp, op)) {
        return;
    }
    PyInterpreterState *main_interp = _PyInterpreterState_Main();
    if (interp != main_interp
           && interp->feature_flags & Py_RTFLAGS_USE_MAIN_OBMALLOC)
    {
        assert(!_PyRefchain_IsTraced(main_interp, op));
        _PyRefchain_Remove(interp, op);
        _PyRefchain_Trace(main_interp, op);
    }
}

void
_Py_ForgetReference(PyObject *op)
{
    if (Py_REFCNT(op) < 0) {
        _PyObject_ASSERT_FAILED_MSG(op, "negative refcnt");
    }

    PyInterpreterState *interp = _PyInterpreterState_GET();

#ifdef SLOW_UNREF_CHECK
    if (!_PyRefchain_Get(interp, op)) {
        /* Not found */
        _PyObject_ASSERT_FAILED_MSG(op,
                                    "object not found in the objects list");
    }
#endif

    _PyRefchain_Remove(interp, op);
}

static int
_Py_PrintReference(_Py_hashtable_t *ht,
                   const void *key, const void *value,
                   void *user_data)
{
    PyObject *op = (PyObject*)key;
    FILE *fp = (FILE *)user_data;
    fprintf(fp, "%p [%zd] ", (void *)op, Py_REFCNT(op));
    if (PyObject_Print(op, fp, 0) != 0) {
        PyErr_Clear();
    }
    putc('\n', fp);
    return 0;
}


/* Print all live objects.  Because PyObject_Print is called, the
 * interpreter must be in a healthy state.
 */
void
_Py_PrintReferences(PyInterpreterState *interp, FILE *fp)
{
    if (interp == NULL) {
        interp = _PyInterpreterState_Main();
    }
    fprintf(fp, "Remaining objects:\n");
    _Py_hashtable_foreach(REFCHAIN(interp), _Py_PrintReference, fp);
}


static int
_Py_PrintReferenceAddress(_Py_hashtable_t *ht,
                          const void *key, const void *value,
                          void *user_data)
{
    PyObject *op = (PyObject*)key;
    FILE *fp = (FILE *)user_data;
    fprintf(fp, "%p [%zd] %s\n",
            (void *)op, Py_REFCNT(op), Py_TYPE(op)->tp_name);
    return 0;
}


/* Print the addresses of all live objects.  Unlike _Py_PrintReferences, this
 * doesn't make any calls to the Python C API, so is always safe to call.
 */
// XXX This function is not safe to use if the interpreter has been
// freed or is in an unhealthy state (e.g. late in finalization).
// The call in Py_FinalizeEx() is okay since the main interpreter
// is statically allocated.
void
_Py_PrintReferenceAddresses(PyInterpreterState *interp, FILE *fp)
{
    fprintf(fp, "Remaining object addresses:\n");
    _Py_hashtable_foreach(REFCHAIN(interp), _Py_PrintReferenceAddress, fp);
}


typedef struct {
    PyObject *self;
    PyObject *args;
    PyObject *list;
    PyObject *type;
    Py_ssize_t limit;
} _Py_GetObjectsData;

enum {
    _PY_GETOBJECTS_IGNORE = 0,
    _PY_GETOBJECTS_ERROR = 1,
    _PY_GETOBJECTS_STOP = 2,
};

static int
_Py_GetObject(_Py_hashtable_t *ht,
              const void *key, const void *value,
              void *user_data)
{
    PyObject *op = (PyObject *)key;
    _Py_GetObjectsData *data = user_data;
    if (data->limit > 0) {
        if (PyList_GET_SIZE(data->list) >= data->limit) {
            return _PY_GETOBJECTS_STOP;
        }
    }

    if (op == data->self) {
        return _PY_GETOBJECTS_IGNORE;
    }
    if (op == data->args) {
        return _PY_GETOBJECTS_IGNORE;
    }
    if (op == data->list) {
        return _PY_GETOBJECTS_IGNORE;
    }
    if (data->type != NULL) {
        if (op == data->type) {
            return _PY_GETOBJECTS_IGNORE;
        }
        if (!Py_IS_TYPE(op, (PyTypeObject *)data->type)) {
            return _PY_GETOBJECTS_IGNORE;
        }
    }

    if (PyList_Append(data->list, op) < 0) {
        return _PY_GETOBJECTS_ERROR;
    }
    return 0;
}


/* The implementation of sys.getobjects(). */
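/* Python-level usage, for reference (available on Py_TRACE_REFS builds):
 *
 *     import sys
 *     some = sys.getobjects(10)        # at most 10 objects
 *     ints = sys.getobjects(0, int)    # all live ints (0 means no limit)
 */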
PyObject *
_Py_GetObjects(PyObject *self, PyObject *args)
{
    Py_ssize_t limit;
    PyObject *type = NULL;
    if (!PyArg_ParseTuple(args, "n|O", &limit, &type)) {
        return NULL;
    }

    PyObject *list = PyList_New(0);
    if (list == NULL) {
        return NULL;
    }

    _Py_GetObjectsData data = {
        .self = self,
        .args = args,
        .list = list,
        .type = type,
        .limit = limit,
    };
    PyInterpreterState *interp = _PyInterpreterState_GET();
    int res = _Py_hashtable_foreach(REFCHAIN(interp), _Py_GetObject, &data);
    if (res == _PY_GETOBJECTS_ERROR) {
        Py_DECREF(list);
        return NULL;
    }
    return list;
}

#undef REFCHAIN
#undef REFCHAIN_VALUE

#endif  /* Py_TRACE_REFS */


/* Hack to force loading of abstract.o */
Py_ssize_t (*_Py_abstract_hack)(PyObject *) = PyObject_Size;


void
_PyObject_DebugTypeStats(FILE *out)
{}

/* These methods are used to control infinite recursion in repr, str, print,
   etc.  Container objects that may recursively contain themselves,
   e.g. builtin dictionaries and lists, should use Py_ReprEnter() and
   Py_ReprLeave() to avoid infinite recursion.

   Py_ReprEnter() returns 0 the first time it is called for a particular
   object and 1 every time thereafter.  It returns -1 if an exception
   occurred.  Py_ReprLeave() has no return value.

   See dictobject.c and listobject.c for examples of use.
*/
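
/* A minimal sketch of the pattern (hypothetical container type), in the
 * spirit of the list and dict implementations cited above: */
#if 0
static PyObject *
mycontainer_repr(PyObject *self)
{
    int res = Py_ReprEnter(self);
    if (res != 0) {
        /* res > 0: self is already being repr'd further up the stack,
           so return a placeholder instead of recursing forever. */
        return (res > 0) ? PyUnicode_FromString("mycontainer(...)") : NULL;
    }
    PyObject *result = PyUnicode_FromString("mycontainer(<items>)");
    Py_ReprLeave(self);
    return result;
}
#endif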

int
Py_ReprEnter(PyObject *obj)
{}

void
Py_ReprLeave(PyObject *obj)
{}

/* Trashcan support. */

/* Add op to the gcstate->trash_delete_later list.  Called when the current
 * call-stack depth gets large.  op must be a currently untracked gc'ed
 * object, with refcount 0.  Py_DECREF must already have been called on it.
 */
void
_PyTrash_thread_deposit_object(PyThreadState *tstate, PyObject *op)
{}

/* Deallocate all the objects in the gcstate->trash_delete_later list.
 * Called when the call-stack unwinds again. */
void
_PyTrash_thread_destroy_chain(PyThreadState *tstate)
{}
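
/* For context, a hedged sketch of the dealloc-side pattern that feeds this
 * machinery (hypothetical type; the Py_TRASHCAN_BEGIN/END macros bound the
 * C stack depth by depositing objects on the list described above): */
#if 0
typedef struct {
    PyObject_HEAD
    PyObject *item;
} MyContainer;

static void
mycontainer_dealloc(PyObject *op)
{
    PyObject_GC_UnTrack(op);
    Py_TRASHCAN_BEGIN(op, mycontainer_dealloc)
    Py_CLEAR(((MyContainer *)op)->item);  /* may recurse arbitrarily deep */
    Py_TYPE(op)->tp_free(op);
    Py_TRASHCAN_END
}
#endif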

void _Py_NO_RETURN
_PyObject_AssertFailed(PyObject *obj, const char *expr, const char *msg,
                       const char *file, int line, const char *function)
{}


void
_Py_Dealloc(PyObject *op)
{}


PyObject **
PyObject_GET_WEAKREFS_LISTPTR(PyObject *op)
{}


#undef Py_NewRef
#undef Py_XNewRef

// Export Py_NewRef() and Py_XNewRef() as regular functions for the stable ABI.
PyObject*
Py_NewRef(PyObject *obj)
{
    return _Py_NewRef(obj);
}

PyObject*
Py_XNewRef(PyObject *obj)
{
    return _Py_XNewRef(obj);
}

#undef Py_Is
#undef Py_IsNone
#undef Py_IsTrue
#undef Py_IsFalse

// Export Py_Is(), Py_IsNone(), Py_IsTrue(), Py_IsFalse() as regular functions
// for the stable ABI.
int Py_Is(PyObject *x, PyObject *y)
{
    return (x == y);
}

int Py_IsNone(PyObject *x)
{
    return Py_Is(x, Py_None);
}

int Py_IsTrue(PyObject *x)
{
    return Py_Is(x, Py_True);
}

int Py_IsFalse(PyObject *x)
{
    return Py_Is(x, Py_False);
}


// Py_SET_REFCNT() implementation for stable ABI
void
_Py_SetRefcnt(PyObject *ob, Py_ssize_t refcnt)
{
    Py_SET_REFCNT(ob, refcnt);
}

int PyRefTracer_SetTracer(PyRefTracer tracer, void *data) {}

PyRefTracer PyRefTracer_GetTracer(void** data) {}



static PyObject* constants[] =;

void
_Py_GetConstant_Init(void)
{}

PyObject*
Py_GetConstant(unsigned int constant_id)
{}


PyObject*
Py_GetConstantBorrowed(unsigned int constant_id)
{}
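
/* Usage note: Py_GetConstant() returns a strong reference, while
 * Py_GetConstantBorrowed() returns a borrowed one, e.g.:
 *
 *     PyObject *s = Py_GetConstant(Py_CONSTANT_EMPTY_STR);  // strong ref
 *     ...
 *     Py_DECREF(s);
 */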


// Py_TYPE() implementation for the stable ABI
#undef Py_TYPE
PyTypeObject*
Py_TYPE(PyObject *ob)
{}


// Py_REFCNT() implementation for the stable ABI
#undef Py_REFCNT
Py_ssize_t
Py_REFCNT(PyObject *ob)
{}