cpython/Python/lock.c

// Lock implementation

#include "Python.h"

#include "pycore_lock.h"
#include "pycore_parking_lot.h"
#include "pycore_semaphore.h"
#include "pycore_time.h"          // _PyTime_Add()

#ifdef MS_WINDOWS
#  define WIN32_LEAN_AND_MEAN
#  include <windows.h>            // SwitchToThread()
#elif defined(HAVE_SCHED_H)
#  include <sched.h>              // sched_yield()
#endif

// If a thread waits on a lock for longer than TIME_TO_BE_FAIR_NS (1 ms), then
// the unlocking thread directly hands off ownership of the lock. This avoids
// starvation.
static const PyTime_t TIME_TO_BE_FAIR_NS = 1000*1000;

// Spin for a bit before parking the thread. This is only enabled for
// `--disable-gil` builds because it is unlikely to be helpful if the GIL is
// enabled.
#if Py_GIL_DISABLED
static const int MAX_SPIN_COUNT = 40;
#else
static const int MAX_SPIN_COUNT = 0;
#endif

struct mutex_entry {
    // The time after which the unlocking thread should hand off lock
    // ownership directly to the waiting thread. Written by the waiting
    // thread.
    PyTime_t time_to_be_fair;

    // Set to 1 if the lock was handed off. Written by the unlocking thread.
    int handed_off;
};

static void
_Py_yield(void)
{
#ifdef MS_WINDOWS
    SwitchToThread();
#elif defined(HAVE_SCHED_H)
    sched_yield();
#endif
}

PyLockStatus
_PyMutex_LockTimed(PyMutex *m, PyTime_t timeout, _PyLockFlags flags)
{
    uint8_t v = _Py_atomic_load_uint8_relaxed(&m->_bits);
    if ((v & _Py_LOCKED) == 0) {
        if (_Py_atomic_compare_exchange_uint8(&m->_bits, &v, v|_Py_LOCKED)) {
            return PY_LOCK_ACQUIRED;
        }
    }
    else if (timeout == 0) {
        return PY_LOCK_FAILURE;
    }

    PyTime_t now;
    // silently ignore error: cannot report error to the caller
    (void)PyTime_MonotonicRaw(&now);
    PyTime_t endtime = 0;
    if (timeout > 0) {
        endtime = _PyTime_Add(now, timeout);
    }

    struct mutex_entry entry = {
        .time_to_be_fair = now + TIME_TO_BE_FAIR_NS,
        .handed_off = 0,
    };

    Py_ssize_t spin_count = 0;
    for (;;) {
        if ((v & _Py_LOCKED) == 0) {
            // The lock is unlocked. Try to grab it.
            if (_Py_atomic_compare_exchange_uint8(&m->_bits, &v, v|_Py_LOCKED)) {
                return PY_LOCK_ACQUIRED;
            }
            continue;
        }

        if (!(v & _Py_HAS_PARKED) && spin_count < MAX_SPIN_COUNT) {
            // Spin for a bit before parking the thread.
            _Py_yield();
            spin_count++;
            continue;
        }

        if (timeout == 0) {
            return PY_LOCK_FAILURE;
        }

        uint8_t newv = v;
        if (!(v & _Py_HAS_PARKED)) {
            // We are the first waiter. Set the _Py_HAS_PARKED flag.
            newv = v | _Py_HAS_PARKED;
            if (!_Py_atomic_compare_exchange_uint8(&m->_bits, &v, newv)) {
                continue;
            }
        }

        int ret = _PyParkingLot_Park(&m->_bits, &newv, sizeof(newv), timeout,
                                     &entry, (flags & _PY_LOCK_DETACH) != 0);
        if (ret == Py_PARK_OK) {
            if (entry.handed_off) {
                // The unlocking thread handed the lock off to us directly.
                assert(_Py_atomic_load_uint8_relaxed(&m->_bits) & _Py_LOCKED);
                return PY_LOCK_ACQUIRED;
            }
        }
        else if (ret == Py_PARK_INTR && (flags & _PY_LOCK_HANDLE_SIGNALS)) {
            if (Py_MakePendingCalls() < 0) {
                return PY_LOCK_INTR;
            }
        }
        else if (ret == Py_PARK_TIMEOUT) {
            assert(timeout >= 0);
            return PY_LOCK_FAILURE;
        }

        if (timeout > 0) {
            timeout = _PyDeadline_Get(endtime);
            if (timeout <= 0) {
                // Avoid negative values, which mean "block forever".
                timeout = 0;
            }
        }

        v = _Py_atomic_load_uint8_relaxed(&m->_bits);
    }
}
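
// Usage sketch (illustrative only, not part of the upstream file): acquiring
// a mutex with a timeout while detaching the thread state and handling
// signals while blocked. `lock_with_timeout` is a hypothetical helper name.
#if 0
static int
lock_with_timeout(PyMutex *m, PyTime_t timeout_ns)
{
    PyLockStatus st = _PyMutex_LockTimed(
        m, timeout_ns, _PY_LOCK_DETACH | _PY_LOCK_HANDLE_SIGNALS);
    return (st == PY_LOCK_ACQUIRED) ? 0 : -1;
}
#endif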

static void
mutex_unpark(PyMutex *m, struct mutex_entry *entry, int has_more_waiters)
{
    uint8_t v = 0;
    if (entry) {
        PyTime_t now;
        // silently ignore error: cannot report error to the caller
        (void)PyTime_MonotonicRaw(&now);
        int should_be_fair = now > entry->time_to_be_fair;

        entry->handed_off = should_be_fair;
        if (should_be_fair) {
            v |= _Py_LOCKED;
        }
        if (has_more_waiters) {
            v |= _Py_HAS_PARKED;
        }
    }
    _Py_atomic_store_uint8(&m->_bits, v);
}

int
_PyMutex_TryUnlock(PyMutex *m)
{
    uint8_t v = _Py_atomic_load_uint8(&m->_bits);
    for (;;) {
        if ((v & _Py_LOCKED) == 0) {
            // error: the mutex is not locked
            return -1;
        }
        else if ((v & _Py_HAS_PARKED)) {
            // wake up a single thread
            _PyParkingLot_Unpark(&m->_bits, (_Py_unpark_fn_t *)mutex_unpark, m);
            return 0;
        }
        else if (_Py_atomic_compare_exchange_uint8(&m->_bits, &v, _Py_UNLOCKED)) {
            // fast-path: no waiters
            return 0;
        }
    }
}

// _PyRawMutex stores a linked list of `struct raw_mutex_entry`, one for each
// thread waiting on the mutex, directly in the mutex itself.
struct raw_mutex_entry {
    struct raw_mutex_entry *next;
    _PySemaphore sema;
};

void
_PyRawMutex_LockSlow(_PyRawMutex *m)
{
    struct raw_mutex_entry waiter;
    _PySemaphore_Init(&waiter.sema);

    uintptr_t v = _Py_atomic_load_uintptr(&m->v);
    for (;;) {
        if ((v & _Py_LOCKED) == 0) {
            // Unlocked: try to grab the lock.
            if (_Py_atomic_compare_exchange_uintptr(&m->v, &v, v|_Py_LOCKED)) {
                break;
            }
            continue;
        }

        // Locked: try to add ourselves as a waiter.
        waiter.next = (struct raw_mutex_entry *)(v & ~(uintptr_t)_Py_LOCKED);
        uintptr_t desired = ((uintptr_t)&waiter)|_Py_LOCKED;
        if (!_Py_atomic_compare_exchange_uintptr(&m->v, &v, desired)) {
            continue;
        }

        // Wait for us to be woken up. Note that we still have to lock the
        // mutex ourselves: it is NOT handed off to us.
        _PySemaphore_Wait(&waiter.sema, -1, /*detach=*/0);
    }

    _PySemaphore_Destroy(&waiter.sema);
}

void
_PyRawMutex_UnlockSlow(_PyRawMutex *m)
{
    uintptr_t v = _Py_atomic_load_uintptr(&m->v);
    for (;;) {
        if ((v & _Py_LOCKED) == 0) {
            Py_FatalError("unlocking mutex that is not locked");
        }

        struct raw_mutex_entry *waiter =
            (struct raw_mutex_entry *)(v & ~(uintptr_t)_Py_LOCKED);
        if (waiter) {
            // Pop the head of the wait queue and wake it up.
            uintptr_t next_waiter = (uintptr_t)waiter->next;
            if (_Py_atomic_compare_exchange_uintptr(&m->v, &v, next_waiter)) {
                _PySemaphore_Wakeup(&waiter->sema);
                return;
            }
        }
        else {
            // No waiters: just clear the locked bit.
            if (_Py_atomic_compare_exchange_uintptr(&m->v, &v, _Py_UNLOCKED)) {
                return;
            }
        }
    }
}
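
// Usage sketch (illustrative only, not part of the upstream file). A raw
// mutex does not use the parking lot, so the parking lot implementation
// itself can rely on it; the uncontended fast paths, _PyRawMutex_Lock() and
// _PyRawMutex_Unlock(), are inline functions in pycore_lock.h that fall back
// to the slow paths above. `counter` and `counter_lock` are hypothetical.
#if 0
static _PyRawMutex counter_lock;
static int counter;

static void
increment_counter(void)
{
    _PyRawMutex_Lock(&counter_lock);    // contended case: _PyRawMutex_LockSlow()
    counter++;
    _PyRawMutex_Unlock(&counter_lock);  // waiters present: _PyRawMutex_UnlockSlow()
}
#endif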

int
_PyEvent_IsSet(PyEvent *evt)
{
    uint8_t v = _Py_atomic_load_uint8(&evt->v);
    return v == _Py_LOCKED;
}

void
_PyEvent_Notify(PyEvent *evt)
{
    uint8_t v = _Py_atomic_exchange_uint8(&evt->v, _Py_LOCKED);
    if (v == _Py_LOCKED) {
        // The event is already set.
        return;
    }

    if (v == _Py_HAS_PARKED) {
        // Wake up all waiting threads.
        _PyParkingLot_UnparkAll(&evt->v);
    }
}

void
PyEvent_Wait(PyEvent *evt)
{
    while (!PyEvent_WaitTimed(evt, -1, /*detach=*/1))
        ;
}

int
PyEvent_WaitTimed(PyEvent *evt, PyTime_t timeout_ns, int detach)
{
    for (;;) {
        uint8_t v = _Py_atomic_load_uint8(&evt->v);
        if (v == _Py_LOCKED) {
            // The event is already set.
            return 1;
        }
        if (v == _Py_UNLOCKED) {
            if (!_Py_atomic_compare_exchange_uint8(&evt->v, &v, _Py_HAS_PARKED)) {
                continue;
            }
        }

        uint8_t expected = _Py_HAS_PARKED;
        (void) _PyParkingLot_Park(&evt->v, &expected, sizeof(evt->v),
                                  timeout_ns, NULL, detach);

        return _Py_atomic_load_uint8(&evt->v) == _Py_LOCKED;
    }
}
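
// Usage sketch (illustrative only, not part of the upstream file): a one-shot
// "ready" notification. A PyEvent stays set once _PyEvent_Notify() is called;
// it cannot be reset. `ready` and `wait_until_ready` are hypothetical names.
#if 0
static PyEvent ready;

static int
wait_until_ready(PyTime_t timeout_ns)
{
    // Returns 1 if the event was set within the timeout, 0 otherwise,
    // detaching the thread state while blocked.
    return PyEvent_WaitTimed(&ready, timeout_ns, /*detach=*/1);
}
#endif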

static int
unlock_once(_PyOnceFlag *o, int res)
{
    // Set the once flag to _Py_ONCE_INITIALIZED if the function succeeded,
    // or back to zero if it failed, and wake up anyone waiting.
    uint8_t new_value;
    switch (res) {
        case -1:
            new_value = 0;
            break;
        case 0:
            new_value = _Py_ONCE_INITIALIZED;
            break;
        default:
            assert(0);
            Py_UNREACHABLE();
    }

    uint8_t old_value = _Py_atomic_exchange_uint8(&o->v, new_value);
    if ((old_value & _Py_HAS_PARKED) != 0) {
        // Wake up anyone waiting on the once flag.
        _PyParkingLot_UnparkAll(&o->v);
    }
    return res;
}

int
_PyOnceFlag_CallOnceSlow(_PyOnceFlag *flag, _Py_once_fn_t *fn, void *arg)
{
    uint8_t v = _Py_atomic_load_uint8(&flag->v);
    for (;;) {
        if (v == 0) {
            if (!_Py_atomic_compare_exchange_uint8(&flag->v, &v, _Py_LOCKED)) {
                continue;
            }

            // We've locked the once flag: run the initialization function.
            int res = fn(arg);
            return unlock_once(flag, res);
        }

        if (v & _Py_ONCE_INITIALIZED) {
            return 0;
        }

        // The once flag is being initialized by another thread.
        assert(v & _Py_LOCKED);
        if (!(v & _Py_HAS_PARKED)) {
            // We are the first waiter. Set the _Py_HAS_PARKED flag.
            uint8_t new_value = v | _Py_HAS_PARKED;
            if (!_Py_atomic_compare_exchange_uint8(&flag->v, &v, new_value)) {
                continue;
            }
            v = new_value;
        }

        // Wait for initialization to finish.
        _PyParkingLot_Park(&flag->v, &v, sizeof(v), -1, NULL, 1);
        v = _Py_atomic_load_uint8(&flag->v);
    }
}
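
// Usage sketch (illustrative only, not part of the upstream file): one-time
// initialization goes through the inline fast path _PyOnceFlag_CallOnce() in
// pycore_lock.h, which calls _PyOnceFlag_CallOnceSlow() only when the flag is
// not yet initialized. `table_once`, `init_table`, and `ensure_table` are
// hypothetical names.
#if 0
static _PyOnceFlag table_once;

static int
init_table(void *arg)
{
    // Initialize shared state; return 0 on success or -1 on failure. A
    // failed initialization resets the flag so that a later call retries.
    return 0;
}

static int
ensure_table(void)
{
    return _PyOnceFlag_CallOnce(&table_once, init_table, NULL);
}
#endif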

static int
recursive_mutex_is_owned_by(_PyRecursiveMutex *m, PyThread_ident_t tid)
{
    return _Py_atomic_load_ullong_relaxed(&m->thread) == tid;
}

int
_PyRecursiveMutex_IsLockedByCurrentThread(_PyRecursiveMutex *m)
{
    PyThread_ident_t tid = PyThread_get_thread_ident_ex();
    return recursive_mutex_is_owned_by(m, tid);
}

void
_PyRecursiveMutex_Lock(_PyRecursiveMutex *m)
{
    PyThread_ident_t thread = PyThread_get_thread_ident_ex();
    if (recursive_mutex_is_owned_by(m, thread)) {
        m->level++;
        return;
    }
    PyMutex_Lock(&m->mutex);
    _Py_atomic_store_ullong_relaxed(&m->thread, thread);
    assert(m->level == 0);
}

PyLockStatus
_PyRecursiveMutex_LockTimed(_PyRecursiveMutex *m, PyTime_t timeout, _PyLockFlags flags)
{
    PyThread_ident_t thread = PyThread_get_thread_ident_ex();
    if (recursive_mutex_is_owned_by(m, thread)) {
        m->level++;
        return PY_LOCK_ACQUIRED;
    }
    PyLockStatus s = _PyMutex_LockTimed(&m->mutex, timeout, flags);
    if (s == PY_LOCK_ACQUIRED) {
        _Py_atomic_store_ullong_relaxed(&m->thread, thread);
        assert(m->level == 0);
    }
    return s;
}

void
_PyRecursiveMutex_Unlock(_PyRecursiveMutex *m)
{
    if (_PyRecursiveMutex_TryUnlock(m) < 0) {
        Py_FatalError("unlocking a recursive mutex that is not "
                      "owned by the current thread");
    }
}

int
_PyRecursiveMutex_TryUnlock(_PyRecursiveMutex *m)
{
    PyThread_ident_t thread = PyThread_get_thread_ident_ex();
    if (!recursive_mutex_is_owned_by(m, thread)) {
        return -1;
    }
    if (m->level > 0) {
        m->level--;
        return 0;
    }
    assert(m->level == 0);
    _Py_atomic_store_ullong_relaxed(&m->thread, 0);
    PyMutex_Unlock(&m->mutex);
    return 0;
}
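
// Usage sketch (illustrative only, not part of the upstream file): the owning
// thread may re-acquire a recursive mutex; inner acquisitions only adjust
// m->level. `do_work` and `nested` are hypothetical names.
#if 0
static void
nested(_PyRecursiveMutex *m)
{
    _PyRecursiveMutex_Lock(m);      // already owned: just bumps m->level
    _PyRecursiveMutex_Unlock(m);    // level > 0: decrements m->level
}

static void
do_work(_PyRecursiveMutex *m)
{
    _PyRecursiveMutex_Lock(m);      // not owned yet: acquires m->mutex
    nested(m);
    _PyRecursiveMutex_Unlock(m);    // last unlock: releases m->mutex
}
#endif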

#define _Py_WRITE_LOCKED 1
#define _PyRWMutex_READER_SHIFT 2
#define _Py_RWMUTEX_MAX_READERS (UINTPTR_MAX >> _PyRWMutex_READER_SHIFT)
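
// Layout of _PyRWMutex.bits implied by the defines above: bit 0 is the
// write-locked flag, bit 1 is the has-parked flag (shared with _Py_HAS_PARKED
// from pycore_lock.h), and the remaining bits count the readers holding the
// lock. For example, three readers plus a parked waiter is encoded as
// (3 << _PyRWMutex_READER_SHIFT) | _Py_HAS_PARKED.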

static uintptr_t
rwmutex_set_parked_and_wait(_PyRWMutex *rwmutex, uintptr_t bits)
{
    // Set _Py_HAS_PARKED and wait until we are woken up.
    if ((bits & _Py_HAS_PARKED) == 0) {
        uintptr_t newval = bits | _Py_HAS_PARKED;
        if (!_Py_atomic_compare_exchange_uintptr(&rwmutex->bits,
                                                 &bits, newval)) {
            return bits;
        }
        bits = newval;
    }

    _PyParkingLot_Park(&rwmutex->bits, &bits, sizeof(bits), -1, NULL, 1);
    return _Py_atomic_load_uintptr_relaxed(&rwmutex->bits);
}

// The number of readers holding the lock
static uintptr_t
rwmutex_reader_count(uintptr_t bits)
{
    return bits >> _PyRWMutex_READER_SHIFT;
}

void
_PyRWMutex_RLock(_PyRWMutex *rwmutex)
{
    uintptr_t bits = _Py_atomic_load_uintptr_relaxed(&rwmutex->bits);
    for (;;) {
        if ((bits & _Py_WRITE_LOCKED)) {
            // A writer holds the lock. Wait until it releases the lock.
            bits = rwmutex_set_parked_and_wait(rwmutex, bits);
            continue;
        }
        else if ((bits & _Py_HAS_PARKED)) {
            // Readers hold the lock (or just gave it up), but there is a
            // waiting writer. Park instead of grabbing the lock to avoid
            // starving the writer.
            bits = rwmutex_set_parked_and_wait(rwmutex, bits);
            continue;
        }
        else {
            // The lock is unlocked or read-locked. Try to grab it.
            assert(rwmutex_reader_count(bits) < _Py_RWMUTEX_MAX_READERS);
            uintptr_t newval = bits + (1 << _PyRWMutex_READER_SHIFT);
            if (!_Py_atomic_compare_exchange_uintptr(&rwmutex->bits,
                                                     &bits, newval)) {
                continue;
            }
            return;
        }
    }
}

void
_PyRWMutex_RUnlock(_PyRWMutex *rwmutex)
{
    uintptr_t bits = _Py_atomic_add_uintptr(&rwmutex->bits,
                                            -(1 << _PyRWMutex_READER_SHIFT));
    assert(rwmutex_reader_count(bits) > 0 && "lock was not read-locked");
    bits -= (1 << _PyRWMutex_READER_SHIFT);

    if (rwmutex_reader_count(bits) == 0 && (bits & _Py_HAS_PARKED)) {
        // We were the last reader and there are parked threads: wake them up.
        _PyParkingLot_UnparkAll(&rwmutex->bits);
        return;
    }
}

void
_PyRWMutex_Lock(_PyRWMutex *rwmutex)
{
    uintptr_t bits = _Py_atomic_load_uintptr_relaxed(&rwmutex->bits);
    for (;;) {
        // If there are no active readers and no writer, grab the lock.
        if ((bits & ~(uintptr_t)_Py_HAS_PARKED) == 0) {
            if (!_Py_atomic_compare_exchange_uintptr(&rwmutex->bits,
                                                     &bits,
                                                     bits | _Py_WRITE_LOCKED)) {
                continue;
            }
            return;
        }

        // Otherwise, we have to wait.
        bits = rwmutex_set_parked_and_wait(rwmutex, bits);
    }
}

void
_PyRWMutex_Unlock(_PyRWMutex *rwmutex)
{
    uintptr_t old_bits = _Py_atomic_exchange_uintptr(&rwmutex->bits, 0);

    assert((old_bits & _Py_WRITE_LOCKED) && "lock was not write-locked");
    assert(rwmutex_reader_count(old_bits) == 0 && "lock was read-locked");

    if ((old_bits & _Py_HAS_PARKED) != 0) {
        _PyParkingLot_UnparkAll(&rwmutex->bits);
    }
}
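
// Usage sketch (illustrative only, not part of the upstream file): any number
// of readers may hold the lock concurrently, while a writer gets exclusive
// access; new readers park once a writer is waiting so that writers are not
// starved. `rw`, `read_state`, and `write_state` are hypothetical names.
#if 0
static _PyRWMutex rw;

static void
read_state(void)
{
    _PyRWMutex_RLock(&rw);
    // ... read the shared data ...
    _PyRWMutex_RUnlock(&rw);
}

static void
write_state(void)
{
    _PyRWMutex_Lock(&rw);
    // ... mutate the shared data ...
    _PyRWMutex_Unlock(&rw);
}
#endif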

#define SEQLOCK_IS_UPDATING(sequence) (sequence & 0x01)

void _PySeqLock_LockWrite(_PySeqLock *seqlock)
{
    // Lock by moving to an odd sequence number.
    uint32_t prev = _Py_atomic_load_uint32_relaxed(&seqlock->sequence);
    while (1) {
        if (SEQLOCK_IS_UPDATING(prev)) {
            // Someone else is currently writing.
            _Py_yield();
            prev = _Py_atomic_load_uint32_relaxed(&seqlock->sequence);
        }
        else if (_Py_atomic_compare_exchange_uint32(&seqlock->sequence,
                                                    &prev, prev + 1)) {
            // We've acquired the write lock.
            _Py_atomic_fence_release();
            break;
        }
        else {
            _Py_yield();
        }
    }
}

void _PySeqLock_AbandonWrite(_PySeqLock *seqlock)
{
    // Move back to an even sequence number without publishing an update.
    uint32_t new_seq = _Py_atomic_load_uint32_relaxed(&seqlock->sequence) - 1;
    assert(!SEQLOCK_IS_UPDATING(new_seq));
    _Py_atomic_store_uint32(&seqlock->sequence, new_seq);
}

void _PySeqLock_UnlockWrite(_PySeqLock *seqlock)
{
    // Move to the next even sequence number, publishing the update.
    uint32_t new_seq = _Py_atomic_load_uint32_relaxed(&seqlock->sequence) + 1;
    assert(!SEQLOCK_IS_UPDATING(new_seq));
    _Py_atomic_store_uint32(&seqlock->sequence, new_seq);
}

uint32_t _PySeqLock_BeginRead(_PySeqLock *seqlock)
{
    // Wait until no write is in progress, then return the sequence number
    // the reader should validate against in _PySeqLock_EndRead().
    uint32_t sequence = _Py_atomic_load_uint32_acquire(&seqlock->sequence);
    while (SEQLOCK_IS_UPDATING(sequence)) {
        _Py_yield();
        sequence = _Py_atomic_load_uint32_acquire(&seqlock->sequence);
    }

    return sequence;
}

int _PySeqLock_EndRead(_PySeqLock *seqlock, uint32_t previous)
{
    // Order the reads of the protected data before this (re-)load of the
    // sequence number.
    _Py_atomic_fence_acquire();

    if (_Py_atomic_load_uint32_relaxed(&seqlock->sequence) == previous) {
        return 1;
    }

    // A write raced with the read: the caller must retry.
    _Py_yield();
    return 0;
}

int _PySeqLock_AfterFork(_PySeqLock *seqlock)
{
    // Reset the sequence if a write was in progress at the time of the
    // fork; the writing thread no longer exists in the child process.
    if (SEQLOCK_IS_UPDATING(seqlock->sequence)) {
        seqlock->sequence = 0;
        return 1;
    }

    return 0;
}
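
// Usage sketch (illustrative only, not part of the upstream file): readers
// are optimistic; they read the protected data and retry if the sequence
// number changed (or a write was in progress). `seqlock_example`, `shared`,
// and `read_shared` are hypothetical names.
#if 0
static _PySeqLock seqlock_example;
static int shared;

static int
read_shared(void)
{
    int value;
    do {
        uint32_t seq = _PySeqLock_BeginRead(&seqlock_example);
        value = shared;     // speculative read of the protected data
    } while (!_PySeqLock_EndRead(&seqlock_example, seq));
    return value;
}
#endif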

#undef PyMutex_Lock
void
PyMutex_Lock(PyMutex *m)
{
    _PyMutex_Lock(m);
}

#undef PyMutex_Unlock
void
PyMutex_Unlock(PyMutex *m)
{
    _PyMutex_Unlock(m);
}