// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Queued spinlock
 *
 * (C) Copyright 2013-2015 Hewlett-Packard Development Company, L.P.
 * (C) Copyright 2013-2014,2018 Red Hat, Inc.
 * (C) Copyright 2015 Intel Corp.
 * (C) Copyright 2015 Hewlett-Packard Enterprise Development LP
 *
 * Authors: Waiman Long <[email protected]>
 *          Peter Zijlstra <[email protected]>
 */

#ifndef _GEN_PV_LOCK_SLOWPATH

#include <linux/smp.h>
#include <linux/bug.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/mutex.h>
#include <linux/prefetch.h>
#include <asm/byteorder.h>
#include <asm/qspinlock.h>
#include <trace/events/lock.h>

/*
 * Include queued spinlock statistics code
 */
#include "qspinlock_stat.h"

/*
 * The basic principle of a queue-based spinlock can best be understood
 * by studying a classic queue-based spinlock implementation called the
 * MCS lock. A copy of the original MCS lock paper ("Algorithms for Scalable
 * Synchronization on Shared-Memory Multiprocessors" by Mellor-Crummey and
 * Scott) is available at
 *
 * https://bugzilla.kernel.org/show_bug.cgi?id=206115
 *
 * This queued spinlock implementation is based on the MCS lock; however, to
 * make it fit the 4 bytes that we assume spinlock_t to be, and to preserve
 * its existing API, we must modify it somehow.
 *
 * In particular, where the traditional MCS lock consists of a tail pointer
 * (8 bytes) and needs the next pointer (another 8 bytes) of its own node to
 * unlock the next pending waiter (next->locked), we compress both of these:
 * {tail, next->locked} into a single u32 value.
 *
 * A spinlock disables recursion of its own context, and there is a limit to
 * the contexts that can nest: task, softirq, hardirq and nmi. As there are
 * at most 4 nesting levels, the level can be encoded in a 2-bit number. We
 * can then encode the tail by combining the 2-bit nesting level with the cpu
 * number. With one byte for the lock value and 3 bytes for the tail, only a
 * 32-bit word is now needed. Even though we only need 1 bit for the lock,
 * we extend it to a full byte to achieve better performance for architectures
 * that support atomic byte writes.
 *
 * We also change the first spinner to spin on the lock bit instead of its
 * node, thereby avoiding the need to carry a node from lock to unlock and
 * preserving the existing lock API. This also makes the unlock code simpler
 * and faster.
 *
 * N.B. The current implementation only supports architectures that allow
 * atomic operations on smaller 8-bit and 16-bit data types.
 */

#include "mcs_spinlock.h"
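/*
 * For illustration only: a minimal sketch of the {tail, pending, locked}
 * word packing described above. It mirrors the _Q_PENDING_BITS == 8 layout
 * (byte 0: locked, byte 1: pending, bits 16-17: 2-bit nesting index,
 * bits 18-31: cpu + 1), but the EX_* constants below are assumptions for
 * the sketch; the authoritative offsets live in the qspinlock_types.h
 * headers, not here.
 */
#if 0 /* illustration only, not compiled */
#define EX_TAIL_IDX_OFFSET	16
#define EX_TAIL_CPU_OFFSET	18

static inline u32 ex_encode_tail(int cpu, int idx)
{
	/* cpu is incremented so that an empty tail encodes as 0 (see below) */
	return ((u32)(cpu + 1) << EX_TAIL_CPU_OFFSET) |
	       ((u32)idx << EX_TAIL_IDX_OFFSET);
}
#endif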
#define MAX_NODES	…

/*
 * On 64-bit architectures, the mcs_spinlock structure will be 16 bytes in
 * size and four of them will fit nicely in one 64-byte cacheline. For
 * pvqspinlock, however, we need more space for extra data. To accommodate
 * that, we insert two more long words to pad it up to 32 bytes. IOW, only
 * two of them can fit in a cacheline in this case. That is OK as it is rare
 * to have more than 2 levels of slowpath nesting in actual use. We don't
 * want to penalize pvqspinlocks to optimize for a rare case in native
 * qspinlocks.
 */
struct qnode {
	…
};

/*
 * The pending bit spinning loop count.
 * This heuristic is used to limit the number of lockword accesses
 * made by atomic_cond_read_relaxed when waiting for the lock to
 * transition out of the "== _Q_PENDING_VAL" state. We don't spin
 * indefinitely because there's no guarantee that we'll make forward
 * progress.
 */
#ifndef _Q_PENDING_LOOPS
#define _Q_PENDING_LOOPS	…
#endif

/*
 * Per-CPU queue node structures; we can never have more than 4 nested
 * contexts: task, softirq, hardirq, nmi.
 *
 * Exactly fits one 64-byte cacheline on a 64-bit architecture.
 *
 * PV doubles the storage and uses the second cacheline for PV state.
 */
static DEFINE_PER_CPU_ALIGNED(struct qnode, qnodes[MAX_NODES]);

/*
 * We must be able to distinguish between no-tail and the tail at 0:0,
 * therefore increment the cpu number by one.
 */
static inline __pure u32 encode_tail(int cpu, int idx)
{
	…
}

static inline __pure struct mcs_spinlock *decode_tail(u32 tail)
{
	…
}

static inline __pure
struct mcs_spinlock *grab_mcs_node(struct mcs_spinlock *base, int idx)
{
	…
}

#define _Q_LOCKED_PENDING_MASK	…

#if _Q_PENDING_BITS == 8
/**
 * clear_pending - clear the pending bit.
 * @lock: Pointer to queued spinlock structure
 *
 * *,1,* -> *,0,*
 */
static __always_inline void clear_pending(struct qspinlock *lock)
{
	…
}

/**
 * clear_pending_set_locked - take ownership and clear the pending bit.
 * @lock: Pointer to queued spinlock structure
 *
 * *,1,0 -> *,0,1
 *
 * Lock stealing is not allowed if this function is used.
 */
static __always_inline void clear_pending_set_locked(struct qspinlock *lock)
{
	…
}

/*
 * xchg_tail - Put in the new queue tail code word & retrieve previous one
 * @lock : Pointer to queued spinlock structure
 * @tail : The new queue tail code word
 * Return: The previous queue tail code word
 *
 * xchg(lock, tail), which heads an address dependency
 *
 * p,*,* -> n,*,* ; prev = xchg(lock, node)
 */
static __always_inline u32 xchg_tail(struct qspinlock *lock, u32 tail)
{
	…
}

#else /* _Q_PENDING_BITS == 8 */

/**
 * clear_pending - clear the pending bit.
 * @lock: Pointer to queued spinlock structure
 *
 * *,1,* -> *,0,*
 */
static __always_inline void clear_pending(struct qspinlock *lock)
{
	atomic_andnot(_Q_PENDING_VAL, &lock->val);
}

/**
 * clear_pending_set_locked - take ownership and clear the pending bit.
 * @lock: Pointer to queued spinlock structure
 *
 * *,1,0 -> *,0,1
 */
static __always_inline void clear_pending_set_locked(struct qspinlock *lock)
{
	atomic_add(-_Q_PENDING_VAL + _Q_LOCKED_VAL, &lock->val);
}

/**
 * xchg_tail - Put in the new queue tail code word & retrieve previous one
 * @lock : Pointer to queued spinlock structure
 * @tail : The new queue tail code word
 * Return: The previous queue tail code word
 *
 * xchg(lock, tail)
 *
 * p,*,* -> n,*,* ; prev = xchg(lock, node)
 */
static __always_inline u32 xchg_tail(struct qspinlock *lock, u32 tail)
{
	u32 old, new;

	old = atomic_read(&lock->val);
	do {
		new = (old & _Q_LOCKED_PENDING_MASK) | tail;
		/*
		 * We can use relaxed semantics since the caller ensures that
		 * the MCS node is properly initialized before updating the
		 * tail.
		 */
	} while (!atomic_try_cmpxchg_relaxed(&lock->val, &old, new));

	return old;
}
#endif /* _Q_PENDING_BITS == 8 */

/**
 * queued_fetch_set_pending_acquire - fetch the whole lock value and set pending
 * @lock : Pointer to queued spinlock structure
 * Return: The previous lock value
 *
 * *,*,* -> *,1,*
 */
#ifndef queued_fetch_set_pending_acquire
static __always_inline u32 queued_fetch_set_pending_acquire(struct qspinlock *lock)
{
	return atomic_fetch_or_acquire(_Q_PENDING_VAL, &lock->val);
}
#endif

/**
 * set_locked - Set the lock bit and own the lock
 * @lock: Pointer to queued spinlock structure
 *
 * *,*,0 -> *,0,1
 */
static __always_inline void set_locked(struct qspinlock *lock)
{
	…
}
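/*
 * For illustration only: a sketch decomposing a lock word into the
 * (queue tail, pending bit, lock value) triple used in the transition
 * annotations above (e.g. *,1,0 -> *,0,1). The literal shifts and masks
 * assume the _Q_PENDING_BITS == 8 layout and are illustrative stand-ins
 * for the real _Q_* constants.
 */
#if 0 /* illustration only, not compiled */
static inline void ex_decompose(u32 val, u32 *tail, u32 *pending, u32 *locked)
{
	*locked  = val & 0xff;		/* byte 0: lock byte */
	*pending = (val >> 8) & 0xff;	/* byte 1: pending byte */
	*tail    = val >> 16;		/* bytes 2-3: 2-bit idx + (cpu + 1) */
}
#endif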
/*
 * Generate the native code for queued_spin_lock_slowpath(); provide NOPs for
 * all the PV callbacks.
 */

static __always_inline void __pv_init_node(struct mcs_spinlock *node)
{
	…
}

static __always_inline void __pv_wait_node(struct mcs_spinlock *node,
					   struct mcs_spinlock *prev)
{
	…
}

static __always_inline void __pv_kick_node(struct qspinlock *lock,
					   struct mcs_spinlock *node)
{
	…
}

static __always_inline u32 __pv_wait_head_or_lock(struct qspinlock *lock,
						  struct mcs_spinlock *node)
{
	…
}

#define pv_enabled()		…

#define pv_init_node		…
#define pv_wait_node		…
#define pv_kick_node		…
#define pv_wait_head_or_lock	…

#ifdef CONFIG_PARAVIRT_SPINLOCKS
#define queued_spin_lock_slowpath	…
#endif

#endif /* _GEN_PV_LOCK_SLOWPATH */

/**
 * queued_spin_lock_slowpath - acquire the queued spinlock
 * @lock: Pointer to queued spinlock structure
 * @val: Current value of the queued spinlock 32-bit word
 *
 * (queue tail, pending bit, lock value)
 *
 *              fast     :    slow                                  :    unlock
 *                       :                                          :
 * uncontended  (0,0,0) -:--> (0,0,1) ------------------------------:--> (*,*,0)
 *                       :       | ^--------.------.             /  :
 *                       :       v           \      \            |  :
 * pending               :    (0,1,1) +--> (0,1,0)   \           |  :
 *                       :       | ^--'              |           |  :
 *                       :       v                   |           |  :
 * uncontended           :    (n,x,y) +--> (n,0,0) --'            |  :
 *   queue               :       | ^--'                           |  :
 *                       :       v                                |  :
 * contended             :    (*,x,y) +--> (*,0,0) ---> (*,0,1) -'  :
 *   queue               :         ^--'                             :
 */
void __lockfunc queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
{
	…
}
EXPORT_SYMBOL(…);

/*
 * Generate the paravirt code for queued_spin_lock_slowpath().
 */
#if !defined(_GEN_PV_LOCK_SLOWPATH) && defined(CONFIG_PARAVIRT_SPINLOCKS)
#define _GEN_PV_LOCK_SLOWPATH

#undef pv_enabled
#define pv_enabled()	…

#undef pv_init_node
#undef pv_wait_node
#undef pv_kick_node
#undef pv_wait_head_or_lock

#undef queued_spin_lock_slowpath
#define queued_spin_lock_slowpath	…

#include "qspinlock_paravirt.h"
#include "qspinlock.c"

bool nopvspin;
static __init int parse_nopvspin(char *arg)
{
	…
}
early_param(…);
#endif
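/*
 * For illustration only: a minimal sketch (with hypothetical names) of the
 * self-include "template" pattern used above, where the same slowpath body
 * is compiled twice: once with the pv_*() hooks stubbed out (native), and
 * once more, with the hooks redefined, when the file re-includes itself.
 */
#if 0 /* illustration only; hypothetical file template.c */
#ifndef _GEN_SECOND_PASS
#define hook()		do { } while (0)	/* NOP in the native pass */
#define slowpath	native_slowpath
#endif

void slowpath(void)
{
	hook();			/* NOP or pv_hook(), depending on the pass */
	/* ... body shared by both variants ... */
}

#ifndef _GEN_SECOND_PASS
#define _GEN_SECOND_PASS
void pv_hook(void);

#undef hook
#undef slowpath
#define hook()		pv_hook()
#define slowpath	pv_slowpath
#include "template.c"	/* second pass: emits pv_slowpath() */
#endif
#endif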