// SPDX-License-Identifier: GPL-2.0

#include <linux/export.h>
#include <linux/log2.h>
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/sched/rt.h>
#include <linux/sched/task.h>
#include <linux/slab.h>

#include <trace/events/lock.h>

#include "six.h"

#ifdef DEBUG
#define EBUG_ON …
#else
#define EBUG_ON(cond) …
#endif

#define six_acquire(l, t, r, ip) …
#define six_release(l, ip) …

static void do_six_unlock_type(struct six_lock *lock, enum six_lock_type type);

#define SIX_LOCK_HELD_read_OFFSET …
#define SIX_LOCK_HELD_read …
#define SIX_LOCK_HELD_intent …
#define SIX_LOCK_HELD_write …
#define SIX_LOCK_WAITING_read …
#define SIX_LOCK_WAITING_write …
#define SIX_LOCK_NOSPIN …

struct six_lock_vals {
	…
};

static const struct six_lock_vals l[] = …;

static inline void six_set_bitmask(struct six_lock *lock, u32 mask)
{
	…
}

static inline void six_clear_bitmask(struct six_lock *lock, u32 mask)
{
	…
}

static inline void six_set_owner(struct six_lock *lock, enum six_lock_type type,
				 u32 old, struct task_struct *owner)
{
	…
}

static inline unsigned pcpu_read_count(struct six_lock *lock)
{
	…
}

/*
 * __do_six_trylock() - main trylock routine
 *
 * Returns 1 on success, 0 on failure
 *
 * In percpu reader mode, a failed trylock may cause a spurious trylock failure
 * for another thread taking the competing lock type, and we may have to do a
 * wakeup: when a wakeup is required, we return -1 - wakeup_type.
 */
static int __do_six_trylock(struct six_lock *lock, enum six_lock_type type,
			    struct task_struct *task, bool try)
{
	…
}

static void __six_lock_wakeup(struct six_lock *lock, enum six_lock_type lock_type)
{
	…
}

__always_inline
static void six_lock_wakeup(struct six_lock *lock, u32 state,
			    enum six_lock_type lock_type)
{
	…
}

__always_inline
static bool do_six_trylock(struct six_lock *lock, enum six_lock_type type, bool try)
{
	…
}

/**
 * six_trylock_ip - attempt to take a six lock without blocking
 * @lock: lock to take
 * @type: SIX_LOCK_read, SIX_LOCK_intent, or SIX_LOCK_write
 * @ip: ip parameter for lockdep/lockstat, i.e. _THIS_IP_
 *
 * Return: true on success, false on failure.
 */
bool six_trylock_ip(struct six_lock *lock, enum six_lock_type type, unsigned long ip)
{
	…
}
EXPORT_SYMBOL_GPL(…);
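/*
 * Illustrative sketch, not part of the six lock implementation: taking and
 * releasing a read lock via the trylock interface.  my_peek() is a
 * hypothetical caller; the six lock calls used here (six_trylock_ip(),
 * six_unlock_ip()) are the ones declared in "six.h".
 */
static bool __maybe_unused my_peek(struct six_lock *lock)
{
	if (!six_trylock_ip(lock, SIX_LOCK_read, _THIS_IP_))
		return false;

	/* ... read the protected structure under the read lock ... */

	six_unlock_ip(lock, SIX_LOCK_read, _THIS_IP_);
	return true;
}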
/**
 * six_relock_ip - attempt to re-take a lock that was held previously
 * @lock: lock to take
 * @type: SIX_LOCK_read, SIX_LOCK_intent, or SIX_LOCK_write
 * @seq: lock sequence number obtained from six_lock_seq() while lock was
 *       held previously
 * @ip: ip parameter for lockdep/lockstat, i.e. _THIS_IP_
 *
 * Return: true on success, false on failure.
 */
bool six_relock_ip(struct six_lock *lock, enum six_lock_type type,
		   unsigned seq, unsigned long ip)
{
	…
}
EXPORT_SYMBOL_GPL(…);

#ifdef CONFIG_BCACHEFS_SIX_OPTIMISTIC_SPIN

static inline bool six_owner_running(struct six_lock *lock)
{
	…
}

static inline bool six_optimistic_spin(struct six_lock *lock,
					struct six_lock_waiter *wait,
					enum six_lock_type type)
{
	…
}

#else /* CONFIG_BCACHEFS_SIX_OPTIMISTIC_SPIN */

static inline bool six_optimistic_spin(struct six_lock *lock,
					struct six_lock_waiter *wait,
					enum six_lock_type type)
{
	return false;
}

#endif

noinline
static int six_lock_slowpath(struct six_lock *lock, enum six_lock_type type,
			     struct six_lock_waiter *wait,
			     six_lock_should_sleep_fn should_sleep_fn, void *p,
			     unsigned long ip)
{
	…
}

/**
 * six_lock_ip_waiter - take a lock, with full waitlist interface
 * @lock: lock to take
 * @type: SIX_LOCK_read, SIX_LOCK_intent, or SIX_LOCK_write
 * @wait: pointer to wait object, which will be added to lock's waitlist
 * @should_sleep_fn: callback run after adding to waitlist, immediately prior
 *		     to scheduling
 * @p: passed through to @should_sleep_fn
 * @ip: ip parameter for lockdep/lockstat, i.e. _THIS_IP_
 *
 * This is the most general six_lock() variant, with parameters to support full
 * cycle detection for deadlock avoidance.
 *
 * The code calling this function must implement tracking of held locks, and the
 * @wait object should be embedded into the struct that tracks held locks -
 * which must also be accessible in a thread-safe way.
 *
 * @should_sleep_fn should invoke the cycle detector; it should walk each
 * lock's waiters, and for each waiter recursively walk their held locks.
 *
 * When this function must block, @wait will be added to @lock's waitlist before
 * calling trylock, and before calling @should_sleep_fn, and @wait will not be
 * removed from the lock waitlist until the lock has been successfully acquired,
 * or we abort.
 *
 * @wait.start_time will be monotonically increasing for any given waitlist, and
 * thus may be used as a loop cursor.
 *
 * Return: 0 on success, or the return code from @should_sleep_fn on failure.
 */
int six_lock_ip_waiter(struct six_lock *lock, enum six_lock_type type,
		       struct six_lock_waiter *wait,
		       six_lock_should_sleep_fn should_sleep_fn, void *p,
		       unsigned long ip)
{
	…
}
EXPORT_SYMBOL_GPL(…);
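/*
 * Illustrative sketch, not part of the implementation: a minimal caller of
 * six_lock_ip_waiter().  "struct my_trans" and my_should_sleep_fn() are
 * hypothetical stand-ins for an upper layer that tracks held locks; a real
 * @should_sleep_fn would run that layer's cycle detector over @lock's
 * waiters and their held locks, as described above.
 */
struct my_trans {
	struct six_lock_waiter	wait;	/* embedded in the held-locks tracking struct */
	/* ... held-lock tracking state would live here ... */
};

static int my_should_sleep_fn(struct six_lock *lock, void *p)
{
	/*
	 * Returning a nonzero error here aborts the lock attempt; the error
	 * is passed back to the caller of six_lock_ip_waiter():
	 */
	return 0;
}

static int __maybe_unused my_lock_intent(struct my_trans *trans, struct six_lock *lock)
{
	return six_lock_ip_waiter(lock, SIX_LOCK_intent, &trans->wait,
				  my_should_sleep_fn, trans, _THIS_IP_);
}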
__always_inline
static void do_six_unlock_type(struct six_lock *lock, enum six_lock_type type)
{
	…
}

/**
 * six_unlock_ip - drop a six lock
 * @lock: lock to unlock
 * @type: SIX_LOCK_read, SIX_LOCK_intent, or SIX_LOCK_write
 * @ip: ip parameter for lockdep/lockstat, i.e. _THIS_IP_
 *
 * When a lock is held multiple times (because six_lock_increment() was used),
 * this decrements the 'lock held' counter by one.
 *
 * For example:
 * six_lock_read(&foo->lock);                          read count 1
 * six_lock_increment(&foo->lock, SIX_LOCK_read);      read count 2
 * six_lock_unlock(&foo->lock, SIX_LOCK_read);         read count 1
 * six_lock_unlock(&foo->lock, SIX_LOCK_read);         read count 0
 */
void six_unlock_ip(struct six_lock *lock, enum six_lock_type type, unsigned long ip)
{
	…
}
EXPORT_SYMBOL_GPL(…);

/**
 * six_lock_downgrade - convert an intent lock to a read lock
 * @lock: lock to downgrade
 *
 * @lock will have read count incremented and intent count decremented
 */
void six_lock_downgrade(struct six_lock *lock)
{
	…
}
EXPORT_SYMBOL_GPL(…);

/**
 * six_lock_tryupgrade - attempt to convert read lock to an intent lock
 * @lock: lock to upgrade
 *
 * On success, @lock will have intent count incremented and read count
 * decremented
 *
 * Return: true on success, false on failure
 */
bool six_lock_tryupgrade(struct six_lock *lock)
{
	…
}
EXPORT_SYMBOL_GPL(…);

/**
 * six_trylock_convert - attempt to convert a held lock from one type to another
 * @lock: lock to convert
 * @from: SIX_LOCK_read or SIX_LOCK_intent
 * @to: SIX_LOCK_read or SIX_LOCK_intent
 *
 * On success, @lock will have the held count for @from decremented and the
 * held count for @to incremented
 *
 * Return: true on success, false on failure
 */
bool six_trylock_convert(struct six_lock *lock, enum six_lock_type from,
			 enum six_lock_type to)
{
	…
}
EXPORT_SYMBOL_GPL(…);

/**
 * six_lock_increment - increase held lock count on a lock that is already held
 * @lock: lock to increment
 * @type: SIX_LOCK_read or SIX_LOCK_intent
 *
 * @lock must already be held, with a lock type that is greater than or equal to
 * @type
 *
 * A corresponding six_unlock_type() call will be required for @lock to be fully
 * unlocked.
 */
void six_lock_increment(struct six_lock *lock, enum six_lock_type type)
{
	…
}
EXPORT_SYMBOL_GPL(…);

/**
 * six_lock_wakeup_all - wake up all waiters on @lock
 * @lock: lock to wake up waiters for
 *
 * Waking up waiters will cause them to re-run should_sleep_fn, which may then
 * abort the lock operation.
 *
 * This function is never needed in a bug-free program; it's only useful in
 * debug code, e.g. to determine if a cycle detector is at fault.
 */
void six_lock_wakeup_all(struct six_lock *lock)
{
	…
}
EXPORT_SYMBOL_GPL(…);

/**
 * six_lock_counts - return held lock counts, for each lock type
 * @lock: lock to return counters for
 *
 * Return: the number of times a lock is held for read, intent and write.
 */
struct six_lock_count six_lock_counts(struct six_lock *lock)
{
	…
}
EXPORT_SYMBOL_GPL(…);
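/*
 * Illustrative sketch, not part of the implementation: nested read locking
 * with six_lock_increment(), plus a round trip through six_lock_tryupgrade()
 * and six_lock_downgrade().  my_nested_read() is hypothetical; the held-count
 * comments mirror the six_unlock_ip() example above and assume no other
 * holders.
 */
static void __maybe_unused my_nested_read(struct six_lock *lock)
{
	if (!six_trylock_ip(lock, SIX_LOCK_read, _THIS_IP_))
		return;
	/* read count 1 */
	six_lock_increment(lock, SIX_LOCK_read);	/* read count 2 */

	if (six_lock_tryupgrade(lock))		/* read count 1, intent count 1 */
		six_lock_downgrade(lock);	/* back to read count 2 */

	six_unlock_ip(lock, SIX_LOCK_read, _THIS_IP_);	/* read count 1 */
	six_unlock_ip(lock, SIX_LOCK_read, _THIS_IP_);	/* read count 0 */
}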
/**
 * six_lock_readers_add - directly manipulate reader count of a lock
 * @lock: lock to add/subtract readers for
 * @nr: reader count to add/subtract
 *
 * When an upper layer is implementing lock reentrancy, we may have both read
 * and intent locks on the same lock.
 *
 * When we need to take a write lock, the read locks will cause self-deadlock,
 * because six locks themselves do not track which read locks are held by the
 * current thread and which are held by other threads - they do no per-thread
 * tracking of held locks.
 *
 * The upper layer that is tracking held locks may, however, if trylock() has
 * failed, count up its own read locks, subtract them, take the write lock, and
 * then re-add them.
 *
 * As in any other situation when taking a write lock, @lock must be held for
 * intent once (or more), so @lock will never be left unlocked.
 */
void six_lock_readers_add(struct six_lock *lock, int nr)
{
	…
}
EXPORT_SYMBOL_GPL(…);

/**
 * six_lock_exit - release resources held by a lock prior to freeing
 * @lock: lock to exit
 *
 * When a lock was initialized in percpu mode (SIX_LOCK_INIT_PCPU), this is
 * required to free the percpu read counts.
 */
void six_lock_exit(struct six_lock *lock)
{
	…
}
EXPORT_SYMBOL_GPL(…);

void __six_lock_init(struct six_lock *lock, const char *name,
		     struct lock_class_key *key, enum six_lock_init_flags flags)
{
	…
}
EXPORT_SYMBOL_GPL(…);
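/*
 * Illustrative sketches, not part of the implementation.  The first shows
 * the read-lock dance described in the six_lock_readers_add() comment above:
 * my_trylock_write() is hypothetical, @my_readers is the number of read locks
 * the caller itself holds on @lock (tracked by the upper layer), and the
 * caller is assumed to already hold the lock for intent.
 */
static bool __maybe_unused my_trylock_write(struct six_lock *lock, int my_readers)
{
	bool ret;

	/* Hide our own read locks so they cannot block the write lock: */
	six_lock_readers_add(lock, -my_readers);

	ret = six_trylock_ip(lock, SIX_LOCK_write, _THIS_IP_);

	/* Re-add our read locks, whether or not the write lock was taken: */
	six_lock_readers_add(lock, my_readers);

	return ret;
}

/*
 * The second sketch shows the lifetime of a lock used in percpu reader mode:
 * __six_lock_init() with SIX_LOCK_INIT_PCPU must be paired with
 * six_lock_exit() to free the percpu read counts before the lock's memory is
 * freed.  my_lock_lifetime() and the "my_lock" name are hypothetical.
 */
static void __maybe_unused my_lock_lifetime(struct six_lock *lock)
{
	static struct lock_class_key key;

	__six_lock_init(lock, "my_lock", &key, SIX_LOCK_INIT_PCPU);

	/* ... the lock is used here ... */

	six_lock_exit(lock);
}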