linux/kernel/locking/rtmutex.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * RT-Mutexes: simple blocking mutual exclusion locks with PI support
 *
 * started by Ingo Molnar and Thomas Gleixner.
 *
 *  Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <[email protected]>
 *  Copyright (C) 2005-2006 Timesys Corp., Thomas Gleixner <[email protected]>
 *  Copyright (C) 2005 Kihon Technologies Inc., Steven Rostedt
 *  Copyright (C) 2006 Esben Nielsen
 * Adaptive Spinlocks:
 *  Copyright (C) 2008 Novell, Inc., Gregory Haskins, Sven Dietrich,
 *				     and Peter Morreale,
 * Adaptive Spinlocks simplification:
 *  Copyright (C) 2008 Red Hat, Inc., Steven Rostedt <[email protected]>
 *
 *  See Documentation/locking/rt-mutex-design.rst for details.
 */
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/deadline.h>
#include <linux/sched/signal.h>
#include <linux/sched/rt.h>
#include <linux/sched/wake_q.h>
#include <linux/ww_mutex.h>

#include <trace/events/lock.h>

#include "rtmutex_common.h"

#ifndef WW_RT
#define build_ww_mutex()
#define ww_container_of(rtm)

static inline int __ww_mutex_add_waiter(struct rt_mutex_waiter *waiter,
					struct rt_mutex *lock,
					struct ww_acquire_ctx *ww_ctx)
{}

static inline void __ww_mutex_check_waiters(struct rt_mutex *lock,
					    struct ww_acquire_ctx *ww_ctx)
{}

static inline void ww_mutex_lock_acquired(struct ww_mutex *lock,
					  struct ww_acquire_ctx *ww_ctx)
{}

static inline int __ww_mutex_check_kill(struct rt_mutex *lock,
					struct rt_mutex_waiter *waiter,
					struct ww_acquire_ctx *ww_ctx)
{}

#else
#define build_ww_mutex()
#define ww_container_of(rtm)
# include "ww_mutex.h"
#endif

/*
 * lock->owner state tracking:
 *
 * lock->owner holds the task_struct pointer of the owner. Bit 0
 * is used to keep track of the "lock has waiters" state.
 *
 * owner	bit0
 * NULL		0	lock is free (fast acquire possible)
 * NULL		1	lock is free and has waiters and the top waiter
 *				is going to take the lock*
 * taskpointer	0	lock is held (fast release possible)
 * taskpointer	1	lock is held and has waiters**
 *
 * The fast atomic compare exchange based acquire and release is only
 * possible when bit 0 of lock->owner is 0.
 *
 * (*) It also can be a transitional state when grabbing the lock
 * with ->wait_lock held. To prevent any fast path cmpxchg to the lock,
 * we need to set bit 0 before looking at the lock, and the owner may be
 * NULL in this small time, hence this can be a transitional state.
 *
 * (**) There is a small time when bit 0 is set but there are no
 * waiters. This can happen when grabbing the lock in the slow path.
 * To prevent a cmpxchg of the owner releasing the lock, we need to
 * set this bit before looking at the lock.
 */

static __always_inline struct task_struct *
rt_mutex_owner_encode(struct rt_mutex_base *lock, struct task_struct *owner)
{}

static __always_inline void
rt_mutex_set_owner(struct rt_mutex_base *lock, struct task_struct *owner)
{}

static __always_inline void rt_mutex_clear_owner(struct rt_mutex_base *lock)
{}

static __always_inline void clear_rt_mutex_waiters(struct rt_mutex_base *lock)
{}

static __always_inline void
fixup_rt_mutex_waiters(struct rt_mutex_base *lock, bool acquire_lock)
{}
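
/*
 * Illustration only: how the encoding described above can be unpacked. This
 * is a sketch under a hypothetical name, not the kernel's accessor; the real
 * helpers (rt_mutex_owner(), RT_MUTEX_HAS_WAITERS) live in rtmutex_common.h.
 */
static __always_inline struct task_struct *
rt_mutex_owner_decode_sketch(struct rt_mutex_base *lock)
{
	unsigned long val = (unsigned long)READ_ONCE(lock->owner);

	/* Bit 0 is the "has waiters" flag, the rest is the owner pointer. */
	return (struct task_struct *)(val & ~1UL);
}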

/*
 * We can speed up the acquire/release if there's no debugging state to be
 * set up.
 */
#ifndef CONFIG_DEBUG_RT_MUTEXES
static __always_inline bool rt_mutex_cmpxchg_acquire(struct rt_mutex_base *lock,
						     struct task_struct *old,
						     struct task_struct *new)
{
	return try_cmpxchg_acquire(&lock->owner, &old, new);
}

static __always_inline bool rt_mutex_try_acquire(struct rt_mutex_base *lock)
{
	return rt_mutex_cmpxchg_acquire(lock, NULL, current);
}

static __always_inline bool rt_mutex_cmpxchg_release(struct rt_mutex_base *lock,
						     struct task_struct *old,
						     struct task_struct *new)
{
	return try_cmpxchg_release(&lock->owner, &old, new);
}
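
/*
 * Illustration of how the two helpers above are meant to be used (sketch
 * only, hypothetical name): the fast paths are a single cmpxchg each, and
 * the acquire/release ordering is what orders the critical section. Any
 * failure falls through to the slow paths further down in this file.
 */
static __always_inline bool rt_mutex_fastpath_sketch(struct rt_mutex_base *lock)
{
	/*
	 * Fast lock: NULL -> current. Fails if the lock is owned or if
	 * bit 0 (RT_MUTEX_HAS_WAITERS) is set.
	 */
	if (!rt_mutex_cmpxchg_acquire(lock, NULL, current))
		return false;

	/* ... critical section ... */

	/*
	 * Fast unlock: current -> NULL. Fails if a waiter set bit 0 in the
	 * meantime; the slow unlock path then hands the lock over.
	 */
	return rt_mutex_cmpxchg_release(lock, current, NULL);
}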

/*
 * Callers must hold the ->wait_lock -- which is the whole purpose as we force
 * all future threads that attempt to [Rmw] the lock to the slowpath. As such
 * relaxed semantics suffice.
 */
static __always_inline void mark_rt_mutex_waiters(struct rt_mutex_base *lock)
{
	unsigned long *p = (unsigned long *) &lock->owner;
	unsigned long owner, new;

	owner = READ_ONCE(*p);
	do {
		new = owner | RT_MUTEX_HAS_WAITERS;
	} while (!try_cmpxchg_relaxed(p, &owner, new));

	/*
	 * The cmpxchg loop above is relaxed to avoid back-to-back ACQUIRE
	 * operations in the event of contention. Ensure the successful
	 * cmpxchg is visible.
	 */
	smp_mb__after_atomic();
}

/*
 * Safe fastpath aware unlock:
 * 1) Clear the waiters bit
 * 2) Drop lock->wait_lock
 * 3) Try to unlock the lock with cmpxchg
 */
static __always_inline bool unlock_rt_mutex_safe(struct rt_mutex_base *lock,
						 unsigned long flags)
	__releases(lock->wait_lock)
{
	struct task_struct *owner = rt_mutex_owner(lock);

	clear_rt_mutex_waiters(lock);
	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
	/*
	 * If a new waiter comes in between the unlock and the cmpxchg
	 * we have two situations:
	 *
	 * unlock(wait_lock);
	 *					lock(wait_lock);
	 * cmpxchg(p, owner, 0) == owner
	 *					mark_rt_mutex_waiters(lock);
	 *					acquire(lock);
	 * or:
	 *
	 * unlock(wait_lock);
	 *					lock(wait_lock);
	 *					mark_rt_mutex_waiters(lock);
	 *
	 * cmpxchg(p, owner, 0) != owner
	 *					enqueue_waiter();
	 *					unlock(wait_lock);
	 * lock(wait_lock);
	 * wake waiter();
	 * unlock(wait_lock);
	 *					lock(wait_lock);
	 *					acquire(lock);
	 */
	return rt_mutex_cmpxchg_release(lock, owner, NULL);
}
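
/*
 * Illustration only (hypothetical name): how a slow unlock path retries
 * around unlock_rt_mutex_safe() when it loses the race shown above. Called
 * with lock->wait_lock held and the interrupt state in @flags, mirroring
 * unlock_rt_mutex_safe()'s calling convention. The real rt_mutex_slowunlock()
 * below additionally wakes the top waiter, which is elided here.
 */
static __always_inline void rt_mutex_unlock_retry_sketch(struct rt_mutex_base *lock,
							  unsigned long flags)
	__releases(lock->wait_lock)
{
	while (!rt_mutex_has_waiters(lock)) {
		/* Drops lock->wait_lock! */
		if (unlock_rt_mutex_safe(lock, flags) == true)
			return;
		/* Lost the race against a new waiter: relock and re-check. */
		raw_spin_lock_irqsave(&lock->wait_lock, flags);
	}

	/* Waiters are queued: wakeup of the top waiter elided in this sketch. */
	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
}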

#else
static __always_inline bool rt_mutex_cmpxchg_acquire(struct rt_mutex_base *lock,
						     struct task_struct *old,
						     struct task_struct *new)
{}

static int __sched rt_mutex_slowtrylock(struct rt_mutex_base *lock);

static __always_inline bool rt_mutex_try_acquire(struct rt_mutex_base *lock)
{}

static __always_inline bool rt_mutex_cmpxchg_release(struct rt_mutex_base *lock,
						     struct task_struct *old,
						     struct task_struct *new)
{}

static __always_inline void mark_rt_mutex_waiters(struct rt_mutex_base *lock)
{}

/*
 * Simple slow path only version: lock->owner is protected by lock->wait_lock.
 */
static __always_inline bool unlock_rt_mutex_safe(struct rt_mutex_base *lock,
						 unsigned long flags)
	__releases(lock->wait_lock)
{}
#endif

static __always_inline int __waiter_prio(struct task_struct *task)
{}

/*
 * Update the waiter->tree copy of the sort keys.
 */
static __always_inline void
waiter_update_prio(struct rt_mutex_waiter *waiter, struct task_struct *task)
{}

/*
 * Update the waiter->pi_tree copy of the sort keys (from the tree copy).
 */
static __always_inline void
waiter_clone_prio(struct rt_mutex_waiter *waiter, struct task_struct *task)
{}

/*
 * Only use with rt_waiter_node_{less,equal}()
 */
#define task_to_waiter_node(p)
#define task_to_waiter(p)

static __always_inline int rt_waiter_node_less(struct rt_waiter_node *left,
					       struct rt_waiter_node *right)
{}

static __always_inline int rt_waiter_node_equal(struct rt_waiter_node *left,
						 struct rt_waiter_node *right)
{}
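
/*
 * Illustration only (hypothetical name) of the ordering the two comparators
 * above implement, per the sort-key comments further up: waiters are ordered
 * by priority first; for deadline waiters the earlier absolute deadline wins.
 * The field and helper names used here (prio, deadline, dl_prio(),
 * dl_time_before()) are assumptions of this sketch.
 */
static __always_inline int rt_waiter_node_less_sketch(struct rt_waiter_node *left,
						      struct rt_waiter_node *right)
{
	/* Lower numerical prio means higher scheduling priority. */
	if (left->prio < right->prio)
		return 1;

	/*
	 * If @left is a deadline waiter and did not win above, @right is a
	 * deadline waiter too: break the tie on the earlier deadline.
	 */
	if (dl_prio(left->prio))
		return dl_time_before(left->deadline, right->deadline);

	return 0;
}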

static inline bool rt_mutex_steal(struct rt_mutex_waiter *waiter,
				  struct rt_mutex_waiter *top_waiter)
{}

#define __node_2_waiter(node)

static __always_inline bool __waiter_less(struct rb_node *a, const struct rb_node *b)
{}

static __always_inline void
rt_mutex_enqueue(struct rt_mutex_base *lock, struct rt_mutex_waiter *waiter)
{}

static __always_inline void
rt_mutex_dequeue(struct rt_mutex_base *lock, struct rt_mutex_waiter *waiter)
{}

#define __node_2_rt_node(node)

static __always_inline bool __pi_waiter_less(struct rb_node *a, const struct rb_node *b)
{}

static __always_inline void
rt_mutex_enqueue_pi(struct task_struct *task, struct rt_mutex_waiter *waiter)
{}

static __always_inline void
rt_mutex_dequeue_pi(struct task_struct *task, struct rt_mutex_waiter *waiter)
{}

static __always_inline void rt_mutex_adjust_prio(struct rt_mutex_base *lock,
						 struct task_struct *p)
{}

/* RT mutex specific wake_q wrappers */
static __always_inline void rt_mutex_wake_q_add_task(struct rt_wake_q_head *wqh,
						     struct task_struct *task,
						     unsigned int wake_state)
{}

static __always_inline void rt_mutex_wake_q_add(struct rt_wake_q_head *wqh,
						struct rt_mutex_waiter *w)
{}

static __always_inline void rt_mutex_wake_up_q(struct rt_wake_q_head *wqh)
{}

/*
 * Deadlock detection is conditional:
 *
 * If CONFIG_DEBUG_RT_MUTEXES=n, deadlock detection is only conducted
 * if the detect argument is == RT_MUTEX_FULL_CHAINWALK.
 *
 * If CONFIG_DEBUG_RT_MUTEXES=y, deadlock detection is always
 * conducted independent of the detect argument.
 *
 * If the waiter argument is NULL this indicates the deboost path and
 * deadlock detection is disabled independent of the detect argument
 * and the config settings.
 */
static __always_inline bool
rt_mutex_cond_detect_deadlock(struct rt_mutex_waiter *waiter,
			      enum rtmutex_chainwalk chwalk)
{}
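
/*
 * The decision described in the comment above, spelled out as a sketch
 * (hypothetical name, not the in-tree implementation):
 */
static __always_inline bool
rt_mutex_cond_detect_deadlock_sketch(struct rt_mutex_waiter *waiter,
				     enum rtmutex_chainwalk chwalk)
{
	/* Deboost path: never run deadlock detection. */
	if (!waiter)
		return false;

	/* With debugging enabled, always detect ... */
	if (IS_ENABLED(CONFIG_DEBUG_RT_MUTEXES))
		return true;

	/* ... otherwise only when a full chain walk was requested. */
	return chwalk == RT_MUTEX_FULL_CHAINWALK;
}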

static __always_inline struct rt_mutex_base *task_blocked_on_lock(struct task_struct *p)
{}

/*
 * Adjust the priority chain. Also used for deadlock detection.
 * Decreases task's usage by one - may thus free the task.
 *
 * @task:	the task owning the mutex (owner) for which a chain walk is
 *		probably needed
 * @chwalk:	do we have to carry out deadlock detection?
 * @orig_lock:	the mutex (can be NULL if we are walking the chain to recheck
 *		things for a task that has just got its priority adjusted, and
 *		is waiting on a mutex)
 * @next_lock:	the mutex on which the owner of @orig_lock was blocked before
 *		we dropped its pi_lock. Is never dereferenced, only used for
 *		comparison to detect lock chain changes.
 * @orig_waiter: rt_mutex_waiter struct for the task that has just donated
 *		its priority to the mutex owner (can be NULL in the case
 *		depicted above or if the top waiter has gone away and we are
 *		actually deboosting the owner)
 * @top_task:	the current top waiter
 *
 * Returns 0 or -EDEADLK.
 *
 * Chain walk basics and protection scope
 *
 * [R] refcount on task
 * [Pn] task->pi_lock held
 * [L] rtmutex->wait_lock held
 *
 * Normal locking order:
 *
 *   rtmutex->wait_lock
 *     task->pi_lock
 *
 * Step	Description				Protected by
 *	function arguments:
 *	@task					[R]
 *	@orig_lock if != NULL			@top_task is blocked on it
 *	@next_lock				Unprotected. Cannot be
 *						dereferenced. Only used for
 *						comparison.
 *	@orig_waiter if != NULL			@top_task is blocked on it
 *	@top_task				current, or in case of proxy
 *						locking protected by calling
 *						code
 *	again:
 *	  loop_sanity_check();
 *	retry:
 * [1]	  lock(task->pi_lock);			[R] acquire [P1]
 * [2]	  waiter = task->pi_blocked_on;		[P1]
 * [3]	  check_exit_conditions_1();		[P1]
 * [4]	  lock = waiter->lock;			[P1]
 * [5]	  if (!try_lock(lock->wait_lock)) {	[P1] try to acquire [L]
 *	    unlock(task->pi_lock);		release [P1]
 *	    goto retry;
 *	  }
 * [6]	  check_exit_conditions_2();		[P1] + [L]
 * [7]	  requeue_lock_waiter(lock, waiter);	[P1] + [L]
 * [8]	  unlock(task->pi_lock);		release [P1]
 *	  put_task_struct(task);		release [R]
 * [9]	  check_exit_conditions_3();		[L]
 * [10]	  task = owner(lock);			[L]
 *	  get_task_struct(task);		[L] acquire [R]
 *	  lock(task->pi_lock);			[L] acquire [P2]
 * [11]	  requeue_pi_waiter(tsk, waiters(lock));[P2] + [L]
 * [12]	  check_exit_conditions_4();		[P2] + [L]
 * [13]	  unlock(task->pi_lock);		release [P2]
 *	  unlock(lock->wait_lock);		release [L]
 *	  goto again;
 *
 * Where P1 is the blocking task and P2 is the lock owner; going up one step
 * the owner becomes the next blocked task, etc.
 *
 */
static int __sched rt_mutex_adjust_prio_chain(struct task_struct *task,
					      enum rtmutex_chainwalk chwalk,
					      struct rt_mutex_base *orig_lock,
					      struct rt_mutex_base *next_lock,
					      struct rt_mutex_waiter *orig_waiter,
					      struct task_struct *top_task)
{}
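
/*
 * Worked example of the walk above (illustrative, not from the source): a
 * high-priority task H blocks on L1, owned by a medium-priority task M that
 * is itself blocked on L2, owned by a low-priority task L. Blocking H on L1
 * boosts M and then starts this chain walk with @task = M and
 * @orig_lock = L1. Steps [2]/[4] pick up L2 from M's pi_blocked_on, step [7]
 * requeues M in L2's wait tree with its boosted priority, and steps
 * [10]-[12] requeue L2's top waiter in L's pi waiters tree so that L is
 * boosted as well. The walk ends when it reaches an owner that is not
 * blocked on a further lock or when nothing changed for the top waiter;
 * it returns -EDEADLK when the chain loops back onto @orig_lock.
 */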

/*
 * Try to take an rt-mutex
 *
 * Must be called with lock->wait_lock held and interrupts disabled
 *
 * @lock:   The lock to be acquired.
 * @task:   The task which wants to acquire the lock
 * @waiter: The waiter that is queued to the lock's wait tree if the
 *	    callsite called task_blocked_on_lock(), otherwise NULL
 */
static int __sched
try_to_take_rt_mutex(struct rt_mutex_base *lock, struct task_struct *task,
		     struct rt_mutex_waiter *waiter)
{}

/*
 * Task blocks on lock.
 *
 * Prepare waiter and propagate pi chain
 *
 * This must be called with lock->wait_lock held and interrupts disabled
 */
static int __sched task_blocks_on_rt_mutex(struct rt_mutex_base *lock,
					   struct rt_mutex_waiter *waiter,
					   struct task_struct *task,
					   struct ww_acquire_ctx *ww_ctx,
					   enum rtmutex_chainwalk chwalk)
{}

/*
 * Remove the top waiter from the current task's pi waiter tree and
 * queue it up.
 *
 * Called with lock->wait_lock held and interrupts disabled.
 */
static void __sched mark_wakeup_next_waiter(struct rt_wake_q_head *wqh,
					    struct rt_mutex_base *lock)
{}

static int __sched __rt_mutex_slowtrylock(struct rt_mutex_base *lock)
{}

/*
 * Slow path try-lock function:
 */
static int __sched rt_mutex_slowtrylock(struct rt_mutex_base *lock)
{}

static __always_inline int __rt_mutex_trylock(struct rt_mutex_base *lock)
{}

/*
 * Slow path to release an rt-mutex.
 */
static void __sched rt_mutex_slowunlock(struct rt_mutex_base *lock)
{}

static __always_inline void __rt_mutex_unlock(struct rt_mutex_base *lock)
{}
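
/*
 * Illustration only (hypothetical name): the fastpath/slowpath split on the
 * unlock side. If lock->owner is exactly 'current' (no waiters bit set), a
 * single release-cmpxchg drops the lock; otherwise the slow path hands it
 * to the top waiter.
 */
static __always_inline void rt_mutex_unlock_pattern_sketch(struct rt_mutex_base *lock)
{
	if (likely(rt_mutex_cmpxchg_release(lock, current, NULL)))
		return;

	rt_mutex_slowunlock(lock);
}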

#ifdef CONFIG_SMP
static bool rtmutex_spin_on_owner(struct rt_mutex_base *lock,
				  struct rt_mutex_waiter *waiter,
				  struct task_struct *owner)
{}
#else
static bool rtmutex_spin_on_owner(struct rt_mutex_base *lock,
				  struct rt_mutex_waiter *waiter,
				  struct task_struct *owner)
{
	return false;
}
#endif
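
/*
 * Illustration only (hypothetical name) of the heuristic behind the adaptive
 * spinning credited above: instead of scheduling out right away, the top
 * waiter may keep spinning as long as the lock owner is still running on a
 * CPU. The helpers used here (owner_on_cpu(), rt_mutex_waiter_is_top_waiter())
 * are assumptions of this sketch.
 */
static __always_inline bool rtmutex_spin_on_owner_sketch(struct rt_mutex_base *lock,
							 struct rt_mutex_waiter *waiter,
							 struct task_struct *owner)
{
	bool keep_spinning = true;

	/* Keep @owner's task_struct alive while peeking at it. */
	rcu_read_lock();
	for (;;) {
		/* Owner changed or released the lock: go try to take it. */
		if (owner != rt_mutex_owner(lock))
			break;
		/*
		 * Stop spinning when the owner is no longer on a CPU, a
		 * reschedule is due, or we are not the top waiter anymore.
		 */
		if (!owner_on_cpu(owner) || need_resched() ||
		    !rt_mutex_waiter_is_top_waiter(lock, waiter)) {
			keep_spinning = false;
			break;
		}
		cpu_relax();
	}
	rcu_read_unlock();

	return keep_spinning;
}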

#ifdef RT_MUTEX_BUILD_MUTEX
/*
 * Functions required for:
 *	- rtmutex, futex on all kernels
 *	- mutex and rwsem substitutions on RT kernels
 */

/*
 * Remove a waiter from a lock and give up
 *
 * Must be called with lock->wait_lock held and interrupts disabled. The
 * caller must have just failed to acquire the lock via try_to_take_rt_mutex().
 */
static void __sched remove_waiter(struct rt_mutex_base *lock,
				  struct rt_mutex_waiter *waiter)
{}

/**
 * rt_mutex_slowlock_block() - Perform the wait-wake-try-to-take loop
 * @lock:		 the rt_mutex to take
 * @ww_ctx:		 WW mutex context pointer
 * @state:		 the state the task should block in (TASK_INTERRUPTIBLE
 *			 or TASK_UNINTERRUPTIBLE)
 * @timeout:		 the pre-initialized and started timer, or NULL for none
 * @waiter:		 the pre-initialized rt_mutex_waiter
 *
 * Must be called with lock->wait_lock held and interrupts disabled
 */
static int __sched rt_mutex_slowlock_block(struct rt_mutex_base *lock,
					   struct ww_acquire_ctx *ww_ctx,
					   unsigned int state,
					   struct hrtimer_sleeper *timeout,
					   struct rt_mutex_waiter *waiter)
{}
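
/*
 * Illustration only (hypothetical name): the shape of the loop that the
 * kernel-doc above describes, with the ww_mutex and adaptive-spinning
 * details left out. The caller is assumed to have already set the task
 * state and enqueued @waiter. A sketch, not the in-tree implementation.
 */
static __always_inline int rt_mutex_slowlock_block_sketch(struct rt_mutex_base *lock,
							   unsigned int state,
							   struct hrtimer_sleeper *timeout,
							   struct rt_mutex_waiter *waiter)
{
	int ret = 0;

	for (;;) {
		/* Lock released (or stealable)? Then we are done. */
		if (try_to_take_rt_mutex(lock, current, waiter))
			break;

		/* A started timer that has fired clears timeout->task. */
		if (timeout && !timeout->task) {
			ret = -ETIMEDOUT;
			break;
		}
		/* Only meaningful for TASK_INTERRUPTIBLE sleeps. */
		if (signal_pending_state(state, current)) {
			ret = -EINTR;
			break;
		}

		/* Sleep with wait_lock dropped; the waiter stays enqueued. */
		raw_spin_unlock_irq(&lock->wait_lock);
		schedule();
		raw_spin_lock_irq(&lock->wait_lock);
		set_current_state(state);
	}

	__set_current_state(TASK_RUNNING);
	return ret;
}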

static void __sched rt_mutex_handle_deadlock(int res, int detect_deadlock,
					     struct rt_mutex_waiter *w)
{}

/**
 * __rt_mutex_slowlock - Locking slowpath invoked with lock::wait_lock held
 * @lock:	The rtmutex to block on
 * @ww_ctx:	WW mutex context pointer
 * @state:	The task state for sleeping
 * @chwalk:	Indicator whether full or partial chainwalk is requested
 * @waiter:	Initialized waiter for blocking
 */
static int __sched __rt_mutex_slowlock(struct rt_mutex_base *lock,
				       struct ww_acquire_ctx *ww_ctx,
				       unsigned int state,
				       enum rtmutex_chainwalk chwalk,
				       struct rt_mutex_waiter *waiter)
{}

static inline int __rt_mutex_slowlock_locked(struct rt_mutex_base *lock,
					     struct ww_acquire_ctx *ww_ctx,
					     unsigned int state)
{}

/*
 * rt_mutex_slowlock - Locking slowpath invoked when fast path fails
 * @lock:	The rtmutex to block on
 * @ww_ctx:	WW mutex context pointer
 * @state:	The task state for sleeping
 */
static int __sched rt_mutex_slowlock(struct rt_mutex_base *lock,
				     struct ww_acquire_ctx *ww_ctx,
				     unsigned int state)
{}

static __always_inline int __rt_mutex_lock(struct rt_mutex_base *lock,
					   unsigned int state)
{}
#endif /* RT_MUTEX_BUILD_MUTEX */

#ifdef RT_MUTEX_BUILD_SPINLOCKS
/*
 * Functions required for spin/rw_lock substitution on RT kernels
 */

/**
 * rtlock_slowlock_locked - Slow path lock acquisition for RT locks
 * @lock:	The underlying RT mutex
 */
static void __sched rtlock_slowlock_locked(struct rt_mutex_base *lock)
{
	struct rt_mutex_waiter waiter;
	struct task_struct *owner;

	lockdep_assert_held(&lock->wait_lock);

	if (try_to_take_rt_mutex(lock, current, NULL))
		return;

	rt_mutex_init_rtlock_waiter(&waiter);

	/* Save current state and set state to TASK_RTLOCK_WAIT */
	current_save_and_set_rtlock_wait_state();

	trace_contention_begin(lock, LCB_F_RT);

	task_blocks_on_rt_mutex(lock, &waiter, current, NULL, RT_MUTEX_MIN_CHAINWALK);

	for (;;) {
		/* Try to acquire the lock again */
		if (try_to_take_rt_mutex(lock, current, &waiter))
			break;

		if (&waiter == rt_mutex_top_waiter(lock))
			owner = rt_mutex_owner(lock);
		else
			owner = NULL;
		raw_spin_unlock_irq(&lock->wait_lock);

		if (!owner || !rtmutex_spin_on_owner(lock, &waiter, owner))
			schedule_rtlock();

		raw_spin_lock_irq(&lock->wait_lock);
		set_current_state(TASK_RTLOCK_WAIT);
	}

	/* Restore the task state */
	current_restore_rtlock_saved_state();

	/*
	 * try_to_take_rt_mutex() sets the waiter bit unconditionally.
	 * We might have to fix that up:
	 */
	fixup_rt_mutex_waiters(lock, true);
	debug_rt_mutex_free_waiter(&waiter);

	trace_contention_end(lock, 0);
}

static __always_inline void __sched rtlock_slowlock(struct rt_mutex_base *lock)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&lock->wait_lock, flags);
	rtlock_slowlock_locked(lock);
	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
}
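
/*
 * Note on how this block is consumed: on PREEMPT_RT, kernel/locking/spinlock_rt.c
 * defines RT_MUTEX_BUILD_SPINLOCKS and includes this file to build the spinlock
 * and rwlock substitutions on top of rtlock_slowlock(), just as rtmutex_api.c
 * defines RT_MUTEX_BUILD_MUTEX for the mutex/rwsem side.
 */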

#endif /* RT_MUTEX_BUILD_SPINLOCKS */