#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/deadline.h>
#include <linux/sched/signal.h>
#include <linux/sched/rt.h>
#include <linux/sched/wake_q.h>
#include <linux/ww_mutex.h>
#include <trace/events/lock.h>
#include "rtmutex_common.h"
#ifndef WW_RT
# define build_ww_mutex() …
# define ww_container_of(rtm) …
static inline int __ww_mutex_add_waiter(struct rt_mutex_waiter *waiter,
struct rt_mutex *lock,
struct ww_acquire_ctx *ww_ctx)
{ … }
static inline void __ww_mutex_check_waiters(struct rt_mutex *lock,
struct ww_acquire_ctx *ww_ctx)
{ … }
static inline void ww_mutex_lock_acquired(struct ww_mutex *lock,
struct ww_acquire_ctx *ww_ctx)
{ … }
static inline int __ww_mutex_check_kill(struct rt_mutex *lock,
struct rt_mutex_waiter *waiter,
struct ww_acquire_ctx *ww_ctx)
{ … }
#else
# define build_ww_mutex() …
# define ww_container_of(rtm) …
# include "ww_mutex.h"
#endif
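/*
 * lock->owner state tracking:
 *
 * lock->owner holds the task_struct pointer of the owner. Bit 0 is used to
 * keep track of the "lock has waiters" state:
 *
 *  owner        bit0
 *  NULL         0     lock is free (fast acquire possible)
 *  NULL         1     lock is free and has waiters and the top waiter
 *                     is going to take the lock
 *  taskpointer  0     lock is held (fast release possible)
 *  taskpointer  1     lock is held and has waiters
 *
 * The fast atomic compare exchange based acquire and release is only
 * possible when bit 0 of lock->owner is 0.
 */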
static __always_inline struct task_struct *
rt_mutex_owner_encode(struct rt_mutex_base *lock, struct task_struct *owner)
{ … }
static __always_inline void
rt_mutex_set_owner(struct rt_mutex_base *lock, struct task_struct *owner)
{ … }
static __always_inline void rt_mutex_clear_owner(struct rt_mutex_base *lock)
{ … }
static __always_inline void clear_rt_mutex_waiters(struct rt_mutex_base *lock)
{ … }
static __always_inline void
fixup_rt_mutex_waiters(struct rt_mutex_base *lock, bool acquire_lock)
{ … }
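/*
 * We can speed up the acquire/release, if there's no debugging state to be
 * set up; with CONFIG_DEBUG_RT_MUTEXES every operation is forced through
 * the slow path under lock->wait_lock.
 */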
#ifndef CONFIG_DEBUG_RT_MUTEXES
static __always_inline bool rt_mutex_cmpxchg_acquire(struct rt_mutex_base *lock,
struct task_struct *old,
struct task_struct *new)
{
return try_cmpxchg_acquire(&lock->owner, &old, new);
}
static __always_inline bool rt_mutex_try_acquire(struct rt_mutex_base *lock)
{
return rt_mutex_cmpxchg_acquire(lock, NULL, current);
}
static __always_inline bool rt_mutex_cmpxchg_release(struct rt_mutex_base *lock,
struct task_struct *old,
struct task_struct *new)
{
return try_cmpxchg_release(&lock->owner, &old, new);
}
static __always_inline void mark_rt_mutex_waiters(struct rt_mutex_base *lock)
{
	unsigned long *p = (unsigned long *) &lock->owner;
	unsigned long owner, new;
	owner = READ_ONCE(*p);
	do {
		new = owner | RT_MUTEX_HAS_WAITERS;
	} while (!try_cmpxchg_relaxed(p, &owner, new));
	/*
	 * The cmpxchg loop above is relaxed to avoid back-to-back ACQUIRE
	 * operations in the event of contention. Ensure the successful
	 * cmpxchg is visible.
	 */
	smp_mb__after_atomic();
}
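/*
 * Safe fastpath aware unlock:
 * 1) Clear the waiters bit
 * 2) Drop lock->wait_lock
 * 3) Try to unlock the lock with cmpxchg
 */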
static __always_inline bool unlock_rt_mutex_safe(struct rt_mutex_base *lock,
						 unsigned long flags)
	__releases(lock->wait_lock)
{
	struct task_struct *owner = rt_mutex_owner(lock);
	clear_rt_mutex_waiters(lock);
	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
	/*
	 * If a new waiter slips in after the unlock above, either the
	 * cmpxchg below wins and the lock is free for that waiter to take,
	 * or the waiter sets RT_MUTEX_HAS_WAITERS first, the cmpxchg fails
	 * and the caller must fall back to the slowpath wakeup.
	 */
	return rt_mutex_cmpxchg_release(lock, owner, NULL);
}
#else
static __always_inline bool rt_mutex_cmpxchg_acquire(struct rt_mutex_base *lock,
struct task_struct *old,
struct task_struct *new)
{ … }
static int __sched rt_mutex_slowtrylock(struct rt_mutex_base *lock);
static __always_inline bool rt_mutex_try_acquire(struct rt_mutex_base *lock)
{ … }
static __always_inline bool rt_mutex_cmpxchg_release(struct rt_mutex_base *lock,
struct task_struct *old,
struct task_struct *new)
{ … }
static __always_inline void mark_rt_mutex_waiters(struct rt_mutex_base *lock)
{ … }
static __always_inline bool unlock_rt_mutex_safe(struct rt_mutex_base *lock,
unsigned long flags)
__releases(lock->wait_lock)
{ … }
#endif
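/*
 * Waiter ordering: waiters are sorted by priority and, for equal
 * priority SCHED_DEADLINE waiters, by absolute deadline. Non-RT/DL
 * tasks are all mapped to DEFAULT_PRIO so that their nice level does
 * not affect the ordering.
 */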
static __always_inline int __waiter_prio(struct task_struct *task)
{ … }
static __always_inline void
waiter_update_prio(struct rt_mutex_waiter *waiter, struct task_struct *task)
{ … }
static __always_inline void
waiter_clone_prio(struct rt_mutex_waiter *waiter, struct task_struct *task)
{ … }
#define task_to_waiter_node(p) …
#define task_to_waiter(p) …
static __always_inline int rt_waiter_node_less(struct rt_waiter_node *left,
struct rt_waiter_node *right)
{ … }
static __always_inline int rt_waiter_node_equal(struct rt_waiter_node *left,
struct rt_waiter_node *right)
{ … }
static inline bool rt_mutex_steal(struct rt_mutex_waiter *waiter,
struct rt_mutex_waiter *top_waiter)
{ … }
#define __node_2_waiter(node) …
static __always_inline bool __waiter_less(struct rb_node *a, const struct rb_node *b)
{ … }
static __always_inline void
rt_mutex_enqueue(struct rt_mutex_base *lock, struct rt_mutex_waiter *waiter)
{ … }
static __always_inline void
rt_mutex_dequeue(struct rt_mutex_base *lock, struct rt_mutex_waiter *waiter)
{ … }
#define __node_2_rt_node(node) …
static __always_inline bool __pi_waiter_less(struct rb_node *a, const struct rb_node *b)
{ … }
static __always_inline void
rt_mutex_enqueue_pi(struct task_struct *task, struct rt_mutex_waiter *waiter)
{ … }
static __always_inline void
rt_mutex_dequeue_pi(struct task_struct *task, struct rt_mutex_waiter *waiter)
{ … }
static __always_inline void rt_mutex_adjust_prio(struct rt_mutex_base *lock,
struct task_struct *p)
{ … }
static __always_inline void rt_mutex_wake_q_add_task(struct rt_wake_q_head *wqh,
struct task_struct *task,
unsigned int wake_state)
{ … }
static __always_inline void rt_mutex_wake_q_add(struct rt_wake_q_head *wqh,
struct rt_mutex_waiter *w)
{ … }
static __always_inline void rt_mutex_wake_up_q(struct rt_wake_q_head *wqh)
{ … }
static __always_inline bool
rt_mutex_cond_detect_deadlock(struct rt_mutex_waiter *waiter,
enum rtmutex_chainwalk chwalk)
{ … }
static __always_inline struct rt_mutex_base *task_blocked_on_lock(struct task_struct *p)
{ … }
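/*
 * Adjust the priority chain. Also used for deadlock detection.
 *
 * Starting from a task blocked on @orig_lock, walk to the owner of the
 * lock, adjust that owner's priority, and if the owner is itself blocked
 * follow its pi_blocked_on pointer to the next lock in the chain. All
 * locks are dropped and retaken between steps, so the chain can change
 * under us; @next_lock is used to detect that. Encountering @orig_lock
 * again, or @top_task as an owner, indicates a circular chain, i.e. a
 * deadlock.
 */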
static int __sched rt_mutex_adjust_prio_chain(struct task_struct *task,
enum rtmutex_chainwalk chwalk,
struct rt_mutex_base *orig_lock,
struct rt_mutex_base *next_lock,
struct rt_mutex_waiter *orig_waiter,
struct task_struct *top_task)
{ … }
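/*
 * Try to take an rt-mutex.
 *
 * Must be called with lock->wait_lock held and interrupts disabled.
 *
 * @lock:   The lock to be acquired
 * @task:   The task which wants to acquire the lock
 * @waiter: The waiter that is queued to the lock's wait tree if the
 *          callsite called task_blocked_on_lock(), otherwise NULL
 */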
static int __sched
try_to_take_rt_mutex(struct rt_mutex_base *lock, struct task_struct *task,
struct rt_mutex_waiter *waiter)
{ … }
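/*
 * Task blocks on lock.
 *
 * Prepare waiter and propagate pi chain.
 *
 * This must be called with lock->wait_lock held and interrupts disabled.
 */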
static int __sched task_blocks_on_rt_mutex(struct rt_mutex_base *lock,
struct rt_mutex_waiter *waiter,
struct task_struct *task,
struct ww_acquire_ctx *ww_ctx,
enum rtmutex_chainwalk chwalk)
{ … }
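/*
 * Remove the top waiter from the current tasks pi waiter tree and
 * queue it up for wakeup.
 *
 * Called with lock->wait_lock held and interrupts disabled.
 */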
static void __sched mark_wakeup_next_waiter(struct rt_wake_q_head *wqh,
struct rt_mutex_base *lock)
{ … }
static int __sched __rt_mutex_slowtrylock(struct rt_mutex_base *lock)
{ … }
static int __sched rt_mutex_slowtrylock(struct rt_mutex_base *lock)
{ … }
static __always_inline int __rt_mutex_trylock(struct rt_mutex_base *lock)
{ … }
static void __sched rt_mutex_slowunlock(struct rt_mutex_base *lock)
{ … }
static __always_inline void __rt_mutex_unlock(struct rt_mutex_base *lock)
{ … }
#ifdef CONFIG_SMP
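/*
 * Optimistic spinning: as long as the owner is running on another CPU
 * and we are the top waiter, spin instead of scheduling out. Note that
 * @owner is a speculative pointer; dereferencing it relies on
 * rcu_read_lock() and the re-check against the lock owner.
 */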
static bool rtmutex_spin_on_owner(struct rt_mutex_base *lock,
struct rt_mutex_waiter *waiter,
struct task_struct *owner)
{ … }
#else
static bool rtmutex_spin_on_owner(struct rt_mutex_base *lock,
				  struct rt_mutex_waiter *waiter,
				  struct task_struct *owner)
{
	/* On UP the lock owner can never be running on another CPU. */
	return false;
}
#endif
#ifdef RT_MUTEX_BUILD_MUTEX
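/*
 * Remove a waiter from a lock and give up.
 *
 * Must be called with lock->wait_lock held and interrupts disabled. The
 * caller must have just failed try_to_take_rt_mutex().
 */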
static void __sched remove_waiter(struct rt_mutex_base *lock,
struct rt_mutex_waiter *waiter)
{ … }
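/**
 * rt_mutex_slowlock_block() - Perform the wait-wake-try-to-take loop
 * @lock:    the rt_mutex to take
 * @ww_ctx:  WW mutex context pointer
 * @state:   the state the task should block in (TASK_xxx)
 * @timeout: the pre-initialized and started timer, or NULL for none
 * @waiter:  the pre-initialized rt_mutex_waiter
 *
 * Must be called with lock->wait_lock held and interrupts disabled.
 */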
static int __sched rt_mutex_slowlock_block(struct rt_mutex_base *lock,
struct ww_acquire_ctx *ww_ctx,
unsigned int state,
struct hrtimer_sleeper *timeout,
struct rt_mutex_waiter *waiter)
{ … }
static void __sched rt_mutex_handle_deadlock(int res, int detect_deadlock,
struct rt_mutex_waiter *w)
{ … }
static int __sched __rt_mutex_slowlock(struct rt_mutex_base *lock,
struct ww_acquire_ctx *ww_ctx,
unsigned int state,
enum rtmutex_chainwalk chwalk,
struct rt_mutex_waiter *waiter)
{ … }
static inline int __rt_mutex_slowlock_locked(struct rt_mutex_base *lock,
struct ww_acquire_ctx *ww_ctx,
unsigned int state)
{ … }
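/*
 * rt_mutex_slowlock - Locking slowpath invoked when the fast path fails.
 * Takes wait_lock with irqsave rather than irq, because this can be
 * reached in early boot with the cmpxchg fast path disabled, where
 * interrupts must not be unconditionally re-enabled.
 */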
static int __sched rt_mutex_slowlock(struct rt_mutex_base *lock,
struct ww_acquire_ctx *ww_ctx,
unsigned int state)
{ … }
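/*
 * __rt_mutex_lock(): lock fast path. A sketch of the expected shape
 * (the body is elided below): try the fast cmpxchg based acquire first
 * and only fall back to the slowpath on contention:
 *
 *	if (likely(rt_mutex_try_acquire(lock)))
 *		return 0;
 *	return rt_mutex_slowlock(lock, NULL, state);
 */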
static __always_inline int __rt_mutex_lock(struct rt_mutex_base *lock,
unsigned int state)
{ … }
#endif
#ifdef RT_MUTEX_BUILD_SPINLOCKS
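/*
 * Functions required for spin/rw_lock substitution on RT kernels.
 */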
static void __sched rtlock_slowlock_locked(struct rt_mutex_base *lock)
{
	struct rt_mutex_waiter waiter;
	struct task_struct *owner;
	lockdep_assert_held(&lock->wait_lock);
	if (try_to_take_rt_mutex(lock, current, NULL))
		return;
	rt_mutex_init_rtlock_waiter(&waiter);
	/* Save current state and set state to TASK_RTLOCK_WAIT */
	current_save_and_set_rtlock_wait_state();
	trace_contention_begin(lock, LCB_F_RT);
	task_blocks_on_rt_mutex(lock, &waiter, current, NULL, RT_MUTEX_MIN_CHAINWALK);
	for (;;) {
		/* Try to acquire the lock again */
		if (try_to_take_rt_mutex(lock, current, &waiter))
			break;
		if (&waiter == rt_mutex_top_waiter(lock))
			owner = rt_mutex_owner(lock);
		else
			owner = NULL;
		raw_spin_unlock_irq(&lock->wait_lock);
		if (!owner || !rtmutex_spin_on_owner(lock, &waiter, owner))
			schedule_rtlock();
		raw_spin_lock_irq(&lock->wait_lock);
		set_current_state(TASK_RTLOCK_WAIT);
	}
	/* Restore the task state */
	current_restore_rtlock_saved_state();
	/*
	 * try_to_take_rt_mutex() sets the waiter bit unconditionally.
	 * We might have to fix that up:
	 */
	fixup_rt_mutex_waiters(lock, true);
	debug_rt_mutex_free_waiter(&waiter);
	trace_contention_end(lock, 0);
}
static __always_inline void __sched rtlock_slowlock(struct rt_mutex_base *lock)
{
unsigned long flags;
raw_spin_lock_irqsave(&lock->wait_lock, flags);
rtlock_slowlock_locked(lock);
raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
}
#endif