#ifndef WW_RT

/*
 * !WW_RT: the ww_mutex is backed by a regular struct mutex; these helpers
 * wrap its wait_list and wait_lock.
 */
#define MUTEX …
#define MUTEX_WAITER …

static inline struct mutex_waiter *
__ww_waiter_first(struct mutex *lock)
{ … }

static inline struct mutex_waiter *
__ww_waiter_next(struct mutex *lock, struct mutex_waiter *w)
{ … }

static inline struct mutex_waiter *
__ww_waiter_prev(struct mutex *lock, struct mutex_waiter *w)
{ … }

static inline struct mutex_waiter *
__ww_waiter_last(struct mutex *lock)
{ … }

static inline void
__ww_waiter_add(struct mutex *lock, struct mutex_waiter *waiter, struct mutex_waiter *pos)
{ … }

static inline struct task_struct *
__ww_mutex_owner(struct mutex *lock)
{ … }

static inline bool
__ww_mutex_has_waiters(struct mutex *lock)
{ … }

static inline void lock_wait_lock(struct mutex *lock)
{ … }

static inline void unlock_wait_lock(struct mutex *lock)
{ … }

static inline void lockdep_assert_wait_lock_held(struct mutex *lock)
{ … }

#else /* WW_RT */

/*
 * WW_RT: the ww_mutex is backed by an rt_mutex; its waiters live in the
 * rt_mutex's priority-ordered rbtree rather than on a list.
 */
#define MUTEX …
#define MUTEX_WAITER …

/* Leftmost tree node, i.e. the top (highest priority) waiter, or NULL. */
static inline struct rt_mutex_waiter *
__ww_waiter_first(struct rt_mutex *lock)
{
        struct rb_node *n = rb_first(&lock->rtmutex.waiters.rb_root);
        if (!n)
                return NULL;
        return rb_entry(n, struct rt_mutex_waiter, tree.entry);
}

static inline struct rt_mutex_waiter *
__ww_waiter_next(struct rt_mutex *lock, struct rt_mutex_waiter *w)
{
        struct rb_node *n = rb_next(&w->tree.entry);
        if (!n)
                return NULL;
        return rb_entry(n, struct rt_mutex_waiter, tree.entry);
}

static inline struct rt_mutex_waiter *
__ww_waiter_prev(struct rt_mutex *lock, struct rt_mutex_waiter *w)
{
        struct rb_node *n = rb_prev(&w->tree.entry);
        if (!n)
                return NULL;
        return rb_entry(n, struct rt_mutex_waiter, tree.entry);
}

/* Rightmost tree node, i.e. the lowest-priority waiter, or NULL. */
static inline struct rt_mutex_waiter *
__ww_waiter_last(struct rt_mutex *lock)
{
        struct rb_node *n = rb_last(&lock->rtmutex.waiters.rb_root);
        if (!n)
                return NULL;
        return rb_entry(n, struct rt_mutex_waiter, tree.entry);
}

static inline void
__ww_waiter_add(struct rt_mutex *lock, struct rt_mutex_waiter *waiter, struct rt_mutex_waiter *pos)
{
        /* Nothing to do: the rt_mutex code queues the waiter in its tree itself. */
}

static inline struct task_struct *
__ww_mutex_owner(struct rt_mutex *lock)
{
        return rt_mutex_owner(&lock->rtmutex);
}

static inline bool
__ww_mutex_has_waiters(struct rt_mutex *lock)
{
        return rt_mutex_has_waiters(&lock->rtmutex);
}

static inline void lock_wait_lock(struct rt_mutex *lock)
{
        raw_spin_lock(&lock->rtmutex.wait_lock);
}

static inline void unlock_wait_lock(struct rt_mutex *lock)
{
        raw_spin_unlock(&lock->rtmutex.wait_lock);
}

static inline void lockdep_assert_wait_lock_held(struct rt_mutex *lock)
{
        lockdep_assert_held(&lock->rtmutex.wait_lock);
}

#endif
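
/*
 * Illustration (not part of the original file): with the wrappers above, the
 * wound/wait code below can walk the waiters of either lock type the same
 * way. The function below is a made-up example of such a walk, not a helper
 * that exists in the kernel; it only shows how the __ww_waiter_*() accessors
 * compose.
 */
static inline void __ww_example_for_each_waiter(struct MUTEX *lock)
{
        struct MUTEX_WAITER *cur;

        /* The wait list/tree is only stable under the wait_lock. */
        lockdep_assert_wait_lock_held(lock);

        for (cur = __ww_waiter_first(lock); cur;
             cur = __ww_waiter_next(lock, cur)) {
                if (!cur->ww_ctx)       /* waiter is not using a ww_acquire_ctx */
                        continue;
                /* inspect cur->ww_ctx here, e.g. to die or wound */
        }
}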
static __always_inline void
ww_mutex_lock_acquired(struct ww_mutex *ww, struct ww_acquire_ctx *ww_ctx)
{ … }

static inline bool
__ww_ctx_less(struct ww_acquire_ctx *a, struct ww_acquire_ctx *b)
{ … }

static bool
__ww_mutex_die(struct MUTEX *lock, struct MUTEX_WAITER *waiter,
               struct ww_acquire_ctx *ww_ctx)
{ … }

static bool __ww_mutex_wound(struct MUTEX *lock,
                             struct ww_acquire_ctx *ww_ctx,
                             struct ww_acquire_ctx *hold_ctx)
{ … }

static void
__ww_mutex_check_waiters(struct MUTEX *lock, struct ww_acquire_ctx *ww_ctx)
{ … }

static __always_inline void
ww_mutex_set_context_fastpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{ … }

static __always_inline int
__ww_mutex_kill(struct MUTEX *lock, struct ww_acquire_ctx *ww_ctx)
{ … }

static inline int
__ww_mutex_check_kill(struct MUTEX *lock, struct MUTEX_WAITER *waiter,
                      struct ww_acquire_ctx *ctx)
{ … }

static inline int
__ww_mutex_add_waiter(struct MUTEX_WAITER *waiter,
                      struct MUTEX *lock,
                      struct ww_acquire_ctx *ww_ctx)
{ … }

static inline void __ww_mutex_unlock(struct ww_mutex *lock)
{ … }
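
/*
 * Caller-side sketch (not part of this file): the helpers above back the
 * public ww_mutex API, whose users acquire several locks under one
 * ww_acquire_ctx and back off on -EDEADLK. example_ww_class and
 * example_lock_two() are made up for illustration; the ww_* calls are the
 * public API, and the two locks are assumed to be distinct.
 */
static DEFINE_WW_CLASS(example_ww_class);

static void example_lock_two(struct ww_mutex *m1, struct ww_mutex *m2)
{
        struct ww_acquire_ctx ctx;
        int err;

        ww_acquire_init(&ctx, &example_ww_class);

        err = ww_mutex_lock(m1, &ctx);  /* first lock under a fresh ctx: no back-off */
        for (;;) {
                err = ww_mutex_lock(m2, &ctx);
                if (err != -EDEADLK)
                        break;
                /*
                 * An older context needs one of our locks: drop what we hold,
                 * sleep-wait for the contended lock, then retry with it held
                 * first so the older context can make progress.
                 */
                ww_mutex_unlock(m1);
                ww_mutex_lock_slow(m2, &ctx);
                swap(m1, m2);
        }
        ww_acquire_done(&ctx);

        /* ... both locks held, do the work ... */

        ww_mutex_unlock(m1);
        ww_mutex_unlock(m2);
        ww_acquire_fini(&ctx);
}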