#ifndef __LINUX_SPINLOCK_H
#define __LINUX_SPINLOCK_H
#define __LINUX_INSIDE_SPINLOCK_H
#include <linux/typecheck.h>
#include <linux/preempt.h>
#include <linux/linkage.h>
#include <linux/compiler.h>
#include <linux/irqflags.h>
#include <linux/thread_info.h>
#include <linux/stringify.h>
#include <linux/bottom_half.h>
#include <linux/lockdep.h>
#include <linux/cleanup.h>
#include <asm/barrier.h>
#include <asm/mmiowb.h>
#define LOCK_SECTION_NAME …
#define LOCK_SECTION_START(extra) …
#define LOCK_SECTION_END …
#define __lockfunc …
#include <linux/spinlock_types.h>
#ifdef CONFIG_SMP
# include <asm/spinlock.h>
#else
# include <linux/spinlock_up.h>
#endif
#ifdef CONFIG_DEBUG_SPINLOCK
extern void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
struct lock_class_key *key, short inner);
#define raw_spin_lock_init(lock) …
#else
#define raw_spin_lock_init …
#endif
#define raw_spin_is_locked(lock) …
#ifdef arch_spin_is_contended
#define raw_spin_is_contended(lock) …
#else
#define raw_spin_is_contended …
#endif
#ifndef smp_mb__after_spinlock
#define smp_mb__after_spinlock() …
#endif
#ifdef CONFIG_DEBUG_SPINLOCK
extern void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(…);
extern int do_raw_spin_trylock(raw_spinlock_t *lock);
extern void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(…);
#else
/*
 * Non-debug (!CONFIG_DEBUG_SPINLOCK) fast path: acquire the architecture
 * lock directly with no sanity checking.
 *
 * Order matters here:
 *   1. __acquire() is a sparse-only annotation (no runtime effect) so
 *      static analysis tracks lock ownership for the __acquires() contract.
 *   2. arch_spin_lock() takes the low-level arch lock.
 *   3. mmiowb_spin_lock() runs only once the lock is held — presumably it
 *      records the acquisition for MMIO write-ordering on architectures
 *      that need it (see asm/mmiowb.h, included above).
 */
static inline void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock)
{
__acquire(lock);
arch_spin_lock(&lock->raw_lock);
mmiowb_spin_lock();
}
/*
 * Non-debug (!CONFIG_DEBUG_SPINLOCK) trylock fast path.
 *
 * Returns nonzero if the lock was acquired, 0 if it was contended.
 * mmiowb_spin_lock() bookkeeping is performed only on success, mirroring
 * the unconditional call in do_raw_spin_lock() above.
 *
 * Note: use &lock->raw_lock (not &(lock)->raw_lock) for consistency with
 * do_raw_spin_lock()/do_raw_spin_unlock(); the extra parentheses were a
 * macro-argument leftover and are unnecessary around a plain parameter.
 */
static inline int do_raw_spin_trylock(raw_spinlock_t *lock)
{
int ret = arch_spin_trylock(&lock->raw_lock);
if (ret)
mmiowb_spin_lock();
return ret;
}
/*
 * Non-debug (!CONFIG_DEBUG_SPINLOCK) unlock fast path — the exact mirror
 * of do_raw_spin_lock():
 *   1. mmiowb_spin_unlock() runs while the lock is still held — presumably
 *      to flush/order any pending MMIO writes before the lock is released
 *      (see asm/mmiowb.h); reordering it after arch_spin_unlock() would
 *      defeat that.
 *   2. arch_spin_unlock() releases the low-level arch lock.
 *   3. __release() is the sparse-only counterpart of __acquire(), closing
 *      the __releases() static-analysis contract.
 */
static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
{
mmiowb_spin_unlock();
arch_spin_unlock(&lock->raw_lock);
__release(lock);
}
#endif
#define raw_spin_trylock(lock) …
#define raw_spin_lock(lock) …
#ifdef CONFIG_DEBUG_LOCK_ALLOC
#define raw_spin_lock_nested(lock, subclass) …
#define raw_spin_lock_nest_lock(lock, nest_lock) …
#else
#define raw_spin_lock_nested …
#define raw_spin_lock_nest_lock …
#endif
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
#define raw_spin_lock_irqsave(lock, flags) …
#ifdef CONFIG_DEBUG_LOCK_ALLOC
#define raw_spin_lock_irqsave_nested(lock, flags, subclass) …
#else
#define raw_spin_lock_irqsave_nested …
#endif
#else
#define raw_spin_lock_irqsave …
#define raw_spin_lock_irqsave_nested …
#endif
#define raw_spin_lock_irq(lock) …
#define raw_spin_lock_bh(lock) …
#define raw_spin_unlock(lock) …
#define raw_spin_unlock_irq(lock) …
#define raw_spin_unlock_irqrestore(lock, flags) …
#define raw_spin_unlock_bh(lock) …
#define raw_spin_trylock_bh(lock) …
#define raw_spin_trylock_irq(lock) …
#define raw_spin_trylock_irqsave(lock, flags) …
#ifndef CONFIG_PREEMPT_RT
#include <linux/rwlock.h>
#endif
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
# include <linux/spinlock_api_smp.h>
#else
# include <linux/spinlock_api_up.h>
#endif
#ifndef CONFIG_PREEMPT_RT
static __always_inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
{ … }
#ifdef CONFIG_DEBUG_SPINLOCK
#define spin_lock_init(lock) …
#else
#define spin_lock_init …
#endif
static __always_inline void spin_lock(spinlock_t *lock)
{ … }
static __always_inline void spin_lock_bh(spinlock_t *lock)
{ … }
static __always_inline int spin_trylock(spinlock_t *lock)
{ … }
#define spin_lock_nested(lock, subclass) …
#define spin_lock_nest_lock(lock, nest_lock) …
static __always_inline void spin_lock_irq(spinlock_t *lock)
{ … }
#define spin_lock_irqsave(lock, flags) …
#define spin_lock_irqsave_nested(lock, flags, subclass) …
static __always_inline void spin_unlock(spinlock_t *lock)
{ … }
static __always_inline void spin_unlock_bh(spinlock_t *lock)
{ … }
static __always_inline void spin_unlock_irq(spinlock_t *lock)
{ … }
static __always_inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
{ … }
static __always_inline int spin_trylock_bh(spinlock_t *lock)
{ … }
static __always_inline int spin_trylock_irq(spinlock_t *lock)
{ … }
#define spin_trylock_irqsave(lock, flags) …
static __always_inline int spin_is_locked(spinlock_t *lock)
{ … }
static __always_inline int spin_is_contended(spinlock_t *lock)
{ … }
#define assert_spin_locked(lock) …
#else
# include <linux/spinlock_rt.h>
#endif
static inline int spin_needbreak(spinlock_t *lock)
{ … }
static inline int rwlock_needbreak(rwlock_t *lock)
{ … }
#include <linux/atomic.h>
extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
#define atomic_dec_and_lock(atomic, lock) …
extern int _atomic_dec_and_lock_irqsave(atomic_t *atomic, spinlock_t *lock,
unsigned long *flags);
#define atomic_dec_and_lock_irqsave(atomic, lock, flags) …
extern int _atomic_dec_and_raw_lock(atomic_t *atomic, raw_spinlock_t *lock);
#define atomic_dec_and_raw_lock(atomic, lock) …
extern int _atomic_dec_and_raw_lock_irqsave(atomic_t *atomic, raw_spinlock_t *lock,
unsigned long *flags);
#define atomic_dec_and_raw_lock_irqsave(atomic, lock, flags) …
int __alloc_bucket_spinlocks(spinlock_t **locks, unsigned int *lock_mask,
size_t max_size, unsigned int cpu_mult,
gfp_t gfp, const char *name,
struct lock_class_key *key);
#define alloc_bucket_spinlocks(locks, lock_mask, max_size, cpu_mult, gfp) …
void free_bucket_spinlocks(spinlock_t *locks);
DEFINE_LOCK_GUARD_1(raw_spinlock, raw_spinlock_t,
raw_spin_lock(_T->lock),
raw_spin_unlock(_T->lock))
DEFINE_LOCK_GUARD_1_COND(raw_spinlock, _try, raw_spin_trylock(_T->lock))
DEFINE_LOCK_GUARD_1(raw_spinlock_nested, raw_spinlock_t,
raw_spin_lock_nested(_T->lock, SINGLE_DEPTH_NESTING),
raw_spin_unlock(_T->lock))
DEFINE_LOCK_GUARD_1(raw_spinlock_irq, raw_spinlock_t,
raw_spin_lock_irq(_T->lock),
raw_spin_unlock_irq(_T->lock))
DEFINE_LOCK_GUARD_1_COND(raw_spinlock_irq, _try, raw_spin_trylock_irq(_T->lock))
DEFINE_LOCK_GUARD_1(raw_spinlock_irqsave, raw_spinlock_t,
raw_spin_lock_irqsave(_T->lock, _T->flags),
raw_spin_unlock_irqrestore(_T->lock, _T->flags),
… }
DEFINE_LOCK_GUARD_1_COND(raw_spinlock_irqsave, _try,
raw_spin_trylock_irqsave(_T->lock, _T->flags))
DEFINE_LOCK_GUARD_1(spinlock, spinlock_t,
spin_lock(_T->lock),
spin_unlock(_T->lock))
DEFINE_LOCK_GUARD_1_COND(spinlock, _try, spin_trylock(_T->lock))
DEFINE_LOCK_GUARD_1(spinlock_irq, spinlock_t,
spin_lock_irq(_T->lock),
spin_unlock_irq(_T->lock))
DEFINE_LOCK_GUARD_1_COND(spinlock_irq, _try,
spin_trylock_irq(_T->lock))
DEFINE_LOCK_GUARD_1(spinlock_irqsave, spinlock_t,
spin_lock_irqsave(_T->lock, _T->flags),
spin_unlock_irqrestore(_T->lock, _T->flags),
… }
DEFINE_LOCK_GUARD_1_COND(spinlock_irqsave, _try,
spin_trylock_irqsave(_T->lock, _T->flags))
DEFINE_LOCK_GUARD_1(read_lock, rwlock_t,
read_lock(_T->lock),
read_unlock(_T->lock))
DEFINE_LOCK_GUARD_1(read_lock_irq, rwlock_t,
read_lock_irq(_T->lock),
read_unlock_irq(_T->lock))
DEFINE_LOCK_GUARD_1(read_lock_irqsave, rwlock_t,
read_lock_irqsave(_T->lock, _T->flags),
read_unlock_irqrestore(_T->lock, _T->flags),
… }
DEFINE_LOCK_GUARD_1(write_lock, rwlock_t,
write_lock(_T->lock),
write_unlock(_T->lock))
DEFINE_LOCK_GUARD_1(write_lock_irq, rwlock_t,
write_lock_irq(_T->lock),
write_unlock_irq(_T->lock))
DEFINE_LOCK_GUARD_1(write_lock_irqsave, rwlock_t,
write_lock_irqsave(_T->lock, _T->flags),
write_unlock_irqrestore(_T->lock, _T->flags),
… }
#undef __LINUX_INSIDE_SPINLOCK_H
#endif