/* linux/kernel/locking/locktorture.c */

// SPDX-License-Identifier: GPL-2.0+
/*
 * Module-based torture test facility for locking
 *
 * Copyright (C) IBM Corporation, 2014
 *
 * Authors: Paul E. McKenney <[email protected]>
 *          Davidlohr Bueso <[email protected]>
 *	Based on kernel/rcu/torture.c.
 */

/*
 * Prefix for pr_*() output.  The stripped version expanded to nothing,
 * which would discard the entire format string passed to pr_*(); pass
 * the format through unchanged instead.
 */
#define pr_fmt(fmt) fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/sched/rt.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/rwsem.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <uapi/linux/sched/types.h>
#include <linux/rtmutex.h>
#include <linux/atomic.h>
#include <linux/moduleparam.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/torture.h>
#include <linux/reboot.h>

MODULE_DESCRIPTION();
MODULE_LICENSE();
MODULE_AUTHOR();

torture_param(int, acq_writer_lim, 0, "Write_acquisition time limit (jiffies).");
torture_param(int, call_rcu_chains, 0, "Self-propagate call_rcu() chains during test (0=disable).");
torture_param(int, long_hold, 100, "Do occasional long hold of lock (ms), 0=disable");
torture_param(int, nested_locks, 0, "Number of nested locks (max = 8)");
torture_param(int, nreaders_stress, -1, "Number of read-locking stress-test threads");
torture_param(int, nwriters_stress, -1, "Number of write-locking stress-test threads");
torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)");
torture_param(int, onoff_interval, 0, "Time between CPU hotplugs (s), 0=disable");
torture_param(int, rt_boost, 2,
		   "Do periodic rt-boost. 0=Disable, 1=Only for rt_mutex, 2=For all lock types.");
torture_param(int, rt_boost_factor, 50, "A factor determining how often rt-boost happens.");
torture_param(int, shuffle_interval, 3, "Number of jiffies between shuffles, 0=disable");
torture_param(int, shutdown_secs, 0, "Shutdown time (j), <= zero to disable.");
torture_param(int, stat_interval, 60, "Number of seconds between stats printk()s");
torture_param(int, stutter, 5, "Number of jiffies to run/halt test, 0=disable");
torture_param(int, verbose, 1, "Enable verbose debugging printk()s");
torture_param(int, writer_fifo, 0, "Run writers at sched_set_fifo() priority");
/* Going much higher trips "BUG: MAX_LOCKDEP_CHAIN_HLOCKS too low!" errors */
/*
 * Maximum nesting depth for nested_locks.  The value was stripped,
 * leaving the arrays sized [MAX_NESTED_LOCKS] below with an empty
 * dimension; the nested_locks parameter description above states
 * "max = 8".
 */
#define MAX_NESTED_LOCKS 8

static char *torture_type =;
module_param(torture_type, charp, 0444);
MODULE_PARM_DESC();

static cpumask_var_t bind_readers; // Bind the readers to the specified set of CPUs.
static cpumask_var_t bind_writers; // Bind the writers to the specified set of CPUs.

// Parse a cpumask kernel parameter.  If there are more users later on,
// this might need to go to a more central location.
// NOTE(review): the bodies of the three helpers below were stripped to
// "{}" in this extraction; only the signatures are visible.  They match
// the set/get prototypes expected by struct kernel_param_ops, so they
// presumably back the lt_bind_ops parameter-ops vector — confirm against
// the original file.
static int param_set_cpumask(const char *val, const struct kernel_param *kp)
{}

// Output a cpumask kernel parameter.
static int param_get_cpumask(char *buffer, const struct kernel_param *kp)
{}

// Presumably reports whether the mask contains at least one CPU
// (inferred from the name — body not visible here).
static bool cpumask_nonempty(cpumask_var_t mask)
{}

static const struct kernel_param_ops lt_bind_ops =;

module_param_cb();
module_param_cb();

// Provided by the torture-test core (kernel/torture.c); declared here
// rather than in a header.
long torture_sched_setaffinity(pid_t pid, const struct cpumask *in_mask);

// Kthreads created by this module: one stats printer, plus arrays of
// writer and reader stress threads.
static struct task_struct *stats_task;
static struct task_struct **writer_tasks;
static struct task_struct **reader_tasks;

// Shared state used to detect conflicting lock acquisitions and to
// record the jiffies time of the most recent lock release.
static bool lock_is_write_held;
static atomic_t lock_is_read_held;
static unsigned long last_lock_release;

// NOTE(review): both struct bodies below were stripped to "{}" in this
// extraction; members are not visible here.
struct lock_stress_stats {};

// Node in the list of self-propagating call_rcu() chains (see the
// call_rcu_chain_* functions near the end of this file).
// NOTE(review): consider whether call_rcu_chain_list should be static —
// it is only referenced from this file as far as visible here.
struct call_rcu_chain {};
struct call_rcu_chain *call_rcu_chain_list;

/* Forward reference. */
static void lock_torture_cleanup(void);

/*
 * Operations vector for selecting different types of tests.
 */
/* NOTE(review): members stripped to "{}" in this extraction. */
struct lock_torture_ops {};

struct lock_torture_cxt {};
/*
 * Global test context.  The original initializer was stripped to "=;"
 * (a syntax error); a plain static definition is zero-initialized,
 * which matches an all-zero/NULL starting state without inventing
 * member values we cannot see.
 */
static struct lock_torture_cxt cxt;
/*
 * Definitions for lock torture testing.
 */

/*
 * "Busted" lock variant — from the name, presumably a deliberately
 * broken lock used to verify that the framework detects mutual-
 * exclusion failures (confirm against the original file).
 * NOTE(review): all bodies below were stripped to "{}" and the ops
 * initializer to "=;" (a syntax error) in this extraction.
 */
static int torture_lock_busted_write_lock(int tid __maybe_unused)
{}

static void torture_lock_busted_write_delay(struct torture_random_state *trsp)
{}

static void torture_lock_busted_write_unlock(int tid __maybe_unused)
{}

// Shared rt-boost helpers used by multiple ops vectors (bodies not
// visible here).
static void __torture_rt_boost(struct torture_random_state *trsp)
{}

static void torture_rt_boost(struct torture_random_state *trsp)
{}

static struct lock_torture_ops lock_busted_ops =;

/*
 * spin_lock and spin_lock_irq test variants, both operating on
 * torture_spinlock (per the __acquires/__releases annotations).
 * NOTE(review): bodies stripped to "{}" and ops initializers to "=;"
 * (syntax errors) in this extraction.
 */
static DEFINE_SPINLOCK(torture_spinlock);

static int torture_spin_lock_write_lock(int tid __maybe_unused)
__acquires(torture_spinlock)
{}

static void torture_spin_lock_write_delay(struct torture_random_state *trsp)
{}

static void torture_spin_lock_write_unlock(int tid __maybe_unused)
__releases(torture_spinlock)
{}

static struct lock_torture_ops spin_lock_ops =;

static int torture_spin_lock_write_lock_irq(int tid __maybe_unused)
__acquires(torture_spinlock)
{}

static void torture_lock_spin_write_unlock_irq(int tid __maybe_unused)
__releases(torture_spinlock)
{}

static struct lock_torture_ops spin_lock_irq_ops =;

/*
 * raw_spin_lock and raw_spin_lock_irq test variants on
 * torture_raw_spinlock (per the annotations).
 * NOTE(review): bodies stripped to "{}" and ops initializers to "=;"
 * (syntax errors) in this extraction.
 */
static DEFINE_RAW_SPINLOCK(torture_raw_spinlock);

static int torture_raw_spin_lock_write_lock(int tid __maybe_unused)
__acquires(torture_raw_spinlock)
{}

static void torture_raw_spin_lock_write_unlock(int tid __maybe_unused)
__releases(torture_raw_spinlock)
{}

static struct lock_torture_ops raw_spin_lock_ops =;

static int torture_raw_spin_lock_write_lock_irq(int tid __maybe_unused)
__acquires(torture_raw_spinlock)
{}

static void torture_raw_spin_lock_write_unlock_irq(int tid __maybe_unused)
__releases(torture_raw_spinlock)
{}

static struct lock_torture_ops raw_spin_lock_irq_ops =;

/*
 * rwlock test variants (plain and _irq), with both write-side and
 * read-side entry points on torture_rwlock — the only family above
 * this point that exercises reader locking.
 * NOTE(review): bodies stripped to "{}" and ops initializers to "=;"
 * (syntax errors) in this extraction.
 */
static DEFINE_RWLOCK(torture_rwlock);

static int torture_rwlock_write_lock(int tid __maybe_unused)
__acquires(torture_rwlock)
{}

static void torture_rwlock_write_delay(struct torture_random_state *trsp)
{}

static void torture_rwlock_write_unlock(int tid __maybe_unused)
__releases(torture_rwlock)
{}

static int torture_rwlock_read_lock(int tid __maybe_unused)
__acquires(torture_rwlock)
{}

static void torture_rwlock_read_delay(struct torture_random_state *trsp)
{}

static void torture_rwlock_read_unlock(int tid __maybe_unused)
__releases(torture_rwlock)
{}

static struct lock_torture_ops rw_lock_ops =;

static int torture_rwlock_write_lock_irq(int tid __maybe_unused)
__acquires(torture_rwlock)
{}

static void torture_rwlock_write_unlock_irq(int tid __maybe_unused)
__releases(torture_rwlock)
{}

static int torture_rwlock_read_lock_irq(int tid __maybe_unused)
__acquires(torture_rwlock)
{}

static void torture_rwlock_read_unlock_irq(int tid __maybe_unused)
__releases(torture_rwlock)
{}

static struct lock_torture_ops rw_lock_irq_ops =;

/*
 * mutex test variant, including nested-lock support: an array of
 * MAX_NESTED_LOCKS mutexes with per-mutex lockdep class keys, and a
 * lockset bitmask selecting which of them a given iteration takes.
 * NOTE(review): bodies stripped to "{}" and the ops initializer to
 * "=;" (a syntax error) in this extraction.
 */
static DEFINE_MUTEX(torture_mutex);
static struct mutex torture_nested_mutexes[MAX_NESTED_LOCKS];
static struct lock_class_key nested_mutex_keys[MAX_NESTED_LOCKS];

static void torture_mutex_init(void)
{}

static int torture_mutex_nested_lock(int tid __maybe_unused,
				     u32 lockset)
{}

static int torture_mutex_lock(int tid __maybe_unused)
__acquires(torture_mutex)
{}

static void torture_mutex_delay(struct torture_random_state *trsp)
{}

static void torture_mutex_unlock(int tid __maybe_unused)
__releases(torture_mutex)
{}

static void torture_mutex_nested_unlock(int tid __maybe_unused,
					u32 lockset)
{}

static struct lock_torture_ops mutex_lock_ops =;

#include <linux/ww_mutex.h>
/*
 * The torture ww_mutexes should belong to the same lock class as
 * torture_ww_class to avoid lockdep problem. The ww_mutex_init()
 * function is called for initialization to ensure that.
 */
/*
 * Wound/wait mutex test variant: three ww_mutexes sharing
 * torture_ww_class (see the comment above), acquired and released
 * together per the triple __acquires/__releases annotations, with a
 * per-writer ww_acquire_ctx array.
 * NOTE(review): bodies stripped to "{}" and the ops initializer to
 * "=;" (a syntax error) in this extraction.
 */
static DEFINE_WD_CLASS(torture_ww_class);
static struct ww_mutex torture_ww_mutex_0, torture_ww_mutex_1, torture_ww_mutex_2;
static struct ww_acquire_ctx *ww_acquire_ctxs;

static void torture_ww_mutex_init(void)
{}

static void torture_ww_mutex_exit(void)
{}

static int torture_ww_mutex_lock(int tid)
__acquires(torture_ww_mutex_0)
__acquires(torture_ww_mutex_1)
__acquires(torture_ww_mutex_2)
{}

static void torture_ww_mutex_unlock(int tid)
__releases(torture_ww_mutex_0)
__releases(torture_ww_mutex_1)
__releases(torture_ww_mutex_2)
{}

static struct lock_torture_ops ww_mutex_lock_ops =;

/*
 * rt_mutex test variant, compiled only when CONFIG_RT_MUTEXES is set.
 * Mirrors the mutex family above, including nested-lock support and an
 * rt_mutex-specific rt-boost hook.
 * NOTE(review): bodies stripped to "{}" and the ops initializer to
 * "=;" (a syntax error) in this extraction.
 */
#ifdef CONFIG_RT_MUTEXES
static DEFINE_RT_MUTEX(torture_rtmutex);
static struct rt_mutex torture_nested_rtmutexes[MAX_NESTED_LOCKS];
static struct lock_class_key nested_rtmutex_keys[MAX_NESTED_LOCKS];

static void torture_rtmutex_init(void)
{}

static int torture_rtmutex_nested_lock(int tid __maybe_unused,
				       u32 lockset)
{}

static int torture_rtmutex_lock(int tid __maybe_unused)
__acquires(torture_rtmutex)
{}

static void torture_rtmutex_delay(struct torture_random_state *trsp)
{}

static void torture_rtmutex_unlock(int tid __maybe_unused)
__releases(torture_rtmutex)
{}

static void torture_rt_boost_rtmutex(struct torture_random_state *trsp)
{}

static void torture_rtmutex_nested_unlock(int tid __maybe_unused,
					  u32 lockset)
{}

static struct lock_torture_ops rtmutex_lock_ops =;
#endif

/*
 * rw_semaphore test variant with write-side and read-side entry points
 * on torture_rwsem (per the annotations).
 * NOTE(review): bodies stripped to "{}" and the ops initializer to
 * "=;" (a syntax error) in this extraction.
 */
static DECLARE_RWSEM(torture_rwsem);
static int torture_rwsem_down_write(int tid __maybe_unused)
__acquires(torture_rwsem)
{}

static void torture_rwsem_write_delay(struct torture_random_state *trsp)
{}

static void torture_rwsem_up_write(int tid __maybe_unused)
__releases(torture_rwsem)
{}

static int torture_rwsem_down_read(int tid __maybe_unused)
__acquires(torture_rwsem)
{}

static void torture_rwsem_read_delay(struct torture_random_state *trsp)
{}

static void torture_rwsem_up_read(int tid __maybe_unused)
__releases(torture_rwsem)
{}

static struct lock_torture_ops rwsem_lock_ops =;

#include <linux/percpu-rwsem.h>
/*
 * percpu_rw_semaphore test variant on pcpu_rwsem, with init/exit hooks
 * (percpu rwsems require explicit runtime init/teardown, hence the
 * dedicated _init and _exit functions unlike the static-initializer
 * families above).
 * NOTE(review): bodies stripped to "{}" and the ops initializer to
 * "=;" (a syntax error) in this extraction.
 */
static struct percpu_rw_semaphore pcpu_rwsem;

static void torture_percpu_rwsem_init(void)
{}

static void torture_percpu_rwsem_exit(void)
{}

static int torture_percpu_rwsem_down_write(int tid __maybe_unused)
__acquires(pcpu_rwsem)
{}

static void torture_percpu_rwsem_up_write(int tid __maybe_unused)
__releases(pcpu_rwsem)
{}

static int torture_percpu_rwsem_down_read(int tid __maybe_unused)
__acquires(pcpu_rwsem)
{}

static void torture_percpu_rwsem_up_read(int tid __maybe_unused)
__releases(pcpu_rwsem)
{}

static struct lock_torture_ops percpu_rwsem_lock_ops =;

/*
 * Lock torture writer kthread.  Repeatedly acquires and releases
 * the lock, checking for duplicate acquisitions.
 */
/*
 * NOTE(review): every function body from here to the end of the file
 * was stripped to "{}" in this extraction; the comments describe the
 * declared intent only.
 */
static int lock_torture_writer(void *arg)
{}

/*
 * Lock torture reader kthread.  Repeatedly acquires and releases
 * the reader lock.
 */
static int lock_torture_reader(void *arg)
{}

/*
 * Create a lock-torture-statistics message in the specified buffer.
 */
static void __torture_print_stats(char *page,
				  struct lock_stress_stats *statp, bool write)
{}

/*
 * Print torture statistics.  Caller must ensure that there is only one
 * call to this function at a given time!!!  This is normally accomplished
 * by relying on the module system to only have one copy of the module
 * loaded, and then by giving the lock_torture_stats kthread full control
 * (or the init/cleanup functions when lock_torture_stats thread is not
 * running).
 */
static void lock_torture_stats_print(void)
{}

/*
 * Periodically prints torture statistics, if periodic statistics printing
 * was specified via the stat_interval module parameter.
 *
 * No need to worry about fullstop here, since this one doesn't reference
 * volatile state or register callbacks.
 */
static int lock_torture_stats(void *arg)
{}


// Log the module parameters for the selected ops vector, tagged so the
// init/cleanup origin of the message is identifiable.
static inline void
lock_torture_print_module_parms(struct lock_torture_ops *cur_ops,
				const char *tag)
{}

// If requested, maintain call_rcu() chains to keep a grace period always
// in flight.  These increase the probability of getting an RCU CPU stall
// warning and associated diagnostics when a locking primitive stalls.

// Callback that re-posts itself to keep its chain self-propagating.
static void call_rcu_chain_cb(struct rcu_head *rhp)
{}

// Start the requested number of call_rcu() chains.
static int call_rcu_chain_init(void)
{}

// Stop all of the call_rcu() chains.
static void call_rcu_chain_cleanup(void)
{}

// Module teardown; also forward-declared near the top of the file so
// early code can reference it.
static void lock_torture_cleanup(void)
{}

// Module entry point: selects the ops vector named by torture_type and
// spawns the writer/reader/stats kthreads (body not visible here).
static int __init lock_torture_init(void)
{}

/*
 * Register entry/exit points.  The module_init() argument was stripped;
 * lock_torture_init() is the only __init function in this file and
 * pairs with the lock_torture_cleanup() exit handler below.
 */
module_init(lock_torture_init);
module_exit(lock_torture_cleanup);