#define pr_fmt(fmt) …
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kthread.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/notifier.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/rcupdate_trace.h>
#include <linux/reboot.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/stat.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/torture.h>
#include <linux/types.h>
#include "rcu.h"
#define SCALE_FLAG …
#define SCALEOUT(s, x...) …
#define VERBOSE_SCALEOUT(s, x...) …
static atomic_t verbose_batch_ctr;
#define VERBOSE_SCALEOUT_BATCH(s, x...) …
#define SCALEOUT_ERRSTRING(s, x...) …
MODULE_DESCRIPTION(…) …;
MODULE_LICENSE(…) …;
MODULE_AUTHOR(…) …;
static char *scale_type = …;
module_param(scale_type, charp, 0444);
MODULE_PARM_DESC(…) …;
torture_param(int, verbose, 0, "Enable verbose debugging printk()s");
torture_param(int, verbose_batched, 0, "Batch verbose debugging printk()s");
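// Wait until there are multiple CPUs before starting test.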
torture_param(int, holdoff, IS_BUILTIN(CONFIG_RCU_REF_SCALE_TEST) ? 10 : 0,
"Holdoff time before test start (s)");
torture_param(long, lookup_instances, 0, "Number of typesafe_lookup structures.");
torture_param(long, loops, 10000, "Number of loops per experiment.");
torture_param(int, nreaders, -1, "Number of readers, -1 for 75% of CPUs.");
torture_param(int, nruns, 30, "Number of experiments to run.");
torture_param(int, readdelay, 0, "Read-side delay in nanoseconds.");
#ifdef MODULE
#define REFSCALE_SHUTDOWN …
#else
#define REFSCALE_SHUTDOWN …
#endif
torture_param(bool, shutdown, REFSCALE_SHUTDOWN,
"Shutdown at end of scalability tests.");
struct reader_task { … };
static struct task_struct *shutdown_task;
static wait_queue_head_t shutdown_wq;
static struct task_struct *main_task;
static wait_queue_head_t main_wq;
static int shutdown_start;
static struct reader_task *reader_tasks;
static atomic_t nreaders_exp;
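// Counters used to synchronize reader startup, warmup, and cooldown.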
static atomic_t n_init;
static atomic_t n_started;
static atomic_t n_warmedup;
static atomic_t n_cooleddown;
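// Track which experiment is currently running.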
static int exp_idx;
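// Operations vector for selecting different types of tests.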
struct ref_scale_ops { … };
static struct ref_scale_ops *cur_ops;
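// Apply the specified microsecond- and then nanosecond-scale delay.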
static void un_delay(const int udl, const int ndl)
{ … }
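// Definitions for RCU ref scale testing.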
static void ref_rcu_read_section(const int nloops)
{ … }
static void ref_rcu_delay_section(const int nloops, const int udl, const int ndl)
{ … }
static bool rcu_sync_scale_init(void)
{ … }
static struct ref_scale_ops rcu_ops = …;
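// Definitions for SRCU ref scale testing.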
DEFINE_STATIC_SRCU(…);
static struct srcu_struct *srcu_ctlp = …;
static void srcu_ref_scale_read_section(const int nloops)
{ … }
static void srcu_ref_scale_delay_section(const int nloops, const int udl, const int ndl)
{ … }
static struct ref_scale_ops srcu_ops = …;
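// Definitions for RCU Tasks ref scale testing, stubbed out when CONFIG_TASKS_RCU is not set.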
#ifdef CONFIG_TASKS_RCU
static void rcu_tasks_ref_scale_read_section(const int nloops)
{ … }
static void rcu_tasks_ref_scale_delay_section(const int nloops, const int udl, const int ndl)
{ … }
static struct ref_scale_ops rcu_tasks_ops = …;
#define RCU_TASKS_OPS …
#else
#define RCU_TASKS_OPS
#endif
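// Definitions for RCU Tasks Trace ref scale testing, stubbed out when CONFIG_TASKS_TRACE_RCU is not set.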
#ifdef CONFIG_TASKS_TRACE_RCU
static void rcu_trace_ref_scale_read_section(const int nloops)
{ … }
static void rcu_trace_ref_scale_delay_section(const int nloops, const int udl, const int ndl)
{ … }
static struct ref_scale_ops rcu_trace_ops = …;
#define RCU_TRACE_OPS …
#else
#define RCU_TRACE_OPS
#endif
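// Definitions for global reference-count ref scale testing.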
static atomic_t refcnt;
static void ref_refcnt_section(const int nloops)
{ … }
static void ref_refcnt_delay_section(const int nloops, const int udl, const int ndl)
{ … }
static struct ref_scale_ops refcnt_ops = …;
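// Definitions for rwlock ref scale testing.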
static rwlock_t test_rwlock;
static bool ref_rwlock_init(void)
{ … }
static void ref_rwlock_section(const int nloops)
{ … }
static void ref_rwlock_delay_section(const int nloops, const int udl, const int ndl)
{ … }
static struct ref_scale_ops rwlock_ops = …;
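// Definitions for rwsem ref scale testing.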
static struct rw_semaphore test_rwsem;
static bool ref_rwsem_init(void)
{ … }
static void ref_rwsem_section(const int nloops)
{ … }
static void ref_rwsem_delay_section(const int nloops, const int udl, const int ndl)
{ … }
static struct ref_scale_ops rwsem_ops = …;
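// Definitions for global spinlock ref scale testing, with and without interrupt disabling.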
static DEFINE_RAW_SPINLOCK(test_lock);
static void ref_lock_section(const int nloops)
{ … }
static void ref_lock_delay_section(const int nloops, const int udl, const int ndl)
{ … }
static struct ref_scale_ops lock_ops = …;
static void ref_lock_irq_section(const int nloops)
{ … }
static void ref_lock_irq_delay_section(const int nloops, const int udl, const int ndl)
{ … }
static struct ref_scale_ops lock_irq_ops = …;
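// Definitions for acquire-release ref scale testing on a per-CPU variable.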
static DEFINE_PER_CPU(unsigned long, test_acqrel);
static void ref_acqrel_section(const int nloops)
{ … }
static void ref_acqrel_delay_section(const int nloops, const int udl, const int ndl)
{ … }
static struct ref_scale_ops acqrel_ops = …;
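// Definitions for clock-read ref scale testing.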
static volatile u64 stopopts;
static void ref_clock_section(const int nloops)
{ … }
static void ref_clock_delay_section(const int nloops, const int udl, const int ndl)
{ … }
static struct ref_scale_ops clock_ops = …;
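// Definitions for jiffies-read ref scale testing.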
static void ref_jiffies_section(const int nloops)
{ … }
static void ref_jiffies_delay_section(const int nloops, const int udl, const int ndl)
{ … }
static struct ref_scale_ops jiffies_ops = …;
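// Methods leveraging SLAB_TYPESAFE_BY_RCU.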
struct refscale_typesafe { … };
static struct kmem_cache *typesafe_kmem_cachep;
static struct refscale_typesafe **rtsarray;
static long rtsarray_size;
static DEFINE_TORTURE_RANDOM_PERCPU(refscale_rand);
static bool (*rts_acquire)(struct refscale_typesafe *rtsp, unsigned int *start);
static bool (*rts_release)(struct refscale_typesafe *rtsp, unsigned int start);
static bool typesafe_ref_acquire(struct refscale_typesafe *rtsp, unsigned int *start)
{ … }
static bool typesafe_ref_release(struct refscale_typesafe *rtsp, unsigned int start)
{ … }
static bool typesafe_lock_acquire(struct refscale_typesafe *rtsp, unsigned int *start)
{ … }
static bool typesafe_lock_release(struct refscale_typesafe *rtsp, unsigned int start)
{ … }
static bool typesafe_seqlock_acquire(struct refscale_typesafe *rtsp, unsigned int *start)
{ … }
static bool typesafe_seqlock_release(struct refscale_typesafe *rtsp, unsigned int start)
{ … }
static void typesafe_delay_section(const int nloops, const int udl, const int ndl)
{ … }
static void typesafe_read_section(const int nloops)
{ … }
static struct refscale_typesafe *typesafe_alloc_one(void)
{ … }
static void refscale_typesafe_ctor(void *rtsp_in)
{ … }
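// Forward declarations so that typesafe_init() can tell which of the typesafe ops is in use.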
static struct ref_scale_ops typesafe_ref_ops;
static struct ref_scale_ops typesafe_lock_ops;
static struct ref_scale_ops typesafe_seqlock_ops;
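// Initialize for a typesafe test.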
static bool typesafe_init(void)
{ … }
static void typesafe_cleanup(void)
{ … }
static struct ref_scale_ops typesafe_ref_ops = …;
static struct ref_scale_ops typesafe_lock_ops = …;
static struct ref_scale_ops typesafe_seqlock_ops = …;
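// Do one read-side critical section or delay section for the current experiment, as selected by readdelay.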
static void rcu_scale_one_reader(void)
{ … }
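// Reader kthread: wait to be told to start, then time rcu_scale_one_reader() for each experiment.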
static int
ref_scale_reader(void *arg)
{ … }
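// Reset the per-reader duration measurements between experiments.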
static void reset_readers(void)
{ … }
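// Print the per-reader results and return the sum of all readers' durations.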
static u64 process_durations(int n)
{ … }
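// Main orchestrator kthread: run the experiments, wait on the readers, and tabulate the results.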
static int main_func(void *arg)
{ … }
static void
ref_scale_print_module_parms(struct ref_scale_ops *cur_ops, const char *tag)
{ … }
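// Stop the reader and main kthreads and clean up after the selected test.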
static void
ref_scale_cleanup(void)
{ … }
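// Shutdown kthread: wait for the test to complete, then clean up and power off the system.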
static int
ref_scale_shutdown(void *arg)
{ … }
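// Module initialization: select the ops matching scale_type and spawn the reader, main, and (if requested) shutdown kthreads.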
static int __init
ref_scale_init(void)
{ … }
module_init(…) …;
module_exit(ref_scale_cleanup);