// linux/kernel/rcu/refscale.c

// SPDX-License-Identifier: GPL-2.0+
//
// Scalability test comparing RCU vs other mechanisms
// for acquiring references on objects.
//
// Copyright (C) Google, 2020.
//
// Author: Joel Fernandes <[email protected]>

// Use the raw format string for pr_*() output; the SCALEOUT macros below
// add their own "%s" SCALE_FLAG prefix.  (The stripped copy expanded to
// nothing, which would delete every format string passed to pr_*().)
#define pr_fmt(fmt) fmt

#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kthread.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/notifier.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/rcupdate_trace.h>
#include <linux/reboot.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/stat.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/torture.h>
#include <linux/types.h>

#include "rcu.h"

// Tag prepended to all console output from this test.
#define SCALE_FLAG "-ref-scale: "

// Unconditional console output.
#define SCALEOUT(s, x...) \
	pr_alert("%s" SCALE_FLAG s, scale_type, ## x)

// Console output gated on the "verbose" module parameter.
#define VERBOSE_SCALEOUT(s, x...) \
	do { \
		if (verbose) \
			pr_alert("%s" SCALE_FLAG s "\n", scale_type, ## x); \
	} while (0)

static atomic_t verbose_batch_ctr;

// Rate-limited verbose output: print only every verbose_batched-th
// message (and briefly sleep first) to avoid flooding the console.
#define VERBOSE_SCALEOUT_BATCH(s, x...)					\
do {									\
	if (verbose &&							\
	    (verbose_batched <= 0 ||					\
	     !(atomic_inc_return(&verbose_batch_ctr) % verbose_batched))) { \
		schedule_timeout_uninterruptible(1);			\
		pr_alert("%s" SCALE_FLAG s "\n", scale_type, ## x);	\
	}								\
} while (0)

// Error output, flagged with "!!!" so rcutorture scripting notices it.
#define SCALEOUT_ERRSTRING(s, x...) pr_alert("%s" SCALE_FLAG "!!! " s "\n", scale_type, ## x)

MODULE_DESCRIPTION();
MODULE_LICENSE();
MODULE_AUTHOR();

static char *scale_type =;
module_param(scale_type, charp, 0444);
MODULE_PARM_DESC();

torture_param(int, verbose, 0, "Enable verbose debugging printk()s");
torture_param(int, verbose_batched, 0, "Batch verbose debugging printk()s");

// Wait until there are multiple CPUs before starting test.
torture_param(int, holdoff, IS_BUILTIN(CONFIG_RCU_REF_SCALE_TEST) ? 10 : 0,
	      "Holdoff time before test start (s)");
// Number of typesafe_lookup structures, that is, the degree of concurrency.
torture_param(long, lookup_instances, 0, "Number of typesafe_lookup structures.");
// Number of loops per experiment, all readers execute operations concurrently.
torture_param(long, loops, 10000, "Number of loops per experiment.");
// Number of readers, with -1 defaulting to about 75% of the CPUs.
torture_param(int, nreaders, -1, "Number of readers, -1 for 75% of CPUs.");
// Number of runs.
torture_param(int, nruns, 30, "Number of experiments to run.");
// Reader delay in nanoseconds, 0 for no delay.
torture_param(int, readdelay, 0, "Read-side delay in nanoseconds.");

// Default for the "shutdown" module parameter (used as a bool below):
// when built as a module, leave the system running afterwards; when
// built in (typically a dedicated test guest), power off when done.
#ifdef MODULE
#define REFSCALE_SHUTDOWN 0
#else
#define REFSCALE_SHUTDOWN 1
#endif

torture_param(bool, shutdown, REFSCALE_SHUTDOWN,
	      "Shutdown at end of scalability tests.");

// Per-reader-kthread state.  NOTE(review): the member list is stripped in
// this copy; upstream it holds the kthread's task_struct pointer, a
// start flag, a wait queue, and the last measured duration — restore
// from upstream kernel/rcu/refscale.c before building.
struct reader_task {};

static struct task_struct *shutdown_task;
static wait_queue_head_t shutdown_wq;

static struct task_struct *main_task;
static wait_queue_head_t main_wq;
static int shutdown_start;

static struct reader_task *reader_tasks;

// Number of readers that are part of the current experiment.
static atomic_t nreaders_exp;

// Use to wait for all threads to start.
static atomic_t n_init;
static atomic_t n_started;
static atomic_t n_warmedup;
static atomic_t n_cooleddown;

// Track which experiment is currently running.
static int exp_idx;

// Operations vector for selecting different types of tests.
// Operations vector for selecting different types of tests.  Member
// signatures reconstructed from the per-flavor functions in this file
// (e.g. rcu_sync_scale_init(), ref_rcu_read_section(),
// ref_rcu_delay_section(), typesafe_cleanup()).
struct ref_scale_ops {
	bool (*init)(void);
	void (*cleanup)(void);
	void (*readsection)(const int nloops);
	void (*delaysection)(const int nloops, const int udl, const int ndl);
	const char *name;
};

static struct ref_scale_ops *cur_ops;

// Insert a read-side delay of udl microseconds plus ndl nanoseconds
// (presumably skipping each when zero — TODO confirm).
// NOTE(review): body stripped in this copy; restore from upstream.
static void un_delay(const int udl, const int ndl)
{}

// Do nloops empty RCU read-side critical sections (presumably
// rcu_read_lock()/rcu_read_unlock() pairs — TODO confirm).
// NOTE(review): body stripped in this copy; restore from upstream.
static void ref_rcu_read_section(const int nloops)
{}

// As ref_rcu_read_section(), but with a udl/ndl delay inserted inside
// each critical section (presumably via un_delay()).
// NOTE(review): body stripped in this copy; restore from upstream.
static void ref_rcu_delay_section(const int nloops, const int udl, const int ndl)
{}

// Per-flavor init hook shared by flavors needing no setup; presumably
// just returns true.  NOTE(review): body stripped in this copy.
static bool rcu_sync_scale_init(void)
{}

static struct ref_scale_ops rcu_ops =;

// Definitions for SRCU ref scale testing.
DEFINE_STATIC_SRCU();
static struct srcu_struct *srcu_ctlp =;

// Do nloops empty SRCU read-side critical sections on srcu_ctlp
// (presumably srcu_read_lock()/srcu_read_unlock() pairs).
// NOTE(review): body stripped in this copy; restore from upstream.
static void srcu_ref_scale_read_section(const int nloops)
{}

// As srcu_ref_scale_read_section(), but with a udl/ndl delay inside
// each critical section.  NOTE(review): body stripped in this copy.
static void srcu_ref_scale_delay_section(const int nloops, const int udl, const int ndl)
{}

static struct ref_scale_ops srcu_ops =;

#ifdef CONFIG_TASKS_RCU

// Definitions for RCU Tasks ref scale testing: Empty read markers.
// These definitions also work for RCU Rude readers.
// RCU Tasks readers need no explicit markers, so this presumably just
// spins nloops times doing nothing.  NOTE(review): body stripped.
static void rcu_tasks_ref_scale_read_section(const int nloops)
{}

// As rcu_tasks_ref_scale_read_section(), but with a udl/ndl delay in
// each pass.  NOTE(review): body stripped in this copy.
static void rcu_tasks_ref_scale_delay_section(const int nloops, const int udl, const int ndl)
{}

static struct ref_scale_ops rcu_tasks_ops =;

// Contributes &rcu_tasks_ops to the scale_ops[] list when
// CONFIG_TASKS_RCU is enabled (the #else branch expands to nothing).
#define RCU_TASKS_OPS &rcu_tasks_ops,

#else // #ifdef CONFIG_TASKS_RCU

#define RCU_TASKS_OPS

#endif // #else // #ifdef CONFIG_TASKS_RCU

#ifdef CONFIG_TASKS_TRACE_RCU

// Definitions for RCU Tasks Trace ref scale testing.
// Do nloops empty RCU Tasks Trace read-side critical sections
// (presumably rcu_read_lock_trace()/rcu_read_unlock_trace() pairs).
// NOTE(review): body stripped in this copy; restore from upstream.
static void rcu_trace_ref_scale_read_section(const int nloops)
{}

// As rcu_trace_ref_scale_read_section(), but with a udl/ndl delay
// inside each critical section.  NOTE(review): body stripped.
static void rcu_trace_ref_scale_delay_section(const int nloops, const int udl, const int ndl)
{}

static struct ref_scale_ops rcu_trace_ops =;

// Contributes &rcu_trace_ops to the scale_ops[] list when
// CONFIG_TASKS_TRACE_RCU is enabled (the #else branch is empty).
#define RCU_TRACE_OPS &rcu_trace_ops,

#else // #ifdef CONFIG_TASKS_TRACE_RCU

#define RCU_TRACE_OPS

#endif // #else // #ifdef CONFIG_TASKS_TRACE_RCU

// Definitions for reference count
static atomic_t refcnt;

// Do nloops acquire/release pairs on the global atomic refcnt
// (presumably atomic_inc()/atomic_dec()).  NOTE(review): body stripped.
static void ref_refcnt_section(const int nloops)
{}

// As ref_refcnt_section(), but with a udl/ndl delay while the
// reference is held.  NOTE(review): body stripped in this copy.
static void ref_refcnt_delay_section(const int nloops, const int udl, const int ndl)
{}

static struct ref_scale_ops refcnt_ops =;

// Definitions for rwlock
static rwlock_t test_rwlock;

// One-time setup for the rwlock flavor: presumably initializes
// test_rwlock and returns true.  NOTE(review): body stripped.
static bool ref_rwlock_init(void)
{}

// Do nloops read_lock()/read_unlock() pairs on test_rwlock —
// presumably; body stripped in this copy.
static void ref_rwlock_section(const int nloops)
{}

// As ref_rwlock_section(), but with a udl/ndl delay while the read
// lock is held.  NOTE(review): body stripped in this copy.
static void ref_rwlock_delay_section(const int nloops, const int udl, const int ndl)
{}

static struct ref_scale_ops rwlock_ops =;

// Definitions for rwsem
static struct rw_semaphore test_rwsem;

// One-time setup for the rwsem flavor: presumably initializes
// test_rwsem and returns true.  NOTE(review): body stripped.
static bool ref_rwsem_init(void)
{}

// Do nloops down_read()/up_read() pairs on test_rwsem — presumably;
// body stripped in this copy.
static void ref_rwsem_section(const int nloops)
{}

// As ref_rwsem_section(), but with a udl/ndl delay while the
// semaphore is read-held.  NOTE(review): body stripped in this copy.
static void ref_rwsem_delay_section(const int nloops, const int udl, const int ndl)
{}

static struct ref_scale_ops rwsem_ops =;

// Definitions for global spinlock
static DEFINE_RAW_SPINLOCK(test_lock);

// Do nloops raw spinlock acquire/release pairs on test_lock —
// presumably; body stripped in this copy.
static void ref_lock_section(const int nloops)
{}

// As ref_lock_section(), but with a udl/ndl delay while the lock is
// held.  NOTE(review): body stripped in this copy.
static void ref_lock_delay_section(const int nloops, const int udl, const int ndl)
{}

static struct ref_scale_ops lock_ops =;

// Definitions for global irq-save spinlock

// Do nloops irq-saving acquire/release pairs on test_lock —
// presumably raw_spin_lock_irqsave()/raw_spin_unlock_irqrestore().
// NOTE(review): body stripped in this copy.
static void ref_lock_irq_section(const int nloops)
{}

// As ref_lock_irq_section(), but with a udl/ndl delay while the lock
// is held.  NOTE(review): body stripped in this copy.
static void ref_lock_irq_delay_section(const int nloops, const int udl, const int ndl)
{}

static struct ref_scale_ops lock_irq_ops =;

// Definitions acquire-release.
static DEFINE_PER_CPU(unsigned long, test_acqrel);

// Do nloops acquire-load/release-store pairs on this CPU's
// test_acqrel counter — presumably smp_load_acquire()/
// smp_store_release().  NOTE(review): body stripped in this copy.
static void ref_acqrel_section(const int nloops)
{}

// As ref_acqrel_section(), but with a udl/ndl delay between the
// acquire and the release.  NOTE(review): body stripped in this copy.
static void ref_acqrel_delay_section(const int nloops, const int udl, const int ndl)
{}

static struct ref_scale_ops acqrel_ops =;

static volatile u64 stopopts;

// Do nloops fine-grained clock reads, storing the last into the
// volatile stopopts to keep the compiler from eliding them —
// presumably ktime_get_real_fast_ns().  NOTE(review): body stripped.
static void ref_clock_section(const int nloops)
{}

// As ref_clock_section(), but with a udl/ndl delay each pass.
// NOTE(review): body stripped in this copy.
static void ref_clock_delay_section(const int nloops, const int udl, const int ndl)
{}

static struct ref_scale_ops clock_ops =;

// Do nloops reads of the jiffies counter, accumulating into the
// volatile stopopts — presumably; body stripped in this copy.
static void ref_jiffies_section(const int nloops)
{}

// As ref_jiffies_section(), but with a udl/ndl delay each pass.
// NOTE(review): body stripped in this copy.
static void ref_jiffies_delay_section(const int nloops, const int udl, const int ndl)
{}

static struct ref_scale_ops jiffies_ops =;

////////////////////////////////////////////////////////////////////////
//
// Methods leveraging SLAB_TYPESAFE_BY_RCU.
//

// Item to look up in a typesafe manner.  Array of pointers to these.
// Item to look up in a typesafe manner.  Array of pointers to these.
// NOTE(review): member list stripped in this copy; upstream it carries
// a per-object refcount, spinlock, and seqlock (one per acquire/release
// strategy below) plus payload fields checked by the delay section —
// restore from upstream kernel/rcu/refscale.c before building.
struct refscale_typesafe {};

static struct kmem_cache *typesafe_kmem_cachep;
static struct refscale_typesafe **rtsarray;
static long rtsarray_size;
static DEFINE_TORTURE_RANDOM_PERCPU(refscale_rand);
static bool (*rts_acquire)(struct refscale_typesafe *rtsp, unsigned int *start);
static bool (*rts_release)(struct refscale_typesafe *rtsp, unsigned int start);

// Conditionally acquire an explicit in-structure reference count.
// Conditionally acquire an explicit in-structure reference count.
// Returns true on success — presumably; body stripped in this copy.
static bool typesafe_ref_acquire(struct refscale_typesafe *rtsp, unsigned int *start)
{}

// Unconditionally release an explicit in-structure reference count.
// Unconditionally release an explicit in-structure reference count.
// NOTE(review): body stripped in this copy; restore from upstream.
static bool typesafe_ref_release(struct refscale_typesafe *rtsp, unsigned int start)
{}

// Unconditionally acquire an explicit in-structure spinlock.
// Unconditionally acquire an explicit in-structure spinlock.
// NOTE(review): body stripped in this copy; restore from upstream.
static bool typesafe_lock_acquire(struct refscale_typesafe *rtsp, unsigned int *start)
{}

// Unconditionally release an explicit in-structure spinlock.
// Unconditionally release an explicit in-structure spinlock.
// NOTE(review): body stripped in this copy; restore from upstream.
static bool typesafe_lock_release(struct refscale_typesafe *rtsp, unsigned int start)
{}

// Unconditionally acquire an explicit in-structure sequence lock.
// Unconditionally acquire an explicit in-structure sequence lock,
// presumably recording the sequence number via *start for the matching
// release.  NOTE(review): body stripped in this copy.
static bool typesafe_seqlock_acquire(struct refscale_typesafe *rtsp, unsigned int *start)
{}

// Conditionally release an explicit in-structure sequence lock.  Return
// true if this release was successful, that is, if no retry is required.
// Conditionally release an explicit in-structure sequence lock.  Return
// true if this release was successful, that is, if no retry is required.
// NOTE(review): body stripped in this copy; restore from upstream.
static bool typesafe_seqlock_release(struct refscale_typesafe *rtsp, unsigned int start)
{}

// Do a read-side critical section with the specified delay in
// microseconds and nanoseconds inserted so as to increase probability
// of failure.
// Do a read-side critical section with the specified udl/ndl delay
// inserted so as to increase probability of failure: presumably picks
// random rtsarray[] entries and brackets each access with rts_acquire/
// rts_release.  NOTE(review): body stripped in this copy.
static void typesafe_delay_section(const int nloops, const int udl, const int ndl)
{}

// Because the acquisition and release methods are expensive, there
// is no point in optimizing away the un_delay() function's two checks.
// Thus simply define typesafe_read_section() as a simple wrapper around
// typesafe_delay_section().
// Zero-delay wrapper around typesafe_delay_section() (see the comment
// block above).  NOTE(review): body stripped in this copy.
static void typesafe_read_section(const int nloops)
{}

// Allocate and initialize one refscale_typesafe structure.
// Allocate and initialize one refscale_typesafe structure, presumably
// from typesafe_kmem_cachep.  NOTE(review): body stripped in this copy.
static struct refscale_typesafe *typesafe_alloc_one(void)
{}

// Slab-allocator constructor for refscale_typesafe structures created
// out of a new slab of system memory.
// Slab-allocator constructor for refscale_typesafe structures created
// out of a new slab of system memory, presumably initializing the
// in-structure locks.  NOTE(review): body stripped in this copy.
static void refscale_typesafe_ctor(void *rtsp_in)
{}

static struct ref_scale_ops typesafe_ref_ops;
static struct ref_scale_ops typesafe_lock_ops;
static struct ref_scale_ops typesafe_seqlock_ops;

// Initialize for a typesafe test.
// Initialize for a typesafe test: presumably creates the slab cache,
// allocates rtsarray (sized by the lookup_instances module parameter),
// and selects rts_acquire/rts_release by comparing cur_ops against the
// three typesafe_*_ops addresses (see comment at L345).
// NOTE(review): body stripped in this copy; restore from upstream.
static bool typesafe_init(void)
{}

// Clean up after a typesafe test.
// Clean up after a typesafe test: presumably frees rtsarray entries,
// the array itself, and the slab cache.  NOTE(review): body stripped.
static void typesafe_cleanup(void)
{}

// The typesafe_init() function distinguishes these structures by address.
static struct ref_scale_ops typesafe_ref_ops =;

static struct ref_scale_ops typesafe_lock_ops =;

static struct ref_scale_ops typesafe_seqlock_ops =;

// Run one reader pass: presumably dispatches to cur_ops->readsection
// (or ->delaysection when readdelay is nonzero) for "loops" iterations.
// NOTE(review): body stripped in this copy; restore from upstream.
static void rcu_scale_one_reader(void)
{}

// Reader kthread.  Repeatedly does empty RCU read-side
// critical section, minimizing update-side interference.
// Reader kthread.  Repeatedly does empty RCU read-side
// critical section, minimizing update-side interference.  Presumably
// synchronizes with main_func() via n_started/n_warmedup/n_cooleddown
// and records its duration in its reader_task slot.
// NOTE(review): body stripped in this copy; restore from upstream.
static int
ref_scale_reader(void *arg)
{}

// Reset per-reader state between experiments — presumably clears each
// reader_task's recorded duration.  NOTE(review): body stripped.
static void reset_readers(void)
{}

// Print the results of each reader and return the sum of all their durations.
// Print the results of each of the first n readers and return the sum
// of all their durations.  NOTE(review): body stripped in this copy.
static u64 process_durations(int n)
{}

// The main_func is the main orchestrator, it performs a bunch of
// experiments.  For every experiment, it orders all the readers
// involved to start and waits for them to finish the experiment. It
// then reads their timestamps and starts the next experiment. Each
// experiment progresses from 1 concurrent reader to N of them at which
// point all the timestamps are printed.
// Orchestrator kthread: runs nruns experiments, each waking the
// participating readers, waiting for completion, and collecting their
// timestamps (see the comment block above for the full protocol).
// NOTE(review): body stripped in this copy; restore from upstream.
static int main_func(void *arg)
{}

// Print the module parameters (verbose, holdoff, loops, nreaders,
// nruns, readdelay, ...) prefixed with the given tag — presumably.
// NOTE(review): body stripped in this copy; restore from upstream.
static void
ref_scale_print_module_parms(struct ref_scale_ops *cur_ops, const char *tag)
{}

// Module-exit path (see module_exit() below): presumably stops the
// reader, main, and shutdown kthreads, invokes cur_ops->cleanup, and
// frees reader_tasks.  NOTE(review): body stripped in this copy.
static void
ref_scale_cleanup(void)
{}

// Shutdown kthread.  Just waits to be awakened, then shuts down system.
// Shutdown kthread.  Just waits to be awakened, then shuts down system.
// Presumably sleeps on shutdown_wq until shutdown_start is set.
// NOTE(review): body stripped in this copy; restore from upstream.
static int
ref_scale_shutdown(void *arg)
{}

// Module-init path (see module_init() below): presumably matches
// scale_type against the available ref_scale_ops, applies the holdoff,
// and spawns the shutdown, reader, and main kthreads.
// NOTE(review): body stripped in this copy; restore from upstream.
static int __init
ref_scale_init(void)
{}

// Wire up the (only) init function defined above; cleanup mirrors it.
module_init(ref_scale_init);
module_exit(ref_scale_cleanup);