// SPDX-License-Identifier: GPL-2.0
/*
 * KCSAN test with various race scenarios to test runtime behaviour. Since
 * KCSAN's reports are obtained via the console, that is the output we verify.
 * Each test case checks for the presence (or absence) of generated reports.
 * Relies on the 'console' tracepoint to capture reports as they appear in the
 * kernel log.
 *
 * Makes use of KUnit for test organization, and the Torture framework for test
 * thread control.
 *
 * Copyright (C) 2020, Google LLC.
 * Author: Marco Elver <[email protected]>
 */

#define pr_fmt(fmt) "kcsan_test: " fmt

#include <kunit/test.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/jiffies.h>
#include <linux/kcsan-checks.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/seqlock.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/torture.h>
#include <linux/tracepoint.h>
#include <linux/types.h>
#include <trace/events/printk.h>

#define KCSAN_TEST_REQUIRES(test, cond)						\
	do { if (!(cond)) kunit_skip((test), "Test requires: " #cond); } while (0)

/* Use the compound read-write access type where the compiler instruments it. */
#ifdef CONFIG_CC_HAS_TSAN_COMPOUND_READ_BEFORE_WRITE
#define __KCSAN_ACCESS_RW(alt) (KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE)
#else
#define __KCSAN_ACCESS_RW(alt) (alt)
#endif

/* Points to current test-case memory access "kernels". */
static void (*access_kernels[2])(void);

static struct task_struct **threads; /* Lists of threads. */
static unsigned long end_time;       /* End time of test. */

/* Report as observed from console. */
static struct {
	spinlock_t lock;	/* Protects the captured lines below. */
	int nlines;		/* Number of lines captured for the current report. */
	char lines[3][512];	/* First lines of the report, as printed. */
} observed = {
	.lock = __SPIN_LOCK_UNLOCKED(observed.lock),
};

/* Set up the test checking loop. */
static __no_kcsan inline void
begin_test_checks(void (*func1)(void), void (*func2)(void))
{}

/* End test checking loop. */
static __no_kcsan inline bool
end_test_checks(bool stop)
{}

/*
 * Probe for console output: checks if a race was reported, and obtains observed
 * lines of interest.
 */
__no_kcsan
static void probe_console(void *ignore, const char *buf, size_t len)
{}
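
/*
 * Illustrative sketch (not the upstream implementation) of how such a probe
 * can detect and capture a report: KCSAN reports begin with a "BUG: KCSAN: "
 * line on the console, so the probe only needs to look for that marker and
 * stash the first lines for later matching. The example_* name is
 * hypothetical.
 */
static void __maybe_unused example_probe_console(void *ignore, const char *buf, size_t len)
{
	unsigned long flags;

	spin_lock_irqsave(&observed.lock, flags);
	if (strnstr(buf, "BUG: KCSAN: ", len) && observed.nlines == 0) {
		/* Start of a new report: capture the title line. */
		strscpy(observed.lines[observed.nlines++], buf,
			min(len + 1, sizeof(observed.lines[0])));
	} else if (observed.nlines > 0 && observed.nlines < ARRAY_SIZE(observed.lines)) {
		/* Continuation lines of the report currently being captured. */
		strscpy(observed.lines[observed.nlines++], buf,
			min(len + 1, sizeof(observed.lines[0])));
	}
	spin_unlock_irqrestore(&observed.lock, flags);
}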

/* Check if a report related to the test exists. */
__no_kcsan
static bool report_available(void)
{}

/* Report information we expect in a report. */
struct expect_report {
	/* Access information of both accesses expected to race. */
	struct {
		void *fn;	/* Function pointer to expected function of top frame. */
		void *addr;	/* Address of access; unchecked if NULL. */
		size_t size;	/* Size of access; unchecked if @addr is NULL. */
		int type;	/* Access type, see KCSAN_ACCESS definitions. */
	} access[2];
};

/* Check observed report matches information in @r. */
__no_kcsan
static bool __report_matches(const struct expect_report *r)
{}

static __always_inline const struct expect_report *
__report_set_scoped(struct expect_report *r, int accesses)
{}

__no_kcsan
static bool report_matches_any_reordered(struct expect_report *r)
{}

#ifdef CONFIG_KCSAN_WEAK_MEMORY
/* Due to reordering accesses, any access may appear as "(reordered)". */
#define report_matches report_matches_any_reordered
#else
#define report_matches(r) __report_matches(r)
#endif

/* ===== Test kernels ===== */

static long test_sink;
static long test_var;
/* @test_array should be large enough to fall into multiple watchpoint slots. */
static long test_array[3 * PAGE_SIZE / sizeof(long)];
static struct {
	long val[8];
} test_struct;
static long __data_racy test_data_racy;
static DEFINE_SEQLOCK(test_seqlock);
static DEFINE_SPINLOCK(test_spinlock);
static DEFINE_MUTEX(test_mutex);

/*
 * Helper to avoid compiler optimizing out reads, and to generate source values
 * for writes.
 */
__no_kcsan
static noinline void sink_value(long v) { WRITE_ONCE(test_sink, v); }

/*
 * Generates a delay and some accesses that enter the runtime but do not produce
 * data races.
 */
static noinline void test_delay(int iter)
{}

static noinline void test_kernel_read(void) {}

static noinline void test_kernel_write(void)
{}

static noinline void test_kernel_write_nochange(void) {}

/* Suffixed by value-change exception filter. */
static noinline void test_kernel_write_nochange_rcu(void) {}

static noinline void test_kernel_read_atomic(void)
{}

static noinline void test_kernel_write_atomic(void)
{}

static noinline void test_kernel_atomic_rmw(void)
{}
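
/*
 * Hedged sketch of the simplest kinds of access kernels: a plain read, a
 * plain write, and marked (READ_ONCE()/WRITE_ONCE()) accesses, which KCSAN
 * treats as atomic so that a race between two marked accesses produces no
 * report. The example_* names are illustrative only.
 */
static noinline void __maybe_unused example_kernel_read(void)
{
	sink_value(test_var);			/* plain read */
}

static noinline void __maybe_unused example_kernel_write(void)
{
	test_var = 42;				/* plain write */
}

static noinline void __maybe_unused example_kernel_read_marked(void)
{
	sink_value(READ_ONCE(test_var));	/* marked read, treated as atomic */
}

static noinline void __maybe_unused example_kernel_write_marked(void)
{
	WRITE_ONCE(test_var, 42);		/* marked write, treated as atomic */
}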

__no_kcsan
static noinline void test_kernel_write_uninstrumented(void) {}

static noinline void test_kernel_data_race(void) {}

static noinline void test_kernel_data_racy_qualifier(void) {}
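
/*
 * Hedged sketch of how intentionally racy accesses are annotated: the
 * data_race() macro marks a single expression, and the __data_racy type
 * qualifier (used for @test_data_racy above) marks every access to a
 * variable. Neither should produce a report. Illustrative only.
 */
static noinline void __maybe_unused example_kernel_annotated_races(void)
{
	data_race(test_var++);	/* expression-level annotation */
	test_data_racy++;	/* qualifier-level annotation */
}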

static noinline void test_kernel_assert_writer(void)
{}

static noinline void test_kernel_assert_access(void)
{}

#define TEST_CHANGE_BITS 0xff00ff00

static noinline void test_kernel_change_bits(void)
{}

static noinline void test_kernel_assert_bits_change(void)
{}

static noinline void test_kernel_assert_bits_nochange(void)
{}
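
/*
 * Hedged sketch of the ASSERT_EXCLUSIVE_*() annotations the kernels above
 * exercise: each claims that, at this point, no other CPU or thread writes
 * (or accesses at all, or changes the given bits of) the variable, and KCSAN
 * reports a race if the claim is violated. Illustrative only.
 */
static noinline void __maybe_unused example_kernel_asserts(void)
{
	ASSERT_EXCLUSIVE_WRITER(test_var);			/* no concurrent writers */
	ASSERT_EXCLUSIVE_ACCESS(test_var);			/* no concurrent accesses at all */
	ASSERT_EXCLUSIVE_BITS(test_var, TEST_CHANGE_BITS);	/* given bits must not change */
}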

/*
 * Scoped assertions do trigger anywhere in scope. However, the report should
 * still only point at the start of the scope.
 */
static noinline void test_enter_scope(void)
{}

static noinline void test_kernel_assert_writer_scoped(void)
{}

static noinline void test_kernel_assert_access_scoped(void)
{}
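
/*
 * Hedged sketch of a scoped assertion: the _SCOPED variants keep checking
 * until the end of the enclosing scope, but a resulting report still points
 * at the line where the scope was entered. Illustrative only.
 */
static noinline void __maybe_unused example_kernel_assert_scoped(void)
{
	ASSERT_EXCLUSIVE_WRITER_SCOPED(test_var);
	test_enter_scope();	/* racing writes anywhere up to here are reported */
}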

static noinline void test_kernel_rmw_array(void)
{}

static noinline void test_kernel_write_struct(void)
{}

static noinline void test_kernel_write_struct_part(void)
{}

static noinline void test_kernel_read_struct_zero_size(void)
{}

static noinline void test_kernel_jiffies_reader(void)
{}

static noinline void test_kernel_seqlock_reader(void)
{}

static noinline void test_kernel_seqlock_writer(void)
{}
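
/*
 * Hedged sketch of the seqlock reader/writer pattern the two kernels above
 * use: accesses inside a seqlock read-side retry loop race with the writer
 * by design, and KCSAN is expected to suppress reports for them.
 * Illustrative only.
 */
static noinline void __maybe_unused example_seqlock_reader(void)
{
	unsigned int seq;

	do {
		seq = read_seqbegin(&test_seqlock);
		sink_value(test_var);
	} while (read_seqretry(&test_seqlock, seq));
}

static noinline void __maybe_unused example_seqlock_writer(void)
{
	unsigned long flags;

	write_seqlock_irqsave(&test_seqlock, flags);
	test_var++;
	write_sequnlock_irqrestore(&test_seqlock, flags);
}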

static noinline void test_kernel_atomic_builtins(void)
{}
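
/*
 * Hedged sketch of the __atomic builtins the kernel above exercises; KCSAN
 * instruments these directly, so races on them must not be reported. As the
 * test-case comment further below notes, the builtins should not be used in
 * normal kernel code. Illustrative only.
 */
static noinline void __maybe_unused example_kernel_atomic_builtins(void)
{
	__atomic_store_n(&test_var, 42L, __ATOMIC_RELAXED);
	sink_value(__atomic_load_n(&test_var, __ATOMIC_RELAXED));
	__atomic_fetch_add(&test_var, 1, __ATOMIC_RELAXED);
}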

static noinline void test_kernel_xor_1bit(void)
{}

#define TEST_KERNEL_LOCKED(name, acquire, release)

TEST_KERNEL_LOCKED(with_memorder,
		   cmpxchg_acquire(flag, 0, 1) == 0,
		   smp_store_release(flag, 0));
TEST_KERNEL_LOCKED(wrong_memorder,
		   cmpxchg_relaxed(flag, 0, 1) == 0,
		   WRITE_ONCE(*flag, 0));
TEST_KERNEL_LOCKED(atomic_builtin_with_memorder,
		   __atomic_compare_exchange_n(flag, &v, 1, 0, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED),
		   __atomic_store_n(flag, 0, __ATOMIC_RELEASE));
TEST_KERNEL_LOCKED(atomic_builtin_wrong_memorder,
		   __atomic_compare_exchange_n(flag, &v, 1, 0, __ATOMIC_RELAXED, __ATOMIC_RELAXED),
		   __atomic_store_n(flag, 0, __ATOMIC_RELAXED));
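
/*
 * The body of TEST_KERNEL_LOCKED() is elided above; the following is a hedged
 * sketch of the kind of kernel such a macro plausibly generates: a minimal
 * test-and-set lock around a plain critical section. With the acquire/release
 * orderings of the "with_memorder" variants, KCSAN's weak memory modeling
 * must not flag the critical section; with the relaxed ("wrong_memorder")
 * variants, the missing barriers make it a reportable race. The example_*
 * names and @example_flag are hypothetical.
 */
static long example_flag;

static noinline void __maybe_unused example_kernel_locked(void)
{
	long *flag = &example_flag;

	if (cmpxchg_acquire(flag, 0, 1) != 0)
		return;			/* lock not acquired, skip */

	test_var++;			/* plain access inside the critical section */

	smp_store_release(flag, 0);	/* release the lock */
}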

/* ===== Test cases ===== */

/*
 * Tests that various barriers have the expected effect on internal state. Not
 * exhaustive on atomic_t operations. Unlike the selftest, this also checks for
 * too-strict barrier instrumentation; such cases can be tolerated, because
 * they do not cause false positives, but we should at least be aware of them.
 */
static void test_barrier_nothreads(struct kunit *test)
{}

/* Simple test with normal data race. */
__no_kcsan
static void test_basic(struct kunit *test)
{}
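
/*
 * Hedged sketch of the checking-loop pattern the test cases follow:
 * begin_test_checks() installs the two racing access kernels, the loop polls
 * the console-derived report against an expect_report until end_test_checks()
 * signals timeout (or early success), and the KUnit expectation checks the
 * outcome. The expectation shown is illustrative, not the exact upstream one.
 */
static void __maybe_unused example_test_pattern(struct kunit *test)
{
	struct expect_report expect = {
		.access = {
			{ test_kernel_write, &test_var, sizeof(test_var), KCSAN_ACCESS_WRITE },
			{ test_kernel_read, &test_var, sizeof(test_var), 0 },
		},
	};
	bool match_expect = false;

	begin_test_checks(test_kernel_read, test_kernel_write);
	do {
		match_expect |= report_matches(&expect);
	} while (!end_test_checks(match_expect));
	KUNIT_EXPECT_TRUE(test, match_expect);
}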

/*
 * Stress KCSAN with lots of concurrent races on different addresses until
 * timeout.
 */
__no_kcsan
static void test_concurrent_races(struct kunit *test)
{}

/* Test the KCSAN_REPORT_VALUE_CHANGE_ONLY option. */
__no_kcsan
static void test_novalue_change(struct kunit *test)
{}

/*
 * Test the exception rules under which the KCSAN_REPORT_VALUE_CHANGE_ONLY
 * option must never apply: such races are reported even without a value change.
 */
__no_kcsan
static void test_novalue_change_exception(struct kunit *test)
{}

/* Test that data races of unknown origin are reported. */
__no_kcsan
static void test_unknown_origin(struct kunit *test)
{}

/* Test KCSAN_ASSUME_PLAIN_WRITES_ATOMIC if it is selected. */
__no_kcsan
static void test_write_write_assume_atomic(struct kunit *test)
{}

/*
 * Test that data races with writes larger than word-size are always reported,
 * even if KCSAN_ASSUME_PLAIN_WRITES_ATOMIC is selected.
 */
__no_kcsan
static void test_write_write_struct(struct kunit *test)
{}

/*
 * Test that data races where only one write is larger than word-size are always
 * reported, even if KCSAN_ASSUME_PLAIN_WRITES_ATOMIC is selected.
 */
__no_kcsan
static void test_write_write_struct_part(struct kunit *test)
{}

/* Test that races with atomic accesses never result in reports. */
__no_kcsan
static void test_read_atomic_write_atomic(struct kunit *test)
{}

/* Test that a race between an atomic and a plain access results in a report. */
__no_kcsan
static void test_read_plain_atomic_write(struct kunit *test)
{}

/* Test that atomic RMWs generate correct report. */
__no_kcsan
static void test_read_plain_atomic_rmw(struct kunit *test)
{}

/* Zero-sized accesses should never cause data race reports. */
__no_kcsan
static void test_zero_size_access(struct kunit *test)
{}

/* Test the data_race() macro. */
__no_kcsan
static void test_data_race(struct kunit *test)
{}

/* Test the __data_racy type qualifier. */
__no_kcsan
static void test_data_racy_qualifier(struct kunit *test)
{}

__no_kcsan
static void test_assert_exclusive_writer(struct kunit *test)
{}

__no_kcsan
static void test_assert_exclusive_access(struct kunit *test)
{}

__no_kcsan
static void test_assert_exclusive_access_writer(struct kunit *test)
{}

__no_kcsan
static void test_assert_exclusive_bits_change(struct kunit *test)
{}

__no_kcsan
static void test_assert_exclusive_bits_nochange(struct kunit *test)
{}

__no_kcsan
static void test_assert_exclusive_writer_scoped(struct kunit *test)
{}

__no_kcsan
static void test_assert_exclusive_access_scoped(struct kunit *test)
{}

/*
 * jiffies is special (declared volatile) and its accesses are typically not
 * marked; this test ensures that neither the compiler nor KCSAN gets confused
 * by jiffies' declaration on different architectures.
 */
__no_kcsan
static void test_jiffies_noreport(struct kunit *test)
{}

/* Test that racing accesses in seqlock critical sections are not reported. */
__no_kcsan
static void test_seqlock_noreport(struct kunit *test)
{}

/*
 * Test that atomic builtins work and required instrumentation functions exist. We
 * also test that KCSAN understands they're atomic by racing with them via
 * test_kernel_atomic_builtins(), and expect no reports.
 *
 * The atomic builtins _SHOULD NOT_ be used in normal kernel code!
 */
static void test_atomic_builtins(struct kunit *test)
{}

__no_kcsan
static void test_1bit_value_change(struct kunit *test)
{}

__no_kcsan
static void test_correct_barrier(struct kunit *test)
{}

__no_kcsan
static void test_missing_barrier(struct kunit *test)
{}

__no_kcsan
static void test_atomic_builtins_correct_barrier(struct kunit *test)
{}

__no_kcsan
static void test_atomic_builtins_missing_barrier(struct kunit *test)
{}

/*
 * Generate thread counts for all test cases. Values generated are in interval
 * [2, 5] followed by exponentially increasing thread counts from 8 to 32.
 *
 * The thread counts are chosen to cover potentially interesting boundaries and
 * corner cases (2 to 5), and then stress the system with larger counts.
 */
static const void *nthreads_gen_params(const void *prev, char *desc)
{}
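
/*
 * Hedged sketch of a KUnit parameter generator with the documented shape: it
 * is called with the previously returned value (NULL on the first call),
 * returns the next thread-count parameter or NULL when done, and writes a
 * human-readable description into @desc. The static table below is
 * illustrative, not the upstream implementation.
 */
static __maybe_unused const void *example_nthreads_gen_params(const void *prev, char *desc)
{
	static const long example_nthreads[] = { 2, 3, 4, 5, 8, 16, 32 };
	const long *next = prev ? (const long *)prev + 1 : example_nthreads;

	if (next == example_nthreads + ARRAY_SIZE(example_nthreads))
		return NULL;

	snprintf(desc, KUNIT_PARAM_DESC_SIZE, "threads=%ld", *next);
	return next;
}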

#define KCSAN_KUNIT_CASE(test_name) KUNIT_CASE_PARAM(test_name, nthreads_gen_params)
static struct kunit_case kcsan_test_cases[] = {
	/* One KCSAN_KUNIT_CASE() (or KUNIT_CASE()) entry per test case defined above. */
	{},
};

/* ===== End test cases ===== */

/* Concurrent accesses from interrupts. */
__no_kcsan
static void access_thread_timer(struct timer_list *timer)
{}

/* The main loop for each thread. */
__no_kcsan
static int access_thread(void *arg)
{}
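
/*
 * Hedged sketch of the per-thread main loop: each torture thread repeatedly
 * invokes whichever access kernels are currently installed, with a small
 * delay, until the torture framework asks it to stop. Illustrative only; not
 * the upstream loop.
 */
static int __maybe_unused example_access_thread(void *arg)
{
	do {
		void (*func)(void);

		test_delay(1);

		/* Run both currently installed access kernels, if any. */
		func = READ_ONCE(access_kernels[0]);
		if (func)
			func();
		func = READ_ONCE(access_kernels[1]);
		if (func)
			func();
	} while (!torture_must_stop());

	torture_kthread_stopping("example_access_thread");
	return 0;
}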

__no_kcsan
static int test_init(struct kunit *test)
{}

__no_kcsan
static void test_exit(struct kunit *test)
{}

__no_kcsan
static void register_tracepoints(void)
{}

__no_kcsan
static void unregister_tracepoints(void)
{}
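
/*
 * Hedged sketch of how the console tracepoint is hooked up: the probe is
 * attached to the 'console' tracepoint (see trace/events/printk.h) so that
 * every printed line is passed to probe_console(). Illustrative only.
 */
static void __maybe_unused example_register_tracepoints(void)
{
	register_trace_console(probe_console, NULL);
}

static void __maybe_unused example_unregister_tracepoints(void)
{
	unregister_trace_console(probe_console, NULL);
	tracepoint_synchronize_unregister();
}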

static int kcsan_suite_init(struct kunit_suite *suite)
{}

static void kcsan_suite_exit(struct kunit_suite *suite)
{}

static struct kunit_suite kcsan_test_suite = {
	.name = "kcsan",
	.test_cases = kcsan_test_cases,
	.init = test_init,
	.exit = test_exit,
	.suite_init = kcsan_suite_init,
	.suite_exit = kcsan_suite_exit,
};

kunit_test_suites(&kcsan_test_suite);

MODULE_DESCRIPTION("KCSAN test suite");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Marco Elver <[email protected]>");