/* linux/kernel/kcsan/core.c */

// SPDX-License-Identifier: GPL-2.0
/*
 * KCSAN core runtime.
 *
 * Copyright (C) 2019, Google LLC.
 */

/* Prefix all KCSAN log output so it is easy to identify in the kernel log. */
#define pr_fmt(fmt) "kcsan: " fmt

#include <linux/atomic.h>
#include <linux/bug.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/minmax.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/uaccess.h>

#include "encoding.h"
#include "kcsan.h"
#include "permissive.h"

static bool kcsan_early_enable = IS_ENABLED();
unsigned int kcsan_udelay_task =;
unsigned int kcsan_udelay_interrupt =;
static long kcsan_skip_watch =;
static bool kcsan_interrupt_watcher = IS_ENABLED();

#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
/* All parameters below appear under the "kcsan." prefix (e.g. kcsan.early_enable). */
#define MODULE_PARAM_PREFIX "kcsan."
module_param_named(early_enable, kcsan_early_enable, bool, 0);
module_param_named(udelay_task, kcsan_udelay_task, uint, 0644);
module_param_named(udelay_interrupt, kcsan_udelay_interrupt, uint, 0644);
module_param_named(skip_watch, kcsan_skip_watch, long, 0644);
module_param_named(interrupt_watcher, kcsan_interrupt_watcher, bool, 0444);

#ifdef CONFIG_KCSAN_WEAK_MEMORY
/* Weak-memory modeling is on by default when compiled in; tunable at runtime. */
static bool kcsan_weak_memory = true;
module_param_named(weak_memory, kcsan_weak_memory, bool, 0644);
#else
/* Constant-fold away all weak-memory paths when not configured. */
#define kcsan_weak_memory false
#endif

/* Global on/off switch for the KCSAN runtime. */
bool kcsan_enabled;

/* Per-CPU kcsan_ctx for interrupts */
static DEFINE_PER_CPU(struct kcsan_ctx, kcsan_cpu_ctx) = {
	/*
	 * Poison the scoped-accesses list head so use before initialization is
	 * caught. NOTE(review): initializer was stripped in this copy; restored
	 * per upstream kernel/kcsan/core.c — verify against the tree.
	 */
	.scoped_accesses	= {LIST_POISON1, NULL},
};

/*
 * Helper macros to index into adjacent slots, starting from address slot
 * itself, followed by the right and left slots.
 *
 * The purpose is 2-fold:
 *
 *	1. if during insertion the address slot is already occupied, check if
 *	   any adjacent slots are free;
 *	2. accesses that straddle a slot boundary due to size that exceeds a
 *	   slot's range may check adjacent slots if any watchpoint matches.
 *
 * Note that accesses with very large size may still miss a watchpoint; however,
 * given this should be rare, this is a reasonable trade-off to make, since this
 * will avoid:
 *
 *	1. excessive contention between watchpoint checks and setup;
 *	2. larger number of simultaneous watchpoints without sacrificing
 *	   performance.
 *
 * Example: SLOT_IDX values for KCSAN_CHECK_ADJACENT=1, where i is [0, 1, 2]:
 *
 *   slot=0:  [ 1,  2,  0]
 *   slot=9:  [10, 11,  9]
 *   slot=63: [64, 65, 63]
 */
/* Restored expansion: matches the example table above (slot first maps to the
 * right neighbour(s), wrapping back to the slot itself for the last i). */
#define SLOT_IDX(slot, i) (slot + ((i + KCSAN_CHECK_ADJACENT) % NUM_SLOTS))

/*
 * SLOT_IDX_FAST is used in the fast-path. Not first checking the address's primary
 * slot (middle) is fine if we assume that races occur rarely. The set of
 * indices {SLOT_IDX(slot, i) | i in [0, NUM_SLOTS)} is equivalent to
 * {SLOT_IDX_FAST(slot, i) | i in [0, NUM_SLOTS)}.
 */
/* Restored expansion: plain linear probe; no modulo needed because the
 * watchpoints array has NUM_SLOTS-1 extra overflow entries (see below). */
#define SLOT_IDX_FAST(slot, i) (slot + i)

/*
 * Watchpoints, with each entry encoded as defined in encoding.h: in order to be
 * able to safely update and access a watchpoint without introducing locking
 * overhead, we encode each watchpoint as a single atomic long. The initial
 * zero-initialized state matches INVALID_WATCHPOINT.
 *
 * Add NUM_SLOTS-1 entries to account for overflow; this helps avoid having to
 * use more complicated SLOT_IDX_FAST calculation with modulo in the fast-path.
 */
static atomic_long_t watchpoints[CONFIG_KCSAN_NUM_WATCHPOINTS + NUM_SLOTS-1];

/*
 * Instructions to skip watching counter, used in should_watch(). We use a
 * per-CPU counter to avoid excessive contention.
 */
static DEFINE_PER_CPU(long, kcsan_skip);

/* For kcsan_prandom_u32_max(). */
/* Per-CPU PRNG state; per-CPU so the generator needs no locking. */
static DEFINE_PER_CPU(u32, kcsan_rand_state);

/*
 * NOTE(review): every function body in this section appears to have been
 * stripped to "{}" in this copy of the file — restore them from upstream
 * kernel/kcsan/core.c before building. The comments document the contracts
 * only as far as the visible declarations and pre-existing comments allow.
 */

/* Presumably searches the slots for a watchpoint matching [addr, addr+size),
 * returning it and its encoding via @encoded_watchpoint — TODO confirm. */
static __always_inline atomic_long_t *find_watchpoint(unsigned long addr,
						      size_t size,
						      bool expect_write,
						      long *encoded_watchpoint)
{}

/* Presumably installs an encoded watchpoint into a free slot, returning the
 * slot taken or NULL if none was free — TODO confirm. */
static inline atomic_long_t *
insert_watchpoint(unsigned long addr, size_t size, bool is_write)
{}

/*
 * Return true if watchpoint was successfully consumed, false otherwise.
 *
 * This may return false if:
 *
 *	1. another thread already consumed the watchpoint;
 *	2. the thread that set up the watchpoint already removed it;
 *	3. the watchpoint was removed and then re-used.
 */
static __always_inline bool
try_consume_watchpoint(atomic_long_t *watchpoint, long encoded_watchpoint)
{}

/* Return true if watchpoint was not touched, false if already consumed. */
static inline bool consume_watchpoint(atomic_long_t *watchpoint)
{}

/* Remove the watchpoint -- its slot may be reused after. */
static inline void remove_watchpoint(atomic_long_t *watchpoint)
{}

/* Presumably returns the current context: task kcsan_ctx in task context,
 * this CPU's kcsan_cpu_ctx in interrupt context — TODO confirm. */
static __always_inline struct kcsan_ctx *get_ctx(void)
{}

/* Forward declaration of the fast-path entry point defined further below. */
static __always_inline void
check_access(const volatile void *ptr, size_t size, int type, unsigned long ip);

/* Check scoped accesses; never inline because this is a slow-path! */
static noinline void kcsan_check_scoped_accesses(void)
{}

/* Rules for generic atomic accesses. Called from fast-path. */
static __always_inline bool
is_atomic(struct kcsan_ctx *ctx, const volatile void *ptr, size_t size, int type)
{}

/* Presumably decides whether to set up a watchpoint for this access — TODO confirm. */
static __always_inline bool
should_watch(struct kcsan_ctx *ctx, const volatile void *ptr, size_t size, int type)
{}

/*
 * Returns a pseudo-random number in interval [0, ep_ro). Simple linear
 * congruential generator, using constants from "Numerical Recipes".
 *
 * NOTE(review): bodies in this section are stripped to "{}" — restore from
 * upstream kernel/kcsan/core.c.
 */
static u32 kcsan_prandom_u32_max(u32 ep_ro)
{}

/* Presumably re-arms the per-CPU kcsan_skip countdown — TODO confirm. */
static inline void reset_kcsan_skip(void)
{}

/* Fast-path enable test for the given context — body stripped. */
static __always_inline bool kcsan_is_enabled(struct kcsan_ctx *ctx)
{}

/* Introduce delay depending on context and configuration. */
static void delay_access(int type)
{}

/*
 * Reads the instrumented memory for value change detection; value change
 * detection is currently done for accesses up to a size of 8 bytes.
 */
static __always_inline u64 read_instrumented_memory(const volatile void *ptr, size_t size)
{}

/* Presumably snapshots @task's irq-trace state before reporting — TODO confirm. */
void kcsan_save_irqtrace(struct task_struct *task)
{}

/* Counterpart of kcsan_save_irqtrace() — body stripped. */
void kcsan_restore_irqtrace(struct task_struct *task)
{}

/* Accessors for the per-context function-scope depth used by reordering
 * detection (see __tsan_func_entry/exit below) — bodies stripped. */
static __always_inline int get_kcsan_stack_depth(void)
{}

static __always_inline void add_kcsan_stack_depth(int val)
{}

/* Helpers managing ctx->reorder_access for weak-memory modeling — bodies stripped. */
static __always_inline struct kcsan_scoped_access *get_reorder_access(struct kcsan_ctx *ctx)
{}

static __always_inline bool
find_reorder_access(struct kcsan_ctx *ctx, const volatile void *ptr, size_t size,
		    int type, unsigned long ip)
{}

static inline void
set_reorder_access(struct kcsan_ctx *ctx, const volatile void *ptr, size_t size,
		   int type, unsigned long ip)
{}

/*
 * Pull everything together: check_access() below contains the performance
 * critical operations; the fast-path (including check_access) functions should
 * all be inlinable by the instrumentation functions.
 *
 * The slow-path (kcsan_found_watchpoint, kcsan_setup_watchpoint) are
 * non-inlinable -- note that, we prefix these with "kcsan_" to ensure they can
 * be filtered from the stacktrace, as well as give them unique names for the
 * UACCESS whitelist of objtool. Each function uses user_access_save/restore(),
 * since they do not access any user memory, but instrumentation is still
 * emitted in UACCESS regions.
 *
 * NOTE(review): bodies below are stripped to "{}" — restore from upstream
 * kernel/kcsan/core.c.
 */

/* Slow-path taken when an access hits an existing watchpoint — body stripped. */
static noinline void kcsan_found_watchpoint(const volatile void *ptr,
					    size_t size,
					    int type,
					    unsigned long ip,
					    atomic_long_t *watchpoint,
					    long encoded_watchpoint)
{}

/* Slow-path that installs a watchpoint and stalls to detect racing accesses —
 * body stripped. */
static noinline void
kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type, unsigned long ip)
{}

/* Fast-path entry point for every instrumented access — body stripped. */
static __always_inline void
check_access(const volatile void *ptr, size_t size, int type, unsigned long ip)
{}

/* === Public interface ===================================================== */

/* One-time runtime initialization, called from start_kernel() — body stripped. */
void __init kcsan_init(void)
{}

/* === Exported interface =================================================== */

/*
 * NOTE(review): the function bodies below were stripped to "{}" in this copy —
 * restore them from upstream kernel/kcsan/core.c. The EXPORT_SYMBOL()
 * arguments had also been stripped (a hard compile error); by kernel
 * convention each names the immediately preceding definition, restored here.
 */

/* Disable KCSAN for the current context (nestable). */
void kcsan_disable_current(void)
{}
EXPORT_SYMBOL(kcsan_disable_current);

/* Re-enable KCSAN for the current context; pairs with kcsan_disable_current(). */
void kcsan_enable_current(void)
{}
EXPORT_SYMBOL(kcsan_enable_current);

/* As kcsan_enable_current(), but without warning on imbalance. */
void kcsan_enable_current_nowarn(void)
{}
EXPORT_SYMBOL(kcsan_enable_current_nowarn);

/* Begin a nestable atomic region: accesses within are treated as atomic. */
void kcsan_nestable_atomic_begin(void)
{}
EXPORT_SYMBOL(kcsan_nestable_atomic_begin);

void kcsan_nestable_atomic_end(void)
{}
EXPORT_SYMBOL(kcsan_nestable_atomic_end);

/* Begin a non-nestable (flat) atomic region. */
void kcsan_flat_atomic_begin(void)
{}
EXPORT_SYMBOL(kcsan_flat_atomic_begin);

void kcsan_flat_atomic_end(void)
{}
EXPORT_SYMBOL(kcsan_flat_atomic_end);

/* Treat the next @n memory accesses in this context as atomic. */
void kcsan_atomic_next(int n)
{}
EXPORT_SYMBOL(kcsan_atomic_next);

/* Set an access mask to restrict which bits are checked for races. */
void kcsan_set_access_mask(unsigned long mask)
{}
EXPORT_SYMBOL(kcsan_set_access_mask);

/* Register a scoped access, checked on every subsequent check_access(). */
struct kcsan_scoped_access *
kcsan_begin_scoped_access(const volatile void *ptr, size_t size, int type,
			  struct kcsan_scoped_access *sa)
{}
EXPORT_SYMBOL(kcsan_begin_scoped_access);

/* Unregister a scoped access; pairs with kcsan_begin_scoped_access(). */
void kcsan_end_scoped_access(struct kcsan_scoped_access *sa)
{}
EXPORT_SYMBOL(kcsan_end_scoped_access);

/* Explicit race check for [ptr, ptr+size) with access @type. */
void __kcsan_check_access(const volatile void *ptr, size_t size, int type)
{}
EXPORT_SYMBOL(__kcsan_check_access);

/*
 * NOTE(review): both this macro's expansion and the invocation arguments below
 * were stripped in this copy. Upstream, this defines the barrier
 * instrumentation entry points (__kcsan_mb/__kcsan_wmb/__kcsan_rmb/
 * __kcsan_release), with @order_before_cond deciding whether the barrier
 * orders the pending reorder_access — restore from upstream
 * kernel/kcsan/core.c; do not guess the conditions.
 */
#define DEFINE_MEMORY_BARRIER(name, order_before_cond)

DEFINE_MEMORY_BARRIER();
DEFINE_MEMORY_BARRIER();
DEFINE_MEMORY_BARRIER();
DEFINE_MEMORY_BARRIER();

/*
 * KCSAN uses the same instrumentation that is emitted by supported compilers
 * for ThreadSanitizer (TSAN).
 *
 * When enabled, the compiler emits instrumentation calls (the functions
 * prefixed with "__tsan" below) for all loads and stores that it generated;
 * inline asm is not instrumented.
 *
 * Note that, not all supported compiler versions distinguish aligned/unaligned
 * accesses, but e.g. recent versions of Clang do. We simply alias the unaligned
 * version to the generic version, which can handle both.
 */

/*
 * NOTE(review): this macro's expansion was stripped in this copy; it must
 * define __tsan_read##size/__tsan_write##size (plus the unaligned aliases) —
 * restore from upstream kernel/kcsan/core.c. The invocation arguments below
 * were also stripped; TSAN instruments 1-, 2-, 4-, 8- and 16-byte accesses,
 * restored here.
 */
#define DEFINE_TSAN_READ_WRITE(size)

DEFINE_TSAN_READ_WRITE(1);
DEFINE_TSAN_READ_WRITE(2);
DEFINE_TSAN_READ_WRITE(4);
DEFINE_TSAN_READ_WRITE(8);
DEFINE_TSAN_READ_WRITE(16);

/*
 * Range instrumentation: check the whole [ptr, ptr+size) range as a single
 * plain access. NOTE(review): bodies and EXPORT_SYMBOL() arguments were
 * stripped in this copy; restored per upstream — verify against the tree.
 */
void __tsan_read_range(void *ptr, size_t size);
void __tsan_read_range(void *ptr, size_t size)
{
	check_access(ptr, size, 0, _RET_IP_);
}
EXPORT_SYMBOL(__tsan_read_range);

void __tsan_write_range(void *ptr, size_t size);
void __tsan_write_range(void *ptr, size_t size)
{
	check_access(ptr, size, KCSAN_ACCESS_WRITE, _RET_IP_);
}
EXPORT_SYMBOL(__tsan_write_range);

/*
 * Use of explicit volatile is generally disallowed [1], however, volatile is
 * still used in various concurrent context, whether in low-level
 * synchronization primitives or for legacy reasons.
 * [1] https://lwn.net/Articles/233479/
 *
 * We only consider volatile accesses atomic if they are aligned and would pass
 * the size-check of compiletime_assert_rwonce_type().
 *
 * NOTE(review): this macro's expansion was stripped in this copy — restore
 * from upstream kernel/kcsan/core.c. The invocation size arguments below were
 * also stripped; the instrumented widths are 1, 2, 4, 8 and 16 bytes,
 * restored here.
 */
#define DEFINE_TSAN_VOLATILE_READ_WRITE(size)

DEFINE_TSAN_VOLATILE_READ_WRITE(1);
DEFINE_TSAN_VOLATILE_READ_WRITE(2);
DEFINE_TSAN_VOLATILE_READ_WRITE(4);
DEFINE_TSAN_VOLATILE_READ_WRITE(8);
DEFINE_TSAN_VOLATILE_READ_WRITE(16);

/*
 * Function entry and exit are used to determine the validity of reorder_access.
 * Reordering of the access ends at the end of the function scope where the
 * access happened. This is done for two reasons:
 *
 *	1. Artificially limits the scope where missing barriers are detected.
 *	   This minimizes false positives due to uninstrumented functions that
 *	   contain the required barriers but were missed.
 *
 *	2. Simplifies generating the stack trace of the access.
 *
 * NOTE(review): the bodies of __tsan_func_entry/__tsan_func_exit were
 * stripped to "{}" in this copy (upstream they maintain the per-context stack
 * depth for weak-memory modeling) — restore from upstream. The
 * EXPORT_SYMBOL() arguments, restored here, name the preceding definitions.
 */
void __tsan_func_entry(void *call_pc);
noinline void __tsan_func_entry(void *call_pc)
{}
EXPORT_SYMBOL(__tsan_func_entry);

void __tsan_func_exit(void);
noinline void __tsan_func_exit(void)
{}
EXPORT_SYMBOL(__tsan_func_exit);

/* __tsan_init() is intentionally a no-op: KCSAN initializes via kcsan_init(). */
void __tsan_init(void);
void __tsan_init(void)
{}
EXPORT_SYMBOL(__tsan_init);

/*
 * Instrumentation for atomic builtins (__atomic_*, __sync_*).
 *
 * Normal kernel code _should not_ be using them directly, but some
 * architectures may implement some or all atomics using the compilers'
 * builtins.
 *
 * Note: If an architecture decides to fully implement atomics using the
 * builtins, because they are implicitly instrumented by KCSAN (and KASAN,
 * etc.), implementing the ARCH_ATOMIC interface (to get instrumentation via
 * atomic-instrumented) is no longer necessary.
 *
 * TSAN instrumentation replaces atomic accesses with calls to any of the below
 * functions, whose job is to also execute the operation itself.
 *
 * NOTE(review): the function body and all macro expansions in this section
 * were stripped in this copy — restore from upstream kernel/kcsan/core.c.
 */

static __always_inline void kcsan_atomic_builtin_memorder(int memorder)
{}

/* Expected to define __tsan_atomicN_load/__tsan_atomicN_store — expansion stripped. */
#define DEFINE_TSAN_ATOMIC_LOAD_STORE(bits)

/* Expected to define __tsan_atomicN_<op> RMW entry points — expansion stripped. */
#define DEFINE_TSAN_ATOMIC_RMW(op, bits, suffix)

/*
 * Note: CAS operations are always classified as write, even in case they
 * fail. We cannot perform check_access() after a write, as it might lead to
 * false positives, in cases such as:
 *
 *	T0: __atomic_compare_exchange_n(&p->flag, &old, 1, ...)
 *
 *	T1: if (__atomic_load_n(&p->flag, ...)) {
 *		modify *p;
 *		p->flag = 0;
 *	    }
 *
 * The only downside is that, if there are 3 threads, with one CAS that
 * succeeds, another CAS that fails, and an unmarked racing operation, we may
 * point at the wrong CAS as the source of the race. However, if we assume that
 * all CAS can succeed in some other execution, the data race is still valid.
 */
#define DEFINE_TSAN_ATOMIC_CMPXCHG(bits, strength, weak)

#define DEFINE_TSAN_ATOMIC_CMPXCHG_VAL(bits)

/* Expected to instantiate the full set of ops for a given bit width — stripped. */
#define DEFINE_TSAN_ATOMIC_OPS(bits)

DEFINE_TSAN_ATOMIC_OPS();
DEFINE_TSAN_ATOMIC_OPS();
DEFINE_TSAN_ATOMIC_OPS();
#ifdef CONFIG_64BIT
DEFINE_TSAN_ATOMIC_OPS();
#endif

/*
 * NOTE(review): body stripped (upstream it executes the fence and applies the
 * memorder to KCSAN state) — restore from upstream. EXPORT_SYMBOL() argument
 * restored: it names the preceding definition.
 */
void __tsan_atomic_thread_fence(int memorder);
void __tsan_atomic_thread_fence(int memorder)
{}
EXPORT_SYMBOL(__tsan_atomic_thread_fence);

/*
 * In instrumented files, we emit instrumentation for barriers by mapping the
 * kernel barriers to an __atomic_signal_fence(), which is interpreted specially
 * and otherwise has no relation to a real __atomic_signal_fence(). No known
 * kernel code uses __atomic_signal_fence().
 *
 * Since fsanitize=thread instrumentation handles __atomic_signal_fence(), which
 * are turned into calls to __tsan_atomic_signal_fence(), such instrumentation
 * can be disabled via the __no_kcsan function attribute (vs. an explicit call
 * which could not). When __no_kcsan is requested, __atomic_signal_fence()
 * generates no code.
 *
 * Note: The result of using __atomic_signal_fence() with KCSAN enabled is
 * potentially limiting the compiler's ability to reorder operations; however,
 * if barriers were instrumented with explicit calls (without LTO), the compiler
 * couldn't optimize much anyway. The result of a hypothetical architecture
 * using __atomic_signal_fence() in normal code would be KCSAN false negatives.
 *
 * NOTE(review): body stripped — restore from upstream. EXPORT_SYMBOL()
 * argument restored: it names the preceding definition.
 */
void __tsan_atomic_signal_fence(int memorder);
noinline void __tsan_atomic_signal_fence(int memorder)
{}
EXPORT_SYMBOL(__tsan_atomic_signal_fence);

#ifdef __HAVE_ARCH_MEMSET
void *__tsan_memset(void *s, int c, size_t count);
noinline void *__tsan_memset(void *s, int c, size_t count)
{
	/*
	 * Check the whole destination range as a write, then defer to the real
	 * memset(). NOTE(review): body was stripped in this copy; restored per
	 * upstream — verify against the tree.
	 */
	check_access(s, count, KCSAN_ACCESS_WRITE, _RET_IP_);
	return memset(s, c, count);
}
#else
void *__tsan_memset(void *s, int c, size_t count) __alias(memset);
#endif
EXPORT_SYMBOL(__tsan_memset);

#ifdef __HAVE_ARCH_MEMMOVE
void *__tsan_memmove(void *dst, const void *src, size_t len);
noinline void *__tsan_memmove(void *dst, const void *src, size_t len)
{
	/*
	 * Check destination as a write and source as a read, then defer to the
	 * real memmove(). NOTE(review): body was stripped in this copy;
	 * restored per upstream — verify against the tree.
	 */
	check_access(dst, len, KCSAN_ACCESS_WRITE, _RET_IP_);
	check_access(src, len, 0, _RET_IP_);
	return memmove(dst, src, len);
}
#else
void *__tsan_memmove(void *dst, const void *src, size_t len) __alias(memmove);
#endif
EXPORT_SYMBOL(__tsan_memmove);

#ifdef __HAVE_ARCH_MEMCPY
void *__tsan_memcpy(void *dst, const void *src, size_t len);
noinline void *__tsan_memcpy(void *dst, const void *src, size_t len)
{
	/*
	 * Check destination as a write and source as a read, then defer to the
	 * real memcpy(). NOTE(review): body was stripped in this copy;
	 * restored per upstream — verify against the tree.
	 */
	check_access(dst, len, KCSAN_ACCESS_WRITE, _RET_IP_);
	check_access(src, len, 0, _RET_IP_);
	return memcpy(dst, src, len);
}
#else
void *__tsan_memcpy(void *dst, const void *src, size_t len) __alias(memcpy);
#endif
EXPORT_SYMBOL(__tsan_memcpy);