#define pr_fmt(fmt) …
#include <linux/atomic.h>
#include <linux/bug.h>
#include <linux/debugfs.h>
#include <linux/hash.h>
#include <linux/irq_work.h>
#include <linux/jhash.h>
#include <linux/kcsan-checks.h>
#include <linux/kfence.h>
#include <linux/kmemleak.h>
#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/log2.h>
#include <linux/memblock.h>
#include <linux/moduleparam.h>
#include <linux/notifier.h>
#include <linux/panic_notifier.h>
#include <linux/random.h>
#include <linux/rcupdate.h>
#include <linux/sched/clock.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <asm/kfence.h>
#include "kfence.h"
#define KFENCE_WARN_ON(cond) …
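/* === Data ================================================================= */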
static bool kfence_enabled __read_mostly;
static bool disabled_by_warn __read_mostly;
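/* The KFENCE sample interval in milliseconds; a value of 0 disables KFENCE. */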
unsigned long kfence_sample_interval __read_mostly = …;
EXPORT_SYMBOL_GPL(…); /* Export for test modules. */
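/* Re-define MODULE_PARAM_PREFIX so all parameters appear as "kfence.<param>". */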
#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX …
static int kfence_enable_late(void);
static int param_set_sample_interval(const char *val, const struct kernel_param *kp)
{ … }
static int param_get_sample_interval(char *buffer, const struct kernel_param *kp)
{ … }
static const struct kernel_param_ops sample_interval_param_ops = …;
module_param_cb(…);
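/* Pool usage% threshold when currently covered allocations are skipped. */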
static unsigned long kfence_skip_covered_thresh __read_mostly = …;
module_param_named(skip_covered_thresh, kfence_skip_covered_thresh, ulong, 0644);
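/* Allocation burst count: number of excess KFENCE allocations per sample. */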
static unsigned int kfence_burst __read_mostly;
module_param_named(burst, kfence_burst, uint, 0644);
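/* If true, use a deferrable timer. */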
static bool kfence_deferrable __read_mostly = IS_ENABLED(…);
module_param_named(deferrable, kfence_deferrable, bool, 0444);
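/* If true, check all canary bytes on panic. */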
static bool kfence_check_on_panic __read_mostly;
module_param_named(check_on_panic, kfence_check_on_panic, bool, 0444);
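/* The pool of pages used for guard pages and objects. */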
char *__kfence_pool __read_mostly;
EXPORT_SYMBOL(…); /* Export for test modules. */
static_assert(…);
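/*
 * Per-object metadata, with one-to-one mapping of object metadata to
 * backing pages (in __kfence_pool).
 */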
struct kfence_metadata *kfence_metadata __read_mostly;
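/*
 * If kfence_metadata is not NULL, it may be accessed by kfence_shutdown_cache().
 * So introduce kfence_metadata_init to initialize metadata, and then make
 * kfence_metadata visible after initialization is successful. This prevents
 * potential UAF or access to uninitialized metadata.
 */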
static struct kfence_metadata *kfence_metadata_init __read_mostly;
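/* Freelist with available objects. */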
static struct list_head kfence_freelist = …;
static DEFINE_RAW_SPINLOCK(kfence_freelist_lock); /* Lock protecting freelist. */
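/*
 * The static key to set up a KFENCE allocation; or if static keys are not used
 * to gate allocations, to avoid a load and compare if KFENCE is disabled.
 */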
DEFINE_STATIC_KEY_FALSE(kfence_allocation_key);
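/* Gates the allocation, ensuring only one succeeds in a given sample interval. */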
atomic_t kfence_allocation_gate = …;
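/*
 * A Counting Bloom filter of allocation coverage: limits currently covered
 * allocations of the same source filling up the pool.
 *
 * Assuming a range of 15%-85% unique allocations in the pool at any point in
 * time, the below parameters provide a probability of 0.02-0.33 for false
 * positive hits respectively:
 *
 *	P(alloc_traces) = (1 - e^(-HNUM * (alloc_traces / SIZE)))^HNUM
 */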
#define ALLOC_COVERED_HNUM …
#define ALLOC_COVERED_ORDER …
#define ALLOC_COVERED_SIZE …
#define ALLOC_COVERED_HNEXT(h) …
#define ALLOC_COVERED_MASK …
static atomic_t alloc_covered[ALLOC_COVERED_SIZE];
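/* Stack depth used to determine uniqueness of an allocation. */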
#define UNIQUE_ALLOC_STACK_DEPTH …
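/*
 * Randomness for stack hashes, making the same collisions across reboots and
 * different machines less likely.
 */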
static u32 stack_hash_seed __ro_after_init;
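/* Statistics counters for debugfs. */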
enum kfence_counter_id { … };
static atomic_long_t counters[KFENCE_COUNTER_COUNT];
static const char *const counter_names[] = …;
static_assert(…);
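/* === Internals ============================================================ */

/*
 * Whether pool usage by currently covered allocations is high enough that new
 * allocations from already-covered sources should be skipped (see
 * kfence_skip_covered_thresh).
 */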
static inline bool should_skip_covered(void)
{ … }
static u32 get_alloc_stack_hash(unsigned long *stack_entries, size_t num_entries)
{ … }
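/*
 * Adds (or subtracts) count @val for allocation stack trace hash
 * @alloc_stack_hash from Counting Bloom filter.
 */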
static void alloc_covered_add(u32 alloc_stack_hash, int val)
{ … }
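/*
 * Returns true if the allocation stack trace hash @alloc_stack_hash is
 * currently contained (non-zero count) in Counting Bloom filter.
 */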
static bool alloc_covered_contains(u32 alloc_stack_hash)
{ … }
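/*
 * Protect/unprotect the page containing @addr by toggling its page table
 * permissions via the arch-specific kfence_protect_page() helper.
 */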
static bool kfence_protect(unsigned long addr)
{ … }
static bool kfence_unprotect(unsigned long addr)
{ … }
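/* Derive the address of the data page backing @meta's object slot. */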
static inline unsigned long metadata_to_pageaddr(const struct kfence_metadata *meta)
{ … }
static inline bool kfence_obj_allocated(const struct kfence_metadata *meta)
{ … }
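/*
 * Update the object's metadata state, including updating the alloc/free stacks
 * depending on the state transition.
 */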
static noinline void
metadata_update_state(struct kfence_metadata *meta, enum kfence_object_state next,
unsigned long *stack_entries, size_t num_stack_entries)
{ … }
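/* Attributes for the canary-checking helpers below; KMSAN builds need them adjusted. */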
#ifdef CONFIG_KMSAN
#define check_canary_attributes …
#else
#define check_canary_attributes …
#endif
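/* Check canary byte at @addr. */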
static check_canary_attributes bool check_canary_byte(u8 *addr)
{ … }
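/* Write the canary byte pattern to the unused parts of @meta's data page. */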
static inline void set_canary(const struct kfence_metadata *meta)
{ … }
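/* Check all canary bytes of @meta's data page, reporting any corruption found. */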
static check_canary_attributes void
check_canary(const struct kfence_metadata *meta)
{ … }
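/*
 * Allocate a guarded object: take a slot off the freelist, place the object
 * at a random end of the data page (to catch both left and right
 * out-of-bounds accesses), set up canary bytes, and record the allocation
 * stack trace.
 */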
static void *kfence_guarded_alloc(struct kmem_cache *cache, size_t size, gfp_t gfp,
unsigned long *stack_entries, size_t num_stack_entries,
u32 alloc_stack_hash)
{ … }
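/*
 * Free a guarded object: verify canary bytes, protect the data page again,
 * and return the slot to the freelist. If @zombie, the owning cache is being
 * destroyed and the object is only marked freed, not made available for reuse.
 */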
static void kfence_guarded_free(void *addr, struct kfence_metadata *meta, bool zombie)
{ … }
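/* RCU callback used to defer frees for SLAB_TYPESAFE_BY_RCU caches. */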
static void rcu_guarded_free(struct rcu_head *h)
{ … }
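/*
 * Initialization of the KFENCE pool after its allocation.
 * Returns 0 on success; otherwise returns the address up to
 * which partial initialization succeeded.
 */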
static unsigned long kfence_init_pool(void)
{ … }
static bool __init kfence_init_pool_early(void)
{ … }
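/* === DebugFS Interface ==================================================== */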
static int stats_show(struct seq_file *seq, void *v)
{ … }
DEFINE_SHOW_ATTRIBUTE(…);
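/*
 * debugfs seq_file operations for /sys/kernel/debug/kfence/objects.
 * start_object() and next_object() return the object index + 1, because NULL is used
 * to stop iteration.
 */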
static void *start_object(struct seq_file *seq, loff_t *pos)
{ … }
static void stop_object(struct seq_file *seq, void *v)
{ … }
static void *next_object(struct seq_file *seq, void *v, loff_t *pos)
{ … }
static int show_object(struct seq_file *seq, void *v)
{ … }
static const struct seq_operations objects_sops = …;
DEFINE_SEQ_ATTRIBUTE(…);
static int kfence_debugfs_init(void)
{ … }
late_initcall(kfence_debugfs_init);
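/* === Panic Notifier ======================================================= */

/* Check canary bytes of all currently allocated objects. */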
static void kfence_check_all_canary(void)
{ … }
static int kfence_check_canary_callback(struct notifier_block *nb,
unsigned long reason, void *arg)
{ … }
static struct notifier_block kfence_check_canary_notifier = …;
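/* === Allocation Gate Timer ================================================ */

/*
 * Set up delayed work, which will enable and disable the static key. We need
 * to use a work queue (rather than a simple timer), since enabling and
 * disabling a static key cannot be done from an interrupt.
 */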
static struct delayed_work kfence_timer;
#ifdef CONFIG_KFENCE_STATIC_KEYS
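/* Wait queue to wake up allocation-gate timer task. */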
static DECLARE_WAIT_QUEUE_HEAD(allocation_wait);
static void wake_up_kfence_timer(struct irq_work *work)
{ … }
static DEFINE_IRQ_WORK(wake_up_kfence_timer_work, wake_up_kfence_timer);
#endif
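/*
 * Open the allocation gate for one sample: enable the static key, wait until
 * a guarded allocation has taken the gate, then close it and re-arm the timer
 * for the next sample interval.
 */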
static void toggle_allocation_gate(struct work_struct *work)
{ … }
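/* === Public interface ===================================================== */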
void __init kfence_alloc_pool_and_metadata(void)
{ … }
static void kfence_init_enable(void)
{ … }
void __init kfence_init(void)
{ … }
static int kfence_init_late(void)
{ … }
static int kfence_enable_late(void)
{ … }
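/*
 * Called when @s is destroyed: objects still allocated from @s are marked as
 * zombies so that a later (buggy) free does not touch the destroyed cache.
 */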
void kfence_shutdown_cache(struct kmem_cache *s)
{ … }
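/* Allocation entry point, called from the slab allocator via kfence_alloc(). */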
void *__kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags)
{ … }
size_t kfence_ksize(const void *addr)
{ … }
void *kfence_object_start(const void *addr)
{ … }
void __kfence_free(void *addr)
{ … }
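/*
 * Handle a page fault in the KFENCE pool: report the offending access
 * (out-of-bounds or use-after-free) and return true if KFENCE handled the
 * fault.
 */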
bool kfence_handle_page_fault(unsigned long addr, bool is_write, struct pt_regs *regs)
{ … }