// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2007 Alan Stern
 * Copyright (C) IBM Corporation, 2009
 * Copyright (C) 2009, Frederic Weisbecker <[email protected]>
 *
 * Thanks to Ingo Molnar for his many suggestions.
 *
 * Authors: Alan Stern <[email protected]>
 *          K.Prasad <[email protected]>
 *          Frederic Weisbecker <[email protected]>
 */

/*
 * HW_breakpoint: a unified kernel/user-space hardware breakpoint facility,
 * using the CPU's debug registers.
 * This file contains the arch-independent routines.
 */

#include <linux/hw_breakpoint.h>

#include <linux/atomic.h>
#include <linux/bug.h>
#include <linux/cpu.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/irqflags.h>
#include <linux/kdebug.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/percpu-rwsem.h>
#include <linux/percpu.h>
#include <linux/rhashtable.h>
#include <linux/sched.h>
#include <linux/slab.h>

/*
 * Data structure to track the total uses of N slots across tasks or CPUs;
 * bp_slots_histogram::count[N] is the number of tasks or CPUs that have
 * N+1 breakpoint slots assigned.
 */
struct bp_slots_histogram {
#ifdef hw_breakpoint_slots
	atomic_t count[hw_breakpoint_slots(0)];
#else
	atomic_t *count;
#endif
};
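
/*
 * A minimal sketch of how a histogram update works under the semantics above:
 * when an owner (task or CPU) goes from holding @old slots to holding
 * @old + @val slots, its contribution moves from bucket old-1 to bucket
 * old+val-1 (bucket N counts owners of N+1 slots; owning zero slots is simply
 * not represented). The helper name and the bare atomic ops are illustrative
 * assumptions, not part of this file's declared API.
 */
static inline void
bp_slots_histogram_update_sketch(struct bp_slots_histogram *hist, int old, int val)
{
	const int old_idx = old - 1;		/* bucket for the previous total */
	const int new_idx = old_idx + val;	/* bucket for the new total */

	if (old_idx >= 0)
		atomic_dec(&hist->count[old_idx]);
	if (new_idx >= 0)
		atomic_inc(&hist->count[new_idx]);
}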

/*
 * Per-CPU constraints data.
 */
struct bp_cpuinfo {
	/* Number of pinned CPU breakpoints in a CPU. */
	unsigned int			cpu_pinned;
	/* tsk_pinned[n] is the number of tasks having n+1 breakpoints */
	struct bp_slots_histogram	tsk_pinned;
};

static DEFINE_PER_CPU(struct bp_cpuinfo, bp_cpuinfo[TYPE_MAX]);

static struct bp_cpuinfo *get_bp_info(int cpu, enum bp_type_idx type)
{}

/* Number of pinned CPU breakpoints globally. */
static struct bp_slots_histogram cpu_pinned[TYPE_MAX];
/* Number of pinned CPU-independent task breakpoints. */
static struct bp_slots_histogram tsk_pinned_all[TYPE_MAX];

/* Keep track of the breakpoints attached to tasks */
static struct rhltable task_bps_ht;
static const struct rhashtable_params task_bps_ht_params = {
	.head_offset = offsetof(struct hw_perf_event, bp_list),
	.key_offset = offsetof(struct hw_perf_event, target),
	.key_len = sizeof_field(struct hw_perf_event, target),
	.automatic_shrinking = true,
};
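
/*
 * A minimal sketch of how the per-task breakpoint list can be walked, assuming
 * the rhltable is keyed by the event's target task as set up in
 * task_bps_ht_params above (hw_perf_event::target as key, hw_perf_event::bp_list
 * as the hashed list head). The helper name and the plain counting are
 * illustrative; a real walk runs under RCU or the task's breakpoint mutex.
 */
static int count_task_bps_sketch(struct perf_event *bp)
{
	struct task_struct *tsk = READ_ONCE(bp->hw.target);
	struct perf_event *iter;
	struct rhlist_head *head, *pos;
	int count = 0;

	rcu_read_lock();
	head = rhltable_lookup(&task_bps_ht, &tsk, task_bps_ht_params);
	if (head) {
		rhl_for_each_entry_rcu(iter, pos, head, hw.bp_list)
			count++;
	}
	rcu_read_unlock();

	return count;
}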

static bool constraints_initialized __ro_after_init;

/*
 * Synchronizes accesses to the per-CPU constraints; the locking rules are:
 *
 *  1. Atomic updates to bp_cpuinfo::tsk_pinned only require a held read-lock
 *     (due to bp_slots_histogram::count being atomic, no updates are lost).
 *
 *  2. Holding a write-lock is required for computations that require a
 *     stable snapshot of all bp_cpuinfo::tsk_pinned.
 *
 *  3. In all other cases, non-atomic accesses require the appropriately held
 *     lock (read-lock for read-only accesses; write-lock for reads/writes).
 */
DEFINE_STATIC_PERCPU_RWSEM(bp_cpuinfo_sem);
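
/*
 * A minimal sketch of the two lock modes described above. Rule 1: a read-lock
 * suffices for an atomic update to one CPU's tsk_pinned histogram. Rule 2: a
 * write-lock is needed for a stable snapshot across all CPUs. The _sketch
 * helpers are illustrative and rely only on bp_cpuinfo and the rwsem declared
 * in this file.
 */
static void tsk_pinned_bucket_inc_sketch(int cpu, enum bp_type_idx type, int bucket)
{
	percpu_down_read(&bp_cpuinfo_sem);
	/* Atomic bucket update; concurrent readers of the histogram are fine. */
	atomic_inc(&get_bp_info(cpu, type)->tsk_pinned.count[bucket]);
	percpu_up_read(&bp_cpuinfo_sem);
}

static bool any_cpu_pinned_sketch(enum bp_type_idx type)
{
	bool used = false;
	int cpu;

	percpu_down_write(&bp_cpuinfo_sem);	/* stable view of every CPU */
	for_each_possible_cpu(cpu)
		used |= get_bp_info(cpu, type)->cpu_pinned != 0;
	percpu_up_write(&bp_cpuinfo_sem);

	return used;
}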

/*
 * Return the mutex used to serialize accesses to a task's breakpoint list in
 * task_bps_ht. Since rhltable synchronizes concurrent insertions/deletions,
 * independent tasks may insert/delete concurrently; therefore, a per-task
 * mutex is sufficient.
 *
 * Uses task_struct::perf_event_mutex, to avoid extending task_struct with a
 * hw_breakpoint-only mutex that might rarely be used. The caveat is that
 * hw_breakpoint may then contend with per-task perf event list management. The
 * assumption is that perf use cases involving hw_breakpoints are very unlikely
 * to result in unnecessary contention.
 */
static inline struct mutex *get_task_bps_mutex(struct perf_event *bp)
{}
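
/*
 * A minimal sketch of the expected lock pairing: task-bound breakpoints take
 * the task's mutex plus a read-lock on bp_cpuinfo_sem (their histogram updates
 * are atomic), while CPU-bound breakpoints take the write-lock since they
 * modify bp_cpuinfo::cpu_pinned non-atomically. The _sketch names are
 * illustrative, not this file's API.
 */
static struct mutex *bp_constraints_lock_sketch(struct perf_event *bp)
{
	struct mutex *tsk_mtx = get_task_bps_mutex(bp);

	if (tsk_mtx) {
		mutex_lock(tsk_mtx);
		percpu_down_read(&bp_cpuinfo_sem);
	} else {
		percpu_down_write(&bp_cpuinfo_sem);
	}

	return tsk_mtx;
}

static void bp_constraints_unlock_sketch(struct mutex *tsk_mtx)
{
	if (tsk_mtx) {
		percpu_up_read(&bp_cpuinfo_sem);
		mutex_unlock(tsk_mtx);
	} else {
		percpu_up_write(&bp_cpuinfo_sem);
	}
}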

static struct mutex *bp_constraints_lock(struct perf_event *bp)
{}

static void bp_constraints_unlock(struct mutex *tsk_mtx)
{}

static bool bp_constraints_is_locked(struct perf_event *bp)
{}

static inline void assert_bp_constraints_lock_held(struct perf_event *bp)
{}

#ifdef hw_breakpoint_slots
/*
 * Number of breakpoint slots is constant, and the same for all types.
 */
static_assert(hw_breakpoint_slots(TYPE_INST) == hw_breakpoint_slots(TYPE_DATA));
static inline int hw_breakpoint_slots_cached(int type)	{}
static inline int init_breakpoint_slots(void)		{}
#else
/*
 * Dynamic number of breakpoint slots.
 */
static int __nr_bp_slots[TYPE_MAX] __ro_after_init;

static inline int hw_breakpoint_slots_cached(int type)
{
	return __nr_bp_slots[type];
}

static __init bool
bp_slots_histogram_alloc(struct bp_slots_histogram *hist, enum bp_type_idx type)
{
	hist->count = kcalloc(hw_breakpoint_slots_cached(type), sizeof(*hist->count), GFP_KERNEL);
	return hist->count;
}

static __init void bp_slots_histogram_free(struct bp_slots_histogram *hist)
{
	kfree(hist->count);
}

static __init int init_breakpoint_slots(void)
{
	int i, cpu, err_cpu;

	for (i = 0; i < TYPE_MAX; i++)
		__nr_bp_slots[i] = hw_breakpoint_slots(i);

	for_each_possible_cpu(cpu) {
		for (i = 0; i < TYPE_MAX; i++) {
			struct bp_cpuinfo *info = get_bp_info(cpu, i);

			if (!bp_slots_histogram_alloc(&info->tsk_pinned, i))
				goto err;
		}
	}
	for (i = 0; i < TYPE_MAX; i++) {
		if (!bp_slots_histogram_alloc(&cpu_pinned[i], i))
			goto err;
		if (!bp_slots_histogram_alloc(&tsk_pinned_all[i], i))
			goto err;
	}

	return 0;
err:
	for_each_possible_cpu(err_cpu) {
		for (i = 0; i < TYPE_MAX; i++)
			bp_slots_histogram_free(&get_bp_info(err_cpu, i)->tsk_pinned);
		if (err_cpu == cpu)
			break;
	}
	for (i = 0; i < TYPE_MAX; i++) {
		bp_slots_histogram_free(&cpu_pinned[i]);
		bp_slots_histogram_free(&tsk_pinned_all[i]);
	}

	return -ENOMEM;
}
#endif

static inline void
bp_slots_histogram_add(struct bp_slots_histogram *hist, int old, int val)
{}

static int
bp_slots_histogram_max(struct bp_slots_histogram *hist, enum bp_type_idx type)
{}

static int
bp_slots_histogram_max_merge(struct bp_slots_histogram *hist1, struct bp_slots_histogram *hist2,
			     enum bp_type_idx type)
{}

#ifndef hw_breakpoint_weight
static inline int hw_breakpoint_weight(struct perf_event *bp)
{}
#endif

static inline enum bp_type_idx find_slot_idx(u64 bp_type)
{}

/*
 * Return the maximum number of pinned breakpoints any single task has on this CPU.
 */
static unsigned int max_task_bp_pinned(int cpu, enum bp_type_idx type)
{}
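
/*
 * A minimal sketch of how such a maximum can be derived from a histogram
 * (mirroring what bp_slots_histogram_max() is expected to compute): scan the
 * buckets from the top; the first non-empty bucket N means some owner holds
 * N+1 slots. The helper name is illustrative.
 */
static int histogram_max_sketch(struct bp_slots_histogram *hist, enum bp_type_idx type)
{
	int i;

	for (i = hw_breakpoint_slots_cached(type) - 1; i >= 0; i--) {
		if (atomic_read(&hist->count[i]) > 0)
			return i + 1;
	}

	return 0;
}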

/*
 * Count the number of breakpoints of the same type and same task.
 * The given event must not be on the list.
 *
 * If @cpu is -1, but the result of task_bp_pinned() is not CPU-independent,
 * returns a negative value.
 */
static int task_bp_pinned(int cpu, struct perf_event *bp, enum bp_type_idx type)
{}

static const struct cpumask *cpumask_of_bp(struct perf_event *bp)
{}

/*
 * Returns the max pinned breakpoint slots in a given
 * CPU (cpu > -1) or across all of them (cpu = -1).
 */
static int
max_bp_pinned_slots(struct perf_event *bp, enum bp_type_idx type)
{}

/*
 * Add/remove the given breakpoint in our constraint table
 */
static int
toggle_bp_slot(struct perf_event *bp, bool enable, enum bp_type_idx type, int weight)
{}

/*
 * Constraints to check before allowing this new breakpoint counter.
 *
 * Note: Flexible breakpoints are currently unimplemented, but outlined in the
 * algorithm below for completeness.  The implementation treats flexible as
 * pinned, because there is no guarantee that flexible events are always
 * scheduled before pinned events on the same CPU.
 *
 *  == Non-pinned counter == (Considered as pinned for now)
 *
 *   - If attached to a single cpu, check:
 *
 *       (per_cpu(info->flexible, cpu) || (per_cpu(info->cpu_pinned, cpu)
 *           + max(per_cpu(info->tsk_pinned, cpu)))) < HBP_NUM
 *
 *       -> If there are already non-pinned counters in this cpu, it means
 *          there is already a free slot for them.
 *          Otherwise, we check that the maximum number of per-task
 *          breakpoints (for this cpu) plus the number of per-cpu breakpoints
 *          (for this cpu) doesn't use up every register.
 *
 *   - If attached to every cpu, check:
 *
 *       (per_cpu(info->flexible, *) || (max(per_cpu(info->cpu_pinned, *))
 *           + max(per_cpu(info->tsk_pinned, *)))) < HBP_NUM
 *
 *       -> This is roughly the same, except that we check the number of per-cpu
 *          breakpoints on every cpu and keep the maximum. Same for the per-task
 *          breakpoints.
 *
 *
 * == Pinned counter ==
 *
 *   - If attached to a single cpu, check:
 *
 *       ((per_cpu(info->flexible, cpu) > 1) + per_cpu(info->cpu_pinned, cpu)
 *            + max(per_cpu(info->tsk_pinned, cpu))) < HBP_NUM
 *
 *       -> Same checks as before. But now the flexible counters, if any, must
 *          keep at least one register free (or they will never be scheduled).
 *
 *   - If attached to every cpu, check:
 *
 *       ((per_cpu(info->flexible, *) > 1) + max(per_cpu(info->cpu_pinned, *))
 *            + max(per_cpu(info->tsk_pinned, *))) < HBP_NUM
 */
static int __reserve_bp_slot(struct perf_event *bp, u64 bp_type)
{}
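
/*
 * A minimal sketch of the check described above, using only helpers declared
 * in this file: the maximum number of already-pinned slots (for the matching
 * slot type, across the relevant CPUs) plus this breakpoint's weight must not
 * exceed the number of hardware slots; on success the new breakpoint is
 * accounted in the constraint table. The helper name is illustrative.
 */
static int reserve_bp_slot_sketch(struct perf_event *bp, u64 bp_type)
{
	enum bp_type_idx type;
	int weight;

	if (!constraints_initialized)
		return -ENOMEM;

	type = find_slot_idx(bp_type);
	weight = hw_breakpoint_weight(bp);

	if (max_bp_pinned_slots(bp, type) + weight > hw_breakpoint_slots_cached(type))
		return -ENOSPC;

	return toggle_bp_slot(bp, true, type, weight);
}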

int reserve_bp_slot(struct perf_event *bp)
{}

static void __release_bp_slot(struct perf_event *bp, u64 bp_type)
{}

void release_bp_slot(struct perf_event *bp)
{}

static int __modify_bp_slot(struct perf_event *bp, u64 old_type, u64 new_type)
{}

static int modify_bp_slot(struct perf_event *bp, u64 old_type, u64 new_type)
{}

/*
 * Allow the kernel debugger to reserve and release breakpoint slots without
 * taking a lock, using the dbg_* variants of the reserve and release
 * functions.
 */
int dbg_reserve_bp_slot(struct perf_event *bp)
{}

int dbg_release_bp_slot(struct perf_event *bp)
{}
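
/*
 * A minimal sketch of the lockless debugger path described above: if the
 * constraints lock is already held, recursing into the slot accounting is not
 * safe and the request is refused; otherwise the unlocked __reserve helper is
 * called directly, since the debugger is not preemptible at this point. The
 * helper name is illustrative.
 */
static int dbg_reserve_bp_slot_sketch(struct perf_event *bp)
{
	if (bp_constraints_is_locked(bp))
		return -1;

	/* No locking needed: the debugger is not preemptible here. */
	return __reserve_bp_slot(bp, bp->attr.bp_type);
}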

static int hw_breakpoint_parse(struct perf_event *bp,
			       const struct perf_event_attr *attr,
			       struct arch_hw_breakpoint *hw)
{}

int register_perf_hw_breakpoint(struct perf_event *bp)
{}

/**
 * register_user_hw_breakpoint - register a hardware breakpoint for user space
 * @attr: breakpoint attributes
 * @triggered: callback to trigger when we hit the breakpoint
 * @context: context data that could be used in the @triggered callback
 * @tsk: pointer to 'task_struct' of the process to which the address belongs
 */
struct perf_event *
register_user_hw_breakpoint(struct perf_event_attr *attr,
			    perf_overflow_handler_t triggered,
			    void *context,
			    struct task_struct *tsk)
{}
EXPORT_SYMBOL_GPL(register_user_hw_breakpoint);
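
/*
 * A minimal usage sketch for the API above: set up a 4-byte write watchpoint
 * on a user-space address of @tsk. The address, length and trigger callback
 * are illustrative; on failure the function returns an ERR_PTR(), so callers
 * check with IS_ERR().
 */
static void example_bp_triggered(struct perf_event *bp,
				 struct perf_sample_data *data,
				 struct pt_regs *regs)
{
	pr_info("breakpoint at 0x%llx hit\n", bp->attr.bp_addr);
}

static struct perf_event *example_watch_user_addr(struct task_struct *tsk,
						  unsigned long addr)
{
	struct perf_event_attr attr;

	hw_breakpoint_init(&attr);
	attr.bp_addr = addr;
	attr.bp_len  = HW_BREAKPOINT_LEN_4;
	attr.bp_type = HW_BREAKPOINT_W;

	return register_user_hw_breakpoint(&attr, example_bp_triggered, NULL, tsk);
}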

static void hw_breakpoint_copy_attr(struct perf_event_attr *to,
				    struct perf_event_attr *from)
{}

int
modify_user_hw_breakpoint_check(struct perf_event *bp, struct perf_event_attr *attr,
			        bool check)
{}

/**
 * modify_user_hw_breakpoint - modify a user-space hardware breakpoint
 * @bp: the breakpoint structure to modify
 * @attr: new breakpoint attributes
 */
int modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *attr)
{}
EXPORT_SYMBOL_GPL(modify_user_hw_breakpoint);

/**
 * unregister_hw_breakpoint - unregister a user-space hardware breakpoint
 * @bp: the breakpoint structure to unregister
 */
void unregister_hw_breakpoint(struct perf_event *bp)
{}
EXPORT_SYMBOL_GPL(unregister_hw_breakpoint);

/**
 * register_wide_hw_breakpoint - register a wide breakpoint in the kernel
 * @attr: breakpoint attributes
 * @triggered: callback to trigger when we hit the breakpoint
 * @context: context data that could be used in the @triggered callback
 *
 * @return a set of per_cpu pointers to perf events
 */
struct perf_event * __percpu *
register_wide_hw_breakpoint(struct perf_event_attr *attr,
			    perf_overflow_handler_t triggered,
			    void *context)
{}
EXPORT_SYMBOL_GPL(register_wide_hw_breakpoint);
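
/*
 * A minimal usage sketch for the wide (all-CPU) variant above, modelled on
 * samples/hw_breakpoint/data_breakpoint.c: watch write accesses to a kernel
 * address on every CPU. The watched address and callback are illustrative;
 * the returned __percpu pointer set must later be passed to
 * unregister_wide_hw_breakpoint() to tear the breakpoints down.
 */
static struct perf_event * __percpu *example_wide_bp;

static int example_watch_kernel_addr(void *kaddr, perf_overflow_handler_t triggered)
{
	struct perf_event_attr attr;

	hw_breakpoint_init(&attr);
	attr.bp_addr = (unsigned long)kaddr;
	attr.bp_len  = HW_BREAKPOINT_LEN_4;
	attr.bp_type = HW_BREAKPOINT_W;

	example_wide_bp = register_wide_hw_breakpoint(&attr, triggered, NULL);
	if (IS_ERR((void __force *)example_wide_bp))
		return PTR_ERR((void __force *)example_wide_bp);

	return 0;
}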

/**
 * unregister_wide_hw_breakpoint - unregister a wide breakpoint in the kernel
 * @cpu_events: the per cpu set of events to unregister
 */
void unregister_wide_hw_breakpoint(struct perf_event * __percpu *cpu_events)
{}
EXPORT_SYMBOL_GPL(unregister_wide_hw_breakpoint);

/**
 * hw_breakpoint_is_used - check if breakpoints are currently used
 *
 * Returns: true if breakpoints are used, false otherwise.
 */
bool hw_breakpoint_is_used(void)
{}

static struct notifier_block hw_breakpoint_exceptions_nb = {
	.notifier_call = hw_breakpoint_exceptions_notify,
	/* we need to be notified first */
	.priority = 0x7fffffff
};

static void bp_perf_event_destroy(struct perf_event *event)
{}

static int hw_breakpoint_event_init(struct perf_event *bp)
{}

static int hw_breakpoint_add(struct perf_event *bp, int flags)
{}

static void hw_breakpoint_del(struct perf_event *bp, int flags)
{}

static void hw_breakpoint_start(struct perf_event *bp, int flags)
{}

static void hw_breakpoint_stop(struct perf_event *bp, int flags)
{}

static struct pmu perf_breakpoint = {
	.task_ctx_nr	= perf_sw_context, /* could eventually get its own */

	.event_init	= hw_breakpoint_event_init,
	.add		= hw_breakpoint_add,
	.del		= hw_breakpoint_del,
	.start		= hw_breakpoint_start,
	.stop		= hw_breakpoint_stop,
};

int __init init_hw_breakpoint(void)
{}