#include <linux/hw_breakpoint.h>
#include <linux/atomic.h>
#include <linux/bug.h>
#include <linux/cpu.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/irqflags.h>
#include <linux/kdebug.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/percpu-rwsem.h>
#include <linux/percpu.h>
#include <linux/rhashtable.h>
#include <linux/sched.h>
#include <linux/slab.h>
struct bp_slots_histogram { … };
struct bp_cpuinfo { … };
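/*
 * Both structures above are elided here. A minimal sketch of the shape the
 * accounting below relies on (field names are illustrative assumptions, not
 * necessarily the actual layout):
 *
 *	struct bp_slots_histogram {
 *		atomic_t *count;	-- count[i]: #users pinning i+1 slots
 *	};
 *
 *	struct bp_cpuinfo {
 *		unsigned int cpu_pinned;		-- CPU-bound breakpoints
 *		struct bp_slots_histogram tsk_pinned;	-- task-bound, this CPU
 *	};
 *
 * bp_slots_histogram_alloc() below sizes ->count via kcalloc() with
 * hw_breakpoint_slots_cached(type) elements, i.e. one bucket per possible
 * number of pinned slots.
 */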
static DEFINE_PER_CPU(struct bp_cpuinfo, bp_cpuinfo[TYPE_MAX]);
static struct bp_cpuinfo *get_bp_info(int cpu, enum bp_type_idx type)
{ … }
static struct bp_slots_histogram cpu_pinned[TYPE_MAX];
static struct bp_slots_histogram tsk_pinned_all[TYPE_MAX];
static struct rhltable task_bps_ht;
static const struct rhashtable_params task_bps_ht_params = …;
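/*
 * task_bps_ht maps a target task to the list of breakpoints attached to it.
 * A plausible parameterization of the rhltable, keyed by the event's target
 * task pointer (the field offsets are assumptions for illustration):
 *
 *	static const struct rhashtable_params task_bps_ht_params = {
 *		.head_offset		= offsetof(struct hw_perf_event, bp_list),
 *		.key_offset		= offsetof(struct hw_perf_event, target),
 *		.key_len		= sizeof_field(struct hw_perf_event, target),
 *		.automatic_shrinking	= true,
 *	};
 */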
static bool constraints_initialized __ro_after_init;
DEFINE_STATIC_PERCPU_RWSEM(…);
static inline struct mutex *get_task_bps_mutex(struct perf_event *bp)
{ … }
static struct mutex *bp_constraints_lock(struct perf_event *bp)
{ … }
static void bp_constraints_unlock(struct mutex *tsk_mtx)
{ … }
static bool bp_constraints_is_locked(struct perf_event *bp)
{ … }
static inline void assert_bp_constraints_lock_held(struct perf_event *bp)
{ … }
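/*
 * Locking sketch: task-bound breakpoints serialize against each other on a
 * per-task mutex plus a read-side hold of the per-CPU rwsem, while CPU-bound
 * breakpoints take the rwsem for writing to exclude all other updaters. One
 * plausible shape, assuming the rwsem defined above is named bp_cpuinfo_sem
 * (a hypothetical name, since the definition is elided):
 *
 *	static struct mutex *bp_constraints_lock(struct perf_event *bp)
 *	{
 *		struct mutex *tsk_mtx = get_task_bps_mutex(bp);
 *
 *		if (tsk_mtx) {
 *			mutex_lock(tsk_mtx);
 *			percpu_down_read(&bp_cpuinfo_sem);
 *		} else {
 *			percpu_down_write(&bp_cpuinfo_sem);
 *		}
 *		return tsk_mtx;
 *	}
 *
 * bp_constraints_unlock() then releases in the matching order, keyed on
 * whether the returned tsk_mtx is non-NULL.
 */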
#ifdef hw_breakpoint_slots
static_assert(…);
static inline int hw_breakpoint_slots_cached(int type) { … }
static inline int init_breakpoint_slots(void) { … }
#else
static int __nr_bp_slots[TYPE_MAX] __ro_after_init;
/* Number of breakpoint slots for @type, cached once at init time. */
static inline int hw_breakpoint_slots_cached(int type)
{
	return __nr_bp_slots[type];
}
static __init bool
bp_slots_histogram_alloc(struct bp_slots_histogram *hist, enum bp_type_idx type)
{
	hist->count = kcalloc(hw_breakpoint_slots_cached(type), sizeof(*hist->count), GFP_KERNEL);
	return hist->count; /* non-NULL allocation converts to true */
}
static __init void bp_slots_histogram_free(struct bp_slots_histogram *hist)
{
kfree(hist->count);
}
static __init int init_breakpoint_slots(void)
{
	int i, cpu, err_cpu;

	/* Cache the slot counts per type; avoids arch callbacks in hot paths. */
	for (i = 0; i < TYPE_MAX; i++)
		__nr_bp_slots[i] = hw_breakpoint_slots(i);

	for_each_possible_cpu(cpu) {
		for (i = 0; i < TYPE_MAX; i++) {
			struct bp_cpuinfo *info = get_bp_info(cpu, i);

			if (!bp_slots_histogram_alloc(&info->tsk_pinned, i))
				goto err;
		}
	}
	for (i = 0; i < TYPE_MAX; i++) {
		if (!bp_slots_histogram_alloc(&cpu_pinned[i], i))
			goto err;
		if (!bp_slots_histogram_alloc(&tsk_pinned_all[i], i))
			goto err;
	}

	return 0;
err:
	/*
	 * Unwind per-CPU allocations up to and including the CPU that failed;
	 * never-allocated histograms are zero-initialized, so freeing them is
	 * a harmless kfree(NULL). If the failure happened in the global loop
	 * above, @cpu already ran past the last possible CPU, the break below
	 * never fires, and every CPU gets cleaned up.
	 */
	for_each_possible_cpu(err_cpu) {
		for (i = 0; i < TYPE_MAX; i++)
			bp_slots_histogram_free(&get_bp_info(err_cpu, i)->tsk_pinned);
		if (err_cpu == cpu)
			break;
	}
	for (i = 0; i < TYPE_MAX; i++) {
		bp_slots_histogram_free(&cpu_pinned[i]);
		bp_slots_histogram_free(&tsk_pinned_all[i]);
	}

	return -ENOMEM;
}
#endif
static inline void
bp_slots_histogram_add(struct bp_slots_histogram *hist, int old, int val)
{ … }
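/*
 * Histogram update sketch: a user moving from @old pinned slots to
 * @old + @val pinned slots leaves bucket old-1 and enters bucket old+val-1
 * (bucket i counts users pinning i+1 slots, so "0 pinned" occupies no
 * bucket):
 *
 *	const int old_idx = old - 1;
 *	const int new_idx = old_idx + val;
 *
 *	if (old_idx >= 0)
 *		atomic_dec(&hist->count[old_idx]);
 *	if (new_idx >= 0)
 *		atomic_inc(&hist->count[new_idx]);
 *
 * The plain atomics are illustrative; the real body may use relaxed
 * variants and WARN on underflow/overflow.
 */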
static int
bp_slots_histogram_max(struct bp_slots_histogram *hist, enum bp_type_idx type)
{ … }
static int
bp_slots_histogram_max_merge(struct bp_slots_histogram *hist1, struct bp_slots_histogram *hist2,
enum bp_type_idx type)
{ … }
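/*
 * Finding the maximum is a scan from the top bucket down; the first
 * non-empty bucket i means some user pins i+1 slots. Sketch:
 *
 *	for (i = hw_breakpoint_slots_cached(type) - 1; i >= 0; i--) {
 *		if (atomic_read(&hist->count[i]) > 0)
 *			return i + 1;
 *	}
 *	return 0;
 *
 * The _merge variant presumably runs the same scan over the per-bucket
 * sum of both histograms.
 */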
#ifndef hw_breakpoint_weight
static inline int hw_breakpoint_weight(struct perf_event *bp)
{ … }
#endif
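/*
 * Architectures that do not override hw_breakpoint_weight() presumably
 * charge a flat one slot per breakpoint, i.e. the fallback body is likely
 * just:
 *
 *	return 1;
 */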
static inline enum bp_type_idx find_slot_idx(u64 bp_type)
{ … }
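/*
 * Slot-pool selection sketch: data (read/write) and instruction breakpoints
 * draw from separate pools, so the mapping is presumably:
 *
 *	if (bp_type & HW_BREAKPOINT_RW)
 *		return TYPE_DATA;
 *	return TYPE_INST;
 */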
static unsigned int max_task_bp_pinned(int cpu, enum bp_type_idx type)
{ … }
static int task_bp_pinned(int cpu, struct perf_event *bp, enum bp_type_idx type)
{ … }
static const struct cpumask *cpumask_of_bp(struct perf_event *bp)
{ … }
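/*
 * A breakpoint pinned to one CPU constrains only that CPU; a task-bound
 * breakpoint can migrate with the task, so it constrains every possible
 * CPU. Sketch:
 *
 *	if (bp->cpu >= 0)
 *		return cpumask_of(bp->cpu);
 *	return cpu_possible_mask;
 */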
static int
max_bp_pinned_slots(struct perf_event *bp, enum bp_type_idx type)
{ … }
static int
toggle_bp_slot(struct perf_event *bp, bool enable, enum bp_type_idx type, int weight)
{ … }
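/*
 * toggle_bp_slot() is the single point where constraint state changes.
 * For a CPU-bound breakpoint it adjusts cpu_pinned on the affected CPUs by
 * +/-@weight; for a task-bound one it (un)links the event in task_bps_ht
 * and shifts the tsk_pinned histograms, conceptually:
 *
 *	int old = task_bp_pinned(cpu, bp, type);
 *
 *	bp_slots_histogram_add(&get_bp_info(cpu, type)->tsk_pinned,
 *			       old, enable ? weight : -weight);
 *
 * This is a sketch of the intended effect, not the literal body.
 */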
static int __reserve_bp_slot(struct perf_event *bp, u64 bp_type)
{ … }
int reserve_bp_slot(struct perf_event *bp)
{ … }
static void __release_bp_slot(struct perf_event *bp, u64 bp_type)
{ … }
void release_bp_slot(struct perf_event *bp)
{ … }
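/*
 * The public reserve/release pair is presumably a thin locking wrapper
 * around the __-prefixed workers, e.g.:
 *
 *	int reserve_bp_slot(struct perf_event *bp)
 *	{
 *		struct mutex *mtx = bp_constraints_lock(bp);
 *		int ret = __reserve_bp_slot(bp, bp->attr.bp_type);
 *
 *		bp_constraints_unlock(mtx);
 *		return ret;
 *	}
 *
 * with release_bp_slot() mirroring this around __release_bp_slot().
 */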
static int __modify_bp_slot(struct perf_event *bp, u64 old_type, u64 new_type)
{ … }
static int modify_bp_slot(struct perf_event *bp, u64 old_type, u64 new_type)
{ … }
int dbg_reserve_bp_slot(struct perf_event *bp)
{ … }
int dbg_release_bp_slot(struct perf_event *bp)
{ … }
static int hw_breakpoint_parse(struct perf_event *bp,
const struct perf_event_attr *attr,
struct arch_hw_breakpoint *hw)
{ … }
int register_perf_hw_breakpoint(struct perf_event *bp)
{ … }
struct perf_event *
register_user_hw_breakpoint(struct perf_event_attr *attr,
perf_overflow_handler_t triggered,
void *context,
struct task_struct *tsk)
{ … }
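/*
 * Usage sketch for the user-breakpoint API (the handler, task and address
 * are hypothetical; see samples/hw_breakpoint/ for a complete example):
 *
 *	struct perf_event_attr attr;
 *	struct perf_event *bp;
 *
 *	hw_breakpoint_init(&attr);
 *	attr.bp_addr = (unsigned long)watch_me;	-- hypothetical target
 *	attr.bp_len  = HW_BREAKPOINT_LEN_4;
 *	attr.bp_type = HW_BREAKPOINT_W;
 *
 *	bp = register_user_hw_breakpoint(&attr, my_handler, NULL, tsk);
 *	if (IS_ERR(bp))
 *		return PTR_ERR(bp);
 */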
EXPORT_SYMBOL_GPL(…);
static void hw_breakpoint_copy_attr(struct perf_event_attr *to,
struct perf_event_attr *from)
{ … }
int
modify_user_hw_breakpoint_check(struct perf_event *bp, struct perf_event_attr *attr,
bool check)
{ … }
int modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *attr)
{ … }
EXPORT_SYMBOL_GPL(…);
void unregister_hw_breakpoint(struct perf_event *bp)
{ … }
EXPORT_SYMBOL_GPL(…);
struct perf_event * __percpu *
register_wide_hw_breakpoint(struct perf_event_attr *attr,
perf_overflow_handler_t triggered,
void *context)
{ … }
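/*
 * The wide variant installs one event per CPU and returns a per-CPU array
 * of event pointers. Usage sketch (attr and handler as above; note the
 * __force casts needed to IS_ERR-check a __percpu pointer):
 *
 *	struct perf_event * __percpu *events;
 *
 *	events = register_wide_hw_breakpoint(&attr, my_handler, NULL);
 *	if (IS_ERR((void __force *)events))
 *		return PTR_ERR((void __force *)events);
 *	...
 *	unregister_wide_hw_breakpoint(events);
 */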
EXPORT_SYMBOL_GPL(…);
void unregister_wide_hw_breakpoint(struct perf_event * __percpu *cpu_events)
{ … }
EXPORT_SYMBOL_GPL(…);
bool hw_breakpoint_is_used(void)
{ … }
static struct notifier_block hw_breakpoint_exceptions_nb = …;
static void bp_perf_event_destroy(struct perf_event *event)
{ … }
static int hw_breakpoint_event_init(struct perf_event *bp)
{ … }
static int hw_breakpoint_add(struct perf_event *bp, int flags)
{ … }
static void hw_breakpoint_del(struct perf_event *bp, int flags)
{ … }
static void hw_breakpoint_start(struct perf_event *bp, int flags)
{ … }
static void hw_breakpoint_stop(struct perf_event *bp, int flags)
{ … }
static struct pmu perf_breakpoint = …;
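/*
 * The breakpoint PMU presumably just wires up the callbacks defined above;
 * a sketch (the exact field set is an assumption):
 *
 *	static struct pmu perf_breakpoint = {
 *		.task_ctx_nr	= perf_sw_context,
 *		.event_init	= hw_breakpoint_event_init,
 *		.add		= hw_breakpoint_add,
 *		.del		= hw_breakpoint_del,
 *		.start		= hw_breakpoint_start,
 *		.stop		= hw_breakpoint_stop,
 *	};
 */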
int __init init_hw_breakpoint(void)
{ … }
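/*
 * A plausible init sequence, using only pieces declared in this file: set
 * up the task-to-breakpoints table, cache slot counts and allocate the
 * histograms, then register the PMU and the exception notifier:
 *
 *	int ret = rhltable_init(&task_bps_ht, &task_bps_ht_params);
 *
 *	if (ret)
 *		return ret;
 *	ret = init_breakpoint_slots();
 *	if (ret)
 *		return ret;
 *	constraints_initialized = true;
 *	perf_pmu_register(&perf_breakpoint, "breakpoint", PERF_TYPE_BREAKPOINT);
 *	return register_die_notifier(&hw_breakpoint_exceptions_nb);
 */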