linux/arch/x86/events/core.c

/*
 * Performance events x86 architecture code
 *
 *  Copyright (C) 2008 Thomas Gleixner <[email protected]>
 *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2009 Jaswinder Singh Rajput
 *  Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
 *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra
 *  Copyright (C) 2009 Intel Corporation, <[email protected]>
 *  Copyright (C) 2009 Google, Inc., Stephane Eranian
 *
 *  For licencing details see kernel-base/COPYING
 */

#include <linux/perf_event.h>
#include <linux/capability.h>
#include <linux/notifier.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/kdebug.h>
#include <linux/sched/mm.h>
#include <linux/sched/clock.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/bitops.h>
#include <linux/device.h>
#include <linux/nospec.h>
#include <linux/static_call.h>

#include <asm/apic.h>
#include <asm/stacktrace.h>
#include <asm/nmi.h>
#include <asm/smp.h>
#include <asm/alternative.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/timer.h>
#include <asm/desc.h>
#include <asm/ldt.h>
#include <asm/unwind.h>

#include "perf_event.h"

struct x86_pmu x86_pmu __read_mostly;
static struct pmu pmu;

DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) =;

DEFINE_STATIC_KEY_FALSE(rdpmc_never_available_key);
DEFINE_STATIC_KEY_FALSE(rdpmc_always_available_key);
DEFINE_STATIC_KEY_FALSE(perf_is_hybrid);

/*
 * This here uses DEFINE_STATIC_CALL_NULL() to get a static_call defined
 * from just a typename, as opposed to an actual function.
 */
DEFINE_STATIC_CALL_NULL(x86_pmu_handle_irq,  *x86_pmu.handle_irq);
DEFINE_STATIC_CALL_NULL(x86_pmu_disable_all, *x86_pmu.disable_all);
DEFINE_STATIC_CALL_NULL(x86_pmu_enable_all,  *x86_pmu.enable_all);
DEFINE_STATIC_CALL_NULL(x86_pmu_enable,      *x86_pmu.enable);
DEFINE_STATIC_CALL_NULL(x86_pmu_disable,     *x86_pmu.disable);

DEFINE_STATIC_CALL_NULL(x86_pmu_assign, *x86_pmu.assign);

DEFINE_STATIC_CALL_NULL(x86_pmu_add,  *x86_pmu.add);
DEFINE_STATIC_CALL_NULL(x86_pmu_del,  *x86_pmu.del);
DEFINE_STATIC_CALL_NULL(x86_pmu_read, *x86_pmu.read);

DEFINE_STATIC_CALL_NULL(x86_pmu_set_period,   *x86_pmu.set_period);
DEFINE_STATIC_CALL_NULL(x86_pmu_update,       *x86_pmu.update);
DEFINE_STATIC_CALL_NULL(x86_pmu_limit_period, *x86_pmu.limit_period);

DEFINE_STATIC_CALL_NULL(x86_pmu_schedule_events,       *x86_pmu.schedule_events);
DEFINE_STATIC_CALL_NULL(x86_pmu_get_event_constraints, *x86_pmu.get_event_constraints);
DEFINE_STATIC_CALL_NULL(x86_pmu_put_event_constraints, *x86_pmu.put_event_constraints);

DEFINE_STATIC_CALL_NULL(x86_pmu_start_scheduling,  *x86_pmu.start_scheduling);
DEFINE_STATIC_CALL_NULL(x86_pmu_commit_scheduling, *x86_pmu.commit_scheduling);
DEFINE_STATIC_CALL_NULL(x86_pmu_stop_scheduling,   *x86_pmu.stop_scheduling);

DEFINE_STATIC_CALL_NULL(x86_pmu_sched_task,    *x86_pmu.sched_task);
DEFINE_STATIC_CALL_NULL(x86_pmu_swap_task_ctx, *x86_pmu.swap_task_ctx);

DEFINE_STATIC_CALL_NULL(x86_pmu_drain_pebs,   *x86_pmu.drain_pebs);
DEFINE_STATIC_CALL_NULL(x86_pmu_pebs_aliases, *x86_pmu.pebs_aliases);

DEFINE_STATIC_CALL_NULL(x86_pmu_filter, *x86_pmu.filter);

/*
 * This one is magic: it will get called even when PMU init fails (because
 * there is no PMU), in which case it should simply return NULL.
 */
DEFINE_STATIC_CALL_RET0(x86_pmu_guest_get_msrs, *x86_pmu.guest_get_msrs);
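
/*
 * Illustrative sketch (not code from this file): once a vendor struct
 * x86_pmu has been selected, each static call above is pointed at the
 * corresponding x86_pmu method and callers then go through the patched
 * trampoline.  The real wiring lives in x86_pmu_static_call_update()
 * further down; example_static_call_wiring() below is hypothetical.
 */
#if 0	/* illustrative sketch only, never compiled */
static void example_static_call_wiring(void)
{
	/* Point the trampolines at the vendor implementation: */
	static_call_update(x86_pmu_handle_irq, x86_pmu.handle_irq);
	static_call_update(x86_pmu_enable_all, x86_pmu.enable_all);

	/* Callers invoke the (now patched) trampoline directly: */
	static_call(x86_pmu_enable_all)(0);

	/* The _cond form degrades to a NOP while the target is still NULL: */
	static_call_cond(x86_pmu_sched_task)(NULL, true);
}
#endif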

u64 __read_mostly hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX];
u64 __read_mostly hw_cache_extra_regs
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX];

/*
 * Propagate event elapsed time into the generic event.
 * Can only be executed on the CPU where the event is active.
 * Returns the delta events processed.
 */
u64 x86_perf_event_update(struct perf_event *event)
{}
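
/*
 * Simplified sketch (not necessarily the body above) of the usual delta
 * computation: re-read the free-running counter via RDPMC until prev_count
 * is updated atomically, sign-extend the cntval_bits-wide difference and
 * fold it into event->count / period_left.  example_event_update() is
 * hypothetical.
 */
#if 0	/* illustrative sketch only, never compiled */
static u64 example_event_update(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	int shift = 64 - x86_pmu.cntval_bits;
	u64 prev_raw_count, new_raw_count;
	s64 delta;

again:
	prev_raw_count = local64_read(&hwc->prev_count);
	rdpmcl(hwc->event_base_rdpmc, new_raw_count);

	/* An NMI may have updated prev_count under us; retry if so. */
	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
			    new_raw_count) != prev_raw_count)
		goto again;

	/* Sign-extend the narrow hardware delta to 64 bit. */
	delta = (new_raw_count << shift) - (prev_raw_count << shift);
	delta >>= shift;

	local64_add(delta, &event->count);
	local64_sub(delta, &hwc->period_left);

	return new_raw_count;
}
#endif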

/*
 * Find and validate any extra registers to set up.
 */
static int x86_pmu_extra_regs(u64 config, struct perf_event *event)
{}

static atomic_t active_events;
static atomic_t pmc_refcount;
static DEFINE_MUTEX(pmc_reserve_mutex);

#ifdef CONFIG_X86_LOCAL_APIC

static inline u64 get_possible_counter_mask(void)
{}

static bool reserve_pmc_hardware(void)
{}

static void release_pmc_hardware(void)
{}

#else

static bool reserve_pmc_hardware(void) { return true; }
static void release_pmc_hardware(void) {}

#endif

bool check_hw_exists(struct pmu *pmu, unsigned long *cntr_mask,
		     unsigned long *fixed_cntr_mask)
{}

static void hw_perf_event_destroy(struct perf_event *event)
{}

void hw_perf_lbr_event_destroy(struct perf_event *event)
{}

static inline int x86_pmu_initialized(void)
{}

static inline int
set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event *event)
{}

int x86_reserve_hardware(void)
{}

void x86_release_hardware(void)
{}

/*
 * Check if we can create an event of a certain type (i.e. that no
 * conflicting events are present).
 */
int x86_add_exclusive(unsigned int what)
{}

void x86_del_exclusive(unsigned int what)
{}

int x86_setup_perfctr(struct perf_event *event)
{}

/*
 * check that branch_sample_type is compatible with
 * settings needed for precise_ip > 1 which implies
 * using the LBR to capture ALL taken branches at the
 * priv levels of the measurement
 */
static inline int precise_br_compat(struct perf_event *event)
{}
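
/*
 * Hedged sketch of the compatibility rule described above: the branch
 * filter must ask for ALL taken branches and its USER/KERNEL bits must
 * mirror the event's exclude_user/exclude_kernel settings.  Not
 * necessarily the exact body of precise_br_compat().
 */
#if 0	/* illustrative sketch only, never compiled */
static int example_precise_br_compat(struct perf_event *event)
{
	u64 m = PERF_SAMPLE_BRANCH_KERNEL | PERF_SAMPLE_BRANCH_USER;
	u64 b = 0;

	/* must capture all taken branches */
	if (!(event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_ANY))
		return 0;

	m &= event->attr.branch_sample_type;

	if (!event->attr.exclude_user)
		b |= PERF_SAMPLE_BRANCH_USER;
	if (!event->attr.exclude_kernel)
		b |= PERF_SAMPLE_BRANCH_KERNEL;

	/* PERF_SAMPLE_BRANCH_HV is not supported on x86, so it is ignored */

	return m == b;
}
#endif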

int x86_pmu_max_precise(void)
{}

int x86_pmu_hw_config(struct perf_event *event)
{}

/*
 * Setup the hardware configuration for a given attr_type
 */
static int __x86_pmu_event_init(struct perf_event *event)
{}

void x86_pmu_disable_all(void)
{}

struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr, void *data)
{}
EXPORT_SYMBOL_GPL(perf_guest_get_msrs);

/*
 * There may be PMI landing after enabled=0. The PMI hitting could be before or
 * after disable_all.
 *
 * If PMI hits before disable_all, the PMU will be disabled in the NMI handler.
 * It will not be re-enabled in the NMI handler again, because enabled=0. After
 * handling the NMI, disable_all will be called, which will not change the
 * state either. If PMI hits after disable_all, the PMU is already disabled
 * before entering NMI handler. The NMI handler will not change the state
 * either.
 *
 * So either situation is harmless.
 */
static void x86_pmu_disable(struct pmu *pmu)
{}

void x86_pmu_enable_all(int added)
{}

static inline int is_x86_event(struct perf_event *event)
{}

struct pmu *x86_get_pmu(unsigned int cpu)
{}
/*
 * Event scheduler state:
 *
 * Assign events iterating over all events and counters, beginning
 * with events with least weights first. Keep the current iterator
 * state in struct sched_state.
 */
struct sched_state {};

/* Total max is X86_PMC_IDX_MAX, but we are O(n!) limited */
#define SCHED_STATES_MAX	2

struct perf_sched {};

/*
 * Initialize iterator that runs through all events and counters.
 */
static void perf_sched_init(struct perf_sched *sched, struct event_constraint **constraints,
			    int num, int wmin, int wmax, int gpmax)
{}

static void perf_sched_save_state(struct perf_sched *sched)
{}

static bool perf_sched_restore_state(struct perf_sched *sched)
{}

/*
 * Select a counter for the current event to schedule. Return true on
 * success.
 */
static bool __perf_sched_find_counter(struct perf_sched *sched)
{}

static bool perf_sched_find_counter(struct perf_sched *sched)
{}

/*
 * Go through all unassigned events and find the next one to schedule.
 * Take events with the least weight first. Return true on success.
 */
static bool perf_sched_next_event(struct perf_sched *sched)
{}

/*
 * Assign a counter for each event.
 */
int perf_assign_events(struct event_constraint **constraints, int n,
			int wmin, int wmax, int gpmax, int *assign)
{}
EXPORT_SYMBOL_GPL(perf_assign_events);
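
/*
 * One plausible shape for the core of perf_assign_events() (sketch only;
 * the sched.state field names are assumptions, not taken from this file):
 * pick a counter for the current least-weight event, record it, then move
 * on, letting perf_sched_find_counter() backtrack through saved states.
 */
#if 0	/* illustrative sketch only, never compiled */
	int unsched = 0;
	struct perf_sched sched;

	perf_sched_init(&sched, constraints, n, wmin, wmax, gpmax);

	do {
		if (!perf_sched_find_counter(&sched)) {
			unsched = 1;	/* no counter fits this event */
			break;
		}
		if (assign)
			assign[sched.state.event] = sched.state.counter;
	} while (perf_sched_next_event(&sched));

	return unsched;
#endif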

int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
{}

static int add_nr_metric_event(struct cpu_hw_events *cpuc,
			       struct perf_event *event)
{}

static void del_nr_metric_event(struct cpu_hw_events *cpuc,
				struct perf_event *event)
{}

static int collect_event(struct cpu_hw_events *cpuc, struct perf_event *event,
			 int max_count, int n)
{}

/*
 * dogrp: true if we must collect sibling events (the whole group)
 * returns the total number of events on success, or a negative error code
 */
static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader, bool dogrp)
{}

static inline void x86_assign_hw_event(struct perf_event *event,
				struct cpu_hw_events *cpuc, int i)
{}

/**
 * x86_perf_rdpmc_index - Return PMC counter used for event
 * @event: the perf_event to which the PMC counter was assigned
 *
 * The counter assigned to this performance event may change if interrupts
 * are enabled. This counter should thus never be used while interrupts are
 * enabled. Before this function is used to obtain the assigned counter the
 * event should be checked for validity using, for example,
 * perf_event_read_local(), within the same interrupt disabled section in
 * which this counter is planned to be used.
 *
 * Return: The index of the performance monitoring counter assigned to
 * @event.
 */
int x86_perf_rdpmc_index(struct perf_event *event)
{}
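
/*
 * Hedged usage sketch for the rules spelled out in the kernel-doc above:
 * validate the event and fetch its counter index inside a single
 * IRQ-disabled section.  example_read_pmc_index() is hypothetical.
 */
#if 0	/* illustrative sketch only, never compiled */
static int example_read_pmc_index(struct perf_event *event, int *index)
{
	unsigned long flags;
	u64 value;
	int ret;

	local_irq_save(flags);
	/* Confirm the event is live on this CPU before trusting the index. */
	ret = perf_event_read_local(event, &value, NULL, NULL);
	if (!ret)
		*index = x86_perf_rdpmc_index(event);
	local_irq_restore(flags);

	return ret;
}
#endif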

static inline int match_prev_assignment(struct hw_perf_event *hwc,
					struct cpu_hw_events *cpuc,
					int i)
{}

static void x86_pmu_start(struct perf_event *event, int flags);

static void x86_pmu_enable(struct pmu *pmu)
{}

DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left);

/*
 * Set the next IRQ period, based on the hwc->period_left value.
 * To be called with the event disabled in hw:
 */
int x86_perf_event_set_period(struct perf_event *event)
{}
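
/*
 * Sketch of the reload programming implied by the comment above: the
 * counter is written with the negated (clamped) period_left so that it
 * counts up and overflows, raising a PMI, after that many events.
 * Simplified; not necessarily the exact body of
 * x86_perf_event_set_period().
 */
#if 0	/* illustrative sketch only, never compiled */
static int example_set_period(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	s64 left = local64_read(&hwc->period_left);
	s64 period = hwc->sample_period;
	int ret = 0;

	/* Catch up if we have fallen behind the intended period. */
	if (unlikely(left <= 0)) {
		left += period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (left > x86_pmu.max_period)
		left = x86_pmu.max_period;

	local64_set(&hwc->prev_count, (u64)-left);
	wrmsrl(hwc->event_base, (u64)(-left) & x86_pmu.cntval_mask);

	perf_event_update_userpage(event);
	return ret;
}
#endif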

void x86_pmu_enable_event(struct perf_event *event)
{}

/*
 * Add a single event to the PMU.
 *
 * The event is added to the group of enabled events
 * but only if it can be scheduled with existing events.
 */
static int x86_pmu_add(struct perf_event *event, int flags)
{}

static void x86_pmu_start(struct perf_event *event, int flags)
{}

void perf_event_print_debug(void)
{}

void x86_pmu_stop(struct perf_event *event, int flags)
{}

static void x86_pmu_del(struct perf_event *event, int flags)
{}

int x86_pmu_handle_irq(struct pt_regs *regs)
{}

void perf_events_lapic_init(void)
{}

static int
perf_event_nmi_handler(unsigned int cmd, struct pt_regs *regs)
{}
NOKPROBE_SYMBOL(perf_event_nmi_handler);

struct event_constraint emptyconstraint;
struct event_constraint unconstrained;

static int x86_pmu_prepare_cpu(unsigned int cpu)
{}

static int x86_pmu_dead_cpu(unsigned int cpu)
{}

static int x86_pmu_online_cpu(unsigned int cpu)
{}

static int x86_pmu_starting_cpu(unsigned int cpu)
{}

static int x86_pmu_dying_cpu(unsigned int cpu)
{}

static void __init pmu_check_apic(void)
{}

static struct attribute_group x86_pmu_format_group __ro_after_init =;

ssize_t events_sysfs_show(struct device *dev, struct device_attribute *attr, char *page)
{}
EXPORT_SYMBOL_GPL(events_sysfs_show);

ssize_t events_ht_sysfs_show(struct device *dev, struct device_attribute *attr,
			  char *page)
{}

ssize_t events_hybrid_sysfs_show(struct device *dev,
				 struct device_attribute *attr,
				 char *page)
{}
EXPORT_SYMBOL_GPL(events_hybrid_sysfs_show);

EVENT_ATTR(cpu-cycles,			CPU_CYCLES		);
EVENT_ATTR(instructions,		INSTRUCTIONS		);
EVENT_ATTR(cache-references,		CACHE_REFERENCES	);
EVENT_ATTR(cache-misses, 		CACHE_MISSES		);
EVENT_ATTR(branch-instructions,		BRANCH_INSTRUCTIONS	);
EVENT_ATTR(branch-misses,		BRANCH_MISSES		);
EVENT_ATTR(bus-cycles,			BUS_CYCLES		);
EVENT_ATTR(stalled-cycles-frontend,	STALLED_CYCLES_FRONTEND	);
EVENT_ATTR(stalled-cycles-backend,	STALLED_CYCLES_BACKEND	);
EVENT_ATTR(ref-cycles,			REF_CPU_CYCLES		);

static struct attribute *empty_attrs;

static struct attribute *events_attr[] =;

/*
 * Remove all undefined events (x86_pmu.event_map(id) == 0)
 * from the events_attr attributes.
 */
static umode_t
is_visible(struct kobject *kobj, struct attribute *attr, int idx)
{}
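
/*
 * Minimal sketch of the visibility rule stated above (ignores the bounds
 * check and string-valued attributes handled by the real callback).
 */
#if 0	/* illustrative sketch only, never compiled */
static umode_t example_events_is_visible(struct kobject *kobj,
					 struct attribute *attr, int idx)
{
	/* Hide the attribute when the PMU has no mapping for this event. */
	return x86_pmu.event_map(idx) ? attr->mode : 0;
}
#endif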

static struct attribute_group x86_pmu_events_group __ro_after_init =;

ssize_t x86_event_sysfs_show(char *page, u64 config, u64 event)
{}

static struct attribute_group x86_pmu_attr_group;
static struct attribute_group x86_pmu_caps_group;

static void x86_pmu_static_call_update(void)
{}

static void _x86_pmu_read(struct perf_event *event)
{}

void x86_pmu_show_pmu_cap(struct pmu *pmu)
{}

static int __init init_hw_perf_events(void)
{}
early_initcall(init_hw_perf_events);

static void x86_pmu_read(struct perf_event *event)
{}

/*
 * Start group events scheduling transaction
 * Set the flag to make pmu::enable() not perform the
 * schedulability test, it will be performed at commit time
 *
 * We only support PERF_PMU_TXN_ADD transactions. Save the
 * transaction flags but otherwise ignore non-PERF_PMU_TXN_ADD
 * transactions.
 */
static void x86_pmu_start_txn(struct pmu *pmu, unsigned int txn_flags)
{}

/*
 * Stop group events scheduling transaction
 * Clear the flag and pmu::enable() will perform the
 * schedulability test.
 */
static void x86_pmu_cancel_txn(struct pmu *pmu)
{}

/*
 * Commit group events scheduling transaction
 * Perform the group schedulability test as a whole
 * Return 0 if success
 *
 * Does not cancel the transaction on failure; expects the caller to do this.
 */
static int x86_pmu_commit_txn(struct pmu *pmu)
{}
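
/*
 * Sketch of how the generic core drives the three transaction hooks when
 * scheduling a group (calling pattern only, not code from this file;
 * example_group_sched_in() is hypothetical).
 */
#if 0	/* illustrative sketch only, never compiled */
static int example_group_sched_in(struct pmu *pmu, struct perf_event *leader)
{
	struct perf_event *sibling;

	/* Defer the schedulability test until commit time. */
	pmu->start_txn(pmu, PERF_PMU_TXN_ADD);

	if (pmu->add(leader, PERF_EF_START))
		goto error;
	for_each_sibling_event(sibling, leader) {
		if (pmu->add(sibling, PERF_EF_START))
			goto error;
	}

	/* Runs the whole-group schedulability test in one go. */
	if (!pmu->commit_txn(pmu))
		return 0;

error:
	pmu->cancel_txn(pmu);
	return -EAGAIN;
}
#endif
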
/*
 * a fake_cpuc is used to validate event groups. Due to
 * the extra reg logic, we need to also allocate a fake
 * per_core and per_cpu structure. Otherwise, group events
 * using extra reg may conflict without the kernel being
 * able to catch this when the last event gets added to
 * the group.
 */
static void free_fake_cpuc(struct cpu_hw_events *cpuc)
{}

static struct cpu_hw_events *allocate_fake_cpuc(struct pmu *event_pmu)
{}

/*
 * validate that we can schedule this event
 */
static int validate_event(struct perf_event *event)
{}

/*
 * validate a single event group
 *
 * validation includes:
 *	- check events are compatible with each other
 *	- events do not compete for the same counter
 *	- number of events <= number of counters
 *
 * validation ensures the group can be loaded onto the
 * PMU if it was the only group available.
 */
static int validate_group(struct perf_event *event)
{}
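
/*
 * Hedged sketch of the validation steps listed above, using the fake cpuc
 * helpers: collect the existing group plus the new event, then run the
 * scheduler without actually assigning counters.  Field and helper usage
 * is an assumption, not taken from this file.
 */
#if 0	/* illustrative sketch only, never compiled */
static int example_validate_group(struct perf_event *event)
{
	struct perf_event *leader = event->group_leader;
	struct cpu_hw_events *fake_cpuc;
	int ret = -EINVAL, n;

	fake_cpuc = allocate_fake_cpuc(event->pmu);
	if (IS_ERR(fake_cpuc))
		return PTR_ERR(fake_cpuc);

	n = collect_events(fake_cpuc, leader, true);	/* whole group */
	if (n < 0)
		goto out;

	fake_cpuc->n_events = n;
	n = collect_events(fake_cpuc, event, false);	/* plus the new event */
	if (n < 0)
		goto out;

	fake_cpuc->n_events = 0;
	ret = x86_pmu.schedule_events(fake_cpuc, n, NULL);	/* dry run */

out:
	free_fake_cpuc(fake_cpuc);
	return ret;
}
#endif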

static int x86_pmu_event_init(struct perf_event *event)
{}

void perf_clear_dirty_counters(void)
{}

static void x86_pmu_event_mapped(struct perf_event *event, struct mm_struct *mm)
{}

static void x86_pmu_event_unmapped(struct perf_event *event, struct mm_struct *mm)
{}

static int x86_pmu_event_idx(struct perf_event *event)
{}

static ssize_t get_attr_rdpmc(struct device *cdev,
			      struct device_attribute *attr,
			      char *buf)
{}

static ssize_t set_attr_rdpmc(struct device *cdev,
			      struct device_attribute *attr,
			      const char *buf, size_t count)
{}

static DEVICE_ATTR(rdpmc, S_IRUSR | S_IWUSR, get_attr_rdpmc, set_attr_rdpmc);

static struct attribute *x86_pmu_attrs[] =;

static struct attribute_group x86_pmu_attr_group __ro_after_init =;

static ssize_t max_precise_show(struct device *cdev,
				  struct device_attribute *attr,
				  char *buf)
{}

static DEVICE_ATTR_RO(max_precise);

static struct attribute *x86_pmu_caps_attrs[] =;

static struct attribute_group x86_pmu_caps_group __ro_after_init =;

static const struct attribute_group *x86_pmu_attr_groups[] =;

static void x86_pmu_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in)
{}

static void x86_pmu_swap_task_ctx(struct perf_event_pmu_context *prev_epc,
				  struct perf_event_pmu_context *next_epc)
{}

void perf_check_microcode(void)
{}

static int x86_pmu_check_period(struct perf_event *event, u64 value)
{}

static int x86_pmu_aux_output_match(struct perf_event *event)
{}

static bool x86_pmu_filter(struct pmu *pmu, int cpu)
{}

static struct pmu pmu =;

void arch_perf_update_userpage(struct perf_event *event,
			       struct perf_event_mmap_page *userpg, u64 now)
{}

/*
 * Determine whether the regs were taken from an irq/exception handler rather
 * than from perf_arch_fetch_caller_regs().
 */
static bool perf_hw_regs(struct pt_regs *regs)
{}

void
perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
{}

static inline int
valid_user_frame(const void __user *fp, unsigned long size)
{}

static unsigned long get_segment_base(unsigned int segment)
{}

#ifdef CONFIG_IA32_EMULATION

#include <linux/compat.h>

static inline int
perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry_ctx *entry)
{}
#else
static inline int
perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry_ctx *entry)
{
	return 0;
}
#endif

void
perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
{}

/*
 * Deal with code segment offsets for the various execution modes:
 *
 *   VM86 - the good olde 16 bit days, where the linear address is
 *          20 bits and we use regs->ip + 0x10 * regs->cs.
 *
 *   IA32 - Where we need to look at GDT/LDT segment descriptor tables
 *          to figure out what the 32bit base address is.
 *
 *    X32 - has TIF_X32 set, but is running in x86_64
 *
 * X86_64 - CS,DS,SS,ES are all zero based.
 */
static unsigned long code_segment_base(struct pt_regs *regs)
{}
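
/*
 * Sketch of the 64-bit-kernel half of the case analysis above (assumes
 * CONFIG_X86_64; the real function also covers 32-bit kernels and VM86).
 * example_code_segment_base() is hypothetical.
 */
#if 0	/* illustrative sketch only, never compiled */
static unsigned long example_code_segment_base(struct pt_regs *regs)
{
	/*
	 * Only compat (32-bit) user code can have a non-zero CS base on a
	 * 64-bit kernel; look it up through the GDT/LDT in that case.
	 */
	if (user_mode(regs) && !user_64bit_mode(regs) &&
	    regs->cs != __USER32_CS)
		return get_segment_base(regs->cs);

	return 0;
}

/* The reported IP is then regs->ip + that base. */
#endif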

unsigned long perf_instruction_pointer(struct pt_regs *regs)
{}

unsigned long perf_misc_flags(struct pt_regs *regs)
{}

void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap)
{}
EXPORT_SYMBOL_GPL(perf_get_x86_pmu_capability);

u64 perf_get_hw_event_config(int hw_event)
{}
EXPORT_SYMBOL_GPL(perf_get_hw_event_config);