linux/arch/x86/events/amd/brs.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Implement support for AMD Fam19h Branch Sampling feature
 * Based on specifications published in AMD PPR Fam19 Model 01
 *
 * Copyright 2021 Google LLC
 * Contributed by Stephane Eranian <[email protected]>
 */
#include <linux/kernel.h>
#include <linux/jump_label.h>
#include <asm/msr.h>
#include <asm/cpufeature.h>

#include "../perf_event.h"

#define BRS_POISON 0xFFFFFFFFFFFFFFFEULL /* mark limit of valid entries */

/* Debug Extension Configuration register layout */
union amd_debug_extn_cfg {
        __u64 val;
        struct {
                __u64 rsvd0:2,    /* reserved */
                      brsmen:1,   /* branch sample enable */
                      rsvd4_3:2,  /* reserved - must be 0x3 */
                      vb:1,       /* valid branches recorded */
                      rsvd2:10,   /* reserved */
                      msroff:4,   /* index of next entry to write */
                      rsvd3:4,    /* reserved */
                      pmc:3,      /* #PMC holding the sampling event */
                      rsvd4:37;   /* reserved */
        };
};

static inline unsigned int brs_from(int idx)
{
        return MSR_AMD_SAMP_BR_FROM + 2 * idx;
}

static inline unsigned int brs_to(int idx)
{
        return MSR_AMD_SAMP_BR_FROM + 2 * idx + 1;
}
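
/*
 * BRS exposes its sampled branches as interleaved FROM/TO MSR pairs:
 * entry i lives at MSR_AMD_SAMP_BR_FROM + 2*i (branch source) and at
 * MSR_AMD_SAMP_BR_FROM + 2*i + 1 (branch target), hence the two helpers
 * above.
 */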

static __always_inline void set_debug_extn_cfg(u64 val)
{
        /* bits[4:3] must always be set to 11b */
        __wrmsr(MSR_AMD_DBG_EXTN_CFG, val | 3ULL << 3, val >> 32);
}

static __always_inline u64 get_debug_extn_cfg(void)
{
        return __rdmsr(MSR_AMD_DBG_EXTN_CFG);
}
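
/*
 * Minimal illustrative sketch (not used by the driver): how the union
 * decodes a raw MSR value read through the helper above. The function
 * name is made up for the example.
 */
static __maybe_unused void brs_cfg_decode_example(void)
{
        union amd_debug_extn_cfg cfg;

        cfg.val = get_debug_extn_cfg();

        /* brsmen reports whether branch sampling is currently enabled */
        if (cfg.brsmen)
                pr_debug("BRS on, next write slot %u, valid branches %u\n",
                         (unsigned int)cfg.msroff, (unsigned int)cfg.vb);
}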

static bool __init amd_brs_detect(void)
{
        /* BRS requires a specific CPU feature */
        if (!boot_cpu_has(X86_FEATURE_BRS))
                return false;

        switch (boot_cpu_data.x86) {
        case 0x19: /* AMD Fam19h (Zen3) */
                x86_pmu.lbr_nr = 16;

                /* No hardware filtering supported */
                x86_pmu.lbr_sel_map = NULL;
                x86_pmu.lbr_sel_mask = 0;
                break;
        default:
                return false;
        }

        return true;
}

/*
 * Current BRS implementation does not support branch type or privilege level
 * filtering. Therefore, this function simply enforces these limitations.
 * No need for a br_sel_map. Software filtering is not supported because it
 * would not correlate well with a sampling period.
 */
static int amd_brs_setup_filter(struct perf_event *event)
{
        u64 type = event->attr.branch_sample_type;

        /* No BRS support */
        if (!x86_pmu.lbr_nr)
                return -EOPNOTSUPP;

        /* Can only capture all branches, i.e., no filtering */
        if ((type & ~PERF_SAMPLE_BRANCH_PLM_ALL) != PERF_SAMPLE_BRANCH_ANY)
                return -EINVAL;

        return 0;
}
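
/*
 * Example (illustrative): PERF_SAMPLE_BRANCH_ANY | PERF_SAMPLE_BRANCH_USER
 * passes the check above because privilege-level bits are masked out here
 * and applied later in software (see amd_brs_match_plm()), whereas a branch
 * type filter such as PERF_SAMPLE_BRANCH_ANY_CALL is rejected with -EINVAL.
 */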

static inline int amd_is_brs_event(struct perf_event *e)
{
        return (e->hw.config & AMD64_RAW_EVENT_MASK) == AMD_FAM19H_BRS_EVENT;
}

int amd_brs_hw_config(struct perf_event *event)
{
        int ret = 0;

        /*
         * Due to interrupt holding, BRS is not recommended in
         * counting mode.
         */
        if (!is_sampling_event(event))
                return -EINVAL;

        /*
         * Due to the way BRS operates by holding the interrupt until
         * lbr_nr entries have been captured, it does not make sense
         * to allow sampling on BRS with an event that does not match
         * what BRS is capturing, i.e., retired taken branches.
         */
        if (!amd_is_brs_event(event))
                return -EINVAL;

        /*
         * BRS implementation does not work with frequency mode
         * reprogramming the period.
         */
        if (event->attr.freq)
                return -EINVAL;

        /*
         * The kernel subtracts BRS depth from period, so it must
         * be big enough.
         */
        if (event->attr.sample_period <= x86_pmu.lbr_nr)
                return -EINVAL;

        /*
         * Check if we can allow PERF_SAMPLE_BRANCH_STACK.
         */
        ret = amd_brs_setup_filter(event);

        /* only set in case of success */
        if (!ret)
                event->hw.flags |= PERF_X86_EVENT_AMD_BRS;

        return ret;
}
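
/*
 * Usage sketch (illustrative): given the constraints enforced above, a
 * typical userspace invocation pairs the branch-brs event with branch
 * stacks and a fixed period larger than the 16-entry BRS depth, e.g.:
 *
 *      perf record -b -e cpu/branch-brs/ -c 1000037 -- ./workload
 *
 * Frequency mode (-F) would be rejected by the freq check above.
 */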

/* tos = top of stack, i.e., last valid entry written */
static inline int amd_brs_get_tos(union amd_debug_extn_cfg *cfg)
{
        /*
         * msroff: index of next entry to write so top-of-stack is one off
         * if BRS is full
         */
        return (cfg->msroff ? cfg->msroff : x86_pmu.lbr_nr) - 1;
}
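
/*
 * Worked example: with the 16-deep Fam19h BRS, msroff == 5 means entry 4 is
 * the most recently written one; msroff == 0 means the buffer saturated and
 * wrapped, so the top of stack is entry 16 - 1 = 15.
 */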

/*
 * make sure we have a sane BRS offset to begin with
 * especially with kexec
 */
void amd_brs_reset(void)
{
        if (!cpu_feature_enabled(X86_FEATURE_BRS))
                return;

        /*
         * Reset config
         */
        set_debug_extn_cfg(0);

        /*
         * Mark first entry as poisoned
         */
        wrmsrl(brs_to(0), BRS_POISON);
}

int __init amd_brs_init(void)
{
        if (!amd_brs_detect())
                return -EOPNOTSUPP;

        pr_cont("%d-deep BRS, ", x86_pmu.lbr_nr);

        return 0;
}

void amd_brs_enable(void)
{
        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
        union amd_debug_extn_cfg cfg;

        /* Activate only on first user */
        if (++cpuc->brs_active > 1)
                return;

        cfg.val    = 0; /* reset all fields */
        cfg.brsmen = 1; /* enable branch sampling */

        /* Set enable bit */
        set_debug_extn_cfg(cfg.val);
}

void amd_brs_enable_all(void)
{
        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

        if (cpuc->lbr_users)
                amd_brs_enable();
}

void amd_brs_disable(void)
{
        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
        union amd_debug_extn_cfg cfg;

        /* Check if active (could be disabled via x86_pmu_disable_all()) */
        if (!cpuc->brs_active)
                return;

        /* Only disable for last user */
        if (--cpuc->brs_active)
                return;

        /*
         * Clear the brsmen bit but preserve the rest of the config
         * because it is used elsewhere for the PMC
         */
        cfg.val = get_debug_extn_cfg();
        cfg.brsmen = 0;
        set_debug_extn_cfg(cfg.val);
}

void amd_brs_disable_all(void)
{
        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

        if (cpuc->lbr_users)
                amd_brs_disable();
}

static bool amd_brs_match_plm(struct perf_event *event, u64 to)
{
        int type = event->attr.branch_sample_type;
        int plm_k = PERF_SAMPLE_BRANCH_KERNEL | PERF_SAMPLE_BRANCH_HV;
        int plm_u = PERF_SAMPLE_BRANCH_USER;

        if (!(type & plm_k) && kernel_ip(to))
                return false;

        if (!(type & plm_u) && !kernel_ip(to))
                return false;

        return true;
}

/*
 * Caller must ensure amd_brs_inuse() is true before calling.
 */
void amd_brs_drain(void)
{
        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
        struct perf_event *event = cpuc->events[0];
        struct perf_branch_entry *br = cpuc->lbr_entries;
        union amd_debug_extn_cfg cfg;
        u32 i, nr = 0, num, tos, start;
        u32 shift = 64 - boot_cpu_data.x86_virt_bits;

        /*
         * BRS event forced on PMC0, so check if there is an event.
         * It is possible to have lbr_users > 0 but the event
         * not yet scheduled due to long latency PMU irq
         */
        if (!event)
                goto empty;

        cfg.val = get_debug_extn_cfg();

        /* Sanity check [0-x86_pmu.lbr_nr] */
        if (WARN_ON_ONCE(cfg.msroff >= x86_pmu.lbr_nr))
                goto empty;

        /* No valid branch */
        if (cfg.vb == 0)
                goto empty;

        /*
         * msroff points to the next entry to be written, so the most
         * recent entry (tos) is msroff - 1. The buffer saturates, so
         * entries are read back from start (0) up to tos.
         */
        start = 0;
        tos = amd_brs_get_tos(&cfg);

        num = tos - start + 1;

        for (i = 0; i < num; i++) {
                u32 brs_idx = tos - i;
                u64 from, to;

                rdmsrl(brs_to(brs_idx), to);

                /* Entry does not belong to us, skip */
                if (to == BRS_POISON)
                        break;

                /*
                 * Sign-extend SAMP_BR_TO to 64 bits, bits 61-63 are
                 * reserved. Necessary to generate proper virtual addresses
                 * suitable for symbolization.
                 */
                to = (u64)(((s64)to << shift) >> shift);

                if (!amd_brs_match_plm(event, to))
                        continue;

                rdmsrl(brs_from(brs_idx), from);

                perf_clear_branch_entry_bitfields(br + nr);

                br[nr].from = from;
                br[nr].to   = to;

                nr++;
        }
empty:
        /* Record number of sampled branches */
        cpuc->lbr_stack.nr = nr;
}

/*
 * Poison most recent entry to prevent reuse by next task
 * required because BRS entries are not tagged by PID
 */
static void amd_brs_poison_buffer(void)
{
        union amd_debug_extn_cfg cfg;
        unsigned int idx;

        /* Get current state */
        cfg.val = get_debug_extn_cfg();

        /* idx is most recently written entry */
        idx = amd_brs_get_tos(&cfg);

        /* Poison target of entry */
        wrmsrl(brs_to(idx), BRS_POISON);
}

/*
 * On context switch in, we need to make sure no samples from previous user
 * are left in the BRS.
 *
 * On ctxswin, sched_in = true, called after the PMU has started
 * On ctxswout, sched_in = false, called before the PMU is stopped
 */
void amd_pmu_brs_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in)
{
        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

        /* no active users */
        if (!cpuc->lbr_users)
                return;

        /*
         * On context switch in, we need to ensure we do not use entries
         * from the previous BRS user on that CPU, so we poison the buffer;
         * this is faster than resetting all entries.
         */
        if (sched_in)
                amd_brs_poison_buffer();
}

/*
 * called from ACPI processor_idle.c or acpi_pad.c
 * with interrupts disabled
 */
void noinstr perf_amd_brs_lopwr_cb(bool lopwr_in)
{
        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
        union amd_debug_extn_cfg cfg;

        /*
         * On mwait in, we may end up in a non-C0 state. We must disable
         * branch sampling to avoid holding the NMI for too long. Hardware
         * is disabled but the state is kept in cpuc so sampling can be
         * re-enabled; hardware delivers the held NMI, if any, once brsmen
         * is cleared.
         */
        if (cpuc->brs_active) {
                cfg.val = get_debug_extn_cfg();
                cfg.brsmen = !lopwr_in;
                set_debug_extn_cfg(cfg.val);
        }
}

DEFINE_STATIC_CALL_NULL(perf_lopwr_cb, perf_amd_brs_lopwr_cb);
EXPORT_STATIC_CALL_TRAMP_GPL(perf_lopwr_cb);

void __init amd_brs_lopwr_init(void)
{
        static_call_update(perf_lopwr_cb, perf_amd_brs_lopwr_cb);
}
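
/*
 * Caller-side sketch (illustrative; the wrapper lives outside this file,
 * in arch/x86/include/asm/perf_event.h): the ACPI idle code reaches
 * perf_amd_brs_lopwr_cb() through the static call above, so systems
 * without BRS pay only for a patched-out call site:
 *
 *      static __always_inline void perf_lopwr_cb(bool lopwr_in)
 *      {
 *              static_call_mod(perf_lopwr_cb)(lopwr_in);
 *      }
 */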