linux/arch/x86/kvm/pmu.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine -- Performance Monitoring Unit support
 *
 * Copyright 2015 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Avi Kivity   <[email protected]>
 *   Gleb Natapov <[email protected]>
 *   Wei Huang    <[email protected]>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/kvm_host.h>
#include <linux/perf_event.h>
#include <linux/bsearch.h>
#include <linux/sort.h>
#include <asm/perf_event.h>
#include <asm/cpu_device_id.h>
#include "x86.h"
#include "cpuid.h"
#include "lapic.h"
#include "pmu.h"

/* This is enough to filter the vast majority of currently defined events. */
#define KVM_PMU_EVENT_FILTER_MAX_EVENTS 300

struct x86_pmu_capability __read_mostly kvm_pmu_cap;
EXPORT_SYMBOL_GPL(kvm_pmu_cap);

struct kvm_pmu_emulated_event_selectors __read_mostly kvm_pmu_eventsel;
EXPORT_SYMBOL_GPL(kvm_pmu_eventsel);

/* Precise Distribution of Instructions Retired (PDIR) */
static const struct x86_cpu_id vmx_pebs_pdir_cpu[] = {
	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_D, NULL),
	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X, NULL),
	/* Instruction-Accurate PDIR (PDIR++) */
	X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, NULL),
	{}
};

/* Precise Distribution (PDist) */
static const struct x86_cpu_id vmx_pebs_pdist_cpu[] = {
	X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, NULL),
	{}
};

/* NOTE:
 * - Each perf counter is defined as "struct kvm_pmc";
 * - There are two types of perf counters: general purpose (gp) and fixed.
 *   gp counters are stored in gp_counters[] and fixed counters are stored
 *   in fixed_counters[]. Both arrays are part of "struct kvm_pmu";
 * - pmu.c understands the difference between gp counters and fixed counters.
 *   However, AMD doesn't support fixed counters;
 * - There are three types of index used to access perf counters (PMC):
 *     1. MSR (named msr): For example, Intel has MSR_IA32_PERFCTRn and AMD
 *        has MSR_K7_PERFCTRn and, for families 15H and later,
 *        MSR_F15H_PERF_CTRn, where MSR_F15H_PERF_CTR[0-3] are
 *        aliased to MSR_K7_PERFCTRn.
 *     2. MSR Index (named idx): This is normally used by the RDPMC
 *        instruction. For instance, the AMD RDPMC instruction uses
 *        0000_0003h in ECX to access C001_0007h (MSR_K7_PERFCTR3). Intel
 *        has a similar mechanism, except that it also supports fixed
 *        counters. idx can be used as an index into the gp and fixed
 *        counters.
 *     3. Global PMC Index (named pmc): pmc is an index specific to PMU
 *        code. Each pmc, stored in the kvm_pmc.idx field, is unique across
 *        all perf counters (both gp and fixed). The mapping between pmc
 *        and perf counters is as follows:
 *        * Intel: [0 .. KVM_MAX_NR_INTEL_GP_COUNTERS-1] <=> gp counters
 *                 [KVM_FIXED_PMC_BASE_IDX .. KVM_FIXED_PMC_BASE_IDX + 2] <=> fixed
 *        * AMD:   [0 .. AMD64_NUM_COUNTERS-1] and, for families 15H
 *          and later, [0 .. AMD64_NUM_COUNTERS_CORE-1] <=> gp counters
 */
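
/*
 * Illustrative sketch, not taken from this file: one way the global pmc
 * index described above can be decoded into a counter, using the
 * gp_counters[]/fixed_counters[] arrays and KVM_FIXED_PMC_BASE_IDX noted
 * above.  The helper name is hypothetical; in KVM proper this lookup is a
 * vendor-specific operation.
 */
static __maybe_unused struct kvm_pmc *sketch_idx_to_pmc(struct kvm_pmu *pmu,
							unsigned int idx)
{
	if (idx < pmu->nr_arch_gp_counters)
		return &pmu->gp_counters[idx];

	/*
	 * Fixed counters start at KVM_FIXED_PMC_BASE_IDX; unsigned wrap
	 * makes any index below the base fail the bounds check.
	 */
	idx -= KVM_FIXED_PMC_BASE_IDX;
	if (idx < pmu->nr_arch_fixed_counters)
		return &pmu->fixed_counters[idx];

	return NULL;
}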

static struct kvm_pmu_ops kvm_pmu_ops __read_mostly;

#define KVM_X86_PMU_OP(func)					     \
	DEFINE_STATIC_CALL_NULL(kvm_x86_pmu_##func,		     \
				*(((struct kvm_pmu_ops *)0)->func));
#define KVM_X86_PMU_OP_OPTIONAL KVM_X86_PMU_OP
#include <asm/kvm-x86-pmu-ops.h>

void kvm_pmu_ops_update(const struct kvm_pmu_ops *pmu_ops)
{}

static inline void __kvm_perf_overflow(struct kvm_pmc *pmc, bool in_pmi)
{}

static void kvm_perf_overflow(struct perf_event *perf_event,
			      struct perf_sample_data *data,
			      struct pt_regs *regs)
{}

static u64 pmc_get_pebs_precise_level(struct kvm_pmc *pmc)
{}
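
/*
 * Illustrative sketch, assuming the index mapping described above: the
 * PDIR/PDist match tables translate into perf's precise_ip level when a
 * PEBS event is programmed.  PDist is only relevant for gp counter 0 and
 * PDIR for fixed counter 0 (global index KVM_FIXED_PMC_BASE_IDX); all
 * other counters fall back to basic PEBS.
 */
static u64 __maybe_unused sketch_pebs_precise_level(unsigned int pmc_idx)
{
	if ((pmc_idx == 0 && x86_match_cpu(vmx_pebs_pdist_cpu)) ||
	    (pmc_idx == KVM_FIXED_PMC_BASE_IDX &&
	     x86_match_cpu(vmx_pebs_pdir_cpu)))
		return 3;	/* highest precision the hardware offers */

	/* Any non-zero precise_ip turns the event into a PEBS event. */
	return 1;
}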

static u64 get_sample_period(struct kvm_pmc *pmc, u64 counter_value)
{}

static int pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type, u64 config,
				 bool exclude_user, bool exclude_kernel,
				 bool intr)
{}

static bool pmc_pause_counter(struct kvm_pmc *pmc)
{}

static bool pmc_resume_counter(struct kvm_pmc *pmc)
{}

static void pmc_release_perf_event(struct kvm_pmc *pmc)
{}

static void pmc_stop_counter(struct kvm_pmc *pmc)
{}

static void pmc_update_sample_period(struct kvm_pmc *pmc)
{}

void pmc_write_counter(struct kvm_pmc *pmc, u64 val)
{}
EXPORT_SYMBOL_GPL(pmc_write_counter);

static int filter_cmp(const void *pa, const void *pb, u64 mask)
{}

static int filter_sort_cmp(const void *pa, const void *pb)
{}

/*
 * For the event filter, searching is done on the 'includes' list and
 * 'excludes' list separately rather than on the 'events' list (which
 * has both).  As a result the exclude bit can be ignored.
 */
static int filter_event_cmp(const void *pa, const void *pb)
{}

static int find_filter_index(u64 *events, u64 nevents, u64 key)
{}
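
/*
 * Illustrative sketch of such a lookup: with the includes/excludes lists
 * sorted by filter_sort_cmp(), a plain bsearch() using filter_event_cmp()
 * finds a matching entry, whose position is returned as an index (or -1
 * if no entry matches).  The helper name is hypothetical.
 */
static int __maybe_unused sketch_find_filter_index(u64 *events, u64 nevents,
						   u64 key)
{
	u64 *fe = bsearch(&key, events, nevents, sizeof(events[0]),
			  filter_event_cmp);

	return fe ? fe - events : -1;
}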

static bool is_filter_entry_match(u64 filter_event, u64 umask)
{}

static bool filter_contains_match(u64 *events, u64 nevents, u64 eventsel)
{}

static bool is_gp_event_allowed(struct kvm_x86_pmu_event_filter *f,
				u64 eventsel)
{}

static bool is_fixed_event_allowed(struct kvm_x86_pmu_event_filter *filter,
				   int idx)
{}

static bool check_pmu_event_filter(struct kvm_pmc *pmc)
{}

static bool pmc_event_is_allowed(struct kvm_pmc *pmc)
{}

static int reprogram_counter(struct kvm_pmc *pmc)
{}

void kvm_pmu_handle_event(struct kvm_vcpu *vcpu)
{}

int kvm_pmu_check_rdpmc_early(struct kvm_vcpu *vcpu, unsigned int idx)
{}

bool is_vmware_backdoor_pmc(u32 pmc_idx)
{}

static int kvm_pmu_rdpmc_vmware(struct kvm_vcpu *vcpu, unsigned idx, u64 *data)
{}

int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned idx, u64 *data)
{}

void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu)
{}

bool kvm_pmu_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
{}

static void kvm_pmu_mark_pmc_in_use(struct kvm_vcpu *vcpu, u32 msr)
{}

int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{}

int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{}

static void kvm_pmu_reset(struct kvm_vcpu *vcpu)
{}

/*
 * Refresh the PMU configuration for the vCPU, e.g. if userspace changes CPUID
 * and/or PERF_CAPABILITIES.
 */
void kvm_pmu_refresh(struct kvm_vcpu *vcpu)
{}

void kvm_pmu_init(struct kvm_vcpu *vcpu)
{}

/* Release perf_events for vPMCs that have been unused for a full time slice.  */
void kvm_pmu_cleanup(struct kvm_vcpu *vcpu)
{}
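
/*
 * Illustrative sketch of the cleanup idea, assuming the all_valid_pmc_idx
 * and pmc_in_use bitmaps in "struct kvm_pmu" and reusing the hypothetical
 * sketch_idx_to_pmc() from above: any counter that is valid but was not
 * marked in-use during the last time slice has its perf_event torn down.
 */
static void __maybe_unused sketch_pmu_release_unused(struct kvm_pmu *pmu)
{
	DECLARE_BITMAP(bitmask, X86_PMC_IDX_MAX);
	int i;

	/* Valid counters that were not touched in the last time slice. */
	bitmap_andnot(bitmask, pmu->all_valid_pmc_idx, pmu->pmc_in_use,
		      X86_PMC_IDX_MAX);

	for_each_set_bit(i, bitmask, X86_PMC_IDX_MAX) {
		struct kvm_pmc *pmc = sketch_idx_to_pmc(pmu, i);

		if (pmc && pmc->perf_event)
			pmc_stop_counter(pmc);
	}
}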

void kvm_pmu_destroy(struct kvm_vcpu *vcpu)
{}

static void kvm_pmu_incr_counter(struct kvm_pmc *pmc)
{}

static inline bool cpl_is_matched(struct kvm_pmc *pmc)
{}

void kvm_pmu_trigger_event(struct kvm_vcpu *vcpu, u64 eventsel)
{}
EXPORT_SYMBOL_GPL(kvm_pmu_trigger_event);

static bool is_masked_filter_valid(const struct kvm_x86_pmu_event_filter *filter)
{}

static void convert_to_masked_filter(struct kvm_x86_pmu_event_filter *filter)
{}

static int prepare_filter_lists(struct kvm_x86_pmu_event_filter *filter)
{}

int kvm_vm_ioctl_set_pmu_event_filter(struct kvm *kvm, void __user *argp)
{}