#define pr_fmt(fmt) …
#include <linux/types.h>
#include <linux/kvm_host.h>
#include <linux/perf_event.h>
#include <linux/bsearch.h>
#include <linux/sort.h>
#include <asm/perf_event.h>
#include <asm/cpu_device_id.h>
#include "x86.h"
#include "cpuid.h"
#include "lapic.h"
#include "pmu.h"
#define KVM_PMU_EVENT_FILTER_MAX_EVENTS …
struct x86_pmu_capability __read_mostly kvm_pmu_cap;
EXPORT_SYMBOL_GPL(…);
struct kvm_pmu_emulated_event_selectors __read_mostly kvm_pmu_eventsel;
EXPORT_SYMBOL_GPL(…);
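/*
 * CPU match tables consulted when PEBS is exposed to the guest: parts with
 * PDIR (Precise Distribution of Instructions Retired) or PDist support allow
 * a higher perf precise_ip level, see pmc_get_pebs_precise_level().
 */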
static const struct x86_cpu_id vmx_pebs_pdir_cpu[] = …;
static const struct x86_cpu_id vmx_pebs_pdist_cpu[] = …;
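/*
 * Vendor (Intel/AMD) PMU callbacks, filled in at module load time.  The
 * KVM_X86_PMU_OP*() boilerplate below emits one static call per op so the
 * hot paths avoid indirect (retpolined) calls.
 */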
static struct kvm_pmu_ops kvm_pmu_ops __read_mostly;
#define KVM_X86_PMU_OP …
#define KVM_X86_PMU_OP_OPTIONAL …
#include <asm/kvm-x86-pmu-ops.h>
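/* Copy the vendor ops and point each op's static call at its implementation. */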
void kvm_pmu_ops_update(const struct kvm_pmu_ops *pmu_ops)
{ … }
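/*
 * Record a counter overflow for the guest: set the counter's bit in
 * global_status (or the PEBS buffer-overflow bit for a PEBS counter) and, if
 * the counter is configured to interrupt on overflow, request KVM_REQ_PMI so
 * a PMI is injected before the next VM-Enter.  kvm_perf_overflow() is the
 * host perf_event overflow callback and may run in NMI context, hence the
 * request-based deferral.
 */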
static inline void __kvm_perf_overflow(struct kvm_pmc *pmc, bool in_pmi)
{ … }
static void kvm_perf_overflow(struct perf_event *perf_event,
struct perf_sample_data *data,
struct pt_regs *regs)
{ … }
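/*
 * Pick the precise_ip level handed to perf for a PEBS counter; CPUs in the
 * PDIR/PDist match tables above can use a more precise level.
 */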
static u64 pmc_get_pebs_precise_level(struct kvm_pmc *pmc)
{ … }
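/*
 * perf counts up and interrupts on overflow, so the sample period matching a
 * guest counter value is the distance to the wrap point:
 * (-counter_value) & pmc_bitmask(pmc).
 */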
static u64 get_sample_period(struct kvm_pmc *pmc, u64 counter_value)
{ … }
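/*
 * Create the host perf_event that backs the guest counter, with
 * kvm_perf_overflow() installed as its overflow handler.
 */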
static int pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type, u64 config,
bool exclude_user, bool exclude_kernel,
bool intr)
{ … }
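/*
 * Pausing disables the backing perf_event and folds its current count into
 * pmc->counter; resuming re-enables it only if the existing event still
 * matches the guest's configuration, otherwise the caller falls back to
 * pmc_reprogram_counter().  Stopping a counter is pause + release.
 */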
static bool pmc_pause_counter(struct kvm_pmc *pmc)
{ … }
static bool pmc_resume_counter(struct kvm_pmc *pmc)
{ … }
static void pmc_release_perf_event(struct kvm_pmc *pmc)
{ … }
static void pmc_stop_counter(struct kvm_pmc *pmc)
{ … }
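/*
 * A guest write to a counter MSR is not a plain store: the new value must be
 * translated back into a perf sample period so the host event fires at the
 * guest's intended overflow point.
 */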
static void pmc_update_sample_period(struct kvm_pmc *pmc)
{ … }
void pmc_write_counter(struct kvm_pmc *pmc, u64 val)
{ … }
EXPORT_SYMBOL_GPL(…);
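/*
 * PMU event filter helpers.  Filter entries are kept in the masked-events
 * encoding and sorted by event select, so find_filter_index() can bsearch
 * for an entry with a matching select and filter_contains_match() can walk
 * its neighbours to test the unit mask against each entry's mask/match
 * fields.
 */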
static int filter_cmp(const void *pa, const void *pb, u64 mask)
{ … }
static int filter_sort_cmp(const void *pa, const void *pb)
{ … }
static int filter_event_cmp(const void *pa, const void *pb)
{ … }
static int find_filter_index(u64 *events, u64 nevents, u64 key)
{ … }
static bool is_filter_entry_match(u64 filter_event, u64 umask)
{ … }
static bool filter_contains_match(u64 *events, u64 nevents, u64 eventsel)
{ … }
static bool is_gp_event_allowed(struct kvm_x86_pmu_event_filter *f,
u64 eventsel)
{ … }
static bool is_fixed_event_allowed(struct kvm_x86_pmu_event_filter *filter,
int idx)
{ … }
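/*
 * Apply the VM's KVM_SET_PMU_EVENT_FILTER policy (if one is installed) to
 * the event the counter is programmed with; pmc_event_is_allowed() also
 * requires the counter to be enabled both locally and in the global control
 * MSR.
 */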
static bool check_pmu_event_filter(struct kvm_pmc *pmc)
{ … }
static bool pmc_event_is_allowed(struct kvm_pmc *pmc)
{ … }
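/*
 * (Re)program the host perf_event after the guest changed a counter's
 * configuration: pause the old event, bail if the event filter disallows the
 * new setting, then resume the existing event if it still fits or create a
 * fresh one from the guest's eventsel/fixed-counter control.
 */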
static int reprogram_counter(struct kvm_pmc *pmc)
{ … }
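/*
 * KVM_REQ_PMU handler: reprogram every counter whose bit is set in the
 * reprogram_pmi bitmap (set by guest MSR writes and by the overflow
 * callback).
 */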
void kvm_pmu_handle_event(struct kvm_vcpu *vcpu)
{ … }
int kvm_pmu_check_rdpmc_early(struct kvm_vcpu *vcpu, unsigned int idx)
{ … }
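/*
 * The VMware backdoor exposes pseudo-PMCs (host CPU clock, real time,
 * apparent time) through RDPMC with out-of-range indices; they are emulated
 * here instead of in the vendor PMU code.
 */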
bool is_vmware_backdoor_pmc(u32 pmc_idx)
{ … }
static int kvm_pmu_rdpmc_vmware(struct kvm_vcpu *vcpu, unsigned int idx, u64 *data)
{ … }
int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned int idx, u64 *data)
{ … }
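/* Inject the guest PMI through the local APIC's LVTPC entry. */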
void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu)
{ … }
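/*
 * MSR plumbing.  kvm_pmu_is_valid_msr() routes PMU MSRs here from the common
 * x86 code, and accesses mark the counter in pmc_in_use so that
 * kvm_pmu_cleanup() can later release perf_events for counters the guest has
 * stopped touching.
 */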
bool kvm_pmu_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
{ … }
static void kvm_pmu_mark_pmc_in_use(struct kvm_vcpu *vcpu, u32 msr)
{ … }
int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{ … }
int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{ … }
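/*
 * vCPU lifecycle: reset() clears the counters, refresh() re-derives the
 * guest-visible PMU (counter count, widths, version) from guest CPUID after
 * KVM_SET_CPUID, and init() runs once at vCPU creation.
 */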
static void kvm_pmu_reset(struct kvm_vcpu *vcpu)
{ … }
void kvm_pmu_refresh(struct kvm_vcpu *vcpu)
{ … }
void kvm_pmu_init(struct kvm_vcpu *vcpu)
{ … }
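/*
 * Lazy release: free the perf_events of counters whose MSRs were not touched
 * during the last vCPU time slice, so idle guest counters do not pin host
 * PMU resources.
 */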
void kvm_pmu_cleanup(struct kvm_vcpu *vcpu)
{ … }
void kvm_pmu_destroy(struct kvm_vcpu *vcpu)
{ … }
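/*
 * Software-counted events: when KVM itself emulates an instruction or
 * branch, kvm_pmu_trigger_event() bumps every enabled counter whose event
 * selector matches and whose OS/USR bits cover the current guest CPL; a PMI
 * is delivered if the emulated increment overflows the counter.
 */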
static void kvm_pmu_incr_counter(struct kvm_pmc *pmc)
{ … }
static inline bool cpl_is_matched(struct kvm_pmc *pmc)
{ … }
void kvm_pmu_trigger_event(struct kvm_vcpu *vcpu, u64 eventsel)
{ … }
EXPORT_SYMBOL_GPL(…);
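/*
 * KVM_SET_PMU_EVENT_FILTER: userspace hands in a struct kvm_pmu_event_filter
 * (allow/deny action, optional KVM_PMU_EVENT_FLAG_MASKED_EVENTS encoding, a
 * fixed-counter bitmap and an array of event selectors).  The filter is
 * validated, converted to the internal masked encoding if needed, sorted and
 * split into include/exclude lists, then published to the VM with all
 * counters flagged for reprogramming so the new policy takes effect
 * immediately.
 */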
static bool is_masked_filter_valid(const struct kvm_x86_pmu_event_filter *filter)
{ … }
static void convert_to_masked_filter(struct kvm_x86_pmu_event_filter *filter)
{ … }
static int prepare_filter_lists(struct kvm_x86_pmu_event_filter *filter)
{ … }
int kvm_vm_ioctl_set_pmu_event_filter(struct kvm *kvm, void __user *argp)
{ … }