#ifndef __KVM_X86_PMU_H
#define __KVM_X86_PMU_H
#include <linux/nospec.h>
#include <asm/kvm_host.h>
/* Navigate between the containing structures: vcpu <-> pmu <-> pmc. */
#define vcpu_to_pmu(vcpu) …
#define pmu_to_vcpu(pmu) …
#define pmc_to_pmu(pmc) …
/*
 * MSR_IA32_MISC_ENABLE bits related to the PMU that the guest may read
 * but not modify.  NOTE(review): mask value elided — confirm against the
 * Intel SDM MISC_ENABLE bit definitions.
 */
#define MSR_IA32_MISC_ENABLE_PMU_RO_MASK …
/*
 * Extract the per-counter control field for fixed counter @idx from a
 * FIXED_CTR_CTRL register image.  NOTE(review): field width/shift elided.
 */
#define fixed_ctrl_field(ctrl_reg, idx) …
/*
 * Pseudo-PMC indices used by the VMware backdoor RDPMC interface to read
 * host TSC, real time, and apparent time instead of a real counter.
 */
#define VMWARE_BACKDOOR_PMC_HOST_TSC …
#define VMWARE_BACKDOOR_PMC_REAL_TIME …
#define VMWARE_BACKDOOR_PMC_APPARENT_TIME …
/* Index at which fixed counters begin in KVM's unified PMC index space. */
#define KVM_FIXED_PMC_BASE_IDX …
/*
 * Architectural event selectors that KVM emulates in software (e.g. for
 * instruction/branch counting on emulated paths).  Body elided.
 */
struct kvm_pmu_emulated_event_selectors { … };
/* Vendor-specific (Intel/AMD) PMU callback table.  Body elided. */
struct kvm_pmu_ops { … };
/* Install the vendor PMU ops table at module init time. */
void kvm_pmu_ops_update(const struct kvm_pmu_ops *pmu_ops);
/*
 * True if the vPMU exposes PERF_GLOBAL_CTRL.  NOTE(review): body elided —
 * presumably keyed off the PMU version and/or vendor; confirm.
 */
static inline bool kvm_pmu_has_perf_global_ctrl(struct kvm_pmu *pmu)
{ … }
/*
 * Translate a KVM-internal PMC index to the pmc object, or NULL if the
 * index is out of range.  NOTE(review): body elided — likely uses
 * array_index_nospec() given the <linux/nospec.h> include; confirm.
 */
static inline struct kvm_pmc *kvm_pmc_idx_to_pmc(struct kvm_pmu *pmu, int idx)
{ … }
/* Iterate over all PMCs whose index is set in @bitmap. */
#define kvm_for_each_pmc(pmu, pmc, i, bitmap) … \
/* Mask of valid bits for this counter, derived from its width. */
static inline u64 pmc_bitmask(struct kvm_pmc *pmc)
{ … }
/* Read the current (emulated or perf-backed) counter value. */
static inline u64 pmc_read_counter(struct kvm_pmc *pmc)
{ … }
/* Write a new counter value (guest WRMSR path); defined in pmu.c. */
void pmc_write_counter(struct kvm_pmc *pmc, u64 val);
/* True if the PMC is a general-purpose counter. */
static inline bool pmc_is_gp(struct kvm_pmc *pmc)
{ … }
/* True if the PMC is a fixed-function counter. */
static inline bool pmc_is_fixed(struct kvm_pmc *pmc)
{ … }
/*
 * Validate a guest write to PERF_GLOBAL_CTRL: reject bits for counters
 * that don't exist.  NOTE(review): body elided — confirm it checks
 * against the PMU's reserved-bit mask.
 */
static inline bool kvm_valid_perf_global_ctrl(struct kvm_pmu *pmu,
					      u64 data)
{ … }
/*
 * Map a general-purpose counter MSR (offset from @base) to its pmc,
 * or NULL if @msr is not in the GP counter range.
 */
static inline struct kvm_pmc *get_gp_pmc(struct kvm_pmu *pmu, u32 msr,
					 u32 base)
{ … }
/* Map a fixed-counter MSR to its pmc, or NULL if out of range. */
static inline struct kvm_pmc *get_fixed_pmc(struct kvm_pmu *pmu, u32 msr)
{ … }
/*
 * True if the counter is enabled from the guest's point of view, i.e.
 * may need (re)programming.  NOTE(review): exact condition elided.
 */
static inline bool pmc_speculative_in_use(struct kvm_pmc *pmc)
{ … }
/* Host perf capabilities as clamped for KVM's use; defined in pmu.c. */
extern struct x86_pmu_capability kvm_pmu_cap;
/* Event selectors for software-emulated events; defined in pmu.c. */
extern struct kvm_pmu_emulated_event_selectors kvm_pmu_eventsel;
/*
 * Query host perf and compute kvm_pmu_cap at module load.
 * NOTE(review): body elided — presumably clamps counters/version to what
 * KVM supports and may disable the vPMU entirely; confirm.
 */
static inline void kvm_init_pmu_capability(const struct kvm_pmu_ops *pmu_ops)
{ … }
/*
 * Mark a counter as needing reprogramming and kick the vCPU so the
 * change is processed before the next guest entry.
 */
static inline void kvm_pmu_request_counter_reprogram(struct kvm_pmc *pmc)
{ … }
/* Request reprogramming for every counter whose bit is set in @diff. */
static inline void reprogram_counters(struct kvm_pmu *pmu, u64 diff)
{ … }
/*
 * True if the counter is enabled in PERF_GLOBAL_CTRL (or the PMU has no
 * global control, in which case counters are individually controlled).
 * NOTE(review): body elided — confirm the no-global-ctrl behavior.
 */
static inline bool pmc_is_globally_enabled(struct kvm_pmc *pmc)
{ … }
/* Inject a PMI (performance monitoring interrupt) into the guest. */
void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu);
/* Process pending PMU work (counter reprogramming) before guest entry. */
void kvm_pmu_handle_event(struct kvm_vcpu *vcpu);
/* Emulate RDPMC: read counter @pmc into *@data; non-zero on #GP. */
int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned pmc, u64 *data);
/* Fast-path validity check of an RDPMC index before full emulation. */
int kvm_pmu_check_rdpmc_early(struct kvm_vcpu *vcpu, unsigned int idx);
/* True if @msr is a PMU MSR handled by the vPMU. */
bool kvm_pmu_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr);
/* RDMSR/WRMSR emulation for PMU MSRs; 0 on success. */
int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
/* Recompute vPMU state after a guest CPUID update. */
void kvm_pmu_refresh(struct kvm_vcpu *vcpu);
/* vCPU lifecycle hooks for the vPMU. */
void kvm_pmu_init(struct kvm_vcpu *vcpu);
void kvm_pmu_cleanup(struct kvm_vcpu *vcpu);
void kvm_pmu_destroy(struct kvm_vcpu *vcpu);
/* KVM_SET_PMU_EVENT_FILTER ioctl: install a userspace event filter. */
int kvm_vm_ioctl_set_pmu_event_filter(struct kvm *kvm, void __user *argp);
/* Count an emulated occurrence of @eventsel on matching counters. */
void kvm_pmu_trigger_event(struct kvm_vcpu *vcpu, u64 eventsel);
/* True if @pmc_idx is one of the VMware backdoor pseudo-PMC indices. */
bool is_vmware_backdoor_pmc(u32 pmc_idx);
/* Vendor implementations of the PMU ops table (Intel VMX / AMD SVM). */
extern struct kvm_pmu_ops intel_pmu_ops;
extern struct kvm_pmu_ops amd_pmu_ops;
#endif