linux/arch/x86/kvm/pmu.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_PMU_H
#define __KVM_X86_PMU_H

#include <linux/nospec.h>

#include <asm/kvm_host.h>

#define vcpu_to_pmu(vcpu)
#define pmu_to_vcpu(pmu)
#define pmc_to_pmu(pmc)

#define MSR_IA32_MISC_ENABLE_PMU_RO_MASK

/* retrieve the 4 bits for EN and PMI out of IA32_FIXED_CTR_CTRL */
#define fixed_ctrl_field(ctrl_reg, idx)
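
/*
 * A hedged sketch (hypothetical helper, not part of the real header): assuming
 * each fixed counter owns a 4-bit control field in IA32_FIXED_CTR_CTRL, the EN
 * and PMI bits for counter 'idx' can be extracted with a shift and a mask.
 */
static inline u64 fixed_ctrl_field_sketch(u64 ctrl_reg, int idx)
{
	/* Each fixed counter's control field is 4 bits wide. */
	return (ctrl_reg >> (idx * 4)) & 0xf;
}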

#define VMWARE_BACKDOOR_PMC_HOST_TSC
#define VMWARE_BACKDOOR_PMC_REAL_TIME
#define VMWARE_BACKDOOR_PMC_APPARENT_TIME

#define KVM_FIXED_PMC_BASE_IDX

struct kvm_pmu_emulated_event_selectors {};

struct kvm_pmu_ops {};

void kvm_pmu_ops_update(const struct kvm_pmu_ops *pmu_ops);

static inline bool kvm_pmu_has_perf_global_ctrl(struct kvm_pmu *pmu)
{}

/*
 * KVM tracks all counters in 64-bit bitmaps, with general purpose counters
 * mapped to bits 31:0 and fixed counters mapped to 63:32, e.g. fixed counter 0
 * is tracked internally via index 32.  On Intel (AMD doesn't support fixed
 * counters), this mirrors how fixed counters are mapped to PERF_GLOBAL_CTRL
 * and similar MSRs, i.e. tracking fixed counters at base index 32 reduces the
 * amount of boilerplate needed to iterate over PMCs *and* simplifies common
 * enable/disable/reset operations.
 *
 * WARNING!  This helper is only for lookups that are initiated by KVM; it is
 * NOT safe for guest lookups, e.g. it will do the wrong thing if passed a raw
 * ECX value from RDPMC (fixed counters are accessed by setting bit 30 in ECX
 * for RDPMC, not by adding 32 to the fixed counter index).
 */
static inline struct kvm_pmc *kvm_pmc_idx_to_pmc(struct kvm_pmu *pmu, int idx)
{}
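
/*
 * A hedged sketch (hypothetical stand-in, not the in-tree body): one way to
 * implement the lookup described above, assuming KVM_FIXED_PMC_BASE_IDX is 32
 * and that struct kvm_pmu exposes gp_counters[]/fixed_counters[] along with
 * nr_arch_gp_counters/nr_arch_fixed_counters.
 */
static inline struct kvm_pmc *kvm_pmc_idx_to_pmc_sketch(struct kvm_pmu *pmu,
							int idx)
{
	/* GP counters occupy bitmap indices [0, nr_arch_gp_counters). */
	if (idx < pmu->nr_arch_gp_counters)
		return &pmu->gp_counters[array_index_nospec(idx, pmu->nr_arch_gp_counters)];

	/* Fixed counters are tracked starting at KVM_FIXED_PMC_BASE_IDX (32). */
	idx -= KVM_FIXED_PMC_BASE_IDX;
	if (idx >= 0 && idx < pmu->nr_arch_fixed_counters)
		return &pmu->fixed_counters[array_index_nospec(idx, pmu->nr_arch_fixed_counters)];

	return NULL;
}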

#define kvm_for_each_pmc(pmu, pmc, i, bitmap)

static inline u64 pmc_bitmask(struct kvm_pmc *pmc)
{}

static inline u64 pmc_read_counter(struct kvm_pmc *pmc)
{}

void pmc_write_counter(struct kvm_pmc *pmc, u64 val);

static inline bool pmc_is_gp(struct kvm_pmc *pmc)
{}

static inline bool pmc_is_fixed(struct kvm_pmc *pmc)
{}

static inline bool kvm_valid_perf_global_ctrl(struct kvm_pmu *pmu,
						 u64 data)
{}

/*
 * Returns the general purpose PMC with the specified MSR.  Note that it can
 * be used for both PERFCTRn and EVNTSELn; that is why it accepts base as a
 * parameter to tell them apart.
 */
static inline struct kvm_pmc *get_gp_pmc(struct kvm_pmu *pmu, u32 msr,
					 u32 base)
{}
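
/*
 * A hedged sketch (hypothetical stand-in, not the in-tree body): resolve the
 * MSR relative to 'base' (e.g. the first PERFCTRn or the first EVNTSELn MSR)
 * and bounds-check it against the number of GP counters.
 */
static inline struct kvm_pmc *get_gp_pmc_sketch(struct kvm_pmu *pmu, u32 msr,
						u32 base)
{
	if (msr >= base && msr < base + pmu->nr_arch_gp_counters) {
		u32 index = array_index_nospec(msr - base,
					       pmu->nr_arch_gp_counters);

		return &pmu->gp_counters[index];
	}

	return NULL;
}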

/* Returns the fixed PMC with the specified MSR. */
static inline struct kvm_pmc *get_fixed_pmc(struct kvm_pmu *pmu, u32 msr)
{}
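
/*
 * A hedged sketch (hypothetical stand-in, not the in-tree body): assumes the
 * fixed counter MSRs are contiguous starting at MSR_CORE_PERF_FIXED_CTR0.
 */
static inline struct kvm_pmc *get_fixed_pmc_sketch(struct kvm_pmu *pmu, u32 msr)
{
	u32 base = MSR_CORE_PERF_FIXED_CTR0;

	if (msr >= base && msr < base + pmu->nr_arch_fixed_counters) {
		u32 index = array_index_nospec(msr - base,
					       pmu->nr_arch_fixed_counters);

		return &pmu->fixed_counters[index];
	}

	return NULL;
}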

static inline bool pmc_speculative_in_use(struct kvm_pmc *pmc)
{}

extern struct x86_pmu_capability kvm_pmu_cap;
extern struct kvm_pmu_emulated_event_selectors kvm_pmu_eventsel;

static inline void kvm_init_pmu_capability(const struct kvm_pmu_ops *pmu_ops)
{}

static inline void kvm_pmu_request_counter_reprogram(struct kvm_pmc *pmc)
{}

static inline void reprogram_counters(struct kvm_pmu *pmu, u64 diff)
{}

/*
 * Check if a PMC is enabled by comparing it against the global_ctrl bits.
 *
 * If the vPMU doesn't have the global_ctrl MSR, all vPMCs are enabled.
 */
static inline bool pmc_is_globally_enabled(struct kvm_pmc *pmc)
{}
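
/*
 * A hedged sketch (hypothetical stand-in, not the in-tree body): a PMC is
 * treated as enabled when the vPMU lacks PERF_GLOBAL_CTRL; otherwise its bit
 * (the bitmap index described earlier) is tested against global_ctrl.
 */
static inline bool pmc_is_globally_enabled_sketch(struct kvm_pmc *pmc)
{
	struct kvm_pmu *pmu = pmc_to_pmu(pmc);

	/* Without PERF_GLOBAL_CTRL, there is no global enable bit to honor. */
	if (!kvm_pmu_has_perf_global_ctrl(pmu))
		return true;

	return test_bit(pmc->idx, (unsigned long *)&pmu->global_ctrl);
}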

void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu);
void kvm_pmu_handle_event(struct kvm_vcpu *vcpu);
int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned pmc, u64 *data);
int kvm_pmu_check_rdpmc_early(struct kvm_vcpu *vcpu, unsigned int idx);
bool kvm_pmu_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr);
int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
void kvm_pmu_refresh(struct kvm_vcpu *vcpu);
void kvm_pmu_init(struct kvm_vcpu *vcpu);
void kvm_pmu_cleanup(struct kvm_vcpu *vcpu);
void kvm_pmu_destroy(struct kvm_vcpu *vcpu);
int kvm_vm_ioctl_set_pmu_event_filter(struct kvm *kvm, void __user *argp);
void kvm_pmu_trigger_event(struct kvm_vcpu *vcpu, u64 eventsel);

bool is_vmware_backdoor_pmc(u32 pmc_idx);

extern struct kvm_pmu_ops intel_pmu_ops;
extern struct kvm_pmu_ops amd_pmu_ops;
#endif /* __KVM_X86_PMU_H */