linux/arch/x86/kvm/vmx/pmu_intel.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * KVM PMU support for Intel CPUs
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Avi Kivity   <[email protected]>
 *   Gleb Natapov <[email protected]>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/kvm_host.h>
#include <linux/perf_event.h>
#include <asm/perf_event.h>
#include "x86.h"
#include "cpuid.h"
#include "lapic.h"
#include "nested.h"
#include "pmu.h"

/*
 * Perf's "BASE" is wildly misleading, architectural PMUs use bits 31:16 of ECX
 * to encode the "type" of counter to read, i.e. this is not a "base".  And to
 * further confuse things, non-architectural PMUs use bit 31 as a flag for
 * "fast" reads, whereas the "type" is an explicit value.
 */
#define INTEL_RDPMC_GP		0
#define INTEL_RDPMC_FIXED	INTEL_PMC_FIXED_RDPMC_BASE

#define INTEL_RDPMC_TYPE_MASK	GENMASK(31, 16)
#define INTEL_RDPMC_INDEX_MASK	GENMASK(15, 0)

#define MSR_PMC_FULL_WIDTH_BIT	(MSR_IA32_PMC0 - MSR_IA32_PERFCTR0)
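
/*
 * Minimal sketch (hypothetical helper, not part of the upstream file) of how
 * an architectural-PMU RDPMC index breaks down using the masks above: bits
 * 31:16 select the counter type (GP vs. fixed) and bits 15:0 select the
 * counter index within that type.
 */
static bool sketch_decode_rdpmc_ecx(unsigned int ecx, bool *is_fixed,
				    unsigned int *index)
{
	u64 type = ecx & INTEL_RDPMC_TYPE_MASK;

	/* Unknown types are rejected; KVM turns that into a #GP for the guest. */
	if (type != INTEL_RDPMC_GP && type != INTEL_RDPMC_FIXED)
		return false;

	*is_fixed = (type == INTEL_RDPMC_FIXED);
	*index = ecx & INTEL_RDPMC_INDEX_MASK;
	return true;
}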

static void reprogram_fixed_counters(struct kvm_pmu *pmu, u64 data)
{}

static struct kvm_pmc *intel_rdpmc_ecx_to_pmc(struct kvm_vcpu *vcpu,
					    unsigned int idx, u64 *mask)
{}

static inline u64 vcpu_get_perf_capabilities(struct kvm_vcpu *vcpu)
{}

static inline bool fw_writes_is_enabled(struct kvm_vcpu *vcpu)
{}

static inline struct kvm_pmc *get_fw_gp_pmc(struct kvm_pmu *pmu, u32 msr)
{}

static bool intel_pmu_is_valid_lbr_msr(struct kvm_vcpu *vcpu, u32 index)
{}

static bool intel_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
{}

static struct kvm_pmc *intel_msr_idx_to_pmc(struct kvm_vcpu *vcpu, u32 msr)
{}

static inline void intel_pmu_release_guest_lbr_event(struct kvm_vcpu *vcpu)
{}

int intel_pmu_create_guest_lbr_event(struct kvm_vcpu *vcpu)
{}

/*
 * It's safe to access the LBR MSRs from the guest when they have not been
 * passed through, since the host will restore or reset the LBR MSR records
 * when the guest LBR event is scheduled in.
 */
static bool intel_pmu_handle_lbr_msrs_access(struct kvm_vcpu *vcpu,
				     struct msr_data *msr_info, bool read)
{}
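
/*
 * Minimal sketch (simplified, with a hypothetical helper and parameters) of
 * the idea described above: while the vCPU's guest LBR perf_event is actively
 * scheduled, the physical LBR MSRs hold the guest's records, so an intercepted
 * access can simply be forwarded to the hardware; IRQs are disabled so the
 * event can't be reclaimed mid-access.  Reads return 0 if the event isn't
 * active.  rdmsrl()/wrmsrl() are used here purely for illustration.
 */
static bool sketch_lbr_msr_access(struct perf_event *guest_lbr_event,
				  struct msr_data *msr_info, bool read)
{
	bool active;

	local_irq_disable();
	active = guest_lbr_event &&
		 guest_lbr_event->state == PERF_EVENT_STATE_ACTIVE;
	if (active) {
		if (read)
			rdmsrl(msr_info->index, msr_info->data);
		else
			wrmsrl(msr_info->index, msr_info->data);
	}
	local_irq_enable();

	if (!active && read)
		msr_info->data = 0;

	return true;
}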

static int intel_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{}

static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{}

/*
 * Map fixed counter events to architectural general purpose event encodings.
 * Perf doesn't provide APIs to allow KVM to directly program a fixed counter,
 * and so KVM instead programs the architectural event to effectively request
 * the fixed counter.  Perf isn't guaranteed to use a fixed counter and may
 * instead program the encoding into a general purpose counter, e.g. if a
 * different perf_event is already utilizing the requested counter, but the end
 * result is the same (ignoring the fact that using a general purpose counter
 * will likely exacerbate counter contention).
 *
 * Forcibly inlined to allow asserting on @index at build time, and there should
 * never be more than one user.
 */
static __always_inline u64 intel_get_fixed_pmc_eventsel(unsigned int index)
{}
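
/*
 * Minimal sketch (hypothetical helper with hard-coded encodings) of the
 * mapping described above: fixed counter 0 counts retired instructions,
 * counter 1 counts unhalted core cycles, and counter 2 counts unhalted
 * reference cycles, so the equivalent general purpose eventsel values are
 * roughly as below.  The upstream code derives these from perf's generic
 * PERF_COUNT_HW_* events rather than hard-coding them.
 */
static u64 sketch_fixed_pmc_eventsel(unsigned int index)
{
	static const u64 fixed_pmc_eventsels[] = {
		0x00c0,	/* fixed ctr 0: INST_RETIRED.ANY (event 0xc0) */
		0x003c,	/* fixed ctr 1: CPU_CLK_UNHALTED.CORE (event 0x3c) */
		0x0300,	/* fixed ctr 2: reference cycles (perf pseudo-encoding) */
	};

	if (index >= ARRAY_SIZE(fixed_pmc_eventsels))
		return 0;

	return fixed_pmc_eventsels[index];
}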

static void intel_pmu_enable_fixed_counter_bits(struct kvm_pmu *pmu, u64 bits)
{}

static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
{}

static void intel_pmu_init(struct kvm_vcpu *vcpu)
{}

static void intel_pmu_reset(struct kvm_vcpu *vcpu)
{}

/*
 * Emulate LBR_On_PMI behavior for 1 < pmu.version < 4.
 *
 * If Freeze_LBR_On_PMI = 1, the LBRs are frozen on PMI, and KVM emulates
 * this by clearing the LBR bit (bit 0) in IA32_DEBUGCTL.
 *
 * The guest needs to re-enable LBR to resume recording branches.
 */
static void intel_pmu_legacy_freezing_lbrs_on_pmi(struct kvm_vcpu *vcpu)
{}
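
/*
 * Minimal sketch (hypothetical helper; assumes the vCPU's VMCS is loaded) of
 * the emulation described above: on PMI delivery, if the guest enabled
 * Freeze_LBR_On_PMI, clear the LBR enable bit in the guest's IA32_DEBUGCTL
 * so branch recording stops until the guest re-enables it.
 */
static void sketch_freeze_lbrs_on_pmi(void)
{
	u64 debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL);

	if (debugctl & DEBUGCTLMSR_FREEZE_LBRS_ON_PMI) {
		debugctl &= ~DEBUGCTLMSR_LBR;
		vmcs_write64(GUEST_IA32_DEBUGCTL, debugctl);
	}
}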

static void intel_pmu_deliver_pmi(struct kvm_vcpu *vcpu)
{}

static void vmx_update_intercept_for_lbr_msrs(struct kvm_vcpu *vcpu, bool set)
{}

static inline void vmx_disable_lbr_msrs_passthrough(struct kvm_vcpu *vcpu)
{}

static inline void vmx_enable_lbr_msrs_passthrough(struct kvm_vcpu *vcpu)
{}

/*
 * Higher priority host perf events (e.g. CPU pinned) can reclaim PMU
 * resources (e.g. LBRs) that were assigned to the guest. This is usually
 * done via IPI calls (see perf_install_in_context for details).
 *
 * Before entering non-root mode (with IRQs disabled here), confirm that the
 * PMU features exposed to the guest have not been reclaimed by higher
 * priority host events. Otherwise, disallow the vCPU's access to the
 * reclaimed features.
 */
void vmx_passthrough_lbr_msrs(struct kvm_vcpu *vcpu)
{}
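
/*
 * Minimal sketch (hypothetical helper and parameter) of the re-check
 * described above: if the guest's LBR perf_event is still actively scheduled
 * on the hardware, keep (or re-enable) the LBR MSR passthrough; if it was
 * reclaimed by a higher priority host event, intercept the LBR MSRs again so
 * the guest can't read stale or host-owned records.
 */
static void sketch_recheck_lbr_passthrough(struct kvm_vcpu *vcpu,
					   struct perf_event *guest_lbr_event)
{
	if (guest_lbr_event &&
	    guest_lbr_event->state == PERF_EVENT_STATE_ACTIVE)
		vmx_enable_lbr_msrs_passthrough(vcpu);
	else
		vmx_disable_lbr_msrs_passthrough(vcpu);
}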

static void intel_pmu_cleanup(struct kvm_vcpu *vcpu)
{}

void intel_pmu_cross_mapped_check(struct kvm_pmu *pmu)
{}

struct kvm_pmu_ops intel_pmu_ops __initdata = {
	.rdpmc_ecx_to_pmc = intel_rdpmc_ecx_to_pmc,
	.msr_idx_to_pmc = intel_msr_idx_to_pmc,
	.is_valid_msr = intel_is_valid_msr,
	.get_msr = intel_pmu_get_msr,
	.set_msr = intel_pmu_set_msr,
	.refresh = intel_pmu_refresh,
	.init = intel_pmu_init,
	.reset = intel_pmu_reset,
	.deliver_pmi = intel_pmu_deliver_pmi,
	.cleanup = intel_pmu_cleanup,
	.EVENTSEL_EVENT = ARCH_PERFMON_EVENTSEL_EVENT,
	.MAX_NR_GP_COUNTERS = KVM_MAX_NR_INTEL_GP_COUNTERS,
	.MIN_NR_GP_COUNTERS = 1,
};