// SPDX-License-Identifier: GPL-2.0-only

/*
 * Local APIC virtualization
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright (C) 2007 Novell
 * Copyright (C) 2007 Intel
 * Copyright 2009 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Dor Laor <[email protected]>
 *   Gregory Haskins <[email protected]>
 *   Yaozu (Eddie) Dong <[email protected]>
 *
 * Based on Xen 3.1 code, Copyright (c) 2004, Intel Corporation.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/smp.h>
#include <linux/hrtimer.h>
#include <linux/io.h>
#include <linux/export.h>
#include <linux/math64.h>
#include <linux/slab.h>
#include <asm/processor.h>
#include <asm/mce.h>
#include <asm/msr.h>
#include <asm/page.h>
#include <asm/current.h>
#include <asm/apicdef.h>
#include <asm/delay.h>
#include <linux/atomic.h>
#include <linux/jump_label.h>
#include "kvm_cache_regs.h"
#include "irq.h"
#include "ioapic.h"
#include "trace.h"
#include "x86.h"
#include "xen.h"
#include "cpuid.h"
#include "hyperv.h"
#include "smm.h"

#ifndef CONFIG_X86_64
#define mod_64(x, y) ((x) - (y) * div64_u64(x, y))
#else
#define mod_64(x, y) ((x) % (y))
#endif

/* 14 is the version for Xeon and Pentium 8.4.8 */
#define APIC_VERSION			0x14UL
#define LAPIC_MMIO_LENGTH		(1 << 12)
/* The following defines are not in apicdef.h */
#define MAX_APIC_VECTOR			256
#define APIC_VECTORS_PER_REG		32

/*
 * Enable local APIC timer advancement (tscdeadline mode only) with adaptive
 * tuning.  When enabled, KVM programs the host timer event to fire early, i.e.
 * before the deadline expires, to account for the delay between taking the
 * VM-Exit (to inject the guest event) and the subsequent VM-Enter to resume
 * the guest, i.e. so that the interrupt arrives in the guest with minimal
 * latency relative to the deadline programmed by the guest.
 */
static bool lapic_timer_advance __read_mostly = true;
module_param(lapic_timer_advance, bool, 0444);

#define LAPIC_TIMER_ADVANCE_ADJUST_MIN	100	/* clock cycles */
#define LAPIC_TIMER_ADVANCE_ADJUST_MAX	10000	/* clock cycles */
#define LAPIC_TIMER_ADVANCE_NS_INIT	1000
#define LAPIC_TIMER_ADVANCE_NS_MAX	5000
/* step-by-step approximation to mitigate fluctuation */
#define LAPIC_TIMER_ADVANCE_ADJUST_STEP	8
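
/*
 * Illustrative sketch only (hypothetical helper, not the in-tree
 * adjust_lapic_timer_advance()): one plausible way the adaptive tuning
 * described above can converge, assuming the observed error is already in
 * nanoseconds and that struct kvm_timer carries the per-vCPU advance value
 * in timer_advance_ns.  The divisor and cap below are example numbers; the
 * real bounds come from the LAPIC_TIMER_ADVANCE_* defines above.
 */
static inline void lapic_timer_advance_step_sketch(struct kvm_lapic *apic,
						   s64 delta_ns)
{
	u32 ns = apic->lapic_timer.timer_advance_ns;

	/* Nudge the advance value by a fraction of the error per sample. */
	if (delta_ns < 0)
		ns -= min_t(u32, (u32)(-delta_ns) / 8, ns);
	else
		ns += (u32)delta_ns / 8;

	/* Clamp so one bad sample cannot program an absurd advance. */
	apic->lapic_timer.timer_advance_ns = min_t(u32, ns, 5000);
}
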
static int kvm_lapic_msr_read(struct kvm_lapic *apic, u32 reg, u64 *data);
static int kvm_lapic_msr_write(struct kvm_lapic *apic, u32 reg, u64 data);

static inline void __kvm_lapic_set_reg(char *regs, int reg_off, u32 val)
{}

static inline void kvm_lapic_set_reg(struct kvm_lapic *apic, int reg_off, u32 val)
{}

static __always_inline u64 __kvm_lapic_get_reg64(char *regs, int reg)
{}

static __always_inline u64 kvm_lapic_get_reg64(struct kvm_lapic *apic, int reg)
{}

static __always_inline void __kvm_lapic_set_reg64(char *regs, int reg, u64 val)
{}

static __always_inline void kvm_lapic_set_reg64(struct kvm_lapic *apic,
						int reg, u64 val)
{}

static inline int apic_test_vector(int vec, void *bitmap)
{}

bool kvm_apic_pending_eoi(struct kvm_vcpu *vcpu, int vector)
{}

static inline int __apic_test_and_set_vector(int vec, void *bitmap)
{}

static inline int __apic_test_and_clear_vector(int vec, void *bitmap)
{}

__read_mostly DEFINE_STATIC_KEY_FALSE(kvm_has_noapic_vcpu);
EXPORT_SYMBOL_GPL(kvm_has_noapic_vcpu);

__read_mostly DEFINE_STATIC_KEY_DEFERRED_FALSE(apic_hw_disabled, HZ);
__read_mostly DEFINE_STATIC_KEY_DEFERRED_FALSE(apic_sw_disabled, HZ);

static inline int apic_enabled(struct kvm_lapic *apic)
{}

#define LVT_MASK	\
	(APIC_LVT_MASKED | APIC_SEND_PENDING | APIC_VECTOR_MASK)

#define LINT_MASK	\
	(LVT_MASK | APIC_MODE_MASK | APIC_INPUT_POLARITY | \
	 APIC_LVT_REMOTE_IRR | APIC_LVT_LEVEL_TRIGGER)

static inline u32 kvm_x2apic_id(struct kvm_lapic *apic)
{}

static bool kvm_can_post_timer_interrupt(struct kvm_vcpu *vcpu)
{}

bool kvm_can_use_hv_timer(struct kvm_vcpu *vcpu)
{}

static bool kvm_use_posted_timer_interrupt(struct kvm_vcpu *vcpu)
{}

static inline u32 kvm_apic_calc_x2apic_ldr(u32 id)
{}

static inline bool kvm_apic_map_get_logical_dest(struct kvm_apic_map *map,
		u32 dest_id, struct kvm_lapic ***cluster, u16 *mask) {}

static void kvm_apic_map_free(struct rcu_head *rcu)
{}

static int kvm_recalculate_phys_map(struct kvm_apic_map *new,
				    struct kvm_vcpu *vcpu,
				    bool *xapic_id_mismatch)
{}

static void kvm_recalculate_logical_map(struct kvm_apic_map *new,
					struct kvm_vcpu *vcpu)
{}

/*
 * CLEAN -> DIRTY and UPDATE_IN_PROGRESS -> DIRTY changes happen without a lock.
 *
 * DIRTY -> UPDATE_IN_PROGRESS and UPDATE_IN_PROGRESS -> CLEAN happen with
 * the apic_map_lock held.
 */
enum {
	CLEAN,
	UPDATE_IN_PROGRESS,
	DIRTY
};
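
/*
 * Illustrative sketch (hypothetical helper), assuming the map state above is
 * kept in an atomic_t at kvm->arch.apic_map_dirty: the lockless writers only
 * ever push the state toward DIRTY, so a release store is enough, while the
 * DIRTY -> UPDATE_IN_PROGRESS -> CLEAN transitions are done by the
 * recalculation path under the map lock.
 */
static inline void kvm_mark_apic_map_dirty_sketch(struct kvm *kvm)
{
	/* CLEAN -> DIRTY or UPDATE_IN_PROGRESS -> DIRTY, no lock required. */
	atomic_set_release(&kvm->arch.apic_map_dirty, DIRTY);
}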

void kvm_recalculate_apic_map(struct kvm *kvm)
{}

static inline void apic_set_spiv(struct kvm_lapic *apic, u32 val)
{}

static inline void kvm_apic_set_xapic_id(struct kvm_lapic *apic, u8 id)
{}

static inline void kvm_apic_set_ldr(struct kvm_lapic *apic, u32 id)
{}

static inline void kvm_apic_set_dfr(struct kvm_lapic *apic, u32 val)
{}

static inline void kvm_apic_set_x2apic_id(struct kvm_lapic *apic, u32 id)
{}

static inline int apic_lvt_enabled(struct kvm_lapic *apic, int lvt_type)
{}

static inline int apic_lvtt_oneshot(struct kvm_lapic *apic)
{}

static inline int apic_lvtt_period(struct kvm_lapic *apic)
{}

static inline int apic_lvtt_tscdeadline(struct kvm_lapic *apic)
{}

static inline int apic_lvt_nmi_mode(u32 lvt_val)
{}

static inline bool kvm_lapic_lvt_supported(struct kvm_lapic *apic, int lvt_index)
{}

static inline int kvm_apic_calc_nr_lvt_entries(struct kvm_vcpu *vcpu)
{}

void kvm_apic_set_version(struct kvm_vcpu *vcpu)
{}

void kvm_apic_after_set_mcg_cap(struct kvm_vcpu *vcpu)
{}

static const unsigned int apic_lvt_mask[KVM_APIC_MAX_NR_LVT_ENTRIES] = {
	[LVT_TIMER] = LVT_MASK,		/* timer mode mask added at runtime */
	[LVT_THERMAL_MONITOR] = LVT_MASK | APIC_MODE_MASK,
	[LVT_PERFORMANCE_COUNTER] = LVT_MASK | APIC_MODE_MASK,
	[LVT_LINT0] = LINT_MASK,
	[LVT_LINT1] = LINT_MASK,
	[LVT_ERROR] = LVT_MASK,
	[LVT_CMCI] = LVT_MASK | APIC_MODE_MASK
};

static int find_highest_vector(void *bitmap)
{}

static u8 count_vectors(void *bitmap)
{}

bool __kvm_apic_update_irr(u32 *pir, void *regs, int *max_irr)
{}
EXPORT_SYMBOL_GPL(__kvm_apic_update_irr);

bool kvm_apic_update_irr(struct kvm_vcpu *vcpu, u32 *pir, int *max_irr)
{}
EXPORT_SYMBOL_GPL(kvm_apic_update_irr);

static inline int apic_search_irr(struct kvm_lapic *apic)
{}

static inline int apic_find_highest_irr(struct kvm_lapic *apic)
{}

static inline void apic_clear_irr(int vec, struct kvm_lapic *apic)
{}

void kvm_apic_clear_irr(struct kvm_vcpu *vcpu, int vec)
{}
EXPORT_SYMBOL_GPL(kvm_apic_clear_irr);

static inline void apic_set_isr(int vec, struct kvm_lapic *apic)
{}

static inline int apic_find_highest_isr(struct kvm_lapic *apic)
{}

static inline void apic_clear_isr(int vec, struct kvm_lapic *apic)
{}

int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu)
{}
EXPORT_SYMBOL_GPL(kvm_lapic_find_highest_irr);

static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
			     int vector, int level, int trig_mode,
			     struct dest_map *dest_map);

int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq,
		     struct dest_map *dest_map)
{}

static int __pv_send_ipi(unsigned long *ipi_bitmap, struct kvm_apic_map *map,
			 struct kvm_lapic_irq *irq, u32 min)
{}

int kvm_pv_send_ipi(struct kvm *kvm, unsigned long ipi_bitmap_low,
		    unsigned long ipi_bitmap_high, u32 min,
		    unsigned long icr, int op_64_bit)
{}

static int pv_eoi_put_user(struct kvm_vcpu *vcpu, u8 val)
{}

static int pv_eoi_get_user(struct kvm_vcpu *vcpu, u8 *val)
{}

static inline bool pv_eoi_enabled(struct kvm_vcpu *vcpu)
{}

static void pv_eoi_set_pending(struct kvm_vcpu *vcpu)
{}

static bool pv_eoi_test_and_clr_pending(struct kvm_vcpu *vcpu)
{}

static int apic_has_interrupt_for_ppr(struct kvm_lapic *apic, u32 ppr)
{}

static bool __apic_update_ppr(struct kvm_lapic *apic, u32 *new_ppr)
{}

static void apic_update_ppr(struct kvm_lapic *apic)
{}

void kvm_apic_update_ppr(struct kvm_vcpu *vcpu)
{}
EXPORT_SYMBOL_GPL(kvm_apic_update_ppr);

static void apic_set_tpr(struct kvm_lapic *apic, u32 tpr)
{}

static bool kvm_apic_broadcast(struct kvm_lapic *apic, u32 mda)
{}

static bool kvm_apic_match_physical_addr(struct kvm_lapic *apic, u32 mda)
{}

static bool kvm_apic_match_logical_addr(struct kvm_lapic *apic, u32 mda)
{}

/* The KVM local APIC implementation has two quirks:
 *
 *  - Real hardware delivers interrupts destined to x2APIC ID > 0xff to LAPICs
 *    in xAPIC mode if the "destination & 0xff" matches its xAPIC ID.
 *    KVM doesn't do that aliasing.
 *
 *  - in-kernel IOAPIC messages have to be delivered directly to
 *    x2APIC, because the kernel does not support interrupt remapping.
 *    In order to support broadcast without interrupt remapping, x2APIC
 *    rewrites the destination of non-IPI messages from APIC_BROADCAST
 *    to X2APIC_BROADCAST.
 *
 * The broadcast quirk can be disabled with KVM_CAP_X2APIC_API.  This is
 * important when userspace wants to use x2APIC-format MSIs, because
 * APIC_BROADCAST (0xff) is a legal route for "cluster 0, CPUs 0-7".
 */
static u32 kvm_apic_mda(struct kvm_vcpu *vcpu, unsigned int dest_id,
		struct kvm_lapic *source, struct kvm_lapic *target)
{}
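
/*
 * Illustrative sketch of the broadcast-rewrite quirk described above
 * (hypothetical helper, not necessarily how kvm_apic_mda() is written),
 * assuming x2apic_broadcast_quirk_disabled is the flag toggled by
 * KVM_CAP_X2APIC_API: a non-IPI message addressed to APIC_BROADCAST whose
 * target is in x2APIC mode is rewritten to X2APIC_BROADCAST.
 */
static inline u32 kvm_apic_mda_quirk_sketch(struct kvm_vcpu *vcpu, u32 dest_id,
					    struct kvm_lapic *source,
					    struct kvm_lapic *target)
{
	bool ipi = source != NULL;

	if (!vcpu->kvm->arch.x2apic_broadcast_quirk_disabled &&
	    !ipi && dest_id == APIC_BROADCAST && apic_x2apic_mode(target))
		return X2APIC_BROADCAST;

	return dest_id;
}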

bool kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
			   int shorthand, unsigned int dest, int dest_mode)
{}
EXPORT_SYMBOL_GPL(kvm_apic_match_dest);

int kvm_vector_to_index(u32 vector, u32 dest_vcpus,
		       const unsigned long *bitmap, u32 bitmap_size)
{}

static void kvm_apic_disabled_lapic_found(struct kvm *kvm)
{}

static bool kvm_apic_is_broadcast_dest(struct kvm *kvm, struct kvm_lapic **src,
		struct kvm_lapic_irq *irq, struct kvm_apic_map *map)
{}

/*
 * Return true if the interrupt can be handled using *bitmap as an index mask
 * into the *dst array of valid destinations.
 * Return false if kvm_apic_map_get_dest_lapic() did nothing useful.
 * Note: the function may return true with zero kvm_lapic destinations, which
 * means the interrupt should be dropped.  In that case *bitmap is zero and
 * *dst is undefined.
 */
static inline bool kvm_apic_map_get_dest_lapic(struct kvm *kvm,
		struct kvm_lapic **src, struct kvm_lapic_irq *irq,
		struct kvm_apic_map *map, struct kvm_lapic ***dst,
		unsigned long *bitmap)
{}
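
/*
 * Illustrative caller sketch (hypothetical helper, simplified from what the
 * fast delivery path does with kvm_apic_map_get_dest_lapic() above): walk the
 * returned bitmap, skip NULL slots, and deliver to each valid destination.
 * A false return from the helper means the caller must fall back to scanning
 * all vCPUs.
 */
static inline int kvm_deliver_via_map_sketch(struct kvm *kvm,
					     struct kvm_lapic *src,
					     struct kvm_lapic_irq *irq,
					     struct kvm_apic_map *map)
{
	struct kvm_lapic **dst = NULL;
	unsigned long bitmap = 0;
	int i, r = -1;

	if (!kvm_apic_map_get_dest_lapic(kvm, &src, irq, map, &dst, &bitmap))
		return r;	/* not handled here, use the slow path */

	for_each_set_bit(i, &bitmap, 16) {
		if (!dst[i])
			continue;
		if (r < 0)
			r = 0;
		r += __apic_accept_irq(dst[i], irq->delivery_mode, irq->vector,
				       irq->level, irq->trig_mode, NULL);
	}
	return r;
}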

bool kvm_irq_delivery_to_apic_fast(struct kvm *kvm, struct kvm_lapic *src,
		struct kvm_lapic_irq *irq, int *r, struct dest_map *dest_map)
{}

/*
 * This routine tries to handle interrupts in posted mode.  It handles the
 * different cases as follows:
 * - For a single-destination interrupt, handle it in posted mode.
 * - Else, if vector hashing is enabled and it is a lowest-priority
 *   interrupt, handle it in posted mode and use the following mechanism
 *   to find the destination vCPU:
 *	1. For lowest-priority interrupts, store all the possible
 *	   destination vCPUs in an array.
 *	2. Use "guest vector % max number of destination vCPUs" to find
 *	   the right destination vCPU in the array for the lowest-priority
 *	   interrupt.
 * - Otherwise, use remapped mode to inject the interrupt.
 */
bool kvm_intr_is_single_vcpu_fast(struct kvm *kvm, struct kvm_lapic_irq *irq,
			struct kvm_vcpu **dest_vcpu)
{}
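
/*
 * Illustrative sketch (hypothetical helper) of the vector-hashing step from
 * the comment above: "guest vector % number of candidate destinations" picks
 * the destination, which is what kvm_vector_to_index() implements for the
 * real lowest-priority path.
 */
static inline int lowest_prio_hash_sketch(u32 vector, unsigned long *candidates,
					  u32 bitmap_size)
{
	u32 nr_dest = bitmap_weight(candidates, bitmap_size);

	if (!nr_dest)
		return -1;

	/* Index of the (vector % nr_dest)-th set bit in the candidate mask. */
	return kvm_vector_to_index(vector, nr_dest, candidates, bitmap_size);
}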

/*
 * Add a pending IRQ into lapic.
 * Return 1 if successfully added and 0 if discarded.
 */
static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
			     int vector, int level, int trig_mode,
			     struct dest_map *dest_map)
{}
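
/*
 * Illustrative sketch of the fixed-delivery arm of the routine above
 * (hypothetical helper; the real __apic_accept_irq() also handles NMI, INIT,
 * SIPI, etc. and hands vectors to the APICv hardware path when enabled):
 * record the trigger mode in the TMR, set the vector in the IRR, and kick
 * the vCPU so it notices the newly pending interrupt.
 */
static inline int apic_accept_fixed_irq_sketch(struct kvm_lapic *apic,
					       int vector, int trig_mode)
{
	struct kvm_vcpu *vcpu = apic->vcpu;

	if (trig_mode)
		kvm_lapic_set_vector(vector, apic->regs + APIC_TMR);
	else
		kvm_lapic_clear_vector(vector, apic->regs + APIC_TMR);

	kvm_lapic_set_irr(vector, apic);
	kvm_make_request(KVM_REQ_EVENT, vcpu);
	kvm_vcpu_kick(vcpu);
	return 1;	/* delivered */
}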

/*
 * This routine identifies the destination vCPUs meant to receive the given
 * IOAPIC interrupt.  It either uses kvm_apic_map_get_dest_lapic() to find
 * the destination vCPU array and set the bitmap, or it walks each available
 * vCPU and checks it against the destination individually.
 */
void kvm_bitmap_or_dest_vcpus(struct kvm *kvm, struct kvm_lapic_irq *irq,
			      unsigned long *vcpu_bitmap)
{}
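
/*
 * Illustrative sketch (hypothetical helper) of the slow fallback mentioned
 * above: when the optimized map cannot resolve the destinations, every
 * present APIC is checked against the interrupt's destination fields and the
 * corresponding bit (indexed by vCPU slot here) is set in the bitmap.
 */
static inline void bitmap_or_dest_vcpus_slow_sketch(struct kvm *kvm,
						    struct kvm_lapic_irq *irq,
						    unsigned long *vcpu_bitmap)
{
	struct kvm_vcpu *vcpu;
	unsigned long i;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (!kvm_apic_present(vcpu))
			continue;
		if (kvm_apic_match_dest(vcpu, NULL, irq->shorthand,
					irq->dest_id, irq->dest_mode))
			__set_bit(i, vcpu_bitmap);
	}
}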

int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2)
{}

static bool kvm_ioapic_handles_vector(struct kvm_lapic *apic, int vector)
{}

static void kvm_ioapic_send_eoi(struct kvm_lapic *apic, int vector)
{}

static int apic_set_eoi(struct kvm_lapic *apic)
{}

/*
 * This interface assumes a trap-like exit, i.e. the desired side effects,
 * including the vISR and vPPR updates, have already been applied.
 */
void kvm_apic_set_eoi_accelerated(struct kvm_vcpu *vcpu, int vector)
{}
EXPORT_SYMBOL_GPL(kvm_apic_set_eoi_accelerated);
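
/*
 * Illustrative sketch (hypothetical helper) of what such a trap-like EOI
 * exit still needs: vISR/vPPR were already updated by hardware, so only the
 * tracing and the ioapic/level-triggered bookkeeping for the vector remain.
 */
static inline void apic_eoi_accelerated_sketch(struct kvm_vcpu *vcpu, int vector)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	trace_kvm_eoi(apic, vector);
	kvm_ioapic_send_eoi(apic, vector);
	kvm_make_request(KVM_REQ_EVENT, vcpu);
}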

void kvm_apic_send_ipi(struct kvm_lapic *apic, u32 icr_low, u32 icr_high)
{}
EXPORT_SYMBOL_GPL(kvm_apic_send_ipi);

static u32 apic_get_tmcct(struct kvm_lapic *apic)
{}

static void __report_tpr_access(struct kvm_lapic *apic, bool write)
{}

static inline void report_tpr_access(struct kvm_lapic *apic, bool write)
{}

static u32 __apic_read(struct kvm_lapic *apic, unsigned int offset)
{}

static inline struct kvm_lapic *to_lapic(struct kvm_io_device *dev)
{}

#define APIC_REG_MASK(reg)	(1ull << ((reg) >> 4))
#define APIC_REGS_MASK(first, count) \
	(APIC_REG_MASK(first) * ((1ull << (count)) - 1))

u64 kvm_lapic_readable_reg_mask(struct kvm_lapic *apic)
{}
EXPORT_SYMBOL_GPL(kvm_lapic_readable_reg_mask);

static int kvm_lapic_reg_read(struct kvm_lapic *apic, u32 offset, int len,
			      void *data)
{}

static int apic_mmio_in_range(struct kvm_lapic *apic, gpa_t addr)
{}

static int apic_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
			   gpa_t address, int len, void *data)
{}

static void update_divide_count(struct kvm_lapic *apic)
{}

static void limit_periodic_timer_frequency(struct kvm_lapic *apic)
{}

static void cancel_hv_timer(struct kvm_lapic *apic);

static void cancel_apic_timer(struct kvm_lapic *apic)
{}

static void apic_update_lvtt(struct kvm_lapic *apic)
{}

/*
 * On APICv, this test will cause a busy wait
 * during a higher-priority task.
 */

static bool lapic_timer_int_injected(struct kvm_vcpu *vcpu)
{}

static inline void __wait_lapic_expire(struct kvm_vcpu *vcpu, u64 guest_cycles)
{}

static inline void adjust_lapic_timer_advance(struct kvm_vcpu *vcpu,
					      s64 advance_expire_delta)
{}

static void __kvm_wait_lapic_expire(struct kvm_vcpu *vcpu)
{}

void kvm_wait_lapic_expire(struct kvm_vcpu *vcpu)
{}
EXPORT_SYMBOL_GPL(kvm_wait_lapic_expire);

static void kvm_apic_inject_pending_timer_irqs(struct kvm_lapic *apic)
{}

static void apic_timer_expired(struct kvm_lapic *apic, bool from_timer_fn)
{}

static void start_sw_tscdeadline(struct kvm_lapic *apic)
{}

static inline u64 tmict_to_ns(struct kvm_lapic *apic, u32 tmict)
{}

static void update_target_expiration(struct kvm_lapic *apic, uint32_t old_divisor)
{}

static bool set_target_expiration(struct kvm_lapic *apic, u32 count_reg)
{}

static void advance_periodic_target_expiration(struct kvm_lapic *apic)
{}

static void start_sw_period(struct kvm_lapic *apic)
{}

bool kvm_lapic_hv_timer_in_use(struct kvm_vcpu *vcpu)
{}

static void cancel_hv_timer(struct kvm_lapic *apic)
{}

static bool start_hv_timer(struct kvm_lapic *apic)
{}

static void start_sw_timer(struct kvm_lapic *apic)
{}

static void restart_apic_timer(struct kvm_lapic *apic)
{}

void kvm_lapic_expired_hv_timer(struct kvm_vcpu *vcpu)
{}
EXPORT_SYMBOL_GPL(kvm_lapic_expired_hv_timer);

void kvm_lapic_switch_to_hv_timer(struct kvm_vcpu *vcpu)
{}

void kvm_lapic_switch_to_sw_timer(struct kvm_vcpu *vcpu)
{}

void kvm_lapic_restart_hv_timer(struct kvm_vcpu *vcpu)
{}

static void __start_apic_timer(struct kvm_lapic *apic, u32 count_reg)
{}

static void start_apic_timer(struct kvm_lapic *apic)
{}

static void apic_manage_nmi_watchdog(struct kvm_lapic *apic, u32 lvt0_val)
{}

static int get_lvt_index(u32 reg)
{}

static int kvm_lapic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val)
{}

static int apic_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
			    gpa_t address, int len, const void *data)
{}

void kvm_lapic_set_eoi(struct kvm_vcpu *vcpu)
{}
EXPORT_SYMBOL_GPL(kvm_lapic_set_eoi);

#define X2APIC_ICR_RESERVED_BITS

int kvm_x2apic_icr_write(struct kvm_lapic *apic, u64 data)
{}

static u64 kvm_x2apic_icr_read(struct kvm_lapic *apic)
{}

/* emulate APIC access in a trap manner */
void kvm_apic_write_nodecode(struct kvm_vcpu *vcpu, u32 offset)
{}
EXPORT_SYMBOL_GPL(kvm_apic_write_nodecode);
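
/*
 * Illustrative sketch (hypothetical helper, simplified): on a trap-style
 * APIC write exit the value has already been written to the virtual-APIC
 * page, so emulation only needs to read it back and replay the side effects
 * through the normal register-write path.  The in-tree function additionally
 * special-cases the 64-bit ICR in x2APIC mode.
 */
static inline void kvm_apic_replay_trapped_write_sketch(struct kvm_vcpu *vcpu,
							u32 offset)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	kvm_lapic_reg_write(apic, offset, kvm_lapic_get_reg(apic, offset));
}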

void kvm_free_lapic(struct kvm_vcpu *vcpu)
{}

/*
 *----------------------------------------------------------------------
 * LAPIC interface
 *----------------------------------------------------------------------
 */
u64 kvm_get_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu)
{}

void kvm_set_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu, u64 data)
{}

void kvm_lapic_set_tpr(struct kvm_vcpu *vcpu, unsigned long cr8)
{}

u64 kvm_lapic_get_cr8(struct kvm_vcpu *vcpu)
{}

void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value)
{}

void kvm_apic_update_apicv(struct kvm_vcpu *vcpu)
{}

int kvm_alloc_apic_access_page(struct kvm *kvm)
{}
EXPORT_SYMBOL_GPL(kvm_alloc_apic_access_page);

void kvm_inhibit_apic_access_page(struct kvm_vcpu *vcpu)
{}

void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event)
{}

/*
 *----------------------------------------------------------------------
 * timer interface
 *----------------------------------------------------------------------
 */

static bool lapic_is_periodic(struct kvm_lapic *apic)
{}

int apic_has_pending_timer(struct kvm_vcpu *vcpu)
{}

int kvm_apic_local_deliver(struct kvm_lapic *apic, int lvt_type)
{}

void kvm_apic_nmi_wd_deliver(struct kvm_vcpu *vcpu)
{}

static const struct kvm_io_device_ops apic_mmio_ops = {
	.read     = apic_mmio_read,
	.write    = apic_mmio_write,
};

static enum hrtimer_restart apic_timer_fn(struct hrtimer *data)
{}

int kvm_create_lapic(struct kvm_vcpu *vcpu)
{}

int kvm_apic_has_interrupt(struct kvm_vcpu *vcpu)
{}
EXPORT_SYMBOL_GPL(kvm_apic_has_interrupt);

int kvm_apic_accept_pic_intr(struct kvm_vcpu *vcpu)
{}

void kvm_inject_apic_timer_irqs(struct kvm_vcpu *vcpu)
{}

void kvm_apic_ack_interrupt(struct kvm_vcpu *vcpu, int vector)
{}
EXPORT_SYMBOL_GPL(kvm_apic_ack_interrupt);

static int kvm_apic_state_fixup(struct kvm_vcpu *vcpu,
		struct kvm_lapic_state *s, bool set)
{}

int kvm_apic_get_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s)
{}

int kvm_apic_set_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s)
{}

void __kvm_migrate_apic_timer(struct kvm_vcpu *vcpu)
{}

/*
 * apic_sync_pv_eoi_from_guest - called on vmexit or cancel interrupt
 *
 * Detect whether the guest triggered PV EOI since the last entry.
 * If yes, perform the EOI on the guest's behalf.
 * Clear PV EOI in guest memory in any case.
 */
static void apic_sync_pv_eoi_from_guest(struct kvm_vcpu *vcpu,
					struct kvm_lapic *apic)
{}
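
/*
 * Illustrative sketch (hypothetical helper) of the check described above,
 * assuming highest_isr_cache holds the vector whose EOI was deferred: if the
 * guest flipped its PV EOI flag since the last entry, complete the EOI on its
 * behalf (the test-and-clear also wipes the flag in guest memory).
 */
static inline void pv_eoi_sync_from_guest_sketch(struct kvm_vcpu *vcpu,
						 struct kvm_lapic *apic)
{
	if (!pv_eoi_test_and_clr_pending(vcpu) || apic->highest_isr_cache == -1)
		return;

	kvm_apic_set_eoi_accelerated(vcpu, apic->highest_isr_cache);
}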

void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu)
{}

/*
 * apic_sync_pv_eoi_to_guest - called before vmentry
 *
 * Detect whether it is safe to enable PV EOI and, if so, enable it.
 */
static void apic_sync_pv_eoi_to_guest(struct kvm_vcpu *vcpu,
					struct kvm_lapic *apic)
{}
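
/*
 * Illustrative sketch (hypothetical helper) of the safety check described
 * above: PV EOI is only armed when it is enabled, no further interrupt is
 * pending, a single in-service vector is cached, and that vector does not
 * need an EOI message forwarded to the ioapic.
 */
static inline bool pv_eoi_can_be_enabled_sketch(struct kvm_vcpu *vcpu,
						struct kvm_lapic *apic)
{
	int vector = apic->highest_isr_cache;

	return pv_eoi_enabled(vcpu) && !apic->irr_pending && vector != -1 &&
	       !kvm_ioapic_handles_vector(apic, vector);
}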

void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu)
{}

int kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr)
{}

static int kvm_lapic_msr_read(struct kvm_lapic *apic, u32 reg, u64 *data)
{}

static int kvm_lapic_msr_write(struct kvm_lapic *apic, u32 reg, u64 data)
{}

int kvm_x2apic_msr_write(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{}

int kvm_x2apic_msr_read(struct kvm_vcpu *vcpu, u32 msr, u64 *data)
{}

int kvm_hv_vapic_msr_write(struct kvm_vcpu *vcpu, u32 reg, u64 data)
{}

int kvm_hv_vapic_msr_read(struct kvm_vcpu *vcpu, u32 reg, u64 *data)
{}

int kvm_lapic_set_pv_eoi(struct kvm_vcpu *vcpu, u64 data, unsigned long len)
{}

int kvm_apic_accept_events(struct kvm_vcpu *vcpu)
{}

void kvm_lapic_exit(void)
{}