#ifndef _ASM_X86_MSHYPER_H
#define _ASM_X86_MSHYPER_H
#include <linux/types.h>
#include <linux/nmi.h>
#include <linux/msi.h>
#include <linux/io.h>
#include <asm/hyperv-tlfs.h>
#include <asm/nospec-branch.h>
#include <asm/paravirt.h>
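
/*
 * Hyper-V Virtual Secure Mode runs a guest at one of several Virtual Trust
 * Levels (VTLs): HV_VTL_NORMAL is the default level, while HV_VTL_SECURE and
 * HV_VTL_MGMT name the secure and management levels.  HV_IOAPIC_BASE_ADDRESS
 * is the guest physical base address of the IO-APIC.
 */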
#define HV_IOAPIC_BASE_ADDRESS …
#define HV_VTL_NORMAL …
#define HV_VTL_SECURE …
#define HV_VTL_MGMT …
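
/*
 * union hv_ghcb describes the Guest-Hypervisor Communication Block that AMD
 * SEV-ES/SNP guests use to talk to the hypervisor.  The static keys below are
 * enabled when Linux runs as an SEV-SNP or TDX isolated guest.
 */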
union hv_ghcb;
DECLARE_STATIC_KEY_FALSE(isolation_type_snp);
DECLARE_STATIC_KEY_FALSE(isolation_type_tdx);
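
/*
 * Callback used by hyperv_flush_guest_mapping_range() to fill a
 * struct hv_guest_mapping_flush_list before the flush hypercall is issued.
 */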
typedef int (*hyperv_fill_flush_list_func)(
		struct hv_guest_mapping_flush_list *flush,
		void *data);
void hyperv_vector_handler(struct pt_regs *regs);
static inline unsigned char hv_get_nmi_reason(void)
{ … }
#if IS_ENABLED(CONFIG_HYPERV)
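
/*
 * hv_hypercall_pg is the executable hypercall page through which hypercalls
 * are issued, hv_current_partition_id identifies the running partition (most
 * useful when Linux is the root partition), and hv_ghcb_pg is the per-CPU
 * GHCB page pointer used by SEV-SNP guests.  hyperv_paravisor_present is set
 * when an isolated VM runs on top of a paravisor, and hyperv_init_cpuhp holds
 * the CPU-hotplug state registered by hyperv_init().
 */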
extern int hyperv_init_cpuhp;
extern bool hyperv_paravisor_present;
extern void *hv_hypercall_pg;
extern u64 hv_current_partition_id;
extern union hv_ghcb * __percpu *hv_ghcb_pg;
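
/*
 * hv_isolation_type_snp()/hv_isolation_type_tdx() report whether the guest
 * runs with AMD SEV-SNP or Intel TDX isolation.  hv_tdx_hypercall() issues a
 * hypercall from a TDX guest using the TDX guest-host call interface rather
 * than the hypercall page.
 */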
bool hv_isolation_type_snp(void);
bool hv_isolation_type_tdx(void);
u64 hv_tdx_hypercall(u64 control, u64 param1, u64 param2);
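
/*
 * Default guest PAT value and segment limit used when building the initial
 * register state for an AP that is started with hypervisor assistance (see
 * hv_snp_boot_ap()).
 */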
#define HV_AP_INIT_GPAT_DEFAULT …
#define HV_AP_SEGMENT_LIMIT …
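
/*
 * Root-partition management hypercall wrappers: hv_call_deposit_pages()
 * donates memory to the hypervisor pool on behalf of a partition,
 * hv_call_add_logical_proc() brings a logical processor under hypervisor
 * control, and hv_call_create_vp() creates a virtual processor in a child
 * partition.
 */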
int hv_call_deposit_pages(int node, u64 partition_id, u32 num_pages);
int hv_call_add_logical_proc(int node, u32 lp_index, u32 acpi_id);
int hv_call_create_vp(int node, u64 partition_id, u32 vp_index, u32 flags);
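
/*
 * hv_do_hypercall() passes @input and @output to the hypervisor by physical
 * address; callers normally use the per-CPU hyperv_pcpu_input_arg and
 * hyperv_pcpu_output_arg pages.  The "fast" variants pass 8 or 16 bytes of
 * input directly in registers instead, avoiding the memory round trip, and
 * the "nested" variants direct the call to the L0 hypervisor when Linux
 * itself runs nested on Hyper-V.
 */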
static inline u64 hv_do_hypercall(u64 control, void *input, void *output)
{ … }
static inline u64 hv_do_nested_hypercall(u64 control, void *input, void *output)
{ … }
static inline u64 _hv_do_fast_hypercall8(u64 control, u64 input1)
{ … }
static inline u64 hv_do_fast_hypercall8(u16 code, u64 input1)
{ … }
static inline u64 hv_do_fast_nested_hypercall8(u16 code, u64 input1)
{ … }
static inline u64 _hv_do_fast_hypercall16(u64 control, u64 input1, u64 input2)
{ … }
static inline u64 hv_do_fast_hypercall16(u16 code, u64 input1, u64 input2)
{ … }
static inline u64 hv_do_fast_nested_hypercall16(u16 code, u64 input1, u64 input2)
{ … }
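
/*
 * Illustrative sketch (not part of this header): a hypercall that takes a
 * single 64-bit input, such as HVCALL_NOTIFY_LONG_SPIN_WAIT, can be issued
 * with the fast calling convention and its status checked with the generic
 * helper from asm-generic/mshyperv.h:
 *
 *	status = hv_do_fast_hypercall8(HVCALL_NOTIFY_LONG_SPIN_WAIT, count);
 *	if (!hv_result_success(status))
 *		pr_warn("hypercall failed: 0x%llx\n", status);
 */

/*
 * The VP assist page is a per-CPU page shared with the hypervisor that backs
 * enlightenments such as the enlightened VMCS used when KVM runs nested on
 * Hyper-V.  hv_get_vp_assist_page() returns the page for @cpu, or NULL if it
 * has not been set up.
 */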
extern struct hv_vp_assist_page **hv_vp_assist_page;
static inline struct hv_vp_assist_page *hv_get_vp_assist_page(unsigned int cpu)
{ … }
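
/*
 * hyperv_init() performs the core Hyper-V guest setup at boot (hypercall
 * page, per-CPU argument pages, VP assist pages), and hyperv_setup_mmu_ops()
 * switches remote TLB flushing over to the Hyper-V flush hypercalls.
 */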
void __init hyperv_init(void);
void hyperv_setup_mmu_ops(void);
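
/*
 * set_hv_tscchange_cb() registers a callback that runs from the
 * reenlightenment interrupt Hyper-V raises when the reference TSC changes
 * (for example after live migration); clear_hv_tscchange_cb() removes it,
 * and hyperv_stop_tsc_emulation() tells the hypervisor to stop emulating TSC
 * accesses once the guest has adjusted.
 */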
void set_hv_tscchange_cb(void (*cb)(void));
void clear_hv_tscchange_cb(void);
void hyperv_stop_tsc_emulation(void);
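
/*
 * hyperv_flush_guest_mapping() and hyperv_flush_guest_mapping_range() issue
 * the guest-physical-address-space flush hypercalls for address space @as
 * (an EPT pointer).  KVM uses them as an enlightened alternative to flushing
 * nested mappings itself; the fill function adds the GFN ranges to flush to
 * the hypercall input list.
 */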
int hyperv_flush_guest_mapping(u64 as);
int hyperv_flush_guest_mapping_range(u64 as,
hyperv_fill_flush_list_func fill_func, void *data);
int hyperv_fill_flush_guest_mapping_list(
struct hv_guest_mapping_flush_list *flush,
u64 start_gfn, u64 end_gfn);
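
/* The APIC and PV-spinlock enlightenments below are 64-bit only. */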
#ifdef CONFIG_X86_64
void hv_apic_init(void);
void __init hv_init_spinlocks(void);
bool hv_vcpu_is_preempted(int vcpu);
#else
static inline void hv_apic_init(void) {}
#endif
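
/*
 * hv_create_pci_msi_domain() builds the IRQ domain used to deliver PCI
 * MSI/MSI-X interrupts through the hypervisor, and
 * hv_map_ioapic_interrupt()/hv_unmap_ioapic_interrupt() map and unmap
 * IO-APIC interrupts via hypercalls; these paths are used when Linux runs as
 * the root partition.
 */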
struct irq_domain *hv_create_pci_msi_domain(void);
int hv_map_ioapic_interrupt(int ioapic_id, bool level, int vcpu, int vector,
struct hv_interrupt_entry *entry);
int hv_unmap_ioapic_interrupt(int ioapic_id, struct hv_interrupt_entry *entry);
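
/*
 * GHCB helpers for SEV-SNP guests: hv_ghcb_negotiate_protocol() negotiates
 * the GHCB protocol version, hv_ghcb_terminate() requests guest termination
 * with the given reason code, and hv_snp_boot_ap() starts an application
 * processor.  No-op stubs are provided when SEV support is not built in.
 */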
#ifdef CONFIG_AMD_MEM_ENCRYPT
bool hv_ghcb_negotiate_protocol(void);
void __noreturn hv_ghcb_terminate(unsigned int set, unsigned int reason);
int hv_snp_boot_ap(u32 cpu, unsigned long start_ip);
#else
static inline bool hv_ghcb_negotiate_protocol(void) { return false; }
static inline void hv_ghcb_terminate(unsigned int set, unsigned int reason) {}
static inline int hv_snp_boot_ap(u32 cpu, unsigned long start_ip) { return 0; }
#endif
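
/*
 * Isolated-VM (SEV-SNP or TDX) support: hv_vtom_init() sets up the
 * vTOM-based shared/private memory model used together with a paravisor, and
 * hv_ivm_msr_read()/hv_ivm_msr_write() access Hyper-V synthetic MSRs through
 * the isolation-aware path instead of plain RDMSR/WRMSR.
 */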
#if defined(CONFIG_AMD_MEM_ENCRYPT) || defined(CONFIG_INTEL_TDX_GUEST)
void hv_vtom_init(void);
void hv_ivm_msr_write(u64 msr, u64 value);
void hv_ivm_msr_read(u64 msr, u64 *value);
#else
static inline void hv_vtom_init(void) {}
static inline void hv_ivm_msr_write(u64 msr, u64 value) {}
static inline void hv_ivm_msr_read(u64 msr, u64 *value) {}
#endif
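
/*
 * hv_is_synic_msr()/hv_is_sint_msr() test whether @reg falls in the SynIC or
 * SINTx synthetic MSR range.  hv_get_msr()/hv_set_msr() access Hyper-V
 * synthetic MSRs (for example, hv_get_msr(HV_X64_MSR_TIME_REF_COUNT) reads
 * the partition reference counter); the *_non_nested variants always target
 * the L1 hypervisor even when Linux runs nested, and hv_raw_get_msr() reads
 * the MSR directly with no paravisor handling.
 */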
static inline bool hv_is_synic_msr(unsigned int reg)
{ … }
static inline bool hv_is_sint_msr(unsigned int reg)
{ … }
u64 hv_get_msr(unsigned int reg);
void hv_set_msr(unsigned int reg, u64 value);
u64 hv_get_non_nested_msr(unsigned int reg);
void hv_set_non_nested_msr(unsigned int reg, u64 value);
static __always_inline u64 hv_raw_get_msr(unsigned int reg)
{ … }
#else
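/* No-op stubs so that callers build when CONFIG_HYPERV is disabled. */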
static inline void hyperv_init(void) {}
static inline void hyperv_setup_mmu_ops(void) {}
static inline void set_hv_tscchange_cb(void (*cb)(void)) {}
static inline void clear_hv_tscchange_cb(void) {}
static inline void hyperv_stop_tsc_emulation(void) {}
static inline struct hv_vp_assist_page *hv_get_vp_assist_page(unsigned int cpu)
{
return NULL;
}
static inline int hyperv_flush_guest_mapping(u64 as) { return -1; }
static inline int hyperv_flush_guest_mapping_range(u64 as,
hyperv_fill_flush_list_func fill_func, void *data)
{
return -1;
}
static inline void hv_set_msr(unsigned int reg, u64 value) { }
static inline u64 hv_get_msr(unsigned int reg) { return 0; }
static inline void hv_set_non_nested_msr(unsigned int reg, u64 value) { }
static inline u64 hv_get_non_nested_msr(unsigned int reg) { return 0; }
#endif
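
/*
 * CONFIG_HYPERV_VTL_MODE builds a kernel that runs in a non-default Virtual
 * Trust Level; hv_vtl_init_platform() and hv_vtl_early_init() perform the
 * extra platform setup that environment needs.
 */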
#ifdef CONFIG_HYPERV_VTL_MODE
void __init hv_vtl_init_platform(void);
int __init hv_vtl_early_init(void);
#else
static inline void __init hv_vtl_init_platform(void) {}
static inline int __init hv_vtl_early_init(void) { return 0; }
#endif
#include <asm-generic/mshyperv.h>
#endif