// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright © 2019 Oracle and/or its affiliates. All rights reserved.
 * Copyright © 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * KVM Xen emulation
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include "x86.h"
#include "xen.h"
#include "hyperv.h"
#include "irq.h"

#include <linux/eventfd.h>
#include <linux/kvm_host.h>
#include <linux/sched/stat.h>

#include <trace/events/kvm.h>
#include <xen/interface/xen.h>
#include <xen/interface/vcpu.h>
#include <xen/interface/version.h>
#include <xen/interface/event_channel.h>
#include <xen/interface/sched.h>

#include <asm/xen/cpuid.h>
#include <asm/pvclock.h>

#include "cpuid.h"
#include "trace.h"

static int kvm_xen_set_evtchn(struct kvm_xen_evtchn *xe, struct kvm *kvm);
static int kvm_xen_setattr_evtchn(struct kvm *kvm, struct kvm_xen_hvm_attr *data);
static bool kvm_xen_hcall_evtchn_send(struct kvm_vcpu *vcpu, u64 param, u64 *r);

DEFINE_STATIC_KEY_DEFERRED_FALSE(kvm_xen_enabled, HZ);

static int kvm_xen_shared_info_init(struct kvm *kvm)
{}

void kvm_xen_inject_timer_irqs(struct kvm_vcpu *vcpu)
{}

static enum hrtimer_restart xen_timer_callback(struct hrtimer *timer)
{}

static void kvm_xen_start_timer(struct kvm_vcpu *vcpu, u64 guest_abs,
				bool linux_wa)
{}

static void kvm_xen_stop_timer(struct kvm_vcpu *vcpu)
{}

static void kvm_xen_init_timer(struct kvm_vcpu *vcpu)
{}

static void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, bool atomic)
{}

void kvm_xen_update_runstate(struct kvm_vcpu *v, int state)
{}

void kvm_xen_inject_vcpu_vector(struct kvm_vcpu *v)
{}

/*
 * On event channel delivery, the vcpu_info may not have been accessible.
 * In that case, there are bits in vcpu->arch.xen.evtchn_pending_sel which
 * need to be marked into the vcpu_info (and evtchn_upcall_pending set).
 * Do so now that we can sleep in the context of the vCPU to bring the
 * page in, and refresh the pfn cache for it.
 */
void kvm_xen_inject_pending_events(struct kvm_vcpu *v)
{}
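
/*
 * Illustrative sketch only (hypothetical helper, not the in-tree logic,
 * which must also take the pfn-cache lock and bring the vcpu_info page
 * in): atomically claim the selector bits that were deferred at delivery
 * time, fold them into the guest-visible vcpu_info, and raise the
 * upcall flag.
 */
static void __maybe_unused sketch_flush_pending_sel(struct kvm_vcpu *v,
						    struct vcpu_info *vi)
{
	unsigned long pending = xchg(&v->arch.xen.evtchn_pending_sel, 0);

	if (!pending)
		return;

	/* Merge the deferred 2-level selector bits... */
	vi->evtchn_pending_sel |= pending;
	/* ...and make them visible before flagging the upcall to the vCPU. */
	smp_wmb();
	WRITE_ONCE(vi->evtchn_upcall_pending, 1);
}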

int __kvm_xen_has_interrupt(struct kvm_vcpu *v)
{}

int kvm_xen_hvm_set_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data)
{}

int kvm_xen_hvm_get_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data)
{}

int kvm_xen_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data)
{}

int kvm_xen_vcpu_get_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data)
{}

int kvm_xen_write_hypercall_page(struct kvm_vcpu *vcpu, u64 data)
{}

int kvm_xen_hvm_config(struct kvm *kvm, struct kvm_xen_hvm_config *xhc)
{}
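
/*
 * Userspace usage sketch for the KVM_XEN_HVM_CONFIG VM ioctl (not kernel
 * code, hence the #if 0; it would need <sys/ioctl.h> and <linux/kvm.h>).
 * The MSR index is the VMM's choice; 0x40000000 matches what Xen guests
 * conventionally use. KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL asks KVM to
 * handle hypercalls itself instead of exiting to userspace.
 */
#if 0
	struct kvm_xen_hvm_config cfg = {
		.flags = KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL,
		.msr   = 0x40000000,	/* MSR the guest writes the hypercall page GPA to */
	};

	if (ioctl(vm_fd, KVM_XEN_HVM_CONFIG, &cfg) < 0)
		err(1, "KVM_XEN_HVM_CONFIG");
#endif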

static int kvm_xen_hypercall_set_result(struct kvm_vcpu *vcpu, u64 result)
{}

static int kvm_xen_hypercall_complete_userspace(struct kvm_vcpu *vcpu)
{}

static inline int max_evtchn_port(struct kvm *kvm)
{}

static bool wait_pending_event(struct kvm_vcpu *vcpu, int nr_ports,
			       evtchn_port_t *ports)
{}

static bool kvm_xen_schedop_poll(struct kvm_vcpu *vcpu, bool longmode,
				 u64 param, u64 *r)
{}

static void cancel_evtchn_poll(struct timer_list *t)
{}

static bool kvm_xen_hcall_sched_op(struct kvm_vcpu *vcpu, bool longmode,
				   int cmd, u64 param, u64 *r)
{}

struct compat_vcpu_set_singleshot_timer {
	uint64_t timeout_abs_ns;
	uint32_t flags;
} __attribute__((packed));

static bool kvm_xen_hcall_vcpu_op(struct kvm_vcpu *vcpu, bool longmode, int cmd,
				  int vcpu_id, u64 param, u64 *r)
{}

static bool kvm_xen_hcall_set_timer_op(struct kvm_vcpu *vcpu, uint64_t timeout,
				       u64 *r)
{}

int kvm_xen_hypercall(struct kvm_vcpu *vcpu)
{}

static void kvm_xen_check_poller(struct kvm_vcpu *vcpu, int port)
{}

/*
 * The return value from this function is propagated to kvm_set_irq() API,
 * so it returns:
 *  < 0   Interrupt was ignored (masked or not delivered for other reasons)
 *  = 0   Interrupt was coalesced (previous irq is still pending)
 *  > 0   Number of CPUs interrupt was delivered to
 *
 * It is also called directly from kvm_arch_set_irq_inatomic(), where the
 * only check on its return value is a comparison with -EWOULDBLOCK.
 */
int kvm_xen_set_evtchn_fast(struct kvm_xen_evtchn *xe, struct kvm *kvm)
{}
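
/*
 * Usage sketch (hypothetical helper): how a caller typically consumes the
 * contract above, falling back to the sleepable kvm_xen_set_evtchn() when
 * the fast path returns -EWOULDBLOCK because the shared_info/vcpu_info
 * page was not resident in the pfn cache.
 */
static int __maybe_unused sketch_deliver_evtchn(struct kvm_xen_evtchn *xe,
						struct kvm *kvm)
{
	int rc = kvm_xen_set_evtchn_fast(xe, kvm);

	if (rc == -EWOULDBLOCK)
		rc = kvm_xen_set_evtchn(xe, kvm);	/* may sleep */

	return rc;	/* < 0 ignored, 0 coalesced, > 0 vCPUs notified */
}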

static int kvm_xen_set_evtchn(struct kvm_xen_evtchn *xe, struct kvm *kvm)
{}

/* This is the version called from kvm_set_irq() as the .set function */
static int evtchn_set_fn(struct kvm_kernel_irq_routing_entry *e, struct kvm *kvm,
			 int irq_source_id, int level, bool line_status)
{}

/*
 * Set up an event channel interrupt from the KVM IRQ routing table.
 * Used for e.g. PIRQ from passed through physical devices.
 */
int kvm_xen_setup_evtchn(struct kvm *kvm,
			 struct kvm_kernel_irq_routing_entry *e,
			 const struct kvm_irq_routing_entry *ue)
{}
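
/*
 * Userspace usage sketch (not kernel code): installing a
 * KVM_IRQ_ROUTING_XEN_EVTCHN entry with KVM_SET_GSI_ROUTING so that
 * raising GSI 32 (e.g. for a passed-through PIRQ) delivers event-channel
 * port 17 to vCPU 0. The GSI, port and vCPU numbers are arbitrary examples.
 */
#if 0
	struct {
		struct kvm_irq_routing info;
		struct kvm_irq_routing_entry entries[1];
	} routes = {
		.info.nr = 1,
		.entries[0] = {
			.gsi  = 32,
			.type = KVM_IRQ_ROUTING_XEN_EVTCHN,
			.u.xen_evtchn = {
				.port     = 17,
				.vcpu     = 0,
				.priority = KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL,
			},
		},
	};

	if (ioctl(vm_fd, KVM_SET_GSI_ROUTING, &routes) < 0)
		err(1, "KVM_SET_GSI_ROUTING");
#endif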

/*
 * Explicit event sending from userspace with KVM_XEN_HVM_EVTCHN_SEND ioctl.
 */
int kvm_xen_hvm_evtchn_send(struct kvm *kvm, struct kvm_irq_routing_xen_evtchn *uxe)
{}
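
/*
 * Userspace usage sketch (not kernel code): injecting port 3 on vCPU 0
 * directly with the KVM_XEN_HVM_EVTCHN_SEND VM ioctl. Only 2-level event
 * channel priority is currently supported.
 */
#if 0
	struct kvm_irq_routing_xen_evtchn uxe = {
		.port     = 3,	/* guest event-channel port */
		.vcpu     = 0,	/* target vCPU index */
		.priority = KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL,
	};

	if (ioctl(vm_fd, KVM_XEN_HVM_EVTCHN_SEND, &uxe) < 0)
		err(1, "KVM_XEN_HVM_EVTCHN_SEND");
#endif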

/*
 * Support for *outbound* event channel events via the EVTCHNOP_send hypercall.
 */
struct evtchnfd {
	u32 send_port;
	u32 type;
	union {
		struct kvm_xen_evtchn port;
		struct {
			u32 port; /* zero */
			struct eventfd_ctx *ctx;
		} eventfd;
	} deliver;
};

/*
 * Update target vCPU or priority for a registered sending channel.
 */
static int kvm_xen_eventfd_update(struct kvm *kvm,
				  struct kvm_xen_hvm_attr *data)
{}
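
/*
 * Userspace usage sketch (not kernel code): the KVM_XEN_EVTCHN_UPDATE
 * flag on KVM_XEN_ATTR_TYPE_EVTCHN re-targets a previously assigned
 * sending channel; only the destination vCPU/priority may be changed.
 * The port numbers here are arbitrary examples.
 */
#if 0
	struct kvm_xen_hvm_attr ha = {
		.type = KVM_XEN_ATTR_TYPE_EVTCHN,
		.u.evtchn = {
			.send_port = 5,
			.type      = EVTCHNSTAT_ipi,
			.flags     = KVM_XEN_EVTCHN_UPDATE,
			.deliver.port = {
				.port     = 5,
				.vcpu     = 1,	/* new target vCPU */
				.priority = KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL,
			},
		},
	};

	if (ioctl(vm_fd, KVM_XEN_HVM_SET_ATTR, &ha) < 0)
		err(1, "KVM_XEN_EVTCHN_UPDATE");
#endif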

/*
 * Configure the target (eventfd or local port delivery) for sending on
 * a given event channel.
 */
static int kvm_xen_eventfd_assign(struct kvm *kvm,
				  struct kvm_xen_hvm_attr *data)
{}
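
/*
 * Userspace usage sketch (not kernel code): assigning guest EVTCHNOP_send
 * on port 5 to an eventfd, so the VMM can poll() for outbound events.
 * Setting deliver.eventfd.port to zero is what selects eventfd delivery
 * rather than loopback to a local port.
 */
#if 0
	int efd = eventfd(0, EFD_CLOEXEC);
	struct kvm_xen_hvm_attr ha = {
		.type = KVM_XEN_ATTR_TYPE_EVTCHN,
		.u.evtchn = {
			.send_port = 5,
			.type      = EVTCHNSTAT_interdomain,
			.deliver.eventfd = {
				.port = 0,	/* zero selects eventfd delivery */
				.fd   = efd,
			},
		},
	};

	if (ioctl(vm_fd, KVM_XEN_HVM_SET_ATTR, &ha) < 0)
		err(1, "KVM_XEN_ATTR_TYPE_EVTCHN");
#endif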

static int kvm_xen_eventfd_deassign(struct kvm *kvm, u32 port)
{}

static int kvm_xen_eventfd_reset(struct kvm *kvm)
{}

static int kvm_xen_setattr_evtchn(struct kvm *kvm, struct kvm_xen_hvm_attr *data)
{}

static bool kvm_xen_hcall_evtchn_send(struct kvm_vcpu *vcpu, u64 param, u64 *r)
{}

void kvm_xen_init_vcpu(struct kvm_vcpu *vcpu)
{}

void kvm_xen_destroy_vcpu(struct kvm_vcpu *vcpu)
{}

void kvm_xen_update_tsc_info(struct kvm_vcpu *vcpu)
{}

void kvm_xen_init_vm(struct kvm *kvm)
{}

void kvm_xen_destroy_vm(struct kvm *kvm)
{}