linux/arch/x86/xen/setup.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Machine specific setup for xen
 *
 * Jeremy Fitzhardinge <[email protected]>, XenSource Inc, 2007
 */

#include <linux/init.h>
#include <linux/iscsi_ibft.h>
#include <linux/sched.h>
#include <linux/kstrtox.h>
#include <linux/mm.h>
#include <linux/pm.h>
#include <linux/memblock.h>
#include <linux/cpuidle.h>
#include <linux/cpufreq.h>
#include <linux/memory_hotplug.h>
#include <linux/acpi.h>

#include <asm/elf.h>
#include <asm/vdso.h>
#include <asm/e820/api.h>
#include <asm/setup.h>
#include <asm/numa.h>
#include <asm/idtentry.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>

#include <xen/xen.h>
#include <xen/page.h>
#include <xen/interface/callback.h>
#include <xen/interface/memory.h>
#include <xen/interface/physdev.h>
#include <xen/features.h>
#include <xen/hvc-console.h>
#include "xen-ops.h"

#define GB(x) ((uint64_t)(x) * 1024 * 1024 * 1024)

/* Number of pages released from the initial allocation. */
unsigned long xen_released_pages;

/* Memory map would allow PCI passthrough. */
bool xen_pv_pci_possible;

/* E820 map used during setting up memory. */
static struct e820_table xen_e820_table __initdata;

/* Number of initially usable memory pages. */
static unsigned long ini_nr_pages __initdata;

/*
 * Buffer used to remap identity mapped pages. We only need the virtual space.
 * The physical page behind this address is remapped as needed to different
 * buffer pages.
 */
#define REMAP_SIZE (P2M_PER_PAGE - 3)
static struct {
	unsigned long next_area_mfn;
	unsigned long target_pfn;
	unsigned long size;
	unsigned long mfns[REMAP_SIZE];
} xen_remap_buf __initdata __aligned(PAGE_SIZE);
static unsigned long xen_remap_mfn __initdata = INVALID_P2M_ENTRY;

static bool xen_512gb_limit __initdata = IS_ENABLED(CONFIG_XEN_512GB);

static void __init xen_parse_512gb(void)
{}
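
/*
 * A minimal sketch of what the elided body above does, assuming the boot
 * option is spelled "xen_512gb_limit" and that strstr()/strlen() and
 * kstrtobool() (<linux/kstrtox.h>, included above) are available; the real
 * implementation may differ in detail.
 */
static void __init xen_parse_512gb_sketch(void)
{
	bool val = false;
	char *arg;

	arg = strstr(xen_start_info->cmd_line, "xen_512gb_limit");
	if (!arg)
		return;		/* Option absent: keep the built-in default. */

	arg = strstr(xen_start_info->cmd_line, "xen_512gb_limit=");
	if (!arg)
		val = true;	/* Bare option enables the limit. */
	else if (kstrtobool(arg + strlen("xen_512gb_limit="), &val))
		return;		/* Unparsable value: keep the default. */

	xen_512gb_limit = val;
}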

static void __init xen_del_extra_mem(unsigned long start_pfn,
				     unsigned long n_pfns)
{}

/*
 * Called during boot before the p2m list can take entries beyond the
 * hypervisor supplied p2m list. Entries in extra mem are to be regarded as
 * invalid.
 */
unsigned long __ref xen_chk_extra_mem(unsigned long pfn)
{}
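
/*
 * A sketch of the check above, assuming the xen_extra_mem[] region array and
 * XEN_EXTRA_MEM_MAX_REGIONS exported via xen-ops.h; pfns inside an extra mem
 * region report INVALID_P2M_ENTRY, everything else stays identity mapped.
 */
static unsigned long xen_chk_extra_mem_sketch(unsigned long pfn)
{
	unsigned int i;

	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
		if (pfn >= xen_extra_mem[i].start_pfn &&
		    pfn < xen_extra_mem[i].start_pfn + xen_extra_mem[i].n_pfns)
			return INVALID_P2M_ENTRY;
	}

	return IDENTITY_FRAME(pfn);
}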

/*
 * Mark all pfns of extra mem as invalid in p2m list.
 */
void __init xen_inv_extra_mem(void)
{}
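
/*
 * The invalidation itself is a straightforward walk over the same region
 * array; a sketch under the same assumptions as above, using
 * set_phys_to_machine() from the p2m code.
 */
static void __init xen_inv_extra_mem_sketch(void)
{
	unsigned long pfn, pfn_s, pfn_e;
	unsigned int i;

	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
		if (!xen_extra_mem[i].n_pfns)
			continue;
		pfn_s = xen_extra_mem[i].start_pfn;
		pfn_e = pfn_s + xen_extra_mem[i].n_pfns;
		for (pfn = pfn_s; pfn < pfn_e; pfn++)
			set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
	}
}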

/*
 * Finds the next RAM pfn available in the E820 map after min_pfn.
 * This function updates min_pfn with the pfn found and returns
 * the size of that range or zero if not found.
 */
static unsigned long __init xen_find_pfn_range(unsigned long *min_pfn)
{}
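
/*
 * A sketch of the E820 scan described above: walk xen_e820_table and return
 * the number of RAM pages found starting at (the possibly updated) *min_pfn.
 * PFN_UP()/PFN_DOWN() round so that partial pages are excluded.
 */
static unsigned long __init xen_find_pfn_range_sketch(unsigned long *min_pfn)
{
	const struct e820_entry *entry = xen_e820_table.entries;
	unsigned long done = 0;
	unsigned int i;

	for (i = 0; i < xen_e820_table.nr_entries; i++, entry++) {
		unsigned long s_pfn, e_pfn;

		if (entry->type != E820_TYPE_RAM)
			continue;

		e_pfn = PFN_DOWN(entry->addr + entry->size);

		/* Only entries (partially) above *min_pfn are interesting. */
		if (e_pfn <= *min_pfn)
			continue;

		s_pfn = PFN_UP(entry->addr);
		if (s_pfn <= *min_pfn) {
			done = e_pfn - *min_pfn;
		} else {
			done = e_pfn - s_pfn;
			*min_pfn = s_pfn;
		}
		break;
	}

	return done;
}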

static int __init xen_free_mfn(unsigned long mfn)
{}
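
/*
 * Releasing a single frame back to Xen is a one-extent
 * XENMEM_decrease_reservation hypercall; a sketch using the interfaces from
 * <xen/interface/memory.h> included above. A return value of 1 means one
 * extent was released.
 */
static int __init xen_free_mfn_sketch(unsigned long mfn)
{
	struct xen_memory_reservation reservation = {
		.address_bits = 0,
		.extent_order = 0,
		.domid        = DOMID_SELF
	};

	set_xen_guest_handle(reservation.extent_start, &mfn);
	reservation.nr_extents = 1;

	return HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation);
}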

/*
 * This releases a chunk of memory and then does the identity map. It's used
 * as a fallback if the remapping fails.
 */
static void __init xen_set_identity_and_release_chunk(unsigned long start_pfn,
						      unsigned long end_pfn)
{}
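
/*
 * A condensed sketch of the fallback path: hand the backing frames below
 * ini_nr_pages back to Xen, then enter the 1:1 mapping for the whole range.
 * pfn_to_mfn()/mfn_to_pfn(), __set_phys_to_machine() and
 * set_phys_range_identity() are assumed from the Xen p2m helpers; error
 * reporting of the real code is omitted here.
 */
static void __init xen_set_identity_and_release_chunk_sketch(
	unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pfn, end = min(end_pfn, ini_nr_pages);

	for (pfn = start_pfn; pfn < end; pfn++) {
		unsigned long mfn = pfn_to_mfn(pfn);

		/* Skip pfns without a backing frame. */
		if (mfn == INVALID_P2M_ENTRY || mfn_to_pfn(mfn) != pfn)
			continue;

		if (xen_free_mfn(mfn) != 1 ||
		    !__set_phys_to_machine(pfn, INVALID_P2M_ENTRY))
			break;

		xen_released_pages++;
	}

	set_phys_range_identity(start_pfn, end_pfn);
}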

/*
 * Helper function to update the p2m and m2p tables and kernel mapping.
 */
static void __init xen_update_mem_tables(unsigned long pfn, unsigned long mfn)
{}
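
/*
 * A sketch of the three updates implied above: the guest-side p2m entry, the
 * hypervisor-side m2p entry (via MMU_MACHPHYS_UPDATE), and the kernel's
 * linear mapping of the page. mfn_pte() and set_phys_to_machine() are
 * assumed from the Xen page/p2m helpers; the real code reports which step
 * failed before crashing.
 */
static void __init xen_update_mem_tables_sketch(unsigned long pfn,
						unsigned long mfn)
{
	struct mmu_update update = {
		.ptr = ((uint64_t)mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE,
		.val = pfn
	};

	/* Update p2m (the guest's view). */
	if (!set_phys_to_machine(pfn, mfn))
		BUG();

	/* Update m2p (the hypervisor's view). */
	if (HYPERVISOR_mmu_update(&update, 1, NULL, DOMID_SELF) < 0)
		BUG();

	/* Update the kernel's linear mapping of the page. */
	if (HYPERVISOR_update_va_mapping((unsigned long)__va(pfn << PAGE_SHIFT),
					 mfn_pte(mfn, PAGE_KERNEL), 0))
		BUG();
}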

/*
 * This function updates the p2m and m2p tables with an identity map from
 * start_pfn to start_pfn+size and prepares remapping the underlying RAM of the
 * original allocation at remap_pfn. The information needed for remapping is
 * saved in the memory itself to avoid the need for allocating buffers. The
 * complete remap information is contained in a list of MFNs each containing
 * up to REMAP_SIZE MFNs and the start target PFN for doing the remap.
 * This enables us to preserve the original mfn sequence while doing the
 * remapping at a time when the memory management is capable of allocating
 * virtual and physical memory in arbitrary amounts, see 'xen_remap_memory' and
 * its callers.
 */
static void __init xen_do_set_identity_and_remap_chunk(
        unsigned long start_pfn, unsigned long size, unsigned long remap_pfn)
{}
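
/*
 * The core of the scheme described above, condensed into a per-chunk helper:
 * the first frame of each chunk is mapped at xen_remap_buf and filled with
 * the remap information before being linked into the list headed by
 * xen_remap_mfn. set_pte_mfn() is assumed as the early helper that redirects
 * the buffer mapping; the real code also handles partial chunks and restores
 * the original buffer mapping when done.
 */
static void __init xen_remap_save_chunk_sketch(unsigned long start_pfn,
					       unsigned long chunk,
					       unsigned long remap_pfn)
{
	unsigned long buf = (unsigned long)&xen_remap_buf;
	unsigned long mfn = pfn_to_mfn(start_pfn);
	unsigned int i;

	/* Map the chunk's first frame at the buffer address. */
	set_pte_mfn(buf, mfn, PAGE_KERNEL);

	/* Store the remap information inside that very frame. */
	xen_remap_buf.next_area_mfn = xen_remap_mfn;
	xen_remap_buf.target_pfn = remap_pfn;
	xen_remap_buf.size = chunk;
	for (i = 0; i < chunk; i++)
		xen_remap_buf.mfns[i] = pfn_to_mfn(start_pfn + i);

	/* Link the frame into the list, then make the range 1:1 mapped. */
	xen_remap_mfn = mfn;
	set_phys_range_identity(start_pfn, start_pfn + chunk);
}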

/*
 * This function takes a contiguous pfn range that needs to be identity mapped
 * and:
 *
 *  1) Finds a new range of pfns to use for remapping based on E820 and remap_pfn.
 *  2) Calls the do_ function to actually do the mapping/remapping work.
 *
 * The goal is to not allocate additional memory but to remap the existing
 * pages. In the case of an error the underlying memory is simply released back
 * to Xen and not remapped.
 */
static unsigned long __init xen_set_identity_and_remap_chunk(
	unsigned long start_pfn, unsigned long end_pfn, unsigned long remap_pfn)
{}

static unsigned long __init xen_count_remap_pages(
	unsigned long start_pfn, unsigned long end_pfn,
	unsigned long remap_pages)
{}

static unsigned long __init xen_foreach_remap_area(
	unsigned long (*func)(unsigned long start_pfn, unsigned long end_pfn,
			      unsigned long last_val))
{}

/*
 * Remap the memory prepared in xen_do_set_identity_and_remap_chunk().
 * The remap information (which mfn to remap to which pfn) is contained in
 * the to-be-remapped memory itself, in a linked list anchored at
 * xen_remap_mfn. This scheme allows remapping the different chunks in
 * arbitrary order; the resulting mapping is independent of that order.
 */
void __init xen_remap_memory(void)
{}
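
/*
 * A sketch of that walk: map each buffer frame at xen_remap_buf, replay its
 * saved mfns at the target pfns via xen_update_mem_tables(), then follow the
 * next_area_mfn link. set_pte_mfn() is assumed as in the sketch further up;
 * the real code additionally accounts the remapped pages and releases the
 * buffer frames.
 */
static void __init xen_remap_memory_sketch(void)
{
	unsigned long buf = (unsigned long)&xen_remap_buf;
	unsigned long mfn_save = virt_to_mfn((void *)buf);
	unsigned int i;

	while (xen_remap_mfn != INVALID_P2M_ENTRY) {
		/* Map the frame holding the remap information. */
		set_pte_mfn(buf, xen_remap_mfn, PAGE_KERNEL);

		for (i = 0; i < xen_remap_buf.size; i++)
			xen_update_mem_tables(xen_remap_buf.target_pfn + i,
					      xen_remap_buf.mfns[i]);

		/* The buffer mapping is untouched, so the link stays valid. */
		xen_remap_mfn = xen_remap_buf.next_area_mfn;
	}

	/* Restore the original mapping of the buffer page. */
	set_pte_mfn(buf, mfn_save, PAGE_KERNEL);
}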

static unsigned long __init xen_get_pages_limit(void)
{}

static unsigned long __init xen_get_max_pages(void)
{}
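
/*
 * A sketch of the limit handling: the static per-guest maximum from
 * xen_get_pages_limit() is capped by XENMEM_maximum_reservation for the
 * initial domain, where the hypervisor knows the real maximum; guest
 * domains already get an E820 map covering their static maximum.
 */
static unsigned long __init xen_get_max_pages_sketch(void)
{
	unsigned long limit = xen_get_pages_limit();
	unsigned long max_pages = limit;
	domid_t domid = DOMID_SELF;
	long ret;

	if (xen_initial_domain()) {
		ret = HYPERVISOR_memory_op(XENMEM_maximum_reservation, &domid);
		if (ret > 0)
			max_pages = ret;
	}

	return min(max_pages, limit);
}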

static void __init xen_align_and_add_e820_region(phys_addr_t start,
						 phys_addr_t size, int type)
{}

static void __init xen_ignore_unusable(void)
{}

static bool __init xen_is_e820_reserved(phys_addr_t start, phys_addr_t size)
{}

/*
 * Find a free area in physical memory that is not yet reserved and is
 * compliant with the E820 map.
 * Used to relocate pre-allocated areas like the initrd or the p2m list which
 * conflict with the E820 map to be used.
 * In case no area is found, return 0. Otherwise return the physical address
 * of the area, which has already been reserved for the caller's convenience.
 */
phys_addr_t __init xen_find_free_area(phys_addr_t size)
{}
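
/*
 * A sketch of the search loop: for each sufficiently large RAM entry in
 * xen_e820_table, slide a window of the requested size forward past any
 * memblock-reserved page; reserve and return the first window that fits.
 * Alignment details of the real code are omitted.
 */
static phys_addr_t __init xen_find_free_area_sketch(phys_addr_t size)
{
	struct e820_entry *entry = xen_e820_table.entries;
	phys_addr_t addr, start;
	unsigned int mapcnt;

	for (mapcnt = 0; mapcnt < xen_e820_table.nr_entries; mapcnt++, entry++) {
		if (entry->type != E820_TYPE_RAM || entry->size < size)
			continue;
		start = entry->addr;
		for (addr = start; addr < start + size; addr += PAGE_SIZE) {
			if (!memblock_is_reserved(addr))
				continue;
			/* Conflict: restart the window after this page. */
			start = addr + PAGE_SIZE;
			if (start + size > entry->addr + entry->size)
				break;
		}
		if (addr >= start + size) {
			/* Reserve the area for the caller's convenience. */
			memblock_reserve(start, size);
			return start;
		}
	}

	return 0;
}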

/*
 * Swap a non-RAM E820 map entry with RAM above ini_nr_pages.
 * Note that the E820 map is modified accordingly, but the P2M map isn't yet.
 * The adaptation of the P2M must be deferred until page allocation is possible.
 */
static void __init xen_e820_swap_entry_with_ram(struct e820_entry *swap_entry)
{}

/*
 * Look for non-RAM memory types in a specific guest physical area and move
 * those away if possible (ACPI NVS only for now).
 */
static void __init xen_e820_resolve_conflicts(phys_addr_t start,
					      phys_addr_t size)
{}

/*
 * Check whether an area of physical memory is usable for non-movable
 * purposes. An area is considered usable if the E820 map in use lists it as
 * RAM or as some other type which can be moved to higher PFNs while keeping
 * the MFNs. In case the area is not usable, crash the system with an error
 * message.
 */
void __init xen_chk_is_e820_usable(phys_addr_t start, phys_addr_t size,
				   const char *component)
{}
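
/*
 * A sketch of the check: first try to move conflicting non-RAM entries out
 * of the way, then crash via the raw Xen console if the area is still
 * reserved in the E820 map (xen_raw_console_write() comes from
 * <xen/hvc-console.h> included above).
 */
static void __init xen_chk_is_e820_usable_sketch(phys_addr_t start,
						 phys_addr_t size,
						 const char *component)
{
	xen_e820_resolve_conflicts(start, size);

	if (!xen_is_e820_reserved(start, size))
		return;

	xen_raw_console_write("Xen hypervisor allocated ");
	xen_raw_console_write(component);
	xen_raw_console_write(" memory conflicts with E820 map\n");
	BUG();
}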

/*
 * Like memcpy, but with physical addresses for dest and src.
 */
static void __init xen_phys_memcpy(phys_addr_t dest, phys_addr_t src,
				   phys_addr_t n)
{}
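
/*
 * A sketch of the physical memcpy using early_memremap()/early_memunmap(),
 * copying at most up to the next page boundary on either side per iteration;
 * min3() is from <linux/minmax.h>. The real code maps larger batches via the
 * fixmap-based early remap slots.
 */
static void __init xen_phys_memcpy_sketch(phys_addr_t dest, phys_addr_t src,
					  phys_addr_t n)
{
	while (n) {
		phys_addr_t len = min3(n,
				(phys_addr_t)PAGE_SIZE - (dest & ~PAGE_MASK),
				(phys_addr_t)PAGE_SIZE - (src & ~PAGE_MASK));
		void *to = early_memremap(dest, len);
		void *from = early_memremap(src, len);

		memcpy(to, from, len);
		early_memunmap(from, len);
		early_memunmap(to, len);

		dest += len;
		src += len;
		n -= len;
	}
}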

/*
 * Reserve Xen mfn_list.
 */
static void __init xen_reserve_xen_mfnlist(void)
{}

/**
 * xen_memory_setup - Hook for machine specific memory setup.
 **/
char * __init xen_memory_setup(void)
{}

static int register_callback(unsigned type, const void *func)
{}
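
/*
 * Registering a callback is a single CALLBACKOP_register hypercall using the
 * types from <xen/interface/callback.h> included above; XEN_CALLBACK() packs
 * the kernel code segment and entry address. A sketch:
 */
static int register_callback_sketch(unsigned type, const void *func)
{
	struct callback_register callback = {
		.type = type,
		.address = XEN_CALLBACK(__KERNEL_CS, func),
		.flags = CALLBACKF_mask_events,
	};

	return HYPERVISOR_callback_op(CALLBACKOP_register, &callback);
}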

void xen_enable_sysenter(void)
{}

void xen_enable_syscall(void)
{}
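
/*
 * A sketch of the syscall case, assuming the Xen PV entry points
 * xen_entry_SYSCALL_64/xen_entry_SYSCALL_compat of recent kernels (the
 * symbol names have changed over time). A failing 64-bit registration is
 * close to fatal since 64-bit userspace has no other syscall mechanism; a
 * failing compat registration just clears the feature bit.
 */
static void xen_enable_syscall_sketch(void)
{
	int ret;

	ret = register_callback(CALLBACKTYPE_syscall, xen_entry_SYSCALL_64);
	if (ret != 0)
		printk(KERN_ERR "Failed to set syscall callback: %d\n", ret);

	if (boot_cpu_has(X86_FEATURE_SYSCALL32) &&
	    register_callback(CALLBACKTYPE_syscall32, xen_entry_SYSCALL_compat))
		setup_clear_cpu_cap(X86_FEATURE_SYSCALL32);
}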

static void __init xen_pvmmu_arch_setup(void)
{}

/* This function is not called for HVM domains */
void __init xen_arch_setup(void)
{}