// SPDX-License-Identifier: GPL-2.0-only
/*
 * kexec.c - kexec system call core code.
 * Copyright (C) 2002-2004 Eric Biederman  <[email protected]>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/btf.h>
#include <linux/capability.h>
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/kexec.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/syscalls.h>
#include <linux/reboot.h>
#include <linux/ioport.h>
#include <linux/hardirq.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/utsname.h>
#include <linux/numa.h>
#include <linux/suspend.h>
#include <linux/device.h>
#include <linux/freezer.h>
#include <linux/panic_notifier.h>
#include <linux/pm.h>
#include <linux/cpu.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/console.h>
#include <linux/vmalloc.h>
#include <linux/swap.h>
#include <linux/syscore_ops.h>
#include <linux/compiler.h>
#include <linux/hugetlb.h>
#include <linux/objtool.h>
#include <linux/kmsg_dump.h>

#include <asm/page.h>
#include <asm/sections.h>

#include <crypto/hash.h>
#include "kexec_internal.h"

atomic_t __kexec_lock = ATOMIC_INIT(0);

/* Flag to indicate we are going to kexec a new kernel */
bool kexec_in_progress = false;

/* Set when userspace asks kexec_file_load(2) for verbose debug output */
bool kexec_file_dbg_print;

/*
 * When kexec transitions to the new kernel there is a one-to-one
 * mapping between physical and virtual addresses.  On processors
 * where you can disable the MMU this is trivial, and easy.  For
 * others it is still a simple predictable page table to setup.
 *
 * In that environment kexec copies the new kernel to its final
 * resting place.  This means I can only support memory whose
 * physical address can fit in an unsigned long.  In particular
 * addresses where (pfn << PAGE_SHIFT) > ULONG_MAX cannot be handled.
 * If the assembly stub has more restrictive requirements
 * KEXEC_SOURCE_MEMORY_LIMIT and KEXEC_DEST_MEMORY_LIMIT can be
 * defined more restrictively in <asm/kexec.h>.
 *
 * The code for the transition from the current kernel to the
 * new kernel is placed in the control_code_buffer, whose size
 * is given by KEXEC_CONTROL_PAGE_SIZE.  In the best case only a single
 * page of memory is necessary, but some architectures require more.
 * Because this memory must be identity mapped in the transition from
 * virtual to physical addresses it must live in the range
 * 0 - TASK_SIZE, as only the user space mappings are arbitrarily
 * modifiable.
 *
 * The assembly stub in the control code buffer is passed a linked list
 * of descriptor pages detailing the source pages of the new kernel,
 * and the destination addresses of those source pages.  As this data
 * structure is not used in the context of the current OS, it must
 * be self-contained.
 *
 * The code has been made to work with highmem pages and will use a
 * destination page in its final resting place (if it happens
 * to allocate it).  The end product of this is that most of the
 * physical address space, and most of RAM can be used.
 *
 * Future directions include:
 *  - allocating a page table with the control code buffer identity
 *    mapped, to simplify machine_kexec and make kexec_on_panic more
 *    reliable.
 */
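/*
 * Illustrative sketch only: a C rendering of the walk that the
 * per-architecture assembly stub performs over the entry list.  The
 * real stub runs under the identity mapping described above and is
 * written in assembly; this sketch assumes only the generic IND_*
 * flags from <linux/kexec.h> and is deliberately not built.
 */
#if 0
static void kexec_copy_sketch(kimage_entry_t *ptr)
{
	unsigned long dest = 0;

	for (;;) {
		kimage_entry_t entry = *ptr++;

		if (entry & IND_DONE)
			break;
		if (entry & IND_DESTINATION) {
			/* Subsequent source pages are copied starting here. */
			dest = entry & PAGE_MASK;
		} else if (entry & IND_INDIRECTION) {
			/* Continue the walk in the next descriptor page. */
			ptr = phys_to_virt(entry & PAGE_MASK);
		} else if (entry & IND_SOURCE) {
			/* Copy one source page to its destination. */
			copy_page(phys_to_virt(dest),
				  phys_to_virt(entry & PAGE_MASK));
			dest += PAGE_SIZE;
		}
	}
}
#endif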

/*
 * KIMAGE_NO_DEST is an impossible destination address..., for
 * allocating pages whose destination address we do not care about.
 */
#define KIMAGE_NO_DEST (-1UL)
#define PAGE_COUNT(x) (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)
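/*
 * For example, with 4 KiB pages PAGE_COUNT(1) == 1 and
 * PAGE_COUNT(PAGE_SIZE + 1) == 2: a partial trailing page still
 * consumes a whole page.
 */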

static struct page *kimage_alloc_page(struct kimage *image,
				       gfp_t gfp_mask,
				       unsigned long dest);

/*
 * Check that the proposed segment list is usable before any memory is
 * touched: destinations must be page-aligned, must not overlap each
 * other, and must respect the architecture's kexec memory limits.
 */
int sanity_check_segment_list(struct kimage *image)
{}

struct kimage *do_kimage_alloc_init(void)
{}

int kimage_is_destination_range(struct kimage *image,
					unsigned long start,
					unsigned long end)
{}

static struct page *kimage_alloc_pages(gfp_t gfp_mask, unsigned int order)
{}

static void kimage_free_pages(struct page *page)
{}

void kimage_free_page_list(struct list_head *list)
{}

static struct page *kimage_alloc_normal_control_pages(struct kimage *image,
							unsigned int order)
{}

#ifdef CONFIG_CRASH_DUMP
static struct page *kimage_alloc_crash_control_pages(struct kimage *image,
						      unsigned int order)
{}
#endif


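/*
 * Control pages are the intermediaries used while copying the new
 * kernel into place: they must never overlap any destination range,
 * because they remain in use during and after that copy.  For a normal
 * kexec they are allocated like any other page; for a crash kernel
 * they are carved out of the reserved crash region.
 */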
struct page *kimage_alloc_control_pages(struct kimage *image,
					 unsigned int order)
{}

static int kimage_add_entry(struct kimage *image, kimage_entry_t entry)
{}

static int kimage_set_destination(struct kimage *image,
				   unsigned long destination)
{}


static int kimage_add_page(struct kimage *image, unsigned long page)
{}


static void kimage_free_extra_pages(struct kimage *image)
{}

/* Seal the entry list with IND_DONE so the relocation stub knows where to stop. */
void kimage_terminate(struct kimage *image)
{}

#define for_each_kimage_entry(image, ptr, entry) \
	for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE); \
		ptr = (entry & IND_INDIRECTION) ? \
			boot_phys_to_virt((entry & PAGE_MASK)) : ptr + 1)
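/*
 * Illustrative use only (condensed from kimage_free() below): walk
 * every entry in the image and release the source pages.
 */
#if 0
	kimage_entry_t *ptr, entry;

	for_each_kimage_entry(image, ptr, entry)
		if (entry & IND_SOURCE)
			kimage_free_entry(entry);
#endif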

static void kimage_free_entry(kimage_entry_t entry)
{}

void kimage_free(struct kimage *image)
{}

static kimage_entry_t *kimage_dst_used(struct kimage *image,
					unsigned long page)
{}

/*
 * Allocate a page to hold segment data, taking care that it is not
 * placed at an address some other page still has to be copied to;
 * if it happens to land at @destination itself it is used in place.
 */
static struct page *kimage_alloc_page(struct kimage *image,
					gfp_t gfp_mask,
					unsigned long destination)
{}

static int kimage_load_normal_segment(struct kimage *image,
					 struct kexec_segment *segment)
{}

#ifdef CONFIG_CRASH_DUMP
static int kimage_load_crash_segment(struct kimage *image,
					struct kexec_segment *segment)
{}
#endif

int kimage_load_segment(struct kimage *image,
				struct kexec_segment *segment)
{}

struct kexec_load_limit {
	/* Mutex protects the limit count. */
	struct mutex mutex;
	int limit;
};

static struct kexec_load_limit load_limit_reboot = {
	.mutex = __MUTEX_INITIALIZER(load_limit_reboot.mutex),
	.limit = -1,
};

static struct kexec_load_limit load_limit_panic = {
	.mutex = __MUTEX_INITIALIZER(load_limit_panic.mutex),
	.limit = -1,
};
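/*
 * A limit of -1 means "unlimited".  Each permitted load of the given
 * type decrements a finite limit; once it hits zero, further loads of
 * that type are refused.  The limits are exposed through the
 * kexec_load_limit_* sysctls below and, once set, can only be lowered.
 */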

struct kimage *kexec_image;
struct kimage *kexec_crash_image;
static int kexec_load_disabled;

#ifdef CONFIG_SYSCTL
/*
 * proc handler for the kexec_load_limit_* sysctls: reads report the
 * current limit, writes are only accepted if they lower it.
 */
static int kexec_limit_handler(const struct ctl_table *table, int write,
			       void *buffer, size_t *lenp, loff_t *ppos)
{}

static struct ctl_table kexec_core_sysctls[] = {
	{
		.procname	= "kexec_load_disabled",
		.data		= &kexec_load_disabled,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		/* only handle a transition from default "0" to "1" */
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ONE,
		.extra2		= SYSCTL_ONE,
	},
	{
		.procname	= "kexec_load_limit_panic",
		.data		= &load_limit_panic,
		.mode		= 0644,
		.proc_handler	= kexec_limit_handler,
	},
	{
		.procname	= "kexec_load_limit_reboot",
		.data		= &load_limit_reboot,
		.mode		= 0644,
		.proc_handler	= kexec_limit_handler,
	},
};

static int __init kexec_core_sysctl_init(void)
{}
late_initcall(kexec_core_sysctl_init);
#endif

/*
 * Common gate for kexec_load(2) and kexec_file_load(2): the caller
 * needs CAP_SYS_BOOT, loading must not have been disabled via sysctl,
 * and the per-type load limit must not be exhausted.
 */
bool kexec_load_permitted(int kexec_image_type)
{}

/*
 * Move into place and start executing a preloaded standalone
 * executable.  If nothing was preloaded return an error.
 */
int kernel_kexec(void)
{}
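
/*
 * Illustrative sketch only: kernel_kexec() is normally reached from the
 * reboot(2) syscall, roughly as below (condensed from kernel/reboot.c,
 * guarded there by CONFIG_KEXEC_CORE; not part of this file).
 */
#if 0
	case LINUX_REBOOT_CMD_KEXEC:
		ret = kernel_kexec();
		break;
#endif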