
// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/x86_64/mm/init.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *  Copyright (C) 2000  Pavel Machek <[email protected]>
 *  Copyright (C) 2002,2003 Andi Kleen <[email protected]>
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/pagemap.h>
#include <linux/memblock.h>
#include <linux/proc_fs.h>
#include <linux/pci.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/dma-mapping.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
#include <linux/memremap.h>
#include <linux/nmi.h>
#include <linux/gfp.h>
#include <linux/kcore.h>
#include <linux/bootmem_info.h>

#include <asm/processor.h>
#include <asm/bios_ebda.h>
#include <linux/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820/api.h>
#include <asm/apic.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>
#include <asm/smp.h>
#include <asm/sections.h>
#include <asm/kdebug.h>
#include <asm/numa.h>
#include <asm/set_memory.h>
#include <asm/init.h>
#include <asm/uv/uv.h>
#include <asm/setup.h>
#include <asm/ftrace.h>

#include "mm_internal.h"

#include "ident_map.c"

#define DEFINE_POPULATE(fname, type1, type2, init)

DEFINE_POPULATE(p4d_populate, p4d, pud, init)
DEFINE_POPULATE(pgd_populate, pgd, p4d, init)
DEFINE_POPULATE(pud_populate, pud, pmd, init)
DEFINE_POPULATE(pmd_populate_kernel, pmd, pte, init)
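
/*
 * Minimal sketch of what each DEFINE_POPULATE() instance is expected to
 * expand to (the macro body is elided in this listing): a wrapper that
 * picks the _safe variant while the tables are first being populated.
 * Illustrative only, not necessarily the exact upstream body:
 */
#if 0
#define DEFINE_POPULATE(fname, type1, type2, init)		\
static inline void fname##_init(struct mm_struct *mm,		\
		type1##_t *arg1, type2##_t *arg2, bool init)	\
{								\
	if (init)						\
		fname##_safe(mm, arg1, arg2);			\
	else							\
		fname(mm, arg1, arg2);				\
}
#endif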

#define DEFINE_ENTRY(type1, type2, init)

DEFINE_ENTRY(p4d, p4d, init)
DEFINE_ENTRY(pud, pud, init)
DEFINE_ENTRY(pmd, pmd, init)
DEFINE_ENTRY(pte, pte, init)
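
/*
 * Likewise, a sketch of the elided DEFINE_ENTRY() body: each instance
 * generates a set_*_init() helper that uses the set_*_safe() variant
 * during initial population. Illustrative only:
 */
#if 0
#define DEFINE_ENTRY(type1, type2, init)			\
static inline void set_##type1##_init(type1##_t *arg1,		\
			type2##_t *arg2, bool init)		\
{								\
	if (init)						\
		set_##type1##_safe(arg1, arg2);			\
	else							\
		set_##type1(arg1, arg2);			\
}
#endif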

static inline pgprot_t prot_sethuge(pgprot_t prot)
{}

/*
 * NOTE: pagetable_init() allocates all the fixmap page tables contiguously
 * in physical memory, so we can cache the location of the first one and
 * move around without checking the pgd every time.
 */

/* Bits supported by the hardware: */
pteval_t __supported_pte_mask __read_mostly = ~0;
/* Bits allowed in normal kernel mappings: */
pteval_t __default_kernel_pte_mask __read_mostly = ~0;
EXPORT_SYMBOL_GPL(__supported_pte_mask);
/* Used in PAGE_KERNEL_* macros which are reasonably used out-of-tree: */
EXPORT_SYMBOL(__default_kernel_pte_mask);

int force_personality32;

/*
 * noexec32=on|off
 * Controls the non-executable heap for 32-bit processes.
 *
 * on	PROT_READ does not imply PROT_EXEC for 32-bit processes (default)
 * off	PROT_READ implies PROT_EXEC
 */
static int __init nonx32_setup(char *str)
{}
__setup("noexec32=", nonx32_setup);
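
/*
 * Sketch of the elided body, following the comment above: "on" clears
 * READ_IMPLIES_EXEC from the forced 32-bit personality, "off" sets it.
 * Illustrative only:
 */
#if 0
static int __init nonx32_setup(char *str)
{
	if (!strcmp(str, "on"))
		force_personality32 &= ~READ_IMPLIES_EXEC;
	else if (!strcmp(str, "off"))
		force_personality32 |= READ_IMPLIES_EXEC;
	return 1;
}
#endif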

static void sync_global_pgds_l5(unsigned long start, unsigned long end)
{}

static void sync_global_pgds_l4(unsigned long start, unsigned long end)
{}

/*
 * When memory is added, make sure all processes' MMs have suitable PGD
 * entries in their local PGD-level page.
 */
static void sync_global_pgds(unsigned long start, unsigned long end)
{}
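
/*
 * Sketch of the expected dispatch between the two helpers above, depending
 * on whether 5-level paging is enabled. Illustrative only:
 */
#if 0
static void sync_global_pgds(unsigned long start, unsigned long end)
{
	if (pgtable_l5_enabled())
		sync_global_pgds_l5(start, end);
	else
		sync_global_pgds_l4(start, end);
}
#endif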

/*
 * NOTE: This function is marked __ref because it calls the __init function
 * alloc_bootmem_pages(). Doing so is safe ONLY while after_bootmem == 0.
 */
static __ref void *spp_getpage(void)
{}
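
/*
 * Sketch of the elided body: before bootmem is torn down the page must come
 * from memblock, afterwards from the page allocator. Illustrative only;
 * the upstream error handling may differ:
 */
#if 0
static __ref void *spp_getpage(void)
{
	void *ptr;

	if (after_bootmem)
		ptr = (void *) get_zeroed_page(GFP_ATOMIC);
	else
		ptr = memblock_alloc(PAGE_SIZE, PAGE_SIZE);

	if (!ptr || ((unsigned long)ptr & ~PAGE_MASK))
		panic("spp_getpage: cannot allocate page data\n");

	return ptr;
}
#endif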

static p4d_t *fill_p4d(pgd_t *pgd, unsigned long vaddr)
{}

static pud_t *fill_pud(p4d_t *p4d, unsigned long vaddr)
{}

static pmd_t *fill_pmd(pud_t *pud, unsigned long vaddr)
{}

static pte_t *fill_pte(pmd_t *pmd, unsigned long vaddr)
{}

static void __set_pte_vaddr(pud_t *pud, unsigned long vaddr, pte_t new_pte)
{}

void set_pte_vaddr_p4d(p4d_t *p4d_page, unsigned long vaddr, pte_t new_pte)
{}

void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
{}

void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
{}

pmd_t * __init populate_extra_pmd(unsigned long vaddr)
{}

pte_t * __init populate_extra_pte(unsigned long vaddr)
{}

/*
 * Create large page table mappings for a range of physical addresses.
 */
static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
					enum page_cache_mode cache)
{}

void __init init_extra_mapping_wb(unsigned long phys, unsigned long size)
{}

void __init init_extra_mapping_uc(unsigned long phys, unsigned long size)
{}
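
/*
 * Sketch of the two trivial wrappers above: they differ only in the cache
 * mode passed down to __init_extra_mapping(). Illustrative only:
 */
#if 0
void __init init_extra_mapping_wb(unsigned long phys, unsigned long size)
{
	__init_extra_mapping(phys, size, _PAGE_CACHE_MODE_WB);
}

void __init init_extra_mapping_uc(unsigned long phys, unsigned long size)
{
	__init_extra_mapping(phys, size, _PAGE_CACHE_MODE_UC);
}
#endif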

/*
 * The head.S code sets up the kernel high mapping:
 *
 *   from __START_KERNEL_map to __START_KERNEL_map + size (== _end-_text)
 *
 * phys_base holds the negative offset to the kernel, which is added
 * to the compile time generated pmds. This results in invalid pmds up
 * to the point where we hit the physaddr 0 mapping.
 *
 * We limit the mappings to the region from _text to _brk_end.  _brk_end
 * is rounded up to the 2MB boundary. This catches the invalid pmds as
 * well, as they are located before _text:
 */
void __init cleanup_highmap(void)
{}
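
/*
 * Condensed sketch of the cleanup described above: walk the kernel-high
 * pmds and clear every entry outside [_text, _brk_end). The Xen-specific
 * max_pfn_mapped adjustment is omitted here. Illustrative only:
 */
#if 0
void __init cleanup_highmap(void)
{
	unsigned long vaddr = __START_KERNEL_map;
	unsigned long vaddr_end = __START_KERNEL_map + KERNEL_IMAGE_SIZE;
	unsigned long end = roundup((unsigned long)_brk_end, PMD_SIZE) - 1;
	pmd_t *pmd = level2_kernel_pgt;

	for (; vaddr + PMD_SIZE - 1 < vaddr_end; pmd++, vaddr += PMD_SIZE) {
		if (pmd_none(*pmd))
			continue;
		if (vaddr < (unsigned long) _text || vaddr > end)
			set_pmd(pmd, __pmd(0));
	}
}
#endif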

/*
 * Create PTE level page table mapping for physical addresses.
 * It returns the last physical address mapped.
 */
static unsigned long __meminit
phys_pte_init(pte_t *pte_page, unsigned long paddr, unsigned long paddr_end,
	      pgprot_t prot, bool init)
{}
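
/*
 * Condensed sketch of the core loop: walk the PTE slots covering
 * [paddr, paddr_end), keep entries that are already present (e.g. ones
 * Xen pre-set-up), and fill the rest with 4K mappings. The E820 checks
 * and page-count statistics are omitted. Illustrative only:
 */
#if 0
static unsigned long __meminit
phys_pte_init(pte_t *pte_page, unsigned long paddr, unsigned long paddr_end,
	      pgprot_t prot, bool init)
{
	unsigned long paddr_next, paddr_last = paddr_end;
	pte_t *pte = pte_page + pte_index(paddr);
	int i;

	for (i = pte_index(paddr); i < PTRS_PER_PTE;
	     i++, paddr = paddr_next, pte++) {
		paddr_next = (paddr & PAGE_MASK) + PAGE_SIZE;
		if (paddr >= paddr_end)
			continue;
		/* Reuse a mapping somebody else already set up. */
		if (!pte_none(*pte))
			continue;
		set_pte_init(pte, pfn_pte(paddr >> PAGE_SHIFT, prot), init);
		paddr_last = paddr_next;
	}
	return paddr_last;
}
#endif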

/*
 * Create PMD level page table mapping for physical addresses. The virtual
 * and physical addresses have to be aligned at this level.
 * It returns the last physical address mapped.
 */
static unsigned long __meminit
phys_pmd_init(pmd_t *pmd_page, unsigned long paddr, unsigned long paddr_end,
	      unsigned long page_size_mask, pgprot_t prot, bool init)
{}

/*
 * Create PUD level page table mapping for physical addresses. The virtual
 * and physical addresses do not have to be aligned at this level. KASLR can
 * randomize virtual addresses up to this level.
 * It returns the last physical address mapped.
 */
static unsigned long __meminit
phys_pud_init(pud_t *pud_page, unsigned long paddr, unsigned long paddr_end,
	      unsigned long page_size_mask, pgprot_t _prot, bool init)
{}

static unsigned long __meminit
phys_p4d_init(p4d_t *p4d_page, unsigned long paddr, unsigned long paddr_end,
	      unsigned long page_size_mask, pgprot_t prot, bool init)
{}

static unsigned long __meminit
__kernel_physical_mapping_init(unsigned long paddr_start,
			       unsigned long paddr_end,
			       unsigned long page_size_mask,
			       pgprot_t prot, bool init)
{}


/*
 * Create page table mapping for the physical memory for specific physical
 * addresses. Note that it can only be used to populate non-present entries.
 * The virtual and physical addresses have to be aligned down to the PMD
 * level. It returns the last physical address mapped.
 */
unsigned long __meminit
kernel_physical_mapping_init(unsigned long paddr_start,
			     unsigned long paddr_end,
			     unsigned long page_size_mask, pgprot_t prot)
{}
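
/*
 * Sketch of the expected wrapper: passing 'true' selects the set_*_safe()
 * paths of __kernel_physical_mapping_init(), which are only valid for
 * populating non-present entries. Illustrative only:
 */
#if 0
unsigned long __meminit
kernel_physical_mapping_init(unsigned long paddr_start,
			     unsigned long paddr_end,
			     unsigned long page_size_mask, pgprot_t prot)
{
	return __kernel_physical_mapping_init(paddr_start, paddr_end,
					      page_size_mask, prot, true);
}
#endif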

/*
 * This function is similar to kernel_physical_mapping_init() above with the
 * exception that it uses set_{pud,pmd}() instead of the set_{pud,pmd}_safe()
 * variants when updating the mapping. The caller is responsible for flushing
 * the TLBs after the function returns.
 */
unsigned long __meminit
kernel_physical_mapping_change(unsigned long paddr_start,
			       unsigned long paddr_end,
			       unsigned long page_size_mask)
{}
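
/*
 * Sketch of the counterpart wrapper: init=false selects the plain
 * set_{pud,pmd}() paths so that present entries may be changed; the caller
 * then has to flush the TLBs, per the comment above. Illustrative only:
 */
#if 0
unsigned long __meminit
kernel_physical_mapping_change(unsigned long paddr_start,
			       unsigned long paddr_end,
			       unsigned long page_size_mask)
{
	return __kernel_physical_mapping_init(paddr_start, paddr_end,
					      page_size_mask, PAGE_KERNEL,
					      false);
}
#endif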

#ifndef CONFIG_NUMA
void __init initmem_init(void)
{
	memblock_set_node(0, PHYS_ADDR_MAX, &memblock.memory, 0);
}
#endif

void __init paging_init(void)
{}

#ifdef CONFIG_SPARSEMEM_VMEMMAP
#define PAGE_UNUSED 0xFD

/*
 * The unused vmemmap range, which has not yet been memset() to PAGE_UNUSED,
 * runs from unused_pmd_start to the next PMD_SIZE boundary.
 */
static unsigned long unused_pmd_start __meminitdata;

static void __meminit vmemmap_flush_unused_pmd(void)
{}
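
/*
 * Sketch of the elided body: mark the tail of the partially consumed PMD
 * as PAGE_UNUSED, from unused_pmd_start up to the next PMD_SIZE boundary,
 * then reset the tracking variable. Illustrative only:
 */
#if 0
static void __meminit vmemmap_flush_unused_pmd(void)
{
	if (!unused_pmd_start)
		return;
	memset((void *)unused_pmd_start, PAGE_UNUSED,
	       ALIGN(unused_pmd_start, PMD_SIZE) - unused_pmd_start);
	unused_pmd_start = 0;
}
#endif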

#ifdef CONFIG_MEMORY_HOTPLUG
/* Returns true if the PMD is completely unused and thus it can be freed */
static bool __meminit vmemmap_pmd_is_unused(unsigned long addr, unsigned long end)
{}
#endif

static void __meminit __vmemmap_use_sub_pmd(unsigned long start)
{}

static void __meminit vmemmap_use_sub_pmd(unsigned long start, unsigned long end)
{}


static void __meminit vmemmap_use_new_sub_pmd(unsigned long start, unsigned long end)
{}
#endif

/*
 * Memory hotplug specific functions
 */
#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * After memory hotplug, the variables max_pfn, max_low_pfn and high_memory
 * need updating.
 */
static void update_end_of_memory_vars(u64 start, u64 size)
{}
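
/*
 * Sketch of the update described above: if the hot-added range extends past
 * the current end of memory, push max_pfn, max_low_pfn and high_memory out
 * to cover it. Illustrative only:
 */
#if 0
static void update_end_of_memory_vars(u64 start, u64 size)
{
	unsigned long end_pfn = PFN_UP(start + size);

	if (end_pfn > max_pfn) {
		max_pfn = end_pfn;
		max_low_pfn = end_pfn;
		high_memory = (void *)__va(max_pfn * PAGE_SIZE - 1) + 1;
	}
}
#endif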

int add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
	      struct mhp_params *params)
{}

int arch_add_memory(int nid, u64 start, u64 size,
		    struct mhp_params *params)
{}

static void __meminit free_pagetable(struct page *page, int order)
{}

static void __meminit free_hugepage_table(struct page *page,
		struct vmem_altmap *altmap)
{}

static void __meminit free_pte_table(pte_t *pte_start, pmd_t *pmd)
{}

static void __meminit free_pmd_table(pmd_t *pmd_start, pud_t *pud)
{}

static void __meminit free_pud_table(pud_t *pud_start, p4d_t *p4d)
{}

static void __meminit
remove_pte_table(pte_t *pte_start, unsigned long addr, unsigned long end,
		 bool direct)
{}

static void __meminit
remove_pmd_table(pmd_t *pmd_start, unsigned long addr, unsigned long end,
		 bool direct, struct vmem_altmap *altmap)
{}

static void __meminit
remove_pud_table(pud_t *pud_start, unsigned long addr, unsigned long end,
		 struct vmem_altmap *altmap, bool direct)
{}

static void __meminit
remove_p4d_table(p4d_t *p4d_start, unsigned long addr, unsigned long end,
		 struct vmem_altmap *altmap, bool direct)
{}

/* start and end are both virtual addresses. */
static void __meminit
remove_pagetable(unsigned long start, unsigned long end, bool direct,
		struct vmem_altmap *altmap)
{}

void __ref vmemmap_free(unsigned long start, unsigned long end,
		struct vmem_altmap *altmap)
{}

static void __meminit
kernel_physical_mapping_remove(unsigned long start, unsigned long end)
{}

void __ref arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
{}
#endif /* CONFIG_MEMORY_HOTPLUG */

static struct kcore_list kcore_vsyscall;

static void __init register_page_bootmem_info(void)
{}

/*
 * Pre-allocates page-table pages for the vmalloc area in the kernel page-table.
 * Only the level which needs to be synchronized between all page-tables is
 * allocated, because that synchronization can be expensive.
 */
static void __init preallocate_vmalloc_pages(void)
{}
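
/*
 * Simplified sketch of the preallocation: walk the vmalloc range in
 * PGDIR_SIZE steps and allocate the one level below the PGD that must be
 * shared (P4D with 5-level paging, PUD with 4-level paging). The upstream
 * range may cover more than VMALLOC_END; illustrative only:
 */
#if 0
static void __init preallocate_vmalloc_pages(void)
{
	unsigned long addr;

	for (addr = VMALLOC_START; addr <= VMALLOC_END;
	     addr = ALIGN(addr + 1, PGDIR_SIZE)) {
		pgd_t *pgd = pgd_offset_k(addr);
		p4d_t *p4d;
		pud_t *pud;

		p4d = p4d_alloc(&init_mm, pgd, addr);
		if (!p4d)
			panic("Failed to pre-allocate p4d pages for vmalloc area\n");

		/* With 5-level paging the P4D level is the shared one. */
		if (pgtable_l5_enabled())
			continue;

		pud = pud_alloc(&init_mm, p4d, addr);
		if (!pud)
			panic("Failed to pre-allocate pud pages for vmalloc area\n");
	}
}
#endif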

void __init mem_init(void)
{}

int kernel_set_to_readonly;

void mark_rodata_ro(void)
{}

/*
 * Block size is the minimum amount of memory which can be hotplugged or
 * hot-removed. It must be a power of two and must be equal to or larger than
 * MIN_MEMORY_BLOCK_SIZE.
 */
#define MAX_BLOCK_SIZE (2UL << 30)

/* Amount of RAM needed to start using large blocks */
#define MEM_SIZE_FOR_LARGE_BLOCK (64UL << 30)

/* Adjustable memory block size */
static unsigned long set_memory_block_size;
int __init set_memory_block_size_order(unsigned int order)
{}

static unsigned long probe_memory_block_size(void)
{}

static unsigned long memory_block_size_probed;
unsigned long memory_block_size_bytes(void)
{}
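
/*
 * Sketch of the expected caching wrapper: probe the block size once and
 * return the cached value afterwards. Illustrative only:
 */
#if 0
unsigned long memory_block_size_bytes(void)
{
	if (!memory_block_size_probed)
		memory_block_size_probed = probe_memory_block_size();

	return memory_block_size_probed;
}
#endif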

#ifdef CONFIG_SPARSEMEM_VMEMMAP
/*
 * Initialise the sparsemem vmemmap using huge-pages at the PMD level.
 */
static long __meminitdata addr_start, addr_end;
static void __meminitdata *p_start, *p_end;
static int __meminitdata node_start;

void __meminit vmemmap_set_pmd(pmd_t *pmd, void *p, int node,
			       unsigned long addr, unsigned long next)
{}

int __meminit vmemmap_check_pmd(pmd_t *pmd, int node,
				unsigned long addr, unsigned long next)
{}

int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
		struct vmem_altmap *altmap)
{}
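
/*
 * Condensed sketch of the expected dispatch: small ranges and CPUs without
 * PSE fall back to base pages, everything else uses PMD-sized huge pages;
 * new entries are then propagated to all page tables. Illustrative only:
 */
#if 0
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
		struct vmem_altmap *altmap)
{
	int err;

	if (end - start < PAGES_PER_SECTION * sizeof(struct page))
		err = vmemmap_populate_basepages(start, end, node, NULL);
	else if (boot_cpu_has(X86_FEATURE_PSE))
		err = vmemmap_populate_hugepages(start, end, node, altmap);
	else if (altmap)
		err = -ENOMEM;	/* altmap allocations need huge pages */
	else
		err = vmemmap_populate_basepages(start, end, node, NULL);
	if (!err)
		sync_global_pgds(start, end - 1);
	return err;
}
#endif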

#ifdef CONFIG_HAVE_BOOTMEM_INFO_NODE
void register_page_bootmem_memmap(unsigned long section_nr,
				  struct page *start_page, unsigned long nr_pages)
{}
#endif

void __meminit vmemmap_populate_print_last(void)
{}
#endif