#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/kobject.h>
#include <linux/export.h>
#include <linux/memory.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/mman.h>
#include <linux/memblock.h>
#include <linux/page-isolation.h>
#include <linux/padata.h>
#include <linux/nmi.h>
#include <linux/buffer_head.h>
#include <linux/kmemleak.h>
#include <linux/kfence.h>
#include <linux/page_ext.h>
#include <linux/pti.h>
#include <linux/pgtable.h>
#include <linux/stackdepot.h>
#include <linux/swap.h>
#include <linux/cma.h>
#include <linux/crash_dump.h>
#include <linux/execmem.h>
#include <linux/vmstat.h>
#include "internal.h"
#include "slab.h"
#include "shuffle.h"
#include <asm/setup.h>
#ifdef CONFIG_DEBUG_MEMORY_INIT
int __meminitdata mminit_loglevel;
void __init mminit_verify_zonelist(void)
{ … }
void __init mminit_verify_pageflags_layout(void)
{ … }
static __init int set_mminit_loglevel(char *str)
{ … }
early_param(…);
#endif
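/*
 * The mminit_* verification hooks above are compiled in only with
 * CONFIG_DEBUG_MEMORY_INIT and stay silent unless mminit_loglevel is
 * raised via the early parameter registered above.
 */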
struct kobject *mm_kobj;
#ifdef CONFIG_SMP
s32 vm_committed_as_batch = …;
void mm_compute_batch(int overcommit_policy)
{ … }
static int __meminit mm_compute_batch_notifier(struct notifier_block *self,
unsigned long action, void *arg)
{ … }
static int __init mm_compute_batch_init(void)
{ … }
__initcall(mm_compute_batch_init);
#endif
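/*
 * A note on the SMP block above (a summary, not verbatim kernel-doc):
 * vm_committed_as_batch is the percpu_counter batch used for committed
 * virtual memory accounting. mm_compute_batch() derives it from the number
 * of present CPUs and total RAM, using a smaller batch under
 * OVERCOMMIT_NEVER where the global count must stay accurate, and the
 * memory-hotplug notifier recomputes it whenever memory comes or goes.
 */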
static int __init mm_sysfs_init(void)
{ … }
postcore_initcall(mm_sysfs_init);
static unsigned long arch_zone_lowest_possible_pfn[MAX_NR_ZONES] __initdata;
static unsigned long arch_zone_highest_possible_pfn[MAX_NR_ZONES] __initdata;
static unsigned long zone_movable_pfn[MAX_NUMNODES] __initdata;
static unsigned long required_kernelcore __initdata;
static unsigned long required_kernelcore_percent __initdata;
static unsigned long required_movablecore __initdata;
static unsigned long required_movablecore_percent __initdata;
static unsigned long nr_kernel_pages __initdata;
static unsigned long nr_all_pages __initdata;
static bool deferred_struct_pages __meminitdata;
static DEFINE_PER_CPU(struct per_cpu_nodestat, boot_nodestats);
static int __init cmdline_parse_core(char *p, unsigned long *core,
unsigned long *percent)
{ … }
bool mirrored_kernelcore __initdata_memblock;
static int __init cmdline_parse_kernelcore(char *p)
{ … }
early_param(…);
static int __init cmdline_parse_movablecore(char *p)
{ … }
early_param(…);
static unsigned long __init early_calculate_totalpages(void)
{ … }
static void __init find_usable_zone_for_movable(void)
{ … }
static void __init find_zone_movable_pfns_for_nodes(void)
{ … }
void __meminit __init_single_page(struct page *page, unsigned long pfn,
unsigned long zone, int nid)
{ … }
#ifdef CONFIG_NUMA
struct mminit_pfnnid_cache { … };
static struct mminit_pfnnid_cache early_pfnnid_cache __meminitdata;
static int __meminit __early_pfn_to_nid(unsigned long pfn,
struct mminit_pfnnid_cache *state)
{ … }
int __meminit early_pfn_to_nid(unsigned long pfn)
{ … }
int hashdist = … HASHDIST_DEFAULT;
static int __init set_hashdist(char *str)
{ … }
__setup(…);
static inline void fixup_hashdist(void)
{ … }
#else
static inline void fixup_hashdist(void) {}
#endif
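/*
 * hashdist ("hash distribute") chooses whether alloc_large_system_hash()
 * places the large boot-time hash tables with vmalloc so they spread
 * across NUMA nodes. fixup_hashdist() clears it again when only one
 * memory node is actually present, since distribution buys nothing there.
 */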
#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
static inline void pgdat_set_deferred_range(pg_data_t *pgdat)
{ … }
static inline bool __meminit early_page_initialised(unsigned long pfn, int nid)
{ … }
static bool __meminit
defer_init(int nid, unsigned long pfn, unsigned long end_pfn)
{ … }
static void __meminit init_reserved_page(unsigned long pfn, int nid)
{ … }
#else
static inline void pgdat_set_deferred_range(pg_data_t *pgdat) {}
static inline bool early_page_initialised(unsigned long pfn, int nid)
{
return true;
}
static inline bool defer_init(int nid, unsigned long pfn, unsigned long end_pfn)
{
return false;
}
static inline void init_reserved_page(unsigned long pfn, int nid)
{
}
#endif
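/*
 * With CONFIG_DEFERRED_STRUCT_PAGE_INIT, only the struct pages needed to
 * get the kernel booted are initialised up front; defer_init() tells the
 * early loop where to stop on each node, and the remainder is filled in
 * later (see deferred_init_memmap() below). The !CONFIG stubs above make
 * the common code behave as if everything is initialised immediately.
 */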
void __meminit reserve_bootmem_region(phys_addr_t start,
phys_addr_t end, int nid)
{ … }
static bool __meminit
overlap_memmap_init(unsigned long zone, unsigned long *pfn)
{ … }
static void __init init_unavailable_range(unsigned long spfn,
unsigned long epfn,
int zone, int node)
{ … }
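/*
 * memmap_init_range() walks one contiguous PFN range inside a single zone
 * and initialises each struct page. All pages start out reserved; those
 * backed by usable memory are handed to the buddy allocator only later,
 * when early boot releases memblock memory.
 */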
void __meminit memmap_init_range(unsigned long size, int nid, unsigned long zone,
unsigned long start_pfn, unsigned long zone_end_pfn,
enum meminit_context context,
struct vmem_altmap *altmap, int migratetype)
{ … }
static void __init memmap_init_zone_range(struct zone *zone,
unsigned long start_pfn,
unsigned long end_pfn,
unsigned long *hole_pfn)
{ … }
static void __init memmap_init(void)
{ … }
#ifdef CONFIG_ZONE_DEVICE
static void __ref __init_zone_device_page(struct page *page, unsigned long pfn,
unsigned long zone_idx, int nid,
struct dev_pagemap *pgmap)
{ … }
static inline unsigned long compound_nr_pages(struct vmem_altmap *altmap,
struct dev_pagemap *pgmap)
{ … }
static void __ref memmap_init_compound(struct page *head,
unsigned long head_pfn,
unsigned long zone_idx, int nid,
struct dev_pagemap *pgmap,
unsigned long nr_pages)
{ … }
void __ref memmap_init_zone_device(struct zone *zone,
unsigned long start_pfn,
unsigned long nr_pages,
struct dev_pagemap *pgmap)
{ … }
#endif
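/*
 * ZONE_DEVICE struct pages are not initialised at boot: they describe
 * hotplugged device memory (struct dev_pagemap) and are set up by
 * memmap_init_zone_device() when the pagemap is created, including
 * compound (higher-order) layouts when the pgmap requests them.
 */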
static void __init adjust_zone_range_for_zone_movable(int nid,
unsigned long zone_type,
unsigned long node_end_pfn,
unsigned long *zone_start_pfn,
unsigned long *zone_end_pfn)
{ … }
static unsigned long __init __absent_pages_in_range(int nid,
unsigned long range_start_pfn,
unsigned long range_end_pfn)
{ … }
unsigned long __init absent_pages_in_range(unsigned long start_pfn,
unsigned long end_pfn)
{ … }
static unsigned long __init zone_absent_pages_in_node(int nid,
unsigned long zone_type,
unsigned long zone_start_pfn,
unsigned long zone_end_pfn)
{ … }
static unsigned long __init zone_spanned_pages_in_node(int nid,
unsigned long zone_type,
unsigned long node_start_pfn,
unsigned long node_end_pfn,
unsigned long *zone_start_pfn,
unsigned long *zone_end_pfn)
{ … }
static void __init reset_memoryless_node_totalpages(struct pglist_data *pgdat)
{ … }
static void __init calc_nr_kernel_pages(void)
{ … }
static void __init calculate_node_totalpages(struct pglist_data *pgdat,
unsigned long node_start_pfn,
unsigned long node_end_pfn)
{ … }
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void pgdat_init_split_queue(struct pglist_data *pgdat)
{ … }
#else
static void pgdat_init_split_queue(struct pglist_data *pgdat) {}
#endif
#ifdef CONFIG_COMPACTION
static void pgdat_init_kcompactd(struct pglist_data *pgdat)
{ … }
#else
static void pgdat_init_kcompactd(struct pglist_data *pgdat) {}
#endif
static void __meminit pgdat_init_internals(struct pglist_data *pgdat)
{ … }
static void __meminit zone_init_internals(struct zone *zone, enum zone_type idx, int nid,
unsigned long remaining_pages)
{ … }
static void __meminit zone_init_free_lists(struct zone *zone)
{ … }
void __meminit init_currently_empty_zone(struct zone *zone,
unsigned long zone_start_pfn,
unsigned long size)
{ … }
#ifndef CONFIG_SPARSEMEM
/*
 * Calculate the size of the zone->pageblock_flags bitmap, rounded to an
 * unsigned long. Start by making sure zonesize is a multiple of
 * pageblock_order by rounding up, then use one NR_PAGEBLOCK_BITS worth of
 * bits per pageblock, round what is now in bits up to the nearest long,
 * and return it in bytes.
 */
static unsigned long __init usemap_size(unsigned long zone_start_pfn, unsigned long zonesize)
{
	unsigned long usemapsize;

	zonesize += zone_start_pfn & (pageblock_nr_pages-1);
	usemapsize = roundup(zonesize, pageblock_nr_pages);
	usemapsize = usemapsize >> pageblock_order;
	usemapsize *= NR_PAGEBLOCK_BITS;
	usemapsize = roundup(usemapsize, BITS_PER_LONG);

	return usemapsize / BITS_PER_BYTE;
}
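/*
 * Worked example for usemap_size() (assumed parameters, for illustration):
 * with 4 KiB pages and pageblock_order == 9, pageblock_nr_pages == 512.
 * A pageblock-aligned 1 GiB zone spans 262144 pages, i.e. 512 pageblocks;
 * at NR_PAGEBLOCK_BITS == 4 bits each that is 2048 bits, already a
 * multiple of BITS_PER_LONG on 64-bit, so the usemap occupies 256 bytes.
 */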
static void __ref setup_usemap(struct zone *zone)
{
	unsigned long usemapsize = usemap_size(zone->zone_start_pfn,
					       zone->spanned_pages);
	zone->pageblock_flags = NULL;
	if (usemapsize) {
		zone->pageblock_flags =
			memblock_alloc_node(usemapsize, SMP_CACHE_BYTES,
					    zone_to_nid(zone));
		if (!zone->pageblock_flags)
			panic("Failed to allocate %ld bytes for zone %s pageblock flags on node %d\n",
			      usemapsize, zone->name, zone_to_nid(zone));
	}
}
#else
static inline void setup_usemap(struct zone *zone) { … }
#endif
#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
void __init set_pageblock_order(void)
{
	unsigned int order = MAX_PAGE_ORDER;

	/* Check that pageblock_order has not already been set up. */
	if (pageblock_order)
		return;

	/* Don't let pageblocks exceed the maximum allocation granularity. */
	if (HPAGE_SHIFT > PAGE_SHIFT && HUGETLB_PAGE_ORDER < order)
		order = HUGETLB_PAGE_ORDER;

	/*
	 * Assume the largest contiguous order of interest is a huge page.
	 * This value may be variable depending on boot parameters on powerpc.
	 */
	pageblock_order = order;
}
#else
void __init set_pageblock_order(void)
{ … }
#endif
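/*
 * Example for the variable-sized case (an assumed configuration, not a
 * statement about any particular arch default): with 64 KiB base pages
 * (PAGE_SHIFT == 16) and 16 MiB huge pages (HPAGE_SHIFT == 24),
 * HUGETLB_PAGE_ORDER == 8, so pageblock_order is lowered from
 * MAX_PAGE_ORDER to 8 and migratetypes are tracked per 16 MiB block.
 */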
#ifdef CONFIG_MEMORY_HOTPLUG
void __ref free_area_init_core_hotplug(struct pglist_data *pgdat)
{ … }
#endif
static void __init free_area_init_core(struct pglist_data *pgdat)
{ … }
void __init *memmap_alloc(phys_addr_t size, phys_addr_t align,
phys_addr_t min_addr, int nid, bool exact_nid)
{ … }
#ifdef CONFIG_FLATMEM
static void __init alloc_node_mem_map(struct pglist_data *pgdat)
{
	unsigned long start, offset, size, end;
	struct page *map;

	/* Skip empty nodes */
	if (!pgdat->node_spanned_pages)
		return;

	start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);
	offset = pgdat->node_start_pfn - start;
	/*
	 * The node's endpoints aren't required to be MAX_PAGE_ORDER
	 * aligned, but the node_mem_map endpoints must be, in order
	 * for the buddy allocator to function correctly.
	 */
	end = ALIGN(pgdat_end_pfn(pgdat), MAX_ORDER_NR_PAGES);
	size = (end - start) * sizeof(struct page);
	map = memmap_alloc(size, SMP_CACHE_BYTES, MEMBLOCK_LOW_LIMIT,
			   pgdat->node_id, false);
	if (!map)
		panic("Failed to allocate %ld bytes for node %d memory map\n",
		      size, pgdat->node_id);
	pgdat->node_mem_map = map + offset;
	mod_node_early_perpage_metadata(pgdat->node_id,
					DIV_ROUND_UP(size, PAGE_SIZE));
	pr_debug("%s: node %d, pgdat %08lx, node_mem_map %08lx\n",
		 __func__, pgdat->node_id, (unsigned long)pgdat,
		 (unsigned long)pgdat->node_mem_map);
#ifndef CONFIG_NUMA
	/* The global mem_map is just set to node 0's map. */
	if (pgdat == NODE_DATA(0)) {
		mem_map = NODE_DATA(0)->node_mem_map;
		if (page_to_pfn(mem_map) != pgdat->node_start_pfn)
			mem_map -= offset;
	}
#endif
}
#else
static inline void alloc_node_mem_map(struct pglist_data *pgdat) { … }
#endif
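/*
 * Worked example for the FLATMEM alloc_node_mem_map() above (assumed
 * values): with MAX_ORDER_NR_PAGES == 1024 and a node spanning PFNs
 * [1000, 5000), start = 1000 & ~1023 = 0 and offset = 1000, while the end
 * rounds up to 5120, so 5120 struct pages are allocated and node_mem_map
 * points 1000 entries in. The over-allocation keeps the map aligned to
 * the largest buddy order even though the node itself is not.
 */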
void __init get_pfn_range_for_nid(unsigned int nid,
unsigned long *start_pfn, unsigned long *end_pfn)
{ … }
static void __init free_area_init_node(int nid)
{ … }
static void __init check_for_memory(pg_data_t *pgdat)
{ … }
#if MAX_NUMNODES > 1
void __init setup_nr_node_ids(void)
{ … }
#endif
static bool arch_has_descending_max_zone_pfns(void)
{ … }
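/*
 * free_area_init() is the arch-independent entry point for node and zone
 * setup: the architecture supplies the highest PFN each zone may reach,
 * and this code derives per-node zone extents, carves out ZONE_MOVABLE
 * according to kernelcore=/movablecore= (parsed above) and initialises
 * every node's memory map.
 */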
void __init free_area_init(unsigned long *max_zone_pfn)
{ … }
unsigned long __init node_map_pfn_alignment(void)
{ … }
#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
static void __init deferred_free_pages(unsigned long pfn,
unsigned long nr_pages)
{ … }
static atomic_t pgdat_init_n_undone __initdata;
static __initdata DECLARE_COMPLETION(pgdat_init_all_done_comp);
static inline void __init pgdat_init_report_one_done(void)
{ … }
static unsigned long __init deferred_init_pages(struct zone *zone,
unsigned long pfn, unsigned long end_pfn)
{ … }
static bool __init
deferred_init_mem_pfn_range_in_zone(u64 *i, struct zone *zone,
unsigned long *spfn, unsigned long *epfn,
unsigned long first_init_pfn)
{ … }
static unsigned long __init
deferred_init_maxorder(u64 *i, struct zone *zone, unsigned long *start_pfn,
unsigned long *end_pfn)
{ … }
static void __init
deferred_init_memmap_chunk(unsigned long start_pfn, unsigned long end_pfn,
void *arg)
{ … }
static unsigned int __init
deferred_page_init_max_threads(const struct cpumask *node_cpumask)
{ … }
static int __init deferred_init_memmap(void *data)
{ … }
bool __init deferred_grow_zone(struct zone *zone, unsigned int order)
{ … }
#endif
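/*
 * The deferred path above runs from page_alloc_init_late(): one
 * deferred_init_memmap() instance per node spreads the remaining struct
 * page initialisation across that node's CPUs with
 * padata_do_multithreaded() (hence the linux/padata.h include), while
 * deferred_grow_zone() lets the page allocator initialise an extra chunk
 * on demand if it needs memory before the background work has finished.
 */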
#ifdef CONFIG_CMA
void __init init_cma_reserved_pageblock(struct page *page)
{ … }
#endif
void set_zone_contiguous(struct zone *zone)
{ … }
static void __init mem_init_print_info(void);
void __init page_alloc_init_late(void)
{ … }
#if __BITS_PER_LONG > 32
#define ADAPT_SCALE_BASE …
#define ADAPT_SCALE_SHIFT …
#define ADAPT_SCALE_NPAGES …
#endif
void *__init alloc_large_system_hash(const char *tablename,
unsigned long bucketsize,
unsigned long numentries,
int scale,
int flags,
unsigned int *_hash_shift,
unsigned int *_hash_mask,
unsigned long low_limit,
unsigned long high_limit)
{ … }
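/*
 * A typical caller of alloc_large_system_hash() looks roughly like this
 * sketch (modeled on the inode-cache table in fs/inode.c; the identifiers
 * are illustrative and not defined in this file). numentries == 0 lets
 * the size be derived from memory, scale == 14 asks for about one bucket
 * per 16 KiB of memory, HASH_ZERO requests zeroed buckets, and the 0, 0
 * limits leave the bucket count unbounded:
 *
 *	example_hashtable =
 *		alloc_large_system_hash("Example-cache",
 *					sizeof(struct hlist_head),
 *					0, 14, HASH_ZERO,
 *					&example_shift,
 *					&example_mask,
 *					0, 0);
 */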
void __init memblock_free_pages(struct page *page, unsigned long pfn,
unsigned int order)
{ … }
DEFINE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_ALLOC_DEFAULT_ON, init_on_alloc);
EXPORT_SYMBOL(…);
DEFINE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_FREE_DEFAULT_ON, init_on_free);
EXPORT_SYMBOL(…);
static bool _init_on_alloc_enabled_early __read_mostly
= … IS_ENABLED(…);
static int __init early_init_on_alloc(char *buf)
{ … }
early_param(…);
static bool _init_on_free_enabled_early __read_mostly
= … IS_ENABLED(…);
static int __init early_init_on_free(char *buf)
{ … }
early_param(…);
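/*
 * init_on_alloc/init_on_free (tunable with the init_on_alloc= and
 * init_on_free= boot parameters) zero pages and slab objects at
 * allocation and/or free time as a hardening measure. Static keys keep
 * the disabled case free of overhead on the hot paths.
 */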
DEFINE_STATIC_KEY_MAYBE(CONFIG_DEBUG_VM, check_pages_enabled);
static void __init mem_debugging_and_hardening_init(void)
{ … }
static void __init report_meminit(void)
{ … }
static void __init mem_init_print_info(void)
{ … }
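/*
 * Set up kernel memory allocators
 */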
void __init mm_core_init(void)
{ … }