#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/mmzone.h>
#include <linux/memblock.h>
#include <linux/compiler.h>
#include <linux/highmem.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/bootmem_info.h>
#include <linux/vmstat.h>
#include "internal.h"
#include <asm/dma.h>
#ifdef CONFIG_SPARSEMEM_EXTREME
struct mem_section **mem_section;
#else
struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT]
	____cacheline_internodealigned_in_smp;
#endif
EXPORT_SYMBOL(mem_section);
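/*
 * For orientation, a condensed sketch (not the elided kernel helper) of how
 * a section number is resolved against this layout; the real lookup is
 * __nr_to_section() in <linux/mmzone.h>:
 *
 *	unsigned long root = SECTION_NR_TO_ROOT(section_nr);
 *
 *	if (root >= NR_SECTION_ROOTS)
 *		return NULL;
 *	return &mem_section[root][section_nr & SECTION_ROOT_MASK];
 *
 * With SPARSEMEM_EXTREME the roots are allocated on demand, so a NULL
 * mem_section[root] must also be treated as "no such section".
 */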
#ifdef NODE_NOT_IN_PAGE_FLAGS
#if MAX_NUMNODES <= 256
static u8 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
#else
static u16 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
#endif
int page_to_nid(const struct page *page)
{
	return section_to_node_table[page_to_section(page)];
}
EXPORT_SYMBOL(page_to_nid);
static void set_section_nid(unsigned long section_nr, int nid)
{
	section_to_node_table[section_nr] = nid;
}
#else
static inline void set_section_nid(unsigned long section_nr, int nid)
{ … }
#endif
#ifdef CONFIG_SPARSEMEM_EXTREME
static noinline struct mem_section __ref *sparse_index_alloc(int nid)
{ … }
static int __meminit sparse_index_init(unsigned long section_nr, int nid)
{ … }
#else
static inline int sparse_index_init(unsigned long section_nr, int nid)
{
	return 0;
}
#endif
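/*
 * In the SPARSEMEM_EXTREME case above, sparse_index_init() populates one
 * root on first use. A minimal sketch of the usual shape (condensed, not
 * the elided bodies): sparse_index_alloc() takes SECTIONS_PER_ROOT
 * mem_sections from the slab once it is available, else from memblock, and
 * sparse_index_init() installs the result:
 *
 *	unsigned long root = SECTION_NR_TO_ROOT(section_nr);
 *
 *	if (mem_section[root])
 *		return 0;
 *	mem_section[root] = sparse_index_alloc(nid);
 *	return mem_section[root] ? 0 : -ENOMEM;
 */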
static inline unsigned long sparse_encode_early_nid(int nid)
{ … }
static inline int sparse_early_nid(struct mem_section *section)
{ … }
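/*
 * Until a section gets a memmap, section_mem_map doubles as storage for the
 * early node id. Sketch of the encode/decode pair elided above:
 *
 *	encode:	return (unsigned long)nid << SECTION_NID_SHIFT;
 *	decode:	return section->section_mem_map >> SECTION_NID_SHIFT;
 */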
static void __meminit mminit_validate_memmodel_limits(unsigned long *start_pfn,
						unsigned long *end_pfn)
{ … }
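/*
 * Sketch of the check performed here (condensed): SPARSEMEM cannot describe
 * pfns at or above 1UL << (MAX_PHYSMEM_BITS - PAGE_SHIFT), so both ends of
 * the range are clamped to that limit, e.g.:
 *
 *	unsigned long max_sparsemem_pfn =
 *		1UL << (MAX_PHYSMEM_BITS - PAGE_SHIFT);
 *
 *	if (*end_pfn > max_sparsemem_pfn) {
 *		WARN_ON_ONCE(1);
 *		*end_pfn = max_sparsemem_pfn;
 *	}
 */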
unsigned long __highest_present_section_nr;
static void __section_mark_present(struct mem_section *ms,
				   unsigned long section_nr)
{ … }
#define for_each_present_section_nr(start, section_nr) …
static inline unsigned long first_present_section_nr(void)
{ … }
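/*
 * Sketch of __section_mark_present() (condensed): keep
 * __highest_present_section_nr up to date so the iterator above has an
 * upper bound, then flag the section:
 *
 *	if (section_nr > __highest_present_section_nr)
 *		__highest_present_section_nr = section_nr;
 *	ms->section_mem_map |= SECTION_MARKED_PRESENT;
 */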
#ifdef CONFIG_SPARSEMEM_VMEMMAP
static void subsection_mask_set(unsigned long *map, unsigned long pfn,
				unsigned long nr_pages)
{ … }
void __init subsection_map_init(unsigned long pfn, unsigned long nr_pages)
{ … }
#else
void __init subsection_map_init(unsigned long pfn, unsigned long nr_pages)
{
}
#endif
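/*
 * Sketch of subsection_mask_set() (condensed): map the pfn range to
 * subsection indices within its section and set the covered bits:
 *
 *	int idx = subsection_map_index(pfn);
 *	int end = subsection_map_index(pfn + nr_pages - 1);
 *
 *	bitmap_set(map, idx, end - idx + 1);
 *
 * subsection_map_init() then walks the range section by section, applying
 * this to each section's ms->usage->subsection_map.
 */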
static void __init memory_present(int nid, unsigned long start, unsigned long end)
{ … }
static void __init memblocks_present(void)
{ … }
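/*
 * Sketch of memory_present() (condensed): for every section spanned by
 * [start, end), make sure its root exists, record the node id, and mark the
 * section present with the nid stashed in section_mem_map:
 *
 *	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) {
 *		unsigned long section_nr = pfn_to_section_nr(pfn);
 *		struct mem_section *ms;
 *
 *		sparse_index_init(section_nr, nid);
 *		set_section_nid(section_nr, nid);
 *		ms = __nr_to_section(section_nr);
 *		if (!ms->section_mem_map) {
 *			ms->section_mem_map = sparse_encode_early_nid(nid);
 *			__section_mark_present(ms, section_nr);
 *		}
 *	}
 *
 * memblocks_present() simply feeds every memblock region through it.
 */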
static unsigned long sparse_encode_mem_map(struct page *mem_map, unsigned long pnum)
{ … }
#ifdef CONFIG_MEMORY_HOTPLUG
struct page *sparse_decode_mem_map(unsigned long coded_mem_map, unsigned long pnum)
{ … }
#endif
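/*
 * How the encode/decode pair above fits together, in sketch form: the
 * memmap pointer is stored relative to the section's first pfn so the low
 * bits of section_mem_map stay free for SECTION_* flags:
 *
 *	encode:	coded = (unsigned long)(mem_map - section_nr_to_pfn(pnum));
 *	decode:	return ((struct page *)(coded & SECTION_MAP_MASK)) +
 *			section_nr_to_pfn(pnum);
 */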
static void __meminit sparse_init_one_section(struct mem_section *ms,
		unsigned long pnum, struct page *mem_map,
		struct mem_section_usage *usage, unsigned long flags)
{ … }
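/*
 * Sketch of sparse_init_one_section() (condensed): swap the encoded memmap
 * into section_mem_map, mark the map present, and attach the usage block:
 *
 *	ms->section_mem_map &= ~SECTION_MAP_MASK;
 *	ms->section_mem_map |= sparse_encode_mem_map(mem_map, pnum) |
 *			       SECTION_HAS_MEM_MAP | flags;
 *	ms->usage = usage;
 */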
static unsigned long usemap_size(void)
{ … }
size_t mem_section_usage_size(void)
{ … }
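/*
 * Sizing, in sketch form: the usemap is the per-section pageblock flags
 * bitmap, and struct mem_section_usage carries it as a trailing array:
 *
 *	usemap_size():
 *		BITS_TO_LONGS(SECTION_BLOCKFLAGS_BITS) * sizeof(unsigned long)
 *	mem_section_usage_size():
 *		sizeof(struct mem_section_usage) + usemap_size()
 */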
#ifdef CONFIG_MEMORY_HOTREMOVE
static inline phys_addr_t pgdat_to_phys(struct pglist_data *pgdat)
{ … }
static struct mem_section_usage * __init
sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
					 unsigned long size)
{ … }

static void __init check_usemap_section_nr(int nid,
					   struct mem_section_usage *usage)
{ … }
#else
static struct mem_section_usage * __init
sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
					 unsigned long size)
{
	return memblock_alloc_node(size, SMP_CACHE_BYTES, pgdat->node_id);
}

static void __init check_usemap_section_nr(int nid,
					   struct mem_section_usage *usage)
{
}
#endif
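/*
 * Rationale for the CONFIG_MEMORY_HOTREMOVE variants above: the usemap is
 * placed in the section holding its node's pgdat where possible, and
 * check_usemap_section_nr() warns when that fails, because a usemap living
 * in some other hot-removable section would tie the two sections' removal
 * together.
 */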
#ifdef CONFIG_SPARSEMEM_VMEMMAP
static unsigned long __init section_map_size(void)
{ … }
#else
static unsigned long __init section_map_size(void)
{
	return PAGE_ALIGN(sizeof(struct page) * PAGES_PER_SECTION);
}
struct page __init *__populate_section_memmap(unsigned long pfn,
		unsigned long nr_pages, int nid, struct vmem_altmap *altmap,
		struct dev_pagemap *pgmap)
{
	unsigned long size = section_map_size();
	struct page *map = sparse_buffer_alloc(size);
	phys_addr_t addr = __pa(MAX_DMA_ADDRESS);

	/* Prefer the preallocated per-node sparse buffer. */
	if (map)
		return map;

	/* Otherwise fall back to a size-aligned memblock allocation. */
	map = memmap_alloc(size, size, addr, nid, false);
	if (!map)
		panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%pa\n",
		      __func__, size, size, nid, &addr);

	return map;
}
#endif
static void *sparsemap_buf __meminitdata;
static void *sparsemap_buf_end __meminitdata;
static inline void __meminit sparse_buffer_free(unsigned long size)
{ … }
static void __init sparse_buffer_init(unsigned long size, int nid)
{ … }
static void __init sparse_buffer_fini(void)
{ … }
void * __meminit sparse_buffer_alloc(unsigned long size)
{ … }
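/*
 * Sketch of sparse_buffer_alloc() (condensed, not the elided body): a bump
 * allocator over the per-node buffer set up by sparse_buffer_init().
 * Requests are aligned to their own size; on exhaustion it returns NULL so
 * callers fall back to memblock:
 *
 *	void *ptr = NULL;
 *
 *	if (sparsemap_buf) {
 *		ptr = (void *)roundup((unsigned long)sparsemap_buf, size);
 *		if (ptr + size > sparsemap_buf_end)
 *			ptr = NULL;
 *		else
 *			sparsemap_buf = ptr + size;
 *	}
 *	return ptr;
 *
 * (The real version also hands the alignment gap back via
 * sparse_buffer_free().)
 */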
void __weak __meminit vmemmap_populate_print_last(void)
{ … }
static void __init sparse_init_nid(int nid, unsigned long pnum_begin,
				   unsigned long pnum_end,
				   unsigned long map_count)
{ … }
void __init sparse_init(void)
{ … }
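/*
 * The overall sparse_init() flow, in sketch form: mark every memblock range
 * present, then walk the present sections, batching consecutive sections
 * that share a node and handing each batch to sparse_init_nid() so usemaps
 * and memmaps are allocated node-locally:
 *
 *	memblocks_present();
 *	pnum_begin = first_present_section_nr();
 *	for_each_present_section_nr(pnum_begin + 1, pnum_end) {
 *		if (sparse_early_nid(__nr_to_section(pnum_end)) == nid) {
 *			map_count++;
 *			continue;
 *		}
 *		sparse_init_nid(nid, pnum_begin, pnum_end, map_count);
 *		...
 *	}
 */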
#ifdef CONFIG_MEMORY_HOTPLUG
void online_mem_sections(unsigned long start_pfn, unsigned long end_pfn)
{ … }
void offline_mem_sections(unsigned long start_pfn, unsigned long end_pfn)
{ … }
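/*
 * Both helpers above share one loop shape (sketch): walk the range a
 * section at a time and flip SECTION_IS_ONLINE in section_mem_map:
 *
 *	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
 *		struct mem_section *ms = __pfn_to_section(pfn);
 *
 *		ms->section_mem_map |= SECTION_IS_ONLINE;   (online case)
 *		ms->section_mem_map &= ~SECTION_IS_ONLINE;  (offline case)
 *	}
 */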
#ifdef CONFIG_SPARSEMEM_VMEMMAP
static struct page * __meminit populate_section_memmap(unsigned long pfn,
		unsigned long nr_pages, int nid, struct vmem_altmap *altmap,
		struct dev_pagemap *pgmap)
{ … }

static void depopulate_section_memmap(unsigned long pfn, unsigned long nr_pages,
				      struct vmem_altmap *altmap)
{ … }
static void free_map_bootmem(struct page *memmap)
{ … }
static int clear_subsection_map(unsigned long pfn, unsigned long nr_pages)
{ … }
static bool is_subsection_map_empty(struct mem_section *ms)
{ … }
static int fill_subsection_map(unsigned long pfn, unsigned long nr_pages)
{ … }
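/*
 * Sketch of fill_subsection_map() (condensed): build the mask for the
 * incoming range and refuse overlap with already-populated subsections:
 *
 *	DECLARE_BITMAP(map, SUBSECTIONS_PER_SECTION) = { 0 };
 *	unsigned long *subsection_map = &ms->usage->subsection_map[0];
 *
 *	subsection_mask_set(map, pfn, nr_pages);
 *	if (bitmap_intersects(map, subsection_map, SUBSECTIONS_PER_SECTION))
 *		return -EEXIST;
 *	bitmap_or(subsection_map, map, subsection_map,
 *		  SUBSECTIONS_PER_SECTION);
 *	return 0;
 *
 * clear_subsection_map() is the bitmap_andnot() counterpart, and
 * is_subsection_map_empty() reduces to a bitmap_empty() check.
 */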
#else
static struct page * __meminit populate_section_memmap(unsigned long pfn,
		unsigned long nr_pages, int nid, struct vmem_altmap *altmap,
		struct dev_pagemap *pgmap)
{
	return kvmalloc_node(array_size(sizeof(struct page),
					PAGES_PER_SECTION), GFP_KERNEL, nid);
}

static void depopulate_section_memmap(unsigned long pfn, unsigned long nr_pages,
				      struct vmem_altmap *altmap)
{
	kvfree(pfn_to_page(pfn));
}
static void free_map_bootmem(struct page *memmap)
{
	unsigned long maps_section_nr, removing_section_nr, i;
	unsigned long magic, nr_pages;
	struct page *page = virt_to_page(memmap);

	nr_pages = PAGE_ALIGN(PAGES_PER_SECTION * sizeof(struct page))
		>> PAGE_SHIFT;

	for (i = 0; i < nr_pages; i++, page++) {
		magic = page->index;

		/* memmap pages are SECTION_INFO or MIX_SECTION_INFO, never NODE_INFO */
		BUG_ON(magic == NODE_INFO);

		maps_section_nr = pfn_to_section_nr(page_to_pfn(page));
		removing_section_nr = page_private(page);

		/*
		 * page_private() holds the section this bootmem page was
		 * registered for; only drop the reference for pages that
		 * live outside the section being removed.
		 */
		if (maps_section_nr != removing_section_nr)
			put_page_bootmem(page);
	}
}
static int clear_subsection_map(unsigned long pfn, unsigned long nr_pages)
{
	return 0;
}

static bool is_subsection_map_empty(struct mem_section *ms)
{
	return true;
}

static int fill_subsection_map(unsigned long pfn, unsigned long nr_pages)
{
	return 0;
}
#endif
static void section_deactivate(unsigned long pfn, unsigned long nr_pages,
			       struct vmem_altmap *altmap)
{ … }

static struct page * __meminit section_activate(int nid, unsigned long pfn,
		unsigned long nr_pages, struct vmem_altmap *altmap,
		struct dev_pagemap *pgmap)
{ … }

int __meminit sparse_add_section(int nid, unsigned long start_pfn,
		unsigned long nr_pages, struct vmem_altmap *altmap,
		struct dev_pagemap *pgmap)
{ … }
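/*
 * Sketch of the sparse_add_section() sequence (condensed): ensure the root
 * exists, activate the section (usage block plus memmap), poison the fresh
 * struct pages to catch use of uninitialized fields, then publish it:
 *
 *	ret = sparse_index_init(section_nr, nid);
 *	memmap = section_activate(nid, start_pfn, nr_pages, altmap, pgmap);
 *	page_init_poison(memmap, sizeof(struct page) * nr_pages);
 *	set_section_nid(section_nr, nid);
 *	__section_mark_present(ms, section_nr);
 *	sparse_init_one_section(ms, section_nr, memmap, ms->usage, 0);
 */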
void sparse_remove_section(unsigned long pfn, unsigned long nr_pages,
			   struct vmem_altmap *altmap)
{ … }
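/*
 * sparse_remove_section() is, in sketch form, the inverse path: it defers
 * to section_deactivate(), which clears the affected subsections and, once
 * the subsection map is empty, tears down the memmap and usage block.
 */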
#endif