// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2015 Intel Corporation. All rights reserved. */
#include <linux/device.h>
#include <linux/io.h>
#include <linux/kasan.h>
#include <linux/memory_hotplug.h>
#include <linux/memremap.h>
#include <linux/pfn_t.h>
#include <linux/swap.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/swapops.h>
#include <linux/types.h>
#include <linux/wait_bit.h>
#include <linux/xarray.h>
#include "internal.h"

static DEFINE_XARRAY(pgmap_array);

/*
 * The memremap() and memremap_pages() interfaces are alternately used
 * to map persistent memory namespaces. These interfaces place different
 * constraints on the alignment and size of the mapping (namespace).
 * memremap() can map individual PAGE_SIZE pages. memremap_pages() can
 * only map subsections (2MB), and on at least one architecture (PowerPC)
 * the minimum mapping granularity of memremap_pages() is 16MB.
 *
 * The role of memremap_compat_align() is to communicate the minimum
 * arch supported alignment of a namespace such that it can freely
 * switch modes without violating the arch constraint. Namely, do not
 * allow a namespace to be PAGE_SIZE aligned since that namespace may be
 * reconfigured into a mode that requires SUBSECTION_SIZE alignment.
 */
#ifndef CONFIG_ARCH_HAS_MEMREMAP_COMPAT_ALIGN
unsigned long memremap_compat_align(void)
{
	return SUBSECTION_SIZE;
}
EXPORT_SYMBOL_GPL(memremap_compat_align);
#endif
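
/*
 * Illustrative sketch only, never built: a hypothetical caller (e.g. a
 * pmem namespace driver) could use memremap_compat_align() to reject
 * ranges that would break a later mode switch. The helper name below is
 * made up for illustration.
 */
#if 0
static bool example_range_is_compat_aligned(const struct range *range)
{
	unsigned long align = memremap_compat_align();

	return IS_ALIGNED(range->start, align) &&
	       IS_ALIGNED(range_len(range), align);
}
#endif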

#ifdef CONFIG_FS_DAX
DEFINE_STATIC_KEY_FALSE(devmap_managed_key);
EXPORT_SYMBOL(devmap_managed_key);

static void devmap_managed_enable_put(struct dev_pagemap *pgmap)
{
	if (pgmap->type == MEMORY_DEVICE_FS_DAX)
		static_branch_dec(&devmap_managed_key);
}

static void devmap_managed_enable_get(struct dev_pagemap *pgmap)
{
	if (pgmap->type == MEMORY_DEVICE_FS_DAX)
		static_branch_inc(&devmap_managed_key);
}
#else
static void devmap_managed_enable_get(struct dev_pagemap *pgmap)
{
}
static void devmap_managed_enable_put(struct dev_pagemap *pgmap)
{
}
#endif /* CONFIG_FS_DAX */

static void pgmap_array_delete(struct range *range)
{}

static unsigned long pfn_first(struct dev_pagemap *pgmap, int range_id)
{}

bool pgmap_pfn_valid(struct dev_pagemap *pgmap, unsigned long pfn)
{}

static unsigned long pfn_end(struct dev_pagemap *pgmap, int range_id)
{}

static unsigned long pfn_len(struct dev_pagemap *pgmap, unsigned long range_id)
{}

static void pageunmap_range(struct dev_pagemap *pgmap, int range_id)
{}

void memunmap_pages(struct dev_pagemap *pgmap)
{}
EXPORT_SYMBOL_GPL(memunmap_pages);

static void devm_memremap_pages_release(void *data)
{
	memunmap_pages(data);
}

static void dev_pagemap_percpu_release(struct percpu_ref *ref)
{}

static int pagemap_range(struct dev_pagemap *pgmap, struct mhp_params *params,
		int range_id, int nid)
{}

/*
 * Not device managed version of devm_memremap_pages, undone by
 * memunmap_pages().  Please use devm_memremap_pages if you have a struct
 * device available.
 */
void *memremap_pages(struct dev_pagemap *pgmap, int nid)
{}
EXPORT_SYMBOL_GPL(memremap_pages);
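
/*
 * Illustrative sketch only, never built: a minimal manual pairing of
 * memremap_pages()/memunmap_pages() for a caller without a struct device.
 * "example_manual_remap" is a made-up name and MEMORY_DEVICE_GENERIC is
 * an assumed type choice.
 */
#if 0
static int example_manual_remap(struct dev_pagemap *pgmap,
		phys_addr_t start, unsigned long size)
{
	void *vmem;

	pgmap->type = MEMORY_DEVICE_GENERIC;
	pgmap->range.start = start;
	pgmap->range.end = start + size - 1;
	pgmap->nr_range = 1;

	vmem = memremap_pages(pgmap, NUMA_NO_NODE);
	if (IS_ERR(vmem))
		return PTR_ERR(vmem);

	/* ... use the device pages backing @vmem ... */

	memunmap_pages(pgmap);		/* manual undo, no devm teardown */
	return 0;
}
#endif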

/**
 * devm_memremap_pages - remap and provide memmap backing for the given resource
 * @dev: hosting device for @pgmap
 * @pgmap: pointer to a struct dev_pagemap
 *
 * Notes:
 * 1/ At a minimum the range and type members of @pgmap must be initialized
 *    by the caller before passing it to this function
 *
 * 2/ The altmap field may optionally be initialized, in which case
 *    PGMAP_ALTMAP_VALID must be set in pgmap->flags.
 *
 * 3/ The ref field may optionally be provided, in which case pgmap->ref
 *    must be 'live' on entry and will be killed and reaped at
 *    devm_memremap_pages_release() time, or if this routine fails.
 *
 * 4/ range is expected to be a host memory range that could feasibly be
 *    treated as a "System RAM" range, i.e. not a device mmio range, but
 *    this is not enforced.
 */
void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
{}
EXPORT_SYMBOL_GPL(devm_memremap_pages);
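
/*
 * Illustrative sketch only, never built: how a hypothetical driver probe
 * path might satisfy note 1/ above, initializing only the range and type
 * members before calling devm_memremap_pages(). "example_probe_remap" is
 * a made-up name.
 */
#if 0
static void *example_probe_remap(struct device *dev,
		struct dev_pagemap *pgmap, struct resource *res)
{
	pgmap->range.start = res->start;
	pgmap->range.end = res->end;
	pgmap->nr_range = 1;
	pgmap->type = MEMORY_DEVICE_GENERIC;

	/* Teardown is device managed and runs on driver detach. */
	return devm_memremap_pages(dev, pgmap);
}
#endif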

void devm_memunmap_pages(struct device *dev, struct dev_pagemap *pgmap)
{
	devm_release_action(dev, devm_memremap_pages_release, pgmap);
}
EXPORT_SYMBOL_GPL(devm_memunmap_pages);

/**
 * get_dev_pagemap() - take a new live reference on the dev_pagemap for @pfn
 * @pfn: page frame number for which to look up the pgmap
 * @pgmap: optional known pgmap that already has a reference
 *
 * If @pgmap is non-NULL and covers @pfn it will be returned as-is.  If @pgmap
 * is non-NULL but does not cover @pfn the reference to it will be released.
 */
struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
		struct dev_pagemap *pgmap)
{}
EXPORT_SYMBOL_GPL(get_dev_pagemap);
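
/*
 * Illustrative sketch only, never built: the lookup pattern described
 * above, reusing a cached pgmap across consecutive pfns and dropping the
 * final reference with put_dev_pagemap(). "example_walk_pfns" is a
 * made-up name.
 */
#if 0
static void example_walk_pfns(unsigned long start_pfn, unsigned long nr_pfns)
{
	struct dev_pagemap *pgmap = NULL;
	unsigned long pfn;

	for (pfn = start_pfn; pfn < start_pfn + nr_pfns; pfn++) {
		/* Reuses @pgmap while it covers @pfn, else looks up again. */
		pgmap = get_dev_pagemap(pfn, pgmap);
		if (!pgmap)
			break;
	}
	if (pgmap)
		put_dev_pagemap(pgmap);
}
#endif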

void free_zone_device_folio(struct folio *folio)
{}

void zone_device_page_init(struct page *page)
{}
EXPORT_SYMBOL_GPL(zone_device_page_init);

#ifdef CONFIG_FS_DAX
bool __put_devmap_managed_folio_refs(struct folio *folio, int refs)
{}
EXPORT_SYMBOL(__put_devmap_managed_folio_refs);
#endif /* CONFIG_FS_DAX */