linux/drivers/nvdimm/region_devs.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 */
#include <linux/scatterlist.h>
#include <linux/memregion.h>
#include <linux/highmem.h>
#include <linux/kstrtox.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/sort.h>
#include <linux/io.h>
#include <linux/nd.h>
#include "nd-core.h"
#include "nd.h"

/*
 * For readq() and writeq() on 32-bit builds, the hi-lo, lo-hi order is
 * irrelevant.
 */
#include <linux/io-64-nonatomic-hi-lo.h>

static DEFINE_PER_CPU(int, flush_idx);

static int nvdimm_map_flush(struct device *dev, struct nvdimm *nvdimm, int dimm,
		struct nd_region_data *ndrd)
{}

static int nd_region_invalidate_memregion(struct nd_region *nd_region)
{}

int nd_region_activate(struct nd_region *nd_region)
{}

static void nd_region_release(struct device *dev)
{}

struct nd_region *to_nd_region(struct device *dev)
{}
EXPORT_SYMBOL_GPL(to_nd_region);

struct device *nd_region_dev(struct nd_region *nd_region)
{}
EXPORT_SYMBOL_GPL(nd_region_dev);

void *nd_region_provider_data(struct nd_region *nd_region)
{}
EXPORT_SYMBOL_GPL(nd_region_provider_data);

/**
 * nd_region_to_nstype() - map a region to an integer namespace type
 * @nd_region: region-device to interrogate
 *
 * The result doubles as the region's 'nstype' sysfs attribute, an input to
 * the MODALIAS for namespace devices, and the bit number an nvdimm_bus uses
 * to match namespace devices with namespace drivers.
 */
int nd_region_to_nstype(struct nd_region *nd_region)
{}
EXPORT_SYMBOL(nd_region_to_nstype);
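
/*
 * Hedged illustration, not part of the original file: the nstype value
 * typically ends up in a namespace device's modalias (mainline formats it
 * as "nd:t%d" via ND_DEVICE_MODALIAS_FMT) and is the bit number a namespace
 * driver sets in its type mask to match.  The helper below is hypothetical
 * and only shows the shape of such a consumer.
 */
static int example_region_modalias(struct nd_region *nd_region, char *buf,
		size_t len)
{
	return scnprintf(buf, len, "nd:t%d", nd_region_to_nstype(nd_region));
}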

static unsigned long long region_size(struct nd_region *nd_region)
{}

static ssize_t size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{}
static DEVICE_ATTR_RO(size);

static ssize_t deep_flush_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{}

static ssize_t deep_flush_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t len)
{}
static DEVICE_ATTR_RW(deep_flush);

static ssize_t mappings_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{}
static DEVICE_ATTR_RO(mappings);

static ssize_t nstype_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{}
static DEVICE_ATTR_RO(nstype);

static ssize_t set_cookie_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{}
static DEVICE_ATTR_RO(set_cookie);

resource_size_t nd_region_available_dpa(struct nd_region *nd_region)
{}

resource_size_t nd_region_allocatable_dpa(struct nd_region *nd_region)
{}

static ssize_t available_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{}
static DEVICE_ATTR_RO(available_size);

static ssize_t max_available_extent_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{}
static DEVICE_ATTR_RO(max_available_extent);

static ssize_t init_namespaces_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{}
static DEVICE_ATTR_RO(init_namespaces);

static ssize_t namespace_seed_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{}
static DEVICE_ATTR_RO(namespace_seed);

static ssize_t btt_seed_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{}
static DEVICE_ATTR_RO(btt_seed);

static ssize_t pfn_seed_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{}
static DEVICE_ATTR_RO(pfn_seed);

static ssize_t dax_seed_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{}
static DEVICE_ATTR_RO(dax_seed);

static ssize_t read_only_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{}

static int revalidate_read_only(struct device *dev, void *data)
{}

static ssize_t read_only_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{}
static DEVICE_ATTR_RW(read_only);

static ssize_t align_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{}

static ssize_t align_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{}
static DEVICE_ATTR_RW(align);

static ssize_t region_badblocks_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{}
static DEVICE_ATTR(badblocks, 0444, region_badblocks_show, NULL);

static ssize_t resource_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{}
static DEVICE_ATTR_ADMIN_RO(resource);

static ssize_t persistence_domain_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{}
static DEVICE_ATTR_RO(persistence_domain);

static struct attribute *nd_region_attributes[] =;

static umode_t region_visible(struct kobject *kobj, struct attribute *a, int n)
{}

static ssize_t mappingN(struct device *dev, char *buf, int n)
{}

#define REGION_MAPPING(idx)
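
/*
 * The body of REGION_MAPPING() is elided here; in mainline it expands to
 * roughly the following per-index show routine plus a read-only device
 * attribute (a sketch, not verified against this tree):
 *
 *	static ssize_t mapping##idx##_show(struct device *dev,
 *			struct device_attribute *attr, char *buf)
 *	{
 *		return mappingN(dev, buf, idx);
 *	}
 *	static DEVICE_ATTR_RO(mapping##idx)
 */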

/*
 * 32 should be enough for a while; even in the presence of socket
 * interleave, a 32-way interleave set is a degenerate case.
 */
REGION_MAPPING(0);
REGION_MAPPING(1);
REGION_MAPPING(2);
REGION_MAPPING(3);
REGION_MAPPING(4);
REGION_MAPPING(5);
REGION_MAPPING(6);
REGION_MAPPING(7);
REGION_MAPPING(8);
REGION_MAPPING(9);
REGION_MAPPING(10);
REGION_MAPPING(11);
REGION_MAPPING(12);
REGION_MAPPING(13);
REGION_MAPPING(14);
REGION_MAPPING(15);
REGION_MAPPING(16);
REGION_MAPPING(17);
REGION_MAPPING(18);
REGION_MAPPING(19);
REGION_MAPPING(20);
REGION_MAPPING(21);
REGION_MAPPING(22);
REGION_MAPPING(23);
REGION_MAPPING(24);
REGION_MAPPING(25);
REGION_MAPPING(26);
REGION_MAPPING(27);
REGION_MAPPING(28);
REGION_MAPPING(29);
REGION_MAPPING(30);
REGION_MAPPING(31);

static umode_t mapping_visible(struct kobject *kobj, struct attribute *a, int n)
{}

static struct attribute *mapping_attributes[] =;

static const struct attribute_group nd_mapping_attribute_group =;

static const struct attribute_group nd_region_attribute_group =;

static const struct attribute_group *nd_region_attribute_groups[] =;

static const struct device_type nd_pmem_device_type =;

static const struct device_type nd_volatile_device_type =;

bool is_nd_pmem(const struct device *dev)
{}

bool is_nd_volatile(const struct device *dev)
{}

u64 nd_region_interleave_set_cookie(struct nd_region *nd_region,
		struct nd_namespace_index *nsindex)
{}

u64 nd_region_interleave_set_altcookie(struct nd_region *nd_region)
{}

void nd_mapping_free_labels(struct nd_mapping *nd_mapping)
{}

/*
 * When a namespace is activated, create new seeds for the next
 * namespace, or namespace-personality, to be configured.
 */
void nd_region_advance_seeds(struct nd_region *nd_region, struct device *dev)
{}

/**
 * nd_region_acquire_lane - allocate and lock a lane
 * @nd_region: region id and number of lanes possible
 *
 * A lane correlates to a BLK-data-window and/or a log slot in the BTT.
 * We optimize for the common case where there are 256 lanes, one
 * per-cpu.  For larger systems we need to lock to share lanes.  For now
 * this implementation assumes the cost of maintaining an allocator for
 * free lanes is on the order of the lock hold time, so it implements a
 * static lane = cpu % num_lanes mapping.
 *
 * In the case of a BTT instance on top of a BLK namespace, a lane may be
 * acquired recursively.  We lock on the first instance.
 *
 * In the case of a BTT instance on top of PMEM, we only acquire a lane
 * for the BTT metadata updates.
 */
unsigned int nd_region_acquire_lane(struct nd_region *nd_region)
{}
EXPORT_SYMBOL(nd_region_acquire_lane);

void nd_region_release_lane(struct nd_region *nd_region, unsigned int lane)
{}
EXPORT_SYMBOL(nd_region_release_lane);
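
/*
 * Hedged usage sketch (hypothetical helper, modeled on how the BTT takes a
 * lane around its metadata updates): hold the lane only for the per-lane
 * work and release it on the same code path.
 */
static int example_lane_scoped_io(struct nd_region *nd_region)
{
	unsigned int lane = nd_region_acquire_lane(nd_region);

	/* ... per-lane work, e.g. a BTT log-slot update, goes here ... */

	nd_region_release_lane(nd_region, lane);
	return 0;
}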

/*
 * PowerPC requires this alignment for memremap_pages(). All other archs
 * should be ok with SUBSECTION_SIZE (see memremap_compat_align()).
 */
#define MEMREMAP_COMPAT_ALIGN_MAX

static unsigned long default_align(struct nd_region *nd_region)
{}

static struct lock_class_key nvdimm_region_key;

static struct nd_region *nd_region_create(struct nvdimm_bus *nvdimm_bus,
		struct nd_region_desc *ndr_desc,
		const struct device_type *dev_type, const char *caller)
{}

struct nd_region *nvdimm_pmem_region_create(struct nvdimm_bus *nvdimm_bus,
		struct nd_region_desc *ndr_desc)
{}
EXPORT_SYMBOL_GPL(nvdimm_pmem_region_create);

struct nd_region *nvdimm_volatile_region_create(struct nvdimm_bus *nvdimm_bus,
		struct nd_region_desc *ndr_desc)
{}
EXPORT_SYMBOL_GPL(nvdimm_volatile_region_create);

void nvdimm_region_delete(struct nd_region *nd_region)
{}
EXPORT_SYMBOL_GPL(nvdimm_region_delete);

int nvdimm_flush(struct nd_region *nd_region, struct bio *bio)
{}
/**
 * generic_nvdimm_flush() - flush any posted write queues between the CPU
 * and pmem media
 * @nd_region: interleaved pmem region
 */
int generic_nvdimm_flush(struct nd_region *nd_region)
{}
EXPORT_SYMBOL_GPL(generic_nvdimm_flush);
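
/*
 * Hedged sketch of the write-pending-queue flush that the elided body above
 * implements in mainline: order prior stores, then write to each DIMM's
 * flush hint address.  The hint-index handling here is illustrative only.
 */
static void example_wpq_flush(struct nd_region *nd_region,
		struct nd_region_data *ndrd, int idx)
{
	int i;

	wmb();	/* make prior writes to pmem globally visible first */
	for (i = 0; i < nd_region->ndr_mappings; i++)
		if (ndrd_get_flush_wpq(ndrd, i, 0))
			writeq(1, ndrd_get_flush_wpq(ndrd, i, idx));
	wmb();	/* ensure the flush hint writes themselves complete */
}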

/**
 * nvdimm_has_flush - determine write flushing requirements
 * @nd_region: interleaved pmem region
 *
 * Returns 1 if writes require flushing
 * Returns 0 if writes do not require flushing
 * Returns -ENXIO if flushing capability cannot be determined
 */
int nvdimm_has_flush(struct nd_region *nd_region)
{}
EXPORT_SYMBOL_GPL(nvdimm_has_flush);
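
/*
 * Hedged usage sketch (hypothetical caller): the tri-state return value
 * documented above is typically consumed along these lines.
 */
static bool example_writes_need_flush(struct nd_region *nd_region)
{
	int rc = nvdimm_has_flush(nd_region);

	if (rc < 0)
		return true;	/* capability unknown, err on the safe side */
	return rc == 1;		/* 1: flush required, 0: not required */
}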

int nvdimm_has_cache(struct nd_region *nd_region)
{}
EXPORT_SYMBOL_GPL(nvdimm_has_cache);

bool is_nvdimm_sync(struct nd_region *nd_region)
{}
EXPORT_SYMBOL_GPL(is_nvdimm_sync);

struct conflict_context {};

static int region_conflict(struct device *dev, void *data)
{}

int nd_region_conflict(struct nd_region *nd_region, resource_size_t start,
		resource_size_t size)
{}

MODULE_IMPORT_NS();