// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2020 Intel Corporation. All rights reserved. */
#include <linux/platform_device.h>
#include <linux/memregion.h>
#include <linux/workqueue.h>
#include <linux/einj-cxl.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/node.h>
#include <cxlmem.h>
#include <cxlpci.h>
#include <cxl.h>
#include "core.h"

/**
 * DOC: cxl core
 *
 * The CXL core provides a set of interfaces that can be consumed by CXL aware
 * drivers. The interfaces allow for creation, modification, and destruction of
 * regions, memory devices, ports, and decoders. CXL aware drivers must register
 * with the CXL core via these interfaces in order to participate in
 * cross-device interleave coordination. The CXL core also establishes and
 * maintains the bridge to the nvdimm subsystem.
 *
 * The CXL core introduces a sysfs hierarchy to control the devices that are
 * instantiated by the core.
 */
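
/*
 * For example, the ports and decoders registered below appear under
 * /sys/bus/cxl/devices/ (portN, decoderN.M, etc.), which is the interface
 * consumed by user tooling such as cxl-cli. The exact attribute set is
 * defined by the attribute groups later in this file.
 */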

/*
 * All changes to the interleave configuration occur with this lock held
 * for write.
 */
DECLARE_RWSEM();

static DEFINE_IDA(cxl_port_ida);
static DEFINE_XARRAY(cxl_root_buses);

int cxl_num_decoders_committed(struct cxl_port *port)
{}

static ssize_t devtype_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{}
static DEVICE_ATTR_RO(devtype);

static int cxl_device_id(const struct device *dev)
{}

static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{}
static DEVICE_ATTR_RO(modalias);

static struct attribute *cxl_base_attributes[] =;

struct attribute_group cxl_base_attribute_group =;

static ssize_t start_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{}
static DEVICE_ATTR_ADMIN_RO(start);

static ssize_t size_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{}
static DEVICE_ATTR_RO(size);

#define CXL_DECODER_FLAG_ATTR(name, flag)

CXL_DECODER_FLAG_ATTR();
CXL_DECODER_FLAG_ATTR();
CXL_DECODER_FLAG_ATTR();
CXL_DECODER_FLAG_ATTR();
CXL_DECODER_FLAG_ATTR();

static ssize_t target_type_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{}
static DEVICE_ATTR_RO(target_type);

static ssize_t emit_target_list(struct cxl_switch_decoder *cxlsd, char *buf)
{}

static ssize_t target_list_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{}
static DEVICE_ATTR_RO(target_list);

static ssize_t mode_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{}

static ssize_t mode_store(struct device *dev, struct device_attribute *attr,
			  const char *buf, size_t len)
{}
static DEVICE_ATTR_RW(mode);

static ssize_t dpa_resource_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{}
static DEVICE_ATTR_RO(dpa_resource);

static ssize_t dpa_size_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{}

static ssize_t dpa_size_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t len)
{}
static DEVICE_ATTR_RW(dpa_size);

static ssize_t interleave_granularity_show(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{}

static DEVICE_ATTR_RO(interleave_granularity);

static ssize_t interleave_ways_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{}

static DEVICE_ATTR_RO(interleave_ways);

static ssize_t qos_class_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{}
static DEVICE_ATTR_RO(qos_class);

static struct attribute *cxl_decoder_base_attrs[] =;

static struct attribute_group cxl_decoder_base_attribute_group =;

static struct attribute *cxl_decoder_root_attrs[] =;

static bool can_create_pmem(struct cxl_root_decoder *cxlrd)
{}

static bool can_create_ram(struct cxl_root_decoder *cxlrd)
{}

static umode_t cxl_root_decoder_visible(struct kobject *kobj, struct attribute *a, int n)
{}

static struct attribute_group cxl_decoder_root_attribute_group =;

static const struct attribute_group *cxl_decoder_root_attribute_groups[] =;

static struct attribute *cxl_decoder_switch_attrs[] =;

static struct attribute_group cxl_decoder_switch_attribute_group =;

static const struct attribute_group *cxl_decoder_switch_attribute_groups[] =;

static struct attribute *cxl_decoder_endpoint_attrs[] =;

static struct attribute_group cxl_decoder_endpoint_attribute_group =;

static const struct attribute_group *cxl_decoder_endpoint_attribute_groups[] =;

static void __cxl_decoder_release(struct cxl_decoder *cxld)
{}

static void cxl_endpoint_decoder_release(struct device *dev)
{}

static void cxl_switch_decoder_release(struct device *dev)
{}

struct cxl_root_decoder *to_cxl_root_decoder(struct device *dev)
{}
EXPORT_SYMBOL_NS_GPL();

static void cxl_root_decoder_release(struct device *dev)
{}

static const struct device_type cxl_decoder_endpoint_type =;

static const struct device_type cxl_decoder_switch_type =;

static const struct device_type cxl_decoder_root_type =;

bool is_endpoint_decoder(struct device *dev)
{}
EXPORT_SYMBOL_NS_GPL();

bool is_root_decoder(struct device *dev)
{}
EXPORT_SYMBOL_NS_GPL();

bool is_switch_decoder(struct device *dev)
{}
EXPORT_SYMBOL_NS_GPL();

struct cxl_decoder *to_cxl_decoder(struct device *dev)
{}
EXPORT_SYMBOL_NS_GPL();

struct cxl_endpoint_decoder *to_cxl_endpoint_decoder(struct device *dev)
{}
EXPORT_SYMBOL_NS_GPL();

struct cxl_switch_decoder *to_cxl_switch_decoder(struct device *dev)
{}
EXPORT_SYMBOL_NS_GPL();

static void cxl_ep_release(struct cxl_ep *ep)
{}

static void cxl_ep_remove(struct cxl_port *port, struct cxl_ep *ep)
{}

static void cxl_port_release(struct device *dev)
{}

static ssize_t decoders_committed_show(struct device *dev,
				       struct device_attribute *attr, char *buf)
{}

static DEVICE_ATTR_RO(decoders_committed);

static struct attribute *cxl_port_attrs[] =;

static struct attribute_group cxl_port_attribute_group =;

static const struct attribute_group *cxl_port_attribute_groups[] =;

static const struct device_type cxl_port_type =;

bool is_cxl_port(const struct device *dev)
{}
EXPORT_SYMBOL_NS_GPL();

struct cxl_port *to_cxl_port(const struct device *dev)
{}
EXPORT_SYMBOL_NS_GPL();

static void unregister_port(void *_port)
{}

static void cxl_unlink_uport(void *_port)
{}

static int devm_cxl_link_uport(struct device *host, struct cxl_port *port)
{}

static void cxl_unlink_parent_dport(void *_port)
{}

static int devm_cxl_link_parent_dport(struct device *host,
				      struct cxl_port *port,
				      struct cxl_dport *parent_dport)
{}

static struct lock_class_key cxl_port_key;

static struct cxl_port *cxl_port_alloc(struct device *uport_dev,
				       struct cxl_dport *parent_dport)
{}

static int cxl_setup_comp_regs(struct device *host, struct cxl_register_map *map,
			       resource_size_t component_reg_phys)
{}

static int cxl_port_setup_regs(struct cxl_port *port,
			resource_size_t component_reg_phys)
{}

static int cxl_dport_setup_regs(struct device *host, struct cxl_dport *dport,
				resource_size_t component_reg_phys)
{}

DEFINE_SHOW_ATTRIBUTE();

static int cxl_einj_inject(void *data, u64 type)
{}
DEFINE_DEBUGFS_ATTRIBUTE();

static void cxl_debugfs_create_dport_dir(struct cxl_dport *dport)
{}

static struct cxl_port *__devm_cxl_add_port(struct device *host,
					    struct device *uport_dev,
					    resource_size_t component_reg_phys,
					    struct cxl_dport *parent_dport)
{}

/**
 * devm_cxl_add_port - register a cxl_port in CXL memory decode hierarchy
 * @host: host device for devm operations
 * @uport_dev: "physical" device implementing this upstream port
 * @component_reg_phys: (optional) for configurable cxl_port instances
 * @parent_dport: next hop up in the CXL memory decode hierarchy
 */
struct cxl_port *devm_cxl_add_port(struct device *host,
				   struct device *uport_dev,
				   resource_size_t component_reg_phys,
				   struct cxl_dport *parent_dport)
{}
EXPORT_SYMBOL_NS_GPL();
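
/*
 * Illustrative sketch (not part of the upstream flow; the identifiers are
 * assumptions): a caller that has already resolved @host, @uport_dev and,
 * for non-root ports, @parent_dport would typically do:
 *
 *	struct cxl_port *port;
 *
 *	port = devm_cxl_add_port(host, uport_dev, CXL_RESOURCE_NONE,
 *				 parent_dport);
 *	if (IS_ERR(port))
 *		return PTR_ERR(port);
 *
 * CXL_RESOURCE_NONE is passed when no component registers are available
 * for this port.
 */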

struct cxl_root *devm_cxl_add_root(struct device *host,
				   const struct cxl_root_ops *ops)
{}
EXPORT_SYMBOL_NS_GPL();

struct pci_bus *cxl_port_to_pci_bus(struct cxl_port *port)
{}
EXPORT_SYMBOL_NS_GPL();

static void unregister_pci_bus(void *uport_dev)
{}

int devm_cxl_register_pci_bus(struct device *host, struct device *uport_dev,
			      struct pci_bus *bus)
{}
EXPORT_SYMBOL_NS_GPL();

static bool dev_is_cxl_root_child(struct device *dev)
{}

struct cxl_root *find_cxl_root(struct cxl_port *port)
{}
EXPORT_SYMBOL_NS_GPL();

void put_cxl_root(struct cxl_root *cxl_root)
{}
EXPORT_SYMBOL_NS_GPL();

static struct cxl_dport *find_dport(struct cxl_port *port, int id)
{}

static int add_dport(struct cxl_port *port, struct cxl_dport *dport)
{}

/*
 * Since root-level CXL dports cannot be enumerated by PCI, they are not
 * enumerated by the common port driver that acquires the port lock over
 * dport add/remove. Instead, root dports are manually added by a
 * platform driver, and cond_cxl_root_lock() is used to take the missing
 * port lock in that case.
 */
static void cond_cxl_root_lock(struct cxl_port *port)
{}

static void cond_cxl_root_unlock(struct cxl_port *port)
{}

static void cxl_dport_remove(void *data)
{}

static void cxl_dport_unlink(void *data)
{}

static struct cxl_dport *
__devm_cxl_add_dport(struct cxl_port *port, struct device *dport_dev,
		     int port_id, resource_size_t component_reg_phys,
		     resource_size_t rcrb)
{}

/**
 * devm_cxl_add_dport - append VH downstream port data to a cxl_port
 * @port: the cxl_port that references this dport
 * @dport_dev: firmware or PCI device representing the dport
 * @port_id: identifier for this dport in a decoder's target list
 * @component_reg_phys: optional location of CXL component registers
 *
 * Note that dports are appended to the devm release actions of either
 * the port's host (for root ports) or the port itself (for switch
 * ports).
 */
struct cxl_dport *devm_cxl_add_dport(struct cxl_port *port,
				     struct device *dport_dev, int port_id,
				     resource_size_t component_reg_phys)
{}
EXPORT_SYMBOL_NS_GPL();
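
/*
 * Sketch of typical usage (names are hypothetical): a host bridge or switch
 * driver adds one dport per downstream port it enumerates, keyed by the
 * hardware port id that decoders will later reference in their target lists:
 *
 *	struct cxl_dport *dport;
 *
 *	dport = devm_cxl_add_dport(port, &pdev->dev, port_num,
 *				   CXL_RESOURCE_NONE);
 *	if (IS_ERR(dport))
 *		return PTR_ERR(dport);
 */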

/**
 * devm_cxl_add_rch_dport - append RCH downstream port data to a cxl_port
 * @port: the cxl_port that references this dport
 * @dport_dev: firmware or PCI device representing the dport
 * @port_id: identifier for this dport in a decoder's target list
 * @rcrb: mandatory location of a Root Complex Register Block
 *
 * See CXL 3.0 9.11.8 CXL Devices Attached to an RCH
 */
struct cxl_dport *devm_cxl_add_rch_dport(struct cxl_port *port,
					 struct device *dport_dev, int port_id,
					 resource_size_t rcrb)
{}
EXPORT_SYMBOL_NS_GPL();
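
/*
 * Sketch (hypothetical @rcrb_base): the RCH flavor is identical from the
 * caller's perspective, except that the mandatory RCRB base address is
 * passed instead of an optional component register block:
 *
 *	dport = devm_cxl_add_rch_dport(port, &pdev->dev, port_num, rcrb_base);
 */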

static int add_ep(struct cxl_ep *new)
{}

/**
 * cxl_add_ep - register an endpoint's interest in a port
 * @dport: the dport that routes to @ep_dev
 * @ep_dev: device representing the endpoint
 *
 * Intermediate CXL ports are scanned based on the arrival of endpoints.
 * When those endpoints depart, the port can be destroyed once all
 * endpoints that care about that port have been removed.
 */
static int cxl_add_ep(struct cxl_dport *dport, struct device *ep_dev)
{}

struct cxl_find_port_ctx {};

static int match_port_by_dport(struct device *dev, const void *data)
{}

static struct cxl_port *__find_cxl_port(struct cxl_find_port_ctx *ctx)
{}

static struct cxl_port *find_cxl_port(struct device *dport_dev,
				      struct cxl_dport **dport)
{}

static struct cxl_port *find_cxl_port_at(struct cxl_port *parent_port,
					 struct device *dport_dev,
					 struct cxl_dport **dport)
{}

/*
 * All users of grandparent() are using it to walk a PCIe-like switch port
 * hierarchy. A PCIe switch is composed of a bridge device representing the
 * upstream switch port and N bridges representing the downstream switch ports.
 * When switches are cascaded, the grandparent of a downstream switch port is
 * another downstream switch port in the immediate ancestor switch.
 */
static struct device *grandparent(struct device *dev)
{}
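
/*
 * A minimal sketch of the walk described above, assuming the usual
 * dev->parent chaining of PCIe bridge devices (illustrative, not
 * necessarily the exact upstream body):
 *
 *	if (dev && dev->parent)
 *		return dev->parent->parent;
 *	return NULL;
 */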

static struct device *endpoint_host(struct cxl_port *endpoint)
{}

static void delete_endpoint(void *data)
{}

int cxl_endpoint_autoremove(struct cxl_memdev *cxlmd, struct cxl_port *endpoint)
{}
EXPORT_SYMBOL_NS_GPL();

/*
 * The natural end of life of a non-root 'cxl_port' is when its parent port goes
 * through a ->remove() event ("top-down" unregistration). The unnatural trigger
 * for a port to be unregistered is when all memdevs beneath that port have gone
 * through ->remove(). This "bottom-up" removal selectively removes individual
 * child ports manually. This depends on devm_cxl_add_port() not changing its
 * devm action registration order, and on dports having already been
 * destroyed by reap_dports().
 */
static void delete_switch_port(struct cxl_port *port)
{}

static void reap_dports(struct cxl_port *port)
{}

struct detach_ctx {};

static int port_has_memdev(struct device *dev, const void *data)
{}

static void cxl_detach_ep(void *data)
{}

static resource_size_t find_component_registers(struct device *dev)
{}

static int add_port_attach_ep(struct cxl_memdev *cxlmd,
			      struct device *uport_dev,
			      struct device *dport_dev)
{}

int devm_cxl_enumerate_ports(struct cxl_memdev *cxlmd)
{}
EXPORT_SYMBOL_NS_GPL();

struct cxl_port *cxl_pci_find_port(struct pci_dev *pdev,
				   struct cxl_dport **dport)
{}
EXPORT_SYMBOL_NS_GPL();

struct cxl_port *cxl_mem_find_port(struct cxl_memdev *cxlmd,
				   struct cxl_dport **dport)
{}
EXPORT_SYMBOL_NS_GPL();

static int decoder_populate_targets(struct cxl_switch_decoder *cxlsd,
				    struct cxl_port *port, int *target_map)
{}

static struct lock_class_key cxl_decoder_key;

/**
 * cxl_decoder_init - Common decoder setup / initialization
 * @port: owning port of this decoder
 * @cxld: common decoder properties to initialize
 *
 * A port may contain one or more decoders. Each of those decoders
 * enable some address space for CXL.mem utilization. A decoder is
 * expected to be configured by the caller before registering via
 * cxl_decoder_add()
 */
static int cxl_decoder_init(struct cxl_port *port, struct cxl_decoder *cxld)
{}

static int cxl_switch_decoder_init(struct cxl_port *port,
				   struct cxl_switch_decoder *cxlsd,
				   int nr_targets)
{}

/**
 * cxl_root_decoder_alloc - Allocate a root level decoder
 * @port: owning CXL root of this decoder
 * @nr_targets: static number of downstream targets
 *
 * Return: A new cxl decoder to be registered by cxl_decoder_add(). A
 * 'CXL root' decoder is one that decodes from a top-level / static platform
 * firmware description of CXL resources into a CXL standard decode
 * topology.
 */
struct cxl_root_decoder *cxl_root_decoder_alloc(struct cxl_port *port,
						unsigned int nr_targets)
{}
EXPORT_SYMBOL_NS_GPL();

/**
 * cxl_switch_decoder_alloc - Allocate a switch level decoder
 * @port: owning CXL switch port of this decoder
 * @nr_targets: max number of dynamically addressable downstream targets
 *
 * Return: A new cxl decoder to be registered by cxl_decoder_add(). A
 * 'switch' decoder is any decoder that can be enumerated by PCIe
 * topology and the HDM Decoder Capability. This includes the decoders
 * that sit between Switch Upstream Ports / Switch Downstream Ports and
 * Host Bridges / Root Ports.
 */
struct cxl_switch_decoder *cxl_switch_decoder_alloc(struct cxl_port *port,
						    unsigned int nr_targets)
{}
EXPORT_SYMBOL_NS_GPL();

/**
 * cxl_endpoint_decoder_alloc - Allocate an endpoint decoder
 * @port: owning port of this decoder
 *
 * Return: A new cxl decoder to be registered by cxl_decoder_add()
 */
struct cxl_endpoint_decoder *cxl_endpoint_decoder_alloc(struct cxl_port *port)
{}
EXPORT_SYMBOL_NS_GPL();

/**
 * cxl_decoder_add_locked - Add a decoder with targets
 * @cxld: The cxl decoder allocated by cxl_<type>_decoder_alloc()
 * @target_map: A list of downstream ports that this decoder can direct memory
 *              traffic to. These numbers should correspond with the port number
 *              in the PCIe Link Capabilities structure.
 *
 * Certain types of decoders may not have any targets. The main example of this
 * is an endpoint device. A more awkward example is a host bridge whose root
 * ports get hot added (technically possible, though unlikely).
 *
 * This is the locked variant of cxl_decoder_add().
 *
 * Context: Process context. Expects the device lock of the port that owns the
 *	    @cxld to be held.
 *
 * Return: Negative error code if the decoder wasn't properly configured; else
 *	   returns 0.
 */
int cxl_decoder_add_locked(struct cxl_decoder *cxld, int *target_map)
{}
EXPORT_SYMBOL_NS_GPL();

/**
 * cxl_decoder_add - Add a decoder with targets
 * @cxld: The cxl decoder allocated by cxl_<type>_decoder_alloc()
 * @target_map: A list of downstream ports that this decoder can direct memory
 *              traffic to. These numbers should correspond with the port number
 *              in the PCIe Link Capabilities structure.
 *
 * This is the unlocked variant of cxl_decoder_add_locked().
 * See cxl_decoder_add_locked().
 *
 * Context: Process context. Takes and releases the device lock of the port that
 *	    owns the @cxld.
 */
int cxl_decoder_add(struct cxl_decoder *cxld, int *target_map)
{}
EXPORT_SYMBOL_NS_GPL();
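
/*
 * Sketch of the overall allocate/configure/register flow for a switch
 * decoder (illustrative only; @port, @host, the interleave settings, and
 * the target ids below are assumptions):
 *
 *	int target_map[CXL_DECODER_MAX_INTERLEAVE] = { 0, 1 };
 *	struct cxl_switch_decoder *cxlsd;
 *	int rc;
 *
 *	cxlsd = cxl_switch_decoder_alloc(port, 2);
 *	if (IS_ERR(cxlsd))
 *		return PTR_ERR(cxlsd);
 *	... caller programs cxlsd->cxld (range, interleave ways, etc.) ...
 *	rc = cxl_decoder_add(&cxlsd->cxld, target_map);
 *	if (rc)
 *		return rc;
 *	return cxl_decoder_autoremove(host, &cxlsd->cxld);
 */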

static void cxld_unregister(void *dev)
{}

int cxl_decoder_autoremove(struct device *host, struct cxl_decoder *cxld)
{}
EXPORT_SYMBOL_NS_GPL();

/**
 * __cxl_driver_register - register a driver for the cxl bus
 * @cxl_drv: cxl driver structure to attach
 * @owner: owning module/driver
 * @modname: KBUILD_MODNAME for parent driver
 */
int __cxl_driver_register(struct cxl_driver *cxl_drv, struct module *owner,
			  const char *modname)
{}
EXPORT_SYMBOL_NS_GPL();
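
/*
 * Typical usage sketch: a driver fills out a struct cxl_driver and registers
 * it via the cxl_driver_register() / module_cxl_driver() wrappers (the names
 * my_cxl_drv and my_cxl_probe are hypothetical):
 *
 *	static struct cxl_driver my_cxl_drv = {
 *		.name = "my_cxl_drv",
 *		.probe = my_cxl_probe,
 *		.id = CXL_DEVICE_MEMORY_EXPANDER,
 *	};
 *	module_cxl_driver(my_cxl_drv);
 */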

void cxl_driver_unregister(struct cxl_driver *cxl_drv)
{}
EXPORT_SYMBOL_NS_GPL();

static int cxl_bus_uevent(const struct device *dev, struct kobj_uevent_env *env)
{}

static int cxl_bus_match(struct device *dev, const struct device_driver *drv)
{}

static int cxl_bus_probe(struct device *dev)
{}

static void cxl_bus_remove(struct device *dev)
{}

static struct workqueue_struct *cxl_bus_wq;

static void cxl_bus_rescan_queue(struct work_struct *w)
{}

void cxl_bus_rescan(void)
{}
EXPORT_SYMBOL_NS_GPL();

void cxl_bus_drain(void)
{}
EXPORT_SYMBOL_NS_GPL();

bool schedule_cxl_memdev_detach(struct cxl_memdev *cxlmd)
{}
EXPORT_SYMBOL_NS_GPL();

static void add_latency(struct access_coordinate *c, long latency)
{}

static bool coordinates_valid(struct access_coordinate *c)
{}

static void set_min_bandwidth(struct access_coordinate *c, unsigned int bw)
{}

static void set_access_coordinates(struct access_coordinate *out,
				   struct access_coordinate *in)
{}

static bool parent_port_is_cxl_root(struct cxl_port *port)
{}

/**
 * cxl_endpoint_get_perf_coordinates - Retrieve performance numbers stored in
 *				       the dports of the CXL path
 * @port: endpoint cxl_port
 * @coord: output performance data
 *
 * Return: errno on failure, 0 on success.
 */
int cxl_endpoint_get_perf_coordinates(struct cxl_port *port,
				      struct access_coordinate *coord)
{}
EXPORT_SYMBOL_NS_GPL();
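
/*
 * Sketch, assuming the caller provides one struct access_coordinate per
 * access class (see enum access_coordinate_class) and an @endpoint port:
 *
 *	struct access_coordinate c[ACCESS_COORDINATE_MAX];
 *
 *	if (cxl_endpoint_get_perf_coordinates(endpoint, c) == 0)
 *		... feed c[] into bandwidth/latency qos bookkeeping ...
 */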

/* for user tooling to ensure port disable work has completed */
static ssize_t flush_store(const struct bus_type *bus, const char *buf, size_t count)
{}

static BUS_ATTR_WO(flush);
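
/*
 * This surfaces as a write-only /sys/bus/cxl/flush attribute; e.g. test
 * harnesses write to it ("echo 1 > /sys/bus/cxl/flush") to wait for queued
 * port teardown work to drain before proceeding.
 */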

static struct attribute *cxl_bus_attributes[] =;

static struct attribute_group cxl_bus_attribute_group =;

static const struct attribute_group *cxl_bus_attribute_groups[] =;

struct bus_type cxl_bus_type =;
EXPORT_SYMBOL_NS_GPL();

static struct dentry *cxl_debugfs;

struct dentry *cxl_debugfs_create_dir(const char *dir)
{}
EXPORT_SYMBOL_NS_GPL();

static __init int cxl_core_init(void)
{}

static void cxl_core_exit(void)
{}

subsys_initcall(cxl_core_init);
module_exit(cxl_core_exit);
MODULE_DESCRIPTION();
MODULE_LICENSE();
MODULE_IMPORT_NS();