linux/drivers/nvme/host/fc.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2016 Avago Technologies.  All rights reserved.
 */
#define pr_fmt(fmt)
#include <linux/module.h>
#include <linux/parser.h>
#include <uapi/scsi/fc/fc_fs.h>
#include <uapi/scsi/fc/fc_els.h>
#include <linux/delay.h>
#include <linux/overflow.h>
#include <linux/blk-cgroup.h>
#include "nvme.h"
#include "fabrics.h"
#include <linux/nvme-fc-driver.h>
#include <linux/nvme-fc.h>
#include "fc.h"
#include <scsi/scsi_transport_fc.h>
#include <linux/blk-mq-pci.h>

/* *************************** Data Structures/Defines ****************** */


enum nvme_fc_queue_flags {};

#define NVME_FC_DEFAULT_DEV_LOSS_TMO
#define NVME_FC_DEFAULT_RECONNECT_TMO

struct nvme_fc_queue {} __aligned();	/* alignment for other things alloc'd with */

enum nvme_fcop_flags {};

struct nvmefc_ls_req_op {};

struct nvmefc_ls_rcv_op {} __aligned();	/* alignment for other things alloc'd with */

enum nvme_fcpop_state {};

struct nvme_fc_fcp_op {};

struct nvme_fcp_op_w_sgl {};

struct nvme_fc_lport {} __aligned();	/* alignment for other things alloc'd with */

struct nvme_fc_rport {} __aligned();	/* alignment for other things alloc'd with */

/* fc_ctrl flags values - specified as bit positions */
#define ASSOC_ACTIVE
#define ASSOC_FAILED
#define FCCTRL_TERMIO

struct nvme_fc_ctrl {};

static inline struct nvme_fc_ctrl *
to_fc_ctrl(struct nvme_ctrl *ctrl)
{}

static inline struct nvme_fc_lport *
localport_to_lport(struct nvme_fc_local_port *portptr)
{}

static inline struct nvme_fc_rport *
remoteport_to_rport(struct nvme_fc_remote_port *portptr)
{}

static inline struct nvmefc_ls_req_op *
ls_req_to_lsop(struct nvmefc_ls_req *lsreq)
{}

static inline struct nvme_fc_fcp_op *
fcp_req_to_fcp_op(struct nvmefc_fcp_req *fcpreq)
{}



/* *************************** Globals **************************** */


static DEFINE_SPINLOCK(nvme_fc_lock);

static LIST_HEAD(nvme_fc_lport_list);
static DEFINE_IDA(nvme_fc_local_port_cnt);
static DEFINE_IDA(nvme_fc_ctrl_cnt);

/*
 * These items are short-term. They will eventually be moved into
 * a generic FC class. See comments in module init.
 */
static struct device *fc_udev_device;

static void nvme_fc_complete_rq(struct request *rq);

/* *********************** FC-NVME Port Management ************************ */

static void __nvme_fc_delete_hw_queue(struct nvme_fc_ctrl *,
			struct nvme_fc_queue *, unsigned int);

static void nvme_fc_handle_ls_rqst_work(struct work_struct *work);


static void
nvme_fc_free_lport(struct kref *ref)
{}

static void
nvme_fc_lport_put(struct nvme_fc_lport *lport)
{}

static int
nvme_fc_lport_get(struct nvme_fc_lport *lport)
{}


static struct nvme_fc_lport *
nvme_fc_attach_to_unreg_lport(struct nvme_fc_port_info *pinfo,
			struct nvme_fc_port_template *ops,
			struct device *dev)
{}

/**
 * nvme_fc_register_localport - transport entry point called by an
 *                              LLDD to register the existence of an NVME
 *                              host FC port.
 * @pinfo:     pointer to information about the port to be registered
 * @template:  LLDD entrypoints and operational parameters for the port
 * @dev:       physical hardware device node port corresponds to. Will be
 *             used for DMA mappings
 * @portptr:   pointer to a local port pointer. Upon success, the routine
 *             will allocate a nvme_fc_local_port structure and place its
 *             address in the local port pointer. Upon failure, local port
 *             pointer will be set to 0.
 *
 * Returns:
 * a completion status. Must be 0 upon success; a negative errno
 * (ex: -ENXIO) upon failure.
 */
int
nvme_fc_register_localport(struct nvme_fc_port_info *pinfo,
			struct nvme_fc_port_template *template,
			struct device *dev,
			struct nvme_fc_local_port **portptr)
{}
EXPORT_SYMBOL_GPL();

/**
 * nvme_fc_unregister_localport - transport entry point called by an
 *                              LLDD to deregister/remove a previously
 *                              registered NVME host FC port.
 * @portptr: pointer to the (registered) local port that is to be deregistered.
 *
 * Returns:
 * a completion status. Must be 0 upon success; a negative errno
 * (ex: -ENXIO) upon failure.
 */
int
nvme_fc_unregister_localport(struct nvme_fc_local_port *portptr)
{}
EXPORT_SYMBOL_GPL();

/*
 * TRADDR strings, per FC-NVME, are fixed format:
 *   "nn-0x<16hexdigits>:pn-0x<16hexdigits>" - 43 characters
 * The udev event strings differ only in the prefix naming the
 * field being specified:
 *    "NVMEFC_HOST_TRADDR=" or "NVMEFC_TRADDR=" - 19 max characters
 *  19 + 43 + null_fudge = 64 characters
 */
#define FCNVME_TRADDR_LENGTH

static void
nvme_fc_signal_discovery_scan(struct nvme_fc_lport *lport,
		struct nvme_fc_rport *rport)
{}
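
/*
 * A minimal sketch (illustrative only, not part of the driver) of the
 * uevent signalling the comment above sizes its buffers for. The
 * "FC_EVENT=nvmediscovery" string and the localport/remoteport member
 * names are assumptions, as the structure definitions are elided here.
 */
static void
example_signal_discovery_scan(struct nvme_fc_lport *lport,
		struct nvme_fc_rport *rport)
{
	char hostaddr[64];	/* 19 + 43 + null_fudge, per the math above */
	char tgtaddr[64];
	char *envp[4] = { "FC_EVENT=nvmediscovery", hostaddr, tgtaddr, NULL };

	snprintf(hostaddr, sizeof(hostaddr),
		"NVMEFC_HOST_TRADDR=nn-0x%016llx:pn-0x%016llx",
		lport->localport.node_name, lport->localport.port_name);
	snprintf(tgtaddr, sizeof(tgtaddr),
		"NVMEFC_TRADDR=nn-0x%016llx:pn-0x%016llx",
		rport->remoteport.node_name, rport->remoteport.port_name);
	kobject_uevent_env(&fc_udev_device->kobj, KOBJ_CHANGE, envp);
}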

static void
nvme_fc_free_rport(struct kref *ref)
{}

static void
nvme_fc_rport_put(struct nvme_fc_rport *rport)
{}

static int
nvme_fc_rport_get(struct nvme_fc_rport *rport)
{}

static void
nvme_fc_resume_controller(struct nvme_fc_ctrl *ctrl)
{}

static struct nvme_fc_rport *
nvme_fc_attach_to_suspended_rport(struct nvme_fc_lport *lport,
				struct nvme_fc_port_info *pinfo)
{}

static inline void
__nvme_fc_set_dev_loss_tmo(struct nvme_fc_rport *rport,
			struct nvme_fc_port_info *pinfo)
{}

/**
 * nvme_fc_register_remoteport - transport entry point called by an
 *                              LLDD to register the existence of an NVME
 *                              subsystem FC port on its fabric.
 * @localport: pointer to the (registered) local port that the remote
 *             subsystem port is connected to.
 * @pinfo:     pointer to information about the port to be registered
 * @portptr:   pointer to a remote port pointer. Upon success, the routine
 *             will allocate a nvme_fc_remote_port structure and place its
 *             address in the remote port pointer. Upon failure, remote port
 *             pointer will be set to 0.
 *
 * Returns:
 * a completion status. Must be 0 upon success; a negative errno
 * (ex: -ENXIO) upon failure.
 */
int
nvme_fc_register_remoteport(struct nvme_fc_local_port *localport,
				struct nvme_fc_port_info *pinfo,
				struct nvme_fc_remote_port **portptr)
{}
EXPORT_SYMBOL_GPL();

static int
nvme_fc_abort_lsops(struct nvme_fc_rport *rport)
{}

static void
nvme_fc_ctrl_connectivity_loss(struct nvme_fc_ctrl *ctrl)
{}

/**
 * nvme_fc_unregister_remoteport - transport entry point called by an
 *                              LLDD to deregister/remove a previously
 *                              registered NVME subsystem FC port.
 * @portptr: pointer to the (registered) remote port that is to be
 *           deregistered.
 *
 * Returns:
 * a completion status. Must be 0 upon success; a negative errno
 * (ex: -ENXIO) upon failure.
 */
int
nvme_fc_unregister_remoteport(struct nvme_fc_remote_port *portptr)
{}
EXPORT_SYMBOL_GPL();

/**
 * nvme_fc_rescan_remoteport - transport entry point called by an
 *                              LLDD to request an nvme device rescan.
 * @remoteport: pointer to the (registered) remote port that is to be
 *              rescanned.
 *
 * Returns: N/A
 */
void
nvme_fc_rescan_remoteport(struct nvme_fc_remote_port *remoteport)
{}
EXPORT_SYMBOL_GPL();

int
nvme_fc_set_remoteport_devloss(struct nvme_fc_remote_port *portptr,
			u32 dev_loss_tmo)
{}
EXPORT_SYMBOL_GPL();


/* *********************** FC-NVME DMA Handling **************************** */

/*
 * The fcloop device passes in a NULL device pointer. Real LLDDs will
 * pass in a valid device pointer. If NULL is passed to the dma mapping
 * routines, depending on the platform, it may or may not succeed, and
 * may crash.
 *
 * As such:
 * Wrap all the dma routines and check the dev pointer.
 *
 * For simple mappings (those that return just a dma address), we'll
 * noop them, returning a dma address of 0.
 *
 * On more complex mappings (dma_map_sg), a pseudo routine fills
 * in the scatter list, setting all dma addresses to 0.
 */

static inline dma_addr_t
fc_dma_map_single(struct device *dev, void *ptr, size_t size,
		enum dma_data_direction dir)
{}
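
/*
 * A minimal sketch of the guard pattern described above (the real body
 * is elided): assuming only the standard dma_map_single() API, no-op
 * the mapping with a dma address of 0 when the LLDD supplied no device.
 */
static inline dma_addr_t
example_fc_dma_map_single(struct device *dev, void *ptr, size_t size,
		enum dma_data_direction dir)
{
	return dev ? dma_map_single(dev, ptr, size, dir) : (dma_addr_t)0L;
}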

static inline int
fc_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{}

static inline void
fc_dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
	enum dma_data_direction dir)
{}

static inline void
fc_dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir)
{}

static inline void
fc_dma_sync_single_for_device(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir)
{}

/* pseudo dma_map_sg call */
static int
fc_map_sg(struct scatterlist *sg, int nents)
{}
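
/*
 * A minimal sketch of the pseudo mapping described above, assuming the
 * standard scatterlist helpers: walk the list, give every entry a dma
 * address of 0, and report all entries as "mapped".
 */
static int
example_fc_map_sg(struct scatterlist *sg, int nents)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		s->dma_address = 0L;

	return nents;
}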

static inline int
fc_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{}

static inline void
fc_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{}

/* *********************** FC-NVME LS Handling **************************** */

static void nvme_fc_ctrl_put(struct nvme_fc_ctrl *);
static int nvme_fc_ctrl_get(struct nvme_fc_ctrl *);

static void nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg);

static void
__nvme_fc_finish_ls_req(struct nvmefc_ls_req_op *lsop)
{}

static int
__nvme_fc_send_ls_req(struct nvme_fc_rport *rport,
		struct nvmefc_ls_req_op *lsop,
		void (*done)(struct nvmefc_ls_req *req, int status))
{}

static void
nvme_fc_send_ls_req_done(struct nvmefc_ls_req *lsreq, int status)
{}

static int
nvme_fc_send_ls_req(struct nvme_fc_rport *rport, struct nvmefc_ls_req_op *lsop)
{}

static int
nvme_fc_send_ls_req_async(struct nvme_fc_rport *rport,
		struct nvmefc_ls_req_op *lsop,
		void (*done)(struct nvmefc_ls_req *req, int status))
{}

static int
nvme_fc_connect_admin_queue(struct nvme_fc_ctrl *ctrl,
	struct nvme_fc_queue *queue, u16 qsize, u16 ersp_ratio)
{}

static int
nvme_fc_connect_queue(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
			u16 qsize, u16 ersp_ratio)
{}

static void
nvme_fc_disconnect_assoc_done(struct nvmefc_ls_req *lsreq, int status)
{}

/*
 * This routine sends a FC-NVME LS to disconnect (aka terminate)
 * the FC-NVME Association.  Terminating the association also
 * terminates the FC-NVME connections (per queue, both admin and io
 * queues) that are part of the association. E.g. things are torn
 * down, and the related FC-NVME Association ID and Connection IDs
 * become invalid.
 *
 * The behavior of the fc-nvme initiator is such that its
 * understanding of the association and connections will implicitly
 * be torn down. The action is implicit as it may be due to a loss of
 * connectivity with the fc-nvme target, so a response may never be
 * received even if one was requested.  As such, the action of this
 * routine is to asynchronously send the LS, ignore any results of the
 * LS, and continue on with terminating the association. If the
 * fc-nvme target is present and receives the LS, it too can tear
 * down.
 */
static void
nvme_fc_xmt_disconnect_assoc(struct nvme_fc_ctrl *ctrl)
{}
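
/*
 * A minimal sketch of the fire-and-forget completion described above
 * (the real callback bodies are elided; this is an assumption-laden
 * illustration): the done callback ignores the LS status entirely and
 * only releases the request, which is assumed to have been kmalloc'd
 * by the sender.
 */
static void
example_disconnect_assoc_done(struct nvmefc_ls_req *lsreq, int status)
{
	struct nvmefc_ls_req_op *lsop = ls_req_to_lsop(lsreq);

	__nvme_fc_finish_ls_req(lsop);

	/* the initiator doesn't care whether the LS succeeded or failed */
	kfree(lsop);
}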

static void
nvme_fc_xmt_ls_rsp_done(struct nvmefc_ls_rsp *lsrsp)
{}

static void
nvme_fc_xmt_ls_rsp(struct nvmefc_ls_rcv_op *lsop)
{}

static struct nvme_fc_ctrl *
nvme_fc_match_disconn_ls(struct nvme_fc_rport *rport,
		      struct nvmefc_ls_rcv_op *lsop)
{}

/*
 * returns true to mean LS handled and ls_rsp can be sent
 * returns false to defer ls_rsp xmt (will be done as part of
 *     association termination)
 */
static bool
nvme_fc_ls_disconnect_assoc(struct nvmefc_ls_rcv_op *lsop)
{}

/*
 * Actual processing routine for received FC-NVME LS requests from the LLDD.
 * Returns true if a response should be sent afterward, false if rsp will
 * be sent asynchronously.
 */
static bool
nvme_fc_handle_ls_rqst(struct nvmefc_ls_rcv_op *lsop)
{}

static void
nvme_fc_handle_ls_rqst_work(struct work_struct *work)
{}

static void
nvme_fc_rcv_ls_req_err_msg(struct nvme_fc_lport *lport,
				struct fcnvme_ls_rqst_w0 *w0)
{}

/**
 * nvme_fc_rcv_ls_req - transport entry point called by an LLDD
 *                       upon the reception of an NVME LS request.
 *
 * The nvme-fc layer will copy payload to an internal structure for
 * processing.  As such, upon completion of the routine, the LLDD may
 * immediately free/reuse the LS request buffer passed in the call.
 *
 * If this routine returns error, the LLDD should abort the exchange.
 *
 * @portptr:    pointer to the (registered) remote port that the LS
 *              was received from. The remoteport is associated with
 *              a specific localport.
 * @lsrsp:      pointer to a nvmefc_ls_rsp response structure to be
 *              used to reference the exchange corresponding to the LS
 *              when issuing an ls response.
 * @lsreqbuf:   pointer to the buffer containing the LS Request
 * @lsreqbuf_len: length, in bytes, of the received LS request
 */
int
nvme_fc_rcv_ls_req(struct nvme_fc_remote_port *portptr,
			struct nvmefc_ls_rsp *lsrsp,
			void *lsreqbuf, u32 lsreqbuf_len)
{}
EXPORT_SYMBOL_GPL();


/* *********************** NVME Ctrl Routines **************************** */

static void
__nvme_fc_exit_request(struct nvme_fc_ctrl *ctrl,
		struct nvme_fc_fcp_op *op)
{}

static void
nvme_fc_exit_request(struct blk_mq_tag_set *set, struct request *rq,
		unsigned int hctx_idx)
{}

static int
__nvme_fc_abort_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_fcp_op *op)
{}

static void
nvme_fc_abort_aen_ops(struct nvme_fc_ctrl *ctrl)
{}

static inline void
__nvme_fc_fcpop_chk_teardowns(struct nvme_fc_ctrl *ctrl,
		struct nvme_fc_fcp_op *op, int opstate)
{}

static void
nvme_fc_ctrl_ioerr_work(struct work_struct *work)
{}

/*
 * nvme_fc_io_getuuid - routine called by the LLDD to get the appid field
 * associated with a request
 * @req: IO request from nvme-fc to the driver
 * Returns: UUID if there is an appid associated with the VM, or
 * NULL if the user/libvirt has not set an appid for the VM
 */
char *nvme_fc_io_getuuid(struct nvmefc_fcp_req *req)
{}
EXPORT_SYMBOL_GPL();
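
/*
 * A minimal sketch of the lookup described above, assuming the
 * blkcg_get_fc_appid() helper from blk-cgroup.h and an "rq"
 * back-pointer in the fcp op (member name assumed, the structure
 * definition is elided here).
 */
static char *example_fc_io_getuuid(struct nvmefc_fcp_req *req)
{
	struct nvme_fc_fcp_op *op = fcp_req_to_fcp_op(req);
	struct request *rq = op->rq;

	if (!IS_ENABLED(CONFIG_BLK_CGROUP_FC_APPID) || !rq || !rq->bio)
		return NULL;
	return blkcg_get_fc_appid(rq->bio);
}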

static void
nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
{}

static int
__nvme_fc_init_request(struct nvme_fc_ctrl *ctrl,
		struct nvme_fc_queue *queue, struct nvme_fc_fcp_op *op,
		struct request *rq, u32 rqno)
{}

static int
nvme_fc_init_request(struct blk_mq_tag_set *set, struct request *rq,
		unsigned int hctx_idx, unsigned int numa_node)
{}

static int
nvme_fc_init_aen_ops(struct nvme_fc_ctrl *ctrl)
{}

static void
nvme_fc_term_aen_ops(struct nvme_fc_ctrl *ctrl)
{}

static inline int
__nvme_fc_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, unsigned int qidx)
{}

static int
nvme_fc_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, unsigned int hctx_idx)
{}

static int
nvme_fc_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{}

static void
nvme_fc_init_queue(struct nvme_fc_ctrl *ctrl, int idx)
{}

/*
 * This routine terminates a queue at the transport level.
 * The transport has already ensured that all outstanding ios on
 * the queue have been terminated.
 * The transport will send a Disconnect LS request to terminate
 * the queue's connection. Termination of the admin queue will also
 * terminate the association at the target.
 */
static void
nvme_fc_free_queue(struct nvme_fc_queue *queue)
{}

static void
__nvme_fc_delete_hw_queue(struct nvme_fc_ctrl *ctrl,
	struct nvme_fc_queue *queue, unsigned int qidx)
{}

static void
nvme_fc_free_io_queues(struct nvme_fc_ctrl *ctrl)
{}

static int
__nvme_fc_create_hw_queue(struct nvme_fc_ctrl *ctrl,
	struct nvme_fc_queue *queue, unsigned int qidx, u16 qsize)
{}

static void
nvme_fc_delete_hw_io_queues(struct nvme_fc_ctrl *ctrl)
{}

static int
nvme_fc_create_hw_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize)
{}

static int
nvme_fc_connect_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize)
{}

static void
nvme_fc_init_io_queues(struct nvme_fc_ctrl *ctrl)
{}

static void
nvme_fc_ctrl_free(struct kref *ref)
{}

static void
nvme_fc_ctrl_put(struct nvme_fc_ctrl *ctrl)
{}

static int
nvme_fc_ctrl_get(struct nvme_fc_ctrl *ctrl)
{}

/*
 * All accesses from nvme core layer done - can now free the
 * controller. Called after last nvme_put_ctrl() call
 */
static void
nvme_fc_free_ctrl(struct nvme_ctrl *nctrl)
{}

/*
 * This routine is used by the transport when it needs to find active
 * io on a queue that is to be terminated. The transport uses
 * blk_mq_tagset_busy_iter() to find the busy requests, and invokes
 * this routine to kill them one by one.
 *
 * As FC allocates a FC exchange for each io, the transport must contact
 * the LLDD to terminate the exchange, thus releasing the FC exchange.
 * After terminating the exchange the LLDD will call the transport's
 * normal io done path for the request, but it will have an aborted
 * status. The done path will return the io request back to the block
 * layer with an error status.
 */
static bool nvme_fc_terminate_exchange(struct request *req, void *data)
{}
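
/*
 * A minimal sketch of the iteration described above, assuming the
 * standard blk_mq_tagset_busy_iter() API; the tag_set and ctrl member
 * names are assumptions, as struct nvme_fc_ctrl is elided here.
 */
static void
example_terminate_busy_ios(struct nvme_fc_ctrl *ctrl)
{
	blk_mq_tagset_busy_iter(&ctrl->tag_set,
				nvme_fc_terminate_exchange, &ctrl->ctrl);
}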

/*
 * This routine runs through all outstanding commands on the association
 * and aborts them.  This routine is typically called by the
 * delete_association routine. It is also called due to an error during
 * reconnect. In that scenario, it is most likely a command that initializes
 * the controller, including fabric Connect commands on io queues, that
 * may have timed out or failed, thus the io must be killed for the connect
 * thread to see the error.
 */
static void
__nvme_fc_abort_outstanding_ios(struct nvme_fc_ctrl *ctrl, bool start_queues)
{}

static void
nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg)
{}

static enum blk_eh_timer_return nvme_fc_timeout(struct request *rq)
{}

static int
nvme_fc_map_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
		struct nvme_fc_fcp_op *op)
{}

static void
nvme_fc_unmap_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
		struct nvme_fc_fcp_op *op)
{}

/*
 * In FC, the queue is a logical thing. At transport connect, the target
 * creates its "queue" and returns a handle that is to be given to the
 * target whenever it posts something to the corresponding SQ.  When an
 * SQE is sent on a SQ, FC effectively considers the SQE, or rather the
 * command contained within the SQE, an io, and assigns a FC exchange
 * to it. The SQE and the associated SQ handle are sent in the initial
 * CMD IU sent on the exchange. All transfers relative to the io occur
 * as part of the exchange.  The CQE is the last thing for the io,
 * which is transferred (explicitly or implicitly) with the RSP IU
 * sent on the exchange. After the CQE is received, the FC exchange is
 * terminated and the Exchange may be used on a different io.
 *
 * The transport to LLDD api has the transport making a request for a
 * new fcp io request to the LLDD. The LLDD then allocates a FC exchange
 * resource and transfers the command. The LLDD will then process all
 * steps to complete the io. Upon completion, the transport done routine
 * is called.
 *
 * So - while the operation is outstanding to the LLDD, there is a link
 * level FC exchange resource that is also outstanding. This must be
 * considered in all cleanup operations.
 */
static blk_status_t
nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
	struct nvme_fc_fcp_op *op, u32 data_len,
	enum nvmefc_fcp_datadir	io_dir)
{}

static blk_status_t
nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx,
			const struct blk_mq_queue_data *bd)
{}

static void
nvme_fc_submit_async_event(struct nvme_ctrl *arg)
{}

static void
nvme_fc_complete_rq(struct request *rq)
{}

static void nvme_fc_map_queues(struct blk_mq_tag_set *set)
{}

static const struct blk_mq_ops nvme_fc_mq_ops =;

static int
nvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl)
{}

static int
nvme_fc_recreate_io_queues(struct nvme_fc_ctrl *ctrl)
{}

static void
nvme_fc_rport_active_on_lport(struct nvme_fc_rport *rport)
{}

static void
nvme_fc_rport_inactive_on_lport(struct nvme_fc_rport *rport)
{}

static int
nvme_fc_ctlr_active_on_rport(struct nvme_fc_ctrl *ctrl)
{}

static int
nvme_fc_ctlr_inactive_on_rport(struct nvme_fc_ctrl *ctrl)
{}

/*
 * This routine restarts the controller on the host side, and
 * on the link side, recreates the controller association.
 */
static int
nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
{}


/*
 * This routine stops operation of the controller on the host side.
 * On the host os stack side: Admin and IO queues are stopped,
 *   outstanding ios on them terminated via FC ABTS.
 * On the link side: the association is terminated.
 */
static void
nvme_fc_delete_association(struct nvme_fc_ctrl *ctrl)
{}

static void
nvme_fc_delete_ctrl(struct nvme_ctrl *nctrl)
{}

static void
nvme_fc_reconnect_or_delete(struct nvme_fc_ctrl *ctrl, int status)
{}

static void
nvme_fc_reset_ctrl_work(struct work_struct *work)
{}


static const struct nvme_ctrl_ops nvme_fc_ctrl_ops =;

static void
nvme_fc_connect_ctrl_work(struct work_struct *work)
{}


static const struct blk_mq_ops nvme_fc_admin_mq_ops =;


/*
 * Fails a controller request if it matches an existing controller
 * (association) with the same tuple:
 * <Host NQN, Host ID, local FC port, remote FC port, SUBSYS NQN>
 *
 * The ports don't need to be compared as they are intrinsically
 * already matched by the port pointers supplied.
 */
static bool
nvme_fc_existing_controller(struct nvme_fc_rport *rport,
		struct nvmf_ctrl_options *opts)
{}
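
/*
 * A minimal sketch of the tuple check described above, assuming the
 * nvmf_ctlr_matches_baseopts() helper from fabrics.h; the rport lock
 * and controller-list member names are assumptions, as the structure
 * definitions are elided here.
 */
static bool
example_existing_controller(struct nvme_fc_rport *rport,
		struct nvmf_ctrl_options *opts)
{
	struct nvme_fc_ctrl *ctrl;
	unsigned long flags;
	bool found = false;

	spin_lock_irqsave(&rport->lock, flags);
	list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) {
		found = nvmf_ctlr_matches_baseopts(&ctrl->ctrl, opts);
		if (found)
			break;
	}
	spin_unlock_irqrestore(&rport->lock, flags);

	return found;
}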

static struct nvme_fc_ctrl *
nvme_fc_alloc_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
	struct nvme_fc_lport *lport, struct nvme_fc_rport *rport)
{}

static struct nvme_ctrl *
nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
	struct nvme_fc_lport *lport, struct nvme_fc_rport *rport)
{}

struct nvmet_fc_traddr {};

static int
__nvme_fc_parse_u64(substring_t *sstr, u64 *val)
{}

/*
 * This routine validates and extracts the WWNs from the TRADDR string.
 * As the kernel parsers need the 0x prefix to determine the number base,
 * universally build the string to parse with a 0x prefix before parsing
 * the name strings.
 */
static int
nvme_fc_parse_traddr(struct nvmet_fc_traddr *traddr, char *buf, size_t blen)
{}
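
/*
 * A minimal sketch of the 0x-prefix trick described above: copy the 16
 * hex digits of one name behind a literal "0x" so kstrtou64() can infer
 * base 16. The helper is hypothetical; the caller is assumed to pass a
 * pointer at the first hex digit.
 */
static int
example_parse_wwn(const char *hexdigits, u64 *val)
{
	char name[2 + 16 + 1];	/* "0x" + 16 hex digits + NUL */

	snprintf(name, sizeof(name), "0x%.16s", hexdigits);
	return kstrtou64(name, 0, val);
}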

static struct nvme_ctrl *
nvme_fc_create_ctrl(struct device *dev, struct nvmf_ctrl_options *opts)
{}


static struct nvmf_transport_ops nvme_fc_transport =;

/* Arbitrary cap on successive failures. With lots of subsystems it could be high */
#define DISCOVERY_MAX_FAIL

static ssize_t nvme_fc_nvme_discovery_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{}

static DEVICE_ATTR(nvme_discovery, 0200, NULL, nvme_fc_nvme_discovery_store);

#ifdef CONFIG_BLK_CGROUP_FC_APPID
/* Parse the cgroup id from the buffer and return the length of the cgrpid */
static int fc_parse_cgrpid(const char *buf, u64 *id)
{}
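
/*
 * A minimal sketch of the parse described above, assuming a sysfs input
 * of the form "<hex cgrpid>:<appid>": consume hex digits up to the ':'
 * separator and return how many characters were consumed.
 */
static int example_parse_cgrpid(const char *buf, u64 *id)
{
	char cgrp_id[16 + 1] = { };
	int len = 0;

	while (len < 16 && buf[len] && buf[len] != ':') {
		cgrp_id[len] = buf[len];
		len++;
	}
	if (buf[len] != ':')
		return -EINVAL;
	if (kstrtou64(cgrp_id, 16, id))
		return -EINVAL;
	return len;
}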

/*
 * Parse and update the appid in the blkcg associated with the cgroupid.
 */
static ssize_t fc_appid_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{}
static DEVICE_ATTR(appid_store, 0200, NULL, fc_appid_store);
#endif /* CONFIG_BLK_CGROUP_FC_APPID */

static struct attribute *nvme_fc_attrs[] =;

static const struct attribute_group nvme_fc_attr_group =;

static const struct attribute_group *nvme_fc_attr_groups[] =;

static struct class fc_class =;

static int __init nvme_fc_init_module(void)
{}

static void
nvme_fc_delete_controllers(struct nvme_fc_rport *rport)
{}

static void __exit nvme_fc_exit_module(void)
{}

module_init();
module_exit(nvme_fc_exit_module);

MODULE_DESCRIPTION();
MODULE_LICENSE();