linux/drivers/nvme/target/fc.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2016 Avago Technologies.  All rights reserved.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/blk-mq.h>
#include <linux/parser.h>
#include <linux/random.h>
#include <uapi/scsi/fc/fc_fs.h>
#include <uapi/scsi/fc/fc_els.h>

#include "nvmet.h"
#include <linux/nvme-fc-driver.h>
#include <linux/nvme-fc.h>
#include "../host/fc.h"


/* *************************** Data Structures/Defines ****************** */


#define NVMET_LS_CTX_COUNT		256

struct nvmet_fc_tgtport;
struct nvmet_fc_tgt_assoc;

struct nvmet_fc_ls_iod {} __aligned(sizeof(unsigned long long));

struct nvmet_fc_ls_req_op {};


/* desired maximum for a single sequence - if sg list allows it */
#define NVMET_FC_MAX_SEQ_LENGTH		(256 * 1024)

enum nvmet_fcp_datadir {};

struct nvmet_fc_fcp_iod {};

struct nvmet_fc_tgtport {};

struct nvmet_fc_port_entry {};

struct nvmet_fc_defer_fcp_req {};

struct nvmet_fc_tgt_queue {} __aligned(sizeof(unsigned long long));

struct nvmet_fc_hostport {};

struct nvmet_fc_tgt_assoc {};


static inline int
nvmet_fc_iodnum(struct nvmet_fc_ls_iod *iodptr)
{}

static inline int
nvmet_fc_fodnum(struct nvmet_fc_fcp_iod *fodptr)
{}


/*
 * Association and Connection IDs:
 *
 * An Association ID carries a random number in its upper 6 bytes and
 *   zero in its lower 2 bytes.
 *
 * A Connection ID is the Association ID with the QID or'd into the
 *   lower 2 bytes.
 *
 * Note: Association ID == Connection ID for queue 0.
 */
#define BYTES_FOR_QID			sizeof(u16)
#define BYTES_FOR_QID_SHIFT		(BYTES_FOR_QID * 8)
#define NVMET_FC_QUEUEID_MASK		((u64)((1 << BYTES_FOR_QID_SHIFT) - 1))

static inline u64
nvmet_fc_makeconnid(struct nvmet_fc_tgt_assoc *assoc, u16 qid)
{}

static inline u64
nvmet_fc_getassociationid(u64 connectionid)
{}

static inline u16
nvmet_fc_getqueueid(u64 connectionid)
{}
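
/*
 * Illustrative sketch of the ID layout above (example helpers, not the
 * elided in-tree bodies): with the QID confined to the low two bytes,
 * connection ids are built and split with plain masking.
 */
static inline u64 example_makeconnid(u64 association_id, u16 qid)
{
	/* association id has zero low bytes, so qid == 0 yields the assoc id */
	return association_id | qid;
}

static inline u64 example_getassociationid(u64 connectionid)
{
	return connectionid & ~NVMET_FC_QUEUEID_MASK;
}

static inline u16 example_getqueueid(u64 connectionid)
{
	return (u16)(connectionid & NVMET_FC_QUEUEID_MASK);
}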

static inline struct nvmet_fc_tgtport *
targetport_to_tgtport(struct nvmet_fc_target_port *targetport)
{}

static inline struct nvmet_fc_fcp_iod *
nvmet_req_to_fod(struct nvmet_req *nvme_req)
{}


/* *************************** Globals **************************** */


static DEFINE_SPINLOCK(nvmet_fc_tgtlock);

static LIST_HEAD(nvmet_fc_target_list);
static DEFINE_IDA(nvmet_fc_tgtport_cnt);
static LIST_HEAD(nvmet_fc_portentry_list);


static void nvmet_fc_handle_ls_rqst_work(struct work_struct *work);
static void nvmet_fc_fcp_rqst_op_defer_work(struct work_struct *work);
static void nvmet_fc_tgt_a_put(struct nvmet_fc_tgt_assoc *assoc);
static int nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc);
static void nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue);
static int nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue);
static void nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport);
static void nvmet_fc_put_tgtport_work(struct work_struct *work)
{}
static int nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport);
static void nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
					struct nvmet_fc_fcp_iod *fod);
static void nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc);
static void nvmet_fc_xmt_ls_rsp(struct nvmet_fc_tgtport *tgtport,
				struct nvmet_fc_ls_iod *iod);


/* *********************** FC-NVME DMA Handling **************************** */

/*
 * The fcloop device passes in a NULL device pointer. Real LLDDs will
 * pass in a valid device pointer. If NULL is passed to the dma mapping
 * routines, depending on the platform, it may or may not succeed, and
 * may crash.
 *
 * As such:
 * Wrap all the dma routines and check the dev pointer.
 *
 * For simple mappings (those that return just a dma address), we'll
 * noop them, returning a dma address of 0.
 *
 * On more complex mappings (dma_map_sg), a pseudo routine fills
 * in the scatter list, setting all dma addresses to 0.
 */

static inline dma_addr_t
fc_dma_map_single(struct device *dev, void *ptr, size_t size,
		enum dma_data_direction dir)
{}

static inline int
fc_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{}

static inline void
fc_dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
	enum dma_data_direction dir)
{}

static inline void
fc_dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir)
{}

static inline void
fc_dma_sync_single_for_device(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir)
{}

/* pseudo dma_map_sg call */
static int
fc_map_sg(struct scatterlist *sg, int nents)
{}

static inline int
fc_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{}

static inline void
fc_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{}
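
/*
 * Sketch of the NULL-dev guard described above (assumed shape; the real
 * wrapper bodies are elided in this listing): forward to the DMA API only
 * when a real device was supplied, otherwise no-op with a dummy address
 * so fcloop keeps working.
 */
static inline dma_addr_t example_dma_map_single(struct device *dev,
		void *ptr, size_t size, enum dma_data_direction dir)
{
	return dev ? dma_map_single(dev, ptr, size, dir) : (dma_addr_t)0L;
}

static inline void example_dma_unmap_single(struct device *dev,
		dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
	if (dev)
		dma_unmap_single(dev, addr, size, dir);
}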


/* ********************** FC-NVME LS XMT Handling ************************* */


static void
__nvmet_fc_finish_ls_req(struct nvmet_fc_ls_req_op *lsop)
{}

static int
__nvmet_fc_send_ls_req(struct nvmet_fc_tgtport *tgtport,
		struct nvmet_fc_ls_req_op *lsop,
		void (*done)(struct nvmefc_ls_req *req, int status))
{}

static int
nvmet_fc_send_ls_req_async(struct nvmet_fc_tgtport *tgtport,
		struct nvmet_fc_ls_req_op *lsop,
		void (*done)(struct nvmefc_ls_req *req, int status))
{}

static void
nvmet_fc_disconnect_assoc_done(struct nvmefc_ls_req *lsreq, int status)
{}

/*
 * This routine sends a FC-NVME LS to disconnect (aka terminate)
 * the FC-NVME Association.  Terminating the association also
 * terminates the FC-NVME connections (per queue, both admin and io
 * queues) that are part of the association. I.e. everything is torn
 * down, and the related FC-NVME Association ID and Connection IDs
 * become invalid.
 *
 * The behavior of the fc-nvme target is such that its
 * understanding of the association and connections will implicitly
 * be torn down. The action is implicit as it may be due to a loss of
 * connectivity with the fc-nvme host, so the target may never get a
 * response even if it tries.  As such, the action of this routine
 * is to asynchronously send the LS, ignore any results of the LS, and
 * continue on with terminating the association. If the fc-nvme host
 * is present and receives the LS, it can tear down its side as well.
 */
static void
nvmet_fc_xmt_disconnect_assoc(struct nvmet_fc_tgt_assoc *assoc)
{}
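
/*
 * Sketch of the fire-and-forget pattern described above (example code,
 * not the elided in-tree body): the LS is sent asynchronously and the
 * done handler ignores the status, since teardown proceeds either way.
 */
static void example_disconnect_done(struct nvmefc_ls_req *lsreq, int status)
{
	/*
	 * status is deliberately ignored: connectivity to the host may
	 * already be gone, so a response may never arrive.
	 */
	kfree(lsreq->private);	/* example: request context stashed in ->private */
}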


/* *********************** FC-NVME Port Management ************************ */


static int
nvmet_fc_alloc_ls_iodlist(struct nvmet_fc_tgtport *tgtport)
{}

static void
nvmet_fc_free_ls_iodlist(struct nvmet_fc_tgtport *tgtport)
{}

static struct nvmet_fc_ls_iod *
nvmet_fc_alloc_ls_iod(struct nvmet_fc_tgtport *tgtport)
{}


static void
nvmet_fc_free_ls_iod(struct nvmet_fc_tgtport *tgtport,
			struct nvmet_fc_ls_iod *iod)
{}

static void
nvmet_fc_prep_fcp_iodlist(struct nvmet_fc_tgtport *tgtport,
				struct nvmet_fc_tgt_queue *queue)
{}

static void
nvmet_fc_destroy_fcp_iodlist(struct nvmet_fc_tgtport *tgtport,
				struct nvmet_fc_tgt_queue *queue)
{}

static struct nvmet_fc_fcp_iod *
nvmet_fc_alloc_fcp_iod(struct nvmet_fc_tgt_queue *queue)
{}


static void
nvmet_fc_queue_fcp_req(struct nvmet_fc_tgtport *tgtport,
		       struct nvmet_fc_tgt_queue *queue,
		       struct nvmefc_tgt_fcp_req *fcpreq)
{}

static void
nvmet_fc_fcp_rqst_op_defer_work(struct work_struct *work)
{}

static void
nvmet_fc_free_fcp_iod(struct nvmet_fc_tgt_queue *queue,
			struct nvmet_fc_fcp_iod *fod)
{}

static struct nvmet_fc_tgt_queue *
nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc,
			u16 qid, u16 sqsize)
{}


static void
nvmet_fc_tgt_queue_free(struct kref *ref)
{}

static void
nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue)
{}

static int
nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue)
{}


static void
nvmet_fc_delete_target_queue(struct nvmet_fc_tgt_queue *queue)
{}

static struct nvmet_fc_tgt_queue *
nvmet_fc_find_target_queue(struct nvmet_fc_tgtport *tgtport,
				u64 connection_id)
{}

static void
nvmet_fc_hostport_free(struct kref *ref)
{}

static void
nvmet_fc_hostport_put(struct nvmet_fc_hostport *hostport)
{}

static int
nvmet_fc_hostport_get(struct nvmet_fc_hostport *hostport)
{}

static void
nvmet_fc_free_hostport(struct nvmet_fc_hostport *hostport)
{}

static struct nvmet_fc_hostport *
nvmet_fc_match_hostport(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
{}

static struct nvmet_fc_hostport *
nvmet_fc_alloc_hostport(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
{}

static void
nvmet_fc_delete_assoc(struct nvmet_fc_tgt_assoc *assoc)
{}

static void
nvmet_fc_delete_assoc_work(struct work_struct *work)
{}

static void
nvmet_fc_schedule_delete_assoc(struct nvmet_fc_tgt_assoc *assoc)
{}

static bool
nvmet_fc_assoc_exists(struct nvmet_fc_tgtport *tgtport, u64 association_id)
{}

static struct nvmet_fc_tgt_assoc *
nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
{}

static void
nvmet_fc_target_assoc_free(struct kref *ref)
{}

static void
nvmet_fc_tgt_a_put(struct nvmet_fc_tgt_assoc *assoc)
{}

static int
nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc)
{}

static void
nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc)
{}

static struct nvmet_fc_tgt_assoc *
nvmet_fc_find_target_assoc(struct nvmet_fc_tgtport *tgtport,
				u64 association_id)
{}

static void
nvmet_fc_portentry_bind(struct nvmet_fc_tgtport *tgtport,
			struct nvmet_fc_port_entry *pe,
			struct nvmet_port *port)
{}

static void
nvmet_fc_portentry_unbind(struct nvmet_fc_port_entry *pe)
{}

/*
 * called when a targetport deregisters. Breaks the relationship
 * with the nvmet port, but leaves the port_entry in place so that
 * re-registration can resume operation.
 */
static void
nvmet_fc_portentry_unbind_tgt(struct nvmet_fc_tgtport *tgtport)
{}

/*
 * called when a new targetport is registered. Looks in the
 * existing nvmet port_entries to see if the nvmet layer is
 * configured for the targetport's WWNs (i.e. the targetport
 * previously existed, nvmet was configured for it, the lldd
 * unregistered the tgtport, and is now re-registering the same
 * targetport).  If so, set the nvmet port entry on the targetport.
 */
static void
nvmet_fc_portentry_rebind_tgt(struct nvmet_fc_tgtport *tgtport)
{}

/**
 * nvmet_fc_register_targetport - transport entry point called by an
 *                              LLDD to register the existence of a local
 *                              NVME subsystem FC port.
 * @pinfo:     pointer to information about the port to be registered
 * @template:  LLDD entrypoints and operational parameters for the port
 * @dev:       physical hardware device node the port corresponds to. Will
 *             be used for DMA mappings
 * @portptr:   pointer to a target port pointer. Upon success, the routine
 *             will allocate an nvmet_fc_target_port structure and place its
 *             address in the target port pointer. Upon failure, the
 *             pointer will be set to NULL.
 *
 * Returns:
 * a completion status. Must be 0 upon success; a negative errno
 * (ex: -ENXIO) upon failure.
 */
int
nvmet_fc_register_targetport(struct nvmet_fc_port_info *pinfo,
			struct nvmet_fc_target_template *template,
			struct device *dev,
			struct nvmet_fc_target_port **portptr)
{}
EXPORT_SYMBOL_GPL(nvmet_fc_register_targetport);
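
/*
 * Sketch of an LLDD calling the entry point above (hypothetical LLDD
 * code; example_lldd_template and the wwn parameters are assumptions):
 */
static struct nvmet_fc_target_template example_lldd_template;	/* assumed ops */

static int example_lldd_register(struct device *dev, u64 wwnn, u64 wwpn,
		u32 port_id, struct nvmet_fc_target_port **tgtport)
{
	struct nvmet_fc_port_info pinfo = {
		.node_name	= wwnn,
		.port_name	= wwpn,
		.port_id	= port_id,
	};

	return nvmet_fc_register_targetport(&pinfo, &example_lldd_template,
					    dev, tgtport);
}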


static void
nvmet_fc_free_tgtport(struct kref *ref)
{}

static void
nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport)
{}

static int
nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport)
{}

static void
__nvmet_fc_free_assocs(struct nvmet_fc_tgtport *tgtport)
{}

/**
 * nvmet_fc_invalidate_host - transport entry point called by an LLDD
 *                       to remove references to a hosthandle for LS's.
 *
 * The nvmet-fc layer ensures that any references to the hosthandle
 * on the targetport are forgotten (set to NULL).  The LLDD will
 * typically call this when a login with a remote host port has been
 * lost, thus LS's for the remote host port are no longer possible.
 *
 * If an LS request is outstanding to the targetport/hosthandle (or
 * issued concurrently with the call to invalidate the host), the
 * LLDD is responsible for terminating/aborting the LS and completing
 * the LS request. It is recommended that these terminations/aborts
 * occur after calling to invalidate the host handle to avoid additional
 * retries by the nvmet-fc transport. The nvmet-fc transport may
 * continue to reference the host handle while it cleans up outstanding
 * NVME associations. The nvmet-fc transport will call the
 * ops->host_release() callback to notify the LLDD that all references
 * are complete and the related host handle can be recovered.
 * Note: if there are no references, the callback may be called before
 * the invalidate host call returns.
 *
 * @target_port: pointer to the (registered) target port that a prior
 *              LS was received on and which supplied the transport the
 *              hosthandle.
 * @hosthandle: the handle (pointer) that represents the host port
 *              that no longer has connectivity and that LS's should
 *              no longer be directed to.
 */
void
nvmet_fc_invalidate_host(struct nvmet_fc_target_port *target_port,
			void *hosthandle)
{}
EXPORT_SYMBOL_GPL(nvmet_fc_invalidate_host);
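
/*
 * Sketch of the LLDD-side sequence described above (hypothetical LLDD
 * names): invalidate first, then abort any LS still outstanding, and
 * recover the handle only from the host_release() callback.
 */
static void example_lldd_connectivity_loss(struct nvmet_fc_target_port *tport,
		void *hosthandle)
{
	nvmet_fc_invalidate_host(tport, hosthandle);
	/* now terminate/abort any LS still outstanding to hosthandle */
}

static void example_lldd_host_release(void *hosthandle)
{
	/* all transport references dropped - hosthandle may be freed now */
	kfree(hosthandle);
}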

/*
 * nvmet layer has called to terminate an association
 */
static void
nvmet_fc_delete_ctrl(struct nvmet_ctrl *ctrl)
{}

/**
 * nvmet_fc_unregister_targetport - transport entry point called by an
 *                              LLDD to deregister/remove a previously
 *                              registered local NVME subsystem FC port.
 * @target_port: pointer to the (registered) target port that is to be
 *               deregistered.
 *
 * Returns:
 * a completion status. Must be 0 upon success; a negative errno
 * (ex: -ENXIO) upon failure.
 */
int
nvmet_fc_unregister_targetport(struct nvmet_fc_target_port *target_port)
{}
EXPORT_SYMBOL_GPL(nvmet_fc_unregister_targetport);


/* ********************** FC-NVME LS RCV Handling ************************* */


static void
nvmet_fc_ls_create_association(struct nvmet_fc_tgtport *tgtport,
			struct nvmet_fc_ls_iod *iod)
{}

static void
nvmet_fc_ls_create_connection(struct nvmet_fc_tgtport *tgtport,
			struct nvmet_fc_ls_iod *iod)
{}

/*
 * Returns true if the LS response is to be transmitted
 * Returns false if the LS response is to be delayed
 */
static int
nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
			struct nvmet_fc_ls_iod *iod)
{}


/* *********************** NVME Ctrl Routines **************************** */


static void nvmet_fc_fcp_nvme_cmd_done(struct nvmet_req *nvme_req);

static const struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops;

static void
nvmet_fc_xmt_ls_rsp_done(struct nvmefc_ls_rsp *lsrsp)
{}

static void
nvmet_fc_xmt_ls_rsp(struct nvmet_fc_tgtport *tgtport,
				struct nvmet_fc_ls_iod *iod)
{}

/*
 * Actual processing routine for received FC-NVME LS Requests from the LLD
 */
static void
nvmet_fc_handle_ls_rqst(struct nvmet_fc_tgtport *tgtport,
			struct nvmet_fc_ls_iod *iod)
{}

/*
 * Work-item wrapper that invokes nvmet_fc_handle_ls_rqst() from the
 * workqueue context it was scheduled on.
 */
static void
nvmet_fc_handle_ls_rqst_work(struct work_struct *work)
{}


/**
 * nvmet_fc_rcv_ls_req - transport entry point called by an LLDD
 *                       upon the reception of an NVME LS request.
 *
 * The nvmet-fc layer will copy the payload to an internal structure for
 * processing.  As such, upon completion of the routine, the LLDD may
 * immediately free/reuse the LS request buffer passed in the call.
 *
 * If this routine returns an error, the LLDD should abort the exchange.
 *
 * @target_port: pointer to the (registered) target port the LS was
 *              received on.
 * @hosthandle: pointer to the host specific data, gets stored in iod.
 * @lsrsp:      pointer to a lsrsp structure to be used to reference
 *              the exchange corresponding to the LS.
 * @lsreqbuf:   pointer to the buffer containing the LS Request
 * @lsreqbuf_len: length, in bytes, of the received LS request
 */
int
nvmet_fc_rcv_ls_req(struct nvmet_fc_target_port *target_port,
			void *hosthandle,
			struct nvmefc_ls_rsp *lsrsp,
			void *lsreqbuf, u32 lsreqbuf_len)
{}
EXPORT_SYMBOL_GPL(nvmet_fc_rcv_ls_req);
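
/*
 * Sketch of the buffer-ownership contract described above (hypothetical
 * LLDD names): the payload is copied internally, so the buffer may be
 * reused as soon as the call returns; on error the exchange is aborted.
 */
static void example_lldd_abort_ls_exchange(struct nvmefc_ls_rsp *lsrsp)
{
	/* LLDD-specific exchange abort would go here */
}

static void example_lldd_ls_rx(struct nvmet_fc_target_port *tport,
		void *hosthandle, struct nvmefc_ls_rsp *lsrsp,
		void *buf, u32 len)
{
	if (nvmet_fc_rcv_ls_req(tport, hosthandle, lsrsp, buf, len))
		example_lldd_abort_ls_exchange(lsrsp);
	/* buf is free for reuse in either case */
}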


/*
 * **********************
 * Start of FCP handling
 * **********************
 */

static int
nvmet_fc_alloc_tgt_pgs(struct nvmet_fc_fcp_iod *fod)
{}

static void
nvmet_fc_free_tgt_pgs(struct nvmet_fc_fcp_iod *fod)
{}


static bool
queue_90percent_full(struct nvmet_fc_tgt_queue *q, u32 sqhd)
{}

/*
 * Prep RSP payload.
 * May be a NVMET_FCOP_RSP or NVMET_FCOP_READDATA_RSP op
 */
static void
nvmet_fc_prep_fcp_rsp(struct nvmet_fc_tgtport *tgtport,
				struct nvmet_fc_fcp_iod *fod)
{}

static void nvmet_fc_xmt_fcp_op_done(struct nvmefc_tgt_fcp_req *fcpreq);

static void
nvmet_fc_abort_op(struct nvmet_fc_tgtport *tgtport,
				struct nvmet_fc_fcp_iod *fod)
{}

static void
nvmet_fc_xmt_fcp_rsp(struct nvmet_fc_tgtport *tgtport,
				struct nvmet_fc_fcp_iod *fod)
{}

static void
nvmet_fc_transfer_fcp_data(struct nvmet_fc_tgtport *tgtport,
				struct nvmet_fc_fcp_iod *fod, u8 op)
{}

static inline bool
__nvmet_fc_fod_op_abort(struct nvmet_fc_fcp_iod *fod, bool abort)
{}

/*
 * actual done handler for FCP operations when completed by the lldd
 */
static void
nvmet_fc_fod_op_done(struct nvmet_fc_fcp_iod *fod)
{}

static void
nvmet_fc_xmt_fcp_op_done(struct nvmefc_tgt_fcp_req *fcpreq)
{}

/*
 * actual completion handler after execution by the nvmet layer
 */
static void
__nvmet_fc_fcp_nvme_cmd_done(struct nvmet_fc_tgtport *tgtport,
			struct nvmet_fc_fcp_iod *fod, int status)
{}


static void
nvmet_fc_fcp_nvme_cmd_done(struct nvmet_req *nvme_req)
{}


/*
 * Actual processing routine for received FC-NVME I/O Requests from the LLD
 */
static void
nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
			struct nvmet_fc_fcp_iod *fod)
{}

/**
 * nvmet_fc_rcv_fcp_req - transport entry point called by an LLDD
 *                       upon the reception of a NVME FCP CMD IU.
 *
 * Pass a FC-NVME FCP CMD IU received from the FC link to the nvmet-fc
 * layer for processing.
 *
 * The nvmet_fc layer allocates a local job structure (struct
 * nvmet_fc_fcp_iod) from the queue for the io and copies the
 * CMD IU buffer to the job structure. As such, on a successful
 * completion (returns 0), the LLDD may immediately free/reuse
 * the CMD IU buffer passed in the call.
 *
 * However, in some circumstances, due to the packetized nature of FC
 * and the api of the FC LLDD (which may issue a hw command to send the
 * response but not get the hw completion for that command before a new
 * command is asynchronously received), it's possible for a command to
 * arrive before the LLDD and nvmet_fc have recycled the job structure.
 * This gives the appearance of more commands received than fit in the sq.
 * To alleviate this scenario, a temporary queue is maintained in the
 * transport for pending LLDD requests waiting for a queue job structure.
 * In these "overrun" cases, a temporary queue element is allocated, the
 * LLDD request and CMD IU buffer information are remembered, and the
 * routine returns a -EOVERFLOW status. Subsequently, when a queue job
 * structure is freed, it is immediately reallocated for anything on the
 * pending request list. The LLDD's defer_rcv() callback is called,
 * informing the LLDD that it may reuse the CMD IU buffer, and the io
 * is then started normally with the transport.
 *
 * The LLDD, when receiving an -EOVERFLOW completion status, is to treat
 * the completion as successful but must not reuse the CMD IU buffer
 * until the LLDD's defer_rcv() callback has been called for the
 * corresponding struct nvmefc_tgt_fcp_req pointer (see the sketch
 * following this function).
 *
 * If there is any other condition in which an error occurs, the
 * transport will return a non-zero status indicating the error.
 * In all cases other than -EOVERFLOW, the transport has not accepted the
 * request and the LLDD should abort the exchange.
 *
 * @target_port: pointer to the (registered) target port the FCP CMD IU
 *              was received on.
 * @fcpreq:     pointer to a fcpreq request structure to be used to reference
 *              the exchange corresponding to the FCP Exchange.
 * @cmdiubuf:   pointer to the buffer containing the FCP CMD IU
 * @cmdiubuf_len: length, in bytes, of the received FCP CMD IU
 */
int
nvmet_fc_rcv_fcp_req(struct nvmet_fc_target_port *target_port,
			struct nvmefc_tgt_fcp_req *fcpreq,
			void *cmdiubuf, u32 cmdiubuf_len)
{}
EXPORT_SYMBOL_GPL(nvmet_fc_rcv_fcp_req);
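
/*
 * Sketch of the -EOVERFLOW contract described above (hypothetical LLDD
 * names): only -EOVERFLOW defers CMD IU buffer reuse until defer_rcv();
 * any other error means the exchange must be aborted.
 */
static void example_lldd_abort_fcp_exchange(struct nvmefc_tgt_fcp_req *fcpreq)
{
	/* LLDD-specific exchange abort would go here */
}

static void example_lldd_fcp_rx(struct nvmet_fc_target_port *tport,
		struct nvmefc_tgt_fcp_req *fcpreq, void *cmdiu, u32 len)
{
	int ret = nvmet_fc_rcv_fcp_req(tport, fcpreq, cmdiu, len);

	if (ret == 0)
		return;		/* accepted: cmdiu may be reused now */
	if (ret == -EOVERFLOW)
		return;		/* accepted: reuse cmdiu only after defer_rcv() */
	example_lldd_abort_fcp_exchange(fcpreq);
}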

/**
 * nvmet_fc_rcv_fcp_abort - transport entry point called by an LLDD
 *                       upon the reception of an ABTS for a FCP command
 *
 * Notify the transport that an ABTS has been received for a FCP command
 * that had been given to the transport via nvmet_fc_rcv_fcp_req(). The
 * LLDD believes the command is still being worked on
 * (template_ops->fcp_req_release() has not been called).
 *
 * The transport will wait for any outstanding work (an op to the LLDD,
 * which the lldd should complete with error due to the ABTS; or the
 * completion from the nvmet layer of the nvme command), then will
 * stop processing and call the ops->fcp_req_release() callback to
 * return the i/o context to the LLDD.  The LLDD may send the BA_ACC
 * to the ABTS either after return from this function (assuming any
 * outstanding op work has been terminated) or upon the callback being
 * called.
 *
 * @target_port: pointer to the (registered) target port the FCP CMD IU
 *              was received on.
 * @fcpreq:     pointer to the fcpreq request structure that corresponds
 *              to the exchange that received the ABTS.
 */
void
nvmet_fc_rcv_fcp_abort(struct nvmet_fc_target_port *target_port,
			struct nvmefc_tgt_fcp_req *fcpreq)
{}
EXPORT_SYMBOL_GPL(nvmet_fc_rcv_fcp_abort);
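
/*
 * Sketch of the ABTS sequencing described above (hypothetical LLDD
 * names): the BA_ACC waits until the transport hands the i/o context
 * back through the fcp_req_release() callback.
 */
static void example_lldd_abts_rx(struct nvmet_fc_target_port *tport,
		struct nvmefc_tgt_fcp_req *fcpreq)
{
	nvmet_fc_rcv_fcp_abort(tport, fcpreq);
	/* BA_ACC is sent later, from the LLDD's fcp_req_release() callback */
}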


struct nvmet_fc_traddr {};

static int
__nvme_fc_parse_u64(substring_t *sstr, u64 *val)
{}

/*
 * This routine validates and extracts the WWNs from the TRADDR string.
 * As the kernel parsers need the 0x prefix to determine the number base,
 * the string to parse is always built with a 0x prefix before parsing
 * the name strings.
 */
static int
nvme_fc_parse_traddr(struct nvmet_fc_traddr *traddr, char *buf, size_t blen)
{}
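
/*
 * Sketch of the 0x-prefix trick described above (example helper): a raw
 * 16-hex-digit WWN substring is rebuilt with a "0x" prefix so match_u64()
 * can infer the number base.
 */
static int example_parse_wwn(const char *hexname, u64 *wwn)
{
	char name[2 + 16 + 1];		/* "0x" + 16 hex digits + NUL */
	substring_t sub;

	snprintf(name, sizeof(name), "0x%.16s", hexname);
	sub.from = name;
	sub.to = name + strlen(name);
	return match_u64(&sub, wwn) ? -EINVAL : 0;
}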

static int
nvmet_fc_add_port(struct nvmet_port *port)
{}

static void
nvmet_fc_remove_port(struct nvmet_port *port)
{}

static void
nvmet_fc_discovery_chg(struct nvmet_port *port)
{}

static ssize_t
nvmet_fc_host_traddr(struct nvmet_ctrl *ctrl,
		char *traddr, size_t traddr_size)
{}

static const struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops = {};

static int __init nvmet_fc_init_module(void)
{}

static void __exit nvmet_fc_exit_module(void)
{}

module_init(nvmet_fc_init_module);
module_exit(nvmet_fc_exit_module);

MODULE_DESCRIPTION("NVMe target FC transport driver");
MODULE_LICENSE("GPL v2");