linux/drivers/scsi/lpfc/lpfc_nvme.c

/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term *
 * "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.     *
 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 ********************************************************************/
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <asm/unaligned.h>
#include <linux/crc-t10dif.h>
#include <net/checksum.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>

#include "lpfc_version.h"
#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_nvme.h"
#include "lpfc_scsi.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_debugfs.h"

/* NVME initiator-based functions */

static struct lpfc_io_buf *
lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		  int idx, int expedite);

static void
lpfc_release_nvme_buf(struct lpfc_hba *, struct lpfc_io_buf *);

static struct nvme_fc_port_template lpfc_nvme_template;

/**
 * lpfc_nvme_create_queue - Create a handle for an NVME IO queue
 * @pnvme_lport: Transport localport with which the queue is associated
 * @qidx: A CPU/queue index used to affinitize IO queues and MSIX vectors.
 * @qsize: Size of the queue (number of entries)
 * @handle: An opaque driver handle used in follow-up calls.
 *
 * Driver registers this routine to preallocate and initialize any
 * internal data structures to bind the @qidx to its internal IO queues.
 * A hardware queue maps (qidx) to a specific driver MSI-X vector/EQ/CQ/WQ.
 *
 * Return value :
 *   0 - Success
 *   -EINVAL - Unsupported input value.
 *   -ENOMEM - Could not alloc necessary memory
 **/
static int
lpfc_nvme_create_queue(struct nvme_fc_local_port *pnvme_lport,
		       unsigned int qidx, u16 qsize,
		       void **handle)
{}
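
/*
 * A minimal sketch of the create_queue pattern, not the driver's actual
 * body: allocate a small per-queue handle, record the cpu/queue index the
 * transport asked for, and hand the handle back through @handle.  The
 * lpfc_nvme_qhandle field names below are assumptions for illustration.
 *
 *	struct lpfc_nvme_qhandle *qhandle;
 *
 *	qhandle = kzalloc(sizeof(*qhandle), GFP_KERNEL);
 *	if (!qhandle)
 *		return -ENOMEM;
 *	qhandle->cpu_id = raw_smp_processor_id();
 *	qhandle->qidx = qidx;
 *	qhandle->index = qidx ? qidx - 1 : 0;
 *	*handle = (void *)qhandle;
 *	return 0;
 */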

/**
 * lpfc_nvme_delete_queue - Delete the handle for an NVME IO queue
 * @pnvme_lport: Transport localport with which the queue is associated
 * @qidx: A CPU/queue index used to affinitize IO queues and MSIX vectors.
 * @handle: An opaque driver handle from lpfc_nvme_create_queue
 *
 * Driver registers this routine to free
 * any internal data structures to bind the @qidx to its internal
 * IO queues.
 *
 * Return value :
 *   None
 **/
static void
lpfc_nvme_delete_queue(struct nvme_fc_local_port *pnvme_lport,
		       unsigned int qidx,
		       void *handle)
{}

/* lpfc_nvme_localport_delete - Template downcall invoked by the nvme
 * transport when unregistration of the localport has completed; it
 * signals the driver's waiting unregister path.
 */
static void
lpfc_nvme_localport_delete(struct nvme_fc_local_port *localport)
{}

/* lpfc_nvme_remoteport_delete
 *
 * @remoteport: Pointer to an nvme transport remoteport instance.
 *
 * This is a template downcall.  NVME transport calls this function
 * when it has completed the unregistration of a previously
 * registered remoteport.
 *
 * Return value :
 * None
 */
static void
lpfc_nvme_remoteport_delete(struct nvme_fc_remote_port *remoteport)
{}

/**
 * lpfc_nvme_handle_lsreq - Process an unsolicited NVME LS request
 * @phba: pointer to lpfc hba data structure.
 * @axchg: pointer to exchange context for the NVME LS request
 *
 * This routine is used for processing an asynchronously received NVME LS
 * request. Any remaining validation is done and the LS is then forwarded
 * to the nvme-fc transport via nvme_fc_rcv_ls_req().
 *
 * The calling sequence should be: nvme_fc_rcv_ls_req() -> (processing)
 * -> lpfc_nvme_xmt_ls_rsp/cmp -> req->done.
 * __lpfc_nvme_xmt_ls_rsp_cmp should free the allocated axchg.
 *
 * Returns 0 if LS was handled and delivered to the transport
 * Returns 1 if LS failed to be handled and should be dropped
 */
int
lpfc_nvme_handle_lsreq(struct lpfc_hba *phba,
			struct lpfc_async_xchg_ctx *axchg)
{}
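
/*
 * A condensed sketch of the forwarding step, assuming the axchg fields
 * named here (ls_rsp, payload, and size) and that @remoteport was looked
 * up from the ndlp that received the LS; the full routine validates the
 * local and remote port state before making this call:
 *
 *	rc = nvme_fc_rcv_ls_req(remoteport, &axchg->ls_rsp,
 *				axchg->payload, axchg->size);
 *	return rc ? 1 : 0;
 */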

/**
 * __lpfc_nvme_ls_req_cmp - Generic completion handler for an NVME
 *        LS request.
 * @phba: Pointer to HBA context object
 * @vport: The local port that issued the LS
 * @cmdwqe: Pointer to driver command WQE object.
 * @wcqe: Pointer to driver response CQE object.
 *
 * This function is the generic completion handler for NVME LS requests.
 * The function updates any states and statistics, calls the transport
 * ls_req done() routine, then tears down the command and buffers used
 * for the LS request.
 **/
void
__lpfc_nvme_ls_req_cmp(struct lpfc_hba *phba, struct lpfc_vport *vport,
			struct lpfc_iocbq *cmdwqe,
			struct lpfc_wcqe_complete *wcqe)
{}

static void
lpfc_nvme_ls_req_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
		     struct lpfc_iocbq *rspwqe)
{}

static int
lpfc_nvme_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
		  struct lpfc_dmabuf *inp,
		  struct nvmefc_ls_req *pnvme_lsreq,
		  void (*cmpl)(struct lpfc_hba *, struct lpfc_iocbq *,
			       struct lpfc_iocbq *),
		  struct lpfc_nodelist *ndlp, uint32_t num_entry,
		  uint32_t tmo, uint8_t retry)
{}


/**
 * __lpfc_nvme_ls_req - Generic service routine to issue an NVME LS request
 * @vport: The local port issuing the LS
 * @ndlp: The remote port to send the LS to
 * @pnvme_lsreq: Pointer to LS request structure from the transport
 * @gen_req_cmp: Completion call-back
 *
 * Routine validates the ndlp, builds buffers and sends a GEN_REQUEST
 * WQE to perform the LS operation.
 *
 * Return value :
 *   0 - Success
 *   non-zero: various error codes, in form of -Exxx
 **/
int
__lpfc_nvme_ls_req(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		      struct nvmefc_ls_req *pnvme_lsreq,
		      void (*gen_req_cmp)(struct lpfc_hba *phba,
				struct lpfc_iocbq *cmdwqe,
				struct lpfc_iocbq *rspwqe))
{}

/**
 * lpfc_nvme_ls_req - Issue an NVME Link Service request
 * @pnvme_lport: Transport localport that LS is to be issued from.
 * @pnvme_rport: Transport remoteport that LS is to be sent to.
 * @pnvme_lsreq: the transport nvme_ls_req structure for the LS
 *
 * Driver registers this routine to handle any link service request
 * from the nvme_fc transport to a remote nvme-aware port.
 *
 * Return value :
 *   0 - Success
 *   non-zero: various error codes, in form of -Exxx
 **/
static int
lpfc_nvme_ls_req(struct nvme_fc_local_port *pnvme_lport,
		 struct nvme_fc_remote_port *pnvme_rport,
		 struct nvmefc_ls_req *pnvme_lsreq)
{}
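
/*
 * A sketch of how this transport entry point typically recovers the
 * driver objects from the transport handles and defers to the generic
 * routine; the lport->vport and rport->ndlp member names are assumptions
 * based on the driver's private lport/rport structures:
 *
 *	struct lpfc_nvme_lport *lport = pnvme_lport->private;
 *	struct lpfc_nvme_rport *rport = pnvme_rport->private;
 *
 *	if (unlikely(!lport) || unlikely(!rport))
 *		return -EINVAL;
 *	return __lpfc_nvme_ls_req(lport->vport, rport->ndlp, pnvme_lsreq,
 *				  lpfc_nvme_ls_req_cmp);
 */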

/**
 * __lpfc_nvme_ls_abort - Generic service routine to abort a prior
 *         NVME LS request
 * @vport: The local port that issued the LS
 * @ndlp: The remote port the LS was sent to
 * @pnvme_lsreq: Pointer to LS request structure from the transport
 *
 * The driver validates the ndlp, looks for the LS, and aborts the
 * LS if found.
 *
 * Returns:
 * 0 : if LS found and aborted
 * non-zero: various error conditions in form -Exxx
 **/
int
__lpfc_nvme_ls_abort(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
			struct nvmefc_ls_req *pnvme_lsreq)
{}

static int
lpfc_nvme_xmt_ls_rsp(struct nvme_fc_local_port *localport,
		     struct nvme_fc_remote_port *remoteport,
		     struct nvmefc_ls_rsp *ls_rsp)
{}

/**
 * lpfc_nvme_ls_abort - Abort a prior NVME LS request
 * @pnvme_lport: Transport localport that LS is to be issued from.
 * @pnvme_rport: Transport remoteport that LS is to be sent to.
 * @pnvme_lsreq: the transport nvme_ls_req structure for the LS
 *
 * Driver registers this routine to abort a NVME LS request that is
 * in progress (from the transport's perspective).
 **/
static void
lpfc_nvme_ls_abort(struct nvme_fc_local_port *pnvme_lport,
		   struct nvme_fc_remote_port *pnvme_rport,
		   struct nvmefc_ls_req *pnvme_lsreq)
{}

/* Fix up the existing sgls for NVME IO. */
static inline void
lpfc_nvme_adj_fcp_sgls(struct lpfc_vport *vport,
		       struct lpfc_io_buf *lpfc_ncmd,
		       struct nvmefc_fcp_req *nCmd)
{}


/*
 * lpfc_nvme_io_cmd_cmpl - Complete an NVME-over-FCP IO
 *
 * This routine is the WQE completion handler for an NVME-over-FCP IO.
 * It translates the hardware completion status into a transport status,
 * updates the driver's IO statistics, and calls the transport's done()
 * routine for the fcp request before releasing the IO buffer.
 *
 * Return value :
 *   None
 **/
static void
lpfc_nvme_io_cmd_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
		      struct lpfc_iocbq *pwqeOut)
{}
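
/*
 * A condensed sketch of the completion path; the iocbq, lpfc_io_buf, and
 * nvmefc_fcp_req member names used here are assumptions, and the real
 * handler also deals with aborted or failed completions, residuals, and
 * statistics before releasing the buffer:
 *
 *	struct lpfc_io_buf *lpfc_ncmd = pwqeIn->io_buf;
 *	struct lpfc_wcqe_complete *wcqe = &pwqeOut->wcqe_cmpl;
 *	struct nvmefc_fcp_req *nCmd = lpfc_ncmd->nvmeCmd;
 *
 *	nCmd->transferred_length = wcqe->total_data_placed;
 *	nCmd->status = bf_get(lpfc_wcqe_c_status, wcqe) ?
 *			NVME_SC_INTERNAL : 0;
 *	nCmd->done(nCmd);
 *	lpfc_release_nvme_buf(phba, lpfc_ncmd);
 */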


/**
 * lpfc_nvme_prep_io_cmd - Prepare the WQE for an NVME-over-FCP IO
 * @vport: pointer to a host virtual N_Port data structure
 * @lpfc_ncmd: Pointer to the lpfc nvme command
 * @pnode: pointer to a node-list data structure
 * @cstat: pointer to the control status structure
 *
 * This routine initializes the FCP command WQE (read, write, or plain
 * command) for @lpfc_ncmd from the transport's fcp request and updates
 * the per-queue control statistics.
 *
 * Return value :
 *   0 - Success
 **/
static int
lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport,
		      struct lpfc_io_buf *lpfc_ncmd,
		      struct lpfc_nodelist *pnode,
		      struct lpfc_fc4_ctrl_stat *cstat)
{}


/**
 * lpfc_nvme_prep_io_dma - Prepare the data SGL for an NVME-over-FCP IO
 * @vport: pointer to a host virtual N_Port data structure
 * @lpfc_ncmd: Pointer to the lpfc nvme command
 *
 * This routine walks the scatter/gather list supplied by the transport
 * in the fcp request and builds the data SGEs for the FCP command WQE
 * associated with @lpfc_ncmd.
 *
 * Return value :
 *   0 - Success
 *   non-zero - Failure (for example, the SG list exceeds the supported
 *              SGE count)
 **/
static int
lpfc_nvme_prep_io_dma(struct lpfc_vport *vport,
		      struct lpfc_io_buf *lpfc_ncmd)
{}

/**
 * lpfc_nvme_fcp_io_submit - Issue an NVME-over-FCP IO
 * @pnvme_lport: Pointer to the driver's local port data
 * @pnvme_rport: Pointer to the rport receiving the IO
 * @hw_queue_handle: Driver-returned handle from lpfc_nvme_create_queue
 * @pnvme_fcreq: IO request from nvme fc to driver.
 *
 * Driver registers this routine as its io request handler.  This
 * routine issues an fcp WQE built from the @pnvme_fcreq data structure
 * to the rport indicated in @pnvme_rport.
 *
 * Return value :
 *   0 - Success
 *   non-zero: various error codes, in form of -Exxx
 **/
static int
lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
			struct nvme_fc_remote_port *pnvme_rport,
			void *hw_queue_handle,
			struct nvmefc_fcp_req *pnvme_fcreq)
{}
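
/*
 * A high-level sketch of the submit path using the helpers declared in
 * this file; the lport/rport/queue-handle lookups, expedite handling,
 * busy checks, and statistics are omitted, and the lpfc_io_buf member
 * names and the lpfc_sli4_issue_wqe() signature are assumptions:
 *
 *	lpfc_ncmd = lpfc_get_nvme_buf(phba, ndlp, idx, expedite);
 *	if (!lpfc_ncmd)
 *		return -EBUSY;
 *	lpfc_ncmd->nvmeCmd = pnvme_fcreq;
 *	lpfc_nvme_prep_io_cmd(vport, lpfc_ncmd, ndlp, cstat);
 *	ret = lpfc_nvme_prep_io_dma(vport, lpfc_ncmd);
 *	if (ret) {
 *		lpfc_release_nvme_buf(phba, lpfc_ncmd);
 *		return -ENOMEM;
 *	}
 *	ret = lpfc_sli4_issue_wqe(phba, lpfc_ncmd->hdwq,
 *				  &lpfc_ncmd->cur_iocbq);
 *	if (ret) {
 *		lpfc_release_nvme_buf(phba, lpfc_ncmd);
 *		return -EBUSY;
 *	}
 *	return 0;
 */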

/**
 * lpfc_nvme_abort_fcreq_cmpl - Complete an NVME FCP abort request.
 * @phba: Pointer to HBA context object
 * @cmdiocb: Pointer to command iocb object.
 * @rspiocb: Pointer to response iocb object.
 *
 * This is the callback function for any NVME FCP IO that was aborted.
 *
 * Return value:
 *   None
 **/
void
lpfc_nvme_abort_fcreq_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
			   struct lpfc_iocbq *rspiocb)
{}

/**
 * lpfc_nvme_fcp_abort - Issue an NVME-over-FCP ABTS
 * @pnvme_lport: Pointer to the driver's local port data
 * @pnvme_rport: Pointer to the rport on which the IO was issued
 * @hw_queue_handle: Driver-returned handle from lpfc_nvme_create_queue
 * @pnvme_fcreq: IO request from nvme fc to driver.
 *
 * Driver registers this routine as its nvme request io abort handler.  This
 * routine issues an fcp Abort WQE for the outstanding IO described by
 * @pnvme_fcreq to the rport indicated in @pnvme_rport.  This routine
 * is executed asynchronously - once the target is validated as "MAPPED" and
 * ready for IO, the driver issues the abort request and returns.
 *
 * Return value:
 *   None
 **/
static void
lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
		    struct nvme_fc_remote_port *pnvme_rport,
		    void *hw_queue_handle,
		    struct nvmefc_fcp_req *pnvme_fcreq)
{}

/* Declare and initialize an instance of the FC NVME template. */
static struct nvme_fc_port_template lpfc_nvme_template = {};
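
/*
 * A sketch of how the template is typically populated, assuming the
 * standard nvme_fc_port_template member names and the handlers declared
 * earlier in this file; queue counts and SGL limits are usually tuned
 * again from configuration parameters at load time:
 *
 *	static struct nvme_fc_port_template lpfc_nvme_template = {
 *		.localport_delete  = lpfc_nvme_localport_delete,
 *		.remoteport_delete = lpfc_nvme_remoteport_delete,
 *		.create_queue      = lpfc_nvme_create_queue,
 *		.delete_queue      = lpfc_nvme_delete_queue,
 *		.ls_req            = lpfc_nvme_ls_req,
 *		.fcp_io            = lpfc_nvme_fcp_io_submit,
 *		.ls_abort          = lpfc_nvme_ls_abort,
 *		.fcp_abort         = lpfc_nvme_fcp_abort,
 *		.xmt_ls_rsp        = lpfc_nvme_xmt_ls_rsp,
 *		.max_hw_queues     = 1,
 *		.dma_boundary      = 0xFFFFFFFF,
 *		.local_priv_sz     = sizeof(struct lpfc_nvme_lport),
 *		.remote_priv_sz    = sizeof(struct lpfc_nvme_rport),
 *		.fcprqst_priv_sz   = sizeof(struct lpfc_nvme_fcpreq_priv),
 *	};
 */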

/*
 * lpfc_get_nvme_buf - Get an nvme buffer from io_buf_list of the HBA
 *
 * This routine removes an nvme buffer from the head of the @hdwq
 * io_buf_list and returns it to the caller.
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to lpfc_nvme_buf - Success
 **/
static struct lpfc_io_buf *
lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		  int idx, int expedite)
{}

/**
 * lpfc_release_nvme_buf - Return an nvme buffer back to the hba nvme buf list.
 * @phba: The Hba for which this call is being executed.
 * @lpfc_ncmd: The nvme buffer which is being released.
 *
 * This routine releases the @lpfc_ncmd nvme buffer by adding it to the tail
 * of the @phba lpfc_io_buf_list. For SLI4, XRIs are tied to the nvme buffer
 * and the buffer cannot be reused for at least RA_TOV amount of time if it
 * was aborted.
 **/
static void
lpfc_release_nvme_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_ncmd)
{}

/**
 * lpfc_nvme_create_localport - Create/Bind an nvme localport instance.
 * @vport: the lpfc_vport instance requesting a localport.
 *
 * This routine is invoked to create an nvme localport instance to bind
 * to the nvme_fc_transport.  It is called once during driver load
 * like lpfc_create_shost after all other services are initialized.
 * It requires a vport, vpi, and wwns at call time.  Other localport
 * parameters are modified as the driver's FCID and the Fabric WWN
 * are established.
 *
 * Return codes
 *      0 - successful
 *      -ENOMEM - no heap memory available
 *      other values - from nvme registration upcall
 **/
int
lpfc_nvme_create_localport(struct lpfc_vport *vport)
{}
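
/*
 * A sketch of the registration call this routine is built around,
 * assuming CONFIG_NVME_FC is enabled; the wwn and DID conversions shown
 * illustrate how the vport's identity is typically presented to the
 * transport:
 *
 *	struct nvme_fc_port_info nfcp_info;
 *	struct nvme_fc_local_port *localport;
 *	int ret;
 *
 *	nfcp_info.node_name = wwn_to_u64(vport->fc_nodename.u.wwn);
 *	nfcp_info.port_name = wwn_to_u64(vport->fc_portname.u.wwn);
 *	nfcp_info.port_role = FC_PORT_ROLE_NVME_INITIATOR;
 *	nfcp_info.port_id   = vport->fc_myDID;
 *	ret = nvme_fc_register_localport(&nfcp_info, &lpfc_nvme_template,
 *					 &vport->phba->pcidev->dev,
 *					 &localport);
 */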

#if (IS_ENABLED(CONFIG_NVME_FC))
/* lpfc_nvme_lport_unreg_wait - Wait for the host to complete an lport unreg.
 *
 * The driver has to wait for the host nvme transport to callback
 * indicating the localport has successfully unregistered all
 * resources.  Since this is an uninterruptible wait, loop every ten
 * seconds and print a message indicating no progress.
 *
 * An uninterruptible wait is used because of the risk of transport-to-
 * driver state mismatch.
 */
static void
lpfc_nvme_lport_unreg_wait(struct lpfc_vport *vport,
			   struct lpfc_nvme_lport *lport,
			   struct completion *lport_unreg_cmp)
{}
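
/*
 * A sketch of the uninterruptible wait loop described above; the
 * ten-second period and the log message wording are illustrative only:
 *
 *	unsigned long wait_tmo = msecs_to_jiffies(10000);
 *
 *	while (!wait_for_completion_timeout(lport_unreg_cmp, wait_tmo))
 *		lpfc_printf_vlog(vport, KERN_WARNING, LOG_NVME_IOERR,
 *				 "lport x%px unreg wait timed out, "
 *				 "renewing\n", lport);
 */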
#endif

/**
 * lpfc_nvme_destroy_localport - Destroy lpfc_nvme bound to nvme transport.
 * @vport: pointer to a host virtual N_Port data structure
 *
 * This routine is invoked to destroy all lports bound to the phba.
 * The lport memory was allocated by the nvme fc transport and is
 * released there.  This routine ensures all rports bound to the
 * lport have been disconnected.
 *
 **/
void
lpfc_nvme_destroy_localport(struct lpfc_vport *vport)
{}

/* lpfc_nvme_update_localport - Update the bound localport's FCID and
 * port role to reflect the vport's current state, typically after a
 * link or fabric login change.
 */
void
lpfc_nvme_update_localport(struct lpfc_vport *vport)
{}

/* lpfc_nvme_register_port - Bind an ndlp to the nvme transport as a
 * remoteport.  The transport is given the ndlp's WWNs, DID, and roles,
 * and the resulting remoteport is bound back to the ndlp.
 */
int
lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{}

/*
 * lpfc_nvme_rescan_port - Check to see if we should rescan this remoteport
 *
 * If the ndlp represents an NVME Target that we are logged into,
 * ping the NVME FC Transport layer to initiate a device rescan
 * on this remote NPort.
 */
void
lpfc_nvme_rescan_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{}
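
/*
 * A sketch of the rescan trigger, assuming @remoteport is the
 * nvme_fc_remote_port previously registered for this ndlp and that the
 * NLP type and state flags shown carry their usual meaning:
 *
 *	if ((ndlp->nlp_type & NLP_NVME_TARGET) &&
 *	    ndlp->nlp_state == NLP_STE_MAPPED_NODE)
 *		nvme_fc_rescan_remoteport(remoteport);
 */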

/* lpfc_nvme_unregister_port - unbind the DID and port_role from this rport.
 *
 * There is no notion of Devloss or rport recovery from the current
 * nvme_transport perspective.  Loss of an rport just means IO cannot
 * be sent and recovery is completely up to the initiator.
 * For now, the driver just unbinds the DID and port_role so that
 * no further IO can be issued.
 */
void
lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{}
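
/*
 * A sketch of the unbind step, assuming @remoteport is the
 * nvme_fc_remote_port previously registered for this ndlp; a non-zero
 * return from the transport is logged but not treated as fatal:
 *
 *	ret = nvme_fc_unregister_remoteport(remoteport);
 *	if (ret)
 *		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
 *				 "remoteport unregister failed %d\n", ret);
 */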

/**
 * lpfc_sli4_nvme_pci_offline_aborted - Fast-path process of NVME xri abort
 * @phba: pointer to lpfc hba data structure.
 * @lpfc_ncmd: The nvme job structure for the request being aborted.
 *
 * This routine is invoked by the worker thread to process a SLI4 fast-path
 * NVME aborted xri.  Aborted NVME IO commands are completed to the transport
 * here.
 **/
void
lpfc_sli4_nvme_pci_offline_aborted(struct lpfc_hba *phba,
				   struct lpfc_io_buf *lpfc_ncmd)
{}

/**
 * lpfc_sli4_nvme_xri_aborted - Fast-path process of NVME xri abort
 * @phba: pointer to lpfc hba data structure.
 * @axri: pointer to the fcp xri abort wcqe structure.
 * @lpfc_ncmd: The nvme job structure for the request being aborted.
 *
 * This routine is invoked by the worker thread to process a SLI4 fast-path
 * NVME aborted xri.  Aborted NVME IO commands are completed to the transport
 * here.
 **/
void
lpfc_sli4_nvme_xri_aborted(struct lpfc_hba *phba,
			   struct sli4_wcqe_xri_aborted *axri,
			   struct lpfc_io_buf *lpfc_ncmd)
{}

/**
 * lpfc_nvme_wait_for_io_drain - Wait for all NVME wqes to complete
 * @phba: Pointer to HBA context object.
 *
 * This function flushes all wqes in the nvme rings and frees all resources
 * in the txcmplq. This function does not issue abort wqes for the IO
 * commands in the txcmplq; they will just be returned with
 * IOERR_SLI_DOWN. This function is invoked during EEH handling when the
 * device's PCI slot has been permanently disabled.
 **/
void
lpfc_nvme_wait_for_io_drain(struct lpfc_hba *phba)
{}
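
/*
 * A sketch of the drain loop, assuming the hdwq, io_wq, and pring member
 * names; the real routine also bounds the wait and logs progress:
 *
 *	for (i = 0; i < phba->cfg_hdw_queue; i++) {
 *		pring = phba->sli4_hba.hdwq[i].io_wq->pring;
 *		if (!pring)
 *			continue;
 *		while (!list_empty(&pring->txcmplq))
 *			msleep(20);
 *	}
 */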

void
lpfc_nvme_cancel_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
		      uint32_t stat, uint32_t param)
{}