/* linux/drivers/target/target_core_transport.c */

// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
 * Filename:  target_core_transport.c
 *
 * This file contains the Generic Target Engine Core.
 *
 * (c) Copyright 2002-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <[email protected]>
 *
 ******************************************************************************/

#include <linux/net.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/in.h>
#include <linux/cdrom.h>
#include <linux/module.h>
#include <linux/ratelimit.h>
#include <linux/vmalloc.h>
#include <asm/unaligned.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi_proto.h>
#include <scsi/scsi_common.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"
#include "target_core_ua.h"

#define CREATE_TRACE_POINTS
#include <trace/events/target.h>

static struct workqueue_struct *target_completion_wq;
static struct workqueue_struct *target_submission_wq;
static struct kmem_cache *se_sess_cache;
struct kmem_cache *se_ua_cache;
struct kmem_cache *t10_pr_reg_cache;
struct kmem_cache *t10_alua_lu_gp_cache;
struct kmem_cache *t10_alua_lu_gp_mem_cache;
struct kmem_cache *t10_alua_tg_pt_gp_cache;
struct kmem_cache *t10_alua_lba_map_cache;
struct kmem_cache *t10_alua_lba_map_mem_cache;

static void transport_complete_task_attr(struct se_cmd *cmd);
static void translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason);
static void transport_handle_queue_full(struct se_cmd *cmd,
		struct se_device *dev, int err, bool write_pending);
static void target_complete_ok_work(struct work_struct *work);

/*
 * Create the slab caches declared above (session, UA, PR and ALUA
 * bookkeeping objects). Presumably returns 0 on success and a negative
 * errno on failure — body elided in this excerpt.
 */
int init_se_kmem_caches(void)
{}

/* Tear down the caches created by init_se_kmem_caches(); body elided. */
void release_se_kmem_caches(void)
{}

/* This code ensures unique mib indexes are handed out. */
static DEFINE_SPINLOCK(scsi_mib_index_lock);
static u32 scsi_mib_index[SCSI_INDEX_TYPE_MAX];

/*
 * Allocate a new row index for the entry type specified
 */
u32 scsi_get_new_index(scsi_index_t type)
{}

/* One-time lazy initialisation of transport subsystem state; body elided. */
void transport_subsystem_check_init(void)
{}

/* percpu_ref release callback for a command counter's refcount; body elided. */
static void target_release_cmd_refcnt(struct percpu_ref *ref)
{}

/*
 * Allocate and initialise a target_cmd_counter used to track in-flight
 * commands; body elided in this excerpt.
 */
struct target_cmd_counter *target_alloc_cmd_counter(void)
{}
EXPORT_SYMBOL_GPL(target_alloc_cmd_counter);

/* Free a counter obtained from target_alloc_cmd_counter(); body elided. */
void target_free_cmd_counter(struct target_cmd_counter *cmd_cnt)
{}
EXPORT_SYMBOL_GPL(target_free_cmd_counter);

/**
 * transport_init_session - initialize a session object
 * @se_sess: Session object pointer.
 *
 * The caller must have zero-initialized @se_sess before calling this function.
 */
void transport_init_session(struct se_session *se_sess)
{}
EXPORT_SYMBOL(transport_init_session);

/**
 * transport_alloc_session - allocate a session object and initialize it
 * @sup_prot_ops: bitmask that defines which T10-PI modes are supported.
 */
struct se_session *transport_alloc_session(enum target_prot_op sup_prot_ops)
{}
EXPORT_SYMBOL(transport_alloc_session);

/**
 * transport_alloc_session_tags - allocate target driver private data
 * @se_sess:  Session pointer.
 * @tag_num:  Maximum number of in-flight commands between initiator and target.
 * @tag_size: Size in bytes of the private data a target driver associates with
 *	      each command.
 */
int transport_alloc_session_tags(struct se_session *se_sess,
			         unsigned int tag_num, unsigned int tag_size)
{}
EXPORT_SYMBOL(transport_alloc_session_tags);

/**
 * transport_init_session_tags - allocate a session and target driver private data
 * @tag_num:  Maximum number of in-flight commands between initiator and target.
 * @tag_size: Size in bytes of the private data a target driver associates with
 *	      each command.
 * @sup_prot_ops: bitmask that defines which T10-PI modes are supported.
 */
/*
 * Allocates a session and its per-command tag pool in one step (see the
 * kernel-doc above); body elided in this excerpt.
 */
static struct se_session *
transport_init_session_tags(unsigned int tag_num, unsigned int tag_size,
			    enum target_prot_op sup_prot_ops)
{}

/*
 * Called with spin_lock_irqsave(&struct se_portal_group->session_lock called.
 */
void __transport_register_session(
	struct se_portal_group *se_tpg,
	struct se_node_acl *se_nacl,
	struct se_session *se_sess,
	void *fabric_sess_ptr)
{}
EXPORT_SYMBOL(__transport_register_session);

/* Locking wrapper around __transport_register_session(); body elided. */
void transport_register_session(
	struct se_portal_group *se_tpg,
	struct se_node_acl *se_nacl,
	struct se_session *se_sess,
	void *fabric_sess_ptr)
{}
EXPORT_SYMBOL(transport_register_session);

/*
 * Allocate a session, resolve the node ACL for @initiatorname and
 * register the session with @tpg; @callback lets the fabric driver
 * finish its own setup. Body elided in this excerpt.
 */
struct se_session *
target_setup_session(struct se_portal_group *tpg,
		     unsigned int tag_num, unsigned int tag_size,
		     enum target_prot_op prot_op,
		     const char *initiatorname, void *private,
		     int (*callback)(struct se_portal_group *,
				     struct se_session *, void *))
{}
EXPORT_SYMBOL(target_setup_session);

/* configfs attribute show handler for dynamic sessions; body elided. */
ssize_t target_show_dynamic_sessions(struct se_portal_group *se_tpg, char *page)
{}
EXPORT_SYMBOL(target_show_dynamic_sessions);

/* kref release callback for a node ACL; body elided. */
static void target_complete_nacl(struct kref *kref)
{}

/* Drop a reference on @nacl; body elided. */
void target_put_nacl(struct se_node_acl *nacl)
{}
EXPORT_SYMBOL(target_put_nacl);

void transport_deregister_session_configfs(struct se_session *se_sess)
{}
EXPORT_SYMBOL(transport_deregister_session_configfs);

void transport_free_session(struct se_session *se_sess)
{}
EXPORT_SYMBOL(transport_free_session);

/* Per-device iterator callback used during session teardown; body elided. */
static int target_release_res(struct se_device *dev, void *data)
{}

void transport_deregister_session(struct se_session *se_sess)
{}
EXPORT_SYMBOL(transport_deregister_session);

/* Stop, deregister and free @se_sess in a single call; body elided. */
void target_remove_session(struct se_session *se_sess)
{}
EXPORT_SYMBOL(target_remove_session);

/* Remove @cmd from its device's state list (name-derived); body elided. */
static void target_remove_from_state_list(struct se_cmd *cmd)
{}

/* Remove @cmd from its device's TMR list (name-derived); body elided. */
static void target_remove_from_tmr_list(struct se_cmd *cmd)
{}
/*
 * This function is called by the target core after the target core has
 * finished processing a SCSI command or SCSI TMF. Both the regular command
 * processing code and the code for aborting commands can call this
 * function. CMD_T_STOP is set if and only if another thread is waiting
 * inside transport_wait_for_tasks() for t_transport_stop_comp.
 */
static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd)
{}

/* Drop @cmd's association with its se_lun (name-derived); body elided. */
static void transport_lun_remove_cmd(struct se_cmd *cmd)
{}

/* Workqueue handler for commands completing with failure status; body elided. */
static void target_complete_failure_work(struct work_struct *work)
{}

/*
 * Used when asking transport to copy Sense Data from the underlying
 * Linux/SCSI struct scsi_cmnd
 */
/* Return the buffer sense data should be copied into; body elided. */
static unsigned char *transport_get_sense_buffer(struct se_cmd *cmd)
{}

void transport_copy_sense_to_cmd(struct se_cmd *cmd, unsigned char *sense)
{}
EXPORT_SYMBOL(transport_copy_sense_to_cmd);

/* Deliver the abort completion path for @cmd; body elided. */
static void target_handle_abort(struct se_cmd *cmd)
{}

/* Workqueue wrapper for target_handle_abort(); body elided. */
static void target_abort_work(struct work_struct *work)
{}

/* Report whether @cmd was stopped or aborted mid-flight; body elided. */
static bool target_cmd_interrupted(struct se_cmd *cmd)
{}

/* May be called from interrupt context so must not sleep. */
void target_complete_cmd_with_sense(struct se_cmd *cmd, u8 scsi_status,
				    sense_reason_t sense_reason)
{}
EXPORT_SYMBOL(target_complete_cmd_with_sense);

void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
{}
EXPORT_SYMBOL(target_complete_cmd);

void target_set_cmd_data_length(struct se_cmd *cmd, int length)
{}
EXPORT_SYMBOL(target_set_cmd_data_length);

void target_complete_cmd_with_length(struct se_cmd *cmd, u8 scsi_status, int length)
{}
EXPORT_SYMBOL(target_complete_cmd_with_length);

/* Add @cmd to its device's state list (name-derived); body elided. */
static void target_add_to_state_list(struct se_cmd *cmd)
{}

/*
 * Handle QUEUE_FULL / -EAGAIN and -ENOMEM status
 */
static void transport_write_pending_qf(struct se_cmd *cmd);
static void transport_complete_qf(struct se_cmd *cmd);

/* Workqueue handler that retries commands parked on queue-full; body elided. */
void target_qf_do_work(struct work_struct *work)
{}

/* Return a printable name for @cmd's data direction; body elided. */
unsigned char *transport_dump_cmd_direction(struct se_cmd *cmd)
{}

/* Format device state into @b, tracking length via *bl; body elided. */
void transport_dump_dev_state(
	struct se_device *dev,
	char *b,
	int *bl)
{}

/* Pretty-print the VPD protocol identifier into @p_buf; body elided. */
void transport_dump_vpd_proto_id(
	struct t10_vpd *vpd,
	unsigned char *p_buf,
	int p_buf_len)
{}

/*
 * The transport_set_vpd_* helpers below parse fields out of an INQUIRY
 * VPD page 0x83 descriptor (@page_83) into @vpd; the matching
 * transport_dump_vpd_* helpers pretty-print them. Bodies elided in this
 * excerpt.
 */
void
transport_set_vpd_proto_id(struct t10_vpd *vpd, unsigned char *page_83)
{}
EXPORT_SYMBOL(transport_set_vpd_proto_id);

int transport_dump_vpd_assoc(
	struct t10_vpd *vpd,
	unsigned char *p_buf,
	int p_buf_len)
{}

int transport_set_vpd_assoc(struct t10_vpd *vpd, unsigned char *page_83)
{}
EXPORT_SYMBOL(transport_set_vpd_assoc);

int transport_dump_vpd_ident_type(
	struct t10_vpd *vpd,
	unsigned char *p_buf,
	int p_buf_len)
{}

int transport_set_vpd_ident_type(struct t10_vpd *vpd, unsigned char *page_83)
{}
EXPORT_SYMBOL(transport_set_vpd_ident_type);

int transport_dump_vpd_ident(
	struct t10_vpd *vpd,
	unsigned char *p_buf,
	int p_buf_len)
{}

int
transport_set_vpd_ident(struct t10_vpd *vpd, unsigned char *page_83)
{}
EXPORT_SYMBOL(transport_set_vpd_ident);

/*
 * Enforce the backend's max_data_sg_nents limit against @size
 * (name-derived); body elided in this excerpt.
 */
static sense_reason_t
target_check_max_data_sg_nents(struct se_cmd *cmd, struct se_device *dev,
			       unsigned int size)
{}

/**
 * target_cmd_size_check - Check whether there will be a residual.
 * @cmd: SCSI command.
 * @size: Data buffer size derived from CDB. The data buffer size provided by
 *   the SCSI transport driver is available in @cmd->data_length.
 *
 * Compare the data buffer size from the CDB with the data buffer limit from the transport
 * header. Set @cmd->residual_count and SCF_OVERFLOW_BIT or SCF_UNDERFLOW_BIT if necessary.
 *
 * Note: target drivers set @cmd->data_length by calling __target_init_cmd().
 *
 * Return: TCM_NO_SENSE
 */
sense_reason_t
target_cmd_size_check(struct se_cmd *cmd, unsigned int size)
{}

/*
 * Used by fabric modules containing a local struct se_cmd within their
 * fabric dependent per I/O descriptor.
 *
 * Preserves the value of @cmd->tag.
 */
void __target_init_cmd(struct se_cmd *cmd,
		       const struct target_core_fabric_ops *tfo,
		       struct se_session *se_sess, u32 data_length,
		       int data_direction, int task_attr,
		       unsigned char *sense_buffer, u64 unpacked_lun,
		       struct target_cmd_counter *cmd_cnt)
{}
EXPORT_SYMBOL(__target_init_cmd);

/* Validate the SAM task attribute requested for @cmd; body elided. */
static sense_reason_t
transport_check_alloc_task_attr(struct se_cmd *cmd)
{}

/* Stash @cdb into @cmd, allocating with @gfp if needed; body elided. */
sense_reason_t
target_cmd_init_cdb(struct se_cmd *cmd, unsigned char *cdb, gfp_t gfp)
{}
EXPORT_SYMBOL(target_cmd_init_cdb);

/* Parse @cmd's CDB and set up its execution callbacks; body elided. */
sense_reason_t
target_cmd_parse_cdb(struct se_cmd *cmd)
{}
EXPORT_SYMBOL(target_cmd_parse_cdb);

/* Final submission step shared by the submit paths; body elided. */
static int __target_submit(struct se_cmd *cmd)
{}

/* Attach caller-provided scatterlists to @cmd (name-derived); body elided. */
sense_reason_t
transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *sgl,
		u32 sgl_count, struct scatterlist *sgl_bidi, u32 sgl_bidi_count)
{}

/**
 * target_init_cmd - initialize se_cmd
 * @se_cmd: command descriptor to init
 * @se_sess: associated se_sess for endpoint
 * @sense: pointer to SCSI sense buffer
 * @unpacked_lun: unpacked LUN to reference for struct se_lun
 * @data_length: fabric expected data transfer length
 * @task_attr: SAM task attribute
 * @data_dir: DMA data direction
 * @flags: flags for command submission from target_sc_flags_tables
 *
 * Task tags are supported if the caller has set @se_cmd->tag.
 *
 * Returns:
 *	- less than zero to signal active I/O shutdown failure.
 *	- zero on success.
 *
 * If the fabric driver calls target_stop_session, then it must check the
 * return code and handle failures. This will never fail for other drivers,
 * and the return code can be ignored.
 */
int target_init_cmd(struct se_cmd *se_cmd, struct se_session *se_sess,
		    unsigned char *sense, u64 unpacked_lun,
		    u32 data_length, int task_attr, int data_dir, int flags)
{}
EXPORT_SYMBOL_GPL(target_init_cmd);

/**
 * target_submit_prep - prepare cmd for submission
 * @se_cmd: command descriptor to prep
 * @cdb: pointer to SCSI CDB
 * @sgl: struct scatterlist memory for unidirectional mapping
 * @sgl_count: scatterlist count for unidirectional mapping
 * @sgl_bidi: struct scatterlist memory for bidirectional READ mapping
 * @sgl_bidi_count: scatterlist count for bidirectional READ mapping
 * @sgl_prot: struct scatterlist memory protection information
 * @sgl_prot_count: scatterlist count for protection information
 * @gfp: gfp allocation type
 *
 * Returns:
 *	- less than zero to signal failure.
 *	- zero on success.
 *
 * If failure is returned, lio will the callers queue_status to complete
 * the cmd.
 */
int target_submit_prep(struct se_cmd *se_cmd, unsigned char *cdb,
		       struct scatterlist *sgl, u32 sgl_count,
		       struct scatterlist *sgl_bidi, u32 sgl_bidi_count,
		       struct scatterlist *sgl_prot, u32 sgl_prot_count,
		       gfp_t gfp)
{}
EXPORT_SYMBOL_GPL(target_submit_prep);

/**
 * target_submit_cmd - lookup unpacked lun and submit uninitialized se_cmd
 *
 * @se_cmd: command descriptor to submit
 * @se_sess: associated se_sess for endpoint
 * @cdb: pointer to SCSI CDB
 * @sense: pointer to SCSI sense buffer
 * @unpacked_lun: unpacked LUN to reference for struct se_lun
 * @data_length: fabric expected data transfer length
 * @task_attr: SAM task attribute
 * @data_dir: DMA data direction
 * @flags: flags for command submission from target_sc_flags_tables
 *
 * Task tags are supported if the caller has set @se_cmd->tag.
 *
 * This may only be called from process context, and also currently
 * assumes internal allocation of fabric payload buffer by target-core.
 *
 * It also assumes interal target core SGL memory allocation.
 *
 * This function must only be used by drivers that do their own
 * sync during shutdown and does not use target_stop_session. If there
 * is a failure this function will call into the fabric driver's
 * queue_status with a CHECK_CONDITION.
 */
void target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess,
		unsigned char *cdb, unsigned char *sense, u64 unpacked_lun,
		u32 data_length, int task_attr, int data_dir, int flags)
{}
EXPORT_SYMBOL(target_submit_cmd);


/* Open a device-level batching window (presumably akin to a blk plug). */
static struct se_dev_plug *target_plug_device(struct se_device *se_dev)
{}

/* Close the batching window opened by target_plug_device(); body elided. */
static void target_unplug_device(struct se_dev_plug *se_plug)
{}

/* Workqueue handler draining queued submissions; body elided. */
void target_queued_submit_work(struct work_struct *work)
{}

/**
 * target_queue_submission - queue the cmd to run on the LIO workqueue
 * @se_cmd: command descriptor to submit
 */
static void target_queue_submission(struct se_cmd *se_cmd)
{}

/**
 * target_submit - perform final initialization and submit cmd to LIO core
 * @se_cmd: command descriptor to submit
 *
 * target_submit_prep or something similar must have been called on the cmd,
 * and this must be called from process context.
 */
int target_submit(struct se_cmd *se_cmd)
{}
EXPORT_SYMBOL_GPL(target_submit);

/* Workqueue handler completing a TMR as failed; body elided. */
static void target_complete_tmr_failure(struct work_struct *work)
{}

/**
 * target_submit_tmr - lookup unpacked lun and submit uninitialized se_cmd
 *                     for TMR CDBs
 *
 * @se_cmd: command descriptor to submit
 * @se_sess: associated se_sess for endpoint
 * @sense: pointer to SCSI sense buffer
 * @unpacked_lun: unpacked LUN to reference for struct se_lun
 * @fabric_tmr_ptr: fabric context for TMR req
 * @tm_type: Type of TM request
 * @gfp: gfp type for caller
 * @tag: referenced task tag for TMR_ABORT_TASK
 * @flags: submit cmd flags
 *
 * Callable from all contexts.
 **/

int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess,
		unsigned char *sense, u64 unpacked_lun,
		void *fabric_tmr_ptr, unsigned char tm_type,
		gfp_t gfp, u64 tag, int flags)
{}
EXPORT_SYMBOL(target_submit_tmr);

/*
 * Handle SAM-esque emulation for generic transport request failures.
 */
void transport_generic_request_failure(struct se_cmd *cmd,
		sense_reason_t sense_reason)
{}
EXPORT_SYMBOL(transport_generic_request_failure);

/* Hand @cmd to the backend for execution; body elided in this excerpt. */
void __target_execute_cmd(struct se_cmd *cmd, bool do_checks)
{}

/* T10-PI write-protection preparation (name-derived); body elided. */
static int target_write_prot_action(struct se_cmd *cmd)
{}

/* SAM task-attribute gating; presumably reports whether @cmd must wait. */
static bool target_handle_task_attr(struct se_cmd *cmd)
{}

void target_execute_cmd(struct se_cmd *cmd)
{}
EXPORT_SYMBOL(target_execute_cmd);

/*
 * Process all commands up to the last received ORDERED task attribute which
 * requires another blocking boundary
 */
/* Workqueue handler draining the delayed-command list; body elided. */
void target_do_delayed_work(struct work_struct *work)
{}

/*
 * Called from I/O completion to determine which dormant/delayed
 * and ordered cmds need to have their tasks added to the execution queue.
 */
static void transport_complete_task_attr(struct se_cmd *cmd)
{}

/* Retry a completion that previously hit queue-full; body elided. */
static void transport_complete_qf(struct se_cmd *cmd)
{}

/* Park @cmd for later retry after a queue-full condition; body elided. */
static void transport_handle_queue_full(struct se_cmd *cmd, struct se_device *dev,
					int err, bool write_pending)
{}

/* T10-PI read-protection decision (name-derived); body elided. */
static bool target_read_prot_action(struct se_cmd *cmd)
{}

/* Workqueue handler for successful command completion; body elided. */
static void target_complete_ok_work(struct work_struct *work)
{}

/* Free the pages backing @sgl and the scatterlist itself; body elided. */
void target_free_sgl(struct scatterlist *sgl, int nents)
{}
EXPORT_SYMBOL(target_free_sgl);

static inline void transport_reset_sgl_orig(struct se_cmd *cmd)
{}

static inline void transport_free_pages(struct se_cmd *cmd)
{}

/* Map @cmd's data scatterlist for CPU access; body elided. */
void *transport_kmap_data_sg(struct se_cmd *cmd)
{}
EXPORT_SYMBOL(transport_kmap_data_sg);

/* Undo transport_kmap_data_sg(); body elided. */
void transport_kunmap_data_sg(struct se_cmd *cmd)
{}
EXPORT_SYMBOL(transport_kunmap_data_sg);

/* Allocate a scatterlist covering @length bytes; body elided. */
int
target_alloc_sgl(struct scatterlist **sgl, unsigned int *nents, u32 length,
		 bool zero_page, bool chainable)
{}
EXPORT_SYMBOL(target_alloc_sgl);

/*
 * Allocate any required resources to execute the command.  For writes we
 * might not have the payload yet, so notify the fabric via a call to
 * ->write_pending instead. Otherwise place it on the execution queue.
 */
sense_reason_t
transport_generic_new_cmd(struct se_cmd *cmd)
{}
EXPORT_SYMBOL(transport_generic_new_cmd);

/* Retry ->write_pending() after an earlier queue-full; body elided. */
static void transport_write_pending_qf(struct se_cmd *cmd)
{}

static bool
__transport_wait_for_tasks(struct se_cmd *, bool, bool *, bool *,
			   unsigned long *flags);

/* Wait for @cmd to quiesce before freeing; body elided. */
static void target_wait_free_cmd(struct se_cmd *cmd, bool *aborted, bool *tas)
{}

/*
 * Call target_put_sess_cmd() and wait until target_release_cmd_kref(@cmd) has
 * finished.
 */
/* @cmd: command whose reference to drop; see the comment above. */
void target_put_cmd_and_wait(struct se_cmd *cmd)
{}

/*
 * This function is called by frontend drivers after processing of a command
 * has finished.
 *
 * The protocol for ensuring that either the regular frontend command
 * processing flow or target_handle_abort() code drops one reference is as
 * follows:
 * - Calling .queue_data_in(), .queue_status() or queue_tm_rsp() will cause
 *   the frontend driver to call this function synchronously or asynchronously.
 *   That will cause one reference to be dropped.
 * - During regular command processing the target core sets CMD_T_COMPLETE
 *   before invoking one of the .queue_*() functions.
 * - The code that aborts commands skips commands and TMFs for which
 *   CMD_T_COMPLETE has been set.
 * - CMD_T_ABORTED is set atomically after the CMD_T_COMPLETE check for
 *   commands that will be aborted.
 * - If the CMD_T_ABORTED flag is set but CMD_T_TAS has not been set
 *   transport_generic_free_cmd() skips its call to target_put_sess_cmd().
 * - For aborted commands for which CMD_T_TAS has been set .queue_status() will
 *   be called and will drop a reference.
 * - For aborted commands for which CMD_T_TAS has not been set .aborted_task()
 *   will be called. target_handle_abort() will drop the final reference.
 */
int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
{}
EXPORT_SYMBOL(transport_generic_free_cmd);

/**
 * target_get_sess_cmd - Verify the session is accepting cmds and take ref
 * @se_cmd:	command descriptor to add
 * @ack_kref:	Signal that fabric will perform an ack target_put_sess_cmd()
 */
int target_get_sess_cmd(struct se_cmd *se_cmd, bool ack_kref)
{}
EXPORT_SYMBOL(target_get_sess_cmd);

/* Release memory owned by @cmd (name-derived); body elided. */
static void target_free_cmd_mem(struct se_cmd *cmd)
{}

/* kref release callback for an se_cmd; body elided. */
static void target_release_cmd_kref(struct kref *kref)
{}

/**
 * target_put_sess_cmd - decrease the command reference count
 * @se_cmd:	command to drop a reference from
 *
 * Returns 1 if and only if this target_put_sess_cmd() call caused the
 * refcount to drop to zero. Returns zero otherwise.
 */
int target_put_sess_cmd(struct se_cmd *se_cmd)
{}
EXPORT_SYMBOL(target_put_sess_cmd);

/* Printable name for a dma_data_direction value; body elided. */
static const char *data_dir_name(enum dma_data_direction d)
{}

/* Printable name for a transport state; body elided. */
static const char *cmd_state_name(enum transport_state_table t)
{}

/* Append @txt to the string at *str (name-derived); body elided. */
static void target_append_str(char **str, const char *txt)
{}

/*
 * Convert a transport state bitmask into a string. The caller is
 * responsible for freeing the returned pointer.
 */
static char *target_ts_to_str(u32 ts)
{}

/* Printable name for a task-management function; body elided. */
static const char *target_tmf_name(enum tcm_tmreq_table tmf)
{}

/* Log a human-readable description of @cmd prefixed by @pfx; body elided. */
void target_show_cmd(const char *pfx, struct se_cmd *cmd)
{}
EXPORT_SYMBOL(target_show_cmd);

/* percpu_ref kill-confirmation callback for a command counter; body elided. */
static void target_stop_cmd_counter_confirm(struct percpu_ref *ref)
{}

/**
 * target_stop_cmd_counter - Stop new IO from being added to the counter.
 * @cmd_cnt: counter to stop
 */
void target_stop_cmd_counter(struct target_cmd_counter *cmd_cnt)
{}
EXPORT_SYMBOL_GPL(target_stop_cmd_counter);

/**
 * target_stop_session - Stop new IO from being queued on the session.
 * @se_sess: session to stop
 */
void target_stop_session(struct se_session *se_sess)
{}
EXPORT_SYMBOL(target_stop_session);

/**
 * target_wait_for_cmds - Wait for outstanding cmds.
 * @cmd_cnt: counter to wait for active I/O for.
 */
void target_wait_for_cmds(struct target_cmd_counter *cmd_cnt)
{}
EXPORT_SYMBOL_GPL(target_wait_for_cmds);

/**
 * target_wait_for_sess_cmds - Wait for outstanding commands
 * @se_sess: session to wait for active I/O
 */
void target_wait_for_sess_cmds(struct se_session *se_sess)
{}
EXPORT_SYMBOL(target_wait_for_sess_cmds);

/*
 * Prevent that new percpu_ref_tryget_live() calls succeed and wait until
 * all references to the LUN have been released. Called during LUN shutdown.
 */
void transport_clear_lun_ref(struct se_lun *lun)
{}

/* Core of transport_wait_for_tasks(); drops and retakes t_state_lock. */
static bool
__transport_wait_for_tasks(struct se_cmd *cmd, bool fabric_stop,
			   bool *aborted, bool *tas, unsigned long *flags)
	__releases(&cmd->t_state_lock)
	__acquires(&cmd->t_state_lock)
{}

/**
 * transport_wait_for_tasks - set CMD_T_STOP and wait for t_transport_stop_comp
 * @cmd: command to wait on
 */
bool transport_wait_for_tasks(struct se_cmd *cmd)
{}
EXPORT_SYMBOL(transport_wait_for_tasks);

/* Maps a sense_reason_t to T10 sense data; fields elided in this excerpt. */
struct sense_detail {};

/* Lookup table used by translate_sense_reason(); initializer elided. */
static const struct sense_detail sense_detail_table[] =;

/**
 * translate_sense_reason - translate a sense reason into T10 key, asc and ascq
 * @cmd: SCSI command in which the resulting sense buffer or SCSI status will
 *   be stored.
 * @reason: LIO sense reason code. If this argument has the value
 *   TCM_CHECK_CONDITION_UNIT_ATTENTION, try to dequeue a unit attention. If
 *   dequeuing a unit attention fails due to multiple commands being processed
 *   concurrently, set the command status to BUSY.
 *
 * Return: 0 upon success or -EINVAL if the sense buffer is too small.
 */
static void translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason)
{}

/* Build sense data for @reason and queue status to the fabric; body elided. */
int
transport_send_check_condition_and_sense(struct se_cmd *cmd,
		sense_reason_t reason, int from_transport)
{}
EXPORT_SYMBOL(transport_send_check_condition_and_sense);

/**
 * target_send_busy - Send SCSI BUSY status back to the initiator
 * @cmd: SCSI command for which to send a BUSY reply.
 *
 * Note: Only call this function if target_submit_cmd*() failed.
 */
int target_send_busy(struct se_cmd *cmd)
{}
EXPORT_SYMBOL(target_send_busy);

/* Workqueue handler that executes a queued TMR; body elided. */
static void target_tmr_work(struct work_struct *work)
{}

int transport_generic_handle_tmr(
	struct se_cmd *cmd)
{}
EXPORT_SYMBOL(transport_generic_handle_tmr);

/* Presumably reports whether write-cache emulation is enabled for @dev. */
bool
target_check_wce(struct se_device *dev)
{}

/* Presumably reports whether FUA (force unit access) is honoured for @dev. */
bool
target_check_fua(struct se_device *dev)
{}
{}