
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 1999 Eric Youngdale
 * Copyright (C) 2014 Christoph Hellwig
 *
 *  SCSI queueing library.
 *      Initial versions: Eric Youngdale ([email protected]).
 *                        Based upon conversations with large numbers
 *                        of people at Linux Expo.
 */

#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/completion.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/hardirq.h>
#include <linux/scatterlist.h>
#include <linux/blk-mq.h>
#include <linux/blk-integrity.h>
#include <linux/ratelimit.h>
#include <asm/unaligned.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport.h> /* scsi_init_limits() */
#include <scsi/scsi_dh.h>

#include <trace/events/scsi.h>

#include "scsi_debugfs.h"
#include "scsi_priv.h"
#include "scsi_logging.h"

/*
 * Size of integrity metadata is usually small, so one inline sg entry
 * should cover normal cases.
 */
#ifdef CONFIG_ARCH_NO_SG_CHAIN
#define  SCSI_INLINE_PROT_SG_CNT  0
#define  SCSI_INLINE_SG_CNT  0
#else
#define  SCSI_INLINE_PROT_SG_CNT  1
#define  SCSI_INLINE_SG_CNT  2
#endif

static struct kmem_cache *scsi_sense_cache;
static DEFINE_MUTEX(scsi_sense_cache_mutex);

static void scsi_mq_uninit_cmd(struct scsi_cmnd *cmd);

int scsi_init_sense_cache(struct Scsi_Host *shost)
{}

static void
scsi_set_blocked(struct scsi_cmnd *cmd, int reason)
{}

static void scsi_mq_requeue_cmd(struct scsi_cmnd *cmd, unsigned long msecs)
{}

/**
 * __scsi_queue_insert - private queue insertion
 * @cmd: The SCSI command being requeued
 * @reason:  The reason for the requeue
 * @unbusy: Whether the queue should be unbusied
 *
 * This is a private queue insertion.  The public interface
 * scsi_queue_insert() always assumes the queue should be unbusied
 * because it's always called before the completion.  This function is
 * for a requeue after completion, which should only occur in this
 * file.
 */
static void __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, bool unbusy)
{}

/**
 * scsi_queue_insert - Reinsert a command in the queue.
 * @cmd:    command that we are adding to queue.
 * @reason: why we are inserting command to queue.
 *
 * We do this for one of two cases. Either the host is busy and it cannot accept
 * any more commands for the time being, or the device returned QUEUE_FULL and
 * can accept no more commands.
 *
 * Context: This could be called either from an interrupt context or a normal
 * process context.
 */
void scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
{}

void scsi_failures_reset_retries(struct scsi_failures *failures)
{}
EXPORT_SYMBOL_GPL(scsi_failures_reset_retries);

/**
 * scsi_check_passthrough - Determine if passthrough scsi_cmnd needs a retry.
 * @scmd: scsi_cmnd to check.
 * @failures: scsi_failures struct that lists failures to check for.
 *
 * Returns -EAGAIN if the caller should retry, else 0.
 */
static int scsi_check_passthrough(struct scsi_cmnd *scmd,
				  struct scsi_failures *failures)
{}
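
/*
 * Illustrative sketch (not part of the driver): the shape of a failures
 * table that scsi_check_passthrough() evaluates. The scsi_failure layout
 * and the SCMD_FAILURE_*_ANY / SCMD_FAILURE_NO_LIMIT wildcards come from
 * <scsi/scsi_cmnd.h>; callers hand such a table to scsi_execute_cmd()
 * through scsi_exec_args.failures. The example_* names are placeholders.
 */
#if 0
/* Retry any UNIT ATTENTION up to five times, and a BUSY status forever. */
static struct scsi_failure example_failure_defs[] = {
	{
		.sense = UNIT_ATTENTION,
		.asc = SCMD_FAILURE_ASC_ANY,
		.ascq = SCMD_FAILURE_ASCQ_ANY,
		.allowed = 5,
		.result = SAM_STAT_CHECK_CONDITION,
	},
	{
		.allowed = SCMD_FAILURE_NO_LIMIT,
		.result = SAM_STAT_BUSY,
	},
	{}	/* terminating entry */
};

static struct scsi_failures example_failures = {
	.failure_definitions = example_failure_defs,
};
#endif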

/**
 * scsi_execute_cmd - insert request and wait for the result
 * @sdev:	scsi_device
 * @cmd:	scsi command
 * @opf:	block layer request cmd_flags
 * @buffer:	data buffer
 * @bufflen:	len of buffer
 * @timeout:	request timeout in HZ
 * @ml_retries:	number of times SCSI midlayer will retry request
 * @args:	Optional args. See struct definition for field descriptions
 *
 * Returns the scsi_cmnd result field if a command was executed, or a negative
 * Linux error code if we didn't get that far.
 */
int scsi_execute_cmd(struct scsi_device *sdev, const unsigned char *cmd,
		     blk_opf_t opf, void *buffer, unsigned int bufflen,
		     int timeout, int ml_retries,
		     const struct scsi_exec_args *args)
{}
EXPORT_SYMBOL(scsi_execute_cmd);
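
/*
 * Illustrative sketch (not part of the driver): a minimal scsi_execute_cmd()
 * caller issuing TEST UNIT READY with no data phase and decoding sense data
 * on failure. Timeout and retry counts are arbitrary example values and
 * example_test_unit_ready() is a hypothetical helper.
 */
#if 0
static int example_test_unit_ready(struct scsi_device *sdev)
{
	unsigned char cdb[6] = { TEST_UNIT_READY, 0, 0, 0, 0, 0 };
	struct scsi_sense_hdr sshdr;
	const struct scsi_exec_args exec_args = {
		.sshdr = &sshdr,	/* decoded sense is returned here */
	};
	int result;

	/* REQ_OP_DRV_IN with a zero-length buffer: no data transfer. */
	result = scsi_execute_cmd(sdev, cdb, REQ_OP_DRV_IN, NULL, 0,
				  10 * HZ, 3, &exec_args);
	if (result > 0 && scsi_sense_valid(&sshdr))
		sdev_printk(KERN_INFO, sdev, "TUR sense: %x/%x/%x\n",
			    sshdr.sense_key, sshdr.asc, sshdr.ascq);
	return result;
}
#endif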

/*
 * Wake up the error handler if necessary. To avoid missing a wakeup when the
 * number of host in-flight requests equals shost->host_failed, call_rcu() is
 * used in scsi_eh_scmd_add() in combination with an RCU read lock in this
 * function: this ensures that this function in its entirety either finishes
 * before scsi_eh_scmd_add() increases the host_failed counter, or that it
 * notices the shost state change made by scsi_eh_scmd_add().
 */
static void scsi_dec_host_busy(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
{}

void scsi_device_unbusy(struct scsi_device *sdev, struct scsi_cmnd *cmd)
{}

/*
 * Kick the queue of SCSI device @sdev if @sdev != current_sdev. Called with
 * interrupts disabled.
 */
static void scsi_kick_sdev_queue(struct scsi_device *sdev, void *data)
{}

/*
 * Called for single_lun devices on IO completion. Clear starget_sdev_user,
 * and call blk_run_queue for all the scsi_devices on the target -
 * including current_sdev first.
 *
 * Called with *no* scsi locks held.
 */
static void scsi_single_lun_run(struct scsi_device *current_sdev)
{}

static inline bool scsi_device_is_busy(struct scsi_device *sdev)
{}

static inline bool scsi_target_is_busy(struct scsi_target *starget)
{}

static inline bool scsi_host_is_busy(struct Scsi_Host *shost)
{}

static void scsi_starved_list_run(struct Scsi_Host *shost)
{}

/**
 * scsi_run_queue - Select a proper request queue to serve next.
 * @q:  last request's queue
 *
 * The previous command was completely finished, start a new one if possible.
 */
static void scsi_run_queue(struct request_queue *q)
{}

void scsi_requeue_run_queue(struct work_struct *work)
{}

void scsi_run_host_queues(struct Scsi_Host *shost)
{}

static void scsi_uninit_cmd(struct scsi_cmnd *cmd)
{}

void scsi_free_sgtables(struct scsi_cmnd *cmd)
{}
EXPORT_SYMBOL_GPL(scsi_free_sgtables);

static void scsi_mq_uninit_cmd(struct scsi_cmnd *cmd)
{}

static void scsi_run_queue_async(struct scsi_device *sdev)
{}

/* Returns false when no more bytes to process, true if there are more */
static bool scsi_end_request(struct request *req, blk_status_t error,
		unsigned int bytes)
{}

/**
 * scsi_result_to_blk_status - translate a SCSI result code into blk_status_t
 * @result:	scsi error code
 *
 * Translate a SCSI result code into a blk_status_t value.
 */
static blk_status_t scsi_result_to_blk_status(int result)
{}

/**
 * scsi_rq_err_bytes - determine number of bytes till the next failure boundary
 * @rq: request to examine
 *
 * Description:
 *     A request could be a merge of IOs which require different failure
 *     handling.  This function determines the number of bytes which
 *     can be failed from the beginning of the request without
 *     crossing into an area which needs to be retried further.
 *
 * Return:
 *     The number of bytes to fail.
 */
static unsigned int scsi_rq_err_bytes(const struct request *rq)
{}

static bool scsi_cmd_runtime_exceeced(struct scsi_cmnd *cmd)
{}

/*
 * When the ALUA transition state is returned, reprep the cmd to
 * use the ALUA handler's transition timeout. Delay the reprep by
 * one second to avoid aggressive retries of the target in that
 * state.
 */
#define ALUA_TRANSITION_REPREP_DELAY	1000	/* milliseconds */

/* Helper for scsi_io_completion() when special action required. */
static void scsi_io_completion_action(struct scsi_cmnd *cmd, int result)
{}

/*
 * Helper for scsi_io_completion() when cmd->result is non-zero. Returns a
 * new result that may suppress further error checking. Also modifies
 * *blk_statp in some cases.
 */
static int scsi_io_completion_nz_result(struct scsi_cmnd *cmd, int result,
					blk_status_t *blk_statp)
{}

/**
 * scsi_io_completion - Completion processing for SCSI commands.
 * @cmd:	command that is finished.
 * @good_bytes:	number of processed bytes.
 *
 * We will finish off the specified number of sectors. If we are done, the
 * command block will be released and the queue function will be goosed. If we
 * are not done then we have to figure out what to do next:
 *
 *   a) We can call scsi_mq_requeue_cmd().  The request will be
 *	unprepared and put back on the queue.  Then a new command will
 *	be created for it.  This should be used if we made forward
 *	progress, or if we want to switch from READ(10) to READ(6) for
 *	example.
 *
 *   b) We can call scsi_io_completion_action().  The request will be
 *	put back on the queue and retried using the same command as
 *	before, possibly after a delay.
 *
 *   c) We can call scsi_end_request() with blk_stat other than
 *	BLK_STS_OK, to fail the remainder of the request.
 */
void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
{}

static inline bool scsi_cmd_needs_dma_drain(struct scsi_device *sdev,
		struct request *rq)
{}

/**
 * scsi_alloc_sgtables - Allocate and initialize data and integrity scatterlists
 * @cmd: SCSI command data structure to initialize.
 *
 * Initializes @cmd->sdb and also @cmd->prot_sdb if data integrity is enabled
 * for @cmd.
 *
 * Returns:
 * * BLK_STS_OK       - on success
 * * BLK_STS_RESOURCE - if the failure is retryable
 * * BLK_STS_IOERR    - if the failure is fatal
 */
blk_status_t scsi_alloc_sgtables(struct scsi_cmnd *cmd)
{}
EXPORT_SYMBOL(scsi_alloc_sgtables);
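
/*
 * Illustrative sketch (not part of the driver): how an upper-level driver's
 * command setup typically consumes scsi_alloc_sgtables(), propagating
 * BLK_STS_RESOURCE (retryable) and BLK_STS_IOERR (fatal) unchanged.
 * example_init_command() is a hypothetical helper.
 */
#if 0
static blk_status_t example_init_command(struct scsi_cmnd *cmd)
{
	blk_status_t ret = scsi_alloc_sgtables(cmd);

	if (ret != BLK_STS_OK)
		return ret;

	/* ... build cmd->cmnd for the now-mapped data buffer ... */

	return BLK_STS_OK;
}
#endif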

/**
 * scsi_initialize_rq - initialize struct scsi_cmnd partially
 * @rq: Request associated with the SCSI command to be initialized.
 *
 * This function initializes the members of struct scsi_cmnd that must be
 * initialized before request processing starts and that won't be
 * reinitialized if a SCSI command is requeued.
 */
static void scsi_initialize_rq(struct request *rq)
{}

struct request *scsi_alloc_request(struct request_queue *q, blk_opf_t opf,
				   blk_mq_req_flags_t flags)
{}
EXPORT_SYMBOL_GPL(scsi_alloc_request);
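
/*
 * Illustrative sketch (not part of the driver): allocating a passthrough
 * request directly. scsi_alloc_request() returns an ERR_PTR() on failure;
 * blk_mq_free_request() releases the request. example_alloc_request() is a
 * hypothetical helper.
 */
#if 0
static int example_alloc_request(struct request_queue *q)
{
	struct request *rq;

	rq = scsi_alloc_request(q, REQ_OP_DRV_IN, 0);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	/* ... fill in the scsi_cmnd at blk_mq_rq_to_pdu(rq) and submit ... */

	blk_mq_free_request(rq);
	return 0;
}
#endif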

/*
 * Only called when the request isn't completed by SCSI, and not freed by
 * SCSI
 */
static void scsi_cleanup_rq(struct request *rq)
{}

/* Called before a request is prepared. See also scsi_mq_prep_fn(). */
void scsi_init_command(struct scsi_device *dev, struct scsi_cmnd *cmd)
{}

static blk_status_t scsi_setup_scsi_cmnd(struct scsi_device *sdev,
		struct request *req)
{}

static blk_status_t
scsi_device_state_check(struct scsi_device *sdev, struct request *req)
{}

/*
 * scsi_dev_queue_ready: if we can send requests to sdev, assign one token
 * and return it, else return -1.
 */
static inline int scsi_dev_queue_ready(struct request_queue *q,
				  struct scsi_device *sdev)
{}

/*
 * scsi_target_queue_ready: checks whether we can send commands to the target
 * @sdev: scsi device on starget to check.
 */
static inline int scsi_target_queue_ready(struct Scsi_Host *shost,
					   struct scsi_device *sdev)
{}

/*
 * scsi_host_queue_ready: if we can send requests to shost, return 1 else
 * return 0. We must end up running the queue again whenever 0 is
 * returned, else IO can hang.
 */
static inline int scsi_host_queue_ready(struct request_queue *q,
				   struct Scsi_Host *shost,
				   struct scsi_device *sdev,
				   struct scsi_cmnd *cmd)
{}

/*
 * Busy state exporting function for request stacking drivers.
 *
 * For efficiency, no lock is taken to check the busy state of
 * shost/starget/sdev, since the returned value is not guaranteed and
 * may be changed after request stacking drivers call the function,
 * regardless of taking lock or not.
 *
 * When SCSI can't dispatch I/Os anymore and needs to kill I/Os, it
 * needs to return 'not busy'. Otherwise, request stacking drivers
 * may hold requests forever.
 */
static bool scsi_mq_lld_busy(struct request_queue *q)
{}

/*
 * Block layer request completion callback. May be called from interrupt
 * context.
 */
static void scsi_complete(struct request *rq)
{}

/**
 * scsi_dispatch_cmd - Dispatch a command to the low-level driver.
 * @cmd: command block we are dispatching.
 *
 * Return: nonzero if the request was rejected and the device's queue needs
 * to be plugged.
 */
static int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
{}

/* Size in bytes of the sg-list stored in the scsi-mq command-private data. */
static unsigned int scsi_mq_inline_sgl_size(struct Scsi_Host *shost)
{}

static blk_status_t scsi_prepare_cmd(struct request *req)
{}

static void scsi_done_internal(struct scsi_cmnd *cmd, bool complete_directly)
{}

void scsi_done(struct scsi_cmnd *cmd)
{}
EXPORT_SYMBOL(scsi_done);

void scsi_done_direct(struct scsi_cmnd *cmd)
{}
EXPORT_SYMBOL(scsi_done_direct);

static void scsi_mq_put_budget(struct request_queue *q, int budget_token)
{}

/*
 * When to reinvoke queueing after a resource shortage. It's 3 msecs to
 * not change behaviour from the previous unplug mechanism; experimentation
 * may prove this needs changing.
 */
#define SCSI_QUEUE_DELAY	3

static int scsi_mq_get_budget(struct request_queue *q)
{}

static void scsi_mq_set_rq_budget_token(struct request *req, int token)
{}

static int scsi_mq_get_rq_budget_token(struct request *req)
{}

static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
			 const struct blk_mq_queue_data *bd)
{}

static int scsi_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
				unsigned int hctx_idx, unsigned int numa_node)
{}

static void scsi_mq_exit_request(struct blk_mq_tag_set *set, struct request *rq,
				 unsigned int hctx_idx)
{}


static int scsi_mq_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
{}

static int scsi_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
			  unsigned int hctx_idx)
{}

static void scsi_map_queues(struct blk_mq_tag_set *set)
{}

void scsi_init_limits(struct Scsi_Host *shost, struct queue_limits *lim)
{}
EXPORT_SYMBOL_GPL(scsi_init_limits);

static const struct blk_mq_ops scsi_mq_ops_no_commit = {};


static void scsi_commit_rqs(struct blk_mq_hw_ctx *hctx)
{}

static const struct blk_mq_ops scsi_mq_ops = {};

int scsi_mq_setup_tags(struct Scsi_Host *shost)
{}

void scsi_mq_free_tags(struct kref *kref)
{}

/**
 * scsi_device_from_queue - return sdev associated with a request_queue
 * @q: The request queue to return the sdev from
 *
 * Return the sdev associated with a request queue or NULL if the
 * request_queue does not reference a SCSI device.
 */
struct scsi_device *scsi_device_from_queue(struct request_queue *q)
{}
/*
 * pktcdvd should have been integrated into the SCSI layers, but for historical
 * reasons like the old IDE driver it isn't.  This export allows it to safely
 * probe if a given device is a SCSI one and only attach to that.
 */
#ifdef CONFIG_CDROM_PKTCDVD_MODULE
EXPORT_SYMBOL_GPL(scsi_device_from_queue);
#endif

/**
 * scsi_block_requests - Utility function used by low-level drivers to prevent
 * further commands from being queued to the device.
 * @shost:  host in question
 *
 * There is no timer nor any other means by which the requests get unblocked
 * other than the low-level driver calling scsi_unblock_requests().
 */
void scsi_block_requests(struct Scsi_Host *shost)
{}
EXPORT_SYMBOL(scsi_block_requests);

/**
 * scsi_unblock_requests - Utility function used by low-level drivers to allow
 * further commands to be queued to the device.
 * @shost:  host in question
 *
 * There is no timer nor any other means by which the requests get unblocked
 * other than the low-level driver calling scsi_unblock_requests(). This is done
 * as an API function so that changes to the internals of the scsi mid-layer
 * won't require wholesale changes to drivers that use this feature.
 */
void scsi_unblock_requests(struct Scsi_Host *shost)
{}
EXPORT_SYMBOL(scsi_unblock_requests);

void scsi_exit_queue(void)
{}

/**
 *	scsi_mode_select - issue a mode select
 *	@sdev:	SCSI device to be queried
 *	@pf:	Page format bit (1 == standard, 0 == vendor specific)
 *	@sp:	Save page bit (0 == don't save, 1 == save)
 *	@buffer: request buffer (may not be smaller than eight bytes)
 *	@len:	length of request buffer.
 *	@timeout: command timeout
 *	@retries: number of retries before failing
 *	@data: returns a structure abstracting the mode header data
 *	@sshdr: place to put sense data (or NULL if no sense to be collected).
 *		must be SCSI_SENSE_BUFFERSIZE big.
 *
 *	Returns zero if successful; negative error number or scsi
 *	status on error
 *
 */
int scsi_mode_select(struct scsi_device *sdev, int pf, int sp,
		     unsigned char *buffer, int len, int timeout, int retries,
		     struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr)
{}
EXPORT_SYMBOL_GPL(scsi_mode_select);

/**
 *	scsi_mode_sense - issue a mode sense, falling back from a ten byte to a six byte command if necessary.
 *	@sdev:	SCSI device to be queried
 *	@dbd:	set to prevent mode sense from returning block descriptors
 *	@modepage: mode page being requested
 *	@subpage: sub-page of the mode page being requested
 *	@buffer: request buffer (may not be smaller than eight bytes)
 *	@len:	length of request buffer.
 *	@timeout: command timeout
 *	@retries: number of retries before failing
 *	@data: returns a structure abstracting the mode header data
 *	@sshdr: place to put sense data (or NULL if no sense to be collected).
 *		must be SCSI_SENSE_BUFFERSIZE big.
 *
 *	Returns zero if successful, or a negative error number on failure
 */
int
scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage, int subpage,
		  unsigned char *buffer, int len, int timeout, int retries,
		  struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr)
{}
EXPORT_SYMBOL(scsi_mode_sense);
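
/*
 * Illustrative sketch (not part of the driver): a MODE SENSE / MODE SELECT
 * round trip on the caching mode page (0x08). Timeout and retry counts are
 * arbitrary example values and example_toggle_mode_page() is a hypothetical
 * helper.
 */
#if 0
static int example_toggle_mode_page(struct scsi_device *sdev,
				    unsigned char *buf, int len)
{
	struct scsi_mode_data data;
	struct scsi_sense_hdr sshdr;
	int ret;

	/* dbd=0: block descriptors allowed; page 0x08, subpage 0. */
	ret = scsi_mode_sense(sdev, 0, 0x08, 0, buf, len, 30 * HZ, 3,
			      &data, &sshdr);
	if (ret)
		return ret;

	/* ... modify the returned mode page in buf ... */

	/* pf=1: standard page format; sp=0: don't save to NV storage. */
	return scsi_mode_select(sdev, 1, 0, buf, len, 30 * HZ, 3,
				&data, &sshdr);
}
#endif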

/**
 *	scsi_test_unit_ready - test if unit is ready
 *	@sdev:	scsi device to change the state of.
 *	@timeout: command timeout
 *	@retries: number of retries before failing
 *	@sshdr: output pointer for decoded sense information.
 *
 *	Returns zero if successful, or an error if the TUR failed.  For
 *	removable media, UNIT_ATTENTION sets ->changed flag.
 **/
int
scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries,
		     struct scsi_sense_hdr *sshdr)
{}
EXPORT_SYMBOL(scsi_test_unit_ready);

/**
 *	scsi_device_set_state - Take the given device through the device state model.
 *	@sdev:	scsi device to change the state of.
 *	@state:	state to change to.
 *
 *	Returns zero if successful or an error if the requested
 *	transition is illegal.
 */
int
scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
{}
EXPORT_SYMBOL(scsi_device_set_state);

/**
 *	scsi_evt_emit - emit a single SCSI device uevent
 *	@sdev: associated SCSI device
 *	@evt: event to emit
 *
 *	Send a single uevent (scsi_event) to the associated scsi_device.
 */
static void scsi_evt_emit(struct scsi_device *sdev, struct scsi_event *evt)
{}

/**
 *	scsi_evt_thread - send a uevent for each scsi event
 *	@work: work struct for scsi_device
 *
 *	Dispatch queued events to their associated scsi_device kobjects
 *	as uevents.
 */
void scsi_evt_thread(struct work_struct *work)
{}

/**
 * 	sdev_evt_send - send asserted event to uevent thread
 *	@sdev: scsi_device event occurred on
 *	@evt: event to send
 *
 *	Assert scsi device event asynchronously.
 */
void sdev_evt_send(struct scsi_device *sdev, struct scsi_event *evt)
{}
EXPORT_SYMBOL_GPL(sdev_evt_send);

/**
 * 	sdev_evt_alloc - allocate a new scsi event
 *	@evt_type: type of event to allocate
 *	@gfpflags: GFP flags for allocation
 *
 *	Allocates and returns a new scsi_event.
 */
struct scsi_event *sdev_evt_alloc(enum scsi_device_event evt_type,
				  gfp_t gfpflags)
{}
EXPORT_SYMBOL_GPL(sdev_evt_alloc);

/**
 * 	sdev_evt_send_simple - send asserted event to uevent thread
 *	@sdev: scsi_device event occurred on
 *	@evt_type: type of event to send
 *	@gfpflags: GFP flags for allocation
 *
 *	Assert scsi device event asynchronously, given an event type.
 */
void sdev_evt_send_simple(struct scsi_device *sdev,
			  enum scsi_device_event evt_type, gfp_t gfpflags)
{}
EXPORT_SYMBOL_GPL(sdev_evt_send_simple);
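
/*
 * Illustrative sketch (not part of the driver): asserting a media change
 * event from a context that cannot sleep, as a CD-ROM class driver might.
 * example_report_media_change() is a hypothetical helper.
 */
#if 0
static void example_report_media_change(struct scsi_device *sdev)
{
	sdev_evt_send_simple(sdev, SDEV_EVT_MEDIA_CHANGE, GFP_ATOMIC);
}
#endif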

/**
 *	scsi_device_quiesce - Block all commands except power management.
 *	@sdev:	scsi device to quiesce.
 *
 *	This works by trying to transition to the SDEV_QUIESCE state
 *	(which must be a legal transition).  When the device is in this
 *	state, only power management requests will be accepted, all others will
 *	be deferred.
 *
 *	Must be called with user context, may sleep.
 *
 *	Returns zero if successful or an error if not.
 */
int
scsi_device_quiesce(struct scsi_device *sdev)
{}
EXPORT_SYMBOL(scsi_device_quiesce);

/**
 *	scsi_device_resume - Restart user issued commands to a quiesced device.
 *	@sdev:	scsi device to resume.
 *
 *	Moves the device from quiesced back to running and restarts the
 *	queues.
 *
 *	Must be called with user context, may sleep.
 */
void scsi_device_resume(struct scsi_device *sdev)
{}
EXPORT_SYMBOL(scsi_device_resume);
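
/*
 * Illustrative sketch (not part of the driver): the quiesce/resume pairing.
 * Only power management requests pass while quiesced; everything else is
 * deferred until scsi_device_resume(). example_quiesced_section() is a
 * hypothetical helper.
 */
#if 0
static int example_quiesced_section(struct scsi_device *sdev)
{
	int ret = scsi_device_quiesce(sdev);

	if (ret)
		return ret;

	/* ... issue power management commands here ... */

	scsi_device_resume(sdev);
	return 0;
}
#endif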

static void
device_quiesce_fn(struct scsi_device *sdev, void *data)
{}

void
scsi_target_quiesce(struct scsi_target *starget)
{}
EXPORT_SYMBOL(scsi_target_quiesce);

static void
device_resume_fn(struct scsi_device *sdev, void *data)
{}

void
scsi_target_resume(struct scsi_target *starget)
{}
EXPORT_SYMBOL(scsi_target_resume);

static int __scsi_internal_device_block_nowait(struct scsi_device *sdev)
{}

void scsi_start_queue(struct scsi_device *sdev)
{}

static void scsi_stop_queue(struct scsi_device *sdev)
{}

/**
 * scsi_internal_device_block_nowait - try to transition to the SDEV_BLOCK state
 * @sdev: device to block
 *
 * Pause SCSI command processing on the specified device. Does not sleep.
 *
 * Returns zero if successful or a negative error code upon failure.
 *
 * Notes:
 * This routine transitions the device to the SDEV_BLOCK state (which must be
 * a legal transition). When the device is in this state, command processing
 * is paused until the device leaves the SDEV_BLOCK state. See also
 * scsi_internal_device_unblock_nowait().
 */
int scsi_internal_device_block_nowait(struct scsi_device *sdev)
{}
EXPORT_SYMBOL_GPL(scsi_internal_device_block_nowait);

/**
 * scsi_device_block - try to transition to the SDEV_BLOCK state
 * @sdev: device to block
 * @data: dummy argument, ignored
 *
 * Pause SCSI command processing on the specified device. Callers must wait
 * until all ongoing scsi_queue_rq() calls have finished after this function
 * returns.
 *
 * Note:
 * This routine transitions the device to the SDEV_BLOCK state (which must be
 * a legal transition). When the device is in this state, command processing
 * is paused until the device leaves the SDEV_BLOCK state. See also
 * scsi_internal_device_unblock().
 */
static void scsi_device_block(struct scsi_device *sdev, void *data)
{}

/**
 * scsi_internal_device_unblock_nowait - resume a device after a block request
 * @sdev:	device to resume
 * @new_state:	state to set the device to after unblocking
 *
 * Restart the device queue for a previously suspended SCSI device. Does not
 * sleep.
 *
 * Returns zero if successful or a negative error code upon failure.
 *
 * Notes:
 * This routine transitions the device to the SDEV_RUNNING state or to one of
 * the offline states (which must be a legal transition) allowing the midlayer
 * to goose the queue for this device.
 */
int scsi_internal_device_unblock_nowait(struct scsi_device *sdev,
					enum scsi_device_state new_state)
{}
EXPORT_SYMBOL_GPL(scsi_internal_device_unblock_nowait);
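
/*
 * Illustrative sketch (not part of the driver): pausing and resuming a
 * device from a context that must not sleep, e.g. a transport event
 * handler. Both calls fail only on an illegal state transition.
 * example_pause_resume() is a hypothetical helper.
 */
#if 0
static void example_pause_resume(struct scsi_device *sdev)
{
	if (scsi_internal_device_block_nowait(sdev))
		return;

	/* ... command processing is paused here ... */

	scsi_internal_device_unblock_nowait(sdev, SDEV_RUNNING);
}
#endif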

/**
 * scsi_internal_device_unblock - resume a device after a block request
 * @sdev:	device to resume
 * @new_state:	state to set the device to after unblocking
 *
 * Restart the device queue for a previously suspended SCSI device. May sleep.
 *
 * Returns zero if successful or a negative error code upon failure.
 *
 * Notes:
 * This routine transitions the device to the SDEV_RUNNING state or to one of
 * the offline states (which must be a legal transition) allowing the midlayer
 * to goose the queue for this device.
 */
static int scsi_internal_device_unblock(struct scsi_device *sdev,
					enum scsi_device_state new_state)
{}

static int
target_block(struct device *dev, void *data)
{}

/**
 * scsi_block_targets - transition all SCSI child devices to SDEV_BLOCK state
 * @dev: a parent device of one or more scsi_target devices
 * @shost: the Scsi_Host to which this device belongs
 *
 * Iterate over all children of @dev, which should be scsi_target devices,
 * and switch all subordinate scsi devices to SDEV_BLOCK state. Wait for
 * ongoing scsi_queue_rq() calls to finish. May sleep.
 *
 * Note:
 * @dev must not itself be a scsi_target device.
 */
void
scsi_block_targets(struct Scsi_Host *shost, struct device *dev)
{}
EXPORT_SYMBOL_GPL(scsi_block_targets);

static void
device_unblock(struct scsi_device *sdev, void *data)
{}

static int
target_unblock(struct device *dev, void *data)
{}

void
scsi_target_unblock(struct device *dev, enum scsi_device_state new_state)
{}
EXPORT_SYMBOL_GPL(scsi_target_unblock);

/**
 * scsi_host_block - Try to transition all logical units to the SDEV_BLOCK state
 * @shost: device to block
 *
 * Pause SCSI command processing for all logical units associated with the SCSI
 * host and wait until pending scsi_queue_rq() calls have finished.
 *
 * Returns zero if successful or a negative error code upon failure.
 */
int
scsi_host_block(struct Scsi_Host *shost)
{}
EXPORT_SYMBOL_GPL(scsi_host_block);

int
scsi_host_unblock(struct Scsi_Host *shost, int new_state)
{}
EXPORT_SYMBOL_GPL(scsi_host_unblock);

/**
 * scsi_kmap_atomic_sg - find and atomically map an sg element
 * @sgl:	scatter-gather list
 * @sg_count:	number of segments in sg
 * @offset:	offset in bytes into sg, on return offset into the mapped area
 * @len:	bytes to map, on return number of bytes mapped
 *
 * Returns virtual address of the start of the mapped page
 */
void *scsi_kmap_atomic_sg(struct scatterlist *sgl, int sg_count,
			  size_t *offset, size_t *len)
{}
EXPORT_SYMBOL(scsi_kmap_atomic_sg);

/**
 * scsi_kunmap_atomic_sg - atomically unmap a virtual address, previously mapped with scsi_kmap_atomic_sg
 * @virt:	virtual address to be unmapped
 */
void scsi_kunmap_atomic_sg(void *virt)
{}
EXPORT_SYMBOL(scsi_kunmap_atomic_sg);
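
/*
 * Illustrative sketch (not part of the driver): copying out of an sg list
 * with the atomic kmap helpers. On return, offset and len have been clamped
 * to the mapped page, so the data starts at vaddr + offset and a larger
 * transfer needs to loop. example_peek_sg() is a hypothetical helper.
 */
#if 0
static void example_peek_sg(struct scatterlist *sgl, int sg_count,
			    void *dst, size_t want)
{
	size_t offset = 0, len = want;
	void *vaddr = scsi_kmap_atomic_sg(sgl, sg_count, &offset, &len);

	memcpy(dst, vaddr + offset, len);	/* len may be < want */
	scsi_kunmap_atomic_sg(vaddr);
}
#endif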

void sdev_disable_disk_events(struct scsi_device *sdev)
{}
EXPORT_SYMBOL(sdev_disable_disk_events);

void sdev_enable_disk_events(struct scsi_device *sdev)
{}
EXPORT_SYMBOL(sdev_enable_disk_events);

static unsigned char designator_prio(const unsigned char *d)
{}

/**
 * scsi_vpd_lun_id - return a unique device identification
 * @sdev: SCSI device
 * @id:   buffer for the identification
 * @id_len:  length of the buffer
 *
 * Copies a unique device identification into @id based
 * on the information in the VPD page 0x83 of the device.
 * The string will be formatted as a SCSI name string.
 *
 * Returns the length of the identification or error on failure.
 * If the identifier is longer than the supplied buffer the actual
 * identifier length is returned and the buffer is not zero-padded.
 */
int scsi_vpd_lun_id(struct scsi_device *sdev, char *id, size_t id_len)
{}
EXPORT_SYMBOL(scsi_vpd_lun_id);
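
/*
 * Illustrative sketch (not part of the driver): fetching the VPD-based
 * unique ID. A positive return value is the identification length; the
 * 64-byte buffer is an arbitrary example size and example_log_lun_id() is
 * a hypothetical helper.
 */
#if 0
static void example_log_lun_id(struct scsi_device *sdev)
{
	char id[64];

	if (scsi_vpd_lun_id(sdev, id, sizeof(id)) > 0)
		sdev_printk(KERN_INFO, sdev, "lun id: %s\n", id);
}
#endif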

/**
 * scsi_vpd_tpg_id - return a target port group identifier
 * @sdev: SCSI device
 * @rel_id: pointer in which to return the relative target port identifier
 *
 * Returns the Target Port Group identifier from the information
 * in VPD page 0x83 of the device.
 *
 * Returns the identifier or error on failure.
 */
int scsi_vpd_tpg_id(struct scsi_device *sdev, int *rel_id)
{}
EXPORT_SYMBOL(scsi_vpd_tpg_id);

/**
 * scsi_build_sense - build sense data for a command
 * @scmd:	scsi command for which the sense should be formatted
 * @desc:	Sense format (non-zero == descriptor format,
 *              0 == fixed format)
 * @key:	Sense key
 * @asc:	Additional sense code
 * @ascq:	Additional sense code qualifier
 *
 **/
void scsi_build_sense(struct scsi_cmnd *scmd, int desc, u8 key, u8 asc, u8 ascq)
{}
EXPORT_SYMBOL_GPL(scsi_build_sense);
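
/*
 * Illustrative sketch (not part of the driver): building a CHECK CONDITION
 * with ILLEGAL REQUEST / INVALID FIELD IN CDB (asc 0x24, ascq 0x00) in fixed
 * sense format, a common way for an LLD to reject an unsupported CDB.
 * example_reject_cdb() is a hypothetical helper.
 */
#if 0
static void example_reject_cdb(struct scsi_cmnd *scmd)
{
	scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x24, 0x0);
	scsi_done(scmd);
}
#endif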

#ifdef CONFIG_SCSI_LIB_KUNIT_TEST
#include "scsi_lib_test.c"
#endif