linux/drivers/scsi/scsi_debug.c

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
 *  Copyright (C) 1992  Eric Youngdale
 *  Simulate a host adapter with 2 disks attached.  Do a lot of checking
 *  to make sure that we are not getting blocks mixed up, and PANIC if
 *  anything out of the ordinary is seen.
 * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 *
 * Copyright (C) 2001 - 2021 Douglas Gilbert
 *
 *  For documentation see http://sg.danny.cz/sg/scsi_debug.html
 */


#define pr_fmt(fmt)

#include <linux/module.h>
#include <linux/align.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/vmalloc.h>
#include <linux/moduleparam.h>
#include <linux/scatterlist.h>
#include <linux/blkdev.h>
#include <linux/crc-t10dif.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/atomic.h>
#include <linux/hrtimer.h>
#include <linux/uuid.h>
#include <linux/t10-pi.h>
#include <linux/msdos_partition.h>
#include <linux/random.h>
#include <linux/xarray.h>
#include <linux/prefetch.h>
#include <linux/debugfs.h>
#include <linux/async.h>
#include <linux/cleanup.h>

#include <net/checksum.h>

#include <asm/unaligned.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsicam.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_dbg.h>

#include "sd.h"
#include "scsi_logging.h"

/* make sure inq_product_rev string corresponds to this version */
#define SDEBUG_VERSION
static const char *sdebug_version_date =;

#define MY_NAME

/* Additional Sense Code (ASC) */
#define NO_ADDITIONAL_SENSE
#define OVERLAP_ATOMIC_COMMAND_ASC
#define OVERLAP_ATOMIC_COMMAND_ASCQ
#define LOGICAL_UNIT_NOT_READY
#define LOGICAL_UNIT_COMMUNICATION_FAILURE
#define UNRECOVERED_READ_ERR
#define PARAMETER_LIST_LENGTH_ERR
#define INVALID_OPCODE
#define LBA_OUT_OF_RANGE
#define INVALID_FIELD_IN_CDB
#define INVALID_FIELD_IN_PARAM_LIST
#define WRITE_PROTECTED
#define UA_RESET_ASC
#define UA_CHANGED_ASC
#define TARGET_CHANGED_ASC
#define LUNS_CHANGED_ASCQ
#define INSUFF_RES_ASC
#define INSUFF_RES_ASCQ
#define POWER_ON_RESET_ASCQ
#define POWER_ON_OCCURRED_ASCQ
#define BUS_RESET_ASCQ
#define MODE_CHANGED_ASCQ
#define CAPACITY_CHANGED_ASCQ
#define SAVING_PARAMS_UNSUP
#define TRANSPORT_PROBLEM
#define THRESHOLD_EXCEEDED
#define LOW_POWER_COND_ON
#define MISCOMPARE_VERIFY_ASC
#define MICROCODE_CHANGED_ASCQ
#define MICROCODE_CHANGED_WO_RESET_ASCQ
#define WRITE_ERROR_ASC
#define UNALIGNED_WRITE_ASCQ
#define WRITE_BOUNDARY_ASCQ
#define READ_INVDATA_ASCQ
#define READ_BOUNDARY_ASCQ
#define ATTEMPT_ACCESS_GAP
#define INSUFF_ZONE_ASCQ
/* see drivers/scsi/sense_codes.h */

/* Additional Sense Code Qualifier (ASCQ) */
#define ACK_NAK_TO

/* Default values for driver parameters */
#define DEF_NUM_HOST
#define DEF_NUM_TGTS
#define DEF_MAX_LUNS
/* With these defaults, this driver will make 1 host with 1 target
 * (id 0) containing 1 logical unit (lun 0). That is 1 device.
 */
#define DEF_ATO
#define DEF_CDB_LEN
#define DEF_JDELAY
#define DEF_DEV_SIZE_PRE_INIT
#define DEF_DEV_SIZE_MB
#define DEF_ZBC_DEV_SIZE_MB
#define DEF_DIF
#define DEF_DIX
#define DEF_PER_HOST_STORE
#define DEF_D_SENSE
#define DEF_EVERY_NTH
#define DEF_FAKE_RW
#define DEF_GUARD
#define DEF_HOST_LOCK
#define DEF_LBPU
#define DEF_LBPWS
#define DEF_LBPWS10
#define DEF_LBPRZ
#define DEF_LOWEST_ALIGNED
#define DEF_NDELAY
#define DEF_NO_LUN_0
#define DEF_NUM_PARTS
#define DEF_OPTS
#define DEF_OPT_BLKS
#define DEF_PHYSBLK_EXP
#define DEF_OPT_XFERLEN_EXP
#define DEF_PTYPE
#define DEF_RANDOM
#define DEF_REMOVABLE
#define DEF_SCSI_LEVEL
#define DEF_SECTOR_SIZE
#define DEF_UNMAP_ALIGNMENT
#define DEF_UNMAP_GRANULARITY
#define DEF_UNMAP_MAX_BLOCKS
#define DEF_UNMAP_MAX_DESC
#define DEF_VIRTUAL_GB
#define DEF_VPD_USE_HOSTNO
#define DEF_WRITESAME_LENGTH
#define DEF_ATOMIC_WR
#define DEF_ATOMIC_WR_MAX_LENGTH
#define DEF_ATOMIC_WR_ALIGN
#define DEF_ATOMIC_WR_GRAN
#define DEF_ATOMIC_WR_MAX_LENGTH_BNDRY
#define DEF_ATOMIC_WR_MAX_BNDRY
#define DEF_STRICT
#define DEF_STATISTICS
#define DEF_SUBMIT_QUEUES
#define DEF_TUR_MS_TO_READY
#define DEF_UUID_CTL
#define JDELAY_OVERRIDDEN

/* Default parameters for ZBC drives */
#define DEF_ZBC_ZONE_SIZE_MB
#define DEF_ZBC_MAX_OPEN_ZONES
#define DEF_ZBC_NR_CONV_ZONES

#define SDEBUG_LUN_0_VAL

/* bit mask values for sdebug_opts */
#define SDEBUG_OPT_NOISE
#define SDEBUG_OPT_MEDIUM_ERR
#define SDEBUG_OPT_TIMEOUT
#define SDEBUG_OPT_RECOVERED_ERR
#define SDEBUG_OPT_TRANSPORT_ERR
#define SDEBUG_OPT_DIF_ERR
#define SDEBUG_OPT_DIX_ERR
#define SDEBUG_OPT_MAC_TIMEOUT
#define SDEBUG_OPT_SHORT_TRANSFER
#define SDEBUG_OPT_Q_NOISE
#define SDEBUG_OPT_ALL_TSF
#define SDEBUG_OPT_RARE_TSF
#define SDEBUG_OPT_N_WCE
#define SDEBUG_OPT_RESET_NOISE
#define SDEBUG_OPT_NO_CDB_NOISE
#define SDEBUG_OPT_HOST_BUSY
#define SDEBUG_OPT_CMD_ABORT
#define SDEBUG_OPT_ALL_NOISE
#define SDEBUG_OPT_ALL_INJECTING
#define SDEBUG_OPT_RECOV_DIF_DIX

/* As indicated in SAM-5 and SPC-4, Unit Attentions (UAs) are returned in
 * priority order. In the subset implemented here, lower numbers have higher
 * priority. The UA numbers should be a sequence starting from 0 with
 * SDEBUG_NUM_UAS being 1 higher than the highest numbered UA. */
#define SDEBUG_UA_POR
#define SDEBUG_UA_POOCCUR
#define SDEBUG_UA_BUS_RESET
#define SDEBUG_UA_MODE_CHANGED
#define SDEBUG_UA_CAPACITY_CHANGED
#define SDEBUG_UA_LUNS_CHANGED
#define SDEBUG_UA_MICROCODE_CHANGED
#define SDEBUG_UA_MICROCODE_CHANGED_WO_RESET
#define SDEBUG_NUM_UAS
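
/*
 * Hypothetical helper (not part of the driver) illustrating the priority
 * rule stated above; the per-device UA bitmap argument is an assumption.
 * Because lower UA numbers have higher priority, the highest-priority
 * pending UA is simply the first set bit.
 */
static inline int sdeb_first_pending_ua_example(const unsigned long *uas_bm)
{
	int k = find_first_bit(uas_bm, SDEBUG_NUM_UAS);

	return (k < SDEBUG_NUM_UAS) ? k : -1;	/* -1: no UA pending */
}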

/* when the SDEBUG_OPT_MEDIUM_ERR bit is set in sdebug_opts, a medium error
 * is simulated at this sector on read commands: */
#define OPT_MEDIUM_ERR_ADDR
#define OPT_MEDIUM_ERR_NUM

/* SDEBUG_CANQUEUE is the maximum number of commands that can be queued
 * (for response) per submit queue at one time. Can be reduced by max_queue
 * option. Command responses are not queued when jdelay=0 and ndelay=0. The
 * per-device DEF_CMD_PER_LUN can be changed via sysfs:
 * /sys/class/scsi_device/<h:c:t:l>/device/queue_depth
 * but cannot exceed SDEBUG_CANQUEUE.
 */
#define SDEBUG_CANQUEUE_WORDS
#define SDEBUG_CANQUEUE
#define DEF_CMD_PER_LUN
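
/*
 * A minimal sketch (not the driver's real handler) of the limit described
 * above: a requested per-device queue depth is clamped so that it never
 * exceeds SDEBUG_CANQUEUE. The real adjustment presumably happens in the
 * driver's change_queue_depth handling (see sdebug_change_qdepth() below).
 */
static inline int sdeb_clamp_qdepth_example(int requested)
{
	return clamp_val(requested, 1, SDEBUG_CANQUEUE);
}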

/* UA - Unit Attention; SA - Service Action; SSU - Start Stop Unit */
#define F_D_IN
#define F_D_OUT
#define F_D_OUT_MAYBE
#define F_D_UNKN
#define F_RL_WLUN_OK
#define F_SKIP_UA
#define F_DELAY_OVERR
#define F_SA_LOW
#define F_SA_HIGH
#define F_INV_OP
#define F_FAKE_RW
#define F_M_ACCESS
#define F_SSU_DELAY
#define F_SYNC_DELAY

/* Useful combinations of the above flags */
#define FF_RESPOND
#define FF_MEDIA_IO
#define FF_SA
#define F_LONG_DELAY

#define SDEBUG_MAX_PARTS

#define SDEBUG_MAX_CMD_LEN

#define SDEB_XA_NOT_IN_USE

static struct kmem_cache *queued_cmd_cache;

#define TO_QUEUED_CMD(scmd)
#define ASSIGN_QUEUED_CMD(scmnd, qc)

/* Zone types (zbcr05 table 25) */
enum sdebug_z_type {};

/* enumeration names taken from table 26, zbcr05 */
enum sdebug_z_cond {};

struct sdeb_zone_state {};

enum sdebug_err_type {};

struct sdebug_err_inject {};

struct sdebug_dev_info {};

struct sdebug_target_info {};

struct sdebug_host_info {};

/* There is an xarray of pointers to this struct's objects, one per host */
struct sdeb_store_info {};

#define dev_to_sdebug_host(d)

#define shost_to_sdebug_host(shost)

enum sdeb_defer_type {};

struct sdebug_defer {};

struct sdebug_device_access_info {};

struct sdebug_queued_cmd {};

struct sdebug_scsi_cmd {};

static atomic_t sdebug_cmnd_count;   /* number of incoming commands */
static atomic_t sdebug_completions;  /* count of deferred completions */
static atomic_t sdebug_miss_cpus;    /* submission + completion cpus differ */
static atomic_t sdebug_a_tsf;	     /* 'almost task set full' counter */
static atomic_t sdeb_inject_pending;
static atomic_t sdeb_mq_poll_count;  /* bumped when mq_poll returns > 0 */

struct opcode_info_t {};

/* SCSI opcodes (first byte of cdb) of interest mapped onto these indexes */
enum sdeb_opcode_index {};


static const unsigned char opcode_ind_arr[256] =;

/*
 * The following "response" functions return the SCSI mid-level's 4 byte
 * tuple-in-an-int. To handle commands with an IMMED bit, for a faster
 * command completion, they can mask their return value with
 * SDEG_RES_IMMED_MASK.
 */
#define SDEG_RES_IMMED_MASK
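
/*
 * Hypothetical illustration (not one of the driver's real response
 * functions) of the convention described above: a handler wanting the
 * faster IMMED-style completion ORs SDEG_RES_IMMED_MASK into its result.
 */
static inline int sdeb_mark_immed_example(int result, bool immed)
{
	return immed ? (result | SDEG_RES_IMMED_MASK) : result;
}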

static int resp_inquiry(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_report_luns(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_requests(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_mode_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_mode_select(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_log_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_readcap(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_read_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_scat(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_start_stop(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_readcap16(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_get_lba_status(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_get_stream_status(struct scsi_cmnd *scp,
				  struct sdebug_dev_info *devip);
static int resp_report_tgtpgs(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_unmap(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_rsup_opcodes(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_rsup_tmfs(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_verify(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_same_10(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_same_16(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_comp_write(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_write_buffer(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_sync_cache(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_pre_fetch(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_report_zones(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_atomic_write(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_open_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_close_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_finish_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_rwp_zone(struct scsi_cmnd *, struct sdebug_dev_info *);

static int sdebug_do_add_host(bool mk_new_store);
static int sdebug_add_host_helper(int per_host_idx);
static void sdebug_do_remove_host(bool the_end);
static int sdebug_add_store(void);
static void sdebug_erase_store(int idx, struct sdeb_store_info *sip);
static void sdebug_erase_all_stores(bool apart_from_first);

static void sdebug_free_queued_cmd(struct sdebug_queued_cmd *sqcp);

/*
 * The following are overflow arrays for cdbs that "hit" the same index in
 * the opcode_info_arr array. The most time sensitive (or commonly used) cdb
 * should be placed in opcode_info_arr[], the others should be placed here.
 */
static const struct opcode_info_t msense_iarr[] =;

static const struct opcode_info_t mselect_iarr[] =;

static const struct opcode_info_t read_iarr[] =;

static const struct opcode_info_t write_iarr[] =;

static const struct opcode_info_t verify_iarr[] =;

static const struct opcode_info_t sa_in_16_iarr[] =;

static const struct opcode_info_t vl_iarr[] =;

static const struct opcode_info_t maint_in_iarr[] =;

static const struct opcode_info_t write_same_iarr[] =;

static const struct opcode_info_t reserve_iarr[] =;

static const struct opcode_info_t release_iarr[] =;

static const struct opcode_info_t sync_cache_iarr[] =;

static const struct opcode_info_t pre_fetch_iarr[] =;

static const struct opcode_info_t zone_out_iarr[] =;

static const struct opcode_info_t zone_in_iarr[] =;


/* This array is accessed via SDEB_I_* values. Make sure all are mapped,
 * plus the terminating elements for logic that scans this table such as
 * REPORT SUPPORTED OPERATION CODES. */
static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEM_P1 + 1] =;
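
/*
 * Hypothetical sketch of the two-level lookup implied by the tables above
 * (illustrative only): the cdb's first byte selects an SDEB_I_* index via
 * opcode_ind_arr[], which in turn selects the primary entry in
 * opcode_info_arr[]; cdbs sharing an index are then distinguished by
 * scanning that entry's overflow (*_iarr) array.
 */
static inline const struct opcode_info_t *sdeb_opcode_lookup_example(u8 opcode)
{
	return &opcode_info_arr[opcode_ind_arr[opcode]];
}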

static int sdebug_num_hosts;
static int sdebug_add_host =;  /* in sysfs this is relative */
static int sdebug_ato =;
static int sdebug_cdb_len =;
static int sdebug_jdelay =;	/* if > 0 then unit is jiffies */
static int sdebug_dev_size_mb =;
static int sdebug_dif =;
static int sdebug_dix =;
static int sdebug_dsense =;
static int sdebug_every_nth =;
static int sdebug_fake_rw =;
static unsigned int sdebug_guard =;
static int sdebug_host_max_queue;	/* per host */
static int sdebug_lowest_aligned =;
static int sdebug_max_luns =;
static int sdebug_max_queue =;	/* per submit queue */
static unsigned int sdebug_medium_error_start =;
static int sdebug_medium_error_count =;
static int sdebug_ndelay =;	/* if > 0 then unit is nanoseconds */
static int sdebug_no_lun_0 =;
static int sdebug_no_uld;
static int sdebug_num_parts =;
static int sdebug_num_tgts =; /* targets per host */
static int sdebug_opt_blks =;
static int sdebug_opts =;
static int sdebug_physblk_exp =;
static int sdebug_opt_xferlen_exp =;
static int sdebug_ptype =; /* SCSI peripheral device type */
static int sdebug_scsi_level =;
static int sdebug_sector_size =;
static int sdeb_tur_ms_to_ready =;
static int sdebug_virtual_gb =;
static int sdebug_vpd_use_hostno =;
static unsigned int sdebug_lbpu =;
static unsigned int sdebug_lbpws =;
static unsigned int sdebug_lbpws10 =;
static unsigned int sdebug_lbprz =;
static unsigned int sdebug_unmap_alignment =;
static unsigned int sdebug_unmap_granularity =;
static unsigned int sdebug_unmap_max_blocks =;
static unsigned int sdebug_unmap_max_desc =;
static unsigned int sdebug_write_same_length =;
static unsigned int sdebug_atomic_wr =;
static unsigned int sdebug_atomic_wr_max_length =;
static unsigned int sdebug_atomic_wr_align =;
static unsigned int sdebug_atomic_wr_gran =;
static unsigned int sdebug_atomic_wr_max_length_bndry =;
static unsigned int sdebug_atomic_wr_max_bndry =;
static int sdebug_uuid_ctl =;
static bool sdebug_random =;
static bool sdebug_per_host_store =;
static bool sdebug_removable =;
static bool sdebug_clustering;
static bool sdebug_host_lock =;
static bool sdebug_strict =;
static bool sdebug_any_injecting_opt;
static bool sdebug_no_rwlock;
static bool sdebug_verbose;
static bool have_dif_prot;
static bool write_since_sync;
static bool sdebug_statistics =;
static bool sdebug_wp;
static bool sdebug_allow_restart;
static enum {} sdeb_zbc_model =;
static char *sdeb_zbc_model_s;

enum sam_lun_addr_method {};
static enum sam_lun_addr_method sdebug_lun_am =;
static int sdebug_lun_am_i =;

static unsigned int sdebug_store_sectors;
static sector_t sdebug_capacity;	/* in sectors */

/* Old BIOS-style geometry values; the kernel may eventually drop them but
   some mode sense pages may still need them */
static int sdebug_heads;		/* heads per disk */
static int sdebug_cylinders_per;	/* cylinders per surface */
static int sdebug_sectors_per;		/* sectors per cylinder */

static LIST_HEAD(sdebug_host_list);
static DEFINE_MUTEX(sdebug_host_list_mutex);

static struct xarray per_store_arr;
static struct xarray *per_store_ap =;
static int sdeb_first_idx =;		/* invalid index ==> none created */
static int sdeb_most_recent_idx =;
static DEFINE_RWLOCK(sdeb_fake_rw_lck);	/* need a RW lock when fake_rw=1 */

static unsigned long map_size;
static int num_aborts;
static int num_dev_resets;
static int num_target_resets;
static int num_bus_resets;
static int num_host_resets;
static int dix_writes;
static int dix_reads;
static int dif_errors;

/* ZBC global data */
static bool sdeb_zbc_in_use;	/* true for host-aware and host-managed disks */
static int sdeb_zbc_zone_cap_mb;
static int sdeb_zbc_zone_size_mb;
static int sdeb_zbc_max_open =;
static int sdeb_zbc_nr_conv =;

static int submit_queues =;  /* > 1 for multi-queue (mq) */
static int poll_queues; /* iouring iopoll interface.*/

static atomic_long_t writes_by_group_number[64];

static char sdebug_proc_name[] =;
static const char *my_name =;

static const struct bus_type pseudo_lld_bus;

static struct device_driver sdebug_driverfs_driver =;

static const int check_condition_result =;

static const int illegal_condition_result =;

static const int device_qfull_result =;

static const int condition_met_result =;

static struct dentry *sdebug_debugfs_root;
static ASYNC_DOMAIN_EXCLUSIVE(sdebug_async_domain);

static void sdebug_err_free(struct rcu_head *head)
{}

static void sdebug_err_add(struct scsi_device *sdev, struct sdebug_err_inject *new)
{}

static int sdebug_err_remove(struct scsi_device *sdev, const char *buf, size_t count)
{}

static int sdebug_error_show(struct seq_file *m, void *p)
{}

static int sdebug_error_open(struct inode *inode, struct file *file)
{}

static ssize_t sdebug_error_write(struct file *file, const char __user *ubuf,
		size_t count, loff_t *ppos)
{}

static const struct file_operations sdebug_error_fops =;

static int sdebug_target_reset_fail_show(struct seq_file *m, void *p)
{}

static int sdebug_target_reset_fail_open(struct inode *inode, struct file *file)
{}

static ssize_t sdebug_target_reset_fail_write(struct file *file,
		const char __user *ubuf, size_t count, loff_t *ppos)
{}

static const struct file_operations sdebug_target_reset_fail_fops =;

static int sdebug_target_alloc(struct scsi_target *starget)
{}

static void sdebug_tartget_cleanup_async(void *data, async_cookie_t cookie)
{}

static void sdebug_target_destroy(struct scsi_target *starget)
{}

/* Only do the extra work involved in logical block provisioning if one or
 * more of the lbpu, lbpws or lbpws10 parameters are given and we are doing
 * real reads and writes (i.e. not skipping them for speed).
 */
static inline bool scsi_debug_lbp(void)
{}
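
/*
 * A minimal sketch of the rule stated above (the elided scsi_debug_lbp()
 * body is expected to implement the same test; this copy is illustrative
 * only): provisioning work is done only when real reads and writes are
 * performed and at least one of the lbp* parameters is set.
 */
static inline bool sdeb_lbp_enabled_example(void)
{
	return sdebug_fake_rw == 0 &&
	       (sdebug_lbpu || sdebug_lbpws || sdebug_lbpws10);
}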

static inline bool scsi_debug_atomic_write(void)
{}

static void *lba2fake_store(struct sdeb_store_info *sip,
			    unsigned long long lba)
{}

static struct t10_pi_tuple *dif_store(struct sdeb_store_info *sip,
				      sector_t sector)
{}

static void sdebug_max_tgts_luns(void)
{}

enum sdeb_cmd_data {};

/* Set in_bit to -1 to indicate no bit position of invalid field */
static void mk_sense_invalid_fld(struct scsi_cmnd *scp,
				 enum sdeb_cmd_data c_d,
				 int in_byte, int in_bit)
{}

static void mk_sense_buffer(struct scsi_cmnd *scp, int key, int asc, int asq)
{}

static void mk_sense_invalid_opcode(struct scsi_cmnd *scp)
{}
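
/*
 * Hypothetical example (illustrative only) of the sense helpers above;
 * SDEB_IN_CDB is assumed to be a member of the elided enum sdeb_cmd_data.
 * Reject a cdb whose byte 1 has an unsupported bit 3 set.
 */
static inline int sdeb_reject_cdb_bit_example(struct scsi_cmnd *scp)
{
	if (scp->cmnd[1] & 0x8) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
		return check_condition_result;
	}
	return 0;
}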

static int scsi_debug_ioctl(struct scsi_device *dev, unsigned int cmd,
			    void __user *arg)
{}

static void config_cdb_len(struct scsi_device *sdev)
{}

static void all_config_cdb_len(void)
{}

static void clear_luns_changed_on_target(struct sdebug_dev_info *devip)
{}

static int make_ua(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{}

/* Build SCSI "data-in" buffer. Returns 0 if ok else (DID_ERROR << 16). */
static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
				int arr_len)
{}

/* Partial build of SCSI "data-in" buffer. Returns 0 if ok else
 * (DID_ERROR << 16). Can write to offset in data-in buffer. If multiple
 * calls, not required to write in ascending offset order. Assumes resid
 * set to scsi_bufflen() prior to any calls.
 */
static int p_fill_from_dev_buffer(struct scsi_cmnd *scp, const void *arr,
				  int arr_len, unsigned int off_dst)
{}

/* Fetches from SCSI "data-out" buffer. Returns number of bytes fetched into
 * 'arr' or -1 if error.
 */
static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
			       int arr_len)
{}
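
/*
 * Hypothetical usage of fill_from_dev_buffer() following the pattern of the
 * real resp_*() handlers (the local array and allocation-length handling
 * are assumptions for illustration): build the response in a local buffer,
 * then copy no more than the cdb's allocation length to the data-in buffer.
 */
static inline int sdeb_data_in_example(struct scsi_cmnd *scp, int alloc_len)
{
	unsigned char arr[16] = {};

	arr[3] = sizeof(arr) - 4;	/* pretend parameter data length */
	return fill_from_dev_buffer(scp, arr,
				    min_t(int, alloc_len, (int)sizeof(arr)));
}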


static char sdebug_inq_vendor_id[9] =;
static char sdebug_inq_product_id[17] =;
static char sdebug_inq_product_rev[5] =;
/* Use some locally assigned NAAs for SAS addresses. */
static const u64 naa3_comp_a =;
static const u64 naa3_comp_b =;
static const u64 naa3_comp_c =;

/* Device identification VPD page. Returns number of bytes placed in arr */
static int inquiry_vpd_83(unsigned char *arr, int port_group_id,
			  int target_dev_id, int dev_id_num,
			  const char *dev_id_str, int dev_id_str_len,
			  const uuid_t *lu_name)
{}

static unsigned char vpd84_data[] =;

/*  Software interface identification VPD page */
static int inquiry_vpd_84(unsigned char *arr)
{}

/* Management network addresses VPD page */
static int inquiry_vpd_85(unsigned char *arr)
{}

/* SCSI ports VPD page */
static int inquiry_vpd_88(unsigned char *arr, int target_dev_id)
{}


static unsigned char vpd89_data[] =;

/* ATA Information VPD page */
static int inquiry_vpd_89(unsigned char *arr)
{}


static unsigned char vpdb0_data[] =;

/* Block limits VPD page (SBC-3) */
static int inquiry_vpd_b0(unsigned char *arr)
{}

/* Block device characteristics VPD page (SBC-3) */
static int inquiry_vpd_b1(struct sdebug_dev_info *devip, unsigned char *arr)
{}

/* Logical block provisioning VPD page (SBC-4) */
static int inquiry_vpd_b2(unsigned char *arr)
{}

/* Zoned block device characteristics VPD page (ZBC mandatory) */
static int inquiry_vpd_b6(struct sdebug_dev_info *devip, unsigned char *arr)
{}

#define SDEBUG_BLE_LEN_AFTER_B4

enum {};

/* Block limits extension VPD page (SBC-4) */
static int inquiry_vpd_b7(unsigned char *arrb4)
{}

#define SDEBUG_LONG_INQ_SZ
#define SDEBUG_MAX_INQ_ARR_SZ

static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{}

/* See resp_iec_m_pg() for how this data is manipulated */
static unsigned char iec_m_pg[] =;

static int resp_requests(struct scsi_cmnd *scp,
			 struct sdebug_dev_info *devip)
{}

static int resp_start_stop(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{}

static sector_t get_sdebug_capacity(void)
{}

#define SDEBUG_READCAP_ARR_SZ
static int resp_readcap(struct scsi_cmnd *scp,
			struct sdebug_dev_info *devip)
{}

#define SDEBUG_READCAP16_ARR_SZ
static int resp_readcap16(struct scsi_cmnd *scp,
			  struct sdebug_dev_info *devip)
{}

#define SDEBUG_MAX_TGTPGS_ARR_SZ

static int resp_report_tgtpgs(struct scsi_cmnd *scp,
			      struct sdebug_dev_info *devip)
{}

static int resp_rsup_opcodes(struct scsi_cmnd *scp,
			     struct sdebug_dev_info *devip)
{}

static int resp_rsup_tmfs(struct scsi_cmnd *scp,
			  struct sdebug_dev_info *devip)
{}

/* <<Following mode page info copied from ST318451LW>> */

static int resp_err_recov_pg(unsigned char *p, int pcontrol, int target)
{}

static int resp_disconnect_pg(unsigned char *p, int pcontrol, int target)
{}

static int resp_format_pg(unsigned char *p, int pcontrol, int target)
{}

static unsigned char caching_pg[] =;

static int resp_caching_pg(unsigned char *p, int pcontrol, int target)
{}

static unsigned char ctrl_m_pg[] =;

static int resp_ctrl_m_pg(unsigned char *p, int pcontrol, int target)
{}

/* IO Advice Hints Grouping mode page */
static int resp_grouping_m_pg(unsigned char *p, int pcontrol, int target)
{}

static int resp_iec_m_pg(unsigned char *p, int pcontrol, int target)
{}

static int resp_sas_sf_m_pg(unsigned char *p, int pcontrol, int target)
{}


static int resp_sas_pcd_m_spg(unsigned char *p, int pcontrol, int target,
			      int target_dev_id)
{}

static int resp_sas_sha_m_spg(unsigned char *p, int pcontrol)
{}

/* PAGE_SIZE is more than necessary but provides room for future expansion. */
#define SDEBUG_MAX_MSENSE_SZ

static int resp_mode_sense(struct scsi_cmnd *scp,
			   struct sdebug_dev_info *devip)
{}

#define SDEBUG_MAX_MSELECT_SZ

static int resp_mode_select(struct scsi_cmnd *scp,
			    struct sdebug_dev_info *devip)
{}

static int resp_temp_l_pg(unsigned char *arr)
{}

static int resp_ie_l_pg(unsigned char *arr)
{}

static int resp_env_rep_l_spg(unsigned char *arr)
{}

#define SDEBUG_MAX_LSENSE_SZ

static int resp_log_sense(struct scsi_cmnd *scp,
			  struct sdebug_dev_info *devip)
{}

static inline bool sdebug_dev_is_zoned(struct sdebug_dev_info *devip)
{}

static struct sdeb_zone_state *zbc_zone(struct sdebug_dev_info *devip,
					unsigned long long lba)
{}

static inline bool zbc_zone_is_conv(struct sdeb_zone_state *zsp)
{}

static inline bool zbc_zone_is_gap(struct sdeb_zone_state *zsp)
{}

static inline bool zbc_zone_is_seq(struct sdeb_zone_state *zsp)
{}

static void zbc_close_zone(struct sdebug_dev_info *devip,
			   struct sdeb_zone_state *zsp)
{}

static void zbc_close_imp_open_zone(struct sdebug_dev_info *devip)
{}

static void zbc_open_zone(struct sdebug_dev_info *devip,
			  struct sdeb_zone_state *zsp, bool explicit)
{}

static inline void zbc_set_zone_full(struct sdebug_dev_info *devip,
				     struct sdeb_zone_state *zsp)
{}

static void zbc_inc_wp(struct sdebug_dev_info *devip,
		       unsigned long long lba, unsigned int num)
{}

static int check_zbc_access_params(struct scsi_cmnd *scp,
			unsigned long long lba, unsigned int num, bool write)
{}

static inline int check_device_access_params
			(struct scsi_cmnd *scp, unsigned long long lba,
			 unsigned int num, bool write)
{}

/*
 * Note: if BUG_ON() fires it usually indicates a problem with the parser
 * tables. Perhaps a missing F_FAKE_RW or FF_MEDIA_IO flag. Response functions
 * that access any of the "stores" in struct sdeb_store_info should call this
 * function with bug_if_fake_rw set to true.
 */
static inline struct sdeb_store_info *devip2sip(struct sdebug_dev_info *devip,
						bool bug_if_fake_rw)
{}

static inline void
sdeb_read_lock(rwlock_t *lock)
{}

static inline void
sdeb_read_unlock(rwlock_t *lock)
{}

static inline void
sdeb_write_lock(rwlock_t *lock)
{}

static inline void
sdeb_write_unlock(rwlock_t *lock)
{}

static inline void
sdeb_data_read_lock(struct sdeb_store_info *sip)
{}

static inline void
sdeb_data_read_unlock(struct sdeb_store_info *sip)
{}

static inline void
sdeb_data_write_lock(struct sdeb_store_info *sip)
{}

static inline void
sdeb_data_write_unlock(struct sdeb_store_info *sip)
{}

static inline void
sdeb_data_sector_read_lock(struct sdeb_store_info *sip)
{}

static inline void
sdeb_data_sector_read_unlock(struct sdeb_store_info *sip)
{}

static inline void
sdeb_data_sector_write_lock(struct sdeb_store_info *sip)
{}

static inline void
sdeb_data_sector_write_unlock(struct sdeb_store_info *sip)
{}

/*
 * Atomic locking:
 * We simplify the atomic model to allow only 1x atomic write and many non-
 * atomic reads or writes for all LBAs.
 *
 * A RW lock has a similar behaviour:
 * Only 1x writer and many readers.
 *
 * So use a RW lock for per-device read and write locking:
 * An atomic access grabs the lock as a writer and non-atomic grabs the lock
 * as a reader.
 */

static inline void
sdeb_data_lock(struct sdeb_store_info *sip, bool atomic)
{}

static inline void
sdeb_data_unlock(struct sdeb_store_info *sip, bool atomic)
{}
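
/*
 * A minimal sketch of the policy above, expressed with the helpers defined
 * earlier (the elided sdeb_data_lock() body is expected to follow the same
 * idea; this copy is illustrative only): an atomic write takes the
 * per-store data lock as a writer, all other accesses take it as readers.
 */
static inline void
sdeb_data_lock_example(struct sdeb_store_info *sip, bool atomic)
{
	if (atomic)
		sdeb_data_write_lock(sip);
	else
		sdeb_data_read_lock(sip);
}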

/* Allow many reads but only 1x write per sector */
static inline void
sdeb_data_sector_lock(struct sdeb_store_info *sip, bool do_write)
{}

static inline void
sdeb_data_sector_unlock(struct sdeb_store_info *sip, bool do_write)
{}

static inline void
sdeb_meta_read_lock(struct sdeb_store_info *sip)
{}

static inline void
sdeb_meta_read_unlock(struct sdeb_store_info *sip)
{}

static inline void
sdeb_meta_write_lock(struct sdeb_store_info *sip)
{}

static inline void
sdeb_meta_write_unlock(struct sdeb_store_info *sip)
{}

/* Returns number of bytes copied or -1 if error. */
static int do_device_access(struct sdeb_store_info *sip, struct scsi_cmnd *scp,
			    u32 sg_skip, u64 lba, u32 num, u8 group_number,
			    bool do_write, bool atomic)
{}

/* Returns number of bytes copied or -1 if error. */
static int do_dout_fetch(struct scsi_cmnd *scp, u32 num, u8 *doutp)
{}

/* If the first num blocks of arr compare equal to sip->storep+lba, then copy
 * the top half of arr into sip->storep+lba and return true. If the
 * comparison fails then return false. */
static bool comp_write_worker(struct sdeb_store_info *sip, u64 lba, u32 num,
			      const u8 *arr, bool compare_only)
{}
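
/*
 * Hypothetical reduced form of the COMPARE AND WRITE rule stated above,
 * operating on a flat byte buffer rather than the real store. Assumed
 * layout: the first num blocks of arr are compare data, the next num
 * blocks are write data; lb_size is the logical block size in bytes.
 */
static inline bool sdeb_caw_example(u8 *store, const u8 *arr, u32 num,
				    u32 lb_size)
{
	size_t half = (size_t)num * lb_size;

	if (memcmp(store, arr, half) != 0)
		return false;			/* miscompare */
	memcpy(store, arr + half, half);	/* commit the "top half" */
	return true;
}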

static __be16 dif_compute_csum(const void *buf, int len)
{}

static int dif_verify(struct t10_pi_tuple *sdt, const void *data,
		      sector_t sector, u32 ei_lba)
{}

static void dif_copy_prot(struct scsi_cmnd *scp, sector_t sector,
			  unsigned int sectors, bool read)
{}

static int prot_verify_read(struct scsi_cmnd *scp, sector_t start_sec,
			    unsigned int sectors, u32 ei_lba)
{}

static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{}

static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
			     unsigned int sectors, u32 ei_lba)
{}

static unsigned long lba_to_map_index(sector_t lba)
{}

static sector_t map_index_to_lba(unsigned long index)
{}

static unsigned int map_state(struct sdeb_store_info *sip, sector_t lba,
			      unsigned int *num)
{}

static void map_region(struct sdeb_store_info *sip, sector_t lba,
		       unsigned int len)
{}

static void unmap_region(struct sdeb_store_info *sip, sector_t lba,
			 unsigned int len)
{}

static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{}

/*
 * T10 has only specified WRITE SCATTERED(16) and WRITE SCATTERED(32).
 * No READ GATHERED yet (requires bidi or long cdb holding gather list).
 */
static int resp_write_scat(struct scsi_cmnd *scp,
			   struct sdebug_dev_info *devip)
{}

static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
			   u32 ei_lba, bool unmap, bool ndob)
{}

static int resp_write_same_10(struct scsi_cmnd *scp,
			      struct sdebug_dev_info *devip)
{}

static int resp_write_same_16(struct scsi_cmnd *scp,
			      struct sdebug_dev_info *devip)
{}

/* Note the mode field is in the same position as the (lower) service action
 * field. For the Report supported operation codes command, SPC-4 suggests
 * each mode of this command should be reported separately; that is left for
 * the future. */
static int resp_write_buffer(struct scsi_cmnd *scp,
			     struct sdebug_dev_info *devip)
{}

static int resp_comp_write(struct scsi_cmnd *scp,
			   struct sdebug_dev_info *devip)
{}

struct unmap_block_desc {};

static int resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{}

#define SDEBUG_GET_LBA_STATUS_LEN

static int resp_get_lba_status(struct scsi_cmnd *scp,
			       struct sdebug_dev_info *devip)
{}

static int resp_get_stream_status(struct scsi_cmnd *scp,
				  struct sdebug_dev_info *devip)
{}

static int resp_sync_cache(struct scsi_cmnd *scp,
			   struct sdebug_dev_info *devip)
{}

/*
 * Assuming the LBA+num_blocks is not out-of-range, this function will return
 * CONDITION MET if the specified blocks will/have fitted in the cache, and
 * a GOOD status otherwise. Model a disk with a big cache and yield
 * CONDITION MET. Actually tries to bring range in main memory into the
 * cache associated with the CPU(s).
 */
static int resp_pre_fetch(struct scsi_cmnd *scp,
			  struct sdebug_dev_info *devip)
{}
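
/*
 * Hypothetical sketch of the cache-priming step mentioned above (fsp is an
 * assumed pointer to the start of the backing store): prefetch the byte
 * range covering the requested LBAs.
 */
static inline void sdeb_prefetch_example(u8 *fsp, u64 lba, u32 nblks)
{
	prefetch_range(fsp + lba * sdebug_sector_size,
		       (size_t)nblks * sdebug_sector_size);
}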

#define RL_BUCKET_ELEMS

/* Even though each pseudo target has a REPORT LUNS "well known logical unit"
 * (W-LUN), the normal Linux scanning logic does not associate it with a
 * device (e.g. /dev/sg7). The following magic will make that association:
 *   "cd /sys/class/scsi_host/host<n> ; echo '- - 49409' > scan"
 * where <n> is a host number. If there are multiple targets in a host then
 * the above will associate a W-LUN to each target. To only get a W-LUN
 * for target 2, then use "echo '- 2 49409' > scan" .
 */
static int resp_report_luns(struct scsi_cmnd *scp,
			    struct sdebug_dev_info *devip)
{}

static int resp_verify(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{}

#define RZONES_DESC_HD

/* Report zones depending on start LBA and reporting options */
static int resp_report_zones(struct scsi_cmnd *scp,
			     struct sdebug_dev_info *devip)
{}

static int resp_atomic_write(struct scsi_cmnd *scp,
			     struct sdebug_dev_info *devip)
{}

/* Logic transplanted from tcmu-runner, file_zbc.c */
static void zbc_open_all(struct sdebug_dev_info *devip)
{}

static int resp_open_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{}

static void zbc_close_all(struct sdebug_dev_info *devip)
{}

static int resp_close_zone(struct scsi_cmnd *scp,
			   struct sdebug_dev_info *devip)
{}

static void zbc_finish_zone(struct sdebug_dev_info *devip,
			    struct sdeb_zone_state *zsp, bool empty)
{}

static void zbc_finish_all(struct sdebug_dev_info *devip)
{}

static int resp_finish_zone(struct scsi_cmnd *scp,
			    struct sdebug_dev_info *devip)
{}

static void zbc_rwp_zone(struct sdebug_dev_info *devip,
			 struct sdeb_zone_state *zsp)
{}

static void zbc_rwp_all(struct sdebug_dev_info *devip)
{}

static int resp_rwp_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{}

static u32 get_tag(struct scsi_cmnd *cmnd)
{}

/* Queued (deferred) command completions converge here. */
static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
{}

/* When high resolution timer goes off this function is called. */
static enum hrtimer_restart sdebug_q_cmd_hrt_complete(struct hrtimer *timer)
{}

/* When work queue schedules work, it calls this function. */
static void sdebug_q_cmd_wq_complete(struct work_struct *work)
{}

static bool got_shared_uuid;
static uuid_t shared_uuid;

static int sdebug_device_create_zones(struct sdebug_dev_info *devip)
{}

static struct sdebug_dev_info *sdebug_device_create(
			struct sdebug_host_info *sdbg_host, gfp_t flags)
{}

static struct sdebug_dev_info *find_build_dev_info(struct scsi_device *sdev)
{}

static int scsi_debug_slave_alloc(struct scsi_device *sdp)
{}

static int scsi_debug_slave_configure(struct scsi_device *sdp)
{}

static void scsi_debug_slave_destroy(struct scsi_device *sdp)
{}

/* Returns true if we require the queued memory to be freed by the caller. */
static bool stop_qc_helper(struct sdebug_defer *sd_dp,
			   enum sdeb_defer_type defer_t)
{}


static bool scsi_debug_stop_cmnd(struct scsi_cmnd *cmnd)
{}

/*
 * Called from scsi_debug_abort() only, which is for timed-out cmd.
 */
static bool scsi_debug_abort_cmnd(struct scsi_cmnd *cmnd)
{}

/*
 * All we can do is set the cmnd as internally aborted and wait for it to
 * finish. We cannot call scsi_done() as the normal completion path may do
 * that.
 */
static bool sdebug_stop_cmnd(struct request *rq, void *data)
{}

/* Deletes (stops) timers or work queues of all queued commands */
static void stop_all_queued(void)
{}

static int sdebug_fail_abort(struct scsi_cmnd *cmnd)
{}

static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
{}

static bool scsi_debug_stop_all_queued_iter(struct request *rq, void *data)
{}

/* Deletes (stops) timers or work queues of all queued commands per sdev */
static void scsi_debug_stop_all_queued(struct scsi_device *sdp)
{}

static int sdebug_fail_lun_reset(struct scsi_cmnd *cmnd)
{}

static int scsi_debug_device_reset(struct scsi_cmnd *SCpnt)
{}

static int sdebug_fail_target_reset(struct scsi_cmnd *cmnd)
{}

static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt)
{}

static int scsi_debug_bus_reset(struct scsi_cmnd *SCpnt)
{}

static int scsi_debug_host_reset(struct scsi_cmnd *SCpnt)
{}

static void sdebug_build_parts(unsigned char *ramp, unsigned long store_size)
{}

static void block_unblock_all_queues(bool block)
{}

/* Adjust (by rounding down) the sdebug_cmnd_count so abs(every_nth)-1
 * commands will be processed normally before triggers occur.
 */
static void tweak_cmnd_count(void)
{}
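
/*
 * Hypothetical arithmetic behind the adjustment described above
 * (illustrative only): round the count down to the nearest multiple of
 * abs(every_nth) so that abs(every_nth)-1 commands are processed normally
 * before the next trigger.
 */
static inline int sdeb_round_down_example(int count, int every_nth)
{
	int modulo = abs(every_nth);

	return (modulo > 1) ? count - (count % modulo) : count;
}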

static void clear_queue_stats(void)
{}

static bool inject_on_this_cmd(void)
{}

#define INCLUSIVE_TIMING_MAX_NS


void sdebug_free_queued_cmd(struct sdebug_queued_cmd *sqcp)
{}

static struct sdebug_queued_cmd *sdebug_alloc_queued_cmd(struct scsi_cmnd *scmd)
{}

/* Complete the processing of the thread that queued a SCSI command to this
 * driver. It either completes the command by calling cmnd_done() or
 * schedules a hr timer or work queue then returns 0. Returns
 * SCSI_MLQUEUE_HOST_BUSY if temporarily out of resources.
 */
static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
			 int scsi_result,
			 int (*pfp)(struct scsi_cmnd *,
				    struct sdebug_dev_info *),
			 int delta_jiff, int ndelay)
{}

/* Note: The following macros create attribute files in the
   /sys/module/scsi_debug/parameters directory. Unfortunately this
   driver is unaware of a change and cannot trigger auxiliary actions
   as it can when the corresponding attribute in the
   /sys/bus/pseudo/drivers/scsi_debug directory is changed.
 */
module_param_named(add_host, sdebug_add_host, int, S_IRUGO | S_IWUSR);
module_param_named(ato, sdebug_ato, int, S_IRUGO);
module_param_named(cdb_len, sdebug_cdb_len, int, 0644);
module_param_named(clustering, sdebug_clustering, bool, S_IRUGO | S_IWUSR);
module_param_named(delay, sdebug_jdelay, int, S_IRUGO | S_IWUSR);
module_param_named(dev_size_mb, sdebug_dev_size_mb, int, S_IRUGO);
module_param_named(dif, sdebug_dif, int, S_IRUGO);
module_param_named(dix, sdebug_dix, int, S_IRUGO);
module_param_named(dsense, sdebug_dsense, int, S_IRUGO | S_IWUSR);
module_param_named(every_nth, sdebug_every_nth, int, S_IRUGO | S_IWUSR);
module_param_named(fake_rw, sdebug_fake_rw, int, S_IRUGO | S_IWUSR);
module_param_named(guard, sdebug_guard, uint, S_IRUGO);
module_param_named(host_lock, sdebug_host_lock, bool, S_IRUGO | S_IWUSR);
module_param_named(host_max_queue, sdebug_host_max_queue, int, S_IRUGO);
module_param_string();
module_param_string();
module_param_string();
module_param_named(lbprz, sdebug_lbprz, int, S_IRUGO);
module_param_named(lbpu, sdebug_lbpu, int, S_IRUGO);
module_param_named(lbpws, sdebug_lbpws, int, S_IRUGO);
module_param_named(lbpws10, sdebug_lbpws10, int, S_IRUGO);
module_param_named(atomic_wr, sdebug_atomic_wr, int, S_IRUGO);
module_param_named(lowest_aligned, sdebug_lowest_aligned, int, S_IRUGO);
module_param_named(lun_format, sdebug_lun_am_i, int, S_IRUGO | S_IWUSR);
module_param_named(max_luns, sdebug_max_luns, int, S_IRUGO | S_IWUSR);
module_param_named(max_queue, sdebug_max_queue, int, S_IRUGO | S_IWUSR);
module_param_named(medium_error_count, sdebug_medium_error_count, int,
		   S_IRUGO | S_IWUSR);
module_param_named(medium_error_start, sdebug_medium_error_start, int,
		   S_IRUGO | S_IWUSR);
module_param_named(ndelay, sdebug_ndelay, int, S_IRUGO | S_IWUSR);
module_param_named(no_lun_0, sdebug_no_lun_0, int, S_IRUGO | S_IWUSR);
module_param_named(no_rwlock, sdebug_no_rwlock, bool, S_IRUGO | S_IWUSR);
module_param_named(no_uld, sdebug_no_uld, int, S_IRUGO);
module_param_named(num_parts, sdebug_num_parts, int, S_IRUGO);
module_param_named(num_tgts, sdebug_num_tgts, int, S_IRUGO | S_IWUSR);
module_param_named(opt_blks, sdebug_opt_blks, int, S_IRUGO);
module_param_named(opt_xferlen_exp, sdebug_opt_xferlen_exp, int, S_IRUGO);
module_param_named(opts, sdebug_opts, int, S_IRUGO | S_IWUSR);
module_param_named(per_host_store, sdebug_per_host_store, bool,
		   S_IRUGO | S_IWUSR);
module_param_named(physblk_exp, sdebug_physblk_exp, int, S_IRUGO);
module_param_named(ptype, sdebug_ptype, int, S_IRUGO | S_IWUSR);
module_param_named(random, sdebug_random, bool, S_IRUGO | S_IWUSR);
module_param_named(removable, sdebug_removable, bool, S_IRUGO | S_IWUSR);
module_param_named(scsi_level, sdebug_scsi_level, int, S_IRUGO);
module_param_named(sector_size, sdebug_sector_size, int, S_IRUGO);
module_param_named(statistics, sdebug_statistics, bool, S_IRUGO | S_IWUSR);
module_param_named(strict, sdebug_strict, bool, S_IRUGO | S_IWUSR);
module_param_named(submit_queues, submit_queues, int, S_IRUGO);
module_param_named(poll_queues, poll_queues, int, S_IRUGO);
module_param_named(tur_ms_to_ready, sdeb_tur_ms_to_ready, int, S_IRUGO);
module_param_named(unmap_alignment, sdebug_unmap_alignment, int, S_IRUGO);
module_param_named(unmap_granularity, sdebug_unmap_granularity, int, S_IRUGO);
module_param_named(unmap_max_blocks, sdebug_unmap_max_blocks, int, S_IRUGO);
module_param_named(unmap_max_desc, sdebug_unmap_max_desc, int, S_IRUGO);
module_param_named(atomic_wr_max_length, sdebug_atomic_wr_max_length, int, S_IRUGO);
module_param_named(atomic_wr_align, sdebug_atomic_wr_align, int, S_IRUGO);
module_param_named(atomic_wr_gran, sdebug_atomic_wr_gran, int, S_IRUGO);
module_param_named(atomic_wr_max_length_bndry, sdebug_atomic_wr_max_length_bndry, int, S_IRUGO);
module_param_named(atomic_wr_max_bndry, sdebug_atomic_wr_max_bndry, int, S_IRUGO);
module_param_named(uuid_ctl, sdebug_uuid_ctl, int, S_IRUGO);
module_param_named(virtual_gb, sdebug_virtual_gb, int, S_IRUGO | S_IWUSR);
module_param_named(vpd_use_hostno, sdebug_vpd_use_hostno, int,
		   S_IRUGO | S_IWUSR);
module_param_named(wp, sdebug_wp, bool, S_IRUGO | S_IWUSR);
module_param_named(write_same_length, sdebug_write_same_length, int,
		   S_IRUGO | S_IWUSR);
module_param_named(zbc, sdeb_zbc_model_s, charp, S_IRUGO);
module_param_named(zone_cap_mb, sdeb_zbc_zone_cap_mb, int, S_IRUGO);
module_param_named(zone_max_open, sdeb_zbc_max_open, int, S_IRUGO);
module_param_named(zone_nr_conv, sdeb_zbc_nr_conv, int, S_IRUGO);
module_param_named(zone_size_mb, sdeb_zbc_zone_size_mb, int, S_IRUGO);
module_param_named(allow_restart, sdebug_allow_restart, bool, S_IRUGO | S_IWUSR);

MODULE_AUTHOR();
MODULE_DESCRIPTION();
MODULE_LICENSE();
MODULE_VERSION();

MODULE_PARM_DESC();
MODULE_PARM_DESC();
MODULE_PARM_DESC();
MODULE_PARM_DESC();
MODULE_PARM_DESC();
MODULE_PARM_DESC();
MODULE_PARM_DESC();
MODULE_PARM_DESC();
MODULE_PARM_DESC();
MODULE_PARM_DESC();
MODULE_PARM_DESC();
MODULE_PARM_DESC();
MODULE_PARM_DESC();
MODULE_PARM_DESC();
MODULE_PARM_DESC();
MODULE_PARM_DESC();
MODULE_PARM_DESC();
MODULE_PARM_DESC();
MODULE_PARM_DESC();
MODULE_PARM_DESC();
MODULE_PARM_DESC();
MODULE_PARM_DESC();
MODULE_PARM_DESC();
MODULE_PARM_DESC();
MODULE_PARM_DESC();
MODULE_PARM_DESC();
MODULE_PARM_DESC();
MODULE_PARM_DESC();
MODULE_PARM_DESC();
MODULE_PARM_DESC();
MODULE_PARM_DESC();
MODULE_PARM_DESC();
MODULE_PARM_DESC();
MODULE_PARM_DESC();
MODULE_PARM_DESC();
MODULE_PARM_DESC();
MODULE_PARM_DESC();
MODULE_PARM_DESC();
MODULE_PARM_DESC();
MODULE_PARM_DESC();
MODULE_PARM_DESC();
MODULE_PARM_DESC();
MODULE_PARM_DESC();
MODULE_PARM_DESC();
MODULE_PARM_DESC();
MODULE_PARM_DESC();
MODULE_PARM_DESC();
MODULE_PARM_DESC();
MODULE_PARM_DESC();
MODULE_PARM_DESC();
MODULE_PARM_DESC();
MODULE_PARM_DESC();
MODULE_PARM_DESC();
MODULE_PARM_DESC();
MODULE_PARM_DESC();
MODULE_PARM_DESC();
MODULE_PARM_DESC();
MODULE_PARM_DESC();
MODULE_PARM_DESC();
MODULE_PARM_DESC();
MODULE_PARM_DESC();
MODULE_PARM_DESC();
MODULE_PARM_DESC();
MODULE_PARM_DESC();
MODULE_PARM_DESC();
MODULE_PARM_DESC();
MODULE_PARM_DESC();
MODULE_PARM_DESC();
MODULE_PARM_DESC();

#define SDEBUG_INFO_LEN
static char sdebug_info[SDEBUG_INFO_LEN];

static const char *scsi_debug_info(struct Scsi_Host *shp)
{}

/* 'echo <val> > /proc/scsi/scsi_debug/<host_id>' writes to opts */
static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer,
				 int length)
{}

struct sdebug_submit_queue_data {};

static bool sdebug_submit_queue_iter(struct request *rq, void *opaque)
{}

/* Output seen with 'cat /proc/scsi/scsi_debug/<host_id>'. It will be the
 * same for each scsi_debug host (if more than one). Some of the counters in
 * the output are not atomics, so they might be inaccurate on a busy
 * system. */
static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
{}

static ssize_t delay_show(struct device_driver *ddp, char *buf)
{}
/* Returns -EBUSY if jdelay is being changed and commands are queued. The unit
 * of delay is jiffies.
 */
static ssize_t delay_store(struct device_driver *ddp, const char *buf,
			   size_t count)
{}
static DRIVER_ATTR_RW(delay);

static ssize_t ndelay_show(struct device_driver *ddp, char *buf)
{}
/* Returns -EBUSY if ndelay is being changed and commands are queued */
/* If > 0 and accepted then sdebug_jdelay is set to JDELAY_OVERRIDDEN */
static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
			    size_t count)
{}
static DRIVER_ATTR_RW(ndelay);

static ssize_t opts_show(struct device_driver *ddp, char *buf)
{}

static ssize_t opts_store(struct device_driver *ddp, const char *buf,
			  size_t count)
{}
static DRIVER_ATTR_RW(opts);

static ssize_t ptype_show(struct device_driver *ddp, char *buf)
{}
static ssize_t ptype_store(struct device_driver *ddp, const char *buf,
			   size_t count)
{}
static DRIVER_ATTR_RW(ptype);

static ssize_t dsense_show(struct device_driver *ddp, char *buf)
{}
static ssize_t dsense_store(struct device_driver *ddp, const char *buf,
			    size_t count)
{}
static DRIVER_ATTR_RW(dsense);

static ssize_t fake_rw_show(struct device_driver *ddp, char *buf)
{}
static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
			     size_t count)
{}
static DRIVER_ATTR_RW(fake_rw);

static ssize_t no_lun_0_show(struct device_driver *ddp, char *buf)
{}
static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf,
			      size_t count)
{}
static DRIVER_ATTR_RW(no_lun_0);

static ssize_t num_tgts_show(struct device_driver *ddp, char *buf)
{}
static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf,
			      size_t count)
{}
static DRIVER_ATTR_RW(num_tgts);

static ssize_t dev_size_mb_show(struct device_driver *ddp, char *buf)
{}
static DRIVER_ATTR_RO(dev_size_mb);

static ssize_t per_host_store_show(struct device_driver *ddp, char *buf)
{}

static ssize_t per_host_store_store(struct device_driver *ddp, const char *buf,
				    size_t count)
{}
static DRIVER_ATTR_RW(per_host_store);

static ssize_t num_parts_show(struct device_driver *ddp, char *buf)
{}
static DRIVER_ATTR_RO(num_parts);

static ssize_t every_nth_show(struct device_driver *ddp, char *buf)
{}
static ssize_t every_nth_store(struct device_driver *ddp, const char *buf,
			       size_t count)
{}
static DRIVER_ATTR_RW(every_nth);

static ssize_t lun_format_show(struct device_driver *ddp, char *buf)
{}
static ssize_t lun_format_store(struct device_driver *ddp, const char *buf,
				size_t count)
{}
static DRIVER_ATTR_RW(lun_format);

static ssize_t max_luns_show(struct device_driver *ddp, char *buf)
{}
static ssize_t max_luns_store(struct device_driver *ddp, const char *buf,
			      size_t count)
{}
static DRIVER_ATTR_RW(max_luns);

static ssize_t max_queue_show(struct device_driver *ddp, char *buf)
{}
/* N.B. max_queue can be changed while there are queued commands. In-flight
 * commands beyond the new max_queue will be completed. */
static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
			       size_t count)
{}
static DRIVER_ATTR_RW(max_queue);

static ssize_t host_max_queue_show(struct device_driver *ddp, char *buf)
{}

static ssize_t no_rwlock_show(struct device_driver *ddp, char *buf)
{}

static ssize_t no_rwlock_store(struct device_driver *ddp, const char *buf, size_t count)
{}
static DRIVER_ATTR_RW(no_rwlock);

/*
 * Since this is used for .can_queue, and we get the hc_idx tag from the bitmap
 * in range [0, sdebug_host_max_queue), we can't change it.
 */
static DRIVER_ATTR_RO(host_max_queue);

static ssize_t no_uld_show(struct device_driver *ddp, char *buf)
{}
static DRIVER_ATTR_RO(no_uld);

static ssize_t scsi_level_show(struct device_driver *ddp, char *buf)
{}
static DRIVER_ATTR_RO(scsi_level);

static ssize_t virtual_gb_show(struct device_driver *ddp, char *buf)
{}
static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
				size_t count)
{}
static DRIVER_ATTR_RW(virtual_gb);

static ssize_t add_host_show(struct device_driver *ddp, char *buf)
{}

static ssize_t add_host_store(struct device_driver *ddp, const char *buf,
			      size_t count)
{}
static DRIVER_ATTR_RW(add_host);

static ssize_t vpd_use_hostno_show(struct device_driver *ddp, char *buf)
{}
static ssize_t vpd_use_hostno_store(struct device_driver *ddp, const char *buf,
				    size_t count)
{}
static DRIVER_ATTR_RW(vpd_use_hostno);

static ssize_t statistics_show(struct device_driver *ddp, char *buf)
{}
static ssize_t statistics_store(struct device_driver *ddp, const char *buf,
				size_t count)
{}
static DRIVER_ATTR_RW(statistics);

static ssize_t sector_size_show(struct device_driver *ddp, char *buf)
{}
static DRIVER_ATTR_RO(sector_size);

static ssize_t submit_queues_show(struct device_driver *ddp, char *buf)
{}
static DRIVER_ATTR_RO(submit_queues);

static ssize_t dix_show(struct device_driver *ddp, char *buf)
{}
static DRIVER_ATTR_RO(dix);

static ssize_t dif_show(struct device_driver *ddp, char *buf)
{}
static DRIVER_ATTR_RO(dif);

static ssize_t guard_show(struct device_driver *ddp, char *buf)
{}
static DRIVER_ATTR_RO(guard);

static ssize_t ato_show(struct device_driver *ddp, char *buf)
{}
static DRIVER_ATTR_RO(ato);

static ssize_t map_show(struct device_driver *ddp, char *buf)
{}
static DRIVER_ATTR_RO(map);

static ssize_t random_show(struct device_driver *ddp, char *buf)
{}

static ssize_t random_store(struct device_driver *ddp, const char *buf,
			    size_t count)
{}
static DRIVER_ATTR_RW(random);

static ssize_t removable_show(struct device_driver *ddp, char *buf)
{}
static ssize_t removable_store(struct device_driver *ddp, const char *buf,
			       size_t count)
{}
static DRIVER_ATTR_RW(removable);

static ssize_t host_lock_show(struct device_driver *ddp, char *buf)
{}
/* N.B. sdebug_host_lock does nothing, kept for backward compatibility */
static ssize_t host_lock_store(struct device_driver *ddp, const char *buf,
			       size_t count)
{}
static DRIVER_ATTR_RW(host_lock);

static ssize_t strict_show(struct device_driver *ddp, char *buf)
{}
static ssize_t strict_store(struct device_driver *ddp, const char *buf,
			    size_t count)
{}
static DRIVER_ATTR_RW(strict);

static ssize_t uuid_ctl_show(struct device_driver *ddp, char *buf)
{}
static DRIVER_ATTR_RO(uuid_ctl);

static ssize_t cdb_len_show(struct device_driver *ddp, char *buf)
{}
static ssize_t cdb_len_store(struct device_driver *ddp, const char *buf,
			     size_t count)
{}
static DRIVER_ATTR_RW(cdb_len);

static const char * const zbc_model_strs_a[] =;

static const char * const zbc_model_strs_b[] =;

static const char * const zbc_model_strs_c[] =;

static int sdeb_zbc_model_str(const char *cp)
{}

static ssize_t zbc_show(struct device_driver *ddp, char *buf)
{}
static DRIVER_ATTR_RO(zbc);

static ssize_t tur_ms_to_ready_show(struct device_driver *ddp, char *buf)
{}
static DRIVER_ATTR_RO(tur_ms_to_ready);

static ssize_t group_number_stats_show(struct device_driver *ddp, char *buf)
{}

static ssize_t group_number_stats_store(struct device_driver *ddp,
					const char *buf, size_t count)
{}
static DRIVER_ATTR_RW(group_number_stats);

/* Note: The following array creates attribute files in the
   /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
   files (over those found in the /sys/module/scsi_debug/parameters
   directory) is that auxiliary actions can be triggered when an attribute
   is changed. For example see: add_host_store() above.
 */

static struct attribute *sdebug_drv_attrs[] =;
ATTRIBUTE_GROUPS();

static struct device *pseudo_primary;

static int __init scsi_debug_init(void)
{}

static void __exit scsi_debug_exit(void)
{}

device_initcall(scsi_debug_init);
module_exit(scsi_debug_exit);

static void sdebug_release_adapter(struct device *dev)
{}

/* idx must be valid; if sip is NULL then it will be obtained using idx */
static void sdebug_erase_store(int idx, struct sdeb_store_info *sip)
{}

/* Assume apart_from_first==false only in shutdown case. */
static void sdebug_erase_all_stores(bool apart_from_first)
{}

/*
 * Returns store xarray new element index (idx) if >=0 else negated errno.
 * Limit the number of stores to 65536.
 */
static int sdebug_add_store(void)
{}

static int sdebug_add_host_helper(int per_host_idx)
{}

static int sdebug_do_add_host(bool mk_new_store)
{}

static void sdebug_do_remove_host(bool the_end)
{}

static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
{}

static bool fake_timeout(struct scsi_cmnd *scp)
{}

/* Response to TUR or media access command when device stopped */
static int resp_not_ready(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{}

static void sdebug_map_queues(struct Scsi_Host *shost)
{}

struct sdebug_blk_mq_poll_data {};

/*
 * We don't handle aborted commands here, but it does not seem possible to have
 * aborted polled commands from schedule_resp()
 */
static bool sdebug_blk_mq_poll_iter(struct request *rq, void *opaque)
{}

static int sdebug_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
{}

static int sdebug_timeout_cmd(struct scsi_cmnd *cmnd)
{}

static int sdebug_fail_queue_cmd(struct scsi_cmnd *cmnd)
{}

static int sdebug_fail_cmd(struct scsi_cmnd *cmnd, int *retval,
			   struct sdebug_err_inject *info)
{}

static int scsi_debug_queuecommand(struct Scsi_Host *shost,
				   struct scsi_cmnd *scp)
{}

static int sdebug_init_cmd_priv(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
{}

static struct scsi_host_template sdebug_driver_template =;

static int sdebug_driver_probe(struct device *dev)
{}

static void sdebug_driver_remove(struct device *dev)
{}

static const struct bus_type pseudo_lld_bus =;