/* linux/drivers/scsi/smartpqi/smartpqi_init.c */

// SPDX-License-Identifier: GPL-2.0
/*
 *    driver for Microchip PQI-based storage controllers
 *    Copyright (c) 2019-2023 Microchip Technology Inc. and its subsidiaries
 *    Copyright (c) 2016-2018 Microsemi Corporation
 *    Copyright (c) 2016 PMC-Sierra, Inc.
 *
 *    Questions/Comments/Bugfixes to [email protected]
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/rtc.h>
#include <linux/bcd.h>
#include <linux/reboot.h>
#include <linux/cciss_ioctl.h>
#include <linux/blk-mq-pci.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_transport_sas.h>
#include <asm/unaligned.h>
#include "smartpqi.h"
#include "smartpqi_sis.h"

#if !defined(BUILD_TIMESTAMP)
#define BUILD_TIMESTAMP
#endif

#define DRIVER_VERSION
#define DRIVER_MAJOR
#define DRIVER_MINOR
#define DRIVER_RELEASE
#define DRIVER_REVISION

#define DRIVER_NAME
#define DRIVER_NAME_SHORT

#define PQI_EXTRA_SGL_MEMORY

#define PQI_POST_RESET_DELAY_SECS
#define PQI_POST_OFA_RESET_DELAY_UPON_TIMEOUT_SECS

#define PQI_NO_COMPLETION

MODULE_AUTHOR();
MODULE_DESCRIPTION();
MODULE_VERSION();
MODULE_LICENSE();

struct pqi_cmd_priv {};

static struct pqi_cmd_priv *pqi_cmd_priv(struct scsi_cmnd *cmd)
{}

static void pqi_verify_structures(void);
static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info,
	enum pqi_ctrl_shutdown_reason ctrl_shutdown_reason);
static void pqi_ctrl_offline_worker(struct work_struct *work);
static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info);
static void pqi_scan_start(struct Scsi_Host *shost);
static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
	struct pqi_queue_group *queue_group, enum pqi_io_path path,
	struct pqi_io_request *io_request);
static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
	struct pqi_iu_header *request, unsigned int flags,
	struct pqi_raid_error_info *error_info);
static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
	struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
	unsigned int cdb_length, struct pqi_queue_group *queue_group,
	struct pqi_encryption_info *encryption_info, bool raid_bypass, bool io_high_prio);
static  int pqi_aio_submit_r1_write_io(struct pqi_ctrl_info *ctrl_info,
	struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group,
	struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device,
	struct pqi_scsi_dev_raid_map_data *rmd);
static int pqi_aio_submit_r56_write_io(struct pqi_ctrl_info *ctrl_info,
	struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group,
	struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device,
	struct pqi_scsi_dev_raid_map_data *rmd);
static void pqi_ofa_ctrl_quiesce(struct pqi_ctrl_info *ctrl_info);
static void pqi_ofa_ctrl_unquiesce(struct pqi_ctrl_info *ctrl_info);
static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info, unsigned int delay_secs);
static void pqi_ofa_setup_host_buffer(struct pqi_ctrl_info *ctrl_info);
static void pqi_ofa_free_host_buffer(struct pqi_ctrl_info *ctrl_info);
static int pqi_ofa_host_memory_update(struct pqi_ctrl_info *ctrl_info);
static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device, u8 lun, unsigned long timeout_msecs);
static void pqi_fail_all_outstanding_requests(struct pqi_ctrl_info *ctrl_info);
static void pqi_tmf_worker(struct work_struct *work);

/* for flags argument to pqi_submit_raid_request_synchronous() */
#define PQI_SYNC_FLAGS_INTERRUPTABLE

static struct scsi_transport_template *pqi_sas_transport_template;

static atomic_t pqi_controller_count =;

enum pqi_lockup_action {};

static enum pqi_lockup_action pqi_lockup_action =;

static struct {} pqi_lockup_actions[] =;

static unsigned int pqi_supported_event_types[] =;

static int pqi_disable_device_id_wildcards;
module_param_named(disable_device_id_wildcards,
	pqi_disable_device_id_wildcards, int, 0644);
MODULE_PARM_DESC();

static int pqi_disable_heartbeat;
module_param_named(disable_heartbeat,
	pqi_disable_heartbeat, int, 0644);
MODULE_PARM_DESC();

static int pqi_disable_ctrl_shutdown;
module_param_named(disable_ctrl_shutdown,
	pqi_disable_ctrl_shutdown, int, 0644);
MODULE_PARM_DESC();

static char *pqi_lockup_action_param;
module_param_named(lockup_action,
	pqi_lockup_action_param, charp, 0644);
MODULE_PARM_DESC();

static int pqi_expose_ld_first;
module_param_named(expose_ld_first,
	pqi_expose_ld_first, int, 0644);
MODULE_PARM_DESC();

static int pqi_hide_vsep;
module_param_named(hide_vsep,
	pqi_hide_vsep, int, 0644);
MODULE_PARM_DESC();

static int pqi_disable_managed_interrupts;
module_param_named(disable_managed_interrupts,
	pqi_disable_managed_interrupts, int, 0644);
MODULE_PARM_DESC();

static unsigned int pqi_ctrl_ready_timeout_secs;
module_param_named(ctrl_ready_timeout,
	pqi_ctrl_ready_timeout_secs, uint, 0644);
MODULE_PARM_DESC();

static char *raid_levels[] =;

static char *pqi_raid_level_to_string(u8 raid_level)
{}

#define SA_RAID_0
#define SA_RAID_4
#define SA_RAID_1
#define SA_RAID_5
#define SA_RAID_51
#define SA_RAID_6
#define SA_RAID_TRIPLE
#define SA_RAID_MAX
#define SA_RAID_UNKNOWN

static inline void pqi_scsi_done(struct scsi_cmnd *scmd)
{}

static inline void pqi_disable_write_same(struct scsi_device *sdev)
{}

static inline bool pqi_scsi3addr_equal(u8 *scsi3addr1, u8 *scsi3addr2)
{}

static inline bool pqi_is_logical_device(struct pqi_scsi_dev *device)
{}

static inline bool pqi_is_external_raid_addr(u8 *scsi3addr)
{}

static inline bool pqi_ctrl_offline(struct pqi_ctrl_info *ctrl_info)
{}

static inline void pqi_check_ctrl_health(struct pqi_ctrl_info *ctrl_info)
{}

static inline bool pqi_is_hba_lunid(u8 *scsi3addr)
{}

#define PQI_DRIVER_SCRATCH_PQI_MODE
#define PQI_DRIVER_SCRATCH_FW_TRIAGE_SUPPORTED

static inline enum pqi_ctrl_mode pqi_get_ctrl_mode(struct pqi_ctrl_info *ctrl_info)
{}

static inline void pqi_save_ctrl_mode(struct pqi_ctrl_info *ctrl_info,
	enum pqi_ctrl_mode mode)
{}

static inline bool pqi_is_fw_triage_supported(struct pqi_ctrl_info *ctrl_info)
{}

static inline void pqi_save_fw_triage_setting(struct pqi_ctrl_info *ctrl_info, bool is_supported)
{}

static inline void pqi_ctrl_block_scan(struct pqi_ctrl_info *ctrl_info)
{}

static inline void pqi_ctrl_unblock_scan(struct pqi_ctrl_info *ctrl_info)
{}

static inline bool pqi_ctrl_scan_blocked(struct pqi_ctrl_info *ctrl_info)
{}

static inline void pqi_ctrl_block_device_reset(struct pqi_ctrl_info *ctrl_info)
{}

static inline void pqi_ctrl_unblock_device_reset(struct pqi_ctrl_info *ctrl_info)
{}

static inline void pqi_scsi_block_requests(struct pqi_ctrl_info *ctrl_info)
{}

static inline void pqi_scsi_unblock_requests(struct pqi_ctrl_info *ctrl_info)
{}

static inline void pqi_ctrl_busy(struct pqi_ctrl_info *ctrl_info)
{}

static inline void pqi_ctrl_unbusy(struct pqi_ctrl_info *ctrl_info)
{}

static inline bool pqi_ctrl_blocked(struct pqi_ctrl_info *ctrl_info)
{}

static inline void pqi_ctrl_block_requests(struct pqi_ctrl_info *ctrl_info)
{}

static inline void pqi_ctrl_unblock_requests(struct pqi_ctrl_info *ctrl_info)
{}

static void pqi_wait_if_ctrl_blocked(struct pqi_ctrl_info *ctrl_info)
{}

#define PQI_QUIESCE_WARNING_TIMEOUT_SECS

static inline void pqi_ctrl_wait_until_quiesced(struct pqi_ctrl_info *ctrl_info)
{}

static inline bool pqi_device_offline(struct pqi_scsi_dev *device)
{}

static inline void pqi_ctrl_ofa_start(struct pqi_ctrl_info *ctrl_info)
{}

static inline void pqi_ctrl_ofa_done(struct pqi_ctrl_info *ctrl_info)
{}

static inline void pqi_wait_until_ofa_finished(struct pqi_ctrl_info *ctrl_info)
{}

static inline bool pqi_ofa_in_progress(struct pqi_ctrl_info *ctrl_info)
{}

static inline void pqi_device_remove_start(struct pqi_scsi_dev *device)
{}

static inline bool pqi_device_in_remove(struct pqi_scsi_dev *device)
{}

static inline void pqi_device_reset_start(struct pqi_scsi_dev *device, u8 lun)
{}

static inline void pqi_device_reset_done(struct pqi_scsi_dev *device, u8 lun)
{}

static inline bool pqi_device_in_reset(struct pqi_scsi_dev *device, u8 lun)
{}

static inline int pqi_event_type_to_event_index(unsigned int event_type)
{}

static inline bool pqi_is_supported_event(unsigned int event_type)
{}

static inline void pqi_schedule_rescan_worker_with_delay(struct pqi_ctrl_info *ctrl_info,
	unsigned long delay)
{}

static inline void pqi_schedule_rescan_worker(struct pqi_ctrl_info *ctrl_info)
{}

#define PQI_RESCAN_WORK_DELAY

static inline void pqi_schedule_rescan_worker_delayed(struct pqi_ctrl_info *ctrl_info)
{}

static inline void pqi_cancel_rescan_worker(struct pqi_ctrl_info *ctrl_info)
{}

static inline u32 pqi_read_heartbeat_counter(struct pqi_ctrl_info *ctrl_info)
{}

static inline u8 pqi_read_soft_reset_status(struct pqi_ctrl_info *ctrl_info)
{}

static inline void pqi_clear_soft_reset_status(struct pqi_ctrl_info *ctrl_info)
{}

static inline bool pqi_is_io_high_priority(struct pqi_scsi_dev *device, struct scsi_cmnd *scmd)
{}

static int pqi_map_single(struct pci_dev *pci_dev,
	struct pqi_sg_descriptor *sg_descriptor, void *buffer,
	size_t buffer_length, enum dma_data_direction data_direction)
{}

static void pqi_pci_unmap(struct pci_dev *pci_dev,
	struct pqi_sg_descriptor *descriptors, int num_descriptors,
	enum dma_data_direction data_direction)
{}

static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info,
	struct pqi_raid_path_request *request, u8 cmd,
	u8 *scsi3addr, void *buffer, size_t buffer_length,
	u16 vpd_page, enum dma_data_direction *dir)
{}

static inline void pqi_reinit_io_request(struct pqi_io_request *io_request)
{}

static inline struct pqi_io_request *pqi_alloc_io_request(struct pqi_ctrl_info *ctrl_info, struct scsi_cmnd *scmd)
{}

static void pqi_free_io_request(struct pqi_io_request *io_request)
{}

static int pqi_send_scsi_raid_request(struct pqi_ctrl_info *ctrl_info, u8 cmd,
	u8 *scsi3addr, void *buffer, size_t buffer_length, u16 vpd_page,
	struct pqi_raid_error_info *error_info)
{}

/* helper functions for pqi_send_scsi_raid_request */

static inline int pqi_send_ctrl_raid_request(struct pqi_ctrl_info *ctrl_info,
	u8 cmd, void *buffer, size_t buffer_length)
{}

static inline int pqi_send_ctrl_raid_with_error(struct pqi_ctrl_info *ctrl_info,
	u8 cmd, void *buffer, size_t buffer_length,
	struct pqi_raid_error_info *error_info)
{}

static inline int pqi_identify_controller(struct pqi_ctrl_info *ctrl_info,
	struct bmic_identify_controller *buffer)
{}

static inline int pqi_sense_subsystem_info(struct  pqi_ctrl_info *ctrl_info,
	struct bmic_sense_subsystem_info *sense_info)
{}

static inline int pqi_scsi_inquiry(struct pqi_ctrl_info *ctrl_info,
	u8 *scsi3addr, u16 vpd_page, void *buffer, size_t buffer_length)
{}

static int pqi_identify_physical_device(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device,
	struct bmic_identify_physical_device *buffer, size_t buffer_length)
{}

static inline u32 pqi_aio_limit_to_bytes(__le16 *limit)
{}

#pragma pack(1)

struct bmic_sense_feature_buffer {};

#pragma pack()

#define MINIMUM_AIO_SUBPAGE_BUFFER_LENGTH

#define MINIMUM_AIO_SUBPAGE_LENGTH

static int pqi_get_advanced_raid_bypass_config(struct pqi_ctrl_info *ctrl_info)
{}

static int pqi_flush_cache(struct pqi_ctrl_info *ctrl_info,
	enum bmic_flush_cache_shutdown_event shutdown_event)
{}

int pqi_csmi_smp_passthru(struct pqi_ctrl_info *ctrl_info,
	struct bmic_csmi_smp_passthru_buffer *buffer, size_t buffer_length,
	struct pqi_raid_error_info *error_info)
{}

#define PQI_FETCH_PTRAID_DATA

static int pqi_set_diag_rescan(struct pqi_ctrl_info *ctrl_info)
{}

static inline int pqi_write_host_wellness(struct pqi_ctrl_info *ctrl_info,
	void *buffer, size_t buffer_length)
{}

#pragma pack(1)

struct bmic_host_wellness_driver_version {};

#pragma pack()

static int pqi_write_driver_version_to_host_wellness(
	struct pqi_ctrl_info *ctrl_info)
{}

#pragma pack(1)

struct bmic_host_wellness_time {};

#pragma pack()

static int pqi_write_current_time_to_host_wellness(
	struct pqi_ctrl_info *ctrl_info)
{}

#define PQI_UPDATE_TIME_WORK_INTERVAL

static void pqi_update_time_worker(struct work_struct *work)
{}

static inline void pqi_schedule_update_time_worker(struct pqi_ctrl_info *ctrl_info)
{}

static inline void pqi_cancel_update_time_worker(struct pqi_ctrl_info *ctrl_info)
{}

static inline int pqi_report_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd, void *buffer,
	size_t buffer_length)
{}

static int pqi_report_phys_logical_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd, void **buffer)
{}

static inline int pqi_report_phys_luns(struct pqi_ctrl_info *ctrl_info, void **buffer)
{}

static inline int pqi_report_logical_luns(struct pqi_ctrl_info *ctrl_info, void **buffer)
{}

static int pqi_get_device_lists(struct pqi_ctrl_info *ctrl_info,
	struct report_phys_lun_16byte_wwid_list **physdev_list,
	struct report_log_lun_list **logdev_list)
{}

static inline void pqi_set_bus_target_lun(struct pqi_scsi_dev *device,
	int bus, int target, int lun)
{}

static void pqi_assign_bus_target_lun(struct pqi_scsi_dev *device)
{}

static void pqi_get_raid_level(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{}

static int pqi_validate_raid_map(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device, struct raid_map *raid_map)
{}

static int pqi_get_raid_map(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{}

static void pqi_set_max_transfer_encrypted(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{}

static void pqi_get_raid_bypass_status(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{}

/*
 * Use vendor-specific VPD to determine online/offline status of a volume.
 */

static void pqi_get_volume_status(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{}

#define PQI_DEVICE_NCQ_PRIO_SUPPORTED
#define PQI_DEVICE_PHY_MAP_SUPPORTED
#define PQI_DEVICE_ERASE_IN_PROGRESS

static int pqi_get_physical_device_info(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device,
	struct bmic_identify_physical_device *id_phys)
{}

static int pqi_get_logical_device_info(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{}

/*
 * Prevent adding drive to OS for some corner cases such as a drive
 * undergoing a sanitize (erase) operation. Some OSes will continue to poll
 * the drive until the sanitize completes, which can take hours,
 * resulting in long bootup delays. Commands such as TUR, READ_CAP
 * are allowed, but READ/WRITE cause check condition. So the OS
 * cannot check/read the partition table.
 * Note: devices that have completed sanitize must be re-enabled
 *       using the management utility.
 */
static inline bool pqi_keep_device_offline(struct pqi_scsi_dev *device)
{}

static int pqi_get_device_info_phys_logical(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device,
	struct bmic_identify_physical_device *id_phys)
{}

static int pqi_get_device_info(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device,
	struct bmic_identify_physical_device *id_phys)
{}

static void pqi_show_volume_status(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{}

static void pqi_rescan_worker(struct work_struct *work)
{}

static int pqi_add_device(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{}

#define PQI_REMOVE_DEVICE_PENDING_IO_TIMEOUT_MSECS

static inline void pqi_remove_device(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device)
{}

/* Assumes the SCSI device list lock is held. */

static struct pqi_scsi_dev *pqi_find_scsi_dev(struct pqi_ctrl_info *ctrl_info,
	int bus, int target, int lun)
{}

static inline bool pqi_device_equal(struct pqi_scsi_dev *dev1, struct pqi_scsi_dev *dev2)
{}

enum pqi_find_result {};

static enum pqi_find_result pqi_scsi_find_entry(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device_to_find, struct pqi_scsi_dev **matching_device)
{}

static inline const char *pqi_device_type(struct pqi_scsi_dev *device)
{}

#define PQI_DEV_INFO_BUFFER_LENGTH

static void pqi_dev_info(struct pqi_ctrl_info *ctrl_info,
	char *action, struct pqi_scsi_dev *device)
{}

static bool pqi_raid_maps_equal(struct raid_map *raid_map1, struct raid_map *raid_map2)
{}

/* Assumes the SCSI device list lock is held. */

static void pqi_scsi_update_device(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *existing_device, struct pqi_scsi_dev *new_device)
{}

static inline void pqi_free_device(struct pqi_scsi_dev *device)
{}

/*
 * Called when exposing a new device to the OS fails in order to re-adjust
 * our internal SCSI device list to match the SCSI ML's view.
 */

static inline void pqi_fixup_botched_add(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{}

static inline bool pqi_is_device_added(struct pqi_scsi_dev *device)
{}

static inline void pqi_init_device_tmf_work(struct pqi_scsi_dev *device)
{}

static inline bool pqi_volume_rescan_needed(struct pqi_scsi_dev *device)
{}

static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *new_device_list[], unsigned int num_new_devices)
{}

static inline bool pqi_is_supported_device(struct pqi_scsi_dev *device)
{}

static inline bool pqi_skip_device(u8 *scsi3addr)
{}

static inline void pqi_mask_device(u8 *scsi3addr)
{}

static inline bool pqi_is_multipath_device(struct pqi_scsi_dev *device)
{}

static inline bool pqi_expose_device(struct pqi_scsi_dev *device)
{}

static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
{}

static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info)
{}

static void pqi_scan_start(struct Scsi_Host *shost)
{}

/* Returns TRUE if scan is finished. */

static int pqi_scan_finished(struct Scsi_Host *shost,
	unsigned long elapsed_time)
{}

static inline void pqi_set_encryption_info(struct pqi_encryption_info *encryption_info,
	struct raid_map *raid_map, u64 first_block)
{}

/*
 * Attempt to perform RAID bypass mapping for a logical volume I/O.
 */

static bool pqi_aio_raid_level_supported(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev_raid_map_data *rmd)
{}

#define PQI_RAID_BYPASS_INELIGIBLE

static int pqi_get_aio_lba_and_block_count(struct scsi_cmnd *scmd,
	struct pqi_scsi_dev_raid_map_data *rmd)
{}

static int pci_get_aio_common_raid_map_values(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev_raid_map_data *rmd, struct raid_map *raid_map)
{}

static int pqi_calc_aio_r5_or_r6(struct pqi_scsi_dev_raid_map_data *rmd,
	struct raid_map *raid_map)
{}

static void pqi_set_aio_cdb(struct pqi_scsi_dev_raid_map_data *rmd)
{}

static void pqi_calc_aio_r1_nexus(struct raid_map *raid_map,
	struct pqi_scsi_dev_raid_map_data *rmd)
{}

static int pqi_raid_bypass_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
	struct pqi_queue_group *queue_group)
{}

#define PQI_STATUS_IDLE

#define PQI_CREATE_ADMIN_QUEUE_PAIR
#define PQI_DELETE_ADMIN_QUEUE_PAIR

#define PQI_DEVICE_STATE_POWER_ON_AND_RESET
#define PQI_DEVICE_STATE_STATUS_AVAILABLE
#define PQI_DEVICE_STATE_ALL_REGISTERS_READY
#define PQI_DEVICE_STATE_ADMIN_QUEUE_PAIR_READY
#define PQI_DEVICE_STATE_ERROR

#define PQI_MODE_READY_TIMEOUT_SECS
#define PQI_MODE_READY_POLL_INTERVAL_MSECS

static int pqi_wait_for_pqi_mode_ready(struct pqi_ctrl_info *ctrl_info)
{}

static inline void pqi_aio_path_disabled(struct pqi_io_request *io_request)
{}

static inline void pqi_take_device_offline(struct scsi_device *sdev, char *path)
{}

static void pqi_process_raid_io_error(struct pqi_io_request *io_request)
{}

static void pqi_process_aio_io_error(struct pqi_io_request *io_request)
{}

static void pqi_process_io_error(unsigned int iu_type,
	struct pqi_io_request *io_request)
{}

static int pqi_interpret_task_management_response(struct pqi_ctrl_info *ctrl_info,
	struct pqi_task_management_response *response)
{}

static inline void pqi_invalid_response(struct pqi_ctrl_info *ctrl_info,
	enum pqi_ctrl_shutdown_reason ctrl_shutdown_reason)
{}

static int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info, struct pqi_queue_group *queue_group)
{}

static inline unsigned int pqi_num_elements_free(unsigned int pi,
	unsigned int ci, unsigned int elements_in_queue)
{}

static void pqi_send_event_ack(struct pqi_ctrl_info *ctrl_info,
	struct pqi_event_acknowledge_request *iu, size_t iu_length)
{}

static void pqi_acknowledge_event(struct pqi_ctrl_info *ctrl_info,
	struct pqi_event *event)
{}

#define PQI_SOFT_RESET_STATUS_TIMEOUT_SECS
#define PQI_SOFT_RESET_STATUS_POLL_INTERVAL_SECS

static enum pqi_soft_reset_status pqi_poll_for_soft_reset_status(
	struct pqi_ctrl_info *ctrl_info)
{}

static void pqi_process_soft_reset(struct pqi_ctrl_info *ctrl_info)
{}

static void pqi_ofa_memory_alloc_worker(struct work_struct *work)
{}

static void pqi_ofa_quiesce_worker(struct work_struct *work)
{}

static bool pqi_ofa_process_event(struct pqi_ctrl_info *ctrl_info,
	struct pqi_event *event)
{}

static void pqi_mark_volumes_for_rescan(struct pqi_ctrl_info *ctrl_info)
{}

static void pqi_disable_raid_bypass(struct pqi_ctrl_info *ctrl_info)
{}

static void pqi_event_worker(struct work_struct *work)
{}

#define PQI_HEARTBEAT_TIMER_INTERVAL

static void pqi_heartbeat_timer_handler(struct timer_list *t)
{}

static void pqi_start_heartbeat_timer(struct pqi_ctrl_info *ctrl_info)
{}

static inline void pqi_stop_heartbeat_timer(struct pqi_ctrl_info *ctrl_info)
{}

static void pqi_ofa_capture_event_payload(struct pqi_ctrl_info *ctrl_info,
	struct pqi_event *event, struct pqi_event_response *response)
{}

static int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info)
{}

#define PQI_LEGACY_INTX_MASK

static inline void pqi_configure_legacy_intx(struct pqi_ctrl_info *ctrl_info, bool enable_intx)
{}

static void pqi_change_irq_mode(struct pqi_ctrl_info *ctrl_info,
	enum pqi_irq_mode new_mode)
{}

#define PQI_LEGACY_INTX_PENDING

static inline bool pqi_is_valid_irq(struct pqi_ctrl_info *ctrl_info)
{}

static irqreturn_t pqi_irq_handler(int irq, void *data)
{}

static int pqi_request_irqs(struct pqi_ctrl_info *ctrl_info)
{}

static void pqi_free_irqs(struct pqi_ctrl_info *ctrl_info)
{}

static int pqi_enable_msix_interrupts(struct pqi_ctrl_info *ctrl_info)
{}

static void pqi_disable_msix_interrupts(struct pqi_ctrl_info *ctrl_info)
{}

static int pqi_alloc_operational_queues(struct pqi_ctrl_info *ctrl_info)
{}

static void pqi_init_operational_queues(struct pqi_ctrl_info *ctrl_info)
{}

static int pqi_alloc_admin_queues(struct pqi_ctrl_info *ctrl_info)
{}

#define PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES
#define PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS

static int pqi_create_admin_queues(struct pqi_ctrl_info *ctrl_info)
{}

static void pqi_submit_admin_request(struct pqi_ctrl_info *ctrl_info,
	struct pqi_general_admin_request *request)
{}

#define PQI_ADMIN_REQUEST_TIMEOUT_SECS

static int pqi_poll_for_admin_response(struct pqi_ctrl_info *ctrl_info,
	struct pqi_general_admin_response *response)
{}

static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
	struct pqi_queue_group *queue_group, enum pqi_io_path path,
	struct pqi_io_request *io_request)
{}

#define PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS

static int pqi_wait_for_completion_io(struct pqi_ctrl_info *ctrl_info,
	struct completion *wait)
{}

static void pqi_raid_synchronous_complete(struct pqi_io_request *io_request,
	void *context)
{}

static int pqi_process_raid_io_error_synchronous(
	struct pqi_raid_error_info *error_info)
{}

static inline bool pqi_is_blockable_request(struct pqi_iu_header *request)
{}

static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
	struct pqi_iu_header *request, unsigned int flags,
	struct pqi_raid_error_info *error_info)
{}

static int pqi_validate_admin_response(
	struct pqi_general_admin_response *response, u8 expected_function_code)
{}

static int pqi_submit_admin_request_synchronous(
	struct pqi_ctrl_info *ctrl_info,
	struct pqi_general_admin_request *request,
	struct pqi_general_admin_response *response)
{}

static int pqi_report_device_capability(struct pqi_ctrl_info *ctrl_info)
{}

static int pqi_validate_device_capability(struct pqi_ctrl_info *ctrl_info)
{}

static int pqi_create_event_queue(struct pqi_ctrl_info *ctrl_info)
{}

static int pqi_create_queue_group(struct pqi_ctrl_info *ctrl_info,
	unsigned int group_number)
{}

static int pqi_create_queues(struct pqi_ctrl_info *ctrl_info)
{}

#define PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH

static int pqi_configure_events(struct pqi_ctrl_info *ctrl_info,
	bool enable_events)
{}

static inline int pqi_enable_events(struct pqi_ctrl_info *ctrl_info)
{}

static void pqi_free_all_io_requests(struct pqi_ctrl_info *ctrl_info)
{}

static inline int pqi_alloc_error_buffer(struct pqi_ctrl_info *ctrl_info)
{}

static int pqi_alloc_io_resources(struct pqi_ctrl_info *ctrl_info)
{}

/*
 * Calculate required resources that are sized based on max. outstanding
 * requests and max. transfer size.
 */

static void pqi_calculate_io_resources(struct pqi_ctrl_info *ctrl_info)
{}

static void pqi_calculate_queue_resources(struct pqi_ctrl_info *ctrl_info)
{}

static inline void pqi_set_sg_descriptor(struct pqi_sg_descriptor *sg_descriptor,
	struct scatterlist *sg)
{}

static unsigned int pqi_build_sg_list(struct pqi_sg_descriptor *sg_descriptor,
	struct scatterlist *sg, int sg_count, struct pqi_io_request *io_request,
	int max_sg_per_iu, bool *chained)
{}

static int pqi_build_raid_sg_list(struct pqi_ctrl_info *ctrl_info,
	struct pqi_raid_path_request *request, struct scsi_cmnd *scmd,
	struct pqi_io_request *io_request)
{}

static int pqi_build_aio_r1_sg_list(struct pqi_ctrl_info *ctrl_info,
	struct pqi_aio_r1_path_request *request, struct scsi_cmnd *scmd,
	struct pqi_io_request *io_request)
{}

static int pqi_build_aio_r56_sg_list(struct pqi_ctrl_info *ctrl_info,
	struct pqi_aio_r56_path_request *request, struct scsi_cmnd *scmd,
	struct pqi_io_request *io_request)
{}

static int pqi_build_aio_sg_list(struct pqi_ctrl_info *ctrl_info,
	struct pqi_aio_path_request *request, struct scsi_cmnd *scmd,
	struct pqi_io_request *io_request)
{}

static void pqi_raid_io_complete(struct pqi_io_request *io_request,
	void *context)
{}

static int pqi_raid_submit_io(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
	struct pqi_queue_group *queue_group, bool io_high_prio)
{}

static inline int pqi_raid_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
	struct pqi_queue_group *queue_group)
{}

static bool pqi_raid_bypass_retry_needed(struct pqi_io_request *io_request)
{}

static void pqi_aio_io_complete(struct pqi_io_request *io_request,
	void *context)
{}

static inline int pqi_aio_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
	struct pqi_queue_group *queue_group)
{}

static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
	struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
	unsigned int cdb_length, struct pqi_queue_group *queue_group,
	struct pqi_encryption_info *encryption_info, bool raid_bypass,
	bool io_high_prio)
{}

static  int pqi_aio_submit_r1_write_io(struct pqi_ctrl_info *ctrl_info,
	struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group,
	struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device,
	struct pqi_scsi_dev_raid_map_data *rmd)
{}

static int pqi_aio_submit_r56_write_io(struct pqi_ctrl_info *ctrl_info,
	struct scsi_cmnd *scmd, struct pqi_queue_group *queue_group,
	struct pqi_encryption_info *encryption_info, struct pqi_scsi_dev *device,
	struct pqi_scsi_dev_raid_map_data *rmd)
{}

static inline u16 pqi_get_hw_queue(struct pqi_ctrl_info *ctrl_info,
	struct scsi_cmnd *scmd)
{}

static inline bool pqi_is_bypass_eligible_request(struct scsi_cmnd *scmd)
{}

/*
 * This function gets called just before we hand the completed SCSI request
 * back to the SML.
 */

void pqi_prep_for_scsi_done(struct scsi_cmnd *scmd)
{}

static bool pqi_is_parity_write_stream(struct pqi_ctrl_info *ctrl_info,
	struct scsi_cmnd *scmd)
{}

static int pqi_scsi_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
{}

static unsigned int pqi_queued_io_count(struct pqi_ctrl_info *ctrl_info)
{}

static unsigned int pqi_nonempty_inbound_queue_count(struct pqi_ctrl_info *ctrl_info)
{}

#define PQI_INBOUND_QUEUES_NONEMPTY_WARNING_TIMEOUT_SECS

static int pqi_wait_until_inbound_queues_empty(struct pqi_ctrl_info *ctrl_info)
{}

static void pqi_fail_io_queued_for_device(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device, u8 lun)
{}

#define PQI_PENDING_IO_WARNING_TIMEOUT_SECS

static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device, u8 lun, unsigned long timeout_msecs)
{}

static void pqi_lun_reset_complete(struct pqi_io_request *io_request,
	void *context)
{}

#define PQI_LUN_RESET_POLL_COMPLETION_SECS

static int pqi_wait_for_lun_reset_completion(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device, u8 lun, struct completion *wait)
{}

#define PQI_LUN_RESET_FIRMWARE_TIMEOUT_SECS

static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device, u8 lun)
{}

#define PQI_LUN_RESET_RETRIES
#define PQI_LUN_RESET_RETRY_INTERVAL_MSECS
#define PQI_LUN_RESET_PENDING_IO_TIMEOUT_MSECS
#define PQI_LUN_RESET_FAILED_PENDING_IO_TIMEOUT_MSECS

static int pqi_lun_reset_with_retries(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device, u8 lun)
{}

static int pqi_device_reset(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device, u8 lun)
{}

static int pqi_device_reset_handler(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device, u8 lun, struct scsi_cmnd *scmd, u8 scsi_opcode)
{}

static int pqi_eh_device_reset_handler(struct scsi_cmnd *scmd)
{}

static void pqi_tmf_worker(struct work_struct *work)
{}

static int pqi_eh_abort_handler(struct scsi_cmnd *scmd)
{}

static int pqi_slave_alloc(struct scsi_device *sdev)
{}

static void pqi_map_queues(struct Scsi_Host *shost)
{}

static inline bool pqi_is_tape_changer_device(struct pqi_scsi_dev *device)
{}

static int pqi_slave_configure(struct scsi_device *sdev)
{}

static void pqi_slave_destroy(struct scsi_device *sdev)
{}

static int pqi_getpciinfo_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg)
{}

static int pqi_getdrivver_ioctl(void __user *arg)
{}

struct ciss_error_info {};

static void pqi_error_info_to_ciss(struct pqi_raid_error_info *pqi_error_info,
	struct ciss_error_info *ciss_error_info)
{}

static int pqi_passthru_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg)
{}

static int pqi_ioctl(struct scsi_device *sdev, unsigned int cmd,
		     void __user *arg)
{}

/*
 * Scsi_Host-level sysfs show/store handlers.  Each is bound to a file
 * by the DEVICE_ATTR() declarations that follow this group; @dev is the
 * Scsi_Host's embedded struct device.
 */

/* "firmware_version" (RO): report the controller firmware version. */
static ssize_t pqi_firmware_version_show(struct device *dev,
	struct device_attribute *attr, char *buffer)
{}

/* "serial_number" (RO): report the controller serial number. */
static ssize_t pqi_serial_number_show(struct device *dev,
	struct device_attribute *attr, char *buffer)
{}

/* "model" (RO): report the controller model string. */
static ssize_t pqi_model_show(struct device *dev,
	struct device_attribute *attr, char *buffer)
{}

/* "vendor" (RO): report the controller vendor string. */
static ssize_t pqi_vendor_show(struct device *dev,
	struct device_attribute *attr, char *buffer)
{}

/* "rescan" (WO): any write triggers a rescan for device changes. */
static ssize_t pqi_host_rescan_store(struct device *dev,
	struct device_attribute *attr, const char *buffer, size_t count)
{}

/* "lockup_action" (RW): show the configured controller-lockup action. */
static ssize_t pqi_lockup_action_show(struct device *dev,
	struct device_attribute *attr, char *buffer)
{}

/* "lockup_action" (RW): select the action taken on controller lockup. */
static ssize_t pqi_lockup_action_store(struct device *dev,
	struct device_attribute *attr, const char *buffer, size_t count)
{}

/* "enable_stream_detection" (RW): show the stream-detection flag. */
static ssize_t pqi_host_enable_stream_detection_show(struct device *dev,
	struct device_attribute *attr, char *buffer)
{}

/* "enable_stream_detection" (RW): set the stream-detection flag. */
static ssize_t pqi_host_enable_stream_detection_store(struct device *dev,
	struct device_attribute *attr, const char *buffer, size_t count)
{}

/* "enable_r5_writes" (RW): show the RAID-5 write-bypass flag. */
static ssize_t pqi_host_enable_r5_writes_show(struct device *dev,
	struct device_attribute *attr, char *buffer)
{}

/* "enable_r5_writes" (RW): set the RAID-5 write-bypass flag. */
static ssize_t pqi_host_enable_r5_writes_store(struct device *dev,
	struct device_attribute *attr, const char *buffer, size_t count)
{}

/* "enable_r6_writes" (RW): show the RAID-6 write-bypass flag. */
static ssize_t pqi_host_enable_r6_writes_show(struct device *dev,
	struct device_attribute *attr, char *buffer)
{}

/* "enable_r6_writes" (RW): set the RAID-6 write-bypass flag. */
static ssize_t pqi_host_enable_r6_writes_store(struct device *dev,
	struct device_attribute *attr, const char *buffer, size_t count)
{}

static DEVICE_STRING_ATTR_RO(driver_version, 0444,
	DRIVER_VERSION BUILD_TIMESTAMP);
static DEVICE_ATTR(firmware_version, 0444, pqi_firmware_version_show, NULL);
static DEVICE_ATTR(model, 0444, pqi_model_show, NULL);
static DEVICE_ATTR(serial_number, 0444, pqi_serial_number_show, NULL);
static DEVICE_ATTR(vendor, 0444, pqi_vendor_show, NULL);
static DEVICE_ATTR(rescan, 0200, NULL, pqi_host_rescan_store);
static DEVICE_ATTR(lockup_action, 0644, pqi_lockup_action_show,
	pqi_lockup_action_store);
static DEVICE_ATTR(enable_stream_detection, 0644,
	pqi_host_enable_stream_detection_show,
	pqi_host_enable_stream_detection_store);
static DEVICE_ATTR(enable_r5_writes, 0644,
	pqi_host_enable_r5_writes_show, pqi_host_enable_r5_writes_store);
static DEVICE_ATTR(enable_r6_writes, 0644,
	pqi_host_enable_r6_writes_show, pqi_host_enable_r6_writes_store);

static struct attribute *pqi_shost_attrs[] =;

ATTRIBUTE_GROUPS();

/*
 * scsi_device-level sysfs show/store handlers, bound by the
 * DEVICE_ATTR() declarations that follow this group.
 */

/* "unique_id" (RO): report the device's unique identifier. */
static ssize_t pqi_unique_id_show(struct device *dev,
	struct device_attribute *attr, char *buffer)
{}

/* "lunid" (RO): report the device's 8-byte LUN ID. */
static ssize_t pqi_lunid_show(struct device *dev,
	struct device_attribute *attr, char *buffer)
{}

/* Upper bound on paths reported by path_info (value elided here). */
#define MAX_PATHS

/* "path_info" (RO): describe the physical path(s) to the device. */
static ssize_t pqi_path_info_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{}

/* "sas_address" (RO): report the device's SAS address. */
static ssize_t pqi_sas_address_show(struct device *dev,
	struct device_attribute *attr, char *buffer)
{}

/* "ssd_smart_path_enabled" (RO): is RAID-bypass (smart path) active? */
static ssize_t pqi_ssd_smart_path_enabled_show(struct device *dev,
	struct device_attribute *attr, char *buffer)
{}

/* "raid_level" (RO): report the logical volume's RAID level. */
static ssize_t pqi_raid_level_show(struct device *dev,
	struct device_attribute *attr, char *buffer)
{}

/* "raid_bypass_cnt" (RO): count of requests sent via RAID bypass. */
static ssize_t pqi_raid_bypass_cnt_show(struct device *dev,
	struct device_attribute *attr, char *buffer)
{}

/* "sas_ncq_prio_enable" (RW): show the SATA NCQ priority setting. */
static ssize_t pqi_sas_ncq_prio_enable_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{}

/* "sas_ncq_prio_enable" (RW): enable/disable SATA NCQ priority. */
static ssize_t pqi_sas_ncq_prio_enable_store(struct device *dev,
			struct device_attribute *attr,
			const char *buf, size_t count)
{}

/* "numa_node" (RO): report the controller's NUMA node. */
static ssize_t pqi_numa_node_show(struct device *dev,
	struct device_attribute *attr, char *buffer)
{}

static DEVICE_ATTR(lunid, 0444, pqi_lunid_show, NULL);
static DEVICE_ATTR(unique_id, 0444, pqi_unique_id_show, NULL);
static DEVICE_ATTR(path_info, 0444, pqi_path_info_show, NULL);
static DEVICE_ATTR(sas_address, 0444, pqi_sas_address_show, NULL);
static DEVICE_ATTR(ssd_smart_path_enabled, 0444, pqi_ssd_smart_path_enabled_show, NULL);
static DEVICE_ATTR(raid_level, 0444, pqi_raid_level_show, NULL);
static DEVICE_ATTR(raid_bypass_cnt, 0444, pqi_raid_bypass_cnt_show, NULL);
static DEVICE_ATTR(sas_ncq_prio_enable, 0644,
		pqi_sas_ncq_prio_enable_show, pqi_sas_ncq_prio_enable_store);
static DEVICE_ATTR(numa_node, 0444, pqi_numa_node_show, NULL);

static struct attribute *pqi_sdev_attrs[] =;

ATTRIBUTE_GROUPS();

static const struct scsi_host_template pqi_driver_template =;

/*
 * Allocate and register the Scsi_Host for @ctrl_info with the midlayer.
 * Returns 0 on success or a negative errno -- body elided here.
 */
static int pqi_register_scsi(struct pqi_ctrl_info *ctrl_info)
{}

/* Undo pqi_register_scsi(): remove and put the Scsi_Host. */
static void pqi_unregister_scsi(struct pqi_ctrl_info *ctrl_info)
{}

/* Poll until a previously requested PQI reset has completed. */
static int pqi_wait_for_pqi_reset_completion(struct pqi_ctrl_info *ctrl_info)
{}

/* Issue a PQI reset to the controller and wait for completion. */
static int pqi_reset(struct pqi_ctrl_info *ctrl_info)
{}

/* Query the controller serial number (shown via sysfs above). */
static int pqi_get_ctrl_serial_number(struct pqi_ctrl_info *ctrl_info)
{}

/* Query controller product details (model/vendor/firmware strings). */
static int pqi_get_ctrl_product_details(struct pqi_ctrl_info *ctrl_info)
{}

/* Cursor over one section of the PQI config table (members elided). */
struct pqi_config_table_section_info {};

/* Test @bit_position in the firmware-features "supported" bitmap. */
static inline bool pqi_is_firmware_feature_supported(
	struct pqi_config_table_firmware_features *firmware_features,
	unsigned int bit_position)
{}

/*
 * Test @bit_position in the "enabled" bitmap; reads through the iomem
 * mapping of the config table.
 */
static inline bool pqi_is_firmware_feature_enabled(
	struct pqi_config_table_firmware_features *firmware_features,
	void __iomem *firmware_features_iomem_addr,
	unsigned int bit_position)
{}

/* Set @bit_position in the host's "requested" feature bitmap. */
static inline void pqi_request_firmware_feature(
	struct pqi_config_table_firmware_features *firmware_features,
	unsigned int bit_position)
{}

/*
 * Tell the firmware that config-table sections @first_section through
 * @last_section have been updated by the host.
 */
static int pqi_config_table_update(struct pqi_ctrl_info *ctrl_info,
	u16 first_section, u16 last_section)
{}

/* Write the requested-features bitmap and ask firmware to enable them. */
static int pqi_enable_firmware_features(struct pqi_ctrl_info *ctrl_info,
	struct pqi_config_table_firmware_features *firmware_features,
	void __iomem *firmware_features_iomem_addr)
{}

/* One negotiable firmware feature (members elided in this view). */
struct pqi_firmware_feature {};

/* Log whether @firmware_feature was enabled or not. */
static void pqi_firmware_feature_status(struct pqi_ctrl_info *ctrl_info,
	struct pqi_firmware_feature *firmware_feature)
{}

/* Mirror @firmware_feature's final state into ctrl_info flags. */
static void pqi_ctrl_update_feature_flags(struct pqi_ctrl_info *ctrl_info,
	struct pqi_firmware_feature *firmware_feature)
{}

/* Invoke the feature's status/update callback, if any. */
static inline void pqi_firmware_feature_update(struct pqi_ctrl_info *ctrl_info,
	struct pqi_firmware_feature *firmware_feature)
{}

/* Serializes feature negotiation across controllers. */
static DEFINE_MUTEX(pqi_firmware_features_mutex);

/* Table of features the driver negotiates (initializer elided). */
static struct pqi_firmware_feature pqi_firmware_features[] =;

/* Negotiate pqi_firmware_features[] against the section at @section_info. */
static void pqi_process_firmware_features(
	struct pqi_config_table_section_info *section_info)
{}

/* Reset the feature table to its default (un-negotiated) state. */
static void pqi_init_firmware_features(void)
{}

/* Re-init then process the firmware-features section under the mutex. */
static void pqi_process_firmware_features_section(
	struct pqi_config_table_section_info *section_info)
{}

/*
 * Reset all controller settings that can be initialized during the processing
 * of the PQI Configuration Table.
 */

static void pqi_ctrl_reset_config(struct pqi_ctrl_info *ctrl_info)
{}

static int pqi_process_config_table(struct pqi_ctrl_info *ctrl_info)
{}

/* Switches the controller from PQI mode back into SIS mode. */

static int pqi_revert_to_sis_mode(struct pqi_ctrl_info *ctrl_info)
{}

/*
 * If the controller isn't already in SIS mode, this function forces it into
 * SIS mode.
 */

static int pqi_force_sis_mode(struct pqi_ctrl_info *ctrl_info)
{}

static void pqi_perform_lockup_action(void)
{}

static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
{}

/* Reset queue state for re-initialization (e.g. after resume/OFA). */
static void pqi_reinit_queues(struct pqi_ctrl_info *ctrl_info)
{}

/* Lighter-weight bring-up used when resuming an already-probed ctrl. */
static int pqi_ctrl_init_resume(struct pqi_ctrl_info *ctrl_info)
{}

/* Program the PCIe completion timeout for @pci_dev to @timeout. */
static inline int pqi_set_pcie_completion_timeout(struct pci_dev *pci_dev, u16 timeout)
{}

/* Enable the PCI device, map BARs, set DMA mask -- body elided here. */
static int pqi_pci_init(struct pqi_ctrl_info *ctrl_info)
{}

/* Undo pqi_pci_init(). */
static void pqi_cleanup_pci_init(struct pqi_ctrl_info *ctrl_info)
{}

/* Allocate a pqi_ctrl_info, preferring memory on @numa_node. */
static struct pqi_ctrl_info *pqi_alloc_ctrl_info(int numa_node)
{}

/* Free a pqi_ctrl_info allocated by pqi_alloc_ctrl_info(). */
static inline void pqi_free_ctrl_info(struct pqi_ctrl_info *ctrl_info)
{}

/* Release IRQs/MSI-X vectors owned by the controller. */
static void pqi_free_interrupts(struct pqi_ctrl_info *ctrl_info)
{}

/* Tear down all controller resources (queues, irqs, mappings). */
static void pqi_free_ctrl_resources(struct pqi_ctrl_info *ctrl_info)
{}

/* Orderly removal of the controller; used by the PCI remove path. */
static void pqi_remove_ctrl(struct pqi_ctrl_info *ctrl_info)
{}

/*
 * OFA (Online Firmware Activation) support: quiesce I/O, hand the
 * firmware a host buffer, and restart afterwards.  Bodies elided.
 */

/* Block new I/O and wait for outstanding requests before OFA. */
static void pqi_ofa_ctrl_quiesce(struct pqi_ctrl_info *ctrl_info)
{}

/* Resume normal operation after OFA completes or is cancelled. */
static void pqi_ofa_ctrl_unquiesce(struct pqi_ctrl_info *ctrl_info)
{}

/* Allocate @total_size bytes of OFA memory in @chunk_size pieces. */
static int pqi_ofa_alloc_mem(struct pqi_ctrl_info *ctrl_info, u32 total_size, u32 chunk_size)
{}

/* Allocate the host buffer the firmware requested for OFA. */
static int pqi_ofa_alloc_host_buffer(struct pqi_ctrl_info *ctrl_info)
{}

/* Describe the allocated host buffer in the OFA memory descriptor. */
static void pqi_ofa_setup_host_buffer(struct pqi_ctrl_info *ctrl_info)
{}

/* Free the OFA host buffer and its chunk allocations. */
static void pqi_ofa_free_host_buffer(struct pqi_ctrl_info *ctrl_info)
{}

/* Notify the firmware about the host buffer (vendor BMIC request). */
static int pqi_ofa_host_memory_update(struct pqi_ctrl_info *ctrl_info)
{}

/* Re-initialize the controller after OFA, delaying @delay_secs first. */
static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info, unsigned int delay_secs)
{}

/* Canned error info used to fail requests when the ctrl goes offline
 * (initializer elided in this view). */
static struct pqi_raid_error_info pqi_ctrl_offline_raid_error_info =;

/* Complete every outstanding request with the offline error info. */
static void pqi_fail_all_outstanding_requests(struct pqi_ctrl_info *ctrl_info)
{}

/* Deferred part of taking the controller offline (runs from a worker). */
static void pqi_take_ctrl_offline_deferred(struct pqi_ctrl_info *ctrl_info)
{}

/* Work handler wrapping pqi_take_ctrl_offline_deferred(). */
static void pqi_ctrl_offline_worker(struct work_struct *work)
{}

/* Map a pqi_ctrl_shutdown_reason to a human-readable string for logs. */
static char *pqi_ctrl_shutdown_reason_to_string(enum pqi_ctrl_shutdown_reason ctrl_shutdown_reason)
{}

/*
 * Mark the controller offline and schedule the deferred teardown;
 * @ctrl_shutdown_reason is recorded/logged.
 */
static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info,
	enum pqi_ctrl_shutdown_reason ctrl_shutdown_reason)
{}

/* Log identifying info for the controller being probed. */
static void pqi_print_ctrl_info(struct pci_dev *pci_dev,
	const struct pci_device_id *id)
{}

/* PCI .probe entry point: allocate ctrl_info and bring the ctrl up. */
static int pqi_pci_probe(struct pci_dev *pci_dev,
	const struct pci_device_id *id)
{}

/* PCI .remove entry point. */
static void pqi_pci_remove(struct pci_dev *pci_dev)
{}

/* Sanity check at shutdown: complain if commands are still pending. */
static void pqi_crash_if_pending_command(struct pqi_ctrl_info *ctrl_info)
{}

/* PCI .shutdown entry point (flush cache, stop the controller). */
static void pqi_shutdown(struct pci_dev *pci_dev)
{}

/* Validate/apply the "lockup_action" module parameter. */
static void pqi_process_lockup_action_param(void)
{}

/* Clamp limits for the ctrl-ready-timeout parameter (values elided). */
#define PQI_CTRL_READY_TIMEOUT_PARAM_MIN_SECS
#define PQI_CTRL_READY_TIMEOUT_PARAM_MAX_SECS

/* Validate/apply the controller-ready-timeout module parameter. */
static void pqi_process_ctrl_ready_timeout_param(void)
{}

/* Process all module parameters at load time. */
static void pqi_process_module_params(void)
{}

#if defined(CONFIG_PM)

/* Pick the BMIC flush-cache shutdown event appropriate for @pci_dev. */
static inline enum bmic_flush_cache_shutdown_event pqi_get_flush_cache_shutdown_event(struct pci_dev *pci_dev)
{}

/* Common quiesce path for suspend and freeze (@suspend selects which). */
static int pqi_suspend_or_freeze(struct device *dev, bool suspend)
{}

/* dev_pm_ops .suspend callback. */
static __maybe_unused int pqi_suspend(struct device *dev)
{}

/* Common wake path for resume and restore. */
static int pqi_resume_or_restore(struct device *dev)
{}

/* dev_pm_ops .freeze callback (hibernation). */
static int pqi_freeze(struct device *dev)
{}

/* dev_pm_ops .thaw callback (hibernation). */
static int pqi_thaw(struct device *dev)
{}

/* dev_pm_ops .poweroff callback. */
static int pqi_poweroff(struct device *dev)
{}

/* PM callback table for pqi_pci_driver (initializer elided here). */
static const struct dev_pm_ops pqi_pm_ops =;

#endif /* CONFIG_PM */

/* Define the PCI IDs for the controllers that we support.
 * NOTE(review): initializer elided in this view. */
static const struct pci_device_id pqi_pci_id_table[] =;

MODULE_DEVICE_TABLE(pci, pqi_pci_id_table);

/* PCI driver wiring probe/remove/shutdown (initializer elided here). */
static struct pci_driver pqi_pci_driver =;

/* Module entry point: process params and register the PCI driver. */
static int __init pqi_init(void)
{}

/* Module exit point: unregister the PCI driver. */
static void __exit pqi_cleanup(void)
{}

/*
 * module_init() was missing its argument, which does not compile; the
 * entry point is pqi_init (module_exit below kept its argument).
 */
module_init(pqi_init);
module_exit(pqi_cleanup);

/* Compile-time BUILD_BUG_ON-style layout checks (body elided here). */
static void pqi_verify_structures(void)
{}