linux/drivers/ufs/core/ufs-mcq.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2022 Qualcomm Innovation Center. All rights reserved.
 *
 * Authors:
 *	Asutosh Das <[email protected]>
 *	Can Guo <[email protected]>
 */

#include <linux/unaligned.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include "ufshcd-priv.h"
#include <linux/delay.h>
#include <scsi/scsi_cmnd.h>
#include <linux/bitfield.h>
#include <linux/iopoll.h>

#define MAX_QUEUE_SUP
#define QCFGPTR
#define UFS_MCQ_MIN_RW_QUEUES
#define UFS_MCQ_MIN_READ_QUEUES
#define UFS_MCQ_MIN_POLL_QUEUES
#define QUEUE_EN_OFFSET
#define QUEUE_ID_OFFSET

#define MCQ_CFG_MAC_MASK
#define MCQ_ENTRY_SIZE_IN_DWORD
#define CQE_UCD_BA

/* Max mcq register polling time in microseconds */
#define MCQ_POLL_US
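
/*
 * Illustrative sketch only (not part of the elided driver code): a poll budget
 * such as MCQ_POLL_US is normally consumed via the <linux/iopoll.h> helpers.
 * The helper name, the 20 us sleep interval and the "wait for bits to clear"
 * condition below are assumptions made for this example.
 */
static __maybe_unused int ufs_mcq_example_poll_clear(void __iomem *reg, u32 mask)
{
	u32 val;

	/* Sleep ~20 us between reads until (val & mask) == 0 or the budget expires. */
	return readl_poll_timeout(reg, val, !(val & mask), 20, MCQ_POLL_US);
}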

static int rw_queue_count_set(const char *val, const struct kernel_param *kp)
{}

static const struct kernel_param_ops rw_queue_count_ops =;

static unsigned int rw_queues;
module_param_cb();
MODULE_PARM_DESC();

static int read_queue_count_set(const char *val, const struct kernel_param *kp)
{}

static const struct kernel_param_ops read_queue_count_ops =;

static unsigned int read_queues;
module_param_cb();
MODULE_PARM_DESC();

static int poll_queue_count_set(const char *val, const struct kernel_param *kp)
{}

static const struct kernel_param_ops poll_queue_count_ops =;

static unsigned int poll_queues =;
module_param_cb();
MODULE_PARM_DESC();
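
/*
 * Illustrative sketch only: the three parameter hook-ups above are elided. A
 * writable, range-checked queue-count parameter is typically registered as
 * below. The "example_queues" name, the [0, nr_cpus] bounds and the 0644
 * permissions are assumptions made for this example.
 */
static int example_queue_count_set(const char *val, const struct kernel_param *kp)
{
	/* Reject values outside [0, num_possible_cpus()] before storing them. */
	return param_set_uint_minmax(val, kp, 0, num_possible_cpus());
}

static const struct kernel_param_ops example_queue_count_ops = {
	.set = example_queue_count_set,
	.get = param_get_uint,
};

static unsigned int example_queues;
module_param_cb(example_queues, &example_queue_count_ops, &example_queues, 0644);
MODULE_PARM_DESC(example_queues, "Example only: number of queues of this type");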

/**
 * ufshcd_mcq_config_mac - Set the Max Active Commands (MAC).
 * @hba: per adapter instance
 * @max_active_cmds: maximum number of commands that may be outstanding in the
 * device at any time.
 *
 * The controller won't send more than max_active_cmds commands to the device
 * at any time.
 */
void ufshcd_mcq_config_mac(struct ufs_hba *hba, u32 max_active_cmds)
{}
EXPORT_SYMBOL_GPL();
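
/*
 * Illustrative sketch only (the body above is elided): MAC is normally set by
 * a read-modify-write of the MAC field in the MCQ Config register. The
 * REG_UFS_MCQ_CFG register name and the "value - 1" encoding of the field are
 * assumptions made for this example.
 */
static __maybe_unused void ufs_mcq_example_config_mac(struct ufs_hba *hba,
						      u32 max_active_cmds)
{
	u32 val = ufshcd_readl(hba, REG_UFS_MCQ_CFG);

	/* The MAC field is assumed to be 0-based, hence max_active_cmds - 1. */
	val &= ~MCQ_CFG_MAC_MASK;
	val |= FIELD_PREP(MCQ_CFG_MAC_MASK, max_active_cmds - 1);
	ufshcd_writel(hba, val, REG_UFS_MCQ_CFG);
}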

/**
 * ufshcd_mcq_req_to_hwq - find the hardware queue on which the
 * request would be issued.
 * @hba: per adapter instance
 * @req: pointer to the request to be issued
 *
 * Return: the hardware queue instance on which the request will be or has
 * been queued. %NULL if the request has already been freed.
 */
struct ufs_hw_queue *ufshcd_mcq_req_to_hwq(struct ufs_hba *hba,
					 struct request *req)
{}
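
/*
 * Illustrative sketch only: the mapping documented above can be derived from
 * the blk-mq hardware context that owns the request. The hba->uhq[] hardware
 * queue array and the "mq_hctx is NULL once the request is freed" check are
 * assumptions made for this example.
 */
static __maybe_unused struct ufs_hw_queue *
ufs_mcq_example_req_to_hwq(struct ufs_hba *hba, struct request *req)
{
	struct blk_mq_hw_ctx *hctx = READ_ONCE(req->mq_hctx);

	return hctx ? &hba->uhq[hctx->queue_num] : NULL;
}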

/**
 * ufshcd_mcq_queue_cfg_addr - get the start address of the MCQ Queue Config
 * Registers.
 * @hba: per adapter instance
 *
 * Return: Start address of MCQ Queue Config Registers in HCI
 */
unsigned int ufshcd_mcq_queue_cfg_addr(struct ufs_hba *hba)
{}
EXPORT_SYMBOL_GPL();
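
/*
 * Illustrative sketch only: per UFSHCI 4.0 the QCFGPTR field of the MCQ
 * capability register encodes the queue-config start address in units of
 * 0x200 bytes. Caching the capability value in hba->mcq_capabilities is an
 * assumption made for this example.
 */
static __maybe_unused unsigned int
ufs_mcq_example_queue_cfg_addr(struct ufs_hba *hba)
{
	return FIELD_GET(QCFGPTR, hba->mcq_capabilities) * 0x200;
}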

/**
 * ufshcd_mcq_decide_queue_depth - decide the queue depth
 * @hba: per adapter instance
 *
 * Return: queue depth on success; a negative error code on failure.
 *
 * MAC - Max. Active Commands of the Host Controller (HC).
 * The HC won't send more commands than this to the device.
 * Calculates and adjusts the queue depth based on the depth
 * supported by the HC and the UFS device.
 */
int ufshcd_mcq_decide_queue_depth(struct ufs_hba *hba)
{}
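
/*
 * Illustrative sketch only: the calculation described above boils down to
 * taking the smaller of the controller's MAC and the device's bQueueDepth,
 * where bQueueDepth == 0 means the device imposes no limit of its own. The
 * parameter names are assumptions made for this example.
 */
static __maybe_unused int ufs_mcq_example_queue_depth(int hba_mac, u8 bqueuedepth)
{
	if (!bqueuedepth)
		return hba_mac;

	return min_t(int, bqueuedepth, hba_mac);
}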

static int ufshcd_mcq_config_nr_queues(struct ufs_hba *hba)
{}

int ufshcd_mcq_memory_alloc(struct ufs_hba *hba)
{}

static void __iomem *mcq_opr_base(struct ufs_hba *hba,
					 enum ufshcd_mcq_opr n, int i)
{}

u32 ufshcd_mcq_read_cqis(struct ufs_hba *hba, int i)
{}
EXPORT_SYMBOL_GPL();

void ufshcd_mcq_write_cqis(struct ufs_hba *hba, u32 val, int i)
{}
EXPORT_SYMBOL_GPL();

/*
 * The current MCQ specification doesn't provide a Task Tag or its equivalent in
 * the Completion Queue Entry. Find the Task Tag using an indirect method.
 */
static int ufshcd_mcq_get_tag(struct ufs_hba *hba, struct cq_entry *cqe)
{}
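
/*
 * Illustrative sketch only: the indirect method above can recover the tag from
 * the UTP command descriptor base address carried in the CQE, because the
 * descriptors live in one contiguous allocation. The command_desc_base_addr
 * field, the hba->ucdl_dma_addr base and the ufshcd_get_ucd_size() helper are
 * assumptions made for this example.
 */
static __maybe_unused int ufs_mcq_example_get_tag(struct ufs_hba *hba,
						  struct cq_entry *cqe)
{
	/* Offset of this command's UCD from the start of the UCD list... */
	u64 addr = (le64_to_cpu(cqe->command_desc_base_addr) & CQE_UCD_BA) -
		   hba->ucdl_dma_addr;

	/* ...divided by the per-command descriptor size gives the task tag. */
	return div_u64(addr, ufshcd_get_ucd_size(hba));
}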

static void ufshcd_mcq_process_cqe(struct ufs_hba *hba,
				   struct ufs_hw_queue *hwq)
{}

void ufshcd_mcq_compl_all_cqes_lock(struct ufs_hba *hba,
				    struct ufs_hw_queue *hwq)
{}

unsigned long ufshcd_mcq_poll_cqe_lock(struct ufs_hba *hba,
				       struct ufs_hw_queue *hwq)
{}
EXPORT_SYMBOL_GPL();

void ufshcd_mcq_make_queues_operational(struct ufs_hba *hba)
{}
EXPORT_SYMBOL_GPL();

void ufshcd_mcq_enable_esi(struct ufs_hba *hba)
{}
EXPORT_SYMBOL_GPL();

void ufshcd_mcq_enable(struct ufs_hba *hba)
{}
EXPORT_SYMBOL_GPL();

void ufshcd_mcq_disable(struct ufs_hba *hba)
{}

void ufshcd_mcq_config_esi(struct ufs_hba *hba, struct msi_msg *msg)
{}
EXPORT_SYMBOL_GPL();

int ufshcd_mcq_init(struct ufs_hba *hba)
{}

static int ufshcd_mcq_sq_stop(struct ufs_hba *hba, struct ufs_hw_queue *hwq)
{}

static int ufshcd_mcq_sq_start(struct ufs_hba *hba, struct ufs_hw_queue *hwq)
{}

/**
 * ufshcd_mcq_sq_cleanup - Clean up submission queue resources
 * associated with the pending command.
 * @hba: per adapter instance.
 * @task_tag: The command's task tag.
 *
 * Return: 0 for success; error code otherwise.
 */
int ufshcd_mcq_sq_cleanup(struct ufs_hba *hba, int task_tag)
{}

/**
 * ufshcd_mcq_nullify_sqe - Nullify the submission queue entry.
 * Write the sqe's Command Type to 0xF. The host controller will not
 * fetch any sqe with Command Type = 0xF.
 *
 * @utrd: UTP Transfer Request Descriptor to be nullified.
 */
static void ufshcd_mcq_nullify_sqe(struct utp_transfer_req_desc *utrd)
{}
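
/*
 * Illustrative sketch only: nullification as documented above is a single
 * store to the Command Type field of the request descriptor header. The
 * header.command_type field name is an assumption made for this example.
 */
static __maybe_unused void
ufs_mcq_example_nullify_sqe(struct utp_transfer_req_desc *utrd)
{
	/* 0xF is a Command Type value the host controller never fetches. */
	utrd->header.command_type = 0xf;
}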

/**
 * ufshcd_mcq_sqe_search - Search for the command in the submission queue.
 * If the command is in the submission queue and not issued to the device yet,
 * nullify the sqe so the host controller will skip fetching the sqe.
 *
 * @hba: per adapter instance.
 * @hwq: Hardware Queue to be searched.
 * @task_tag: The command's task tag.
 *
 * Return: true if the SQE containing the command was found in the SQ (i.e. not
 * yet fetched by the controller); false otherwise.
 */
static bool ufshcd_mcq_sqe_search(struct ufs_hba *hba,
				  struct ufs_hw_queue *hwq, int task_tag)
{}
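
/*
 * Illustrative sketch only: the search documented above is a scan of the SQ
 * ring between the controller's head slot and the driver's tail slot,
 * comparing each entry's command descriptor address against that of the
 * command being aborted. The field names (sqe_base_addr, max_entries) and the
 * match-by-descriptor-address criterion are assumptions made for this example;
 * the real code must also stop and restart the SQ around the scan.
 */
static __maybe_unused bool
ufs_mcq_example_scan_sq(struct ufs_hw_queue *hwq, u32 head_slot, u32 tail_slot,
			u64 target_ucd_addr)
{
	while (head_slot != tail_slot) {
		struct utp_transfer_req_desc *utrd = hwq->sqe_base_addr + head_slot;

		if ((le64_to_cpu(utrd->command_desc_base_addr) & CQE_UCD_BA) ==
		    target_ucd_addr) {
			/* Found before the controller fetched it: skip it. */
			ufshcd_mcq_nullify_sqe(utrd);
			return true;
		}

		/* Advance with wrap-around at the end of the ring. */
		if (++head_slot == hwq->max_entries)
			head_slot = 0;
	}

	return false;
}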

/**
 * ufshcd_mcq_abort - Abort the command in MCQ.
 * @cmd: The command to be aborted.
 *
 * Return: SUCCESS or FAILED.
 */
int ufshcd_mcq_abort(struct scsi_cmnd *cmd)
{}