// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
 */

#include <linux/delay.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/platform_device.h>
#include <linux/ktime.h>

#include <linux/mmc/mmc.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>

#include "cqhci.h"
#include "cqhci-crypto.h"

#define DCMD_SLOT 31
#define NUM_SLOTS 32

struct cqhci_slot {
	struct mmc_request *mrq;
	unsigned int flags;
#define CQHCI_EXTERNAL_TIMEOUT	BIT(0)
#define CQHCI_COMPLETED		BIT(1)
#define CQHCI_HOST_CRC		BIT(2)
#define CQHCI_HOST_TIMEOUT	BIT(3)
#define CQHCI_HOST_OTHER	BIT(4)
};

static bool cqhci_halted(struct cqhci_host *cq_host)
{
	return cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_HALT;
}

static inline u8 *get_desc(struct cqhci_host *cq_host, u8 tag)
{
	return cq_host->desc_base + (tag * cq_host->slot_sz);
}

static inline u8 *get_link_desc(struct cqhci_host *cq_host, u8 tag)
{
	/* The link descriptor immediately follows the slot's task descriptor */
	return get_desc(cq_host, tag) + cq_host->task_desc_len;
}

static inline size_t get_trans_desc_offset(struct cqhci_host *cq_host, u8 tag)
{
	return cq_host->trans_desc_len * cq_host->mmc->max_segs * tag;
}

static inline dma_addr_t get_trans_desc_dma(struct cqhci_host *cq_host, u8 tag)
{
	return cq_host->trans_desc_dma_base + get_trans_desc_offset(cq_host, tag);
}

static inline u8 *get_trans_desc(struct cqhci_host *cq_host, u8 tag)
{
	return cq_host->trans_desc_base + get_trans_desc_offset(cq_host, tag);
}

static void setup_trans_desc(struct cqhci_host *cq_host, u8 tag)
{}
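
/*
 * A condensed sketch of what setup_trans_desc() above is expected to write,
 * assuming the CQHCI_* field macros from cqhci.h (the in-tree version also
 * special-cases the DCMD slot and the 32-bit descriptor layout): a link
 * descriptor is ACT 0x6, and its payload is the DMA address of the slot's
 * transfer-descriptor run.
 */
static void example_link_slot(struct cqhci_host *cq_host, u8 tag)
{
	u8 *link = get_link_desc(cq_host, tag);
	__le64 *data_addr = (__le64 __force *)(link + 4);

	memset(link, 0, cq_host->link_desc_len);

	/* Valid link descriptor, not the end of the chain */
	*link = CQHCI_VALID(1) | CQHCI_ACT(0x6) | CQHCI_END(0);
	data_addr[0] = cpu_to_le64(get_trans_desc_dma(cq_host, tag));
}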

static void cqhci_set_irqs(struct cqhci_host *cq_host, u32 set)
{
	cqhci_writel(cq_host, set, CQHCI_ISTE);
	cqhci_writel(cq_host, set, CQHCI_ISGE);
}

#define DRV_NAME "cqhci"

#define CQHCI_DUMP(f, x...) \
	pr_err("%s: " DRV_NAME ": " f, mmc_hostname(mmc), ## x)

static void cqhci_dumpregs(struct cqhci_host *cq_host)
{}

/*
 * The allocated descriptor table for task, link & transfer descriptors
 * looks like:
 * |----------|
 * |task desc |  |->|----------|
 * |----------|  |  |trans desc|
 * |link desc-|->|  |----------|
 * |----------|          .
 *      .                .
 *  no. of slots      max-segs
 *      .           |----------|
 * |----------|
 * The idea here is to create the [task+trans] table and mark & point the
 * link desc to the transfer desc table on a per slot basis.
 */
static int cqhci_host_alloc_tdl(struct cqhci_host *cq_host)
{}
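
/*
 * An illustrative sketch of the size math behind cqhci_host_alloc_tdl(),
 * assuming the field names from struct cqhci_host in cqhci.h (the
 * DMA-coherent allocations, quirk handling, and the per-slot
 * setup_trans_desc() loop are omitted): one task descriptor plus one link
 * descriptor per slot, and a max_segs-deep run of transfer descriptors
 * hanging off each slot's link descriptor.
 */
static size_t example_tdl_sizes(struct cqhci_host *cq_host)
{
	/* Descriptors are 8 bytes with 32-bit addressing, 16 with 64-bit */
	cq_host->task_desc_len = cq_host->caps & CQHCI_TASK_DESC_SZ_128 ? 16 : 8;
	cq_host->link_desc_len = cq_host->dma64 ? 16 : 8;
	cq_host->trans_desc_len = cq_host->dma64 ? 16 : 8;

	/* One slot = one task descriptor + one link descriptor */
	cq_host->slot_sz = cq_host->task_desc_len + cq_host->link_desc_len;
	cq_host->desc_size = cq_host->slot_sz * cq_host->num_slots;

	/* Every slot also owns mmc->max_segs transfer descriptors */
	cq_host->data_size = get_trans_desc_offset(cq_host, cq_host->num_slots);

	return cq_host->desc_size + cq_host->data_size;
}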

static void __cqhci_enable(struct cqhci_host *cq_host)
{}

static void __cqhci_disable(struct cqhci_host *cq_host)
{}

int cqhci_deactivate(struct mmc_host *mmc)
{}
EXPORT_SYMBOL(cqhci_deactivate);

int cqhci_resume(struct mmc_host *mmc)
{}
EXPORT_SYMBOL(cqhci_resume);

static int cqhci_enable(struct mmc_host *mmc, struct mmc_card *card)
{}

/* CQHCI is idle and should halt immediately, so set a small timeout */
#define CQHCI_OFF_TIMEOUT 100

static u32 cqhci_read_ctl(struct cqhci_host *cq_host)
{
	return cqhci_readl(cq_host, CQHCI_CTL);
}

static void cqhci_off(struct mmc_host *mmc)
{}
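
/*
 * A hedged sketch of the halt handshake cqhci_off() performs (the in-tree
 * version also checks cqe_on/recovery state and calls the host's
 * enable/disable hooks; example_halt_for_off is hypothetical): request a
 * halt via CQHCI_CTL, then poll it back with cqhci_read_ctl() until
 * CQHCI_HALT is confirmed or CQHCI_OFF_TIMEOUT microseconds elapse.
 */
static void example_halt_for_off(struct mmc_host *mmc)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	u32 reg;
	int err;

	cqhci_writel(cq_host, CQHCI_HALT, CQHCI_CTL);

	err = readx_poll_timeout(cqhci_read_ctl, cq_host, reg,
				 reg & CQHCI_HALT, 0, CQHCI_OFF_TIMEOUT);
	if (err < 0)
		pr_err("%s: cqhci: CQE stuck on\n", mmc_hostname(mmc));
}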

static void cqhci_disable(struct mmc_host *mmc)
{}

static void cqhci_prep_task_desc(struct mmc_request *mrq,
				 struct cqhci_host *cq_host, int tag)
{}
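
/*
 * An illustrative encoding of the 64-bit task-descriptor attribute word
 * that cqhci_prep_task_desc() builds, assuming the CQHCI_* field macros
 * from cqhci.h (example_task_attrs is hypothetical, and some request-flag
 * handling is condensed): a data task is ACT 0x5 with the block count and
 * start address packed into the upper bits.
 */
static u64 example_task_attrs(struct mmc_request *mrq)
{
	u32 req_flags = mrq->data->flags;

	return CQHCI_VALID(1) |
	       CQHCI_END(1) |
	       CQHCI_INT(1) |
	       CQHCI_ACT(0x5) |
	       CQHCI_DATA_TAG(!!(req_flags & MMC_DATA_DAT_TAG)) |
	       CQHCI_DATA_DIR(!!(req_flags & MMC_DATA_READ)) |
	       CQHCI_PRIORITY(!!(req_flags & MMC_DATA_PRIO)) |
	       CQHCI_REL_WRITE(!!(req_flags & MMC_DATA_REL_WR)) |
	       CQHCI_BLK_COUNT(mrq->data->blocks) |
	       CQHCI_BLK_ADDR((u64)mrq->data->blk_addr);
}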

static int cqhci_dma_map(struct mmc_host *host, struct mmc_request *mrq)
{}

void cqhci_set_tran_desc(u8 *desc, dma_addr_t addr, int len, bool end,
			 bool dma64)
{}
EXPORT_SYMBOL(cqhci_set_tran_desc);
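
/*
 * A condensed sketch of the encoding cqhci_set_tran_desc() is expected to
 * perform, assuming the cqhci.h field macros: a transfer descriptor is
 * ACT 0x4 carrying a byte length and the buffer's DMA address, with the
 * address width chosen by the dma64 flag.
 */
static void example_tran_desc(u8 *desc, dma_addr_t addr, int len, bool end,
			      bool dma64)
{
	__le32 *attr = (__le32 __force *)desc;

	*attr = cpu_to_le32(CQHCI_VALID(1) |
			    CQHCI_END(end ? 1 : 0) |
			    CQHCI_INT(0) |
			    CQHCI_ACT(0x4) |
			    CQHCI_DAT_LENGTH(len));

	if (dma64) {
		__le64 *dataddr = (__le64 __force *)(desc + 4);

		dataddr[0] = cpu_to_le64(addr);
	} else {
		__le32 *dataddr = (__le32 __force *)(desc + 4);

		dataddr[0] = cpu_to_le32(addr);
	}
}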

static int cqhci_prep_tran_desc(struct mmc_request *mrq,
			       struct cqhci_host *cq_host, int tag)
{}

static void cqhci_prep_dcmd_desc(struct mmc_host *mmc,
				   struct mmc_request *mrq)
{}
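
/*
 * An illustrative attribute word for the direct command (DCMD) slot 31
 * that cqhci_prep_dcmd_desc() fills in (example_dcmd_attrs is hypothetical;
 * deriving timing/resp_type from the command flags is omitted): QBAR
 * serializes the DCMD behind all previously queued tasks.
 */
static u64 example_dcmd_attrs(struct mmc_command *cmd, u8 timing, u8 resp_type)
{
	return CQHCI_VALID(1) |
	       CQHCI_END(1) |
	       CQHCI_INT(1) |
	       CQHCI_QBAR(1) |
	       CQHCI_ACT(0x5) |
	       CQHCI_CMD_INDEX(cmd->opcode) |
	       CQHCI_CMD_TIMING(timing) |
	       CQHCI_RESP_TYPE(resp_type);
}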

static void cqhci_post_req(struct mmc_host *host, struct mmc_request *mrq)
{}

static inline int cqhci_tag(struct mmc_request *mrq)
{
	/* DCMDs use the dedicated slot; data tasks use the block-layer tag */
	return mrq->cmd ? DCMD_SLOT : mrq->tag;
}

static int cqhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{}
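
/*
 * A hedged sketch of the data-task fast path in cqhci_request()
 * (example_submit is hypothetical; locking, the first-time CQE enable,
 * DCMD handling, and error paths are omitted): encode the descriptors for
 * the tag, record the request in its slot, then ring the task doorbell.
 */
static void example_submit(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	int tag = cqhci_tag(mrq);

	cqhci_prep_task_desc(mrq, cq_host, tag);

	if (cqhci_prep_tran_desc(mrq, cq_host, tag))
		return;		/* DMA mapping failed */

	cq_host->slot[tag].mrq = mrq;
	cq_host->slot[tag].flags = 0;

	/* Writing the tag's bit to CQHCI_TDBR hands the slot to the CQE */
	cqhci_writel(cq_host, 1 << tag, CQHCI_TDBR);
}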

static void cqhci_recovery_needed(struct mmc_host *mmc, struct mmc_request *mrq,
				  bool notify)
{}

static unsigned int cqhci_error_flags(int error1, int error2)
{}

static void cqhci_error_irq(struct mmc_host *mmc, u32 status, int cmd_error,
			    int data_error)
{}

static void cqhci_finish_mrq(struct mmc_host *mmc, unsigned int tag)
{}

irqreturn_t cqhci_irq(struct mmc_host *mmc, u32 intmask, int cmd_error,
		      int data_error)
{}
EXPORT_SYMBOL(cqhci_irq);
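
/*
 * A hedged sketch of the completion path inside cqhci_irq() (condensed;
 * the error-interrupt and recovery branches are omitted, and
 * example_handle_completions is hypothetical): acknowledge the task
 * completion interrupt, read the completed-tag bits from CQHCI_TCN, clear
 * them, and finish each corresponding request.
 */
static irqreturn_t example_handle_completions(struct mmc_host *mmc)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	unsigned long comp_status;
	int tag;

	cqhci_writel(cq_host, CQHCI_IS_TCC, CQHCI_IS);

	comp_status = cqhci_readl(cq_host, CQHCI_TCN);
	cqhci_writel(cq_host, comp_status, CQHCI_TCN);

	for_each_set_bit(tag, &comp_status, cq_host->num_slots)
		cqhci_finish_mrq(mmc, tag);

	return IRQ_HANDLED;
}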

static bool cqhci_is_idle(struct cqhci_host *cq_host, int *ret)
{}

static int cqhci_wait_for_idle(struct mmc_host *mmc)
{}

static bool cqhci_timeout(struct mmc_host *mmc, struct mmc_request *mrq,
			  bool *recovery_needed)
{}

static bool cqhci_tasks_cleared(struct cqhci_host *cq_host)
{
	return !(cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_CLEAR_ALL_TASKS);
}

static bool cqhci_clear_all_tasks(struct mmc_host *mmc, unsigned int timeout)
{}

static bool cqhci_halt(struct mmc_host *mmc, unsigned int timeout)
{}

/*
 * After halting we expect to be able to use the command line. We interpret the
 * failure to halt to mean the data lines might still be in use (and the upper
 * layers will need to send a STOP command), however failing to halt complicates
 * the recovery, so set a timeout that would reasonably allow I/O to complete.
 */
#define CQHCI_START_HALT_TIMEOUT 500

static void cqhci_recovery_start(struct mmc_host *mmc)
{}

static int cqhci_error_from_flags(unsigned int flags)
{}

static void cqhci_recover_mrq(struct cqhci_host *cq_host, unsigned int tag)
{}

static void cqhci_recover_mrqs(struct cqhci_host *cq_host)
{}

/*
 * By now the command and data lines should be unused so there is no reason for
 * CQHCI to take a long time to halt, but if it doesn't halt there could be
 * problems clearing tasks, so be generous.
 */
#define CQHCI_FINISH_HALT_TIMEOUT 20

/* CQHCI could be expected to clear its internal state pretty quickly */
#define CQHCI_CLEAR_TIMEOUT 20
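
/*
 * A hedged sketch of the clear-all-tasks handshake the timeouts above
 * guard (example_clear_tasks is hypothetical; the in-tree version waits
 * on the TCL interrupt via a wait queue rather than polling): set
 * CQHCI_CLEAR_ALL_TASKS in CQHCI_CTL, then wait for the controller to
 * drop the bit.
 */
static bool example_clear_tasks(struct cqhci_host *cq_host)
{
	u32 ctl = cqhci_readl(cq_host, CQHCI_CTL);
	bool cleared;

	cqhci_writel(cq_host, ctl | CQHCI_CLEAR_ALL_TASKS, CQHCI_CTL);

	return !readx_poll_timeout(cqhci_tasks_cleared, cq_host, cleared,
				   cleared, 100, CQHCI_CLEAR_TIMEOUT * 1000);
}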

static void cqhci_recovery_finish(struct mmc_host *mmc)
{}

static const struct mmc_cqe_ops cqhci_cqe_ops = {
	.cqe_enable = cqhci_enable,
	.cqe_disable = cqhci_disable,
	.cqe_request = cqhci_request,
	.cqe_post_req = cqhci_post_req,
	.cqe_off = cqhci_off,
	.cqe_wait_for_idle = cqhci_wait_for_idle,
	.cqe_timeout = cqhci_timeout,
	.cqe_recovery_start = cqhci_recovery_start,
	.cqe_recovery_finish = cqhci_recovery_finish,
};

struct cqhci_host *cqhci_pltfm_init(struct platform_device *pdev)
{}
EXPORT_SYMBOL(cqhci_pltfm_init);

static unsigned int cqhci_ver_major(struct cqhci_host *cq_host)
{
	return CQHCI_VER_MAJOR(cqhci_readl(cq_host, CQHCI_VER));
}

static unsigned int cqhci_ver_minor(struct cqhci_host *cq_host)
{
	u32 ver = cqhci_readl(cq_host, CQHCI_VER);

	return CQHCI_VER_MINOR1(ver) * 10 + CQHCI_VER_MINOR2(ver);
}

int cqhci_init(struct cqhci_host *cq_host, struct mmc_host *mmc,
	      bool dma64)
{}
EXPORT_SYMBOL(cqhci_init);
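
/*
 * A hedged usage sketch (the foo_* name is hypothetical): a platform host
 * driver typically maps the CQE register window with cqhci_pltfm_init(),
 * then hands the result to cqhci_init(), which hooks mmc->cqe_ops and
 * allocates the slot state; the driver's own interrupt handler forwards
 * CQE status to cqhci_irq().
 */
static int foo_probe_cqe(struct platform_device *pdev, struct mmc_host *mmc,
			 bool dma64)
{
	struct cqhci_host *cq_host = cqhci_pltfm_init(pdev);

	if (IS_ERR(cq_host))
		return PTR_ERR(cq_host);

	return cqhci_init(cq_host, mmc, dma64);
}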

MODULE_AUTHOR("Venkat Gopalakrishnan <venkatg@codeaurora.org>");
MODULE_DESCRIPTION("Command Queue Host Controller Interface driver");
MODULE_LICENSE("GPL v2");