linux/drivers/ata/sata_mv.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * sata_mv.c - Marvell SATA support
 *
 * Copyright 2008-2009: Marvell Corporation, all rights reserved.
 * Copyright 2005: EMC Corporation, all rights reserved.
 * Copyright 2005 Red Hat, Inc.  All rights reserved.
 *
 * Originally written by Brett Russ.
 * Extensive overhaul and enhancement by Mark Lord <[email protected]>.
 *
 * Please ALWAYS copy [email protected] on emails.
 */

/*
 * sata_mv TODO list:
 *
 * --> Develop a low-power-consumption strategy, and implement it.
 *
 * --> Add sysfs attributes for per-chip / per-HC IRQ coalescing thresholds.
 *
 * --> [Experiment, Marvell value added] Is it possible to use target
 *       mode to cross-connect two Linux boxes with Marvell cards?  If so,
 *       creating LibATA target mode support would be very interesting.
 *
 *       Target mode, for those without docs, is the ability to directly
 *       connect two SATA ports.
 */

/*
 * 80x1-B2 errata PCI#11:
 *
 * Users of the 6041/6081 Rev.B2 chips (current is C0)
 * should be careful to insert those cards only onto PCI-X bus #0,
 * and only in device slots 0..7, not higher.  The chips may not
 * work correctly otherwise  (note: this is a pretty rare condition).
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <linux/clk.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/ata_platform.h>
#include <linux/mbus.h>
#include <linux/bitops.h>
#include <linux/gfp.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <linux/libata.h>

#define DRV_NAME
#define DRV_VERSION

/*
 * module options
 */

#ifdef CONFIG_PCI
static int msi;
module_param(msi, int, S_IRUGO);
MODULE_PARM_DESC();
#endif

static int irq_coalescing_io_count;
module_param(irq_coalescing_io_count, int, S_IRUGO);
MODULE_PARM_DESC();

static int irq_coalescing_usecs;
module_param(irq_coalescing_usecs, int, S_IRUGO);
MODULE_PARM_DESC();

enum {};

#define IS_GEN_I(hpriv)
#define IS_GEN_II(hpriv)
#define IS_GEN_IIE(hpriv)
#define IS_PCIE(hpriv)
#define IS_SOC(hpriv)

#define WINDOW_CTRL(i)
#define WINDOW_BASE(i)

enum {};

enum chip_type {};

/* Command ReQuest Block: 32B */
struct mv_crqb {};

struct mv_crqb_iie {};

/* Command ResPonse Block: 8B */
struct mv_crpb {};

/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
struct mv_sg {};

/*
 * We keep a local cache of a few frequently accessed port
 * registers here, to avoid having to read them (very slow)
 * when switching between EDMA and non-EDMA modes.
 */
struct mv_cached_regs {};

struct mv_port_priv {};

struct mv_port_signal {};

struct mv_host_priv {};

struct mv_hw_ops {};

static int mv_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val);
static int mv_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val);
static int mv5_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val);
static int mv5_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val);
static int mv_port_start(struct ata_port *ap);
static void mv_port_stop(struct ata_port *ap);
static int mv_qc_defer(struct ata_queued_cmd *qc);
static enum ata_completion_errors mv_qc_prep(struct ata_queued_cmd *qc);
static enum ata_completion_errors mv_qc_prep_iie(struct ata_queued_cmd *qc);
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
static int mv_hardreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline);
static void mv_eh_freeze(struct ata_port *ap);
static void mv_eh_thaw(struct ata_port *ap);
static void mv6_dev_config(struct ata_device *dev);

static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
static int mv5_reset_hc(struct ata_host *host, void __iomem *mmio,
			unsigned int n_hc);
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio);

static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
static int mv6_reset_hc(struct ata_host *host, void __iomem *mmio,
			unsigned int n_hc);
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
				      void __iomem *mmio);
static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
				      void __iomem *mmio);
static int mv_soc_reset_hc(struct ata_host *host,
				  void __iomem *mmio, unsigned int n_hc);
static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
				      void __iomem *mmio);
static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio);
static void mv_soc_65n_phy_errata(struct mv_host_priv *hpriv,
				  void __iomem *mmio, unsigned int port);
static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio);
static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no);
static int mv_stop_edma(struct ata_port *ap);
static int mv_stop_edma_engine(void __iomem *port_mmio);
static void mv_edma_cfg(struct ata_port *ap, int want_ncq, int want_edma);

static void mv_pmp_select(struct ata_port *ap, int pmp);
static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
				unsigned long deadline);
static int  mv_softreset(struct ata_link *link, unsigned int *class,
				unsigned long deadline);
static void mv_pmp_error_handler(struct ata_port *ap);
static void mv_process_crpb_entries(struct ata_port *ap,
					struct mv_port_priv *pp);

static void mv_sff_irq_clear(struct ata_port *ap);
static int mv_check_atapi_dma(struct ata_queued_cmd *qc);
static void mv_bmdma_setup(struct ata_queued_cmd *qc);
static void mv_bmdma_start(struct ata_queued_cmd *qc);
static void mv_bmdma_stop(struct ata_queued_cmd *qc);
static u8   mv_bmdma_status(struct ata_port *ap);
static u8 mv_sff_check_status(struct ata_port *ap);

/* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below
 * because we have to allow room for worst case splitting of
 * PRDs for 64K boundaries in mv_fill_sg().
 */
#ifdef CONFIG_PCI
static const struct scsi_host_template mv5_sht =;
#endif
static const struct scsi_host_template mv6_sht =;

static struct ata_port_operations mv5_ops =;

static struct ata_port_operations mv6_ops =;

static struct ata_port_operations mv_iie_ops =;

static const struct ata_port_info mv_port_info[] =;

static const struct mv_hw_ops mv5xxx_ops =;

static const struct mv_hw_ops mv6xxx_ops =;

static const struct mv_hw_ops mv_soc_ops =;

static const struct mv_hw_ops mv_soc_65n_ops =;

/*
 * Functions
 */

static inline void writelfl(unsigned long data, void __iomem *addr)
{}

static inline unsigned int mv_hc_from_port(unsigned int port)
{}

static inline unsigned int mv_hardport_from_port(unsigned int port)
{}

/*
 * Consolidate some rather tricky bit shift calculations.
 * This is hot-path stuff, so not a function.
 * Simple code, with two return values, so macro rather than inline.
 *
 * port is the sole input, in range 0..7.
 * shift is one output, for use with main_irq_cause / main_irq_mask registers.
 * hardport is the other output, in range 0..3.
 *
 * Note that port and hardport may be the same variable in some cases.
 */
#define MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport)
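
/*
 * Illustrative sketch only, not necessarily the driver's actual macro body:
 * assuming 4 hardports per host controller and two main-IRQ bits per
 * hardport, both outputs can be derived from the helpers declared above.
 * HC_SHIFT_PER_HC is a hypothetical stand-in for the elided constant giving
 * the per-HC offset within main_irq_cause / main_irq_mask.
 */
#define MV_PORT_TO_SHIFT_AND_HARDPORT_SKETCH(port, shift, hardport)	\
{									\
	hardport = mv_hardport_from_port(port);	/* 0..3 */		\
	shift    = mv_hc_from_port(port) * HC_SHIFT_PER_HC +		\
		   hardport * 2;	/* two irq bits per hardport */	\
}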

static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
{}

static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
						 unsigned int port)
{}

static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
{}

static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
{}

static inline void __iomem *mv_host_base(struct ata_host *host)
{}

static inline void __iomem *mv_ap_base(struct ata_port *ap)
{}

static inline int mv_get_hc_count(unsigned long port_flags)
{}

/**
 *      mv_save_cached_regs - (re-)initialize cached port registers
 *      @ap: the port whose registers we are caching
 *
 *	Initialize the local cache of port registers,
 *	so that reading them over and over again can
 *	be avoided on the hotter paths of this driver.
 *	This saves a few microseconds each time we switch
 *	to/from EDMA mode to perform (e.g.) a drive cache flush.
 */
static void mv_save_cached_regs(struct ata_port *ap)
{}

/**
 *      mv_write_cached_reg - write to a cached port register
 *      @addr: hardware address of the register
 *      @old: pointer to cached value of the register
 *      @new: new value for the register
 *
 *	Write a new value to a cached register,
 *	but only if the value is different from before.
 */
static inline void mv_write_cached_reg(void __iomem *addr, u32 *old, u32 new)
{}
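
/*
 * Minimal sketch of the "write only when changed" behaviour described
 * above; the real routine may need additional chip-specific care, so this
 * only shows the cache comparison and the flushed write.
 */
static inline void mv_write_cached_reg_sketch(void __iomem *addr,
					      u32 *old, u32 new)
{
	if (new != *old) {
		*old = new;		/* update the cached copy */
		writelfl(new, addr);	/* write, then flush by reading back */
	}
}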

static void mv_set_edma_ptrs(void __iomem *port_mmio,
			     struct mv_host_priv *hpriv,
			     struct mv_port_priv *pp)
{}

static void mv_write_main_irq_mask(u32 mask, struct mv_host_priv *hpriv)
{}

static void mv_set_main_irq_mask(struct ata_host *host,
				 u32 disable_bits, u32 enable_bits)
{}

static void mv_enable_port_irqs(struct ata_port *ap,
				     unsigned int port_bits)
{}

static void mv_clear_and_enable_port_irqs(struct ata_port *ap,
					  void __iomem *port_mmio,
					  unsigned int port_irqs)
{}

static void mv_set_irq_coalescing(struct ata_host *host,
				  unsigned int count, unsigned int usecs)
{}

/*
 *      mv_start_edma - Enable eDMA engine
 *      @pp: port private data
 *
 *      Verify the local cache of the eDMA state is accurate with a
 *      WARN_ON.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_start_edma(struct ata_port *ap, void __iomem *port_mmio,
			 struct mv_port_priv *pp, u8 protocol)
{}

static void mv_wait_for_edma_empty_idle(struct ata_port *ap)
{}

/**
 *      mv_stop_edma_engine - Disable eDMA engine
 *      @port_mmio: io base address
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_stop_edma_engine(void __iomem *port_mmio)
{}

static int mv_stop_edma(struct ata_port *ap)
{}

static void mv_dump_mem(struct device *dev, void __iomem *start, unsigned bytes)
{}

static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
{}

static void mv_dump_all_regs(void __iomem *mmio_base,
			     struct pci_dev *pdev)
{}

static unsigned int mv_scr_offset(unsigned int sc_reg_in)
{}

static int mv_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val)
{}

static int mv_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val)
{}

static void mv6_dev_config(struct ata_device *adev)
{}

static int mv_qc_defer(struct ata_queued_cmd *qc)
{}

static void mv_config_fbs(struct ata_port *ap, int want_ncq, int want_fbs)
{}

static void mv_60x1_errata_sata25(struct ata_port *ap, int want_ncq)
{}

/*
 *	mv_bmdma_enable - set a magic bit on GEN_IIE to allow bmdma
 *	@ap: Port being initialized
 *
 *	There are two DMA modes on these chips:  basic DMA, and EDMA.
 *
 *	Bit-0 of the "EDMA RESERVED" register enables/disables use
 *	of basic DMA on the GEN_IIE versions of the chips.
 *
 *	This bit survives EDMA resets, and must be set for basic DMA
 *	to function, and should be cleared when EDMA is active.
 */
static void mv_bmdma_enable_iie(struct ata_port *ap, int enable_bmdma)
{}
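
/*
 * Sketch of the bit-0 toggle described above, as a plain read-modify-write.
 * EDMA_RSVD is a hypothetical name for the elided "EDMA RESERVED" register
 * offset; the real routine may instead go through the cached-register
 * helpers.
 */
static void mv_bmdma_enable_iie_sketch(struct ata_port *ap, int enable_bmdma)
{
	void __iomem *reg = mv_ap_base(ap) + EDMA_RSVD;
	u32 val = readl(reg);

	if (enable_bmdma)
		val |= 1;	/* basic DMA wanted: set bit 0 */
	else
		val &= ~1;	/* EDMA active: clear bit 0 */
	writelfl(val, reg);
}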

/*
 * SOC chips have an issue whereby the HDD LEDs don't always blink
 * during I/O when NCQ is enabled. Enabling a special "LED blink" mode
 * of the SOC takes care of it, generating a steady blink rate when
 * any drive on the chip is active.
 *
 * Unfortunately, the blink mode is a global hardware setting for the SOC,
 * so we must use it whenever at least one port on the SOC has NCQ enabled.
 *
 * We turn "LED blink" off when NCQ is not in use anywhere, because the normal
 * LED operation works then, and provides better (more accurate) feedback.
 *
 * Note that this code assumes that an SOC never has more than one HC onboard.
 */
static void mv_soc_led_blink_enable(struct ata_port *ap)
{}

static void mv_soc_led_blink_disable(struct ata_port *ap)
{}

static void mv_edma_cfg(struct ata_port *ap, int want_ncq, int want_edma)
{}

static void mv_port_free_dma_mem(struct ata_port *ap)
{}

/**
 *      mv_port_start - Port specific init/start routine.
 *      @ap: ATA channel to manipulate
 *
 *      Allocate and point to DMA memory, init port private memory,
 *      zero indices.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_port_start(struct ata_port *ap)
{}

/**
 *      mv_port_stop - Port specific cleanup/stop routine.
 *      @ap: ATA channel to manipulate
 *
 *      Stop DMA, cleanup port memory.
 *
 *      LOCKING:
 *      This routine uses the host lock to protect the DMA stop.
 */
static void mv_port_stop(struct ata_port *ap)
{}

/**
 *      mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
 *      @qc: queued command whose SG list to source from
 *
 *      Populate the SG list and mark the last entry.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_fill_sg(struct ata_queued_cmd *qc)
{}
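
/*
 * Sketch of the 64 KiB boundary splitting behind the .sg_tablesize comment
 * above: a DMA segment crossing a 64 KiB boundary is emitted as two ePRD
 * entries, which is why the worst case doubles the entry count.  The mv_sg
 * field layout is elided above, so the emit step is only hinted at in a
 * comment here.
 */
static unsigned int mv_count_eprds_for_segment_sketch(dma_addr_t addr, u32 len)
{
	unsigned int count = 0;

	while (len) {
		u32 offset = addr & 0xffff;
		u32 chunk = len;

		if (offset + chunk > 0x10000)	/* would cross a 64 KiB boundary */
			chunk = 0x10000 - offset;
		/* ...fill one struct mv_sg entry with (addr, chunk) here... */
		addr += chunk;
		len  -= chunk;
		count++;
	}
	return count;
}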

static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
{}

/**
 *	mv_sff_irq_clear - Clear hardware interrupt after DMA.
 *	@ap: Port associated with this ATA transaction.
 *
 *	We need this only for ATAPI bmdma transactions,
 *	as otherwise we experience spurious interrupts
 *	after libata-sff handles the bmdma interrupts.
 */
static void mv_sff_irq_clear(struct ata_port *ap)
{}

/**
 *	mv_check_atapi_dma - Filter ATAPI cmds which are unsuitable for DMA.
 *	@qc: queued command to check for chipset/DMA compatibility.
 *
 *	The bmdma engines cannot handle speculative data sizes
 *	(bytecount under/over flow).  So only allow DMA for
 *	data transfer commands with known data sizes.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static int mv_check_atapi_dma(struct ata_queued_cmd *qc)
{}
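
/*
 * Sketch of the filtering policy described above: allow DMA only for
 * fixed-size data transfer commands and fall back to PIO for everything
 * else.  The opcode list here is illustrative, not exhaustive.
 */
static int mv_check_atapi_dma_sketch(struct ata_queued_cmd *qc)
{
	struct scsi_cmnd *scmd = qc->scsicmd;

	if (scmd) {
		switch (scmd->cmnd[0]) {
		case READ_6:
		case READ_10:
		case READ_12:
		case WRITE_6:
		case WRITE_10:
		case WRITE_12:
			return 0;	/* known data size: DMA is safe */
		}
	}
	return -EOPNOTSUPP;	/* speculative size: use PIO */
}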

/**
 *	mv_bmdma_setup - Set up BMDMA transaction
 *	@qc: queued command to prepare DMA for.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_bmdma_setup(struct ata_queued_cmd *qc)
{}

/**
 *	mv_bmdma_start - Start a BMDMA transaction
 *	@qc: queued command to start DMA on.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_bmdma_start(struct ata_queued_cmd *qc)
{}

/**
 *	mv_bmdma_stop_ap - Stop BMDMA transfer
 *	@ap: port to stop
 *
 *	Clears the ATA_DMA_START flag in the bmdma control register
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_bmdma_stop_ap(struct ata_port *ap)
{}
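
/*
 * Sketch of the clear-start step described above.  BMDMA_CMD is a
 * hypothetical name for the elided bmdma command register offset.
 */
static void mv_bmdma_stop_ap_sketch(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 cmd = readl(port_mmio + BMDMA_CMD);

	if (cmd & ATA_DMA_START)
		writelfl(cmd & ~ATA_DMA_START, port_mmio + BMDMA_CMD);
}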

static void mv_bmdma_stop(struct ata_queued_cmd *qc)
{}

/**
 *	mv_bmdma_status - Read BMDMA status
 *	@ap: port for which to retrieve DMA status.
 *
 *	Read and return equivalent of the sff BMDMA status register.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static u8 mv_bmdma_status(struct ata_port *ap)
{}
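
/*
 * Sketch of translating the chip's bmdma state into the sff-equivalent
 * status bits, per the comment above.  BMDMA_STATUS is a hypothetical
 * register name; the ATA_DMA_* bits are the standard sff definitions.
 */
static u8 mv_bmdma_status_sketch(struct ata_port *ap)
{
	u32 reg = readl(mv_ap_base(ap) + BMDMA_STATUS);

	if (reg & ATA_DMA_ACTIVE)
		return ATA_DMA_ACTIVE;
	if (reg & ATA_DMA_ERR)
		return ATA_DMA_ERR | ATA_DMA_INTR;
	return ATA_DMA_INTR;	/* transfer has finished */
}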

static void mv_rw_multi_errata_sata24(struct ata_queued_cmd *qc)
{}

/**
 *      mv_qc_prep - Host specific command preparation.
 *      @qc: queued command to prepare
 *
 *      This routine simply redirects to the general purpose routine
 *      if command is not DMA.  Else, it handles prep of the CRQB
 *      (command request block), does some sanity checking, and calls
 *      the SG load routine.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static enum ata_completion_errors mv_qc_prep(struct ata_queued_cmd *qc)
{}

/**
 *      mv_qc_prep_iie - Host specific command preparation.
 *      @qc: queued command to prepare
 *
 *      This routine simply redirects to the general purpose routine
 *      if command is not DMA.  Else, it handles prep of the CRQB
 *      (command request block), does some sanity checking, and calls
 *      the SG load routine.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static enum ata_completion_errors mv_qc_prep_iie(struct ata_queued_cmd *qc)
{}

/**
 *	mv_sff_check_status - fetch device status, if valid
 *	@ap: ATA port to fetch status from
 *
 *	When using command issue via mv_qc_issue_fis(),
 *	the initial ATA_BUSY state does not show up in the
 *	ATA status (shadow) register.  This can confuse libata!
 *
 *	So we have a hook here to fake ATA_BUSY for that situation,
 *	until the first time a BUSY, DRQ, or ERR bit is seen.
 *
 *	The rest of the time, it simply returns the ATA status register.
 */
static u8 mv_sff_check_status(struct ata_port *ap)
{}
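
/*
 * Sketch of the fake-BUSY hook described above.  The per-port flag
 * (MV_PP_FLAG_FAKE_ATA_BUSY in pp->pp_flags) is an assumed stand-in for
 * the elided port-private state.
 */
static u8 mv_sff_check_status_sketch(struct ata_port *ap)
{
	struct mv_port_priv *pp = ap->private_data;
	u8 stat = ioread8(ap->ioaddr.status_addr);

	if (pp->pp_flags & MV_PP_FLAG_FAKE_ATA_BUSY) {
		if (stat & (ATA_BUSY | ATA_DRQ | ATA_ERR))
			pp->pp_flags &= ~MV_PP_FLAG_FAKE_ATA_BUSY; /* real state visible now */
		else
			stat = ATA_BUSY;	/* keep reporting BUSY */
	}
	return stat;
}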

/**
 *	mv_send_fis - Send a FIS, using the "Vendor-Unique FIS" register
 *	@ap: ATA port to send a FIS
 *	@fis: fis to be sent
 *	@nwords: number of 32-bit words in the fis
 */
static unsigned int mv_send_fis(struct ata_port *ap, u32 *fis, int nwords)
{}

/**
 *	mv_qc_issue_fis - Issue a command directly as a FIS
 *	@qc: queued command to start
 *
 *	Note that the ATA shadow registers are not updated
 *	after command issue, so the device will appear "READY"
 *	if polled, even while it is BUSY processing the command.
 *
 *	So we use a status hook to fake ATA_BUSY until the drive changes state.
 *
 *	Note: we don't get updated shadow regs on *completion*
 *	of non-data commands. So avoid sending them via this function,
 *	as they will appear to have completed immediately.
 *
 *	GEN_IIE has special registers that we could get the result tf from,
 *	but earlier chipsets do not.  For now, we ignore those registers.
 */
static unsigned int mv_qc_issue_fis(struct ata_queued_cmd *qc)
{}

/**
 *      mv_qc_issue - Initiate a command to the host
 *      @qc: queued command to start
 *
 *      This routine simply redirects to the general purpose routine
 *      if command is not DMA.  Else, it sanity checks our local
 *      caches of the request producer/consumer indices then enables
 *      DMA and bumps the request producer index.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
{}

static struct ata_queued_cmd *mv_get_active_qc(struct ata_port *ap)
{}

static void mv_pmp_error_handler(struct ata_port *ap)
{}

static unsigned int mv_get_err_pmp_map(struct ata_port *ap)
{}

static void mv_pmp_eh_prep(struct ata_port *ap, unsigned int pmp_map)
{}

static int mv_req_q_empty(struct ata_port *ap)
{}

static int mv_handle_fbs_ncq_dev_err(struct ata_port *ap)
{}

static int mv_handle_fbs_non_ncq_dev_err(struct ata_port *ap)
{}

static int mv_handle_dev_err(struct ata_port *ap, u32 edma_err_cause)
{}

static void mv_unexpected_intr(struct ata_port *ap, int edma_was_enabled)
{}

/**
 *      mv_err_intr - Handle error interrupts on the port
 *      @ap: ATA channel to manipulate
 *
 *      Most cases require a full reset of the chip's state machine,
 *      which also performs a COMRESET.
 *      Also, if the port disabled DMA, update our cached copy to match.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_err_intr(struct ata_port *ap)
{}

static bool mv_process_crpb_response(struct ata_port *ap,
		struct mv_crpb *response, unsigned int tag, int ncq_enabled)
{}

static void mv_process_crpb_entries(struct ata_port *ap, struct mv_port_priv *pp)
{}

static void mv_port_intr(struct ata_port *ap, u32 port_cause)
{}

/**
 *      mv_host_intr - Handle all interrupts on the given host controller
 *      @host: host specific structure
 *      @main_irq_cause: Main interrupt cause register for the chip.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_host_intr(struct ata_host *host, u32 main_irq_cause)
{}

static int mv_pci_error(struct ata_host *host, void __iomem *mmio)
{}

/**
 *      mv_interrupt - Main interrupt event handler
 *      @irq: unused
 *      @dev_instance: private data; in this case the host structure
 *
 *      Read the read only register to determine if any host
 *      controllers have pending interrupts.  If so, call lower level
 *      routine to handle.  Also check for PCI errors which are only
 *      reported here.
 *
 *      LOCKING:
 *      This routine holds the host lock while processing pending
 *      interrupts.
 */
static irqreturn_t mv_interrupt(int irq, void *dev_instance)
{}

static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
{}

static int mv5_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val)
{}

static int mv5_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val)
{}

static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio)
{}

static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{}

static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio)
{}

static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{}

static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{}


#undef ZERO
#define ZERO
static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port)
{}
#undef ZERO

#define ZERO
static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int hc)
{}
#undef ZERO

static int mv5_reset_hc(struct ata_host *host, void __iomem *mmio,
			unsigned int n_hc)
{}

#undef ZERO
#define ZERO
static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio)
{}
#undef ZERO

static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{}

/*
 *      mv6_reset_hc - Perform the 6xxx global soft reset
 *      @mmio: base address of the HBA
 *
 *      This routine only applies to 6xxx parts.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv6_reset_hc(struct ata_host *host, void __iomem *mmio,
			unsigned int n_hc)
{}

static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio)
{}

static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{}

static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{}

/* TODO: use the generic LED interface to configure the SATA Presence */
/* & Activity LEDs on the board */
static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
				      void __iomem *mmio)
{}

static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio)
{}

#undef ZERO
#define ZERO
static void mv_soc_reset_hc_port(struct mv_host_priv *hpriv,
					void __iomem *mmio, unsigned int port)
{}

#undef ZERO

#define ZERO
static void mv_soc_reset_one_hc(struct mv_host_priv *hpriv,
				       void __iomem *mmio)
{}

#undef ZERO

static int mv_soc_reset_hc(struct ata_host *host,
				  void __iomem *mmio, unsigned int n_hc)
{}

static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
				      void __iomem *mmio)
{}

static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio)
{}

static void mv_soc_65n_phy_errata(struct mv_host_priv *hpriv,
				  void __iomem *mmio, unsigned int port)
{}

/*
 *	soc_is_65n - check if the SoC is a 65 nm device
 *
 *	Detect the type of the SoC by reading the PHYCFG_OFS register:
 *	it exists only on the 65 nm devices and should contain a non-zero
 *	value there, whereas reading it on older devices returns 0.
 */
static bool soc_is_65n(struct mv_host_priv *hpriv)
{}
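
/*
 * Sketch of the detection described above: read the PHYCFG_OFS register of
 * port 0 and treat any non-zero value as a 65 nm SoC.  PHYCFG_OFS and the
 * hpriv->base field are assumed names for the elided definitions.
 */
static bool soc_is_65n_sketch(struct mv_host_priv *hpriv)
{
	void __iomem *port0_mmio = mv_port_base(hpriv->base, 0);

	return readl(port0_mmio + PHYCFG_OFS) != 0;
}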

static void mv_setup_ifcfg(void __iomem *port_mmio, int want_gen2i)
{}

static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no)
{}

static void mv_pmp_select(struct ata_port *ap, int pmp)
{}

static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
				unsigned long deadline)
{}

static int mv_softreset(struct ata_link *link, unsigned int *class,
				unsigned long deadline)
{}

static int mv_hardreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline)
{}

static void mv_eh_freeze(struct ata_port *ap)
{}

static void mv_eh_thaw(struct ata_port *ap)
{}

/**
 *      mv_port_init - Perform some early initialization on a single port.
 *      @port: libata data structure storing shadow register addresses
 *      @port_mmio: base address of the port
 *
 *      Initialize shadow register mmio addresses, clear outstanding
 *      interrupts on the port, and unmask interrupts for the future
 *      start of the port.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_port_init(struct ata_ioports *port,  void __iomem *port_mmio)
{}

static unsigned int mv_in_pcix_mode(struct ata_host *host)
{}

static int mv_pci_cut_through_okay(struct ata_host *host)
{}

static void mv_60x1b2_errata_pci7(struct ata_host *host)
{}

static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
{}

/**
 *      mv_init_host - Perform some early initialization of the host.
 *	@host: ATA host to initialize
 *
 *      If possible, do an early global reset of the host.  Then do
 *      our port init and clear/unmask all/relevant host interrupts.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_init_host(struct ata_host *host)
{}

static int mv_create_dma_pools(struct mv_host_priv *hpriv, struct device *dev)
{}

static void mv_conf_mbus_windows(struct mv_host_priv *hpriv,
				 const struct mbus_dram_target_info *dram)
{}

/**
 *      mv_platform_probe - handle a positive probe of an SoC Marvell host
 *      @pdev: platform device found
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_platform_probe(struct platform_device *pdev)
{}

/**
 *      mv_platform_remove - unplug a platform interface
 *      @pdev: platform device
 *
 *      A platform bus SATA device has been unplugged. Perform the needed
 *      cleanup. Also called on module unload for any active devices.
 */
static void mv_platform_remove(struct platform_device *pdev)
{}

#ifdef CONFIG_PM_SLEEP
static int mv_platform_suspend(struct platform_device *pdev, pm_message_t state)
{}

static int mv_platform_resume(struct platform_device *pdev)
{}
#else
#define mv_platform_suspend
#define mv_platform_resume
#endif

#ifdef CONFIG_OF
static const struct of_device_id mv_sata_dt_ids[] =;
MODULE_DEVICE_TABLE(of, mv_sata_dt_ids);
#endif

static struct platform_driver mv_platform_driver =;


#ifdef CONFIG_PCI
static int mv_pci_init_one(struct pci_dev *pdev,
			   const struct pci_device_id *ent);
#ifdef CONFIG_PM_SLEEP
static int mv_pci_device_resume(struct pci_dev *pdev);
#endif

static const struct pci_device_id mv_pci_tbl[] =;

static struct pci_driver mv_pci_driver =;
MODULE_DEVICE_TABLE(pci, mv_pci_tbl);

/**
 *      mv_print_info - Dump key info to kernel log for perusal.
 *      @host: ATA host to print info about
 *
 *      FIXME: complete this.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_print_info(struct ata_host *host)
{}

/**
 *      mv_pci_init_one - handle a positive probe of a PCI Marvell host
 *      @pdev: PCI device found
 *      @ent: PCI device ID entry for the matched host
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_pci_init_one(struct pci_dev *pdev,
			   const struct pci_device_id *ent)
{}

#ifdef CONFIG_PM_SLEEP
static int mv_pci_device_resume(struct pci_dev *pdev)
{}
#endif
#endif

static int __init mv_init(void)
{}

static void __exit mv_exit(void)
{}

MODULE_AUTHOR();
MODULE_DESCRIPTION();
MODULE_LICENSE();
MODULE_VERSION();
MODULE_ALIAS();

module_init();
module_exit(mv_exit);