linux/drivers/usb/musb/musb_host.c

// SPDX-License-Identifier: GPL-2.0
/*
 * MUSB OTG driver host support
 *
 * Copyright 2005 Mentor Graphics Corporation
 * Copyright (C) 2005-2006 by Texas Instruments
 * Copyright (C) 2006-2007 Nokia Corporation
 * Copyright (C) 2008-2009 MontaVista Software, Inc. <[email protected]>
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/dma-mapping.h>

#include "musb_core.h"
#include "musb_host.h"
#include "musb_trace.h"

/* MUSB HOST status 22-mar-2006
 *
 * - There's still lots of partial code duplication for fault paths, so
 *   they aren't handled as consistently as they need to be.
 *
 * - PIO mostly behaved when last tested.
 *     + including ep0, with all usbtest cases 9, 10
 *     + usbtest 14 (ep0out) doesn't seem to run at all
 *     + double buffered OUT/TX endpoints saw stalls(!) with certain usbtest
 *       configurations, but otherwise double buffering passes basic tests.
 *     + for 2.6.N, for N > ~10, needs API changes for hcd framework.
 *
 * - DMA (CPPI) ... partially behaves, not currently recommended
 *     + about 1/15 the speed of typical EHCI implementations (PCI)
 *     + RX, all too often reqpkt seems to misbehave after tx
 *     + TX, no known issues (other than evident silicon issue)
 *
 * - DMA (Mentor/OMAP) ...has at least toggle update problems
 *
 * - [23-feb-2009] minimal traffic scheduling to avoid bulk RX packet
 *   starvation ... nothing yet for TX, interrupt, or bulk.
 *
 * - Not tested with HNP, but some SRP paths seem to behave.
 *
 * NOTE 24-August-2006:
 *
 * - Bulk traffic finally uses both sides of hardware ep1, freeing up an
 *   extra endpoint for periodic use enabling hub + keybd + mouse.  That
 *   mostly works, except that with "usbnet" it's easy to trigger cases
 *   with "ping" where RX loses.  (a) ping to davinci, even "ping -f",
 *   fine; but (b) ping _from_ davinci, even "ping -c 1", ICMP RX loses
 *   although ARP RX wins.  (That test was done with a full speed link.)
 */


/*
 * NOTE on endpoint usage:
 *
 * CONTROL transfers all go through ep0.  BULK ones go through dedicated IN
 * and OUT endpoints ... hardware is dedicated for those "async" queue(s).
 * (Yes, bulk _could_ use more of the endpoints than that, and would even
 * benefit from it.)
 *
 * INTERRUPT and ISOCHRONOUS transfers are scheduled to the other endpoints.
 * So far that scheduling is both dumb and optimistic:  the endpoint will be
 * "claimed" until its software queue is no longer refilled.  No multiplexing
 * of transfers between endpoints, or anything clever.
 */
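
/*
 * A sketch of that dispatch, for illustration only (not built): the policy
 * above is what musb_schedule() implements later in this file. The helper
 * name claim_free_periodic_ep() is hypothetical.
 */
#if 0
static struct musb_hw_ep *pick_hw_ep_example(struct musb *musb,
					     struct urb *urb)
{
	switch (usb_pipetype(urb->pipe)) {
	case PIPE_CONTROL:
		return musb->control_ep;	/* always hardware ep0 */
	case PIPE_BULK:
		return musb->bulk_ep;		/* the shared "async" pair */
	default:
		/* INTERRUPT/ISO: claim a free endpoint; it stays claimed
		 * until its software queue is no longer refilled.
		 */
		return claim_free_periodic_ep(musb);	/* hypothetical */
	}
}
#endif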

struct musb *hcd_to_musb(struct usb_hcd *hcd)
{}


static void musb_ep_program(struct musb *musb, u8 epnum,
			struct urb *urb, int is_out,
			u8 *buf, u32 offset, u32 len);

/*
 * Clear TX fifo. Needed to avoid BABBLE errors.
 */
static void musb_h_tx_flush_fifo(struct musb_hw_ep *ep)
{}
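
/*
 * A minimal sketch (illustrative, not built) of the flush sequence the
 * function above performs: set FLUSHFIFO together with TXPKTRDY (the FIFO
 * may hold a packet the controller still considers ready) and poll until
 * FIFONOTEMPTY clears, giving up after a bounded number of retries.
 */
#if 0
static void tx_flush_fifo_example(struct musb_hw_ep *ep)
{
	void __iomem *epio = ep->regs;
	u16 csr = musb_readw(epio, MUSB_TXCSR);
	int retries = 1000;

	while (csr & MUSB_TXCSR_FIFONOTEMPTY) {
		csr |= MUSB_TXCSR_FLUSHFIFO | MUSB_TXCSR_TXPKTRDY;
		musb_writew(epio, MUSB_TXCSR, csr);
		csr = musb_readw(epio, MUSB_TXCSR);
		if (WARN(retries-- < 1,
			 "Could not flush host TX%d fifo: csr: %04x\n",
			 ep->epnum, csr))
			return;
		mdelay(1);
	}
}
#endif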

static void musb_h_ep0_flush_fifo(struct musb_hw_ep *ep)
{}

/*
 * Start transmit. Caller is responsible for locking shared resources.
 * musb must be locked.
 */
static inline void musb_h_tx_start(struct musb_hw_ep *ep)
{}

static inline void musb_h_tx_dma_start(struct musb_hw_ep *ep)
{}

static void musb_ep_set_qh(struct musb_hw_ep *ep, int is_in, struct musb_qh *qh)
{}

static struct musb_qh *musb_ep_get_qh(struct musb_hw_ep *ep, int is_in)
{}

/*
 * Start the URB at the front of an endpoint's queue;
 * the hardware endpoint must already be claimed by the caller.
 *
 * Context: controller locked, irqs blocked
 */
static void
musb_start_urb(struct musb *musb, int is_in, struct musb_qh *qh)
{}

/* Context: caller owns controller lock, IRQs are blocked */
static void musb_giveback(struct musb *musb, struct urb *urb, int status)
__releases(musb->lock)
__acquires(musb->lock)
{}

/*
 * Advance this hardware endpoint's queue, completing the specified URB and
 * advancing to either the next URB queued to that qh, or else invalidating
 * that qh and advancing to the next qh scheduled after the current one.
 *
 * Context: caller owns controller lock, IRQs are blocked
 */
static void musb_advance_schedule(struct musb *musb, struct urb *urb,
				  struct musb_hw_ep *hw_ep, int is_in)
{}
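
/*
 * A sketch (illustrative, not built) of the advance step described above;
 * next_scheduled_qh_example() is a hypothetical stand-in for the real
 * "find the next qh for this hardware endpoint" logic.
 */
#if 0
static void advance_schedule_example(struct musb *musb, struct urb *urb,
				     struct musb_hw_ep *hw_ep, int is_in)
{
	struct musb_qh *qh = musb_ep_get_qh(hw_ep, is_in);
	int status = (urb->status == -EINPROGRESS) ? 0 : urb->status;

	musb_giveback(musb, urb, status);

	if (list_empty(&qh->hep->urb_list)) {
		/* this qh is done: unlink it and pick a successor */
		musb_ep_set_qh(hw_ep, is_in, NULL);
		qh = next_scheduled_qh_example(musb, qh, is_in);
	}
	if (qh)
		musb_start_urb(musb, is_in, qh);
}
#endif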

static u16 musb_h_flush_rxfifo(struct musb_hw_ep *hw_ep, u16 csr)
{}

/*
 * PIO RX for a packet (or part of it).
 */
static bool
musb_host_packet_rx(struct musb *musb, struct urb *urb, u8 epnum, u8 iso_err)
{}

/* we don't always need to reinit a given side of an endpoint...
 * when we do, use tx/rx reinit routine and then construct a new CSR
 * to address data toggle, NYET, and DMA or PIO.
 *
 * it's possible that driver bugs (especially for DMA) or aborting a
 * transfer might have left the endpoint busier than it should be.
 * the busy/not-empty tests are basically paranoia.
 */
static void
musb_rx_reinit(struct musb *musb, struct musb_qh *qh, u8 epnum)
{}

static void musb_tx_dma_set_mode_mentor(struct musb_hw_ep *hw_ep,
					struct musb_qh *qh,
					u32 *length, u8 *mode)
{}

static void musb_tx_dma_set_mode_cppi_tusb(struct musb_hw_ep *hw_ep,
					   struct urb *urb,
					   u8 *mode)
{}

static bool musb_tx_dma_program(struct dma_controller *dma,
		struct musb_hw_ep *hw_ep, struct musb_qh *qh,
		struct urb *urb, u32 offset, u32 length)
{}

/*
 * Program an HDRC endpoint as per the given URB
 * Context: irqs blocked, controller lock held
 */
static void musb_ep_program(struct musb *musb, u8 epnum,
			struct urb *urb, int is_out,
			u8 *buf, u32 offset, u32 len)
{}

/* Schedule next QH from musb->in_bulk/out_bulk and move the current qh to
 * the end; avoids starvation for other endpoints.
 */
static void musb_bulk_nak_timeout(struct musb *musb, struct musb_hw_ep *ep,
	int is_in)
{}
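
/*
 * A sketch (illustrative, not built) of the rotation this helper does:
 * the qh at the head of the bulk ring has been NAKed too long, so move
 * it to the tail and start the new head. Cancelling the in-flight IN
 * token / flushing the FIFO is omitted here.
 */
#if 0
static void bulk_rotate_example(struct musb *musb, struct musb_hw_ep *ep,
				int is_in)
{
	struct list_head *bulk = is_in ? &musb->in_bulk : &musb->out_bulk;
	struct musb_qh *cur_qh, *next_qh;

	cur_qh = list_first_entry(bulk, struct musb_qh, ring);
	list_move_tail(&cur_qh->ring, bulk);

	next_qh = list_first_entry(bulk, struct musb_qh, ring);
	musb_ep_set_qh(ep, is_in, next_qh);
	musb_start_urb(musb, is_in, next_qh);
}
#endif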

/*
 * Service the default endpoint (ep0) as host.
 * Return true until it's time to start the status stage.
 */
static bool musb_h_ep0_continue(struct musb *musb, u16 len, struct urb *urb)
{}
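
/*
 * A sketch (illustrative, not built) of the termination test behind that
 * contract: the data stage continues until the request length is
 * satisfied, or a short IN packet arrives early. maxpacket is passed in
 * rather than read from the urb to keep the example self-contained.
 */
#if 0
static bool ep0_more_data_example(struct urb *urb, u16 len, u16 maxpacket,
				  bool is_in)
{
	urb->actual_length += len;

	if (urb->actual_length >= urb->transfer_buffer_length)
		return false;		/* all data moved: status stage */
	if (is_in && len < maxpacket)
		return false;		/* short packet ends the IN stage */
	return true;			/* keep moving data */
}
#endif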

/*
 * Handle a default endpoint (ep0) interrupt as host. Called only in IRQ
 * context from musb_interrupt(), with the controller lock held.
 */
irqreturn_t musb_h_ep0_irq(struct musb *musb)
{}


#ifdef CONFIG_USB_INVENTRA_DMA

/* Host side TX (OUT) using Mentor DMA works as follows:
	submit_urb ->
		- if queue was empty, Program Endpoint
		- ... which starts DMA to fifo in mode 1 or 0

	DMA Isr (transfer complete) -> TxAvail()
		- Stop DMA (~DmaEnab)	(<--- Alert ... currently happens
					only in musb_cleanup_urb)
		- TxPktRdy has to be set in mode 0 or for
			short packets in mode 1.
*/
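
/*
 * A sketch (illustrative, not built) of that TxAvail() step: stop DMA,
 * then set TxPktRdy by hand for mode 0, or for a short packet left in
 * the FIFO by a mode 1 transfer, so the controller actually sends it.
 */
#if 0
static void tx_dma_done_example(struct musb_hw_ep *hw_ep, u8 dma_mode,
				bool short_pkt)
{
	void __iomem *epio = hw_ep->regs;
	u16 csr = musb_readw(epio, MUSB_TXCSR);

	/* stop DMA: ~DmaEnab (see the alert above) */
	csr &= ~(MUSB_TXCSR_DMAENAB | MUSB_TXCSR_DMAMODE);
	musb_writew(epio, MUSB_TXCSR, csr);

	if (dma_mode == 0 || short_pkt)
		musb_writew(epio, MUSB_TXCSR, csr | MUSB_TXCSR_TXPKTRDY);
}
#endif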

#endif

/* Service a Tx-Available or dma completion irq for the endpoint */
void musb_host_tx(struct musb *musb, u8 epnum)
{}

#ifdef CONFIG_USB_TI_CPPI41_DMA
/* Set up ISO RX DMA for CPPI 4.1 without advancing len. See commit c57c41d */
static int musb_rx_dma_iso_cppi41(struct dma_controller *dma,
				  struct musb_hw_ep *hw_ep,
				  struct musb_qh *qh,
				  struct urb *urb,
				  size_t len)
{
	struct dma_channel *channel = hw_ep->rx_channel;
	void __iomem *epio = hw_ep->regs;
	dma_addr_t *buf;
	u32 length;
	u16 val;

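	/* DMA address for this frame: the per-frame byte offset added to
	 * the urb's DMA base, via pointer arithmetic on a 32-bit address
	 * (hence the casts).
	 */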
	buf = (void *)urb->iso_frame_desc[qh->iso_idx].offset +
		(u32)urb->transfer_dma;

	length = urb->iso_frame_desc[qh->iso_idx].length;

	val = musb_readw(epio, MUSB_RXCSR);
	val |= MUSB_RXCSR_DMAENAB;
	musb_writew(hw_ep->regs, MUSB_RXCSR, val);

	return dma->channel_program(channel, qh->maxpacket, 0,
				   (u32)buf, length);
}
#else
static inline int musb_rx_dma_iso_cppi41(struct dma_controller *dma,
					 struct musb_hw_ep *hw_ep,
					 struct musb_qh *qh,
					 struct urb *urb,
					 size_t len)
{}
#endif

#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_UX500_DMA) || \
	defined(CONFIG_USB_TI_CPPI41_DMA)
/* Host side RX (IN) using Mentor DMA works as follows:
	submit_urb ->
		- if queue was empty, ProgramEndpoint
		- first IN token is sent out (by setting ReqPkt)
	LinuxIsr -> RxReady()
	/\	=> first packet is received
	|	- Set in mode 0 (DmaEnab, ~ReqPkt)
	|		-> DMA Isr (transfer complete) -> RxReady()
	|		    - Ack receive (~RxPktRdy), turn off DMA (~DmaEnab)
	|		    - if urb not complete, send next IN token (ReqPkt)
	|			   |		else complete urb.
	|			   |
	---------------------------
 *
 * Nuances of mode 1:
 *	For short packets, no ack (+RxPktRdy) is sent automatically
 *	(even if AutoClear is ON)
 *	For full packets, ack (~RxPktRdy) and next IN token (+ReqPkt) is sent
 *	automatically => major problem, as collecting the next packet becomes
 *	difficult. Hence mode 1 is not used.
 *
 * REVISIT
 *	All we care about at this driver level is that
 *       (a) all URBs terminate with REQPKT cleared and fifo(s) empty;
 *       (b) termination conditions are: short RX, or buffer full;
 *       (c) fault modes include
 *           - iff URB_SHORT_NOT_OK, short RX status is -EREMOTEIO.
 *             (and that endpoint's dma queue stops immediately)
 *           - overflow (full, PLUS more bytes in the terminal packet)
 *
 *	So for example, usb-storage sets URB_SHORT_NOT_OK, and would
 *	thus be a great candidate for using mode 1 ... for all but the
 *	last packet of one URB's transfer.
 */
static int musb_rx_dma_inventra_cppi41(struct dma_controller *dma,
				       struct musb_hw_ep *hw_ep,
				       struct musb_qh *qh,
				       struct urb *urb,
				       size_t len)
{
	struct dma_channel *channel = hw_ep->rx_channel;
	void __iomem *epio = hw_ep->regs;
	u16 val;
	int pipe;
	bool done;

	pipe = urb->pipe;

	if (usb_pipeisoc(pipe)) {
		struct usb_iso_packet_descriptor *d;

		d = urb->iso_frame_desc + qh->iso_idx;
		d->actual_length = len;

		/* even if there was an error, we did the dma
		 * for iso_frame_desc->length
		 */
		if (d->status != -EILSEQ && d->status != -EOVERFLOW)
			d->status = 0;

		if (++qh->iso_idx >= urb->number_of_packets) {
			done = true;
		} else {
			/* REVISIT: Why ignore return value here? */
			if (musb_dma_cppi41(hw_ep->musb))
				done = musb_rx_dma_iso_cppi41(dma, hw_ep, qh,
							      urb, len);
			done = false;
		}

	} else {
		/* done if urb buffer is full or short packet is recd */
		done = (urb->actual_length + len >=
			urb->transfer_buffer_length
			|| channel->actual_len < qh->maxpacket
			|| channel->rx_packet_done);
	}

	/* send IN token for next packet, without AUTOREQ */
	if (!done) {
		val = musb_readw(epio, MUSB_RXCSR);
		val |= MUSB_RXCSR_H_REQPKT;
		musb_writew(epio, MUSB_RXCSR, MUSB_RXCSR_H_WZC_BITS | val);
	}

	return done;
}

/* Disadvantage of using mode 1:
 *	It's basically usable only for mass storage class; essentially all
 *	other protocols also terminate transfers on short packets.
 *
 * Details:
 *	An extra IN token is sent at the end of the transfer (due to AUTOREQ)
 *	If you try to use mode 1 for (transfer_buffer_length - 512), and try
 *	to use the extra IN token to grab the last packet using mode 0, then
 *	the problem is that you cannot be sure when the device will send the
 *	last packet and RxPktRdy set. Sometimes the packet is recd too soon
 *	such that it gets lost when RxCSR is re-set at the end of the mode 1
 *	transfer, while sometimes it is recd just a little late so that if you
 *	try to configure for mode 0 soon after the mode 1 transfer is
 *	completed, you will find rxcount 0. Okay, so you might think why not
 *	wait for an interrupt when the pkt is recd. Well, you won't get any!
 */
static int musb_rx_dma_in_inventra_cppi41(struct dma_controller *dma,
					  struct musb_hw_ep *hw_ep,
					  struct musb_qh *qh,
					  struct urb *urb,
					  size_t len,
					  u8 iso_err)
{
	struct musb *musb = hw_ep->musb;
	void __iomem *epio = hw_ep->regs;
	struct dma_channel *channel = hw_ep->rx_channel;
	u16 rx_count, val;
	int length, pipe, done;
	dma_addr_t buf;

	rx_count = musb_readw(epio, MUSB_RXCOUNT);
	pipe = urb->pipe;

	if (usb_pipeisoc(pipe)) {
		int d_status = 0;
		struct usb_iso_packet_descriptor *d;

		d = urb->iso_frame_desc + qh->iso_idx;

		if (iso_err) {
			d_status = -EILSEQ;
			urb->error_count++;
		}
		if (rx_count > d->length) {
			if (d_status == 0) {
				d_status = -EOVERFLOW;
				urb->error_count++;
			}
			musb_dbg(musb, "** OVERFLOW %d into %d",
				rx_count, d->length);

			length = d->length;
		} else
			length = rx_count;
		d->status = d_status;
		buf = urb->transfer_dma + d->offset;
	} else {
		length = rx_count;
		buf = urb->transfer_dma + urb->actual_length;
	}

	channel->desired_mode = 0;
#ifdef USE_MODE1
	/* because of the issue below, mode 1 will
	 * only rarely behave with correct semantics.
	 */
	if ((urb->transfer_flags & URB_SHORT_NOT_OK)
	    && (urb->transfer_buffer_length - urb->actual_length)
	    > qh->maxpacket)
		channel->desired_mode = 1;
	if (rx_count < hw_ep->max_packet_sz_rx) {
		length = rx_count;
		channel->desired_mode = 0;
	} else {
		length = urb->transfer_buffer_length;
	}
#endif

	/* See comments above on disadvantages of using mode 1 */
	val = musb_readw(epio, MUSB_RXCSR);
	val &= ~MUSB_RXCSR_H_REQPKT;

	if (channel->desired_mode == 0)
		val &= ~MUSB_RXCSR_H_AUTOREQ;
	else
		val |= MUSB_RXCSR_H_AUTOREQ;
	val |= MUSB_RXCSR_DMAENAB;

	/* autoclear shouldn't be set in high bandwidth */
	if (qh->hb_mult == 1)
		val |= MUSB_RXCSR_AUTOCLEAR;

	musb_writew(epio, MUSB_RXCSR, MUSB_RXCSR_H_WZC_BITS | val);

	/* REVISIT: when actual_length != 0, transfer_buffer_length
	 * may need to be adjusted first...
	 */
	done = dma->channel_program(channel, qh->maxpacket,
				   channel->desired_mode,
				   buf, length);

	if (!done) {
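		/* Programming failed: release the channel and clear the
		 * DMA bits in RXCSR so the caller can fall back to PIO.
		 */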
		dma->channel_release(channel);
		hw_ep->rx_channel = NULL;
		channel = NULL;
		val = musb_readw(epio, MUSB_RXCSR);
		val &= ~(MUSB_RXCSR_DMAENAB
			 | MUSB_RXCSR_H_AUTOREQ
			 | MUSB_RXCSR_AUTOCLEAR);
		musb_writew(epio, MUSB_RXCSR, val);
	}

	return done;
}
#else
static inline int musb_rx_dma_inventra_cppi41(struct dma_controller *dma,
					      struct musb_hw_ep *hw_ep,
					      struct musb_qh *qh,
					      struct urb *urb,
					      size_t len)
{}

static inline int musb_rx_dma_in_inventra_cppi41(struct dma_controller *dma,
						 struct musb_hw_ep *hw_ep,
						 struct musb_qh *qh,
						 struct urb *urb,
						 size_t len,
						 u8 iso_err)
{}
#endif

/*
 * Service an RX interrupt for the given IN endpoint; docs cover bulk, iso,
 * and high-bandwidth IN transfer cases.
 */
void musb_host_rx(struct musb *musb, u8 epnum)
{}

/* schedule nodes correspond to peripheral endpoints, like an OHCI QH.
 * the software schedule associates multiple such nodes with a given
 * host side hardware endpoint + direction; scheduling may activate
 * that hardware endpoint.
 */
static int musb_schedule(
	struct musb		*musb,
	struct musb_qh		*qh,
	int			is_in)
{}
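
/*
 * A sketch (illustrative, not built) of the claim step: periodic
 * transfers scan for an unclaimed hardware endpoint whose FIFO fits,
 * preferring the closest fit so the large FIFOs stay free for endpoints
 * that need them. The real musb_schedule() also special-cases bulk
 * (the shared ep1 pair) and high-bandwidth multipliers.
 */
#if 0
static struct musb_hw_ep *best_fit_example(struct musb *musb,
					   struct musb_qh *qh, int is_in)
{
	struct musb_hw_ep *best = NULL;
	int epnum, best_diff = 4096;

	for (epnum = 1; epnum < musb->nr_endpoints; epnum++) {
		struct musb_hw_ep *hw_ep = musb->endpoints + epnum;
		int diff;

		if (musb_ep_get_qh(hw_ep, is_in))
			continue;	/* already claimed */

		diff = (is_in ? hw_ep->max_packet_sz_rx
			      : hw_ep->max_packet_sz_tx) - qh->maxpacket;
		if (diff >= 0 && diff < best_diff) {
			best_diff = diff;
			best = hw_ep;
		}
	}
	return best;
}
#endif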

static int musb_urb_enqueue(
	struct usb_hcd			*hcd,
	struct urb			*urb,
	gfp_t				mem_flags)
{}


/*
 * Abort the transfer at the head of a hardware queue.
 * Called with the controller locked and IRQs blocked.
 * The hardware queue then advances to the next transfer, unless prevented.
 */
static int musb_cleanup_urb(struct urb *urb, struct musb_qh *qh)
{}
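
/*
 * A sketch (illustrative, not built) of the register-level part of that
 * abort: kill the host request bits and flush the FIFO so the endpoint
 * is idle before the queue advances. The real musb_cleanup_urb() also
 * aborts any active DMA channel and handles ep0.
 */
#if 0
static void abort_head_example(struct musb_hw_ep *ep, int is_in)
{
	void __iomem *epio = ep->regs;
	u16 csr;

	if (is_in) {
		/* stop requesting packets; discard whatever arrived */
		csr = musb_readw(epio, MUSB_RXCSR);
		csr &= ~(MUSB_RXCSR_H_REQPKT | MUSB_RXCSR_H_AUTOREQ
			 | MUSB_RXCSR_AUTOCLEAR);
		musb_writew(epio, MUSB_RXCSR, csr | MUSB_RXCSR_FLUSHFIFO);
	} else {
		musb_h_tx_flush_fifo(ep);
		csr = musb_readw(epio, MUSB_TXCSR);
		csr &= ~(MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAENAB
			 | MUSB_TXCSR_H_ERROR | MUSB_TXCSR_TXPKTRDY);
		musb_writew(epio, MUSB_TXCSR, csr);
	}
}
#endif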

static int musb_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{}

/* disable an endpoint */
static void
musb_h_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep)
{}

static int musb_h_get_frame_number(struct usb_hcd *hcd)
{}

static int musb_h_start(struct usb_hcd *hcd)
{}

static void musb_h_stop(struct usb_hcd *hcd)
{}

static int musb_bus_suspend(struct usb_hcd *hcd)
{}

static int musb_bus_resume(struct usb_hcd *hcd)
{}

#ifndef CONFIG_MUSB_PIO_ONLY

#define MUSB_USB_DMA_ALIGN 4

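/*
 * Bounce buffer used when an urb's transfer_buffer is not aligned to
 * MUSB_USB_DMA_ALIGN: kmalloc_ptr keeps the raw allocation for kfree(),
 * old_xfer_buffer remembers the caller's buffer, and data[] is the
 * aligned area the controller actually DMAs to/from (positioned with
 * PTR_ALIGN below).
 */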
struct musb_temp_buffer {
	void *kmalloc_ptr;
	void *old_xfer_buffer;
	u8 data[];
};

static void musb_free_temp_buffer(struct urb *urb)
{
	enum dma_data_direction dir;
	struct musb_temp_buffer *temp;
	size_t length;

	if (!(urb->transfer_flags & URB_ALIGNED_TEMP_BUFFER))
		return;

	dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;

	temp = container_of(urb->transfer_buffer, struct musb_temp_buffer,
			    data);

	if (dir == DMA_FROM_DEVICE) {
		if (usb_pipeisoc(urb->pipe))
			length = urb->transfer_buffer_length;
		else
			length = urb->actual_length;

		memcpy(temp->old_xfer_buffer, temp->data, length);
	}
	urb->transfer_buffer = temp->old_xfer_buffer;
	kfree(temp->kmalloc_ptr);

	urb->transfer_flags &= ~URB_ALIGNED_TEMP_BUFFER;
}

static int musb_alloc_temp_buffer(struct urb *urb, gfp_t mem_flags)
{
	enum dma_data_direction dir;
	struct musb_temp_buffer *temp;
	void *kmalloc_ptr;
	size_t kmalloc_size;

	if (urb->num_sgs || urb->sg ||
	    urb->transfer_buffer_length == 0 ||
	    !((uintptr_t)urb->transfer_buffer & (MUSB_USB_DMA_ALIGN - 1)))
		return 0;

	dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;

	/* Allocate a buffer with enough padding for alignment */
	kmalloc_size = urb->transfer_buffer_length +
		sizeof(struct musb_temp_buffer) + MUSB_USB_DMA_ALIGN - 1;

	kmalloc_ptr = kmalloc(kmalloc_size, mem_flags);
	if (!kmalloc_ptr)
		return -ENOMEM;

	/* Position our struct temp_buffer such that data is aligned */
	temp = PTR_ALIGN(kmalloc_ptr, MUSB_USB_DMA_ALIGN);
	temp->kmalloc_ptr = kmalloc_ptr;
	temp->old_xfer_buffer = urb->transfer_buffer;
	if (dir == DMA_TO_DEVICE)
		memcpy(temp->data, urb->transfer_buffer,
		       urb->transfer_buffer_length);
	urb->transfer_buffer = temp->data;

	urb->transfer_flags |= URB_ALIGNED_TEMP_BUFFER;

	return 0;
}

static int musb_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
				      gfp_t mem_flags)
{
	struct musb	*musb = hcd_to_musb(hcd);
	int ret;

	/*
	 * The DMA engine in RTL 1.8 and above cannot handle DMA addresses
	 * that are not aligned to a 4 byte boundary. For such engines we
	 * implement the (un)map_urb_for_dma hooks. Do not use these hooks
	 * for RTL < 1.8.
	 */
	if (musb->hwvers < MUSB_HWVERS_1800)
		return usb_hcd_map_urb_for_dma(hcd, urb, mem_flags);

	ret = musb_alloc_temp_buffer(urb, mem_flags);
	if (ret)
		return ret;

	ret = usb_hcd_map_urb_for_dma(hcd, urb, mem_flags);
	if (ret)
		musb_free_temp_buffer(urb);

	return ret;
}

static void musb_unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb)
{
	struct musb	*musb = hcd_to_musb(hcd);

	usb_hcd_unmap_urb_for_dma(hcd, urb);

	/* Do not use this hook for RTL<1.8 (see description above) */
	if (musb->hwvers < MUSB_HWVERS_1800)
		return;

	musb_free_temp_buffer(urb);
}
#endif /* !CONFIG_MUSB_PIO_ONLY */

static const struct hc_driver musb_hc_driver = {};

int musb_host_alloc(struct musb *musb)
{}

void musb_host_cleanup(struct musb *musb)
{}

void musb_host_free(struct musb *musb)
{}

int musb_host_setup(struct musb *musb, int power_budget)
{}

void musb_host_resume_root_hub(struct musb *musb)
{}

void musb_host_poke_root_hub(struct musb *musb)
{}