// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (c) 2001-2004 by David Brownell
 * Copyright (c) 2003 Michal Sojka, for high-speed iso transfers
 */

/* this file is part of ehci-hcd.c */

/*-------------------------------------------------------------------------*/

/*
 * EHCI scheduled transaction support:  interrupt, iso, split iso
 * These are called "periodic" transactions in the EHCI spec.
 *
 * Note that for interrupt transfers, the QH/QTD manipulation is shared
 * with the "asynchronous" transaction support (control/bulk transfers).
 * The only real difference is in how interrupt transfers are scheduled.
 *
 * For ISO, we make an "iso_stream" head to serve the same role as a QH.
 * It keeps track of every ITD (or SITD) that's linked, and holds enough
 * pre-calculated schedule data to make appending to the queue be quick.
 */

static int ehci_get_frame(struct usb_hcd *hcd);

/*
 * periodic_next_shadow - return "next" pointer on shadow list
 * @ehci: host controller state
 * @periodic: host pointer to qh/itd/sitd
 * @tag: hardware tag for type of this record
 */
static union ehci_shadow *
periodic_next_shadow(struct ehci_hcd *ehci, union ehci_shadow *periodic,
		__hc32 tag)
{}
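
/*
 * Sketch of the elided body (assuming the union ehci_shadow layout in
 * ehci.h): the hardware tag selects which member of the shadow union
 * carries the software "next" pointer.
 *
 *	switch (hc32_to_cpu(ehci, tag)) {
 *	case Q_TYPE_QH:
 *		return &periodic->qh->qh_next;
 *	case Q_TYPE_FSTN:
 *		return &periodic->fstn->fstn_next;
 *	case Q_TYPE_ITD:
 *		return &periodic->itd->itd_next;
 *	case Q_TYPE_SITD:
 *	default:
 *		return &periodic->sitd->sitd_next;
 *	}
 */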

static __hc32 *
shadow_next_periodic(struct ehci_hcd *ehci, union ehci_shadow *periodic,
		__hc32 tag)
{}

/* caller must hold ehci->lock */
static void periodic_unlink(struct ehci_hcd *ehci, unsigned frame, void *ptr)
{}

/*-------------------------------------------------------------------------*/

/* Bandwidth and TT management */

/* Find the TT data structure for this device; create it if necessary */
static struct ehci_tt *find_tt(struct usb_device *udev)
{}

/* Release the TT above udev, if it's not in use */
static void drop_tt(struct usb_device *udev)
{}

static void bandwidth_dbg(struct ehci_hcd *ehci, int sign, char *type,
		struct ehci_per_sched *ps)
{}

static void reserve_release_intr_bandwidth(struct ehci_hcd *ehci,
		struct ehci_qh *qh, int sign)
{}

/*-------------------------------------------------------------------------*/

static void compute_tt_budget(u8 budget_table[EHCI_BANDWIDTH_SIZE],
		struct ehci_tt *tt)
{}

static int __maybe_unused same_tt(struct usb_device *dev1,
		struct usb_device *dev2)
{}

#ifdef CONFIG_USB_EHCI_TT_NEWSCHED

/* usecs of periodic low/fullspeed data the TT's downstream bus may
 * carry in each uframe: the full budget in uframes 0-5, a shortened
 * budget in uframe 6, and nothing may start in uframe 7 (near EOF).
 */
static const unsigned char
max_tt_usecs[] = { 125, 125, 125, 125, 125, 125, 30, 0 };

/* carryover low/fullspeed bandwidth that crosses uframe boundaries */
static inline void carryover_tt_bandwidth(unsigned short tt_usecs[8])
{}
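
/*
 * A minimal sketch of the elided carryover pass: usecs beyond one
 * uframe's budget spill forward into the next uframe.
 *
 *	int i;
 *
 *	for (i = 0; i < 7; i++) {
 *		if (tt_usecs[i] > max_tt_usecs[i]) {
 *			tt_usecs[i + 1] += tt_usecs[i] - max_tt_usecs[i];
 *			tt_usecs[i] = max_tt_usecs[i];
 *		}
 *	}
 */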

/*
 * Return true if the device's tt's downstream bus is available for a
 * periodic transfer of the specified length (usecs), starting at the
 * specified frame/uframe.  Note that (as summarized in section 11.19
 * of the usb 2.0 spec) TTs can buffer multiple transactions for each
 * uframe.
 *
 * The uframe parameter is when the fullspeed/lowspeed transfer
 * should be executed in "B-frame" terms, which is the same as the
 * highspeed ssplit's uframe (which is in "H-frame" terms).  For example
 * a ssplit in "H-frame" 0 causes a transfer in "B-frame" 0.
 * See the EHCI spec sec 4.5 and fig 4.7.
 *
 * This checks if the full/lowspeed bus, at the specified starting uframe,
 * has the specified bandwidth available, according to rules listed
 * in USB 2.0 spec section 11.18.1 fig 11-60.
 *
 * This does not check if the transfer would exceed the max ssplit
 * limit of 16, specified in USB 2.0 spec section 11.18.4 requirement #4,
 * since proper scheduling limits ssplits to less than 16 per uframe.
 */
static int tt_available(
	struct ehci_hcd		*ehci,
	struct ehci_per_sched	*ps,
	struct ehci_tt		*tt,
	unsigned		frame,
	unsigned		uframe
)
{}
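
/*
 * Illustrative core of the elided check, repeated for each frame the
 * periodic transfer touches: tentatively add the transfer's usecs at
 * the start uframe, let the overflow carry forward, and fail if
 * anything spills past uframe 7 (whose budget is zero).
 *
 *	tt_usecs[uframe] += usecs;
 *	carryover_tt_bandwidth(tt_usecs);
 *	if (max_tt_usecs[7] < tt_usecs[7])
 *		return 0;
 */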

#else

/* return true iff the device's transaction translator is available
 * for a periodic transfer starting at the specified frame, using
 * all the uframes in the mask.
 */
static int tt_no_collision(
	struct ehci_hcd		*ehci,
	unsigned		period,
	struct usb_device	*dev,
	unsigned		frame,
	u32			uf_mask
)
{
	if (period == 0)	/* error */
		return 0;

	/* note bandwidth wastage:  split never follows csplit
	 * (different dev or endpoint) until the next uframe.
	 * calling convention doesn't make that distinction.
	 */
	for (; frame < ehci->periodic_size; frame += period) {
		union ehci_shadow	here;
		__hc32			type;
		struct ehci_qh_hw	*hw;

		here = ehci->pshadow[frame];
		type = Q_NEXT_TYPE(ehci, ehci->periodic[frame]);
		while (here.ptr) {
			switch (hc32_to_cpu(ehci, type)) {
			case Q_TYPE_ITD:
				type = Q_NEXT_TYPE(ehci, here.itd->hw_next);
				here = here.itd->itd_next;
				continue;
			case Q_TYPE_QH:
				hw = here.qh->hw;
				if (same_tt(dev, here.qh->ps.udev)) {
					u32		mask;

					mask = hc32_to_cpu(ehci,
							hw->hw_info2);
					/* "knows" no gap is needed */
					mask |= mask >> 8;
					if (mask & uf_mask)
						break;
				}
				type = Q_NEXT_TYPE(ehci, hw->hw_next);
				here = here.qh->qh_next;
				continue;
			case Q_TYPE_SITD:
				if (same_tt(dev, here.sitd->urb->dev)) {
					u16		mask;

					mask = hc32_to_cpu(ehci, here.sitd
								->hw_uframe);
					/* FIXME assumes no gap for IN! */
					mask |= mask >> 8;
					if (mask & uf_mask)
						break;
				}
				type = Q_NEXT_TYPE(ehci, here.sitd->hw_next);
				here = here.sitd->sitd_next;
				continue;
			/* case Q_TYPE_FSTN: */
			default:
				ehci_dbg(ehci,
					"periodic frame %d bogus type %d\n",
					frame, type);
			}

			/* collision or error */
			return 0;
		}
	}

	/* no collision */
	return 1;
}

#endif /* CONFIG_USB_EHCI_TT_NEWSCHED */

/*-------------------------------------------------------------------------*/

static void enable_periodic(struct ehci_hcd *ehci)
{}

static void disable_periodic(struct ehci_hcd *ehci)
{}

/*-------------------------------------------------------------------------*/

/* periodic schedule slots have iso tds (normal or split) first, then a
 * sparse tree for active interrupt transfers.
 *
 * this just links in a qh; caller guarantees uframe masks are set right.
 * no FSTN support (yet; ehci 0.96+)
 */
static void qh_link_periodic(struct ehci_hcd *ehci, struct ehci_qh *qh)
{}
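
/*
 * Rough shape of the elided link step, for each frame in the qh's
 * period: skip the iso nodes at the head of the shadow list, advance
 * past qhs with longer or equal periods (branches are sorted
 * slow-to-fast so interior tree nodes can be shared), then splice,
 * with prev/hw_p pointing at the shadow and hardware links being
 * patched:
 *
 *	qh->qh_next = here;
 *	if (here.qh)
 *		qh->hw->hw_next = *hw_p;
 *	wmb();
 *	prev->qh = qh;
 *	*hw_p = QH_NEXT(ehci, qh->qh_dma);
 */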

static void qh_unlink_periodic(struct ehci_hcd *ehci, struct ehci_qh *qh)
{}

static void cancel_unlink_wait_intr(struct ehci_hcd *ehci, struct ehci_qh *qh)
{}

static void start_unlink_intr(struct ehci_hcd *ehci, struct ehci_qh *qh)
{}

/*
 * It is common for only one interrupt URB to be scheduled on a qh at
 * a time, and since complete() runs in tasklet context, introduce a
 * short delay to avoid unlinking the qh too early.
 */
static void start_unlink_intr_wait(struct ehci_hcd *ehci,
				   struct ehci_qh *qh)
{}
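
/*
 * Sketch of the elided wait (assuming the driver's hrtimer event
 * machinery): park the qh on the intr_unlink_wait list and let a
 * later hrtimer event start the real unlink, e.g.
 *
 *	list_add_tail(&qh->unlink_node, &ehci->intr_unlink_wait);
 *	ehci_enable_event(ehci, EHCI_HRTIMER_START_UNLINK_INTR, true);
 */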

static void end_unlink_intr(struct ehci_hcd *ehci, struct ehci_qh *qh)
{}

/*-------------------------------------------------------------------------*/

static int check_period(
	struct ehci_hcd *ehci,
	unsigned	frame,
	unsigned	uframe,
	unsigned	uperiod,
	unsigned	usecs
) {}

static int check_intr_schedule(
	struct ehci_hcd		*ehci,
	unsigned		frame,
	unsigned		uframe,
	struct ehci_qh		*qh,
	unsigned		*c_maskp,
	struct ehci_tt		*tt
)
{}

/* "first fit" scheduling policy used the first time through,
 * or when the previous schedule slot can't be re-used.
 */
static int qh_schedule(struct ehci_hcd *ehci, struct ehci_qh *qh)
{}
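
/*
 * First-fit sketch of the elided search: try candidate slots and keep
 * the first one that check_intr_schedule() accepts, roughly
 *
 *	for (frame = 0; frame < qh->ps.period; frame++)
 *		for (uframe = 0; uframe < 8; uframe++)
 *			if (check_intr_schedule(ehci, frame, uframe,
 *					qh, &c_mask, tt) == 0)
 *				goto found;
 */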

static int intr_submit(
	struct ehci_hcd		*ehci,
	struct urb		*urb,
	struct list_head	*qtd_list,
	gfp_t			mem_flags
) {}

static void scan_intr(struct ehci_hcd *ehci)
{}

/*-------------------------------------------------------------------------*/

/* ehci_iso_stream ops work with both ITD and SITD */

static struct ehci_iso_stream *
iso_stream_alloc(gfp_t mem_flags)
{}

static void
iso_stream_init(
	struct ehci_hcd		*ehci,
	struct ehci_iso_stream	*stream,
	struct urb		*urb
)
{}

static struct ehci_iso_stream *
iso_stream_find(struct ehci_hcd *ehci, struct urb *urb)
{}

/*-------------------------------------------------------------------------*/

/* ehci_iso_sched ops can be ITD-only or SITD-only */

static struct ehci_iso_sched *
iso_sched_alloc(unsigned packets, gfp_t mem_flags)
{}

static inline void
itd_sched_init(
	struct ehci_hcd		*ehci,
	struct ehci_iso_sched	*iso_sched,
	struct ehci_iso_stream	*stream,
	struct urb		*urb
)
{}

static void
iso_sched_free(
	struct ehci_iso_stream	*stream,
	struct ehci_iso_sched	*iso_sched
)
{}

static int
itd_urb_transaction(
	struct ehci_iso_stream	*stream,
	struct ehci_hcd		*ehci,
	struct urb		*urb,
	gfp_t			mem_flags
)
{}

/*-------------------------------------------------------------------------*/

static void reserve_release_iso_bandwidth(struct ehci_hcd *ehci,
		struct ehci_iso_stream *stream, int sign)
{}

static inline int
itd_slot_ok(
	struct ehci_hcd		*ehci,
	struct ehci_iso_stream	*stream,
	unsigned		uframe
)
{}

static inline int
sitd_slot_ok(
	struct ehci_hcd		*ehci,
	struct ehci_iso_stream	*stream,
	unsigned		uframe,
	struct ehci_iso_sched	*sched,
	struct ehci_tt		*tt
)
{}

/*
 * This scheduler plans almost as far into the future as it has actual
 * periodic schedule slots.  (Affected by TUNE_FLS, which defaults to
 * "as small as possible" to be cache-friendlier.)  That limits the size
 * transfers you can stream reliably; avoid more than 64 msec per urb.
 * Also avoid queue depths of less than ehci's worst irq latency (affected
 * by the per-urb URB_NO_INTERRUPT hint, the log2_irq_thresh module parameter,
 * and other factors); or more than about 230 msec total (for portability,
 * given EHCI_TUNE_FLS and the slop).  Or, write a smarter scheduler!
 */
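
/*
 * Worked example of the bound above: the EHCI frame list holds 1024,
 * 512, or 256 frames (1 msec each) depending on FLS, so the smallest
 * schedule wraps every 256 msec; leaving slop for worst-case irq
 * latency is what yields the "about 230 msec" figure.
 */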

static int
iso_stream_schedule(
	struct ehci_hcd		*ehci,
	struct urb		*urb,
	struct ehci_iso_stream	*stream
)
{}

/*-------------------------------------------------------------------------*/

static inline void
itd_init(struct ehci_hcd *ehci, struct ehci_iso_stream *stream,
		struct ehci_itd *itd)
{}

static inline void
itd_patch(
	struct ehci_hcd		*ehci,
	struct ehci_itd		*itd,
	struct ehci_iso_sched	*iso_sched,
	unsigned		index,
	u16			uframe
)
{}

static inline void
itd_link(struct ehci_hcd *ehci, unsigned frame, struct ehci_itd *itd)
{}

/* fit urb's itds into the selected schedule slot; activate as needed */
static void itd_link_urb(
	struct ehci_hcd		*ehci,
	struct urb		*urb,
	unsigned		mod,
	struct ehci_iso_stream	*stream
)
{}

/* ITD status bits that indicate a failed transaction */
#define ISO_ERRS (EHCI_ISOC_BUF_ERR | EHCI_ISOC_BABBLE | EHCI_ISOC_XACTERR)

/* Process and recycle a completed ITD.  Return true iff its urb completed,
 * and hence its completion callback probably added things to the hardware
 * schedule.
 *
 * Note that we carefully avoid recycling this descriptor until after any
 * completion callback runs, so that it won't be reused quickly.  That is,
 * assuming (a) no more than two urbs per frame on this endpoint, and also
 * (b) only this endpoint's completions submit URBs.  It seems some silicon
 * corrupts things if you reuse completed descriptors very quickly...
 */
static bool itd_complete(struct ehci_hcd *ehci, struct ehci_itd *itd)
{}

/*-------------------------------------------------------------------------*/

static int itd_submit(struct ehci_hcd *ehci, struct urb *urb,
	gfp_t mem_flags)
{}

/*-------------------------------------------------------------------------*/

/*
 * "Split ISO TDs" ... used for USB 1.1 devices going through the
 * TTs in USB 2.0 hubs.  These need microframe scheduling.
 */

static inline void
sitd_sched_init(
	struct ehci_hcd		*ehci,
	struct ehci_iso_sched	*iso_sched,
	struct ehci_iso_stream	*stream,
	struct urb		*urb
)
{}

static int
sitd_urb_transaction(
	struct ehci_iso_stream	*stream,
	struct ehci_hcd		*ehci,
	struct urb		*urb,
	gfp_t			mem_flags
)
{}

/*-------------------------------------------------------------------------*/

static inline void
sitd_patch(
	struct ehci_hcd		*ehci,
	struct ehci_iso_stream	*stream,
	struct ehci_sitd	*sitd,
	struct ehci_iso_sched	*iso_sched,
	unsigned		index
)
{}

static inline void
sitd_link(struct ehci_hcd *ehci, unsigned frame, struct ehci_sitd *sitd)
{}

/* fit urb's sitds into the selected schedule slot; activate as needed */
static void sitd_link_urb(
	struct ehci_hcd		*ehci,
	struct urb		*urb,
	unsigned		mod,
	struct ehci_iso_stream	*stream
)
{}

/*-------------------------------------------------------------------------*/

/* SITD status bits that indicate a failed transaction */
#define SITD_ERRS (SITD_STS_ERR | SITD_STS_DBE | SITD_STS_BABBLE \
		| SITD_STS_XACT | SITD_STS_MMF)

/* Process and recycle a completed SITD.  Return true iff its urb completed,
 * and hence its completion callback probably added things to the hardware
 * schedule.
 *
 * Note that we carefully avoid recycling this descriptor until after any
 * completion callback runs, so that it won't be reused quickly.  That is,
 * assuming (a) no more than two urbs per frame on this endpoint, and also
 * (b) only this endpoint's completions submit URBs.  It seems some silicon
 * corrupts things if you reuse completed descriptors very quickly...
 */
static bool sitd_complete(struct ehci_hcd *ehci, struct ehci_sitd *sitd)
{}

static int sitd_submit(struct ehci_hcd *ehci, struct urb *urb,
	gfp_t mem_flags)
{}

/*-------------------------------------------------------------------------*/

static void scan_isoc(struct ehci_hcd *ehci)
{}