linux/drivers/usb/host/xhci.h

/* SPDX-License-Identifier: GPL-2.0 */

/*
 * xHCI host controller driver
 *
 * Copyright (C) 2008 Intel Corp.
 *
 * Author: Sarah Sharp
 * Some code borrowed from the Linux EHCI driver.
 */

#ifndef __LINUX_XHCI_HCD_H
#define __LINUX_XHCI_HCD_H

#include <linux/usb.h>
#include <linux/timer.h>
#include <linux/kernel.h>
#include <linux/usb/hcd.h>
#include <linux/io-64-nonatomic-lo-hi.h>

/* Code sharing between pci-quirks and xhci hcd */
#include "xhci-ext-caps.h"
#include "pci-quirks.h"

#include "xhci-port.h"
#include "xhci-caps.h"

/* max buffer size for trace and debug messages */
#define XHCI_MSG_MAX

/* xHCI PCI Configuration Registers */
#define XHCI_SBRN_OFFSET

/* Max number of USB devices for any host controller - limit in section 6.1 */
#define MAX_HC_SLOTS
/* Section 5.3.3 - MaxPorts */
#define MAX_HC_PORTS

/*
 * xHCI register interface.
 * This corresponds to the eXtensible Host Controller Interface (xHCI)
 * Revision 0.95 specification
 */

/**
 * struct xhci_cap_regs - xHCI Host Controller Capability Registers.
 * @hc_capbase:		length of the capabilities register and HC version number
 * @hcs_params1:	HCSPARAMS1 - Structural Parameters 1
 * @hcs_params2:	HCSPARAMS2 - Structural Parameters 2
 * @hcs_params3:	HCSPARAMS3 - Structural Parameters 3
 * @hcc_params:		HCCPARAMS - Capability Parameters
 * @db_off:		DBOFF - Doorbell array offset
 * @run_regs_off:	RTSOFF - Runtime register space offset
 * @hcc_params2:	HCCPARAMS2 - Capability Parameters 2, xhci 1.1 only
 */
struct xhci_cap_regs {};
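
/*
 * Illustrative sketch only (these helpers are not part of this header): per
 * the spec, the low byte of @hc_capbase is CAPLENGTH (the offset of the
 * operational registers from the capability base) and the high 16 bits are
 * the BCD-encoded HCIVERSION.
 */
static inline unsigned int xhci_example_hc_length(u32 hc_capbase)
{
	return hc_capbase & 0xff;		/* CAPLENGTH, bits 7:0 */
}

static inline unsigned int xhci_example_hc_version(u32 hc_capbase)
{
	return (hc_capbase >> 16) & 0xffff;	/* HCIVERSION, bits 31:16 */
}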

/* Number of registers per port */
#define NUM_PORT_REGS

#define PORTSC
#define PORTPMSC
#define PORTLI
#define PORTHLPMC

/**
 * struct xhci_op_regs - xHCI Host Controller Operational Registers.
 * @command:		USBCMD - xHC command register
 * @status:		USBSTS - xHC status register
 * @page_size:		This indicates the page size that the host controller
 * 			supports.  If bit n is set, the HC supports a page size
 * 			of 2^(n+12), up to a 128MB page size.
 * 			4K is the minimum page size.
 * @cmd_ring:		CRP - 64-bit Command Ring Pointer
 * @dcbaa_ptr:		DCBAAP - 64-bit Device Context Base Address Array Pointer
 * @config_reg:		CONFIG - Configure Register
 * @port_status_base:	PORTSCn - base address for Port Status and Control
 * 			Each port has a Port Status and Control register,
 * 			followed by a Port Power Management Status and Control
 * 			register, a Port Link Info register, and a reserved
 * 			register.
 * @port_power_base:	PORTPMSCn - base address for
 * 			Port Power Management Status and Control
 * @port_link_base:	PORTLIn - base address for Port Link Info (current
 * 			Link PM state and control) for USB 2.1 and USB 3.0
 * 			devices.
 */
struct xhci_op_regs {};
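
/*
 * Illustrative sketch (not part of the driver) of decoding the PAGESIZE
 * register described above: bit n set means a 2^(n+12) byte page is
 * supported, so the smallest supported page size follows from the lowest
 * set bit (bit 0 => 4K).
 */
static inline unsigned int xhci_example_min_page_size(u32 page_size_reg)
{
	if (!page_size_reg)
		return 0;
	return 1U << (ffs(page_size_reg) - 1 + 12);
}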

/* USBCMD - USB command - command bitmasks */
/* start/stop HC execution - do not write unless HC is halted */
#define CMD_RUN
/* Reset HC - resets internal HC state machine and all registers (except
 * PCI config regs).  HC does NOT drive a USB reset on the downstream ports.
 * The xHCI driver must reinitialize the xHC after setting this bit.
 */
#define CMD_RESET
/* Event Interrupt Enable - a '1' allows interrupts from the host controller */
#define CMD_EIE
/* Host System Error Interrupt Enable - get out-of-band signal for HC errors */
#define CMD_HSEIE
/* bits 4:6 are reserved (and should be preserved on writes). */
/* light reset (port status stays unchanged) - reset completed when this is 0 */
#define CMD_LRESET
/* host controller save/restore state. */
#define CMD_CSS
#define CMD_CRS
/* Enable Wrap Event - '1' means xHC generates an event when MFINDEX wraps. */
#define CMD_EWE
/* MFINDEX power management - '1' means xHC can stop MFINDEX counter if all root
 * hubs are in U3 (selective suspend), disconnect, disabled, or powered-off.
 * '0' means the xHC can power it off if all ports are in the disconnect,
 * disabled, or powered-off state.
 */
#define CMD_PM_INDEX
/* bit 14 Extended TBC Enable, changes Isoc TRB fields to support larger TBC */
#define CMD_ETE
/* bits 15:31 are reserved (and should be preserved on writes). */

#define XHCI_RESET_LONG_USEC
#define XHCI_RESET_SHORT_USEC

/* IMAN - Interrupt Management Register */
#define IMAN_IE
#define IMAN_IP

/* USBSTS - USB status - status bitmasks */
/* HC not running - set to 1 when run/stop bit is cleared. */
#define STS_HALT
/* serious error, e.g. PCI parity error.  The HC will clear the run/stop bit. */
#define STS_FATAL
/* event interrupt - clear this prior to clearing any IP flags in IR set */
#define STS_EINT
/* port change detect */
#define STS_PORT
/* bits 5:7 reserved and zeroed */
/* save state status - '1' means xHC is saving state */
#define STS_SAVE
/* restore state status - '1' means xHC is restoring state */
#define STS_RESTORE
/* true: save or restore error */
#define STS_SRE
/* true: Controller Not Ready to accept doorbell or op reg writes after reset */
#define STS_CNR
/* true: internal Host Controller Error - SW needs to reset and reinitialize */
#define STS_HCE
/* bits 13:31 reserved and should be preserved */
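
/*
 * Illustrative sketch of how the USBCMD and USBSTS bits above are used
 * together; this is not the driver's xhci_halt()/xhci_reset().  The register
 * pointers and the halt timeout are assumptions, and xhci_handshake() is
 * forward-declared here (it is declared again later in this header).
 */
int xhci_handshake(void __iomem *ptr, u32 mask, u32 done, u64 timeout_us);

static inline int xhci_example_halt_and_reset(void __iomem *op_command,
					      void __iomem *op_status)
{
	u32 cmd = readl(op_command);
	int ret;

	/* Clear run/stop and wait (up to an illustrative 32 ms) for a halt. */
	writel(cmd & ~CMD_RUN, op_command);
	ret = xhci_handshake(op_status, STS_HALT, STS_HALT, 32 * 1000);
	if (ret)
		return ret;

	/* Reset the controller; the bit self-clears when the reset is done. */
	cmd = readl(op_command);
	writel(cmd | CMD_RESET, op_command);
	return xhci_handshake(op_command, CMD_RESET, 0, XHCI_RESET_LONG_USEC);
}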

/*
 * DNCTRL - Device Notification Control Register - dev_notification bitmasks
 * Generate a device notification event when the HC sees a transaction with a
 * notification type that matches a bit set in this bit field.
 */
#define DEV_NOTE_MASK
#define ENABLE_DEV_NOTE(x)
/* Most of the device notification types should only be used for debug.
 * SW does need to pay attention to function wake notifications.
 */
#define DEV_NOTE_FWAKE

/* CRCR - Command Ring Control Register - cmd_ring bitmasks */
/* bit 0 is the command ring cycle state */
/* stop ring operation after completion of the currently executing command */
#define CMD_RING_PAUSE
/* stop ring immediately - abort the currently executing command */
#define CMD_RING_ABORT
/* true: command ring is running */
#define CMD_RING_RUNNING
/* bits 4:5 reserved and should be preserved */
/* Command Ring pointer - bit mask for the lower 32 bits. */
#define CMD_RING_RSVD_BITS

/* CONFIG - Configure Register - config_reg bitmasks */
/* bits 0:7 - maximum number of device slots enabled (NumSlotsEn) */
#define MAX_DEVS(p)
/* bit 8: U3 Entry Enabled, assert PLC when root port enters U3, xhci 1.1 */
#define CONFIG_U3E
/* bit 9: Configuration Information Enable, xhci 1.1 */
#define CONFIG_CIE
/* bits 10:31 - reserved and should be preserved */

/**
 * struct xhci_intr_reg - Interrupt Register Set
 * @irq_pending:	IMAN - Interrupt Management Register.  Used to enable
 *			interrupts and check for pending interrupts.
 * @irq_control:	IMOD - Interrupt Moderation Register.
 * 			Used to throttle interrupts.
 * @erst_size:		Number of segments in the Event Ring Segment Table (ERST).
 * @erst_base:		ERST base address.
 * @erst_dequeue:	Event ring dequeue pointer.
 *
 * Each interrupter (defined by an MSI-X vector) has an event ring and an Event
 * Ring Segment Table (ERST) associated with it.  The event ring consists of
 * multiple segments of the same size.  The HC places events on the ring and
 * "updates the Cycle bit in the TRBs to indicate to software the current
 * position of the Enqueue Pointer." The HCD (Linux) processes those events and
 * updates the dequeue pointer.
 */
struct xhci_intr_reg {};

/* irq_pending bitmasks */
#define ER_IRQ_PENDING(p)
/* bits 2:31 need to be preserved */
/* THIS IS BUGGY - FIXME - IP IS WRITE 1 TO CLEAR */
#define ER_IRQ_CLEAR(p)
#define ER_IRQ_ENABLE(p)
#define ER_IRQ_DISABLE(p)

/* irq_control bitmasks */
/* Minimum interval between interrupts (in 250ns intervals).  The interval
 * between interrupts will be longer if there are no events on the event ring.
 * Default is 4000 (1 ms).
 */
#define ER_IRQ_INTERVAL_MASK
/* Counter used to count down the time to the next interrupt - HW use only */
#define ER_IRQ_COUNTER_MASK

/* erst_size bitmasks */
/* Preserve bits 16:31 of erst_size */
#define ERST_SIZE_MASK

/* erst_base bitmasks */
#define ERST_BASE_RSVDP

/* erst_dequeue bitmasks */
/* Dequeue ERST Segment Index (DESI) - Segment number (or alias)
 * where the current dequeue pointer lies.  This is an optional HW hint.
 */
#define ERST_DESI_MASK
/* Event Handler Busy (EHB) - is the event ring scheduled to be serviced by
 * a work queue (or delayed service routine)?
 */
#define ERST_EHB
#define ERST_PTR_MASK

/**
 * struct xhci_run_regs
 * @microframe_index:
 * 		MFINDEX - current microframe number
 *
 * Section 5.5 Host Controller Runtime Registers:
 * "Software should read and write these registers using only Dword (32 bit)
 * or larger accesses"
 */
struct xhci_run_regs {};

/**
 * struct xhci_doorbell_array
 *
 * Bits  0 -  7: Endpoint target
 * Bits  8 - 15: RsvdZ
 * Bits 16 - 31: Stream ID
 *
 * Section 5.6
 */
struct xhci_doorbell_array {};

#define DB_VALUE(ep, stream)
#define DB_VALUE_HOST
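
/*
 * Illustrative sketch of ringing a device slot's doorbell (not the driver's
 * xhci_ring_ep_doorbell()): DB_VALUE() packs the endpoint target derived
 * from @ep_index plus the @stream_id into the layout shown above.  @db_addr
 * is assumed to point at the slot's entry in the doorbell array.
 */
static inline void xhci_example_ring_doorbell(__le32 __iomem *db_addr,
					      unsigned int ep_index,
					      unsigned int stream_id)
{
	writel(DB_VALUE(ep_index, stream_id), db_addr);
}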

#define PLT_MASK
#define PLT_SYM
#define PLT_ASYM_RX
#define PLT_ASYM_TX

/**
 * struct xhci_container_ctx
 * @type: Type of context.  Used to calculate offsets to contained contexts.
 * @size: Size of the context data
 * @bytes: The raw context data given to HW
 * @dma: dma address of the bytes
 *
 * Represents either a Device or Input context.  Holds a pointer to the raw
 * memory used for the context (bytes) and dma address of it (dma).
 */
struct xhci_container_ctx {};

/**
 * struct xhci_slot_ctx
 * @dev_info:	Route string, device speed, hub info, and last valid endpoint
 * @dev_info2:	Max exit latency for device number, root hub port number
 * @tt_info:	tt_info is used to construct split transaction tokens
 * @dev_state:	slot state and device address
 *
 * Slot Context - section 6.2.1.1.  This assumes the HC uses 32-byte context
 * structures.  If the HC uses 64-byte contexts, there is an additional 32 bytes
 * reserved at the end of the slot context for HC internal use.
 */
struct xhci_slot_ctx {};

/* dev_info bitmasks */
/* Route String - 0:19 */
#define ROUTE_STRING_MASK
/* Device speed - values defined by PORTSC Device Speed field - 20:23 */
#define DEV_SPEED
#define GET_DEV_SPEED(n)
/* bit 24 reserved */
/* Is this LS/FS device connected through a HS hub? - bit 25 */
#define DEV_MTT
/* Set if the device is a hub - bit 26 */
#define DEV_HUB
/* Index of the last valid endpoint context in this device context - 27:31 */
#define LAST_CTX_MASK
#define LAST_CTX(p)
#define LAST_CTX_TO_EP_NUM(p)
#define SLOT_FLAG
#define EP0_FLAG

/* dev_info2 bitmasks */
/* Max Exit Latency (ms) - worst case time to wake up all links in dev path */
#define MAX_EXIT
/* Root hub port number that is needed to access the USB device */
#define ROOT_HUB_PORT(p)
#define DEVINFO_TO_ROOT_HUB_PORT(p)
/* Maximum number of ports under a hub device */
#define XHCI_MAX_PORTS(p)
#define DEVINFO_TO_MAX_PORTS(p)
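
/*
 * Illustrative sketch: pulling the root hub port number back out of a slot
 * context.  The dev_info2 field name follows the kernel-doc above (it is an
 * assumption here); context fields are little-endian in memory, hence
 * le32_to_cpu().
 */
static inline unsigned int xhci_example_slot_to_port(struct xhci_slot_ctx *slot_ctx)
{
	return DEVINFO_TO_ROOT_HUB_PORT(le32_to_cpu(slot_ctx->dev_info2));
}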

/* tt_info bitmasks */
/*
 * TT Hub Slot ID - for low or full speed devices attached to a high-speed hub
 * The Slot ID of the hub that isolates the high speed signaling from
 * this low or full-speed device.  '0' if attached to root hub port.
 */
#define TT_SLOT
/*
 * The number of the downstream facing port of the high-speed hub
 * '0' if the device is not low or full speed.
 */
#define TT_PORT
#define TT_THINK_TIME(p)
#define GET_TT_THINK_TIME(p)

/* dev_state bitmasks */
/* USB device address - assigned by the HC */
#define DEV_ADDR_MASK
/* bits 8:26 reserved */
/* Slot state */
#define SLOT_STATE
#define GET_SLOT_STATE(p)

#define SLOT_STATE_DISABLED
#define SLOT_STATE_ENABLED
#define SLOT_STATE_DEFAULT
#define SLOT_STATE_ADDRESSED
#define SLOT_STATE_CONFIGURED

/**
 * struct xhci_ep_ctx
 * @ep_info:	endpoint state, streams, mult, and interval information.
 * @ep_info2:	information on endpoint type, max packet size, max burst size,
 * 		error count, and whether the HC will force an event for all
 * 		transactions.
 * @deq:	64-bit ring dequeue pointer address.  If the endpoint only
 * 		defines one stream, this points to the endpoint transfer ring.
 * 		Otherwise, it points to a stream context array, which has a
 * 		ring pointer for each flow.
 * @tx_info:
 * 		Average TRB lengths for the endpoint ring and
 * 		max payload within an Endpoint Service Interval Time (ESIT).
 *
 * Endpoint Context - section 6.2.1.2.  This assumes the HC uses 32-byte context
 * structures.  If the HC uses 64-byte contexts, there is an additional 32 bytes
 * reserved at the end of the endpoint context for HC internal use.
 */
struct xhci_ep_ctx {};

/* ep_info bitmasks */
/*
 * Endpoint State - bits 0:2
 * 0 - disabled
 * 1 - running
 * 2 - halted due to halt condition - ok to manipulate endpoint ring
 * 3 - stopped
 * 4 - TRB error
 * 5-7 - reserved
 */
#define EP_STATE_MASK
#define EP_STATE_DISABLED
#define EP_STATE_RUNNING
#define EP_STATE_HALTED
#define EP_STATE_STOPPED
#define EP_STATE_ERROR
#define GET_EP_CTX_STATE(ctx)

/* Mult - Max number of bursts within an interval, in EP companion desc. */
#define EP_MULT(p)
#define CTX_TO_EP_MULT(p)
/* bits 10:14 are Max Primary Streams */
/* bit 15 is Linear Stream Array */
/* Interval - period between requests to an endpoint - 125u increments. */
#define EP_INTERVAL(p)
#define EP_INTERVAL_TO_UFRAMES(p)
#define CTX_TO_EP_INTERVAL(p)
#define EP_MAXPSTREAMS_MASK
#define EP_MAXPSTREAMS(p)
#define CTX_TO_EP_MAXPSTREAMS(p)
/* Endpoint is set up with a Linear Stream Array (vs. Secondary Stream Array) */
#define EP_HAS_LSA
/* hosts with LEC=1 use bits 31:24 as ESIT high bits. */
#define CTX_TO_MAX_ESIT_PAYLOAD_HI(p)
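
/*
 * Worked example for the Interval field above: the value stored in ep_info
 * is an exponent, so an encoded interval of 3 means 2^3 = 8 microframes =
 * 1 ms between requests.  The helper below is illustrative only.
 */
static inline unsigned int xhci_example_interval_to_us(unsigned int esit_exponent)
{
	return (1U << esit_exponent) * 125;	/* 125 us per microframe */
}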

/* ep_info2 bitmasks */
/*
 * Force Event - generate transfer events for all TRBs for this endpoint
 * This will tell the HC to ignore the IOC and ISP flags (for debugging only).
 */
#define FORCE_EVENT
#define ERROR_COUNT(p)
#define CTX_TO_EP_TYPE(p)
#define EP_TYPE(p)
#define ISOC_OUT_EP
#define BULK_OUT_EP
#define INT_OUT_EP
#define CTRL_EP
#define ISOC_IN_EP
#define BULK_IN_EP
#define INT_IN_EP
/* bit 6 reserved */
/* bit 7 is Host Initiate Disable - for disabling stream selection */
#define MAX_BURST(p)
#define CTX_TO_MAX_BURST(p)
#define MAX_PACKET(p)
#define MAX_PACKET_MASK
#define MAX_PACKET_DECODED(p)
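
/*
 * Illustrative sketch of how ep_info2 is typically composed for a bulk IN
 * endpoint (cf. xhci_endpoint_init() in xhci-mem.c).  The error count of 3
 * is the value the driver normally programs; max packet and max burst come
 * from the endpoint and companion descriptors.
 */
static inline __le32 xhci_example_bulk_in_ep_info2(unsigned int max_packet,
						   unsigned int max_burst)
{
	return cpu_to_le32(EP_TYPE(BULK_IN_EP) | MAX_PACKET(max_packet) |
			   MAX_BURST(max_burst) | ERROR_COUNT(3));
}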

/* tx_info bitmasks */
#define EP_AVG_TRB_LENGTH(p)
#define EP_MAX_ESIT_PAYLOAD_LO(p)
#define EP_MAX_ESIT_PAYLOAD_HI(p)
#define CTX_TO_MAX_ESIT_PAYLOAD(p)

/* deq bitmasks */
#define EP_CTX_CYCLE_MASK
#define SCTX_DEQ_MASK


/**
 * struct xhci_input_control_ctx
 * Input control context; see section 6.2.5.
 *
 * @drop_context:	set the bit of the endpoint context you want to disable
 * @add_context:	set the bit of the endpoint context you want to enable
 */
struct xhci_input_control_ctx {};

#define EP_IS_ADDED(ctrl_ctx, i)
#define EP_IS_DROPPED(ctrl_ctx, i)

/* Represents everything that is needed to issue a command on the command ring.
 * It's useful to pre-allocate these for commands that cannot fail due to
 * out-of-memory errors, like freeing streams.
 */
struct xhci_command {};

/* drop context bitmasks */
#define DROP_EP(x)
/* add context bitmasks */
#define ADD_EP(x)

struct xhci_stream_ctx {};

/* Stream Context Types (section 6.4.1) - bits 3:1 of stream ctx deq ptr */
#define SCT_FOR_CTX(p)
/* Secondary stream array type, dequeue pointer is to a transfer ring */
#define SCT_SEC_TR
/* Primary stream array type, dequeue pointer is to a transfer ring */
#define SCT_PRI_TR
/* Dequeue pointer is for a secondary stream array (SSA) with 8 entries */
#define SCT_SSA_8
#define SCT_SSA_16
#define SCT_SSA_32
#define SCT_SSA_64
#define SCT_SSA_128
#define SCT_SSA_256

/* Assume no secondary streams for now */
struct xhci_stream_info {};

#define SMALL_STREAM_ARRAY_SIZE
#define MEDIUM_STREAM_ARRAY_SIZE

/* Some Intel xHCI host controllers need software to keep track of the bus
 * bandwidth.  Keep track of endpoint info here.  Each root port is allocated
 * the full bus bandwidth.  We must also treat TTs (including each port under a
 * multi-TT hub) as a separate bandwidth domain.  The direct memory interface
 * (DMI) also limits the total bandwidth (across all domains) that can be used.
 */
struct xhci_bw_info {};

/* "Block" sizes in bytes the hardware uses for different device speeds.
 * The logic in this part of the hardware limits the number of bits it can
 * use, so software must represent bandwidth in a less precise manner to
 * mimic what the scheduler hardware computes.
 */
#define FS_BLOCK
#define HS_BLOCK
#define SS_BLOCK
#define DMI_BLOCK

/* Each device speed has a protocol overhead (CRC, bit stuffing, etc) associated
 * with each byte transferred.  SuperSpeed devices have an initial overhead to
 * set up bursts.  These are in blocks, see above.  LS overhead has already been
 * translated into FS blocks.
 */
#define DMI_OVERHEAD
#define DMI_OVERHEAD_BURST
#define SS_OVERHEAD
#define SS_OVERHEAD_BURST
#define HS_OVERHEAD
#define FS_OVERHEAD
#define LS_OVERHEAD
/* The TTs need to claim roughly twice as much bandwidth (94 bytes per
 * microframe ~= 24Mbps) of the HS bus as the devices can actually use because
 * of overhead associated with split transfers crossing microframe boundaries.
 * 31 blocks is pure protocol overhead.
 */
#define TT_HS_OVERHEAD
#define TT_DMI_OVERHEAD

/* Bandwidth limits in blocks */
#define FS_BW_LIMIT
#define TT_BW_LIMIT
#define HS_BW_LIMIT
#define SS_BW_LIMIT_IN
#define DMI_BW_LIMIT_IN
#define SS_BW_LIMIT_OUT
#define DMI_BW_LIMIT_OUT

/* Percentage of bus bandwidth reserved for non-periodic transfers */
#define FS_BW_RESERVED
#define HS_BW_RESERVED
#define SS_BW_RESERVED

struct xhci_virt_ep {};

enum xhci_overhead_type {};

struct xhci_interval_bw {};

#define XHCI_MAX_INTERVAL

struct xhci_interval_bw_table {};

#define EP_CTX_PER_DEV

struct xhci_virt_device {};

/*
 * For each roothub, keep track of the bandwidth information for each periodic
 * interval.
 *
 * If a high speed hub is attached to the roothub, each TT associated with that
 * hub is a separate bandwidth domain.  The interval information for the
 * endpoints on the devices under that TT will appear in the TT structure.
 */
struct xhci_root_port_bw_info {};

struct xhci_tt_bw_info {};


/**
 * struct xhci_device_context_array
 * @dev_context_ptr: array of 64-bit DMA addresses for device contexts
 */
struct xhci_device_context_array {};
/* TODO: write function to set the 64-bit device DMA address */
/*
 * TODO: change this to be dynamically sized at HC mem init time since the HC
 * might not be able to handle the maximum number of devices possible.
 */


struct xhci_transfer_event {};

/* Transfer event flags bitfield, also for select command completion events */
#define TRB_TO_SLOT_ID(p)
#define SLOT_ID_FOR_TRB(p)

#define TRB_TO_EP_ID(p)
#define EP_ID_FOR_TRB(p)

#define TRB_TO_EP_INDEX(p)
#define EP_INDEX_FOR_TRB(p)
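
/*
 * Illustrative sketch of the endpoint addressing convention behind the
 * macros above: the xHCI endpoint ID (DCI) is 1-based with EP0 = 1, while
 * the driver's ep_index is 0-based, i.e. ep_index = DCI - 1
 * (cf. xhci_get_endpoint_index()).
 */
static inline unsigned int xhci_example_ep_index(unsigned int ep_num,
						 bool is_in, bool is_control)
{
	if (is_control)
		return ep_num * 2;			/* EP0 -> index 0 */
	return ep_num * 2 + (is_in ? 1 : 0) - 1;	/* DCI - 1 */
}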

/* Transfer event TRB length bit mask */
#define EVENT_TRB_LEN(p)

/* Completion Code - only applicable for some types of TRBs */
#define COMP_CODE_MASK
#define GET_COMP_CODE(p)
#define COMP_INVALID
#define COMP_SUCCESS
#define COMP_DATA_BUFFER_ERROR
#define COMP_BABBLE_DETECTED_ERROR
#define COMP_USB_TRANSACTION_ERROR
#define COMP_TRB_ERROR
#define COMP_STALL_ERROR
#define COMP_RESOURCE_ERROR
#define COMP_BANDWIDTH_ERROR
#define COMP_NO_SLOTS_AVAILABLE_ERROR
#define COMP_INVALID_STREAM_TYPE_ERROR
#define COMP_SLOT_NOT_ENABLED_ERROR
#define COMP_ENDPOINT_NOT_ENABLED_ERROR
#define COMP_SHORT_PACKET
#define COMP_RING_UNDERRUN
#define COMP_RING_OVERRUN
#define COMP_VF_EVENT_RING_FULL_ERROR
#define COMP_PARAMETER_ERROR
#define COMP_BANDWIDTH_OVERRUN_ERROR
#define COMP_CONTEXT_STATE_ERROR
#define COMP_NO_PING_RESPONSE_ERROR
#define COMP_EVENT_RING_FULL_ERROR
#define COMP_INCOMPATIBLE_DEVICE_ERROR
#define COMP_MISSED_SERVICE_ERROR
#define COMP_COMMAND_RING_STOPPED
#define COMP_COMMAND_ABORTED
#define COMP_STOPPED
#define COMP_STOPPED_LENGTH_INVALID
#define COMP_STOPPED_SHORT_PACKET
#define COMP_MAX_EXIT_LATENCY_TOO_LARGE_ERROR
#define COMP_ISOCH_BUFFER_OVERRUN
#define COMP_EVENT_LOST_ERROR
#define COMP_UNDEFINED_ERROR
#define COMP_INVALID_STREAM_ID_ERROR
#define COMP_SECONDARY_BANDWIDTH_ERROR
#define COMP_SPLIT_TRANSACTION_ERROR

static inline const char *xhci_trb_comp_code_string(u8 status)
{}

struct xhci_link_trb {};

/* control bitfields */
#define LINK_TOGGLE

/* Command completion event TRB */
struct xhci_event_cmd {};

/* Address device - disable SetAddress */
#define TRB_BSR

/* Configure Endpoint - Deconfigure */
#define TRB_DC

/* Stop Ring - Transfer State Preserve */
#define TRB_TSP

enum xhci_ep_reset_type {};

/* Force Event */
#define TRB_TO_VF_INTR_TARGET(p)
#define TRB_TO_VF_ID(p)

/* Set Latency Tolerance Value */
#define TRB_TO_BELT(p)

/* Get Port Bandwidth */
#define TRB_TO_DEV_SPEED(p)

/* Force Header */
#define TRB_TO_PACKET_TYPE(p)
#define TRB_TO_ROOTHUB_PORT(p)

enum xhci_setup_dev {};

/* bits 16:23 are the virtual function ID */
/* bits 24:31 are the slot ID */

/* Stop Endpoint TRB - ep_index to endpoint ID for this TRB */
#define SUSPEND_PORT_FOR_TRB(p)
#define TRB_TO_SUSPEND_PORT(p)
#define LAST_EP_INDEX

/* Set TR Dequeue Pointer command TRB fields, 6.4.3.9 */
#define TRB_TO_STREAM_ID(p)
#define STREAM_ID_FOR_TRB(p)
#define SCT_FOR_TRB(p)

/* Link TRB specific fields */
#define TRB_TC

/* Port Status Change Event TRB fields */
/* Port ID - bits 31:24 */
#define GET_PORT_ID(p)

#define EVENT_DATA

/* Normal TRB fields */
/* transfer_len bitmasks - bits 0:16 */
#define TRB_LEN(p)
/* TD Size, packets remaining in this TD, bits 21:17 (5 bits, so max 31) */
#define TRB_TD_SIZE(p)
#define GET_TD_SIZE(p)
/* xhci 1.1 uses the TD_SIZE field for TBC if Extended TBC is enabled (ETE) */
#define TRB_TD_SIZE_TBC(p)
/* Interrupter Target - which MSI-X vector to target the completion event at */
#define TRB_INTR_TARGET(p)
#define GET_INTR_TARGET(p)
/* Total burst count field, Rsvdz on xhci 1.1 with Extended TBC enabled (ETE) */
#define TRB_TBC(p)
#define TRB_TLBPC(p)
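
/*
 * Illustrative sketch of the TD Size rule above (cf. xhci_td_remainder()):
 * the field holds the number of max-packet-sized packets still to be sent
 * after this TRB, is 0 on the last TRB of a TD, and is capped to fit the
 * 5-bit field.
 */
static inline u32 xhci_example_td_size(unsigned int transferred,
				       unsigned int trb_len,
				       unsigned int td_total_len,
				       unsigned int maxp, bool last_trb)
{
	u32 total_packets = DIV_ROUND_UP(td_total_len, maxp);

	if (last_trb || transferred + trb_len == td_total_len)
		return 0;
	return min_t(u32, 31, total_packets - (transferred + trb_len) / maxp);
}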

/* Cycle bit - indicates TRB ownership by HC or HCD */
#define TRB_CYCLE
/*
 * Force next event data TRB to be evaluated before task switch.
 * Used to pass OS data back after a TD completes.
 */
#define TRB_ENT
/* Interrupt on short packet */
#define TRB_ISP
/* Set PCIe no snoop attribute */
#define TRB_NO_SNOOP
/* Chain multiple TRBs into a TD */
#define TRB_CHAIN
/* Interrupt on completion */
#define TRB_IOC
/* The buffer pointer contains immediate data */
#define TRB_IDT
/* TDs smaller than this might use IDT */
#define TRB_IDT_MAX_SIZE

/* Block Event Interrupt */
#define TRB_BEI

/* Control transfer TRB specific fields */
#define TRB_DIR_IN
#define TRB_TX_TYPE(p)
#define TRB_DATA_OUT
#define TRB_DATA_IN

/* Isochronous TRB specific fields */
#define TRB_SIA
#define TRB_FRAME_ID(p)

/* TRB cache size for xHC with TRB cache */
#define TRB_CACHE_SIZE_HS
#define TRB_CACHE_SIZE_SS

struct xhci_generic_trb {};

union xhci_trb {};

/* TRB bit mask */
#define TRB_TYPE_BITMASK
#define TRB_TYPE(p)
#define TRB_FIELD_TO_TYPE(p)
/* TRB type IDs */
/* bulk, interrupt, isoc scatter/gather, and control data stage */
#define TRB_NORMAL
/* setup stage for control transfers */
#define TRB_SETUP
/* data stage for control transfers */
#define TRB_DATA
/* status stage for control transfers */
#define TRB_STATUS
/* isoc transfers */
#define TRB_ISOC
/* TRB for linking ring segments */
#define TRB_LINK
#define TRB_EVENT_DATA
/* Transfer Ring No-op (not for the command ring) */
#define TRB_TR_NOOP
/* Command TRBs */
/* Enable Slot Command */
#define TRB_ENABLE_SLOT
/* Disable Slot Command */
#define TRB_DISABLE_SLOT
/* Address Device Command */
#define TRB_ADDR_DEV
/* Configure Endpoint Command */
#define TRB_CONFIG_EP
/* Evaluate Context Command */
#define TRB_EVAL_CONTEXT
/* Reset Endpoint Command */
#define TRB_RESET_EP
/* Stop Transfer Ring Command */
#define TRB_STOP_RING
/* Set Transfer Ring Dequeue Pointer Command */
#define TRB_SET_DEQ
/* Reset Device Command */
#define TRB_RESET_DEV
/* Force Event Command (opt) */
#define TRB_FORCE_EVENT
/* Negotiate Bandwidth Command (opt) */
#define TRB_NEG_BANDWIDTH
/* Set Latency Tolerance Value Command (opt) */
#define TRB_SET_LT
/* Get port bandwidth Command */
#define TRB_GET_BW
/* Force Header Command - generate a transaction or link management packet */
#define TRB_FORCE_HEADER
/* No-op Command - not for transfer rings */
#define TRB_CMD_NOOP
/* TRB IDs 24-31 reserved */
/* Event TRBS */
/* Transfer Event */
#define TRB_TRANSFER
/* Command Completion Event */
#define TRB_COMPLETION
/* Port Status Change Event */
#define TRB_PORT_STATUS
/* Bandwidth Request Event (opt) */
#define TRB_BANDWIDTH_EVENT
/* Doorbell Event (opt) */
#define TRB_DOORBELL
/* Host Controller Event */
#define TRB_HC_EVENT
/* Device Notification Event - device sent function wake notification */
#define TRB_DEV_NOTE
/* MFINDEX Wrap Event - microframe counter wrapped */
#define TRB_MFINDEX_WRAP
/* TRB IDs 40-47 reserved, 48-63 is vendor-defined */
#define TRB_VENDOR_DEFINED_LOW
/* Nec vendor-specific command completion event. */
#define TRB_NEC_CMD_COMP
/* Get NEC firmware revision. */
#define TRB_NEC_GET_FW

static inline const char *xhci_trb_type_string(u8 type)
{}

#define TRB_TYPE_LINK(x)
/* Above, but for __le32 types -- can avoid work by swapping constants: */
#define TRB_TYPE_LINK_LE32(x)
#define TRB_TYPE_NOOP_LE32(x)
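
/*
 * Illustrative sketch of the point above: when testing a __le32 TRB control
 * word, byte-swap the constant at compile time instead of the register value
 * at run time; this is essentially what the _LE32 variants do.
 */
static inline bool xhci_example_is_link_trb(__le32 control)
{
	return (control & cpu_to_le32(TRB_TYPE_BITMASK)) ==
	       cpu_to_le32(TRB_TYPE(TRB_LINK));
}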

#define NEC_FW_MINOR(p)
#define NEC_FW_MAJOR(p)

/*
 * TRBS_PER_SEGMENT must be a multiple of 4,
 * since the command ring is 64-byte aligned.
 * It must also be greater than 16.
 */
#define TRBS_PER_SEGMENT
/* Allow two commands + a link TRB, along with any reserved command TRBs */
#define MAX_RSVD_CMD_TRBS
#define TRB_SEGMENT_SIZE
#define TRB_SEGMENT_SHIFT
/* TRB buffer pointers can't cross 64KB boundaries */
#define TRB_MAX_BUFF_SHIFT
#define TRB_MAX_BUFF_SIZE
/* How much data is left before the 64KB boundary? */
#define TRB_BUFF_LEN_UP_TO_BOUNDARY(addr)
#define MAX_SOFT_RETRY
/*
 * Limits of consecutive isoc trbs that can Block Event Interrupt (BEI) if
 * XHCI_AVOID_BEI quirk is in use.
 */
#define AVOID_BEI_INTERVAL_MIN
#define AVOID_BEI_INTERVAL_MAX
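
/*
 * Illustrative sketch of the 64KB boundary rule above (essentially what
 * count_trbs(), declared later in this header, computes): no single TRB
 * buffer may cross a 64KB boundary, so the TRB count for @len bytes starting
 * at @addr is the number of 64KB windows the range touches.
 */
static inline unsigned int xhci_example_count_trbs(u64 addr, u64 len)
{
	unsigned int num_trbs;

	num_trbs = DIV_ROUND_UP(len + (addr & (TRB_MAX_BUFF_SIZE - 1)),
				TRB_MAX_BUFF_SIZE);
	if (num_trbs == 0)
		num_trbs++;	/* a zero-length transfer still needs one TRB */
	return num_trbs;
}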

struct xhci_segment {};

enum xhci_cancelled_td_status {};

struct xhci_td {};

/*
 * xHCI command default timeout value in milliseconds.
 * USB 3.2 spec, section 9.2.6.1
 */
#define XHCI_CMD_DEFAULT_TIMEOUT

/* command descriptor */
struct xhci_cd {};

enum xhci_ring_type {};

static inline const char *xhci_ring_type_string(enum xhci_ring_type type)
{}

struct xhci_ring {};

struct xhci_erst_entry {};

struct xhci_erst {};

struct xhci_scratchpad {};

struct urb_priv {};

/* Number of Event Ring segments to allocate when the amount is not specified (spec allows 32k) */
#define ERST_DEFAULT_SEGS
/* Poll every 60 seconds */
#define POLL_TIMEOUT
/* Stop endpoint command timeout (secs) for URB cancellation watchdog timer */
#define XHCI_STOP_EP_CMD_TIMEOUT
/* XXX: Make these module parameters */

struct s3_save {};

/* Use for lpm */
struct dev_info {};

struct xhci_bus_state {};

struct xhci_interrupter {};
/*
 * It can take up to 20 ms to transition from RExit to U0 on the
 * Intel Lynx Point LP xHCI host.
 */
#define XHCI_MAX_REXIT_TIMEOUT_MS
struct xhci_port_cap {};

struct xhci_port {};

struct xhci_hub {};

/* There is one xhci_hcd structure per controller */
struct xhci_hcd {};

/* Platform specific overrides to generic XHCI hc_driver ops */
struct xhci_driver_overrides {};

#define XHCI_CFC_DELAY

/* convert between an HCD pointer and the corresponding xhci_hcd */
static inline struct xhci_hcd *hcd_to_xhci(struct usb_hcd *hcd)
{}

static inline struct usb_hcd *xhci_to_hcd(struct xhci_hcd *xhci)
{}

static inline struct usb_hcd *xhci_get_usb3_hcd(struct xhci_hcd *xhci)
{}

static inline bool xhci_hcd_is_usb3(struct usb_hcd *hcd)
{}

static inline bool xhci_has_one_roothub(struct xhci_hcd *xhci)
{}

#define xhci_dbg(xhci, fmt, args...)
#define xhci_err(xhci, fmt, args...)
#define xhci_warn(xhci, fmt, args...)
#define xhci_info(xhci, fmt, args...)

/*
 * Registers should always be accessed with double word or quad word accesses.
 *
 * Some xHCI implementations may support 64-bit address pointers.  Registers
 * with 64-bit address pointers should be written to with dword accesses by
 * writing the low dword first (ptr[0]), then the high dword (ptr[1]) second.
 * xHCI implementations that do not support 64-bit address pointers will ignore
 * the high dword, and write order is irrelevant.
 */
static inline u64 xhci_read_64(const struct xhci_hcd *xhci,
		__le64 __iomem *regs)
{}
static inline void xhci_write_64(struct xhci_hcd *xhci,
				 const u64 val, __le64 __iomem *regs)
{}
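
/*
 * Illustrative sketch of the access pattern described above, using the
 * helper from <linux/io-64-nonatomic-lo-hi.h>: on hosts without native
 * 64-bit MMIO, lo_hi_writeq() issues two 32-bit writes, low dword first,
 * which is the ordering the spec requires for 64-bit pointer registers.
 */
static inline void xhci_example_write_64(u64 val, __le64 __iomem *regs)
{
	lo_hi_writeq(val, regs);
}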


/* Link TRB chain should always be set on 0.95 hosts, and AMD 0.96 ISOC rings */
static inline bool xhci_link_chain_quirk(struct xhci_hcd *xhci, enum xhci_ring_type type)
{}

/* xHCI debugging */
char *xhci_get_slot_state(struct xhci_hcd *xhci,
		struct xhci_container_ctx *ctx);
void xhci_dbg_trace(struct xhci_hcd *xhci, void (*trace)(struct va_format *),
			const char *fmt, ...);

/* xHCI memory management */
void xhci_mem_cleanup(struct xhci_hcd *xhci);
int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags);
void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id);
int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id, struct usb_device *udev, gfp_t flags);
int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *udev);
void xhci_copy_ep0_dequeue_into_input_ctx(struct xhci_hcd *xhci,
		struct usb_device *udev);
unsigned int xhci_get_endpoint_index(struct usb_endpoint_descriptor *desc);
unsigned int xhci_last_valid_endpoint(u32 added_ctxs);
void xhci_endpoint_zero(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev, struct usb_host_endpoint *ep);
void xhci_update_tt_active_eps(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		int old_active_eps);
void xhci_clear_endpoint_bw_info(struct xhci_bw_info *bw_info);
void xhci_update_bw_info(struct xhci_hcd *xhci,
		struct xhci_container_ctx *in_ctx,
		struct xhci_input_control_ctx *ctrl_ctx,
		struct xhci_virt_device *virt_dev);
void xhci_endpoint_copy(struct xhci_hcd *xhci,
		struct xhci_container_ctx *in_ctx,
		struct xhci_container_ctx *out_ctx,
		unsigned int ep_index);
void xhci_slot_copy(struct xhci_hcd *xhci,
		struct xhci_container_ctx *in_ctx,
		struct xhci_container_ctx *out_ctx);
int xhci_endpoint_init(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev,
		struct usb_device *udev, struct usb_host_endpoint *ep,
		gfp_t mem_flags);
struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
		unsigned int num_segs, unsigned int cycle_state,
		enum xhci_ring_type type, unsigned int max_packet, gfp_t flags);
void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring);
int xhci_ring_expansion(struct xhci_hcd *xhci, struct xhci_ring *ring,
		unsigned int num_trbs, gfp_t flags);
void xhci_initialize_ring_info(struct xhci_ring *ring,
			unsigned int cycle_state);
void xhci_free_endpoint_ring(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		unsigned int ep_index);
struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci,
		unsigned int num_stream_ctxs,
		unsigned int num_streams,
		unsigned int max_packet, gfp_t flags);
void xhci_free_stream_info(struct xhci_hcd *xhci,
		struct xhci_stream_info *stream_info);
void xhci_setup_streams_ep_input_ctx(struct xhci_hcd *xhci,
		struct xhci_ep_ctx *ep_ctx,
		struct xhci_stream_info *stream_info);
void xhci_setup_no_streams_ep_input_ctx(struct xhci_ep_ctx *ep_ctx,
		struct xhci_virt_ep *ep);
void xhci_free_device_endpoint_resources(struct xhci_hcd *xhci,
	struct xhci_virt_device *virt_dev, bool drop_control_ep);
struct xhci_ring *xhci_dma_to_transfer_ring(
		struct xhci_virt_ep *ep,
		u64 address);
struct xhci_command *xhci_alloc_command(struct xhci_hcd *xhci,
		bool allocate_completion, gfp_t mem_flags);
struct xhci_command *xhci_alloc_command_with_ctx(struct xhci_hcd *xhci,
		bool allocate_completion, gfp_t mem_flags);
void xhci_urb_free_priv(struct urb_priv *urb_priv);
void xhci_free_command(struct xhci_hcd *xhci,
		struct xhci_command *command);
struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci,
		int type, gfp_t flags);
void xhci_free_container_ctx(struct xhci_hcd *xhci,
		struct xhci_container_ctx *ctx);
struct xhci_interrupter *
xhci_create_secondary_interrupter(struct usb_hcd *hcd, unsigned int segs);
void xhci_remove_secondary_interrupter(struct usb_hcd
				       *hcd, struct xhci_interrupter *ir);

/* xHCI host controller glue */
typedef void (*xhci_get_quirks_t)(struct device *, struct xhci_hcd *);
int xhci_handshake(void __iomem *ptr, u32 mask, u32 done, u64 timeout_us);
int xhci_handshake_check_state(struct xhci_hcd *xhci, void __iomem *ptr,
		u32 mask, u32 done, int usec, unsigned int exit_state);
void xhci_quiesce(struct xhci_hcd *xhci);
int xhci_halt(struct xhci_hcd *xhci);
int xhci_start(struct xhci_hcd *xhci);
int xhci_reset(struct xhci_hcd *xhci, u64 timeout_us);
int xhci_run(struct usb_hcd *hcd);
int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks);
void xhci_shutdown(struct usb_hcd *hcd);
void xhci_stop(struct usb_hcd *hcd);
void xhci_init_driver(struct hc_driver *drv,
		      const struct xhci_driver_overrides *over);
int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
		      struct usb_host_endpoint *ep);
int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
		       struct usb_host_endpoint *ep);
int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev);
void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev);
int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
			   struct usb_tt *tt, gfp_t mem_flags);
int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id);
int xhci_ext_cap_init(struct xhci_hcd *xhci);

int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup);
int xhci_resume(struct xhci_hcd *xhci, pm_message_t msg);

irqreturn_t xhci_irq(struct usb_hcd *hcd);
irqreturn_t xhci_msi_irq(int irq, void *hcd);
int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev);
int xhci_alloc_tt_info(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		struct usb_device *hdev,
		struct usb_tt *tt, gfp_t mem_flags);

/* xHCI ring, segment, TRB, and TD functions */
dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg, union xhci_trb *trb);
struct xhci_segment *trb_in_td(struct xhci_hcd *xhci, struct xhci_td *td,
			       dma_addr_t suspect_dma, bool debug);
int xhci_is_vendor_info_code(struct xhci_hcd *xhci, unsigned int trb_comp_code);
void xhci_ring_cmd_db(struct xhci_hcd *xhci);
int xhci_queue_slot_control(struct xhci_hcd *xhci, struct xhci_command *cmd,
		u32 trb_type, u32 slot_id);
int xhci_queue_address_device(struct xhci_hcd *xhci, struct xhci_command *cmd,
		dma_addr_t in_ctx_ptr, u32 slot_id, enum xhci_setup_dev);
int xhci_queue_vendor_command(struct xhci_hcd *xhci, struct xhci_command *cmd,
		u32 field1, u32 field2, u32 field3, u32 field4);
int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, struct xhci_command *cmd,
		int slot_id, unsigned int ep_index, int suspend);
int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags, struct urb *urb,
		int slot_id, unsigned int ep_index);
int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, struct urb *urb,
		int slot_id, unsigned int ep_index);
int xhci_queue_intr_tx(struct xhci_hcd *xhci, gfp_t mem_flags, struct urb *urb,
		int slot_id, unsigned int ep_index);
int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
		struct urb *urb, int slot_id, unsigned int ep_index);
int xhci_queue_configure_endpoint(struct xhci_hcd *xhci,
		struct xhci_command *cmd, dma_addr_t in_ctx_ptr, u32 slot_id,
		bool command_must_succeed);
int xhci_queue_evaluate_context(struct xhci_hcd *xhci, struct xhci_command *cmd,
		dma_addr_t in_ctx_ptr, u32 slot_id, bool command_must_succeed);
int xhci_queue_reset_ep(struct xhci_hcd *xhci, struct xhci_command *cmd,
		int slot_id, unsigned int ep_index,
		enum xhci_ep_reset_type reset_type);
int xhci_queue_reset_device(struct xhci_hcd *xhci, struct xhci_command *cmd,
		u32 slot_id);
void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci, unsigned int slot_id,
			       unsigned int ep_index, unsigned int stream_id,
			       struct xhci_td *td);
void xhci_stop_endpoint_command_watchdog(struct timer_list *t);
void xhci_handle_command_timeout(struct work_struct *work);

void xhci_ring_ep_doorbell(struct xhci_hcd *xhci, unsigned int slot_id,
		unsigned int ep_index, unsigned int stream_id);
void xhci_ring_doorbell_for_active_rings(struct xhci_hcd *xhci,
		unsigned int slot_id,
		unsigned int ep_index);
void xhci_cleanup_command_queue(struct xhci_hcd *xhci);
void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring);
unsigned int count_trbs(u64 addr, u64 len);

/* xHCI roothub code */
void xhci_set_link_state(struct xhci_hcd *xhci, struct xhci_port *port,
				u32 link_state);
void xhci_test_and_clear_bit(struct xhci_hcd *xhci, struct xhci_port *port,
				u32 port_bit);
int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, u16 wIndex,
		char *buf, u16 wLength);
int xhci_hub_status_data(struct usb_hcd *hcd, char *buf);
int xhci_find_raw_port_number(struct usb_hcd *hcd, int port1);
struct xhci_hub *xhci_get_rhub(struct usb_hcd *hcd);

void xhci_hc_died(struct xhci_hcd *xhci);

#ifdef CONFIG_PM
int xhci_bus_suspend(struct usb_hcd *hcd);
int xhci_bus_resume(struct usb_hcd *hcd);
unsigned long xhci_get_resuming_ports(struct usb_hcd *hcd);
#else
#define xhci_bus_suspend
#define xhci_bus_resume
#define xhci_get_resuming_ports
#endif	/* CONFIG_PM */

u32 xhci_port_state_to_neutral(u32 state);
void xhci_ring_device(struct xhci_hcd *xhci, int slot_id);

/* xHCI contexts */
struct xhci_input_control_ctx *xhci_get_input_control_ctx(struct xhci_container_ctx *ctx);
struct xhci_slot_ctx *xhci_get_slot_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx);
struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx, unsigned int ep_index);

struct xhci_ring *xhci_triad_to_transfer_ring(struct xhci_hcd *xhci,
		unsigned int slot_id, unsigned int ep_index,
		unsigned int stream_id);

static inline struct xhci_ring *xhci_urb_to_transfer_ring(struct xhci_hcd *xhci,
								struct urb *urb)
{}

/*
 * TODO: As per the spec, Isochronous IDT transmissions are supported.  We
 * bypass them anyway, as we were unable to find a device that matches the
 * constraints.
 */
 */
static inline bool xhci_urb_suitable_for_idt(struct urb *urb)
{}

static inline char *xhci_slot_state_string(u32 state)
{}

static inline const char *xhci_decode_trb(char *str, size_t size,
					  u32 field0, u32 field1, u32 field2, u32 field3)
{}

static inline const char *xhci_decode_ctrl_ctx(char *str,
		unsigned long drop, unsigned long add)
{}

static inline const char *xhci_decode_slot_context(char *str,
		u32 info, u32 info2, u32 tt_info, u32 state)
{}


static inline const char *xhci_portsc_link_state_string(u32 portsc)
{}

static inline const char *xhci_decode_portsc(char *str, u32 portsc)
{}

static inline const char *xhci_decode_usbsts(char *str, u32 usbsts)
{}

static inline const char *xhci_decode_doorbell(char *str, u32 slot, u32 doorbell)
{}

static inline const char *xhci_ep_state_string(u8 state)
{}

static inline const char *xhci_ep_type_string(u8 type)
{}

static inline const char *xhci_decode_ep_context(char *str, u32 info,
		u32 info2, u64 deq, u32 tx_info)
{}

#endif /* __LINUX_XHCI_HCD_H */