// SPDX-License-Identifier: GPL-2.0
/*
 * MUSB OTG driver core code
 *
 * Copyright 2005 Mentor Graphics Corporation
 * Copyright (C) 2005-2006 by Texas Instruments
 * Copyright (C) 2006-2007 Nokia Corporation
 */

/*
 * Inventra (Multipoint) Dual-Role Controller Driver for Linux.
 *
 * This consists of a Host Controller Driver (HCD) and a peripheral
 * controller driver implementing the "Gadget" API; OTG support is
 * in the works.  These are normal Linux-USB controller drivers which
 * use IRQs and have no dedicated thread.
 *
 * This version of the driver has only been used with products from
 * Texas Instruments.  Those products integrate the Inventra logic
 * with other DMA, IRQ, and bus modules, as well as other logic that
 * needs to be reflected in this driver.
 *
 *
 * NOTE:  the original Mentor code here was pretty much a collection
 * of mechanisms that don't seem to have been fully integrated/working
 * for *any* Linux kernel version.  This version aims at Linux 2.6.now.
 * Key open issues include:
 *
 *  - Lack of host-side transaction scheduling, for all transfer types.
 *    The hardware doesn't do it; instead, software must.
 *
 *    This is not an issue for OTG devices that don't support external
 *    hubs, but for more "normal" USB hosts it's a user issue that the
 *    "multipoint" support doesn't scale in the expected ways.  That
 *    includes DaVinci EVM in a common non-OTG mode.
 *
 *      * Control and bulk use dedicated endpoints, and there's as
 *        yet no mechanism to either (a) reclaim the hardware when
 *        peripherals are NAKing, which gets complicated with bulk
 *        endpoints, or (b) use more than a single bulk endpoint in
 *        each direction.
 *
 *        RESULT:  one device may be perceived as blocking another one.
 *
 *      * Interrupt and isochronous will dynamically allocate endpoint
 *        hardware, but (a) there's no record keeping for bandwidth;
 *        (b) in the common case that few endpoints are available, there
 *        is no mechanism to reuse endpoints to talk to multiple devices.
 *
 *        RESULT:  At one extreme, bandwidth can be overcommitted in
 *        some hardware configurations and no faults will be reported.
 *        At the other extreme, the bandwidth capabilities which do
 *        exist tend to be severely undercommitted.  You can't yet hook
 *        up both a keyboard and a mouse to an external USB hub.
 */

/*
 * This gets many kinds of configuration information:
 *	- Kconfig for everything user-configurable
 *	- platform_device for addressing, irq, and platform_data
 *	- platform_data is mostly for board-specific information
 *	  (plus recently, SOC or family details)
 *
 * Most of the conditional compilation will (someday) vanish.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/kobject.h>
#include <linux/prefetch.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/dma-mapping.h>
#include <linux/usb.h>
#include <linux/usb/of.h>

#include "musb_core.h"
#include "musb_trace.h"

#define TA_WAIT_BCON(m) …

#define DRIVER_AUTHOR …
#define DRIVER_DESC …

#define MUSB_VERSION …

#define DRIVER_INFO …

#define MUSB_DRIVER_NAME …
const char musb_driver_name[] = …;

MODULE_DESCRIPTION(…);
MODULE_AUTHOR(…);
MODULE_LICENSE(…) …;
MODULE_ALIAS(…) …;

/*-------------------------------------------------------------------------*/

static inline struct musb *dev_to_musb(struct device *dev)
{ … }

enum musb_mode musb_get_mode(struct device *dev)
{ … }
EXPORT_SYMBOL_GPL(…);

/*-------------------------------------------------------------------------*/

static int musb_ulpi_read(struct usb_phy *phy, u32 reg)
{ … }

static int musb_ulpi_write(struct usb_phy *phy, u32 val, u32 reg)
{ … }

static struct usb_phy_io_ops musb_ulpi_access = …;

/*-------------------------------------------------------------------------*/

static u32 musb_default_fifo_offset(u8 epnum)
{ … }

/* "flat" mapping: each endpoint has its own i/o address */
static void musb_flat_ep_select(void __iomem *mbase, u8 epnum)
{ … }

static u32 musb_flat_ep_offset(u8 epnum, u16 offset)
{ … }

/* "indexed" mapping: INDEX register controls register bank select */
static void musb_indexed_ep_select(void __iomem *mbase, u8 epnum)
{ … }

static u32 musb_indexed_ep_offset(u8 epnum, u16 offset)
{ … }

static u32 musb_default_busctl_offset(u8 epnum, u16 offset)
{ … }

static u8 musb_default_readb(void __iomem *addr, u32 offset)
{ … }

static void musb_default_writeb(void __iomem *addr, u32 offset, u8 data)
{ … }

static u16 musb_default_readw(void __iomem *addr, u32 offset)
{ … }

static void musb_default_writew(void __iomem *addr, u32 offset, u16 data)
{ … }

static u16 musb_default_get_toggle(struct musb_qh *qh, int is_out)
{ … }

static u16 musb_default_set_toggle(struct musb_qh *qh, int is_out,
				   struct urb *urb)
{ … }

/*
 * Load an endpoint's FIFO
 */
static void musb_default_write_fifo(struct musb_hw_ep *hw_ep, u16 len,
				    const u8 *src)
{ … }

/*
 * Unload an endpoint's FIFO
 */
static void musb_default_read_fifo(struct musb_hw_ep *hw_ep, u16 len, u8 *dst)
{ … }

/*
 * Old style IO functions
 */
u8 (*musb_readb)(void __iomem *addr, u32 offset);
EXPORT_SYMBOL_GPL(…);

void (*musb_writeb)(void __iomem *addr, u32 offset, u8 data);
EXPORT_SYMBOL_GPL(…);

u8 (*musb_clearb)(void __iomem *addr, u32 offset);
EXPORT_SYMBOL_GPL(…);

u16 (*musb_readw)(void __iomem *addr, u32 offset);
EXPORT_SYMBOL_GPL(…);

void (*musb_writew)(void __iomem *addr, u32 offset, u16 data);
EXPORT_SYMBOL_GPL(…);

u16 (*musb_clearw)(void __iomem *addr, u32 offset);
EXPORT_SYMBOL_GPL(…);

u32 musb_readl(void __iomem *addr, u32 offset)
{ … }
EXPORT_SYMBOL_GPL(…);

void musb_writel(void __iomem *addr, u32 offset, u32 data)
{ … }
EXPORT_SYMBOL_GPL(…);

#ifndef CONFIG_MUSB_PIO_ONLY
struct dma_controller *
(*musb_dma_controller_create)(struct musb *musb, void __iomem *base);
EXPORT_SYMBOL(musb_dma_controller_create);

void (*musb_dma_controller_destroy)(struct dma_controller *c);
EXPORT_SYMBOL(musb_dma_controller_destroy);
#endif

/*
 * New style IO functions
 */
void musb_read_fifo(struct musb_hw_ep *hw_ep, u16 len, u8 *dst)
{ … }

void musb_write_fifo(struct musb_hw_ep *hw_ep, u16 len, const u8 *src)
{ … }
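/*
 * For illustration: the "old style" accessors above are global function
 * pointers so that SoC glue with unusual register access (byte swapping,
 * non-standard bus widths, etc.) can substitute its own helpers.  A glue
 * driver would typically supply such overrides through its
 * struct musb_platform_ops; a rough, hypothetical sketch (the helper
 * names are made up for this example):
 *
 *	static const struct musb_platform_ops my_glue_ops = {
 *		.readb	= my_glue_readb,
 *		.writeb	= my_glue_writeb,
 *		.init	= my_glue_init,
 *	};
 *
 * The core is then expected to patch musb_readb()/musb_writeb() from
 * these ops during controller init, before touching the hardware.
 */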
static u8 musb_read_devctl(struct musb *musb)
{ … }

/**
 * musb_set_host - set and initialize host mode
 * @musb: musb controller driver data
 *
 * At least some musb revisions need to enable devctl session bit in
 * peripheral mode to switch to host mode. Initializes things to host
 * mode and sets A_IDLE. SoC glue needs to advance state further
 * based on phy provided VBUS state.
 *
 * Note that the SoC glue code may need to wait for musb to settle
 * on enable before calling this to avoid babble.
 */
int musb_set_host(struct musb *musb)
{ … }
EXPORT_SYMBOL_GPL(…);

/**
 * musb_set_peripheral - set and initialize peripheral mode
 * @musb: musb controller driver data
 *
 * Clears devctl session bit and initializes things for peripheral
 * mode and sets B_IDLE. SoC glue needs to advance state further
 * based on phy provided VBUS state.
 */
int musb_set_peripheral(struct musb *musb)
{ … }
EXPORT_SYMBOL_GPL(…);

/*-------------------------------------------------------------------------*/

/* for high speed test mode; see USB 2.0 spec 7.1.20 */
static const u8 musb_test_packet[53] = …;

void musb_load_testpacket(struct musb *musb)
{ … }

/*-------------------------------------------------------------------------*/

/*
 * Handles OTG hnp timeouts, such as b_ase0_brst
 */
static void musb_otg_timer_func(struct timer_list *t)
{ … }

/*
 * Stops the HNP transition. Caller must take care of locking.
 */
void musb_hnp_stop(struct musb *musb)
{ … }

static void musb_recover_from_babble(struct musb *musb);

static void musb_handle_intr_resume(struct musb *musb, u8 devctl)
{ … }

/* return IRQ_HANDLED to tell the caller to return immediately */
static irqreturn_t musb_handle_intr_sessreq(struct musb *musb, u8 devctl)
{ … }

static void musb_handle_intr_vbuserr(struct musb *musb, u8 devctl)
{ … }

static void musb_handle_intr_suspend(struct musb *musb, u8 devctl)
{ … }

static void musb_handle_intr_connect(struct musb *musb, u8 devctl, u8 int_usb)
{ … }

static void musb_handle_intr_disconnect(struct musb *musb, u8 devctl)
{ … }

/*
 * mentor saves a bit: bus reset and babble share the same irq.
 * only host sees babble; only peripheral sees bus reset.
 */
static void musb_handle_intr_reset(struct musb *musb)
{ … }

/*
 * Interrupt Service Routine to record USB "global" interrupts.
 * Since these do not happen often and signify things of
 * paramount importance, it seems OK to check them individually;
 * the order of the tests is specified in the manual.
 *
 * @param musb instance pointer
 * @param int_usb register contents
 * @param devctl
 */
static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb, u8 devctl)
{ … }

/*-------------------------------------------------------------------------*/

static void musb_disable_interrupts(struct musb *musb)
{ … }

static void musb_enable_interrupts(struct musb *musb)
{ … }

/*
 * Program the HDRC to start (enable interrupts, dma, etc.).
 */
void musb_start(struct musb *musb)
{ … }

/*
 * Make the HDRC stop (disable interrupts, etc.);
 * reversible by musb_start
 * called on gadget driver unregister
 * with controller locked, irqs blocked
 * acts as a NOP unless some role activated the hardware
 */
void musb_stop(struct musb *musb)
{ … }

/*-------------------------------------------------------------------------*/
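/*
 * For illustration: as the kernel-doc for musb_set_host() and
 * musb_set_peripheral() above notes, those helpers only initialize the
 * controller and leave it in A_IDLE or B_IDLE; the SoC glue is expected
 * to pick the role from the PHY-reported VBUS/ID state.  A glue status
 * handler might therefore do something roughly like this ("musb" and
 * "status" are assumed locals):
 *
 *	switch (status) {
 *	case MUSB_ID_GROUND:		// ID grounded: behave as A-device/host
 *		musb_set_host(musb);
 *		break;
 *	case MUSB_VBUS_VALID:		// VBUS from a host: behave as peripheral
 *		musb_set_peripheral(musb);
 *		break;
 *	default:
 *		break;
 *	}
 */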
/*
 * The silicon either has hard-wired endpoint configurations, or else
 * "dynamic fifo" sizing.  The driver has support for both, though at this
 * writing only the dynamic sizing is very well tested.  Since we switched
 * away from compile-time hardware parameters, we can no longer rely on
 * dead code elimination to leave only the relevant one in the object file.
 *
 * We don't currently use dynamic fifo setup capability to do anything
 * more than selecting one of a bunch of predefined configurations.
 */
static ushort fifo_mode;

/* "modprobe ... fifo_mode=1" etc */
module_param(fifo_mode, ushort, 0);
MODULE_PARM_DESC(…) …;

/*
 * tables defining fifo_mode values. define more if you like.
 * for host side, make sure both halves of ep1 are set up.
 */

/* mode 0 - fits in 2KB */
static struct musb_fifo_cfg mode_0_cfg[] = …;

/* mode 1 - fits in 4KB */
static struct musb_fifo_cfg mode_1_cfg[] = …;

/* mode 2 - fits in 4KB */
static struct musb_fifo_cfg mode_2_cfg[] = …;

/* mode 3 - fits in 4KB */
static struct musb_fifo_cfg mode_3_cfg[] = …;

/* mode 4 - fits in 16KB */
static struct musb_fifo_cfg mode_4_cfg[] = …;

/* mode 5 - fits in 8KB */
static struct musb_fifo_cfg mode_5_cfg[] = …;

/*
 * configure a fifo; for non-shared endpoints, this may be called
 * once for a tx fifo and once for an rx fifo.
 *
 * returns negative errno or offset for next fifo.
 */
static int fifo_setup(struct musb *musb, struct musb_hw_ep *hw_ep,
		      const struct musb_fifo_cfg *cfg, u16 offset)
{ … }

static struct musb_fifo_cfg ep0_cfg = …;

static int ep_config_from_table(struct musb *musb)
{ … }

/*
 * ep_config_from_hw - when MUSB_C_DYNFIFO_DEF is false
 * @param musb the controller
 */
static int ep_config_from_hw(struct musb *musb)
{ … }

enum { … };

/* Initialize MUSB (M)HDRC part of the USB hardware subsystem;
 * configure endpoints, or take their config from silicon
 */
static int musb_core_init(u16 musb_type, struct musb *musb)
{ … }

/*-------------------------------------------------------------------------*/

/*
 * handle all the irqs defined by the HDRC core. for now we expect:  other
 * irq sources (phy, dma, etc) will be handled first, musb->int_* values
 * will be assigned, and the irq will already have been acked.
 *
 * called in irq context with spinlock held, irqs blocked
 */
irqreturn_t musb_interrupt(struct musb *musb)
{ … }
EXPORT_SYMBOL_GPL(…);

#ifndef CONFIG_MUSB_PIO_ONLY
static bool use_dma = true;

/* "modprobe ... use_dma=0" etc */
module_param(use_dma, bool, 0644);
MODULE_PARM_DESC(use_dma, "enable/disable use of DMA");

void musb_dma_completion(struct musb *musb, u8 epnum, u8 transmit)
{
	/* called with controller lock already held */

	if (!epnum) {
		if (!is_cppi_enabled(musb)) {
			/* endpoint 0 */
			if (is_host_active(musb))
				musb_h_ep0_irq(musb);
			else
				musb_g_ep0_irq(musb);
		}
	} else {
		/* endpoints 1..15 */
		if (transmit) {
			if (is_host_active(musb))
				musb_host_tx(musb, epnum);
			else
				musb_g_tx(musb, epnum);
		} else {
			/* receive */
			if (is_host_active(musb))
				musb_host_rx(musb, epnum);
			else
				musb_g_rx(musb, epnum);
		}
	}
}
EXPORT_SYMBOL_GPL(musb_dma_completion);

#else
#define use_dma …
#endif

static int (*musb_phy_callback)(enum musb_vbus_id_status status);
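/*
 * For illustration: the SoC glue registers musb_phy_callback (typically
 * through its platform ops) so that VBUS/ID changes reported by the PHY
 * can be routed back to the glue.  A PHY driver that detects a state
 * change would then report it with calls roughly like:
 *
 *	musb_mailbox(MUSB_VBUS_VALID);	// VBUS appeared
 *	musb_mailbox(MUSB_VBUS_OFF);	// VBUS dropped
 *
 * using the enum musb_vbus_id_status values from <linux/usb/musb.h>.
 */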
/*
 * musb_mailbox - optional phy notifier function
 * @status phy state change
 *
 * Optionally gets called from the USB PHY. Note that the USB PHY must be
 * disabled at the point the phy_callback is registered or unregistered.
 */
int musb_mailbox(enum musb_vbus_id_status status)
{
	if (musb_phy_callback)
		return musb_phy_callback(status);

	return -ENODEV;
}
EXPORT_SYMBOL_GPL(…);

/*-------------------------------------------------------------------------*/

static ssize_t
mode_show(struct device *dev, struct device_attribute *attr, char *buf)
{ … }

static ssize_t
mode_store(struct device *dev, struct device_attribute *attr,
	   const char *buf, size_t n)
{ … }
static DEVICE_ATTR_RW(mode);

static ssize_t
vbus_store(struct device *dev, struct device_attribute *attr,
	   const char *buf, size_t n)
{ … }

static ssize_t
vbus_show(struct device *dev, struct device_attribute *attr, char *buf)
{ … }
static DEVICE_ATTR_RW(vbus);

/* Gadget drivers can't know that a host is connected so they might want
 * to start SRP, but users can. This allows userspace to trigger SRP.
 */
static ssize_t srp_store(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t n)
{ … }
static DEVICE_ATTR_WO(srp);

static struct attribute *musb_attrs[] = …;
ATTRIBUTE_GROUPS(…);

#define MUSB_QUIRK_B_INVALID_VBUS_91	…
#define MUSB_QUIRK_B_DISCONNECT_99	…
#define MUSB_QUIRK_A_DISCONNECT_19	…

static bool musb_state_needs_recheck(struct musb *musb, u8 devctl,
				     const char *desc)
{ … }

/*
 * Check the musb devctl session bit to determine if we want to
 * allow PM runtime for the device. In general, we want to keep things
 * active when the session bit is set except after host disconnect.
 *
 * Only called from musb_irq_work. If this ever needs to get called
 * elsewhere, proper locking must be implemented for musb->session.
 */
static void musb_pm_runtime_check_session(struct musb *musb)
{ … }

/* Only used to provide driver mode change events */
static void musb_irq_work(struct work_struct *data)
{ … }

static void musb_recover_from_babble(struct musb *musb)
{ … }

/* --------------------------------------------------------------------------
 * Init support
 */

static struct musb *allocate_instance(struct device *dev,
				      const struct musb_hdrc_config *config,
				      void __iomem *mbase)
{ … }

static void musb_free(struct musb *musb)
{ … }

struct musb_pending_work { … };

#ifdef CONFIG_PM
/*
 * Called from musb_runtime_resume(), musb_resume(), and
 * musb_queue_resume_work(). Callers must take musb->lock.
 */
static int musb_run_resume_work(struct musb *musb)
{ … }
#endif

/*
 * Called to run work if device is active or else queue the work to happen
 * on resume. Caller must take musb->lock and must hold an RPM reference.
 *
 * Note that we cowardly refuse to queue work once musb PM runtime resume
 * has finished calling musb_run_resume_work(), and return -EINPROGRESS
 * instead.
 */
int musb_queue_resume_work(struct musb *musb,
			   int (*callback)(struct musb *musb, void *data),
			   void *data)
{ … }
EXPORT_SYMBOL_GPL(…);

static void musb_deassert_reset(struct work_struct *work)
{ … }

/*
 * Perform generic per-controller initialization.
 *
 * @dev: the controller (already clocked, etc)
 * @nIrq: IRQ number
 * @ctrl: virtual address of controller registers,
 *	not yet corrected for platform-specific offsets
 */
static int
musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
{ … }

/*-------------------------------------------------------------------------*/

/* all implementations (PCI bridge to FPGA, VLYNQ, etc) should just
 * bridge to a platform device; this driver then suffices.
 */
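/*
 * For illustration: "bridging to a platform device" means the bus- or
 * SoC-specific glue creates a "musb-hdrc" child carrying the register/irq
 * resources and a struct musb_hdrc_platform_data, which this core driver
 * then probes against.  A glue probe might do roughly the following
 * (error handling omitted; "res" and "pdata" are assumed locals):
 *
 *	struct platform_device *musb;
 *
 *	musb = platform_device_alloc("musb-hdrc", PLATFORM_DEVID_AUTO);
 *	musb->dev.parent = &pdev->dev;
 *	platform_device_add_resources(musb, res, ARRAY_SIZE(res));
 *	platform_device_add_data(musb, pdata, sizeof(*pdata));
 *	platform_device_add(musb);
 */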
static int musb_probe(struct platform_device *pdev)
{ … }

static void musb_remove(struct platform_device *pdev)
{ … }

#ifdef CONFIG_PM

static void musb_save_context(struct musb *musb)
{ … }

static void musb_restore_context(struct musb *musb)
{ … }

static int musb_suspend(struct device *dev)
{ … }

static int musb_resume(struct device *dev)
{ … }

static int musb_runtime_suspend(struct device *dev)
{ … }

static int musb_runtime_resume(struct device *dev)
{ … }

static const struct dev_pm_ops musb_dev_pm_ops = …;

#define MUSB_DEV_PM_OPS …
#else
#define MUSB_DEV_PM_OPS …
#endif

static struct platform_driver musb_driver = …;

module_platform_driver(…) …;