#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/dma-mapping.h>
#include "musb_core.h"
#include "musb_host.h"
#include "musb_trace.h"
struct musb *hcd_to_musb(struct usb_hcd *hcd)
{ … }
static void musb_ep_program(struct musb *musb, u8 epnum,
struct urb *urb, int is_out,
u8 *buf, u32 offset, u32 len);
static void musb_h_tx_flush_fifo(struct musb_hw_ep *ep)
{ … }
static void musb_h_ep0_flush_fifo(struct musb_hw_ep *ep)
{ … }
static inline void musb_h_tx_start(struct musb_hw_ep *ep)
{ … }
static inline void musb_h_tx_dma_start(struct musb_hw_ep *ep)
{ … }
static void musb_ep_set_qh(struct musb_hw_ep *ep, int is_in, struct musb_qh *qh)
{ … }
static struct musb_qh *musb_ep_get_qh(struct musb_hw_ep *ep, int is_in)
{ … }
static void
musb_start_urb(struct musb *musb, int is_in, struct musb_qh *qh)
{ … }
static void musb_giveback(struct musb *musb, struct urb *urb, int status)
__releases(musb->lock)
__acquires(musb->lock)
{ … }
static void musb_advance_schedule(struct musb *musb, struct urb *urb,
struct musb_hw_ep *hw_ep, int is_in)
{ … }
static u16 musb_h_flush_rxfifo(struct musb_hw_ep *hw_ep, u16 csr)
{ … }
static bool
musb_host_packet_rx(struct musb *musb, struct urb *urb, u8 epnum, u8 iso_err)
{ … }
static void
musb_rx_reinit(struct musb *musb, struct musb_qh *qh, u8 epnum)
{ … }
static void musb_tx_dma_set_mode_mentor(struct musb_hw_ep *hw_ep,
struct musb_qh *qh,
u32 *length, u8 *mode)
{ … }
static void musb_tx_dma_set_mode_cppi_tusb(struct musb_hw_ep *hw_ep,
struct urb *urb,
u8 *mode)
{ … }
static bool musb_tx_dma_program(struct dma_controller *dma,
struct musb_hw_ep *hw_ep, struct musb_qh *qh,
struct urb *urb, u32 offset, u32 length)
{ … }
static void musb_ep_program(struct musb *musb, u8 epnum,
struct urb *urb, int is_out,
u8 *buf, u32 offset, u32 len)
{ … }
static void musb_bulk_nak_timeout(struct musb *musb, struct musb_hw_ep *ep,
int is_in)
{ … }
static bool musb_h_ep0_continue(struct musb *musb, u16 len, struct urb *urb)
{ … }
irqreturn_t musb_h_ep0_irq(struct musb *musb)
{ … }
#ifdef CONFIG_USB_INVENTRA_DMA
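/*
 * Note on host-side TX (OUT) with Mentor/Inventra DMA: after the DMA
 * completion interrupt, software must set TXPKTRDY itself in mode 0 and
 * for a trailing short packet in mode 1; max-size packets in mode 1 are
 * flagged ready by the controller.
 */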
#endif
void musb_host_tx(struct musb *musb, u8 epnum)
{ … }
#ifdef CONFIG_USB_TI_CPPI41_DMA
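/* Program the CPPI 4.1 RX channel for the next isochronous frame of this URB. */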
static int musb_rx_dma_iso_cppi41(struct dma_controller *dma,
struct musb_hw_ep *hw_ep,
struct musb_qh *qh,
struct urb *urb,
size_t len)
{
struct dma_channel *channel = hw_ep->rx_channel;
void __iomem *epio = hw_ep->regs;
dma_addr_t *buf;
u32 length;
u16 val;
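/* DMA destination: this ISO frame's offset within the mapped transfer buffer */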
buf = (void *)urb->iso_frame_desc[qh->iso_idx].offset +
(u32)urb->transfer_dma;
length = urb->iso_frame_desc[qh->iso_idx].length;
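/* enable DMA on the RX endpoint before programming the channel */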
val = musb_readw(epio, MUSB_RXCSR);
val |= MUSB_RXCSR_DMAENAB;
musb_writew(epio, MUSB_RXCSR, val);
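/* mode 0: one channel_program() call per ISO frame */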
return dma->channel_program(channel, qh->maxpacket, 0,
(u32)buf, length);
}
#else
static inline int musb_rx_dma_iso_cppi41(struct dma_controller *dma,
struct musb_hw_ep *hw_ep,
struct musb_qh *qh,
struct urb *urb,
size_t len)
{ … }
#endif
#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_UX500_DMA) || \
defined(CONFIG_USB_TI_CPPI41_DMA)
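/*
 * Handle RX DMA completion for Mentor/UX500/CPPI 4.1 controllers.
 * Returns nonzero once the URB can be completed; otherwise the next
 * packet is requested from the device.
 */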
static int musb_rx_dma_inventra_cppi41(struct dma_controller *dma,
struct musb_hw_ep *hw_ep,
struct musb_qh *qh,
struct urb *urb,
size_t len)
{
struct dma_channel *channel = hw_ep->rx_channel;
void __iomem *epio = hw_ep->regs;
u16 val;
int pipe;
bool done;
pipe = urb->pipe;
if (usb_pipeisoc(pipe)) {
struct usb_iso_packet_descriptor *d;
d = urb->iso_frame_desc + qh->iso_idx;
d->actual_length = len;
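/* even on an error the DMA ran for the whole frame, so keep only EILSEQ/EOVERFLOW statuses */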
if (d->status != -EILSEQ && d->status != -EOVERFLOW)
d->status = 0;
if (++qh->iso_idx >= urb->number_of_packets) {
done = true;
} else {
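/* queue the next frame on CPPI 4.1; its result is overridden just below so the host keeps requesting packets */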
if (musb_dma_cppi41(hw_ep->musb))
done = musb_rx_dma_iso_cppi41(dma, hw_ep, qh,
urb, len);
done = false;
}
} else {
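/* non-ISO: done when the URB buffer is full or the channel saw a short packet / end of transfer */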
done = (urb->actual_length + len >=
urb->transfer_buffer_length
|| channel->actual_len < qh->maxpacket
|| channel->rx_packet_done);
}
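/* not done yet: ask the device for the next packet */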
if (!done) {
val = musb_readw(epio, MUSB_RXCSR);
val |= MUSB_RXCSR_H_REQPKT;
musb_writew(epio, MUSB_RXCSR, MUSB_RXCSR_H_WZC_BITS | val);
}
return done;
}
static int musb_rx_dma_in_inventra_cppi41(struct dma_controller *dma,
struct musb_hw_ep *hw_ep,
struct musb_qh *qh,
struct urb *urb,
size_t len,
u8 iso_err)
{
struct musb *musb = hw_ep->musb;
void __iomem *epio = hw_ep->regs;
struct dma_channel *channel = hw_ep->rx_channel;
u16 rx_count, val;
int length, pipe, done;
dma_addr_t buf;
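/* number of bytes the controller placed in the RX FIFO for this packet */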
rx_count = musb_readw(epio, MUSB_RXCOUNT);
pipe = urb->pipe;
if (usb_pipeisoc(pipe)) {
int d_status = 0;
struct usb_iso_packet_descriptor *d;
d = urb->iso_frame_desc + qh->iso_idx;
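/* the caller reported an ISO receive error for this frame: record it but still unload the data */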
if (iso_err) {
d_status = -EILSEQ;
urb->error_count++;
}
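/* clamp to the frame descriptor length if the received packet overflows it */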
if (rx_count > d->length) {
if (d_status == 0) {
d_status = -EOVERFLOW;
urb->error_count++;
}
musb_dbg(musb, "** OVERFLOW %d into %d",
rx_count, d->length);
length = d->length;
} else
length = rx_count;
d->status = d_status;
buf = urb->transfer_dma + d->offset;
} else {
length = rx_count;
buf = urb->transfer_dma + urb->actual_length;
}
channel->desired_mode = 0;
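/* mode 1 (several packets per DMA program) is opt-in via USE_MODE1: it is only attempted for large transfers with URB_SHORT_NOT_OK, since short-packet termination makes it unreliable */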
#ifdef USE_MODE1
if ((urb->transfer_flags & URB_SHORT_NOT_OK)
&& (urb->transfer_buffer_length - urb->actual_length)
> qh->maxpacket)
channel->desired_mode = 1;
if (rx_count < hw_ep->max_packet_sz_rx) {
length = rx_count;
channel->desired_mode = 0;
} else {
length = urb->transfer_buffer_length;
}
#endif
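/* set up RXCSR: stop REQPKT, pick AUTOREQ to match the DMA mode, enable DMA, and allow AUTOCLEAR only for non-high-bandwidth endpoints */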
val = musb_readw(epio, MUSB_RXCSR);
val &= ~MUSB_RXCSR_H_REQPKT;
if (channel->desired_mode == 0)
val &= ~MUSB_RXCSR_H_AUTOREQ;
else
val |= MUSB_RXCSR_H_AUTOREQ;
val |= MUSB_RXCSR_DMAENAB;
if (qh->hb_mult == 1)
val |= MUSB_RXCSR_AUTOCLEAR;
musb_writew(epio, MUSB_RXCSR, MUSB_RXCSR_H_WZC_BITS | val);
done = dma->channel_program(channel, qh->maxpacket,
channel->desired_mode,
buf, length);
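/* the channel refused the transfer: release it and strip the DMA bits so the caller falls back to PIO */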
if (!done) {
dma->channel_release(channel);
hw_ep->rx_channel = NULL;
channel = NULL;
val = musb_readw(epio, MUSB_RXCSR);
val &= ~(MUSB_RXCSR_DMAENAB
| MUSB_RXCSR_H_AUTOREQ
| MUSB_RXCSR_AUTOCLEAR);
musb_writew(epio, MUSB_RXCSR, val);
}
return done;
}
#else
static inline int musb_rx_dma_inventra_cppi41(struct dma_controller *dma,
struct musb_hw_ep *hw_ep,
struct musb_qh *qh,
struct urb *urb,
size_t len)
{ … }
static inline int musb_rx_dma_in_inventra_cppi41(struct dma_controller *dma,
struct musb_hw_ep *hw_ep,
struct musb_qh *qh,
struct urb *urb,
size_t len,
u8 iso_err)
{ … }
#endif
void musb_host_rx(struct musb *musb, u8 epnum)
{ … }
static int musb_schedule(
struct musb *musb,
struct musb_qh *qh,
int is_in)
{ … }
static int musb_urb_enqueue(
struct usb_hcd *hcd,
struct urb *urb,
gfp_t mem_flags)
{ … }
static int musb_cleanup_urb(struct urb *urb, struct musb_qh *qh)
{ … }
static int musb_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{ … }
static void
musb_h_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep)
{ … }
static int musb_h_get_frame_number(struct usb_hcd *hcd)
{ … }
static int musb_h_start(struct usb_hcd *hcd)
{ … }
static void musb_h_stop(struct usb_hcd *hcd)
{ … }
static int musb_bus_suspend(struct usb_hcd *hcd)
{ … }
static int musb_bus_resume(struct usb_hcd *hcd)
{ … }
#ifndef CONFIG_MUSB_PIO_ONLY
#define MUSB_USB_DMA_ALIGN …
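/*
 * Bounce buffer used when a URB's transfer_buffer is not suitably aligned
 * for DMA: kmalloc_ptr is the raw allocation to free, old_xfer_buffer the
 * caller's original buffer, and data[] the aligned area handed to the
 * hardware.
 */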
struct musb_temp_buffer {
void *kmalloc_ptr;
void *old_xfer_buffer;
u8 data[];
};
static void musb_free_temp_buffer(struct urb *urb)
{
enum dma_data_direction dir;
struct musb_temp_buffer *temp;
size_t length;
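/* nothing to undo unless a bounce buffer was installed by musb_alloc_temp_buffer() */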
if (!(urb->transfer_flags & URB_ALIGNED_TEMP_BUFFER))
return;
dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
temp = container_of(urb->transfer_buffer, struct musb_temp_buffer,
data);
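/* IN transfers: copy received data back to the caller's buffer; ISO URBs may have written anywhere in it, so copy the full length */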
if (dir == DMA_FROM_DEVICE) {
if (usb_pipeisoc(urb->pipe))
length = urb->transfer_buffer_length;
else
length = urb->actual_length;
memcpy(temp->old_xfer_buffer, temp->data, length);
}
urb->transfer_buffer = temp->old_xfer_buffer;
kfree(temp->kmalloc_ptr);
urb->transfer_flags &= ~URB_ALIGNED_TEMP_BUFFER;
}
static int musb_alloc_temp_buffer(struct urb *urb, gfp_t mem_flags)
{
enum dma_data_direction dir;
struct musb_temp_buffer *temp;
void *kmalloc_ptr;
size_t kmalloc_size;
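/* only bounce plain, non-empty buffers that are misaligned; scatter-gather URBs are left alone */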
if (urb->num_sgs || urb->sg ||
urb->transfer_buffer_length == 0 ||
!((uintptr_t)urb->transfer_buffer & (MUSB_USB_DMA_ALIGN - 1)))
return 0;
dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
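/* room for the header, the payload and worst-case alignment padding */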
kmalloc_size = urb->transfer_buffer_length +
sizeof(struct musb_temp_buffer) + MUSB_USB_DMA_ALIGN - 1;
kmalloc_ptr = kmalloc(kmalloc_size, mem_flags);
if (!kmalloc_ptr)
return -ENOMEM;
temp = PTR_ALIGN(kmalloc_ptr, MUSB_USB_DMA_ALIGN);
temp->kmalloc_ptr = kmalloc_ptr;
temp->old_xfer_buffer = urb->transfer_buffer;
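/* OUT data must be staged into the bounce buffer before the URB is mapped */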
if (dir == DMA_TO_DEVICE)
memcpy(temp->data, urb->transfer_buffer,
urb->transfer_buffer_length);
urb->transfer_buffer = temp->data;
urb->transfer_flags |= URB_ALIGNED_TEMP_BUFFER;
return 0;
}
static int musb_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
gfp_t mem_flags)
{
struct musb *musb = hcd_to_musb(hcd);
int ret;
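/* cores older than RTL 1.8 have no DMA alignment restriction, so take the generic path; newer ones may need a bounce buffer first */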
if (musb->hwvers < MUSB_HWVERS_1800)
return usb_hcd_map_urb_for_dma(hcd, urb, mem_flags);
ret = musb_alloc_temp_buffer(urb, mem_flags);
if (ret)
return ret;
ret = usb_hcd_map_urb_for_dma(hcd, urb, mem_flags);
if (ret)
musb_free_temp_buffer(urb);
return ret;
}
static void musb_unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb)
{
struct musb *musb = hcd_to_musb(hcd);
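/* undo the generic mapping, then release any bounce buffer (only relevant for RTL 1.8+) */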
usb_hcd_unmap_urb_for_dma(hcd, urb);
if (musb->hwvers < MUSB_HWVERS_1800)
return;
musb_free_temp_buffer(urb);
}
#endif
static const struct hc_driver musb_hc_driver = …;
int musb_host_alloc(struct musb *musb)
{ … }
void musb_host_cleanup(struct musb *musb)
{ … }
void musb_host_free(struct musb *musb)
{ … }
int musb_host_setup(struct musb *musb, int power_budget)
{ … }
void musb_host_resume_root_hub(struct musb *musb)
{ … }
void musb_host_poke_root_hub(struct musb *musb)
{ … }