// SPDX-License-Identifier: GPL-2.0
/*
 * Cadence USBHS-DEV Driver - gadget side.
 *
 * Copyright (C) 2023 Cadence Design Systems.
 *
 * Authors: Pawel Laszczak <[email protected]>
 */

/*
 * Workaround 1:
 * In some situations the controller may get a stale data address in a TRB
 * with the following sequence:
 * 1. Controller reads the TRB, including the data address.
 * 2. Software updates the TRB's data address and Cycle bit.
 * 3. Controller re-reads the TRB part which includes the Cycle bit.
 * 4. DMA runs with the stale data address.
 *
 * To work around this problem, the driver needs to make the first TRB in a
 * TD invalid. After preparing all TRBs, the driver checks the DMA position;
 * if DMA points to the first just-added TRB and the doorbell is set, the
 * driver must defer making this TRB valid. The TRB is then made valid while
 * adding the next TRB, but only when DMA is stopped or from the TRBERR
 * interrupt.
 */
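
/*
 * A minimal sketch of the guard described above; wa1_trb, wa1_set_guard and
 * cdns2_dma_points_to() are illustrative placeholders, not necessarily the
 * driver's own names:
 *
 *	// Queue the first TRB of the TD with an inverted cycle bit, so the
 *	// controller does not yet consider it valid.
 *	trb->control = cpu_to_le32(control ^ TRB_CYCLE);
 *
 *	// After preparing all TRBs: if DMA already points at this TRB and
 *	// the doorbell is set, defer flipping the cycle bit.
 *	if (doorbell && cdns2_dma_points_to(pep, trb)) {
 *		pep->wa1_trb = trb;
 *		pep->wa1_set_guard = true;
 *	} else {
 *		trb->control ^= cpu_to_le32(TRB_CYCLE);
 *	}
 *
 *	// The guarded TRB is made valid later, while queuing the next TRB
 *	// with DMA stopped, or from the TRBERR interrupt handler.
 */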

#include <linux/dma-mapping.h>
#include <linux/pm_runtime.h>
#include <linux/interrupt.h>
#include <linux/property.h>
#include <linux/dmapool.h>
#include <linux/iopoll.h>

#include "cdns2-gadget.h"
#include "cdns2-trace.h"

/**
 * set_reg_bit_32 - set bits in a given 32-bit register.
 * @ptr: register address.
 * @mask: bits to set.
 */
static void set_reg_bit_32(void __iomem *ptr, u32 mask)
{}

/**
 * clear_reg_bit_32 - clear bits in a given 32-bit register.
 * @ptr: register address.
 * @mask: bits to clear.
 */
static void clear_reg_bit_32(void __iomem *ptr, u32 mask)
{}

/* Clear bits in a given 8-bit register. */
static void clear_reg_bit_8(void __iomem *ptr, u8 mask)
{}

/* Set bits in a given 8-bit register. */
void set_reg_bit_8(void __iomem *ptr, u8 mask)
{}
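
/*
 * The helpers above are plain MMIO read-modify-write accessors. A minimal
 * sketch of the 32-bit variants, assuming the generic readl()/writel()
 * accessors (the 8-bit versions use readb()/writeb() the same way):
 *
 *	static void set_reg_bit_32(void __iomem *ptr, u32 mask)
 *	{
 *		mask = readl(ptr) | mask;
 *		writel(mask, ptr);
 *	}
 *
 *	static void clear_reg_bit_32(void __iomem *ptr, u32 mask)
 *	{
 *		mask = readl(ptr) & ~mask;
 *		writel(mask, ptr);
 *	}
 */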

static int cdns2_get_dma_pos(struct cdns2_device *pdev,
			     struct cdns2_endpoint *pep)
{}

/* Get next private request from list. */
struct cdns2_request *cdns2_next_preq(struct list_head *list)
{}
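
/*
 * A minimal sketch of the lookup above, assuming requests are linked
 * through a "list" member of struct cdns2_request:
 *
 *	return list_first_entry_or_null(list, struct cdns2_request, list);
 */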

void cdns2_select_ep(struct cdns2_device *pdev, u32 ep)
{}

dma_addr_t cdns2_trb_virt_to_dma(struct cdns2_endpoint *pep,
				 struct cdns2_trb *trb)
{}

static void cdns2_free_tr_segment(struct cdns2_endpoint *pep)
{}

/* Allocates Transfer Ring segment. */
static int cdns2_alloc_tr_segment(struct cdns2_endpoint *pep)
{}

/*
 * Stalls and flushes selected endpoint.
 * Endpoint must be selected before invoking this function.
 */
static void cdns2_ep_stall_flush(struct cdns2_endpoint *pep)
{}

/*
 * Increment a TRB index.
 *
 * The index should never point to the last link TRB in the TR. After
 * incrementing, if it points to the link TRB, wrap around to the beginning
 * and invert the cycle state bit. The link TRB is always the last TRB entry.
 */
static void cdns2_ep_inc_trb(int *index, u8 *cs, int trb_in_seg)
{}
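
/*
 * A minimal sketch of the increment described above, assuming the link TRB
 * occupies the last entry of the segment:
 *
 *	(*index)++;
 *	if (*index == (trb_in_seg - 1)) {
 *		*index = 0;
 *		*cs ^= 1;
 *	}
 */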

static void cdns2_ep_inc_enq(struct cdns2_ring *ring)
{}

static void cdns2_ep_inc_deq(struct cdns2_ring *ring)
{}

/*
 * Enable/disable LPM.
 *
 * If the USBCS_LPMNYET bit is not set and the device receives an Extended
 * Token packet, then the controller answers with an ACK handshake.
 * If the USBCS_LPMNYET bit is set and the device receives an Extended
 * Token packet, then the controller answers with a NYET handshake.
 */
static void cdns2_enable_l1(struct cdns2_device *pdev, int enable)
{}
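
/*
 * A minimal sketch, assuming the handshake is controlled only by the
 * USBCS_LPMNYET bit (the usbcs register reference is illustrative):
 *
 *	if (enable)
 *		clear_reg_bit_8(&pdev->usb_regs->usbcs, USBCS_LPMNYET);
 *	else
 *		set_reg_bit_8(&pdev->usb_regs->usbcs, USBCS_LPMNYET);
 */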

static enum usb_device_speed cdns2_get_speed(struct cdns2_device *pdev)
{}

static struct cdns2_trb *cdns2_next_trb(struct cdns2_endpoint *pep,
					struct cdns2_trb *trb)
{}

void cdns2_gadget_giveback(struct cdns2_endpoint *pep,
			   struct cdns2_request *preq,
			   int status)
{}

static void cdns2_wa1_restore_cycle_bit(struct cdns2_endpoint *pep)
{}

static int cdns2_wa1_update_guard(struct cdns2_endpoint *pep,
				  struct cdns2_trb *trb)
{}

static void cdns2_wa1_tray_restore_cycle_bit(struct cdns2_device *pdev,
					     struct cdns2_endpoint *pep)
{}

static int cdns2_prepare_ring(struct cdns2_device *pdev,
			      struct cdns2_endpoint *pep,
			      int num_trbs)
{}

static void cdns2_dbg_request_trbs(struct cdns2_endpoint *pep,
				   struct cdns2_request *preq)
{}

static unsigned int cdns2_count_trbs(struct cdns2_endpoint *pep,
				     u64 addr, u64 len)
{}

static unsigned int cdns2_count_sg_trbs(struct cdns2_endpoint *pep,
					struct usb_request *req)
{}

/*
 * Prepares the array with optimized AXI burst values for different transfer
 * lengths. The controller handles the final data, which is less than the
 * AXI burst size, as single-word transactions.
 * e.g.:
 * Let's assume that the driver prepares a TRB with trb->length 700 and the
 * burst size is set to 128. In this case the controller will handle the
 * first 512 bytes as a single AXI transaction, but the next 188 bytes will
 * be handled as 47 separate AXI transactions.
 * A better solution is to use a burst size of 16; then there will be only
 * 25 AXI transactions (10 * 64 + 15 * 4).
 */
static void cdsn2_isoc_burst_opt(struct cdns2_device *pdev)
{}
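
/*
 * Worked numbers for the example in the comment above, assuming one AXI
 * word is 4 bytes: with a burst size of 128 words (512 bytes), 700 bytes
 * split into one 512-byte burst plus 188 / 4 = 47 word transactions; with
 * a burst size of 16 words (64 bytes), they split into 10 * 64-byte bursts
 * plus 60 / 4 = 15 word transactions, i.e. 25 transactions in total.
 */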

static void cdns2_ep_tx_isoc(struct cdns2_endpoint *pep,
			     struct cdns2_request *preq,
			     int num_trbs)
{}

static void cdns2_ep_tx_bulk(struct cdns2_endpoint *pep,
			     struct cdns2_request *preq,
			     int trbs_per_td)
{}

static void cdns2_set_drdy(struct cdns2_device *pdev,
			   struct cdns2_endpoint *pep)
{}

static int cdns2_prepare_first_isoc_transfer(struct cdns2_device *pdev,
					     struct cdns2_endpoint *pep)
{}

/* Prepare and start transfer on a non-default endpoint. */
static int cdns2_ep_run_transfer(struct cdns2_endpoint *pep,
				 struct cdns2_request *preq)
{}

/* Prepare and start transfers for all not-yet-started requests. */
static int cdns2_start_all_request(struct cdns2_device *pdev,
				   struct cdns2_endpoint *pep)
{}

/*
 * Check whether a TRB has been handled by DMA.
 *
 * Endpoint must be selected before invoking this function.
 *
 * Returns false if the request has not been handled by DMA, else returns
 * true.
 *
 * SR - start ring
 * ER - end ring
 * DQ = ring->dequeue - dequeue position
 * EQ = ring->enqueue - enqueue position
 * ST = preq->start_trb - index of the first TRB in the transfer ring
 * ET = preq->end_trb - index of the last TRB in the transfer ring
 * CI = current_index - index of the TRB currently processed by DMA
 *
 * As a first step, we check whether the TRB lies between ST and ET.
 * Then, we check whether the cycle bit at index ring->dequeue is correct.
 *
 * Some rules:
 * 1. ring->dequeue never equals current_index.
 * 2. ring->enqueue never exceeds ring->dequeue.
 * 3. Exception: ring->enqueue == ring->dequeue while ring->free_trbs is
 *    zero. This case indicates that the TR is full.
 *
 * In the two cases below, the request has been handled.
 * Case 1 - ring->dequeue < current_index
 *      SR ... EQ ... DQ ... CI ... ER
 *      SR ... DQ ... CI ... EQ ... ER
 *
 * Case 2 - ring->dequeue > current_index
 * This situation takes place when CI goes through the LINK TRB at the end
 * of the transfer ring.
 *      SR ... CI ... EQ ... DQ ... ER
 */
static bool cdns2_trb_handled(struct cdns2_endpoint *pep,
			      struct cdns2_request *preq)
{}
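
/*
 * A condensed sketch of the checks described above; field names are
 * approximate and corner cases are omitted, so this is not the driver's
 * literal code:
 *
 *	ci = cdns2_get_dma_pos(pdev, pep);		// CI
 *	dq = pep->ring.dequeue;				// DQ
 *	trb = &pep->ring.trbs[dq];
 *
 *	// DQ must lie within [ST, ET] of this request, otherwise the TRB
 *	// under test does not belong to it.
 *
 *	// The cycle bit of the TRB at DQ must match the ring's cycle state,
 *	// otherwise the controller has not consumed it yet.
 *	if ((le32_to_cpu(trb->control) & TRB_CYCLE) != pep->ring.ccs)
 *		return false;
 *
 *	// If the doorbell is still set and CI == DQ, DMA is working on this
 *	// very TRB, so it is not handled yet.
 *
 *	if (dq < ci)
 *		return true;	// Case 1
 *	if (dq > ci)
 *		return true;	// Case 2: CI wrapped through the link TRB
 *	return false;
 */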

static void cdns2_skip_isoc_td(struct cdns2_device *pdev,
			       struct cdns2_endpoint *pep,
			       struct cdns2_request *preq)
{}

static void cdns2_transfer_completed(struct cdns2_device *pdev,
				     struct cdns2_endpoint *pep)
{}

static void cdns2_wakeup(struct cdns2_device *pdev)
{}

static void cdns2_rearm_transfer(struct cdns2_endpoint *pep, u8 rearm)
{}

static void cdns2_handle_epx_interrupt(struct cdns2_endpoint *pep)
{}

static void cdns2_disconnect_gadget(struct cdns2_device *pdev)
{}

static irqreturn_t cdns2_usb_irq_handler(int irq, void *data)
{}

static irqreturn_t cdns2_thread_usb_irq_handler(struct cdns2_device *pdev)
{}

/* Deferred USB interrupt handler. */
static irqreturn_t cdns2_thread_irq_handler(int irq, void *data)
{}

/* Calculates and assigns on-chip memory for endpoints. */
static void cdns2_eps_onchip_buffer_init(struct cdns2_device *pdev)
{}

/* Configure hardware endpoint. */
static int cdns2_ep_config(struct cdns2_endpoint *pep, bool enable)
{}

struct usb_request *cdns2_gadget_ep_alloc_request(struct usb_ep *ep,
						  gfp_t gfp_flags)
{}

void cdns2_gadget_ep_free_request(struct usb_ep *ep,
				  struct usb_request *request)
{}

static int cdns2_gadget_ep_enable(struct usb_ep *ep,
				  const struct usb_endpoint_descriptor *desc)
{}

static int cdns2_gadget_ep_disable(struct usb_ep *ep)
{}

static int cdns2_ep_enqueue(struct cdns2_endpoint *pep,
			    struct cdns2_request *preq,
			    gfp_t gfp_flags)
{}

static int cdns2_gadget_ep_queue(struct usb_ep *ep, struct usb_request *request,
				 gfp_t gfp_flags)
{}

int cdns2_gadget_ep_dequeue(struct usb_ep *ep,
			    struct usb_request *request)
{}

int cdns2_halt_endpoint(struct cdns2_device *pdev,
			struct cdns2_endpoint *pep,
			int value)
{}

/* Sets/clears stall on selected endpoint. */
static int cdns2_gadget_ep_set_halt(struct usb_ep *ep, int value)
{}

static int cdns2_gadget_ep_set_wedge(struct usb_ep *ep)
{}

static struct
cdns2_endpoint *cdns2_find_available_ep(struct cdns2_device *pdev,
					struct usb_endpoint_descriptor *desc)
{}

/*
 * Used to recognize which endpoints will be used, in order to optimize
 * on-chip memory usage.
 */
static struct
usb_ep *cdns2_gadget_match_ep(struct usb_gadget *gadget,
			      struct usb_endpoint_descriptor *desc,
			      struct usb_ss_ep_comp_descriptor *comp_desc)
{}

static const struct usb_ep_ops cdns2_gadget_ep_ops =;

static int cdns2_gadget_get_frame(struct usb_gadget *gadget)
{}

static int cdns2_gadget_wakeup(struct usb_gadget *gadget)
{}

static int cdns2_gadget_set_selfpowered(struct usb_gadget *gadget,
					int is_selfpowered)
{}

/* Disable interrupts and begin the controller halting process. */
static void cdns2_quiesce(struct cdns2_device *pdev)
{}

static void cdns2_gadget_config(struct cdns2_device *pdev)
{}

static int cdns2_gadget_pullup(struct usb_gadget *gadget, int is_on)
{}

static int cdns2_gadget_udc_start(struct usb_gadget *gadget,
				  struct usb_gadget_driver *driver)
{}

static int cdns2_gadget_udc_stop(struct usb_gadget *gadget)
{}

static const struct usb_gadget_ops cdns2_gadget_ops =;

static void cdns2_free_all_eps(struct cdns2_device *pdev)
{}

/* Initializes software endpoints of the gadget. */
static int cdns2_init_eps(struct cdns2_device *pdev)
{}

static int cdns2_gadget_start(struct cdns2_device *pdev)
{}

int cdns2_gadget_suspend(struct cdns2_device *pdev)
{}

int cdns2_gadget_resume(struct cdns2_device *pdev, bool hibernated)
{}

void cdns2_gadget_remove(struct cdns2_device *pdev)
{}

int cdns2_gadget_init(struct cdns2_device *pdev)
{}