// SPDX-License-Identifier: GPL-2.0
/*
 * Cadence CDNSP DRD Driver.
 *
 * Copyright (C) 2020 Cadence.
 *
 * Author: Pawel Laszczak <[email protected]>
 *
 * Code based on Linux XHCI driver.
 * Origin: Copyright (C) 2008 Intel Corp
 */

/*
 * Ring initialization rules:
 * 1. Each segment is initialized to zero, except for link TRBs.
 * 2. Ring cycle state = 0. This represents Producer Cycle State (PCS) or
 *    Consumer Cycle State (CCS), depending on ring function.
 * 3. Enqueue pointer = dequeue pointer = address of first TRB in the segment.
 *
 * Ring behavior rules:
 * 1. A ring is empty if enqueue == dequeue. This means there will always be at
 *    least one free TRB in the ring. This is useful if you want to turn that
 *    into a link TRB and expand the ring.
 * 2. When incrementing an enqueue or dequeue pointer, if the next TRB is a
 *    link TRB, then load the pointer with the address in the link TRB. If the
 *    link TRB had its toggle bit set, you may need to update the ring cycle
 *    state (see cycle bit rules). You may have to do this multiple times
 *    until you reach a non-link TRB.
 * 3. A ring is full if enqueue++ (for the definition of increment above)
 *    equals the dequeue pointer.
 *
 * Cycle bit rules:
 * 1. When a consumer increments a dequeue pointer and encounters a toggle bit
 *    in a link TRB, it must toggle the ring cycle state.
 * 2. When a producer increments an enqueue pointer and encounters a toggle bit
 *    in a link TRB, it must toggle the ring cycle state.
 *
 * Producer rules:
 * 1. Check if ring is full before you enqueue.
 * 2. Write the ring cycle state to the cycle bit in the TRB you're enqueuing.
 *    Update enqueue pointer between each write (which may update the ring
 *    cycle state).
 * 3. Notify consumer. If SW is producer, it rings the doorbell for command
 *    and endpoint rings. If controller is the producer for the event ring,
 *    it generates an interrupt according to interrupt modulation rules.
 *
 * Consumer rules:
 * 1. Check if TRB belongs to you. If the cycle bit == your ring cycle state,
 *    the TRB is owned by the consumer.
 * 2. Update dequeue pointer (which may update the ring cycle state) and
 *    continue processing TRBs until you reach a TRB which is not owned by you.
 * 3. Notify the producer. SW is the consumer for the event ring, and it
 *    updates the event ring dequeue pointer. Controller is the consumer for
 *    the command and endpoint rings; it generates events on the event ring
 *    for these.
 */

#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/irq.h>

#include "cdnsp-trace.h"
#include "cdnsp-gadget.h"

/*
 * Returns zero if the TRB isn't in this segment, otherwise it returns the DMA
 * address of the TRB.
 */
dma_addr_t cdnsp_trb_virt_to_dma(struct cdnsp_segment *seg,
				 union cdnsp_trb *trb)
{ … }

static bool cdnsp_trb_is_noop(union cdnsp_trb *trb)
{ … }

static bool cdnsp_trb_is_link(union cdnsp_trb *trb)
{ … }

bool cdnsp_last_trb_on_seg(struct cdnsp_segment *seg, union cdnsp_trb *trb)
{ … }

bool cdnsp_last_trb_on_ring(struct cdnsp_ring *ring,
			    struct cdnsp_segment *seg,
			    union cdnsp_trb *trb)
{ … }

static bool cdnsp_link_trb_toggles_cycle(union cdnsp_trb *trb)
{ … }

static void cdnsp_trb_to_noop(union cdnsp_trb *trb, u32 noop_type)
{ … }

/*
 * Updates trb to point to the next TRB in the ring, and updates seg if the
 * next TRB is in a new segment. This does not skip over link TRBs, and it
 * does not affect the ring dequeue or enqueue pointers.
 */
static void cdnsp_next_trb(struct cdnsp_device *pdev,
			   struct cdnsp_ring *ring,
			   struct cdnsp_segment **seg,
			   union cdnsp_trb **trb)
{ … }
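/*
 * A minimal, self-contained sketch (not driver code) of the "Cycle bit rules"
 * above: stepping a consumer's dequeue pointer across a link TRB and toggling
 * the ring cycle state when the link TRB's toggle bit is set. Every name
 * below (example_*, EXAMPLE_*) is a hypothetical stand-in, not the real
 * cdnsp types; the real logic lives in cdnsp_next_trb() and cdnsp_inc_deq().
 */
#if 0	/* illustration only, never compiled */
#define EXAMPLE_TRBS_PER_SEG	64
#define EXAMPLE_TRB_LINK	BIT(0)	/* stand-in for "this is a link TRB" */
#define EXAMPLE_LINK_TOGGLE	BIT(1)	/* stand-in for the toggle-cycle bit */

struct example_trb {
	u32 control;
};

struct example_seg {
	struct example_trb trbs[EXAMPLE_TRBS_PER_SEG];
	struct example_seg *next;
	dma_addr_t dma;		/* DMA address of trbs[0] */
};

struct example_ring {
	struct example_trb *enq;
	struct example_trb *deq;
	struct example_seg *enq_seg;
	struct example_seg *deq_seg;
	int cycle_state;	/* CCS for a consumer, PCS for a producer */
};

/* Ring behavior rule 2: step one TRB, following link TRBs into new segments. */
static void example_inc_deq(struct example_ring *ring)
{
	ring->deq++;

	/* There may be several link TRBs in a row; follow them all. */
	while (ring->deq->control & EXAMPLE_TRB_LINK) {
		/* Cycle bit rule 1: a toggle-bit link TRB flips the CCS. */
		if (ring->deq->control & EXAMPLE_LINK_TOGGLE)
			ring->cycle_state ^= 1;

		ring->deq_seg = ring->deq_seg->next;
		ring->deq = ring->deq_seg->trbs;
	}
}
#endif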
/*
 * See Cycle bit rules. SW is the consumer for the event ring only.
 * Don't make a ring full of link TRBs. That would be dumb and this would loop.
 */
void cdnsp_inc_deq(struct cdnsp_device *pdev, struct cdnsp_ring *ring)
{ … }

/*
 * See Cycle bit rules. SW is the consumer for the event ring only.
 * Don't make a ring full of link TRBs. That would be dumb and this would loop.
 *
 * If we've just enqueued a TRB that is in the middle of a TD (meaning the
 * chain bit is set), then set the chain bit in all the following link TRBs.
 * If we've enqueued the last TRB in a TD, make sure the following link TRBs
 * have their chain bit cleared (so that each Link TRB is a separate TD).
 *
 * @more_trbs_coming: Will you enqueue more TRBs before ringing the doorbell?
 */
static void cdnsp_inc_enq(struct cdnsp_device *pdev,
			  struct cdnsp_ring *ring,
			  bool more_trbs_coming)
{ … }

/*
 * Check to see if there's room to enqueue num_trbs on the ring and make sure
 * the enqueue pointer will not advance into the dequeue segment.
 */
static bool cdnsp_room_on_ring(struct cdnsp_device *pdev,
			       struct cdnsp_ring *ring,
			       unsigned int num_trbs)
{ … }
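/*
 * Sketch of the fullness check described above, reusing the hypothetical
 * example_* types from the sketch near the top of this file (again, not the
 * driver's implementation). It walks num_trbs steps from the enqueue pointer,
 * skipping link TRBs, and fails if any step would land on the dequeue
 * pointer: per ring behavior rules 1 and 3, one TRB always stays unused.
 */
#if 0	/* illustration only, never compiled */
static bool example_room_on_ring(struct example_ring *ring,
				 unsigned int num_trbs)
{
	struct example_seg *seg = ring->enq_seg;
	struct example_trb *trb = ring->enq;

	while (num_trbs--) {
		trb++;

		/* Link TRBs are not usable space; hop to the next segment. */
		while (trb->control & EXAMPLE_TRB_LINK) {
			seg = seg->next;
			trb = seg->trbs;
		}

		/* Enqueue may never catch up with dequeue. */
		if (trb == ring->deq)
			return false;
	}

	return true;
}
#endif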
/*
 * Workaround for L1: the controller has an issue resuming from L1 after a
 * doorbell for an endpoint is set during the L1 state. This function forces
 * the resume signal in such a case.
 */
static void cdnsp_force_l0_go(struct cdnsp_device *pdev)
{ … }

/* Ring the doorbell after placing a command on the ring. */
void cdnsp_ring_cmd_db(struct cdnsp_device *pdev)
{ … }

/*
 * Ring the doorbell after placing a transfer on the ring.
 * Returns true if the doorbell was set, otherwise false.
 */
static bool cdnsp_ring_ep_doorbell(struct cdnsp_device *pdev,
				   struct cdnsp_ep *pep,
				   unsigned int stream_id)
{ … }

/*
 * Get the right ring for the given pep and stream_id.
 * If the endpoint supports streams, boundary check the USB request's stream ID.
 * If the endpoint doesn't support streams, return the singular endpoint ring.
 */
static struct cdnsp_ring *cdnsp_get_transfer_ring(struct cdnsp_device *pdev,
						  struct cdnsp_ep *pep,
						  unsigned int stream_id)
{ … }

static struct cdnsp_ring *
cdnsp_request_to_transfer_ring(struct cdnsp_device *pdev,
			       struct cdnsp_request *preq)
{ … }

/* Ring the doorbell for any rings with pending requests. */
void cdnsp_ring_doorbell_for_active_rings(struct cdnsp_device *pdev,
					  struct cdnsp_ep *pep)
{ … }

/*
 * Get the hw dequeue pointer the controller stopped on, either directly from
 * the endpoint context or, if streams are in use, from the stream context.
 * The lowest four bits of the returned hw_dequeue hold the cycle state and,
 * possibly, the stream context type.
 */
static u64 cdnsp_get_hw_deq(struct cdnsp_device *pdev,
			    unsigned int ep_index,
			    unsigned int stream_id)
{ … }

/*
 * Move the controller endpoint ring dequeue pointer past cur_td.
 * Record the new state of the controller endpoint ring dequeue segment,
 * dequeue pointer, and new consumer cycle state in state.
 * Update the internal representation of the ring's dequeue pointer.
 *
 * We do this in three jumps:
 * - First we update our new ring state to be the same as when the
 *   controller stopped.
 * - Then we traverse the ring to find the segment that contains
 *   the last TRB in the TD. We toggle the controller's new cycle state
 *   when we pass any link TRBs with the toggle cycle bit set.
 * - Finally we move the dequeue state one TRB further, toggling the cycle bit
 *   if we've moved it past a link TRB with the toggle cycle bit set.
 */
static void cdnsp_find_new_dequeue_state(struct cdnsp_device *pdev,
					 struct cdnsp_ep *pep,
					 unsigned int stream_id,
					 struct cdnsp_td *cur_td,
					 struct cdnsp_dequeue_state *state)
{ … }

/*
 * flip_cycle means flip the cycle bit of all but the first and last TRB.
 * (The last TRB actually points to the ring enqueue pointer, which is not part
 * of this TD.) This is used to remove partially enqueued isoc TDs from a ring.
 */
static void cdnsp_td_to_noop(struct cdnsp_device *pdev,
			     struct cdnsp_ring *ep_ring,
			     struct cdnsp_td *td,
			     bool flip_cycle)
{ … }

/*
 * This TD is defined by the TRBs starting at start_trb in start_seg and ending
 * at end_trb, which may be in another segment. If the suspect DMA address is a
 * TRB in this TD, this function returns that TRB's segment. Otherwise it
 * returns NULL.
 */
static struct cdnsp_segment *cdnsp_trb_in_td(struct cdnsp_device *pdev,
					     struct cdnsp_segment *start_seg,
					     union cdnsp_trb *start_trb,
					     union cdnsp_trb *end_trb,
					     dma_addr_t suspect_dma)
{ … }
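/*
 * Sketch of the containment test at the heart of cdnsp_trb_in_td(), again on
 * the hypothetical example_* types from the top of this file and simplified
 * to the non-wrapped case (the real function also handles a TD that wraps
 * back into its starting segment). Within one segment a TD occupies a
 * contiguous DMA range, so the suspect address matches if it lies between
 * the first and last TD TRB of that segment.
 */
#if 0	/* illustration only, never compiled */
/* Zero means "trb is not in this segment", mirroring cdnsp_trb_virt_to_dma(). */
static dma_addr_t example_trb_dma(struct example_seg *seg,
				  struct example_trb *trb)
{
	if (trb < seg->trbs || trb >= &seg->trbs[EXAMPLE_TRBS_PER_SEG])
		return 0;

	return seg->dma + (trb - seg->trbs) * sizeof(*trb);
}

static struct example_seg *example_trb_in_td(struct example_seg *start_seg,
					     struct example_trb *start_trb,
					     struct example_trb *end_trb,
					     dma_addr_t suspect_dma)
{
	dma_addr_t start_dma = example_trb_dma(start_seg, start_trb);
	struct example_seg *seg = start_seg;

	do {
		dma_addr_t end_trb_dma = example_trb_dma(seg, end_trb);
		dma_addr_t end_seg_dma =
			example_trb_dma(seg, &seg->trbs[EXAMPLE_TRBS_PER_SEG - 1]);

		if (end_trb_dma) {
			/* The TD ends in this segment. */
			if (suspect_dma >= start_dma &&
			    suspect_dma <= end_trb_dma)
				return seg;
			return NULL;
		}

		/* The TD runs through this segment's last TRB. */
		if (suspect_dma >= start_dma && suspect_dma <= end_seg_dma)
			return seg;

		seg = seg->next;
		start_dma = example_trb_dma(seg, seg->trbs);
	} while (seg != start_seg);

	return NULL;
}
#endif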
static void cdnsp_unmap_td_bounce_buffer(struct cdnsp_device *pdev,
					 struct cdnsp_ring *ring,
					 struct cdnsp_td *td)
{ … }

static int cdnsp_cmd_set_deq(struct cdnsp_device *pdev,
			     struct cdnsp_ep *pep,
			     struct cdnsp_dequeue_state *deq_state)
{ … }

int cdnsp_remove_request(struct cdnsp_device *pdev,
			 struct cdnsp_request *preq,
			 struct cdnsp_ep *pep)
{ … }

static int cdnsp_update_port_id(struct cdnsp_device *pdev, u32 port_id)
{ … }

static void cdnsp_handle_port_status(struct cdnsp_device *pdev,
				     union cdnsp_trb *event)
{ … }

static void cdnsp_td_cleanup(struct cdnsp_device *pdev,
			     struct cdnsp_td *td,
			     struct cdnsp_ring *ep_ring,
			     int *status)
{ … }

static void cdnsp_finish_td(struct cdnsp_device *pdev,
			    struct cdnsp_td *td,
			    struct cdnsp_transfer_event *event,
			    struct cdnsp_ep *ep,
			    int *status)
{ … }

/* sum trb lengths from ring dequeue up to stop_trb, _excluding_ stop_trb */
static int cdnsp_sum_trb_lengths(struct cdnsp_device *pdev,
				 struct cdnsp_ring *ring,
				 union cdnsp_trb *stop_trb)
{ … }

static int cdnsp_giveback_first_trb(struct cdnsp_device *pdev,
				    struct cdnsp_ep *pep,
				    unsigned int stream_id,
				    int start_cycle,
				    struct cdnsp_generic_trb *start_trb)
{ … }

/*
 * Process control TDs, update the USB request status and actual_length.
 */
static void cdnsp_process_ctrl_td(struct cdnsp_device *pdev,
				  struct cdnsp_td *td,
				  union cdnsp_trb *event_trb,
				  struct cdnsp_transfer_event *event,
				  struct cdnsp_ep *pep,
				  int *status)
{ … }

/*
 * Process isochronous TDs, update the USB request status and actual_length.
 */
static void cdnsp_process_isoc_td(struct cdnsp_device *pdev,
				  struct cdnsp_td *td,
				  union cdnsp_trb *ep_trb,
				  struct cdnsp_transfer_event *event,
				  struct cdnsp_ep *pep,
				  int status)
{ … }

static void cdnsp_skip_isoc_td(struct cdnsp_device *pdev,
			       struct cdnsp_td *td,
			       struct cdnsp_transfer_event *event,
			       struct cdnsp_ep *pep,
			       int status)
{ … }

/*
 * Process bulk and interrupt TDs, update the USB request status and
 * actual_length.
 */
static void cdnsp_process_bulk_intr_td(struct cdnsp_device *pdev,
				       struct cdnsp_td *td,
				       union cdnsp_trb *ep_trb,
				       struct cdnsp_transfer_event *event,
				       struct cdnsp_ep *ep,
				       int *status)
{ … }

static void cdnsp_handle_tx_nrdy(struct cdnsp_device *pdev,
				 struct cdnsp_transfer_event *event)
{ … }

/*
 * If this function returns an error condition, it means it got a Transfer
 * event with a corrupted TRB DMA address or the endpoint is disabled.
 */
static int cdnsp_handle_tx_event(struct cdnsp_device *pdev,
				 struct cdnsp_transfer_event *event)
{ … }

/*
 * This function handles all events on the event ring.
 * Returns true for "possibly more events to process" (the caller should call
 * it again), otherwise false if done.
 */
static bool cdnsp_handle_event(struct cdnsp_device *pdev)
{ … }

irqreturn_t cdnsp_thread_irq_handler(int irq, void *data)
{ … }

irqreturn_t cdnsp_irq_handler(int irq, void *priv)
{ … }

/*
 * Generic function for queuing a TRB on a ring.
 * The caller must have checked to make sure there's room on the ring.
 *
 * @more_trbs_coming: Will you enqueue more TRBs before ringing the doorbell?
 */
static void cdnsp_queue_trb(struct cdnsp_device *pdev,
			    struct cdnsp_ring *ring,
			    bool more_trbs_coming,
			    u32 field1, u32 field2, u32 field3, u32 field4)
{ … }

/*
 * Does various checks on the endpoint ring and makes it ready to
 * queue num_trbs.
 */
static int cdnsp_prepare_ring(struct cdnsp_device *pdev,
			      struct cdnsp_ring *ep_ring,
			      u32 ep_state,
			      unsigned int num_trbs,
			      gfp_t mem_flags)
{ … }

static int cdnsp_prepare_transfer(struct cdnsp_device *pdev,
				  struct cdnsp_request *preq,
				  unsigned int num_trbs)
{ … }

static unsigned int cdnsp_count_trbs(u64 addr, u64 len)
{ … }

static unsigned int count_trbs_needed(struct cdnsp_request *preq)
{ … }

static unsigned int count_sg_trbs_needed(struct cdnsp_request *preq)
{ … }

static void cdnsp_check_trb_math(struct cdnsp_request *preq, int running_total)
{ … }

/*
 * TD size is the number of max packet sized packets remaining in the TD
 * (*not* including this TRB).
 *
 * Total TD packet count = total_packet_count =
 *     DIV_ROUND_UP(TD size in bytes, wMaxPacketSize)
 *
 * Packets transferred up to and including this TRB = packets_transferred =
 *     rounddown(total bytes transferred including this TRB / wMaxPacketSize)
 *
 * TD size = total_packet_count - packets_transferred
 *
 * It must fit in bits 21:17, so it can't be bigger than 31.
 * This is taken care of in the TRB_TD_SIZE() macro.
 *
 * The last TRB in a TD must have the TD size set to zero.
 */
static u32 cdnsp_td_remainder(struct cdnsp_device *pdev,
			      int transferred,
			      int trb_buff_len,
			      unsigned int td_total_len,
			      struct cdnsp_request *preq,
			      bool more_trbs_coming,
			      bool zlp)
{ … }
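/*
 * Worked sketch of the TD-size formula documented above, as a hypothetical
 * stand-alone helper (the real cdnsp_td_remainder() also accounts for the
 * more_trbs_coming and zlp cases). Example: td_total_len = 3000, maxp = 512
 * gives total_packet_count = 6; after the first 1024-byte TRB,
 * packets_transferred = 2, so TD size = 4.
 */
#if 0	/* illustration only, never compiled */
static u32 example_td_remainder(unsigned int transferred,
				unsigned int trb_buff_len,
				unsigned int td_total_len,
				unsigned int maxp,
				bool last_trb)
{
	u32 total_packet_count, packets_transferred;

	/* The last TRB in a TD must report zero packets remaining. */
	if (last_trb)
		return 0;

	/* One TRB carrying the whole TD also leaves nothing after it. */
	if (trb_buff_len == td_total_len)
		return 0;

	total_packet_count = DIV_ROUND_UP(td_total_len, maxp);
	packets_transferred = (transferred + trb_buff_len) / maxp;

	/* Must fit in TRB bits 21:17, so cap at 31 (like TRB_TD_SIZE()). */
	return min_t(u32, 31, total_packet_count - packets_transferred);
}
#endif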
static int cdnsp_align_td(struct cdnsp_device *pdev,
			  struct cdnsp_request *preq,
			  u32 enqd_len,
			  u32 *trb_buff_len,
			  struct cdnsp_segment *seg)
{ … }

int cdnsp_queue_bulk_tx(struct cdnsp_device *pdev, struct cdnsp_request *preq)
{ … }

int cdnsp_queue_ctrl_tx(struct cdnsp_device *pdev, struct cdnsp_request *preq)
{ … }

int cdnsp_cmd_stop_ep(struct cdnsp_device *pdev, struct cdnsp_ep *pep)
{ … }

/*
 * The transfer burst count field of the isochronous TRB defines the number of
 * bursts that are required to move all packets in this TD. Only SuperSpeed
 * devices can burst up to bMaxBurst number of packets per service interval.
 * This field is zero based, meaning a value of zero in the field means one
 * burst. Basically, for everything but SuperSpeed devices, this field will be
 * zero.
 */
static unsigned int cdnsp_get_burst_count(struct cdnsp_device *pdev,
					  struct cdnsp_request *preq,
					  unsigned int total_packet_count)
{ … }

/*
 * Returns the number of packets in the last "burst" of packets. This field is
 * valid for all speeds of devices. USB 2.0 devices can only do one "burst", so
 * the last burst packet count is equal to the total number of packets in the
 * TD. SuperSpeed endpoints can have up to 3 bursts. All but the last burst
 * must contain (bMaxBurst + 1) packets, but the last burst can contain
 * 1 to (bMaxBurst + 1) packets.
 */
static unsigned int
cdnsp_get_last_burst_packet_count(struct cdnsp_device *pdev,
				  struct cdnsp_request *preq,
				  unsigned int total_packet_count)
{ … }

/* Queue an isoc transfer. */
int cdnsp_queue_isoc_tx(struct cdnsp_device *pdev, struct cdnsp_request *preq)
{ … }

/****		Command Ring Operations		****/
/*
 * Generic function for queuing a command TRB on the command ring.
 * The driver queues only one command on the ring at a time.
 */
static void cdnsp_queue_command(struct cdnsp_device *pdev,
				u32 field1, u32 field2,
				u32 field3, u32 field4)
{ … }

/* Queue a slot enable or disable request on the command ring. */
void cdnsp_queue_slot_control(struct cdnsp_device *pdev, u32 trb_type)
{ … }

/* Queue an address device command TRB. */
void cdnsp_queue_address_device(struct cdnsp_device *pdev,
				dma_addr_t in_ctx_ptr,
				enum cdnsp_setup_dev setup)
{ … }

/* Queue a reset device command TRB. */
void cdnsp_queue_reset_device(struct cdnsp_device *pdev)
{ … }

/* Queue a configure endpoint command TRB. */
void cdnsp_queue_configure_endpoint(struct cdnsp_device *pdev,
				    dma_addr_t in_ctx_ptr)
{ … }

/*
 * Suspend is set to indicate "Stop Endpoint Command" is being issued to stop
 * activity on an endpoint that is about to be suspended.
 */
void cdnsp_queue_stop_endpoint(struct cdnsp_device *pdev, unsigned int ep_index)
{ … }

/* Set Transfer Ring Dequeue Pointer command. */
void cdnsp_queue_new_dequeue_state(struct cdnsp_device *pdev,
				   struct cdnsp_ep *pep,
				   struct cdnsp_dequeue_state *deq_state)
{ … }

void cdnsp_queue_reset_ep(struct cdnsp_device *pdev, unsigned int ep_index)
{ … }

/*
 * Queue a halt endpoint request on the command ring.
 */
void cdnsp_queue_halt_endpoint(struct cdnsp_device *pdev, unsigned int ep_index)
{ … }

void cdnsp_force_header_wakeup(struct cdnsp_device *pdev, int intf_num)
{ … }
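/*
 * Worked sketch of the TBC/TLBPC math documented at cdnsp_get_burst_count()
 * and cdnsp_get_last_burst_packet_count() above, as hypothetical stand-alone
 * helpers with zero-based results. Example: bMaxBurst = 3 (bursts of four
 * packets) and 10 total packets means three bursts of 4 + 4 + 2 packets, so
 * the burst count is 2 and the last burst packet count is 1 (both zero
 * based).
 */
#if 0	/* illustration only, never compiled */
static unsigned int example_burst_count(unsigned int total_packet_count,
					unsigned int bmax_burst,
					bool super_speed)
{
	if (!super_speed || !total_packet_count)
		return 0;

	/* Zero based: N bursts are encoded as N - 1. */
	return DIV_ROUND_UP(total_packet_count, bmax_burst + 1) - 1;
}

static unsigned int
example_last_burst_packet_count(unsigned int total_packet_count,
				unsigned int bmax_burst,
				bool super_speed)
{
	unsigned int residue;

	if (super_speed) {
		residue = total_packet_count % (bmax_burst + 1);
		/* A full final burst carries bmax_burst + 1 packets. */
		return residue ? residue - 1 : bmax_burst;
	}

	/* Non-SuperSpeed: the single "burst" holds the whole TD. */
	return total_packet_count ? total_packet_count - 1 : 0;
}
#endif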