// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* * Copyright (C) 2003-2014, 2018-2024 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ #include <linux/sched.h> #include <linux/wait.h> #include <linux/gfp.h> #include "iwl-prph.h" #include "iwl-io.h" #include "internal.h" #include "iwl-op-mode.h" #include "iwl-context-info-gen3.h" /****************************************************************************** * * RX path functions * ******************************************************************************/ /* * Rx theory of operation * * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs), * each of which point to Receive Buffers to be filled by the NIC. These get * used not only for Rx frames, but for any command response or notification * from the NIC. The driver and NIC manage the Rx buffers by means * of indexes into the circular buffer. * * Rx Queue Indexes * The host/firmware share two index registers for managing the Rx buffers. * * The READ index maps to the first position that the firmware may be writing * to -- the driver can read up to (but not including) this position and get * good data. * The READ index is managed by the firmware once the card is enabled. * * The WRITE index maps to the last position the driver has read from -- the * position preceding WRITE is the last slot the firmware can place a packet. * * The queue is empty (no good data) if WRITE = READ - 1, and is full if * WRITE = READ. * * During initialization, the host sets up the READ queue position to the first * INDEX position, and WRITE to the last (READ - 1 wrapped) * * When the firmware places a packet in a buffer, it will advance the READ index * and fire the RX interrupt. The driver can then query the READ index and * process as many packets as possible, moving the WRITE index forward as it * resets the Rx queue buffers with new memory. 
* * The management in the driver is as follows: * + A list of pre-allocated RBDs is stored in iwl->rxq->rx_free. * When the interrupt handler is called, the request is processed. * The page is either stolen - transferred to the upper layer * or reused - added immediately to the iwl->rxq->rx_free list. * + When the page is stolen - the driver updates the matching queue's used * count, detaches the RBD and transfers it to the queue used list. * When there are two used RBDs - they are transferred to the allocator empty * list. Work is then scheduled for the allocator to start allocating * eight buffers. * When there are another 6 used RBDs - they are transferred to the allocator * empty list and the driver tries to claim the pre-allocated buffers and * add them to iwl->rxq->rx_free. If it fails - it continues to claim them * until ready. * When there are 8+ buffers in the free list - either from allocation or from * 8 reused unstolen pages - restock is called to update the FW and indexes. * + In order to make sure the allocator always has RBDs to use for allocation * the allocator has initial pool in the size of num_queues*(8-2) - the * maximum missing RBDs per allocation request (request posted with 2 * empty RBDs, there is no guarantee when the other 6 RBDs are supplied). * The queues supply the recycling of the rest of the RBDs. * + A received packet is processed and handed to the kernel network stack, * detached from the iwl->rxq. The driver 'processed' index is updated. * + If there are no allocated buffers in iwl->rxq->rx_free, * the READ INDEX is not incremented and iwl->status(RX_STALLED) is set. * If there were enough free buffers and RX_STALLED is set it is cleared. * * * Driver sequence: * * iwl_rxq_alloc() Allocates rx_free * iwl_pcie_rx_replenish() Replenishes rx_free list from rx_used, and calls * iwl_pcie_rxq_restock. * Used only during initialization. 
* iwl_pcie_rxq_restock() Moves available buffers from rx_free into Rx * queue, updates firmware pointers, and updates * the WRITE index. * iwl_pcie_rx_allocator() Background work for allocating pages. * * -- enable interrupts -- * ISR - iwl_rx() Detach iwl_rx_mem_buffers from pool up to the * READ INDEX, detaching the SKB from the pool. * Moves the packet buffer from queue to rx_used. * Posts and claims requests to the allocator. * Calls iwl_pcie_rxq_restock to refill any empty * slots. * * RBD life-cycle: * * Init: * rxq.pool -> rxq.rx_used -> rxq.rx_free -> rxq.queue * * Regular Receive interrupt: * Page Stolen: * rxq.queue -> rxq.rx_used -> allocator.rbd_empty -> * allocator.rbd_allocated -> rxq.rx_free -> rxq.queue * Page not Stolen: * rxq.queue -> rxq.rx_free -> rxq.queue * ... * */ /* * iwl_rxq_space - Return number of free slots available in queue. */ static int iwl_rxq_space(const struct iwl_rxq *rxq) { … } /* * iwl_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr */ static inline __le32 iwl_pcie_dma_addr2rbd_ptr(dma_addr_t dma_addr) { … } /* * iwl_pcie_rx_stop - stops the Rx DMA */ int iwl_pcie_rx_stop(struct iwl_trans *trans) { … } /* * iwl_pcie_rxq_inc_wr_ptr - Update the write pointer for the RX queue */ static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_rxq *rxq) { … } static void iwl_pcie_rxq_check_wrptr(struct iwl_trans *trans) { … } static void iwl_pcie_restock_bd(struct iwl_trans *trans, struct iwl_rxq *rxq, struct iwl_rx_mem_buffer *rxb) { … } /* * iwl_pcie_rxmq_restock - restock implementation for multi-queue rx */ static void iwl_pcie_rxmq_restock(struct iwl_trans *trans, struct iwl_rxq *rxq) { … } /* * iwl_pcie_rxsq_restock - restock implementation for single queue rx */ static void iwl_pcie_rxsq_restock(struct iwl_trans *trans, struct iwl_rxq *rxq) { … } /* * iwl_pcie_rxq_restock - refill RX queue from pre-allocated pool * * If there are slots in the RX queue that need to be restocked, * and we have free 
pre-allocated buffers, fill the ranks as much * as we can, pulling from rx_free. * * This moves the 'write' index forward to catch up with 'processed', and * also updates the memory address in the firmware to reference the new * target buffer. */ static void iwl_pcie_rxq_restock(struct iwl_trans *trans, struct iwl_rxq *rxq) { … } /* * iwl_pcie_rx_alloc_page - allocates and returns a page. * */ static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans, u32 *offset, gfp_t priority) { … } /* * iwl_pcie_rxq_alloc_rbs - allocate a page for each used RBD * * A used RBD is an Rx buffer that has been given to the stack. To use it again * a page must be allocated and the RBD must point to the page. This function * doesn't change the HW pointer but handles the list of pages that is used by * iwl_pcie_rxq_restock. The latter function will update the HW to use the newly * allocated buffers. */ void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority, struct iwl_rxq *rxq) { … } void iwl_pcie_free_rbs_pool(struct iwl_trans *trans) { … } /* * iwl_pcie_rx_allocator - Allocates pages in the background for RX queues * * Allocates for each received request 8 pages * Called as a scheduled work item. */ static void iwl_pcie_rx_allocator(struct iwl_trans *trans) { … } /* * iwl_pcie_rx_allocator_get - returns the pre-allocated pages * * Called by queue when the queue posted allocation request and * has freed 8 RBDs in order to restock itself. * This function directly moves the allocated RBs to the queue's ownership * and updates the relevant counters. 
*/ static void iwl_pcie_rx_allocator_get(struct iwl_trans *trans, struct iwl_rxq *rxq) { … } void iwl_pcie_rx_allocator_work(struct work_struct *data) { … } static int iwl_pcie_free_bd_size(struct iwl_trans *trans) { … } static int iwl_pcie_used_bd_size(struct iwl_trans *trans) { … } static void iwl_pcie_free_rxq_dma(struct iwl_trans *trans, struct iwl_rxq *rxq) { … } static size_t iwl_pcie_rb_stts_size(struct iwl_trans *trans) { … } static int iwl_pcie_alloc_rxq_dma(struct iwl_trans *trans, struct iwl_rxq *rxq) { … } static int iwl_pcie_rx_alloc(struct iwl_trans *trans) { … } static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq) { … } static void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans) { … } void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq) { … } static int iwl_pcie_rx_handle(struct iwl_trans *trans, int queue, int budget); static inline struct iwl_trans_pcie *iwl_netdev_to_trans_pcie(struct net_device *dev) { … } static int iwl_pcie_napi_poll(struct napi_struct *napi, int budget) { … } static int iwl_pcie_napi_poll_msix(struct napi_struct *napi, int budget) { … } void iwl_pcie_rx_napi_sync(struct iwl_trans *trans) { … } static int _iwl_pcie_rx_init(struct iwl_trans *trans) { … } int iwl_pcie_rx_init(struct iwl_trans *trans) { … } int iwl_pcie_gen2_rx_init(struct iwl_trans *trans) { … } void iwl_pcie_rx_free(struct iwl_trans *trans) { … } static void iwl_pcie_rx_move_to_allocator(struct iwl_rxq *rxq, struct iwl_rb_allocator *rba) { … } /* * iwl_pcie_rx_reuse_rbd - Recycle used RBDs * * Called when a RBD can be reused. The RBD is transferred to the allocator. 
* When there are 2 empty RBDs - a request for allocation is posted */ static void iwl_pcie_rx_reuse_rbd(struct iwl_trans *trans, struct iwl_rx_mem_buffer *rxb, struct iwl_rxq *rxq, bool emergency) { … } static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans, struct iwl_rxq *rxq, struct iwl_rx_mem_buffer *rxb, bool emergency, int i) { … } static struct iwl_rx_mem_buffer *iwl_pcie_get_rxb(struct iwl_trans *trans, struct iwl_rxq *rxq, int i, bool *join) { … } /* * iwl_pcie_rx_handle - Main entry function for receiving responses from fw */ static int iwl_pcie_rx_handle(struct iwl_trans *trans, int queue, int budget) { … } static struct iwl_trans_pcie *iwl_pcie_get_trans_pcie(struct msix_entry *entry) { … } /* * iwl_pcie_rx_msix_handle - Main entry function for receiving responses from fw * This interrupt handler should be used with RSS queue only. */ irqreturn_t iwl_pcie_irq_rx_msix_handler(int irq, void *dev_id) { … } /* * iwl_pcie_irq_handle_error - called for HW or SW error interrupt from card */ static void iwl_pcie_irq_handle_error(struct iwl_trans *trans) { … } static u32 iwl_pcie_int_cause_non_ict(struct iwl_trans *trans) { … } /* a device (PCI-E) page is 4096 bytes long */ #define ICT_SHIFT … #define ICT_SIZE … #define ICT_COUNT … /* interrupt handler using ict table, with this interrupt driver will * stop using INTA register to get device's interrupt, reading this register * is expensive, device will write interrupts in ICT dram table, increment * index then will fire interrupt to driver, driver will OR all ICT table * entries from current index up to table entry with 0 value. the result is * the interrupt we need to service, driver will set the entries back to 0 and * set index. 
*/ static u32 iwl_pcie_int_cause_ict(struct iwl_trans *trans) { … } void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans, bool from_irq) { … } irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id) { … } /****************************************************************************** * * ICT functions * ******************************************************************************/ /* Free dram table */ void iwl_pcie_free_ict(struct iwl_trans *trans) { … } /* * allocate dram shared table, it is an aligned memory * block of ICT_SIZE. * also reset all data related to ICT table interrupt. */ int iwl_pcie_alloc_ict(struct iwl_trans *trans) { … } /* Device is going up inform it about using ICT interrupt table, * also we need to tell the driver to start using ICT interrupt. */ void iwl_pcie_reset_ict(struct iwl_trans *trans) { … } /* Device is going down disable ict interrupt usage */ void iwl_pcie_disable_ict(struct iwl_trans *trans) { … } irqreturn_t iwl_pcie_isr(int irq, void *data) { … } irqreturn_t iwl_pcie_msix_isr(int irq, void *data) { … } irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id) { … }