// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2023 Intel Corporation */

#include "idpf_controlq.h"

/**
 * idpf_ctlq_alloc_desc_ring - Allocate Control Queue (CQ) rings
 * @hw: pointer to hw struct
 * @cq: pointer to the specific Control queue
 */
static int idpf_ctlq_alloc_desc_ring(struct idpf_hw *hw,
				     struct idpf_ctlq_info *cq)
{ … }

/**
 * idpf_ctlq_alloc_bufs - Allocate Control Queue (CQ) buffers
 * @hw: pointer to hw struct
 * @cq: pointer to the specific Control queue
 *
 * Allocate the buffer head for all control queues, and if it's a receive
 * queue, allocate DMA buffers
 */
static int idpf_ctlq_alloc_bufs(struct idpf_hw *hw,
				struct idpf_ctlq_info *cq)
{ … }

/**
 * idpf_ctlq_free_desc_ring - Free Control Queue (CQ) rings
 * @hw: pointer to hw struct
 * @cq: pointer to the specific Control queue
 *
 * This assumes the posted send buffers have already been cleaned
 * and de-allocated
 */
static void idpf_ctlq_free_desc_ring(struct idpf_hw *hw,
				     struct idpf_ctlq_info *cq)
{ … }

/**
 * idpf_ctlq_free_bufs - Free CQ buffer info elements
 * @hw: pointer to hw struct
 * @cq: pointer to the specific Control queue
 *
 * Free the DMA buffers for RX queues, and DMA buffer header for both RX and
 * TX queues. The upper layers are expected to manage freeing of TX DMA
 * buffers.
 */
static void idpf_ctlq_free_bufs(struct idpf_hw *hw, struct idpf_ctlq_info *cq)
{ … }

/**
 * idpf_ctlq_dealloc_ring_res - Free memory allocated for control queue
 * @hw: pointer to hw struct
 * @cq: pointer to the specific Control queue
 *
 * Free the memory used by the ring, buffers and other related structures
 */
void idpf_ctlq_dealloc_ring_res(struct idpf_hw *hw, struct idpf_ctlq_info *cq)
{ … }

/**
 * idpf_ctlq_alloc_ring_res - allocate memory for descriptor ring and bufs
 * @hw: pointer to hw struct
 * @cq: pointer to control queue struct
 *
 * Do *NOT* hold cq_lock when calling this as the memory allocation routines
 * called are not going to be atomic context safe
 */
int idpf_ctlq_alloc_ring_res(struct idpf_hw *hw, struct idpf_ctlq_info *cq)
{ … }
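
/*
 * Illustrative sketch, NOT the in-tree implementation: one plausible way
 * idpf_ctlq_alloc_ring_res() can compose the two static allocation helpers
 * above, unwinding the descriptor ring if buffer allocation fails so the
 * queue is left fully unallocated on error. Kept under #if 0 so it does not
 * conflict with the real definition.
 */
#if 0	/* example only */
int idpf_ctlq_alloc_ring_res(struct idpf_hw *hw, struct idpf_ctlq_info *cq)
{
	int err;

	/* Allocate the descriptor ring memory first ... */
	err = idpf_ctlq_alloc_desc_ring(hw, cq);
	if (err)
		return err;

	/* ... then the buffer heads (and, for RX queues, DMA buffers);
	 * roll back the ring on failure so no partial state remains.
	 */
	err = idpf_ctlq_alloc_bufs(hw, cq);
	if (err) {
		idpf_ctlq_free_desc_ring(hw, cq);
		return err;
	}

	return 0;
}
#endif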
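
/*
 * Hypothetical caller sketch: allocate ring resources from process context
 * with cq_lock NOT held (per the kernel-doc above, the allocation routines
 * may sleep), and pair the call with idpf_ctlq_dealloc_ring_res() on the
 * teardown path. The function names my_ctlq_init()/my_ctlq_deinit() are
 * illustrative only and do not exist in the driver.
 */
#if 0	/* example only */
static int my_ctlq_init(struct idpf_hw *hw, struct idpf_ctlq_info *cq)
{
	int err;

	/* Must not be called with cq_lock held or from atomic context */
	err = idpf_ctlq_alloc_ring_res(hw, cq);
	if (err)
		return err;

	/* ... hardware programming of ring base/length would follow ... */

	return 0;
}

static void my_ctlq_deinit(struct idpf_hw *hw, struct idpf_ctlq_info *cq)
{
	/* Frees the ring, buffers and related structures in one call */
	idpf_ctlq_dealloc_ring_res(hw, cq);
}
#endif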