linux/drivers/soc/fsl/qbman/qman.c

/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *	 notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *	 notice, this list of conditions and the following disclaimer in the
 *	 documentation and/or other materials provided with the distribution.
 *     * Neither the name of Freescale Semiconductor nor the
 *	 names of its contributors may be used to endorse or promote products
 *	 derived from this software without specific prior written permission.
 *
 * ALTERNATIVELY, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") as published by the Free Software
 * Foundation, either version 2 of that License or (at your option) any
 * later version.
 *
 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "qman_priv.h"

#define DQRR_MAXFILL
#define EQCR_ITHRESH
#define IRQNAME
#define MAX_IRQNAME
#define QMAN_POLL_LIMIT
#define QMAN_PIRQ_DQRR_ITHRESH
#define QMAN_DQRR_IT_MAX
#define QMAN_ITP_MAX
#define QMAN_PIRQ_MR_ITHRESH
#define QMAN_PIRQ_IPERIOD

/* Portal register assists */

#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
/* Cache-inhibited register offsets */
#define QM_REG_EQCR_PI_CINH
#define QM_REG_EQCR_CI_CINH
#define QM_REG_EQCR_ITR
#define QM_REG_DQRR_PI_CINH
#define QM_REG_DQRR_CI_CINH
#define QM_REG_DQRR_ITR
#define QM_REG_DQRR_DCAP
#define QM_REG_DQRR_SDQCR
#define QM_REG_DQRR_VDQCR
#define QM_REG_DQRR_PDQCR
#define QM_REG_MR_PI_CINH
#define QM_REG_MR_CI_CINH
#define QM_REG_MR_ITR
#define QM_REG_CFG
#define QM_REG_ISR
#define QM_REG_IER
#define QM_REG_ISDR
#define QM_REG_IIR
#define QM_REG_ITPR

/* Cache-enabled register offsets */
#define QM_CL_EQCR
#define QM_CL_DQRR
#define QM_CL_MR
#define QM_CL_EQCR_PI_CENA
#define QM_CL_EQCR_CI_CENA
#define QM_CL_DQRR_PI_CENA
#define QM_CL_DQRR_CI_CENA
#define QM_CL_MR_PI_CENA
#define QM_CL_MR_CI_CENA
#define QM_CL_CR
#define QM_CL_RR0
#define QM_CL_RR1

#else
/* Cache-inhibited register offsets */
#define QM_REG_EQCR_PI_CINH
#define QM_REG_EQCR_CI_CINH
#define QM_REG_EQCR_ITR
#define QM_REG_DQRR_PI_CINH
#define QM_REG_DQRR_CI_CINH
#define QM_REG_DQRR_ITR
#define QM_REG_DQRR_DCAP
#define QM_REG_DQRR_SDQCR
#define QM_REG_DQRR_VDQCR
#define QM_REG_DQRR_PDQCR
#define QM_REG_MR_PI_CINH
#define QM_REG_MR_CI_CINH
#define QM_REG_MR_ITR
#define QM_REG_CFG
#define QM_REG_ISR
#define QM_REG_IER
#define QM_REG_ISDR
#define QM_REG_IIR
#define QM_REG_ITPR

/* Cache-enabled register offsets */
#define QM_CL_EQCR
#define QM_CL_DQRR
#define QM_CL_MR
#define QM_CL_EQCR_PI_CENA
#define QM_CL_EQCR_CI_CENA
#define QM_CL_DQRR_PI_CENA
#define QM_CL_DQRR_CI_CENA
#define QM_CL_MR_PI_CENA
#define QM_CL_MR_CI_CENA
#define QM_CL_CR
#define QM_CL_RR0
#define QM_CL_RR1
#endif

/*
 * The drivers (and the h/w programming model) already provide the required
 * synchronisation for portal accesses and data dependencies, so barrier()s or
 * other order-preserving primitives would only degrade performance. Hence the
 * use of the __raw_*() interfaces, which simply ensure that the compiler
 * treats the portal registers as volatile.
 */

/* Cache-enabled ring access */
#define qm_cl(base, idx)
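
/*
 * Illustrative sketch of cache-enabled ring indexing, under the assumption
 * that every ring entry occupies one 64-byte cache line; the example_qm_cl()
 * name below is hypothetical, not the driver's macro.
 */
static inline void *example_qm_cl(void *base, int idx)
{
	/* entry address = ring base + (index * 64 bytes) */
	return (void *)((unsigned long)base + ((unsigned long)idx << 6));
}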

/*
 * Portal modes.
 *   Enum types:
 *     pmode == production mode
 *     cmode == consumption mode
 *     dmode == h/w dequeue mode
 *   Enum values use 3-letter codes. The first letter matches the portal mode,
 *   the remaining two letters indicate:
 *     ci == cache-inhibited portal register
 *     ce == cache-enabled portal register
 *     vb == in-band valid-bit (cache-enabled)
 *     dc == DCA (Discrete Consumption Acknowledgment), DQRR-only
 *   As for "enum qm_dqrr_dmode", it should be self-explanatory.
 *   (An illustrative sketch of this naming scheme follows the enum
 *   declarations below.)
 */
enum qm_eqcr_pmode {};
enum qm_dqrr_dmode {};
enum qm_dqrr_pmode {};
enum qm_dqrr_cmode {};
enum qm_mr_pmode {};
enum qm_mr_cmode {};
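
/*
 * Purely to illustrate the 3-letter naming scheme described above (these are
 * hypothetical enumerators, not the driver's definitions): for the EQCR
 * production mode, "pci" would mean producing via the cache-inhibited PI
 * register, "pce" via the cache-enabled PI register, and "pvb" via the
 * in-band valid-bit.
 */
enum example_eqcr_pmode {
	example_eqcr_pci,	/* produce via cache-inhibited PI register */
	example_eqcr_pce,	/* produce via cache-enabled PI register */
	example_eqcr_pvb,	/* produce via in-band valid-bit */
};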

/* --- Portal structures --- */

#define QM_EQCR_SIZE
#define QM_DQRR_SIZE
#define QM_MR_SIZE

/* "Enqueue Command" */
struct qm_eqcr_entry {} __packed __aligned();
#define QM_EQCR_VERB_VBIT
#define QM_EQCR_VERB_CMD_MASK
#define QM_EQCR_VERB_CMD_ENQUEUE
#define QM_EQCR_SEQNUM_NESN
#define QM_EQCR_SEQNUM_NLIS
#define QM_EQCR_SEQNUM_SEQMASK

struct qm_eqcr {};

struct qm_dqrr {};

struct qm_mr {};

/* MC (Management Command) command */
/* "FQ" command layout */
struct qm_mcc_fq {} __packed;

/* "CGR" command layout */
struct qm_mcc_cgr {};

#define QM_MCC_VERB_VBIT
#define QM_MCC_VERB_MASK
#define QM_MCC_VERB_INITFQ_PARKED
#define QM_MCC_VERB_INITFQ_SCHED
#define QM_MCC_VERB_QUERYFQ
#define QM_MCC_VERB_QUERYFQ_NP
#define QM_MCC_VERB_QUERYWQ
#define QM_MCC_VERB_QUERYWQ_DEDICATED
#define QM_MCC_VERB_ALTER_SCHED
#define QM_MCC_VERB_ALTER_FE
#define QM_MCC_VERB_ALTER_RETIRE
#define QM_MCC_VERB_ALTER_OOS
#define QM_MCC_VERB_ALTER_FQXON
#define QM_MCC_VERB_ALTER_FQXOFF
#define QM_MCC_VERB_INITCGR
#define QM_MCC_VERB_MODIFYCGR
#define QM_MCC_VERB_CGRTESTWRITE
#define QM_MCC_VERB_QUERYCGR
#define QM_MCC_VERB_QUERYCONGESTION
union qm_mc_command {};

/* MC (Management Command) result */
/* "Query FQ" */
struct qm_mcr_queryfq {} __packed;

/* "Alter FQ State Commands" */
struct qm_mcr_alterfq {};
#define QM_MCR_VERB_RRID
#define QM_MCR_VERB_MASK
#define QM_MCR_VERB_INITFQ_PARKED
#define QM_MCR_VERB_INITFQ_SCHED
#define QM_MCR_VERB_QUERYFQ
#define QM_MCR_VERB_QUERYFQ_NP
#define QM_MCR_VERB_QUERYWQ
#define QM_MCR_VERB_QUERYWQ_DEDICATED
#define QM_MCR_VERB_ALTER_SCHED
#define QM_MCR_VERB_ALTER_FE
#define QM_MCR_VERB_ALTER_RETIRE
#define QM_MCR_VERB_ALTER_OOS
#define QM_MCR_RESULT_NULL
#define QM_MCR_RESULT_OK
#define QM_MCR_RESULT_ERR_FQID
#define QM_MCR_RESULT_ERR_FQSTATE
#define QM_MCR_RESULT_ERR_NOTEMPTY
#define QM_MCR_RESULT_ERR_BADCHANNEL
#define QM_MCR_RESULT_PENDING
#define QM_MCR_RESULT_ERR_BADCOMMAND
#define QM_MCR_FQS_ORLPRESENT
#define QM_MCR_FQS_NOTEMPTY
#define QM_MCR_TIMEOUT
union qm_mc_result {};

struct qm_mc {};

struct qm_addr {};

struct qm_portal {} ____cacheline_aligned;

/* Cache-inhibited register access. */
static inline u32 qm_in(struct qm_portal *p, u32 offset)
{}

static inline void qm_out(struct qm_portal *p, u32 offset, u32 val)
{}
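
/*
 * Minimal sketch of the cache-inhibited accessors above, assuming the caller
 * passes the portal's cache-inhibited base mapping; the example_* names are
 * hypothetical. As the earlier comment notes, plain __raw_*() accessors are
 * sufficient because the programming model already provides the ordering.
 */
static inline u32 example_qm_in(void __iomem *ci_base, u32 offset)
{
	return __raw_readl(ci_base + offset);
}

static inline void example_qm_out(void __iomem *ci_base, u32 offset, u32 val)
{
	__raw_writel(val, ci_base + offset);
}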

/* Cache-enabled portal access */
static inline void qm_cl_invalidate(struct qm_portal *p, u32 offset)
{}

static inline void qm_cl_touch_ro(struct qm_portal *p, u32 offset)
{}

static inline u32 qm_ce_in(struct qm_portal *p, u32 offset)
{}

/* --- EQCR API --- */

#define EQCR_SHIFT
#define EQCR_CARRY

/* Bit-wise logic to wrap a ring pointer by clearing the "carry bit" */
static struct qm_eqcr_entry *eqcr_carryclear(struct qm_eqcr_entry *p)
{}

/* Bit-wise logic to convert a ring pointer to a ring index */
static int eqcr_ptr2idx(struct qm_eqcr_entry *e)
{}

/* Increment the 'cursor' ring pointer, taking 'vbit' into account */
static inline void eqcr_inc(struct qm_eqcr *eqcr)
{}
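
/*
 * Hedged sketch of the ring-pointer arithmetic used by the three helpers
 * above, assuming the usual QMan geometry of 8 EQCR entries of 64 bytes each
 * (so the ring spans 512 bytes and bit 9 of an entry address acts as the
 * "carry"); the EXAMPLE_* constants and example_* functions are illustrative
 * only, not the driver's definitions.
 */
#define EXAMPLE_EQCR_SIZE	8	/* assumed number of ring entries */
#define EXAMPLE_EQCR_SHIFT	6	/* assumed log2 of the 64-byte entry size */
#define EXAMPLE_EQCR_CARRY	\
	((unsigned long)EXAMPLE_EQCR_SIZE << EXAMPLE_EQCR_SHIFT)

static inline void *example_eqcr_carryclear(void *p)
{
	/* wrap back to the start of the ring by clearing the carry bit */
	return (void *)((unsigned long)p & ~EXAMPLE_EQCR_CARRY);
}

static inline int example_eqcr_ptr2idx(const void *e)
{
	/* the entry index lives in the address bits above the entry size */
	return ((unsigned long)e >> EXAMPLE_EQCR_SHIFT) &
	       (EXAMPLE_EQCR_SIZE - 1);
}

/*
 * eqcr_inc() builds on this: it advances the cursor by one entry and, when
 * the carry-clear actually wrapped the pointer, toggles the cached valid bit
 * so producer and consumer can tell new entries from stale ones.
 */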

static inline int qm_eqcr_init(struct qm_portal *portal,
				enum qm_eqcr_pmode pmode,
				unsigned int eq_stash_thresh,
				int eq_stash_prio)
{}

static inline void qm_eqcr_finish(struct qm_portal *portal)
{}

static inline struct qm_eqcr_entry *qm_eqcr_start_no_stash(struct qm_portal
								 *portal)
{}

static inline struct qm_eqcr_entry *qm_eqcr_start_stash(struct qm_portal
								*portal)
{}

static inline void eqcr_commit_checks(struct qm_eqcr *eqcr)
{}

static inline void qm_eqcr_pvb_commit(struct qm_portal *portal, u8 myverb)
{}

static inline void qm_eqcr_cce_prefetch(struct qm_portal *portal)
{}

static inline u8 qm_eqcr_cce_update(struct qm_portal *portal)
{}

static inline void qm_eqcr_set_ithresh(struct qm_portal *portal, u8 ithresh)
{}

static inline u8 qm_eqcr_get_avail(struct qm_portal *portal)
{}

static inline u8 qm_eqcr_get_fill(struct qm_portal *portal)
{}

/* --- DQRR API --- */

#define DQRR_SHIFT
#define DQRR_CARRY

static const struct qm_dqrr_entry *dqrr_carryclear(
					const struct qm_dqrr_entry *p)
{}

static inline int dqrr_ptr2idx(const struct qm_dqrr_entry *e)
{}

static const struct qm_dqrr_entry *dqrr_inc(const struct qm_dqrr_entry *e)
{}

static inline void qm_dqrr_set_maxfill(struct qm_portal *portal, u8 mf)
{}

static inline int qm_dqrr_init(struct qm_portal *portal,
			       const struct qm_portal_config *config,
			       enum qm_dqrr_dmode dmode,
			       enum qm_dqrr_pmode pmode,
			       enum qm_dqrr_cmode cmode, u8 max_fill)
{}

static inline void qm_dqrr_finish(struct qm_portal *portal)
{}

static inline const struct qm_dqrr_entry *qm_dqrr_current(
						struct qm_portal *portal)
{}

static inline u8 qm_dqrr_next(struct qm_portal *portal)
{}

static inline void qm_dqrr_pvb_update(struct qm_portal *portal)
{}

static inline void qm_dqrr_cdc_consume_1ptr(struct qm_portal *portal,
					const struct qm_dqrr_entry *dq,
					int park)
{}

static inline void qm_dqrr_cdc_consume_n(struct qm_portal *portal, u32 bitmask)
{}

static inline void qm_dqrr_sdqcr_set(struct qm_portal *portal, u32 sdqcr)
{}

static inline void qm_dqrr_vdqcr_set(struct qm_portal *portal, u32 vdqcr)
{}

static inline int qm_dqrr_set_ithresh(struct qm_portal *portal, u8 ithresh)
{}

/* --- MR API --- */

#define MR_SHIFT
#define MR_CARRY

static union qm_mr_entry *mr_carryclear(union qm_mr_entry *p)
{}

static inline int mr_ptr2idx(const union qm_mr_entry *e)
{}

static inline union qm_mr_entry *mr_inc(union qm_mr_entry *e)
{}

static inline int qm_mr_init(struct qm_portal *portal, enum qm_mr_pmode pmode,
			     enum qm_mr_cmode cmode)
{}

static inline void qm_mr_finish(struct qm_portal *portal)
{}

static inline const union qm_mr_entry *qm_mr_current(struct qm_portal *portal)
{}

static inline int qm_mr_next(struct qm_portal *portal)
{}

static inline void qm_mr_pvb_update(struct qm_portal *portal)
{}

static inline void qm_mr_cci_consume(struct qm_portal *portal, u8 num)
{}

static inline void qm_mr_cci_consume_to_current(struct qm_portal *portal)
{}

static inline void qm_mr_set_ithresh(struct qm_portal *portal, u8 ithresh)
{}

/* --- Management command API --- */

static inline int qm_mc_init(struct qm_portal *portal)
{}

static inline void qm_mc_finish(struct qm_portal *portal)
{}

static inline union qm_mc_command *qm_mc_start(struct qm_portal *portal)
{}

static inline void qm_mc_commit(struct qm_portal *portal, u8 myverb)
{}

static inline union qm_mc_result *qm_mc_result(struct qm_portal *portal)
{}

static inline int qm_mc_result_timeout(struct qm_portal *portal,
				       union qm_mc_result **mcr)
{}
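
/*
 * Hedged usage sketch of the management-command interface above: start a
 * command, fill it in, commit it with a verb, then poll for the result. The
 * example_alter_sched() helper, the mcc->fq.fqid / mcr->result field names
 * and the error handling are assumptions for illustration only.
 */
static int example_alter_sched(struct qm_portal *portal, u32 fqid)
{
	union qm_mc_command *mcc;
	union qm_mc_result *mcr;

	mcc = qm_mc_start(portal);
	mcc->fq.fqid = cpu_to_be32(fqid);	/* assumed command layout */
	qm_mc_commit(portal, QM_MCC_VERB_ALTER_SCHED);
	if (!qm_mc_result_timeout(portal, &mcr))
		return -ETIMEDOUT;		/* MC did not respond in time */

	return mcr->result == QM_MCR_RESULT_OK ? 0 : -EIO;
}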

static inline void fq_set(struct qman_fq *fq, u32 mask)
{}

static inline void fq_clear(struct qman_fq *fq, u32 mask)
{}

static inline int fq_isset(struct qman_fq *fq, u32 mask)
{}

static inline int fq_isclear(struct qman_fq *fq, u32 mask)
{}

struct qman_portal {};

static cpumask_t affine_mask;
static DEFINE_SPINLOCK(affine_mask_lock);
static u16 affine_channels[NR_CPUS];
static DEFINE_PER_CPU(struct qman_portal, qman_affine_portal);
struct qman_portal *affine_portals[NR_CPUS];

static inline struct qman_portal *get_affine_portal(void)
{}

static inline void put_affine_portal(void)
{}


static inline struct qman_portal *get_portal_for_channel(u16 channel)
{}

static struct workqueue_struct *qm_portal_wq;

int qman_dqrr_set_ithresh(struct qman_portal *portal, u8 ithresh)
{}
EXPORT_SYMBOL(qman_dqrr_set_ithresh);

void qman_dqrr_get_ithresh(struct qman_portal *portal, u8 *ithresh)
{}
EXPORT_SYMBOL(qman_dqrr_get_ithresh);

void qman_portal_get_iperiod(struct qman_portal *portal, u32 *iperiod)
{}
EXPORT_SYMBOL(qman_portal_get_iperiod);

int qman_portal_set_iperiod(struct qman_portal *portal, u32 iperiod)
{}
EXPORT_SYMBOL(qman_portal_set_iperiod);

int qman_wq_alloc(void)
{}


void qman_enable_irqs(void)
{}

/*
 * This is what everything can wait on, even if it migrates to a different cpu
 * from the one whose affine portal it is waiting on.
 */
static DECLARE_WAIT_QUEUE_HEAD(affine_queue);

static struct qman_fq **fq_table;
static u32 num_fqids;

int qman_alloc_fq_table(u32 _num_fqids)
{}

static struct qman_fq *idx_to_fq(u32 idx)
{}

/*
 * Only returns full-service fq objects, not enqueue-only
 * references (QMAN_FQ_FLAG_NO_MODIFY).
 */
static struct qman_fq *fqid_to_fq(u32 fqid)
{}

static struct qman_fq *tag_to_fq(u32 tag)
{}

static u32 fq_to_tag(struct qman_fq *fq)
{}
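
/*
 * Minimal sketch of the table-based lookup above: fq_table holds one pointer
 * per possible FQ index, and the "tag" programmed into hardware context is
 * simply that index, so dequeue/message-ring entries can be mapped back to a
 * qman_fq without placing kernel pointers in hardware. The example_* name and
 * the bounds check are illustrative assumptions.
 */
static struct qman_fq *example_idx_to_fq(u32 idx)
{
	if (WARN_ON(idx >= num_fqids))
		return NULL;

	return fq_table[idx];	/* NULL if no fq object owns this index */
}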

static u32 __poll_portal_slow(struct qman_portal *p, u32 is);
static inline unsigned int __poll_portal_fast(struct qman_portal *p,
					unsigned int poll_limit, bool sched_napi);
static void qm_congestion_task(struct work_struct *work);
static void qm_mr_process_task(struct work_struct *work);

static irqreturn_t portal_isr(int irq, void *ptr)
{}

static int drain_mr_fqrni(struct qm_portal *p)
{}

static int qman_create_portal(struct qman_portal *portal,
			      const struct qm_portal_config *c,
			      const struct qman_cgrs *cgrs)
{}

struct qman_portal *qman_create_affine_portal(const struct qm_portal_config *c,
					      const struct qman_cgrs *cgrs)
{}

static void qman_destroy_portal(struct qman_portal *qm)
{}

const struct qm_portal_config *qman_destroy_affine_portal(void)
{}

/* Inline helper to reduce nesting in __poll_portal_slow() */
static inline void fq_state_change(struct qman_portal *p, struct qman_fq *fq,
				   const union qm_mr_entry *msg, u8 verb)
{}

static void qm_congestion_task(struct work_struct *work)
{}

static void qm_mr_process_task(struct work_struct *work)
{}

static u32 __poll_portal_slow(struct qman_portal *p, u32 is)
{}

/*
 * Remove some slow-path work from the "fast path" and make sure it isn't
 * inlined.
 */
static noinline void clear_vdqcr(struct qman_portal *p, struct qman_fq *fq)
{}

/*
 * The only states that would conflict with other things if they ran at the
 * same time on the same cpu are:
 *
 *   (i) setting/clearing vdqcr_owned, and
 *  (ii) clearing the NE (Not Empty) flag.
 *
 * Both are safe because:
 *
 *   (i) this clearing can only occur after qman_volatile_dequeue() has set the
 *	 vdqcr_owned field (which it does before setting VDQCR), and
 *	 qman_volatile_dequeue() blocks interrupts and preemption while this is
 *	 done, so we can't interfere;
 *  (ii) the NE flag is only cleared after qman_retire_fq() has set it, and as
 *	 with (i) that API prevents us from interfering until it's safe.
 *
 * The good thing is that qman_volatile_dequeue() and qman_retire_fq() run far
 * less frequently (i.e. per-FQ) than __poll_portal_fast() does, so the net
 * advantage comes from this function not having to "lock" anything at all.
 *
 * Note also that the callbacks are invoked at points which are safe against
 * the above potential conflicts, but that this function itself is not
 * re-entrant (this is because the function tracks one end of each FIFO in the
 * portal and we do *not* want to lock that). So the consequence is that it is
 * safe for user callbacks to call into any QMan API.
 */
static inline unsigned int __poll_portal_fast(struct qman_portal *p,
					unsigned int poll_limit, bool sched_napi)
{}

void qman_p_irqsource_add(struct qman_portal *p, u32 bits)
{}
EXPORT_SYMBOL(qman_p_irqsource_add);

void qman_p_irqsource_remove(struct qman_portal *p, u32 bits)
{}
EXPORT_SYMBOL(qman_p_irqsource_remove);

const cpumask_t *qman_affine_cpus(void)
{}
EXPORT_SYMBOL(qman_affine_cpus);

u16 qman_affine_channel(int cpu)
{}
EXPORT_SYMBOL(qman_affine_channel);

struct qman_portal *qman_get_affine_portal(int cpu)
{}
EXPORT_SYMBOL(qman_get_affine_portal);

int qman_start_using_portal(struct qman_portal *p, struct device *dev)
{}
EXPORT_SYMBOL(qman_start_using_portal);

int qman_p_poll_dqrr(struct qman_portal *p, unsigned int limit)
{}
EXPORT_SYMBOL(qman_p_poll_dqrr);

void qman_p_static_dequeue_add(struct qman_portal *p, u32 pools)
{}
EXPORT_SYMBOL(qman_p_static_dequeue_add);

/* Frame queue API */

static const char *mcr_result_str(u8 result)
{}

int qman_create_fq(u32 fqid, u32 flags, struct qman_fq *fq)
{}
EXPORT_SYMBOL(qman_create_fq);

void qman_destroy_fq(struct qman_fq *fq)
{}
EXPORT_SYMBOL(qman_destroy_fq);

u32 qman_fq_fqid(struct qman_fq *fq)
{}
EXPORT_SYMBOL(qman_fq_fqid);

int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts)
{}
EXPORT_SYMBOL(qman_init_fq);

int qman_schedule_fq(struct qman_fq *fq)
{}
EXPORT_SYMBOL(qman_schedule_fq);

int qman_retire_fq(struct qman_fq *fq, u32 *flags)
{}
EXPORT_SYMBOL(qman_retire_fq);

int qman_oos_fq(struct qman_fq *fq)
{}
EXPORT_SYMBOL(qman_oos_fq);

int qman_query_fq(struct qman_fq *fq, struct qm_fqd *fqd)
{}

int qman_query_fq_np(struct qman_fq *fq, struct qm_mcr_queryfq_np *np)
{}
EXPORT_SYMBOL(qman_query_fq_np);

static int qman_query_cgr(struct qman_cgr *cgr,
			  struct qm_mcr_querycgr *cgrd)
{}

int qman_query_cgr_congested(struct qman_cgr *cgr, bool *result)
{}
EXPORT_SYMBOL(qman_query_cgr_congested);

/* internal function used as a wait_event() expression */
static int set_p_vdqcr(struct qman_portal *p, struct qman_fq *fq, u32 vdqcr)
{}
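
/*
 * Hedged sketch of the handshake described in the comment above
 * __poll_portal_fast(): the portal's vdqcr_owned slot is claimed with local
 * interrupts disabled so the fast path can never observe a half-initialised
 * volatile dequeue. The example_* name, the vdqcr_owned/p members of
 * struct qman_portal and the QMAN_FQ_STATE_VDQCR flag (from the public
 * qman.h header) are assumptions for illustration.
 */
static int example_set_p_vdqcr(struct qman_portal *p, struct qman_fq *fq,
			       u32 vdqcr)
{
	unsigned long irqflags;
	int ret = -EBUSY;

	local_irq_save(irqflags);
	if (p->vdqcr_owned || fq_isset(fq, QMAN_FQ_STATE_VDQCR))
		goto out;	/* a volatile dequeue is already outstanding */

	fq_set(fq, QMAN_FQ_STATE_VDQCR);
	p->vdqcr_owned = fq;
	qm_dqrr_vdqcr_set(&p->p, vdqcr);
	ret = 0;
out:
	local_irq_restore(irqflags);
	return ret;
}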

static int set_vdqcr(struct qman_portal **p, struct qman_fq *fq, u32 vdqcr)
{}

static int wait_vdqcr_start(struct qman_portal **p, struct qman_fq *fq,
				u32 vdqcr, u32 flags)
{}

int qman_volatile_dequeue(struct qman_fq *fq, u32 flags, u32 vdqcr)
{}
EXPORT_SYMBOL(qman_volatile_dequeue);

static void update_eqcr_ci(struct qman_portal *p, u8 avail)
{}

int qman_enqueue(struct qman_fq *fq, const struct qm_fd *fd)
{}
EXPORT_SYMBOL(qman_enqueue);

static int qm_modify_cgr(struct qman_cgr *cgr, u32 flags,
			 struct qm_mcc_initcgr *opts)
{}

#define PORTAL_IDX(n)

/* congestion state change notification target update control */
static void qm_cgr_cscn_targ_set(struct __qm_mc_cgr *cgr, int pi, u32 val)
{}

static void qm_cgr_cscn_targ_clear(struct __qm_mc_cgr *cgr, int pi, u32 val)
{}

static u8 qman_cgr_cpus[CGR_NUM];

void qman_init_cgr_all(void)
{}

int qman_create_cgr(struct qman_cgr *cgr, u32 flags,
		    struct qm_mcc_initcgr *opts)
{}
EXPORT_SYMBOL(qman_create_cgr);

static struct qman_portal *qman_cgr_get_affine_portal(struct qman_cgr *cgr)
{}

int qman_delete_cgr(struct qman_cgr *cgr)
{}
EXPORT_SYMBOL(qman_delete_cgr);

struct cgr_comp {};

static void qman_delete_cgr_smp_call(void *p)
{}

void qman_delete_cgr_safe(struct qman_cgr *cgr)
{}
EXPORT_SYMBOL(qman_delete_cgr_safe);

static int qman_update_cgr(struct qman_cgr *cgr, struct qm_mcc_initcgr *opts)
{}

struct update_cgr_params {};

static void qman_update_cgr_smp_call(void *p)
{}

int qman_update_cgr_safe(struct qman_cgr *cgr, struct qm_mcc_initcgr *opts)
{}
EXPORT_SYMBOL(qman_update_cgr_safe);

/* Cleanup FQs */

static int _qm_mr_consume_and_match_verb(struct qm_portal *p, int v)
{}

static int _qm_dqrr_consume_and_match(struct qm_portal *p, u32 fqid, int s,
				      bool wait)
{}

#define qm_mr_drain(p, V)

#define qm_dqrr_drain(p, f, S)

#define qm_dqrr_drain_wait(p, f, S)

#define qm_dqrr_drain_nomatch(p)

int qman_shutdown_fq(u32 fqid)
{}

const struct qm_portal_config *qman_get_qm_portal_config(
						struct qman_portal *portal)
{}
EXPORT_SYMBOL(qman_get_qm_portal_config);

struct gen_pool *qm_fqalloc; /* FQID allocator */
struct gen_pool *qm_qpalloc; /* pool-channel allocator */
struct gen_pool *qm_cgralloc; /* CGR ID allocator */

static int qman_alloc_range(struct gen_pool *p, u32 *result, u32 cnt)
{}
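
/*
 * Minimal sketch of the genalloc-backed ID allocation used by the three
 * allocators above, assuming the pools are seeded with IDs offset by
 * DPAA_GENALLOC_OFF (from the driver's private headers) so that ID 0 remains
 * representable, since gen_pool_alloc() returns 0 on failure. The example_*
 * name is hypothetical.
 */
static int example_alloc_range(struct gen_pool *p, u32 *result, u32 cnt)
{
	unsigned long addr;

	if (!p)
		return -ENODEV;		/* allocator not seeded yet */

	addr = gen_pool_alloc(p, cnt);	/* reserve cnt consecutive IDs */
	if (!addr)
		return -ENOMEM;

	*result = addr & ~DPAA_GENALLOC_OFF;	/* strip the seeding offset */

	return 0;
}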

int qman_alloc_fqid_range(u32 *result, u32 count)
{}
EXPORT_SYMBOL(qman_alloc_fqid_range);

int qman_alloc_pool_range(u32 *result, u32 count)
{}
EXPORT_SYMBOL(qman_alloc_pool_range);

int qman_alloc_cgrid_range(u32 *result, u32 count)
{}
EXPORT_SYMBOL(qman_alloc_cgrid_range);

int qman_release_fqid(u32 fqid)
{}
EXPORT_SYMBOL(qman_release_fqid);

static int qpool_cleanup(u32 qp)
{}

int qman_release_pool(u32 qp)
{}
EXPORT_SYMBOL(qman_release_pool);

static int cgr_cleanup(u32 cgrid)
{}

int qman_release_cgrid(u32 cgrid)
{}
EXPORT_SYMBOL(qman_release_cgrid);