// linux/drivers/infiniband/core/cq.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015 HGST, a Western Digital Company.
 */
#include <linux/err.h>
#include <linux/slab.h>
#include <rdma/ib_verbs.h>

#include "core_priv.h"

#include <trace/events/rdma_core.h>
/* Max size for shared CQ, may require tuning */
#define IB_MAX_SHARED_CQ_SZ		4096U

/* # of WCs to poll for with a single call to ib_poll_cq */
#define IB_POLL_BATCH			16
#define IB_POLL_BATCH_DIRECT		8

/* # of WCs to iterate over before yielding */
#define IB_POLL_BUDGET_IRQ		256
#define IB_POLL_BUDGET_WORKQUEUE	65536

/* Re-arm flags: request next completion and report any we missed meanwhile */
#define IB_POLL_FLAGS \
	(IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS)

/*
 * Dynamic interrupt moderation (DIM) profile table: each entry is a
 * (usec, pkts, comps, cq_period_mode) tuple.  The dim core walks this
 * table left/right to find the moderation level that maximizes
 * completions-per-interrupt without hurting latency.
 */
static const struct dim_cq_moder
rdma_dim_prof[RDMA_DIM_PARAMS_NUM_PROFILES] = {
	{1,   0, 1,  0},
	{1,   0, 4,  0},
	{2,   0, 4,  0},
	{2,   0, 8,  0},
	{4,   0, 8,  0},
	{16,  0, 8,  0},
	{16,  0, 16, 0},
	{32,  0, 16, 0},
	{32,  0, 32, 0},
};

static void ib_cq_rdma_dim_work(struct work_struct *w)
{}

static void rdma_dim_init(struct ib_cq *cq)
{}

static void rdma_dim_destroy(struct ib_cq *cq)
{}

/*
 * Thin tracing wrapper around ib_poll_cq(): returns the number of
 * completions written into @wc (or a negative errno from the driver).
 */
static int __poll_cq(struct ib_cq *cq, int num_entries, struct ib_wc *wc)
{
	int rc;

	rc = ib_poll_cq(cq, num_entries, wc);
	trace_cq_poll(cq, num_entries, rc);
	return rc;
}

static int __ib_process_cq(struct ib_cq *cq, int budget, struct ib_wc *wcs,
			   int batch)
{}

/**
 * ib_process_cq_direct - process a CQ in caller context
 * @cq:		CQ to process
 * @budget:	number of CQEs to poll for
 *
 * This function is used to process all outstanding CQ entries.
 * It does not offload CQ processing to a different context and does
 * not ask for completion interrupts from the HCA.
 * Using direct processing on CQ with non IB_POLL_DIRECT type may trigger
 * concurrent processing.
 *
 * Note: do not pass -1 as %budget unless it is guaranteed that the number
 * of completions that will be processed is small.
 */
int ib_process_cq_direct(struct ib_cq *cq, int budget)
{}
EXPORT_SYMBOL();

/*
 * IB_POLL_DIRECT CQs never request notification, so a completion event
 * here indicates a driver/ULP bug — warn once and ignore it.
 */
static void ib_cq_completion_direct(struct ib_cq *cq, void *private)
{
	WARN_ONCE(1, "got unsolicited completion for CQ 0x%p\n", cq);
}

static int ib_poll_handler(struct irq_poll *iop, int budget)
{}

static void ib_cq_completion_softirq(struct ib_cq *cq, void *private)
{}

static void ib_cq_poll_work(struct work_struct *work)
{}

static void ib_cq_completion_workqueue(struct ib_cq *cq, void *private)
{}

/**
 * __ib_alloc_cq - allocate a completion queue
 * @dev:		device to allocate the CQ for
 * @private:		driver private data, accessible from cq->cq_context
 * @nr_cqe:		number of CQEs to allocate
 * @comp_vector:	HCA completion vectors for this CQ
 * @poll_ctx:		context to poll the CQ from.
 * @caller:		module owner name.
 *
 * This is the proper interface to allocate a CQ for in-kernel users. A
 * CQ allocated with this interface will automatically be polled from the
 * specified context. The ULP must use wr->wr_cqe instead of wr->wr_id
 * to use this CQ abstraction.
 */
struct ib_cq *__ib_alloc_cq(struct ib_device *dev, void *private, int nr_cqe,
			    int comp_vector, enum ib_poll_context poll_ctx,
			    const char *caller)
{}
EXPORT_SYMBOL();

/**
 * __ib_alloc_cq_any - allocate a completion queue
 * @dev:		device to allocate the CQ for
 * @private:		driver private data, accessible from cq->cq_context
 * @nr_cqe:		number of CQEs to allocate
 * @poll_ctx:		context to poll the CQ from
 * @caller:		module owner name
 *
 * Attempt to spread ULP Completion Queues over each device's interrupt
 * vectors. A simple best-effort mechanism is used.
 */
struct ib_cq *__ib_alloc_cq_any(struct ib_device *dev, void *private,
				int nr_cqe, enum ib_poll_context poll_ctx,
				const char *caller)
{}
EXPORT_SYMBOL();

/**
 * ib_free_cq - free a completion queue
 * @cq:		completion queue to free.
 */
void ib_free_cq(struct ib_cq *cq)
{}
EXPORT_SYMBOL();

void ib_cq_pool_cleanup(struct ib_device *dev)
{}

static int ib_alloc_cqs(struct ib_device *dev, unsigned int nr_cqes,
			enum ib_poll_context poll_ctx)
{}

/**
 * ib_cq_pool_get() - Find the least used completion queue that matches
 *   a given cpu hint (or least used for wild card affinity) and fits
 *   nr_cqe.
 * @dev: rdma device
 * @nr_cqe: number of needed cqe entries
 * @comp_vector_hint: completion vector hint (-1) for the driver to assign
 *   a comp vector based on internal counter
 * @poll_ctx: cq polling context
 *
 * Finds a cq that satisfies @comp_vector_hint and @nr_cqe requirements and
 * claim entries in it for us.  In case there is no available cq, allocate
 * a new cq with the requirements and add it to the device pool.
 * IB_POLL_DIRECT cannot be used for shared cqs so it is not a valid value
 * for @poll_ctx.
 */
struct ib_cq *ib_cq_pool_get(struct ib_device *dev, unsigned int nr_cqe,
			     int comp_vector_hint,
			     enum ib_poll_context poll_ctx)
{}
EXPORT_SYMBOL();

/**
 * ib_cq_pool_put - Return a CQ taken from a shared pool.
 * @cq: The CQ to return.
 * @nr_cqe: The max number of cqes that the user had requested.
 */
void ib_cq_pool_put(struct ib_cq *cq, unsigned int nr_cqe)
{}
EXPORT_SYMBOL();