linux/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c

// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */

#include "mlx5_core.h"
#include "mlx5_irq.h"
#include "pci_irq.h"

static void cpu_put(struct mlx5_irq_pool *pool, int cpu)
{}

static void cpu_get(struct mlx5_irq_pool *pool, int cpu)
{}
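
/*
 * The empty bodies above are stubs in this listing.  A minimal sketch of the
 * bookkeeping they would need is shown below; it assumes the pool tracks how
 * many IRQs are bound to each CPU in a pool->irqs_per_cpu array (a field of
 * struct mlx5_irq_pool in pci_irq.h that is not shown here), and is not
 * necessarily the upstream implementation.
 */
static void cpu_put(struct mlx5_irq_pool *pool, int cpu)
{
	pool->irqs_per_cpu[cpu]--;	/* one IRQ fewer bound to this CPU */
}

static void cpu_get(struct mlx5_irq_pool *pool, int cpu)
{
	pool->irqs_per_cpu[cpu]++;	/* one IRQ more bound to this CPU */
}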

/* Gets the least loaded CPU, i.e. the CPU with the fewest IRQs bound to it */
static int cpu_get_least_loaded(struct mlx5_irq_pool *pool,
				const struct cpumask *req_mask)
{}
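
/*
 * A minimal sketch of the stub above, under the same pool->irqs_per_cpu
 * assumption (pool->dev is likewise assumed to point at the owning
 * mlx5_core_dev): scan the online CPUs in req_mask, pick the one with the
 * fewest bound IRQs, and fall back to the first online CPU if req_mask
 * contains no online CPU at all.  Illustrative only, not necessarily the
 * upstream code.
 */
static int cpu_get_least_loaded(struct mlx5_irq_pool *pool,
				const struct cpumask *req_mask)
{
	int best_cpu = -1;
	int cpu;

	for_each_cpu_and(cpu, req_mask, cpu_online_mask) {
		/* A CPU with no IRQs bound to it cannot be beaten. */
		if (!pool->irqs_per_cpu[cpu]) {
			best_cpu = cpu;
			break;
		}
		if (best_cpu < 0 ||
		    pool->irqs_per_cpu[cpu] < pool->irqs_per_cpu[best_cpu])
			best_cpu = cpu;
	}
	if (best_cpu == -1) {
		/* req_mask contains no online CPU; fall back gracefully. */
		mlx5_core_err(pool->dev, "No online CPUs in req_mask (%*pbl)\n",
			      cpumask_pr_args(req_mask));
		best_cpu = cpumask_first(cpu_online_mask);
	}
	pool->irqs_per_cpu[best_cpu]++;	/* account for the IRQ about to be bound */
	return best_cpu;
}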

/* Creating an IRQ from irq_pool */
static struct mlx5_irq *
irq_pool_request_irq(struct mlx5_irq_pool *pool, struct irq_affinity_desc *af_desc)
{}
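
/*
 * A sketch of how an IRQ might be created from the pool.  It assumes the pool
 * stores its IRQs in an xarray (pool->irqs, bounded by the pool->xa_num_irqs
 * limit) and that a lower-level allocation helper such as mlx5_irq_alloc()
 * exists in pci_irq.h with this shape; neither is confirmed by this listing.
 */
static struct mlx5_irq *
irq_pool_request_irq(struct mlx5_irq_pool *pool, struct irq_affinity_desc *af_desc)
{
	struct irq_affinity_desc auto_desc = {};
	u32 irq_index;
	int err;

	/* Reserve a free index in the pool for the new IRQ. */
	err = xa_alloc(&pool->irqs, &irq_index, NULL, pool->xa_num_irqs, GFP_KERNEL);
	if (err)
		return ERR_PTR(err);
	if (pool->irqs_per_cpu) {
		if (cpumask_weight(&af_desc->mask) > 1)
			/* Several candidate CPUs: pin the new IRQ to the least
			 * loaded CPU of the requested mask.
			 */
			cpumask_set_cpu(cpu_get_least_loaded(pool, &af_desc->mask),
					&auto_desc.mask);
		else
			/* A single candidate CPU: just account for it. */
			cpu_get(pool, cpumask_first(&af_desc->mask));
	}
	return mlx5_irq_alloc(pool, irq_index,
			      cpumask_empty(&auto_desc.mask) ? af_desc : &auto_desc);
}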

/* Looking for the IRQ with the smallest refcount that fits req_mask.
 * If pool is sf_comp_pool, then we are looking for an IRQ with any of the
 * requested CPUs in req_mask.
 * For example: req_mask = 0xf, irq0_mask = 0x10, irq1_mask = 0x1. irq0_mask
 * isn't a subset of req_mask, so we skip it. irq1_mask is a subset of
 * req_mask, so we don't skip it.
 * If pool is sf_ctrl_pool, then all IRQs have the same mask, so any IRQ will
 * fit. And since a mask is a subset of itself, we will pass the first if below.
 */
static struct mlx5_irq *
irq_pool_find_least_loaded(struct mlx5_irq_pool *pool, const struct cpumask *req_mask)
{}
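
/*
 * A sketch of the search described above.  It assumes the pool's IRQs live in
 * the pool->irqs xarray (bounded by pool->xa_num_irqs), that pool->lock is
 * held by the caller, and that per-IRQ helpers such as
 * mlx5_irq_get_affinity_mask() and a locked refcount reader (here called
 * mlx5_irq_read_locked()) exist; those names are assumptions, not confirmed
 * by this listing.
 */
static struct mlx5_irq *
irq_pool_find_least_loaded(struct mlx5_irq_pool *pool, const struct cpumask *req_mask)
{
	struct mlx5_irq *least_loaded = NULL;
	struct mlx5_irq *iter;
	int least_refcount = 0;
	unsigned long index;

	lockdep_assert_held(&pool->lock);
	xa_for_each_range(&pool->irqs, index, iter,
			  pool->xa_num_irqs.min, pool->xa_num_irqs.max) {
		struct cpumask *iter_mask = mlx5_irq_get_affinity_mask(iter);
		int iter_refcount = mlx5_irq_read_locked(iter);

		if (!cpumask_intersects(req_mask, iter_mask))
			/* This IRQ serves none of the requested CPUs; skip it. */
			continue;
		if (iter_refcount < pool->min_threshold)
			/* A lightly used IRQ fits; no need to keep searching. */
			return iter;
		if (!least_loaded || iter_refcount < least_refcount) {
			/* Remember the least used match as a fallback. */
			least_refcount = iter_refcount;
			least_loaded = iter;
		}
	}
	return least_loaded;
}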

/**
 * mlx5_irq_affinity_request - request an IRQ according to the given mask.
 * @dev: mlx5 core device which is requesting the IRQ.
 * @pool: IRQ pool to request from.
 * @af_desc: affinity descriptor for this IRQ.
 *
 * Return: a pointer to the requested IRQ, or an ERR_PTR() in case of error.
 */
struct mlx5_irq *
mlx5_irq_affinity_request(struct mlx5_core_dev *dev, struct mlx5_irq_pool *pool,
			  struct irq_affinity_desc *af_desc)
{}
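
/*
 * A sketch of the request path, under the same assumptions as the sketches
 * above plus a locked reference getter (here called mlx5_irq_get_locked()).
 * The real function may differ, e.g. in overload logging; this is
 * illustrative only.
 */
struct mlx5_irq *
mlx5_irq_affinity_request(struct mlx5_core_dev *dev, struct mlx5_irq_pool *pool,
			  struct irq_affinity_desc *af_desc)
{
	struct mlx5_irq *least_loaded_irq, *new_irq;

	mutex_lock(&pool->lock);
	least_loaded_irq = irq_pool_find_least_loaded(pool, &af_desc->mask);
	if (least_loaded_irq &&
	    mlx5_irq_read_locked(least_loaded_irq) < pool->min_threshold)
		/* A lightly used IRQ already fits the request; share it. */
		goto out;
	/* No lightly used IRQ found; try to create a new one for this affinity. */
	new_irq = irq_pool_request_irq(pool, af_desc);
	if (IS_ERR(new_irq)) {
		if (!least_loaded_irq) {
			/* Neither a new IRQ nor an existing match; report failure. */
			mlx5_core_err(dev, "Didn't find a matching IRQ. err = %ld\n",
				      PTR_ERR(new_irq));
			mutex_unlock(&pool->lock);
			return new_irq;
		}
		/* Creation failed; fall back to sharing the least loaded IRQ. */
		goto out;
	}
	/* A newly created IRQ is assumed to start with one reference held. */
	least_loaded_irq = new_irq;
	goto unlock;
out:
	mlx5_irq_get_locked(least_loaded_irq);	/* take a reference on the shared IRQ */
unlock:
	mutex_unlock(&pool->lock);
	return least_loaded_irq;
}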

void mlx5_irq_affinity_irq_release(struct mlx5_core_dev *dev, struct mlx5_irq *irq)
{}
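
/*
 * A sketch of the release path.  It assumes helpers for looking up the
 * device's IRQ pool (mlx5_irq_pool_get()), reading an IRQ's index within the
 * pool (mlx5_irq_get_index()) and dropping a reference (mlx5_irq_put(),
 * returning true when the last reference is gone); these names are
 * assumptions, not confirmed by this listing.
 */
void mlx5_irq_affinity_irq_release(struct mlx5_core_dev *dev, struct mlx5_irq *irq)
{
	struct mlx5_irq_pool *pool = mlx5_irq_pool_get(dev);
	int cpu;

	/* Remember the CPU this IRQ was accounted on before dropping it. */
	cpu = cpumask_first(mlx5_irq_get_affinity_mask(irq));
	/* Make sure no handler is still running on the vector being released. */
	synchronize_irq(pci_irq_vector(dev->pdev, mlx5_irq_get_index(irq)));
	if (mlx5_irq_put(irq) && pool->irqs_per_cpu)
		cpu_put(pool, cpu);	/* last user gone; drop the per-CPU count */
}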