#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/eq.h>
#ifdef CONFIG_RFS_ACCEL
#include <linux/cpu_rmap.h>
#endif
#include "mlx5_core.h"
#include "lib/eq.h"
#include "fpga/core.h"
#include "eswitch.h"
#include "lib/clock.h"
#include "diag/fw_tracer.h"
#include "mlx5_irq.h"
#include "pci_irq.h"
#include "devlink.h"
#include "en_accel/ipsec.h"
enum { … };
enum { … };
enum { … };
enum { … };
static_assert(…);
struct mlx5_eq_table { … };
#define MLX5_ASYNC_EVENT_MASK …
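/*
 * Tell firmware to tear down the EQ identified by @eqn.  A minimal
 * sketch of the body, assuming the standard mlx5 command helpers:
 *
 *        u32 in[MLX5_ST_SZ_DW(destroy_eq_in)] = {};
 *
 *        MLX5_SET(destroy_eq_in, in, opcode, MLX5_CMD_OP_DESTROY_EQ);
 *        MLX5_SET(destroy_eq_in, in, eq_number, eqn);
 *        return mlx5_cmd_exec_in(dev, destroy_eq, in);
 */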
static int mlx5_cmd_destroy_eq(struct mlx5_core_dev *dev, u8 eqn)
{ … }
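/*
 * Resolve a completion EQE's CQN to a driver CQ object: a radix-tree
 * lookup in the EQ's cq_table under its spinlock, taking a reference on
 * the CQ so it cannot be freed while its completion handler runs.
 */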
static struct mlx5_core_cq *mlx5_eq_cq_get(struct mlx5_eq *eq, u32 cqn)
{ … }
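/*
 * IRQ notifier callback for a completion EQ: poll all software-owned
 * EQEs, route each MLX5_EVENT_TYPE_COMP entry to its CQ via
 * mlx5_eq_cq_get() and cq->comp(), then publish the new consumer index
 * and re-arm the EQ with mlx5_eq_update_ci().
 */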
static int mlx5_eq_comp_int(struct notifier_block *nb,
                            __always_unused unsigned long action,
                            __always_unused void *data)
{ … }
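/*
 * Poll a completion EQ by hand with its interrupt masked.  Used by
 * recovery flows (e.g. TX timeout handling) to reap completions that a
 * lost interrupt may have stranded; returns the number of EQEs polled.
 */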
u32 mlx5_eq_poll_irq_disabled(struct mlx5_eq_comp *eq)
{ … }
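/*
 * The async EQ is polled from two contexts: the hard IRQ handler and
 * the command-timeout recovery path (mlx5_cmd_eq_recover()).  In
 * recovery the lock is only trylock'ed, so a recovery poll that races
 * with the interrupt backs off instead of deadlocking on the EQ
 * spinlock.
 */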
static void mlx5_eq_async_int_lock(struct mlx5_eq_async *eq, bool recovery,
                                   unsigned long *flags)
        __acquires(&eq->lock)
{ … }
static void mlx5_eq_async_int_unlock(struct mlx5_eq_async *eq, bool recovery,
                                     unsigned long *flags)
        __releases(&eq->lock)
{ … }
enum async_eq_nb_action { … };
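/*
 * IRQ notifier callback for the async EQs: for every software-owned
 * EQE, fan the event out on the per-event-type atomic notifier chain
 * (plus the MLX5_EVENT_TYPE_NOTIFY_ANY catch-all), then update the
 * consumer index and, outside recovery, re-arm the EQ.
 */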
static int mlx5_eq_async_int(struct notifier_block *nb,
                             unsigned long action, void *data)
{ … }
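/*
 * Called when a command times out: poll the command EQ directly so that
 * a command-completion EQE whose interrupt was lost is still delivered
 * and the stuck command can make forward progress.
 */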
void mlx5_cmd_eq_recover(struct mlx5_core_dev *dev)
{ … }
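/*
 * Hand every EQE in the freshly allocated buffer to hardware by setting
 * its owner bit, so the driver's first poll cannot mistake stale memory
 * for a valid event.  Roughly (helper/constant names as in lib/eq.h;
 * treat as illustrative):
 *
 *        for (i = 0; i < eq_get_size(eq); i++)
 *                get_eqe(eq, i)->owner = MLX5_EQE_OWNER_INIT_VAL;
 */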
static void init_eq_buf(struct mlx5_eq *eq)
{ … }
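/*
 * The create half of the EQ life cycle: allocate the EQE fragment
 * buffer, initialize ownership bits, issue CREATE_EQ with the event
 * bitmask and doorbell UAR from @param, and record the IRQ chosen for
 * the vector.  destroy_unmap_eq() below is its mirror image.
 */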
static int
create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
              struct mlx5_eq_param *param)
{ … }
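/*
 * Exported pair for EQ consumers: mlx5_eq_enable() attaches @nb to the
 * EQ's IRQ notifier chain and arms the EQ; mlx5_eq_disable() detaches
 * it.  An EQ created with mlx5_eq_create_generic() delivers nothing
 * until it is enabled.
 */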
int mlx5_eq_enable(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
                   struct notifier_block *nb)
{ … }
EXPORT_SYMBOL(…);
void mlx5_eq_disable(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
                     struct notifier_block *nb)
{ … }
EXPORT_SYMBOL(…);
static int destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
{ … }
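/*
 * Register/unregister a CQ in the EQ's cq_table.  Completion EQEs carry
 * only a CQN; the table is what lets mlx5_eq_cq_get() route them back
 * to the right cq->comp() handler.
 */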
int mlx5_eq_add_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq)
{ … }
void mlx5_eq_del_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq)
{ … }
int mlx5_eq_table_init(struct mlx5_core_dev *dev)
{ … }
void mlx5_eq_table_cleanup(struct mlx5_core_dev *dev)
{ … }
static int create_async_eq(struct mlx5_core_dev *dev,
                           struct mlx5_eq *eq, struct mlx5_eq_param *param)
{ … }
static int destroy_async_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
{ … }
static int cq_err_event_notifier(struct notifier_block *nb,
                                 unsigned long type, void *data)
{ … }
static void gather_user_async_events(struct mlx5_core_dev *dev, u64 mask[4])
{ … }
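/*
 * Build the 256-bit (u64[4]) event mask for the main async EQ: start
 * from MLX5_ASYNC_EVENT_MASK and OR in capability-gated bits (port
 * module events, PPS, temperature warnings, user/affiliated events for
 * this function, and so on), so firmware only reports events the device
 * actually supports.
 */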
static void gather_async_events_mask(struct mlx5_core_dev *dev, u64 mask[4])
{ … }
static int
setup_async_eq(struct mlx5_core_dev *dev, struct mlx5_eq_async *eq,
               struct mlx5_eq_param *param, const char *name)
{ … }
static void cleanup_async_eq(struct mlx5_core_dev *dev,
                             struct mlx5_eq_async *eq, const char *name)
{ … }
static u16 async_eq_depth_devlink_param_get(struct mlx5_core_dev *dev)
{ … }
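/*
 * Bring up the three core async EQs in dependency order: the command EQ
 * (command completions) first, then the main async EQ, then the pages
 * EQ for page-request events.  Depths come from the devlink event EQ
 * size parameter via async_eq_depth_devlink_param_get().
 */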
static int create_async_eqs(struct mlx5_core_dev *dev)
{ … }
static void destroy_async_eqs(struct mlx5_core_dev *dev)
{ … }
struct mlx5_eq *mlx5_get_async_eq(struct mlx5_core_dev *dev)
{ … }
void mlx5_eq_synchronize_async_irq(struct mlx5_core_dev *dev)
{ … }
void mlx5_eq_synchronize_cmd_irq(struct mlx5_core_dev *dev)
{ … }
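/*
 * Generic EQ API for mlx5 sub-drivers (e.g. mlx5_ib).  A typical caller
 * flow, sketched with error handling omitted (mlx5_eq_param field names
 * vary between kernel versions; MY_EVENT_TYPE and my_nb are
 * placeholders):
 *
 *        struct mlx5_eq_param param = {
 *                .nent = 64,
 *        };
 *        struct mlx5_eq *eq;
 *
 *        param.mask[0] = 1ull << MY_EVENT_TYPE;
 *        eq = mlx5_eq_create_generic(dev, &param);
 *        mlx5_eq_enable(dev, eq, &my_nb);
 *        ...
 *        mlx5_eq_disable(dev, eq, &my_nb);
 *        mlx5_eq_destroy_generic(dev, eq);
 */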
struct mlx5_eq *
mlx5_eq_create_generic(struct mlx5_core_dev *dev,
                       struct mlx5_eq_param *param)
{ … }
EXPORT_SYMBOL(…);
int mlx5_eq_destroy_generic(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
{ … }
EXPORT_SYMBOL(…);
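/*
 * Polling primitives for generic-EQ users: mlx5_eq_get_eqe() returns
 * the next software-owned EQE (or NULL) given the caller's count of
 * consumed entries @cc; mlx5_eq_update_ci() publishes the consumer
 * index to the EQ doorbell and, when @arm is set, re-arms the EQ so the
 * next event raises an interrupt.
 */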
struct mlx5_eqe *mlx5_eq_get_eqe(struct mlx5_eq *eq, u32 cc)
{ … }
EXPORT_SYMBOL(…);
void mlx5_eq_update_ci(struct mlx5_eq *eq, u32 cc, bool arm)
{ … }
EXPORT_SYMBOL(…);
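/*
 * Completion-vector IRQ plumbing.  PFs and VFs take vectors from the
 * device's own MSI-X pool (*_pci helpers); SFs (sub-functions) borrow
 * from their parent PF's pool (*_sf helpers).  comp_irq_request() and
 * comp_irq_release() dispatch on the device type.
 */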
static void comp_irq_release_pci(struct mlx5_core_dev *dev, u16 vecidx)
{ … }
static int mlx5_cpumask_default_spread(int numa_node, int index)
{ … }
static struct cpu_rmap *mlx5_eq_table_get_pci_rmap(struct mlx5_core_dev *dev)
{ … }
static int comp_irq_request_pci(struct mlx5_core_dev *dev, u16 vecidx)
{ … }
static void comp_irq_release_sf(struct mlx5_core_dev *dev, u16 vecidx)
{ … }
static int comp_irq_request_sf(struct mlx5_core_dev *dev, u16 vecidx)
{ … }
static void comp_irq_release(struct mlx5_core_dev *dev, u16 vecidx)
{ … }
static int comp_irq_request(struct mlx5_core_dev *dev, u16 vecidx)
{ … }
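/*
 * aRFS glue: the cpu_rmap lets the RFS core map a flow's CPU to the
 * closest completion IRQ.  When CONFIG_RFS_ACCEL is off these collapse
 * to no-op stubs.
 */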
#ifdef CONFIG_RFS_ACCEL
static int alloc_rmap(struct mlx5_core_dev *mdev)
{ … }
static void free_rmap(struct mlx5_core_dev *mdev)
{ … }
#else
static int alloc_rmap(struct mlx5_core_dev *mdev) { return 0; }
static void free_rmap(struct mlx5_core_dev *mdev) {}
#endif
static void destroy_comp_eq(struct mlx5_core_dev *dev, struct mlx5_eq_comp *eq, u16 vecidx)
{ … }
static u16 comp_eq_depth_devlink_param_get(struct mlx5_core_dev *dev)
{ … }
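/*
 * Completion EQs are created lazily: mlx5_comp_eqn_get() creates the EQ
 * for @vecidx on first use (request the IRQ, create and enable the EQ,
 * store it in the table) and afterwards just returns the cached EQN.
 */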
static int create_comp_eq(struct mlx5_core_dev *dev, u16 vecidx)
{ … }
int mlx5_comp_eqn_get(struct mlx5_core_dev *dev, u16 vecidx, int *eqn)
{ … }
EXPORT_SYMBOL(…);
int mlx5_comp_irqn_get(struct mlx5_core_dev *dev, int vector, unsigned int *irqn)
{ … }
unsigned int mlx5_comp_vectors_max(struct mlx5_core_dev *dev)
{ … }
EXPORT_SYMBOL(…);
static struct cpumask *
mlx5_comp_irq_get_affinity_mask(struct mlx5_core_dev *dev, int vector)
{ … }
int mlx5_comp_vector_get_cpu(struct mlx5_core_dev *dev, int vector)
{ … }
EXPORT_SYMBOL(…);
#ifdef CONFIG_RFS_ACCEL
struct cpu_rmap *mlx5_eq_table_get_rmap(struct mlx5_core_dev *dev)
{ … }
#endif
struct mlx5_eq_comp *mlx5_eqn2comp_eq(struct mlx5_core_dev *dev, int eqn)
{ … }
void mlx5_core_eq_free_irqs(struct mlx5_core_dev *dev)
{ … }
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
#define MLX5_MAX_ASYNC_EQS …
#else
#define MLX5_MAX_ASYNC_EQS …
#endif
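/*
 * Size the EQ budget: on-demand paging needs a dedicated EQ for
 * page-fault events, so MLX5_MAX_ASYNC_EQS reserves one more slot when
 * CONFIG_INFINIBAND_ON_DEMAND_PAGING is set.  Whatever vectors remain
 * after the async reservation are available for completion EQs.
 */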
static int get_num_eqs(struct mlx5_core_dev *dev)
{ … }
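/*
 * Top-level create/destroy for the whole EQ table: compute how many
 * completion EQs the IRQ budget allows (get_num_eqs()), set up the aRFS
 * rmap, and create the async EQs.  Completion EQs themselves appear on
 * demand via mlx5_comp_eqn_get().
 */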
int mlx5_eq_table_create(struct mlx5_core_dev *dev)
{ … }
void mlx5_eq_table_destroy(struct mlx5_core_dev *dev)
{ … }
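/*
 * Async event (de)subscription: @nb->event_type selects which of the
 * table's atomic notifier chains the callback joins.  Callers usually
 * fill the mlx5_nb with the MLX5_NB_INIT() helper, e.g. (my_nb and
 * my_handler are placeholders):
 *
 *        MLX5_NB_INIT(&my_nb, my_handler, PORT_MODULE_EVENT);
 *        mlx5_eq_notifier_register(dev, &my_nb);
 */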
int mlx5_eq_notifier_register(struct mlx5_core_dev *dev, struct mlx5_nb *nb)
{ … }
EXPORT_SYMBOL(…);
int mlx5_eq_notifier_unregister(struct mlx5_core_dev *dev, struct mlx5_nb *nb)
{ … }
EXPORT_SYMBOL(…);