// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright(c) 2015 - 2020 Intel Corporation.
 */

#include <linux/topology.h>
#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <linux/numa.h>

#include "hfi.h"
#include "affinity.h"
#include "sdma.h"
#include "trace.h"

struct hfi1_affinity_node_list node_affinity = {
	.list = LIST_HEAD_INIT(node_affinity.list),
	.lock = __MUTEX_INITIALIZER(node_affinity.lock)
};

/* Names of IRQ types, indexed by enum irq_type */
static const char * const irq_type_names[] = {
	"SDMA",
	"RCVCTXT",
	"NETDEVCTXT",
	"GENERAL",
	"OTHER",
};

/* Per NUMA node count of HFI devices */
static unsigned int *hfi1_per_node_cntr;

static inline void init_cpu_mask_set(struct cpu_mask_set *set)
{}

/* Increment generation of CPU set if needed */
static void _cpu_mask_set_gen_inc(struct cpu_mask_set *set)
{}

static void _cpu_mask_set_gen_dec(struct cpu_mask_set *set)
{}

/* Get the first CPU from the list of unused CPUs in a CPU set data structure */
static int cpu_mask_set_get_first(struct cpu_mask_set *set, cpumask_var_t diff)
{}

static void cpu_mask_set_put(struct cpu_mask_set *set, int cpu)
{}
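
/*
 * A minimal sketch (example_* is an illustrative name, not a driver
 * function) of how the helpers above cooperate, assuming struct
 * cpu_mask_set carries a base @mask, a @used mask and a generation
 * counter @gen as declared in affinity.h: CPUs are handed out from
 * mask & ~used, and once every CPU has been used the generation is
 * bumped and @used is cleared so allocation starts over.
 */
static int example_cpu_mask_set_alloc(struct cpu_mask_set *set,
				      cpumask_var_t diff)
{
	int cpu;

	if (cpumask_equal(&set->mask, &set->used)) {
		/* All CPUs handed out: open a new generation */
		set->gen++;
		cpumask_clear(&set->used);
	}

	/* diff = mask & ~used, i.e. the CPUs still unused */
	cpumask_andnot(diff, &set->mask, &set->used);
	cpu = cpumask_first(diff);
	if (cpu >= nr_cpu_ids)
		return -EINVAL;

	cpumask_set_cpu(cpu, &set->used);
	return cpu;
}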

/* Initialize non-HT cpu cores mask */
void init_real_cpu_mask(void)
{}

int node_affinity_init(void)
{}

static void node_affinity_destroy(struct hfi1_affinity_node *entry)
{}

void node_affinity_destroy_all(void)
{}

static struct hfi1_affinity_node *node_affinity_allocate(int node)
{}

/*
 * Append an entry to the tail of the list.
 * It *must* be called with node_affinity.lock held.
 */
static void node_affinity_add_tail(struct hfi1_affinity_node *entry)
{}

/* It must be called with node_affinity.lock held */
static struct hfi1_affinity_node *node_affinity_lookup(int node)
{}
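
/*
 * Hedged sketch of the lookup-or-allocate pattern the two helpers above
 * are built for (example_* is an illustrative name): taking the mutex
 * once makes the lookup and the insertion atomic with respect to other
 * devices probing on the same node.
 */
static struct hfi1_affinity_node *example_node_affinity_get(int node)
{
	struct hfi1_affinity_node *entry;

	mutex_lock(&node_affinity.lock);
	entry = node_affinity_lookup(node);
	if (!entry) {
		entry = node_affinity_allocate(node);
		if (entry)
			node_affinity_add_tail(entry);
	}
	mutex_unlock(&node_affinity.lock);

	return entry;
}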

static int per_cpu_affinity_get(cpumask_var_t possible_cpumask,
				u16 __percpu *comp_vect_affinity)
{}

static int per_cpu_affinity_put_max(cpumask_var_t possible_cpumask,
				    u16 __percpu *comp_vect_affinity)
{}
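
/*
 * Illustrative sketch (hypothetical name) of the least-used CPU
 * selection the two helpers above implement over a u16 __percpu
 * reference counter: pick the CPU in @mask with the smallest count,
 * then bump that count. per_cpu_affinity_put_max() is the inverse,
 * decrementing the largest count.
 */
static int example_least_used_cpu(cpumask_var_t mask,
				  u16 __percpu *counts)
{
	int cpu, ret_cpu = cpumask_first(mask);
	u16 min_cnt = U16_MAX;

	if (ret_cpu >= nr_cpu_ids)
		return -EINVAL;

	for_each_cpu(cpu, mask) {
		u16 cnt = *per_cpu_ptr(counts, cpu);

		if (cnt < min_cnt) {
			min_cnt = cnt;
			ret_cpu = cpu;
		}
	}

	*per_cpu_ptr(counts, ret_cpu) += 1;
	return ret_cpu;
}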

/*
 * Non-interrupt CPUs are used first, then interrupt CPUs.
 * Two pre-allocated CPU masks must be passed in.
 */
static int _dev_comp_vect_cpu_get(struct hfi1_devdata *dd,
				  struct hfi1_affinity_node *entry,
				  cpumask_var_t non_intr_cpus,
				  cpumask_var_t available_cpus)
	__must_hold(&node_affinity.lock)
{}
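
/*
 * A sketch of the "non-interrupt CPUs first" policy described above
 * (example_* is illustrative; @intr_used stands in for whichever used
 * mask tracks interrupt CPUs): prefer CPUs that service no interrupts,
 * and fall back to interrupt CPUs only when none remain.
 */
static int example_pick_comp_vect_cpu(cpumask_var_t available_cpus,
				      cpumask_var_t non_intr_cpus,
				      const struct cpumask *intr_used)
{
	int cpu;

	/* CPUs in the available set that take no interrupts */
	cpumask_andnot(non_intr_cpus, available_cpus, intr_used);

	if (!cpumask_empty(non_intr_cpus))
		cpu = cpumask_first(non_intr_cpus);	/* preferred */
	else
		cpu = cpumask_first(available_cpus);	/* fallback */

	return cpu < nr_cpu_ids ? cpu : -EINVAL;
}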

static void _dev_comp_vect_cpu_put(struct hfi1_devdata *dd, int cpu)
{}

/* _dev_comp_vect_mappings_destroy() is reentrant */
static void _dev_comp_vect_mappings_destroy(struct hfi1_devdata *dd)
{}

/*
 * This function creates the table for looking up CPUs for completion vectors.
 * num_comp_vectors needs to have been initialized before calling this function.
 */
static int _dev_comp_vect_mappings_create(struct hfi1_devdata *dd,
					  struct hfi1_affinity_node *entry)
	__must_hold(&node_affinity.lock)
{}
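
/*
 * Hedged sketch of the table build-up described above, assuming
 * dd->comp_vect_mappings has already been allocated with one slot per
 * completion vector and that node_affinity.lock is held (as the
 * annotation on the real function requires); example_* is not a
 * driver function.
 */
static int example_build_comp_vect_table(struct hfi1_devdata *dd,
					 struct hfi1_affinity_node *entry)
{
	cpumask_var_t non_intr_cpus, available_cpus;
	int i, cpu, ret = 0;

	if (!zalloc_cpumask_var(&non_intr_cpus, GFP_KERNEL))
		return -ENOMEM;
	if (!zalloc_cpumask_var(&available_cpus, GFP_KERNEL)) {
		free_cpumask_var(non_intr_cpus);
		return -ENOMEM;
	}

	/* One table entry per completion vector */
	for (i = 0; i < dd->comp_vect_possible_cpus; i++) {
		cpu = _dev_comp_vect_cpu_get(dd, entry, non_intr_cpus,
					     available_cpus);
		if (cpu < 0) {
			ret = cpu;
			break;
		}
		dd->comp_vect_mappings[i] = cpu;
	}

	free_cpumask_var(available_cpus);
	free_cpumask_var(non_intr_cpus);
	return ret;
}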

int hfi1_comp_vectors_set_up(struct hfi1_devdata *dd)
{}

void hfi1_comp_vectors_clean_up(struct hfi1_devdata *dd)
{}

int hfi1_comp_vect_mappings_lookup(struct rvt_dev_info *rdi, int comp_vect)
{}

/*
 * It assumes dd->comp_vect_possible_cpus is available.
 */
static int _dev_comp_vect_cpu_mask_init(struct hfi1_devdata *dd,
					struct hfi1_affinity_node *entry,
					bool first_dev_init)
	__must_hold(&node_affinity.lock)
{}
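
/*
 * Hedged sketch of the share computation behind the function above:
 * the node's completion-vector CPUs (entry->comp_vect_mask) are split
 * evenly between the HFI devices counted on that node, and each device
 * records its slice in dd->comp_vect_possible_cpus. Field names are
 * taken from the driver headers; the rounding policy here is an
 * assumption for illustration.
 */
static void example_compute_cpu_share(struct hfi1_devdata *dd,
				      struct hfi1_affinity_node *entry)
{
	unsigned int ndevs = hfi1_per_node_cntr[dd->node];
	unsigned int ncpus = cpumask_weight(&entry->comp_vect_mask);

	/* Guard against a zero device count; give each device >= 1 CPU */
	dd->comp_vect_possible_cpus = ndevs ? max(ncpus / ndevs, 1U) : ncpus;
}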

/*
 * It assumes dd->comp_vect_possible_cpus is available.
 */
static void _dev_comp_vect_cpu_mask_clean_up(struct hfi1_devdata *dd,
					     struct hfi1_affinity_node *entry)
	__must_hold(&node_affinity.lock)
{}

/*
 * Interrupt affinity.
 *
 * Non-rcv-avail interrupts get a default mask that starts as the set
 * of possible CPUs, with HT sibling threads removed and every
 * rcv-avail CPU removed.
 *
 * Rcv-avail interrupts are handed out starting at node-relative CPU 1,
 * wrapping back to node-relative CPU 1 as necessary.
 */
int hfi1_dev_affinity_init(struct hfi1_devdata *dd)
{}
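
/*
 * A minimal sketch of the default-mask construction described in the
 * comment above (illustrative only, with @rcv_cpus standing in for the
 * mask of CPUs already claimed by rcv-avail interrupts): start from the
 * device's local node with HT siblings already stripped, then remove
 * the rcv-avail CPUs.
 */
static void example_build_general_intr_mask(int node,
					    const struct cpumask *rcv_cpus,
					    struct cpumask *general_mask)
{
	/* CPUs on the device's node, one thread per core */
	cpumask_and(general_mask, &node_affinity.real_cpu_mask,
		    cpumask_of_node(node));

	/* rcv-avail interrupts keep their CPUs to themselves */
	cpumask_andnot(general_mask, general_mask, rcv_cpus);
}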

void hfi1_dev_affinity_clean_up(struct hfi1_devdata *dd)
{}

/*
 * Update the IRQ affinity hint for an SDMA MSI-X vector after the
 * affinity has been changed by the user via the /proc/irq interface.
 * Only a single CPU in the mask is accepted.
 */
static void hfi1_update_sdma_affinity(struct hfi1_msix_entry *msix, int cpu)
{}

static void hfi1_irq_notifier_notify(struct irq_affinity_notify *notify,
				     const cpumask_t *mask)
{}

static void hfi1_irq_notifier_release(struct kref *ref)
{}

static void hfi1_setup_sdma_notifier(struct hfi1_msix_entry *msix)
{}

static void hfi1_cleanup_sdma_notifier(struct hfi1_msix_entry *msix)
{}
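
/*
 * Hedged sketch of the notifier wiring the four functions above
 * implement, using the generic irq_set_affinity_notifier() API from
 * <linux/interrupt.h>. It assumes struct hfi1_msix_entry embeds a
 * struct irq_affinity_notify as @notify and carries the vector in
 * @irq: the notify callback then fires whenever userspace rewrites
 * /proc/irq/<n>/smp_affinity, and release runs when the last kref is
 * dropped.
 */
static void example_setup_affinity_notifier(struct hfi1_msix_entry *msix)
{
	msix->notify.irq = msix->irq;
	msix->notify.notify = hfi1_irq_notifier_notify;
	msix->notify.release = hfi1_irq_notifier_release;

	if (irq_set_affinity_notifier(msix->irq, &msix->notify))
		pr_err("Failed to register irq affinity notifier\n");
}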

/*
 * Set the IRQ affinity for an MSI-X vector.
 * It *must* be called with node_affinity.lock held.
 */
static int get_irq_affinity(struct hfi1_devdata *dd,
			    struct hfi1_msix_entry *msix)
{}

int hfi1_get_irq_affinity(struct hfi1_devdata *dd, struct hfi1_msix_entry *msix)
{}

void hfi1_put_irq_affinity(struct hfi1_devdata *dd,
			   struct hfi1_msix_entry *msix)
{}

/* This should be called with node_affinity.lock held */
static void find_hw_thread_mask(uint hw_thread_no, cpumask_var_t hw_thread_mask,
				struct hfi1_affinity_node_list *affinity)
{}

int hfi1_get_proc_affinity(int node)
{}

void hfi1_put_proc_affinity(int cpu)
{}
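
/*
 * Typical pairing of the two public helpers above (illustrative
 * example_* wrappers, not driver code): a user context asks for a CPU
 * recommendation near its memory node when it is opened and returns
 * that CPU when it is closed, so the slot can be handed to the next
 * process.
 */
static int example_bind_user_context(int node)
{
	int cpu = hfi1_get_proc_affinity(node);

	/* A negative value means no recommendation could be made */
	if (cpu >= 0) {
		/* ... pin the context's work to @cpu here ... */
	}

	return cpu;
}

static void example_unbind_user_context(int cpu)
{
	if (cpu >= 0)
		hfi1_put_proc_affinity(cpu);
}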