#include <linux/topology.h>
#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <linux/numa.h>
#include "hfi.h"
#include "affinity.h"
#include "sdma.h"
#include "trace.h"
struct hfi1_affinity_node_list node_affinity = …;
static const char * const irq_type_names[] = …;
static unsigned int *hfi1_per_node_cntr;
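
/*
 * struct cpu_mask_set helpers: a small generation-counted CPU allocator.
 * "get" hands out the first CPU not yet marked used, "put" returns one,
 * and the generation counters allow the used mask to be recycled once
 * every CPU in the set has been handed out.
 */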
static inline void init_cpu_mask_set(struct cpu_mask_set *set)
{ … }
static void _cpu_mask_set_gen_inc(struct cpu_mask_set *set)
{ … }
static void _cpu_mask_set_gen_dec(struct cpu_mask_set *set)
{ … }
static int cpu_mask_set_get_first(struct cpu_mask_set *set, cpumask_var_t diff)
{ … }
static void cpu_mask_set_put(struct cpu_mask_set *set, int cpu)
{ … }
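
/* Build the mask of "real" CPUs, excluding hyper-threaded sibling threads. */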
void init_real_cpu_mask(void)
{ … }
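
/*
 * One-time initialization of the global node_affinity state; also counts
 * the hfi1 devices present on each NUMA node so CPUs can be shared fairly.
 */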
int node_affinity_init(void)
{ … }
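
/* Allocation, lookup, and teardown of per-NUMA-node affinity entries. */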
static void node_affinity_destroy(struct hfi1_affinity_node *entry)
{ … }
void node_affinity_destroy_all(void)
{ … }
static struct hfi1_affinity_node *node_affinity_allocate(int node)
{ … }
static void node_affinity_add_tail(struct hfi1_affinity_node *entry)
{ … }
static struct hfi1_affinity_node *node_affinity_lookup(int node)
{ … }
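
/*
 * Per-CPU completion vector reference counting: "get" returns the CPU in
 * the given mask with the lowest count and increments it, "put_max"
 * decrements the count of the most heavily used CPU.
 */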
static int per_cpu_affinity_get(cpumask_var_t possible_cpumask,
                                u16 __percpu *comp_vect_affinity)
{ … }
static int per_cpu_affinity_put_max(cpumask_var_t possible_cpumask,
                                    u16 __percpu *comp_vect_affinity)
{ … }
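
/*
 * Completion vector to CPU assignment for a device: pick CPUs from the
 * node's completion vector mask, preferring CPUs that do not also service
 * interrupts, and build or tear down the per-device comp_vect -> CPU table.
 * The __must_hold() annotations document that node_affinity.lock is held.
 */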
static int _dev_comp_vect_cpu_get(struct hfi1_devdata *dd,
                                  struct hfi1_affinity_node *entry,
                                  cpumask_var_t non_intr_cpus,
                                  cpumask_var_t available_cpus)
        __must_hold(&node_affinity.lock)
{ … }
static void _dev_comp_vect_cpu_put(struct hfi1_devdata *dd, int cpu)
{ … }
static void _dev_comp_vect_mappings_destroy(struct hfi1_devdata *dd)
{ … }
static int _dev_comp_vect_mappings_create(struct hfi1_devdata *dd,
                                          struct hfi1_affinity_node *entry)
        __must_hold(&node_affinity.lock)
{ … }
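
/* Create/destroy all completion vector mappings for a device. */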
int hfi1_comp_vectors_set_up(struct hfi1_devdata *dd)
{ … }
void hfi1_comp_vectors_clean_up(struct hfi1_devdata *dd)
{ … }
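
/* Look up the CPU bound to a given completion vector (used via rdmavt). */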
int hfi1_comp_vect_mappings_lookup(struct rvt_dev_info *rdi, int comp_vect)
{ … }
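
/*
 * Reserve a per-node slice of CPUs for this device's completion vectors
 * (and release it again); first_dev_init flags the first device brought
 * up on the node.
 */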
static int _dev_comp_vect_cpu_mask_init(struct hfi1_devdata *dd,
                                        struct hfi1_affinity_node *entry,
                                        bool first_dev_init)
        __must_hold(&node_affinity.lock)
{ … }
static void _dev_comp_vect_cpu_mask_clean_up(struct hfi1_devdata *dd,
                                             struct hfi1_affinity_node *entry)
        __must_hold(&node_affinity.lock)
{ … }
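
/*
 * Per-device affinity setup/teardown: resolve the device's NUMA node,
 * attach it to the node's affinity entry, and populate the CPU masks
 * used for its interrupts and completion vectors.
 */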
int hfi1_dev_affinity_init(struct hfi1_devdata *dd)
{ … }
void hfi1_dev_affinity_clean_up(struct hfi1_devdata *dd)
{ … }
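
/*
 * SDMA IRQ affinity notifiers: when the affinity of an SDMA MSI-X vector
 * is changed, propagate the new CPU to the corresponding sdma engine.
 */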
static void hfi1_update_sdma_affinity(struct hfi1_msix_entry *msix, int cpu)
{ … }
static void hfi1_irq_notifier_notify(struct irq_affinity_notify *notify,
                                     const cpumask_t *mask)
{ … }
static void hfi1_irq_notifier_release(struct kref *ref)
{ … }
static void hfi1_setup_sdma_notifier(struct hfi1_msix_entry *msix)
{ … }
static void hfi1_cleanup_sdma_notifier(struct hfi1_msix_entry *msix)
{ … }
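
/*
 * Assign a CPU to an MSI-X vector according to its type (SDMA, receive
 * context, general) and set its affinity hint; the "put" path releases
 * the CPU back to the node's masks and clears the hint.
 */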
static int get_irq_affinity(struct hfi1_devdata *dd,
                            struct hfi1_msix_entry *msix)
{ … }
int hfi1_get_irq_affinity(struct hfi1_devdata *dd, struct hfi1_msix_entry *msix)
{ … }
void hfi1_put_irq_affinity(struct hfi1_devdata *dd,
                           struct hfi1_msix_entry *msix)
{ … }
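
/* Compute the mask of CPUs belonging to the given hardware (SMT) thread. */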
static void find_hw_thread_mask(uint hw_thread_no, cpumask_var_t hw_thread_mask,
                                struct hfi1_affinity_node_list *affinity)
{ … }
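
/*
 * Select (and later release) a CPU on which to run a user process context,
 * preferring the requested NUMA node and CPUs not already claimed by
 * interrupt handlers or other processes.
 */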
int hfi1_get_proc_affinity(int node)
{ … }
void hfi1_put_proc_affinity(int cpu)
{ … }