#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/sort.h>
#include <linux/group_cpus.h>
#ifdef CONFIG_SMP
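/*
 * grp_spread_init_one - move up to @cpus_per_grp CPUs from @nmsk into
 * @irqmsk, preferring to pull in the sibling threads of each selected CPU
 * so that CPUs sharing a core land in the same group.
 */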
static void grp_spread_init_one(struct cpumask *irqmsk, struct cpumask *nmsk,
unsigned int cpus_per_grp)
{ … }
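/*
 * alloc_node_to_cpumask()/free_node_to_cpumask()/build_node_to_cpumask()
 * manage a per-NUMA-node array of cpumasks: entry N is populated with the
 * CPUs that belong to node N.
 */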
static cpumask_var_t *alloc_node_to_cpumask(void)
{ … }
static void free_node_to_cpumask(cpumask_var_t *masks)
{ … }
static void build_node_to_cpumask(cpumask_var_t *masks)
{ … }
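/*
 * get_nodes_in_cpumask - set in @nodemsk every node that has at least one
 * CPU in @mask and return the number of such nodes.
 */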
static int get_nodes_in_cpumask(cpumask_var_t *node_to_cpumask,
const struct cpumask *mask, nodemask_t *nodemsk)
{ … }
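/* Per-node bookkeeping: node id plus its CPU count / assigned group count. */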
struct node_groups { … };
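/* sort() comparator: order node_groups entries by ascending CPU count. */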
static int ncpus_cmp_func(const void *l, const void *r)
{ … }
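/*
 * alloc_nodes_groups - distribute @numgrps groups over the nodes in
 * @nodemsk so that each node gets a share of groups roughly proportional
 * to its number of CPUs in @cpu_mask, with every node getting at least
 * one group.
 */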
static void alloc_nodes_groups(unsigned int numgrps,
cpumask_var_t *node_to_cpumask,
const struct cpumask *cpu_mask,
const nodemask_t nodemsk,
struct cpumask *nmsk,
struct node_groups *node_groups)
{ … }
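/*
 * __group_cpus_evenly - build groups for the CPUs in @cpu_mask, starting
 * at @masks[@startgrp].  Groups are first spread across the NUMA nodes,
 * then the CPUs of each node are spread across that node's groups.
 * Returns the number of groups populated, or a negative errno on
 * allocation failure.
 */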
static int __group_cpus_evenly(unsigned int startgrp, unsigned int numgrps,
cpumask_var_t *node_to_cpumask,
const struct cpumask *cpu_mask,
struct cpumask *nmsk, struct cpumask *masks)
{ … }
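/**
 * group_cpus_evenly - Group all CPUs evenly per NUMA/CPU locality
 * @numgrps: number of groups
 *
 * Return: cpumask array if successful, NULL otherwise, with each element
 * holding the CPUs assigned to that group.
 *
 * CPUs that are close in terms of CPU and NUMA locality are put into the
 * same group.  Grouping runs in two stages: present CPUs are spread over
 * the groups first, then the remaining possible CPUs.  Every CPU is
 * covered and no CPU is assigned to more than one group.
 */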
struct cpumask *group_cpus_evenly(unsigned int numgrps)
{ … }
#else /* CONFIG_SMP */
struct cpumask *group_cpus_evenly(unsigned int numgrps)
{
struct cpumask *masks = kcalloc(numgrps, sizeof(*masks), GFP_KERNEL);
if (!masks)
return NULL;
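/* UP build: assign the only possible CPU (cpu 0) to the first group */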
cpumask_copy(&masks[0], cpu_possible_mask);
return masks;
}
#endif /* CONFIG_SMP */
EXPORT_SYMBOL_GPL(group_cpus_evenly);
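/*
 * Example usage (an illustrative sketch, not part of this file): a caller
 * that wants one CPU group per queue could consume the result as below.
 * "nr_queues" and "assign_queue_cpus()" are hypothetical names used only
 * for illustration; the array returned by group_cpus_evenly() is
 * allocated with kcalloc() and must be freed by the caller.
 *
 *	struct cpumask *masks = group_cpus_evenly(nr_queues);
 *	unsigned int i;
 *
 *	if (!masks)
 *		return -ENOMEM;
 *	for (i = 0; i < nr_queues; i++)
 *		assign_queue_cpus(i, &masks[i]);
 *	kfree(masks);
 */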