/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_TOPOLOGY_H
#define _LINUX_SCHED_TOPOLOGY_H

#include <linux/topology.h>

#include <linux/sched/idle.h>

/*
 * sched-domains (multiprocessor balancing) declarations:
 */
#ifdef CONFIG_SMP

/* Generate SD flag indexes */
#define SD_FLAG(name, mflags) __##name,
enum {
	#include <linux/sched/sd_flags.h>
	__SD_FLAG_CNT,
};
#undef SD_FLAG
/* Generate SD flag bits */
#define SD_FLAG(name, mflags) name = 1 << __##name,
enum {
	#include <linux/sched/sd_flags.h>
};
#undef SD_FLAG
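/*
 * Illustration (expansion spelled out here as an example, not part of
 * this header): each SD_FLAG(name, mflags) entry in
 * <linux/sched/sd_flags.h> is expanded twice by the x-macro above.
 * For an entry such as SD_FLAG(SD_BALANCE_FORK, SDF_SHARED_CHILD),
 * the first pass yields the index enumerator __SD_BALANCE_FORK and
 * the second pass yields the bit value:
 *
 *	SD_BALANCE_FORK = 1 << __SD_BALANCE_FORK
 */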

#ifdef CONFIG_SCHED_DEBUG

struct sd_flag_debug {
	unsigned int meta_flags;
	char *name;
};
extern const struct sd_flag_debug sd_flag_debug[];

#endif

#ifdef CONFIG_SCHED_SMT
static inline int cpu_smt_flags(void)
{
	return SD_SHARE_CPUCAPACITY | SD_SHARE_LLC;
}
#endif

#ifdef CONFIG_SCHED_CLUSTER
static inline int cpu_cluster_flags(void)
{
	return SD_CLUSTER | SD_SHARE_LLC;
}
#endif

#ifdef CONFIG_SCHED_MC
static inline int cpu_core_flags(void)
{
	return SD_SHARE_LLC;
}
#endif

#ifdef CONFIG_NUMA
static inline int cpu_numa_flags(void)
{
	return SD_NUMA;
}
#endif

extern int arch_asym_cpu_priority(int cpu);

struct sched_domain_attr {
	int relax_domain_level;
};

#define SD_ATTR_INIT	(struct sched_domain_attr) {	\
	.relax_domain_level = -1,			\
}

extern int sched_domain_level_max;

struct sched_group;

struct sched_domain_shared {
	atomic_t	ref;
	atomic_t	nr_busy_cpus;
	int		has_idle_cores;
	int		nr_idle_scan;
};

struct sched_domain {
	/* These fields must be setup */
	struct sched_domain __rcu *parent;	/* top domain must be null terminated */
	struct sched_domain __rcu *child;	/* bottom domain must be null terminated */
	struct sched_group *groups;		/* the balancing groups of the domain */
	unsigned long min_interval;		/* Minimum balance interval ms */
	unsigned long max_interval;		/* Maximum balance interval ms */
	unsigned int busy_factor;		/* less balancing by factor if busy */
	unsigned int imbalance_pct;		/* No balance until over watermark */
	unsigned int cache_nice_tries;		/* Leave cache hot tasks for # tries */
	unsigned int imb_numa_nr;		/* Nr running tasks that allows imbalance */

	int nohz_idle;				/* NOHZ IDLE status */
	int flags;				/* See SD_* */
	int level;

	/* Runtime fields. */
	unsigned long last_balance;		/* init to jiffies. units in jiffies */
	unsigned int balance_interval;		/* initialise to 1. units in ms. */
	unsigned int nr_balance_failed;		/* initialise to 0 */

	/* idle_balance() stats */
	u64 max_newidle_lb_cost;
	unsigned long last_decay_max_lb_cost;

#ifdef CONFIG_SCHEDSTATS
	/* load_balance() stats */
	unsigned int lb_count[CPU_MAX_IDLE_TYPES];
	unsigned int lb_failed[CPU_MAX_IDLE_TYPES];
	unsigned int lb_balanced[CPU_MAX_IDLE_TYPES];
	unsigned int lb_imbalance[CPU_MAX_IDLE_TYPES];
	unsigned int lb_gained[CPU_MAX_IDLE_TYPES];
	unsigned int lb_hot_gained[CPU_MAX_IDLE_TYPES];
	unsigned int lb_nobusyg[CPU_MAX_IDLE_TYPES];
	unsigned int lb_nobusyq[CPU_MAX_IDLE_TYPES];

	/* Active load balancing */
	unsigned int alb_count;
	unsigned int alb_failed;
	unsigned int alb_pushed;

	/* SD_BALANCE_EXEC stats */
	unsigned int sbe_count;
	unsigned int sbe_balanced;
	unsigned int sbe_pushed;

	/* SD_BALANCE_FORK stats */
	unsigned int sbf_count;
	unsigned int sbf_balanced;
	unsigned int sbf_pushed;

	/* try_to_wake_up() stats */
	unsigned int ttwu_wake_remote;
	unsigned int ttwu_move_affine;
	unsigned int ttwu_move_balance;
#endif
#ifdef CONFIG_SCHED_DEBUG
	char *name;
#endif
	union {
		void *private;		/* used during construction */
		struct rcu_head rcu;	/* used during destruction */
	};
	struct sched_domain_shared *shared;

	unsigned int span_weight;
	/*
	 * Span of all CPUs in this domain.
	 *
	 * NOTE: this field is variable length. (Allocated dynamically
	 * by attaching extra space to the end of the structure,
	 * depending on how many CPUs the kernel has booted up with)
	 */
	unsigned long span[];
};

static inline struct cpumask *sched_domain_span(struct sched_domain *sd)
{
	return to_cpumask(sd->span);
}
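/*
 * Example (sketch, not kernel code): walking the CPUs covered by a
 * domain. Assumes @sd is a valid sched_domain obtained under RCU
 * protection.
 *
 *	int cpu;
 *
 *	for_each_cpu(cpu, sched_domain_span(sd))
 *		pr_info("domain spans CPU%d\n", cpu);
 */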

extern void partition_sched_domains_locked(int ndoms_new,
					   cpumask_var_t doms_new[],
					   struct sched_domain_attr *dattr_new);

extern void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
				    struct sched_domain_attr *dattr_new);

/* Allocate an array of sched domains, for partition_sched_domains(). */
cpumask_var_t *alloc_sched_domains(unsigned int ndoms);
void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms);
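/*
 * Example (sketch): collapsing everything into a single partition
 * spanning the online CPUs, the degenerate case cpuset-style callers
 * fall back to. Error handling is omitted; ownership of @doms passes
 * to the scheduler, and the caller is assumed to hold the locking
 * required by partition_sched_domains_locked().
 *
 *	cpumask_var_t *doms = alloc_sched_domains(1);
 *
 *	cpumask_copy(doms[0], cpu_online_mask);
 *	partition_sched_domains(1, doms, NULL);
 */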

bool cpus_equal_capacity(int this_cpu, int that_cpu);
bool cpus_share_cache(int this_cpu, int that_cpu);
bool cpus_share_resources(int this_cpu, int that_cpu);
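/*
 * Example (sketch): a wakeup-path style check. Assumes both CPU ids
 * are valid; on !SMP builds these helpers return true by definition
 * (see the stubs below).
 *
 *	if (cpus_share_cache(prev_cpu, this_cpu))
 *		target = prev_cpu;
 */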

typedef const struct cpumask *(*sched_domain_mask_f)(int cpu);
typedef int (*sched_domain_flags_f)(void);

#define SDTL_OVERLAP		0x01

struct sd_data {
	struct sched_domain *__percpu *sd;
	struct sched_domain_shared *__percpu *sds;
	struct sched_group *__percpu *sg;
	struct sched_group_capacity *__percpu *sgc;
};

struct sched_domain_topology_level {
	sched_domain_mask_f mask;
	sched_domain_flags_f sd_flags;
	int		    flags;
	int		    numa_level;
	struct sd_data      data;
#ifdef CONFIG_SCHED_DEBUG
	char                *name;
#endif
};

extern void __init set_sched_topology(struct sched_domain_topology_level *tl);
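/*
 * Example (sketch): an architecture replacing the default topology
 * table from early init code. This mirrors the shape of the kernel's
 * default_topology[] rather than any one architecture; the table name
 * my_topology is hypothetical, and the table must be terminated by an
 * entry with a NULL mask callback.
 *
 *	static struct sched_domain_topology_level my_topology[] = {
 *	#ifdef CONFIG_SCHED_SMT
 *		{ cpu_smt_mask, cpu_smt_flags, SD_INIT_NAME(SMT) },
 *	#endif
 *	#ifdef CONFIG_SCHED_MC
 *		{ cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
 *	#endif
 *		{ cpu_cpu_mask, SD_INIT_NAME(PKG) },
 *		{ NULL, },
 *	};
 *
 *	set_sched_topology(my_topology);
 */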

#ifdef CONFIG_SCHED_DEBUG
# define SD_INIT_NAME(type)		.name = #type
#else
# define SD_INIT_NAME(type)
#endif

#else /* CONFIG_SMP */

struct sched_domain_attr;

static inline void
partition_sched_domains_locked(int ndoms_new, cpumask_var_t doms_new[],
			       struct sched_domain_attr *dattr_new)
{
}

static inline void
partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
			struct sched_domain_attr *dattr_new)
{
}

static inline bool cpus_equal_capacity(int this_cpu, int that_cpu)
{
	return true;
}

static inline bool cpus_share_cache(int this_cpu, int that_cpu)
{
	return true;
}

static inline bool cpus_share_resources(int this_cpu, int that_cpu)
{
	return true;
}

#endif	/* !CONFIG_SMP */

#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
extern void rebuild_sched_domains_energy(void);
#else
static inline void rebuild_sched_domains_energy(void)
{
}
#endif

#ifndef arch_scale_cpu_capacity
/**
 * arch_scale_cpu_capacity - get the capacity scale factor of a given CPU.
 * @cpu: the CPU in question.
 *
 * Return: the CPU scale factor normalized against SCHED_CAPACITY_SCALE, i.e.
 *
 *             max_perf(cpu)
 *      ----------------------------- * SCHED_CAPACITY_SCALE
 *      max(max_perf(c) : c \in CPUs)
 */
static __always_inline
unsigned long arch_scale_cpu_capacity(int cpu)
{
	return SCHED_CAPACITY_SCALE;
}
#endif
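/*
 * Worked example (hypothetical numbers): on an asymmetric system where
 * a little CPU's max performance is half that of the fastest CPU,
 * arch_scale_cpu_capacity(little) = 1/2 * SCHED_CAPACITY_SCALE = 512,
 * while the fastest CPU reports 1024. The generic fallback above
 * describes a symmetric system: every CPU reports SCHED_CAPACITY_SCALE.
 */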

#ifndef arch_scale_hw_pressure
static __always_inline
unsigned long arch_scale_hw_pressure(int cpu)
{
	return 0;
}
#endif

#ifndef arch_update_hw_pressure
static __always_inline
void arch_update_hw_pressure(const struct cpumask *cpus,
			     unsigned long capped_frequency)
{ }
#endif

#ifndef arch_scale_freq_ref
static __always_inline
unsigned int arch_scale_freq_ref(int cpu)
{
	return 0;
}
#endif

static inline int task_node(const struct task_struct *p)
{
	return cpu_to_node(task_cpu(p));
}

#endif /* _LINUX_SCHED_TOPOLOGY_H */