/* linux/arch/x86/kernel/cpu/resctrl/rdtgroup.c */

// SPDX-License-Identifier: GPL-2.0-only
/*
 * User interface for Resource Allocation in Resource Director Technology(RDT)
 *
 * Copyright (C) 2016 Intel Corporation
 *
 * Author: Fenghua Yu <[email protected]>
 *
 * More information about RDT be found in the Intel (R) x86 Architecture
 * Software Developer Manual.
 */

/* Prefix all pr_*() output from this file with the module name. */
#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/fs_parser.h>
#include <linux/sysfs.h>
#include <linux/kernfs.h>
#include <linux/seq_buf.h>
#include <linux/seq_file.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/slab.h>
#include <linux/task_work.h>
#include <linux/user_namespace.h>

#include <uapi/linux/magic.h>

#include <asm/resctrl.h>
#include "internal.h"

DEFINE_STATIC_KEY_FALSE(rdt_enable_key);
DEFINE_STATIC_KEY_FALSE(rdt_mon_enable_key);
DEFINE_STATIC_KEY_FALSE(rdt_alloc_enable_key);

/* Mutex to protect rdtgroup access. */
DEFINE_MUTEX(rdtgroup_mutex);

static struct kernfs_root *rdt_root;
struct rdtgroup rdtgroup_default;
/* List of all resource groups, including rdtgroup_default. */
LIST_HEAD(rdt_all_groups);

/* list of entries for the schemata file */
LIST_HEAD(resctrl_schema_all);

/* The filesystem can only be mounted once. */
bool resctrl_mounted;

/* Kernel fs node for "info" directory under root */
static struct kernfs_node *kn_info;

/* Kernel fs node for "mon_groups" directory under root */
static struct kernfs_node *kn_mongrp;

/* Kernel fs node for "mon_data" directory under root */
static struct kernfs_node *kn_mondata;

/* Buffer used to report the outcome of the last user command. */
static struct seq_buf last_cmd_status;
static char last_cmd_status_buf[512];

static int rdtgroup_setup_root(struct rdt_fs_context *ctx);
static void rdtgroup_destroy_root(void);

struct dentry *debugfs_resctrl;

static bool resctrl_debug;

/* Reset the last-command status buffer. (body elided in this skeleton) */
void rdt_last_cmd_clear(void)
{}

/* Append string @s to the last-command status buffer. (body elided) */
void rdt_last_cmd_puts(const char *s)
{}

/* printf()-style append to the last-command status buffer. (body elided) */
void rdt_last_cmd_printf(const char *fmt, ...)
{}

/* Discard any staged, not-yet-applied schemata configurations. (body elided) */
void rdt_staged_configs_clear(void)
{}

/*
 * Trivial allocator for CLOSIDs. Since h/w only supports a small number,
 * we can keep a bitmap of free CLOSIDs in a single integer.
 *
 * Using a global CLOSID across all resources has some advantages and
 * some drawbacks:
 * + We can simply set current's closid to assign a task to a resource
 *   group.
 * + Context switch code can avoid extra memory references deciding which
 *   CLOSID to load into the PQR_ASSOC MSR
 * - We give up some options in configuring resource groups across multi-socket
 *   systems.
 * - Our choices on how to configure each resource become progressively more
 *   limited as the number of resources grows.
 */
/* Bitmap of free CLOSIDs (see allocator comment above). */
static unsigned long closid_free_map;
static int closid_free_map_len;

/* Number of CLOSIDs usable across resources. (body elided in this skeleton) */
int closids_supported(void)
{}

/* Populate closid_free_map with the usable CLOSIDs. (body elided) */
static void closid_init(void)
{}

/* Take a free CLOSID from closid_free_map. (body elided) */
static int closid_alloc(void)
{}

/* Return @closid to closid_free_map. (body elided) */
void closid_free(int closid)
{}

/**
 * closid_allocated - test if provided closid is in use
 * @closid: closid to be tested
 *
 * Return: true if @closid is currently associated with a resource group,
 * false if @closid is free
 */
/* NOTE(review): body elided in this skeleton; contract in kernel-doc above. */
bool closid_allocated(unsigned int closid)
{}

/**
 * rdtgroup_mode_by_closid - Return mode of resource group with closid
 * @closid: closid if the resource group
 *
 * Each resource group is associated with a @closid. Here the mode
 * of a resource group can be queried by searching for it using its closid.
 *
 * Return: mode as &enum rdtgrp_mode of resource group with closid @closid
 */
/* NOTE(review): body elided in this skeleton; contract in kernel-doc above. */
enum rdtgrp_mode rdtgroup_mode_by_closid(int closid)
{}

static const char * const rdt_mode_str[] =;

/**
 * rdtgroup_mode_str - Return the string representation of mode
 * @mode: the resource group mode as &enum rdtgroup_mode
 *
 * Return: string representation of valid mode, "unknown" otherwise
 */
/* NOTE(review): body elided in this skeleton; contract in kernel-doc above. */
static const char *rdtgroup_mode_str(enum rdtgrp_mode mode)
{}

/* set uid and gid of rdtgroup dirs and files to that of the creator */
static int rdtgroup_kn_set_ugid(struct kernfs_node *kn)
{}

/* Create one rftype-described file under @parent_kn. (body elided) */
static int rdtgroup_add_file(struct kernfs_node *parent_kn, struct rftype *rft)
{}

/* seq_file show callback; presumably dispatches to the rftype. (body elided) */
static int rdtgroup_seqfile_show(struct seq_file *m, void *arg)
{}

/* kernfs write callback; presumably dispatches to the rftype. (body elided) */
static ssize_t rdtgroup_file_write(struct kernfs_open_file *of, char *buf,
				   size_t nbytes, loff_t off)
{}

static const struct kernfs_ops rdtgroup_kf_single_ops =;

static const struct kernfs_ops kf_mondata_ops =;

/* True when the opened file is the list-format "cpus_list". (body elided) */
static bool is_cpu_list(struct kernfs_open_file *of)
{}

/* Show handler for the "cpus"/"cpus_list" files. (body elided) */
static int rdtgroup_cpus_show(struct kernfs_open_file *of,
			      struct seq_file *s, void *v)
{}

/*
 * This is safe against resctrl_sched_in() called from __switch_to()
 * because __switch_to() is executed with interrupts disabled. A local call
 * from update_closid_rmid() is protected against __switch_to() because
 * preemption is disabled.
 */
static void update_cpu_closid_rmid(void *info)
{}

/*
 * Update the PQR_ASSOC MSR on all cpus in @cpu_mask,
 *
 * Per task closids/rmids must have been set up before calling this function.
 */
static void
update_closid_rmid(const struct cpumask *cpu_mask, struct rdtgroup *r)
{}

/* Apply a "cpus" write to a MON group. (body elided) */
static int cpus_mon_write(struct rdtgroup *rdtgrp, cpumask_var_t newmask,
			  cpumask_var_t tmpmask)
{}

/* Clear the CPUs in @m from group @r's mask. (body elided) */
static void cpumask_rdtgrp_clear(struct rdtgroup *r, struct cpumask *m)
{}

/* Apply a "cpus" write to a CTRL_MON group. (body elided) */
static int cpus_ctrl_write(struct rdtgroup *rdtgrp, cpumask_var_t newmask,
			   cpumask_var_t tmpmask, cpumask_var_t tmpmask1)
{}

/* Write handler for the "cpus"/"cpus_list" files. (body elided) */
static ssize_t rdtgroup_cpus_write(struct kernfs_open_file *of,
				   char *buf, size_t nbytes, loff_t off)
{}

/**
 * rdtgroup_remove - the helper to remove resource group safely
 * @rdtgrp: resource group to remove
 *
 * On resource group creation via a mkdir, an extra kernfs_node reference is
 * taken to ensure that the rdtgroup structure remains accessible for the
 * rdtgroup_kn_unlock() calls where it is removed.
 *
 * Drop the extra reference here, then free the rdtgroup structure.
 *
 * Return: void
 */
/* NOTE(review): body elided in this skeleton; contract in kernel-doc above. */
static void rdtgroup_remove(struct rdtgroup *rdtgrp)
{}

/* SMP-call callback updating one task's CPU state. (body elided) */
static void _update_task_closid_rmid(void *task)
{}

/* Propagate @t's new closid/rmid to the CPU it may run on. (body elided) */
static void update_task_closid_rmid(struct task_struct *t)
{}

/* Test whether @tsk already belongs to @rdtgrp. (body elided) */
static bool task_in_rdtgroup(struct task_struct *tsk, struct rdtgroup *rdtgrp)
{}

/* Core of moving @tsk into @rdtgrp. (body elided) */
static int __rdtgroup_move_task(struct task_struct *tsk,
				struct rdtgroup *rdtgrp)
{}

/* Does task @t carry the closid of group @r? (body elided) */
static bool is_closid_match(struct task_struct *t, struct rdtgroup *r)
{}

/* Does task @t carry the rmid of group @r? (body elided) */
static bool is_rmid_match(struct task_struct *t, struct rdtgroup *r)
{}

/**
 * rdtgroup_tasks_assigned - Test if tasks have been assigned to resource group
 * @r: Resource group
 *
 * Return: 1 if tasks have been assigned to @r, 0 otherwise
 */
int rdtgroup_tasks_assigned(struct rdtgroup *r)
{}

/* Permission check before moving @task on behalf of the writer. (body elided) */
static int rdtgroup_task_write_permission(struct task_struct *task,
					  struct kernfs_open_file *of)
{}

/* Look up @pid and move that task into @rdtgrp. (body elided) */
static int rdtgroup_move_task(pid_t pid, struct rdtgroup *rdtgrp,
			      struct kernfs_open_file *of)
{}

/* Write handler for the "tasks" file. (body elided) */
static ssize_t rdtgroup_tasks_write(struct kernfs_open_file *of,
				    char *buf, size_t nbytes, loff_t off)
{}

/* Emit the tasks belonging to @r into seq_file @s. (body elided) */
static void show_rdt_tasks(struct rdtgroup *r, struct seq_file *s)
{}

/* Read handler for the "tasks" file. (body elided) */
static int rdtgroup_tasks_show(struct kernfs_open_file *of,
			       struct seq_file *s, void *v)
{}

/* Read handler for the "closid" file. (body elided) */
static int rdtgroup_closid_show(struct kernfs_open_file *of,
				struct seq_file *s, void *v)
{}

/* Read handler for the "rmid" file. (body elided) */
static int rdtgroup_rmid_show(struct kernfs_open_file *of,
			      struct seq_file *s, void *v)
{}

#ifdef CONFIG_PROC_CPU_RESCTRL

/*
 * A task can only be part of one resctrl control group and of one monitor
 * group which is associated to that control group.
 *
 * 1)   res:
 *      mon:
 *
 *    resctrl is not available.
 *
 * 2)   res:/
 *      mon:
 *
 *    Task is part of the root resctrl control group, and it is not associated
 *    to any monitor group.
 *
 * 3)  res:/
 *     mon:mon0
 *
 *    Task is part of the root resctrl control group and monitor group mon0.
 *
 * 4)  res:group0
 *     mon:
 *
 *    Task is part of resctrl control group group0, and it is not associated
 *    to any monitor group.
 *
 * 5) res:group0
 *    mon:mon1
 *
 *    Task is part of resctrl control group group0 and monitor group mon1.
 */
/* /proc/<pid> show handler; output format described in the comment above. */
int proc_resctrl_show(struct seq_file *s, struct pid_namespace *ns,
		      struct pid *pid, struct task_struct *tsk)
{}
#endif

/* Info-file show handler: outcome of the last command. (body elided) */
static int rdt_last_cmd_status_show(struct kernfs_open_file *of,
				    struct seq_file *seq, void *v)
{}

/* Info-file show handler: number of CLOSIDs. (body elided) */
static int rdt_num_closids_show(struct kernfs_open_file *of,
				struct seq_file *seq, void *v)
{}

/* Info-file show handler: default control value. (body elided) */
static int rdt_default_ctrl_show(struct kernfs_open_file *of,
			     struct seq_file *seq, void *v)
{}

/* Info-file show handler: minimum CBM bits. (body elided) */
static int rdt_min_cbm_bits_show(struct kernfs_open_file *of,
			     struct seq_file *seq, void *v)
{}

/* Info-file show handler: shareable bits. (body elided) */
static int rdt_shareable_bits_show(struct kernfs_open_file *of,
				   struct seq_file *seq, void *v)
{}

/*
 * rdt_bit_usage_show - Display current usage of resources
 *
 * A domain is a shared resource that can now be allocated differently. Here
 * we display the current regions of the domain as an annotated bitmask.
 * For each domain of this resource its allocation bitmask
 * is annotated as below to indicate the current usage of the corresponding bit:
 *   0 - currently unused
 *   X - currently available for sharing and used by software and hardware
 *   H - currently used by hardware only but available for software use
 *   S - currently used and shareable by software only
 *   E - currently used exclusively by one resource group
 *   P - currently pseudo-locked by one resource group
 */
/* NOTE(review): body elided; annotation legend is in the comment above. */
static int rdt_bit_usage_show(struct kernfs_open_file *of,
			      struct seq_file *seq, void *v)
{}

/* Info-file show handler: minimum bandwidth. (body elided) */
static int rdt_min_bw_show(struct kernfs_open_file *of,
			     struct seq_file *seq, void *v)
{}

/* Info-file show handler: number of RMIDs. (body elided) */
static int rdt_num_rmids_show(struct kernfs_open_file *of,
			      struct seq_file *seq, void *v)
{}

/* Info-file show handler: supported monitoring features. (body elided) */
static int rdt_mon_features_show(struct kernfs_open_file *of,
				 struct seq_file *seq, void *v)
{}

/* Info-file show handler: bandwidth granularity. (body elided) */
static int rdt_bw_gran_show(struct kernfs_open_file *of,
			     struct seq_file *seq, void *v)
{}

/* Info-file show handler: linear-delay flag. (body elided) */
static int rdt_delay_linear_show(struct kernfs_open_file *of,
			     struct seq_file *seq, void *v)
{}

/* Info-file show handler: max threshold occupancy. (body elided) */
static int max_threshold_occ_show(struct kernfs_open_file *of,
				  struct seq_file *seq, void *v)
{}

/* Info-file show handler: thread throttle mode. (body elided) */
static int rdt_thread_throttle_mode_show(struct kernfs_open_file *of,
					 struct seq_file *seq, void *v)
{}

/* Write handler for max threshold occupancy. (body elided) */
static ssize_t max_threshold_occ_write(struct kernfs_open_file *of,
				       char *buf, size_t nbytes, loff_t off)
{}

/*
 * rdtgroup_mode_show - Display mode of this resource group
 */
static int rdtgroup_mode_show(struct kernfs_open_file *of,
			      struct seq_file *s, void *v)
{}

/* Map a CDP conf type to its partner type. (body elided) */
static enum resctrl_conf_type resctrl_peer_type(enum resctrl_conf_type my_type)
{}

/* Info-file show handler: sparse bitmask support. (body elided) */
static int rdt_has_sparse_bitmasks_show(struct kernfs_open_file *of,
					struct seq_file *seq, void *v)
{}

/**
 * __rdtgroup_cbm_overlaps - Does CBM for intended closid overlap with other
 * @r: Resource to which domain instance @d belongs.
 * @d: The domain instance for which @closid is being tested.
 * @cbm: Capacity bitmask being tested.
 * @closid: Intended closid for @cbm.
 * @type: CDP type of @r.
 * @exclusive: Only check if overlaps with exclusive resource groups
 *
 * Checks if provided @cbm intended to be used for @closid on domain
 * @d overlaps with any other closids or other hardware usage associated
 * with this domain. If @exclusive is true then only overlaps with
 * resource groups in exclusive mode will be considered. If @exclusive
 * is false then overlaps with any resource group or hardware entities
 * will be considered.
 *
 * @cbm is unsigned long, even if only 32 bits are used, to make the
 * bitmap functions work correctly.
 *
 * Return: false if CBM does not overlap, true if it does.
 */
/* NOTE(review): body elided in this skeleton; contract in kernel-doc above. */
static bool __rdtgroup_cbm_overlaps(struct rdt_resource *r, struct rdt_ctrl_domain *d,
				    unsigned long cbm, int closid,
				    enum resctrl_conf_type type, bool exclusive)
{}

/**
 * rdtgroup_cbm_overlaps - Does CBM overlap with other use of hardware
 * @s: Schema for the resource to which domain instance @d belongs.
 * @d: The domain instance for which @closid is being tested.
 * @cbm: Capacity bitmask being tested.
 * @closid: Intended closid for @cbm.
 * @exclusive: Only check if overlaps with exclusive resource groups
 *
 * Resources that can be allocated using a CBM can use the CBM to control
 * the overlap of these allocations. rdtgroup_cmb_overlaps() is the test
 * for overlap. Overlap test is not limited to the specific resource for
 * which the CBM is intended though - when dealing with CDP resources that
 * share the underlying hardware the overlap check should be performed on
 * the CDP resource sharing the hardware also.
 *
 * Refer to description of __rdtgroup_cbm_overlaps() for the details of the
 * overlap test.
 *
 * Return: true if CBM overlap detected, false if there is no overlap
 */
/* NOTE(review): body elided in this skeleton; contract in kernel-doc above. */
bool rdtgroup_cbm_overlaps(struct resctrl_schema *s, struct rdt_ctrl_domain *d,
			   unsigned long cbm, int closid, bool exclusive)
{}

/**
 * rdtgroup_mode_test_exclusive - Test if this resource group can be exclusive
 * @rdtgrp: Resource group identified through its closid.
 *
 * An exclusive resource group implies that there should be no sharing of
 * its allocated resources. At the time this group is considered to be
 * exclusive this test can determine if its current schemata supports this
 * setting by testing for overlap with all other resource groups.
 *
 * Return: true if resource group can be exclusive, false if there is overlap
 * with allocations of other resource groups and thus this resource group
 * cannot be exclusive.
 */
/* NOTE(review): body elided in this skeleton; contract in kernel-doc above. */
static bool rdtgroup_mode_test_exclusive(struct rdtgroup *rdtgrp)
{}

/*
 * rdtgroup_mode_write - Modify the resource group's mode
 */
/* NOTE(review): body elided in this skeleton; purpose in the comment above. */
static ssize_t rdtgroup_mode_write(struct kernfs_open_file *of,
				   char *buf, size_t nbytes, loff_t off)
{}

/**
 * rdtgroup_cbm_to_size - Translate CBM to size in bytes
 * @r: RDT resource to which @d belongs.
 * @d: RDT domain instance.
 * @cbm: bitmask for which the size should be computed.
 *
 * The bitmask provided associated with the RDT domain instance @d will be
 * translated into how many bytes it represents. The size in bytes is
 * computed by first dividing the total cache size by the CBM length to
 * determine how many bytes each bit in the bitmask represents. The result
 * is multiplied with the number of bits set in the bitmask.
 *
 * @cbm is unsigned long, even if only 32 bits are used to make the
 * bitmap functions work correctly.
 */
/* NOTE(review): body elided in this skeleton; contract in kernel-doc above. */
unsigned int rdtgroup_cbm_to_size(struct rdt_resource *r,
				  struct rdt_ctrl_domain *d, unsigned long cbm)
{}

/*
 * rdtgroup_size_show - Display size in bytes of allocated regions
 *
 * The "size" file mirrors the layout of the "schemata" file, printing the
 * size in bytes of each region instead of the capacity bitmask.
 */
/* NOTE(review): body elided in this skeleton; purpose in the comment above. */
static int rdtgroup_size_show(struct kernfs_open_file *of,
			      struct seq_file *s, void *v)
{}

struct mon_config_info {};

#define INVALID_CONFIG_INDEX

/**
 * mon_event_config_index_get - get the hardware index for the
 *                              configurable event
 * @evtid: event id.
 *
 * Return: 0 for evtid == QOS_L3_MBM_TOTAL_EVENT_ID
 *         1 for evtid == QOS_L3_MBM_LOCAL_EVENT_ID
 *         INVALID_CONFIG_INDEX for invalid evtid
 */
/* NOTE(review): body elided; index mapping documented in kernel-doc above. */
static inline unsigned int mon_event_config_index_get(u32 evtid)
{}

/* Cross-CPU callback reading an event's configuration. (body elided) */
static void mon_event_config_read(void *info)
{}

/* Read event config of domain @d into @mon_info. (body elided) */
static void mondata_config_read(struct rdt_mon_domain *d, struct mon_config_info *mon_info)
{}

/* Print per-domain configuration of @evtid for resource @r. (body elided) */
static int mbm_config_show(struct seq_file *s, struct rdt_resource *r, u32 evtid)
{}

/* Show handler for MBM total-bytes configuration. (body elided) */
static int mbm_total_bytes_config_show(struct kernfs_open_file *of,
				       struct seq_file *seq, void *v)
{}

/* Show handler for MBM local-bytes configuration. (body elided) */
static int mbm_local_bytes_config_show(struct kernfs_open_file *of,
				       struct seq_file *seq, void *v)
{}

/* Cross-CPU callback writing an event's configuration. (body elided) */
static void mon_event_config_write(void *info)
{}

/* Apply configuration @val for @evtid to domain @d. (body elided) */
static void mbm_config_write_domain(struct rdt_resource *r,
				    struct rdt_mon_domain *d, u32 evtid, u32 val)
{}

/* Parse a user-supplied config token and apply it. (body elided) */
static int mon_config_write(struct rdt_resource *r, char *tok, u32 evtid)
{}

/* Write handler for MBM total-bytes configuration. (body elided) */
static ssize_t mbm_total_bytes_config_write(struct kernfs_open_file *of,
					    char *buf, size_t nbytes,
					    loff_t off)
{}

/* Write handler for MBM local-bytes configuration. (body elided) */
static ssize_t mbm_local_bytes_config_write(struct kernfs_open_file *of,
					    char *buf, size_t nbytes,
					    loff_t off)
{}

/* rdtgroup information files for one cache resource. */
/*
 * NOTE(review): the initializer of this table was elided when this skeleton
 * was generated ("=;" does not compile). Restore the rftype entries from the
 * full source before building.
 */
static struct rftype res_common_files[] =;

/* Create all files whose flags match @fflags under @kn. (body elided) */
static int rdtgroup_add_files(struct kernfs_node *kn, unsigned long fflags)
{}

/* Locate the rftype entry named @name. (body elided) */
static struct rftype *rdtgroup_get_rftype_by_name(const char *name)
{}

/* Boot-time setup for the thread throttle mode info file. (body elided) */
void __init thread_throttle_mode_init(void)
{}

/* Boot-time setup for the MBM config file named @config. (body elided) */
void __init mbm_config_rftype_init(const char *config)
{}

/**
 * rdtgroup_kn_mode_restrict - Restrict user access to named resctrl file
 * @r: The resource group with which the file is associated.
 * @name: Name of the file
 *
 * The permissions of named resctrl file, directory, or link are modified
 * to not allow read, write, or execute by any user.
 *
 * WARNING: This function is intended to communicate to the user that the
 * resctrl file has been locked down - that it is not relevant to the
 * particular state the system finds itself in. It should not be relied
 * on to protect from user access because after the file's permissions
 * are restricted the user can still change the permissions using chmod
 * from the command line.
 *
 * Return: 0 on success, <0 on failure.
 */
/* NOTE(review): body elided in this skeleton; contract in kernel-doc above. */
int rdtgroup_kn_mode_restrict(struct rdtgroup *r, const char *name)
{}

/**
 * rdtgroup_kn_mode_restore - Restore user access to named resctrl file
 * @r: The resource group with which the file is associated.
 * @name: Name of the file
 * @mask: Mask of permissions that should be restored
 *
 * Restore the permissions of the named file. If @name is a directory the
 * permissions of its parent will be used.
 *
 * Return: 0 on success, <0 on failure.
 */
/* NOTE(review): body elided in this skeleton; contract in kernel-doc above. */
int rdtgroup_kn_mode_restore(struct rdtgroup *r, const char *name,
			     umode_t mask)
{}

/* Create one per-resource subdirectory of "info". (body elided) */
static int rdtgroup_mkdir_info_resdir(void *priv, char *name,
				      unsigned long fflags)
{}

/* Build the "info" directory tree under @parent_kn. (body elided) */
static int rdtgroup_create_info_dir(struct kernfs_node *parent_kn)
{}

/* Create a monitoring-group directory @name for @prgrp. (body elided) */
static int
mongroup_create_dir(struct kernfs_node *parent_kn, struct rdtgroup *prgrp,
		    char *name, struct kernfs_node **dest_kn)
{}

/* Cross-CPU callback updating L3 QoS configuration. (body elided) */
static void l3_qos_cfg_update(void *arg)
{}

/* Cross-CPU callback updating L2 QoS configuration. (body elided) */
static void l2_qos_cfg_update(void *arg)
{}

/* Is the MBA resource in linear scale? (body elided) */
static inline bool is_mba_linear(void)
{}

/* Enable/disable cache QoS configuration at @level on all domains. (body elided) */
static int set_cache_qos_cfg(int level, bool enable)
{}

/* Restore the qos cfg state when a domain comes online */
void rdt_domain_reconfigure_cdp(struct rdt_resource *r)
{}

/* Allocate MBA software-controller state for domain @d. (body elided) */
static int mba_sc_domain_allocate(struct rdt_resource *r, struct rdt_ctrl_domain *d)
{}

/* Free MBA software-controller state of domain @d. (body elided) */
static void mba_sc_domain_destroy(struct rdt_resource *r,
				  struct rdt_ctrl_domain *d)
{}

/*
 * MBA software controller is supported only if
 * MBM is supported and MBA is in linear scale,
 * and the MBM monitor scope is the same as MBA
 * control scope.
 */
static bool supports_mba_mbps(void)
{}

/*
 * Enable or disable the MBA software controller
 * which helps user specify bandwidth in MBps.
 */
static int set_mba_sc(bool mba_sc)
{}

/* Enable CDP at cache @level. (body elided) */
static int cdp_enable(int level)
{}

/* Disable CDP at cache @level. (body elided) */
static void cdp_disable(int level)
{}

/* Arch hook: switch CDP on/off for resource level @l. (body elided) */
int resctrl_arch_set_cdp_enabled(enum resctrl_res_level l, bool enable)
{}

/*
 * We don't allow rdtgroup directories to be created anywhere
 * except the root directory. Thus when looking for the rdtgroup
 * structure for a kernfs node we are either looking at a directory,
 * in which case the rdtgroup structure is pointed at by the "priv"
 * field, otherwise we have a file, and need only look to the parent
 * to find the rdtgroup.
 */
static struct rdtgroup *kernfs_to_rdtgroup(struct kernfs_node *kn)
{}

/* Take references pinning @rdtgrp and @kn. (body elided) */
static void rdtgroup_kn_get(struct rdtgroup *rdtgrp, struct kernfs_node *kn)
{}

/* Drop the references taken by rdtgroup_kn_get(). (body elided) */
static void rdtgroup_kn_put(struct rdtgroup *rdtgrp, struct kernfs_node *kn)
{}

/* Lock and return the live group for @kn, presumably NULL if gone. (body elided) */
struct rdtgroup *rdtgroup_kn_lock_live(struct kernfs_node *kn)
{}

/* Unlock counterpart of rdtgroup_kn_lock_live(). (body elided) */
void rdtgroup_kn_unlock(struct kernfs_node *kn)
{}

static int mkdir_mondata_all(struct kernfs_node *parent_kn,
			     struct rdtgroup *prgrp,
			     struct kernfs_node **mon_data_kn);

/* Undo the context enables done by rdt_enable_ctx(). (body elided) */
static void rdt_disable_ctx(void)
{}

/* Apply the mount options carried in @ctx. (body elided) */
static int rdt_enable_ctx(struct rdt_fs_context *ctx)
{}

/* Add one schema entry for (@r, @type) to the schemata list. (body elided) */
static int schemata_list_add(struct rdt_resource *r, enum resctrl_conf_type type)
{}

/* Build the schemata list for all resources. (body elided) */
static int schemata_list_create(void)
{}

/* Tear down the schemata list. (body elided) */
static void schemata_list_destroy(void)
{}

/* fs_context ->get_tree: mount the resctrl filesystem. (body elided) */
static int rdt_get_tree(struct fs_context *fc)
{}

/* Mount-option identifiers for the resctrl filesystem. */
enum rdt_param {
	Opt_cdp,
	Opt_cdpl2,
	Opt_mba_mbps,
	Opt_debug,
	nr__rdt_params
};

static const struct fs_parameter_spec rdt_fs_parameters[] = {
	fsparam_flag("cdp",		Opt_cdp),
	fsparam_flag("cdpl2",		Opt_cdpl2),
	fsparam_flag("mba_MBps",	Opt_mba_mbps),
	fsparam_flag("debug",		Opt_debug),
	{}
};

/* fs_context ->parse_param: handle one mount option. (body elided) */
static int rdt_parse_param(struct fs_context *fc, struct fs_parameter *param)
{}

/* fs_context ->free: release mount-context state. (body elided) */
static void rdt_fs_context_free(struct fs_context *fc)
{}

static const struct fs_context_operations rdt_fs_context_ops =;

/* Allocate and initialize the mount context for a mount attempt. (body elided) */
static int rdt_init_fs_context(struct fs_context *fc)
{}

/* Reset all control values of resource @r to defaults. (body elided) */
static int reset_all_ctrls(struct rdt_resource *r)
{}

/*
 * Move tasks from one to the other group. If @from is NULL, then all tasks
 * in the systems are moved unconditionally (used for teardown).
 *
 * If @mask is not NULL the cpus on which moved tasks are running are set
 * in that mask so the update smp function call is restricted to affected
 * cpus.
 */
static void rdt_move_group_tasks(struct rdtgroup *from, struct rdtgroup *to,
				 struct cpumask *mask)
{}

/* Free all MON child groups of CTRL_MON group @rdtgrp. (body elided) */
static void free_all_child_rdtgrp(struct rdtgroup *rdtgrp)
{}

/*
 * Forcibly remove all of subdirectories under root.
 */
static void rmdir_all_sub(void)
{}

/* Super-block teardown on unmount. (body elided) */
static void rdt_kill_sb(struct super_block *sb)
{}

static struct file_system_type rdt_fs_type =;

/* Create one monitoring data file @name under @parent_kn. (body elided) */
static int mon_addfile(struct kernfs_node *parent_kn, const char *name,
		       void *priv)
{}

/* Remove subdirectory @subname (or @name) under @pkn. (body elided) */
static void mon_rmdir_one_subdir(struct kernfs_node *pkn, char *name, char *subname)
{}

/*
 * Remove all subdirectories of mon_data of ctrl_mon groups
 * and monitor groups for the given domain.
 * Remove files and directories containing "sum" of domain data
 * when last domain being summed is removed.
 */
static void rmdir_mondata_subdir_allrdtgrp(struct rdt_resource *r,
					   struct rdt_mon_domain *d)
{}

/* Create all event files for domain @d under @kn. (body elided) */
static int mon_add_all_files(struct kernfs_node *kn, struct rdt_mon_domain *d,
			     struct rdt_resource *r, struct rdtgroup *prgrp,
			     bool do_sum)
{}

/* Create the mon_data subdirectory for one domain. (body elided) */
static int mkdir_mondata_subdir(struct kernfs_node *parent_kn,
				struct rdt_mon_domain *d,
				struct rdt_resource *r, struct rdtgroup *prgrp)
{}

/*
 * Add all subdirectories of mon_data for "ctrl_mon" groups
 * and "monitor" groups with given domain id.
 */
static void mkdir_mondata_subdir_allrdtgrp(struct rdt_resource *r,
					   struct rdt_mon_domain *d)
{}

/* Create mon_data subdirectories for every domain of @r. (body elided) */
static int mkdir_mondata_subdir_alldom(struct kernfs_node *parent_kn,
				       struct rdt_resource *r,
				       struct rdtgroup *prgrp)
{}

/*
 * This creates a directory mon_data which contains the monitored data.
 *
 * mon_data has one directory for each domain which are named
 * in the format mon_<domain_name>_<domain_id>. For ex: A mon_data
 * with L3 domain looks as below:
 * ./mon_data:
 * mon_L3_00
 * mon_L3_01
 * mon_L3_02
 * ...
 *
 * Each domain directory has one file per event:
 * ./mon_L3_00/:
 * llc_occupancy
 *
 */
/* NOTE(review): body elided; directory layout described in the comment above. */
static int mkdir_mondata_all(struct kernfs_node *parent_kn,
			     struct rdtgroup *prgrp,
			     struct kernfs_node **dest_kn)
{}

/**
 * cbm_ensure_valid - Enforce validity on provided CBM
 * @_val:	Candidate CBM
 * @r:		RDT resource to which the CBM belongs
 *
 * The provided CBM represents all cache portions available for use. This
 * may be represented by a bitmap that does not consist of contiguous ones
 * and thus be an invalid CBM.
 * Here the provided CBM is forced to be a valid CBM by only considering
 * the first set of contiguous bits as valid and clearing all bits.
 * The intention here is to provide a valid default CBM with which a new
 * resource group is initialized. The user can follow this with a
 * modification to the CBM if the default does not satisfy the
 * requirements.
 */
/* NOTE(review): body elided in this skeleton; contract in kernel-doc above. */
static u32 cbm_ensure_valid(u32 _val, struct rdt_resource *r)
{}

/*
 * Initialize cache resources per RDT domain
 *
 * Set the RDT domain up to start off with all usable allocations. That is,
 * all shareable and unused bits. All-zero CBM is invalid.
 */
/* NOTE(review): body elided; intent described in the comment above. */
static int __init_one_rdt_domain(struct rdt_ctrl_domain *d, struct resctrl_schema *s,
				 u32 closid)
{}

/*
 * Initialize cache resources with default values.
 *
 * A new RDT group is being created on an allocation capable (CAT)
 * supporting system. Set this group up to start off with all usable
 * allocations.
 *
 * If there are no more shareable bits available on any domain then
 * the entire allocation will fail.
 */
static int rdtgroup_init_cat(struct resctrl_schema *s, u32 closid)
{}

/* Initialize MBA resource with default values. */
static void rdtgroup_init_mba(struct rdt_resource *r, u32 closid)
{}

/* Initialize the RDT group's allocations. */
static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp)
{}

/* Allocate monitoring (RMID) state for a new group. (body elided) */
static int mkdir_rdt_prepare_rmid_alloc(struct rdtgroup *rdtgrp)
{}

/* Undo mkdir_rdt_prepare_rmid_alloc(). (body elided) */
static void mkdir_rdt_prepare_rmid_free(struct rdtgroup *rgrp)
{}

/* Common preparation for creating a resource group of @rtype. (body elided) */
static int mkdir_rdt_prepare(struct kernfs_node *parent_kn,
			     const char *name, umode_t mode,
			     enum rdt_group_type rtype, struct rdtgroup **r)
{}

/* Undo mkdir_rdt_prepare() on a failure path. (body elided) */
static void mkdir_rdt_prepare_clean(struct rdtgroup *rgrp)
{}

/*
 * Create a monitor group under "mon_groups" directory of a control
 * and monitor group(ctrl_mon). This is a resource group
 * to monitor a subset of tasks and cpus in its parent ctrl_mon group.
 */
static int rdtgroup_mkdir_mon(struct kernfs_node *parent_kn,
			      const char *name, umode_t mode)
{}

/*
 * These are rdtgroups created under the root directory. Can be used
 * to allocate and monitor resources.
 */
static int rdtgroup_mkdir_ctrl_mon(struct kernfs_node *parent_kn,
				   const char *name, umode_t mode)
{}

/*
 * We allow creating mon groups only with in a directory called "mon_groups"
 * which is present in every ctrl_mon group. Check if this is a valid
 * "mon_groups" directory.
 *
 * 1. The directory should be named "mon_groups".
 * 2. The mon group itself should "not" be named "mon_groups".
 *   This makes sure "mon_groups" directory always has a ctrl_mon group
 *   as parent.
 */
static bool is_mon_groups(struct kernfs_node *kn, const char *name)
{}

/* kernfs ->mkdir handler for the resctrl hierarchy. (body elided) */
static int rdtgroup_mkdir(struct kernfs_node *parent_kn, const char *name,
			  umode_t mode)
{}

/* Remove a MON group. (body elided) */
static int rdtgroup_rmdir_mon(struct rdtgroup *rdtgrp, cpumask_var_t tmpmask)
{}

/* Remove the kernfs state of a CTRL_MON group. (body elided) */
static int rdtgroup_ctrl_remove(struct rdtgroup *rdtgrp)
{}

/* Remove a CTRL_MON group. (body elided) */
static int rdtgroup_rmdir_ctrl(struct rdtgroup *rdtgrp, cpumask_var_t tmpmask)
{}

/* kernfs ->rmdir handler. (body elided) */
static int rdtgroup_rmdir(struct kernfs_node *kn)
{}

/**
 * mongrp_reparent() - replace parent CTRL_MON group of a MON group
 * @rdtgrp:		the MON group whose parent should be replaced
 * @new_prdtgrp:	replacement parent CTRL_MON group for @rdtgrp
 * @cpus:		cpumask provided by the caller for use during this call
 *
 * Replaces the parent CTRL_MON group for a MON group, resulting in all member
 * tasks' CLOSID immediately changing to that of the new parent group.
 * Monitoring data for the group is unaffected by this operation.
 */
/* NOTE(review): body elided in this skeleton; contract in kernel-doc above. */
static void mongrp_reparent(struct rdtgroup *rdtgrp,
			    struct rdtgroup *new_prdtgrp,
			    cpumask_var_t cpus)
{}

/* kernfs ->rename handler. (body elided) */
static int rdtgroup_rename(struct kernfs_node *kn,
			   struct kernfs_node *new_parent, const char *new_name)
{}

/* kernfs ->show_options: emit active mount options. (body elided) */
static int rdtgroup_show_options(struct seq_file *seq, struct kernfs_root *kf)
{}

static struct kernfs_syscall_ops rdtgroup_kf_syscall_ops =;

/* Create the kernfs root for resctrl. (body elided) */
static int rdtgroup_setup_root(struct rdt_fs_context *ctx)
{}

/* Tear down the kernfs root. (body elided) */
static void rdtgroup_destroy_root(void)
{}

/* Boot-time initialization of rdtgroup_default. (body elided) */
static void __init rdtgroup_setup_default(void)
{}

/* Free per-domain monitoring state of @d. (body elided) */
static void domain_destroy_mon_state(struct rdt_mon_domain *d)
{}

/* Hotplug hook: a control domain of @r went offline. (body elided) */
void resctrl_offline_ctrl_domain(struct rdt_resource *r, struct rdt_ctrl_domain *d)
{}

/* Hotplug hook: a monitor domain of @r went offline. (body elided) */
void resctrl_offline_mon_domain(struct rdt_resource *r, struct rdt_mon_domain *d)
{}

/* Allocate per-domain monitoring state for @d. (body elided) */
static int domain_setup_mon_state(struct rdt_resource *r, struct rdt_mon_domain *d)
{}

/* Hotplug hook: a control domain of @r came online. (body elided) */
int resctrl_online_ctrl_domain(struct rdt_resource *r, struct rdt_ctrl_domain *d)
{}

/* Hotplug hook: a monitor domain of @r came online. (body elided) */
int resctrl_online_mon_domain(struct rdt_resource *r, struct rdt_mon_domain *d)
{}

/* Hotplug hook: CPU @cpu came online. (body elided) */
void resctrl_online_cpu(unsigned int cpu)
{}

/* Drop @cpu from child-group cpumasks of @r. (body elided) */
static void clear_childcpus(struct rdtgroup *r, unsigned int cpu)
{}

/* Hotplug hook: CPU @cpu went offline. (body elided) */
void resctrl_offline_cpu(unsigned int cpu)
{}

/*
 * rdtgroup_init - rdtgroup initialization
 *
 * Setup resctrl file system including set up root, create mount point,
 * register rdtgroup filesystem, and initialize files under root directory.
 *
 * Return: 0 on success or -errno
 */
/* NOTE(review): body elided in this skeleton; contract in the comment above. */
int __init rdtgroup_init(void)
{}

/* Exit-time teardown counterpart of rdtgroup_init(). (body elided) */
void __exit rdtgroup_exit(void)
{}
{}