linux/kernel/bpf/cgroup_iter.c

// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2022 Google */
#include <linux/bpf.h>
#include <linux/btf_ids.h>
#include <linux/cgroup.h>
#include <linux/kernel.h>
#include <linux/seq_file.h>

#include "../cgroup/cgroup-internal.h"  /* cgroup_mutex and cgroup_is_dead */

/* cgroup_iter provides four modes of traversal to the cgroup hierarchy.
 *
 *  1. Walk the descendants of a cgroup in pre-order.
 *  2. Walk the descendants of a cgroup in post-order.
 *  3. Walk the ancestors of a cgroup.
 *  4. Show the given cgroup only.
 *
 * For walking descendants, cgroup_iter can walk in either pre-order or
 * post-order. For walking ancestors, the iter walks up from a cgroup to
 * the root.
 *
 * The iter program can terminate the walk early by returning 1. The walk
 * continues if the prog returns 0.
 *
 * The prog can check (seq->num == 0) to determine whether this is
 * the first element. The prog may also be passed a NULL cgroup,
 * which means the walk has completed and the prog has a chance to
 * do post-processing, such as outputting an epilogue.
 *
 * Note: the iter_prog is called with cgroup_mutex held.
 *
 * Currently only one session is supported, which means that, depending on
 * the volume of data the BPF program intends to send to user space, the
 * number of cgroups that can be walked is limited. For example, given that
 * the current buffer size is 8 * PAGE_SIZE, if the program sends 64B of
 * data for each cgroup, and assuming PAGE_SIZE is 4KB, the total number of
 * cgroups that can be walked is 512. This is a limitation of cgroup_iter.
 * If the output data is larger than the kernel buffer size, then after all
 * data in the kernel buffer has been consumed by user space, the subsequent
 * read() syscall will signal EOPNOTSUPP. To work around this, the user may
 * have to update their program to reduce the volume of data sent to the
 * output, for example by skipping some uninteresting cgroups.
 */
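
/* For illustration, a minimal sketch of a BPF iterator program that targets
 * this iterator (a separate object, not part of this file; modeled on the
 * kernel selftests). It prints one cgroup ID per line, emits an epilogue
 * when the walk has completed (NULL cgroup), and demonstrates early
 * termination by returning 1. The program name and the early-exit threshold
 * are illustrative assumptions.
 */

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

char _license[] SEC("license") = "GPL";

SEC("iter/cgroup")
int dump_cgroup_ids(struct bpf_iter__cgroup *ctx)
{
	struct seq_file *seq = ctx->meta->seq;
	struct cgroup *cgrp = ctx->cgroup;

	/* NULL cgroup: the walk is complete; print an epilogue. */
	if (!cgrp) {
		BPF_SEQ_PRINTF(seq, "epilogue\n");
		return 0;
	}

	/* First element: seq_num == 0; print a prologue. */
	if (ctx->meta->seq_num == 0)
		BPF_SEQ_PRINTF(seq, "prologue\n");

	BPF_SEQ_PRINTF(seq, "%llu\n", cgrp->kn->id);

	/* Returning 1 terminates the walk early; stop after eight cgroups
	 * (an arbitrary threshold) to stay within the single-session buffer
	 * described above.
	 */
	return ctx->meta->seq_num >= 8 ? 1 : 0;
}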

struct bpf_iter__cgroup {};

struct cgroup_iter_priv {};

static void *cgroup_iter_seq_start(struct seq_file *seq, loff_t *pos)
{}

static int __cgroup_iter_seq_show(struct seq_file *seq,
				  struct cgroup_subsys_state *css, int in_stop);

static void cgroup_iter_seq_stop(struct seq_file *seq, void *v)
{}

static void *cgroup_iter_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{}

static int __cgroup_iter_seq_show(struct seq_file *seq,
				  struct cgroup_subsys_state *css, int in_stop)
{}

static int cgroup_iter_seq_show(struct seq_file *seq, void *v)
{}

static const struct seq_operations cgroup_iter_seq_ops =;

BTF_ID_LIST_GLOBAL_SINGLE(bpf_cgroup_btf_id, struct, cgroup)

static int cgroup_iter_seq_init(void *priv, struct bpf_iter_aux_info *aux)
{}

static void cgroup_iter_seq_fini(void *priv)
{}

static const struct bpf_iter_seq_info cgroup_iter_seq_info =;

static int bpf_iter_attach_cgroup(struct bpf_prog *prog,
				  union bpf_iter_link_info *linfo,
				  struct bpf_iter_aux_info *aux)
{}
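
/* For illustration, a hedged user-space sketch of attaching this iterator
 * with libbpf and reading one traversal session (not part of this file).
 * The skeleton type and program name (cgroup_iter_bpf, dump_cgroup_ids)
 * are hypothetical; bpf_program__attach_iter(), bpf_iter_create() and the
 * union bpf_iter_link_info cgroup fields are existing libbpf/UAPI
 * interfaces.
 */

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
#include "cgroup_iter.skel.h"	/* hypothetical skeleton */

int attach_and_dump(struct cgroup_iter_bpf *skel, const char *cgrp_path)
{
	LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	union bpf_iter_link_info linfo = {};
	struct bpf_link *link;
	char buf[4096];
	ssize_t len;
	int cgrp_fd, iter_fd, err = -1;

	cgrp_fd = open(cgrp_path, O_RDONLY);
	if (cgrp_fd < 0)
		return -1;

	/* Select the target cgroup and the walk order (pre-order here). */
	linfo.cgroup.cgroup_fd = cgrp_fd;
	linfo.cgroup.order = BPF_CGROUP_ITER_DESCENDANTS_PRE;
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);

	link = bpf_program__attach_iter(skel->progs.dump_cgroup_ids, &opts);
	if (!link)
		goto out_close_cgrp;

	/* Each open of the iterator link starts one traversal session. */
	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (iter_fd < 0)
		goto out_destroy_link;

	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, len, stdout);

	err = 0;
	close(iter_fd);
out_destroy_link:
	bpf_link__destroy(link);
out_close_cgrp:
	close(cgrp_fd);
	return err;
}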

static void bpf_iter_detach_cgroup(struct bpf_iter_aux_info *aux)
{}

static void bpf_iter_cgroup_show_fdinfo(const struct bpf_iter_aux_info *aux,
					struct seq_file *seq)
{}

static int bpf_iter_cgroup_fill_link_info(const struct bpf_iter_aux_info *aux,
					  struct bpf_link_info *info)
{}
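
/* For illustration, a user-space sketch of inspecting an attached cgroup
 * iter link (not part of this file). bpf_obj_get_info_by_fd() and the
 * bpf_link_info.iter.cgroup fields come from the UAPI / libbpf; the
 * surrounding function is illustrative.
 */

#include <stdio.h>
#include <string.h>
#include <bpf/bpf.h>

static void print_cgroup_iter_link_info(int link_fd)
{
	struct bpf_link_info info;
	__u32 len = sizeof(info);

	memset(&info, 0, sizeof(info));
	if (bpf_obj_get_info_by_fd(link_fd, &info, &len))
		return;

	/* These fields are filled by bpf_iter_cgroup_fill_link_info(). */
	printf("cgroup_id: %llu order: %u\n",
	       (unsigned long long)info.iter.cgroup.cgroup_id,
	       info.iter.cgroup.order);
}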

DEFINE_BPF_ITER_FUNC(cgroup, struct bpf_iter_meta *meta,
		     struct cgroup *cgroup)

static struct bpf_iter_reg bpf_cgroup_reg_info =;

static int __init bpf_cgroup_iter_init(void)
{}

late_initcall(bpf_cgroup_iter_init);

struct bpf_iter_css {} __attribute__((aligned));

struct bpf_iter_css_kern {} __attribute__((aligned));

__bpf_kfunc_start_defs();

__bpf_kfunc int bpf_iter_css_new(struct bpf_iter_css *it,
		struct cgroup_subsys_state *start, unsigned int flags)
{}

__bpf_kfunc struct cgroup_subsys_state *bpf_iter_css_next(struct bpf_iter_css *it)
{}

__bpf_kfunc void bpf_iter_css_destroy(struct bpf_iter_css *it)
{}

__bpf_kfunc_end_defs();
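
/* For illustration, a hedged sketch of driving the three kfuncs above from
 * a BPF program (a separate object, not part of this file). Nesting a css
 * walk inside an iter/cgroup program is an assumption made purely for
 * illustration; the kfunc declarations mirror the kernel signatures, and
 * the open-coded iterator must run under RCU protection.
 */

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

char _license[] SEC("license") = "GPL";

extern void bpf_rcu_read_lock(void) __ksym;
extern void bpf_rcu_read_unlock(void) __ksym;
extern int bpf_iter_css_new(struct bpf_iter_css *it,
			    struct cgroup_subsys_state *start,
			    unsigned int flags) __ksym;
extern struct cgroup_subsys_state *bpf_iter_css_next(struct bpf_iter_css *it) __ksym;
extern void bpf_iter_css_destroy(struct bpf_iter_css *it) __ksym;

SEC("iter/cgroup")
int count_css_subtree(struct bpf_iter__cgroup *ctx)
{
	struct cgroup *cgrp = ctx->cgroup;
	struct bpf_iter_css it;
	int n = 0;

	if (!cgrp)
		return 0;

	/* Pre-order walk of the subtree rooted at cgrp->self; the count
	 * includes the root css itself.
	 */
	bpf_rcu_read_lock();
	bpf_iter_css_new(&it, &cgrp->self, BPF_CGROUP_ITER_DESCENDANTS_PRE);
	while (bpf_iter_css_next(&it))
		n++;
	bpf_iter_css_destroy(&it);
	bpf_rcu_read_unlock();

	BPF_SEQ_PRINTF(ctx->meta->seq, "%llu: %d css nodes in subtree\n",
		       cgrp->kn->id, n);
	return 0;
}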