/* linux/kernel/bpf/task_iter.c */

// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2020 Facebook */

#include <linux/init.h>
#include <linux/namei.h>
#include <linux/pid_namespace.h>
#include <linux/fs.h>
#include <linux/fdtable.h>
#include <linux/filter.h>
#include <linux/bpf_mem_alloc.h>
#include <linux/btf_ids.h>
#include <linux/mm_types.h>
#include "mmap_unlock_work.h"

/* Human-readable names for the iterator attach types (initializer elided
 * in this view — NOTE(review): confirm entries against the full source).
 */
static const char * const iter_task_type_names[] =;

/* Seq-file state shared by the task/task_file/task_vma iterators.
 * Fields elided in this view.
 */
struct bpf_iter_seq_task_common {};

/* Per-seq_file private state for the plain "task" iterator.
 * Fields elided in this view.
 */
struct bpf_iter_seq_task_info {};

/* Advance to the next task within a thread group, updating *tid.
 * @skip_if_dup_files presumably skips threads that share a files_struct
 * with a previously visited thread — body elided in this view, confirm.
 */
static struct task_struct *task_group_seq_get_next(struct bpf_iter_seq_task_common *common,
						   u32 *tid,
						   bool skip_if_dup_files)
{}

/* Find the next task to visit for the iterator, starting from *tid.
 * Returns NULL when iteration is exhausted. Body elided in this view.
 */
static struct task_struct *task_seq_get_next(struct bpf_iter_seq_task_common *common,
					     u32 *tid,
					     bool skip_if_dup_files)
{}

/* seq_file ->start callback for the task iterator (body elided). */
static void *task_seq_start(struct seq_file *seq, loff_t *pos)
{}

/* seq_file ->next callback for the task iterator (body elided). */
static void *task_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{}

/* Context passed to bpf_iter task programs; fields elided in this view. */
struct bpf_iter__task {};

DEFINE_BPF_ITER_FUNC(task, struct bpf_iter_meta *meta, struct task_struct *task)

/* Run the attached BPF program for one task; @in_stop distinguishes the
 * final invocation from ->stop. Body elided in this view.
 */
static int __task_seq_show(struct seq_file *seq, struct task_struct *task,
			   bool in_stop)
{}

/* seq_file ->show callback — presumably forwards to __task_seq_show. */
static int task_seq_show(struct seq_file *seq, void *v)
{}

/* seq_file ->stop callback for the task iterator (body elided). */
static void task_seq_stop(struct seq_file *seq, void *v)
{}

/* Validate and record link-creation parameters (from @linfo) into @aux
 * when a task iterator link is attached. Body elided in this view.
 */
static int bpf_iter_attach_task(struct bpf_prog *prog,
				union bpf_iter_link_info *linfo,
				struct bpf_iter_aux_info *aux)
{}

/* seq_file operations for the task iterator — presumably wires
 * task_seq_start/next/stop/show; initializer elided in this view.
 */
static const struct seq_operations task_seq_ops =;

/* Per-seq_file private state for the "task_file" iterator.
 * Fields elided in this view.
 */
struct bpf_iter_seq_task_file_info {};

/* Advance to the next open file across all tasks; returns NULL when
 * iteration is exhausted. Body elided in this view.
 */
static struct file *
task_file_seq_get_next(struct bpf_iter_seq_task_file_info *info)
{}

/* seq_file ->start callback for the task_file iterator (body elided). */
static void *task_file_seq_start(struct seq_file *seq, loff_t *pos)
{}

/* seq_file ->next callback for the task_file iterator (body elided). */
static void *task_file_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{}

/* Context passed to bpf_iter task_file programs; fields elided. */
struct bpf_iter__task_file {};

DEFINE_BPF_ITER_FUNC(task_file, struct bpf_iter_meta *meta,
		     struct task_struct *task, u32 fd,
		     struct file *file)

/* Run the attached BPF program for one file; @in_stop marks the final
 * call from ->stop. Body elided in this view.
 */
static int __task_file_seq_show(struct seq_file *seq, struct file *file,
				bool in_stop)
{}

/* seq_file ->show callback — presumably forwards to __task_file_seq_show. */
static int task_file_seq_show(struct seq_file *seq, void *v)
{}

/* seq_file ->stop callback for the task_file iterator (body elided). */
static void task_file_seq_stop(struct seq_file *seq, void *v)
{}

/* Initialize iterator private data — name suggests it captures the pid
 * namespace; body elided in this view, confirm against full source.
 */
static int init_seq_pidns(void *priv_data, struct bpf_iter_aux_info *aux)
{}

/* Release whatever init_seq_pidns acquired (body elided). */
static void fini_seq_pidns(void *priv_data)
{}

/* seq_file operations for the task_file iterator — presumably wires
 * task_file_seq_start/next/stop/show; initializer elided in this view.
 */
static const struct seq_operations task_file_seq_ops =;

/* Per-seq_file private state for the "task_vma" iterator.
 * Fields elided in this view.
 */
struct bpf_iter_seq_task_vma_info {};

/* How to locate the next VMA after re-acquiring mmap_lock (values elided). */
enum bpf_task_vma_iter_find_op {};

/* Advance to the next vm_area_struct across the target task(s);
 * returns NULL when iteration is exhausted. Body elided in this view.
 */
static struct vm_area_struct *
task_vma_seq_get_next(struct bpf_iter_seq_task_vma_info *info)
{}

/* seq_file ->start callback for the task_vma iterator (body elided). */
static void *task_vma_seq_start(struct seq_file *seq, loff_t *pos)
{}

/* seq_file ->next callback for the task_vma iterator (body elided). */
static void *task_vma_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{}

/* Context passed to bpf_iter task_vma programs; fields elided. */
struct bpf_iter__task_vma {};

DEFINE_BPF_ITER_FUNC(task_vma, struct bpf_iter_meta *meta,
		     struct task_struct *task, struct vm_area_struct *vma)

/* Run the attached BPF program for one VMA; @in_stop marks the final
 * call from ->stop. Body elided in this view.
 */
static int __task_vma_seq_show(struct seq_file *seq, bool in_stop)
{}

/* seq_file ->show callback — presumably forwards to __task_vma_seq_show. */
static int task_vma_seq_show(struct seq_file *seq, void *v)
{}

/* seq_file ->stop callback for the task_vma iterator (body elided). */
static void task_vma_seq_stop(struct seq_file *seq, void *v)
{}

/* seq_file operations for the task_vma iterator — presumably wires
 * task_vma_seq_start/next/stop/show; initializer elided in this view.
 */
static const struct seq_operations task_vma_seq_ops =;

/* seq_info descriptor for the task iterator (initializer elided). */
static const struct bpf_iter_seq_info task_seq_info =;

/* Report this link's parameters back to userspace via @info.
 * Body elided in this view.
 */
static int bpf_iter_fill_link_info(const struct bpf_iter_aux_info *aux, struct bpf_link_info *info)
{}

/* Emit link parameters into /proc fdinfo output (body elided). */
static void bpf_iter_task_show_fdinfo(const struct bpf_iter_aux_info *aux, struct seq_file *seq)
{}

/* Registration record for the "task" iterator target (initializer elided). */
static struct bpf_iter_reg task_reg_info =;

/* seq_info descriptor for the task_file iterator (initializer elided). */
static const struct bpf_iter_seq_info task_file_seq_info =;

/* Registration record for the "task_file" iterator target (elided). */
static struct bpf_iter_reg task_file_reg_info =;

/* seq_info descriptor for the task_vma iterator (initializer elided). */
static const struct bpf_iter_seq_info task_vma_seq_info =;

/* Registration record for the "task_vma" iterator target (elided). */
static struct bpf_iter_reg task_vma_reg_info =;

/* bpf_find_vma() helper: look up the VMA covering @start in @task's mm
 * and invoke @callback_fn on it with @callback_ctx. @flags reserved —
 * body elided in this view, confirm semantics against full source.
 */
BPF_CALL_5(bpf_find_vma, struct task_struct *, task, u64, start,
	   bpf_callback_t, callback_fn, void *, callback_ctx, u64, flags)
{}

/* Verifier-facing prototype for bpf_find_vma (initializer elided). */
const struct bpf_func_proto bpf_find_vma_proto =;

/* Heap-allocated internals of the open-coded task_vma iterator
 * (fields elided in this view).
 */
struct bpf_iter_task_vma_kern_data {};

/* Opaque BPF-program-visible iterator state (layout elided). */
struct bpf_iter_task_vma {} __attribute__((aligned));

/* Non-opaque version of bpf_iter_task_vma */
struct bpf_iter_task_vma_kern {} __attribute__((aligned));

__bpf_kfunc_start_defs();

/* kfunc: initialize @it to walk @task's VMAs starting at @addr.
 * Returns 0 or a negative errno — body elided in this view.
 */
__bpf_kfunc int bpf_iter_task_vma_new(struct bpf_iter_task_vma *it,
				      struct task_struct *task, u64 addr)
{}

/* kfunc: return the next VMA, or NULL when done (body elided). */
__bpf_kfunc struct vm_area_struct *bpf_iter_task_vma_next(struct bpf_iter_task_vma *it)
{}

/* kfunc: release resources held by @it (body elided). */
__bpf_kfunc void bpf_iter_task_vma_destroy(struct bpf_iter_task_vma *it)
{}

__bpf_kfunc_end_defs();

#ifdef CONFIG_CGROUPS

/* Opaque BPF-program-visible css_task iterator state (layout elided). */
struct bpf_iter_css_task {} __attribute__((aligned));

/* Non-opaque kernel-side view of bpf_iter_css_task (layout elided). */
struct bpf_iter_css_task_kern {} __attribute__((aligned));

__bpf_kfunc_start_defs();

/* kfunc: initialize @it to walk tasks belonging to @css; @flags
 * presumably selects the css_task_iter mode — body elided, confirm.
 */
__bpf_kfunc int bpf_iter_css_task_new(struct bpf_iter_css_task *it,
		struct cgroup_subsys_state *css, unsigned int flags)
{}

/* kfunc: return the next task, or NULL when done (body elided). */
__bpf_kfunc struct task_struct *bpf_iter_css_task_next(struct bpf_iter_css_task *it)
{}

/* kfunc: release resources held by @it (body elided). */
__bpf_kfunc void bpf_iter_css_task_destroy(struct bpf_iter_css_task *it)
{}

__bpf_kfunc_end_defs();

#endif /* CONFIG_CGROUPS */

/* Opaque BPF-program-visible task iterator state (layout elided). */
struct bpf_iter_task {} __attribute__((aligned));

/* Non-opaque kernel-side view of bpf_iter_task (layout elided). */
struct bpf_iter_task_kern {} __attribute__((aligned));

/* Iteration-mode flags for bpf_iter_task_new (values elided). */
enum {};

__bpf_kfunc_start_defs();

/* kfunc: initialize @it; @task__nullable is an optional starting task
 * and @flags selects the traversal mode — body elided, confirm.
 */
__bpf_kfunc int bpf_iter_task_new(struct bpf_iter_task *it,
		struct task_struct *task__nullable, unsigned int flags)
{}

/* kfunc: return the next task, or NULL when done (body elided). */
__bpf_kfunc struct task_struct *bpf_iter_task_next(struct bpf_iter_task *it)
{}

/* kfunc: release resources held by @it (body elided). */
__bpf_kfunc void bpf_iter_task_destroy(struct bpf_iter_task *it)
{}

__bpf_kfunc_end_defs();

/* Per-CPU irq_work used to drop mmap_lock from contexts that cannot
 * sleep (see mmap_unlock_work.h, included above).
 */
DEFINE_PER_CPU(struct mmap_unlock_irq_work, mmap_unlock_work);

/* irq_work callback — name indicates it performs the deferred
 * mmap_read_unlock; body elided in this view.
 */
static void do_mmap_read_unlock(struct irq_work *entry)
{}

/* Module init: presumably registers the task/task_file/task_vma
 * iterator targets defined above — body elided in this view.
 */
static int __init task_iter_init(void)
{}
late_initcall(task_iter_init);