// linux/kernel/audit_tree.c

// SPDX-License-Identifier: GPL-2.0
#include "audit.h"
#include <linux/fsnotify_backend.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/kthread.h>
#include <linux/refcount.h>
#include <linux/slab.h>

struct audit_tree;
struct audit_chunk;

/*
 * NOTE(review): all struct bodies and function bodies in this file are elided
 * in this extract; field layouts must be confirmed against the full source.
 */

/* One watched subtree per AUDIT_TREE rule; refcounted (see notes below). */
struct audit_tree {};

/* Per-inode tag; hashed by inode key and refcounted (see notes below). */
struct audit_chunk {};

/* fsnotify mark wrapper tying a watched inode to its current chunk. */
struct audit_tree_mark {};

/* All live trees — presumably protected by audit_filter_mutex; confirm. */
static LIST_HEAD(tree_list);
/* Trees queued for deferred destruction by prune_tree_thread(). */
static LIST_HEAD(prune_list);
/* Lazily-started kthread draining prune_list (see audit_launch_prune()). */
static struct task_struct *prune_thread;

/*
 * One struct chunk is attached to each inode of interest through
 * audit_tree_mark (fsnotify mark). We replace struct chunk on tagging /
 * untagging, the mark is stable as long as there is chunk attached. The
 * association between mark and chunk is protected by hash_lock and
 * audit_tree_group->mark_mutex. Thus as long as we hold
 * audit_tree_group->mark_mutex and check that the mark is alive by
 * FSNOTIFY_MARK_FLAG_ATTACHED flag check, we are sure the mark points to
 * the current chunk.
 *
 * Rules have pointer to struct audit_tree.
 * Rules have struct list_head rlist forming a list of rules over
 * the same tree.
 * References to struct chunk are collected at audit_inode{,_child}()
 * time and used in AUDIT_TREE rule matching.
 * These references are dropped at the same time we are calling
 * audit_free_names(), etc.
 *
 * Cyclic lists galore:
 * tree.chunks anchors chunk.owners[].list			hash_lock
 * tree.rules anchors rule.rlist				audit_filter_mutex
 * chunk.trees anchors tree.same_root				hash_lock
 * chunk.hash is a hash with middle bits of watch.inode as
 * a hash function.						RCU, hash_lock
 *
 * tree is refcounted; one reference for "some rules on rules_list refer to
 * it", one for each chunk with pointer to it.
 *
 * chunk is refcounted by embedded .refs. Mark associated with the chunk holds
 * one chunk reference. This reference is dropped either when a mark is going
 * to be freed (corresponding inode goes away) or when chunk attached to the
 * mark gets replaced. This reference must be dropped using
 * audit_mark_put_chunk() to make sure the reference is dropped only after RCU
 * grace period as it protects RCU readers of the hash table.
 *
 * node.index allows to get from node.list to containing chunk.
 * MSB of that sucker is stolen to mark taggings that we might have to
 * revert - several operations have very unpleasant cleanup logics and
 * that makes a difference.  Some.
 */

/* fsnotify group owning all audit tree marks; its mark_mutex serializes
 * chunk attach/detach (see the locking notes at the top of this file). */
static struct fsnotify_group *audit_tree_group __ro_after_init;
/* Slab cache for struct audit_tree_mark allocations. */
static struct kmem_cache *audit_tree_mark_cachep __ro_after_init;

/*
 * Tree lifetime helpers.  NOTE(review): bodies elided in this extract; the
 * comments below describe the contract implied by the declarations and the
 * refcounting notes above — confirm against the full source.
 */

/* Allocate a refcounted audit_tree for pathname @s; NULL on failure. */
static struct audit_tree *alloc_tree(const char *s)
{}

/* Take an additional reference on @tree. */
static inline void get_tree(struct audit_tree *tree)
{}

/* Drop a reference on @tree; the last put frees it. */
static inline void put_tree(struct audit_tree *tree)
{}

/* to avoid bringing the entire thing in audit.h */
/* Return the pathname @tree was created for. */
const char *audit_tree_path(struct audit_tree *tree)
{}

/*
 * Chunk refcounting and mark<->chunk association helpers.  NOTE(review):
 * bodies elided in this extract; comments reflect the locking/refcount notes
 * at the top of the file — confirm against the full source.
 */

/* Final teardown: release the memory backing @chunk. */
static void free_chunk(struct audit_chunk *chunk)
{}

/* Drop a chunk reference collected at audit_inode{,_child}() time. */
void audit_put_chunk(struct audit_chunk *chunk)
{}

/* RCU callback: drop the chunk reference after the grace period expires. */
static void __put_chunk(struct rcu_head *rcu)
{}

/*
 * Drop reference to the chunk that was held by the mark. This is the reference
 * that gets dropped after we've removed the chunk from the hash table and we
 * use it to make sure chunk cannot be freed before RCU grace period expires.
 */
static void audit_mark_put_chunk(struct audit_chunk *chunk)
{}

/* Map a generic fsnotify_mark to its containing audit_tree_mark. */
static inline struct audit_tree_mark *audit_mark(struct fsnotify_mark *mark)
{}

/* Return the chunk currently attached to @mark (see locking notes above). */
static struct audit_chunk *mark_chunk(struct fsnotify_mark *mark)
{}

/* fsnotify callback: free the audit mark once fsnotify is done with it. */
static void audit_tree_destroy_watch(struct fsnotify_mark *mark)
{}

/* Allocate a mark from audit_tree_mark_cachep; NULL on failure.
 * NOTE(review): body elided in this extract. */
static struct fsnotify_mark *alloc_mark(void)
{}

/* Allocate a chunk with room for @count owner slots; NULL on failure.
 * NOTE(review): body elided in this extract. */
static struct audit_chunk *alloc_chunk(int count)
{}

/*
 * Number of buckets in the chunk hash table.  The original initializer was
 * lost in this extract ("enum {};" leaves HASH_SIZE, used just below,
 * undefined); 128 matches the upstream audit_tree.c value.
 */
enum { HASH_SIZE = 128 };
/* Hash table of all live chunks, keyed from the inode (see inode_to_key()). */
static struct list_head chunk_hash_heads[HASH_SIZE];
/* Protects the hash table and chunk/tree list linkage (see notes above). */
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(hash_lock);

/*
 * Hash-table helpers.  NOTE(review): bodies elided in this extract; comments
 * reflect the declarations and the hashing notes above — confirm against the
 * full source.
 */

/* Function to return search key in our hash from inode. */
static unsigned long inode_to_key(const struct inode *inode)
{}

/* Map a hash key to its bucket in chunk_hash_heads[]. */
static inline struct list_head *chunk_hash(unsigned long key)
{}

/* hash_lock & mark->group->mark_mutex is held by caller */
static void insert_hash(struct audit_chunk *chunk)
{}

/* called under rcu_read_lock */
/* Find the chunk attached to @inode; presumably takes a reference for the
 * caller (dropped via audit_put_chunk()) — confirm against full source. */
struct audit_chunk *audit_tree_lookup(const struct inode *inode)
{}

/* Does @chunk belong to @tree?  Used in AUDIT_TREE rule matching. */
bool audit_tree_match(struct audit_chunk *chunk, struct audit_tree *tree)
{}

/* tagging and untagging inodes with trees */

/*
 * NOTE(review): bodies elided in this extract; comments reflect the
 * declarations and the notes at the top of the file (e.g. node.index holding
 * the owner slot) — confirm against the full source.
 */

/* Get from an owner node back to the chunk containing it (via node.index). */
static struct audit_chunk *find_chunk(struct audit_node *p)
{}

/* Associate @mark with @chunk (see mark<->chunk locking notes above). */
static void replace_mark_chunk(struct fsnotify_mark *mark,
			       struct audit_chunk *chunk)
{}

/* Swap @old for @new in the hash table and owner lists. */
static void replace_chunk(struct audit_chunk *new, struct audit_chunk *old)
{}

/* Unlink owner node @p from @chunk. */
static void remove_chunk_node(struct audit_chunk *chunk, struct audit_node *p)
{}

/* Count the trees currently owning @chunk. */
static int chunk_count_trees(struct audit_chunk *chunk)
{}

/* Detach one tree from @chunk, replacing or dropping the chunk as needed. */
static void untag_chunk(struct audit_chunk *chunk, struct fsnotify_mark *mark)
{}

/* Call with group->mark_mutex held, releases it */
static int create_chunk(struct inode *inode, struct audit_tree *tree)
{}

/* the first tagged inode becomes root of tree */
static int tag_chunk(struct inode *inode, struct audit_tree *tree)
{}

/*
 * Rule removal and tree pruning.  NOTE(review): bodies elided in this
 * extract; comments reflect the declarations and the existing in-file
 * comments — confirm against the full source.
 */

/* Emit an audit record noting that tree @rule is being removed. */
static void audit_tree_log_remove_rule(struct audit_context *context,
				       struct audit_krule *rule)
{}

/* Detach and kill every rule on @tree's rule list (tree.rules / rlist). */
static void kill_rules(struct audit_context *context, struct audit_tree *tree)
{}

/*
 * Remove tree from chunks. If 'tagged' is set, remove tree only from tagged
 * chunks. The function expects tagged chunks are all at the beginning of the
 * chunks list.
 */
static void prune_tree_chunks(struct audit_tree *victim, bool tagged)
{}

/*
 * finish killing struct audit_tree
 */
static void prune_one(struct audit_tree *victim)
{}

/* trim the uncommitted chunks from tree */

static void trim_marked(struct audit_tree *tree)
{}

/* Forward declaration; defined near the bottom of this file. */
static void audit_schedule_prune(void);

/* called with audit_filter_mutex */
int audit_remove_tree_rule(struct audit_krule *rule)
{}

/* Per-mount iterator callback — presumably compares @mnt's root against the
 * chunk/tree passed in @arg; confirm against full source. */
static int compare_root(struct vfsmount *mnt, void *arg)
{}

/* Walk all trees and drop chunks for inodes no longer under their paths. */
void audit_trim_trees(void)
{}

/*
 * Rule setup and the prune kthread.  NOTE(review): bodies elided in this
 * extract; comments reflect the declarations and existing in-file comments —
 * confirm against the full source.
 */

/* Build an audit_tree from @pathname/@op and attach it to @rule.
 * Returns 0 or -errno. */
int audit_make_tree(struct audit_krule *rule, char *pathname, u32 op)
{}

/* Drop a tree reference from outside this file (wrapper around put_tree). */
void audit_put_tree(struct audit_tree *tree)
{}

/* Per-mount iterator callback: tag @mnt's root inode with the tree in @arg. */
static int tag_mount(struct vfsmount *mnt, void *arg)
{}

/*
 * That gets run when evict_chunk() ends up needing to kill audit_tree.
 * Runs from a separate thread.
 */
static int prune_tree_thread(void *unused)
{}

/* Start prune_thread if it is not already running; returns 0 or -errno. */
static int audit_launch_prune(void)
{}

/* called with audit_filter_mutex */
int audit_add_tree_rule(struct audit_krule *rule)
{}

/* Retag trees when a watched subtree moves from path @old to path @new. */
int audit_tag_tree(char *old, char *new)
{}


/* Kick prune_thread to process prune_list asynchronously.
 * NOTE(review): bodies in this section elided in this extract. */
static void audit_schedule_prune(void)
{}

/*
 * ... and that one is done if evict_chunk() decides to delay until the end
 * of syscall.  Runs synchronously.
 */
void audit_kill_trees(struct audit_context *context)
{}

/*
 *  Here comes the stuff asynchronous to auditctl operations
 */

/* The watched inode is going away: detach @chunk from all owning trees. */
static void evict_chunk(struct audit_chunk *chunk)
{}

/* fsnotify inode-event callback for the audit tree group. */
static int audit_tree_handle_event(struct fsnotify_mark *mark, u32 mask,
				   struct inode *inode, struct inode *dir,
				   const struct qstr *file_name, u32 cookie)
{}

/* fsnotify callback: @mark is being torn down; evict its chunk. */
static void audit_tree_freeing_mark(struct fsnotify_mark *mark,
				    struct fsnotify_group *group)
{}

static const struct fsnotify_ops audit_tree_ops =;

/* Boot-time init: presumably creates audit_tree_mark_cachep, allocates
 * audit_tree_group and initializes chunk_hash_heads — confirm against full
 * source (body elided in this extract). */
static int __init audit_tree_init(void)
{}
__initcall(audit_tree_init);