// linux/net/unix/garbage.c

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * NET3:	Garbage Collector For AF_UNIX sockets
 *
 * Garbage Collector:
 *	Copyright (C) Barak A. Pearlmutter.
 *
 * Chopped about by Alan Cox 22/3/96 to make it fit the AF_UNIX socket problem.
 * If it doesn't work blame me, it worked when Barak sent it.
 *
 * Assumptions:
 *
 *  - object w/ a bit
 *  - free list
 *
 * Current optimizations:
 *
 *  - explicit stack instead of recursion
 *  - tail recurse on first born instead of immediate push/pop
 *  - we gather the stuff that should not be killed into tree
 *    and stack is just a path from root to the current pointer.
 *
 *  Future optimizations:
 *
 *  - don't just push entire root set; process in place
 *
 *  Fixes:
 *	Alan Cox	07 Sept	1997	Vmalloc internal stack as needed.
 *					Cope with changing max_files.
 *	Al Viro		11 Oct 1998
 *		Graph may have cycles. That is, we can send the descriptor
 *		of foo to bar and vice versa. Current code chokes on that.
 *		Fix: move SCM_RIGHTS ones into the separate list and then
 *		skb_free() them all instead of doing explicit fput's.
 *		Another problem: since fput() may block somebody may
 *		create a new unix_socket when we are in the middle of sweep
 *		phase. Fix: revert the logic wrt MARKED. Mark everything
 *		upon the beginning and unmark non-junk ones.
 *
 *		[12 Oct 1998] AAARGH! New code purges all SCM_RIGHTS
 *		sent to connect()'ed but still not accept()'ed sockets.
 *		Fixed. Old code had slightly different problem here:
 *		extra fput() in situation when we passed the descriptor via
 *		such socket and closed it (descriptor). That would happen on
 *		each unix_gc() until the accept(). Since the struct file in
 *		question would go to the free list and might be reused...
 *		That might be the reason of random oopses on filp_close()
 *		in unrelated processes.
 *
 *	AV		28 Feb 1999
 *		Kill the explicit allocation of stack. Now we keep the tree
 *		with root in dummy + pointer (gc_current) to one of the nodes.
 *		Stack is represented as path from gc_current to dummy. Unmark
 *		now means "add to tree". Push == "make it a son of gc_current".
 *		Pop == "move gc_current to parent". We keep only pointers to
 *		parents (->gc_tree).
 *	AV		1 Mar 1999
 *		Damn. Added missing check for ->dead in listen queues scanning.
 *
 *	Miklos Szeredi 25 Jun 2007
 *		Reimplement with a cycle collecting algorithm. This should
 *		solve several problems with the previous code, like being racy
 *		wrt receive and holding up unrelated socket operations.
 */

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/un.h>
#include <linux/net.h>
#include <linux/fs.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/file.h>
#include <linux/proc_fs.h>
#include <linux/mutex.h>
#include <linux/wait.h>

#include <net/sock.h>
#include <net/af_unix.h>
#include <net/scm.h>
#include <net/tcp_states.h>

/*
 * unix_get_socket - presumably maps a struct file back to the unix_sock
 * it represents (NULL for non-AF_UNIX files) — TODO confirm; the body is
 * elided in this extract, so the contract is inferred from the name only.
 */
struct unix_sock *unix_get_socket(struct file *filp)
{}

/*
 * unix_edge_successor - presumably returns the vertex that @edge points
 * at in the inflight-fd graph — TODO confirm; body elided in this extract.
 */
static struct unix_vertex *unix_edge_successor(struct unix_edge *edge)
{}

/*
 * Cached state of the inflight-fd graph:
 *  - unix_graph_maybe_cyclic: presumably set when the graph may contain a
 *    cycle, so a full GC pass is warranted — TODO confirm.
 *  - unix_graph_grouped: presumably set once vertices have been grouped
 *    into SCCs by a previous walk — TODO confirm.
 */
static bool unix_graph_maybe_cyclic;
static bool unix_graph_grouped;

/*
 * NOTE(review): presumably invalidates the cached graph state above after
 * a topology change around @vertex — body elided in this extract.
 */
static void unix_update_graph(struct unix_vertex *vertex)
{}

/* Vertices not yet visited by the current graph walk. */
static LIST_HEAD(unix_unvisited_vertices);

/*
 * NOTE(review): the enumerator list and the initializer below were
 * stripped by the extraction (note the bare "{}" and "=;").  Upstream
 * this enum defines the index marks used to tag a vertex's walk state;
 * restore both from the full source before building.
 */
enum unix_vertex_index {};

static unsigned long unix_vertex_unvisited_index =;

/*
 * Presumably links one edge (one inflight fd in @fpl) into the graph —
 * body elided in this extract.
 */
static void unix_add_edge(struct scm_fp_list *fpl, struct unix_edge *edge)
{}

/*
 * Presumably the inverse of unix_add_edge(): unlinks @edge from the
 * graph — body elided in this extract.
 */
static void unix_del_edge(struct scm_fp_list *fpl, struct unix_edge *edge)
{}

/*
 * Presumably frees the vertices still attached to @fpl — TODO confirm;
 * body elided in this extract.
 */
static void unix_free_vertices(struct scm_fp_list *fpl)
{}

/* Presumably protects the inflight graph and the GC bookkeeping below. */
static DEFINE_SPINLOCK(unix_gc_lock);
/* Presumably the count of AF_UNIX fds currently in flight — TODO confirm. */
unsigned int unix_tot_inflight;

/*
 * unix_add_edges - public entry; presumably records the fds carried in
 * @fpl as graph edges toward @receiver — TODO confirm; body elided in
 * this extract.
 */
void unix_add_edges(struct scm_fp_list *fpl, struct unix_sock *receiver)
{}

/*
 * Presumably removes the edges created by unix_add_edges() for @fpl —
 * body elided in this extract.
 */
void unix_del_edges(struct scm_fp_list *fpl)
{}

/*
 * Presumably re-targets @receiver's incoming edges after a socket
 * handoff (e.g. at accept() time) — TODO confirm; body elided in this
 * extract.
 */
void unix_update_edges(struct unix_sock *receiver)
{}

/*
 * Presumably pre-allocates graph bookkeeping for @fpl before it is sent;
 * int return suggests 0 on success / negative errno on failure — TODO
 * confirm; body elided in this extract.
 */
int unix_prepare_fpl(struct scm_fp_list *fpl)
{}

/*
 * Presumably undoes unix_prepare_fpl() and releases whatever graph
 * resources @fpl still owns — body elided in this extract.
 */
void unix_destroy_fpl(struct scm_fp_list *fpl)
{}

/*
 * Presumably tests whether @vertex is garbage (only reachable from
 * closed sockets) — TODO confirm; body elided in this extract.
 */
static bool unix_vertex_dead(struct unix_vertex *vertex)
{}

/*
 * Presumably moves the fd-carrying skbs queued on @u onto @hitlist so
 * they can be freed outside the lock — TODO confirm; body elided in this
 * extract.
 */
static void unix_collect_queue(struct unix_sock *u, struct sk_buff_head *hitlist)
{}

/*
 * Presumably gathers, for every vertex on the @scc list, the skbs that
 * pin the cycle into @hitlist — body elided in this extract.
 */
static void unix_collect_skb(struct list_head *scc, struct sk_buff_head *hitlist)
{}

/*
 * Presumably reports whether the strongly connected component @scc
 * actually forms a cycle — TODO confirm; body elided in this extract.
 */
static bool unix_scc_cyclic(struct list_head *scc)
{}

/* Vertices already visited by the current graph walk. */
static LIST_HEAD(unix_visited_vertices);
/*
 * NOTE(review): initializer stripped by the extraction (bare "=;") —
 * restore the index mark value from the full source before building.
 */
static unsigned long unix_vertex_grouped_index =;

/*
 * Core of the cycle-collecting walk (the 2007 rewrite noted in the file
 * header).  Presumably an SCC traversal rooted at @vertex that appends
 * unreachable skbs to @hitlist — TODO confirm the exact algorithm; body
 * elided in this extract.
 */
static void __unix_walk_scc(struct unix_vertex *vertex, unsigned long *last_index,
			    struct sk_buff_head *hitlist)
{}

/*
 * Presumably drives __unix_walk_scc() over every vertex still on
 * unix_unvisited_vertices — body elided in this extract.
 */
static void unix_walk_scc(struct sk_buff_head *hitlist)
{}

/*
 * Presumably a fast path taken when the graph is already grouped into
 * SCCs (see unix_graph_grouped), skipping the full walk — TODO confirm;
 * body elided in this extract.
 */
static void unix_walk_scc_fast(struct sk_buff_head *hitlist)
{}

/* Presumably true while a GC pass is running — TODO confirm. */
static bool gc_in_progress;

/*
 * Workqueue callback that performs the actual garbage collection — body
 * elided in this extract.
 */
static void __unix_gc(struct work_struct *work)
{}

/* Work item bound to __unix_gc(). */
static DECLARE_WORK(unix_gc_work, __unix_gc);

/*
 * Public GC trigger — presumably schedules unix_gc_work; body elided in
 * this extract.
 */
void unix_gc(void)
{}

/*
 * NOTE(review): both macro values were stripped by the extraction.
 * Judging by the names they bound when GC is forced and how many inflight
 * fds one sender is allowed — restore the values from the full source
 * before building.
 */
#define UNIX_INFLIGHT_TRIGGER_GC
#define UNIX_INFLIGHT_SANE_USER

/*
 * Presumably throttles senders of @fpl while garbage collection is
 * needed or in progress — TODO confirm; body elided in this extract.
 */
void wait_for_unix_gc(struct scm_fp_list *fpl)
{}