// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * inet fragments management
 *
 * 		Authors:	Pavel Emelyanov <[email protected]>
 *				Started as consolidation of ipv4/ip_fragment.c,
 *				ipv6/reassembly.c, and ipv6 nf conntrack reassembly
 */

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <linux/rhashtable.h>

#include <net/sock.h>
#include <net/inet_frag.h>
#include <net/inet_ecn.h>
#include <net/ip.h>
#include <net/ipv6.h>

#include "../core/sock_destructor.h"

/* Use skb->cb to track consecutive/adjacent fragments coming at
 * the end of the queue. Nodes in the rb-tree queue will
 * contain "runs" of one or more adjacent fragments.
 *
 * Invariants:
 * - next_frag is NULL at the tail of a "run";
 * - the head of a "run" has the sum of all fragment lengths in frag_run_len.
 */
struct ipfrag_skb_cb {
	union {
		struct inet_skb_parm	h4;	/* must be first: overlays IPCB() */
		struct inet6_skb_parm	h6;	/* ... and IP6CB() */
	};
	struct sk_buff		*next_frag;	/* NULL at the tail of a "run" */
	int			frag_run_len;	/* total run length, valid at the head */
};

#define FRAG_CB(skb)		((struct ipfrag_skb_cb *)((skb)->cb))

static void fragcb_clear(struct sk_buff *skb)
{
	RB_CLEAR_NODE(&skb->rbnode);
	FRAG_CB(skb)->next_frag = NULL;
	FRAG_CB(skb)->frag_run_len = skb->len;
}

/* Append skb to the last "run". */
static void fragrun_append_to_last(struct inet_frag_queue *q,
				   struct sk_buff *skb)
{
	fragcb_clear(skb);

	/* the head of the run keeps the total run length */
	FRAG_CB(q->last_run_head)->frag_run_len += skb->len;
	FRAG_CB(q->fragments_tail)->next_frag = skb;
	q->fragments_tail = skb;
}

/* Create a new "run" with the skb. */
static void fragrun_create(struct inet_frag_queue *q, struct sk_buff *skb)
{
	BUILD_BUG_ON(sizeof(struct ipfrag_skb_cb) > sizeof(skb->cb));
	fragcb_clear(skb);

	/* a new run becomes the right-most node of the rb-tree */
	if (q->last_run_head)
		rb_link_node(&skb->rbnode, &q->last_run_head->rbnode,
			     &q->last_run_head->rbnode.rb_right);
	else
		rb_link_node(&skb->rbnode, NULL, &q->rb_fragments.rb_node);
	rb_insert_color(&skb->rbnode, &q->rb_fragments);

	q->fragments_tail = skb;
	q->last_run_head = skb;
}
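
/* Illustration only, not upstream code: a minimal sketch of how an
 * insertion path can pick between the two helpers above while keeping
 * the run invariants.  The fragment_end() helper is hypothetical; the
 * real decision logic lives in inet_frag_queue_insert() below.
 */
#if 0
static void fragrun_add_sketch(struct inet_frag_queue *q,
			       struct sk_buff *skb, int offset)
{
	/* An in-order fragment extends the tail run in O(1) without
	 * touching the rb-tree; anything else opens a new run, i.e. a
	 * new rb-tree node.
	 */
	if (q->fragments_tail && offset == fragment_end(q->fragments_tail))
		fragrun_append_to_last(q, skb);
	else
		fragrun_create(q, skb);
}
#endif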

/* Given the OR of the IPFRAG_ECN_* values of all fragments, apply the
 * RFC 3168 section 5.3 requirements.
 * Value:  0xff if the frame should be dropped.
 *         0 or INET_ECN_CE value, to be ORed into the final iph->tos field.
 */
const u8 ip_frag_ecn_table[16] = {
	/* at least one fragment had CE, and others ECT_0 or ECT_1 */
	[IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0]			= INET_ECN_CE,
	[IPFRAG_ECN_CE | IPFRAG_ECN_ECT_1]			= INET_ECN_CE,
	[IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1]	= INET_ECN_CE,

	/* invalid combinations: drop the frame */
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_0] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_1] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_1] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1] = 0xff,
};
EXPORT_SYMBOL(ip_frag_ecn_table);
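
/* Illustration only: roughly how IPv4 reassembly is assumed to consume
 * this table once all fragments are in.  qp->ecn here stands for the
 * accumulated OR of the per-fragment IPFRAG_ECN_* bits; locals are
 * elided.
 */
#if 0
	ecn = ip_frag_ecn_table[qp->ecn];
	if (unlikely(ecn == 0xff))
		goto out_fail;	/* invalid ECN combination: drop the frame */
	iph->tos |= ecn;	/* 0 or INET_ECN_CE */
#endif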

int inet_frags_init(struct inet_frags *f)
{}
EXPORT_SYMBOL(inet_frags_init);

void inet_frags_fini(struct inet_frags *f)
{}
EXPORT_SYMBOL(inet_frags_fini);

/* called from rhashtable_free_and_destroy() at netns_frags dismantle */
static void inet_frags_free_cb(void *ptr, void *arg)
{}

static LLIST_HEAD(fqdir_free_list);

static void fqdir_free_fn(struct work_struct *work)
{}

static DECLARE_DELAYED_WORK(fqdir_free_work, fqdir_free_fn);

static void fqdir_work_fn(struct work_struct *work)
{}
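
/* Illustration only: a sketch of the two-stage dismantle driven by the
 * machinery above, assuming the upstream fqdir field names destroy_work
 * and free_list.  Stage 1 empties the hash table; stage 2 batches the
 * final kfree()s so a single rcu_barrier() covers many fqdirs instead
 * of paying one barrier per dismantled netns.
 */
#if 0
static void fqdir_work_fn_sketch(struct work_struct *work)
{
	struct fqdir *fqdir = container_of(work, struct fqdir, destroy_work);

	/* Stage 1: evict and free every queue still hashed. */
	rhashtable_free_and_destroy(&fqdir->rhashtable, inet_frags_free_cb, NULL);

	/* Stage 2: defer the kfree() to fqdir_free_fn(), batched. */
	if (llist_add(&fqdir->free_list, &fqdir_free_list))
		queue_delayed_work(system_wq, &fqdir_free_work, HZ);
}
#endif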

int fqdir_init(struct fqdir **fqdirp, struct inet_frags *f, struct net *net)
{}
EXPORT_SYMBOL(fqdir_init);

static struct workqueue_struct *inet_frag_wq;

static int __init inet_frag_wq_init(void)
{}

pure_initcall(inet_frag_wq_init);

void fqdir_exit(struct fqdir *fqdir)
{}
EXPORT_SYMBOL(fqdir_exit);

void inet_frag_kill(struct inet_frag_queue *fq)
{}
EXPORT_SYMBOL(inet_frag_kill);

static void inet_frag_destroy_rcu(struct rcu_head *head)
{}

unsigned int inet_frag_rbtree_purge(struct rb_root *root,
				    enum skb_drop_reason reason)
{}
EXPORT_SYMBOL(inet_frag_rbtree_purge);
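
/* Illustration only: the purge walk implied by the run invariants at
 * the top of this file.  Each rb-tree node is the head of a run whose
 * remaining skbs hang off FRAG_CB(skb)->next_frag; locals are elided.
 */
#if 0
	while ((p = rb_first(root))) {
		struct sk_buff *skb = rb_entry(p, struct sk_buff, rbnode);

		rb_erase(p, root);
		while (skb) {
			struct sk_buff *next = FRAG_CB(skb)->next_frag;

			sum += skb->truesize;	/* for memory accounting */
			kfree_skb_reason(skb, reason);
			skb = next;
		}
	}
#endif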

void inet_frag_destroy(struct inet_frag_queue *q)
{}
EXPORT_SYMBOL(inet_frag_destroy);

static struct inet_frag_queue *inet_frag_alloc(struct fqdir *fqdir,
					       struct inet_frags *f,
					       void *arg)
{}

static struct inet_frag_queue *inet_frag_create(struct fqdir *fqdir,
						void *arg,
						struct inet_frag_queue **prev)
{}

/* TODO : call from rcu_read_lock() and no longer use refcount_inc_not_zero() */
struct inet_frag_queue *inet_frag_find(struct fqdir *fqdir, void *key)
{}
EXPORT_SYMBOL(inet_frag_find);
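
/* Illustration only: the lookup pattern the TODO above refers to.  The
 * rhashtable lookup already runs under rcu_read_lock(), then a
 * reference is taken with refcount_inc_not_zero() to guard against a
 * concurrent teardown; misses fall back to inet_frag_create().  Locals
 * and error handling are elided.
 */
#if 0
	rcu_read_lock();
	fq = rhashtable_lookup(&fqdir->rhashtable, key, fqdir->f->rhash_params);
	if (!fq)
		fq = inet_frag_create(fqdir, key, &prev);
	if (fq && !refcount_inc_not_zero(&fq->refcnt))
		fq = NULL;	/* raced with inet_frag_kill()/destroy */
	rcu_read_unlock();
#endif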

int inet_frag_queue_insert(struct inet_frag_queue *q, struct sk_buff *skb,
			   int offset, int end)
{}
EXPORT_SYMBOL(inet_frag_queue_insert);
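
/* Illustration only: the contract a caller like ip_frag_queue() is
 * assumed to follow.  An exact duplicate is dropped quietly; an overlap
 * (rejected since the 2018 FragmentSmack hardening) kills the whole
 * queue.  Locals are elided.
 */
#if 0
	err = inet_frag_queue_insert(q, skb, offset, end);
	if (err == IPFRAG_DUP) {
		kfree_skb(skb);		/* duplicate fragment */
	} else if (err == IPFRAG_OVERLAP) {
		inet_frag_kill(q);	/* discard the whole queue */
		kfree_skb(skb);
	}
#endif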

void *inet_frag_reasm_prepare(struct inet_frag_queue *q, struct sk_buff *skb,
			      struct sk_buff *parent)
{}
EXPORT_SYMBOL(inet_frag_reasm_prepare);

void inet_frag_reasm_finish(struct inet_frag_queue *q, struct sk_buff *head,
			    void *reasm_data, bool try_coalesce)
{}
EXPORT_SYMBOL(inet_frag_reasm_finish);
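
/* Illustration only: the prepare/finish pairing as a reassembler such
 * as ip_frag_reasm() is assumed to drive it on the final fragment.
 * prev_tail is the old q->fragments_tail captured before that fragment
 * was inserted; error handling is elided.
 */
#if 0
	reasm_data = inet_frag_reasm_prepare(q, skb, prev_tail);
	if (!reasm_data)
		goto out_fail;
	/* ...fix up the protocol headers in skb here... */
	inet_frag_reasm_finish(q, skb, reasm_data, true);
#endif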

struct sk_buff *inet_frag_pull_head(struct inet_frag_queue *q)
{}
EXPORT_SYMBOL(inet_frag_pull_head);