linux/net/netfilter/nf_conntrack_core.c

// SPDX-License-Identifier: GPL-2.0-only
/* Connection state tracking for netfilter.  This is separated from,
   but required by, the NAT layer; it can also be used by an iptables
   extension. */

/* (C) 1999-2001 Paul `Rusty' Russell
 * (C) 2002-2006 Netfilter Core Team <[email protected]>
 * (C) 2003,2004 USAGI/WIDE Project <http://www.linux-ipv6.org>
 * (C) 2005-2012 Patrick McHardy <[email protected]>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/netfilter.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/skbuff.h>
#include <linux/proc_fs.h>
#include <linux/vmalloc.h>
#include <linux/stddef.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/siphash.h>
#include <linux/err.h>
#include <linux/percpu.h>
#include <linux/moduleparam.h>
#include <linux/notifier.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/socket.h>
#include <linux/mm.h>
#include <linux/nsproxy.h>
#include <linux/rculist_nulls.h>

#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_bpf.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_expect.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_extend.h>
#include <net/netfilter/nf_conntrack_acct.h>
#include <net/netfilter/nf_conntrack_ecache.h>
#include <net/netfilter/nf_conntrack_zones.h>
#include <net/netfilter/nf_conntrack_timestamp.h>
#include <net/netfilter/nf_conntrack_timeout.h>
#include <net/netfilter/nf_conntrack_labels.h>
#include <net/netfilter/nf_conntrack_synproxy.h>
#include <net/netfilter/nf_nat.h>
#include <net/netfilter/nf_nat_helper.h>
#include <net/netns/hash.h>
#include <net/ip.h>

#include "nf_internals.h"

__cacheline_aligned_in_smp spinlock_t nf_conntrack_locks[CONNTRACK_LOCKS];
EXPORT_SYMBOL_GPL(nf_conntrack_locks);

__cacheline_aligned_in_smp DEFINE_SPINLOCK(nf_conntrack_expect_lock);
EXPORT_SYMBOL_GPL(nf_conntrack_expect_lock);

struct hlist_nulls_head *nf_conntrack_hash __read_mostly;
EXPORT_SYMBOL_GPL(nf_conntrack_hash);

struct conntrack_gc_work {
	struct delayed_work	dwork;
	u32			next_bucket;
	u32			avg_timeout;
	u32			count;
	u32			start_time;
	bool			exiting;
	bool			early_drop;
};

static __read_mostly struct kmem_cache *nf_conntrack_cachep;
static DEFINE_SPINLOCK(nf_conntrack_locks_all_lock);
static __read_mostly bool nf_conntrack_locks_all;

/* serialize hash resizes and nf_ct_iterate_cleanup */
static DEFINE_MUTEX(nf_conntrack_mutex);

#define GC_SCAN_INTERVAL_MAX	(60ul * HZ)
#define GC_SCAN_INTERVAL_MIN	(1ul * HZ)

/* clamp timeouts to this value (TCP unacked) */
#define GC_SCAN_INTERVAL_CLAMP	(300ul * HZ)

/* Initial bias pretending we have 100 entries at the upper bound so we don't
 * wake up often just because we have three entries with a 1s timeout while
 * still allowing non-idle machines to wake up more often when needed.
 */
#define GC_SCAN_INITIAL_COUNT	100
#define GC_SCAN_INTERVAL_INIT	GC_SCAN_INTERVAL_MAX

#define GC_SCAN_MAX_DURATION	msecs_to_jiffies(10)
#define GC_SCAN_EXPIRED_MAX	(64000u / HZ)

#define MIN_CHAINLEN	50u
#define MAX_CHAINLEN	(80u - MIN_CHAINLEN)

static struct conntrack_gc_work conntrack_gc_work;

void nf_conntrack_lock(spinlock_t *lock) __acquires(lock)
{}
EXPORT_SYMBOL_GPL(nf_conntrack_lock);

static void nf_conntrack_double_unlock(unsigned int h1, unsigned int h2)
{}

/* return true if we need to recompute hashes (in case hash table was resized) */
static bool nf_conntrack_double_lock(struct net *net, unsigned int h1,
				     unsigned int h2, unsigned int sequence)
{}

static void nf_conntrack_all_lock(void)
	__acquires(&nf_conntrack_locks_all_lock)
{}

static void nf_conntrack_all_unlock(void)
	__releases(&nf_conntrack_locks_all_lock)
{}

unsigned int nf_conntrack_htable_size __read_mostly;
EXPORT_SYMBOL_GPL(nf_conntrack_htable_size);

unsigned int nf_conntrack_max __read_mostly;
EXPORT_SYMBOL_GPL(nf_conntrack_max);
seqcount_spinlock_t nf_conntrack_generation __read_mostly;
static siphash_aligned_key_t nf_conntrack_hash_rnd;

static u32 hash_conntrack_raw(const struct nf_conntrack_tuple *tuple,
			      unsigned int zoneid,
			      const struct net *net)
{}
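
/*
 * Illustrative sketch, not part of the original file: a keyed tuple hash
 * in the spirit of hash_conntrack_raw() above.  The tuple is hashed with
 * siphash, and the zone id plus a per-netns salt are folded into the
 * random key so that identical tuples in different zones or namespaces
 * land in different buckets.  The example_ name is hypothetical, and
 * hashing sizeof(*tuple) is a simplification -- the real code covers
 * only the relevant prefix of the tuple.
 */
static u32 example_tuple_hash(const struct nf_conntrack_tuple *tuple,
			      unsigned int zoneid, const struct net *net)
{
	siphash_key_t key;

	get_random_once(&nf_conntrack_hash_rnd, sizeof(nf_conntrack_hash_rnd));

	key = nf_conntrack_hash_rnd;
	key.key[0] ^= zoneid;
	key.key[1] ^= net_hash_mix(net);

	return (u32)siphash(tuple, sizeof(*tuple), &key);
}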

static u32 scale_hash(u32 hash)
{}

static u32 __hash_conntrack(const struct net *net,
			    const struct nf_conntrack_tuple *tuple,
			    unsigned int zoneid,
			    unsigned int size)
{}

static u32 hash_conntrack(const struct net *net,
			  const struct nf_conntrack_tuple *tuple,
			  unsigned int zoneid)
{}
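
/*
 * Illustrative sketch, not part of the original file: how the insertion
 * paths pair the nf_conntrack_generation seqcount with
 * nf_conntrack_double_lock().  Both bucket indices are computed under a
 * seqcount read section and recomputed whenever a resize slipped in
 * between.  The example_ name is hypothetical; the caller is expected to
 * undo this with nf_conntrack_double_unlock() and local_bh_enable().
 */
static void example_lock_both_directions(struct net *net,
					 const struct nf_conn *ct,
					 unsigned int *hash,
					 unsigned int *reply_hash)
{
	unsigned int sequence;

	local_bh_disable();
	do {
		sequence = read_seqcount_begin(&nf_conntrack_generation);
		*hash = hash_conntrack(net,
				       &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
				       nf_ct_zone_id(nf_ct_zone(ct), IP_CT_DIR_ORIGINAL));
		*reply_hash = hash_conntrack(net,
					     &ct->tuplehash[IP_CT_DIR_REPLY].tuple,
					     nf_ct_zone_id(nf_ct_zone(ct), IP_CT_DIR_REPLY));
		/* true means a resize happened: recompute both hashes */
	} while (nf_conntrack_double_lock(net, *hash, *reply_hash, sequence));
}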

static bool nf_ct_get_tuple_ports(const struct sk_buff *skb,
				  unsigned int dataoff,
				  struct nf_conntrack_tuple *tuple)
{}

static bool
nf_ct_get_tuple(const struct sk_buff *skb,
		unsigned int nhoff,
		unsigned int dataoff,
		u_int16_t l3num,
		u_int8_t protonum,
		struct net *net,
		struct nf_conntrack_tuple *tuple)
{}

static int ipv4_get_l4proto(const struct sk_buff *skb, unsigned int nhoff,
			    u_int8_t *protonum)
{}

#if IS_ENABLED(CONFIG_IPV6)
static int ipv6_get_l4proto(const struct sk_buff *skb, unsigned int nhoff,
			    u8 *protonum)
{}
#endif

static int get_l4proto(const struct sk_buff *skb,
		       unsigned int nhoff, u8 pf, u8 *l4num)
{}

bool nf_ct_get_tuplepr(const struct sk_buff *skb, unsigned int nhoff,
		       u_int16_t l3num,
		       struct net *net, struct nf_conntrack_tuple *tuple)
{}
EXPORT_SYMBOL_GPL(nf_ct_get_tuplepr);

bool
nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse,
		   const struct nf_conntrack_tuple *orig)
{}
EXPORT_SYMBOL_GPL(nf_ct_invert_tuple);

/* Generate an almost-unique pseudo-id for a given conntrack.
 *
 * This intentionally does not re-use any of the seeds used for hash
 * table location; we assume the id gets exposed to userspace.
 *
 * The following nf_conn fields do not change throughout the lifetime
 * of the nf_conn:
 *
 * 1. nf_conn address
 * 2. nf_conn->master address (normally NULL)
 * 3. the associated net namespace
 * 4. the original direction tuple
 */
u32 nf_ct_get_id(const struct nf_conn *ct)
{}
EXPORT_SYMBOL_GPL(nf_ct_get_id);
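
/*
 * Illustrative sketch, not part of the original file: one way to fold the
 * four stable items listed above into a pseudo-id, using a dedicated
 * random seed so the hash-table key stays private.  The example_ name is
 * hypothetical.
 */
static u32 example_ct_id(const struct nf_conn *ct)
{
	static siphash_aligned_key_t id_seed;
	unsigned long a, b, c, d;

	net_get_random_once(&id_seed, sizeof(id_seed));

	a = (unsigned long)ct;			/* 1. nf_conn address */
	b = (unsigned long)ct->master;		/* 2. master address */
	c = (unsigned long)nf_ct_net(ct);	/* 3. net namespace */
	/* 4. original direction tuple, hashed down to one word */
	d = (unsigned long)siphash(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
				   sizeof(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple),
				   &id_seed);

	return (u32)siphash_4u64((u64)a, (u64)b, (u64)c, (u64)d, &id_seed);
}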

static void
clean_from_lists(struct nf_conn *ct)
{}

#define NFCT_ALIGN(len)	(((len) + NFCT_INFOMASK) & ~NFCT_INFOMASK)

/* Released via nf_ct_destroy() */
struct nf_conn *nf_ct_tmpl_alloc(struct net *net,
				 const struct nf_conntrack_zone *zone,
				 gfp_t flags)
{}
EXPORT_SYMBOL_GPL(nf_ct_tmpl_alloc);

void nf_ct_tmpl_free(struct nf_conn *tmpl)
{}
EXPORT_SYMBOL_GPL(nf_ct_tmpl_free);

static void destroy_gre_conntrack(struct nf_conn *ct)
{}

void nf_ct_destroy(struct nf_conntrack *nfct)
{}
EXPORT_SYMBOL(nf_ct_destroy);

static void __nf_ct_delete_from_lists(struct nf_conn *ct)
{}

static void nf_ct_delete_from_lists(struct nf_conn *ct)
{}

static void nf_ct_add_to_ecache_list(struct nf_conn *ct)
{}

bool nf_ct_delete(struct nf_conn *ct, u32 portid, int report)
{}
EXPORT_SYMBOL_GPL(nf_ct_delete);

static inline bool
nf_ct_key_equal(struct nf_conntrack_tuple_hash *h,
		const struct nf_conntrack_tuple *tuple,
		const struct nf_conntrack_zone *zone,
		const struct net *net)
{}

static inline bool
nf_ct_match(const struct nf_conn *ct1, const struct nf_conn *ct2)
{}

/* caller must hold rcu readlock and none of the nf_conntrack_locks */
static void nf_ct_gc_expired(struct nf_conn *ct)
{}

/*
 * Warning:
 * - The caller must take a reference on the returned object
 *   and recheck nf_ct_tuple_equal(tuple, &h->tuple).
 */
static struct nf_conntrack_tuple_hash *
____nf_conntrack_find(struct net *net, const struct nf_conntrack_zone *zone,
		      const struct nf_conntrack_tuple *tuple, u32 hash)
{}
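
/*
 * Illustrative sketch, not part of the original file: the lookup pattern
 * the Warning above demands.  Because conntrack objects live in a
 * SLAB_TYPESAFE_BY_RCU cache, a candidate returned under rcu_read_lock()
 * may be freed and recycled concurrently, so a reference must be taken
 * and the tuple re-checked afterwards.  The example_ name is
 * hypothetical; on success the caller owns a reference (nf_ct_put()).
 */
static struct nf_conn *
example_find_and_ref(struct net *net, const struct nf_conntrack_zone *zone,
		     const struct nf_conntrack_tuple *tuple, u32 hash)
{
	struct nf_conntrack_tuple_hash *h;
	struct nf_conn *ct = NULL;

	rcu_read_lock();
	h = ____nf_conntrack_find(net, zone, tuple, hash);
	if (h) {
		ct = nf_ct_tuplehash_to_ctrack(h);
		if (unlikely(!refcount_inc_not_zero(&ct->ct_general.use))) {
			ct = NULL;	/* entry was on its way out */
		} else if (unlikely(!nf_ct_key_equal(h, tuple, zone, net))) {
			nf_ct_put(ct);	/* recycled for another tuple */
			ct = NULL;
		}
	}
	rcu_read_unlock();
	return ct;
}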

/* Find a connection corresponding to a tuple. */
static struct nf_conntrack_tuple_hash *
__nf_conntrack_find_get(struct net *net, const struct nf_conntrack_zone *zone,
			const struct nf_conntrack_tuple *tuple, u32 hash)
{}

struct nf_conntrack_tuple_hash *
nf_conntrack_find_get(struct net *net, const struct nf_conntrack_zone *zone,
		      const struct nf_conntrack_tuple *tuple)
{}
EXPORT_SYMBOL_GPL(nf_conntrack_find_get);

static void __nf_conntrack_hash_insert(struct nf_conn *ct,
				       unsigned int hash,
				       unsigned int reply_hash)
{}

static bool nf_ct_ext_valid_pre(const struct nf_ct_ext *ext)
{}

static bool nf_ct_ext_valid_post(struct nf_ct_ext *ext)
{}

int
nf_conntrack_hash_check_insert(struct nf_conn *ct)
{}
EXPORT_SYMBOL_GPL(nf_conntrack_hash_check_insert);

void nf_ct_acct_add(struct nf_conn *ct, u32 dir, unsigned int packets,
		    unsigned int bytes)
{}
EXPORT_SYMBOL_GPL(nf_ct_acct_add);
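
/*
 * Illustrative sketch, not part of the original file: accounting one skb
 * against the entry in the packet's direction, the way the packet path
 * drives nf_ct_acct_add().  The example_ name is hypothetical.
 */
static void example_account_skb(struct nf_conn *ct,
				enum ip_conntrack_info ctinfo,
				const struct sk_buff *skb)
{
	/* one packet, skb->len bytes, in the direction this skb travels */
	nf_ct_acct_add(ct, CTINFO2DIR(ctinfo), 1, skb->len);
}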

static void nf_ct_acct_merge(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
			     const struct nf_conn *loser_ct)
{}

static void __nf_conntrack_insert_prepare(struct nf_conn *ct)
{}

/* caller must hold locks to prevent concurrent changes */
static int __nf_ct_resolve_clash(struct sk_buff *skb,
				 struct nf_conntrack_tuple_hash *h)
{}

/**
 * nf_ct_resolve_clash_harder - attempt to insert clashing conntrack entry
 *
 * @skb: skb that causes the collision
 * @repl_idx: hash slot for reply direction
 *
 * Called when origin or reply direction had a clash.
 * The skb can be handled without packet drop provided the reply direction
 * is unique, or the existing entry has the identical tuple in both
 * directions.
 *
 * Caller must hold conntrack table locks to prevent concurrent updates.
 *
 * Returns NF_DROP if the clash could not be handled.
 */
static int nf_ct_resolve_clash_harder(struct sk_buff *skb, u32 repl_idx)
{}

/**
 * nf_ct_resolve_clash - attempt to handle clash without packet drop
 *
 * @skb: skb that causes the clash
 * @h: tuplehash of the clashing entry already in table
 * @reply_hash: hash slot for reply direction
 *
 * A conntrack entry can be inserted to the connection tracking table
 * if there is no existing entry with an identical tuple.
 *
 * If there is one, @skb (and the associated, unconfirmed conntrack) has
 * to be dropped.  In case @skb is retransmitted, the next conntrack lookup
 * will find the already-existing entry.
 *
 * The major problem with such packet drop is the extra delay added by
 * the packet loss -- it will take some time for a retransmit to occur
 * (or the sender to time out when waiting for a reply).
 *
 * This function attempts to handle the situation without packet drop.
 *
 * If @skb has no NAT transformation or if the colliding entries are
 * exactly the same, only the to-be-confirmed conntrack entry is discarded
 * and @skb is associated with the conntrack entry already in the table.
 *
 * Failing that, the new, unconfirmed conntrack is still added to the table
 * provided that the collision only occurs in the ORIGINAL direction.
 * The new entry will be added only in the non-clashing REPLY direction,
 * so packets in the ORIGINAL direction will continue to match the existing
 * entry.  The new entry will also have a fixed timeout so it expires --
 * due to the collision, it will only see reply traffic.
 *
 * Returns NF_DROP if the clash could not be resolved.
 */
static __cold noinline int
nf_ct_resolve_clash(struct sk_buff *skb, struct nf_conntrack_tuple_hash *h,
		    u32 reply_hash)
{}

/* Confirm a connection given skb; places it in hash table */
int
__nf_conntrack_confirm(struct sk_buff *skb)
{}
EXPORT_SYMBOL_GPL(__nf_conntrack_confirm);

/* Returns true if a connection corresponds to the tuple (required
   for NAT). */
int
nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
			 const struct nf_conn *ignored_conntrack)
{}
EXPORT_SYMBOL_GPL(nf_conntrack_tuple_taken);

#define NF_CT_EVICTION_RANGE	8

/* There's a small race here where we may free a just-assured
   connection.  Too bad: we're in trouble anyway. */
static unsigned int early_drop_list(struct net *net,
				    struct hlist_nulls_head *head)
{}

static noinline int early_drop(struct net *net, unsigned int hash)
{}

static bool gc_worker_skip_ct(const struct nf_conn *ct)
{}

static bool gc_worker_can_early_drop(const struct nf_conn *ct)
{}

static void gc_worker(struct work_struct *work)
{}

static void conntrack_gc_work_init(struct conntrack_gc_work *gc_work)
{}

static struct nf_conn *
__nf_conntrack_alloc(struct net *net,
		     const struct nf_conntrack_zone *zone,
		     const struct nf_conntrack_tuple *orig,
		     const struct nf_conntrack_tuple *repl,
		     gfp_t gfp, u32 hash)
{}

struct nf_conn *nf_conntrack_alloc(struct net *net,
				   const struct nf_conntrack_zone *zone,
				   const struct nf_conntrack_tuple *orig,
				   const struct nf_conntrack_tuple *repl,
				   gfp_t gfp)
{}
EXPORT_SYMBOL_GPL(nf_conntrack_alloc);

void nf_conntrack_free(struct nf_conn *ct)
{}
EXPORT_SYMBOL_GPL(nf_conntrack_free);


/* Allocate a new conntrack: we return -ENOMEM if classification
   failed due to stress.  Otherwise it really is unclassifiable. */
static noinline struct nf_conntrack_tuple_hash *
init_conntrack(struct net *net, struct nf_conn *tmpl,
	       const struct nf_conntrack_tuple *tuple,
	       struct sk_buff *skb,
	       unsigned int dataoff, u32 hash)
{}

/* On success, returns 0, sets skb->_nfct | ctinfo */
static int
resolve_normal_ct(struct nf_conn *tmpl,
		  struct sk_buff *skb,
		  unsigned int dataoff,
		  u_int8_t protonum,
		  const struct nf_hook_state *state)
{}

/*
 * icmp packets need special treatment to handle error messages that are
 * related to a connection.
 *
 * Callers need to check if the skb has a conntrack assigned when this
 * helper returns; in that case the skb belongs to an already known
 * connection.
 */
static unsigned int __cold
nf_conntrack_handle_icmp(struct nf_conn *tmpl,
			 struct sk_buff *skb,
			 unsigned int dataoff,
			 u8 protonum,
			 const struct nf_hook_state *state)
{}
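
/*
 * Illustrative sketch, not part of the original file: the caller-side
 * check the comment above asks for.  The example_ name is hypothetical.
 */
static bool example_icmp_already_resolved(const struct sk_buff *skb)
{
	enum ip_conntrack_info ctinfo;

	/* nf_conntrack_handle_icmp() may have attached the conntrack of a
	 * related connection to the error skb; if so, resolution is done.
	 */
	return nf_ct_get(skb, &ctinfo) != NULL;
}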

static int generic_packet(struct nf_conn *ct, struct sk_buff *skb,
			  enum ip_conntrack_info ctinfo)
{}

/* Returns verdict for packet, or -1 for invalid. */
static int nf_conntrack_handle_packet(struct nf_conn *ct,
				      struct sk_buff *skb,
				      unsigned int dataoff,
				      enum ip_conntrack_info ctinfo,
				      const struct nf_hook_state *state)
{}
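
/*
 * Illustrative sketch, not part of the original file: the per-protocol
 * dispatch that nf_conntrack_handle_packet() performs, assuming the usual
 * l4proto handlers from nf_conntrack_l4proto.h.  The example_ name is
 * hypothetical, and less common protocols (ICMPv6, SCTP, ...) are omitted.
 */
static int example_dispatch(struct nf_conn *ct, struct sk_buff *skb,
			    unsigned int dataoff,
			    enum ip_conntrack_info ctinfo,
			    const struct nf_hook_state *state)
{
	switch (nf_ct_protonum(ct)) {
	case IPPROTO_TCP:
		return nf_conntrack_tcp_packet(ct, skb, dataoff, ctinfo, state);
	case IPPROTO_UDP:
		return nf_conntrack_udp_packet(ct, skb, dataoff, ctinfo, state);
	case IPPROTO_ICMP:
		return nf_conntrack_icmp_packet(ct, skb, ctinfo, state);
	}
	/* unknown protocol: fall back to the generic tracker */
	return generic_packet(ct, skb, ctinfo);
}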

unsigned int
nf_conntrack_in(struct sk_buff *skb, const struct nf_hook_state *state)
{}
EXPORT_SYMBOL_GPL(nf_conntrack_in);

/* Refresh conntrack for this many jiffies and do accounting if do_acct is true */
void __nf_ct_refresh_acct(struct nf_conn *ct,
			  enum ip_conntrack_info ctinfo,
			  const struct sk_buff *skb,
			  u32 extra_jiffies,
			  bool do_acct)
{}
EXPORT_SYMBOL_GPL(__nf_ct_refresh_acct);
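
/*
 * Illustrative sketch, not part of the original file: how a protocol
 * tracker typically bumps the timeout through the nf_ct_refresh_acct()
 * wrapper from nf_conntrack.h.  The example_ name and the 30s value are
 * hypothetical.
 */
static void example_refresh(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
			    const struct sk_buff *skb)
{
	/* extend the entry's lifetime by 30 seconds and account this skb */
	nf_ct_refresh_acct(ct, ctinfo, skb, 30 * HZ);
}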

bool nf_ct_kill_acct(struct nf_conn *ct,
		     enum ip_conntrack_info ctinfo,
		     const struct sk_buff *skb)
{}
EXPORT_SYMBOL_GPL(nf_ct_kill_acct);

#if IS_ENABLED(CONFIG_NF_CT_NETLINK)

#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_conntrack.h>
#include <linux/mutex.h>

/* Generic function for tcp/udp/sctp/dccp and the like. */
int nf_ct_port_tuple_to_nlattr(struct sk_buff *skb,
			       const struct nf_conntrack_tuple *tuple)
{}
EXPORT_SYMBOL_GPL(nf_ct_port_tuple_to_nlattr);

const struct nla_policy nf_ct_port_nla_policy[CTA_PROTO_MAX+1] = {
	[CTA_PROTO_SRC_PORT]  = { .type = NLA_U16 },
	[CTA_PROTO_DST_PORT]  = { .type = NLA_U16 },
};
EXPORT_SYMBOL_GPL(nf_ct_port_nla_policy);

int nf_ct_port_nlattr_to_tuple(struct nlattr *tb[],
			       struct nf_conntrack_tuple *t,
			       u_int32_t flags)
{}
EXPORT_SYMBOL_GPL(nf_ct_port_nlattr_to_tuple);

unsigned int nf_ct_port_nlattr_tuple_size(void)
{}
EXPORT_SYMBOL_GPL(nf_ct_port_nlattr_tuple_size);
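
/*
 * Illustrative sketch, not part of the original file: how a port-based
 * tracker wires these helpers into its struct nf_conntrack_l4proto
 * (field names as in nf_conntrack_l4proto.h; the UDP choice is just an
 * example).
 */
static const struct nf_conntrack_l4proto example_l4proto = {
	.l4proto		= IPPROTO_UDP,
	.tuple_to_nlattr	= nf_ct_port_tuple_to_nlattr,
	.nlattr_tuple_size	= nf_ct_port_nlattr_tuple_size,
	.nlattr_to_tuple	= nf_ct_port_nlattr_to_tuple,
	.nla_policy		= nf_ct_port_nla_policy,
};
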
#endif

/* Used by ipt_REJECT and ip6t_REJECT. */
static void nf_conntrack_attach(struct sk_buff *nskb, const struct sk_buff *skb)
{}

static int __nf_conntrack_update(struct net *net, struct sk_buff *skb,
				 struct nf_conn *ct,
				 enum ip_conntrack_info ctinfo)
{}

/* This packet is coming from userspace via nf_queue; complete the packet
 * processing after the helper invocation in nf_confirm().
 */
static int nf_confirm_cthelper(struct sk_buff *skb, struct nf_conn *ct,
			       enum ip_conntrack_info ctinfo)
{}

static int nf_conntrack_update(struct net *net, struct sk_buff *skb)
{}

static bool nf_conntrack_get_tuple_skb(struct nf_conntrack_tuple *dst_tuple,
				       const struct sk_buff *skb)
{}

/* Bring out ya dead! */
static struct nf_conn *
get_next_corpse(int (*iter)(struct nf_conn *i, void *data),
		const struct nf_ct_iter_data *iter_data, unsigned int *bucket)
{}

static void nf_ct_iterate_cleanup(int (*iter)(struct nf_conn *i, void *data),
				  const struct nf_ct_iter_data *iter_data)
{}

void nf_ct_iterate_cleanup_net(int (*iter)(struct nf_conn *i, void *data),
			       const struct nf_ct_iter_data *iter_data)
{}
EXPORT_SYMBOL_GPL(nf_ct_iterate_cleanup_net);

/**
 * nf_ct_iterate_destroy - destroy unconfirmed conntracks and iterate table
 * @iter: callback to invoke for each conntrack
 * @data: data to pass to @iter
 *
 * Like nf_ct_iterate_cleanup, but first marks conntracks on the
 * unconfirmed list as dying (so they will not be inserted into
 * main table).
 *
 * Can only be called in module exit path.
 */
void
nf_ct_iterate_destroy(int (*iter)(struct nf_conn *i, void *data), void *data)
{}
EXPORT_SYMBOL_GPL(nf_ct_iterate_destroy);
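
/*
 * Illustrative sketch, not part of the original file: a module exit path
 * flushing all entries via nf_ct_iterate_destroy().  A non-zero return
 * from the callback asks for the entry to be deleted; the example_ names
 * are hypothetical.
 */
static int example_kill_all_cb(struct nf_conn *ct, void *data)
{
	return 1;	/* delete every entry */
}

static void example_module_exit_flush(void)
{
	nf_ct_iterate_destroy(example_kill_all_cb, NULL);
}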

static int kill_all(struct nf_conn *i, void *data)
{}

void nf_conntrack_cleanup_start(void)
{}

void nf_conntrack_cleanup_end(void)
{}

/*
 * Mishearing the voices in his head, our hero wonders how he's
 * supposed to kill the mall.
 */
void nf_conntrack_cleanup_net(struct net *net)
{}

void nf_conntrack_cleanup_net_list(struct list_head *net_exit_list)
{}

void *nf_ct_alloc_hashtable(unsigned int *sizep, int nulls)
{}
EXPORT_SYMBOL_GPL(nf_ct_alloc_hashtable);

int nf_conntrack_hash_resize(unsigned int hashsize)
{}

int nf_conntrack_set_hashsize(const char *val, const struct kernel_param *kp)
{}

int nf_conntrack_init_start(void)
{}

static void nf_conntrack_set_closing(struct nf_conntrack *nfct)
{}

static const struct nf_ct_hook nf_conntrack_hook = {
	.update		= nf_conntrack_update,
	.destroy	= nf_ct_destroy,
	.get_tuple_skb	= nf_conntrack_get_tuple_skb,
	.attach		= nf_conntrack_attach,
	.set_closing	= nf_conntrack_set_closing,
	.confirm	= nf_confirm,
};

void nf_conntrack_init_end(void)
{}

/*
 * We need to use special "null" values, not used in the hash table.
 */
#define UNCONFIRMED_NULLS_VAL	((1 << 30) + 0)

int nf_conntrack_init_net(struct net *net)
{}

/* ctnetlink code shared by both ctnetlink and nf_conntrack_bpf */

int __nf_ct_change_timeout(struct nf_conn *ct, u64 timeout)
{}
EXPORT_SYMBOL_GPL(__nf_ct_change_timeout);

void __nf_ct_change_status(struct nf_conn *ct, unsigned long on, unsigned long off)
{}
EXPORT_SYMBOL_GPL(__nf_ct_change_status);
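
/*
 * Illustrative sketch, not part of the original file: a ctnetlink-style
 * update built on the two helpers above.  The example_ name and the
 * 2-minute timeout are hypothetical.
 */
static int example_update_entry(struct nf_conn *ct)
{
	int err;

	/* set a fresh 2 minute timeout (in jiffies) */
	err = __nf_ct_change_timeout(ct, 120 * HZ);
	if (err)
		return err;

	/* then flip the status bits: set IPS_ASSURED, clear nothing */
	__nf_ct_change_status(ct, IPS_ASSURED, 0);
	return 0;
}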

int nf_ct_change_status_common(struct nf_conn *ct, unsigned int status)
{}
EXPORT_SYMBOL_GPL(nf_ct_change_status_common);