linux/net/netfilter/nf_nat_core.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * (C) 1999-2001 Paul `Rusty' Russell
 * (C) 2002-2006 Netfilter Core Team <[email protected]>
 * (C) 2011 Patrick McHardy <[email protected]>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/timer.h>
#include <linux/skbuff.h>
#include <linux/gfp.h>
#include <net/xfrm.h>
#include <linux/siphash.h>
#include <linux/rtnetlink.h>

#include <net/netfilter/nf_conntrack_bpf.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_seqadj.h>
#include <net/netfilter/nf_conntrack_zones.h>
#include <net/netfilter/nf_nat.h>
#include <net/netfilter/nf_nat_helper.h>
#include <uapi/linux/netfilter/nf_nat.h>

#include "nf_internals.h"

/* Caps on the search for a free tuple (values as in mainline). */
#define NF_NAT_MAX_ATTEMPTS	128
#define NF_NAT_HARDER_THRESH	(NF_NAT_MAX_ATTEMPTS / 4)

static spinlock_t nf_nat_locks[CONNTRACK_LOCKS];

static DEFINE_MUTEX(nf_nat_proto_mutex);
static unsigned int nat_net_id __read_mostly;

static struct hlist_head *nf_nat_bysource __read_mostly;
static unsigned int nf_nat_htable_size __read_mostly;
static siphash_aligned_key_t nf_nat_hash_rnd;

struct nf_nat_lookup_hook_priv {};

struct nf_nat_hooks_net {};

struct nat_net {};

#ifdef CONFIG_XFRM
static void nf_nat_ipv4_decode_session(struct sk_buff *skb,
				       const struct nf_conn *ct,
				       enum ip_conntrack_dir dir,
				       unsigned long statusbit,
				       struct flowi *fl)
{}

static void nf_nat_ipv6_decode_session(struct sk_buff *skb,
				       const struct nf_conn *ct,
				       enum ip_conntrack_dir dir,
				       unsigned long statusbit,
				       struct flowi *fl)
{}

static void __nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl)
{}
#endif /* CONFIG_XFRM */

/* We keep an extra hash for each conntrack, for fast searching. */
static unsigned int
hash_by_src(const struct net *net,
	    const struct nf_conntrack_zone *zone,
	    const struct nf_conntrack_tuple *tuple)
{}
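
/*
 * Illustrative sketch (hypothetical name, not the verbatim elided body)
 * of how mainline computes this bucket: hash the original source along
 * with the namespace, l4 protocol and zone id via siphash, then scale
 * the result into the table. The key struct layout is an assumption.
 */
static unsigned int
hash_by_src_sketch(const struct net *net,
		   const struct nf_conntrack_zone *zone,
		   const struct nf_conntrack_tuple *tuple)
{
	struct {
		struct nf_conntrack_man src;
		u32 net_mix;
		u32 protonum;
		u32 zone;
	} __aligned(SIPHASH_ALIGNMENT) combined;

	get_random_once(&nf_nat_hash_rnd, sizeof(nf_nat_hash_rnd));

	memset(&combined, 0, sizeof(combined));
	combined.src = tuple->src;		/* original src, so mappings stay stable */
	combined.net_mix = net_hash_mix(net);	/* per-netns salt */
	combined.protonum = tuple->dst.protonum;
	combined.zone = zone->id;

	return reciprocal_scale(siphash(&combined, sizeof(combined),
					&nf_nat_hash_rnd),
				nf_nat_htable_size);
}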

/* Is this tuple already taken? (not by us) */
static int
nf_nat_used_tuple(const struct nf_conntrack_tuple *tuple,
		  const struct nf_conn *ignored_conntrack)
{}
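
/*
 * A minimal sketch of the check (hypothetical name): the conntrack hash
 * stores reply-direction tuples, so invert the candidate tuple and ask
 * the conntrack table whether that reply tuple is already taken by
 * anyone other than ignored_conntrack.
 */
static int
nf_nat_used_tuple_sketch(const struct nf_conntrack_tuple *tuple,
			 const struct nf_conn *ignored_conntrack)
{
	struct nf_conntrack_tuple reply;

	nf_ct_invert_tuple(&reply, tuple);
	return nf_conntrack_tuple_taken(&reply, ignored_conntrack);
}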

static bool nf_nat_may_kill(struct nf_conn *ct, unsigned long flags)
{}

/* reverse direction will send packets to new source, so
 * make sure such packets are invalid.
 */
static bool nf_seq_has_advanced(const struct nf_conn *old, const struct nf_conn *new)
{}
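
/*
 * The "has advanced" test is serial number arithmetic (RFC 1982 style):
 * new is ahead of old iff the signed 32-bit difference is positive.
 * Hedged sketch on raw sequence numbers; the conntrack fields the real
 * body reads are omitted here.
 */
static bool seq_has_advanced_sketch(u32 old_seq, u32 new_seq)
{
	return (s32)(new_seq - old_seq) > 0;
}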

static int
nf_nat_used_tuple_harder(const struct nf_conntrack_tuple *tuple,
			 const struct nf_conn *ignored_conntrack,
			 unsigned int attempts_left)
{}

static bool nf_nat_inet_in_range(const struct nf_conntrack_tuple *t,
				 const struct nf_nat_range2 *range)
{}
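
/*
 * Sketch of the address-range test (hypothetical name), assuming the
 * usual nf_inet_addr layout: compare IPv4 addresses numerically in host
 * order, IPv6 addresses with ipv6_addr_cmp().
 */
static bool nf_nat_inet_in_range_sketch(const struct nf_conntrack_tuple *t,
					const struct nf_nat_range2 *range)
{
	if (t->src.l3num == NFPROTO_IPV4)
		return ntohl(t->src.u3.ip) >= ntohl(range->min_addr.ip) &&
		       ntohl(t->src.u3.ip) <= ntohl(range->max_addr.ip);

	return ipv6_addr_cmp(&t->src.u3.in6, &range->min_addr.in6) >= 0 &&
	       ipv6_addr_cmp(&t->src.u3.in6, &range->max_addr.in6) <= 0;
}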

/* Is the manipulable part of the tuple between min and max inclusive? */
static bool l4proto_in_range(const struct nf_conntrack_tuple *tuple,
			     enum nf_nat_manip_type maniptype,
			     const union nf_conntrack_man_proto *min,
			     const union nf_conntrack_man_proto *max)
{}
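
/*
 * Sketch of the port/id comparison (hypothetical name): maniptype picks
 * which end of the tuple is being rewritten, and ICMP uses the echo id
 * instead of a port. Shortened to TCP/UDP/ICMP; the real body covers
 * more protocols.
 */
static bool l4proto_in_range_sketch(const struct nf_conntrack_tuple *tuple,
				    enum nf_nat_manip_type maniptype,
				    const union nf_conntrack_man_proto *min,
				    const union nf_conntrack_man_proto *max)
{
	__be16 port;

	switch (tuple->dst.protonum) {
	case IPPROTO_ICMP:
	case IPPROTO_ICMPV6:
		return ntohs(tuple->src.u.icmp.id) >= ntohs(min->icmp.id) &&
		       ntohs(tuple->src.u.icmp.id) <= ntohs(max->icmp.id);
	case IPPROTO_TCP:
	case IPPROTO_UDP:
		port = (maniptype == NF_NAT_MANIP_SRC) ?
			tuple->src.u.all : tuple->dst.u.all;
		return ntohs(port) >= ntohs(min->all) &&
		       ntohs(port) <= ntohs(max->all);
	default:
		return true;	/* no per-proto part to constrain */
	}
}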

/* If we source-map this tuple so the reply looks like reply_tuple, will
 * that meet the constraints of range?
 */
static int nf_in_range(const struct nf_conntrack_tuple *tuple,
		       const struct nf_nat_range2 *range)
{}

static inline int
same_src(const struct nf_conn *ct,
	 const struct nf_conntrack_tuple *tuple)
{}
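
/*
 * Sketch (hypothetical name): "same source" means the same l4 protocol,
 * the same source address and the same source port/id, all taken from
 * the ORIGINAL-direction tuple of the existing connection.
 */
static inline int same_src_sketch(const struct nf_conn *ct,
				  const struct nf_conntrack_tuple *tuple)
{
	const struct nf_conntrack_tuple *t;

	t = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
	return t->dst.protonum == tuple->dst.protonum &&
	       nf_inet_addr_cmp(&t->src.u3, &tuple->src.u3) &&
	       t->src.u.all == tuple->src.u.all;
}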

/* Only called for SRC manip */
static int
find_appropriate_src(struct net *net,
		     const struct nf_conntrack_zone *zone,
		     const struct nf_conntrack_tuple *tuple,
		     struct nf_conntrack_tuple *result,
		     const struct nf_nat_range2 *range)
{}
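
/*
 * Sketch of the lookup (hypothetical name), expressed with this file's
 * own helpers: walk the nf_nat_bysource bucket for the tuple's hash
 * under RCU; on a match in the same netns and zone, derive the result
 * from the found entry's reply tuple, keep the caller's destination,
 * and verify it fits the range. Locking details are omitted.
 */
static int find_appropriate_src_sketch(struct net *net,
				       const struct nf_conntrack_zone *zone,
				       const struct nf_conntrack_tuple *tuple,
				       struct nf_conntrack_tuple *result,
				       const struct nf_nat_range2 *range)
{
	unsigned int h = hash_by_src(net, zone, tuple);
	const struct nf_conn *ct;

	hlist_for_each_entry_rcu(ct, &nf_nat_bysource[h], nat_bysource) {
		if (same_src(ct, tuple) &&
		    net_eq(net, nf_ct_net(ct)) &&
		    nf_ct_zone_equal(ct, zone, IP_CT_DIR_ORIGINAL)) {
			/* reuse the established mapping for this source */
			nf_ct_invert_tuple(result,
					   &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
			result->dst = tuple->dst;

			if (nf_in_range(result, range))
				return 1;
		}
	}
	return 0;
}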

/* For [FUTURE] fragmentation handling, we want the least-used
 * src-ip/dst-ip/proto triple.  Fairness doesn't come into it.  Thus
 * if the range specifies 1.2.3.4 ports 10000-10005 and 1.2.3.5 ports
 * 1-65535, we don't do pro-rata allocation based on ports; we choose
 * the ip with the lowest src-ip/dst-ip/proto usage.
 */
static void
find_best_ips_proto(const struct nf_conntrack_zone *zone,
		    struct nf_conntrack_tuple *tuple,
		    const struct nf_nat_range2 *range,
		    const struct nf_conn *ct,
		    enum nf_nat_manip_type maniptype)
{}
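
/*
 * Sketch of the selection idea for the IPv4 case (hypothetical helper;
 * jhash_1word() needs <linux/jhash.h>, not in this file's include list):
 * hash the original source so a given client keeps mapping to the same
 * address, then use the hash as an offset into [min_addr, max_addr].
 * The real body also handles IPv6 and NF_NAT_RANGE_PERSISTENT.
 */
static __be32 pick_v4_addr_sketch(const struct nf_conntrack_tuple *tuple,
				  const struct nf_nat_range2 *range,
				  u32 zone_id)
{
	u32 min = ntohl(range->min_addr.ip);
	u32 span = ntohl(range->max_addr.ip) - min + 1;
	u32 off = jhash_1word((__force u32)tuple->src.u3.ip, zone_id);

	return htonl(min + reciprocal_scale(off, span));
}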

/* Alter the per-proto part of the tuple (depending on maniptype), to
 * give a unique tuple in the given range if possible.
 *
 * Per-protocol part of tuple is initialized to the incoming packet.
 */
static void nf_nat_l4proto_unique_tuple(struct nf_conntrack_tuple *tuple,
					const struct nf_nat_range2 *range,
					enum nf_nat_manip_type maniptype,
					const struct nf_conn *ct)
{}
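
/*
 * Sketch of the search loop (hypothetical helper): start at an offset
 * (mainline derives it from a hash or randomness), walk the port range
 * and take the first tuple not already in use, bounded by
 * NF_NAT_MAX_ATTEMPTS. Assumes min + range_size - 1 still fits in a
 * 16-bit port; the real body also handles ICMP ids and the
 * RANDOM/OFFSET range flags.
 */
static void unique_port_sketch(struct nf_conntrack_tuple *tuple,
			       const struct nf_conn *ct,
			       u16 min, u16 range_size, u32 start_off)
{
	unsigned int i;

	for (i = 0; i < range_size && i < NF_NAT_MAX_ATTEMPTS; i++) {
		tuple->src.u.all = htons(min + (u16)((start_off + i) % range_size));
		if (!nf_nat_used_tuple(tuple, ct))
			return;		/* found a free source port */
	}
	/* every attempt clashed; caller may end up dropping on confirm */
}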

/* Manipulate the tuple into the range given. For NF_INET_POST_ROUTING,
 * we change the source to map into the range. For NF_INET_PRE_ROUTING
 * and NF_INET_LOCAL_OUT, we change the destination to map into the
 * range. It might not be possible to get a unique tuple, but we try.
 * At worst (or if we race), we will end up with a final duplicate in
 * __nf_conntrack_confirm and drop the packet. */
static void
get_unique_tuple(struct nf_conntrack_tuple *tuple,
		 const struct nf_conntrack_tuple *orig_tuple,
		 const struct nf_nat_range2 *range,
		 struct nf_conn *ct,
		 enum nf_nat_manip_type maniptype)
{}
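
/*
 * A condensed sketch of the decision sequence (hypothetical name),
 * expressed with this file's own helpers; the range-flag handling is
 * simplified relative to the real body.
 */
static void get_unique_tuple_sketch(struct nf_conntrack_tuple *tuple,
				    const struct nf_conntrack_tuple *orig_tuple,
				    const struct nf_nat_range2 *range,
				    struct nf_conn *ct,
				    enum nf_nat_manip_type maniptype)
{
	const struct nf_conntrack_zone *zone = nf_ct_zone(ct);
	struct net *net = nf_ct_net(ct);

	*tuple = *orig_tuple;

	/* 1: for SNAT, try to reuse an existing mapping for this source */
	if (maniptype == NF_NAT_MANIP_SRC &&
	    !(range->flags & NF_NAT_RANGE_PROTO_RANDOM_ALL) &&
	    find_appropriate_src(net, zone, orig_tuple, tuple, range) &&
	    !nf_nat_used_tuple(tuple, ct))
		return;

	/* 2: pick the best address within the range */
	find_best_ips_proto(zone, tuple, range, ct, maniptype);

	/* 3: keep the original proto part when it is in range and free */
	if (!(range->flags & NF_NAT_RANGE_PROTO_RANDOM_ALL) &&
	    (!(range->flags & NF_NAT_RANGE_PROTO_SPECIFIED) ||
	     l4proto_in_range(tuple, maniptype,
			      &range->min_proto, &range->max_proto)) &&
	    !nf_nat_used_tuple(tuple, ct))
		return;

	/* 4: last chance, search the range for a unique proto part */
	nf_nat_l4proto_unique_tuple(tuple, range, maniptype, ct);
}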

struct nf_conn_nat *nf_ct_nat_ext_add(struct nf_conn *ct)
{}
EXPORT_SYMBOL_GPL(nf_ct_nat_ext_add);

unsigned int
nf_nat_setup_info(struct nf_conn *ct,
		  const struct nf_nat_range2 *range,
		  enum nf_nat_manip_type maniptype)
{}
EXPORT_SYMBOL(nf_nat_setup_info);
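
/*
 * Typical caller pattern (hypothetical helper, modelled on what
 * SNAT-style targets do): fill an nf_nat_range2 with the mapped address
 * and hand it to nf_nat_setup_info() once per connection, from a hook
 * where the conntrack entry is not yet confirmed.
 */
static unsigned int demo_snat_setup(struct nf_conn *ct, __be32 new_addr)
{
	struct nf_nat_range2 range;

	memset(&range, 0, sizeof(range));
	range.flags	  = NF_NAT_RANGE_MAP_IPS;
	range.min_addr.ip = new_addr;
	range.max_addr.ip = new_addr;

	return nf_nat_setup_info(ct, &range, NF_NAT_MANIP_SRC);
}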

static unsigned int
__nf_nat_alloc_null_binding(struct nf_conn *ct, enum nf_nat_manip_type manip)
{}

unsigned int
nf_nat_alloc_null_binding(struct nf_conn *ct, unsigned int hooknum)
{}
EXPORT_SYMBOL_GPL(nf_nat_alloc_null_binding);

/* Do packet manipulations according to nf_nat_setup_info. */
unsigned int nf_nat_packet(struct nf_conn *ct,
			   enum ip_conntrack_info ctinfo,
			   unsigned int hooknum,
			   struct sk_buff *skb)
{}
EXPORT_SYMBOL_GPL(nf_nat_packet);
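
/*
 * Sketch of the core of the elided body (hypothetical name): derive the
 * manip type from the hook, flip the status bit for reply-direction
 * packets, and only rewrite the packet if this connection actually has
 * that kind of NAT set up.
 */
static unsigned int nf_nat_packet_sketch(struct nf_conn *ct,
					 enum ip_conntrack_info ctinfo,
					 unsigned int hooknum,
					 struct sk_buff *skb)
{
	enum nf_nat_manip_type mtype = HOOK2MANIP(hooknum);
	enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
	unsigned long statusbit;

	statusbit = (mtype == NF_NAT_MANIP_SRC) ? IPS_SRC_NAT : IPS_DST_NAT;

	/* a reply packet needs the opposite manipulation */
	if (dir == IP_CT_DIR_REPLY)
		statusbit ^= IPS_NAT_MASK;

	if (ct->status & statusbit)
		return nf_nat_manip_pkt(skb, ct, mtype, dir);

	return NF_ACCEPT;
}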

static bool in_vrf_postrouting(const struct nf_hook_state *state)
{}

unsigned int
nf_nat_inet_fn(void *priv, struct sk_buff *skb,
	       const struct nf_hook_state *state)
{}
EXPORT_SYMBOL_GPL(nf_nat_inet_fn);

struct nf_nat_proto_clean {};

/* kill conntracks with affected NAT section */
static int nf_nat_proto_remove(struct nf_conn *i, void *data)
{}

static void nf_nat_cleanup_conntrack(struct nf_conn *ct)
{}

static int nf_nat_proto_clean(struct nf_conn *ct, void *data)
{}

#if IS_ENABLED(CONFIG_NF_CT_NETLINK)

#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_conntrack.h>

static const struct nla_policy protonat_nla_policy[CTA_PROTONAT_MAX+1] = {};

static int nf_nat_l4proto_nlattr_to_range(struct nlattr *tb[],
					  struct nf_nat_range2 *range)
{}

static int nfnetlink_parse_nat_proto(struct nlattr *attr,
				     const struct nf_conn *ct,
				     struct nf_nat_range2 *range)
{}

static const struct nla_policy nat_nla_policy[CTA_NAT_MAX+1] = {};

static int nf_nat_ipv4_nlattr_to_range(struct nlattr *tb[],
				       struct nf_nat_range2 *range)
{}

static int nf_nat_ipv6_nlattr_to_range(struct nlattr *tb[],
				       struct nf_nat_range2 *range)
{}

static int
nfnetlink_parse_nat(const struct nlattr *nat,
		    const struct nf_conn *ct, struct nf_nat_range2 *range)
{}

/* This function is called under rcu_read_lock() */
static int
nfnetlink_parse_nat_setup(struct nf_conn *ct,
			  enum nf_nat_manip_type manip,
			  const struct nlattr *attr)
{}
#else
static int
nfnetlink_parse_nat_setup(struct nf_conn *ct,
			  enum nf_nat_manip_type manip,
			  const struct nlattr *attr)
{
	return -EOPNOTSUPP;
}
#endif

static struct nf_ct_helper_expectfn follow_master_nat = {};

int nf_nat_register_fn(struct net *net, u8 pf, const struct nf_hook_ops *ops,
		       const struct nf_hook_ops *orig_nat_ops, unsigned int ops_count)
{}

void nf_nat_unregister_fn(struct net *net, u8 pf, const struct nf_hook_ops *ops,
			  unsigned int ops_count)
{}

static struct pernet_operations nat_net_ops = {};

static const struct nf_nat_hook nat_hook = {};

static int __init nf_nat_init(void)
{}

static void __exit nf_nat_cleanup(void)
{}

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Network address translation core");

module_init(nf_nat_init);
module_exit(nf_nat_cleanup);