/* linux/net/netfilter/x_tables.c */

// SPDX-License-Identifier: GPL-2.0-only
/*
 * x_tables core - Backend for {ip,ip6,arp}_tables
 *
 * Copyright (C) 2006-2006 Harald Welte <[email protected]>
 * Copyright (C) 2006-2012 Patrick McHardy <[email protected]>
 *
 * Based on existing ip_tables code which is
 *   Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
 *   Copyright (C) 2000-2005 Netfilter Core Team <[email protected]>
 */
#define pr_fmt(fmt)
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/socket.h>
#include <linux/net.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <linux/mutex.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/audit.h>
#include <linux/user_namespace.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>

#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_arp.h>
#include <linux/netfilter_ipv4/ip_tables.h>
#include <linux/netfilter_ipv6/ip6_tables.h>
#include <linux/netfilter_arp/arp_tables.h>

MODULE_LICENSE();
MODULE_AUTHOR();
MODULE_DESCRIPTION();

/* Per-cpu counters are carved out of 4kb blocks (see xt_percpu_counter_alloc). */
#define XT_PCPU_BLOCK_SIZE 4096
/* Upper bound on a single table blob; rejects absurd ruleset sizes from userspace. */
#define XT_MAX_TABLE_SIZE (512 * 1024 * 1024)

/* Entry on xt_templates[]; presumably records a table blueprint registered
 * via xt_register_template() — fields elided in this skeleton. */
struct xt_template {};

/* One template list per NFPROTO_* family. NOTE(review): locking discipline
 * not visible here — confirm which mutex guards these lists. */
static struct list_head xt_templates[NFPROTO_NUMPROTO];

/* Per-network-namespace state, looked up through xt_pernet_id (fields elided). */
struct xt_pernet {};

/* NOTE(review): fields elided; name suggests compat offset bookkeeping — confirm. */
struct compat_delta {};

/* Per-address-family bookkeeping (match/target lists, mutexes) — fields elided. */
struct xt_af {};

/* presumably the net_generic() id for struct xt_pernet — TODO confirm */
static unsigned int xt_pernet_id __read_mostly;
static struct xt_af *xt __read_mostly;

/* NOTE(review): initializer elided in this skeleton ("=;" is not valid C);
 * should map each NFPROTO_* to its /proc name prefix. */
static const char *const xt_prefix[NFPROTO_NUMPROTO] =;

/* Registration hooks for targets. */
int xt_register_target(struct xt_target *target)
{}
EXPORT_SYMBOL(xt_register_target);

/* Remove a previously registered target (body elided in this skeleton). */
void
xt_unregister_target(struct xt_target *target)
{}
EXPORT_SYMBOL(xt_unregister_target);

/* Register an array of @n targets; body elided. */
int
xt_register_targets(struct xt_target *target, unsigned int n)
{}
EXPORT_SYMBOL(xt_register_targets);

/* Unregister an array of @n targets; body elided. */
void
xt_unregister_targets(struct xt_target *target, unsigned int n)
{}
EXPORT_SYMBOL(xt_unregister_targets);

/* Registration hooks for matches; mirror the target variants above. */
int xt_register_match(struct xt_match *match)
{}
EXPORT_SYMBOL(xt_register_match);

void
xt_unregister_match(struct xt_match *match)
{}
EXPORT_SYMBOL(xt_unregister_match);

/* Register an array of @n matches; body elided. */
int
xt_register_matches(struct xt_match *match, unsigned int n)
{}
EXPORT_SYMBOL(xt_register_matches);

void
xt_unregister_matches(struct xt_match *match, unsigned int n)
{}
EXPORT_SYMBOL(xt_unregister_matches);


/*
 * These are weird, but module loading must not be done with mutex
 * held (since they will register), and we have to have a single
 * function to use.
 */

/* Find match, grabs ref.  Returns ERR_PTR() on error. */
struct xt_match *xt_find_match(u8 af, const char *name, u8 revision)
{}
EXPORT_SYMBOL(xt_find_match);

/* As xt_find_match(), but presumably triggers module autoload on a miss
 * (see the "module loading must not be done with mutex held" note above). */
struct xt_match *
xt_request_find_match(uint8_t nfproto, const char *name, uint8_t revision)
{}
EXPORT_SYMBOL_GPL(xt_request_find_match);

/* Find target, grabs ref.  Returns ERR_PTR() on error. */
static struct xt_target *xt_find_target(u8 af, const char *name, u8 revision)
{}

struct xt_target *xt_request_find_target(u8 af, const char *name, u8 revision)
{}
EXPORT_SYMBOL_GPL(xt_request_find_target);


/* Copy size/name/revision of a match or target to userspace (body elided). */
static int xt_obj_to_user(u16 __user *psize, u16 size,
			  void __user *pname, const char *name,
			  u8 __user *prev, u8 rev)
{}

/* NOTE(review): macro body elided in this skeleton. */
#define XT_OBJ_TO_USER(U, K, TYPE, C_SIZE)

int xt_data_to_user(void __user *dst, const void *src,
		    int usersize, int size, int aligned_size)
{}
EXPORT_SYMBOL_GPL(xt_data_to_user);

#define XT_DATA_TO_USER(U, K, TYPE)

int xt_match_to_user(const struct xt_entry_match *m,
		     struct xt_entry_match __user *u)
{}
EXPORT_SYMBOL_GPL(xt_match_to_user);

int xt_target_to_user(const struct xt_entry_target *t,
		      struct xt_entry_target __user *u)
{}
EXPORT_SYMBOL_GPL(xt_target_to_user);

/* Revision probes for matches/targets; presumably update *bestp with the
 * highest revision seen — TODO confirm against the full implementation. */
static int match_revfn(u8 af, const char *name, u8 revision, int *bestp)
{}

static int target_revfn(u8 af, const char *name, u8 revision, int *bestp)
{}

/* Returns true or false (if no such extension at all) */
int xt_find_revision(u8 af, const char *name, u8 revision, int target,
		     int *err)
{}
EXPORT_SYMBOL_GPL(xt_find_revision);

/* Render a hook mask as a comma-separated list into @buf (body elided). */
static char *
textify_hooks(char *buf, size_t size, unsigned int mask, uint8_t nfproto)
{}

/**
 * xt_check_proc_name - check that name is suitable for /proc file creation
 *
 * @name: file name candidate
 * @size: length of buffer
 *
 * some x_tables modules wish to create a file in /proc.
 * This function makes sure that the name is suitable for this
 * purpose, it checks that name is NUL terminated and isn't a 'special'
 * name, like "..".
 *
 * returns negative number on error or 0 if name is useable.
 */
/* See the kernel-doc above: validates NUL termination and rejects
 * 'special' names such as "..". */
int xt_check_proc_name(const char *name, unsigned int size)
{}
EXPORT_SYMBOL(xt_check_proc_name);

int xt_check_match(struct xt_mtchk_param *par,
		   unsigned int size, u16 proto, bool inv_proto)
{}
EXPORT_SYMBOL_GPL(xt_check_match);

/** xt_check_entry_match - check that matches end before start of target
 *
 * @match: beginning of xt_entry_match
 * @target: beginning of this rules target (alleged end of matches)
 * @alignment: alignment requirement of match structures
 *
 * Validates that all matches add up to the beginning of the target,
 * and that each match covers at least the base structure size.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int xt_check_entry_match(const char *match, const char *target,
				const size_t alignment)
{}

/** xt_check_table_hooks - check hook entry points are sane
 *
 * @info xt_table_info to check
 * @valid_hooks - hook entry points that we can enter from
 *
 * Validates that the hook entry and underflows points are set up.
 *
 * Return: 0 on success, negative errno on failure.
 */
int xt_check_table_hooks(const struct xt_table_info *info, unsigned int valid_hooks)
{}
EXPORT_SYMBOL(xt_check_table_hooks);

/* Sanity check for a standard-target verdict value (body elided). */
static bool verdict_ok(int verdict)
{}

/* Sanity check for an ERROR target's sizes and message (body elided). */
static bool error_tg_ok(unsigned int usersize, unsigned int kernsize,
			const char *msg, unsigned int msglen)
{}

#ifdef CONFIG_NETFILTER_XTABLES_COMPAT
int xt_compat_add_offset(u_int8_t af, unsigned int offset, int delta)
{}
EXPORT_SYMBOL_GPL(xt_compat_add_offset);

void xt_compat_flush_offsets(u_int8_t af)
{}
EXPORT_SYMBOL_GPL(xt_compat_flush_offsets);

int xt_compat_calc_jump(u_int8_t af, unsigned int offset)
{}
EXPORT_SYMBOL_GPL(xt_compat_calc_jump);

int xt_compat_init_offsets(u8 af, unsigned int number)
{}
EXPORT_SYMBOL(xt_compat_init_offsets);

int xt_compat_match_offset(const struct xt_match *match)
{}
EXPORT_SYMBOL_GPL(xt_compat_match_offset);

void xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr,
			       unsigned int *size)
{}
EXPORT_SYMBOL_GPL(xt_compat_match_from_user);

/* NOTE(review): macro body elided in this skeleton. */
#define COMPAT_XT_DATA_TO_USER(U, K, TYPE, C_SIZE)

int xt_compat_match_to_user(const struct xt_entry_match *m,
			    void __user **dstptr, unsigned int *size)
{}
EXPORT_SYMBOL_GPL(xt_compat_match_to_user);

/* non-compat version may have padding after verdict */
struct compat_xt_standard_target {};

struct compat_xt_error_target {};

int xt_compat_check_entry_offsets(const void *base, const char *elems,
				  unsigned int target_offset,
				  unsigned int next_offset)
{}
EXPORT_SYMBOL(xt_compat_check_entry_offsets);
#endif /* CONFIG_NETFILTER_XTABLES_COMPAT */

/**
 * xt_check_entry_offsets - validate arp/ip/ip6t_entry
 *
 * @base: pointer to arp/ip/ip6t_entry
 * @elems: pointer to first xt_entry_match, i.e. ip(6)t_entry->elems
 * @target_offset: the arp/ip/ip6_t->target_offset
 * @next_offset: the arp/ip/ip6_t->next_offset
 *
 * validates that target_offset and next_offset are sane and that all
 * match sizes (if any) align with the target offset.
 *
 * This function does not validate the targets or matches themselves, it
 * only tests that all the offsets and sizes are correct, that all
 * match structures are aligned, and that the last structure ends where
 * the target structure begins.
 *
 * Also see xt_compat_check_entry_offsets for CONFIG_NETFILTER_XTABLES_COMPAT version.
 *
 * The arp/ip/ip6t_entry structure @base must have passed following tests:
 * - it must point to a valid memory location
 * - base to base + next_offset must be accessible, i.e. not exceed allocated
 *   length.
 *
 * A well-formed entry looks like this:
 *
 * ip(6)t_entry   match [mtdata]  match [mtdata] target [tgdata] ip(6)t_entry
 * e->elems[]-----'                              |               |
 *                matchsize                      |               |
 *                                matchsize      |               |
 *                                               |               |
 * target_offset---------------------------------'               |
 * next_offset---------------------------------------------------'
 *
 * elems[]: flexible array member at end of ip(6)/arpt_entry struct.
 *          This is where matches (if any) and the target reside.
 * target_offset: beginning of target.
 * next_offset: start of the next rule; also: size of this rule.
 * Since targets have a minimum size, target_offset + minlen <= next_offset.
 *
 * Every match stores its size, sum of sizes must not exceed target_offset.
 *
 * Return: 0 on success, negative errno on failure.
 */
int xt_check_entry_offsets(const void *base,
			   const char *elems,
			   unsigned int target_offset,
			   unsigned int next_offset)
{}
EXPORT_SYMBOL(xt_check_entry_offsets);

/**
 * xt_alloc_entry_offsets - allocate array to store rule head offsets
 *
 * @size: number of entries
 *
 * Return: NULL or zeroed kmalloc'd or vmalloc'd array
 */
unsigned int *xt_alloc_entry_offsets(unsigned int size)
{}
EXPORT_SYMBOL(xt_alloc_entry_offsets);

/**
 * xt_find_jump_offset - check if target is a valid jump offset
 *
 * @offsets: array containing all valid rule start offsets of a rule blob
 * @target: the jump target to search for
 * @size: entries in @offset
 */
bool xt_find_jump_offset(const unsigned int *offsets,
			 unsigned int target, unsigned int size)
{}
EXPORT_SYMBOL(xt_find_jump_offset);

int xt_check_target(struct xt_tgchk_param *par,
		    unsigned int size, u16 proto, bool inv_proto)
{}
EXPORT_SYMBOL_GPL(xt_check_target);

/**
 * xt_copy_counters - copy counters and metadata from a sockptr_t
 *
 * @arg: src sockptr
 * @len: alleged size of userspace memory
 * @info: where to store the xt_counters_info metadata
 *
 * Copies counter meta data from @user and stores it in @info.
 *
 * vmallocs memory to hold the counters, then copies the counter data
 * from @user to the new memory and returns a pointer to it.
 *
 * If called from a compat syscall, @info gets converted automatically to the
 * 64bit representation.
 *
 * The metadata associated with the counters is stored in @info.
 *
 * Return: returns pointer that caller has to test via IS_ERR().
 * If IS_ERR is false, caller has to vfree the pointer.
 */
/* See the kernel-doc above: returns ERR_PTR() or a vmalloc'd counter blob
 * the caller must vfree. */
void *xt_copy_counters(sockptr_t arg, unsigned int len,
		       struct xt_counters_info *info)
{}
EXPORT_SYMBOL_GPL(xt_copy_counters);

#ifdef CONFIG_NETFILTER_XTABLES_COMPAT
int xt_compat_target_offset(const struct xt_target *target)
{}
EXPORT_SYMBOL_GPL(xt_compat_target_offset);

void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr,
				unsigned int *size)
{}
EXPORT_SYMBOL_GPL(xt_compat_target_from_user);

int xt_compat_target_to_user(const struct xt_entry_target *t,
			     void __user **dstptr, unsigned int *size)
{}
EXPORT_SYMBOL_GPL(xt_compat_target_to_user);
#endif

struct xt_table_info *xt_alloc_table_info(unsigned int size)
{}
EXPORT_SYMBOL(xt_alloc_table_info);

void xt_free_table_info(struct xt_table_info *info)
{}
EXPORT_SYMBOL(xt_free_table_info);

/* Lockless/read-side table lookup by family and name (body elided). */
struct xt_table *xt_find_table(struct net *net, u8 af, const char *name)
{}
EXPORT_SYMBOL(xt_find_table);

/* Find table by name, grabs mutex & ref.  Returns ERR_PTR on error. */
struct xt_table *xt_find_table_lock(struct net *net, u_int8_t af,
				    const char *name)
{}
EXPORT_SYMBOL_GPL(xt_find_table_lock);

/* As xt_find_table_lock(), presumably with module autoload on a miss. */
struct xt_table *xt_request_find_table_lock(struct net *net, u_int8_t af,
					    const char *name)
{}
EXPORT_SYMBOL_GPL(xt_request_find_table_lock);

void xt_table_unlock(struct xt_table *table)
{}
EXPORT_SYMBOL_GPL(xt_table_unlock);

#ifdef CONFIG_NETFILTER_XTABLES_COMPAT
void xt_compat_lock(u_int8_t af)
{}
EXPORT_SYMBOL_GPL(xt_compat_lock);

void xt_compat_unlock(u_int8_t af)
{}
EXPORT_SYMBOL_GPL(xt_compat_unlock);
#endif

DEFINE_PER_CPU(seqcount_t, xt_recseq);
EXPORT_PER_CPU_SYMBOL_GPL(xt_recseq);

struct static_key xt_tee_enabled __read_mostly;
EXPORT_SYMBOL_GPL(xt_tee_enabled);

/* Allocate per-cpu jump stacks for a table blob (body elided). */
static int xt_jumpstack_alloc(struct xt_table_info *i)
{}

struct xt_counters *xt_counters_alloc(unsigned int counters)
{}
EXPORT_SYMBOL(xt_counters_alloc);

/* Swap in @newinfo as the table's active blob; old blob is returned
 * (behavioral details elided in this skeleton). */
struct xt_table_info *
xt_replace_table(struct xt_table *table,
	      unsigned int num_counters,
	      struct xt_table_info *newinfo,
	      int *error)
{}
EXPORT_SYMBOL_GPL(xt_replace_table);

struct xt_table *xt_register_table(struct net *net,
				   const struct xt_table *input_table,
				   struct xt_table_info *bootstrap,
				   struct xt_table_info *newinfo)
{}
EXPORT_SYMBOL_GPL(xt_register_table);

void *xt_unregister_table(struct xt_table *table)
{}
EXPORT_SYMBOL_GPL(xt_unregister_table);

#ifdef CONFIG_PROC_FS
/* seq_file iterator for /proc table listings (bodies elided in this skeleton). */
static void *xt_table_seq_start(struct seq_file *seq, loff_t *pos)
{}

static void *xt_table_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{}

static void xt_table_seq_stop(struct seq_file *seq, void *v)
{}

static int xt_table_seq_show(struct seq_file *seq, void *v)
{}

/* NOTE(review): initializer elided ("=;" is not valid C); should wire the
 * four xt_table_seq_* callbacks above. */
static const struct seq_operations xt_table_seq_ops =;

/*
 * Traverse state for ip{,6}_{tables,matches} for helping crossing
 * the multi-AF mutexes.
 */
struct nf_mttg_trav {};

/* presumably the traversal-state machine states — fields elided */
enum {};

/* Shared match/target iterator; @is_target selects which list is walked. */
static void *xt_mttg_seq_next(struct seq_file *seq, void *v, loff_t *ppos,
    bool is_target)
{}

static void *xt_mttg_seq_start(struct seq_file *seq, loff_t *pos,
    bool is_target)
{}

static void xt_mttg_seq_stop(struct seq_file *seq, void *v)
{}

/* Match-listing seq_file callbacks, built on the xt_mttg_* helpers above. */
static void *xt_match_seq_start(struct seq_file *seq, loff_t *pos)
{}

static void *xt_match_seq_next(struct seq_file *seq, void *v, loff_t *ppos)
{}

static int xt_match_seq_show(struct seq_file *seq, void *v)
{}

/* NOTE(review): initializer elided; should wire the xt_match_seq_* callbacks. */
static const struct seq_operations xt_match_seq_ops =;

/* Target-listing seq_file callbacks. */
static void *xt_target_seq_start(struct seq_file *seq, loff_t *pos)
{}

static void *xt_target_seq_next(struct seq_file *seq, void *v, loff_t *ppos)
{}

static int xt_target_seq_show(struct seq_file *seq, void *v)
{}

/* NOTE(review): initializer elided; should wire the xt_target_seq_* callbacks. */
static const struct seq_operations xt_target_seq_ops =;

/* NOTE(review): /proc file-name format strings elided in this skeleton. */
#define FORMAT_TABLES
#define FORMAT_MATCHES
#define FORMAT_TARGETS

#endif /* CONFIG_PROC_FS */

/**
 * xt_hook_ops_alloc - set up hooks for a new table
 * @table:	table with metadata needed to set up hooks
 * @fn:		Hook function
 *
 * This function will create the nf_hook_ops that the x_table needs
 * to hand to xt_hook_link_net().
 */
struct nf_hook_ops *
xt_hook_ops_alloc(const struct xt_table *table, nf_hookfn *fn)
{}
EXPORT_SYMBOL_GPL(xt_hook_ops_alloc);

/* Record a table blueprint on xt_templates[] so namespaces can
 * instantiate it on demand (body elided). */
int xt_register_template(const struct xt_table *table,
			 int (*table_init)(struct net *net))
{}
EXPORT_SYMBOL_GPL(xt_register_template);

void xt_unregister_template(const struct xt_table *table)
{}
EXPORT_SYMBOL_GPL(xt_unregister_template);

/* Per-family, per-netns setup/teardown (bodies elided). */
int xt_proto_init(struct net *net, u_int8_t af)
{}
EXPORT_SYMBOL_GPL(xt_proto_init);

void xt_proto_fini(struct net *net, u_int8_t af)
{}
EXPORT_SYMBOL_GPL(xt_proto_fini);

/**
 * xt_percpu_counter_alloc - allocate x_tables rule counter
 *
 * @state: pointer to xt_percpu allocation state
 * @counter: pointer to counter struct inside the ip(6)/arpt_entry struct
 *
 * On SMP, the packet counter [ ip(6)t_entry->counters.pcnt ] will then
 * contain the address of the real (percpu) counter.
 *
 * Rule evaluation needs to use xt_get_this_cpu_counter() helper
 * to fetch the real percpu counter.
 *
 * To speed up allocation and improve data locality, a 4kb block is
 * allocated.  Freeing any counter may free an entire block, so all
 * counters allocated using the same state must be freed at the same
 * time.
 *
 * xt_percpu_counter_alloc_state contains the base address of the
 * allocated page and the current sub-offset.
 *
 * returns false on error.
 */
/* See the kernel-doc above: hands out slots from a shared 4kb percpu block. */
bool xt_percpu_counter_alloc(struct xt_percpu_counter_alloc_state *state,
			     struct xt_counters *counter)
{}
EXPORT_SYMBOL_GPL(xt_percpu_counter_alloc);

/* Frees the block a counter belongs to; all counters from one state must be
 * freed together (see the allocation kernel-doc above). */
void xt_percpu_counter_free(struct xt_counters *counters)
{}
EXPORT_SYMBOL_GPL(xt_percpu_counter_free);

/* Per-netns constructor registered through xt_net_ops (body elided). */
static int __net_init xt_net_init(struct net *net)
{}

/* Per-netns destructor (body elided). */
static void __net_exit xt_net_exit(struct net *net)
{}

/* NOTE(review): initializer elided ("=;" is not valid C); presumably sets
 * .init = xt_net_init, .exit = xt_net_exit plus id/size for xt_pernet — confirm. */
static struct pernet_operations xt_net_ops =;

/* Module entry point (body elided in this skeleton). */
static int __init xt_init(void)
{}

/* Module exit point (body elided in this skeleton). */
static void __exit xt_fini(void)
{}

/*
 * module_init() was invoked without its argument while module_exit()
 * names xt_fini; register xt_init symmetrically.
 */
module_init(xt_init);
module_exit(xt_fini);