// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * linux/net/packet/af_packet.c
 *
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		PACKET - implements raw packet sockets.
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <[email protected]>
 *		Alan Cox, <[email protected]>
 *
 * Fixes:
 *		Alan Cox	:	verify_area() now used correctly
 *		Alan Cox	:	new skbuff lists, look ma no backlogs!
 *		Alan Cox	:	tidied skbuff lists.
 *		Alan Cox	:	Now uses generic datagram routines I
 *					added. Also fixed the peek/read crash
 *					from all old Linux datagram code.
 *		Alan Cox	:	Uses the improved datagram code.
 *		Alan Cox	:	Added NULL's for socket options.
 *		Alan Cox	:	Re-commented the code.
 *		Alan Cox	:	Use new kernel side addressing
 *		Rob Janssen	:	Correct MTU usage.
 *		Dave Platt	:	Counter leaks caused by incorrect
 *					interrupt locking and some slightly
 *					dubious gcc output. Can you read
 *					compiler: it said _VOLATILE_
 *	Richard Kooijman	:	Timestamp fixes.
 *		Alan Cox	:	New buffers. Use sk->mac.raw.
 *		Alan Cox	:	sendmsg/recvmsg support.
 *		Alan Cox	:	Protocol setting support
 *	Alexey Kuznetsov	:	Untied from IPv4 stack.
 *	Cyrus Durgin		:	Fixed kerneld for kmod.
 *	Michal Ostrowski        :       Module initialization cleanup.
 *         Ulises Alonso        :       Frame number limit removal and
 *                                      packet_set_ring memory leak.
 *		Eric Biederman	:	Allow for > 8 byte hardware addresses.
 *					The convention is that longer addresses
 *					will simply extend the hardware address
 *					byte arrays at the end of sockaddr_ll
 *					and packet_mreq.
 *		Johann Baudy	:	Added TX RING.
 *		Chetan Loke	:	Implemented TPACKET_V3 block abstraction
 *					layer.
 *					Copyright (C) 2011, <[email protected]>
 */

#define pr_fmt(fmt)

#include <linux/ethtool.h>
#include <linux/filter.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/capability.h>
#include <linux/fcntl.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_packet.h>
#include <linux/wireless.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <net/net_namespace.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/uaccess.h>
#include <asm/ioctls.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/io.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/poll.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/if_vlan.h>
#include <linux/virtio_net.h>
#include <linux/errqueue.h>
#include <linux/net_tstamp.h>
#include <linux/percpu.h>
#ifdef CONFIG_INET
#include <net/inet_common.h>
#endif
#include <linux/bpf.h>
#include <net/compat.h>
#include <linux/netfilter_netdev.h>

#include "internal.h"

/*
   Assumptions:
   - If the device has no dev->header_ops->create, there is no LL header
     visible above the device. In this case, its hard_header_len should be 0.
     The device may prepend its own header internally. In this case, its
     needed_headroom should be set to the space needed for it to add its
     internal header.
     For example, a WiFi driver pretending to be an Ethernet driver should
     set its hard_header_len to be the Ethernet header length, and set its
     needed_headroom to be (the real WiFi header length - the fake Ethernet
     header length).
   - a packet socket receives packets with the ll header already pulled,
     so SOCK_RAW must push it back on delivery (see the sketch after
     this comment).

On receive:
-----------

Incoming, dev_has_header(dev) == true
   mac_header -> ll header
   data       -> data

Outgoing, dev_has_header(dev) == true
   mac_header -> ll header
   data       -> ll header

Incoming, dev_has_header(dev) == false
   mac_header -> data
     However drivers often make it point to the ll header.
     This is incorrect because the ll header should be invisible to us.
   data       -> data

Outgoing, dev_has_header(dev) == false
   mac_header -> data. ll header is invisible to us.
   data       -> data

In summary:
  If dev_has_header(dev) == false we are unable to restore the ll header,
    because it is invisible to us.


On transmit:
------------

dev_has_header(dev) == true
   mac_header -> ll header
   data       -> ll header

dev_has_header(dev) == false (ll header is invisible to us)
   mac_header -> data
   data       -> data

   We must set network_header to the correct position on output;
   the packet classifier depends on it.
 */
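
/* A minimal illustrative sketch (hypothetical helper, not the kernel's
 * exact code) of the "push it back" rule above: on receive the ll
 * header has already been pulled, so a SOCK_RAW socket must restore it
 * in front of skb->data, while SOCK_DGRAM leaves it off. This only
 * works when dev_has_header() is true, i.e. mac_header really points
 * at the ll header.
 */
static inline void example_restore_ll_header(struct sk_buff *skb,
					     const struct sock *sk)
{
	if (sk->sk_type != SOCK_DGRAM)
		skb_push(skb, skb->data - skb_mac_header(skb));
}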

/* Private packet socket structures. */

/* identical to struct packet_mreq except it has
 * a longer address field.
 */
struct packet_mreq_max {};
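
/* For illustration only (hypothetical name; the uapi definitions live
 * in include/uapi/linux/if_packet.h): packet_mreq ends in an 8-byte
 * mr_address[], while packet_mreq_max widens only that trailing array
 * to MAX_ADDR_LEN, so the leading fields stay layout compatible and
 * longer hardware addresses simply extend the tail, per the changelog
 * note above.
 */
struct example_packet_mreq_max {
	int		mr_ifindex;		  /* interface index */
	unsigned short	mr_type;		  /* PACKET_MR_* */
	unsigned short	mr_alen;		  /* address length */
	unsigned char	mr_address[MAX_ADDR_LEN]; /* [8] in packet_mreq */
};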

union tpacket_uhdr {};

static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
		int closing, int tx_ring);

#define V3_ALIGNMENT

#define BLK_HDR_LEN

#define BLK_PLUS_PRIV(sz_of_priv)

#define BLOCK_STATUS(x)
#define BLOCK_NUM_PKTS(x)
#define BLOCK_O2FP(x)
#define BLOCK_LEN(x)
#define BLOCK_SNUM(x)
#define BLOCK_O2PRIV(x)

struct packet_sock;
static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
		       struct packet_type *pt, struct net_device *orig_dev);

static void *packet_previous_frame(struct packet_sock *po,
		struct packet_ring_buffer *rb,
		int status);
static void packet_increment_head(struct packet_ring_buffer *buff);
static int prb_curr_blk_in_use(struct tpacket_block_desc *);
static void *prb_dispatch_next_block(struct tpacket_kbdq_core *,
			struct packet_sock *);
static void prb_retire_current_block(struct tpacket_kbdq_core *,
		struct packet_sock *, unsigned int status);
static int prb_queue_frozen(struct tpacket_kbdq_core *);
static void prb_open_block(struct tpacket_kbdq_core *,
		struct tpacket_block_desc *);
static void prb_retire_rx_blk_timer_expired(struct timer_list *);
static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *);
static void prb_fill_rxhash(struct tpacket_kbdq_core *, struct tpacket3_hdr *);
static void prb_clear_rxhash(struct tpacket_kbdq_core *,
		struct tpacket3_hdr *);
static void prb_fill_vlan_info(struct tpacket_kbdq_core *,
		struct tpacket3_hdr *);
static void packet_flush_mclist(struct sock *sk);
static u16 packet_pick_tx_queue(struct sk_buff *skb);

struct packet_skb_cb {};

#define vio_le()

#define PACKET_SKB_CB(__skb)

#define GET_PBDQC_FROM_RB(x)
#define GET_PBLOCK_DESC(x, bid)
#define GET_CURR_PBLOCK_DESC_FROM_CORE(x)
#define GET_NEXT_PRB_BLK_NUM(x)

static void __fanout_unlink(struct sock *sk, struct packet_sock *po);
static void __fanout_link(struct sock *sk, struct packet_sock *po);

#ifdef CONFIG_NETFILTER_EGRESS
static noinline struct sk_buff *nf_hook_direct_egress(struct sk_buff *skb)
{}
#endif

static int packet_xmit(const struct packet_sock *po, struct sk_buff *skb)
{}

static struct net_device *packet_cached_dev_get(struct packet_sock *po)
{}

static void packet_cached_dev_assign(struct packet_sock *po,
				     struct net_device *dev)
{}

static void packet_cached_dev_reset(struct packet_sock *po)
{}

static u16 packet_pick_tx_queue(struct sk_buff *skb)
{}

/* __register_prot_hook must be invoked through register_prot_hook
 * or from a context in which asynchronous accesses to the packet
 * socket are not possible (packet_create()).
 */
static void __register_prot_hook(struct sock *sk)
{}

static void register_prot_hook(struct sock *sk)
{}

/* If the sync parameter is true, we will temporarily drop
 * the po->bind_lock and do a synchronize_net to make sure no
 * asynchronous packet processing paths still refer to the elements
 * of po->prot_hook.  If the sync parameter is false, it is the
 * caller's responsibility to take care of this.
 */
static void __unregister_prot_hook(struct sock *sk, bool sync)
{}

static void unregister_prot_hook(struct sock *sk, bool sync)
{}
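
/* A minimal sketch of the 'sync' contract described above (simplified;
 * hypothetical helper, not the body of __unregister_prot_hook): drop
 * po->bind_lock, wait out every RCU reader that might still walk
 * po->prot_hook, then retake the lock.
 */
static inline void example_unregister_sync(struct packet_sock *po)
{
	__dev_remove_pack(&po->prot_hook); /* unhook from the RX path */

	spin_unlock(&po->bind_lock);
	synchronize_net();		/* no CPU can still see the hook */
	spin_lock(&po->bind_lock);
}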

static inline struct page * __pure pgv_to_page(void *addr)
{}

static void __packet_set_status(struct packet_sock *po, void *frame, int status)
{}

static int __packet_get_status(const struct packet_sock *po, void *frame)
{}

static __u32 tpacket_get_timestamp(struct sk_buff *skb, struct timespec64 *ts,
				   unsigned int flags)
{}

static __u32 __packet_set_timestamp(struct packet_sock *po, void *frame,
				    struct sk_buff *skb)
{}

static void *packet_lookup_frame(const struct packet_sock *po,
				 const struct packet_ring_buffer *rb,
				 unsigned int position,
				 int status)
{}

static void *packet_current_frame(struct packet_sock *po,
		struct packet_ring_buffer *rb,
		int status)
{}

static u16 vlan_get_tci(struct sk_buff *skb, struct net_device *dev)
{}

static __be16 vlan_get_protocol_dgram(struct sk_buff *skb)
{}

static void prb_del_retire_blk_timer(struct tpacket_kbdq_core *pkc)
{}

static void prb_shutdown_retire_blk_timer(struct packet_sock *po,
		struct sk_buff_head *rb_queue)
{}

static void prb_setup_retire_blk_timer(struct packet_sock *po)
{}

static int prb_calc_retire_blk_tmo(struct packet_sock *po,
				int blk_size_in_bytes)
{}

static void prb_init_ft_ops(struct tpacket_kbdq_core *p1,
			union tpacket_req_u *req_u)
{}

static void init_prb_bdqc(struct packet_sock *po,
			struct packet_ring_buffer *rb,
			struct pgv *pg_vec,
			union tpacket_req_u *req_u)
{}

/*  Do NOT update the last_blk_num first.
 *  Assumes sk_buff_head lock is held.
 */
static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *pkc)
{}

/*
 * Timer logic:
 * 1) We refresh the timer only when we open a block.
 *    By doing this we don't waste cycles refreshing the timer
 *    on a packet-by-packet basis.
 *
 * With a 1MB block-size, on a 1Gbps line, it will take
 * i) ~8 ms to fill a block + ii) memcpy etc.
 * In this cut we are not accounting for the memcpy time.
 *
 * So, if the user sets the 'tmo' to 10ms then the timer
 * will never fire while the block is still getting filled
 * (which is what we want). However, the user could choose
 * to close a block early and that's fine.
 *
 * But when the timer does fire, we check whether or not to refresh it.
 * Since the tmo granularity is in msecs, it is not too expensive
 * to refresh the timer, let's say every '8' msecs.
 * Either the user can set the 'tmo' or we can derive it based on
 * a) line-speed and b) block-size.
 * prb_calc_retire_blk_tmo() calculates the tmo; a worked sketch of
 * the arithmetic follows the function below.
 */
static void prb_retire_rx_blk_timer_expired(struct timer_list *t)
{}
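
/* Back-of-the-envelope version of the derivation above (hypothetical
 * helper; the real computation is prb_calc_retire_blk_tmo()): the time
 * to fill a block is its size in bits divided by the line rate. A 1MB
 * block at 1Gbps gives 8388608 bits / 1000 Mbps ~= 8.4 ms, so a 10 ms
 * 'tmo' never fires while the block is still filling.
 */
static inline unsigned int example_blk_tmo_msec(unsigned int blk_size_bytes,
						unsigned int link_speed_mbps)
{
	u64 fill_usec = (u64)blk_size_bytes * 8; /* bits to move */

	do_div(fill_usec, link_speed_mbps);	 /* bits / Mbps == usecs */

	/* round up to whole msecs; memcpy time is ignored, as above */
	return (unsigned int)DIV_ROUND_UP_ULL(fill_usec, USEC_PER_MSEC);
}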

static void prb_flush_block(struct tpacket_kbdq_core *pkc1,
		struct tpacket_block_desc *pbd1, __u32 status)
{}

/*
 * Side effects:
 *
 * 1) flush the block
 * 2) increment active_blk_num
 *
 * Note: we DON'T refresh the timer on purpose,
 *	 because almost always the next block will be opened.
 */
static void prb_close_block(struct tpacket_kbdq_core *pkc1,
		struct tpacket_block_desc *pbd1,
		struct packet_sock *po, unsigned int stat)
{}

static void prb_thaw_queue(struct tpacket_kbdq_core *pkc)
{}

/*
 * Side effects of opening a block:
 *
 * 1) prb_queue is thawed.
 * 2) retire_blk_timer is refreshed.
 */
static void prb_open_block(struct tpacket_kbdq_core *pkc1,
	struct tpacket_block_desc *pbd1)
{}

/*
 * Queue freeze logic:
 * 1) Assume tp_block_nr = 8 blocks.
 * 2) At time 't0', user opens Rx ring.
 * 3) Some time past 't0', kernel starts filling blocks starting from 0 .. 7
 * 4) user-space is either sleeping or processing block '0'.
 * 5) tpacket_rcv is currently filling block '7', since there is no space left,
 *    it will close block-7, loop around and try to fill block '0'.
 *    call-flow:
 *    __packet_lookup_frame_in_block
 *      prb_retire_current_block()
 *      prb_dispatch_next_block()
 *        |->(BLOCK_STATUS == USER) evaluates to true
 *    5.1) Since block-0 is currently in-use, we just freeze the queue.
 * 6) Now there are two cases:
 *    6.1) Link goes idle right after the queue is frozen.
 *         But remember, the last open_block() refreshed the timer.
 *         When this timer expires, it will refresh itself so that we can
 *         re-open block-0 in the near future.
 *    6.2) Link is busy and keeps on receiving packets. This is a simple
 *         case and __packet_lookup_frame_in_block will check if block-0
 *         is free and can now be re-used.
 */
static void prb_freeze_queue(struct tpacket_kbdq_core *pkc,
				  struct packet_sock *po)
{}

#define TOTAL_PKT_LEN_INCL_ALIGN(length)

/*
 * If the next block is free then we will dispatch it
 * and return a good offset.
 * Else, we will freeze the queue.
 * So, the caller must check the return value (a simplified sketch
 * follows the function below).
 */
static void *prb_dispatch_next_block(struct tpacket_kbdq_core *pkc,
		struct packet_sock *po)
{}
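
/* Simplified sketch of the dispatch-or-freeze decision documented
 * above (hypothetical helper mirroring prb_dispatch_next_block, not
 * its exact body): if user space still owns the current block we
 * freeze the queue and return NULL; otherwise we open the block and
 * return the offset for the first frame.
 */
static inline void *example_dispatch_next_block(struct tpacket_kbdq_core *pkc,
						struct packet_sock *po)
{
	struct tpacket_block_desc *pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);

	if (BLOCK_STATUS(pbd) & TP_STATUS_USER) {
		/* case 5.1 in the freeze comment above */
		prb_freeze_queue(pkc, po);
		return NULL;
	}

	prb_open_block(pkc, pbd);	/* thaws the queue, refreshes timer */
	return (void *)pkc->nxt_offset;
}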

static void prb_retire_current_block(struct tpacket_kbdq_core *pkc,
		struct packet_sock *po, unsigned int status)
{}

static int prb_curr_blk_in_use(struct tpacket_block_desc *pbd)
{}

static int prb_queue_frozen(struct tpacket_kbdq_core *pkc)
{}

static void prb_clear_blk_fill_status(struct packet_ring_buffer *rb)
	__releases(&pkc->blk_fill_in_prog_lock)
{}

static void prb_fill_rxhash(struct tpacket_kbdq_core *pkc,
			struct tpacket3_hdr *ppd)
{}

static void prb_clear_rxhash(struct tpacket_kbdq_core *pkc,
			struct tpacket3_hdr *ppd)
{}

static void prb_fill_vlan_info(struct tpacket_kbdq_core *pkc,
			struct tpacket3_hdr *ppd)
{}

static void prb_run_all_ft_ops(struct tpacket_kbdq_core *pkc,
			struct tpacket3_hdr *ppd)
{}

static void prb_fill_curr_block(char *curr,
				struct tpacket_kbdq_core *pkc,
				struct tpacket_block_desc *pbd,
				unsigned int len)
	__acquires(&pkc->blk_fill_in_prog_lock)
{}

/* Assumes caller has the sk->rx_queue.lock */
static void *__packet_lookup_frame_in_block(struct packet_sock *po,
					    struct sk_buff *skb,
					    unsigned int len
					    )
{}

static void *packet_current_rx_frame(struct packet_sock *po,
					    struct sk_buff *skb,
					    int status, unsigned int len)
{}

static void *prb_lookup_block(const struct packet_sock *po,
			      const struct packet_ring_buffer *rb,
			      unsigned int idx,
			      int status)
{}

static int prb_previous_blk_num(struct packet_ring_buffer *rb)
{}

/* Assumes caller has held the rx_queue.lock */
static void *__prb_previous_block(struct packet_sock *po,
					 struct packet_ring_buffer *rb,
					 int status)
{}

static void *packet_previous_rx_frame(struct packet_sock *po,
					     struct packet_ring_buffer *rb,
					     int status)
{}

static void packet_increment_rx_head(struct packet_sock *po,
					    struct packet_ring_buffer *rb)
{}

static void *packet_previous_frame(struct packet_sock *po,
		struct packet_ring_buffer *rb,
		int status)
{}

static void packet_increment_head(struct packet_ring_buffer *buff)
{}

static void packet_inc_pending(struct packet_ring_buffer *rb)
{}

static void packet_dec_pending(struct packet_ring_buffer *rb)
{}

static unsigned int packet_read_pending(const struct packet_ring_buffer *rb)
{}

static int packet_alloc_pending(struct packet_sock *po)
{}

static void packet_free_pending(struct packet_sock *po)
{}

#define ROOM_POW_OFF
#define ROOM_NONE
#define ROOM_LOW
#define ROOM_NORMAL

static bool __tpacket_has_room(const struct packet_sock *po, int pow_off)
{}

static bool __tpacket_v3_has_room(const struct packet_sock *po, int pow_off)
{}

static int __packet_rcv_has_room(const struct packet_sock *po,
				 const struct sk_buff *skb)
{}

static int packet_rcv_has_room(struct packet_sock *po, struct sk_buff *skb)
{}

static void packet_rcv_try_clear_pressure(struct packet_sock *po)
{}

static void packet_sock_destruct(struct sock *sk)
{}

static bool fanout_flow_is_huge(struct packet_sock *po, struct sk_buff *skb)
{}

static unsigned int fanout_demux_hash(struct packet_fanout *f,
				      struct sk_buff *skb,
				      unsigned int num)
{}

static unsigned int fanout_demux_lb(struct packet_fanout *f,
				    struct sk_buff *skb,
				    unsigned int num)
{}

static unsigned int fanout_demux_cpu(struct packet_fanout *f,
				     struct sk_buff *skb,
				     unsigned int num)
{}

static unsigned int fanout_demux_rnd(struct packet_fanout *f,
				     struct sk_buff *skb,
				     unsigned int num)
{}

static unsigned int fanout_demux_rollover(struct packet_fanout *f,
					  struct sk_buff *skb,
					  unsigned int idx, bool try_self,
					  unsigned int num)
{}

static unsigned int fanout_demux_qm(struct packet_fanout *f,
				    struct sk_buff *skb,
				    unsigned int num)
{}

static unsigned int fanout_demux_bpf(struct packet_fanout *f,
				     struct sk_buff *skb,
				     unsigned int num)
{}

static bool fanout_has_flag(struct packet_fanout *f, u16 flag)
{}

static int packet_rcv_fanout(struct sk_buff *skb, struct net_device *dev,
			     struct packet_type *pt, struct net_device *orig_dev)
{}

DEFINE_MUTEX(fanout_mutex);
EXPORT_SYMBOL_GPL(fanout_mutex);
static LIST_HEAD(fanout_list);
static u16 fanout_next_id;

static void __fanout_link(struct sock *sk, struct packet_sock *po)
{}

static void __fanout_unlink(struct sock *sk, struct packet_sock *po)
{}

static bool match_fanout_group(struct packet_type *ptype, struct sock *sk)
{}

static void fanout_init_data(struct packet_fanout *f)
{}

static void __fanout_set_data_bpf(struct packet_fanout *f, struct bpf_prog *new)
{}

static int fanout_set_data_cbpf(struct packet_sock *po, sockptr_t data,
				unsigned int len)
{}

static int fanout_set_data_ebpf(struct packet_sock *po, sockptr_t data,
				unsigned int len)
{}

static int fanout_set_data(struct packet_sock *po, sockptr_t data,
			   unsigned int len)
{}

static void fanout_release_data(struct packet_fanout *f)
{}

static bool __fanout_id_is_free(struct sock *sk, u16 candidate_id)
{}

static bool fanout_find_new_id(struct sock *sk, u16 *new_id)
{}

static int fanout_add(struct sock *sk, struct fanout_args *args)
{}

/* If pkt_sk(sk)->fanout->sk_ref is zero, this function removes
 * pkt_sk(sk)->fanout from fanout_list and returns pkt_sk(sk)->fanout.
 * It is the responsibility of the caller to call fanout_release_data() and
 * free the returned packet_fanout (after synchronize_net())
 */
static struct packet_fanout *fanout_release(struct sock *sk)
{}
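
/* Sketch of the caller contract spelled out above (hypothetical
 * helper, simplified from packet_release()): detach the fanout, let
 * in-flight RCU readers finish, then release its data and memory.
 */
static inline void example_fanout_teardown(struct sock *sk)
{
	struct packet_fanout *f;

	f = fanout_release(sk); /* non-NULL only when sk_ref hit zero */

	synchronize_net();	/* readers may still be walking f */

	if (f) {
		fanout_release_data(f);
		kvfree(f);	/* fanouts are kvzalloc()'d */
	}
}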

static bool packet_extra_vlan_len_allowed(const struct net_device *dev,
					  struct sk_buff *skb)
{}

static const struct proto_ops packet_ops;

static const struct proto_ops packet_ops_spkt;

static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev,
			   struct packet_type *pt, struct net_device *orig_dev)
{}

static void packet_parse_headers(struct sk_buff *skb, struct socket *sock)
{}

/*
 *	Output a raw packet to the device layer. This bypasses all the other
 *	protocol layers, and you must therefore supply it with a complete frame.
 */

static int packet_sendmsg_spkt(struct socket *sock, struct msghdr *msg,
			       size_t len)
{}

static unsigned int run_filter(struct sk_buff *skb,
			       const struct sock *sk,
			       unsigned int res)
{}

static int packet_rcv_vnet(struct msghdr *msg, const struct sk_buff *skb,
			   size_t *len, int vnet_hdr_sz)
{}

/*
 * This function does lazy skb cloning in the hope that most packets
 * are discarded by BPF.
 *
 * Note the tricky part: we DO mangle the shared skb! skb->data, skb->len
 * and skb->cb are mangled. It works because (and until) packets
 * falling here are owned by the current CPU. Output packets are cloned
 * by dev_queue_xmit_nit(), and input packets are processed by net_bh
 * sequentially, so if we return the skb to its original state on exit,
 * we will not harm anyone. (A sketch of the clone-on-demand rule
 * follows packet_rcv() below.)
 */

static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
		      struct packet_type *pt, struct net_device *orig_dev)
{}
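
/* Sketch of the clone-on-demand rule from the comment above
 * (hypothetical helper, simplified from packet_rcv): run the filter
 * on the possibly-shared skb first, so only packets we actually keep
 * pay for a clone; a private skb may then be mangled freely.
 */
static inline struct sk_buff *example_lazy_clone(struct sk_buff *skb,
						 const struct sock *sk,
						 unsigned int *snaplen)
{
	*snaplen = run_filter(skb, sk, skb->len);
	if (!*snaplen)
		return NULL;		/* filtered out, skb untouched */

	if (skb_shared(skb)) {
		struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);

		if (!nskb)
			return NULL;
		kfree_skb(skb);		/* drop our ref on the original */
		skb = nskb;
	}
	return skb;			/* now private: safe to mangle */
}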

static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
		       struct packet_type *pt, struct net_device *orig_dev)
{}

static void tpacket_destruct_skb(struct sk_buff *skb)
{}

static int __packet_snd_vnet_parse(struct virtio_net_hdr *vnet_hdr, size_t len)
{}

static int packet_snd_vnet_parse(struct msghdr *msg, size_t *len,
				 struct virtio_net_hdr *vnet_hdr, int vnet_hdr_sz)
{}

static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
		void *frame, struct net_device *dev, void *data, int tp_len,
		__be16 proto, unsigned char *addr, int hlen, int copylen,
		const struct sockcm_cookie *sockc)
{}

static int tpacket_parse_header(struct packet_sock *po, void *frame,
				int size_max, void **data)
{}

static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
{}

static struct sk_buff *packet_alloc_skb(struct sock *sk, size_t prepad,
				        size_t reserve, size_t len,
				        size_t linear, int noblock,
				        int *err)
{}

static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
{}

static int packet_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
{}

/*
 *	Close a PACKET socket. This is fairly simple. We immediately go
 *	to the 'closed' state and remove our protocol entry from the device list.
 */

static int packet_release(struct socket *sock)
{}

/*
 *	Attach a packet hook.
 */

static int packet_do_bind(struct sock *sk, const char *name, int ifindex,
			  __be16 proto)
{}

/*
 *	Bind a packet socket to a device
 */

static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr,
			    int addr_len)
{}

static int packet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{}

static struct proto packet_proto =;

/*
 *	Create a packet of type SOCK_PACKET.
 */

static int packet_create(struct net *net, struct socket *sock, int protocol,
			 int kern)
{}

/*
 *	Pull a packet from our receive queue and hand it to the user.
 *	If necessary, we block.
 */

static int packet_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
			  int flags)
{}
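
/* Minimal sketch of the dequeue step described above, assuming the
 * three-argument skb_recv_datagram() of current kernels: sleep on the
 * receive queue unless MSG_DONTWAIT is in flags.
 */
static inline struct sk_buff *example_dequeue(struct sock *sk, int flags,
					      int *err)
{
	return skb_recv_datagram(sk, flags, err);
}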

static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr,
			       int peer)
{}

static int packet_getname(struct socket *sock, struct sockaddr *uaddr,
			  int peer)
{}

static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i,
			 int what)
{}

static void packet_dev_mclist_delete(struct net_device *dev,
				     struct packet_mclist **mlp)
{}

static int packet_mc_add(struct sock *sk, struct packet_mreq_max *mreq)
{}

static int packet_mc_drop(struct sock *sk, struct packet_mreq_max *mreq)
{}

static void packet_flush_mclist(struct sock *sk)
{}

static int
packet_setsockopt(struct socket *sock, int level, int optname, sockptr_t optval,
		  unsigned int optlen)
{}

static int packet_getsockopt(struct socket *sock, int level, int optname,
			     char __user *optval, int __user *optlen)
{}

static int packet_notifier(struct notifier_block *this,
			   unsigned long msg, void *ptr)
{}


static int packet_ioctl(struct socket *sock, unsigned int cmd,
			unsigned long arg)
{}

static __poll_t packet_poll(struct file *file, struct socket *sock,
				poll_table *wait)
{}


/* Dirty? Well, I still have not found a better way to account
 * for user mmaps.
 */

static void packet_mm_open(struct vm_area_struct *vma)
{}

static void packet_mm_close(struct vm_area_struct *vma)
{}
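
/* Sketch of the accounting the comment above apologizes for
 * (hypothetical helper mirroring packet_mm_open): every VMA that maps
 * the ring pins po->mapped, so the ring cannot be torn down while any
 * mapping is alive; packet_mm_close() decrements the count.
 */
static inline void example_mm_open(struct vm_area_struct *vma)
{
	struct socket *sock = vma->vm_file->private_data;
	struct sock *sk = sock->sk;

	if (sk)
		atomic_inc(&pkt_sk(sk)->mapped);
}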

static const struct vm_operations_struct packet_mmap_ops =;

static void free_pg_vec(struct pgv *pg_vec, unsigned int order,
			unsigned int len)
{}

static char *alloc_one_pg_vec_page(unsigned long order)
{}

static struct pgv *alloc_pg_vec(struct tpacket_req *req, int order)
{}

static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
		int closing, int tx_ring)
{}

static int packet_mmap(struct file *file, struct socket *sock,
		struct vm_area_struct *vma)
{}

static const struct proto_ops packet_ops_spkt =;

static const struct proto_ops packet_ops =;

static const struct net_proto_family packet_family_ops =;

static struct notifier_block packet_netdev_notifier =;

#ifdef CONFIG_PROC_FS

static void *packet_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
{}

static void *packet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{}

static void packet_seq_stop(struct seq_file *seq, void *v)
	__releases(RCU)
{}

static int packet_seq_show(struct seq_file *seq, void *v)
{}

static const struct seq_operations packet_seq_ops =;
#endif

static int __net_init packet_net_init(struct net *net)
{}

static void __net_exit packet_net_exit(struct net *net)
{}

static struct pernet_operations packet_net_ops =;


static void __exit packet_exit(void)
{}

static int __init packet_init(void)
{}

module_init(packet_init);
module_exit(packet_exit);
MODULE_DESCRIPTION("Packet socket support (AF_PACKET)");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_PACKET);