/* linux/net/netlink/af_netlink.c */

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * NETLINK      Kernel-user communication protocol.
 *
 * 		Authors:	Alan Cox <[email protected]>
 * 				Alexey Kuznetsov <[email protected]>
 * 				Patrick McHardy <[email protected]>
 *
 * Tue Jun 26 14:36:48 MEST 2001 Herbert "herp" Rosmanith
 *                               added netlink_proto_exit
 * Tue Jan 22 18:32:44 BRST 2002 Arnaldo C. de Melo <[email protected]>
 * 				 use nlk_sk, as sk->protinfo is on a diet 8)
 * Fri Jul 22 19:51:12 MEST 2005 Harald Welte <[email protected]>
 * 				 - inc module use count of module that owns
 * 				   the kernel socket in case userspace opens
 * 				   socket of same protocol
 * 				 - remove all module support, since netlink is
 * 				   mandatory if CONFIG_NET=y these days
 */

#include <linux/module.h>

#include <linux/bpf.h>
#include <linux/capability.h>
#include <linux/kernel.h>
#include <linux/filter.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/socket.h>
#include <linux/un.h>
#include <linux/fcntl.h>
#include <linux/termios.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/security.h>
#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/random.h>
#include <linux/bitops.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/audit.h>
#include <linux/mutex.h>
#include <linux/vmalloc.h>
#include <linux/if_arp.h>
#include <linux/rhashtable.h>
#include <asm/cacheflush.h>
#include <linux/hash.h>
#include <linux/net_namespace.h>
#include <linux/nospec.h>
#include <linux/btf_ids.h>

#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/sock.h>
#include <net/scm.h>
#include <net/netlink.h>
#define CREATE_TRACE_POINTS
#include <trace/events/netlink.h>

#include "af_netlink.h"
#include "genetlink.h"

/* NOTE(review): this file is an auto-elided skeleton — every function body
 * below has been stripped to "{}" and every initializer to "=;" (macro
 * arguments were removed too, e.g. EXPORT_SYMBOL_GPL()).  Comments state
 * only what the visible declarations establish; anything inferred from
 * names is hedged and must be confirmed against the full implementation.
 */

/* Multicast-listener bitmap container; fields elided in this skeleton. */
struct listeners {};

/* state bits */
/* Value elided; presumably a bit flag stored in per-socket state. */
#define NETLINK_S_CONGESTED

/* Predicate: is @sk an in-kernel netlink socket? (body elided) */
static inline int netlink_is_kernel(struct sock *sk)
{}

/* Global per-protocol netlink state table (MAX_LINKS entries, judging by
 * nlk_cb_mutex_keys below).  Initializer and export argument elided. */
struct netlink_table *nl_table __read_mostly;
EXPORT_SYMBOL_GPL();

/* Wait queue paired with nl_table_users; presumably where
 * netlink_table_grab() sleeps — confirm against full source. */
static DECLARE_WAIT_QUEUE_HEAD(nl_table_wait);

/* One lockdep class per netlink protocol for the per-socket cb mutex. */
static struct lock_class_key nlk_cb_mutex_keys[MAX_LINKS];

/* Lockdep names matching nlk_cb_mutex_keys; initializer elided. */
static const char *const nlk_cb_mutex_key_strings[MAX_LINKS + 1] =;

/* Forward declaration; definition appears near the bottom of the file. */
static int netlink_dump(struct sock *sk, bool lock_taken);

/* nl_table locking explained:
 * Lookup and traversal are protected with an RCU read-side lock. Insertion
 * and removal are protected with per bucket lock while using RCU list
 * modification primitives and may run in parallel to RCU protected lookups.
 * Destruction of the Netlink socket may only occur *after* nl_table_lock has
 * been acquired — either during or after the socket has been removed from
 * the list — and after an RCU grace period.
 */
/* Global rwlock guarding nl_table; the lock name was elided by the
 * skeleton tool (presumably nl_table_lock — see the __acquires
 * annotations on netlink_table_grab() below). */
DEFINE_RWLOCK();
EXPORT_SYMBOL_GPL();
/* Count of lock-free table users, coordinated with nl_table_wait. */
static atomic_t nl_table_users =;

/* Expansion elided; presumably rcu_dereference_protected() under the
 * table lock — TODO confirm. */
#define nl_deref_protected(X)

/* Notifier chain for netlink socket lifetime events — confirm exact
 * events (e.g. NETLINK_URELEASE) against the full source. */
static BLOCKING_NOTIFIER_HEAD(netlink_chain);


/* rhashtable configuration; the real initializer is at file bottom. */
static const struct rhashtable_params netlink_rhashtable_params;

/* Out-of-line body for the netlink_extack tracepoint (body elided). */
void do_trace_netlink_extack(const char *msg)
{}
EXPORT_SYMBOL();

/* Map a 1-based multicast group number to a 32-bit mask — presumably
 * 1U << (group - 1); body elided. */
static inline u32 netlink_group_mask(u32 group)
{}

/* Build a self-contained copy of @skb suitable for tap delivery
 * (body elided). */
static struct sk_buff *netlink_to_full_skb(const struct sk_buff *skb,
					   gfp_t gfp_mask)
{}

/* pernet id for struct netlink_tap_net below. */
static unsigned int netlink_tap_net_id;

/* Per-netns registry of netlink taps; fields elided. */
struct netlink_tap_net {};

/* Register a tap that observes netlink traffic (body elided). */
int netlink_add_tap(struct netlink_tap *nt)
{}
EXPORT_SYMBOL_GPL();

/* Locked helper removing @nt from the tap list — confirm locking
 * contract against full source (body elided). */
static int __netlink_remove_tap(struct netlink_tap *nt)
{}

/* Public unregister counterpart to netlink_add_tap() (body elided). */
int netlink_remove_tap(struct netlink_tap *nt)
{}
EXPORT_SYMBOL_GPL();

/* pernet init hook for the tap registry (body elided). */
static __net_init int netlink_tap_init_net(struct net *net)
{}

/* pernet_operations wiring for netlink_tap_net; initializer elided. */
static struct pernet_operations netlink_tap_net_ops =;

/* Decide whether @skb should be mirrored to taps (body elided). */
static bool netlink_filter_tap(const struct sk_buff *skb)
{}

/* Deliver one skb to a single tap device (body elided). */
static int __netlink_deliver_tap_skb(struct sk_buff *skb,
				     struct net_device *dev)
{}

/* Walk every tap in @nn, delivering @skb to each (body elided). */
static void __netlink_deliver_tap(struct sk_buff *skb, struct netlink_tap_net *nn)
{}

/* Entry point: mirror @skb to all taps registered in @net (body elided). */
static void netlink_deliver_tap(struct net *net, struct sk_buff *skb)
{}

/* Tap mirroring for kernel<->user traffic between @src and @dst
 * (body elided). */
static void netlink_deliver_tap_kernel(struct sock *dst, struct sock *src,
				       struct sk_buff *skb)
{}

/* Record a receive-queue overrun on @sk — presumably reports ENOBUFS
 * to the owner; body elided. */
static void netlink_overrun(struct sock *sk)
{}

/* Wake readers/writers after receive-queue space frees up (body elided). */
static void netlink_rcv_wake(struct sock *sk)
{}

/* skb destructor releasing per-socket accounting (body elided). */
static void netlink_skb_destructor(struct sk_buff *skb)
{}

/* Charge @skb's memory to @sk and attach the destructor above —
 * confirm accounting details against full source (body elided). */
static void netlink_skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
{}

/* sk->sk_destruct hook: final per-socket teardown (body elided). */
static void netlink_sock_destruct(struct sock *sk)
{}

/* Deferred-work variant of the destructor above (body elided). */
static void netlink_sock_destruct_work(struct work_struct *work)
{}

/* This lock without WQ_FLAG_EXCLUSIVE is good on UP and it is _very_ bad on
 * SMP. Look, when several writers sleep and reader wakes them up, all but one
 * immediately hit write lock and grab all the cpus. Exclusive sleep solves
 * this, _but_ remember, it adds useless work on UP machines.
 */

/* Take the table write lock, waiting out lock-free users counted in
 * nl_table_users (body elided). */
void netlink_table_grab(void)
	__acquires(nl_table_lock)
{}

/* Release the table write lock and wake waiters (body elided). */
void netlink_table_ungrab(void)
	__releases(nl_table_lock)
{}

/* Enter a lock-free read section (bumps nl_table_users — confirm;
 * body elided). */
static inline void
netlink_lock_table(void)
{}

/* Leave a lock-free read section (body elided). */
static inline void
netlink_unlock_table(void)
{}

/* rhashtable lookup key: presumably {net, portid} packed for hashing;
 * fields elided. */
struct netlink_compare_arg
{};

/* Doing sizeof directly may yield 4 extra bytes on 64-bit. */
/* Value elided: the trimmed key length used for hashing/compare. */
#define netlink_compare_arg_len

/* rhashtable obj_cmpfn: match a socket against a compare_arg
 * (body elided). */
static inline int netlink_compare(struct rhashtable_compare_arg *arg,
				  const void *ptr)
{}

/* Populate @arg from @net + @portid (body elided). */
static void netlink_compare_arg_init(struct netlink_compare_arg *arg,
				     struct net *net, u32 portid)
{}

/* Hash-table lookup of a socket by portid within @net (body elided). */
static struct sock *__netlink_lookup(struct netlink_table *table, u32 portid,
				     struct net *net)
{}

/* Insert @sk into @table's rhashtable (body elided). */
static int __netlink_insert(struct netlink_table *table, struct sock *sk)
{}

/* Public lookup across nl_table[protocol] — presumably takes a socket
 * reference on success; confirm (body elided). */
static struct sock *netlink_lookup(struct net *net, int protocol, u32 portid)
{}

/* Forward declaration; the ops table is initialized near file bottom. */
static const struct proto_ops netlink_ops;

/* Recompute the per-group listener bitmaps after @sk's membership
 * changes (body elided). */
static void
netlink_update_listeners(struct sock *sk)
{}

/* Bind @sk to @portid in the hash table (body elided). */
static int netlink_insert(struct sock *sk, u32 portid)
{}

/* Remove @sk from the hash table and group lists (body elided). */
static void netlink_remove(struct sock *sk)
{}

/* struct proto for PF_NETLINK sockets; initializer elided. */
static struct proto netlink_proto =;

/* Allocate and initialize the sock for a new netlink socket
 * (body elided). */
static int __netlink_create(struct net *net, struct socket *sock,
			    int protocol, int kern)
{}

/* net_proto_family create hook: validates @protocol then delegates to
 * __netlink_create() — confirm (body elided). */
static int netlink_create(struct net *net, struct socket *sock, int protocol,
			  int kern)
{}

/* RCU callback dropping the final sock reference (body elided). */
static void deferred_put_nlk_sk(struct rcu_head *head)
{}

/* proto_ops release hook: socket close path (body elided). */
static int netlink_release(struct socket *sock)
{}

/* Pick a free portid for an unbound socket (body elided). */
static int netlink_autobind(struct socket *sock)
{}

/**
 * __netlink_ns_capable - General netlink message capability test
 * @nsp: NETLINK_CB of the socket buffer holding a netlink command from userspace.
 * @user_ns: The user namespace of the capability to use
 * @cap: The capability to use
 *
 * Test to see if the opener of the socket we received the message from
 * had the capability @cap when the netlink socket was created, and
 * whether the sender of the message has the capability @cap in the user
 * namespace @user_ns.
 */
bool __netlink_ns_capable(const struct netlink_skb_parms *nsp,
			struct user_namespace *user_ns, int cap)
{}
EXPORT_SYMBOL();

/**
 * netlink_ns_capable - General netlink message capability test
 * @skb: socket buffer holding a netlink command from userspace
 * @user_ns: The user namespace of the capability to use
 * @cap: The capability to use
 *
 * Test to see if the opener of the socket we received the message from
 * had the capability @cap when the netlink socket was created, and
 * whether the sender of the message has the capability @cap in the user
 * namespace @user_ns.
 */
bool netlink_ns_capable(const struct sk_buff *skb,
			struct user_namespace *user_ns, int cap)
{}
EXPORT_SYMBOL();

/**
 * netlink_capable - Netlink global message capability test
 * @skb: socket buffer holding a netlink command from userspace
 * @cap: The capability to use
 *
 * Test to see if the opener of the socket we received the message from
 * had the capability @cap when the netlink socket was created, and
 * whether the sender of the message has the capability @cap in all user
 * namespaces.
 */
bool netlink_capable(const struct sk_buff *skb, int cap)
{}
EXPORT_SYMBOL();

/**
 * netlink_net_capable - Netlink network namespace message capability test
 * @skb: socket buffer holding a netlink command from userspace
 * @cap: The capability to use
 *
 * Test to see if the opener of the socket we received the message from
 * had the capability @cap when the netlink socket was created, and
 * whether the sender of the message has the capability @cap over the
 * network namespace of the socket we received the message from.
 */
bool netlink_net_capable(const struct sk_buff *skb, int cap)
{}
EXPORT_SYMBOL();

/* Check whether @sock may perform the operation selected by @flag
 * (body elided). */
static inline int netlink_allowed(const struct socket *sock, unsigned int flag)
{}

/* Update @sk's subscription count and table bookkeeping (body elided). */
static void
netlink_update_subscriptions(struct sock *sk, unsigned int subscriptions)
{}

/* Grow the per-socket groups bitmap when the protocol's group count
 * has grown (body elided). */
static int netlink_realloc_groups(struct sock *sk)
{}

/* Roll back partially-applied group binds up to @group (body elided). */
static void netlink_undo_bind(int group, long unsigned int groups,
			      struct sock *sk)
{}

/* proto_ops bind hook: assign portid and multicast groups
 * (body elided). */
static int netlink_bind(struct socket *sock, struct sockaddr *addr,
			int addr_len)
{}

/* proto_ops connect hook: set the default destination portid/groups
 * (body elided). */
static int netlink_connect(struct socket *sock, struct sockaddr *addr,
			   int alen, int flags)
{}

/* proto_ops getname hook: report local or peer address (body elided). */
static int netlink_getname(struct socket *sock, struct sockaddr *addr,
			   int peer)
{}

/* proto_ops ioctl hook (body elided). */
static int netlink_ioctl(struct socket *sock, unsigned int cmd,
			 unsigned long arg)
{}

/* Resolve @portid to a destination socket relative to sender @ssk
 * (body elided). */
static struct sock *netlink_getsockbyportid(struct sock *ssk, u32 portid)
{}

/* Map an open file back to its netlink sock (body elided). */
struct sock *netlink_getsockbyfilp(struct file *filp)
{}

/* Allocate a possibly-vmalloc'd skb for large messages — confirm
 * allocation strategy against full source (body elided). */
struct sk_buff *netlink_alloc_large_skb(unsigned int size, int broadcast)
{}

/*
 * Attach a skb to a netlink socket.
 * The caller must hold a reference to the destination socket. On error, the
 * reference is dropped. The skb is not sent to the destination; only the
 * error checks are performed and memory in the queue is reserved.
 * Return values:
 * < 0: error. skb freed, reference to sock dropped.
 * 0: continue
 * 1: repeat lookup - reference dropped while waiting for socket memory.
 */
/* See the contract comment above: attach @skb to @sk, possibly sleeping
 * up to *@timeo for queue space (body elided). */
int netlink_attachskb(struct sock *sk, struct sk_buff *skb,
		      long *timeo, struct sock *ssk)
{}

/* Queue @skb on @sk's receive queue and notify the reader
 * (body elided). */
static int __netlink_sendskb(struct sock *sk, struct sk_buff *skb)
{}

/* Public wrapper around __netlink_sendskb() — confirm what it adds
 * (e.g. tap delivery) against full source (body elided). */
int netlink_sendskb(struct sock *sk, struct sk_buff *skb)
{}

/* Undo netlink_attachskb(): free @skb and drop the @sk reference
 * (body elided). */
void netlink_detachskb(struct sock *sk, struct sk_buff *skb)
{}

/* Trim excess tailroom / unclone @skb before delivery (body elided). */
static struct sk_buff *netlink_trim(struct sk_buff *skb, gfp_t allocation)
{}

/* Unicast delivery into a kernel socket's input callback (body elided). */
static int netlink_unicast_kernel(struct sock *sk, struct sk_buff *skb,
				  struct sock *ssk)
{}

/* Send @skb from @ssk to the socket bound to @portid; @nonblock selects
 * non-blocking behavior (body elided). */
int netlink_unicast(struct sock *ssk, struct sk_buff *skb,
		    u32 portid, int nonblock)
{}
EXPORT_SYMBOL();

/* Report whether any socket listens on multicast @group of @sk's
 * protocol (body elided). */
int netlink_has_listeners(struct sock *sk, unsigned int group)
{}
EXPORT_SYMBOL();

/* True if the sender enabled strict checking for GET requests
 * (body elided). */
bool netlink_strict_get_check(struct sk_buff *skb)
{}
EXPORT_SYMBOL_GPL();

/* Queue one broadcast copy on @sk if it has room (body elided). */
static int netlink_broadcast_deliver(struct sock *sk, struct sk_buff *skb)
{}

/* Iteration state shared across do_one_broadcast() calls; fields
 * elided. */
struct netlink_broadcast_data {};

/* Deliver the broadcast described by @p to a single socket @sk
 * (body elided). */
static void do_one_broadcast(struct sock *sk,
				    struct netlink_broadcast_data *p)
{}

/* Multicast @skb to all members of @group, skipping @portid and any
 * socket rejected by @filter (body elided). */
int netlink_broadcast_filtered(struct sock *ssk, struct sk_buff *skb,
			       u32 portid,
			       u32 group, gfp_t allocation,
			       netlink_filter_fn filter,
			       void *filter_data)
{}
EXPORT_SYMBOL();

/* Unfiltered convenience wrapper over netlink_broadcast_filtered()
 * (body elided). */
int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, u32 portid,
		      u32 group, gfp_t allocation)
{}
EXPORT_SYMBOL();

/* State for the set_err walk below; fields elided. */
struct netlink_set_err_data {};

/* Apply the error in @p to one candidate socket @sk (body elided). */
static int do_one_set_err(struct sock *sk, struct netlink_set_err_data *p)
{}

/**
 * netlink_set_err - report error to broadcast listeners
 * @ssk: the kernel netlink socket, as returned by netlink_kernel_create()
 * @portid: the PORTID of a process that we want to skip (if any)
 * @group: the broadcast group that will notice the error
 * @code: error code, must be negative (as usual in kernelspace)
 *
 * This function returns the number of broadcast listeners that have set the
 * NETLINK_NO_ENOBUFS socket option.
 */
int netlink_set_err(struct sock *ssk, u32 portid, u32 group, int code)
{}
EXPORT_SYMBOL();

/* must be called with netlink table grabbed */
/* Add (@is_new) or remove @nlk's membership in multicast @group
 * (body elided). */
static void netlink_update_socket_mc(struct netlink_sock *nlk,
				     unsigned int group,
				     int is_new)
{}

/* proto_ops setsockopt hook for SOL_NETLINK options (body elided). */
static int netlink_setsockopt(struct socket *sock, int level, int optname,
			      sockptr_t optval, unsigned int optlen)
{}

/* proto_ops getsockopt hook for SOL_NETLINK options (body elided). */
static int netlink_getsockopt(struct socket *sock, int level, int optname,
			      char __user *optval, int __user *optlen)
{}

/* Emit NETLINK_PKTINFO ancillary data for @skb (body elided). */
static void netlink_cmsg_recv_pktinfo(struct msghdr *msg, struct sk_buff *skb)
{}

/* Emit the listen-all-nsid ancillary data for @skb — confirm cmsg type
 * against full source (body elided). */
static void netlink_cmsg_listen_all_nsid(struct sock *sk, struct msghdr *msg,
					 struct sk_buff *skb)
{}

/* proto_ops sendmsg hook: userspace transmit path (body elided). */
static int netlink_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
{}

/* proto_ops recvmsg hook: userspace receive path (body elided). */
static int netlink_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
			   int flags)
{}

/* sk_data_ready override for kernel sockets (body elided). */
static void netlink_data_ready(struct sock *sk)
{}

/*
 *	We export these functions to other modules. They provide a
 *	complete set of kernel non-blocking support for message
 *	queueing.
 */

/* Create the kernel-side netlink socket for protocol @unit, owned by
 * @module and configured by @cfg (body elided). */
struct sock *
__netlink_kernel_create(struct net *net, int unit, struct module *module,
			struct netlink_kernel_cfg *cfg)
{}
EXPORT_SYMBOL();

/* Tear down a socket created by netlink_kernel_create() (body elided). */
void
netlink_kernel_release(struct sock *sk)
{}
EXPORT_SYMBOL();

/* Resize listener bitmaps for @sk's protocol; caller presumably holds
 * the table lock — confirm (body elided). */
int __netlink_change_ngroups(struct sock *sk, unsigned int groups)
{}

/**
 * netlink_change_ngroups - change number of multicast groups
 *
 * This changes the number of multicast groups that are available
 * on a certain netlink family. Note that it is not possible to
 * change the number of groups to below 32. Also note that it does
 * not implicitly call netlink_clear_multicast_users() when the
 * number of groups is reduced.
 *
 * @sk: The kernel netlink socket, as returned by netlink_kernel_create().
 * @groups: The new number of groups.
 */
int netlink_change_ngroups(struct sock *sk, unsigned int groups)
{}

/* Drop every member of @group on @ksk's protocol (body elided). */
void __netlink_clear_multicast_users(struct sock *ksk, unsigned int group)
{}

/* Reserve and fill a netlink message header in @skb (body elided). */
struct nlmsghdr *
__nlmsg_put(struct sk_buff *skb, u32 portid, u32 seq, int type, int len, int flags)
{}
EXPORT_SYMBOL();

/* Size needed for extended-ACK TLVs for @err/@extack (body elided). */
static size_t
netlink_ack_tlv_len(struct netlink_sock *nlk, int err,
		    const struct netlink_ext_ack *extack)
{}

/* Append extended-ACK TLVs to the ack message in @skb (body elided). */
static void
netlink_ack_tlv_fill(struct sk_buff *in_skb, struct sk_buff *skb,
		     const struct nlmsghdr *nlh, int err,
		     const struct netlink_ext_ack *extack)
{}

/*
 * It looks a bit ugly.
 * It would be better to create kernel thread.
 */

/* Emit the final NLMSG_DONE message for a dump and release @cb
 * (body elided). */
static int netlink_dump_done(struct netlink_sock *nlk, struct sk_buff *skb,
			     struct netlink_callback *cb,
			     struct netlink_ext_ack *extack)
{}

/* Run one iteration of an in-progress dump on @sk; @lock_taken tells
 * whether the caller already holds the cb mutex — confirm (body elided). */
static int netlink_dump(struct sock *sk, bool lock_taken)
{}

/* Start a dump described by @control in response to @nlh (body elided). */
int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
			 const struct nlmsghdr *nlh,
			 struct netlink_dump_control *control)
{}
EXPORT_SYMBOL();

/* Send an (extended) ACK or error for @nlh back to the sender
 * (body elided). */
void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err,
		 const struct netlink_ext_ack *extack)
{}
EXPORT_SYMBOL();

/* Iterate the messages in @skb, dispatching each to @cb and acking as
 * needed (body elided). */
int netlink_rcv_skb(struct sk_buff *skb, int (*cb)(struct sk_buff *,
						   struct nlmsghdr *,
						   struct netlink_ext_ack *))
{}
EXPORT_SYMBOL();

/**
 * nlmsg_notify - send a notification netlink message
 * @sk: netlink socket to use
 * @skb: notification message
 * @portid: destination netlink portid for reports or 0
 * @group: destination multicast group or 0
 * @report: 1 to report back, 0 to disable
 * @flags: allocation flags
 */
int nlmsg_notify(struct sock *sk, struct sk_buff *skb, u32 portid,
		 unsigned int group, int report, gfp_t flags)
{}
EXPORT_SYMBOL();
EXPORT_SYMBOL();

#ifdef CONFIG_PROC_FS
/* /proc/net/netlink iterator state; fields elided. */
struct nl_seq_iter {};

/* Begin an rhashtable walk for the seq iterator (body elided). */
static void netlink_walk_start(struct nl_seq_iter *iter)
{}

/* End the rhashtable walk (body elided). */
static void netlink_walk_stop(struct nl_seq_iter *iter)
{}

/* Advance to the next socket across all protocol tables (body elided). */
static void *__netlink_seq_next(struct seq_file *seq)
{}

/* seq_operations start hook (body elided). */
static void *netlink_seq_start(struct seq_file *seq, loff_t *posp)
	__acquires(RCU)
{}

/* seq_operations next hook (body elided). */
static void *netlink_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{}

/* Native stop hook; wrapped by netlink_seq_stop() below (body elided). */
static void netlink_native_seq_stop(struct seq_file *seq, void *v)
{}


/* Native show hook printing one socket line (body elided). */
static int netlink_native_seq_show(struct seq_file *seq, void *v)
{}

#ifdef CONFIG_BPF_SYSCALL
/* bpf_iter context for netlink sockets; fields elided. */
struct bpf_iter__netlink {};

DEFINE_BPF_ITER_FUNC(netlink, struct bpf_iter_meta *meta, struct netlink_sock *sk)

/* Invoke the attached BPF iterator program for one socket
 * (body elided). */
static int netlink_prog_seq_show(struct bpf_prog *prog,
				  struct bpf_iter_meta *meta,
				  void *v)
{}

/* show hook dispatching to BPF or the native printer (body elided). */
static int netlink_seq_show(struct seq_file *seq, void *v)
{}

/* stop hook mirroring the BPF-aware show path (body elided). */
static void netlink_seq_stop(struct seq_file *seq, void *v)
{}
#else
/* Without BPF_SYSCALL, show is just the native printer. */
static int netlink_seq_show(struct seq_file *seq, void *v)
{
	return netlink_native_seq_show(seq, v);
}

/* Without BPF_SYSCALL, stop is just the native stop. */
static void netlink_seq_stop(struct seq_file *seq, void *v)
{
	netlink_native_seq_stop(seq, v);
}
#endif

/* seq_operations table for /proc/net/netlink; initializer elided. */
static const struct seq_operations netlink_seq_ops =;
#endif

/* Subscribe @nb to netlink socket events on netlink_chain
 * (body elided). */
int netlink_register_notifier(struct notifier_block *nb)
{}
EXPORT_SYMBOL();

/* Unsubscribe @nb from netlink_chain (body elided). */
int netlink_unregister_notifier(struct notifier_block *nb)
{}
EXPORT_SYMBOL();

/* proto_ops table wiring the hooks defined above; initializer elided. */
static const struct proto_ops netlink_ops =;

/* PF_NETLINK family registration record; initializer elided. */
static const struct net_proto_family netlink_family_ops =;

/* Per-netns init: presumably registers /proc/net/netlink — confirm
 * (body elided). */
static int __net_init netlink_net_init(struct net *net)
{}

/* Per-netns exit counterpart to netlink_net_init() (body elided). */
static void __net_exit netlink_net_exit(struct net *net)
{}

/* Set up the NETLINK_USERSOCK table entry at boot (body elided). */
static void __init netlink_add_usersock_entry(void)
{}

/* pernet_operations for the proc entries above; initializer elided. */
static struct pernet_operations __net_initdata netlink_net_ops =;

/* rhashtable hashfn over the {net, portid} key (body elided). */
static inline u32 netlink_hash(const void *data, u32 len, u32 seed)
{}

/* rhashtable parameters tying together netlink_hash()/netlink_compare();
 * initializer elided. */
static const struct rhashtable_params netlink_rhashtable_params =;

#if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
/* BTF id list for struct netlink_sock; argument elided. */
BTF_ID_LIST(btf_netlink_sock_id)
BTF_ID()

/* seq_info / reg_info describing the netlink bpf_iter target;
 * initializers elided. */
static const struct bpf_iter_seq_info netlink_seq_info =;

static struct bpf_iter_reg netlink_reg_info =;

/* Register the netlink bpf_iter target at boot (body elided). */
static int __init bpf_iter_register(void)
{}
#endif

/* Subsystem init: registers the proto, family, pernet ops and hash
 * tables (body elided). */
static int __init netlink_proto_init(void)
{}

core_initcall(netlink_proto_init);