linux/net/ipv4/ipmr.c

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	IP multicast routing support for mrouted 3.6/3.8
 *
 *		(c) 1995 Alan Cox, <[email protected]>
 *	  Linux Consultancy and Custom Driver Development
 *
 *	Fixes:
 *	Michael Chastain	:	Incorrect size of copying.
 *	Alan Cox		:	Added the cache manager code
 *	Alan Cox		:	Fixed the clone/copy bug and device race.
 *	Mike McLagan		:	Routing by source
 *	Malcolm Beattie		:	Buffer handling fixes.
 *	Alexey Kuznetsov	:	Double buffer free and other fixes.
 *	SVR Anand		:	Fixed several multicast bugs and problems.
 *	Alexey Kuznetsov	:	Status, optimisations and more.
 *	Brad Parker		:	Better behaviour on mrouted upcall
 *					overflow.
 *      Carlos Picoto           :       PIMv1 Support
 *	Pavlin Ivanov Radoslavov:	PIMv2 Registers must checksum only PIM header
 *					Relax this requirement to work with older peers.
 */

#include <linux/uaccess.h>
#include <linux/types.h>
#include <linux/cache.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/mroute.h>
#include <linux/init.h>
#include <linux/if_ether.h>
#include <linux/slab.h>
#include <net/net_namespace.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/route.h>
#include <net/icmp.h>
#include <net/udp.h>
#include <net/raw.h>
#include <linux/notifier.h>
#include <linux/if_arp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/compat.h>
#include <linux/export.h>
#include <linux/rhashtable.h>
#include <net/ip_tunnels.h>
#include <net/checksum.h>
#include <net/netlink.h>
#include <net/fib_rules.h>
#include <linux/netconf.h>
#include <net/rtnh.h>

#include <linux/nospec.h>

struct ipmr_rule {};

struct ipmr_result {};

/* Big lock, protecting the vif table, the mrt cache and the mroute socket
 * state. Note that changes are serialised via rtnl_lock.
 */

static DEFINE_SPINLOCK(mrt_lock);
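
/* Illustrative sketch (not part of the upstream file): the control paths
 * already hold rtnl_lock, so a writer only wraps the actual table update
 * in mrt_lock, e.g.
 *
 *	spin_lock(&mrt_lock);
 *	...update the vif table or the mroute socket pointer...
 *	spin_unlock(&mrt_lock);
 *
 * while the forwarding path reads the same state under rcu_read_lock()
 * without ever taking mrt_lock.
 */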

static struct net_device *vif_dev_read(const struct vif_device *vif)
{}

/* Multicast router control variables */

/* Special spinlock for queue of unresolved entries */
static DEFINE_SPINLOCK(mfc_unres_lock);

/* We return to Alan's original scheme. The hash table of resolved
 * entries is changed only in process context and is protected
 * by the weak lock mrt_lock. The queue of unresolved entries is
 * protected by the strong spinlock mfc_unres_lock.
 *
 * With this arrangement the data path is entirely free of exclusive locks.
 */

static struct kmem_cache *mrt_cachep __ro_after_init;
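
/* Illustrative sketch (not part of the upstream file): under the scheme
 * above the receive path does no more than an RCU-protected lookup in the
 * resolved cache, roughly
 *
 *	rcu_read_lock();
 *	cache = ipmr_cache_find(mrt, ip_hdr(skb)->saddr, ip_hdr(skb)->daddr);
 *	if (cache)
 *		ip_mr_forward(net, mrt, dev, skb, cache, local);
 *	rcu_read_unlock();
 *
 * and only packets without a resolved entry touch the strong
 * mfc_unres_lock spinlock, in ipmr_cache_unresolved() below.
 */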

static struct mr_table *ipmr_new_table(struct net *net, u32 id);
static void ipmr_free_table(struct mr_table *mrt);

static void ip_mr_forward(struct net *net, struct mr_table *mrt,
			  struct net_device *dev, struct sk_buff *skb,
			  struct mfc_cache *cache, int local);
static int ipmr_cache_report(const struct mr_table *mrt,
			     struct sk_buff *pkt, vifi_t vifi, int assert);
static void mroute_netlink_event(struct mr_table *mrt, struct mfc_cache *mfc,
				 int cmd);
static void igmpmsg_netlink_event(const struct mr_table *mrt, struct sk_buff *pkt);
static void mroute_clean_tables(struct mr_table *mrt, int flags);
static void ipmr_expire_process(struct timer_list *t);

#ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
#define ipmr_for_each_table(mrt, net)

static struct mr_table *ipmr_mr_table_iter(struct net *net,
					   struct mr_table *mrt)
{}

static struct mr_table *ipmr_get_table(struct net *net, u32 id)
{}

static int ipmr_fib_lookup(struct net *net, struct flowi4 *flp4,
			   struct mr_table **mrt)
{}

static int ipmr_rule_action(struct fib_rule *rule, struct flowi *flp,
			    int flags, struct fib_lookup_arg *arg)
{}

static int ipmr_rule_match(struct fib_rule *rule, struct flowi *fl, int flags)
{}

static int ipmr_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
			       struct fib_rule_hdr *frh, struct nlattr **tb,
			       struct netlink_ext_ack *extack)
{}

static int ipmr_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh,
			     struct nlattr **tb)
{}

static int ipmr_rule_fill(struct fib_rule *rule, struct sk_buff *skb,
			  struct fib_rule_hdr *frh)
{}

static const struct fib_rules_ops __net_initconst ipmr_rules_ops_template =;

static int __net_init ipmr_rules_init(struct net *net)
{}

static void __net_exit ipmr_rules_exit(struct net *net)
{}

static int ipmr_rules_dump(struct net *net, struct notifier_block *nb,
			   struct netlink_ext_ack *extack)
{}

static unsigned int ipmr_rules_seq_read(struct net *net)
{}

bool ipmr_rule_default(const struct fib_rule *rule)
{}
EXPORT_SYMBOL(ipmr_rule_default);
#else
#define ipmr_for_each_table(mrt, net)

static struct mr_table *ipmr_mr_table_iter(struct net *net,
					   struct mr_table *mrt)
{
	if (!mrt)
		return net->ipv4.mrt;
	return NULL;
}

static struct mr_table *ipmr_get_table(struct net *net, u32 id)
{
	return net->ipv4.mrt;
}

static int ipmr_fib_lookup(struct net *net, struct flowi4 *flp4,
			   struct mr_table **mrt)
{
	*mrt = net->ipv4.mrt;
	return 0;
}

static int __net_init ipmr_rules_init(struct net *net)
{
	struct mr_table *mrt;

	mrt = ipmr_new_table(net, RT_TABLE_DEFAULT);
	if (IS_ERR(mrt))
		return PTR_ERR(mrt);
	net->ipv4.mrt = mrt;
	return 0;
}

static void __net_exit ipmr_rules_exit(struct net *net)
{
	ASSERT_RTNL();
	ipmr_free_table(net->ipv4.mrt);
	net->ipv4.mrt = NULL;
}

static int ipmr_rules_dump(struct net *net, struct notifier_block *nb,
			   struct netlink_ext_ack *extack)
{
	return 0;
}

static unsigned int ipmr_rules_seq_read(struct net *net)
{
	return 0;
}

bool ipmr_rule_default(const struct fib_rule *rule)
{
	return true;
}
EXPORT_SYMBOL(ipmr_rule_default);
#endif

static inline int ipmr_hash_cmp(struct rhashtable_compare_arg *arg,
				const void *ptr)
{}

static const struct rhashtable_params ipmr_rht_params =;

static void ipmr_new_table_set(struct mr_table *mrt,
			       struct net *net)
{}

static struct mfc_cache_cmp_arg ipmr_mr_table_ops_cmparg_any =;

static struct mr_table_ops ipmr_mr_table_ops =;

static struct mr_table *ipmr_new_table(struct net *net, u32 id)
{}

static void ipmr_free_table(struct mr_table *mrt)
{}

/* Service routines creating virtual interfaces: DVMRP tunnels and PIMREG */

/* Initialize ipmr pimreg/tunnel in_device */
static bool ipmr_init_vif_indev(const struct net_device *dev)
{}

static struct net_device *ipmr_new_tunnel(struct net *net, struct vifctl *v)
{}

#if defined(CONFIG_IP_PIMSM_V1) || defined(CONFIG_IP_PIMSM_V2)
static netdev_tx_t reg_vif_xmit(struct sk_buff *skb, struct net_device *dev)
{}

static int reg_vif_get_iflink(const struct net_device *dev)
{}

static const struct net_device_ops reg_vif_netdev_ops =;

static void reg_vif_setup(struct net_device *dev)
{}

static struct net_device *ipmr_reg_vif(struct net *net, struct mr_table *mrt)
{}

/* called with rcu_read_lock() */
static int __pim_rcv(struct mr_table *mrt, struct sk_buff *skb,
		     unsigned int pimlen)
{}
#else
static struct net_device *ipmr_reg_vif(struct net *net, struct mr_table *mrt)
{
	return NULL;
}
#endif

static int call_ipmr_vif_entry_notifiers(struct net *net,
					 enum fib_event_type event_type,
					 struct vif_device *vif,
					 struct net_device *vif_dev,
					 vifi_t vif_index, u32 tb_id)
{}

static int call_ipmr_mfc_entry_notifiers(struct net *net,
					 enum fib_event_type event_type,
					 struct mfc_cache *mfc, u32 tb_id)
{}

/**
 *	vif_delete - Delete a VIF entry
 *	@mrt: Table to delete from
 *	@vifi: VIF identifier to delete
 *	@notify: Set to 1 if the caller is a notifier_call
 *	@head: if unregistering the VIF, place it on this queue
 */
static int vif_delete(struct mr_table *mrt, int vifi, int notify,
		      struct list_head *head)
{}
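
/* Illustrative note (not in the upstream file), based on how this helper is
 * used in current kernels: MRT_DEL_VIF handling calls
 * vif_delete(mrt, vifi, 0, NULL), the netdevice notifier below calls it
 * with notify == 1 for every VIF bound to the vanishing device, and
 * mroute_clean_tables() passes a list head so the unregistered devices can
 * be released in one unregister_netdevice_many() batch.
 */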

static void ipmr_cache_free_rcu(struct rcu_head *head)
{}

static void ipmr_cache_free(struct mfc_cache *c)
{}

/* Destroy an unresolved cache entry, killing queued skbs
 * and reporting error to netlink readers.
 */
static void ipmr_destroy_unres(struct mr_table *mrt, struct mfc_cache *c)
{}

/* Timer process for the unresolved queue. */
static void ipmr_expire_process(struct timer_list *t)
{}

/* Fill the oifs list. Called with mrt_lock held. */
static void ipmr_update_thresholds(struct mr_table *mrt, struct mr_mfc *cache,
				   unsigned char *ttls)
{}

static int vif_add(struct net *net, struct mr_table *mrt,
		   struct vifctl *vifc, int mrtsock)
{}
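
/* Illustrative user-space sketch (not part of this file): vif_add() consumes
 * the struct vifctl that an mrouted-style daemon passes via MRT_ADD_VIF on
 * its mroute socket; "mrouter_fd" and "local_ip" below are assumed names.
 *
 *	struct vifctl vc = {
 *		.vifc_vifi      = 0,
 *		.vifc_flags     = 0,
 *		.vifc_threshold = 1,
 *		.vifc_lcl_addr  = { .s_addr = local_ip },
 *	};
 *	setsockopt(mrouter_fd, IPPROTO_IP, MRT_ADD_VIF, &vc, sizeof(vc));
 *
 * Setting VIFF_TUNNEL in vifc_flags (plus vifc_rmt_addr) is what makes
 * ipmr_new_tunnel() above build the corresponding DVMRP/IPIP tunnel device;
 * VIFF_REGISTER requests the PIM register pseudo-device instead.
 */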

/* called with rcu_read_lock() */
static struct mfc_cache *ipmr_cache_find(struct mr_table *mrt,
					 __be32 origin,
					 __be32 mcastgrp)
{}

/* Look for a (*,G) entry */
static struct mfc_cache *ipmr_cache_find_any(struct mr_table *mrt,
					     __be32 mcastgrp, int vifi)
{}

/* Look for a (S,G,iif) entry if parent != -1 */
static struct mfc_cache *ipmr_cache_find_parent(struct mr_table *mrt,
						__be32 origin, __be32 mcastgrp,
						int parent)
{}

/* Allocate a multicast cache entry */
static struct mfc_cache *ipmr_cache_alloc(void)
{}

static struct mfc_cache *ipmr_cache_alloc_unres(void)
{}

/* A cache entry has gone into a resolved state from queued */
static void ipmr_cache_resolve(struct net *net, struct mr_table *mrt,
			       struct mfc_cache *uc, struct mfc_cache *c)
{}

/* Bounce a cache query up to mrouted and netlink.
 *
 * Called under rcu_read_lock().
 */
static int ipmr_cache_report(const struct mr_table *mrt,
			     struct sk_buff *pkt, vifi_t vifi, int assert)
{}
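
/* Illustrative sketch (not part of this file): the copy handed up to the
 * mroute socket starts with struct igmpmsg (linux/mroute.h), so a daemon
 * typically reacts along these lines ("buf" is an assumed name):
 *
 *	struct igmpmsg *im = (struct igmpmsg *)buf;
 *
 *	switch (im->im_msgtype) {
 *	case IGMPMSG_NOCACHE:
 *		...resolve (im->im_src, im->im_dst), then MRT_ADD_MFC...
 *		break;
 *	case IGMPMSG_WRONGVIF:
 *		...candidate for a PIM assert...
 *		break;
 *	}
 */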

/* Queue a packet for resolution. It gets a locked cache entry! */
/* Called under rcu_read_lock() */
static int ipmr_cache_unresolved(struct mr_table *mrt, vifi_t vifi,
				 struct sk_buff *skb, struct net_device *dev)
{}

/* MFC cache manipulation by user space mroute daemon */

static int ipmr_mfc_delete(struct mr_table *mrt, struct mfcctl *mfc, int parent)
{}

static int ipmr_mfc_add(struct net *net, struct mr_table *mrt,
			struct mfcctl *mfc, int mrtsock, int parent)
{}
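
/* Illustrative user-space sketch (not part of this file): the mfcctl passed
 * in through MRT_ADD_MFC names the (S,G) pair, the expected input VIF and a
 * TTL threshold per output VIF; "mrouter_fd", "source_ip", "group_ip",
 * "in_vif" and "out_vif" are assumed names.
 *
 *	struct mfcctl mc = {};
 *
 *	mc.mfcc_origin.s_addr   = source_ip;
 *	mc.mfcc_mcastgrp.s_addr = group_ip;
 *	mc.mfcc_parent          = in_vif;
 *	mc.mfcc_ttls[out_vif]   = 1;
 *	setsockopt(mrouter_fd, IPPROTO_IP, MRT_ADD_MFC, &mc, sizeof(mc));
 *
 * A TTL of 0 in mfcc_ttls[] means the corresponding VIF is not part of the
 * output set for this entry.
 */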

/* Close the multicast socket, and clear the vif tables etc */
static void mroute_clean_tables(struct mr_table *mrt, int flags)
{}

/* called from ip_ra_control(), before an RCU grace period,
 * we don't need to call synchronize_rcu() here
 */
static void mrtsock_destruct(struct sock *sk)
{}

/* Socket options and virtual interface manipulation. The whole
 * virtual interface system is a complete heap, but unfortunately
 * that's how BSD mrouted happens to think. Maybe one day with a proper
 * MOSPF/PIM router set up we can clean this up.
 */

int ip_mroute_setsockopt(struct sock *sk, int optname, sockptr_t optval,
			 unsigned int optlen)
{}
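
/* Illustrative user-space sketch (not part of this file): all of the MRT_*
 * options above are driven from a single raw IGMP socket that the daemon
 * turns into the mroute socket with MRT_INIT; MRT_DONE tears the state
 * down again.
 *
 *	int one = 1;
 *	int fd  = socket(AF_INET, SOCK_RAW, IPPROTO_IGMP);
 *
 *	setsockopt(fd, IPPROTO_IP, MRT_INIT, &one, sizeof(one));
 *	...MRT_ADD_VIF / MRT_ADD_MFC / MRT_ASSERT / MRT_PIM as needed...
 *	setsockopt(fd, IPPROTO_IP, MRT_DONE, NULL, 0);
 */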

/* Execute if this ioctl is a special mroute ioctl */
int ipmr_sk_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
{}

/* Getsockopt support for the multicast routing system. */
int ip_mroute_getsockopt(struct sock *sk, int optname, sockptr_t optval,
			 sockptr_t optlen)
{}

/* The IP multicast ioctl support routines. */
int ipmr_ioctl(struct sock *sk, int cmd, void *arg)
{}

#ifdef CONFIG_COMPAT
struct compat_sioc_sg_req {};

struct compat_sioc_vif_req {};

int ipmr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
{}
#endif

static int ipmr_device_event(struct notifier_block *this, unsigned long event, void *ptr)
{}

static struct notifier_block ip_mr_notifier =;

/* Encapsulate a packet by attaching a valid IPIP header to it.
 * This avoids tunnel drivers and other mess and gives us the speed so
 * important for multicast video.
 */
static void ip_encap(struct net *net, struct sk_buff *skb,
		     __be32 saddr, __be32 daddr)
{}
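
/* Illustrative sketch (not the upstream body): the outer header is a plain
 * 20-byte IPv4 header carrying IPPROTO_IPIP, with saddr/daddr taken from
 * the VIF's local and remote tunnel endpoints, roughly
 *
 *	skb_push(skb, sizeof(struct iphdr));
 *	skb_reset_network_header(skb);
 *	iph = ip_hdr(skb);
 *	iph->version  = 4;
 *	iph->ihl      = 5;
 *	iph->protocol = IPPROTO_IPIP;
 *	iph->saddr    = saddr;
 *	iph->daddr    = daddr;
 *	iph->tot_len  = htons(skb->len);
 *	ip_send_check(iph);
 *
 * (the real function also copies tos/ttl from the inner header and picks
 * an IP ID).
 */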

static inline int ipmr_forward_finish(struct net *net, struct sock *sk,
				      struct sk_buff *skb)
{}

#ifdef CONFIG_NET_SWITCHDEV
static bool ipmr_forward_offloaded(struct sk_buff *skb, struct mr_table *mrt,
				   int in_vifi, int out_vifi)
{}
#else
static bool ipmr_forward_offloaded(struct sk_buff *skb, struct mr_table *mrt,
				   int in_vifi, int out_vifi)
{
	return false;
}
#endif

/* Processing handlers for ipmr_forward, under rcu_read_lock() */

static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt,
			    int in_vifi, struct sk_buff *skb, int vifi)
{}

/* Called with mrt_lock or rcu_read_lock() */
static int ipmr_find_vif(const struct mr_table *mrt, struct net_device *dev)
{}

/* "local" means that we should preserve one skb (for local delivery) */
/* Called under rcu_read_lock() */
static void ip_mr_forward(struct net *net, struct mr_table *mrt,
			  struct net_device *dev, struct sk_buff *skb,
			  struct mfc_cache *c, int local)
{}

static struct mr_table *ipmr_rt_fib_lookup(struct net *net, struct sk_buff *skb)
{}

/* Multicast packets for forwarding arrive here
 * Called with rcu_read_lock();
 */
int ip_mr_input(struct sk_buff *skb)
{}
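
/* Illustrative note (not in the upstream file): this entry point is reached
 * through the dst input handler; ip_route_input_mc() in net/ipv4/route.c
 * points multicast routes at ip_mr_input() when the incoming device has
 * mc_forwarding enabled, so ip_rcv_finish() hands forwardable multicast
 * straight to this function, which then takes care of any local delivery
 * itself.
 */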

#ifdef CONFIG_IP_PIMSM_V1
/* Handle IGMP messages of PIMv1 */
int pim_rcv_v1(struct sk_buff *skb)
{}
#endif

#ifdef CONFIG_IP_PIMSM_V2
static int pim_rcv(struct sk_buff *skb)
{}
#endif

int ipmr_get_route(struct net *net, struct sk_buff *skb,
		   __be32 saddr, __be32 daddr,
		   struct rtmsg *rtm, u32 portid)
{}

static int ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
			    u32 portid, u32 seq, struct mfc_cache *c, int cmd,
			    int flags)
{}

static int _ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
			     u32 portid, u32 seq, struct mr_mfc *c, int cmd,
			     int flags)
{}

static size_t mroute_msgsize(bool unresolved, int maxvif)
{}

static void mroute_netlink_event(struct mr_table *mrt, struct mfc_cache *mfc,
				 int cmd)
{}

static size_t igmpmsg_netlink_msgsize(size_t payloadlen)
{}

static void igmpmsg_netlink_event(const struct mr_table *mrt, struct sk_buff *pkt)
{}

static int ipmr_rtm_valid_getroute_req(struct sk_buff *skb,
				       const struct nlmsghdr *nlh,
				       struct nlattr **tb,
				       struct netlink_ext_ack *extack)
{}

static int ipmr_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
			     struct netlink_ext_ack *extack)
{}

static int ipmr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb)
{}

static const struct nla_policy rtm_ipmr_policy[RTA_MAX + 1] =;

static bool ipmr_rtm_validate_proto(unsigned char rtm_protocol)
{}

static int ipmr_nla_get_ttls(const struct nlattr *nla, struct mfcctl *mfcc)
{}

/* returns < 0 on error, 0 for ADD_MFC and 1 for ADD_MFC_PROXY */
static int rtm_to_ipmr_mfcc(struct net *net, struct nlmsghdr *nlh,
			    struct mfcctl *mfcc, int *mrtsock,
			    struct mr_table **mrtret,
			    struct netlink_ext_ack *extack)
{}

/* takes care of both newroute and delroute */
static int ipmr_rtm_route(struct sk_buff *skb, struct nlmsghdr *nlh,
			  struct netlink_ext_ack *extack)
{}

static bool ipmr_fill_table(struct mr_table *mrt, struct sk_buff *skb)
{}

static bool ipmr_fill_vif(struct mr_table *mrt, u32 vifid, struct sk_buff *skb)
{}

static int ipmr_valid_dumplink(const struct nlmsghdr *nlh,
			       struct netlink_ext_ack *extack)
{}

static int ipmr_rtm_dumplink(struct sk_buff *skb, struct netlink_callback *cb)
{}

#ifdef CONFIG_PROC_FS
/* The /proc interfaces to multicast routing:
 * /proc/net/ip_mr_cache & /proc/net/ip_mr_vif
 */
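
/* Illustrative note (not in the upstream file): as printed by the seq_show
 * handlers below in current kernels, each line of /proc/net/ip_mr_vif
 * reports Interface, BytesIn, PktsIn, BytesOut, PktsOut, Flags, Local and
 * Remote for one VIF, while /proc/net/ip_mr_cache reports Group, Origin,
 * Iif, Pkts, Bytes, Wrong and the list of output "vif:ttl" pairs for one
 * cache entry.
 */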

static void *ipmr_vif_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
{}

static void ipmr_vif_seq_stop(struct seq_file *seq, void *v)
	__releases(RCU)
{}

static int ipmr_vif_seq_show(struct seq_file *seq, void *v)
{}

static const struct seq_operations ipmr_vif_seq_ops =;

static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos)
{}

static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
{}

static const struct seq_operations ipmr_mfc_seq_ops =;
#endif

#ifdef CONFIG_IP_PIMSM_V2
static const struct net_protocol pim_protocol =;
#endif

static unsigned int ipmr_seq_read(struct net *net)
{}

static int ipmr_dump(struct net *net, struct notifier_block *nb,
		     struct netlink_ext_ack *extack)
{}

static const struct fib_notifier_ops ipmr_notifier_ops_template =;

static int __net_init ipmr_notifier_init(struct net *net)
{}

static void __net_exit ipmr_notifier_exit(struct net *net)
{}

/* Setup for IP multicast routing */
static int __net_init ipmr_net_init(struct net *net)
{}

static void __net_exit ipmr_net_exit(struct net *net)
{}

static void __net_exit ipmr_net_exit_batch(struct list_head *net_list)
{}

static struct pernet_operations ipmr_net_ops =;

int __init ip_mr_init(void)
{}