/* linux/net/openvswitch/datapath.c */

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2007-2014 Nicira, Inc.
 */

#define pr_fmt(fmt)

#include <linux/init.h>
#include <linux/module.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/jhash.h>
#include <linux/delay.h>
#include <linux/time.h>
#include <linux/etherdevice.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/ethtool.h>
#include <linux/wait.h>
#include <asm/div64.h>
#include <linux/highmem.h>
#include <linux/netfilter_bridge.h>
#include <linux/netfilter_ipv4.h>
#include <linux/inetdevice.h>
#include <linux/list.h>
#include <linux/openvswitch.h>
#include <linux/rculist.h>
#include <linux/dmi.h>
#include <net/genetlink.h>
#include <net/gso.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/pkt_cls.h>

#include "datapath.h"
#include "drop.h"
#include "flow.h"
#include "flow_table.h"
#include "flow_netlink.h"
#include "meter.h"
#include "openvswitch_trace.h"
#include "vport-internal_dev.h"
#include "vport-netdev.h"

/* Index into each struct net's generic-pointer array for OVS per-netns state. */
unsigned int ovs_net_id __read_mostly;

/* Forward declarations; the genl family definitions appear further down. */
static struct genl_family dp_packet_genl_family;
static struct genl_family dp_flow_genl_family;
static struct genl_family dp_datapath_genl_family;

static const struct nla_policy flow_policy[];

/* NOTE(review): the initializers below (and every bare "=;" in this file)
 * have been elided by whatever tool produced this skeleton — the file will
 * not compile as-is.  Refer to the full source for the actual contents.
 */
static const struct genl_multicast_group ovs_dp_flow_multicast_group =;

static const struct genl_multicast_group ovs_dp_datapath_multicast_group =;

static const struct genl_multicast_group ovs_dp_vport_multicast_group =;

/* Check if need to build a reply message.
 * OVS userspace sets the NLM_F_ECHO flag if it needs the reply. */
static bool ovs_must_notify(struct genl_family *family, struct genl_info *info,
			    unsigned int group)
{}

/* Send @skb as a Generic Netlink notification for @family (body elided in
 * this skeleton; presumably multicasts and/or unicasts per @info — confirm
 * against the full source).
 */
static void ovs_notify(struct genl_family *family,
		       struct sk_buff *skb, struct genl_info *info)
{}

/**
 * DOC: Locking:
 *
 * All writes e.g. Writes to device state (add/remove datapath, port, set
 * operations on vports, etc.), Writes to other state (flow table
 * modifications, set miscellaneous datapath parameters, etc.) are protected
 * by ovs_lock.
 *
 * Reads are protected by RCU.
 *
 * There are a few special cases (mostly stats) that have their own
 * synchronization but they nest under all of above and don't interact with
 * each other.
 *
 * The RTNL lock nests inside ovs_mutex.
 */

static DEFINE_MUTEX(ovs_mutex);

/* Acquire ovs_mutex, the writer-side lock protecting all datapath state
 * (see the Locking DOC block above).  Body elided in this skeleton.
 */
void ovs_lock(void)
{}

/* Release ovs_mutex (body elided in this skeleton). */
void ovs_unlock(void)
{}

#ifdef CONFIG_LOCKDEP
/* Lockdep assertion helper: non-zero when ovs_mutex is held (body elided). */
int lockdep_ovsl_is_held(void)
{}
#endif

static struct vport *new_vport(const struct vport_parms *);
static int queue_gso_packets(struct datapath *dp, struct sk_buff *,
			     const struct sw_flow_key *,
			     const struct dp_upcall_info *,
			     uint32_t cutlen);
static int queue_userspace_packet(struct datapath *dp, struct sk_buff *,
				  const struct sw_flow_key *,
				  const struct dp_upcall_info *,
				  uint32_t cutlen);

static void ovs_dp_masks_rebalance(struct work_struct *work);

static int ovs_dp_set_upcall_portids(struct datapath *, const struct nlattr *);

/* Must be called with rcu_read_lock or ovs_mutex. */
const char *ovs_dp_name(const struct datapath *dp)
{}

/* Return an ifindex identifying @dp to userspace, presumably that of its
 * local internal port (body elided in this skeleton — confirm).
 */
static int get_dpifindex(const struct datapath *dp)
{}

/* RCU callback that frees a datapath once all readers are done (body elided). */
static void destroy_dp_rcu(struct rcu_head *rcu)
{}

/* Hash-bucket lookup in @dp's vport table, keyed by @port_no (body elided). */
static struct hlist_head *vport_hash_bucket(const struct datapath *dp,
					    u16 port_no)
{}

/* Called with ovs_mutex or RCU read lock. */
struct vport *ovs_lookup_vport(const struct datapath *dp, u16 port_no)
{}

/* Called with ovs_mutex. */
static struct vport *new_vport(const struct vport_parms *parms)
{}

/* Account one upcall attempt in the vport's upcall statistics;
 * @upcall_result says whether the upcall succeeded (body elided).
 */
static void ovs_vport_update_upcall_stats(struct sk_buff *skb,
					  const struct dp_upcall_info *upcall_info,
					  bool upcall_result)
{}

/* Detach vport @p from its datapath (body elided in this skeleton). */
void ovs_dp_detach_port(struct vport *p)
{}

/* Must be called with rcu_read_lock. */
void ovs_dp_process_packet(struct sk_buff *skb, struct sw_flow_key *key)
{}

/* Queue @skb to userspace via the Netlink upcall mechanism; @cutlen is the
 * number of bytes to trim from the end of the packet first.  Body elided in
 * this skeleton.
 */
int ovs_dp_upcall(struct datapath *dp, struct sk_buff *skb,
		  const struct sw_flow_key *key,
		  const struct dp_upcall_info *upcall_info,
		  uint32_t cutlen)
{}

/* GSO variant of the upcall path — presumably segments @skb and queues each
 * segment individually (body elided; confirm against the full source).
 */
static int queue_gso_packets(struct datapath *dp, struct sk_buff *skb,
			     const struct sw_flow_key *key,
			     const struct dp_upcall_info *upcall_info,
			     uint32_t cutlen)
{}

/* Compute the Netlink message size needed for one upcall (body elided). */
static size_t upcall_msg_size(const struct dp_upcall_info *upcall_info,
			      unsigned int hdrlen, int actions_attrlen)
{}

static void pad_packet(struct datapath *dp, struct sk_buff *skb)
{}

/* Build and send the OVS_PACKET_CMD_MISS/ACTION message for a single
 * (non-GSO) packet (body elided in this skeleton).
 */
static int queue_userspace_packet(struct datapath *dp, struct sk_buff *skb,
				  const struct sw_flow_key *key,
				  const struct dp_upcall_info *upcall_info,
				  uint32_t cutlen)
{}

/* OVS_PACKET_CMD_EXECUTE handler (body elided). */
static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
{}

/* Policy/ops/family tables — initializers elided in this skeleton. */
static const struct nla_policy packet_policy[OVS_PACKET_ATTR_MAX + 1] =;

static const struct genl_small_ops dp_packet_genl_ops[] =;

static struct genl_family dp_packet_genl_family __ro_after_init =;

/* Aggregate datapath statistics into @stats / @mega_stats (body elided). */
static void get_dp_stats(const struct datapath *dp, struct ovs_dp_stats *stats,
			 struct ovs_dp_megaflow_stats *mega_stats)
{}

/* The should_fill_*() helpers decide which optional attributes a flow
 * reply/dump should carry, based on @ufid_flags (bodies elided).
 */
static bool should_fill_key(const struct sw_flow_id *sfid, uint32_t ufid_flags)
{}

static bool should_fill_mask(uint32_t ufid_flags)
{}

static bool should_fill_actions(uint32_t ufid_flags)
{}

/* Upper bound on the reply-message size for a flow command (body elided). */
static size_t ovs_flow_cmd_msg_size(const struct sw_flow_actions *acts,
				    const struct sw_flow_id *sfid,
				    uint32_t ufid_flags)
{}

/* Called with ovs_mutex or RCU read lock. */
static int ovs_flow_cmd_fill_stats(const struct sw_flow *flow,
				   struct sk_buff *skb)
{}

/* Called with ovs_mutex or RCU read lock. */
static int ovs_flow_cmd_fill_actions(const struct sw_flow *flow,
				     struct sk_buff *skb, int skb_orig_len)
{}

/* Called with ovs_mutex or RCU read lock. */
static int ovs_flow_cmd_fill_info(const struct sw_flow *flow, int dp_ifindex,
				  struct sk_buff *skb, u32 portid,
				  u32 seq, u32 flags, u8 cmd, u32 ufid_flags)
{}

/* May not be called with RCU read lock. */
static struct sk_buff *ovs_flow_cmd_alloc_info(const struct sw_flow_actions *acts,
					       const struct sw_flow_id *sfid,
					       struct genl_info *info,
					       bool always,
					       uint32_t ufid_flags)
{}

/* Called with ovs_mutex. */
static struct sk_buff *ovs_flow_cmd_build_info(const struct sw_flow *flow,
					       int dp_ifindex,
					       struct genl_info *info, u8 cmd,
					       bool always, u32 ufid_flags)
{}

/* OVS_FLOW_CMD_NEW handler (body elided in this skeleton). */
static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info)
{}

/* Factor out action copy to avoid "Wframe-larger-than=1024" warning. */
static noinline_for_stack
struct sw_flow_actions *get_flow_actions(struct net *net,
					 const struct nlattr *a,
					 const struct sw_flow_key *key,
					 const struct sw_flow_mask *mask,
					 bool log)
{}

/* Factor out match-init and action-copy to avoid
 * "Wframe-larger-than=1024" warning. Because mask is only
 * used to get actions, we new a function to save some
 * stack space.
 *
 * If there are not key and action attrs, we return 0
 * directly. In the case, the caller will also not use the
 * match as before. If there is action attr, we try to get
 * actions and save them to *acts. Before returning from
 * the function, we reset the match->mask pointer. Because
 * we should not to return match object with dangling reference
 * to mask.
 * */
static noinline_for_stack int
ovs_nla_init_match_and_action(struct net *net,
			      struct sw_flow_match *match,
			      struct sw_flow_key *key,
			      struct nlattr **a,
			      struct sw_flow_actions **acts,
			      bool log)
{}

/* OVS_FLOW_CMD_{SET,GET,DEL} handlers and the dump callback — bodies
 * elided in this skeleton.
 */
static int ovs_flow_cmd_set(struct sk_buff *skb, struct genl_info *info)
{}

static int ovs_flow_cmd_get(struct sk_buff *skb, struct genl_info *info)
{}

static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info)
{}

static int ovs_flow_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
{}

/* Policy/ops/family tables — initializers elided in this skeleton. */
static const struct nla_policy flow_policy[OVS_FLOW_ATTR_MAX + 1] =;

static const struct genl_small_ops dp_flow_genl_ops[] =;

static struct genl_family dp_flow_genl_family __ro_after_init =;

/* Size estimate for a datapath reply message (body elided). */
static size_t ovs_dp_cmd_msg_size(void)
{}

/* Called with ovs_mutex. */
static int ovs_dp_cmd_fill_info(struct datapath *dp, struct sk_buff *skb,
				u32 portid, u32 seq, u32 flags, u8 cmd)
{}

static struct sk_buff *ovs_dp_cmd_alloc_info(void)
{}

/* Called with rcu_read_lock or ovs_mutex. */
static struct datapath *lookup_datapath(struct net *net,
					const struct ovs_header *ovs_header,
					struct nlattr *a[OVS_DP_ATTR_MAX + 1])
{}

static void ovs_dp_reset_user_features(struct sk_buff *skb,
				       struct genl_info *info)
{}

/* Install the per-CPU upcall portids supplied by userspace in @ids
 * (body elided in this skeleton).
 */
static int ovs_dp_set_upcall_portids(struct datapath *dp,
			      const struct nlattr *ids)
{}

/* Pick the upcall portid for @cpu_id (body elided). */
u32 ovs_dp_get_upcall_portid(const struct datapath *dp, uint32_t cpu_id)
{}

/* Apply user-settable datapath attributes from @a (body elided). */
static int ovs_dp_change(struct datapath *dp, struct nlattr *a[])
{}

static int ovs_dp_stats_init(struct datapath *dp)
{}

static int ovs_dp_vport_init(struct datapath *dp)
{}

/* OVS_DP_CMD_NEW handler (body elided). */
static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
{}

/* Called with ovs_mutex. */
static void __dp_destroy(struct datapath *dp)
{}

/* OVS_DP_CMD_{DEL,SET,GET} handlers and the dump callback — bodies
 * elided in this skeleton.
 */
static int ovs_dp_cmd_del(struct sk_buff *skb, struct genl_info *info)
{}

static int ovs_dp_cmd_set(struct sk_buff *skb, struct genl_info *info)
{}

static int ovs_dp_cmd_get(struct sk_buff *skb, struct genl_info *info)
{}

static int ovs_dp_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
{}

/* Policy/ops/family tables — initializers elided in this skeleton. */
static const struct nla_policy datapath_policy[OVS_DP_ATTR_MAX + 1] =;

static const struct genl_small_ops dp_datapath_genl_ops[] =;

static struct genl_family dp_datapath_genl_family __ro_after_init =;

/* Called with ovs_mutex or RCU read lock. */
static int ovs_vport_cmd_fill_info(struct vport *vport, struct sk_buff *skb,
				   struct net *net, u32 portid, u32 seq,
				   u32 flags, u8 cmd, gfp_t gfp)
{}

static struct sk_buff *ovs_vport_cmd_alloc_info(void)
{}

/* Called with ovs_mutex, only via ovs_dp_notify_wq(). */
struct sk_buff *ovs_vport_cmd_build_info(struct vport *vport, struct net *net,
					 u32 portid, u32 seq, u8 cmd)
{}

/* Called with ovs_mutex or RCU read lock. */
static struct vport *lookup_vport(struct net *net,
				  const struct ovs_header *ovs_header,
				  struct nlattr *a[OVS_VPORT_ATTR_MAX + 1])
{}

/* Largest headroom needed by any vport in @dp (body elided). */
static unsigned int ovs_get_max_headroom(struct datapath *dp)
{}

/* Called with ovs_mutex */
static void ovs_update_headroom(struct datapath *dp, unsigned int new_headroom)
{}

/* OVS_VPORT_CMD_{NEW,SET,DEL,GET} handlers and the dump callback —
 * bodies elided in this skeleton.
 */
static int ovs_vport_cmd_new(struct sk_buff *skb, struct genl_info *info)
{}

static int ovs_vport_cmd_set(struct sk_buff *skb, struct genl_info *info)
{}

static int ovs_vport_cmd_del(struct sk_buff *skb, struct genl_info *info)
{}

static int ovs_vport_cmd_get(struct sk_buff *skb, struct genl_info *info)
{}

static int ovs_vport_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
{}

/* Deferred work that rebalances the flow-mask cache (body elided). */
static void ovs_dp_masks_rebalance(struct work_struct *work)
{}

/* Policy/ops/family tables — initializers elided in this skeleton. */
static const struct nla_policy vport_policy[OVS_VPORT_ATTR_MAX + 1] =;

static const struct genl_small_ops dp_vport_genl_ops[] =;

struct genl_family dp_vport_genl_family __ro_after_init =;

static struct genl_family * const dp_genl_families[] =;

/* Unregister the first @n_families entries of dp_genl_families
 * (body elided in this skeleton).
 */
static void dp_unregister_genl(int n_families)
{}

/* Register all Generic Netlink families in dp_genl_families (body elided). */
static int __init dp_register_genl(void)
{}

/* Per-network-namespace constructor (body elided). */
static int __net_init ovs_init_net(struct net *net)
{}

static void __net_exit list_vports_from_net(struct net *net, struct net *dnet,
					    struct list_head *head)
{}

/* Per-network-namespace destructor (body elided). */
static void __net_exit ovs_exit_net(struct net *dnet)
{}

static struct pernet_operations ovs_net_ops =;

static const char * const ovs_drop_reasons[] =;

static struct drop_reason_list drop_reason_list_ovs =;

/* Module init/exit entry points (bodies elided in this skeleton). */
static int __init dp_init(void)
{}

static void dp_cleanup(void)
{}

/* NOTE(review): the macro arguments below were stripped along with the
 * initializers; module_init() presumably names dp_init, and the MODULE_*
 * macros carry their string arguments in the full source — confirm there.
 */
module_init();
module_exit(dp_cleanup);

MODULE_DESCRIPTION();
MODULE_LICENSE();
MODULE_ALIAS_GENL_FAMILY();
MODULE_ALIAS_GENL_FAMILY();
MODULE_ALIAS_GENL_FAMILY();
MODULE_ALIAS_GENL_FAMILY();
MODULE_ALIAS_GENL_FAMILY();
MODULE_ALIAS_GENL_FAMILY();