linux/net/openvswitch/actions.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2007-2017 Nicira, Inc.
 */

#define pr_fmt(fmt)

#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/openvswitch.h>
#include <linux/sctp.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/in6.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>

#include <net/dst.h>
#include <net/gso.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/ip6_fib.h>
#include <net/checksum.h>
#include <net/dsfield.h>
#include <net/mpls.h>

#if IS_ENABLED(CONFIG_PSAMPLE)
#include <net/psample.h>
#endif

#include <net/sctp/checksum.h>

#include "datapath.h"
#include "drop.h"
#include "flow.h"
#include "conntrack.h"
#include "vport.h"
#include "flow_netlink.h"
#include "openvswitch_trace.h"

struct deferred_action {};

#define MAX_L2_LEN
struct ovs_frag_data {};

static DEFINE_PER_CPU(struct ovs_frag_data, ovs_frag_data_storage);

#define DEFERRED_ACTION_FIFO_SIZE
#define OVS_RECURSION_LIMIT
#define OVS_DEFERRED_ACTION_THRESHOLD
struct action_fifo {};

struct action_flow_keys {};

static struct action_fifo __percpu *action_fifos;
static struct action_flow_keys __percpu *flow_keys;
static DEFINE_PER_CPU(int, exec_actions_level);

/* Make a clone of the 'key', using the pre-allocated percpu 'flow_keys'
 * space. Return NULL if no key space is left.
 */
static struct sw_flow_key *clone_key(const struct sw_flow_key *key_)
{}
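
/* A minimal sketch of how clone_key() can be backed by the percpu
 * 'flow_keys' pool (the real body is elided above). It assumes
 * 'struct action_flow_keys' holds an array 'key[]' indexed by the 1-based
 * 'exec_actions_level' nesting depth; the member names are illustrative,
 * not necessarily the upstream ones.
 */
static struct sw_flow_key *clone_key_sketch(const struct sw_flow_key *key_)
{
	struct action_flow_keys *keys = this_cpu_ptr(flow_keys);
	int level = this_cpu_read(exec_actions_level);
	struct sw_flow_key *key = NULL;

	if (level <= OVS_DEFERRED_ACTION_THRESHOLD) {
		key = &keys->key[level - 1];	/* one slot per nesting level */
		*key = *key_;
	}

	return key;
}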

static void action_fifo_init(struct action_fifo *fifo)
{}

static bool action_fifo_is_empty(const struct action_fifo *fifo)
{}

static struct deferred_action *action_fifo_get(struct action_fifo *fifo)
{}

static struct deferred_action *action_fifo_put(struct action_fifo *fifo)
{}

/* Queue the actions for deferred execution; return the fifo entry used,
 * or NULL if the fifo is full.
 */
static struct deferred_action *add_deferred_actions(struct sk_buff *skb,
				    const struct sw_flow_key *key,
				    const struct nlattr *actions,
				    const int actions_len)
{}
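
/* Sketch of add_deferred_actions(): reserve a slot in the per-CPU fifo via
 * action_fifo_put() and stash the skb, the actions and a copy of the key in
 * it. The 'skb'/'actions'/'actions_len'/'pkt_key' members are assumed
 * fields of 'struct deferred_action' (its body is elided above).
 */
static struct deferred_action *
add_deferred_actions_sketch(struct sk_buff *skb, const struct sw_flow_key *key,
			    const struct nlattr *actions, const int actions_len)
{
	struct action_fifo *fifo = this_cpu_ptr(action_fifos);
	struct deferred_action *da = action_fifo_put(fifo);

	if (da) {
		da->skb = skb;
		da->actions = actions;
		da->actions_len = actions_len;
		da->pkt_key = *key;	/* deferred execution needs its own key copy */
	}

	return da;
}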

static void invalidate_flow_key(struct sw_flow_key *key)
{}

static bool is_flow_key_valid(const struct sw_flow_key *key)
{}

static int clone_execute(struct datapath *dp, struct sk_buff *skb,
			 struct sw_flow_key *key,
			 u32 recirc_id,
			 const struct nlattr *actions, int len,
			 bool last, bool clone_flow_key);

static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
			      struct sw_flow_key *key,
			      const struct nlattr *attr, int len);

static int push_mpls(struct sk_buff *skb, struct sw_flow_key *key,
		     __be32 mpls_lse, __be16 mpls_ethertype, __u16 mac_len)
{}

static int pop_mpls(struct sk_buff *skb, struct sw_flow_key *key,
		    const __be16 ethertype)
{}

static int set_mpls(struct sk_buff *skb, struct sw_flow_key *flow_key,
		    const __be32 *mpls_lse, const __be32 *mask)
{}

static int pop_vlan(struct sk_buff *skb, struct sw_flow_key *key)
{}

static int push_vlan(struct sk_buff *skb, struct sw_flow_key *key,
		     const struct ovs_action_push_vlan *vlan)
{}

/* 'src' is already properly masked. */
static void ether_addr_copy_masked(u8 *dst_, const u8 *src_, const u8 *mask_)
{}
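
/* Sketch of a masked Ethernet-address copy: since 'src' is already masked
 * (see the comment above), each destination byte only needs its unmasked
 * bits preserved. Illustrative only; the upstream body may operate on wider
 * words for efficiency.
 */
static void ether_addr_copy_masked_sketch(u8 *dst_, const u8 *src_,
					  const u8 *mask_)
{
	int i;

	for (i = 0; i < ETH_ALEN; i++)
		dst_[i] = src_[i] | (dst_[i] & ~mask_[i]);
}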

static int set_eth_addr(struct sk_buff *skb, struct sw_flow_key *flow_key,
			const struct ovs_key_ethernet *key,
			const struct ovs_key_ethernet *mask)
{}

/* pop_eth does not support VLAN packets as this action is never called
 * for them.
 */
static int pop_eth(struct sk_buff *skb, struct sw_flow_key *key)
{}
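
/* Sketch of pop_eth(): strip the Ethernet header and mark the flow key as
 * stale. MAC_PROTO_NONE is the datapath's "no L2 header" marker; whether
 * the real body adjusts any other state is not shown here.
 */
static int pop_eth_sketch(struct sk_buff *skb, struct sw_flow_key *key)
{
	skb_pull_rcsum(skb, ETH_HLEN);		/* drop the Ethernet header */
	skb_reset_mac_header(skb);
	skb_reset_mac_len(skb);

	key->mac_proto = MAC_PROTO_NONE;	/* packet is now L3-only */
	invalidate_flow_key(key);
	return 0;
}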

static int push_eth(struct sk_buff *skb, struct sw_flow_key *key,
		    const struct ovs_action_push_eth *ethh)
{}

static noinline_for_stack int push_nsh(struct sk_buff *skb,
				       struct sw_flow_key *key,
				       const struct nlattr *a)
{}

static int pop_nsh(struct sk_buff *skb, struct sw_flow_key *key)
{}

static void update_ip_l4_checksum(struct sk_buff *skb, struct iphdr *nh,
				  __be32 addr, __be32 new_addr)
{}

static void set_ip_addr(struct sk_buff *skb, struct iphdr *nh,
			__be32 *addr, __be32 new_addr)
{}

static void update_ipv6_checksum(struct sk_buff *skb, u8 l4_proto,
				 __be32 addr[4], const __be32 new_addr[4])
{}

static void mask_ipv6_addr(const __be32 old[4], const __be32 addr[4],
			   const __be32 mask[4], __be32 masked[4])
{}

static void set_ipv6_addr(struct sk_buff *skb, u8 l4_proto,
			  __be32 addr[4], const __be32 new_addr[4],
			  bool recalculate_csum)
{}

static void set_ipv6_dsfield(struct sk_buff *skb, struct ipv6hdr *nh, u8 ipv6_tclass, u8 mask)
{}

static void set_ipv6_fl(struct sk_buff *skb, struct ipv6hdr *nh, u32 fl, u32 mask)
{}

static void set_ipv6_ttl(struct sk_buff *skb, struct ipv6hdr *nh, u8 new_ttl, u8 mask)
{}

static void set_ip_ttl(struct sk_buff *skb, struct iphdr *nh, u8 new_ttl,
		       u8 mask)
{}

static int set_ipv4(struct sk_buff *skb, struct sw_flow_key *flow_key,
		    const struct ovs_key_ipv4 *key,
		    const struct ovs_key_ipv4 *mask)
{}

static bool is_ipv6_mask_nonzero(const __be32 addr[4])
{}

static int set_ipv6(struct sk_buff *skb, struct sw_flow_key *flow_key,
		    const struct ovs_key_ipv6 *key,
		    const struct ovs_key_ipv6 *mask)
{}

static int set_nsh(struct sk_buff *skb, struct sw_flow_key *flow_key,
		   const struct nlattr *a)
{}

/* Must follow skb_ensure_writable() since that can move the skb data. */
static void set_tp_port(struct sk_buff *skb, __be16 *port,
			__be16 new_port, __sum16 *check)
{}
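
/* Sketch of set_tp_port(): rewrite a TCP/UDP port in place and fold the
 * difference into the L4 checksum. inet_proto_csum_replace2() updates the
 * checksum (including skb->csum when needed), which is why the caller must
 * have made the header writable first.
 */
static void set_tp_port_sketch(struct sk_buff *skb, __be16 *port,
			       __be16 new_port, __sum16 *check)
{
	inet_proto_csum_replace2(check, skb, *port, new_port, false);
	*port = new_port;
}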

static int set_udp(struct sk_buff *skb, struct sw_flow_key *flow_key,
		   const struct ovs_key_udp *key,
		   const struct ovs_key_udp *mask)
{}

static int set_tcp(struct sk_buff *skb, struct sw_flow_key *flow_key,
		   const struct ovs_key_tcp *key,
		   const struct ovs_key_tcp *mask)
{}

static int set_sctp(struct sk_buff *skb, struct sw_flow_key *flow_key,
		    const struct ovs_key_sctp *key,
		    const struct ovs_key_sctp *mask)
{}

static int ovs_vport_output(struct net *net, struct sock *sk,
			    struct sk_buff *skb)
{}

static unsigned int
ovs_dst_get_mtu(const struct dst_entry *dst)
{}

static struct dst_ops ovs_dst_ops = {
	.family = AF_UNSPEC,
	.mtu = ovs_dst_get_mtu,
};

/* prepare_frag() is called once per (larger-than-MTU) frame; its inverse is
 * ovs_vport_output(), which is called once for each fragment that is
 * emitted.
 */
static void prepare_frag(struct vport *vport, struct sk_buff *skb,
			 u16 orig_network_offset, u8 mac_proto)
{}
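
/* Sketch of prepare_frag(): before handing the skb to the IP/IPv6
 * fragmentation code, stash everything ovs_vport_output() will need to
 * rebuild the L2 header on each fragment into the percpu
 * 'ovs_frag_data_storage', then pull the L2 header off. The field names of
 * 'struct ovs_frag_data' used here are assumptions (its body is elided
 * above).
 */
static void prepare_frag_sketch(struct vport *vport, struct sk_buff *skb,
				u16 orig_network_offset, u8 mac_proto)
{
	unsigned int hlen = skb_network_offset(skb);
	struct ovs_frag_data *data = this_cpu_ptr(&ovs_frag_data_storage);

	data->vport = vport;
	data->mac_proto = mac_proto;
	data->network_offset = orig_network_offset;
	data->l2_len = hlen;
	memcpy(&data->l2_data, skb->data, hlen);	/* saved L2 header bytes */

	skb_pull(skb, hlen);	/* fragmentation operates on the L3 packet */
}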

static void ovs_fragment(struct net *net, struct vport *vport,
			 struct sk_buff *skb, u16 mru,
			 struct sw_flow_key *key)
{}

static void do_output(struct datapath *dp, struct sk_buff *skb, int out_port,
		      struct sw_flow_key *key)
{}

static int output_userspace(struct datapath *dp, struct sk_buff *skb,
			    struct sw_flow_key *key, const struct nlattr *attr,
			    const struct nlattr *actions, int actions_len,
			    uint32_t cutlen)
{}

static int dec_ttl_exception_handler(struct datapath *dp, struct sk_buff *skb,
				     struct sw_flow_key *key,
				     const struct nlattr *attr)
{}

/* When 'last' is true, sample() should always consume the 'skb'.
 * Otherwise, sample() should keep 'skb' intact regardless of what
 * actions are executed within sample().
 */
static int sample(struct datapath *dp, struct sk_buff *skb,
		  struct sw_flow_key *key, const struct nlattr *attr,
		  bool last)
{}
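
/* Sketch of the 'last' contract in sample(): when the probability check
 * fails, the skb is consumed only if this is the last action; otherwise it
 * is left untouched for the caller. The OVS_SAMPLE_ATTR_ARG layout (a
 * struct sample_arg as the first nested attribute) is assumed here; the
 * real body is elided above.
 */
static int sample_sketch(struct datapath *dp, struct sk_buff *skb,
			 struct sw_flow_key *key, const struct nlattr *attr,
			 bool last)
{
	struct nlattr *sample_arg = nla_data(attr);
	const struct sample_arg *arg = nla_data(sample_arg);
	int rem = nla_len(attr);
	struct nlattr *actions = nla_next(sample_arg, &rem);

	if (arg->probability != U32_MAX &&
	    (!arg->probability || get_random_u32() > arg->probability)) {
		if (last)
			consume_skb(skb);	/* nothing sampled, but 'last' owns the skb */
		return 0;
	}

	return clone_execute(dp, skb, key, 0, actions, rem, last,
			     !arg->exec /* clone the flow key unless exec */);
}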

/* When 'last' is true, clone() should always consume the 'skb'.
 * Otherwise, clone() should keep 'skb' intact regardless of what
 * actions are executed within clone().
 */
static int clone(struct datapath *dp, struct sk_buff *skb,
		 struct sw_flow_key *key, const struct nlattr *attr,
		 bool last)
{}
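
/* Sketch of clone(): the nested actions are prefixed by an attribute telling
 * the datapath whether the flow key must be cloned before they run.
 * Consuming the skb when 'last' (and cloning it otherwise) is delegated to
 * clone_execute().
 */
static int clone_sketch(struct datapath *dp, struct sk_buff *skb,
			struct sw_flow_key *key, const struct nlattr *attr,
			bool last)
{
	struct nlattr *clone_arg = nla_data(attr);
	int rem = nla_len(attr);
	bool dont_clone_flow_key = nla_get_u32(clone_arg);
	struct nlattr *actions = nla_next(clone_arg, &rem);

	return clone_execute(dp, skb, key, 0, actions, rem, last,
			     !dont_clone_flow_key);
}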

static void execute_hash(struct sk_buff *skb, struct sw_flow_key *key,
			 const struct nlattr *attr)
{}

static int execute_set_action(struct sk_buff *skb,
			      struct sw_flow_key *flow_key,
			      const struct nlattr *a)
{}

/* Mask is at the midpoint of the data. */
#define get_mask(a, type)
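
/* One way get_mask() can be written, given that userspace packs a value and
 * its mask back to back in a single attribute: step one 'type' past the
 * start of the attribute payload. Illustrative only; the real macro body is
 * elided above.
 */
#define get_mask_sketch(a, type)	((const type)nla_data(a) + 1)
/* Usage (illustrative): const __be32 *mask = get_mask_sketch(a, __be32 *); */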

static int execute_masked_set_action(struct sk_buff *skb,
				     struct sw_flow_key *flow_key,
				     const struct nlattr *a)
{}

static int execute_recirc(struct datapath *dp, struct sk_buff *skb,
			  struct sw_flow_key *key,
			  const struct nlattr *a, bool last)
{}

static int execute_check_pkt_len(struct datapath *dp, struct sk_buff *skb,
				 struct sw_flow_key *key,
				 const struct nlattr *attr, bool last)
{}

static int execute_dec_ttl(struct sk_buff *skb, struct sw_flow_key *key)
{}

#if IS_ENABLED(CONFIG_PSAMPLE)
static void execute_psample(struct datapath *dp, struct sk_buff *skb,
			    const struct nlattr *attr)
{}
#else
static void execute_psample(struct datapath *dp, struct sk_buff *skb,
			    const struct nlattr *attr)
{}
#endif

/* Execute a list of actions against 'skb'. */
static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
			      struct sw_flow_key *key,
			      const struct nlattr *attr, int len)
{}
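
/* Skeleton of the action loop in do_execute_actions(): walk the
 * netlink-encoded action list with nla_next() and dispatch on nla_type().
 * Only two representative cases are shown, and the real function's handling
 * of output-before-last, truncation and drop reasons is omitted.
 */
static int do_execute_actions_sketch(struct datapath *dp, struct sk_buff *skb,
				     struct sw_flow_key *key,
				     const struct nlattr *attr, int len)
{
	const struct nlattr *a;
	int rem;

	for (a = attr, rem = len; rem > 0; a = nla_next(a, &rem)) {
		int err = 0;

		switch (nla_type(a)) {
		case OVS_ACTION_ATTR_OUTPUT:
			/* Simplified: treat every output as the last action. */
			do_output(dp, skb, nla_get_u32(a), key);
			return 0;

		case OVS_ACTION_ATTR_SET:
			err = execute_set_action(skb, key, nla_data(a));
			break;

		default:
			err = -EINVAL;	/* not handled in this sketch */
			break;
		}

		if (unlikely(err)) {
			kfree_skb(skb);
			return err;
		}
	}

	consume_skb(skb);
	return 0;
}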

/* Execute the actions on a clone of the packet. The execution does not
 * affect the original 'skb' or the original 'key'.
 *
 * The execution may be deferred if the actions cannot be executed
 * immediately.
 */
static int clone_execute(struct datapath *dp, struct sk_buff *skb,
			 struct sw_flow_key *key, u32 recirc_id,
			 const struct nlattr *actions, int len,
			 bool last, bool clone_flow_key)
{}
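
/* Sketch of clone_execute(): when this is not the last action, work on a
 * clone so the caller's skb survives; then either run the nested actions
 * immediately on a cloned flow key or, if the percpu key pool is exhausted,
 * queue them through add_deferred_actions(). Recirculation handling and the
 * exact error/drop paths are left out.
 */
static int clone_execute_sketch(struct datapath *dp, struct sk_buff *skb,
				struct sw_flow_key *key, u32 recirc_id,
				const struct nlattr *actions, int len,
				bool last, bool clone_flow_key)
{
	struct sw_flow_key *clone;
	int err = 0;

	skb = last ? skb : skb_clone(skb, GFP_ATOMIC);
	if (!skb)
		return 0;	/* out of memory: skip this action */

	clone = clone_flow_key ? clone_key(key) : key;
	if (clone) {
		clone->recirc_id = recirc_id;
		if (clone_flow_key)
			__this_cpu_inc(exec_actions_level);
		err = do_execute_actions(dp, skb, clone, actions, len);
		if (clone_flow_key)
			__this_cpu_dec(exec_actions_level);
	} else if (!add_deferred_actions(skb, key, actions, len)) {
		/* Deferred-action fifo full: drop the (cloned) packet. */
		kfree_skb(skb);
	}

	return err;
}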

static void process_deferred_actions(struct datapath *dp)
{}

/* Execute a list of actions against 'skb'. */
int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb,
			const struct sw_flow_actions *acts,
			struct sw_flow_key *key)
{}
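
/* Sketch of ovs_execute_actions(): bump the per-CPU recursion level, bail
 * out past OVS_RECURSION_LIMIT, run the action list, and drain the
 * deferred-action fifo only when unwinding back to the outermost level.
 * The rate-limited log message and OVS_CB() bookkeeping of the real
 * function are omitted.
 */
static int ovs_execute_actions_sketch(struct datapath *dp, struct sk_buff *skb,
				      const struct sw_flow_actions *acts,
				      struct sw_flow_key *key)
{
	int level = __this_cpu_inc_return(exec_actions_level);
	int err;

	if (unlikely(level > OVS_RECURSION_LIMIT)) {
		kfree_skb(skb);		/* probable configuration loop */
		err = -ENETDOWN;
		goto out;
	}

	err = do_execute_actions(dp, skb, key, acts->actions, acts->actions_len);

	if (level == 1)
		process_deferred_actions(dp);	/* outermost call drains the fifo */

out:
	__this_cpu_dec(exec_actions_level);
	return err;
}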

int action_fifos_init(void)
{}

void action_fifos_exit(void)
{}