// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/net/openvswitch/flow_table.c
 *
 * Copyright (c) 2007-2014 Nicira, Inc.
 */

#include "flow.h"
#include "datapath.h"
#include "flow_netlink.h"
#include <linux/uaccess.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <net/llc_pdu.h>
#include <linux/kernel.h>
#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/llc.h>
#include <linux/module.h>
#include <linux/in.h>
#include <linux/rcupdate.h>
#include <linux/cpumask.h>
#include <linux/if_arp.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/sctp.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/rculist.h>
#include <linux/sort.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/ndisc.h>

#define TBL_MIN_BUCKETS		1024
#define MASK_ARRAY_SIZE_MIN	16
#define REHASH_INTERVAL		(10 * 60 * HZ)

#define MC_DEFAULT_HASH_ENTRIES	256
#define MC_HASH_SHIFT		8
#define MC_HASH_SEGS		((sizeof(uint32_t) * 8) / MC_HASH_SHIFT)
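
/* With an 8-bit MC_HASH_SHIFT, a 32-bit skb hash yields
 * MC_HASH_SEGS == 32 / 8 == 4 cache probe positions per packet.
 */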

static struct kmem_cache *flow_cache;
struct kmem_cache *flow_stats_cache __read_mostly;

static u16 range_n_bytes(const struct sw_flow_key_range *range)
{
	return range->end - range->start;
}

void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src,
		       bool full, const struct sw_flow_mask *mask)
{}

struct sw_flow *ovs_flow_alloc(void)
{}

int ovs_flow_tbl_count(const struct flow_table *table)
{
	return table->count;
}

static void flow_free(struct sw_flow *flow)
{}

static void rcu_free_flow_callback(struct rcu_head *rcu)
{}

void ovs_flow_free(struct sw_flow *flow, bool deferred)
{}

static void __table_instance_destroy(struct table_instance *ti)
{}

static struct table_instance *table_instance_alloc(int new_size)
{}

static void __mask_array_destroy(struct mask_array *ma)
{}

static void mask_array_rcu_cb(struct rcu_head *rcu)
{}

static void tbl_mask_array_reset_counters(struct mask_array *ma)
{}

static struct mask_array *tbl_mask_array_alloc(int size)
{}

static int tbl_mask_array_realloc(struct flow_table *tbl, int size)
{}

static int tbl_mask_array_add_mask(struct flow_table *tbl,
				   struct sw_flow_mask *new)
{}

static void tbl_mask_array_del_mask(struct flow_table *tbl,
				    struct sw_flow_mask *mask)
{}

/* Remove 'mask' from the mask list, if it is not needed any more. */
static void flow_mask_remove(struct flow_table *tbl, struct sw_flow_mask *mask)
{}
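
/* A minimal sketch of the logic flow_mask_remove() implements, assuming
 * the upstream ref_count field on struct sw_flow_mask; the ASSERT_OVSL()
 * and sanity checks are omitted.  Illustrative only, not upstream code.
 */
static void flow_mask_remove_sketch(struct flow_table *tbl,
				    struct sw_flow_mask *mask)
{
	if (!mask)
		return;

	/* Every flow that uses the mask holds one reference; once the
	 * last reference is dropped, unhook the mask from the array.
	 */
	if (--mask->ref_count == 0)
		tbl_mask_array_del_mask(tbl, mask);
}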

static void __mask_cache_destroy(struct mask_cache *mc)
{}

static void mask_cache_rcu_cb(struct rcu_head *rcu)
{}

static struct mask_cache *tbl_mask_cache_alloc(u32 size)
{}

int ovs_flow_tbl_masks_cache_resize(struct flow_table *table, u32 size)
{}

int ovs_flow_tbl_init(struct flow_table *table)
{}

static void flow_tbl_destroy_rcu_cb(struct rcu_head *rcu)
{}

static void table_instance_flow_free(struct flow_table *table,
				     struct table_instance *ti,
				     struct table_instance *ufid_ti,
				     struct sw_flow *flow)
{}

/* Must be called with OVS mutex held. */
void table_instance_flow_flush(struct flow_table *table,
			       struct table_instance *ti,
			       struct table_instance *ufid_ti)
{}

static void table_instance_destroy(struct table_instance *ti,
				   struct table_instance *ufid_ti)
{}

/* No need for locking; this function is called from an RCU callback
 * or from the error path.
 */
void ovs_flow_tbl_destroy(struct flow_table *table)
{}

struct sw_flow *ovs_flow_tbl_dump_next(struct table_instance *ti,
				       u32 *bucket, u32 *last)
{}

static struct hlist_head *find_bucket(struct table_instance *ti, u32 hash)
{
	hash = jhash_1word(hash, ti->hash_seed);
	return &ti->buckets[hash & (ti->n_buckets - 1)];
}

static void table_instance_insert(struct table_instance *ti,
				  struct sw_flow *flow)
{}

static void ufid_table_instance_insert(struct table_instance *ti,
				       struct sw_flow *flow)
{}

static void flow_table_copy_flows(struct table_instance *old,
				  struct table_instance *new, bool ufid)
{}

static struct table_instance *table_instance_rehash(struct table_instance *ti,
						    int n_buckets, bool ufid)
{}

int ovs_flow_tbl_flush(struct flow_table *flow_table)
{}

static u32 flow_hash(const struct sw_flow_key *key,
		     const struct sw_flow_key_range *range)
{
	const u32 *hash_key = (const u32 *)((const u8 *)key + range->start);

	return jhash2(hash_key, range_n_bytes(range) >> 2, 0);
}

static int flow_key_start(const struct sw_flow_key *key)
{}

static bool cmp_key(const struct sw_flow_key *key1,
		    const struct sw_flow_key *key2,
		    int key_start, int key_end)
{}

static bool flow_cmp_masked_key(const struct sw_flow *flow,
				const struct sw_flow_key *key,
				const struct sw_flow_key_range *range)
{}

static bool ovs_flow_cmp_unmasked_key(const struct sw_flow *flow,
				      const struct sw_flow_match *match)
{}

static struct sw_flow *masked_flow_lookup(struct table_instance *ti,
					  const struct sw_flow_key *unmasked,
					  const struct sw_flow_mask *mask,
					  u32 *n_mask_hit)
{}

/* Flow lookup does a full lookup on the flow table.  It starts with
 * the mask at the index passed in via *index and, on a miss, falls
 * back to trying the remaining masks in the array (see the simplified
 * sketch after this function).
 * This function MUST be called with BH disabled due to the use of
 * per-CPU variables.
 */
static struct sw_flow *flow_lookup(struct flow_table *tbl,
				   struct table_instance *ti,
				   struct mask_array *ma,
				   const struct sw_flow_key *key,
				   u32 *n_mask_hit,
				   u32 *n_cache_hit,
				   u32 *index)
{}
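
/* A simplified sketch of the probing order described above, assuming
 * the upstream mask_array layout (->max and the ->masks[] pointer
 * array); the cached-index fast path and the usage/hit accounting are
 * omitted.  Illustrative only, not upstream code.
 */
static struct sw_flow *flow_lookup_sketch(struct table_instance *ti,
					  struct mask_array *ma,
					  const struct sw_flow_key *key,
					  u32 *n_mask_hit, u32 *index)
{
	int i;

	/* Try each mask in turn and remember the index that hit, so the
	 * caller can start there on the next lookup.
	 */
	for (i = 0; i < ma->max; i++) {
		struct sw_flow_mask *mask;
		struct sw_flow *flow;

		mask = rcu_dereference_ovsl(ma->masks[i]);
		if (!mask)
			continue;

		flow = masked_flow_lookup(ti, key, mask, n_mask_hit);
		if (flow) {
			*index = i;
			return flow;
		}
	}

	return NULL;
}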

/* mask_cache maps a flow to a probable mask.  The cache is not tightly
 * coupled to the mask list, so updates to the mask list can leave
 * stale entries behind in the mask cache.
 * The cache is per CPU and is divided into MC_HASH_SEGS segments; on a
 * hash collision, the entry is hashed into the next segment (see the
 * illustrative helper after this function for how the segments are
 * derived from the skb hash).
 */
struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl,
					  const struct sw_flow_key *key,
					  u32 skb_hash,
					  u32 *n_mask_hit,
					  u32 *n_cache_hit)
{}
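
/* An illustrative helper (not upstream code) showing how a 32-bit skb
 * hash is carved into MC_HASH_SEGS probe positions: each segment
 * consumes MC_HASH_SHIFT bits of the hash, so a collision in one
 * segment simply moves the entry to the slot named by the next one.
 * 'cache_size' is assumed to be a power of two.
 */
static inline u32 mc_hash_segment(u32 skb_hash, int seg, u32 cache_size)
{
	return (skb_hash >> (seg * MC_HASH_SHIFT)) & (cache_size - 1);
}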

struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *tbl,
				    const struct sw_flow_key *key)
{}

struct sw_flow *ovs_flow_tbl_lookup_exact(struct flow_table *tbl,
					  const struct sw_flow_match *match)
{}

static u32 ufid_hash(const struct sw_flow_id *sfid)
{
	return jhash(sfid->ufid, sfid->ufid_len, 0);
}

static bool ovs_flow_cmp_ufid(const struct sw_flow *flow,
			      const struct sw_flow_id *sfid)
{}

bool ovs_flow_cmp(const struct sw_flow *flow,
		  const struct sw_flow_match *match)
{}

struct sw_flow *ovs_flow_tbl_lookup_ufid(struct flow_table *tbl,
					 const struct sw_flow_id *ufid)
{}

int ovs_flow_tbl_num_masks(const struct flow_table *table)
{}

u32 ovs_flow_tbl_masks_cache_size(const struct flow_table *table)
{}

static struct table_instance *table_instance_expand(struct table_instance *ti,
						    bool ufid)
{}

/* Must be called with OVS mutex held. */
void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow)
{}

static struct sw_flow_mask *mask_alloc(void)
{}

static bool mask_equal(const struct sw_flow_mask *a,
		       const struct sw_flow_mask *b)
{}

static struct sw_flow_mask *flow_mask_find(const struct flow_table *tbl,
					   const struct sw_flow_mask *mask)
{}

/* Add 'mask' into the mask list, if it is not already there. */
static int flow_mask_insert(struct flow_table *tbl, struct sw_flow *flow,
			    const struct sw_flow_mask *new)
{}
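
/* A minimal sketch of the insert-if-absent logic above, assuming the
 * upstream ref_count field and that mask_alloc() returns a kmalloc'ed
 * mask holding a single reference; error unwinding is simplified.
 * Illustrative only, not upstream code.
 */
static int flow_mask_insert_sketch(struct flow_table *tbl,
				   struct sw_flow *flow,
				   const struct sw_flow_mask *new)
{
	struct sw_flow_mask *mask;

	mask = flow_mask_find(tbl, new);
	if (!mask) {
		/* No equal mask exists yet: allocate one, copy the
		 * key and range, and publish it in the mask array.
		 */
		mask = mask_alloc();
		if (!mask)
			return -ENOMEM;

		mask->key = new->key;
		mask->range = new->range;

		if (tbl_mask_array_add_mask(tbl, mask)) {
			kfree(mask);
			return -ENOMEM;
		}
	} else {
		/* Reuse the existing mask; one reference per flow. */
		mask->ref_count++;
	}

	flow->mask = mask;
	return 0;
}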

/* Must be called with OVS mutex held. */
static void flow_key_insert(struct flow_table *table, struct sw_flow *flow)
{}

/* Must be called with OVS mutex held. */
static void flow_ufid_insert(struct flow_table *table, struct sw_flow *flow)
{}

/* Must be called with OVS mutex held. */
int ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow,
			const struct sw_flow_mask *mask)
{}

static int compare_mask_and_count(const void *a, const void *b)
{}
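
/* Illustrative only: rebalancing sorts the masks so that the most-hit
 * masks are probed first.  struct mask_count_sketch is a hypothetical
 * stand-in for the (mask index, usage counter) pairs the comparator
 * would run over with sort() from <linux/sort.h>.
 */
struct mask_count_sketch {
	int index;
	u64 counter;
};

static int compare_mask_and_count_sketch(const void *a, const void *b)
{
	const struct mask_count_sketch *mc_a = a;
	const struct mask_count_sketch *mc_b = b;

	/* Descending order of hit counter. */
	return (s64)mc_b->counter - (s64)mc_a->counter;
}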

/* Must be called with OVS mutex held. */
void ovs_flow_masks_rebalance(struct flow_table *table)
{}

/* Initializes the flow module.
 * Returns zero if successful or a negative error code. */
int ovs_flow_init(void)
{}
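
/* A minimal sketch of what initialization entails, assuming the two
 * caches declared at the top of this file and a struct sw_flow_stats
 * per-flow statistics type; the sizes and flags are illustrative, not
 * the upstream values.
 */
static int ovs_flow_init_sketch(void)
{
	flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow),
				       0, 0, NULL);
	if (!flow_cache)
		return -ENOMEM;

	flow_stats_cache = kmem_cache_create("sw_flow_stats",
					     sizeof(struct sw_flow_stats),
					     0, SLAB_HWCACHE_ALIGN, NULL);
	if (!flow_stats_cache) {
		kmem_cache_destroy(flow_cache);
		flow_cache = NULL;
		return -ENOMEM;
	}

	return 0;
}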

/* Uninitializes the flow module. */
void ovs_flow_exit(void)
{
	kmem_cache_destroy(flow_stats_cache);
	kmem_cache_destroy(flow_cache);
}