/* linux/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c */

/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/netdevice.h>
#include <net/bonding.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/eswitch.h>
#include <linux/mlx5/vport.h>
#include "lib/devcom.h"
#include "mlx5_core.h"
#include "eswitch.h"
#include "esw/acl/ofld.h"
#include "lag.h"
#include "mp.h"
#include "mpesw.h"

/* NOTE(review): the original anonymous enum's constants were elided from this
 * skeleton. An empty enumerator list ("enum {};") is a C constraint violation,
 * so the invalid declaration is removed here; restore the real constants from
 * the full driver source.
 */

/* General purpose, use for short periods of time.
 * Beware of lock dependencies (preferably, no locks should be acquired
 * under it).
 */
/* NOTE(review): exact data protected by lag_lock is not visible in this
 * skeleton (all bodies elided) — confirm against the full source.
 */
static DEFINE_SPINLOCK(lag_lock);

/* Stub: body elided in this skeleton. Presumably maps (mode, flags) to a FW
 * port-selection mode value — TODO restore real implementation.
 * Explicit return avoids UB from a value-less non-void function.
 */
static int get_port_sel_mode(enum mlx5_lag_mode mode, unsigned long flags)
{
	return 0; /* TODO: restore real implementation */
}

/* Stub: body elided. Presumably returns a bitmask of active LAG ports —
 * TODO restore. Explicit return avoids UB.
 */
static u8 lag_active_port_bits(struct mlx5_lag *ldev)
{
	return 0; /* TODO: restore real implementation */
}

/* Stub: body elided. Presumably issues the CREATE_LAG FW command —
 * TODO restore. Explicit return avoids UB.
 */
static int mlx5_cmd_create_lag(struct mlx5_core_dev *dev, u8 *ports, int mode,
			       unsigned long flags)
{
	return 0; /* TODO: restore real implementation */
}

/* Stub: body elided. Presumably issues the MODIFY_LAG FW command —
 * TODO restore. Explicit return avoids UB.
 */
static int mlx5_cmd_modify_lag(struct mlx5_core_dev *dev, u8 num_ports,
			       u8 *ports)
{
	return 0; /* TODO: restore real implementation */
}

/* Stub: body elided — TODO restore. Explicit return avoids UB.
 * Fix: EXPORT_SYMBOL() requires the symbol name as its argument.
 */
int mlx5_cmd_create_vport_lag(struct mlx5_core_dev *dev)
{
	return 0; /* TODO: restore real implementation */
}
EXPORT_SYMBOL(mlx5_cmd_create_vport_lag);

/* Stub: body elided — TODO restore. Explicit return avoids UB.
 * Fix: EXPORT_SYMBOL() requires the symbol name as its argument.
 */
int mlx5_cmd_destroy_vport_lag(struct mlx5_core_dev *dev)
{
	return 0; /* TODO: restore real implementation */
}
EXPORT_SYMBOL(mlx5_cmd_destroy_vport_lag);

/* Stub: body elided in this skeleton. Presumably fills @ports with the
 * tx-disabled ports from @tracker and counts them in *@num_disabled —
 * TODO confirm against the full driver source.
 */
static void mlx5_infer_tx_disabled(struct lag_tracker *tracker, u8 num_ports,
				   u8 *ports, int *num_disabled)
{}

/* Stub: body elided in this skeleton. Presumably fills @ports with the
 * tx-enabled ports from @tracker and counts them in *@num_enabled —
 * TODO confirm against the full driver source.
 */
void mlx5_infer_tx_enabled(struct lag_tracker *tracker, u8 num_ports,
			   u8 *ports, int *num_enabled)
{}

/* Stub: body elided in this skeleton. Presumably logs the current LAG
 * port mapping for @dev — TODO confirm against the full driver source.
 */
static void mlx5_lag_print_mapping(struct mlx5_core_dev *dev,
				   struct mlx5_lag *ldev,
				   struct lag_tracker *tracker,
				   unsigned long flags)
{}

static int mlx5_lag_netdev_event(struct notifier_block *this,
				 unsigned long event, void *ptr);
static void mlx5_do_bond_work(struct work_struct *work);

/* Stub: body elided. Kref release callback — presumably frees the
 * struct mlx5_lag embedding @ref. TODO confirm against full source.
 */
static void mlx5_ldev_free(struct kref *ref)
{}

/* Stub: body elided. Presumably drops a reference on @ldev
 * (kref_put -> mlx5_ldev_free). TODO confirm against full source.
 */
static void mlx5_ldev_put(struct mlx5_lag *ldev)
{}

/* Stub: body elided. Presumably takes a reference on @ldev (kref_get).
 * TODO confirm against full source.
 */
static void mlx5_ldev_get(struct mlx5_lag *ldev)
{}

static struct mlx5_lag *mlx5_lag_dev_alloc(struct mlx5_core_dev *dev)
{}

/* Stub: body elided. Presumably returns the port index of @ndev within
 * @ldev, or a negative value when not found — TODO restore.
 * Explicit return avoids UB; -1 signals "not found".
 */
int mlx5_lag_dev_get_netdev_idx(struct mlx5_lag *ldev,
				struct net_device *ndev)
{
	return -1; /* TODO: restore real implementation */
}

static bool __mlx5_lag_is_roce(struct mlx5_lag *ldev)
{}

static bool __mlx5_lag_is_sriov(struct mlx5_lag *ldev)
{}

/* Create a mapping between steering slots and active ports.
 * As we have ldev->buckets slots per port first assume the native
 * mapping should be used.
 * If there are ports that are disabled fill the relevant slots
 * with mapping that points to active ports.
 */
/* Stub: body elided in this skeleton; the contract is described by the
 * comment above — TODO restore from the full driver source.
 */
static void mlx5_infer_tx_affinity_mapping(struct lag_tracker *tracker,
					   u8 num_ports,
					   u8 buckets,
					   u8 *ports)
{}

static bool mlx5_lag_has_drop_rule(struct mlx5_lag *ldev)
{}

/* Stub: body elided. Presumably removes previously-installed drop rules on
 * @ldev's ports — TODO confirm against the full driver source.
 */
static void mlx5_lag_drop_rule_cleanup(struct mlx5_lag *ldev)
{}

/* Stub: body elided. Presumably installs drop rules according to the bond
 * state in @tracker — TODO confirm against the full driver source.
 */
static void mlx5_lag_drop_rule_setup(struct mlx5_lag *ldev,
				     struct lag_tracker *tracker)
{}

/* Stub: body elided. Presumably issues a FW command changing the active
 * port bitmap — TODO restore. Explicit return avoids UB.
 */
static int mlx5_cmd_modify_active_port(struct mlx5_core_dev *dev, u8 ports)
{
	return 0; /* TODO: restore real implementation */
}

/* Stub: body elided. Presumably applies a new port mapping to an active
 * LAG — TODO restore. Explicit return avoids UB.
 */
static int _mlx5_modify_lag(struct mlx5_lag *ldev, u8 *ports)
{
	return 0; /* TODO: restore real implementation */
}

/* Stub: body elided. Presumably recomputes the tx mapping from @tracker and
 * calls _mlx5_modify_lag() — TODO confirm against the full driver source.
 */
void mlx5_modify_lag(struct mlx5_lag *ldev,
		     struct lag_tracker *tracker)
{}

/* Stub: body elided. Presumably selects the port-selection mode bits in
 * *@flags for RoCE LAG — TODO restore. Explicit return avoids UB.
 */
static int mlx5_lag_set_port_sel_mode_roce(struct mlx5_lag *ldev,
					   unsigned long *flags)
{
	return 0; /* TODO: restore real implementation */
}

/* Stub: body elided. Presumably selects the port-selection mode bits in
 * *@flags for switchdev/offloads LAG — TODO confirm against full source.
 */
static void mlx5_lag_set_port_sel_mode_offloads(struct mlx5_lag *ldev,
						struct lag_tracker *tracker,
						enum mlx5_lag_mode mode,
						unsigned long *flags)
{}

/* Stub: body elided. Presumably computes the full LAG mode flag set into
 * *@flags — TODO restore. Explicit return avoids UB.
 */
static int mlx5_lag_set_flags(struct mlx5_lag *ldev, enum mlx5_lag_mode mode,
			      struct lag_tracker *tracker, bool shared_fdb,
			      unsigned long *flags)
{
	return 0; /* TODO: restore real implementation */
}

/* Stub: body elided. Presumably returns a human-readable name for the
 * port-selection mode — TODO restore. Returning "" (never NULL) keeps
 * printf-style callers safe and avoids UB from a missing return.
 */
char *mlx5_get_str_port_sel_mode(enum mlx5_lag_mode mode, unsigned long flags)
{
	return ""; /* TODO: restore real implementation */
}

/* Stub: body elided. Presumably sets up the shared-FDB eswitch pairing —
 * TODO restore. Explicit return avoids UB.
 */
static int mlx5_lag_create_single_fdb(struct mlx5_lag *ldev)
{
	return 0; /* TODO: restore real implementation */
}

/* Stub: body elided. Presumably creates the HW LAG via mlx5_cmd_create_lag()
 * and friends — TODO restore. Explicit return avoids UB.
 */
static int mlx5_create_lag(struct mlx5_lag *ldev,
			   struct lag_tracker *tracker,
			   enum mlx5_lag_mode mode,
			   unsigned long flags)
{
	return 0; /* TODO: restore real implementation */
}

/* Stub: body elided. Presumably activates LAG in the requested @mode —
 * TODO restore. Explicit return avoids UB.
 */
int mlx5_activate_lag(struct mlx5_lag *ldev,
		      struct lag_tracker *tracker,
		      enum mlx5_lag_mode mode,
		      bool shared_fdb)
{
	return 0; /* TODO: restore real implementation */
}

/* Stub: body elided. Presumably tears the LAG down — TODO restore.
 * Explicit return avoids UB.
 */
int mlx5_deactivate_lag(struct mlx5_lag *ldev)
{
	return 0; /* TODO: restore real implementation */
}

bool mlx5_lag_check_prereq(struct mlx5_lag *ldev)
{}

/* Stub: body elided. Presumably re-registers the member devices' interfaces —
 * TODO confirm against the full driver source.
 */
void mlx5_lag_add_devices(struct mlx5_lag *ldev)
{}

/* Stub: body elided. Presumably unregisters the member devices' interfaces —
 * TODO confirm against the full driver source.
 */
void mlx5_lag_remove_devices(struct mlx5_lag *ldev)
{}

/* Stub: body elided. Presumably deactivates LAG and restores per-port
 * operation — TODO confirm against the full driver source.
 */
void mlx5_disable_lag(struct mlx5_lag *ldev)
{}

static bool mlx5_shared_fdb_supported(struct mlx5_lag *ldev)
{}

static bool mlx5_lag_is_roce_lag(struct mlx5_lag *ldev)
{}

/* Stub: body elided. Presumably decides whether an active LAG needs its
 * mapping updated — TODO restore. Explicit return avoids UB.
 */
static bool mlx5_lag_should_modify_lag(struct mlx5_lag *ldev, bool do_bond)
{
	return false; /* TODO: restore real implementation */
}

/* Stub: body elided. Presumably decides whether the LAG must be torn down —
 * TODO restore. Explicit return avoids UB.
 */
static bool mlx5_lag_should_disable_lag(struct mlx5_lag *ldev, bool do_bond)
{
	return false; /* TODO: restore real implementation */
}

/* Stub: body elided. Presumably the main state machine deciding whether to
 * create, modify, or disable the LAG — TODO confirm against full source.
 */
static void mlx5_do_bond(struct mlx5_lag *ldev)
{}

/* The last mdev to unregister will destroy the workqueue before removing the
 * devcom component, and as all the mdevs use the same devcom component we are
 * guaranteed that the devcom is valid while the calling work is running.
 */
struct mlx5_devcom_comp_dev *mlx5_lag_get_devcom_comp(struct mlx5_lag *ldev)
{}

/* Stub: body elided. Presumably schedules mlx5_do_bond_work() after @delay —
 * TODO confirm against the full driver source.
 */
static void mlx5_queue_bond_work(struct mlx5_lag *ldev, unsigned long delay)
{}

/* Stub: body elided. Workqueue callback — presumably recovers the ldev from
 * @work and calls mlx5_do_bond() — TODO confirm against full source.
 */
static void mlx5_do_bond_work(struct work_struct *work)
{}

/* Stub: body elided. Presumably updates @tracker on NETDEV_CHANGEUPPER and
 * reports whether a re-bond is needed — TODO restore. Explicit return
 * avoids UB.
 */
static int mlx5_handle_changeupper_event(struct mlx5_lag *ldev,
					 struct lag_tracker *tracker,
					 struct netdev_notifier_changeupper_info *info)
{
	return 0; /* TODO: restore real implementation */
}

/* Stub: body elided. Presumably updates @tracker on NETDEV_CHANGELOWERSTATE —
 * TODO restore. Explicit return avoids UB.
 */
static int mlx5_handle_changelowerstate_event(struct mlx5_lag *ldev,
					      struct lag_tracker *tracker,
					      struct net_device *ndev,
					      struct netdev_notifier_changelowerstate_info *info)
{
	return 0; /* TODO: restore real implementation */
}

/* Stub: body elided. Presumably refreshes @tracker on NETDEV_CHANGEINFODATA —
 * TODO restore. Explicit return avoids UB.
 */
static int mlx5_handle_changeinfodata_event(struct mlx5_lag *ldev,
					    struct lag_tracker *tracker,
					    struct net_device *ndev)
{
	return 0; /* TODO: restore real implementation */
}

/* this handler is always registered to netdev events */
static int mlx5_lag_netdev_event(struct notifier_block *this,
				 unsigned long event, void *ptr)
{}

/* Stub: body elided. Presumably records @netdev as @dev's port netdev in
 * @ldev — TODO confirm against the full driver source.
 */
static void mlx5_ldev_add_netdev(struct mlx5_lag *ldev,
				 struct mlx5_core_dev *dev,
				 struct net_device *netdev)
{}

/* Stub: body elided. Presumably clears @netdev from @ldev's port table —
 * TODO confirm against the full driver source.
 */
static void mlx5_ldev_remove_netdev(struct mlx5_lag *ldev,
				    struct net_device *netdev)
{}

/* Stub: body elided. Presumably registers @dev as a member of @ldev —
 * TODO confirm against the full driver source.
 */
static void mlx5_ldev_add_mdev(struct mlx5_lag *ldev,
			       struct mlx5_core_dev *dev)
{}

/* Stub: body elided. Presumably removes @dev from @ldev's member table —
 * TODO confirm against the full driver source.
 */
static void mlx5_ldev_remove_mdev(struct mlx5_lag *ldev,
				  struct mlx5_core_dev *dev)
{}

/* Must be called with HCA devcom component lock held */
/* Stub: body elided. Presumably finds-or-creates the ldev for @dev and adds
 * the device to it — TODO restore. Explicit return avoids UB.
 */
static int __mlx5_lag_dev_add_mdev(struct mlx5_core_dev *dev)
{
	return 0; /* TODO: restore real implementation */
}

/* Stub: body elided. Presumably detaches @dev from its ldev and drops the
 * reference — TODO confirm against the full driver source.
 */
void mlx5_lag_remove_mdev(struct mlx5_core_dev *dev)
{}

/* Stub: body elided. Presumably attaches @dev to a LAG device (via
 * __mlx5_lag_dev_add_mdev) — TODO confirm against the full driver source.
 */
void mlx5_lag_add_mdev(struct mlx5_core_dev *dev)
{}

/* Stub: body elided. Presumably removes @netdev from @dev's ldev tracking —
 * TODO confirm against the full driver source.
 */
void mlx5_lag_remove_netdev(struct mlx5_core_dev *dev,
			    struct net_device *netdev)
{}

/* Stub: body elided. Presumably registers @netdev with @dev's ldev and may
 * trigger bond re-evaluation — TODO confirm against the full driver source.
 */
void mlx5_lag_add_netdev(struct mlx5_core_dev *dev,
			 struct net_device *netdev)
{}

bool mlx5_lag_is_roce(struct mlx5_core_dev *dev)
{}
EXPORT_SYMBOL();

bool mlx5_lag_is_active(struct mlx5_core_dev *dev)
{}
EXPORT_SYMBOL();

bool mlx5_lag_mode_is_hash(struct mlx5_core_dev *dev)
{}
EXPORT_SYMBOL();

bool mlx5_lag_is_master(struct mlx5_core_dev *dev)
{}
EXPORT_SYMBOL();

bool mlx5_lag_is_sriov(struct mlx5_core_dev *dev)
{}
EXPORT_SYMBOL();

bool mlx5_lag_is_shared_fdb(struct mlx5_core_dev *dev)
{}
EXPORT_SYMBOL();

/* Stub: body elided. Presumably blocks LAG state changes for @dev —
 * TODO confirm against the full driver source.
 */
void mlx5_lag_disable_change(struct mlx5_core_dev *dev)
{}

/* Stub: body elided. Presumably re-enables LAG state changes for @dev —
 * TODO confirm against the full driver source.
 */
void mlx5_lag_enable_change(struct mlx5_core_dev *dev)
{}

struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev)
{}
EXPORT_SYMBOL();

/* Stub: body elided — TODO restore. Explicit return avoids UB.
 * Fix: EXPORT_SYMBOL() requires the symbol name as its argument.
 */
u8 mlx5_lag_get_slave_port(struct mlx5_core_dev *dev,
			   struct net_device *slave)
{
	return 0; /* TODO: restore real implementation */
}
EXPORT_SYMBOL(mlx5_lag_get_slave_port);

/* Stub: body elided — TODO restore. Explicit return avoids UB.
 * Fix: EXPORT_SYMBOL() requires the symbol name as its argument.
 */
u8 mlx5_lag_get_num_ports(struct mlx5_core_dev *dev)
{
	return 0; /* TODO: restore real implementation */
}
EXPORT_SYMBOL(mlx5_lag_get_num_ports);

struct mlx5_core_dev *mlx5_lag_get_next_peer_mdev(struct mlx5_core_dev *dev, int *i)
{}
EXPORT_SYMBOL();

/* Stub: body elided. Presumably aggregates congestion counters across LAG
 * members into @values — TODO restore. Explicit return avoids UB.
 * Fix: EXPORT_SYMBOL() requires the symbol name as its argument.
 */
int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
				 u64 *values,
				 int num_counters,
				 size_t *offsets)
{
	return 0; /* TODO: restore real implementation */
}
EXPORT_SYMBOL(mlx5_lag_query_cong_counters);