/* linux/drivers/infiniband/hw/mlx4/mcg.c */

/*
 * Copyright (c) 2012 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <rdma/ib_mad.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_sa.h>

#include <linux/mlx4/cmd.h>
#include <linux/rbtree.h>
#include <linux/delay.h>

#include "mlx4_ib.h"

/*
 * NOTE(review): the values of these constants were stripped in this copy,
 * leaving the macros expanding to nothing (which breaks any arithmetic or
 * array-size use). Values below are restored per the upstream mlx4 driver
 * -- confirm against the tree this file was taken from.
 */
#define MAX_VFS			80	/* max virtual functions tracked per port */
#define MAX_PEND_REQS_PER_FUNC	4	/* cap on queued join/leave reqs per VF */
#define MAD_TIMEOUT_MS		2100	/* SA MAD response timeout, milliseconds */

/*
 * Logging helpers.
 * NOTE(review): all five are stubbed to expand to nothing in this copy, so
 * every mcg_* call site compiles to an empty statement and no log output is
 * produced. The upstream versions presumably wrap pr_warn()/pr_err()/
 * pr_debug() and the *_group variants prefix the group's MGID -- confirm
 * before relying on driver log output.
 */
#define mcg_warn(fmt, arg...)
#define mcg_error(fmt, arg...)
#define mcg_warn_group(group, format, arg...)

#define mcg_debug_group(group, format, arg...)

#define mcg_error_group(group, format, arg...)

/* Zero-initialized GID; presumably the all-zero MGID sentinel used to detect
 * "join with unspecified group" requests (see search_relocate_mgid0_group
 * below) -- confirm against the full driver source. */
static union ib_gid mgid0;

/* Workqueue; name and the clean_work/mcg_clean_task definitions below suggest
 * it runs asynchronous per-port cleanup -- confirm upstream. */
static struct workqueue_struct *clean_wq;

/* NOTE(review): enumerator lists are stripped in this copy ({}), so the
 * request/group state machines cannot be inferred from here. */
enum mcast_state {};

enum mcast_group_state {};

/* Per-slave membership record (fields stripped in this copy). */
struct mcast_member {};

/*
 * Wire-format IB SA MCMemberRecord as carried in a MAD payload (fields
 * stripped in this copy). Fix: __aligned() requires an alignment argument;
 * 4-byte alignment matches the upstream definition of this record --
 * confirm against the tree this file was taken from.
 */
struct ib_sa_mcmember_data {} __packed __aligned(4);

/* Per-MGID multicast group state (fields stripped in this copy). */
struct mcast_group {};

/* A queued join/leave request from a slave (fields stripped in this copy). */
struct mcast_req {};


/* NOTE(review): stubbed to a no-op in this copy; the name suggests it
 * decrements a refcount with an underflow warning -- confirm upstream. */
#define safe_atomic_dec(ref)

/* Map @state to a human-readable name (body stripped in this copy;
 * presumably used by sysfs_show_group below). */
static const char *get_state_string(enum mcast_group_state state)
{}

/* Look up a group by @mgid in @ctx; rbtree.h is included above, so this is
 * presumably an rbtree search -- body stripped, confirm upstream. */
static struct mcast_group *mcast_find(struct mlx4_ib_demux_ctx *ctx,
				      union ib_gid *mgid)
{}

/* Insert @group into @ctx; presumably returns an existing group on MGID
 * collision so the caller can use it instead -- body stripped, confirm. */
static struct mcast_group *mcast_insert(struct mlx4_ib_demux_ctx *ctx,
					struct mcast_group *group)
{}

/* Forward @mad toward the SA on the physical wire (body stripped in this
 * copy). Returns 0 or a negative errno, presumably. */
static int send_mad_to_wire(struct mlx4_ib_demux_ctx *ctx, struct ib_mad *mad)
{}

/* Deliver @mad to virtual function @slave (body stripped in this copy). */
static int send_mad_to_slave(int slave, struct mlx4_ib_demux_ctx *ctx,
			     struct ib_mad *mad)
{}

/* Build and send a join request for @group to the SA, based on the slave's
 * @sa_mad (body stripped in this copy). */
static int send_join_to_wire(struct mcast_group *group, struct ib_sa_mad *sa_mad)
{}

/* Build and send a leave for @group with the given @join_state bits
 * (body stripped in this copy). */
static int send_leave_to_wire(struct mcast_group *group, u8 join_state)
{}

/* Send a reply with @status to @slave for its original request @req_sa_mad
 * (body stripped in this copy). */
static int send_reply_to_slave(int slave, struct mcast_group *group,
		struct ib_sa_mad *req_sa_mad, u16 status)
{}

/* Compare @src_value against @dst_value under the SA selector encoded by the
 * component masks (body stripped in this copy; presumably a helper for
 * cmp_rec below). */
static int check_selector(ib_sa_comp_mask comp_mask,
			  ib_sa_comp_mask selector_mask,
			  ib_sa_comp_mask value_mask,
			  u8 src_value, u8 dst_value)
{}

/* Compare the MCMemberRecord fields selected by @comp_mask in @src and @dst;
 * u16 return is presumably a MAD status (0 on match) -- body stripped,
 * confirm upstream. */
static u16 cmp_rec(struct ib_sa_mcmember_data *src,
		   struct ib_sa_mcmember_data *dst, ib_sa_comp_mask comp_mask)
{}

/* Release a reference on @group; returns 1 if this was the last release and
 * the group was destroyed. Timeout work is canceled synchronously, except
 * presumably when called with @from_timeout_handler set (to avoid
 * self-deadlock) -- body stripped in this copy, confirm upstream. */
static int release_group(struct mcast_group *group, int from_timeout_handler)
{}

/* Adjust @group's per-join-state membership counters by +/-1 for each bit in
 * @join_state, in the direction given by @inc (body stripped in this copy). */
static void adjust_membership(struct mcast_group *group, u8 join_state, int inc)
{}

/* Compute the join-state bits that should be left on the wire for @group
 * (body stripped in this copy). */
static u8 get_leave_state(struct mcast_group *group)
{}

/* Record that @slave joined @group with @join_mask (body stripped). */
static int join_group(struct mcast_group *group, int slave, u8 join_mask)
{}

/* Record that @slave left @group with @leave_state (body stripped). */
static int leave_group(struct mcast_group *group, int slave, u8 leave_state)
{}

/* Validate a leave request: presumably checks @slave actually holds the
 * @leave_mask bits -- body stripped in this copy, confirm upstream. */
static int check_leave(struct mcast_group *group, int slave, u8 leave_mask)
{}

/* Delayed-work callback fired when the SA does not answer within the MAD
 * timeout (body stripped in this copy). */
static void mlx4_ib_mcg_timeout_handler(struct work_struct *work)
{}

/* Process a queued leave request @req against @group (body stripped;
 * int return is presumably 0 when @req was consumed -- confirm). */
static int handle_leave_req(struct mcast_group *group, u8 leave_mask,
			    struct mcast_req *req)
{}

/* Process a queued join request @req against @group (body stripped). */
static int handle_join_req(struct mcast_group *group, u8 join_mask,
			   struct mcast_req *req)
{}

/* Workqueue callback driving a group's request queue / state machine
 * (body stripped in this copy). */
static void mlx4_ib_mcg_work_handler(struct work_struct *work)
{}

/* Find the pending group that was created under the all-zero MGID (see mgid0
 * above) for transaction @tid and re-key it to @new_mgid -- body stripped in
 * this copy, confirm upstream. */
static struct mcast_group *search_relocate_mgid0_group(struct mlx4_ib_demux_ctx *ctx,
						       __be64 tid,
						       union ib_gid *new_mgid)
{}

/* Forward declaration; defined below. */
static ssize_t sysfs_show_group(struct device *dev,
		struct device_attribute *attr, char *buf);

/* Find the group for @mgid in @ctx, optionally creating it when @create is
 * non-zero; presumably takes a reference for the caller (released via
 * release_group) -- body stripped in this copy, confirm upstream. */
static struct mcast_group *acquire_group(struct mlx4_ib_demux_ctx *ctx,
					 union ib_gid *mgid, int create)
{}

/* Append @req to its group's pending queue and kick the work handler
 * (body stripped in this copy). */
static void queue_req(struct mcast_req *req)
{}

/* Entry point for SA MCMember MADs arriving from the wire, to be demuxed to
 * the owning slave(s) (body stripped in this copy). Non-static: part of the
 * driver's internal API declared in mlx4_ib.h. */
int mlx4_ib_mcg_demux_handler(struct ib_device *ibdev, int port, int slave,
			      struct ib_sa_mad *mad)
{}

/* Entry point for SA MADs sent by slave @slave, to be multiplexed onto the
 * wire (body stripped in this copy). */
int mlx4_ib_mcg_multiplex_handler(struct ib_device *ibdev, int port,
				  int slave, struct ib_sa_mad *sa_mad)
{}

/* sysfs show callback dumping a group's state into @buf (body stripped;
 * presumably uses get_state_string above -- confirm). */
static ssize_t sysfs_show_group(struct device *dev,
				struct device_attribute *attr, char *buf)
{}

/* Initialize the per-port demux context @ctx for MCG handling (body stripped
 * in this copy). Returns 0 or a negative errno, presumably. */
int mlx4_ib_mcg_port_init(struct mlx4_ib_demux_ctx *ctx)
{}

/* Forcibly tear down @group regardless of pending state (body stripped). */
static void force_clean_group(struct mcast_group *group)
{}

/* Synchronous part of port cleanup; @destroy_wq presumably selects whether
 * the port's workqueue is destroyed too -- body stripped, confirm. */
static void _mlx4_ib_mcg_port_cleanup(struct mlx4_ib_demux_ctx *ctx, int destroy_wq)
{}

/* Work item wrapper used to run port cleanup on clean_wq (fields stripped). */
struct clean_work {};

/* Workqueue callback: presumably unpacks a struct clean_work and calls
 * _mlx4_ib_mcg_port_cleanup -- body stripped in this copy. */
static void mcg_clean_task(struct work_struct *work)
{}

/* Public port-cleanup entry point; presumably runs the cleanup directly or
 * defers it via clean_wq -- body stripped, confirm upstream. */
void mlx4_ib_mcg_port_cleanup(struct mlx4_ib_demux_ctx *ctx, int destroy_wq)
{}

/* Turn @req's MAD into a leave request in place (body stripped). */
static void build_leave_mad(struct mcast_req *req)
{}


/* Drop all pending requests queued by VF @vf on @group (body stripped). */
static void clear_pending_reqs(struct mcast_group *group, int vf)
{}

/* Queue a synthesized leave ("deleting") request for @slave on @group.
 * NOTE(review): the "deleteing" spelling is kept as-is -- it is this
 * function's established name; renaming would break callers. */
static int push_deleteing_req(struct mcast_group *group, int slave)
{}

/* Remove all multicast state owned by VF @slave on @ctx's port, presumably
 * by issuing leaves for its memberships -- body stripped, confirm. */
void clean_vf_mcast(struct mlx4_ib_demux_ctx *ctx, int slave)
{}


/* Module-wide MCG init; presumably allocates clean_wq -- body stripped in
 * this copy, confirm upstream. Returns 0 or a negative errno. */
int mlx4_ib_mcg_init(void)
{}

/* Module-wide MCG teardown; presumably flushes and destroys clean_wq --
 * body stripped in this copy, confirm upstream. */
void mlx4_ib_mcg_destroy(void)
{}