// linux/drivers/infiniband/hw/mlx5/devx.c

// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2018, Mellanox Technologies inc.  All rights reserved.
 */

#include <rdma/ib_user_verbs.h>
#include <rdma/ib_verbs.h>
#include <rdma/uverbs_types.h>
#include <rdma/uverbs_ioctl.h>
#include <rdma/mlx5_user_ioctl_cmds.h>
#include <rdma/mlx5_user_ioctl_verbs.h>
#include <rdma/ib_umem.h>
#include <rdma/uverbs_std_types.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/fs.h>
#include "mlx5_ib.h"
#include "devx.h"
#include "qp.h"
#include <linux/xarray.h>

#define UVERBS_MODULE_NAME mlx5_ib
#include <rdma/uverbs_named_ioctl.h>

static void dispatch_event_fd(struct list_head *fd_list, const void *data);

enum devx_obj_flags {
	DEVX_OBJ_FLAGS_INDIRECT_MKEY = 1 << 0,
	DEVX_OBJ_FLAGS_DCT = 1 << 1,
	DEVX_OBJ_FLAGS_CQ = 1 << 2,
};

struct devx_async_data {};

struct devx_async_event_data {};

/* first level XA value data structure */
struct devx_event {};

/* second level XA value data structure */
struct devx_obj_event {};
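
/*
 * Illustrative sketch (not the in-tree lookup): the event table is a
 * two-level XArray. The first level, keyed by event number, yields a
 * devx_event; its second-level XArray, keyed by object id, yields the
 * devx_obj_event that heads the subscription list. The field names used
 * here (event_xa, object_ids) are assumptions, since the struct bodies are
 * elided above; callers would need RCU or the table lock held.
 */
static struct devx_obj_event *
devx_lookup_obj_event_sketch(struct mlx5_devx_event_table *table,
			     unsigned long event_num, u32 obj_id)
{
	struct devx_event *event;

	/* Level 1: event number -> devx_event */
	event = xa_load(&table->event_xa, event_num);
	if (!event)
		return NULL;

	/* Level 2: object id -> devx_obj_event */
	return xa_load(&event->object_ids, obj_id);
}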

struct devx_event_subscription {};

struct devx_async_event_file {};

struct devx_umem {};

struct devx_umem_reg_cmd {};

static struct mlx5_ib_ucontext *
devx_ufile2uctx(const struct uverbs_attr_bundle *attrs)
{}

int mlx5_ib_devx_create(struct mlx5_ib_dev *dev, bool is_user)
{}

void mlx5_ib_devx_destroy(struct mlx5_ib_dev *dev, u16 uid)
{}

static bool is_legacy_unaffiliated_event_num(u16 event_num)
{}

static bool is_legacy_obj_event_num(u16 event_num)
{}

static u16 get_legacy_obj_type(u16 opcode)
{}

static u16 get_dec_obj_type(struct devx_obj *obj, u16 event_num)
{}

static u16 get_event_obj_type(unsigned long event_type, struct mlx5_eqe *eqe)
{}

static u32 get_dec_obj_id(u64 obj_id)
{}

/*
 * As the obj_id in the firmware is not globally unique, the object type
 * must be considered when checking for a valid object id.
 * For that, the opcode of the creator command is encoded as part of the
 * obj_id.
 */
static u64 get_enc_obj_id(u32 opcode, u32 obj_id)
{}
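
/*
 * A minimal sketch of the encoding described above (helper names are
 * hypothetical): the creator opcode occupies the high 32 bits and the
 * firmware obj_id the low 32 bits, so two objects of different types never
 * share an encoded id even if the firmware handed out the same 32-bit
 * obj_id for both.
 */
static u64 enc_obj_id_sketch(u32 opcode, u32 obj_id)
{
	return ((u64)opcode << 32) | obj_id;
}

static u32 dec_obj_id_sketch(u64 obj_id)
{
	/* Drop the opcode and recover the raw firmware obj_id. */
	return obj_id & 0xffffffff;
}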

static u32 devx_get_created_obj_id(const void *in, const void *out, u16 opcode)
{}

static u64 devx_get_obj_id(const void *in)
{}

static bool devx_is_valid_obj_id(struct uverbs_attr_bundle *attrs,
				 struct ib_uobject *uobj, const void *in)
{}

static void devx_set_umem_valid(const void *in)
{}

static bool devx_is_obj_create_cmd(const void *in, u16 *opcode)
{}

static bool devx_is_obj_modify_cmd(const void *in)
{}

static bool devx_is_obj_query_cmd(const void *in)
{}

static bool devx_is_whitelist_cmd(void *in)
{}

static int devx_get_uid(struct mlx5_ib_ucontext *c, void *cmd_in)
{}

static bool devx_is_general_cmd(void *in, struct mlx5_ib_dev *dev)
{}

static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_QUERY_EQN)(
	struct uverbs_attr_bundle *attrs)
{}

/*
 * Security note:
 * The hardware protection mechanism works as follows: each device object
 * that is subject to UAR doorbells (QP/SQ/CQ) gets a UAR ID (called
 * uar_page in the device specification manual) upon its creation. Upon a
 * doorbell, hardware fetches the object context for which the doorbell was
 * rung and validates that the UAR through which the doorbell was rung
 * matches the UAR ID of the object.
 * If there is no match, the doorbell is silently ignored by the hardware.
 * Of course, the user cannot ring a doorbell on a UAR that was not mapped
 * to it.
 * Now in devx, as the devx kernel does not manipulate the QP/SQ/CQ command
 * mailboxes (except tagging them with UID), we expose the user's UAR ID to
 * it, so it can embed the ID in these objects in the format the
 * specification expects. So the only thing the user can do is hurt itself
 * by creating a QP/SQ/CQ with a UAR ID other than its own, in which case
 * other users may ring a doorbell on its objects.
 * The consequence is that another user could then schedule a QP/SQ of the
 * buggy user for execution (i.e. insert it into the hardware schedule
 * queue or arm its CQ for event generation); no further harm is expected.
 */
static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_QUERY_UAR)(
	struct uverbs_attr_bundle *attrs)
{}
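
/*
 * Illustrative sketch of the flow the security note describes (the handler
 * name is hypothetical; the attribute ids and bfregn_to_uar_index() are
 * the driver's own): translate the caller's bfreg index into the
 * device-level UAR ID it may embed in its QP/SQ/CQ command mailboxes.
 */
static int devx_query_uar_sketch(struct uverbs_attr_bundle *attrs)
{
	struct mlx5_ib_ucontext *c = devx_ufile2uctx(attrs);
	struct mlx5_ib_dev *dev;
	u32 user_idx;
	s32 dev_idx;

	if (IS_ERR(c))
		return PTR_ERR(c);
	dev = to_mdev(c->ibucontext.device);

	if (uverbs_copy_from(&user_idx, attrs,
			     MLX5_IB_ATTR_DEVX_QUERY_UAR_USER_IDX))
		return -EFAULT;

	/* Reject indices outside the context's bfreg range. */
	dev_idx = bfregn_to_uar_index(dev, &c->bfregi, user_idx, true);
	if (dev_idx < 0)
		return dev_idx;

	if (uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_QUERY_UAR_DEV_IDX,
			   &dev_idx, sizeof(dev_idx)))
		return -EFAULT;

	return 0;
}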

static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OTHER)(
	struct uverbs_attr_bundle *attrs)
{}

static void devx_obj_build_destroy_cmd(void *in, void *out, void *din,
				       u32 *dinlen,
				       u32 *obj_id)
{}

static int devx_handle_mkey_indirect(struct devx_obj *obj,
				     struct mlx5_ib_dev *dev,
				     void *in, void *out)
{}

static int devx_handle_mkey_create(struct mlx5_ib_dev *dev,
				   struct devx_obj *obj,
				   void *in, int in_len)
{}

static void devx_cleanup_subscription(struct mlx5_ib_dev *dev,
				      struct devx_event_subscription *sub)
{}

static int devx_obj_cleanup(struct ib_uobject *uobject,
			    enum rdma_remove_reason why,
			    struct uverbs_attr_bundle *attrs)
{}

static void devx_cq_comp(struct mlx5_core_cq *mcq, struct mlx5_eqe *eqe)
{}

static bool is_apu_cq(struct mlx5_ib_dev *dev, const void *in)
{}

static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_CREATE)(
	struct uverbs_attr_bundle *attrs)
{}

static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_MODIFY)(
	struct uverbs_attr_bundle *attrs)
{}

static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_QUERY)(
	struct uverbs_attr_bundle *attrs)
{}

struct devx_async_event_queue {};

struct devx_async_cmd_event_file {};

static void devx_init_event_queue(struct devx_async_event_queue *ev_queue)
{}

static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_ASYNC_CMD_FD_ALLOC)(
	struct uverbs_attr_bundle *attrs)
{}

static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_ASYNC_EVENT_FD_ALLOC)(
	struct uverbs_attr_bundle *attrs)
{}

static void devx_query_callback(int status, struct mlx5_async_work *context)
{}

#define MAX_ASYNC_BYTES_IN_USE (1024 * 1024) /* 1MB */

static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_ASYNC_QUERY)(
	struct uverbs_attr_bundle *attrs)
{}

static void
subscribe_event_xa_dealloc(struct mlx5_devx_event_table *devx_event_table,
			   u32 key_level1,
			   bool is_level2,
			   u32 key_level2)
{}

static int
subscribe_event_xa_alloc(struct mlx5_devx_event_table *devx_event_table,
			 u32 key_level1,
			 bool is_level2,
			 u32 key_level2)
{}

static bool is_valid_events_legacy(int num_events, u16 *event_type_num_list,
				   struct devx_obj *obj)
{}

#define MAX_SUPP_EVENT_NUM
static bool is_valid_events(struct mlx5_core_dev *dev,
			    int num_events, u16 *event_type_num_list,
			    struct devx_obj *obj)
{}

#define MAX_NUM_EVENTS 16
static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_SUBSCRIBE_EVENT)(
	struct uverbs_attr_bundle *attrs)
{}

static int devx_umem_get(struct mlx5_ib_dev *dev, struct ib_ucontext *ucontext,
			 struct uverbs_attr_bundle *attrs,
			 struct devx_umem *obj, u32 access_flags)
{}

static unsigned int devx_umem_find_best_pgsize(struct ib_umem *umem,
					       unsigned long pgsz_bitmap)
{}

static int devx_umem_reg_cmd_alloc(struct mlx5_ib_dev *dev,
				   struct uverbs_attr_bundle *attrs,
				   struct devx_umem *obj,
				   struct devx_umem_reg_cmd *cmd,
				   int access)
{}

static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_UMEM_REG)(
	struct uverbs_attr_bundle *attrs)
{}

static int devx_umem_cleanup(struct ib_uobject *uobject,
			     enum rdma_remove_reason why,
			     struct uverbs_attr_bundle *attrs)
{}

static bool is_unaffiliated_event(struct mlx5_core_dev *dev,
				  unsigned long event_type)
{}

static u32 devx_get_obj_id_from_event(unsigned long event_type, void *data)
{}

static int deliver_event(struct devx_event_subscription *event_sub,
			 const void *data)
{}

static void dispatch_event_fd(struct list_head *fd_list,
			      const void *data)
{}

static int devx_event_notifier(struct notifier_block *nb,
			       unsigned long event_type, void *data)
{}

int mlx5_ib_devx_init(struct mlx5_ib_dev *dev)
{}

void mlx5_ib_devx_cleanup(struct mlx5_ib_dev *dev)
{}

static ssize_t devx_async_cmd_event_read(struct file *filp, char __user *buf,
					 size_t count, loff_t *pos)
{}

static __poll_t devx_async_cmd_event_poll(struct file *filp,
					      struct poll_table_struct *wait)
{}

static const struct file_operations devx_async_cmd_event_fops = {
	.owner	 = THIS_MODULE,
	.read	 = devx_async_cmd_event_read,
	.poll	 = devx_async_cmd_event_poll,
	.release = uverbs_uobject_fd_release,
	.llseek	 = no_llseek,
};

static ssize_t devx_async_event_read(struct file *filp, char __user *buf,
				     size_t count, loff_t *pos)
{}

static __poll_t devx_async_event_poll(struct file *filp,
				      struct poll_table_struct *wait)
{}

static void devx_free_subscription(struct rcu_head *rcu)
{}

static const struct file_operations devx_async_event_fops = {
	.owner	 = THIS_MODULE,
	.read	 = devx_async_event_read,
	.poll	 = devx_async_event_poll,
	.release = uverbs_uobject_fd_release,
	.llseek	 = no_llseek,
};

static void devx_async_cmd_event_destroy_uobj(struct ib_uobject *uobj,
					      enum rdma_remove_reason why)
{
	struct devx_async_cmd_event_file *comp_ev_file =
		container_of(uobj, struct devx_async_cmd_event_file,
			     uobj);
	struct devx_async_event_queue *ev_queue = &comp_ev_file->ev_queue;
	struct devx_async_data *entry, *tmp;

	/*
	 * Mark the queue as destroyed under the lock so that concurrent
	 * read()/poll() callers observe it, then wake any waiting readers.
	 */
	spin_lock_irq(&ev_queue->lock);
	ev_queue->is_destroyed = 1;
	spin_unlock_irq(&ev_queue->lock);
	wake_up_interruptible(&ev_queue->poll_wait);

	/* Wait for all outstanding async commands issued on this FD. */
	mlx5_cmd_cleanup_async_ctx(&comp_ev_file->async_ctx);

	/* No new completions can arrive; free everything still queued. */
	spin_lock_irq(&comp_ev_file->ev_queue.lock);
	list_for_each_entry_safe(entry, tmp,
				 &comp_ev_file->ev_queue.event_list, list) {
		list_del(&entry->list);
		kvfree(entry);
	}
	spin_unlock_irq(&comp_ev_file->ev_queue.lock);
}

static void devx_async_event_destroy_uobj(struct ib_uobject *uobj,
					  enum rdma_remove_reason why)
{
	struct devx_async_event_file *ev_file =
		container_of(uobj, struct devx_async_event_file,
			     uobj);
	struct devx_event_subscription *event_sub, *event_sub_tmp;
	struct mlx5_ib_dev *dev = ev_file->dev;

	/* Mark the file as destroyed so that read()/poll() fail from now on. */
	spin_lock_irq(&ev_file->lock);
	ev_file->is_destroyed = 1;

	/* Free any pending events that were queued but never read. */
	if (ev_file->omit_data) {
		struct devx_event_subscription *event_sub, *tmp;

		list_for_each_entry_safe(event_sub, tmp, &ev_file->event_list,
					 event_list)
			list_del_init(&event_sub->event_list);

	} else {
		struct devx_async_event_data *entry, *tmp;

		list_for_each_entry_safe(entry, tmp, &ev_file->event_list,
					 list) {
			list_del(&entry->list);
			kfree(entry);
		}
	}

	spin_unlock_irq(&ev_file->lock);
	wake_up_interruptible(&ev_file->poll_wait);

	mutex_lock(&dev->devx_event_table.event_xa_lock);
	/* delete the subscriptions which are related to this FD */
	list_for_each_entry_safe(event_sub, event_sub_tmp,
				 &ev_file->subscribed_events_list, file_list) {
		devx_cleanup_subscription(dev, event_sub);
		list_del_rcu(&event_sub->file_list);
		/*
		 * The read() path may still be walking this subscription
		 * under RCU, so defer freeing it for a grace period.
		 */
		call_rcu(&event_sub->rcu, devx_free_subscription);
	}
	mutex_unlock(&dev->devx_event_table.event_xa_lock);

	put_device(&dev->ib_dev.dev);
}

DECLARE_UVERBS_NAMED_METHOD();

DECLARE_UVERBS_NAMED_METHOD_DESTROY();

DECLARE_UVERBS_NAMED_METHOD();

DECLARE_UVERBS_NAMED_METHOD();

DECLARE_UVERBS_NAMED_METHOD();

DECLARE_UVERBS_NAMED_METHOD();

DECLARE_UVERBS_NAMED_METHOD_DESTROY();

DECLARE_UVERBS_NAMED_METHOD();

DECLARE_UVERBS_NAMED_METHOD();

DECLARE_UVERBS_NAMED_METHOD();

DECLARE_UVERBS_NAMED_METHOD();

DECLARE_UVERBS_GLOBAL_METHODS();

DECLARE_UVERBS_NAMED_OBJECT();

DECLARE_UVERBS_NAMED_OBJECT();

DECLARE_UVERBS_NAMED_METHOD();

DECLARE_UVERBS_NAMED_OBJECT();

DECLARE_UVERBS_NAMED_METHOD();

DECLARE_UVERBS_NAMED_OBJECT();

static bool devx_is_supported(struct ib_device *device)
{}

const struct uapi_definition mlx5_ib_devx_defs[] = {
	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(MLX5_IB_OBJECT_DEVX,
				      UAPI_DEF_IS_OBJ_SUPPORTED(devx_is_supported)),
	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(MLX5_IB_OBJECT_DEVX_OBJ,
				      UAPI_DEF_IS_OBJ_SUPPORTED(devx_is_supported)),
	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(MLX5_IB_OBJECT_DEVX_UMEM,
				      UAPI_DEF_IS_OBJ_SUPPORTED(devx_is_supported)),
	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(MLX5_IB_OBJECT_DEVX_ASYNC_CMD_FD,
				      UAPI_DEF_IS_OBJ_SUPPORTED(devx_is_supported)),
	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(MLX5_IB_OBJECT_DEVX_ASYNC_EVENT_FD,
				      UAPI_DEF_IS_OBJ_SUPPORTED(devx_is_supported)),
	{},
};