linux/drivers/infiniband/hw/mlx5/mr.c

/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 * Copyright (c) 2020, Intel Corporation. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/kref.h>
#include <linux/random.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/delay.h>
#include <linux/dma-buf.h>
#include <linux/dma-resv.h>
#include <rdma/ib_umem_odp.h>
#include "dm.h"
#include "mlx5_ib.h"
#include "umr.h"

enum {};

#define MLX5_UMR_ALIGN

static void
create_mkey_callback(int status, struct mlx5_async_work *context);
static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, struct ib_umem *umem,
				     u64 iova, int access_flags,
				     unsigned int page_size, bool populate);

static void set_mkc_access_pd_addr_fields(void *mkc, int acc, u64 start_addr,
					  struct ib_pd *pd)
{}

static void assign_mkey_variant(struct mlx5_ib_dev *dev, u32 *mkey, u32 *in)
{}

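/*
 * mlx5_ib_create_mkey() creates the mkey with a synchronous firmware
 * command; mlx5_ib_create_mkey_cb() issues the same command asynchronously
 * and completes through create_mkey_callback().
 */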
static int mlx5_ib_create_mkey(struct mlx5_ib_dev *dev,
			       struct mlx5_ib_mkey *mkey, u32 *in, int inlen)
{}

static int mlx5_ib_create_mkey_cb(struct mlx5r_async_create_mkey *async_create)
{}

static int mkey_cache_max_order(struct mlx5_ib_dev *dev);
static void queue_adjust_cache_locked(struct mlx5_cache_ent *ent);

static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{}

static void create_mkey_warn(struct mlx5_ib_dev *dev, int status, void *out)
{}

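/*
 * push_mkey_locked()/pop_mkey_locked() add and remove mkeys on the cache
 * entry's queue; callers must hold ent->mkeys_queue.lock.
 */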
static int push_mkey_locked(struct mlx5_cache_ent *ent, u32 mkey)
{}

static int pop_mkey_locked(struct mlx5_cache_ent *ent)
{}

static void create_mkey_callback(int status, struct mlx5_async_work *context)
{}

static int get_mkc_octo_size(unsigned int access_mode, unsigned int ndescs)
{}

static void set_cache_mkc(struct mlx5_cache_ent *ent, void *mkc)
{}

/* Asynchronously schedule new MRs to be populated in the cache. */
static int add_keys(struct mlx5_cache_ent *ent, unsigned int num)
{}

/* Synchronously create an MR in the cache. */
static int create_cache_mkey(struct mlx5_cache_ent *ent, u32 *mkey)
{}

static void remove_cache_mr_locked(struct mlx5_cache_ent *ent)
{}

static int resize_available_mrs(struct mlx5_cache_ent *ent, unsigned int target,
				bool limit_fill)
	__acquires(&ent->mkeys_queue.lock) __releases(&ent->mkeys_queue.lock)
{}

static ssize_t size_write(struct file *filp, const char __user *buf,
			  size_t count, loff_t *pos)
{}

static ssize_t size_read(struct file *filp, char __user *buf, size_t count,
			 loff_t *pos)
{}

static const struct file_operations size_fops =;

static ssize_t limit_write(struct file *filp, const char __user *buf,
			   size_t count, loff_t *pos)
{}

static ssize_t limit_read(struct file *filp, char __user *buf, size_t count,
			  loff_t *pos)
{}

static const struct file_operations limit_fops =;

static bool someone_adding(struct mlx5_mkey_cache *cache)
{}

/*
 * Check if the bucket is outside the high/low water mark and schedule an async
 * update. The cache refill has hysteresis: once the low water mark is hit, the
 * bucket is refilled up to the high mark.
 */
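/*
 * Rough sketch of the thresholds (not the exact code): ent->limit acts as
 * the low water mark and roughly twice the limit as the high water mark.
 */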
static void queue_adjust_cache_locked(struct mlx5_cache_ent *ent)
{}

static void __cache_work_func(struct mlx5_cache_ent *ent)
{}

static void delayed_cache_work_func(struct work_struct *work)
{}

static int cache_ent_key_cmp(struct mlx5r_cache_rb_key key1,
			     struct mlx5r_cache_rb_key key2)
{}

static int mlx5_cache_ent_insert(struct mlx5_mkey_cache *cache,
				 struct mlx5_cache_ent *ent)
{}

static struct mlx5_cache_ent *
mkey_cache_ent_from_rb_key(struct mlx5_ib_dev *dev,
			   struct mlx5r_cache_rb_key rb_key)
{}

static struct mlx5_ib_mr *_mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev,
					struct mlx5_cache_ent *ent,
					int access_flags)
{}

static int get_unchangeable_access_flags(struct mlx5_ib_dev *dev,
					 int access_flags)
{}

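/*
 * Hand out a pre-created mkey from the cache entry matching the requested
 * access mode, access flags and descriptor count, avoiding a synchronous
 * firmware round trip on the registration path.
 */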
struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev,
				       int access_flags, int access_mode,
				       int ndescs)
{}

static void clean_keys(struct mlx5_ib_dev *dev, struct mlx5_cache_ent *ent)
{}

static void mlx5_mkey_cache_debugfs_cleanup(struct mlx5_ib_dev *dev)
{}

static void mlx5_mkey_cache_debugfs_add_ent(struct mlx5_ib_dev *dev,
					    struct mlx5_cache_ent *ent)
{}

static void mlx5_mkey_cache_debugfs_init(struct mlx5_ib_dev *dev)
{}

static void delay_time_func(struct timer_list *t)
{}

static int mlx5r_mkeys_init(struct mlx5_cache_ent *ent)
{}

static void mlx5r_mkeys_uninit(struct mlx5_cache_ent *ent)
{}

struct mlx5_cache_ent *
mlx5r_cache_create_ent_locked(struct mlx5_ib_dev *dev,
			      struct mlx5r_cache_rb_key rb_key,
			      bool persistent_entry)
{}

static void remove_ent_work_func(struct work_struct *work)
{}

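/*
 * mlx5_mkey_cache_init()/mlx5_mkey_cache_cleanup() bring the per-device
 * mkey cache up and down: the rb-tree of cache entries, the background
 * work and the debugfs files.
 */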
int mlx5_mkey_cache_init(struct mlx5_ib_dev *dev)
{}

void mlx5_mkey_cache_cleanup(struct mlx5_ib_dev *dev)
{}

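/* ib_get_dma_mr() handler: a PA-mode mkey covering the whole address space. */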
struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc)
{}

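/*
 * A minimal sketch of the octword count, assuming two 8-byte MTT entries
 * per 16-byte octword:
 *
 *	offset = addr & ((1ULL << page_shift) - 1);
 *	npages = ALIGN(len + offset, 1ULL << page_shift) >> page_shift;
 *	return (npages + 1) / 2;
 */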
static int get_octo_len(u64 addr, u64 len, int page_shift)
{}

static int mkey_cache_max_order(struct mlx5_ib_dev *dev)
{}

static void set_mr_fields(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
			  u64 length, int access_flags, u64 iova)
{}

static unsigned int mlx5_umem_dmabuf_default_pgsz(struct ib_umem *umem,
						  u64 iova)
{}

static struct mlx5_ib_mr *alloc_cacheable_mr(struct ib_pd *pd,
					     struct ib_umem *umem, u64 iova,
					     int access_flags)
{}

/*
 * If ibmr is NULL it will be allocated by reg_create; otherwise the given
 * ibmr is used.
 */
static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, struct ib_umem *umem,
				     u64 iova, int access_flags,
				     unsigned int page_size, bool populate)
{}

static struct ib_mr *mlx5_ib_get_dm_mr(struct ib_pd *pd, u64 start_addr,
				       u64 length, int acc, int mode)
{}

int mlx5_ib_advise_mr(struct ib_pd *pd,
		      enum ib_uverbs_advise_mr_advice advice,
		      u32 flags,
		      struct ib_sge *sg_list,
		      u32 num_sge,
		      struct uverbs_attr_bundle *attrs)
{}

struct ib_mr *mlx5_ib_reg_dm_mr(struct ib_pd *pd, struct ib_dm *dm,
				struct ib_dm_mr_attr *attr,
				struct uverbs_attr_bundle *attrs)
{}

static struct ib_mr *create_real_mr(struct ib_pd *pd, struct ib_umem *umem,
				    u64 iova, int access_flags)
{}

static struct ib_mr *create_user_odp_mr(struct ib_pd *pd, u64 start, u64 length,
					u64 iova, int access_flags,
					struct ib_udata *udata)
{}

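/*
 * ib_reg_user_mr() handler: pins and registers a user buffer, dispatching
 * to create_user_odp_mr() for IB_ACCESS_ON_DEMAND registrations and to
 * create_real_mr() otherwise.
 */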
struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				  u64 iova, int access_flags,
				  struct ib_udata *udata)
{}

static void mlx5_ib_dmabuf_invalidate_cb(struct dma_buf_attachment *attach)
{}

static struct dma_buf_attach_ops mlx5_ib_dmabuf_attach_ops =;

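/*
 * ib_reg_user_mr_dmabuf() handler: registers an MR backed by a dma-buf
 * attachment; mlx5_ib_dmabuf_invalidate_cb() is the move_notify hook that
 * invalidates the HW mapping when the exporter moves the buffer.
 */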
struct ib_mr *mlx5_ib_reg_user_mr_dmabuf(struct ib_pd *pd, u64 offset,
					 u64 length, u64 virt_addr,
					 int fd, int access_flags,
					 struct ib_udata *udata)
{}

/*
 * True if the change in access flags can be done via UMR; only some access
 * flags can be updated this way.
 */
static bool can_use_umr_rereg_access(struct mlx5_ib_dev *dev,
				     unsigned int current_access_flags,
				     unsigned int target_access_flags)
{}

static bool can_use_umr_rereg_pas(struct mlx5_ib_mr *mr,
				  struct ib_umem *new_umem,
				  int new_access_flags, u64 iova,
				  unsigned long *page_size)
{}

static int umr_rereg_pas(struct mlx5_ib_mr *mr, struct ib_pd *pd,
			 int access_flags, int flags, struct ib_umem *new_umem,
			 u64 iova, unsigned long page_size)
{}

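/*
 * ib_rereg_user_mr() handler: applies the requested PD/access/translation
 * change with a UMR WQE on the existing mkey when possible, otherwise the
 * MR is recreated from scratch.
 */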
struct ib_mr *mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
				    u64 length, u64 iova, int new_access_flags,
				    struct ib_pd *new_pd,
				    struct ib_udata *udata)
{}

static int
mlx5_alloc_priv_descs(struct ib_device *device,
		      struct mlx5_ib_mr *mr,
		      int ndescs,
		      int desc_size)
{}

static void
mlx5_free_priv_descs(struct mlx5_ib_mr *mr)
{}

static int cache_ent_find_and_store(struct mlx5_ib_dev *dev,
				    struct mlx5_ib_mr *mr)
{}

static int mlx5_revoke_mr(struct mlx5_ib_mr *mr)
{}

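/*
 * ib_dereg_mr() handler: the mkey is either returned to the cache (see
 * mlx5_revoke_mr() and cache_ent_find_and_store()) or destroyed in
 * firmware, and the underlying umem is released.
 */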
int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
{}

static void mlx5_set_umr_free_mkey(struct ib_pd *pd, u32 *in, int ndescs,
				   int access_mode, int page_shift)
{}

static int _mlx5_alloc_mkey_descs(struct ib_pd *pd, struct mlx5_ib_mr *mr,
				  int ndescs, int desc_size, int page_shift,
				  int access_mode, u32 *in, int inlen)
{}

static struct mlx5_ib_mr *mlx5_ib_alloc_pi_mr(struct ib_pd *pd,
				u32 max_num_sg, u32 max_num_meta_sg,
				int desc_size, int access_mode)
{}

static int mlx5_alloc_mem_reg_descs(struct ib_pd *pd, struct mlx5_ib_mr *mr,
				    int ndescs, u32 *in, int inlen)
{}

static int mlx5_alloc_sg_gaps_descs(struct ib_pd *pd, struct mlx5_ib_mr *mr,
				    int ndescs, u32 *in, int inlen)
{}

static int mlx5_alloc_integrity_descs(struct ib_pd *pd, struct mlx5_ib_mr *mr,
				      int max_num_sg, int max_num_meta_sg,
				      u32 *in, int inlen)
{}

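/*
 * __mlx5_ib_alloc_mr() backs both ib_alloc_mr() and ib_alloc_mr_integrity():
 * IB_MR_TYPE_MEM_REG, IB_MR_TYPE_SG_GAPS and IB_MR_TYPE_INTEGRITY map to
 * the mlx5_alloc_*_descs() helpers above.
 */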
static struct ib_mr *__mlx5_ib_alloc_mr(struct ib_pd *pd,
					enum ib_mr_type mr_type, u32 max_num_sg,
					u32 max_num_meta_sg)
{}

struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
			       u32 max_num_sg)
{}

struct ib_mr *mlx5_ib_alloc_mr_integrity(struct ib_pd *pd,
					 u32 max_num_sg, u32 max_num_meta_sg)
{}

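/* Memory window (type 1 and type 2) allocation and teardown. */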
int mlx5_ib_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata)
{}

int mlx5_ib_dealloc_mw(struct ib_mw *mw)
{}

int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
			    struct ib_mr_status *mr_status)
{}

static int
mlx5_ib_map_pa_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg,
			int data_sg_nents, unsigned int *data_sg_offset,
			struct scatterlist *meta_sg, int meta_sg_nents,
			unsigned int *meta_sg_offset)
{}

static int
mlx5_ib_sg_to_klms(struct mlx5_ib_mr *mr,
		   struct scatterlist *sgl,
		   unsigned short sg_nents,
		   unsigned int *sg_offset_p,
		   struct scatterlist *meta_sgl,
		   unsigned short meta_sg_nents,
		   unsigned int *meta_sg_offset_p)
{}

static int mlx5_set_page(struct ib_mr *ibmr, u64 addr)
{}

static int mlx5_set_page_pi(struct ib_mr *ibmr, u64 addr)
{}

static int
mlx5_ib_map_mtt_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg,
			 int data_sg_nents, unsigned int *data_sg_offset,
			 struct scatterlist *meta_sg, int meta_sg_nents,
			 unsigned int *meta_sg_offset)
{}

static int
mlx5_ib_map_klm_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg,
			 int data_sg_nents, unsigned int *data_sg_offset,
			 struct scatterlist *meta_sg, int meta_sg_nents,
			 unsigned int *meta_sg_offset)
{}

int mlx5_ib_map_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg,
			 int data_sg_nents, unsigned int *data_sg_offset,
			 struct scatterlist *meta_sg, int meta_sg_nents,
			 unsigned int *meta_sg_offset)
{}

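/*
 * ib_map_mr_sg() handler: packs the scatterlist into the MR's descriptor
 * array, using ib_sg_to_pages() with mlx5_set_page() for MTT-based MRs and
 * mlx5_ib_sg_to_klms() for SG_GAPS (KLM) MRs.
 */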
int mlx5_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
		      unsigned int *sg_offset)
{}