#include <rdma/ib_umem_odp.h>
#include "mlx5_ib.h"
#include "umr.h"
#include "wr.h"
/*
 * Fallback page for XLT (translation-table) work when normal allocation
 * fails; presumably shared across users, hence the serializing mutex.
 * NOTE(review): allocation/use sites are elided here — confirm ownership
 * in mlx5r_umr_init()/mlx5r_umr_cleanup().
 */
void *xlt_emergency_page;
static DEFINE_MUTEX(xlt_emergency_page_mutex);
/*
 * Return the big-endian mkey-mask bits used when a UMR WQE enables
 * (re-activates) an MR. NOTE(review): body elided — exact bits unverified.
 */
static __be64 get_umr_enable_mr_mask(void)
{ … }
/*
 * Return the big-endian mkey-mask bits used when a UMR WQE disables
 * (frees/revokes) an MR. NOTE(review): body elided — exact bits unverified.
 */
static __be64 get_umr_disable_mr_mask(void)
{ … }
/*
 * Return the mkey-mask bits selecting a translation (address/len/XLT)
 * update in a UMR WQE. NOTE(review): body elided — exact bits unverified.
 */
static __be64 get_umr_update_translation_mask(void)
{ … }
/*
 * Return the mkey-mask bits selecting an access-flags update in a UMR WQE.
 * Takes @dev, presumably to gate capability-dependent bits (e.g. atomic,
 * relaxed ordering) on device caps — body elided, confirm against HCA caps.
 */
static __be64 get_umr_update_access_mask(struct mlx5_ib_dev *dev)
{ … }
/*
 * Return the mkey-mask bits selecting a PD (protection domain) update in a
 * UMR WQE. NOTE(review): body elided — exact bits unverified.
 */
static __be64 get_umr_update_pd_mask(void)
{ … }
/*
 * Validate that the requested mkey-mask @mask is usable on @dev, presumably
 * rejecting fields the device forbids modifying via UMR.
 * Returns 0 on success, negative errno otherwise — TODO confirm convention
 * against callers (body elided).
 */
static int umr_check_mkey_mask(struct mlx5_ib_dev *dev, u64 mask)
{ … }
enum { … };
/*
 * Drive the UMR QP through the RESET -> INIT -> RTR -> RTS state ladder
 * (name: rst2rts) so it can post sends. Returns 0 or a negative errno —
 * body elided; confirm intermediate states via modify-QP calls inside.
 */
static int mlx5r_umr_qp_rst2rts(struct mlx5_ib_dev *dev, struct ib_qp *qp)
{ … }
/*
 * Create the per-device UMR resources (presumably CQ/QP/PD used to post UMR
 * WQEs) and bring them to a usable state. Pairs with
 * mlx5r_umr_resource_cleanup(). Returns 0 or negative errno (body elided).
 */
int mlx5r_umr_resource_init(struct mlx5_ib_dev *dev)
{ … }
/*
 * Tear down everything mlx5r_umr_resource_init() created. Must be safe to
 * call on driver unload paths (body elided — verify ordering vs. in-flight
 * UMR work).
 */
void mlx5r_umr_resource_cleanup(struct mlx5_ib_dev *dev)
{ … }
/*
 * Per-device UMR subsystem init; presumably also responsible for the
 * xlt_emergency_page fallback declared above — TODO confirm, body elided.
 * Returns 0 or negative errno. Pairs with mlx5r_umr_cleanup().
 */
int mlx5r_umr_init(struct mlx5_ib_dev *dev)
{ … }
/* Inverse of mlx5r_umr_init(); releases UMR subsystem state (body elided). */
void mlx5r_umr_cleanup(struct mlx5_ib_dev *dev)
{ … }
/*
 * Error-recovery path for the UMR QP — presumably resets and re-transitions
 * it after a failed/flushed WQE so later posts can succeed. Returns 0 or
 * negative errno. NOTE(review): body elided; confirm it serializes against
 * concurrent posters.
 */
static int mlx5r_umr_recover(struct mlx5_ib_dev *dev)
{ … }
/*
 * Post a single UMR WQE (@wqe) targeting @mkey on @ibqp, attaching @cqe for
 * completion matching. @with_data selects whether a data segment follows the
 * control/mkey segments — TODO confirm exact WQE layout (body elided).
 * Returns 0 or negative errno.
 */
static int mlx5r_umr_post_send(struct ib_qp *ibqp, u32 mkey, struct ib_cqe *cqe,
			       struct mlx5r_umr_wqe *wqe, bool with_data)
{ … }
/*
 * CQ completion callback for UMR WQEs; presumably records @wc status into
 * the waiting mlx5r_umr_context and wakes the poster (body elided).
 */
static void mlx5r_umr_done(struct ib_cq *cq, struct ib_wc *wc)
{ … }
/*
 * Initialize a stack-held UMR completion context before posting (cqe handler,
 * completion primitive) — body elided; see mlx5r_umr_done() for the wake side.
 */
static inline void mlx5r_umr_init_context(struct mlx5r_umr_context *context)
{ … }
/*
 * Synchronous UMR: post @wqe for @mkey and block until completion.
 * Presumably invokes mlx5r_umr_recover() on bad completion status — TODO
 * confirm (body elided). Returns 0 on successful completion, negative errno
 * otherwise. Sleeps; must not be called from atomic context.
 */
static int mlx5r_umr_post_send_wait(struct mlx5_ib_dev *dev, u32 mkey,
				    struct mlx5r_umr_wqe *wqe, bool with_data)
{ … }
/*
 * Revoke @mr via UMR — presumably builds a disable-mr WQE (see
 * get_umr_disable_mr_mask()) so the HW mkey no longer maps user memory.
 * Returns 0 or negative errno (body elided).
 */
int mlx5r_umr_revoke_mr(struct mlx5_ib_mr *mr)
{ … }
/*
 * Encode IB @access_flags into the mkey segment @seg for a UMR WQE.
 * @dev is presumably needed for capability-dependent flags — body elided.
 */
static void mlx5r_umr_set_access_flags(struct mlx5_ib_dev *dev,
				       struct mlx5_mkey_seg *seg,
				       unsigned int access_flags)
{ … }
/*
 * Re-register @mr's PD and access flags in place via a UMR WQE (the
 * rereg-MR fast path that avoids destroy/recreate). Returns 0 or negative
 * errno. NOTE(review): body elided — confirm it updates mr->access_flags /
 * ibmr.pd on success.
 */
int mlx5r_umr_rereg_pd_access(struct mlx5_ib_mr *mr, struct ib_pd *pd,
			      int access_flags)
{ … }
/*
 * Size limits for one XLT update chunk: the hardware maximum per UMR WQE and
 * a smaller spare/fallback size used when large allocations fail.
 * NOTE(review): values elided — verify against mlx5r_umr_alloc_xlt() usage.
 */
#define MLX5_MAX_UMR_CHUNK …
#define MLX5_SPARE_UMR_CHUNK …
/*
 * Allocate a buffer for *nents XLT entries of @ent_size bytes using
 * @gfp_mask. @nents is in/out — presumably clamped to what was actually
 * obtainable (chunk limits above, emergency page fallback) — TODO confirm,
 * body elided. Returns the buffer or NULL. Free with mlx5r_umr_free_xlt().
 */
static void *mlx5r_umr_alloc_xlt(size_t *nents, size_t ent_size, gfp_t gfp_mask)
{ … }
/*
 * Release an XLT buffer of @length bytes from mlx5r_umr_alloc_xlt().
 * @length presumably selects the free path (page allocator vs. emergency
 * page unlock) — body elided.
 */
static void mlx5r_umr_free_xlt(void *xlt, size_t length)
{ … }
/*
 * DMA-unmap the sge described by @sg and free the backing XLT buffer —
 * inverse of mlx5r_umr_create_xlt() (body elided).
 */
static void mlx5r_umr_unmap_free_xlt(struct mlx5_ib_dev *dev, void *xlt,
				     struct ib_sge *sg)
{ … }
/*
 * Allocate an XLT buffer for @nents entries of @ent_size and DMA-map it,
 * filling @sg for use as the UMR WQE data segment. Returns the CPU address
 * or NULL on failure. Undo with mlx5r_umr_unmap_free_xlt().
 * NOTE(review): body elided — @flags semantics unverified here.
 */
static void *mlx5r_umr_create_xlt(struct mlx5_ib_dev *dev, struct ib_sge *sg,
				  size_t nents, size_t ent_size,
				  unsigned int flags)
{ … }
/*
 * Fill the UMR control segment @ctrl_seg for an XLT-update WQE from @flags
 * and the mapped buffer in @sg (body elided).
 */
static void
mlx5r_umr_set_update_xlt_ctrl_seg(struct mlx5_wqe_umr_ctrl_seg *ctrl_seg,
				  unsigned int flags, struct ib_sge *sg)
{ … }
/*
 * Fill the mkey segment @mkey_seg for an XLT-update WQE from @mr's
 * properties and the translation @page_shift (body elided).
 */
static void mlx5r_umr_set_update_xlt_mkey_seg(struct mlx5_ib_dev *dev,
					      struct mlx5_mkey_seg *mkey_seg,
					      struct mlx5_ib_mr *mr,
					      unsigned int page_shift)
{ … }
/*
 * Fill the data segment @data_seg of an XLT-update WQE from the DMA-mapped
 * sge @sg (addr/length/lkey) — body elided.
 */
static void
mlx5r_umr_set_update_xlt_data_seg(struct mlx5_wqe_data_seg *data_seg,
				  struct ib_sge *sg)
{ … }
/*
 * Advance the XLT offset in @ctrl_seg to @offset so successive chunked WQEs
 * write consecutive regions of the translation table (body elided —
 * presumably converts to the wire/BE field format).
 */
static void mlx5r_umr_update_offset(struct mlx5_wqe_umr_ctrl_seg *ctrl_seg,
				    u64 offset)
{ … }
/*
 * Patch @wqe for the final chunk of a multi-WQE XLT update — presumably sets
 * the enable/translation mask bits and final sizes from @mr/@sg/@flags.
 * NOTE(review): body elided; confirm which ctrl/mkey fields are finalized.
 */
static void mlx5r_umr_final_update_xlt(struct mlx5_ib_dev *dev,
				       struct mlx5r_umr_wqe *wqe,
				       struct mlx5_ib_mr *mr, struct ib_sge *sg,
				       unsigned int flags)
{ … }
/*
 * Core worker that pushes @mr's physical addresses (PAs) to the device via
 * chunked UMR XLT updates. @dd selects the data-direct (KSM) variant used by
 * mlx5r_umr_update_data_direct_ksm_pas() — TODO confirm, body elided.
 * Returns 0 or negative errno. Sleeps.
 */
static int
_mlx5r_umr_update_mr_pas(struct mlx5_ib_mr *mr, unsigned int flags, bool dd)
{ … }
/*
 * Data-direct variant of the PAS update: wraps _mlx5r_umr_update_mr_pas()
 * with dd=true (KSM entries) — presumably; body elided. Returns 0 or
 * negative errno.
 */
int mlx5r_umr_update_data_direct_ksm_pas(struct mlx5_ib_mr *mr, unsigned int flags)
{ … }
/*
 * Public entry: write @mr's page addresses into its mkey translation table
 * via UMR — presumably _mlx5r_umr_update_mr_pas() with dd=false; body
 * elided. Returns 0 or negative errno.
 */
int mlx5r_umr_update_mr_pas(struct mlx5_ib_mr *mr, unsigned int flags)
{ … }
/*
 * Capability probe: true if @dev permits UMR operations on indirect mkeys
 * (needed by the ODP/XLT path below) — body elided; presumably a single
 * MLX5_CAP_* check.
 */
static bool umr_can_use_indirect_mkey(struct mlx5_ib_dev *dev)
{ … }
/*
 * Update @npages translation entries of @mr starting at entry @idx, with
 * page size 1 << @page_shift — the ODP page-fault/invalidate workhorse
 * (note the ib_umem_odp.h include above). Chunked via the helpers above.
 * Returns 0 or negative errno. NOTE(review): body elided — confirm @flags
 * values (zap/atomic/enable) against callers in odp.c.
 */
int mlx5r_umr_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages,
			 int page_shift, int flags)
{ … }