#include <rdma/ib_umem_odp.h>
#include <linux/kernel.h>
#include <linux/dma-buf.h>
#include <linux/dma-resv.h>
#include "mlx5_ib.h"
#include "cmd.h"
#include "umr.h"
#include "qp.h"
#include <linux/mlx5/eq.h>
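/*
 * One decoded page-fault event, handed from the page-fault EQ to the
 * handler workqueue. In outline it carries the fault type/subtype, the
 * token used to resume HW, the bytes already committed, and a per-type
 * union for WQE, RDMA and memory-scheme faults; it is filled in by
 * mlx5_ib_eq_pf_process() below.
 */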
struct mlx5_pagefault { … };
#define MAX_PREFETCH_LEN …
#define MMU_NOTIFIER_TIMEOUT …
#define MLX5_IMR_MTT_BITS …
#define MLX5_IMR_MTT_SHIFT …
#define MLX5_IMR_MTT_ENTRIES …
#define MLX5_IMR_MTT_SIZE …
#define MLX5_IMR_MTT_MASK …
#define MLX5_KSM_PAGE_SHIFT …
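/*
 * Implicit (whole-address-space) MRs form a two-level tree: the parent is
 * an indirect KSM mkey whose slots each point at a child MTT mkey covering
 * MLX5_IMR_MTT_SIZE bytes of VA. mlx5_imr_ksm_entries is the number of
 * parent slots needed to span TASK_SIZE, computed once in
 * mlx5_ib_odp_init().
 */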
static u64 mlx5_imr_ksm_entries;
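/*
 * Fill @nentries KLM slots of an implicit parent's translation table,
 * starting at @idx: slots with a live child MR contribute that child's
 * mkey, while under MLX5_IB_UPD_XLT_ZAP a slot is pointed at the null
 * mkey instead, so the next access faults and the child is (re)created
 * lazily.
 */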
static void populate_klm(struct mlx5_klm *pklm, size_t idx, size_t nentries,
struct mlx5_ib_mr *imr, int flags)
{ … }
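/*
 * The ODP umem encodes per-page access rights in the low bits of each
 * dma_list entry. A minimal sketch of the conversion, assuming the
 * ib_umem_odp encoding from <rdma/ib_umem_odp.h>:
 *
 *	u64 mtt_entry = umem_dma & ODP_DMA_ADDR_MASK;
 *
 *	if (umem_dma & ODP_READ_ALLOWED_BIT)
 *		mtt_entry |= MLX5_IB_MTT_READ;
 *	if (umem_dma & ODP_WRITE_ALLOWED_BIT)
 *		mtt_entry |= MLX5_IB_MTT_WRITE;
 */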
static u64 umem_dma_to_mtt(dma_addr_t umem_dma)
{ … }
static void populate_mtt(__be64 *pas, size_t idx, size_t nentries,
struct mlx5_ib_mr *mr, int flags)
{ … }
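/*
 * Called back by the UMR code when it needs translation entries for an
 * ODP mkey: indirect updates (implicit parents) get KLM entries via
 * populate_klm(), leaf MRs get MTT entries via populate_mtt().
 */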
void mlx5_odp_populate_xlt(void *xlt, size_t idx, size_t nentries,
struct mlx5_ib_mr *mr, int flags)
{ … }
static void free_implicit_child_mr_work(struct work_struct *work)
{ … }
static void destroy_unused_implicit_child_mr(struct mlx5_ib_mr *mr)
{ … }
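/*
 * mmu_interval_notifier callback, run when the CPU page tables behind the
 * umem change. The affected translation entries are zapped with a
 * non-sleeping UMR (MLX5_IB_UPD_XLT_ZAP | MLX5_IB_UPD_XLT_ATOMIC) so the
 * HCA refaults on its next access; implicit children left with no mapped
 * pages are handed to destroy_unused_implicit_child_mr().
 */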
static bool mlx5_ib_invalidate_range(struct mmu_interval_notifier *mni,
const struct mmu_notifier_range *range,
unsigned long cur_seq)
{ … }
const struct mmu_interval_notifier_ops mlx5_mn_ops = …;
static void internal_fill_odp_caps(struct mlx5_ib_dev *dev)
{ … }
static void mlx5_ib_page_fault_resume(struct mlx5_ib_dev *dev,
struct mlx5_pagefault *pfault,
int error)
{ … }
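/*
 * Return the child MR backing parent slot @idx, creating it on first use.
 * Concurrent faults can race to create the same child; the loser drops
 * its copy and uses the one already installed in the parent's
 * implicit_children xarray.
 */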
static struct mlx5_ib_mr *implicit_get_child_mr(struct mlx5_ib_mr *imr,
unsigned long idx)
{ … }
static int alloc_implicit_mr_null_mkey(struct mlx5_ib_dev *dev,
struct mlx5_ib_mr *imr,
struct mlx5_ib_pd *pd)
{ … }
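/*
 * Create an implicit ODP MR spanning the entire process address space;
 * child MRs are only instantiated when first faulted.
 * mlx5_ib_free_odp_mr() below tears the child tree back down.
 */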
struct mlx5_ib_mr *mlx5_ib_alloc_implicit_mr(struct mlx5_ib_pd *pd,
int access_flags)
{ … }
void mlx5_ib_free_odp_mr(struct mlx5_ib_mr *mr)
{ … }
#define MLX5_PF_FLAGS_DOWNGRADE …
#define MLX5_PF_FLAGS_SNAPSHOT …
#define MLX5_PF_FLAGS_ENABLE …
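/*
 * Fault [user_va, user_va + bcnt) into a leaf MR. In outline (a sketch,
 * assuming the usual ib_umem_odp and mlx5 UMR helpers):
 *
 *	np = ib_umem_odp_map_dma_and_lock(odp, user_va, bcnt,
 *					  access_mask, fault);
 *	if (np < 0)
 *		return np;
 *	err = mlx5r_umr_update_xlt(mr, start_idx, np, page_shift,
 *				   xlt_flags);
 *	mutex_unlock(&odp->umem_mutex);
 *
 * i.e. fault/map the pages through hmm_range_fault() and then push the
 * new MTT entries to the device with a UMR. MLX5_PF_FLAGS_DOWNGRADE
 * drops write access; MLX5_PF_FLAGS_SNAPSHOT maps only what is already
 * present, without faulting.
 */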
static int pagefault_real_mr(struct mlx5_ib_mr *mr, struct ib_umem_odp *odp,
u64 user_va, size_t bcnt, u32 *bytes_mapped,
u32 flags)
{ … }
static int pagefault_implicit_mr(struct mlx5_ib_mr *imr,
struct ib_umem_odp *odp_imr, u64 user_va,
size_t bcnt, u32 *bytes_mapped, u32 flags)
{ … }
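/*
 * dma-buf MRs carry no per-page list of their own: under the dma_resv
 * lock the attachment is (re)mapped with ib_umem_dmabuf_map_pages() and
 * the mkey is updated to the new SGL, so a fault always maps the whole
 * MR at once.
 */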
static int pagefault_dmabuf_mr(struct mlx5_ib_mr *mr, size_t bcnt,
u32 *bytes_mapped, u32 flags)
{ … }
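/*
 * Top-level resolver for one mkey: dispatches to the dmabuf, implicit or
 * leaf path above. @permissive_fault is used by the memory scheme, whose
 * page-aligned fault ranges may extend past the MR bounds and are
 * trimmed rather than rejected.
 */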
static int pagefault_mr(struct mlx5_ib_mr *mr, u64 io_virt, size_t bcnt,
u32 *bytes_mapped, u32 flags, bool permissive_fault)
{ … }
int mlx5_ib_init_odp_mr(struct mlx5_ib_mr *mr)
{ … }
int mlx5_ib_init_dmabuf_mr(struct mlx5_ib_mr *mr)
{ … }
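/*
 * Indirect (KLM/KSM) mkeys may reference further mkeys. Resolution is
 * iterative rather than recursive: referenced keys are pushed as
 * pf_frame entries onto a stack, with a depth limit guarding against
 * malformed chains.
 */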
struct pf_frame { … };
static bool mkey_is_eq(struct mlx5_ib_mkey *mmkey, u32 key)
{ … }
static struct mlx5_ib_mkey *find_odp_mkey(struct mlx5_ib_dev *dev, u32 key)
{ … }
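/*
 * Resolve one data segment of a fault: look the key up with
 * find_odp_mkey() in the device's ODP mkey xarray, fault plain MRs
 * directly, and push the keys referenced by indirect mkeys/MWs onto the
 * pf_frame stack. *bytes_committed, the part HW already transferred, is
 * skipped first.
 */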
static int pagefault_single_data_segment(struct mlx5_ib_dev *dev,
struct ib_pd *pd, u32 key,
u64 io_virt, size_t bcnt,
u32 *bytes_committed,
u32 *bytes_mapped)
{ … }
static int pagefault_data_segments(struct mlx5_ib_dev *dev,
struct mlx5_pagefault *pfault,
void *wqe,
void *wqe_end, u32 *bytes_mapped,
u32 *total_wqe_bytes, bool receive_queue)
{ … }
static int mlx5_ib_mr_initiator_pfault_handler(
struct mlx5_ib_dev *dev, struct mlx5_pagefault *pfault,
struct mlx5_ib_qp *qp, void **wqe, void **wqe_end, int wqe_length)
{ … }
static int mlx5_ib_mr_responder_pfault_handler_srq(struct mlx5_ib_dev *dev,
struct mlx5_ib_srq *srq,
void **wqe, void **wqe_end,
int wqe_length)
{ … }
static int mlx5_ib_mr_responder_pfault_handler_rq(struct mlx5_ib_dev *dev,
struct mlx5_ib_qp *qp,
void *wqe, void **wqe_end,
int wqe_length)
{ … }
static inline struct mlx5_core_rsc_common *odp_get_rsc(struct mlx5_ib_dev *dev,
u32 wq_num, int pf_type)
{ … }
static inline struct mlx5_ib_qp *res_to_qp(struct mlx5_core_rsc_common *res)
{ … }
static inline struct mlx5_ib_srq *res_to_srq(struct mlx5_core_rsc_common *res)
{ … }
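/*
 * WQE-scheme fault: copy the faulting WQE out of the QP/SRQ buffer,
 * parse its segments with the initiator/responder helpers above, fault
 * each data segment in, then let HW retry via
 * mlx5_ib_page_fault_resume(). On failure it resumes with the error bit
 * set, which moves the queue to an error state.
 */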
static void mlx5_ib_mr_wqe_pfault_handler(struct mlx5_ib_dev *dev,
struct mlx5_pagefault *pfault)
{ … }
static int pages_in_range(u64 address, u32 length)
{ … }
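/*
 * RDMA-scheme fault: only the reported range must be resolved, but up to
 * MAX_PREFETCH_LEN around it is prefetched opportunistically once HW has
 * been resumed; prefetch failures are ignored.
 */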
static void mlx5_ib_mr_rdma_pfault_handler(struct mlx5_ib_dev *dev,
struct mlx5_pagefault *pfault)
{ … }
#define MLX5_MEMORY_PAGE_FAULT_FLAGS_LAST …
static void mlx5_ib_mr_memory_pfault_handler(struct mlx5_ib_dev *dev,
struct mlx5_pagefault *pfault)
{ … }
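/* Dispatch on pfault->event_subtype: WQE, RDMA or memory-scheme fault. */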
static void mlx5_ib_pfault(struct mlx5_ib_dev *dev, struct mlx5_pagefault *pfault)
{ … }
static void mlx5_ib_eqe_pf_action(struct work_struct *work)
{ … }
#define MEMORY_SCHEME_PAGE_FAULT_GRANULARITY …
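/*
 * Drain the dedicated page-fault EQ: each EQE is decoded into a struct
 * mlx5_pagefault drawn from eq->pool (a mempool, so the handler can make
 * forward progress under memory pressure) and queued on eq->wq, where
 * mlx5_ib_eqe_pf_action() runs mlx5_ib_pfault() and returns the entry to
 * the pool.
 */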
static void mlx5_ib_eq_pf_process(struct mlx5_ib_pf_eq *eq)
{ … }
static int mlx5_ib_eq_pf_int(struct notifier_block *nb, unsigned long type,
void *data)
{ … }
static void mempool_refill(mempool_t *pool)
{ … }
static void mlx5_ib_eq_pf_action(struct work_struct *work)
{ … }
enum { … };
int mlx5r_odp_create_eq(struct mlx5_ib_dev *dev, struct mlx5_ib_pf_eq *eq)
{ … }
static int
mlx5_ib_odp_destroy_eq(struct mlx5_ib_dev *dev, struct mlx5_ib_pf_eq *eq)
{ … }
int mlx5_odp_init_mkey_cache(struct mlx5_ib_dev *dev)
{ … }
static const struct ib_device_ops mlx5_ib_dev_odp_ops = …;
int mlx5_ib_odp_init_one(struct mlx5_ib_dev *dev)
{ … }
void mlx5_ib_odp_cleanup_one(struct mlx5_ib_dev *dev)
{ … }
int mlx5_ib_odp_init(void)
{ … }
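/*
 * MR prefetch (ib_advise_mr). The synchronous path faults the SG list
 * inline; the asynchronous path snapshots the MRs into a
 * prefetch_mr_work and completes on a workqueue, holding a reference on
 * each MR until its entry is done.
 */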
struct prefetch_mr_work { … };
static void destroy_prefetch_work(struct prefetch_mr_work *work)
{ … }
static struct mlx5_ib_mr *
get_prefetchable_mr(struct ib_pd *pd, enum ib_uverbs_advise_mr_advice advice,
u32 lkey)
{ … }
static void mlx5_ib_prefetch_mr_work(struct work_struct *w)
{ … }
static int init_prefetch_work(struct ib_pd *pd,
enum ib_uverbs_advise_mr_advice advice,
u32 pf_flags, struct prefetch_mr_work *work,
struct ib_sge *sg_list, u32 num_sge)
{ … }
static int mlx5_ib_prefetch_sg_list(struct ib_pd *pd,
enum ib_uverbs_advise_mr_advice advice,
u32 pf_flags, struct ib_sge *sg_list,
u32 num_sge)
{ … }
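/*
 * Entry point from ib_advise_mr(): PREFETCH (read) advice downgrades the
 * mapping to read-only (MLX5_PF_FLAGS_DOWNGRADE), PREFETCH_NO_FAULT maps
 * only pages already present (MLX5_PF_FLAGS_SNAPSHOT), and
 * IB_UVERBS_ADVISE_MR_FLAG_FLUSH selects the synchronous path.
 */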
int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
enum ib_uverbs_advise_mr_advice advice,
u32 flags, struct ib_sge *sg_list, u32 num_sge)
{ … }