linux/drivers/infiniband/hw/mlx5/odp.c

/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <rdma/ib_umem_odp.h>
#include <linux/kernel.h>
#include <linux/dma-buf.h>
#include <linux/dma-resv.h>

#include "mlx5_ib.h"
#include "cmd.h"
#include "umr.h"
#include "qp.h"

#include <linux/mlx5/eq.h>

/* Contains the details of a pagefault. */
struct mlx5_pagefault {};
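
/*
 * Illustrative sketch only (not the driver's actual layout): the kind of
 * information such a pagefault descriptor typically carries -- the fault
 * type, the byte count already committed by hardware, per-type details for
 * WQE vs. RDMA faults, and a work item so the fault can be handled from
 * process context. All names below are assumptions for illustration.
 */
struct example_pagefault_desc {
	u8			type;		/* WQE, RDMA, or memory fault */
	u32			bytes_committed; /* bytes HW already committed */
	union {
		struct {
			u32	wq_num;		/* faulting WQ number */
			u16	wqe_index;	/* faulting WQE index */
		} wqe;
		struct {
			u32	r_key;		/* faulting rkey */
			u64	rdma_va;	/* faulting virtual address */
			u32	rdma_op_len;	/* length of the RDMA operation */
		} rdma;
	};
	struct work_struct	work;		/* deferred handling context */
};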

#define MAX_PREFETCH_LEN

/* Timeout in ms to wait for an active mmu notifier to complete when handling
 * a pagefault. */
#define MMU_NOTIFIER_TIMEOUT
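
/*
 * Illustrative sketch only: how a wait bounded by that timeout could look,
 * assuming a struct completion ("notifier_done") that the invalidation path
 * completes when it finishes. Neither the helper nor the completion is part
 * of the driver; they are assumptions for illustration.
 */
static int example_wait_for_notifier(struct completion *notifier_done)
{
	unsigned long timeout = msecs_to_jiffies(MMU_NOTIFIER_TIMEOUT);

	/* wait_for_completion_timeout() returns 0 on timeout */
	if (!wait_for_completion_timeout(notifier_done, timeout))
		return -ETIMEDOUT;
	return 0;
}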

#define MLX5_IMR_MTT_BITS
#define MLX5_IMR_MTT_SHIFT
#define MLX5_IMR_MTT_ENTRIES
#define MLX5_IMR_MTT_SIZE
#define MLX5_IMR_MTT_MASK

#define MLX5_KSM_PAGE_SHIFT

static u64 mlx5_imr_ksm_entries;

static void populate_klm(struct mlx5_klm *pklm, size_t idx, size_t nentries,
			struct mlx5_ib_mr *imr, int flags)
{}

static u64 umem_dma_to_mtt(dma_addr_t umem_dma)
{}

static void populate_mtt(__be64 *pas, size_t idx, size_t nentries,
			 struct mlx5_ib_mr *mr, int flags)
{}

void mlx5_odp_populate_xlt(void *xlt, size_t idx, size_t nentries,
			   struct mlx5_ib_mr *mr, int flags)
{}

/*
 * This must be called after the mr has been removed from implicit_children.
 * NOTE: the MR is not necessarily empty here; parallel page faults could have
 * raced with the free process and added pages to it.
 */
static void free_implicit_child_mr_work(struct work_struct *work)
{}
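
/*
 * Illustrative sketch only of the deferred-free pattern used above: the last
 * reference to a child MR may be dropped from a context that cannot sleep,
 * so the actual teardown is pushed to a workqueue. The context struct and
 * helpers below are assumptions for illustration, not the driver's code; a
 * real driver typically embeds the work item in the MR itself instead of
 * allocating a wrapper here.
 */
struct example_child_free_ctx {
	struct work_struct	work;
	struct mlx5_ib_mr	*mr;	/* child MR to tear down */
};

static void example_child_free_fn(struct work_struct *work)
{
	struct example_child_free_ctx *ctx =
		container_of(work, struct example_child_free_ctx, work);

	/* sleepable teardown of ctx->mr would run here, in process context */
	kfree(ctx);
}

static void example_queue_child_free(struct mlx5_ib_mr *mr)
{
	struct example_child_free_ctx *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_ATOMIC);	/* caller may be atomic */
	if (!ctx)
		return;	/* a real implementation must not simply leak the MR */

	ctx->mr = mr;
	INIT_WORK(&ctx->work, example_child_free_fn);
	queue_work(system_unbound_wq, &ctx->work);
}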

static void destroy_unused_implicit_child_mr(struct mlx5_ib_mr *mr)
{}

static bool mlx5_ib_invalidate_range(struct mmu_interval_notifier *mni,
				     const struct mmu_notifier_range *range,
				     unsigned long cur_seq)
{}

const struct mmu_interval_notifier_ops mlx5_mn_ops =;

static void internal_fill_odp_caps(struct mlx5_ib_dev *dev)
{}

static void mlx5_ib_page_fault_resume(struct mlx5_ib_dev *dev,
				      struct mlx5_pagefault *pfault,
				      int error)
{}

static struct mlx5_ib_mr *implicit_get_child_mr(struct mlx5_ib_mr *imr,
						unsigned long idx)
{}

/*
 * When using memory-scheme ODP, implicit MRs can't use the reserved null mkey;
 * each implicit MR needs a private null mkey of its own on which to receive
 * its page faults.
 * The null mkey is created with properties that raise a page fault on every
 * access and carry all relevant access flags.
 */
static int alloc_implicit_mr_null_mkey(struct mlx5_ib_dev *dev,
				       struct mlx5_ib_mr *imr,
				       struct mlx5_ib_pd *pd)
{}
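
/*
 * Illustrative sketch only of allocating a private mkey through the generic
 * CREATE_MKEY command path; the specific property bits that make it a "null"
 * mkey reporting a page fault on every access are intentionally not shown,
 * and the helper itself is an assumption, not the driver's code.
 */
static int example_create_private_mkey(struct mlx5_core_dev *mdev, u32 pdn,
				       u32 *mkey)
{
	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	void *mkc;
	u32 *in;
	int err;

	in = kzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
	MLX5_SET(mkc, mkc, pd, pdn);		/* protection domain */
	MLX5_SET(mkc, mkc, lr, 1);		/* local read */
	MLX5_SET(mkc, mkc, lw, 1);		/* local write */
	MLX5_SET(mkc, mkc, rr, 1);		/* remote read */
	MLX5_SET(mkc, mkc, rw, 1);		/* remote write */
	MLX5_SET(mkc, mkc, qpn, 0xffffff);	/* not bound to a specific QP */
	/* ... null-mkey specific properties would be set here ... */

	err = mlx5_core_create_mkey(mdev, mkey, in, inlen);
	kfree(in);
	return err;
}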

struct mlx5_ib_mr *mlx5_ib_alloc_implicit_mr(struct mlx5_ib_pd *pd,
					     int access_flags)
{}

void mlx5_ib_free_odp_mr(struct mlx5_ib_mr *mr)
{}

#define MLX5_PF_FLAGS_DOWNGRADE
#define MLX5_PF_FLAGS_SNAPSHOT
#define MLX5_PF_FLAGS_ENABLE
static int pagefault_real_mr(struct mlx5_ib_mr *mr, struct ib_umem_odp *odp,
			     u64 user_va, size_t bcnt, u32 *bytes_mapped,
			     u32 flags)
{}

static int pagefault_implicit_mr(struct mlx5_ib_mr *imr,
				 struct ib_umem_odp *odp_imr, u64 user_va,
				 size_t bcnt, u32 *bytes_mapped, u32 flags)
{}

static int pagefault_dmabuf_mr(struct mlx5_ib_mr *mr, size_t bcnt,
			       u32 *bytes_mapped, u32 flags)
{}

/*
 * Returns:
 *  -EFAULT: The range [io_virt, io_virt + bcnt) is not within the MR, covers
 *           pages that are not accessible, or the MR is no longer valid.
 *  -EAGAIN/-ENOMEM: The operation should be retried.
 *  -EINVAL/others: General internal malfunction.
 *  >0: Number of pages mapped.
 */
static int pagefault_mr(struct mlx5_ib_mr *mr, u64 io_virt, size_t bcnt,
			u32 *bytes_mapped, u32 flags, bool permissive_fault)
{}
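
/*
 * Illustrative sketch only of how a caller can consume the contract
 * documented above pagefault_mr(); the helper is an assumption for
 * illustration, not the driver's code.
 */
static int example_resolve_range(struct mlx5_ib_mr *mr, u64 io_virt,
				 size_t bcnt, u32 *bytes_mapped)
{
	int ret = pagefault_mr(mr, io_virt, bcnt, bytes_mapped, 0, false);

	if (ret > 0)
		return 0;		/* ret pages were mapped */
	if (ret == -EAGAIN || ret == -ENOMEM)
		return -EAGAIN;		/* transient; caller should retry */
	return ret;			/* -EFAULT or internal malfunction */
}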

int mlx5_ib_init_odp_mr(struct mlx5_ib_mr *mr)
{}

int mlx5_ib_init_dmabuf_mr(struct mlx5_ib_mr *mr)
{}

struct pf_frame {};

static bool mkey_is_eq(struct mlx5_ib_mkey *mmkey, u32 key)
{}

static struct mlx5_ib_mkey *find_odp_mkey(struct mlx5_ib_dev *dev, u32 key)
{}

/*
 * Handle a single data segment in a page-fault WQE or RDMA region.
 *
 * Returns the number of OS pages retrieved on success. The caller may continue
 * to the next data segment.
 * Can return the following error codes:
 * -EAGAIN to designate a temporary error. The caller will abort handling the
 *  page fault and resolve it without error, so the access can be retried.
 * -EFAULT when there is an error mapping the requested pages. The caller will
 *  abort the page fault handling.
 */
static int pagefault_single_data_segment(struct mlx5_ib_dev *dev,
					 struct ib_pd *pd, u32 key,
					 u64 io_virt, size_t bcnt,
					 u32 *bytes_committed,
					 u32 *bytes_mapped)
{}

/*
 * Parse a series of data segments for page fault handling.
 *
 * @dev:  Pointer to mlx5 IB device
 * @pfault: contains page fault information.
 * @wqe: points at the first data segment in the WQE.
 * @wqe_end: points after the end of the WQE.
 * @bytes_mapped: receives the number of bytes that the function was able to
 *                map. This allows the caller to decide intelligently whether
 *                enough memory was mapped to resolve the page fault
 *                successfully (e.g. enough for the next MTU, or the entire
 *                WQE).
 * @total_wqe_bytes: receives the total data size of this WQE in bytes (minus
 *                   the committed bytes).
 * @receive_queue: true when the WQE comes from a receive queue; its
 *                 scatter-gather list may end with a zero-length terminator
 *                 segment.
 *
 * Returns the number of pages loaded if positive, zero for an empty WQE, or a
 * negative error code.
 */
static int pagefault_data_segments(struct mlx5_ib_dev *dev,
				   struct mlx5_pagefault *pfault,
				   void *wqe,
				   void *wqe_end, u32 *bytes_mapped,
				   u32 *total_wqe_bytes, bool receive_queue)
{}
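
/*
 * Illustrative sketch only of the per-segment walk described above
 * pagefault_data_segments(): each data segment carries a byte count, an lkey
 * and an address in big-endian form. The real parser also handles inline
 * data, the zero-length terminator on receive queues, and the bytes_committed
 * bookkeeping; the helper below is an assumption, not the driver's code.
 */
static int example_walk_data_segs(struct mlx5_ib_dev *dev, struct ib_pd *pd,
				  void *wqe, void *wqe_end, u32 *bytes_mapped)
{
	u32 bytes_committed = 0;
	int ret;

	while (wqe < wqe_end) {
		struct mlx5_wqe_data_seg *dseg = wqe;
		u32 bcnt = be32_to_cpu(dseg->byte_count);
		u32 key = be32_to_cpu(dseg->lkey);
		u64 va = be64_to_cpu(dseg->addr);

		ret = pagefault_single_data_segment(dev, pd, key, va, bcnt,
						    &bytes_committed,
						    bytes_mapped);
		if (ret < 0)
			return ret;	/* -EAGAIN/-EFAULT: abort per contract */
		wqe += sizeof(*dseg);
	}
	return 0;
}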

/*
 * Parse an initiator WQE. Advances the wqe pointer to point at the
 * scatter-gather list, and sets wqe_end to the end of the WQE.
 */
static int mlx5_ib_mr_initiator_pfault_handler(
	struct mlx5_ib_dev *dev, struct mlx5_pagefault *pfault,
	struct mlx5_ib_qp *qp, void **wqe, void **wqe_end, int wqe_length)
{}
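
/*
 * Illustrative sketch only: the control segment of an initiator WQE encodes
 * the WQE size in 16-byte units in the low six bits of qpn_ds, which is how a
 * parser can derive wqe_end. The helper is an assumption, not the driver's
 * code.
 */
static void *example_wqe_end_from_ctrl(struct mlx5_wqe_ctrl_seg *ctrl)
{
	u32 ds = be32_to_cpu(ctrl->qpn_ds) & 0x3f;	/* WQE size, 16B units */

	return (void *)ctrl + ds * 16;
}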

/*
 * Parse responder WQE and set wqe_end to the end of the WQE.
 */
static int mlx5_ib_mr_responder_pfault_handler_srq(struct mlx5_ib_dev *dev,
						   struct mlx5_ib_srq *srq,
						   void **wqe, void **wqe_end,
						   int wqe_length)
{}

static int mlx5_ib_mr_responder_pfault_handler_rq(struct mlx5_ib_dev *dev,
						  struct mlx5_ib_qp *qp,
						  void *wqe, void **wqe_end,
						  int wqe_length)
{}

static inline struct mlx5_core_rsc_common *odp_get_rsc(struct mlx5_ib_dev *dev,
						       u32 wq_num, int pf_type)
{}

static inline struct mlx5_ib_qp *res_to_qp(struct mlx5_core_rsc_common *res)
{}

static inline struct mlx5_ib_srq *res_to_srq(struct mlx5_core_rsc_common *res)
{}

static void mlx5_ib_mr_wqe_pfault_handler(struct mlx5_ib_dev *dev,
					  struct mlx5_pagefault *pfault)
{}

static int pages_in_range(u64 address, u32 length)
{}

static void mlx5_ib_mr_rdma_pfault_handler(struct mlx5_ib_dev *dev,
					   struct mlx5_pagefault *pfault)
{}

#define MLX5_MEMORY_PAGE_FAULT_FLAGS_LAST
static void mlx5_ib_mr_memory_pfault_handler(struct mlx5_ib_dev *dev,
					     struct mlx5_pagefault *pfault)
{}

static void mlx5_ib_pfault(struct mlx5_ib_dev *dev, struct mlx5_pagefault *pfault)
{}

static void mlx5_ib_eqe_pf_action(struct work_struct *work)
{}

#define MEMORY_SCHEME_PAGE_FAULT_GRANULARITY
static void mlx5_ib_eq_pf_process(struct mlx5_ib_pf_eq *eq)
{}

static int mlx5_ib_eq_pf_int(struct notifier_block *nb, unsigned long type,
			     void *data)
{}

/* A mempool_refill() API was proposed but unfortunately wasn't accepted:
 * http://lkml.iu.edu/hypermail/linux/kernel/1512.1/05073.html
 * This is a cheap workaround.
 */
static void mempool_refill(mempool_t *pool)
{}
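
/*
 * Illustrative sketch only of the "cheap workaround" mentioned above: while
 * the pool sits below its reserved minimum, bouncing an allocation straight
 * back through mempool_free() returns the element to the reserve, topping the
 * pool back up. curr_nr and min_nr are existing mempool_t fields; the helper
 * itself is an assumption for illustration.
 */
static void example_mempool_topup(mempool_t *pool)
{
	while (pool->curr_nr < pool->min_nr)
		mempool_free(mempool_alloc(pool, GFP_KERNEL), pool);
}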

static void mlx5_ib_eq_pf_action(struct work_struct *work)
{}

enum {};

int mlx5r_odp_create_eq(struct mlx5_ib_dev *dev, struct mlx5_ib_pf_eq *eq)
{}

static int
mlx5_ib_odp_destroy_eq(struct mlx5_ib_dev *dev, struct mlx5_ib_pf_eq *eq)
{}

int mlx5_odp_init_mkey_cache(struct mlx5_ib_dev *dev)
{}

static const struct ib_device_ops mlx5_ib_dev_odp_ops =;

int mlx5_ib_odp_init_one(struct mlx5_ib_dev *dev)
{}

void mlx5_ib_odp_cleanup_one(struct mlx5_ib_dev *dev)
{}

int mlx5_ib_odp_init(void)
{}

struct prefetch_mr_work {};

static void destroy_prefetch_work(struct prefetch_mr_work *work)
{}

static struct mlx5_ib_mr *
get_prefetchable_mr(struct ib_pd *pd, enum ib_uverbs_advise_mr_advice advice,
		    u32 lkey)
{}

static void mlx5_ib_prefetch_mr_work(struct work_struct *w)
{}

static int init_prefetch_work(struct ib_pd *pd,
			       enum ib_uverbs_advise_mr_advice advice,
			       u32 pf_flags, struct prefetch_mr_work *work,
			       struct ib_sge *sg_list, u32 num_sge)
{}

static int mlx5_ib_prefetch_sg_list(struct ib_pd *pd,
				    enum ib_uverbs_advise_mr_advice advice,
				    u32 pf_flags, struct ib_sge *sg_list,
				    u32 num_sge)
{}

int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
			       enum ib_uverbs_advise_mr_advice advice,
			       u32 flags, struct ib_sge *sg_list, u32 num_sge)
{}