linux/drivers/infiniband/sw/rxe/rxe_mr.c

// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */

#include <linux/libnvdimm.h>

#include "rxe.h"
#include "rxe_loc.h"

/* Return a random 8-bit key value that is
 * different from last_key. Set last_key to -1
 * if this is the first key for an MR or MW.
 */
u8 rxe_get_next_key(u32 last_key)
{}
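
/* A sketch of what the empty body above might contain, assuming only
 * get_random_bytes() from <linux/random.h>: keep drawing one random byte
 * until it differs from last_key. A last_key of -1 can never equal a u8,
 * so the first key for an MR or MW is accepted immediately.
 */
u8 rxe_get_next_key(u32 last_key)
{
	u8 key;

	do {
		get_random_bytes(&key, 1);
	} while (key == last_key);

	return key;
}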

int mr_check_range(struct rxe_mr *mr, u64 iova, size_t length)
{}
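
/* A sketch of one possible body for the range check above, assuming the
 * mr->ibmr.iova/length fields and the driver's rxe_dbg_mr() logging
 * helper: DMA MRs cover the whole address space, while user and fast-reg
 * MRs must fully contain [iova, iova + length).
 */
int mr_check_range(struct rxe_mr *mr, u64 iova, size_t length)
{
	switch (mr->ibmr.type) {
	case IB_MR_TYPE_DMA:
		return 0;

	case IB_MR_TYPE_USER:
	case IB_MR_TYPE_MEM_REG:
		if (iova < mr->ibmr.iova ||
		    iova + length > mr->ibmr.iova + mr->ibmr.length) {
			rxe_dbg_mr(mr, "iova/length out of range\n");
			return -EINVAL;
		}
		return 0;

	default:
		rxe_dbg_mr(mr, "mr type not supported\n");
		return -EINVAL;
	}
}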

static void rxe_mr_init(int access, struct rxe_mr *mr)
{}

void rxe_mr_init_dma(int access, struct rxe_mr *mr)
{}
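
/* A sketch of possible bodies for the two init helpers above, assuming
 * the key/state fields of struct rxe_mr and the pool element index used
 * elsewhere in the driver: build an l/rkey from the pool index plus a
 * fresh 8-bit key, and default the page geometry to the system page size.
 */
static void rxe_mr_init(int access, struct rxe_mr *mr)
{
	u32 key = mr->elem.index << 8 | rxe_get_next_key(-1);

	/* keep the private lkey/rkey in sync with the ib_mr copies */
	mr->lkey = mr->ibmr.lkey = key;
	mr->rkey = mr->ibmr.rkey = key;

	mr->access = access;
	mr->ibmr.page_size = PAGE_SIZE;
	mr->page_mask = PAGE_MASK;
	mr->page_shift = PAGE_SHIFT;
	mr->state = RXE_MR_STATE_INVALID;
}

void rxe_mr_init_dma(int access, struct rxe_mr *mr)
{
	rxe_mr_init(access, mr);

	/* DMA MRs have no page list and are usable immediately */
	mr->state = RXE_MR_STATE_VALID;
	mr->ibmr.type = IB_MR_TYPE_DMA;
}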

static unsigned long rxe_mr_iova_to_index(struct rxe_mr *mr, u64 iova)
{}

static unsigned long rxe_mr_iova_to_page_offset(struct rxe_mr *mr, u64 iova)
{}
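
/* A sketch of the two translation helpers above, assuming mr->page_shift
 * and a mr_page_size() accessor for mr->ibmr.page_size: the index is the
 * distance, in MR pages, from the first page backing the MR, and the
 * offset is the remainder within one MR page.
 */
static unsigned long rxe_mr_iova_to_index(struct rxe_mr *mr, u64 iova)
{
	return (iova >> mr->page_shift) - (mr->ibmr.iova >> mr->page_shift);
}

static unsigned long rxe_mr_iova_to_page_offset(struct rxe_mr *mr, u64 iova)
{
	return iova & (mr_page_size(mr) - 1);
}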

static bool is_pmem_page(struct page *pg)
{}

static int rxe_mr_fill_pages_from_sgt(struct rxe_mr *mr, struct sg_table *sgt)
{}
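
/* A sketch of the two helpers above, assuming mr->page_list is an xarray
 * of struct page pointers and mr->access may carry
 * IB_ACCESS_FLUSH_PERSISTENT. The page-filling loop is simplified to one
 * xa_store() per page; a production version might batch the stores with
 * an XA_STATE walk instead.
 */
static bool is_pmem_page(struct page *pg)
{
	unsigned long paddr = page_to_phys(pg);

	/* persistent memory is tagged IORES_DESC_PERSISTENT_MEMORY */
	return REGION_INTERSECTS ==
	       region_intersects(paddr, PAGE_SIZE, IORESOURCE_MEM,
				 IORES_DESC_PERSISTENT_MEMORY);
}

static int rxe_mr_fill_pages_from_sgt(struct rxe_mr *mr, struct sg_table *sgt)
{
	bool persistent = !!(mr->access & IB_ACCESS_FLUSH_PERSISTENT);
	struct sg_page_iter sg_iter;
	unsigned long index = 0;
	struct page *page;
	int err;

	for_each_sgtable_page(sgt, &sg_iter, 0) {
		page = sg_page_iter_page(&sg_iter);

		/* FLUSH_PERSISTENT only makes sense over pmem pages */
		if (persistent && !is_pmem_page(page))
			return -EINVAL;

		err = xa_err(xa_store(&mr->page_list, index++, page,
				      GFP_KERNEL));
		if (err)
			return err;
	}

	return 0;
}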

int rxe_mr_init_user(struct rxe_dev *rxe, u64 start, u64 length,
		     int access, struct rxe_mr *mr)
{}
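
/* A sketch of rxe_mr_init_user() above, assuming ib_umem_get() pins the
 * user pages and that the umem's appended sg_table (umem->sgt_append.sgt)
 * describes them: initialize the keys, pin the range, and record every
 * backing page in the MR's xarray.
 */
int rxe_mr_init_user(struct rxe_dev *rxe, u64 start, u64 length,
		     int access, struct rxe_mr *mr)
{
	struct ib_umem *umem;
	int err;

	rxe_mr_init(access, mr);
	xa_init(&mr->page_list);

	umem = ib_umem_get(&rxe->ib_dev, start, length, access);
	if (IS_ERR(umem)) {
		rxe_dbg_mr(mr, "unable to pin memory region\n");
		return PTR_ERR(umem);
	}

	err = rxe_mr_fill_pages_from_sgt(mr, &umem->sgt_append.sgt);
	if (err) {
		ib_umem_release(umem);
		return err;
	}

	mr->umem = umem;
	mr->ibmr.type = IB_MR_TYPE_USER;
	mr->state = RXE_MR_STATE_VALID;

	return 0;
}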

static int rxe_mr_alloc(struct rxe_mr *mr, int num_buf)
{}

int rxe_mr_init_fast(int max_pages, struct rxe_mr *mr)
{}
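
/* A sketch of the fast-reg setup above, assuming the xarray page list:
 * rxe_mr_alloc() pre-reserves one slot per page so rxe_set_page() has a
 * place for each entry, and rxe_mr_init_fast() leaves the MR in the FREE
 * state until a REG_MR WQE populates it.
 */
static int rxe_mr_alloc(struct rxe_mr *mr, int num_buf)
{
	int err;
	int i;

	xa_init(&mr->page_list);

	for (i = 0; i < num_buf; i++) {
		err = xa_reserve(&mr->page_list, i, GFP_KERNEL);
		if (err)
			return err;
	}

	mr->num_buf = num_buf;

	return 0;
}

int rxe_mr_init_fast(int max_pages, struct rxe_mr *mr)
{
	int err;

	/* access rights are supplied later by the REG_MR WQE */
	rxe_mr_init(0, mr);

	err = rxe_mr_alloc(mr, max_pages);
	if (err)
		return err;

	mr->state = RXE_MR_STATE_FREE;
	mr->ibmr.type = IB_MR_TYPE_MEM_REG;

	return 0;
}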

static int rxe_set_page(struct ib_mr *ibmr, u64 dma_addr)
{}

int rxe_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sgl,
		  int sg_nents, unsigned int *sg_offset)
{}
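
/* A sketch of the two fast-reg mapping routines above, assuming the
 * to_rmr()/mr_page_size() accessors and that the "dma" addresses handed
 * to rxe_set_page() are kernel virtual addresses: rxe_map_mr_sg() sets
 * the page geometry and lets the core ib_sg_to_pages() call back into
 * rxe_set_page() once per page.
 */
static int rxe_set_page(struct ib_mr *ibmr, u64 dma_addr)
{
	struct rxe_mr *mr = to_rmr(ibmr);
	struct page *page = virt_to_page((void *)(unsigned long)dma_addr);
	bool persistent = !!(mr->access & IB_ACCESS_FLUSH_PERSISTENT);
	int err;

	if (persistent && !is_pmem_page(page))
		return -EINVAL;

	if (unlikely(mr->nbuf == mr->num_buf))
		return -ENOMEM;

	err = xa_err(xa_store(&mr->page_list, mr->nbuf, page, GFP_KERNEL));
	if (err)
		return err;

	mr->nbuf++;
	return 0;
}

int rxe_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sgl,
		  int sg_nents, unsigned int *sg_offset)
{
	struct rxe_mr *mr = to_rmr(ibmr);
	unsigned int page_size = mr_page_size(mr);

	mr->nbuf = 0;
	mr->page_shift = ilog2(page_size);
	mr->page_mask = ~((u64)page_size - 1);
	mr->page_offset = mr->ibmr.iova & (page_size - 1);

	return ib_sg_to_pages(ibmr, sgl, sg_nents, sg_offset, rxe_set_page);
}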

static int rxe_mr_copy_xarray(struct rxe_mr *mr, u64 iova, void *addr,
			      unsigned int length, enum rxe_mr_copy_dir dir)
{}

static void rxe_mr_copy_dma(struct rxe_mr *mr, u64 dma_addr, void *addr,
			    unsigned int length, enum rxe_mr_copy_dir dir)
{}

int rxe_mr_copy(struct rxe_mr *mr, u64 iova, void *addr,
		unsigned int length, enum rxe_mr_copy_dir dir)
{}
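
/* A sketch of the xarray copy helper and the rxe_mr_copy() dispatcher
 * above, assuming mr->page_list is indexed by rxe_mr_iova_to_index() and
 * that RXE_TO_MR_OBJ/RXE_FROM_MR_OBJ select the copy direction.
 * rxe_mr_copy_dma() would run the same page-at-a-time loop, but derive
 * each page directly from the (kernel virtual) DMA address instead of
 * the xarray.
 */
static int rxe_mr_copy_xarray(struct rxe_mr *mr, u64 iova, void *addr,
			      unsigned int length, enum rxe_mr_copy_dir dir)
{
	unsigned int page_offset = rxe_mr_iova_to_page_offset(mr, iova);
	unsigned long index = rxe_mr_iova_to_index(mr, iova);
	unsigned int bytes;
	struct page *page;
	u8 *va;

	while (length) {
		page = xa_load(&mr->page_list, index);
		if (!page)
			return -EFAULT;

		bytes = min_t(unsigned int, length,
			      mr_page_size(mr) - page_offset);
		va = kmap_local_page(page);
		if (dir == RXE_TO_MR_OBJ)
			memcpy(va + page_offset, addr, bytes);
		else
			memcpy(addr, va + page_offset, bytes);
		kunmap_local(va);

		page_offset = 0;
		addr += bytes;
		length -= bytes;
		index++;
	}

	return 0;
}

int rxe_mr_copy(struct rxe_mr *mr, u64 iova, void *addr,
		unsigned int length, enum rxe_mr_copy_dir dir)
{
	int err;

	if (length == 0)
		return 0;

	if (WARN_ON(!mr))
		return -EINVAL;

	if (mr->ibmr.type == IB_MR_TYPE_DMA) {
		rxe_mr_copy_dma(mr, iova, addr, length, dir);
		return 0;
	}

	err = mr_check_range(mr, iova, length);
	if (unlikely(err)) {
		rxe_dbg_mr(mr, "iova out of range\n");
		return err;
	}

	return rxe_mr_copy_xarray(mr, iova, addr, length, dir);
}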

/* Copy data into or out of a WQE, i.e. its SG list,
 * under the control of a DMA descriptor.
 */
int copy_data(
	struct rxe_pd		*pd,
	int			access,
	struct rxe_dma_info	*dma,
	void			*addr,
	int			length,
	enum rxe_mr_copy_dir	dir)
{}
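
/* A greatly simplified sketch of copy_data() above, assuming the
 * rxe_dma_info fields (sge[], cur_sge, sge_offset, resid) and the
 * lookup_mr()/rxe_mr_copy()/rxe_put() helpers: walk the SG list, look up
 * the MR behind each SGE by lkey, and copy piecewise. A full version
 * would also want to cache the looked-up MR across chunks instead of
 * re-looking it up for every SGE.
 */
int copy_data(struct rxe_pd *pd, int access, struct rxe_dma_info *dma,
	      void *addr, int length, enum rxe_mr_copy_dir dir)
{
	struct rxe_sge *sge = &dma->sge[dma->cur_sge];
	int offset = dma->sge_offset;
	int resid = dma->resid;
	struct rxe_mr *mr;
	unsigned int bytes;
	u64 iova;
	int err;

	if (length == 0)
		return 0;
	if (length > resid)
		return -EINVAL;

	while (length > 0) {
		if (offset >= sge->length) {
			/* current SGE consumed, advance to the next one */
			sge++;
			dma->cur_sge++;
			offset = 0;
			if (dma->cur_sge >= dma->num_sge)
				return -ENOSPC;
			continue;
		}

		mr = lookup_mr(pd, access, sge->lkey, RXE_LOOKUP_LOCAL);
		if (!mr)
			return -EINVAL;

		bytes = min_t(unsigned int, length, sge->length - offset);
		iova = sge->addr + offset;

		err = rxe_mr_copy(mr, iova, addr, bytes, dir);
		rxe_put(mr);	/* drop the reference taken by lookup_mr() */
		if (err)
			return err;

		addr += bytes;
		offset += bytes;
		resid -= bytes;
		length -= bytes;
	}

	dma->sge_offset = offset;
	dma->resid = resid;

	return 0;
}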

int rxe_flush_pmem_iova(struct rxe_mr *mr, u64 iova, unsigned int length)
{}
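
/* A sketch of the pmem flush above, assuming arch_wb_cache_pmem() from
 * <linux/libnvdimm.h> (included above): walk the affected pages and write
 * the CPU caches back so the flushed data reaches the persistence domain.
 */
int rxe_flush_pmem_iova(struct rxe_mr *mr, u64 iova, unsigned int length)
{
	unsigned int page_offset, bytes;
	unsigned long index;
	struct page *page;
	u8 *va;
	int err;

	if (length == 0)
		return 0;

	/* DMA MRs are not backed by a page list that can be flushed */
	if (mr->ibmr.type == IB_MR_TYPE_DMA)
		return -EFAULT;

	err = mr_check_range(mr, iova, length);
	if (err)
		return err;

	while (length > 0) {
		index = rxe_mr_iova_to_index(mr, iova);
		page = xa_load(&mr->page_list, index);
		if (!page)
			return -EFAULT;

		page_offset = rxe_mr_iova_to_page_offset(mr, iova);
		bytes = min_t(unsigned int, length,
			      mr_page_size(mr) - page_offset);

		va = kmap_local_page(page);
		arch_wb_cache_pmem(va + page_offset, bytes);
		kunmap_local(va);

		length -= bytes;
		iova += bytes;
	}

	return 0;
}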

/* Guarantee atomicity of atomic operations at the machine level. */
static DEFINE_SPINLOCK(atomic_ops_lock);

int rxe_mr_do_atomic_op(struct rxe_mr *mr, u64 iova, int opcode,
			u64 compare, u64 swap_add, u64 *orig_val)
{}
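
/* A sketch of the compare & swap / fetch & add handler above, assuming
 * the driver's RESPST_* responder status codes and the xarray page list:
 * resolve iova to a mapped, naturally aligned u64 and perform the
 * operation under atomic_ops_lock so concurrent atomics stay serialized.
 */
int rxe_mr_do_atomic_op(struct rxe_mr *mr, u64 iova, int opcode,
			u64 compare, u64 swap_add, u64 *orig_val)
{
	unsigned int page_offset;
	struct page *page;
	u64 value;
	u64 *va;

	if (unlikely(mr->state != RXE_MR_STATE_VALID))
		return RESPST_ERR_RKEY_VIOLATION;

	if (mr->ibmr.type == IB_MR_TYPE_DMA) {
		/* rxe "dma" addresses are kernel virtual addresses */
		page_offset = iova & (PAGE_SIZE - 1);
		page = virt_to_page((void *)(unsigned long)iova);
	} else {
		if (mr_check_range(mr, iova, sizeof(value)))
			return RESPST_ERR_RKEY_VIOLATION;
		page_offset = rxe_mr_iova_to_page_offset(mr, iova);
		page = xa_load(&mr->page_list, rxe_mr_iova_to_index(mr, iova));
		if (!page)
			return RESPST_ERR_RKEY_VIOLATION;
	}

	/* the target must be naturally aligned for a 64-bit access */
	if (unlikely(page_offset & 0x7))
		return RESPST_ERR_MISALIGNED_ATOMIC;

	va = kmap_local_page(page);

	spin_lock_bh(&atomic_ops_lock);
	value = *orig_val = va[page_offset >> 3];

	if (opcode == IB_OPCODE_RC_COMPARE_SWAP ||
	    opcode == IB_OPCODE_RD_COMPARE_SWAP) {
		if (value == compare)
			va[page_offset >> 3] = swap_add;
	} else {
		value += swap_add;
		va[page_offset >> 3] = value;
	}
	spin_unlock_bh(&atomic_ops_lock);

	kunmap_local(va);

	return 0;
}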

#if defined CONFIG_64BIT
/* only implemented, and only called, on 64-bit architectures */
int rxe_mr_do_atomic_write(struct rxe_mr *mr, u64 iova, u64 value)
{}
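
/* A sketch of the 64-bit atomic-write path above, assuming the same iova
 * resolution as rxe_mr_do_atomic_op(): the 8-byte payload is written with
 * smp_store_release() so it is ordered after prior accesses to the MR.
 */
int rxe_mr_do_atomic_write(struct rxe_mr *mr, u64 iova, u64 value)
{
	unsigned int page_offset;
	struct page *page;
	u64 *va;

	if (unlikely(mr->state != RXE_MR_STATE_VALID))
		return RESPST_ERR_RKEY_VIOLATION;

	if (mr->ibmr.type == IB_MR_TYPE_DMA) {
		page_offset = iova & (PAGE_SIZE - 1);
		page = virt_to_page((void *)(unsigned long)iova);
	} else {
		if (mr_check_range(mr, iova, sizeof(value)))
			return RESPST_ERR_RKEY_VIOLATION;
		page_offset = rxe_mr_iova_to_page_offset(mr, iova);
		page = xa_load(&mr->page_list, rxe_mr_iova_to_index(mr, iova));
		if (!page)
			return RESPST_ERR_RKEY_VIOLATION;
	}

	/* the 8-byte payload must be naturally aligned */
	if (unlikely(page_offset & 0x7))
		return RESPST_ERR_MISALIGNED_ATOMIC;

	va = kmap_local_page(page);
	smp_store_release(&va[page_offset >> 3], value);
	kunmap_local(va);

	return 0;
}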
#else
int rxe_mr_do_atomic_write(struct rxe_mr *mr, u64 iova, u64 value)
{
	return RESPST_ERR_UNSUPPORTED_OPCODE;
}
#endif

int advance_dma_data(struct rxe_dma_info *dma, unsigned int length)
{}
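
/* A sketch of advance_dma_data() above, assuming the same rxe_dma_info
 * fields as copy_data(): skip the requested number of bytes without
 * copying anything, updating the current SGE, offset and residual count.
 */
int advance_dma_data(struct rxe_dma_info *dma, unsigned int length)
{
	struct rxe_sge *sge = &dma->sge[dma->cur_sge];
	int offset = dma->sge_offset;
	int resid = dma->resid;
	unsigned int bytes;

	while (length) {
		if (offset >= sge->length) {
			sge++;
			dma->cur_sge++;
			offset = 0;
			if (dma->cur_sge >= dma->num_sge)
				return -ENOSPC;
			continue;
		}

		bytes = min_t(unsigned int, length, sge->length - offset);

		offset += bytes;
		resid -= bytes;
		length -= bytes;
	}

	dma->sge_offset = offset;
	dma->resid = resid;

	return 0;
}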

struct rxe_mr *lookup_mr(struct rxe_pd *pd, int access, u32 key,
			 enum rxe_mr_lookup_type type)
{}
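
/* A sketch of lookup_mr() above, assuming the rxe pool helpers
 * (rxe_pool_get_index(), rxe_put(), mr_pd()): the upper 24 bits of the
 * key index the MR pool, and the candidate is rejected unless the key,
 * PD, access rights and state all match.
 */
struct rxe_mr *lookup_mr(struct rxe_pd *pd, int access, u32 key,
			 enum rxe_mr_lookup_type type)
{
	struct rxe_dev *rxe = to_rdev(pd->ibpd.device);
	struct rxe_mr *mr;

	mr = rxe_pool_get_index(&rxe->mr_pool, key >> 8);
	if (!mr)
		return NULL;

	if (unlikely((type == RXE_LOOKUP_LOCAL && mr->lkey != key) ||
		     (type == RXE_LOOKUP_REMOTE && mr->rkey != key) ||
		     mr_pd(mr) != pd || ((access & mr->access) != access) ||
		     mr->state != RXE_MR_STATE_VALID)) {
		rxe_put(mr);	/* drop the reference taken by the pool lookup */
		mr = NULL;
	}

	return mr;
}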

int rxe_invalidate_mr(struct rxe_qp *qp, u32 key)
{}
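
/* A sketch of rxe_invalidate_mr() above, assuming the same pool lookup as
 * lookup_mr(): a local or remote invalidate moves a fast-reg MR that has
 * no bound memory windows back to the free state so it can be
 * re-registered.
 */
int rxe_invalidate_mr(struct rxe_qp *qp, u32 key)
{
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
	struct rxe_mr *mr;
	int err;

	mr = rxe_pool_get_index(&rxe->mr_pool, key >> 8);
	if (!mr)
		return -EINVAL;

	if (key != mr->lkey && key != mr->rkey) {
		err = -EINVAL;
		goto out_put;
	}

	if (atomic_read(&mr->num_mw) > 0) {
		/* cannot invalidate an MR with bound memory windows */
		err = -EINVAL;
		goto out_put;
	}

	if (mr->ibmr.type != IB_MR_TYPE_MEM_REG) {
		err = -EINVAL;
		goto out_put;
	}

	mr->state = RXE_MR_STATE_FREE;
	err = 0;

out_put:
	rxe_put(mr);
	return err;
}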

/* A user can (re)register a fast MR by executing a REG_MR WQE.
 * The user is expected to hold a reference on the ib mr until the
 * WQE completes.
 * Once a fast MR is created this is the only way to change the
 * private keys, and it is the user's responsibility to keep the
 * ib mr keys in sync with the rxe mr keys.
 */
int rxe_reg_fast_mr(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
{}
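
/* A sketch of rxe_reg_fast_mr() above, assuming the reg fields of the
 * send WQE (wqe->wr.wr.reg.{mr,key,access}) and the to_rmr() accessor:
 * the WQE may only change the low 8 "key" bits and must target a free
 * fast-reg MR in the same PD as the QP.
 */
int rxe_reg_fast_mr(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
{
	struct rxe_mr *mr = to_rmr(wqe->wr.wr.reg.mr);
	u32 key = wqe->wr.wr.reg.key;
	u32 access = wqe->wr.wr.reg.access;

	/* the user can only register an MR in the free state */
	if (unlikely(mr->state != RXE_MR_STATE_FREE)) {
		rxe_dbg_mr(mr, "mr not free\n");
		return -EINVAL;
	}

	/* the QP and the MR must share a protection domain */
	if (unlikely(qp->ibqp.pd != mr->ibmr.pd)) {
		rxe_dbg_mr(mr, "qp->pd and mr->pd don't match\n");
		return -EINVAL;
	}

	/* only the key portion of the l/rkey may change */
	if (unlikely((mr->lkey & ~0xff) != (key & ~0xff))) {
		rxe_dbg_mr(mr, "key has wrong index\n");
		return -EINVAL;
	}

	mr->access = access;
	mr->lkey = key;
	mr->rkey = key;
	mr->ibmr.iova = wqe->wr.wr.reg.mr->iova;
	mr->state = RXE_MR_STATE_VALID;

	return 0;
}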

void rxe_mr_cleanup(struct rxe_pool_elem *elem)
{}
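
/* A sketch of the pool cleanup hook above, assuming mr_pd() and the
 * reference counting used by the rxe object pools: drop the PD reference,
 * release any pinned user memory and tear down the page xarray.
 */
void rxe_mr_cleanup(struct rxe_pool_elem *elem)
{
	struct rxe_mr *mr = container_of(elem, typeof(*mr), elem);

	rxe_put(mr_pd(mr));
	ib_umem_release(mr->umem);	/* safe for NULL umem */

	if (mr->ibmr.type != IB_MR_TYPE_DMA)
		xa_destroy(&mr->page_list);
}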