#ifndef _SIW_MEM_H
#define _SIW_MEM_H

struct siw_umem *siw_umem_get(struct ib_device *base_dev, u64 start,
                              u64 len, int rights);
void siw_umem_release(struct siw_umem *umem);
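
/*
 * Usage sketch only, not part of this header: pairing siw_umem_get()
 * with siw_umem_release(). The ERR_PTR-style failure return and the
 * IB_ACCESS_LOCAL_WRITE rights value are assumptions for illustration.
 *
 *	struct siw_umem *umem;
 *
 *	umem = siw_umem_get(base_dev, start, len, IB_ACCESS_LOCAL_WRITE);
 *	if (IS_ERR(umem))
 *		return PTR_ERR(umem);
 *	...
 *	siw_umem_release(umem);
 */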
struct siw_pbl *siw_pbl_alloc(u32 num_buf);
dma_addr_t siw_pbl_get_buffer(struct siw_pbl *pbl, u64 off, int *len, int *idx);
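
/*
 * Usage sketch only: resolving an offset within a physical buffer list.
 * It assumes siw_pbl_get_buffer() returns the DMA address covering
 * 'offset', with *len set to the contiguous bytes available there and
 * *idx acting as a resume hint for subsequent lookups; 'offset' is a
 * caller-provided example variable.
 *
 *	dma_addr_t paddr;
 *	int plen, pbl_idx = 0;
 *
 *	paddr = siw_pbl_get_buffer(pbl, offset, &plen, &pbl_idx);
 *	if (!paddr)
 *		return -EINVAL;
 */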
struct siw_mem *siw_mem_id2obj(struct siw_device *sdev, int stag_index);
int siw_mem_add(struct siw_device *sdev, struct siw_mem *m);
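
/*
 * Usage sketch only: STag-indexed object lookup. It assumes the low byte
 * of an STag holds the key, so the object index is stag >> 8, and that a
 * successful lookup returns a referenced object to be released with
 * siw_mem_put() (declared below).
 *
 *	struct siw_mem *mem = siw_mem_id2obj(sdev, stag >> 8);
 *
 *	if (!mem)
 *		return -EINVAL;
 *	...
 *	siw_mem_put(mem);
 */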
int siw_invalidate_stag(struct ib_pd *pd, u32 stag);
int siw_check_mem(struct ib_pd *pd, struct siw_mem *mem, u64 addr,
                  enum ib_access_flags perms, int len);
int siw_check_sge(struct ib_pd *pd, struct siw_sge *sge,
                  struct siw_mem *mem[], enum ib_access_flags perms,
                  u32 off, int len);
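
/*
 * Usage sketch only: a typical check before touching SGE-referenced
 * memory. It assumes siw_check_sge() resolves the target and caches it
 * in mem[0], returning 0 on success; the offset of 0 and the
 * IB_ACCESS_LOCAL_WRITE permission are example values.
 *
 *	struct siw_mem *mem = NULL;
 *	int rv;
 *
 *	rv = siw_check_sge(pd, sge, &mem, IB_ACCESS_LOCAL_WRITE, 0,
 *	                   sge->length);
 *	if (rv)
 *		return rv;
 *	...
 *	siw_mem_put(mem);
 */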
void siw_wqe_put_mem(struct siw_wqe *wqe, enum siw_opcode op);
int siw_mr_add_mem(struct siw_mr *mr, struct ib_pd *pd, void *mem_obj,
                   u64 start, u64 len, int rights);
void siw_mr_drop_mem(struct siw_mr *mr);
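
/*
 * Usage sketch only: how the two MR helpers above are assumed to pair up
 * when registering user memory, with 'mem_obj' being either a pinned
 * umem or a PBL. Error handling is an assumption for illustration.
 *
 *	rv = siw_mr_add_mem(mr, pd, umem, start, len, rights);
 *	if (rv) {
 *		siw_umem_release(umem);
 *		return rv;
 *	}
 *	...
 *	siw_mr_drop_mem(mr);
 */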
void siw_free_mem(struct kref *ref);
static inline void siw_mem_put(struct siw_mem *mem)
{ … }
static inline void siw_unref_mem_sgl(struct siw_mem **mem, unsigned int num_sge)
{ … }
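
/*
 * Usage sketch only: the reference-counting contract assumed for the
 * helpers above. siw_free_mem() is the kref release callback,
 * siw_mem_put() drops a single reference (such as the one returned by
 * siw_mem_id2obj()), and siw_unref_mem_sgl() is assumed to drop and
 * clear each entry of an SGL-sized siw_mem array, e.g. one filled in by
 * siw_check_sge(); SIW_MAX_SGE is defined elsewhere in the driver.
 *
 *	struct siw_mem *mem[SIW_MAX_SGE] = { NULL };
 *	...
 *	siw_unref_mem_sgl(mem, SIW_MAX_SGE);
 */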
#define CHUNK_SHIFT …
#define PAGES_PER_CHUNK …
#define CHUNK_MASK …
#define PAGE_CHUNK_SIZE …
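
/*
 * Sketch only: the chunked page-list arithmetic these macros are assumed
 * to encode, with PAGES_PER_CHUNK = 1 << CHUNK_SHIFT page pointers per
 * chunk. Mapping a user virtual address to its pinned page might look as
 * follows; 'fp_addr' (first pinned address), 'num_pages' and
 * 'page_chunk[].plist' are assumed siw_umem members.
 *
 *	unsigned int page_idx = (addr - umem->fp_addr) >> PAGE_SHIFT;
 *	unsigned int chunk_idx = page_idx >> CHUNK_SHIFT;
 *	unsigned int page_in_chunk = page_idx & ~CHUNK_MASK;
 *
 *	if (page_idx < umem->num_pages)
 *		page = umem->page_chunk[chunk_idx].plist[page_in_chunk];
 */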
static inline struct page *siw_get_upage(struct siw_umem *umem, u64 addr)
{ … }
#endif