/*
 * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/slab.h>
#include <rdma/ib_user_verbs.h>

#include "mlx4_ib.h"

static u32 convert_access(int acc)
{ … }

static enum mlx4_mw_type to_mlx4_type(enum ib_mw_type type)
{ … }

struct ib_mr *mlx4_ib_get_dma_mr(struct ib_pd *pd, int acc)
{ … }

enum { … };

static int mlx4_ib_umem_write_mtt_block(struct mlx4_ib_dev *dev,
					struct mlx4_mtt *mtt,
					u64 mtt_size, u64 mtt_shift,
					u64 len, u64 cur_start_addr,
					u64 *pages, int *start_index,
					int *npages)
{ … }

static inline u64 alignment_of(u64 ptr)
{ … }

static int mlx4_ib_umem_calc_block_mtt(u64 next_block_start,
				       u64 current_block_end,
				       u64 block_shift)
{ … }

int mlx4_ib_umem_write_mtt(struct mlx4_ib_dev *dev, struct mlx4_mtt *mtt,
			   struct ib_umem *umem)
{ … }

/*
 * Calculate the optimal MTT size based on contiguous pages.
 * The function also returns the number of pages that are not aligned to the
 * calculated mtt_size, to be added to the total number of pages. For that we
 * check the lengths of the first and last chunks: if either is not aligned
 * to mtt_size, we increment non_aligned_pages. All chunks in the middle are
 * already handled as part of the mtt shift calculation, for both their start
 * and end addresses. (An illustrative sketch of this calculation follows the
 * function below.)
 */
int mlx4_ib_umem_calc_optimal_mtt_size(struct ib_umem *umem, u64 start_va,
				       int *num_of_mtts)
{ … }
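/*
 * Illustrative sketch (not part of the driver): a minimal, standalone
 * userspace model of the block-size calculation described above, assuming
 * the region is given as an array of page-aligned chunks with at least two
 * entries. The names "struct chunk" and "calc_optimal_block_shift" are
 * hypothetical; the real mlx4_ib_umem_calc_optimal_mtt_size() walks the
 * ib_umem scatterlist instead. The idea: every interior chunk boundary must
 * be aligned to the chosen block size 2^shift for a single MTT entry to
 * span it, so the shift is bounded by the lowest set bit of each
 * intermediate start/end address. Partial blocks in the first and last
 * chunks are counted separately as non-aligned pages.
 *
 * Build (hypothetical file name): cc -o mtt_sketch mtt_sketch.c
 */
#if 0	/* standalone example, excluded from the kernel build */
#include <stdint.h>
#include <stdio.h>

#define EX_PAGE_SHIFT	12
#define EX_PAGE_SIZE	(1ULL << EX_PAGE_SHIFT)
#define EX_MIN(a, b)	((a) < (b) ? (a) : (b))

struct chunk {			/* hypothetical stand-in for one umem chunk */
	uint64_t addr;		/* start address, page aligned */
	uint64_t len;		/* length in bytes, multiple of EX_PAGE_SIZE */
};

/* Alignment of an address: the value of its lowest set bit. */
static uint64_t ex_alignment_of(uint64_t ptr)
{
	return ptr ? (ptr & -ptr) : ~0ULL;
}

/* Returns the largest usable block shift; assumes n >= 2 for simplicity. */
static unsigned int calc_optimal_block_shift(const struct chunk *c, int n,
					     uint64_t *non_aligned_pages)
{
	uint64_t block = 1ULL << 31;	/* arbitrary upper cap for the sketch */
	unsigned int shift = 0;
	int i;

	/* Interior boundaries limit the block size: every chunk start and
	 * end, except the very first start and very last end, must be
	 * block aligned. */
	for (i = 0; i < n; i++) {
		if (i > 0)
			block = EX_MIN(block, ex_alignment_of(c[i].addr));
		if (i < n - 1)
			block = EX_MIN(block,
				       ex_alignment_of(c[i].addr + c[i].len));
	}
	if (block < EX_PAGE_SIZE)
		block = EX_PAGE_SIZE;	/* never go below one page */

	/* The first and last chunks may fill only part of a block; count
	 * those leftover pages so the caller can add them to the total. */
	*non_aligned_pages = (c[0].len & (block - 1)) >> EX_PAGE_SHIFT;
	*non_aligned_pages += (c[n - 1].len & (block - 1)) >> EX_PAGE_SHIFT;

	while ((1ULL << (shift + 1)) <= block)
		shift++;		/* shift = log2(block) */
	return shift;
}

int main(void)
{
	/* Two chunks whose shared boundary is 64 KiB aligned, so the best
	 * block size is 64 KiB; the 12 KiB tail yields 3 unaligned pages. */
	struct chunk c[] = {
		{ 0x100000, 0x10000 },	/* 64 KiB at 1 MiB */
		{ 0x110000, 0x3000 },	/* 12 KiB immediately after */
	};
	uint64_t extra;
	unsigned int shift = calc_optimal_block_shift(c, 2, &extra);

	printf("block shift %u (%llu bytes), %llu non-aligned pages\n",
	       shift, 1ULL << shift, (unsigned long long)extra);
	return 0;
}
#endif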
static struct ib_umem *mlx4_get_umem_mr(struct ib_device *device, u64 start,
					u64 length, int access_flags)
{ … }

struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				  u64 virt_addr, int access_flags,
				  struct ib_udata *udata)
{ … }

struct ib_mr *mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags, u64 start,
				    u64 length, u64 virt_addr,
				    int mr_access_flags, struct ib_pd *pd,
				    struct ib_udata *udata)
{ … }

static int mlx4_alloc_priv_pages(struct ib_device *device,
				 struct mlx4_ib_mr *mr, int max_pages)
{ … }

static void mlx4_free_priv_pages(struct mlx4_ib_mr *mr)
{ … }

int mlx4_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
{ … }

int mlx4_ib_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata)
{ … }

int mlx4_ib_dealloc_mw(struct ib_mw *ibmw)
{ … }

struct ib_mr *mlx4_ib_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
			       u32 max_num_sg)
{ … }

static int mlx4_set_page(struct ib_mr *ibmr, u64 addr)
{ … }

int mlx4_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
		      int sg_nents, unsigned int *sg_offset)
{ … }