// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB /* Copyright (c) 2015 - 2021 Intel Corporation */ #include "main.h" /** * irdma_query_device - get device attributes * @ibdev: device pointer from stack * @props: returning device attributes * @udata: user data */ static int irdma_query_device(struct ib_device *ibdev, struct ib_device_attr *props, struct ib_udata *udata) { … } /** * irdma_query_port - get port attributes * @ibdev: device pointer from stack * @port: port number for query * @props: returning device attributes */ static int irdma_query_port(struct ib_device *ibdev, u32 port, struct ib_port_attr *props) { … } /** * irdma_disassociate_ucontext - Disassociate user context * @context: ib user context */ static void irdma_disassociate_ucontext(struct ib_ucontext *context) { … } static int irdma_mmap_legacy(struct irdma_ucontext *ucontext, struct vm_area_struct *vma) { … } static void irdma_mmap_free(struct rdma_user_mmap_entry *rdma_entry) { … } static struct rdma_user_mmap_entry* irdma_user_mmap_entry_insert(struct irdma_ucontext *ucontext, u64 bar_offset, enum irdma_mmap_flag mmap_flag, u64 *mmap_offset) { … } /** * irdma_mmap - user memory map * @context: context created during alloc * @vma: kernel info for user memory map */ static int irdma_mmap(struct ib_ucontext *context, struct vm_area_struct *vma) { … } /** * irdma_alloc_push_page - allocate a push page for qp * @iwqp: qp pointer */ static void irdma_alloc_push_page(struct irdma_qp *iwqp) { … } /** * irdma_alloc_ucontext - Allocate the user context data structure * @uctx: uverbs context pointer * @udata: user data * * This keeps track of all objects associated with a particular * user-mode client. 
*/ static int irdma_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata) { … } /** * irdma_dealloc_ucontext - deallocate the user context data structure * @context: user context created during alloc */ static void irdma_dealloc_ucontext(struct ib_ucontext *context) { … } /** * irdma_alloc_pd - allocate protection domain * @pd: PD pointer * @udata: user data */ static int irdma_alloc_pd(struct ib_pd *pd, struct ib_udata *udata) { … } /** * irdma_dealloc_pd - deallocate pd * @ibpd: ptr of pd to be deallocated * @udata: user data */ static int irdma_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata) { … } /** * irdma_get_pbl - Retrieve pbl from a list given a virtual * address * @va: user virtual address * @pbl_list: pbl list to search in (QP's or CQ's) */ static struct irdma_pbl *irdma_get_pbl(unsigned long va, struct list_head *pbl_list) { … } /** * irdma_clean_cqes - clean cq entries for qp * @iwqp: qp ptr (user or kernel) * @iwcq: cq ptr */ static void irdma_clean_cqes(struct irdma_qp *iwqp, struct irdma_cq *iwcq) { … } static void irdma_remove_push_mmap_entries(struct irdma_qp *iwqp) { … } static int irdma_setup_push_mmap_entries(struct irdma_ucontext *ucontext, struct irdma_qp *iwqp, u64 *push_wqe_mmap_key, u64 *push_db_mmap_key) { … } /** * irdma_destroy_qp - destroy qp * @ibqp: qp's ib pointer also to get to device's qp address * @udata: user data */ static int irdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata) { … } /** * irdma_setup_virt_qp - setup for allocation of virtual qp * @iwdev: irdma device * @iwqp: qp ptr * @init_info: initialize info to return */ static void irdma_setup_virt_qp(struct irdma_device *iwdev, struct irdma_qp *iwqp, struct irdma_qp_init_info *init_info) { … } /** * irdma_setup_umode_qp - setup sq and rq size in user mode qp * @udata: udata * @iwdev: iwarp device * @iwqp: qp ptr (user or kernel) * @info: initialize info to return * @init_attr: Initial QP create attributes */ static int 
irdma_setup_umode_qp(struct ib_udata *udata, struct irdma_device *iwdev, struct irdma_qp *iwqp, struct irdma_qp_init_info *info, struct ib_qp_init_attr *init_attr) { … } /** * irdma_setup_kmode_qp - setup initialization for kernel mode qp * @iwdev: iwarp device * @iwqp: qp ptr (user or kernel) * @info: initialize info to return * @init_attr: Initial QP create attributes */ static int irdma_setup_kmode_qp(struct irdma_device *iwdev, struct irdma_qp *iwqp, struct irdma_qp_init_info *info, struct ib_qp_init_attr *init_attr) { … } static int irdma_cqp_create_qp_cmd(struct irdma_qp *iwqp) { … } static void irdma_roce_fill_and_set_qpctx_info(struct irdma_qp *iwqp, struct irdma_qp_host_ctx_info *ctx_info) { … } static void irdma_iw_fill_and_set_qpctx_info(struct irdma_qp *iwqp, struct irdma_qp_host_ctx_info *ctx_info) { … } static int irdma_validate_qp_attrs(struct ib_qp_init_attr *init_attr, struct irdma_device *iwdev) { … } static void irdma_flush_worker(struct work_struct *work) { … } /** * irdma_create_qp - create qp * @ibqp: ptr of qp * @init_attr: attributes for qp * @udata: user data for create qp */ static int irdma_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *init_attr, struct ib_udata *udata) { … } static int irdma_get_ib_acc_flags(struct irdma_qp *iwqp) { … } /** * irdma_query_qp - query qp attributes * @ibqp: qp pointer * @attr: attributes pointer * @attr_mask: Not used * @init_attr: qp attributes to return */ static int irdma_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask, struct ib_qp_init_attr *init_attr) { … } /** * irdma_query_pkey - Query partition key * @ibdev: device pointer from stack * @port: port number * @index: index of pkey * @pkey: pointer to store the pkey */ static int irdma_query_pkey(struct ib_device *ibdev, u32 port, u16 index, u16 *pkey) { … } static u8 irdma_roce_get_vlan_prio(const struct ib_gid_attr *attr, u8 prio) { … } static int irdma_wait_for_suspend(struct irdma_qp *iwqp) { … } /** * 
irdma_modify_qp_roce - modify qp request * @ibqp: qp's pointer for modify * @attr: access attributes * @attr_mask: state mask * @udata: user data */ int irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask, struct ib_udata *udata) { … } /** * irdma_modify_qp - modify qp request * @ibqp: qp's pointer for modify * @attr: access attributes * @attr_mask: state mask * @udata: user data */ int irdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask, struct ib_udata *udata) { … } /** * irdma_cq_free_rsrc - free up resources for cq * @rf: RDMA PCI function * @iwcq: cq ptr */ static void irdma_cq_free_rsrc(struct irdma_pci_f *rf, struct irdma_cq *iwcq) { … } /** * irdma_free_cqbuf - worker to free a cq buffer * @work: provides access to the cq buffer to free */ static void irdma_free_cqbuf(struct work_struct *work) { … } /** * irdma_process_resize_list - remove resized cq buffers from the resize_list * @iwcq: cq which owns the resize_list * @iwdev: irdma device * @lcqe_buf: the buffer where the last cqe is received */ static int irdma_process_resize_list(struct irdma_cq *iwcq, struct irdma_device *iwdev, struct irdma_cq_buf *lcqe_buf) { … } /** * irdma_destroy_cq - destroy cq * @ib_cq: cq pointer * @udata: user data */ static int irdma_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata) { … } /** * irdma_resize_cq - resize cq * @ibcq: cq to be resized * @entries: desired cq size * @udata: user data */ static int irdma_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata) { … } static inline int cq_validate_flags(u32 flags, u8 hw_rev) { … } /** * irdma_create_cq - create cq * @ibcq: CQ allocated * @attr: attributes for cq * @attrs: uverbs attribute bundle */ static int irdma_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr, struct uverbs_attr_bundle *attrs) { … } /** * irdma_get_mr_access - get hw MR access permissions from IB access flags * @access: IB access flags */ static inline u16 
irdma_get_mr_access(int access) { … } /** * irdma_free_stag - free stag resource * @iwdev: irdma device * @stag: stag to free */ static void irdma_free_stag(struct irdma_device *iwdev, u32 stag) { … } /** * irdma_create_stag - create random stag * @iwdev: irdma device */ static u32 irdma_create_stag(struct irdma_device *iwdev) { … } /** * irdma_next_pbl_addr - Get next pbl address * @pbl: pointer to a pble * @pinfo: info pointer * @idx: index */ static inline u64 *irdma_next_pbl_addr(u64 *pbl, struct irdma_pble_info **pinfo, u32 *idx) { … } /** * irdma_copy_user_pgaddrs - copy user page address to pble's os locally * @iwmr: iwmr for IB's user page addresses * @pbl: pble pointer to save 1 level or 0 level pble * @level: indicated level 0, 1 or 2 */ static void irdma_copy_user_pgaddrs(struct irdma_mr *iwmr, u64 *pbl, enum irdma_pble_level level) { … } /** * irdma_check_mem_contiguous - check if pbls stored in arr are contiguous * @arr: lvl1 pbl array * @npages: page count * @pg_size: page size * */ static bool irdma_check_mem_contiguous(u64 *arr, u32 npages, u32 pg_size) { … } /** * irdma_check_mr_contiguous - check if MR is physically contiguous * @palloc: pbl allocation struct * @pg_size: page size */ static bool irdma_check_mr_contiguous(struct irdma_pble_alloc *palloc, u32 pg_size) { … } /** * irdma_setup_pbles - copy user pg address to pble's * @rf: RDMA PCI function * @iwmr: mr pointer for this memory registration * @lvl: requested pble levels */ static int irdma_setup_pbles(struct irdma_pci_f *rf, struct irdma_mr *iwmr, u8 lvl) { … } /** * irdma_handle_q_mem - handle memory for qp and cq * @iwdev: irdma device * @req: information for q memory management * @iwpbl: pble struct * @lvl: pble level mask */ static int irdma_handle_q_mem(struct irdma_device *iwdev, struct irdma_mem_reg_req *req, struct irdma_pbl *iwpbl, u8 lvl) { … } /** * irdma_hw_alloc_mw - create the hw memory window * @iwdev: irdma device * @iwmr: pointer to memory window info */ static int 
irdma_hw_alloc_mw(struct irdma_device *iwdev, struct irdma_mr *iwmr) { … } /** * irdma_alloc_mw - Allocate memory window * @ibmw: Memory Window * @udata: user data pointer */ static int irdma_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata) { … } /** * irdma_dealloc_mw - Dealloc memory window * @ibmw: memory window structure. */ static int irdma_dealloc_mw(struct ib_mw *ibmw) { … } /** * irdma_hw_alloc_stag - cqp command to allocate stag * @iwdev: irdma device * @iwmr: irdma mr pointer */ static int irdma_hw_alloc_stag(struct irdma_device *iwdev, struct irdma_mr *iwmr) { … } /** * irdma_alloc_mr - register stag for fast memory registration * @pd: ibpd pointer * @mr_type: memory for stag registration * @max_num_sg: max number of pages */ static struct ib_mr *irdma_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type, u32 max_num_sg) { … } /** * irdma_set_page - populate pbl list for fmr * @ibmr: ib mem to access iwarp mr pointer * @addr: page dma address for pbl list */ static int irdma_set_page(struct ib_mr *ibmr, u64 addr) { … } /** * irdma_map_mr_sg - map of sg list for fmr * @ibmr: ib mem to access iwarp mr pointer * @sg: scatter gather list * @sg_nents: number of sg pages * @sg_offset: scatter gather list for fmr */ static int irdma_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents, unsigned int *sg_offset) { … } /** * irdma_hwreg_mr - send cqp command for memory registration * @iwdev: irdma device * @iwmr: irdma mr pointer * @access: access for MR */ static int irdma_hwreg_mr(struct irdma_device *iwdev, struct irdma_mr *iwmr, u16 access) { … } static int irdma_reg_user_mr_type_mem(struct irdma_mr *iwmr, int access, bool create_stag) { … } static struct irdma_mr *irdma_alloc_iwmr(struct ib_umem *region, struct ib_pd *pd, u64 virt, enum irdma_memreg_type reg_type) { … } static void irdma_free_iwmr(struct irdma_mr *iwmr) { … } static int irdma_reg_user_mr_type_qp(struct irdma_mem_reg_req req, struct ib_udata *udata, struct irdma_mr *iwmr) { … } 
static int irdma_reg_user_mr_type_cq(struct irdma_mem_reg_req req, struct ib_udata *udata, struct irdma_mr *iwmr) { … } /** * irdma_reg_user_mr - Register a user memory region * @pd: ptr of pd * @start: virtual start address * @len: length of mr * @virt: virtual address * @access: access of mr * @udata: user data */ static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len, u64 virt, int access, struct ib_udata *udata) { … } static struct ib_mr *irdma_reg_user_mr_dmabuf(struct ib_pd *pd, u64 start, u64 len, u64 virt, int fd, int access, struct ib_udata *udata) { … } static int irdma_hwdereg_mr(struct ib_mr *ib_mr) { … } /* * irdma_rereg_mr_trans - Re-register a user MR for a change translation. * @iwmr: ptr of iwmr * @start: virtual start address * @len: length of mr * @virt: virtual address * * Re-register a user memory region when a change translation is requested. * Re-register a new region while reusing the stag from the original registration. */ static int irdma_rereg_mr_trans(struct irdma_mr *iwmr, u64 start, u64 len, u64 virt) { … } /* * irdma_rereg_user_mr - Re-Register a user memory region(MR) * @ibmr: ib mem to access iwarp mr pointer * @flags: bit mask to indicate which of the attr's of MR modified * @start: virtual start address * @len: length of mr * @virt: virtual address * @new_access: bit mask of access flags * @new_pd: ptr of pd * @udata: user data * * Return: * NULL - Success, existing MR updated * ERR_PTR - error occurred */ static struct ib_mr *irdma_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start, u64 len, u64 virt, int new_access, struct ib_pd *new_pd, struct ib_udata *udata) { … } /** * irdma_reg_phys_mr - register kernel physical memory * @pd: ibpd pointer * @addr: physical address of memory to register * @size: size of memory to register * @access: Access rights * @iova_start: start of virtual address for physical buffers */ struct ib_mr *irdma_reg_phys_mr(struct ib_pd *pd, u64 addr, u64 size, int access, u64 
*iova_start) { … } /** * irdma_get_dma_mr - register physical mem * @pd: ptr of pd * @acc: access for memory */ static struct ib_mr *irdma_get_dma_mr(struct ib_pd *pd, int acc) { … } /** * irdma_del_memlist - Deleting pbl list entries for CQ/QP * @iwmr: iwmr for IB's user page addresses * @ucontext: ptr to user context */ static void irdma_del_memlist(struct irdma_mr *iwmr, struct irdma_ucontext *ucontext) { … } /** * irdma_dereg_mr - deregister mr * @ib_mr: mr ptr for dereg * @udata: user data */ static int irdma_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata) { … } /** * irdma_post_send - kernel application wr * @ibqp: qp ptr for wr * @ib_wr: work request ptr * @bad_wr: return of bad wr if err */ static int irdma_post_send(struct ib_qp *ibqp, const struct ib_send_wr *ib_wr, const struct ib_send_wr **bad_wr) { … } /** * irdma_post_recv - post receive wr for kernel application * @ibqp: ib qp pointer * @ib_wr: work request for receive * @bad_wr: bad wr caused an error */ static int irdma_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *ib_wr, const struct ib_recv_wr **bad_wr) { … } /** * irdma_flush_err_to_ib_wc_status - return change flush error code to IB status * @opcode: iwarp flush code */ static enum ib_wc_status irdma_flush_err_to_ib_wc_status(enum irdma_flush_opcode opcode) { … } /** * irdma_process_cqe - process cqe info * @entry: processed cqe * @cq_poll_info: cqe info */ static void irdma_process_cqe(struct ib_wc *entry, struct irdma_cq_poll_info *cq_poll_info) { … } /** * irdma_poll_one - poll one entry of the CQ * @ukcq: ukcq to poll * @cur_cqe: current CQE info to be filled in * @entry: ibv_wc object to be filled for non-extended CQ or NULL for extended CQ * * Returns the internal irdma device error code or 0 on success */ static inline int irdma_poll_one(struct irdma_cq_uk *ukcq, struct irdma_cq_poll_info *cur_cqe, struct ib_wc *entry) { … } /** * __irdma_poll_cq - poll cq for completion (kernel apps) * @iwcq: cq to poll * @num_entries: 
number of entries to poll * @entry: wr of a completed entry */ static int __irdma_poll_cq(struct irdma_cq *iwcq, int num_entries, struct ib_wc *entry) { … } /** * irdma_poll_cq - poll cq for completion (kernel apps) * @ibcq: cq to poll * @num_entries: number of entries to poll * @entry: wr of a completed entry */ static int irdma_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry) { … } /** * irdma_req_notify_cq - arm cq kernel application * @ibcq: cq to arm * @notify_flags: notification flags */ static int irdma_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags) { … } static int irdma_roce_port_immutable(struct ib_device *ibdev, u32 port_num, struct ib_port_immutable *immutable) { … } static int irdma_iw_port_immutable(struct ib_device *ibdev, u32 port_num, struct ib_port_immutable *immutable) { … } static const struct rdma_stat_desc irdma_hw_stat_names[] = …; static void irdma_get_dev_fw_str(struct ib_device *dev, char *str) { … } /** * irdma_alloc_hw_port_stats - Allocate a hw stats structure * @ibdev: device pointer from stack * @port_num: port number */ static struct rdma_hw_stats *irdma_alloc_hw_port_stats(struct ib_device *ibdev, u32 port_num) { … } /** * irdma_get_hw_stats - Populates the rdma_hw_stats structure * @ibdev: device pointer from stack * @stats: stats pointer from stack * @port_num: port number * @index: which hw counter the stack is requesting we update */ static int irdma_get_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats, u32 port_num, int index) { … } /** * irdma_query_gid - Query port GID * @ibdev: device pointer from stack * @port: port number * @index: Entry index * @gid: Global ID */ static int irdma_query_gid(struct ib_device *ibdev, u32 port, int index, union ib_gid *gid) { … } /** * mcast_list_add - Add a new mcast item to list * @rf: RDMA PCI function * @new_elem: pointer to element to add */ static void mcast_list_add(struct irdma_pci_f *rf, struct mc_table_list *new_elem) { … } /** 
* mcast_list_del - Remove an mcast item from list * @mc_qht_elem: pointer to mcast table list element */ static void mcast_list_del(struct mc_table_list *mc_qht_elem) { … } /** * mcast_list_lookup_ip - Search mcast list for address * @rf: RDMA PCI function * @ip_mcast: pointer to mcast IP address */ static struct mc_table_list *mcast_list_lookup_ip(struct irdma_pci_f *rf, u32 *ip_mcast) { … } /** * irdma_mcast_cqp_op - perform a mcast cqp operation * @iwdev: irdma device * @mc_grp_ctx: mcast group info * @op: operation * * returns error status */ static int irdma_mcast_cqp_op(struct irdma_device *iwdev, struct irdma_mcast_grp_info *mc_grp_ctx, u8 op) { … } /** * irdma_mcast_mac - Get the multicast MAC for an IP address * @ip_addr: IPv4 or IPv6 address * @mac: pointer to result MAC address * @ipv4: flag indicating IPv4 or IPv6 * */ void irdma_mcast_mac(u32 *ip_addr, u8 *mac, bool ipv4) { … } /** * irdma_attach_mcast - attach a qp to a multicast group * @ibqp: ptr to qp * @ibgid: pointer to global ID * @lid: local ID * * returns error status */ static int irdma_attach_mcast(struct ib_qp *ibqp, union ib_gid *ibgid, u16 lid) { … } /** * irdma_detach_mcast - detach a qp from a multicast group * @ibqp: ptr to qp * @ibgid: pointer to global ID * @lid: local ID * * returns error status */ static int irdma_detach_mcast(struct ib_qp *ibqp, union ib_gid *ibgid, u16 lid) { … } static int irdma_create_hw_ah(struct irdma_device *iwdev, struct irdma_ah *ah, bool sleep) { … } static int irdma_setup_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *attr) { … } /** * irdma_ah_exists - Check for existing identical AH * @iwdev: irdma device * @new_ah: AH to check for * * returns true if AH is found, false if not found. 
*/ static bool irdma_ah_exists(struct irdma_device *iwdev, struct irdma_ah *new_ah) { … } /** * irdma_destroy_ah - Destroy address handle * @ibah: pointer to address handle * @ah_flags: flags for sleepable */ static int irdma_destroy_ah(struct ib_ah *ibah, u32 ah_flags) { … } /** * irdma_create_user_ah - create user address handle * @ibah: address handle * @attr: address handle attributes * @udata: User data * * returns 0 on success, error otherwise */ static int irdma_create_user_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *attr, struct ib_udata *udata) { … } /** * irdma_create_ah - create address handle * @ibah: address handle * @attr: address handle attributes * @udata: NULL * * returns 0 on success, error otherwise */ static int irdma_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *attr, struct ib_udata *udata) { … } /** * irdma_query_ah - Query address handle * @ibah: pointer to address handle * @ah_attr: address handle attributes */ static int irdma_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr) { … } static enum rdma_link_layer irdma_get_link_layer(struct ib_device *ibdev, u32 port_num) { … } static const struct ib_device_ops irdma_roce_dev_ops = …; static const struct ib_device_ops irdma_iw_dev_ops = …; static const struct ib_device_ops irdma_dev_ops = …; /** * irdma_init_roce_device - initialization of roce rdma device * @iwdev: irdma device */ static void irdma_init_roce_device(struct irdma_device *iwdev) { … } /** * irdma_init_iw_device - initialization of iwarp rdma device * @iwdev: irdma device */ static void irdma_init_iw_device(struct irdma_device *iwdev) { … } /** * irdma_init_rdma_device - initialization of rdma device * @iwdev: irdma device */ static void irdma_init_rdma_device(struct irdma_device *iwdev) { … } /** * irdma_port_ibevent - indicate port event * @iwdev: irdma device */ void irdma_port_ibevent(struct irdma_device *iwdev) { … } /** * irdma_ib_unregister_device - unregister rdma device from IB * core * @iwdev: 
irdma device */ void irdma_ib_unregister_device(struct irdma_device *iwdev) { … } /** * irdma_ib_register_device - register irdma device to IB core * @iwdev: irdma device */ int irdma_ib_register_device(struct irdma_device *iwdev) { … } /** * irdma_ib_dealloc_device * @ibdev: ib device * * callback from ibdev dealloc_driver to deallocate resources * under irdma device */ void irdma_ib_dealloc_device(struct ib_device *ibdev) { … }