linux/drivers/infiniband/hw/qib/qib_file_ops.c

/*
 * Copyright (c) 2012, 2013 Intel Corporation. All rights reserved.
 * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/pci.h>
#include <linux/poll.h>
#include <linux/cdev.h>
#include <linux/swap.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/uio.h>
#include <linux/pgtable.h>

#include <rdma/ib.h>

#include "qib.h"
#include "qib_common.h"
#include "qib_user_sdma.h"

#undef pr_fmt
#define pr_fmt(fmt) QIB_DRV_NAME ": " fmt

static int qib_open(struct inode *, struct file *);
static int qib_close(struct inode *, struct file *);
static ssize_t qib_write(struct file *, const char __user *, size_t, loff_t *);
static ssize_t qib_write_iter(struct kiocb *, struct iov_iter *);
static __poll_t qib_poll(struct file *, struct poll_table_struct *);
static int qib_mmapf(struct file *, struct vm_area_struct *);

/*
 * This is really, really weird shit - write() and writev() here
 * have completely unrelated semantics.  Sucky userland ABI,
 * film at 11.
 */
static const struct file_operations qib_file_ops = {
	.owner = THIS_MODULE,
	.write = qib_write,
	.write_iter = qib_write_iter,
	.open = qib_open,
	.release = qib_close,
	.poll = qib_poll,
	.mmap = qib_mmapf,
	.llseek = noop_llseek,
};

/*
 * Convert kernel virtual addresses to physical addresses so they don't
 * potentially conflict with the chip addresses used as mmap offsets.
 * It doesn't really matter what mmap offset we use as long as we can
 * interpret it correctly.
 */
static u64 cvt_kvaddr(void *p)
{}
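
/*
 * Illustrative sketch, not necessarily the driver's exact code: one way to
 * turn a vmalloc/vmap kernel virtual address into a physical address that
 * can serve as an mmap token, using the standard vmalloc_to_page() and
 * page_to_pfn() helpers.  The helper name below is hypothetical.
 */
static u64 __maybe_unused example_kvaddr_to_phys(void *p)
{
	struct page *page = vmalloc_to_page(p);

	/* 0 means "no translation"; callers would treat that as an error. */
	return page ? (u64)page_to_pfn(page) << PAGE_SHIFT : 0;
}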

static int qib_get_base_info(struct file *fp, void __user *ubase,
			     size_t ubase_size)
{}

/**
 * qib_tid_update - update a context TID
 * @rcd: the context
 * @fp: the qib device file
 * @ti: the TID information
 *
 * The new implementation as of Oct 2004 is that the driver assigns
 * the tid and returns it to the caller.   To reduce search time, we
 * keep a cursor for each context, walking the shadow tid array to find
 * one that's not in use.
 *
 * For now, if we can't allocate the full list, we fail, although
 * in the long run, we'll allocate as many as we can, and the
 * caller will deal with that by trying the remaining pages later.
 * That means that when we fail, we have to mark the tids as not in
 * use again, in our shadow copy.
 *
 * It's up to the caller to free the tids when they are done.
 * We'll unlock the pages as they free them.
 *
 * Also, right now we are locking one page at a time, but since
 * the intended use of this routine is for a single group of
 * virtually contiguous pages, that should change to improve
 * performance.
 */
static int qib_tid_update(struct qib_ctxtdata *rcd, struct file *fp,
			  const struct qib_tid_info *ti)
{}
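
/*
 * Illustrative sketch of the cursor-style search described above, assuming
 * a per-context bitmap that shadows which TID slots are in use.  The
 * structure and helper below are hypothetical, not the driver's real
 * data layout.
 */
struct example_tid_shadow {
	unsigned long	*inuse;		/* one bit per TID slot */
	unsigned	ntids;		/* total TID slots in this context */
	unsigned	cursor;		/* where the previous search stopped */
};

static int __maybe_unused example_alloc_tid(struct example_tid_shadow *s)
{
	unsigned tid = find_next_zero_bit(s->inuse, s->ntids, s->cursor);

	if (tid >= s->ntids)	/* wrap around and retry from the start */
		tid = find_next_zero_bit(s->inuse, s->ntids, 0);
	if (tid >= s->ntids)
		return -EBUSY;	/* every slot is already in use */
	__set_bit(tid, s->inuse);
	s->cursor = tid + 1 < s->ntids ? tid + 1 : 0;
	return tid;
}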

/**
 * qib_tid_free - free a context TID
 * @rcd: the context
 * @subctxt: the subcontext
 * @ti: the TID info
 *
 * Right now we are unlocking one page at a time, but since
 * the intended use of this routine is for a single group of
 * virtually contiguous pages, that should change to improve
 * performance.  We check that the TID is in range for this context
 * but otherwise don't check validity; if the user has an error and
 * frees the wrong TID, it's only their own data that can thereby
 * be corrupted.  We do check that the TID was in use, for sanity.
 * We always use our idea of the saved address, not the address that
 * they pass in to us.
 */
static int qib_tid_free(struct qib_ctxtdata *rcd, unsigned subctxt,
			const struct qib_tid_info *ti)
{}

/**
 * qib_set_part_key - set a partition key
 * @rcd: the context
 * @key: the key
 *
 * We can have up to 4 active at a time (other than the default, which is
 * always allowed).  This is somewhat tricky, since multiple contexts may set
 * the same key, so we reference count them, and clean up at exit.  All 4
 * partition keys are packed into a single qlogic_ib register.  It's an
 * error for a process to set the same pkey multiple times.  We provide no
 * mechanism to de-allocate a pkey at this time; we may eventually need to
 * do that.  I've used atomic operations and no locking, and make only
 * a single pass through what's available.  This should be more than
 * adequate for some time.  I'll think about spinlocks or the like if and
 * when it's necessary.
 */
static int qib_set_part_key(struct qib_ctxtdata *rcd, u16 key)
{}
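
/*
 * Illustrative sketch of the lock-free reference counting described above:
 * bump the count if the key is already present, otherwise claim the first
 * slot whose count we raise from 0 to 1.  The structure and helper are
 * hypothetical; the real driver also rejects a process setting the same
 * pkey twice and mirrors the result into the packed hardware register.
 */
#define EXAMPLE_NPKEYS 4

struct example_pkey_slot {
	u16		pkey;
	atomic_t	refcount;
};

static int __maybe_unused example_add_pkey(struct example_pkey_slot *slots,
					   u16 key)
{
	int i;

	for (i = 0; i < EXAMPLE_NPKEYS; i++) {
		if (slots[i].pkey == key && atomic_read(&slots[i].refcount)) {
			atomic_inc(&slots[i].refcount);
			return i;
		}
	}
	for (i = 0; i < EXAMPLE_NPKEYS; i++) {
		if (atomic_inc_return(&slots[i].refcount) == 1) {
			slots[i].pkey = key;
			return i;
		}
		atomic_dec(&slots[i].refcount);	/* slot already owned; undo */
	}
	return -EBUSY;	/* all slots are in use */
}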

/**
 * qib_manage_rcvq - manage a context's receive queue
 * @rcd: the context
 * @subctxt: the subcontext
 * @start_stop: action to carry out
 *
 * start_stop == 0 disables receive on the context, for use in queue
 * overflow conditions.  start_stop == 1 re-enables, to be used to
 * re-init the software copy of the head register.
 */
static int qib_manage_rcvq(struct qib_ctxtdata *rcd, unsigned subctxt,
			   int start_stop)
{}

static void qib_clean_part_key(struct qib_ctxtdata *rcd,
			       struct qib_devdata *dd)
{}

/* common code for the mappings on dma_alloc_coherent mem */
static int qib_mmap_mem(struct vm_area_struct *vma, struct qib_ctxtdata *rcd,
			unsigned len, void *kvaddr, u32 write_ok, char *what)
{}

static int mmap_ureg(struct vm_area_struct *vma, struct qib_devdata *dd,
		     u64 ureg)
{}

static int mmap_piobufs(struct vm_area_struct *vma,
			struct qib_devdata *dd,
			struct qib_ctxtdata *rcd,
			unsigned piobufs, unsigned piocnt)
{}

static int mmap_rcvegrbufs(struct vm_area_struct *vma,
			   struct qib_ctxtdata *rcd)
{}

/*
 * qib_file_vma_fault - handle a VMA page fault.
 */
static vm_fault_t qib_file_vma_fault(struct vm_fault *vmf)
{}

static const struct vm_operations_struct qib_file_vm_ops = {
	.fault = qib_file_vma_fault,
};

static int mmap_kvaddr(struct vm_area_struct *vma, u64 pgaddr,
		       struct qib_ctxtdata *rcd, unsigned subctxt)
{}

/**
 * qib_mmapf - mmap various structures into user space
 * @fp: the file pointer
 * @vma: the VM area
 *
 * We use this to map shared buffers between the kernel and user code for
 * the rcvhdr queue, eager (egr) buffers, and the per-context user registers
 * and PIO buffers in the chip.  We have the open and close entries so we
 * can bump the ref count and keep the driver from being unloaded while
 * still mapped.
 */
static int qib_mmapf(struct file *fp, struct vm_area_struct *vma)
{}
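
/*
 * Illustrative sketch of the offset-token dispatch that the comments above
 * describe: user space passes back one of the mmap offsets it was given in
 * qib_get_base_info(), and the mmap handler decides which region to map
 * from it.  The helper and token parameters below are hypothetical.
 */
static int __maybe_unused example_mmap_dispatch(struct vm_area_struct *vma,
						u64 ureg_token, u64 piobufs_token)
{
	u64 pgaddr = (u64)vma->vm_pgoff << PAGE_SHIFT;

	if (!pgaddr)
		return -EINVAL;		/* offset 0 is never a valid token */
	if (pgaddr == ureg_token)
		return 0;		/* would map the per-context user registers */
	if (pgaddr == piobufs_token)
		return 0;		/* would map the PIO send buffers */
	return -EINVAL;			/* not a token we handed out */
}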

static __poll_t qib_poll_urgent(struct qib_ctxtdata *rcd,
				    struct file *fp,
				    struct poll_table_struct *pt)
{}

static __poll_t qib_poll_next(struct qib_ctxtdata *rcd,
				  struct file *fp,
				  struct poll_table_struct *pt)
{}

static __poll_t qib_poll(struct file *fp, struct poll_table_struct *pt)
{}

static void assign_ctxt_affinity(struct file *fp, struct qib_devdata *dd)
{}

/*
 * Check that userland and driver are compatible for subcontexts.
 */
static int qib_compatible_subctxts(int user_swmajor, int user_swminor)
{}

static int init_subctxts(struct qib_devdata *dd,
			 struct qib_ctxtdata *rcd,
			 const struct qib_user_info *uinfo)
{}

static int setup_ctxt(struct qib_pportdata *ppd, int ctxt,
		      struct file *fp, const struct qib_user_info *uinfo)
{}

static inline int usable(struct qib_pportdata *ppd)
{}

/*
 * Select a context on the given device, either using a requested port
 * or the port based on the context number.
 */
static int choose_port_ctxt(struct file *fp, struct qib_devdata *dd, u32 port,
			    const struct qib_user_info *uinfo)
{}

static int find_free_ctxt(int unit, struct file *fp,
			  const struct qib_user_info *uinfo)
{}

static int get_a_ctxt(struct file *fp, const struct qib_user_info *uinfo,
		      unsigned alg)
{}

static int find_shared_ctxt(struct file *fp,
			    const struct qib_user_info *uinfo)
{}

static int qib_open(struct inode *in, struct file *fp)
{}

static int find_hca(unsigned int cpu, int *unit)
{}

static int do_qib_user_sdma_queue_create(struct file *fp)
{}

/*
 * Get the ctxt early, so we can set affinity prior to memory allocation.
 */
static int qib_assign_ctxt(struct file *fp, const struct qib_user_info *uinfo)
{}


static int qib_do_user_init(struct file *fp,
			    const struct qib_user_info *uinfo)
{}

/**
 * unlock_expected_tids - unlock any expected TID entries the context still
 * had in use
 * @rcd: ctxt
 *
 * We don't actually update the chip here, because we do a bulk update
 * below, using f_clear_tids.
 */
static void unlock_expected_tids(struct qib_ctxtdata *rcd)
{}

static int qib_close(struct inode *in, struct file *fp)
{}

static int qib_ctxt_info(struct file *fp, struct qib_ctxt_info __user *uinfo)
{}

static int qib_sdma_get_inflight(struct qib_user_sdma_queue *pq,
				 u32 __user *inflightp)
{}

static int qib_sdma_get_complete(struct qib_pportdata *ppd,
				 struct qib_user_sdma_queue *pq,
				 u32 __user *completep)
{}

static int disarm_req_delay(struct qib_ctxtdata *rcd)
{}

/*
 * Find all user contexts in use, and set the specified bit in their
 * event mask.
 * See also find_ctxt() for a similar use that is specific to send buffers.
 */
int qib_set_uevent_bits(struct qib_pportdata *ppd, const int evtbit)
{}
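
/*
 * Illustrative sketch of the "walk every active context and set an event
 * bit" pattern described above, using the standard set_bit() helper on a
 * per-context event-flag word.  The structure and helper are hypothetical.
 */
struct example_ctxt {
	unsigned long	uevent_flags;
	bool		in_use;
};

static void __maybe_unused example_set_uevent(struct example_ctxt *ctxts,
					      unsigned nctxts, int evtbit)
{
	unsigned i;

	for (i = 0; i < nctxts; i++)
		if (ctxts[i].in_use)
			set_bit(evtbit, &ctxts[i].uevent_flags);
}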

/*
 * Clear the event notifier events for this context.
 * For the DISARM_BUFS case, we also take action (this obsoletes
 * the older QIB_CMD_DISARM_BUFS, but we keep it for backwards
 * compatibility).
 * Other bits don't currently require actions; they are just cleared
 * atomically.  The user process then performs any actions appropriate
 * to the bit having been set, if desired, and checks again in the future.
 */
static int qib_user_event_ack(struct qib_ctxtdata *rcd, int subctxt,
			      unsigned long events)
{}
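
/*
 * Illustrative sketch of the acknowledge pattern described above: atomically
 * clear each requested bit and take an action only when the bit was actually
 * set.  The bit number and flag word below are hypothetical.
 */
#define EXAMPLE_EVENT_DISARM_BUFS 0

static int __maybe_unused example_event_ack(unsigned long *flags,
					    unsigned long events)
{
	int bit;

	for_each_set_bit(bit, &events, BITS_PER_LONG) {
		if (!test_and_clear_bit(bit, flags))
			continue;	/* bit was not set; nothing to do */
		if (bit == EXAMPLE_EVENT_DISARM_BUFS) {
			/* a real driver would disarm its send buffers here */
		}
	}
	return 0;
}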

static ssize_t qib_write(struct file *fp, const char __user *data,
			 size_t count, loff_t *off)
{}

static ssize_t qib_write_iter(struct kiocb *iocb, struct iov_iter *from)
{}

static const struct class qib_class = {
	.name = "ipath",	/* historical class name kept for user-space compatibility */
};
static dev_t qib_dev;

int qib_cdev_init(int minor, const char *name,
		  const struct file_operations *fops,
		  struct cdev **cdevp, struct device **devp)
{}

void qib_cdev_cleanup(struct cdev **cdevp, struct device **devp)
{}

static struct cdev *wildcard_cdev;
static struct device *wildcard_device;

int __init qib_dev_init(void)
{}

void qib_dev_cleanup(void)
{}

static atomic_t user_count = ATOMIC_INIT(0);

static void qib_user_remove(struct qib_devdata *dd)
{}

static int qib_user_add(struct qib_devdata *dd)
{}

/*
 * Create per-unit files in /dev
 */
int qib_device_create(struct qib_devdata *dd)
{}

/*
 * Remove per-unit files in /dev
 * Returns void; the core kernel reports no errors for this cleanup.
 */
void qib_device_remove(struct qib_devdata *dd)
{}