// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/drivers/misc/xillybus_core.c
 *
 * Copyright 2011 Xillybus Ltd, http://xillybus.com
 *
 * Driver for the Xillybus FPGA/host framework.
 *
 * This driver interfaces with a special IP core in an FPGA, setting up
 * a pipe between a hardware FIFO in the programmable logic and a device
 * file in the host. The number of such pipes and their attributes are
 * set up on the logic. This driver detects these automatically and
 * creates the device files accordingly.
 */

#include <linux/list.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/crc32.h>
#include <linux/poll.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include "xillybus.h"
#include "xillybus_class.h"

MODULE_DESCRIPTION();
MODULE_AUTHOR();
MODULE_ALIAS();
MODULE_LICENSE();

/* General timeout is 100 ms, rx timeout is 10 ms */
#define XILLY_RX_TIMEOUT (10*HZ/1000)
#define XILLY_TIMEOUT (100*HZ/1000)

/*
 * Offsets (in 32-bit words) of the IP core's control registers within
 * the endpoint's register space.
 * NOTE(review): values restored for this elided excerpt — verify
 * against the Xillybus IP core's register map before use.
 */
#define fpga_msg_ctrl_reg              0x0002
#define fpga_dma_control_reg           0x0008
#define fpga_dma_bufno_reg             0x0009
#define fpga_dma_bufaddr_lowaddr_reg   0x000a
#define fpga_dma_bufaddr_highaddr_reg  0x000b
#define fpga_buf_ctrl_reg              0x000c
#define fpga_buf_offset_reg            0x000d
#define fpga_endian_reg                0x0010

/*
 * Opcodes of messages arriving from the FPGA on the message channel.
 * NOTE(review): numbering restored for this elided excerpt — must match
 * the IP core's message format exactly.
 */
#define XILLYMSG_OPCODE_RELEASEBUF 1
#define XILLYMSG_OPCODE_QUIESCEACK 2
#define XILLYMSG_OPCODE_FIFOEOF 3
#define XILLYMSG_OPCODE_FATAL_ERROR 4
#define XILLYMSG_OPCODE_NONEMPTY 5

static const char xillyname[] =;

static struct workqueue_struct *xillybus_wq;

/*
 * Locking scheme: Mutexes protect invocations of character device methods.
 * If both locks are taken, wr_mutex is taken first, rd_mutex second.
 *
 * wr_spinlock protects wr_*_buf_idx, wr_empty, wr_sleepy, wr_ready and the
 * buffers' end_offset fields against changes made by IRQ handler (and in
 * theory, other file request handlers, but the mutex handles that). Nothing
 * else.
 * They are held for short direct memory manipulations. Needless to say,
 * no mutex locking is allowed when a spinlock is held.
 *
 * rd_spinlock does the same with rd_*_buf_idx, rd_empty and end_offset.
 *
 * register_mutex is endpoint-specific, and is held when non-atomic
 * register operations are performed. wr_mutex and rd_mutex may be
 * held when register_mutex is taken, but none of the spinlocks. Note that
 * register_mutex doesn't protect against sporadic buf_ctrl_reg writes
 * which are unrelated to buf_offset_reg, since they are harmless.
 *
 * Blocking on the wait queues is allowed with mutexes held, but not with
 * spinlocks.
 *
 * Only interruptible blocking is allowed on mutexes and wait queues.
 *
 * All in all, the locking order goes (with skips allowed, of course):
 * wr_mutex -> rd_mutex -> register_mutex -> wr_spinlock -> rd_spinlock
 */

/*
 * Handle a message from the FPGA that failed validation.  Presumably
 * logs the raw 32-bit word pair in @buf for diagnosis.
 * NOTE(review): body elided in this excerpt — behavior beyond the
 * signature is unverified.
 */
static void malformed_message(struct xilly_endpoint *endpoint, u32 *buf)
{}

/*
 * xillybus_isr assumes the interrupt is allocated exclusively to it,
 * which is the natural case with MSI and several other hardware-oriented
 * interrupts. Sharing is not allowed.
 */

/*
 * Top-level interrupt handler for a Xillybus endpoint; @data is the
 * struct xilly_endpoint registered by the bus-specific glue driver.
 * NOTE(review): handler body elided in this excerpt.
 *
 * Fix: EXPORT_SYMBOL() was missing its argument; the exported symbol
 * is the function defined immediately above.
 */
irqreturn_t xillybus_isr(int irq, void *data)
{}
EXPORT_SYMBOL(xillybus_isr);

/*
 * A few trivial memory management functions.
 * NOTE: These functions are used only on probe and remove, and therefore
 * no locks are applied!
 */

/* Forward declaration: deferred-work handler defined later in this file. */
static void xillybus_autoflush(struct work_struct *work);

/*
 * Bookkeeping state for incremental DMA buffer allocation during probe.
 * NOTE(review): members elided in this excerpt.
 */
struct xilly_alloc_state {};

/*
 * Presumably the counterpart of xilly_map_single() below, releasing a
 * DMA mapping.  NOTE(review): body elided in this excerpt.
 */
static void xilly_unmap(void *ptr)
{}

/*
 * Map @size bytes at @ptr for DMA in @direction and return the bus
 * address through @ret_dma_handle.  Presumably returns 0 on success or
 * a negative errno — TODO confirm, body elided in this excerpt.
 */
static int xilly_map_single(struct xilly_endpoint *ep,
			    void *ptr,
			    size_t size,
			    int direction,
			    dma_addr_t *ret_dma_handle
	)
{}

/*
 * Allocate @bufnum DMA buffers of @bytebufsize bytes each into
 * @buffers, using the allocation state @s.  Probe-time only, hence no
 * locking (see note above).
 * NOTE(review): body elided in this excerpt.
 */
static int xilly_get_dma_buffers(struct xilly_endpoint *ep,
				 struct xilly_alloc_state *s,
				 struct xilly_buffer **buffers,
				 int bufnum, int bytebufsize)
{}

/*
 * Set up the endpoint's channels from the @entries-long channel
 * descriptor @chandesc reported by the FPGA.
 * NOTE(review): body elided in this excerpt.
 */
static int xilly_setupchannels(struct xilly_endpoint *ep,
			       unsigned char *chandesc,
			       int entries)
{}

/*
 * Parse the IDT (the FPGA's self-description table) into @idt_handle.
 * NOTE(review): body elided in this excerpt.
 */
static int xilly_scan_idt(struct xilly_endpoint *endpoint,
			  struct xilly_idt_handle *idt_handle)
{}

/*
 * Presumably fetches the IDT from the FPGA (later scanned by
 * xilly_scan_idt()).  NOTE(review): body elided in this excerpt.
 */
static int xilly_obtain_idt(struct xilly_endpoint *endpoint)
{}

/*
 * read() handler for Xillybus device files.  See locking scheme note
 * above: invocation is presumably serialized by rd_mutex.
 * NOTE(review): body elided in this excerpt.
 */
static ssize_t xillybus_read(struct file *filp, char __user *userbuf,
			     size_t count, loff_t *f_pos)
{}

/*
 * The timeout argument takes values as follows:
 *  >0 : Flush with timeout
 * ==0 : Flush, and wait indefinitely for the flush to complete
 *  <0 : Autoflush: Flush only if there's a single buffer occupied
 */

/*
 * Flush @channel's write path; @timeout semantics are documented in the
 * comment immediately above.
 * NOTE(review): body elided in this excerpt.
 */
static int xillybus_myflush(struct xilly_channel *channel, long timeout)
{}

/*
 * flush() file operation; presumably a thin wrapper around
 * xillybus_myflush().  NOTE(review): body elided in this excerpt.
 */
static int xillybus_flush(struct file *filp, fl_owner_t id)
{}

/*
 * Deferred-work handler (queued, presumably, on xillybus_wq) that
 * autoflushes a channel; see the negative-timeout case of
 * xillybus_myflush().  NOTE(review): body elided in this excerpt.
 */
static void xillybus_autoflush(struct work_struct *work)
{}

/*
 * write() handler for Xillybus device files.  See locking scheme note
 * above: invocation is presumably serialized by wr_mutex.
 * NOTE(review): body elided in this excerpt.
 */
static ssize_t xillybus_write(struct file *filp, const char __user *userbuf,
			      size_t count, loff_t *f_pos)
{}

/*
 * open() handler: presumably resolves the channel from the inode and
 * prepares it for I/O.  NOTE(review): body elided in this excerpt.
 */
static int xillybus_open(struct inode *inode, struct file *filp)
{}

/*
 * release() handler, counterpart of xillybus_open().
 * NOTE(review): body elided in this excerpt.
 */
static int xillybus_release(struct inode *inode, struct file *filp)
{}

/*
 * llseek() handler.  NOTE(review): body elided in this excerpt —
 * seekable semantics for a FIFO-backed device file are unverified here.
 */
static loff_t xillybus_llseek(struct file *filp, loff_t offset, int whence)
{}

/*
 * poll() handler reporting readiness of the channel's read/write paths.
 * NOTE(review): body elided in this excerpt.
 */
static __poll_t xillybus_poll(struct file *filp, poll_table *wait)
{}

static const struct file_operations xillybus_fops =;

/*
 * Allocate and initialize a struct xilly_endpoint for @dev; called by
 * the bus-specific glue drivers.
 * NOTE(review): body elided in this excerpt.
 *
 * Fix: EXPORT_SYMBOL() was missing its argument; the exported symbol
 * is the function defined immediately above.
 */
struct xilly_endpoint *xillybus_init_endpoint(struct device *dev)
{}
EXPORT_SYMBOL(xillybus_init_endpoint);

/*
 * Quiesce the endpoint, presumably waiting for the FPGA's
 * XILLYMSG_OPCODE_QUIESCEACK response — TODO confirm.
 * NOTE(review): body elided in this excerpt.
 */
static int xilly_quiesce(struct xilly_endpoint *endpoint)
{}

/*
 * Discover the endpoint's channels (per the file header: detect the
 * pipes configured in the logic and create device files accordingly).
 * NOTE(review): body elided in this excerpt.
 *
 * Fix: EXPORT_SYMBOL() was missing its argument; the exported symbol
 * is the function defined immediately above.
 */
int xillybus_endpoint_discovery(struct xilly_endpoint *endpoint)
{}
EXPORT_SYMBOL(xillybus_endpoint_discovery);

/*
 * Tear down an endpoint on device removal; counterpart of
 * xillybus_endpoint_discovery().
 * NOTE(review): body elided in this excerpt.
 *
 * Fix: EXPORT_SYMBOL() was missing its argument; the exported symbol
 * is the function defined immediately above.
 */
void xillybus_endpoint_remove(struct xilly_endpoint *endpoint)
{}
EXPORT_SYMBOL(xillybus_endpoint_remove);

/*
 * Module init; presumably allocates xillybus_wq — TODO confirm.
 * NOTE(review): body elided in this excerpt.
 */
static int __init xillybus_init(void)
{}

/*
 * Module exit; presumably destroys xillybus_wq — TODO confirm.
 * NOTE(review): body elided in this excerpt.
 */
static void __exit xillybus_exit(void)
{}

/*
 * Fix: module_init() was missing its argument.  The init function is
 * xillybus_init (marked __init above), symmetric with module_exit.
 */
module_init(xillybus_init);
module_exit(xillybus_exit);