/* linux/drivers/iio/industrialio-buffer.c */

// SPDX-License-Identifier: GPL-2.0-only
/* The industrial I/O core
 *
 * Copyright (c) 2008 Jonathan Cameron
 *
 * Handling of buffer allocation / resizing.
 *
 * Things to look at here.
 * - Better memory allocation techniques?
 * - Alternative access techniques?
 */
#include <linux/atomic.h>
#include <linux/anon_inodes.h>
#include <linux/cleanup.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/device.h>
#include <linux/dma-buf.h>
#include <linux/dma-fence.h>
#include <linux/dma-resv.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/poll.h>
#include <linux/sched/signal.h>

#include <linux/iio/iio.h>
#include <linux/iio/iio-opaque.h>
#include "iio_core.h"
#include "iio_core_trigger.h"
#include <linux/iio/sysfs.h>
#include <linux/iio/buffer.h>
#include <linux/iio/buffer_impl.h>

#define DMABUF_ENQUEUE_TIMEOUT_MS

MODULE_IMPORT_NS();

struct iio_dmabuf_priv {};

struct iio_dma_fence {};

static const char * const iio_endian_prefix[] =;

static bool iio_buffer_is_active(struct iio_buffer *buf)
{}

static size_t iio_buffer_data_available(struct iio_buffer *buf)
{}

static int iio_buffer_flush_hwfifo(struct iio_dev *indio_dev,
				   struct iio_buffer *buf, size_t required)
{}

static bool iio_buffer_ready(struct iio_dev *indio_dev, struct iio_buffer *buf,
			     size_t to_wait, int to_flush)
{}

/**
 * iio_buffer_read() - chrdev read for buffer access
 * @filp:	File structure pointer for the char device
 * @buf:	Destination buffer for iio buffer read
 * @n:		First n bytes to read
 * @f_ps:	Long offset provided by the user as a seek position
 *
 * This function relies on all buffer implementations having an
 * iio_buffer as their first element.
 *
 * Return: negative values corresponding to error codes or ret != 0
 *	   for ending the reading activity
 **/
static ssize_t iio_buffer_read(struct file *filp, char __user *buf,
			       size_t n, loff_t *f_ps)
{}

static size_t iio_buffer_space_available(struct iio_buffer *buf)
{}

static ssize_t iio_buffer_write(struct file *filp, const char __user *buf,
				size_t n, loff_t *f_ps)
{}

/**
 * iio_buffer_poll() - poll the buffer to find out if it has data
 * @filp:	File structure pointer for device access
 * @wait:	Poll table structure pointer for which the driver adds
 *		a wait queue
 *
 * Return: (EPOLLIN | EPOLLRDNORM) if data is available for reading
 *	   or 0 for other cases
 */
static __poll_t iio_buffer_poll(struct file *filp,
				struct poll_table_struct *wait)
{}

ssize_t iio_buffer_read_wrapper(struct file *filp, char __user *buf,
				size_t n, loff_t *f_ps)
{}

ssize_t iio_buffer_write_wrapper(struct file *filp, const char __user *buf,
				 size_t n, loff_t *f_ps)
{}

__poll_t iio_buffer_poll_wrapper(struct file *filp,
				 struct poll_table_struct *wait)
{}

/**
 * iio_buffer_wakeup_poll - Wakes up the buffer waitqueue
 * @indio_dev: The IIO device
 *
 * Wakes up the event waitqueue used for poll(). Should usually
 * be called when the device is unregistered.
 */
void iio_buffer_wakeup_poll(struct iio_dev *indio_dev)
{}

int iio_pop_from_buffer(struct iio_buffer *buffer, void *data)
{}
EXPORT_SYMBOL_GPL();

void iio_buffer_init(struct iio_buffer *buffer)
{}
EXPORT_SYMBOL();

void iio_device_detach_buffers(struct iio_dev *indio_dev)
{}

static ssize_t iio_show_scan_index(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{}

static ssize_t iio_show_fixed_type(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{}

static ssize_t iio_scan_el_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{}

/* Note NULL used as error indicator as it doesn't make sense. */
static const unsigned long *iio_scan_mask_match(const unsigned long *av_masks,
						unsigned int masklength,
						const unsigned long *mask,
						bool strict)
{}

static bool iio_validate_scan_mask(struct iio_dev *indio_dev,
				   const unsigned long *mask)
{}

/**
 * iio_scan_mask_set() - set particular bit in the scan mask
 * @indio_dev: the iio device
 * @buffer: the buffer whose scan mask we are interested in
 * @bit: the bit to be set.
 *
 * Note that at this point we have no way of knowing what other
 * buffers might request, hence this code only verifies that the
 * individual buffers request is plausible.
 */
static int iio_scan_mask_set(struct iio_dev *indio_dev,
			     struct iio_buffer *buffer, int bit)
{}

static int iio_scan_mask_clear(struct iio_buffer *buffer, int bit)
{}

/**
 * iio_scan_mask_query() - query whether a scan-element bit is set
 * @indio_dev: the iio device that owns @buffer
 * @buffer: the buffer whose scan mask is being queried
 * @bit: scan index of the channel to test
 *
 * Return: 1 if @bit is set in @buffer's scan mask, 0 if it is clear or
 * no scan mask has been allocated yet, -EINVAL if @bit is out of range.
 */
static int iio_scan_mask_query(struct iio_dev *indio_dev,
			       struct iio_buffer *buffer, int bit)
{
	/*
	 * Valid scan indices are 0 .. masklength - 1; the scan-mask
	 * bitmap only holds masklength bits, so bit == masklength must
	 * be rejected too (the old '>' check let it through).
	 */
	if (bit < 0 || bit >= indio_dev->masklength)
		return -EINVAL;

	/* No mask allocated yet: nothing can be set. */
	if (!buffer->scan_mask)
		return 0;

	/* Ensure return value is 0 or 1. */
	return !!test_bit(bit, buffer->scan_mask);
}

static ssize_t iio_scan_el_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf,
				 size_t len)
{}

static ssize_t iio_scan_el_ts_show(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{}

static ssize_t iio_scan_el_ts_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf,
				    size_t len)
{}

static int iio_buffer_add_channel_sysfs(struct iio_dev *indio_dev,
					struct iio_buffer *buffer,
					const struct iio_chan_spec *chan)
{}

static ssize_t length_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{}

static ssize_t length_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t len)
{}

static ssize_t enable_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{}

static int iio_storage_bytes_for_si(struct iio_dev *indio_dev,
				    unsigned int scan_index)
{}

static int iio_storage_bytes_for_timestamp(struct iio_dev *indio_dev)
{}

static int iio_compute_scan_bytes(struct iio_dev *indio_dev,
				  const unsigned long *mask, bool timestamp)
{}

static void iio_buffer_activate(struct iio_dev *indio_dev,
				struct iio_buffer *buffer)
{}

static void iio_buffer_deactivate(struct iio_buffer *buffer)
{}

static void iio_buffer_deactivate_all(struct iio_dev *indio_dev)
{}

static int iio_buffer_enable(struct iio_buffer *buffer,
			     struct iio_dev *indio_dev)
{}

static int iio_buffer_disable(struct iio_buffer *buffer,
			      struct iio_dev *indio_dev)
{}

static void iio_buffer_update_bytes_per_datum(struct iio_dev *indio_dev,
					      struct iio_buffer *buffer)
{}

static int iio_buffer_request_update(struct iio_dev *indio_dev,
				     struct iio_buffer *buffer)
{}

static void iio_free_scan_mask(struct iio_dev *indio_dev,
			       const unsigned long *mask)
{}

struct iio_device_config {};

static int iio_verify_update(struct iio_dev *indio_dev,
			     struct iio_buffer *insert_buffer,
			     struct iio_buffer *remove_buffer,
			     struct iio_device_config *config)
{}

/**
 * struct iio_demux_table - table describing demux memcpy ops
 * @from:	index to copy from
 * @to:		index to copy to
 * @length:	how many bytes to copy
 * @l:		list head used for management
 */
struct iio_demux_table {};

static void iio_buffer_demux_free(struct iio_buffer *buffer)
{}

static int iio_buffer_add_demux(struct iio_buffer *buffer,
				struct iio_demux_table **p, unsigned int in_loc,
				unsigned int out_loc,
				unsigned int length)
{}

static int iio_buffer_update_demux(struct iio_dev *indio_dev,
				   struct iio_buffer *buffer)
{}

static int iio_update_demux(struct iio_dev *indio_dev)
{}

static int iio_enable_buffers(struct iio_dev *indio_dev,
			      struct iio_device_config *config)
{}

static int iio_disable_buffers(struct iio_dev *indio_dev)
{}

static int __iio_update_buffers(struct iio_dev *indio_dev,
				struct iio_buffer *insert_buffer,
				struct iio_buffer *remove_buffer)
{}

int iio_update_buffers(struct iio_dev *indio_dev,
		       struct iio_buffer *insert_buffer,
		       struct iio_buffer *remove_buffer)
{}
EXPORT_SYMBOL_GPL();

void iio_disable_all_buffers(struct iio_dev *indio_dev)
{}

static ssize_t enable_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t len)
{}

static ssize_t watermark_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{}

static ssize_t watermark_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t len)
{}

static ssize_t data_available_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{}

static ssize_t direction_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{}

static DEVICE_ATTR_RW(length);
static struct device_attribute dev_attr_length_ro =;
static DEVICE_ATTR_RW(enable);
static DEVICE_ATTR_RW(watermark);
static struct device_attribute dev_attr_watermark_ro =;
static DEVICE_ATTR_RO(data_available);
static DEVICE_ATTR_RO(direction);

/*
 * When adding new attributes here, put the at the end, at least until
 * the code that handles the length/length_ro & watermark/watermark_ro
 * assignments gets cleaned up. Otherwise these can create some weird
 * duplicate attributes errors under some setups.
 */
static struct attribute *iio_buffer_attrs[] =;

#define to_dev_attr(_attr)

static struct attribute *iio_buffer_wrap_attr(struct iio_buffer *buffer,
					      struct attribute *attr)
{}

static int iio_buffer_register_legacy_sysfs_groups(struct iio_dev *indio_dev,
						   struct attribute **buffer_attrs,
						   int buffer_attrcount,
						   int scan_el_attrcount)
{}

static void iio_buffer_unregister_legacy_sysfs_groups(struct iio_dev *indio_dev)
{}

static void iio_buffer_dmabuf_release(struct kref *ref)
{}

static void iio_buffer_dmabuf_get(struct dma_buf_attachment *attach)
{}

static void iio_buffer_dmabuf_put(struct dma_buf_attachment *attach)
{}

static int iio_buffer_chrdev_release(struct inode *inode, struct file *filep)
{}

static int iio_dma_resv_lock(struct dma_buf *dmabuf, bool nonblock)
{}

static struct dma_buf_attachment *
iio_buffer_find_attachment(struct iio_dev_buffer_pair *ib,
			   struct dma_buf *dmabuf, bool nonblock)
{}

static int iio_buffer_attach_dmabuf(struct iio_dev_buffer_pair *ib,
				    int __user *user_fd, bool nonblock)
{}

static int iio_buffer_detach_dmabuf(struct iio_dev_buffer_pair *ib,
				    int __user *user_req, bool nonblock)
{}

static const char *
iio_buffer_dma_fence_get_driver_name(struct dma_fence *fence)
{}

static void iio_buffer_dma_fence_release(struct dma_fence *fence)
{}

static const struct dma_fence_ops iio_buffer_dma_fence_ops =;

static int iio_buffer_enqueue_dmabuf(struct iio_dev_buffer_pair *ib,
				     struct iio_dmabuf __user *iio_dmabuf_req,
				     bool nonblock)
{}

static void iio_buffer_cleanup(struct work_struct *work)
{}

void iio_buffer_signal_dmabuf_done(struct dma_fence *fence, int ret)
{}
EXPORT_SYMBOL_GPL();

static long iio_buffer_chrdev_ioctl(struct file *filp,
				    unsigned int cmd, unsigned long arg)
{}

static const struct file_operations iio_buffer_chrdev_fileops =;

static long iio_device_buffer_getfd(struct iio_dev *indio_dev, unsigned long arg)
{}

static long iio_device_buffer_ioctl(struct iio_dev *indio_dev, struct file *filp,
				    unsigned int cmd, unsigned long arg)
{}

static int iio_channel_validate_scan_type(struct device *dev, int ch,
					  const struct iio_scan_type *scan_type)
{}

static int __iio_buffer_alloc_sysfs_and_mask(struct iio_buffer *buffer,
					     struct iio_dev *indio_dev,
					     int index)
{}

static void __iio_buffer_free_sysfs_and_mask(struct iio_buffer *buffer,
					     struct iio_dev *indio_dev,
					     int index)
{}

int iio_buffers_alloc_sysfs_and_mask(struct iio_dev *indio_dev)
{}

void iio_buffers_free_sysfs_and_mask(struct iio_dev *indio_dev)
{}

/**
 * iio_validate_scan_mask_onehot() - Validates that exactly one channel is selected
 * @indio_dev: the iio device
 * @mask: scan mask to be checked
 *
 * Return true if exactly one bit is set in the scan mask, false otherwise. It
 * can be used for devices where only one channel can be active for sampling at
 * a time.
 */
bool iio_validate_scan_mask_onehot(struct iio_dev *indio_dev,
				   const unsigned long *mask)
{}
EXPORT_SYMBOL_GPL();

static const void *iio_demux(struct iio_buffer *buffer,
			     const void *datain)
{}

static int iio_push_to_buffer(struct iio_buffer *buffer, const void *data)
{}

/**
 * iio_push_to_buffers() - push to a registered buffer.
 * @indio_dev:		iio_dev structure for device.
 * @data:		Full scan.
 */
int iio_push_to_buffers(struct iio_dev *indio_dev, const void *data)
{}
EXPORT_SYMBOL_GPL();

/**
 * iio_push_to_buffers_with_ts_unaligned() - push to registered buffer,
 *    no alignment or space requirements.
 * @indio_dev:		iio_dev structure for device.
 * @data:		channel data excluding the timestamp.
 * @data_sz:		size of data.
 * @timestamp:		timestamp for the sample data.
 *
 * This special variant of iio_push_to_buffers_with_timestamp() does
 * not require space for the timestamp, or 8 byte alignment of data.
 * It does however require an allocation on first call and additional
 * copies on all calls, so should be avoided if possible.
 */
int iio_push_to_buffers_with_ts_unaligned(struct iio_dev *indio_dev,
					  const void *data,
					  size_t data_sz,
					  int64_t timestamp)
{}
EXPORT_SYMBOL_GPL();

/**
 * iio_buffer_release() - Free a buffer's resources
 * @ref: Pointer to the kref embedded in the iio_buffer struct
 *
 * This function is called when the last reference to the buffer has been
 * dropped. It will typically free all resources allocated by the buffer. Do not
 * call this function manually, always use iio_buffer_put() when done using a
 * buffer.
 */
static void iio_buffer_release(struct kref *ref)
{}

/**
 * iio_buffer_get() - Grab a reference to the buffer
 * @buffer: The buffer to grab a reference for, may be NULL
 *
 * Returns the pointer to the buffer that was passed into the function.
 */
struct iio_buffer *iio_buffer_get(struct iio_buffer *buffer)
{}
EXPORT_SYMBOL_GPL();

/**
 * iio_buffer_put() - Release the reference to the buffer
 * @buffer: The buffer to release the reference for, may be NULL
 */
void iio_buffer_put(struct iio_buffer *buffer)
{}
EXPORT_SYMBOL_GPL();

/**
 * iio_device_attach_buffer - Attach a buffer to a IIO device
 * @indio_dev: The device the buffer should be attached to
 * @buffer: The buffer to attach to the device
 *
 * Return 0 if successful, negative if error.
 *
 * This function attaches a buffer to a IIO device. The buffer stays attached to
 * the device until the device is freed. For legacy reasons, the first attached
 * buffer will also be assigned to 'indio_dev->buffer'.
 * The array allocated here, will be free'd via the iio_device_detach_buffers()
 * call which is handled by the iio_device_free().
 */
int iio_device_attach_buffer(struct iio_dev *indio_dev,
			     struct iio_buffer *buffer)
{}
EXPORT_SYMBOL_GPL();