/* linux/drivers/media/v4l2-core/v4l2-mem2mem.c */

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Memory-to-memory device framework for Video for Linux 2 and vb2.
 *
 * Helper functions for devices that use vb2 buffers for both their
 * source and destination.
 *
 * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd.
 * Pawel Osciak, <[email protected]>
 * Marek Szyprowski, <[email protected]>
 */
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include <media/media-device.h>
#include <media/videobuf2-v4l2.h>
#include <media/v4l2-mem2mem.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-device.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-event.h>

MODULE_DESCRIPTION();
MODULE_AUTHOR();
MODULE_LICENSE();

static bool debug;
module_param(debug, bool, 0644);

#define dprintk(fmt, arg...)


/* Instance is already queued on the job_queue */
#define TRANS_QUEUED
/* Instance is currently running in hardware */
#define TRANS_RUNNING
/* Instance is currently aborting */
#define TRANS_ABORT


/* The job queue is not running new jobs */
#define QUEUE_PAUSED


/* Offset base for buffers on the destination queue - used to distinguish
 * between source and destination buffers when mmapping - they receive the same
 * offsets but for different queues */
#define DST_QUEUE_OFF_BASE

enum v4l2_m2m_entity_type {};

static const char * const m2m_entity_name[] =;

/**
 * struct v4l2_m2m_dev - per-device context
 * @source:		&struct media_entity pointer with the source entity
 *			Used only when the M2M device is registered via
 *			v4l2_m2m_register_media_controller().
 * @source_pad:		&struct media_pad with the source pad.
 *			Used only when the M2M device is registered via
 *			v4l2_m2m_register_media_controller().
 * @sink:		&struct media_entity pointer with the sink entity
 *			Used only when the M2M device is registered via
 *			v4l2_m2m_register_media_controller().
 * @sink_pad:		&struct media_pad with the sink pad.
 *			Used only when the M2M device is registered via
 *			v4l2_m2m_register_media_controller().
 * @proc:		&struct media_entity pointer with the M2M device itself.
 * @proc_pads:		&struct media_pad with the @proc pads.
 *			Used only when the M2M device is registered via
 *			v4l2_m2m_unregister_media_controller().
 * @intf_devnode:	&struct media_intf devnode pointer with the interface
 *			with controls the M2M device.
 * @curr_ctx:		currently running instance
 * @job_queue:		instances queued to run
 * @job_spinlock:	protects job_queue
 * @job_work:		worker to run queued jobs.
 * @job_queue_flags:	flags of the queue status, %QUEUE_PAUSED.
 * @m2m_ops:		driver callbacks
 */
struct v4l2_m2m_dev {};

static struct v4l2_m2m_queue_ctx *get_queue_ctx(struct v4l2_m2m_ctx *m2m_ctx,
						enum v4l2_buf_type type)
{}

struct vb2_queue *v4l2_m2m_get_vq(struct v4l2_m2m_ctx *m2m_ctx,
				       enum v4l2_buf_type type)
{}
EXPORT_SYMBOL();

struct vb2_v4l2_buffer *v4l2_m2m_next_buf(struct v4l2_m2m_queue_ctx *q_ctx)
{}
EXPORT_SYMBOL_GPL();

struct vb2_v4l2_buffer *v4l2_m2m_last_buf(struct v4l2_m2m_queue_ctx *q_ctx)
{}
EXPORT_SYMBOL_GPL();

struct vb2_v4l2_buffer *v4l2_m2m_buf_remove(struct v4l2_m2m_queue_ctx *q_ctx)
{}
EXPORT_SYMBOL_GPL();

void v4l2_m2m_buf_remove_by_buf(struct v4l2_m2m_queue_ctx *q_ctx,
				struct vb2_v4l2_buffer *vbuf)
{}
EXPORT_SYMBOL_GPL();

struct vb2_v4l2_buffer *
v4l2_m2m_buf_remove_by_idx(struct v4l2_m2m_queue_ctx *q_ctx, unsigned int idx)

{}
EXPORT_SYMBOL_GPL();

/*
 * Scheduling handlers
 */

void *v4l2_m2m_get_curr_priv(struct v4l2_m2m_dev *m2m_dev)
{}
EXPORT_SYMBOL();

/**
 * v4l2_m2m_try_run() - select next job to perform and run it if possible
 * @m2m_dev: per-device context
 *
 * Get next transaction (if present) from the waiting jobs list and run it.
 *
 * Note that this function can run on a given v4l2_m2m_ctx context,
 * but call .device_run for another context.
 */
static void v4l2_m2m_try_run(struct v4l2_m2m_dev *m2m_dev)
{}

/*
 * __v4l2_m2m_try_queue() - queue a job
 * @m2m_dev: m2m device
 * @m2m_ctx: m2m context
 *
 * Check if this context is ready to queue a job.
 *
 * This function can run in interrupt context.
 */
static void __v4l2_m2m_try_queue(struct v4l2_m2m_dev *m2m_dev,
				 struct v4l2_m2m_ctx *m2m_ctx)
{}

/**
 * v4l2_m2m_try_schedule() - schedule and possibly run a job for any context
 * @m2m_ctx: m2m context
 *
 * Check if this context is ready to queue a job. If suitable,
 * run the next queued job on the mem2mem device.
 *
 * This function shouldn't run in interrupt context.
 *
 * Note that v4l2_m2m_try_schedule() can schedule one job for this context,
 * and then run another job for another context.
 */
void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx)
{}
EXPORT_SYMBOL_GPL();

/**
 * v4l2_m2m_device_run_work() - run pending jobs for the context
 * @work: Work structure used for scheduling the execution of this function.
 */
static void v4l2_m2m_device_run_work(struct work_struct *work)
{}

/**
 * v4l2_m2m_cancel_job() - cancel pending jobs for the context
 * @m2m_ctx: m2m context with jobs to be canceled
 *
 * In case of streamoff or release called on any context,
 * 1] If the context is currently running, then abort job will be called
 * 2] If the context is queued, then the context will be removed from
 *    the job_queue
 */
static void v4l2_m2m_cancel_job(struct v4l2_m2m_ctx *m2m_ctx)
{}

/*
 * Schedule the next job, called from v4l2_m2m_job_finish() or
 * v4l2_m2m_buf_done_and_job_finish().
 */
static void v4l2_m2m_schedule_next_job(struct v4l2_m2m_dev *m2m_dev,
				       struct v4l2_m2m_ctx *m2m_ctx)
{}

/*
 * Assumes job_spinlock is held, called from v4l2_m2m_job_finish() or
 * v4l2_m2m_buf_done_and_job_finish().
 */
static bool _v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev,
				 struct v4l2_m2m_ctx *m2m_ctx)
{}

void v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev,
			 struct v4l2_m2m_ctx *m2m_ctx)
{}
EXPORT_SYMBOL();

void v4l2_m2m_buf_done_and_job_finish(struct v4l2_m2m_dev *m2m_dev,
				      struct v4l2_m2m_ctx *m2m_ctx,
				      enum vb2_buffer_state state)
{}
EXPORT_SYMBOL();

void v4l2_m2m_suspend(struct v4l2_m2m_dev *m2m_dev)
{}
EXPORT_SYMBOL();

void v4l2_m2m_resume(struct v4l2_m2m_dev *m2m_dev)
{}
EXPORT_SYMBOL();

int v4l2_m2m_reqbufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		     struct v4l2_requestbuffers *reqbufs)
{}
EXPORT_SYMBOL_GPL();

static void v4l2_m2m_adjust_mem_offset(struct vb2_queue *vq,
				       struct v4l2_buffer *buf)
{}

int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		      struct v4l2_buffer *buf)
{}
EXPORT_SYMBOL_GPL();

/*
 * This will add the LAST flag and mark the buffer management
 * state as stopped.
 * This is called when the last capture buffer must be flagged as LAST
 * in draining mode from the encoder/decoder driver buf_queue() callback
 * or from v4l2_update_last_buf_state() when a capture buffer is available.
 */
void v4l2_m2m_last_buffer_done(struct v4l2_m2m_ctx *m2m_ctx,
			       struct vb2_v4l2_buffer *vbuf)
{}
EXPORT_SYMBOL_GPL();

/* When stop command is issued, update buffer management state */
static int v4l2_update_last_buf_state(struct v4l2_m2m_ctx *m2m_ctx)
{}

/*
 * Updates the encoding/decoding buffer management state, should
 * be called from encoder/decoder drivers start_streaming()
 */
void v4l2_m2m_update_start_streaming_state(struct v4l2_m2m_ctx *m2m_ctx,
					   struct vb2_queue *q)
{}
EXPORT_SYMBOL_GPL();

/*
 * Updates the encoding/decoding buffer management state, should
 * be called from encoder/decoder driver stop_streaming()
 */
void v4l2_m2m_update_stop_streaming_state(struct v4l2_m2m_ctx *m2m_ctx,
					  struct vb2_queue *q)
{}
EXPORT_SYMBOL_GPL();

static void v4l2_m2m_force_last_buf_done(struct v4l2_m2m_ctx *m2m_ctx,
					 struct vb2_queue *q)
{}

int v4l2_m2m_qbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		  struct v4l2_buffer *buf)
{}
EXPORT_SYMBOL_GPL();

int v4l2_m2m_dqbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		   struct v4l2_buffer *buf)
{}
EXPORT_SYMBOL_GPL();

int v4l2_m2m_prepare_buf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
			 struct v4l2_buffer *buf)
{}
EXPORT_SYMBOL_GPL();

int v4l2_m2m_create_bufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
			 struct v4l2_create_buffers *create)
{}
EXPORT_SYMBOL_GPL();

int v4l2_m2m_expbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		  struct v4l2_exportbuffer *eb)
{}
EXPORT_SYMBOL_GPL();

int v4l2_m2m_streamon(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		      enum v4l2_buf_type type)
{}
EXPORT_SYMBOL_GPL();

int v4l2_m2m_streamoff(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		       enum v4l2_buf_type type)
{}
EXPORT_SYMBOL_GPL();

static __poll_t v4l2_m2m_poll_for_data(struct file *file,
				       struct v4l2_m2m_ctx *m2m_ctx,
				       struct poll_table_struct *wait)
{}

__poll_t v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		       struct poll_table_struct *wait)
{}
EXPORT_SYMBOL_GPL();

int v4l2_m2m_mmap(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
			 struct vm_area_struct *vma)
{}
EXPORT_SYMBOL();

#ifndef CONFIG_MMU
unsigned long v4l2_m2m_get_unmapped_area(struct file *file, unsigned long addr,
					 unsigned long len, unsigned long pgoff,
					 unsigned long flags)
{
	struct v4l2_fh *fh = file->private_data;
	struct v4l2_m2m_ctx *m2m_ctx = fh->m2m_ctx;
	struct vb2_queue *vq;

	/*
	 * Source and destination queues share one mmap offset space:
	 * destination buffers start at DST_QUEUE_OFF_BASE, everything
	 * below that belongs to the source queue (see the comment at the
	 * DST_QUEUE_OFF_BASE definition).
	 */
	if ((pgoff << PAGE_SHIFT) >= DST_QUEUE_OFF_BASE) {
		vq = v4l2_m2m_get_dst_vq(m2m_ctx);
		/* Rebase so vb2 sees a queue-local page offset. */
		pgoff -= DST_QUEUE_OFF_BASE >> PAGE_SHIFT;
	} else {
		vq = v4l2_m2m_get_src_vq(m2m_ctx);
	}

	return vb2_get_unmapped_area(vq, addr, len, pgoff, flags);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_get_unmapped_area);
#endif

#if defined(CONFIG_MEDIA_CONTROLLER)
void v4l2_m2m_unregister_media_controller(struct v4l2_m2m_dev *m2m_dev)
{}
EXPORT_SYMBOL_GPL();

static int v4l2_m2m_register_entity(struct media_device *mdev,
	struct v4l2_m2m_dev *m2m_dev, enum v4l2_m2m_entity_type type,
	struct video_device *vdev, int function)
{}

int v4l2_m2m_register_media_controller(struct v4l2_m2m_dev *m2m_dev,
		struct video_device *vdev, int function)
{}
EXPORT_SYMBOL_GPL();
#endif

struct v4l2_m2m_dev *v4l2_m2m_init(const struct v4l2_m2m_ops *m2m_ops)
{}
EXPORT_SYMBOL_GPL();

void v4l2_m2m_release(struct v4l2_m2m_dev *m2m_dev)
{}
EXPORT_SYMBOL_GPL();

struct v4l2_m2m_ctx *v4l2_m2m_ctx_init(struct v4l2_m2m_dev *m2m_dev,
		void *drv_priv,
		int (*queue_init)(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq))
{}
EXPORT_SYMBOL_GPL();

void v4l2_m2m_ctx_release(struct v4l2_m2m_ctx *m2m_ctx)
{}
EXPORT_SYMBOL_GPL();

void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx,
		struct vb2_v4l2_buffer *vbuf)
{}
EXPORT_SYMBOL_GPL();

void v4l2_m2m_buf_copy_metadata(const struct vb2_v4l2_buffer *out_vb,
				struct vb2_v4l2_buffer *cap_vb,
				bool copy_frame_flags)
{}
EXPORT_SYMBOL_GPL();

void v4l2_m2m_request_queue(struct media_request *req)
{}
EXPORT_SYMBOL_GPL();

/* Videobuf2 ioctl helpers */

int v4l2_m2m_ioctl_reqbufs(struct file *file, void *priv,
				struct v4l2_requestbuffers *rb)
{}
EXPORT_SYMBOL_GPL();

int v4l2_m2m_ioctl_create_bufs(struct file *file, void *priv,
				struct v4l2_create_buffers *create)
{}
EXPORT_SYMBOL_GPL();

int v4l2_m2m_ioctl_remove_bufs(struct file *file, void *priv,
			       struct v4l2_remove_buffers *remove)
{}
EXPORT_SYMBOL_GPL();

int v4l2_m2m_ioctl_querybuf(struct file *file, void *priv,
				struct v4l2_buffer *buf)
{}
EXPORT_SYMBOL_GPL();

int v4l2_m2m_ioctl_qbuf(struct file *file, void *priv,
				struct v4l2_buffer *buf)
{}
EXPORT_SYMBOL_GPL();

int v4l2_m2m_ioctl_dqbuf(struct file *file, void *priv,
				struct v4l2_buffer *buf)
{}
EXPORT_SYMBOL_GPL();

int v4l2_m2m_ioctl_prepare_buf(struct file *file, void *priv,
			       struct v4l2_buffer *buf)
{}
EXPORT_SYMBOL_GPL();

int v4l2_m2m_ioctl_expbuf(struct file *file, void *priv,
				struct v4l2_exportbuffer *eb)
{}
EXPORT_SYMBOL_GPL();

int v4l2_m2m_ioctl_streamon(struct file *file, void *priv,
				enum v4l2_buf_type type)
{}
EXPORT_SYMBOL_GPL();

int v4l2_m2m_ioctl_streamoff(struct file *file, void *priv,
				enum v4l2_buf_type type)
{}
EXPORT_SYMBOL_GPL();

int v4l2_m2m_ioctl_try_encoder_cmd(struct file *file, void *fh,
				   struct v4l2_encoder_cmd *ec)
{}
EXPORT_SYMBOL_GPL();

int v4l2_m2m_ioctl_try_decoder_cmd(struct file *file, void *fh,
				   struct v4l2_decoder_cmd *dc)
{}
EXPORT_SYMBOL_GPL();

/*
 * Updates the encoding state on ENC_CMD_STOP/ENC_CMD_START
 * Should be called from the encoder driver encoder_cmd() callback
 */
int v4l2_m2m_encoder_cmd(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
			 struct v4l2_encoder_cmd *ec)
{}
EXPORT_SYMBOL_GPL();

/*
 * Updates the decoding state on DEC_CMD_STOP/DEC_CMD_START
 * Should be called from the decoder driver decoder_cmd() callback
 */
int v4l2_m2m_decoder_cmd(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
			 struct v4l2_decoder_cmd *dc)
{}
EXPORT_SYMBOL_GPL();

int v4l2_m2m_ioctl_encoder_cmd(struct file *file, void *priv,
			       struct v4l2_encoder_cmd *ec)
{}
EXPORT_SYMBOL_GPL();

int v4l2_m2m_ioctl_decoder_cmd(struct file *file, void *priv,
			       struct v4l2_decoder_cmd *dc)
{}
EXPORT_SYMBOL_GPL();

int v4l2_m2m_ioctl_stateless_try_decoder_cmd(struct file *file, void *fh,
					     struct v4l2_decoder_cmd *dc)
{}
EXPORT_SYMBOL_GPL();

int v4l2_m2m_ioctl_stateless_decoder_cmd(struct file *file, void *priv,
					 struct v4l2_decoder_cmd *dc)
{}
EXPORT_SYMBOL_GPL();

/*
 * v4l2_file_operations helpers. It is assumed here same lock is used
 * for the output and the capture buffer queue.
 */

int v4l2_m2m_fop_mmap(struct file *file, struct vm_area_struct *vma)
{}
EXPORT_SYMBOL_GPL();

__poll_t v4l2_m2m_fop_poll(struct file *file, poll_table *wait)
{}
EXPORT_SYMBOL_GPL();