/* linux/drivers/media/pci/intel/ipu3/ipu3-cio2.c */

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2017,2020 Intel Corporation
 *
 * Based partially on Intel IPU4 driver written by
 *  Sakari Ailus <[email protected]>
 *  Samu Onkalo
 *  Jouni Högander <[email protected]>
 *  Jouni Ukkonen
 *  Antti Laakso <[email protected]>
 * et al.
 */

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pfn.h>
#include <linux/pm_runtime.h>
#include <linux/property.h>
#include <linux/vmalloc.h>

#include <media/ipu-bridge.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
#include <media/v4l2-event.h>
#include <media/v4l2-fwnode.h>
#include <media/v4l2-mc.h>
#include <media/v4l2-ioctl.h>
#include <media/videobuf2-dma-sg.h>

#include "ipu3-cio2.h"

/*
 * NOTE(review): struct members, the format table entries and the helper
 * bodies below are elided in this capture — confirm against the full
 * driver source before relying on them.
 */
struct ipu3_cio2_fmt {};

/*
 * These are raw formats used in Intel's third generation of
 * Image Processing Unit known as IPU3.
 * 10bit raw bayer packed, 32 bytes for every 25 pixels,
 * last LSB 6 bits unused.
 */
static const struct ipu3_cio2_fmt formats[] =;

/*
 * cio2_find_format - lookup color format by fourcc or/and media bus code
 * @pixelformat: fourcc to match, ignored if null
 * @mbus_code: media bus code to match, ignored if null
 */
static const struct ipu3_cio2_fmt *cio2_find_format(const u32 *pixelformat,
						    const u32 *mbus_code)
{}

/* Bytes per line for a line of @width pixels; body elided in this capture. */
static inline u32 cio2_bytesperline(const unsigned int width)
{}

/**************** FBPT operations ****************/

/*
 * NOTE(review): all FBPT (frame buffer pointer table) helper bodies are
 * elided in this capture; the comments describe intent only.
 */

static void cio2_fbpt_exit_dummy(struct cio2_device *cio2)
{}

static int cio2_fbpt_init_dummy(struct cio2_device *cio2)
{}

static void cio2_fbpt_entry_enable(struct cio2_device *cio2,
				   struct cio2_fbpt_entry entry[CIO2_MAX_LOPS])
{}

/* Initialize fpbt entries to point to dummy frame */
static void cio2_fbpt_entry_init_dummy(struct cio2_device *cio2,
				       struct cio2_fbpt_entry
				       entry[CIO2_MAX_LOPS])
{}

/* Initialize fpbt entries to point to a given buffer */
static void cio2_fbpt_entry_init_buf(struct cio2_device *cio2,
				     struct cio2_buffer *b,
				     struct cio2_fbpt_entry
				     entry[CIO2_MAX_LOPS])
{}

static int cio2_fbpt_init(struct cio2_device *cio2, struct cio2_queue *q)
{}

static void cio2_fbpt_exit(struct cio2_queue *q, struct device *dev)
{}

/**************** CSI2 hardware setup ****************/

/*
 * The CSI2 receiver has several parameters affecting
 * the receiver timings. These depend on the MIPI bus frequency
 * F in Hz (sensor transmitter rate) as follows:
 *     register value = (A/1e9 + B * UI) / COUNT_ACC
 * where
 *      UI = 1 / (2 * F) in seconds
 *      COUNT_ACC = counter accuracy in seconds
 *      For IPU3 COUNT_ACC = 0.0625
 *
 * A and B are coefficients from the table below,
 * depending whether the register minimum or maximum value is
 * calculated.
 *                                     Minimum     Maximum
 * Clock lane                          A     B     A     B
 * reg_rx_csi_dly_cnt_termen_clane     0     0    38     0
 * reg_rx_csi_dly_cnt_settle_clane    95    -8   300   -16
 * Data lanes
 * reg_rx_csi_dly_cnt_termen_dlane0    0     0    35     4
 * reg_rx_csi_dly_cnt_settle_dlane0   85    -2   145    -6
 * reg_rx_csi_dly_cnt_termen_dlane1    0     0    35     4
 * reg_rx_csi_dly_cnt_settle_dlane1   85    -2   145    -6
 * reg_rx_csi_dly_cnt_termen_dlane2    0     0    35     4
 * reg_rx_csi_dly_cnt_settle_dlane2   85    -2   145    -6
 * reg_rx_csi_dly_cnt_termen_dlane3    0     0    35     4
 * reg_rx_csi_dly_cnt_settle_dlane3   85    -2   145    -6
 *
 * We use the minimum values of both A and B.
 */

/*
 * shift for keeping value range suitable for 32-bit integer arithmetic
 */
#define LIMIT_SHIFT	8

/*
 * cio2_rx_timing - compute one CSI-2 Rx delay-count register value
 * @a: additive coefficient from the timing table above
 * @b: UI-multiplier coefficient from the timing table above
 * @freq: MIPI link frequency F in Hz (sensor transmitter rate)
 * @def: fallback value returned when @freq is out of the valid range
 *
 * Implements register value = (A/1e9 + B * UI) / COUNT_ACC with
 * UI = 1 / (2 * F) and COUNT_ACC = 0.0625 (accinv = 16), using only
 * 32-bit integer arithmetic; both @freq and the 1e9/2 constant are
 * pre-shifted right by LIMIT_SHIFT so intermediates cannot overflow.
 */
static s32 cio2_rx_timing(s32 a, s32 b, s64 freq, int def)
{
	const u32 accinv = 16; /* invert of counter resolution */
	const u32 uiinv = 500000000; /* 1e9 / 2 */
	s32 r;

	freq >>= LIMIT_SHIFT;

	if (WARN_ON(freq <= 0 || freq > S32_MAX))
		return def;
	/*
	 * b could be 0, -2 or -8, so |accinv * b| is always
	 * less than (1 << ds) and thus |r| < 500000000.
	 */
	r = accinv * b * (uiinv >> LIMIT_SHIFT);
	r = r / (s32)freq;
	/* max value of a is 95 */
	r += accinv * a;

	return r;
}

/*
 * cio2_csi2_calc_timing - calculate CSI-2 receiver Rx timing register values
 * @cio2: CIO2 device (used only for diagnostics via its PCI device)
 * @q: queue whose bound sensor supplies the link frequency
 * @timing: output; termination-enable and settle counts for clock/data lanes
 * @bpp: bits per pixel of the active format
 * @lanes: number of data lanes in use (divisor passed as 2 * lanes)
 *
 * Queries the sensor's link frequency and derives the four delay counts
 * from the coefficient table documented above, falling back to the
 * per-register defaults when the frequency is out of range.
 *
 * Return: 0 on success, -ENODEV if no sensor is bound to @q, or the
 * negative error code returned by v4l2_get_link_freq().
 */
static int cio2_csi2_calc_timing(struct cio2_device *cio2, struct cio2_queue *q,
				 struct cio2_csi2_timing *timing,
				 unsigned int bpp, unsigned int lanes)
{
	struct device *dev = &cio2->pci_dev->dev;
	s64 freq;

	if (!q->sensor)
		return -ENODEV;

	freq = v4l2_get_link_freq(q->sensor->ctrl_handler, bpp, lanes * 2);
	if (freq < 0) {
		dev_err(dev, "error %lld, invalid link_freq\n", freq);
		return freq;
	}

	timing->clk_termen = cio2_rx_timing(CIO2_CSIRX_DLY_CNT_TERMEN_CLANE_A,
					    CIO2_CSIRX_DLY_CNT_TERMEN_CLANE_B,
					    freq,
					    CIO2_CSIRX_DLY_CNT_TERMEN_DEFAULT);
	timing->clk_settle = cio2_rx_timing(CIO2_CSIRX_DLY_CNT_SETTLE_CLANE_A,
					    CIO2_CSIRX_DLY_CNT_SETTLE_CLANE_B,
					    freq,
					    CIO2_CSIRX_DLY_CNT_SETTLE_DEFAULT);
	timing->dat_termen = cio2_rx_timing(CIO2_CSIRX_DLY_CNT_TERMEN_DLANE_A,
					    CIO2_CSIRX_DLY_CNT_TERMEN_DLANE_B,
					    freq,
					    CIO2_CSIRX_DLY_CNT_TERMEN_DEFAULT);
	timing->dat_settle = cio2_rx_timing(CIO2_CSIRX_DLY_CNT_SETTLE_DLANE_A,
					    CIO2_CSIRX_DLY_CNT_SETTLE_DLANE_B,
					    freq,
					    CIO2_CSIRX_DLY_CNT_SETTLE_DEFAULT);

	dev_dbg(dev, "freq ct value is %d\n", timing->clk_termen);
	dev_dbg(dev, "freq cs value is %d\n", timing->clk_settle);
	dev_dbg(dev, "freq dt value is %d\n", timing->dat_termen);
	dev_dbg(dev, "freq ds value is %d\n", timing->dat_settle);

	return 0;
}

/*
 * NOTE(review): hardware setup/teardown, buffer completion and IRQ
 * handler bodies are elided in this capture, as are the error-string
 * tables; nothing here documents runtime behavior.
 */
static int cio2_hw_init(struct cio2_device *cio2, struct cio2_queue *q)
{}

static void cio2_hw_exit(struct cio2_device *cio2, struct cio2_queue *q)
{}

static void cio2_buffer_done(struct cio2_device *cio2, unsigned int dma_chan)
{}

static void cio2_queue_event_sof(struct cio2_device *cio2, struct cio2_queue *q)
{}

static const char *const cio2_irq_errs[] =;

static void cio2_irq_log_irq_errs(struct device *dev, u8 port, u32 status)
{}

static const char *const cio2_port_errs[] =;

static void cio2_irq_log_port_errs(struct device *dev, u8 port, u32 status)
{}

static void cio2_irq_handle_once(struct cio2_device *cio2, u32 int_status)
{}

static irqreturn_t cio2_irq(int irq, void *cio2_ptr)
{}

/**************** Videobuf2 interface ****************/

/*
 * NOTE(review): videobuf2 callback bodies and the vb2_ops initializer
 * are elided in this capture.
 */

static void cio2_vb2_return_all_buffers(struct cio2_queue *q,
					enum vb2_buffer_state state)
{}

static int cio2_vb2_queue_setup(struct vb2_queue *vq,
				unsigned int *num_buffers,
				unsigned int *num_planes,
				unsigned int sizes[],
				struct device *alloc_devs[])
{}

/* Called after each buffer is allocated */
static int cio2_vb2_buf_init(struct vb2_buffer *vb)
{}

/* Transfer buffer ownership to cio2 */
static void cio2_vb2_buf_queue(struct vb2_buffer *vb)
{}

/* Called when each buffer is freed */
static void cio2_vb2_buf_cleanup(struct vb2_buffer *vb)
{}

static int cio2_vb2_start_streaming(struct vb2_queue *vq, unsigned int count)
{}

static void cio2_vb2_stop_streaming(struct vb2_queue *vq)
{}

static const struct vb2_ops cio2_vb2_ops =;

/**************** V4L2 interface ****************/

/*
 * NOTE(review): V4L2 ioctl handlers, sub-device op bodies and the ops
 * table initializers are elided in this capture.
 */

static int cio2_v4l2_querycap(struct file *file, void *fh,
			      struct v4l2_capability *cap)
{}

static int cio2_v4l2_enum_fmt(struct file *file, void *fh,
			      struct v4l2_fmtdesc *f)
{}

/* The format is validated in cio2_video_link_validate() */
static int cio2_v4l2_g_fmt(struct file *file, void *fh, struct v4l2_format *f)
{}

static int cio2_v4l2_try_fmt(struct file *file, void *fh, struct v4l2_format *f)
{}

static int cio2_v4l2_s_fmt(struct file *file, void *fh, struct v4l2_format *f)
{}

static int
cio2_video_enum_input(struct file *file, void *fh, struct v4l2_input *input)
{}

static int
cio2_video_g_input(struct file *file, void *fh, unsigned int *input)
{}

static int
cio2_video_s_input(struct file *file, void *fh, unsigned int input)
{}

static const struct v4l2_file_operations cio2_v4l2_fops =;

static const struct v4l2_ioctl_ops cio2_v4l2_ioctl_ops =;

static int cio2_subdev_subscribe_event(struct v4l2_subdev *sd,
				       struct v4l2_fh *fh,
				       struct v4l2_event_subscription *sub)
{}

static int cio2_subdev_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{}

static int cio2_subdev_get_fmt(struct v4l2_subdev *sd,
			       struct v4l2_subdev_state *sd_state,
			       struct v4l2_subdev_format *fmt)
{}

static int cio2_subdev_set_fmt(struct v4l2_subdev *sd,
			       struct v4l2_subdev_state *sd_state,
			       struct v4l2_subdev_format *fmt)
{}

static int cio2_subdev_enum_mbus_code(struct v4l2_subdev *sd,
				      struct v4l2_subdev_state *sd_state,
				      struct v4l2_subdev_mbus_code_enum *code)
{}

static int cio2_subdev_link_validate_get_format(struct media_pad *pad,
						struct v4l2_subdev_format *fmt)
{}

static int cio2_video_link_validate(struct media_link *link)
{}

static const struct v4l2_subdev_core_ops cio2_subdev_core_ops =;

static const struct v4l2_subdev_internal_ops cio2_subdev_internal_ops =;

static const struct v4l2_subdev_pad_ops cio2_subdev_pad_ops =;

static const struct v4l2_subdev_ops cio2_subdev_ops =;

/******* V4L2 sub-device asynchronous registration callbacks***********/

/*
 * NOTE(review): the async-notifier struct members, the to_sensor_asd()
 * macro expansion and all callback bodies are elided in this capture.
 */

struct sensor_async_subdev {};

#define to_sensor_asd(__asd)

/* The .bound() notifier callback when a match is found */
static int cio2_notifier_bound(struct v4l2_async_notifier *notifier,
			       struct v4l2_subdev *sd,
			       struct v4l2_async_connection *asd)
{}

/* The .unbind callback */
static void cio2_notifier_unbind(struct v4l2_async_notifier *notifier,
				 struct v4l2_subdev *sd,
				 struct v4l2_async_connection *asd)
{}

/* .complete() is called after all subdevices have been located */
static int cio2_notifier_complete(struct v4l2_async_notifier *notifier)
{}

static const struct v4l2_async_notifier_operations cio2_async_ops =;

static int cio2_parse_firmware(struct cio2_device *cio2)
{}

/**************** Queue initialization ****************/
/*
 * NOTE(review): queue init/exit bodies and media entity ops
 * initializers are elided in this capture.
 */
static const struct media_entity_operations cio2_media_ops =;

static const struct media_entity_operations cio2_video_entity_ops =;

static int cio2_queue_init(struct cio2_device *cio2, struct cio2_queue *q)
{}

static void cio2_queue_exit(struct cio2_device *cio2, struct cio2_queue *q)
{}

static int cio2_queues_init(struct cio2_device *cio2)
{}

static void cio2_queues_exit(struct cio2_device *cio2)
{}

/**************** PCI interface ****************/

/*
 * NOTE(review): PCI probe/remove, runtime-PM and suspend/resume bodies,
 * plus the pm_ops and PCI ID table initializers, are elided in this
 * capture.
 */

static int cio2_pci_probe(struct pci_dev *pci_dev,
			  const struct pci_device_id *id)
{}

static void cio2_pci_remove(struct pci_dev *pci_dev)
{}

static int __maybe_unused cio2_runtime_suspend(struct device *dev)
{}

static int __maybe_unused cio2_runtime_resume(struct device *dev)
{}

/*
 * Helper function to advance all the elements of a circular buffer by "start"
 * positions
 */
static void arrange(void *ptr, size_t elem_size, size_t elems, size_t start)
{}

static void cio2_fbpt_rearrange(struct cio2_device *cio2, struct cio2_queue *q)
{}

static int __maybe_unused cio2_suspend(struct device *dev)
{}

static int __maybe_unused cio2_resume(struct device *dev)
{}

static const struct dev_pm_ops cio2_pm_ops =;

static const struct pci_device_id cio2_pci_id_table[] =;

MODULE_DEVICE_TABLE(pci, cio2_pci_id_table);

static struct pci_driver cio2_pci_driver =;

/*
 * NOTE(review): the module_pci_driver() and MODULE_*() macro arguments
 * are elided in this capture; restore them from the full driver source
 * before building.
 */
module_pci_driver();

MODULE_AUTHOR();
MODULE_AUTHOR();
MODULE_AUTHOR();
MODULE_AUTHOR();
MODULE_AUTHOR();
MODULE_LICENSE();
MODULE_DESCRIPTION();
MODULE_IMPORT_NS();