#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pfn.h>
#include <linux/pm_runtime.h>
#include <linux/property.h>
#include <linux/vmalloc.h>
#include <media/ipu-bridge.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
#include <media/v4l2-event.h>
#include <media/v4l2-fwnode.h>
#include <media/v4l2-mc.h>
#include <media/v4l2-ioctl.h>
#include <media/videobuf2-dma-sg.h>
#include "ipu3-cio2.h"
struct ipu3_cio2_fmt { … };
static const struct ipu3_cio2_fmt formats[] = …;
static const struct ipu3_cio2_fmt *cio2_find_format(const u32 *pixelformat,
const u32 *mbus_code)
{ … }
static inline u32 cio2_bytesperline(const unsigned int width)
{ … }
static void cio2_fbpt_exit_dummy(struct cio2_device *cio2)
{ … }
static int cio2_fbpt_init_dummy(struct cio2_device *cio2)
{ … }
static void cio2_fbpt_entry_enable(struct cio2_device *cio2,
struct cio2_fbpt_entry entry[CIO2_MAX_LOPS])
{ … }
static void cio2_fbpt_entry_init_dummy(struct cio2_device *cio2,
struct cio2_fbpt_entry
entry[CIO2_MAX_LOPS])
{ … }
static void cio2_fbpt_entry_init_buf(struct cio2_device *cio2,
struct cio2_buffer *b,
struct cio2_fbpt_entry
entry[CIO2_MAX_LOPS])
{ … }
static int cio2_fbpt_init(struct cio2_device *cio2, struct cio2_queue *q)
{ … }
static void cio2_fbpt_exit(struct cio2_queue *q, struct device *dev)
{ … }
#define LIMIT_SHIFT …
/*
 * cio2_rx_timing() - compute one CSI-2 receiver delay-count parameter
 * @a:    additive term for this parameter (scaled by the counting accuracy)
 * @b:    multiplier applied to the unit-interval-derived term
 * @freq: CSI-2 link frequency in Hz
 * @def:  fallback value returned if @freq is out of the usable range
 *
 * Evaluates accinv * a + accinv * b * UI / freq using 32-bit arithmetic,
 * pre-scaling by LIMIT_SHIFT so the division operands fit in s32.
 * Returns @def (with a WARN) when the scaled frequency is non-positive
 * or exceeds S32_MAX.
 */
static s32 cio2_rx_timing(s32 a, s32 b, s64 freq, int def)
{
	const u32 accinv = 16;		/* inverse of the 1/16-cycle counting accuracy */
	const u32 uiinv = 500000000;	/* 1e9 / 2: unit-interval factor (two bits per clock) */
	s32 r;

	/* Pre-scale so the frequency fits in s32 for the division below. */
	freq >>= LIMIT_SHIFT;

	if (WARN_ON(freq <= 0 || freq > S32_MAX))
		return def;

	/*
	 * NOTE(review): assumes |accinv * b * (uiinv >> LIMIT_SHIFT)| fits in
	 * s32 for every caller's CIO2_CSIRX_DLY_CNT_* A/B constant -- confirm
	 * against ipu3-cio2.h.
	 */
	r = accinv * b * (uiinv >> LIMIT_SHIFT);
	r = r / (s32)freq;
	r += accinv * a;

	return r;
}
/*
 * cio2_csi2_calc_timing() - derive CSI-2 RX termination/settle delay counts
 * @cio2:   CIO2 device (used for its PCI device, for logging)
 * @q:      queue whose bound sensor supplies the link frequency control
 * @timing: filled in with clock- and data-lane term-enable/settle counts
 * @bpp:    bits per pixel of the negotiated media bus format
 * @lanes:  number of CSI-2 data lanes in use
 *
 * Queries the sensor's link frequency via v4l2_get_link_freq() and
 * converts it into the four receiver delay counts, falling back to the
 * per-parameter defaults inside cio2_rx_timing() if the frequency is
 * out of range.
 *
 * Return: 0 on success, -ENODEV if no sensor is bound to @q, or the
 * negative error code from v4l2_get_link_freq().
 */
static int cio2_csi2_calc_timing(struct cio2_device *cio2, struct cio2_queue *q,
				 struct cio2_csi2_timing *timing,
				 unsigned int bpp, unsigned int lanes)
{
	struct device *dev = &cio2->pci_dev->dev;
	s64 freq;

	if (!q->sensor)
		return -ENODEV;

	/*
	 * lanes * 2: each lane carries two bits per link clock cycle
	 * (double data rate), so pixel rate = link_freq * 2 * lanes / bpp.
	 */
	freq = v4l2_get_link_freq(q->sensor->ctrl_handler, bpp, lanes * 2);
	if (freq < 0) {
		dev_err(dev, "error %lld, invalid link_freq\n", freq);
		/* Negative errno from v4l2_get_link_freq(); fits in int. */
		return freq;
	}

	timing->clk_termen = cio2_rx_timing(CIO2_CSIRX_DLY_CNT_TERMEN_CLANE_A,
					    CIO2_CSIRX_DLY_CNT_TERMEN_CLANE_B,
					    freq,
					    CIO2_CSIRX_DLY_CNT_TERMEN_DEFAULT);
	timing->clk_settle = cio2_rx_timing(CIO2_CSIRX_DLY_CNT_SETTLE_CLANE_A,
					    CIO2_CSIRX_DLY_CNT_SETTLE_CLANE_B,
					    freq,
					    CIO2_CSIRX_DLY_CNT_SETTLE_DEFAULT);
	timing->dat_termen = cio2_rx_timing(CIO2_CSIRX_DLY_CNT_TERMEN_DLANE_A,
					    CIO2_CSIRX_DLY_CNT_TERMEN_DLANE_B,
					    freq,
					    CIO2_CSIRX_DLY_CNT_TERMEN_DEFAULT);
	timing->dat_settle = cio2_rx_timing(CIO2_CSIRX_DLY_CNT_SETTLE_DLANE_A,
					    CIO2_CSIRX_DLY_CNT_SETTLE_DLANE_B,
					    freq,
					    CIO2_CSIRX_DLY_CNT_SETTLE_DEFAULT);

	dev_dbg(dev, "freq ct value is %d\n", timing->clk_termen);
	dev_dbg(dev, "freq cs value is %d\n", timing->clk_settle);
	dev_dbg(dev, "freq dt value is %d\n", timing->dat_termen);
	dev_dbg(dev, "freq ds value is %d\n", timing->dat_settle);

	return 0;
}
static int cio2_hw_init(struct cio2_device *cio2, struct cio2_queue *q)
{ … }
static void cio2_hw_exit(struct cio2_device *cio2, struct cio2_queue *q)
{ … }
static void cio2_buffer_done(struct cio2_device *cio2, unsigned int dma_chan)
{ … }
static void cio2_queue_event_sof(struct cio2_device *cio2, struct cio2_queue *q)
{ … }
static const char *const cio2_irq_errs[] = …;
static void cio2_irq_log_irq_errs(struct device *dev, u8 port, u32 status)
{ … }
static const char *const cio2_port_errs[] = …;
static void cio2_irq_log_port_errs(struct device *dev, u8 port, u32 status)
{ … }
static void cio2_irq_handle_once(struct cio2_device *cio2, u32 int_status)
{ … }
static irqreturn_t cio2_irq(int irq, void *cio2_ptr)
{ … }
static void cio2_vb2_return_all_buffers(struct cio2_queue *q,
enum vb2_buffer_state state)
{ … }
static int cio2_vb2_queue_setup(struct vb2_queue *vq,
unsigned int *num_buffers,
unsigned int *num_planes,
unsigned int sizes[],
struct device *alloc_devs[])
{ … }
static int cio2_vb2_buf_init(struct vb2_buffer *vb)
{ … }
static void cio2_vb2_buf_queue(struct vb2_buffer *vb)
{ … }
static void cio2_vb2_buf_cleanup(struct vb2_buffer *vb)
{ … }
static int cio2_vb2_start_streaming(struct vb2_queue *vq, unsigned int count)
{ … }
static void cio2_vb2_stop_streaming(struct vb2_queue *vq)
{ … }
static const struct vb2_ops cio2_vb2_ops = …;
static int cio2_v4l2_querycap(struct file *file, void *fh,
struct v4l2_capability *cap)
{ … }
static int cio2_v4l2_enum_fmt(struct file *file, void *fh,
struct v4l2_fmtdesc *f)
{ … }
static int cio2_v4l2_g_fmt(struct file *file, void *fh, struct v4l2_format *f)
{ … }
static int cio2_v4l2_try_fmt(struct file *file, void *fh, struct v4l2_format *f)
{ … }
static int cio2_v4l2_s_fmt(struct file *file, void *fh, struct v4l2_format *f)
{ … }
static int
cio2_video_enum_input(struct file *file, void *fh, struct v4l2_input *input)
{ … }
static int
cio2_video_g_input(struct file *file, void *fh, unsigned int *input)
{ … }
static int
cio2_video_s_input(struct file *file, void *fh, unsigned int input)
{ … }
static const struct v4l2_file_operations cio2_v4l2_fops = …;
static const struct v4l2_ioctl_ops cio2_v4l2_ioctl_ops = …;
static int cio2_subdev_subscribe_event(struct v4l2_subdev *sd,
struct v4l2_fh *fh,
struct v4l2_event_subscription *sub)
{ … }
static int cio2_subdev_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{ … }
static int cio2_subdev_get_fmt(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{ … }
static int cio2_subdev_set_fmt(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{ … }
static int cio2_subdev_enum_mbus_code(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{ … }
static int cio2_subdev_link_validate_get_format(struct media_pad *pad,
struct v4l2_subdev_format *fmt)
{ … }
static int cio2_video_link_validate(struct media_link *link)
{ … }
static const struct v4l2_subdev_core_ops cio2_subdev_core_ops = …;
static const struct v4l2_subdev_internal_ops cio2_subdev_internal_ops = …;
static const struct v4l2_subdev_pad_ops cio2_subdev_pad_ops = …;
static const struct v4l2_subdev_ops cio2_subdev_ops = …;
struct sensor_async_subdev { … };
#define to_sensor_asd(__asd) …
static int cio2_notifier_bound(struct v4l2_async_notifier *notifier,
struct v4l2_subdev *sd,
struct v4l2_async_connection *asd)
{ … }
static void cio2_notifier_unbind(struct v4l2_async_notifier *notifier,
struct v4l2_subdev *sd,
struct v4l2_async_connection *asd)
{ … }
static int cio2_notifier_complete(struct v4l2_async_notifier *notifier)
{ … }
static const struct v4l2_async_notifier_operations cio2_async_ops = …;
static int cio2_parse_firmware(struct cio2_device *cio2)
{ … }
static const struct media_entity_operations cio2_media_ops = …;
static const struct media_entity_operations cio2_video_entity_ops = …;
static int cio2_queue_init(struct cio2_device *cio2, struct cio2_queue *q)
{ … }
static void cio2_queue_exit(struct cio2_device *cio2, struct cio2_queue *q)
{ … }
static int cio2_queues_init(struct cio2_device *cio2)
{ … }
static void cio2_queues_exit(struct cio2_device *cio2)
{ … }
static int cio2_pci_probe(struct pci_dev *pci_dev,
const struct pci_device_id *id)
{ … }
static void cio2_pci_remove(struct pci_dev *pci_dev)
{ … }
static int __maybe_unused cio2_runtime_suspend(struct device *dev)
{ … }
static int __maybe_unused cio2_runtime_resume(struct device *dev)
{ … }
static void arrange(void *ptr, size_t elem_size, size_t elems, size_t start)
{ … }
static void cio2_fbpt_rearrange(struct cio2_device *cio2, struct cio2_queue *q)
{ … }
static int __maybe_unused cio2_suspend(struct device *dev)
{ … }
static int __maybe_unused cio2_resume(struct device *dev)
{ … }
static const struct dev_pm_ops cio2_pm_ops = …;
static const struct pci_device_id cio2_pci_id_table[] = …;
MODULE_DEVICE_TABLE(pci, cio2_pci_id_table);
static struct pci_driver cio2_pci_driver = …;
module_pci_driver(…) …;
MODULE_AUTHOR(…) …;
MODULE_AUTHOR(…) …;
MODULE_AUTHOR(…) …;
MODULE_AUTHOR(…) …;
MODULE_AUTHOR(…) …;
MODULE_LICENSE(…) …;
MODULE_DESCRIPTION(…) …;
MODULE_IMPORT_NS(…);