linux/drivers/accel/qaic/qaic_data.c

// SPDX-License-Identifier: GPL-2.0-only

/* Copyright (c) 2019-2021, The Linux Foundation. All rights reserved. */
/* Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved. */

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/math64.h>
#include <linux/mm.h>
#include <linux/moduleparam.h>
#include <linux/scatterlist.h>
#include <linux/spinlock.h>
#include <linux/srcu.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/wait.h>
#include <drm/drm_file.h>
#include <drm/drm_gem.h>
#include <drm/drm_prime.h>
#include <drm/drm_print.h>
#include <uapi/drm/qaic_accel.h>

#include "qaic.h"

#define SEM_VAL_MASK
#define SEM_INDEX_MASK
#define BULK_XFER
#define GEN_COMPLETION
#define INBOUND_XFER
#define OUTBOUND_XFER
#define REQHP_OFF
#define REQTP_OFF
#define RSPHP_OFF
#define RSPTP_OFF

#define ENCODE_SEM(val, index, sync, cmd, flags)
#define NUM_EVENTS
#define NUM_DELAYS
#define fifo_at(base, offset)
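
/*
 * The values of the masks and of ENCODE_SEM() above are elided. As a rough
 * illustration of the GENMASK()/FIELD_PREP() pattern such an encoder
 * typically uses, the sketch below packs a semaphore value, index, and
 * command into one 32-bit word. The field positions here are invented for
 * the example and are not the hardware's actual layout.
 */
#define EXAMPLE_SEM_VAL		GENMASK(11, 0)	/* hypothetical value field */
#define EXAMPLE_SEM_INDEX	GENMASK(20, 16)	/* hypothetical index field */
#define EXAMPLE_SEM_CMD		GENMASK(26, 24)	/* hypothetical command field */

static __maybe_unused u32 example_encode_sem(u16 val, u8 index, u8 cmd)
{
	return FIELD_PREP(EXAMPLE_SEM_VAL, val) |
	       FIELD_PREP(EXAMPLE_SEM_INDEX, index) |
	       FIELD_PREP(EXAMPLE_SEM_CMD, cmd);
}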

static unsigned int wait_exec_default_timeout_ms = 5000; /* 5 sec default */
module_param(wait_exec_default_timeout_ms, uint, 0600);
MODULE_PARM_DESC(wait_exec_default_timeout_ms, "Default timeout for DRM_IOCTL_QAIC_WAIT_BO");

static unsigned int datapath_poll_interval_us = 100; /* 100 usec default */
module_param(datapath_poll_interval_us, uint, 0600);
MODULE_PARM_DESC(datapath_poll_interval_us,
		 "Amount of time to sleep between activity when datapath polling is enabled");
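
/*
 * Illustrative sketch only: one plausible way the wait ioctl could apply
 * wait_exec_default_timeout_ms when userspace passes a timeout of 0. The
 * helper name is hypothetical; the real qaic_wait_bo_ioctl() body is
 * elided below.
 */
static __maybe_unused unsigned int example_wait_timeout_ms(u32 user_timeout_ms)
{
	/* Treat 0 as "use the module-parameter default". */
	return user_timeout_ms ? user_timeout_ms : wait_exec_default_timeout_ms;
}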

struct dbc_req {} __packed;

struct dbc_rsp {} __packed;
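
/*
 * The layouts of dbc_req and dbc_rsp are elided. For illustration, a FIFO
 * element shared with the device must use explicit little-endian types and
 * __packed so the struct matches the hardware's wire format exactly; the
 * fields below are invented for the example, not the real layout.
 */
struct example_fifo_elem {
	__le16	req_id;		/* matches a response back to its request */
	u8	cmd;		/* direction and completion-generation flags */
	u8	resv;		/* reserved by the hardware */
	__le64	src_addr;	/* DMA source address */
	__le64	dest_addr;	/* DMA destination address */
	__le32	len;		/* transfer length in bytes */
} __packed;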

static inline bool bo_queued(struct qaic_bo *bo)
{}

inline int get_dbc_req_elem_size(void)
{}

inline int get_dbc_rsp_elem_size(void)
{}

static void free_slice(struct kref *kref)
{}

static int clone_range_of_sgt_for_slice(struct qaic_device *qdev, struct sg_table **sgt_out,
					struct sg_table *sgt_in, u64 size, u64 offset)
{}

static int encode_reqs(struct qaic_device *qdev, struct bo_slice *slice,
		       struct qaic_attach_slice_entry *req)
{}

static int qaic_map_one_slice(struct qaic_device *qdev, struct qaic_bo *bo,
			      struct qaic_attach_slice_entry *slice_ent)
{}

static int create_sgt(struct qaic_device *qdev, struct sg_table **sgt_out, u64 size)
{}

static bool invalid_sem(struct qaic_sem *sem)
{}

static int qaic_validate_req(struct qaic_device *qdev, struct qaic_attach_slice_entry *slice_ent,
			     u32 count, u64 total_size)
{}

static void qaic_free_sgt(struct sg_table *sgt)
{}

static void qaic_gem_print_info(struct drm_printer *p, unsigned int indent,
				const struct drm_gem_object *obj)
{}

static const struct vm_operations_struct drm_vm_ops = {
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

static int qaic_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{}

static void qaic_free_object(struct drm_gem_object *obj)
{}

static const struct drm_gem_object_funcs qaic_gem_funcs = {
	.free = qaic_free_object,
	.print_info = qaic_gem_print_info,
	.mmap = qaic_gem_object_mmap,
	.vm_ops = &drm_vm_ops,
};

static void qaic_init_bo(struct qaic_bo *bo, bool reinit)
{}

static struct qaic_bo *qaic_alloc_init_bo(void)
{}

int qaic_create_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
{}

int qaic_mmap_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
{}

struct drm_gem_object *qaic_gem_prime_import(struct drm_device *dev, struct dma_buf *dma_buf)
{}

static int qaic_prepare_import_bo(struct qaic_bo *bo, struct qaic_attach_slice_hdr *hdr)
{}

static int qaic_prepare_export_bo(struct qaic_device *qdev, struct qaic_bo *bo,
				  struct qaic_attach_slice_hdr *hdr)
{}

static int qaic_prepare_bo(struct qaic_device *qdev, struct qaic_bo *bo,
			   struct qaic_attach_slice_hdr *hdr)
{}

static void qaic_unprepare_import_bo(struct qaic_bo *bo)
{}

static void qaic_unprepare_export_bo(struct qaic_device *qdev, struct qaic_bo *bo)
{}

static void qaic_unprepare_bo(struct qaic_device *qdev, struct qaic_bo *bo)
{}

static void qaic_free_slices_bo(struct qaic_bo *bo)
{}

static int qaic_attach_slicing_bo(struct qaic_device *qdev, struct qaic_bo *bo,
				  struct qaic_attach_slice_hdr *hdr,
				  struct qaic_attach_slice_entry *slice_ent)
{}

int qaic_attach_slice_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
{}

static inline u32 fifo_space_avail(u32 head, u32 tail, u32 q_size)
{}
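
/*
 * Sketch of the standard circular-FIFO accounting such a helper performs:
 * one slot is kept empty so that head == tail unambiguously means "empty".
 * This is the conventional formula, not necessarily the elided body above.
 */
static inline u32 example_fifo_space_avail(u32 head, u32 tail, u32 q_size)
{
	u32 avail = head - tail - 1;

	if (head <= tail)
		avail += q_size;

	return avail;
}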

static inline int copy_exec_reqs(struct qaic_device *qdev, struct bo_slice *slice, u32 dbc_id,
				 u32 head, u32 *ptail)
{}

static inline int copy_partial_exec_reqs(struct qaic_device *qdev, struct bo_slice *slice,
					 u64 resize, struct dma_bridge_chan *dbc, u32 head,
					 u32 *ptail)
{}

static int send_bo_list_to_device(struct qaic_device *qdev, struct drm_file *file_priv,
				  struct qaic_execute_entry *exec, unsigned int count,
				  bool is_partial, struct dma_bridge_chan *dbc, u32 head,
				  u32 *tail)
{}

static void update_profiling_data(struct drm_file *file_priv,
				  struct qaic_execute_entry *exec, unsigned int count,
				  bool is_partial, u64 received_ts, u64 submit_ts, u32 queue_level)
{}

static int __qaic_execute_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv,
				   bool is_partial)
{}

int qaic_execute_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
{}

int qaic_partial_execute_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
{}

/*
 * Our interrupt handling is a bit more complicated than a simple ideal, but
 * sadly necessary.
 *
 * Each dbc has a completion queue. Entries in the queue correspond to DMA
 * requests which the device has processed. The hardware already has built-in
 * IRQ mitigation: when the device puts an entry into the queue, it will only
 * trigger an interrupt if the queue was empty. Therefore, when adding the
 * Nth event to a non-empty queue, the hardware doesn't trigger an interrupt,
 * and the host doesn't get additional interrupts signaling the same thing -
 * the queue has something to process. (This behavior can be overridden per
 * DMA request.) A consequence of this mitigation is that when the host
 * receives an interrupt, it is required to drain the queue, since no
 * follow-up interrupt is guaranteed for entries added in the meantime.
 *
 * This behavior is what NAPI attempts to accomplish, although we can't use
 * NAPI as we don't have a netdev. We use threaded irqs instead.
 *
 * However, there is a situation where the host drains the queue fast enough
 * that every event causes an interrupt. Typically this is not a problem as
 * the rate of events would be low. However, that is not the case with
 * lprnet, for example. On an Intel Xeon D-2191 where we run 8 instances of
 * lprnet, the host receives roughly 80k interrupts per second from the
 * device (per /proc/interrupts). While the NAPI documentation indicates the
 * host should just chug along, sadly that behavior causes instability in
 * some hosts.
 *
 * Therefore, we implement an interrupt disable scheme similar to NAPI. The
 * key difference is that after draining the queue we delay for a short time
 * to allow additional events to come in via polling. With the lprnet
 * workload above, this reduces the number of interrupts processed from
 * ~80k/sec to about 64 in 5 minutes and appears to solve the system
 * instability. A simplified sketch of this drain-then-poll loop follows the
 * handler declarations below.
 */
irqreturn_t dbc_irq_handler(int irq, void *data)
{}

void irq_polling_work(struct work_struct *work)
{}

irqreturn_t dbc_irq_threaded_fn(int irq, void *data)
{}
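
/*
 * Sketch of the drain-then-poll scheme described above. Everything named
 * example_* is hypothetical; the real handlers are elided. The threaded
 * handler drains the response queue, then keeps polling for a short while
 * so that a steady stream of completions is consumed without the device
 * raising an interrupt per event.
 */
#define EXAMPLE_NUM_DELAYS	10	/* hypothetical idle-poll budget */

struct example_chan {
	bool (*drain)(struct example_chan *chan);	/* consume ready events */
};

static __maybe_unused irqreturn_t example_threaded_fn(int irq, void *data)
{
	struct example_chan *chan = data;
	int delays = 0;

	while (delays < EXAMPLE_NUM_DELAYS) {
		if (chan->drain(chan)) {
			/* Made progress; reset the idle countdown. */
			delays = 0;
			continue;
		}
		/* Idle: briefly wait for more events before re-arming. */
		usleep_range(datapath_poll_interval_us,
			     2 * datapath_poll_interval_us);
		delays++;
	}

	return IRQ_HANDLED;
}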

int qaic_wait_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
{}

int qaic_perf_stats_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
{}

static void detach_slice_bo(struct qaic_device *qdev, struct qaic_bo *bo)
{}

int qaic_detach_slice_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
{}

static void empty_xfer_list(struct qaic_device *qdev, struct dma_bridge_chan *dbc)
{}

int disable_dbc(struct qaic_device *qdev, u32 dbc_id, struct qaic_user *usr)
{}

/**
 * enable_dbc - Enable the DBC. DBCs are disabled by removing the user's
 * context from them; adding the user context back to a DBC enables it. This
 * function trusts the DBC ID passed in and expects the DBC to be disabled.
 * @qdev: Qranium device handle
 * @dbc_id: ID of the DBC
 * @usr: User context
 */
void enable_dbc(struct qaic_device *qdev, u32 dbc_id, struct qaic_user *usr)
{}

void wakeup_dbc(struct qaic_device *qdev, u32 dbc_id)
{}

void release_dbc(struct qaic_device *qdev, u32 dbc_id)
{}

void qaic_data_get_fifo_info(struct dma_bridge_chan *dbc, u32 *head, u32 *tail)
{}
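
/*
 * Sketch: the FIFO head/tail values typically come from the memory-mapped
 * register pair named by REQHP_OFF/REQTP_OFF above (values elided). The
 * base pointer, offsets, and helper name here are assumptions for the
 * example; readl() additionally needs <linux/io.h>.
 */
static __maybe_unused void example_get_fifo_info(void __iomem *dbc_base,
						 u32 *head, u32 *tail)
{
	*head = readl(dbc_base + 0x0);	/* assumed request-head offset */
	*tail = readl(dbc_base + 0x4);	/* assumed request-tail offset */
}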