linux/drivers/dma/idxd/idxd.h

/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#ifndef _IDXD_H_
#define _IDXD_H_

#include <linux/sbitmap.h>
#include <linux/dmaengine.h>
#include <linux/percpu-rwsem.h>
#include <linux/wait.h>
#include <linux/cdev.h>
#include <linux/idr.h>
#include <linux/pci.h>
#include <linux/bitmap.h>
#include <linux/perf_event.h>
#include <linux/iommu.h>
#include <linux/crypto.h>
#include <uapi/linux/idxd.h>
#include "registers.h"

#define IDXD_DRIVER_VERSION

extern struct kmem_cache *idxd_desc_pool;
extern bool tc_override;

struct idxd_wq;
struct idxd_dev;

enum idxd_dev_type {};

struct idxd_dev {};

#define IDXD_REG_TIMEOUT
#define IDXD_DRAIN_TIMEOUT

enum idxd_type {};

#define IDXD_NAME_SIZE
#define IDXD_PMU_EVENT_MAX

#define IDXD_ENQCMDS_RETRIES
#define IDXD_ENQCMDS_MAX_RETRIES

enum idxd_complete_type {};

struct idxd_desc;

struct idxd_device_driver {};

extern struct idxd_device_driver dsa_drv;
extern struct idxd_device_driver idxd_drv;
extern struct idxd_device_driver idxd_dmaengine_drv;
extern struct idxd_device_driver idxd_user_drv;

#define INVALID_INT_HANDLE
struct idxd_irq_entry {};

struct idxd_group {};

struct idxd_pmu {};

#define IDXD_MAX_PRIORITY

enum {};

enum idxd_wq_state {};

enum idxd_wq_flag {};

enum idxd_wq_type {};

struct idxd_cdev {};

#define DRIVER_NAME_SIZE

#define IDXD_ALLOCATED_BATCH_SIZE
#define WQ_NAME_SIZE
#define WQ_TYPE_SIZE

#define WQ_DEFAULT_QUEUE_DEPTH
#define WQ_DEFAULT_MAX_XFER
#define WQ_DEFAULT_MAX_BATCH

enum idxd_op_type {};

struct idxd_dma_chan {};

struct idxd_wq {};

struct idxd_engine {};

/* shadow registers */
struct idxd_hw {};

enum idxd_device_state {};

enum idxd_device_flag {};

struct idxd_dma_dev {};

typedef int (*load_device_defaults_fn_t)(struct idxd_device *idxd);

struct idxd_driver_data {};

struct idxd_evl {};

struct idxd_evl_fault {};

struct idxd_device {};

static inline unsigned int evl_ent_size(struct idxd_device *idxd)
{}

static inline unsigned int evl_size(struct idxd_device *idxd)
{}

struct crypto_ctx {};

/* IDXD software descriptor */
struct idxd_desc {};

/*
 * These are software-defined errors for the completion status. We overload
 * error codes that will never appear in the completion status, only in the
 * SWERR register.
 */
enum idxd_completion_status {};
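/*
 * Illustrative sketch (not definitions from this header): upstream, the
 * enum above carries IDXD_COMP_DESC_ABORT (0xff), which software writes
 * into a descriptor's completion record to mark an abort. A completion
 * handler can then distinguish a software abort from hardware status,
 * roughly:
 *
 *	u8 status = desc->completion->status & DSA_COMP_STATUS_MASK;
 *
 *	if (status == IDXD_COMP_DESC_ABORT)
 *		comp_type = IDXD_COMPLETE_ABORT;
 *
 * The completion field and mask names are assumptions borrowed from the
 * upstream driver.
 */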

#define idxd_confdev(idxd)
#define wq_confdev(wq)
#define engine_confdev(engine)
#define group_confdev(group)
#define cdev_dev(cdev)
#define user_ctx_dev(ctx)

#define confdev_to_idxd_dev(dev)
#define idxd_dev_to_idxd(idxd_dev)
#define idxd_dev_to_wq(idxd_dev)

static inline struct idxd_device_driver *wq_to_idxd_drv(struct idxd_wq *wq)
{}

static inline struct idxd_device *confdev_to_idxd(struct device *dev)
{}

static inline struct idxd_wq *confdev_to_wq(struct device *dev)
{}

static inline struct idxd_engine *confdev_to_engine(struct device *dev)
{}

static inline struct idxd_group *confdev_to_group(struct device *dev)
{}

static inline struct idxd_cdev *dev_to_cdev(struct device *dev)
{}

static inline void idxd_dev_set_type(struct idxd_dev *idev, int type)
{}

static inline struct idxd_irq_entry *idxd_get_ie(struct idxd_device *idxd, int idx)
{}

static inline struct idxd_wq *ie_to_wq(struct idxd_irq_entry *ie)
{}

static inline struct idxd_device *ie_to_idxd(struct idxd_irq_entry *ie)
{}

static inline void idxd_set_user_intr(struct idxd_device *idxd, bool enable)
{}

extern const struct bus_type dsa_bus_type;

extern bool support_enqcmd;
extern struct ida idxd_ida;
extern const struct device_type dsa_device_type;
extern const struct device_type iax_device_type;
extern const struct device_type idxd_wq_device_type;
extern const struct device_type idxd_engine_device_type;
extern const struct device_type idxd_group_device_type;

static inline bool is_dsa_dev(struct idxd_dev *idxd_dev)
{}

static inline bool is_iax_dev(struct idxd_dev *idxd_dev)
{}

static inline bool is_idxd_dev(struct idxd_dev *idxd_dev)
{}

static inline bool is_idxd_wq_dev(struct idxd_dev *idxd_dev)
{}

static inline bool is_idxd_wq_dmaengine(struct idxd_wq *wq)
{}

static inline bool is_idxd_wq_user(struct idxd_wq *wq)
{}

static inline bool is_idxd_wq_kernel(struct idxd_wq *wq)
{}

static inline bool wq_dedicated(struct idxd_wq *wq)
{}

static inline bool wq_shared(struct idxd_wq *wq)
{}

static inline bool device_pasid_enabled(struct idxd_device *idxd)
{}

static inline bool device_user_pasid_enabled(struct idxd_device *idxd)
{}

static inline bool wq_pasid_enabled(struct idxd_wq *wq)
{}

static inline bool wq_shared_supported(struct idxd_wq *wq)
{}

enum idxd_portal_prot {};

enum idxd_interrupt_type {};

static inline int idxd_get_wq_portal_offset(enum idxd_portal_prot prot)
{}

static inline int idxd_get_wq_portal_full_offset(int wq_id,
						 enum idxd_portal_prot prot)
{}
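/*
 * Sketch of the portal arithmetic (an assumption based on the upstream
 * driver, since the bodies are elided here): each WQ owns four 4KB portal
 * pages, and the limited/unlimited variants sit 0x1000 apart, so the two
 * helpers reduce to roughly:
 *
 *	prot * 0x1000;				     (offset within the WQ's portals)
 *	((wq_id * 4) << PAGE_SHIFT) + prot * 0x1000; (offset from the portal BAR)
 */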

#define IDXD_PORTAL_MASK

/*
 * Even though this function can be accessed by multiple threads, it is safe
 * to use. At worst the address gets used more than once before it gets
 * incremented, and we don't hit that threshold until iops reaches many
 * millions per second. The occasional reuse of the same address is therefore
 * tolerable compared to the cost of an atomic variable. This is safe on any
 * system that has atomic load/store for 32-bit integers. Given that this is
 * an Intel iEP device, that should not be a problem.
 */
static inline void __iomem *idxd_wq_portal_addr(struct idxd_wq *wq)
{}
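/*
 * A minimal sketch of the round-robin described above, assuming the wq
 * keeps a portal_offset cursor as in the upstream driver:
 *
 *	int ofs = wq->portal_offset;
 *
 *	wq->portal_offset = (ofs + sizeof(struct dsa_raw_desc)) &
 *			    IDXD_PORTAL_MASK;
 *	return wq->portal + ofs;
 *
 * A racing reader may observe a stale cursor and reuse an address; that is
 * exactly the tolerable case argued above.
 */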

static inline void idxd_wq_get(struct idxd_wq *wq)
{}

static inline void idxd_wq_put(struct idxd_wq *wq)
{}

static inline int idxd_wq_refcount(struct idxd_wq *wq)
{
	return wq->client_count;
}

static inline void idxd_wq_set_private(struct idxd_wq *wq, void *private)
{}

static inline void *idxd_wq_get_private(struct idxd_wq *wq)
{}

/*
 * Intel IAA does not support batch processing.
 * The max batch size of the device, the max batch size of the wq, and the
 * max batch shift of the wqcfg should always be 0 on IAA.
 */
static inline void idxd_set_max_batch_size(int idxd_type, struct idxd_device *idxd,
					   u32 max_batch_size)
{}

static inline void idxd_wq_set_max_batch_size(int idxd_type, struct idxd_wq *wq,
					      u32 max_batch_size)
{}

static inline void idxd_wqcfg_set_max_batch_shift(int idxd_type, union wqcfg *wqcfg,
						  u32 max_batch_shift)
{}
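/*
 * Sketch of the pattern all three helpers implement, assuming an
 * IDXD_TYPE_IAX enumerator and a max_batch_size field as in the upstream
 * driver; the setters degenerate to a clamp on IAA:
 *
 *	if (idxd_type == IDXD_TYPE_IAX)
 *		idxd->max_batch_size = 0;
 *	else
 *		idxd->max_batch_size = max_batch_size;
 *
 * The wq and wqcfg variants follow the same shape for their respective
 * fields.
 */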

static inline int idxd_wq_driver_name_match(struct idxd_wq *wq, struct device *dev)
{}

#define MODULE_ALIAS_IDXD_DEVICE(type)
#define IDXD_DEVICES_MODALIAS_FMT

int __must_check __idxd_driver_register(struct idxd_device_driver *idxd_drv,
					struct module *module, const char *mod_name);
#define idxd_driver_register(driver)

void idxd_driver_unregister(struct idxd_device_driver *idxd_drv);

#define module_idxd_driver(__idxd_driver)
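/*
 * Expected usage, following the standard module_driver() pattern (the
 * macro bodies are elided above; "my_idxd_drv" is a hypothetical name):
 *
 *	static struct idxd_device_driver my_idxd_drv = {
 *		...
 *	};
 *	module_idxd_driver(my_idxd_drv);
 *
 * This wires idxd_driver_register()/idxd_driver_unregister() into the
 * module's init and exit paths.
 */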

void idxd_free_desc(struct idxd_wq *wq, struct idxd_desc *desc);
void idxd_dma_complete_txd(struct idxd_desc *desc,
			   enum idxd_complete_type comp_type,
			   bool free_desc, void *ctx, u32 *status);

static inline void idxd_desc_complete(struct idxd_desc *desc,
				      enum idxd_complete_type comp_type,
				      bool free_desc)
{}

int idxd_register_bus_type(void);
void idxd_unregister_bus_type(void);
int idxd_register_devices(struct idxd_device *idxd);
void idxd_unregister_devices(struct idxd_device *idxd);
void idxd_wqs_quiesce(struct idxd_device *idxd);
bool idxd_queue_int_handle_resubmit(struct idxd_desc *desc);
void multi_u64_to_bmap(unsigned long *bmap, u64 *val, int count);
int idxd_load_iaa_device_defaults(struct idxd_device *idxd);

/* device interrupt control */
irqreturn_t idxd_misc_thread(int vec, void *data);
irqreturn_t idxd_wq_thread(int irq, void *data);
void idxd_mask_error_interrupts(struct idxd_device *idxd);
void idxd_unmask_error_interrupts(struct idxd_device *idxd);

/* device control */
int idxd_device_drv_probe(struct idxd_dev *idxd_dev);
void idxd_device_drv_remove(struct idxd_dev *idxd_dev);
int idxd_drv_enable_wq(struct idxd_wq *wq);
void idxd_drv_disable_wq(struct idxd_wq *wq);
int idxd_device_init_reset(struct idxd_device *idxd);
int idxd_device_enable(struct idxd_device *idxd);
int idxd_device_disable(struct idxd_device *idxd);
void idxd_device_reset(struct idxd_device *idxd);
void idxd_device_clear_state(struct idxd_device *idxd);
int idxd_device_config(struct idxd_device *idxd);
void idxd_device_drain_pasid(struct idxd_device *idxd, int pasid);
int idxd_device_load_config(struct idxd_device *idxd);
int idxd_device_request_int_handle(struct idxd_device *idxd, int idx, int *handle,
				   enum idxd_interrupt_type irq_type);
int idxd_device_release_int_handle(struct idxd_device *idxd, int handle,
				   enum idxd_interrupt_type irq_type);

/* work queue control */
void idxd_wqs_unmap_portal(struct idxd_device *idxd);
int idxd_wq_alloc_resources(struct idxd_wq *wq);
void idxd_wq_free_resources(struct idxd_wq *wq);
int idxd_wq_enable(struct idxd_wq *wq);
int idxd_wq_disable(struct idxd_wq *wq, bool reset_config);
void idxd_wq_drain(struct idxd_wq *wq);
void idxd_wq_reset(struct idxd_wq *wq);
int idxd_wq_map_portal(struct idxd_wq *wq);
void idxd_wq_unmap_portal(struct idxd_wq *wq);
int idxd_wq_set_pasid(struct idxd_wq *wq, int pasid);
int idxd_wq_disable_pasid(struct idxd_wq *wq);
void __idxd_wq_quiesce(struct idxd_wq *wq);
void idxd_wq_quiesce(struct idxd_wq *wq);
int idxd_wq_init_percpu_ref(struct idxd_wq *wq);
void idxd_wq_free_irq(struct idxd_wq *wq);
int idxd_wq_request_irq(struct idxd_wq *wq);

/* submission */
int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc);
struct idxd_desc *idxd_alloc_desc(struct idxd_wq *wq, enum idxd_op_type optype);
int idxd_enqcmds(struct idxd_wq *wq, void __iomem *portal, const void *desc);
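/*
 * Typical submission flow (illustrative only; IDXD_OP_BLOCK is assumed
 * from the idxd_op_type enum elided above, and error handling is
 * abbreviated):
 *
 *	struct idxd_desc *desc = idxd_alloc_desc(wq, IDXD_OP_BLOCK);
 *	int rc;
 *
 *	if (IS_ERR(desc))
 *		return PTR_ERR(desc);
 *
 *	... fill desc->hw with the hardware descriptor ...
 *
 *	rc = idxd_submit_desc(wq, desc);
 *	if (rc < 0)
 *		idxd_free_desc(wq, desc);
 *
 * idxd_enqcmds() is the shared-WQ path; it retries the ENQCMDS
 * instruction up to IDXD_ENQCMDS_MAX_RETRIES times while the device
 * portal reports busy.
 */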

/* dmaengine */
int idxd_register_dma_device(struct idxd_device *idxd);
void idxd_unregister_dma_device(struct idxd_device *idxd);

/* cdev */
int idxd_cdev_register(void);
void idxd_cdev_remove(void);
int idxd_cdev_get_major(struct idxd_device *idxd);
int idxd_wq_add_cdev(struct idxd_wq *wq);
void idxd_wq_del_cdev(struct idxd_wq *wq);
int idxd_copy_cr(struct idxd_wq *wq, ioasid_t pasid, unsigned long addr,
		 void *buf, int len);
void idxd_user_counter_increment(struct idxd_wq *wq, u32 pasid, int index);

/* perfmon */
#if IS_ENABLED(CONFIG_INTEL_IDXD_PERFMON)
int perfmon_pmu_init(struct idxd_device *idxd);
void perfmon_pmu_remove(struct idxd_device *idxd);
void perfmon_counter_overflow(struct idxd_device *idxd);
#else
static inline int perfmon_pmu_init(struct idxd_device *idxd) { return 0; }
static inline void perfmon_pmu_remove(struct idxd_device *idxd) {}
static inline void perfmon_counter_overflow(struct idxd_device *idxd) {}
#endif

/* debugfs */
int idxd_device_init_debugfs(struct idxd_device *idxd);
void idxd_device_remove_debugfs(struct idxd_device *idxd);
int idxd_init_debugfs(void);
void idxd_remove_debugfs(void);

#endif