/* linux/drivers/crypto/intel/iaa/iaa_crypto_main.c */

// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2021 Intel Corporation. All rights rsvd. */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/iommu.h>
#include <uapi/linux/idxd.h>
#include <linux/highmem.h>
#include <linux/sched/smt.h>
#include <crypto/internal/acompress.h>

#include "idxd.h"
#include "iaa_crypto.h"
#include "iaa_crypto_stats.h"

#ifdef pr_fmt
#undef pr_fmt
#endif

/*
 * Prefix all pr_*() output with the idxd subdriver name.  The original
 * copy defined pr_fmt() with an empty expansion, which silently discards
 * the caller's format string; restore the upstream definition.
 */
#define pr_fmt(fmt)			"idxd: " IDXD_SUBDRIVER_NAME ": " fmt

/* crypto algorithm registration priority (was defined with no value) */
#define IAA_ALG_PRIORITY               300

/* number of iaa instances probed */
static unsigned int nr_iaa;
/*
 * NOTE(review): presumably cached cpu/node topology counts used when
 * balancing wqs across IAA instances — confirm against
 * rebalance_wq_table() (bodies are stripped in this copy).
 */
static unsigned int nr_cpus;
static unsigned int nr_nodes;
static unsigned int nr_cpus_per_node;

/* Number of physical cpus sharing each iaa instance */
static unsigned int cpus_per_iaa;

/* software deflate tfm — looks like a fallback/verify path; TODO confirm */
static struct crypto_comp *deflate_generic_tfm;

/* Per-cpu lookup table for balanced wqs */
static struct wq_table_entry __percpu *wq_table;

/*
 * NOTE(review): all four wq_table helpers below are empty stubs — the
 * implementations appear to have been stripped from this copy.
 * wq_table_next_wq() is declared to return a pointer but returns
 * nothing, which is undefined behavior if the caller uses the result.
 */

/* Pick the next wq for @cpu from the per-cpu table — TODO: body missing */
static struct idxd_wq *wq_table_next_wq(int cpu)
{}

/* Add @wq to @cpu's table entry — TODO: body missing */
static void wq_table_add(int cpu, struct idxd_wq *wq)
{}

/* Free @cpu's table entry — TODO: body missing */
static void wq_table_free_entry(int cpu)
{}

/* Clear @cpu's table entry without freeing — TODO: body missing */
static void wq_table_clear_entry(int cpu)
{}

LIST_HEAD();
DEFINE_MUTEX();

/* If enabled, IAA hw crypto algos are registered, unavailable otherwise */
static bool iaa_crypto_enabled;
static bool iaa_crypto_registered;

/* Verify results of IAA compress or not */
static bool iaa_verify_compress =;

/*
 * Driver sysfs attribute "verify_compress".
 * NOTE(review): both show/store bodies are empty stubs in this copy; a
 * non-void function with no return statement is UB if the result is used.
 */

/* Emit the current iaa_verify_compress setting into @buf — TODO: body missing */
static ssize_t verify_compress_show(struct device_driver *driver, char *buf)
{}

/* Parse @buf and update iaa_verify_compress — TODO: body missing */
static ssize_t verify_compress_store(struct device_driver *driver,
				     const char *buf, size_t count)
{}
static DRIVER_ATTR_RW(verify_compress);

/*
 * The iaa crypto driver supports three 'sync' methods determining how
 * compressions and decompressions are performed:
 *
 * - sync:      the compression or decompression completes before
 *              returning.  This is the mode used by the async crypto
 *              interface when the sync mode is set to 'sync' and by
 *              the sync crypto interface regardless of setting.
 *
 * - async:     the compression or decompression is submitted and returns
 *              immediately.  Completion interrupts are not used so
 *              the caller is responsible for polling the descriptor
 *              for completion.  This mode is applicable to only the
 *              async crypto interface and is ignored for anything
 *              else.
 *
 * - async_irq: the compression or decompression is submitted and
 *              returns immediately.  Completion interrupts are
 *              enabled so the caller can wait for the completion and
 *              yield to other threads.  When the compression or
 *              decompression completes, the completion is signaled
 *              and the caller awakened.  This mode is applicable to
 *              only the async crypto interface and is ignored for
 *              anything else.
 *
 * These modes can be set using the iaa_crypto sync_mode driver
 * attribute.
 */

/* Use async mode */
static bool async_mode;
/* Use interrupts */
static bool use_irq;

/**
 * set_iaa_sync_mode - Set IAA sync mode
 * @name: The name of the sync mode
 *
 * Make the IAA sync mode named @name the current sync mode used by
 * compression/decompression.
 */

/* NOTE(review): body stripped — presumably maps @name onto the
 * async_mode/use_irq flags above per the comment block; TODO confirm.
 */
static int set_iaa_sync_mode(const char *name)
{}

/* sysfs "sync_mode" show — TODO: body missing */
static ssize_t sync_mode_show(struct device_driver *driver, char *buf)
{}

/* sysfs "sync_mode" store — TODO: body missing */
static ssize_t sync_mode_store(struct device_driver *driver,
			       const char *buf, size_t count)
{}
static DRIVER_ATTR_RW(sync_mode);

/* Global table of registered compression modes (see the comment block below) */
static struct iaa_compression_mode *iaa_compression_modes[IAA_COMP_MODES_MAX];

/* Find a free slot in iaa_compression_modes[] — TODO: body missing */
static int find_empty_iaa_compression_mode(void)
{}

/* Look up a registered mode by @name, returning its slot via @idx —
 * TODO: body missing
 */
static struct iaa_compression_mode *find_iaa_compression_mode(const char *name, int *idx)
{}

/* Release a mode's tables and the mode itself — TODO: body missing */
static void free_iaa_compression_mode(struct iaa_compression_mode *mode)
{}

/*
 * IAA Compression modes are defined by an ll_table and a d_table.
 * These tables are typically generated and captured using statistics
 * collected from running actual compress/decompress workloads.
 *
 * A module or other kernel code can add and remove compression modes
 * with a given name using the exported @add_iaa_compression_mode()
 * and @remove_iaa_compression_mode functions.
 *
 * When a new compression mode is added, the tables are saved in a
 * global compression mode list.  When IAA devices are added, a
 * per-IAA device dma mapping is created for each IAA device, for each
 * compression mode.  These are the tables used to do the actual
 * compression/decompression and are unmapped if/when the devices are
 * removed.  Currently, compression modes must be added before any
 * device is added, and removed after all devices have been removed.
 */

/**
 * remove_iaa_compression_mode - Remove an IAA compression mode
 * @name: The name the compression mode will be known as
 *
 * Remove the IAA compression mode named @name.
 */
/* NOTE(review): body is an empty stub in this copy — implementation missing.
 * The EXPORT_SYMBOL_GPL() below was invoked with no symbol, which does not
 * compile; restore the exported symbol name.
 */
void remove_iaa_compression_mode(const char *name)
{}
EXPORT_SYMBOL_GPL(remove_iaa_compression_mode);

/**
 * add_iaa_compression_mode - Add an IAA compression mode
 * @name: The name the compression mode will be known as
 * @ll_table: The ll table
 * @ll_table_size: The ll table size in bytes
 * @d_table: The d table
 * @d_table_size: The d table size in bytes
 * @init: Optional callback function to init the compression mode data
 * @free: Optional callback function to free the compression mode data
 *
 * Add a new IAA compression mode named @name.
 *
 * Returns 0 if successful, errcode otherwise.
 */
/* NOTE(review): body is an empty stub in this copy — implementation missing
 * (see the kernel-doc above for the intended contract).  The
 * EXPORT_SYMBOL_GPL() below was invoked with no symbol, which does not
 * compile; restore the exported symbol name.
 */
int add_iaa_compression_mode(const char *name,
			     const u32 *ll_table,
			     int ll_table_size,
			     const u32 *d_table,
			     int d_table_size,
			     iaa_dev_comp_init_fn_t init,
			     iaa_dev_comp_free_fn_t free)
{}
EXPORT_SYMBOL_GPL(add_iaa_compression_mode);

/* Return the per-device mode state for slot @idx — TODO: body missing */
static struct iaa_device_compression_mode *
get_iaa_device_compression_mode(struct iaa_device *iaa_device, int idx)
{}

/* Unmap/free a device's per-mode DMA state — TODO: body missing */
static void free_device_compression_mode(struct iaa_device *iaa_device,
					 struct iaa_device_compression_mode *device_mode)
{}

/*
 * AECS descriptor flag combinations.  The original copy defined these
 * macros with no replacement text; restore the upstream values (the
 * IDXD_OP_FLAG_* operands come from the idxd headers).
 */
#define IDXD_OP_FLAG_AECS_RW_TGLS       0x400000
#define IAX_AECS_DEFAULT_FLAG (IDXD_OP_FLAG_CRAV | IDXD_OP_FLAG_RCR | IDXD_OP_FLAG_CC)
#define IAX_AECS_COMPRESS_FLAG	(IAX_AECS_DEFAULT_FLAG | IDXD_OP_FLAG_RD_SRC2_AECS)
#define IAX_AECS_DECOMPRESS_FLAG (IAX_AECS_DEFAULT_FLAG | IDXD_OP_FLAG_RD_SRC2_AECS)
#define IAX_AECS_GEN_FLAG (IAX_AECS_DEFAULT_FLAG | \
						IDXD_OP_FLAG_WR_SRC2_AECS_COMP | \
						IDXD_OP_FLAG_AECS_RW_TGLS)

/* Forward declaration; defined later in this file (note: the definition
 * at the bottom of the file adds 'inline' — harmless but inconsistent).
 */
static int check_completion(struct device *dev,
			    struct iax_completion_record *comp,
			    bool compress,
			    bool only_once);

/* Set up one compression mode's DMA-mapped tables on @iaa_device —
 * TODO: body missing
 */
static int init_device_compression_mode(struct iaa_device *iaa_device,
					struct iaa_compression_mode *mode,
					int idx, struct idxd_wq *wq)
{}

/* Initialize every registered mode for @iaa_device — TODO: body missing */
static int init_device_compression_modes(struct iaa_device *iaa_device,
					 struct idxd_wq *wq)
{}

/* Tear down all per-device mode state — TODO: body missing */
static void remove_device_compression_modes(struct iaa_device *iaa_device)
{}

/*
 * NOTE(review): the device/wq bookkeeping helpers below are all empty
 * stubs in this copy — implementations missing.  Non-void stubs invoke
 * UB if their return value is consumed.
 */

/* Allocate and minimally initialize an iaa_device — TODO: body missing */
static struct iaa_device *iaa_device_alloc(void)
{}

/* Test whether @wq is already tracked by @iaa_device — TODO: body missing */
static bool iaa_has_wq(struct iaa_device *iaa_device, struct idxd_wq *wq)
{}

/* Create an iaa_device for @idxd and link it into iaa_devices —
 * TODO: body missing
 */
static struct iaa_device *add_iaa_device(struct idxd_device *idxd)
{}

/* Finish device init once its first wq is known — TODO: body missing */
static int init_iaa_device(struct iaa_device *iaa_device, struct iaa_wq *iaa_wq)
{}

/* Unlink @iaa_device from the global list — TODO: body missing */
static void del_iaa_device(struct iaa_device *iaa_device)
{}

/* Attach @wq to @iaa_device, returning the new iaa_wq via @new_wq —
 * TODO: body missing
 */
static int add_iaa_wq(struct iaa_device *iaa_device, struct idxd_wq *wq,
		      struct iaa_wq **new_wq)
{}

/* Detach @wq from @iaa_device — TODO: body missing */
static void del_iaa_wq(struct iaa_device *iaa_device, struct idxd_wq *wq)
{}

/* Clear every cpu's wq table entry — TODO: body missing */
static void clear_wq_table(void)
{}

/* Free an iaa_device and everything it owns — TODO: body missing */
static void free_iaa_device(struct iaa_device *iaa_device)
{}

/* Free an iaa_wq without list manipulation — TODO: body missing */
static void __free_iaa_wq(struct iaa_wq *iaa_wq)
{}

/* Free an iaa_wq — TODO: body missing */
static void free_iaa_wq(struct iaa_wq *iaa_wq)
{}

/* Take a reference on the iaa_wq behind @wq — TODO: body missing */
static int iaa_wq_get(struct idxd_wq *wq)
{}

/* Drop a reference on the iaa_wq behind @wq — TODO: body missing */
static int iaa_wq_put(struct idxd_wq *wq)
{}

/* Free the per-cpu wq_table — TODO: body missing */
static void free_wq_table(void)
{}

/* Allocate the per-cpu wq_table sized for @max_wqs — TODO: body missing */
static int alloc_wq_table(int max_wqs)
{}

/* Record @wq on its owning iaa_device (creating one if needed) —
 * TODO: body missing
 */
static int save_iaa_wq(struct idxd_wq *wq)
{}

/* Remove @wq from its iaa_device and the wq table — TODO: body missing */
static void remove_iaa_wq(struct idxd_wq *wq)
{}

/* Populate @cpu's table entry with wqs from IAA instance @iaa —
 * TODO: body missing
 */
static int wq_table_add_wqs(int iaa, int cpu)
{}

/*
 * Rebalance the wq table so that given a cpu, it's easy to find the
 * closest IAA instance.  The idea is to try to choose the most
 * appropriate IAA instance for a caller and spread available
 * workqueues around to clients.
 */
/* TODO: body missing in this copy */
static void rebalance_wq_table(void)
{}

/* Poll/inspect a completion record; @only_once limits polling to a single
 * check — TODO: body missing.  NOTE(review): declared plain 'static' at the
 * top of the file but defined 'static inline' here; harmless, but the two
 * should agree.
 */
static inline int check_completion(struct device *dev,
				   struct iax_completion_record *comp,
				   bool compress,
				   bool only_once)
{}

/* Software-deflate fallback decompress using deflate_generic_tfm —
 * presumably used when hw decompress fails; TODO confirm (body missing)
 */
static int deflate_generic_decompress(struct acomp_req *req)
{}

/* Forward declarations; defined below */
static int iaa_remap_for_verify(struct device *dev, struct iaa_wq *iaa_wq,
				struct acomp_req *req,
				dma_addr_t *src_addr, dma_addr_t *dst_addr);

static int iaa_compress_verify(struct crypto_tfm *tfm, struct acomp_req *req,
			       struct idxd_wq *wq,
			       dma_addr_t src_addr, unsigned int slen,
			       dma_addr_t dst_addr, unsigned int *dlen,
			       u32 compression_crc);

/* idxd descriptor completion callback (async/irq paths) —
 * TODO: body missing
 */
static void iaa_desc_complete(struct idxd_desc *idxd_desc,
			      enum idxd_complete_type comp_type,
			      bool free_desc, void *__ctx,
			      u32 *status)
{}

/* Submit a hw compress of @slen bytes at @src_addr into @dst_addr,
 * reporting the produced length via @dlen and crc via @compression_crc;
 * @disable_async forces synchronous completion — TODO: body missing
 */
static int iaa_compress(struct crypto_tfm *tfm,	struct acomp_req *req,
			struct idxd_wq *wq,
			dma_addr_t src_addr, unsigned int slen,
			dma_addr_t dst_addr, unsigned int *dlen,
			u32 *compression_crc,
			bool disable_async)
{}

/* Remap src/dst for the verify pass (directions swap) — TODO: body missing */
static int iaa_remap_for_verify(struct device *dev, struct iaa_wq *iaa_wq,
				struct acomp_req *req,
				dma_addr_t *src_addr, dma_addr_t *dst_addr)
{}

/* Decompress-and-check pass validating a prior compress against
 * @compression_crc — TODO: body missing
 */
static int iaa_compress_verify(struct crypto_tfm *tfm, struct acomp_req *req,
			       struct idxd_wq *wq,
			       dma_addr_t src_addr, unsigned int slen,
			       dma_addr_t dst_addr, unsigned int *dlen,
			       u32 compression_crc)
{}

/* Submit a hw decompress; @disable_async forces synchronous completion —
 * TODO: body missing
 */
static int iaa_decompress(struct crypto_tfm *tfm, struct acomp_req *req,
			  struct idxd_wq *wq,
			  dma_addr_t src_addr, unsigned int slen,
			  dma_addr_t dst_addr, unsigned int *dlen,
			  bool disable_async)
{}

/* acomp_alg .compress entry point — TODO: body missing */
static int iaa_comp_acompress(struct acomp_req *req)
{}

/* Decompress path that allocates the destination buffer when the caller
 * supplied none — TODO: body missing
 */
static int iaa_comp_adecompress_alloc_dest(struct acomp_req *req)
{}

/* acomp_alg .decompress entry point — TODO: body missing */
static int iaa_comp_adecompress(struct acomp_req *req)
{}

/* Initialize a per-tfm compression context from the module-wide mode
 * settings — TODO: body missing
 */
static void compression_ctx_init(struct iaa_compression_ctx *ctx)
{}

/* acomp_alg .init for the fixed-mode deflate algorithm — TODO: body missing */
static int iaa_comp_init_fixed(struct crypto_acomp *acomp_tfm)
{}

/* Free a driver-allocated destination scatterlist — TODO: body missing */
static void dst_free(struct scatterlist *sgl)
{}

static struct acomp_alg iaa_acomp_fixed_deflate =;

/* Register iaa_acomp_fixed_deflate with the crypto API — TODO: body missing */
static int iaa_register_compression_device(void)
{}

/* Unregister the algorithm registered above — TODO: body missing */
static int iaa_unregister_compression_device(void)
{}

/* idxd subdriver probe: claim a wq and wire it into the driver —
 * TODO: body missing
 */
static int iaa_crypto_probe(struct idxd_dev *idxd_dev)
{}

/* idxd subdriver remove: release the wq claimed in probe —
 * TODO: body missing
 */
static void iaa_crypto_remove(struct idxd_dev *idxd_dev)
{}

static enum idxd_dev_type dev_types[] =;

static struct idxd_device_driver iaa_crypto_driver =;

/* Module entry: register the idxd subdriver, driver attrs, and crypto
 * algorithm — TODO: body missing
 */
static int __init iaa_crypto_init_module(void)
{}

/* Module exit: undo everything done in init — TODO: body missing */
static void __exit iaa_crypto_cleanup_module(void)
{}

MODULE_IMPORT_NS();
MODULE_LICENSE();
MODULE_ALIAS_IDXD_DEVICE();
MODULE_AUTHOR();
MODULE_DESCRIPTION();

module_init();
module_exit(iaa_crypto_cleanup_module);