/* linux/drivers/crypto/caam/caamalg_qi2.c */

// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
 * Copyright 2015-2016 Freescale Semiconductor Inc.
 * Copyright 2017-2019 NXP
 */

#include "compat.h"
#include "regs.h"
#include "caamalg_qi2.h"
#include "dpseci_cmd.h"
#include "desc_constr.h"
#include "error.h"
#include "sg_sw_sec4.h"
#include "sg_sw_qm2.h"
#include "key_gen.h"
#include "caamalg_desc.h"
#include "caamhash_desc.h"
#include "dpseci-debugfs.h"
#include <linux/dma-mapping.h>
#include <linux/fsl/mc.h>
#include <linux/kernel.h>
#include <soc/fsl/dpaa2-io.h>
#include <soc/fsl/dpaa2-fd.h>
#include <crypto/xts.h>
#include <asm/unaligned.h>

/*
 * NOTE(review): the macro values were elided from this view; restored here
 * to the canonical upstream values — an empty #define would silently expand
 * to nothing at every use site. Confirm against the full source.
 */
#define CAAM_CRA_PRIORITY	2000

/* max key is sum of AES_MAX_KEY_SIZE, max split key size */
#define CAAM_MAX_KEY_SIZE	(AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE + \
				 SHA512_DIGEST_SIZE * 2)

/*
 * This is a cache of buffers, from which the users of CAAM QI driver
 * can allocate short buffers. It's speedier than doing kmalloc on the hotpath.
 * NOTE: A more elegant solution would be to have some headroom in the frames
 *       being processed. This can be added by the dpaa2-eth driver. This would
 *       pose a problem for userspace application processing which cannot
 *       know of this limitation. So for now, this will work.
 * NOTE: The memcache is SMP-safe. No need to handle spinlocks in-here
 */
static struct kmem_cache *qi_cache;

/*
 * NOTE(review): the bodies of the struct definitions in this section were
 * elided from this view of the file; only the type names remain. The short
 * comments below are inferred from the names — confirm against the full
 * source.
 */

/* Presumably per-algorithm CAAM parameters shared by all sessions of it. */
struct caam_alg_entry {};

/* Presumably wraps an AEAD algorithm definition with its caam_alg_entry. */
struct caam_aead_alg {};

/* Presumably wraps a skcipher algorithm definition with its caam_alg_entry. */
struct caam_skcipher_alg {};

/**
 * struct caam_ctx - per-session context
 * @flc: Flow Contexts array
 * @key:  [authentication key], encryption key
 * @flc_dma: I/O virtual addresses of the Flow Contexts
 * @key_dma: I/O virtual address of the key
 * @dir: DMA direction for mapping key and Flow Contexts
 * @dev: dpseci device
 * @adata: authentication algorithm details
 * @cdata: encryption algorithm details
 * @authsize: authentication tag (a.k.a. ICV / MAC) size
 * @xts_key_fallback: true if fallback tfm needs to be used due
 *		      to unsupported xts key lengths
 * @fallback: xts fallback tfm
 *
 * NOTE(review): the members documented above are elided from this view;
 * the kernel-doc is kept as the authoritative description of the layout.
 */
struct caam_ctx {};

/*
 * NOTE(review): every function body in this section was elided from this
 * view of the file. The added comments are inferred from names/signatures
 * only — verify against the complete source.
 */

/* Translate a DPSECI-visible I/O virtual address to a CPU virtual address. */
static void *dpaa2_caam_iova_to_virt(struct dpaa2_caam_priv *priv,
				     dma_addr_t iova_addr)
{}

/*
 * qi_cache_zalloc - Allocate buffers from CAAM-QI cache
 *
 * Allocate data on the hotpath. Instead of using kzalloc, one can use the
 * services of the CAAM QI memory cache (backed by kmem_cache). The buffers
 * will have a size of CAAM_QI_MEMCACHE_SIZE, which should be sufficient for
 * hosting 16 SG entries.
 *
 * @flags - flags that would be used for the equivalent kmalloc(..) call
 *
 * Returns a pointer to a retrieved buffer on success or NULL on failure.
 */
static inline void *qi_cache_zalloc(gfp_t flags)
{}

/*
 * qi_cache_free - Frees buffers allocated from CAAM-QI cache
 *
 * @obj - buffer previously allocated by qi_cache_zalloc
 *
 * No checking is being done, the call is a passthrough call to
 * kmem_cache_free(...)
 */
static inline void qi_cache_free(void *obj)
{}

/* Presumably container_of() from the async request to its caam_request. */
static struct caam_request *to_caam_req(struct crypto_async_request *areq)
{}

/*
 * NOTE(review): every function body in this section was elided from this
 * view of the file. The added comments are inferred from names/signatures
 * and kernel crypto API conventions — verify against the complete source.
 */

/* Release the DMA mappings (src/dst S/G lists, IV, QM S/G table) of a job. */
static void caam_unmap(struct device *dev, struct scatterlist *src,
		       struct scatterlist *dst, int src_nents,
		       int dst_nents, dma_addr_t iv_dma, int ivsize,
		       enum dma_data_direction iv_dir, dma_addr_t qm_sg_dma,
		       int qm_sg_bytes)
{}

/* Presumably (re)builds the session's AEAD shared descriptors. */
static int aead_set_sh_desc(struct crypto_aead *aead)
{}

/* AEAD .setauthsize hook: record the ICV length for the session. */
static int aead_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
{}

/* AEAD .setkey hook for authenc-style algorithms (auth key + enc key). */
static int aead_setkey(struct crypto_aead *aead, const u8 *key,
		       unsigned int keylen)
{}

/* Presumably aead_setkey() preceded by 3DES key verification. */
static int des3_aead_setkey(struct crypto_aead *aead, const u8 *key,
			    unsigned int keylen)
{}

/* Presumably allocates/maps the extended descriptor for an AEAD request. */
static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
					   bool encrypt)
{}

/*
 * NOTE(review): every function body in this section was elided from this
 * view of the file. The comments are inferred from the standard kernel
 * crypto AEAD callback naming — verify against the complete source.
 */

/* ChaCha20-Poly1305: presumably builds the shared descriptors. */
static int chachapoly_set_sh_desc(struct crypto_aead *aead)
{}

/* ChaCha20-Poly1305 .setauthsize hook. */
static int chachapoly_setauthsize(struct crypto_aead *aead,
				  unsigned int authsize)
{}

/* ChaCha20-Poly1305 .setkey hook. */
static int chachapoly_setkey(struct crypto_aead *aead, const u8 *key,
			     unsigned int keylen)
{}

/* AES-GCM: presumably builds the shared descriptors. */
static int gcm_set_sh_desc(struct crypto_aead *aead)
{}

/* AES-GCM .setauthsize hook. */
static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
{}

/* AES-GCM .setkey hook. */
static int gcm_setkey(struct crypto_aead *aead,
		      const u8 *key, unsigned int keylen)
{}

/* RFC4106 (GCM for IPsec ESP): presumably builds the shared descriptors. */
static int rfc4106_set_sh_desc(struct crypto_aead *aead)
{}

/* RFC4106 .setauthsize hook. */
static int rfc4106_setauthsize(struct crypto_aead *authenc,
			       unsigned int authsize)
{}

/* RFC4106 .setkey hook (key carries a trailing nonce per the RFC). */
static int rfc4106_setkey(struct crypto_aead *aead,
			  const u8 *key, unsigned int keylen)
{}

/* RFC4543 (GMAC): presumably builds the shared descriptors. */
static int rfc4543_set_sh_desc(struct crypto_aead *aead)
{}

/* RFC4543 .setauthsize hook. */
static int rfc4543_setauthsize(struct crypto_aead *authenc,
			       unsigned int authsize)
{}

/* RFC4543 .setkey hook. */
static int rfc4543_setkey(struct crypto_aead *aead,
			  const u8 *key, unsigned int keylen)
{}

/*
 * NOTE(review): every function body in this section was elided from this
 * view of the file. The comments are inferred from names/signatures —
 * verify against the complete source.
 */

/*
 * Common skcipher key-setting worker; ctx1_iv_off presumably selects the
 * IV offset in Context1 (e.g. for RFC3686 counter mode).
 */
static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
			   unsigned int keylen, const u32 ctx1_iv_off)
{}

/* AES (CBC/ECB) .setkey wrapper. */
static int aes_skcipher_setkey(struct crypto_skcipher *skcipher,
			       const u8 *key, unsigned int keylen)
{}

/* RFC3686 (CTR with nonce) .setkey wrapper. */
static int rfc3686_skcipher_setkey(struct crypto_skcipher *skcipher,
				   const u8 *key, unsigned int keylen)
{}

/* Plain CTR mode .setkey wrapper. */
static int ctr_skcipher_setkey(struct crypto_skcipher *skcipher,
			       const u8 *key, unsigned int keylen)
{}

/* ChaCha20 .setkey wrapper. */
static int chacha20_skcipher_setkey(struct crypto_skcipher *skcipher,
				    const u8 *key, unsigned int keylen)
{}

/* DES .setkey wrapper — presumably includes weak-key verification. */
static int des_skcipher_setkey(struct crypto_skcipher *skcipher,
			       const u8 *key, unsigned int keylen)
{}

/* 3DES .setkey wrapper — presumably includes key verification. */
static int des3_skcipher_setkey(struct crypto_skcipher *skcipher,
			        const u8 *key, unsigned int keylen)
{}

/* XTS .setkey — per caam_ctx doc, may arm a fallback tfm for odd key sizes. */
static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
			       unsigned int keylen)
{}

/* Presumably allocates/maps the extended descriptor for a skcipher request. */
static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req)
{}

/*
 * NOTE(review): every function body in this section was elided from this
 * view of the file. The comments are inferred from names/signatures —
 * verify against the complete source.
 */

/* Undo the DMA mappings taken for an AEAD request's edesc. */
static void aead_unmap(struct device *dev, struct aead_edesc *edesc,
		       struct aead_request *req)
{}

/* Undo the DMA mappings taken for a skcipher request's edesc. */
static void skcipher_unmap(struct device *dev, struct skcipher_edesc *edesc,
			   struct skcipher_request *req)
{}

/* Completion callback for AEAD encryption; status is the HW status word. */
static void aead_encrypt_done(void *cbk_ctx, u32 status)
{}

/* Completion callback for AEAD decryption (ICV check result in status). */
static void aead_decrypt_done(void *cbk_ctx, u32 status)
{}

/* AEAD .encrypt entry point. */
static int aead_encrypt(struct aead_request *req)
{}

/* AEAD .decrypt entry point. */
static int aead_decrypt(struct aead_request *req)
{}

/* rfc4106/rfc4543 (IPsec GCM) .encrypt entry point. */
static int ipsec_gcm_encrypt(struct aead_request *req)
{}

/* rfc4106/rfc4543 (IPsec GCM) .decrypt entry point. */
static int ipsec_gcm_decrypt(struct aead_request *req)
{}

/* Completion callback for skcipher encryption. */
static void skcipher_encrypt_done(void *cbk_ctx, u32 status)
{}

/* Completion callback for skcipher decryption. */
static void skcipher_decrypt_done(void *cbk_ctx, u32 status)
{}

/* Presumably true when the XTS request carries a (non-zero) sector IV. */
static inline bool xts_skcipher_ivsize(struct skcipher_request *req)
{}

/* skcipher .encrypt entry point. */
static int skcipher_encrypt(struct skcipher_request *req)
{}

/* skcipher .decrypt entry point. */
static int skcipher_decrypt(struct skcipher_request *req)
{}

/*
 * NOTE(review): function bodies and the initializers of the two algorithm
 * tables below were elided from this view of the file (the bare "=;" is
 * not valid C as shown). Comments are inferred from names — verify against
 * the complete source.
 */

/* Common per-tfm context setup shared by skcipher and AEAD init paths. */
static int caam_cra_init(struct caam_ctx *ctx, struct caam_alg_entry *caam,
			 bool uses_dkp)
{}

/* skcipher .init_tfm hook. */
static int caam_cra_init_skcipher(struct crypto_skcipher *tfm)
{}

/* AEAD .init_tfm hook. */
static int caam_cra_init_aead(struct crypto_aead *tfm)
{}

/* Common per-tfm context teardown (presumably unmaps key/Flow Contexts). */
static void caam_exit_common(struct caam_ctx *ctx)
{}

/* skcipher .exit_tfm hook. */
static void caam_cra_exit(struct crypto_skcipher *tfm)
{}

/* AEAD .exit_tfm hook. */
static void caam_cra_exit_aead(struct crypto_aead *tfm)
{}

/* Table of supported skcipher algorithms (initializer elided). */
static struct caam_skcipher_alg driver_algs[] =;

/* Table of supported AEAD algorithms (initializer elided). */
static struct caam_aead_alg driver_aeads[] =;

/* Fill in common crypto_alg fields for one skcipher table entry. */
static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg)
{}

/* Fill in common crypto_alg fields for one AEAD table entry. */
static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
{}

/*
 * NOTE(review): the macro values were elided from this view; restored to
 * the conventional CAAM hash values (SHA-512 is the largest supported
 * digest) — an empty #define would expand to nothing at every use site.
 * Confirm against the full source.
 */

/* max hash key is max split key size */
#define CAAM_MAX_HASH_KEY_SIZE		(SHA512_DIGEST_SIZE * 2)

#define CAAM_MAX_HASH_BLOCK_SIZE	SHA512_BLOCK_SIZE

/* caam context sizes for hashes: running digest + 8 */
#define HASH_MSG_LEN			8
#define MAX_CTX_LEN			(HASH_MSG_LEN + SHA512_DIGEST_SIZE)

/*
 * NOTE(review): the enum/struct bodies in this section were elided from
 * this view of the file; only the type names remain. Verify the layouts
 * against the complete source.
 */

/* Presumably distinguishes hash operation types (update/final/finup/...). */
enum hash_optype {};

/**
 * struct caam_hash_ctx - ahash per-session context
 * @flc: Flow Contexts array
 * @key: authentication key
 * @flc_dma: I/O virtual addresses of the Flow Contexts
 * @dev: dpseci device
 * @ctx_len: size of Context Register
 * @adata: hashing algorithm details
 *
 * NOTE(review): members documented above are elided from this view; the
 * kernel-doc is kept as the authoritative description of the layout.
 */
struct caam_hash_ctx {};

/* ahash state */
struct caam_hash_state {};

/* Presumably the layout used by ahash_export()/ahash_import(). */
struct caam_export_state {};

/*
 * NOTE(review): every function body in this section was elided from this
 * view of the file. Comments are inferred from names and the surviving
 * one-line descriptions — verify against the complete source.
 */

/* Map current buffer in state (if length > 0) and put it in link table */
static inline int buf_map_to_qm_sg(struct device *dev,
				   struct dpaa2_sg_entry *qm_sg,
				   struct caam_hash_state *state)
{}

/* Map state->caam_ctx, and add it to link table */
static inline int ctx_map_to_qm_sg(struct device *dev,
				   struct caam_hash_state *state, int ctx_len,
				   struct dpaa2_sg_entry *qm_sg, u32 flag)
{}

/* Presumably (re)builds the session's ahash shared descriptors. */
static int ahash_set_sh_desc(struct crypto_ahash *ahash)
{}

/* Completion context for the asynchronous split-key generation below. */
struct split_key_sh_result {};

/* Completion callback for split-key generation. */
static void split_key_sh_done(void *cbk_ctx, u32 err)
{}

/* Digest hash size if it is too large */
static int hash_digest_key(struct caam_hash_ctx *ctx, u32 *keylen, u8 *key,
			   u32 digestsize)
{}

/* ahash .setkey hook (keyed hashes, e.g. hmac). */
static int ahash_setkey(struct crypto_ahash *ahash, const u8 *key,
			unsigned int keylen)
{}

/*
 * NOTE(review): every function body in this section was elided from this
 * view of the file, as was the driver_hash[] initializer (the bare "=;"
 * is not valid C as shown). Comments are inferred from the standard
 * crypto ahash callback naming — verify against the complete source.
 */

/* Undo the DMA mappings taken for an ahash request's edesc. */
static inline void ahash_unmap(struct device *dev, struct ahash_edesc *edesc,
			       struct ahash_request *req)
{}

/* As ahash_unmap(), additionally handling the context buffer mapping. */
static inline void ahash_unmap_ctx(struct device *dev,
				   struct ahash_edesc *edesc,
				   struct ahash_request *req, u32 flag)
{}

/* Completion callback: plain digest path. */
static void ahash_done(void *cbk_ctx, u32 status)
{}

/* Completion callback: bidirectional context (update) path. */
static void ahash_done_bi(void *cbk_ctx, u32 status)
{}

/* Completion callback: context used as source. */
static void ahash_done_ctx_src(void *cbk_ctx, u32 status)
{}

/* Completion callback: context used as destination. */
static void ahash_done_ctx_dst(void *cbk_ctx, u32 status)
{}

/* .update when a hardware context is already established. */
static int ahash_update_ctx(struct ahash_request *req)
{}

/* .final when a hardware context is already established. */
static int ahash_final_ctx(struct ahash_request *req)
{}

/* .finup when a hardware context is already established. */
static int ahash_finup_ctx(struct ahash_request *req)
{}

/* One-shot .digest entry point. */
static int ahash_digest(struct ahash_request *req)
{}

/* .final when no hardware context exists yet (buffered data only). */
static int ahash_final_no_ctx(struct ahash_request *req)
{}

/* .update before any hardware context exists. */
static int ahash_update_no_ctx(struct ahash_request *req)
{}

/* .finup before any hardware context exists. */
static int ahash_finup_no_ctx(struct ahash_request *req)
{}

/* First .update of a stream: presumably seeds the hardware context. */
static int ahash_update_first(struct ahash_request *req)
{}

/* .finup on the very first chunk of a stream. */
static int ahash_finup_first(struct ahash_request *req)
{}

/* ahash .init hook: reset the software state. */
static int ahash_init(struct ahash_request *req)
{}

/* ahash .update dispatcher (presumably selects a *_ctx/no_ctx/first path). */
static int ahash_update(struct ahash_request *req)
{}

/* ahash .finup dispatcher. */
static int ahash_finup(struct ahash_request *req)
{}

/* ahash .final dispatcher. */
static int ahash_final(struct ahash_request *req)
{}

/* Serialize the in-progress hash state into *out (see caam_export_state). */
static int ahash_export(struct ahash_request *req, void *out)
{}

/* Restore an in-progress hash state previously written by ahash_export(). */
static int ahash_import(struct ahash_request *req, const void *in)
{}

/* Template describing one supported hash algorithm (members elided). */
struct caam_hash_template {};

/* ahash descriptors */
static struct caam_hash_template driver_hash[] =;

/*
 * NOTE(review): bodies elided from this view of the file; comments are
 * inferred from names — verify against the complete source.
 */

/* Registered hash algorithm instance (members elided). */
struct caam_hash_alg {};

/* ahash tfm .cra_init hook. */
static int caam_hash_cra_init(struct crypto_tfm *tfm)
{}

/* ahash tfm .cra_exit hook. */
static void caam_hash_cra_exit(struct crypto_tfm *tfm)
{}

/* Build a caam_hash_alg from a template; keyed selects the hmac variant. */
static struct caam_hash_alg *caam_hash_alloc(struct device *dev,
	struct caam_hash_template *template, bool keyed)
{}

/*
 * NOTE(review): every function body in this section was elided from this
 * view of the file. Comments are inferred from names and DPAA2/DPSECI
 * driver conventions — verify against the complete source.
 */

/* FQ data-availability notification callback from the DPIO service. */
static void dpaa2_caam_fqdan_cb(struct dpaa2_io_notification_ctx *nctx)
{}

/* Set up per-CPU DPIO services/notification contexts. */
static int __cold dpaa2_dpseci_dpio_setup(struct dpaa2_caam_priv *priv)
{}

/* Tear down what dpaa2_dpseci_dpio_setup() created. */
static void __cold dpaa2_dpseci_dpio_free(struct dpaa2_caam_priv *priv)
{}

/* Presumably binds Rx/Tx queues of the DPSECI object to DPIO channels. */
static int dpaa2_dpseci_bind(struct dpaa2_caam_priv *priv)
{}

/* Release congestion-notification resources. */
static void dpaa2_dpseci_congestion_free(struct dpaa2_caam_priv *priv)
{}

/* Release all DPSECI-related resources. */
static void dpaa2_dpseci_free(struct dpaa2_caam_priv *priv)
{}

/* Process one frame descriptor returned by the hardware. */
static void dpaa2_caam_process_fd(struct dpaa2_caam_priv *priv,
				  const struct dpaa2_fd *fd)
{}

/* Pull dequeue from this CPU's frame queue into its store. */
static int dpaa2_caam_pull_fq(struct dpaa2_caam_priv_per_cpu *ppriv)
{}

/* Consume frames accumulated in this CPU's store. */
static int dpaa2_caam_store_consume(struct dpaa2_caam_priv_per_cpu *ppriv)
{}

/* NAPI poll handler: process up to budget frames. */
static int dpaa2_dpseci_poll(struct napi_struct *napi, int budget)
{}

/* Configure congestion-group notifications for the DPSECI object. */
static int dpaa2_dpseci_congestion_setup(struct dpaa2_caam_priv *priv,
					 u16 token)
{}

/* Free the dummy per-CPU netdevs used for NAPI, for the given cpumask. */
static void free_dpaa2_pcpu_netdev(struct dpaa2_caam_priv *priv, const cpumask_t *cpus)
{}

/* Open/configure the DPSECI MC object and its queues. */
static int __cold dpaa2_dpseci_setup(struct fsl_mc_device *ls_dev)
{}

/* Enable the DPSECI object (start NAPI / traffic). */
static int dpaa2_dpseci_enable(struct dpaa2_caam_priv *priv)
{}

/* Disable the DPSECI object (stop NAPI / traffic). */
static int __cold dpaa2_dpseci_disable(struct dpaa2_caam_priv *priv)
{}

/* List of hash algorithms registered by this driver instance. */
static struct list_head hash_list;

/*
 * NOTE(review): bodies elided from this view of the file; comments are
 * inferred from names — verify against the complete source.
 */

/* fsl-mc bus probe: set up DPSECI and register crypto algorithms. */
static int dpaa2_caam_probe(struct fsl_mc_device *dpseci_dev)
{}

/* fsl-mc bus remove: unregister algorithms and free DPSECI resources. */
static void __cold dpaa2_caam_remove(struct fsl_mc_device *ls_dev)
{}

/* Exported entry point: enqueue a caam_request to the DPSECI hardware. */
int dpaa2_caam_enqueue(struct device *dev, struct caam_request *req)
{}
/* Export the enqueue entry point defined directly above. */
EXPORT_SYMBOL(dpaa2_caam_enqueue);

/*
 * NOTE(review): the initializers of the match table and driver struct were
 * elided from this view of the file (the bare "=;" is not valid C as
 * shown) — verify against the complete source.
 */
static const struct fsl_mc_device_id dpaa2_caam_match_id_table[] =;
MODULE_DEVICE_TABLE(fslmc, dpaa2_caam_match_id_table);

static struct fsl_mc_driver dpaa2_caam_driver =;

MODULE_LICENSE();
MODULE_AUTHOR();
MODULE_DESCRIPTION();

module_fsl_mc_driver();