/* linux/drivers/crypto/s5p-sss.c */

// SPDX-License-Identifier: GPL-2.0
//
// Cryptographic API.
//
// Support for Samsung S5PV210 and Exynos HW acceleration.
//
// Copyright (C) 2011 NetUP Inc. All rights reserved.
// Copyright (c) 2017 Samsung Electronics Co., Ltd. All rights reserved.
//
// Hash part based on omap-sham.c driver.

#include <linux/clk.h>
#include <linux/crypto.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>

#include <crypto/ctr.h>
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/scatterwalk.h>

#include <crypto/hash.h>
#include <crypto/md5.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <crypto/internal/hash.h>

/* Shift a bit-field value @v into bit position @s ("set bit-field"). */
#define _SBF(s, v)			((v) << (s))

/* Feed control registers */
#define SSS_REG_FCINTSTAT
#define SSS_FCINTSTAT_HPARTINT
#define SSS_FCINTSTAT_HDONEINT
#define SSS_FCINTSTAT_BRDMAINT
#define SSS_FCINTSTAT_BTDMAINT
#define SSS_FCINTSTAT_HRDMAINT
#define SSS_FCINTSTAT_PKDMAINT

#define SSS_REG_FCINTENSET
#define SSS_FCINTENSET_HPARTINTENSET
#define SSS_FCINTENSET_HDONEINTENSET
#define SSS_FCINTENSET_BRDMAINTENSET
#define SSS_FCINTENSET_BTDMAINTENSET
#define SSS_FCINTENSET_HRDMAINTENSET
#define SSS_FCINTENSET_PKDMAINTENSET

#define SSS_REG_FCINTENCLR
#define SSS_FCINTENCLR_HPARTINTENCLR
#define SSS_FCINTENCLR_HDONEINTENCLR
#define SSS_FCINTENCLR_BRDMAINTENCLR
#define SSS_FCINTENCLR_BTDMAINTENCLR
#define SSS_FCINTENCLR_HRDMAINTENCLR
#define SSS_FCINTENCLR_PKDMAINTENCLR

#define SSS_REG_FCINTPEND
#define SSS_FCINTPEND_HPARTINTP
#define SSS_FCINTPEND_HDONEINTP
#define SSS_FCINTPEND_BRDMAINTP
#define SSS_FCINTPEND_BTDMAINTP
#define SSS_FCINTPEND_HRDMAINTP
#define SSS_FCINTPEND_PKDMAINTP

#define SSS_REG_FCFIFOSTAT
#define SSS_FCFIFOSTAT_BRFIFOFUL
#define SSS_FCFIFOSTAT_BRFIFOEMP
#define SSS_FCFIFOSTAT_BTFIFOFUL
#define SSS_FCFIFOSTAT_BTFIFOEMP
#define SSS_FCFIFOSTAT_HRFIFOFUL
#define SSS_FCFIFOSTAT_HRFIFOEMP
#define SSS_FCFIFOSTAT_PKFIFOFUL
#define SSS_FCFIFOSTAT_PKFIFOEMP

#define SSS_REG_FCFIFOCTRL
#define SSS_FCFIFOCTRL_DESSEL
#define SSS_HASHIN_INDEPENDENT
#define SSS_HASHIN_CIPHER_INPUT
#define SSS_HASHIN_CIPHER_OUTPUT
#define SSS_HASHIN_MASK

#define SSS_REG_FCBRDMAS
#define SSS_REG_FCBRDMAL
#define SSS_REG_FCBRDMAC
#define SSS_FCBRDMAC_BYTESWAP
#define SSS_FCBRDMAC_FLUSH

#define SSS_REG_FCBTDMAS
#define SSS_REG_FCBTDMAL
#define SSS_REG_FCBTDMAC
#define SSS_FCBTDMAC_BYTESWAP
#define SSS_FCBTDMAC_FLUSH

#define SSS_REG_FCHRDMAS
#define SSS_REG_FCHRDMAL
#define SSS_REG_FCHRDMAC
#define SSS_FCHRDMAC_BYTESWAP
#define SSS_FCHRDMAC_FLUSH

#define SSS_REG_FCPKDMAS
#define SSS_REG_FCPKDMAL
#define SSS_REG_FCPKDMAC
#define SSS_FCPKDMAC_BYTESWAP
#define SSS_FCPKDMAC_DESCEND
#define SSS_FCPKDMAC_TRANSMIT
#define SSS_FCPKDMAC_FLUSH

#define SSS_REG_FCPKDMAO

/* AES registers */
#define SSS_REG_AES_CONTROL
#define SSS_AES_BYTESWAP_DI
#define SSS_AES_BYTESWAP_DO
#define SSS_AES_BYTESWAP_IV
#define SSS_AES_BYTESWAP_CNT
#define SSS_AES_BYTESWAP_KEY
#define SSS_AES_KEY_CHANGE_MODE
#define SSS_AES_KEY_SIZE_128
#define SSS_AES_KEY_SIZE_192
#define SSS_AES_KEY_SIZE_256
#define SSS_AES_FIFO_MODE
#define SSS_AES_CHAIN_MODE_ECB
#define SSS_AES_CHAIN_MODE_CBC
#define SSS_AES_CHAIN_MODE_CTR
#define SSS_AES_MODE_DECRYPT

#define SSS_REG_AES_STATUS
#define SSS_AES_BUSY
#define SSS_AES_INPUT_READY
#define SSS_AES_OUTPUT_READY

#define SSS_REG_AES_IN_DATA(s)
#define SSS_REG_AES_OUT_DATA(s)
#define SSS_REG_AES_IV_DATA(s)
#define SSS_REG_AES_CNT_DATA(s)
#define SSS_REG_AES_KEY_DATA(s)

#define SSS_REG(dev, reg)
#define SSS_READ(dev, reg)
#define SSS_WRITE(dev, reg, val)

#define SSS_AES_REG(dev, reg)
#define SSS_AES_WRITE(dev, reg, val)

/* HW engine modes */
#define FLAGS_AES_DECRYPT
#define FLAGS_AES_MODE_MASK
#define FLAGS_AES_CBC
#define FLAGS_AES_CTR

#define AES_KEY_LEN
#define CRYPTO_QUEUE_LEN

/* HASH registers */
#define SSS_REG_HASH_CTRL

#define SSS_HASH_USER_IV_EN
#define SSS_HASH_INIT_BIT
#define SSS_HASH_ENGINE_SHA1
#define SSS_HASH_ENGINE_MD5
#define SSS_HASH_ENGINE_SHA256

#define SSS_HASH_ENGINE_MASK

#define SSS_REG_HASH_CTRL_PAUSE

#define SSS_HASH_PAUSE

#define SSS_REG_HASH_CTRL_FIFO

#define SSS_HASH_FIFO_MODE_DMA
#define SSS_HASH_FIFO_MODE_CPU

#define SSS_REG_HASH_CTRL_SWAP

#define SSS_HASH_BYTESWAP_DI
#define SSS_HASH_BYTESWAP_DO
#define SSS_HASH_BYTESWAP_IV
#define SSS_HASH_BYTESWAP_KEY

#define SSS_REG_HASH_STATUS

#define SSS_HASH_STATUS_MSG_DONE
#define SSS_HASH_STATUS_PARTIAL_DONE
#define SSS_HASH_STATUS_BUFFER_READY

#define SSS_REG_HASH_MSG_SIZE_LOW
#define SSS_REG_HASH_MSG_SIZE_HIGH

#define SSS_REG_HASH_PRE_MSG_SIZE_LOW
#define SSS_REG_HASH_PRE_MSG_SIZE_HIGH

#define SSS_REG_HASH_IV(s)
#define SSS_REG_HASH_OUT(s)

#define HASH_BLOCK_SIZE
#define HASH_REG_SIZEOF
#define HASH_MD5_MAX_REG
#define HASH_SHA1_MAX_REG
#define HASH_SHA256_MAX_REG

/*
 * HASH bit numbers, used by the device; set in dev->hash_flags with
 * set_bit()/clear_bit(), or tested with test_bit() together with BIT(),
 * to keep the HASH state BUSY or FREE, or to signal state from the
 * irq_handler to the hash_tasklet. The SGS flags keep track of memory
 * allocated for scatterlists.
 */
#define HASH_FLAGS_BUSY
#define HASH_FLAGS_FINAL
#define HASH_FLAGS_DMA_ACTIVE
#define HASH_FLAGS_OUTPUT_READY
#define HASH_FLAGS_DMA_READY
#define HASH_FLAGS_SGS_COPIED
#define HASH_FLAGS_SGS_ALLOCED

/* HASH HW constants */
#define BUFLEN

#define SSS_HASH_QUEUE_LENGTH

/**
 * struct samsung_aes_variant - platform specific SSS driver data
 * @aes_offset: AES register offset from SSS module's base.
 * @hash_offset: HASH register offset from SSS module's base.
 * @clk_names: names of clocks needed to run SSS IP
 *
 * Specifies platform specific configuration of SSS module.
 * Note: A structure for driver specific platform data is used for future
 * expansion of its usage.
 */
struct samsung_aes_variant {};

/* Per-request AES context — body elided in this outline. */
struct s5p_aes_reqctx {};

/* Per-transformation AES context — presumably holds the key; body elided in this outline. */
struct s5p_aes_ctx {};

/**
 * struct s5p_aes_dev - Crypto device state container
 * @dev:	Associated device
 * @clk:	Clock for accessing hardware
 * @pclk:	APB bus clock necessary to access the hardware
 * @ioaddr:	Mapped IO memory region
 * @aes_ioaddr:	Per-variant offset for AES block IO memory
 * @irq_fc:	Feed control interrupt line
 * @req:	Crypto request currently handled by the device
 * @ctx:	Configuration for currently handled crypto request
 * @sg_src:	Scatter list with source data for currently handled block
 *		in device.  This is DMA-mapped into device.
 * @sg_dst:	Scatter list with destination data for currently handled block
 *		in device. This is DMA-mapped into device.
 * @sg_src_cpy:	In case of unaligned access, copied scatter list
 *		with source data.
 * @sg_dst_cpy:	In case of unaligned access, copied scatter list
 *		with destination data.
 * @tasklet:	New request scheduling job
 * @queue:	Crypto queue
 * @busy:	Indicates whether the device is currently handling some request
 *		thus it uses some of the fields from this state, like:
 *		req, ctx, sg_src/dst (and copies).  This essentially
 *		protects against concurrent access to these fields.
 * @lock:	Lock for protecting both access to device hardware registers
 *		and fields related to current request (including the busy field).
 * @res:	Resources for hash.
 * @io_hash_base: Per-variant offset for HASH block IO memory.
 * @hash_lock:	Lock for protecting hash_req, hash_queue and hash_flags
 *		variable.
 * @hash_flags:	Flags for current HASH op.
 * @hash_queue:	Async hash queue.
 * @hash_tasklet: New HASH request scheduling job.
 * @xmit_buf:	Buffer for current HASH request transfer into SSS block.
 * @hash_req:	Current request sending to SSS HASH block.
 * @hash_sg_iter: Scatterlist transferred through DMA into SSS HASH block.
 * @hash_sg_cnt: Counter for hash_sg_iter.
 *
 * @use_hash:	true if HASH algs enabled
 */
struct s5p_aes_dev {};

/**
 * struct s5p_hash_reqctx - HASH request context
 * @dd:		Associated device
 * @op_update:	Current request operation (OP_UPDATE or OP_FINAL)
 * @digcnt:	Number of bytes processed by HW (without buffer[] ones)
 * @digest:	Digest message or IV for partial result
 * @nregs:	Number of HW registers for digest or IV read/write
 * @engine:	Bits for selecting type of HASH in SSS block
 * @sg:		sg for DMA transfer
 * @sg_len:	Length of sg for DMA transfer
 * @sgl:	sg for joining buffer and req->src scatterlist
 * @skip:	Skip offset in req->src for current op
 * @total:	Total number of bytes for current request
 * @finup:	Keep state for finup or final.
 * @error:	Keep track of error.
 * @bufcnt:	Number of bytes held in buffer[]
 * @buffer:	For byte(s) from end of req->src in UPDATE op
 */
struct s5p_hash_reqctx {};

/**
 * struct s5p_hash_ctx - HASH transformation context
 * @dd:		Associated device
 * @flags:	Bits for algorithm HASH.
 * @fallback:	Software transformation for zero message or size < BUFLEN.
 */
struct s5p_hash_ctx {};

static const struct samsung_aes_variant s5p_aes_data =;

static const struct samsung_aes_variant exynos_aes_data =;

static const struct samsung_aes_variant exynos5433_slim_aes_data =;

static const struct of_device_id s5p_sss_dt_match[] =;
MODULE_DEVICE_TABLE(of, s5p_sss_dt_match);

/*
 * Return the samsung_aes_variant platform data matching @pdev, presumably
 * via of_device_id lookup — body elided in this outline.
 */
static inline const struct samsung_aes_variant *find_s5p_sss_version
				   (const struct platform_device *pdev)
{}

static struct s5p_aes_dev *s5p_dev;

/* Program the input (receive) DMA with the address/length of @sg — body elided in this outline. */
static void s5p_set_dma_indata(struct s5p_aes_dev *dev,
			       const struct scatterlist *sg)
{}

/* Program the output (transmit) DMA with the address/length of @sg — body elided in this outline. */
static void s5p_set_dma_outdata(struct s5p_aes_dev *dev,
				const struct scatterlist *sg)
{}

/* Free a scatterlist copy made for unaligned access and reset *sg — body elided in this outline. */
static void s5p_free_sg_cpy(struct s5p_aes_dev *dev, struct scatterlist **sg)
{}

/* Copy @nbytes between @buf and @sg; @out presumably selects the direction — body elided in this outline. */
static void s5p_sg_copy_buf(void *buf, struct scatterlist *sg,
			    unsigned int nbytes, int out)
{}

/* Finish scatterlist handling for the completed request — body elided in this outline. */
static void s5p_sg_done(struct s5p_aes_dev *dev)
{}

/* Calls the completion. Cannot be called with dev->lock hold. */
static void s5p_aes_complete(struct skcipher_request *req, int err)
{}

/* DMA-unmap the current destination scatterlist — body elided in this outline. */
static void s5p_unset_outdata(struct s5p_aes_dev *dev)
{}

/* DMA-unmap the current source scatterlist — body elided in this outline. */
static void s5p_unset_indata(struct s5p_aes_dev *dev)
{}

/*
 * Allocate *dst and copy the data of @src into it (fallback for unaligned
 * access); presumably returns 0 or -ERRNO — body elided in this outline.
 */
static int s5p_make_sg_cpy(struct s5p_aes_dev *dev, struct scatterlist *src,
			   struct scatterlist **dst)
{}

/* DMA-map @sg as the destination; presumably returns 0 or -ERRNO — body elided in this outline. */
static int s5p_set_outdata(struct s5p_aes_dev *dev, struct scatterlist *sg)
{}

/* DMA-map @sg as the source; presumably returns 0 or -ERRNO — body elided in this outline. */
static int s5p_set_indata(struct s5p_aes_dev *dev, struct scatterlist *sg)
{}

/*
 * Returns -ERRNO on error (mapping of new data failed).
 * On success returns:
 *  - 0 if there is no more data,
 *  - 1 if new transmitting (output) data is ready and its address+length
 *     have to be written to device (by calling s5p_set_dma_outdata()).
 */
static int s5p_aes_tx(struct s5p_aes_dev *dev)
{}

/*
 * Returns -ERRNO on error (mapping of new data failed).
 * On success returns:
 *  - 0 if there is no more data,
 *  - 1 if new receiving (input) data is ready and its address+length
 *     have to be written to device (by calling s5p_set_dma_indata()).
 */
static int s5p_aes_rx(struct s5p_aes_dev *dev/*, bool *set_dma*/)
{}

/* Read the HASH block register at @offset — body elided in this outline. */
static inline u32 s5p_hash_read(struct s5p_aes_dev *dd, u32 offset)
{}

/* Write @value to the HASH block register at @offset — body elided in this outline. */
static inline void s5p_hash_write(struct s5p_aes_dev *dd,
				  u32 offset, u32 value)
{}

/**
 * s5p_set_dma_hashdata() - start DMA with sg
 * @dev:	device
 * @sg:		scatterlist ready to DMA transmit
 */
static void s5p_set_dma_hashdata(struct s5p_aes_dev *dev,
				 const struct scatterlist *sg)
{}

/**
 * s5p_hash_rx() - get next hash_sg_iter
 * @dev:	device
 *
 * Return:
 * 2	if there is no more data and it is UPDATE op
 * 1	if new receiving (input) data is ready and can be written to device
 * 0	if there is no more data and it is FINAL op
 */
static int s5p_hash_rx(struct s5p_aes_dev *dev)
{}

/* Feed-control IRQ handler — presumably services AES/HASH DMA interrupts; body elided in this outline. */
static irqreturn_t s5p_aes_interrupt(int irq, void *dev_id)
{}

/**
 * s5p_hash_read_msg() - read message or IV from HW
 * @req:	AHASH request
 */
static void s5p_hash_read_msg(struct ahash_request *req)
{}

/**
 * s5p_hash_write_ctx_iv() - write IV for next partial/finup op.
 * @dd:		device
 * @ctx:	request context
 */
static void s5p_hash_write_ctx_iv(struct s5p_aes_dev *dd,
				  const struct s5p_hash_reqctx *ctx)
{}

/**
 * s5p_hash_write_iv() - write IV for next partial/finup op.
 * @req:	AHASH request
 */
static void s5p_hash_write_iv(struct ahash_request *req)
{}

/**
 * s5p_hash_copy_result() - copy digest into req->result
 * @req:	AHASH request
 */
static void s5p_hash_copy_result(struct ahash_request *req)
{}

/**
 * s5p_hash_dma_flush() - flush HASH DMA
 * @dev:	secss device
 */
static void s5p_hash_dma_flush(struct s5p_aes_dev *dev)
{}

/**
 * s5p_hash_dma_enable() - enable DMA mode for HASH
 * @dev:	secss device
 *
 * enable DMA mode for HASH
 */
static void s5p_hash_dma_enable(struct s5p_aes_dev *dev)
{}

/**
 * s5p_hash_irq_disable() - disable irq HASH signals
 * @dev:	secss device
 * @flags:	bitfield with irq's to be disabled
 */
static void s5p_hash_irq_disable(struct s5p_aes_dev *dev, u32 flags)
{}

/**
 * s5p_hash_irq_enable() - enable irq signals
 * @dev:	secss device
 * @flags:	bitfield with irq's to be enabled
 */
static void s5p_hash_irq_enable(struct s5p_aes_dev *dev, int flags)
{}

/**
 * s5p_hash_set_flow() - set flow inside SecSS AES/DES with/without HASH
 * @dev:	secss device
 * @hashflow:	HASH stream flow with/without crypto AES/DES
 */
static void s5p_hash_set_flow(struct s5p_aes_dev *dev, u32 hashflow)
{}

/**
 * s5p_ahash_dma_init() - enable DMA and set HASH flow inside SecSS
 * @dev:	secss device
 * @hashflow:	HASH stream flow with/without AES/DES
 *
 * flush HASH DMA and enable DMA, set HASH stream flow inside SecSS HW,
 * enable HASH irq's HRDMA, HDONE, HPART
 */
static void s5p_ahash_dma_init(struct s5p_aes_dev *dev, u32 hashflow)
{}

/**
 * s5p_hash_write_ctrl() - prepare HASH block in SecSS for processing
 * @dd:		secss device
 * @length:	length for request
 * @final:	true if final op
 *
 * Prepare SSS HASH block for processing bytes in DMA mode. If it is called
 * after previous updates, fill up IV words. For final, calculate and set
 * lengths for HASH so SecSS can finalize hash. For partial, set SSS HASH
 * length as 2^63 so it will be never reached and set to zero prelow and
 * prehigh.
 *
 * This function does not start DMA transfer.
 */
static void s5p_hash_write_ctrl(struct s5p_aes_dev *dd, size_t length,
				bool final)
{}

/**
 * s5p_hash_xmit_dma() - start DMA hash processing
 * @dd:		secss device
 * @length:	length for request
 * @final:	true if final op
 *
 * Update digcnt here, as it is needed for finup/final op.
 */
static int s5p_hash_xmit_dma(struct s5p_aes_dev *dd, size_t length,
			     bool final)
{}

/**
 * s5p_hash_copy_sgs() - copy request's bytes into new buffer
 * @ctx:	request context
 * @sg:		source scatterlist request
 * @new_len:	number of bytes to process from sg
 *
 * Allocate new buffer, copy data for HASH into it. If there was xmit_buf
 * filled, copy it first, then copy data from sg into it. Prepare one sgl[0]
 * with allocated buffer.
 *
 * Set bit in dd->hash_flag so we can free it after irq ends processing.
 */
static int s5p_hash_copy_sgs(struct s5p_hash_reqctx *ctx,
			     struct scatterlist *sg, unsigned int new_len)
{}

/**
 * s5p_hash_copy_sg_lists() - copy sg list and make fixes in copy
 * @ctx:	request context
 * @sg:		source scatterlist request
 * @new_len:	number of bytes to process from sg
 *
 * Allocate new scatterlist table, copy data for HASH into it. If there was
 * xmit_buf filled, prepare it first, then copy page, length and offset from
 * source sg into it, adjusting begin and/or end for skip offset and
 * hash_later value.
 *
 * Resulting sg table will be assigned to ctx->sg. Set flag so we can free
 * it after irq ends processing.
 */
static int s5p_hash_copy_sg_lists(struct s5p_hash_reqctx *ctx,
				  struct scatterlist *sg, unsigned int new_len)
{}

/**
 * s5p_hash_prepare_sgs() - prepare sg for processing
 * @ctx:	request context
 * @sg:		source scatterlist request
 * @new_len:	number of bytes to process from sg
 * @final:	final flag
 *
 * Check two conditions: (1) whether the buffers in sg hold length-aligned
 * data, and (2) whether the sg table has well-aligned elements (list_ok).
 * If one of these checks fails, then either (1) allocate a new buffer for
 * the data with s5p_hash_copy_sgs, copy the data into this buffer and
 * prepare the request in sgl, or (2) allocate a new sg table and prepare
 * its sg elements.
 *
 * For digest or finup all conditions can be good, and we may not need any
 * fixes.
 */
static int s5p_hash_prepare_sgs(struct s5p_hash_reqctx *ctx,
				struct scatterlist *sg,
				unsigned int new_len, bool final)
{}

/**
 * s5p_hash_prepare_request() - prepare request for processing
 * @req:	AHASH request
 * @update:	true if UPDATE op
 *
 * Note 1: we can have update flag _and_ final flag at the same time.
 * Note 2: we enter here when digcnt > BUFLEN (=HASH_BLOCK_SIZE) or
 *	   either req->nbytes or ctx->bufcnt + req->nbytes is > BUFLEN or
 *	   we have final op
 */
static int s5p_hash_prepare_request(struct ahash_request *req, bool update)
{}

/**
 * s5p_hash_update_dma_stop() - unmap DMA
 * @dd:		secss device
 *
 * Unmap scatterlist ctx->sg.
 */
static void s5p_hash_update_dma_stop(struct s5p_aes_dev *dd)
{}

/**
 * s5p_hash_finish() - copy calculated digest to crypto layer
 * @req:	AHASH request
 */
static void s5p_hash_finish(struct ahash_request *req)
{}

/**
 * s5p_hash_finish_req() - finish request
 * @req:	AHASH request
 * @err:	error
 */
static void s5p_hash_finish_req(struct ahash_request *req, int err)
{}

/**
 * s5p_hash_handle_queue() - handle hash queue
 * @dd:		device s5p_aes_dev
 * @req:	AHASH request
 *
 * If req!=NULL enqueue it on dd->queue, if FLAGS_BUSY is not set on the
 * device then processes the first request from the dd->queue
 *
 * Returns: see s5p_hash_final below.
 */
static int s5p_hash_handle_queue(struct s5p_aes_dev *dd,
				 struct ahash_request *req)
{}

/**
 * s5p_hash_tasklet_cb() - hash tasklet
 * @data:	ptr to s5p_aes_dev
 */
static void s5p_hash_tasklet_cb(unsigned long data)
{}

/**
 * s5p_hash_enqueue() - enqueue request
 * @req:	AHASH request
 * @op:		operation UPDATE (true) or FINAL (false)
 *
 * Returns: see s5p_hash_final below.
 */
static int s5p_hash_enqueue(struct ahash_request *req, bool op)
{}

/**
 * s5p_hash_update() - process the hash input data
 * @req:	AHASH request
 *
 * If request will fit in buffer, copy it and return immediately
 * else enqueue it with OP_UPDATE.
 *
 * Returns: see s5p_hash_final below.
 */
static int s5p_hash_update(struct ahash_request *req)
{}

/**
 * s5p_hash_final() - close up hash and calculate digest
 * @req:	AHASH request
 *
 * Note: in final req->src do not have any data, and req->nbytes can be
 * non-zero.
 *
 * If there were no input data processed yet and the buffered hash data is
 * less than BUFLEN (64) then calculate the final hash immediately by using
 * SW algorithm fallback.
 *
 * Otherwise enqueues the current AHASH request with OP_FINAL operation op
 * and finalize hash message in HW. Note that if digcnt!=0 then there were
 * previous update op, so there are always some buffered bytes in ctx->buffer,
 * which means that ctx->bufcnt!=0
 *
 * Returns:
 * 0 if the request has been processed immediately,
 * -EINPROGRESS if the operation has been queued for later execution or is set
 *		to processing by HW,
 * -EBUSY if queue is full and request should be resubmitted later,
 * other negative values denotes an error.
 */
static int s5p_hash_final(struct ahash_request *req)
{}

/**
 * s5p_hash_finup() - process last req->src and calculate digest
 * @req:	AHASH request containing the last update data
 *
 * Return values: see s5p_hash_final above.
 */
static int s5p_hash_finup(struct ahash_request *req)
{}

/**
 * s5p_hash_init() - initialize AHASH request context
 * @req:	AHASH request
 *
 * Init async hash request context.
 */
static int s5p_hash_init(struct ahash_request *req)
{}

/**
 * s5p_hash_digest - calculate digest from req->src
 * @req:	AHASH request
 *
 * Return values: see s5p_hash_final above.
 */
static int s5p_hash_digest(struct ahash_request *req)
{}

/**
 * s5p_hash_cra_init_alg - init crypto alg transformation
 * @tfm:	crypto transformation
 */
static int s5p_hash_cra_init_alg(struct crypto_tfm *tfm)
{}

/**
 * s5p_hash_cra_init - init crypto tfm
 * @tfm:	crypto transformation
 */
static int s5p_hash_cra_init(struct crypto_tfm *tfm)
{}

/**
 * s5p_hash_cra_exit - exit crypto tfm
 * @tfm:	crypto transformation
 *
 * free allocated fallback
 */
static void s5p_hash_cra_exit(struct crypto_tfm *tfm)
{}

/**
 * s5p_hash_export - export hash state
 * @req:	AHASH request
 * @out:	buffer for exported state
 */
static int s5p_hash_export(struct ahash_request *req, void *out)
{}

/**
 * s5p_hash_import - import hash state
 * @req:	AHASH request
 * @in:		buffer with state to be imported from
 */
static int s5p_hash_import(struct ahash_request *req, const void *in)
{}

static struct ahash_alg algs_sha1_md5_sha256[] =;

/* Load @key (and @iv/@ctr when given) into the AES hardware registers — body elided in this outline. */
static void s5p_set_aes(struct s5p_aes_dev *dev,
			const u8 *key, const u8 *iv, const u8 *ctr,
			unsigned int keylen)
{}

/* Check whether every entry of @sg is suitably aligned for DMA — body elided in this outline. */
static bool s5p_is_sg_aligned(struct scatterlist *sg)
{}

/* Prepare the source scatterlist of @req for DMA, presumably copying when unaligned — body elided in this outline. */
static int s5p_set_indata_start(struct s5p_aes_dev *dev,
				struct skcipher_request *req)
{}

/* Prepare the destination scatterlist of @req for DMA, presumably copying when unaligned — body elided in this outline. */
static int s5p_set_outdata_start(struct s5p_aes_dev *dev,
				 struct skcipher_request *req)
{}

/* Configure the hardware and start the AES operation described by @mode — body elided in this outline. */
static void s5p_aes_crypt_start(struct s5p_aes_dev *dev, unsigned long mode)
{}

/* Tasklet: presumably dequeues the next crypto request and starts it — body elided in this outline. */
static void s5p_tasklet_cb(unsigned long data)
{}

/* Enqueue @req on the device queue and schedule processing — body elided in this outline. */
static int s5p_aes_handle_req(struct s5p_aes_dev *dev,
			      struct skcipher_request *req)
{}

/* Common entry for all skcipher ops: hand @req with @mode to the device — body elided in this outline. */
static int s5p_aes_crypt(struct skcipher_request *req, unsigned long mode)
{}

/* Store @key of @keylen bytes in the transformation context — body elided in this outline. */
static int s5p_aes_setkey(struct crypto_skcipher *cipher,
			  const u8 *key, unsigned int keylen)
{}

/* skcipher .encrypt entry point for ECB mode — body elided in this outline. */
static int s5p_aes_ecb_encrypt(struct skcipher_request *req)
{}

/* skcipher .decrypt entry point for ECB mode — body elided in this outline. */
static int s5p_aes_ecb_decrypt(struct skcipher_request *req)
{}

/* skcipher .encrypt entry point for CBC mode — body elided in this outline. */
static int s5p_aes_cbc_encrypt(struct skcipher_request *req)
{}

/* skcipher .decrypt entry point for CBC mode — body elided in this outline. */
static int s5p_aes_cbc_decrypt(struct skcipher_request *req)
{}

/* skcipher entry point for CTR mode (encrypt == decrypt in CTR) — body elided in this outline. */
static int s5p_aes_ctr_crypt(struct skcipher_request *req)
{}

/* Per-tfm init — presumably sets the request context size; body elided in this outline. */
static int s5p_aes_init_tfm(struct crypto_skcipher *tfm)
{}

static struct skcipher_alg algs[] =;

/*
 * Platform driver probe — presumably maps registers, acquires clocks and the
 * IRQ, and registers the crypto algorithms; body elided in this outline.
 */
static int s5p_aes_probe(struct platform_device *pdev)
{}

/* Undo probe: presumably unregisters algorithms and releases resources — body elided in this outline. */
static void s5p_aes_remove(struct platform_device *pdev)
{}

static struct platform_driver s5p_aes_crypto =;

module_platform_driver();

MODULE_DESCRIPTION();
MODULE_LICENSE();
MODULE_AUTHOR();
MODULE_AUTHOR();