/* linux/drivers/crypto/ccree/cc_request_mgr.c */

// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */

#include <linux/kernel.h>
#include <linux/nospec.h>
#include "cc_driver.h"
#include "cc_buffer_mgr.h"
#include "cc_request_mgr.h"
#include "cc_pm.h"

/* Upper bound on completion-queue poll iterations per pass.
 * NOTE(review): the macro value is missing in this copy — the constant
 * appears to have been elided; restore the numeric limit before building.
 */
#define CC_MAX_POLL_ITER
/* The highest descriptor count in use */
/* NOTE(review): value elided — must expand to an integer constant. */
#define CC_MAX_DESC_SEQ_LEN

/* Per-device request-manager state (HW queue bookkeeping, completion
 * tracking, backlog list, etc.).
 * NOTE(review): members are elided in this copy — struct body is empty. */
struct cc_req_mgr_handle {};

/* Backlog entry queued by cc_enqueue_backlog() and drained by
 * cc_proc_backlog() (see those functions below).
 * NOTE(review): members are elided in this copy — struct body is empty. */
struct cc_bl_item {};

/* Per-algorithm, per-slot CPP interrupt masks consumed by cc_cpp_int_mask().
 * NOTE(review): the initializer list has been elided ("=;" here is a syntax
 * error, not valid C) — the original mask table must be restored. */
static const u32 cc_cpp_int_masks[CC_CPP_NUM_ALGS][CC_CPP_NUM_SLOTS] =;

/* Forward declarations: the completion tasklet, plus its workqueue
 * trampoline when deferred completion runs from a workqueue (COMP_IN_WQ). */
static void comp_handler(unsigned long devarg);
#ifdef COMP_IN_WQ
static void comp_work_handler(struct work_struct *work);
#endif

/* Look up the interrupt mask for (alg, slot) — presumably an indexed read of
 * cc_cpp_int_masks[] above; body is elided here, so this cannot be confirmed.
 * NOTE(review): empty body — implementation must be restored. */
static inline u32 cc_cpp_int_mask(enum cc_cpp_alg alg, int slot)
{}

/* Tear down the request manager for @drvdata — counterpart of
 * cc_req_mgr_init() below.
 * NOTE(review): empty body — implementation must be restored. */
void cc_req_mgr_fini(struct cc_drvdata *drvdata)
{}

/* Allocate and initialize the request-manager handle for @drvdata.
 * Returns 0 on success, negative errno otherwise (kernel convention —
 * NOTE(review): body elided, verify against the original implementation). */
int cc_req_mgr_init(struct cc_drvdata *drvdata)
{}

/* Push @seq_len HW descriptors from @seq[] into the device queue.
 * NOTE(review): empty body — implementation must be restored. */
static void enqueue_seq(struct cc_drvdata *drvdata, struct cc_hw_desc seq[],
			unsigned int seq_len)
{}

/**
 * request_mgr_complete() - Completion will take place if and only if user
 * requested completion by cc_send_sync_request().
 *
 * @dev: Device pointer
 * @dx_compl_h: The completion event to signal
 * @dummy: unused error code
 */
/* NOTE(review): empty body — implementation must be restored. */
static void request_mgr_complete(struct device *dev, void *dx_compl_h,
				 int dummy)
{}

/* Check whether the HW queue can accept @total_seq_len more descriptor
 * words — presumably returns 0 when there is room and an error when full;
 * body is elided here, so this cannot be confirmed.
 * NOTE(review): empty body — implementation must be restored. */
static int cc_queues_status(struct cc_drvdata *drvdata,
			    struct cc_req_mgr_handle *req_mgr_h,
			    unsigned int total_seq_len)
{}

/**
 * cc_do_send_request() - Enqueue caller request to crypto hardware.
 * Need to be called with HW lock held and PM running
 *
 * @drvdata: Associated device driver context
 * @cc_req: The request to enqueue
 * @desc: The crypto sequence
 * @len: The crypto sequence length
 * @add_comp: If "true": add an artificial dout DMA to mark completion
 *
 */
/* NOTE(review): empty body — implementation must be restored. */
static void cc_do_send_request(struct cc_drvdata *drvdata,
			       struct cc_crypto_req *cc_req,
			       struct cc_hw_desc *desc, unsigned int len,
			       bool add_comp)
{}

/* Append @bli to the request manager's backlog list, to be drained later
 * by cc_proc_backlog().
 * NOTE(review): empty body — implementation must be restored. */
static void cc_enqueue_backlog(struct cc_drvdata *drvdata,
			       struct cc_bl_item *bli)
{}

/* Drain the backlog: submit queued cc_bl_item entries to the HW once the
 * queue has room — name-based inference; body is elided here.
 * NOTE(review): empty body — implementation must be restored. */
static void cc_proc_backlog(struct cc_drvdata *drvdata)
{}

/* Asynchronous submission entry point: enqueue @cc_req / @desc[@len] on
 * behalf of crypto-API request @req. Presumably returns -EINPROGRESS or
 * -EBUSY per crypto-API convention — body elided, cannot confirm.
 * NOTE(review): empty body — implementation must be restored. */
int cc_send_request(struct cc_drvdata *drvdata, struct cc_crypto_req *cc_req,
		    struct cc_hw_desc *desc, unsigned int len,
		    struct crypto_async_request *req)
{}

/* Synchronous submission: enqueue the sequence and block until completion
 * (completion is signalled via request_mgr_complete(), per its kernel-doc
 * above). NOTE(review): empty body — implementation must be restored. */
int cc_send_sync_request(struct cc_drvdata *drvdata,
			 struct cc_crypto_req *cc_req, struct cc_hw_desc *desc,
			 unsigned int len)
{}

/**
 * send_request_init() - Enqueue caller request to crypto hardware during init
 * process.
 * Assume this function is not called in the middle of a flow,
 * since we set QUEUE_LAST_IND flag in the last descriptor.
 *
 * @drvdata: Associated device driver context
 * @desc: The crypto sequence
 * @len: The crypto sequence length
 *
 * Return:
 * Returns "0" upon success
 */
/* NOTE(review): empty body — implementation must be restored. */
int send_request_init(struct cc_drvdata *drvdata, struct cc_hw_desc *desc,
		      unsigned int len)
{}

/* Kick deferred completion processing for @drvdata (ultimately reaching
 * comp_handler() below) — name-based inference; body is elided here.
 * NOTE(review): empty body — implementation must be restored. */
void complete_request(struct cc_drvdata *drvdata)
{}

#ifdef COMP_IN_WQ
/* Workqueue trampoline: recover the owning driver context from the
 * embedded work item and hand off to the common completion handler. */
static void comp_work_handler(struct work_struct *work)
{
	struct cc_drvdata *drvdata;

	drvdata = container_of(work, struct cc_drvdata, compwork.work);
	comp_handler((unsigned long)drvdata);
}
#endif

/* Process finished requests and invoke their user callbacks — name-based
 * inference; body is elided here.
 * NOTE(review): empty body — implementation must be restored. */
static void proc_completions(struct cc_drvdata *drvdata)
{}

/* Read the number of completed AXI transactions from the device — name-based
 * inference; body is elided here.
 * NOTE(review): empty body — implementation must be restored. */
static inline u32 cc_axi_comp_count(struct cc_drvdata *drvdata)
{}

/* Deferred service handler, run as interrupt-fired tasklet.
 * @devarg carries the struct cc_drvdata pointer cast to unsigned long
 * (see comp_work_handler() above, which passes it that way).
 * NOTE(review): empty body — implementation must be restored. */
static void comp_handler(unsigned long devarg)
{}