Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 961559f3 authored by Gilad Ben-Yossef's avatar Gilad Ben-Yossef Committed by Greg Kroah-Hartman
Browse files

staging: ccree: rename vars/structs/enums from ssi_ to cc_



Unify the naming convention by renaming all ssi_-prefixed
variables, structs, and enums to cc_*

Signed-off-by: Gilad Ben-Yossef <gilad@benyossef.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 9021a014
Loading
Loading
Loading
Loading
+41 −41
Original line number Diff line number Diff line
@@ -52,7 +52,7 @@
#define ICV_VERIF_OK 0x01

struct cc_aead_handle {
	ssi_sram_addr_t sram_workspace_addr;
	cc_sram_addr_t sram_workspace_addr;
	struct list_head aead_list;
};

@@ -69,7 +69,7 @@ struct cc_xcbc_s {
};

struct cc_aead_ctx {
	struct ssi_drvdata *drvdata;
	struct cc_drvdata *drvdata;
	u8 ctr_nonce[MAX_NONCE_SIZE]; /* used for ctr3686 iv and aes ccm */
	u8 *enckey;
	dma_addr_t enckey_dma_addr;
@@ -148,18 +148,18 @@ static int cc_aead_init(struct crypto_aead *tfm)
{
	struct aead_alg *alg = crypto_aead_alg(tfm);
	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct ssi_crypto_alg *ssi_alg =
			container_of(alg, struct ssi_crypto_alg, aead_alg);
	struct device *dev = drvdata_to_dev(ssi_alg->drvdata);
	struct cc_crypto_alg *cc_alg =
			container_of(alg, struct cc_crypto_alg, aead_alg);
	struct device *dev = drvdata_to_dev(cc_alg->drvdata);

	dev_dbg(dev, "Initializing context @%p for %s\n", ctx,
		crypto_tfm_alg_name(&tfm->base));

	/* Initialize modes in instance */
	ctx->cipher_mode = ssi_alg->cipher_mode;
	ctx->flow_mode = ssi_alg->flow_mode;
	ctx->auth_mode = ssi_alg->auth_mode;
	ctx->drvdata = ssi_alg->drvdata;
	ctx->cipher_mode = cc_alg->cipher_mode;
	ctx->flow_mode = cc_alg->flow_mode;
	ctx->auth_mode = cc_alg->auth_mode;
	ctx->drvdata = cc_alg->drvdata;
	crypto_aead_set_reqsize(tfm, sizeof(struct aead_req_ctx));

	/* Allocate key buffer, cache line aligned */
@@ -226,11 +226,11 @@ static int cc_aead_init(struct crypto_aead *tfm)
	return -ENOMEM;
}

static void cc_aead_complete(struct device *dev, void *ssi_req)
static void cc_aead_complete(struct device *dev, void *cc_req)
{
	struct aead_request *areq = (struct aead_request *)ssi_req;
	struct aead_request *areq = (struct aead_request *)cc_req;
	struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
	struct crypto_aead *tfm = crypto_aead_reqtfm(ssi_req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(cc_req);
	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
	int err = 0;

@@ -442,7 +442,7 @@ cc_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *key,
	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct device *dev = drvdata_to_dev(ctx->drvdata);
	u32 larval_addr = cc_larval_digest_addr(ctx->drvdata, ctx->auth_mode);
	struct ssi_crypto_req ssi_req = {};
	struct cc_crypto_req cc_req = {};
	unsigned int blocksize;
	unsigned int digestsize;
	unsigned int hashmode;
@@ -546,7 +546,7 @@ cc_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *key,
		idx++;
	}

	rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 0);
	rc = send_request(ctx->drvdata, &cc_req, desc, idx, 0);
	if (rc)
		dev_err(dev, "send_request() failed (rc=%d)\n", rc);

@@ -561,7 +561,7 @@ cc_aead_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen)
{
	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct rtattr *rta = (struct rtattr *)key;
	struct ssi_crypto_req ssi_req = {};
	struct cc_crypto_req cc_req = {};
	struct crypto_authenc_key_param *param;
	struct cc_hw_desc desc[MAX_AEAD_SETKEY_SEQ];
	int seq_len = 0, rc = -EINVAL;
@@ -645,7 +645,7 @@ cc_aead_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen)
	/* STAT_PHASE_3: Submit sequence to HW */

	if (seq_len > 0) { /* For CCM there is no sequence to setup the key */
		rc = send_request(ctx->drvdata, &ssi_req, desc, seq_len, 0);
		rc = send_request(ctx->drvdata, &cc_req, desc, seq_len, 0);
		if (rc) {
			dev_err(dev, "send_request() failed (rc=%d)\n", rc);
			goto setkey_error;
@@ -734,7 +734,7 @@ static void cc_set_assoc_desc(struct aead_request *areq, unsigned int flow_mode,
	struct crypto_aead *tfm = crypto_aead_reqtfm(areq);
	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
	enum ssi_req_dma_buf_type assoc_dma_type = areq_ctx->assoc_buff_type;
	enum cc_req_dma_buf_type assoc_dma_type = areq_ctx->assoc_buff_type;
	unsigned int idx = *seq_size;
	struct device *dev = drvdata_to_dev(ctx->drvdata);

@@ -773,7 +773,7 @@ static void cc_proc_authen_desc(struct aead_request *areq,
				unsigned int *seq_size, int direct)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
	enum ssi_req_dma_buf_type data_dma_type = areq_ctx->data_buff_type;
	enum cc_req_dma_buf_type data_dma_type = areq_ctx->data_buff_type;
	unsigned int idx = *seq_size;
	struct crypto_aead *tfm = crypto_aead_reqtfm(areq);
	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
@@ -803,7 +803,7 @@ static void cc_proc_authen_desc(struct aead_request *areq,
		 * assoc. + iv + data -compact in one table
		 * if assoclen is ZERO only IV perform
		 */
		ssi_sram_addr_t mlli_addr = areq_ctx->assoc.sram_addr;
		cc_sram_addr_t mlli_addr = areq_ctx->assoc.sram_addr;
		u32 mlli_nents = areq_ctx->assoc.mlli_nents;

		if (areq_ctx->is_single_pass) {
@@ -838,7 +838,7 @@ static void cc_proc_cipher_desc(struct aead_request *areq,
{
	unsigned int idx = *seq_size;
	struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
	enum ssi_req_dma_buf_type data_dma_type = areq_ctx->data_buff_type;
	enum cc_req_dma_buf_type data_dma_type = areq_ctx->data_buff_type;
	struct crypto_aead *tfm = crypto_aead_reqtfm(areq);
	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct device *dev = drvdata_to_dev(ctx->drvdata);
@@ -1954,7 +1954,7 @@ static int cc_proc_aead(struct aead_request *req,
	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	struct device *dev = drvdata_to_dev(ctx->drvdata);
	struct ssi_crypto_req ssi_req = {};
	struct cc_crypto_req cc_req = {};

	dev_dbg(dev, "%s context=%p req=%p iv=%p src=%p src_ofs=%d dst=%p dst_ofs=%d cryptolen=%d\n",
		((direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ? "Enc" : "Dec"),
@@ -1972,8 +1972,8 @@ static int cc_proc_aead(struct aead_request *req,
	}

	/* Setup DX request structure */
	ssi_req.user_cb = (void *)cc_aead_complete;
	ssi_req.user_arg = (void *)req;
	cc_req.user_cb = (void *)cc_aead_complete;
	cc_req.user_arg = (void *)req;

	/* Setup request context */
	areq_ctx->gen_ctx.op_type = direct;
@@ -2040,34 +2040,34 @@ static int cc_proc_aead(struct aead_request *req,
	if (areq_ctx->backup_giv) {
		/* set the DMA mapped IV address*/
		if (ctx->cipher_mode == DRV_CIPHER_CTR) {
			ssi_req.ivgen_dma_addr[0] =
			cc_req.ivgen_dma_addr[0] =
				areq_ctx->gen_ctx.iv_dma_addr +
				CTR_RFC3686_NONCE_SIZE;
			ssi_req.ivgen_dma_addr_len = 1;
			cc_req.ivgen_dma_addr_len = 1;
		} else if (ctx->cipher_mode == DRV_CIPHER_CCM) {
			/* In ccm, the IV needs to exist both inside B0 and
			 * inside the counter.It is also copied to iv_dma_addr
			 * for other reasons (like returning it to the user).
			 * So, using 3 (identical) IV outputs.
			 */
			ssi_req.ivgen_dma_addr[0] =
			cc_req.ivgen_dma_addr[0] =
				areq_ctx->gen_ctx.iv_dma_addr +
				CCM_BLOCK_IV_OFFSET;
			ssi_req.ivgen_dma_addr[1] =
			cc_req.ivgen_dma_addr[1] =
				sg_dma_address(&areq_ctx->ccm_adata_sg) +
				CCM_B0_OFFSET + CCM_BLOCK_IV_OFFSET;
			ssi_req.ivgen_dma_addr[2] =
			cc_req.ivgen_dma_addr[2] =
				sg_dma_address(&areq_ctx->ccm_adata_sg) +
				CCM_CTR_COUNT_0_OFFSET + CCM_BLOCK_IV_OFFSET;
			ssi_req.ivgen_dma_addr_len = 3;
			cc_req.ivgen_dma_addr_len = 3;
		} else {
			ssi_req.ivgen_dma_addr[0] =
			cc_req.ivgen_dma_addr[0] =
				areq_ctx->gen_ctx.iv_dma_addr;
			ssi_req.ivgen_dma_addr_len = 1;
			cc_req.ivgen_dma_addr_len = 1;
		}

		/* set the IV size (8/16 B long)*/
		ssi_req.ivgen_size = crypto_aead_ivsize(tfm);
		cc_req.ivgen_size = crypto_aead_ivsize(tfm);
	}

	/* STAT_PHASE_2: Create sequence */
@@ -2099,7 +2099,7 @@ static int cc_proc_aead(struct aead_request *req,

	/* STAT_PHASE_3: Lock HW and push sequence */

	rc = send_request(ctx->drvdata, &ssi_req, desc, seq_len, 1);
	rc = send_request(ctx->drvdata, &cc_req, desc, seq_len, 1);

	if (rc != -EINPROGRESS) {
		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
@@ -2403,7 +2403,7 @@ static int cc_rfc4543_gcm_decrypt(struct aead_request *req)
}

/* DX Block aead alg */
static struct ssi_alg_template aead_algs[] = {
static struct cc_alg_template aead_algs[] = {
	{
		.name = "authenc(hmac(sha1),cbc(aes))",
		.driver_name = "authenc-hmac-sha1-cbc-aes-dx",
@@ -2653,10 +2653,10 @@ static struct ssi_alg_template aead_algs[] = {
	},
};

static struct ssi_crypto_alg *cc_create_aead_alg(struct ssi_alg_template *tmpl,
static struct cc_crypto_alg *cc_create_aead_alg(struct cc_alg_template *tmpl,
						struct device *dev)
{
	struct ssi_crypto_alg *t_alg;
	struct cc_crypto_alg *t_alg;
	struct aead_alg *alg;

	t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
@@ -2687,9 +2687,9 @@ static struct ssi_crypto_alg *cc_create_aead_alg(struct ssi_alg_template *tmpl,
	return t_alg;
}

int cc_aead_free(struct ssi_drvdata *drvdata)
int cc_aead_free(struct cc_drvdata *drvdata)
{
	struct ssi_crypto_alg *t_alg, *n;
	struct cc_crypto_alg *t_alg, *n;
	struct cc_aead_handle *aead_handle =
		(struct cc_aead_handle *)drvdata->aead_handle;

@@ -2708,10 +2708,10 @@ int cc_aead_free(struct ssi_drvdata *drvdata)
	return 0;
}

int cc_aead_alloc(struct ssi_drvdata *drvdata)
int cc_aead_alloc(struct cc_drvdata *drvdata)
{
	struct cc_aead_handle *aead_handle;
	struct ssi_crypto_alg *t_alg;
	struct cc_crypto_alg *t_alg;
	int rc = -ENOMEM;
	int alg;
	struct device *dev = drvdata_to_dev(drvdata);
+7 −7
Original line number Diff line number Diff line
@@ -96,15 +96,15 @@ struct aead_req_ctx {

	u8 *icv_virt_addr; /* Virt. address of ICV */
	struct async_gen_req_ctx gen_ctx;
	struct ssi_mlli assoc;
	struct ssi_mlli src;
	struct ssi_mlli dst;
	struct cc_mlli assoc;
	struct cc_mlli src;
	struct cc_mlli dst;
	struct scatterlist *src_sgl;
	struct scatterlist *dst_sgl;
	unsigned int src_offset;
	unsigned int dst_offset;
	enum ssi_req_dma_buf_type assoc_buff_type;
	enum ssi_req_dma_buf_type data_buff_type;
	enum cc_req_dma_buf_type assoc_buff_type;
	enum cc_req_dma_buf_type data_buff_type;
	struct mlli_params mlli_params;
	unsigned int cryptlen;
	struct scatterlist ccm_adata_sg;
@@ -116,7 +116,7 @@ struct aead_req_ctx {
	bool plaintext_authenticate_only; //for gcm_rfc4543
};

int cc_aead_alloc(struct ssi_drvdata *drvdata);
int cc_aead_free(struct ssi_drvdata *drvdata);
int cc_aead_alloc(struct cc_drvdata *drvdata);
int cc_aead_free(struct cc_drvdata *drvdata);

#endif /*__CC_AEAD_H__*/
+15 −15
Original line number Diff line number Diff line
@@ -58,7 +58,7 @@ struct buffer_array {
	u32 *mlli_nents[MAX_NUM_OF_BUFFERS_IN_MLLI];
};

static inline char *cc_dma_buf_type(enum ssi_req_dma_buf_type type)
static inline char *cc_dma_buf_type(enum cc_req_dma_buf_type type)
{
	switch (type) {
	case CC_DMA_BUF_NULL:
@@ -80,7 +80,7 @@ static inline char *cc_dma_buf_type(enum ssi_req_dma_buf_type type)
 * @dir: [IN] copy from/to sgl
 */
static void cc_copy_mac(struct device *dev, struct aead_request *req,
			enum ssi_sg_cpy_direct dir)
			enum cc_sg_cpy_direct dir)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
@@ -157,7 +157,7 @@ void cc_zero_sgl(struct scatterlist *sgl, u32 data_len)
 * @direct:
 */
void cc_copy_sg_portion(struct device *dev, u8 *dest, struct scatterlist *sg,
			u32 to_skip, u32 end, enum ssi_sg_cpy_direct direct)
			u32 to_skip, u32 end, enum cc_sg_cpy_direct direct)
{
	u32 nents, lbytes;

@@ -496,7 +496,7 @@ void cc_unmap_blkcipher_request(struct device *dev, void *ctx,
	}
}

int cc_map_blkcipher_request(struct ssi_drvdata *drvdata, void *ctx,
int cc_map_blkcipher_request(struct cc_drvdata *drvdata, void *ctx,
			     unsigned int ivsize, unsigned int nbytes,
			     void *info, struct scatterlist *src,
			     struct scatterlist *dst)
@@ -594,7 +594,7 @@ void cc_unmap_aead_request(struct device *dev, struct aead_request *req)
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	unsigned int hw_iv_size = areq_ctx->hw_iv_size;
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct ssi_drvdata *drvdata = dev_get_drvdata(dev);
	struct cc_drvdata *drvdata = dev_get_drvdata(dev);
	u32 dummy;
	bool chained;
	u32 size_to_unmap = 0;
@@ -734,7 +734,7 @@ static int cc_get_aead_icv_nents(struct device *dev, struct scatterlist *sgl,
	return nents;
}

static int cc_aead_chain_iv(struct ssi_drvdata *drvdata,
static int cc_aead_chain_iv(struct cc_drvdata *drvdata,
			    struct aead_request *req,
			    struct buffer_array *sg_data,
			    bool is_last, bool do_chain)
@@ -778,7 +778,7 @@ static int cc_aead_chain_iv(struct ssi_drvdata *drvdata,
	return rc;
}

static int cc_aead_chain_assoc(struct ssi_drvdata *drvdata,
static int cc_aead_chain_assoc(struct cc_drvdata *drvdata,
			       struct aead_request *req,
			       struct buffer_array *sg_data,
			       bool is_last, bool do_chain)
@@ -898,7 +898,7 @@ static void cc_prepare_aead_data_dlli(struct aead_request *req,
	}
}

static int cc_prepare_aead_data_mlli(struct ssi_drvdata *drvdata,
static int cc_prepare_aead_data_mlli(struct cc_drvdata *drvdata,
				     struct aead_request *req,
				     struct buffer_array *sg_data,
				     u32 *src_last_bytes, u32 *dst_last_bytes,
@@ -1030,7 +1030,7 @@ static int cc_prepare_aead_data_mlli(struct ssi_drvdata *drvdata,
	return rc;
}

static int cc_aead_chain_data(struct ssi_drvdata *drvdata,
static int cc_aead_chain_data(struct cc_drvdata *drvdata,
			      struct aead_request *req,
			      struct buffer_array *sg_data,
			      bool is_last_table, bool do_chain)
@@ -1150,7 +1150,7 @@ static int cc_aead_chain_data(struct ssi_drvdata *drvdata,
	return rc;
}

static void cc_update_aead_mlli_nents(struct ssi_drvdata *drvdata,
static void cc_update_aead_mlli_nents(struct cc_drvdata *drvdata,
				      struct aead_request *req)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
@@ -1201,7 +1201,7 @@ static void cc_update_aead_mlli_nents(struct ssi_drvdata *drvdata,
	}
}

int cc_map_aead_request(struct ssi_drvdata *drvdata, struct aead_request *req)
int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	struct mlli_params *mlli_params = &areq_ctx->mlli_params;
@@ -1400,7 +1400,7 @@ int cc_map_aead_request(struct ssi_drvdata *drvdata, struct aead_request *req)
	return rc;
}

int cc_map_hash_request_final(struct ssi_drvdata *drvdata, void *ctx,
int cc_map_hash_request_final(struct cc_drvdata *drvdata, void *ctx,
			      struct scatterlist *src, unsigned int nbytes,
			      bool do_update)
{
@@ -1481,7 +1481,7 @@ int cc_map_hash_request_final(struct ssi_drvdata *drvdata, void *ctx,
	return -ENOMEM;
}

int cc_map_hash_request_update(struct ssi_drvdata *drvdata, void *ctx,
int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx,
			       struct scatterlist *src, unsigned int nbytes,
			       unsigned int block_size)
{
@@ -1639,7 +1639,7 @@ void cc_unmap_hash_request(struct device *dev, void *ctx,
	}
}

int cc_buffer_mgr_init(struct ssi_drvdata *drvdata)
int cc_buffer_mgr_init(struct cc_drvdata *drvdata)
{
	struct buff_mgr_handle *buff_mgr_handle;
	struct device *dev = drvdata_to_dev(drvdata);
@@ -1666,7 +1666,7 @@ int cc_buffer_mgr_init(struct ssi_drvdata *drvdata)
	return -ENOMEM;
}

int cc_buffer_mgr_fini(struct ssi_drvdata *drvdata)
int cc_buffer_mgr_fini(struct cc_drvdata *drvdata)
{
	struct buff_mgr_handle *buff_mgr_handle = drvdata->buff_mgr_handle;

+11 −11
Original line number Diff line number Diff line
@@ -26,19 +26,19 @@
#include "ssi_config.h"
#include "ssi_driver.h"

enum ssi_req_dma_buf_type {
enum cc_req_dma_buf_type {
	CC_DMA_BUF_NULL = 0,
	CC_DMA_BUF_DLLI,
	CC_DMA_BUF_MLLI
};

enum ssi_sg_cpy_direct {
enum cc_sg_cpy_direct {
	CC_SG_TO_BUF = 0,
	CC_SG_FROM_BUF = 1
};

struct ssi_mlli {
	ssi_sram_addr_t sram_addr;
struct cc_mlli {
	cc_sram_addr_t sram_addr;
	unsigned int nents; //sg nents
	unsigned int mlli_nents; //mlli nents might be different than the above
};
@@ -50,11 +50,11 @@ struct mlli_params {
	u32 mlli_len;
};

int cc_buffer_mgr_init(struct ssi_drvdata *drvdata);
int cc_buffer_mgr_init(struct cc_drvdata *drvdata);

int cc_buffer_mgr_fini(struct ssi_drvdata *drvdata);
int cc_buffer_mgr_fini(struct cc_drvdata *drvdata);

int cc_map_blkcipher_request(struct ssi_drvdata *drvdata, void *ctx,
int cc_map_blkcipher_request(struct cc_drvdata *drvdata, void *ctx,
			     unsigned int ivsize, unsigned int nbytes,
			     void *info, struct scatterlist *src,
			     struct scatterlist *dst);
@@ -64,15 +64,15 @@ void cc_unmap_blkcipher_request(struct device *dev, void *ctx,
				struct scatterlist *src,
				struct scatterlist *dst);

int cc_map_aead_request(struct ssi_drvdata *drvdata, struct aead_request *req);
int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req);

void cc_unmap_aead_request(struct device *dev, struct aead_request *req);

int cc_map_hash_request_final(struct ssi_drvdata *drvdata, void *ctx,
int cc_map_hash_request_final(struct cc_drvdata *drvdata, void *ctx,
			      struct scatterlist *src, unsigned int nbytes,
			      bool do_update);

int cc_map_hash_request_update(struct ssi_drvdata *drvdata, void *ctx,
int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx,
			       struct scatterlist *src, unsigned int nbytes,
			       unsigned int block_size);

@@ -80,7 +80,7 @@ void cc_unmap_hash_request(struct device *dev, void *ctx,
			   struct scatterlist *src, bool do_revert);

void cc_copy_sg_portion(struct device *dev, u8 *dest, struct scatterlist *sg,
			u32 to_skip, u32 end, enum ssi_sg_cpy_direct direct);
			u32 to_skip, u32 end, enum cc_sg_cpy_direct direct);

void cc_zero_sgl(struct scatterlist *sgl, u32 data_len);

+36 −36
Original line number Diff line number Diff line
@@ -55,7 +55,7 @@ struct cc_hw_key_info {
};

struct cc_cipher_ctx {
	struct ssi_drvdata *drvdata;
	struct cc_drvdata *drvdata;
	int keylen;
	int key_round_number;
	int cipher_mode;
@@ -67,7 +67,7 @@ struct cc_cipher_ctx {
	struct crypto_shash *shash_tfm;
};

static void cc_cipher_complete(struct device *dev, void *ssi_req);
static void cc_cipher_complete(struct device *dev, void *cc_req);

static int validate_keys_sizes(struct cc_cipher_ctx *ctx_p, u32 size)
{
@@ -145,17 +145,17 @@ static int validate_data_size(struct cc_cipher_ctx *ctx_p,

static unsigned int get_max_keysize(struct crypto_tfm *tfm)
{
	struct ssi_crypto_alg *ssi_alg =
		container_of(tfm->__crt_alg, struct ssi_crypto_alg,
	struct cc_crypto_alg *cc_alg =
		container_of(tfm->__crt_alg, struct cc_crypto_alg,
			     crypto_alg);

	if ((ssi_alg->crypto_alg.cra_flags & CRYPTO_ALG_TYPE_MASK) ==
	if ((cc_alg->crypto_alg.cra_flags & CRYPTO_ALG_TYPE_MASK) ==
	    CRYPTO_ALG_TYPE_ABLKCIPHER)
		return ssi_alg->crypto_alg.cra_ablkcipher.max_keysize;
		return cc_alg->crypto_alg.cra_ablkcipher.max_keysize;

	if ((ssi_alg->crypto_alg.cra_flags & CRYPTO_ALG_TYPE_MASK) ==
	if ((cc_alg->crypto_alg.cra_flags & CRYPTO_ALG_TYPE_MASK) ==
	    CRYPTO_ALG_TYPE_BLKCIPHER)
		return ssi_alg->crypto_alg.cra_blkcipher.max_keysize;
		return cc_alg->crypto_alg.cra_blkcipher.max_keysize;

	return 0;
}
@@ -164,9 +164,9 @@ static int cc_cipher_init(struct crypto_tfm *tfm)
{
	struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
	struct crypto_alg *alg = tfm->__crt_alg;
	struct ssi_crypto_alg *ssi_alg =
			container_of(alg, struct ssi_crypto_alg, crypto_alg);
	struct device *dev = drvdata_to_dev(ssi_alg->drvdata);
	struct cc_crypto_alg *cc_alg =
			container_of(alg, struct cc_crypto_alg, crypto_alg);
	struct device *dev = drvdata_to_dev(cc_alg->drvdata);
	int rc = 0;
	unsigned int max_key_buf_size = get_max_keysize(tfm);
	struct ablkcipher_tfm *ablktfm = &tfm->crt_ablkcipher;
@@ -176,9 +176,9 @@ static int cc_cipher_init(struct crypto_tfm *tfm)

	ablktfm->reqsize = sizeof(struct blkcipher_req_ctx);

	ctx_p->cipher_mode = ssi_alg->cipher_mode;
	ctx_p->flow_mode = ssi_alg->flow_mode;
	ctx_p->drvdata = ssi_alg->drvdata;
	ctx_p->cipher_mode = cc_alg->cipher_mode;
	ctx_p->flow_mode = cc_alg->flow_mode;
	ctx_p->drvdata = cc_alg->drvdata;

	/* Allocate key buffer, cache line aligned */
	ctx_p->user.key = kmalloc(max_key_buf_size, GFP_KERNEL | GFP_DMA);
@@ -408,14 +408,14 @@ static void cc_setup_cipher_desc(struct crypto_tfm *tfm,
	dma_addr_t iv_dma_addr = req_ctx->gen_ctx.iv_dma_addr;
	unsigned int du_size = nbytes;

	struct ssi_crypto_alg *ssi_alg =
		container_of(tfm->__crt_alg, struct ssi_crypto_alg,
	struct cc_crypto_alg *cc_alg =
		container_of(tfm->__crt_alg, struct cc_crypto_alg,
			     crypto_alg);

	if ((ssi_alg->crypto_alg.cra_flags & CRYPTO_ALG_BULK_MASK) ==
	if ((cc_alg->crypto_alg.cra_flags & CRYPTO_ALG_BULK_MASK) ==
	    CRYPTO_ALG_BULK_DU_512)
		du_size = 512;
	if ((ssi_alg->crypto_alg.cra_flags & CRYPTO_ALG_BULK_MASK) ==
	if ((cc_alg->crypto_alg.cra_flags & CRYPTO_ALG_BULK_MASK) ==
	    CRYPTO_ALG_BULK_DU_4096)
		du_size = 4096;

@@ -604,9 +604,9 @@ static void cc_setup_cipher_data(struct crypto_tfm *tfm,
	}
}

static void cc_cipher_complete(struct device *dev, void *ssi_req)
static void cc_cipher_complete(struct device *dev, void *cc_req)
{
	struct ablkcipher_request *areq = (struct ablkcipher_request *)ssi_req;
	struct ablkcipher_request *areq = (struct ablkcipher_request *)cc_req;
	struct scatterlist *dst = areq->dst;
	struct scatterlist *src = areq->src;
	struct blkcipher_req_ctx *req_ctx = ablkcipher_request_ctx(areq);
@@ -651,7 +651,7 @@ static int cc_cipher_process(struct ablkcipher_request *req,
	struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
	struct device *dev = drvdata_to_dev(ctx_p->drvdata);
	struct cc_hw_desc desc[MAX_ABLKCIPHER_SEQ_LEN];
	struct ssi_crypto_req ssi_req = {};
	struct cc_crypto_req cc_req = {};
	int rc, seq_len = 0, cts_restore_flag = 0;

	dev_dbg(dev, "%s req=%p info=%p nbytes=%d\n",
@@ -691,11 +691,11 @@ static int cc_cipher_process(struct ablkcipher_request *req,
	}

	/* Setup DX request structure */
	ssi_req.user_cb = (void *)cc_cipher_complete;
	ssi_req.user_arg = (void *)req;
	cc_req.user_cb = (void *)cc_cipher_complete;
	cc_req.user_arg = (void *)req;

#ifdef ENABLE_CYCLE_COUNT
	ssi_req.op_type = (direction == DRV_CRYPTO_DIRECTION_DECRYPT) ?
	cc_req.op_type = (direction == DRV_CRYPTO_DIRECTION_DECRYPT) ?
		STAT_OP_TYPE_DECODE : STAT_OP_TYPE_ENCODE;

#endif
@@ -722,15 +722,15 @@ static int cc_cipher_process(struct ablkcipher_request *req,

	/* do we need to generate IV? */
	if (req_ctx->is_giv) {
		ssi_req.ivgen_dma_addr[0] = req_ctx->gen_ctx.iv_dma_addr;
		ssi_req.ivgen_dma_addr_len = 1;
		cc_req.ivgen_dma_addr[0] = req_ctx->gen_ctx.iv_dma_addr;
		cc_req.ivgen_dma_addr_len = 1;
		/* set the IV size (8/16 B long)*/
		ssi_req.ivgen_size = ivsize;
		cc_req.ivgen_size = ivsize;
	}

	/* STAT_PHASE_3: Lock HW and push sequence */

	rc = send_request(ctx_p->drvdata, &ssi_req, desc, seq_len, 1);
	rc = send_request(ctx_p->drvdata, &cc_req, desc, seq_len, 1);
	if (rc != -EINPROGRESS) {
		/* Failed to send the request or request completed
		 * synchronously
@@ -782,7 +782,7 @@ static int cc_cipher_decrypt(struct ablkcipher_request *req)
}

/* DX Block cipher alg */
static struct ssi_alg_template blkcipher_algs[] = {
static struct cc_alg_template blkcipher_algs[] = {
	{
		.name = "xts(aes)",
		.driver_name = "xts-aes-dx",
@@ -1075,10 +1075,10 @@ static struct ssi_alg_template blkcipher_algs[] = {
};

static
struct ssi_crypto_alg *cc_cipher_create_alg(struct ssi_alg_template *template,
struct cc_crypto_alg *cc_cipher_create_alg(struct cc_alg_template *template,
					   struct device *dev)
{
	struct ssi_crypto_alg *t_alg;
	struct cc_crypto_alg *t_alg;
	struct crypto_alg *alg;

	t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
@@ -1109,9 +1109,9 @@ struct ssi_crypto_alg *cc_cipher_create_alg(struct ssi_alg_template *template,
	return t_alg;
}

int cc_cipher_free(struct ssi_drvdata *drvdata)
int cc_cipher_free(struct cc_drvdata *drvdata)
{
	struct ssi_crypto_alg *t_alg, *n;
	struct cc_crypto_alg *t_alg, *n;
	struct cc_cipher_handle *blkcipher_handle =
						drvdata->blkcipher_handle;
	if (blkcipher_handle) {
@@ -1129,10 +1129,10 @@ int cc_cipher_free(struct ssi_drvdata *drvdata)
	return 0;
}

int cc_cipher_alloc(struct ssi_drvdata *drvdata)
int cc_cipher_alloc(struct cc_drvdata *drvdata)
{
	struct cc_cipher_handle *ablkcipher_handle;
	struct ssi_crypto_alg *t_alg;
	struct cc_crypto_alg *t_alg;
	struct device *dev = drvdata_to_dev(drvdata);
	int rc = -ENOMEM;
	int alg;
Loading