
Commit 9d6f1a82 authored by Fionnuala Gunter, committed by Herbert Xu

crypto: nx - fix limits to sg lists for AES-XCBC



This patch updates the NX driver to perform several hyper calls when necessary
so that the length limits of scatter/gather lists are respected.

Reviewed-by: Joy Latten <jmlatten@linux.vnet.ibm.com>
Reviewed-by: Marcelo Cerri <mhcerri@linux.vnet.ibm.com>
Signed-off-by: Fionnuala Gunter <fin@linux.vnet.ibm.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent 79980434
+66 −47
@@ -88,60 +88,72 @@ static int nx_xcbc_update(struct shash_desc *desc,
 	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
 	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
 	struct nx_sg *in_sg;
-	u32 to_process, leftover;
+	u32 to_process, leftover, total;
+	u32 max_sg_len;
 	unsigned long irq_flags;
 	int rc = 0;
 
 	spin_lock_irqsave(&nx_ctx->lock, irq_flags);
 
-	if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) {
-		/* we've hit the nx chip previously and we're updating again,
-		 * so copy over the partial digest */
-		memcpy(csbcpb->cpb.aes_xcbc.cv,
-		       csbcpb->cpb.aes_xcbc.out_cv_mac, AES_BLOCK_SIZE);
-	}
+
+	total = sctx->count + len;
 
 	/* 2 cases for total data len:
 	 *  1: <= AES_BLOCK_SIZE: copy into state, return 0
 	 *  2: > AES_BLOCK_SIZE: process X blocks, copy in leftover
 	 */
-	if (len + sctx->count <= AES_BLOCK_SIZE) {
+	if (total <= AES_BLOCK_SIZE) {
 		memcpy(sctx->buffer + sctx->count, data, len);
 		sctx->count += len;
 		goto out;
 	}
 
-	/* to_process: the AES_BLOCK_SIZE data chunk to process in this
-	 * update */
-	to_process = (sctx->count + len) & ~(AES_BLOCK_SIZE - 1);
-	leftover = (sctx->count + len) & (AES_BLOCK_SIZE - 1);
-
-	/* the hardware will not accept a 0 byte operation for this algorithm
-	 * and the operation MUST be finalized to be correct. So if we happen
-	 * to get an update that falls on a block sized boundary, we must
-	 * save off the last block to finalize with later. */
-	if (!leftover) {
-		to_process -= AES_BLOCK_SIZE;
-		leftover = AES_BLOCK_SIZE;
-	}
-
-	if (sctx->count) {
-		in_sg = nx_build_sg_list(nx_ctx->in_sg, sctx->buffer,
-					 sctx->count, nx_ctx->ap->sglen);
-		in_sg = nx_build_sg_list(in_sg, (u8 *)data,
-					 to_process - sctx->count,
-					 nx_ctx->ap->sglen);
-		nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) *
-					sizeof(struct nx_sg);
-	} else {
-		in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)data, to_process,
-					 nx_ctx->ap->sglen);
-		nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) *
-					sizeof(struct nx_sg);
-	}
-
-	NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
-
+	in_sg = nx_ctx->in_sg;
+	max_sg_len = min_t(u32, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
+				nx_ctx->ap->sglen);
+
+	do {
+
+		/* to_process: the AES_BLOCK_SIZE data chunk to process in this
+		 * update */
+		to_process = min_t(u64, total, nx_ctx->ap->databytelen);
+		to_process = min_t(u64, to_process,
+					NX_PAGE_SIZE * (max_sg_len - 1));
+		to_process = to_process & ~(AES_BLOCK_SIZE - 1);
+		leftover = total - to_process;
+
+		/* the hardware will not accept a 0 byte operation for this
+		 * algorithm and the operation MUST be finalized to be correct.
+		 * So if we happen to get an update that falls on a block sized
+		 * boundary, we must save off the last block to finalize with
+		 * later. */
+		if (!leftover) {
+			to_process -= AES_BLOCK_SIZE;
+			leftover = AES_BLOCK_SIZE;
+		}
+
+		if (sctx->count) {
+			in_sg = nx_build_sg_list(nx_ctx->in_sg,
+						(u8 *) sctx->buffer,
+						sctx->count,
+						max_sg_len);
+		}
+		in_sg = nx_build_sg_list(in_sg,
+					(u8 *) data,
+					to_process - sctx->count,
+					max_sg_len);
+		nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) *
+					sizeof(struct nx_sg);
+
+		/* we've hit the nx chip previously and we're updating again,
+		 * so copy over the partial digest */
+		if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) {
+			memcpy(csbcpb->cpb.aes_xcbc.cv,
+				csbcpb->cpb.aes_xcbc.out_cv_mac,
+				AES_BLOCK_SIZE);
+		}
+
+		NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
 	if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) {
 		rc = -EINVAL;
 		goto out;
@@ -154,12 +166,19 @@ static int nx_xcbc_update(struct shash_desc *desc,
 
 	atomic_inc(&(nx_ctx->stats->aes_ops));
 
+		/* everything after the first update is continuation */
+		NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
+
+		total -= to_process;
+		data += to_process - sctx->count;
+		sctx->count = 0;
+		in_sg = nx_ctx->in_sg;
+	} while (leftover > AES_BLOCK_SIZE);
+
 	/* copy the leftover back into the state struct */
-	memcpy(sctx->buffer, data + len - leftover, leftover);
+	memcpy(sctx->buffer, data, leftover);
 	sctx->count = leftover;
 
-	/* everything after the first update is continuation */
-	NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
 out:
 	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
 	return rc;
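
The heart of the fix is the new do/while loop: each pass clamps to_process to the engine's per-call byte limit (nx_ctx->ap->databytelen) and to what fits in a single scatter/gather list (NX_PAGE_SIZE * (max_sg_len - 1)), rounds it down to whole AES blocks, and always holds back at least one block so the final operation can still be finalized. The standalone sketch below illustrates only that chunking arithmetic; the limit values and the demo_update()/fake_hcall() helpers are hypothetical stand-ins for this example, not part of the NX driver.

/*
 * Minimal, self-contained sketch of the chunking logic this patch adds to
 * nx_xcbc_update().  The limits below and the demo_update()/fake_hcall()
 * helpers are hypothetical stand-ins, not the NX driver's API; they only
 * illustrate how an update is split so that every hyper call stays within
 * the scatter/gather and byte-length limits.
 */
#include <stdio.h>
#include <stdint.h>

#define AES_BLOCK_SIZE	16u
#define NX_PAGE_SIZE	4096u		/* assumed page size */
#define MAX_SG_LEN	32u		/* assumed sg-list capacity */
#define DATABYTELEN	(64u * 1024u)	/* assumed per-call byte limit */

static uint64_t min_u64(uint64_t a, uint64_t b)
{
	return a < b ? a : b;
}

/* Stand-in for one hyper call over 'len' bytes. */
static void fake_hcall(uint64_t len)
{
	printf("  hcall over %llu bytes\n", (unsigned long long)len);
}

/* Mirrors the new do/while loop: clamp, round down to whole blocks, and
 * always keep at least one block back so the last call can be finalized. */
static void demo_update(uint64_t total)
{
	uint64_t to_process, leftover;

	printf("update of %llu bytes:\n", (unsigned long long)total);
	if (total <= AES_BLOCK_SIZE) {
		printf("  buffered in the state struct, no hcall\n");
		return;
	}

	do {
		to_process = min_u64(total, DATABYTELEN);
		to_process = min_u64(to_process,
				     (uint64_t)NX_PAGE_SIZE * (MAX_SG_LEN - 1));
		to_process &= ~(uint64_t)(AES_BLOCK_SIZE - 1);
		leftover = total - to_process;

		if (!leftover) {
			/* never send the very last block in an update */
			to_process -= AES_BLOCK_SIZE;
			leftover = AES_BLOCK_SIZE;
		}

		fake_hcall(to_process);
		total -= to_process;
	} while (leftover > AES_BLOCK_SIZE);

	printf("  %llu bytes kept for the next update/final\n",
	       (unsigned long long)leftover);
}

int main(void)
{
	demo_update(8);			/* fits in the state buffer */
	demo_update(300u * 1024u);	/* forces several hyper calls */
	return 0;
}

With these assumed limits, the large update is split into several calls of at most 64 KB each, and 16 bytes are always carried over for the next update or the final operation, matching the behavior the patch describes.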