
Commit 65fe6740 authored by Herbert Xu

crypto: chainiv - Move IV seeding into init function



We currently do the IV seeding on the first givencrypt call in
order to conserve entropy.  However, this does not work with
DRBG which cannot be called from interrupt context.  In fact,
with DRBG we don't need to conserve entropy anyway.  So this
patch moves the seeding into the init function.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent 3154de71
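
For readers outside the kernel tree, here is a minimal userspace sketch of the pattern this patch adopts (not the kernel code itself): the chaining IV is seeded once when the instance is initialised, in process context, so the request path no longer needs a "seed on first call" special case that might run in softirq context. The names geniv_ctx, rng_get_bytes() and givencrypt() below are hypothetical stand-ins for chainiv_ctx, crypto_rng_get_bytes() and the real givencrypt handler.

#include <stdio.h>
#include <stdlib.h>

#define IVSIZE 16

struct geniv_ctx {
	unsigned char iv[IVSIZE];
};

/* Stand-in for crypto_rng_get_bytes(); rand() is only a placeholder RNG.
 * Returns 0 on success, matching the kernel convention. */
static int rng_get_bytes(unsigned char *buf, size_t len)
{
	for (size_t i = 0; i < len; i++)
		buf[i] = (unsigned char)rand();
	return 0;
}

/* "After" shape of the patch: seed the chaining IV once, at init time. */
static int geniv_init(struct geniv_ctx *ctx)
{
	return rng_get_bytes(ctx->iv, IVSIZE);
}

/* The request path just uses (and later chains) the IV; no first-call check. */
static void givencrypt(struct geniv_ctx *ctx, const unsigned char *src, size_t len)
{
	(void)ctx; (void)src; (void)len;	/* encrypt + chain-back of IV elided */
}

int main(void)
{
	struct geniv_ctx ctx;

	if (geniv_init(&ctx))
		return 1;			/* RNG failure surfaces at init */
	givencrypt(&ctx, (const unsigned char *)"data", 4);
	printf("IV byte 0 after init-time seeding: %02x\n", ctx.iv[0]);
	return 0;
}
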
+9 −57
@@ -80,35 +80,15 @@ static int chainiv_givencrypt(struct skcipher_givcrypt_request *req)
 	return err;
 }
 
-static int chainiv_givencrypt_first(struct skcipher_givcrypt_request *req)
+static int chainiv_init_common(struct crypto_tfm *tfm, char iv[])
 {
-	struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
-	struct chainiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
-	int err = 0;
-
-	spin_lock_bh(&ctx->lock);
-	if (crypto_ablkcipher_crt(geniv)->givencrypt !=
-	    chainiv_givencrypt_first)
-		goto unlock;
-
-	crypto_ablkcipher_crt(geniv)->givencrypt = chainiv_givencrypt;
-	err = crypto_rng_get_bytes(crypto_default_rng, ctx->iv,
-				   crypto_ablkcipher_ivsize(geniv));
-
-unlock:
-	spin_unlock_bh(&ctx->lock);
-
-	if (err)
-		return err;
-
-	return chainiv_givencrypt(req);
-}
-
-static int chainiv_init_common(struct crypto_tfm *tfm)
-{
+	struct crypto_ablkcipher *geniv = __crypto_ablkcipher_cast(tfm);
+
 	tfm->crt_ablkcipher.reqsize = sizeof(struct ablkcipher_request);
 
-	return skcipher_geniv_init(tfm);
+	return crypto_rng_get_bytes(crypto_default_rng, iv,
+				    crypto_ablkcipher_ivsize(geniv)) ?:
+	       skcipher_geniv_init(tfm);
 }
 
 static int chainiv_init(struct crypto_tfm *tfm)
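
The new chainiv_init_common() above leans on the GNU C "a ?: b" conditional with the middle operand omitted: the expression evaluates to a when a is nonzero and to b otherwise, so a nonzero error from crypto_rng_get_bytes() is returned as-is and skcipher_geniv_init() only runs when seeding succeeded. A tiny standalone sketch of that idiom (step_one()/step_two() are made-up helpers, not kernel code; requires GCC or Clang):

#include <stdio.h>

static int step_one(void) { return 0; }		/* 0 means success */
static int step_two(void) { return -1; }	/* nonzero means error */

int main(void)
{
	/* GNU extension: "x ?: y" is x when x is nonzero, otherwise y,
	 * and y is not evaluated when x is nonzero. */
	int err = step_one() ?: step_two();

	printf("err = %d\n", err);	/* prints -1: step_one returned 0, so step_two ran and its error propagates */
	return 0;
}
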
@@ -117,7 +97,7 @@ static int chainiv_init(struct crypto_tfm *tfm)
 
 	spin_lock_init(&ctx->lock);
 
-	return chainiv_init_common(tfm);
+	return chainiv_init_common(tfm, ctx->iv);
 }
 
 static int async_chainiv_schedule_work(struct async_chainiv_ctx *ctx)
@@ -205,33 +185,6 @@ static int async_chainiv_givencrypt(struct skcipher_givcrypt_request *req)
 	return async_chainiv_postpone_request(req);
 }
 
-static int async_chainiv_givencrypt_first(struct skcipher_givcrypt_request *req)
-{
-	struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
-	struct async_chainiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
-	int err = 0;
-
-	if (test_and_set_bit(CHAINIV_STATE_INUSE, &ctx->state))
-		goto out;
-
-	if (crypto_ablkcipher_crt(geniv)->givencrypt !=
-	    async_chainiv_givencrypt_first)
-		goto unlock;
-
-	crypto_ablkcipher_crt(geniv)->givencrypt = async_chainiv_givencrypt;
-	err = crypto_rng_get_bytes(crypto_default_rng, ctx->iv,
-				   crypto_ablkcipher_ivsize(geniv));
-
-unlock:
-	clear_bit(CHAINIV_STATE_INUSE, &ctx->state);
-
-	if (err)
-		return err;
-
-out:
-	return async_chainiv_givencrypt(req);
-}
-
 static void async_chainiv_do_postponed(struct work_struct *work)
 {
 	struct async_chainiv_ctx *ctx = container_of(work,
@@ -270,7 +223,7 @@ static int async_chainiv_init(struct crypto_tfm *tfm)
 	crypto_init_queue(&ctx->queue, 100);
 	INIT_WORK(&ctx->postponed, async_chainiv_do_postponed);
 
-	return chainiv_init_common(tfm);
+	return chainiv_init_common(tfm, ctx->iv);
 }
 
 static void async_chainiv_exit(struct crypto_tfm *tfm)
@@ -302,7 +255,7 @@ static struct crypto_instance *chainiv_alloc(struct rtattr **tb)
 	if (IS_ERR(inst))
 		goto put_rng;
 
-	inst->alg.cra_ablkcipher.givencrypt = chainiv_givencrypt_first;
+	inst->alg.cra_ablkcipher.givencrypt = chainiv_givencrypt;
 
 	inst->alg.cra_init = chainiv_init;
 	inst->alg.cra_exit = skcipher_geniv_exit;
@@ -312,8 +265,7 @@ static struct crypto_instance *chainiv_alloc(struct rtattr **tb)
 	if (!crypto_requires_sync(algt->type, algt->mask)) {
 		inst->alg.cra_flags |= CRYPTO_ALG_ASYNC;
 
-		inst->alg.cra_ablkcipher.givencrypt =
-			async_chainiv_givencrypt_first;
+		inst->alg.cra_ablkcipher.givencrypt = async_chainiv_givencrypt;
 
 		inst->alg.cra_init = async_chainiv_init;
 		inst->alg.cra_exit = async_chainiv_exit;