Commit 95619be5 authored by Linus Torvalds
* git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6:
  crypto: crypto4xx - Fix build breakage
  n2_crypto: Plumb fallback ahash requests properly.
  n2_crypto: Fix MAU kmem_cache name.
  n2_crypto: Fix build after of_device/of_platform_driver changes.
parents 03cd3739 0f0a8fa7
+65 −58
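
The bulk of the diff below is the fallback-plumbing fix: the driver used to embed a single fallback ahash_request in n2_hash_ctx, which is shared by every request on a transform, so concurrent requests could clobber each other's fallback state. The fix moves that state into a per-request context. A minimal sketch of the API pattern involved (hypothetical my_* names, not from the driver):

	#include <crypto/internal/hash.h>
	#include <linux/err.h>

	struct my_req_ctx {
		/* ... per-request driver state ... */
		struct ahash_request fallback_req;	/* keep last: the fallback's own
							 * request context is laid out
							 * directly behind it */
	};

	static int my_tfm_init(struct crypto_tfm *tfm)
	{
		struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
		struct crypto_ahash *fallback;

		/* Grab a software implementation to fall back on. */
		fallback = crypto_alloc_ahash(crypto_tfm_alg_name(tfm), 0,
					      CRYPTO_ALG_NEED_FALLBACK);
		if (IS_ERR(fallback))
			return PTR_ERR(fallback);

		/* Declare how much private memory every request must carry:
		 * our context plus the fallback's own request context.  A real
		 * driver would also stash 'fallback' in its tfm context, as
		 * n2_hash_cra_init() does below. */
		crypto_ahash_set_reqsize(ahash, sizeof(struct my_req_ctx) +
						crypto_ahash_reqsize(fallback));
		return 0;
	}

With the request size declared up front, each ahash_request carries its own private area, retrieved with ahash_request_ctx(req), and nothing operation-scoped lives in the shared tfm context anymore.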
@@ -251,16 +251,10 @@ static void n2_base_ctx_init(struct n2_base_ctx *ctx)
 struct n2_hash_ctx {
 	struct n2_base_ctx		base;
 
-	struct crypto_ahash		*fallback;
+	struct crypto_ahash		*fallback_tfm;
+};
 
-	/* These next three members must match the layout created by
-	 * crypto_init_shash_ops_async.  This allows us to properly
-	 * plumb requests we can't do in hardware down to the fallback
-	 * operation, providing all of the data structures and layouts
-	 * expected by those paths.
-	 */
-	struct ahash_request		fallback_req;
-	struct shash_desc		fallback_desc;
+struct n2_hash_req_ctx {
 	union {
 		struct md5_state	md5;
 		struct sha1_state	sha1;
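
In short: the old catch-all n2_hash_ctx is split in two. The transform-lifetime piece keeps only the fallback tfm pointer (renamed fallback_tfm), while everything scoped to a single operation, the hash state union, the key material and the fallback sub-request, moves into the new per-request n2_hash_req_ctx, continued in the next hunk.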
@@ -269,56 +263,62 @@ struct n2_hash_ctx {

 	unsigned char			hash_key[64];
 	unsigned char			keyed_zero_hash[32];
+
+	struct ahash_request		fallback_req;
 };
 
 static int n2_hash_async_init(struct ahash_request *req)
 {
+	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 	struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
 
-	ctx->fallback_req.base.tfm = crypto_ahash_tfm(ctx->fallback);
-	ctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
+	rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
 
-	return crypto_ahash_init(&ctx->fallback_req);
+	return crypto_ahash_init(&rctx->fallback_req);
 }
 
 static int n2_hash_async_update(struct ahash_request *req)
 {
+	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 	struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
 
-	ctx->fallback_req.base.tfm = crypto_ahash_tfm(ctx->fallback);
-	ctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
-	ctx->fallback_req.nbytes = req->nbytes;
-	ctx->fallback_req.src = req->src;
+	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
+	rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+	rctx->fallback_req.nbytes = req->nbytes;
+	rctx->fallback_req.src = req->src;
 
-	return crypto_ahash_update(&ctx->fallback_req);
+	return crypto_ahash_update(&rctx->fallback_req);
 }
 
 static int n2_hash_async_final(struct ahash_request *req)
 {
+	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 	struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
 
-	ctx->fallback_req.base.tfm = crypto_ahash_tfm(ctx->fallback);
-	ctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
-	ctx->fallback_req.result = req->result;
+	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
+	rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+	rctx->fallback_req.result = req->result;
 
-	return crypto_ahash_final(&ctx->fallback_req);
+	return crypto_ahash_final(&rctx->fallback_req);
 }
 
 static int n2_hash_async_finup(struct ahash_request *req)
 {
+	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 	struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
 
-	ctx->fallback_req.base.tfm = crypto_ahash_tfm(ctx->fallback);
-	ctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
-	ctx->fallback_req.nbytes = req->nbytes;
-	ctx->fallback_req.src = req->src;
-	ctx->fallback_req.result = req->result;
+	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
+	rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
+	rctx->fallback_req.nbytes = req->nbytes;
+	rctx->fallback_req.src = req->src;
+	rctx->fallback_req.result = req->result;
 
-	return crypto_ahash_finup(&ctx->fallback_req);
+	return crypto_ahash_finup(&rctx->fallback_req);
 }
 
 static int n2_hash_cra_init(struct crypto_tfm *tfm)
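
All four wrappers above now locate their state through ahash_request_ctx(req) instead of the shared tfm context, so they are safe to run concurrently on one transform. Illustratively (hypothetical caller, not part of the patch):

	/* Two in-flight requests on the same tfm: each embeds its own
	 * n2_hash_req_ctx, so neither init can disturb the other. */
	struct ahash_request *a = ahash_request_alloc(tfm, GFP_KERNEL);
	struct ahash_request *b = ahash_request_alloc(tfm, GFP_KERNEL);

	crypto_ahash_init(a);
	crypto_ahash_init(b);	/* used to overwrite a's fallback state */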
@@ -338,7 +338,10 @@ static int n2_hash_cra_init(struct crypto_tfm *tfm)
 		goto out;
 	}
 
-	ctx->fallback = fallback_tfm;
+	crypto_ahash_set_reqsize(ahash, (sizeof(struct n2_hash_req_ctx) +
+					 crypto_ahash_reqsize(fallback_tfm)));
+
+	ctx->fallback_tfm = fallback_tfm;
 	return 0;
 
 out:
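
Note the ordering: crypto_ahash_set_reqsize() runs at tfm-init time, before any request can be allocated, and the declared size covers both the driver's n2_hash_req_ctx and whatever the chosen fallback needs for its own requests, via crypto_ahash_reqsize(fallback_tfm).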
@@ -350,7 +353,7 @@ static void n2_hash_cra_exit(struct crypto_tfm *tfm)
 	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
 	struct n2_hash_ctx *ctx = crypto_ahash_ctx(ahash);
 
-	crypto_free_ahash(ctx->fallback);
+	crypto_free_ahash(ctx->fallback_tfm);
 }
 
 static unsigned long wait_for_tail(struct spu_queue *qp)
@@ -399,14 +402,16 @@ static int n2_hash_async_digest(struct ahash_request *req,
 	 * exceed 2^16.
 	 */
 	if (unlikely(req->nbytes > (1 << 16))) {
-		ctx->fallback_req.base.tfm = crypto_ahash_tfm(ctx->fallback);
-		ctx->fallback_req.base.flags =
+		struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
+
+		ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
+		rctx->fallback_req.base.flags =
 			req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
-		ctx->fallback_req.nbytes = req->nbytes;
-		ctx->fallback_req.src = req->src;
-		ctx->fallback_req.result = req->result;
+		rctx->fallback_req.nbytes = req->nbytes;
+		rctx->fallback_req.src = req->src;
+		rctx->fallback_req.result = req->result;
 
-		return crypto_ahash_digest(&ctx->fallback_req);
+		return crypto_ahash_digest(&rctx->fallback_req);
 	}
 
 	n2_base_ctx_init(&ctx->base);
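
The oversized-request escape hatch keeps the same shape but now builds the fallback request in the per-request context too; judging by the surrounding comment, jobs above 2^16 bytes cannot be described to the hardware at all, so they are handed to the fallback in a single crypto_ahash_digest() call.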
@@ -472,9 +477,8 @@ static int n2_hash_async_digest(struct ahash_request *req,

 static int n2_md5_async_digest(struct ahash_request *req)
 {
-	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
-	struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
-	struct md5_state *m = &ctx->u.md5;
+	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
+	struct md5_state *m = &rctx->u.md5;
 
 	if (unlikely(req->nbytes == 0)) {
 		static const char md5_zero[MD5_DIGEST_SIZE] = {
@@ -497,9 +501,8 @@ static int n2_md5_async_digest(struct ahash_request *req)

 static int n2_sha1_async_digest(struct ahash_request *req)
 {
-	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
-	struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
-	struct sha1_state *s = &ctx->u.sha1;
+	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
+	struct sha1_state *s = &rctx->u.sha1;
 
 	if (unlikely(req->nbytes == 0)) {
 		static const char sha1_zero[SHA1_DIGEST_SIZE] = {
@@ -524,9 +527,8 @@ static int n2_sha1_async_digest(struct ahash_request *req)

 static int n2_sha256_async_digest(struct ahash_request *req)
 {
-	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
-	struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
-	struct sha256_state *s = &ctx->u.sha256;
+	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
+	struct sha256_state *s = &rctx->u.sha256;
 
 	if (req->nbytes == 0) {
 		static const char sha256_zero[SHA256_DIGEST_SIZE] = {
@@ -555,9 +557,8 @@ static int n2_sha256_async_digest(struct ahash_request *req)

 static int n2_sha224_async_digest(struct ahash_request *req)
 {
-	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
-	struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
-	struct sha256_state *s = &ctx->u.sha256;
+	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
+	struct sha256_state *s = &rctx->u.sha256;
 
 	if (req->nbytes == 0) {
 		static const char sha224_zero[SHA224_DIGEST_SIZE] = {
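
The four digest entry points (MD5, SHA1, SHA256, SHA224) all get the same one-line conversion: the hash state union now lives in the request context, so every outstanding digest owns a private copy of its MD5/SHA state.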
@@ -1398,7 +1399,7 @@ static int find_devino_index(struct of_device *dev, struct spu_mdesc_info *ip,

 	intr = ip->ino_table[i].intr;
 
-	dev_intrs = of_get_property(dev->node, "interrupts", NULL);
+	dev_intrs = of_get_property(dev->dev.of_node, "interrupts", NULL);
 	if (!dev_intrs)
 		return -ENODEV;
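
This and the remaining dev->node conversions track the of_device rework merged in the same cycle: the device-tree node pointer moved from the bus-specific struct of_device into the generic struct device, hence dev->dev.of_node.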

@@ -1449,7 +1450,7 @@ static int queue_cache_init(void)
 {
 	if (!queue_cache[HV_NCS_QTYPE_MAU - 1])
 		queue_cache[HV_NCS_QTYPE_MAU - 1] =
-			kmem_cache_create("cwq_queue",
+			kmem_cache_create("mau_queue",
 					  (MAU_NUM_ENTRIES *
 					   MAU_ENTRY_SIZE),
 					  MAU_ENTRY_SIZE, 0, NULL);
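
The MAU queue cache had been created with a copy-pasted "cwq_queue" name. kmem_cache names identify caches in /proc/slabinfo and in the slab sysfs hierarchy, so the MAU cache now gets its own "mau_queue" name.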
@@ -1574,7 +1575,7 @@ static int spu_mdesc_walk_arcs(struct mdesc_handle *mdesc,
 		id = mdesc_get_property(mdesc, tgt, "id", NULL);
 		if (table[*id] != NULL) {
 			dev_err(&dev->dev, "%s: SPU cpu slot already set.\n",
-				dev->node->full_name);
+				dev->dev.of_node->full_name);
 			return -EINVAL;
 		}
 		cpu_set(*id, p->sharing);
@@ -1595,7 +1596,7 @@ static int handle_exec_unit(struct spu_mdesc_info *ip, struct list_head *list,
 	p = kzalloc(sizeof(struct spu_queue), GFP_KERNEL);
 	if (!p) {
 		dev_err(&dev->dev, "%s: Could not allocate SPU queue.\n",
-			dev->node->full_name);
+			dev->dev.of_node->full_name);
 		return -ENOMEM;
 	}

@@ -1684,7 +1685,7 @@ static int __devinit grab_mdesc_irq_props(struct mdesc_handle *mdesc,
 	const unsigned int *reg;
 	u64 node;
 
-	reg = of_get_property(dev->node, "reg", NULL);
+	reg = of_get_property(dev->dev.of_node, "reg", NULL);
 	if (!reg)
 		return -ENODEV;

@@ -1836,7 +1837,7 @@ static int __devinit n2_crypto_probe(struct of_device *dev,

 	n2_spu_driver_version();
 
-	full_name = dev->node->full_name;
+	full_name = dev->dev.of_node->full_name;
 	pr_info("Found N2CP at %s\n", full_name);
 
 	np = alloc_n2cp();
@@ -1948,7 +1949,7 @@ static int __devinit n2_mau_probe(struct of_device *dev,

 	n2_spu_driver_version();
 
-	full_name = dev->node->full_name;
+	full_name = dev->dev.of_node->full_name;
 	pr_info("Found NCP at %s\n", full_name);
 
 	mp = alloc_ncp();
@@ -2034,8 +2035,11 @@ static struct of_device_id n2_crypto_match[] = {
 MODULE_DEVICE_TABLE(of, n2_crypto_match);
 
 static struct of_platform_driver n2_crypto_driver = {
-	.name		=	"n2cp",
-	.match_table	=	n2_crypto_match,
+	.driver = {
+		.name		=	"n2cp",
+		.owner		=	THIS_MODULE,
+		.of_match_table	=	n2_crypto_match,
+	},
 	.probe		=	n2_crypto_probe,
 	.remove		=	__devexit_p(n2_crypto_remove),
 };
@@ -2055,8 +2059,11 @@ static struct of_device_id n2_mau_match[] = {
 MODULE_DEVICE_TABLE(of, n2_mau_match);
 
 static struct of_platform_driver n2_mau_driver = {
-	.name		=	"ncp",
-	.match_table	=	n2_mau_match,
+	.driver = {
+		.name		=	"ncp",
+		.owner		=	THIS_MODULE,
+		.of_match_table	=	n2_mau_match,
+	},
 	.probe		=	n2_mau_probe,
 	.remove		=	__devexit_p(n2_mau_remove),
 };
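
Both driver declarations change shape for the same reason: of_platform_driver now embeds a struct device_driver, and the match table hangs off it as .of_match_table rather than the old top-level .match_table. The general post-conversion form, as a sketch with placeholder names (only the structure mirrors the conversion above):

	static int __devinit my_probe(struct of_device *op,
				      const struct of_device_id *match);
	static int __devexit my_remove(struct of_device *op);

	/* "my_*", "mydrv" and the compatible string are placeholders. */
	static const struct of_device_id my_match[] = {
		{ .compatible = "vendor,device" },
		{},
	};
	MODULE_DEVICE_TABLE(of, my_match);

	static struct of_platform_driver my_driver = {
		.driver	= {
			.name		= "mydrv",
			.owner		= THIS_MODULE,
			.of_match_table	= my_match,
		},
		.probe	= my_probe,
		.remove	= __devexit_p(my_remove),
	};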