Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit ae5437cb authored by Linux Build Service Account, committed by Gerrit - the friendly Code Review server
Browse files

Merge "md: dm-req-crypt: Fix compilation errors"

parents db4c6364 ddc48128
Loading
Loading
Loading
Loading
+2 −2
Original line number Diff line number Diff line
@@ -4289,7 +4289,7 @@ int qcrypto_cipher_set_device(struct ablkcipher_request *req, unsigned int dev)
};
EXPORT_SYMBOL(qcrypto_cipher_set_device);

int qcrypto_cipher_set_device_hw(struct ablkcipher_request *req, u32 dev,
int qcrypto_cipher_set_device_hw(struct skcipher_request *req, u32 dev,
			u32 hw_inst)
{
	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
@@ -4335,7 +4335,7 @@ int qcrypto_ahash_set_device(struct ahash_request *req, unsigned int dev)
};
EXPORT_SYMBOL(qcrypto_ahash_set_device);

int qcrypto_cipher_set_flag(struct ablkcipher_request *req, unsigned int flags)
int qcrypto_cipher_set_flag(struct skcipher_request *req, unsigned int flags)
{
	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct crypto_priv *cp = ctx->cp;
+48 −46
Original line number Diff line number Diff line
@@ -33,6 +33,8 @@

#include <asm/page.h>
#include <asm/unaligned.h>
#include <crypto/skcipher.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <crypto/hash.h>
#include <crypto/md5.h>
@@ -88,7 +90,7 @@ static mempool_t *req_io_pool;
static mempool_t *req_page_pool;
static mempool_t *req_scatterlist_pool;
static bool is_fde_enabled;
static struct crypto_ablkcipher *tfm;
static struct crypto_skcipher *tfm;
static unsigned int encryption_mode;
static struct ice_crypto_setting *ice_settings;

@@ -323,7 +325,7 @@ static void req_cryptd_crypt_read_convert(struct req_dm_crypt_io *io)
		DMERR("%s req_sg_read allocation failed\n",
						__func__);
		error = DM_REQ_CRYPT_ERROR;
		goto ablkcipher_req_alloc_failure;
		goto skcipher_req_alloc_failure;
	}
	memset(req_sg_read, 0, sizeof(struct scatterlist) * MAX_SG_LIST);

@@ -331,7 +333,7 @@ static void req_cryptd_crypt_read_convert(struct req_dm_crypt_io *io)
	if ((total_sg_len <= 0) || (total_sg_len > MAX_SG_LIST)) {
		DMERR("%s Request Error%d", __func__, total_sg_len);
		error = DM_REQ_CRYPT_ERROR;
		goto ablkcipher_req_alloc_failure;
		goto skcipher_req_alloc_failure;
	}

	total_bytes_in_req = clone->__data_len;
@@ -339,7 +341,7 @@ static void req_cryptd_crypt_read_convert(struct req_dm_crypt_io *io)
		DMERR("%s total_bytes_in_req > 512 MB %d",
				__func__, total_bytes_in_req);
		error = DM_REQ_CRYPT_ERROR;
		goto ablkcipher_req_alloc_failure;
		goto skcipher_req_alloc_failure;
	}


@@ -354,7 +356,7 @@ static void req_cryptd_crypt_read_convert(struct req_dm_crypt_io *io)
		if (!split_io) {
			DMERR("%s split_io allocation failed\n", __func__);
			error = DM_REQ_CRYPT_ERROR;
			goto ablkcipher_req_alloc_failure;
			goto skcipher_req_alloc_failure;
		}

		split_io[0].req_split_sg_read = sg = req_sg_read;
@@ -389,7 +391,7 @@ static void req_cryptd_crypt_read_convert(struct req_dm_crypt_io *io)
		if (!split_io) {
			DMERR("%s split_io allocation failed\n", __func__);
			error = DM_REQ_CRYPT_ERROR;
			goto ablkcipher_req_alloc_failure;
			goto skcipher_req_alloc_failure;
		}
		split_io->engine = &curr_engine_list[0];
		init_completion(&split_io->result.completion);
@@ -406,7 +408,7 @@ static void req_cryptd_crypt_read_convert(struct req_dm_crypt_io *io)
			DMERR("%s error = %d for request\n",
				 __func__, split_io->result.err);
			error = DM_REQ_CRYPT_ERROR;
			goto ablkcipher_req_alloc_failure;
			goto skcipher_req_alloc_failure;
		}
	} else {
		for (i = 0; i < (engine_list_total); i++) {
@@ -416,12 +418,12 @@ static void req_cryptd_crypt_read_convert(struct req_dm_crypt_io *io)
				DMERR("%s error = %d for %dst request\n",
					 __func__, split_io[i].result.err, i);
				error = DM_REQ_CRYPT_ERROR;
				goto ablkcipher_req_alloc_failure;
				goto skcipher_req_alloc_failure;
			}
		}
	}
	error = 0;
ablkcipher_req_alloc_failure:
skcipher_req_alloc_failure:

	mempool_free(req_sg_read, req_scatterlist_pool);
	kfree(split_io);
@@ -463,7 +465,7 @@ static void req_cryptd_crypt_write_convert(struct req_dm_crypt_io *io)
		total_bytes_in_req = 0, error = DM_MAPIO_REMAPPED, rc = 0;
	struct req_iterator iter;
	struct req_iterator iter1;
	struct ablkcipher_request *req = NULL;
	struct skcipher_request *req = NULL;
	struct req_crypt_result result;
	struct bio_vec bvec;
	struct scatterlist *req_sg_in = NULL;
@@ -497,15 +499,15 @@ static void req_cryptd_crypt_write_convert(struct req_dm_crypt_io *io)

	req_crypt_inc_pending(io);

	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		DMERR("%s ablkcipher request allocation failed\n",
		DMERR("%s skcipher request allocation failed\n",
					__func__);
		error = DM_REQ_CRYPT_ERROR;
		goto ablkcipher_req_alloc_failure;
		goto skcipher_req_alloc_failure;
	}

	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				req_crypt_cipher_complete, &result);

	mutex_lock(&engine_list_mutex);
@@ -525,7 +527,7 @@ static void req_cryptd_crypt_write_convert(struct req_dm_crypt_io *io)
		DMERR("%s Unknown Key ID!\n", __func__);
		error = DM_REQ_CRYPT_ERROR;
		mutex_unlock(&engine_list_mutex);
		goto ablkcipher_req_alloc_failure;
		goto skcipher_req_alloc_failure;
	}

	engine = curr_engine_list[*engine_cursor];
@@ -538,7 +540,7 @@ static void req_cryptd_crypt_write_convert(struct req_dm_crypt_io *io)
		DMERR("%s qcrypto_cipher_set_device_hw failed with err %d\n",
				__func__, err);
		mutex_unlock(&engine_list_mutex);
		goto ablkcipher_req_alloc_failure;
		goto skcipher_req_alloc_failure;
	}
	mutex_unlock(&engine_list_mutex);

@@ -546,8 +548,8 @@ static void req_cryptd_crypt_write_convert(struct req_dm_crypt_io *io)

	(dm_qcrypto_func.cipher_flag)(req,
		QCRYPTO_CTX_USE_PIPE_KEY | QCRYPTO_CTX_XTS_DU_SIZE_512B);
	crypto_ablkcipher_clear_flags(tfm, ~0);
	crypto_ablkcipher_setkey(tfm, NULL, KEY_SIZE_XTS);
	crypto_skcipher_clear_flags(tfm, ~0);
	crypto_skcipher_setkey(tfm, NULL, KEY_SIZE_XTS);

	req_sg_in = (struct scatterlist *)mempool_alloc(req_scatterlist_pool,
								GFP_KERNEL);
@@ -555,7 +557,7 @@ static void req_cryptd_crypt_write_convert(struct req_dm_crypt_io *io)
		DMERR("%s req_sg_in allocation failed\n",
					__func__);
		error = DM_REQ_CRYPT_ERROR;
		goto ablkcipher_req_alloc_failure;
		goto skcipher_req_alloc_failure;
	}
	memset(req_sg_in, 0, sizeof(struct scatterlist) * MAX_SG_LIST);

@@ -565,7 +567,7 @@ static void req_cryptd_crypt_write_convert(struct req_dm_crypt_io *io)
		DMERR("%s req_sg_out allocation failed\n",
					__func__);
		error = DM_REQ_CRYPT_ERROR;
		goto ablkcipher_req_alloc_failure;
		goto skcipher_req_alloc_failure;
	}
	memset(req_sg_out, 0, sizeof(struct scatterlist) * MAX_SG_LIST);

@@ -574,7 +576,7 @@ static void req_cryptd_crypt_write_convert(struct req_dm_crypt_io *io)
			(total_sg_len_req_in > MAX_SG_LIST)) {
		DMERR("%s Request Error%d", __func__, total_sg_len_req_in);
		error = DM_REQ_CRYPT_ERROR;
		goto ablkcipher_req_alloc_failure;
		goto skcipher_req_alloc_failure;
	}

	total_bytes_in_req = clone->__data_len;
@@ -582,7 +584,7 @@ static void req_cryptd_crypt_write_convert(struct req_dm_crypt_io *io)
		DMERR("%s total_bytes_in_req > 512 MB %d",
				__func__, total_bytes_in_req);
		error = DM_REQ_CRYPT_ERROR;
		goto ablkcipher_req_alloc_failure;
		goto skcipher_req_alloc_failure;
	}

	rq_for_each_segment(bvec, clone, iter) {
@@ -614,16 +616,16 @@ static void req_cryptd_crypt_write_convert(struct req_dm_crypt_io *io)
			(total_sg_len_req_out > MAX_SG_LIST)) {
		DMERR("%s Request Error %d", __func__, total_sg_len_req_out);
		error = DM_REQ_CRYPT_ERROR_AFTER_PAGE_MALLOC;
		goto ablkcipher_req_alloc_failure;
		goto skcipher_req_alloc_failure;
	}

	memset(IV, 0, AES_XTS_IV_LEN);
	memcpy(IV, &clone->__sector, sizeof(sector_t));

	ablkcipher_request_set_crypt(req, req_sg_in, req_sg_out,
	skcipher_request_set_crypt(req, req_sg_in, req_sg_out,
			total_bytes_in_req, (void *) IV);

	rc = crypto_ablkcipher_encrypt(req);
	rc = crypto_skcipher_encrypt(req);

	switch (rc) {
	case 0:
@@ -640,13 +642,13 @@ static void req_cryptd_crypt_write_convert(struct req_dm_crypt_io *io)
			DMERR("%s error = %d encrypting the request\n",
				 __func__, result.err);
			error = DM_REQ_CRYPT_ERROR_AFTER_PAGE_MALLOC;
			goto ablkcipher_req_alloc_failure;
			goto skcipher_req_alloc_failure;
		}
		break;

	default:
		error = DM_REQ_CRYPT_ERROR_AFTER_PAGE_MALLOC;
		goto ablkcipher_req_alloc_failure;
		goto skcipher_req_alloc_failure;
	}

	__rq_for_each_bio(bio_src, clone) {
@@ -661,9 +663,9 @@ static void req_cryptd_crypt_write_convert(struct req_dm_crypt_io *io)
	 */
	blk_recalc_rq_segments(clone);

ablkcipher_req_alloc_failure:
skcipher_req_alloc_failure:
	if (req)
		ablkcipher_request_free(req);
		skcipher_request_free(req);

	if (error == DM_REQ_CRYPT_ERROR_AFTER_PAGE_MALLOC) {
		rq_for_each_segment(bvec, clone, iter1) {
@@ -727,7 +729,7 @@ static void req_cryptd_split_req_queue_cb(struct work_struct *work)
{
	struct req_dm_split_req_io *io =
			container_of(work, struct req_dm_split_req_io, work);
	struct ablkcipher_request *req = NULL;
	struct skcipher_request *req = NULL;
	struct req_crypt_result result;
	int err = 0;
	struct crypto_engine_entry *engine = NULL;
@@ -739,14 +741,14 @@ static void req_cryptd_split_req_queue_cb(struct work_struct *work)
		/* If io is not populated this should not be called */
		WARN_ON(1);
	}
	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		DMERR("%s ablkcipher request allocation failed\n", __func__);
		DMERR("%s skcipher request allocation failed\n", __func__);
		err = DM_REQ_CRYPT_ERROR;
		goto ablkcipher_req_alloc_failure;
		goto skcipher_req_alloc_failure;
	}

	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
					req_crypt_cipher_complete, &result);

	engine = io->engine;
@@ -756,19 +758,19 @@ static void req_cryptd_split_req_queue_cb(struct work_struct *work)
	if (err) {
		DMERR("%s qcrypto_cipher_set_device_hw failed with err %d\n",
				__func__, err);
		goto ablkcipher_req_alloc_failure;
		goto skcipher_req_alloc_failure;
	}
	init_completion(&result.completion);
	(dm_qcrypto_func.cipher_flag)(req,
		QCRYPTO_CTX_USE_PIPE_KEY | QCRYPTO_CTX_XTS_DU_SIZE_512B);

	crypto_ablkcipher_clear_flags(tfm, ~0);
	crypto_ablkcipher_setkey(tfm, NULL, KEY_SIZE_XTS);
	crypto_skcipher_clear_flags(tfm, ~0);
	crypto_skcipher_setkey(tfm, NULL, KEY_SIZE_XTS);

	ablkcipher_request_set_crypt(req, io->req_split_sg_read,
	skcipher_request_set_crypt(req, io->req_split_sg_read,
			io->req_split_sg_read, io->size, (void *) io->IV);

	err = crypto_ablkcipher_decrypt(req);
	err = crypto_skcipher_decrypt(req);
	switch (err) {
	case 0:
		break;
@@ -784,18 +786,18 @@ static void req_cryptd_split_req_queue_cb(struct work_struct *work)
			DMERR("%s error = %d encrypting the request\n",
				 __func__, result.err);
			err = DM_REQ_CRYPT_ERROR;
			goto ablkcipher_req_alloc_failure;
			goto skcipher_req_alloc_failure;
		}
		break;

	default:
		err = DM_REQ_CRYPT_ERROR;
		goto ablkcipher_req_alloc_failure;
		goto skcipher_req_alloc_failure;
	}
	err = 0;
ablkcipher_req_alloc_failure:
skcipher_req_alloc_failure:
	if (req)
		ablkcipher_request_free(req);
		skcipher_request_free(req);

	req_crypt_split_io_complete(&io->result, err);
}
@@ -1029,7 +1031,7 @@ static void deconfigure_qcrypto(void)
	mutex_unlock(&engine_list_mutex);

	if (tfm) {
		crypto_free_ablkcipher(tfm);
		crypto_free_skcipher(tfm);
		tfm = NULL;
	}
}
@@ -1068,9 +1070,9 @@ static int configure_qcrypto(void)
	blk_queue_max_hw_sectors(q, DM_REQ_CRYPT_QUEUE_SIZE);

	/* Allocate the crypto alloc blk cipher and keep the handle */
	tfm = crypto_alloc_ablkcipher("qcom-xts(aes)", 0, 0);
	tfm = crypto_alloc_skcipher("qcom-xts(aes)", 0, 0);
	if (IS_ERR(tfm)) {
		DMERR("%s ablkcipher tfm allocation failed : error\n",
		DMERR("%s skcipher tfm allocation failed : error\n",
						 __func__);
		tfm = NULL;
		goto exit_err;
+5 −4
Original line number Diff line number Diff line
@@ -15,6 +15,7 @@

#include <linux/crypto.h>
#include <crypto/hash.h>
#include <crypto/skcipher.h>

#define QCRYPTO_CTX_KEY_MASK		0x000000ff
#define QCRYPTO_CTX_USE_HW_KEY		0x00000001
@@ -29,7 +30,7 @@ int qcrypto_cipher_set_device(struct ablkcipher_request *req, unsigned int dev);
int qcrypto_ahash_set_device(struct ahash_request *req, unsigned int dev);
/*int qcrypto_aead_set_device(struct aead_request *req, unsigned int dev);*/

int qcrypto_cipher_set_flag(struct ablkcipher_request *req, unsigned int flags);
int qcrypto_cipher_set_flag(struct skcipher_request *req, unsigned int flags);
int qcrypto_ahash_set_flag(struct ahash_request *req, unsigned int flags);
/*int qcrypto_aead_set_flag(struct aead_request *req, unsigned int flags);*/

@@ -47,16 +48,16 @@ struct crypto_engine_entry {
int qcrypto_get_num_engines(void);
void qcrypto_get_engine_list(size_t num_engines,
				struct crypto_engine_entry *arr);
int qcrypto_cipher_set_device_hw(struct ablkcipher_request *req,
int qcrypto_cipher_set_device_hw(struct skcipher_request *req,
				unsigned int fde_pfe,
				unsigned int hw_inst);


struct qcrypto_func_set {
	int (*cipher_set)(struct ablkcipher_request *req,
	int (*cipher_set)(struct skcipher_request *req,
			unsigned int fde_pfe,
			unsigned int hw_inst);
	int (*cipher_flag)(struct ablkcipher_request *req, unsigned int flags);
	int (*cipher_flag)(struct skcipher_request *req, unsigned int flags);
	int (*get_num_engines)(void);
	void (*get_engine_list)(size_t num_engines,
				struct crypto_engine_entry *arr);