Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit c692cb96 authored by Neeraj Soni, committed by Veerabhadrarao Badiganti
Browse files

Resolve merge conflict and enable HW FBE for ext4 fs



HW File Based Encryption (FBE) uses Crypto Engine to
encrypt the user data with unique key for each file.
File name and data both are encrypted with this feature.
 - security/pfk: New module to support per file
   encryption using CE.
 - fs/ext4: changes made to support using crypto engine
   to encrypt the data.
Other changes made to provide support framework for per
file encryption.

Change-Id: I82b05a73b10ad8c26b0e400cdf246c67a8060f0e
Signed-off-by: Neeraj Soni <neersoni@codeaurora.org>
parent 897e986e
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -589,7 +589,7 @@ void __bio_clone_fast(struct bio *bio, struct bio *bio_src)
	bio->bi_opf = bio_src->bi_opf;
	bio->bi_iter = bio_src->bi_iter;
	bio->bi_io_vec = bio_src->bi_io_vec;

	bio->bi_dio_inode = bio_src->bi_dio_inode;
	bio_clone_blkcg_association(bio, bio_src);
}
EXPORT_SYMBOL(__bio_clone_fast);
+10 −1
Original line number Diff line number Diff line
@@ -6,7 +6,7 @@
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include <linux/pfk.h>
#include <trace/events/block.h>

#include "blk.h"
@@ -725,6 +725,11 @@ static void blk_account_io_merge(struct request *req)
	}
}

/*
 * Return true when the per-file-key (PFK) layer forbids merging the two
 * bios into one request (e.g. they are encrypted with different keys).
 */
static bool crypto_not_mergeable(const struct bio *bio, const struct bio *nxt)
{
	bool merge_allowed = pfk_allow_merge_bio(bio, nxt);

	return !merge_allowed;
}

/*
 * Has to be called with the request spinlock acquired
 */
@@ -752,6 +757,8 @@ static int attempt_merge(struct request_queue *q, struct request *req,
	    !blk_write_same_mergeable(req->bio, next->bio))
		return 0;

	if (crypto_not_mergeable(req->bio, next->bio))
		return 0;
	/*
	 * If we are allowed to merge, then append bio list
	 * from next to rq and release next. merge_requests_fn
@@ -862,6 +869,8 @@ bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
	    !blk_write_same_mergeable(rq->bio, bio))
		return false;

	if (crypto_not_mergeable(rq->bio, bio))
		return false;
	return true;
}

+6 −2
Original line number Diff line number Diff line
@@ -152,6 +152,9 @@ static int qti_ice_setting_config(struct request *req,
		return -EPERM;
	}

	if (!setting)
		return -EINVAL;

	if ((short)(crypto_data->key_index) >= 0) {

		memcpy(&setting->crypto_data, crypto_data,
@@ -1488,7 +1491,7 @@ static int qcom_ice_config_start(struct platform_device *pdev,
	bool is_pfe = false;
	sector_t data_size;

	if (!pdev || !req || !setting) {
	if (!pdev || !req) {
		pr_err("%s: Invalid params passed\n", __func__);
		return -EINVAL;
	}
@@ -1507,6 +1510,7 @@ static int qcom_ice_config_start(struct platform_device *pdev,
		/* It is not an error to have a request with no  bio */
		return 0;
	}
    //pr_err("%s bio is %pK\n", __func__, req->bio);

	ret = pfk_load_key_start(req->bio, &pfk_crypto_data, &is_pfe, async);
	if (is_pfe) {
@@ -1664,7 +1668,7 @@ static struct ice_device *get_ice_device_from_storage_type

	list_for_each_entry(ice_dev, &ice_devices, list) {
		if (!strcmp(ice_dev->ice_instance_type, storage_type)) {
			pr_info("%s: found ice device %p\n", __func__, ice_dev);
			pr_debug("%s: ice device %pK\n", __func__, ice_dev);
			return ice_dev;
		}
	}
+12 −32
Original line number Diff line number Diff line
/*
 * Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
 * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
@@ -172,17 +172,15 @@ int ufs_qcom_ice_get_dev(struct ufs_qcom_host *qcom_host)
static void ufs_qcom_ice_cfg_work(struct work_struct *work)
{
	unsigned long flags;
	struct ice_data_setting ice_set;
	struct ufs_qcom_host *qcom_host =
		container_of(work, struct ufs_qcom_host, ice_cfg_work);
	struct request *req_pending = NULL;

	if (!qcom_host->ice.vops->config_start)
		return;

	spin_lock_irqsave(&qcom_host->ice_work_lock, flags);
	req_pending = qcom_host->req_pending;
	if (!req_pending) {
	if (!qcom_host->req_pending) {
		qcom_host->work_pending = false;
		spin_unlock_irqrestore(&qcom_host->ice_work_lock, flags);
		return;
	}
@@ -191,24 +189,15 @@ static void ufs_qcom_ice_cfg_work(struct work_struct *work)
	/*
	 * config_start is called again as previous attempt returned -EAGAIN,
	 * this call shall now take care of the necessary key setup.
	 * 'ice_set' will not actually be used, instead the next call to
	 * config_start() for this request, in the normal call flow, will
	 * succeed as the key has now been setup.
	 */
	qcom_host->ice.vops->config_start(qcom_host->ice.pdev,
		qcom_host->req_pending, &ice_set, false);
		qcom_host->req_pending, NULL, false);

	spin_lock_irqsave(&qcom_host->ice_work_lock, flags);
	qcom_host->req_pending = NULL;
	qcom_host->work_pending = false;
	spin_unlock_irqrestore(&qcom_host->ice_work_lock, flags);

	/*
	 * Resume with requests processing. We assume config_start has been
	 * successful, but even if it wasn't we still must resume in order to
	 * allow for the request to be retried.
	 */
	ufshcd_scsi_unblock_requests(qcom_host->hba);

}

/**
@@ -294,18 +283,14 @@ int ufs_qcom_ice_req_setup(struct ufs_qcom_host *qcom_host,
			 * requires a non-atomic context, this means we should
			 * call the function again from the worker thread to do
			 * the configuration. For this request the error will
			 * propagate so it will be re-queued and until the
			 * configuration is is completed we block further
			 * request processing.
			 * propagate so it will be re-queued.
			 */
			if (err == -EAGAIN) {
				dev_dbg(qcom_host->hba->dev,
					"%s: scheduling task for ice setup\n",
					__func__);

				if (!qcom_host->req_pending) {
					ufshcd_scsi_block_requests(
						qcom_host->hba);
				if (!qcom_host->work_pending) {
					qcom_host->req_pending = cmd->request;

					if (!queue_work(ice_workqueue,
@@ -316,10 +301,9 @@ int ufs_qcom_ice_req_setup(struct ufs_qcom_host *qcom_host,
						&qcom_host->ice_work_lock,
						flags);

						ufshcd_scsi_unblock_requests(
							qcom_host->hba);
						return err;
					}
					qcom_host->work_pending = true;
				}

			} else {
@@ -418,9 +402,7 @@ int ufs_qcom_ice_cfg_start(struct ufs_qcom_host *qcom_host,
			 * requires a non-atomic context, this means we should
			 * call the function again from the worker thread to do
			 * the configuration. For this request the error will
			 * propagate so it will be re-queued and until the
			 * configuration is is completed we block further
			 * request processing.
			 * propagate so it will be re-queued.
			 */
			if (err == -EAGAIN) {

@@ -428,9 +410,8 @@ int ufs_qcom_ice_cfg_start(struct ufs_qcom_host *qcom_host,
					"%s: scheduling task for ice setup\n",
					__func__);

				if (!qcom_host->req_pending) {
					ufshcd_scsi_block_requests(
						qcom_host->hba);
				if (!qcom_host->work_pending) {

					qcom_host->req_pending = cmd->request;
					if (!queue_work(ice_workqueue,
						&qcom_host->ice_cfg_work)) {
@@ -440,10 +421,9 @@ int ufs_qcom_ice_cfg_start(struct ufs_qcom_host *qcom_host,
						&qcom_host->ice_work_lock,
						flags);

						ufshcd_scsi_unblock_requests(
							qcom_host->hba);
						return err;
					}
					qcom_host->work_pending = true;
				}

			} else {
+1 −0
Original line number Diff line number Diff line
@@ -375,6 +375,7 @@ struct ufs_qcom_host {
	struct work_struct ice_cfg_work;
	struct request *req_pending;
	struct ufs_vreg *vddp_ref_clk;
	bool work_pending;
};

static inline u32
Loading