Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit fe5289af authored by Blagovest Kolenichev's avatar Blagovest Kolenichev
Browse files

Revert "Enable HW File Based Encryption on ext4 file system"



This reverts commit 7b939265.

This is temporary due to hard conflicts against android-4.9.84.

Intake is switched from android-4.9-o to android-4.9 with LSK
tag 4.9.84 and more time is needed to rework the FBE changeset
[1] according to the new upstream changes coming from
android-4.9.

[1] c23efa56 security: pfk: Error code was overwritten
    7b939265 Enable HW File Based Encryption on ext4 file system

Change-Id: I6b539a549fe0016339558025807910c3946f1daf
Signed-off-by: default avatarBlagovest Kolenichev <bkolenichev@codeaurora.org>
parent f689e9e0
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -589,7 +589,7 @@ void __bio_clone_fast(struct bio *bio, struct bio *bio_src)
	bio->bi_opf = bio_src->bi_opf;
	bio->bi_iter = bio_src->bi_iter;
	bio->bi_io_vec = bio_src->bi_io_vec;
	bio->bi_dio_inode = bio_src->bi_dio_inode;

	bio_clone_blkcg_association(bio, bio_src);
}
EXPORT_SYMBOL(__bio_clone_fast);
+1 −10
Original line number Diff line number Diff line
@@ -6,7 +6,7 @@
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>
#include <linux/pfk.h>

#include <trace/events/block.h>

#include "blk.h"
@@ -725,11 +725,6 @@ static void blk_account_io_merge(struct request *req)
	}
}

static bool crypto_not_mergeable(const struct bio *bio, const struct bio *nxt)
{
	return (!pfk_allow_merge_bio(bio, nxt));
}

/*
 * Has to be called with the request spinlock acquired
 */
@@ -757,8 +752,6 @@ static int attempt_merge(struct request_queue *q, struct request *req,
	    !blk_write_same_mergeable(req->bio, next->bio))
		return 0;

	if (crypto_not_mergeable(req->bio, next->bio))
		return 0;
	/*
	 * If we are allowed to merge, then append bio list
	 * from next to rq and release next. merge_requests_fn
@@ -869,8 +862,6 @@ bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
	    !blk_write_same_mergeable(rq->bio, bio))
		return false;

	if (crypto_not_mergeable(rq->bio, bio))
		return false;
	return true;
}

+20 −6
Original line number Diff line number Diff line
@@ -25,8 +25,26 @@
#include <soc/qcom/scm.h>
#include <soc/qcom/qseecomi.h>
#include "iceregs.h"

#ifdef CONFIG_PFK
#include <linux/pfk.h>
#else
#include <linux/bio.h>
/*
 * CONFIG_PFK disabled: provide no-op stubs for the PFK API used in this
 * file so it still compiles. Each stub reports success (returns 0) and
 * performs no work.
 *
 * NOTE(review): the stubs never write *is_pfe, so callers must
 * pre-initialize it (e.g. qcom_ice_config_start starts with
 * "bool is_pfe = false;") — confirm every caller does.
 */
static inline int pfk_load_key_start(const struct bio *bio,
	struct ice_crypto_setting *ice_setting, bool *is_pfe, bool async)
{
	return 0;
}

static inline int pfk_load_key_end(const struct bio *bio, bool *is_pfe)
{
	return 0;
}

static inline void pfk_clear_on_reset(void)
{
}
#endif

#define TZ_SYSCALL_CREATE_SMC_ID(o, s, f) \
	((uint32_t)((((o & 0x3f) << 24) | (s & 0xff) << 8) | (f & 0xff)))
@@ -126,9 +144,6 @@ static int qti_ice_setting_config(struct request *req,
		return -EPERM;
	}

	if (!setting)
		return -EINVAL;

	if ((short)(crypto_data->key_index) >= 0) {

		memcpy(&setting->crypto_data, crypto_data,
@@ -1436,7 +1451,7 @@ static int qcom_ice_config_start(struct platform_device *pdev,
	int ret = 0;
	bool is_pfe = false;

	if (!pdev || !req) {
	if (!pdev || !req || !setting) {
		pr_err("%s: Invalid params passed\n", __func__);
		return -EINVAL;
	}
@@ -1455,7 +1470,6 @@ static int qcom_ice_config_start(struct platform_device *pdev,
		/* It is not an error to have a request with no bio */
		return 0;
	}
    //pr_err("%s bio is %pK\n", __func__, req->bio);

	ret = pfk_load_key_start(req->bio, &pfk_crypto_data, &is_pfe, async);
	if (is_pfe) {
@@ -1619,7 +1633,7 @@ static struct ice_device *get_ice_device_from_storage_type

	list_for_each_entry(ice_dev, &ice_devices, list) {
		if (!strcmp(ice_dev->ice_instance_type, storage_type)) {
			pr_debug("%s: ice device %pK\n", __func__, ice_dev);
			pr_info("%s: found ice device %p\n", __func__, ice_dev);
			return ice_dev;
		}
	}
+31 −11
Original line number Diff line number Diff line
@@ -172,15 +172,17 @@ int ufs_qcom_ice_get_dev(struct ufs_qcom_host *qcom_host)
static void ufs_qcom_ice_cfg_work(struct work_struct *work)
{
	unsigned long flags;
	struct ice_data_setting ice_set;
	struct ufs_qcom_host *qcom_host =
		container_of(work, struct ufs_qcom_host, ice_cfg_work);
	struct request *req_pending = NULL;

	if (!qcom_host->ice.vops->config_start)
		return;

	spin_lock_irqsave(&qcom_host->ice_work_lock, flags);
	if (!qcom_host->req_pending) {
		qcom_host->work_pending = false;
	req_pending = qcom_host->req_pending;
	if (!req_pending) {
		spin_unlock_irqrestore(&qcom_host->ice_work_lock, flags);
		return;
	}
@@ -189,15 +191,24 @@ static void ufs_qcom_ice_cfg_work(struct work_struct *work)
	/*
	 * config_start is called again as previous attempt returned -EAGAIN,
	 * this call shall now take care of the necessary key setup.
	 * 'ice_set' will not actually be used, instead the next call to
	 * config_start() for this request, in the normal call flow, will
	 * succeed as the key has now been setup.
	 */
	qcom_host->ice.vops->config_start(qcom_host->ice.pdev,
		qcom_host->req_pending, NULL, false);
		qcom_host->req_pending, &ice_set, false);

	spin_lock_irqsave(&qcom_host->ice_work_lock, flags);
	qcom_host->req_pending = NULL;
	qcom_host->work_pending = false;
	spin_unlock_irqrestore(&qcom_host->ice_work_lock, flags);

	/*
	 * Resume with requests processing. We assume config_start has been
	 * successful, but even if it wasn't we still must resume in order to
	 * allow for the request to be retried.
	 */
	ufshcd_scsi_unblock_requests(qcom_host->hba);

}

/**
@@ -283,14 +294,18 @@ int ufs_qcom_ice_req_setup(struct ufs_qcom_host *qcom_host,
			 * requires a non-atomic context, this means we should
			 * call the function again from the worker thread to do
			 * the configuration. For this request the error will
			 * propagate so it will be re-queued.
			 * propagate so it will be re-queued and until the
			 * configuration is completed we block further
			 * request processing.
			 */
			if (err == -EAGAIN) {
				dev_dbg(qcom_host->hba->dev,
					"%s: scheduling task for ice setup\n",
					__func__);

				if (!qcom_host->work_pending) {
				if (!qcom_host->req_pending) {
					ufshcd_scsi_block_requests(
						qcom_host->hba);
					qcom_host->req_pending = cmd->request;

					if (!queue_work(ice_workqueue,
@@ -301,9 +316,10 @@ int ufs_qcom_ice_req_setup(struct ufs_qcom_host *qcom_host,
						&qcom_host->ice_work_lock,
						flags);

						ufshcd_scsi_unblock_requests(
							qcom_host->hba);
						return err;
					}
					qcom_host->work_pending = true;
				}

			} else {
@@ -402,7 +418,9 @@ int ufs_qcom_ice_cfg_start(struct ufs_qcom_host *qcom_host,
			 * requires a non-atomic context, this means we should
			 * call the function again from the worker thread to do
			 * the configuration. For this request the error will
			 * propagate so it will be re-queued.
			 * propagate so it will be re-queued and until the
			 * configuration is completed we block further
			 * request processing.
			 */
			if (err == -EAGAIN) {

@@ -410,8 +428,9 @@ int ufs_qcom_ice_cfg_start(struct ufs_qcom_host *qcom_host,
					"%s: scheduling task for ice setup\n",
					__func__);

				if (!qcom_host->work_pending) {

				if (!qcom_host->req_pending) {
					ufshcd_scsi_block_requests(
						qcom_host->hba);
					qcom_host->req_pending = cmd->request;
					if (!queue_work(ice_workqueue,
						&qcom_host->ice_cfg_work)) {
@@ -421,9 +440,10 @@ int ufs_qcom_ice_cfg_start(struct ufs_qcom_host *qcom_host,
						&qcom_host->ice_work_lock,
						flags);

						ufshcd_scsi_unblock_requests(
							qcom_host->hba);
						return err;
					}
					qcom_host->work_pending = true;
				}

			} else {
+0 −1
Original line number Diff line number Diff line
@@ -375,7 +375,6 @@ struct ufs_qcom_host {
	struct work_struct ice_cfg_work;
	struct request *req_pending;
	struct ufs_vreg *vddp_ref_clk;
	bool work_pending;
};

static inline u32
Loading