Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 77931cca authored by Eric Biggers, committed by Barani Muthukumaran
Browse files

ANDROID: dm table: propagate inline encryption flag to dm devices



If all targets in a device-mapper table support inline encryption, mark
the mapped device as supporting inline encryption.  This will allow
filesystems such as ext4 to tell whether the hardware supports inline
encryption even in the case where there is an intervening dm device.

Change-Id: Ic83203f8cc8d440d50fefac62d7849ce035c657a
Signed-off-by: Eric Biggers <ebiggers@google.com>
Signed-off-by: Shivaprasad Hongal <shongal@codeaurora.org>
Signed-off-by: Barani Muthukumaran <bmuthuku@codeaurora.org>
parent 54e99a0a
Loading
Loading
Loading
Loading
+0 −9
Original line number Diff line number Diff line
@@ -54,15 +54,6 @@ static inline void queue_lockdep_assert_held(struct request_queue *q)
		lockdep_assert_held(q->queue_lock);
}

static inline void queue_flag_clear_unlocked(unsigned int flag,
					     struct request_queue *q)
{
	/*
	 * Once the queue has finished initializing and its kobject is
	 * still referenced, even the "unlocked" flag helpers require the
	 * queue lock to be held by the caller; assert that under lockdep.
	 */
	if (test_bit(QUEUE_FLAG_INIT_DONE, &q->queue_flags)) {
		if (kref_read(&q->kobj.kref))
			lockdep_assert_held(q->queue_lock);
	}
	__clear_bit(flag, &q->queue_flags);
}

static inline int queue_flag_test_and_clear(unsigned int flag,
					    struct request_queue *q)
{
+15 −0
Original line number Diff line number Diff line
@@ -1727,6 +1727,16 @@ static int queue_supports_sg_merge(struct dm_target *ti, struct dm_dev *dev,
	return q && !test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags);
}

/*
 * iterate_devices callout: report whether the request queue backing
 * @dev advertises inline-encryption support.  The unused parameters
 * are required by the dm_table iterate_devices callback signature.
 */
static int queue_supports_inline_encryption(struct dm_target *ti,
					    struct dm_dev *dev,
					    sector_t start, sector_t len,
					    void *data)
{
	struct request_queue *q;

	q = bdev_get_queue(dev->bdev);
	if (!q)
		return 0;
	/* Normalize to 0/1, matching the boolean contract of the callout. */
	return blk_queue_inlinecrypt(q) != 0;
}

static bool dm_table_all_devices_attribute(struct dm_table *t,
					   iterate_devices_callout_fn func)
{
@@ -1928,6 +1938,11 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
	else
		blk_queue_flag_set(QUEUE_FLAG_NO_SG_MERGE, q);

	if (dm_table_all_devices_attribute(t, queue_supports_inline_encryption))
		queue_flag_set_unlocked(QUEUE_FLAG_INLINECRYPT, q);
	else
		queue_flag_clear_unlocked(QUEUE_FLAG_INLINECRYPT, q);

	dm_table_verify_integrity(t);

	/*
+9 −0
Original line number Diff line number Diff line
@@ -891,6 +891,15 @@ static inline void queue_flag_set_unlocked(unsigned int flag,
	__set_bit(flag, &q->queue_flags);
}

/*
 * Clear a queue flag without taking the queue lock.
 *
 * After initialization is complete (QUEUE_FLAG_INIT_DONE) and while the
 * queue kobject is still referenced, flag updates must be serialized by
 * q->queue_lock even through this "unlocked" helper, so assert that
 * under lockdep before touching the bit.
 */
static inline void queue_flag_clear_unlocked(unsigned int flag,
					     struct request_queue *q)
{
	if (test_bit(QUEUE_FLAG_INIT_DONE, &q->queue_flags) &&
	    kref_read(&q->kobj.kref))
		lockdep_assert_held(q->queue_lock);
	__clear_bit(flag, &q->queue_flags);
}

/*
 * q->prep_rq_fn return values
 */