Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit d296696f, authored by Linux Build Service Account and committed by Gerrit — the friendly Code Review server
Browse files

Merge "dm-default-key, f2fs, ICE: support dm-default-key with f2fs/ICE"

parents aeda0e53 3bd012ff
Loading
Loading
Loading
Loading
+4 −1
Original line number Diff line number Diff line
@@ -568,8 +568,11 @@ EXPORT_SYMBOL(bio_phys_segments);
/*
 * Copy the inline-crypto (ICE) context from @src to @dst when cloning a bio.
 *
 * NOTE(review): the scraped diff lost its +/- markers, leaving the
 * bi_crypt_key copy duplicated (once unconditionally, once under
 * CONFIG_DM_DEFAULT_KEY).  Per the hunk header ("+4 -1") the unconditional
 * copy was the line removed by this commit, so only the
 * CONFIG_DM_DEFAULT_KEY-guarded copy is kept here.
 */
static inline void bio_clone_crypt_key(struct bio *dst, const struct bio *src)
{
#ifdef CONFIG_PFK
	/* DUN (data unit number) drives per-block IV generation for ICE. */
	dst->bi_iter.bi_dun = src->bi_iter.bi_dun;
#ifdef CONFIG_DM_DEFAULT_KEY
	/* dm-default-key state: the key itself and the per-bio skip flag. */
	dst->bi_crypt_key = src->bi_crypt_key;
	dst->bi_crypt_skip = src->bi_crypt_skip;
#endif
	dst->bi_dio_inode = src->bi_dio_inode;
#endif
}
+1 −1
Original line number Diff line number Diff line
@@ -133,7 +133,7 @@ static int default_key_map(struct dm_target *ti, struct bio *bio)
			dm_target_offset(ti, bio->bi_iter.bi_sector);
	}

	if (!bio->bi_crypt_key)
	if (!bio->bi_crypt_key && !bio->bi_crypt_skip)
		bio->bi_crypt_key = &dkc->key;

	return DM_MAPIO_REMAPPED;
+15 −0
Original line number Diff line number Diff line
@@ -1495,6 +1495,16 @@ static int queue_supports_sg_merge(struct dm_target *ti, struct dm_dev *dev,
	return q && !test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags);
}

/*
 * iterate_devices callout: report whether the underlying device's request
 * queue advertises inline-encryption (ICE) support.  Used via
 * dm_table_all_devices_attribute() to decide if the whole table may set
 * QUEUE_FLAG_INLINECRYPT.
 */
static int queue_supports_inline_encryption(struct dm_target *ti,
					    struct dm_dev *dev,
					    sector_t start, sector_t len,
					    void *data)
{
	struct request_queue *q = bdev_get_queue(dev->bdev);

	if (!q)
		return 0;

	return blk_queue_inlinecrypt(q) ? 1 : 0;
}

static bool dm_table_all_devices_attribute(struct dm_table *t,
					   iterate_devices_callout_fn func)
{
@@ -1615,6 +1625,11 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
	else
		queue_flag_set_unlocked(QUEUE_FLAG_NO_SG_MERGE, q);

	if (dm_table_all_devices_attribute(t, queue_supports_inline_encryption))
		queue_flag_set_unlocked(QUEUE_FLAG_INLINECRYPT, q);
	else
		queue_flag_clear_unlocked(QUEUE_FLAG_INLINECRYPT, q);

	dm_table_verify_integrity(t);

	/*
+14 −1
Original line number Diff line number Diff line
@@ -157,16 +157,29 @@ void fscrypt_set_ice_dun(const struct inode *inode, struct bio *bio, u64 dun)
}
EXPORT_SYMBOL(fscrypt_set_ice_dun);

/*
 * Record on @bio whether dm-default-key must skip encrypting it
 * (non-zero when the page was already encrypted at the filesystem
 * level, e.g. an f2fs encrypted_page).  No-op unless
 * CONFIG_DM_DEFAULT_KEY is enabled.
 */
void fscrypt_set_ice_skip(struct bio *bio, int bi_crypt_skip)
{
#ifdef CONFIG_DM_DEFAULT_KEY
	bio->bi_crypt_skip = bi_crypt_skip;
#else
	/* Silence unused-parameter warnings in the disabled configuration. */
	(void)bio;
	(void)bi_crypt_skip;
#endif
}
EXPORT_SYMBOL(fscrypt_set_ice_skip);

/*
 * This function will be used for filesystem when deciding to merge bios.
 * Basic assumption is, if inline_encryption is set, single bio has to
 * guarantee consecutive LBAs as well as ino|pg->index.
 */
bool fscrypt_mergeable_bio(struct bio *bio, u64 dun, bool bio_encrypted)
bool fscrypt_mergeable_bio(struct bio *bio, u64 dun, bool bio_encrypted,
						int bi_crypt_skip)
{
	if (!bio)
		return true;

#ifdef CONFIG_DM_DEFAULT_KEY
	if (bi_crypt_skip != bio->bi_crypt_skip)
		return false;
#endif
	/* if both of them are not encrypted, no further check is needed */
	if (!bio_dun(bio) && !bio_encrypted)
		return true;
+7 −4
Original line number Diff line number Diff line
@@ -451,6 +451,7 @@ int f2fs_submit_page_bio(struct f2fs_io_info *fio)

	if (f2fs_may_encrypt_bio(inode, fio))
		fscrypt_set_ice_dun(inode, bio, PG_DUN(inode, fio->page));
	fscrypt_set_ice_skip(bio, fio->encrypted_page ? 1 : 0);

	if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
		bio_put(bio);
@@ -473,6 +474,7 @@ void f2fs_submit_page_write(struct f2fs_io_info *fio)
	struct page *bio_page;
	struct inode *inode;
	bool bio_encrypted;
	int bi_crypt_skip;
	u64 dun;

	f2fs_bug_on(sbi, is_read_io(fio->op));
@@ -498,6 +500,7 @@ void f2fs_submit_page_write(struct f2fs_io_info *fio)
	bio_page = fio->encrypted_page ? fio->encrypted_page : fio->page;
	inode = fio->page->mapping->host;
	dun = PG_DUN(inode, fio->page);
	bi_crypt_skip = fio->encrypted_page ? 1 : 0;
	bio_encrypted = f2fs_may_encrypt_bio(inode, fio);

	/* set submitted = true as a return value */
@@ -511,7 +514,7 @@ void f2fs_submit_page_write(struct f2fs_io_info *fio)
		__submit_merged_bio(io);

	/* ICE support */
	if (!fscrypt_mergeable_bio(io->bio, dun, bio_encrypted))
	if (!fscrypt_mergeable_bio(io->bio, dun, bio_encrypted, bi_crypt_skip))
		__submit_merged_bio(io);

alloc_new:
@@ -527,7 +530,7 @@ void f2fs_submit_page_write(struct f2fs_io_info *fio)
						fio->type, fio->temp);
		if (bio_encrypted)
			fscrypt_set_ice_dun(inode, io->bio, dun);

		fscrypt_set_ice_skip(io->bio, bi_crypt_skip);
		io->fio = *fio;
	}

@@ -1539,7 +1542,7 @@ static int f2fs_mpage_readpages(struct address_space *mapping,

		dun = PG_DUN(inode, page);
		bio_encrypted = f2fs_may_encrypt_bio(inode, NULL);
		if (!fscrypt_mergeable_bio(bio, dun, bio_encrypted)) {
		if (!fscrypt_mergeable_bio(bio, dun, bio_encrypted, 0)) {
			__submit_bio(F2FS_I_SB(inode), bio, DATA);
			bio = NULL;
		}
Loading