Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 9e9fe7fd authored by qctecmdr Service, committed by Gerrit - the friendly Code Review server
Browse files

Merge "[ANDROID] f2fs: Set the bio REQ_NOENCRYPT flag"

parents 1f078aa3 3547983d
Loading
Loading
Loading
Loading
+14 −4
Original line number Diff line number Diff line
@@ -125,7 +125,8 @@ struct iv_tcw_private {
 * and encrypts / decrypts at the same time.
 */
enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID,
	     DM_CRYPT_SAME_CPU, DM_CRYPT_NO_OFFLOAD };
	     DM_CRYPT_SAME_CPU, DM_CRYPT_NO_OFFLOAD,
	     DM_CRYPT_ENCRYPT_OVERRIDE };

enum cipher_flags {
	CRYPT_MODE_INTEGRITY_AEAD,	/* Use authenticated mode for cihper */
@@ -2653,6 +2654,8 @@ static int crypt_ctr_optional(struct dm_target *ti, unsigned int argc, char **ar
			cc->sector_shift = __ffs(cc->sector_size) - SECTOR_SHIFT;
		} else if (!strcasecmp(opt_string, "iv_large_sectors"))
			set_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags);
		else if (!strcasecmp(opt_string, "allow_encrypt_override"))
			set_bit(DM_CRYPT_ENCRYPT_OVERRIDE, &cc->flags);
		else {
			ti->error = "Invalid feature arguments";
			return -EINVAL;
@@ -2862,12 +2865,15 @@ static int crypt_map(struct dm_target *ti, struct bio *bio)
	struct crypt_config *cc = ti->private;

	/*
	 * If bio is REQ_PREFLUSH or REQ_OP_DISCARD, just bypass crypt queues.
	 * If bio is REQ_PREFLUSH, REQ_NOENCRYPT, or REQ_OP_DISCARD,
	 * just bypass crypt queues.
	 * - for REQ_PREFLUSH device-mapper core ensures that no IO is in-flight
	 * - for REQ_OP_DISCARD caller must use flush if IO ordering matters
	 */
	if (unlikely(bio->bi_opf & REQ_PREFLUSH ||
	    bio_op(bio) == REQ_OP_DISCARD)) {
	if (unlikely(bio->bi_opf & REQ_PREFLUSH) ||
	    (unlikely(bio->bi_opf & REQ_NOENCRYPT) &&
	     test_bit(DM_CRYPT_ENCRYPT_OVERRIDE, &cc->flags)) ||
	    bio_op(bio) == REQ_OP_DISCARD) {
		bio_set_dev(bio, cc->dev->bdev);
		if (bio_sectors(bio))
			bio->bi_iter.bi_sector = cc->start +
@@ -2954,6 +2960,8 @@ static void crypt_status(struct dm_target *ti, status_type_t type,
		num_feature_args += test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags);
		num_feature_args += cc->sector_size != (1 << SECTOR_SHIFT);
		num_feature_args += test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags);
		num_feature_args += test_bit(DM_CRYPT_ENCRYPT_OVERRIDE,
							&cc->flags);
		if (cc->on_disk_tag_size)
			num_feature_args++;
		if (num_feature_args) {
@@ -2970,6 +2978,8 @@ static void crypt_status(struct dm_target *ti, status_type_t type,
				DMEMIT(" sector_size:%d", cc->sector_size);
			if (test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags))
				DMEMIT(" iv_large_sectors");
			if (test_bit(DM_CRYPT_ENCRYPT_OVERRIDE, &cc->flags))
				DMEMIT(" allow_encrypt_override");
		}

		break;
+1 −1
Original line number Diff line number Diff line
@@ -131,7 +131,7 @@ int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk,
		bio_set_dev(bio, inode->i_sb->s_bdev);
		bio->bi_iter.bi_sector =
			pblk << (inode->i_sb->s_blocksize_bits - 9);
		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
		bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_NOENCRYPT);
		ret = bio_add_page(bio, ciphertext_page,
					inode->i_sb->s_blocksize, 0);
		if (ret != inode->i_sb->s_blocksize) {
+3 −0
Original line number Diff line number Diff line
@@ -205,7 +205,10 @@ typedef struct ext4_io_end {
	ssize_t			size;		/* size of the extent */
} ext4_io_end_t;

#define EXT4_IO_ENCRYPTED	1

struct ext4_io_submit {
	unsigned int		io_flags;
	struct writeback_control *io_wbc;
	struct bio		*io_bio;
	ext4_io_end_t		*io_end;
+9 −6
Original line number Diff line number Diff line
@@ -1215,11 +1215,12 @@ static int ext4_block_write_begin(struct page *page, loff_t pos, unsigned len,
		if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
		    !buffer_unwritten(bh) &&
		    (block_start < from || block_end > to)) {
			ll_rw_block(REQ_OP_READ, 0, 1, &bh);
			*wait_bh++ = bh;
			decrypt = ext4_encrypted_inode(inode) &&
				S_ISREG(inode->i_mode) &&
				!fscrypt_using_hardware_encryption(inode);
			ll_rw_block(REQ_OP_READ, (decrypt ? REQ_NOENCRYPT : 0),
				    1, &bh);
			*wait_bh++ = bh;
		}
	}
	/*
@@ -3989,6 +3990,7 @@ static int __ext4_block_zero_page_range(handle_t *handle,
	struct inode *inode = mapping->host;
	struct buffer_head *bh;
	struct page *page;
	bool decrypt;
	int err = 0;

	page = find_or_create_page(mapping, from >> PAGE_SHIFT,
@@ -4031,14 +4033,15 @@ static int __ext4_block_zero_page_range(handle_t *handle,

	if (!buffer_uptodate(bh)) {
		err = -EIO;
		ll_rw_block(REQ_OP_READ, 0, 1, &bh);
		decrypt = S_ISREG(inode->i_mode) &&
			ext4_encrypted_inode(inode) &&
		    !fscrypt_using_hardware_encryption(inode);
		ll_rw_block(REQ_OP_READ, (decrypt ? REQ_NOENCRYPT : 0), 1, &bh);
		wait_on_buffer(bh);
		/* Uhhuh. Read error. Complain and punt. */
		if (!buffer_uptodate(bh))
			goto unlock;
		if (S_ISREG(inode->i_mode) &&
		    ext4_encrypted_inode(inode) &&
		    !fscrypt_using_hardware_encryption(inode)) {
		if (decrypt) {
			/* We expect the key to be set. */
			BUG_ON(!fscrypt_has_encryption_key(inode));
			BUG_ON(blocksize != PAGE_SIZE);
+5 −0
Original line number Diff line number Diff line
@@ -352,6 +352,8 @@ void ext4_io_submit(struct ext4_io_submit *io)
		int io_op_flags = io->io_wbc->sync_mode == WB_SYNC_ALL ?
				  REQ_SYNC : 0;
		io->io_bio->bi_write_hint = io->io_end->inode->i_write_hint;
		if (io->io_flags & EXT4_IO_ENCRYPTED)
			io_op_flags |= REQ_NOENCRYPT;
		bio_set_op_attrs(io->io_bio, REQ_OP_WRITE, io_op_flags);
		submit_bio(io->io_bio);
	}
@@ -361,6 +363,7 @@ void ext4_io_submit(struct ext4_io_submit *io)
void ext4_io_submit_init(struct ext4_io_submit *io,
			 struct writeback_control *wbc)
{
	io->io_flags = 0;
	io->io_wbc = wbc;
	io->io_bio = NULL;
	io->io_end = NULL;
@@ -504,6 +507,8 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
	do {
		if (!buffer_async_write(bh))
			continue;
		if (data_page)
			io->io_flags |= EXT4_IO_ENCRYPTED;
		ret = io_submit_add_bh(io, inode,
				       data_page ? data_page : page, bh);
		if (ret) {
Loading