
Commit cfcc0ad4 authored by Linus Torvalds

Merge tag 'for-f2fs-4.2' of git://git.kernel.org/pub/scm/linux/kernel/git/jaegeuk/f2fs
Pull f2fs updates from Jaegeuk Kim:
 "New features:
   - per-file encryption (as in ext4)
   - FALLOC_FL_ZERO_RANGE
   - FALLOC_FL_COLLAPSE_RANGE
   - RENAME_WHITEOUT

  Major enhancements/fixes:
   - recovering broken superblocks
   - enhance f2fs_trim_fs with a discard_map
   - fix a race condition on dentry block allocation
   - fix a deadlock during summary operation
   - fix a missing fiemap result

  ... and many minor bug fixes and clean-ups were done"
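
From userspace, the new fallocate modes above are reached through fallocate(2). A minimal sketch (hedged: the mount point and file are hypothetical, and FALLOC_FL_COLLAPSE_RANGE requires block-aligned offset and length):

	#define _GNU_SOURCE
	#include <fcntl.h>
	#include <linux/falloc.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		int fd = open("/mnt/f2fs/data", O_RDWR);

		if (fd < 0) {
			perror("open");
			return 1;
		}
		/* Cut [1 MB, 2 MB) out of the file; later bytes shift down. */
		if (fallocate(fd, FALLOC_FL_COLLAPSE_RANGE, 1 << 20, 1 << 20) < 0)
			perror("FALLOC_FL_COLLAPSE_RANGE");
		/* Zero the same span in place without changing the file size. */
		if (fallocate(fd, FALLOC_FL_ZERO_RANGE | FALLOC_FL_KEEP_SIZE,
				1 << 20, 1 << 20) < 0)
			perror("FALLOC_FL_ZERO_RANGE");
		close(fd);
		return 0;
	}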

* tag 'for-f2fs-4.2' of git://git.kernel.org/pub/scm/linux/kernel/git/jaegeuk/f2fs: (83 commits)
  f2fs: do not trim preallocated blocks when truncating after i_size
  f2fs crypto: add alloc_bounce_page
  f2fs crypto: fix to handle errors likewise ext4
  f2fs: drop the volatile_write flag only
  f2fs: skip committing valid superblock
  f2fs: setting discard option in parse_options()
  f2fs: fix to return exact trimmed size
  f2fs: support FALLOC_FL_INSERT_RANGE
  f2fs: hide common code in f2fs_replace_block
  f2fs: disable the discard option when device doesn't support
  f2fs crypto: remove alloc_page for bounce_page
  f2fs: fix a deadlock for summary page lock vs. sentry_lock
  f2fs crypto: clean up error handling in f2fs_fname_setup_filename
  f2fs crypto: avoid f2fs_inherit_context for symlink
  f2fs crypto: do not set encryption policy for non-directory by ioctl
  f2fs crypto: allow setting encryption policy once
  f2fs crypto: check context consistent for rename2
  f2fs: avoid duplicated code by reusing f2fs_read_end_io
  f2fs crypto: use per-inode tfm structure
  f2fs: recovering broken superblock during mount
  ...
parents a7296b49 3c454145
fs/f2fs/Kconfig
+19 −0
@@ -72,6 +72,25 @@ config F2FS_CHECK_FS

 	  If you want to improve the performance, say N.
 
+config F2FS_FS_ENCRYPTION
+	bool "F2FS Encryption"
+	depends on F2FS_FS
+	depends on F2FS_FS_XATTR
+	select CRYPTO_AES
+	select CRYPTO_CBC
+	select CRYPTO_ECB
+	select CRYPTO_XTS
+	select CRYPTO_CTS
+	select CRYPTO_CTR
+	select CRYPTO_SHA256
+	select KEYS
+	select ENCRYPTED_KEYS
+	help
+	  Enable encryption of f2fs files and directories.  This
+	  feature is similar to ecryptfs, but it is more memory
+	  efficient since it avoids caching the encrypted and
+	  decrypted pages in the page cache.
+
 config F2FS_IO_TRACE
 	bool "F2FS IO tracer"
 	depends on F2FS_FS
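
Per the depends lines above, a kernel built to use this feature would carry at least the following .config fragment (a reference fragment, not part of the commit):

	CONFIG_F2FS_FS=y
	CONFIG_F2FS_FS_XATTR=y
	CONFIG_F2FS_FS_ENCRYPTION=y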
fs/f2fs/Makefile
+2 −0
@@ -6,3 +6,5 @@ f2fs-$(CONFIG_F2FS_STAT_FS) += debug.o
 f2fs-$(CONFIG_F2FS_FS_XATTR) += xattr.o
 f2fs-$(CONFIG_F2FS_FS_POSIX_ACL) += acl.o
 f2fs-$(CONFIG_F2FS_IO_TRACE) += trace.o
+f2fs-$(CONFIG_F2FS_FS_ENCRYPTION) += crypto_policy.o crypto.o \
+		crypto_key.o crypto_fname.o
fs/f2fs/acl.c
+20 −26
@@ -334,51 +334,45 @@ static int f2fs_acl_create(struct inode *dir, umode_t *mode,
 		struct page *dpage)
 {
 	struct posix_acl *p;
+	struct posix_acl *clone;
 	int ret;
 
+	*acl = NULL;
+	*default_acl = NULL;
+
 	if (S_ISLNK(*mode) || !IS_POSIXACL(dir))
-		goto no_acl;
+		return 0;
 
 	p = __f2fs_get_acl(dir, ACL_TYPE_DEFAULT, dpage);
-	if (IS_ERR(p)) {
-		if (p == ERR_PTR(-EOPNOTSUPP))
-			goto apply_umask;
-		return PTR_ERR(p);
+	if (!p || p == ERR_PTR(-EOPNOTSUPP)) {
+		*mode &= ~current_umask();
+		return 0;
 	}
+	if (IS_ERR(p))
+		return PTR_ERR(p);
 
-	if (!p)
-		goto apply_umask;
-
-	*acl = f2fs_acl_clone(p, GFP_NOFS);
-	if (!*acl)
+	clone = f2fs_acl_clone(p, GFP_NOFS);
+	if (!clone)
 		goto no_mem;
 
-	ret = f2fs_acl_create_masq(*acl, mode);
+	ret = f2fs_acl_create_masq(clone, mode);
 	if (ret < 0)
 		goto no_mem_clone;
 
-	if (ret == 0) {
-		posix_acl_release(*acl);
-		*acl = NULL;
-	}
+	if (ret == 0)
+		posix_acl_release(clone);
+	else
+		*acl = clone;
 
-	if (!S_ISDIR(*mode)) {
+	if (!S_ISDIR(*mode))
 		posix_acl_release(p);
-		*default_acl = NULL;
-	} else {
+	else
 		*default_acl = p;
-	}
 	return 0;
 
-apply_umask:
-	*mode &= ~current_umask();
-no_acl:
-	*default_acl = NULL;
-	*acl = NULL;
-	return 0;
-
 no_mem_clone:
-	posix_acl_release(*acl);
+	posix_acl_release(clone);
 no_mem:
 	posix_acl_release(p);
 	return -ENOMEM;
fs/f2fs/checkpoint.c
+26 −30
@@ -52,9 +52,11 @@ struct page *get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index)
 	struct address_space *mapping = META_MAPPING(sbi);
 	struct page *page;
 	struct f2fs_io_info fio = {
+		.sbi = sbi,
 		.type = META,
 		.rw = READ_SYNC | REQ_META | REQ_PRIO,
 		.blk_addr = index,
+		.encrypted_page = NULL,
 	};
 repeat:
 	page = grab_cache_page(mapping, index);
@@ -65,7 +67,9 @@ struct page *get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index)
 	if (PageUptodate(page))
 		goto out;
 
-	if (f2fs_submit_page_bio(sbi, page, &fio))
+	fio.page = page;
+
+	if (f2fs_submit_page_bio(&fio))
 		goto repeat;
 
 	lock_page(page);
@@ -77,8 +81,7 @@ struct page *get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index)
 	return page;
 }
 
-static inline bool is_valid_blkaddr(struct f2fs_sb_info *sbi,
-						block_t blkaddr, int type)
+bool is_valid_blkaddr(struct f2fs_sb_info *sbi, block_t blkaddr, int type)
 {
 	switch (type) {
 	case META_NAT:
@@ -118,8 +121,10 @@ int ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages, int type
 	struct page *page;
 	block_t blkno = start;
 	struct f2fs_io_info fio = {
+		.sbi = sbi,
 		.type = META,
-		.rw = READ_SYNC | REQ_META | REQ_PRIO
+		.rw = READ_SYNC | REQ_META | REQ_PRIO,
+		.encrypted_page = NULL,
 	};
 
 	for (; nrpages-- > 0; blkno++) {
@@ -161,7 +166,8 @@ int ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages, int type
 			continue;
 		}
 
-		f2fs_submit_page_mbio(sbi, page, &fio);
+		fio.page = page;
+		f2fs_submit_page_mbio(&fio);
 		f2fs_put_page(page, 0);
 	}
 out:
@@ -510,7 +516,12 @@ static void write_orphan_inodes(struct f2fs_sb_info *sbi, block_t start_blk)
 		grab_meta_page(sbi, start_blk + index);
 
 	index = 1;
-	spin_lock(&im->ino_lock);
+
+	/*
+	 * we don't need to do spin_lock(&im->ino_lock) here, since all the
+	 * orphan inode operations are covered under f2fs_lock_op().
+	 * And, spin_lock should be avoided due to page operations below.
+	 */
 	head = &im->ino_list;
 
 	/* loop for each orphan inode entry and write them in Jornal block */
@@ -550,8 +561,6 @@ static void write_orphan_inodes(struct f2fs_sb_info *sbi, block_t start_blk)
 		set_page_dirty(page);
 		f2fs_put_page(page, 1);
 	}
-
-	spin_unlock(&im->ino_lock);
 }
 
 static struct page *validate_checkpoint(struct f2fs_sb_info *sbi,
@@ -879,10 +888,8 @@ static void do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
 	unsigned long orphan_num = sbi->im[ORPHAN_INO].ino_num;
 	nid_t last_nid = nm_i->next_scan_nid;
 	block_t start_blk;
-	struct page *cp_page;
 	unsigned int data_sum_blocks, orphan_blocks;
 	__u32 crc32 = 0;
-	void *kaddr;
 	int i;
 	int cp_payload_blks = __cp_payload(sbi);
 
@@ -979,19 +986,11 @@ static void do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
 	start_blk = __start_cp_addr(sbi);
 
 	/* write out checkpoint buffer at block 0 */
-	cp_page = grab_meta_page(sbi, start_blk++);
-	kaddr = page_address(cp_page);
-	memcpy(kaddr, ckpt, F2FS_BLKSIZE);
-	set_page_dirty(cp_page);
-	f2fs_put_page(cp_page, 1);
+	update_meta_page(sbi, ckpt, start_blk++);
 
-	for (i = 1; i < 1 + cp_payload_blks; i++) {
-		cp_page = grab_meta_page(sbi, start_blk++);
-		kaddr = page_address(cp_page);
-		memcpy(kaddr, (char *)ckpt + i * F2FS_BLKSIZE, F2FS_BLKSIZE);
-		set_page_dirty(cp_page);
-		f2fs_put_page(cp_page, 1);
-	}
+	for (i = 1; i < 1 + cp_payload_blks; i++)
+		update_meta_page(sbi, (char *)ckpt + i * F2FS_BLKSIZE,
+							start_blk++);
 
 	if (orphan_num) {
 		write_orphan_inodes(sbi, start_blk);
@@ -1006,11 +1005,7 @@ static void do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
 	}
 
 	/* writeout checkpoint block */
-	cp_page = grab_meta_page(sbi, start_blk);
-	kaddr = page_address(cp_page);
-	memcpy(kaddr, ckpt, F2FS_BLKSIZE);
-	set_page_dirty(cp_page);
-	f2fs_put_page(cp_page, 1);
+	update_meta_page(sbi, ckpt, start_blk);
 
 	/* wait for previous submitted node/meta pages writeback */
 	wait_on_all_pages_writeback(sbi);
@@ -1036,7 +1031,7 @@ static void do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
 	if (unlikely(f2fs_cp_error(sbi)))
 		return;
 
-	clear_prefree_segments(sbi);
+	clear_prefree_segments(sbi, cpc);
 	clear_sbi_flag(sbi, SBI_IS_DIRTY);
 }
 
@@ -1051,7 +1046,8 @@ void write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
 	mutex_lock(&sbi->cp_mutex);
 
 	if (!is_sbi_flag_set(sbi, SBI_IS_DIRTY) &&
-		(cpc->reason == CP_FASTBOOT || cpc->reason == CP_SYNC))
+		(cpc->reason == CP_FASTBOOT || cpc->reason == CP_SYNC ||
+		(cpc->reason == CP_DISCARD && !sbi->discard_blks)))
 		goto out;
 	if (unlikely(f2fs_cp_error(sbi)))
 		goto out;

fs/f2fs/crypto.c

0 → 100644
+491 −0
/*
 * linux/fs/f2fs/crypto.c
 *
 * Copied from linux/fs/ext4/crypto.c
 *
 * Copyright (C) 2015, Google, Inc.
 * Copyright (C) 2015, Motorola Mobility
 *
 * This contains encryption functions for f2fs
 *
 * Written by Michael Halcrow, 2014.
 *
 * Filename encryption additions
 *	Uday Savagaonkar, 2014
 * Encryption policy handling additions
 *	Ildar Muslukhov, 2014
 * Remove ext4_encrypted_zeroout(),
 *   add f2fs_restore_and_release_control_page()
 *	Jaegeuk Kim, 2015.
 *
 * This has not yet undergone a rigorous security audit.
 *
 * The usage of AES-XTS should conform to recommendations in NIST
 * Special Publication 800-38E and IEEE P1619/D16.
 */
#include <crypto/hash.h>
#include <crypto/sha.h>
#include <keys/user-type.h>
#include <keys/encrypted-type.h>
#include <linux/crypto.h>
#include <linux/ecryptfs.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/key.h>
#include <linux/list.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/random.h>
#include <linux/scatterlist.h>
#include <linux/spinlock_types.h>
#include <linux/f2fs_fs.h>
#include <linux/ratelimit.h>
#include <linux/bio.h>

#include "f2fs.h"
#include "xattr.h"

/* Encryption added and removed here! (L: */

static unsigned int num_prealloc_crypto_pages = 32;
static unsigned int num_prealloc_crypto_ctxs = 128;

module_param(num_prealloc_crypto_pages, uint, 0444);
MODULE_PARM_DESC(num_prealloc_crypto_pages,
		"Number of crypto pages to preallocate");
module_param(num_prealloc_crypto_ctxs, uint, 0444);
MODULE_PARM_DESC(num_prealloc_crypto_ctxs,
		"Number of crypto contexts to preallocate");

static mempool_t *f2fs_bounce_page_pool;

static LIST_HEAD(f2fs_free_crypto_ctxs);
static DEFINE_SPINLOCK(f2fs_crypto_ctx_lock);

static struct workqueue_struct *f2fs_read_workqueue;
static DEFINE_MUTEX(crypto_init);

static struct kmem_cache *f2fs_crypto_ctx_cachep;
struct kmem_cache *f2fs_crypt_info_cachep;

/**
 * f2fs_release_crypto_ctx() - Releases an encryption context
 * @ctx: The encryption context to release.
 *
 * If the encryption context was allocated from the pre-allocated pool, returns
 * it to that pool. Else, frees it.
 *
 * If there's a bounce page in the context, this frees that.
 */
void f2fs_release_crypto_ctx(struct f2fs_crypto_ctx *ctx)
{
	unsigned long flags;

	if (ctx->flags & F2FS_WRITE_PATH_FL && ctx->w.bounce_page) {
		mempool_free(ctx->w.bounce_page, f2fs_bounce_page_pool);
		ctx->w.bounce_page = NULL;
	}
	ctx->w.control_page = NULL;
	if (ctx->flags & F2FS_CTX_REQUIRES_FREE_ENCRYPT_FL) {
		kmem_cache_free(f2fs_crypto_ctx_cachep, ctx);
	} else {
		spin_lock_irqsave(&f2fs_crypto_ctx_lock, flags);
		list_add(&ctx->free_list, &f2fs_free_crypto_ctxs);
		spin_unlock_irqrestore(&f2fs_crypto_ctx_lock, flags);
	}
}

/**
 * f2fs_get_crypto_ctx() - Gets an encryption context
 * @inode:       The inode for which we are doing the crypto
 *
 * Allocates and initializes an encryption context.
 *
 * Return: An allocated and initialized encryption context on success; error
 * value or NULL otherwise.
 */
struct f2fs_crypto_ctx *f2fs_get_crypto_ctx(struct inode *inode)
{
	struct f2fs_crypto_ctx *ctx = NULL;
	unsigned long flags;
	struct f2fs_crypt_info *ci = F2FS_I(inode)->i_crypt_info;

	if (ci == NULL)
		return ERR_PTR(-ENOKEY);

	/*
	 * We first try getting the ctx from a free list because in
	 * the common case the ctx will have an allocated and
	 * initialized crypto tfm, so it's probably a worthwhile
	 * optimization. For the bounce page, we first try getting it
	 * from the kernel allocator because that's just about as fast
	 * as getting it from a list and because a cache of free pages
	 * should generally be a "last resort" option for a filesystem
	 * to be able to do its job.
	 */
	spin_lock_irqsave(&f2fs_crypto_ctx_lock, flags);
	ctx = list_first_entry_or_null(&f2fs_free_crypto_ctxs,
					struct f2fs_crypto_ctx, free_list);
	if (ctx)
		list_del(&ctx->free_list);
	spin_unlock_irqrestore(&f2fs_crypto_ctx_lock, flags);
	if (!ctx) {
		ctx = kmem_cache_zalloc(f2fs_crypto_ctx_cachep, GFP_NOFS);
		if (!ctx)
			return ERR_PTR(-ENOMEM);
		ctx->flags |= F2FS_CTX_REQUIRES_FREE_ENCRYPT_FL;
	} else {
		ctx->flags &= ~F2FS_CTX_REQUIRES_FREE_ENCRYPT_FL;
	}
	ctx->flags &= ~F2FS_WRITE_PATH_FL;
	return ctx;
}

/*
 * Call f2fs_decrypt on every single page, reusing the encryption
 * context.
 */
static void completion_pages(struct work_struct *work)
{
	struct f2fs_crypto_ctx *ctx =
		container_of(work, struct f2fs_crypto_ctx, r.work);
	struct bio *bio = ctx->r.bio;
	struct bio_vec *bv;
	int i;

	bio_for_each_segment_all(bv, bio, i) {
		struct page *page = bv->bv_page;
		int ret = f2fs_decrypt(ctx, page);

		if (ret) {
			WARN_ON_ONCE(1);
			SetPageError(page);
		} else
			SetPageUptodate(page);
		unlock_page(page);
	}
	f2fs_release_crypto_ctx(ctx);
	bio_put(bio);
}

void f2fs_end_io_crypto_work(struct f2fs_crypto_ctx *ctx, struct bio *bio)
{
	INIT_WORK(&ctx->r.work, completion_pages);
	ctx->r.bio = bio;
	queue_work(f2fs_read_workqueue, &ctx->r.work);
}

static void f2fs_crypto_destroy(void)
{
	struct f2fs_crypto_ctx *pos, *n;

	list_for_each_entry_safe(pos, n, &f2fs_free_crypto_ctxs, free_list)
		kmem_cache_free(f2fs_crypto_ctx_cachep, pos);
	INIT_LIST_HEAD(&f2fs_free_crypto_ctxs);
	if (f2fs_bounce_page_pool)
		mempool_destroy(f2fs_bounce_page_pool);
	f2fs_bounce_page_pool = NULL;
}

/**
 * f2fs_crypto_initialize() - Set up for f2fs encryption.
 *
 * We only call this when we start accessing encrypted files, since it
 * results in memory getting allocated that wouldn't otherwise be used.
 *
 * Return: Zero on success, non-zero otherwise.
 */
int f2fs_crypto_initialize(void)
{
	int i, res = -ENOMEM;

	if (f2fs_bounce_page_pool)
		return 0;

	mutex_lock(&crypto_init);
	if (f2fs_bounce_page_pool)
		goto already_initialized;

	for (i = 0; i < num_prealloc_crypto_ctxs; i++) {
		struct f2fs_crypto_ctx *ctx;

		ctx = kmem_cache_zalloc(f2fs_crypto_ctx_cachep, GFP_KERNEL);
		if (!ctx)
			goto fail;
		list_add(&ctx->free_list, &f2fs_free_crypto_ctxs);
	}

	/* must be allocated at the last step to avoid race condition above */
	f2fs_bounce_page_pool =
		mempool_create_page_pool(num_prealloc_crypto_pages, 0);
	if (!f2fs_bounce_page_pool)
		goto fail;

already_initialized:
	mutex_unlock(&crypto_init);
	return 0;
fail:
	f2fs_crypto_destroy();
	mutex_unlock(&crypto_init);
	return res;
}

/**
 * f2fs_exit_crypto() - Shutdown the f2fs encryption system
 */
void f2fs_exit_crypto(void)
{
	f2fs_crypto_destroy();

	if (f2fs_read_workqueue)
		destroy_workqueue(f2fs_read_workqueue);
	if (f2fs_crypto_ctx_cachep)
		kmem_cache_destroy(f2fs_crypto_ctx_cachep);
	if (f2fs_crypt_info_cachep)
		kmem_cache_destroy(f2fs_crypt_info_cachep);
}

int __init f2fs_init_crypto(void)
{
	int res = -ENOMEM;

	f2fs_read_workqueue = alloc_workqueue("f2fs_crypto", WQ_HIGHPRI, 0);
	if (!f2fs_read_workqueue)
		goto fail;

	f2fs_crypto_ctx_cachep = KMEM_CACHE(f2fs_crypto_ctx,
						SLAB_RECLAIM_ACCOUNT);
	if (!f2fs_crypto_ctx_cachep)
		goto fail;

	f2fs_crypt_info_cachep = KMEM_CACHE(f2fs_crypt_info,
						SLAB_RECLAIM_ACCOUNT);
	if (!f2fs_crypt_info_cachep)
		goto fail;

	return 0;
fail:
	f2fs_exit_crypto();
	return res;
}

void f2fs_restore_and_release_control_page(struct page **page)
{
	struct f2fs_crypto_ctx *ctx;
	struct page *bounce_page;

	/* The bounce data pages are unmapped. */
	if ((*page)->mapping)
		return;

	/* The bounce data page is unmapped. */
	bounce_page = *page;
	ctx = (struct f2fs_crypto_ctx *)page_private(bounce_page);

	/* restore control page */
	*page = ctx->w.control_page;

	f2fs_restore_control_page(bounce_page);
}

void f2fs_restore_control_page(struct page *data_page)
{
	struct f2fs_crypto_ctx *ctx =
		(struct f2fs_crypto_ctx *)page_private(data_page);

	set_page_private(data_page, (unsigned long)NULL);
	ClearPagePrivate(data_page);
	unlock_page(data_page);
	f2fs_release_crypto_ctx(ctx);
}

/**
 * f2fs_crypt_complete() - The completion callback for page encryption
 * @req: The asynchronous encryption request context
 * @res: The result of the encryption operation
 */
static void f2fs_crypt_complete(struct crypto_async_request *req, int res)
{
	struct f2fs_completion_result *ecr = req->data;

	if (res == -EINPROGRESS)
		return;
	ecr->res = res;
	complete(&ecr->completion);
}

typedef enum {
	F2FS_DECRYPT = 0,
	F2FS_ENCRYPT,
} f2fs_direction_t;

static int f2fs_page_crypto(struct f2fs_crypto_ctx *ctx,
				struct inode *inode,
				f2fs_direction_t rw,
				pgoff_t index,
				struct page *src_page,
				struct page *dest_page)
{
	u8 xts_tweak[F2FS_XTS_TWEAK_SIZE];
	struct ablkcipher_request *req = NULL;
	DECLARE_F2FS_COMPLETION_RESULT(ecr);
	struct scatterlist dst, src;
	struct f2fs_crypt_info *ci = F2FS_I(inode)->i_crypt_info;
	struct crypto_ablkcipher *tfm = ci->ci_ctfm;
	int res = 0;

	req = ablkcipher_request_alloc(tfm, GFP_NOFS);
	if (!req) {
		printk_ratelimited(KERN_ERR
				"%s: crypto_request_alloc() failed\n",
				__func__);
		return -ENOMEM;
	}
	ablkcipher_request_set_callback(
		req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
		f2fs_crypt_complete, &ecr);

	BUILD_BUG_ON(F2FS_XTS_TWEAK_SIZE < sizeof(index));
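	/*
	 * The XTS tweak is simply the page index, zero-padded out to
	 * F2FS_XTS_TWEAK_SIZE, so every page in a file is encrypted
	 * with a distinct tweak.
	 */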
	memcpy(xts_tweak, &index, sizeof(index));
	memset(&xts_tweak[sizeof(index)], 0,
			F2FS_XTS_TWEAK_SIZE - sizeof(index));

	sg_init_table(&dst, 1);
	sg_set_page(&dst, dest_page, PAGE_CACHE_SIZE, 0);
	sg_init_table(&src, 1);
	sg_set_page(&src, src_page, PAGE_CACHE_SIZE, 0);
	ablkcipher_request_set_crypt(req, &src, &dst, PAGE_CACHE_SIZE,
					xts_tweak);
	if (rw == F2FS_DECRYPT)
		res = crypto_ablkcipher_decrypt(req);
	else
		res = crypto_ablkcipher_encrypt(req);
	if (res == -EINPROGRESS || res == -EBUSY) {
		BUG_ON(req->base.data != &ecr);
		wait_for_completion(&ecr.completion);
		res = ecr.res;
	}
	ablkcipher_request_free(req);
	if (res) {
		printk_ratelimited(KERN_ERR
			"%s: crypto_ablkcipher_encrypt() returned %d\n",
			__func__, res);
		return res;
	}
	return 0;
}

static struct page *alloc_bounce_page(struct f2fs_crypto_ctx *ctx)
{
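	/*
	 * Pull a bounce page from the pre-allocated mempool.  GFP_NOWAIT
	 * never sleeps under writeback; if the pool is exhausted, the
	 * caller gets -ENOMEM instead.
	 */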
	ctx->w.bounce_page = mempool_alloc(f2fs_bounce_page_pool, GFP_NOWAIT);
	if (ctx->w.bounce_page == NULL)
		return ERR_PTR(-ENOMEM);
	ctx->flags |= F2FS_WRITE_PATH_FL;
	return ctx->w.bounce_page;
}

/**
 * f2fs_encrypt() - Encrypts a page
 * @inode:          The inode for which the encryption should take place
 * @plaintext_page: The page to encrypt. Must be locked.
 *
 * Allocates a ciphertext page and encrypts plaintext_page into it using the ctx
 * encryption context.
 *
 * Called on the page write path.  The caller must call
 * f2fs_restore_control_page() on the returned ciphertext page to
 * release the bounce buffer and the encryption context.
 *
 * Return: An allocated page with the encrypted content on success. Else, an
 * error value or NULL.
 */
struct page *f2fs_encrypt(struct inode *inode,
			  struct page *plaintext_page)
{
	struct f2fs_crypto_ctx *ctx;
	struct page *ciphertext_page = NULL;
	int err;

	BUG_ON(!PageLocked(plaintext_page));

	ctx = f2fs_get_crypto_ctx(inode);
	if (IS_ERR(ctx))
		return (struct page *)ctx;

	/* The encryption operation will require a bounce page. */
	ciphertext_page = alloc_bounce_page(ctx);
	if (IS_ERR(ciphertext_page))
		goto err_out;

	ctx->w.control_page = plaintext_page;
	err = f2fs_page_crypto(ctx, inode, F2FS_ENCRYPT, plaintext_page->index,
					plaintext_page, ciphertext_page);
	if (err) {
		ciphertext_page = ERR_PTR(err);
		goto err_out;
	}

	SetPagePrivate(ciphertext_page);
	set_page_private(ciphertext_page, (unsigned long)ctx);
	lock_page(ciphertext_page);
	return ciphertext_page;

err_out:
	f2fs_release_crypto_ctx(ctx);
	return ciphertext_page;
}

/**
 * f2fs_decrypt() - Decrypts a page in-place
 * @ctx:  The encryption context.
 * @page: The page to decrypt. Must be locked.
 *
 * Decrypts page in-place using the ctx encryption context.
 *
 * Called from the read completion callback.
 *
 * Return: Zero on success, non-zero otherwise.
 */
int f2fs_decrypt(struct f2fs_crypto_ctx *ctx, struct page *page)
{
	BUG_ON(!PageLocked(page));

	return f2fs_page_crypto(ctx, page->mapping->host,
				F2FS_DECRYPT, page->index, page, page);
}

/*
 * Convenience function which takes care of allocating and
 * deallocating the encryption context
 */
int f2fs_decrypt_one(struct inode *inode, struct page *page)
{
	struct f2fs_crypto_ctx *ctx = f2fs_get_crypto_ctx(inode);
	int ret;

	if (IS_ERR(ctx))
		return PTR_ERR(ctx);
	ret = f2fs_decrypt(ctx, page);
	f2fs_release_crypto_ctx(ctx);
	return ret;
}

bool f2fs_valid_contents_enc_mode(uint32_t mode)
{
	return (mode == F2FS_ENCRYPTION_MODE_AES_256_XTS);
}

/**
 * f2fs_validate_encryption_key_size() - Validate the encryption key size
 * @mode: The key mode.
 * @size: The key size to validate.
 *
 * Return: The validated key size for @mode. Zero if invalid.
 */
uint32_t f2fs_validate_encryption_key_size(uint32_t mode, uint32_t size)
{
	if (size == f2fs_encryption_key_size(mode))
		return size;
	return 0;
}
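
Taken together, the docstrings above pin down the write-path contract: encrypt into a bounce page, submit the bounce page for I/O, then release it. A hedged sketch of a caller follows (not code from this commit; the submit function is a hypothetical synchronous stand-in, and real callers release the bounce page from the write-end I/O callback):

	static int f2fs_write_encrypted_page_sketch(struct inode *inode,
						struct page *plaintext_page)
	{
		struct page *ciphertext_page;
		int err;

		/* f2fs_encrypt() requires the plaintext page to be locked. */
		ciphertext_page = f2fs_encrypt(inode, plaintext_page);
		if (IS_ERR(ciphertext_page))
			return PTR_ERR(ciphertext_page);

		/* Hypothetical synchronous submission of the bounce page. */
		err = submit_page_write_sync(ciphertext_page);

		/* Unlocks the bounce page and frees its crypto context. */
		f2fs_restore_control_page(ciphertext_page);
		return err;
	}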