
Commit 6d8ef53e authored by Linus Torvalds
Pull f2fs updates from Jaegeuk Kim:
 "In this round, we've mostly tuned f2fs to provide better user
  experience for Android. Especially, we've worked on atomic write
  feature again with SQLite community in order to support it officially.
  And we added or modified several facilities to analyze and enhance IO
  behaviors.

  Major changes include:
   - add app/fs io stat
   - add inode checksum feature
   - support project/journalled quota
   - enhance atomic write with new ioctl() which exposes feature set
   - enhance background gc/discard/fstrim flows with new gc_urgent mode
   - add F2FS_IOC_FS{GET,SET}XATTR
   - fix some quota flows"

* tag 'f2fs-for-4.14' of git://git.kernel.org/pub/scm/linux/kernel/git/jaegeuk/f2fs: (63 commits)
  f2fs: hurry up to issue discard after io interruption
  f2fs: fix to show correct discard_granularity in sysfs
  f2fs: detect dirty inode in evict_inode
  f2fs: clear radix tree dirty tag of pages whose dirty flag is cleared
  f2fs: speed up gc_urgent mode with SSR
  f2fs: better to wait for fstrim completion
  f2fs: avoid race in between read xattr & write xattr
  f2fs: make get_lock_data_page to handle encrypted inode
  f2fs: use generic terms used for encrypted block management
  f2fs: introduce f2fs_encrypted_file for clean-up
  Revert "f2fs: add a new function get_ssr_cost"
  f2fs: constify super_operations
  f2fs: fix to wake up all sleeping flusher
  f2fs: avoid race in between atomic_read & atomic_inc
  f2fs: remove unneeded parameter of change_curseg
  f2fs: update i_flags correctly
  f2fs: don't check inode's checksum if it was dirtied or writebacked
  f2fs: don't need to update inode checksum for recovery
  f2fs: trigger fdatasync for non-atomic_write file
  f2fs: fix to avoid race in between aio and gc
  ...
parents cdb897e3 e6c6de18
Documentation/ABI/testing/sysfs-fs-f2fs  +21 −0
@@ -57,6 +57,15 @@ Contact: "Jaegeuk Kim" <jaegeuk.kim@samsung.com>
Description:
		 Controls the issue rate of small discard commands.

What:          /sys/fs/f2fs/<disk>/discard_granularity
Date:          July 2017
Contact:       "Chao Yu" <yuchao0@huawei.com>
Description:
		Controls the discard granularity of the inner discard thread;
		the thread will not issue discards smaller than this
		granularity. The unit is one block; currently only values in
		the range [1, 512] are supported.

What:		/sys/fs/f2fs/<disk>/max_victim_search
Date:		January 2014
Contact:	"Jaegeuk Kim" <jaegeuk.kim@samsung.com>
@@ -130,3 +139,15 @@ Date: June 2017
Contact:	"Chao Yu" <yuchao0@huawei.com>
Description:
		 Controls the number of reserved blocks in the system.

What:		/sys/fs/f2fs/<disk>/gc_urgent
Date:		August 2017
Contact:	"Jaegeuk Kim" <jaegeuk@kernel.org>
Description:
		 Do background GC aggressively

What:		/sys/fs/f2fs/<disk>/gc_urgent_sleep_time
Date:		August 2017
Contact:	"Jaegeuk Kim" <jaegeuk@kernel.org>
Description:
		 Controls sleep time of GC urgent mode
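
As a rough illustration (not part of the patch), a userspace helper
could drive these two knobs directly; the device name sda1 below is a
placeholder:

/* Sketch: enable urgent-mode background GC via the sysfs knobs
 * documented above. Adjust the <disk> component for your device. */
#include <stdio.h>

static int write_knob(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		return -1;
	}
	fputs(val, f);
	return fclose(f);
}

int main(void)
{
	/* GC wakes every 500 ms (the default) while urgent mode is on */
	write_knob("/sys/fs/f2fs/sda1/gc_urgent_sleep_time", "500");
	/* 1 = urgent background GC, 0 = back to default behavior */
	return write_knob("/sys/fs/f2fs/sda1/gc_urgent", "1") ? 1 : 0;
}
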
Documentation/filesystems/f2fs.txt  +19 −0
@@ -164,6 +164,16 @@ io_bits=%u Set the bit size of write IO requests. It should be set
                       with "mode=lfs".
usrquota               Enable plain user disk quota accounting.
grpquota               Enable plain group disk quota accounting.
prjquota               Enable plain project quota accounting.
usrjquota=<file>       Specify the quota file and its format at mount time, so
grpjquota=<file>       that quota information can be properly updated during the
prjjquota=<file>       recovery flow. <quota file>: must be in the root directory;
jqfmt=<quota type>     <quota type>: [vfsold,vfsv0,vfsv1].
offusrjquota           Turn off user journalled quota.
offgrpjquota           Turn off group journalled quota.
offprjjquota           Turn off project journalled quota.
quota                  Enable plain user disk quota accounting.
noquota                Disable all plain disk quota options.
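
As a hedged sketch (not from the patch), a mount(2) call enabling the
journalled quota options above might look like this; the device and
mountpoint are placeholders:

/* Mount f2fs with user/group journalled quota in vfsv0 format. */
#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	const char *opts =
		"usrjquota=aquota.user,grpjquota=aquota.group,jqfmt=vfsv0";

	if (mount("/dev/sda1", "/mnt/f2fs", "f2fs", 0, opts)) {
		perror("mount");
		return 1;
	}
	return 0;
}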

================================================================================
DEBUGFS ENTRIES
@@ -209,6 +219,15 @@ Files in /sys/fs/f2fs/<devname>
                              gc_idle = 1 will select the Cost Benefit approach
                              & setting gc_idle = 2 will select the greedy approach.

 gc_urgent                    This parameter controls whether background GC is
                              triggered urgently. Setting gc_urgent = 0 [default]
                              restores the default behavior, while setting it to 1
                              makes the background thread perform GC at the
                              interval given by gc_urgent_sleep_time.

 gc_urgent_sleep_time         This parameter controls the sleep time for
                              gc_urgent mode. The default is 500 ms. See
                              gc_urgent above.

 reclaim_segments             This parameter controls the number of prefree
                              segments to be reclaimed. If the number of prefree
			      segments is larger than the number of segments
fs/f2fs/acl.c  +3 −2
@@ -207,15 +207,16 @@ static int __f2fs_set_acl(struct inode *inode, int type,
	void *value = NULL;
	size_t size = 0;
	int error;
	umode_t mode = inode->i_mode;

	switch (type) {
	case ACL_TYPE_ACCESS:
		name_index = F2FS_XATTR_INDEX_POSIX_ACL_ACCESS;
		if (acl && !ipage) {
			error = posix_acl_update_mode(inode, &inode->i_mode, &acl);
			error = posix_acl_update_mode(inode, &mode, &acl);
			if (error)
				return error;
			set_acl_inode(inode, inode->i_mode);
			set_acl_inode(inode, mode);
		}
		break;

fs/f2fs/checkpoint.c  +47 −13
@@ -230,8 +230,9 @@ void ra_meta_pages_cond(struct f2fs_sb_info *sbi, pgoff_t index)
		ra_meta_pages(sbi, index, BIO_MAX_PAGES, META_POR, true);
}

static int f2fs_write_meta_page(struct page *page,
				struct writeback_control *wbc)
static int __f2fs_write_meta_page(struct page *page,
				struct writeback_control *wbc,
				enum iostat_type io_type)
{
	struct f2fs_sb_info *sbi = F2FS_P_SB(page);

@@ -244,7 +245,7 @@ static int f2fs_write_meta_page(struct page *page,
	if (unlikely(f2fs_cp_error(sbi)))
		goto redirty_out;

	write_meta_page(sbi, page);
	write_meta_page(sbi, page, io_type);
	dec_page_count(sbi, F2FS_DIRTY_META);

	if (wbc->for_reclaim)
@@ -263,6 +264,12 @@ static int f2fs_write_meta_page(struct page *page,
	return AOP_WRITEPAGE_ACTIVATE;
}

static int f2fs_write_meta_page(struct page *page,
				struct writeback_control *wbc)
{
	return __f2fs_write_meta_page(page, wbc, FS_META_IO);
}

static int f2fs_write_meta_pages(struct address_space *mapping,
				struct writeback_control *wbc)
{
@@ -283,7 +290,7 @@ static int f2fs_write_meta_pages(struct address_space *mapping,

	trace_f2fs_writepages(mapping->host, wbc, META);
	diff = nr_pages_to_write(sbi, META, wbc);
	written = sync_meta_pages(sbi, META, wbc->nr_to_write);
	written = sync_meta_pages(sbi, META, wbc->nr_to_write, FS_META_IO);
	mutex_unlock(&sbi->cp_mutex);
	wbc->nr_to_write = max((long)0, wbc->nr_to_write - written - diff);
	return 0;
@@ -295,7 +302,7 @@ static int f2fs_write_meta_pages(struct address_space *mapping,
}

long sync_meta_pages(struct f2fs_sb_info *sbi, enum page_type type,
						long nr_to_write)
				long nr_to_write, enum iostat_type io_type)
{
	struct address_space *mapping = META_MAPPING(sbi);
	pgoff_t index = 0, end = ULONG_MAX, prev = ULONG_MAX;
@@ -346,7 +353,7 @@ long sync_meta_pages(struct f2fs_sb_info *sbi, enum page_type type,
			if (!clear_page_dirty_for_io(page))
				goto continue_unlock;

			if (mapping->a_ops->writepage(page, &wbc)) {
			if (__f2fs_write_meta_page(page, &wbc, io_type)) {
				unlock_page(page);
				break;
			}
@@ -581,11 +588,24 @@ static int recover_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino)
int recover_orphan_inodes(struct f2fs_sb_info *sbi)
{
	block_t start_blk, orphan_blocks, i, j;
	int err;
	unsigned int s_flags = sbi->sb->s_flags;
	int err = 0;

	if (!is_set_ckpt_flags(sbi, CP_ORPHAN_PRESENT_FLAG))
		return 0;

	if (s_flags & MS_RDONLY) {
		f2fs_msg(sbi->sb, KERN_INFO, "orphan cleanup on readonly fs");
		sbi->sb->s_flags &= ~MS_RDONLY;
	}

#ifdef CONFIG_QUOTA
	/* Needed for iput() to work correctly and not trash data */
	sbi->sb->s_flags |= MS_ACTIVE;
	/* Turn on quotas so that they are updated correctly */
	f2fs_enable_quota_files(sbi);
#endif

	start_blk = __start_cp_addr(sbi) + 1 + __cp_payload(sbi);
	orphan_blocks = __start_sum_addr(sbi) - 1 - __cp_payload(sbi);

@@ -601,14 +621,21 @@ int recover_orphan_inodes(struct f2fs_sb_info *sbi)
			err = recover_orphan_inode(sbi, ino);
			if (err) {
				f2fs_put_page(page, 1);
				return err;
				goto out;
			}
		}
		f2fs_put_page(page, 1);
	}
	/* clear Orphan Flag */
	clear_ckpt_flags(sbi, CP_ORPHAN_PRESENT_FLAG);
	return 0;
out:
#ifdef CONFIG_QUOTA
	/* Turn quotas off */
	f2fs_quota_off_umount(sbi->sb);
#endif
	sbi->sb->s_flags = s_flags; /* Restore MS_RDONLY status */

	return err;
}

static void write_orphan_inodes(struct f2fs_sb_info *sbi, block_t start_blk)
@@ -904,7 +931,14 @@ int sync_dirty_inodes(struct f2fs_sb_info *sbi, enum inode_type type)
	if (inode) {
		unsigned long cur_ino = inode->i_ino;

		if (is_dir)
			F2FS_I(inode)->cp_task = current;

		filemap_fdatawrite(inode->i_mapping);

		if (is_dir)
			F2FS_I(inode)->cp_task = NULL;

		iput(inode);
		/* We need to give the cpu to other writers. */
		if (ino == cur_ino) {
@@ -1017,7 +1051,7 @@ static int block_operations(struct f2fs_sb_info *sbi)

	if (get_pages(sbi, F2FS_DIRTY_NODES)) {
		up_write(&sbi->node_write);
		err = sync_node_pages(sbi, &wbc);
		err = sync_node_pages(sbi, &wbc, false, FS_CP_NODE_IO);
		if (err) {
			up_write(&sbi->node_change);
			f2fs_unlock_all(sbi);
@@ -1115,7 +1149,7 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)

	/* Flush all the NAT/SIT pages */
	while (get_pages(sbi, F2FS_DIRTY_META)) {
		sync_meta_pages(sbi, META, LONG_MAX);
		sync_meta_pages(sbi, META, LONG_MAX, FS_CP_META_IO);
		if (unlikely(f2fs_cp_error(sbi)))
			return -EIO;
	}
@@ -1194,7 +1228,7 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)

		/* Flush all the NAT BITS pages */
		while (get_pages(sbi, F2FS_DIRTY_META)) {
			sync_meta_pages(sbi, META, LONG_MAX);
			sync_meta_pages(sbi, META, LONG_MAX, FS_CP_META_IO);
			if (unlikely(f2fs_cp_error(sbi)))
				return -EIO;
		}
@@ -1249,7 +1283,7 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
	percpu_counter_set(&sbi->alloc_valid_block_count, 0);

	/* Here, we only have one bio having CP pack */
	sync_meta_pages(sbi, META_FLUSH, LONG_MAX);
	sync_meta_pages(sbi, META_FLUSH, LONG_MAX, FS_CP_META_IO);

	/* wait for previous submitted meta pages writeback */
	wait_on_all_pages_writeback(sbi);
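
The checkpoint.c changes above mostly thread an enum iostat_type tag
down the meta/node write paths so each page write is accounted to a
bucket (FS_META_IO vs. FS_CP_META_IO, and so on) for the new io stat
facility. Below is a minimal userspace model of that accounting
pattern, with a simplified counter table standing in for the real
f2fs_update_iostat():

/* Toy model: per-type IO byte counters, updated by the write path
 * with the io_type its caller passed down. */
#include <stdio.h>

enum iostat_type {
	FS_DATA_IO, FS_NODE_IO, FS_META_IO,
	FS_CP_DATA_IO, FS_CP_NODE_IO, FS_CP_META_IO,
	NR_IO_TYPE,
};

static unsigned long long io_bytes[NR_IO_TYPE];

static void update_iostat(enum iostat_type type, unsigned long long bytes)
{
	io_bytes[type] += bytes;	/* the kernel guards this with a lock */
}

/* Stand-in for __f2fs_write_meta_page(): the caller decides the tag. */
static void write_meta_page(enum iostat_type io_type)
{
	update_iostat(io_type, 4096);	/* one 4 KiB page */
}

int main(void)
{
	write_meta_page(FS_META_IO);	/* ordinary writeback */
	write_meta_page(FS_CP_META_IO);	/* writeback issued by checkpoint */
	printf("meta: %llu, cp meta: %llu\n",
	       io_bytes[FS_META_IO], io_bytes[FS_CP_META_IO]);
	return 0;
}
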
fs/f2fs/data.c  +96 −81
@@ -457,14 +457,65 @@ int f2fs_submit_page_write(struct f2fs_io_info *fio)
	return err;
}

static struct bio *f2fs_grab_read_bio(struct inode *inode, block_t blkaddr,
							 unsigned nr_pages)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct fscrypt_ctx *ctx = NULL;
	struct bio *bio;

	if (f2fs_encrypted_file(inode)) {
		ctx = fscrypt_get_ctx(inode, GFP_NOFS);
		if (IS_ERR(ctx))
			return ERR_CAST(ctx);

		/* wait for the page to be moved by cleaning */
		f2fs_wait_on_block_writeback(sbi, blkaddr);
	}

	bio = bio_alloc(GFP_KERNEL, min_t(int, nr_pages, BIO_MAX_PAGES));
	if (!bio) {
		if (ctx)
			fscrypt_release_ctx(ctx);
		return ERR_PTR(-ENOMEM);
	}
	f2fs_target_device(sbi, blkaddr, bio);
	bio->bi_end_io = f2fs_read_end_io;
	bio->bi_private = ctx;
	bio_set_op_attrs(bio, REQ_OP_READ, 0);

	return bio;
}

/* This can handle encryption stuffs */
static int f2fs_submit_page_read(struct inode *inode, struct page *page,
							block_t blkaddr)
{
	struct bio *bio = f2fs_grab_read_bio(inode, blkaddr, 1);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
		bio_put(bio);
		return -EFAULT;
	}
	__submit_bio(F2FS_I_SB(inode), bio, DATA);
	return 0;
}

static void __set_data_blkaddr(struct dnode_of_data *dn)
{
	struct f2fs_node *rn = F2FS_NODE(dn->node_page);
	__le32 *addr_array;
	int base = 0;

	if (IS_INODE(dn->node_page) && f2fs_has_extra_attr(dn->inode))
		base = get_extra_isize(dn->inode);

	/* Get physical address of data block */
	addr_array = blkaddr_in_node(rn);
	addr_array[dn->ofs_in_node] = cpu_to_le32(dn->data_blkaddr);
	addr_array[base + dn->ofs_in_node] = cpu_to_le32(dn->data_blkaddr);
}

/*
@@ -508,8 +559,8 @@ int reserve_new_blocks(struct dnode_of_data *dn, blkcnt_t count)
	f2fs_wait_on_page_writeback(dn->node_page, NODE, true);

	for (; count > 0; dn->ofs_in_node++) {
		block_t blkaddr =
			datablock_addr(dn->node_page, dn->ofs_in_node);
		block_t blkaddr = datablock_addr(dn->inode,
					dn->node_page, dn->ofs_in_node);
		if (blkaddr == NULL_ADDR) {
			dn->data_blkaddr = NEW_ADDR;
			__set_data_blkaddr(dn);
@@ -570,16 +621,6 @@ struct page *get_read_data_page(struct inode *inode, pgoff_t index,
	struct page *page;
	struct extent_info ei = {0,0,0};
	int err;
	struct f2fs_io_info fio = {
		.sbi = F2FS_I_SB(inode),
		.type = DATA,
		.op = REQ_OP_READ,
		.op_flags = op_flags,
		.encrypted_page = NULL,
	};

	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
		return read_mapping_page(mapping, index, NULL);

	page = f2fs_grab_cache_page(mapping, index, for_write);
	if (!page)
@@ -620,9 +661,7 @@ struct page *get_read_data_page(struct inode *inode, pgoff_t index,
		return page;
	}

	fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;
	fio.page = page;
	err = f2fs_submit_page_bio(&fio);
	err = f2fs_submit_page_read(inode, page, dn.data_blkaddr);
	if (err)
		goto put_err;
	return page;
@@ -756,7 +795,8 @@ static int __allocate_data_block(struct dnode_of_data *dn)
	if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
		return -EPERM;

	dn->data_blkaddr = datablock_addr(dn->node_page, dn->ofs_in_node);
	dn->data_blkaddr = datablock_addr(dn->inode,
				dn->node_page, dn->ofs_in_node);
	if (dn->data_blkaddr == NEW_ADDR)
		goto alloc;

@@ -782,7 +822,7 @@ static int __allocate_data_block(struct dnode_of_data *dn)

static inline bool __force_buffered_io(struct inode *inode, int rw)
{
	return ((f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) ||
	return (f2fs_encrypted_file(inode) ||
			(rw == WRITE && test_opt(F2FS_I_SB(inode), LFS)) ||
			F2FS_I_SB(inode)->s_ndevs);
}
@@ -814,7 +854,7 @@ int f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *from)
				F2FS_GET_BLOCK_PRE_AIO :
				F2FS_GET_BLOCK_PRE_DIO);
	}
	if (iocb->ki_pos + iov_iter_count(from) > MAX_INLINE_DATA) {
	if (iocb->ki_pos + iov_iter_count(from) > MAX_INLINE_DATA(inode)) {
		err = f2fs_convert_inline_inode(inode);
		if (err)
			return err;
@@ -903,7 +943,7 @@ int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
	end_offset = ADDRS_PER_PAGE(dn.node_page, inode);

next_block:
	blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);
	blkaddr = datablock_addr(dn.inode, dn.node_page, dn.ofs_in_node);

	if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR) {
		if (create) {
@@ -1040,7 +1080,7 @@ static int get_data_block_dio(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create)
{
	return __get_data_block(inode, iblock, bh_result, create,
						F2FS_GET_BLOCK_DIO, NULL);
						F2FS_GET_BLOCK_DEFAULT, NULL);
}

static int get_data_block_bmap(struct inode *inode, sector_t iblock,
@@ -1146,35 +1186,6 @@ int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
	return ret;
}

static struct bio *f2fs_grab_bio(struct inode *inode, block_t blkaddr,
				 unsigned nr_pages)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct fscrypt_ctx *ctx = NULL;
	struct bio *bio;

	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) {
		ctx = fscrypt_get_ctx(inode, GFP_NOFS);
		if (IS_ERR(ctx))
			return ERR_CAST(ctx);

		/* wait the page to be moved by cleaning */
		f2fs_wait_on_encrypted_page_writeback(sbi, blkaddr);
	}

	bio = bio_alloc(GFP_KERNEL, min_t(int, nr_pages, BIO_MAX_PAGES));
	if (!bio) {
		if (ctx)
			fscrypt_release_ctx(ctx);
		return ERR_PTR(-ENOMEM);
	}
	f2fs_target_device(sbi, blkaddr, bio);
	bio->bi_end_io = f2fs_read_end_io;
	bio->bi_private = ctx;

	return bio;
}

/*
 * This function was originally taken from fs/mpage.c, and customized for f2fs.
 * Major change was from block_size == page_size in f2fs by default.
@@ -1240,7 +1251,7 @@ static int f2fs_mpage_readpages(struct address_space *mapping,
			map.m_len = last_block - block_in_file;

			if (f2fs_map_blocks(inode, &map, 0,
						F2FS_GET_BLOCK_READ))
						F2FS_GET_BLOCK_DEFAULT))
				goto set_error_page;
		}
got_it:
@@ -1271,12 +1282,11 @@ static int f2fs_mpage_readpages(struct address_space *mapping,
			bio = NULL;
		}
		if (bio == NULL) {
			bio = f2fs_grab_bio(inode, block_nr, nr_pages);
			bio = f2fs_grab_read_bio(inode, block_nr, nr_pages);
			if (IS_ERR(bio)) {
				bio = NULL;
				goto set_error_page;
			}
			bio_set_op_attrs(bio, REQ_OP_READ, 0);
		}

		if (bio_add_page(bio, page, blocksize, 0) < blocksize)
@@ -1341,11 +1351,11 @@ static int encrypt_one_page(struct f2fs_io_info *fio)
	struct inode *inode = fio->page->mapping->host;
	gfp_t gfp_flags = GFP_NOFS;

	if (!f2fs_encrypted_inode(inode) || !S_ISREG(inode->i_mode))
	if (!f2fs_encrypted_file(inode))
		return 0;

	/* wait for GCed encrypted page writeback */
	f2fs_wait_on_encrypted_page_writeback(fio->sbi, fio->old_blkaddr);
	f2fs_wait_on_block_writeback(fio->sbi, fio->old_blkaddr);

retry_encrypt:
	fio->encrypted_page = fscrypt_encrypt_page(inode, fio->page,
@@ -1471,7 +1481,8 @@ int do_write_data_page(struct f2fs_io_info *fio)
}

static int __write_data_page(struct page *page, bool *submitted,
				struct writeback_control *wbc)
				struct writeback_control *wbc,
				enum iostat_type io_type)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
@@ -1492,6 +1503,7 @@ static int __write_data_page(struct page *page, bool *submitted,
		.encrypted_page = NULL,
		.submitted = false,
		.need_lock = LOCK_RETRY,
		.io_type = io_type,
	};

	trace_f2fs_writepage(page, DATA);
@@ -1598,7 +1610,7 @@ static int __write_data_page(struct page *page, bool *submitted,
static int f2fs_write_data_page(struct page *page,
					struct writeback_control *wbc)
{
	return __write_data_page(page, NULL, wbc);
	return __write_data_page(page, NULL, wbc, FS_DATA_IO);
}

/*
@@ -1607,7 +1619,8 @@ static int f2fs_write_data_page(struct page *page,
 * warm/hot data page.
 */
static int f2fs_write_cache_pages(struct address_space *mapping,
					struct writeback_control *wbc)
					struct writeback_control *wbc,
					enum iostat_type io_type)
{
	int ret = 0;
	int done = 0;
@@ -1697,7 +1710,7 @@ static int f2fs_write_cache_pages(struct address_space *mapping,
			if (!clear_page_dirty_for_io(page))
				goto continue_unlock;

			ret = __write_data_page(page, &submitted, wbc);
			ret = __write_data_page(page, &submitted, wbc, io_type);
			if (unlikely(ret)) {
				/*
				 * keep nr_to_write, since vfs uses this to
@@ -1752,8 +1765,9 @@ static int f2fs_write_cache_pages(struct address_space *mapping,
	return ret;
}

static int f2fs_write_data_pages(struct address_space *mapping,
			    struct writeback_control *wbc)
int __f2fs_write_data_pages(struct address_space *mapping,
						struct writeback_control *wbc,
						enum iostat_type io_type)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
@@ -1790,7 +1804,7 @@ static int f2fs_write_data_pages(struct address_space *mapping,
		goto skip_write;

	blk_start_plug(&plug);
	ret = f2fs_write_cache_pages(mapping, wbc);
	ret = f2fs_write_cache_pages(mapping, wbc, io_type);
	blk_finish_plug(&plug);

	if (wbc->sync_mode == WB_SYNC_ALL)
@@ -1809,6 +1823,16 @@ static int f2fs_write_data_pages(struct address_space *mapping,
	return 0;
}

static int f2fs_write_data_pages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;

	return __f2fs_write_data_pages(mapping, wbc,
			F2FS_I(inode)->cp_task == current ?
			FS_CP_DATA_IO : FS_DATA_IO);
}

static void f2fs_write_failed(struct address_space *mapping, loff_t to)
{
	struct inode *inode = mapping->host;
@@ -1858,7 +1882,7 @@ static int prepare_write_begin(struct f2fs_sb_info *sbi,
	set_new_dnode(&dn, inode, ipage, ipage, 0);

	if (f2fs_has_inline_data(inode)) {
		if (pos + len <= MAX_INLINE_DATA) {
		if (pos + len <= MAX_INLINE_DATA(inode)) {
			read_inline_data(page, ipage);
			set_inode_flag(inode, FI_DATA_EXIST);
			if (inode->i_nlink)
@@ -1956,8 +1980,8 @@ static int f2fs_write_begin(struct file *file, struct address_space *mapping,
	f2fs_wait_on_page_writeback(page, DATA, false);

	/* wait for GCed encrypted page writeback */
	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
		f2fs_wait_on_encrypted_page_writeback(sbi, blkaddr);
	if (f2fs_encrypted_file(inode))
		f2fs_wait_on_block_writeback(sbi, blkaddr);

	if (len == PAGE_SIZE || PageUptodate(page))
		return 0;
@@ -1971,21 +1995,9 @@ static int f2fs_write_begin(struct file *file, struct address_space *mapping,
		zero_user_segment(page, 0, PAGE_SIZE);
		SetPageUptodate(page);
	} else {
		struct bio *bio;

		bio = f2fs_grab_bio(inode, blkaddr, 1);
		if (IS_ERR(bio)) {
			err = PTR_ERR(bio);
			goto fail;
		}
		bio->bi_opf = REQ_OP_READ;
		if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
			bio_put(bio);
			err = -EFAULT;
		err = f2fs_submit_page_read(inode, page, blkaddr);
		if (err)
			goto fail;
		}

		__submit_bio(sbi, bio, DATA);

		lock_page(page);
		if (unlikely(page->mapping != mapping)) {
@@ -2075,11 +2087,14 @@ static ssize_t f2fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
	up_read(&F2FS_I(inode)->dio_rwsem[rw]);

	if (rw == WRITE) {
		if (err > 0)
		if (err > 0) {
			f2fs_update_iostat(F2FS_I_SB(inode), APP_DIRECT_IO,
									err);
			set_inode_flag(inode, FI_UPDATE_WRITE);
		else if (err < 0)
		} else if (err < 0) {
			f2fs_write_failed(mapping, offset + count);
		}
	}

	trace_f2fs_direct_IO_exit(inode, offset, count, rw, err);
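
One detail worth calling out from the data.c hunk: with the extra
attribute space in an inode, the data block address array no longer
starts at slot 0, so __set_data_blkaddr() now indexes it at
base + ofs_in_node. A toy model of that layout shift (field names and
sizes are illustrative, not the on-disk format):

/* Toy inode: when extra attributes are present, block addresses are
 * shifted past them by 'base' slots. */
#include <stdio.h>
#include <stdint.h>

#define ADDRS 16			/* illustrative array size */

struct toy_inode {
	int has_extra_attr;
	int extra_isize;		/* extra attr area, in 4-byte slots */
	uint32_t addr[ADDRS];
};

static void set_data_blkaddr(struct toy_inode *ino, int ofs_in_node,
			     uint32_t blkaddr)
{
	int base = ino->has_extra_attr ? ino->extra_isize : 0;

	ino->addr[base + ofs_in_node] = blkaddr;
}

int main(void)
{
	struct toy_inode ino = { .has_extra_attr = 1, .extra_isize = 9 };

	set_data_blkaddr(&ino, 0, 0x1234);
	printf("slot 9 = %#x\n", ino.addr[9]);	/* landed past the extra area */
	return 0;
}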
