Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 30211125 authored by Linus Torvalds's avatar Linus Torvalds
Browse files
Pull f2fs updates from Jaegeuk Kim:
 "This patch-set includes the following major enhancement patches.
   - introduce large directory support
   - introduce f2fs_issue_flush to merge redundant flush commands
   - merge write IOs as much as possible aligned to the segment
   - add sysfs entries to tune the f2fs configuration
   - use radix_tree for the free_nid_list to reduce in-memory operations
   - remove costly bit operations in f2fs_find_entry
   - enhance the readahead flow for CP/NAT/SIT/SSA blocks

  The other bug fixes are as follows:
   - recover xattr node blocks correctly after sudden-power-cut
   - fix to calculate the maximum number of node ids
   - enhance to handle many error cases

  And, there are a bunch of cleanups"

* tag 'for-f2fs-3.15' of git://git.kernel.org/pub/scm/linux/kernel/git/jaegeuk/f2fs: (62 commits)
  f2fs: fix wrong statistics of inline data
  f2fs: check the acl's validity before setting
  f2fs: introduce f2fs_issue_flush to avoid redundant flush issue
  f2fs: fix to cover io->bio with io_rwsem
  f2fs: fix error path when fail to read inline data
  f2fs: use list_for_each_entry{_safe} for simplifying code
  f2fs: avoid free slab cache under spinlock
  f2fs: avoid unneeded lookup when xattr name length is too long
  f2fs: avoid unnecessary bio submit when wait page writeback
  f2fs: return -EIO when node id is not matched
  f2fs: avoid RECLAIM_FS-ON-W warning
  f2fs: skip unnecessary node writes during fsync
  f2fs: introduce fi->i_sem to protect fi's info
  f2fs: change reclaim rate in percentage
  f2fs: add missing documentation for dir_level
  f2fs: remove unnecessary threshold
  f2fs: throttle the memory footprint with a sysfs entry
  f2fs: avoid to drop nat entries due to the negative nr_shrink
  f2fs: call f2fs_wait_on_page_writeback instead of native function
  f2fs: introduce nr_pages_to_write for segment alignment
  ...
parents 0af9fb63 48b230a5
Loading
Loading
Loading
Loading
+12 −0
Original line number Diff line number Diff line
@@ -55,3 +55,15 @@ Date: January 2014
Contact:	"Jaegeuk Kim" <jaegeuk.kim@samsung.com>
Description:
		 Controls the number of trials to find a victim segment.

What:		/sys/fs/f2fs/<disk>/dir_level
Date:		March 2014
Contact:	"Jaegeuk Kim" <jaegeuk.kim@samsung.com>
Description:
		 Controls the directory level for large directory.

What:		/sys/fs/f2fs/<disk>/ram_thresh
Date:		March 2014
Contact:	"Jaegeuk Kim" <jaegeuk.kim@samsung.com>
Description:
		 Controls the memory footprint used by f2fs.
+24 −5
Original line number Diff line number Diff line
@@ -122,6 +122,10 @@ disable_ext_identify Disable the extension list configured by mkfs, so f2fs
inline_xattr           Enable the inline xattrs feature.
inline_data            Enable the inline data feature: New created small(<~3.4k)
                       files can be written into inode block.
flush_merge	       Merge concurrent cache_flush commands as much as possible
                       to eliminate redundant command issues. If the underlying
		       device handles the cache_flush command relatively slowly,
		       recommend to enable this option.

================================================================================
DEBUGFS ENTRIES
@@ -169,9 +173,11 @@ Files in /sys/fs/f2fs/<devname>

 reclaim_segments             This parameter controls the number of prefree
                              segments to be reclaimed. If the number of prefree
			      segments is larger than this number, f2fs tries to
			      conduct checkpoint to reclaim the prefree segments
			      to free segments. By default, 100 segments, 200MB.
			      segments is larger than the number of segments
			      in the proportion to the percentage over total
			      volume size, f2fs tries to conduct checkpoint to
			      reclaim the prefree segments to free segments.
			      By default, 5% over total # of segments.

 max_small_discards	      This parameter controls the number of discard
			      commands that consist small blocks less than 2MB.
@@ -195,6 +201,17 @@ Files in /sys/fs/f2fs/<devname>
			      cleaning operations. The default value is 4096
			      which covers 8GB block address range.

 dir_level                    This parameter controls the directory level to
			      support large directory. If a directory has a
			      number of files, it can reduce the file lookup
			      latency by increasing this dir_level value.
			      Otherwise, it needs to decrease this value to
			      reduce the space overhead. The default value is 0.

 ram_thresh                   This parameter controls the memory footprint used
			      by free nids and cached nat entries. By default,
			      10 is set, which indicates 10 MB / 1 GB RAM.

================================================================================
USAGE
================================================================================
@@ -444,9 +461,11 @@ The number of blocks and buckets are determined by,
  # of blocks in level #n = |
                            `- 4, Otherwise

                             ,- 2^n, if n < MAX_DIR_HASH_DEPTH / 2,
                             ,- 2^ (n + dir_level),
			     |            if n < MAX_DIR_HASH_DEPTH / 2,
  # of buckets in level #n = |
                             `- 2^((MAX_DIR_HASH_DEPTH / 2) - 1), Otherwise
                             `- 2^((MAX_DIR_HASH_DEPTH / 2 + dir_level) - 1),
			                  Otherwise

When F2FS finds a file name in a directory, at first a hash value of the file
name is calculated. Then, F2FS scans the hash table in level #0 to find the
+7 −1
Original line number Diff line number Diff line
@@ -174,7 +174,7 @@ struct posix_acl *f2fs_get_acl(struct inode *inode, int type)

	retval = f2fs_getxattr(inode, name_index, "", NULL, 0);
	if (retval > 0) {
		value = kmalloc(retval, GFP_KERNEL);
		value = kmalloc(retval, GFP_F2FS_ZERO);
		if (!value)
			return ERR_PTR(-ENOMEM);
		retval = f2fs_getxattr(inode, name_index, "", value, retval);
@@ -203,6 +203,12 @@ static int __f2fs_set_acl(struct inode *inode, int type,
	size_t size = 0;
	int error;

	if (acl) {
		error = posix_acl_valid(acl);
		if (error < 0)
			return error;
	}

	switch (type) {
	case ACL_TYPE_ACCESS:
		name_index = F2FS_XATTR_INDEX_POSIX_ACL_ACCESS;
+150 −58
Original line number Diff line number Diff line
@@ -33,14 +33,12 @@ struct page *grab_meta_page(struct f2fs_sb_info *sbi, pgoff_t index)
	struct address_space *mapping = META_MAPPING(sbi);
	struct page *page = NULL;
repeat:
	page = grab_cache_page(mapping, index);
	page = grab_cache_page_write_begin(mapping, index, AOP_FLAG_NOFS);
	if (!page) {
		cond_resched();
		goto repeat;
	}

	/* We wait writeback only inside grab_meta_page() */
	wait_on_page_writeback(page);
	SetPageUptodate(page);
	return page;
}
@@ -75,23 +73,102 @@ out:
	return page;
}

inline int get_max_meta_blks(struct f2fs_sb_info *sbi, int type)
{
	switch (type) {
	case META_NAT:
		return NM_I(sbi)->max_nid / NAT_ENTRY_PER_BLOCK;
	case META_SIT:
		return SIT_BLK_CNT(sbi);
	case META_SSA:
	case META_CP:
		return 0;
	default:
		BUG();
	}
}

/*
 * Readahead CP/NAT/SIT/SSA pages
 */
int ra_meta_pages(struct f2fs_sb_info *sbi, int start, int nrpages, int type)
{
	block_t prev_blk_addr = 0;
	struct page *page;
	int blkno = start;
	int max_blks = get_max_meta_blks(sbi, type);

	struct f2fs_io_info fio = {
		.type = META,
		.rw = READ_SYNC | REQ_META | REQ_PRIO
	};

	for (; nrpages-- > 0; blkno++) {
		block_t blk_addr;

		switch (type) {
		case META_NAT:
			/* get nat block addr */
			if (unlikely(blkno >= max_blks))
				blkno = 0;
			blk_addr = current_nat_addr(sbi,
					blkno * NAT_ENTRY_PER_BLOCK);
			break;
		case META_SIT:
			/* get sit block addr */
			if (unlikely(blkno >= max_blks))
				goto out;
			blk_addr = current_sit_addr(sbi,
					blkno * SIT_ENTRY_PER_BLOCK);
			if (blkno != start && prev_blk_addr + 1 != blk_addr)
				goto out;
			prev_blk_addr = blk_addr;
			break;
		case META_SSA:
		case META_CP:
			/* get ssa/cp block addr */
			blk_addr = blkno;
			break;
		default:
			BUG();
		}

		page = grab_cache_page(META_MAPPING(sbi), blk_addr);
		if (!page)
			continue;
		if (PageUptodate(page)) {
			mark_page_accessed(page);
			f2fs_put_page(page, 1);
			continue;
		}

		f2fs_submit_page_mbio(sbi, page, blk_addr, &fio);
		mark_page_accessed(page);
		f2fs_put_page(page, 0);
	}
out:
	f2fs_submit_merged_bio(sbi, META, READ);
	return blkno - start;
}

static int f2fs_write_meta_page(struct page *page,
				struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);

	/* Should not write any meta pages, if any IO error was occurred */
	if (unlikely(sbi->por_doing ||
			is_set_ckpt_flags(F2FS_CKPT(sbi), CP_ERROR_FLAG)))
	if (unlikely(sbi->por_doing))
		goto redirty_out;

	if (wbc->for_reclaim)
		goto redirty_out;

	wait_on_page_writeback(page);
	/* Should not write any meta pages, if any IO error was occurred */
	if (unlikely(is_set_ckpt_flags(F2FS_CKPT(sbi), CP_ERROR_FLAG)))
		goto no_write;

	f2fs_wait_on_page_writeback(page, META);
	write_meta_page(sbi, page);
no_write:
	dec_page_count(sbi, F2FS_DIRTY_META);
	unlock_page(page);
	return 0;
@@ -99,6 +176,7 @@ static int f2fs_write_meta_page(struct page *page,
redirty_out:
	dec_page_count(sbi, F2FS_DIRTY_META);
	wbc->pages_skipped++;
	account_page_redirty(page);
	set_page_dirty(page);
	return AOP_WRITEPAGE_ACTIVATE;
}
@@ -107,21 +185,23 @@ static int f2fs_write_meta_pages(struct address_space *mapping,
				struct writeback_control *wbc)
{
	struct f2fs_sb_info *sbi = F2FS_SB(mapping->host->i_sb);
	int nrpages = MAX_BIO_BLOCKS(max_hw_blocks(sbi));
	long written;

	if (wbc->for_kupdate)
		return 0;
	long diff, written;

	/* collect a number of dirty meta pages and write together */
	if (get_pages(sbi, F2FS_DIRTY_META) < nrpages)
		return 0;
	if (wbc->for_kupdate ||
		get_pages(sbi, F2FS_DIRTY_META) < nr_pages_to_skip(sbi, META))
		goto skip_write;

	/* if mounting is failed, skip writing node pages */
	mutex_lock(&sbi->cp_mutex);
	written = sync_meta_pages(sbi, META, nrpages);
	diff = nr_pages_to_write(sbi, META, wbc);
	written = sync_meta_pages(sbi, META, wbc->nr_to_write);
	mutex_unlock(&sbi->cp_mutex);
	wbc->nr_to_write -= written;
	wbc->nr_to_write = max((long)0, wbc->nr_to_write - written - diff);
	return 0;

skip_write:
	wbc->pages_skipped += get_pages(sbi, F2FS_DIRTY_META);
	return 0;
}

@@ -148,10 +228,22 @@ long sync_meta_pages(struct f2fs_sb_info *sbi, enum page_type type,

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			lock_page(page);
			f2fs_bug_on(page->mapping != mapping);
			f2fs_bug_on(!PageDirty(page));
			clear_page_dirty_for_io(page);

			if (unlikely(page->mapping != mapping)) {
continue_unlock:
				unlock_page(page);
				continue;
			}
			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			if (!clear_page_dirty_for_io(page))
				goto continue_unlock;

			if (f2fs_write_meta_page(page, &wbc)) {
				unlock_page(page);
				break;
@@ -216,16 +308,15 @@ void release_orphan_inode(struct f2fs_sb_info *sbi)

void add_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct list_head *head, *this;
	struct orphan_inode_entry *new = NULL, *orphan = NULL;
	struct list_head *head;
	struct orphan_inode_entry *new, *orphan;

	new = f2fs_kmem_cache_alloc(orphan_entry_slab, GFP_ATOMIC);
	new->ino = ino;

	spin_lock(&sbi->orphan_inode_lock);
	head = &sbi->orphan_inode_list;
	list_for_each(this, head) {
		orphan = list_entry(this, struct orphan_inode_entry, list);
	list_for_each_entry(orphan, head, list) {
		if (orphan->ino == ino) {
			spin_unlock(&sbi->orphan_inode_lock);
			kmem_cache_free(orphan_entry_slab, new);
@@ -234,14 +325,10 @@ void add_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino)

		if (orphan->ino > ino)
			break;
		orphan = NULL;
	}

	/* add new_oentry into list which is sorted by inode number */
	if (orphan)
		list_add(&new->list, this->prev);
	else
		list_add_tail(&new->list, head);
	/* add new orphan entry into list which is sorted by inode number */
	list_add_tail(&new->list, &orphan->list);
	spin_unlock(&sbi->orphan_inode_lock);
}

@@ -255,10 +342,11 @@ void remove_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino)
	list_for_each_entry(orphan, head, list) {
		if (orphan->ino == ino) {
			list_del(&orphan->list);
			kmem_cache_free(orphan_entry_slab, orphan);
			f2fs_bug_on(sbi->n_orphans == 0);
			sbi->n_orphans--;
			break;
			spin_unlock(&sbi->orphan_inode_lock);
			kmem_cache_free(orphan_entry_slab, orphan);
			return;
		}
	}
	spin_unlock(&sbi->orphan_inode_lock);
@@ -285,6 +373,8 @@ void recover_orphan_inodes(struct f2fs_sb_info *sbi)
	start_blk = __start_cp_addr(sbi) + 1;
	orphan_blkaddr = __start_sum_addr(sbi) - 1;

	ra_meta_pages(sbi, start_blk, orphan_blkaddr, META_CP);

	for (i = 0; i < orphan_blkaddr; i++) {
		struct page *page = get_meta_page(sbi, start_blk + i);
		struct f2fs_orphan_block *orphan_blk;
@@ -466,14 +556,12 @@ static int __add_dirty_inode(struct inode *inode, struct dir_inode_entry *new)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct list_head *head = &sbi->dir_inode_list;
	struct list_head *this;

	list_for_each(this, head) {
	struct dir_inode_entry *entry;
		entry = list_entry(this, struct dir_inode_entry, list);

	list_for_each_entry(entry, head, list)
		if (unlikely(entry->inode == inode))
			return -EEXIST;
	}

	list_add_tail(&new->list, head);
	stat_inc_dirty_dir(sbi);
	return 0;
@@ -483,6 +571,7 @@ void set_dirty_dir_page(struct inode *inode, struct page *page)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct dir_inode_entry *new;
	int ret = 0;

	if (!S_ISDIR(inode->i_mode))
		return;
@@ -492,13 +581,13 @@ void set_dirty_dir_page(struct inode *inode, struct page *page)
	INIT_LIST_HEAD(&new->list);

	spin_lock(&sbi->dir_inode_lock);
	if (__add_dirty_inode(inode, new))
		kmem_cache_free(inode_entry_slab, new);

	inc_page_count(sbi, F2FS_DIRTY_DENTS);
	ret = __add_dirty_inode(inode, new);
	inode_inc_dirty_dents(inode);
	SetPagePrivate(page);
	spin_unlock(&sbi->dir_inode_lock);

	if (ret)
		kmem_cache_free(inode_entry_slab, new);
}

void add_dirty_dir_inode(struct inode *inode)
@@ -506,44 +595,47 @@ void add_dirty_dir_inode(struct inode *inode)
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct dir_inode_entry *new =
			f2fs_kmem_cache_alloc(inode_entry_slab, GFP_NOFS);
	int ret = 0;

	new->inode = inode;
	INIT_LIST_HEAD(&new->list);

	spin_lock(&sbi->dir_inode_lock);
	if (__add_dirty_inode(inode, new))
		kmem_cache_free(inode_entry_slab, new);
	ret = __add_dirty_inode(inode, new);
	spin_unlock(&sbi->dir_inode_lock);

	if (ret)
		kmem_cache_free(inode_entry_slab, new);
}

void remove_dirty_dir_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);

	struct list_head *this, *head;
	struct list_head *head;
	struct dir_inode_entry *entry;

	if (!S_ISDIR(inode->i_mode))
		return;

	spin_lock(&sbi->dir_inode_lock);
	if (atomic_read(&F2FS_I(inode)->dirty_dents)) {
	if (get_dirty_dents(inode)) {
		spin_unlock(&sbi->dir_inode_lock);
		return;
	}

	head = &sbi->dir_inode_list;
	list_for_each(this, head) {
		struct dir_inode_entry *entry;
		entry = list_entry(this, struct dir_inode_entry, list);
	list_for_each_entry(entry, head, list) {
		if (entry->inode == inode) {
			list_del(&entry->list);
			kmem_cache_free(inode_entry_slab, entry);
			stat_dec_dirty_dir(sbi);
			break;
			spin_unlock(&sbi->dir_inode_lock);
			kmem_cache_free(inode_entry_slab, entry);
			goto done;
		}
	}
	spin_unlock(&sbi->dir_inode_lock);

done:
	/* Only from the recovery routine */
	if (is_inode_flag_set(F2FS_I(inode), FI_DELAY_IPUT)) {
		clear_inode_flag(F2FS_I(inode), FI_DELAY_IPUT);
@@ -554,15 +646,14 @@ void remove_dirty_dir_inode(struct inode *inode)
struct inode *check_dirty_dir_inode(struct f2fs_sb_info *sbi, nid_t ino)
{

	struct list_head *this, *head;
	struct list_head *head;
	struct inode *inode = NULL;
	struct dir_inode_entry *entry;

	spin_lock(&sbi->dir_inode_lock);

	head = &sbi->dir_inode_list;
	list_for_each(this, head) {
		struct dir_inode_entry *entry;
		entry = list_entry(this, struct dir_inode_entry, list);
	list_for_each_entry(entry, head, list) {
		if (entry->inode->i_ino == ino) {
			inode = entry->inode;
			break;
@@ -589,7 +680,7 @@ retry:
	inode = igrab(entry->inode);
	spin_unlock(&sbi->dir_inode_lock);
	if (inode) {
		filemap_flush(inode->i_mapping);
		filemap_fdatawrite(inode->i_mapping);
		iput(inode);
	} else {
		/*
@@ -824,6 +915,7 @@ void write_checkpoint(struct f2fs_sb_info *sbi, bool is_umount)
	unblock_operations(sbi);
	mutex_unlock(&sbi->cp_mutex);

	stat_inc_cp_count(sbi->stat_info);
	trace_f2fs_write_checkpoint(sbi->sb, is_umount, "finish checkpoint");
}

@@ -845,11 +937,11 @@ void init_orphan_info(struct f2fs_sb_info *sbi)
int __init create_checkpoint_caches(void)
{
	orphan_entry_slab = f2fs_kmem_cache_create("f2fs_orphan_entry",
			sizeof(struct orphan_inode_entry), NULL);
			sizeof(struct orphan_inode_entry));
	if (!orphan_entry_slab)
		return -ENOMEM;
	inode_entry_slab = f2fs_kmem_cache_create("f2fs_dirty_dir_entry",
			sizeof(struct dir_inode_entry), NULL);
			sizeof(struct dir_inode_entry));
	if (!inode_entry_slab) {
		kmem_cache_destroy(orphan_entry_slab);
		return -ENOMEM;
+51 −55
Original line number Diff line number Diff line
@@ -45,7 +45,7 @@ static void f2fs_read_end_io(struct bio *bio, int err)

static void f2fs_write_end_io(struct bio *bio, int err)
{
	struct f2fs_sb_info *sbi = F2FS_SB(bio->bi_io_vec->bv_page->mapping->host->i_sb);
	struct f2fs_sb_info *sbi = bio->bi_private;
	struct bio_vec *bvec;
	int i;

@@ -55,15 +55,16 @@ static void f2fs_write_end_io(struct bio *bio, int err)
		if (unlikely(err)) {
			SetPageError(page);
			set_bit(AS_EIO, &page->mapping->flags);
			set_ckpt_flags(sbi->ckpt, CP_ERROR_FLAG);
			sbi->sb->s_flags |= MS_RDONLY;
			f2fs_stop_checkpoint(sbi);
		}
		end_page_writeback(page);
		dec_page_count(sbi, F2FS_WRITEBACK);
	}

	if (bio->bi_private)
		complete(bio->bi_private);
	if (sbi->wait_io) {
		complete(sbi->wait_io);
		sbi->wait_io = NULL;
	}

	if (!get_pages(sbi, F2FS_WRITEBACK) &&
			!list_empty(&sbi->cp_wait.task_list))
@@ -86,6 +87,7 @@ static struct bio *__bio_alloc(struct f2fs_sb_info *sbi, block_t blk_addr,
	bio->bi_bdev = sbi->sb->s_bdev;
	bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(sbi, blk_addr);
	bio->bi_end_io = is_read ? f2fs_read_end_io : f2fs_write_end_io;
	bio->bi_private = sbi;

	return bio;
}
@@ -113,7 +115,7 @@ static void __submit_merged_bio(struct f2fs_bio_info *io)
		 */
		if (fio->type == META_FLUSH) {
			DECLARE_COMPLETION_ONSTACK(wait);
			io->bio->bi_private = &wait;
			io->sbi->wait_io = &wait;
			submit_bio(rw, io->bio);
			wait_for_completion(&wait);
		} else {
@@ -132,7 +134,7 @@ void f2fs_submit_merged_bio(struct f2fs_sb_info *sbi,

	io = is_read_io(rw) ? &sbi->read_io : &sbi->write_io[btype];

	mutex_lock(&io->io_mutex);
	down_write(&io->io_rwsem);

	/* change META to META_FLUSH in the checkpoint procedure */
	if (type >= META_FLUSH) {
@@ -140,7 +142,7 @@ void f2fs_submit_merged_bio(struct f2fs_sb_info *sbi,
		io->fio.rw = WRITE_FLUSH_FUA | REQ_META | REQ_PRIO;
	}
	__submit_merged_bio(io);
	mutex_unlock(&io->io_mutex);
	up_write(&io->io_rwsem);
}

/*
@@ -178,7 +180,7 @@ void f2fs_submit_page_mbio(struct f2fs_sb_info *sbi, struct page *page,

	verify_block_addr(sbi, blk_addr);

	mutex_lock(&io->io_mutex);
	down_write(&io->io_rwsem);

	if (!is_read)
		inc_page_count(sbi, F2FS_WRITEBACK);
@@ -202,7 +204,7 @@ alloc_new:

	io->last_block_in_bio = blk_addr;

	mutex_unlock(&io->io_mutex);
	up_write(&io->io_rwsem);
	trace_f2fs_submit_page_mbio(page, fio->rw, fio->type, blk_addr);
}

@@ -797,48 +799,36 @@ static int f2fs_write_data_page(struct page *page,
	 */
	offset = i_size & (PAGE_CACHE_SIZE - 1);
	if ((page->index >= end_index + 1) || !offset) {
		if (S_ISDIR(inode->i_mode)) {
			dec_page_count(sbi, F2FS_DIRTY_DENTS);
		inode_dec_dirty_dents(inode);
		}
		goto out;
	}

	zero_user_segment(page, offset, PAGE_CACHE_SIZE);
write:
	if (unlikely(sbi->por_doing)) {
		err = AOP_WRITEPAGE_ACTIVATE;
	if (unlikely(sbi->por_doing))
		goto redirty_out;
	}

	/* Dentry blocks are controlled by checkpoint */
	if (S_ISDIR(inode->i_mode)) {
		dec_page_count(sbi, F2FS_DIRTY_DENTS);
		inode_dec_dirty_dents(inode);
		err = do_write_data_page(page, &fio);
	} else {
		f2fs_lock_op(sbi);

		if (f2fs_has_inline_data(inode) || f2fs_may_inline(inode)) {
			err = f2fs_write_inline_data(inode, page, offset);
			f2fs_unlock_op(sbi);
			goto out;
		} else {
			err = do_write_data_page(page, &fio);
		goto done;
	}

		f2fs_unlock_op(sbi);
	if (!wbc->for_reclaim)
		need_balance_fs = true;
	}
	if (err == -ENOENT)
		goto out;
	else if (err)
	else if (has_not_enough_free_secs(sbi, 0))
		goto redirty_out;

	if (wbc->for_reclaim) {
		f2fs_submit_merged_bio(sbi, DATA, WRITE);
		need_balance_fs = false;
	}
	f2fs_lock_op(sbi);
	if (f2fs_has_inline_data(inode) || f2fs_may_inline(inode))
		err = f2fs_write_inline_data(inode, page, offset);
	else
		err = do_write_data_page(page, &fio);
	f2fs_unlock_op(sbi);
done:
	if (err && err != -ENOENT)
		goto redirty_out;

	clear_cold_data(page);
out:
@@ -849,12 +839,11 @@ out:

redirty_out:
	wbc->pages_skipped++;
	account_page_redirty(page);
	set_page_dirty(page);
	return err;
	return AOP_WRITEPAGE_ACTIVATE;
}

#define MAX_DESIRED_PAGES_WP	4096

static int __f2fs_writepage(struct page *page, struct writeback_control *wbc,
			void *data)
{
@@ -871,17 +860,17 @@ static int f2fs_write_data_pages(struct address_space *mapping,
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	bool locked = false;
	int ret;
	long excess_nrtw = 0, desired_nrtw;
	long diff;

	/* deal with chardevs and other special file */
	if (!mapping->a_ops->writepage)
		return 0;

	if (wbc->nr_to_write < MAX_DESIRED_PAGES_WP) {
		desired_nrtw = MAX_DESIRED_PAGES_WP;
		excess_nrtw = desired_nrtw - wbc->nr_to_write;
		wbc->nr_to_write = desired_nrtw;
	}
	if (S_ISDIR(inode->i_mode) && wbc->sync_mode == WB_SYNC_NONE &&
			get_dirty_dents(inode) < nr_pages_to_skip(sbi, DATA))
		goto skip_write;

	diff = nr_pages_to_write(sbi, DATA, wbc);

	if (!S_ISDIR(inode->i_mode)) {
		mutex_lock(&sbi->writepages);
@@ -895,8 +884,12 @@ static int f2fs_write_data_pages(struct address_space *mapping,

	remove_dirty_dir_inode(inode);

	wbc->nr_to_write -= excess_nrtw;
	wbc->nr_to_write = max((long)0, wbc->nr_to_write - diff);
	return ret;

skip_write:
	wbc->pages_skipped += get_dirty_dents(inode);
	return 0;
}

static int f2fs_write_begin(struct file *file, struct address_space *mapping,
@@ -949,13 +942,19 @@ inline_data:
	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
	} else {
		if (f2fs_has_inline_data(inode))
		if (f2fs_has_inline_data(inode)) {
			err = f2fs_read_inline_data(inode, page);
		else
			if (err) {
				page_cache_release(page);
				return err;
			}
		} else {
			err = f2fs_submit_page_bio(sbi, page, dn.data_blkaddr,
							READ_SYNC);
			if (err)
				return err;
		}

		lock_page(page);
		if (unlikely(!PageUptodate(page))) {
			f2fs_put_page(page, 1);
@@ -1031,11 +1030,8 @@ static void f2fs_invalidate_data_page(struct page *page, unsigned int offset,
				      unsigned int length)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	if (S_ISDIR(inode->i_mode) && PageDirty(page)) {
		dec_page_count(sbi, F2FS_DIRTY_DENTS);
	if (PageDirty(page))
		inode_dec_dirty_dents(inode);
	}
	ClearPagePrivate(page);
}

Loading