Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 4c12ab7e authored by Linus Torvalds
Browse files
Pull f2fs updates from Jaegeuk Kim:
 "The major work includes fixing and enhancing the existing extent_cache
  feature, which has been well settling down so far and now it becomes a
  default mount option accordingly.

  Also, this version newly registers a f2fs memory shrinker to reclaim
  several objects consumed by a couple of data structures in order to
  avoid memory pressures.

  Another new feature is to add ioctl(F2FS_GARBAGE_COLLECT) which
  triggers a cleaning job explicitly by users.

  Most of the other patches are to fix bugs occurred in the corner cases
  across the whole code area"

* tag 'for-f2fs-4.3' of git://git.kernel.org/pub/scm/linux/kernel/git/jaegeuk/f2fs: (85 commits)
  f2fs: upset segment_info repair
  f2fs: avoid accessing NULL pointer in f2fs_drop_largest_extent
  f2fs: update extent tree in batches
  f2fs: fix to release inode correctly
  f2fs: handle f2fs_truncate error correctly
  f2fs: avoid unneeded initializing when converting inline dentry
  f2fs: atomically set inode->i_flags
  f2fs: fix wrong pointer access during try_to_free_nids
  f2fs: use __GFP_NOFAIL to avoid infinite loop
  f2fs: lookup neighbor extent nodes for merging later
  f2fs: split __insert_extent_tree_ret for readability
  f2fs: kill dead code in __insert_extent_tree
  f2fs: adjust showing of extent cache stat
  f2fs: add largest/cached stat in extent cache
  f2fs: fix incorrect mapping for bmap
  f2fs: add annotation for space utilization of regular/inline dentry
  f2fs: fix to update cached_en of extent tree properly
  f2fs: fix typo
  f2fs: check the node block address of newly allocated nid
  f2fs: go out for insert_inode_locked failure
  ...
parents 9cbf22b3 01a5ad82
Loading
Loading
Loading
Loading
+3 −1
Original line number Diff line number Diff line
@@ -143,7 +143,9 @@ fastboot This option is used when a system wants to reduce mount
extent_cache           Enable an extent cache based on rb-tree, it can cache
                       as many as extent which map between contiguous logical
                       address and physical address per inode, resulting in
                       increasing the cache hit ratio.
                       increasing the cache hit ratio. Set by default.
noextent_cache         Diable an extent cache based on rb-tree explicitly, see
                       the above extent_cache mount option.
noinline_data          Disable the inline data feature, inline data feature is
                       enabled by default.

+2 −0
Original line number Diff line number Diff line
@@ -4416,6 +4416,7 @@ F: include/linux/fscache*.h
F2FS FILE SYSTEM
M:	Jaegeuk Kim <jaegeuk@kernel.org>
M:	Changman Lee <cm224.lee@samsung.com>
R:	Chao Yu <chao2.yu@samsung.com>
L:	linux-f2fs-devel@lists.sourceforge.net
W:	http://en.wikipedia.org/wiki/F2FS
T:	git git://git.kernel.org/pub/scm/linux/kernel/git/jaegeuk/f2fs.git
@@ -4424,6 +4425,7 @@ F: Documentation/filesystems/f2fs.txt
F:	Documentation/ABI/testing/sysfs-fs-f2fs
F:	fs/f2fs/
F:	include/linux/f2fs_fs.h
F:	include/trace/events/f2fs.h

FUJITSU FR-V (FRV) PORT
M:	David Howells <dhowells@redhat.com>
+1 −1
Original line number Diff line number Diff line
@@ -45,7 +45,7 @@ config F2FS_FS_POSIX_ACL
	default y
	help
	  Posix Access Control Lists (ACLs) support permissions for users and
	  gourps beyond the owner/group/world scheme.
	  groups beyond the owner/group/world scheme.

	  To learn more about Access Control Lists, visit the POSIX ACLs for
	  Linux website <http://acl.bestbits.at/>.
+1 −0
Original line number Diff line number Diff line
@@ -2,6 +2,7 @@ obj-$(CONFIG_F2FS_FS) += f2fs.o

f2fs-y		:= dir.o file.o inode.o namei.o hash.o super.o inline.o
f2fs-y		+= checkpoint.o gc.o data.o node.o segment.o recovery.o
f2fs-y		+= shrinker.o extent_cache.o
f2fs-$(CONFIG_F2FS_STAT_FS) += debug.o
f2fs-$(CONFIG_F2FS_FS_XATTR) += xattr.o
f2fs-$(CONFIG_F2FS_FS_POSIX_ACL) += acl.o
+58 −35
Original line number Diff line number Diff line
@@ -69,14 +69,24 @@ struct page *get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index)

	fio.page = page;

	if (f2fs_submit_page_bio(&fio))
	if (f2fs_submit_page_bio(&fio)) {
		f2fs_put_page(page, 1);
		goto repeat;
	}

	lock_page(page);
	if (unlikely(page->mapping != mapping)) {
		f2fs_put_page(page, 1);
		goto repeat;
	}

	/*
	 * if there is any IO error when accessing device, make our filesystem
	 * readonly and make sure do not write checkpoint with non-uptodate
	 * meta page.
	 */
	if (unlikely(!PageUptodate(page)))
		f2fs_stop_checkpoint(sbi);
out:
	return page;
}
@@ -326,26 +336,18 @@ const struct address_space_operations f2fs_meta_aops = {
static void __add_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type)
{
	struct inode_management *im = &sbi->im[type];
	struct ino_entry *e;
	struct ino_entry *e, *tmp;

	tmp = f2fs_kmem_cache_alloc(ino_entry_slab, GFP_NOFS);
retry:
	if (radix_tree_preload(GFP_NOFS)) {
		cond_resched();
		goto retry;
	}
	radix_tree_preload(GFP_NOFS | __GFP_NOFAIL);

	spin_lock(&im->ino_lock);

	e = radix_tree_lookup(&im->ino_root, ino);
	if (!e) {
		e = kmem_cache_alloc(ino_entry_slab, GFP_ATOMIC);
		if (!e) {
			spin_unlock(&im->ino_lock);
			radix_tree_preload_end();
			goto retry;
		}
		e = tmp;
		if (radix_tree_insert(&im->ino_root, ino, e)) {
			spin_unlock(&im->ino_lock);
			kmem_cache_free(ino_entry_slab, e);
			radix_tree_preload_end();
			goto retry;
		}
@@ -358,6 +360,9 @@ static void __add_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type)
	}
	spin_unlock(&im->ino_lock);
	radix_tree_preload_end();

	if (e != tmp)
		kmem_cache_free(ino_entry_slab, tmp);
}

static void __remove_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type)
@@ -458,24 +463,34 @@ void remove_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino)
	__remove_ino_entry(sbi, ino, ORPHAN_INO);
}

static void recover_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino)
static int recover_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct inode *inode = f2fs_iget(sbi->sb, ino);
	f2fs_bug_on(sbi, IS_ERR(inode));
	struct inode *inode;

	inode = f2fs_iget(sbi->sb, ino);
	if (IS_ERR(inode)) {
		/*
		 * there should be a bug that we can't find the entry
		 * to orphan inode.
		 */
		f2fs_bug_on(sbi, PTR_ERR(inode) == -ENOENT);
		return PTR_ERR(inode);
	}

	clear_nlink(inode);

	/* truncate all the data during iput */
	iput(inode);
	return 0;
}

void recover_orphan_inodes(struct f2fs_sb_info *sbi)
int recover_orphan_inodes(struct f2fs_sb_info *sbi)
{
	block_t start_blk, orphan_blocks, i, j;
	int err;

	if (!is_set_ckpt_flags(F2FS_CKPT(sbi), CP_ORPHAN_PRESENT_FLAG))
		return;

	set_sbi_flag(sbi, SBI_POR_DOING);
		return 0;

	start_blk = __start_cp_addr(sbi) + 1 + __cp_payload(sbi);
	orphan_blocks = __start_sum_addr(sbi) - 1 - __cp_payload(sbi);
@@ -489,14 +504,17 @@ void recover_orphan_inodes(struct f2fs_sb_info *sbi)
		orphan_blk = (struct f2fs_orphan_block *)page_address(page);
		for (j = 0; j < le32_to_cpu(orphan_blk->entry_count); j++) {
			nid_t ino = le32_to_cpu(orphan_blk->ino[j]);
			recover_orphan_inode(sbi, ino);
			err = recover_orphan_inode(sbi, ino);
			if (err) {
				f2fs_put_page(page, 1);
				return err;
			}
		}
		f2fs_put_page(page, 1);
	}
	/* clear Orphan Flag */
	clear_ckpt_flags(F2FS_CKPT(sbi), CP_ORPHAN_PRESENT_FLAG);
	clear_sbi_flag(sbi, SBI_POR_DOING);
	return;
	return 0;
}

static void write_orphan_inodes(struct f2fs_sb_info *sbi, block_t start_blk)
@@ -504,7 +522,7 @@ static void write_orphan_inodes(struct f2fs_sb_info *sbi, block_t start_blk)
	struct list_head *head;
	struct f2fs_orphan_block *orphan_blk = NULL;
	unsigned int nentries = 0;
	unsigned short index;
	unsigned short index = 1;
	unsigned short orphan_blocks;
	struct page *page = NULL;
	struct ino_entry *orphan = NULL;
@@ -512,11 +530,6 @@ static void write_orphan_inodes(struct f2fs_sb_info *sbi, block_t start_blk)

	orphan_blocks = GET_ORPHAN_BLOCKS(im->ino_num);

	for (index = 0; index < orphan_blocks; index++)
		grab_meta_page(sbi, start_blk + index);

	index = 1;

	/*
	 * we don't need to do spin_lock(&im->ino_lock) here, since all the
	 * orphan inode operations are covered under f2fs_lock_op().
@@ -527,12 +540,10 @@ static void write_orphan_inodes(struct f2fs_sb_info *sbi, block_t start_blk)
	/* loop for each orphan inode entry and write them in Jornal block */
	list_for_each_entry(orphan, head, list) {
		if (!page) {
			page = find_get_page(META_MAPPING(sbi), start_blk++);
			f2fs_bug_on(sbi, !page);
			page = grab_meta_page(sbi, start_blk++);
			orphan_blk =
				(struct f2fs_orphan_block *)page_address(page);
			memset(orphan_blk, 0, sizeof(*orphan_blk));
			f2fs_put_page(page, 0);
		}

		orphan_blk->ino[nentries++] = cpu_to_le32(orphan->ino);
@@ -704,7 +715,8 @@ void update_dirty_page(struct inode *inode, struct page *page)
	struct inode_entry *new;
	int ret = 0;

	if (!S_ISDIR(inode->i_mode) && !S_ISREG(inode->i_mode))
	if (!S_ISDIR(inode->i_mode) && !S_ISREG(inode->i_mode) &&
			!S_ISLNK(inode->i_mode))
		return;

	if (!S_ISDIR(inode->i_mode)) {
@@ -892,12 +904,15 @@ static void do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
	__u32 crc32 = 0;
	int i;
	int cp_payload_blks = __cp_payload(sbi);
	block_t discard_blk = NEXT_FREE_BLKADDR(sbi, curseg);
	bool invalidate = false;

	/*
	 * This avoids to conduct wrong roll-forward operations and uses
	 * metapages, so should be called prior to sync_meta_pages below.
	 */
	discard_next_dnode(sbi, NEXT_FREE_BLKADDR(sbi, curseg));
	if (discard_next_dnode(sbi, discard_blk))
		invalidate = true;

	/* Flush all the NAT/SIT pages */
	while (get_pages(sbi, F2FS_DIRTY_META)) {
@@ -1026,6 +1041,14 @@ static void do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
	/* wait for previous submitted meta pages writeback */
	wait_on_all_pages_writeback(sbi);

	/*
	 * invalidate meta page which is used temporarily for zeroing out
	 * block at the end of warm node chain.
	 */
	if (invalidate)
		invalidate_mapping_pages(META_MAPPING(sbi), discard_blk,
								discard_blk);

	release_dirty_inode(sbi);

	if (unlikely(f2fs_cp_error(sbi)))
Loading