Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 06a60dec authored by Linus Torvalds
Browse files
Pull f2fs updates from Jaegeuk Kim:
 "New features:
   - in-memory extent_cache
   - fs_shutdown to test power-off-recovery
   - use inline_data to store symlink path
   - show f2fs as a non-misc filesystem

  Major fixes:
   - avoid CPU stalls on sync_dirty_dir_inodes
   - fix some power-off-recovery procedure
   - fix handling of broken symlink correctly
   - fix missing dot and dotdot made by sudden power cuts
   - handle wrong data index during roll-forward recovery
   - preallocate data blocks for direct_io

  ... and a bunch of minor bug fixes and cleanups"

* tag 'for-f2fs-4.1' of git://git.kernel.org/pub/scm/linux/kernel/git/jaegeuk/f2fs: (71 commits)
  f2fs: pass checkpoint reason on roll-forward recovery
  f2fs: avoid abnormal behavior on broken symlink
  f2fs: flush symlink path to avoid broken symlink after POR
  f2fs: change 0 to false for bool type
  f2fs: do not recover wrong data index
  f2fs: do not increase link count during recovery
  f2fs: assign parent's i_mode for empty dir
  f2fs: add F2FS_INLINE_DOTS to recover missing dot dentries
  f2fs: fix mismatching lock and unlock pages for roll-forward recovery
  f2fs: fix sparse warnings
  f2fs: limit b_size of mapped bh in f2fs_map_bh
  f2fs: persist system.advise into on-disk inode
  f2fs: avoid NULL pointer dereference in f2fs_xattr_advise_get
  f2fs: preallocate fallocated blocks for direct IO
  f2fs: enable inline data by default
  f2fs: preserve extent info for extent cache
  f2fs: initialize extent tree with on-disk extent info of inode
  f2fs: introduce __{find,grab}_extent_tree
  f2fs: split set_data_blkaddr from f2fs_update_extent_cache
  f2fs: enable fast symlink by utilizing inline data
  ...
parents d6a24d06 10027551
Loading
Loading
Loading
Loading
+6 −0
Original line number Diff line number Diff line
@@ -140,6 +140,12 @@ nobarrier This option can be used if underlying storage guarantees
fastboot               This option is used when a system wants to reduce mount
                       time as much as possible, even though normal performance
		       can be sacrificed.
extent_cache           Enable an extent cache based on rb-tree, it can cache
                       as many as extent which map between contiguous logical
                       address and physical address per inode, resulting in
                       increasing the cache hit ratio.
noinline_data          Disable the inline data feature, inline data feature is
                       enabled by default.

================================================================================
DEBUGFS ENTRIES
+1 −1
Original line number Diff line number Diff line
@@ -32,6 +32,7 @@ source "fs/gfs2/Kconfig"
source "fs/ocfs2/Kconfig"
source "fs/btrfs/Kconfig"
source "fs/nilfs2/Kconfig"
source "fs/f2fs/Kconfig"

config FS_DAX
	bool "Direct Access (DAX) support"
@@ -217,7 +218,6 @@ source "fs/pstore/Kconfig"
source "fs/sysv/Kconfig"
source "fs/ufs/Kconfig"
source "fs/exofs/Kconfig"
source "fs/f2fs/Kconfig"

endif # MISC_FILESYSTEMS

+1 −1
Original line number Diff line number Diff line
config F2FS_FS
	tristate "F2FS filesystem support (EXPERIMENTAL)"
	tristate "F2FS filesystem support"
	depends on BLOCK
	help
	  F2FS is based on Log-structured File System (LFS), which supports
+9 −5
Original line number Diff line number Diff line
@@ -351,13 +351,11 @@ static int f2fs_acl_create(struct inode *dir, umode_t *mode,

	*acl = f2fs_acl_clone(p, GFP_NOFS);
	if (!*acl)
		return -ENOMEM;
		goto no_mem;

	ret = f2fs_acl_create_masq(*acl, mode);
	if (ret < 0) {
		posix_acl_release(*acl);
		return -ENOMEM;
	}
	if (ret < 0)
		goto no_mem_clone;

	if (ret == 0) {
		posix_acl_release(*acl);
@@ -378,6 +376,12 @@ static int f2fs_acl_create(struct inode *dir, umode_t *mode,
	*default_acl = NULL;
	*acl = NULL;
	return 0;

no_mem_clone:
	posix_acl_release(*acl);
no_mem:
	posix_acl_release(p);
	return -ENOMEM;
}

int f2fs_init_acl(struct inode *inode, struct inode *dir, struct page *ipage,
+19 −19
Original line number Diff line number Diff line
@@ -276,7 +276,7 @@ long sync_meta_pages(struct f2fs_sb_info *sbi, enum page_type type,
			if (!clear_page_dirty_for_io(page))
				goto continue_unlock;

			if (f2fs_write_meta_page(page, &wbc)) {
			if (mapping->a_ops->writepage(page, &wbc)) {
				unlock_page(page);
				break;
			}
@@ -464,20 +464,19 @@ static void recover_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino)

void recover_orphan_inodes(struct f2fs_sb_info *sbi)
{
	block_t start_blk, orphan_blkaddr, i, j;
	block_t start_blk, orphan_blocks, i, j;

	if (!is_set_ckpt_flags(F2FS_CKPT(sbi), CP_ORPHAN_PRESENT_FLAG))
		return;

	set_sbi_flag(sbi, SBI_POR_DOING);

	start_blk = __start_cp_addr(sbi) + 1 +
		le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_payload);
	orphan_blkaddr = __start_sum_addr(sbi) - 1;
	start_blk = __start_cp_addr(sbi) + 1 + __cp_payload(sbi);
	orphan_blocks = __start_sum_addr(sbi) - 1 - __cp_payload(sbi);

	ra_meta_pages(sbi, start_blk, orphan_blkaddr, META_CP);
	ra_meta_pages(sbi, start_blk, orphan_blocks, META_CP);

	for (i = 0; i < orphan_blkaddr; i++) {
	for (i = 0; i < orphan_blocks; i++) {
		struct page *page = get_meta_page(sbi, start_blk + i);
		struct f2fs_orphan_block *orphan_blk;

@@ -615,7 +614,7 @@ int get_valid_checkpoint(struct f2fs_sb_info *sbi)
	unsigned long blk_size = sbi->blocksize;
	unsigned long long cp1_version = 0, cp2_version = 0;
	unsigned long long cp_start_blk_no;
	unsigned int cp_blks = 1 + le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_payload);
	unsigned int cp_blks = 1 + __cp_payload(sbi);
	block_t cp_blk_no;
	int i;

@@ -796,6 +795,7 @@ void sync_dirty_dir_inodes(struct f2fs_sb_info *sbi)
		 * writebacking dentry pages in the freeing inode.
		 */
		f2fs_submit_merged_bio(sbi, DATA, WRITE);
		cond_resched();
	}
	goto retry;
}
@@ -884,7 +884,7 @@ static void do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
	__u32 crc32 = 0;
	void *kaddr;
	int i;
	int cp_payload_blks = le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_payload);
	int cp_payload_blks = __cp_payload(sbi);

	/*
	 * This avoids to conduct wrong roll-forward operations and uses
@@ -1048,17 +1048,18 @@ void write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	unsigned long long ckpt_ver;

	trace_f2fs_write_checkpoint(sbi->sb, cpc->reason, "start block_ops");

	mutex_lock(&sbi->cp_mutex);

	if (!is_sbi_flag_set(sbi, SBI_IS_DIRTY) &&
			cpc->reason != CP_DISCARD && cpc->reason != CP_UMOUNT)
		(cpc->reason == CP_FASTBOOT || cpc->reason == CP_SYNC))
		goto out;
	if (unlikely(f2fs_cp_error(sbi)))
		goto out;
	if (f2fs_readonly(sbi->sb))
		goto out;

	trace_f2fs_write_checkpoint(sbi->sb, cpc->reason, "start block_ops");

	if (block_operations(sbi))
		goto out;

@@ -1085,6 +1086,10 @@ void write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)

	unblock_operations(sbi);
	stat_inc_cp_count(sbi->stat_info);

	if (cpc->reason == CP_RECOVERY)
		f2fs_msg(sbi->sb, KERN_NOTICE,
			"checkpoint: version = %llx", ckpt_ver);
out:
	mutex_unlock(&sbi->cp_mutex);
	trace_f2fs_write_checkpoint(sbi->sb, cpc->reason, "finish checkpoint");
@@ -1103,14 +1108,9 @@ void init_ino_entry_info(struct f2fs_sb_info *sbi)
		im->ino_num = 0;
	}

	/*
	 * considering 512 blocks in a segment 8 blocks are needed for cp
	 * and log segment summaries. Remaining blocks are used to keep
	 * orphan entries with the limitation one reserved segment
	 * for cp pack we can have max 1020*504 orphan entries
	 */
	sbi->max_orphans = (sbi->blocks_per_seg - F2FS_CP_PACKS -
			NR_CURSEG_TYPE) * F2FS_ORPHANS_PER_BLOCK;
			NR_CURSEG_TYPE - __cp_payload(sbi)) *
				F2FS_ORPHANS_PER_BLOCK;
}

int __init create_checkpoint_caches(void)
Loading