Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit b0591522 authored by Linus Torvalds
Browse files
Pull f2fs fixes from Jaegeuk Kim:
 - fsmark regression
 - i_size race condition
 - wrong conditions in f2fs_move_file_range

* tag 'for-f2fs-v4.8-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/jaegeuk/f2fs:
  f2fs: avoid potential deadlock in f2fs_move_file_range
  f2fs: allow copying file range only in between regular files
  Revert "f2fs: move i_size_write in f2fs_write_end"
  Revert "f2fs: use percpu_rw_semaphore"
parents 7a1dcf6a 20a3d61d
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -1699,11 +1699,11 @@ static int f2fs_write_end(struct file *file,
	trace_f2fs_write_end(inode, pos, len, copied);

	set_page_dirty(page);
	f2fs_put_page(page, 1);

	if (pos + copied > i_size_read(inode))
		f2fs_i_size_write(inode, pos + copied);

	f2fs_put_page(page, 1);
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	return copied;
}
+6 −6
Original line number Diff line number Diff line
@@ -538,7 +538,7 @@ struct f2fs_nm_info {
	/* NAT cache management */
	struct radix_tree_root nat_root;/* root of the nat entry cache */
	struct radix_tree_root nat_set_root;/* root of the nat set cache */
	struct percpu_rw_semaphore nat_tree_lock;	/* protect nat_tree_lock */
	struct rw_semaphore nat_tree_lock;	/* protect nat_tree_lock */
	struct list_head nat_entries;	/* cached nat entry list (clean) */
	unsigned int nat_cnt;		/* the # of cached nat entries */
	unsigned int dirty_nat_cnt;	/* total num of nat entries in set */
@@ -787,7 +787,7 @@ struct f2fs_sb_info {
	struct f2fs_checkpoint *ckpt;		/* raw checkpoint pointer */
	struct inode *meta_inode;		/* cache meta blocks */
	struct mutex cp_mutex;			/* checkpoint procedure lock */
	struct percpu_rw_semaphore cp_rwsem;		/* blocking FS operations */
	struct rw_semaphore cp_rwsem;		/* blocking FS operations */
	struct rw_semaphore node_write;		/* locking node writes */
	wait_queue_head_t cp_wait;
	unsigned long last_time[MAX_TIME];	/* to store time in jiffies */
@@ -1074,22 +1074,22 @@ static inline void clear_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)

static inline void f2fs_lock_op(struct f2fs_sb_info *sbi)
{
	percpu_down_read(&sbi->cp_rwsem);
	down_read(&sbi->cp_rwsem);
}

static inline void f2fs_unlock_op(struct f2fs_sb_info *sbi)
{
	percpu_up_read(&sbi->cp_rwsem);
	up_read(&sbi->cp_rwsem);
}

static inline void f2fs_lock_all(struct f2fs_sb_info *sbi)
{
	percpu_down_write(&sbi->cp_rwsem);
	down_write(&sbi->cp_rwsem);
}

static inline void f2fs_unlock_all(struct f2fs_sb_info *sbi)
{
	percpu_up_write(&sbi->cp_rwsem);
	up_write(&sbi->cp_rwsem);
}

static inline int __get_cp_reason(struct f2fs_sb_info *sbi)
+9 −4
Original line number Diff line number Diff line
@@ -2086,15 +2086,19 @@ static int f2fs_move_file_range(struct file *file_in, loff_t pos_in,
	if (unlikely(f2fs_readonly(src->i_sb)))
		return -EROFS;

	if (S_ISDIR(src->i_mode) || S_ISDIR(dst->i_mode))
		return -EISDIR;
	if (!S_ISREG(src->i_mode) || !S_ISREG(dst->i_mode))
		return -EINVAL;

	if (f2fs_encrypted_inode(src) || f2fs_encrypted_inode(dst))
		return -EOPNOTSUPP;

	inode_lock(src);
	if (src != dst)
		inode_lock(dst);
	if (src != dst) {
		if (!inode_trylock(dst)) {
			ret = -EBUSY;
			goto out;
		}
	}

	ret = -EINVAL;
	if (pos_in + len > src->i_size || pos_in + len < pos_in)
@@ -2152,6 +2156,7 @@ static int f2fs_move_file_range(struct file *file_in, loff_t pos_in,
out_unlock:
	if (src != dst)
		inode_unlock(dst);
out:
	inode_unlock(src);
	return ret;
}
+23 −24
Original line number Diff line number Diff line
@@ -206,14 +206,14 @@ int need_dentry_mark(struct f2fs_sb_info *sbi, nid_t nid)
	struct nat_entry *e;
	bool need = false;

	percpu_down_read(&nm_i->nat_tree_lock);
	down_read(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e) {
		if (!get_nat_flag(e, IS_CHECKPOINTED) &&
				!get_nat_flag(e, HAS_FSYNCED_INODE))
			need = true;
	}
	percpu_up_read(&nm_i->nat_tree_lock);
	up_read(&nm_i->nat_tree_lock);
	return need;
}

@@ -223,11 +223,11 @@ bool is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid)
	struct nat_entry *e;
	bool is_cp = true;

	percpu_down_read(&nm_i->nat_tree_lock);
	down_read(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e && !get_nat_flag(e, IS_CHECKPOINTED))
		is_cp = false;
	percpu_up_read(&nm_i->nat_tree_lock);
	up_read(&nm_i->nat_tree_lock);
	return is_cp;
}

@@ -237,13 +237,13 @@ bool need_inode_block_update(struct f2fs_sb_info *sbi, nid_t ino)
	struct nat_entry *e;
	bool need_update = true;

	percpu_down_read(&nm_i->nat_tree_lock);
	down_read(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, ino);
	if (e && get_nat_flag(e, HAS_LAST_FSYNC) &&
			(get_nat_flag(e, IS_CHECKPOINTED) ||
			 get_nat_flag(e, HAS_FSYNCED_INODE)))
		need_update = false;
	percpu_up_read(&nm_i->nat_tree_lock);
	up_read(&nm_i->nat_tree_lock);
	return need_update;
}

@@ -284,7 +284,7 @@ static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;

	percpu_down_write(&nm_i->nat_tree_lock);
	down_write(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, ni->nid);
	if (!e) {
		e = grab_nat_entry(nm_i, ni->nid);
@@ -334,7 +334,7 @@ static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
			set_nat_flag(e, HAS_FSYNCED_INODE, true);
		set_nat_flag(e, HAS_LAST_FSYNC, fsync_done);
	}
	percpu_up_write(&nm_i->nat_tree_lock);
	up_write(&nm_i->nat_tree_lock);
}

int try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink)
@@ -342,7 +342,8 @@ int try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink)
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	int nr = nr_shrink;

	percpu_down_write(&nm_i->nat_tree_lock);
	if (!down_write_trylock(&nm_i->nat_tree_lock))
		return 0;

	while (nr_shrink && !list_empty(&nm_i->nat_entries)) {
		struct nat_entry *ne;
@@ -351,7 +352,7 @@ int try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink)
		__del_from_nat_cache(nm_i, ne);
		nr_shrink--;
	}
	percpu_up_write(&nm_i->nat_tree_lock);
	up_write(&nm_i->nat_tree_lock);
	return nr - nr_shrink;
}

@@ -373,13 +374,13 @@ void get_node_info(struct f2fs_sb_info *sbi, nid_t nid, struct node_info *ni)
	ni->nid = nid;

	/* Check nat cache */
	percpu_down_read(&nm_i->nat_tree_lock);
	down_read(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e) {
		ni->ino = nat_get_ino(e);
		ni->blk_addr = nat_get_blkaddr(e);
		ni->version = nat_get_version(e);
		percpu_up_read(&nm_i->nat_tree_lock);
		up_read(&nm_i->nat_tree_lock);
		return;
	}

@@ -403,11 +404,11 @@ void get_node_info(struct f2fs_sb_info *sbi, nid_t nid, struct node_info *ni)
	node_info_from_raw_nat(ni, &ne);
	f2fs_put_page(page, 1);
cache:
	percpu_up_read(&nm_i->nat_tree_lock);
	up_read(&nm_i->nat_tree_lock);
	/* cache nat entry */
	percpu_down_write(&nm_i->nat_tree_lock);
	down_write(&nm_i->nat_tree_lock);
	cache_nat_entry(sbi, nid, &ne);
	percpu_up_write(&nm_i->nat_tree_lock);
	up_write(&nm_i->nat_tree_lock);
}

/*
@@ -1788,7 +1789,7 @@ void build_free_nids(struct f2fs_sb_info *sbi)
	ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), FREE_NID_PAGES,
							META_NAT, true);

	percpu_down_read(&nm_i->nat_tree_lock);
	down_read(&nm_i->nat_tree_lock);

	while (1) {
		struct page *page = get_current_nat_page(sbi, nid);
@@ -1820,7 +1821,7 @@ void build_free_nids(struct f2fs_sb_info *sbi)
			remove_free_nid(nm_i, nid);
	}
	up_read(&curseg->journal_rwsem);
	percpu_up_read(&nm_i->nat_tree_lock);
	up_read(&nm_i->nat_tree_lock);

	ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nm_i->next_scan_nid),
					nm_i->ra_nid_pages, META_NAT, false);
@@ -2209,7 +2210,7 @@ void flush_nat_entries(struct f2fs_sb_info *sbi)
	if (!nm_i->dirty_nat_cnt)
		return;

	percpu_down_write(&nm_i->nat_tree_lock);
	down_write(&nm_i->nat_tree_lock);

	/*
	 * if there are no enough space in journal to store dirty nat
@@ -2232,7 +2233,7 @@ void flush_nat_entries(struct f2fs_sb_info *sbi)
	list_for_each_entry_safe(set, tmp, &sets, set_list)
		__flush_nat_entry_set(sbi, set);

	percpu_up_write(&nm_i->nat_tree_lock);
	up_write(&nm_i->nat_tree_lock);

	f2fs_bug_on(sbi, nm_i->dirty_nat_cnt);
}
@@ -2268,8 +2269,7 @@ static int init_node_manager(struct f2fs_sb_info *sbi)

	mutex_init(&nm_i->build_lock);
	spin_lock_init(&nm_i->free_nid_list_lock);
	if (percpu_init_rwsem(&nm_i->nat_tree_lock))
		return -ENOMEM;
	init_rwsem(&nm_i->nat_tree_lock);

	nm_i->next_scan_nid = le32_to_cpu(sbi->ckpt->next_free_nid);
	nm_i->bitmap_size = __bitmap_size(sbi, NAT_BITMAP);
@@ -2326,7 +2326,7 @@ void destroy_node_manager(struct f2fs_sb_info *sbi)
	spin_unlock(&nm_i->free_nid_list_lock);

	/* destroy nat cache */
	percpu_down_write(&nm_i->nat_tree_lock);
	down_write(&nm_i->nat_tree_lock);
	while ((found = __gang_lookup_nat_cache(nm_i,
					nid, NATVEC_SIZE, natvec))) {
		unsigned idx;
@@ -2351,9 +2351,8 @@ void destroy_node_manager(struct f2fs_sb_info *sbi)
			kmem_cache_free(nat_entry_set_slab, setvec[idx]);
		}
	}
	percpu_up_write(&nm_i->nat_tree_lock);
	up_write(&nm_i->nat_tree_lock);

	percpu_free_rwsem(&nm_i->nat_tree_lock);
	kfree(nm_i->nat_bitmap);
	sbi->nm_info = NULL;
	kfree(nm_i);
+1 −5
Original line number Diff line number Diff line
@@ -706,8 +706,6 @@ static void destroy_percpu_info(struct f2fs_sb_info *sbi)
		percpu_counter_destroy(&sbi->nr_pages[i]);
	percpu_counter_destroy(&sbi->alloc_valid_block_count);
	percpu_counter_destroy(&sbi->total_valid_inode_count);

	percpu_free_rwsem(&sbi->cp_rwsem);
}

static void f2fs_put_super(struct super_block *sb)
@@ -1483,9 +1481,6 @@ static int init_percpu_info(struct f2fs_sb_info *sbi)
{
	int i, err;

	if (percpu_init_rwsem(&sbi->cp_rwsem))
		return -ENOMEM;

	for (i = 0; i < NR_COUNT_TYPE; i++) {
		err = percpu_counter_init(&sbi->nr_pages[i], 0, GFP_KERNEL);
		if (err)
@@ -1686,6 +1681,7 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
		sbi->write_io[i].bio = NULL;
	}

	init_rwsem(&sbi->cp_rwsem);
	init_waitqueue_head(&sbi->cp_wait);
	init_sb_info(sbi);