
Commit 554df79e authored by Jaegeuk Kim

f2fs: shrink extent_cache entries



This patch registers extent cache shrinking with the f2fs shrinker, so that cached extent trees and extent nodes can be reclaimed under memory pressure.

Reviewed-by: Chao Yu <chao2.yu@samsung.com>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
parent 1b38dc8e
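
For context, this change plugs the extent cache into f2fs's memory shrinker: a count callback advertises how many objects are reclaimable, and a scan callback frees up to a requested number of them. The sketch below shows the generic count/scan shrinker shape that f2fs_shrink_count() and f2fs_shrink_scan() (extended in the last file of this diff) follow. It is a minimal illustration only; the demo_* names are invented here and none of this code is part of the commit.

/* minimal sketch of a count/scan shrinker pair (illustrative only) */
#include <linux/shrinker.h>
#include <linux/atomic.h>

static atomic_long_t demo_nr_cached = ATOMIC_LONG_INIT(0);

static unsigned long demo_count(struct shrinker *shrink,
				struct shrink_control *sc)
{
	/* report how many objects could be given back to the VM */
	return atomic_long_read(&demo_nr_cached);
}

static unsigned long demo_scan(struct shrinker *shrink,
			       struct shrink_control *sc)
{
	unsigned long freed = 0;

	/* drop up to sc->nr_to_scan objects and report how many went away */
	while (freed < sc->nr_to_scan &&
	       atomic_long_add_unless(&demo_nr_cached, -1, 0))
		freed++;

	return freed;
}

static struct shrinker demo_shrinker = {
	.count_objects	= demo_count,
	.scan_objects	= demo_scan,
	.seeks		= DEFAULT_SEEKS,
};

/* registered once, e.g. at mount or module init: register_shrinker(&demo_shrinker); */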
+11 −8
@@ -767,7 +767,7 @@ void f2fs_preserve_extent_tree(struct inode *inode)
 		update_inode_page(inode);
 }
 
-void f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink)
+unsigned int f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink)
 {
 	struct extent_tree *treevec[EXT_TREE_VEC_SIZE];
 	struct extent_node *en, *tmp;
@@ -778,10 +778,7 @@ void f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink)
 	unsigned int node_cnt = 0, tree_cnt = 0;
 
 	if (!test_opt(sbi, EXTENT_CACHE))
-		return;
-
-	if (available_free_memory(sbi, EXTENT_CACHE))
-		return;
+		return 0;
 
 	spin_lock(&sbi->extent_lock);
 	list_for_each_entry_safe(en, tmp, &sbi->extent_list, list) {
@@ -791,7 +788,9 @@ void f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink)
 	}
 	spin_unlock(&sbi->extent_lock);
 
-	down_read(&sbi->extent_tree_lock);
+	if (!down_read_trylock(&sbi->extent_tree_lock))
+		goto out;
+
 	while ((found = radix_tree_gang_lookup(&sbi->extent_tree_root,
 				(void **)treevec, ino, EXT_TREE_VEC_SIZE))) {
 		unsigned i;
@@ -809,7 +808,9 @@ void f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink)
 	}
 	up_read(&sbi->extent_tree_lock);
 
-	down_write(&sbi->extent_tree_lock);
+	if (!down_write_trylock(&sbi->extent_tree_lock))
+		goto out;
+
 	radix_tree_for_each_slot(slot, &sbi->extent_tree_root, &iter,
 							F2FS_ROOT_INO(sbi)) {
 		struct extent_tree *et = (struct extent_tree *)*slot;
@@ -822,8 +823,10 @@ void f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink)
 		}
 	}
 	up_write(&sbi->extent_tree_lock);
-
+out:
 	trace_f2fs_shrink_extent_tree(sbi, node_cnt, tree_cnt);
+
+	return node_cnt + tree_cnt;
 }
 
 void f2fs_destroy_extent_tree(struct inode *inode)
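
The down_read()/down_write() calls above become trylocks because, with the new return value, this function is also invoked from the shrinker path (last file of this diff), where it is likely preferable to back off on lock contention and report partial progress rather than block during reclaim. A minimal sketch of that pattern, with invented names and not taken from this commit:

#include <linux/rwsem.h>

/* illustrative helper: skip the pass instead of sleeping on a contended lock */
static unsigned int demo_reclaim(struct rw_semaphore *lock)
{
	unsigned int freed = 0;

	if (!down_read_trylock(lock))	/* contended: give up this pass */
		goto out;
	/* ... walk the cache and drop reclaimable entries, bumping 'freed' ... */
	up_read(lock);
out:
	return freed;
}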
+1 −1
@@ -1754,7 +1754,7 @@ void f2fs_submit_page_mbio(struct f2fs_io_info *);
 void set_data_blkaddr(struct dnode_of_data *);
 int reserve_new_block(struct dnode_of_data *);
 int f2fs_reserve_block(struct dnode_of_data *, pgoff_t);
-void f2fs_shrink_extent_tree(struct f2fs_sb_info *, int);
+unsigned int f2fs_shrink_extent_tree(struct f2fs_sb_info *, int);
 void f2fs_destroy_extent_tree(struct inode *);
 void f2fs_init_extent_cache(struct inode *, struct f2fs_extent *);
 void f2fs_update_extent_cache(struct dnode_of_data *);
+2 −1
@@ -304,6 +304,7 @@ void f2fs_balance_fs(struct f2fs_sb_info *sbi)
 void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi)
 {
 	/* try to shrink extent cache when there is no enough memory */
-	f2fs_shrink_extent_tree(sbi, EXTENT_CACHE_SHRINK_NUMBER);
+	if (!available_free_memory(sbi, EXTENT_CACHE))
+		f2fs_shrink_extent_tree(sbi, EXTENT_CACHE_SHRINK_NUMBER);
 
 	/* check the # of cached NAT entries */
+13 −1
@@ -23,6 +23,11 @@ static unsigned long __count_nat_entries(struct f2fs_sb_info *sbi)
 	return NM_I(sbi)->nat_cnt - NM_I(sbi)->dirty_nat_cnt;
 }
 
+static unsigned long __count_extent_cache(struct f2fs_sb_info *sbi)
+{
+	return sbi->total_ext_tree + atomic_read(&sbi->total_ext_node);
+}
+
 unsigned long f2fs_shrink_count(struct shrinker *shrink,
 				struct shrink_control *sc)
 {
@@ -42,6 +47,9 @@ unsigned long f2fs_shrink_count(struct shrinker *shrink,
 		}
 		spin_unlock(&f2fs_list_lock);
 
+		/* count extent cache entries */
+		count += __count_extent_cache(sbi);
+
 		/* shrink clean nat cache entries */
 		count += __count_nat_entries(sbi);
 
@@ -82,8 +90,12 @@ unsigned long f2fs_shrink_scan(struct shrinker *shrink,
 
 		sbi->shrinker_run_no = run_no;
 
+		/* shrink extent cache entries */
+		freed += f2fs_shrink_extent_tree(sbi, nr >> 1);
+
 		/* shrink clean nat cache entries */
-		freed += try_to_free_nats(sbi, nr);
+		if (freed < nr)
+			freed += try_to_free_nats(sbi, nr - freed);
 
 		spin_lock(&f2fs_list_lock);
 		p = p->next;
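
With this last hunk, f2fs_shrink_scan() splits its reclaim budget: roughly half of nr is handed to the extent cache first, and clean NAT entries are only trimmed for whatever remains. A small self-contained sketch of just that budgeting logic; the drop_* helpers are invented stand-ins for f2fs_shrink_extent_tree() and try_to_free_nats():

#include <stdio.h>

/* invented stand-ins that pretend everything requested could be freed */
static unsigned long drop_extents(unsigned long want) { return want; }
static unsigned long drop_nats(unsigned long want)    { return want; }

int main(void)
{
	unsigned long nr = 128, freed = 0;

	freed += drop_extents(nr >> 1);		/* extent cache gets ~half: 64 */
	if (freed < nr)				/* NAT entries cover the rest */
		freed += drop_nats(nr - freed);	/* 128 - 64 = 64 */

	printf("freed %lu of %lu requested\n", freed, nr);	/* prints: freed 128 of 128 requested */
	return 0;
}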