Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 1ab6c499 authored by Dave Chinner's avatar Dave Chinner Committed by Al Viro
Browse files

fs: convert fs shrinkers to new scan/count API



Convert the filesystem shrinkers to use the new API, and standardise some
of the behaviours of the shrinkers at the same time.  For example,
nr_to_scan means the number of objects to scan, not the number of objects
to free.

I refactored the CIFS idmap shrinker a little - it really needs to be
broken up into a shrinker per tree and keep an item count with the tree
root so that we don't need to walk the tree every time the shrinker needs
to count the number of objects in the tree (i.e.  all the time under
memory pressure).

[glommer@openvz.org: fixes for ext4, ubifs, nfs, cifs and glock. Fixes are needed mainly due to new code merged in the tree]
[assorted fixes folded in]
Signed-off-by: Dave Chinner <dchinner@redhat.com>
Signed-off-by: Glauber Costa <glommer@openvz.org>
Acked-by: Mel Gorman <mgorman@suse.de>
Acked-by: Artem Bityutskiy <artem.bityutskiy@linux.intel.com>
Acked-by: Jan Kara <jack@suse.cz>
Acked-by: Steven Whitehouse <swhiteho@redhat.com>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: "Theodore Ts'o" <tytso@mit.edu>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Artem Bityutskiy <artem.bityutskiy@linux.intel.com>
Cc: Arve Hjønnevåg <arve@android.com>
Cc: Carlos Maiolino <cmaiolino@redhat.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Chuck Lever <chuck.lever@oracle.com>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: David Rientjes <rientjes@google.com>
Cc: Gleb Natapov <gleb@redhat.com>
Cc: Greg Thelen <gthelen@google.com>
Cc: J. Bruce Fields <bfields@redhat.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Jerome Glisse <jglisse@redhat.com>
Cc: John Stultz <john.stultz@linaro.org>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Kent Overstreet <koverstreet@google.com>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Steven Whitehouse <swhiteho@redhat.com>
Cc: Thomas Hellstrom <thellstrom@vmware.com>
Cc: Trond Myklebust <Trond.Myklebust@netapp.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
parent 35163417
Loading
Loading
Loading
Loading
+24 −9
Original line number Diff line number Diff line
@@ -931,13 +931,15 @@ static int __ext4_es_shrink(struct ext4_sb_info *sbi, int nr_to_scan,
	struct ext4_inode_info *ei;
	struct list_head *cur, *tmp;
	LIST_HEAD(skipped);
	int ret, nr_shrunk = 0;
	int nr_shrunk = 0;
	int retried = 0, skip_precached = 1, nr_skipped = 0;

	spin_lock(&sbi->s_es_lru_lock);

retry:
	list_for_each_safe(cur, tmp, &sbi->s_es_lru) {
		int shrunk;

		/*
		 * If we have already reclaimed all extents from extent
		 * status tree, just stop the loop immediately.
@@ -964,13 +966,13 @@ static int __ext4_es_shrink(struct ext4_sb_info *sbi, int nr_to_scan,
			continue;

		write_lock(&ei->i_es_lock);
		ret = __es_try_to_reclaim_extents(ei, nr_to_scan);
		shrunk = __es_try_to_reclaim_extents(ei, nr_to_scan);
		if (ei->i_es_lru_nr == 0)
			list_del_init(&ei->i_es_lru);
		write_unlock(&ei->i_es_lock);

		nr_shrunk += ret;
		nr_to_scan -= ret;
		nr_shrunk += shrunk;
		nr_to_scan -= shrunk;
		if (nr_to_scan == 0)
			break;
	}
@@ -1007,7 +1009,20 @@ static int __ext4_es_shrink(struct ext4_sb_info *sbi, int nr_to_scan,
	return nr_shrunk;
}

static int ext4_es_shrink(struct shrinker *shrink, struct shrink_control *sc)
/*
 * Shrinker ->count_objects callback for the ext4 extent-status cache.
 * Reports how many cached extent-status objects are reclaimable, read
 * cheaply from the s_extent_cache_cnt percpu counter (clamped to >= 0
 * by percpu_counter_read_positive).  Also emits the shrink-enter
 * tracepoint with the requested scan count and the current total.
 */
static unsigned long ext4_es_count(struct shrinker *shrink,
				   struct shrink_control *sc)
{
	unsigned long nr;
	struct ext4_sb_info *sbi;

	/* Recover the owning sb-info from the embedded shrinker. */
	sbi = container_of(shrink, struct ext4_sb_info, s_es_shrinker);
	nr = percpu_counter_read_positive(&sbi->s_extent_cache_cnt);
	trace_ext4_es_shrink_enter(sbi->s_sb, sc->nr_to_scan, nr);
	return nr;
}

static unsigned long ext4_es_scan(struct shrinker *shrink,
				  struct shrink_control *sc)
{
	struct ext4_sb_info *sbi = container_of(shrink,
					struct ext4_sb_info, s_es_shrinker);
@@ -1022,9 +1037,8 @@ static int ext4_es_shrink(struct shrinker *shrink, struct shrink_control *sc)

	nr_shrunk = __ext4_es_shrink(sbi, nr_to_scan, NULL);

	ret = percpu_counter_read_positive(&sbi->s_extent_cache_cnt);
	trace_ext4_es_shrink_exit(sbi->s_sb, nr_shrunk, ret);
	return ret;
	return nr_shrunk;
}

void ext4_es_register_shrinker(struct ext4_sb_info *sbi)
@@ -1032,7 +1046,8 @@ void ext4_es_register_shrinker(struct ext4_sb_info *sbi)
	INIT_LIST_HEAD(&sbi->s_es_lru);
	spin_lock_init(&sbi->s_es_lru_lock);
	sbi->s_es_last_sorted = 0;
	sbi->s_es_shrinker.shrink = ext4_es_shrink;
	sbi->s_es_shrinker.scan_objects = ext4_es_scan;
	sbi->s_es_shrinker.count_objects = ext4_es_count;
	sbi->s_es_shrinker.seeks = DEFAULT_SEEKS;
	register_shrinker(&sbi->s_es_shrinker);
}
@@ -1076,7 +1091,7 @@ static int __es_try_to_reclaim_extents(struct ext4_inode_info *ei,
	struct ext4_es_tree *tree = &ei->i_es_tree;
	struct rb_node *node;
	struct extent_status *es;
	int nr_shrunk = 0;
	unsigned long nr_shrunk = 0;
	static DEFINE_RATELIMIT_STATE(_rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);

+17 −11
Original line number Diff line number Diff line
@@ -1427,21 +1427,22 @@ __acquires(&lru_lock)
 * gfs2_dispose_glock_lru() above.
 */

static void gfs2_scan_glock_lru(int nr)
static long gfs2_scan_glock_lru(int nr)
{
	struct gfs2_glock *gl;
	LIST_HEAD(skipped);
	LIST_HEAD(dispose);
	long freed = 0;

	spin_lock(&lru_lock);
	while(nr && !list_empty(&lru_list)) {
	while ((nr-- >= 0) && !list_empty(&lru_list)) {
		gl = list_entry(lru_list.next, struct gfs2_glock, gl_lru);

		/* Test for being demotable */
		if (!test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
			list_move(&gl->gl_lru, &dispose);
			atomic_dec(&lru_count);
			nr--;
			freed++;
			continue;
		}

@@ -1451,23 +1452,28 @@ static void gfs2_scan_glock_lru(int nr)
	if (!list_empty(&dispose))
		gfs2_dispose_glock_lru(&dispose);
	spin_unlock(&lru_lock);

	return freed;
}

static int gfs2_shrink_glock_memory(struct shrinker *shrink,
static unsigned long gfs2_glock_shrink_scan(struct shrinker *shrink,
					    struct shrink_control *sc)
{
	if (sc->nr_to_scan) {
	if (!(sc->gfp_mask & __GFP_FS))
			return -1;
		gfs2_scan_glock_lru(sc->nr_to_scan);
		return SHRINK_STOP;
	return gfs2_scan_glock_lru(sc->nr_to_scan);
}

/*
 * Shrinker ->count_objects callback for the GFS2 glock LRU.
 * Returns the current LRU population scaled through vfs_pressure_ratio()
 * rather than the raw count (presumably to weight glocks against other
 * VFS caches — confirm against vfs_pressure_ratio()'s definition).
 */
static unsigned long gfs2_glock_shrink_count(struct shrinker *shrink,
					     struct shrink_control *sc)
{
	return vfs_pressure_ratio(atomic_read(&lru_count));
}

static struct shrinker glock_shrinker = {
	.shrink = gfs2_shrink_glock_memory,
	.seeks = DEFAULT_SEEKS,
	.count_objects = gfs2_glock_shrink_count,
	.scan_objects = gfs2_glock_shrink_scan,
};

/**
+2 −1
Original line number Diff line number Diff line
@@ -32,7 +32,8 @@
struct workqueue_struct *gfs2_control_wq;

static struct shrinker qd_shrinker = {
	.shrink = gfs2_shrink_qd_memory,
	.count_objects = gfs2_qd_shrink_count,
	.scan_objects = gfs2_qd_shrink_scan,
	.seeks = DEFAULT_SEEKS,
};

+10 −6
Original line number Diff line number Diff line
@@ -75,17 +75,16 @@ static LIST_HEAD(qd_lru_list);
static atomic_t qd_lru_count = ATOMIC_INIT(0);
static DEFINE_SPINLOCK(qd_lru_lock);

int gfs2_shrink_qd_memory(struct shrinker *shrink, struct shrink_control *sc)
unsigned long gfs2_qd_shrink_scan(struct shrinker *shrink,
				  struct shrink_control *sc)
{
	struct gfs2_quota_data *qd;
	struct gfs2_sbd *sdp;
	int nr_to_scan = sc->nr_to_scan;

	if (nr_to_scan == 0)
		goto out;
	long freed = 0;

	if (!(sc->gfp_mask & __GFP_FS))
		return -1;
		return SHRINK_STOP;

	spin_lock(&qd_lru_lock);
	while (nr_to_scan && !list_empty(&qd_lru_list)) {
@@ -110,10 +109,15 @@ int gfs2_shrink_qd_memory(struct shrinker *shrink, struct shrink_control *sc)
		kmem_cache_free(gfs2_quotad_cachep, qd);
		spin_lock(&qd_lru_lock);
		nr_to_scan--;
		freed++;
	}
	spin_unlock(&qd_lru_lock);
	return freed;
}

out:
/*
 * Shrinker ->count_objects callback for the GFS2 quota-data LRU.
 * Mirrors gfs2's glock shrinker: reads the atomic LRU counter and
 * scales it with vfs_pressure_ratio() before reporting it to the
 * shrinker core.  Non-static because it is referenced from the
 * qd_shrinker definition in another file (declared extern in quota.h).
 */
unsigned long gfs2_qd_shrink_count(struct shrinker *shrink,
				   struct shrink_control *sc)
{
	return vfs_pressure_ratio(atomic_read(&qd_lru_count));
}

+4 −2
Original line number Diff line number Diff line
@@ -53,7 +53,9 @@ static inline int gfs2_quota_lock_check(struct gfs2_inode *ip)
	return ret;
}

extern int gfs2_shrink_qd_memory(struct shrinker *shrink,
extern unsigned long gfs2_qd_shrink_count(struct shrinker *shrink,
					  struct shrink_control *sc);
extern unsigned long gfs2_qd_shrink_scan(struct shrinker *shrink,
					 struct shrink_control *sc);
extern const struct quotactl_ops gfs2_quotactl_ops;

Loading