Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 0a234c6d authored by Dave Chinner, committed by Al Viro
Browse files

shrinker: convert superblock shrinkers to new API



Convert superblock shrinker to use the new count/scan API, and propagate
the API changes through to the filesystem callouts.  The filesystem
callouts already use a count/scan API, so it's just changing counters to
longs to match the VM API.

This requires the dentry and inode shrinker callouts to be converted to
the count/scan API.  This is mainly a mechanical change.

[glommer@openvz.org: use mult_frac for fractional proportions, build fixes]
Signed-off-by: Dave Chinner <dchinner@redhat.com>
Signed-off-by: Glauber Costa <glommer@openvz.org>
Acked-by: Mel Gorman <mgorman@suse.de>
Cc: "Theodore Ts'o" <tytso@mit.edu>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Artem Bityutskiy <artem.bityutskiy@linux.intel.com>
Cc: Arve Hjønnevåg <arve@android.com>
Cc: Carlos Maiolino <cmaiolino@redhat.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Chuck Lever <chuck.lever@oracle.com>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: David Rientjes <rientjes@google.com>
Cc: Gleb Natapov <gleb@redhat.com>
Cc: Greg Thelen <gthelen@google.com>
Cc: J. Bruce Fields <bfields@redhat.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Jerome Glisse <jglisse@redhat.com>
Cc: John Stultz <john.stultz@linaro.org>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Kent Overstreet <koverstreet@google.com>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Steven Whitehouse <swhiteho@redhat.com>
Cc: Thomas Hellstrom <thellstrom@vmware.com>
Cc: Trond Myklebust <Trond.Myklebust@netapp.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
parent 24f7c6b9
Loading
Loading
Loading
Loading
+5 −2
Original line number Diff line number Diff line
@@ -913,11 +913,12 @@ static void shrink_dentry_list(struct list_head *list)
 * This function may fail to free any resources if all the dentries are in
 * use.
 */
void prune_dcache_sb(struct super_block *sb, int count)
long prune_dcache_sb(struct super_block *sb, unsigned long nr_to_scan)
{
	struct dentry *dentry;
	LIST_HEAD(referenced);
	LIST_HEAD(tmp);
	long freed = 0;

relock:
	spin_lock(&sb->s_dentry_lru_lock);
@@ -942,7 +943,8 @@ void prune_dcache_sb(struct super_block *sb, int count)
			this_cpu_dec(nr_dentry_unused);
			sb->s_nr_dentry_unused--;
			spin_unlock(&dentry->d_lock);
			if (!--count)
			freed++;
			if (!--nr_to_scan)
				break;
		}
		cond_resched_lock(&sb->s_dentry_lru_lock);
@@ -952,6 +954,7 @@ void prune_dcache_sb(struct super_block *sb, int count)
	spin_unlock(&sb->s_dentry_lru_lock);

	shrink_dentry_list(&tmp);
	return freed;
}

/*
+5 −2
Original line number Diff line number Diff line
@@ -706,10 +706,11 @@ static int can_unuse(struct inode *inode)
 * LRU does not have strict ordering. Hence we don't want to reclaim inodes
 * with this flag set because they are the inodes that are out of order.
 */
void prune_icache_sb(struct super_block *sb, int nr_to_scan)
long prune_icache_sb(struct super_block *sb, unsigned long nr_to_scan)
{
	LIST_HEAD(freeable);
	int nr_scanned;
	long nr_scanned;
	long freed = 0;
	unsigned long reap = 0;

	spin_lock(&sb->s_inode_lru_lock);
@@ -779,6 +780,7 @@ void prune_icache_sb(struct super_block *sb, int nr_to_scan)
		list_move(&inode->i_lru, &freeable);
		sb->s_nr_inodes_unused--;
		this_cpu_dec(nr_unused);
		freed++;
	}
	if (current_is_kswapd())
		__count_vm_events(KSWAPD_INODESTEAL, reap);
@@ -789,6 +791,7 @@ void prune_icache_sb(struct super_block *sb, int nr_to_scan)
		current->reclaim_state->reclaimed_slab += reap;

	dispose_list(&freeable);
	return freed;
}

static void __wait_on_freeing_inode(struct inode *inode);
+2 −0
Original line number Diff line number Diff line
@@ -114,6 +114,7 @@ extern int open_check_o_direct(struct file *f);
 * inode.c
 */
extern spinlock_t inode_sb_list_lock;
extern long prune_icache_sb(struct super_block *sb, unsigned long nr_to_scan);
extern void inode_add_lru(struct inode *inode);

/*
@@ -130,6 +131,7 @@ extern int invalidate_inodes(struct super_block *, bool);
 */
extern struct dentry *__d_alloc(struct super_block *, const struct qstr *);
extern int d_set_mounted(struct dentry *dentry);
extern long prune_dcache_sb(struct super_block *sb, unsigned long nr_to_scan);

/*
 * read_write.c
+49 −31
Original line number Diff line number Diff line
@@ -53,11 +53,15 @@ static char *sb_writers_name[SB_FREEZE_LEVELS] = {
 * shrinker path and that leads to deadlock on the shrinker_rwsem. Hence we
 * take a passive reference to the superblock to avoid this from occurring.
 */
static int prune_super(struct shrinker *shrink, struct shrink_control *sc)
static unsigned long super_cache_scan(struct shrinker *shrink,
				      struct shrink_control *sc)
{
	struct super_block *sb;
	int	fs_objects = 0;
	int	total_objects;
	long	fs_objects = 0;
	long	total_objects;
	long	freed = 0;
	long	dentries;
	long	inodes;

	sb = container_of(shrink, struct super_block, s_shrink);

@@ -65,11 +69,11 @@ static int prune_super(struct shrinker *shrink, struct shrink_control *sc)
	 * Deadlock avoidance.  We may hold various FS locks, and we don't want
	 * to recurse into the FS that called us in clear_inode() and friends..
	 */
	if (sc->nr_to_scan && !(sc->gfp_mask & __GFP_FS))
		return -1;
	if (!(sc->gfp_mask & __GFP_FS))
		return SHRINK_STOP;

	if (!grab_super_passive(sb))
		return -1;
		return SHRINK_STOP;

	if (sb->s_op->nr_cached_objects)
		fs_objects = sb->s_op->nr_cached_objects(sb);
@@ -77,33 +81,46 @@ static int prune_super(struct shrinker *shrink, struct shrink_control *sc)
	total_objects = sb->s_nr_dentry_unused +
			sb->s_nr_inodes_unused + fs_objects + 1;

	if (sc->nr_to_scan) {
		int	dentries;
		int	inodes;

	/* proportion the scan between the caches */
	dentries = mult_frac(sc->nr_to_scan, sb->s_nr_dentry_unused,
								total_objects);
	inodes = mult_frac(sc->nr_to_scan, sb->s_nr_inodes_unused,
								total_objects);
		if (fs_objects)
			fs_objects = mult_frac(sc->nr_to_scan, fs_objects,
							total_objects);

	/*
	 * prune the dcache first as the icache is pinned by it, then
	 * prune the icache, followed by the filesystem specific caches
	 */
		prune_dcache_sb(sb, dentries);
		prune_icache_sb(sb, inodes);
	freed = prune_dcache_sb(sb, dentries);
	freed += prune_icache_sb(sb, inodes);

		if (fs_objects && sb->s_op->free_cached_objects) {
			sb->s_op->free_cached_objects(sb, fs_objects);
			fs_objects = sb->s_op->nr_cached_objects(sb);
	if (fs_objects) {
		fs_objects = mult_frac(sc->nr_to_scan, fs_objects,
								total_objects);
		freed += sb->s_op->free_cached_objects(sb, fs_objects);
	}
		total_objects = sb->s_nr_dentry_unused +
				sb->s_nr_inodes_unused + fs_objects;

	drop_super(sb);
	return freed;
}

static unsigned long super_cache_count(struct shrinker *shrink,
				       struct shrink_control *sc)
{
	struct super_block *sb;
	long	total_objects = 0;

	sb = container_of(shrink, struct super_block, s_shrink);

	if (!grab_super_passive(sb))
		return 0;

	if (sb->s_op && sb->s_op->nr_cached_objects)
		total_objects = sb->s_op->nr_cached_objects(sb);

	total_objects += sb->s_nr_dentry_unused;
	total_objects += sb->s_nr_inodes_unused;

	total_objects = vfs_pressure_ratio(total_objects);
	drop_super(sb);
	return total_objects;
@@ -211,7 +228,8 @@ static struct super_block *alloc_super(struct file_system_type *type, int flags)
		s->cleancache_poolid = -1;

		s->s_shrink.seeks = DEFAULT_SEEKS;
		s->s_shrink.shrink = prune_super;
		s->s_shrink.scan_objects = super_cache_scan;
		s->s_shrink.count_objects = super_cache_count;
		s->s_shrink.batch = 1024;
	}
out:
+2 −2
Original line number Diff line number Diff line
@@ -1167,7 +1167,7 @@ xfs_reclaim_inodes(
 * them to be cleaned, which we hope will not be very long due to the
 * background walker having already kicked the IO off on those dirty inodes.
 */
void
long
xfs_reclaim_inodes_nr(
	struct xfs_mount	*mp,
	int			nr_to_scan)
@@ -1176,7 +1176,7 @@ xfs_reclaim_inodes_nr(
	xfs_reclaim_work_queue(mp);
	xfs_ail_push_all(mp->m_ail);

	xfs_reclaim_inodes_ag(mp, SYNC_TRYLOCK | SYNC_WAIT, &nr_to_scan);
	return xfs_reclaim_inodes_ag(mp, SYNC_TRYLOCK | SYNC_WAIT, &nr_to_scan);
}

/*
Loading