Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit cb731d6c authored by Vladimir Davydov, committed by Linus Torvalds
Browse files

vmscan: per memory cgroup slab shrinkers



This patch adds SHRINKER_MEMCG_AWARE flag.  If a shrinker has this flag
set, it will be called per memory cgroup.  The memory cgroup to scan
objects from is passed in shrink_control->memcg.  If the memory cgroup
is NULL, a memcg aware shrinker is supposed to scan objects from the
global list.  Unaware shrinkers are only called on global pressure with
memcg=NULL.

Signed-off-by: Vladimir Davydov <vdavydov@parallels.com>
Cc: Dave Chinner <david@fromorbit.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Greg Thelen <gthelen@google.com>
Cc: Glauber Costa <glommer@gmail.com>
Cc: Alexander Viro <viro@zeniv.linux.org.uk>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Tejun Heo <tj@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 4101b624
Loading
Loading
Loading
Loading
+0 −14
Original line number Diff line number Diff line
@@ -37,20 +37,6 @@ static void drop_pagecache_sb(struct super_block *sb, void *unused)
	iput(toput_inode);
}

/*
 * Reclaim slab objects system-wide by repeatedly walking every online
 * NUMA node and invoking the slab shrinkers on each.
 *
 * Each pass asks shrink_node_slabs() to scan with nr_scanned = 1000 and
 * nr_eligible = 1000, summing the number of objects reclaimed across all
 * nodes.  Passes repeat until a full sweep frees no more than 10 objects,
 * i.e. further shrinking would yield diminishing returns.
 */
static void drop_slab(void)
{
	int nr_objects;

	do {
		int nid;

		/* Restart the tally for this sweep over all nodes. */
		nr_objects = 0;
		for_each_online_node(nid)
			nr_objects += shrink_node_slabs(GFP_KERNEL, nid,
							1000, 1000);
	} while (nr_objects > 10);
}

int drop_caches_sysctl_handler(struct ctl_table *table, int write,
	void __user *buffer, size_t *length, loff_t *ppos)
{
+7 −0
Original line number Diff line number Diff line
@@ -413,6 +413,8 @@ static inline bool memcg_kmem_enabled(void)
	return static_key_false(&memcg_kmem_enabled_key);
}

bool memcg_kmem_is_active(struct mem_cgroup *memcg);

/*
 * In general, we'll do everything in our power to not incur in any overhead
 * for non-memcg users for the kmem functions. Not even a function call, if we
@@ -542,6 +544,11 @@ static inline bool memcg_kmem_enabled(void)
	return false;
}

/*
 * Stub that always reports the memcg as having no active kmem accounting.
 * NOTE(review): presumably this sits in the !CONFIG_MEMCG_KMEM branch,
 * mirroring the real definition guarded by CONFIG_MEMCG_KMEM elsewhere —
 * confirm against the surrounding #ifdef in the full header.
 */
static inline bool memcg_kmem_is_active(struct mem_cgroup *memcg)
{
	return false;
}

static inline bool
memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg, int order)
{
+2 −3
Original line number Diff line number Diff line
@@ -2168,9 +2168,8 @@ int drop_caches_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
#endif

unsigned long shrink_node_slabs(gfp_t gfp_mask, int nid,
				unsigned long nr_scanned,
				unsigned long nr_eligible);
void drop_slab(void);
void drop_slab_node(int nid);

#ifndef CONFIG_MMU
#define randomize_va_space 0
+5 −1
Original line number Diff line number Diff line
@@ -20,6 +20,9 @@ struct shrink_control {

	/* current node being shrunk (for NUMA aware shrinkers) */
	int nid;

	/* current memcg being shrunk (for memcg aware shrinkers) */
	struct mem_cgroup *memcg;
};

#define SHRINK_STOP (~0UL)
@@ -62,6 +65,7 @@ struct shrinker {

/* Flags */
#define SHRINKER_NUMA_AWARE	(1 << 0)
#define SHRINKER_MEMCG_AWARE	(1 << 1)

extern int register_shrinker(struct shrinker *);
extern void unregister_shrinker(struct shrinker *);
+1 −1
Original line number Diff line number Diff line
@@ -352,7 +352,7 @@ struct mem_cgroup {
};

#ifdef CONFIG_MEMCG_KMEM
static bool memcg_kmem_is_active(struct mem_cgroup *memcg)
bool memcg_kmem_is_active(struct mem_cgroup *memcg)
{
	return memcg->kmemcg_id >= 0;
}
Loading