
Commit d8c98bb6 authored by Vinayak Menon

mm: separate out the invocation of lowmemorykiller shrinker



Commit 6b4f77 ("mm: vmscan: invoke slab shrinkers from shrink_zone()")
fixed the invocation of shrinkers, but resulted in the lowmemorykiller
shrinker being called more often. Reduce the number of lowmemorykiller
shrinker invocations by separating the lowmemorykiller shrinker out of
shrink_slab. With this, lowmemorykiller is invoked only once for all
zones reclaimed in the direct reclaim path, and once per zone in the
kswapd path. As a consequence, the eligible pages passed to
shrink_slab_lmk are now the reclaimable pages of all zones. Reducing
the number of lowmemorykiller invocations reduces the unnecessary time
spent in lowmemorykiller, and thus contention on, or failures to
acquire, lowmemorykiller's scan_mutex.

Change-Id: Iaabb9e441711f1dc804980b5853b64b3f214698d
Signed-off-by: Vinayak Menon <vinmenon@codeaurora.org>
parent 0293b8a7
drivers/staging/android/lowmemorykiller.c  +2 −1
@@ -565,7 +565,8 @@ static unsigned long lowmem_scan(struct shrinker *s, struct shrink_control *sc)
 static struct shrinker lowmem_shrinker = {
 	.scan_objects = lowmem_scan,
 	.count_objects = lowmem_count,
-	.seeks = DEFAULT_SEEKS * 16
+	.seeks = DEFAULT_SEEKS * 16,
+	.flags = SHRINKER_LMK
 };
 
 static int __init lowmem_init(void)
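
For context, a minimal sketch (not part of this patch; all my_* names are hypothetical) of how a shrinker would opt into the new LMK-only invocation path, using the 3.18-era shrinker API declared in include/linux/shrinker.h:

/* Hypothetical module: a shrinker flagged SHRINKER_LMK is skipped by
 * shrink_slab() and invoked only from shrink_slab_lmk().
 */
#include <linux/module.h>
#include <linux/shrinker.h>

static unsigned long my_lmk_count(struct shrinker *s,
				  struct shrink_control *sc)
{
	return 0;		/* nothing to report in this sketch */
}

static unsigned long my_lmk_scan(struct shrinker *s,
				 struct shrink_control *sc)
{
	return SHRINK_STOP;	/* no objects freed in this sketch */
}

static struct shrinker my_lmk_shrinker = {
	.count_objects	= my_lmk_count,
	.scan_objects	= my_lmk_scan,
	.seeks		= DEFAULT_SEEKS,
	.flags		= SHRINKER_LMK,
};

static int __init my_lmk_init(void)
{
	return register_shrinker(&my_lmk_shrinker);
}
module_init(my_lmk_init);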
include/linux/shrinker.h  +1 −0
@@ -66,6 +66,7 @@ struct shrinker {
 /* Flags */
 #define SHRINKER_NUMA_AWARE	(1 << 0)
 #define SHRINKER_MEMCG_AWARE	(1 << 1)
+#define SHRINKER_LMK		(1 << 2)
 
 extern int register_shrinker(struct shrinker *);
 extern void unregister_shrinker(struct shrinker *);
mm/vmscan.c  +52 −2
@@ -399,6 +399,35 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
 	return freed;
 }
 
+static void shrink_slab_lmk(gfp_t gfp_mask, int nid,
+				 struct mem_cgroup *memcg,
+				 unsigned long nr_scanned,
+				 unsigned long nr_eligible)
+{
+	struct shrinker *shrinker;
+
+	if (nr_scanned == 0)
+		nr_scanned = SWAP_CLUSTER_MAX;
+
+	if (!down_read_trylock(&shrinker_rwsem))
+		goto out;
+
+	list_for_each_entry(shrinker, &shrinker_list, list) {
+		struct shrink_control sc = {
+			.gfp_mask = gfp_mask,
+		};
+
+		if (!(shrinker->flags & SHRINKER_LMK))
+			continue;
+
+		do_shrink_slab(&sc, shrinker, nr_scanned, nr_eligible);
+	}
+
+	up_read(&shrinker_rwsem);
+out:
+	cond_resched();
+}
+
 /**
  * shrink_slab - shrink slab caches
  * @gfp_mask: allocation context
@@ -460,6 +489,9 @@ static unsigned long shrink_slab(gfp_t gfp_mask, int nid,
 			.memcg = memcg,
 		};
 
+		if (shrinker->flags & SHRINKER_LMK)
+			continue;
+
 		if (memcg && !(shrinker->flags & SHRINKER_MEMCG_AWARE))
 			continue;
 
@@ -2626,6 +2658,7 @@ static bool shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
 	gfp_t orig_mask;
 	enum zone_type requested_highidx = gfp_zone(sc->gfp_mask);
 	bool reclaimable = false;
+	unsigned long lru_pages = 0;
 
 	/*
 	 * If the number of buffer_heads in the machine exceeds the maximum
@@ -2653,6 +2686,7 @@ static bool shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
 		 * to global LRU.
 		 */
 		if (global_reclaim(sc)) {
+			lru_pages += zone_reclaimable_pages(zone);
 			if (!cpuset_zone_allowed(zone,
 						 GFP_KERNEL | __GFP_HARDWALL))
 				continue;
@@ -2703,6 +2737,9 @@ static bool shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
 			reclaimable = true;
 	}
 
+	if (global_reclaim(sc))
+		shrink_slab_lmk(sc->gfp_mask, 0, NULL,
+				sc->nr_scanned, lru_pages);
 	/*
 	 * Restore to original mask to avoid the impact on the caller if we
 	 * promoted it to __GFP_HIGHMEM.
@@ -3181,7 +3218,8 @@ static bool prepare_kswapd_sleep(pg_data_t *pgdat, int order, long remaining,
  */
 static bool kswapd_shrink_zone(struct zone *zone,
 			       int classzone_idx,
-			       struct scan_control *sc)
+			       struct scan_control *sc,
+				unsigned long lru_pages)
 {
 	unsigned long balance_gap;
 	bool lowmem_pressure;
@@ -3208,6 +3246,8 @@ static bool kswapd_shrink_zone(struct zone *zone,
 		return true;
 
 	shrink_zone(zone, sc, zone_idx(zone) == classzone_idx);
+	shrink_slab_lmk(sc->gfp_mask, zone_to_nid(zone), NULL,
+			sc->nr_scanned, lru_pages);
 
 	clear_bit(ZONE_WRITEBACK, &zone->flags);
 
@@ -3265,6 +3305,7 @@ static int balance_pgdat(pg_data_t *pgdat, int order, int classzone_idx)
 
 	do {
 		bool raise_priority = true;
+		unsigned long lru_pages = 0;
 
 		sc.nr_reclaimed = 0;
 
@@ -3322,6 +3363,15 @@ static int balance_pgdat(pg_data_t *pgdat, int order, int classzone_idx)
 		if (sc.priority < DEF_PRIORITY - 2)
 			sc.may_writepage = 1;
 
+		for (i = 0; i <= end_zone; i++) {
+			struct zone *zone = pgdat->node_zones + i;
+
+			if (!populated_zone(zone))
+				continue;
+
+			lru_pages += zone_reclaimable_pages(zone);
+		}
+
 		/*
 		 * Now scan the zone in the dma->highmem direction, stopping
 		 * at the last zone which needs scanning.
@@ -3358,7 +3408,7 @@ static int balance_pgdat(pg_data_t *pgdat, int order, int classzone_idx)
 			 * that that high watermark would be met at 100%
 			 * efficiency.
 			 */
-			if (kswapd_shrink_zone(zone, end_zone, &sc))
+			if (kswapd_shrink_zone(zone, end_zone, &sc, lru_pages))
 				raise_priority = false;
 		}
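
To make the commit message's arithmetic concrete, here is a toy userspace model (not kernel code; NZONES is an assumed zone count) of how often the lowmemorykiller shrinker runs per direct-reclaim pass before and after this change:

/* Toy userspace model, not kernel code: contrasts the number of
 * lowmemorykiller shrinker invocations per direct-reclaim pass,
 * assuming a node with NZONES eligible zones.
 */
#include <stdio.h>

#define NZONES 3

int main(void)
{
	int zone, before = 0, after = 0;

	for (zone = 0; zone < NZONES; zone++)
		before++;	/* old: shrink_slab() ran every shrinker,
				 * including LMK, once per zone */

	after++;		/* new: shrink_slab_lmk() runs once for
				 * the whole shrink_zones() pass */

	printf("LMK invocations per pass: before=%d, after=%d\n",
	       before, after);
	return 0;
}

In the kswapd path the per-zone invocation count is unchanged; the saving comes from the direct reclaim path, plus the fact that non-LMK shrinkers no longer walk past the LMK entry on every shrink_slab() call.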