Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 76395d37 authored by Andrew Morton, committed by Linus Torvalds
Browse files

[PATCH] shrink_all_memory(): fix lru_pages handling



At the end of shrink_all_memory() we forget to recalculate lru_pages: it can
be zero.

Fix that up, and add a helper function for this operation too.

Also, recalculate lru_pages each time around the inner loop to get the
balancing correct.

Cc: "Rafael J. Wysocki" <rjw@sisk.pl>
Cc: Pavel Machek <pavel@ucw.cz>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent d63b7090
Loading
Loading
Loading
Loading
+16 −17
Original line number Diff line number Diff line
@@ -1406,6 +1406,16 @@ static unsigned long shrink_all_zones(unsigned long nr_pages, int prio,
	return ret;
}

static unsigned long count_lru_pages(void)
{
	struct zone *zone;
	unsigned long ret = 0;

	for_each_zone(zone)
		ret += zone->nr_active + zone->nr_inactive;
	return ret;
}

/*
 * Try to free `nr_pages' of memory, system-wide, and return the number of
 * freed pages.
@@ -1420,7 +1430,6 @@ unsigned long shrink_all_memory(unsigned long nr_pages)
	unsigned long ret = 0;
	int pass;
	struct reclaim_state reclaim_state;
	struct zone *zone;
	struct scan_control sc = {
		.gfp_mask = GFP_KERNEL,
		.may_swap = 0,
@@ -1431,10 +1440,7 @@ unsigned long shrink_all_memory(unsigned long nr_pages)

	current->reclaim_state = &reclaim_state;

	lru_pages = 0;
	for_each_zone(zone)
		lru_pages += zone->nr_active + zone->nr_inactive;

	lru_pages = count_lru_pages();
	nr_slab = global_page_state(NR_SLAB_RECLAIMABLE);
	/* If slab caches are huge, it's better to hit them first */
	while (nr_slab >= lru_pages) {
@@ -1461,13 +1467,6 @@ unsigned long shrink_all_memory(unsigned long nr_pages)
	for (pass = 0; pass < 5; pass++) {
		int prio;

		/* Needed for shrinking slab caches later on */
		if (!lru_pages)
			for_each_zone(zone) {
				lru_pages += zone->nr_active;
				lru_pages += zone->nr_inactive;
			}

		/* Force reclaiming mapped pages in the passes #3 and #4 */
		if (pass > 2) {
			sc.may_swap = 1;
@@ -1483,7 +1482,8 @@ unsigned long shrink_all_memory(unsigned long nr_pages)
				goto out;

			reclaim_state.reclaimed_slab = 0;
			shrink_slab(sc.nr_scanned, sc.gfp_mask, lru_pages);
			shrink_slab(sc.nr_scanned, sc.gfp_mask,
					count_lru_pages());
			ret += reclaim_state.reclaimed_slab;
			if (ret >= nr_pages)
				goto out;
@@ -1491,20 +1491,19 @@ unsigned long shrink_all_memory(unsigned long nr_pages)
			if (sc.nr_scanned && prio < DEF_PRIORITY - 2)
				congestion_wait(WRITE, HZ / 10);
		}

		lru_pages = 0;
	}

	/*
	 * If ret = 0, we could not shrink LRUs, but there may be something
	 * in slab caches
	 */
	if (!ret)
	if (!ret) {
		do {
			reclaim_state.reclaimed_slab = 0;
			shrink_slab(nr_pages, sc.gfp_mask, lru_pages);
			shrink_slab(nr_pages, sc.gfp_mask, count_lru_pages());
			ret += reclaim_state.reclaimed_slab;
		} while (ret < nr_pages && reclaim_state.reclaimed_slab > 0);
	}

out:
	current->reclaim_state = NULL;