
Commit e247dbce authored by KOSAKI Motohiro, committed by Linus Torvalds

vmscan: simplify shrink_inactive_list()



The max_scan passed to shrink_inactive_list() is now never more than
SWAP_CLUSTER_MAX, so we can remove the page-scanning loop inside it.  This
also helps trim the function's stack footprint.

Details (a standalone sketch of the before/after control flow follows this list):
 - remove the "while (nr_scanned < max_scan)" loop
 - remove nr_freed (nr_reclaimed is now used directly)
 - remove nr_scan (nr_scanned is now used directly)
 - rename max_scan to nr_to_scan
 - pass nr_to_scan into isolate_pages() directly instead of
   using SWAP_CLUSTER_MAX
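
To make the transformation concrete, here is a standalone C sketch of the
before/after control flow.  The stub isolate_pages()/shrink_page_list()
signatures and the shrink_old()/shrink_new() names are simplified stand-ins
invented for this example, not the kernel's real interfaces; only the shape
of the change mirrors the patch, under the assumption the commit states
(callers never request more than SWAP_CLUSTER_MAX pages per call).

#include <stdio.h>

#define SWAP_CLUSTER_MAX 32UL

/* Simplified stand-ins for the kernel's isolation and reclaim
 * routines; the real ones take many more arguments. */
static unsigned long isolate_pages(unsigned long nr, unsigned long *nr_scanned)
{
	*nr_scanned = nr;	/* pretend every requested page was scanned */
	return nr;		/* and that all of them were isolated */
}

static unsigned long shrink_page_list(unsigned long nr_taken)
{
	return nr_taken / 2;	/* pretend half the isolated pages were freed */
}

/* Before: scan in SWAP_CLUSTER_MAX batches until max_scan is reached,
 * accumulating through the loop-local nr_scan/nr_freed. */
static unsigned long shrink_old(unsigned long max_scan)
{
	unsigned long nr_scanned = 0;
	unsigned long nr_reclaimed = 0;

	do {
		unsigned long nr_scan;
		unsigned long nr_taken = isolate_pages(SWAP_CLUSTER_MAX,
						       &nr_scan);

		nr_scanned += nr_scan;
		nr_reclaimed += shrink_page_list(nr_taken);
	} while (nr_scanned < max_scan);

	return nr_reclaimed;
}

/* After: the caller guarantees nr_to_scan <= SWAP_CLUSTER_MAX, so a
 * single pass suffices; nr_scan and nr_freed disappear and nr_scanned
 * and nr_reclaimed are used directly. */
static unsigned long shrink_new(unsigned long nr_to_scan)
{
	unsigned long nr_scanned;
	unsigned long nr_taken = isolate_pages(nr_to_scan, &nr_scanned);

	return shrink_page_list(nr_taken);
}

int main(void)
{
	printf("old: %lu reclaimed\n", shrink_old(SWAP_CLUSTER_MAX));
	printf("new: %lu reclaimed\n", shrink_new(SWAP_CLUSTER_MAX));
	return 0;
}

With the loop gone, the real patch also moves the former loop-local
variables (page, nr_taken, nr_active, count[], nr_anon, nr_file) up to
function scope, as the first hunk below shows; that is where the stack
saving comes from.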

[akpm@linux-foundation.org: coding-style fixes]
Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Signed-off-by: Mel Gorman <mel@csn.ul.ie>
Reviewed-by: Johannes Weiner <hannes@cmpxchg.org>
Reviewed-by: Rik van Riel <riel@redhat.com>
Cc: Dave Chinner <david@fromorbit.com>
Cc: Chris Mason <chris.mason@oracle.com>
Cc: Nick Piggin <npiggin@suse.de>
Cc: Rik van Riel <riel@redhat.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Michael Rubin <mrubin@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 25edde03
mm/vmscan.c: +102 −110
@@ -1136,15 +1136,21 @@ static int too_many_isolated(struct zone *zone, int file,
  * shrink_inactive_list() is a helper for shrink_zone().  It returns the number
  * of reclaimed pages
  */
-static unsigned long shrink_inactive_list(unsigned long max_scan,
+static unsigned long shrink_inactive_list(unsigned long nr_to_scan,
 			struct zone *zone, struct scan_control *sc,
 			int priority, int file)
 {
 	LIST_HEAD(page_list);
 	struct pagevec pvec;
-	unsigned long nr_scanned = 0;
+	unsigned long nr_scanned;
 	unsigned long nr_reclaimed = 0;
 	struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
+	struct page *page;
+	unsigned long nr_taken;
+	unsigned long nr_active;
+	unsigned int count[NR_LRU_LISTS] = { 0, };
+	unsigned long nr_anon;
+	unsigned long nr_file;
 
 	while (unlikely(too_many_isolated(zone, file, sc))) {
 		congestion_wait(BLK_RW_ASYNC, HZ/10);
@@ -1159,33 +1165,25 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
 
 	lru_add_drain();
 	spin_lock_irq(&zone->lru_lock);
-	do {
-		struct page *page;
-		unsigned long nr_taken;
-		unsigned long nr_scan;
-		unsigned long nr_freed;
-		unsigned long nr_active;
-		unsigned int count[NR_LRU_LISTS] = { 0, };
-		int mode = sc->lumpy_reclaim_mode ? ISOLATE_BOTH : ISOLATE_INACTIVE;
-		unsigned long nr_anon;
-		unsigned long nr_file;
 
 	if (scanning_global_lru(sc)) {
-			nr_taken = isolate_pages_global(SWAP_CLUSTER_MAX,
-							&page_list, &nr_scan,
-							sc->order, mode,
+		nr_taken = isolate_pages_global(nr_to_scan,
+			&page_list, &nr_scanned, sc->order,
+			sc->lumpy_reclaim_mode ?
+				ISOLATE_BOTH : ISOLATE_INACTIVE,
 			zone, 0, file);
-			zone->pages_scanned += nr_scan;
+		zone->pages_scanned += nr_scanned;
 		if (current_is_kswapd())
 			__count_zone_vm_events(PGSCAN_KSWAPD, zone,
-						       nr_scan);
+					       nr_scanned);
 		else
 			__count_zone_vm_events(PGSCAN_DIRECT, zone,
-						       nr_scan);
+					       nr_scanned);
 	} else {
-			nr_taken = mem_cgroup_isolate_pages(SWAP_CLUSTER_MAX,
-							&page_list, &nr_scan,
-							sc->order, mode,
+		nr_taken = mem_cgroup_isolate_pages(nr_to_scan,
+			&page_list, &nr_scanned, sc->order,
+			sc->lumpy_reclaim_mode ?
+				ISOLATE_BOTH : ISOLATE_INACTIVE,
 			zone, sc->mem_cgroup,
 			0, file);
 		/*
@@ -1219,8 +1217,7 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
 
 	spin_unlock_irq(&zone->lru_lock);
 
-		nr_scanned += nr_scan;
-		nr_freed = shrink_page_list(&page_list, sc, PAGEOUT_IO_ASYNC);
+	nr_reclaimed = shrink_page_list(&page_list, sc, PAGEOUT_IO_ASYNC);
 
 	/*
 	 * If we are direct reclaiming for contiguous pages and we do
@@ -1228,7 +1225,7 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
 	 * for IO to complete. This will stall high-order allocations
 	 * but that should be acceptable to the caller
 	 */
-		if (nr_freed < nr_taken && !current_is_kswapd() &&
+	if (nr_reclaimed < nr_taken && !current_is_kswapd() &&
 			sc->lumpy_reclaim_mode) {
 		congestion_wait(BLK_RW_ASYNC, HZ/10);
 
@@ -1239,16 +1236,13 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
 		nr_active = clear_active_flags(&page_list, count);
 		count_vm_events(PGDEACTIVATE, nr_active);
 
-			nr_freed += shrink_page_list(&page_list, sc,
-							PAGEOUT_IO_SYNC);
+		nr_reclaimed += shrink_page_list(&page_list, sc, PAGEOUT_IO_SYNC);
 	}
 
-		nr_reclaimed += nr_freed;
-
 	local_irq_disable();
 	if (current_is_kswapd())
-			__count_vm_events(KSWAPD_STEAL, nr_freed);
-		__count_zone_vm_events(PGSTEAL, zone, nr_freed);
+		__count_vm_events(KSWAPD_STEAL, nr_reclaimed);
+	__count_zone_vm_events(PGSTEAL, zone, nr_reclaimed);
 
 	spin_lock(&zone->lru_lock);
 	/*
@@ -1281,8 +1275,6 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
 	__mod_zone_page_state(zone, NR_ISOLATED_ANON, -nr_anon);
 	__mod_zone_page_state(zone, NR_ISOLATED_FILE, -nr_file);
 
-	} while (nr_scanned < max_scan);
-
 done:
 	spin_unlock_irq(&zone->lru_lock);
 	pagevec_release(&pvec);