Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 95d918fc authored by Konstantin Khlebnikov, committed by Linus Torvalds
Browse files

mm/vmscan: remove update_isolated_counts()



update_isolated_counts() is no longer required, because lumpy-reclaim was
removed.  Insanity is over, now there is only one kind of inactive page.

Signed-off-by: Konstantin Khlebnikov <khlebnikov@openvz.org>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Acked-by: Hugh Dickins <hughd@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 6a18adb3
Loading
Loading
Loading
Loading
+6 −54
Original line number Original line Diff line number Diff line
@@ -1205,52 +1205,6 @@ putback_inactive_pages(struct mem_cgroup_zone *mz,
 	list_splice(&pages_to_free, page_list);
 }
 
-static noinline_for_stack void
-update_isolated_counts(struct mem_cgroup_zone *mz,
-		       struct list_head *page_list,
-		       unsigned long *nr_anon,
-		       unsigned long *nr_file)
-{
-	struct zone *zone = mz->zone;
-	unsigned int count[NR_LRU_LISTS] = { 0, };
-	unsigned long nr_active = 0;
-	struct page *page;
-	int lru;
-
-	/*
-	 * Count pages and clear active flags
-	 */
-	list_for_each_entry(page, page_list, lru) {
-		int numpages = hpage_nr_pages(page);
-		lru = page_lru_base_type(page);
-		if (PageActive(page)) {
-			lru += LRU_ACTIVE;
-			ClearPageActive(page);
-			nr_active += numpages;
-		}
-		count[lru] += numpages;
-	}
-
-	preempt_disable();
-	__count_vm_events(PGDEACTIVATE, nr_active);
-
-	__mod_zone_page_state(zone, NR_ACTIVE_FILE,
-			      -count[LRU_ACTIVE_FILE]);
-	__mod_zone_page_state(zone, NR_INACTIVE_FILE,
-			      -count[LRU_INACTIVE_FILE]);
-	__mod_zone_page_state(zone, NR_ACTIVE_ANON,
-			      -count[LRU_ACTIVE_ANON]);
-	__mod_zone_page_state(zone, NR_INACTIVE_ANON,
-			      -count[LRU_INACTIVE_ANON]);
-
-	*nr_anon = count[LRU_ACTIVE_ANON] + count[LRU_INACTIVE_ANON];
-	*nr_file = count[LRU_ACTIVE_FILE] + count[LRU_INACTIVE_FILE];
-
-	__mod_zone_page_state(zone, NR_ISOLATED_ANON, *nr_anon);
-	__mod_zone_page_state(zone, NR_ISOLATED_FILE, *nr_file);
-	preempt_enable();
-}
-
 /*
  * shrink_inactive_list() is a helper for shrink_zone().  It returns the number
  * of reclaimed pages
@@ -1263,8 +1217,6 @@ shrink_inactive_list(unsigned long nr_to_scan, struct mem_cgroup_zone *mz,
 	unsigned long nr_scanned;
 	unsigned long nr_reclaimed = 0;
 	unsigned long nr_taken;
-	unsigned long nr_anon;
-	unsigned long nr_file;
 	unsigned long nr_dirty = 0;
 	unsigned long nr_writeback = 0;
 	isolate_mode_t isolate_mode = 0;
@@ -1292,6 +1244,10 @@ shrink_inactive_list(unsigned long nr_to_scan, struct mem_cgroup_zone *mz,
 
 	nr_taken = isolate_lru_pages(nr_to_scan, lruvec, &page_list,
 				     &nr_scanned, sc, isolate_mode, lru);
+
+	__mod_zone_page_state(zone, NR_LRU_BASE + lru, -nr_taken);
+	__mod_zone_page_state(zone, NR_ISOLATED_ANON + file, nr_taken);
+
 	if (global_reclaim(sc)) {
 		zone->pages_scanned += nr_scanned;
 		if (current_is_kswapd())
@@ -1306,15 +1262,12 @@ shrink_inactive_list(unsigned long nr_to_scan, struct mem_cgroup_zone *mz,
 	if (nr_taken == 0)
 		return 0;
 
-	update_isolated_counts(mz, &page_list, &nr_anon, &nr_file);
-
 	nr_reclaimed = shrink_page_list(&page_list, zone, sc,
 						&nr_dirty, &nr_writeback);
 
 	spin_lock_irq(&zone->lru_lock);
 
-	reclaim_stat->recent_scanned[0] += nr_anon;
-	reclaim_stat->recent_scanned[1] += nr_file;
+	reclaim_stat->recent_scanned[file] += nr_taken;
 
 	if (global_reclaim(sc)) {
 		if (current_is_kswapd())
@@ -1327,8 +1280,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct mem_cgroup_zone *mz,
 
 	putback_inactive_pages(mz, &page_list);
 
-	__mod_zone_page_state(zone, NR_ISOLATED_ANON, -nr_anon);
-	__mod_zone_page_state(zone, NR_ISOLATED_FILE, -nr_file);
+	__mod_zone_page_state(zone, NR_ISOLATED_ANON + file, -nr_taken);
 
 	spin_unlock_irq(&zone->lru_lock);