
Commit d563c050 authored by Hillf Danton, committed by Linus Torvalds

vmscan: handle isolated pages with lru lock released



When shrinking the inactive lru list, isolated pages are queued on a locally
private list, so the lock hold time can be reduced by counting the pages
without lock protection.
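
In shrink_inactive_list(), the lru lock is now held only while pages are
moved onto the private list, and is dropped before any counting takes
place.  A condensed sketch of the resulting flow (arguments to
isolate_lru_pages() abridged; see the diff below for the real code):

	LIST_HEAD(page_list);		/* private list, invisible to others */

	spin_lock_irq(&zone->lru_lock);
	nr_taken = isolate_lru_pages(nr_to_scan, mz, &page_list, ...);
	spin_unlock_irq(&zone->lru_lock);	/* drop the lock early */

	if (nr_taken == 0)
		return 0;

	/* page_list is ours alone, so it can be counted without the lock */
	update_isolated_counts(mz, &page_list, &nr_anon, &nr_file);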

To achieve that, first, updating the reclaim stat is delayed until the
putback stage, after the lru lock has been reacquired.
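
Concretely, the recent_scanned bookkeeping moves out of
update_isolated_counts() and into the putback path of
shrink_inactive_list(), where the lock is held again (last hunk of the
diff below):

	spin_lock_irq(&zone->lru_lock);

	reclaim_stat->recent_scanned[0] += nr_anon;
	reclaim_stat->recent_scanned[1] += nr_file;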

Second, operations on the vm and zone stats are now performed with
preemption disabled, which is sufficient because they are per-cpu operations.
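
The double-underscore helpers (__count_vm_events(), __mod_zone_page_state())
are the non-atomic per-cpu variants, so a preempt_disable()/preempt_enable()
pair keeps them safe without the lru lock; update_isolated_counts() now
does, in essence:

	preempt_disable();
	__count_vm_events(PGDEACTIVATE, nr_active);
	/* ... NR_ACTIVE_ANON/NR_ACTIVE_FILE adjustments elided ... */
	__mod_zone_page_state(zone, NR_ISOLATED_ANON, *nr_anon);
	__mod_zone_page_state(zone, NR_ISOLATED_FILE, *nr_file);
	preempt_enable();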

Signed-off-by: Hillf Danton <dhillf@gmail.com>
Acked-by: Hugh Dickins <hughd@google.com>
Reviewed-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 978ea78b
mm/vmscan.c +10 −11
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1413,7 +1413,6 @@ update_isolated_counts(struct mem_cgroup_zone *mz,
 		       unsigned long *nr_anon,
 		       unsigned long *nr_file)
 {
-	struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(mz);
 	struct zone *zone = mz->zone;
 	unsigned int count[NR_LRU_LISTS] = { 0, };
 	unsigned long nr_active = 0;
@@ -1434,6 +1433,7 @@ update_isolated_counts(struct mem_cgroup_zone *mz,
 		count[lru] += numpages;
 	}
 
+	preempt_disable();
 	__count_vm_events(PGDEACTIVATE, nr_active);
 
 	__mod_zone_page_state(zone, NR_ACTIVE_FILE,
@@ -1448,8 +1448,9 @@ update_isolated_counts(struct mem_cgroup_zone *mz,
 	*nr_anon = count[LRU_ACTIVE_ANON] + count[LRU_INACTIVE_ANON];
 	*nr_file = count[LRU_ACTIVE_FILE] + count[LRU_INACTIVE_FILE];
 
-	reclaim_stat->recent_scanned[0] += *nr_anon;
-	reclaim_stat->recent_scanned[1] += *nr_file;
+	__mod_zone_page_state(zone, NR_ISOLATED_ANON, *nr_anon);
+	__mod_zone_page_state(zone, NR_ISOLATED_FILE, *nr_file);
+	preempt_enable();
 }
 
 /*
@@ -1511,6 +1512,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct mem_cgroup_zone *mz,
 	unsigned long nr_writeback = 0;
 	isolate_mode_t isolate_mode = ISOLATE_INACTIVE;
 	struct zone *zone = mz->zone;
+	struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(mz);
 
 	while (unlikely(too_many_isolated(zone, file, sc))) {
 		congestion_wait(BLK_RW_ASYNC, HZ/10);
@@ -1544,19 +1546,13 @@ shrink_inactive_list(unsigned long nr_to_scan, struct mem_cgroup_zone *mz,
 			__count_zone_vm_events(PGSCAN_DIRECT, zone,
 					       nr_scanned);
 	}
+	spin_unlock_irq(&zone->lru_lock);
 
-	if (nr_taken == 0) {
-		spin_unlock_irq(&zone->lru_lock);
+	if (nr_taken == 0)
 		return 0;
-	}
 
 	update_isolated_counts(mz, &page_list, &nr_anon, &nr_file);
 
-	__mod_zone_page_state(zone, NR_ISOLATED_ANON, nr_anon);
-	__mod_zone_page_state(zone, NR_ISOLATED_FILE, nr_file);
-
-	spin_unlock_irq(&zone->lru_lock);
-
 	nr_reclaimed = shrink_page_list(&page_list, mz, sc, priority,
 						&nr_dirty, &nr_writeback);
 
@@ -1569,6 +1565,9 @@ shrink_inactive_list(unsigned long nr_to_scan, struct mem_cgroup_zone *mz,
 
 	spin_lock_irq(&zone->lru_lock);
 
+	reclaim_stat->recent_scanned[0] += nr_anon;
+	reclaim_stat->recent_scanned[1] += nr_file;
+
 	if (current_is_kswapd())
 		__count_vm_events(KSWAPD_STEAL, nr_reclaimed);
 	__count_zone_vm_events(PGSTEAL, zone, nr_reclaimed);