Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 36917c9f, authored by the qctecmdr Service and committed via Gerrit (code review server)
Browse files

Merge "mm: compaction: fix the page state calculation in too_many_isolated"

parents 0573f602 6093c334
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -22,7 +22,7 @@

#define FOR_ALL_ZONES(xx) DMA_ZONE(xx) DMA32_ZONE(xx) xx##_NORMAL, HIGHMEM_ZONE(xx) xx##_MOVABLE

enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
enum vm_event_item { PGPGIN, PGPGOUT, PGPGOUTCLEAN, PSWPIN, PSWPOUT,
		FOR_ALL_ZONES(PGALLOC),
		FOR_ALL_ZONES(ALLOCSTALL),
		FOR_ALL_ZONES(PGSCAN_SKIP),
+37 −5
Original line number Diff line number Diff line
@@ -631,20 +631,52 @@ isolate_freepages_range(struct compact_control *cc,
}

/* Similar to reclaim, but different enough that they don't share logic */
static bool too_many_isolated(struct zone *zone)
/*
 * Return true if the pages isolated from this zone's node exceed half of
 * its (inactive + active) LRU pages.
 *
 * @safe: when non-zero, read the counters with node_page_state_snapshot(),
 *        which folds in the per-cpu deltas (slower but accurate); when
 *        zero, use the faster but approximate node_page_state().
 */
static bool __too_many_isolated(struct zone *zone, int safe)
{
	unsigned long active, inactive, isolated;

	if (safe) {
		/* accurate path: snapshot counters including per-cpu diffs */
		inactive = node_page_state_snapshot(zone->zone_pgdat,
			NR_INACTIVE_FILE) +
			node_page_state_snapshot(zone->zone_pgdat,
			NR_INACTIVE_ANON);
		active = node_page_state_snapshot(zone->zone_pgdat,
			NR_ACTIVE_FILE) +
			node_page_state_snapshot(zone->zone_pgdat,
			NR_ACTIVE_ANON);
		isolated = node_page_state_snapshot(zone->zone_pgdat,
			NR_ISOLATED_FILE) +
			node_page_state_snapshot(zone->zone_pgdat,
			NR_ISOLATED_ANON);
	} else {
		/* fast path: plain per-node counters, may lag per-cpu diffs */
		inactive = node_page_state(zone->zone_pgdat, NR_INACTIVE_FILE) +
			node_page_state(zone->zone_pgdat, NR_INACTIVE_ANON);
		active = node_page_state(zone->zone_pgdat, NR_ACTIVE_FILE) +
			node_page_state(zone->zone_pgdat, NR_ACTIVE_ANON);
		isolated = node_page_state(zone->zone_pgdat, NR_ISOLATED_FILE) +
			node_page_state(zone->zone_pgdat, NR_ISOLATED_ANON);
	}

	return isolated > (inactive + active) / 2;
}

/* Similar to reclaim, but different enough that they don't share logic */
static bool too_many_isolated(struct compact_control *cc)
{
	/*
	 * __too_many_isolated(safe=0) is fast but inaccurate, because it
	 * doesn't account for the per-cpu vm_stat_diff[] counters.  So if it
	 * looks like too_many_isolated() is about to return true, fall back
	 * to the slower, more accurate node_page_state_snapshot() path via
	 * __too_many_isolated(safe=1).  For MIGRATE_ASYNC compaction the
	 * expensive recheck is skipped entirely and false is returned.
	 */
	if (unlikely(__too_many_isolated(cc->zone, 0))) {
		if (cc->mode != MIGRATE_ASYNC)
			return __too_many_isolated(cc->zone, 1);
	}

	return false;
}

/**
 * isolate_migratepages_block() - isolate all migrate-able pages within
 *				  a single pageblock
@@ -682,7 +714,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
	 * list by either parallel reclaimers or compaction. If there are,
	 * delay for some time until fewer pages are isolated
	 */
	while (unlikely(too_many_isolated(zone))) {
	while (unlikely(too_many_isolated(cc))) {
		/* async migration should just abort */
		if (cc->mode == MIGRATE_ASYNC)
			return 0;
+4 −2
Original line number Diff line number Diff line
@@ -194,10 +194,12 @@ void __delete_from_page_cache(struct page *page, void *shadow)
	 * invalidate any existing cleancache entries.  We can't leave
	 * stale data around in the cleancache once our page is gone
	 */
	if (PageUptodate(page) && PageMappedToDisk(page))
	if (PageUptodate(page) && PageMappedToDisk(page)) {
		count_vm_event(PGPGOUTCLEAN);
		cleancache_put_page(page);
	else
	} else {
		cleancache_invalidate_page(mapping, page);
	}

	VM_BUG_ON_PAGE(PageTail(page), page);
	VM_BUG_ON_PAGE(page_mapped(page), page);
+1 −0
Original line number Diff line number Diff line
@@ -1099,6 +1099,7 @@ const char * const vmstat_text[] = {
	/* enum vm_event_item counters */
	"pgpgin",
	"pgpgout",
	"pgpgoutclean",
	"pswpin",
	"pswpout",