Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit b40a0371 authored by Linux Build Service Account, committed by Gerrit - the friendly Code Review server
Browse files

Merge "mm: delete NR_PAGES_SCANNED and pgdat_reclaimable()"

parents ca315166 a1d37a07
Loading
Loading
Loading
Loading
+0 −1
Original line number Diff line number Diff line
@@ -158,7 +158,6 @@ enum node_stat_item {
	NR_UNEVICTABLE,		/*  "     "     "   "       "         */
	NR_ISOLATED_ANON,	/* Temporary isolated pages from anon lru */
	NR_ISOLATED_FILE,	/* Temporary isolated pages from file lru */
	NR_PAGES_SCANNED,	/* pages scanned since last reclaim */
	WORKINGSET_REFAULT,
	WORKINGSET_ACTIVATE,
	WORKINGSET_NODERECLAIM,
+0 −1
Original line number Diff line number Diff line
@@ -84,7 +84,6 @@ extern unsigned long highest_memmap_pfn;
 */
extern int isolate_lru_page(struct page *page);
extern void putback_lru_page(struct page *page);
extern bool pgdat_reclaimable(struct pglist_data *pgdat);

/*
 * in mm/rmap.c:
+0 −11
Original line number Diff line number Diff line
@@ -1099,14 +1099,10 @@ static void free_pcppages_bulk(struct zone *zone, int count,
{
	int migratetype = 0;
	int batch_free = 0;
	unsigned long nr_scanned;
	bool isolated_pageblocks;

	spin_lock(&zone->lock);
	isolated_pageblocks = has_isolate_pageblock(zone);
	nr_scanned = node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED);
	if (nr_scanned)
		__mod_node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED, -nr_scanned);

	while (count) {
		struct page *page;
@@ -1159,12 +1155,7 @@ static void free_one_page(struct zone *zone,
				unsigned int order,
				int migratetype)
{
	unsigned long nr_scanned;
	spin_lock(&zone->lock);
	nr_scanned = node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED);
	if (nr_scanned)
		__mod_node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED, -nr_scanned);

	if (unlikely(has_isolate_pageblock(zone) ||
		is_migrate_isolate(migratetype))) {
		migratetype = get_pfnblock_migratetype(page, pfn);
@@ -4436,7 +4427,6 @@ void show_free_areas(unsigned int filter)
#endif
			" writeback_tmp:%lukB"
			" unstable:%lukB"
			" pages_scanned:%lu"
			" all_unreclaimable? %s"
			"\n",
			pgdat->node_id,
@@ -4459,7 +4449,6 @@ void show_free_areas(unsigned int filter)
#endif
			K(node_page_state(pgdat, NR_WRITEBACK_TEMP)),
			K(node_page_state(pgdat, NR_UNSTABLE_NFS)),
			node_page_state(pgdat, NR_PAGES_SCANNED),
			pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES ?
				"yes" : "no");
	}
+0 −9
Original line number Diff line number Diff line
@@ -235,12 +235,6 @@ unsigned long pgdat_reclaimable_pages(struct pglist_data *pgdat)
	return nr;
}

/*
 * Heuristic removed by this commit: a node was deemed "reclaimable" while
 * the NR_PAGES_SCANNED counter (pages scanned since the last reclaim, per
 * the enum comment in mmzone.h) stayed below six times the node's
 * reclaimable page count as reported by pgdat_reclaimable_pages().
 * NOTE(review): the factor 6 is an arbitrary tuning constant from the
 * original heuristic — no further justification is visible here.
 */
bool pgdat_reclaimable(struct pglist_data *pgdat)
{
	return node_page_state_snapshot(pgdat, NR_PAGES_SCANNED) <
		pgdat_reclaimable_pages(pgdat) * 6;
}

/**
 * lruvec_lru_size -  Returns the number of pages on the given LRU list.
 * @lruvec: lru vector
@@ -1849,7 +1843,6 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
	reclaim_stat->recent_scanned[file] += nr_taken;

	if (global_reclaim(sc)) {
		__mod_node_page_state(pgdat, NR_PAGES_SCANNED, nr_scanned);
		if (current_is_kswapd())
			__count_vm_events(PGSCAN_KSWAPD, nr_scanned);
		else
@@ -2038,8 +2031,6 @@ static void shrink_active_list(unsigned long nr_to_scan,
	__mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, nr_taken);
	reclaim_stat->recent_scanned[file] += nr_taken;

	if (global_reclaim(sc))
		__mod_node_page_state(pgdat, NR_PAGES_SCANNED, nr_scanned);
	__count_vm_events(PGREFILL, nr_scanned);

	spin_unlock_irq(&pgdat->lru_lock);
+3 −19
Original line number Diff line number Diff line
@@ -954,7 +954,6 @@ const char * const vmstat_text[] = {
	"nr_unevictable",
	"nr_isolated_anon",
	"nr_isolated_file",
	"nr_pages_scanned",
	"workingset_refault",
	"workingset_activate",
	"workingset_nodereclaim",
@@ -1379,7 +1378,6 @@ static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
		   "\n        min      %lu"
		   "\n        low      %lu"
		   "\n        high     %lu"
		   "\n   node_scanned  %lu"
		   "\n        spanned  %lu"
		   "\n        present  %lu"
		   "\n        managed  %lu",
@@ -1387,7 +1385,6 @@ static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
		   min_wmark_pages(zone),
		   low_wmark_pages(zone),
		   high_wmark_pages(zone),
		   node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED),
		   zone->spanned_pages,
		   zone->present_pages,
		   zone->managed_pages);
@@ -1588,22 +1585,9 @@ int vmstat_refresh(struct ctl_table *table, int write,
	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) {
		val = atomic_long_read(&vm_zone_stat[i]);
		if (val < 0) {
			switch (i) {
			case NR_PAGES_SCANNED:
				/*
				 * This is often seen to go negative in
				 * recent kernels, but not to go permanently
				 * negative.  Whilst it would be nicer not to
				 * have exceptions, rooting them out would be
				 * another task, of rather low priority.
				 */
				break;
			default:
			pr_warn("%s: %s %ld\n",
				__func__, vmstat_text[i], val);
			err = -EINVAL;
				break;
			}
		}
	}
	if (err)