Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 11fb9989 authored by Mel Gorman's avatar Mel Gorman Committed by Linus Torvalds
Browse files

mm: move most file-based accounting to the node

There are now a number of accounting oddities such as mapped file pages
being accounted for on the node while the total number of file pages is
accounted on the zone.  This can be coped with to some extent but it's
confusing so this patch moves the relevant file-based accounting.  Due to
throttling logic in the page allocator for reliable OOM detection, it is
still necessary to track dirty and writeback pages on a per-zone basis.

[mgorman@techsingularity.net: fix NR_ZONE_WRITE_PENDING accounting]
  Link: http://lkml.kernel.org/r/1468404004-5085-5-git-send-email-mgorman@techsingularity.net
Link: http://lkml.kernel.org/r/1467970510-21195-20-git-send-email-mgorman@techsingularity.net


Signed-off-by: default avatarMel Gorman <mgorman@techsingularity.net>
Acked-by: default avatarVlastimil Babka <vbabka@suse.cz>
Acked-by: default avatarMichal Hocko <mhocko@suse.com>
Cc: Hillf Danton <hillf.zj@alibaba-inc.com>
Acked-by: default avatarJohannes Weiner <hannes@cmpxchg.org>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Rik van Riel <riel@surriel.com>
Signed-off-by: default avatarAndrew Morton <akpm@linux-foundation.org>
Signed-off-by: default avatarLinus Torvalds <torvalds@linux-foundation.org>
parent 4b9d0fab
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -102,7 +102,7 @@ static void appldata_get_mem_data(void *data)
	mem_data->totalhigh = P2K(val.totalhigh);
	mem_data->freehigh  = P2K(val.freehigh);
	mem_data->bufferram = P2K(val.bufferram);
	mem_data->cached    = P2K(global_page_state(NR_FILE_PAGES)
	mem_data->cached    = P2K(global_node_page_state(NR_FILE_PAGES)
				- val.bufferram);

	si_swapinfo(&val);
+4 −4
Original line number Diff line number Diff line
@@ -49,16 +49,16 @@ void show_mem(unsigned int filter)
		global_node_page_state(NR_ACTIVE_FILE)),
	       (global_node_page_state(NR_INACTIVE_ANON) +
		global_node_page_state(NR_INACTIVE_FILE)),
	       global_page_state(NR_FILE_DIRTY),
	       global_page_state(NR_WRITEBACK),
	       global_page_state(NR_UNSTABLE_NFS),
	       global_node_page_state(NR_FILE_DIRTY),
	       global_node_page_state(NR_WRITEBACK),
	       global_node_page_state(NR_UNSTABLE_NFS),
	       global_page_state(NR_FREE_PAGES),
	       (global_page_state(NR_SLAB_RECLAIMABLE) +
		global_page_state(NR_SLAB_UNRECLAIMABLE)),
	       global_node_page_state(NR_FILE_MAPPED),
	       global_page_state(NR_PAGETABLE),
	       global_page_state(NR_BOUNCE),
	       global_page_state(NR_FILE_PAGES),
	       global_node_page_state(NR_FILE_PAGES),
	       get_nr_swap_pages());

	for_each_zone(zone) {
+8 −8
Original line number Diff line number Diff line
@@ -118,28 +118,28 @@ static ssize_t node_read_meminfo(struct device *dev,
		       "Node %d ShmemPmdMapped: %8lu kB\n"
#endif
			,
		       nid, K(sum_zone_node_page_state(nid, NR_FILE_DIRTY)),
		       nid, K(sum_zone_node_page_state(nid, NR_WRITEBACK)),
		       nid, K(sum_zone_node_page_state(nid, NR_FILE_PAGES)),
		       nid, K(node_page_state(pgdat, NR_FILE_DIRTY)),
		       nid, K(node_page_state(pgdat, NR_WRITEBACK)),
		       nid, K(node_page_state(pgdat, NR_FILE_PAGES)),
		       nid, K(node_page_state(pgdat, NR_FILE_MAPPED)),
		       nid, K(node_page_state(pgdat, NR_ANON_MAPPED)),
		       nid, K(i.sharedram),
		       nid, sum_zone_node_page_state(nid, NR_KERNEL_STACK) *
				THREAD_SIZE / 1024,
		       nid, K(sum_zone_node_page_state(nid, NR_PAGETABLE)),
		       nid, K(sum_zone_node_page_state(nid, NR_UNSTABLE_NFS)),
		       nid, K(node_page_state(pgdat, NR_UNSTABLE_NFS)),
		       nid, K(sum_zone_node_page_state(nid, NR_BOUNCE)),
		       nid, K(sum_zone_node_page_state(nid, NR_WRITEBACK_TEMP)),
		       nid, K(node_page_state(pgdat, NR_WRITEBACK_TEMP)),
		       nid, K(sum_zone_node_page_state(nid, NR_SLAB_RECLAIMABLE) +
				sum_zone_node_page_state(nid, NR_SLAB_UNRECLAIMABLE)),
		       nid, K(sum_zone_node_page_state(nid, NR_SLAB_RECLAIMABLE)),
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
		       nid, K(sum_zone_node_page_state(nid, NR_SLAB_UNRECLAIMABLE)),
		       nid, K(sum_zone_node_page_state(nid, NR_ANON_THPS) *
		       nid, K(node_page_state(pgdat, NR_ANON_THPS) *
				       HPAGE_PMD_NR),
		       nid, K(sum_zone_node_page_state(nid, NR_SHMEM_THPS) *
		       nid, K(node_page_state(pgdat, NR_SHMEM_THPS) *
				       HPAGE_PMD_NR),
		       nid, K(sum_zone_node_page_state(nid, NR_SHMEM_PMDMAPPED) *
		       nid, K(node_page_state(pgdat, NR_SHMEM_PMDMAPPED) *
				       HPAGE_PMD_NR));
#else
		       nid, K(sum_zone_node_page_state(nid, NR_SLAB_UNRECLAIMABLE)));
+2 −2
Original line number Diff line number Diff line
@@ -91,8 +91,8 @@ static unsigned long lowmem_scan(struct shrinker *s, struct shrink_control *sc)
	short selected_oom_score_adj;
	int array_size = ARRAY_SIZE(lowmem_adj);
	int other_free = global_page_state(NR_FREE_PAGES) - totalreserve_pages;
	int other_file = global_page_state(NR_FILE_PAGES) -
						global_page_state(NR_SHMEM) -
	int other_file = global_node_page_state(NR_FILE_PAGES) -
						global_node_page_state(NR_SHMEM) -
						total_swapcache_pages();

	if (lowmem_adj_size < array_size)
+4 −2
Original line number Diff line number Diff line
@@ -1864,7 +1864,8 @@ void osc_dec_unstable_pages(struct ptlrpc_request *req)
	LASSERT(page_count >= 0);

	for (i = 0; i < page_count; i++)
		dec_zone_page_state(desc->bd_iov[i].kiov_page, NR_UNSTABLE_NFS);
		dec_node_page_state(desc->bd_iov[i].kiov_page,
							NR_UNSTABLE_NFS);

	atomic_sub(page_count, &cli->cl_cache->ccc_unstable_nr);
	LASSERT(atomic_read(&cli->cl_cache->ccc_unstable_nr) >= 0);
@@ -1898,7 +1899,8 @@ void osc_inc_unstable_pages(struct ptlrpc_request *req)
	LASSERT(page_count >= 0);

	for (i = 0; i < page_count; i++)
		inc_zone_page_state(desc->bd_iov[i].kiov_page, NR_UNSTABLE_NFS);
		inc_node_page_state(desc->bd_iov[i].kiov_page,
							NR_UNSTABLE_NFS);

	LASSERT(atomic_read(&cli->cl_cache->ccc_unstable_nr) >= 0);
	atomic_add(page_count, &cli->cl_cache->ccc_unstable_nr);
Loading