
Commit eb4866d0 authored by Dave Hansen, committed by Linus Torvalds

make /proc/$pid/numa_maps gather_stats() take variable page size



We need to teach the numa_maps code about transparent huge pages.  The
first step is to teach gather_stats() that the pte it is dealing with
might represent more than one page.

Note that we will use this in a moment for transparent huge pages since
they use a single pmd_t which _acts_ as a "surrogate" for a bunch of
smaller pte_t's.
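
For illustration only, here is a rough sketch of how a THP-aware walker
might use the new argument. This is not part of this patch: the use of
pmd_trans_huge(), HPAGE_PMD_SIZE, md->vma, and the complete absence of
splitting/locking handling are simplifying assumptions, not the actual
follow-up code.

	/*
	 * Sketch only: a THP pmd stands in for HPAGE_PMD_SIZE / PAGE_SIZE
	 * base pages, so pass that count instead of 1.  Treating the pmd
	 * as a pte assumes the two share a layout; a real caller also has
	 * to deal with huge-page splitting and locking.
	 */
	if (pmd_trans_huge(*pmd)) {
		pte_t huge_pte = *(pte_t *)pmd;
		struct page *page = vm_normal_page(md->vma, addr, huge_pte);

		if (page)
			gather_stats(page, md, pte_dirty(huge_pte),
				     HPAGE_PMD_SIZE / PAGE_SIZE);
		return 0;
	}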

I'm a _bit_ unhappy that this interface counts in hugetlbfs page sizes
for hugetlbfs pages and PAGE_SIZE for normal pages.  That means that to
figure out how many _bytes_ "dirty=1" means, you must first know the
hugetlbfs page size.  That's easier said than done, especially if you
don't have visibility into the mount.

But that's probably a discussion for another day, especially since it
would change behavior to fix it.  But, just in case anyone wonders why
this patch only passes a '1' in the hugetlb case...
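
As a purely hypothetical illustration of that unit problem, a userspace
consumer has to pick the byte multiplier itself. The helper below (not
part of this patch) assumes the mapping uses the system default
hugetlbfs page size reported in /proc/meminfo, which is exactly the
guess the paragraph above complains about.

/*
 * Hypothetical userspace helper: turn a per-page count from
 * /proc/<pid>/numa_maps (e.g. "dirty=1") into bytes.  Normal mappings
 * count in base pages; hugetlbfs mappings count in hugetlbfs pages.
 * We assume the default hugetlbfs page size from /proc/meminfo, which
 * may not match the page size of the actual hugetlbfs mount.
 */
#include <stdio.h>
#include <unistd.h>

static long default_hugepage_bytes(void)
{
	char line[128];
	long kb = 0;
	FILE *f = fopen("/proc/meminfo", "r");

	if (!f)
		return -1;
	while (fgets(line, sizeof(line), f))
		if (sscanf(line, "Hugepagesize: %ld kB", &kb) == 1)
			break;
	fclose(f);
	return kb * 1024;
}

int main(void)
{
	long count = 1;				/* e.g. "dirty=1" */
	long base = sysconf(_SC_PAGESIZE);	/* normal mapping unit */
	long huge = default_hugepage_bytes();	/* hugetlbfs mapping unit */

	printf("normal mapping:    %ld bytes\n", count * base);
	printf("hugetlbfs mapping: %ld bytes\n", count * huge);
	return 0;
}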

Signed-off-by: Dave Hansen <dave@linux.vnet.ibm.com>
Acked-by: Hugh Dickins <hughd@google.com>
Acked-by: David Rientjes <rientjes@google.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 38867a28
+11 −10
@@ -877,30 +877,31 @@ struct numa_maps_private {
 	struct numa_maps md;
 };

-static void gather_stats(struct page *page, struct numa_maps *md, int pte_dirty)
+static void gather_stats(struct page *page, struct numa_maps *md, int pte_dirty,
+			unsigned long nr_pages)
 {
 	int count = page_mapcount(page);

-	md->pages++;
+	md->pages += nr_pages;
 	if (pte_dirty || PageDirty(page))
-		md->dirty++;
+		md->dirty += nr_pages;

 	if (PageSwapCache(page))
-		md->swapcache++;
+		md->swapcache += nr_pages;

 	if (PageActive(page) || PageUnevictable(page))
-		md->active++;
+		md->active += nr_pages;

 	if (PageWriteback(page))
-		md->writeback++;
+		md->writeback += nr_pages;

 	if (PageAnon(page))
-		md->anon++;
+		md->anon += nr_pages;

 	if (count > md->mapcount_max)
 		md->mapcount_max = count;

-	md->node[page_to_nid(page)]++;
+	md->node[page_to_nid(page)] += nr_pages;
 }

 static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
@@ -931,7 +932,7 @@ static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
 		if (!node_isset(nid, node_states[N_HIGH_MEMORY]))
 			continue;

-		gather_stats(page, md, pte_dirty(*pte));
+		gather_stats(page, md, pte_dirty(*pte), 1);

 	} while (pte++, addr += PAGE_SIZE, addr != end);
 	pte_unmap_unlock(orig_pte, ptl);
@@ -952,7 +953,7 @@ static int gather_hugetbl_stats(pte_t *pte, unsigned long hmask,
 		return 0;

 	md = walk->private;
-	gather_stats(page, md, pte_dirty(*pte));
+	gather_stats(page, md, pte_dirty(*pte), 1);
 	return 0;
 }