
Commit 9a4caf1e authored by Johannes Weiner, committed by Linus Torvalds

mm: memcontrol: provide shmem statistics

Cgroups currently don't report how much shmem they use, which can be
useful data to have, in particular since shmem is included in the
cache/file item while being reclaimed like anonymous memory.

Add a counter to track shmem pages during charging and uncharging.

Link: http://lkml.kernel.org/r/20170221164343.32252-1-hannes@cmpxchg.org
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Reported-by: Chris Down <cdown@fb.com>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Vladimir Davydov <vdavydov.dev@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent cf8496ea
Documentation/cgroup-v2.txt +5 −0
@@ -871,6 +871,11 @@ PAGE_SIZE multiple when read back.
 
 		Amount of memory used in network transmission buffers
 
+	  shmem
+
+		Amount of cached filesystem data that is swap-backed,
+		such as tmpfs, shm segments, shared anonymous mmap()s
+
 	  file_mapped
 
 		Amount of cached filesystem data mapped with mmap()
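For reference, the new field surfaces as a byte value in each cgroup's memory.stat file. Below is a minimal, hypothetical userspace reader, not part of the patch; it assumes cgroup2 is mounted at /sys/fs/cgroup and that a group named "mygroup" already exists:

/* read_shmem.c: hypothetical example, not part of this commit. */
#include <stdio.h>
#include <string.h>

/* Return the "shmem" value in bytes from a cgroup's memory.stat,
 * or -1 if the file or the field cannot be found. */
static long long read_shmem_bytes(const char *cgroup_dir)
{
	char path[512], key[64];
	long long val;
	FILE *f;

	snprintf(path, sizeof(path), "%s/memory.stat", cgroup_dir);
	f = fopen(path, "r");
	if (!f)
		return -1;
	while (fscanf(f, "%63s %lld", key, &val) == 2) {
		if (!strcmp(key, "shmem")) {
			fclose(f);
			return val;
		}
	}
	fclose(f);
	return -1;
}

int main(void)
{
	/* "mygroup" is an assumed, pre-created cgroup. */
	printf("shmem: %lld bytes\n",
	       read_shmem_bytes("/sys/fs/cgroup/mygroup"));
	return 0;
}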
include/linux/memcontrol.h +1 −0
@@ -46,6 +46,7 @@ enum mem_cgroup_stat_index {
 	MEM_CGROUP_STAT_CACHE,		/* # of pages charged as cache */
 	MEM_CGROUP_STAT_RSS,		/* # of pages charged as anon rss */
 	MEM_CGROUP_STAT_RSS_HUGE,	/* # of pages charged as anon huge */
+	MEM_CGROUP_STAT_SHMEM,		/* # of pages charged as shmem */
 	MEM_CGROUP_STAT_FILE_MAPPED,	/* # of pages charged as file rss */
 	MEM_CGROUP_STAT_DIRTY,          /* # of dirty pages in page cache */
 	MEM_CGROUP_STAT_WRITEBACK,	/* # of pages under writeback */
mm/memcontrol.c +20 −8
@@ -104,6 +104,7 @@ static const char * const mem_cgroup_stat_names[] = {
 	"cache",
 	"rss",
 	"rss_huge",
+	"shmem",
 	"mapped_file",
 	"dirty",
 	"writeback",
@@ -608,9 +609,13 @@ static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
 	if (PageAnon(page))
 		__this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_RSS],
 				nr_pages);
-	else
+	else {
 		__this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_CACHE],
 				nr_pages);
+		if (PageSwapBacked(page))
+			__this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_SHMEM],
+				       nr_pages);
+	}
 
 	if (compound) {
 		VM_BUG_ON_PAGE(!PageTransHuge(page), page);
@@ -5208,6 +5213,8 @@ static int memory_stat_show(struct seq_file *m, void *v)
 	seq_printf(m, "sock %llu\n",
 		   (u64)stat[MEMCG_SOCK] * PAGE_SIZE);
 
+	seq_printf(m, "shmem %llu\n",
+		   (u64)stat[MEM_CGROUP_STAT_SHMEM] * PAGE_SIZE);
 	seq_printf(m, "file_mapped %llu\n",
 		   (u64)stat[MEM_CGROUP_STAT_FILE_MAPPED] * PAGE_SIZE);
 	seq_printf(m, "file_dirty %llu\n",
@@ -5476,8 +5483,8 @@ void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg,
 
 static void uncharge_batch(struct mem_cgroup *memcg, unsigned long pgpgout,
 			   unsigned long nr_anon, unsigned long nr_file,
-			   unsigned long nr_huge, unsigned long nr_kmem,
-			   struct page *dummy_page)
+			   unsigned long nr_kmem, unsigned long nr_huge,
+			   unsigned long nr_shmem, struct page *dummy_page)
 {
 	unsigned long nr_pages = nr_anon + nr_file + nr_kmem;
 	unsigned long flags;
@@ -5495,6 +5502,7 @@ static void uncharge_batch(struct mem_cgroup *memcg, unsigned long pgpgout,
 	__this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_RSS], nr_anon);
 	__this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_CACHE], nr_file);
 	__this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_RSS_HUGE], nr_huge);
+	__this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_SHMEM], nr_shmem);
 	__this_cpu_add(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGOUT], pgpgout);
 	__this_cpu_add(memcg->stat->nr_page_events, nr_pages);
 	memcg_check_events(memcg, dummy_page);
@@ -5507,6 +5515,7 @@ static void uncharge_batch(struct mem_cgroup *memcg, unsigned long pgpgout,
 static void uncharge_list(struct list_head *page_list)
 {
 	struct mem_cgroup *memcg = NULL;
+	unsigned long nr_shmem = 0;
 	unsigned long nr_anon = 0;
 	unsigned long nr_file = 0;
 	unsigned long nr_huge = 0;
@@ -5539,9 +5548,9 @@ static void uncharge_list(struct list_head *page_list)
 		if (memcg != page->mem_cgroup) {
 			if (memcg) {
 				uncharge_batch(memcg, pgpgout, nr_anon, nr_file,
-					       nr_huge, nr_kmem, page);
-				pgpgout = nr_anon = nr_file =
-					nr_huge = nr_kmem = 0;
+					       nr_kmem, nr_huge, nr_shmem, page);
+				pgpgout = nr_anon = nr_file = nr_kmem = 0;
+				nr_huge = nr_shmem = 0;
 			}
 			memcg = page->mem_cgroup;
 		}
@@ -5555,8 +5564,11 @@ static void uncharge_list(struct list_head *page_list)
 			}
 			if (PageAnon(page))
 				nr_anon += nr_pages;
-			else
+			else {
 				nr_file += nr_pages;
+				if (PageSwapBacked(page))
+					nr_shmem += nr_pages;
+			}
 			pgpgout++;
 		} else {
 			nr_kmem += 1 << compound_order(page);
@@ -5568,7 +5580,7 @@ static void uncharge_list(struct list_head *page_list)
 
 	if (memcg)
 		uncharge_batch(memcg, pgpgout, nr_anon, nr_file,
-			       nr_huge, nr_kmem, page);
+			       nr_kmem, nr_huge, nr_shmem, page);
 }
 
 /**
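One way to exercise the new accounting is sketched below, under stated assumptions: the process must already sit in a cgroup2 group with the memory controller enabled, and the 64MB figure is arbitrary. Shared anonymous mappings are swap-backed, so once the pages are touched they should appear in both the existing file counter and the new shmem counter of that group's memory.stat.

/* shmem_demo.c: hypothetical demo, not part of this commit. */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	size_t len = 64UL << 20;	/* 64MB, arbitrary demo size */
	char *p;

	/* A shared anonymous mapping is backed by shmem, so its
	 * pages are PageSwapBacked and land in the new counter. */
	p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		 MAP_SHARED | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	memset(p, 0x5a, len);	/* touch every page so it is charged */
	pause();		/* park here; inspect memory.stat now */
	return 0;
}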