Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit efdc9490 authored by Andy Lutomirski, committed by Linus Torvalds
Browse files

mm: fix memcg stack accounting for sub-page stacks

We should account for stacks regardless of stack size, and we need to
account in sub-page units if THREAD_SIZE < PAGE_SIZE.  Change the units
to kilobytes and move the accounting into account_kernel_stack().

Fixes: 12580e4b ("mm: memcontrol: report kernel stack usage in cgroup2 memory.stat")
Link: http://lkml.kernel.org/r/9b5314e3ee5eda61b0317ec1563768602c1ef438.1468523549.git.luto@kernel.org


Signed-off-by: Andy Lutomirski <luto@kernel.org>
Cc: Vladimir Davydov <vdavydov@virtuozzo.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@kernel.org>
Reviewed-by: Josh Poimboeuf <jpoimboe@redhat.com>
Reviewed-by: Vladimir Davydov <vdavydov@virtuozzo.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent d30dd8be
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -52,7 +52,7 @@ enum mem_cgroup_stat_index {
	MEM_CGROUP_STAT_SWAP,		/* # of pages, swapped out */
	MEM_CGROUP_STAT_NSTATS,
	/* default hierarchy stats */
	MEMCG_KERNEL_STACK = MEM_CGROUP_STAT_NSTATS,
	MEMCG_KERNEL_STACK_KB = MEM_CGROUP_STAT_NSTATS,
	MEMCG_SLAB_RECLAIMABLE,
	MEMCG_SLAB_UNRECLAIMABLE,
	MEMCG_SOCK,
+8 −11
Original line number Diff line number Diff line
@@ -165,20 +165,12 @@ static unsigned long *alloc_thread_stack_node(struct task_struct *tsk,
	struct page *page = alloc_pages_node(node, THREADINFO_GFP,
					     THREAD_SIZE_ORDER);

	if (page)
		memcg_kmem_update_page_stat(page, MEMCG_KERNEL_STACK,
					    1 << THREAD_SIZE_ORDER);

	return page ? page_address(page) : NULL;
}

/*
 * Free a THREAD_SIZE-sized stack previously returned by
 * alloc_thread_stack_node().
 *
 * Note: the diff rendering had merged the pre-commit body (per-page memcg
 * accounting plus a second __free_pages() call) with the post-commit line,
 * which read as a double free.  Per this commit, memcg stack accounting
 * lives in account_kernel_stack() (in KB units), so freeing here is just
 * returning the pages to the allocator.
 */
static inline void free_thread_stack(unsigned long *stack)
{
	__free_pages(virt_to_page(stack), THREAD_SIZE_ORDER);
}
# else
static struct kmem_cache *thread_stack_cache;
@@ -223,10 +215,15 @@ static struct kmem_cache *mm_cachep;

static void account_kernel_stack(unsigned long *stack, int account)
{
	struct zone *zone = page_zone(virt_to_page(stack));
	/* All stack pages are in the same zone and belong to the same memcg. */
	struct page *first_page = virt_to_page(stack);

	mod_zone_page_state(zone, NR_KERNEL_STACK_KB,
	mod_zone_page_state(page_zone(first_page), NR_KERNEL_STACK_KB,
			    THREAD_SIZE / 1024 * account);

	memcg_kmem_update_page_stat(
		first_page, MEMCG_KERNEL_STACK_KB,
		account * (THREAD_SIZE / 1024));
}

void free_task(struct task_struct *tsk)
+1 −1
Original line number Diff line number Diff line
@@ -5171,7 +5171,7 @@ static int memory_stat_show(struct seq_file *m, void *v)
	seq_printf(m, "file %llu\n",
		   (u64)stat[MEM_CGROUP_STAT_CACHE] * PAGE_SIZE);
	seq_printf(m, "kernel_stack %llu\n",
		   (u64)stat[MEMCG_KERNEL_STACK] * PAGE_SIZE);
		   (u64)stat[MEMCG_KERNEL_STACK_KB] * 1024);
	seq_printf(m, "slab %llu\n",
		   (u64)(stat[MEMCG_SLAB_RECLAIMABLE] +
			 stat[MEMCG_SLAB_UNRECLAIMABLE]) * PAGE_SIZE);