
Commit 29833315 authored by Johannes Weiner, committed by Linus Torvalds

mm: memcontrol: remove unnecessary PCG_USED pc->mem_cgroup valid flag

pc->mem_cgroup had to be left intact after uncharge for the final LRU
removal, and !PCG_USED indicated whether the page was uncharged.  But
since commit 0a31bc97 ("mm: memcontrol: rewrite uncharge API") pages
are uncharged after the final LRU removal.  Uncharge can simply clear
the pointer and the PCG_USED/PageCgroupUsed sites can test that instead.
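
As a rough standalone sketch of the before/after semantics (simplified
stand-in types for illustration only, not the kernel definitions; see
the diff below for those):

  struct mem_cgroup;

  #define PCG_USED 0x01

  /* Before: a separate flag marked "charged", because pc->mem_cgroup
   * had to stay valid after uncharge for the final LRU removal. */
  struct old_page_cgroup {
  	unsigned long flags;
  	struct mem_cgroup *mem_cgroup;
  };

  int old_page_cgroup_used(struct old_page_cgroup *pc)
  {
  	return !!(pc->flags & PCG_USED);
  }

  /* After: uncharge runs only after the final LRU removal, so it can
   * clear the pointer, and the pointer itself is the "used" flag. */
  struct new_page_cgroup {
  	struct mem_cgroup *mem_cgroup;	/* NULL means uncharged */
  };

  void new_uncharge(struct new_page_cgroup *pc)
  {
  	pc->mem_cgroup = NULL;		/* was: pc->flags = 0 */
  }

  int new_page_cgroup_used(struct new_page_cgroup *pc)
  {
  	return pc->mem_cgroup != NULL;
  }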

Because this is the last page_cgroup flag, this patch reduces the memcg
per-page overhead to a single pointer.
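
For scale, a standalone sketch (assuming a typical 64-bit build where
unsigned long and a pointer are both 8 bytes):

  #include <stdio.h>

  struct mem_cgroup;

  struct page_cgroup_before {	/* flags word + pointer */
  	unsigned long flags;
  	struct mem_cgroup *mem_cgroup;
  };

  struct page_cgroup_after {	/* pointer only */
  	struct mem_cgroup *mem_cgroup;
  };

  int main(void)
  {
  	/* Prints "16 -> 8" on such a build. */
  	printf("%zu -> %zu\n", sizeof(struct page_cgroup_before),
  	       sizeof(struct page_cgroup_after));
  	return 0;
  }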

[akpm@linux-foundation.org: remove unneeded initialization of `memcg', per Michal]
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Hugh Dickins <hughd@google.com>
Acked-by: Michal Hocko <mhocko@suse.cz>
Reviewed-by: Vladimir Davydov <vdavydov@parallels.com>
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent f4aaa8b4
include/linux/page_cgroup.h  +0 −10
 #ifndef __LINUX_PAGE_CGROUP_H
 #define __LINUX_PAGE_CGROUP_H
 
-enum {
-	/* flags for mem_cgroup */
-	PCG_USED = 0x01,	/* This page is charged to a memcg */
-};
-
 struct pglist_data;
 
 #ifdef CONFIG_MEMCG
@@ -19,7 +14,6 @@ struct mem_cgroup;
  * then the page cgroup for pfn always exists.
  */
 struct page_cgroup {
-	unsigned long flags;
 	struct mem_cgroup *mem_cgroup;
 };
 
@@ -39,10 +33,6 @@ static inline void page_cgroup_init(void)
 
 struct page_cgroup *lookup_page_cgroup(struct page *page);
 
-static inline int PageCgroupUsed(struct page_cgroup *pc)
-{
-	return !!(pc->flags & PCG_USED);
-}
 #else /* !CONFIG_MEMCG */
 struct page_cgroup;
 
mm/memcontrol.c  +41 −66
@@ -1284,14 +1284,12 @@ struct lruvec *mem_cgroup_page_lruvec(struct page *page, struct zone *zone)
 
 	pc = lookup_page_cgroup(page);
 	memcg = pc->mem_cgroup;
-
 	/*
 	 * Swapcache readahead pages are added to the LRU - and
-	 * possibly migrated - before they are charged.  Ensure
-	 * pc->mem_cgroup is sane.
+	 * possibly migrated - before they are charged.
 	 */
-	if (!PageLRU(page) && !PageCgroupUsed(pc) && memcg != root_mem_cgroup)
-		pc->mem_cgroup = memcg = root_mem_cgroup;
+	if (!memcg)
+		memcg = root_mem_cgroup;
 
 	mz = mem_cgroup_page_zoneinfo(memcg, page);
 	lruvec = &mz->lruvec;
@@ -2151,7 +2149,7 @@ struct mem_cgroup *mem_cgroup_begin_page_stat(struct page *page,
 	pc = lookup_page_cgroup(page);
 again:
 	memcg = pc->mem_cgroup;
-	if (unlikely(!memcg || !PageCgroupUsed(pc)))
+	if (unlikely(!memcg))
 		return NULL;
 
 	*locked = false;
@@ -2159,7 +2157,7 @@ struct mem_cgroup *mem_cgroup_begin_page_stat(struct page *page,
 		return memcg;
 
 	move_lock_mem_cgroup(memcg, flags);
-	if (memcg != pc->mem_cgroup || !PageCgroupUsed(pc)) {
+	if (memcg != pc->mem_cgroup) {
 		move_unlock_mem_cgroup(memcg, flags);
 		goto again;
 	}
@@ -2525,7 +2523,7 @@ static struct mem_cgroup *mem_cgroup_lookup(unsigned short id)
  */
 struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
 {
-	struct mem_cgroup *memcg = NULL;
+	struct mem_cgroup *memcg;
 	struct page_cgroup *pc;
 	unsigned short id;
 	swp_entry_t ent;
@@ -2533,9 +2531,10 @@ struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
 	VM_BUG_ON_PAGE(!PageLocked(page), page);
 
 	pc = lookup_page_cgroup(page);
-	if (PageCgroupUsed(pc)) {
-		memcg = pc->mem_cgroup;
-		if (memcg && !css_tryget_online(&memcg->css))
+	memcg = pc->mem_cgroup;
+
+	if (memcg) {
+		if (!css_tryget_online(&memcg->css))
 			memcg = NULL;
 	} else if (PageSwapCache(page)) {
 		ent.val = page_private(page);
@@ -2586,7 +2585,7 @@ static void commit_charge(struct page *page, struct mem_cgroup *memcg,
 	struct page_cgroup *pc = lookup_page_cgroup(page);
 	int isolated;
 
-	VM_BUG_ON_PAGE(PageCgroupUsed(pc), page);
+	VM_BUG_ON_PAGE(pc->mem_cgroup, page);
 	/*
 	 * we don't need page_cgroup_lock about tail pages, becase they are not
 	 * accessed by any other context at this point.
@@ -2601,7 +2600,7 @@ static void commit_charge(struct page *page, struct mem_cgroup *memcg,
 
 	/*
 	 * Nobody should be changing or seriously looking at
-	 * pc->mem_cgroup and pc->flags at this point:
+	 * pc->mem_cgroup at this point:
 	 *
 	 * - the page is uncharged
 	 *
@@ -2614,7 +2613,6 @@ static void commit_charge(struct page *page, struct mem_cgroup *memcg,
 	 *   have the page locked
 	 */
 	pc->mem_cgroup = memcg;
-	pc->flags = PCG_USED;
 
 	if (lrucare)
 		unlock_page_lru(page, isolated);
@@ -3126,37 +3124,22 @@ void __memcg_kmem_commit_charge(struct page *page, struct mem_cgroup *memcg,
 		memcg_uncharge_kmem(memcg, 1 << order);
 		return;
 	}
-	/*
-	 * The page is freshly allocated and not visible to any
-	 * outside callers yet.  Set up pc non-atomically.
-	 */
 	pc = lookup_page_cgroup(page);
 	pc->mem_cgroup = memcg;
-	pc->flags = PCG_USED;
 }
 
 void __memcg_kmem_uncharge_pages(struct page *page, int order)
 {
-	struct mem_cgroup *memcg = NULL;
-	struct page_cgroup *pc;
+	struct page_cgroup *pc = lookup_page_cgroup(page);
+	struct mem_cgroup *memcg = pc->mem_cgroup;
 
-	pc = lookup_page_cgroup(page);
-	if (!PageCgroupUsed(pc))
-		return;
-
-	memcg = pc->mem_cgroup;
-	pc->flags = 0;
-
-	/*
-	 * We trust that only if there is a memcg associated with the page, it
-	 * is a valid allocation
-	 */
 	if (!memcg)
 		return;
 
 	VM_BUG_ON_PAGE(mem_cgroup_is_root(memcg), page);
 
 	memcg_uncharge_kmem(memcg, 1 << order);
+	pc->mem_cgroup = NULL;
 }
 #else
 static inline void memcg_unregister_all_caches(struct mem_cgroup *memcg)
@@ -3174,23 +3157,16 @@ static inline void memcg_unregister_all_caches(struct mem_cgroup *memcg)
  */
 void mem_cgroup_split_huge_fixup(struct page *head)
 {
-	struct page_cgroup *head_pc;
-	struct page_cgroup *pc;
-	struct mem_cgroup *memcg;
+	struct page_cgroup *pc = lookup_page_cgroup(head);
 	int i;
 
 	if (mem_cgroup_disabled())
 		return;
 
-	head_pc = lookup_page_cgroup(head);
+	for (i = 1; i < HPAGE_PMD_NR; i++)
+		pc[i].mem_cgroup = pc[0].mem_cgroup;
 
-	memcg = head_pc->mem_cgroup;
-	for (i = 1; i < HPAGE_PMD_NR; i++) {
-		pc = head_pc + i;
-		pc->mem_cgroup = memcg;
-		pc->flags = head_pc->flags;
-	}
-	__this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_RSS_HUGE],
+	__this_cpu_sub(pc[0].mem_cgroup->stat->count[MEM_CGROUP_STAT_RSS_HUGE],
 		       HPAGE_PMD_NR);
 }
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
@@ -3240,7 +3216,7 @@ static int mem_cgroup_move_account(struct page *page,
 		goto out;
 
 	ret = -EINVAL;
-	if (!PageCgroupUsed(pc) || pc->mem_cgroup != from)
+	if (pc->mem_cgroup != from)
 		goto out_unlock;
 
 	move_lock_mem_cgroup(from, &flags);
@@ -3350,7 +3326,7 @@ static struct page_cgroup *lookup_page_cgroup_used(struct page *page)
 	 * the first time, i.e. during boot or memory hotplug;
 	 * or when mem_cgroup_disabled().
 	 */
-	if (likely(pc) && PageCgroupUsed(pc))
+	if (likely(pc) && pc->mem_cgroup)
 		return pc;
 	return NULL;
 }
@@ -3368,10 +3344,8 @@ void mem_cgroup_print_bad_page(struct page *page)
 	struct page_cgroup *pc;
 
 	pc = lookup_page_cgroup_used(page);
-	if (pc) {
-		pr_alert("pc:%p pc->flags:%lx pc->mem_cgroup:%p\n",
-			 pc, pc->flags, pc->mem_cgroup);
-	}
+	if (pc)
+		pr_alert("pc:%p pc->mem_cgroup:%p\n", pc, pc->mem_cgroup);
 }
 #endif
 
@@ -5308,7 +5282,7 @@ static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma,
 		 * mem_cgroup_move_account() checks the pc is valid or
 		 * not under LRU exclusion.
 		 */
-		if (PageCgroupUsed(pc) && pc->mem_cgroup == mc.from) {
+		if (pc->mem_cgroup == mc.from) {
 			ret = MC_TARGET_PAGE;
 			if (target)
 				target->page = page;
@@ -5344,7 +5318,7 @@ static enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
 	if (!move_anon())
 		return ret;
 	pc = lookup_page_cgroup(page);
-	if (PageCgroupUsed(pc) && pc->mem_cgroup == mc.from) {
+	if (pc->mem_cgroup == mc.from) {
 		ret = MC_TARGET_PAGE;
 		if (target) {
 			get_page(page);
@@ -5788,18 +5762,17 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
 		return;
 
 	pc = lookup_page_cgroup(page);
+	memcg = pc->mem_cgroup;
 
 	/* Readahead page, never charged */
-	if (!PageCgroupUsed(pc))
+	if (!memcg)
 		return;
 
-	memcg = pc->mem_cgroup;
-
 	oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg));
 	VM_BUG_ON_PAGE(oldid, page);
 	mem_cgroup_swap_statistics(memcg, true);
 
-	pc->flags = 0;
+	pc->mem_cgroup = NULL;
 
 	if (!mem_cgroup_is_root(memcg))
 		page_counter_uncharge(&memcg->memory, 1);
@@ -5874,7 +5847,7 @@ int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
 		 * the page lock, which serializes swap cache removal, which
 		 * in turn serializes uncharging.
 		 */
-		if (PageCgroupUsed(pc))
+		if (pc->mem_cgroup)
 			goto out;
 	}
 
@@ -6036,13 +6009,13 @@ static void uncharge_list(struct list_head *page_list)
 		VM_BUG_ON_PAGE(page_count(page), page);
 
 		pc = lookup_page_cgroup(page);
-		if (!PageCgroupUsed(pc))
+		if (!pc->mem_cgroup)
 			continue;
 
 		/*
 		 * Nobody should be changing or seriously looking at
-		 * pc->mem_cgroup and pc->flags at this point, we have
-		 * fully exclusive access to the page.
+		 * pc->mem_cgroup at this point, we have fully
+		 * exclusive access to the page.
 		 */
 
 		if (memcg != pc->mem_cgroup) {
@@ -6065,7 +6038,7 @@ static void uncharge_list(struct list_head *page_list)
 		else
 			nr_file += nr_pages;
 
-		pc->flags = 0;
+		pc->mem_cgroup = NULL;
 
 		pgpgout++;
 	} while (next != page_list);
@@ -6091,7 +6064,7 @@ void mem_cgroup_uncharge(struct page *page)
 
 	/* Don't touch page->lru of any random page, pre-check: */
 	pc = lookup_page_cgroup(page);
-	if (!PageCgroupUsed(pc))
+	if (!pc->mem_cgroup)
 		return;
 
 	INIT_LIST_HEAD(&page->lru);
@@ -6127,6 +6100,7 @@ void mem_cgroup_uncharge_list(struct list_head *page_list)
 void mem_cgroup_migrate(struct page *oldpage, struct page *newpage,
 			bool lrucare)
 {
+	struct mem_cgroup *memcg;
 	struct page_cgroup *pc;
 	int isolated;
 
@@ -6143,7 +6117,7 @@ void mem_cgroup_migrate(struct page *oldpage, struct page *newpage,
 
 	/* Page cache replacement: new page already charged? */
 	pc = lookup_page_cgroup(newpage);
-	if (PageCgroupUsed(pc))
+	if (pc->mem_cgroup)
 		return;
 
 	/*
@@ -6153,18 +6127,19 @@ void mem_cgroup_migrate(struct page *oldpage, struct page *newpage,
 	 * reclaim just put back on the LRU but has not released yet.
 	 */
 	pc = lookup_page_cgroup(oldpage);
-	if (!PageCgroupUsed(pc))
+	memcg = pc->mem_cgroup;
+	if (!memcg)
 		return;
 
 	if (lrucare)
 		lock_page_lru(oldpage, &isolated);
 
-	pc->flags = 0;
+	pc->mem_cgroup = NULL;
 
 	if (lrucare)
 		unlock_page_lru(oldpage, isolated);
 
-	commit_charge(newpage, pc->mem_cgroup, lrucare);
+	commit_charge(newpage, memcg, lrucare);
 }
 
 /*