
Commit ce587e65 authored by Johannes Weiner, committed by Linus Torvalds

mm: memcg: move pc lookup point to commit_charge()



None of the callsites actually needs the page_cgroup descriptor
itself, so just pass the page and do the lookup inside
__mem_cgroup_commit_charge().

We have already had two bugs (6568d4a9 'mm: memcg: update the correct
soft limit tree during migration' and 'memcg: fix Bad page state after
replace_page_cache') where the passed page and pc did not refer to the
same page frame.

Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Hugh Dickins <hughd@google.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 4e1c2b28
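
The shape of the change, as a minimal user-space sketch (the types, pc_table, and function names below are made-up stand-ins for illustration, not the kernel's definitions): under the old convention the caller looks up pc and passes it alongside the page, so nothing ties the two arguments together; with the lookup moved inside the commit function, page and pc agree by construction.

#include <stdio.h>

/* Illustrative stand-ins only -- not the kernel's struct page or
 * struct page_cgroup. */
struct page { int id; };
struct page_cgroup { int page_id; };

static struct page_cgroup pc_table[2] = { { 0 }, { 1 } };

static struct page_cgroup *lookup_page_cgroup(struct page *page)
{
	return &pc_table[page->id];
}

/*
 * Old convention: the caller does the lookup and passes both.  Nothing
 * stops it from looking up one page and passing another, which is
 * exactly the mismatch behind the two bugs cited in the message above.
 */
static void commit_charge_old(struct page *page, struct page_cgroup *pc)
{
	if (pc->page_id != page->id)
		printf("mismatch: page %d charged via pc of page %d\n",
		       page->id, pc->page_id);
}

/*
 * New convention: only the page is passed; the descriptor is derived
 * at the point of use, so page and pc cannot get out of sync.
 */
static void commit_charge(struct page *page)
{
	struct page_cgroup *pc = lookup_page_cgroup(page);
	/* ... charge bookkeeping against pc would go here ... */
	(void)pc;
}

int main(void)
{
	struct page oldpage = { 0 }, newpage = { 1 };

	/* Reproduce the bug class: pc of oldpage, page of newpage. */
	commit_charge_old(&newpage, lookup_page_cgroup(&oldpage));

	/* With the lookup inside, no such mismatch is possible. */
	commit_charge(&newpage);
	return 0;
}

Deriving pc from page at a single point removes the whole class of page/pc mismatch bugs by making the invalid state unrepresentable at the call sites.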
mm/memcontrol.c: +5 −12
@@ -2476,10 +2476,10 @@ struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
 static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
 				       struct page *page,
 				       unsigned int nr_pages,
-				       struct page_cgroup *pc,
 				       enum charge_type ctype,
 				       bool lrucare)
 {
+	struct page_cgroup *pc = lookup_page_cgroup(page);
 	struct zone *uninitialized_var(zone);
 	bool was_on_lru = false;
 	bool anon;
@@ -2716,7 +2716,6 @@ static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
 {
 	struct mem_cgroup *memcg = NULL;
 	unsigned int nr_pages = 1;
-	struct page_cgroup *pc;
 	bool oom = true;
 	int ret;
 
@@ -2730,11 +2729,10 @@ static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
 		oom = false;
 	}
 
-	pc = lookup_page_cgroup(page);
 	ret = __mem_cgroup_try_charge(mm, gfp_mask, nr_pages, &memcg, oom);
 	if (ret == -ENOMEM)
 		return ret;
-	__mem_cgroup_commit_charge(memcg, page, nr_pages, pc, ctype, false);
+	__mem_cgroup_commit_charge(memcg, page, nr_pages, ctype, false);
 	return 0;
 }
 
@@ -2831,16 +2829,13 @@ static void
 __mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *memcg,
 					enum charge_type ctype)
 {
-	struct page_cgroup *pc;
-
 	if (mem_cgroup_disabled())
 		return;
 	if (!memcg)
 		return;
 	cgroup_exclude_rmdir(&memcg->css);
 
-	pc = lookup_page_cgroup(page);
-	__mem_cgroup_commit_charge(memcg, page, 1, pc, ctype, true);
+	__mem_cgroup_commit_charge(memcg, page, 1, ctype, true);
 	/*
 	 * Now swap is on-memory. This means this page may be
 	 * counted both as mem and swap....double count.
@@ -3298,14 +3293,13 @@ int mem_cgroup_prepare_migration(struct page *page,
 	 * page. In the case new page is migrated but not remapped, new page's
 	 * mapcount will be finally 0 and we call uncharge in end_migration().
 	 */
-	pc = lookup_page_cgroup(newpage);
 	if (PageAnon(page))
 		ctype = MEM_CGROUP_CHARGE_TYPE_MAPPED;
 	else if (page_is_file_cache(page))
 		ctype = MEM_CGROUP_CHARGE_TYPE_CACHE;
 	else
 		ctype = MEM_CGROUP_CHARGE_TYPE_SHMEM;
-	__mem_cgroup_commit_charge(memcg, newpage, 1, pc, ctype, false);
+	__mem_cgroup_commit_charge(memcg, newpage, 1, ctype, false);
 	return ret;
 }
 
@@ -3392,8 +3386,7 @@ void mem_cgroup_replace_page_cache(struct page *oldpage,
 	 * the newpage may be on LRU(or pagevec for LRU) already. We lock
 	 * LRU while we overwrite pc->mem_cgroup.
 	 */
-	pc = lookup_page_cgroup(newpage);
-	__mem_cgroup_commit_charge(memcg, newpage, 1, pc, type, true);
+	__mem_cgroup_commit_charge(memcg, newpage, 1, type, true);
 }
 
 #ifdef CONFIG_DEBUG_VM