
Commit bdcbb659 authored by Qiang Huang, committed by Linus Torvalds

memcg: fold mem_cgroup_stolen



It is only used in __mem_cgroup_begin_update_page_stat(); the name is
confusing, and having two routines for one job is also confusing, so
folding this function into its caller seems clearer.

[akpm@linux-foundation.org: fix typo, per Michal]
Signed-off-by: Qiang Huang <h.huangqiang@huawei.com>
Acked-by: Michal Hocko <mhocko@suse.cz>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 3fb1c8dc
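
The net effect of the fold, read from the diff below: the body of mem_cgroup_stolen() lands at its only call site in __mem_cgroup_begin_update_page_stat(). As a reading aid, here is a sketch of how that function looks once the commit is applied; only the VM_BUG_ON() and atomic_read() lines are taken verbatim from the hunks, while the surrounding lines (the page_cgroup lookup and the recheck loop) are an approximation of that era's mm/memcontrol.c and may differ in detail.

void __mem_cgroup_begin_update_page_stat(struct page *page,
					 bool *locked, unsigned long *flags)
{
	struct mem_cgroup *memcg;
	struct page_cgroup *pc = lookup_page_cgroup(page);

again:
	memcg = pc->mem_cgroup;
	if (unlikely(!memcg || !PageCgroupUsed(pc)))
		return;
	/*
	 * Folded check: the caller holds rcu_read_lock(), so any concurrent
	 * move_account() is delayed until rcu_read_unlock(); if no move is
	 * in flight, skip the move lock entirely.
	 */
	VM_BUG_ON(!rcu_read_lock_held());
	if (atomic_read(&memcg->moving_account) <= 0)
		return;

	move_lock_mem_cgroup(memcg, flags);
	if (memcg != pc->mem_cgroup || !PageCgroupUsed(pc)) {
		/* The page changed cgroups under the lock; retry. */
		move_unlock_mem_cgroup(memcg, flags);
		goto again;
	}
	*locked = true;
}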
mm/memcontrol.c: +7 −18
@@ -1594,23 +1594,12 @@ static void mem_cgroup_end_move(struct mem_cgroup *memcg)
 }
 
 /*
- * 2 routines for checking "mem" is under move_account() or not.
+ * A routine for checking "mem" is under move_account() or not.
  *
- * mem_cgroup_stolen() -  checking whether a cgroup is mc.from or not. This
- *			  is used for avoiding races in accounting.  If true,
- *			  pc->mem_cgroup may be overwritten.
- *
- * mem_cgroup_under_move() - checking a cgroup is mc.from or mc.to or
- *			  under hierarchy of moving cgroups. This is for
- *			  waiting at hith-memory prressure caused by "move".
+ * Checking a cgroup is mc.from or mc.to or under hierarchy of
+ * moving cgroups. This is for waiting at high-memory pressure
+ * caused by "move".
  */
-
-static bool mem_cgroup_stolen(struct mem_cgroup *memcg)
-{
-	VM_BUG_ON(!rcu_read_lock_held());
-	return atomic_read(&memcg->moving_account) > 0;
-}
-
 static bool mem_cgroup_under_move(struct mem_cgroup *memcg)
 {
 	struct mem_cgroup *from;
@@ -1653,7 +1642,6 @@ static bool mem_cgroup_wait_acct_move(struct mem_cgroup *memcg)
  * Take this lock when
  * - a code tries to modify page's memcg while it's USED.
  * - a code tries to modify page state accounting in a memcg.
- * see mem_cgroup_stolen(), too.
  */
 static void move_lock_mem_cgroup(struct mem_cgroup *memcg,
 				  unsigned long *flags)
@@ -2326,9 +2314,10 @@ void __mem_cgroup_begin_update_page_stat(struct page *page,
 	 * If this memory cgroup is not under account moving, we don't
 	 * need to take move_lock_mem_cgroup(). Because we already hold
 	 * rcu_read_lock(), any calls to move_account will be delayed until
-	 * rcu_read_unlock() if mem_cgroup_stolen() == true.
+	 * rcu_read_unlock().
 	 */
-	if (!mem_cgroup_stolen(memcg))
+	VM_BUG_ON(!rcu_read_lock_held());
+	if (atomic_read(&memcg->moving_account) <= 0)
 		return;
 
 	move_lock_mem_cgroup(memcg, flags);
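
Why asserting rcu_read_lock_held() here is safe: in this kernel generation, __mem_cgroup_begin_update_page_stat() is only reached through inline begin/end wrappers that take rcu_read_lock() before calling it, and page-statistic updaters bracket their update with that pair. The snippet below is a hedged sketch of that caller-side pairing; the wrapper names reflect that era's API as best recalled, and the actual statistic update is left as a comment because the helper names changed across versions.

	/* Hedged sketch of a caller (rmap-style page stat update). */
	bool locked;
	unsigned long flags;

	mem_cgroup_begin_update_page_stat(page, &locked, &flags);
	/* ... update the per-memcg page statistic here ... */
	mem_cgroup_end_update_page_stat(page, &locked, &flags);

With the helper folded away, behaviour is unchanged: when no task move is in flight, the begin path costs an atomic_read() under RCU and never takes the per-memcg move lock.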