Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit b85a96c0 authored by Daisuke Nishimura's avatar Daisuke Nishimura Committed by Linus Torvalds
Browse files

memcg: memory swap controller: fix limit check



There are scattered calls of res_counter_check_under_limit(), and most of
them don't take mem+swap accounting into account.

Define mem_cgroup_check_under_limit() and avoid direct use of
res_counter_check_under_limit().

Reported-by: default avatarDaisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Signed-off-by: default avatarDaisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Signed-off-by: default avatarKAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Balbir Singh <balbir@in.ibm.com>
Cc: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Cc: Hugh Dickins <hugh@veritas.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Signed-off-by: default avatarAndrew Morton <akpm@linux-foundation.org>
Signed-off-by: default avatarLinus Torvalds <torvalds@linux-foundation.org>
parent f9717d28
Loading
Loading
Loading
Loading
+17 −9
Original line number Diff line number Diff line
@@ -571,6 +571,18 @@ mem_cgroup_get_first_node(struct mem_cgroup *root_mem)
	return ret;
}

/*
 * Check whether @mem is still within its limits.  The memory resource
 * counter must be under its limit; when swap accounting is enabled the
 * mem+swap counter (memsw) must be under its limit as well.
 */
static bool mem_cgroup_check_under_limit(struct mem_cgroup *mem)
{
	if (!res_counter_check_under_limit(&mem->res))
		return false;
	if (do_swap_account && !res_counter_check_under_limit(&mem->memsw))
		return false;
	return true;
}

/*
 * Dance down the hierarchy if needed to reclaim memory. We remember the
 * last child we reclaimed from, so that we don't end up penalizing
@@ -592,7 +604,7 @@ static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem,
	 * have left.
	 */
	ret = try_to_free_mem_cgroup_pages(root_mem, gfp_mask, noswap);
	if (res_counter_check_under_limit(&root_mem->res))
	if (mem_cgroup_check_under_limit(root_mem))
		return 0;

	next_mem = mem_cgroup_get_first_node(root_mem);
@@ -606,7 +618,7 @@ static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem,
			continue;
		}
		ret = try_to_free_mem_cgroup_pages(next_mem, gfp_mask, noswap);
		if (res_counter_check_under_limit(&root_mem->res))
		if (mem_cgroup_check_under_limit(root_mem))
			return 0;
		cgroup_lock();
		next_mem = mem_cgroup_get_next_node(next_mem, root_mem);
@@ -709,11 +721,7 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm,
		 * current usage of the cgroup before giving up
		 *
		 */
		if (do_swap_account) {
			if (res_counter_check_under_limit(&mem_over_limit->res) &&
			    res_counter_check_under_limit(&mem_over_limit->memsw))
				continue;
		} else if (res_counter_check_under_limit(&mem_over_limit->res))
		if (mem_cgroup_check_under_limit(mem_over_limit))
			continue;

		if (!nr_retries--) {
@@ -1334,7 +1342,7 @@ int mem_cgroup_shrink_usage(struct mm_struct *mm, gfp_t gfp_mask)

	do {
		progress = try_to_free_mem_cgroup_pages(mem, gfp_mask, true);
		progress += res_counter_check_under_limit(&mem->res);
		progress += mem_cgroup_check_under_limit(mem);
	} while (!progress && --retry);

	css_put(&mem->css);