Commit ae3abae6 authored by Daisuke Nishimura, committed by Linus Torvalds

memcg: fix mem_cgroup_shrink_usage()



The current mem_cgroup_shrink_usage() has two problems.

1. It neither calls mem_cgroup_out_of_memory() nor updates
   last_oom_jiffies, so pagefault_out_of_memory() invokes the global OOM
   killer (see the sketch after this list).

2. Considering hierarchy, shrinking has to be done from mem_over_limit
   (the memcg that actually exceeded its limit), not from the memcg to
   which the page would be charged.
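
To make problem 1 concrete, here is a simplified sketch of how the page
fault path decides between a memcg OOM and a global OOM (paraphrased
from the mm/oom_kill.c and mm/memcontrol.c of this era; the notifier,
panic_on_oom and locking details are omitted, so this is not the
verbatim kernel code):

	/*
	 * Simplified sketch.  The global OOM killer is skipped only when
	 * the faulting task's memcg reports that it ran its own OOM
	 * killer moments ago, which it can tell from the timestamp in
	 * last_oom_jiffies.
	 */
	void pagefault_out_of_memory(void)
	{
		/* ... */
		if (mem_cgroup_oom_called(current))
			return;	/* memcg OOM just handled; skip global OOM */

		/* fall through to the system-wide OOM killer */
		/* ... */
	}

mem_cgroup_oom_called() only returns true while last_oom_jiffies is
fresh, and last_oom_jiffies is stamped when the charge path runs
mem_cgroup_out_of_memory().  The old mem_cgroup_shrink_usage() did
neither, so every charge failure at shmem swapin escalated into a
global OOM.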

mem_cgroup_try_charge_swapin() already does all of this properly, so we
use it and call mem_cgroup_cancel_charge_swapin() when the charge
succeeds, as restated below.
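
For readability, here is the resulting helper in one piece, restated
from the mm/memcontrol.c hunk below (the comments other than the
original "it does !mem check" are added here and are not part of the
patch):

	int mem_cgroup_shmem_charge_fallback(struct page *page,
					     struct mm_struct *mm, gfp_t gfp_mask)
	{
		struct mem_cgroup *mem = NULL;
		int ret;

		if (mem_cgroup_disabled())
			return 0;

		/*
		 * try_charge_swapin() finds the right memcg, reclaims from
		 * the one that is actually over its limit, and on failure
		 * runs the memcg OOM killer, updating last_oom_jiffies.
		 */
		ret = mem_cgroup_try_charge_swapin(mm, page, gfp_mask, &mem);
		if (!ret)
			/* only the reclaim side effect was wanted */
			mem_cgroup_cancel_charge_swapin(mem); /* it does !mem check */

		return ret;
	}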

The name "shrink_usage" is no longer appropriate for this behavior, so
rename the function to mem_cgroup_shmem_charge_fallback() as well.

Signed-off-by: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Li Zefan <lizf@cn.fujitsu.com>
Cc: Paul Menage <menage@google.com>
Cc: Dhaval Giani <dhaval@linux.vnet.ibm.com>
Cc: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Cc: YAMAMOTO Takashi <yamamoto@valinux.co.jp>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 08161786
include/linux/memcontrol.h  +2 −2
@@ -56,7 +56,7 @@ extern void mem_cgroup_move_lists(struct page *page,
 				  enum lru_list from, enum lru_list to);
 extern void mem_cgroup_uncharge_page(struct page *page);
 extern void mem_cgroup_uncharge_cache_page(struct page *page);
-extern int mem_cgroup_shrink_usage(struct page *page,
+extern int mem_cgroup_shmem_charge_fallback(struct page *page,
 			struct mm_struct *mm, gfp_t gfp_mask);
 
 extern unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
@@ -155,7 +155,7 @@ static inline void mem_cgroup_uncharge_cache_page(struct page *page)
 {
 }
 
-static inline int mem_cgroup_shrink_usage(struct page *page,
+static inline int mem_cgroup_shmem_charge_fallback(struct page *page,
 			struct mm_struct *mm, gfp_t gfp_mask)
 {
 	return 0;
mm/memcontrol.c  +12 −21
@@ -1617,37 +1617,28 @@ void mem_cgroup_end_migration(struct mem_cgroup *mem,
 }
 
 /*
- * A call to try to shrink memory usage under specified resource controller.
- * This is typically used for page reclaiming for shmem for reducing side
- * effect of page allocation from shmem, which is used by some mem_cgroup.
+ * A call to try to shrink memory usage on charge failure at shmem's swapin.
+ * Calling hierarchical_reclaim is not enough because we should update
+ * last_oom_jiffies to prevent pagefault_out_of_memory from invoking global OOM.
+ * Moreover considering hierarchy, we should reclaim from the mem_over_limit,
+ * not from the memcg which this page would be charged to.
+ * try_charge_swapin does all of these works properly.
  */
-int mem_cgroup_shrink_usage(struct page *page,
+int mem_cgroup_shmem_charge_fallback(struct page *page,
 			    struct mm_struct *mm,
 			    gfp_t gfp_mask)
 {
 	struct mem_cgroup *mem = NULL;
-	int progress = 0;
-	int retry = MEM_CGROUP_RECLAIM_RETRIES;
+	int ret;
 
 	if (mem_cgroup_disabled())
 		return 0;
-	if (page)
-		mem = try_get_mem_cgroup_from_swapcache(page);
-	if (!mem && mm)
-		mem = try_get_mem_cgroup_from_mm(mm);
-	if (unlikely(!mem))
-		return 0;
 
-	do {
-		progress = mem_cgroup_hierarchical_reclaim(mem,
-					gfp_mask, true, false);
-		progress += mem_cgroup_check_under_limit(mem);
-	} while (!progress && --retry);
+	ret = mem_cgroup_try_charge_swapin(mm, page, gfp_mask, &mem);
+	if (!ret)
+		mem_cgroup_cancel_charge_swapin(mem); /* it does !mem check */
 
-	css_put(&mem->css);
-	if (!retry)
-		return -ENOMEM;
-	return 0;
+	return ret;
 }
 
 static DEFINE_MUTEX(set_limit_mutex);
mm/shmem.c  +6 −2
@@ -1340,8 +1340,12 @@ static int shmem_getpage(struct inode *inode, unsigned long idx,
 			shmem_swp_unmap(entry);
 			spin_unlock(&info->lock);
 			if (error == -ENOMEM) {
-				/* allow reclaim from this memory cgroup */
-				error = mem_cgroup_shrink_usage(swappage,
+				/*
+				 * reclaim from proper memory cgroup and
+				 * call memcg's OOM if needed.
+				 */
+				error = mem_cgroup_shmem_charge_fallback(
+								swappage,
 								current->mm,
 								gfp);
 				if (error) {