Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit bced0520 authored by KAMEZAWA Hiroyuki, committed by Linus Torvalds
Browse files

memcg: fix gfp_mask of callers of charge



Fix misuse of gfp_kernel.

Now, most callers of the mem_cgroup_charge_xxx functions use GFP_KERNEL.

I think that this is from the fact that page_cgroup *was* dynamically
allocated.

But now, we allocate all page_cgroup at boot.  And
mem_cgroup_try_to_free_pages() reclaim memory from GFP_HIGHUSER_MOVABLE +
specified GFP_RECLAIM_MASK.

  * This is because we just want to reduce memory usage.
    "Where we should reclaim from ?" is not a problem in memcg.

This patch modifies gfp masks to be GFP_HIGHUSER_MOVABLE if possible.

Note: This patch is not for fixing behavior but for showing sane information
      in source code.

Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Reviewed-by: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Cc: Balbir Singh <balbir@in.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 7a81b88c
Loading
Loading
Loading
Loading
+5 −3
Original line number Diff line number Diff line
@@ -808,7 +808,8 @@ int mem_cgroup_prepare_migration(struct page *page, struct page *newpage)
	}
	unlock_page_cgroup(pc);
	if (mem) {
		ret = mem_cgroup_charge_common(newpage, NULL, GFP_KERNEL,
		ret = mem_cgroup_charge_common(newpage, NULL,
					GFP_HIGHUSER_MOVABLE,
					ctype, mem);
		css_put(&mem->css);
	}
@@ -889,7 +890,8 @@ static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
			ret = -EBUSY;
			break;
		}
		progress = try_to_free_mem_cgroup_pages(memcg, GFP_KERNEL);
		progress = try_to_free_mem_cgroup_pages(memcg,
				GFP_HIGHUSER_MOVABLE);
		if (!progress)
			retry_count--;
	}
+5 −4
Original line number Diff line number Diff line
@@ -2000,7 +2000,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
	cow_user_page(new_page, old_page, address, vma);
	__SetPageUptodate(new_page);

	if (mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))
	if (mem_cgroup_newpage_charge(new_page, mm, GFP_HIGHUSER_MOVABLE))
		goto oom_free_new;

	/*
@@ -2431,7 +2431,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
	lock_page(page);
	delayacct_clear_flag(DELAYACCT_PF_SWAPIN);

	if (mem_cgroup_try_charge(mm, GFP_KERNEL, &ptr) == -ENOMEM) {
	if (mem_cgroup_try_charge(mm, GFP_HIGHUSER_MOVABLE, &ptr) == -ENOMEM) {
		ret = VM_FAULT_OOM;
		unlock_page(page);
		goto out;
@@ -2512,7 +2512,7 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
		goto oom;
	__SetPageUptodate(page);

	if (mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))
	if (mem_cgroup_newpage_charge(page, mm, GFP_HIGHUSER_MOVABLE))
		goto oom_free_page;

	entry = mk_pte(page, vma->vm_page_prot);
@@ -2603,7 +2603,8 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
				ret = VM_FAULT_OOM;
				goto out;
			}
			if (mem_cgroup_newpage_charge(page, mm, GFP_KERNEL)) {
			if (mem_cgroup_newpage_charge(page,
						mm, GFP_HIGHUSER_MOVABLE)) {
				ret = VM_FAULT_OOM;
				page_cache_release(page);
				goto out;
+3 −3
Original line number Diff line number Diff line
@@ -928,8 +928,8 @@ static int shmem_unuse_inode(struct shmem_inode_info *info, swp_entry_t entry, s
	error = 1;
	if (!inode)
		goto out;
	/* Precharge page using GFP_KERNEL while we can wait */
	error = mem_cgroup_cache_charge(page, current->mm, GFP_KERNEL);
	/* Charge page using GFP_HIGHUSER_MOVABLE while we can wait */
	error = mem_cgroup_cache_charge(page, current->mm, GFP_HIGHUSER_MOVABLE);
	if (error)
		goto out;
	error = radix_tree_preload(GFP_KERNEL);
@@ -1379,7 +1379,7 @@ static int shmem_getpage(struct inode *inode, unsigned long idx,

			/* Precharge page while we can wait, compensate after */
			error = mem_cgroup_cache_charge(filepage, current->mm,
							gfp & ~__GFP_HIGHMEM);
					GFP_HIGHUSER_MOVABLE);
			if (error) {
				page_cache_release(filepage);
				shmem_unacct_blocks(info->flags, 1);
+1 −1
Original line number Diff line number Diff line
@@ -695,7 +695,7 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
	pte_t *pte;
	int ret = 1;

	if (mem_cgroup_try_charge(vma->vm_mm, GFP_KERNEL, &ptr))
	if (mem_cgroup_try_charge(vma->vm_mm, GFP_HIGHUSER_MOVABLE, &ptr))
		ret = -ENOMEM;

	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);