Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 7e924aaf authored by Hugh Dickins, committed by Linus Torvalds
Browse files

memcg: mem_cgroup_charge never NULL



My memcgroup patch to fix hang with shmem/tmpfs added NULL page handling to
mem_cgroup_charge_common.  It seemed convenient at the time, but hard to
justify now: there's a perfectly appropriate swappage to charge and uncharge
instead, this is not on any hot path through shmem_getpage, and no performance
hit was observed from the slight extra overhead.

So revert that NULL page handling from mem_cgroup_charge_common; and make it
clearer by bringing page_cgroup_assign_new_page_cgroup into its body - that
was a helper I found more of a hindrance to understanding.

Signed-off-by: Hugh Dickins <hugh@veritas.com>
Cc: David Rientjes <rientjes@google.com>
Acked-by: Balbir Singh <balbir@linux.vnet.ibm.com>
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Hirokazu Takahashi <taka@valinux.co.jp>
Cc: YAMAMOTO Takashi <yamamoto@valinux.co.jp>
Cc: Paul Menage <menage@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 9442ec9d
Loading
Loading
Loading
Loading
+21 −40
Original line number Diff line number Diff line
@@ -300,25 +300,6 @@ static void __always_inline unlock_page_cgroup(struct page *page)
	bit_spin_unlock(PAGE_CGROUP_LOCK_BIT, &page->page_cgroup);
}

/*
 * Tie a newly allocated page_cgroup to its struct page, taking
 * lock_page_cgroup() to serialise against concurrent chargers.
 *
 * Returns 0 on success.  Returns 1 if the page is already tied to
 * some other page_cgroup (another charge won the race); in that case
 * the caller is responsible for disposing of @pc (the caller in
 * mem_cgroup_charge_common kfree()s it and retries).
 */
static int page_cgroup_assign_new_page_cgroup(struct page *page,
						struct page_cgroup *pc)
{
	int ret = 0;

	lock_page_cgroup(page);
	if (!page_get_page_cgroup(page))
		page_assign_page_cgroup(page, pc);
	else /* A page is tied to other pc. */
		ret = 1;
	unlock_page_cgroup(page);
	return ret;
}

/*
 * Clear page->page_cgroup member under lock_page_cgroup().
 * If given "pc" value is different from one page->page_cgroup,
@@ -585,7 +566,6 @@ static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
	 * with it
	 */
retry:
	if (page) {
	lock_page_cgroup(page);
	pc = page_get_page_cgroup(page);
	/*
@@ -604,7 +584,6 @@ static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
		}
	}
	unlock_page_cgroup(page);
	}

	pc = kzalloc(sizeof(struct page_cgroup), gfp_mask);
	if (pc == NULL)
@@ -663,7 +642,9 @@ static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
	if (ctype == MEM_CGROUP_CHARGE_TYPE_CACHE)
		pc->flags |= PAGE_CGROUP_FLAG_CACHE;

	if (!page || page_cgroup_assign_new_page_cgroup(page, pc)) {
	lock_page_cgroup(page);
	if (page_get_page_cgroup(page)) {
		unlock_page_cgroup(page);
		/*
		 * Another charge has been added to this page already.
		 * We take lock_page_cgroup(page) again and read
@@ -672,10 +653,10 @@ static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
		res_counter_uncharge(&mem->res, PAGE_SIZE);
		css_put(&mem->css);
		kfree(pc);
		if (!page)
			goto done;
		goto retry;
	}
	page_assign_page_cgroup(page, pc);
	unlock_page_cgroup(page);

	mz = page_cgroup_zoneinfo(pc);
	spin_lock_irqsave(&mz->lru_lock, flags);
+6 −3
Original line number Diff line number Diff line
@@ -1370,14 +1370,17 @@ static int shmem_getpage(struct inode *inode, unsigned long idx,
			shmem_swp_unmap(entry);
			spin_unlock(&info->lock);
			unlock_page(swappage);
			page_cache_release(swappage);
			if (error == -ENOMEM) {
				/* allow reclaim from this memory cgroup */
				error = mem_cgroup_cache_charge(NULL,
				error = mem_cgroup_cache_charge(swappage,
					current->mm, gfp & ~__GFP_HIGHMEM);
				if (error)
				if (error) {
					page_cache_release(swappage);
					goto failed;
				}
				mem_cgroup_uncharge_page(swappage);
			}
			page_cache_release(swappage);
			goto repeat;
		}
	} else if (sgp == SGP_READ && !filepage) {