
Commit bd0d24bf authored by Daisuke Nishimura, committed by Linus Torvalds

memcg: remove redundant code



- try_get_mem_cgroup_from_mm() calls rcu_read_lock/unlock by itself (see the
  sketch after this list), so we don't have to call them in task_in_mem_cgroup().
- *mz is not used in __mem_cgroup_uncharge_common().
- we don't have to call lookup_page_cgroup() in mem_cgroup_end_migration()
  after we've cleared PCG_MIGRATION of @oldpage.
- remove empty comment.
- remove redundant empty line in mem_cgroup_cache_charge().
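
For context on the first point: try_get_mem_cgroup_from_mm() already opens its own RCU read-side critical section around the mm->owner lookup, which is why the extra rcu_read_lock()/rcu_read_unlock() pair in task_in_mem_cgroup() (removed in the first hunk below) adds nothing. The following is a simplified sketch of that callee-side pattern; the function names are real, but the body is paraphrased for illustration rather than quoted from the tree:

static struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
{
	struct mem_cgroup *mem = NULL;

	if (!mm)
		return NULL;

	/* The callee brackets its own rcu_dereference() of mm->owner ... */
	rcu_read_lock();
	do {
		mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
		if (unlikely(!mem))
			break;
	} while (!css_tryget(&mem->css));	/* ... and pins the css before returning. */
	rcu_read_unlock();

	return mem;
}

With the reference safely taken inside the callee, task_in_mem_cgroup() only needs task_lock()/task_unlock() around the call, as the first hunk of the diff shows.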

Signed-off-by: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Acked-by: Balbir Singh <balbir@linux.vnet.ibm.com>
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 2bd9bb20
mm/memcontrol.c  +0 −10
@@ -840,9 +840,7 @@ int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem)
 	struct mem_cgroup *curr = NULL;
 
 	task_lock(task);
-	rcu_read_lock();
 	curr = try_get_mem_cgroup_from_mm(task->mm);
-	rcu_read_unlock();
 	task_unlock(task);
 	if (!curr)
 		return 0;
@@ -2092,7 +2090,6 @@ int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
 	if (!(gfp_mask & __GFP_WAIT)) {
 		struct page_cgroup *pc;
 
-
 		pc = lookup_page_cgroup(page);
 		if (!pc)
 			return 0;
@@ -2286,7 +2283,6 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
 {
 	struct page_cgroup *pc;
 	struct mem_cgroup *mem = NULL;
-	struct mem_cgroup_per_zone *mz;
 
 	if (mem_cgroup_disabled())
 		return NULL;
@@ -2340,7 +2336,6 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
 	 * special functions.
 	 */
 
-	mz = page_cgroup_zoneinfo(pc);
 	unlock_page_cgroup(pc);
 
 	memcg_check_events(mem, page);
@@ -2652,11 +2647,8 @@ void mem_cgroup_end_migration(struct mem_cgroup *mem,
 	ClearPageCgroupMigration(pc);
 	unlock_page_cgroup(pc);
 
-	if (unused != oldpage)
-		pc = lookup_page_cgroup(unused);
 	__mem_cgroup_uncharge_common(unused, MEM_CGROUP_CHARGE_TYPE_FORCE);
 
-	pc = lookup_page_cgroup(used);
 	/*
 	 * If a page is a file cache, radix-tree replacement is very atomic
 	 * and we can skip this check. When it was an Anon page, its mapcount
@@ -3800,8 +3792,6 @@ static int mem_cgroup_oom_control_read(struct cgroup *cgrp,
 	return 0;
 }
 
-/*
- */
 static int mem_cgroup_oom_control_write(struct cgroup *cgrp,
 	struct cftype *cft, u64 val)
 {