
Commit da1def55 authored by Aneesh Kumar K.V, committed by Linus Torvalds

hugetlb/cgroup: add support for cgroup removal



Add support for cgroup removal.  If we don't have a parent cgroup, the
charges are moved to the root cgroup.
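
The effect is visible from userspace once the controller is mounted. Below is a minimal, hypothetical sketch (not part of this patch), assuming a cgroup-v1 hierarchy with the hugetlb controller mounted at /sys/fs/cgroup/hugetlb, a 2MB hugepage size, and a child group named "test"; the mount point, group name, and the hugetlb.2MB.usage_in_bytes file name are assumptions that may need adjusting for a given system.

/*
 * Hypothetical demo, not part of this patch: after this change, rmdir()
 * on a hugetlb cgroup that still has charged pages (but no tasks and no
 * child groups) succeeds, and the charges reappear in the parent/root
 * group.  In a real run you would first move a task into the group and
 * fault in some hugetlb pages before removing it.
 */
#include <errno.h>
#include <stdio.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>

#define HUGETLB_ROOT "/sys/fs/cgroup/hugetlb"	/* assumed mount point */

/* Read hugetlb.2MB.usage_in_bytes from a cgroup directory (assumed name). */
static long read_usage(const char *cg)
{
	char path[256];
	long usage = -1;
	FILE *f;

	snprintf(path, sizeof(path), "%s/hugetlb.2MB.usage_in_bytes", cg);
	f = fopen(path, "r");
	if (!f)
		return -1;
	if (fscanf(f, "%ld", &usage) != 1)
		usage = -1;
	fclose(f);
	return usage;
}

int main(void)
{
	const char *child = HUGETLB_ROOT "/test";	/* hypothetical group */

	if (mkdir(child, 0755) && errno != EEXIST) {
		perror("mkdir");
		return 1;
	}
	printf("child usage before rmdir: %ld\n", read_usage(child));

	/*
	 * Before this patch, pre_destroy returned -EBUSY unconditionally;
	 * now the charges are moved to the parent (or to the root cgroup
	 * when there is no parent) and the removal goes through.
	 */
	if (rmdir(child)) {
		perror("rmdir");
		return 1;
	}
	printf("root usage after rmdir:   %ld\n", read_usage(HUGETLB_ROOT));
	return 0;
}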

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Cc: David Rientjes <rientjes@google.com>
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Hillf Danton <dhillf@gmail.com>
Reviewed-by: Michal Hocko <mhocko@suse.cz>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 6d76dcf4
+68 −2
@@ -105,10 +105,76 @@ static void hugetlb_cgroup_destroy(struct cgroup *cgroup)
 	kfree(h_cgroup);
 }
 
+/*
+ * Should be called with hugetlb_lock held.
+ * Since we are holding hugetlb_lock, pages cannot get moved from
+ * active list or uncharged from the cgroup, So no need to get
+ * page reference and test for page active here. This function
+ * cannot fail.
+ */
+static void hugetlb_cgroup_move_parent(int idx, struct cgroup *cgroup,
+				       struct page *page)
+{
+	int csize;
+	struct res_counter *counter;
+	struct res_counter *fail_res;
+	struct hugetlb_cgroup *page_hcg;
+	struct hugetlb_cgroup *h_cg   = hugetlb_cgroup_from_cgroup(cgroup);
+	struct hugetlb_cgroup *parent = parent_hugetlb_cgroup(cgroup);
+
+	page_hcg = hugetlb_cgroup_from_page(page);
+	/*
+	 * We can have pages in active list without any cgroup
+	 * ie, hugepage with less than 3 pages. We can safely
+	 * ignore those pages.
+	 */
+	if (!page_hcg || page_hcg != h_cg)
+		goto out;
+
+	csize = PAGE_SIZE << compound_order(page);
+	if (!parent) {
+		parent = root_h_cgroup;
+		/* root has no limit */
+		res_counter_charge_nofail(&parent->hugepage[idx],
+					  csize, &fail_res);
+	}
+	counter = &h_cg->hugepage[idx];
+	res_counter_uncharge_until(counter, counter->parent, csize);
+
+	set_hugetlb_cgroup(page, parent);
+out:
+	return;
+}
+
+/*
+ * Force the hugetlb cgroup to empty the hugetlb resources by moving them to
+ * the parent cgroup.
+ */
 static int hugetlb_cgroup_pre_destroy(struct cgroup *cgroup)
 {
-	/* We will add the cgroup removal support in later patches */
-	return -EBUSY;
+	struct hstate *h;
+	struct page *page;
+	int ret = 0, idx = 0;
+
+	do {
+		if (cgroup_task_count(cgroup) ||
+		    !list_empty(&cgroup->children)) {
+			ret = -EBUSY;
+			goto out;
+		}
+		for_each_hstate(h) {
+			spin_lock(&hugetlb_lock);
+			list_for_each_entry(page, &h->hugepage_activelist, lru)
+				hugetlb_cgroup_move_parent(idx, cgroup, page);
+
+			spin_unlock(&hugetlb_lock);
+			idx++;
+		}
+		cond_resched();
+	} while (hugetlb_cgroup_have_usage(cgroup));
+out:
+	return ret;
 }
 
 int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,