Commit ddd69148 authored by Tejun Heo

cgroup: drop unnecessary RCU dancing from __put_css_set()



__put_css_set() does RCU read access on @cgrp across dropping
@cgrp->count so that it can continue accessing @cgrp even if the count
reached zero and destruction of the cgroup commenced.  Given that both
sides - __put_css_set() and cgroup_destroy_locked() - are cold paths, this
is unnecessary.  Just making cgroup_destroy_locked() grab css_set_lock
while checking @cgrp->count is enough.

Remove the RCU read locking from __put_css_set() and make
cgroup_destroy_locked() read-lock css_set_lock when checking
@cgrp->count.  This will also allow removing @cgrp->count.

Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Li Zefan <lizefan@huawei.com>
parent 54766d4a
kernel/cgroup.c  +10 −10
@@ -407,19 +407,13 @@ static void __put_css_set(struct css_set *cset, int taskexit)
 		list_del(&link->cset_link);
 		list_del(&link->cgrp_link);

-		/*
-		 * We may not be holding cgroup_mutex, and if cgrp->count is
-		 * dropped to 0 the cgroup can be destroyed at any time, hence
-		 * rcu_read_lock is used to keep it alive.
-		 */
-		rcu_read_lock();
+		/* @cgrp can't go away while we're holding css_set_lock */
 		if (atomic_dec_and_test(&cgrp->count) &&
 		    notify_on_release(cgrp)) {
 			if (taskexit)
 				set_bit(CGRP_RELEASABLE, &cgrp->flags);
 			check_for_release(cgrp);
 		}
-		rcu_read_unlock();

 		kfree(link);
 	}
@@ -4370,11 +4364,19 @@ static int cgroup_destroy_locked(struct cgroup *cgrp)
 	struct cgroup *parent = cgrp->parent;
 	struct cgroup_event *event, *tmp;
 	struct cgroup_subsys *ss;
+	bool empty;

 	lockdep_assert_held(&d->d_inode->i_mutex);
 	lockdep_assert_held(&cgroup_mutex);

-	if (atomic_read(&cgrp->count) || !list_empty(&cgrp->children))
+	/*
+	 * css_set_lock prevents @cgrp from being removed while
+	 * __put_css_set() is in progress.
+	 */
+	read_lock(&css_set_lock);
+	empty = !atomic_read(&cgrp->count) && list_empty(&cgrp->children);
+	read_unlock(&css_set_lock);
+	if (!empty)
 		return -EBUSY;

 	/*
@@ -5051,8 +5053,6 @@ void cgroup_exit(struct task_struct *tsk, int run_callbacks)

 static void check_for_release(struct cgroup *cgrp)
 {
-	/* All of these checks rely on RCU to keep the cgroup
-	 * structure alive */
 	if (cgroup_is_releasable(cgrp) &&
 	    !atomic_read(&cgrp->count) && list_empty(&cgrp->children)) {
 		/*
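
For readers unfamiliar with the pattern the diff adopts, below is a minimal userspace C model of the new scheme. This is an editor's sketch, not kernel code: the names put_side() and destroy_side() are hypothetical, and a pthread rwlock plus C11 atomics stand in for css_set_lock and atomic_t. The point it illustrates is that the side dropping the count and the side checking it serialize on the same lock, so no RCU grace period is needed to keep the object alive across the dec-and-test.

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_rwlock_t set_lock = PTHREAD_RWLOCK_INITIALIZER;

struct obj {
	atomic_int count;
	bool releasable;
};

/* Mirrors __put_css_set(): the count is dropped while set_lock is held
 * for writing, so a concurrent destroy_side() cannot observe count == 0
 * and tear the object down while we are still using it. */
static void put_side(struct obj *o)
{
	pthread_rwlock_wrlock(&set_lock);
	if (atomic_fetch_sub(&o->count, 1) == 1)
		o->releasable = true;	/* stands in for check_for_release() */
	pthread_rwlock_unlock(&set_lock);
}

/* Mirrors cgroup_destroy_locked(): read-lock set_lock while checking
 * the count; returns nonzero (the -EBUSY analogue) if still in use. */
static int destroy_side(struct obj *o)
{
	bool empty;

	pthread_rwlock_rdlock(&set_lock);
	empty = !atomic_load(&o->count);
	pthread_rwlock_unlock(&set_lock);
	if (!empty)
		return -1;
	/* safe to tear @o down here */
	return 0;
}

int main(void)
{
	struct obj o = { .count = 1, .releasable = false };

	printf("destroy before put: %d\n", destroy_side(&o));	/* -1 */
	put_side(&o);
	printf("destroy after put:  %d\n", destroy_side(&o));	/* 0 */
	return 0;
}

Build with cc -pthread. Note that in the kernel the destroy-side check is additionally serialized by cgroup_mutex (asserted by the lockdep annotations above), which the sketch omits.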