Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 52827f38 authored by Linus Torvalds
Browse files
Pull cgroup fixes from Tejun Heo:
 "Three fix patches.  Two are for cgroup / css init failure path.  The
  last one makes css_set_lock irq-safe as the deadline scheduler ends up
  calling put_css_set() from irq context"

* 'for-4.7-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/cgroup:
  cgroup: Disable IRQs while holding css_set_lock
  cgroup: set css->id to -1 during init
  cgroup: remove redundant cleanup in css_create
parents de4921ce 82d6489d
Loading
Loading
Loading
Loading
+76 −72
Original line number Original line Diff line number Diff line
@@ -837,6 +837,8 @@ static void put_css_set_locked(struct css_set *cset)


static void put_css_set(struct css_set *cset)
static void put_css_set(struct css_set *cset)
{
{
	unsigned long flags;

	/*
	/*
	 * Ensure that the refcount doesn't hit zero while any readers
	 * Ensure that the refcount doesn't hit zero while any readers
	 * can see it. Similar to atomic_dec_and_lock(), but for an
	 * can see it. Similar to atomic_dec_and_lock(), but for an
@@ -845,9 +847,9 @@ static void put_css_set(struct css_set *cset)
	if (atomic_add_unless(&cset->refcount, -1, 1))
	if (atomic_add_unless(&cset->refcount, -1, 1))
		return;
		return;


	spin_lock_bh(&css_set_lock);
	spin_lock_irqsave(&css_set_lock, flags);
	put_css_set_locked(cset);
	put_css_set_locked(cset);
	spin_unlock_bh(&css_set_lock);
	spin_unlock_irqrestore(&css_set_lock, flags);
}
}


/*
/*
@@ -1070,11 +1072,11 @@ static struct css_set *find_css_set(struct css_set *old_cset,


	/* First see if we already have a cgroup group that matches
	/* First see if we already have a cgroup group that matches
	 * the desired set */
	 * the desired set */
	spin_lock_bh(&css_set_lock);
	spin_lock_irq(&css_set_lock);
	cset = find_existing_css_set(old_cset, cgrp, template);
	cset = find_existing_css_set(old_cset, cgrp, template);
	if (cset)
	if (cset)
		get_css_set(cset);
		get_css_set(cset);
	spin_unlock_bh(&css_set_lock);
	spin_unlock_irq(&css_set_lock);


	if (cset)
	if (cset)
		return cset;
		return cset;
@@ -1102,7 +1104,7 @@ static struct css_set *find_css_set(struct css_set *old_cset,
	 * find_existing_css_set() */
	 * find_existing_css_set() */
	memcpy(cset->subsys, template, sizeof(cset->subsys));
	memcpy(cset->subsys, template, sizeof(cset->subsys));


	spin_lock_bh(&css_set_lock);
	spin_lock_irq(&css_set_lock);
	/* Add reference counts and links from the new css_set. */
	/* Add reference counts and links from the new css_set. */
	list_for_each_entry(link, &old_cset->cgrp_links, cgrp_link) {
	list_for_each_entry(link, &old_cset->cgrp_links, cgrp_link) {
		struct cgroup *c = link->cgrp;
		struct cgroup *c = link->cgrp;
@@ -1128,7 +1130,7 @@ static struct css_set *find_css_set(struct css_set *old_cset,
		css_get(css);
		css_get(css);
	}
	}


	spin_unlock_bh(&css_set_lock);
	spin_unlock_irq(&css_set_lock);


	return cset;
	return cset;
}
}
@@ -1192,7 +1194,7 @@ static void cgroup_destroy_root(struct cgroup_root *root)
	 * Release all the links from cset_links to this hierarchy's
	 * Release all the links from cset_links to this hierarchy's
	 * root cgroup
	 * root cgroup
	 */
	 */
	spin_lock_bh(&css_set_lock);
	spin_lock_irq(&css_set_lock);


	list_for_each_entry_safe(link, tmp_link, &cgrp->cset_links, cset_link) {
	list_for_each_entry_safe(link, tmp_link, &cgrp->cset_links, cset_link) {
		list_del(&link->cset_link);
		list_del(&link->cset_link);
@@ -1200,7 +1202,7 @@ static void cgroup_destroy_root(struct cgroup_root *root)
		kfree(link);
		kfree(link);
	}
	}


	spin_unlock_bh(&css_set_lock);
	spin_unlock_irq(&css_set_lock);


	if (!list_empty(&root->root_list)) {
	if (!list_empty(&root->root_list)) {
		list_del(&root->root_list);
		list_del(&root->root_list);
@@ -1600,11 +1602,11 @@ static int rebind_subsystems(struct cgroup_root *dst_root, u16 ss_mask)
		ss->root = dst_root;
		ss->root = dst_root;
		css->cgroup = dcgrp;
		css->cgroup = dcgrp;


		spin_lock_bh(&css_set_lock);
		spin_lock_irq(&css_set_lock);
		hash_for_each(css_set_table, i, cset, hlist)
		hash_for_each(css_set_table, i, cset, hlist)
			list_move_tail(&cset->e_cset_node[ss->id],
			list_move_tail(&cset->e_cset_node[ss->id],
				       &dcgrp->e_csets[ss->id]);
				       &dcgrp->e_csets[ss->id]);
		spin_unlock_bh(&css_set_lock);
		spin_unlock_irq(&css_set_lock);


		/* default hierarchy doesn't enable controllers by default */
		/* default hierarchy doesn't enable controllers by default */
		dst_root->subsys_mask |= 1 << ssid;
		dst_root->subsys_mask |= 1 << ssid;
@@ -1640,10 +1642,10 @@ static int cgroup_show_path(struct seq_file *sf, struct kernfs_node *kf_node,
	if (!buf)
	if (!buf)
		return -ENOMEM;
		return -ENOMEM;


	spin_lock_bh(&css_set_lock);
	spin_lock_irq(&css_set_lock);
	ns_cgroup = current_cgns_cgroup_from_root(kf_cgroot);
	ns_cgroup = current_cgns_cgroup_from_root(kf_cgroot);
	len = kernfs_path_from_node(kf_node, ns_cgroup->kn, buf, PATH_MAX);
	len = kernfs_path_from_node(kf_node, ns_cgroup->kn, buf, PATH_MAX);
	spin_unlock_bh(&css_set_lock);
	spin_unlock_irq(&css_set_lock);


	if (len >= PATH_MAX)
	if (len >= PATH_MAX)
		len = -ERANGE;
		len = -ERANGE;
@@ -1897,7 +1899,7 @@ static void cgroup_enable_task_cg_lists(void)
{
{
	struct task_struct *p, *g;
	struct task_struct *p, *g;


	spin_lock_bh(&css_set_lock);
	spin_lock_irq(&css_set_lock);


	if (use_task_css_set_links)
	if (use_task_css_set_links)
		goto out_unlock;
		goto out_unlock;
@@ -1922,8 +1924,12 @@ static void cgroup_enable_task_cg_lists(void)
		 * entry won't be deleted though the process has exited.
		 * entry won't be deleted though the process has exited.
		 * Do it while holding siglock so that we don't end up
		 * Do it while holding siglock so that we don't end up
		 * racing against cgroup_exit().
		 * racing against cgroup_exit().
		 *
		 * Interrupts were already disabled while acquiring
		 * the css_set_lock, so we do not need to disable it
		 * again when acquiring the sighand->siglock here.
		 */
		 */
		spin_lock_irq(&p->sighand->siglock);
		spin_lock(&p->sighand->siglock);
		if (!(p->flags & PF_EXITING)) {
		if (!(p->flags & PF_EXITING)) {
			struct css_set *cset = task_css_set(p);
			struct css_set *cset = task_css_set(p);


@@ -1932,11 +1938,11 @@ static void cgroup_enable_task_cg_lists(void)
			list_add_tail(&p->cg_list, &cset->tasks);
			list_add_tail(&p->cg_list, &cset->tasks);
			get_css_set(cset);
			get_css_set(cset);
		}
		}
		spin_unlock_irq(&p->sighand->siglock);
		spin_unlock(&p->sighand->siglock);
	} while_each_thread(g, p);
	} while_each_thread(g, p);
	read_unlock(&tasklist_lock);
	read_unlock(&tasklist_lock);
out_unlock:
out_unlock:
	spin_unlock_bh(&css_set_lock);
	spin_unlock_irq(&css_set_lock);
}
}


static void init_cgroup_housekeeping(struct cgroup *cgrp)
static void init_cgroup_housekeeping(struct cgroup *cgrp)
@@ -2043,13 +2049,13 @@ static int cgroup_setup_root(struct cgroup_root *root, u16 ss_mask)
	 * Link the root cgroup in this hierarchy into all the css_set
	 * Link the root cgroup in this hierarchy into all the css_set
	 * objects.
	 * objects.
	 */
	 */
	spin_lock_bh(&css_set_lock);
	spin_lock_irq(&css_set_lock);
	hash_for_each(css_set_table, i, cset, hlist) {
	hash_for_each(css_set_table, i, cset, hlist) {
		link_css_set(&tmp_links, cset, root_cgrp);
		link_css_set(&tmp_links, cset, root_cgrp);
		if (css_set_populated(cset))
		if (css_set_populated(cset))
			cgroup_update_populated(root_cgrp, true);
			cgroup_update_populated(root_cgrp, true);
	}
	}
	spin_unlock_bh(&css_set_lock);
	spin_unlock_irq(&css_set_lock);


	BUG_ON(!list_empty(&root_cgrp->self.children));
	BUG_ON(!list_empty(&root_cgrp->self.children));
	BUG_ON(atomic_read(&root->nr_cgrps) != 1);
	BUG_ON(atomic_read(&root->nr_cgrps) != 1);
@@ -2256,11 +2262,11 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type,
		struct cgroup *cgrp;
		struct cgroup *cgrp;


		mutex_lock(&cgroup_mutex);
		mutex_lock(&cgroup_mutex);
		spin_lock_bh(&css_set_lock);
		spin_lock_irq(&css_set_lock);


		cgrp = cset_cgroup_from_root(ns->root_cset, root);
		cgrp = cset_cgroup_from_root(ns->root_cset, root);


		spin_unlock_bh(&css_set_lock);
		spin_unlock_irq(&css_set_lock);
		mutex_unlock(&cgroup_mutex);
		mutex_unlock(&cgroup_mutex);


		nsdentry = kernfs_node_dentry(cgrp->kn, dentry->d_sb);
		nsdentry = kernfs_node_dentry(cgrp->kn, dentry->d_sb);
@@ -2337,11 +2343,11 @@ char *cgroup_path_ns(struct cgroup *cgrp, char *buf, size_t buflen,
	char *ret;
	char *ret;


	mutex_lock(&cgroup_mutex);
	mutex_lock(&cgroup_mutex);
	spin_lock_bh(&css_set_lock);
	spin_lock_irq(&css_set_lock);


	ret = cgroup_path_ns_locked(cgrp, buf, buflen, ns);
	ret = cgroup_path_ns_locked(cgrp, buf, buflen, ns);


	spin_unlock_bh(&css_set_lock);
	spin_unlock_irq(&css_set_lock);
	mutex_unlock(&cgroup_mutex);
	mutex_unlock(&cgroup_mutex);


	return ret;
	return ret;
@@ -2369,7 +2375,7 @@ char *task_cgroup_path(struct task_struct *task, char *buf, size_t buflen)
	char *path = NULL;
	char *path = NULL;


	mutex_lock(&cgroup_mutex);
	mutex_lock(&cgroup_mutex);
	spin_lock_bh(&css_set_lock);
	spin_lock_irq(&css_set_lock);


	root = idr_get_next(&cgroup_hierarchy_idr, &hierarchy_id);
	root = idr_get_next(&cgroup_hierarchy_idr, &hierarchy_id);


@@ -2382,7 +2388,7 @@ char *task_cgroup_path(struct task_struct *task, char *buf, size_t buflen)
			path = buf;
			path = buf;
	}
	}


	spin_unlock_bh(&css_set_lock);
	spin_unlock_irq(&css_set_lock);
	mutex_unlock(&cgroup_mutex);
	mutex_unlock(&cgroup_mutex);
	return path;
	return path;
}
}
@@ -2557,7 +2563,7 @@ static int cgroup_taskset_migrate(struct cgroup_taskset *tset,
	 * the new cgroup.  There are no failure cases after here, so this
	 * the new cgroup.  There are no failure cases after here, so this
	 * is the commit point.
	 * is the commit point.
	 */
	 */
	spin_lock_bh(&css_set_lock);
	spin_lock_irq(&css_set_lock);
	list_for_each_entry(cset, &tset->src_csets, mg_node) {
	list_for_each_entry(cset, &tset->src_csets, mg_node) {
		list_for_each_entry_safe(task, tmp_task, &cset->mg_tasks, cg_list) {
		list_for_each_entry_safe(task, tmp_task, &cset->mg_tasks, cg_list) {
			struct css_set *from_cset = task_css_set(task);
			struct css_set *from_cset = task_css_set(task);
@@ -2568,7 +2574,7 @@ static int cgroup_taskset_migrate(struct cgroup_taskset *tset,
			put_css_set_locked(from_cset);
			put_css_set_locked(from_cset);
		}
		}
	}
	}
	spin_unlock_bh(&css_set_lock);
	spin_unlock_irq(&css_set_lock);


	/*
	/*
	 * Migration is committed, all target tasks are now on dst_csets.
	 * Migration is committed, all target tasks are now on dst_csets.
@@ -2597,13 +2603,13 @@ static int cgroup_taskset_migrate(struct cgroup_taskset *tset,
		}
		}
	} while_each_subsys_mask();
	} while_each_subsys_mask();
out_release_tset:
out_release_tset:
	spin_lock_bh(&css_set_lock);
	spin_lock_irq(&css_set_lock);
	list_splice_init(&tset->dst_csets, &tset->src_csets);
	list_splice_init(&tset->dst_csets, &tset->src_csets);
	list_for_each_entry_safe(cset, tmp_cset, &tset->src_csets, mg_node) {
	list_for_each_entry_safe(cset, tmp_cset, &tset->src_csets, mg_node) {
		list_splice_tail_init(&cset->mg_tasks, &cset->tasks);
		list_splice_tail_init(&cset->mg_tasks, &cset->tasks);
		list_del_init(&cset->mg_node);
		list_del_init(&cset->mg_node);
	}
	}
	spin_unlock_bh(&css_set_lock);
	spin_unlock_irq(&css_set_lock);
	return ret;
	return ret;
}
}


@@ -2634,7 +2640,7 @@ static void cgroup_migrate_finish(struct list_head *preloaded_csets)


	lockdep_assert_held(&cgroup_mutex);
	lockdep_assert_held(&cgroup_mutex);


	spin_lock_bh(&css_set_lock);
	spin_lock_irq(&css_set_lock);
	list_for_each_entry_safe(cset, tmp_cset, preloaded_csets, mg_preload_node) {
	list_for_each_entry_safe(cset, tmp_cset, preloaded_csets, mg_preload_node) {
		cset->mg_src_cgrp = NULL;
		cset->mg_src_cgrp = NULL;
		cset->mg_dst_cgrp = NULL;
		cset->mg_dst_cgrp = NULL;
@@ -2642,7 +2648,7 @@ static void cgroup_migrate_finish(struct list_head *preloaded_csets)
		list_del_init(&cset->mg_preload_node);
		list_del_init(&cset->mg_preload_node);
		put_css_set_locked(cset);
		put_css_set_locked(cset);
	}
	}
	spin_unlock_bh(&css_set_lock);
	spin_unlock_irq(&css_set_lock);
}
}


/**
/**
@@ -2783,7 +2789,7 @@ static int cgroup_migrate(struct task_struct *leader, bool threadgroup,
	 * already PF_EXITING could be freed from underneath us unless we
	 * already PF_EXITING could be freed from underneath us unless we
	 * take an rcu_read_lock.
	 * take an rcu_read_lock.
	 */
	 */
	spin_lock_bh(&css_set_lock);
	spin_lock_irq(&css_set_lock);
	rcu_read_lock();
	rcu_read_lock();
	task = leader;
	task = leader;
	do {
	do {
@@ -2792,7 +2798,7 @@ static int cgroup_migrate(struct task_struct *leader, bool threadgroup,
			break;
			break;
	} while_each_thread(leader, task);
	} while_each_thread(leader, task);
	rcu_read_unlock();
	rcu_read_unlock();
	spin_unlock_bh(&css_set_lock);
	spin_unlock_irq(&css_set_lock);


	return cgroup_taskset_migrate(&tset, root);
	return cgroup_taskset_migrate(&tset, root);
}
}
@@ -2816,7 +2822,7 @@ static int cgroup_attach_task(struct cgroup *dst_cgrp,
		return -EBUSY;
		return -EBUSY;


	/* look up all src csets */
	/* look up all src csets */
	spin_lock_bh(&css_set_lock);
	spin_lock_irq(&css_set_lock);
	rcu_read_lock();
	rcu_read_lock();
	task = leader;
	task = leader;
	do {
	do {
@@ -2826,7 +2832,7 @@ static int cgroup_attach_task(struct cgroup *dst_cgrp,
			break;
			break;
	} while_each_thread(leader, task);
	} while_each_thread(leader, task);
	rcu_read_unlock();
	rcu_read_unlock();
	spin_unlock_bh(&css_set_lock);
	spin_unlock_irq(&css_set_lock);


	/* prepare dst csets and commit */
	/* prepare dst csets and commit */
	ret = cgroup_migrate_prepare_dst(&preloaded_csets);
	ret = cgroup_migrate_prepare_dst(&preloaded_csets);
@@ -2859,9 +2865,9 @@ static int cgroup_procs_write_permission(struct task_struct *task,
		struct cgroup *cgrp;
		struct cgroup *cgrp;
		struct inode *inode;
		struct inode *inode;


		spin_lock_bh(&css_set_lock);
		spin_lock_irq(&css_set_lock);
		cgrp = task_cgroup_from_root(task, &cgrp_dfl_root);
		cgrp = task_cgroup_from_root(task, &cgrp_dfl_root);
		spin_unlock_bh(&css_set_lock);
		spin_unlock_irq(&css_set_lock);


		while (!cgroup_is_descendant(dst_cgrp, cgrp))
		while (!cgroup_is_descendant(dst_cgrp, cgrp))
			cgrp = cgroup_parent(cgrp);
			cgrp = cgroup_parent(cgrp);
@@ -2962,9 +2968,9 @@ int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk)
		if (root == &cgrp_dfl_root)
		if (root == &cgrp_dfl_root)
			continue;
			continue;


		spin_lock_bh(&css_set_lock);
		spin_lock_irq(&css_set_lock);
		from_cgrp = task_cgroup_from_root(from, root);
		from_cgrp = task_cgroup_from_root(from, root);
		spin_unlock_bh(&css_set_lock);
		spin_unlock_irq(&css_set_lock);


		retval = cgroup_attach_task(from_cgrp, tsk, false);
		retval = cgroup_attach_task(from_cgrp, tsk, false);
		if (retval)
		if (retval)
@@ -3080,7 +3086,7 @@ static int cgroup_update_dfl_csses(struct cgroup *cgrp)
	percpu_down_write(&cgroup_threadgroup_rwsem);
	percpu_down_write(&cgroup_threadgroup_rwsem);


	/* look up all csses currently attached to @cgrp's subtree */
	/* look up all csses currently attached to @cgrp's subtree */
	spin_lock_bh(&css_set_lock);
	spin_lock_irq(&css_set_lock);
	cgroup_for_each_live_descendant_pre(dsct, d_css, cgrp) {
	cgroup_for_each_live_descendant_pre(dsct, d_css, cgrp) {
		struct cgrp_cset_link *link;
		struct cgrp_cset_link *link;


@@ -3088,14 +3094,14 @@ static int cgroup_update_dfl_csses(struct cgroup *cgrp)
			cgroup_migrate_add_src(link->cset, dsct,
			cgroup_migrate_add_src(link->cset, dsct,
					       &preloaded_csets);
					       &preloaded_csets);
	}
	}
	spin_unlock_bh(&css_set_lock);
	spin_unlock_irq(&css_set_lock);


	/* NULL dst indicates self on default hierarchy */
	/* NULL dst indicates self on default hierarchy */
	ret = cgroup_migrate_prepare_dst(&preloaded_csets);
	ret = cgroup_migrate_prepare_dst(&preloaded_csets);
	if (ret)
	if (ret)
		goto out_finish;
		goto out_finish;


	spin_lock_bh(&css_set_lock);
	spin_lock_irq(&css_set_lock);
	list_for_each_entry(src_cset, &preloaded_csets, mg_preload_node) {
	list_for_each_entry(src_cset, &preloaded_csets, mg_preload_node) {
		struct task_struct *task, *ntask;
		struct task_struct *task, *ntask;


@@ -3107,7 +3113,7 @@ static int cgroup_update_dfl_csses(struct cgroup *cgrp)
		list_for_each_entry_safe(task, ntask, &src_cset->tasks, cg_list)
		list_for_each_entry_safe(task, ntask, &src_cset->tasks, cg_list)
			cgroup_taskset_add(task, &tset);
			cgroup_taskset_add(task, &tset);
	}
	}
	spin_unlock_bh(&css_set_lock);
	spin_unlock_irq(&css_set_lock);


	ret = cgroup_taskset_migrate(&tset, cgrp->root);
	ret = cgroup_taskset_migrate(&tset, cgrp->root);
out_finish:
out_finish:
@@ -3908,10 +3914,10 @@ static int cgroup_task_count(const struct cgroup *cgrp)
	int count = 0;
	int count = 0;
	struct cgrp_cset_link *link;
	struct cgrp_cset_link *link;


	spin_lock_bh(&css_set_lock);
	spin_lock_irq(&css_set_lock);
	list_for_each_entry(link, &cgrp->cset_links, cset_link)
	list_for_each_entry(link, &cgrp->cset_links, cset_link)
		count += atomic_read(&link->cset->refcount);
		count += atomic_read(&link->cset->refcount);
	spin_unlock_bh(&css_set_lock);
	spin_unlock_irq(&css_set_lock);
	return count;
	return count;
}
}


@@ -4249,7 +4255,7 @@ void css_task_iter_start(struct cgroup_subsys_state *css,


	memset(it, 0, sizeof(*it));
	memset(it, 0, sizeof(*it));


	spin_lock_bh(&css_set_lock);
	spin_lock_irq(&css_set_lock);


	it->ss = css->ss;
	it->ss = css->ss;


@@ -4262,7 +4268,7 @@ void css_task_iter_start(struct cgroup_subsys_state *css,


	css_task_iter_advance_css_set(it);
	css_task_iter_advance_css_set(it);


	spin_unlock_bh(&css_set_lock);
	spin_unlock_irq(&css_set_lock);
}
}


/**
/**
@@ -4280,7 +4286,7 @@ struct task_struct *css_task_iter_next(struct css_task_iter *it)
		it->cur_task = NULL;
		it->cur_task = NULL;
	}
	}


	spin_lock_bh(&css_set_lock);
	spin_lock_irq(&css_set_lock);


	if (it->task_pos) {
	if (it->task_pos) {
		it->cur_task = list_entry(it->task_pos, struct task_struct,
		it->cur_task = list_entry(it->task_pos, struct task_struct,
@@ -4289,7 +4295,7 @@ struct task_struct *css_task_iter_next(struct css_task_iter *it)
		css_task_iter_advance(it);
		css_task_iter_advance(it);
	}
	}


	spin_unlock_bh(&css_set_lock);
	spin_unlock_irq(&css_set_lock);


	return it->cur_task;
	return it->cur_task;
}
}
@@ -4303,10 +4309,10 @@ struct task_struct *css_task_iter_next(struct css_task_iter *it)
void css_task_iter_end(struct css_task_iter *it)
void css_task_iter_end(struct css_task_iter *it)
{
{
	if (it->cur_cset) {
	if (it->cur_cset) {
		spin_lock_bh(&css_set_lock);
		spin_lock_irq(&css_set_lock);
		list_del(&it->iters_node);
		list_del(&it->iters_node);
		put_css_set_locked(it->cur_cset);
		put_css_set_locked(it->cur_cset);
		spin_unlock_bh(&css_set_lock);
		spin_unlock_irq(&css_set_lock);
	}
	}


	if (it->cur_task)
	if (it->cur_task)
@@ -4338,10 +4344,10 @@ int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from)
	mutex_lock(&cgroup_mutex);
	mutex_lock(&cgroup_mutex);


	/* all tasks in @from are being moved, all csets are source */
	/* all tasks in @from are being moved, all csets are source */
	spin_lock_bh(&css_set_lock);
	spin_lock_irq(&css_set_lock);
	list_for_each_entry(link, &from->cset_links, cset_link)
	list_for_each_entry(link, &from->cset_links, cset_link)
		cgroup_migrate_add_src(link->cset, to, &preloaded_csets);
		cgroup_migrate_add_src(link->cset, to, &preloaded_csets);
	spin_unlock_bh(&css_set_lock);
	spin_unlock_irq(&css_set_lock);


	ret = cgroup_migrate_prepare_dst(&preloaded_csets);
	ret = cgroup_migrate_prepare_dst(&preloaded_csets);
	if (ret)
	if (ret)
@@ -5063,6 +5069,7 @@ static void init_and_link_css(struct cgroup_subsys_state *css,
	memset(css, 0, sizeof(*css));
	memset(css, 0, sizeof(*css));
	css->cgroup = cgrp;
	css->cgroup = cgrp;
	css->ss = ss;
	css->ss = ss;
	css->id = -1;
	INIT_LIST_HEAD(&css->sibling);
	INIT_LIST_HEAD(&css->sibling);
	INIT_LIST_HEAD(&css->children);
	INIT_LIST_HEAD(&css->children);
	css->serial_nr = css_serial_nr_next++;
	css->serial_nr = css_serial_nr_next++;
@@ -5150,7 +5157,7 @@ static struct cgroup_subsys_state *css_create(struct cgroup *cgrp,


	err = cgroup_idr_alloc(&ss->css_idr, NULL, 2, 0, GFP_KERNEL);
	err = cgroup_idr_alloc(&ss->css_idr, NULL, 2, 0, GFP_KERNEL);
	if (err < 0)
	if (err < 0)
		goto err_free_percpu_ref;
		goto err_free_css;
	css->id = err;
	css->id = err;


	/* @css is ready to be brought online now, make it visible */
	/* @css is ready to be brought online now, make it visible */
@@ -5174,9 +5181,6 @@ static struct cgroup_subsys_state *css_create(struct cgroup *cgrp,


err_list_del:
err_list_del:
	list_del_rcu(&css->sibling);
	list_del_rcu(&css->sibling);
	cgroup_idr_remove(&ss->css_idr, css->id);
err_free_percpu_ref:
	percpu_ref_exit(&css->refcnt);
err_free_css:
err_free_css:
	call_rcu(&css->rcu_head, css_free_rcu_fn);
	call_rcu(&css->rcu_head, css_free_rcu_fn);
	return ERR_PTR(err);
	return ERR_PTR(err);
@@ -5451,10 +5455,10 @@ static int cgroup_destroy_locked(struct cgroup *cgrp)
	 */
	 */
	cgrp->self.flags &= ~CSS_ONLINE;
	cgrp->self.flags &= ~CSS_ONLINE;


	spin_lock_bh(&css_set_lock);
	spin_lock_irq(&css_set_lock);
	list_for_each_entry(link, &cgrp->cset_links, cset_link)
	list_for_each_entry(link, &cgrp->cset_links, cset_link)
		link->cset->dead = true;
		link->cset->dead = true;
	spin_unlock_bh(&css_set_lock);
	spin_unlock_irq(&css_set_lock);


	/* initiate massacre of all css's */
	/* initiate massacre of all css's */
	for_each_css(css, ssid, cgrp)
	for_each_css(css, ssid, cgrp)
@@ -5725,7 +5729,7 @@ int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns,
		goto out;
		goto out;


	mutex_lock(&cgroup_mutex);
	mutex_lock(&cgroup_mutex);
	spin_lock_bh(&css_set_lock);
	spin_lock_irq(&css_set_lock);


	for_each_root(root) {
	for_each_root(root) {
		struct cgroup_subsys *ss;
		struct cgroup_subsys *ss;
@@ -5778,7 +5782,7 @@ int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns,


	retval = 0;
	retval = 0;
out_unlock:
out_unlock:
	spin_unlock_bh(&css_set_lock);
	spin_unlock_irq(&css_set_lock);
	mutex_unlock(&cgroup_mutex);
	mutex_unlock(&cgroup_mutex);
	kfree(buf);
	kfree(buf);
out:
out:
@@ -5923,13 +5927,13 @@ void cgroup_post_fork(struct task_struct *child)
	if (use_task_css_set_links) {
	if (use_task_css_set_links) {
		struct css_set *cset;
		struct css_set *cset;


		spin_lock_bh(&css_set_lock);
		spin_lock_irq(&css_set_lock);
		cset = task_css_set(current);
		cset = task_css_set(current);
		if (list_empty(&child->cg_list)) {
		if (list_empty(&child->cg_list)) {
			get_css_set(cset);
			get_css_set(cset);
			css_set_move_task(child, NULL, cset, false);
			css_set_move_task(child, NULL, cset, false);
		}
		}
		spin_unlock_bh(&css_set_lock);
		spin_unlock_irq(&css_set_lock);
	}
	}


	/*
	/*
@@ -5974,9 +5978,9 @@ void cgroup_exit(struct task_struct *tsk)
	cset = task_css_set(tsk);
	cset = task_css_set(tsk);


	if (!list_empty(&tsk->cg_list)) {
	if (!list_empty(&tsk->cg_list)) {
		spin_lock_bh(&css_set_lock);
		spin_lock_irq(&css_set_lock);
		css_set_move_task(tsk, cset, NULL, false);
		css_set_move_task(tsk, cset, NULL, false);
		spin_unlock_bh(&css_set_lock);
		spin_unlock_irq(&css_set_lock);
	} else {
	} else {
		get_css_set(cset);
		get_css_set(cset);
	}
	}
@@ -6044,9 +6048,9 @@ static void cgroup_release_agent(struct work_struct *work)
	if (!pathbuf || !agentbuf)
	if (!pathbuf || !agentbuf)
		goto out;
		goto out;


	spin_lock_bh(&css_set_lock);
	spin_lock_irq(&css_set_lock);
	path = cgroup_path_ns_locked(cgrp, pathbuf, PATH_MAX, &init_cgroup_ns);
	path = cgroup_path_ns_locked(cgrp, pathbuf, PATH_MAX, &init_cgroup_ns);
	spin_unlock_bh(&css_set_lock);
	spin_unlock_irq(&css_set_lock);
	if (!path)
	if (!path)
		goto out;
		goto out;


@@ -6306,12 +6310,12 @@ struct cgroup_namespace *copy_cgroup_ns(unsigned long flags,
		return ERR_PTR(-EPERM);
		return ERR_PTR(-EPERM);


	mutex_lock(&cgroup_mutex);
	mutex_lock(&cgroup_mutex);
	spin_lock_bh(&css_set_lock);
	spin_lock_irq(&css_set_lock);


	cset = task_css_set(current);
	cset = task_css_set(current);
	get_css_set(cset);
	get_css_set(cset);


	spin_unlock_bh(&css_set_lock);
	spin_unlock_irq(&css_set_lock);
	mutex_unlock(&cgroup_mutex);
	mutex_unlock(&cgroup_mutex);


	new_ns = alloc_cgroup_ns();
	new_ns = alloc_cgroup_ns();
@@ -6435,7 +6439,7 @@ static int current_css_set_cg_links_read(struct seq_file *seq, void *v)
	if (!name_buf)
	if (!name_buf)
		return -ENOMEM;
		return -ENOMEM;


	spin_lock_bh(&css_set_lock);
	spin_lock_irq(&css_set_lock);
	rcu_read_lock();
	rcu_read_lock();
	cset = rcu_dereference(current->cgroups);
	cset = rcu_dereference(current->cgroups);
	list_for_each_entry(link, &cset->cgrp_links, cgrp_link) {
	list_for_each_entry(link, &cset->cgrp_links, cgrp_link) {
@@ -6446,7 +6450,7 @@ static int current_css_set_cg_links_read(struct seq_file *seq, void *v)
			   c->root->hierarchy_id, name_buf);
			   c->root->hierarchy_id, name_buf);
	}
	}
	rcu_read_unlock();
	rcu_read_unlock();
	spin_unlock_bh(&css_set_lock);
	spin_unlock_irq(&css_set_lock);
	kfree(name_buf);
	kfree(name_buf);
	return 0;
	return 0;
}
}
@@ -6457,7 +6461,7 @@ static int cgroup_css_links_read(struct seq_file *seq, void *v)
	struct cgroup_subsys_state *css = seq_css(seq);
	struct cgroup_subsys_state *css = seq_css(seq);
	struct cgrp_cset_link *link;
	struct cgrp_cset_link *link;


	spin_lock_bh(&css_set_lock);
	spin_lock_irq(&css_set_lock);
	list_for_each_entry(link, &css->cgroup->cset_links, cset_link) {
	list_for_each_entry(link, &css->cgroup->cset_links, cset_link) {
		struct css_set *cset = link->cset;
		struct css_set *cset = link->cset;
		struct task_struct *task;
		struct task_struct *task;
@@ -6480,7 +6484,7 @@ static int cgroup_css_links_read(struct seq_file *seq, void *v)
	overflow:
	overflow:
		seq_puts(seq, "  ...\n");
		seq_puts(seq, "  ...\n");
	}
	}
	spin_unlock_bh(&css_set_lock);
	spin_unlock_irq(&css_set_lock);
	return 0;
	return 0;
}
}