Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 42aee6c4 authored by Hugh Dickins, committed by Linus Torvalds
Browse files

cgroup: revert ss_id_lock to spinlock



Commit c1e2ee2d ("memcg: replace ss->id_lock with a rwlock") has now
been seen to cause the unfair behavior we should have expected from
converting a spinlock to an rwlock: softlockup in cgroup_mkdir(), whose
get_new_cssid() is waiting for the wlock, while there are 19 tasks using
the rlock in css_get_next() to get on with their memcg workload (in an
artificial test, admittedly).  Yet lib/idr.c was made suitable for RCU
way back: revert that commit, restoring ss->id_lock to a spinlock.

Signed-off-by: Hugh Dickins <hughd@google.com>
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Acked-by: Li Zefan <lizf@cn.fujitsu.com>
Cc: Eric Dumazet <eric.dumazet@gmail.com>
Acked-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 9f7de827
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -498,7 +498,7 @@ struct cgroup_subsys {
	struct list_head sibling;
	/* used when use_id == true */
	struct idr idr;
-	rwlock_t id_lock;
+	spinlock_t id_lock;

	/* should be defined only by modular subsystems */
	struct module *module;
+9 −9
Original line number Diff line number Diff line
@@ -4885,9 +4885,9 @@ void free_css_id(struct cgroup_subsys *ss, struct cgroup_subsys_state *css)

	rcu_assign_pointer(id->css, NULL);
	rcu_assign_pointer(css->id, NULL);
-	write_lock(&ss->id_lock);
+	spin_lock(&ss->id_lock);
	idr_remove(&ss->idr, id->id);
-	write_unlock(&ss->id_lock);
+	spin_unlock(&ss->id_lock);
	kfree_rcu(id, rcu_head);
}
EXPORT_SYMBOL_GPL(free_css_id);
@@ -4913,10 +4913,10 @@ static struct css_id *get_new_cssid(struct cgroup_subsys *ss, int depth)
		error = -ENOMEM;
		goto err_out;
	}
-	write_lock(&ss->id_lock);
+	spin_lock(&ss->id_lock);
	/* Don't use 0. allocates an ID of 1-65535 */
	error = idr_get_new_above(&ss->idr, newid, 1, &myid);
-	write_unlock(&ss->id_lock);
+	spin_unlock(&ss->id_lock);

	/* Returns error when there are no free spaces for new ID.*/
	if (error) {
@@ -4931,9 +4931,9 @@ static struct css_id *get_new_cssid(struct cgroup_subsys *ss, int depth)
	return newid;
remove_idr:
	error = -ENOSPC;
-	write_lock(&ss->id_lock);
+	spin_lock(&ss->id_lock);
	idr_remove(&ss->idr, myid);
-	write_unlock(&ss->id_lock);
+	spin_unlock(&ss->id_lock);
err_out:
	kfree(newid);
	return ERR_PTR(error);
@@ -4945,7 +4945,7 @@ static int __init_or_module cgroup_init_idr(struct cgroup_subsys *ss,
{
	struct css_id *newid;

-	rwlock_init(&ss->id_lock);
+	spin_lock_init(&ss->id_lock);
	idr_init(&ss->idr);

	newid = get_new_cssid(ss, 0);
@@ -5040,9 +5040,9 @@ css_get_next(struct cgroup_subsys *ss, int id,
		 * scan next entry from bitmap(tree), tmpid is updated after
		 * idr_get_next().
		 */
-		read_lock(&ss->id_lock);
+		spin_lock(&ss->id_lock);
		tmp = idr_get_next(&ss->idr, &tmpid);
-		read_unlock(&ss->id_lock);
+		spin_unlock(&ss->id_lock);

		if (!tmp)
			break;