Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit e4463009 authored by Tejun Heo's avatar Tejun Heo Committed by Greg Kroah-Hartman
Browse files

cgroup: Fix threadgroup_rwsem <-> cpus_read_lock() deadlock



commit 4f7e7236435ca0abe005c674ebd6892c6e83aeb3 upstream.

Add #include <linux/cpu.h> to avoid compile error on some architectures.

commit 9a3284fad42f6 ("cgroup: Optimize single thread migration") and
commit 671c11f0619e5 ("cgroup: Elide write-locking threadgroup_rwsem
when updating csses on an empty subtree") are not backported. Therefore the
input parameter of cgroup_attach_lock/cgroup_attach_unlock is dropped.

original commit message:

Bringing up a CPU may involve creating and destroying tasks which requires
read-locking threadgroup_rwsem, so threadgroup_rwsem nests inside
cpus_read_lock(). However, cpuset's ->attach(), which may be called with
threadgroup_rwsem write-locked, also wants to disable CPU hotplug and
acquires cpus_read_lock(), leading to a deadlock.

Fix it by guaranteeing that ->attach() is always called with CPU hotplug
disabled and removing cpus_read_lock() call from cpuset_attach().

Signed-off-by: default avatarTejun Heo <tj@kernel.org>
Reviewed-and-tested-by: default avatarImran Khan <imran.f.khan@oracle.com>
Reported-and-tested-by: default avatarXuewen Yan <xuewen.yan@unisoc.com>
Fixes: 05c7b7a92cc8 ("cgroup/cpuset: Fix a race between cpuset_attach() and cpu hotplug")
Cc: stable@vger.kernel.org # v5.17+
Signed-off-by: default avatarCai Xinchen <caixinchen1@huawei.com>
Signed-off-by: default avatarGreg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 22426258
Loading
Loading
Loading
Loading
+45 −5
Original line number Diff line number Diff line
@@ -55,6 +55,7 @@
#include <linux/nsproxy.h>
#include <linux/file.h>
#include <linux/sched/cputime.h>
#include <linux/cpu.h>
#include <net/sock.h>

#define CREATE_TRACE_POINTS
@@ -2209,6 +2210,45 @@ int task_cgroup_path(struct task_struct *task, char *buf, size_t buflen)
}
EXPORT_SYMBOL_GPL(task_cgroup_path);

/**
 * cgroup_attach_lock - Lock for ->attach()
 *
 * cgroup migration sometimes needs to stabilize threadgroups against forks and
 * exits by write-locking cgroup_threadgroup_rwsem. However, some ->attach()
 * implementations (e.g. cpuset), also need to disable CPU hotplug.
 * Unfortunately, letting ->attach() operations acquire cpus_read_lock() can
 * lead to deadlocks.
 *
 * Bringing up a CPU may involve creating and destroying tasks which requires
 * read-locking threadgroup_rwsem, so threadgroup_rwsem nests inside
 * cpus_read_lock(). If we call an ->attach() which acquires the cpus lock while
 * write-locking threadgroup_rwsem, the locking order is reversed and we end up
 * waiting for an on-going CPU hotplug operation which in turn is waiting for
 * the threadgroup_rwsem to be released to create new tasks. For more details:
 *
 *   http://lkml.kernel.org/r/20220711174629.uehfmqegcwn2lqzu@wubuntu
 *
 * Resolve the situation by always acquiring cpus_read_lock() before
 * write-locking cgroup_threadgroup_rwsem. This allows ->attach() to assume that
 * CPU hotplug is disabled on entry.
 *
 * Note: unlike the upstream commit, this backport takes no @lock_threadgroup
 * parameter — the rwsem write-lock is unconditional because the elision
 * optimizations were not backported (see commit message).
 */
static void cgroup_attach_lock(void)
{
	get_online_cpus();
	percpu_down_write(&cgroup_threadgroup_rwsem);
}

/**
 * cgroup_attach_unlock - Undo cgroup_attach_lock()
 *
 * Releases in the reverse order of cgroup_attach_lock(): drop the
 * threadgroup rwsem write-lock first, then re-enable CPU hotplug.
 * Takes no parameters in this backport — see cgroup_attach_lock().
 */
static void cgroup_attach_unlock(void)
{
	percpu_up_write(&cgroup_threadgroup_rwsem);
	put_online_cpus();
}

/**
 * cgroup_migrate_add_task - add a migration target task to a migration context
 * @task: target task
@@ -2694,7 +2734,7 @@ struct task_struct *cgroup_procs_write_start(char *buf, bool threadgroup)
	if (kstrtoint(strstrip(buf), 0, &pid) || pid < 0)
		return ERR_PTR(-EINVAL);

	percpu_down_write(&cgroup_threadgroup_rwsem);
	cgroup_attach_lock();

	rcu_read_lock();
	if (pid) {
@@ -2725,7 +2765,7 @@ struct task_struct *cgroup_procs_write_start(char *buf, bool threadgroup)
	goto out_unlock_rcu;

out_unlock_threadgroup:
	percpu_up_write(&cgroup_threadgroup_rwsem);
	cgroup_attach_unlock();
out_unlock_rcu:
	rcu_read_unlock();
	return tsk;
@@ -2740,7 +2780,7 @@ void cgroup_procs_write_finish(struct task_struct *task)
	/* release reference from cgroup_procs_write_start() */
	put_task_struct(task);

	percpu_up_write(&cgroup_threadgroup_rwsem);
	cgroup_attach_unlock();
	for_each_subsys(ss, ssid)
		if (ss->post_attach)
			ss->post_attach();
@@ -2799,7 +2839,7 @@ static int cgroup_update_dfl_csses(struct cgroup *cgrp)

	lockdep_assert_held(&cgroup_mutex);

	percpu_down_write(&cgroup_threadgroup_rwsem);
	cgroup_attach_lock();

	/* look up all csses currently attached to @cgrp's subtree */
	spin_lock_irq(&css_set_lock);
@@ -2830,7 +2870,7 @@ static int cgroup_update_dfl_csses(struct cgroup *cgrp)
	ret = cgroup_migrate_execute(&mgctx);
out_finish:
	cgroup_migrate_finish(&mgctx);
	percpu_up_write(&cgroup_threadgroup_rwsem);
	cgroup_attach_unlock();
	return ret;
}

+1 −6
Original line number Diff line number Diff line
@@ -1528,11 +1528,7 @@ static void cpuset_attach(struct cgroup_taskset *tset)
	cgroup_taskset_first(tset, &css);
	cs = css_cs(css);

	/*
	 * It should hold cpus lock because a cpu offline event can
	 * cause set_cpus_allowed_ptr() failed.
	 */
	get_online_cpus();
	lockdep_assert_cpus_held();     /* see cgroup_attach_lock() */
	mutex_lock(&cpuset_mutex);

	/* prepare for attach */
@@ -1588,7 +1584,6 @@ static void cpuset_attach(struct cgroup_taskset *tset)
		wake_up(&cpuset_attach_wq);

	mutex_unlock(&cpuset_mutex);
	put_online_cpus();
}

/* The various types of files and directories in a cpuset file system */