Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit b5927605 authored by Lai Jiangshan, committed by Tejun Heo
Browse files

workqueue: remove pwq_lock which is no longer used



To simplify locking, the previous patches expanded wq->mutex to
protect all fields of each workqueue instance including the pwqs list,
leaving pwq_lock without any user.  Remove the unused pwq_lock.

tj: Rebased on top of the current dev branch.  Updated description.

Signed-off-by: Lai Jiangshan <laijs@cn.fujitsu.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
parent a357fc03
Loading
Loading
Loading
Loading
+2 −11
Original line number Diff line number Diff line
@@ -125,12 +125,9 @@ enum {
 *
 * PR: wq_pool_mutex protected for writes.  Sched-RCU protected for reads.
 *
 * PW: pwq_lock protected.
 *
 * WQ: wq->mutex protected.
 *
 * WR: wq->mutex and pwq_lock protected for writes.  Sched-RCU protected
 *     for reads.
 * WR: wq->mutex protected for writes.  Sched-RCU protected for reads.
 *
 * MD: wq_mayday_lock protected.
 */
@@ -257,7 +254,6 @@ struct workqueue_struct {
static struct kmem_cache *pwq_cache;

static DEFINE_MUTEX(wq_pool_mutex);	/* protects pools and workqueues list */
static DEFINE_SPINLOCK(pwq_lock);	/* protects pool_workqueues */
static DEFINE_SPINLOCK(wq_mayday_lock);	/* protects wq->maydays list */

static LIST_HEAD(workqueues);		/* PL: list of all workqueues */
@@ -300,8 +296,7 @@ static void copy_workqueue_attrs(struct workqueue_attrs *to,

#define assert_rcu_or_wq_mutex(wq)					\
	rcu_lockdep_assert(rcu_read_lock_sched_held() ||		\
			   lockdep_is_held(&wq->mutex) ||		\
			   lockdep_is_held(&pwq_lock),			\
			   lockdep_is_held(&wq->mutex),			\
			   "sched RCU or wq->mutex should be held")

#ifdef CONFIG_LOCKDEP
@@ -3549,9 +3544,7 @@ static void pwq_unbound_release_workfn(struct work_struct *work)
	 * and consistent with the linking path.
	 */
	mutex_lock(&wq->mutex);
	spin_lock_irq(&pwq_lock);
	list_del_rcu(&pwq->pwqs_node);
	spin_unlock_irq(&pwq_lock);
	mutex_unlock(&wq->mutex);

	put_unbound_pool(pool);
@@ -3635,9 +3628,7 @@ static void init_and_link_pwq(struct pool_workqueue *pwq,
	pwq_adjust_max_active(pwq);

	/* link in @pwq */
	spin_lock_irq(&pwq_lock);
	list_add_rcu(&pwq->pwqs_node, &wq->pwqs);
	spin_unlock_irq(&pwq_lock);

	mutex_unlock(&wq->mutex);
}