
Commit a0111cf6 authored by Lai Jiangshan, committed by Tejun Heo

workqueue: separate out and refactor the locking of applying attrs



Applying attrs requires two locks: get_online_cpus() and wq_pool_mutex,
and this code is duplicated in two places (apply_workqueue_attrs() and
workqueue_set_unbound_cpumask()).  So we separate out this locking
code into apply_wqattrs_[un]lock() and do a minor refactor of
apply_workqueue_attrs().

apply_wqattrs_[un]lock() will also be used in a later patch to
ensure attrs changes are properly synchronized.

tj: minor updates to comments

Signed-off-by: Lai Jiangshan <laijs@cn.fujitsu.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
parent f7142ed4
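
Before the diff, here is the shape of the change in isolation: the two-step
lock sequence that was open-coded in both call sites becomes a pair of
helpers, and apply_workqueue_attrs() becomes a thin wrapper around a _locked
variant.  The userspace C sketch below models the same pattern with pthreads;
the rwlock and mutex are stand-ins for get_online_cpus()/put_online_cpus()
and wq_pool_mutex, not the real kernel primitives.

	#include <pthread.h>
	#include <stdio.h>

	/* Stand-ins for the CPU-hotplug read lock and wq_pool_mutex. */
	static pthread_rwlock_t cpu_hotplug_lock = PTHREAD_RWLOCK_INITIALIZER;
	static pthread_mutex_t wq_pool_mutex = PTHREAD_MUTEX_INITIALIZER;

	/* Pair the two acquisitions behind one helper, as the commit does. */
	static void apply_wqattrs_lock(void)
	{
		/* CPUs should stay stable across pwq creations and installations. */
		pthread_rwlock_rdlock(&cpu_hotplug_lock);	/* ~get_online_cpus() */
		pthread_mutex_lock(&wq_pool_mutex);
	}

	static void apply_wqattrs_unlock(void)
	{
		pthread_mutex_unlock(&wq_pool_mutex);
		pthread_rwlock_unlock(&cpu_hotplug_lock);	/* ~put_online_cpus() */
	}

	/* The _locked variant assumes the caller already holds both locks. */
	static int apply_workqueue_attrs_locked(int wq, int attrs)
	{
		printf("applying attrs %d to wq %d\n", attrs, wq);
		return 0;
	}

	/* The public entry point shrinks to lock + _locked + unlock. */
	static int apply_workqueue_attrs(int wq, int attrs)
	{
		int ret;

		apply_wqattrs_lock();
		ret = apply_workqueue_attrs_locked(wq, attrs);
		apply_wqattrs_unlock();
		return ret;
	}

	int main(void)
	{
		return apply_workqueue_attrs(1, 2);
	}

Factoring the sequence this way keeps the lock ordering in one place:
workqueue_set_unbound_cpumask() below switches to the same helpers, and per
the commit message they will also serve a later synchronization patch.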
@@ -3621,23 +3621,20 @@ static void apply_wqattrs_commit(struct apply_wqattrs_ctx *ctx)
 	mutex_unlock(&ctx->wq->mutex);
 }
 
-/**
- * apply_workqueue_attrs - apply new workqueue_attrs to an unbound workqueue
- * @wq: the target workqueue
- * @attrs: the workqueue_attrs to apply, allocated with alloc_workqueue_attrs()
- *
- * Apply @attrs to an unbound workqueue @wq.  Unless disabled, on NUMA
- * machines, this function maps a separate pwq to each NUMA node with
- * possible CPUs in @attrs->cpumask so that work items are affine to the
- * NUMA node they were issued on.  Older pwqs are released as in-flight work
- * items finish.  Note that a work item which repeatedly requeues itself
- * back-to-back will stay on its current pwq.
- *
- * Performs GFP_KERNEL allocations.
- *
- * Return: 0 on success and -errno on failure.
- */
-int apply_workqueue_attrs(struct workqueue_struct *wq,
+static void apply_wqattrs_lock(void)
+{
+	/* CPUs should stay stable across pwq creations and installations */
+	get_online_cpus();
+	mutex_lock(&wq_pool_mutex);
+}
+
+static void apply_wqattrs_unlock(void)
+{
+	mutex_unlock(&wq_pool_mutex);
+	put_online_cpus();
+}
+
+static int apply_workqueue_attrs_locked(struct workqueue_struct *wq,
 					const struct workqueue_attrs *attrs)
 {
 	struct apply_wqattrs_ctx *ctx;
@@ -3651,14 +3648,6 @@ int apply_workqueue_attrs(struct workqueue_struct *wq,
 	if (WARN_ON((wq->flags & __WQ_ORDERED) && !list_empty(&wq->pwqs)))
 		return -EINVAL;
 
-	/*
-	 * CPUs should stay stable across pwq creations and installations.
-	 * Pin CPUs, determine the target cpumask for each node and create
-	 * pwqs accordingly.
-	 */
-	get_online_cpus();
-	mutex_lock(&wq_pool_mutex);
-
 	ctx = apply_wqattrs_prepare(wq, attrs);
 
 	/* the ctx has been prepared successfully, let's commit it */
@@ -3667,14 +3656,39 @@ int apply_workqueue_attrs(struct workqueue_struct *wq,
 		ret = 0;
 	}
 
-	mutex_unlock(&wq_pool_mutex);
-	put_online_cpus();
-
 	apply_wqattrs_cleanup(ctx);
 
 	return ret;
 }
 
+/**
+ * apply_workqueue_attrs - apply new workqueue_attrs to an unbound workqueue
+ * @wq: the target workqueue
+ * @attrs: the workqueue_attrs to apply, allocated with alloc_workqueue_attrs()
+ *
+ * Apply @attrs to an unbound workqueue @wq.  Unless disabled, on NUMA
+ * machines, this function maps a separate pwq to each NUMA node with
+ * possible CPUs in @attrs->cpumask so that work items are affine to the
+ * NUMA node they were issued on.  Older pwqs are released as in-flight work
+ * items finish.  Note that a work item which repeatedly requeues itself
+ * back-to-back will stay on its current pwq.
+ *
+ * Performs GFP_KERNEL allocations.
+ *
+ * Return: 0 on success and -errno on failure.
+ */
+int apply_workqueue_attrs(struct workqueue_struct *wq,
+			  const struct workqueue_attrs *attrs)
+{
+	int ret;
+
+	apply_wqattrs_lock();
+	ret = apply_workqueue_attrs_locked(wq, attrs);
+	apply_wqattrs_unlock();
+
+	return ret;
+}
+
 /**
  * wq_update_unbound_numa - update NUMA affinity of a wq for CPU hot[un]plug
  * @wq: the target workqueue
@@ -4799,10 +4813,9 @@ int workqueue_set_unbound_cpumask(cpumask_var_t cpumask)
 	if (!zalloc_cpumask_var(&saved_cpumask, GFP_KERNEL))
 		return -ENOMEM;
 
-	get_online_cpus();
 	cpumask_and(cpumask, cpumask, cpu_possible_mask);
 	if (!cpumask_empty(cpumask)) {
-		mutex_lock(&wq_pool_mutex);
+		apply_wqattrs_lock();
 
 		/* save the old wq_unbound_cpumask. */
 		cpumask_copy(saved_cpumask, wq_unbound_cpumask);
@@ -4815,9 +4828,8 @@ int workqueue_set_unbound_cpumask(cpumask_var_t cpumask)
 		if (ret < 0)
 			cpumask_copy(wq_unbound_cpumask, saved_cpumask);
 
-		mutex_unlock(&wq_pool_mutex);
+		apply_wqattrs_unlock();
 	}
-	put_online_cpus();
 
 	free_cpumask_var(saved_cpumask);
 	return ret;
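
For context on the caller contract described in the apply_workqueue_attrs()
doc comment above (allocate attrs, adjust them, apply, free), here is a
hedged sketch of an in-kernel caller from the same kernel era.
my_tune_unbound_wq and the chosen nice/cpumask values are hypothetical
illustrations, not part of this commit; alloc_workqueue_attrs() still took a
gfp_t argument at this point.

	#include <linux/workqueue.h>
	#include <linux/cpumask.h>
	#include <linux/gfp.h>

	/* Hypothetical: restrict an unbound wq to CPUs 0-1 at nice -5. */
	static int my_tune_unbound_wq(struct workqueue_struct *wq)
	{
		struct workqueue_attrs *attrs;
		int ret;

		attrs = alloc_workqueue_attrs(GFP_KERNEL);	/* as the doc comment requires */
		if (!attrs)
			return -ENOMEM;

		attrs->nice = -5;
		cpumask_clear(attrs->cpumask);
		cpumask_set_cpu(0, attrs->cpumask);
		cpumask_set_cpu(1, attrs->cpumask);

		/* Takes get_online_cpus() + wq_pool_mutex via apply_wqattrs_lock(). */
		ret = apply_workqueue_attrs(wq, attrs);

		free_workqueue_attrs(attrs);
		return ret;
	}

The target workqueue must have been created with WQ_UNBOUND, and applying
attrs to an ordered workqueue that already has pwqs fails with -EINVAL, as
the WARN_ON visible in the second hunk shows.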