
Commit 7fb98ea7 authored by Tejun Heo

workqueue: replace get_pwq() with explicit per_cpu_ptr() accesses and first_pwq()



get_pwq() takes @cpu, which can also be WORK_CPU_UNBOUND, and @wq and
returns the matching pwq (pool_workqueue).  We want to move away from
using @cpu for identifying pools and pwqs for unbound pools with
custom attributes and there is only one user - workqueue_congested() -
which makes use of the WQ_UNBOUND conditional in get_pwq().  All other
users already know whether they're dealing with a per-cpu or unbound
workqueue.
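For reference, this is what the removed helper did (a condensed restatement of the first hunk in the diff below, with comments added; it is not new code):

static struct pool_workqueue *get_pwq(int cpu, struct workqueue_struct *wq)
{
	if (!(wq->flags & WQ_UNBOUND)) {
		/* per-cpu workqueue: @cpu must be a valid CPU id */
		if (likely(cpu < nr_cpu_ids))
			return per_cpu_ptr(wq->cpu_pwqs, cpu);
	} else if (likely(cpu == WORK_CPU_UNBOUND)) {
		/* unbound workqueue: only WORK_CPU_UNBOUND is accepted */
		return list_first_entry(&wq->pwqs, struct pool_workqueue,
					pwqs_node);
	}
	return NULL;	/* @cpu doesn't match the workqueue type */
}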

Replace get_pwq() with explicit per_cpu_ptr(wq->cpu_pwqs, cpu) for
per-cpu workqueues and first_pwq() for unbound ones, and open-code
WQ_UNBOUND conditional in workqueue_congested().
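In other words, the conversion applied at each call site boils down to the following (an illustrative summary of the hunks below, not additional code in the patch):

	/* per-cpu workqueue, @cpu is a real CPU id */
	pwq = per_cpu_ptr(wq->cpu_pwqs, cpu);

	/* unbound workqueue */
	pwq = first_pwq(wq);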

Note that this makes workqueue_congested() behave slightly differently
when @cpu other than WORK_CPU_UNBOUND is specified.  It ignores @cpu
for unbound workqueues and always uses the first pwq instead of
oopsing.
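A minimal sketch of how that difference looks from a caller's side; the workqueue name and the wrapper function here are hypothetical and only for illustration, not part of the patch:

#include <linux/printk.h>
#include <linux/workqueue.h>

static void example_congestion_check(void)
{
	/* hypothetical unbound workqueue, used only for this sketch */
	struct workqueue_struct *unbound_wq =
		alloc_workqueue("example_unbound", WQ_UNBOUND, 0);

	if (!unbound_wq)
		return;

	/*
	 * Passing a real CPU number for an unbound workqueue used to make
	 * get_pwq() return NULL and oops workqueue_congested() on the NULL
	 * dereference; with this patch @cpu is ignored and the first pwq's
	 * delayed_works list is checked instead.
	 */
	pr_info("congested: %d\n", workqueue_congested(1, unbound_wq));

	destroy_workqueue(unbound_wq);
}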

Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Lai Jiangshan <laijs@cn.fujitsu.com>
parent 420c0ddb
+14 −15
@@ -463,16 +463,9 @@ static struct worker_pool *get_std_worker_pool(int cpu, bool highpri)
 	return &pools[highpri];
 }
 
-static struct pool_workqueue *get_pwq(int cpu, struct workqueue_struct *wq)
+static struct pool_workqueue *first_pwq(struct workqueue_struct *wq)
 {
-	if (!(wq->flags & WQ_UNBOUND)) {
-		if (likely(cpu < nr_cpu_ids))
-			return per_cpu_ptr(wq->cpu_pwqs, cpu);
-	} else if (likely(cpu == WORK_CPU_UNBOUND)) {
-		return list_first_entry(&wq->pwqs, struct pool_workqueue,
-					pwqs_node);
-	}
-	return NULL;
+	return list_first_entry(&wq->pwqs, struct pool_workqueue, pwqs_node);
 }
 
 static unsigned int work_color_to_flags(int color)
@@ -1191,7 +1184,7 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
 		 * work needs to be queued on that cpu to guarantee
 		 * non-reentrancy.
 		 */
-		pwq = get_pwq(cpu, wq);
+		pwq = per_cpu_ptr(wq->cpu_pwqs, cpu);
 		last_pool = get_work_pool(work);
 
 		if (last_pool && last_pool != pwq->pool) {
@@ -1202,7 +1195,7 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
 			worker = find_worker_executing_work(last_pool, work);
 
 			if (worker && worker->current_pwq->wq == wq) {
-				pwq = get_pwq(last_pool->cpu, wq);
+				pwq = per_cpu_ptr(wq->cpu_pwqs, last_pool->cpu);
 			} else {
 				/* meh... not running there, queue here */
 				spin_unlock(&last_pool->lock);
@@ -1212,7 +1205,7 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
 			spin_lock(&pwq->pool->lock);
 		}
 	} else {
-		pwq = get_pwq(WORK_CPU_UNBOUND, wq);
+		pwq = first_pwq(wq);
 		spin_lock(&pwq->pool->lock);
 	}
 
@@ -1650,7 +1643,7 @@ static void rebind_workers(struct worker_pool *pool)
 		else
 			wq = system_wq;
 
-		insert_work(get_pwq(pool->cpu, wq), rebind_work,
+		insert_work(per_cpu_ptr(wq->cpu_pwqs, pool->cpu), rebind_work,
 			    worker->scheduled.next,
 			    work_color_to_flags(WORK_NO_COLOR));
 	}
@@ -3088,7 +3081,8 @@ static int alloc_and_link_pwqs(struct workqueue_struct *wq)
 			return -ENOMEM;
 
 		for_each_possible_cpu(cpu) {
-			struct pool_workqueue *pwq = get_pwq(cpu, wq);
+			struct pool_workqueue *pwq =
+				per_cpu_ptr(wq->cpu_pwqs, cpu);
 
 			pwq->pool = get_std_worker_pool(cpu, highpri);
 			list_add_tail(&pwq->pwqs_node, &wq->pwqs);
@@ -3343,7 +3337,12 @@ EXPORT_SYMBOL_GPL(workqueue_set_max_active);
  */
 bool workqueue_congested(int cpu, struct workqueue_struct *wq)
 {
-	struct pool_workqueue *pwq = get_pwq(cpu, wq);
+	struct pool_workqueue *pwq;
+
+	if (!(wq->flags & WQ_UNBOUND))
+		pwq = per_cpu_ptr(wq->cpu_pwqs, cpu);
+	else
+		pwq = first_pwq(wq);
 
 	return !list_empty(&pwq->delayed_works);
 }