
Commit bd7c089e authored by Tejun Heo

workqueue: relocate rebind_workers()



rebind_workers() will be reimplemented in a way which makes it mostly
decoupled from the rest of worker management.  Move rebind_workers()
so that it's located with other CPU hotplug related functions.

This patch is pure function relocation.

Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Lai Jiangshan <laijs@cn.fujitsu.com>
parent 822d8405
kernel/workqueue.c +71 −71
@@ -1643,77 +1643,6 @@ static void busy_worker_rebind_fn(struct work_struct *work)
 	spin_unlock_irq(&worker->pool->lock);
 }
 
-/**
- * rebind_workers - rebind all workers of a pool to the associated CPU
- * @pool: pool of interest
- *
- * @pool->cpu is coming online.  Rebind all workers to the CPU.  Rebinding
- * is different for idle and busy ones.
- *
- * Idle ones will be removed from the idle_list and woken up.  They will
- * add themselves back after completing rebind.  This ensures that the
- * idle_list doesn't contain any unbound workers when re-bound busy workers
- * try to perform local wake-ups for concurrency management.
- *
- * Busy workers can rebind after they finish their current work items.
- * Queueing the rebind work item at the head of the scheduled list is
- * enough.  Note that nr_running will be properly bumped as busy workers
- * rebind.
- *
- * On return, all non-manager workers are scheduled for rebind - see
- * manage_workers() for the manager special case.  Any idle worker
- * including the manager will not appear on @idle_list until rebind is
- * complete, making local wake-ups safe.
- */
-static void rebind_workers(struct worker_pool *pool)
-{
-	struct worker *worker, *n;
-	int i;
-
-	lockdep_assert_held(&pool->manager_mutex);
-	lockdep_assert_held(&pool->lock);
-
-	/* dequeue and kick idle ones */
-	list_for_each_entry_safe(worker, n, &pool->idle_list, entry) {
-		/*
-		 * idle workers should be off @pool->idle_list until rebind
-		 * is complete to avoid receiving premature local wake-ups.
-		 */
-		list_del_init(&worker->entry);
-
-		/*
-		 * worker_thread() will see the above dequeuing and call
-		 * idle_worker_rebind().
-		 */
-		wake_up_process(worker->task);
-	}
-
-	/* rebind busy workers */
-	for_each_busy_worker(worker, i, pool) {
-		struct work_struct *rebind_work = &worker->rebind_work;
-		struct workqueue_struct *wq;
-
-		if (test_and_set_bit(WORK_STRUCT_PENDING_BIT,
-				     work_data_bits(rebind_work)))
-			continue;
-
-		debug_work_activate(rebind_work);
-
-		/*
-		 * wq doesn't really matter but let's keep @worker->pool
-		 * and @pwq->pool consistent for sanity.
-		 */
-		if (worker->pool->attrs->nice < 0)
-			wq = system_highpri_wq;
-		else
-			wq = system_wq;
-
-		insert_work(per_cpu_ptr(wq->cpu_pwqs, pool->cpu), rebind_work,
-			    worker->scheduled.next,
-			    work_color_to_flags(WORK_NO_COLOR));
-	}
-}
-
 static struct worker *alloc_worker(void)
 {
 	struct worker *worker;
@@ -4196,6 +4125,77 @@ static void wq_unbind_fn(struct work_struct *work)
 		atomic_set(&pool->nr_running, 0);
 }
 
+/**
+ * rebind_workers - rebind all workers of a pool to the associated CPU
+ * @pool: pool of interest
+ *
+ * @pool->cpu is coming online.  Rebind all workers to the CPU.  Rebinding
+ * is different for idle and busy ones.
+ *
+ * Idle ones will be removed from the idle_list and woken up.  They will
+ * add themselves back after completing rebind.  This ensures that the
+ * idle_list doesn't contain any unbound workers when re-bound busy workers
+ * try to perform local wake-ups for concurrency management.
+ *
+ * Busy workers can rebind after they finish their current work items.
+ * Queueing the rebind work item at the head of the scheduled list is
+ * enough.  Note that nr_running will be properly bumped as busy workers
+ * rebind.
+ *
+ * On return, all non-manager workers are scheduled for rebind - see
+ * manage_workers() for the manager special case.  Any idle worker
+ * including the manager will not appear on @idle_list until rebind is
+ * complete, making local wake-ups safe.
+ */
+static void rebind_workers(struct worker_pool *pool)
+{
+	struct worker *worker, *n;
+	int i;
+
+	lockdep_assert_held(&pool->manager_mutex);
+	lockdep_assert_held(&pool->lock);
+
+	/* dequeue and kick idle ones */
+	list_for_each_entry_safe(worker, n, &pool->idle_list, entry) {
+		/*
+		 * idle workers should be off @pool->idle_list until rebind
+		 * is complete to avoid receiving premature local wake-ups.
+		 */
+		list_del_init(&worker->entry);
+
+		/*
+		 * worker_thread() will see the above dequeuing and call
+		 * idle_worker_rebind().
+		 */
+		wake_up_process(worker->task);
+	}
+
+	/* rebind busy workers */
+	for_each_busy_worker(worker, i, pool) {
+		struct work_struct *rebind_work = &worker->rebind_work;
+		struct workqueue_struct *wq;
+
+		if (test_and_set_bit(WORK_STRUCT_PENDING_BIT,
+				     work_data_bits(rebind_work)))
+			continue;
+
+		debug_work_activate(rebind_work);
+
+		/*
+		 * wq doesn't really matter but let's keep @worker->pool
+		 * and @pwq->pool consistent for sanity.
+		 */
+		if (worker->pool->attrs->nice < 0)
+			wq = system_highpri_wq;
+		else
+			wq = system_wq;
+
+		insert_work(per_cpu_ptr(wq->cpu_pwqs, pool->cpu), rebind_work,
+			    worker->scheduled.next,
+			    work_color_to_flags(WORK_NO_COLOR));
+	}
+}
+
 /*
  * Workqueues should be brought up before normal priority CPU notifiers.
  * This will be registered high priority CPU notifier.
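
For context, the sketch below shows how a high-priority CPU-online notifier might drive rebind_workers() after this relocation. It is not part of this commit: the callback name workqueue_cpu_up_callback, the for_each_cpu_worker_pool() loop, and the notifier boilerplate are assumptions inferred from the comment above and from the lockdep_assert_held() calls in rebind_workers(), which require the caller to hold both pool->manager_mutex and pool->lock.

/*
 * Hypothetical caller sketch -- not part of this commit.  On CPU_ONLINE
 * the callback walks the CPU's worker pools and, with manager_mutex and
 * pool->lock held (as rebind_workers() asserts), rebinds each pool's
 * workers to the newly onlined CPU.
 */
static int workqueue_cpu_up_callback(struct notifier_block *nfb,
				     unsigned long action, void *hcpu)
{
	int cpu = (unsigned long)hcpu;
	struct worker_pool *pool;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_ONLINE:
		for_each_cpu_worker_pool(pool, cpu) {
			mutex_lock(&pool->manager_mutex);
			spin_lock_irq(&pool->lock);

			/*
			 * Wake idle workers so they re-add themselves after
			 * rebinding, and queue rebind_work on busy workers.
			 */
			rebind_workers(pool);

			spin_unlock_irq(&pool->lock);
			mutex_unlock(&pool->manager_mutex);
		}
		break;
	}
	return NOTIFY_OK;
}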