
Commit 706026c2 authored by Tejun Heo

workqueue: post global_cwq removal cleanups

Remove remaining references to gcwq.

* __next_gcwq_cpu() steals __next_wq_cpu() name.  The original
  __next_wq_cpu() became __next_cwq_cpu().

* s/for_each_gcwq_cpu/for_each_wq_cpu/
  s/for_each_online_gcwq_cpu/for_each_online_wq_cpu/

* s/gcwq_mayday_timeout/pool_mayday_timeout/

* s/gcwq_unbind_fn/wq_unbind_fn/

* Drop references to gcwq in comments.

This patch doesn't introduce any functional changes.
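
As a minimal usage sketch (a hypothetical kernel-context caller, not part
of this patch): per the comment block updated below, the renamed
for_each_wq_cpu() visits every possible CPU plus the pseudo-CPU
WORK_CPU_UNBOUND, which hosts workqueues not bound to any specific CPU.

	unsigned int cpu;

	/* Hypothetical caller; illustration only, not in this patch. */
	for_each_wq_cpu(cpu) {
		if (cpu == WORK_CPU_UNBOUND)
			pr_debug("workqueue: visiting unbound pseudo-CPU\n");
		else
			pr_debug("workqueue: visiting possible CPU %u\n", cpu);
	}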

Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Lai Jiangshan <laijs@cn.fujitsu.com>
parent e6e380ed
kernel/workqueue.c: +52 −52
@@ -246,7 +246,7 @@ EXPORT_SYMBOL_GPL(system_freezable_wq);
#define for_each_busy_worker(worker, i, pos, pool)			\
	hash_for_each(pool->busy_hash, i, pos, worker, hentry)

-static inline int __next_gcwq_cpu(int cpu, const struct cpumask *mask,
+static inline int __next_wq_cpu(int cpu, const struct cpumask *mask,
				unsigned int sw)
{
	if (cpu < nr_cpu_ids) {
@@ -261,39 +261,39 @@ static inline int __next_gcwq_cpu(int cpu, const struct cpumask *mask,
	return WORK_CPU_NONE;
}

-static inline int __next_wq_cpu(int cpu, const struct cpumask *mask,
+static inline int __next_cwq_cpu(int cpu, const struct cpumask *mask,
				 struct workqueue_struct *wq)
{
-	return __next_gcwq_cpu(cpu, mask, !(wq->flags & WQ_UNBOUND) ? 1 : 2);
+	return __next_wq_cpu(cpu, mask, !(wq->flags & WQ_UNBOUND) ? 1 : 2);
}

/*
 * CPU iterators
 *
- * An extra gcwq is defined for an invalid cpu number
+ * An extra cpu number is defined using an invalid cpu number
 * (WORK_CPU_UNBOUND) to host workqueues which are not bound to any
- * specific CPU.  The following iterators are similar to
- * for_each_*_cpu() iterators but also considers the unbound gcwq.
+ * specific CPU.  The following iterators are similar to for_each_*_cpu()
+ * iterators but also considers the unbound CPU.
 *
- * for_each_gcwq_cpu()		: possible CPUs + WORK_CPU_UNBOUND
- * for_each_online_gcwq_cpu()	: online CPUs + WORK_CPU_UNBOUND
+ * for_each_wq_cpu()		: possible CPUs + WORK_CPU_UNBOUND
+ * for_each_online_wq_cpu()	: online CPUs + WORK_CPU_UNBOUND
 * for_each_cwq_cpu()		: possible CPUs for bound workqueues,
 *				  WORK_CPU_UNBOUND for unbound workqueues
 */
-#define for_each_gcwq_cpu(cpu)						\
-	for ((cpu) = __next_gcwq_cpu(-1, cpu_possible_mask, 3);		\
+#define for_each_wq_cpu(cpu)						\
+	for ((cpu) = __next_wq_cpu(-1, cpu_possible_mask, 3);		\
	     (cpu) < WORK_CPU_NONE;					\
-	     (cpu) = __next_gcwq_cpu((cpu), cpu_possible_mask, 3))
+	     (cpu) = __next_wq_cpu((cpu), cpu_possible_mask, 3))

-#define for_each_online_gcwq_cpu(cpu)					\
-	for ((cpu) = __next_gcwq_cpu(-1, cpu_online_mask, 3);		\
+#define for_each_online_wq_cpu(cpu)					\
+	for ((cpu) = __next_wq_cpu(-1, cpu_online_mask, 3);		\
	     (cpu) < WORK_CPU_NONE;					\
-	     (cpu) = __next_gcwq_cpu((cpu), cpu_online_mask, 3))
+	     (cpu) = __next_wq_cpu((cpu), cpu_online_mask, 3))

#define for_each_cwq_cpu(cpu, wq)					\
-	for ((cpu) = __next_wq_cpu(-1, cpu_possible_mask, (wq));	\
+	for ((cpu) = __next_cwq_cpu(-1, cpu_possible_mask, (wq));	\
	     (cpu) < WORK_CPU_NONE;					\
-	     (cpu) = __next_wq_cpu((cpu), cpu_possible_mask, (wq)))
+	     (cpu) = __next_cwq_cpu((cpu), cpu_possible_mask, (wq)))

#ifdef CONFIG_DEBUG_OBJECTS_WORK

@@ -655,7 +655,7 @@ static bool __need_more_worker(struct worker_pool *pool)
 * running workers.
 *
 * Note that, because unbound workers never contribute to nr_running, this
- * function will always return %true for unbound gcwq as long as the
+ * function will always return %true for unbound pools as long as the
 * worklist isn't empty.
 */
static bool need_more_worker(struct worker_pool *pool)
@@ -1129,14 +1129,14 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
}

/**
- * insert_work - insert a work into gcwq
+ * insert_work - insert a work into a pool
 * @cwq: cwq @work belongs to
 * @work: work to insert
 * @head: insertion point
 * @extra_flags: extra WORK_STRUCT_* flags to set
 *
- * Insert @work which belongs to @cwq into @gcwq after @head.
- * @extra_flags is or'd to work_struct flags.
+ * Insert @work which belongs to @cwq after @head.  @extra_flags is or'd to
+ * work_struct flags.
 *
 * CONTEXT:
 * spin_lock_irq(pool->lock).
@@ -1179,7 +1179,7 @@ static bool is_chained_work(struct workqueue_struct *wq)
	unsigned long flags;
	unsigned int cpu;

-	for_each_gcwq_cpu(cpu) {
+	for_each_wq_cpu(cpu) {
		struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
		struct worker_pool *pool = cwq->pool;
		struct worker *worker;
@@ -1533,7 +1533,7 @@ static void worker_enter_idle(struct worker *worker)
		mod_timer(&pool->idle_timer, jiffies + IDLE_WORKER_TIMEOUT);

	/*
-	 * Sanity check nr_running.  Because gcwq_unbind_fn() releases
+	 * Sanity check nr_running.  Because wq_unbind_fn() releases
	 * pool->lock between setting %WORKER_UNBOUND and zapping
	 * nr_running, the warning may trigger spuriously.  Check iff
	 * unbind is not in progress.
@@ -1563,7 +1563,7 @@ static void worker_leave_idle(struct worker *worker)
}

/**
- * worker_maybe_bind_and_lock - bind worker to its cpu if possible and lock gcwq
+ * worker_maybe_bind_and_lock - bind worker to its cpu if possible and lock pool
 * @worker: self
 *
 * Works which are scheduled while the cpu is online must at least be
@@ -1575,10 +1575,10 @@ static void worker_leave_idle(struct worker *worker)
 * themselves to the target cpu and may race with cpu going down or
 * coming online.  kthread_bind() can't be used because it may put the
 * worker to already dead cpu and set_cpus_allowed_ptr() can't be used
- * verbatim as it's best effort and blocking and gcwq may be
+ * verbatim as it's best effort and blocking and pool may be
 * [dis]associated in the meantime.
 *
- * This function tries set_cpus_allowed() and locks gcwq and verifies the
+ * This function tries set_cpus_allowed() and locks pool and verifies the
 * binding against %POOL_DISASSOCIATED which is set during
 * %CPU_DOWN_PREPARE and cleared during %CPU_ONLINE, so if the worker
 * enters idle state or fetches works without dropping lock, it can
@@ -1589,7 +1589,7 @@ static void worker_leave_idle(struct worker *worker)
 * held.
 *
 * RETURNS:
- * %true if the associated gcwq is online (@worker is successfully
+ * %true if the associated pool is online (@worker is successfully
 * bound), %false if offline.
 */
static bool worker_maybe_bind_and_lock(struct worker *worker)
@@ -1826,7 +1826,7 @@ static struct worker *create_worker(struct worker_pool *pool)
 * start_worker - start a newly created worker
 * @worker: worker to start
 *
- * Make the gcwq aware of @worker and start it.
+ * Make the pool aware of @worker and start it.
 *
 * CONTEXT:
 * spin_lock_irq(pool->lock).
@@ -1843,7 +1843,7 @@ static void start_worker(struct worker *worker)
 * destroy_worker - destroy a workqueue worker
 * @worker: worker to be destroyed
 *
- * Destroy @worker and adjust @gcwq stats accordingly.
+ * Destroy @worker and adjust @pool stats accordingly.
 *
 * CONTEXT:
 * spin_lock_irq(pool->lock) which is released and regrabbed.
@@ -1919,7 +1919,7 @@ static bool send_mayday(struct work_struct *work)
	return true;
}

-static void gcwq_mayday_timeout(unsigned long __pool)
+static void pool_mayday_timeout(unsigned long __pool)
{
	struct worker_pool *pool = (void *)__pool;
	struct work_struct *work;
@@ -2047,9 +2047,9 @@ static bool maybe_destroy_workers(struct worker_pool *pool)
 * manage_workers - manage worker pool
 * @worker: self
 *
- * Assume the manager role and manage gcwq worker pool @worker belongs
+ * Assume the manager role and manage the worker pool @worker belongs
 * to.  At any given time, there can be only zero or one manager per
- * gcwq.  The exclusion is handled automatically by this function.
+ * pool.  The exclusion is handled automatically by this function.
 *
 * The caller can safely start processing works on false return.  On
 * true return, it's guaranteed that need_to_create_worker() is false
@@ -2092,11 +2092,11 @@ static bool manage_workers(struct worker *worker)
		 * CPU hotplug could have happened while we were waiting
		 * for assoc_mutex.  Hotplug itself can't handle us
		 * because manager isn't either on idle or busy list, and
-		 * @gcwq's state and ours could have deviated.
+		 * @pool's state and ours could have deviated.
		 *
		 * As hotplug is now excluded via assoc_mutex, we can
		 * simply try to bind.  It will succeed or fail depending
-		 * on @gcwq's current state.  Try it and adjust
+		 * on @pool's current state.  Try it and adjust
		 * %WORKER_UNBOUND accordingly.
		 */
		if (worker_maybe_bind_and_lock(worker))
@@ -2271,8 +2271,8 @@ static void process_scheduled_works(struct worker *worker)
 * worker_thread - the worker thread function
 * @__worker: self
 *
- * The gcwq worker thread function.  There's a single dynamic pool of
- * these per each cpu.  These workers process all works regardless of
+ * The worker thread function.  There are NR_CPU_WORKER_POOLS dynamic pools
+ * of these per each cpu.  These workers process all works regardless of
 * their specific target workqueue.  The only exception is works which
 * belong to workqueues with a rescuer which will be explained in
 * rescuer_thread().
@@ -2368,14 +2368,14 @@ static int worker_thread(void *__worker)
 * Workqueue rescuer thread function.  There's one rescuer for each
 * workqueue which has WQ_RESCUER set.
 *
- * Regular work processing on a gcwq may block trying to create a new
+ * Regular work processing on a pool may block trying to create a new
 * worker which uses GFP_KERNEL allocation which has slight chance of
 * developing into deadlock if some works currently on the same queue
 * need to be processed to satisfy the GFP_KERNEL allocation.  This is
 * the problem rescuer solves.
 *
- * When such condition is possible, the gcwq summons rescuers of all
- * workqueues which have works queued on the gcwq and let them process
+ * When such condition is possible, the pool summons rescuers of all
+ * workqueues which have works queued on the pool and let them process
 * those works so that forward progress can be guaranteed.
 *
 * This should happen rarely.
@@ -3476,7 +3476,7 @@ EXPORT_SYMBOL_GPL(work_busy);
 *
 * There are two challenges in supporting CPU hotplug.  Firstly, there
 * are a lot of assumptions on strong associations among work, cwq and
- * gcwq which make migrating pending and scheduled works very
+ * pool which make migrating pending and scheduled works very
 * difficult to implement without impacting hot paths.  Secondly,
 * worker pools serve mix of short, long and very long running works making
 * blocked draining impractical.
@@ -3486,7 +3486,7 @@ EXPORT_SYMBOL_GPL(work_busy);
 * cpu comes back online.
 */

-static void gcwq_unbind_fn(struct work_struct *work)
+static void wq_unbind_fn(struct work_struct *work)
{
	int cpu = smp_processor_id();
	struct worker_pool *pool;
@@ -3601,7 +3601,7 @@ static int __cpuinit workqueue_cpu_down_callback(struct notifier_block *nfb,
	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_DOWN_PREPARE:
		/* unbinding should happen on the local CPU */
-		INIT_WORK_ONSTACK(&unbind_work, gcwq_unbind_fn);
+		INIT_WORK_ONSTACK(&unbind_work, wq_unbind_fn);
		queue_work_on(cpu, system_highpri_wq, &unbind_work);
		flush_work(&unbind_work);
		break;
@@ -3654,7 +3654,7 @@ EXPORT_SYMBOL_GPL(work_on_cpu);
 *
 * Start freezing workqueues.  After this function returns, all freezable
 * workqueues will queue new works to their frozen_works list instead of
- * gcwq->worklist.
+ * pool->worklist.
 *
 * CONTEXT:
 * Grabs and releases workqueue_lock and pool->lock's.
@@ -3668,7 +3668,7 @@ void freeze_workqueues_begin(void)
	BUG_ON(workqueue_freezing);
	workqueue_freezing = true;

-	for_each_gcwq_cpu(cpu) {
+	for_each_wq_cpu(cpu) {
		struct worker_pool *pool;
		struct workqueue_struct *wq;

@@ -3715,7 +3715,7 @@ bool freeze_workqueues_busy(void)

	BUG_ON(!workqueue_freezing);

-	for_each_gcwq_cpu(cpu) {
+	for_each_wq_cpu(cpu) {
		struct workqueue_struct *wq;
		/*
		 * nr_active is monotonically decreasing.  It's safe
@@ -3743,7 +3743,7 @@ bool freeze_workqueues_busy(void)
 * thaw_workqueues - thaw workqueues
 *
 * Thaw workqueues.  Normal queueing is restored and all collected
- * frozen works are transferred to their respective gcwq worklists.
+ * frozen works are transferred to their respective pool worklists.
 *
 * CONTEXT:
 * Grabs and releases workqueue_lock and pool->lock's.
@@ -3757,7 +3757,7 @@ void thaw_workqueues(void)
	if (!workqueue_freezing)
		goto out_unlock;

-	for_each_gcwq_cpu(cpu) {
+	for_each_wq_cpu(cpu) {
		struct worker_pool *pool;
		struct workqueue_struct *wq;

@@ -3801,8 +3801,8 @@ static int __init init_workqueues(void)
	cpu_notifier(workqueue_cpu_up_callback, CPU_PRI_WORKQUEUE_UP);
	hotcpu_notifier(workqueue_cpu_down_callback, CPU_PRI_WORKQUEUE_DOWN);

-	/* initialize gcwqs */
-	for_each_gcwq_cpu(cpu) {
+	/* initialize CPU pools */
+	for_each_wq_cpu(cpu) {
		struct worker_pool *pool;

		for_each_std_worker_pool(pool, cpu) {
@@ -3817,7 +3817,7 @@ static int __init init_workqueues(void)
			pool->idle_timer.function = idle_worker_timeout;
			pool->idle_timer.data = (unsigned long)pool;

-			setup_timer(&pool->mayday_timer, gcwq_mayday_timeout,
+			setup_timer(&pool->mayday_timer, pool_mayday_timeout,
				    (unsigned long)pool);

			mutex_init(&pool->assoc_mutex);
@@ -3829,7 +3829,7 @@ static int __init init_workqueues(void)
	}

	/* create the initial worker */
-	for_each_online_gcwq_cpu(cpu) {
+	for_each_online_wq_cpu(cpu) {
		struct worker_pool *pool;

		for_each_std_worker_pool(pool, cpu) {
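
A closing sketch of the renamed helper: judging from the hunks above, the
sw argument selects what the walk covers, so the for_each_*_wq_cpu()
macros pass 3 (real CPUs plus WORK_CPU_UNBOUND) while __next_cwq_cpu()
passes 1 or 2 depending on WQ_UNBOUND.  The body below reconstructs the
part elided by the first hunk boundary; it is an assumption based on that
visible context, not text taken from this diff.

static inline int __next_wq_cpu(int cpu, const struct cpumask *mask,
				unsigned int sw)
{
	/* Reconstructed sketch; elided body is an assumption. */
	if (cpu < nr_cpu_ids) {
		if (sw & 1) {		/* walk the real CPUs in @mask */
			cpu = cpumask_next(cpu, mask);
			if (cpu < nr_cpu_ids)
				return cpu;
		}
		if (sw & 2)		/* then the unbound pseudo-CPU */
			return WORK_CPU_UNBOUND;
	}
	return WORK_CPU_NONE;		/* iteration complete */
}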