
Commit 72dd379e authored by Linus Torvalds

Merge branch 'for-4.15-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq

Pull workqueue fixes from Tejun Heo:

 - Lai's hotplug simplifications inadvertently fix a possible deadlock
   involving cpuset and workqueue

 - Resurrection of the CPU isolation fix which had been reverted due to
   the changes in the housekeeping code (the idea is sketched below)

 - A trivial unused include removal

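The isolation patch below only changes the default unbound cpumask and an include; the queueing-side logic that actually steers unbound work away from isolated CPUs is not visible in these hunks. As a rough illustration of the idea, here is a minimal user-space sketch, with plain bitmasks standing in for struct cpumask and all names invented rather than taken from the kernel:

#include <stdio.h>

/*
 * Illustrative sketch only: if the submitting CPU is outside the
 * unbound cpumask (e.g. it is isolated), pick an allowed CPU instead.
 * unbound_mask stands in for wq_unbound_cpumask.
 */
#define NR_CPUS 8

static unsigned int unbound_mask = 0x03; /* CPUs 0-1 are housekeeping */
static unsigned int online_mask  = 0xff; /* CPUs 0-7 online */

static int select_unbound_cpu(int local_cpu)
{
	unsigned int allowed = unbound_mask & online_mask;
	int cpu;

	if (allowed & (1u << local_cpu))
		return local_cpu;           /* local CPU is allowed; keep it */

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		if (allowed & (1u << cpu))
			return cpu;         /* redirect to an allowed CPU */

	return local_cpu;                   /* nothing allowed; fall back */
}

int main(void)
{
	/* Work submitted from isolated CPU 5 is steered to CPU 0. */
	printf("queued from CPU 5 -> CPU %d\n", select_unbound_cpu(5));
	return 0;
}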
* 'for-4.15-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq:
  workqueue: remove unneeded kallsyms include
  workqueue/hotplug: remove the workaround in rebind_workers()
  workqueue/hotplug: simplify workqueue_offline_cpu()
  workqueue: respect isolated cpus when queueing an unbound work
  main: kernel_start: move housekeeping_init() before workqueue_init_early()
parents a83cb7e6 01dfee95
init/main.c  +6 −1
@@ -588,6 +588,12 @@ asmlinkage __visible void __init start_kernel(void)
 		local_irq_disable();
 	radix_tree_init();
 
+	/*
+	 * Set up housekeeping before setting up workqueues to allow the unbound
+	 * workqueue to take non-housekeeping into account.
+	 */
+	housekeeping_init();
+
 	/*
 	 * Allow workqueue creation and work item queueing/cancelling
 	 * early.  Work item execution depends on kthreads and starts after
@@ -605,7 +611,6 @@ asmlinkage __visible void __init start_kernel(void)
 	early_irq_init();
 	init_IRQ();
 	tick_init();
-	housekeeping_init();
 	rcu_init_nohz();
 	init_timers();
 	hrtimers_init();
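Why the move matters: as the final hunk of kernel/workqueue.c below shows, workqueue_init_early() now seeds wq_unbound_cpumask from housekeeping_cpumask(HK_FLAG_DOMAIN), so housekeeping_init() must have run first. The resulting order in start_kernel(), heavily elided:

	housekeeping_init();     /* CPU isolation state is set up first ...   */
	workqueue_init_early();  /* ... so the unbound cpumask can be seeded  */
	                         /* from housekeeping_cpumask(HK_FLAG_DOMAIN) */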
kernel/workqueue.c  +12 −21
@@ -38,7 +38,6 @@
 #include <linux/hardirq.h>
 #include <linux/mempolicy.h>
 #include <linux/freezer.h>
-#include <linux/kallsyms.h>
 #include <linux/debug_locks.h>
 #include <linux/lockdep.h>
 #include <linux/idr.h>
@@ -48,6 +47,7 @@
 #include <linux/nodemask.h>
 #include <linux/moduleparam.h>
 #include <linux/uaccess.h>
+#include <linux/sched/isolation.h>
 
 #include "workqueue_internal.h"
 
@@ -1634,7 +1634,7 @@ static void worker_enter_idle(struct worker *worker)
 		mod_timer(&pool->idle_timer, jiffies + IDLE_WORKER_TIMEOUT);
 
 	/*
-	 * Sanity check nr_running.  Because wq_unbind_fn() releases
+	 * Sanity check nr_running.  Because unbind_workers() releases
 	 * pool->lock between setting %WORKER_UNBOUND and zapping
 	 * nr_running, the warning may trigger spuriously.  Check iff
	 * unbind is not in progress.
@@ -4510,9 +4510,8 @@ void show_workqueue_state(void)
  * cpu comes back online.
  */
 
-static void wq_unbind_fn(struct work_struct *work)
+static void unbind_workers(int cpu)
 {
-	int cpu = smp_processor_id();
 	struct worker_pool *pool;
 	struct worker *worker;
@@ -4589,16 +4588,6 @@ static void rebind_workers(struct worker_pool *pool)
 
 	spin_lock_irq(&pool->lock);
 
-	/*
-	 * XXX: CPU hotplug notifiers are weird and can call DOWN_FAILED
-	 * w/o preceding DOWN_PREPARE.  Work around it.  CPU hotplug is
-	 * being reworked and this can go away in time.
-	 */
-	if (!(pool->flags & POOL_DISASSOCIATED)) {
-		spin_unlock_irq(&pool->lock);
-		return;
-	}
-
 	pool->flags &= ~POOL_DISASSOCIATED;
 
 	for_each_pool_worker(worker, pool) {
@@ -4709,12 +4698,13 @@ int workqueue_online_cpu(unsigned int cpu)
 
 int workqueue_offline_cpu(unsigned int cpu)
 {
-	struct work_struct unbind_work;
 	struct workqueue_struct *wq;
 
 	/* unbinding per-cpu workers should happen on the local CPU */
-	INIT_WORK_ONSTACK(&unbind_work, wq_unbind_fn);
-	queue_work_on(cpu, system_highpri_wq, &unbind_work);
+	if (WARN_ON(cpu != smp_processor_id()))
+		return -1;
+
+	unbind_workers(cpu);
 
 	/* update NUMA affinity of unbound workqueues */
 	mutex_lock(&wq_pool_mutex);
@@ -4722,9 +4712,6 @@ int workqueue_offline_cpu(unsigned int cpu)
 		wq_update_unbound_numa(wq, cpu, false);
 	mutex_unlock(&wq_pool_mutex);
 
-	/* wait for per-cpu unbinding to finish */
-	flush_work(&unbind_work);
-	destroy_work_on_stack(&unbind_work);
 	return 0;
 }
 
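The simplification works because the cpuhp machinery now invokes workqueue_offline_cpu() on the CPU that is going down, so the handler can assert locality and call unbind_workers() inline rather than bouncing through an on-stack work item on system_highpri_wq. A minimal user-space analogue of that assert-then-run-inline pattern (illustrative names, not kernel API):

#define _GNU_SOURCE
#include <sched.h>   /* sched_getcpu(), a glibc extension */
#include <stdio.h>

/* Sketch: insist we are already on the expected CPU, then do the
 * teardown directly instead of dispatching it somewhere else. */
static int teardown_on_cpu(int cpu)
{
	if (cpu != sched_getcpu()) {
		fprintf(stderr, "teardown called on wrong CPU\n");
		return -1;   /* mirrors the WARN_ON + return -1 above */
	}
	/* ... unbind the workers of @cpu here ... */
	return 0;
}

int main(void)
{
	/* The caller is responsible for already running on the target CPU. */
	return teardown_on_cpu(sched_getcpu()) ? 1 : 0;
}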
@@ -4957,6 +4944,10 @@ int workqueue_set_unbound_cpumask(cpumask_var_t cpumask)
 	if (!zalloc_cpumask_var(&saved_cpumask, GFP_KERNEL))
 		return -ENOMEM;
 
+	/*
+	 * Not excluding isolated cpus on purpose.
+	 * If the user wishes to include them, we allow that.
+	 */
 	cpumask_and(cpumask, cpumask, cpu_possible_mask);
 	if (!cpumask_empty(cpumask)) {
 		apply_wqattrs_lock();
@@ -5555,7 +5546,7 @@ int __init workqueue_init_early(void)
 	WARN_ON(__alignof__(struct pool_workqueue) < __alignof__(long long));
 
 	BUG_ON(!alloc_cpumask_var(&wq_unbound_cpumask, GFP_KERNEL));
-	cpumask_copy(wq_unbound_cpumask, cpu_possible_mask);
+	cpumask_copy(wq_unbound_cpumask, housekeeping_cpumask(HK_FLAG_DOMAIN));
 
 	pwq_cache = KMEM_CACHE(pool_workqueue, SLAB_PANIC);
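Taken together with the init-order change in init/main.c, this last hunk makes the housekeeping set the default unbound cpumask. A worked example of the resulting default, assuming a hypothetical 8-CPU machine booted with isolcpus=2-3 (plain C, bitmasks in place of struct cpumask):

#include <stdio.h>

int main(void)
{
	unsigned int possible_mask = 0xff;                  /* CPUs 0-7     */
	unsigned int isolated_mask = (1u << 2) | (1u << 3); /* isolcpus=2-3 */
	unsigned int housekeeping  = possible_mask & ~isolated_mask;

	/* old default: cpu_possible_mask; new: the housekeeping set */
	printf("wq_unbound_cpumask: old=%#x new=%#x\n",
	       possible_mask, housekeeping);
	return 0;
}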