Commit 3da1c84c authored by Oleg Nesterov, committed by Linus Torvalds

workqueues: make get_online_cpus() useable for work->func()



workqueue_cpu_callback(CPU_DEAD) flushes cwq->thread under
cpu_maps_update_begin().  This means that work running on multithreaded
workqueues can't use get_online_cpus() because of a possible deadlock; this
is a very bad and very old problem.

Introduce a new state, CPU_POST_DEAD, whose notifiers are called after
cpu_hotplug_done() but before cpu_maps_update_done().
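
Roughly, the ordering in the cpu_down() path becomes the following (a
simplified sketch, not the literal kernel code; mod and hcpu are as in
_cpu_down() below):

	cpu_maps_update_begin();	/* takes cpu_add_remove_lock */
	cpu_hotplug_begin();		/* blocks new get_online_cpus() callers */
	/* CPU_DOWN_PREPARE, stop-machine takedown, CPU_DYING, __cpu_die(), CPU_DEAD */
	cpu_hotplug_done();		/* get_online_cpus() can succeed again */
	raw_notifier_call_chain(&cpu_chain, CPU_POST_DEAD | mod, hcpu);
	cpu_maps_update_done();		/* drops cpu_add_remove_lock */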

Change workqueue_cpu_callback() to use CPU_POST_DEAD instead of CPU_DEAD.
This means that create/destroy functions can't rely on get_online_cpus()
any longer and should take cpu_add_remove_lock instead.
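
The effect on callers is visible in the kernel/workqueue.c hunks below; as an
illustrative before/after fragment (not a complete function):

	/* before: the create/destroy paths pinned the cpu maps with */
	get_online_cpus();
	/* ... add or remove per-CPU workqueue threads ... */
	put_online_cpus();

	/* after: take cpu_add_remove_lock instead; CPU_POST_DEAD is also
	 * delivered under this lock, so cwq->thread remains stable */
	cpu_maps_update_begin();
	/* ... add or remove per-CPU workqueue threads ... */
	cpu_maps_update_done();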

[akpm@linux-foundation.org: fix CONFIG_SMP=n]
Signed-off-by: Oleg Nesterov <oleg@tv-sign.ru>
Acked-by: Gautham R Shenoy <ego@in.ibm.com>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Max Krasnyansky <maxk@qualcomm.com>
Cc: Paul Jackson <pj@sgi.com>
Cc: Paul Menage <menage@google.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Vegard Nossum <vegard.nossum@gmail.com>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 8616a89a
include/linux/cpu.h  +11 −4
@@ -69,10 +69,11 @@ static inline void unregister_cpu_notifier(struct notifier_block *nb)
 #endif
 
 int cpu_up(unsigned int cpu);
-
 extern void cpu_hotplug_init(void);
+extern void cpu_maps_update_begin(void);
+extern void cpu_maps_update_done(void);
 
-#else
+#else	/* CONFIG_SMP */
 
 static inline int register_cpu_notifier(struct notifier_block *nb)
 {
@@ -87,10 +88,16 @@ static inline void cpu_hotplug_init(void)
 {
 }
 
+static inline void cpu_maps_update_begin(void)
+{
+}
+
+static inline void cpu_maps_update_done(void)
+{
+}
+
 #endif /* CONFIG_SMP */
 extern struct sysdev_class cpu_sysdev_class;
-extern void cpu_maps_update_begin(void);
-extern void cpu_maps_update_done(void);
 
 #ifdef CONFIG_HOTPLUG_CPU
 /* Stop CPUs going up and down. */
include/linux/notifier.h  +2 −0
@@ -214,6 +214,8 @@ static inline int notifier_to_errno(int ret)
 #define CPU_DEAD		0x0007 /* CPU (unsigned)v dead */
 #define CPU_DYING		0x0008 /* CPU (unsigned)v not running any task,
 				        * not handling interrupts, soon dead */
+#define CPU_POST_DEAD		0x0009 /* CPU (unsigned)v dead, cpu_hotplug
+					* lock is dropped */
 
 /* Used for CPU hotplug events occuring while tasks are frozen due to a suspend
  * operation in progress
kernel/cpu.c  +5 −0
@@ -285,6 +285,11 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
 	set_cpus_allowed_ptr(current, &old_allowed);
 out_release:
 	cpu_hotplug_done();
+	if (!err) {
+		if (raw_notifier_call_chain(&cpu_chain, CPU_POST_DEAD | mod,
+					    hcpu) == NOTIFY_BAD)
+			BUG();
+	}
 	return err;
 }
 
kernel/workqueue.c  +9 −9
@@ -828,7 +828,7 @@ struct workqueue_struct *__create_workqueue_key(const char *name,
 		err = create_workqueue_thread(cwq, singlethread_cpu);
 		start_workqueue_thread(cwq, -1);
 	} else {
-		get_online_cpus();
+		cpu_maps_update_begin();
 		spin_lock(&workqueue_lock);
 		list_add(&wq->list, &workqueues);
 		spin_unlock(&workqueue_lock);
@@ -840,7 +840,7 @@ struct workqueue_struct *__create_workqueue_key(const char *name,
 			err = create_workqueue_thread(cwq, cpu);
 			start_workqueue_thread(cwq, cpu);
 		}
-		put_online_cpus();
+		cpu_maps_update_done();
 	}
 
 	if (err) {
@@ -854,8 +854,8 @@ EXPORT_SYMBOL_GPL(__create_workqueue_key);
 static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq)
 {
 	/*
-	 * Our caller is either destroy_workqueue() or CPU_DEAD,
-	 * get_online_cpus() protects cwq->thread.
+	 * Our caller is either destroy_workqueue() or CPU_POST_DEAD,
+	 * cpu_add_remove_lock protects cwq->thread.
 	 */
 	if (cwq->thread == NULL)
 		return;
@@ -865,7 +865,7 @@ static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq)

 	flush_cpu_workqueue(cwq);
 	/*
-	 * If the caller is CPU_DEAD and cwq->worklist was not empty,
+	 * If the caller is CPU_POST_DEAD and cwq->worklist was not empty,
 	 * a concurrent flush_workqueue() can insert a barrier after us.
 	 * However, in that case run_workqueue() won't return and check
 	 * kthread_should_stop() until it flushes all work_struct's.
@@ -889,14 +889,14 @@ void destroy_workqueue(struct workqueue_struct *wq)
 	const cpumask_t *cpu_map = wq_cpu_map(wq);
 	int cpu;
 
-	get_online_cpus();
+	cpu_maps_update_begin();
 	spin_lock(&workqueue_lock);
 	list_del(&wq->list);
 	spin_unlock(&workqueue_lock);
 
 	for_each_cpu_mask_nr(cpu, *cpu_map)
 		cleanup_workqueue_thread(per_cpu_ptr(wq->cpu_wq, cpu));
-	put_online_cpus();
+	cpu_maps_update_done();
 
 	free_percpu(wq->cpu_wq);
 	kfree(wq);
@@ -935,7 +935,7 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,

 		case CPU_UP_CANCELED:
 			start_workqueue_thread(cwq, -1);
-		case CPU_DEAD:
+		case CPU_POST_DEAD:
 			cleanup_workqueue_thread(cwq);
 			break;
 		}
@@ -943,7 +943,7 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,

 	switch (action) {
 	case CPU_UP_CANCELED:
-	case CPU_DEAD:
+	case CPU_POST_DEAD:
 		cpu_clear(cpu, cpu_populated_map);
 	}