
Commit f756d5e2 authored by Nathan Lynch, committed by Linus Torvalds

[PATCH] fix workqueue oops during cpu offline



Use first_cpu(cpu_possible_map) for the single-thread workqueue case.  We
used to hardcode cpu 0, but that broke on systems where !cpu_possible(0)
once workqueue_struct->cpu_workqueue_struct was changed from a static array
to alloc_percpu.

Commit bce61dd4 ("Fix hardcoded cpu=0 in workqueue for per_cpu_ptr()
calls") fixed that for Ben's funky sparc64 system, but it regressed my
Power5.  Offlining cpu 0 oopses on the next call to queue_work for a
single-thread workqueue, because now we try to manipulate
per_cpu_ptr(wq->cpu_wq, 1), which is uninitialized.
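
To make the failure concrete, here is a minimal userspace sketch of the
interaction between cpu hotplug and the single-thread cpu choice.
Everything below (NR_CPUS, struct cpu_wq, slots[], online_map) is a toy
stand-in for the kernel's structures, not the real API: only the
creation-time cpu's area gets set up, so any index that can change under
hotplug eventually points at a slot nobody initialized.

#include <stdio.h>

#define NR_CPUS 4

struct cpu_wq { int initialized; };	/* toy cpu_workqueue_struct */

static struct cpu_wq *slots[NR_CPUS];	/* toy per_cpu_ptr(wq->cpu_wq, cpu) */
static unsigned long online_map = 0x3;	/* cpus 0 and 1 start online */

/* Lowest online cpu, as the old any_online_cpu(cpu_online_map) picked. */
static int any_online_cpu(void)
{
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		if (online_map & (1UL << cpu))
			return cpu;
	return -1;
}

int main(void)
{
	static struct cpu_wq cwq = { .initialized = 1 };

	/* __create_workqueue() ran while cpu 0 was online: slot 0 is set up. */
	slots[any_online_cpu()] = &cwq;

	online_map &= ~1UL;	/* offline cpu 0, as on the Power5 */

	/* queue_work() recomputes the cpu and lands on slot 1, never set up. */
	struct cpu_wq *target = slots[any_online_cpu()];
	printf("queueing on cpu %d: %s\n", any_online_cpu(),
	       target && target->initialized ? "ok" : "uninitialized -> oops");
	return 0;
}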

So we need to establish an unchanging "slot" for single-thread workqueues,
one that is guaranteed to have a valid percpu allocation.  Since
alloc_percpu keys off cpu_possible_map, which must not change after
initialization, make this slot == first_cpu(cpu_possible_map).
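
Under the same toy model, the fix is a one-time latch.  This is a sketch
with hypothetical helpers, not the kernel code itself (the real change is
the diff below): pick the slot once from the possible map, which is fixed
after boot, and have every later queue/flush/create/destroy use it
unconditionally.

#include <stdio.h>

#define NR_CPUS 4

static unsigned long possible_map = 0x6;	/* !cpu_possible(0): cpus 1 and 2 only */
static int singlethread_cpu;			/* latched once, never recomputed */

/* Lowest set bit, mirroring first_cpu(cpu_possible_map). */
static int first_cpu(unsigned long map)
{
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		if (map & (1UL << cpu))
			return cpu;
	return -1;
}

int main(void)
{
	/* Done once, as init_workqueues() now does; alloc_percpu is keyed
	 * off the same map, so this slot always has a valid allocation. */
	singlethread_cpu = first_cpu(possible_map);

	printf("single-thread workqueues always use slot %d\n", singlethread_cpu);
	return 0;
}

Because the possible map can neither shrink nor grow at runtime, neither
hardcoding 0 (wrong when !cpu_possible(0)) nor tracking the online map
(wrong across an offline) can bite again.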

Signed-off-by: Nathan Lynch <ntl@pobox.com>
Cc: <stable@kernel.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 945f390f
kernel/workqueue.c: +10 −6
@@ -29,7 +29,8 @@
 #include <linux/kthread.h>
 
 /*
- * The per-CPU workqueue (if single thread, we always use cpu 0's).
+ * The per-CPU workqueue (if single thread, we always use the first
+ * possible cpu).
  *
  * The sequence counters are for flush_scheduled_work().  It wants to wait
  * until until all currently-scheduled works are completed, but it doesn't
@@ -69,6 +70,8 @@ struct workqueue_struct {
 static DEFINE_SPINLOCK(workqueue_lock);
 static LIST_HEAD(workqueues);
 
+static int singlethread_cpu;
+
 /* If it's single threaded, it isn't in the list of workqueues. */
 static inline int is_single_threaded(struct workqueue_struct *wq)
 {
@@ -102,7 +105,7 @@ int fastcall queue_work(struct workqueue_struct *wq, struct work_struct *work)
 
 	if (!test_and_set_bit(0, &work->pending)) {
 		if (unlikely(is_single_threaded(wq)))
-			cpu = any_online_cpu(cpu_online_map);
+			cpu = singlethread_cpu;
 		BUG_ON(!list_empty(&work->entry));
 		__queue_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
 		ret = 1;
@@ -118,7 +121,7 @@ static void delayed_work_timer_fn(unsigned long __data)
 	int cpu = smp_processor_id();
 
 	if (unlikely(is_single_threaded(wq)))
-		cpu = any_online_cpu(cpu_online_map);
+		cpu = singlethread_cpu;
 
 	__queue_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
 }
@@ -267,7 +270,7 @@ void fastcall flush_workqueue(struct workqueue_struct *wq)
 
 	if (is_single_threaded(wq)) {
 		/* Always use first cpu's area. */
-		flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, any_online_cpu(cpu_online_map)));
+		flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, singlethread_cpu));
	} else {
 		int cpu;
 
@@ -325,7 +328,7 @@ struct workqueue_struct *__create_workqueue(const char *name,
 	lock_cpu_hotplug();
 	if (singlethread) {
 		INIT_LIST_HEAD(&wq->list);
-		p = create_workqueue_thread(wq, any_online_cpu(cpu_online_map));
+		p = create_workqueue_thread(wq, singlethread_cpu);
 		if (!p)
 			destroy = 1;
 		else
@@ -379,7 +382,7 @@ void destroy_workqueue(struct workqueue_struct *wq)
 	/* We don't need the distraction of CPUs appearing and vanishing. */
 	lock_cpu_hotplug();
 	if (is_single_threaded(wq))
-		cleanup_workqueue_thread(wq, any_online_cpu(cpu_online_map));
+		cleanup_workqueue_thread(wq, singlethread_cpu);
 	else {
 		for_each_online_cpu(cpu)
 			cleanup_workqueue_thread(wq, cpu);
@@ -567,6 +570,7 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
 
 void init_workqueues(void)
 {
+	singlethread_cpu = first_cpu(cpu_possible_map);
 	hotcpu_notifier(workqueue_cpu_callback, 0);
 	keventd_wq = create_workqueue("events");
 	BUG_ON(!keventd_wq);