Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 7ee681b2 authored by Thomas Gleixner, committed by Ingo Molnar
Browse files

workqueue: Convert to state machine callbacks



Get rid of the prio ordering of the separate notifiers and use a proper state
callback pair.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de>
Reviewed-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Acked-by: Tejun Heo <tj@kernel.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Lai Jiangshan <jiangshanlai@gmail.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Nicolas Iooss <nicolas.iooss_linux@m4x.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rasmus Villemoes <linux@rasmusvillemoes.dk>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: rt@linutronix.de
Link: http://lkml.kernel.org/r/20160713153335.197083890@linutronix.de


Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent c6a84daa
Loading
Loading
Loading
Loading
+0 −9
Original line number Diff line number Diff line
@@ -55,15 +55,6 @@ extern ssize_t arch_cpu_release(const char *, size_t);
#endif
struct notifier_block;

/*
 * CPU notifier priorities.
 */
enum {
	/* bring up workqueues before normal notifiers and down after */
	CPU_PRI_WORKQUEUE_UP	= 5,
	CPU_PRI_WORKQUEUE_DOWN	= -5,
};

#define CPU_ONLINE		0x0002 /* CPU (unsigned)v is up */
#define CPU_UP_PREPARE		0x0003 /* CPU (unsigned)v coming up */
#define CPU_UP_CANCELED		0x0004 /* CPU (unsigned)v NOT coming up */
+2 −0
Original line number Diff line number Diff line
@@ -12,6 +12,7 @@ enum cpuhp_state {
	CPUHP_PERF_BFIN,
	CPUHP_PERF_POWER,
	CPUHP_PERF_SUPERH,
	CPUHP_WORKQUEUE_PREP,
	CPUHP_NOTIFY_PREPARE,
	CPUHP_BRINGUP_CPU,
	CPUHP_AP_IDLE_DEAD,
@@ -49,6 +50,7 @@ enum cpuhp_state {
	CPUHP_AP_PERF_S390_SF_ONLINE,
	CPUHP_AP_PERF_ARM_CCI_ONLINE,
	CPUHP_AP_PERF_ARM_CCN_ONLINE,
	CPUHP_AP_WORKQUEUE_ONLINE,
	CPUHP_AP_NOTIFY_ONLINE,
	CPUHP_AP_ONLINE_DYN,
	CPUHP_AP_ONLINE_DYN_END		= CPUHP_AP_ONLINE_DYN + 30,
+6 −0
Original line number Diff line number Diff line
@@ -625,4 +625,10 @@ void wq_watchdog_touch(int cpu);
static inline void wq_watchdog_touch(int cpu) { }
#endif	/* CONFIG_WQ_WATCHDOG */

#ifdef CONFIG_SMP
int workqueue_prepare_cpu(unsigned int cpu);
int workqueue_online_cpu(unsigned int cpu);
int workqueue_offline_cpu(unsigned int cpu);
#endif

#endif
+10 −0
Original line number Diff line number Diff line
@@ -1185,6 +1185,11 @@ static struct cpuhp_step cpuhp_bp_states[] = {
		.startup = perf_event_init_cpu,
		.teardown = perf_event_exit_cpu,
	},
	[CPUHP_WORKQUEUE_PREP] = {
		.name = "workqueue prepare",
		.startup = workqueue_prepare_cpu,
		.teardown = NULL,
	},
	/*
	 * Preparatory and dead notifiers. Will be replaced once the notifiers
	 * are converted to states.
@@ -1267,6 +1272,11 @@ static struct cpuhp_step cpuhp_ap_states[] = {
		.startup = perf_event_init_cpu,
		.teardown = perf_event_exit_cpu,
	},
	[CPUHP_AP_WORKQUEUE_ONLINE] = {
		.name = "workqueue online",
		.startup = workqueue_online_cpu,
		.teardown = workqueue_offline_cpu,
	},

	/*
	 * Online/down_prepare notifiers. Will be removed once the notifiers
+43 −65
Original line number Diff line number Diff line
@@ -4611,31 +4611,25 @@ static void restore_unbound_workers_cpumask(struct worker_pool *pool, int cpu)
						  pool->attrs->cpumask) < 0);
}

/*
 * Workqueues should be brought up before normal priority CPU notifiers.
 * This will be registered high priority CPU notifier.
 */
static int workqueue_cpu_up_callback(struct notifier_block *nfb,
					       unsigned long action,
					       void *hcpu)
int workqueue_prepare_cpu(unsigned int cpu)
{
	int cpu = (unsigned long)hcpu;
	struct worker_pool *pool;
	struct workqueue_struct *wq;
	int pi;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_UP_PREPARE:
	for_each_cpu_worker_pool(pool, cpu) {
		if (pool->nr_workers)
			continue;
		if (!create_worker(pool))
				return NOTIFY_BAD;
			return -ENOMEM;
	}
		break;
	return 0;
}

int workqueue_online_cpu(unsigned int cpu)
{
	struct worker_pool *pool;
	struct workqueue_struct *wq;
	int pi;

	case CPU_DOWN_FAILED:
	case CPU_ONLINE:
	mutex_lock(&wq_pool_mutex);

	for_each_pool(pool, pi) {
@@ -4654,25 +4648,14 @@ static int workqueue_cpu_up_callback(struct notifier_block *nfb,
		wq_update_unbound_numa(wq, cpu, true);

	mutex_unlock(&wq_pool_mutex);
		break;
	}
	return NOTIFY_OK;
	return 0;
}

/*
 * Workqueues should be brought down after normal priority CPU notifiers.
 * This will be registered as low priority CPU notifier.
 */
static int workqueue_cpu_down_callback(struct notifier_block *nfb,
						 unsigned long action,
						 void *hcpu)
int workqueue_offline_cpu(unsigned int cpu)
{
	int cpu = (unsigned long)hcpu;
	struct work_struct unbind_work;
	struct workqueue_struct *wq;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_DOWN_PREPARE:
	/* unbinding per-cpu workers should happen on the local CPU */
	INIT_WORK_ONSTACK(&unbind_work, wq_unbind_fn);
	queue_work_on(cpu, system_highpri_wq, &unbind_work);
@@ -4686,9 +4669,7 @@ static int workqueue_cpu_down_callback(struct notifier_block *nfb,
	/* wait for per-cpu unbinding to finish */
	flush_work(&unbind_work);
	destroy_work_on_stack(&unbind_work);
		break;
	}
	return NOTIFY_OK;
	return 0;
}

#ifdef CONFIG_SMP
@@ -5490,9 +5471,6 @@ static int __init init_workqueues(void)

	pwq_cache = KMEM_CACHE(pool_workqueue, SLAB_PANIC);

	cpu_notifier(workqueue_cpu_up_callback, CPU_PRI_WORKQUEUE_UP);
	hotcpu_notifier(workqueue_cpu_down_callback, CPU_PRI_WORKQUEUE_DOWN);

	wq_numa_init();

	/* initialize CPU pools */