kernel/workqueue.c  +6 −9

@@ -1641,7 +1641,7 @@ static void worker_enter_idle(struct worker *worker)
 	mod_timer(&pool->idle_timer, jiffies + IDLE_WORKER_TIMEOUT);

 	/*
-	 * Sanity check nr_running. Because wq_unbind_fn() releases
+	 * Sanity check nr_running. Because unbind_workers() releases
 	 * pool->lock between setting %WORKER_UNBOUND and zapping
 	 * nr_running, the warning may trigger spuriously. Check iff
 	 * unbind is not in progress.
@@ -4478,9 +4478,8 @@ void show_workqueue_state(void)
  * cpu comes back online.
  */
-static void wq_unbind_fn(struct work_struct *work)
+static void unbind_workers(int cpu)
 {
-	int cpu = smp_processor_id();
 	struct worker_pool *pool;
 	struct worker *worker;
@@ -4677,12 +4676,13 @@ int workqueue_online_cpu(unsigned int cpu)

 int workqueue_offline_cpu(unsigned int cpu)
 {
-	struct work_struct unbind_work;
 	struct workqueue_struct *wq;

 	/* unbinding per-cpu workers should happen on the local CPU */
-	INIT_WORK_ONSTACK(&unbind_work, wq_unbind_fn);
-	queue_work_on(cpu, system_highpri_wq, &unbind_work);
+	if (WARN_ON(cpu != smp_processor_id()))
+		return -1;
+
+	unbind_workers(cpu);

 	/* update NUMA affinity of unbound workqueues */
 	mutex_lock(&wq_pool_mutex);
@@ -4690,9 +4690,6 @@ int workqueue_offline_cpu(unsigned int cpu)
 		wq_update_unbound_numa(wq, cpu, false);
 	mutex_unlock(&wq_pool_mutex);

-	/* wait for per-cpu unbinding to finish */
-	flush_work(&unbind_work);
-	destroy_work_on_stack(&unbind_work);
 	return 0;
 }
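In short, the patch turns the work-item indirection into a direct call: wq_unbind_fn() is renamed to unbind_workers(int cpu), and workqueue_offline_cpu() calls it directly instead of queueing a stack-allocated work item on the dying CPU's system_highpri_wq and flushing it. This is presumably safe because the hotplug callback already runs on the CPU going offline, which is exactly what the new WARN_ON(cpu != smp_processor_id()) check asserts; the flush_work()/destroy_work_on_stack() teardown then has nothing left to synchronize against. For reference, here is a sketch of the resulting workqueue_offline_cpu(), assembled from the context and added lines in the hunks above; the list_for_each_entry() iteration over &workqueues sits behind a collapsed region of the diff and is an assumption on my part, not shown in the hunks:

/*
 * Sketch of workqueue_offline_cpu() after this patch, built from the
 * visible hunks. The list_for_each_entry() line is assumed (hidden in
 * a collapsed part of the diff).
 */
int workqueue_offline_cpu(unsigned int cpu)
{
	struct workqueue_struct *wq;

	/* unbinding per-cpu workers should happen on the local CPU */
	if (WARN_ON(cpu != smp_processor_id()))
		return -1;

	/* direct call; previously queued as a work item on this CPU */
	unbind_workers(cpu);

	/* update NUMA affinity of unbound workqueues */
	mutex_lock(&wq_pool_mutex);
	list_for_each_entry(wq, &workqueues, list)	/* assumed, see above */
		wq_update_unbound_numa(wq, cpu, false);
	mutex_unlock(&wq_pool_mutex);

	return 0;
}

The net effect is the +6/−9 shown in the header: the INIT_WORK_ONSTACK/queue_work_on/flush_work/destroy_work_on_stack pattern disappears, and the only new machinery is the sanity check that the callback really is running on the CPU being unplugged.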