
Commit 12997d1a authored by Bjorn Helgaas

Revert "workqueue: allow work_on_cpu() to be called recursively"

This reverts commit c2fda509.

c2fda509 removed the lockdep annotation from work_on_cpu() to work around
the PCI path that calls work_on_cpu() from within a work_on_cpu() work item
(PF driver .probe() method -> pci_enable_sriov() -> add VFs -> VF driver
.probe() method).
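
For illustration, here is a minimal sketch of that nesting, using hypothetical
driver functions rather than code from this commit: both levels flush an
on-stack work item created at the same work_on_cpu() call site, so lockdep saw
what looked like a recursive acquisition of one lock class.

  #include <linux/workqueue.h>

  /* Hypothetical stand-in for the VF driver's .probe() body. */
  static long vf_probe_fn(void *arg)
  {
  	return 0;
  }

  /*
   * Hypothetical stand-in for the PF driver's .probe() body.  Enabling
   * SR-IOV adds the VFs, and binding a VF driver goes back through the
   * probe path, which used to call work_on_cpu() again while this
   * function was itself already running inside a work_on_cpu() work item.
   */
  static long pf_probe_fn(void *arg)
  {
  	return work_on_cpu(0, vf_probe_fn, arg);
  }

  static long example_outer_probe(void *arg)
  {
  	/* Outer level; the nested call above shares its lockdep class. */
  	return work_on_cpu(0, pf_probe_fn, arg);
  }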

961da7fb6b22 ("PCI: Avoid unnecessary CPU switch when calling driver
.probe() method") avoids that recursive work_on_cpu() use in a different
way, so this revert restores the work_on_cpu() lockdep annotation.
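
The approach there, roughly, is to hop to another CPU only when the device has
a NUMA node preference and the caller is not already on that node; a VF
.probe() started from inside work_on_cpu() is already on the right node, so no
nested work_on_cpu() is issued.  A rough sketch of that idea, using a
hypothetical helper rather than the exact pci_call_probe() code:

  #include <linux/cpumask.h>
  #include <linux/topology.h>
  #include <linux/workqueue.h>

  /* Hypothetical helper sketching the approach of 961da7fb6b22. */
  static long example_call_probe(int dev_node, long (*probe_fn)(void *),
  			       void *arg)
  {
  	if (dev_node >= 0 && dev_node != numa_node_id()) {
  		unsigned int cpu = cpumask_any_and(cpumask_of_node(dev_node),
  						   cpu_online_mask);

  		if (cpu < nr_cpu_ids)
  			return work_on_cpu(cpu, probe_fn, arg);
  	}
  	/* Already on a suitable node (or no node preference): run locally. */
  	return probe_fn(arg);
  }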

Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
Acked-by: Tejun Heo <tj@kernel.org>
parent 12c3156f
kernel/workqueue.c: +10 −22
@@ -2840,19 +2840,6 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr)
 	return false;
 }
 
-static bool __flush_work(struct work_struct *work)
-{
-	struct wq_barrier barr;
-
-	if (start_flush_work(work, &barr)) {
-		wait_for_completion(&barr.done);
-		destroy_work_on_stack(&barr.work);
-		return true;
-	} else {
-		return false;
-	}
-}
-
 /**
  * flush_work - wait for a work to finish executing the last queueing instance
  * @work: the work to flush
@@ -2866,10 +2853,18 @@ static bool __flush_work(struct work_struct *work)
  */
 bool flush_work(struct work_struct *work)
 {
+	struct wq_barrier barr;
+
 	lock_map_acquire(&work->lockdep_map);
 	lock_map_release(&work->lockdep_map);
 
-	return __flush_work(work);
+	if (start_flush_work(work, &barr)) {
+		wait_for_completion(&barr.done);
+		destroy_work_on_stack(&barr.work);
+		return true;
+	} else {
+		return false;
+	}
 }
 EXPORT_SYMBOL_GPL(flush_work);
 
@@ -4814,14 +4809,7 @@ long work_on_cpu(int cpu, long (*fn)(void *), void *arg)
 
 	INIT_WORK_ONSTACK(&wfc.work, work_for_cpu_fn);
 	schedule_work_on(cpu, &wfc.work);
-
-	/*
-	 * The work item is on-stack and can't lead to deadlock through
-	 * flushing.  Use __flush_work() to avoid spurious lockdep warnings
-	 * when work_on_cpu()s are nested.
-	 */
-	__flush_work(&wfc.work);
-
+	flush_work(&wfc.work);
 	return wfc.ret;
 }
 EXPORT_SYMBOL_GPL(work_on_cpu);
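
As background on what the restored annotation buys: the lock_map_acquire()/
lock_map_release() pair in flush_work() lets lockdep report flush-related
deadlocks before they actually hang.  A hypothetical example (not from this
commit) of the pattern it catches, flushing a work item while holding a lock
that the work function also takes:

  #include <linux/mutex.h>
  #include <linux/workqueue.h>

  static DEFINE_MUTEX(demo_lock);

  static void demo_work_fn(struct work_struct *work)
  {
  	/*
  	 * The workqueue core holds demo_work's lockdep_map around this
  	 * callback, so lockdep records demo_work -> demo_lock here.
  	 */
  	mutex_lock(&demo_lock);
  	mutex_unlock(&demo_lock);
  }

  static DECLARE_WORK(demo_work, demo_work_fn);

  static void demo_bad_flush(void)
  {
  	mutex_lock(&demo_lock);
  	/*
  	 * flush_work() "acquires" demo_work's lockdep_map while demo_lock
  	 * is held, recording demo_lock -> demo_work; the two orderings
  	 * together are flagged as a potential deadlock even if this run
  	 * never actually hangs.
  	 */
  	flush_work(&demo_work);
  	mutex_unlock(&demo_lock);
  }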