
Commit baf59022 authored by Tejun Heo

workqueue: factor out start_flush_work()



Factor out start_flush_work() from flush_work().  start_flush_work()
takes a @wait_executing argument which controls whether the barrier is
queued only if the work is pending, or also if it is executing.  As
flush_work() needs to wait for execution too, it passes %true.

This commit doesn't cause any behavior difference.  start_flush_work()
will be used to implement flush_work_sync().

Signed-off-by: Tejun Heo <tj@kernel.org>
parent 401a8d04
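
The @wait_executing split is what makes a stronger flush primitive possible: with %false, start_flush_work() queues the barrier only behind a pending instance and reports whether it did so, leaving the caller free to wait for in-flight executions separately.  Below is a hypothetical sketch of how flush_work_sync() might be built on top of it; the wait_on_work() helper (assumed here to wait for every currently executing instance of @work on all CPUs) is not part of this commit.

/*
 * Hypothetical sketch only -- flush_work_sync() lands in a later commit.
 * wait_on_work() is an assumed helper that waits for all currently
 * executing instances of @work; it does not exist in this commit.
 */
bool flush_work_sync(struct work_struct *work)
{
	struct wq_barrier barr;
	bool pending, waited;

	/* queue the barrier only if @work is pending */
	pending = start_flush_work(work, &barr, false);

	/* separately wait for any in-flight executions */
	waited = wait_on_work(work);

	/* then wait for the barrier queued behind the pending instance */
	if (pending) {
		wait_for_completion(&barr.done);
		destroy_work_on_stack(&barr.work);
	}

	return pending || waited;
}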
Loading
Loading
Loading
Loading
+37 −27
@@ -2326,35 +2326,17 @@ out_unlock:
 }
 EXPORT_SYMBOL_GPL(flush_workqueue);
 
-/**
- * flush_work - wait for a work to finish executing the last queueing instance
- * @work: the work to flush
- *
- * Wait until @work has finished execution.  This function considers
- * only the last queueing instance of @work.  If @work has been
- * enqueued across different CPUs on a non-reentrant workqueue or on
- * multiple workqueues, @work might still be executing on return on
- * some of the CPUs from earlier queueing.
- *
- * If @work was queued only on a non-reentrant, ordered or unbound
- * workqueue, @work is guaranteed to be idle on return if it hasn't
- * been requeued since flush started.
- *
- * RETURNS:
- * %true if flush_work() waited for the work to finish execution,
- * %false if it was already idle.
- */
-bool flush_work(struct work_struct *work)
+static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
+			     bool wait_executing)
 {
 	struct worker *worker = NULL;
 	struct global_cwq *gcwq;
 	struct cpu_workqueue_struct *cwq;
-	struct wq_barrier barr;
 
 	might_sleep();
 	gcwq = get_work_gcwq(work);
 	if (!gcwq)
-		return 0;
+		return false;
 
 	spin_lock_irq(&gcwq->lock);
 	if (!list_empty(&work->entry)) {
@@ -2367,24 +2349,52 @@ bool flush_work(struct work_struct *work)
 		cwq = get_work_cwq(work);
 		if (unlikely(!cwq || gcwq != cwq->gcwq))
 			goto already_gone;
-	} else {
+	} else if (wait_executing) {
 		worker = find_worker_executing_work(gcwq, work);
 		if (!worker)
 			goto already_gone;
 		cwq = worker->current_cwq;
-	}
+	} else
+		goto already_gone;
 
-	insert_wq_barrier(cwq, &barr, work, worker);
+	insert_wq_barrier(cwq, barr, work, worker);
 	spin_unlock_irq(&gcwq->lock);
 
 	lock_map_acquire(&cwq->wq->lockdep_map);
 	lock_map_release(&cwq->wq->lockdep_map);
-
-	wait_for_completion(&barr.done);
-	destroy_work_on_stack(&barr.work);
-	return true;
-already_gone:
-	spin_unlock_irq(&gcwq->lock);
-	return false;
-}
-EXPORT_SYMBOL_GPL(flush_work);
+	return true;
+already_gone:
+	spin_unlock_irq(&gcwq->lock);
+	return false;
+}
+
+/**
+ * flush_work - wait for a work to finish executing the last queueing instance
+ * @work: the work to flush
+ *
+ * Wait until @work has finished execution.  This function considers
+ * only the last queueing instance of @work.  If @work has been
+ * enqueued across different CPUs on a non-reentrant workqueue or on
+ * multiple workqueues, @work might still be executing on return on
+ * some of the CPUs from earlier queueing.
+ *
+ * If @work was queued only on a non-reentrant, ordered or unbound
+ * workqueue, @work is guaranteed to be idle on return if it hasn't
+ * been requeued since flush started.
+ *
+ * RETURNS:
+ * %true if flush_work() waited for the work to finish execution,
+ * %false if it was already idle.
+ */
+bool flush_work(struct work_struct *work)
+{
+	struct wq_barrier barr;
+
+	if (start_flush_work(work, &barr, true)) {
+		wait_for_completion(&barr.done);
+		destroy_work_on_stack(&barr.work);
+		return true;
+	} else
+		return false;
+}
+EXPORT_SYMBOL_GPL(flush_work);
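
For reference, a minimal caller-side sketch of the (unchanged) flush_work() semantics; frob_fn, frob_work and frob_example are hypothetical names, not part of this commit:

#include <linux/kernel.h>
#include <linux/workqueue.h>

static void frob_fn(struct work_struct *work)
{
	/* the deferred work itself */
}

static DECLARE_WORK(frob_work, frob_fn);

static void frob_example(void)
{
	schedule_work(&frob_work);	/* queue on the system workqueue */

	/* waits only for the last queueing instance of frob_work */
	if (flush_work(&frob_work))
		pr_info("waited for frob_work to finish\n");
	else
		pr_info("frob_work was already idle\n");
}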