
Commit 606a5020 authored by Tejun Heo

workqueue: gut flush[_delayed]_work_sync()



Now that all workqueues are non-reentrant, flush[_delayed]_work_sync()
are equivalent to flush[_delayed]_work().  Drop the separate
implementation and make them thin wrappers around
flush[_delayed]_work().

* start_flush_work() no longer takes @wait_executing, as its only
  remaining user - flush_work() - always sets it to %true.

* __cancel_work_timer() uses flush_work() instead of wait_on_work().
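
For illustration only (this sketch is not part of the patch; the driver,
struct and field names below are hypothetical): because flush_work() now
always waits for the last queueing instance, existing callers of the
deprecated _sync variants keep their guarantees, and new code can simply
call flush_work() / flush_delayed_work() directly.

	#include <linux/workqueue.h>

	/* hypothetical driver state, for illustration only */
	struct foo_dev {
		struct work_struct	irq_work;
		struct delayed_work	poll_work;
	};

	static void foo_dev_shutdown(struct foo_dev *fd)
	{
		/* timer cancelled and any pending/running poll work waited for */
		cancel_delayed_work_sync(&fd->poll_work);

		/* flush_work_sync() is now just a wrapper; either call is fine */
		flush_work(&fd->irq_work);
	}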

Signed-off-by: Tejun Heo <tj@kernel.org>
parent dbf2576e
+12 −2
@@ -412,11 +412,9 @@ extern int keventd_up(void);
 int execute_in_process_context(work_func_t fn, struct execute_work *);
 
 extern bool flush_work(struct work_struct *work);
-extern bool flush_work_sync(struct work_struct *work);
 extern bool cancel_work_sync(struct work_struct *work);
 
 extern bool flush_delayed_work(struct delayed_work *dwork);
-extern bool flush_delayed_work_sync(struct delayed_work *work);
 extern bool cancel_delayed_work_sync(struct delayed_work *dwork);
 
 extern void workqueue_set_max_active(struct workqueue_struct *wq,
@@ -456,6 +454,18 @@ static inline bool __cancel_delayed_work(struct delayed_work *work)
 	return ret;
 }
 
+/* used to be different but now identical to flush_work(), deprecated */
+static inline bool flush_work_sync(struct work_struct *work)
+{
+	return flush_work(work);
+}
+
+/* used to be different but now identical to flush_delayed_work(), deprecated */
+static inline bool flush_delayed_work_sync(struct delayed_work *dwork)
+{
+	return flush_delayed_work(dwork);
+}
+
 #ifndef CONFIG_SMP
 static inline long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
 {
+10 −112
@@ -2801,8 +2801,7 @@ void drain_workqueue(struct workqueue_struct *wq)
 }
 EXPORT_SYMBOL_GPL(drain_workqueue);
 
-static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
-			     bool wait_executing)
+static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr)
 {
 	struct worker *worker = NULL;
 	struct global_cwq *gcwq;
@@ -2824,13 +2823,12 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
 		cwq = get_work_cwq(work);
 		if (unlikely(!cwq || gcwq != cwq->pool->gcwq))
 			goto already_gone;
-	} else if (wait_executing) {
+	} else {
 		worker = find_worker_executing_work(gcwq, work);
 		if (!worker)
 			goto already_gone;
 		cwq = worker->current_cwq;
-	} else
-		goto already_gone;
+	}
 
 	insert_wq_barrier(cwq, barr, work, worker);
 	spin_unlock_irq(&gcwq->lock);
@@ -2857,15 +2855,8 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
  * flush_work - wait for a work to finish executing the last queueing instance
  * @work: the work to flush
  *
- * Wait until @work has finished execution.  This function considers
- * only the last queueing instance of @work.  If @work has been
- * enqueued across different CPUs on a non-reentrant workqueue or on
- * multiple workqueues, @work might still be executing on return on
- * some of the CPUs from earlier queueing.
- *
- * If @work was queued only on a non-reentrant, ordered or unbound
- * workqueue, @work is guaranteed to be idle on return if it hasn't
- * been requeued since flush started.
+ * Wait until @work has finished execution.  @work is guaranteed to be idle
+ * on return if it hasn't been requeued since flush started.
  *
  * RETURNS:
  * %true if flush_work() waited for the work to finish execution,
@@ -2878,85 +2869,15 @@ bool flush_work(struct work_struct *work)
 	lock_map_acquire(&work->lockdep_map);
 	lock_map_release(&work->lockdep_map);
 
-	if (start_flush_work(work, &barr, true)) {
-		wait_for_completion(&barr.done);
-		destroy_work_on_stack(&barr.work);
-		return true;
-	} else
-		return false;
-}
-EXPORT_SYMBOL_GPL(flush_work);
-
-static bool wait_on_cpu_work(struct global_cwq *gcwq, struct work_struct *work)
-{
-	struct wq_barrier barr;
-	struct worker *worker;
-
-	spin_lock_irq(&gcwq->lock);
-
-	worker = find_worker_executing_work(gcwq, work);
-	if (unlikely(worker))
-		insert_wq_barrier(worker->current_cwq, &barr, work, worker);
-
-	spin_unlock_irq(&gcwq->lock);
-
-	if (unlikely(worker)) {
+	if (start_flush_work(work, &barr)) {
 		wait_for_completion(&barr.done);
 		destroy_work_on_stack(&barr.work);
 		return true;
-	} else
+	} else {
 		return false;
-}
-
-static bool wait_on_work(struct work_struct *work)
-{
-	bool ret = false;
-	int cpu;
-
-	might_sleep();
-
-	lock_map_acquire(&work->lockdep_map);
-	lock_map_release(&work->lockdep_map);
-
-	for_each_gcwq_cpu(cpu)
-		ret |= wait_on_cpu_work(get_gcwq(cpu), work);
-	return ret;
+	}
 }
-
-/**
- * flush_work_sync - wait until a work has finished execution
- * @work: the work to flush
- *
- * Wait until @work has finished execution.  On return, it's
- * guaranteed that all queueing instances of @work which happened
- * before this function is called are finished.  In other words, if
- * @work hasn't been requeued since this function was called, @work is
- * guaranteed to be idle on return.
- *
- * RETURNS:
- * %true if flush_work_sync() waited for the work to finish execution,
- * %false if it was already idle.
- */
-bool flush_work_sync(struct work_struct *work)
-{
-	struct wq_barrier barr;
-	bool pending, waited;
-
-	/* we'll wait for executions separately, queue barr only if pending */
-	pending = start_flush_work(work, &barr, false);
-
-	/* wait for executions to finish */
-	waited = wait_on_work(work);
-
-	/* wait for the pending one */
-	if (pending) {
-		wait_for_completion(&barr.done);
-		destroy_work_on_stack(&barr.work);
-	}
-
-	return pending || waited;
-}
-EXPORT_SYMBOL_GPL(flush_work_sync);
+EXPORT_SYMBOL_GPL(flush_work);
 
 static bool __cancel_work_timer(struct work_struct *work, bool is_dwork)
 {
@@ -2970,14 +2891,14 @@ static bool __cancel_work_timer(struct work_struct *work, bool is_dwork)
 		 * would be waiting for before retrying.
 		 */
 		if (unlikely(ret == -ENOENT))
-			wait_on_work(work);
+			flush_work(work);
 	} while (unlikely(ret < 0));
 
 	/* tell other tasks trying to grab @work to back off */
 	mark_work_canceling(work);
 	local_irq_restore(flags);
 
-	wait_on_work(work);
+	flush_work(work);
 	clear_work_data(work);
 	return ret;
 }
@@ -3029,29 +2950,6 @@ bool flush_delayed_work(struct delayed_work *dwork)
 }
 EXPORT_SYMBOL(flush_delayed_work);
 
-/**
- * flush_delayed_work_sync - wait for a dwork to finish
- * @dwork: the delayed work to flush
- *
- * Delayed timer is cancelled and the pending work is queued for
- * execution immediately.  Other than timer handling, its behavior
- * is identical to flush_work_sync().
- *
- * RETURNS:
- * %true if flush_work_sync() waited for the work to finish execution,
- * %false if it was already idle.
- */
-bool flush_delayed_work_sync(struct delayed_work *dwork)
-{
-	local_irq_disable();
-	if (del_timer_sync(&dwork->timer))
-		__queue_work(dwork->cpu,
-			     get_work_cwq(&dwork->work)->wq, &dwork->work);
-	local_irq_enable();
-	return flush_work_sync(&dwork->work);
-}
-EXPORT_SYMBOL(flush_delayed_work_sync);
-
 /**
  * cancel_delayed_work_sync - cancel a delayed work and wait for it to finish
  * @dwork: the delayed work cancel