
Commit 401a8d04 authored by Tejun Heo

workqueue: cleanup flush/cancel functions



Make the following cleanup changes.

* Relocate flush/cancel function prototypes and definitions.

* Relocate wait_on_cpu_work() and wait_on_work() before
  try_to_grab_pending().  These will be used to implement
  flush_work_sync().

* Make all flush/cancel functions return bool instead of int.  See the
  usage sketch after this list.

* Update wait_on_cpu_work() and wait_on_work() to return %true if they
  actually waited.

* Add / update comments.

This patch doesn't cause any functional changes.
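
As a usage sketch of the new bool returns (hypothetical driver code, not
part of this patch; struct my_dev, io_work, poll_work and my_dev_teardown
are illustrative names), a caller can now act on whether a cancel actually
found pending work:

/* Hypothetical caller of the reworked API, not part of this patch. */
#include <linux/workqueue.h>
#include <linux/printk.h>

struct my_dev {
	struct work_struct io_work;
	struct delayed_work poll_work;
};

static void my_dev_teardown(struct my_dev *dev)
{
	/* cancel_work_sync() now returns %true if io_work was pending. */
	if (cancel_work_sync(&dev->io_work))
		pr_debug("io_work was still pending at teardown\n");

	/* Same contract for the delayed variant; its timer is killed too. */
	if (cancel_delayed_work_sync(&dev->poll_work))
		pr_debug("poll_work was still pending at teardown\n");
}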

Signed-off-by: Tejun Heo <tj@kernel.org>
parent 81dcaf65
include/linux/workqueue.h: +9 −9
@@ -343,7 +343,6 @@ extern int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
 
 extern void flush_workqueue(struct workqueue_struct *wq);
 extern void flush_scheduled_work(void);
-extern void flush_delayed_work(struct delayed_work *work);
 
 extern int schedule_work(struct work_struct *work);
 extern int schedule_work_on(int cpu, struct work_struct *work);
@@ -355,8 +354,11 @@ extern int keventd_up(void);
 
 int execute_in_process_context(work_func_t fn, struct execute_work *);
 
-extern int flush_work(struct work_struct *work);
-extern int cancel_work_sync(struct work_struct *work);
+extern bool flush_work(struct work_struct *work);
+extern bool cancel_work_sync(struct work_struct *work);
+
+extern bool flush_delayed_work(struct delayed_work *dwork);
+extern bool cancel_delayed_work_sync(struct delayed_work *dwork);
 
 extern void workqueue_set_max_active(struct workqueue_struct *wq,
 				     int max_active);
@@ -370,9 +372,9 @@ extern unsigned int work_busy(struct work_struct *work);
  * it returns 1 and the work doesn't re-arm itself. Run flush_workqueue() or
  * cancel_work_sync() to wait on it.
  */
-static inline int cancel_delayed_work(struct delayed_work *work)
+static inline bool cancel_delayed_work(struct delayed_work *work)
 {
-	int ret;
+	bool ret;
 
 	ret = del_timer_sync(&work->timer);
 	if (ret)
@@ -385,9 +387,9 @@ static inline int cancel_delayed_work(struct delayed_work *work)
  * if it returns 0 the timer function may be running and the queueing is in
  * progress.
  */
-static inline int __cancel_delayed_work(struct delayed_work *work)
+static inline bool __cancel_delayed_work(struct delayed_work *work)
 {
-	int ret;
+	bool ret;
 
 	ret = del_timer(&work->timer);
 	if (ret)
@@ -395,8 +397,6 @@ static inline int __cancel_delayed_work(struct delayed_work *work)
 	return ret;
 }
 
-extern int cancel_delayed_work_sync(struct delayed_work *work);
-
 /* Obsolete. use cancel_delayed_work_sync() */
 static inline
 void cancel_rearming_delayed_workqueue(struct workqueue_struct *wq,
kernel/workqueue.c: +94 −81
@@ -2327,16 +2327,24 @@ void flush_workqueue(struct workqueue_struct *wq)
 EXPORT_SYMBOL_GPL(flush_workqueue);
 
 /**
- * flush_work - block until a work_struct's callback has terminated
- * @work: the work which is to be flushed
+ * flush_work - wait for a work to finish executing the last queueing instance
+ * @work: the work to flush
  *
- * Returns false if @work has already terminated.
+ * Wait until @work has finished execution.  This function considers
+ * only the last queueing instance of @work.  If @work has been
+ * enqueued across different CPUs on a non-reentrant workqueue or on
+ * multiple workqueues, @work might still be executing on return on
+ * some of the CPUs from earlier queueing.
  *
- * It is expected that, prior to calling flush_work(), the caller has
- * arranged for the work to not be requeued, otherwise it doesn't make
- * sense to use this function.
+ * If @work was queued only on a non-reentrant, ordered or unbound
+ * workqueue, @work is guaranteed to be idle on return if it hasn't
+ * been requeued since flush started.
+ *
+ * RETURNS:
+ * %true if flush_work() waited for the work to finish execution,
+ * %false if it was already idle.
  */
-int flush_work(struct work_struct *work)
+bool flush_work(struct work_struct *work)
 {
 	struct worker *worker = NULL;
 	struct global_cwq *gcwq;
@@ -2374,13 +2382,49 @@ int flush_work(struct work_struct *work)
 
 	wait_for_completion(&barr.done);
 	destroy_work_on_stack(&barr.work);
-	return 1;
+	return true;
 already_gone:
 	spin_unlock_irq(&gcwq->lock);
-	return 0;
+	return false;
 }
 EXPORT_SYMBOL_GPL(flush_work);
 
+static bool wait_on_cpu_work(struct global_cwq *gcwq, struct work_struct *work)
+{
+	struct wq_barrier barr;
+	struct worker *worker;
+
+	spin_lock_irq(&gcwq->lock);
+
+	worker = find_worker_executing_work(gcwq, work);
+	if (unlikely(worker))
+		insert_wq_barrier(worker->current_cwq, &barr, work, worker);
+
+	spin_unlock_irq(&gcwq->lock);
+
+	if (unlikely(worker)) {
+		wait_for_completion(&barr.done);
+		destroy_work_on_stack(&barr.work);
+		return true;
+	} else
+		return false;
+}
+
+static bool wait_on_work(struct work_struct *work)
+{
+	bool ret = false;
+	int cpu;
+
+	might_sleep();
+
+	lock_map_acquire(&work->lockdep_map);
+	lock_map_release(&work->lockdep_map);
+
+	for_each_gcwq_cpu(cpu)
+		ret |= wait_on_cpu_work(get_gcwq(cpu), work);
+	return ret;
+}
+
 /*
  * Upon a successful return (>= 0), the caller "owns" WORK_STRUCT_PENDING bit,
  * so this work can't be re-armed in any way.
@@ -2423,39 +2467,7 @@ static int try_to_grab_pending(struct work_struct *work)
 	return ret;
 }
 
-static void wait_on_cpu_work(struct global_cwq *gcwq, struct work_struct *work)
-{
-	struct wq_barrier barr;
-	struct worker *worker;
-
-	spin_lock_irq(&gcwq->lock);
-
-	worker = find_worker_executing_work(gcwq, work);
-	if (unlikely(worker))
-		insert_wq_barrier(worker->current_cwq, &barr, work, worker);
-
-	spin_unlock_irq(&gcwq->lock);
-
-	if (unlikely(worker)) {
-		wait_for_completion(&barr.done);
-		destroy_work_on_stack(&barr.work);
-	}
-}
-
-static void wait_on_work(struct work_struct *work)
-{
-	int cpu;
-
-	might_sleep();
-
-	lock_map_acquire(&work->lockdep_map);
-	lock_map_release(&work->lockdep_map);
-
-	for_each_gcwq_cpu(cpu)
-		wait_on_cpu_work(get_gcwq(cpu), work);
-}
-
-static int __cancel_work_timer(struct work_struct *work,
+static bool __cancel_work_timer(struct work_struct *work,
 				struct timer_list* timer)
 {
 	int ret;
@@ -2472,42 +2484,60 @@ static int __cancel_work_timer(struct work_struct *work,
 }
 
 /**
- * cancel_work_sync - block until a work_struct's callback has terminated
- * @work: the work which is to be flushed
- *
- * Returns true if @work was pending.
+ * cancel_work_sync - cancel a work and wait for it to finish
+ * @work: the work to cancel
  *
- * cancel_work_sync() will cancel the work if it is queued. If the work's
- * callback appears to be running, cancel_work_sync() will block until it
- * has completed.
- *
- * It is possible to use this function if the work re-queues itself. It can
- * cancel the work even if it migrates to another workqueue, however in that
- * case it only guarantees that work->func() has completed on the last queued
- * workqueue.
+ * Cancel @work and wait for its execution to finish.  This function
+ * can be used even if the work re-queues itself or migrates to
+ * another workqueue.  On return from this function, @work is
+ * guaranteed to be not pending or executing on any CPU.
  *
- * cancel_work_sync(&delayed_work->work) should be used only if ->timer is not
- * pending, otherwise it goes into a busy-wait loop until the timer expires.
+ * cancel_work_sync(&delayed_work->work) must not be used for
+ * delayed_work's.  Use cancel_delayed_work_sync() instead.
  *
- * The caller must ensure that workqueue_struct on which this work was last
+ * The caller must ensure that the workqueue on which @work was last
  * queued can't be destroyed before this function returns.
+ *
+ * RETURNS:
+ * %true if @work was pending, %false otherwise.
  */
-int cancel_work_sync(struct work_struct *work)
+bool cancel_work_sync(struct work_struct *work)
 {
 	return __cancel_work_timer(work, NULL);
 }
 EXPORT_SYMBOL_GPL(cancel_work_sync);
 
 /**
- * cancel_delayed_work_sync - reliably kill off a delayed work.
- * @dwork: the delayed work struct
+ * flush_delayed_work - wait for a dwork to finish executing the last queueing
+ * @dwork: the delayed work to flush
  *
- * Returns true if @dwork was pending.
+ * Delayed timer is cancelled and the pending work is queued for
+ * immediate execution.  Like flush_work(), this function only
+ * considers the last queueing instance of @dwork.
  *
- * It is possible to use this function if @dwork rearms itself via queue_work()
- * or queue_delayed_work(). See also the comment for cancel_work_sync().
+ * RETURNS:
+ * %true if flush_work() waited for the work to finish execution,
+ * %false if it was already idle.
  */
-int cancel_delayed_work_sync(struct delayed_work *dwork)
+bool flush_delayed_work(struct delayed_work *dwork)
+{
+	if (del_timer_sync(&dwork->timer))
+		__queue_work(raw_smp_processor_id(),
+			     get_work_cwq(&dwork->work)->wq, &dwork->work);
+	return flush_work(&dwork->work);
+}
+EXPORT_SYMBOL(flush_delayed_work);
+
+/**
+ * cancel_delayed_work_sync - cancel a delayed work and wait for it to finish
+ * @dwork: the delayed work cancel
+ *
+ * This is cancel_work_sync() for delayed works.
+ *
+ * RETURNS:
+ * %true if @dwork was pending, %false otherwise.
+ */
+bool cancel_delayed_work_sync(struct delayed_work *dwork)
 {
 	return __cancel_work_timer(&dwork->work, &dwork->timer);
 }
@@ -2558,23 +2588,6 @@ int schedule_delayed_work(struct delayed_work *dwork,
 }
 EXPORT_SYMBOL(schedule_delayed_work);
 
-/**
- * flush_delayed_work - block until a dwork_struct's callback has terminated
- * @dwork: the delayed work which is to be flushed
- *
- * Any timeout is cancelled, and any pending work is run immediately.
- */
-void flush_delayed_work(struct delayed_work *dwork)
-{
-	if (del_timer_sync(&dwork->timer)) {
-		__queue_work(get_cpu(), get_work_cwq(&dwork->work)->wq,
-			     &dwork->work);
-		put_cpu();
-	}
-	flush_work(&dwork->work);
-}
-EXPORT_SYMBOL(flush_delayed_work);
-
 /**
  * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
  * @cpu: cpu to use
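
As a closing usage note (a hypothetical caller reusing struct my_dev from
the sketch above, not part of this commit): the relocated flush_delayed_work()
kills the timer, queues the work for immediate execution if the timer was
still pending, and then flushes it, so its %true/%false return mirrors
flush_work():

/* Hypothetical: force a not-yet-expired delayed work to run now and wait. */
static void my_dev_sync_poll(struct my_dev *dev)
{
	if (flush_delayed_work(&dev->poll_work))
		pr_debug("waited for poll_work to finish\n");
	else
		pr_debug("poll_work was already idle\n");
}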