
Commit 7097a87a authored by Oleg Nesterov, committed by Linus Torvalds

workqueue: kill run_scheduled_work()



Because it has no callers.

Actually, I think the whole idea of run_scheduled_work() was not right; it is
not good to mix "unqueue this work" and "execute its ->func()" in one function.

Signed-off-by: Oleg Nesterov <oleg@tv-sign.ru>
Cc: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
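
For context, here is a hedged sketch of the kind of call site this API invited, contrasted with the conventional queue-then-flush pattern built only from declarations that remain visible in the workqueue.h hunk below (schedule_work(), flush_work_keventd()). The function my_reset_fn() and the work item my_reset_work are invented for illustration; as the message notes, nothing in the tree actually called run_scheduled_work().

#include <linux/workqueue.h>

/* Hypothetical work item, for illustration only. */
static void my_reset_fn(struct work_struct *work)
{
	/* device-specific reset would go here */
}
static DECLARE_WORK(my_reset_work, my_reset_fn);

static void sync_point_with_removed_api(void)
{
	/* Removed API: dequeue the pending item and run ->func() right here. */
	if (!run_scheduled_work(&my_reset_work))
		my_reset_fn(&my_reset_work);	/* nothing was queued: call directly */
}

static void sync_point_with_plain_workqueue(void)
{
	/* Conventional split: queue the work, then wait for keventd to run it. */
	schedule_work(&my_reset_work);
	flush_work_keventd(&my_reset_work);
}

The second variant keeps queuing and execution separate, which is the distinction the message above is drawing.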
parent 3af24433
include/linux/workqueue.h  +0 −1
@@ -182,7 +182,6 @@ extern void flush_work(struct workqueue_struct *wq, struct work_struct *work);
 extern void flush_work_keventd(struct work_struct *work);
 
 extern int FASTCALL(schedule_work(struct work_struct *work));
-extern int FASTCALL(run_scheduled_work(struct work_struct *work));
 extern int FASTCALL(schedule_delayed_work(struct delayed_work *work, unsigned long delay));
 
 extern int schedule_delayed_work_on(int cpu, struct delayed_work *work, unsigned long delay);
kernel/workqueue.c  +0 −73
@@ -98,79 +98,6 @@ static inline void *get_wq_data(struct work_struct *work)
 	return (void *) (atomic_long_read(&work->data) & WORK_STRUCT_WQ_DATA_MASK);
 }
 
-static int __run_work(struct cpu_workqueue_struct *cwq, struct work_struct *work)
-{
-	int ret = 0;
-	unsigned long flags;
-
-	spin_lock_irqsave(&cwq->lock, flags);
-	/*
-	 * We need to re-validate the work info after we've gotten
-	 * the cpu_workqueue lock. We can run the work now iff:
-	 *
-	 *  - the wq_data still matches the cpu_workqueue_struct
-	 *  - AND the work is still marked pending
-	 *  - AND the work is still on a list (which will be this
-	 *    workqueue_struct list)
-	 *
-	 * All these conditions are important, because we
-	 * need to protect against the work being run right
-	 * now on another CPU (all but the last one might be
-	 * true if it's currently running and has not been
-	 * released yet, for example).
-	 */
-	if (get_wq_data(work) == cwq
-	    && work_pending(work)
-	    && !list_empty(&work->entry)) {
-		work_func_t f = work->func;
-		cwq->current_work = work;
-		list_del_init(&work->entry);
-		spin_unlock_irqrestore(&cwq->lock, flags);
-
-		if (!test_bit(WORK_STRUCT_NOAUTOREL, work_data_bits(work)))
-			work_release(work);
-		f(work);
-
-		spin_lock_irqsave(&cwq->lock, flags);
-		cwq->current_work = NULL;
-		ret = 1;
-	}
-	spin_unlock_irqrestore(&cwq->lock, flags);
-	return ret;
-}
-
-/**
- * run_scheduled_work - run scheduled work synchronously
- * @work: work to run
- *
- * This checks if the work was pending, and runs it
- * synchronously if so. It returns a boolean to indicate
- * whether it had any scheduled work to run or not.
- *
- * NOTE! This _only_ works for normal work_structs. You
- * CANNOT use this for delayed work, because the wq data
- * for delayed work will not point properly to the per-
- * CPU workqueue struct, but will change!
- */
-int fastcall run_scheduled_work(struct work_struct *work)
-{
-	for (;;) {
-		struct cpu_workqueue_struct *cwq;
-
-		if (!work_pending(work))
-			return 0;
-		if (list_empty(&work->entry))
-			return 0;
-		/* NOTE! This depends intimately on __queue_work! */
-		cwq = get_wq_data(work);
-		if (!cwq)
-			return 0;
-		if (__run_work(cwq, work))
-			return 1;
-	}
-}
-EXPORT_SYMBOL(run_scheduled_work);
-
 static void insert_work(struct cpu_workqueue_struct *cwq,
 				struct work_struct *work, int tail)
 {