Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 83c22520, authored by Oleg Nesterov, committed by Linus Torvalds
Browse files

flush_cpu_workqueue: don't flush an empty ->worklist



Now that we have ->current_work we can avoid adding a barrier and waiting
for its completion when the cwq's queue is empty.

Note: this change is also useful if we change flush_workqueue() to also
check the dead CPUs.

Signed-off-by: Oleg Nesterov <oleg@tv-sign.ru>
Cc: Srivatsa Vaddagiri <vatsa@in.ibm.com>
Cc: Gautham Shenoy <ego@in.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent edab2516
Loading
Loading
Loading
Loading
+17 −8
Original line number Diff line number Diff line
@@ -404,12 +404,15 @@ static void wq_barrier_func(struct work_struct *work)
	complete(&barr->done);
}

/* NOTE(review): the next line is the OLD prototype (init_wq_barrier) that this
 * commit removes; the diff view has stripped the '-' marker, leaving it fused
 * with the new signature below. */
static inline void init_wq_barrier(struct wq_barrier *barr)
/*
 * Initialize @barr and queue its work item on @cwq so that wq_barrier_func()
 * completes barr->done once everything ahead of it has run.
 *
 * @cwq:  per-CPU workqueue to insert the barrier into
 * @barr: caller-owned barrier (typically on the caller's stack)
 * @tail: nonzero to append at the tail of ->worklist, zero to insert at
 *        the head (used by wait_on_work() to run right after current_work)
 *
 * Caller must hold cwq->lock — insert_work() publishes the work item to the
 * worker thread, so the PENDING bit and the completion must be set up first.
 */
static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
					struct wq_barrier *barr, int tail)
{
	INIT_WORK(&barr->work, wq_barrier_func);
	/* Mark pending by hand: we bypass queue_work() and insert directly. */
	__set_bit(WORK_STRUCT_PENDING, work_data_bits(&barr->work));

	init_completion(&barr->done);

	insert_work(cwq, &barr->work, tail);
}

static void flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
@@ -428,15 +431,22 @@ static void flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
		preempt_disable();
	} else {
		struct wq_barrier barr;
		int active = 0;

		init_wq_barrier(&barr);
		__queue_work(cwq, &barr.work);
		spin_lock_irq(&cwq->lock);
		if (!list_empty(&cwq->worklist) || cwq->current_work != NULL) {
			insert_wq_barrier(cwq, &barr, 1);
			active = 1;
		}
		spin_unlock_irq(&cwq->lock);

		preempt_enable();	/* Can no longer touch *cwq */
		if (active) {
			preempt_enable();
			wait_for_completion(&barr.done);
			preempt_disable();
		}
	}
}

/**
 * flush_workqueue - ensure that any scheduled work has run to completion.
@@ -475,8 +485,7 @@ static void wait_on_work(struct cpu_workqueue_struct *cwq,

	spin_lock_irq(&cwq->lock);
	if (unlikely(cwq->current_work == work)) {
		init_wq_barrier(&barr);
		insert_work(cwq, &barr.work, 0);
		insert_wq_barrier(cwq, &barr, 0);
		running = 1;
	}
	spin_unlock_irq(&cwq->lock);