
Commit 42f8570f authored by Sasha Levin, committed by Tejun Heo

workqueue: use new hashtable implementation



Switch workqueues to use the new hashtable implementation. This reduces the
amount of generic unrelated code in the workqueues.

This patch depends on d9b482c8 ("hashtable: introduce a small and naive
hashtable") which was merged in v3.6.

Acked-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Sasha Levin <sasha.levin@oracle.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
parent 848b8141
kernel/workqueue.c: +15 −71

--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -41,6 +41,7 @@
 #include <linux/debug_locks.h>
 #include <linux/lockdep.h>
 #include <linux/idr.h>
+#include <linux/hashtable.h>
 
 #include "workqueue_sched.h"

@@ -82,8 +83,6 @@ enum {
 	NR_WORKER_POOLS		= 2,		/* # worker pools per gcwq */
 
 	BUSY_WORKER_HASH_ORDER	= 6,		/* 64 pointers */
-	BUSY_WORKER_HASH_SIZE	= 1 << BUSY_WORKER_HASH_ORDER,
-	BUSY_WORKER_HASH_MASK	= BUSY_WORKER_HASH_SIZE - 1,
 
 	MAX_IDLE_WORKERS_RATIO	= 4,		/* 1/4 of busy can be idle */
 	IDLE_WORKER_TIMEOUT	= 300 * HZ,	/* keep idle ones for 5 mins */
@@ -180,7 +179,7 @@ struct global_cwq {
 	unsigned int		flags;		/* L: GCWQ_* flags */
 
 	/* workers are chained either in busy_hash or pool idle_list */
-	struct hlist_head	busy_hash[BUSY_WORKER_HASH_SIZE];
+	DECLARE_HASHTABLE(busy_hash, BUSY_WORKER_HASH_ORDER);
 						/* L: hash of busy workers */
 
 	struct worker_pool	pools[NR_WORKER_POOLS];
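DECLARE_HASHTABLE() leaves the struct layout unchanged; in <linux/hashtable.h> it expands to the same array of bucket heads, which is why the open-coded BUSY_WORKER_HASH_SIZE/MASK constants above become redundant:

#define DECLARE_HASHTABLE(name, bits)					\
	struct hlist_head name[1 << (bits)]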
@@ -285,8 +284,7 @@ EXPORT_SYMBOL_GPL(system_freezable_wq);
 	     (pool) < &(gcwq)->pools[NR_WORKER_POOLS]; (pool)++)
 
 #define for_each_busy_worker(worker, i, pos, gcwq)			\
-	for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++)			\
-		hlist_for_each_entry(worker, pos, &gcwq->busy_hash[i], hentry)
+	hash_for_each(gcwq->busy_hash, i, pos, worker, hentry)
 
 static inline int __next_gcwq_cpu(int cpu, const struct cpumask *mask,
 				  unsigned int sw)
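hash_for_each() performs the same two-level walk the macro used to spell out by hand: an outer loop over buckets and an inner hlist walk, so for_each_busy_worker()'s behavior is unchanged. Roughly, in the v3.7-era header (a hedged reconstruction, not quoted verbatim):

#define hash_for_each(name, bkt, node, obj, member)			\
	for (bkt = 0, node = NULL; node == NULL &&			\
			bkt < HASH_SIZE(name); bkt++)			\
		hlist_for_each_entry(obj, node, &name[bkt], member)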
@@ -859,41 +857,13 @@ static inline void worker_clr_flags(struct worker *worker, unsigned int flags)
 }
 
 /**
- * busy_worker_head - return the busy hash head for a work
- * @gcwq: gcwq of interest
- * @work: work to be hashed
- *
- * Return hash head of @gcwq for @work.
- *
- * CONTEXT:
- * spin_lock_irq(gcwq->lock).
- *
- * RETURNS:
- * Pointer to the hash head.
- */
-static struct hlist_head *busy_worker_head(struct global_cwq *gcwq,
-					   struct work_struct *work)
-{
-	const int base_shift = ilog2(sizeof(struct work_struct));
-	unsigned long v = (unsigned long)work;
-
-	/* simple shift and fold hash, do we need something better? */
-	v >>= base_shift;
-	v += v >> BUSY_WORKER_HASH_ORDER;
-	v &= BUSY_WORKER_HASH_MASK;
-
-	return &gcwq->busy_hash[v];
-}
-
-/**
- * __find_worker_executing_work - find worker which is executing a work
+ * find_worker_executing_work - find worker which is executing a work
  * @gcwq: gcwq of interest
- * @bwh: hash head as returned by busy_worker_head()
  * @work: work to find worker for
  *
- * Find a worker which is executing @work on @gcwq.  @bwh should be
- * the hash head obtained by calling busy_worker_head() with the same
- * work.
+ * Find a worker which is executing @work on @gcwq.  This function is
+ * identical to __find_worker_executing_work() except that this
+ * function calculates @bwh itself.
  *
  * CONTEXT:
  * spin_lock_irq(gcwq->lock).
@@ -902,40 +872,17 @@ static struct hlist_head *busy_worker_head(struct global_cwq *gcwq,
  * Pointer to worker which is executing @work if found, NULL
  * otherwise.
  */
-static struct worker *__find_worker_executing_work(struct global_cwq *gcwq,
-						   struct hlist_head *bwh,
-						   struct work_struct *work)
+static struct worker *find_worker_executing_work(struct global_cwq *gcwq,
+						 struct work_struct *work)
 {
 	struct worker *worker;
 	struct hlist_node *tmp;
 
-	hlist_for_each_entry(worker, tmp, bwh, hentry)
+	hash_for_each_possible(gcwq->busy_hash, worker, tmp, hentry,
+			       (unsigned long)work)
 		if (worker->current_work == work)
 			return worker;
-	return NULL;
-}
-
-/**
- * find_worker_executing_work - find worker which is executing a work
- * @gcwq: gcwq of interest
- * @work: work to find worker for
- *
- * Find a worker which is executing @work on @gcwq.  This function is
- * identical to __find_worker_executing_work() except that this
- * function calculates @bwh itself.
- *
- * CONTEXT:
- * spin_lock_irq(gcwq->lock).
- *
- * RETURNS:
- * Pointer to worker which is executing @work if found, NULL
- * otherwise.
- */
-static struct worker *find_worker_executing_work(struct global_cwq *gcwq,
-						 struct work_struct *work)
-{
-	return __find_worker_executing_work(gcwq, busy_worker_head(gcwq, work),
-					    work);
+	return NULL;
 }
 
 /**
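hash_for_each_possible() replaces both busy_worker_head() and the open-coded bucket walk: hash_min() from <linux/hash.h> maps the key to a single bucket, and only that bucket is scanned. Distinct works can still collide into one bucket, which is why the worker->current_work == work test remains. Approximately, per the v3.7-era header:

#define hash_for_each_possible(name, obj, node, member, key)		\
	hlist_for_each_entry(obj, node,					\
		&name[hash_min(key, HASH_BITS(name))], member)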
@@ -2166,7 +2113,6 @@ __acquires(&gcwq->lock)
 	struct cpu_workqueue_struct *cwq = get_work_cwq(work);
 	struct worker_pool *pool = worker->pool;
 	struct global_cwq *gcwq = pool->gcwq;
-	struct hlist_head *bwh = busy_worker_head(gcwq, work);
 	bool cpu_intensive = cwq->wq->flags & WQ_CPU_INTENSIVE;
 	work_func_t f = work->func;
 	int work_color;
@@ -2198,7 +2144,7 @@ __acquires(&gcwq->lock)
 	 * already processing the work.  If so, defer the work to the
 	 * currently executing one.
 	 */
-	collision = __find_worker_executing_work(gcwq, bwh, work);
+	collision = find_worker_executing_work(gcwq, work);
 	if (unlikely(collision)) {
 		move_linked_works(work, &collision->scheduled, NULL);
 		return;
@@ -2206,7 +2152,7 @@ __acquires(&gcwq->lock)
 
 	/* claim and dequeue */
 	debug_work_deactivate(work);
-	hlist_add_head(&worker->hentry, bwh);
+	hash_add(gcwq->busy_hash, &worker->hentry, (unsigned long)worker);
 	worker->current_work = work;
 	worker->current_cwq = cwq;
 	work_color = get_work_color(work);
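Note that the key passed to hash_add() here is the worker pointer, while find_worker_executing_work() above looks up by the work pointer; hash_add() and hash_for_each_possible() only land in the same bucket when both sides hash the same key (a follow-up upstream fix keyed this line on the work pointer instead). For reference, hash_add() is a thin wrapper:

#define hash_add(hashtable, node, key)					\
	hlist_add_head(node,						\
		&hashtable[hash_min(key, HASH_BITS(hashtable))])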
@@ -2264,7 +2210,7 @@ __acquires(&gcwq->lock)
 		worker_clr_flags(worker, WORKER_CPU_INTENSIVE);
 
 	/* we're done with it, release */
-	hlist_del_init(&worker->hentry);
+	hash_del(&worker->hentry);
 	worker->current_work = NULL;
 	worker->current_cwq = NULL;
 	cwq_dec_nr_in_flight(cwq, work_color);
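hash_del() needs no table argument because an hlist node can unlink itself; in <linux/hashtable.h> it is simply:

static inline void hash_del(struct hlist_node *node)
{
	hlist_del_init(node);
}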
@@ -3831,7 +3777,6 @@ out_unlock:
 static int __init init_workqueues(void)
 {
 	unsigned int cpu;
-	int i;
 
 	/* make sure we have enough bits for OFFQ CPU number */
 	BUILD_BUG_ON((1LU << (BITS_PER_LONG - WORK_OFFQ_CPU_SHIFT)) <
@@ -3849,8 +3794,7 @@ static int __init init_workqueues(void)
 		gcwq->cpu = cpu;
 		gcwq->flags |= GCWQ_DISASSOCIATED;
 
-		for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++)
-			INIT_HLIST_HEAD(&gcwq->busy_hash[i]);
+		hash_init(gcwq->busy_hash);
 
 		for_each_worker_pool(pool, gcwq) {
 			pool->gcwq = gcwq;
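hash_init() performs the same per-bucket initialization as the removed loop, with the bucket count derived automatically from the array; roughly, per <linux/hashtable.h>:

#define hash_init(hashtable) __hash_init(hashtable, HASH_SIZE(hashtable))

static inline void __hash_init(struct hlist_head *ht, unsigned int sz)
{
	unsigned int i;

	for (i = 0; i < sz; i++)
		INIT_HLIST_HEAD(&ht[i]);
}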