Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 2055da97 authored by Ingo Molnar
Browse files

sched/wait: Disambiguate wq_entry->task_list and wq_head->task_list naming



So I've noticed a number of instances where it was not obvious from the
code whether ->task_list was for a wait-queue head or a wait-queue entry.

Furthermore, there's a number of wait-queue users where the lists are
not for 'tasks' but other entities (poll tables, etc.), in which case
the 'task_list' name is actively confusing.

To clear this all up, name the wait-queue head and entry list structure
fields unambiguously:

	struct wait_queue_head::task_list	=> ::head
	struct wait_queue_entry::task_list	=> ::entry

For example, this code:

	rqw->wait.task_list.next != &wait->task_list

... was pretty unclear (to me) what it's doing, while now it's written this way:

	rqw->wait.head.next != &wait->entry

... which makes it pretty clear that we are iterating a list until we see the head.

Other examples are:

	list_for_each_entry_safe(pos, next, &x->task_list, task_list) {
	list_for_each_entry(wq, &fence->wait.task_list, task_list) {

... where it's unclear (to me) what we are iterating, and during review it's
hard to tell whether it's trying to walk a wait-queue entry (which would be
a bug), while now it's written as:

	list_for_each_entry_safe(pos, next, &x->head, entry) {
	list_for_each_entry(wq, &fence->wait.head, entry) {

Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 5822a454
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -933,7 +933,7 @@ static int blk_mq_dispatch_wake(wait_queue_entry_t *wait, unsigned mode, int fla

	hctx = container_of(wait, struct blk_mq_hw_ctx, dispatch_wait);

	list_del(&wait->task_list);
	list_del(&wait->entry);
	clear_bit_unlock(BLK_MQ_S_TAG_WAITING, &hctx->state);
	blk_mq_run_hw_queue(hctx, true);
	return 1;
+1 −1
Original line number Diff line number Diff line
@@ -520,7 +520,7 @@ static inline bool may_queue(struct rq_wb *rwb, struct rq_wait *rqw,
	 * in line to be woken up, wait for our turn.
	 */
	if (waitqueue_active(&rqw->wait) &&
	    rqw->wait.task_list.next != &wait->task_list)
	    rqw->wait.head.next != &wait->entry)
		return false;

	return atomic_inc_below(&rqw->inflight, get_limit(rwb, rw));
+4 −4
Original line number Diff line number Diff line
@@ -385,7 +385,7 @@ static int kyber_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)

	for (i = 0; i < KYBER_NUM_DOMAINS; i++) {
		INIT_LIST_HEAD(&khd->rqs[i]);
		INIT_LIST_HEAD(&khd->domain_wait[i].task_list);
		INIT_LIST_HEAD(&khd->domain_wait[i].entry);
		atomic_set(&khd->wait_index[i], 0);
	}

@@ -512,7 +512,7 @@ static int kyber_domain_wake(wait_queue_entry_t *wait, unsigned mode, int flags,
{
	struct blk_mq_hw_ctx *hctx = READ_ONCE(wait->private);

	list_del_init(&wait->task_list);
	list_del_init(&wait->entry);
	blk_mq_run_hw_queue(hctx, true);
	return 1;
}
@@ -536,7 +536,7 @@ static int kyber_get_domain_token(struct kyber_queue_data *kqd,
	 * run when one becomes available. Note that this is serialized on
	 * khd->lock, but we still need to be careful about the waker.
	 */
	if (list_empty_careful(&wait->task_list)) {
	if (list_empty_careful(&wait->entry)) {
		init_waitqueue_func_entry(wait, kyber_domain_wake);
		wait->private = hctx;
		ws = sbq_wait_ptr(domain_tokens,
@@ -736,7 +736,7 @@ static int kyber_##name##_waiting_show(void *data, struct seq_file *m) \
	struct kyber_hctx_data *khd = hctx->sched_data;			\
	wait_queue_entry_t *wait = &khd->domain_wait[domain];		\
									\
	seq_printf(m, "%d\n", !list_empty_careful(&wait->task_list));	\
	seq_printf(m, "%d\n", !list_empty_careful(&wait->entry));	\
	return 0;							\
}
KYBER_DEBUGFS_DOMAIN_ATTRS(KYBER_READ, read)
+10 −11
Original line number Diff line number Diff line
@@ -160,31 +160,30 @@ static void __i915_sw_fence_wake_up_all(struct i915_sw_fence *fence,

	/*
	 * To prevent unbounded recursion as we traverse the graph of
	 * i915_sw_fences, we move the task_list from this, the next ready
	 * fence, to the tail of the original fence's task_list
	 * i915_sw_fences, we move the entry list from this, the next ready
	 * fence, to the tail of the original fence's entry list
	 * (and so added to the list to be woken).
	 */

	spin_lock_irqsave_nested(&x->lock, flags, 1 + !!continuation);
	if (continuation) {
		list_for_each_entry_safe(pos, next, &x->task_list, task_list) {
		list_for_each_entry_safe(pos, next, &x->head, entry) {
			if (pos->func == autoremove_wake_function)
				pos->func(pos, TASK_NORMAL, 0, continuation);
			else
				list_move_tail(&pos->task_list, continuation);
				list_move_tail(&pos->entry, continuation);
		}
	} else {
		LIST_HEAD(extra);

		do {
			list_for_each_entry_safe(pos, next,
						 &x->task_list, task_list)
			list_for_each_entry_safe(pos, next, &x->head, entry)
				pos->func(pos, TASK_NORMAL, 0, &extra);

			if (list_empty(&extra))
				break;

			list_splice_tail_init(&extra, &x->task_list);
			list_splice_tail_init(&extra, &x->head);
		} while (1);
	}
	spin_unlock_irqrestore(&x->lock, flags);
@@ -256,7 +255,7 @@ void i915_sw_fence_commit(struct i915_sw_fence *fence)

static int i915_sw_fence_wake(wait_queue_entry_t *wq, unsigned mode, int flags, void *key)
{
	list_del(&wq->task_list);
	list_del(&wq->entry);
	__i915_sw_fence_complete(wq->private, key);
	i915_sw_fence_put(wq->private);
	if (wq->flags & I915_SW_FENCE_FLAG_ALLOC)
@@ -275,7 +274,7 @@ static bool __i915_sw_fence_check_if_after(struct i915_sw_fence *fence,
	if (fence == signaler)
		return true;

	list_for_each_entry(wq, &fence->wait.task_list, task_list) {
	list_for_each_entry(wq, &fence->wait.head, entry) {
		if (wq->func != i915_sw_fence_wake)
			continue;

@@ -293,7 +292,7 @@ static void __i915_sw_fence_clear_checked_bit(struct i915_sw_fence *fence)
	if (!__test_and_clear_bit(I915_SW_FENCE_CHECKED_BIT, &fence->flags))
		return;

	list_for_each_entry(wq, &fence->wait.task_list, task_list) {
	list_for_each_entry(wq, &fence->wait.head, entry) {
		if (wq->func != i915_sw_fence_wake)
			continue;

@@ -350,7 +349,7 @@ static int __i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence,
		pending |= I915_SW_FENCE_FLAG_ALLOC;
	}

	INIT_LIST_HEAD(&wq->task_list);
	INIT_LIST_HEAD(&wq->entry);
	wq->flags = pending;
	wq->func = i915_sw_fence_wake;
	wq->private = i915_sw_fence_get(fence);
+1 −1
Original line number Diff line number Diff line
@@ -709,7 +709,7 @@ static irqreturn_t dryice_irq(int irq, void *dev_id)
		/*If the write wait queue is empty then there is no pending
		  operations. It means the interrupt is for DryIce -Security.
		  IRQ must be returned as none.*/
		if (list_empty_careful(&imxdi->write_wait.task_list))
		if (list_empty_careful(&imxdi->write_wait.head))
			return rc;

		/* DSR_WCF clears itself on DSR read */
Loading