Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 20243058 authored by David S. Miller
Browse files

Merge branch 'net-avoid-KCSAN-splats'



Eric Dumazet says:

====================
net: avoid KCSAN splats

Often times we use skb_queue_empty() without holding a lock,
meaning that other cpus (or interrupt) can change the queue
under us. This is fine, but we need to properly annotate
the lockless intent to make sure the compiler won't over-optimize
things.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
parents fc11078d 7c422d0c
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -1702,7 +1702,7 @@ int chtls_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
		return peekmsg(sk, msg, len, nonblock, flags);

	if (sk_can_busy_loop(sk) &&
	    skb_queue_empty(&sk->sk_receive_queue) &&
	    skb_queue_empty_lockless(&sk->sk_receive_queue) &&
	    sk->sk_state == TCP_ESTABLISHED)
		sk_busy_loop(sk, nonblock);

+1 −1
Original line number Diff line number Diff line
@@ -744,7 +744,7 @@ capi_poll(struct file *file, poll_table *wait)

	poll_wait(file, &(cdev->recvwait), wait);
	mask = EPOLLOUT | EPOLLWRNORM;
	if (!skb_queue_empty(&cdev->recvqueue))
	if (!skb_queue_empty_lockless(&cdev->recvqueue))
		mask |= EPOLLIN | EPOLLRDNORM;
	return mask;
}
+1 −1
Original line number Diff line number Diff line
@@ -2219,7 +2219,7 @@ static int nvme_tcp_poll(struct blk_mq_hw_ctx *hctx)
	struct nvme_tcp_queue *queue = hctx->driver_data;
	struct sock *sk = queue->sock->sk;

	if (sk_can_busy_loop(sk) && skb_queue_empty(&sk->sk_receive_queue))
	if (sk_can_busy_loop(sk) && skb_queue_empty_lockless(&sk->sk_receive_queue))
		sk_busy_loop(sk, true);
	nvme_tcp_try_recv(queue);
	return queue->nr_cqe;
+24 −9
Original line number Diff line number Diff line
@@ -1495,6 +1495,19 @@ static inline int skb_queue_empty(const struct sk_buff_head *list)
	return list->next == (const struct sk_buff *) list;
}

/**
 *	skb_queue_empty_lockless - check if a queue is empty
 *	@list: queue head
 *
 *	Returns true if the queue is empty, false otherwise.
 *	This variant can be used in lockless contexts: READ_ONCE() keeps
 *	the compiler from tearing/refetching the racy load of list->next.
 */
static inline bool skb_queue_empty_lockless(const struct sk_buff_head *list)
{
	/* An empty queue's next pointer points back at the head itself. */
	const struct sk_buff *head = (const struct sk_buff *) list;

	return READ_ONCE(list->next) == head;
}


/**
 *	skb_queue_is_last - check if skb is the last entry in the queue
 *	@list: queue head
@@ -1848,9 +1861,11 @@ static inline void __skb_insert(struct sk_buff *newsk,
				struct sk_buff *prev, struct sk_buff *next,
				struct sk_buff_head *list)
{
	newsk->next = next;
	newsk->prev = prev;
	next->prev  = prev->next = newsk;
	/* see skb_queue_empty_lockless() for the opposite READ_ONCE() */
	WRITE_ONCE(newsk->next, next);
	WRITE_ONCE(newsk->prev, prev);
	WRITE_ONCE(next->prev, newsk);
	WRITE_ONCE(prev->next, newsk);
	list->qlen++;
}

@@ -1861,11 +1876,11 @@ static inline void __skb_queue_splice(const struct sk_buff_head *list,
	struct sk_buff *first = list->next;
	struct sk_buff *last = list->prev;

	first->prev = prev;
	prev->next = first;
	WRITE_ONCE(first->prev, prev);
	WRITE_ONCE(prev->next, first);

	last->next = next;
	next->prev = last;
	WRITE_ONCE(last->next, next);
	WRITE_ONCE(next->prev, last);
}

/**
@@ -2006,8 +2021,8 @@ static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
	next	   = skb->next;
	prev	   = skb->prev;
	skb->next  = skb->prev = NULL;
	next->prev = prev;
	prev->next = next;
	WRITE_ONCE(next->prev, prev);
	WRITE_ONCE(prev->next, next);
}

/**
+1 −1
Original line number Diff line number Diff line
@@ -668,7 +668,7 @@ __poll_t vcc_poll(struct file *file, struct socket *sock, poll_table *wait)
		mask |= EPOLLHUP;

	/* readable? */
	if (!skb_queue_empty(&sk->sk_receive_queue))
	if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
		mask |= EPOLLIN | EPOLLRDNORM;

	/* writable? */
Loading