Commit b8a23e8d authored by Eric Dumazet, committed by David S. Miller

caif: fix leaks and race in caif_queue_rcv_skb()



1) If sk_filter() is applied, skb was leaked (not freed)
2) Testing SOCK_DEAD twice is racy:
   packet could be freed while already queued.
3) Remove obsolete comment about caching skb->len

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent e3426ca7
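
The patch below makes two behavioural changes: the early return after sk_filter() becomes a goto to a common exit so a rejected skb is freed rather than leaked, and SOCK_DEAD is sampled exactly once, under the receive-queue lock, so the same decision drives both the enqueue and the later wake-up/free step. What follows is a minimal user-space sketch of that pattern, using pthreads and illustrative stand-ins (buf_t, rx_socket, queue_rcv_buf, filter_check) for sk_buff, struct sock, caif_queue_rcv_skb and sk_filter(); it is not the kernel code itself.

/*
 * Sketch only: a single exit label owns the free, and the "dead" check is
 * made once while the queue lock is held.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct buf {
	struct buf *next;
	size_t len;
} buf_t;

typedef struct {
	pthread_mutex_t lock;
	buf_t *head, *tail;
	bool dead;                      /* set when the socket is torn down */
} rx_socket;

/* Stand-in for sk_filter(): nonzero means "drop this buffer". */
static int filter_check(const buf_t *b)
{
	return b->len == 0;
}

static void queue_rcv_buf(rx_socket *sk, buf_t *b)
{
	bool queued = false;

	if (filter_check(b))
		goto out;               /* early return here used to leak b */

	pthread_mutex_lock(&sk->lock);
	queued = !sk->dead;             /* sample the flag once, under the lock */
	if (queued) {
		b->next = NULL;
		if (sk->tail)
			sk->tail->next = b;
		else
			sk->head = b;
		sk->tail = b;
	}
	pthread_mutex_unlock(&sk->lock);
out:
	if (queued)
		printf("buffer queued, waking reader\n"); /* b belongs to the queue now */
	else
		free(b);                /* filtered out, or socket already dead */
}

int main(void)
{
	rx_socket sk = { .lock = PTHREAD_MUTEX_INITIALIZER };
	buf_t *a = calloc(1, sizeof(*a));
	buf_t *b = calloc(1, sizeof(*b));

	a->len = 42;
	queue_rcv_buf(&sk, a);          /* live socket: queued */

	sk.dead = true;
	b->len = 7;
	queue_rcv_buf(&sk, b);          /* dead socket: freed, never queued */

	free(sk.head);
	pthread_mutex_destroy(&sk.lock);
	return 0;
}

Build with cc -pthread. The shape of the control flow is the point: the out: label owns the free on every non-queued path, and the queued decision made under the lock is reused afterwards instead of re-reading the flag.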
net/caif/caif_socket.c  +8 −11
@@ -121,12 +121,13 @@ static void caif_flow_ctrl(struct sock *sk, int mode)
  * Copied from sock.c:sock_queue_rcv_skb(), but changed so packets are
  * not dropped, but CAIF is sending flow off instead.
  */
-static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
+static void caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 {
 	int err;
 	unsigned long flags;
 	struct sk_buff_head *list = &sk->sk_receive_queue;
 	struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
+	bool queued = false;
 
 	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
 		(unsigned int)sk->sk_rcvbuf && rx_flow_is_on(cf_sk)) {
@@ -139,7 +140,8 @@ static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 
 	err = sk_filter(sk, skb);
 	if (err)
-		return err;
+		goto out;
+
 	if (!sk_rmem_schedule(sk, skb, skb->truesize) && rx_flow_is_on(cf_sk)) {
 		set_rx_flow_off(cf_sk);
 		net_dbg_ratelimited("sending flow OFF due to rmem_schedule\n");
@@ -147,21 +149,16 @@ static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 	}
 	skb->dev = NULL;
 	skb_set_owner_r(skb, sk);
-	/* Cache the SKB length before we tack it onto the receive
-	 * queue. Once it is added it no longer belongs to us and
-	 * may be freed by other threads of control pulling packets
-	 * from the queue.
-	 */
 	spin_lock_irqsave(&list->lock, flags);
-	if (!sock_flag(sk, SOCK_DEAD))
+	queued = !sock_flag(sk, SOCK_DEAD);
+	if (queued)
 		__skb_queue_tail(list, skb);
 	spin_unlock_irqrestore(&list->lock, flags);
-
-	if (!sock_flag(sk, SOCK_DEAD))
+out:
+	if (queued)
 		sk->sk_data_ready(sk);
 	else
 		kfree_skb(skb);
-	return 0;
 }
 
 /* Packet Receive Callback function called from CAIF Stack */