Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 9de04e6b authored by Herbert Xu's avatar Herbert Xu Committed by Ravinder Konka
Browse files

net: Clone skb before setting peeked flag



[ Upstream commit 738ac1ebb96d02e0d23bc320302a6ea94c612dec ]

Shared skbs must not be modified and this is crucial for broadcast
and/or multicast paths where we use it as an optimisation to avoid
unnecessary cloning.

The function skb_recv_datagram breaks this rule by setting peeked
without cloning the skb first.  This causes funky races which leads
to double-free.

This patch fixes this by cloning the skb and replacing the skb
in the list when setting skb->peeked.

Change-Id: I038b4fae0e1e4a0f57e2ddc7e235838fae96b6f9
Fixes: a59322be ("[UDP]: Only increment counter on first peek/recv")
Reported-by: default avatarKonstantin Khlebnikov <khlebnikov@yandex-team.ru>
Signed-off-by: default avatarHerbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: default avatarDavid S. Miller <davem@davemloft.net>
Signed-off-by: default avatarGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Git-commit: 0ba48ae94c393dc4c43b257400046feeeb9c6fad
Git-repo: git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git


Signed-off-by: default avatarRavinder Konka <rkonka@codeaurora.org>
parent 422bb915
Loading
Loading
Loading
Loading
+38 −3
Original line number Diff line number Diff line
@@ -130,6 +130,35 @@ out_noerr:
	goto out;
}

static int skb_set_peeked(struct sk_buff *skb)
{
	struct sk_buff *nskb;

	if (skb->peeked)
		return 0;

	/* We have to unshare an skb before modifying it. */
	if (!skb_shared(skb))
		goto done;

	nskb = skb_clone(skb, GFP_ATOMIC);
	if (!nskb)
		return -ENOMEM;

	skb->prev->next = nskb;
	skb->next->prev = nskb;
	nskb->prev = skb->prev;
	nskb->next = skb->next;

	consume_skb(skb);
	skb = nskb;

done:
	skb->peeked = 1;

	return 0;
}

/**
 *	__skb_recv_datagram - Receive a datagram skbuff
 *	@sk: socket
@@ -164,7 +193,9 @@ out_noerr:
struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags,
				    int *peeked, int *off, int *err)
{
	struct sk_buff_head *queue = &sk->sk_receive_queue;
	struct sk_buff *skb, *last;
	unsigned long cpu_flags;
	long timeo;
	/*
	 * Caller is allowed not to check sk->sk_err before skb_recv_datagram()
@@ -183,8 +214,6 @@ struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags,
		 * Look at current nfs client by the way...
		 * However, this function was correct in any case. 8)
		 */
		unsigned long cpu_flags;
		struct sk_buff_head *queue = &sk->sk_receive_queue;
		int _off = *off;

		last = (struct sk_buff *)queue;
@@ -198,7 +227,11 @@ struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags,
					_off -= skb->len;
					continue;
				}
				skb->peeked = 1;

				error = skb_set_peeked(skb);
				if (error)
					goto unlock_err;

				atomic_inc(&skb->users);
			} else
				__skb_unlink(skb, queue);
@@ -222,6 +255,8 @@ struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags,

	return NULL;

unlock_err:
	spin_unlock_irqrestore(&queue->lock, cpu_flags);
no_packet:
	*err = error;
	return NULL;