
Commit f545a38f authored by Eric Dumazet, committed by David S. Miller

net: add a limit parameter to sk_add_backlog()



sk_add_backlog() and sk_rcvqueues_full() hard-coded sk->sk_rcvbuf as the
memory limit. We need to make this limit a parameter for TCP use.

No functional change is expected from this patch; all callers still use the
old sk_rcvbuf limit.
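
As an illustration of the intent, a follow-up TCP change can now pass a budget
larger than sk->sk_rcvbuf at its call sites. The sketch below is hypothetical
and not part of this patch; the combined receive-plus-send-buffer limit is
only an example of what the new parameter permits:

	/* Hypothetical future caller (not in this patch): the new limit
	 * parameter lets TCP cap its backlog with a budget other than
	 * sk->sk_rcvbuf, e.g. one that also accounts for the send buffer.
	 */
	} else if (unlikely(sk_add_backlog(sk, skb,
					   sk->sk_rcvbuf + sk->sk_sndbuf))) {
		bh_unlock_sock(sk);
		NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
		goto discard_and_relse;
	}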

Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: Neal Cardwell <ncardwell@google.com>
Cc: Tom Herbert <therbert@google.com>
Cc: Maciej Żenczykowski <maze@google.com>
Cc: Yuchung Cheng <ycheng@google.com>
Cc: Ilpo Järvinen <ilpo.jarvinen@helsinki.fi>
Cc: Rick Jones <rick.jones2@hp.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent b9898507
include/net/sock.h +6 −4
@@ -709,17 +709,19 @@ static inline void __sk_add_backlog(struct sock *sk, struct sk_buff *skb)
  * Do not take into account this skb truesize,
  * to allow even a single big packet to come.
  */
-static inline bool sk_rcvqueues_full(const struct sock *sk, const struct sk_buff *skb)
+static inline bool sk_rcvqueues_full(const struct sock *sk, const struct sk_buff *skb,
+				     unsigned int limit)
 {
 	unsigned int qsize = sk->sk_backlog.len + atomic_read(&sk->sk_rmem_alloc);
 
-	return qsize > sk->sk_rcvbuf;
+	return qsize > limit;
 }
 
 /* The per-socket spinlock must be held here. */
-static inline __must_check int sk_add_backlog(struct sock *sk, struct sk_buff *skb)
+static inline __must_check int sk_add_backlog(struct sock *sk, struct sk_buff *skb,
+					      unsigned int limit)
 {
-	if (sk_rcvqueues_full(sk, skb))
+	if (sk_rcvqueues_full(sk, skb, limit))
 		return -ENOBUFS;
 
 	__sk_add_backlog(sk, skb);
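
For reference, here is how the two helpers read once this hunk is applied.
This is reconstructed from the diff above; the view truncates sk_add_backlog(),
so its closing line (the final return 0;) is assumed rather than shown:

	static inline bool sk_rcvqueues_full(const struct sock *sk, const struct sk_buff *skb,
					     unsigned int limit)
	{
		unsigned int qsize = sk->sk_backlog.len + atomic_read(&sk->sk_rmem_alloc);

		return qsize > limit;
	}

	/* The per-socket spinlock must be held here. */
	static inline __must_check int sk_add_backlog(struct sock *sk, struct sk_buff *skb,
						      unsigned int limit)
	{
		if (sk_rcvqueues_full(sk, skb, limit))
			return -ENOBUFS;

		__sk_add_backlog(sk, skb);
		return 0;	/* assumed: the hunk above is cut off before this line */
	}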
net/core/sock.c +2 −2
@@ -389,7 +389,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
 
 	skb->dev = NULL;
 
-	if (sk_rcvqueues_full(sk, skb)) {
+	if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf)) {
 		atomic_inc(&sk->sk_drops);
 		goto discard_and_relse;
 	}
@@ -406,7 +406,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
 		rc = sk_backlog_rcv(sk, skb);
 
 		mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
-	} else if (sk_add_backlog(sk, skb)) {
+	} else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
 		bh_unlock_sock(sk);
 		atomic_inc(&sk->sk_drops);
 		goto discard_and_relse;
net/ipv4/tcp_ipv4.c +1 −1
@@ -1752,7 +1752,7 @@ process:
 			if (!tcp_prequeue(sk, skb))
 				ret = tcp_v4_do_rcv(sk, skb);
 		}
-	} else if (unlikely(sk_add_backlog(sk, skb))) {
+	} else if (unlikely(sk_add_backlog(sk, skb, sk->sk_rcvbuf))) {
 		bh_unlock_sock(sk);
 		NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
 		goto discard_and_relse;
net/ipv4/udp.c +2 −2
@@ -1479,7 +1479,7 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 		goto drop;
 
 
-	if (sk_rcvqueues_full(sk, skb))
+	if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf))
 		goto drop;
 
 	rc = 0;
@@ -1488,7 +1488,7 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 	bh_lock_sock(sk);
 	if (!sock_owned_by_user(sk))
 		rc = __udp_queue_rcv_skb(sk, skb);
-	else if (sk_add_backlog(sk, skb)) {
+	else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
 		bh_unlock_sock(sk);
 		goto drop;
 	}
net/ipv6/tcp_ipv6.c +1 −1
@@ -1654,7 +1654,7 @@ process:
 			if (!tcp_prequeue(sk, skb))
 				ret = tcp_v6_do_rcv(sk, skb);
 		}
-	} else if (unlikely(sk_add_backlog(sk, skb))) {
+	} else if (unlikely(sk_add_backlog(sk, skb, sk->sk_rcvbuf))) {
 		bh_unlock_sock(sk);
 		NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
 		goto discard_and_relse;