
Commit a84db540 authored by qctecmdr Service, committed by Gerrit - the friendly Code Review server

Merge "tcp: add tcp_ooo_try_coalesce() helper"

parents 03db21d0 34dc7c0a
net/ipv4/tcp_input.c +39 −8
@@ -4323,6 +4323,23 @@ static bool tcp_try_coalesce(struct sock *sk,
 	return true;
 }
 
+static bool tcp_ooo_try_coalesce(struct sock *sk,
+			     struct sk_buff *to,
+			     struct sk_buff *from,
+			     bool *fragstolen)
+{
+	bool res = tcp_try_coalesce(sk, OOO_QUEUE, to, from, fragstolen);
+
+	/* In case tcp_drop() is called later, update to->gso_segs */
+	if (res) {
+		u32 gso_segs = max_t(u16, 1, skb_shinfo(to)->gso_segs) +
+			       max_t(u16, 1, skb_shinfo(from)->gso_segs);
+
+		skb_shinfo(to)->gso_segs = min_t(u32, gso_segs, 0xFFFF);
+	}
+	return res;
+}
+
 static void tcp_drop(struct sock *sk, struct sk_buff *skb)
 {
 	sk_drops_add(sk, skb);
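
The gso_segs bookkeeping in the new helper matters because tcp_drop() feeds sk_drops_add(), which charges max(1, gso_segs) segments per freed skb to sk->sk_drops; once skbs are coalesced, to->gso_segs must be kept current or later drops would be under-counted. The clamp to 0xFFFF is needed because gso_segs is a u16 in struct skb_shared_info. A minimal userspace sketch of the same saturating arithmetic, with the kernel's max_t()/min_t() macros written out by hand:

#include <stdint.h>
#include <stdio.h>

/* Mirror of the gso_segs update in tcp_ooo_try_coalesce(): each skb
 * counts as at least one segment, and the combined count saturates at
 * 0xFFFF because gso_segs is a u16 in struct skb_shared_info. */
static uint16_t coalesced_gso_segs(uint16_t to, uint16_t from)
{
	uint32_t sum = (uint32_t)(to ? to : 1) + (from ? from : 1);

	return sum > 0xFFFF ? 0xFFFF : (uint16_t)sum;
}

int main(void)
{
	printf("%u\n", coalesced_gso_segs(0, 0));         /* 2: never below one per skb */
	printf("%u\n", coalesced_gso_segs(40000, 40000)); /* 65535: saturated, no wraparound */
	return 0;
}
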
@@ -4454,7 +4471,7 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
 	/* In the typical case, we are adding an skb to the end of the list.
 	 * Use of ooo_last_skb avoids the O(Log(N)) rbtree lookup.
 	 */
-	if (tcp_try_coalesce(sk, OOO_QUEUE, tp->ooo_last_skb,
+	if (tcp_ooo_try_coalesce(sk, tp->ooo_last_skb,
 				 skb, &fragstolen)) {
 coalesce_done:
 		tcp_grow_window(sk, skb);
@@ -4483,7 +4500,7 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
 				/* All the bits are present. Drop. */
 				NET_INC_STATS(sock_net(sk),
 					      LINUX_MIB_TCPOFOMERGE);
-				__kfree_skb(skb);
+				tcp_drop(sk, skb);
 				skb = NULL;
 				tcp_dsack_set(sk, seq, end_seq);
 				goto add_sack;
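
This hunk and the next swap __kfree_skb() for tcp_drop() when an out-of-order skb turns out to be fully redundant. The skb is freed either way; the difference is that the drop is now accounted against the socket, so it becomes visible to diagnostics that read sk->sk_drops. A userspace model of that pattern (the struct and function names here are illustrative stand-ins, not kernel API):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-ins for struct sock / struct sk_buff. */
struct sock_model { uint64_t drops; };
struct skb_model { uint16_t gso_segs; void *data; };

/* Model of tcp_drop(): charge the drop before freeing, counting at
 * least one segment per skb as sk_drops_add() does; this is why
 * tcp_ooo_try_coalesce() keeps gso_segs accurate on coalesced skbs. */
static void tcp_drop_model(struct sock_model *sk, struct skb_model *skb)
{
	sk->drops += skb->gso_segs ? skb->gso_segs : 1;
	free(skb->data); /* stands in for __kfree_skb() */
}

int main(void)
{
	struct sock_model sk = { 0 };
	struct skb_model skb = { .gso_segs = 3, .data = malloc(64) };

	tcp_drop_model(&sk, &skb);
	printf("drops charged: %llu\n", (unsigned long long)sk.drops); /* 3 */
	return 0;
}
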
@@ -4502,10 +4519,10 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
 						 TCP_SKB_CB(skb1)->end_seq);
 				NET_INC_STATS(sock_net(sk),
 					      LINUX_MIB_TCPOFOMERGE);
-				__kfree_skb(skb1);
+				tcp_drop(sk, skb1);
 				goto merge_right;
 			}
-		} else if (tcp_try_coalesce(sk, OOO_QUEUE, skb1,
+		} else if (tcp_ooo_try_coalesce(sk, skb1,
 						skb, &fragstolen)) {
 			goto coalesce_done;
 		}
@@ -4868,6 +4885,7 @@ tcp_collapse(struct sock *sk, struct sk_buff_head *list, struct rb_root *root,
 static void tcp_collapse_ofo_queue(struct sock *sk)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
+	u32 range_truesize, sum_tiny = 0;
 	struct sk_buff *skb, *head;
 	struct rb_node *p;
 	u32 start, end;
@@ -4886,6 +4904,7 @@ static void tcp_collapse_ofo_queue(struct sock *sk)
 	}
 	start = TCP_SKB_CB(skb)->seq;
 	end = TCP_SKB_CB(skb)->end_seq;
+	range_truesize = skb->truesize;
 
 	for (head = skb;;) {
 		skb = tcp_skb_next(skb, NULL);
@@ -4896,11 +4915,20 @@ static void tcp_collapse_ofo_queue(struct sock *sk)
 		if (!skb ||
 		    after(TCP_SKB_CB(skb)->seq, end) ||
 		    before(TCP_SKB_CB(skb)->end_seq, start)) {
-			tcp_collapse(sk, NULL, &tp->out_of_order_queue,
-				     head, skb, start, end);
+			/* Do not attempt collapsing tiny skbs */
+			if (range_truesize != head->truesize ||
+			    end - start >= SKB_WITH_OVERHEAD(SK_MEM_QUANTUM)) {
+				tcp_collapse(sk, NULL, &tp->out_of_order_queue,
+					     head, skb, start, end);
+			} else {
+				sum_tiny += range_truesize;
+				if (sum_tiny > sk->sk_rcvbuf >> 3)
+					return;
+			}
 			goto new_range;
 		}
 
+		range_truesize += skb->truesize;
 		if (unlikely(before(TCP_SKB_CB(skb)->seq, start)))
 			start = TCP_SKB_CB(skb)->seq;
 		if (after(TCP_SKB_CB(skb)->end_seq, end))
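
This hunk is the heart of the defense against floods of tiny out-of-order segments: a range is collapsed only if it contains more than one skb (range_truesize != head->truesize) or spans enough sequence space to be worth rebuilding; otherwise its truesize is added to sum_tiny, and the walk aborts once tiny ranges exceed an eighth of sk->sk_rcvbuf, bounding the CPU an attacker can force tcp_collapse() to burn per prune. A runnable sketch of the predicate, with assumed values for the two kernel constants:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Assumed values: SK_MEM_QUANTUM is one 4 KiB page on typical builds,
 * and SKB_WITH_OVERHEAD() subtracts the skb_shared_info footprint
 * (the 320 here is illustrative, not a kernel constant). */
#define SK_MEM_QUANTUM		4096
#define SHINFO_OVERHEAD		320
#define SKB_WITH_OVERHEAD(x)	((x) - SHINFO_OVERHEAD)

/* Same decision as the patched tcp_collapse_ofo_queue(): collapse a
 * range only when it holds more than one skb or covers enough
 * sequence space to be worth rebuilding. */
static bool worth_collapsing(uint32_t range_truesize, uint32_t head_truesize,
			     uint32_t start, uint32_t end)
{
	return range_truesize != head_truesize ||
	       end - start >= SKB_WITH_OVERHEAD(SK_MEM_QUANTUM);
}

int main(void)
{
	/* A lone 1-byte segment: skip it, count it toward sum_tiny. */
	printf("%d\n", worth_collapsing(768, 768, 5000, 5001));  /* 0 */
	/* Two coalesced skbs in one range: collapsing can reclaim memory. */
	printf("%d\n", worth_collapsing(1536, 768, 5000, 5002)); /* 1 */
	return 0;
}
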
@@ -4977,6 +5005,9 @@ static int tcp_prune_queue(struct sock *sk)
 	else if (tcp_under_memory_pressure(sk))
 		tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U * tp->advmss);
 
+	if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf)
+		return 0;
+
 	tcp_collapse_ofo_queue(sk);
 	if (!skb_queue_empty(&sk->sk_receive_queue))
 		tcp_collapse(sk, &sk->sk_receive_queue, NULL,
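
Finally, tcp_prune_queue() gains an early exit: if receive memory is already within the socket's sk_rcvbuf budget (typically because the prune was triggered by wider memory pressure rather than this socket overrunning its limit), the expensive collapse passes are skipped. A sketch of the resulting control flow, with a hypothetical struct standing in for the two socket fields the new check reads:

#include <stdio.h>

/* Hypothetical stand-ins for sk->sk_rmem_alloc (bytes charged to the
 * receive path) and sk->sk_rcvbuf (the configured limit). */
struct sock_model {
	long rmem_alloc;
	long rcvbuf;
};

/* Shape of the patched tcp_prune_queue(): bail out once memory is
 * under the limit, collapse only as a last resort. */
static int prune_queue(struct sock_model *sk)
{
	/* ... rcv_ssthresh has already been reduced at this point ... */

	if (sk->rmem_alloc <= sk->rcvbuf)	/* the new early exit */
		return 0;

	/* ... tcp_collapse_ofo_queue() / tcp_collapse() would run here ... */
	return 1;
}

int main(void)
{
	struct sock_model under = { .rmem_alloc = 60000, .rcvbuf = 87380 };
	struct sock_model over  = { .rmem_alloc = 90000, .rcvbuf = 87380 };

	printf("%d %d\n", prune_queue(&under), prune_queue(&over)); /* 0 1 */
	return 0;
}
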