
Commit c65f7f00 authored by David S. Miller

[TCP]: Simplify SKB data portion allocation with NETIF_F_SG.



The ideal layout for an SKB when doing scatter-gather is to
put all the headers at skb->data, and all the user data in
the page array.

This makes SKB splitting and combining extremely simple,
especially before a packet goes onto the wire the first
time.
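
As a rough sketch of that layout (illustration only, not part of this
change), appending user data to such an SKB touches only the frag
array and never skb->tail; the helper name below is hypothetical and
the field names follow the 2.6-era struct skb_shared_info:

#include <linux/skbuff.h>
#include <linux/mm.h>

/* Hypothetical helper, for illustration only: attach one page of user
 * data to an SKB whose linear area holds headers alone (no tailroom).
 */
static void sg_append_user_data(struct sk_buff *skb, struct page *page,
				int off, int len)
{
	int i = skb_shinfo(skb)->nr_frags;
	skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

	get_page(page);			/* the frag keeps its own page reference */
	frag->page        = page;	/* user data lives in the page...        */
	frag->page_offset = off;
	frag->size        = len;	/* ...not in the linear area             */
	skb_shinfo(skb)->nr_frags = i + 1;

	skb->len      += len;		/* skb->tail is untouched: headers       */
	skb->data_len += len;		/* stay alone at skb->data               */
}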

So, when sk_stream_alloc_pskb() is given a zero size, make
sure there is no skb_tailroom().  This is achieved by applying
SKB_DATA_ALIGN() to the header length used here.
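
To see why the alignment matters (illustration only, not from the
patch): alloc_skb() rounds its linear size up with SKB_DATA_ALIGN()
before allocating, so reserving the raw max_header would still leave a
sliver of tailroom, while reserving the already-aligned hdr_len leaves
none.  The standalone program below just reproduces that arithmetic,
assuming 32-byte cache lines and a made-up header length:

#include <stdio.h>

#define SMP_CACHE_BYTES 32	/* assumption: 32-byte cache lines */
#define SKB_DATA_ALIGN(x) (((x) + (SMP_CACHE_BYTES - 1)) & \
			   ~(SMP_CACHE_BYTES - 1))

int main(void)
{
	int max_header = 138;	/* hypothetical value, not MAX_TCP_HEADER */
	int size = 0;		/* zero-size request from the caller      */

	/* Old scheme: alloc_skb() still rounds the linear area up, but
	 * skb_reserve() consumes only the unaligned header length, so
	 * some tailroom is left over.
	 */
	int old_linear   = SKB_DATA_ALIGN(size + max_header);
	int old_tailroom = old_linear - max_header;

	/* New scheme: align first, then allocate and reserve the same
	 * aligned length, so tailroom is exactly zero when size == 0.
	 */
	int hdr_len      = SKB_DATA_ALIGN(max_header);
	int new_linear   = SKB_DATA_ALIGN(size + hdr_len);
	int new_tailroom = new_linear - hdr_len;

	printf("old tailroom: %d bytes\n", old_tailroom);	/* 22 */
	printf("new tailroom: %d bytes\n", new_tailroom);	/*  0 */
	return 0;
}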

Next, make select_size() in TCP output segmentation use a
length of zero when NETIF_F_SG is true on the outgoing
interface.
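
The policy select_size() now implements boils down to the hypothetical
sketch below (standalone C, not the kernel source; the function name
and the 1460-byte example MSS are made up): a scatter-gather capable
device gets a zero linear-payload budget, anything else still gets a
full MSS of linear space because it cannot take its payload from page
fragments.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical sketch of the new policy: how much payload should land
 * in the linear area of a freshly allocated segment?
 */
static int linear_payload_budget(bool dev_can_sg, int mss)
{
	/* Scatter-gather hardware gets header-only skbs; every byte of
	 * user data will be attached as page fragments instead.
	 */
	if (dev_can_sg)
		return 0;

	/* A device that cannot do scatter-gather needs the whole MSS
	 * worth of payload contiguous in the linear area.
	 */
	return mss;
}

int main(void)
{
	printf("SG device:     %d bytes in linear area\n",
	       linear_payload_budget(true, 1460));
	printf("non-SG device: %d bytes in linear area\n",
	       linear_payload_budget(false, 1460));
	return 0;
}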

Signed-off-by: David S. Miller <davem@davemloft.net>
parent b8259d9a

include/net/sock.h (+5 −2)

@@ -1134,13 +1134,16 @@ static inline void sk_stream_moderate_sndbuf(struct sock *sk)
 static inline struct sk_buff *sk_stream_alloc_pskb(struct sock *sk,
 						   int size, int mem, int gfp)
 {
-	struct sk_buff *skb = alloc_skb(size + sk->sk_prot->max_header, gfp);
+	struct sk_buff *skb;
+	int hdr_len;
 
+	hdr_len = SKB_DATA_ALIGN(sk->sk_prot->max_header);
+	skb = alloc_skb(size + hdr_len, gfp);
 	if (skb) {
 		skb->truesize += mem;
 		if (sk->sk_forward_alloc >= (int)skb->truesize ||
 		    sk_stream_mem_schedule(sk, skb->truesize, 0)) {
-			skb_reserve(skb, sk->sk_prot->max_header);
+			skb_reserve(skb, hdr_len);
 			return skb;
 		}
 		__kfree_skb(skb);

net/ipv4/tcp.c (+2 −11)

@@ -756,13 +756,9 @@ static inline int select_size(struct sock *sk, struct tcp_sock *tp)
 {
 	int tmp = tp->mss_cache_std;
 
-	if (sk->sk_route_caps & NETIF_F_SG) {
-		int pgbreak = SKB_MAX_HEAD(MAX_TCP_HEADER);
+	if (sk->sk_route_caps & NETIF_F_SG)
+		tmp = 0;
 
-		if (tmp >= pgbreak &&
-		    tmp <= pgbreak + (MAX_SKB_FRAGS - 1) * PAGE_SIZE)
-			tmp = pgbreak;
-	}
 	return tmp;
 }
 
@@ -872,11 +868,6 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 					tcp_mark_push(tp, skb);
 					goto new_segment;
 				} else if (page) {
-					/* If page is cached, align
-					 * offset to L1 cache boundary
-					 */
-					off = (off + L1_CACHE_BYTES - 1) &
-					      ~(L1_CACHE_BYTES - 1);
 					if (off == PAGE_SIZE) {
 						put_page(page);
 						TCP_PAGE(sk) = page = NULL;