
Commit 2e4e4410, authored by Eric Dumazet, committed by David S. Miller

net: add alloc_skb_with_frags() helper

Extract the code that builds an skb with frags out of sock_alloc_send_pskb(),
so that it can be reused in other contexts.

The intent is to use it from tcp_send_rcvq(), tcp_collapse(), ...

We also want to replace some skb_linearize() calls with a more reliable
strategy in pathological cases where we need to reduce the number of frags.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent cb93471a
include/linux/skbuff.h  +6 −0
@@ -769,6 +769,12 @@ static inline struct sk_buff *alloc_skb(unsigned int size,
 	return __alloc_skb(size, priority, 0, NUMA_NO_NODE);
 }
 
+struct sk_buff *alloc_skb_with_frags(unsigned long header_len,
+				     unsigned long data_len,
+				     int max_page_order,
+				     int *errcode,
+				     gfp_t gfp_mask);
+
 static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
 					       gfp_t priority)
 {
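For illustration, here is a minimal, hypothetical sketch (not part of this commit) of how a caller might use the prototype declared above. The sizes, the order-3 preference, and the ERR_PTR() wrapping are this example's choices, not the kernel's:

	/* Hypothetical caller: 128-byte linear part, 32 KB of payload in
	 * page frags, trying order-3 (32 KB) pages first.  On failure the
	 * helper returns NULL and reports why in *errcode.
	 */
	static struct sk_buff *example_alloc(void)
	{
		struct sk_buff *skb;
		int errcode;

		skb = alloc_skb_with_frags(128, 32768, 3, &errcode, GFP_KERNEL);
		if (!skb)
			return ERR_PTR(errcode);	/* -EMSGSIZE or -ENOBUFS */
		return skb;
	}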
net/core/skbuff.c  +78 −0
@@ -4102,3 +4102,81 @@ struct sk_buff *skb_vlan_untag(struct sk_buff *skb)
 	return NULL;
 }
 EXPORT_SYMBOL(skb_vlan_untag);
+
+/**
+ * alloc_skb_with_frags - allocate skb with page frags
+ *
+ * @header_len: size of linear part
+ * @data_len: needed length in frags
+ * @max_page_order: max page order desired.
+ * @errcode: pointer to error code if any
+ * @gfp_mask: allocation mask
+ *
+ * This can be used to allocate a paged skb, given a maximal order for frags.
+ */
+struct sk_buff *alloc_skb_with_frags(unsigned long header_len,
+				     unsigned long data_len,
+				     int max_page_order,
+				     int *errcode,
+				     gfp_t gfp_mask)
+{
+	int npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
+	unsigned long chunk;
+	struct sk_buff *skb;
+	struct page *page;
+	gfp_t gfp_head;
+	int i;
+
+	*errcode = -EMSGSIZE;
+	/* Note this test could be relaxed, if we succeed to allocate
+	 * high order pages...
+	 */
+	if (npages > MAX_SKB_FRAGS)
+		return NULL;
+
+	gfp_head = gfp_mask;
+	if (gfp_head & __GFP_WAIT)
+		gfp_head |= __GFP_REPEAT;
+
+	*errcode = -ENOBUFS;
+	skb = alloc_skb(header_len, gfp_head);
+	if (!skb)
+		return NULL;
+
+	skb->truesize += npages << PAGE_SHIFT;
+
+	for (i = 0; npages > 0; i++) {
+		int order = max_page_order;
+
+		while (order) {
+			if (npages >= 1 << order) {
+				page = alloc_pages(gfp_mask |
+						   __GFP_COMP |
+						   __GFP_NOWARN |
+						   __GFP_NORETRY,
+						   order);
+				if (page)
+					goto fill_page;
+				/* Do not retry other high order allocations */
+				order = 1;
+				max_page_order = 0;
+			}
+			order--;
+		}
+		page = alloc_page(gfp_mask);
+		if (!page)
+			goto failure;
+fill_page:
+		chunk = min_t(unsigned long, data_len,
+			      PAGE_SIZE << order);
+		skb_fill_page_desc(skb, i, page, 0, chunk);
+		data_len -= chunk;
+		npages -= 1 << order;
+	}
+	return skb;
+
+failure:
+	kfree_skb(skb);
+	return NULL;
+}
+EXPORT_SYMBOL(alloc_skb_with_frags);
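To make the order-fallback arithmetic above concrete, here is a stand-alone sketch (plain userspace C, assuming 4 KB pages and max_page_order = 3). It mirrors only the frag-sizing logic of the success path, not the retry-on-allocation-failure behaviour:

	#include <stdio.h>

	#define PAGE_SIZE  4096UL
	#define PAGE_SHIFT 12

	int main(void)
	{
		unsigned long data_len = 70000;
		int npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
		int max_page_order = 3;
		int i;

		for (i = 0; npages > 0; i++) {
			int order = max_page_order;
			unsigned long chunk;

			/* pick the largest order that still fits npages */
			while (order && npages < (1 << order))
				order--;

			chunk = data_len < (PAGE_SIZE << order) ?
				data_len : (PAGE_SIZE << order);
			printf("frag %d: order %d, %lu bytes\n", i, order, chunk);
			data_len -= chunk;
			npages -= 1 << order;
		}
		return 0;
	}

For data_len = 70000 this prints three frags of 32768, 32768 and 4464 bytes, i.e. npages = 18 pages consumed as 8 + 8 + 2, instead of 18 separate order-0 frags.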
net/core/sock.c  +15 −63
@@ -1762,21 +1762,12 @@ struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
 				     unsigned long data_len, int noblock,
 				     int *errcode, int max_page_order)
 {
-	struct sk_buff *skb = NULL;
-	unsigned long chunk;
-	gfp_t gfp_mask;
+	struct sk_buff *skb;
 	long timeo;
 	int err;
-	int npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
-	struct page *page;
-	int i;
-
-	err = -EMSGSIZE;
-	if (npages > MAX_SKB_FRAGS)
-		goto failure;
 
 	timeo = sock_sndtimeo(sk, noblock);
-	while (!skb) {
+	for (;;) {
 		err = sock_error(sk);
 		if (err != 0)
 			goto failure;
@@ -1785,7 +1776,9 @@ struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
 		if (sk->sk_shutdown & SEND_SHUTDOWN)
 			goto failure;
 
-		if (atomic_read(&sk->sk_wmem_alloc) >= sk->sk_sndbuf) {
+		if (sk_wmem_alloc_get(sk) < sk->sk_sndbuf)
+			break;
+
 		set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
 		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
 		err = -EAGAIN;
@@ -1794,57 +1787,16 @@ struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
 		if (signal_pending(current))
 			goto interrupted;
 		timeo = sock_wait_for_wmem(sk, timeo);
-			continue;
 	}
-
-		err = -ENOBUFS;
-		gfp_mask = sk->sk_allocation;
-		if (gfp_mask & __GFP_WAIT)
-			gfp_mask |= __GFP_REPEAT;
-
-		skb = alloc_skb(header_len, gfp_mask);
-		if (!skb)
-			goto failure;
-
-		skb->truesize += data_len;
-
-		for (i = 0; npages > 0; i++) {
-			int order = max_page_order;
-
-			while (order) {
-				if (npages >= 1 << order) {
-					page = alloc_pages(sk->sk_allocation |
-							   __GFP_COMP |
-							   __GFP_NOWARN |
-							   __GFP_NORETRY,
-							   order);
-					if (page)
-						goto fill_page;
-					/* Do not retry other high order allocations */
-					order = 1;
-					max_page_order = 0;
-				}
-				order--;
-			}
-			page = alloc_page(sk->sk_allocation);
-			if (!page)
-				goto failure;
-fill_page:
-			chunk = min_t(unsigned long, data_len,
-				      PAGE_SIZE << order);
-			skb_fill_page_desc(skb, i, page, 0, chunk);
-			data_len -= chunk;
-			npages -= 1 << order;
-		}
-	}
-
+	skb = alloc_skb_with_frags(header_len, data_len, max_page_order,
+				   errcode, sk->sk_allocation);
+	if (skb)
 		skb_set_owner_w(skb, sk);
 	return skb;
 
 interrupted:
 	err = sock_intr_errno(timeo);
 failure:
-	kfree_skb(skb);
 	*errcode = err;
 	return NULL;
 }
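Callers of sock_alloc_send_pskb() are unaffected by this refactoring. For context, a hypothetical sendmsg-style call (loosely modelled on how datagram protocols use this function; hlen, dlen and err are assumed locals, not from this commit) still looks like:

	/* Hypothetical caller: reserve hlen bytes of linear headroom and
	 * place dlen bytes of payload in order-0 page frags; block unless
	 * MSG_DONTWAIT was requested.
	 */
	skb = sock_alloc_send_pskb(sk, hlen, dlen,
				   msg->msg_flags & MSG_DONTWAIT,
				   &err, 0);
	if (!skb)
		return err;	/* e.g. -EAGAIN, -EPIPE or -ENOBUFS */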