Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 3e4cd081 authored by Subash Abhinov Kasiviswanathan, committed by Sean Tranchetti
Browse files

net: qualcomm: rmnet: Do not clone UDP and command packets



UDP packets cloned from aggregated packets may cause drops due to
overestimated buffer sizes (skb truesize). Since a clone doesn't
involve a memcpy, there is some benefit to using it for UDP.

When datapath switched to cloning for UDP, the skb truesize was
forced to match size of UDP packet + metadata in rmnet driver (2150)
rather than the actual size of QMAP aggregated packet (16640).
While this works most of the time, upstream changes check for memory
more accurately during memory pressure situations.
This means that when the socket buffer reaches half of the allocated
receive buffer size, the kernel starts to recompute the actual memory
used per packet to avoid OOM situations.

CRs-Fixed: 2335054
Change-Id: Id5cdc383fea1e7759e7419c40639045b6711e15d
Signed-off-by: Subash Abhinov Kasiviswanathan <subashab@codeaurora.org>
parent 09e63e25
Loading
Loading
Loading
Loading
+9 −44
Original line number Diff line number Diff line
@@ -15,6 +15,8 @@
#include "rmnet_handlers.h"

#define RMNET_MAP_PKT_COPY_THRESHOLD 64
#define RMNET_MAP_DEAGGR_SPACING  64
#define RMNET_MAP_DEAGGR_HEADROOM (RMNET_MAP_DEAGGR_SPACING / 2)

static __sum16 *rmnet_map_get_csum_field(unsigned char protocol,
					 const void *txporthdr)
@@ -299,34 +301,11 @@ struct rmnet_map_header *rmnet_map_add_map_header(struct sk_buff *skb,
}

/* Deaggregates a single packet
 * A whole new buffer is allocated for each portion of an aggregated frame
 * except when a UDP or command packet is received.
 * A whole new buffer is allocated for each portion of an aggregated frame.
 * Caller should keep calling deaggregate() on the source skb until 0 is
 * returned, indicating that there are no more packets to deaggregate. Caller
 * is responsible for freeing the original skb.
 */
/* Decide how a deaggregated MAP packet should be materialized.
 * Returns 1 if the packet must be copied into a freshly allocated skb,
 * 0 if it is acceptable to clone the aggregate skb instead (caller
 * clones for command packets and UDP, copies everything else).
 */
static int rmnet_validate_clone(struct sk_buff *skb)
{
	/* MAP command packets: clone is fine */
	if (RMNET_MAP_GET_CD_BIT(skb))
		return 0;

	/* Small packets are cheap to memcpy; cloning buys nothing */
	if (skb->len < RMNET_MAP_PKT_COPY_THRESHOLD)
		return 1;

	/* IP header starts right after the 4-byte MAP header; the high
	 * nibble of the first IP byte is the IP version.
	 * NOTE(review): for IPv6 this only detects UDP when it is the
	 * immediate next header — extension headers are not walked.
	 */
	switch (skb->data[4] & 0xF0) {
	case 0x40: /* IPv4 */
		if (((struct iphdr *)&skb->data[4])->protocol == IPPROTO_UDP)
			return 0;
		break;
	case 0x60: /* IPv6 */
		if (((struct ipv6hdr *)&skb->data[4])->nexthdr == IPPROTO_UDP)
			return 0;
		/* Fall through */
	}

	/* Everything else (TCP, non-UDP IPv6, unknown version): copy */
	return 1;
}

struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb,
				      struct rmnet_port *port)
{
@@ -350,27 +329,13 @@ struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb,
	if (ntohs(maph->pkt_len) == 0)
		return NULL;


	if (rmnet_validate_clone(skb)) {
		skbn = alloc_skb(packet_len + RMNET_MAP_DEAGGR_SPACING,
				 GFP_ATOMIC);
	skbn = alloc_skb(packet_len + RMNET_MAP_DEAGGR_SPACING, GFP_ATOMIC);
	if (!skbn)
		return NULL;

	skb_reserve(skbn, RMNET_MAP_DEAGGR_HEADROOM);
	skb_put(skbn, packet_len);
	memcpy(skbn->data, skb->data, packet_len);

	} else {
		skbn = skb_clone(skb, GFP_ATOMIC);
		if (!skbn)
			return NULL;

		skb_trim(skbn, packet_len);
		skbn->truesize = SKB_TRUESIZE(packet_len);
		__skb_set_hash(skbn, 0, 0, 0);
	}

	skb_pull(skb, packet_len);

	return skbn;