Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit e4dc46df authored by Sean Tranchetti
Browse files

net: qualcomm: rmnet: Add NETIF_F_GRO_HW support



When the NETIF_F_GRO_HW bit is set in the device features, we no longer
need to rebuild every packet in a hardware coalesced frame. Instead, we
send larger SKBs made up of multiple packets that hardware has coalesced
for us.

When sending these large packets, we fill in the extra metadata
needed by the network stack so that the larger packets can be properly
broken up if need be during forwarding, or during the UDP GRO path before
queueing the data to the socket.

Change-Id: I014d2095e838ec71ca3588e5b510f7652f22375f
Signed-off-by: Sean Tranchetti <stranche@codeaurora.org>
parent a9a0dbdf
Loading
Loading
Loading
Loading
+118 −16
Original line number Diff line number Diff line
@@ -529,11 +529,11 @@ void rmnet_map_checksum_uplink_packet(struct sk_buff *skb,
static void rmnet_map_nonlinear_copy(struct sk_buff *coal_skb,
				     u32 hdr_len,
				     u32 start,
				     u16 pkt_len,
				     u16 pkt_len, u8 pkt_count,
				     struct sk_buff *dest)
{
	unsigned char *data_start = rmnet_map_data_ptr(coal_skb) + hdr_len;
	u32 copy_len = pkt_len;
	u32 copy_len = pkt_len * pkt_count;

	if (skb_is_nonlinear(coal_skb)) {
		skb_frag_t *frag0 = skb_shinfo(coal_skb)->frags;
@@ -549,13 +549,67 @@ static void rmnet_map_nonlinear_copy(struct sk_buff *coal_skb,
	}
}

/* Fill in GSO metadata to allow the SKB to be segmented by the NW stack
 * if needed (i.e. forwarding, UDP GRO).
 *
 * @skb:      reconstructed SKB holding @gso_segs coalesced packets
 * @gso_size: payload length of each individual segment
 * @gso_segs: number of segments hardware coalesced into @skb
 *
 * Sets CHECKSUM_PARTIAL and seeds the transport checksum field with the
 * pseudo-header checksum (addresses + protocol + transport length) so the
 * stack can finish the checksum per-segment after GSO splits the SKB.
 */
static void rmnet_map_gso_stamp(struct sk_buff *skb, u16 gso_size, u8 gso_segs)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	struct iphdr *iph = ip_hdr(skb);
	void *addr;
	__sum16 *check;
	__wsum partial;
	int csum_len;
	/* Total transport payload; transport header length is added below.
	 * NOTE(review): u16 product of gso_size * gso_segs — assumes the
	 * coalesced frame never exceeds 64K of payload; verify against the
	 * MAP v5 coalescing limits.
	 */
	u16 pkt_len = gso_size * gso_segs;
	u8 protocol;
	bool ipv4 = iph->version == 4;

	/* Locate the address pair and L4 protocol for the pseudo-header.
	 * csum_len covers both source and destination addresses, which are
	 * adjacent in both the IPv4 and IPv6 headers.
	 */
	if (ipv4) {
		addr = &iph->saddr;
		csum_len = sizeof(iph->saddr) * 2;
		protocol = iph->protocol;
	} else {
		struct ipv6hdr *ip6h = ipv6_hdr(skb);

		addr = &ip6h->saddr;
		csum_len = sizeof(ip6h->saddr) * 2;
		protocol = ip6h->nexthdr;
	}

	/* Only TCP and UDP reach here; pick the checksum field location and
	 * add the transport header length into the pseudo-header length.
	 */
	if (protocol == IPPROTO_TCP) {
		struct tcphdr *tp = tcp_hdr(skb);

		pkt_len += tp->doff * 4;
		check = &tp->check;
		shinfo->gso_type = (ipv4) ? SKB_GSO_TCPV4 : SKB_GSO_TCPV6;
		skb->csum_offset = offsetof(struct tcphdr, check);
	} else {
		struct udphdr *up = udp_hdr(skb);

		pkt_len += sizeof(*up);
		check = &up->check;
		shinfo->gso_type = SKB_GSO_UDP_L4;
		skb->csum_offset = offsetof(struct udphdr, check);
	}

	/* Fold the pseudo-header (addresses, protocol, length) into the
	 * transport checksum field, as CHECKSUM_PARTIAL requires.
	 * NOTE(review): csum16_add() takes __sum16 operands while partial is
	 * __wsum — confirm this mix is intentional and sparse-clean.
	 */
	partial = csum_partial(addr, csum_len, 0);
	partial = csum16_add(partial, htons((u16)protocol));
	partial = csum16_add(partial, htons(pkt_len));
	*check = ~csum_fold(partial);

	/* Hand off checksum completion to the stack/hardware per segment. */
	skb->ip_summed = CHECKSUM_PARTIAL;
	skb->csum_start = skb_transport_header(skb) - skb->head;
	shinfo->gso_size = gso_size;
	shinfo->gso_segs = gso_segs;
}

/* Create a new UDP SKB from the coalesced SKB. Appropriate IP and UDP headers
 * will be added.
 */
static struct sk_buff *rmnet_map_segment_udp_skb(struct sk_buff *coal_skb,
						 u32 start,
						 int start_pkt_num,
						 u16 pkt_len)
						 u16 pkt_len, u8 pkt_count)
{
	struct sk_buff *skbn;
	struct iphdr *iph = (struct iphdr *)rmnet_map_data_ptr(coal_skb);
@@ -589,7 +643,7 @@ static struct sk_buff *rmnet_map_segment_udp_skb(struct sk_buff *coal_skb,

	skb_reserve(skbn, ip_len + udp_len);
	rmnet_map_nonlinear_copy(coal_skb, ip_len + udp_len,
				 start, pkt_len, skbn);
				 start, pkt_len, pkt_count, skbn);

	/* Push UDP header and update length */
	skb_push(skbn, udp_len);
@@ -616,6 +670,10 @@ static struct sk_buff *rmnet_map_segment_udp_skb(struct sk_buff *coal_skb,
	skbn->dev = coal_skb->dev;
	priv->stats.coal.coal_reconstruct++;

	/* Stamp GSO information if necessary */
	if (pkt_count > 1)
		rmnet_map_gso_stamp(skbn, pkt_len, pkt_count);

	return skbn;
}

@@ -625,7 +683,7 @@ static struct sk_buff *rmnet_map_segment_udp_skb(struct sk_buff *coal_skb,
static struct sk_buff *rmnet_map_segment_tcp_skb(struct sk_buff *coal_skb,
						 u32 start,
						 int start_pkt_num,
						 u16 pkt_len)
						 u16 pkt_len, u8 pkt_count)
{
	struct sk_buff *skbn;
	struct iphdr *iph = (struct iphdr *)rmnet_map_data_ptr(coal_skb);
@@ -660,7 +718,7 @@ static struct sk_buff *rmnet_map_segment_tcp_skb(struct sk_buff *coal_skb,

	skb_reserve(skbn, ip_len + tcp_len);
	rmnet_map_nonlinear_copy(coal_skb, ip_len + tcp_len,
				 start, pkt_len, skbn);
				 start, pkt_len, pkt_count, skbn);

	/* Push TCP header and update sequence number */
	skb_push(skbn, tcp_len);
@@ -687,6 +745,10 @@ static struct sk_buff *rmnet_map_segment_tcp_skb(struct sk_buff *coal_skb,
	skbn->dev = coal_skb->dev;
	priv->stats.coal.coal_reconstruct++;

	/* Stamp GSO information if necessary */
	if (pkt_count > 1)
		rmnet_map_gso_stamp(skbn, pkt_len, pkt_count);

	return skbn;
}

@@ -703,7 +765,7 @@ static void rmnet_map_segment_coal_data(struct sk_buff *coal_skb,
	struct sk_buff *(*segment)(struct sk_buff *coal_skb,
				   u32 start,
				   int start_pkt_num,
				   u16 pkt_len);
				   u16 pkt_len, u8 pkt_count);
	struct iphdr *iph;
	struct rmnet_priv *priv = netdev_priv(coal_skb->dev);
	struct rmnet_map_v5_coal_header *coal_hdr;
@@ -711,7 +773,8 @@ static void rmnet_map_segment_coal_data(struct sk_buff *coal_skb,
	u16 pkt_len, ip_len, trans_len;
	u8 protocol, start_pkt_num = 0;
	u8 pkt, total_pkt = 0;
	u8 nlo;
	u8 nlo, gro_count = 0;
	bool gro = coal_skb->dev->features & NETIF_F_GRO_HW;

	/* Pull off the headers we no longer need */
	pskb_pull(coal_skb, sizeof(struct rmnet_map_header));
@@ -753,12 +816,32 @@ static void rmnet_map_segment_coal_data(struct sk_buff *coal_skb,
			nlo_err_mask <<= 1;
			if (nlo_err_mask & (1ULL << 63)) {
				priv->stats.coal.coal_csum_err++;

				/* Segment out the good data */
				if (gro && gro_count) {
					new_skb = segment(coal_skb, start,
							  start_pkt_num,
							  pkt_len, gro_count);
					if (!new_skb)
						return;

					__skb_queue_tail(list, new_skb);
					gro_count = 0;
				}

				/* skip over bad packet */
				start += pkt_len;
				start_pkt_num = total_pkt + 1;
			} else {
				gro_count++;

				/* Segment the packet if we aren't sending the
				 * larger packet up the stack.
				 */
				if (!gro) {
					new_skb = segment(coal_skb, start,
						  start_pkt_num, pkt_len);
							  start_pkt_num,
							  pkt_len, 1);
					if (!new_skb)
						return;

@@ -766,8 +849,27 @@ static void rmnet_map_segment_coal_data(struct sk_buff *coal_skb,

					start += pkt_len;
					start_pkt_num = total_pkt + 1;
					gro_count = 0;
				}
			}
		}

		/* If we're switching NLOs, we need to send out everything from
		 * the previous one, if we haven't done so. NLOs only switch
		 * when the packet length changes.
		 */
		if (gro && gro_count) {
			new_skb = segment(coal_skb, start, start_pkt_num,
					  pkt_len, gro_count);
			if (!new_skb)
				return;

			__skb_queue_tail(list, new_skb);

			start += pkt_len;
			start_pkt_num = total_pkt + 1;
			gro_count = 0;
		}
	}
}

+1 −0
Original line number Diff line number Diff line
@@ -340,6 +340,7 @@ int rmnet_vnd_newlink(u8 id, struct net_device *rmnet_dev,
	rmnet_dev->hw_features = NETIF_F_RXCSUM;
	rmnet_dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
	rmnet_dev->hw_features |= NETIF_F_SG;
	rmnet_dev->hw_features |= NETIF_F_GRO_HW;

	priv->real_dev = real_dev;