Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit bfe47078 authored by Sean Tranchetti's avatar Sean Tranchetti Committed by Gerrit - the friendly Code Review server
Browse files

net: qualcomm: rmnet: Pass on bad csum segments



When receiving coalesced frames from hardware where there are segments
marked as having invalid L4 checksums, the RmNet driver should pass these
on to the network stack instead of silently dropping them. This allows the
statistics maintained by the network stack to accurately represent the
data flow.

Change-Id: I9e520060a2854492ef158fffecfe60b0fe587265
Signed-off-by: default avatarSean Tranchetti <stranche@codeaurora.org>
parent e56fe938
Loading
Loading
Loading
Loading
+116 −45
Original line number Diff line number Diff line
@@ -381,7 +381,6 @@ static void rmnet_frag_gso_stamp(struct sk_buff *skb,
	bool ipv4 = frag_desc->ip_proto == 4;

	if (ipv4) {
		iph->tot_len = htons(skb->len);
		iph->check = 0;
		iph->check = ip_fast_csum(iph, iph->ihl);
		pseudo = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
@@ -390,8 +389,6 @@ static void rmnet_frag_gso_stamp(struct sk_buff *skb,
	} else {
		struct ipv6hdr *ip6h = (struct ipv6hdr *)iph;

		/* Payload length includes any extension headers */
		ip6h->payload_len = htons(skb->len - sizeof(*ip6h));
		pseudo = ~csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
					  pkt_len, frag_desc->trans_proto, 0);
	}
@@ -407,7 +404,6 @@ static void rmnet_frag_gso_stamp(struct sk_buff *skb,
		struct udphdr *up = (struct udphdr *)
				    ((u8 *)iph + frag_desc->ip_len);

		up->len = htons(pkt_len);
		up->check = pseudo;
		shinfo->gso_type = SKB_GSO_UDP_L4;
		skb->csum_offset = offsetof(struct udphdr, check);
@@ -432,6 +428,7 @@ static struct sk_buff *rmnet_alloc_skb(struct rmnet_frag_descriptor *frag_desc,
	/* Use the exact sizes if we know them (i.e. RSB/RSC, rmnet_perf) */
	if (frag_desc->hdrs_valid) {
		u16 hdr_len = frag_desc->ip_len + frag_desc->trans_len;
		u16 data_len = frag_desc->gso_size * frag_desc->gso_segs;

		head_skb = alloc_skb(hdr_len + RMNET_MAP_DEAGGR_HEADROOM,
				     GFP_ATOMIC);
@@ -441,9 +438,31 @@ static struct sk_buff *rmnet_alloc_skb(struct rmnet_frag_descriptor *frag_desc,
		skb_reserve(head_skb, RMNET_MAP_DEAGGR_HEADROOM);
		skb_put_data(head_skb, frag_desc->hdr_ptr, hdr_len);
		skb_reset_network_header(head_skb);
		if (frag_desc->trans_len)

		/* Update header lengths after RSB/RSC/perf */
		if (frag_desc->ip_proto == 4) {
			struct iphdr *iph = ip_hdr(head_skb);
			__be16 tot_len = htons(hdr_len + data_len);

			csum_replace2(&iph->check, iph->tot_len, tot_len);
			iph->tot_len = tot_len;
		} else {
			struct ipv6hdr *ip6h = ipv6_hdr(head_skb);

			ip6h->payload_len = htons(hdr_len + data_len -
						  sizeof(*ip6h));
		}

		if (frag_desc->trans_len) {
			skb_set_transport_header(head_skb, frag_desc->ip_len);

			if (frag_desc->trans_proto == IPPROTO_UDP) {
				struct udphdr *uh = udp_hdr(head_skb);

				uh->len = htons(data_len + sizeof(*uh));
			}
		}

		/* Packets that have no data portion don't need any frags */
		if (hdr_len == skb_frag_size(&frag_desc->frag))
			goto skip_frags;
@@ -540,8 +559,52 @@ static struct sk_buff *rmnet_alloc_skb(struct rmnet_frag_descriptor *frag_desc,
	}

	/* Handle csum offloading */
	if (frag_desc->csum_valid)
	if (frag_desc->csum_valid) {
		head_skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else if (frag_desc->hdrs_valid &&
		   (frag_desc->trans_proto == IPPROTO_TCP ||
		    frag_desc->trans_proto == IPPROTO_UDP)) {
		/* Unfortunately, we have to fake a bad checksum here, since
		 * the original bad value is lost by the hardware. The only
		 * reliable way to do it is to calculate the actual checksum
		 * and corrupt it.
		 */
		__sum16 *check;
		__wsum csum;
		unsigned int offset = skb_transport_offset(head_skb);
		__sum16 pseudo;

		/* Calculate pseudo header */
		if (frag_desc->ip_proto == 4) {
			struct iphdr *iph = ip_hdr(head_skb);

			pseudo = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
						    head_skb->len -
						    frag_desc->ip_len,
						    frag_desc->trans_proto, 0);
		} else {
			struct ipv6hdr *ip6h = ipv6_hdr(head_skb);

			pseudo = ~csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
						  head_skb->len -
						  frag_desc->ip_len,
						  frag_desc->trans_proto, 0);
		}

		if (frag_desc->trans_proto == IPPROTO_TCP)
			check = &tcp_hdr(head_skb)->check;
		else
			check = &udp_hdr(head_skb)->check;

		*check = pseudo;
		csum = skb_checksum(head_skb, offset, head_skb->len - offset,
				    0);
		/* Add 1 to corrupt. This cannot produce a final value of 0
		 * since csum_fold() can't return a value of 0xFFFF
		 */
		*check = csum16_add(csum_fold(csum), htons(1));
		head_skb->ip_summed = CHECKSUM_NONE;
	}

	/* Handle any rmnet_perf metadata */
	if (frag_desc->hash) {
@@ -574,8 +637,8 @@ EXPORT_SYMBOL(rmnet_frag_deliver);

static void __rmnet_frag_segment_data(struct rmnet_frag_descriptor *coal_desc,
				      struct rmnet_port *port,
				      struct list_head *list,
				      u8 pkt_id)
				      struct list_head *list, u8 pkt_id,
				      bool csum_valid)
{
	struct rmnet_priv *priv = netdev_priv(coal_desc->dev);
	struct rmnet_frag_descriptor *new_frag;
@@ -615,7 +678,7 @@ static void __rmnet_frag_segment_data(struct rmnet_frag_descriptor *coal_desc,
	}

	new_frag->hdr_ptr = hdr_start;
	new_frag->csum_valid = true;
	new_frag->csum_valid = csum_valid;
	priv->stats.coal.coal_reconstruct++;

	/* Update meta information to move past the data we just segmented */
@@ -749,38 +812,62 @@ rmnet_frag_segment_coal_data(struct rmnet_frag_descriptor *coal_desc,
		return;
	}

	/* Fast-forward the case where we have 1 NLO (i.e. 1 packet length),
	 * no checksum errors, and are allowing GRO. We can just reuse this
	 * descriptor unchanged.
	 */
	if (gro && coal_hdr->num_nlos == 1 && coal_hdr->csum_valid) {
		coal_desc->csum_valid = true;
		coal_desc->hdr_ptr = rmnet_frag_data_ptr(coal_desc);
		coal_desc->gso_size = ntohs(coal_hdr->nl_pairs[0].pkt_len);
		coal_desc->gso_size -= coal_desc->ip_len + coal_desc->trans_len;
		coal_desc->gso_segs = coal_hdr->nl_pairs[0].num_packets;
		list_add_tail(&coal_desc->list, list);
		return;
	}

	/* Segment the coalesced descriptor into new packets */
	for (nlo = 0; nlo < coal_hdr->num_nlos; nlo++) {
		pkt_len = ntohs(coal_hdr->nl_pairs[nlo].pkt_len);
		pkt_len -= coal_desc->ip_len + coal_desc->trans_len;
		coal_desc->gso_size = pkt_len;
		for (pkt = 0; pkt < coal_hdr->nl_pairs[nlo].num_packets;
		     pkt++, total_pkt++) {
			nlo_err_mask <<= 1;
			if (nlo_err_mask & (1ULL << 63)) {
		     pkt++, total_pkt++, nlo_err_mask >>= 1) {
			bool csum_err = nlo_err_mask & 1;

			/* Segment the packet if we're not sending the larger
			 * packet up the stack.
			 */
			if (!gro) {
				coal_desc->gso_segs = 1;
				if (csum_err)
					priv->stats.coal.coal_csum_err++;

				__rmnet_frag_segment_data(coal_desc, port,
							  list, total_pkt,
							  !csum_err);
				continue;
			}

			if (csum_err) {
				priv->stats.coal.coal_csum_err++;

				/* Segment out the good data */
				if (gro && coal_desc->gso_segs)
				if (coal_desc->gso_segs)
					__rmnet_frag_segment_data(coal_desc,
								  port,
								  list,
								  total_pkt);

				/* skip over bad packet */
				coal_desc->data_offset += pkt_len;
				coal_desc->pkt_id = total_pkt + 1;
								  total_pkt,
								  true);

				/* Segment out the bad checksum */
				coal_desc->gso_segs = 1;
				__rmnet_frag_segment_data(coal_desc, port,
							  list, total_pkt,
							  false);
			} else {
				coal_desc->gso_segs++;

				/* Segment the packet if we aren't sending the
				 * larger packet up the stack.
				 */
				if (!gro)
					__rmnet_frag_segment_data(coal_desc,
								  port,
								  list,
								  total_pkt);
			}
		}

@@ -788,25 +875,9 @@ rmnet_frag_segment_coal_data(struct rmnet_frag_descriptor *coal_desc,
		 * the previous one, if we haven't done so. NLOs only switch
		 * when the packet length changes.
		 */
		if (gro && coal_desc->gso_segs) {
			/* Fast forward the (hopefully) common case.
			 * Frames with only one NLO (i.e. one packet length) and
			 * no checksum errors don't need to be segmented here.
			 * We can just pass off the original skb.
			 */
			if (coal_desc->gso_size * coal_desc->gso_segs ==
			    skb_frag_size(&coal_desc->frag) -
			    coal_desc->ip_len - coal_desc->trans_len) {
				coal_desc->hdr_ptr =
					rmnet_frag_data_ptr(coal_desc);
				coal_desc->csum_valid = true;
				list_add_tail(&coal_desc->list, list);
				return;
			}

		if (coal_desc->gso_segs)
			__rmnet_frag_segment_data(coal_desc, port, list,
						  total_pkt);
		}
						  total_pkt, true);
	}
}

@@ -887,7 +958,7 @@ rmnet_frag_data_check_coal_header(struct rmnet_frag_descriptor *frag_desc,
		u8 err = coal_hdr->nl_pairs[i].csum_error_bitmap;
		u8 pkt = coal_hdr->nl_pairs[i].num_packets;

		mask |= ((u64)err) << (7 - i) * 8;
		mask |= ((u64)err) << (8 * i);

		/* Track total packets in frame */
		pkts += pkt;
+94 −43
Original line number Diff line number Diff line
@@ -697,10 +697,12 @@ static void rmnet_map_gso_stamp(struct sk_buff *skb,
static void
__rmnet_map_segment_coal_skb(struct sk_buff *coal_skb,
			     struct rmnet_map_coal_metadata *coal_meta,
			     struct sk_buff_head *list, u8 pkt_id)
			     struct sk_buff_head *list, u8 pkt_id,
			     bool csum_valid)
{
	struct sk_buff *skbn;
	struct rmnet_priv *priv = netdev_priv(coal_skb->dev);
	__sum16 *check = NULL;
	u32 alloc_len;

	/* We can avoid copying the data if the SKB we got from the lower-level
@@ -727,8 +729,12 @@ __rmnet_map_segment_coal_skb(struct sk_buff *coal_skb,
		struct tcphdr *th = tcp_hdr(skbn);

		th->seq = htonl(ntohl(th->seq) + coal_meta->data_offset);
		check = &th->check;
	} else if (coal_meta->trans_proto == IPPROTO_UDP) {
		udp_hdr(skbn)->len = htons(skbn->len);
		struct udphdr *uh = udp_hdr(skbn);

		uh->len = htons(skbn->len);
		check = &uh->check;
	}

	/* Push IP header and update necessary fields */
@@ -748,7 +754,44 @@ __rmnet_map_segment_coal_skb(struct sk_buff *coal_skb,
						    sizeof(struct ipv6hdr));
	}

	/* Handle checksum status */
	if (likely(csum_valid)) {
		skbn->ip_summed = CHECKSUM_UNNECESSARY;
	} else if (check) {
		/* Unfortunately, we have to fake a bad checksum here, since
		 * the original bad value is lost by the hardware. The only
		 * reliable way to do it is to calculate the actual checksum
		 * and corrupt it.
		 */
		__wsum csum;
		unsigned int offset = skb_transport_offset(skbn);
		__sum16 pseudo;

		/* Calculate pseudo header */
		if (coal_meta->ip_proto == 4) {
			struct iphdr *iph = ip_hdr(skbn);

			pseudo = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
						    skbn->len -
						    coal_meta->ip_len,
						    coal_meta->trans_proto, 0);
		} else {
			struct ipv6hdr *ip6h = ipv6_hdr(skbn);

			pseudo = ~csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
						  skbn->len - coal_meta->ip_len,
						  coal_meta->trans_proto, 0);
		}

		*check = pseudo;
		csum = skb_checksum(skbn, offset, skbn->len - offset, 0);
		/* Add 1 to corrupt. This cannot produce a final value of 0
		 * since csum_fold() can't return a value of 0xFFFF.
		 */
		*check = csum16_add(csum_fold(csum), htons(1));
		skbn->ip_summed = CHECKSUM_NONE;
	}

	skbn->dev = coal_skb->dev;
	priv->stats.coal.coal_reconstruct++;

@@ -885,39 +928,65 @@ static void rmnet_map_segment_coal_skb(struct sk_buff *coal_skb,
		return;
	}

	/* Fast-forward the case where we have 1 NLO (i.e. 1 packet length),
	 * no checksum errors, and are allowing GRO. We can just reuse this
	 * SKB unchanged.
	 */
	if (gro && coal_hdr->num_nlos == 1 && coal_hdr->csum_valid) {
		rmnet_map_move_headers(coal_skb);
		coal_skb->ip_summed = CHECKSUM_UNNECESSARY;
		coal_meta.data_len = ntohs(coal_hdr->nl_pairs[0].pkt_len);
		coal_meta.data_len -= coal_meta.ip_len + coal_meta.trans_len;
		coal_meta.pkt_count = coal_hdr->nl_pairs[0].num_packets;
		if (coal_meta.pkt_count > 1)
			rmnet_map_gso_stamp(coal_skb, &coal_meta);

		__skb_queue_tail(list, coal_skb);
		return;
	}

	/* Segment the coalesced SKB into new packets */
	for (nlo = 0; nlo < coal_hdr->num_nlos; nlo++) {
		pkt_len = ntohs(coal_hdr->nl_pairs[nlo].pkt_len);
		pkt_len -= coal_meta.ip_len + coal_meta.trans_len;
		coal_meta.data_len = pkt_len;
		for (pkt = 0; pkt < coal_hdr->nl_pairs[nlo].num_packets;
		     pkt++, total_pkt++) {
			nlo_err_mask <<= 1;
			if (nlo_err_mask & (1ULL << 63)) {
		     pkt++, total_pkt++, nlo_err_mask >>= 1) {
			bool csum_err = nlo_err_mask & 1;

			/* Segment the packet if we're not sending the larger
			 * packet up the stack.
			 */
			if (!gro) {
				coal_meta.pkt_count = 1;
				if (csum_err)
					priv->stats.coal.coal_csum_err++;

				/* Segment out the good data */
				if (gro && coal_meta.pkt_count) {
				__rmnet_map_segment_coal_skb(coal_skb,
								     &coal_meta,
								     list,
								     total_pkt);
							     &coal_meta, list,
							     total_pkt,
							     !csum_err);
				continue;
			}

				/* skip over bad packet */
				coal_meta.data_offset += pkt_len;
				coal_meta.pkt_id = total_pkt + 1;
			} else {
				coal_meta.pkt_count++;
			if (csum_err) {
				priv->stats.coal.coal_csum_err++;

				/* Segment the packet if we aren't sending the
				 * larger packet up the stack.
				 */
				if (!gro)
				/* Segment out the good data */
				if (gro && coal_meta.pkt_count)
					__rmnet_map_segment_coal_skb(coal_skb,
								     &coal_meta,
								     list,
								     total_pkt);
								     total_pkt,
								     true);

				/* Segment out the bad checksum */
				coal_meta.pkt_count = 1;
				__rmnet_map_segment_coal_skb(coal_skb,
							     &coal_meta, list,
							     total_pkt, false);
			} else {
				coal_meta.pkt_count++;
			}
		}

@@ -925,27 +994,9 @@ static void rmnet_map_segment_coal_skb(struct sk_buff *coal_skb,
		 * the previous one, if we haven't done so. NLOs only switch
		 * when the packet length changes.
		 */
		if (gro && coal_meta.pkt_count) {
			/* Fast forward the (hopefully) common case.
			 * Frames with only one NLO (i.e. one packet length) and
			 * no checksum errors don't need to be segmented here.
			 * We can just pass off the original skb.
			 */
			if (pkt_len * coal_meta.pkt_count ==
			    coal_skb->len - coal_meta.ip_len -
			    coal_meta.trans_len) {
				rmnet_map_move_headers(coal_skb);
				coal_skb->ip_summed = CHECKSUM_UNNECESSARY;
				if (coal_meta.pkt_count > 1)
					rmnet_map_gso_stamp(coal_skb,
							    &coal_meta);
				__skb_queue_tail(list, coal_skb);
				return;
			}

		if (coal_meta.pkt_count)
			__rmnet_map_segment_coal_skb(coal_skb, &coal_meta, list,
						     total_pkt);
		}
						     total_pkt, true);
	}
}

@@ -1025,7 +1076,7 @@ static int rmnet_map_data_check_coal_header(struct sk_buff *skb,
		u8 err = coal_hdr->nl_pairs[i].csum_error_bitmap;
		u8 pkt = coal_hdr->nl_pairs[i].num_packets;

		mask |= ((u64)err) << (7 - i) * 8;
		mask |= ((u64)err) << (8 * i);

		/* Track total packets in frame */
		pkts += pkt;