Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit e22ea883 authored by qctecmdr Service's avatar qctecmdr Service Committed by Gerrit - the friendly Code Review server
Browse files

Merge "net: qualcomm: rmnet: Add NETIF_F_GRO_HW support"

parents 4306f175 e4dc46df
Loading
Loading
Loading
Loading
+28 −1
Original line number Diff line number Diff line
/* Copyright (c) 2013-2014, 2016-2018 The Linux Foundation. All rights reserved.
/* Copyright (c) 2013-2014, 2016-2019 The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
@@ -20,6 +20,7 @@
#define _RMNET_CONFIG_H_

#define RMNET_MAX_LOGICAL_EP 255
#define RMNET_MAX_VEID 4

struct rmnet_endpoint {
	u8 mux_id;
@@ -88,6 +89,31 @@ struct rmnet_pcpu_stats {
	struct u64_stats_sync syncp;
};

/* Counters recording why the HW closed a coalescing frame, one per
 * MAP v5 close type/code (see rmnet_map_data_log_close_stats()).
 */
struct rmnet_coal_close_stats {
	u64 non_coal;	/* RMNET_MAP_COAL_CLOSE_NON_COAL: non-coalescable packet */
	u64 ip_miss;	/* RMNET_MAP_COAL_CLOSE_IP_MISS: L3 header mismatch */
	u64 trans_miss;	/* RMNET_MAP_COAL_CLOSE_TRANS_MISS: L4 header mismatch */
	u64 hw_nl;	/* RMNET_MAP_COAL_CLOSE_HW_NL: HW NLO limit reached */
	u64 hw_pkt;	/* RMNET_MAP_COAL_CLOSE_HW_PKT: HW packet limit reached */
	u64 hw_byte;	/* RMNET_MAP_COAL_CLOSE_HW_BYTE: HW byte limit reached */
	u64 hw_time;	/* RMNET_MAP_COAL_CLOSE_HW_TIME: HW time limit reached */
	u64 hw_evict;	/* RMNET_MAP_COAL_CLOSE_HW_EVICT: HW eviction */
	u64 coal;	/* RMNET_MAP_COAL_CLOSE_COAL: coalescable close */
};

/* Per-device RX coalescing statistics, exposed through ethtool
 * (embedded in struct rmnet_priv_stats as @coal).
 */
struct rmnet_coal_stats {
	u64 coal_rx;		/* coalesced frames received */
	u64 coal_pkts;		/* total packets carried inside coalesced frames */
	u64 coal_hdr_nlo_err;	/* frames dropped: NLO count 0 or over max */
	u64 coal_hdr_pkt_err;	/* frames dropped: packet count over max */
	u64 coal_csum_err;	/* packets with HW-flagged checksum errors */
	u64 coal_reconstruct;	/* SKBs rebuilt from coalesced data */
	u64 coal_ip_invalid;	/* frames with an unrecognized IP version */
	u64 coal_trans_invalid;	/* frames with a non-TCP/UDP transport */
	struct rmnet_coal_close_stats close;	/* close-reason breakdown */
	u64 coal_veid[RMNET_MAX_VEID];	/* per-virtual-channel packet counts */
};

struct rmnet_priv_stats {
	u64 csum_ok;
	u64 csum_valid_unset;
@@ -99,6 +125,7 @@ struct rmnet_priv_stats {
	u64 csum_skipped;
	u64 csum_sw;
	u64 csum_hw;
	struct rmnet_coal_stats coal;
};

struct rmnet_priv {
+53 −34
Original line number Diff line number Diff line
@@ -190,6 +190,18 @@ rmnet_deliver_skb_wq(struct sk_buff *skb, struct rmnet_port *port,
}
EXPORT_SYMBOL(rmnet_deliver_skb_wq);

/* Drain @head in queue order, stamping each de-coalesced skb's protocol
 * before handing it up the stack.
 */
static void rmnet_deliver_skb_list(struct sk_buff_head *head,
				   struct rmnet_port *port)
{
	struct sk_buff *cur;

	for (cur = __skb_dequeue(head); cur; cur = __skb_dequeue(head)) {
		rmnet_set_skb_proto(cur);
		rmnet_deliver_skb(cur, port);
	}
}

/* MAP handler */

static void
@@ -198,9 +210,13 @@ __rmnet_map_ingress_handler(struct sk_buff *skb,
{
	struct rmnet_map_header *qmap;
	struct rmnet_endpoint *ep;
	struct sk_buff_head list;
	u16 len, pad;
	u8 mux_id;

	/* We don't need the spinlock since only we touch this */
	__skb_queue_head_init(&list);

	qmap = (struct rmnet_map_header *)rmnet_map_data_ptr(skb);
	if (qmap->cd_bit) {
		if (port->data_format & RMNET_INGRESS_FORMAT_DL_MARKER) {
@@ -231,25 +247,27 @@ __rmnet_map_ingress_handler(struct sk_buff *skb,
	if (qmap->next_hdr &&
	    (port->data_format & (RMNET_FLAGS_INGRESS_COALESCE |
				  RMNET_FLAGS_INGRESS_MAP_CKSUMV5))) {
		if (rmnet_map_process_next_hdr_packet(skb))
		if (rmnet_map_process_next_hdr_packet(skb, &list))
			goto free_skb;
	} else {
		/* We only have the main QMAP header to worry about */
		pskb_pull(skb, sizeof(*qmap));
	}

	rmnet_set_skb_proto(skb);

		if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4) {
			if (!rmnet_map_checksum_downlink_packet(skb, len + pad))
				skb->ip_summed = CHECKSUM_UNNECESSARY;
		}

		pskb_trim(skb, len);

		/* Push the single packet onto the list */
		__skb_queue_tail(&list, skb);
	}

	if (port->data_format & RMNET_INGRESS_FORMAT_PS)
		qmi_rmnet_work_maybe_restart(port);

	pskb_trim(skb, len);
	rmnet_deliver_skb(skb, port);
	rmnet_deliver_skb_list(&list, port);
	return;

free_skb:
@@ -264,6 +282,9 @@ static void
rmnet_map_ingress_handler(struct sk_buff *skb,
			  struct rmnet_port *port)
{
	struct sk_buff *skbn;
	int (*rmnet_perf_core_deaggregate)(struct sk_buff *skb,
					   struct rmnet_port *port);

	if (skb->dev->type == ARPHRD_ETHER) {
		if (pskb_expand_head(skb, ETH_HLEN, 0, GFP_KERNEL)) {
@@ -274,35 +295,33 @@ rmnet_map_ingress_handler(struct sk_buff *skb,
		skb_push(skb, ETH_HLEN);
	}

	if (port->data_format & RMNET_FLAGS_INGRESS_DEAGGREGATION) {
		int (*rmnet_perf_core_deaggregate)(struct sk_buff *skb,
						   struct rmnet_port *port);
		/* Deaggregation and freeing of HW originating
		 * buffers is done within here
		 */
		rmnet_perf_core_deaggregate =
					rcu_dereference(rmnet_perf_deag_entry);
	/* No aggregation. Pass the frame on as is */
	if (!(port->data_format & RMNET_FLAGS_INGRESS_DEAGGREGATION)) {
		__rmnet_map_ingress_handler(skb, port);
		return;
	}

	/* Pass off handling to rmnet_perf module, if present */
	rmnet_perf_core_deaggregate = rcu_dereference(rmnet_perf_deag_entry);
	if (rmnet_perf_core_deaggregate) {
		rmnet_perf_core_deaggregate(skb, port);
		} else {
			struct sk_buff *skbn;
		return;
	}

	/* Deaggregation and freeing of HW originating
	 * buffers is done within here
	 */
	while (skb) {
				struct sk_buff *skb_frag =
						skb_shinfo(skb)->frag_list;
		struct sk_buff *skb_frag = skb_shinfo(skb)->frag_list;

		skb_shinfo(skb)->frag_list = NULL;
				while ((skbn = rmnet_map_deaggregate(skb, port))
					!= NULL)
		while ((skbn = rmnet_map_deaggregate(skb, port)) != NULL)
			__rmnet_map_ingress_handler(skbn, port);

		consume_skb(skb);
		skb = skb_frag;
	}
}
	} else {
		__rmnet_map_ingress_handler(skb, port);
	}
}

static int rmnet_map_egress_handler(struct sk_buff *skb,
				    struct rmnet_port *port, u8 mux_id,
+2 −1
Original line number Diff line number Diff line
@@ -247,7 +247,8 @@ int rmnet_map_checksum_downlink_packet(struct sk_buff *skb, u16 len);
void rmnet_map_checksum_uplink_packet(struct sk_buff *skb,
				      struct net_device *orig_dev,
				      int csum_type);
int rmnet_map_process_next_hdr_packet(struct sk_buff *skb);
int rmnet_map_process_next_hdr_packet(struct sk_buff *skb,
				      struct sk_buff_head *list);
int rmnet_map_tx_agg_skip(struct sk_buff *skb, int offset);
void rmnet_map_tx_aggregate(struct sk_buff *skb, struct rmnet_port *port);
void rmnet_map_tx_aggregate_init(struct rmnet_port *port);
+461 −1
Original line number Diff line number Diff line
@@ -526,13 +526,472 @@ void rmnet_map_checksum_uplink_packet(struct sk_buff *skb,
	}
}

/* Move @pkt_len * @pkt_count bytes of payload from @coal_skb into @dest.
 *
 * @coal_skb: source coalesced frame
 * @hdr_len:  bytes of shared L3+L4 header at the front of the payload area
 * @start:    byte offset of the wanted run within the payload area
 * @pkt_len:  length of each constituent packet's payload
 * @pkt_count: number of consecutive packets to take
 * @dest:     freshly allocated skb to receive the data
 *
 * Nonlinear sources are handled zero-copy by referencing the page of the
 * first fragment; linear sources are copied with skb_put_data().
 * NOTE(review): the nonlinear path assumes the whole run lives in frag 0 —
 * confirm the lower-level driver guarantees single-frag coalesced buffers.
 */
static void rmnet_map_nonlinear_copy(struct sk_buff *coal_skb,
				     u32 hdr_len,
				     u32 start,
				     u16 pkt_len, u8 pkt_count,
				     struct sk_buff *dest)
{
	unsigned char *data_start = rmnet_map_data_ptr(coal_skb) + hdr_len;
	u32 copy_len = pkt_len * pkt_count;

	if (skb_is_nonlinear(coal_skb)) {
		skb_frag_t *frag0 = skb_shinfo(coal_skb)->frags;
		struct page *page = skb_frag_page(frag0);

		/* Share the page with @dest instead of copying; account the
		 * extra bytes on the skb by hand since we bypass skb_put()
		 */
		skb_append_pagefrags(dest, page,
				     frag0->page_offset + hdr_len + start,
				     copy_len);
		dest->data_len += copy_len;
		dest->len += copy_len;
	} else {
		skb_put_data(dest, data_start + start, copy_len);
	}
}

/* Fill in GSO metadata to allow the SKB to be segmented by the NW stack
 * if needed (i.e. forwarding, UDP GRO)
 *
 * @skb: reconstructed segment; network and transport headers must already
 *       be set (the rmnet_map_segment_*_skb() callers reset them first)
 * @gso_size: payload bytes per segment
 * @gso_segs: number of packets coalesced into @skb
 */
static void rmnet_map_gso_stamp(struct sk_buff *skb, u16 gso_size, u8 gso_segs)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	struct iphdr *iph = ip_hdr(skb);
	void *addr;
	__sum16 *check;
	__wsum partial;
	int csum_len;
	/* NOTE(review): u16 product — confirm HW bounds keep this from wrapping */
	u16 pkt_len = gso_size * gso_segs;
	u8 protocol;
	bool ipv4 = iph->version == 4;

	/* Gather the pseudo-header inputs: src+dst addresses and L4 protocol */
	if (ipv4) {
		addr = &iph->saddr;
		csum_len = sizeof(iph->saddr) * 2;	/* saddr + daddr */
		protocol = iph->protocol;
	} else {
		struct ipv6hdr *ip6h = ipv6_hdr(skb);

		addr = &ip6h->saddr;
		csum_len = sizeof(ip6h->saddr) * 2;	/* saddr + daddr */
		protocol = ip6h->nexthdr;
	}

	/* Pick the L4 checksum field and GSO type; extend pkt_len to cover
	 * the transport header, as the pseudo-header length requires
	 */
	if (protocol == IPPROTO_TCP) {
		struct tcphdr *tp = tcp_hdr(skb);

		pkt_len += tp->doff * 4;
		check = &tp->check;
		shinfo->gso_type = (ipv4) ? SKB_GSO_TCPV4 : SKB_GSO_TCPV6;
		skb->csum_offset = offsetof(struct tcphdr, check);
	} else {
		struct udphdr *up = udp_hdr(skb);

		pkt_len += sizeof(*up);
		check = &up->check;
		shinfo->gso_type = SKB_GSO_UDP_L4;
		skb->csum_offset = offsetof(struct udphdr, check);
	}

	/* Seed the L4 checksum field with the pseudo-header sum so the stack
	 * can complete it from csum_start/csum_offset (CHECKSUM_PARTIAL)
	 */
	partial = csum_partial(addr, csum_len, 0);
	partial = csum16_add(partial, htons((u16)protocol));
	partial = csum16_add(partial, htons(pkt_len));
	*check = ~csum_fold(partial);

	skb->ip_summed = CHECKSUM_PARTIAL;
	skb->csum_start = skb_transport_header(skb) - skb->head;
	shinfo->gso_size = gso_size;
	shinfo->gso_segs = gso_segs;
}

/* Create a new UDP SKB from the coalesced SKB. Appropriate IP and UDP headers
 * will be added.
 *
 * @coal_skb: source frame, already pulled to its (shared) IP header
 * @start: byte offset of the payload run within the coalesced payload area
 * @start_pkt_num: index of the first packet; advances the IPv4 ID field
 * @pkt_len: payload length of each constituent packet
 * @pkt_count: packets merged into the new SKB (> 1 triggers GSO stamping)
 *
 * Returns the reconstructed SKB, or NULL on invalid IP version or
 * allocation failure.
 */
static struct sk_buff *rmnet_map_segment_udp_skb(struct sk_buff *coal_skb,
						 u32 start,
						 int start_pkt_num,
						 u16 pkt_len, u8 pkt_count)
{
	struct sk_buff *skbn;
	struct iphdr *iph = (struct iphdr *)rmnet_map_data_ptr(coal_skb);
	struct rmnet_priv *priv = netdev_priv(coal_skb->dev);
	struct udphdr *uh;
	u32 alloc_len;
	u16 ip_len, udp_len = sizeof(*uh);

	if (iph->version == 4) {
		ip_len = iph->ihl * 4;
	} else if (iph->version == 6) {
		ip_len = sizeof(struct ipv6hdr);
	} else {
		priv->stats.coal.coal_ip_invalid++;
		return NULL;
	}

	uh = (struct udphdr *)(rmnet_map_data_ptr(coal_skb) + ip_len);

	/* We can avoid copying the data if the SKB we got from the lower-level
	 * drivers was nonlinear.
	 */
	if (skb_is_nonlinear(coal_skb))
		alloc_len = ip_len + udp_len;
	else
		alloc_len = ip_len + udp_len + pkt_len;

	skbn = alloc_skb(alloc_len, GFP_ATOMIC);
	if (!skbn)
		return NULL;

	/* Leave headroom for the headers we push below */
	skb_reserve(skbn, ip_len + udp_len);
	rmnet_map_nonlinear_copy(coal_skb, ip_len + udp_len,
				 start, pkt_len, pkt_count, skbn);

	/* Push UDP header and update length */
	skb_push(skbn, udp_len);
	memcpy(skbn->data, uh, udp_len);
	skb_reset_transport_header(skbn);
	udp_hdr(skbn)->len = htons(skbn->len);

	/* Push IP header and update necessary fields */
	skb_push(skbn, ip_len);
	memcpy(skbn->data, iph, ip_len);
	skb_reset_network_header(skbn);
	if (iph->version == 4) {
		iph = ip_hdr(skbn);
		/* Each reconstructed packet gets a distinct, increasing ID */
		iph->id = htons(ntohs(iph->id) + start_pkt_num);
		iph->tot_len = htons(skbn->len);
		iph->check = 0;
		iph->check = ip_fast_csum(iph, iph->ihl);
	} else {
		ipv6_hdr(skbn)->payload_len = htons(skbn->len -
						    ip_len);
	}

	/* HW already validated the checksum for packets reaching this path */
	skbn->ip_summed = CHECKSUM_UNNECESSARY;
	skbn->dev = coal_skb->dev;
	priv->stats.coal.coal_reconstruct++;

	/* Stamp GSO information if necessary */
	if (pkt_count > 1)
		rmnet_map_gso_stamp(skbn, pkt_len, pkt_count);

	return skbn;
}

/* Create a new TCP SKB from the coalesced SKB. Appropriate IP and TCP headers
 * will be added.
 *
 * @coal_skb: source frame, already pulled to its (shared) IP header
 * @start: byte offset of the payload run; also advances the TCP sequence
 * @start_pkt_num: index of the first packet; advances the IPv4 ID field
 * @pkt_len: payload length of each constituent packet
 * @pkt_count: packets merged into the new SKB (> 1 triggers GSO stamping)
 *
 * Returns the reconstructed SKB, or NULL on invalid IP version or
 * allocation failure.
 */
static struct sk_buff *rmnet_map_segment_tcp_skb(struct sk_buff *coal_skb,
						 u32 start,
						 int start_pkt_num,
						 u16 pkt_len, u8 pkt_count)
{
	struct sk_buff *skbn;
	struct iphdr *iph = (struct iphdr *)rmnet_map_data_ptr(coal_skb);
	struct rmnet_priv *priv = netdev_priv(coal_skb->dev);
	struct tcphdr *th;
	u32 alloc_len;
	u16 ip_len, tcp_len;

	if (iph->version == 4) {
		ip_len = iph->ihl * 4;
	} else if (iph->version == 6) {
		ip_len = sizeof(struct ipv6hdr);
	} else {
		priv->stats.coal.coal_ip_invalid++;
		return NULL;
	}

	th = (struct tcphdr *)(rmnet_map_data_ptr(coal_skb) + ip_len);
	tcp_len = th->doff * 4;

	/* We can avoid copying the data if the SKB we got from the lower-level
	 * drivers was nonlinear.
	 */
	if (skb_is_nonlinear(coal_skb))
		alloc_len = ip_len + tcp_len;
	else
		alloc_len = ip_len + tcp_len + pkt_len;

	skbn = alloc_skb(alloc_len, GFP_ATOMIC);
	if (!skbn)
		return NULL;

	/* Leave headroom for the headers we push below */
	skb_reserve(skbn, ip_len + tcp_len);
	rmnet_map_nonlinear_copy(coal_skb, ip_len + tcp_len,
				 start, pkt_len, pkt_count, skbn);

	/* Push TCP header and update sequence number */
	skb_push(skbn, tcp_len);
	memcpy(skbn->data, th, tcp_len);
	skb_reset_transport_header(skbn);
	th = tcp_hdr(skbn);
	th->seq = htonl(ntohl(th->seq) + start);

	/* Push IP header and update necessary fields */
	skb_push(skbn, ip_len);
	memcpy(skbn->data, iph, ip_len);
	skb_reset_network_header(skbn);
	if (iph->version == 4) {
		iph = ip_hdr(skbn);
		/* Each reconstructed packet gets a distinct, increasing ID */
		iph->id = htons(ntohs(iph->id) + start_pkt_num);
		iph->tot_len = htons(skbn->len);
		iph->check = 0;
		iph->check = ip_fast_csum(iph, iph->ihl);
	} else {
		ipv6_hdr(skbn)->payload_len = htons(skbn->len - ip_len);
	}

	/* HW already validated the checksum for packets reaching this path */
	skbn->ip_summed = CHECKSUM_UNNECESSARY;
	skbn->dev = coal_skb->dev;
	priv->stats.coal.coal_reconstruct++;

	/* Stamp GSO information if necessary */
	if (pkt_count > 1)
		rmnet_map_gso_stamp(skbn, pkt_len, pkt_count);

	return skbn;
}

/* Converts the coalesced SKB into a list of SKBs.
 * NLOs containing csum errors will not be included.
 * The original coalesced SKB should be treated as invalid and
 * must be freed by the caller
 *
 * @coal_skb: the coalesced frame (MAP + coal headers still in place)
 * @nlo_err_mask: per-packet checksum-error bits, one NLO's byte per
 *                group of 8, MSB side first (built by
 *                rmnet_map_data_check_coal_header())
 * @list: output queue receiving the reconstructed SKBs
 *
 * With NETIF_F_GRO_HW enabled, consecutive error-free packets within an
 * NLO are delivered as one large GSO-stamped SKB; otherwise each packet
 * is segmented individually.
 */
static void rmnet_map_segment_coal_data(struct sk_buff *coal_skb,
					u64 nlo_err_mask,
					struct sk_buff_head *list)
{
	struct sk_buff *new_skb;
	struct sk_buff *(*segment)(struct sk_buff *coal_skb,
				   u32 start,
				   int start_pkt_num,
				   u16 pkt_len, u8 pkt_count);
	struct iphdr *iph;
	struct rmnet_priv *priv = netdev_priv(coal_skb->dev);
	struct rmnet_map_v5_coal_header *coal_hdr;
	u32 start = 0;
	u16 pkt_len, ip_len, trans_len;
	u8 protocol, start_pkt_num = 0;
	u8 pkt, total_pkt = 0;
	u8 nlo, gro_count = 0;
	bool gro = coal_skb->dev->features & NETIF_F_GRO_HW;

	/* Pull off the headers we no longer need */
	pskb_pull(coal_skb, sizeof(struct rmnet_map_header));
	coal_hdr = (struct rmnet_map_v5_coal_header *)
		   rmnet_map_data_ptr(coal_skb);
	pskb_pull(coal_skb, sizeof(*coal_hdr));

	iph = (struct iphdr *)rmnet_map_data_ptr(coal_skb);

	/* All packets in the frame share one L3/L4 header; parse it once */
	if (iph->version == 4) {
		protocol = iph->protocol;
		ip_len = iph->ihl * 4;
	} else if (iph->version == 6) {
		protocol = ((struct ipv6hdr *)iph)->nexthdr;
		ip_len = sizeof(struct ipv6hdr);
	} else {
		priv->stats.coal.coal_ip_invalid++;
		return;
	}

	/* Choose the protocol-specific segmenter */
	if (protocol == IPPROTO_TCP) {
		struct tcphdr *th = (struct tcphdr *)
				    ((unsigned char *)iph + ip_len);
		trans_len = th->doff * 4;
		segment = rmnet_map_segment_tcp_skb;
	} else if (protocol == IPPROTO_UDP) {
		trans_len = sizeof(struct udphdr);
		segment = rmnet_map_segment_udp_skb;
	} else {
		priv->stats.coal.coal_trans_invalid++;
		return;
	}

	for (nlo = 0; nlo < coal_hdr->num_nlos; nlo++) {
		/* NLO pkt_len includes the headers; keep payload length only */
		pkt_len = ntohs(coal_hdr->nl_pairs[nlo].pkt_len);
		pkt_len -= ip_len + trans_len;
		for (pkt = 0; pkt < coal_hdr->nl_pairs[nlo].num_packets;
		     pkt++, total_pkt++) {
			/* Consume one error bit per packet: shift first,
			 * then test the new top bit of the mask
			 */
			nlo_err_mask <<= 1;
			if (nlo_err_mask & (1ULL << 63)) {
				priv->stats.coal.coal_csum_err++;

				/* Segment out the good data */
				if (gro && gro_count) {
					new_skb = segment(coal_skb, start,
							  start_pkt_num,
							  pkt_len, gro_count);
					if (!new_skb)
						return;

					__skb_queue_tail(list, new_skb);
					gro_count = 0;
				}

				/* skip over bad packet */
				start += pkt_len;
				start_pkt_num = total_pkt + 1;
			} else {
				gro_count++;

				/* Segment the packet if we aren't sending the
				 * larger packet up the stack.
				 */
				if (!gro) {
					new_skb = segment(coal_skb, start,
							  start_pkt_num,
							  pkt_len, 1);
					if (!new_skb)
						return;

					__skb_queue_tail(list, new_skb);

					start += pkt_len;
					start_pkt_num = total_pkt + 1;
					gro_count = 0;
				}
			}
		}

		/* If we're switching NLOs, we need to send out everything from
		 * the previous one, if we haven't done so. NLOs only switch
		 * when the packet length changes.
		 */
		if (gro && gro_count) {
			new_skb = segment(coal_skb, start, start_pkt_num,
					  pkt_len, gro_count);
			if (!new_skb)
				return;

			__skb_queue_tail(list, new_skb);

			start += pkt_len;
			start_pkt_num = total_pkt + 1;
			gro_count = 0;
		}
	}
}

/* Bump the ethtool counter matching this frame's close reason.
 * @type selects the close category; @code refines HW-initiated closes.
 * Unrecognized values are ignored.
 */
static void rmnet_map_data_log_close_stats(struct rmnet_priv *priv, u8 type,
					   u8 code)
{
	struct rmnet_coal_close_stats *stats = &priv->stats.coal.close;

	if (type == RMNET_MAP_COAL_CLOSE_NON_COAL) {
		stats->non_coal++;
	} else if (type == RMNET_MAP_COAL_CLOSE_IP_MISS) {
		stats->ip_miss++;
	} else if (type == RMNET_MAP_COAL_CLOSE_TRANS_MISS) {
		stats->trans_miss++;
	} else if (type == RMNET_MAP_COAL_CLOSE_COAL) {
		stats->coal++;
	} else if (type == RMNET_MAP_COAL_CLOSE_HW) {
		/* HW closes carry a secondary code naming the limit hit */
		if (code == RMNET_MAP_COAL_CLOSE_HW_NL)
			stats->hw_nl++;
		else if (code == RMNET_MAP_COAL_CLOSE_HW_PKT)
			stats->hw_pkt++;
		else if (code == RMNET_MAP_COAL_CLOSE_HW_BYTE)
			stats->hw_byte++;
		else if (code == RMNET_MAP_COAL_CLOSE_HW_TIME)
			stats->hw_time++;
		else if (code == RMNET_MAP_COAL_CLOSE_HW_EVICT)
			stats->hw_evict++;
	}
}

/* Check if the coalesced header has any incorrect values, in which case, the
 * entire coalesced skb must be dropped. Then check if there are any
 * checksum issues
 *
 * @skb: the coalesced frame (MAP header still at the front)
 * @nlo_err_mask: out-param; NLO i's csum_error_bitmap lands in the i-th
 *                most significant byte of the 64-bit mask
 *
 * Returns 0 on success, -EINVAL on an invalid NLO or packet count.
 * Also updates the coal_pkts, close-reason, and VEID ethtool counters.
 */
static int rmnet_map_data_check_coal_header(struct sk_buff *skb,
					    u64 *nlo_err_mask)
{
	struct rmnet_map_v5_coal_header *coal_hdr;
	unsigned char *data = rmnet_map_data_ptr(skb);
	struct rmnet_priv *priv = netdev_priv(skb->dev);
	u64 mask = 0;
	int i;
	u8 veid;
	/* u32, not u8: summing up to RMNET_MAP_V5_MAX_NLOS per-NLO u8
	 * counts could wrap a u8 accumulator between bound checks and let
	 * an over-limit frame slip past the MAX_PACKETS test below.
	 */
	u32 pkts = 0;

	coal_hdr = ((struct rmnet_map_v5_coal_header *)
		    (data + sizeof(struct rmnet_map_header)));
	veid = coal_hdr->virtual_channel_id;

	/* A frame must carry at least one NLO and no more than the max */
	if (coal_hdr->num_nlos == 0 ||
	    coal_hdr->num_nlos > RMNET_MAP_V5_MAX_NLOS) {
		priv->stats.coal.coal_hdr_nlo_err++;
		return -EINVAL;
	}

	for (i = 0; i < RMNET_MAP_V5_MAX_NLOS; i++) {
		/* If there is a checksum issue, we need to split
		 * up the skb. Rebuild the full csum error field
		 */
		u8 err = coal_hdr->nl_pairs[i].csum_error_bitmap;
		u8 pkt = coal_hdr->nl_pairs[i].num_packets;

		mask |= ((u64)err) << (7 - i) * 8;

		/* Track total packets in frame */
		pkts += pkt;
		if (pkts > RMNET_MAP_V5_MAX_PACKETS) {
			priv->stats.coal.coal_hdr_pkt_err++;
			return -EINVAL;
		}
	}

	/* Track number of packets we get inside of coalesced frames */
	priv->stats.coal.coal_pkts += pkts;

	/* Update ethtool stats */
	rmnet_map_data_log_close_stats(priv,
				       coal_hdr->close_type,
				       coal_hdr->close_value);
	if (veid < RMNET_MAX_VEID)
		priv->stats.coal.coal_veid[veid]++;

	*nlo_err_mask = mask;

	return 0;
}

/* Process a QMAPv5 packet header */
int rmnet_map_process_next_hdr_packet(struct sk_buff *skb)
int rmnet_map_process_next_hdr_packet(struct sk_buff *skb,
				      struct sk_buff_head *list)
{
	struct rmnet_priv *priv = netdev_priv(skb->dev);
	u64 nlo_err_mask;
	int rc = 0;

	switch (rmnet_map_get_next_hdr_type(skb)) {
	case RMNET_MAP_HEADER_TYPE_COALESCING:
		priv->stats.coal.coal_rx++;
		rc = rmnet_map_data_check_coal_header(skb, &nlo_err_mask);
		if (rc)
			return rc;

		rmnet_map_segment_coal_data(skb, nlo_err_mask, list);
		consume_skb(skb);
		break;
	case RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD:
		if (rmnet_map_get_csum_valid(skb)) {
			priv->stats.csum_ok++;
@@ -544,6 +1003,7 @@ int rmnet_map_process_next_hdr_packet(struct sk_buff *skb)
		pskb_pull(skb,
			  (sizeof(struct rmnet_map_header) +
			   sizeof(struct rmnet_map_v5_csum_header)));
		__skb_queue_tail(list, skb);
		break;
	default:
		rc = -EINVAL;
+22 −0
Original line number Diff line number Diff line
@@ -198,6 +198,27 @@ static const char rmnet_gstrings_stats[][ETH_GSTRING_LEN] = {
	"Checksum skipped",
	"Checksum computed in software",
	"Checksum computed in hardware",
	"Coalescing packets received",
	"Coalesced packets",
	"Coalescing header NLO errors",
	"Coalescing header pcount errors",
	"Coalescing checksum errors",
	"Coalescing packet reconstructs",
	"Coalescing IP version invalid",
	"Coalescing L4 header invalid",
	"Coalescing close Non-coalescable",
	"Coalescing close L3 mismatch",
	"Coalescing close L4 mismatch",
	"Coalescing close HW NLO limit",
	"Coalescing close HW packet limit",
	"Coalescing close HW byte limit",
	"Coalescing close HW time limit",
	"Coalescing close HW eviction",
	"Coalescing close Coalescable",
	"Coalescing packets over VEID0",
	"Coalescing packets over VEID1",
	"Coalescing packets over VEID2",
	"Coalescing packets over VEID3",
};

static const char rmnet_port_gstrings_stats[][ETH_GSTRING_LEN] = {
@@ -319,6 +340,7 @@ int rmnet_vnd_newlink(u8 id, struct net_device *rmnet_dev,
	rmnet_dev->hw_features = NETIF_F_RXCSUM;
	rmnet_dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
	rmnet_dev->hw_features |= NETIF_F_SG;
	rmnet_dev->hw_features |= NETIF_F_GRO_HW;

	priv->real_dev = real_dev;