
Commit d0d2d0bf authored by Sean Tranchetti

net: qualcomm: rmnet: Add support for QMAPv5 checksum offload



Hardware can now perform full checksum validation for IP packets. As such,
we can simply mark all incoming SKBs as CHECKSUM_UNNECESSARY if they pass
validation in the new QMAPv5 format. Outgoing SKBs will now have a new
checksum offload header added as well to request offload from the lower
level drivers.

Change-Id: I39636e83de6a96c4c06d48b885bef037817e9379
Signed-off-by: Sean Tranchetti <stranche@codeaurora.org>
parent 6eaa043b
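
For context, the diff below programs a per-packet QMAPv5 checksum offload header that sits between the MAP header and the IP packet. A minimal sketch of its layout is shown here; the field names (next_hdr, header_type, csum_valid_required) come from the accessors added in this commit, while the exact bit widths, the extra reserved fields, and the little-endian bitfield ordering are assumptions modelled on comparable rmnet MAP definitions, not part of this diff.

struct rmnet_map_v5_csum_header {	/* sketch only, see note above */
	u8 next_hdr:1;			/* another MAPv5 header follows this one */
	u8 header_type:7;		/* RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD */
	u8 hw_reserved:7;		/* assumed reserved bits */
	u8 csum_valid_required:1;	/* UL: request offload; DL: checksum verified */
	__be16 reserved;		/* assumed reserved word */
} __aligned(1);

On ingress, rmnet_map_process_next_hdr_packet() checks csum_valid_required and, if set, marks the skb CHECKSUM_UNNECESSARY; on egress, rmnet_map_v5_checksum_uplink_packet() sets the bit to ask the lower-level driver to compute the transport checksum.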
+19 −6
@@ -162,8 +162,15 @@ __rmnet_map_ingress_handler(struct sk_buff *skb,
 
 	skb->dev = ep->egress_dev;
 
-	/* Subtract MAP header */
-	pskb_pull(skb, sizeof(struct rmnet_map_header));
+	/* Handle QMAPv5 packet */
+	if (qmap->next_hdr) {
+		if (rmnet_map_process_next_hdr_packet(skb))
+			goto free_skb;
+	} else {
+		/* We only have the main QMAP header to worry about */
+		pskb_pull(skb, sizeof(*qmap));
+	}
+
 	rmnet_set_skb_proto(skb);
 
 	if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4) {
@@ -236,17 +243,23 @@ static int rmnet_map_egress_handler(struct sk_buff *skb,
 				    struct rmnet_port *port, u8 mux_id,
 				    struct net_device *orig_dev)
 {
-	int required_headroom, additional_header_len;
+	int required_headroom, additional_header_len, csum_type;
 	struct rmnet_map_header *map_header;
 
 	additional_header_len = 0;
 	required_headroom = sizeof(struct rmnet_map_header);
+	csum_type = 0;
 
 	if (port->data_format & RMNET_FLAGS_EGRESS_MAP_CKSUMV4) {
 		additional_header_len = sizeof(struct rmnet_map_ul_csum_header);
-		required_headroom += additional_header_len;
+		csum_type = RMNET_FLAGS_EGRESS_MAP_CKSUMV4;
+	} else if (port->data_format & RMNET_FLAGS_EGRESS_MAP_CKSUMV5) {
+		additional_header_len = sizeof(struct rmnet_map_v5_csum_header);
+		csum_type = RMNET_FLAGS_EGRESS_MAP_CKSUMV5;
 	}
 
+	required_headroom += additional_header_len;
+
 	if (skb_headroom(skb) < required_headroom) {
 		if (pskb_expand_head(skb, required_headroom, 0, GFP_ATOMIC))
 			return -ENOMEM;
@@ -257,8 +270,8 @@ static int rmnet_map_egress_handler(struct sk_buff *skb,
 		qmi_rmnet_work_maybe_restart(port);
 #endif
 
-	if (port->data_format & RMNET_FLAGS_EGRESS_MAP_CKSUMV4)
-		rmnet_map_checksum_uplink_packet(skb, orig_dev);
+	if (csum_type)
+		rmnet_map_checksum_uplink_packet(skb, orig_dev, csum_type);
 
 	map_header = rmnet_map_add_map_header(skb, additional_header_len, 0);
 	if (!map_header)
+19 −1
@@ -212,6 +212,22 @@ rmnet_map_get_cmd_start(struct sk_buff *skb)
 	return (struct rmnet_map_control_command *)data;
 }
 
+static inline u8 rmnet_map_get_next_hdr_type(struct sk_buff *skb)
+{
+	unsigned char *data = rmnet_map_data_ptr(skb);
+
+	data += sizeof(struct rmnet_map_header);
+	return ((struct rmnet_map_v5_coal_header *)data)->header_type;
+}
+
+static inline bool rmnet_map_get_csum_valid(struct sk_buff *skb)
+{
+	unsigned char *data = rmnet_map_data_ptr(skb);
+
+	data += sizeof(struct rmnet_map_header);
+	return ((struct rmnet_map_v5_csum_header *)data)->csum_valid_required;
+}
+
 struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb,
 				      struct rmnet_port *port);
 struct rmnet_map_header *rmnet_map_add_map_header(struct sk_buff *skb,
@@ -219,7 +235,9 @@ struct rmnet_map_header *rmnet_map_add_map_header(struct sk_buff *skb,
 void rmnet_map_command(struct sk_buff *skb, struct rmnet_port *port);
 int rmnet_map_checksum_downlink_packet(struct sk_buff *skb, u16 len);
 void rmnet_map_checksum_uplink_packet(struct sk_buff *skb,
-				      struct net_device *orig_dev);
+				      struct net_device *orig_dev,
+				      int csum_type);
+int rmnet_map_process_next_hdr_packet(struct sk_buff *skb);
 int rmnet_map_tx_agg_skip(struct sk_buff *skb, int offset);
 void rmnet_map_tx_aggregate(struct sk_buff *skb, struct rmnet_port *port);
 void rmnet_map_tx_aggregate_init(struct rmnet_port *port);
+102 −5
@@ -269,6 +269,7 @@ struct rmnet_map_header *rmnet_map_add_map_header(struct sk_buff *skb,
 						  int hdrlen, int pad)
 {
 	struct rmnet_map_header *map_header;
+	struct rmnet_port *port = rmnet_get_port(skb->dev);
 	u32 padding, map_datalen;
 	u8 *padbytes;
 
@@ -277,6 +278,10 @@ struct rmnet_map_header *rmnet_map_add_map_header(struct sk_buff *skb,
 			skb_push(skb, sizeof(struct rmnet_map_header));
 	memset(map_header, 0, sizeof(struct rmnet_map_header));
 
+	/* Set next_hdr bit for csum offload packets */
+	if (port->data_format & RMNET_FLAGS_EGRESS_MAP_CKSUMV5)
+		map_header->next_hdr = 1;
+
 	if (pad == RMNET_MAP_NO_PAD_BYTES) {
 		map_header->pkt_len = htons(map_datalen);
 		return map_header;
@@ -322,6 +327,8 @@ struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb,
 
 	if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4)
 		packet_len += sizeof(struct rmnet_map_dl_csum_trailer);
+	else if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV5)
+		packet_len += sizeof(struct rmnet_map_v5_csum_header);
 
 	if (((int)skb->len - (int)packet_len) < 0)
 		return NULL;
@@ -400,10 +407,7 @@ int rmnet_map_checksum_downlink_packet(struct sk_buff *skb, u16 len)
 }
 EXPORT_SYMBOL(rmnet_map_checksum_downlink_packet);
 
-/* Generates UL checksum meta info header for IPv4 and IPv6 over TCP and UDP
- * packets that are supported for UL checksum offload.
- */
-void rmnet_map_checksum_uplink_packet(struct sk_buff *skb,
+void rmnet_map_v4_checksum_uplink_packet(struct sk_buff *skb,
 					 struct net_device *orig_dev)
 {
 	struct rmnet_priv *priv = netdev_priv(orig_dev);
@@ -448,6 +452,99 @@ void rmnet_map_checksum_uplink_packet(struct sk_buff *skb,
 	priv->stats.csum_sw++;
 }
 
+void rmnet_map_v5_checksum_uplink_packet(struct sk_buff *skb,
+					 struct net_device *orig_dev)
+{
+	struct rmnet_priv *priv = netdev_priv(orig_dev);
+	struct rmnet_map_v5_csum_header *ul_header;
+
+	ul_header = (struct rmnet_map_v5_csum_header *)
+		    skb_push(skb, sizeof(*ul_header));
+	memset(ul_header, 0, sizeof(*ul_header));
+	ul_header->header_type = RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD;
+
+	if (skb->ip_summed == CHECKSUM_PARTIAL) {
+		void *iph = (char *)ul_header + sizeof(*ul_header);
+		void *trans;
+		__sum16 *check;
+		u8 proto;
+
+		if (skb->protocol == htons(ETH_P_IP)) {
+			u16 ip_len = ((struct iphdr *)iph)->ihl * 4;
+
+			proto = ((struct iphdr *)iph)->protocol;
+			trans = iph + ip_len;
+		} else if (skb->protocol == htons(ETH_P_IPV6)) {
+			u16 ip_len = sizeof(struct ipv6hdr);
+
+			proto = ((struct ipv6hdr *)iph)->nexthdr;
+			trans = iph + ip_len;
+		} else {
+			priv->stats.csum_err_invalid_ip_version++;
+			goto sw_csum;
+		}
+
+		check = rmnet_map_get_csum_field(proto, trans);
+		if (check) {
+			*check = 0;
+			skb->ip_summed = CHECKSUM_NONE;
+			/* Ask for checksum offloading */
+			ul_header->csum_valid_required = 1;
+			priv->stats.csum_hw++;
+			return;
+		}
+	}
+
+sw_csum:
+	priv->stats.csum_sw++;
+}
+
+/* Generates UL checksum meta info header for IPv4 and IPv6 over TCP and UDP
+ * packets that are supported for UL checksum offload.
+ */
+void rmnet_map_checksum_uplink_packet(struct sk_buff *skb,
+				      struct net_device *orig_dev,
+				      int csum_type)
+{
+	switch (csum_type) {
+	case RMNET_FLAGS_EGRESS_MAP_CKSUMV4:
+		rmnet_map_v4_checksum_uplink_packet(skb, orig_dev);
+		break;
+	case RMNET_FLAGS_EGRESS_MAP_CKSUMV5:
+		rmnet_map_v5_checksum_uplink_packet(skb, orig_dev);
+		break;
+	default:
+		break;
+	}
+}
+
+/* Process a QMAPv5 packet header */
+int rmnet_map_process_next_hdr_packet(struct sk_buff *skb)
+{
+	struct rmnet_priv *priv = netdev_priv(skb->dev);
+	int rc = 0;
+
+	switch (rmnet_map_get_next_hdr_type(skb)) {
+	case RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD:
+		if (rmnet_map_get_csum_valid(skb)) {
+			priv->stats.csum_ok++;
+			skb->ip_summed = CHECKSUM_UNNECESSARY;
+		} else {
+			priv->stats.csum_valid_unset++;
+		}
+
+		pskb_pull(skb,
+			  (sizeof(struct rmnet_map_header) +
+			   sizeof(struct rmnet_map_v5_csum_header)));
+		break;
+	default:
+		rc = -EINVAL;
+		break;
+	}
+
+	return rc;
+}
+
 struct rmnet_agg_work {
 	struct work_struct work;
 	struct rmnet_port *port;