Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit b451b394 authored by qctecmdr Service's avatar qctecmdr Service Committed by Gerrit - the friendly Code Review server
Browse files

Merge "net: qualcomm: rmnet: Add support for QMAPv5 checksum offload"

parents 965b77ab d0d2d0bf
Loading
Loading
Loading
Loading
+19 −6
Original line number Original line Diff line number Diff line
@@ -162,8 +162,15 @@ __rmnet_map_ingress_handler(struct sk_buff *skb,


	skb->dev = ep->egress_dev;
	skb->dev = ep->egress_dev;


	/* Subtract MAP header */
	/* Handle QMAPv5 packet */
	pskb_pull(skb, sizeof(struct rmnet_map_header));
	if (qmap->next_hdr) {
		if (rmnet_map_process_next_hdr_packet(skb))
			goto free_skb;
	} else {
		/* We only have the main QMAP header to worry about */
		pskb_pull(skb, sizeof(*qmap));
	}

	rmnet_set_skb_proto(skb);
	rmnet_set_skb_proto(skb);


	if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4) {
	if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4) {
@@ -236,17 +243,23 @@ static int rmnet_map_egress_handler(struct sk_buff *skb,
				    struct rmnet_port *port, u8 mux_id,
				    struct rmnet_port *port, u8 mux_id,
				    struct net_device *orig_dev)
				    struct net_device *orig_dev)
{
{
	int required_headroom, additional_header_len;
	int required_headroom, additional_header_len, csum_type;
	struct rmnet_map_header *map_header;
	struct rmnet_map_header *map_header;


	additional_header_len = 0;
	additional_header_len = 0;
	required_headroom = sizeof(struct rmnet_map_header);
	required_headroom = sizeof(struct rmnet_map_header);
	csum_type = 0;


	if (port->data_format & RMNET_FLAGS_EGRESS_MAP_CKSUMV4) {
	if (port->data_format & RMNET_FLAGS_EGRESS_MAP_CKSUMV4) {
		additional_header_len = sizeof(struct rmnet_map_ul_csum_header);
		additional_header_len = sizeof(struct rmnet_map_ul_csum_header);
		required_headroom += additional_header_len;
		csum_type = RMNET_FLAGS_EGRESS_MAP_CKSUMV4;
	} else if (port->data_format & RMNET_FLAGS_EGRESS_MAP_CKSUMV5) {
		additional_header_len = sizeof(struct rmnet_map_v5_csum_header);
		csum_type = RMNET_FLAGS_EGRESS_MAP_CKSUMV5;
	}
	}


	required_headroom += additional_header_len;

	if (skb_headroom(skb) < required_headroom) {
	if (skb_headroom(skb) < required_headroom) {
		if (pskb_expand_head(skb, required_headroom, 0, GFP_ATOMIC))
		if (pskb_expand_head(skb, required_headroom, 0, GFP_ATOMIC))
			return -ENOMEM;
			return -ENOMEM;
@@ -257,8 +270,8 @@ static int rmnet_map_egress_handler(struct sk_buff *skb,
		qmi_rmnet_work_maybe_restart(port);
		qmi_rmnet_work_maybe_restart(port);
#endif
#endif


	if (port->data_format & RMNET_FLAGS_EGRESS_MAP_CKSUMV4)
	if (csum_type)
		rmnet_map_checksum_uplink_packet(skb, orig_dev);
		rmnet_map_checksum_uplink_packet(skb, orig_dev, csum_type);


	map_header = rmnet_map_add_map_header(skb, additional_header_len, 0);
	map_header = rmnet_map_add_map_header(skb, additional_header_len, 0);
	if (!map_header)
	if (!map_header)
+80 −2
Original line number Original line Diff line number Diff line
@@ -3,6 +3,8 @@


#ifndef _RMNET_MAP_H_
#ifndef _RMNET_MAP_H_
#define _RMNET_MAP_H_
#define _RMNET_MAP_H_

#include <linux/skbuff.h>
#include "rmnet_config.h"
#include "rmnet_config.h"


struct rmnet_map_control_command {
struct rmnet_map_control_command {
@@ -33,14 +35,72 @@ enum rmnet_map_commands {
	RMNET_MAP_COMMAND_ENUM_LENGTH
	RMNET_MAP_COMMAND_ENUM_LENGTH
};
};


/* QMAPv5 next-header types, carried in the header_type field of the v5
 * headers below. CSUM_OFFLOAD is the only type handled by
 * rmnet_map_process_next_hdr_packet() in this patch.
 */
enum rmnet_map_v5_header_type {
	RMNET_MAP_HEADER_TYPE_UNKNOWN,
	RMNET_MAP_HEADER_TYPE_COALESCING = 0x1,
	RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD = 0x2,
	RMNET_MAP_HEADER_TYPE_ENUM_LENGTH
};

/* Values for the close_type field of struct rmnet_map_v5_coal_header.
 * NOTE(review): names suggest these encode why the hardware closed a
 * coalescing frame; exact semantics come from the MAP v5 spec and are not
 * visible in this file — confirm against the spec.
 */
enum rmnet_map_v5_close_type {
	RMNET_MAP_COAL_CLOSE_NON_COAL,
	RMNET_MAP_COAL_CLOSE_IP_MISS,
	RMNET_MAP_COAL_CLOSE_TRANS_MISS,
	RMNET_MAP_COAL_CLOSE_HW,
	RMNET_MAP_COAL_CLOSE_COAL,
};

/* Values for the close_value field of struct rmnet_map_v5_coal_header.
 * NOTE(review): names suggest sub-reasons for a hardware-initiated close
 * (packet/byte/time/eviction limits); verify against the MAP v5 spec.
 */
enum rmnet_map_v5_close_value {
	RMNET_MAP_COAL_CLOSE_HW_NL,
	RMNET_MAP_COAL_CLOSE_HW_PKT,
	RMNET_MAP_COAL_CLOSE_HW_BYTE,
	RMNET_MAP_COAL_CLOSE_HW_TIME,
	RMNET_MAP_COAL_CLOSE_HW_EVICT,
};

/* Main QMAP header */
struct rmnet_map_header {
struct rmnet_map_header {
	u8  pad_len:6;
	u8  pad_len:6;
	u8  reserved_bit:1;
	u8  next_hdr:1;
	u8  cd_bit:1;
	u8  cd_bit:1;
	u8  mux_id;
	u8  mux_id;
	__be16 pkt_len;
	__be16 pkt_len;
}  __aligned(1);
}  __aligned(1);


/* QMAP v5 headers */
/* QMAPv5 checksum offload header. Immediately follows the main
 * struct rmnet_map_header when that header's next_hdr bit is set.
 */
struct rmnet_map_v5_csum_header {
	u8  next_hdr:1;		/* set if yet another v5 header follows */
	u8  header_type:7;	/* enum rmnet_map_v5_header_type */
	u8  hw_reserved:7;
	u8  csum_valid_required:1; /* DL: HW validated csum; UL: request HW csum */
	__be16 reserved;
} __aligned(1);

/* One number-length (NLO) entry of a coalescing header: describes a run of
 * num_packets packets of pkt_len bytes each, with per-packet checksum
 * error bits. NOTE(review): bitmap ordering per MAP v5 spec — not visible here.
 */
struct rmnet_map_v5_nl_pair {
	__be16 pkt_len;
	u8  csum_error_bitmap;
	u8  num_packets;
} __aligned(1);

/* NLO: Number-length object */
#define RMNET_MAP_V5_MAX_NLOS         (6)
#define RMNET_MAP_V5_MAX_PACKETS      (48)

/* QMAPv5 coalescing header (header_type == RMNET_MAP_HEADER_TYPE_COALESCING).
 * Carries up to RMNET_MAP_V5_MAX_NLOS number-length entries describing the
 * coalesced packets. Not yet consumed by this patch's ingress path, which
 * only handles the checksum-offload header type.
 */
struct rmnet_map_v5_coal_header {
	u8  next_hdr:1;
	u8  header_type:7;	/* enum rmnet_map_v5_header_type */
	u8  reserved1:4;
	u8  num_nlos:3;		/* count of valid entries in nl_pairs[] */
	u8  csum_valid:1;
	u8  close_type:4;	/* enum rmnet_map_v5_close_type */
	u8  close_value:4;	/* enum rmnet_map_v5_close_value */
	u8  reserved2:4;
	u8  virtual_channel_id:4;

	struct rmnet_map_v5_nl_pair nl_pairs[RMNET_MAP_V5_MAX_NLOS];
} __aligned(1);

/* QMAP v4 headers */
struct rmnet_map_dl_csum_trailer {
struct rmnet_map_dl_csum_trailer {
	u8  reserved1;
	u8  reserved1;
	u8  valid:1;
	u8  valid:1;
@@ -152,6 +212,22 @@ rmnet_map_get_cmd_start(struct sk_buff *skb)
	return (struct rmnet_map_control_command *)data;
	return (struct rmnet_map_control_command *)data;
}
}


/* Return the header_type field of the QMAPv5 header that follows the main
 * MAP header. Caller must ensure the skb actually carries a v5 next header
 * (main header's next_hdr bit set).
 */
static inline u8 rmnet_map_get_next_hdr_type(struct sk_buff *skb)
{
	struct rmnet_map_v5_coal_header *next_hdr;

	next_hdr = (struct rmnet_map_v5_coal_header *)
		   (rmnet_map_data_ptr(skb) + sizeof(struct rmnet_map_header));
	return next_hdr->header_type;
}

/* Return the csum_valid_required bit of the QMAPv5 checksum header that
 * follows the main MAP header, i.e. whether hardware validated the
 * checksum of this downlink packet.
 */
static inline bool rmnet_map_get_csum_valid(struct sk_buff *skb)
{
	struct rmnet_map_v5_csum_header *csum_hdr;

	csum_hdr = (struct rmnet_map_v5_csum_header *)
		   (rmnet_map_data_ptr(skb) + sizeof(struct rmnet_map_header));
	return csum_hdr->csum_valid_required;
}

struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb,
struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb,
				      struct rmnet_port *port);
				      struct rmnet_port *port);
struct rmnet_map_header *rmnet_map_add_map_header(struct sk_buff *skb,
struct rmnet_map_header *rmnet_map_add_map_header(struct sk_buff *skb,
@@ -159,7 +235,9 @@ struct rmnet_map_header *rmnet_map_add_map_header(struct sk_buff *skb,
void rmnet_map_command(struct sk_buff *skb, struct rmnet_port *port);
void rmnet_map_command(struct sk_buff *skb, struct rmnet_port *port);
int rmnet_map_checksum_downlink_packet(struct sk_buff *skb, u16 len);
int rmnet_map_checksum_downlink_packet(struct sk_buff *skb, u16 len);
void rmnet_map_checksum_uplink_packet(struct sk_buff *skb,
void rmnet_map_checksum_uplink_packet(struct sk_buff *skb,
				      struct net_device *orig_dev);
				      struct net_device *orig_dev,
				      int csum_type);
int rmnet_map_process_next_hdr_packet(struct sk_buff *skb);
int rmnet_map_tx_agg_skip(struct sk_buff *skb, int offset);
int rmnet_map_tx_agg_skip(struct sk_buff *skb, int offset);
void rmnet_map_tx_aggregate(struct sk_buff *skb, struct rmnet_port *port);
void rmnet_map_tx_aggregate(struct sk_buff *skb, struct rmnet_port *port);
void rmnet_map_tx_aggregate_init(struct rmnet_port *port);
void rmnet_map_tx_aggregate_init(struct rmnet_port *port);
+102 −5
Original line number Original line Diff line number Diff line
@@ -269,6 +269,7 @@ struct rmnet_map_header *rmnet_map_add_map_header(struct sk_buff *skb,
						  int hdrlen, int pad)
						  int hdrlen, int pad)
{
{
	struct rmnet_map_header *map_header;
	struct rmnet_map_header *map_header;
	struct rmnet_port *port = rmnet_get_port(skb->dev);
	u32 padding, map_datalen;
	u32 padding, map_datalen;
	u8 *padbytes;
	u8 *padbytes;


@@ -277,6 +278,10 @@ struct rmnet_map_header *rmnet_map_add_map_header(struct sk_buff *skb,
			skb_push(skb, sizeof(struct rmnet_map_header));
			skb_push(skb, sizeof(struct rmnet_map_header));
	memset(map_header, 0, sizeof(struct rmnet_map_header));
	memset(map_header, 0, sizeof(struct rmnet_map_header));


	/* Set next_hdr bit for csum offload packets */
	if (port->data_format & RMNET_FLAGS_EGRESS_MAP_CKSUMV5)
		map_header->next_hdr = 1;

	if (pad == RMNET_MAP_NO_PAD_BYTES) {
	if (pad == RMNET_MAP_NO_PAD_BYTES) {
		map_header->pkt_len = htons(map_datalen);
		map_header->pkt_len = htons(map_datalen);
		return map_header;
		return map_header;
@@ -322,6 +327,8 @@ struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb,


	if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4)
	if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4)
		packet_len += sizeof(struct rmnet_map_dl_csum_trailer);
		packet_len += sizeof(struct rmnet_map_dl_csum_trailer);
	else if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV5)
		packet_len += sizeof(struct rmnet_map_v5_csum_header);


	if (((int)skb->len - (int)packet_len) < 0)
	if (((int)skb->len - (int)packet_len) < 0)
		return NULL;
		return NULL;
@@ -400,10 +407,7 @@ int rmnet_map_checksum_downlink_packet(struct sk_buff *skb, u16 len)
}
}
EXPORT_SYMBOL(rmnet_map_checksum_downlink_packet);
EXPORT_SYMBOL(rmnet_map_checksum_downlink_packet);


/* Generates UL checksum meta info header for IPv4 and IPv6 over TCP and UDP
void rmnet_map_v4_checksum_uplink_packet(struct sk_buff *skb,
 * packets that are supported for UL checksum offload.
 */
void rmnet_map_checksum_uplink_packet(struct sk_buff *skb,
					 struct net_device *orig_dev)
					 struct net_device *orig_dev)
{
{
	struct rmnet_priv *priv = netdev_priv(orig_dev);
	struct rmnet_priv *priv = netdev_priv(orig_dev);
@@ -448,6 +452,99 @@ void rmnet_map_checksum_uplink_packet(struct sk_buff *skb,
	priv->stats.csum_sw++;
	priv->stats.csum_sw++;
}
}


/* Build and prepend a QMAPv5 checksum offload header for an uplink packet.
 *
 * Pushes a zeroed struct rmnet_map_v5_csum_header in front of the IP header
 * and tags it as a checksum-offload next header. If the stack requested
 * checksum help (CHECKSUM_PARTIAL) and the transport checksum field can be
 * located, that field is zeroed and csum_valid_required is set so the
 * hardware computes the checksum; in every other case the packet falls back
 * to software checksumming and only the stats counter is bumped.
 *
 * Must run before rmnet_map_add_map_header() — the IP header is expected to
 * start immediately after the header pushed here (see the egress handler).
 */
void rmnet_map_v5_checksum_uplink_packet(struct sk_buff *skb,
					 struct net_device *orig_dev)
{
	struct rmnet_priv *priv = netdev_priv(orig_dev);
	struct rmnet_map_v5_csum_header *ul_header;

	ul_header = (struct rmnet_map_v5_csum_header *)
		    skb_push(skb, sizeof(*ul_header));
	memset(ul_header, 0, sizeof(*ul_header));
	ul_header->header_type = RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		/* IP header sits right after the v5 header just pushed */
		void *iph = (char *)ul_header + sizeof(*ul_header);
		void *trans;
		__sum16 *check;
		u8 proto;

		if (skb->protocol == htons(ETH_P_IP)) {
			u16 ip_len = ((struct iphdr *)iph)->ihl * 4;

			proto = ((struct iphdr *)iph)->protocol;
			trans = iph + ip_len;
		} else if (skb->protocol == htons(ETH_P_IPV6)) {
			/* NOTE(review): treats nexthdr as the transport
			 * protocol, i.e. assumes no IPv6 extension headers
			 * — confirm this matches HW expectations.
			 */
			u16 ip_len = sizeof(struct ipv6hdr);

			proto = ((struct ipv6hdr *)iph)->nexthdr;
			trans = iph + ip_len;
		} else {
			priv->stats.csum_err_invalid_ip_version++;
			goto sw_csum;
		}

		/* NULL when proto is not a supported transport (TCP/UDP) */
		check = rmnet_map_get_csum_field(proto, trans);
		if (check) {
			/* Zero the field so HW can fill in the checksum */
			*check = 0;
			skb->ip_summed = CHECKSUM_NONE;
			/* Ask for checksum offloading */
			ul_header->csum_valid_required = 1;
			priv->stats.csum_hw++;
			return;
		}
	}

sw_csum:
	priv->stats.csum_sw++;
}

/* Generates UL checksum meta info header for IPv4 and IPv6 over TCP and UDP
 * packets that are supported for UL checksum offload.
 */
void rmnet_map_checksum_uplink_packet(struct sk_buff *skb,
				      struct net_device *orig_dev,
				      int csum_type)
{
	/* Dispatch to the MAP version-specific uplink csum header builder.
	 * csum_type == 0 (no egress csum flag set) is a deliberate no-op.
	 */
	if (csum_type == RMNET_FLAGS_EGRESS_MAP_CKSUMV4)
		rmnet_map_v4_checksum_uplink_packet(skb, orig_dev);
	else if (csum_type == RMNET_FLAGS_EGRESS_MAP_CKSUMV5)
		rmnet_map_v5_checksum_uplink_packet(skb, orig_dev);
}

/* Process a QMAPv5 packet header on ingress.
 *
 * Returns 0 on success (both MAP headers stripped from the skb) or
 * -EINVAL for an unsupported v5 header type (skb left untouched).
 */
int rmnet_map_process_next_hdr_packet(struct sk_buff *skb)
{
	struct rmnet_priv *priv = netdev_priv(skb->dev);

	/* Checksum offload is the only v5 next header handled here */
	if (rmnet_map_get_next_hdr_type(skb) !=
	    RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD)
		return -EINVAL;

	if (rmnet_map_get_csum_valid(skb)) {
		/* Hardware already validated the checksum */
		priv->stats.csum_ok++;
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else {
		priv->stats.csum_valid_unset++;
	}

	/* Strip the main MAP header plus the v5 csum header in one pull */
	pskb_pull(skb, sizeof(struct rmnet_map_header) +
		       sizeof(struct rmnet_map_v5_csum_header));

	return 0;
}

struct rmnet_agg_work {
struct rmnet_agg_work {
	struct work_struct work;
	struct work_struct work;
	struct rmnet_port *port;
	struct rmnet_port *port;
+3 −0
Original line number Original line Diff line number Diff line
@@ -984,6 +984,9 @@ enum {
#define RMNET_FLAGS_INGRESS_MAP_COMMANDS          (1U << 1)
#define RMNET_FLAGS_INGRESS_MAP_COMMANDS          (1U << 1)
#define RMNET_FLAGS_INGRESS_MAP_CKSUMV4           (1U << 2)
#define RMNET_FLAGS_INGRESS_MAP_CKSUMV4           (1U << 2)
#define RMNET_FLAGS_EGRESS_MAP_CKSUMV4            (1U << 3)
#define RMNET_FLAGS_EGRESS_MAP_CKSUMV4            (1U << 3)
#define RMNET_FLAGS_INGRESS_COALESCE              (1U << 4)
#define RMNET_FLAGS_INGRESS_MAP_CKSUMV5           (1U << 5)
#define RMNET_FLAGS_EGRESS_MAP_CKSUMV5            (1U << 6)


enum {
enum {
	IFLA_RMNET_UNSPEC,
	IFLA_RMNET_UNSPEC,