Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit c9d0dc4b authored by David S. Miller
Browse files

Merge branch 'qualcomm-rmnet-Add-64-bit-stats-and-GRO'



Subash Abhinov Kasiviswanathan says:

====================
net: qualcomm: rmnet: Add 64 bit stats and GRO

This series adds support for 64 bit per cpu stats and GRO

Patches 1-2 are cleanups of return code and a redundant condition
Patch 3 adds support for 64 bit per cpu stats
Patch 4 adds support for GRO using GRO cells

v1->v2: Since gro_cells_init() could potentially fail, move it from device
setup to ndo_init() as mentioned by Eric.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
parents cc49c8ff ca32fb03
Loading
Loading
Loading
Loading
+1 −0
Original line number Diff line number Diff line
@@ -5,6 +5,7 @@
menuconfig RMNET
	tristate "RmNet MAP driver"
	default n
	select GRO_CELLS
	---help---
	  If you select this, you will enable the RMNET module which is used
	  for handling data in the multiplexing and aggregation protocol (MAP)
+16 −0
Original line number Diff line number Diff line
@@ -14,6 +14,7 @@
 */

#include <linux/skbuff.h>
#include <net/gro_cells.h>

#ifndef _RMNET_CONFIG_H_
#define _RMNET_CONFIG_H_
@@ -41,9 +42,24 @@ struct rmnet_port {

extern struct rtnl_link_ops rmnet_link_ops;

/* 64-bit traffic counters for an rmnet virtual net device.
 * One instance lives inside each per-cpu struct rmnet_pcpu_stats,
 * where it is paired with a u64_stats_sync for consistent reads.
 */
struct rmnet_vnd_stats {
	u64 rx_pkts;
	u64 rx_bytes;
	u64 tx_pkts;
	u64 tx_bytes;
	u32 tx_drops;	/* deliberately 32-bit, unlike the u64 counters above */
};

/* Per-cpu stats instance: the counters plus the u64_stats_sync
 * sequence counter that allows readers (notably on 32-bit systems)
 * to obtain consistent 64-bit snapshots of the counters.
 */
struct rmnet_pcpu_stats {
	struct rmnet_vnd_stats stats;
	struct u64_stats_sync syncp;
};

/* Private area of an rmnet virtual net device (retrieved via netdev_priv()). */
struct rmnet_priv {
	u8 mux_id;		/* presumably the MAP mux ID of this vnd -- confirm against endpoint setup */
	struct net_device *real_dev;	/* underlying real (physical) device */
	struct rmnet_pcpu_stats __percpu *pcpu_stats;	/* 64-bit per-cpu stats */
	struct gro_cells gro_cells;	/* GRO cells fed on the ingress path */
};

struct rmnet_port *rmnet_get_port(struct net_device *real_dev);
+16 −22
Original line number Diff line number Diff line
@@ -43,22 +43,23 @@ static void rmnet_set_skb_proto(struct sk_buff *skb)

/* Generic handler */

/* Deliver an ingress skb up the stack through the device's GRO cells.
 *
 * NOTE(review): this span is a rendered diff without +/- markers, so the
 * pre-change and post-change line of each modification both appear below.
 */
static rx_handler_result_t	/* old return type (pre-GRO version) */
static void			/* new return type: result no longer consulted */
rmnet_deliver_skb(struct sk_buff *skb)
{
	struct rmnet_priv *priv = netdev_priv(skb->dev);

	skb_reset_transport_header(skb);
	skb_reset_network_header(skb);
	rmnet_vnd_rx_fixup(skb, skb->dev);

	skb->pkt_type = PACKET_HOST;
	skb_set_mac_header(skb, 0);
	netif_receive_skb(skb);	/* old delivery path (replaced) */
	return RX_HANDLER_CONSUMED;	/* old return (removed) */
	gro_cells_receive(&priv->gro_cells, skb);	/* new: hand off to GRO cell */
}

/* MAP handler */

/* Handle a single de-muxed MAP frame: pick the egress vnd, strip the MAP
 * header and deliver the inner packet via rmnet_deliver_skb().
 *
 * NOTE(review): rendered diff without +/- markers; old and new lines of
 * each change both appear, and the '@@' line marks elided context.
 */
static rx_handler_result_t	/* old return type */
static void			/* new return type */
__rmnet_map_ingress_handler(struct sk_buff *skb,
			    struct rmnet_port *port)
{
@@ -84,38 +85,33 @@ __rmnet_map_ingress_handler(struct sk_buff *skb,
	/* (lines elided by the diff view -- presumably the MAP header parse
	 * that sets 'ep' and 'len'; confirm against the full source)
	 */
	if (!ep)
		goto free_skb;

	if (port->ingress_data_format & RMNET_INGRESS_FORMAT_DEMUXING)	/* old guard (removed as redundant) */
	skb->dev = ep->egress_dev;

	/* Subtract MAP header */
	skb_pull(skb, sizeof(struct rmnet_map_header));
	skb_trim(skb, len);
	rmnet_set_skb_proto(skb);
	return rmnet_deliver_skb(skb);	/* old: propagated rx_handler result */
	rmnet_deliver_skb(skb);	/* new: void delivery */
	return;			/* new */

free_skb:
	kfree_skb(skb);
	return RX_HANDLER_CONSUMED;	/* old return (removed) */
}

/* Ingress MAP dispatcher: either de-aggregate a multi-frame skb and hand
 * each inner frame to __rmnet_map_ingress_handler(), or pass the skb
 * through directly.
 *
 * NOTE(review): rendered diff without +/- markers; old and new lines of
 * each change both appear below.
 */
static rx_handler_result_t	/* old return type */
static void			/* new return type */
rmnet_map_ingress_handler(struct sk_buff *skb,
			  struct rmnet_port *port)
{
	struct sk_buff *skbn;
	int rc;	/* old: rc removed along with the return value */

	if (port->ingress_data_format & RMNET_INGRESS_FORMAT_DEAGGREGATION) {
		while ((skbn = rmnet_map_deaggregate(skb)) != NULL)
			__rmnet_map_ingress_handler(skbn, port);

		consume_skb(skb);	/* outer aggregate skb is done */
		rc = RX_HANDLER_CONSUMED;	/* old (removed) */
	} else {
		rc = __rmnet_map_ingress_handler(skb, port);	/* old */
		__rmnet_map_ingress_handler(skb, port);	/* new: void call */
	}

	return rc;	/* old (removed) */
}

static int rmnet_map_egress_handler(struct sk_buff *skb,
@@ -149,15 +145,13 @@ static int rmnet_map_egress_handler(struct sk_buff *skb,
	return RMNET_MAP_SUCCESS;
}

/* Bridge mode: retransmit the skb unchanged out of the bridge device,
 * if one is configured.
 *
 * NOTE(review): rendered diff without +/- markers; old and new lines of
 * each change both appear below.
 */
static rx_handler_result_t	/* old return type */
static void			/* new return type */
rmnet_bridge_handler(struct sk_buff *skb, struct net_device *bridge_dev)
{
	if (bridge_dev) {
		skb->dev = bridge_dev;
		dev_queue_xmit(skb);
	}

	return RX_HANDLER_CONSUMED;	/* old return (removed) */
}

/* Ingress / Egress Entry Points */
@@ -168,13 +162,12 @@ rmnet_bridge_handler(struct sk_buff *skb, struct net_device *bridge_dev)
 */
/* rx_handler registered on the real device: dispatches ingress skbs by
 * port mode (MAP de-mux in VND mode, retransmit in BRIDGE mode).  Every
 * visible path returns RX_HANDLER_CONSUMED.
 *
 * NOTE(review): rendered diff without +/- markers; old and new lines of
 * each change both appear, and the '@@' line marks elided context.
 */
rx_handler_result_t rmnet_rx_handler(struct sk_buff **pskb)
{
	int rc = RX_HANDLER_CONSUMED;	/* old: rc removed in new version */
	struct sk_buff *skb = *pskb;
	struct rmnet_port *port;
	struct net_device *dev;

	if (!skb)
		return RX_HANDLER_CONSUMED;	/* old */
		goto done;	/* new: single exit returning CONSUMED */

	dev = skb->dev;
	port = rmnet_get_port(dev);
@@ -182,14 +175,15 @@ rx_handler_result_t rmnet_rx_handler(struct sk_buff **pskb)
	/* (a small amount of context elided by the diff view here) */
	switch (port->rmnet_mode) {
	case RMNET_EPMODE_VND:
		if (port->ingress_data_format & RMNET_INGRESS_FORMAT_MAP)
			rc = rmnet_map_ingress_handler(skb, port);	/* old */
			rmnet_map_ingress_handler(skb, port);	/* new: void call */
		break;
	case RMNET_EPMODE_BRIDGE:
		rc = rmnet_bridge_handler(skb, port->bridge_ep);	/* old */
		rmnet_bridge_handler(skb, port->bridge_ep);	/* new: void call */
		break;
	}

	return rc;	/* old (removed) */
done:
	return RX_HANDLER_CONSUMED;
}

/* Modifies packet as per logical endpoint configuration and egress data format
+1 −2
Original line number Diff line number Diff line
@@ -80,7 +80,6 @@ u8 rmnet_map_demultiplex(struct sk_buff *skb);
struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb);
struct rmnet_map_header *rmnet_map_add_map_header(struct sk_buff *skb,
						  int hdrlen, int pad);
/* NOTE(review): rendered diff -- the old rmnet_map_command prototype
 * (rx_handler_result_t return) and its void replacement both appear.
 */
rx_handler_result_t rmnet_map_command(struct sk_buff *skb,
				      struct rmnet_port *port);
void rmnet_map_command(struct sk_buff *skb, struct rmnet_port *port);

#endif /* _RMNET_MAP_H_ */
+1 −3
Original line number Diff line number Diff line
@@ -76,8 +76,7 @@ static void rmnet_map_send_ack(struct sk_buff *skb,
/* Process MAP command frame and send N/ACK message as appropriate. Message cmd
 * name is decoded here and appropriate handler is called.
 */
/* NOTE(review): rendered diff without +/- markers; old and new signature
 * both appear, and the '@@' line elides most of the function body
 * (including where 'rc' is set -- confirm against the full source).
 */
rx_handler_result_t rmnet_map_command(struct sk_buff *skb,	/* old signature */
				      struct rmnet_port *port)	/* old signature */
void rmnet_map_command(struct sk_buff *skb, struct rmnet_port *port)	/* new */
{
	struct rmnet_map_control_command *cmd;
	unsigned char command_name;
@@ -102,5 +101,4 @@ rx_handler_result_t rmnet_map_command(struct sk_buff *skb,
	}
	if (rc == RMNET_MAP_COMMAND_ACK)
		rmnet_map_send_ack(skb, rc);
	return RX_HANDLER_CONSUMED;	/* old return (removed) */
}
Loading