Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 29e6452e authored by Subash Abhinov Kasiviswanathan's avatar Subash Abhinov Kasiviswanathan
Browse files

net: rmnet_data: Fix comments on code review



- Remove redundant checks in tx / rx fixup.
- Remove debug aggregation counter
- Remove unused recycle handler
- Add CAP_NET_ADMIN checks for set IOCTLs
- Simplify the packet delivery code

CRs-Fixed: 2111801
Change-Id: I14c02462c27b319f7517caa178f25a8efa22abeb
Signed-off-by: default avatarSubash Abhinov Kasiviswanathan <subashab@codeaurora.org>
parent 8e96f4f7
Loading
Loading
Loading
Loading
+53 −57
Original line number Diff line number Diff line
@@ -184,19 +184,35 @@ static rx_handler_result_t rmnet_bridge_handler
	return RX_HANDLER_CONSUMED;
}

#ifdef NET_SKBUFF_DATA_USES_OFFSET
static void rmnet_reset_mac_header(struct sk_buff *skb)
/* RX/TX Fixup */

/* rmnet_vnd_rx_fixup() - Virtual Network Device receive fixup hook
 * @skb:        Socket buffer ("packet") to modify
 * @dev:        Virtual network device
 *
 * Additional VND specific packet processing for ingress packets
 *
 * Return: void
 */
static void rmnet_vnd_rx_fixup(struct sk_buff *skb, struct net_device *dev)
{
	skb->mac_header = 0;
	skb->mac_len = 0;
	dev->stats.rx_packets++;
	dev->stats.rx_bytes += skb->len;
}
#else
static void rmnet_reset_mac_header(struct sk_buff *skb)

/* rmnet_vnd_tx_fixup() - Virtual Network Device transmit fixup hook
 * @skb:      Socket buffer ("packet") to modify
 * @dev:      Virtual network device
 *
 * Additional VND specific packet processing for egress packets
 *
 * Return: void
 */
static void rmnet_vnd_tx_fixup(struct sk_buff *skb, struct net_device *dev)
{
	skb->mac_header = skb->network_header;
	skb->mac_len = 0;
	dev->stats.tx_packets++;
	dev->stats.tx_bytes += skb->len;
}
#endif /*NET_SKBUFF_DATA_USES_OFFSET*/

/* rmnet_check_skb_can_gro() - Check if skb can be passed through GRO handler
 *
@@ -321,42 +337,33 @@ static rx_handler_result_t __rmnet_deliver_skb

	trace___rmnet_deliver_skb(skb);
	switch (ep->rmnet_mode) {
	case RMNET_EPMODE_NONE:
		return RX_HANDLER_PASS;

	case RMNET_EPMODE_BRIDGE:
		return rmnet_bridge_handler(skb, ep);

	case RMNET_EPMODE_VND:
		skb_reset_transport_header(skb);
		skb_reset_network_header(skb);
		switch (rmnet_vnd_rx_fixup(skb, skb->dev)) {
		case RX_HANDLER_CONSUMED:
			return RX_HANDLER_CONSUMED;
		rmnet_vnd_rx_fixup(skb, skb->dev);

		case RX_HANDLER_PASS:
		skb->pkt_type = PACKET_HOST;
			rmnet_reset_mac_header(skb);
		skb_set_mac_header(skb, 0);

		if (rmnet_check_skb_can_gro(skb) &&
		    (skb->dev->features & NETIF_F_GRO)) {
			napi = get_current_napi_context();
				if (napi) {

			skb_size = skb->len;
			gro_res = napi_gro_receive(napi, skb);
			trace_rmnet_gro_downlink(gro_res);
					rmnet_optional_gro_flush(napi, ep,
								 skb_size);
				} else {
					WARN_ONCE(1, "current napi is NULL\n");
					netif_receive_skb(skb);
				}
			rmnet_optional_gro_flush(napi, ep, skb_size);
		} else{
			netif_receive_skb(skb);
		}
		return RX_HANDLER_CONSUMED;
		}

	case RMNET_EPMODE_NONE:
		return RX_HANDLER_PASS;

	case RMNET_EPMODE_BRIDGE:
		return rmnet_bridge_handler(skb, ep);

	default:
		LOGD("Unknown ep mode %d", ep->rmnet_mode);
		rmnet_kfree_skb(skb, RMNET_STATS_SKBFREE_DELIVER_NO_EP);
@@ -441,15 +448,6 @@ static rx_handler_result_t _rmnet_map_ingress_handler

	ep = &config->muxed_ep[mux_id];

	if (!ep->refcount) {
		LOGD("Packet on %s:%d; has no logical endpoint config",
		     skb->dev->name, mux_id);

		rmnet_kfree_skb(skb, RMNET_STATS_SKBFREE_MAPINGRESS_MUX_NO_EP);
		return RX_HANDLER_CONSUMED;
	}

	if (config->ingress_data_format & RMNET_INGRESS_FORMAT_DEMUXING)
	skb->dev = ep->egress_dev;

	if ((config->ingress_data_format & RMNET_INGRESS_FORMAT_MAP_CKSUMV3) ||
@@ -495,17 +493,13 @@ static rx_handler_result_t rmnet_map_ingress_handler
	(struct sk_buff *skb, struct rmnet_phys_ep_config *config)
{
	struct sk_buff *skbn;
	int rc, co = 0;
	int rc;

	if (config->ingress_data_format & RMNET_INGRESS_FORMAT_DEAGGREGATION) {
		trace_rmnet_start_deaggregation(skb);
		while ((skbn = rmnet_map_deaggregate(skb, config)) != 0) {
			_rmnet_map_ingress_handler(skbn, config);
			co++;
		}
		trace_rmnet_end_deaggregation(skb, co);
		LOGD("De-aggregated %d packets", co);
		rmnet_stats_deagg_pkts(co);
		rmnet_kfree_skb(skb, RMNET_STATS_SKBFREE_MAPINGRESS_AGGBUF);
		rc = RX_HANDLER_CONSUMED;
	} else {
@@ -538,12 +532,15 @@ static int rmnet_map_egress_handler(struct sk_buff *skb,
	int required_headroom, additional_header_length, ckresult;
	struct rmnet_map_header_s *map_header;
	int non_linear_skb;
	int csum_required = (config->egress_data_format &
			     RMNET_EGRESS_FORMAT_MAP_CKSUMV3) ||
			    (config->egress_data_format &
			     RMNET_EGRESS_FORMAT_MAP_CKSUMV4);

	additional_header_length = 0;

	required_headroom = sizeof(struct rmnet_map_header_s);
	if ((config->egress_data_format & RMNET_EGRESS_FORMAT_MAP_CKSUMV3) ||
	    (config->egress_data_format & RMNET_EGRESS_FORMAT_MAP_CKSUMV4)) {
	if (csum_required) {
		required_headroom +=
			sizeof(struct rmnet_map_ul_checksum_header_s);
		additional_header_length +=
@@ -558,8 +555,7 @@ static int rmnet_map_egress_handler(struct sk_buff *skb,
		return 1;
	}

	if ((config->egress_data_format & RMNET_EGRESS_FORMAT_MAP_CKSUMV3) ||
	    (config->egress_data_format & RMNET_EGRESS_FORMAT_MAP_CKSUMV4)) {
	if (csum_required) {
		ckresult = rmnet_map_checksum_uplink_packet
				(skb, orig_dev, config->egress_data_format);
		trace_rmnet_map_checksum_uplink_packet(orig_dev, ckresult);
+1 −25
Original line number Diff line number Diff line
@@ -41,11 +41,6 @@ unsigned long int queue_xmit[RMNET_STATS_QUEUE_XMIT_MAX * 2];
module_param_array(queue_xmit, ulong, 0, 0444);
MODULE_PARM_DESC(queue_xmit, "SKBs queued for transmit");

static DEFINE_SPINLOCK(rmnet_deagg_count);
unsigned long int deagg_count[RMNET_STATS_AGG_MAX];
module_param_array(deagg_count, ulong, 0, 0444);
MODULE_PARM_DESC(deagg_count, "SKBs De-aggregated");

static DEFINE_SPINLOCK(rmnet_agg_count);
unsigned long int agg_count[RMNET_STATS_AGG_MAX];
module_param_array(agg_count, ulong, 0, 0444);
@@ -72,17 +67,8 @@ void rmnet_kfree_skb(struct sk_buff *skb, unsigned int reason)
	skb_free[reason]++;
	spin_unlock_irqrestore(&rmnet_skb_free_lock, flags);

	if (likely(skb)) {
		struct rmnet_phys_ep_conf_s *config;

		config = (struct rmnet_phys_ep_conf_s *)rcu_dereference
			 (skb->dev->rx_handler_data);
		if (likely(config))
			config->recycle(skb);
		else
	kfree_skb(skb);
}
}

void rmnet_stats_queue_xmit(int rc, unsigned int reason)
{
@@ -108,16 +94,6 @@ void rmnet_stats_agg_pkts(int aggcount)
	spin_unlock_irqrestore(&rmnet_agg_count, flags);
}

void rmnet_stats_deagg_pkts(int aggcount)
{
	unsigned long flags;

	spin_lock_irqsave(&rmnet_deagg_count, flags);
	deagg_count[RMNET_STATS_AGG_BUFF]++;
	deagg_count[RMNET_STATS_AGG_PKT] += aggcount;
	spin_unlock_irqrestore(&rmnet_deagg_count, flags);
}

void rmnet_stats_dl_checksum(unsigned int rc)
{
	unsigned long flags;
+0 −1
Original line number Diff line number Diff line
@@ -24,7 +24,6 @@ enum rmnet_skb_free_e {
	RMNET_STATS_SKBFREE_DELIVER_NO_EP,
	RMNET_STATS_SKBFREE_IPINGRESS_NO_EP,
	RMNET_STATS_SKBFREE_MAPINGRESS_BAD_MUX,
	RMNET_STATS_SKBFREE_MAPINGRESS_MUX_NO_EP,
	RMNET_STATS_SKBFREE_MAPINGRESS_AGGBUF,
	RMNET_STATS_SKBFREE_INGRESS_NOT_EXPECT_MAPD,
	RMNET_STATS_SKBFREE_INGRESS_NOT_EXPECT_MAPC,
+10 −49
Original line number Diff line number Diff line
@@ -101,55 +101,6 @@ static void rmnet_vnd_add_qos_header(struct sk_buff *skb,
	}
}

/* RX/TX Fixup */

/* rmnet_vnd_rx_fixup() - Virtual Network Device receive fixup hook
 * @skb:        Socket buffer ("packet") to modify
 * @dev:        Virtual network device
 *
 * Additional VND specific packet processing for ingress packets
 *
 * Return:
 *      - RX_HANDLER_PASS if packet should continue to process in stack
 *      - RX_HANDLER_CONSUMED if packet should not be processed in stack
 *
 */
int rmnet_vnd_rx_fixup(struct sk_buff *skb, struct net_device *dev)
{
	if (unlikely(!dev || !skb))
		return RX_HANDLER_CONSUMED;

	dev->stats.rx_packets++;
	dev->stats.rx_bytes += skb->len;

	return RX_HANDLER_PASS;
}

/* rmnet_vnd_tx_fixup() - Virtual Network Device transmit fixup hook
 * @skb:      Socket buffer ("packet") to modify
 * @dev:      Virtual network device
 *
 * Additional VND specific packet processing for egress packets
 *
 * Return:
 *      - RX_HANDLER_PASS if packet should continue to be transmitted
 *      - RX_HANDLER_CONSUMED if packet should not be transmitted by stack
 */
int rmnet_vnd_tx_fixup(struct sk_buff *skb, struct net_device *dev)
{
	struct rmnet_vnd_private_s *dev_conf;

	dev_conf = (struct rmnet_vnd_private_s *)netdev_priv(dev);

	if (unlikely(!dev || !skb))
		return RX_HANDLER_CONSUMED;

	dev->stats.tx_packets++;
	dev->stats.tx_bytes += skb->len;

	return RX_HANDLER_PASS;
}

/* Network Device Operations */

/* rmnet_vnd_start_xmit() - Transmit NDO callback
@@ -220,12 +171,16 @@ static int _rmnet_vnd_do_qos_ioctl(struct net_device *dev,

	switch (cmd) {
	case RMNET_IOCTL_SET_QOS_ENABLE:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		LOGM("RMNET_IOCTL_SET_QOS_ENABLE on %s", dev->name);
		if (!dev_conf->qos_version)
			dev_conf->qos_version = RMNET_IOCTL_QOS_MODE_6;
		break;

	case RMNET_IOCTL_SET_QOS_DISABLE:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		LOGM("RMNET_IOCTL_SET_QOS_DISABLE on %s", dev->name);
		dev_conf->qos_version = 0;
		break;
@@ -240,6 +195,8 @@ static int _rmnet_vnd_do_qos_ioctl(struct net_device *dev,
		break;

	case RMNET_IOCTL_FLOW_ENABLE:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		LOGL("RMNET_IOCTL_FLOW_ENABLE on %s", dev->name);
		if (copy_from_user(&ioctl_data, ifr->ifr_ifru.ifru_data,
				   sizeof(struct rmnet_ioctl_data_s))) {
@@ -252,6 +209,8 @@ static int _rmnet_vnd_do_qos_ioctl(struct net_device *dev,
		break;

	case RMNET_IOCTL_FLOW_DISABLE:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		LOGL("RMNET_IOCTL_FLOW_DISABLE on %s", dev->name);
		if (copy_from_user(&ioctl_data, ifr->ifr_ifru.ifru_data,
				   sizeof(struct rmnet_ioctl_data_s))) {
@@ -367,6 +326,8 @@ static int rmnet_vnd_ioctl_extended(struct net_device *dev, struct ifreq *ifr)
		break;

	case RMNET_IOCTL_SET_QOS_VERSION:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (ext_cmd.u.data == RMNET_IOCTL_QOS_MODE_6 ||
		    ext_cmd.u.data == RMNET_IOCTL_QOS_MODE_8 ||
		    ext_cmd.u.data == 0) {
+0 −2
Original line number Diff line number Diff line
@@ -27,8 +27,6 @@ int rmnet_vnd_get_name(int id, char *name, int name_len);
int rmnet_vnd_create_dev(int id, struct net_device **new_device,
			 const char *prefix, int use_name);
int rmnet_vnd_free_dev(int id);
int rmnet_vnd_rx_fixup(struct sk_buff *skb, struct net_device *dev);
int rmnet_vnd_tx_fixup(struct sk_buff *skb, struct net_device *dev);
int rmnet_vnd_is_vnd(struct net_device *dev);
int rmnet_vnd_add_tc_flow(u32 id, u32 map_flow, u32 tc_flow);
int rmnet_vnd_del_tc_flow(u32 id, u32 map_flow, u32 tc_flow);