
Commit f5c70f1c authored by arnav_s, committed by Gerrit - the friendly Code Review server

Fast-Forward driver/rmnet folder from 4.0 to 5.0

CRs-Fixed: 2423695
Change-Id: I3524afc3252d918b336a6026727ce7f8026dc7b0
parent 1ec54bfc
+2 −1
obj-m += rmnet_perf.o
rmnet_perf-y := rmnet_perf_config.o rmnet_perf_core.o rmnet_perf_tcp_opt.o
 No newline at end of file
rmnet_perf-y := rmnet_perf_config.o rmnet_perf_core.o rmnet_perf_opt.o \
		rmnet_perf_tcp_opt.o rmnet_perf_udp_opt.o
+125 −66
/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
@@ -17,10 +17,9 @@
#include <linux/module.h>
#include <linux/netdevice.h>
#include "rmnet_perf_core.h"
#include "rmnet_perf_tcp_opt.h"
#include "rmnet_perf_opt.h"
#include "rmnet_perf_config.h"
#include <../drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h>
#include <../drivers/net/ethernet/qualcomm/rmnet/rmnet_private.h>
#include <../drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.h>
#include <../drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h>

@@ -113,8 +112,9 @@ rmnet_perf_config_free_resources(struct rmnet_perf *perf,
	if (!perf)
		return RMNET_PERF_RESOURCE_MGMT_FAIL;

	/* Free everything tcp_opt currently holds */
	rmnet_perf_tcp_opt_flush_all_flow_nodes(perf);
	/* Free everything flow nodes currently hold */
	rmnet_perf_opt_flush_all_flow_nodes(perf);

	/* Get rid of 64k sk_buff cache */
	rmnet_perf_config_free_64k_buffs(perf);
	/* Before we free tcp_opt's structures, make sure we arent holding
@@ -124,7 +124,7 @@ rmnet_perf_config_free_resources(struct rmnet_perf *perf,

	//rmnet_perf_core_timer_exit(perf->core_meta);
	/* Since we allocated in one chunk, we will also free in one chunk */
	kfree(perf->tcp_opt_meta);
	kfree(perf);

	return RMNET_PERF_RESOURCE_MGMT_SUCCESS;
}
@@ -144,23 +144,24 @@ static int rmnet_perf_config_allocate_resources(struct rmnet_perf **perf)
{
	int i;
	void *buffer_head;
	struct rmnet_perf_tcp_opt_meta *tcp_opt_meta;
	struct rmnet_perf_opt_meta *opt_meta;
	struct rmnet_perf_core_meta *core_meta;
	struct rmnet_perf *local_perf;

	int perf_size = sizeof(**perf);
	int tcp_opt_meta_size = sizeof(struct rmnet_perf_tcp_opt_meta);
	int opt_meta_size = sizeof(struct rmnet_perf_opt_meta);
	int flow_node_pool_size =
			sizeof(struct rmnet_perf_tcp_opt_flow_node_pool);
			sizeof(struct rmnet_perf_opt_flow_node_pool);
	int bm_state_size = sizeof(struct rmnet_perf_core_burst_marker_state);
	int flow_node_size = sizeof(struct rmnet_perf_tcp_opt_flow_node);
	int flow_node_size = sizeof(struct rmnet_perf_opt_flow_node);
	int core_meta_size = sizeof(struct rmnet_perf_core_meta);
	int skb_list_size = sizeof(struct rmnet_perf_core_skb_list);
	int skb_buff_pool_size = sizeof(struct rmnet_perf_core_64k_buff_pool);

	int total_size = perf_size + tcp_opt_meta_size + flow_node_pool_size +
	int total_size = perf_size + opt_meta_size + flow_node_pool_size +
			(flow_node_size * RMNET_PERF_NUM_FLOW_NODES) +
			core_meta_size + skb_list_size + skb_buff_pool_size;
			core_meta_size + skb_list_size + skb_buff_pool_size
			+ bm_state_size;

	/* allocate all the memory in one chunk for cache coherency sake */
	buffer_head = kmalloc(total_size, GFP_KERNEL);
@@ -171,21 +172,21 @@ static int rmnet_perf_config_allocate_resources(struct rmnet_perf **perf)
	local_perf = *perf;
	buffer_head += perf_size;

	local_perf->tcp_opt_meta = buffer_head;
	tcp_opt_meta = local_perf->tcp_opt_meta;
	buffer_head += tcp_opt_meta_size;
	local_perf->opt_meta = buffer_head;
	opt_meta = local_perf->opt_meta;
	buffer_head += opt_meta_size;

	/* assign the node pool */
	tcp_opt_meta->node_pool = buffer_head;
	tcp_opt_meta->node_pool->num_flows_in_use = 0;
	tcp_opt_meta->node_pool->flow_recycle_counter = 0;
	opt_meta->node_pool = buffer_head;
	opt_meta->node_pool->num_flows_in_use = 0;
	opt_meta->node_pool->flow_recycle_counter = 0;
	buffer_head += flow_node_pool_size;

	/* assign the individual flow nodes themselves */
	for (i = 0; i < RMNET_PERF_NUM_FLOW_NODES; i++) {
		struct rmnet_perf_tcp_opt_flow_node **flow_node;
		struct rmnet_perf_opt_flow_node **flow_node;

		flow_node = &tcp_opt_meta->node_pool->node_list[i];
		flow_node = &opt_meta->node_pool->node_list[i];
		*flow_node = buffer_head;
		buffer_head += flow_node_size;
		(*flow_node)->num_pkts_held = 0;
@@ -196,12 +197,12 @@ static int rmnet_perf_config_allocate_resources(struct rmnet_perf **perf)
	//rmnet_perf_core_timer_init(core_meta);
	buffer_head += core_meta_size;

	/* Assign common (not specific to something like tcp_opt) structures */
	/* Assign common (not specific to something like opt) structures */
	core_meta->skb_needs_free_list = buffer_head;
	core_meta->skb_needs_free_list->num_skbs_held = 0;
	buffer_head += skb_list_size;

	/* allocate buffer pool struct (also not specific to tcp_opt) */
	/* allocate buffer pool struct (also not specific to opt) */
	core_meta->buff_pool = buffer_head;
	buffer_head += skb_buff_pool_size;

@@ -215,6 +216,54 @@ static int rmnet_perf_config_allocate_resources(struct rmnet_perf **perf)
	return RMNET_PERF_RESOURCE_MGMT_SUCCESS;
}
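
The allocation above, together with the matching single kfree() in rmnet_perf_config_free_resources(), follows a carve-one-chunk pattern: one kmalloc() is sliced into several sub-structures by walking a cursor, and teardown frees only the head of the chunk. A minimal userspace sketch of that pattern, using made-up struct names rather than the driver's:

/* Illustrative sketch only (userspace, hypothetical struct names): one
 * allocation carved into several structures with a moving cursor,
 * mirroring the buffer_head walk above; teardown frees the chunk head once.
 */
#include <stdio.h>
#include <stdlib.h>

struct demo_opt_meta  { int num_flows_in_use; };
struct demo_core_meta { int num_skbs_held; };

int main(void)
{
	size_t total = sizeof(struct demo_opt_meta) +
		       sizeof(struct demo_core_meta);
	char *chunk = malloc(total);            /* kmalloc(total_size, ...) analog */
	char *cursor = chunk;
	struct demo_opt_meta *opt;
	struct demo_core_meta *core;

	if (!chunk)
		return 1;

	opt = (struct demo_opt_meta *)cursor;   /* first struct sits at the chunk head */
	cursor += sizeof(*opt);
	core = (struct demo_core_meta *)cursor; /* next struct right behind it */
	cursor += sizeof(*core);

	opt->num_flows_in_use = 0;
	core->num_skbs_held = 0;
	printf("chunk spans %zu bytes\n", total);

	free(chunk);                            /* single free for the whole chunk */
	return 0;
}

One caveat with this layout is alignment: the offset left behind by each struct must still satisfy the alignment requirement of the next one.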

enum rmnet_perf_resource_management_e
rmnet_perf_config_register_callbacks(struct net_device *dev,
				     struct rmnet_port *port)
{
	struct rmnet_map_dl_ind *dl_ind;
	struct qmi_rmnet_ps_ind *ps_ind;
	enum rmnet_perf_resource_management_e rc =
					RMNET_PERF_RESOURCE_MGMT_SUCCESS;

	perf->core_meta->dev = dev;
	/* register for DL marker */
	dl_ind = kzalloc(sizeof(struct rmnet_map_dl_ind), GFP_ATOMIC);
	if (dl_ind) {
		dl_ind->priority = RMNET_PERF;
		dl_ind->dl_hdr_handler =
			&rmnet_perf_core_handle_map_control_start;
		dl_ind->dl_trl_handler =
			&rmnet_perf_core_handle_map_control_end;
		perf->core_meta->dl_ind = dl_ind;
		if (rmnet_map_dl_ind_register(port, dl_ind)) {
			kfree(dl_ind);
			pr_err("%s(): Failed to register dl_ind\n", __func__);
			rc = RMNET_PERF_RESOURCE_MGMT_FAIL;
		}
	} else {
		pr_err("%s(): Failed to allocate dl_ind\n", __func__);
		rc = RMNET_PERF_RESOURCE_MGMT_FAIL;
	}

	/* register for PS mode indications */
	ps_ind = kzalloc(sizeof(struct qmi_rmnet_ps_ind), GFP_ATOMIC);
	if (ps_ind) {
		ps_ind->ps_on_handler = &rmnet_perf_core_ps_on;
		ps_ind->ps_off_handler = &rmnet_perf_core_ps_off;
		perf->core_meta->ps_ind = ps_ind;
		if (qmi_rmnet_ps_ind_register(port, ps_ind)) {
			kfree(ps_ind);
			rc = RMNET_PERF_RESOURCE_MGMT_FAIL;
			pr_err("%s(): Failed to register ps_ind\n", __func__);
		}
	} else {
		rc = RMNET_PERF_RESOURCE_MGMT_FAIL;
		pr_err("%s(): Failed to allocate ps_ind\n", __func__);
	}

	return rc;
}
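
The new rmnet_perf_config_register_callbacks() follows a common shape: allocate a handler descriptor, fill in its function pointers, hand it to the registration API, and free it again if the registration is refused. A small userspace analog of that shape, with hypothetical names:

/* Userspace analog (hypothetical names): allocate a handler descriptor,
 * attach the function pointers, register it, and free it if the
 * registration fails -- the same shape as the dl_ind/ps_ind paths above.
 */
#include <stdio.h>
#include <stdlib.h>

struct demo_dl_handlers {
	void (*start)(void);
	void (*end)(void);
};

static struct demo_dl_handlers *registered;

static void on_start(void) { puts("burst start"); }
static void on_end(void)   { puts("burst end"); }

static int demo_register(struct demo_dl_handlers *h)
{
	if (registered)
		return -1;              /* slot already taken */
	registered = h;
	return 0;
}

int main(void)
{
	struct demo_dl_handlers *h = calloc(1, sizeof(*h)); /* kzalloc analog */

	if (!h)
		return 1;
	h->start = on_start;
	h->end = on_end;
	if (demo_register(h)) {
		fprintf(stderr, "failed to register handlers\n");
		free(h);                /* give the descriptor back on failure */
		return 1;
	}
	registered->start();
	return 0;
}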

static void rmnet_perf_netdev_down(struct net_device *dev)
{
	enum rmnet_perf_resource_management_e config_status;
@@ -222,35 +271,45 @@ static void rmnet_perf_netdev_down(struct net_device *dev)
	config_status = rmnet_perf_config_free_resources(perf, dev);
}

static int rmnet_perf_netdev_up(void)
static int rmnet_perf_netdev_up(struct net_device *real_dev,
				struct rmnet_port *port)
{
	enum rmnet_perf_resource_management_e alloc_rc;
	enum rmnet_perf_resource_management_e rc;

	alloc_rc = rmnet_perf_config_allocate_resources(&perf);
	if (alloc_rc == RMNET_PERF_RESOURCE_MGMT_FAIL)
		pr_err("Failed to allocate tcp_opt and core resources");
	rc = rmnet_perf_config_allocate_resources(&perf);
	if (rc == RMNET_PERF_RESOURCE_MGMT_FAIL) {
		pr_err("Failed to allocate tcp_opt and core resources\n");
		return RMNET_PERF_RESOURCE_MGMT_FAIL;
	}

	/* structs to contain these have already been allocated. Here we are
	 * simply allocating the buffers themselves
	 */
	alloc_rc |= rmnet_perf_config_alloc_64k_buffs(perf);
	if (alloc_rc == RMNET_PERF_RESOURCE_MGMT_FAIL)
	rc = rmnet_perf_config_alloc_64k_buffs(perf);
	if (rc == RMNET_PERF_RESOURCE_MGMT_FAIL) {
		pr_err("%s(): Failed to allocate 64k buffers for recycling\n",
		       __func__);
	else
		pr_err("%s(): Allocated 64k buffers for recycling\n",
		       __func__);
		return RMNET_PERF_RESOURCE_MGMT_SEMI_FAIL;
	}

	rc = rmnet_perf_config_register_callbacks(real_dev, port);
	if (rc == RMNET_PERF_RESOURCE_MGMT_FAIL) {
		pr_err("%s(): Failed to register for required "
			"callbacks\n", __func__);
		return RMNET_PERF_RESOURCE_MGMT_SEMI_FAIL;
	}

	return alloc_rc;
	return RMNET_PERF_RESOURCE_MGMT_SUCCESS;
}
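
rmnet_perf_netdev_up() now stops at the first failure and distinguishes a hard failure (the core allocation itself) from a partial one (SEMI_FAIL, returned once the main structures already exist). A compact sketch of that staged bring-up, with hypothetical stage functions:

/* Sketch of the staged bring-up (hypothetical stage functions); SEMI_FAIL
 * marks failures that occur after the core allocation already succeeded,
 * mirroring the SUCCESS / SEMI_FAIL / FAIL split above.
 */
#include <stdio.h>

enum demo_rc { DEMO_SUCCESS, DEMO_SEMI_FAIL, DEMO_FAIL };

static enum demo_rc alloc_core(void)          { return DEMO_SUCCESS; }
static enum demo_rc alloc_recycle_buffs(void) { return DEMO_SUCCESS; }
static enum demo_rc register_callbacks(void)  { return DEMO_SUCCESS; }

static enum demo_rc demo_netdev_up(void)
{
	if (alloc_core() == DEMO_FAIL)
		return DEMO_FAIL;           /* nothing allocated: hard failure */
	if (alloc_recycle_buffs() == DEMO_FAIL)
		return DEMO_SEMI_FAIL;      /* core exists, extras are missing */
	if (register_callbacks() == DEMO_FAIL)
		return DEMO_SEMI_FAIL;
	return DEMO_SUCCESS;
}

int main(void)
{
	printf("bring-up result: %d\n", demo_netdev_up());
	return 0;
}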

/* TODO Needs modifying*/
static int rmnet_perf_config_notify_cb(struct notifier_block *nb,
				       unsigned long event, void *data)
{
	/*Not sure if we need this*/
	enum rmnet_perf_resource_management_e return_val =
					RMNET_PERF_RESOURCE_MGMT_SUCCESS;
	struct net_device *dev = netdev_notifier_info_to_dev(data);
	unsigned int return_val;
	struct rmnet_port *port = rmnet_get_port(dev);

	if (!dev)
		return NOTIFY_DONE;
@@ -258,53 +317,54 @@ static int rmnet_perf_config_notify_cb(struct notifier_block *nb,
	switch (event) {
	case NETDEV_UNREGISTER:
		if (rmnet_is_real_dev_registered(dev) &&
		    rmnet_perf_deag_entry &&
		    !strncmp(dev->name, "rmnet_ipa0", 10)) {
			pr_err("%s(): rmnet_perf netdevice unregister,",
			struct rmnet_perf_core_meta *core_meta =
				perf->core_meta;
			pr_err("%s(): rmnet_perf netdevice unregister\n",
			       __func__);
			/* Unregister for DL marker */
			rmnet_map_dl_ind_deregister(rmnet_get_port(dev),
						    perf->core_meta->dl_ind);
			rmnet_map_dl_ind_deregister(port,
						    core_meta->dl_ind);
			kfree(core_meta->dl_ind);
			qmi_rmnet_ps_ind_deregister(port,
						    core_meta->ps_ind);
			kfree(core_meta->ps_ind);
			rmnet_perf_netdev_down(dev);
			RCU_INIT_POINTER(rmnet_perf_deag_entry, NULL);
		}
		break;
	case NETDEV_REGISTER:
		pr_err("%s(): rmnet_perf netdevice register, name = %s,",
		pr_err("%s(): rmnet_perf netdevice register, name = %s\n",
		       __func__, dev->name);
		/* Check prevents us from allocating resources for every
		 * interface
		 */
		if (!rmnet_perf_deag_entry) {
			rmnet_perf_netdev_up();
			RCU_INIT_POINTER(rmnet_perf_deag_entry,
					 rmnet_perf_core_deaggregate);
		}
		if (strncmp(dev->name, "rmnet_ipa0", 10) == 0 &&
		    rmnet_perf_deag_entry) {
			struct rmnet_map_dl_ind *dl_ind;

			/* register for DL marker */
			dl_ind = kzalloc(sizeof(struct rmnet_map_dl_ind),
					 GFP_ATOMIC);
			if (dl_ind) {
				dev_net_set(dev, &init_net);
				perf->core_meta->dev = dev;

				dl_ind->priority = RMNET_PERF;
				dl_ind->dl_hdr_handler =
					&rmnet_perf_core_handle_map_control_start;
				dl_ind->dl_trl_handler =
					&rmnet_perf_core_handle_map_control_end;
				perf->core_meta->dl_ind = dl_ind;
				return_val =
					rmnet_map_dl_ind_register(rmnet_get_port(dev),
								dl_ind);
		if (!rmnet_perf_deag_entry &&
		    strncmp(dev->name, "rmnet_data", 10) == 0) {
			struct rmnet_priv *priv = netdev_priv(dev);
			port = rmnet_get_port(priv->real_dev);
			return_val |= rmnet_perf_netdev_up(priv->real_dev,
							   port);
			if (return_val == RMNET_PERF_RESOURCE_MGMT_FAIL) {
				pr_err("%s(): rmnet_perf allocation or "
				       "registry failed. Potentially falling "
				       "back on legacy path\n",
					__func__);
				goto exit;
			}
			RCU_INIT_POINTER(rmnet_perf_deag_entry,
					 rmnet_perf_core_deaggregate);
			pr_err("%s(): rmnet_perf registered on "
			       "name = %s\n", __func__, dev->name);
		}
		break;
	default:
		break;
	}

exit:
	return NOTIFY_DONE;
}
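
For context, the callback above hangs off the kernel's standard netdevice notifier chain; rmnet_perf only reacts to register/unregister events for the devices it cares about. A bare-bones example module showing just that mechanism (the demo_* names are made up; the notifier APIs are the standard kernel ones, and this is not the rmnet_perf code itself):

/* Bare-bones netdevice notifier module, illustrating the mechanism only. */
#include <linux/module.h>
#include <linux/netdevice.h>

static int demo_netdev_event(struct notifier_block *nb,
			     unsigned long event, void *data)
{
	struct net_device *dev = netdev_notifier_info_to_dev(data);

	if (event == NETDEV_REGISTER)
		pr_info("demo: %s registered\n", dev->name);
	else if (event == NETDEV_UNREGISTER)
		pr_info("demo: %s unregistered\n", dev->name);

	return NOTIFY_DONE;
}

static struct notifier_block demo_notifier = {
	.notifier_call = demo_netdev_event,
};

static int __init demo_init(void)
{
	return register_netdevice_notifier(&demo_notifier);
}

static void __exit demo_exit(void)
{
	unregister_netdevice_notifier(&demo_notifier);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL v2");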

@@ -314,14 +374,13 @@ static struct notifier_block rmnet_perf_dev_notifier __read_mostly = {

int __init rmnet_perf_init(void)
{
	pr_err("%s(): initializing rmnet_perf, 5\n", __func__);
	pr_err("%s(): initializing rmnet_perf\n", __func__);
	return register_netdevice_notifier(&rmnet_perf_dev_notifier);
}

void __exit rmnet_perf_exit(void)
{
	pr_err("%s(): exiting rmnet_perf\n", __func__);
	RCU_INIT_POINTER(rmnet_perf_deag_entry, NULL);
	unregister_netdevice_notifier(&rmnet_perf_dev_notifier);
}

+12 −2
/* Copyright (c) 2013-2014, 2016-2017 The Linux Foundation. All rights reserved.
/* Copyright (c) 2013-2014, 2016-2017, 2019 The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
@@ -15,15 +15,20 @@

#include <linux/skbuff.h>
#include <../drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h>
#include <../drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.h>
#include <../drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h>
#include <../drivers/net/ethernet/qualcomm/rmnet/rmnet_trace.h>
#include <../include/soc/qcom/qmi_rmnet.h>
#include "rmnet_perf_core.h"


#ifndef _RMNET_PERF_CONFIG_H_
#define _RMNET_PERF_CONFIG_H_

enum rmnet_perf_resource_management_e {
	RMNET_PERF_RESOURCE_MGMT_SUCCESS,
	RMNET_PERF_RESOURCE_MGMT_FAIL
	RMNET_PERF_RESOURCE_MGMT_SEMI_FAIL,
	RMNET_PERF_RESOURCE_MGMT_FAIL,
};

/* rmnet based variables that we rely on*/
@@ -47,6 +52,11 @@ extern struct rmnet_port *rmnet_get_port(struct net_device *real_dev);
extern void rmnet_map_cmd_init(struct rmnet_port *port);
extern void rmnet_map_cmd_exit(struct rmnet_port *port);

/* Function declarations */
struct rmnet_perf *rmnet_perf_config_get_perf(void);
enum rmnet_perf_resource_management_e
	rmnet_perf_config_register_callbacks(struct net_device *dev,
					     struct rmnet_port *port);


#endif /* _RMNET_PERF_CONFIG_H_ */
+89 −48
/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
@@ -25,10 +25,14 @@
#include <../drivers/net/ethernet/qualcomm/rmnet/rmnet_private.h>
#include <../drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.h>
#include <../drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h>
#include "rmnet_perf_tcp_opt.h"
#include "rmnet_perf_opt.h"
#include "rmnet_perf_core.h"
#include "rmnet_perf_config.h"

#ifdef CONFIG_QCOM_QMI_POWER_COLLAPSE
#include <soc/qcom/qmi_rmnet.h>
#endif

/* Each index tells us the number of iterations it took us to find a recycled
 * skb
 */
@@ -90,6 +94,17 @@ unsigned long int packet_dropper_time = 1;
module_param(packet_dropper_time, ulong, 0644);
MODULE_PARM_DESC(packet_dropper_time, "packet_dropper_time");

unsigned long int rmnet_perf_flush_shs = 0;
module_param(rmnet_perf_flush_shs, ulong, 0644);
MODULE_PARM_DESC(rmnet_perf_flush_shs, "rmnet_perf_flush_shs");

unsigned long int rmnet_perf_frag_flush = 0;
module_param(rmnet_perf_frag_flush, ulong, 0444);
MODULE_PARM_DESC(rmnet_perf_frag_flush,
		 "Number of packet fragments flushed to stack");

#define SHS_FLUSH 0

/* rmnet_perf_core_free_held_skbs() - Free held SKBs given to us by physical
 *		device
 * @perf: allows access to our required global structures
@@ -357,11 +372,10 @@ void rmnet_perf_core_send_skb(struct sk_buff *skb, struct rmnet_endpoint *ep,
		rmnet_set_skb_proto(skb);
		/* If the checksum is unnecessary, update the header fields.
		 * Otherwise, we know that this is a single packet that
		 * failed checksum validation, so we don't want to touch
		 * the headers.
		 * either failed checksum validation, or is not coalescable
		 * (fragment, ICMP, etc), so don't touch the headers.
		 */
		if (ip4hn->protocol == IPPROTO_TCP &&
		    skb->ip_summed == CHECKSUM_UNNECESSARY) {
		if (skb_csum_unnecessary(skb)) {
			ip4hn->tot_len = htons(skb->len);
			ip4hn->check = 0;
			ip4hn->check = ip_fast_csum(ip4hn, (int)ip4hn->ihl);
@@ -370,8 +384,7 @@ void rmnet_perf_core_send_skb(struct sk_buff *skb, struct rmnet_endpoint *ep,
	} else if (ip_version == 0x06) {
		ip6hn = (struct ipv6hdr *)data;
		rmnet_set_skb_proto(skb);
		if (ip6hn->nexthdr == IPPROTO_TCP &&
		    skb->ip_summed == CHECKSUM_UNNECESSARY) {
		if (skb_csum_unnecessary(skb)) {
			ip6hn->payload_len = htons(skb->len -
						   sizeof(struct ipv6hdr));
		}
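
When a single packet is flushed with a checksum already known to be good, the driver rewrites the IPv4 total length and recomputes the header checksum via ip_fast_csum(). That helper is an optimized form of the usual 16-bit one's-complement sum over the header; a userspace reference sketch:

/* Reference implementation of the IPv4 header checksum (RFC 1071 style);
 * ip_fast_csum() in the kernel computes the same value in optimized form.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint16_t ip_header_csum(const void *hdr, unsigned int ihl)
{
	const unsigned char *p = hdr;
	uint32_t sum = 0;
	unsigned int i;
	uint16_t word;

	for (i = 0; i < ihl * 4; i += 2) {      /* ihl counts 32-bit words */
		memcpy(&word, p + i, sizeof(word));
		sum += word;
	}
	while (sum >> 16)                       /* fold carries back in */
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

int main(void)
{
	/* 20-byte IPv4 header with the checksum field (bytes 10-11) zeroed */
	unsigned char hdr[20] = {
		0x45, 0x00, 0x00, 0x54, 0x00, 0x00, 0x40, 0x00,
		0x40, 0x01, 0x00, 0x00, 0xc0, 0xa8, 0x00, 0x01,
		0xc0, 0xa8, 0x00, 0xc7,
	};

	printf("header checksum = 0x%04x\n", ip_header_csum(hdr, 5));
	return 0;
}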
@@ -397,7 +410,8 @@ void rmnet_perf_core_send_skb(struct sk_buff *skb, struct rmnet_endpoint *ep,
void rmnet_perf_core_flush_curr_pkt(struct rmnet_perf *perf,
				    struct sk_buff *skb,
				    struct rmnet_perf_pkt_info *pkt_info,
				    u16 packet_len)
				    u16 packet_len, bool flush_shs,
				    bool skip_hash)
{
	struct sk_buff *skbn;
	struct rmnet_endpoint *ep = pkt_info->ep;
@@ -407,7 +421,6 @@ void rmnet_perf_core_flush_curr_pkt(struct rmnet_perf *perf,
		return;
	}

	if (pkt_info->trans_proto != IPPROTO_UDP || packet_len < 64) {
	/* allocate the sk_buff of proper size for this packet */
	skbn = alloc_skb(packet_len + RMNET_MAP_DEAGGR_SPACING,
			 GFP_ATOMIC);
@@ -417,26 +430,40 @@ void rmnet_perf_core_flush_curr_pkt(struct rmnet_perf *perf,
	skb_reserve(skbn, RMNET_MAP_DEAGGR_HEADROOM);
	skb_put(skbn, packet_len);
	memcpy(skbn->data, pkt_info->iphdr.v4hdr, packet_len);
	} else {
		skbn = skb_clone(skb, GFP_ATOMIC);
		if (!skbn)
			return;

		skb_pull(skbn, sizeof(struct rmnet_map_header));
		skb_trim(skbn, packet_len);
		skbn->truesize = SKB_TRUESIZE(packet_len);
		__skb_set_hash(skbn, 0, 0, 0);
	}

	/* If the packet passed checksum validation, tell the stack */
	if (pkt_info->csum_valid)
		skbn->ip_summed = CHECKSUM_UNNECESSARY;
	skbn->dev = skb->dev;

	/* Only set hash info if we actually calculated it */
	if (!skip_hash) {
		skbn->hash = pkt_info->hash_key;
		skbn->sw_hash = 1;
	}

	skbn->cb[SHS_FLUSH] = (char) flush_shs;
	rmnet_perf_core_send_skb(skbn, ep, perf, pkt_info);
}

/* DL marker is off, we need to flush more aggressively at end of chains */
void rmnet_perf_core_ps_on(void *port)
{
	struct rmnet_perf *perf = rmnet_perf_config_get_perf();

	rmnet_perf_core_bm_flush_on = 0;
	rmnet_perf_opt_flush_all_flow_nodes(perf);
	rmnet_perf_core_flush_reason_cnt[RMNET_PERF_CORE_PS_MODE_ON]++;
	/* Essentially resets expected packet count to safe state */
	perf->core_meta->bm_state->expect_packets = -1;
}

/* DL marker on, we can try to coalesce more packets */
void rmnet_perf_core_ps_off(void *port)
{
	rmnet_perf_core_bm_flush_on = 1;
}

void
rmnet_perf_core_handle_map_control_start(struct rmnet_map_dl_ind_hdr *dlhdr)
{
@@ -449,7 +476,7 @@ rmnet_perf_core_handle_map_control_start(struct rmnet_map_dl_ind_hdr *dlhdr)
	 */
	if (!bm_state->wait_for_start) {
		/* flush everything, we got a 2nd start */
		rmnet_perf_tcp_opt_flush_all_flow_nodes(perf);
		rmnet_perf_opt_flush_all_flow_nodes(perf);
		rmnet_perf_core_flush_reason_cnt[
					RMNET_PERF_CORE_DL_MARKER_FLUSHES]++;
	} else {
@@ -469,7 +496,7 @@ void rmnet_perf_core_handle_map_control_end(struct rmnet_map_dl_ind_trl *dltrl)
	struct rmnet_perf_core_burst_marker_state *bm_state;

	bm_state = perf->core_meta->bm_state;
	rmnet_perf_tcp_opt_flush_all_flow_nodes(perf);
	rmnet_perf_opt_flush_all_flow_nodes(perf);
	rmnet_perf_core_flush_reason_cnt[RMNET_PERF_CORE_DL_MARKER_FLUSHES]++;
	bm_state->wait_for_start = true;
	bm_state->curr_seq = 0;
@@ -510,7 +537,9 @@ void rmnet_perf_core_handle_packet_ingress(struct sk_buff *skb,
				 (skb->data + sizeof(struct rmnet_map_header));
	struct rmnet_perf *perf = rmnet_perf_config_get_perf();
	u16 pkt_len;
	bool skip_hash = false;

	pkt_len = frame_len - sizeof(struct rmnet_map_header) - trailer_len;
	pkt_info->ep = ep;
	pkt_info->ip_proto = (*payload & 0xF0) >> 4;
	if (pkt_info->ip_proto == 4) {
@@ -519,18 +548,23 @@ void rmnet_perf_core_handle_packet_ingress(struct sk_buff *skb,
		pkt_info->iphdr.v4hdr = iph;
		pkt_info->trans_proto = iph->protocol;
		pkt_info->header_len = iph->ihl * 4;
		skip_hash = !!(ntohs(iph->frag_off) & (IP_MF | IP_OFFSET));
	} else if (pkt_info->ip_proto == 6) {
		struct ipv6hdr *iph = (struct ipv6hdr *)payload;

		pkt_info->iphdr.v6hdr = iph;
		pkt_info->trans_proto = iph->nexthdr;
		pkt_info->header_len = sizeof(*iph);
		skip_hash = iph->nexthdr == NEXTHDR_FRAGMENT;
	} else {
		pr_err("%s(): invalid packet\n", __func__);
		return;
	}

	pkt_len = frame_len - sizeof(struct rmnet_map_header) - trailer_len;
	/* Push out fragments immediately */
	if (skip_hash) {
		rmnet_perf_frag_flush++;
		goto flush;
	}
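
The new skip_hash handling treats IPv4 packets with the more-fragments flag or a non-zero fragment offset, and IPv6 packets whose first next-header is the fragment header, as non-coalescable: they are counted in rmnet_perf_frag_flush and flushed straight to the stack without a flow hash. A small standalone sketch of the two tests (the DEMO_* constants mirror the kernel's IP_MF, IP_OFFSET and NEXTHDR_FRAGMENT values):

/* Standalone sketch of the fragment tests used above. */
#include <arpa/inet.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define DEMO_IP_MF            0x2000   /* more-fragments flag */
#define DEMO_IP_OFFSET        0x1FFF   /* fragment offset mask */
#define DEMO_NEXTHDR_FRAGMENT 44       /* IPv6 fragment extension header */

static bool ipv4_is_fragment(const struct iphdr *iph)
{
	return ntohs(iph->frag_off) & (DEMO_IP_MF | DEMO_IP_OFFSET);
}

static bool ipv6_is_fragment(const struct ip6_hdr *ip6h)
{
	/* only the first next-header is checked, as in the driver */
	return ip6h->ip6_nxt == DEMO_NEXTHDR_FRAGMENT;
}

int main(void)
{
	struct iphdr iph;
	struct ip6_hdr ip6h;

	memset(&iph, 0, sizeof(iph));
	memset(&ip6h, 0, sizeof(ip6h));
	iph.frag_off = htons(DEMO_IP_MF);       /* first fragment of a chain */
	ip6h.ip6_nxt = DEMO_NEXTHDR_FRAGMENT;

	printf("v4 fragment: %d, v6 fragment: %d\n",
	       ipv4_is_fragment(&iph), ipv6_is_fragment(&ip6h));
	return 0;
}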

	if (pkt_info->trans_proto == IPPROTO_TCP) {
		struct tcphdr *tp = (struct tcphdr *)
@@ -545,7 +579,8 @@ void rmnet_perf_core_handle_packet_ingress(struct sk_buff *skb,
		if (rmnet_perf_core_validate_pkt_csum(skb, pkt_info))
			goto flush;

		rmnet_perf_tcp_opt_ingress(perf, skb, pkt_info);
		if (!rmnet_perf_opt_ingress(perf, skb, pkt_info))
			goto flush;
	} else if (pkt_info->trans_proto == IPPROTO_UDP) {
		struct udphdr *up = (struct udphdr *)
				    (payload + pkt_info->header_len);
@@ -556,16 +591,14 @@ void rmnet_perf_core_handle_packet_ingress(struct sk_buff *skb,
		pkt_info->hash_key =
			rmnet_perf_core_compute_flow_hash(pkt_info);

		/* We flush anyway, so the result of the validation
		 * does not need to be checked.
		 */
		rmnet_perf_core_validate_pkt_csum(skb, pkt_info);
		if (rmnet_perf_core_validate_pkt_csum(skb, pkt_info))
			goto flush;

		if (!rmnet_perf_opt_ingress(perf, skb, pkt_info))
			goto flush;
	} else {
		pkt_info->payload_len = pkt_len - pkt_info->header_len;
		pkt_info->hash_key =
			rmnet_perf_core_compute_flow_hash(pkt_info);

		skip_hash = true;
		/* We flush anyway, so the result of the validation
		 * does not need to be checked.
		 */
@@ -576,7 +609,8 @@ void rmnet_perf_core_handle_packet_ingress(struct sk_buff *skb,
	return;

flush:
	rmnet_perf_core_flush_curr_pkt(perf, skb, pkt_info, pkt_len);
	rmnet_perf_core_flush_curr_pkt(perf, skb, pkt_info, pkt_len, false,
				       skip_hash);
}
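
The hash_key assigned in this path comes from rmnet_perf_core_compute_flow_hash(), whose implementation is not part of this diff. Purely as an illustration of what a 5-tuple flow hash looks like (not the driver's actual function, and all names below are made up):

/* Hypothetical 5-tuple hash, for illustration only. */
#include <stdint.h>
#include <stdio.h>

static uint32_t mix(uint32_t h, uint32_t v)
{
	h ^= v;
	h *= 0x9e3779b1u;                 /* simple multiplicative mixing */
	return (h << 13) | (h >> 19);
}

static uint32_t five_tuple_hash(uint32_t saddr, uint32_t daddr,
				uint16_t sport, uint16_t dport,
				uint8_t proto)
{
	uint32_t h = 0x12345678u;         /* arbitrary seed */

	h = mix(h, saddr);
	h = mix(h, daddr);
	h = mix(h, ((uint32_t)sport << 16) | dport);
	return mix(h, proto);
}

int main(void)
{
	printf("hash = 0x%08x\n",
	       five_tuple_hash(0xc0a80001u, 0x08080808u, 443, 51234, 6));
	return 0;
}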

/* rmnet_perf_core_deaggregate() - Deaggregated ip packets from map frame
@@ -659,6 +693,13 @@ void rmnet_perf_core_deaggregate(struct sk_buff *skb,
				goto bad_data;
			skb->dev = ep->egress_dev;

#ifdef CONFIG_QCOM_QMI_POWER_COLLAPSE
			/* Wakeup PS work on DL packets */
			if ((port->data_format & RMNET_INGRESS_FORMAT_PS) &&
					!RMNET_MAP_GET_CD_BIT(skb))
				qmi_rmnet_work_maybe_restart(port);
#endif

			if (enable_packet_dropper) {
				getnstimeofday(&curr_time);
				if (last_drop_time.tv_sec == 0 &&
@@ -694,13 +735,13 @@ void rmnet_perf_core_deaggregate(struct sk_buff *skb,
	 */
	if (!rmnet_perf_core_bm_flush_on ||
	    (int) perf->core_meta->bm_state->expect_packets <= 0) {
		rmnet_perf_tcp_opt_flush_all_flow_nodes(perf);
		rmnet_perf_opt_flush_all_flow_nodes(perf);
		rmnet_perf_core_free_held_skbs(perf);
		rmnet_perf_core_flush_reason_cnt[
					RMNET_PERF_CORE_IPA_ZERO_FLUSH]++;
	} else if (perf->core_meta->skb_needs_free_list->num_skbs_held >=
		   rmnet_perf_core_num_skbs_max) {
		rmnet_perf_tcp_opt_flush_all_flow_nodes(perf);
		rmnet_perf_opt_flush_all_flow_nodes(perf);
		rmnet_perf_core_free_held_skbs(perf);
		rmnet_perf_core_flush_reason_cnt[
					RMNET_PERF_CORE_SK_BUFF_HELD_LIMIT]++;
@@ -708,7 +749,7 @@ void rmnet_perf_core_deaggregate(struct sk_buff *skb,

	goto update_stats;
drop_packets:
	rmnet_perf_tcp_opt_flush_all_flow_nodes(perf);
	rmnet_perf_opt_flush_all_flow_nodes(perf);
	rmnet_perf_core_free_held_skbs(perf);
update_stats:
	rmnet_perf_core_pre_ip_count += co;
+8 −3
/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
@@ -20,7 +20,7 @@
#define RMNET_PERF_CORE_RECYCLE_SKB_SIZE    65600//33000//32768//65600

struct rmnet_perf {
	struct rmnet_perf_tcp_opt_meta *tcp_opt_meta;
	struct rmnet_perf_opt_meta *opt_meta;
	struct rmnet_perf_core_meta *core_meta;
	struct rmnet_port *rmnet_port;
};
@@ -79,12 +79,14 @@ struct rmnet_perf_core_meta {
	//spinlock_t timer_lock;
	struct rmnet_perf_core_burst_marker_state *bm_state;
	struct rmnet_map_dl_ind *dl_ind;
	struct qmi_rmnet_ps_ind *ps_ind;
};

enum rmnet_perf_core_flush_reasons {
	RMNET_PERF_CORE_IPA_ZERO_FLUSH,
	RMNET_PERF_CORE_SK_BUFF_HELD_LIMIT,
	RMNET_PERF_CORE_DL_MARKER_FLUSHES,
	RMNET_PERF_CORE_PS_MODE_ON,
	RMNET_PERF_CORE_NUM_CONDITIONS
};
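
The flush-reason enum gains RMNET_PERF_CORE_PS_MODE_ON, and the terminal RMNET_PERF_CORE_NUM_CONDITIONS entry keeps sizing the per-reason counter array that the .c file increments. A tiny sketch of that counters-indexed-by-enum idiom, with hypothetical names:

/* Sketch of the counters-indexed-by-enum idiom: the terminal
 * NUM_CONDITIONS entry sizes the array, so adding a reason such as
 * PS_MODE_ON automatically grows it.
 */
#include <stdio.h>

enum demo_flush_reason {
	DEMO_IPA_ZERO_FLUSH,
	DEMO_SK_BUFF_HELD_LIMIT,
	DEMO_DL_MARKER_FLUSHES,
	DEMO_PS_MODE_ON,
	DEMO_NUM_CONDITIONS,
};

static unsigned long demo_flush_reason_cnt[DEMO_NUM_CONDITIONS];

int main(void)
{
	int i;

	demo_flush_reason_cnt[DEMO_PS_MODE_ON]++;   /* e.g. a power-save ON event */
	for (i = 0; i < DEMO_NUM_CONDITIONS; i++)
		printf("reason %d: %lu\n", i, demo_flush_reason_cnt[i]);
	return 0;
}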

@@ -109,6 +111,8 @@ enum rmnet_perf_trace_evt {
	RMNET_PERF_DEAG_PKT,
};

void rmnet_perf_core_ps_on(void *port);
void rmnet_perf_core_ps_off(void *port);
void rmnet_perf_core_reset_recycled_skb(struct sk_buff *skb);
struct sk_buff *rmnet_perf_core_elligible_for_cache_skb(struct rmnet_perf *perf,
							u32 len);
@@ -119,7 +123,8 @@ void rmnet_perf_core_send_skb(struct sk_buff *skb, struct rmnet_endpoint *ep,
void rmnet_perf_core_flush_curr_pkt(struct rmnet_perf *perf,
				    struct sk_buff *skb,
				    struct rmnet_perf_pkt_info *pkt_info,
				    u16 packet_len);
				    u16 packet_len, bool flush_shs,
				    bool skip_hash);
void rmnet_perf_core_deaggregate(struct sk_buff *skb,
				struct rmnet_port *port);
u32 rmnet_perf_core_compute_flow_hash(struct rmnet_perf_pkt_info *pkt_info);