Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 72a8f8d6 authored by Sean Tranchetti, committed by Gerrit - the friendly Code Review server
Browse files

drivers: rmnet_perf: Update logic for nonlinear SKBs



Perf now has to handle nonlinear SKBs arriving over a new path from the
core RmNet driver. No deaggregation will be performed. Instead, perf's
job is to perform GRO on packets that (possibly) have GSO information
already present in the SKB.

Change-Id: I66d5b01f0b0f0c45a8efcf79dc90f3cc22bc76e5
Signed-off-by: Sean Tranchetti <stranche@codeaurora.org>
parent 7c335cd9
Loading
Loading
Loading
Loading
+33 −19
Original line number Diff line number Diff line
@@ -115,16 +115,15 @@ rmnet_perf_config_free_resources(struct rmnet_perf *perf)
	}

	/* Free everything flow nodes currently hold */
	rmnet_perf_opt_flush_all_flow_nodes(perf);
	rmnet_perf_opt_flush_all_flow_nodes();

	/* Get rid of 64k sk_buff cache */
	rmnet_perf_config_free_64k_buffs(perf);
	/* Before we free tcp_opt's structures, make sure we arent holding
	 * any SKB's hostage
	 */
	rmnet_perf_core_free_held_skbs(perf);
	rmnet_perf_core_free_held_skbs();

	//rmnet_perf_core_timer_exit(perf->core_meta);
	/* Since we allocated in one chunk, we will also free in one chunk */
	kfree(perf);

@@ -192,11 +191,11 @@ static int rmnet_perf_config_allocate_resources(struct rmnet_perf **perf)
		*flow_node = buffer_head;
		buffer_head += flow_node_size;
		(*flow_node)->num_pkts_held = 0;
		(*flow_node)->len = 0;
	}

	local_perf->core_meta = buffer_head;
	core_meta = local_perf->core_meta;
	//rmnet_perf_core_timer_init(core_meta);
	buffer_head += core_meta_size;

	/* Assign common (not specific to something like opt) structures */
@@ -337,6 +336,18 @@ rmnet_perf_dereg_callbacks(struct net_device *dev,
	return return_val_final;
}

/* Check whether either rmnet_perf ingress hook is currently installed.
 *
 * Returns true if the legacy deaggregation hook or the new frag-descriptor
 * hook is set; used by the netdevice notifier to decide whether perf
 * resources are already allocated (register path) or need tearing down
 * (unregister path).
 *
 * The hook pointers are only tested against NULL here, never dereferenced,
 * so rcu_access_pointer() is the appropriate accessor: unlike
 * rcu_dereference(), it does not require an RCU read-side critical section
 * and will not trip lockdep when called from notifier context.
 */
static bool rmnet_perf_config_hook_registered(void)
{
	int (*deag_entry)(struct sk_buff *skb);
	void (*frag_entry)(struct rmnet_frag_descriptor *frag_desc,
			   struct rmnet_port *port);

	deag_entry = rcu_access_pointer(rmnet_perf_deag_entry);
	frag_entry = rcu_access_pointer(rmnet_perf_desc_entry);

	return deag_entry || frag_entry;
}

/* TODO Needs modifying*/
static int rmnet_perf_config_notify_cb(struct notifier_block *nb,
				       unsigned long event, void *data)
@@ -352,11 +363,12 @@ static int rmnet_perf_config_notify_cb(struct notifier_block *nb,
	switch (event) {
	case NETDEV_UNREGISTER:
		if (rmnet_is_real_dev_registered(dev) &&
		    rmnet_perf_deag_entry &&
		    !strncmp(dev->name, "rmnet_ipa0", 10)) {
		    rmnet_perf_config_hook_registered() &&
		    (!strncmp(dev->name, "rmnet_ipa0", 10) ||
		     !strncmp(dev->name, "rmnet_mhi0", 10))) {
			struct rmnet_perf_core_meta *core_meta =
				perf->core_meta;
			pr_err("%s(): rmnet_perf netdevice unregister\n",
			pr_info("%s(): rmnet_perf netdevice unregister\n",
				__func__);
			return_val = rmnet_perf_dereg_callbacks(dev, core_meta);
			return_val |= rmnet_perf_netdev_down();
@@ -364,19 +376,22 @@ static int rmnet_perf_config_notify_cb(struct notifier_block *nb,
				pr_err("%s(): Error on netdev down event\n",
				       __func__);
			RCU_INIT_POINTER(rmnet_perf_deag_entry, NULL);
			RCU_INIT_POINTER(rmnet_perf_desc_entry, NULL);
			RCU_INIT_POINTER(rmnet_perf_chain_end, NULL);
		}
		break;
	case NETDEV_REGISTER:
		pr_err("%s(): rmnet_perf netdevice register, name = %s\n",
		pr_info("%s(): rmnet_perf netdevice register, name = %s\n",
			__func__, dev->name);
		/* Check prevents us from allocating resources for every
		 * interface
		 */
		if (!rmnet_perf_deag_entry &&
		if (!rmnet_perf_config_hook_registered() &&
		    strncmp(dev->name, "rmnet_data", 10) == 0) {
			struct rmnet_priv *priv = netdev_priv(dev);

			port = rmnet_get_port(priv->real_dev);
			return_val |= rmnet_perf_netdev_up(priv->real_dev,
			return_val = rmnet_perf_netdev_up(priv->real_dev,
							  port);
			if (return_val == RMNET_PERF_RESOURCE_MGMT_FAIL) {
				pr_err("%s(): rmnet_perf allocation "
@@ -390,10 +405,9 @@ static int rmnet_perf_config_notify_cb(struct notifier_block *nb,
				       "failed. Continue without them\n",
					__func__);
			}
			RCU_INIT_POINTER(rmnet_perf_deag_entry,
					 rmnet_perf_core_deaggregate);
			pr_err("%s(): rmnet_perf registered on "
			       "name = %s\n", __func__, dev->name);
			rmnet_perf_core_set_ingress_hook();
			pr_info("%s(): rmnet_perf registered on name = %s\n",
				__func__, dev->name);
		}
		break;
	default:
@@ -409,13 +423,13 @@ static struct notifier_block rmnet_perf_dev_notifier __read_mostly = {

int __init rmnet_perf_init(void)
{
	pr_err("%s(): initializing rmnet_perf\n", __func__);
	pr_info("%s(): initializing rmnet_perf\n", __func__);
	return register_netdevice_notifier(&rmnet_perf_dev_notifier);
}

void __exit rmnet_perf_exit(void)
{
	pr_err("%s(): exiting rmnet_perf\n", __func__);
	pr_info("%s(): exiting rmnet_perf\n", __func__);
	unregister_netdevice_notifier(&rmnet_perf_dev_notifier);
}

+5 −18
Original line number Diff line number Diff line
@@ -15,6 +15,7 @@

#include <linux/skbuff.h>
#include <../drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h>
#include <../drivers/net/ethernet/qualcomm/rmnet/rmnet_descriptor.h>
#include <../drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.h>
#include <../drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h>
#include <../drivers/net/ethernet/qualcomm/rmnet/rmnet_trace.h>
@@ -32,25 +33,11 @@ enum rmnet_perf_resource_management_e {
};

/* rmnet based variables that we rely on*/
extern void rmnet_deliver_skb(struct sk_buff *skb, struct rmnet_port *port);
extern struct rmnet_endpoint *rmnet_get_endpoint(struct rmnet_port *port,
						 u8 mux_id);
extern int rmnet_is_real_dev_registered(const struct net_device *real_dev);
extern void rmnet_set_skb_proto(struct sk_buff *skb);
extern int (*rmnet_perf_deag_entry)(struct sk_buff *skb);
extern int rmnet_map_checksum_downlink_packet(struct sk_buff *skb, u16 len);
extern struct napi_struct *get_current_napi_context(void);
//extern int napi_gro_complete(struct sk_buff *skb);

extern int rmnet_map_flow_command(struct sk_buff *skb, struct rmnet_port *port,
				bool rmnet_perf);
extern int rmnet_map_dl_ind_register(struct rmnet_port *port,
			      struct rmnet_map_dl_ind *dl_ind);
extern int rmnet_map_dl_ind_deregister(struct rmnet_port *port,
				struct rmnet_map_dl_ind *dl_ind);
extern struct rmnet_port *rmnet_get_port(struct net_device *real_dev);
extern void rmnet_map_cmd_init(struct rmnet_port *port);
extern void rmnet_map_cmd_exit(struct rmnet_port *port);
extern void (*rmnet_perf_desc_entry)(struct rmnet_frag_descriptor *frag_desc,
				     struct rmnet_port *port);
extern void (*rmnet_perf_chain_end)(void);


/* Function declarations */
struct rmnet_perf *rmnet_perf_config_get_perf(void);
+442 −280

File changed.

Preview size limit exceeded, changes collapsed.

+37 −18
Original line number Diff line number Diff line
@@ -12,6 +12,7 @@
#include <linux/skbuff.h>
#include <../drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h>
#include <../drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h>
#include <../drivers/net/ethernet/qualcomm/rmnet/rmnet_descriptor.h>

#ifndef _RMNET_PERF_CORE_H_
#define _RMNET_PERF_CORE_H_
@@ -30,23 +31,44 @@ struct rmnet_perf {
 * header of the packet. Also prevents excessive parameters
 */
struct rmnet_perf_pkt_info {
	/* True if this is the first packet being put into a flow node. */
	bool first_packet;
	bool csum_valid;
	unsigned char ip_proto;
	unsigned char trans_proto;
	u16 header_len;

	/* Header protocols */
	u8 ip_proto;
	u8 trans_proto;

	/* Header lengths */
	u16 ip_len;
	u16 trans_len;

	/* Data length */
	u16 payload_len;

	/* Hash over standard 5 tuple */
	u32 hash_key;

	/* TCP timestamp */
	u32 curr_timestamp;

	/* Headers */
	union {
		struct iphdr *v4hdr;
		struct ipv6hdr *v6hdr;
	} iphdr;
	} ip_hdr;
	union {
		struct tcphdr *tp;
		struct udphdr *up;
	} trns_hdr;
	} trans_hdr;

	struct rmnet_endpoint *ep;

	/* The base packet itself */
	union {
		struct sk_buff *skb;
		struct rmnet_frag_descriptor *frag_desc;
	};
};

struct rmnet_perf_core_64k_buff_pool {
@@ -75,8 +97,6 @@ struct rmnet_perf_core_meta {
	/* recycled buffer pool */
	struct rmnet_perf_core_64k_buff_pool *buff_pool;
	struct net_device *dev;
	//struct hrtimer hrtimer;
	//spinlock_t timer_lock;
	struct rmnet_perf_core_burst_marker_state *bm_state;
	struct rmnet_map_dl_ind *dl_ind;
	struct qmi_rmnet_ps_ind *ps_ind;
@@ -113,22 +133,21 @@ enum rmnet_perf_trace_evt {

void rmnet_perf_core_ps_on(void *port);
void rmnet_perf_core_ps_off(void *port);
bool rmnet_perf_core_is_deag_mode(void);
void rmnet_perf_core_set_ingress_hook(void);
void rmnet_perf_core_reset_recycled_skb(struct sk_buff *skb);
struct sk_buff *rmnet_perf_core_elligible_for_cache_skb(struct rmnet_perf *perf,
							u32 len);
void rmnet_perf_core_free_held_skbs(struct rmnet_perf *perf);
void rmnet_perf_core_send_skb(struct sk_buff *skb, struct rmnet_endpoint *ep,
			      struct rmnet_perf *perf,
			      struct rmnet_perf_pkt_info *pkt_info);
void rmnet_perf_core_flush_curr_pkt(struct rmnet_perf *perf,
				    struct sk_buff *skb,
				    struct rmnet_perf_pkt_info *pkt_info,
struct sk_buff *rmnet_perf_core_elligible_for_cache_skb(u32 len);
void rmnet_perf_core_free_held_skbs(void);
void rmnet_perf_core_send_skb(struct sk_buff *skb, struct rmnet_endpoint *ep);
void rmnet_perf_core_send_desc(struct rmnet_frag_descriptor *frag_desc);
void rmnet_perf_core_flush_curr_pkt(struct rmnet_perf_pkt_info *pkt_info,
				    u16 packet_len, bool flush_shs,
				    bool skip_hash);
void rmnet_perf_core_deaggregate(struct sk_buff *skb,
				 struct rmnet_port *port);
void rmnet_perf_core_desc_entry(struct rmnet_frag_descriptor *frag_desc,
				struct rmnet_port *port);
u32 rmnet_perf_core_compute_flow_hash(struct rmnet_perf_pkt_info *pkt_info);
void rmnet_perf_core_flush_single_gro_flow(u32 hash_key);
void rmnet_perf_core_handle_map_control_end(struct rmnet_map_dl_ind_trl *dltrl);
void
rmnet_perf_core_handle_map_control_start(struct rmnet_map_dl_ind_hdr *dlhdr);
+240 −176

File changed.

Preview size limit exceeded, changes collapsed.

Loading