
Commit fe3dede6 authored by Subash Abhinov Kasiviswanathan

net: rmnet_data: Go back to worker thread for UL aggregation



A crash was seen when running the hrtimer solution, so revert the
change until it is resolved. The crash occurs because a function
later in the call stack enables interrupts while still running in
interrupt context.

[ffffff8cc22b157c] __local_bh_enable_ip+0xa8/0x114
[ffffff8cc33495d0] _raw_spin_unlock_bh+0x30/0x3c
[ffffff8cc2e57ed8] ipa3_send+0x2a0/0xaa0
[ffffff8cc2e5c544] ipa3_tx_dp+0x59c/0x990
[ffffff8cc2ea7ef0] ipa3_wwan_xmit+0x204/0x338
[ffffff8cc30f0160] dev_hard_start_xmit+0xc4/0x29c
[ffffff8cc311d0f4] sch_direct_xmit+0x100/0x1cc
[ffffff8cc30f0890] __dev_queue_xmit+0x1f4/0x704
[ffffff8cc30f0dc8] dev_queue_xmit+0x28/0x34
[ffffff8cc333285c] rmnet_map_flush_packet_queue+0xc0/0x218
[ffffff8cc2344f9c] __hrtimer_run_queues+0x158/0x36c
[ffffff8cc2345f38] hrtimer_interrupt+0xb0/0x1f4

CRs-fixed: 2147503
Change-Id: I7b3617577cbf5e166380d4361b73e2f57a4bd042
Signed-off-by: Subash Abhinov Kasiviswanathan <subashab@codeaurora.org>
parent 54d765eb
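
For reference, the trace boils down to a context mismatch: an hrtimer callback runs in hard-IRQ context, while the flush path ends up in driver code (ipa3_send in the trace) releasing a BH-disabling spinlock, and __local_bh_enable_ip() must not be called from hard-IRQ context. The sketch below (hypothetical demo_* names, not part of this commit) illustrates the constraint and why deferring the flush to a worker, which runs in process context, avoids it.

/*
 * Hypothetical minimal sketch (not part of this commit) of why the
 * hrtimer-based flush is unsafe: hrtimer callbacks run in hard-IRQ
 * context, but the flush path eventually reaches driver code that
 * takes and releases a BH-disabling lock (ipa3_send() in the trace
 * above). spin_unlock_bh() calls __local_bh_enable_ip(), which must
 * not run in hard-IRQ context, hence the crash. Doing the same work
 * from a workqueue moves it into process context, where this is legal.
 */
#include <linux/hrtimer.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

static DEFINE_SPINLOCK(demo_lock);

/* Stand-in for the driver transmit path (e.g. ipa3_send()). */
static void demo_driver_xmit(void)
{
	spin_lock_bh(&demo_lock);
	/* ... hand the frame to hardware ... */
	spin_unlock_bh(&demo_lock);	/* BH enable: invalid in hard IRQ */
}

/* BROKEN: runs in hard-IRQ context, so the BH unlock above blows up. */
static enum hrtimer_restart demo_hrtimer_flush(struct hrtimer *t)
{
	demo_driver_xmit();
	return HRTIMER_NORESTART;
}

/* SAFE: worker runs in process context. */
static void demo_work_flush(struct work_struct *work)
{
	demo_driver_xmit();
}
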
+1 −19
@@ -25,7 +25,6 @@
#include "rmnet_data_vnd.h"
#include "rmnet_data_private.h"
#include "rmnet_data_trace.h"
#include "rmnet_map.h"

RMNET_LOG_MODULE(RMNET_DATA_LOGMASK_CONFIG);

@@ -870,8 +869,7 @@ int rmnet_associate_network_device(struct net_device *dev)
	conf->dev = dev;
	spin_lock_init(&conf->agg_lock);
	config->recycle = kfree_skb;
	hrtimer_init(&conf->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	conf->hrtimer.function = rmnet_map_flush_packet_queue;

	rc = netdev_rx_handler_register(dev, rmnet_rx_handler, config);

	if (rc) {
@@ -1234,22 +1232,6 @@ static void rmnet_force_unassociate_device(struct net_device *dev)
	config = _rmnet_get_phys_ep_config(dev);

	if (config) {
		unsigned long flags;

		hrtimer_cancel(&config->hrtimer);
		spin_lock_irqsave(&config->agg_lock, flags);
		if (config->agg_state == RMNET_MAP_TXFER_SCHEDULED) {
			if (config->agg_skb) {
				kfree_skb(config->agg_skb);
				config->agg_skb = NULL;
				config->agg_count = 0;
				memset(&config->agg_time, 0,
				       sizeof(struct timespec));
			}
			config->agg_state = RMNET_MAP_AGG_IDLE;
		}
		spin_unlock_irqrestore(&config->agg_lock, flags);

		cfg = &config->local_ep;

		if (cfg && cfg->refcount)
+0 −2
@@ -16,7 +16,6 @@
#include <linux/time.h>
#include <linux/spinlock.h>
#include <net/rmnet_config.h>
#include <linux/hrtimer.h>

#ifndef _RMNET_DATA_CONFIG_H_
#define _RMNET_DATA_CONFIG_H_
@@ -86,7 +85,6 @@ struct rmnet_phys_ep_config {
	u8 agg_count;
	struct timespec agg_time;
	struct timespec agg_last;
	struct hrtimer hrtimer;
};

int rmnet_config_init(void);
+0 −1
@@ -147,5 +147,4 @@ int rmnet_map_checksum_uplink_packet(struct sk_buff *skb,
				     struct net_device *orig_dev,
				     u32 egress_data_format);
int rmnet_ul_aggregation_skip(struct sk_buff *skb, int offset);
enum hrtimer_restart rmnet_map_flush_packet_queue(struct hrtimer *t);
#endif /* _RMNET_MAP_H_ */
+26 −9
@@ -18,6 +18,7 @@
#include <linux/netdevice.h>
#include <linux/rmnet_data.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/time.h>
#include <linux/net_map.h>
#include <linux/ip.h>
@@ -47,6 +48,11 @@ long agg_bypass_time __read_mostly = 10000000L;
module_param(agg_bypass_time, long, 0644);
MODULE_PARM_DESC(agg_bypass_time, "Skip agg when apart spaced more than this");

struct agg_work {
	struct delayed_work work;
	struct rmnet_phys_ep_config *config;
};

#define RMNET_MAP_DEAGGR_SPACING  64
#define RMNET_MAP_DEAGGR_HEADROOM (RMNET_MAP_DEAGGR_SPACING / 2)

@@ -160,21 +166,24 @@ struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb,
}

/* rmnet_map_flush_packet_queue() - Transmits aggregeted frame on timeout
 * @work:        struct agg_work containing delayed work and skb to flush
 *
 * This function is scheduled to run in a specified number of ns after
 * This function is scheduled to run in a specified number of jiffies after
 * the last frame transmitted by the network stack. When run, the buffer
 * containing aggregated packets is finally transmitted on the underlying link.
 *
 */
enum hrtimer_restart rmnet_map_flush_packet_queue(struct hrtimer *t)
static void rmnet_map_flush_packet_queue(struct work_struct *work)
{
	struct agg_work *real_work;
	struct rmnet_phys_ep_config *config;
	unsigned long flags;
	struct sk_buff *skb;
	int rc, agg_count = 0;

	config = container_of(t, struct rmnet_phys_ep_config, hrtimer);
	skb = 0;
	real_work = (struct agg_work *)work;
	config = real_work->config;
	LOGD("%s", "Entering flush thread");
	spin_lock_irqsave(&config->agg_lock, flags);
	if (likely(config->agg_state == RMNET_MAP_TXFER_SCHEDULED)) {
@@ -202,8 +211,7 @@ enum hrtimer_restart rmnet_map_flush_packet_queue(struct hrtimer *t)
		rc = dev_queue_xmit(skb);
		rmnet_stats_queue_xmit(rc, RMNET_STATS_QUEUE_XMIT_AGG_TIMEOUT);
	}

	return HRTIMER_NORESTART;
	kfree(work);
}

/* rmnet_map_aggregate() - Software aggregates multiple packets.
@@ -218,6 +226,7 @@ enum hrtimer_restart rmnet_map_flush_packet_queue(struct hrtimer *t)
void rmnet_map_aggregate(struct sk_buff *skb,
			 struct rmnet_phys_ep_config *config) {
	u8 *dest_buff;
	struct agg_work *work;
	unsigned long flags;
	struct sk_buff *agg_skb;
	struct timespec diff, last;
@@ -281,9 +290,7 @@ void rmnet_map_aggregate(struct sk_buff *skb,
		config->agg_skb = 0;
		config->agg_count = 0;
		memset(&config->agg_time, 0, sizeof(struct timespec));
		config->agg_state = RMNET_MAP_AGG_IDLE;
		spin_unlock_irqrestore(&config->agg_lock, flags);
		hrtimer_cancel(&config->hrtimer);
		LOGL("delta t: %ld.%09lu\tcount: %d", diff.tv_sec,
		     diff.tv_nsec, agg_count);
		trace_rmnet_map_aggregate(skb, agg_count);
@@ -300,9 +307,19 @@ void rmnet_map_aggregate(struct sk_buff *skb,

schedule:
	if (config->agg_state != RMNET_MAP_TXFER_SCHEDULED) {
		work = kmalloc(sizeof(*work), GFP_ATOMIC);
		if (!work) {
			LOGE("Failed to allocate work item for packet %s",
			     "transfer. DATA PATH LIKELY BROKEN!");
			config->agg_state = RMNET_MAP_AGG_IDLE;
			spin_unlock_irqrestore(&config->agg_lock, flags);
			return;
		}
		INIT_DELAYED_WORK((struct delayed_work *)work,
				  rmnet_map_flush_packet_queue);
		work->config = config;
		config->agg_state = RMNET_MAP_TXFER_SCHEDULED;
		hrtimer_start(&config->hrtimer, ns_to_ktime(3000000),
			      HRTIMER_MODE_REL);
		schedule_delayed_work((struct delayed_work *)work, 1);
	}
	spin_unlock_irqrestore(&config->agg_lock, flags);
}
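
For reference, the delayed-work pattern this commit returns to can be summarized as below (hypothetical demo_* names; the real implementation is in the last hunk above): the transmit path allocates a work item with GFP_ATOMIC because the aggregation spinlock is held, schedules it roughly one jiffy out, and the worker frees the item after flushing.

/*
 * Minimal sketch of the deferred-flush pattern restored by this commit
 * (hypothetical demo_* names, not the commit's actual code).
 */
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct demo_agg_work {
	struct delayed_work work;
	void *config;			/* aggregation state to flush */
};

static void demo_flush_worker(struct work_struct *work)
{
	struct demo_agg_work *real_work =
		container_of(to_delayed_work(work), struct demo_agg_work, work);

	/* ... flush the aggregated skb tracked by real_work->config ... */
	kfree(real_work);
}

static int demo_schedule_flush(void *config)
{
	struct demo_agg_work *work;

	work = kmalloc(sizeof(*work), GFP_ATOMIC);	/* atomic: lock held */
	if (!work)
		return -ENOMEM;

	INIT_DELAYED_WORK(&work->work, demo_flush_worker);
	work->config = config;
	schedule_delayed_work(&work->work, 1);		/* flush in ~1 jiffy */
	return 0;
}

The real code casts the work_struct pointer straight to struct agg_work, which works because the delayed_work is its first member; the container_of()/to_delayed_work() form shown here is equivalent and stays correct even if the fields are reordered.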