Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 855e50cb authored by Subash Abhinov Kasiviswanathan
Browse files

net: rmnet_data: Switch aggregation from delayed work to hrtimer



The delay argument in the schedule_delayed_work(struct delayed_work
*dwork, unsigned long delay) API is in jiffies. The system tick
frequency seems to be 100Hz, so the minimum time resolution for the
work to be scheduled is 10ms.

Switch to hrtimer to achieve 1 ms granularity with a current timer
of 3ms for the flush thread.

CRs-fixed: 2147503
Change-Id: If33fea37146f91b2980e6637db71567fb5ca12a5
Signed-off-by: Subash Abhinov Kasiviswanathan <subashab@codeaurora.org>
parent a8118139
Loading
Loading
Loading
Loading
+19 −1
Original line number Diff line number Diff line
@@ -25,6 +25,7 @@
#include "rmnet_data_vnd.h"
#include "rmnet_data_private.h"
#include "rmnet_data_trace.h"
#include "rmnet_map.h"

RMNET_LOG_MODULE(RMNET_DATA_LOGMASK_CONFIG);

@@ -869,7 +870,8 @@ int rmnet_associate_network_device(struct net_device *dev)
	conf->dev = dev;
	spin_lock_init(&conf->agg_lock);
	config->recycle = kfree_skb;

	hrtimer_init(&conf->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	conf->hrtimer.function = rmnet_map_flush_packet_queue;
	rc = netdev_rx_handler_register(dev, rmnet_rx_handler, config);

	if (rc) {
@@ -1232,6 +1234,22 @@ static void rmnet_force_unassociate_device(struct net_device *dev)
	config = _rmnet_get_phys_ep_config(dev);

	if (config) {
		unsigned long flags;

		hrtimer_cancel(&config->hrtimer);
		spin_lock_irqsave(&config->agg_lock, flags);
		if (config->agg_state == RMNET_MAP_TXFER_SCHEDULED) {
			if (config->agg_skb) {
				kfree_skb(config->agg_skb);
				config->agg_skb = NULL;
				config->agg_count = 0;
				memset(&config->agg_time, 0,
				       sizeof(struct timespec));
			}
			config->agg_state = RMNET_MAP_AGG_IDLE;
		}
		spin_unlock_irqrestore(&config->agg_lock, flags);

		cfg = &config->local_ep;

		if (cfg && cfg->refcount)
+2 −0
Original line number Diff line number Diff line
@@ -16,6 +16,7 @@
#include <linux/time.h>
#include <linux/spinlock.h>
#include <net/rmnet_config.h>
#include <linux/hrtimer.h>

#ifndef _RMNET_DATA_CONFIG_H_
#define _RMNET_DATA_CONFIG_H_
@@ -85,6 +86,7 @@ struct rmnet_phys_ep_config {
	u8 agg_count;
	struct timespec agg_time;
	struct timespec agg_last;
	struct hrtimer hrtimer;
};

int rmnet_config_init(void);
+1 −0
Original line number Diff line number Diff line
@@ -147,4 +147,5 @@ int rmnet_map_checksum_uplink_packet(struct sk_buff *skb,
				     struct net_device *orig_dev,
				     u32 egress_data_format);
int rmnet_ul_aggregation_skip(struct sk_buff *skb, int offset);
enum hrtimer_restart rmnet_map_flush_packet_queue(struct hrtimer *t);
#endif /* _RMNET_MAP_H_ */
+9 −26
Original line number Diff line number Diff line
@@ -18,7 +18,6 @@
#include <linux/netdevice.h>
#include <linux/rmnet_data.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/time.h>
#include <linux/net_map.h>
#include <linux/ip.h>
@@ -48,11 +47,6 @@ long agg_bypass_time __read_mostly = 10000000L;
module_param(agg_bypass_time, long, 0644);
MODULE_PARM_DESC(agg_bypass_time, "Skip agg when apart spaced more than this");

struct agg_work {
	struct delayed_work work;
	struct rmnet_phys_ep_config *config;
};

#define RMNET_MAP_DEAGGR_SPACING  64
#define RMNET_MAP_DEAGGR_HEADROOM (RMNET_MAP_DEAGGR_SPACING / 2)

@@ -166,24 +160,21 @@ struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb,
}

/* rmnet_map_flush_packet_queue() - Transmits aggregated frame on timeout
 * @work:        struct agg_work containing delayed work and skb to flush
 *
 * This function is scheduled to run in a specified number of jiffies after
 * This function is scheduled to run in a specified number of ns after
 * the last frame transmitted by the network stack. When run, the buffer
 * containing aggregated packets is finally transmitted on the underlying link.
 *
 */
static void rmnet_map_flush_packet_queue(struct work_struct *work)
enum hrtimer_restart rmnet_map_flush_packet_queue(struct hrtimer *t)
{
	struct agg_work *real_work;
	struct rmnet_phys_ep_config *config;
	unsigned long flags;
	struct sk_buff *skb;
	int rc, agg_count = 0;

	config = container_of(t, struct rmnet_phys_ep_config, hrtimer);
	skb = 0;
	real_work = (struct agg_work *)work;
	config = real_work->config;
	LOGD("%s", "Entering flush thread");
	spin_lock_irqsave(&config->agg_lock, flags);
	if (likely(config->agg_state == RMNET_MAP_TXFER_SCHEDULED)) {
@@ -211,7 +202,8 @@ static void rmnet_map_flush_packet_queue(struct work_struct *work)
		rc = dev_queue_xmit(skb);
		rmnet_stats_queue_xmit(rc, RMNET_STATS_QUEUE_XMIT_AGG_TIMEOUT);
	}
	kfree(work);

	return HRTIMER_NORESTART;
}

/* rmnet_map_aggregate() - Software aggregates multiple packets.
@@ -226,7 +218,6 @@ static void rmnet_map_flush_packet_queue(struct work_struct *work)
void rmnet_map_aggregate(struct sk_buff *skb,
			 struct rmnet_phys_ep_config *config) {
	u8 *dest_buff;
	struct agg_work *work;
	unsigned long flags;
	struct sk_buff *agg_skb;
	struct timespec diff, last;
@@ -290,7 +281,9 @@ void rmnet_map_aggregate(struct sk_buff *skb,
		config->agg_skb = 0;
		config->agg_count = 0;
		memset(&config->agg_time, 0, sizeof(struct timespec));
		config->agg_state = RMNET_MAP_AGG_IDLE;
		spin_unlock_irqrestore(&config->agg_lock, flags);
		hrtimer_cancel(&config->hrtimer);
		LOGL("delta t: %ld.%09lu\tcount: %d", diff.tv_sec,
		     diff.tv_nsec, agg_count);
		trace_rmnet_map_aggregate(skb, agg_count);
@@ -307,19 +300,9 @@ void rmnet_map_aggregate(struct sk_buff *skb,

schedule:
	if (config->agg_state != RMNET_MAP_TXFER_SCHEDULED) {
		work = kmalloc(sizeof(*work), GFP_ATOMIC);
		if (!work) {
			LOGE("Failed to allocate work item for packet %s",
			     "transfer. DATA PATH LIKELY BROKEN!");
			config->agg_state = RMNET_MAP_AGG_IDLE;
			spin_unlock_irqrestore(&config->agg_lock, flags);
			return;
		}
		INIT_DELAYED_WORK((struct delayed_work *)work,
				  rmnet_map_flush_packet_queue);
		work->config = config;
		config->agg_state = RMNET_MAP_TXFER_SCHEDULED;
		schedule_delayed_work((struct delayed_work *)work, 1);
		hrtimer_start(&config->hrtimer, ns_to_ktime(3000000),
			      HRTIMER_MODE_REL);
	}
	spin_unlock_irqrestore(&config->agg_lock, flags);
}