Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 08fed02b authored by Subash Abhinov Kasiviswanathan
Browse files

net: rmnet_data: Use hrtimer for UL aggregation timer



The delay argument in the schedule_delayed_work(struct delayed_work
*dwork, unsigned long delay) API is in jiffies. The system tick
seems to be 100Hz, so the minimum time resolution for the work to
be scheduled is 10ms.

Switch to hrtimer to achieve 3 ms granularity with a current timer
of 1ms for the flush thread. A workqueue is immediately scheduled
in the same context after this timer expiry to do the tx work.

CRs-Fixed: 2157214
Change-Id: I29cbbee417e84b101ed34c0d29c2731bd52c3ec5
Signed-off-by: Subash Abhinov Kasiviswanathan <subashab@codeaurora.org>
parent 0098feef
Loading
Loading
Loading
Loading
+19 −1
Original line number Diff line number Diff line
@@ -25,6 +25,7 @@
#include "rmnet_data_vnd.h"
#include "rmnet_data_private.h"
#include "rmnet_data_trace.h"
#include "rmnet_map.h"

RMNET_LOG_MODULE(RMNET_DATA_LOGMASK_CONFIG);

@@ -869,7 +870,8 @@ int rmnet_associate_network_device(struct net_device *dev)
	conf->dev = dev;
	spin_lock_init(&conf->agg_lock);
	config->recycle = kfree_skb;

	hrtimer_init(&conf->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	conf->hrtimer.function = rmnet_map_flush_packet_queue;
	rc = netdev_rx_handler_register(dev, rmnet_rx_handler, config);

	if (rc) {
@@ -1232,6 +1234,22 @@ static void rmnet_force_unassociate_device(struct net_device *dev)
	config = _rmnet_get_phys_ep_config(dev);

	if (config) {
		unsigned long flags;

		hrtimer_cancel(&config->hrtimer);
		spin_lock_irqsave(&config->agg_lock, flags);
		if (config->agg_state == RMNET_MAP_TXFER_SCHEDULED) {
			if (config->agg_skb) {
				kfree_skb(config->agg_skb);
				config->agg_skb = NULL;
				config->agg_count = 0;
				memset(&config->agg_time, 0,
				       sizeof(struct timespec));
			}
			config->agg_state = RMNET_MAP_AGG_IDLE;
		}
		spin_unlock_irqrestore(&config->agg_lock, flags);

		cfg = &config->local_ep;

		if (cfg && cfg->refcount)
+2 −0
Original line number Diff line number Diff line
@@ -16,6 +16,7 @@
#include <linux/time.h>
#include <linux/spinlock.h>
#include <net/rmnet_config.h>
#include <linux/hrtimer.h>

#ifndef _RMNET_DATA_CONFIG_H_
#define _RMNET_DATA_CONFIG_H_
@@ -85,6 +86,7 @@ struct rmnet_phys_ep_config {
	u8 agg_count;
	struct timespec agg_time;
	struct timespec agg_last;
	struct hrtimer hrtimer;
};

int rmnet_config_init(void);
+3 −3
Original line number Diff line number Diff line
@@ -734,6 +734,9 @@ void rmnet_egress_handler(struct sk_buff *skb,
	LOGD("Packet going out on %s with egress format 0x%08X",
	     skb->dev->name, config->egress_data_format);

	if (ep->rmnet_mode == RMNET_EPMODE_VND)
		rmnet_vnd_tx_fixup(skb, orig_dev);

	if (config->egress_data_format & RMNET_EGRESS_FORMAT_MAP) {
		switch (rmnet_map_egress_handler(skb, config, ep, orig_dev)) {
		case RMNET_MAP_CONSUMED:
@@ -751,9 +754,6 @@ void rmnet_egress_handler(struct sk_buff *skb,
		}
	}

	if (ep->rmnet_mode == RMNET_EPMODE_VND)
		rmnet_vnd_tx_fixup(skb, orig_dev);

	rmnet_print_packet(skb, skb->dev->name, 't');
	trace_rmnet_egress_handler(skb);
	rc = dev_queue_xmit(skb);
+1 −0
Original line number Diff line number Diff line
@@ -147,4 +147,5 @@ int rmnet_map_checksum_uplink_packet(struct sk_buff *skb,
				     struct net_device *orig_dev,
				     u32 egress_data_format);
int rmnet_ul_aggregation_skip(struct sk_buff *skb, int offset);
enum hrtimer_restart rmnet_map_flush_packet_queue(struct hrtimer *t);
#endif /* _RMNET_MAP_H_ */
+39 −27
Original line number Diff line number Diff line
@@ -49,7 +49,7 @@ module_param(agg_bypass_time, long, 0644);
MODULE_PARM_DESC(agg_bypass_time, "Skip agg when apart spaced more than this");

struct agg_work {
	struct delayed_work work;
	struct work_struct work;
	struct rmnet_phys_ep_config *config;
};

@@ -165,25 +165,18 @@ struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb,
	return skbn;
}

/* rmnet_map_flush_packet_queue() - Transmits aggregated frame on timeout
 * @work:        struct agg_work containing delayed work and skb to flush
 *
 * This function is scheduled to run in a specified number of jiffies after
 * the last frame transmitted by the network stack. When run, the buffer
 * containing aggregated packets is finally transmitted on the underlying link.
 *
 */
static void rmnet_map_flush_packet_queue(struct work_struct *work)
static void rmnet_map_flush_packet_work(struct work_struct *work)
{
	struct agg_work *real_work;
	struct rmnet_phys_ep_config *config;
	struct agg_work *real_work;
	int rc, agg_count = 0;
	unsigned long flags;
	struct sk_buff *skb;
	int rc, agg_count = 0;

	skb = 0;
	real_work = (struct agg_work *)work;
	config = real_work->config;
	skb = NULL;

	LOGD("%s", "Entering flush thread");
	spin_lock_irqsave(&config->agg_lock, flags);
	if (likely(config->agg_state == RMNET_MAP_TXFER_SCHEDULED)) {
@@ -194,7 +187,7 @@ static void rmnet_map_flush_packet_queue(struct work_struct *work)
				LOGL("Agg count: %d", config->agg_count);
			skb = config->agg_skb;
			agg_count = config->agg_count;
			config->agg_skb = 0;
			config->agg_skb = NULL;
			config->agg_count = 0;
			memset(&config->agg_time, 0, sizeof(struct timespec));
		}
@@ -211,9 +204,37 @@ static void rmnet_map_flush_packet_queue(struct work_struct *work)
		rc = dev_queue_xmit(skb);
		rmnet_stats_queue_xmit(rc, RMNET_STATS_QUEUE_XMIT_AGG_TIMEOUT);
	}

	kfree(work);
}

/* rmnet_map_flush_packet_queue() - Transmits aggregated frame on timeout
 *
 * This function is scheduled to run in a specified number of ns after
 * the last frame transmitted by the network stack. When run, the buffer
 * containing aggregated packets is finally transmitted on the underlying link.
 *
 */
enum hrtimer_restart rmnet_map_flush_packet_queue(struct hrtimer *t)
{
	struct rmnet_phys_ep_config *config;
	struct agg_work *work;
	unsigned long flags;

	/* The hrtimer is embedded in the endpoint config, so recover the
	 * owning config from the timer pointer.
	 */
	config = container_of(t, struct rmnet_phys_ep_config, hrtimer);

	/* hrtimer callbacks run in (hard) interrupt context, so the work
	 * item must be allocated atomically.
	 */
	work = kmalloc(sizeof(*work), GFP_ATOMIC);
	if (!work) {
		/* agg_state is read and written under agg_lock everywhere
		 * else in this file; take the lock here too so a concurrent
		 * rmnet_map_aggregate() cannot race with this state reset.
		 */
		spin_lock_irqsave(&config->agg_lock, flags);
		config->agg_state = RMNET_MAP_AGG_IDLE;
		spin_unlock_irqrestore(&config->agg_lock, flags);

		return HRTIMER_NORESTART;
	}

	/* Defer the actual flush/transmit to process-ish (workqueue)
	 * context; dev_queue_xmit must not be done from the timer handler.
	 */
	INIT_WORK(&work->work, rmnet_map_flush_packet_work);
	work->config = config;
	schedule_work(&work->work);
	return HRTIMER_NORESTART;
}

/* rmnet_map_aggregate() - Software aggregates multiple packets.
 * @skb:        current packet being transmitted
 * @config:     Physical endpoint configuration of the ingress device
@@ -226,7 +247,6 @@ static void rmnet_map_flush_packet_queue(struct work_struct *work)
void rmnet_map_aggregate(struct sk_buff *skb,
			 struct rmnet_phys_ep_config *config) {
	u8 *dest_buff;
	struct agg_work *work;
	unsigned long flags;
	struct sk_buff *agg_skb;
	struct timespec diff, last;
@@ -290,7 +310,9 @@ void rmnet_map_aggregate(struct sk_buff *skb,
		config->agg_skb = 0;
		config->agg_count = 0;
		memset(&config->agg_time, 0, sizeof(struct timespec));
		config->agg_state = RMNET_MAP_AGG_IDLE;
		spin_unlock_irqrestore(&config->agg_lock, flags);
		hrtimer_cancel(&config->hrtimer);
		LOGL("delta t: %ld.%09lu\tcount: %d", diff.tv_sec,
		     diff.tv_nsec, agg_count);
		trace_rmnet_map_aggregate(skb, agg_count);
@@ -307,19 +329,9 @@ void rmnet_map_aggregate(struct sk_buff *skb,

schedule:
	if (config->agg_state != RMNET_MAP_TXFER_SCHEDULED) {
		work = kmalloc(sizeof(*work), GFP_ATOMIC);
		if (!work) {
			LOGE("Failed to allocate work item for packet %s",
			     "transfer. DATA PATH LIKELY BROKEN!");
			config->agg_state = RMNET_MAP_AGG_IDLE;
			spin_unlock_irqrestore(&config->agg_lock, flags);
			return;
		}
		INIT_DELAYED_WORK((struct delayed_work *)work,
				  rmnet_map_flush_packet_queue);
		work->config = config;
		config->agg_state = RMNET_MAP_TXFER_SCHEDULED;
		schedule_delayed_work((struct delayed_work *)work, 1);
		hrtimer_start(&config->hrtimer, ns_to_ktime(3000000),
			      HRTIMER_MODE_REL);
	}
	spin_unlock_irqrestore(&config->agg_lock, flags);
}