net/rmnet_data/rmnet_data_config.c +1 −19

@@ -25,7 +25,6 @@
 #include "rmnet_data_vnd.h"
 #include "rmnet_data_private.h"
 #include "rmnet_data_trace.h"
-#include "rmnet_map.h"

 RMNET_LOG_MODULE(RMNET_DATA_LOGMASK_CONFIG);

@@ -870,8 +869,7 @@ int rmnet_associate_network_device(struct net_device *dev)
 	conf->dev = dev;
 	spin_lock_init(&conf->agg_lock);
-	hrtimer_init(&conf->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
-	conf->hrtimer.function = rmnet_map_flush_packet_queue;
+	config->recycle = kfree_skb;

 	rc = netdev_rx_handler_register(dev, rmnet_rx_handler, config);
 	if (rc) {

@@ -1234,22 +1232,6 @@ static void rmnet_force_unassociate_device(struct net_device *dev)
 	config = _rmnet_get_phys_ep_config(dev);

 	if (config) {
-		unsigned long flags;
-
-		hrtimer_cancel(&config->hrtimer);
-		spin_lock_irqsave(&config->agg_lock, flags);
-		if (config->agg_state == RMNET_MAP_TXFER_SCHEDULED) {
-			if (config->agg_skb) {
-				kfree_skb(config->agg_skb);
-				config->agg_skb = NULL;
-				config->agg_count = 0;
-				memset(&config->agg_time, 0,
-				       sizeof(struct timespec));
-			}
-			config->agg_state = RMNET_MAP_AGG_IDLE;
-		}
-		spin_unlock_irqrestore(&config->agg_lock, flags);
-
 		cfg = &config->local_ep;

 		if (cfg && cfg->refcount)
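With the flush handler moved to a self-freeing delayed work item (see rmnet_map_data.c below), device association no longer has per-device timer state to set up, and force-unassociation loses the explicit cancel-and-drain block: a heap-allocated, untracked work item cannot be cancelled from here, so it is simply left to run. If a caller still wanted to drop a pending aggregate at teardown in the workqueue model, a sketch equivalent to the removed block minus the hrtimer_cancel() might look like this (rmnet_drop_pending_agg is a hypothetical name, not part of this patch):

/* Hypothetical helper, not in this patch: drop any aggregate still
 * pending on the endpoint. The scheduled work item is left to run;
 * it will find agg_state back at RMNET_MAP_AGG_IDLE, do nothing, and
 * free itself. Note it still dereferences config, so config must
 * outlive the work item.
 */
static void rmnet_drop_pending_agg(struct rmnet_phys_ep_config *config)
{
	unsigned long flags;

	spin_lock_irqsave(&config->agg_lock, flags);
	if (config->agg_state == RMNET_MAP_TXFER_SCHEDULED) {
		if (config->agg_skb) {
			kfree_skb(config->agg_skb);
			config->agg_skb = NULL;
			config->agg_count = 0;
			memset(&config->agg_time, 0,
			       sizeof(struct timespec));
		}
		config->agg_state = RMNET_MAP_AGG_IDLE;
	}
	spin_unlock_irqrestore(&config->agg_lock, flags);
}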
net/rmnet_data/rmnet_data_config.h +0 −2

@@ -16,7 +16,6 @@
 #include <linux/time.h>
 #include <linux/spinlock.h>
 #include <net/rmnet_config.h>
-#include <linux/hrtimer.h>

 #ifndef _RMNET_DATA_CONFIG_H_
 #define _RMNET_DATA_CONFIG_H_

@@ -86,7 +85,6 @@ struct rmnet_phys_ep_config {
 	u8 agg_count;
 	struct timespec agg_time;
 	struct timespec agg_last;
-	struct hrtimer hrtimer;
 };

 int rmnet_config_init(void);
net/rmnet_data/rmnet_map.h +0 −1

@@ -147,5 +147,4 @@ int rmnet_map_checksum_uplink_packet(struct sk_buff *skb,
 				     struct net_device *orig_dev,
 				     u32 egress_data_format);
 int rmnet_ul_aggregation_skip(struct sk_buff *skb, int offset);
-enum hrtimer_restart rmnet_map_flush_packet_queue(struct hrtimer *t);

 #endif /* _RMNET_MAP_H_ */
net/rmnet_data/rmnet_map_data.c +26 −9

@@ -18,6 +18,7 @@
 #include <linux/netdevice.h>
 #include <linux/rmnet_data.h>
 #include <linux/spinlock.h>
+#include <linux/workqueue.h>
 #include <linux/time.h>
 #include <linux/net_map.h>
 #include <linux/ip.h>

@@ -47,6 +48,11 @@ long agg_bypass_time __read_mostly = 10000000L;
 module_param(agg_bypass_time, long, 0644);
 MODULE_PARM_DESC(agg_bypass_time, "Skip agg when apart spaced more than this");

+struct agg_work {
+	struct delayed_work work;
+	struct rmnet_phys_ep_config *config;
+};
+
 #define RMNET_MAP_DEAGGR_SPACING  64
 #define RMNET_MAP_DEAGGR_HEADROOM (RMNET_MAP_DEAGGR_SPACING / 2)

@@ -160,21 +166,24 @@ struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb,
 }

 /* rmnet_map_flush_packet_queue() - Transmits aggregated frame on timeout
+ * @work: struct agg_work containing delayed work and skb to flush
  *
- * This function is scheduled to run in a specified number of ns after
+ * This function is scheduled to run in a specified number of jiffies after
  * the last frame transmitted by the network stack. When run, the buffer
  * containing aggregated packets is finally transmitted on the underlying link.
  */
-enum hrtimer_restart rmnet_map_flush_packet_queue(struct hrtimer *t)
+static void rmnet_map_flush_packet_queue(struct work_struct *work)
 {
+	struct agg_work *real_work;
 	struct rmnet_phys_ep_config *config;
 	unsigned long flags;
 	struct sk_buff *skb;
 	int rc, agg_count = 0;

-	config = container_of(t, struct rmnet_phys_ep_config, hrtimer);
 	skb = 0;
+	real_work = (struct agg_work *)work;
+	config = real_work->config;
 	LOGD("%s", "Entering flush thread");
 	spin_lock_irqsave(&config->agg_lock, flags);
 	if (likely(config->agg_state == RMNET_MAP_TXFER_SCHEDULED)) {

@@ -202,8 +211,7 @@ enum hrtimer_restart rmnet_map_flush_packet_queue(struct hrtimer *t)
 		rc = dev_queue_xmit(skb);
 		rmnet_stats_queue_xmit(rc, RMNET_STATS_QUEUE_XMIT_AGG_TIMEOUT);
 	}
-
-	return HRTIMER_NORESTART;
+	kfree(work);
 }

 /* rmnet_map_aggregate() - Software aggregates multiple packets.

@@ -218,6 +226,7 @@ enum hrtimer_restart rmnet_map_flush_packet_queue(struct hrtimer *t)
 void rmnet_map_aggregate(struct sk_buff *skb,
 			 struct rmnet_phys_ep_config *config) {
 	u8 *dest_buff;
+	struct agg_work *work;
 	unsigned long flags;
 	struct sk_buff *agg_skb;
 	struct timespec diff, last;

@@ -281,9 +290,7 @@ void rmnet_map_aggregate(struct sk_buff *skb,
 		config->agg_skb = 0;
 		config->agg_count = 0;
 		memset(&config->agg_time, 0, sizeof(struct timespec));
-		config->agg_state = RMNET_MAP_AGG_IDLE;
 		spin_unlock_irqrestore(&config->agg_lock, flags);
-		hrtimer_cancel(&config->hrtimer);
 		LOGL("delta t: %ld.%09lu\tcount: %d", diff.tv_sec,
 		     diff.tv_nsec, agg_count);
 		trace_rmnet_map_aggregate(skb, agg_count);

@@ -300,9 +307,19 @@ void rmnet_map_aggregate(struct sk_buff *skb,
 schedule:
 	if (config->agg_state != RMNET_MAP_TXFER_SCHEDULED) {
+		work = kmalloc(sizeof(*work), GFP_ATOMIC);
+		if (!work) {
+			LOGE("Failed to allocate work item for packet %s",
+			     "transfer. DATA PATH LIKELY BROKEN!");
+			config->agg_state = RMNET_MAP_AGG_IDLE;
+			spin_unlock_irqrestore(&config->agg_lock, flags);
+			return;
+		}
+		INIT_DELAYED_WORK((struct delayed_work *)work,
+				  rmnet_map_flush_packet_queue);
+		work->config = config;
 		config->agg_state = RMNET_MAP_TXFER_SCHEDULED;
-		hrtimer_start(&config->hrtimer, ns_to_ktime(3000000),
-			      HRTIMER_MODE_REL);
+		schedule_delayed_work((struct delayed_work *)work, 1);
 	}
 	spin_unlock_irqrestore(&config->agg_lock, flags);
 }
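Two details of the workqueue variant above are worth noting. First, schedule_delayed_work(..., 1) arms the flush one jiffy out, so the real delay depends on HZ (10 ms at HZ=100, 4 ms at HZ=250), whereas the hrtimer fired at a fixed 3 ms (ns_to_ktime(3000000)). Second, the (struct agg_work *)work cast in the handler is valid only because the struct delayed_work is the first member of struct agg_work; a layout-independent alternative is the usual to_delayed_work()/container_of() pairing. A minimal sketch of that form, not part of this patch:

static void rmnet_map_flush_packet_queue(struct work_struct *work)
{
	/* work points at the work_struct embedded in agg_work's
	 * delayed_work; walk back out through the containers instead
	 * of relying on member ordering.
	 */
	struct delayed_work *dwork = to_delayed_work(work);
	struct agg_work *real_work = container_of(dwork,
						  struct agg_work, work);
	struct rmnet_phys_ep_config *config = real_work->config;

	/* ... flush and transmit config->agg_skb as above ... */

	kfree(real_work);
}

Either form frees the work item at the end of the handler, which is why the scheduling side allocates a fresh agg_work each time a flush is armed, using GFP_ATOMIC because the agg_lock is held with interrupts disabled at that point.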