net/rmnet_data/rmnet_data_config.c  (+19 −1)

@@ -25,6 +25,7 @@
 #include "rmnet_data_vnd.h"
 #include "rmnet_data_private.h"
 #include "rmnet_data_trace.h"
+#include "rmnet_map.h"

 RMNET_LOG_MODULE(RMNET_DATA_LOGMASK_CONFIG);

@@ -869,7 +870,8 @@ int rmnet_associate_network_device(struct net_device *dev)
 	conf->dev = dev;
 	spin_lock_init(&conf->agg_lock);
 	config->recycle = kfree_skb;
+	hrtimer_init(&conf->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+	conf->hrtimer.function = rmnet_map_flush_packet_queue;

 	rc = netdev_rx_handler_register(dev, rmnet_rx_handler, config);
 	if (rc) {

@@ -1232,6 +1234,22 @@ static void rmnet_force_unassociate_device(struct net_device *dev)
 	config = _rmnet_get_phys_ep_config(dev);

 	if (config) {
+		unsigned long flags;
+
+		hrtimer_cancel(&config->hrtimer);
+		spin_lock_irqsave(&config->agg_lock, flags);
+		if (config->agg_state == RMNET_MAP_TXFER_SCHEDULED) {
+			if (config->agg_skb) {
+				kfree_skb(config->agg_skb);
+				config->agg_skb = NULL;
+				config->agg_count = 0;
+				memset(&config->agg_time, 0,
+				       sizeof(struct timespec));
+			}
+			config->agg_state = RMNET_MAP_AGG_IDLE;
+		}
+		spin_unlock_irqrestore(&config->agg_lock, flags);
+
 		cfg = &config->local_ep;

 		if (cfg && cfg->refcount)
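The hunks above initialize the flush timer when a physical device is associated and cancel it (then drop any half-built aggregation buffer under agg_lock) when the device is force-unassociated. Below is a minimal, hedged sketch of that same lifecycle outside of rmnet_data; demo_config, demo_flush_cb, agg_buf and scheduled are hypothetical stand-ins for rmnet_phys_ep_config, rmnet_map_flush_packet_queue(), agg_skb and agg_state. Only the hrtimer calls and the cancel-before-cleanup ordering mirror the patch.

/* Sketch only: hypothetical names, not the rmnet_data implementation. */
#include <linux/module.h>
#include <linux/hrtimer.h>
#include <linux/ktime.h>
#include <linux/spinlock.h>
#include <linux/slab.h>

struct demo_config {
	spinlock_t agg_lock;
	void *agg_buf;          /* stands in for the aggregated skb */
	bool scheduled;         /* stands in for agg_state          */
	struct hrtimer hrtimer;
};

static struct demo_config demo;

static enum hrtimer_restart demo_flush_cb(struct hrtimer *t)
{
	/* In the real driver this hands the flush off to a workqueue. */
	return HRTIMER_NORESTART;
}

static int __init demo_init(void)
{
	spin_lock_init(&demo.agg_lock);

	/* Association: relative, monotonic timer with the flush callback,
	 * as in rmnet_associate_network_device().
	 */
	hrtimer_init(&demo.hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	demo.hrtimer.function = demo_flush_cb;
	return 0;
}

static void __exit demo_exit(void)
{
	unsigned long flags;

	/* Unassociation: cancel the timer before freeing pending state so
	 * the callback cannot run against memory that is being torn down.
	 */
	hrtimer_cancel(&demo.hrtimer);

	spin_lock_irqsave(&demo.agg_lock, flags);
	if (demo.scheduled) {
		kfree(demo.agg_buf);
		demo.agg_buf = NULL;
		demo.scheduled = false;
	}
	spin_unlock_irqrestore(&demo.agg_lock, flags);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");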
net/rmnet_data/rmnet_data_config.h  (+2 −0)

@@ -16,6 +16,7 @@
 #include <linux/time.h>
 #include <linux/spinlock.h>
 #include <net/rmnet_config.h>
+#include <linux/hrtimer.h>

 #ifndef _RMNET_DATA_CONFIG_H_
 #define _RMNET_DATA_CONFIG_H_

@@ -85,6 +86,7 @@ struct rmnet_phys_ep_config {
 	u8 agg_count;
 	struct timespec agg_time;
 	struct timespec agg_last;
+	struct hrtimer hrtimer;
 };

 int rmnet_config_init(void);
net/rmnet_data/rmnet_data_handlers.c  (+3 −3)

@@ -734,6 +734,9 @@ void rmnet_egress_handler(struct sk_buff *skb,
 	LOGD("Packet going out on %s with egress format 0x%08X",
 	     skb->dev->name, config->egress_data_format);

+	if (ep->rmnet_mode == RMNET_EPMODE_VND)
+		rmnet_vnd_tx_fixup(skb, orig_dev);
+
 	if (config->egress_data_format & RMNET_EGRESS_FORMAT_MAP) {
 		switch (rmnet_map_egress_handler(skb, config, ep, orig_dev)) {
 		case RMNET_MAP_CONSUMED:

@@ -751,9 +754,6 @@ void rmnet_egress_handler(struct sk_buff *skb,
 		}
 	}

-	if (ep->rmnet_mode == RMNET_EPMODE_VND)
-		rmnet_vnd_tx_fixup(skb, orig_dev);
-
 	rmnet_print_packet(skb, skb->dev->name, 't');
 	trace_rmnet_egress_handler(skb);
 	rc = dev_queue_xmit(skb);
net/rmnet_data/rmnet_map.h  (+1 −0)

@@ -147,4 +147,5 @@ int rmnet_map_checksum_uplink_packet(struct sk_buff *skb,
 				     struct net_device *orig_dev,
 				     u32 egress_data_format);
 int rmnet_ul_aggregation_skip(struct sk_buff *skb, int offset);
+enum hrtimer_restart rmnet_map_flush_packet_queue(struct hrtimer *t);
 #endif /* _RMNET_MAP_H_ */
net/rmnet_data/rmnet_map_data.c  (+39 −27)

@@ -49,7 +49,7 @@ module_param(agg_bypass_time, long, 0644);
 MODULE_PARM_DESC(agg_bypass_time, "Skip agg when apart spaced more than this");

 struct agg_work {
-	struct delayed_work work;
+	struct work_struct work;
 	struct rmnet_phys_ep_config *config;
 };

@@ -165,25 +165,18 @@ struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb,
 	return skbn;
 }

-/* rmnet_map_flush_packet_queue() - Transmits aggregeted frame on timeout
- * @work: struct agg_work containing delayed work and skb to flush
- *
- * This function is scheduled to run in a specified number of jiffies after
- * the last frame transmitted by the network stack. When run, the buffer
- * containing aggregated packets is finally transmitted on the underlying link.
- *
- */
-static void rmnet_map_flush_packet_queue(struct work_struct *work)
+static void rmnet_map_flush_packet_work(struct work_struct *work)
 {
-	struct agg_work *real_work;
 	struct rmnet_phys_ep_config *config;
+	struct agg_work *real_work;
+	int rc, agg_count = 0;
 	unsigned long flags;
 	struct sk_buff *skb;
-	int rc, agg_count = 0;

-	skb = 0;
 	real_work = (struct agg_work *)work;
 	config = real_work->config;
+	skb = NULL;
 	LOGD("%s", "Entering flush thread");
 	spin_lock_irqsave(&config->agg_lock, flags);
 	if (likely(config->agg_state == RMNET_MAP_TXFER_SCHEDULED)) {

@@ -194,7 +187,7 @@ static void rmnet_map_flush_packet_queue(struct work_struct *work)
 			LOGL("Agg count: %d", config->agg_count);
 			skb = config->agg_skb;
 			agg_count = config->agg_count;
-			config->agg_skb = 0;
+			config->agg_skb = NULL;
 			config->agg_count = 0;
 			memset(&config->agg_time, 0, sizeof(struct timespec));
 		}

@@ -211,9 +204,37 @@ static void rmnet_map_flush_packet_queue(struct work_struct *work)
 		rc = dev_queue_xmit(skb);
 		rmnet_stats_queue_xmit(rc,
 				       RMNET_STATS_QUEUE_XMIT_AGG_TIMEOUT);
 	}
 	kfree(work);
 }

+/* rmnet_map_flush_packet_queue() - Transmits aggregeted frame on timeout
+ *
+ * This function is scheduled to run in a specified number of ns after
+ * the last frame transmitted by the network stack. When run, the buffer
+ * containing aggregated packets is finally transmitted on the underlying link.
+ *
+ */
+enum hrtimer_restart rmnet_map_flush_packet_queue(struct hrtimer *t)
+{
+	struct rmnet_phys_ep_config *config;
+	struct agg_work *work;
+
+	config = container_of(t, struct rmnet_phys_ep_config, hrtimer);
+
+	work = kmalloc(sizeof(*work), GFP_ATOMIC);
+	if (!work) {
+		config->agg_state = RMNET_MAP_AGG_IDLE;
+		return HRTIMER_NORESTART;
+	}
+
+	INIT_WORK(&work->work, rmnet_map_flush_packet_work);
+	work->config = config;
+	schedule_work((struct work_struct *)work);
+	return HRTIMER_NORESTART;
+}
+
 /* rmnet_map_aggregate() - Software aggregates multiple packets.
  * @skb: current packet being transmitted
  * @config: Physical endpoint configuration of the ingress device

@@ -226,7 +247,6 @@ static void rmnet_map_flush_packet_queue(struct work_struct *work)
 void rmnet_map_aggregate(struct sk_buff *skb,
 			 struct rmnet_phys_ep_config *config)
 {
 	u8 *dest_buff;
-	struct agg_work *work;
 	unsigned long flags;
 	struct sk_buff *agg_skb;
 	struct timespec diff, last;

@@ -290,7 +310,9 @@ void rmnet_map_aggregate(struct sk_buff *skb,
 			config->agg_skb = 0;
 			config->agg_count = 0;
 			memset(&config->agg_time, 0, sizeof(struct timespec));
+			config->agg_state = RMNET_MAP_AGG_IDLE;
 			spin_unlock_irqrestore(&config->agg_lock, flags);
+			hrtimer_cancel(&config->hrtimer);
 			LOGL("delta t: %ld.%09lu\tcount: %d", diff.tv_sec,
 			     diff.tv_nsec, agg_count);
 			trace_rmnet_map_aggregate(skb, agg_count);

@@ -307,19 +329,9 @@ void rmnet_map_aggregate(struct sk_buff *skb,
 schedule:
 	if (config->agg_state != RMNET_MAP_TXFER_SCHEDULED) {
-		work = kmalloc(sizeof(*work), GFP_ATOMIC);
-		if (!work) {
-			LOGE("Failed to allocate work item for packet %s",
-			     "transfer. DATA PATH LIKELY BROKEN!");
-			config->agg_state = RMNET_MAP_AGG_IDLE;
-			spin_unlock_irqrestore(&config->agg_lock, flags);
-			return;
-		}
-		INIT_DELAYED_WORK((struct delayed_work *)work,
-				  rmnet_map_flush_packet_queue);
-		work->config = config;
 		config->agg_state = RMNET_MAP_TXFER_SCHEDULED;
-		schedule_delayed_work((struct delayed_work *)work, 1);
+		hrtimer_start(&config->hrtimer, ns_to_ktime(3000000),
+			      HRTIMER_MODE_REL);
 	}
 	spin_unlock_irqrestore(&config->agg_lock, flags);
 }
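In rmnet_map_data.c the per-packet delayed work item is replaced by the per-device hrtimer: rmnet_map_aggregate() now just arms the timer for 3 ms (ns_to_ktime(3000000)) instead of allocating and scheduling a delayed_work, and the hrtimer callback, which runs in hard-IRQ context, only allocates a work item with GFP_ATOMIC and queues it; the actual flush (taking agg_lock and calling dev_queue_xmit() on the aggregated skb) happens later in process context in rmnet_map_flush_packet_work(), which also frees the work item. The sketch below shows that hand-off pattern in isolation with hypothetical names (flush_work_item, demo_*); it is not the driver code, only the same hrtimer -> atomic allocation -> schedule_work() -> kfree() shape under those assumptions.

/* Sketch only: hypothetical names, demonstrating the hrtimer-to-workqueue hand-off. */
#include <linux/module.h>
#include <linux/hrtimer.h>
#include <linux/ktime.h>
#include <linux/workqueue.h>
#include <linux/slab.h>

struct flush_work_item {
	struct work_struct work;   /* first member, so the cast below is valid */
	int pending_count;         /* stands in for the aggregated skb state   */
};

static struct hrtimer demo_timer;

/* Process context: safe place for the heavy work (transmit, long locks). */
static void demo_flush_work(struct work_struct *work)
{
	struct flush_work_item *item = (struct flush_work_item *)work;

	pr_info("flushing %d pending packets\n", item->pending_count);
	kfree(item);               /* the worker owns and frees the item */
}

/* Hard-IRQ context: defer to the workqueue and return immediately. */
static enum hrtimer_restart demo_timer_cb(struct hrtimer *t)
{
	struct flush_work_item *item;

	item = kmalloc(sizeof(*item), GFP_ATOMIC);
	if (!item)
		return HRTIMER_NORESTART;   /* drop the flush, as the patch does */

	item->pending_count = 1;
	INIT_WORK(&item->work, demo_flush_work);
	schedule_work(&item->work);

	return HRTIMER_NORESTART;           /* one-shot; re-armed on the next packet */
}

static int __init demo_init(void)
{
	hrtimer_init(&demo_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	demo_timer.function = demo_timer_cb;
	/* 3 ms expiry, matching the ns_to_ktime(3000000) passed to hrtimer_start() above */
	hrtimer_start(&demo_timer, ns_to_ktime(3 * NSEC_PER_MSEC), HRTIMER_MODE_REL);
	return 0;
}

static void __exit demo_exit(void)
{
	hrtimer_cancel(&demo_timer);
	flush_scheduled_work();    /* ensure no demo_flush_work is still queued */
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

Compared with the old schedule_delayed_work(..., 1) path, this keeps the expiry fixed at 3 ms regardless of HZ and moves the allocation failure handling out of the hot aggregation path into the timer callback.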