drivers/rmnet/shs/rmnet_shs_config.c  +1 −3

@@ -41,11 +41,9 @@ static int rmnet_shs_dev_notify_cb(struct notifier_block *nb,
 static struct notifier_block rmnet_shs_dev_notifier __read_mostly = {
 	.notifier_call = rmnet_shs_dev_notify_cb,
 	.priority = 2,
 };
 
-static int rmnet_shs_dev_notify_cb(struct notifier_block *nb,
-				   unsigned long event, void *data);
-
 static int rmnet_vnd_total;
+
 /* Enable smart hashing capability upon call to initialize module*/
 int __init rmnet_shs_module_init(void)
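Note: the rmnet_shs_config.c change drops a redundant forward declaration of rmnet_shs_dev_notify_cb; the declaration earlier in the file (visible in the hunk header context) already satisfies the .notifier_call initializer. For reference, a minimal sketch of the netdevice-notifier registration pattern this file follows — all example_* identifiers are illustrative placeholders, not the driver's code:

/*
 * Minimal sketch of the netdevice-notifier pattern used in
 * rmnet_shs_config.c. The callback body and all example_* names
 * are placeholders, not the driver's actual logic.
 */
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/notifier.h>

/* One declaration (or definition) above the notifier_block is enough
 * for .notifier_call; a second declaration after it is redundant. */
static int example_dev_notify_cb(struct notifier_block *nb,
				 unsigned long event, void *data)
{
	/* A real callback would switch on NETDEV_UP, NETDEV_DOWN, etc. */
	return NOTIFY_DONE;
}

static struct notifier_block example_dev_notifier __read_mostly = {
	.notifier_call = example_dev_notify_cb,
	.priority = 2,	/* higher-priority notifiers are called first */
};

static int __init example_init(void)
{
	return register_netdevice_notifier(&example_dev_notifier);
}

static void __exit example_exit(void)
{
	unregister_netdevice_notifier(&example_dev_notifier);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL v2");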
drivers/rmnet/shs/rmnet_shs_wq.c  +19 −8

@@ -149,8 +149,8 @@ unsigned long long rmnet_shs_flow_rx_pps[MAX_SUPPORTED_FLOWS_DEBUG];
 module_param_array(rmnet_shs_flow_rx_pps, ullong, 0, 0444);
 MODULE_PARM_DESC(rmnet_shs_flow_rx_pps, "SHS stamp pkt enq rate per flow");
 
-static spinlock_t rmnet_shs_wq_splock;
 static DEFINE_SPINLOCK(rmnet_shs_hstat_tbl_lock);
+static DEFINE_SPINLOCK(rmnet_shs_ep_lock);
 
 static time_t rmnet_shs_wq_tnsec;
 static struct workqueue_struct *rmnet_shs_wq;

@@ -726,7 +726,7 @@ void rmnet_shs_wq_update_cpu_rx_tbl(struct rmnet_shs_wq_hstat_s *hstat_p)
 }
 
-static void rmnet_shs_wq_chng_suggested_cpu(u16 old_cpu, u16 new_cpu,
+void rmnet_shs_wq_chng_suggested_cpu(u16 old_cpu, u16 new_cpu,
 				    struct rmnet_shs_wq_ep_s *ep)
 {
 	struct rmnet_shs_skbn_s *node_p;

@@ -785,7 +785,6 @@ u32 rmnet_shs_wq_get_dev_rps_msk(struct net_device *dev)
 		if (ep->ep->egress_dev == dev)
 			dev_rps_msk = ep->rps_config_msk;
 	}
-
 	return dev_rps_msk;
 }

@@ -1250,13 +1249,14 @@ void rmnet_shs_wq_cleanup_hash_tbl(u8 force_clean)
 void rmnet_shs_wq_update_ep_rps_msk(struct rmnet_shs_wq_ep_s *ep)
 {
-	u8 len = 0;
 	struct rps_map *map;
+	u8 len = 0;
 
-	if (!ep) {
+	if (!ep || !ep->ep || !ep->ep->egress_dev) {
 		rmnet_shs_crit_err[RMNET_SHS_WQ_EP_ACCESS_ERR]++;
 		return;
 	}
 
+	rcu_read_lock();
 	map = rcu_dereference(ep->ep->egress_dev->_rx->rps_map);
 	ep->rps_config_msk = 0;

@@ -1265,6 +1265,7 @@ void rmnet_shs_wq_update_ep_rps_msk(struct rmnet_shs_wq_ep_s *ep)
 		ep->rps_config_msk |= (1 << map->cpus[len]);
 	}
+	rcu_read_unlock();
 	ep->default_core_msk = ep->rps_config_msk & 0x0F;
 	ep->pri_core_msk = ep->rps_config_msk & 0xF0;
 }

@@ -1272,7 +1273,9 @@ void rmnet_shs_wq_update_ep_rps_msk(struct rmnet_shs_wq_ep_s *ep)
 void rmnet_shs_wq_reset_ep_active(struct net_device *dev)
 {
 	struct rmnet_shs_wq_ep_s *ep = NULL;
+	unsigned long flags;
 
+	spin_lock_irqsave(&rmnet_shs_ep_lock, flags);
 	list_for_each_entry(ep, &rmnet_shs_wq_ep_tbl, ep_list_id) {
 		if (!ep)
 			continue;

@@ -1282,13 +1285,16 @@ void rmnet_shs_wq_reset_ep_active(struct net_device *dev)
 			ep->netdev = NULL;
 		}
 	}
+	spin_unlock_irqrestore(&rmnet_shs_ep_lock, flags);
 }
 
 void rmnet_shs_wq_set_ep_active(struct net_device *dev)
 {
 	struct rmnet_shs_wq_ep_s *ep = NULL;
+	unsigned long flags;
 
+	spin_lock_irqsave(&rmnet_shs_ep_lock, flags);
 	list_for_each_entry(ep, &rmnet_shs_wq_ep_tbl, ep_list_id) {
 		if (!ep)
 			continue;

@@ -1299,7 +1305,7 @@ void rmnet_shs_wq_set_ep_active(struct net_device *dev)
 		}
 	}
-
+	spin_unlock_irqrestore(&rmnet_shs_ep_lock, flags);
 }
 
 void rmnet_shs_wq_refresh_ep_masks(void)

@@ -1336,7 +1342,7 @@ void rmnet_shs_update_cfg_mask(void)
 	rmnet_shs_cfg.map_len = rmnet_shs_get_mask_len(mask);
 }
 
-static void rmnet_shs_wq_update_stats(void)
+void rmnet_shs_wq_update_stats(void)
 {
 	struct timespec time;
 	struct rmnet_shs_wq_hstat_s *hnode = NULL;

@@ -1369,10 +1375,16 @@ static void rmnet_shs_wq_update_stats(void)
 void rmnet_shs_wq_process_wq(struct work_struct *work)
 {
+	unsigned long flags;
+
 	trace_rmnet_shs_wq_high(RMNET_SHS_WQ_PROCESS_WQ,
 				RMNET_SHS_WQ_PROCESS_WQ_START,
 				0xDEF, 0xDEF, 0xDEF, 0xDEF, NULL, NULL);
 
+	spin_lock_irqsave(&rmnet_shs_ep_lock, flags);
 	rmnet_shs_wq_update_stats();
+	spin_unlock_irqrestore(&rmnet_shs_ep_lock, flags);
+
 	queue_delayed_work(rmnet_shs_wq, &rmnet_shs_delayed_wq->wq,
 			   rmnet_shs_wq_frequency);

@@ -1491,7 +1503,6 @@ void rmnet_shs_wq_init(struct net_device *dev)
 	trace_rmnet_shs_wq_high(RMNET_SHS_WQ_INIT, RMNET_SHS_WQ_INIT_START,
 				0xDEF, 0xDEF, 0xDEF, 0xDEF, NULL, NULL);
 
-	spin_lock_init(&rmnet_shs_wq_splock);
 	rmnet_shs_wq = alloc_workqueue("rmnet_shs_wq",
 				       WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE, 1);
 	if (!rmnet_shs_wq) {
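Note: the rmnet_shs_wq.c changes are primarily a locking cleanup. The patch drops rmnet_shs_wq_splock (and its spin_lock_init() in rmnet_shs_wq_init), introduces rmnet_shs_ep_lock via DEFINE_SPINLOCK, brackets each walk of rmnet_shs_wq_ep_tbl with spin_lock_irqsave/spin_unlock_irqrestore, hardens the NULL checks in rmnet_shs_wq_update_ep_rps_msk, and removes the static qualifier from rmnet_shs_wq_chng_suggested_cpu() and rmnet_shs_wq_update_stats(). A minimal sketch of the spinlock pattern, assuming an endpoint list like the driver's (all example_* names are placeholders); the RCU read-side change is sketched separately below:

/*
 * Sketch of the locking pattern this patch applies to the endpoint
 * list; struct example_ep, example_ep_lock and example_ep_tbl are
 * illustrative stand-ins for the driver's types, not its real names.
 */
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/spinlock.h>

/* DEFINE_SPINLOCK initializes the lock at compile time, so the
 * runtime spin_lock_init() call in the init path can be dropped. */
static DEFINE_SPINLOCK(example_ep_lock);
static LIST_HEAD(example_ep_tbl);

struct example_ep {
	struct list_head ep_list_id;
	struct net_device *netdev;
	int is_ep_active;
};

static void example_set_ep_active(struct net_device *dev)
{
	struct example_ep *ep;
	unsigned long flags;

	/* The _irqsave variant keeps the walk safe even if the lock is
	 * also taken from a context that runs with interrupts disabled. */
	spin_lock_irqsave(&example_ep_lock, flags);
	list_for_each_entry(ep, &example_ep_tbl, ep_list_id) {
		if (ep->netdev == dev)
			ep->is_ep_active = 1;
	}
	spin_unlock_irqrestore(&example_ep_lock, flags);
}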
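Note: the rcu_read_lock()/rcu_read_unlock() pair added in rmnet_shs_wq_update_ep_rps_msk follows the standard RCU read-side pattern for a device's rps_map, which the networking core may replace and free via RCU at any time; the whole loop over map->cpus must therefore sit inside the critical section. A sketch under the assumption of a CONFIG_RPS kernel (example_dev_rps_msk is a hypothetical helper, not the driver's function):

/*
 * Sketch of the RCU read-side critical section the patch adds around
 * the rps_map lookup. Assumes CONFIG_RPS (which provides struct
 * rps_map and the _rx queue array); example_dev_rps_msk is illustrative.
 */
#include <linux/netdevice.h>
#include <linux/rcupdate.h>

static u32 example_dev_rps_msk(struct net_device *dev)
{
	struct rps_map *map;
	u32 msk = 0;
	u16 len;

	rcu_read_lock();	/* rps_map can be swapped and freed via RCU */
	map = rcu_dereference(dev->_rx->rps_map);
	if (map) {
		for (len = 0; len < map->len; len++)
			msk |= 1U << map->cpus[len];
	}
	rcu_read_unlock();	/* only after we are done reading *map */

	return msk;
}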