Loading net/rmnet_data/rmnet_data_config.c +58 −0 Original line number Diff line number Diff line Loading @@ -45,6 +45,11 @@ static struct notifier_block rmnet_dev_notifier = { #define RMNET_NL_MSG_SIZE(Y) (sizeof(((struct rmnet_nl_msg_s *)0)->Y)) struct rmnet_free_vnd_work { struct work_struct work; int vnd_id; }; /* ***************** Init and Cleanup *************************************** */ #ifdef RMNET_KERNEL_PRE_3_8 Loading Loading @@ -966,6 +971,36 @@ int rmnet_free_vnd(int id) return rmnet_vnd_free_dev(id); } static void _rmnet_free_vnd_later(struct work_struct *work) { struct rmnet_free_vnd_work *fwork; fwork = (struct rmnet_free_vnd_work *) work; rmnet_free_vnd(fwork->vnd_id); kfree(work); } /** * rmnet_free_vnd_later() - Schedule a work item to free virtual network device * @id: RmNet virtual device node id * * Schedule the VND to be freed at a later time. We need to do this if the * rtnl lock is already held as to prevent a deadlock. */ static void rmnet_free_vnd_later(int id) { struct rmnet_free_vnd_work *work; LOGL("(%d);", id); work = (struct rmnet_free_vnd_work *) kmalloc(sizeof(struct rmnet_free_vnd_work), GFP_KERNEL); if (!work) { LOGH("Failed to queue removal of VND:%d", id); return; } INIT_WORK((struct work_struct *)work, _rmnet_free_vnd_later); work->vnd_id = id; schedule_work((struct work_struct *)work); } /** * rmnet_force_unassociate_device() - Force a device to unassociate * @dev: Device to unassociate Loading @@ -976,6 +1011,8 @@ int rmnet_free_vnd(int id) static void rmnet_force_unassociate_device(struct net_device *dev) { int i; struct net_device *vndev; struct rmnet_logical_ep_conf_s *cfg; if (!dev) BUG(); Loading @@ -985,6 +1022,27 @@ static void rmnet_force_unassociate_device(struct net_device *dev) return; } /* Check the VNDs for offending mappings */ for (i = 0; i < RMNET_DATA_MAX_VND; i++) { vndev = rmnet_vnd_get_by_id(i); if (!vndev) { LOGL("VND %d not in use; skipping", i); continue; } cfg = rmnet_vnd_get_le_config(vndev); if 
(!cfg) { LOGH("Got NULL config from VND %d", i); BUG(); continue; } if (cfg->refcount && (cfg->egress_dev == dev)) { rmnet_unset_logical_endpoint_config(vndev, RMNET_LOCAL_LOGICAL_ENDPOINT); rmnet_free_vnd_later(i); } } /* Clear on the mappings on the phys ep */ rmnet_unset_logical_endpoint_config(dev, RMNET_LOCAL_LOGICAL_ENDPOINT); for (i = 0; i < RMNET_DATA_MAX_LOGICAL_EP; i++) rmnet_unset_logical_endpoint_config(dev, i); Loading net/rmnet_data/rmnet_data_vnd.c +34 −6 Original line number Diff line number Diff line Loading @@ -611,22 +611,32 @@ int rmnet_vnd_create_dev(int id, struct net_device **new_device, int rmnet_vnd_free_dev(int id) { struct rmnet_logical_ep_conf_s *epconfig_l; struct net_device *dev; rtnl_lock(); if ((id < 0) || (id >= RMNET_DATA_MAX_VND) || !rmnet_devices[id]) { rtnl_unlock(); LOGM("Invalid id [%d]", id); return RMNET_CONFIG_NO_SUCH_DEVICE; } epconfig_l = rmnet_vnd_get_le_config(rmnet_devices[id]); if (epconfig_l && epconfig_l->refcount) if (epconfig_l && epconfig_l->refcount) { rtnl_unlock(); return RMNET_CONFIG_DEVICE_IN_USE; } unregister_netdev(rmnet_devices[id]); free_netdev(rmnet_devices[id]); rtnl_lock(); dev = rmnet_devices[id]; rmnet_devices[id] = 0; rtnl_unlock(); if (dev) { unregister_netdev(dev); free_netdev(dev); return 0; } else { return RMNET_CONFIG_NO_SUCH_DEVICE; } } /** Loading Loading @@ -1024,3 +1034,21 @@ fcdone: return error; } /** * rmnet_vnd_get_by_id() - Get VND by array index ID * @id: Virtual network deice id [0:RMNET_DATA_MAX_VND] * * Return: * - 0 if no device or ID out of range * - otherwise return pointer to VND net_device struct */ struct net_device *rmnet_vnd_get_by_id(int id) { if (id < 0 || id >= RMNET_DATA_MAX_VND) { pr_err("Bug; VND ID out of bounds"); BUG(); return 0; } return rmnet_devices[id]; } net/rmnet_data/rmnet_data_vnd.h +1 −0 Original line number Diff line number Diff line Loading @@ -36,5 +36,6 @@ int rmnet_vnd_add_tc_flow(uint32_t id, uint32_t map_flow, uint32_t tc_flow); int 
rmnet_vnd_del_tc_flow(uint32_t id, uint32_t map_flow, uint32_t tc_flow); int rmnet_vnd_init(void); void rmnet_vnd_exit(void); struct net_device *rmnet_vnd_get_by_id(int id); #endif /* _RMNET_DATA_VND_H_ */ Loading
/*
 * NOTE(review): this span was an exact second copy of the same scraped diff
 * (rmnet_data_config.c / rmnet_data_vnd.c / rmnet_data_vnd.h hunks) that
 * already appears earlier in this file. Duplicate definitions of the same
 * functions cannot coexist in one translation unit, so the redundant copy
 * has been removed.
 */