drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h  +2 −1

@@ -73,7 +73,8 @@ struct rmnet_port {
 	struct rmnet_port_priv_stats stats;
 	int dl_marker_flush;

-	struct rmnet_descriptor *rmnet_desc;
+	/* Descriptor pool */
+	spinlock_t desc_pool_lock;
 	struct rmnet_frag_descriptor_pool *frag_desc_pool;
 };

drivers/net/ethernet/qualcomm/rmnet/rmnet_descriptor.c  +18 −21

@@ -35,6 +35,7 @@ rmnet_get_frag_descriptor(struct rmnet_port *port)
 	struct rmnet_frag_descriptor_pool *pool = port->frag_desc_pool;
 	struct rmnet_frag_descriptor *frag_desc;

+	spin_lock(&port->desc_pool_lock);
 	if (!list_empty(&pool->free_list)) {
 		frag_desc = list_first_entry(&pool->free_list,
 					     struct rmnet_frag_descriptor,
@@ -43,13 +44,15 @@ rmnet_get_frag_descriptor(struct rmnet_port *port)
 	} else {
 		frag_desc = kzalloc(sizeof(*frag_desc), GFP_ATOMIC);
 		if (!frag_desc)
-			return NULL;
+			goto out;

 		INIT_LIST_HEAD(&frag_desc->list);
 		INIT_LIST_HEAD(&frag_desc->sub_frags);
 		pool->pool_size++;
 	}

+out:
+	spin_unlock(&port->desc_pool_lock);
 	return frag_desc;
 }
 EXPORT_SYMBOL(rmnet_get_frag_descriptor);
@@ -65,12 +68,14 @@ void rmnet_recycle_frag_descriptor(struct rmnet_frag_descriptor *frag_desc,
 	memset(frag_desc, 0, sizeof(*frag_desc));
 	INIT_LIST_HEAD(&frag_desc->list);
 	INIT_LIST_HEAD(&frag_desc->sub_frags);
+	spin_lock(&port->desc_pool_lock);
 	list_add_tail(&frag_desc->list, &pool->free_list);
+	spin_unlock(&port->desc_pool_lock);
 }
 EXPORT_SYMBOL(rmnet_recycle_frag_descriptor);

-void rmnet_descriptor_add_frag(struct rmnet_port *port, struct page *p,
-			       u32 page_offset, u32 len)
+void rmnet_descriptor_add_frag(struct rmnet_port *port, struct list_head *list,
+			       struct page *p, u32 page_offset, u32 len)
 {
 	struct rmnet_frag_descriptor *frag_desc;

@@ -79,8 +84,7 @@ void rmnet_descriptor_add_frag(struct rmnet_port *port, struct page *p,
 		return;

 	rmnet_frag_fill(frag_desc, p, page_offset, len);
-	list_add_tail(&frag_desc->list, &port->rmnet_desc->frags);
-	port->rmnet_desc->nr_frags++;
+	list_add_tail(&frag_desc->list, list);
 }
 EXPORT_SYMBOL(rmnet_descriptor_add_frag);

@@ -310,7 +314,8 @@ int rmnet_frag_flow_command(struct rmnet_map_header *qmap,
 }
 EXPORT_SYMBOL(rmnet_frag_flow_command);

-void rmnet_frag_deaggregate(skb_frag_t *frag, struct rmnet_port *port)
+void rmnet_frag_deaggregate(skb_frag_t *frag, struct rmnet_port *port,
+			    struct list_head *list)
 {
 	struct rmnet_map_header *maph;
 	u8 *data = skb_frag_address(frag);

@@ -352,7 +357,7 @@ void rmnet_frag_deaggregate(skb_frag_t *frag, struct rmnet_port *port)
 	if ((int)skb_frag_size(frag) - (int)packet_len < 0)
 		return;

-	rmnet_descriptor_add_frag(port, skb_frag_page(frag),
+	rmnet_descriptor_add_frag(port, list, skb_frag_page(frag),
 				  frag->page_offset + offset,
 				  packet_len);

@@ -1000,6 +1005,7 @@ void rmnet_frag_ingress_handler(struct sk_buff *skb,
 				struct rmnet_port *port)
 {
 	rmnet_perf_chain_hook_t rmnet_perf_opt_chain_end;
+	LIST_HEAD(desc_list);

 	/* Deaggregation and freeing of HW originating
 	 * buffers is done within here
@@ -1007,19 +1013,18 @@ void rmnet_frag_ingress_handler(struct sk_buff *skb,
 	while (skb) {
 		struct sk_buff *skb_frag;

-		rmnet_frag_deaggregate(skb_shinfo(skb)->frags, port);
-		if (port->rmnet_desc->nr_frags) {
+		rmnet_frag_deaggregate(skb_shinfo(skb)->frags, port,
+				       &desc_list);
+		if (!list_empty(&desc_list)) {
 			struct rmnet_frag_descriptor *frag_desc, *tmp;

-			list_for_each_entry_safe(frag_desc, tmp,
-						 &port->rmnet_desc->frags,
+			list_for_each_entry_safe(frag_desc, tmp, &desc_list,
 						 list) {
 				list_del_init(&frag_desc->list);
 				__rmnet_frag_ingress_handler(frag_desc, port);
 			}
 		}

-		port->rmnet_desc->nr_frags = 0;
 		skb_frag = skb_shinfo(skb)->frag_list;
 		skb_shinfo(skb)->frag_list = NULL;
 		consume_skb(skb);

@@ -1046,22 +1051,14 @@ void rmnet_descriptor_deinit(struct rmnet_port *port)
 	}

 	kfree(pool);
-	kfree(port->rmnet_desc);
 }

 int rmnet_descriptor_init(struct rmnet_port *port)
 {
-	struct rmnet_descriptor *rmnet_desc;
 	struct rmnet_frag_descriptor_pool *pool;
 	int i;

-	rmnet_desc = kzalloc(sizeof(*rmnet_desc), GFP_ATOMIC);
-	if (!rmnet_desc)
-		return -ENOMEM;
-
-	INIT_LIST_HEAD(&rmnet_desc->frags);
-	port->rmnet_desc = rmnet_desc;
-
+	spin_lock_init(&port->desc_pool_lock);
 	pool = kzalloc(sizeof(*pool), GFP_ATOMIC);
 	if (!pool)
 		return -ENOMEM;

drivers/net/ethernet/qualcomm/rmnet/rmnet_descriptor.h  +4 −8

@@ -44,18 +44,13 @@ struct rmnet_frag_descriptor {
 			reserved:3;
 };

-struct rmnet_descriptor {
-	struct list_head frags;
-	u8 nr_frags;
-};
-
 /* Descriptor management */
 struct rmnet_frag_descriptor *
 rmnet_get_frag_descriptor(struct rmnet_port *port);
 void rmnet_recycle_frag_descriptor(struct rmnet_frag_descriptor *frag_desc,
 				   struct rmnet_port *port);
-void rmnet_descriptor_add_frag(struct rmnet_port *port, struct page *p,
-			       u32 page_offset, u32 len);
+void rmnet_descriptor_add_frag(struct rmnet_port *port, struct list_head *list,
+			       struct page *p, u32 page_offset, u32 len);
 int rmnet_frag_ipv6_skip_exthdr(struct rmnet_frag_descriptor *frag_desc,
 				int start, u8 *nexthdrp, __be16 *fragp);
@@ -65,7 +60,8 @@ int rmnet_frag_flow_command(struct rmnet_map_header *qmap,
 			    struct rmnet_port *port, u16 pkt_len);

 /* Ingress data handlers */
-void rmnet_frag_deaggregate(skb_frag_t *frag, struct rmnet_port *port);
+void rmnet_frag_deaggregate(skb_frag_t *frag, struct rmnet_port *port,
+			    struct list_head *list);
 void rmnet_frag_deliver(struct rmnet_frag_descriptor *frag_desc,
 			struct rmnet_port *port);
 int rmnet_frag_process_next_hdr_packet(struct rmnet_frag_descriptor *frag_desc,
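Review note: the functional change is twofold. First, the shared per-port accumulator (struct rmnet_descriptor with its frags list and nr_frags count) is removed; deaggregated packets now land on a list_head the caller owns, so concurrent ingress contexts no longer share mutable per-port state. Second, the descriptor pool's free_list is now serialized by the new port->desc_pool_lock. Below is a minimal sketch of the resulting calling pattern; example_rx_path is hypothetical, written only to condense what the reworked rmnet_frag_ingress_handler loop now does per skb.

/* Hypothetical caller, for illustration only. It mirrors the loop in
 * rmnet_frag_ingress_handler; note __rmnet_frag_ingress_handler is
 * internal to rmnet_descriptor.c, so real external callers would use
 * the exported entry points instead.
 */
#include <linux/list.h>
#include <linux/skbuff.h>

#include "rmnet_config.h"
#include "rmnet_descriptor.h"

static void example_rx_path(struct sk_buff *skb, struct rmnet_port *port)
{
	struct rmnet_frag_descriptor *frag_desc, *tmp;
	LIST_HEAD(desc_list);	/* caller-owned, so no locking needed */

	/* Split the aggregated frame: one descriptor per MAP packet is
	 * queued on desc_list. Pool accesses inside this call are the
	 * only part serialized by port->desc_pool_lock.
	 */
	rmnet_frag_deaggregate(skb_shinfo(skb)->frags, port, &desc_list);

	/* Drain the private list and deliver each packet */
	list_for_each_entry_safe(frag_desc, tmp, &desc_list, list) {
		list_del_init(&frag_desc->list);
		__rmnet_frag_ingress_handler(frag_desc, port);
	}
}

Because desc_list lives on the caller's stack, filling and draining it needs no lock; only the get/recycle paths of the descriptor pool take desc_pool_lock.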
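One idiom in rmnet_get_frag_descriptor is worth calling out for reviewers: the kzalloc() fallback stays inside the critical section so pool_size and free_list can never disagree, which is why the allocation-failure path exits via goto out (dropping the lock) rather than returning directly. Reduced to its essentials, with generic names rather than the driver's:

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct item {
	struct list_head node;
};

struct pool {
	spinlock_t lock;		/* protects free_list and size */
	struct list_head free_list;
	unsigned int size;
};

/* Pop a recycled item, or grow the pool; returns NULL only on OOM. */
static struct item *pool_get(struct pool *p)
{
	struct item *it = NULL;

	spin_lock(&p->lock);
	if (!list_empty(&p->free_list)) {
		it = list_first_entry(&p->free_list, struct item, node);
		list_del_init(&it->node);
	} else {
		it = kzalloc(sizeof(*it), GFP_ATOMIC);
		if (!it)
			goto out;	/* must still drop the lock */

		INIT_LIST_HEAD(&it->node);
		p->size++;
	}
out:
	spin_unlock(&p->lock);
	return it;
}

Since the allocation runs under a spinlock, GFP_ATOMIC is mandatory there; a sleeping allocation in that slot would be a bug.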