drivers/net/e1000/e1000_main.c (+1 −0)

@@ -3789,6 +3789,7 @@ e1000_netpoll(struct net_device *netdev)
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 	disable_irq(adapter->pdev->irq);
 	e1000_intr(adapter->pdev->irq, netdev, NULL);
+	e1000_clean_tx_irq(adapter);
 	enable_irq(adapter->pdev->irq);
 }
 #endif

include/linux/netpoll.h (+14 −6)

@@ -9,6 +9,7 @@
 #include <linux/netdevice.h>
 #include <linux/interrupt.h>
+#include <linux/rcupdate.h>
 #include <linux/list.h>

 struct netpoll;
@@ -26,6 +27,7 @@ struct netpoll {
 struct netpoll_info {
 	spinlock_t poll_lock;
 	int poll_owner;
+	int tries;
 	int rx_flags;
 	spinlock_t rx_lock;
 	struct netpoll *rx_np; /* netpoll that registered an rx_hook */
@@ -60,25 +62,31 @@ static inline int netpoll_rx(struct sk_buff *skb)
 	return ret;
 }

-static inline void netpoll_poll_lock(struct net_device *dev)
+static inline void *netpoll_poll_lock(struct net_device *dev)
 {
+	rcu_read_lock(); /* deal with race on ->npinfo */
 	if (dev->npinfo) {
 		spin_lock(&dev->npinfo->poll_lock);
 		dev->npinfo->poll_owner = smp_processor_id();
+		return dev->npinfo;
 	}
+	return NULL;
 }

-static inline void netpoll_poll_unlock(struct net_device *dev)
+static inline void netpoll_poll_unlock(void *have)
 {
-	if (dev->npinfo) {
-		dev->npinfo->poll_owner = -1;
-		spin_unlock(&dev->npinfo->poll_lock);
+	struct netpoll_info *npi = have;
+
+	if (npi) {
+		npi->poll_owner = -1;
+		spin_unlock(&npi->poll_lock);
 	}
+	rcu_read_unlock();
 }

 #else
 #define netpoll_rx(a) 0
-#define netpoll_poll_lock(a)
+#define netpoll_poll_lock(a) 0
 #define netpoll_poll_unlock(a)
 #endif

include/linux/skbuff.h (+1 −1)

@@ -255,7 +255,7 @@ struct sk_buff {
 				nohdr:1;
 				/* 3 bits spare */
 	__u8		pkt_type;
-	__u16		protocol;
+	__be16		protocol;
 	void		(*destructor)(struct sk_buff *skb);

 #ifdef CONFIG_NETFILTER

net/core/dev.c (+5 −4)

@@ -1696,6 +1696,7 @@ static void net_rx_action(struct softirq_action *h)
 	struct softnet_data *queue = &__get_cpu_var(softnet_data);
 	unsigned long start_time = jiffies;
 	int budget = netdev_budget;
+	void *have;

 	local_irq_disable();

@@ -1709,10 +1710,10 @@ static void net_rx_action(struct softirq_action *h)
 		dev = list_entry(queue->poll_list.next,
 				 struct net_device, poll_list);
-		netpoll_poll_lock(dev);
+		have = netpoll_poll_lock(dev);

 		if (dev->quota <= 0 || dev->poll(dev, &budget)) {
-			netpoll_poll_unlock(dev);
+			netpoll_poll_unlock(have);
 			local_irq_disable();
 			list_del(&dev->poll_list);
 			list_add_tail(&dev->poll_list, &queue->poll_list);
@@ -1721,7 +1722,7 @@ static void net_rx_action(struct softirq_action *h)
 			else
 				dev->quota = dev->weight;
 		} else {
-			netpoll_poll_unlock(dev);
+			netpoll_poll_unlock(have);
 			dev_put(dev);
 			local_irq_disable();
 		}
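Note (not part of the patch): a minimal sketch of the calling pattern the reworked netpoll_poll_lock()/netpoll_poll_unlock() pair expects, modeled on the net_rx_action() hunk above. run_one_poll() is a hypothetical stand-in for dev->poll().

/* Illustration only: lock, poll, then unlock via the returned cookie. */
static void poll_one_device(struct net_device *dev)
{
	void *have;

	/* rcu_read_lock() is taken inside; the cookie records which
	 * netpoll_info (if any) was actually locked. */
	have = netpoll_poll_lock(dev);

	run_one_poll(dev);		/* hypothetical stand-in for dev->poll() */

	/* Unlock exactly what was locked, even if dev->npinfo was
	 * published by netpoll_setup() in the meantime. */
	netpoll_poll_unlock(have);
}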
net/core/netpoll.c (+38 −25)

@@ -33,6 +33,7 @@
 #define MAX_UDP_CHUNK 1460
 #define MAX_SKBS 32
 #define MAX_QUEUE_DEPTH (MAX_SKBS / 2)
+#define MAX_RETRIES 20000

 static DEFINE_SPINLOCK(skb_list_lock);
 static int nr_skbs;
@@ -248,14 +249,14 @@ static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
 	int status;
 	struct netpoll_info *npinfo;

-repeat:
 	if (!np || !np->dev || !netif_running(np->dev)) {
 		__kfree_skb(skb);
 		return;
 	}

-	/* avoid recursion */
 	npinfo = np->dev->npinfo;
+
+	/* avoid recursion */
 	if (npinfo->poll_owner == smp_processor_id() ||
 	    np->dev->xmit_lock_owner == smp_processor_id()) {
 		if (np->drop)
@@ -265,6 +266,8 @@ repeat:
 		return;
 	}

+	do {
+		npinfo->tries--;
 		spin_lock(&np->dev->xmit_lock);
 		np->dev->xmit_lock_owner = smp_processor_id();
@@ -275,20 +278,25 @@ repeat:
 		if (netif_queue_stopped(np->dev)) {
 			np->dev->xmit_lock_owner = -1;
 			spin_unlock(&np->dev->xmit_lock);
 			netpoll_poll(np);
-			goto repeat;
+			udelay(50);
+			continue;
 		}

 		status = np->dev->hard_start_xmit(skb, np->dev);
 		np->dev->xmit_lock_owner = -1;
 		spin_unlock(&np->dev->xmit_lock);

+		/* success */
+		if(!status) {
+			npinfo->tries = MAX_RETRIES; /* reset */
+			return;
+		}
+
 		/* transmit busy */
-	if(status) {
 		netpoll_poll(np);
-		goto repeat;
-	}
+		udelay(50);
+	} while (npinfo->tries > 0);
 }
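For review purposes, a distilled sketch (not part of the patch) of the retry budget the new do/while loop in netpoll_send_skb() implements. xmit_once() is a hypothetical stand-in for the locked hard_start_xmit() attempt; everything else mirrors the hunk above.

/* Sketch only: each attempt spends one unit of the shared npinfo->tries
 * budget; a successful transmit refills it, so only a persistently wedged
 * device can exhaust MAX_RETRIES and cause the skb to be dropped. */
static void send_with_budget(struct netpoll *np, struct netpoll_info *npinfo,
			     struct sk_buff *skb)
{
	do {
		npinfo->tries--;

		if (xmit_once(np, skb) == 0) {		/* hypothetical helper */
			npinfo->tries = MAX_RETRIES;	/* reset on success */
			return;
		}

		/* queue stopped or transmit busy: service the NIC and
		 * back off briefly before trying again */
		netpoll_poll(np);
		udelay(50);
	} while (npinfo->tries > 0);
}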
@@ -349,15 +357,11 @@ static void arp_reply(struct sk_buff *skb)
 	unsigned char *arp_ptr;
 	int size, type = ARPOP_REPLY, ptype = ETH_P_ARP;
 	u32 sip, tip;
-	unsigned long flags;
 	struct sk_buff *send_skb;
 	struct netpoll *np = NULL;

-	spin_lock_irqsave(&npinfo->rx_lock, flags);
 	if (npinfo->rx_np && npinfo->rx_np->dev == skb->dev)
 		np = npinfo->rx_np;
-	spin_unlock_irqrestore(&npinfo->rx_lock, flags);

 	if (!np)
 		return;

@@ -639,9 +643,11 @@ int netpoll_setup(struct netpoll *np)
 		if (!npinfo)
 			goto release;

+		npinfo->rx_flags = 0;
 		npinfo->rx_np = NULL;
 		npinfo->poll_lock = SPIN_LOCK_UNLOCKED;
 		npinfo->poll_owner = -1;
+		npinfo->tries = MAX_RETRIES;
 		npinfo->rx_lock = SPIN_LOCK_UNLOCKED;
 	} else
 		npinfo = ndev->npinfo;

@@ -718,9 +724,16 @@ int netpoll_setup(struct netpoll *np)
 		npinfo->rx_np = np;
 		spin_unlock_irqrestore(&npinfo->rx_lock, flags);
 	}
+
+	/* fill up the skb queue */
+	refill_skbs();
+
 	/* last thing to do is link it to the net device structure */
 	ndev->npinfo = npinfo;

+	/* avoid racing with NAPI reading npinfo */
+	synchronize_rcu();
+
 	return 0;

 release:
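Again for illustration only (not part of the patch): the publish order the tail of netpoll_setup() relies on, paired with the rcu_read_lock() section inside netpoll_poll_lock()/netpoll_poll_unlock(). prepare_npinfo() is a hypothetical stand-in for the field initialization done earlier in netpoll_setup().

/* Sketch only: fully initialize the new netpoll_info, publish it with a
 * single pointer store, then wait for every poller that may have read the
 * old (possibly NULL) pointer to leave its RCU read-side section. */
static void publish_npinfo(struct net_device *ndev, struct netpoll_info *npinfo)
{
	prepare_npinfo(npinfo);		/* poll_lock, poll_owner, tries, ... */

	ndev->npinfo = npinfo;		/* visible to netpoll_poll_lock() from here on */
	synchronize_rcu();		/* readers of the old value are now done */
}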