drivers/net/Kconfig +1 −1

@@ -2635,7 +2635,7 @@ config NIU
 config PASEMI_MAC
 	tristate "PA Semi 1/10Gbit MAC"
-	depends on PPC64 && PCI
+	depends on PPC_PASEMI && PCI
 	select PHYLIB
 	select INET_LRO
 	help
drivers/net/bfin_mac.c +0 −2

@@ -575,7 +575,6 @@ static void adjust_tx_list(void)
 static int bf537mac_hard_start_xmit(struct sk_buff *skb,
 				    struct net_device *dev)
 {
-	struct bf537mac_local *lp = netdev_priv(dev);
 	unsigned int data;

 	current_tx_ptr->skb = skb;
@@ -634,7 +633,6 @@ static int bf537mac_hard_start_xmit(struct sk_buff *skb,
 static void bf537mac_rx(struct net_device *dev)
 {
 	struct sk_buff *skb, *new_skb;
-	struct bf537mac_local *lp = netdev_priv(dev);
 	unsigned short len;

 	/* allocate a new skb for next time receive */
drivers/net/forcedeth.c +10 −8

@@ -1854,6 +1854,7 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	struct ring_desc* start_tx;
 	struct ring_desc* prev_tx;
 	struct nv_skb_map* prev_tx_ctx;
+	unsigned long flags;

 	/* add fragments to entries count */
 	for (i = 0; i < fragments; i++) {
@@ -1863,10 +1864,10 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	empty_slots = nv_get_empty_tx_slots(np);
 	if (unlikely(empty_slots <= entries)) {
-		spin_lock_irq(&np->lock);
+		spin_lock_irqsave(&np->lock, flags);
 		netif_stop_queue(dev);
 		np->tx_stop = 1;
-		spin_unlock_irq(&np->lock);
+		spin_unlock_irqrestore(&np->lock, flags);
 		return NETDEV_TX_BUSY;
 	}
@@ -1929,13 +1930,13 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	tx_flags_extra = skb->ip_summed == CHECKSUM_PARTIAL ?
 			 NV_TX2_CHECKSUM_L3 | NV_TX2_CHECKSUM_L4 : 0;

-	spin_lock_irq(&np->lock);
+	spin_lock_irqsave(&np->lock, flags);

 	/* set tx flags */
 	start_tx->flaglen |= cpu_to_le32(tx_flags | tx_flags_extra);
 	np->put_tx.orig = put_tx;

-	spin_unlock_irq(&np->lock);
+	spin_unlock_irqrestore(&np->lock, flags);

 	dprintk(KERN_DEBUG "%s: nv_start_xmit: entries %d queued for transmission. tx_flags_extra: %x\n",
 		dev->name, entries, tx_flags_extra);
@@ -1971,6 +1972,7 @@ static int nv_start_xmit_optimized(struct sk_buff *skb, struct net_device *dev)
 	struct ring_desc_ex* prev_tx;
 	struct nv_skb_map* prev_tx_ctx;
 	struct nv_skb_map* start_tx_ctx;
+	unsigned long flags;

 	/* add fragments to entries count */
 	for (i = 0; i < fragments; i++) {
@@ -1980,10 +1982,10 @@ static int nv_start_xmit_optimized(struct sk_buff *skb, struct net_device *dev)
 	empty_slots = nv_get_empty_tx_slots(np);
 	if (unlikely(empty_slots <= entries)) {
-		spin_lock_irq(&np->lock);
+		spin_lock_irqsave(&np->lock, flags);
 		netif_stop_queue(dev);
 		np->tx_stop = 1;
-		spin_unlock_irq(&np->lock);
+		spin_unlock_irqrestore(&np->lock, flags);
 		return NETDEV_TX_BUSY;
 	}
@@ -2059,7 +2061,7 @@ static int nv_start_xmit_optimized(struct sk_buff *skb, struct net_device *dev)
 		start_tx->txvlan = 0;
 	}

-	spin_lock_irq(&np->lock);
+	spin_lock_irqsave(&np->lock, flags);

 	if (np->tx_limit) {
 		/* Limit the number of outstanding tx. Setup all fragments, but
@@ -2085,7 +2087,7 @@ static int nv_start_xmit_optimized(struct sk_buff *skb, struct net_device *dev)
 	start_tx->flaglen |= cpu_to_le32(tx_flags | tx_flags_extra);
 	np->put_tx.ex = put_tx;

-	spin_unlock_irq(&np->lock);
+	spin_unlock_irqrestore(&np->lock, flags);

 	dprintk(KERN_DEBUG "%s: nv_start_xmit_optimized: entries %d queued for transmission. tx_flags_extra: %x\n",
 		dev->name, entries, tx_flags_extra);
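Note on the forcedeth hunks above (my reading; the patch carries no changelog here): spin_lock_irq()/spin_unlock_irq() unconditionally re-enable local interrupts on unlock, whereas spin_lock_irqsave() records the caller's interrupt state in flags and spin_unlock_irqrestore() puts that state back, so the critical section stays safe even if the xmit path is entered with interrupts already disabled. A minimal sketch of the pattern, using a hypothetical lock and function rather than the driver's own:

	#include <linux/spinlock.h>

	/* Hypothetical lock, standing in for np->lock in the driver. */
	static DEFINE_SPINLOCK(example_lock);

	static void example_critical_section(void)
	{
		unsigned long flags;

		/* Saves the current IRQ state in 'flags' and disables IRQs. */
		spin_lock_irqsave(&example_lock, flags);
		/* ... touch state shared with the interrupt handler ... */
		/* Restores exactly the IRQ state the caller had on entry. */
		spin_unlock_irqrestore(&example_lock, flags);
	}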
drivers/net/ibm_newemac/core.c +2 −2

@@ -1242,8 +1242,8 @@ static int emac_close(struct net_device *ndev)
 static inline u16 emac_tx_csum(struct emac_instance *dev,
 			       struct sk_buff *skb)
 {
-	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH &&
-		skb->ip_summed == CHECKSUM_PARTIAL)) {
+	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH) &&
+	    (skb->ip_summed == CHECKSUM_PARTIAL)) {
 		++dev->stats.tx_packets_csum;
 		return EMAC_TX_CTRL_TAH_CSUM;
 	}
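For the ibm_newemac hunk above, my reading of the fix: with the misplaced parenthesis, the whole expression EMAC_FTR_HAS_TAH && skb->ip_summed == CHECKSUM_PARTIAL was passed to emac_has_feature() as the feature mask, so the call tested a 0/1 value instead of the TAH feature bit. A small self-contained illustration of that trap, using hypothetical stand-in values rather than the driver's constants:

	#include <stdio.h>

	#define FTR_HAS_TAH      0x10	/* hypothetical feature bit */
	#define CHECKSUM_PARTIAL 3	/* hypothetical checksum mode */

	static int has_feature(unsigned int features, unsigned int mask)
	{
		return (features & mask) != 0;
	}

	int main(void)
	{
		unsigned int features = FTR_HAS_TAH;
		int ip_summed = CHECKSUM_PARTIAL;

		/* Buggy form: the && expression collapses to 1, so bit 0x1 is tested. */
		printf("buggy: %d\n",
		       has_feature(features, FTR_HAS_TAH && ip_summed == CHECKSUM_PARTIAL));

		/* Fixed form: test the real feature bit, then the checksum mode. */
		printf("fixed: %d\n",
		       has_feature(features, FTR_HAS_TAH) && ip_summed == CHECKSUM_PARTIAL);
		return 0;
	}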
drivers/net/s2io.c +1 −1

@@ -84,7 +84,7 @@
 #include "s2io.h"
 #include "s2io-regs.h"

-#define DRV_VERSION "2.0.26.15-2"
+#define DRV_VERSION "2.0.26.20"

 /* S2io Driver name & version. */
 static char s2io_driver_name[] = "Neterion";