drivers/net/forcedeth.c  +84 −228

@@ -106,7 +106,6 @@
  *	0.51: 20 Jan 2006: Add 64bit consistent memory allocation for rings.
  *	0.52: 20 Jan 2006: Add MSI/MSIX support.
  *	0.53: 19 Mar 2006: Fix init from low power mode and add hw reset.
- *	0.54: 21 Mar 2006: Fix spin locks for multi irqs and cleanup.
  *
  * Known bugs:
  * We suspect that on some hardware no TX done interrupts are generated.
@@ -118,7 +117,7 @@
  * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few
  * superfluous timer interrupts from the nic.
  */
-#define FORCEDETH_VERSION		"0.54"
+#define FORCEDETH_VERSION		"0.53"
 #define DRV_NAME			"forcedeth"
 
 #include <linux/module.h>
@@ -711,72 +710,6 @@ static void setup_hw_rings(struct net_device *dev, int rxtx_flags)
 	}
 }
 
-static int using_multi_irqs(struct net_device *dev)
-{
-	struct fe_priv *np = get_nvpriv(dev);
-
-	if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
-	    ((np->msi_flags & NV_MSI_X_ENABLED) &&
-	     ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1)))
-		return 0;
-	else
-		return 1;
-}
-
-static void nv_enable_irq(struct net_device *dev)
-{
-	struct fe_priv *np = get_nvpriv(dev);
-
-	if (!using_multi_irqs(dev)) {
-		if (np->msi_flags & NV_MSI_X_ENABLED)
-			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
-		else
-			enable_irq(dev->irq);
-	} else {
-		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
-		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
-		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
-	}
-}
-
-static void nv_disable_irq(struct net_device *dev)
-{
-	struct fe_priv *np = get_nvpriv(dev);
-
-	if (!using_multi_irqs(dev)) {
-		if (np->msi_flags & NV_MSI_X_ENABLED)
-			disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
-		else
-			disable_irq(dev->irq);
-	} else {
-		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
-		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
-		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
-	}
-}
-
-/* In MSIX mode, a write to irqmask behaves as XOR */
-static void nv_enable_hw_interrupts(struct net_device *dev, u32 mask)
-{
-	u8 __iomem *base = get_hwbase(dev);
-
-	writel(mask, base + NvRegIrqMask);
-}
-
-static void nv_disable_hw_interrupts(struct net_device *dev, u32 mask)
-{
-	struct fe_priv *np = get_nvpriv(dev);
-	u8 __iomem *base = get_hwbase(dev);
-
-	if (np->msi_flags & NV_MSI_X_ENABLED) {
-		writel(mask, base + NvRegIrqMask);
-	} else {
-		if (np->msi_flags & NV_MSI_ENABLED)
-			writel(0, base + NvRegMSIIrqMask);
-		writel(0, base + NvRegIrqMask);
-	}
-}
-
 #define MII_READ	(-1)
 /* mii_rw: read/write a register on the PHY.
  *
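The five helpers removed above all turn on one question: is the device servicing a single interrupt line, or one MSI-X vector per event class? Every reverted call site below open-codes that test. For reference, a minimal standalone sketch of the predicate (nv_single_irq is a hypothetical name, not a symbol in the driver):

static inline int nv_single_irq(struct fe_priv *np)
{
	/* Single-interrupt mode: MSI-X disabled, or MSI-X enabled with
	 * only one vector allocated.  Equivalent to the open-coded
	 * !A || (A && B) form at the call sites, since
	 * !A || (A && B) == !A || B. */
	return !(np->msi_flags & NV_MSI_X_ENABLED) ||
	       (np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1;
}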
@@ -1086,24 +1019,23 @@ static void nv_do_rx_refill(unsigned long data)
 	struct net_device *dev = (struct net_device *) data;
 	struct fe_priv *np = netdev_priv(dev);
 
-	if (!using_multi_irqs(dev)) {
-		if (np->msi_flags & NV_MSI_X_ENABLED)
-			disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
-		else
+	if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
+	    ((np->msi_flags & NV_MSI_X_ENABLED) &&
+	     ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) {
 		disable_irq(dev->irq);
 	} else {
 		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
 	}
 	if (nv_alloc_rx(dev)) {
-		spin_lock_irq(&np->lock);
+		spin_lock(&np->lock);
 		if (!np->in_shutdown)
 			mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
-		spin_unlock_irq(&np->lock);
+		spin_unlock(&np->lock);
 	}
-	if (!using_multi_irqs(dev)) {
-		if (np->msi_flags & NV_MSI_X_ENABLED)
-			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
-		else
+	if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
+	    ((np->msi_flags & NV_MSI_X_ENABLED) &&
+	     ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) {
 		enable_irq(dev->irq);
 	} else {
 		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);

@@ -1736,7 +1668,15 @@ static int nv_change_mtu(struct net_device *dev, int new_mtu)
 	 * guessed, there is probably a simpler approach.
 	 * Changing the MTU is a rare event, it shouldn't matter.
 	 */
-	nv_disable_irq(dev);
+	if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
+	    ((np->msi_flags & NV_MSI_X_ENABLED) &&
+	     ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) {
+		disable_irq(dev->irq);
+	} else {
+		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
+		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
+		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
+	}
 	spin_lock_bh(&dev->xmit_lock);
 	spin_lock(&np->lock);
 	/* stop engines */
@@ -1769,7 +1709,15 @@ static int nv_change_mtu(struct net_device *dev, int new_mtu)
 		nv_start_tx(dev);
 		spin_unlock(&np->lock);
 		spin_unlock_bh(&dev->xmit_lock);
-		nv_enable_irq(dev);
+		if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
+		    ((np->msi_flags & NV_MSI_X_ENABLED) &&
+		     ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) {
+			enable_irq(dev->irq);
+		} else {
+			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
+			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
+			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
+		}
 	}
 	return 0;
 }
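One way to read the spin_lock_irq() -> spin_lock() changes throughout the handlers that follow (an inference, not stated by the patch itself): spin_unlock_irq() re-enables local interrupts unconditionally, so the _irq variants belong in process context, while code already running in a hardirq handler takes the plain lock. Illustrative sketch only, with the 2.6.16-era handler signature and hypothetical demo_* names:

struct demo_priv {
	spinlock_t lock;
};

static irqreturn_t demo_irq(int irq, void *data, struct pt_regs *regs)
{
	struct demo_priv *priv = data;

	/* Hardirq context: do not touch the local interrupt flag here;
	 * process-context users of the same lock pair this with
	 * spin_lock_irq()/spin_lock_irqsave() instead. */
	spin_lock(&priv->lock);
	/* ... handle the event ... */
	spin_unlock(&priv->lock);

	return IRQ_HANDLED;
}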
@@ -2160,16 +2108,16 @@ static irqreturn_t nv_nic_irq_tx(int foo, void *data, struct pt_regs *regs)
 		if (!(events & np->irqmask))
 			break;
 
-		spin_lock_irq(&np->lock);
+		spin_lock(&np->lock);
 		nv_tx_done(dev);
-		spin_unlock_irq(&np->lock);
+		spin_unlock(&np->lock);
 
 		if (events & (NVREG_IRQ_TX_ERR)) {
 			dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n",
 						dev->name, events);
 		}
 		if (i > max_interrupt_work) {
-			spin_lock_irq(&np->lock);
+			spin_lock(&np->lock);
 			/* disable interrupts on the nic */
 			writel(NVREG_IRQ_TX_ALL, base + NvRegIrqMask);
 			pci_push(base);
@@ -2179,7 +2127,7 @@ static irqreturn_t nv_nic_irq_tx(int foo, void *data, struct pt_regs *regs)
 				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
 			}
 			printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_tx.\n", dev->name, i);
-			spin_unlock_irq(&np->lock);
+			spin_unlock(&np->lock);
 			break;
 		}
 
@@ -2209,14 +2157,14 @@ static irqreturn_t nv_nic_irq_rx(int foo, void *data, struct pt_regs *regs)
 		nv_rx_process(dev);
 		if (nv_alloc_rx(dev)) {
-			spin_lock_irq(&np->lock);
+			spin_lock(&np->lock);
 			if (!np->in_shutdown)
 				mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
-			spin_unlock_irq(&np->lock);
+			spin_unlock(&np->lock);
 		}
 
 		if (i > max_interrupt_work) {
-			spin_lock_irq(&np->lock);
+			spin_lock(&np->lock);
 			/* disable interrupts on the nic */
 			writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
 			pci_push(base);
@@ -2226,7 +2174,7 @@ static irqreturn_t nv_nic_irq_rx(int foo, void *data, struct pt_regs *regs)
 				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
 			}
 			printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_rx.\n", dev->name, i);
-			spin_unlock_irq(&np->lock);
+			spin_unlock(&np->lock);
 			break;
 		}
 
@@ -2255,14 +2203,14 @@ static irqreturn_t nv_nic_irq_other(int foo, void *data, struct pt_regs *regs)
 			break;
 
 		if (events & NVREG_IRQ_LINK) {
-			spin_lock_irq(&np->lock);
+			spin_lock(&np->lock);
 			nv_link_irq(dev);
-			spin_unlock_irq(&np->lock);
+			spin_unlock(&np->lock);
 		}
 		if (np->need_linktimer && time_after(jiffies, np->link_timeout)) {
-			spin_lock_irq(&np->lock);
+			spin_lock(&np->lock);
 			nv_linkchange(dev);
-			spin_unlock_irq(&np->lock);
+			spin_unlock(&np->lock);
 			np->link_timeout = jiffies + LINK_TIMEOUT;
 		}
 		if (events & (NVREG_IRQ_UNKNOWN)) {
@@ -2270,7 +2218,7 @@ static irqreturn_t nv_nic_irq_other(int foo, void *data, struct pt_regs *regs)
 				dev->name, events);
 		}
 		if (i > max_interrupt_work) {
-			spin_lock_irq(&np->lock);
+			spin_lock(&np->lock);
 			/* disable interrupts on the nic */
 			writel(NVREG_IRQ_OTHER, base + NvRegIrqMask);
 			pci_push(base);
@@ -2280,7 +2228,7 @@ static irqreturn_t nv_nic_irq_other(int foo, void *data, struct pt_regs *regs)
 				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
 			}
 			printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_other.\n", dev->name, i);
-			spin_unlock_irq(&np->lock);
+			spin_unlock(&np->lock);
 			break;
 		}
 
@@ -2303,10 +2251,9 @@ static void nv_do_nic_poll(unsigned long data)
 	 * nv_nic_irq because that may decide to do otherwise
 	 */
 
-	if (!using_multi_irqs(dev)) {
-		if (np->msi_flags & NV_MSI_X_ENABLED)
-			disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
-		else
+	if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
+	    ((np->msi_flags & NV_MSI_X_ENABLED) &&
+	     ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) {
 		disable_irq(dev->irq);
 		mask = np->irqmask;
 	} else {
@@ -2330,11 +2277,10 @@ static void nv_do_nic_poll(unsigned long data)
 	writel(mask, base + NvRegIrqMask);
 	pci_push(base);
 
-	if (!using_multi_irqs(dev)) {
+	if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
+	    ((np->msi_flags & NV_MSI_X_ENABLED) &&
+	     ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) {
 		nv_nic_irq((int) 0, (void *) data, (struct pt_regs *) NULL);
-		if (np->msi_flags & NV_MSI_X_ENABLED)
-			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
-		else
 			enable_irq(dev->irq);
 	} else {
 		if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
@@ -2682,113 +2628,6 @@ static void set_msix_vector_map(struct net_device *dev, u32 vector, u32 irqmask)
 	writel(readl(base + NvRegMSIXMap1) | msixmap, base + NvRegMSIXMap1);
 }
 
-static int nv_request_irq(struct net_device *dev)
-{
-	struct fe_priv *np = get_nvpriv(dev);
-	u8 __iomem *base = get_hwbase(dev);
-	int ret = 1;
-	int i;
-
-	if (np->msi_flags & NV_MSI_X_CAPABLE) {
-		for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
-			np->msi_x_entry[i].entry = i;
-		}
-		if ((ret = pci_enable_msix(np->pci_dev, np->msi_x_entry, (np->msi_flags & NV_MSI_X_VECTORS_MASK))) == 0) {
-			np->msi_flags |= NV_MSI_X_ENABLED;
-			if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT) {
-				/* Request irq for rx handling */
-				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, &nv_nic_irq_rx, SA_SHIRQ, dev->name, dev) != 0) {
-					printk(KERN_INFO "forcedeth: request_irq failed for rx %d\n", ret);
-					pci_disable_msix(np->pci_dev);
-					np->msi_flags &= ~NV_MSI_X_ENABLED;
-					goto out_err;
-				}
-				/* Request irq for tx handling */
-				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, &nv_nic_irq_tx, SA_SHIRQ, dev->name, dev) != 0) {
-					printk(KERN_INFO "forcedeth: request_irq failed for tx %d\n", ret);
-					pci_disable_msix(np->pci_dev);
-					np->msi_flags &= ~NV_MSI_X_ENABLED;
-					goto out_free_rx;
-				}
-				/* Request irq for link and timer handling */
-				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector, &nv_nic_irq_other, SA_SHIRQ, dev->name, dev) != 0) {
-					printk(KERN_INFO "forcedeth: request_irq failed for link %d\n", ret);
-					pci_disable_msix(np->pci_dev);
-					np->msi_flags &= ~NV_MSI_X_ENABLED;
-					goto out_free_tx;
-				}
-				/* map interrupts to their respective vector */
-				writel(0, base + NvRegMSIXMap0);
-				writel(0, base + NvRegMSIXMap1);
-				set_msix_vector_map(dev, NV_MSI_X_VECTOR_RX, NVREG_IRQ_RX_ALL);
-				set_msix_vector_map(dev, NV_MSI_X_VECTOR_TX, NVREG_IRQ_TX_ALL);
-				set_msix_vector_map(dev, NV_MSI_X_VECTOR_OTHER, NVREG_IRQ_OTHER);
-			} else {
-				/* Request irq for all interrupts */
-				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0) {
-					printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
-					pci_disable_msix(np->pci_dev);
-					np->msi_flags &= ~NV_MSI_X_ENABLED;
-					goto out_err;
-				}
-				/* map interrupts to vector 0 */
-				writel(0, base + NvRegMSIXMap0);
-				writel(0, base + NvRegMSIXMap1);
-			}
-		}
-	}
-	if (ret != 0 && np->msi_flags & NV_MSI_CAPABLE) {
-		if ((ret = pci_enable_msi(np->pci_dev)) == 0) {
-			np->msi_flags |= NV_MSI_ENABLED;
-			if (request_irq(np->pci_dev->irq, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0) {
-				printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
-				pci_disable_msi(np->pci_dev);
-				np->msi_flags &= ~NV_MSI_ENABLED;
-				goto out_err;
-			}
-			/* map interrupts to vector 0 */
-			writel(0, base + NvRegMSIMap0);
-			writel(0, base + NvRegMSIMap1);
-			/* enable msi vector 0 */
-			writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask);
-		}
-	}
-	if (ret != 0) {
-		if (request_irq(np->pci_dev->irq, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0)
-			goto out_err;
-	}
-
-	return 0;
-out_free_tx:
-	free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, dev);
-out_free_rx:
-	free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, dev);
-out_err:
-	return 1;
-}
-
-static void nv_free_irq(struct net_device *dev)
-{
-	struct fe_priv *np = get_nvpriv(dev);
-	int i;
-
-	if (np->msi_flags & NV_MSI_X_ENABLED) {
-		for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
-			free_irq(np->msi_x_entry[i].vector, dev);
-		}
-		pci_disable_msix(np->pci_dev);
-		np->msi_flags &= ~NV_MSI_X_ENABLED;
-	} else {
-		free_irq(np->pci_dev->irq, dev);
-		if (np->msi_flags & NV_MSI_ENABLED) {
-			pci_disable_msi(np->pci_dev);
-			np->msi_flags &= ~NV_MSI_ENABLED;
-		}
-	}
-}
-
 static int nv_open(struct net_device *dev)
 {
 	struct fe_priv *np = netdev_priv(dev);
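The removed nv_request_irq() implemented the standard interrupt-setup ladder, MSI-X first, then MSI, then the legacy INTx line; after this revert the equivalent steps live inline in nv_open(). A condensed sketch of that ladder under the 2.6.16-era API (hypothetical demo_* names; error unwinding trimmed for brevity):

static int demo_setup_irq(struct pci_dev *pdev, struct msix_entry *entries,
			  int nvec,
			  irqreturn_t (*handler)(int, void *, struct pt_regs *),
			  void *ctx)
{
	int i;

	/* 1. MSI-X: one entry per requested vector. */
	for (i = 0; i < nvec; i++)
		entries[i].entry = i;
	if (pci_enable_msix(pdev, entries, nvec) == 0)
		return request_irq(entries[0].vector, handler, SA_SHIRQ,
				   "demo", ctx);

	/* 2. Single-message MSI. */
	if (pci_enable_msi(pdev) == 0)
		return request_irq(pdev->irq, handler, SA_SHIRQ, "demo", ctx);

	/* 3. Legacy shared INTx line. */
	return request_irq(pdev->irq, handler, SA_SHIRQ, "demo", ctx);
}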
@@ -2881,16 +2720,12 @@ static int nv_open(struct net_device *dev)
 	udelay(10);
 	writel(readl(base + NvRegPowerState) | NVREG_POWERSTATE_VALID, base + NvRegPowerState);
 
-	nv_disable_hw_interrupts(dev, np->irqmask);
+	writel(0, base + NvRegIrqMask);
 	pci_push(base);
 	writel(NVREG_MIISTAT_MASK2, base + NvRegMIIStatus);
 	writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
 	pci_push(base);
 
-	if (nv_request_irq(dev)) {
-		goto out_drain;
-	}
+	if (np->msi_flags & NV_MSI_X_CAPABLE) {
+		for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
+			np->msi_x_entry[i].entry = i;
@@ -2964,7 +2799,7 @@ static int nv_open(struct net_device *dev)
 	}
 
 	/* ask for interrupts */
-	nv_enable_hw_interrupts(dev, np->irqmask);
+	writel(np->irqmask, base + NvRegIrqMask);
 
 	spin_lock_irq(&np->lock);
 	writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA);
@@ -3008,6 +2843,7 @@ static int nv_close(struct net_device *dev)
 {
 	struct fe_priv *np = netdev_priv(dev);
 	u8 __iomem *base;
+	int i;
 
 	spin_lock_irq(&np->lock);
 	np->in_shutdown = 1;
@@ -3025,13 +2861,31 @@ static int nv_close(struct net_device *dev)
 
 	/* disable interrupts on the nic or we will lock up */
 	base = get_hwbase(dev);
-	nv_disable_hw_interrupts(dev, np->irqmask);
+	if (np->msi_flags & NV_MSI_X_ENABLED) {
+		writel(np->irqmask, base + NvRegIrqMask);
+	} else {
+		if (np->msi_flags & NV_MSI_ENABLED)
+			writel(0, base + NvRegMSIIrqMask);
+		writel(0, base + NvRegIrqMask);
+	}
 	pci_push(base);
 	dprintk(KERN_INFO "%s: Irqmask is zero again\n", dev->name);
 
 	spin_unlock_irq(&np->lock);
 
-	nv_free_irq(dev);
+	if (np->msi_flags & NV_MSI_X_ENABLED) {
+		for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
+			free_irq(np->msi_x_entry[i].vector, dev);
+		}
+		pci_disable_msix(np->pci_dev);
+		np->msi_flags &= ~NV_MSI_X_ENABLED;
+	} else {
+		free_irq(np->pci_dev->irq, dev);
+		if (np->msi_flags & NV_MSI_ENABLED) {
+			pci_disable_msi(np->pci_dev);
+			np->msi_flags &= ~NV_MSI_ENABLED;
+		}
+	}
 
 	drain_ring(dev);
 
@@ -3120,18 +2974,20 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
 	if (id->driver_data & DEV_HAS_HIGH_DMA) {
 		/* packet format 3: supports 40-bit addressing */
 		np->desc_ver = DESC_VER_3;
-		np->txrxctl_bits = NVREG_TXRXCTL_DESC_3;
 		if (pci_set_dma_mask(pci_dev, DMA_39BIT_MASK)) {
 			printk(KERN_INFO "forcedeth: 64-bit DMA failed, using 32-bit addressing for device %s.\n",
 					pci_name(pci_dev));
-		} else {
-			dev->features |= NETIF_F_HIGHDMA;
-			printk(KERN_INFO "forcedeth: using HIGHDMA\n");
 		}
 		if (pci_set_consistent_dma_mask(pci_dev, 0x0000007fffffffffULL)) {
 			printk(KERN_INFO "forcedeth: 64-bit DMA (consistent) failed for device %s.\n",
			       pci_name(pci_dev));
 			goto out_relreg;
+		} else {
+			dev->features |= NETIF_F_HIGHDMA;
+			printk(KERN_INFO "forcedeth: using HIGHDMA\n");
 		}
+		np->txrxctl_bits = NVREG_TXRXCTL_DESC_3;
 	} else if (id->driver_data & DEV_HAS_LARGEDESC) {
 		/* packet format 2: supports jumbo frames */
 		np->desc_ver = DESC_VER_2;
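The nv_probe() hunk restores the 0.53 two-step DMA negotiation: a 39-bit streaming mask (the comment says 40-bit addressing; the mask used is DMA_39BIT_MASK), then a matching consistent mask for the rings, with NETIF_F_HIGHDMA advertised only once the consistent mask is accepted. The shape of it as a standalone sketch (demo_set_dma is a hypothetical helper; constants as in the driver):

static int demo_set_dma(struct pci_dev *pdev, struct net_device *dev)
{
	/* Streaming DMA: try the 39-bit window, else stay with the
	 * 32-bit default and skip HIGHDMA. */
	if (pci_set_dma_mask(pdev, DMA_39BIT_MASK))
		return 0;

	/* Consistent (ring) allocations must fit the same window. */
	if (pci_set_consistent_dma_mask(pdev, 0x0000007fffffffffULL))
		return -EIO;

	dev->features |= NETIF_F_HIGHDMA;
	return 0;
}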
drivers/net/pcmcia/axnet_cs.c  +2 −11

@@ -1691,17 +1691,6 @@ static void do_set_multicast_list(struct net_device *dev)
 		memset(ei_local->mcfilter, 0xFF, 8);
 	}
 
-	/*
-	 * DP8390 manuals don't specify any magic sequence for altering
-	 * the multicast regs on an already running card.  To be safe, we
-	 * ensure multicast mode is off prior to loading up the new hash
-	 * table.  If this proves to be not enough, we can always resort
-	 * to stopping the NIC, loading the table and then restarting.
-	 */
-
-	if (netif_running(dev))
-		outb_p(E8390_RXCONFIG, e8390_base + EN0_RXCR);
-
 	outb_p(E8390_NODMA + E8390_PAGE1, e8390_base + E8390_CMD);
 	for(i = 0; i < 8; i++)
 	{
@@ -1715,6 +1704,8 @@ static void do_set_multicast_list(struct net_device *dev)
 		outb_p(E8390_RXCONFIG | 0x48, e8390_base + EN0_RXCR);
 	else
 		outb_p(E8390_RXCONFIG | 0x40, e8390_base + EN0_RXCR);
+
+	outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, e8390_base+E8390_CMD);
 }
 
 /*


drivers/net/skge.c  +3 −5

@@ -78,8 +78,7 @@ static const struct pci_device_id skge_id_table[] = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_GE) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_YU) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_DLINK, PCI_DEVICE_ID_DLINK_DGE510T), },
-	{ PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4b00) },
-	{ PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4b01) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4b01) },	/* DGE-530T */
 	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4320) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x5005) }, /* Belkin */
 	{ PCI_DEVICE(PCI_VENDOR_ID_CNET, PCI_DEVICE_ID_CNET_GIGACARD) },
@@ -402,7 +401,7 @@ static int skge_set_ring_param(struct net_device *dev,
 	int err;
 
 	if (p->rx_pending == 0 || p->rx_pending > MAX_RX_RING_SIZE ||
-	    p->tx_pending == 0 || p->tx_pending > MAX_TX_RING_SIZE)
+	    p->tx_pending < MAX_SKB_FRAGS+1 || p->tx_pending > MAX_TX_RING_SIZE)
 		return -EINVAL;
 
 	skge->rx_ring.count = p->rx_pending;
@@ -2717,8 +2716,7 @@ static int skge_poll(struct net_device *dev, int *budget)
 		if (control & BMU_OWN)
 			break;
 
-		skb = skge_rx_get(skge, e, control, rd->status,
-				  le16_to_cpu(rd->csum2));
+		skb = skge_rx_get(skge, e, control, rd->status, rd->csum2);
 		if (likely(skb)) {
 			dev->last_rx = jiffies;
 			netif_receive_skb(skb);
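The skge_set_ring_param() change replaces the bare "non-empty" check with a floor of MAX_SKB_FRAGS+1: a maximally fragmented packet consumes one Tx descriptor per page fragment plus one for the linear header, so any smaller ring could never send such a packet. The same validation as an isolated sketch (demo name, hypothetical helper):

/* Reject Tx rings too small to hold one fully fragmented skb:
 * MAX_SKB_FRAGS page fragments plus the linear part. */
static int demo_check_tx_ring(unsigned int tx_pending, unsigned int max)
{
	if (tx_pending < MAX_SKB_FRAGS + 1 || tx_pending > max)
		return -EINVAL;
	return 0;
}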
drivers/net/sky2.c  +32 −22

@@ -51,7 +51,7 @@
 #include "sky2.h"
 
 #define DRV_NAME		"sky2"
-#define DRV_VERSION		"1.3"
+#define DRV_VERSION		"1.4"
 #define PFX			DRV_NAME " "
 
 /*
@@ -105,6 +105,7 @@ MODULE_PARM_DESC(idle_timeout, "Idle timeout workaround for lost interrupts (ms)
 static const struct pci_device_id sky2_id_table[] = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9000) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9E00) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4b00) },	/* DGE-560T */
 	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4340) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4341) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4342) },
@@ -235,6 +236,7 @@ static int sky2_set_power_state(struct sky2_hw *hw, pci_power_t state)
 		}
 
 		if (hw->chip_id == CHIP_ID_YUKON_EC_U) {
+			sky2_write16(hw, B0_CTST, Y2_HW_WOL_ON);
 			sky2_pci_write32(hw, PCI_DEV_REG3, 0);
 			reg1 = sky2_pci_read32(hw, PCI_DEV_REG4);
 			reg1 &= P_ASPM_CONTROL_MSK;
@@ -306,7 +308,7 @@ static void sky2_phy_init(struct sky2_hw *hw, unsigned port)
 	u16 ctrl, ct1000, adv, pg, ledctrl, ledover;
 
 	if (sky2->autoneg == AUTONEG_ENABLE &&
-	    (hw->chip_id != CHIP_ID_YUKON_XL || hw->chip_id == CHIP_ID_YUKON_EC_U)) {
+	    !(hw->chip_id == CHIP_ID_YUKON_XL || hw->chip_id == CHIP_ID_YUKON_EC_U)) {
 		u16 ectrl = gm_phy_read(hw, port, PHY_MARV_EXT_CTRL);
 
 		ectrl &= ~(PHY_M_EC_M_DSC_MSK | PHY_M_EC_S_DSC_MSK |
@@ -1020,19 +1022,26 @@ static int sky2_up(struct net_device *dev)
 	struct sky2_hw *hw = sky2->hw;
 	unsigned port = sky2->port;
 	u32 ramsize, rxspace, imask;
-	int err;
+	int cap, err = -ENOMEM;
 	struct net_device *otherdev = hw->dev[sky2->port^1];
 
-	/* Block bringing up both ports at the same time on a dual port card.
-	 * There is an unfixed bug where receiver gets confused and picks up
-	 * packets out of order. Until this is fixed, prevent data corruption.
+	/*
+	 * On dual port PCI-X card, there is an problem where status
+	 * can be received out of order due to split transactions
 	 */
-	if (otherdev && netif_running(otherdev)) {
-		printk(KERN_INFO PFX "dual port support is disabled.\n");
-		return -EBUSY;
+	if (otherdev && netif_running(otherdev) &&
+	    (cap = pci_find_capability(hw->pdev, PCI_CAP_ID_PCIX))) {
+		struct sky2_port *osky2 = netdev_priv(otherdev);
+		u16 cmd;
+
+		cmd = sky2_pci_read16(hw, cap + PCI_X_CMD);
+		cmd &= ~PCI_X_CMD_MAX_SPLIT;
+		sky2_pci_write16(hw, cap + PCI_X_CMD, cmd);
+
+		sky2->rx_csum = 0;
+		osky2->rx_csum = 0;
 	}
 
-	err = -ENOMEM;
 	if (netif_msg_ifup(sky2))
 		printk(KERN_INFO PFX "%s: enabling interface\n", dev->name);
@@ -1910,6 +1919,12 @@ static inline void sky2_tx_done(struct net_device *dev, u16 last)
 	}
 }
 
+/* Is status ring empty or is there more to do? */
+static inline int sky2_more_work(const struct sky2_hw *hw)
+{
+	return (hw->st_idx != sky2_read16(hw, STAT_PUT_IDX));
+}
+
 /* Process status response ring */
 static int sky2_status_intr(struct sky2_hw *hw, int to_do)
 {
@@ -2182,19 +2197,19 @@ static int sky2_poll(struct net_device *dev0, int *budget)
 	if (status & Y2_IS_CHK_TXA2)
 		sky2_descriptor_error(hw, 1, "transmit", Y2_IS_CHK_TXA2);
 
-	if (status & Y2_IS_STAT_BMU)
-		sky2_write32(hw, STAT_CTRL, SC_STAT_CLR_IRQ);
-
 	work_done = sky2_status_intr(hw, work_limit);
 	*budget -= work_done;
 	dev0->quota -= work_done;
 
-	if (work_done >= work_limit)
+	if (status & Y2_IS_STAT_BMU)
+		sky2_write32(hw, STAT_CTRL, SC_STAT_CLR_IRQ);
+
+	if (sky2_more_work(hw))
 		return 1;
 
 	netif_rx_complete(dev0);
 
-	status = sky2_read32(hw, B0_Y2_SP_LISR);
+	sky2_read32(hw, B0_Y2_SP_LISR);
 	return 0;
 }
@@ -3078,12 +3093,7 @@ static __devinit struct net_device *sky2_init_netdev(struct sky2_hw *hw,
 	sky2->duplex = -1;
 	sky2->speed = -1;
 	sky2->advertising = sky2_supported_modes(hw);
-
-	/* Receive checksum disabled for Yukon XL
-	 * because of observed problems with incorrect
-	 * values when multiple packets are received in one interrupt
-	 */
-	sky2->rx_csum = (hw->chip_id != CHIP_ID_YUKON_XL);
+	sky2->rx_csum = 1;
 
 	spin_lock_init(&sky2->phy_lock);
 	sky2->tx_pending = TX_DEF_PENDING;


drivers/net/sky2.h  +2 −0

@@ -214,6 +214,8 @@ enum csr_regs {
 enum {
 	Y2_VMAIN_AVAIL	= 1<<17,/* VMAIN available (YUKON-2 only) */
 	Y2_VAUX_AVAIL	= 1<<16,/* VAUX available (YUKON-2 only) */
+	Y2_HW_WOL_ON	= 1<<15,/* HW WOL On  (Yukon-EC Ultra A1 only) */
+	Y2_HW_WOL_OFF	= 1<<14,/* HW WOL Off (Yukon-EC Ultra A1 only) */
 	Y2_ASF_ENABLE	= 1<<13,/* ASF Unit Enable (YUKON-2 only) */
 	Y2_ASF_DISABLE	= 1<<12,/* ASF Unit Disable (YUKON-2 only) */
 	Y2_CLK_RUN_ENA	= 1<<11,/* CLK_RUN Enable (YUKON-2 only) */
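The sky2_up() hunk swaps the blanket dual-port -EBUSY lockout for a targeted workaround: on PCI-X boards the PCI_X_CMD_MAX_SPLIT field is cleared (the encoding for a single outstanding split transaction) so status-ring entries stay ordered, and receive checksum offload is switched off on both ports as a second guard. A sketch of the capability walk it builds on, using plain config-space accessors rather than the driver's sky2_pci_* window (hypothetical demo name):

/* Limit a PCI-X function to one outstanding split transaction. */
static void demo_limit_pcix_split(struct pci_dev *pdev)
{
	int cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
	u16 cmd;

	if (!cap)
		return;		/* not PCI-X: nothing to do */

	pci_read_config_word(pdev, cap + PCI_X_CMD, &cmd);
	cmd &= ~PCI_X_CMD_MAX_SPLIT;	/* field value 0 == 1 split */
	pci_write_config_word(pdev, cap + PCI_X_CMD, cmd);
}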