drivers/net/amd8111e.c  +14 −10

@@ -738,6 +738,7 @@ static int amd8111e_rx_poll(struct net_device *dev, int * budget)
    short vtag;
#endif
    int rx_pkt_limit = dev->quota;
    unsigned long flags;

    do{
        /* process receive packets until we use the quota*/

@@ -841,18 +842,19 @@ static int amd8111e_rx_poll(struct net_device *dev, int * budget)
    /* Receive descriptor is empty now */
    dev->quota -= num_rx_pkt;
    *budget -= num_rx_pkt;

    spin_lock_irqsave(&lp->lock, flags);
    netif_rx_complete(dev);
    /* enable receive interrupt */
    writel(VAL0|RINTEN0, mmio + INTEN0);
    writel(VAL2 | RDMD0, mmio + CMD0);
    spin_unlock_irqrestore(&lp->lock, flags);
    return 0;

rx_not_empty:
    /* Do not call a netif_rx_complete */
    dev->quota -= num_rx_pkt;
    *budget -= num_rx_pkt;
    return 1;
}

#else

@@ -1261,18 +1263,20 @@ static irqreturn_t amd8111e_interrupt(int irq, void *dev_id, struct pt_regs *reg
    struct net_device * dev = (struct net_device *) dev_id;
    struct amd8111e_priv *lp = netdev_priv(dev);
    void __iomem *mmio = lp->mmio;
    unsigned int intr0;
    unsigned int intr0, intren0;
    unsigned int handled = 1;

    if(dev == NULL)
    if(unlikely(dev == NULL))
        return IRQ_NONE;

    if (regs)
        spin_lock (&lp->lock);
    spin_lock(&lp->lock);

    /* disabling interrupt */
    writel(INTREN, mmio + CMD0);

    /* Read interrupt status */
    intr0 = readl(mmio + INT0);
    intren0 = readl(mmio + INTEN0);

    /* Process all the INT event until INTR bit is clear. */

@@ -1293,11 +1297,11 @@ static irqreturn_t amd8111e_interrupt(int irq, void *dev_id, struct pt_regs *reg
        /* Schedule a polling routine */
        __netif_rx_schedule(dev);
    }
    else {
    else if (intren0 & RINTEN0) {
        printk("************Driver bug! \
            interrupt while in poll\n");
        /* Fix by disabling interrupts */
        writel(RINT0, mmio + INT0);
        /* Fix by disable receive interrupts */
        writel(RINTEN0, mmio + INTEN0);
    }
    }
#else

@@ -1321,7 +1325,7 @@ static irqreturn_t amd8111e_interrupt(int irq, void *dev_id, struct pt_regs *reg
err_no_interrupt:
    writel( VAL0 | INTREN,mmio + CMD0);

    if (regs)
        spin_unlock(&lp->lock);
    spin_unlock(&lp->lock);

    return IRQ_RETVAL(handled);
}
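The amd8111e change above closes a race between the NAPI poll routine and the interrupt handler: completing the poll and re-enabling the receive interrupt now happen under the same spinlock that the interrupt handler takes unconditionally, so the two paths can no longer interleave. The fragment below is a minimal, hypothetical sketch of that completion pattern for the 2.6-era netif_rx_complete()/dev->quota interface; the example_* helpers and struct example_priv are invented for illustration and are not part of the driver.

    #include <linux/netdevice.h>
    #include <linux/spinlock.h>

    struct example_priv {
        spinlock_t lock;        /* shared with the interrupt handler */
    };

    /* hypothetical helpers, assumed to be provided elsewhere by the driver */
    static int  example_clean_rx(struct net_device *dev, int limit);
    static int  example_rx_pending(struct example_priv *priv);
    static void example_unmask_rx_irq(struct example_priv *priv);

    static int example_rx_poll(struct net_device *dev, int *budget)
    {
        struct example_priv *priv = netdev_priv(dev);
        unsigned long flags;
        int done = example_clean_rx(dev, min(dev->quota, *budget));

        dev->quota -= done;
        *budget -= done;

        if(example_rx_pending(priv))
            return 1;       /* more work: keep polling, rx irq stays masked */

        /* Completion and unmask must be atomic w.r.t. the ISR, which takes
         * the same lock before it looks at the NAPI state. */
        spin_lock_irqsave(&priv->lock, flags);
        netif_rx_complete(dev);
        example_unmask_rx_irq(priv);
        spin_unlock_irqrestore(&priv->lock, flags);
        return 0;
    }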
drivers/net/e100.c  +139 −26

@@ -155,9 +155,9 @@
#define DRV_NAME "e100"
#define DRV_EXT "-NAPI"
#define DRV_VERSION "3.3.6-k2"DRV_EXT
#define DRV_VERSION "3.4.8-k2"DRV_EXT
#define DRV_DESCRIPTION "Intel(R) PRO/100 Network Driver"
#define DRV_COPYRIGHT "Copyright(c) 1999-2004 Intel Corporation"
#define DRV_COPYRIGHT "Copyright(c) 1999-2005 Intel Corporation"
#define PFX DRV_NAME ": "

#define E100_WATCHDOG_PERIOD (2 * HZ)

@@ -210,11 +210,17 @@ static struct pci_device_id e100_id_table[] = {
    INTEL_8255X_ETHERNET_DEVICE(0x1069, 6),
    INTEL_8255X_ETHERNET_DEVICE(0x106A, 6),
    INTEL_8255X_ETHERNET_DEVICE(0x106B, 6),
    INTEL_8255X_ETHERNET_DEVICE(0x1091, 7),
    INTEL_8255X_ETHERNET_DEVICE(0x1092, 7),
    INTEL_8255X_ETHERNET_DEVICE(0x1093, 7),
    INTEL_8255X_ETHERNET_DEVICE(0x1094, 7),
    INTEL_8255X_ETHERNET_DEVICE(0x1095, 7),
    INTEL_8255X_ETHERNET_DEVICE(0x1209, 0),
    INTEL_8255X_ETHERNET_DEVICE(0x1229, 0),
    INTEL_8255X_ETHERNET_DEVICE(0x2449, 2),
    INTEL_8255X_ETHERNET_DEVICE(0x2459, 2),
    INTEL_8255X_ETHERNET_DEVICE(0x245D, 2),
    INTEL_8255X_ETHERNET_DEVICE(0x27DC, 7),
    { 0, }
};
MODULE_DEVICE_TABLE(pci, e100_id_table);

@@ -269,6 +275,12 @@ enum scb_status {
    rus_mask = 0x3C,
};

enum ru_state {
    RU_SUSPENDED = 0,
    RU_RUNNING = 1,
    RU_UNINITIALIZED = -1,
};

enum scb_stat_ack {
    stat_ack_not_ours = 0x00,
    stat_ack_sw_gen = 0x04,

@@ -510,7 +522,7 @@ struct nic {
    struct rx *rx_to_use;
    struct rx *rx_to_clean;
    struct rfd blank_rfd;
    int ru_running;
    enum ru_state ru_running;

    spinlock_t cb_lock ____cacheline_aligned;
    spinlock_t cmd_lock;

@@ -539,6 +551,7 @@ struct nic {
    struct timer_list watchdog;
    struct timer_list blink_timer;
    struct mii_if_info mii;
    struct work_struct tx_timeout_task;
    enum loopback loopback;

    struct mem *mem;

@@ -770,7 +783,7 @@ static int e100_eeprom_save(struct nic *nic, u16 start, u16 count)
    return 0;
}

#define E100_WAIT_SCB_TIMEOUT 40
#define E100_WAIT_SCB_TIMEOUT 20000 /* we might have to wait 100ms!!! */
static inline int e100_exec_cmd(struct nic *nic, u8 cmd, dma_addr_t dma_addr)
{
    unsigned long flags;

@@ -840,6 +853,10 @@ static inline int e100_exec_cb(struct nic *nic, struct sk_buff *skb,
             * because the controller is too busy, so
             * let's just queue the command and try again
             * when another command is scheduled. */
            if(err == -ENOSPC) {
                //request a reset
                schedule_work(&nic->tx_timeout_task);
            }
            break;
        } else {
            nic->cuc_cmd = cuc_resume;

@@ -884,7 +901,7 @@ static void mdio_write(struct net_device *netdev, int addr, int reg, int data)

static void e100_get_defaults(struct nic *nic)
{
    struct param_range rfds = { .min = 64, .max = 256, .count = 64 };
    struct param_range rfds = { .min = 16, .max = 256, .count = 64 };
    struct param_range cbs = { .min = 64, .max = 256, .count = 64 };

    pci_read_config_byte(nic->pdev, PCI_REVISION_ID, &nic->rev_id);

@@ -899,8 +916,9 @@ static void e100_get_defaults(struct nic *nic)
    /* Quadwords to DMA into FIFO before starting frame transmit */
    nic->tx_threshold = 0xE0;

    nic->tx_command = cpu_to_le16(cb_tx | cb_i | cb_tx_sf |
        ((nic->mac >= mac_82558_D101_A4) ? cb_cid : 0));
    /* no interrupt for every tx completion, delay = 256us if not 557*/
    nic->tx_command = cpu_to_le16(cb_tx | cb_tx_sf |
        ((nic->mac >= mac_82558_D101_A4) ? cb_cid : cb_i));

    /* Template for a freshly allocated RFD */
    nic->blank_rfd.command = cpu_to_le16(cb_el);

@@ -964,7 +982,8 @@ static void e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb)
    if(nic->flags & multicast_all)
        config->multicast_all = 0x1;        /* 1=accept, 0=no */

    if(!(nic->flags & wol_magic))
    /* disable WoL when up */
    if(netif_running(nic->netdev) || !(nic->flags & wol_magic))
        config->magic_packet_disable = 0x1; /* 1=off, 0=on */

    if(nic->mac >= mac_82558_D101_A4) {

@@ -1203,7 +1222,9 @@ static void e100_update_stats(struct nic *nic)
        }
    }

    e100_exec_cmd(nic, cuc_dump_reset, 0);
    if(e100_exec_cmd(nic, cuc_dump_reset, 0))
        DPRINTK(TX_ERR, DEBUG, "exec cuc_dump_reset failed\n");
}

static void e100_adjust_adaptive_ifs(struct nic *nic, int speed, int duplex)

@@ -1279,12 +1300,15 @@ static inline void e100_xmit_prepare(struct nic *nic, struct cb *cb,
    struct sk_buff *skb)
{
    cb->command = nic->tx_command;
    /* interrupt every 16 packets regardless of delay */
    if((nic->cbs_avail & ~15) == nic->cbs_avail)
        cb->command |= cb_i;
    cb->u.tcb.tbd_array = cb->dma_addr + offsetof(struct cb, u.tcb.tbd);
    cb->u.tcb.tcb_byte_count = 0;
    cb->u.tcb.threshold = nic->tx_threshold;
    cb->u.tcb.tbd_count = 1;
    cb->u.tcb.tbd.buf_addr = cpu_to_le32(pci_map_single(nic->pdev,
        skb->data, skb->len, PCI_DMA_TODEVICE));
    // check for mapping failure?
    cb->u.tcb.tbd.size = cpu_to_le16(skb->len);
}
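The Tx path above drops the unconditional cb_i from the default tx_command (MACs older than 82558 D101 A4 keep it, newer ones use cb_cid instead) and requests a completion interrupt from e100_xmit_prepare() on roughly every 16th frame. The test (n & ~15) == n is simply "n is a multiple of 16". A small stand-alone illustration of that coalescing test, compiled as ordinary user-space C:

    #include <stdio.h>

    /* Same test as e100_xmit_prepare() above: (n & ~15) == n is true exactly
     * when the low four bits of n are zero, i.e. when n is a multiple of 16,
     * so a completion interrupt is requested about once per 16 descriptors. */
    static int want_tx_irq(unsigned int cbs_avail)
    {
        return (cbs_avail & ~15u) == cbs_avail;
    }

    int main(void)
    {
        unsigned int n;

        for(n = 0; n < 40; n++)
            if(want_tx_irq(n))
                printf("cb_i would be set at cbs_avail=%u\n", n);
        return 0;
    }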
@@ -1297,7 +1321,8 @@ static int e100_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
        /* SW workaround for ICH[x] 10Mbps/half duplex Tx hang.
           Issue a NOP command followed by a 1us delay before
           issuing the Tx command. */
        e100_exec_cmd(nic, cuc_nop, 0);
        if(e100_exec_cmd(nic, cuc_nop, 0))
            DPRINTK(TX_ERR, DEBUG, "exec cuc_nop failed\n");
        udelay(1);
    }

@@ -1415,12 +1440,18 @@ static int e100_alloc_cbs(struct nic *nic)
    return 0;
}

static inline void e100_start_receiver(struct nic *nic)
static inline void e100_start_receiver(struct nic *nic, struct rx *rx)
{
    if(!nic->rxs) return;
    if(RU_SUSPENDED != nic->ru_running) return;

    /* handle init time starts */
    if(!rx) rx = nic->rxs;

    /* (Re)start RU if suspended or idle and RFA is non-NULL */
    if(!nic->ru_running && nic->rx_to_clean->skb) {
        e100_exec_cmd(nic, ruc_start, nic->rx_to_clean->dma_addr);
        nic->ru_running = 1;
    if(rx->skb) {
        e100_exec_cmd(nic, ruc_start, rx->dma_addr);
        nic->ru_running = RU_RUNNING;
    }
}

@@ -1437,6 +1468,13 @@ static inline int e100_rx_alloc_skb(struct nic *nic, struct rx *rx)
    rx->dma_addr = pci_map_single(nic->pdev, rx->skb->data,
        RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);

    if(pci_dma_mapping_error(rx->dma_addr)) {
        dev_kfree_skb_any(rx->skb);
        rx->skb = 0;
        rx->dma_addr = 0;
        return -ENOMEM;
    }

    /* Link the RFD to end of RFA by linking previous RFD to
     * this one, and clearing EL bit of previous. */
    if(rx->prev->skb) {

@@ -1471,7 +1509,7 @@ static inline int e100_rx_indicate(struct nic *nic, struct rx *rx,

    /* If data isn't ready, nothing to indicate */
    if(unlikely(!(rfd_status & cb_complete)))
        return -EAGAIN;
        return -ENODATA;

    /* Get actual data size */
    actual_size = le16_to_cpu(rfd->actual_size) & 0x3FFF;

@@ -1482,6 +1520,10 @@ static inline int e100_rx_indicate(struct nic *nic, struct rx *rx,
    pci_unmap_single(nic->pdev, rx->dma_addr,
        RFD_BUF_LEN, PCI_DMA_FROMDEVICE);

    /* this allows for a fast restart without re-enabling interrupts */
    if(le16_to_cpu(rfd->command) & cb_el)
        nic->ru_running = RU_SUSPENDED;

    /* Pull off the RFD and put the actual data (minus eth hdr) */
    skb_reserve(skb, sizeof(struct rfd));
    skb_put(skb, actual_size);
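The receive unit (RU) bookkeeping above replaces the old int flag with a small three-state machine: RU_UNINITIALIZED while no RFA exists, RU_SUSPENDED once a list has been allocated or when the hardware consumes the EL-marked RFD or signals RNR, and RU_RUNNING after ruc_start is issued. e100_start_receiver() now refuses to start from anything but RU_SUSPENDED. The snippet below is a condensed, hypothetical restatement of those transitions, not the driver code itself:

    enum ru_state { RU_SUSPENDED = 0, RU_RUNNING = 1, RU_UNINITIALIZED = -1 };

    /* hypothetical condensation of the transitions used in the patch above */
    static void ru_list_allocated(enum ru_state *ru)   { *ru = RU_SUSPENDED; }     /* e100_rx_alloc_list() */
    static void ru_list_destroyed(enum ru_state *ru)   { *ru = RU_UNINITIALIZED; } /* e100_rx_clean_list() */
    static void ru_out_of_resources(enum ru_state *ru) { *ru = RU_SUSPENDED; }     /* RNR irq or EL-marked RFD */

    /* mirrors e100_start_receiver(): only a suspended RU that has an skb to
     * hand to the hardware may be (re)started */
    static int ru_try_start(enum ru_state *ru, int first_rfd_has_skb)
    {
        if(*ru != RU_SUSPENDED || !first_rfd_has_skb)
            return 0;
        *ru = RU_RUNNING;   /* the real driver issues ruc_start here */
        return 1;
    }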
@@ -1514,20 +1556,45 @@ static inline void e100_rx_clean(struct nic *nic, unsigned int *work_done,
    unsigned int work_to_do)
{
    struct rx *rx;
    int restart_required = 0;
    struct rx *rx_to_start = NULL;

    /* are we already rnr? then pay attention!!! this ensures that
     * the state machine progression never allows a start with a
     * partially cleaned list, avoiding a race between hardware
     * and rx_to_clean when in NAPI mode */
    if(RU_SUSPENDED == nic->ru_running)
        restart_required = 1;

    /* Indicate newly arrived packets */
    for(rx = nic->rx_to_clean; rx->skb; rx = nic->rx_to_clean = rx->next) {
        if(e100_rx_indicate(nic, rx, work_done, work_to_do))
        int err = e100_rx_indicate(nic, rx, work_done, work_to_do);
        if(-EAGAIN == err) {
            /* hit quota so have more work to do, restart once
             * cleanup is complete */
            restart_required = 0;
            break;
        } else if(-ENODATA == err)
            break; /* No more to clean */
    }

    /* save our starting point as the place we'll restart the receiver */
    if(restart_required)
        rx_to_start = nic->rx_to_clean;

    /* Alloc new skbs to refill list */
    for(rx = nic->rx_to_use; !rx->skb; rx = nic->rx_to_use = rx->next) {
        if(unlikely(e100_rx_alloc_skb(nic, rx)))
            break; /* Better luck next time (see watchdog) */
    }

    e100_start_receiver(nic);
    if(restart_required) {
        // ack the rnr?
        writeb(stat_ack_rnr, &nic->csr->scb.stat_ack);
        e100_start_receiver(nic, rx_to_start);
        if(work_done)
            (*work_done)++;
    }
}

static void e100_rx_clean_list(struct nic *nic)

@@ -1535,6 +1602,8 @@ static void e100_rx_clean_list(struct nic *nic)
    struct rx *rx;
    unsigned int i, count = nic->params.rfds.count;

    nic->ru_running = RU_UNINITIALIZED;

    if(nic->rxs) {
        for(rx = nic->rxs, i = 0; i < count; rx++, i++) {
            if(rx->skb) {

@@ -1548,7 +1617,6 @@ static void e100_rx_clean_list(struct nic *nic)
    }

    nic->rx_to_use = nic->rx_to_clean = NULL;
    nic->ru_running = 0;
}

static int e100_rx_alloc_list(struct nic *nic)

@@ -1557,6 +1625,7 @@ static int e100_rx_alloc_list(struct nic *nic)
    unsigned int i, count = nic->params.rfds.count;

    nic->rx_to_use = nic->rx_to_clean = NULL;
    nic->ru_running = RU_UNINITIALIZED;

    if(!(nic->rxs = kmalloc(sizeof(struct rx) * count, GFP_ATOMIC)))
        return -ENOMEM;

@@ -1572,6 +1641,7 @@ static int e100_rx_alloc_list(struct nic *nic)
    }

    nic->rx_to_use = nic->rx_to_clean = nic->rxs;
    nic->ru_running = RU_SUSPENDED;

    return 0;
}

@@ -1593,7 +1663,7 @@ static irqreturn_t e100_intr(int irq, void *dev_id, struct pt_regs *regs)

    /* We hit Receive No Resource (RNR); restart RU after cleaning */
    if(stat_ack & stat_ack_rnr)
        nic->ru_running = 0;
        nic->ru_running = RU_SUSPENDED;

    e100_disable_irq(nic);
    netif_rx_schedule(netdev);

@@ -1663,6 +1733,7 @@ static int e100_change_mtu(struct net_device *netdev, int new_mtu)
    return 0;
}

#ifdef CONFIG_PM
static int e100_asf(struct nic *nic)
{
    /* ASF can be enabled from eeprom */

@@ -1671,6 +1742,7 @@ static int e100_asf(struct nic *nic)
       !(nic->eeprom[eeprom_config_asf] & eeprom_gcl) &&
       ((nic->eeprom[eeprom_smbus_addr] & 0xFF) != 0xFE));
}
#endif

static int e100_up(struct nic *nic)
{

@@ -1683,13 +1755,16 @@ static int e100_up(struct nic *nic)
    if((err = e100_hw_init(nic)))
        goto err_clean_cbs;
    e100_set_multicast_list(nic->netdev);
    e100_start_receiver(nic);
    e100_start_receiver(nic, 0);
    mod_timer(&nic->watchdog, jiffies);
    if((err = request_irq(nic->pdev->irq, e100_intr, SA_SHIRQ,
        nic->netdev->name, nic->netdev)))
        goto err_no_irq;
    e100_enable_irq(nic);
    netif_wake_queue(nic->netdev);
    netif_poll_enable(nic->netdev);
    /* enable ints _after_ enabling poll, preventing a race between
     * disable ints+schedule */
    e100_enable_irq(nic);
    return 0;

err_no_irq:

@@ -1703,11 +1778,13 @@ static void e100_down(struct nic *nic)
{
    /* wait here for poll to complete */
    netif_poll_disable(nic->netdev);
    netif_stop_queue(nic->netdev);
    e100_hw_reset(nic);
    free_irq(nic->pdev->irq, nic->netdev);
    del_timer_sync(&nic->watchdog);
    netif_carrier_off(nic->netdev);
    netif_stop_queue(nic->netdev);
    e100_clean_cbs(nic);
    e100_rx_clean_list(nic);
}

@@ -1716,6 +1793,15 @@ static void e100_tx_timeout(struct net_device *netdev)
{
    struct nic *nic = netdev_priv(netdev);

    /* Reset outside of interrupt context, to avoid request_irq
     * in interrupt context */
    schedule_work(&nic->tx_timeout_task);
}

static void e100_tx_timeout_task(struct net_device *netdev)
{
    struct nic *nic = netdev_priv(netdev);

    DPRINTK(TX_ERR, DEBUG, "scb.status=0x%02X\n",
        readb(&nic->csr->scb.status));
    e100_down(netdev_priv(netdev));

@@ -1749,7 +1835,7 @@ static int e100_loopback_test(struct nic *nic, enum loopback loopback_mode)
        mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR,
            BMCR_LOOPBACK);

    e100_start_receiver(nic);
    e100_start_receiver(nic, 0);

    if(!(skb = dev_alloc_skb(ETH_DATA_LEN))) {
        err = -ENOMEM;
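Two paths above now funnel into the same deferred reset: e100_exec_cb() schedules nic->tx_timeout_task when the command unit reports -ENOSPC, and e100_tx_timeout() queues the same work instead of resetting inline, because e100_down()/e100_up() end up in request_irq() and other sleeping calls that are not allowed in interrupt context. Below is a minimal sketch of that deferral pattern using the 2.6-era three-argument INIT_WORK(); the mynic_* names are hypothetical:

    #include <linux/netdevice.h>
    #include <linux/workqueue.h>

    struct mynic {
        struct net_device *netdev;
        struct work_struct reset_task;      /* queued from atomic context */
    };

    /* hypothetical open/close helpers that may sleep, like e100_up()/e100_down() */
    static void mynic_down(struct mynic *nic);
    static int  mynic_up(struct mynic *nic);

    /* runs later in process context (keventd), so sleeping is fine here */
    static void mynic_reset_task(void *data)
    {
        struct net_device *netdev = data;
        struct mynic *nic = netdev_priv(netdev);

        mynic_down(nic);
        mynic_up(nic);
    }

    /* tx watchdog callback: runs in softirq context and must not sleep,
     * so it only queues the work and returns */
    static void mynic_tx_timeout(struct net_device *netdev)
    {
        struct mynic *nic = netdev_priv(netdev);

        schedule_work(&nic->reset_task);
    }

    /* at probe time:
     *    INIT_WORK(&nic->reset_task, mynic_reset_task, netdev);
     */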
@@ -1869,7 +1955,6 @@ static int e100_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
    else
        nic->flags &= ~wol_magic;

    pci_enable_wake(nic->pdev, 0, nic->flags & (wol_magic | e100_asf(nic)));
    e100_exec_cb(nic, NULL, e100_configure);

    return 0;

@@ -2223,6 +2308,7 @@ static int __devinit e100_probe(struct pci_dev *pdev,

    e100_get_defaults(nic);

    /* locks must be initialized before calling hw_reset */
    spin_lock_init(&nic->cb_lock);
    spin_lock_init(&nic->cmd_lock);

@@ -2240,6 +2326,9 @@ static int __devinit e100_probe(struct pci_dev *pdev,
    nic->blink_timer.function = e100_blink_led;
    nic->blink_timer.data = (unsigned long)nic;

    INIT_WORK(&nic->tx_timeout_task,
        (void (*)(void *))e100_tx_timeout_task, netdev);

    if((err = e100_alloc(nic))) {
        DPRINTK(PROBE, ERR, "Cannot alloc driver memory, aborting.\n");
        goto err_out_iounmap;

@@ -2263,7 +2352,8 @@ static int __devinit e100_probe(struct pci_dev *pdev,
       (nic->eeprom[eeprom_id] & eeprom_id_wol))
        nic->flags |= wol_magic;

    pci_enable_wake(pdev, 0, nic->flags & (wol_magic | e100_asf(nic)));
    /* ack any pending wake events, disable PME */
    pci_enable_wake(pdev, 0, 0);

    strcpy(netdev->name, "eth%d");
    if((err = register_netdev(netdev))) {

@@ -2335,7 +2425,10 @@ static int e100_resume(struct pci_dev *pdev)
    pci_set_power_state(pdev, PCI_D0);
    pci_restore_state(pdev);
    e100_hw_init(nic);
    /* ack any pending wake events, disable PME */
    pci_enable_wake(pdev, 0, 0);
    if(e100_hw_init(nic))
        DPRINTK(HW, ERR, "e100_hw_init failed\n");

    netif_device_attach(netdev);
    if(netif_running(netdev))

@@ -2345,6 +2438,21 @@ static int e100_resume(struct pci_dev *pdev)
}
#endif

static void e100_shutdown(struct device *dev)
{
    struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
    struct net_device *netdev = pci_get_drvdata(pdev);
    struct nic *nic = netdev_priv(netdev);

#ifdef CONFIG_PM
    pci_enable_wake(pdev, 0, nic->flags & (wol_magic | e100_asf(nic)));
#else
    pci_enable_wake(pdev, 0, nic->flags & (wol_magic));
#endif
}

static struct pci_driver e100_driver = {
    .name =     DRV_NAME,
    .id_table = e100_id_table,

@@ -2354,6 +2462,11 @@ static struct pci_driver e100_driver = {
    .suspend =  e100_suspend,
    .resume =   e100_resume,
#endif
    .driver = {
        .shutdown = e100_shutdown,
    }
};

static int __init e100_init_module(void)
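Wake-on-LAN handling changes shape in the hunks above as well: probe, resume and the ethtool set_wol path no longer arm PME directly (they just ack any pending wake event with pci_enable_wake(pdev, 0, 0)), and PME is armed only in the new e100_shutdown() hook, which hangs off the embedded struct device_driver in this kernel generation. A stripped-down, hypothetical sketch of that wiring; mydrv_* and the wol_enabled field are invented names:

    #include <linux/pci.h>
    #include <linux/netdevice.h>

    struct mydrv_priv {
        int wol_enabled;    /* saved from the ethtool set_wol handler */
    };

    /* arm PME only when the machine actually powers down */
    static void mydrv_shutdown(struct device *dev)
    {
        struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct mydrv_priv *priv = netdev_priv(netdev);

        /* second argument is the PCI power state to wake from (0 = D0),
         * third enables or disables PME generation */
        pci_enable_wake(pdev, 0, priv->wol_enabled);
    }

    static struct pci_driver mydrv_driver = {
        .name   = "mydrv",
        .driver = {
            .shutdown = mydrv_shutdown,     /* struct device_driver hook */
        },
    };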
drivers/net/e1000/e1000.h  +33 −4

/*******************************************************************************

  Copyright(c) 1999 - 2004 Intel Corporation. All rights reserved.
  Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.

  This program is free software; you can redistribute it and/or modify it
  under the terms of the GNU General Public License as published by the Free

@@ -112,6 +112,8 @@ struct e1000_adapter;
#define E1000_MAX_82544_RXD 4096

/* Supported Rx Buffer Sizes */
#define E1000_RXBUFFER_128   128    /* Used for packet split */
#define E1000_RXBUFFER_256   256    /* Used for packet split */
#define E1000_RXBUFFER_2048  2048
#define E1000_RXBUFFER_4096  4096
#define E1000_RXBUFFER_8192  8192

@@ -138,7 +140,7 @@ struct e1000_adapter;
#define E1000_RX_BUFFER_WRITE 16 /* Must be power of 2 */

#define AUTO_ALL_MODES 0
#define E1000_EEPROM_82544_APM 0x0004
#define E1000_EEPROM_82544_APM 0x0400
#define E1000_EEPROM_APME 0x0400

#ifndef E1000_MASTER_SLAVE

@@ -146,6 +148,10 @@ struct e1000_adapter;
#define E1000_MASTER_SLAVE e1000_ms_hw_default
#endif

#define E1000_MNG_VLAN_NONE -1
/* Number of packet split data buffers (not including the header buffer) */
#define PS_PAGE_BUFFERS MAX_PS_BUFFERS-1

/* only works for sizes that are powers of 2 */
#define E1000_ROUNDUP(i, size) ((i) = (((i) + (size) - 1) & ~((size) - 1)))

@@ -159,6 +165,9 @@ struct e1000_buffer {
    uint16_t next_to_watch;
};

struct e1000_ps_page { struct page *ps_page[MAX_PS_BUFFERS]; };
struct e1000_ps_page_dma { uint64_t ps_page_dma[MAX_PS_BUFFERS]; };

struct e1000_desc_ring {
    /* pointer to the descriptor ring memory */
    void *desc;

@@ -174,12 +183,19 @@ struct e1000_desc_ring {
    unsigned int next_to_clean;
    /* array of buffer information structs */
    struct e1000_buffer *buffer_info;
    /* arrays of page information for packet split */
    struct e1000_ps_page *ps_page;
    struct e1000_ps_page_dma *ps_page_dma;
};

#define E1000_DESC_UNUSED(R) \
    ((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
    (R)->next_to_clean - (R)->next_to_use - 1)

#define E1000_RX_DESC_PS(R, i) \
    (&(((union e1000_rx_desc_packet_split *)((R).desc))[i]))
#define E1000_RX_DESC_EXT(R, i) \
    (&(((union e1000_rx_desc_extended *)((R).desc))[i]))
#define E1000_GET_DESC(R, i, type) (&(((struct type *)((R).desc))[i]))
#define E1000_RX_DESC(R, i) E1000_GET_DESC(R, i, e1000_rx_desc)
#define E1000_TX_DESC(R, i) E1000_GET_DESC(R, i, e1000_tx_desc)

@@ -192,6 +208,7 @@ struct e1000_adapter {
    struct timer_list watchdog_timer;
    struct timer_list phy_info_timer;
    struct vlan_group *vlgrp;
    uint16_t mng_vlan_id;
    uint32_t bd_number;
    uint32_t rx_buffer_len;
    uint32_t part_num;

@@ -228,14 +245,23 @@ struct e1000_adapter {
    boolean_t detect_tx_hung;

    /* RX */
#ifdef CONFIG_E1000_NAPI
    boolean_t (*clean_rx) (struct e1000_adapter *adapter, int *work_done,
                           int work_to_do);
#else
    boolean_t (*clean_rx) (struct e1000_adapter *adapter);
#endif
    void (*alloc_rx_buf) (struct e1000_adapter *adapter);
    struct e1000_desc_ring rx_ring;
    uint64_t hw_csum_err;
    uint64_t hw_csum_good;
    uint32_t rx_int_delay;
    uint32_t rx_abs_int_delay;
    boolean_t rx_csum;
    boolean_t rx_ps;
    uint32_t gorcl;
    uint64_t gorcl_old;
    uint16_t rx_ps_bsize0;

    /* Interrupt Throttle Rate */
    uint32_t itr;

@@ -257,5 +283,8 @@ struct e1000_adapter {

    int msg_enable;

#ifdef CONFIG_PCI_MSI
    boolean_t have_msi;
#endif
};

#endif /* _E1000_H_ */
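Two of the e1000.h macros above deserve a closer look. E1000_DESC_UNUSED computes how many descriptors are free in a circular ring while always keeping one slot unused (so next_to_use == next_to_clean can unambiguously mean "empty"), and E1000_ROUNDUP rounds a ring size up to a multiple of size, which only works when size is a power of two because it relies on masking. The user-space check below re-derives both, with the first macro adapted to take plain integers instead of a ring pointer; the example values are arbitrary:

    #include <stdio.h>

    #define E1000_ROUNDUP(i, size) ((i) = (((i) + (size) - 1) & ~((size) - 1)))

    /* same expression as E1000_DESC_UNUSED in e1000.h, with the ring fields
     * passed in directly instead of through a struct pointer */
    #define DESC_UNUSED(count, use, clean) \
        ((((clean) > (use)) ? 0 : (count)) + (clean) - (use) - 1)

    int main(void)
    {
        unsigned int rxd = 100;

        E1000_ROUNDUP(rxd, 16);
        printf("ring size rounded up to a multiple of 16: %u\n", rxd); /* 112 */

        /* producer (next_to_use) ahead of consumer (next_to_clean) */
        printf("unused descriptors: %d\n", DESC_UNUSED(256, 10, 5));   /* 250 */
        /* consumer ahead of producer, i.e. the indices have wrapped */
        printf("unused descriptors: %d\n", DESC_UNUSED(256, 10, 200)); /* 189 */
        return 0;
    }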
drivers/net/e1000/e1000_ethtool.c  +69 −36

/*******************************************************************************

  Copyright(c) 1999 - 2004 Intel Corporation. All rights reserved.
  Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.

  This program is free software; you can redistribute it and/or modify it
  under the terms of the GNU General Public License as published by the Free

@@ -69,6 +69,7 @@ static const struct e1000_stats e1000_gstrings_stats[] = {
    { "rx_crc_errors", E1000_STAT(net_stats.rx_crc_errors) },
    { "rx_frame_errors", E1000_STAT(net_stats.rx_frame_errors) },
    { "rx_fifo_errors", E1000_STAT(net_stats.rx_fifo_errors) },
    { "rx_no_buffer_count", E1000_STAT(stats.rnbc) },
    { "rx_missed_errors", E1000_STAT(net_stats.rx_missed_errors) },
    { "tx_aborted_errors", E1000_STAT(net_stats.tx_aborted_errors) },
    { "tx_carrier_errors", E1000_STAT(net_stats.tx_carrier_errors) },

@@ -842,10 +843,8 @@ e1000_intr_test(struct e1000_adapter *adapter, uint64_t *data)
         * test failed. */
        adapter->test_icr = 0;
        E1000_WRITE_REG(&adapter->hw, IMC, (~mask & 0x00007FFF));
        E1000_WRITE_REG(&adapter->hw, ICS, (~mask & 0x00007FFF));
        E1000_WRITE_REG(&adapter->hw, IMC, ~mask & 0x00007FFF);
        E1000_WRITE_REG(&adapter->hw, ICS, ~mask & 0x00007FFF);
        msec_delay(10);

        if(adapter->test_icr) {

@@ -919,7 +918,8 @@ e1000_setup_desc_rings(struct e1000_adapter *adapter)

    /* Setup Tx descriptor ring and Tx buffers */

    txdr->count = 80;
    if(!txdr->count)
        txdr->count = E1000_DEFAULT_TXD;

    size = txdr->count * sizeof(struct e1000_buffer);
    if(!(txdr->buffer_info = kmalloc(size, GFP_KERNEL))) {

@@ -974,7 +974,8 @@ e1000_setup_desc_rings(struct e1000_adapter *adapter)

    /* Setup Rx descriptor ring and Rx buffers */

    rxdr->count = 80;
    if(!rxdr->count)
        rxdr->count = E1000_DEFAULT_RXD;

    size = rxdr->count * sizeof(struct e1000_buffer);
    if(!(rxdr->buffer_info = kmalloc(size, GFP_KERNEL))) {
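Among the ethtool changes above, the new "rx_no_buffer_count" row simply extends the existing pattern: each entry of e1000_gstrings_stats pairs a user-visible string with the location of a counter inside struct e1000_adapter, so the stats handler can copy values out generically without a switch statement. A stripped-down sketch of that offset-table technique, with a hypothetical adapter structure and counters:

    #include <stdio.h>
    #include <stddef.h>
    #include <stdint.h>

    /* hypothetical adapter with a couple of counters */
    struct my_adapter {
        uint64_t rx_packets;
        uint64_t rx_no_buffer_count;
    };

    struct my_stat {
        const char *name;
        size_t offset;          /* offset of the counter inside my_adapter */
    };

    #define MY_STAT(f) { #f, offsetof(struct my_adapter, f) }

    static const struct my_stat my_stats[] = {
        MY_STAT(rx_packets),
        MY_STAT(rx_no_buffer_count),
    };

    int main(void)
    {
        struct my_adapter a = { .rx_packets = 42, .rx_no_buffer_count = 7 };
        size_t i;

        /* generic copy loop, in the spirit of e1000_get_ethtool_stats() */
        for(i = 0; i < sizeof(my_stats) / sizeof(my_stats[0]); i++)
            printf("%-20s %llu\n", my_stats[i].name,
                   (unsigned long long)
                   *(uint64_t *)((char *)&a + my_stats[i].offset));
        return 0;
    }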
@@ -1310,31 +1311,62 @@ e1000_run_loopback_test(struct e1000_adapter *adapter)
    struct e1000_desc_ring *txdr = &adapter->test_tx_ring;
    struct e1000_desc_ring *rxdr = &adapter->test_rx_ring;
    struct pci_dev *pdev = adapter->pdev;
    int i, ret_val;
    int i, j, k, l, lc, good_cnt, ret_val=0;
    unsigned long time;

    E1000_WRITE_REG(&adapter->hw, RDT, rxdr->count - 1);

    for(i = 0; i < 64; i++) {
        e1000_create_lbtest_frame(txdr->buffer_info[i].skb, 1024);
        pci_dma_sync_single_for_device(pdev, txdr->buffer_info[i].dma,
            txdr->buffer_info[i].length,
    /* Calculate the loop count based on the largest descriptor ring
     * The idea is to wrap the largest ring a number of times using 64
     * send/receive pairs during each loop */
    if(rxdr->count <= txdr->count)
        lc = ((txdr->count / 64) * 2) + 1;
    else
        lc = ((rxdr->count / 64) * 2) + 1;

    k = l = 0;
    for(j = 0; j <= lc; j++) { /* loop count loop */
        for(i = 0; i < 64; i++) { /* send the packets */
            e1000_create_lbtest_frame(txdr->buffer_info[i].skb, 1024);
            pci_dma_sync_single_for_device(pdev,
                    txdr->buffer_info[k].dma,
                    txdr->buffer_info[k].length,
                    PCI_DMA_TODEVICE);
            if(unlikely(++k == txdr->count)) k = 0;
        }
        E1000_WRITE_REG(&adapter->hw, TDT, i);
        E1000_WRITE_REG(&adapter->hw, TDT, k);
        msec_delay(200);

    i = 0;
    do {
        pci_dma_sync_single_for_cpu(pdev, rxdr->buffer_info[i].dma,
            rxdr->buffer_info[i].length,
        time = jiffies; /* set the start time for the receive */
        good_cnt = 0;
        do { /* receive the sent packets */
            pci_dma_sync_single_for_cpu(pdev,
                    rxdr->buffer_info[l].dma,
                    rxdr->buffer_info[l].length,
                    PCI_DMA_FROMDEVICE);

        ret_val = e1000_check_lbtest_frame(rxdr->buffer_info[i].skb,
            ret_val = e1000_check_lbtest_frame(
                    rxdr->buffer_info[l].skb,
                    1024);
        i++;
    } while (ret_val != 0 && i < 64);
            if(!ret_val)
                good_cnt++;
            if(unlikely(++l == rxdr->count)) l = 0;
            /* time + 20 msecs (200 msecs on 2.4) is more than
             * enough time to complete the receives, if it's
             * exceeded, break and error off */
        } while (good_cnt < 64 && jiffies < (time + 20));
        if(good_cnt != 64) {
            ret_val = 13; /* ret_val is the same as mis-compare */
            break;
        }
        if(jiffies >= (time + 2)) {
            ret_val = 14; /* error code for time out error */
            break;
        }
    } /* end loop count loop */

    return ret_val;
}

@@ -1354,13 +1386,12 @@ static int
e1000_link_test(struct e1000_adapter *adapter, uint64_t *data)
{
    *data = 0;
    if (adapter->hw.media_type == e1000_media_type_internal_serdes) {
        int i = 0;
        adapter->hw.serdes_link_down = TRUE;

        /* on some blade server designs link establishment */
        /* could take as long as 2-3 minutes. */
        /* On some blade server designs, link establishment
         * could take as long as 2-3 minutes */
        do {
            e1000_check_for_link(&adapter->hw);
            if (adapter->hw.serdes_link_down == FALSE)

@@ -1371,6 +1402,8 @@ e1000_link_test(struct e1000_adapter *adapter, uint64_t *data)
        *data = 1;
    } else {
        e1000_check_for_link(&adapter->hw);
        if(adapter->hw.autoneg)  /* if auto_neg is set wait for it */
            msec_delay(4000);

        if(!(E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU)) {
            *data = 1;
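The rewritten loopback test above no longer fires a single batch of 64 frames: it derives a loop count lc from the larger of the two test rings so the ring wraps a couple of times, sends 64 frames per pass with indices k and l that wrap at the ring size, and bounds each receive phase with a jiffies deadline. The arithmetic is easy to sanity-check in isolation (example ring sizes are arbitrary):

    #include <stdio.h>

    /* same calculation as e1000_run_loopback_test() above: wrap the larger
     * of the two test rings roughly twice, 64 send/receive pairs per pass */
    static int loop_count(int txd_count, int rxd_count)
    {
        if(rxd_count <= txd_count)
            return ((txd_count / 64) * 2) + 1;
        return ((rxd_count / 64) * 2) + 1;
    }

    int main(void)
    {
        int k = 0, ring_size = 80, sent;

        printf("lc for txd=80,  rxd=80:   %d\n", loop_count(80, 80));    /* 3 */
        printf("lc for txd=256, rxd=4096: %d\n", loop_count(256, 4096)); /* 129 */

        /* the k/l indices in the patch wrap the same way */
        for(sent = 0; sent < 64; sent++)
            if(++k == ring_size)
                k = 0;
        printf("tx ring index after one batch of 64: %d\n", k);          /* 64 */
        return 0;
    }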