drivers/net/tg3.c  +59 −28

@@ -3532,9 +3532,23 @@ static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
 		(base + len + 8 < base));
 }
 
+/* Test for DMA addresses > 40-bit */
+static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
+					  int len)
+{
+#if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
+	if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
+		return (((u64) mapping + len) > DMA_40BIT_MASK);
+	return 0;
+#else
+	return 0;
+#endif
+}
+
 static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
 
-static int tigon3_4gb_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
+/* Workaround 4GB and 40-bit hardware DMA bugs. */
+static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
 				       u32 last_plus_one, u32 *start,
 				       u32 base_flags, u32 mss)
 {
@@ -3742,6 +3756,9 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		if (tg3_4g_overflow_test(mapping, len))
 			would_hit_hwbug = 1;
 
+		if (tg3_40bit_overflow_test(tp, mapping, len))
+			would_hit_hwbug = 1;
+
 		if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
 			tg3_set_txd(tp, entry, mapping, len,
 				    base_flags, (i == last)|(mss << 1));
@@ -3763,7 +3780,7 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		/* If the workaround fails due to memory/mapping
 		 * failure, silently drop this packet.
 		 */
-		if (tigon3_4gb_hwbug_workaround(tp, skb, last_plus_one,
+		if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one,
 						&start, base_flags, mss))
 			goto out_unlock;
 
@@ -10608,8 +10625,9 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
 	unsigned long tg3reg_base, tg3reg_len;
 	struct net_device *dev;
 	struct tg3 *tp;
-	int i, err, pci_using_dac, pm_cap;
+	int i, err, pm_cap;
 	char str[40];
+	u64 dma_mask, persist_dma_mask;
 
 	if (tg3_version_printed++ == 0)
 		printk(KERN_INFO "%s", version);
@@ -10646,26 +10664,6 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
 		goto err_out_free_res;
 	}
 
-	/* Configure DMA attributes. */
-	err = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
-	if (!err) {
-		pci_using_dac = 1;
-		err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
-		if (err < 0) {
-			printk(KERN_ERR PFX "Unable to obtain 64 bit DMA "
-			       "for consistent allocations\n");
-			goto err_out_free_res;
-		}
-	} else {
-		err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
-		if (err) {
-			printk(KERN_ERR PFX "No usable DMA configuration, "
-			       "aborting.\n");
-			goto err_out_free_res;
-		}
-		pci_using_dac = 0;
-	}
-
 	tg3reg_base = pci_resource_start(pdev, 0);
 	tg3reg_len = pci_resource_len(pdev, 0);
 
@@ -10679,8 +10677,6 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
 	SET_MODULE_OWNER(dev);
 	SET_NETDEV_DEV(dev, &pdev->dev);
 
-	if (pci_using_dac)
-		dev->features |= NETIF_F_HIGHDMA;
 	dev->features |= NETIF_F_LLTX;
 #if TG3_VLAN_TAG_USED
 	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
@@ -10765,6 +10761,44 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
 		goto err_out_iounmap;
 	}
 
+	/* 5714, 5715 and 5780 cannot support DMA addresses > 40-bit.
+	 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
+	 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
+	 * do DMA address check in tg3_start_xmit().
+	 */
+	if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
+		persist_dma_mask = dma_mask = DMA_40BIT_MASK;
+#ifdef CONFIG_HIGHMEM
+		dma_mask = DMA_64BIT_MASK;
+#endif
+	} else if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
+		persist_dma_mask = dma_mask = DMA_32BIT_MASK;
+	else
+		persist_dma_mask = dma_mask = DMA_64BIT_MASK;
+
+	/* Configure DMA attributes. */
+	if (dma_mask > DMA_32BIT_MASK) {
+		err = pci_set_dma_mask(pdev, dma_mask);
+		if (!err) {
+			dev->features |= NETIF_F_HIGHDMA;
+			err = pci_set_consistent_dma_mask(pdev,
+							  persist_dma_mask);
+			if (err < 0) {
+				printk(KERN_ERR PFX "Unable to obtain 64 bit "
+				       "DMA for consistent allocations\n");
+				goto err_out_iounmap;
+			}
+		}
+	}
+	if (err || dma_mask == DMA_32BIT_MASK) {
+		err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
+		if (err) {
+			printk(KERN_ERR PFX "No usable DMA configuration, "
+			       "aborting.\n");
+			goto err_out_iounmap;
+		}
+	}
+
 	tg3_init_bufmgr_config(tp);
 
 #if TG3_TSO_SUPPORT != 0
@@ -10833,9 +10867,6 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
 	} else
 		tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
 
-	if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
-		dev->features &= ~NETIF_F_HIGHDMA;
-
 	/* flow control autonegotiation is default behavior */
 	tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
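Note: the core of the transmit-path change is the new tg3_40bit_overflow_test() predicate, which flags any mapping whose buffer would run past the 40-bit address range that the 5714, 5715 and 5780 chips can DMA to; tg3_start_xmit() then sends such packets through the renamed tigon3_dma_hwbug_workaround(). A minimal userspace sketch of that arithmetic follows; the function name overflows_40bit and the dropped chip-flag/CONFIG_HIGHMEM guards are our simplifications, not the driver's code.

#include <stdint.h>
#include <stdio.h>

#define DMA_40BIT_MASK 0x000000ffffffffffULL	/* highest address reachable with 40 bits */

/* Mirrors the check in tg3_40bit_overflow_test(): flag a mapping whose 'len'
 * bytes would extend past the 40-bit limit.  The real driver only applies
 * this when CONFIG_HIGHMEM, BITS_PER_LONG == 64 and the chip is 5780-class. */
static int overflows_40bit(uint64_t mapping, int len)
{
	return ((mapping + (uint64_t)len) > DMA_40BIT_MASK);
}

int main(void)
{
	printf("%d\n", overflows_40bit(DMA_40BIT_MASK - 100, 100));	/* 0: buffer ends within range */
	printf("%d\n", overflows_40bit(DMA_40BIT_MASK - 100, 101));	/* 1: crosses the limit, workaround path */
	return 0;
}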
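The probe-time change works from the other direction: instead of unconditionally requesting a 64-bit DMA mask, tg3_init_one() now derives the streaming and consistent masks from the chip class, sets NETIF_F_HIGHDMA only when the larger mask is accepted, and falls back to DMA_32BIT_MASK before giving up with "No usable DMA configuration". A rough userspace sketch of the mask selection is below; is_5780_class, is_5788 and highmem are hypothetical stand-ins for the tp->tg3_flags2 tests and the CONFIG_HIGHMEM #ifdef.

#include <stdint.h>
#include <stdio.h>

#define DMA_32BIT_MASK 0x00000000ffffffffULL
#define DMA_40BIT_MASK 0x000000ffffffffffULL
#define DMA_64BIT_MASK 0xffffffffffffffffULL

/* Same decision tree the patch adds to tg3_init_one(); the flag parameters
 * stand in for tp->tg3_flags2 bits and the CONFIG_HIGHMEM #ifdef. */
static uint64_t pick_dma_masks(int is_5780_class, int is_5788, int highmem,
			       uint64_t *persist_dma_mask)
{
	uint64_t dma_mask;

	if (is_5780_class) {
		/* Chip can only DMA below 40 bits; with highmem the driver
		 * still asks for 64-bit streaming mappings and relies on the
		 * per-packet check in tg3_start_xmit() instead. */
		*persist_dma_mask = dma_mask = DMA_40BIT_MASK;
		if (highmem)
			dma_mask = DMA_64BIT_MASK;
	} else if (is_5788) {
		*persist_dma_mask = dma_mask = DMA_32BIT_MASK;
	} else {
		*persist_dma_mask = dma_mask = DMA_64BIT_MASK;
	}
	return dma_mask;
}

int main(void)
{
	uint64_t persist, streaming;

	streaming = pick_dma_masks(1, 0, 1, &persist);	/* 5780-class, CONFIG_HIGHMEM */
	printf("5780+highmem: streaming %#llx, consistent %#llx\n",
	       (unsigned long long)streaming, (unsigned long long)persist);

	streaming = pick_dma_masks(0, 1, 0, &persist);	/* 5788 */
	printf("5788:         streaming %#llx, consistent %#llx\n",
	       (unsigned long long)streaming, (unsigned long long)persist);

	streaming = pick_dma_masks(0, 0, 0, &persist);	/* all other chips */
	printf("default:      streaming %#llx, consistent %#llx\n",
	       (unsigned long long)streaming, (unsigned long long)persist);
	return 0;
}

In the driver itself the streaming mask is then passed to pci_set_dma_mask() and the persistent one to pci_set_consistent_dma_mask(); only if the larger mask is refused (or was DMA_32BIT_MASK to begin with) does the probe retry with a 32-bit mask, which replaces the old pci_using_dac bookkeeping and the later clearing of NETIF_F_HIGHDMA for 5788 parts.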