drivers/net/bnx2.c (+9 −8)

@@ -7966,11 +7966,8 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
 		/* AER (Advanced Error Reporting) hooks */
 		err = pci_enable_pcie_error_reporting(pdev);
-		if (err) {
-			dev_err(&pdev->dev, "pci_enable_pcie_error_reporting "
-				"failed 0x%x\n", err);
-			/* non-fatal, continue */
-		}
+		if (!err)
+			bp->flags |= BNX2_FLAG_AER_ENABLED;
 
 	} else {
 		bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
@@ -8233,8 +8230,10 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
 	return 0;
 
 err_out_unmap:
-	if (bp->flags & BNX2_FLAG_PCIE)
+	if (bp->flags & BNX2_FLAG_AER_ENABLED) {
 		pci_disable_pcie_error_reporting(pdev);
+		bp->flags &= ~BNX2_FLAG_AER_ENABLED;
+	}
 
 	if (bp->regview) {
 		iounmap(bp->regview);
@@ -8422,8 +8421,10 @@ bnx2_remove_one(struct pci_dev *pdev)
 	kfree(bp->temp_stats_blk);
 
-	if (bp->flags & BNX2_FLAG_PCIE)
+	if (bp->flags & BNX2_FLAG_AER_ENABLED) {
 		pci_disable_pcie_error_reporting(pdev);
+		bp->flags &= ~BNX2_FLAG_AER_ENABLED;
+	}
 
 	free_netdev(dev);
@@ -8539,7 +8540,7 @@ static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
 	}
 	rtnl_unlock();
 
-	if (!(bp->flags & BNX2_FLAG_PCIE))
+	if (!(bp->flags & BNX2_FLAG_AER_ENABLED))
 		return result;
 
 	err = pci_cleanup_aer_uncorrect_error_status(pdev);
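The bnx2 change replaces a log-and-continue error path with a dedicated BNX2_FLAG_AER_ENABLED flag, so the teardown paths call pci_disable_pcie_error_reporting() only when enabling actually succeeded. A minimal sketch of the same enable-once/disable-if-enabled pattern, in a hypothetical driver (my_priv and the two helpers are illustrative names; the pci_*_pcie_error_reporting() calls are the <linux/aer.h> hooks this diff uses):

/* Sketch of the enable-once/disable-if-enabled AER pattern, assuming a
 * hypothetical driver private struct. */
#include <linux/pci.h>
#include <linux/aer.h>

#define MY_FLAG_AER_ENABLED	0x00000001

struct my_priv {
	u32 flags;
};

static void my_aer_enable(struct my_priv *bp, struct pci_dev *pdev)
{
	/* Enabling can fail (e.g. no AER capability on the device);
	 * record success instead of re-deriving it at teardown time. */
	if (!pci_enable_pcie_error_reporting(pdev))
		bp->flags |= MY_FLAG_AER_ENABLED;
}

static void my_aer_disable(struct my_priv *bp, struct pci_dev *pdev)
{
	/* Undo only what was done, and clear the flag so this is safe
	 * to reach from both the error path and the remove path. */
	if (bp->flags & MY_FLAG_AER_ENABLED) {
		pci_disable_pcie_error_reporting(pdev);
		bp->flags &= ~MY_FLAG_AER_ENABLED;
	}
}

This is why the BNX2_FLAG_PCIE test at the three call sites becomes BNX2_FLAG_AER_ENABLED: a PCIe device on which enabling failed would otherwise still be "disabled" on teardown.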
drivers/net/bnx2.h (+1 −0)

@@ -6741,6 +6741,7 @@ struct bnx2 {
 #define BNX2_FLAG_JUMBO_BROKEN		0x00000800
 #define BNX2_FLAG_CAN_KEEP_VLAN		0x00001000
 #define BNX2_FLAG_BROKEN_STATS		0x00002000
+#define BNX2_FLAG_AER_ENABLED		0x00004000
 
 	struct bnx2_napi	bnx2_napi[BNX2_MAX_MSIX_VEC];
drivers/net/cnic.c (+6 −6)

@@ -699,13 +699,13 @@ static void cnic_free_dma(struct cnic_dev *dev, struct cnic_dma *dma)
 static void cnic_setup_page_tbl(struct cnic_dev *dev, struct cnic_dma *dma)
 {
 	int i;
-	u32 *page_table = dma->pgtbl;
+	__le32 *page_table = (__le32 *) dma->pgtbl;
 
 	for (i = 0; i < dma->num_pages; i++) {
 		/* Each entry needs to be in big endian format. */
-		*page_table = (u32) ((u64) dma->pg_map_arr[i] >> 32);
+		*page_table = cpu_to_le32((u64) dma->pg_map_arr[i] >> 32);
 		page_table++;
-		*page_table = (u32) dma->pg_map_arr[i];
+		*page_table = cpu_to_le32(dma->pg_map_arr[i] & 0xffffffff);
 		page_table++;
 	}
 }
@@ -713,13 +713,13 @@ static void cnic_setup_page_tbl(struct cnic_dev *dev, struct cnic_dma *dma)
 static void cnic_setup_page_tbl_le(struct cnic_dev *dev, struct cnic_dma *dma)
 {
 	int i;
-	u32 *page_table = dma->pgtbl;
+	__le32 *page_table = (__le32 *) dma->pgtbl;
 
 	for (i = 0; i < dma->num_pages; i++) {
 		/* Each entry needs to be in little endian format. */
-		*page_table = dma->pg_map_arr[i] & 0xffffffff;
+		*page_table = cpu_to_le32(dma->pg_map_arr[i] & 0xffffffff);
 		page_table++;
-		*page_table = (u32) ((u64) dma->pg_map_arr[i] >> 32);
+		*page_table = cpu_to_le32((u64) dma->pg_map_arr[i] >> 32);
 		page_table++;
 	}
 }
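The cnic change makes the endianness of each page-table store explicit: every 64-bit DMA address is split into two 32-bit words, each written little-endian, with the word order (high word first vs. low word first) distinguishing the "big endian" and "little endian" table layouts. The old code wrote CPU-order words, which appears to be the big-endian-host bug this hunk addresses. A self-contained user-space sketch of the high-word-first split, with a hand-rolled stand-in for cpu_to_le32() (all names illustrative, not driver code):

/* Sketch of the word splitting in cnic_setup_page_tbl(): a 64-bit DMA
 * address becomes two explicitly little-endian 32-bit words. */
#include <stdint.h>
#include <stdio.h>

static uint32_t cpu_to_le32_sketch(uint32_t v)
{
	union { uint32_t u; uint8_t b[4]; } probe = { .u = 1 };

	if (probe.b[0] == 1)		/* little-endian host: identity */
		return v;
	return __builtin_bswap32(v);	/* big-endian host: byte swap */
}

static void fill_entry_be(uint32_t *slot, uint64_t dma_addr)
{
	/* "Big endian" 64-bit entry: high 32 bits first, each word LE. */
	slot[0] = cpu_to_le32_sketch((uint32_t)(dma_addr >> 32));
	slot[1] = cpu_to_le32_sketch((uint32_t)(dma_addr & 0xffffffff));
}

int main(void)
{
	uint32_t entry[2];

	fill_entry_be(entry, 0x123456789abcdef0ULL);
	printf("0x%08x 0x%08x\n", entry[0], entry[1]);
	return 0;
}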
drivers/net/dl2k.c (+2 −2)

@@ -1753,8 +1753,6 @@ rio_close (struct net_device *dev)
 	/* Free all the skbuffs in the queue. */
 	for (i = 0; i < RX_RING_SIZE; i++) {
-		np->rx_ring[i].status = 0;
-		np->rx_ring[i].fraginfo = 0;
 		skb = np->rx_skbuff[i];
 		if (skb) {
 			pci_unmap_single(np->pdev,
@@ -1763,6 +1761,8 @@ rio_close (struct net_device *dev)
 			dev_kfree_skb (skb);
 			np->rx_skbuff[i] = NULL;
 		}
+		np->rx_ring[i].status = 0;
+		np->rx_ring[i].fraginfo = 0;
 	}
 	for (i = 0; i < TX_RING_SIZE; i++) {
 		skb = np->tx_skbuff[i];
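The dl2k fix is purely one of ordering: the descriptor's fraginfo field still describes the mapped buffer when pci_unmap_single() runs, so the descriptor should be wiped only after the buffer is unmapped and freed, not before. A compilable sketch of the same teardown order (desc, dma_unmap, buf_free and friends are illustrative stand-ins, not dl2k code; fraginfo holding the DMA address is an assumption from the visible context):

/* Sketch of the teardown ordering fixed above: unmap/free the buffer
 * while the descriptor still describes it, then wipe the descriptor. */
#include <stdint.h>
#include <stdlib.h>

struct desc {
	uint64_t status;
	uint64_t fraginfo;	/* assumed to encode the buffer's DMA address */
};

/* Stand-ins for pci_unmap_single() and dev_kfree_skb(). */
static void dma_unmap(uint64_t dma_addr) { (void)dma_addr; }
static void buf_free(void *buf) { free(buf); }

static void rx_ring_teardown(struct desc *ring, void **bufs, int n)
{
	for (int i = 0; i < n; i++) {
		if (bufs[i]) {
			/* Unmap first: fraginfo must still be valid here. */
			dma_unmap(ring[i].fraginfo);
			buf_free(bufs[i]);
			bufs[i] = NULL;
		}
		/* Only now is it safe to reset the descriptor. */
		ring[i].status = 0;
		ring[i].fraginfo = 0;
	}
}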
drivers/net/xen-netfront.c (+88 −8)

@@ -120,6 +120,9 @@ struct netfront_info {
 	unsigned long rx_pfn_array[NET_RX_RING_SIZE];
 	struct multicall_entry rx_mcl[NET_RX_RING_SIZE+1];
 	struct mmu_update rx_mmu[NET_RX_RING_SIZE];
+
+	/* Statistics */
+	int rx_gso_checksum_fixup;
 };
 
 struct netfront_rx_info {
@@ -770,11 +773,29 @@ static RING_IDX xennet_fill_frags(struct netfront_info *np,
 	return cons;
 }
 
-static int skb_checksum_setup(struct sk_buff *skb)
+static int checksum_setup(struct net_device *dev, struct sk_buff *skb)
 {
 	struct iphdr *iph;
 	unsigned char *th;
 	int err = -EPROTO;
+	int recalculate_partial_csum = 0;
+
+	/*
+	 * A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
+	 * peers can fail to set NETRXF_csum_blank when sending a GSO
+	 * frame. In this case force the SKB to CHECKSUM_PARTIAL and
+	 * recalculate the partial checksum.
+	 */
+	if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
+		struct netfront_info *np = netdev_priv(dev);
+		np->rx_gso_checksum_fixup++;
+		skb->ip_summed = CHECKSUM_PARTIAL;
+		recalculate_partial_csum = 1;
+	}
+
+	/* A non-CHECKSUM_PARTIAL SKB does not require setup. */
+	if (skb->ip_summed != CHECKSUM_PARTIAL)
+		return 0;
 
 	if (skb->protocol != htons(ETH_P_IP))
 		goto out;
@@ -788,9 +809,23 @@ static int skb_checksum_setup(struct sk_buff *skb)
 	switch (iph->protocol) {
 	case IPPROTO_TCP:
 		skb->csum_offset = offsetof(struct tcphdr, check);
+
+		if (recalculate_partial_csum) {
+			struct tcphdr *tcph = (struct tcphdr *)th;
+			tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
+							 skb->len - iph->ihl*4,
+							 IPPROTO_TCP, 0);
+		}
 		break;
 	case IPPROTO_UDP:
 		skb->csum_offset = offsetof(struct udphdr, check);
+
+		if (recalculate_partial_csum) {
+			struct udphdr *udph = (struct udphdr *)th;
+			udph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
+							 skb->len - iph->ihl*4,
+							 IPPROTO_UDP, 0);
+		}
 		break;
 	default:
 		if (net_ratelimit())
@@ -829,14 +864,12 @@ static int handle_incoming_queue(struct net_device *dev,
 		/* Ethernet work: Delayed to here as it peeks the header. */
 		skb->protocol = eth_type_trans(skb, dev);
 
-		if (skb->ip_summed == CHECKSUM_PARTIAL) {
-			if (skb_checksum_setup(skb)) {
+		if (checksum_setup(dev, skb)) {
 			kfree_skb(skb);
 			packets_dropped++;
 			dev->stats.rx_errors++;
 			continue;
-			}
 		}
 
 		dev->stats.rx_packets++;
 		dev->stats.rx_bytes += skb->len;
@@ -1632,12 +1665,59 @@ static void netback_changed(struct xenbus_device *dev,
 	}
 }
 
+static const struct xennet_stat {
+	char name[ETH_GSTRING_LEN];
+	u16 offset;
+} xennet_stats[] = {
+	{
+		"rx_gso_checksum_fixup",
+		offsetof(struct netfront_info, rx_gso_checksum_fixup)
+	},
+};
+
+static int xennet_get_sset_count(struct net_device *dev, int string_set)
+{
+	switch (string_set) {
+	case ETH_SS_STATS:
+		return ARRAY_SIZE(xennet_stats);
+	default:
+		return -EINVAL;
+	}
+}
+
+static void xennet_get_ethtool_stats(struct net_device *dev,
+				     struct ethtool_stats *stats, u64 * data)
+{
+	void *np = netdev_priv(dev);
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(xennet_stats); i++)
+		data[i] = *(int *)(np + xennet_stats[i].offset);
+}
+
+static void xennet_get_strings(struct net_device *dev, u32 stringset, u8 * data)
+{
+	int i;
+
+	switch (stringset) {
+	case ETH_SS_STATS:
+		for (i = 0; i < ARRAY_SIZE(xennet_stats); i++)
+			memcpy(data + i * ETH_GSTRING_LEN,
+			       xennet_stats[i].name, ETH_GSTRING_LEN);
+		break;
+	}
+}
+
 static const struct ethtool_ops xennet_ethtool_ops =
 {
 	.set_tx_csum = ethtool_op_set_tx_csum,
 	.set_sg = xennet_set_sg,
 	.set_tso = xennet_set_tso,
 	.get_link = ethtool_op_get_link,
+
+	.get_sset_count = xennet_get_sset_count,
+	.get_ethtool_stats = xennet_get_ethtool_stats,
+	.get_strings = xennet_get_strings,
 };
 
 #ifdef CONFIG_SYSFS
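Two patterns from the xen-netfront hunks are worth pulling out. First, the checksum fix: a CHECKSUM_PARTIAL skb carries a transport checksum field pre-seeded so that a later pass summing from the transport header onward folds out to the correct final checksum, and ~csum_tcpudp_magic(saddr, daddr, len, proto, 0) is exactly that seed (the folded IPv4 pseudo-header sum). A user-space sketch working in host byte order for simplicity (pseudo_hdr_sum() is an illustrative analogue, not the kernel function):

/* Host-byte-order sketch of the pseudo-header seeding done above,
 * mirroring what csum_tcpudp_magic() returns: the folded, inverted
 * one's-complement sum of src, dst, protocol and transport length. */
#include <stdint.h>

static uint16_t pseudo_hdr_sum(uint32_t saddr, uint32_t daddr,
			       uint32_t len, uint8_t proto)
{
	uint32_t sum = 0;

	sum += (saddr >> 16) + (saddr & 0xffff);
	sum += (daddr >> 16) + (daddr & 0xffff);
	sum += proto + len;
	while (sum >> 16)			/* fold carries */
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

/* The diff stores the complement of this value (i.e. the folded
 * pseudo-header sum itself) in tcph->check / udph->check before the
 * skb continues through checksum offload. */

Second, the new ethtool statistics are driven by a single name/offset table, so xennet_get_strings() and xennet_get_ethtool_stats() cannot drift apart. The same table-driven readout in plain C (struct priv and the stat names are illustrative):

/* Sketch of the name/offset stat-table pattern added above. */
#include <stddef.h>
#include <stdio.h>

#define GSTRING_LEN 32

struct priv {
	int rx_gso_checksum_fixup;
	int rx_other_event;
};

static const struct stat_desc {
	char name[GSTRING_LEN];
	size_t offset;
} stats[] = {
	{ "rx_gso_checksum_fixup", offsetof(struct priv, rx_gso_checksum_fixup) },
	{ "rx_other_event",        offsetof(struct priv, rx_other_event) },
};

#define N_STATS (sizeof(stats) / sizeof(stats[0]))

static void get_stats(const struct priv *p, long long *data)
{
	/* One loop serves every stat; adding a counter means adding
	 * one table row, never touching the readout code. */
	for (size_t i = 0; i < N_STATS; i++)
		data[i] = *(const int *)((const char *)p + stats[i].offset);
}

int main(void)
{
	struct priv p = { .rx_gso_checksum_fixup = 3, .rx_other_event = 7 };
	long long data[N_STATS];

	get_stats(&p, data);
	for (size_t i = 0; i < N_STATS; i++)
		printf("%-24s %lld\n", stats[i].name, data[i]);
	return 0;
}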