Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 26acc792 authored by Linus Torvalds
Browse files
Pull networking fixes from David Miller:

 1) Check klogctl failure correctly, from Colin Ian King.

 2) Prevent OOM when under memory pressure in flowcache, from Steffen
    Klassert.

 3) Fix info leak in llc and rtnetlink ifmap code, from Kangjie Lu.

 4) Memory barrier and multicast handling fixes in bnxt_en, from Michael
    Chan.

 5) Endianness bug in mlx5, from Daniel Jurgens.

 6) Fix disconnect handling in VSOCK, from Ian Campbell.

 7) Fix locking of netdev list walking in get_bridge_ifindices(), from
    Nikolay Aleksandrov.

 8) Bridge multicast MLD parser can look at wrong packet offsets, fix
    from Linus Lüssing.

 9) Fix chip hang in qede driver, from Sudarsana Reddy Kalluru.

10) Fix missing setting of encapsulation before inner handling completes
    in udp_offload code, from Jarno Rajahalme.

11) Missing rollbacks during LAG join and flood configuration failures
    in mlxsw driver, from Ido Schimmel.

12) Fix error code checks in netxen driver, from Dan Carpenter.

13) Fix key size in new macsec driver, from Sabrina Dubroca.

14) Fix mlx5/VXLAN dependencies, from Arnd Bergmann.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (29 commits)
  net/mlx5e: make VXLAN support conditional
  Revert "net/mlx5: Kconfig: Fix MLX5_EN/VXLAN build issue"
  macsec: key identifier is 128 bits, not 64
  Documentation/networking: more accurate LCO explanation
  macvtap: segmented packet is consumed
  tools: bpf_jit_disasm: check for klogctl failure
  qede: uninitialized variable in qede_start_xmit()
  netxen: netxen_rom_fast_read() doesn't return -1
  netxen: reversed condition in netxen_nic_set_link_parameters()
  netxen: fix error handling in netxen_get_flash_block()
  mlxsw: spectrum: Add missing rollback in flood configuration
  mlxsw: spectrum: Fix rollback order in LAG join failure
  udp_offload: Set encapsulation before inner completes.
  udp_tunnel: Remove redundant udp_tunnel_gro_complete().
  qede: prevent chip hang when increasing channels
  net: ipv6: tcp reset, icmp need to consider L3 domain
  bridge: fix igmp / mld query parsing
  net: bridge: fix old ioctl unlocked net device walk
  VSOCK: do not disconnect socket when peer has shutdown SEND only
  net/mlx4_en: Fix endianness bug in IPV6 csum calculation
  ...
parents 8634de6d 8846a125
Loading
Loading
Loading
Loading
+7 −7
Original line number Original line Diff line number Diff line
@@ -69,18 +69,18 @@ LCO: Local Checksum Offload
LCO is a technique for efficiently computing the outer checksum of an
LCO is a technique for efficiently computing the outer checksum of an
 encapsulated datagram when the inner checksum is due to be offloaded.
 encapsulated datagram when the inner checksum is due to be offloaded.
The ones-complement sum of a correctly checksummed TCP or UDP packet is
The ones-complement sum of a correctly checksummed TCP or UDP packet is
 equal to the sum of the pseudo header, because everything else gets
 equal to the complement of the sum of the pseudo header, because everything
 'cancelled out' by the checksum field.  This is because the sum was
 else gets 'cancelled out' by the checksum field.  This is because the sum was
 complemented before being written to the checksum field.
 complemented before being written to the checksum field.
More generally, this holds in any case where the 'IP-style' ones complement
More generally, this holds in any case where the 'IP-style' ones complement
 checksum is used, and thus any checksum that TX Checksum Offload supports.
 checksum is used, and thus any checksum that TX Checksum Offload supports.
That is, if we have set up TX Checksum Offload with a start/offset pair, we
That is, if we have set up TX Checksum Offload with a start/offset pair, we
 know that _after the device has filled in that checksum_, the ones
 know that after the device has filled in that checksum, the ones
 complement sum from csum_start to the end of the packet will be equal to
 complement sum from csum_start to the end of the packet will be equal to
 _whatever value we put in the checksum field beforehand_.  This allows us
 the complement of whatever value we put in the checksum field beforehand.
 to compute the outer checksum without looking at the payload: we simply
 This allows us to compute the outer checksum without looking at the payload:
 stop summing when we get to csum_start, then add the 16-bit word at
 we simply stop summing when we get to csum_start, then add the complement of
 (csum_start + csum_offset).
 the 16-bit word at (csum_start + csum_offset).
Then, when the true inner checksum is filled in (either by hardware or by
Then, when the true inner checksum is filled in (either by hardware or by
 skb_checksum_help()), the outer checksum will become correct by virtue of
 skb_checksum_help()), the outer checksum will become correct by virtue of
 the arithmetic.
 the arithmetic.
+4 −3
Original line number Original line Diff line number Diff line
@@ -1595,21 +1595,22 @@ static int xgene_enet_probe(struct platform_device *pdev)


	ret = xgene_enet_init_hw(pdata);
	ret = xgene_enet_init_hw(pdata);
	if (ret)
	if (ret)
		goto err;
		goto err_netdev;


	mac_ops = pdata->mac_ops;
	mac_ops = pdata->mac_ops;
	if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII) {
	if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII) {
		ret = xgene_enet_mdio_config(pdata);
		ret = xgene_enet_mdio_config(pdata);
		if (ret)
		if (ret)
			goto err;
			goto err_netdev;
	} else {
	} else {
		INIT_DELAYED_WORK(&pdata->link_work, mac_ops->link_state);
		INIT_DELAYED_WORK(&pdata->link_work, mac_ops->link_state);
	}
	}


	xgene_enet_napi_add(pdata);
	xgene_enet_napi_add(pdata);
	return 0;
	return 0;
err:
err_netdev:
	unregister_netdev(ndev);
	unregister_netdev(ndev);
err:
	free_netdev(ndev);
	free_netdev(ndev);
	return ret;
	return ret;
}
}
+19 −4
Original line number Original line Diff line number Diff line
@@ -1388,6 +1388,10 @@ static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
		if (!TX_CMP_VALID(txcmp, raw_cons))
		if (!TX_CMP_VALID(txcmp, raw_cons))
			break;
			break;


		/* The valid test of the entry must be done first before
		 * reading any further.
		 */
		rmb();
		if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) {
		if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) {
			tx_pkts++;
			tx_pkts++;
			/* return full budget so NAPI will complete. */
			/* return full budget so NAPI will complete. */
@@ -4038,9 +4042,11 @@ static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
}
}


static int bnxt_cfg_rx_mode(struct bnxt *);
static int bnxt_cfg_rx_mode(struct bnxt *);
static bool bnxt_mc_list_updated(struct bnxt *, u32 *);


static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
{
{
	struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
	int rc = 0;
	int rc = 0;


	if (irq_re_init) {
	if (irq_re_init) {
@@ -4096,13 +4102,22 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
		netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
		netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
		goto err_out;
		goto err_out;
	}
	}
	bp->vnic_info[0].uc_filter_count = 1;
	vnic->uc_filter_count = 1;


	bp->vnic_info[0].rx_mask = CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
	vnic->rx_mask = CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;


	if ((bp->dev->flags & IFF_PROMISC) && BNXT_PF(bp))
	if ((bp->dev->flags & IFF_PROMISC) && BNXT_PF(bp))
		bp->vnic_info[0].rx_mask |=
		vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
				CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;

	if (bp->dev->flags & IFF_ALLMULTI) {
		vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
		vnic->mc_list_count = 0;
	} else {
		u32 mask = 0;

		bnxt_mc_list_updated(bp, &mask);
		vnic->rx_mask |= mask;
	}


	rc = bnxt_cfg_rx_mode(bp);
	rc = bnxt_cfg_rx_mode(bp);
	if (rc)
	if (rc)
+8 −2
Original line number Original line Diff line number Diff line
@@ -1521,9 +1521,15 @@ fec_enet_rx(struct net_device *ndev, int budget)
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct fec_enet_private *fep = netdev_priv(ndev);


	for_each_set_bit(queue_id, &fep->work_rx, FEC_ENET_MAX_RX_QS) {
	for_each_set_bit(queue_id, &fep->work_rx, FEC_ENET_MAX_RX_QS) {
		clear_bit(queue_id, &fep->work_rx);
		int ret;
		pkt_received += fec_enet_rx_queue(ndev,

		ret = fec_enet_rx_queue(ndev,
					budget - pkt_received, queue_id);
					budget - pkt_received, queue_id);

		if (ret < budget - pkt_received)
			clear_bit(queue_id, &fep->work_rx);

		pkt_received += ret;
	}
	}
	return pkt_received;
	return pkt_received;
}
}
+1 −1
Original line number Original line Diff line number Diff line
@@ -707,7 +707,7 @@ static int get_fixed_ipv6_csum(__wsum hw_checksum, struct sk_buff *skb,


	if (ipv6h->nexthdr == IPPROTO_FRAGMENT || ipv6h->nexthdr == IPPROTO_HOPOPTS)
	if (ipv6h->nexthdr == IPPROTO_FRAGMENT || ipv6h->nexthdr == IPPROTO_HOPOPTS)
		return -1;
		return -1;
	hw_checksum = csum_add(hw_checksum, (__force __wsum)(ipv6h->nexthdr << 8));
	hw_checksum = csum_add(hw_checksum, (__force __wsum)htons(ipv6h->nexthdr));


	csum_pseudo_hdr = csum_partial(&ipv6h->saddr,
	csum_pseudo_hdr = csum_partial(&ipv6h->saddr,
				       sizeof(ipv6h->saddr) + sizeof(ipv6h->daddr), 0);
				       sizeof(ipv6h->saddr) + sizeof(ipv6h->daddr), 0);
Loading