Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 9215310c authored by Linus Torvalds
Browse files
Pull networking fixes from David Miller:

 1) Various netfilter fixlets from Pablo and the netfilter team.

 2) Fix regression in IPVS caused by lack of PMTU exceptions on local
    routes in ipv6, from Julian Anastasov.

 3) Check pskb_trim_rcsum for failure in DSA, from Zhouyang Jia.

 4) Don't crash on poll in TLS, from Daniel Borkmann.

 5) Revert SO_REUSE{ADDR,PORT} change, it regresses various things
    including Avahi mDNS. From Bart Van Assche.

 6) Missing of_node_put in qcom/emac driver, from Yue Haibing.

 7) We lack checking of the TCP checksum in one special case during SYN
    receive, from Frank van der Linden.

 8) Fix module init error paths of mac80211 hwsim, from Johannes Berg.

 9) Handle 802.1ad properly in stmmac driver, from Elad Nachman.

10) Must grab HW caps before doing quirk checks in stmmac driver, from
    Jose Abreu.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (81 commits)
  net: stmmac: Run HWIF Quirks after getting HW caps
  neighbour: skip NTF_EXT_LEARNED entries during forced gc
  net: cxgb3: add error handling for sysfs_create_group
  tls: fix waitall behavior in tls_sw_recvmsg
  tls: fix use-after-free in tls_push_record
  l2tp: filter out non-PPP sessions in pppol2tp_tunnel_ioctl()
  l2tp: reject creation of non-PPP sessions on L2TPv2 tunnels
  mlxsw: spectrum_switchdev: Fix port_vlan refcounting
  mlxsw: spectrum_router: Align with new route replace logic
  mlxsw: spectrum_router: Allow appending to dev-only routes
  ipv6: Only emit append events for appended routes
  stmmac: added support for 802.1ad vlan stripping
  cfg80211: fix rcu in cfg80211_unregister_wdev
  mac80211: Move up init of TXQs
  mac80211_hwsim: fix module init error paths
  cfg80211: initialize sinfo in cfg80211_get_station
  nl80211: fix some kernel doc tag mistakes
  hv_netvsc: Fix the variable sizes in ipsecv2 and rsc offload
  rds: avoid unenecessary cong_update in loop transport
  l2tp: clean up stale tunnel or session in pppol2tp_connect's error path
  ...
parents de7f01c2 7cfde0af
Loading
Loading
Loading
Loading
+2 −0
Original line number Diff line number Diff line
@@ -325,6 +325,8 @@ struct nicvf {
	struct tasklet_struct	qs_err_task;
	struct work_struct	reset_task;
	struct nicvf_work       rx_mode_work;
	/* spinlock to protect workqueue arguments from concurrent access */
	spinlock_t              rx_mode_wq_lock;

	/* PTP timestamp */
	struct cavium_ptp	*ptp_clock;
+36 −14
Original line number Diff line number Diff line
@@ -1923,17 +1923,12 @@ static int nicvf_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
	}
}

static void nicvf_set_rx_mode_task(struct work_struct *work_arg)
static void __nicvf_set_rx_mode_task(u8 mode, struct xcast_addr_list *mc_addrs,
				     struct nicvf *nic)
{
	struct nicvf_work *vf_work = container_of(work_arg, struct nicvf_work,
						  work.work);
	struct nicvf *nic = container_of(vf_work, struct nicvf, rx_mode_work);
	union nic_mbx mbx = {};
	int idx;

	if (!vf_work)
		return;

	/* From the inside of VM code flow we have only 128 bits memory
	 * available to send message to host's PF, so send all mc addrs
	 * one by one, starting from flush command in case if kernel
@@ -1944,7 +1939,7 @@ static void nicvf_set_rx_mode_task(struct work_struct *work_arg)
	mbx.xcast.msg = NIC_MBOX_MSG_RESET_XCAST;
	nicvf_send_msg_to_pf(nic, &mbx);

	if (vf_work->mode & BGX_XCAST_MCAST_FILTER) {
	if (mode & BGX_XCAST_MCAST_FILTER) {
		/* once enabling filtering, we need to signal to PF to add
		 * its' own LMAC to the filter to accept packets for it.
		 */
@@ -1954,23 +1949,46 @@ static void nicvf_set_rx_mode_task(struct work_struct *work_arg)
	}

	/* check if we have any specific MACs to be added to PF DMAC filter */
	if (vf_work->mc) {
	if (mc_addrs) {
		/* now go through kernel list of MACs and add them one by one */
		for (idx = 0; idx < vf_work->mc->count; idx++) {
		for (idx = 0; idx < mc_addrs->count; idx++) {
			mbx.xcast.msg = NIC_MBOX_MSG_ADD_MCAST;
			mbx.xcast.data.mac = vf_work->mc->mc[idx];
			mbx.xcast.data.mac = mc_addrs->mc[idx];
			nicvf_send_msg_to_pf(nic, &mbx);
		}
		kfree(vf_work->mc);
		kfree(mc_addrs);
	}

	/* and finally set rx mode for PF accordingly */
	mbx.xcast.msg = NIC_MBOX_MSG_SET_XCAST;
	mbx.xcast.data.mode = vf_work->mode;
	mbx.xcast.data.mode = mode;

	nicvf_send_msg_to_pf(nic, &mbx);
}

/* Work handler for the deferred rx-mode update queued by
 * nicvf_set_rx_mode(): snapshot the requested xcast mode and multicast
 * address list under the lock, then hand them to
 * __nicvf_set_rx_mode_task() which sends them to the PF.
 */
static void nicvf_set_rx_mode_task(struct work_struct *work_arg)
{
	/* Recover the embedding nicvf_work from the delayed-work member,
	 * then the owning nicvf from the rx_mode_work member.
	 */
	struct nicvf_work *vf_work = container_of(work_arg, struct nicvf_work,
						  work.work);
	struct nicvf *nic = container_of(vf_work, struct nicvf, rx_mode_work);
	u8 mode;
	struct xcast_addr_list *mc;

	/* NOTE(review): container_of() on a valid work pointer cannot
	 * produce NULL, so this guard looks unreachable — confirm before
	 * relying on it.
	 */
	if (!vf_work)
		return;

	/* Save message data locally to prevent them from
	 * being overwritten by next ndo_set_rx_mode call().
	 */
	spin_lock(&nic->rx_mode_wq_lock);
	mode = vf_work->mode;
	mc = vf_work->mc;
	/* Take ownership of the list; __nicvf_set_rx_mode_task() frees it. */
	vf_work->mc = NULL;
	spin_unlock(&nic->rx_mode_wq_lock);

	__nicvf_set_rx_mode_task(mode, mc, nic);
}

static void nicvf_set_rx_mode(struct net_device *netdev)
{
	struct nicvf *nic = netdev_priv(netdev);
@@ -2004,9 +2022,12 @@ static void nicvf_set_rx_mode(struct net_device *netdev)
			}
		}
	}
	spin_lock(&nic->rx_mode_wq_lock);
	kfree(nic->rx_mode_work.mc);
	nic->rx_mode_work.mc = mc_list;
	nic->rx_mode_work.mode = mode;
	queue_delayed_work(nicvf_rx_mode_wq, &nic->rx_mode_work.work, 2 * HZ);
	queue_delayed_work(nicvf_rx_mode_wq, &nic->rx_mode_work.work, 0);
	spin_unlock(&nic->rx_mode_wq_lock);
}

static const struct net_device_ops nicvf_netdev_ops = {
@@ -2163,6 +2184,7 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
	INIT_WORK(&nic->reset_task, nicvf_reset_task);

	INIT_DELAYED_WORK(&nic->rx_mode_work.work, nicvf_set_rx_mode_task);
	spin_lock_init(&nic->rx_mode_wq_lock);

	err = register_netdev(netdev);
	if (err) {
+7 −0
Original line number Diff line number Diff line
@@ -3362,10 +3362,17 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)

	err = sysfs_create_group(&adapter->port[0]->dev.kobj,
				 &cxgb3_attr_group);
	if (err) {
		dev_err(&pdev->dev, "cannot create sysfs group\n");
		goto out_close_led;
	}

	print_port_info(adapter, ai);
	return 0;

out_close_led:
	t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL, 0);

out_free_dev:
	iounmap(adapter->regs);
	for (i = ai->nports0 + ai->nports1 - 1; i >= 0; --i)
+2 −2
Original line number Diff line number Diff line
@@ -760,9 +760,9 @@ struct ixgbe_adapter {
#define IXGBE_RSS_KEY_SIZE     40  /* size of RSS Hash Key in bytes */
	u32 *rss_key;

#ifdef CONFIG_XFRM
#ifdef CONFIG_XFRM_OFFLOAD
	struct ixgbe_ipsec *ipsec;
#endif /* CONFIG_XFRM */
#endif /* CONFIG_XFRM_OFFLOAD */
};

static inline u8 ixgbe_max_rss_indices(struct ixgbe_adapter *adapter)
+24 −10
Original line number Diff line number Diff line
@@ -158,7 +158,16 @@ static void ixgbe_ipsec_stop_data(struct ixgbe_adapter *adapter)
	reg |= IXGBE_SECRXCTRL_RX_DIS;
	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, reg);

	IXGBE_WRITE_FLUSH(hw);
	/* If both Tx and Rx are ready there are no packets
	 * that we need to flush so the loopback configuration
	 * below is not necessary.
	 */
	t_rdy = IXGBE_READ_REG(hw, IXGBE_SECTXSTAT) &
		IXGBE_SECTXSTAT_SECTX_RDY;
	r_rdy = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT) &
		IXGBE_SECRXSTAT_SECRX_RDY;
	if (t_rdy && r_rdy)
		return;

	/* If the tx fifo doesn't have link, but still has data,
	 * we can't clear the tx sec block.  Set the MAC loopback
@@ -185,7 +194,7 @@ static void ixgbe_ipsec_stop_data(struct ixgbe_adapter *adapter)
			IXGBE_SECTXSTAT_SECTX_RDY;
		r_rdy = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT) &
			IXGBE_SECRXSTAT_SECRX_RDY;
	} while (!t_rdy && !r_rdy && limit--);
	} while (!(t_rdy && r_rdy) && limit--);

	/* undo loopback if we played with it earlier */
	if (!link) {
@@ -966,10 +975,22 @@ void ixgbe_ipsec_rx(struct ixgbe_ring *rx_ring,
 **/
void ixgbe_init_ipsec_offload(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_ipsec *ipsec;
	u32 t_dis, r_dis;
	size_t size;

	if (adapter->hw.mac.type == ixgbe_mac_82598EB)
	if (hw->mac.type == ixgbe_mac_82598EB)
		return;

	/* If there is no support for either Tx or Rx offload
	 * we should not be advertising support for IPsec.
	 */
	t_dis = IXGBE_READ_REG(hw, IXGBE_SECTXSTAT) &
		IXGBE_SECTXSTAT_SECTX_OFF_DIS;
	r_dis = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT) &
		IXGBE_SECRXSTAT_SECRX_OFF_DIS;
	if (t_dis || r_dis)
		return;

	ipsec = kzalloc(sizeof(*ipsec), GFP_KERNEL);
@@ -1001,13 +1022,6 @@ void ixgbe_init_ipsec_offload(struct ixgbe_adapter *adapter)

	adapter->netdev->xfrmdev_ops = &ixgbe_xfrmdev_ops;

#define IXGBE_ESP_FEATURES	(NETIF_F_HW_ESP | \
				 NETIF_F_HW_ESP_TX_CSUM | \
				 NETIF_F_GSO_ESP)

	adapter->netdev->features |= IXGBE_ESP_FEATURES;
	adapter->netdev->hw_enc_features |= IXGBE_ESP_FEATURES;

	return;

err2:
Loading