Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit a84a8ab9 authored by Linus Torvalds
Browse files
Pull networking fixes from David Miller:

 1) Fix divide by zero in mlx5, from Talat Batheesh.

 2) Guard against invalid GSO packets coming from untrusted guests and
    arriving in qdisc_pkt_len_init(), from Eric Dumazet.

 3) Similarly add such protection to the various protocol GSO handlers.
    From Willem de Bruijn.

 4) Fix regression added to IGMP source address checking for IGMPv3
    reports, from Felix Fietkau.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net:
  tls: Correct length of scatterlist in tls_sw_sendpage
  be2net: restore properly promisc mode after queues reconfiguration
  net: igmp: fix source address check for IGMPv3 reports
  gso: validate gso_type in GSO handlers
  net: qdisc_pkt_len_init() should be more robust
  ibmvnic: Allocate and request vpd in init_resources
  ibmvnic: Revert to previous mtu when unsupported value requested
  ibmvnic: Modify buffer size and number of queues on failover
  rds: tcp: compute m_ack_seq as offset from ->write_seq
  usbnet: silence an unnecessary warning
  cxgb4: fix endianness for vlan value in cxgb4_tc_flower
  cxgb4: set filter type to 1 for ETH_P_IPV6
  net/mlx5e: Fix fixpoint divide exception in mlx5e_am_stats_compare
parents 19952667 7a8c4dd9
Loading
Loading
Loading
Loading
+5 −2
Original line number Diff line number Diff line
@@ -111,6 +111,9 @@ static void cxgb4_process_flow_match(struct net_device *dev,
			ethtype_mask = 0;
		}

		if (ethtype_key == ETH_P_IPV6)
			fs->type = 1;

		fs->val.ethtype = ethtype_key;
		fs->mask.ethtype = ethtype_mask;
		fs->val.proto = key->ip_proto;
@@ -205,8 +208,8 @@ static void cxgb4_process_flow_match(struct net_device *dev,
					   VLAN_PRIO_SHIFT);
		vlan_tci_mask = mask->vlan_id | (mask->vlan_priority <<
						 VLAN_PRIO_SHIFT);
		fs->val.ivlan = cpu_to_be16(vlan_tci);
		fs->mask.ivlan = cpu_to_be16(vlan_tci_mask);
		fs->val.ivlan = vlan_tci;
		fs->mask.ivlan = vlan_tci_mask;

		/* Chelsio adapters use ivlan_vld bit to match vlan packets
		 * as 802.1Q. Also, when vlan tag is present in packets,
+9 −0
Original line number Diff line number Diff line
@@ -4634,6 +4634,15 @@ int be_update_queues(struct be_adapter *adapter)

	be_schedule_worker(adapter);

	/*
	 * The IF was destroyed and re-created. We need to clear
	 * all promiscuous flags valid for the destroyed IF.
	 * Without this promisc mode is not restored during
	 * be_open() because the driver thinks that it is
	 * already enabled in HW.
	 */
	adapter->if_flags &= ~BE_IF_FLAGS_ALL_PROMISCUOUS;

	if (netif_running(netdev))
		status = be_open(netdev);

+58 −15
Original line number Diff line number Diff line
@@ -410,6 +410,10 @@ static int reset_rx_pools(struct ibmvnic_adapter *adapter)
	struct ibmvnic_rx_pool *rx_pool;
	int rx_scrqs;
	int i, j, rc;
	u64 *size_array;

	size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
		be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size));

	rx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	for (i = 0; i < rx_scrqs; i++) {
@@ -417,7 +421,17 @@ static int reset_rx_pools(struct ibmvnic_adapter *adapter)

		netdev_dbg(adapter->netdev, "Re-setting rx_pool[%d]\n", i);

		rc = reset_long_term_buff(adapter, &rx_pool->long_term_buff);
		if (rx_pool->buff_size != be64_to_cpu(size_array[i])) {
			free_long_term_buff(adapter, &rx_pool->long_term_buff);
			rx_pool->buff_size = be64_to_cpu(size_array[i]);
			alloc_long_term_buff(adapter, &rx_pool->long_term_buff,
					     rx_pool->size *
					     rx_pool->buff_size);
		} else {
			rc = reset_long_term_buff(adapter,
						  &rx_pool->long_term_buff);
		}

		if (rc)
			return rc;

@@ -439,14 +453,12 @@ static int reset_rx_pools(struct ibmvnic_adapter *adapter)
static void release_rx_pools(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_rx_pool *rx_pool;
	int rx_scrqs;
	int i, j;

	if (!adapter->rx_pool)
		return;

	rx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	for (i = 0; i < rx_scrqs; i++) {
	for (i = 0; i < adapter->num_active_rx_pools; i++) {
		rx_pool = &adapter->rx_pool[i];

		netdev_dbg(adapter->netdev, "Releasing rx_pool[%d]\n", i);
@@ -469,6 +481,7 @@ static void release_rx_pools(struct ibmvnic_adapter *adapter)

	kfree(adapter->rx_pool);
	adapter->rx_pool = NULL;
	adapter->num_active_rx_pools = 0;
}

static int init_rx_pools(struct net_device *netdev)
@@ -493,6 +506,8 @@ static int init_rx_pools(struct net_device *netdev)
		return -1;
	}

	adapter->num_active_rx_pools = 0;

	for (i = 0; i < rxadd_subcrqs; i++) {
		rx_pool = &adapter->rx_pool[i];

@@ -536,6 +551,8 @@ static int init_rx_pools(struct net_device *netdev)
		rx_pool->next_free = 0;
	}

	adapter->num_active_rx_pools = rxadd_subcrqs;

	return 0;
}

@@ -586,13 +603,12 @@ static void release_vpd_data(struct ibmvnic_adapter *adapter)
static void release_tx_pools(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_tx_pool *tx_pool;
	int i, tx_scrqs;
	int i;

	if (!adapter->tx_pool)
		return;

	tx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
	for (i = 0; i < tx_scrqs; i++) {
	for (i = 0; i < adapter->num_active_tx_pools; i++) {
		netdev_dbg(adapter->netdev, "Releasing tx_pool[%d]\n", i);
		tx_pool = &adapter->tx_pool[i];
		kfree(tx_pool->tx_buff);
@@ -603,6 +619,7 @@ static void release_tx_pools(struct ibmvnic_adapter *adapter)

	kfree(adapter->tx_pool);
	adapter->tx_pool = NULL;
	adapter->num_active_tx_pools = 0;
}

static int init_tx_pools(struct net_device *netdev)
@@ -619,6 +636,8 @@ static int init_tx_pools(struct net_device *netdev)
	if (!adapter->tx_pool)
		return -1;

	adapter->num_active_tx_pools = 0;

	for (i = 0; i < tx_subcrqs; i++) {
		tx_pool = &adapter->tx_pool[i];

@@ -666,6 +685,8 @@ static int init_tx_pools(struct net_device *netdev)
		tx_pool->producer_index = 0;
	}

	adapter->num_active_tx_pools = tx_subcrqs;

	return 0;
}

@@ -860,7 +881,7 @@ static int ibmvnic_get_vpd(struct ibmvnic_adapter *adapter)
	if (adapter->vpd->buff)
		len = adapter->vpd->len;

	reinit_completion(&adapter->fw_done);
	init_completion(&adapter->fw_done);
	crq.get_vpd_size.first = IBMVNIC_CRQ_CMD;
	crq.get_vpd_size.cmd = GET_VPD_SIZE;
	ibmvnic_send_crq(adapter, &crq);
@@ -922,6 +943,13 @@ static int init_resources(struct ibmvnic_adapter *adapter)
	if (!adapter->vpd)
		return -ENOMEM;

	/* Vital Product Data (VPD) */
	rc = ibmvnic_get_vpd(adapter);
	if (rc) {
		netdev_err(netdev, "failed to initialize Vital Product Data (VPD)\n");
		return rc;
	}

	adapter->map_id = 1;
	adapter->napi = kcalloc(adapter->req_rx_queues,
				sizeof(struct napi_struct), GFP_KERNEL);
@@ -995,7 +1023,7 @@ static int __ibmvnic_open(struct net_device *netdev)
static int ibmvnic_open(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int rc, vpd;
	int rc;

	mutex_lock(&adapter->reset_lock);

@@ -1018,11 +1046,6 @@ static int ibmvnic_open(struct net_device *netdev)
	rc = __ibmvnic_open(netdev);
	netif_carrier_on(netdev);

	/* Vital Product Data (VPD) */
	vpd = ibmvnic_get_vpd(adapter);
	if (vpd)
		netdev_err(netdev, "failed to initialize Vital Product Data (VPD)\n");

	mutex_unlock(&adapter->reset_lock);

	return rc;
@@ -1548,6 +1571,7 @@ static int ibmvnic_set_mac(struct net_device *netdev, void *p)
static int do_reset(struct ibmvnic_adapter *adapter,
		    struct ibmvnic_rwi *rwi, u32 reset_state)
{
	u64 old_num_rx_queues, old_num_tx_queues;
	struct net_device *netdev = adapter->netdev;
	int i, rc;

@@ -1557,6 +1581,9 @@ static int do_reset(struct ibmvnic_adapter *adapter,
	netif_carrier_off(netdev);
	adapter->reset_reason = rwi->reset_reason;

	old_num_rx_queues = adapter->req_rx_queues;
	old_num_tx_queues = adapter->req_tx_queues;

	if (rwi->reset_reason == VNIC_RESET_MOBILITY) {
		rc = ibmvnic_reenable_crq_queue(adapter);
		if (rc)
@@ -1601,6 +1628,12 @@ static int do_reset(struct ibmvnic_adapter *adapter,
			rc = init_resources(adapter);
			if (rc)
				return rc;
		} else if (adapter->req_rx_queues != old_num_rx_queues ||
			   adapter->req_tx_queues != old_num_tx_queues) {
			release_rx_pools(adapter);
			release_tx_pools(adapter);
			init_rx_pools(netdev);
			init_tx_pools(netdev);
		} else {
			rc = reset_tx_pools(adapter);
			if (rc)
@@ -3592,7 +3625,17 @@ static void handle_request_cap_rsp(union ibmvnic_crq *crq,
			 *req_value,
			 (long int)be64_to_cpu(crq->request_capability_rsp.
					       number), name);
		*req_value = be64_to_cpu(crq->request_capability_rsp.number);

		if (be16_to_cpu(crq->request_capability_rsp.capability) ==
		    REQ_MTU) {
			pr_err("mtu of %llu is not supported. Reverting.\n",
			       *req_value);
			*req_value = adapter->fallback.mtu;
		} else {
			*req_value =
				be64_to_cpu(crq->request_capability_rsp.number);
		}

		ibmvnic_send_req_caps(adapter, 1);
		return;
	default:
+2 −0
Original line number Diff line number Diff line
@@ -1091,6 +1091,8 @@ struct ibmvnic_adapter {
	u64 opt_rxba_entries_per_subcrq;
	__be64 tx_rx_desc_req;
	u8 map_id;
	u64 num_active_rx_pools;
	u64 num_active_tx_pools;

	struct tasklet_struct tasklet;
	enum vnic_state state;
+6 −0
Original line number Diff line number Diff line
@@ -201,9 +201,15 @@ static int mlx5e_am_stats_compare(struct mlx5e_rx_am_stats *curr,
		return (curr->bpms > prev->bpms) ? MLX5E_AM_STATS_BETTER :
						   MLX5E_AM_STATS_WORSE;

	if (!prev->ppms)
		return curr->ppms ? MLX5E_AM_STATS_BETTER :
				    MLX5E_AM_STATS_SAME;

	if (IS_SIGNIFICANT_DIFF(curr->ppms, prev->ppms))
		return (curr->ppms > prev->ppms) ? MLX5E_AM_STATS_BETTER :
						   MLX5E_AM_STATS_WORSE;
	if (!prev->epms)
		return MLX5E_AM_STATS_SAME;

	if (IS_SIGNIFICANT_DIFF(curr->epms, prev->epms))
		return (curr->epms < prev->epms) ? MLX5E_AM_STATS_BETTER :
Loading