
Commit c3fe065c authored by David S. Miller
Jeff Kirsher says:

====================
This series contains updates to ixgbe & ixgbevf.
 ...
Alexander Duyck (6):
  ixgbe: Ping the VFs on link status change to trigger link change
  ixgbe: Handle failures in the ixgbe_setup_rx/tx_resources calls
  ixgbe: Move configuration of set_real_num_rx/tx_queues into open
  ixgbe: Update the logic for ixgbe_cache_ring_dcb and DCB RSS
    configuration
  ixgbe: Cleanup logic for MRQC and MTQC configuration
  ixgbevf: Update descriptor macros to accept pointers and drop _ADV
    suffix
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
parents 1c652966 908421f6
+52 −86
@@ -42,42 +42,37 @@ static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc,
 
 	switch (hw->mac.type) {
 	case ixgbe_mac_82598EB:
-		*tx = tc << 2;
-		*rx = tc << 3;
+		/* TxQs/TC: 4	RxQs/TC: 8 */
+		*tx = tc << 2; /* 0, 4,  8, 12, 16, 20, 24, 28 */
+		*rx = tc << 3; /* 0, 8, 16, 24, 32, 40, 48, 56 */
 		break;
 	case ixgbe_mac_82599EB:
 	case ixgbe_mac_X540:
 		if (num_tcs > 4) {
-			if (tc < 3) {
-				*tx = tc << 5;
-				*rx = tc << 4;
-			} else if (tc <  5) {
-				*tx = ((tc + 2) << 4);
-				*rx = tc << 4;
-			} else if (tc < num_tcs) {
-				*tx = ((tc + 8) << 3);
-				*rx = tc << 4;
-			}
+			/*
+			 * TCs    : TC0/1 TC2/3 TC4-7
+			 * TxQs/TC:    32    16     8
+			 * RxQs/TC:    16    16    16
+			 */
+			*rx = tc << 4;
+			if (tc < 3)
+				*tx = tc << 5;		/*   0,  32,  64 */
+			else if (tc < 5)
+				*tx = (tc + 2) << 4;	/*  80,  96 */
+			else
+				*tx = (tc + 8) << 3;	/* 104, 112, 120 */
 		} else {
+			/*
+			 * TCs    : TC0 TC1 TC2/3
+			 * TxQs/TC:  64  32    16
+			 * RxQs/TC:  32  32    32
+			 */
 			*rx = tc << 5;
-			switch (tc) {
-			case 0:
-				*tx =  0;
-				break;
-			case 1:
-				*tx = 64;
-				break;
-			case 2:
-				*tx = 96;
-				break;
-			case 3:
-				*tx = 112;
-				break;
-			default:
-				break;
-			}
+			if (tc < 2)
+				*tx = tc << 6;		/*  0,  64 */
+			else
+				*tx = (tc + 4) << 4;	/* 96, 112 */
 		}
 		break;
 	default:
 		break;
 	}
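
(A worked check of the new annotations, not part of the commit: in 8-TC mode, tc = 6 takes the final branch, so *tx = (6 + 8) << 3 = 112 and *rx = 6 << 4 = 96; in 4-TC mode, tc = 3 gives *tx = (3 + 4) << 4 = 112 and *rx = 3 << 5 = 96 — both agreeing with the inline offset comments.)
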
@@ -90,25 +85,26 @@ static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc,
  * Cache the descriptor ring offsets for DCB to the assigned rings.
  *
  **/
-static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
+static bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
 {
 	struct net_device *dev = adapter->netdev;
-	int i, j, k;
+	unsigned int tx_idx, rx_idx;
+	int tc, offset, rss_i, i;
 	u8 num_tcs = netdev_get_num_tc(dev);
 
-	if (!num_tcs)
+	/* verify we have DCB queueing enabled before proceeding */
+	if (num_tcs <= 1)
 		return false;
 
-	for (i = 0, k = 0; i < num_tcs; i++) {
-		unsigned int tx_s, rx_s;
-		u16 count = dev->tc_to_txq[i].count;
-
-		ixgbe_get_first_reg_idx(adapter, i, &tx_s, &rx_s);
-		for (j = 0; j < count; j++, k++) {
-			adapter->tx_ring[k]->reg_idx = tx_s + j;
-			adapter->rx_ring[k]->reg_idx = rx_s + j;
-			adapter->tx_ring[k]->dcb_tc = i;
-			adapter->rx_ring[k]->dcb_tc = i;
+	rss_i = adapter->ring_feature[RING_F_RSS].indices;
+
+	for (tc = 0, offset = 0; tc < num_tcs; tc++, offset += rss_i) {
+		ixgbe_get_first_reg_idx(adapter, tc, &tx_idx, &rx_idx);
+		for (i = 0; i < rss_i; i++, tx_idx++, rx_idx++) {
+			adapter->tx_ring[offset + i]->reg_idx = tx_idx;
+			adapter->rx_ring[offset + i]->reg_idx = rx_idx;
+			adapter->tx_ring[offset + i]->dcb_tc = tc;
+			adapter->rx_ring[offset + i]->dcb_tc = tc;
 		}
 	}
 
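
Since every TC now owns exactly rss_i consecutive rings, a ring's array slot is simply tc * rss_i + i. A standalone sketch of that index math — num_tcs = 4 and rss_i = 16 are example values, not anything read from an adapter:

    #include <stdio.h>

    /* Illustrative only: the flattened ring indexing used by the new
     * ixgbe_cache_ring_dcb(), with made-up num_tcs = 4 and rss_i = 16. */
    int main(void)
    {
        int tc, i, offset;

        for (tc = 0, offset = 0; tc < 4; tc++, offset += 16)
            for (i = 0; i < 16; i++)
                printf("ring[%2d] -> TC%d, queue %2d within the TC\n",
                       offset + i, tc, i);
        return 0;
    }
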
@@ -349,7 +345,7 @@ static bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter)
  * fallthrough conditions.
  *
  **/
-static int ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
+static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
 {
 	/* Start with base case */
 	adapter->num_rx_queues = 1;
@@ -358,29 +354,14 @@ static int ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
 	adapter->num_rx_queues_per_pool = 1;
 
 	if (ixgbe_set_sriov_queues(adapter))
-		goto done;
+		return;
 
 #ifdef CONFIG_IXGBE_DCB
 	if (ixgbe_set_dcb_queues(adapter))
-		goto done;
+		return;
 
 #endif
-	if (ixgbe_set_rss_queues(adapter))
-		goto done;
-
-	/* fallback to base case */
-	adapter->num_rx_queues = 1;
-	adapter->num_tx_queues = 1;
-
-done:
-	if ((adapter->netdev->reg_state == NETREG_UNREGISTERED) ||
-	    (adapter->netdev->reg_state == NETREG_UNREGISTERING))
-		return 0;
-
-	/* Notify the stack of the (possibly) reduced queue counts. */
-	netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
-	return netif_set_real_num_rx_queues(adapter->netdev,
-					    adapter->num_rx_queues);
+	ixgbe_set_rss_queues(adapter);
 }
 
 static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
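
(The queue-count notification and the NETREG guard dropped here are not lost: the "Move configuration of set_real_num_rx/tx_queues into open" patch in this series relocates the netif_set_real_num_tx/rx_queues() calls into ixgbe_open(), shown later in this commit — which is also what lets this function and ixgbe_set_interrupt_capability() become void.)
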
@@ -710,11 +691,10 @@ static void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter)
  * Attempt to configure the interrupts using the best available
  * capabilities of the hardware and the kernel.
  **/
-static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
+static void ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
 {
 	struct ixgbe_hw *hw = &adapter->hw;
-	int err = 0;
-	int vector, v_budget;
+	int vector, v_budget, err;
 
 	/*
 	 * It's easy to be greedy for MSI-X vectors, but it really
@@ -747,7 +727,7 @@ static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
 		ixgbe_acquire_msix_vectors(adapter, v_budget);
 
 		if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
-			goto out;
+			return;
 	}
 
 	adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
@@ -762,25 +742,17 @@ static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
 	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
 		ixgbe_disable_sriov(adapter);
 
-	err = ixgbe_set_num_queues(adapter);
-	if (err)
-		return err;
+	ixgbe_set_num_queues(adapter);
 
 	adapter->num_q_vectors = 1;
 
 	err = pci_enable_msi(adapter->pdev);
-	if (!err) {
-		adapter->flags |= IXGBE_FLAG_MSI_ENABLED;
-	} else {
+	if (err) {
 		netif_printk(adapter, hw, KERN_DEBUG, adapter->netdev,
 			     "Unable to allocate MSI interrupt, "
 			     "falling back to legacy.  Error: %d\n", err);
-		/* reset err */
-		err = 0;
+		return;
 	}
-
-out:
-	return err;
+	adapter->flags |= IXGBE_FLAG_MSI_ENABLED;
 }
 
 /**
@@ -798,15 +770,10 @@ int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter)
 	int err;
 
 	/* Number of supported queues */
-	err = ixgbe_set_num_queues(adapter);
-	if (err)
-		return err;
+	ixgbe_set_num_queues(adapter);
 
-	err = ixgbe_set_interrupt_capability(adapter);
-	if (err) {
-		e_dev_err("Unable to setup interrupt capabilities\n");
-		goto err_set_interrupt;
-	}
+	/* Set interrupt mode */
+	ixgbe_set_interrupt_capability(adapter);
 
 	err = ixgbe_alloc_q_vectors(adapter);
 	if (err) {
@@ -826,7 +793,6 @@ int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter)
 
 err_alloc_q_vectors:
 	ixgbe_reset_interrupt_capability(adapter);
-err_set_interrupt:
 	return err;
 }
 
+116 −74
@@ -2719,8 +2719,7 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
 static void ixgbe_setup_mtqc(struct ixgbe_adapter *adapter)
 {
 	struct ixgbe_hw *hw = &adapter->hw;
-	u32 rttdcs;
-	u32 reg;
+	u32 rttdcs, mtqc;
 	u8 tcs = netdev_get_num_tc(adapter->netdev);
 
 	if (hw->mac.type == ixgbe_mac_82598EB)
@@ -2732,28 +2731,32 @@ static void ixgbe_setup_mtqc(struct ixgbe_adapter *adapter)
 	IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
 
 	/* set transmit pool layout */
-	switch (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
-	case (IXGBE_FLAG_SRIOV_ENABLED):
-		IXGBE_WRITE_REG(hw, IXGBE_MTQC,
-				(IXGBE_MTQC_VT_ENA | IXGBE_MTQC_64VF));
-		break;
-	default:
-		if (!tcs)
-			reg = IXGBE_MTQC_64Q_1PB;
-		else if (tcs <= 4)
-			reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
+	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
+		mtqc = IXGBE_MTQC_VT_ENA;
+		if (tcs > 4)
+			mtqc |= IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
+		else if (tcs > 1)
+			mtqc |= IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
+		else if (adapter->ring_feature[RING_F_RSS].indices == 4)
+			mtqc |= IXGBE_MTQC_32VF;
+		else
+			mtqc |= IXGBE_MTQC_64VF;
+	} else {
+		if (tcs > 4)
+			mtqc = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
+		else if (tcs > 1)
+			mtqc = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
 		else
-			reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
-
-		IXGBE_WRITE_REG(hw, IXGBE_MTQC, reg);
+			mtqc = IXGBE_MTQC_64Q_1PB;
+	}
 
-		/* Enable Security TX Buffer IFG for multiple pb */
-		if (tcs) {
-			reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
-			reg |= IXGBE_SECTX_DCB;
-			IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg);
-		}
-		break;
+	IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc);
+
+	/* Enable Security TX Buffer IFG for multiple pb */
+	if (tcs) {
+		u32 sectx = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
+
+		sectx |= IXGBE_SECTX_DCB;
+		IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, sectx);
 	}
 
 	/* re-enable the arbiter */
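
(Two readable consequences of this rewrite: with SR-IOV enabled, the transmit pool layout now follows the DCB state first (IXGBE_MTQC_8TC_8TQ / IXGBE_MTQC_4TC_4TQ on top of VT_ENA), and in the no-TC case an RSS width of exactly 4 picks the 32-pools-of-4 layout (IXGBE_MTQC_32VF) rather than the 64-pools-of-2 one (IXGBE_MTQC_64VF), the only layout the removed switch could program. Without SR-IOV the same three options remain, but keyed on tcs > 4 / tcs > 1 instead of !tcs / tcs <= 4, so a single traffic class now falls through to the plain 64Q_1PB layout.)
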
@@ -2886,11 +2889,18 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
 	u32 mrqc = 0, reta = 0;
 	u32 rxcsum;
 	int i, j;
-	u8 tcs = netdev_get_num_tc(adapter->netdev);
-	int maxq = adapter->ring_feature[RING_F_RSS].indices;
+	u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;
 
-	if (tcs)
-		maxq = min(maxq, adapter->num_tx_queues / tcs);
+	if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED))
+		rss_i = 1;
+
+	/*
+	 * Program table for at least 2 queues w/ SR-IOV so that VFs can
+	 * make full use of any rings they may have.  We will use the
+	 * PSRTYPE register to control how many rings we use within the PF.
+	 */
+	if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) && (rss_i < 2))
+		rss_i = 2;
 
 	/* Fill out hash function seeds */
 	for (i = 0; i < 10; i++)
@@ -2898,7 +2908,7 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
 
 	/* Fill out redirection table */
 	for (i = 0, j = 0; i < 128; i++, j++) {
-		if (j == maxq)
+		if (j == rss_i)
 			j = 0;
 		/* reta = 4-byte sliding window of
 		 * 0x00..(indices-1)(indices-1)00..etc. */
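
The wrap behavior of the loop is easiest to see with a concrete rss_i. A minimal sketch — rss_i = 4 is an example value, the register write is replaced by a print, and the per-byte encoding is simplified to the raw queue index (the driver's exact byte pattern follows its sliding-window comment above):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t reta = 0;
        int i, j;

        for (i = 0, j = 0; i < 128; i++, j++) {
            if (j == 4)              /* wrap at rss_i */
                j = 0;
            reta = (reta << 8) | j;  /* slide the new entry in */
            if ((i & 3) == 3)        /* flush four entries per register */
                printf("RETA[%2d] = 0x%08x\n", i >> 2, reta);
        }
        return 0;
    }
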
@@ -2912,35 +2922,36 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
 	rxcsum |= IXGBE_RXCSUM_PCSD;
 	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
 
-	if (adapter->hw.mac.type == ixgbe_mac_82598EB &&
-	    (adapter->flags & IXGBE_FLAG_RSS_ENABLED)) {
-		mrqc = IXGBE_MRQC_RSSEN;
+	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
+		if (adapter->flags & IXGBE_FLAG_RSS_ENABLED)
+			mrqc = IXGBE_MRQC_RSSEN;
 	} else {
-		int mask = adapter->flags & (IXGBE_FLAG_RSS_ENABLED
-					     | IXGBE_FLAG_SRIOV_ENABLED);
+		u8 tcs = netdev_get_num_tc(adapter->netdev);
 
-		switch (mask) {
-		case (IXGBE_FLAG_RSS_ENABLED):
-			if (!tcs)
-				mrqc = IXGBE_MRQC_RSSEN;
-			else if (tcs <= 4)
-				mrqc = IXGBE_MRQC_RTRSS4TCEN;
+		if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
+			if (tcs > 4)
+				mrqc = IXGBE_MRQC_VMDQRT8TCEN;	/* 8 TCs */
+			else if (tcs > 1)
+				mrqc = IXGBE_MRQC_VMDQRT4TCEN;	/* 4 TCs */
+			else if (adapter->ring_feature[RING_F_RSS].indices == 4)
+				mrqc = IXGBE_MRQC_VMDQRSS32EN;
 			else
-				mrqc = IXGBE_MRQC_RTRSS8TCEN;
-			break;
-		case (IXGBE_FLAG_SRIOV_ENABLED):
-			mrqc = IXGBE_MRQC_VMDQEN;
-			break;
-		default:
-			break;
+				mrqc = IXGBE_MRQC_VMDQRSS64EN;
+		} else {
+			if (tcs > 4)
+				mrqc = IXGBE_MRQC_RTRSS8TCEN;
+			else if (tcs > 1)
+				mrqc = IXGBE_MRQC_RTRSS4TCEN;
+			else
+				mrqc = IXGBE_MRQC_RSSEN;
 		}
 	}
 
 	/* Perform hash on these packet types */
-	mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4
-	      | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
-	      | IXGBE_MRQC_RSS_FIELD_IPV6
-	      | IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
+	mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4 |
+		IXGBE_MRQC_RSS_FIELD_IPV4_TCP |
+		IXGBE_MRQC_RSS_FIELD_IPV6 |
+		IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
 
 	if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP)
 		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
@@ -3103,8 +3114,13 @@ static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter)
 	if (hw->mac.type == ixgbe_mac_82598EB)
 		return;
 
-	if (adapter->flags & IXGBE_FLAG_RSS_ENABLED)
-		psrtype |= (adapter->num_rx_queues_per_pool << 29);
+	if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
+		int rss_i = adapter->ring_feature[RING_F_RSS].indices;
+
+		if (rss_i > 3)
+			psrtype |= 2 << 29;
+		else if (rss_i > 1)
+			psrtype |= 1 << 29;
+	}
 
 	for (p = 0; p < adapter->num_rx_pools; p++)
 		IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(adapter->num_vfs + p),
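
(My gloss on the constants, not spelled out in the diff: the field at bit 29 of PSRTYPE appears to take the rings-per-pool count as a power of two, so 2 << 29 advertises four RSS rings per pool and 1 << 29 advertises two, where the old code stored the raw ring count. This is also the PF-side control the new comment in ixgbe_setup_mrqc refers to when it programs the redirection table for at least 2 queues under SR-IOV.)
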
@@ -3608,20 +3624,16 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
 
 	/* Enable RSS Hash per TC */
 	if (hw->mac.type != ixgbe_mac_82598EB) {
-		int i;
-		u32 reg = 0;
-		u8 msb = 0;
-		u8 rss_i = adapter->netdev->tc_to_txq[0].count - 1;
+		u32 msb = 0;
+		u16 rss_i = adapter->ring_feature[RING_F_RSS].indices - 1;
 
 		while (rss_i) {
 			msb++;
 			rss_i >>= 1;
 		}
 
-		for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
-			reg |= msb << IXGBE_RQTC_SHIFT_TC(i);
-
-		IXGBE_WRITE_REG(hw, IXGBE_RQTC, reg);
+		/* write msb to all 8 TCs in one write */
+		IXGBE_WRITE_REG(hw, IXGBE_RQTC, msb * 0x11111111);
 	}
 }
 #endif
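
(Worked example, mine: for rss_i = 16 the decrement gives 15, the while loop counts msb = 4, and 4 * 0x11111111 = 0x44444444 — the value 4 replicated into each of the eight 4-bit TC fields of RQTC, exactly the word the removed loop assembled one IXGBE_RQTC_SHIFT_TC(i) field at a time. The source of rss_i also moves from tc_to_txq[0].count to the RING_F_RSS feature, matching the new ixgbe_cache_ring_dcb above.)
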
@@ -4549,10 +4561,16 @@ static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter)
 		err = ixgbe_setup_tx_resources(adapter->tx_ring[i]);
 		if (!err)
 			continue;
+
 		e_err(probe, "Allocation for Tx Queue %u failed\n", i);
-		break;
+		goto err_setup_tx;
 	}
 
+	return 0;
+err_setup_tx:
+	/* rewind the index freeing the rings as we go */
+	while (i--)
+		ixgbe_free_tx_resources(adapter->tx_ring[i]);
 	return err;
 }
 
@@ -4627,10 +4645,16 @@ static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter)
 		err = ixgbe_setup_rx_resources(adapter->rx_ring[i]);
 		if (!err)
 			continue;
+
 		e_err(probe, "Allocation for Rx Queue %u failed\n", i);
-		break;
+		goto err_setup_rx;
 	}
 
+	return 0;
+err_setup_rx:
+	/* rewind the index freeing the rings as we go */
+	while (i--)
+		ixgbe_free_rx_resources(adapter->rx_ring[i]);
 	return err;
 }
 
@@ -4786,15 +4810,31 @@ static int ixgbe_open(struct net_device *netdev)
 	if (err)
 		goto err_req_irq;
 
+	/* Notify the stack of the actual queue counts. */
+	err = netif_set_real_num_tx_queues(netdev,
+					   adapter->num_rx_pools > 1 ? 1 :
+					   adapter->num_tx_queues);
+	if (err)
+		goto err_set_queues;
+
+	err = netif_set_real_num_rx_queues(netdev,
+					   adapter->num_rx_pools > 1 ? 1 :
+					   adapter->num_rx_queues);
+	if (err)
+		goto err_set_queues;
+
 	ixgbe_up_complete(adapter);
 
 	return 0;
 
+err_set_queues:
+	ixgbe_free_irq(adapter);
 err_req_irq:
-err_setup_rx:
 	ixgbe_free_all_rx_resources(adapter);
-err_setup_tx:
+err_setup_rx:
 	ixgbe_free_all_tx_resources(adapter);
+err_setup_tx:
 	ixgbe_reset(adapter);
 
 	return err;
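
(Context for this hunk: these netif_set_real_num_tx/rx_queues() calls are the ones removed from ixgbe_set_num_queues in the first file. Doing them in ixgbe_open() means the netdev is guaranteed registered, and their return values finally get a real unwind path — err_set_queues frees the IRQs and falls through the resource-freeing labels, which are reordered here so each label releases exactly what was set up before the failure.)
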
@@ -4852,23 +4892,19 @@ static int ixgbe_resume(struct pci_dev *pdev)
 
 	pci_wake_from_d3(pdev, false);
 
-	rtnl_lock();
-	err = ixgbe_init_interrupt_scheme(adapter);
-	rtnl_unlock();
-	if (err) {
-		e_dev_err("Cannot initialize interrupts for device\n");
-		return err;
-	}
-
 	ixgbe_reset(adapter);
 
 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
 
-	if (netif_running(netdev)) {
+	rtnl_lock();
+	err = ixgbe_init_interrupt_scheme(adapter);
+	if (!err && netif_running(netdev))
 		err = ixgbe_open(netdev);
-		if (err)
-			return err;
-	}
+
+	rtnl_unlock();
+
+	if (err)
+		return err;
 
 	netif_device_attach(netdev);
 
@@ -5390,6 +5426,9 @@ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter)
 
 	netif_carrier_on(netdev);
 	ixgbe_check_vf_rate_limit(adapter);
+
+	/* ping all the active vfs to let them know link has changed */
+	ixgbe_ping_all_vfs(adapter);
 }
 
 /**
@@ -5419,6 +5458,9 @@ static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *adapter)
 
 	e_info(drv, "NIC Link is Down\n");
 	netif_carrier_off(netdev);
+
+	/* ping all the active vfs to let them know link has changed */
+	ixgbe_ping_all_vfs(adapter);
 }
 
 /**
+6 −6
@@ -164,12 +164,12 @@ struct ixgbevf_q_vector {
 	((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
 	(R)->next_to_clean - (R)->next_to_use - 1)
 
-#define IXGBE_RX_DESC_ADV(R, i)	    \
-	(&(((union ixgbe_adv_rx_desc *)((R).desc))[i]))
-#define IXGBE_TX_DESC_ADV(R, i)	    \
-	(&(((union ixgbe_adv_tx_desc *)((R).desc))[i]))
-#define IXGBE_TX_CTXTDESC_ADV(R, i)	    \
-	(&(((struct ixgbe_adv_tx_context_desc *)((R).desc))[i]))
+#define IXGBEVF_RX_DESC(R, i)	    \
+	(&(((union ixgbe_adv_rx_desc *)((R)->desc))[i]))
+#define IXGBEVF_TX_DESC(R, i)	    \
+	(&(((union ixgbe_adv_tx_desc *)((R)->desc))[i]))
+#define IXGBEVF_TX_CTXTDESC(R, i)	    \
+	(&(((struct ixgbe_adv_tx_context_desc *)((R)->desc))[i]))
 
 #define IXGBE_MAX_JUMBO_FRAME_SIZE        16128
 
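
The rename is mechanical but touches every call site, since the ring argument is now a pointer ((R)->desc rather than (R).desc). A sketch of the call-site shape, using names from the hunks that follow:

    /* before: ring passed by value, _ADV suffix */
    eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
    /* after: ring passed by pointer, IXGBEVF_ prefix */
    eop_desc = IXGBEVF_TX_DESC(tx_ring, eop);
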
+9 −9
@@ -195,7 +195,7 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
 
 	i = tx_ring->next_to_clean;
 	eop = tx_ring->tx_buffer_info[i].next_to_watch;
-	eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
+	eop_desc = IXGBEVF_TX_DESC(tx_ring, eop);
 
 	while ((eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) &&
 	       (count < tx_ring->count)) {
@@ -206,7 +206,7 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
 			goto cont_loop;
 		for ( ; !cleaned; count++) {
 			struct sk_buff *skb;
-			tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
+			tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
 			tx_buffer_info = &tx_ring->tx_buffer_info[i];
 			cleaned = (i == eop);
 			skb = tx_buffer_info->skb;
@@ -235,7 +235,7 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
 
 cont_loop:
 		eop = tx_ring->tx_buffer_info[i].next_to_watch;
-		eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
+		eop_desc = IXGBEVF_TX_DESC(tx_ring, eop);
 	}
 
 	tx_ring->next_to_clean = i;
@@ -339,7 +339,7 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter,
 	bi = &rx_ring->rx_buffer_info[i];
 
 	while (cleaned_count--) {
-		rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);
+		rx_desc = IXGBEVF_RX_DESC(rx_ring, i);
 		skb = bi->skb;
 		if (!skb) {
 			skb = netdev_alloc_skb(adapter->netdev,
@@ -405,7 +405,7 @@ static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
 	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
 
 	i = rx_ring->next_to_clean;
-	rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);
+	rx_desc = IXGBEVF_RX_DESC(rx_ring, i);
 	staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
 	rx_buffer_info = &rx_ring->rx_buffer_info[i];
 
@@ -432,7 +432,7 @@ static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
 		if (i == rx_ring->count)
 			i = 0;
 
-		next_rxd = IXGBE_RX_DESC_ADV(*rx_ring, i);
+		next_rxd = IXGBEVF_RX_DESC(rx_ring, i);
 		prefetch(next_rxd);
 		cleaned_count++;
 
@@ -2437,7 +2437,7 @@ static int ixgbevf_tso(struct ixgbevf_adapter *adapter,
 		i = tx_ring->next_to_use;
 
 		tx_buffer_info = &tx_ring->tx_buffer_info[i];
-		context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i);
+		context_desc = IXGBEVF_TX_CTXTDESC(tx_ring, i);
 
 		/* VLAN MACLEN IPLEN */
 		if (tx_flags & IXGBE_TX_FLAGS_VLAN)
@@ -2497,7 +2497,7 @@ static bool ixgbevf_tx_csum(struct ixgbevf_adapter *adapter,
 	    (tx_flags & IXGBE_TX_FLAGS_VLAN)) {
 		i = tx_ring->next_to_use;
 		tx_buffer_info = &tx_ring->tx_buffer_info[i];
-		context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i);
+		context_desc = IXGBEVF_TX_CTXTDESC(tx_ring, i);
 
 		if (tx_flags & IXGBE_TX_FLAGS_VLAN)
 			vlan_macip_lens |= (tx_flags &
@@ -2700,7 +2700,7 @@ static void ixgbevf_tx_queue(struct ixgbevf_adapter *adapter,
 	i = tx_ring->next_to_use;
 	while (count--) {
 		tx_buffer_info = &tx_ring->tx_buffer_info[i];
-		tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
+		tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
 		tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma);
 		tx_desc->read.cmd_type_len =
 			cpu_to_le32(cmd_type_len | tx_buffer_info->length);