Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit f7cbdb7d authored by David S. Miller's avatar David S. Miller
Browse files

Merge branch 'ixgbe-next'



Aaron Brown says:

====================
Intel Wired LAN Driver Updates

This series contains updates to ixgbe and ixgbevf.

John adds rtnl lock / unlock semantics for ixgbe_reinit_locked()
which was being called without the rtnl lock being held.

Jacob corrects an issue where the ixgbevf_qv_disable function does not
set the disabled bit correctly.

From the community, Wei uses a type of struct for pci driver-specific
data in ixgbevf_suspend().

Don changes the way we store ring arrays in a manner that allows
support of multiple queues on multiple nodes and creates new ring
initialization functions for work previously done across multiple
functions - making the code closer to ixgbe and hopefully more readable.
He also fixes incorrect fiber eeprom write logic.
====================

Signed-off-by: default avatarDavid S. Miller <davem@davemloft.net>
parents 0864c158 d3cec927
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -626,7 +626,7 @@ static void ixgbe_set_fiber_fixed_speed(struct ixgbe_hw *hw,
		goto out;
	}

	eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) & rs;
	eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) | rs;

	status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB,
					    IXGBE_I2C_EEPROM_DEV_ADDR2,
+2 −0
Original line number Diff line number Diff line
@@ -6392,7 +6392,9 @@ static void ixgbe_reset_subtask(struct ixgbe_adapter *adapter)
	netdev_err(adapter->netdev, "Reset adapter\n");
	adapter->tx_timeout_count++;

	rtnl_lock();
	ixgbe_reinit_locked(adapter);
	rtnl_unlock();
}

/**
+17 −0
Original line number Diff line number Diff line
@@ -277,4 +277,21 @@ struct ixgbe_adv_tx_context_desc {
#define IXGBE_ERR_RESET_FAILED                  -2
#define IXGBE_ERR_INVALID_ARGUMENT              -3

/* Transmit Config masks */
#define IXGBE_TXDCTL_ENABLE		0x02000000 /* Ena specific Tx Queue */
#define IXGBE_TXDCTL_SWFLSH		0x04000000 /* Tx Desc. wr-bk flushing */
#define IXGBE_TXDCTL_WTHRESH_SHIFT	16	   /* shift to WTHRESH bits */

#define IXGBE_DCA_RXCTRL_DESC_DCA_EN	(1 << 5)  /* Rx Desc enable */
#define IXGBE_DCA_RXCTRL_HEAD_DCA_EN	(1 << 6)  /* Rx Desc header ena */
#define IXGBE_DCA_RXCTRL_DATA_DCA_EN	(1 << 7)  /* Rx Desc payload ena */
#define IXGBE_DCA_RXCTRL_DESC_RRO_EN	(1 << 9)  /* Rx rd Desc Relax Order */
#define IXGBE_DCA_RXCTRL_DATA_WRO_EN	(1 << 13) /* Rx wr data Relax Order */
#define IXGBE_DCA_RXCTRL_HEAD_WRO_EN	(1 << 15) /* Rx wr header RO */

#define IXGBE_DCA_TXCTRL_DESC_DCA_EN	(1 << 5)  /* DCA Tx Desc enable */
#define IXGBE_DCA_TXCTRL_DESC_RRO_EN	(1 << 9)  /* Tx rd Desc Relax Order */
#define IXGBE_DCA_TXCTRL_DESC_WRO_EN	(1 << 11) /* Tx Desc writeback RO bit */
#define IXGBE_DCA_TXCTRL_DATA_RRO_EN	(1 << 13) /* Tx rd data Relax Order */

#endif /* _IXGBEVF_DEFINES_H_ */
+14 −16
Original line number Diff line number Diff line
@@ -286,9 +286,9 @@ static int ixgbevf_set_ringparam(struct net_device *netdev,

	if (!netif_running(adapter->netdev)) {
		for (i = 0; i < adapter->num_tx_queues; i++)
			adapter->tx_ring[i].count = new_tx_count;
			adapter->tx_ring[i]->count = new_tx_count;
		for (i = 0; i < adapter->num_rx_queues; i++)
			adapter->rx_ring[i].count = new_rx_count;
			adapter->rx_ring[i]->count = new_rx_count;
		adapter->tx_ring_count = new_tx_count;
		adapter->rx_ring_count = new_rx_count;
		goto clear_reset;
@@ -303,7 +303,7 @@ static int ixgbevf_set_ringparam(struct net_device *netdev,

		for (i = 0; i < adapter->num_tx_queues; i++) {
			/* clone ring and setup updated count */
			tx_ring[i] = adapter->tx_ring[i];
			tx_ring[i] = *adapter->tx_ring[i];
			tx_ring[i].count = new_tx_count;
			err = ixgbevf_setup_tx_resources(adapter, &tx_ring[i]);
			if (!err)
@@ -329,7 +329,7 @@ static int ixgbevf_set_ringparam(struct net_device *netdev,

		for (i = 0; i < adapter->num_rx_queues; i++) {
			/* clone ring and setup updated count */
			rx_ring[i] = adapter->rx_ring[i];
			rx_ring[i] = *adapter->rx_ring[i];
			rx_ring[i].count = new_rx_count;
			err = ixgbevf_setup_rx_resources(adapter, &rx_ring[i]);
			if (!err)
@@ -352,9 +352,8 @@ static int ixgbevf_set_ringparam(struct net_device *netdev,
	/* Tx */
	if (tx_ring) {
		for (i = 0; i < adapter->num_tx_queues; i++) {
			ixgbevf_free_tx_resources(adapter,
						  &adapter->tx_ring[i]);
			adapter->tx_ring[i] = tx_ring[i];
			ixgbevf_free_tx_resources(adapter, adapter->tx_ring[i]);
			*adapter->tx_ring[i] = tx_ring[i];
		}
		adapter->tx_ring_count = new_tx_count;

@@ -365,9 +364,8 @@ static int ixgbevf_set_ringparam(struct net_device *netdev,
	/* Rx */
	if (rx_ring) {
		for (i = 0; i < adapter->num_rx_queues; i++) {
			ixgbevf_free_rx_resources(adapter,
						  &adapter->rx_ring[i]);
			adapter->rx_ring[i] = rx_ring[i];
			ixgbevf_free_rx_resources(adapter, adapter->rx_ring[i]);
			*adapter->rx_ring[i] = rx_ring[i];
		}
		adapter->rx_ring_count = new_rx_count;

@@ -413,15 +411,15 @@ static void ixgbevf_get_ethtool_stats(struct net_device *netdev,
	    tx_yields = 0, tx_cleaned = 0, tx_missed = 0;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		rx_yields += adapter->rx_ring[i].bp_yields;
		rx_cleaned += adapter->rx_ring[i].bp_cleaned;
		rx_yields += adapter->rx_ring[i].bp_yields;
		rx_yields += adapter->rx_ring[i]->bp_yields;
		rx_cleaned += adapter->rx_ring[i]->bp_cleaned;
		rx_yields += adapter->rx_ring[i]->bp_yields;
	}

	for (i = 0; i < adapter->num_tx_queues; i++) {
		tx_yields += adapter->tx_ring[i].bp_yields;
		tx_cleaned += adapter->tx_ring[i].bp_cleaned;
		tx_yields += adapter->tx_ring[i].bp_yields;
		tx_yields += adapter->tx_ring[i]->bp_yields;
		tx_cleaned += adapter->tx_ring[i]->bp_cleaned;
		tx_yields += adapter->tx_ring[i]->bp_yields;
	}

	adapter->bp_rx_yields = rx_yields;
+3 −2
Original line number Diff line number Diff line
@@ -260,6 +260,7 @@ static inline bool ixgbevf_qv_disable(struct ixgbevf_q_vector *q_vector)
	spin_lock_bh(&q_vector->lock);
	if (q_vector->state & IXGBEVF_QV_OWNED)
		rc = false;
	q_vector->state |= IXGBEVF_QV_STATE_DISABLED;
	spin_unlock_bh(&q_vector->lock);
	return rc;
}
@@ -326,7 +327,7 @@ struct ixgbevf_adapter {
	u32 eims_other;

	/* TX */
	struct ixgbevf_ring *tx_ring;	/* One per active queue */
	struct ixgbevf_ring *tx_ring[MAX_TX_QUEUES]; /* One per active queue */
	int num_tx_queues;
	u64 restart_queue;
	u64 hw_csum_tx_good;
@@ -336,7 +337,7 @@ struct ixgbevf_adapter {
	u32 tx_timeout_count;

	/* RX */
	struct ixgbevf_ring *rx_ring;	/* One per active queue */
	struct ixgbevf_ring *rx_ring[MAX_TX_QUEUES]; /* One per active queue */
	int num_rx_queues;
	u64 hw_csum_rx_error;
	u64 hw_rx_no_dma_resources;
Loading