Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 58308451 authored by David S. Miller
Browse files


Jeff Kirsher says:

====================
This series contains updates to i40e only.

Alex provides the majority of the patches against i40e, where he does
cleanup of the Tx and Rx queues and to align the code with the known
good Tx/Rx queue code in the ixgbe driver.

Anjali provides an i40e patch to update link events to not print to
the log until the device is administratively up.

Catherine provides a patch to update the driver version.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
parents b44084c2 d04795d6
Loading
Loading
Loading
Loading
+6 −5
Original line number Diff line number Diff line
@@ -347,9 +347,9 @@ struct i40e_vsi {
	u32 rx_buf_failed;
	u32 rx_page_failed;

	/* These are arrays of rings, allocated at run-time */
	struct i40e_ring *rx_rings;
	struct i40e_ring *tx_rings;
	/* These are containers of ring pointers, allocated at run-time */
	struct i40e_ring **rx_rings;
	struct i40e_ring **tx_rings;

	u16 work_limit;
	/* high bit set means dynamic, use accessor routines to read/write.
@@ -366,7 +366,7 @@ struct i40e_vsi {
	u8  dtype;

	/* List of q_vectors allocated to this VSI */
	struct i40e_q_vector *q_vectors;
	struct i40e_q_vector **q_vectors;
	int num_q_vectors;
	int base_vector;

@@ -422,8 +422,9 @@ struct i40e_q_vector {

	u8 num_ringpairs;	/* total number of ring pairs in vector */

	char name[IFNAMSIZ + 9];
	cpumask_t affinity_mask;
	struct rcu_head rcu;	/* to avoid race with update stats on free */
	char name[IFNAMSIZ + 9];
} ____cacheline_internodealigned_in_smp;

/* lan device */
+101 −106
Original line number Diff line number Diff line
@@ -258,12 +258,12 @@ static ssize_t i40e_dbg_dump_write(struct file *filp,

			for (i = 0; i < vsi->num_queue_pairs; i++) {
				len = sizeof(struct i40e_tx_buffer);
				memcpy(p, vsi->tx_rings[i].tx_bi, len);
				memcpy(p, vsi->tx_rings[i]->tx_bi, len);
				p += len;
			}
			for (i = 0; i < vsi->num_queue_pairs; i++) {
				len = sizeof(struct i40e_rx_buffer);
				memcpy(p, vsi->rx_rings[i].rx_bi, len);
				memcpy(p, vsi->rx_rings[i]->rx_bi, len);
				p += len;
			}

@@ -484,100 +484,104 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
		 "    tx_restart = %d, tx_busy = %d, rx_buf_failed = %d, rx_page_failed = %d\n",
		 vsi->tx_restart, vsi->tx_busy,
		 vsi->rx_buf_failed, vsi->rx_page_failed);
	if (vsi->rx_rings) {
	rcu_read_lock();
	for (i = 0; i < vsi->num_queue_pairs; i++) {
		struct i40e_ring *rx_ring = ACCESS_ONCE(vsi->rx_rings[i]);
		if (!rx_ring)
			continue;

		dev_info(&pf->pdev->dev,
			 "    rx_rings[%i]: desc = %p\n",
				 i, vsi->rx_rings[i].desc);
			 i, rx_ring->desc);
		dev_info(&pf->pdev->dev,
			 "    rx_rings[%i]: dev = %p, netdev = %p, rx_bi = %p\n",
				 i, vsi->rx_rings[i].dev,
				 vsi->rx_rings[i].netdev,
				 vsi->rx_rings[i].rx_bi);
			 i, rx_ring->dev,
			 rx_ring->netdev,
			 rx_ring->rx_bi);
		dev_info(&pf->pdev->dev,
			 "    rx_rings[%i]: state = %li, queue_index = %d, reg_idx = %d\n",
				 i, vsi->rx_rings[i].state,
				 vsi->rx_rings[i].queue_index,
				 vsi->rx_rings[i].reg_idx);
			 i, rx_ring->state,
			 rx_ring->queue_index,
			 rx_ring->reg_idx);
		dev_info(&pf->pdev->dev,
			 "    rx_rings[%i]: rx_hdr_len = %d, rx_buf_len = %d, dtype = %d\n",
				 i, vsi->rx_rings[i].rx_hdr_len,
				 vsi->rx_rings[i].rx_buf_len,
				 vsi->rx_rings[i].dtype);
			 i, rx_ring->rx_hdr_len,
			 rx_ring->rx_buf_len,
			 rx_ring->dtype);
		dev_info(&pf->pdev->dev,
			 "    rx_rings[%i]: hsplit = %d, next_to_use = %d, next_to_clean = %d, ring_active = %i\n",
				 i, vsi->rx_rings[i].hsplit,
				 vsi->rx_rings[i].next_to_use,
				 vsi->rx_rings[i].next_to_clean,
				 vsi->rx_rings[i].ring_active);
			 i, rx_ring->hsplit,
			 rx_ring->next_to_use,
			 rx_ring->next_to_clean,
			 rx_ring->ring_active);
		dev_info(&pf->pdev->dev,
			 "    rx_rings[%i]: rx_stats: packets = %lld, bytes = %lld, non_eop_descs = %lld\n",
				 i, vsi->rx_rings[i].rx_stats.packets,
				 vsi->rx_rings[i].rx_stats.bytes,
				 vsi->rx_rings[i].rx_stats.non_eop_descs);
			 i, rx_ring->stats.packets,
			 rx_ring->stats.bytes,
			 rx_ring->rx_stats.non_eop_descs);
		dev_info(&pf->pdev->dev,
			 "    rx_rings[%i]: rx_stats: alloc_rx_page_failed = %lld, alloc_rx_buff_failed = %lld\n",
			 i,
				 vsi->rx_rings[i].rx_stats.alloc_rx_page_failed,
				vsi->rx_rings[i].rx_stats.alloc_rx_buff_failed);
			 rx_ring->rx_stats.alloc_rx_page_failed,
			rx_ring->rx_stats.alloc_rx_buff_failed);
		dev_info(&pf->pdev->dev,
			 "    rx_rings[%i]: size = %i, dma = 0x%08lx\n",
				 i, vsi->rx_rings[i].size,
				 (long unsigned int)vsi->rx_rings[i].dma);
			 i, rx_ring->size,
			 (long unsigned int)rx_ring->dma);
		dev_info(&pf->pdev->dev,
			 "    rx_rings[%i]: vsi = %p, q_vector = %p\n",
				 i, vsi->rx_rings[i].vsi,
				 vsi->rx_rings[i].q_vector);
		}
			 i, rx_ring->vsi,
			 rx_ring->q_vector);
	}
	if (vsi->tx_rings) {
	for (i = 0; i < vsi->num_queue_pairs; i++) {
		struct i40e_ring *tx_ring = ACCESS_ONCE(vsi->tx_rings[i]);
		if (!tx_ring)
			continue;
		dev_info(&pf->pdev->dev,
			 "    tx_rings[%i]: desc = %p\n",
				 i, vsi->tx_rings[i].desc);
			 i, tx_ring->desc);
		dev_info(&pf->pdev->dev,
			 "    tx_rings[%i]: dev = %p, netdev = %p, tx_bi = %p\n",
				 i, vsi->tx_rings[i].dev,
				 vsi->tx_rings[i].netdev,
				 vsi->tx_rings[i].tx_bi);
			 i, tx_ring->dev,
			 tx_ring->netdev,
			 tx_ring->tx_bi);
		dev_info(&pf->pdev->dev,
			 "    tx_rings[%i]: state = %li, queue_index = %d, reg_idx = %d\n",
				 i, vsi->tx_rings[i].state,
				 vsi->tx_rings[i].queue_index,
				 vsi->tx_rings[i].reg_idx);
			 i, tx_ring->state,
			 tx_ring->queue_index,
			 tx_ring->reg_idx);
		dev_info(&pf->pdev->dev,
			 "    tx_rings[%i]: dtype = %d\n",
				 i, vsi->tx_rings[i].dtype);
			 i, tx_ring->dtype);
		dev_info(&pf->pdev->dev,
			 "    tx_rings[%i]: hsplit = %d, next_to_use = %d, next_to_clean = %d, ring_active = %i\n",
				 i, vsi->tx_rings[i].hsplit,
				 vsi->tx_rings[i].next_to_use,
				 vsi->tx_rings[i].next_to_clean,
				 vsi->tx_rings[i].ring_active);
			 i, tx_ring->hsplit,
			 tx_ring->next_to_use,
			 tx_ring->next_to_clean,
			 tx_ring->ring_active);
		dev_info(&pf->pdev->dev,
			 "    tx_rings[%i]: tx_stats: packets = %lld, bytes = %lld, restart_queue = %lld\n",
				 i, vsi->tx_rings[i].tx_stats.packets,
				 vsi->tx_rings[i].tx_stats.bytes,
				 vsi->tx_rings[i].tx_stats.restart_queue);
			 i, tx_ring->stats.packets,
			 tx_ring->stats.bytes,
			 tx_ring->tx_stats.restart_queue);
		dev_info(&pf->pdev->dev,
				 "    tx_rings[%i]: tx_stats: tx_busy = %lld, completed = %lld, tx_done_old = %lld\n",
			 "    tx_rings[%i]: tx_stats: tx_busy = %lld, tx_done_old = %lld\n",
			 i,
				 vsi->tx_rings[i].tx_stats.tx_busy,
				 vsi->tx_rings[i].tx_stats.completed,
				 vsi->tx_rings[i].tx_stats.tx_done_old);
			 tx_ring->tx_stats.tx_busy,
			 tx_ring->tx_stats.tx_done_old);
		dev_info(&pf->pdev->dev,
			 "    tx_rings[%i]: size = %i, dma = 0x%08lx\n",
				 i, vsi->tx_rings[i].size,
				 (long unsigned int)vsi->tx_rings[i].dma);
			 i, tx_ring->size,
			 (long unsigned int)tx_ring->dma);
		dev_info(&pf->pdev->dev,
			 "    tx_rings[%i]: vsi = %p, q_vector = %p\n",
				 i, vsi->tx_rings[i].vsi,
				 vsi->tx_rings[i].q_vector);
			 i, tx_ring->vsi,
			 tx_ring->q_vector);
		dev_info(&pf->pdev->dev,
			 "    tx_rings[%i]: DCB tc = %d\n",
				 i, vsi->tx_rings[i].dcb_tc);
		}
			 i, tx_ring->dcb_tc);
	}
	rcu_read_unlock();
	dev_info(&pf->pdev->dev,
		 "    work_limit = %d, rx_itr_setting = %d (%s), tx_itr_setting = %d (%s)\n",
		 vsi->work_limit, vsi->rx_itr_setting,
@@ -587,15 +591,6 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
	dev_info(&pf->pdev->dev,
		 "    max_frame = %d, rx_hdr_len = %d, rx_buf_len = %d dtype = %d\n",
		 vsi->max_frame, vsi->rx_hdr_len, vsi->rx_buf_len, vsi->dtype);
	if (vsi->q_vectors) {
		for (i = 0; i < vsi->num_q_vectors; i++) {
			dev_info(&pf->pdev->dev,
				 "    q_vectors[%i]: base index = %ld\n",
				 i, ((long int)*vsi->q_vectors[i].rx.ring-
					(long int)*vsi->q_vectors[0].rx.ring)/
					sizeof(struct i40e_ring));
		}
	}
	dev_info(&pf->pdev->dev,
		 "    num_q_vectors = %i, base_vector = %i\n",
		 vsi->num_q_vectors, vsi->base_vector);
@@ -792,9 +787,9 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n,
		return;
	}
	if (is_rx_ring)
		ring = vsi->rx_rings[ring_id];
		ring = *vsi->rx_rings[ring_id];
	else
		ring = vsi->tx_rings[ring_id];
		ring = *vsi->tx_rings[ring_id];
	if (cnt == 2) {
		dev_info(&pf->pdev->dev, "vsi = %02i %s ring = %02i\n",
			 vsi_seid, is_rx_ring ? "rx" : "tx", ring_id);
@@ -1996,7 +1991,7 @@ static ssize_t i40e_dbg_netdev_ops_write(struct file *filp,
			goto netdev_ops_write_done;
		}
		for (i = 0; i < vsi->num_q_vectors; i++)
			napi_schedule(&vsi->q_vectors[i].napi);
			napi_schedule(&vsi->q_vectors[i]->napi);
		dev_info(&pf->pdev->dev, "napi called\n");
	} else {
		dev_info(&pf->pdev->dev, "unknown command '%s'\n",
+42 −27
Original line number Diff line number Diff line
@@ -399,8 +399,8 @@ static void i40e_get_ringparam(struct net_device *netdev,
	ring->tx_max_pending = I40E_MAX_NUM_DESCRIPTORS;
	ring->rx_mini_max_pending = 0;
	ring->rx_jumbo_max_pending = 0;
	ring->rx_pending = vsi->rx_rings[0].count;
	ring->tx_pending = vsi->tx_rings[0].count;
	ring->rx_pending = vsi->rx_rings[0]->count;
	ring->tx_pending = vsi->tx_rings[0]->count;
	ring->rx_mini_pending = 0;
	ring->rx_jumbo_pending = 0;
}
@@ -429,8 +429,8 @@ static int i40e_set_ringparam(struct net_device *netdev,
	new_rx_count = ALIGN(new_rx_count, I40E_REQ_DESCRIPTOR_MULTIPLE);

	/* if nothing to do return success */
	if ((new_tx_count == vsi->tx_rings[0].count) &&
	    (new_rx_count == vsi->rx_rings[0].count))
	if ((new_tx_count == vsi->tx_rings[0]->count) &&
	    (new_rx_count == vsi->rx_rings[0]->count))
		return 0;

	while (test_and_set_bit(__I40E_CONFIG_BUSY, &pf->state))
@@ -439,8 +439,8 @@ static int i40e_set_ringparam(struct net_device *netdev,
	if (!netif_running(vsi->netdev)) {
		/* simple case - set for the next time the netdev is started */
		for (i = 0; i < vsi->num_queue_pairs; i++) {
			vsi->tx_rings[i].count = new_tx_count;
			vsi->rx_rings[i].count = new_rx_count;
			vsi->tx_rings[i]->count = new_tx_count;
			vsi->rx_rings[i]->count = new_rx_count;
		}
		goto done;
	}
@@ -451,10 +451,10 @@ static int i40e_set_ringparam(struct net_device *netdev,
	 */

	/* alloc updated Tx resources */
	if (new_tx_count != vsi->tx_rings[0].count) {
	if (new_tx_count != vsi->tx_rings[0]->count) {
		netdev_info(netdev,
			    "Changing Tx descriptor count from %d to %d.\n",
			    vsi->tx_rings[0].count, new_tx_count);
			    vsi->tx_rings[0]->count, new_tx_count);
		tx_rings = kcalloc(vsi->alloc_queue_pairs,
				   sizeof(struct i40e_ring), GFP_KERNEL);
		if (!tx_rings) {
@@ -464,7 +464,7 @@ static int i40e_set_ringparam(struct net_device *netdev,

		for (i = 0; i < vsi->num_queue_pairs; i++) {
			/* clone ring and setup updated count */
			tx_rings[i] = vsi->tx_rings[i];
			tx_rings[i] = *vsi->tx_rings[i];
			tx_rings[i].count = new_tx_count;
			err = i40e_setup_tx_descriptors(&tx_rings[i]);
			if (err) {
@@ -481,10 +481,10 @@ static int i40e_set_ringparam(struct net_device *netdev,
	}

	/* alloc updated Rx resources */
	if (new_rx_count != vsi->rx_rings[0].count) {
	if (new_rx_count != vsi->rx_rings[0]->count) {
		netdev_info(netdev,
			    "Changing Rx descriptor count from %d to %d\n",
			    vsi->rx_rings[0].count, new_rx_count);
			    vsi->rx_rings[0]->count, new_rx_count);
		rx_rings = kcalloc(vsi->alloc_queue_pairs,
				   sizeof(struct i40e_ring), GFP_KERNEL);
		if (!rx_rings) {
@@ -494,7 +494,7 @@ static int i40e_set_ringparam(struct net_device *netdev,

		for (i = 0; i < vsi->num_queue_pairs; i++) {
			/* clone ring and setup updated count */
			rx_rings[i] = vsi->rx_rings[i];
			rx_rings[i] = *vsi->rx_rings[i];
			rx_rings[i].count = new_rx_count;
			err = i40e_setup_rx_descriptors(&rx_rings[i]);
			if (err) {
@@ -517,8 +517,8 @@ static int i40e_set_ringparam(struct net_device *netdev,

	if (tx_rings) {
		for (i = 0; i < vsi->num_queue_pairs; i++) {
			i40e_free_tx_resources(&vsi->tx_rings[i]);
			vsi->tx_rings[i] = tx_rings[i];
			i40e_free_tx_resources(vsi->tx_rings[i]);
			*vsi->tx_rings[i] = tx_rings[i];
		}
		kfree(tx_rings);
		tx_rings = NULL;
@@ -526,8 +526,8 @@ static int i40e_set_ringparam(struct net_device *netdev,

	if (rx_rings) {
		for (i = 0; i < vsi->num_queue_pairs; i++) {
			i40e_free_rx_resources(&vsi->rx_rings[i]);
			vsi->rx_rings[i] = rx_rings[i];
			i40e_free_rx_resources(vsi->rx_rings[i]);
			*vsi->rx_rings[i] = rx_rings[i];
		}
		kfree(rx_rings);
		rx_rings = NULL;
@@ -579,6 +579,7 @@ static void i40e_get_ethtool_stats(struct net_device *netdev,
	char *p;
	int j;
	struct rtnl_link_stats64 *net_stats = i40e_get_vsi_stats_struct(vsi);
	unsigned int start;

	i40e_update_stats(vsi);

@@ -587,14 +588,30 @@ static void i40e_get_ethtool_stats(struct net_device *netdev,
		data[i++] = (i40e_gstrings_net_stats[j].sizeof_stat ==
			sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
	}
	for (j = 0; j < vsi->num_queue_pairs; j++) {
		data[i++] = vsi->tx_rings[j].tx_stats.packets;
		data[i++] = vsi->tx_rings[j].tx_stats.bytes;
	}
	for (j = 0; j < vsi->num_queue_pairs; j++) {
		data[i++] = vsi->rx_rings[j].rx_stats.packets;
		data[i++] = vsi->rx_rings[j].rx_stats.bytes;
	}
	rcu_read_lock();
	for (j = 0; j < vsi->num_queue_pairs; j++, i += 4) {
		struct i40e_ring *tx_ring = ACCESS_ONCE(vsi->tx_rings[j]);
		struct i40e_ring *rx_ring;

		if (!tx_ring)
			continue;

		/* process Tx ring statistics */
		do {
			start = u64_stats_fetch_begin_bh(&tx_ring->syncp);
			data[i] = tx_ring->stats.packets;
			data[i + 1] = tx_ring->stats.bytes;
		} while (u64_stats_fetch_retry_bh(&tx_ring->syncp, start));

		/* Rx ring is the 2nd half of the queue pair */
		rx_ring = &tx_ring[1];
		do {
			start = u64_stats_fetch_begin_bh(&rx_ring->syncp);
			data[i + 2] = rx_ring->stats.packets;
			data[i + 3] = rx_ring->stats.bytes;
		} while (u64_stats_fetch_retry_bh(&rx_ring->syncp, start));
	}
	rcu_read_unlock();
	if (vsi == pf->vsi[pf->lan_vsi]) {
		for (j = 0; j < I40E_GLOBAL_STATS_LEN; j++) {
			p = (char *)pf + i40e_gstrings_stats[j].stat_offset;
@@ -641,8 +658,6 @@ static void i40e_get_strings(struct net_device *netdev, u32 stringset,
			p += ETH_GSTRING_LEN;
			snprintf(p, ETH_GSTRING_LEN, "tx-%u.tx_bytes", i);
			p += ETH_GSTRING_LEN;
		}
		for (i = 0; i < vsi->num_queue_pairs; i++) {
			snprintf(p, ETH_GSTRING_LEN, "rx-%u.rx_packets", i);
			p += ETH_GSTRING_LEN;
			snprintf(p, ETH_GSTRING_LEN, "rx-%u.rx_bytes", i);
@@ -910,8 +925,8 @@ static int i40e_set_coalesce(struct net_device *netdev,
	}

	vector = vsi->base_vector;
	q_vector = vsi->q_vectors;
	for (i = 0; i < vsi->num_q_vectors; i++, vector++, q_vector++) {
	for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
		q_vector = vsi->q_vectors[i];
		q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
		wr32(hw, I40E_PFINT_ITRN(0, vector - 1), q_vector->rx.itr);
		q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting);
+289 −153

File changed.

Preview size limit exceeded, changes collapsed.

+192 −166

File changed.

Preview size limit exceeded, changes collapsed.

Loading