
Commit cd0b6fa6 authored by Alexander Duyck, committed by Jeff Kirsher

i40e: Replace ring container array with linked list



This replaces the ring container array with a linked list.  The idea is
to make the logic much easier to deal with, since each q_vector can now
walk all of its rings through a simple helper.

Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Signed-off-by: Jesse Brandeburg <jesse.brandeburg@intel.com>
Tested-by: Kavindya Deegala <kavindya.s.deegala@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
parent 493fb300
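
In short: each ring gains a next pointer, the ring container keeps only a
list head, and map_vector_to_qp() pushes rings onto the front of the list.
A minimal standalone sketch of that pattern, using hypothetical simplified
types rather than the driver's own (the real ones are in the i40e_txrx.h
hunk below):

#include <stddef.h>
#include <stdio.h>

/* hypothetical stand-ins for struct i40e_ring / i40e_ring_container */
struct ring {
	int id;
	struct ring *next;	/* replaces a slot in a fixed ring[] array */
};

struct ring_container {
	struct ring *ring;	/* head of the list; was an array of pointers */
	unsigned int count;
};

/* mirror of the push-front step in map_vector_to_qp() */
static void container_add(struct ring_container *c, struct ring *r)
{
	r->next = c->ring;
	c->ring = r;
	c->count++;
}

int main(void)
{
	struct ring r0 = { 0, NULL }, r1 = { 1, NULL };
	struct ring_container tx = { NULL, 0 };

	container_add(&tx, &r0);
	container_add(&tx, &r1);

	/* walking the list visits rings in reverse mapping order: 1, 0 */
	for (struct ring *pos = tx.ring; pos != NULL; pos = pos->next)
		printf("ring %d\n", pos->id);
	return 0;
}

The old array also capped a vector at I40E_MAX_RINGPAIR_PER_VECTOR (8) ring
pairs; the list removes that fixed bound, and iteration needs no index
arithmetic. Push-front means rings are visited in reverse of the order they
were mapped, which is harmless since every ring is treated the same.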
drivers/net/ethernet/intel/i40e/i40e_main.c +42 −42
@@ -2516,7 +2516,7 @@ static irqreturn_t i40e_msix_clean_rings(int irq, void *data)
 {
 	struct i40e_q_vector *q_vector = data;
 
-	if (!q_vector->tx.ring[0] && !q_vector->rx.ring[0])
+	if (!q_vector->tx.ring && !q_vector->rx.ring)
 		return IRQ_HANDLED;
 
 	napi_schedule(&q_vector->napi);
@@ -2533,7 +2533,7 @@ static irqreturn_t i40e_fdir_clean_rings(int irq, void *data)
 {
 	struct i40e_q_vector *q_vector = data;
 
-	if (!q_vector->tx.ring[0] && !q_vector->rx.ring[0])
+	if (!q_vector->tx.ring && !q_vector->rx.ring)
 		return IRQ_HANDLED;
 
 	pr_info("fdir ring cleaning needed\n");
@@ -2560,14 +2560,14 @@ static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
 	for (vector = 0; vector < q_vectors; vector++) {
 		struct i40e_q_vector *q_vector = vsi->q_vectors[vector];
 
-		if (q_vector->tx.ring[0] && q_vector->rx.ring[0]) {
+		if (q_vector->tx.ring && q_vector->rx.ring) {
 			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
 				 "%s-%s-%d", basename, "TxRx", rx_int_idx++);
 			tx_int_idx++;
-		} else if (q_vector->rx.ring[0]) {
+		} else if (q_vector->rx.ring) {
 			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
 				 "%s-%s-%d", basename, "rx", rx_int_idx++);
-		} else if (q_vector->tx.ring[0]) {
+		} else if (q_vector->tx.ring) {
 			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
 				 "%s-%s-%d", basename, "tx", tx_int_idx++);
 		} else {
@@ -2778,40 +2778,26 @@ static irqreturn_t i40e_intr(int irq, void *data)
 }
 
 /**
- * i40e_map_vector_to_rxq - Assigns the Rx queue to the vector
+ * i40e_map_vector_to_qp - Assigns the queue pair to the vector
  * @vsi: the VSI being configured
  * @v_idx: vector index
- * @r_idx: rx queue index
+ * @qp_idx: queue pair index
  **/
-static void map_vector_to_rxq(struct i40e_vsi *vsi, int v_idx, int r_idx)
+static void map_vector_to_qp(struct i40e_vsi *vsi, int v_idx, int qp_idx)
 {
 	struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
-	struct i40e_ring *rx_ring = &(vsi->rx_rings[r_idx]);
-
-	rx_ring->q_vector = q_vector;
-	q_vector->rx.ring[q_vector->rx.count] = rx_ring;
-	q_vector->rx.count++;
-	q_vector->rx.latency_range = I40E_LOW_LATENCY;
-	q_vector->vsi = vsi;
-}
-
-/**
- * i40e_map_vector_to_txq - Assigns the Tx queue to the vector
- * @vsi: the VSI being configured
- * @v_idx: vector index
- * @t_idx: tx queue index
- **/
-static void map_vector_to_txq(struct i40e_vsi *vsi, int v_idx, int t_idx)
-{
-	struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
-	struct i40e_ring *tx_ring = &(vsi->tx_rings[t_idx]);
+	struct i40e_ring *tx_ring = &(vsi->tx_rings[qp_idx]);
+	struct i40e_ring *rx_ring = &(vsi->rx_rings[qp_idx]);
 
 	tx_ring->q_vector = q_vector;
-	q_vector->tx.ring[q_vector->tx.count] = tx_ring;
+	tx_ring->next = q_vector->tx.ring;
+	q_vector->tx.ring = tx_ring;
 	q_vector->tx.count++;
-	q_vector->tx.latency_range = I40E_LOW_LATENCY;
-	q_vector->num_ringpairs++;
-	q_vector->vsi = vsi;
+
+	rx_ring->q_vector = q_vector;
+	rx_ring->next = q_vector->rx.ring;
+	q_vector->rx.ring = rx_ring;
+	q_vector->rx.count++;
 }
 
 /**
@@ -2827,7 +2813,7 @@ static void i40e_vsi_map_rings_to_vectors(struct i40e_vsi *vsi)
 {
 	int qp_remaining = vsi->num_queue_pairs;
 	int q_vectors = vsi->num_q_vectors;
-	int qp_per_vector;
+	int num_ringpairs;
 	int v_start = 0;
 	int qp_idx = 0;
 
@@ -2835,11 +2821,21 @@ static void i40e_vsi_map_rings_to_vectors(struct i40e_vsi *vsi)
 	 * group them so there are multiple queues per vector.
 	 */
 	for (; v_start < q_vectors && qp_remaining; v_start++) {
-		qp_per_vector = DIV_ROUND_UP(qp_remaining, q_vectors - v_start);
-		for (; qp_per_vector;
-		     qp_per_vector--, qp_idx++, qp_remaining--)	{
-			map_vector_to_rxq(vsi, v_start, qp_idx);
-			map_vector_to_txq(vsi, v_start, qp_idx);
+		struct i40e_q_vector *q_vector = vsi->q_vectors[v_start];
+
+		num_ringpairs = DIV_ROUND_UP(qp_remaining, q_vectors - v_start);
+
+		q_vector->num_ringpairs = num_ringpairs;
+
+		q_vector->rx.count = 0;
+		q_vector->tx.count = 0;
+		q_vector->rx.ring = NULL;
+		q_vector->tx.ring = NULL;
+
+		while (num_ringpairs--) {
+			map_vector_to_qp(vsi, v_start, qp_idx);
+			qp_idx++;
+			qp_remaining--;
 		}
 	}
 }
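
A quick standalone check of the DIV_ROUND_UP split used above (the macro
body matches the kernel's definition; the counts are made-up): 10 queue
pairs over 4 vectors come out as 3, 3, 2, 2, i.e. the remainder is
front-loaded one pair at a time.

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	int qp_remaining = 10, q_vectors = 4;

	for (int v = 0; v < q_vectors && qp_remaining; v++) {
		int num_ringpairs = DIV_ROUND_UP(qp_remaining, q_vectors - v);

		printf("vector %d: %d ringpairs\n", v, num_ringpairs);
		qp_remaining -= num_ringpairs;
	}
	return 0;
}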
@@ -3179,16 +3175,17 @@ static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
 static void i40e_free_q_vector(struct i40e_vsi *vsi, int v_idx)
 {
 	struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
-	int r_idx;
+	struct i40e_ring *ring;
 
 	if (!q_vector)
 		return;
 
 	/* disassociate q_vector from rings */
-	for (r_idx = 0; r_idx < q_vector->tx.count; r_idx++)
-		q_vector->tx.ring[r_idx]->q_vector = NULL;
-	for (r_idx = 0; r_idx < q_vector->rx.count; r_idx++)
-		q_vector->rx.ring[r_idx]->q_vector = NULL;
+	i40e_for_each_ring(ring, q_vector->tx)
+		ring->q_vector = NULL;
+
+	i40e_for_each_ring(ring, q_vector->rx)
+		ring->q_vector = NULL;
 
 	/* only VSI w/ an associated netdev is set up w/ NAPI */
 	if (vsi->netdev)
@@ -5312,6 +5309,9 @@ static int i40e_alloc_q_vector(struct i40e_vsi *vsi, int v_idx)
 		netif_napi_add(vsi->netdev, &q_vector->napi,
 			       i40e_napi_poll, vsi->work_limit);
 
+	q_vector->rx.latency_range = I40E_LOW_LATENCY;
+	q_vector->tx.latency_range = I40E_LOW_LATENCY;
+
 	/* tie q_vector and vsi together */
 	vsi->q_vectors[v_idx] = q_vector;
 
drivers/net/ethernet/intel/i40e/i40e_txrx.c +10 −9
@@ -1100,27 +1100,28 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
 	struct i40e_q_vector *q_vector =
 			       container_of(napi, struct i40e_q_vector, napi);
 	struct i40e_vsi *vsi = q_vector->vsi;
+	struct i40e_ring *ring;
 	bool clean_complete = true;
 	int budget_per_ring;
-	int i;
 
 	if (test_bit(__I40E_DOWN, &vsi->state)) {
 		napi_complete(napi);
 		return 0;
 	}
 
+	/* Since the actual Tx work is minimal, we can give the Tx a larger
+	 * budget and be more aggressive about cleaning up the Tx descriptors.
+	 */
+	i40e_for_each_ring(ring, q_vector->tx)
+		clean_complete &= i40e_clean_tx_irq(ring, vsi->work_limit);
+
 	/* We attempt to distribute budget to each Rx queue fairly, but don't
 	 * allow the budget to go below 1 because that would exit polling early.
-	 * Since the actual Tx work is minimal, we can give the Tx a larger
-	 * budget and be more aggressive about cleaning up the Tx descriptors.
 	 */
 	budget_per_ring = max(budget/q_vector->num_ringpairs, 1);
-	for (i = 0; i < q_vector->num_ringpairs; i++) {
-		clean_complete &= i40e_clean_tx_irq(q_vector->tx.ring[i],
-						    vsi->work_limit);
-		clean_complete &= i40e_clean_rx_irq(q_vector->rx.ring[i],
-						    budget_per_ring);
-	}
+
+	i40e_for_each_ring(ring, q_vector->rx)
+		clean_complete &= i40e_clean_rx_irq(ring, budget_per_ring);
 
 	/* If work not completed, return budget and polling will return */
 	if (!clean_complete)
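
The Rx budget arithmetic above is easy to sanity-check outside the kernel.
A small sketch with a local max() stand-in for the kernel macro and
illustrative ring counts; the point is the clamp to 1, which keeps a vector
with many rings from polling any ring with a zero budget:

#include <stdio.h>

#define max(a, b) ((a) > (b) ? (a) : (b))	/* stand-in for kernel max() */

int main(void)
{
	int budget = 64;	/* typical NAPI budget */
	int counts[] = { 1, 4, 100 };

	for (int i = 0; i < 3; i++)
		printf("%3d ringpairs -> %d budget per ring\n",
		       counts[i], max(budget / counts[i], 1));
	/* 1 -> 64, 4 -> 16, 100 -> 1 (clamped, never 0) */
	return 0;
}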
drivers/net/ethernet/intel/i40e/i40e_txrx.h +6 −2
@@ -180,6 +180,7 @@ enum i40e_ring_state_t {
 
 /* struct that defines a descriptor ring, associated with a VSI */
 struct i40e_ring {
+	struct i40e_ring *next;		/* pointer to next ring in q_vector */
 	void *desc;			/* Descriptor ring memory */
 	struct device *dev;		/* Used for DMA mapping */
 	struct net_device *netdev;	/* netdev ring maps to */
@@ -236,9 +237,8 @@ enum i40e_latency_range {
 };
 
 struct i40e_ring_container {
-#define I40E_MAX_RINGPAIR_PER_VECTOR 8
 	/* array of pointers to rings */
-	struct i40e_ring *ring[I40E_MAX_RINGPAIR_PER_VECTOR];
+	struct i40e_ring *ring;
 	unsigned int total_bytes;	/* total bytes processed this int */
 	unsigned int total_packets;	/* total packets processed this int */
 	u16 count;
@@ -246,6 +246,10 @@ struct i40e_ring_container {
 	u16 itr;
 };
 
+/* iterator for handling rings in ring container */
+#define i40e_for_each_ring(pos, head) \
+	for (pos = (head).ring; pos != NULL; pos = pos->next)
+
 void i40e_alloc_rx_buffers(struct i40e_ring *rxr, u16 cleaned_count);
 netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
 void i40e_clean_tx_ring(struct i40e_ring *tx_ring);
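
For reference, a minimal standalone use of the new iterator: the macro body
is copied from the hunk above, while the two-ring setup and the byte counter
are hypothetical scaffolding, not driver code.

#include <stddef.h>
#include <stdio.h>

/* simplified stand-ins; only the fields the iterator needs */
struct i40e_ring {
	unsigned int stats_bytes;
	struct i40e_ring *next;
};

struct i40e_ring_container {
	struct i40e_ring *ring;
};

/* iterator exactly as defined in i40e_txrx.h above */
#define i40e_for_each_ring(pos, head) \
	for (pos = (head).ring; pos != NULL; pos = pos->next)

int main(void)
{
	struct i40e_ring r1 = { 100, NULL };
	struct i40e_ring r0 = { 200, &r1 };
	struct i40e_ring_container tx = { &r0 };
	struct i40e_ring *ring;
	unsigned int total = 0;

	i40e_for_each_ring(ring, tx)	/* visits r0, then r1 */
		total += ring->stats_bytes;

	printf("total bytes: %u\n", total);	/* prints 300 */
	return 0;
}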