Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 41b0cd36 authored by Zhu Yanjun's avatar Zhu Yanjun Committed by David S. Miller
Browse files

forcedeth: remove duplicate structure member in xmit



Since both first_tx_ctx and tx_skb point to the head of the tx ctx, it is not
necessary to use two structure members to statically indicate
the head of the tx ctx. So first_tx_ctx is removed.

CC: Srinivas Eeda <srinivas.eeda@oracle.com>
CC: Joe Jin <joe.jin@oracle.com>
CC: Junxiao Bi <junxiao.bi@oracle.com>
Signed-off-by: Zhu Yanjun <yanjun.zhu@oracle.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent e9c5a106
Loading
Loading
Loading
Loading
+13 −12
Original line number Diff line number Diff line
@@ -824,7 +824,7 @@ struct fe_priv {
	 */
	union ring_type get_tx, put_tx, last_tx;
	struct nv_skb_map *get_tx_ctx, *put_tx_ctx;
	struct nv_skb_map *first_tx_ctx, *last_tx_ctx;
	struct nv_skb_map *last_tx_ctx;
	struct nv_skb_map *tx_skb;

	union ring_type tx_ring;
@@ -1939,7 +1939,8 @@ static void nv_init_tx(struct net_device *dev)
		np->last_tx.orig = &np->tx_ring.orig[np->tx_ring_size-1];
	else
		np->last_tx.ex = &np->tx_ring.ex[np->tx_ring_size-1];
	np->get_tx_ctx = np->put_tx_ctx = np->first_tx_ctx = np->tx_skb;
	np->get_tx_ctx = np->tx_skb;
	np->put_tx_ctx = np->tx_skb;
	np->last_tx_ctx = &np->tx_skb[np->tx_ring_size-1];
	netdev_reset_queue(np->dev);
	np->tx_pkts_in_progress = 0;
@@ -2251,7 +2252,7 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
		if (unlikely(put_tx++ == np->last_tx.orig))
			put_tx = np->tx_ring.orig;
		if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
			np->put_tx_ctx = np->first_tx_ctx;
			np->put_tx_ctx = np->tx_skb;
	} while (size);

	/* setup the fragments */
@@ -2277,7 +2278,7 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
				do {
					nv_unmap_txskb(np, start_tx_ctx);
					if (unlikely(tmp_tx_ctx++ == np->last_tx_ctx))
						tmp_tx_ctx = np->first_tx_ctx;
						tmp_tx_ctx = np->tx_skb;
				} while (tmp_tx_ctx != np->put_tx_ctx);
				dev_kfree_skb_any(skb);
				np->put_tx_ctx = start_tx_ctx;
@@ -2297,7 +2298,7 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
			if (unlikely(put_tx++ == np->last_tx.orig))
				put_tx = np->tx_ring.orig;
			if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
				np->put_tx_ctx = np->first_tx_ctx;
				np->put_tx_ctx = np->tx_skb;
		} while (frag_size);
	}

@@ -2306,7 +2307,7 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
	else
		prev_tx = put_tx - 1;

	if (unlikely(np->put_tx_ctx == np->first_tx_ctx))
	if (unlikely(np->put_tx_ctx == np->tx_skb))
		prev_tx_ctx = np->last_tx_ctx;
	else
		prev_tx_ctx = np->put_tx_ctx - 1;
@@ -2409,7 +2410,7 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
		if (unlikely(put_tx++ == np->last_tx.ex))
			put_tx = np->tx_ring.ex;
		if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
			np->put_tx_ctx = np->first_tx_ctx;
			np->put_tx_ctx = np->tx_skb;
	} while (size);

	/* setup the fragments */
@@ -2435,7 +2436,7 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
				do {
					nv_unmap_txskb(np, start_tx_ctx);
					if (unlikely(tmp_tx_ctx++ == np->last_tx_ctx))
						tmp_tx_ctx = np->first_tx_ctx;
						tmp_tx_ctx = np->tx_skb;
				} while (tmp_tx_ctx != np->put_tx_ctx);
				dev_kfree_skb_any(skb);
				np->put_tx_ctx = start_tx_ctx;
@@ -2455,7 +2456,7 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
			if (unlikely(put_tx++ == np->last_tx.ex))
				put_tx = np->tx_ring.ex;
			if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
				np->put_tx_ctx = np->first_tx_ctx;
				np->put_tx_ctx = np->tx_skb;
		} while (frag_size);
	}

@@ -2464,7 +2465,7 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
	else
		prev_tx = put_tx - 1;

	if (unlikely(np->put_tx_ctx == np->first_tx_ctx))
	if (unlikely(np->put_tx_ctx == np->tx_skb))
		prev_tx_ctx = np->last_tx_ctx;
	else
		prev_tx_ctx = np->put_tx_ctx - 1;
@@ -2600,7 +2601,7 @@ static int nv_tx_done(struct net_device *dev, int limit)
		if (unlikely(np->get_tx.orig++ == np->last_tx.orig))
			np->get_tx.orig = np->tx_ring.orig;
		if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx))
			np->get_tx_ctx = np->first_tx_ctx;
			np->get_tx_ctx = np->tx_skb;
	}

	netdev_completed_queue(np->dev, tx_work, bytes_compl);
@@ -2654,7 +2655,7 @@ static int nv_tx_done_optimized(struct net_device *dev, int limit)
		if (unlikely(np->get_tx.ex++ == np->last_tx.ex))
			np->get_tx.ex = np->tx_ring.ex;
		if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx))
			np->get_tx_ctx = np->first_tx_ctx;
			np->get_tx_ctx = np->tx_skb;
	}

	netdev_completed_queue(np->dev, tx_work, bytes_cleaned);