Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit acb600de authored by Eric Dumazet's avatar Eric Dumazet Committed by David S. Miller
Browse files

net: remove skb recycling



Over time, skb recycling infrastructure got little interest and
many bugs. Generic rx path skb allocation is now using page
fragments for efficient GRO / TCP coalescing, and recycling
a tx skb for rx path is not worth the pain.

Last identified bug is that fat skbs can be recycled
and it can end up using high order pages after a few iterations.

With help from Maxime Bizon, who pointed out that commit
87151b86 (net: allow pskb_expand_head() to get maximum tailroom)
introduced this regression for recycled skbs.

Instead of fixing this bug, lets remove skb recycling.

Drivers wanting really hot skbs should use build_skb() anyway,
to allocate/populate sk_buff right before netif_receive_skb()

Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: Maxime Bizon <mbizon@freebox.fr>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 809d5fc9
Loading
Loading
Loading
Loading
+2 −17
Original line number Diff line number Diff line
@@ -375,7 +375,6 @@ struct xgmac_priv {
	unsigned int tx_tail;

	void __iomem *base;
	struct sk_buff_head rx_recycle;
	unsigned int dma_buf_sz;
	dma_addr_t dma_rx_phy;
	dma_addr_t dma_tx_phy;
@@ -672,8 +671,6 @@ static void xgmac_rx_refill(struct xgmac_priv *priv)
		p = priv->dma_rx + entry;

		if (priv->rx_skbuff[entry] == NULL) {
			skb = __skb_dequeue(&priv->rx_recycle);
			if (skb == NULL)
			skb = netdev_alloc_skb(priv->dev, priv->dma_buf_sz);
			if (unlikely(skb == NULL))
				break;
@@ -887,16 +884,6 @@ static void xgmac_tx_complete(struct xgmac_priv *priv)
				       desc_get_buf_len(p), DMA_TO_DEVICE);
		}

		/*
		 * If there's room in the queue (limit it to size)
		 * we add this skb back into the pool,
		 * if it's the right size.
		 */
		if ((skb_queue_len(&priv->rx_recycle) <
			DMA_RX_RING_SZ) &&
			skb_recycle_check(skb, priv->dma_buf_sz))
			__skb_queue_head(&priv->rx_recycle, skb);
		else
		dev_kfree_skb(skb);
	}

@@ -1016,7 +1003,6 @@ static int xgmac_open(struct net_device *dev)
			dev->dev_addr);
	}

	skb_queue_head_init(&priv->rx_recycle);
	memset(&priv->xstats, 0, sizeof(struct xgmac_extra_stats));

	/* Initialize the XGMAC and descriptors */
@@ -1053,7 +1039,6 @@ static int xgmac_stop(struct net_device *dev)
		napi_disable(&priv->napi);

	writel(0, priv->base + XGMAC_DMA_INTR_ENA);
	skb_queue_purge(&priv->rx_recycle);

	/* Disable the MAC core */
	xgmac_mac_disable(priv->base);
+4 −23
Original line number Diff line number Diff line
@@ -1765,7 +1765,6 @@ static void free_skb_resources(struct gfar_private *priv)
			  sizeof(struct rxbd8) * priv->total_rx_ring_size,
			  priv->tx_queue[0]->tx_bd_base,
			  priv->tx_queue[0]->tx_bd_dma_base);
	skb_queue_purge(&priv->rx_recycle);
}

void gfar_start(struct net_device *dev)
@@ -1943,8 +1942,6 @@ static int gfar_enet_open(struct net_device *dev)

	enable_napi(priv);

	skb_queue_head_init(&priv->rx_recycle);

	/* Initialize a bunch of registers */
	init_registers(dev);

@@ -2533,15 +2530,6 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)

		bytes_sent += skb->len;

		/* If there's room in the queue (limit it to rx_buffer_size)
		 * we add this skb back into the pool, if it's the right size
		 */
		if (skb_queue_len(&priv->rx_recycle) < rx_queue->rx_ring_size &&
		    skb_recycle_check(skb, priv->rx_buffer_size +
				      RXBUF_ALIGNMENT)) {
			gfar_align_skb(skb);
			skb_queue_head(&priv->rx_recycle, skb);
		} else
		dev_kfree_skb_any(skb);

		tx_queue->tx_skbuff[skb_dirtytx] = NULL;
@@ -2608,7 +2596,7 @@ static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
static struct sk_buff *gfar_alloc_skb(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct sk_buff *skb = NULL;
	struct sk_buff *skb;

	skb = netdev_alloc_skb(dev, priv->rx_buffer_size + RXBUF_ALIGNMENT);
	if (!skb)
@@ -2621,14 +2609,7 @@ static struct sk_buff *gfar_alloc_skb(struct net_device *dev)

struct sk_buff *gfar_new_skb(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct sk_buff *skb = NULL;

	skb = skb_dequeue(&priv->rx_recycle);
	if (!skb)
		skb = gfar_alloc_skb(dev);

	return skb;
	return gfar_alloc_skb(dev);
}

static inline void count_errors(unsigned short status, struct net_device *dev)
@@ -2787,7 +2768,7 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
			if (unlikely(!newskb))
				newskb = skb;
			else if (skb)
				skb_queue_head(&priv->rx_recycle, skb);
				dev_kfree_skb(skb);
		} else {
			/* Increment the number of packets */
			rx_queue->stats.rx_packets++;
+0 −2
Original line number Diff line number Diff line
@@ -1080,8 +1080,6 @@ struct gfar_private {

	u32 cur_filer_idx;

	struct sk_buff_head rx_recycle;

	/* RX queue filer rule set*/
	struct ethtool_rx_list rx_list;
	struct mutex rx_queue_access;
+6 −23
Original line number Diff line number Diff line
@@ -209,14 +209,12 @@ static struct list_head *dequeue(struct list_head *lh)
static struct sk_buff *get_new_skb(struct ucc_geth_private *ugeth,
		u8 __iomem *bd)
{
	struct sk_buff *skb = NULL;
	struct sk_buff *skb;

	skb = __skb_dequeue(&ugeth->rx_recycle);
	if (!skb)
	skb = netdev_alloc_skb(ugeth->ndev,
			       ugeth->ug_info->uf_info.max_rx_buf_length +
			       UCC_GETH_RX_DATA_BUF_ALIGNMENT);
	if (skb == NULL)
	if (!skb)
		return NULL;

	/* We need the data buffer to be aligned properly.  We will reserve
@@ -2020,8 +2018,6 @@ static void ucc_geth_memclean(struct ucc_geth_private *ugeth)
		iounmap(ugeth->ug_regs);
		ugeth->ug_regs = NULL;
	}

	skb_queue_purge(&ugeth->rx_recycle);
}

static void ucc_geth_set_multi(struct net_device *dev)
@@ -2230,8 +2226,6 @@ static int ucc_struct_init(struct ucc_geth_private *ugeth)
		return -ENOMEM;
	}

	skb_queue_head_init(&ugeth->rx_recycle);

	return 0;
}

@@ -3274,12 +3268,7 @@ static int ucc_geth_rx(struct ucc_geth_private *ugeth, u8 rxQ, int rx_work_limit
			if (netif_msg_rx_err(ugeth))
				ugeth_err("%s, %d: ERROR!!! skb - 0x%08x",
					   __func__, __LINE__, (u32) skb);
			if (skb) {
				skb->data = skb->head + NET_SKB_PAD;
				skb->len = 0;
				skb_reset_tail_pointer(skb);
				__skb_queue_head(&ugeth->rx_recycle, skb);
			}
			dev_free_skb(skb);

			ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]] = NULL;
			dev->stats.rx_dropped++;
@@ -3349,12 +3338,6 @@ static int ucc_geth_tx(struct net_device *dev, u8 txQ)

		dev->stats.tx_packets++;

		if (skb_queue_len(&ugeth->rx_recycle) < RX_BD_RING_LEN &&
			     skb_recycle_check(skb,
				    ugeth->ug_info->uf_info.max_rx_buf_length +
				    UCC_GETH_RX_DATA_BUF_ALIGNMENT))
			__skb_queue_head(&ugeth->rx_recycle, skb);
		else
		dev_kfree_skb(skb);

		ugeth->tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]] = NULL;
+0 −2
Original line number Diff line number Diff line
@@ -1214,8 +1214,6 @@ struct ucc_geth_private {
	/* index of the first skb which hasn't been transmitted yet. */
	u16 skb_dirtytx[NUM_TX_QUEUES];

	struct sk_buff_head rx_recycle;

	struct ugeth_mii_info *mii_info;
	struct phy_device *phydev;
	phy_interface_t phy_interface;
Loading