
Commit 4fc9b87b authored by Christophe Leroy, committed by David S. Miller

net: fs_enet: Implement NETIF_F_SG feature



Freescale ethernet controllers can re-assemble fragmented data into a single
ethernet frame. This patch uses this capability to implement the NETIF_F_SG
feature in the fs_enet ethernet driver.

On an MPC885, I get a 53% performance improvement on an FTP transfer of a 15MB file:
  * Without the patch: 2.8 Mbps
  * With the patch: 4.3 Mbps
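
As background (an explanatory aside, not part of the original commit message):
the scatter-gather transmit pattern this patch implements in
fs_enet_start_xmit() is sketched below. The linear part of the skb is mapped
with dma_map_single() and each page fragment with skb_frag_dma_map(), each
getting its own buffer descriptor; only the last descriptor closes the frame.
queue_bd() is a hypothetical helper standing in for the driver's
CBDW_BUFADDR/CBDW_DATLEN/CBDS_SC descriptor writes.

	#include <linux/netdevice.h>
	#include <linux/skbuff.h>
	#include <linux/dma-mapping.h>

	/* Hypothetical helper: fills one buffer descriptor (stands in for
	 * the driver's CBDW_BUFADDR/CBDW_DATLEN/CBDS_SC sequence). */
	static void queue_bd(struct net_device *dev, dma_addr_t addr,
			     unsigned int len, bool last);

	/* Illustrative sketch of a scatter-gather transmit walk. */
	static netdev_tx_t sg_xmit_sketch(struct sk_buff *skb,
					  struct net_device *dev)
	{
		struct device *dma_dev = dev->dev.parent;
		int nr_frags = skb_shinfo(skb)->nr_frags;
		unsigned int len = skb_headlen(skb);	/* linear part only */
		int i;

		/* One descriptor for the linear area of the skb... */
		queue_bd(dev, dma_map_single(dma_dev, skb->data, len,
					     DMA_TO_DEVICE),
			 len, nr_frags == 0);

		/* ...plus one descriptor per page fragment. */
		for (i = 0; i < nr_frags; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			len = skb_frag_size(frag);
			queue_bd(dev, skb_frag_dma_map(dma_dev, frag, 0, len,
						       DMA_TO_DEVICE),
				 len, i == nr_frags - 1);
		}
		return NETDEV_TX_OK;
	}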

Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 2bd82484
drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c  +65 −30
@@ -278,14 +278,20 @@ static int fs_enet_tx_napi(struct napi_struct *napi, int budget)
 			fep->stats.collisions++;
 
 		/* unmap */
-		dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
-				skb->len, DMA_TO_DEVICE);
+		if (fep->mapped_as_page[dirtyidx])
+			dma_unmap_page(fep->dev, CBDR_BUFADDR(bdp),
+				       CBDR_DATLEN(bdp), DMA_TO_DEVICE);
+		else
+			dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
+					 CBDR_DATLEN(bdp), DMA_TO_DEVICE);
 
 		/*
 		 * Free the sk buffer associated with this last transmit.
 		 */
-		dev_kfree_skb(skb);
-		fep->tx_skbuff[dirtyidx] = NULL;
+		if (skb) {
+			dev_kfree_skb(skb);
+			fep->tx_skbuff[dirtyidx] = NULL;
+		}
 
 		/*
 		 * Update pointer to next buffer descriptor to be transmitted.
@@ -299,7 +305,7 @@ static int fs_enet_tx_napi(struct napi_struct *napi, int budget)
 		 * Since we have freed up a buffer, the ring is no longer
 		 * full.
 		 */
-		if (!fep->tx_free++)
+		if (++fep->tx_free >= MAX_SKB_FRAGS)
 			do_wake = 1;
 		has_tx_work = 1;
 	}
@@ -509,6 +515,9 @@ static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	cbd_t __iomem *bdp;
 	int curidx;
 	u16 sc;
+	int nr_frags = skb_shinfo(skb)->nr_frags;
+	skb_frag_t *frag;
+	int len;
 
 #ifdef CONFIG_FS_ENET_MPC5121_FEC
 	if (((unsigned long)skb->data) & 0x3) {
@@ -530,7 +539,7 @@ static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	 */
 	bdp = fep->cur_tx;
 
-	if (!fep->tx_free || (CBDR_SC(bdp) & BD_ENET_TX_READY)) {
+	if (fep->tx_free <= nr_frags || (CBDR_SC(bdp) & BD_ENET_TX_READY)) {
 		netif_stop_queue(dev);
 		spin_unlock(&fep->tx_lock);
 
@@ -543,35 +552,42 @@ static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	}
 
 	curidx = bdp - fep->tx_bd_base;
-	/*
-	 * Clear all of the status flags.
-	 */
-	CBDC_SC(bdp, BD_ENET_TX_STATS);
-
-	/*
-	 * Save skb pointer.
-	 */
-	fep->tx_skbuff[curidx] = skb;
-
-	fep->stats.tx_bytes += skb->len;
 
+	len = skb->len;
+	fep->stats.tx_bytes += len;
+	if (nr_frags)
+		len -= skb->data_len;
+	fep->tx_free -= nr_frags + 1;
 	/*
 	 * Push the data cache so the CPM does not get stale memory data.
 	 */
 	CBDW_BUFADDR(bdp, dma_map_single(fep->dev,
-				skb->data, skb->len, DMA_TO_DEVICE));
-	CBDW_DATLEN(bdp, skb->len);
+				skb->data, len, DMA_TO_DEVICE));
+	CBDW_DATLEN(bdp, len);
+
+	fep->mapped_as_page[curidx] = 0;
+	frag = skb_shinfo(skb)->frags;
+	while (nr_frags) {
+		CBDC_SC(bdp,
+			BD_ENET_TX_STATS | BD_ENET_TX_LAST | BD_ENET_TX_TC);
+		CBDS_SC(bdp, BD_ENET_TX_READY);
 
-	/*
-	 * If this was the last BD in the ring, start at the beginning again.
-	 */
-	if ((CBDR_SC(bdp) & BD_ENET_TX_WRAP) == 0)
-		fep->cur_tx++;
-	else
-		fep->cur_tx = fep->tx_bd_base;
+		if ((CBDR_SC(bdp) & BD_ENET_TX_WRAP) == 0)
+			bdp++, curidx++;
+		else
+			bdp = fep->tx_bd_base, curidx = 0;
 
-	if (!--fep->tx_free)
-		netif_stop_queue(dev);
+		len = skb_frag_size(frag);
+		CBDW_BUFADDR(bdp, skb_frag_dma_map(fep->dev, frag, 0, len,
+						   DMA_TO_DEVICE));
+		CBDW_DATLEN(bdp, len);
+
+		fep->tx_skbuff[curidx] = NULL;
+		fep->mapped_as_page[curidx] = 1;
+
+		frag++;
+		nr_frags--;
+	}
 
 	/* Trigger transmission start */
 	sc = BD_ENET_TX_READY | BD_ENET_TX_INTR |
@@ -582,8 +598,22 @@ static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	 * yay for hw reuse :) */
 	if (skb->len <= 60)
 		sc |= BD_ENET_TX_PAD;
+	CBDC_SC(bdp, BD_ENET_TX_STATS);
 	CBDS_SC(bdp, sc);
 
+	/* Save skb pointer. */
+	fep->tx_skbuff[curidx] = skb;
+
+	/* If this was the last BD in the ring, start at the beginning again. */
+	if ((CBDR_SC(bdp) & BD_ENET_TX_WRAP) == 0)
+		bdp++;
+	else
+		bdp = fep->tx_bd_base;
+	fep->cur_tx = bdp;
+
+	if (fep->tx_free < MAX_SKB_FRAGS)
+		netif_stop_queue(dev);
+
 	skb_tx_timestamp(skb);
 
 	(*fep->ops->tx_kickstart)(dev);
@@ -917,7 +947,7 @@ static int fs_enet_probe(struct platform_device *ofdev)
 	}
 
 	fpi->rx_ring = 32;
-	fpi->tx_ring = 32;
+	fpi->tx_ring = 64;
 	fpi->rx_copybreak = 240;
 	fpi->napi_weight = 17;
 	fpi->phy_node = of_parse_phandle(ofdev->dev.of_node, "phy-handle", 0);
@@ -955,7 +985,8 @@ static int fs_enet_probe(struct platform_device *ofdev)
 
 	privsize = sizeof(*fep) +
 	           sizeof(struct sk_buff **) *
-	           (fpi->rx_ring + fpi->tx_ring);
+		     (fpi->rx_ring + fpi->tx_ring) +
+		   sizeof(char) * fpi->tx_ring;
 
 	ndev = alloc_etherdev(privsize);
 	if (!ndev) {
@@ -978,6 +1009,8 @@ static int fs_enet_probe(struct platform_device *ofdev)
 
 	fep->rx_skbuff = (struct sk_buff **)&fep[1];
 	fep->tx_skbuff = fep->rx_skbuff + fpi->rx_ring;
+	fep->mapped_as_page = (char *)(fep->rx_skbuff + fpi->rx_ring +
+				       fpi->tx_ring);
 
 	spin_lock_init(&fep->lock);
 	spin_lock_init(&fep->tx_lock);
@@ -1007,6 +1040,8 @@ static int fs_enet_probe(struct platform_device *ofdev)
 
 	netif_carrier_off(ndev);
 
+	ndev->features |= NETIF_F_SG;
+
 	ret = register_netdev(ndev);
 	if (ret)
 		goto out_free_bd;
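
A note on the descriptor accounting above (an explanatory aside, not part of
the commit): a worst-case skb consumes one BD for the linear part plus one per
page fragment, which is why fs_enet_start_xmit() now refuses to queue unless
tx_free > nr_frags, and why the NAPI completion path only wakes the queue once
at least MAX_SKB_FRAGS descriptors are free again. A minimal sketch of the
invariant:

	/* Sketch only: mirrors the tx_free checks added by this patch. */
	static bool tx_ring_has_room(unsigned int tx_free, unsigned int nr_frags)
	{
		return tx_free > nr_frags;	/* skb needs nr_frags + 1 BDs */
	}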
drivers/net/ethernet/freescale/fs_enet/fs_enet.h  +1 −0
@@ -134,6 +134,7 @@ struct fs_enet_private {
 	void __iomem *ring_base;
 	struct sk_buff **rx_skbuff;
 	struct sk_buff **tx_skbuff;
+	char *mapped_as_page;
 	cbd_t __iomem *rx_bd_base;	/* Address of Rx and Tx buffers.    */
 	cbd_t __iomem *tx_bd_base;
 	cbd_t __iomem *dirty_tx;	/* ring entries to be free()ed.     */
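
Usage note (not part of the patch): once a kernel carrying this commit is
running, the new offload can be confirmed from userspace with ethtool; running
ethtool -k eth0 should report scatter-gather: on, assuming eth0 is the
fs_enet interface.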