
Commit c4bc44c6 authored by Kevin Hao, committed by David S. Miller

net: fec: fix the race between xmit and bdp reclaiming path



When we transmit a fragmented skb, we may run into a race like the
following scenario (assume txq->cur_tx is next to txq->dirty_tx):
           cpu 0                                          cpu 1
  fec_enet_txq_submit_skb
    reserve a bdp for the first fragment
    fec_enet_txq_submit_frag_skb
       update the bdp for the other fragment
       update txq->cur_tx
                                                   fec_enet_tx_queue
                                                     bdp = fec_enet_get_nextdesc(txq->dirty_tx, fep, queue_id);
                                                      This bdp is the one reserved for the first fragment. Since
                                                      its BD_ENET_TX_READY bit is not set and txq->cur_tx already
                                                      points to a bdp beyond it, we treat it as a completed bdp
                                                      and try to reclaim it.
    update the bdp for the first fragment
    update txq->cur_tx

So we shouldn't update txq->cur_tx until all the updates to the bdps
used for the fragments have been performed. Also add the corresponding
memory barriers to guarantee that the updates to the bdps, dirty_tx and
cur_tx are performed in the proper order.
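
The fix boils down to a wmb()/rmb() pairing between the two paths: the
xmit side publishes txq->cur_tx only after every bdp write is visible,
and the reclaim side tests a bdp's ownership bit only after it has
observed cur_tx. The sketch below illustrates that pattern; the struct
definitions and the txq_publish()/txq_reclaim() helpers are simplified
stand-ins invented for this illustration, while wmb(), rmb(),
READ_ONCE() and BD_ENET_TX_READY are the actual kernel/driver symbols
the patch uses.

#include <linux/compiler.h>	/* READ_ONCE() */
#include <asm/barrier.h>	/* wmb(), rmb() */

#define BD_ENET_TX_READY 0x8000	/* ownership bit, value as in fec.h */

struct bufdesc {
	unsigned short cbd_sc;	/* status/control; READY while HW owns it */
};

struct txq {
	struct bufdesc *cur_tx;		/* published by the xmit path */
	struct bufdesc *dirty_tx;	/* advanced by the reclaim path */
};

/* xmit side: fill every bdp first, publish cur_tx last */
static void txq_publish(struct txq *txq, struct bufdesc *last_bdp)
{
	/* ... write all fragment bdps, then the first bdp ... */

	wmb();			/* bdp/tx_skbuff writes visible before cur_tx moves */
	txq->cur_tx = last_bdp;
}

/* reclaim side: stop at cur_tx, test ownership only after rmb() */
static void txq_reclaim(struct txq *txq, struct bufdesc *bdp)
{
	while (bdp != READ_ONCE(txq->cur_tx)) {
		rmb();		/* order the load of cur_tx against cbd_sc */
		if (READ_ONCE(bdp->cbd_sc) & BD_ENET_TX_READY)
			break;	/* hardware has not finished this bdp yet */

		/* ... unmap the buffer and free the skb for this bdp ... */

		wmb();		/* bdp/tx_skbuff updates visible before dirty_tx */
		txq->dirty_tx = bdp;
		bdp++;		/* simplified; the real ring wraps around */
	}
}

With this pairing, the reclaim path can no longer mistake the bdp of a
half-submitted first fragment for a completed one, because cur_tx is
not advanced past it until that descriptor is fully written.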

Signed-off-by: Kevin Hao <haokexin@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 2cf1b5ce
drivers/net/ethernet/freescale/fec_main.c: +20 −15

--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -364,7 +364,7 @@ fec_enet_clear_csum(struct sk_buff *skb, struct net_device *ndev)
 	return 0;
 }
 
-static int
+static struct bufdesc *
 fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq,
 			     struct sk_buff *skb,
 			     struct net_device *ndev)
@@ -439,10 +439,7 @@ fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq,
 		bdp->cbd_sc = status;
 	}
 
-	txq->cur_tx = bdp;
-
-	return 0;
-
+	return bdp;
 dma_mapping_error:
 	bdp = txq->cur_tx;
 	for (i = 0; i < frag; i++) {
@@ -450,7 +447,7 @@ fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq,
 		dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
 				bdp->cbd_datlen, DMA_TO_DEVICE);
 	}
-	return NETDEV_TX_OK;
+	return ERR_PTR(-ENOMEM);
 }
 
 static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
@@ -467,7 +464,6 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
 	unsigned int estatus = 0;
 	unsigned int index;
 	int entries_free;
-	int ret;
 
 	entries_free = fec_enet_get_free_txdesc_num(fep, txq);
 	if (entries_free < MAX_SKB_FRAGS + 1) {
@@ -485,6 +481,7 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
 
 	/* Fill in a Tx ring entry */
 	bdp = txq->cur_tx;
+	last_bdp = bdp;
 	status = bdp->cbd_sc;
 	status &= ~BD_ENET_TX_STATS;
 
@@ -513,9 +510,9 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
 	}
 
 	if (nr_frags) {
-		ret = fec_enet_txq_submit_frag_skb(txq, skb, ndev);
-		if (ret)
-			return ret;
+		last_bdp = fec_enet_txq_submit_frag_skb(txq, skb, ndev);
+		if (IS_ERR(last_bdp))
+			return NETDEV_TX_OK;
 	} else {
 		status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST);
 		if (fep->bufdesc_ex) {
@@ -544,7 +541,6 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
 		ebdp->cbd_esc = estatus;
 	}
 
-	last_bdp = txq->cur_tx;
 	index = fec_enet_get_bd_index(txq->tx_bd_base, last_bdp, fep);
 	/* Save skb pointer */
 	txq->tx_skbuff[index] = skb;
@@ -563,6 +559,10 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
 
 	skb_tx_timestamp(skb);
 
+	/* Make sure the update to bdp and tx_skbuff are performed before
+	 * cur_tx.
+	 */
+	wmb();
 	txq->cur_tx = bdp;
 
 	/* Trigger transmission start */
@@ -1218,10 +1218,11 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
 	/* get next bdp of dirty_tx */
 	bdp = fec_enet_get_nextdesc(bdp, fep, queue_id);
 
-	while (((status = bdp->cbd_sc) & BD_ENET_TX_READY) == 0) {
-
-		/* current queue is empty */
-		if (bdp == txq->cur_tx)
+	while (bdp != READ_ONCE(txq->cur_tx)) {
+		/* Order the load of cur_tx and cbd_sc */
+		rmb();
+		status = READ_ONCE(bdp->cbd_sc);
+		if (status & BD_ENET_TX_READY)
 			break;
 
 		index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep);
@@ -1275,6 +1276,10 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
 		/* Free the sk buffer associated with this last transmit */
 		dev_kfree_skb_any(skb);
 
+		/* Make sure the update to bdp and tx_skbuff are performed
+		 * before dirty_tx
+		 */
+		wmb();
 		txq->dirty_tx = bdp;
 
 		/* Update pointer to next buffer descriptor to be transmitted */