
Commit 222e4d0b authored by François Romieu, committed by David S. Miller

pch_gbe: replace private tx ring lock with common netif_tx_lock



pch_gbe_tx_ring.tx_lock is only used in the hard_xmit handler and
in the transmit completion reaper called from NAPI context.
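
Both paths are already covered by the core's tx locking: ndo_start_xmit is
invoked under the per-queue _xmit_lock for drivers that do not set
NETIF_F_LLTX (pch_gbe does not), and the completion reaper can take the same
lock through netif_tx_lock(). A minimal sketch of the resulting scheme,
assuming that pattern; the example_* names are hypothetical, only the
netif_*() helpers are real kernel APIs:

#include <linux/netdevice.h>

/* Transmit completion reaper, run from NAPI poll: netif_tx_lock()
 * serializes against the xmit handler via the netdev tx queue locks. */
static void example_clean_tx(struct net_device *netdev, bool cleaned)
{
	netif_tx_lock(netdev);
	if (unlikely(cleaned && netif_queue_stopped(netdev)))
		netif_wake_queue(netdev);	/* descriptors freed: restart */
	netif_tx_unlock(netdev);
}

/* .ndo_start_xmit: for non-LLTX drivers the core already holds the
 * per-queue _xmit_lock around this call, so the handler needs no
 * private spinlock of its own. */
static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	/* ring bookkeeping goes here, serialized by the core's lock */
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}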

Compile-tested only. Potential victims Cced.

Someone more knowledgeable may check if pch_gbe_tx_queue could
have some use for a mmiowb.
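
For context on that remark: mmiowb() was the barrier used to order a posted
MMIO write (e.g. a descriptor tail or doorbell write) against a following
lock release, so that writes from different CPUs reach the device in lock
order; since v5.2 it is implicit in spin_unlock(). A hedged sketch of the
pattern, with a made-up register offset and lock; whether pch_gbe_tx_queue
actually needs it is exactly the open question above:

#include <linux/io.h>
#include <linux/spinlock.h>

#define EXAMPLE_TX_TAIL_REG 0x20	/* hypothetical doorbell offset */

static DEFINE_SPINLOCK(example_lock);

/* Kick the NIC after queueing descriptors (illustrative only). */
static void example_kick_tx(void __iomem *ioaddr, u32 tail)
{
	spin_lock(&example_lock);
	iowrite32(tail, ioaddr + EXAMPLE_TX_TAIL_REG);	/* posted write */
	mmiowb();	/* order it before the unlock (pre-v5.2 kernels) */
	spin_unlock(&example_lock);
}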

Signed-off-by: Francois Romieu <romieu@fr.zoreil.com>
Cc: Darren Hart <dvhart@infradead.org>
Cc: Andy Cress <andy.cress@us.kontron.com>
Cc: bryan@fossetcon.org
Signed-off-by: David S. Miller <davem@davemloft.net>
parent badf3ada
drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h  +0 −2
@@ -481,7 +481,6 @@ struct pch_gbe_buffer {
 
 /**
  * struct pch_gbe_tx_ring - tx ring information
- * @tx_lock:	spinlock structs
  * @desc:	pointer to the descriptor ring memory
  * @dma:	physical address of the descriptor ring
  * @size:	length of descriptor ring in bytes
@@ -491,7 +490,6 @@ struct pch_gbe_buffer {
  * @buffer_info:	array of buffer information structs
  */
 struct pch_gbe_tx_ring {
-	spinlock_t tx_lock;
 	struct pch_gbe_tx_desc *desc;
 	dma_addr_t dma;
 	unsigned int size;
drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c  +2 −8
@@ -1640,7 +1640,7 @@ pch_gbe_clean_tx(struct pch_gbe_adapter *adapter,
 		   cleaned_count);
 	if (cleaned_count > 0)  { /*skip this if nothing cleaned*/
 		/* Recover from running out of Tx resources in xmit_frame */
-		spin_lock(&tx_ring->tx_lock);
+		netif_tx_lock(adapter->netdev);
 		if (unlikely(cleaned && (netif_queue_stopped(adapter->netdev))))
 		{
 			netif_wake_queue(adapter->netdev);
@@ -1652,7 +1652,7 @@ pch_gbe_clean_tx(struct pch_gbe_adapter *adapter,
 
 		netdev_dbg(adapter->netdev, "next_to_clean : %d\n",
 			   tx_ring->next_to_clean);
-		spin_unlock(&tx_ring->tx_lock);
+		netif_tx_unlock(adapter->netdev);
 	}
 	return cleaned;
 }
@@ -1805,7 +1805,6 @@ int pch_gbe_setup_tx_resources(struct pch_gbe_adapter *adapter,
 
 	tx_ring->next_to_use = 0;
 	tx_ring->next_to_clean = 0;
-	spin_lock_init(&tx_ring->tx_lock);
 
 	for (desNo = 0; desNo < tx_ring->count; desNo++) {
 		tx_desc = PCH_GBE_TX_DESC(*tx_ring, desNo);
@@ -2135,13 +2134,9 @@ static int pch_gbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 {
 	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
 	struct pch_gbe_tx_ring *tx_ring = adapter->tx_ring;
-	unsigned long flags;
 
-	spin_lock_irqsave(&tx_ring->tx_lock, flags);
-
 	if (unlikely(!PCH_GBE_DESC_UNUSED(tx_ring))) {
 		netif_stop_queue(netdev);
-		spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
 		netdev_dbg(netdev,
 			   "Return : BUSY  next_to use : 0x%08x  next_to clean : 0x%08x\n",
 			   tx_ring->next_to_use, tx_ring->next_to_clean);
@@ -2150,7 +2145,6 @@ static int pch_gbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 
 	/* CRC,ITAG no support */
 	pch_gbe_tx_queue(adapter, tx_ring, skb);
-	spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
 	return NETDEV_TX_OK;
 }