
Commit e3e9652a authored by Sean Wang, committed by David S. Miller

net: ethernet: mediatek: enhance the locking using the lightweight ones



Since the critical sections protected by page_lock are all entered
from user context or bottom-half context, spin_lock_irqsave() can be
replaced with the lighter spin_lock() or spin_lock_bh().

Signed-off-by: Sean Wang <sean.wang@mediatek.com>
Acked-by: John Crispin <john@phrozen.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
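
For illustration, below is a minimal, self-contained sketch of the three locking variants involved. It uses a hypothetical example_lock rather than the driver's page_lock: spin_lock_irqsave()/spin_unlock_irqrestore() additionally save the interrupt state and disable local interrupts, which is unnecessary when the critical section is never entered from hard-IRQ context.

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);	/* hypothetical lock, stands in for page_lock */

/* Before: disables local interrupts and saves/restores the IRQ state,
 * heavier than necessary for sections never taken in hard-IRQ context.
 */
static void section_irqsave(void)
{
	unsigned long flags;

	spin_lock_irqsave(&example_lock, flags);
	/* ... touch shared hardware registers or ring state ... */
	spin_unlock_irqrestore(&example_lock, flags);
}

/* After: spin_lock_bh() only disables bottom halves, which is enough
 * when the section is entered from process context or softirq context.
 */
static void section_bh(void)
{
	spin_lock_bh(&example_lock);
	/* ... touch shared hardware registers or ring state ... */
	spin_unlock_bh(&example_lock);
}

/* Plain spin_lock() is enough when the caller already runs with bottom
 * halves disabled, as is the case on the transmit path.
 */
static void section_plain(void)
{
	spin_lock(&example_lock);
	/* ... touch shared hardware registers or ring state ... */
	spin_unlock(&example_lock);
}
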
parent 1738cd3e
8 additions, 11 deletions
@@ -353,18 +353,17 @@ static int mtk_set_mac_address(struct net_device *dev, void *p)
	int ret = eth_mac_addr(dev, p);
	struct mtk_mac *mac = netdev_priv(dev);
	const char *macaddr = dev->dev_addr;
-	unsigned long flags;

	if (ret)
		return ret;

-	spin_lock_irqsave(&mac->hw->page_lock, flags);
+	spin_lock_bh(&mac->hw->page_lock);
	mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
		MTK_GDMA_MAC_ADRH(mac->id));
	mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
		(macaddr[4] << 8) | macaddr[5],
		MTK_GDMA_MAC_ADRL(mac->id));
-	spin_unlock_irqrestore(&mac->hw->page_lock, flags);
+	spin_unlock_bh(&mac->hw->page_lock);

	return 0;
}
@@ -748,7 +747,6 @@ static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
	struct mtk_eth *eth = mac->hw;
	struct mtk_tx_ring *ring = &eth->tx_ring;
	struct net_device_stats *stats = &dev->stats;
-	unsigned long flags;
	bool gso = false;
	int tx_num;

@@ -756,14 +754,14 @@ static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
	 * however we have 2 queues running on the same ring so we need to lock
	 * the ring access
	 */
-	spin_lock_irqsave(&eth->page_lock, flags);
+	spin_lock(&eth->page_lock);

	tx_num = mtk_cal_txd_req(skb);
	if (unlikely(atomic_read(&ring->free_count) <= tx_num)) {
		mtk_stop_queue(eth);
		netif_err(eth, tx_queued, dev,
			  "Tx Ring full when queue awake!\n");
-		spin_unlock_irqrestore(&eth->page_lock, flags);
+		spin_unlock(&eth->page_lock);
		return NETDEV_TX_BUSY;
	}

@@ -788,12 +786,12 @@ static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
	if (unlikely(atomic_read(&ring->free_count) <= ring->thresh))
		mtk_stop_queue(eth);

-	spin_unlock_irqrestore(&eth->page_lock, flags);
+	spin_unlock(&eth->page_lock);

	return NETDEV_TX_OK;

drop:
-	spin_unlock_irqrestore(&eth->page_lock, flags);
+	spin_unlock(&eth->page_lock);
	stats->tx_dropped++;
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
@@ -1347,16 +1345,15 @@ static int mtk_open(struct net_device *dev)

static void mtk_stop_dma(struct mtk_eth *eth, u32 glo_cfg)
{
-	unsigned long flags;
	u32 val;
	int i;

	/* stop the dma engine */
-	spin_lock_irqsave(&eth->page_lock, flags);
+	spin_lock_bh(&eth->page_lock);
	val = mtk_r32(eth, glo_cfg);
	mtk_w32(eth, val & ~(MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN),
		glo_cfg);
-	spin_unlock_irqrestore(&eth->page_lock, flags);
+	spin_unlock_bh(&eth->page_lock);

	/* wait for dma stop */
	for (i = 0; i < 10; i++) {