Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit ad688cdb authored by Pavel Machek, committed by David S. Miller
Browse files

stmmac: fix memory barriers



Fix up memory barriers in the stmmac driver. They are meant to protect
against the DMA engine, so the smp_ variants are certainly wrong, and the
dma_ variants are preferable.

Signed-off-by: Pavel Machek <pavel@denx.de>
Tested-by: Niklas Cassel <niklas.cassel@axis.com>
Acked-by: Giuseppe Cavallaro <peppe.cavallaro@st.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 162809df
Loading
Loading
Loading
Loading
+2 −2
Original line number Diff line number Diff line
@@ -334,7 +334,7 @@ static void dwmac4_rd_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
		 * descriptors for the same frame has to be set before, to
		 * avoid race condition.
		 */
		wmb();
		dma_wmb();

	p->des3 = cpu_to_le32(tdes3);
}
@@ -377,7 +377,7 @@ static void dwmac4_rd_prepare_tso_tx_desc(struct dma_desc *p, int is_fs,
		 * descriptors for the same frame has to be set before, to
		 * avoid race condition.
		 */
		wmb();
		dma_wmb();

	p->des3 = cpu_to_le32(tdes3);
}
+1 −1
Original line number Diff line number Diff line
@@ -350,7 +350,7 @@ static void enh_desc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
		 * descriptors for the same frame has to be set before, to
		 * avoid race condition.
		 */
		wmb();
		dma_wmb();

	p->des0 = cpu_to_le32(tdes0);
}
+4 −4
Original line number Diff line number Diff line
@@ -2125,7 +2125,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
	 * descriptor and then barrier is needed to make sure that
	 * all is coherent before granting the DMA engine.
	 */
	smp_wmb();
	dma_wmb();

	if (netif_msg_pktdata(priv)) {
		pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
@@ -2338,7 +2338,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
		 * descriptor and then barrier is needed to make sure that
		 * all is coherent before granting the DMA engine.
		 */
		smp_wmb();
		dma_wmb();
	}

	netdev_sent_queue(dev, skb->len);
@@ -2443,14 +2443,14 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv)
			netif_dbg(priv, rx_status, priv->dev,
				  "refill entry #%d\n", entry);
		}
		wmb();
		dma_wmb();

		if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
			priv->hw->desc->init_rx_desc(p, priv->use_riwt, 0, 0);
		else
			priv->hw->desc->set_rx_owner(p);

		wmb();
		dma_wmb();

		entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE);
	}