Commit ad688cdb authored by Pavel Machek, committed by David S. Miller

stmmac: fix memory barriers

Fix up the memory barriers in the stmmac driver. They are meant to protect
against the DMA engine, so the smp_ variants are certainly wrong, and the
dma_ variants are preferable.
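
For illustration, here is a minimal sketch of the hand-off pattern this patch
adjusts. The descriptor layout, the OWN bit position and the helper names are
hypothetical, not the real stmmac definitions; the point is only the placement
of dma_wmb() between the descriptor field writes and the store that passes
ownership to the hardware.

	#include <linux/types.h>
	#include <linux/bits.h>
	#include <asm/barrier.h>
	#include <asm/byteorder.h>

	#define EXAMPLE_DESC_OWN	BIT(31)	/* hypothetical "owned by DMA" bit */

	struct example_tx_desc {
		__le32 buf_addr;
		__le32 buf_len;
		__le32 ctrl;			/* carries the OWN bit */
	};

	static void example_prepare_tx_desc(struct example_tx_desc *p,
					    u32 buf, u32 len, u32 ctrl)
	{
		/* Everything the DMA engine will read is written first. */
		p->buf_addr = cpu_to_le32(buf);
		p->buf_len = cpu_to_le32(len);

		/*
		 * dma_wmb() orders the stores above against the store below
		 * as seen by the device.  smp_wmb() only orders them against
		 * other CPUs (and degrades to a compiler barrier on non-SMP
		 * builds), while a full wmb() is heavier than necessary here.
		 */
		dma_wmb();

		/* Hand the descriptor over to the hardware last. */
		p->ctrl = cpu_to_le32(ctrl | EXAMPLE_DESC_OWN);
	}

The RX refill hunk in the diff below applies the same idea: the barrier sits
between writing the refilled descriptor and handing ownership back to the DMA
engine.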
Signed-off-by: Pavel Machek <pavel@denx.de>
Tested-by: Niklas Cassel <niklas.cassel@axis.com>
Acked-by: Giuseppe Cavallaro <peppe.cavallaro@st.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 162809df
@@ -334,7 +334,7 @@ static void dwmac4_rd_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
 	 * descriptors for the same frame has to be set before, to
 	 * avoid race condition.
 	 */
-	wmb();
+	dma_wmb();
 	p->des3 = cpu_to_le32(tdes3);
 }
@@ -377,7 +377,7 @@ static void dwmac4_rd_prepare_tso_tx_desc(struct dma_desc *p, int is_fs,
 	 * descriptors for the same frame has to be set before, to
 	 * avoid race condition.
 	 */
-	wmb();
+	dma_wmb();
 	p->des3 = cpu_to_le32(tdes3);
 }
...
@@ -350,7 +350,7 @@ static void enh_desc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
 	 * descriptors for the same frame has to be set before, to
 	 * avoid race condition.
 	 */
-	wmb();
+	dma_wmb();
 	p->des0 = cpu_to_le32(tdes0);
 }
...
@@ -2125,7 +2125,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
 	 * descriptor and then barrier is needed to make sure that
 	 * all is coherent before granting the DMA engine.
 	 */
-	smp_wmb();
+	dma_wmb();
 	if (netif_msg_pktdata(priv)) {
 		pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
@@ -2338,7 +2338,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
 		 * descriptor and then barrier is needed to make sure that
 		 * all is coherent before granting the DMA engine.
 		 */
-		smp_wmb();
+		dma_wmb();
 	}
 	netdev_sent_queue(dev, skb->len);
@@ -2443,14 +2443,14 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv)
 			netif_dbg(priv, rx_status, priv->dev,
 				  "refill entry #%d\n", entry);
 		}
-		wmb();
+		dma_wmb();
 		if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
 			priv->hw->desc->init_rx_desc(p, priv->use_riwt, 0, 0);
 		else
 			priv->hw->desc->set_rx_owner(p);
-		wmb();
+		dma_wmb();
 		entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE);
 	}
...