Commit 55a4e778 authored by Sean Wang <sean.wang@mediatek.com>, committed by David S. Miller

net: ethernet: mediatek: fix runtime warning raised by inconsistent struct device pointers passed to DMA API

With the DMA-API debug feature enabled, a runtime warning is raised because
paired DMA API calls are passed inconsistent struct device pointers: buffers
are mapped against one device but checked or unmapped against another. Align
the struct device used across paired DMA operations such as dma_map_*() and
dma_unmap_*() to eliminate the warning (a minimal sketch of the mismatch
follows below).
Signed-off-by: Sean Wang <sean.wang@mediatek.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent b2025c7c
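For context, a minimal sketch of the inconsistency that CONFIG_DMA_API_DEBUG flags. This is an editorial illustration, not part of the patch: example_tx_map() is a hypothetical function, though the identifiers mirror the driver's (struct mtk_eth comes from the driver's private header).

    #include <linux/dma-mapping.h>
    #include <linux/skbuff.h>

    /* Editorial sketch (hypothetical function): with CONFIG_DMA_API_DEBUG=y,
     * the debug layer records which struct device a buffer was mapped
     * against and warns when a different device shows up at unmap time.
     */
    static int example_tx_map(struct mtk_eth *eth, struct sk_buff *skb)
    {
    	dma_addr_t mapped_addr;

    	/* The buffer is mapped against the SoC ethernet device... */
    	mapped_addr = dma_map_single(eth->dev, skb->data,
    				     skb_headlen(skb), DMA_TO_DEVICE);
    	if (unlikely(dma_mapping_error(eth->dev, mapped_addr)))
    		return -ENOMEM;

    	/* ...so it must be checked and unmapped against that same
    	 * device.  The pre-patch code passed &dev->dev or &netdev->dev
    	 * (the embedded net_device's device object) in some of these
    	 * spots instead, which is what raised the runtime warning.
    	 */
    	dma_unmap_single(eth->dev, mapped_addr,
    			 skb_headlen(skb), DMA_TO_DEVICE);
    	return 0;
    }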
drivers/net/ethernet/mediatek/mtk_eth_soc.c

@@ -558,15 +558,15 @@ static inline struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring,
 	return &ring->buf[idx];
 }
 
-static void mtk_tx_unmap(struct device *dev, struct mtk_tx_buf *tx_buf)
+static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf)
 {
 	if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) {
-		dma_unmap_single(dev,
+		dma_unmap_single(eth->dev,
 				 dma_unmap_addr(tx_buf, dma_addr0),
 				 dma_unmap_len(tx_buf, dma_len0),
 				 DMA_TO_DEVICE);
 	} else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) {
-		dma_unmap_page(dev,
+		dma_unmap_page(eth->dev,
 			       dma_unmap_addr(tx_buf, dma_addr0),
 			       dma_unmap_len(tx_buf, dma_len0),
 			       DMA_TO_DEVICE);
@@ -611,9 +611,9 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
 	if (skb_vlan_tag_present(skb))
 		txd4 |= TX_DMA_INS_VLAN | skb_vlan_tag_get(skb);
 
-	mapped_addr = dma_map_single(&dev->dev, skb->data,
+	mapped_addr = dma_map_single(eth->dev, skb->data,
 				     skb_headlen(skb), DMA_TO_DEVICE);
-	if (unlikely(dma_mapping_error(&dev->dev, mapped_addr)))
+	if (unlikely(dma_mapping_error(eth->dev, mapped_addr)))
 		return -ENOMEM;
 
 	WRITE_ONCE(itxd->txd1, mapped_addr);
@@ -639,10 +639,10 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
 
 		n_desc++;
 		frag_map_size = min(frag_size, MTK_TX_DMA_BUF_LEN);
-		mapped_addr = skb_frag_dma_map(&dev->dev, frag, offset,
+		mapped_addr = skb_frag_dma_map(eth->dev, frag, offset,
 					       frag_map_size,
 					       DMA_TO_DEVICE);
-		if (unlikely(dma_mapping_error(&dev->dev, mapped_addr)))
+		if (unlikely(dma_mapping_error(eth->dev, mapped_addr)))
 			goto err_dma;
 
 		if (i == nr_frags - 1 &&
@@ -695,7 +695,7 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
 		tx_buf = mtk_desc_to_tx_buf(ring, itxd);
 
 		/* unmap dma */
-		mtk_tx_unmap(&dev->dev, tx_buf);
+		mtk_tx_unmap(eth, tx_buf);
 
 		itxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
 		itxd = mtk_qdma_phys_to_virt(ring, itxd->txd2);
@@ -852,11 +852,11 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
 			netdev->stats.rx_dropped++;
 			goto release_desc;
 		}
-		dma_addr = dma_map_single(&eth->netdev[mac]->dev,
+		dma_addr = dma_map_single(eth->dev,
 					  new_data + NET_SKB_PAD,
 					  ring->buf_size,
 					  DMA_FROM_DEVICE);
-		if (unlikely(dma_mapping_error(&netdev->dev, dma_addr))) {
+		if (unlikely(dma_mapping_error(eth->dev, dma_addr))) {
 			skb_free_frag(new_data);
 			netdev->stats.rx_dropped++;
 			goto release_desc;
@@ -871,7 +871,7 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
 		}
 		skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
 
-		dma_unmap_single(&netdev->dev, trxd.rxd1,
+		dma_unmap_single(eth->dev, trxd.rxd1,
 				 ring->buf_size, DMA_FROM_DEVICE);
 		pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
 		skb->dev = netdev;
@@ -953,7 +953,7 @@ static int mtk_poll_tx(struct mtk_eth *eth, int budget)
 			done[mac]++;
 			budget--;
 		}
-		mtk_tx_unmap(eth->dev, tx_buf);
+		mtk_tx_unmap(eth, tx_buf);
 
 		ring->last_free = desc;
 		atomic_inc(&ring->free_count);
@@ -1108,7 +1108,7 @@ static void mtk_tx_clean(struct mtk_eth *eth)
 	if (ring->buf) {
 		for (i = 0; i < MTK_DMA_SIZE; i++)
-			mtk_tx_unmap(eth->dev, &ring->buf[i]);
+			mtk_tx_unmap(eth, &ring->buf[i]);
 		kfree(ring->buf);
 		ring->buf = NULL;
 	}
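After the patch, the map, mapping-error check, and unmap for a given buffer all name the same struct device, and mtk_tx_unmap() takes the mtk_eth context directly. A condensed editorial sketch of the aligned RX pattern, with the surrounding driver code elided (identifiers as in mtk_poll_rx):

    /* All three calls reference eth->dev, so DMA-API debug can match
     * the unmap back to the original mapping.
     */
    dma_addr = dma_map_single(eth->dev, new_data + NET_SKB_PAD,
    			  ring->buf_size, DMA_FROM_DEVICE);
    if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
    	goto release_desc;
    ...
    dma_unmap_single(eth->dev, trxd.rxd1,
    		 ring->buf_size, DMA_FROM_DEVICE);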