Commit 3a12df22 authored by Sergei Antonov, committed by Jakub Kicinski

net: moxa: pass pdev instead of ndev to DMA functions

dma_map_single() calls fail in moxart_mac_setup_desc_ring() and
moxart_mac_start_xmit(), which leads to incessant output of this:

[   16.043925] moxart-ethernet 92000000.mac eth0: DMA mapping error
[   16.050957] moxart-ethernet 92000000.mac eth0: DMA mapping error
[   16.058229] moxart-ethernet 92000000.mac eth0: DMA mapping error

Passing pdev to the DMA functions is a common approach among net drivers.

Fixes: 6c821bd9 ("net: Add MOXA ART SoCs ethernet driver")
Signed-off-by: Sergei Antonov <saproj@gmail.com>
Suggested-by: Andrew Lunn <andrew@lunn.ch>
Reviewed-by: Andrew Lunn <andrew@lunn.ch>
Link: https://lore.kernel.org/r/20220812171339.2271788-1-saproj@gmail.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parent 5061e34c
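
For context, the pattern the fix follows is to remember the platform device in the driver's private data at probe time and use its struct device for all streaming DMA calls, since &ndev->dev generally has no DMA setup of its own while the platform device does. Below is a minimal sketch of that pattern; the struct and function names are illustrative, not the driver's actual ones, and only the priv->pdev field mirrors what the diff introduces.

#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/platform_device.h>

/* Hypothetical private data: keep a pointer to the platform device so
 * DMA mappings are made against the device that actually carries the
 * DMA configuration, rather than against &ndev->dev. */
struct example_priv {
	struct platform_device *pdev;
	void *rx_buf;
	size_t rx_buf_size;
	dma_addr_t rx_mapping;
};

/* In probe(): priv->pdev = pdev; then everywhere else: */
static int example_map_rx_buf(struct net_device *ndev)
{
	struct example_priv *priv = netdev_priv(ndev);

	priv->rx_mapping = dma_map_single(&priv->pdev->dev, priv->rx_buf,
					  priv->rx_buf_size, DMA_FROM_DEVICE);
	if (dma_mapping_error(&priv->pdev->dev, priv->rx_mapping)) {
		netdev_err(ndev, "DMA mapping error\n");
		return -ENOMEM;
	}
	return 0;
}

The diff below applies the same substitution to every dma_map_single()/dma_unmap_single()/dma_sync_*() call in the driver.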
@@ -77,7 +77,7 @@ static void moxart_mac_free_memory(struct net_device *ndev)
 	int i;
 
 	for (i = 0; i < RX_DESC_NUM; i++)
-		dma_unmap_single(&ndev->dev, priv->rx_mapping[i],
+		dma_unmap_single(&priv->pdev->dev, priv->rx_mapping[i],
 				 priv->rx_buf_size, DMA_FROM_DEVICE);
 
 	if (priv->tx_desc_base)
@@ -147,11 +147,11 @@ static void moxart_mac_setup_desc_ring(struct net_device *ndev)
 			       desc + RX_REG_OFFSET_DESC1);
 
 		priv->rx_buf[i] = priv->rx_buf_base + priv->rx_buf_size * i;
-		priv->rx_mapping[i] = dma_map_single(&ndev->dev,
+		priv->rx_mapping[i] = dma_map_single(&priv->pdev->dev,
 						     priv->rx_buf[i],
 						     priv->rx_buf_size,
 						     DMA_FROM_DEVICE);
-		if (dma_mapping_error(&ndev->dev, priv->rx_mapping[i]))
+		if (dma_mapping_error(&priv->pdev->dev, priv->rx_mapping[i]))
 			netdev_err(ndev, "DMA mapping error\n");
 
 		moxart_desc_write(priv->rx_mapping[i],
@@ -240,7 +240,7 @@ static int moxart_rx_poll(struct napi_struct *napi, int budget)
 		if (len > RX_BUF_SIZE)
 			len = RX_BUF_SIZE;
 
-		dma_sync_single_for_cpu(&ndev->dev,
+		dma_sync_single_for_cpu(&priv->pdev->dev,
 					priv->rx_mapping[rx_head],
 					priv->rx_buf_size, DMA_FROM_DEVICE);
 		skb = netdev_alloc_skb_ip_align(ndev, len);
@@ -294,7 +294,7 @@ static void moxart_tx_finished(struct net_device *ndev)
 	unsigned int tx_tail = priv->tx_tail;
 
 	while (tx_tail != tx_head) {
-		dma_unmap_single(&ndev->dev, priv->tx_mapping[tx_tail],
+		dma_unmap_single(&priv->pdev->dev, priv->tx_mapping[tx_tail],
 				 priv->tx_len[tx_tail], DMA_TO_DEVICE);
 
 		ndev->stats.tx_packets++;
@@ -358,9 +358,9 @@ static netdev_tx_t moxart_mac_start_xmit(struct sk_buff *skb,
 
 	len = skb->len > TX_BUF_SIZE ? TX_BUF_SIZE : skb->len;
 
-	priv->tx_mapping[tx_head] = dma_map_single(&ndev->dev, skb->data,
+	priv->tx_mapping[tx_head] = dma_map_single(&priv->pdev->dev, skb->data,
 						   len, DMA_TO_DEVICE);
-	if (dma_mapping_error(&ndev->dev, priv->tx_mapping[tx_head])) {
+	if (dma_mapping_error(&priv->pdev->dev, priv->tx_mapping[tx_head])) {
 		netdev_err(ndev, "DMA mapping error\n");
 		goto out_unlock;
 	}
@@ -379,7 +379,7 @@ static netdev_tx_t moxart_mac_start_xmit(struct sk_buff *skb,
 		len = ETH_ZLEN;
 	}
 
-	dma_sync_single_for_device(&ndev->dev, priv->tx_mapping[tx_head],
+	dma_sync_single_for_device(&priv->pdev->dev, priv->tx_mapping[tx_head],
 				   priv->tx_buf_size, DMA_TO_DEVICE);
 
 	txdes1 = TX_DESC1_LTS | TX_DESC1_FTS | (len & TX_DESC1_BUF_SIZE_MASK);
@@ -493,7 +493,7 @@ static int moxart_mac_probe(struct platform_device *pdev)
 	priv->tx_buf_size = TX_BUF_SIZE;
 	priv->rx_buf_size = RX_BUF_SIZE;
 
-	priv->tx_desc_base = dma_alloc_coherent(&pdev->dev, TX_REG_DESC_SIZE *
+	priv->tx_desc_base = dma_alloc_coherent(p_dev, TX_REG_DESC_SIZE *
 						TX_DESC_NUM, &priv->tx_base,
 						GFP_DMA | GFP_KERNEL);
 	if (!priv->tx_desc_base) {
@@ -501,7 +501,7 @@ static int moxart_mac_probe(struct platform_device *pdev)
 		goto init_fail;
 	}
 
-	priv->rx_desc_base = dma_alloc_coherent(&pdev->dev, RX_REG_DESC_SIZE *
+	priv->rx_desc_base = dma_alloc_coherent(p_dev, RX_REG_DESC_SIZE *
 						RX_DESC_NUM, &priv->rx_base,
 						GFP_DMA | GFP_KERNEL);
 	if (!priv->rx_desc_base) {
...