Commit 75bacb6d authored by Christophe JAILLET, committed by David S. Miller

myri10ge: switch from 'pci_' to 'dma_' API

The wrappers in include/linux/pci-dma-compat.h should go away.

The patch has been generated with the coccinelle script below.

It has been hand modified to use 'dma_set_mask_and_coherent()' instead of
'pci_set_dma_mask()/pci_set_consistent_dma_mask()' when applicable.
This is less verbose.

A message split on 2 lines has been merged.

It has been compile tested.

@@
@@
-    PCI_DMA_BIDIRECTIONAL
+    DMA_BIDIRECTIONAL

@@
@@
-    PCI_DMA_TODEVICE
+    DMA_TO_DEVICE

@@
@@
-    PCI_DMA_FROMDEVICE
+    DMA_FROM_DEVICE

@@
@@
-    PCI_DMA_NONE
+    DMA_NONE

@@
expression e1, e2, e3;
@@
-    pci_alloc_consistent(e1, e2, e3)
+    dma_alloc_coherent(&e1->dev, e2, e3, GFP_)

@@
expression e1, e2, e3;
@@
-    pci_zalloc_consistent(e1, e2, e3)
+    dma_alloc_coherent(&e1->dev, e2, e3, GFP_)

@@
expression e1, e2, e3, e4;
@@
-    pci_free_consistent(e1, e2, e3, e4)
+    dma_free_coherent(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_map_single(e1, e2, e3, e4)
+    dma_map_single(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_unmap_single(e1, e2, e3, e4)
+    dma_unmap_single(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4, e5;
@@
-    pci_map_page(e1, e2, e3, e4, e5)
+    dma_map_page(&e1->dev, e2, e3, e4, e5)

@@
expression e1, e2, e3, e4;
@@
-    pci_unmap_page(e1, e2, e3, e4)
+    dma_unmap_page(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_map_sg(e1, e2, e3, e4)
+    dma_map_sg(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_unmap_sg(e1, e2, e3, e4)
+    dma_unmap_sg(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_dma_sync_single_for_cpu(e1, e2, e3, e4)
+    dma_sync_single_for_cpu(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_dma_sync_single_for_device(e1, e2, e3, e4)
+    dma_sync_single_for_device(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_dma_sync_sg_for_cpu(e1, e2, e3, e4)
+    dma_sync_sg_for_cpu(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_dma_sync_sg_for_device(e1, e2, e3, e4)
+    dma_sync_sg_for_device(&e1->dev, e2, e3, e4)

@@
expression e1, e2;
@@
-    pci_dma_mapping_error(e1, e2)
+    dma_mapping_error(&e1->dev, e2)

@@
expression e1, e2;
@@
-    pci_set_dma_mask(e1, e2)
+    dma_set_mask(&e1->dev, e2)

@@
expression e1, e2;
@@
-    pci_set_consistent_dma_mask(e1, e2)
+    dma_set_coherent_mask(&e1->dev, e2)
Signed-off-by: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent e6a70a02
@@ -850,9 +850,9 @@ static int myri10ge_dma_test(struct myri10ge_priv *mgp, int test_type)
 	dmatest_page = alloc_page(GFP_KERNEL);
 	if (!dmatest_page)
 		return -ENOMEM;
-	dmatest_bus = pci_map_page(mgp->pdev, dmatest_page, 0, PAGE_SIZE,
-				   DMA_BIDIRECTIONAL);
-	if (unlikely(pci_dma_mapping_error(mgp->pdev, dmatest_bus))) {
+	dmatest_bus = dma_map_page(&mgp->pdev->dev, dmatest_page, 0,
+				   PAGE_SIZE, DMA_BIDIRECTIONAL);
+	if (unlikely(dma_mapping_error(&mgp->pdev->dev, dmatest_bus))) {
 		__free_page(dmatest_page);
 		return -ENOMEM;
 	}
@@ -899,7 +899,8 @@ static int myri10ge_dma_test(struct myri10ge_priv *mgp, int test_type)
 	    (cmd.data0 & 0xffff);
 
 abort:
-	pci_unmap_page(mgp->pdev, dmatest_bus, PAGE_SIZE, DMA_BIDIRECTIONAL);
+	dma_unmap_page(&mgp->pdev->dev, dmatest_bus, PAGE_SIZE,
+		       DMA_BIDIRECTIONAL);
 	put_page(dmatest_page);
 
 	if (status != 0 && test_type != MXGEFW_CMD_UNALIGNED_TEST)
@@ -1205,10 +1206,10 @@ myri10ge_alloc_rx_pages(struct myri10ge_priv *mgp, struct myri10ge_rx_buf *rx,
 			return;
 		}
 
-		bus = pci_map_page(mgp->pdev, page, 0,
-				   MYRI10GE_ALLOC_SIZE,
-				   PCI_DMA_FROMDEVICE);
-		if (unlikely(pci_dma_mapping_error(mgp->pdev, bus))) {
+		bus = dma_map_page(&mgp->pdev->dev, page, 0,
+				   MYRI10GE_ALLOC_SIZE,
+				   DMA_FROM_DEVICE);
+		if (unlikely(dma_mapping_error(&mgp->pdev->dev, bus))) {
 			__free_pages(page, MYRI10GE_ALLOC_ORDER);
 			if (rx->fill_cnt - rx->cnt < 16)
 				rx->watchdog_needed = 1;
@@ -1256,9 +1257,9 @@ myri10ge_unmap_rx_page(struct pci_dev *pdev,
 	/* unmap the recvd page if we're the only or last user of it */
 	if (bytes >= MYRI10GE_ALLOC_SIZE / 2 ||
 	    (info->page_offset + 2 * bytes) > MYRI10GE_ALLOC_SIZE) {
-		pci_unmap_page(pdev, (dma_unmap_addr(info, bus)
-				      & ~(MYRI10GE_ALLOC_SIZE - 1)),
-			       MYRI10GE_ALLOC_SIZE, PCI_DMA_FROMDEVICE);
+		dma_unmap_page(&pdev->dev, (dma_unmap_addr(info, bus)
+					    & ~(MYRI10GE_ALLOC_SIZE - 1)),
+			       MYRI10GE_ALLOC_SIZE, DMA_FROM_DEVICE);
 	}
 }
@@ -1398,16 +1399,16 @@ myri10ge_tx_done(struct myri10ge_slice_state *ss, int mcp_index)
 			ss->stats.tx_packets++;
 			dev_consume_skb_irq(skb);
 			if (len)
-				pci_unmap_single(pdev,
-						 dma_unmap_addr(&tx->info[idx],
-								bus), len,
-						 PCI_DMA_TODEVICE);
+				dma_unmap_single(&pdev->dev,
+						 dma_unmap_addr(&tx->info[idx],
+								bus), len,
+						 DMA_TO_DEVICE);
 		} else {
 			if (len)
-				pci_unmap_page(pdev,
-					       dma_unmap_addr(&tx->info[idx],
-							      bus), len,
-					       PCI_DMA_TODEVICE);
+				dma_unmap_page(&pdev->dev,
+					       dma_unmap_addr(&tx->info[idx],
+							      bus), len,
+					       DMA_TO_DEVICE);
 		}
 	}
@@ -2110,16 +2111,16 @@ static void myri10ge_free_rings(struct myri10ge_slice_state *ss)
 			ss->stats.tx_dropped++;
 			dev_kfree_skb_any(skb);
 			if (len)
-				pci_unmap_single(mgp->pdev,
-						 dma_unmap_addr(&tx->info[idx],
-								bus), len,
-						 PCI_DMA_TODEVICE);
+				dma_unmap_single(&mgp->pdev->dev,
+						 dma_unmap_addr(&tx->info[idx],
+								bus), len,
+						 DMA_TO_DEVICE);
 		} else {
 			if (len)
-				pci_unmap_page(mgp->pdev,
-					       dma_unmap_addr(&tx->info[idx],
-							      bus), len,
-					       PCI_DMA_TODEVICE);
+				dma_unmap_page(&mgp->pdev->dev,
+					       dma_unmap_addr(&tx->info[idx],
+							      bus), len,
+					       DMA_TO_DEVICE);
 		}
 	}
 	kfree(ss->rx_big.info);
@@ -2584,15 +2585,15 @@ static void myri10ge_unmap_tx_dma(struct myri10ge_priv *mgp,
 		len = dma_unmap_len(&tx->info[idx], len);
 		if (len) {
 			if (tx->info[idx].skb != NULL)
-				pci_unmap_single(mgp->pdev,
-						 dma_unmap_addr(&tx->info[idx],
-								bus), len,
-						 PCI_DMA_TODEVICE);
+				dma_unmap_single(&mgp->pdev->dev,
+						 dma_unmap_addr(&tx->info[idx],
+								bus), len,
+						 DMA_TO_DEVICE);
 			else
-				pci_unmap_page(mgp->pdev,
-					       dma_unmap_addr(&tx->info[idx],
-							      bus), len,
-					       PCI_DMA_TODEVICE);
+				dma_unmap_page(&mgp->pdev->dev,
+					       dma_unmap_addr(&tx->info[idx],
+							      bus), len,
+					       DMA_TO_DEVICE);
 			dma_unmap_len_set(&tx->info[idx], len, 0);
 			tx->info[idx].skb = NULL;
 		}
@@ -2715,8 +2716,8 @@ static netdev_tx_t myri10ge_xmit(struct sk_buff *skb,
 
 	/* map the skb for DMA */
 	len = skb_headlen(skb);
-	bus = pci_map_single(mgp->pdev, skb->data, len, PCI_DMA_TODEVICE);
-	if (unlikely(pci_dma_mapping_error(mgp->pdev, bus)))
+	bus = dma_map_single(&mgp->pdev->dev, skb->data, len, DMA_TO_DEVICE);
+	if (unlikely(dma_mapping_error(&mgp->pdev->dev, bus)))
 		goto drop;
 
 	idx = tx->req & tx->mask;
@@ -2824,7 +2825,7 @@ static netdev_tx_t myri10ge_xmit(struct sk_buff *skb,
 		len = skb_frag_size(frag);
 		bus = skb_frag_dma_map(&mgp->pdev->dev, frag, 0, len,
 				       DMA_TO_DEVICE);
-		if (unlikely(pci_dma_mapping_error(mgp->pdev, bus))) {
+		if (unlikely(dma_mapping_error(&mgp->pdev->dev, bus))) {
 			myri10ge_unmap_tx_dma(mgp, tx, idx);
 			goto drop;
 		}
@@ -3776,19 +3777,17 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	myri10ge_mask_surprise_down(pdev);
 	pci_set_master(pdev);
 	dac_enabled = 1;
-	status = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+	status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
 	if (status != 0) {
 		dac_enabled = 0;
 		dev_err(&pdev->dev,
-			"64-bit pci address mask was refused, "
-			"trying 32-bit\n");
-		status = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+			"64-bit pci address mask was refused, trying 32-bit\n");
+		status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
 	}
 	if (status != 0) {
 		dev_err(&pdev->dev, "Error %d setting DMA mask\n", status);
 		goto abort_with_enabled;
 	}
-	(void)pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
 	mgp->cmd = dma_alloc_coherent(&pdev->dev, sizeof(*mgp->cmd),
 				      &mgp->cmd_bus, GFP_KERNEL);
 	if (!mgp->cmd) {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment