Commit 297af515 authored by Christophe JAILLET, committed by Jakub Kicinski

netxen_nic: switch from 'pci_' to 'dma_' API

The wrappers in include/linux/pci-dma-compat.h should go away.

The patch has been generated with the Coccinelle script below and has been
hand-modified to replace the GFP_ placeholder with the correct flag.
It has been compile tested.

When memory is allocated in 'netxen_get_minidump_template()', GFP_KERNEL can
be used because its only caller, 'netxen_setup_minidump()', already uses it
and no lock is acquired in between.
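
As a rough sketch of the resulting call shape (illustrative only, using a
hypothetical helper name; the real call sites are in the diff below), process
context with no lock held is what makes GFP_KERNEL safe:

  #include <linux/dma-mapping.h>
  #include <linux/gfp.h>
  #include <linux/pci.h>

  /* Hypothetical helper, for illustration: it runs in process context with
   * no spinlock held, so the allocation may sleep and GFP_KERNEL is correct.
   * From atomic context, GFP_ATOMIC would have to be used instead. */
  static void *example_alloc_template(struct pci_dev *pdev, size_t size,
                                      dma_addr_t *phys)
  {
          return dma_alloc_coherent(&pdev->dev, size, phys, GFP_KERNEL);
  }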

When memory is allocated in the other functions of 'netxen_nic_ctx.c',
GFP_KERNEL can be used because the call chain already uses GFP_KERNEL and no
lock is taken in between.
The call chain is:
  netxen_nic_attach()
    --> netxen_alloc_sw_resources()          : already uses GFP_KERNEL
    --> netxen_alloc_hw_resources()
      --> nx_fw_cmd_create_rx_ctx()
      --> nx_fw_cmd_create_tx_ctx()

When memory is allocated in 'netxen_init_dummy_dma()', GFP_KERNEL can be
used because its only call chain already uses it and no lock is acquired in
between.
The call chain is:
  --> netxen_start_firmware()
    --> netxen_request_firmware()
      --> request_firmware()
        --> _request_firmware()
          --> fw_get_filesystem_firmware()
            --> __getname()                  : already uses GFP_KERNEL
    --> netxen_init_dummy_dma()

@@
@@
-    PCI_DMA_BIDIRECTIONAL
+    DMA_BIDIRECTIONAL

@@
@@
-    PCI_DMA_TODEVICE
+    DMA_TO_DEVICE

@@
@@
-    PCI_DMA_FROMDEVICE
+    DMA_FROM_DEVICE

@@
@@
-    PCI_DMA_NONE
+    DMA_NONE

@@
expression e1, e2, e3;
@@
-    pci_alloc_consistent(e1, e2, e3)
+    dma_alloc_coherent(&e1->dev, e2, e3, GFP_)

@@
expression e1, e2, e3;
@@
-    pci_zalloc_consistent(e1, e2, e3)
+    dma_alloc_coherent(&e1->dev, e2, e3, GFP_)

@@
expression e1, e2, e3, e4;
@@
-    pci_free_consistent(e1, e2, e3, e4)
+    dma_free_coherent(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_map_single(e1, e2, e3, e4)
+    dma_map_single(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_unmap_single(e1, e2, e3, e4)
+    dma_unmap_single(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4, e5;
@@
-    pci_map_page(e1, e2, e3, e4, e5)
+    dma_map_page(&e1->dev, e2, e3, e4, e5)

@@
expression e1, e2, e3, e4;
@@
-    pci_unmap_page(e1, e2, e3, e4)
+    dma_unmap_page(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_map_sg(e1, e2, e3, e4)
+    dma_map_sg(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_unmap_sg(e1, e2, e3, e4)
+    dma_unmap_sg(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_dma_sync_single_for_cpu(e1, e2, e3, e4)
+    dma_sync_single_for_cpu(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_dma_sync_single_for_device(e1, e2, e3, e4)
+    dma_sync_single_for_device(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_dma_sync_sg_for_cpu(e1, e2, e3, e4)
+    dma_sync_sg_for_cpu(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_dma_sync_sg_for_device(e1, e2, e3, e4)
+    dma_sync_sg_for_device(&e1->dev, e2, e3, e4)

@@
expression e1, e2;
@@
-    pci_dma_mapping_error(e1, e2)
+    dma_mapping_error(&e1->dev, e2)

@@
expression e1, e2;
@@
-    pci_set_dma_mask(e1, e2)
+    dma_set_mask(&e1->dev, e2)

@@
expression e1, e2;
@@
-    pci_set_consistent_dma_mask(e1, e2)
+    dma_set_coherent_mask(&e1->dev, e2)
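
Taken together, the rules above turn a streaming mapping from the wrapper
form into the generic DMA API form. A minimal sketch under those rules
(hypothetical helper, not a call site from this driver):

  #include <linux/dma-mapping.h>
  #include <linux/errno.h>
  #include <linux/pci.h>

  /* Hypothetical TX-style mapping, for illustration:
   * old: dma = pci_map_single(pdev, buf, len, PCI_DMA_TODEVICE);
   * new: dma = dma_map_single(&pdev->dev, buf, len, DMA_TO_DEVICE); */
  static int example_map_tx_buf(struct pci_dev *pdev, void *buf, size_t len)
  {
          dma_addr_t dma;

          dma = dma_map_single(&pdev->dev, buf, len, DMA_TO_DEVICE);
          if (dma_mapping_error(&pdev->dev, dma))
                  return -ENOMEM;

          /* ... hand 'dma' to the hardware and wait for the DMA to finish ... */

          dma_unmap_single(&pdev->dev, dma, len, DMA_TO_DEVICE);
          return 0;
  }
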
Signed-off-by: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
Link: https://lore.kernel.org/r/20210113202519.487672-1-christophe.jaillet@wanadoo.fr
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parent 7c140b05
@@ -113,7 +113,8 @@ netxen_get_minidump_template(struct netxen_adapter *adapter)
 		return NX_RCODE_INVALID_ARGS;
 	}
-	addr = pci_zalloc_consistent(adapter->pdev, size, &md_template_addr);
+	addr = dma_alloc_coherent(&adapter->pdev->dev, size,
+				  &md_template_addr, GFP_KERNEL);
 	if (!addr) {
 		dev_err(&adapter->pdev->dev, "Unable to allocate dmable memory for template.\n");
 		return -ENOMEM;
@@ -133,7 +134,7 @@ netxen_get_minidump_template(struct netxen_adapter *adapter)
 		dev_err(&adapter->pdev->dev, "Failed to get minidump template, err_code : %d, requested_size : %d, actual_size : %d\n",
 			cmd.rsp.cmd, size, cmd.rsp.arg2);
 	}
-	pci_free_consistent(adapter->pdev, size, addr, md_template_addr);
+	dma_free_coherent(&adapter->pdev->dev, size, addr, md_template_addr);
 	return 0;
 }
@@ -281,14 +282,14 @@ nx_fw_cmd_create_rx_ctx(struct netxen_adapter *adapter)
 	rsp_size =
 		SIZEOF_CARDRSP_RX(nx_cardrsp_rx_ctx_t, nrds_rings, nsds_rings);
-	addr = pci_alloc_consistent(adapter->pdev,
-				rq_size, &hostrq_phys_addr);
+	addr = dma_alloc_coherent(&adapter->pdev->dev, rq_size,
+				  &hostrq_phys_addr, GFP_KERNEL);
 	if (addr == NULL)
 		return -ENOMEM;
 	prq = addr;
-	addr = pci_alloc_consistent(adapter->pdev,
-				rsp_size, &cardrsp_phys_addr);
+	addr = dma_alloc_coherent(&adapter->pdev->dev, rsp_size,
+				  &cardrsp_phys_addr, GFP_KERNEL);
 	if (addr == NULL) {
 		err = -ENOMEM;
 		goto out_free_rq;
@@ -387,9 +388,10 @@ nx_fw_cmd_create_rx_ctx(struct netxen_adapter *adapter)
 	recv_ctx->virt_port = prsp->virt_port;
 out_free_rsp:
-	pci_free_consistent(adapter->pdev, rsp_size, prsp, cardrsp_phys_addr);
+	dma_free_coherent(&adapter->pdev->dev, rsp_size, prsp,
+			  cardrsp_phys_addr);
 out_free_rq:
-	pci_free_consistent(adapter->pdev, rq_size, prq, hostrq_phys_addr);
+	dma_free_coherent(&adapter->pdev->dev, rq_size, prq, hostrq_phys_addr);
 	return err;
 }
@@ -429,14 +431,14 @@ nx_fw_cmd_create_tx_ctx(struct netxen_adapter *adapter)
 	struct netxen_cmd_args cmd;
 	rq_size = SIZEOF_HOSTRQ_TX(nx_hostrq_tx_ctx_t);
-	rq_addr = pci_alloc_consistent(adapter->pdev,
-			rq_size, &rq_phys_addr);
+	rq_addr = dma_alloc_coherent(&adapter->pdev->dev, rq_size,
+				     &rq_phys_addr, GFP_KERNEL);
 	if (!rq_addr)
 		return -ENOMEM;
 	rsp_size = SIZEOF_CARDRSP_TX(nx_cardrsp_tx_ctx_t);
-	rsp_addr = pci_alloc_consistent(adapter->pdev,
-			rsp_size, &rsp_phys_addr);
+	rsp_addr = dma_alloc_coherent(&adapter->pdev->dev, rsp_size,
+				      &rsp_phys_addr, GFP_KERNEL);
 	if (!rsp_addr) {
 		err = -ENOMEM;
 		goto out_free_rq;
@@ -491,10 +493,11 @@ nx_fw_cmd_create_tx_ctx(struct netxen_adapter *adapter)
 		err = -EIO;
 	}
-	pci_free_consistent(adapter->pdev, rsp_size, rsp_addr, rsp_phys_addr);
+	dma_free_coherent(&adapter->pdev->dev, rsp_size, rsp_addr,
+			  rsp_phys_addr);
 out_free_rq:
-	pci_free_consistent(adapter->pdev, rq_size, rq_addr, rq_phys_addr);
+	dma_free_coherent(&adapter->pdev->dev, rq_size, rq_addr, rq_phys_addr);
 	return err;
 }
@@ -745,9 +748,9 @@ int netxen_alloc_hw_resources(struct netxen_adapter *adapter)
 	recv_ctx = &adapter->recv_ctx;
 	tx_ring = adapter->tx_ring;
-	addr = pci_alloc_consistent(pdev,
-			sizeof(struct netxen_ring_ctx) + sizeof(uint32_t),
-			&recv_ctx->phys_addr);
+	addr = dma_alloc_coherent(&pdev->dev,
+				  sizeof(struct netxen_ring_ctx) + sizeof(uint32_t),
+				  &recv_ctx->phys_addr, GFP_KERNEL);
 	if (addr == NULL) {
 		dev_err(&pdev->dev, "failed to allocate hw context\n");
 		return -ENOMEM;
@@ -762,8 +765,8 @@ int netxen_alloc_hw_resources(struct netxen_adapter *adapter)
 		(__le32 *)(((char *)addr) + sizeof(struct netxen_ring_ctx));
 	/* cmd desc ring */
-	addr = pci_alloc_consistent(pdev, TX_DESC_RINGSIZE(tx_ring),
-			&tx_ring->phys_addr);
+	addr = dma_alloc_coherent(&pdev->dev, TX_DESC_RINGSIZE(tx_ring),
+				  &tx_ring->phys_addr, GFP_KERNEL);
 	if (addr == NULL) {
 		dev_err(&pdev->dev, "%s: failed to allocate tx desc ring\n",
@@ -776,9 +779,9 @@ int netxen_alloc_hw_resources(struct netxen_adapter *adapter)
 	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
 		rds_ring = &recv_ctx->rds_rings[ring];
-		addr = pci_alloc_consistent(adapter->pdev,
-				RCV_DESC_RINGSIZE(rds_ring),
-				&rds_ring->phys_addr);
+		addr = dma_alloc_coherent(&adapter->pdev->dev,
+					  RCV_DESC_RINGSIZE(rds_ring),
+					  &rds_ring->phys_addr, GFP_KERNEL);
 		if (addr == NULL) {
 			dev_err(&pdev->dev,
 				"%s: failed to allocate rds ring [%d]\n",
@@ -797,9 +800,9 @@ int netxen_alloc_hw_resources(struct netxen_adapter *adapter)
 	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
 		sds_ring = &recv_ctx->sds_rings[ring];
-		addr = pci_alloc_consistent(adapter->pdev,
-				STATUS_DESC_RINGSIZE(sds_ring),
-				&sds_ring->phys_addr);
+		addr = dma_alloc_coherent(&adapter->pdev->dev,
+					  STATUS_DESC_RINGSIZE(sds_ring),
+					  &sds_ring->phys_addr, GFP_KERNEL);
 		if (addr == NULL) {
 			dev_err(&pdev->dev,
 				"%s: failed to allocate sds ring [%d]\n",
@@ -874,19 +877,17 @@ void netxen_free_hw_resources(struct netxen_adapter *adapter)
 	recv_ctx = &adapter->recv_ctx;
 	if (recv_ctx->hwctx != NULL) {
-		pci_free_consistent(adapter->pdev,
-				sizeof(struct netxen_ring_ctx) +
-				sizeof(uint32_t),
-				recv_ctx->hwctx,
-				recv_ctx->phys_addr);
+		dma_free_coherent(&adapter->pdev->dev,
+				  sizeof(struct netxen_ring_ctx) + sizeof(uint32_t),
+				  recv_ctx->hwctx, recv_ctx->phys_addr);
 		recv_ctx->hwctx = NULL;
 	}
 	tx_ring = adapter->tx_ring;
 	if (tx_ring->desc_head != NULL) {
-		pci_free_consistent(adapter->pdev,
-				TX_DESC_RINGSIZE(tx_ring),
-				tx_ring->desc_head, tx_ring->phys_addr);
+		dma_free_coherent(&adapter->pdev->dev,
+				  TX_DESC_RINGSIZE(tx_ring),
+				  tx_ring->desc_head, tx_ring->phys_addr);
 		tx_ring->desc_head = NULL;
 	}
@@ -894,10 +895,10 @@ void netxen_free_hw_resources(struct netxen_adapter *adapter)
 		rds_ring = &recv_ctx->rds_rings[ring];
 		if (rds_ring->desc_head != NULL) {
-			pci_free_consistent(adapter->pdev,
-					RCV_DESC_RINGSIZE(rds_ring),
-					rds_ring->desc_head,
-					rds_ring->phys_addr);
+			dma_free_coherent(&adapter->pdev->dev,
+					  RCV_DESC_RINGSIZE(rds_ring),
+					  rds_ring->desc_head,
+					  rds_ring->phys_addr);
 			rds_ring->desc_head = NULL;
 		}
 	}
@@ -906,10 +907,10 @@ void netxen_free_hw_resources(struct netxen_adapter *adapter)
 		sds_ring = &recv_ctx->sds_rings[ring];
 		if (sds_ring->desc_head != NULL) {
-			pci_free_consistent(adapter->pdev,
-					STATUS_DESC_RINGSIZE(sds_ring),
-					sds_ring->desc_head,
-					sds_ring->phys_addr);
+			dma_free_coherent(&adapter->pdev->dev,
+					  STATUS_DESC_RINGSIZE(sds_ring),
+					  sds_ring->desc_head,
+					  sds_ring->phys_addr);
 			sds_ring->desc_head = NULL;
 		}
 	}
...
@@ -102,10 +102,8 @@ void netxen_release_rx_buffers(struct netxen_adapter *adapter)
 		rx_buf = &(rds_ring->rx_buf_arr[i]);
 		if (rx_buf->state == NETXEN_BUFFER_FREE)
 			continue;
-		pci_unmap_single(adapter->pdev,
-				rx_buf->dma,
-				rds_ring->dma_size,
-				PCI_DMA_FROMDEVICE);
+		dma_unmap_single(&adapter->pdev->dev, rx_buf->dma,
+				 rds_ring->dma_size, DMA_FROM_DEVICE);
 		if (rx_buf->skb != NULL)
 			dev_kfree_skb_any(rx_buf->skb);
 	}
@@ -124,16 +122,16 @@ void netxen_release_tx_buffers(struct netxen_adapter *adapter)
 	for (i = 0; i < tx_ring->num_desc; i++) {
 		buffrag = cmd_buf->frag_array;
 		if (buffrag->dma) {
-			pci_unmap_single(adapter->pdev, buffrag->dma,
-					 buffrag->length, PCI_DMA_TODEVICE);
+			dma_unmap_single(&adapter->pdev->dev, buffrag->dma,
					 buffrag->length, DMA_TO_DEVICE);
 			buffrag->dma = 0ULL;
 		}
 		for (j = 1; j < cmd_buf->frag_count; j++) {
 			buffrag++;
 			if (buffrag->dma) {
-				pci_unmap_page(adapter->pdev, buffrag->dma,
-					       buffrag->length,
-					       PCI_DMA_TODEVICE);
+				dma_unmap_page(&adapter->pdev->dev,
+					       buffrag->dma, buffrag->length,
+					       DMA_TO_DEVICE);
 				buffrag->dma = 0ULL;
 			}
 		}
@@ -1250,9 +1248,10 @@ int netxen_init_dummy_dma(struct netxen_adapter *adapter)
 	if (!NX_IS_REVISION_P2(adapter->ahw.revision_id))
 		return 0;
-	adapter->dummy_dma.addr = pci_alloc_consistent(adapter->pdev,
-				 NETXEN_HOST_DUMMY_DMA_SIZE,
-				 &adapter->dummy_dma.phys_addr);
+	adapter->dummy_dma.addr = dma_alloc_coherent(&adapter->pdev->dev,
+						     NETXEN_HOST_DUMMY_DMA_SIZE,
+						     &adapter->dummy_dma.phys_addr,
+						     GFP_KERNEL);
 	if (adapter->dummy_dma.addr == NULL) {
 		dev_err(&adapter->pdev->dev,
 			"ERROR: Could not allocate dummy DMA memory\n");
@@ -1304,10 +1303,10 @@ void netxen_free_dummy_dma(struct netxen_adapter *adapter)
 	}
 	if (i) {
-		pci_free_consistent(adapter->pdev,
-				NETXEN_HOST_DUMMY_DMA_SIZE,
-				adapter->dummy_dma.addr,
-				adapter->dummy_dma.phys_addr);
+		dma_free_coherent(&adapter->pdev->dev,
+				  NETXEN_HOST_DUMMY_DMA_SIZE,
+				  adapter->dummy_dma.addr,
+				  adapter->dummy_dma.phys_addr);
 		adapter->dummy_dma.addr = NULL;
 	} else
 		dev_err(&adapter->pdev->dev, "dma_watchdog_shutdown failed\n");
@@ -1467,10 +1466,10 @@ netxen_alloc_rx_skb(struct netxen_adapter *adapter,
 	if (!adapter->ahw.cut_through)
 		skb_reserve(skb, 2);
-	dma = pci_map_single(pdev, skb->data,
-			rds_ring->dma_size, PCI_DMA_FROMDEVICE);
-	if (pci_dma_mapping_error(pdev, dma)) {
+	dma = dma_map_single(&pdev->dev, skb->data, rds_ring->dma_size,
+			     DMA_FROM_DEVICE);
+	if (dma_mapping_error(&pdev->dev, dma)) {
 		dev_kfree_skb_any(skb);
 		buffer->skb = NULL;
 		return 1;
@@ -1491,8 +1490,8 @@ static struct sk_buff *netxen_process_rxbuf(struct netxen_adapter *adapter,
 	buffer = &rds_ring->rx_buf_arr[index];
-	pci_unmap_single(adapter->pdev, buffer->dma, rds_ring->dma_size,
-			PCI_DMA_FROMDEVICE);
+	dma_unmap_single(&adapter->pdev->dev, buffer->dma, rds_ring->dma_size,
+			 DMA_FROM_DEVICE);
 	skb = buffer->skb;
 	if (!skb)
@@ -1754,13 +1753,13 @@ int netxen_process_cmd_ring(struct netxen_adapter *adapter)
 		buffer = &tx_ring->cmd_buf_arr[sw_consumer];
 		if (buffer->skb) {
 			frag = &buffer->frag_array[0];
-			pci_unmap_single(pdev, frag->dma, frag->length,
-					 PCI_DMA_TODEVICE);
+			dma_unmap_single(&pdev->dev, frag->dma, frag->length,
+					 DMA_TO_DEVICE);
 			frag->dma = 0ULL;
 			for (i = 1; i < buffer->frag_count; i++) {
 				frag++; /* Get the next frag */
-				pci_unmap_page(pdev, frag->dma, frag->length,
-					       PCI_DMA_TODEVICE);
+				dma_unmap_page(&pdev->dev, frag->dma,
+					       frag->length, DMA_TO_DEVICE);
 				frag->dma = 0ULL;
 			}
...
@@ -243,8 +243,8 @@ static int nx_set_dma_mask(struct netxen_adapter *adapter)
 		cmask = mask;
 	}
-	if (pci_set_dma_mask(pdev, mask) == 0 &&
-		pci_set_consistent_dma_mask(pdev, cmask) == 0) {
+	if (dma_set_mask(&pdev->dev, mask) == 0 &&
+	    dma_set_coherent_mask(&pdev->dev, cmask) == 0) {
 		adapter->pci_using_dac = 1;
 		return 0;
 	}
@@ -277,13 +277,13 @@ nx_update_dma_mask(struct netxen_adapter *adapter)
 		mask = DMA_BIT_MASK(32+shift);
-		err = pci_set_dma_mask(pdev, mask);
+		err = dma_set_mask(&pdev->dev, mask);
 		if (err)
 			goto err_out;
 		if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
-			err = pci_set_consistent_dma_mask(pdev, mask);
+			err = dma_set_coherent_mask(&pdev->dev, mask);
 			if (err)
 				goto err_out;
 		}
@@ -293,8 +293,8 @@ nx_update_dma_mask(struct netxen_adapter *adapter)
 	return 0;
 err_out:
-	pci_set_dma_mask(pdev, old_mask);
-	pci_set_consistent_dma_mask(pdev, old_cmask);
+	dma_set_mask(&pdev->dev, old_mask);
+	dma_set_coherent_mask(&pdev->dev, old_cmask);
 	return err;
 }
@@ -1978,9 +1978,9 @@ netxen_map_tx_skb(struct pci_dev *pdev,
 	nr_frags = skb_shinfo(skb)->nr_frags;
 	nf = &pbuf->frag_array[0];
-	map = pci_map_single(pdev, skb->data,
-			skb_headlen(skb), PCI_DMA_TODEVICE);
-	if (pci_dma_mapping_error(pdev, map))
+	map = dma_map_single(&pdev->dev, skb->data, skb_headlen(skb),
+			     DMA_TO_DEVICE);
+	if (dma_mapping_error(&pdev->dev, map))
 		goto out_err;
 	nf->dma = map;
@@ -2004,12 +2004,12 @@ netxen_map_tx_skb(struct pci_dev *pdev,
 unwind:
 	while (--i >= 0) {
 		nf = &pbuf->frag_array[i+1];
-		pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE);
+		dma_unmap_page(&pdev->dev, nf->dma, nf->length, DMA_TO_DEVICE);
 		nf->dma = 0ULL;
 	}
 	nf = &pbuf->frag_array[0];
-	pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);
+	dma_unmap_single(&pdev->dev, nf->dma, skb_headlen(skb), DMA_TO_DEVICE);
 	nf->dma = 0ULL;
 out_err: