Commit 3f69f4e0 authored by Christophe JAILLET, committed by Jason Gunthorpe

RDMA: switch from 'pci_' to 'dma_' API

The wrappers in include/linux/pci-dma-compat.h should go away.

The patch has been generated with the coccinelle script below.

It has been hand modified to use 'dma_set_mask_and_coherent()' instead of
'pci_set_dma_mask()/pci_set_consistent_dma_mask()' when applicable.
This is less verbose.

It has been compile tested.
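
As an illustration of that manual simplification, here is a minimal sketch of
the before/after pattern (hypothetical probe helper, not taken from this patch):

#include <linux/pci.h>
#include <linux/dma-mapping.h>

/* Hypothetical example: 'pdev' is the PCI device being probed. */
static int example_set_dma_masks(struct pci_dev *pdev)
{
        int ret;

        /*
         * Old style, via the include/linux/pci-dma-compat.h wrappers:
         *
         *      ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
         *      if (!ret)
         *              ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
         */

        /* New style: one call sets both the streaming and coherent masks. */
        ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
        if (ret)
                dev_err(&pdev->dev, "unable to set 64-bit DMA mask\n");

        return ret;
}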

@@
@@
-    PCI_DMA_BIDIRECTIONAL
+    DMA_BIDIRECTIONAL

@@
@@
-    PCI_DMA_TODEVICE
+    DMA_TO_DEVICE

@@
@@
-    PCI_DMA_FROMDEVICE
+    DMA_FROM_DEVICE

@@
@@
-    PCI_DMA_NONE
+    DMA_NONE

@@
expression e1, e2, e3;
@@
-    pci_alloc_consistent(e1, e2, e3)
+    dma_alloc_coherent(&e1->dev, e2, e3, GFP_)

@@
expression e1, e2, e3;
@@
-    pci_zalloc_consistent(e1, e2, e3)
+    dma_alloc_coherent(&e1->dev, e2, e3, GFP_)

@@
expression e1, e2, e3, e4;
@@
-    pci_free_consistent(e1, e2, e3, e4)
+    dma_free_coherent(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_map_single(e1, e2, e3, e4)
+    dma_map_single(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_unmap_single(e1, e2, e3, e4)
+    dma_unmap_single(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4, e5;
@@
-    pci_map_page(e1, e2, e3, e4, e5)
+    dma_map_page(&e1->dev, e2, e3, e4, e5)

@@
expression e1, e2, e3, e4;
@@
-    pci_unmap_page(e1, e2, e3, e4)
+    dma_unmap_page(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_map_sg(e1, e2, e3, e4)
+    dma_map_sg(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_unmap_sg(e1, e2, e3, e4)
+    dma_unmap_sg(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_dma_sync_single_for_cpu(e1, e2, e3, e4)
+    dma_sync_single_for_cpu(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_dma_sync_single_for_device(e1, e2, e3, e4)
+    dma_sync_single_for_device(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_dma_sync_sg_for_cpu(e1, e2, e3, e4)
+    dma_sync_sg_for_cpu(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
-    pci_dma_sync_sg_for_device(e1, e2, e3, e4)
+    dma_sync_sg_for_device(&e1->dev, e2, e3, e4)

@@
expression e1, e2;
@@
-    pci_dma_mapping_error(e1, e2)
+    dma_mapping_error(&e1->dev, e2)

@@
expression e1, e2;
@@
-    pci_set_dma_mask(e1, e2)
+    dma_set_mask(&e1->dev, e2)

@@
expression e1, e2;
@@
-    pci_set_consistent_dma_mask(e1, e2)
+    dma_set_coherent_mask(&e1->dev, e2)
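
Note that the two allocation rules above intentionally leave the GFP flag as
the bare token 'GFP_', so each converted call has to be completed by hand (the
old pci_alloc_consistent() wrapper always passed GFP_ATOMIC). A minimal sketch
of that manual fix-up, for a hypothetical call site in a sleepable context:

        /* Script output -- does not compile until the flag is chosen: */
        buf = dma_alloc_coherent(&pdev->dev, size, &dma_handle, GFP_);

        /* Hand-completed, assuming this path may sleep: */
        buf = dma_alloc_coherent(&pdev->dev, size, &dma_handle, GFP_KERNEL);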

Link: https://lore.kernel.org/r/259e53b7a00f64bf081d41da8761b171b2ad8f5c.1629634798.git.christophe.jaillet@wanadoo.fr
Signed-off-by: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
parent 03da1b26
@@ -92,25 +92,18 @@ int hfi1_pcie_init(struct hfi1_devdata *dd)
                 goto bail;
         }
-        ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+        ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
         if (ret) {
                 /*
                  * If the 64 bit setup fails, try 32 bit. Some systems
                  * do not setup 64 bit maps on systems with 2GB or less
                  * memory installed.
                  */
-                ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+                ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
                 if (ret) {
                         dd_dev_err(dd, "Unable to set DMA mask: %d\n", ret);
                         goto bail;
                 }
-                ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
-        } else {
-                ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
-        }
-        if (ret) {
-                dd_dev_err(dd, "Unable to set DMA consistent mask: %d\n", ret);
-                goto bail;
         }
         pci_set_master(pdev);
@@ -177,8 +177,8 @@ static void unpin_rcv_pages(struct hfi1_filedata *fd,
         struct mm_struct *mm;
         if (mapped) {
-                pci_unmap_single(dd->pcidev, node->dma_addr,
-                                 node->npages * PAGE_SIZE, PCI_DMA_FROMDEVICE);
+                dma_unmap_single(&dd->pcidev->dev, node->dma_addr,
+                                 node->npages * PAGE_SIZE, DMA_FROM_DEVICE);
                 pages = &node->pages[idx];
                 mm = mm_from_tid_node(node);
         } else {
@@ -739,9 +739,8 @@ static int set_rcvarray_entry(struct hfi1_filedata *fd,
         if (!node)
                 return -ENOMEM;
-        phys = pci_map_single(dd->pcidev,
-                              __va(page_to_phys(pages[0])),
-                              npages * PAGE_SIZE, PCI_DMA_FROMDEVICE);
+        phys = dma_map_single(&dd->pcidev->dev, __va(page_to_phys(pages[0])),
+                              npages * PAGE_SIZE, DMA_FROM_DEVICE);
         if (dma_mapping_error(&dd->pcidev->dev, phys)) {
                 dd_dev_err(dd, "Failed to DMA map Exp Rcv pages 0x%llx\n",
                            phys);
@@ -783,8 +782,8 @@ static int set_rcvarray_entry(struct hfi1_filedata *fd,
                 hfi1_cdbg(TID, "Failed to insert RB node %u 0x%lx, 0x%lx %d",
                           node->rcventry, node->notifier.interval_tree.start,
                           node->phys, ret);
-                pci_unmap_single(dd->pcidev, phys, npages * PAGE_SIZE,
-                                 PCI_DMA_FROMDEVICE);
+                dma_unmap_single(&dd->pcidev->dev, phys, npages * PAGE_SIZE,
+                                 DMA_FROM_DEVICE);
                 kfree(node);
                 return -EFAULT;
         }
@@ -617,9 +617,9 @@ static void mthca_free_eq(struct mthca_dev *dev,
         mthca_free_mr(dev, &eq->mr);
         for (i = 0; i < npages; ++i)
-                pci_free_consistent(dev->pdev, PAGE_SIZE,
-                                    eq->page_list[i].buf,
-                                    dma_unmap_addr(&eq->page_list[i], mapping));
+                dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
+                                  eq->page_list[i].buf,
+                                  dma_unmap_addr(&eq->page_list[i], mapping));
         kfree(eq->page_list);
         mthca_free_mailbox(dev, mailbox);
@@ -739,17 +739,18 @@ int mthca_map_eq_icm(struct mthca_dev *dev, u64 icm_virt)
         dev->eq_table.icm_page = alloc_page(GFP_HIGHUSER);
         if (!dev->eq_table.icm_page)
                 return -ENOMEM;
-        dev->eq_table.icm_dma = pci_map_page(dev->pdev, dev->eq_table.icm_page, 0,
-                                             PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
-        if (pci_dma_mapping_error(dev->pdev, dev->eq_table.icm_dma)) {
+        dev->eq_table.icm_dma =
+                dma_map_page(&dev->pdev->dev, dev->eq_table.icm_page, 0,
+                             PAGE_SIZE, DMA_BIDIRECTIONAL);
+        if (dma_mapping_error(&dev->pdev->dev, dev->eq_table.icm_dma)) {
                 __free_page(dev->eq_table.icm_page);
                 return -ENOMEM;
         }
         ret = mthca_MAP_ICM_page(dev, dev->eq_table.icm_dma, icm_virt);
         if (ret) {
-                pci_unmap_page(dev->pdev, dev->eq_table.icm_dma, PAGE_SIZE,
-                               PCI_DMA_BIDIRECTIONAL);
+                dma_unmap_page(&dev->pdev->dev, dev->eq_table.icm_dma,
+                               PAGE_SIZE, DMA_BIDIRECTIONAL);
                 __free_page(dev->eq_table.icm_page);
         }
@@ -759,8 +760,8 @@ int mthca_map_eq_icm(struct mthca_dev *dev, u64 icm_virt)
 void mthca_unmap_eq_icm(struct mthca_dev *dev)
 {
         mthca_UNMAP_ICM(dev, dev->eq_table.icm_virt, 1);
-        pci_unmap_page(dev->pdev, dev->eq_table.icm_dma, PAGE_SIZE,
-                       PCI_DMA_BIDIRECTIONAL);
+        dma_unmap_page(&dev->pdev->dev, dev->eq_table.icm_dma, PAGE_SIZE,
+                       DMA_BIDIRECTIONAL);
         __free_page(dev->eq_table.icm_page);
 }
@@ -937,26 +937,15 @@ static int __mthca_init_one(struct pci_dev *pdev, int hca_type)
         pci_set_master(pdev);
-        err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+        err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
         if (err) {
                 dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask.\n");
-                err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+                err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
                 if (err) {
                         dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting.\n");
                         goto err_free_res;
                 }
         }
-        err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
-        if (err) {
-                dev_warn(&pdev->dev, "Warning: couldn't set 64-bit "
-                         "consistent PCI DMA mask.\n");
-                err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
-                if (err) {
-                        dev_err(&pdev->dev, "Can't set consistent PCI DMA mask, "
-                                "aborting.\n");
-                        goto err_free_res;
-                }
-        }
         /* We can handle large RDMA requests, so allow larger segments. */
         dma_set_max_seg_size(&pdev->dev, 1024 * 1024 * 1024);
@@ -66,8 +66,8 @@ static void mthca_free_icm_pages(struct mthca_dev *dev, struct mthca_icm_chunk *
         int i;
         if (chunk->nsg > 0)
-                pci_unmap_sg(dev->pdev, chunk->mem, chunk->npages,
-                             PCI_DMA_BIDIRECTIONAL);
+                dma_unmap_sg(&dev->pdev->dev, chunk->mem, chunk->npages,
+                             DMA_BIDIRECTIONAL);
         for (i = 0; i < chunk->npages; ++i)
                 __free_pages(sg_page(&chunk->mem[i]),
@@ -184,9 +184,10 @@ struct mthca_icm *mthca_alloc_icm(struct mthca_dev *dev, int npages,
                 if (coherent)
                         ++chunk->nsg;
                 else if (chunk->npages == MTHCA_ICM_CHUNK_LEN) {
-                        chunk->nsg = pci_map_sg(dev->pdev, chunk->mem,
-                                                chunk->npages,
-                                                PCI_DMA_BIDIRECTIONAL);
+                        chunk->nsg =
+                                dma_map_sg(&dev->pdev->dev, chunk->mem,
+                                           chunk->npages,
+                                           DMA_BIDIRECTIONAL);
                         if (chunk->nsg <= 0)
                                 goto fail;
@@ -204,9 +205,8 @@ struct mthca_icm *mthca_alloc_icm(struct mthca_dev *dev, int npages,
         }
         if (!coherent && chunk) {
-                chunk->nsg = pci_map_sg(dev->pdev, chunk->mem,
-                                        chunk->npages,
-                                        PCI_DMA_BIDIRECTIONAL);
+                chunk->nsg = dma_map_sg(&dev->pdev->dev, chunk->mem,
+                                        chunk->npages, DMA_BIDIRECTIONAL);
                 if (chunk->nsg <= 0)
                         goto fail;
@@ -480,7 +480,8 @@ int mthca_map_user_db(struct mthca_dev *dev, struct mthca_uar *uar,
         sg_set_page(&db_tab->page[i].mem, pages[0], MTHCA_ICM_PAGE_SIZE,
                     uaddr & ~PAGE_MASK);
-        ret = pci_map_sg(dev->pdev, &db_tab->page[i].mem, 1, PCI_DMA_TODEVICE);
+        ret = dma_map_sg(&dev->pdev->dev, &db_tab->page[i].mem, 1,
+                         DMA_TO_DEVICE);
         if (ret < 0) {
                 unpin_user_page(pages[0]);
                 goto out;
@@ -489,7 +490,8 @@ int mthca_map_user_db(struct mthca_dev *dev, struct mthca_uar *uar,
         ret = mthca_MAP_ICM_page(dev, sg_dma_address(&db_tab->page[i].mem),
                                  mthca_uarc_virt(dev, uar, i));
         if (ret) {
-                pci_unmap_sg(dev->pdev, &db_tab->page[i].mem, 1, PCI_DMA_TODEVICE);
+                dma_unmap_sg(&dev->pdev->dev, &db_tab->page[i].mem, 1,
+                             DMA_TO_DEVICE);
                 unpin_user_page(sg_page(&db_tab->page[i].mem));
                 goto out;
         }
@@ -555,7 +557,8 @@ void mthca_cleanup_user_db_tab(struct mthca_dev *dev, struct mthca_uar *uar,
         for (i = 0; i < dev->uar_table.uarc_size / MTHCA_ICM_PAGE_SIZE; ++i) {
                 if (db_tab->page[i].uvirt) {
                         mthca_UNMAP_ICM(dev, mthca_uarc_virt(dev, uar, i), 1);
-                        pci_unmap_sg(dev->pdev, &db_tab->page[i].mem, 1, PCI_DMA_TODEVICE);
+                        dma_unmap_sg(&dev->pdev->dev, &db_tab->page[i].mem, 1,
+                                     DMA_TO_DEVICE);
                         unpin_user_page(sg_page(&db_tab->page[i].mem));
                 }
         }
@@ -429,8 +429,8 @@ static int qib_tid_update(struct qib_ctxtdata *rcd, struct file *fp,
                         dd->f_put_tid(dd, &tidbase[tid],
                                       RCVHQ_RCV_TYPE_EXPECTED,
                                       dd->tidinvalid);
-                        pci_unmap_page(dd->pcidev, phys, PAGE_SIZE,
-                                       PCI_DMA_FROMDEVICE);
+                        dma_unmap_page(&dd->pcidev->dev, phys,
+                                       PAGE_SIZE, DMA_FROM_DEVICE);
                         dd->pageshadow[ctxttid + tid] = NULL;
                 }
         }
@@ -544,8 +544,8 @@ static int qib_tid_free(struct qib_ctxtdata *rcd, unsigned subctxt,
                          */
                         dd->f_put_tid(dd, &tidbase[tid],
                                       RCVHQ_RCV_TYPE_EXPECTED, dd->tidinvalid);
-                        pci_unmap_page(dd->pcidev, phys, PAGE_SIZE,
-                                       PCI_DMA_FROMDEVICE);
+                        dma_unmap_page(&dd->pcidev->dev, phys, PAGE_SIZE,
+                                       DMA_FROM_DEVICE);
                         qib_release_user_pages(&p, 1);
                 }
         }
@@ -1781,8 +1781,8 @@ static void unlock_expected_tids(struct qib_ctxtdata *rcd)
                 phys = dd->physshadow[i];
                 dd->physshadow[i] = dd->tidinvalid;
                 dd->pageshadow[i] = NULL;
-                pci_unmap_page(dd->pcidev, phys, PAGE_SIZE,
-                               PCI_DMA_FROMDEVICE);
+                dma_unmap_page(&dd->pcidev->dev, phys, PAGE_SIZE,
+                               DMA_FROM_DEVICE);
                 qib_release_user_pages(&p, 1);
                 cnt++;
         }
@@ -1335,8 +1335,8 @@ static void cleanup_device_data(struct qib_devdata *dd)
                 for (i = ctxt_tidbase; i < maxtid; i++) {
                         if (!tmpp[i])
                                 continue;
-                        pci_unmap_page(dd->pcidev, tmpd[i],
-                                       PAGE_SIZE, PCI_DMA_FROMDEVICE);
+                        dma_unmap_page(&dd->pcidev->dev, tmpd[i],
+                                       PAGE_SIZE, DMA_FROM_DEVICE);
                         qib_release_user_pages(&tmpp[i], 1);
                         tmpp[i] = NULL;
                 }
@@ -60,15 +60,15 @@ int qib_map_page(struct pci_dev *hwdev, struct page *page, dma_addr_t *daddr)
 {
         dma_addr_t phys;
-        phys = pci_map_page(hwdev, page, 0, PAGE_SIZE, PCI_DMA_FROMDEVICE);
-        if (pci_dma_mapping_error(hwdev, phys))
+        phys = dma_map_page(&hwdev->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
+        if (dma_mapping_error(&hwdev->dev, phys))
                 return -ENOMEM;
         if (!phys) {
-                pci_unmap_page(hwdev, phys, PAGE_SIZE, PCI_DMA_FROMDEVICE);
-                phys = pci_map_page(hwdev, page, 0, PAGE_SIZE,
-                                    PCI_DMA_FROMDEVICE);
-                if (pci_dma_mapping_error(hwdev, phys))
+                dma_unmap_page(&hwdev->dev, phys, PAGE_SIZE, DMA_FROM_DEVICE);
+                phys = dma_map_page(&hwdev->dev, page, 0, PAGE_SIZE,
+                                    DMA_FROM_DEVICE);
+                if (dma_mapping_error(&hwdev->dev, phys))
                         return -ENOMEM;
                 /*
                  * FIXME: If we get 0 again, we should keep this page,
@@ -811,18 +811,10 @@ static int pvrdma_pci_probe(struct pci_dev *pdev,
         }
         /* Enable 64-Bit DMA */
-        if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
-                ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
-                if (ret != 0) {
-                        dev_err(&pdev->dev,
-                                "pci_set_consistent_dma_mask failed\n");
-                        goto err_free_resource;
-                }
-        } else {
-                ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
-                if (ret != 0) {
-                        dev_err(&pdev->dev,
-                                "pci_set_dma_mask failed\n");
+        if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) != 0) {
+                ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+                if (ret != 0) {
+                        dev_err(&pdev->dev, "dma_set_mask failed\n");
                         goto err_free_resource;
                 }
         }