Commit 7f9b0f77 authored by Christoph Hellwig, committed by Martin K. Petersen

scsi: fnic: switch to generic DMA API

Switch from the legacy PCI DMA API to the generic DMA API.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
parent c22b332d
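
For readers following along, the conversion is mechanical: pci_map_single()/pci_unmap_single() become dma_map_single()/dma_unmap_single() against &pdev->dev, the PCI_DMA_* direction flags become DMA_*, and pci_dma_mapping_error() becomes dma_mapping_error(). A minimal, self-contained sketch of the new-style streaming mapping (illustrative function and variable names, not code from this driver):

#include <linux/dma-mapping.h>
#include <linux/pci.h>

/* Sketch only: map a buffer for a device read, check for errors, unmap. */
static int example_map_tx(struct pci_dev *pdev, void *buf, size_t len)
{
        dma_addr_t pa;

        /* old: pa = pci_map_single(pdev, buf, len, PCI_DMA_TODEVICE); */
        pa = dma_map_single(&pdev->dev, buf, len, DMA_TO_DEVICE);

        /* old: if (pci_dma_mapping_error(pdev, pa)) ... */
        if (dma_mapping_error(&pdev->dev, pa))
                return -ENOMEM;

        /* ... hand pa to the hardware here ... */

        /* old: pci_unmap_single(pdev, pa, len, PCI_DMA_TODEVICE); */
        dma_unmap_single(&pdev->dev, pa, len, DMA_TO_DEVICE);
        return 0;
}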
@@ -836,8 +836,8 @@ static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc
         u32 fcp_bytes_written = 0;
         unsigned long flags;
 
-        pci_unmap_single(fnic->pdev, buf->dma_addr, buf->len,
-                         PCI_DMA_FROMDEVICE);
+        dma_unmap_single(&fnic->pdev->dev, buf->dma_addr, buf->len,
+                         DMA_FROM_DEVICE);
         skb = buf->os_buf;
         fp = (struct fc_frame *)skb;
         buf->os_buf = NULL;
@@ -977,9 +977,8 @@ int fnic_alloc_rq_frame(struct vnic_rq *rq)
         skb_reset_transport_header(skb);
         skb_reset_network_header(skb);
         skb_put(skb, len);
-        pa = pci_map_single(fnic->pdev, skb->data, len, PCI_DMA_FROMDEVICE);
-
-        if (pci_dma_mapping_error(fnic->pdev, pa)) {
+        pa = dma_map_single(&fnic->pdev->dev, skb->data, len, DMA_FROM_DEVICE);
+        if (dma_mapping_error(&fnic->pdev->dev, pa)) {
                 r = -ENOMEM;
                 printk(KERN_ERR "PCI mapping failed with error %d\n", r);
                 goto free_skb;
@@ -998,8 +997,8 @@ void fnic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
         struct fc_frame *fp = buf->os_buf;
         struct fnic *fnic = vnic_dev_priv(rq->vdev);
 
-        pci_unmap_single(fnic->pdev, buf->dma_addr, buf->len,
-                         PCI_DMA_FROMDEVICE);
+        dma_unmap_single(&fnic->pdev->dev, buf->dma_addr, buf->len,
+                         DMA_FROM_DEVICE);
 
         dev_kfree_skb(fp_skb(fp));
         buf->os_buf = NULL;
@@ -1018,7 +1017,6 @@ void fnic_eth_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
         struct ethhdr *eth_hdr;
         struct vlan_ethhdr *vlan_hdr;
         unsigned long flags;
-        int r;
 
         if (!fnic->vlan_hw_insert) {
                 eth_hdr = (struct ethhdr *)skb_mac_header(skb);
@@ -1038,11 +1036,10 @@ void fnic_eth_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
                 }
         }
 
-        pa = pci_map_single(fnic->pdev, skb->data, skb->len, PCI_DMA_TODEVICE);
-
-        r = pci_dma_mapping_error(fnic->pdev, pa);
-        if (r) {
-                printk(KERN_ERR "PCI mapping failed with error %d\n", r);
+        pa = dma_map_single(&fnic->pdev->dev, skb->data, skb->len,
+                            DMA_TO_DEVICE);
+        if (dma_mapping_error(&fnic->pdev->dev, pa)) {
+                printk(KERN_ERR "DMA mapping failed\n");
                 goto free_skb;
         }
 
@@ -1058,7 +1055,7 @@ void fnic_eth_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
 
 irq_restore:
         spin_unlock_irqrestore(&fnic->wq_lock[0], flags);
-        pci_unmap_single(fnic->pdev, pa, skb->len, PCI_DMA_TODEVICE);
+        dma_unmap_single(&fnic->pdev->dev, pa, skb->len, DMA_TO_DEVICE);
 free_skb:
         kfree_skb(skb);
 }
@@ -1115,9 +1112,8 @@ static int fnic_send_frame(struct fnic *fnic, struct fc_frame *fp)
         if (FC_FCOE_VER)
                 FC_FCOE_ENCAPS_VER(fcoe_hdr, FC_FCOE_VER);
 
-        pa = pci_map_single(fnic->pdev, eth_hdr, tot_len, PCI_DMA_TODEVICE);
-
-        if (pci_dma_mapping_error(fnic->pdev, pa)) {
+        pa = dma_map_single(&fnic->pdev->dev, eth_hdr, tot_len, DMA_TO_DEVICE);
+        if (dma_mapping_error(&fnic->pdev->dev, pa)) {
                 ret = -ENOMEM;
                 printk(KERN_ERR "DMA map failed with error %d\n", ret);
                 goto free_skb_on_err;
@@ -1131,8 +1127,7 @@ static int fnic_send_frame(struct fnic *fnic, struct fc_frame *fp)
         spin_lock_irqsave(&fnic->wq_lock[0], flags);
 
         if (!vnic_wq_desc_avail(wq)) {
-                pci_unmap_single(fnic->pdev, pa,
-                                 tot_len, PCI_DMA_TODEVICE);
+                dma_unmap_single(&fnic->pdev->dev, pa, tot_len, DMA_TO_DEVICE);
                 ret = -1;
                 goto irq_restore;
         }
@@ -1247,8 +1242,8 @@ static void fnic_wq_complete_frame_send(struct vnic_wq *wq,
         struct fc_frame *fp = (struct fc_frame *)skb;
         struct fnic *fnic = vnic_dev_priv(wq->vdev);
 
-        pci_unmap_single(fnic->pdev, buf->dma_addr,
-                         buf->len, PCI_DMA_TODEVICE);
+        dma_unmap_single(&fnic->pdev->dev, buf->dma_addr, buf->len,
+                         DMA_TO_DEVICE);
         dev_kfree_skb_irq(fp_skb(fp));
         buf->os_buf = NULL;
 }
@@ -1290,8 +1285,8 @@ void fnic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
         struct fc_frame *fp = buf->os_buf;
         struct fnic *fnic = vnic_dev_priv(wq->vdev);
 
-        pci_unmap_single(fnic->pdev, buf->dma_addr,
-                         buf->len, PCI_DMA_TODEVICE);
+        dma_unmap_single(&fnic->pdev->dev, buf->dma_addr, buf->len,
+                         DMA_TO_DEVICE);
 
         dev_kfree_skb(fp_skb(fp));
         buf->os_buf = NULL;
@@ -611,30 +611,15 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
          * limitation for the device. Try 64-bit first, and
          * fail to 32-bit.
          */
-        err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+        err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
         if (err) {
-                err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+                err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
                 if (err) {
                         shost_printk(KERN_ERR, fnic->lport->host,
                                      "No usable DMA configuration "
                                      "aborting\n");
                         goto err_out_release_regions;
                 }
-                err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
-                if (err) {
-                        shost_printk(KERN_ERR, fnic->lport->host,
-                                     "Unable to obtain 32-bit DMA "
-                                     "for consistent allocations, aborting.\n");
-                        goto err_out_release_regions;
-                }
-        } else {
-                err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
-                if (err) {
-                        shost_printk(KERN_ERR, fnic->lport->host,
-                                     "Unable to obtain 64-bit DMA "
-                                     "for consistent allocations, aborting.\n");
-                        goto err_out_release_regions;
-                }
         }
 
         /* Map vNIC resources from BAR0 */
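
The fnic_probe() hunk above also drops the separate pci_set_consistent_dma_mask() calls: dma_set_mask_and_coherent() sets the streaming and coherent masks in one step. A minimal sketch of the same try-64-bit-then-fall-back-to-32-bit pattern in a generic probe routine (illustrative name, not this driver's code):

#include <linux/dma-mapping.h>
#include <linux/pci.h>

/* Sketch only: prefer a 64-bit DMA mask, fall back to 32-bit. */
static int example_set_dma_mask(struct pci_dev *pdev)
{
        int err;

        err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
        if (err)
                err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
        if (err)
                dev_err(&pdev->dev, "no usable DMA configuration\n");
        return err;
}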
@@ -126,17 +126,17 @@ static void fnic_release_ioreq_buf(struct fnic *fnic,
                                    struct scsi_cmnd *sc)
 {
         if (io_req->sgl_list_pa)
-                pci_unmap_single(fnic->pdev, io_req->sgl_list_pa,
+                dma_unmap_single(&fnic->pdev->dev, io_req->sgl_list_pa,
                                  sizeof(io_req->sgl_list[0]) * io_req->sgl_cnt,
-                                 PCI_DMA_TODEVICE);
+                                 DMA_TO_DEVICE);
         scsi_dma_unmap(sc);
 
         if (io_req->sgl_cnt)
                 mempool_free(io_req->sgl_list_alloc,
                              fnic->io_sgl_pool[io_req->sgl_type]);
         if (io_req->sense_buf_pa)
-                pci_unmap_single(fnic->pdev, io_req->sense_buf_pa,
-                                 SCSI_SENSE_BUFFERSIZE, PCI_DMA_FROMDEVICE);
+                dma_unmap_single(&fnic->pdev->dev, io_req->sense_buf_pa,
+                                 SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
 }
 
 /* Free up Copy Wq descriptors. Called with copy_wq lock held */
@@ -330,7 +330,6 @@ static inline int fnic_queue_wq_copy_desc(struct fnic *fnic,
         int flags;
         u8 exch_flags;
         struct scsi_lun fc_lun;
-        int r;
 
         if (sg_count) {
                 /* For each SGE, create a device desc entry */
@@ -342,30 +341,25 @@ static inline int fnic_queue_wq_copy_desc(struct fnic *fnic,
                         desc++;
                 }
 
-                io_req->sgl_list_pa = pci_map_single
-                        (fnic->pdev,
-                         io_req->sgl_list,
-                         sizeof(io_req->sgl_list[0]) * sg_count,
-                         PCI_DMA_TODEVICE);
-
-                r = pci_dma_mapping_error(fnic->pdev, io_req->sgl_list_pa);
-                if (r) {
-                        printk(KERN_ERR "PCI mapping failed with error %d\n", r);
+                io_req->sgl_list_pa = dma_map_single(&fnic->pdev->dev,
+                                io_req->sgl_list,
+                                sizeof(io_req->sgl_list[0]) * sg_count,
+                                DMA_TO_DEVICE);
+                if (dma_mapping_error(&fnic->pdev->dev, io_req->sgl_list_pa)) {
+                        printk(KERN_ERR "DMA mapping failed\n");
                         return SCSI_MLQUEUE_HOST_BUSY;
                 }
         }
 
-        io_req->sense_buf_pa = pci_map_single(fnic->pdev,
+        io_req->sense_buf_pa = dma_map_single(&fnic->pdev->dev,
                                               sc->sense_buffer,
                                               SCSI_SENSE_BUFFERSIZE,
-                                              PCI_DMA_FROMDEVICE);
-
-        r = pci_dma_mapping_error(fnic->pdev, io_req->sense_buf_pa);
-        if (r) {
-                pci_unmap_single(fnic->pdev, io_req->sgl_list_pa,
+                                              DMA_FROM_DEVICE);
+        if (dma_mapping_error(&fnic->pdev->dev, io_req->sense_buf_pa)) {
+                dma_unmap_single(&fnic->pdev->dev, io_req->sgl_list_pa,
                                  sizeof(io_req->sgl_list[0]) * sg_count,
-                                 PCI_DMA_TODEVICE);
-                printk(KERN_ERR "PCI mapping failed with error %d\n", r);
+                                 DMA_TO_DEVICE);
+                printk(KERN_ERR "DMA mapping failed\n");
                 return SCSI_MLQUEUE_HOST_BUSY;
         }
@@ -195,9 +195,9 @@ int vnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring,
 {
         vnic_dev_desc_ring_size(ring, desc_count, desc_size);
 
-        ring->descs_unaligned = pci_alloc_consistent(vdev->pdev,
+        ring->descs_unaligned = dma_alloc_coherent(&vdev->pdev->dev,
                 ring->size_unaligned,
-                &ring->base_addr_unaligned);
+                &ring->base_addr_unaligned, GFP_KERNEL);
 
         if (!ring->descs_unaligned) {
                 printk(KERN_ERR
@@ -221,7 +221,7 @@ int vnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring,
 void vnic_dev_free_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring)
 {
         if (ring->descs) {
-                pci_free_consistent(vdev->pdev,
+                dma_free_coherent(&vdev->pdev->dev,
                         ring->size_unaligned,
                         ring->descs_unaligned,
                         ring->base_addr_unaligned);
@@ -298,9 +298,9 @@ int vnic_dev_fw_info(struct vnic_dev *vdev,
         int err = 0;
 
         if (!vdev->fw_info) {
-                vdev->fw_info = pci_alloc_consistent(vdev->pdev,
+                vdev->fw_info = dma_alloc_coherent(&vdev->pdev->dev,
                         sizeof(struct vnic_devcmd_fw_info),
-                        &vdev->fw_info_pa);
+                        &vdev->fw_info_pa, GFP_KERNEL);
                 if (!vdev->fw_info)
                         return -ENOMEM;
@@ -361,8 +361,8 @@ int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats)
         int wait = 1000;
 
         if (!vdev->stats) {
-                vdev->stats = pci_alloc_consistent(vdev->pdev,
-                        sizeof(struct vnic_stats), &vdev->stats_pa);
+                vdev->stats = dma_alloc_coherent(&vdev->pdev->dev,
+                        sizeof(struct vnic_stats), &vdev->stats_pa, GFP_KERNEL);
                 if (!vdev->stats)
                         return -ENOMEM;
         }
@@ -523,9 +523,9 @@ int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr)
         int wait = 1000;
 
         if (!vdev->notify) {
-                vdev->notify = pci_alloc_consistent(vdev->pdev,
+                vdev->notify = dma_alloc_coherent(&vdev->pdev->dev,
                         sizeof(struct vnic_devcmd_notify),
-                        &vdev->notify_pa);
+                        &vdev->notify_pa, GFP_KERNEL);
                 if (!vdev->notify)
                         return -ENOMEM;
         }
@@ -647,21 +647,21 @@ void vnic_dev_unregister(struct vnic_dev *vdev)
 {
         if (vdev) {
                 if (vdev->notify)
-                        pci_free_consistent(vdev->pdev,
+                        dma_free_coherent(&vdev->pdev->dev,
                                 sizeof(struct vnic_devcmd_notify),
                                 vdev->notify,
                                 vdev->notify_pa);
                 if (vdev->linkstatus)
-                        pci_free_consistent(vdev->pdev,
+                        dma_free_coherent(&vdev->pdev->dev,
                                 sizeof(u32),
                                 vdev->linkstatus,
                                 vdev->linkstatus_pa);
                 if (vdev->stats)
-                        pci_free_consistent(vdev->pdev,
+                        dma_free_coherent(&vdev->pdev->dev,
                                 sizeof(struct vnic_stats),
                                 vdev->stats, vdev->stats_pa);
                 if (vdev->fw_info)
-                        pci_free_consistent(vdev->pdev,
+                        dma_free_coherent(&vdev->pdev->dev,
                                 sizeof(struct vnic_devcmd_fw_info),
                                 vdev->fw_info, vdev->fw_info_pa);
                 kfree(vdev);
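
The vnic_dev allocation hunks above convert the coherent allocations as well; dma_alloc_coherent() takes the struct device plus an explicit GFP flag, where pci_alloc_consistent() historically implied GFP_ATOMIC. A minimal sketch of the allocate/free pairing (illustrative helpers, not driver code):

#include <linux/dma-mapping.h>
#include <linux/pci.h>

/* Sketch only: coherent DMA buffer allocation with the generic API. */
static void *example_alloc_coherent(struct pci_dev *pdev, size_t size,
                                    dma_addr_t *pa)
{
        /* old: return pci_alloc_consistent(pdev, size, pa); */
        return dma_alloc_coherent(&pdev->dev, size, pa, GFP_KERNEL);
}

static void example_free_coherent(struct pci_dev *pdev, size_t size,
                                  void *buf, dma_addr_t pa)
{
        /* old: pci_free_consistent(pdev, size, buf, pa); */
        dma_free_coherent(&pdev->dev, size, buf, pa);
}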