Commit 1527106f authored by Ralph Campbell, committed by Roland Dreier

IB/core: Use the new verbs DMA mapping functions

Convert code in core/ to use the new DMA mapping functions for kernel
verbs consumers.
Signed-off-by: Ralph Campbell <ralph.campbell@qlogic.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
parent f2cbb660
......@@ -998,17 +998,17 @@ int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr)
mad_agent = mad_send_wr->send_buf.mad_agent;
sge = mad_send_wr->sg_list;
sge[0].addr = dma_map_single(mad_agent->device->dma_device,
sge[0].addr = ib_dma_map_single(mad_agent->device,
mad_send_wr->send_buf.mad,
sge[0].length,
DMA_TO_DEVICE);
pci_unmap_addr_set(mad_send_wr, header_mapping, sge[0].addr);
mad_send_wr->header_mapping = sge[0].addr;
sge[1].addr = dma_map_single(mad_agent->device->dma_device,
sge[1].addr = ib_dma_map_single(mad_agent->device,
ib_get_payload(mad_send_wr),
sge[1].length,
DMA_TO_DEVICE);
pci_unmap_addr_set(mad_send_wr, payload_mapping, sge[1].addr);
mad_send_wr->payload_mapping = sge[1].addr;
spin_lock_irqsave(&qp_info->send_queue.lock, flags);
if (qp_info->send_queue.count < qp_info->send_queue.max_active) {
......@@ -1026,11 +1026,11 @@ int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr)
}
spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
if (ret) {
dma_unmap_single(mad_agent->device->dma_device,
pci_unmap_addr(mad_send_wr, header_mapping),
ib_dma_unmap_single(mad_agent->device,
mad_send_wr->header_mapping,
sge[0].length, DMA_TO_DEVICE);
dma_unmap_single(mad_agent->device->dma_device,
pci_unmap_addr(mad_send_wr, payload_mapping),
ib_dma_unmap_single(mad_agent->device,
mad_send_wr->payload_mapping,
sge[1].length, DMA_TO_DEVICE);
}
return ret;
......@@ -1850,8 +1850,8 @@ static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv,
mad_priv_hdr = container_of(mad_list, struct ib_mad_private_header,
mad_list);
recv = container_of(mad_priv_hdr, struct ib_mad_private, header);
dma_unmap_single(port_priv->device->dma_device,
pci_unmap_addr(&recv->header, mapping),
ib_dma_unmap_single(port_priv->device,
recv->header.mapping,
sizeof(struct ib_mad_private) -
sizeof(struct ib_mad_private_header),
DMA_FROM_DEVICE);
......@@ -2080,11 +2080,11 @@ static void ib_mad_send_done_handler(struct ib_mad_port_private *port_priv,
qp_info = send_queue->qp_info;
retry:
dma_unmap_single(mad_send_wr->send_buf.mad_agent->device->dma_device,
pci_unmap_addr(mad_send_wr, header_mapping),
ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
mad_send_wr->header_mapping,
mad_send_wr->sg_list[0].length, DMA_TO_DEVICE);
dma_unmap_single(mad_send_wr->send_buf.mad_agent->device->dma_device,
pci_unmap_addr(mad_send_wr, payload_mapping),
ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
mad_send_wr->payload_mapping,
mad_send_wr->sg_list[1].length, DMA_TO_DEVICE);
queued_send_wr = NULL;
spin_lock_irqsave(&send_queue->lock, flags);
......@@ -2528,13 +2528,12 @@ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
break;
}
}
sg_list.addr = dma_map_single(qp_info->port_priv->
device->dma_device,
sg_list.addr = ib_dma_map_single(qp_info->port_priv->device,
&mad_priv->grh,
sizeof *mad_priv -
sizeof mad_priv->header,
DMA_FROM_DEVICE);
pci_unmap_addr_set(&mad_priv->header, mapping, sg_list.addr);
mad_priv->header.mapping = sg_list.addr;
recv_wr.wr_id = (unsigned long)&mad_priv->header.mad_list;
mad_priv->header.mad_list.mad_queue = recv_queue;
......@@ -2549,9 +2548,8 @@ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
list_del(&mad_priv->header.mad_list.list);
recv_queue->count--;
spin_unlock_irqrestore(&recv_queue->lock, flags);
dma_unmap_single(qp_info->port_priv->device->dma_device,
pci_unmap_addr(&mad_priv->header,
mapping),
ib_dma_unmap_single(qp_info->port_priv->device,
mad_priv->header.mapping,
sizeof *mad_priv -
sizeof mad_priv->header,
DMA_FROM_DEVICE);
......@@ -2586,8 +2584,8 @@ static void cleanup_recv_queue(struct ib_mad_qp_info *qp_info)
/* Remove from posted receive MAD list */
list_del(&mad_list->list);
dma_unmap_single(qp_info->port_priv->device->dma_device,
pci_unmap_addr(&recv->header, mapping),
ib_dma_unmap_single(qp_info->port_priv->device,
recv->header.mapping,
sizeof(struct ib_mad_private) -
sizeof(struct ib_mad_private_header),
DMA_FROM_DEVICE);
......
......@@ -73,7 +73,7 @@ struct ib_mad_private_header {
struct ib_mad_list_head mad_list;
struct ib_mad_recv_wc recv_wc;
struct ib_wc wc;
DECLARE_PCI_UNMAP_ADDR(mapping)
u64 mapping;
} __attribute__ ((packed));
struct ib_mad_private {
......@@ -126,8 +126,8 @@ struct ib_mad_send_wr_private {
struct list_head agent_list;
struct ib_mad_agent_private *mad_agent_priv;
struct ib_mad_send_buf send_buf;
DECLARE_PCI_UNMAP_ADDR(header_mapping)
DECLARE_PCI_UNMAP_ADDR(payload_mapping)
u64 header_mapping;
u64 payload_mapping;
struct ib_send_wr send_wr;
struct ib_sge sg_list[IB_MAD_SEND_REQ_MAX_SG];
__be64 tid;
......
......@@ -52,7 +52,7 @@ static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int d
int i;
list_for_each_entry_safe(chunk, tmp, &umem->chunk_list, list) {
dma_unmap_sg(dev->dma_device, chunk->page_list,
ib_dma_unmap_sg(dev, chunk->page_list,
chunk->nents, DMA_BIDIRECTIONAL);
for (i = 0; i < chunk->nents; ++i) {
if (umem->writable && dirty)
......@@ -136,7 +136,7 @@ int ib_umem_get(struct ib_device *dev, struct ib_umem *mem,
chunk->page_list[i].length = PAGE_SIZE;
}
chunk->nmap = dma_map_sg(dev->dma_device,
chunk->nmap = ib_dma_map_sg(dev,
&chunk->page_list[0],
chunk->nents,
DMA_BIDIRECTIONAL);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment