Commit 106f6442 authored by Stephen Rothwell, committed by Linus Torvalds

[PATCH] ppc64: replace last usage of vio dma mapping routines

This patch just replaces the last usage of the vio dma mapping routines
with the equivalent generic dma mapping routines.
Signed-off-by: Stephen Rothwell <sfr@canb.auug.org.au>
Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 4c8b7b25
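
The conversion itself is mechanical: each vio_map_single()/vio_unmap_single() call on the struct vio_dev becomes the corresponding generic dma_map_single()/dma_unmap_single() call on the vio_dev's embedded struct device. A minimal sketch of the pattern, using placeholder names (vdev, buf, len, dma_addr) rather than the driver's real call sites, which appear in the diff below:

	/* before: vio-specific wrappers keyed off the vio_dev */
	dma_addr = vio_map_single(vdev, buf, len, DMA_FROM_DEVICE);
	vio_unmap_single(vdev, dma_addr, len, DMA_FROM_DEVICE);

	/* after: generic DMA API keyed off the embedded struct device */
	dma_addr = dma_map_single(&vdev->dev, buf, len, DMA_FROM_DEVICE);
	dma_unmap_single(&vdev->dev, dma_addr, len, DMA_FROM_DEVICE);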
@@ -218,7 +218,8 @@ static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, struc
 		ibmveth_assert(index != IBM_VETH_INVALID_MAP);
 		ibmveth_assert(pool->skbuff[index] == NULL);
 
-		dma_addr = vio_map_single(adapter->vdev, skb->data, pool->buff_size, DMA_FROM_DEVICE);
+		dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
+				pool->buff_size, DMA_FROM_DEVICE);
 
 		pool->free_map[free_index] = IBM_VETH_INVALID_MAP;
 		pool->dma_addr[index] = dma_addr;
@@ -238,7 +239,9 @@ static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, struc
 			pool->free_map[free_index] = IBM_VETH_INVALID_MAP;
 			pool->skbuff[index] = NULL;
 			pool->consumer_index--;
-			vio_unmap_single(adapter->vdev, pool->dma_addr[index], pool->buff_size, DMA_FROM_DEVICE);
+			dma_unmap_single(&adapter->vdev->dev,
+					pool->dma_addr[index], pool->buff_size,
+					DMA_FROM_DEVICE);
 			dev_kfree_skb_any(skb);
 			adapter->replenish_add_buff_failure++;
 			break;
@@ -299,7 +302,7 @@ static void ibmveth_free_buffer_pool(struct ibmveth_adapter *adapter, struct ibm
 	for(i = 0; i < pool->size; ++i) {
 		struct sk_buff *skb = pool->skbuff[i];
 		if(skb) {
-			vio_unmap_single(adapter->vdev,
+			dma_unmap_single(&adapter->vdev->dev,
 					 pool->dma_addr[i],
 					 pool->buff_size,
 					 DMA_FROM_DEVICE);
@@ -337,7 +340,7 @@ static void ibmveth_remove_buffer_from_pool(struct ibmveth_adapter *adapter, u64
 
 	adapter->rx_buff_pool[pool].skbuff[index] = NULL;
 
-	vio_unmap_single(adapter->vdev,
+	dma_unmap_single(&adapter->vdev->dev,
 			 adapter->rx_buff_pool[pool].dma_addr[index],
 			 adapter->rx_buff_pool[pool].buff_size,
 			 DMA_FROM_DEVICE);
@@ -408,7 +411,9 @@ static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
 {
 	if(adapter->buffer_list_addr != NULL) {
 		if(!dma_mapping_error(adapter->buffer_list_dma)) {
-			vio_unmap_single(adapter->vdev, adapter->buffer_list_dma, 4096, DMA_BIDIRECTIONAL);
+			dma_unmap_single(&adapter->vdev->dev,
+					adapter->buffer_list_dma, 4096,
+					DMA_BIDIRECTIONAL);
 			adapter->buffer_list_dma = DMA_ERROR_CODE;
 		}
 		free_page((unsigned long)adapter->buffer_list_addr);
@@ -417,7 +422,9 @@ static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
 
 	if(adapter->filter_list_addr != NULL) {
 		if(!dma_mapping_error(adapter->filter_list_dma)) {
-			vio_unmap_single(adapter->vdev, adapter->filter_list_dma, 4096, DMA_BIDIRECTIONAL);
+			dma_unmap_single(&adapter->vdev->dev,
+					adapter->filter_list_dma, 4096,
+					DMA_BIDIRECTIONAL);
 			adapter->filter_list_dma = DMA_ERROR_CODE;
 		}
 		free_page((unsigned long)adapter->filter_list_addr);
@@ -426,7 +433,10 @@ static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
 
 	if(adapter->rx_queue.queue_addr != NULL) {
 		if(!dma_mapping_error(adapter->rx_queue.queue_dma)) {
-			vio_unmap_single(adapter->vdev, adapter->rx_queue.queue_dma, adapter->rx_queue.queue_len, DMA_BIDIRECTIONAL);
+			dma_unmap_single(&adapter->vdev->dev,
+					adapter->rx_queue.queue_dma,
+					adapter->rx_queue.queue_len,
+					DMA_BIDIRECTIONAL);
 			adapter->rx_queue.queue_dma = DMA_ERROR_CODE;
 		}
 		kfree(adapter->rx_queue.queue_addr);
@@ -472,9 +482,13 @@ static int ibmveth_open(struct net_device *netdev)
 		return -ENOMEM;
 	}
 
-	adapter->buffer_list_dma = vio_map_single(adapter->vdev, adapter->buffer_list_addr, 4096, DMA_BIDIRECTIONAL);
-	adapter->filter_list_dma = vio_map_single(adapter->vdev, adapter->filter_list_addr, 4096, DMA_BIDIRECTIONAL);
-	adapter->rx_queue.queue_dma = vio_map_single(adapter->vdev, adapter->rx_queue.queue_addr, adapter->rx_queue.queue_len, DMA_BIDIRECTIONAL);
+	adapter->buffer_list_dma = dma_map_single(&adapter->vdev->dev,
+			adapter->buffer_list_addr, 4096, DMA_BIDIRECTIONAL);
+	adapter->filter_list_dma = dma_map_single(&adapter->vdev->dev,
+			adapter->filter_list_addr, 4096, DMA_BIDIRECTIONAL);
+	adapter->rx_queue.queue_dma = dma_map_single(&adapter->vdev->dev,
+			adapter->rx_queue.queue_addr,
+			adapter->rx_queue.queue_len, DMA_BIDIRECTIONAL);
 
 	if((dma_mapping_error(adapter->buffer_list_dma) ) ||
 	   (dma_mapping_error(adapter->filter_list_dma)) ||
@@ -644,7 +658,7 @@ static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev)
 
 	/* map the initial fragment */
 	desc[0].fields.length = nfrags ? skb->len - skb->data_len : skb->len;
-	desc[0].fields.address = vio_map_single(adapter->vdev, skb->data,
+	desc[0].fields.address = dma_map_single(&adapter->vdev->dev, skb->data,
 					desc[0].fields.length, DMA_TO_DEVICE);
 	desc[0].fields.valid = 1;
@@ -662,7 +676,7 @@ static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev)
 	while(curfrag--) {
 		skb_frag_t *frag = &skb_shinfo(skb)->frags[curfrag];
 		desc[curfrag+1].fields.address
-			= vio_map_single(adapter->vdev,
+			= dma_map_single(&adapter->vdev->dev,
 				page_address(frag->page) + frag->page_offset,
 				frag->size, DMA_TO_DEVICE);
 		desc[curfrag+1].fields.length = frag->size;
@@ -674,7 +688,7 @@ static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev)
 			adapter->stats.tx_dropped++;
 			/* Free all the mappings we just created */
 			while(curfrag < nfrags) {
-				vio_unmap_single(adapter->vdev,
+				dma_unmap_single(&adapter->vdev->dev,
 						 desc[curfrag+1].fields.address,
 						 desc[curfrag+1].fields.length,
 						 DMA_TO_DEVICE);
@@ -714,7 +728,9 @@ static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev)
 	}
 
 	do {
-		vio_unmap_single(adapter->vdev, desc[nfrags].fields.address, desc[nfrags].fields.length, DMA_TO_DEVICE);
+		dma_unmap_single(&adapter->vdev->dev,
+				desc[nfrags].fields.address,
+				desc[nfrags].fields.length, DMA_TO_DEVICE);
 	} while(--nfrags >= 0);
 
 	dev_kfree_skb(skb);
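
The ibmveth_cleanup hunks above also show the driver's error-handling convention: a buffer is only unmapped if dma_mapping_error() reports a valid mapping, and the stored handle is then reset to DMA_ERROR_CODE so a repeated cleanup is a no-op. A minimal sketch of that pattern, with hypothetical helper names (example_map/example_unmap are illustrative, not part of the patch), assuming the 2.6-era single-argument dma_mapping_error():

	static int example_map(struct vio_dev *vdev, void *addr, dma_addr_t *dma)
	{
		*dma = dma_map_single(&vdev->dev, addr, 4096, DMA_BIDIRECTIONAL);
		if (dma_mapping_error(*dma))	/* old API: no device argument */
			return -ENOMEM;
		return 0;
	}

	static void example_unmap(struct vio_dev *vdev, dma_addr_t *dma)
	{
		if (!dma_mapping_error(*dma)) {	/* only unmap a live mapping */
			dma_unmap_single(&vdev->dev, *dma, 4096, DMA_BIDIRECTIONAL);
			*dma = DMA_ERROR_CODE;	/* poison so a second cleanup skips it */
		}
	}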