Commit 5ea74318 authored by Ivan Vecera, committed by David S. Miller

bna: use device model DMA API

Use DMA API as PCI equivalents will be deprecated.
Signed-off-by: Ivan Vecera <ivecera@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 8fe73503
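
For context, the conversion follows the usual pattern of trading each pci_* DMA wrapper for its generic dma_* counterpart, passing &pdev->dev in place of the pci_dev itself. The sketch below only illustrates the API shape, it is not code from this driver, and the helper name example_map_skb_head is invented for the example.

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/skbuff.h>

/* Illustrative only: map an skb head for a device read, then unmap it. */
static int example_map_skb_head(struct pci_dev *pdev, struct sk_buff *skb)
{
	dma_addr_t dma_addr;

	/* Old: pci_map_single(pdev, skb->data, len, PCI_DMA_TODEVICE) */
	dma_addr = dma_map_single(&pdev->dev, skb->data, skb_headlen(skb),
				  DMA_TO_DEVICE);
	if (dma_mapping_error(&pdev->dev, dma_addr))
		return -ENOMEM;

	/* ... hand dma_addr to the hardware ... */

	/* Old: pci_unmap_single(pdev, dma_addr, len, PCI_DMA_TODEVICE) */
	dma_unmap_single(&pdev->dev, dma_addr, skb_headlen(skb),
			 DMA_TO_DEVICE);
	return 0;
}
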
@@ -126,22 +126,22 @@ bnad_free_all_txbufs(struct bnad *bnad,
 		}
 		unmap_array[unmap_cons].skb = NULL;
-		pci_unmap_single(bnad->pcidev,
-				pci_unmap_addr(&unmap_array[unmap_cons],
+		dma_unmap_single(&bnad->pcidev->dev,
+				dma_unmap_addr(&unmap_array[unmap_cons],
 					dma_addr), skb_headlen(skb),
-					PCI_DMA_TODEVICE);
-		pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0);
+					DMA_TO_DEVICE);
+		dma_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0);
 		if (++unmap_cons >= unmap_q->q_depth)
 			break;
 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-			pci_unmap_page(bnad->pcidev,
-					pci_unmap_addr(&unmap_array[unmap_cons],
+			dma_unmap_page(&bnad->pcidev->dev,
+					dma_unmap_addr(&unmap_array[unmap_cons],
 						dma_addr),
 					skb_shinfo(skb)->frags[i].size,
-					PCI_DMA_TODEVICE);
-			pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr,
+					DMA_TO_DEVICE);
+			dma_unmap_addr_set(&unmap_array[unmap_cons], dma_addr,
					0);
 			if (++unmap_cons >= unmap_q->q_depth)
 				break;
@@ -199,23 +199,23 @@ bnad_free_txbufs(struct bnad *bnad,
 		sent_bytes += skb->len;
 		wis -= BNA_TXQ_WI_NEEDED(1 + skb_shinfo(skb)->nr_frags);
-		pci_unmap_single(bnad->pcidev,
-				pci_unmap_addr(&unmap_array[unmap_cons],
+		dma_unmap_single(&bnad->pcidev->dev,
+				dma_unmap_addr(&unmap_array[unmap_cons],
 					dma_addr), skb_headlen(skb),
-					PCI_DMA_TODEVICE);
-		pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0);
+					DMA_TO_DEVICE);
+		dma_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0);
 		BNA_QE_INDX_ADD(unmap_cons, 1, unmap_q->q_depth);
 		prefetch(&unmap_array[unmap_cons + 1]);
 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 			prefetch(&unmap_array[unmap_cons + 1]);
-			pci_unmap_page(bnad->pcidev,
-					pci_unmap_addr(&unmap_array[unmap_cons],
+			dma_unmap_page(&bnad->pcidev->dev,
+					dma_unmap_addr(&unmap_array[unmap_cons],
 						dma_addr),
 					skb_shinfo(skb)->frags[i].size,
-					PCI_DMA_TODEVICE);
-			pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr,
+					DMA_TO_DEVICE);
+			dma_unmap_addr_set(&unmap_array[unmap_cons], dma_addr,
					0);
 			BNA_QE_INDX_ADD(unmap_cons, 1, unmap_q->q_depth);
 		}
@@ -340,19 +340,22 @@ static void
 bnad_free_all_rxbufs(struct bnad *bnad, struct bna_rcb *rcb)
 {
 	struct bnad_unmap_q *unmap_q;
+	struct bnad_skb_unmap *unmap_array;
 	struct sk_buff *skb;
 	int unmap_cons;
 	unmap_q = rcb->unmap_q;
+	unmap_array = unmap_q->unmap_array;
 	for (unmap_cons = 0; unmap_cons < unmap_q->q_depth; unmap_cons++) {
-		skb = unmap_q->unmap_array[unmap_cons].skb;
+		skb = unmap_array[unmap_cons].skb;
 		if (!skb)
 			continue;
-		unmap_q->unmap_array[unmap_cons].skb = NULL;
-		pci_unmap_single(bnad->pcidev, pci_unmap_addr(&unmap_q->
-					unmap_array[unmap_cons],
-					dma_addr), rcb->rxq->buffer_size,
-					PCI_DMA_FROMDEVICE);
+		unmap_array[unmap_cons].skb = NULL;
+		dma_unmap_single(&bnad->pcidev->dev,
+				dma_unmap_addr(&unmap_array[unmap_cons],
+					dma_addr),
+				rcb->rxq->buffer_size,
+				DMA_FROM_DEVICE);
 		dev_kfree_skb(skb);
 	}
 	bnad_reset_rcb(bnad, rcb);
@@ -391,9 +394,10 @@ bnad_alloc_n_post_rxbufs(struct bnad *bnad, struct bna_rcb *rcb)
 		skb->dev = bnad->netdev;
 		skb_reserve(skb, NET_IP_ALIGN);
 		unmap_array[unmap_prod].skb = skb;
-		dma_addr = pci_map_single(bnad->pcidev, skb->data,
-			rcb->rxq->buffer_size, PCI_DMA_FROMDEVICE);
-		pci_unmap_addr_set(&unmap_array[unmap_prod], dma_addr,
+		dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
+					rcb->rxq->buffer_size,
+					DMA_FROM_DEVICE);
+		dma_unmap_addr_set(&unmap_array[unmap_prod], dma_addr,
				dma_addr);
 		BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr);
 		BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
@@ -434,8 +438,9 @@ bnad_poll_cq(struct bnad *bnad, struct bna_ccb *ccb, int budget)
 	struct bna_rcb *rcb = NULL;
 	unsigned int wi_range, packets = 0, wis = 0;
 	struct bnad_unmap_q *unmap_q;
+	struct bnad_skb_unmap *unmap_array;
 	struct sk_buff *skb;
-	u32 flags;
+	u32 flags, unmap_cons;
 	u32 qid0 = ccb->rcb[0]->rxq->rxq_id;
 	struct bna_pkt_rate *pkt_rt = &ccb->pkt_rate;
@@ -456,17 +461,17 @@ bnad_poll_cq(struct bnad *bnad, struct bna_ccb *ccb, int budget)
 			rcb = ccb->rcb[1];
 		unmap_q = rcb->unmap_q;
-		skb = unmap_q->unmap_array[unmap_q->consumer_index].skb;
+		unmap_array = unmap_q->unmap_array;
+		unmap_cons = unmap_q->consumer_index;
+		skb = unmap_array[unmap_cons].skb;
 		BUG_ON(!(skb));
-		unmap_q->unmap_array[unmap_q->consumer_index].skb = NULL;
-		pci_unmap_single(bnad->pcidev,
-				pci_unmap_addr(&unmap_q->
-					unmap_array[unmap_q->
-						consumer_index],
+		unmap_array[unmap_cons].skb = NULL;
+		dma_unmap_single(&bnad->pcidev->dev,
+				dma_unmap_addr(&unmap_array[unmap_cons],
 					dma_addr),
 				rcb->rxq->buffer_size,
-				PCI_DMA_FROMDEVICE);
+				DMA_FROM_DEVICE);
 		BNA_QE_INDX_ADD(unmap_q->consumer_index, 1, unmap_q->q_depth);
 		/* Should be more efficient ? Performance ? */
@@ -1015,7 +1020,7 @@ bnad_mem_free(struct bnad *bnad,
 		if (mem_info->mem_type == BNA_MEM_T_DMA) {
 			BNA_GET_DMA_ADDR(&(mem_info->mdl[i].dma),
					dma_pa);
-			pci_free_consistent(bnad->pcidev,
+			dma_free_coherent(&bnad->pcidev->dev,
					mem_info->mdl[i].len,
					mem_info->mdl[i].kva, dma_pa);
 		} else
@@ -1047,8 +1052,9 @@ bnad_mem_alloc(struct bnad *bnad,
 	for (i = 0; i < mem_info->num; i++) {
 		mem_info->mdl[i].len = mem_info->len;
 		mem_info->mdl[i].kva =
-			pci_alloc_consistent(bnad->pcidev,
-					mem_info->len, &dma_pa);
+			dma_alloc_coherent(&bnad->pcidev->dev,
					mem_info->len, &dma_pa,
+					GFP_KERNEL);
 		if (mem_info->mdl[i].kva == NULL)
 			goto err_return;
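
One practical difference visible in this hunk: pci_alloc_consistent() always allocated with an implied GFP_ATOMIC, while dma_alloc_coherent() takes the GFP flags explicitly, so the caller can pass GFP_KERNEL as done here. A minimal sketch of the coherent alloc/free pairing, with invented helper names rather than driver code:

#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/pci.h>

/* Illustrative only: one coherent buffer of 'len' bytes. */
static void *example_alloc_coherent(struct pci_dev *pdev, size_t len,
				    dma_addr_t *dma_pa)
{
	/* Old: pci_alloc_consistent(pdev, len, dma_pa) -- implied GFP_ATOMIC */
	return dma_alloc_coherent(&pdev->dev, len, dma_pa, GFP_KERNEL);
}

static void example_free_coherent(struct pci_dev *pdev, size_t len,
				  void *kva, dma_addr_t dma_pa)
{
	/* Old: pci_free_consistent(pdev, len, kva, dma_pa) */
	dma_free_coherent(&pdev->dev, len, kva, dma_pa);
}
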
@@ -2600,9 +2606,9 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
 	unmap_q->unmap_array[unmap_prod].skb = skb;
 	BUG_ON(!(skb_headlen(skb) <= BFI_TX_MAX_DATA_PER_VECTOR));
 	txqent->vector[vect_id].length = htons(skb_headlen(skb));
-	dma_addr = pci_map_single(bnad->pcidev, skb->data, skb_headlen(skb),
-		PCI_DMA_TODEVICE);
-	pci_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
+	dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
+				skb_headlen(skb), DMA_TO_DEVICE);
+	dma_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
			dma_addr);
 	BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
@@ -2630,11 +2636,9 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
 		BUG_ON(!(size <= BFI_TX_MAX_DATA_PER_VECTOR));
 		txqent->vector[vect_id].length = htons(size);
-		dma_addr =
-			pci_map_page(bnad->pcidev, frag->page,
-				frag->page_offset, size,
-				PCI_DMA_TODEVICE);
-		pci_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
+		dma_addr = dma_map_page(&bnad->pcidev->dev, frag->page,
+				frag->page_offset, size, DMA_TO_DEVICE);
+		dma_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr,
				dma_addr);
 		BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
 		BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);
@@ -3022,13 +3026,13 @@ bnad_pci_init(struct bnad *bnad,
 	err = pci_request_regions(pdev, BNAD_NAME);
 	if (err)
 		goto disable_device;
-	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
-	    !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
+	if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
+	    !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
 		*using_dac = 1;
 	} else {
-		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
 		if (err) {
-			err = pci_set_consistent_dma_mask(pdev,
+			err = dma_set_coherent_mask(&pdev->dev,
						DMA_BIT_MASK(32));
 			if (err)
 				goto release_regions;
...
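
The DMA-mask setup above keeps the same negotiation as before: try a 64-bit mask for both streaming and coherent mappings, and fall back to 32-bit if that fails. A condensed sketch of that fallback pattern, with the error handling simplified and the function name invented for illustration:

#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/types.h>

/* Illustrative only: prefer 64-bit DMA, fall back to 32-bit. */
static int example_set_dma_masks(struct pci_dev *pdev, bool *using_dac)
{
	if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
	    !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
		*using_dac = true;
		return 0;
	}

	*using_dac = false;
	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))
		return -EIO;
	return dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
}
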
@@ -181,7 +181,7 @@ struct bnad_rx_info {
 /* Unmap queues for Tx / Rx cleanup */
 struct bnad_skb_unmap {
 	struct sk_buff *skb;
-	DECLARE_PCI_UNMAP_ADDR(dma_addr)
+	DEFINE_DMA_UNMAP_ADDR(dma_addr);
 };
 struct bnad_unmap_q {
...
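
The header change swaps DECLARE_PCI_UNMAP_ADDR for DEFINE_DMA_UNMAP_ADDR, which expands to a dma_addr_t member only on configurations that actually keep unmap state, so the per-buffer bookkeeping costs nothing elsewhere. A toy example of how such a field pairs with dma_unmap_addr()/dma_unmap_addr_set(); the struct and function names here are invented, not taken from the driver:

#include <linux/device.h>
#include <linux/dma-mapping.h>

/* Illustrative only: per-buffer unmap bookkeeping. */
struct example_unmap {
	void *buf;
	DEFINE_DMA_UNMAP_ADDR(dma_addr);	/* compiles away if unneeded */
};

static void example_record_mapping(struct example_unmap *u, dma_addr_t addr)
{
	/* Store the handle; a no-op when the arch keeps no unmap state. */
	dma_unmap_addr_set(u, dma_addr, addr);
}

static void example_unmap_buf(struct device *dev, struct example_unmap *u,
			      size_t len)
{
	dma_unmap_single(dev, dma_unmap_addr(u, dma_addr), len,
			 DMA_FROM_DEVICE);
}
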