Commit 87196eb7 authored by FUJITA Tomonori, committed by David S. Miller

qla3xxx: use the DMA state API instead of the pci equivalents

This replaces the PCI DMA state API (include/linux/pci-dma.h) with the
DMA equivalents, since the PCI DMA state API will become obsolete.

No functional change.

For further information about the background:

http://marc.info/?l=linux-netdev&m=127037540020276&w=2

Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Cc: Ron Mercer <ron.mercer@qlogic.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 4e5e4f0d
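
The conversion in the diff below is a mechanical, one-to-one rename of the unmap-state helpers; the mapping and unmapping calls themselves are untouched by this patch. As a minimal sketch of the before/after usage (not part of the commit; example_buf_cb, example_save_mapping and example_unmap are hypothetical names standing in for the driver's own structures and functions):

#include <linux/pci.h>
#include <linux/dma-mapping.h>	/* DEFINE_DMA_UNMAP_ADDR(), dma_unmap_addr_set(), ... */
#include <linux/skbuff.h>

/* Hypothetical descriptor, mirroring struct ql_rcv_buf_cb in the diff below. */
struct example_buf_cb {
    struct sk_buff *skb;
    /* was: DECLARE_PCI_UNMAP_ADDR(mapaddr); DECLARE_PCI_UNMAP_LEN(maplen); */
    DEFINE_DMA_UNMAP_ADDR(mapaddr);
    DEFINE_DMA_UNMAP_LEN(maplen);
};

static void example_save_mapping(struct example_buf_cb *cb, dma_addr_t map, u32 len)
{
    /* was: pci_unmap_addr_set() / pci_unmap_len_set() */
    dma_unmap_addr_set(cb, mapaddr, map);
    dma_unmap_len_set(cb, maplen, len);
}

static void example_unmap(struct pci_dev *pdev, struct example_buf_cb *cb)
{
    /* The pci_unmap_single() call itself is unchanged by this patch; only the
     * state accessors are renamed (was: pci_unmap_addr() / pci_unmap_len()). */
    pci_unmap_single(pdev,
                     dma_unmap_addr(cb, mapaddr),
                     dma_unmap_len(cb, maplen),
                     PCI_DMA_FROMDEVICE);
}
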
@@ -343,8 +343,8 @@ static void ql_release_to_lrg_buf_free_list(struct ql3_adapter *qdev,
             cpu_to_le32(LS_64BITS(map));
         lrg_buf_cb->buf_phy_addr_high =
             cpu_to_le32(MS_64BITS(map));
-        pci_unmap_addr_set(lrg_buf_cb, mapaddr, map);
-        pci_unmap_len_set(lrg_buf_cb, maplen,
+        dma_unmap_addr_set(lrg_buf_cb, mapaddr, map);
+        dma_unmap_len_set(lrg_buf_cb, maplen,
                           qdev->lrg_buffer_len -
                           QL_HEADER_SPACE);
     }
@@ -1924,8 +1924,8 @@ static int ql_populate_free_queue(struct ql3_adapter *qdev)
                     cpu_to_le32(LS_64BITS(map));
                 lrg_buf_cb->buf_phy_addr_high =
                     cpu_to_le32(MS_64BITS(map));
-                pci_unmap_addr_set(lrg_buf_cb, mapaddr, map);
-                pci_unmap_len_set(lrg_buf_cb, maplen,
+                dma_unmap_addr_set(lrg_buf_cb, mapaddr, map);
+                dma_unmap_len_set(lrg_buf_cb, maplen,
                                   qdev->lrg_buffer_len -
                                   QL_HEADER_SPACE);
                 --qdev->lrg_buf_skb_check;
@@ -2041,16 +2041,16 @@ static void ql_process_mac_tx_intr(struct ql3_adapter *qdev,
     }
     pci_unmap_single(qdev->pdev,
-                     pci_unmap_addr(&tx_cb->map[0], mapaddr),
-                     pci_unmap_len(&tx_cb->map[0], maplen),
+                     dma_unmap_addr(&tx_cb->map[0], mapaddr),
+                     dma_unmap_len(&tx_cb->map[0], maplen),
                      PCI_DMA_TODEVICE);
     tx_cb->seg_count--;
     if (tx_cb->seg_count) {
         for (i = 1; i < tx_cb->seg_count; i++) {
             pci_unmap_page(qdev->pdev,
-                           pci_unmap_addr(&tx_cb->map[i],
+                           dma_unmap_addr(&tx_cb->map[i],
                                           mapaddr),
-                           pci_unmap_len(&tx_cb->map[i], maplen),
+                           dma_unmap_len(&tx_cb->map[i], maplen),
                            PCI_DMA_TODEVICE);
         }
     }
@@ -2119,8 +2119,8 @@ static void ql_process_mac_rx_intr(struct ql3_adapter *qdev,
     skb_put(skb, length);
     pci_unmap_single(qdev->pdev,
-                     pci_unmap_addr(lrg_buf_cb2, mapaddr),
-                     pci_unmap_len(lrg_buf_cb2, maplen),
+                     dma_unmap_addr(lrg_buf_cb2, mapaddr),
+                     dma_unmap_len(lrg_buf_cb2, maplen),
                      PCI_DMA_FROMDEVICE);
     prefetch(skb->data);
     skb->ip_summed = CHECKSUM_NONE;
@@ -2165,8 +2165,8 @@ static void ql_process_macip_rx_intr(struct ql3_adapter *qdev,
     skb_put(skb2, length);	/* Just the second buffer length here. */
     pci_unmap_single(qdev->pdev,
-                     pci_unmap_addr(lrg_buf_cb2, mapaddr),
-                     pci_unmap_len(lrg_buf_cb2, maplen),
+                     dma_unmap_addr(lrg_buf_cb2, mapaddr),
+                     dma_unmap_len(lrg_buf_cb2, maplen),
                      PCI_DMA_FROMDEVICE);
     prefetch(skb2->data);
@@ -2454,8 +2454,8 @@ static int ql_send_map(struct ql3_adapter *qdev,
     oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
     oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
     oal_entry->len = cpu_to_le32(len);
-    pci_unmap_addr_set(&tx_cb->map[seg], mapaddr, map);
-    pci_unmap_len_set(&tx_cb->map[seg], maplen, len);
+    dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map);
+    dma_unmap_len_set(&tx_cb->map[seg], maplen, len);
     seg++;
     if (seg_cnt == 1) {
@@ -2488,9 +2488,9 @@ static int ql_send_map(struct ql3_adapter *qdev,
             oal_entry->len =
                 cpu_to_le32(sizeof(struct oal) |
                             OAL_CONT_ENTRY);
-            pci_unmap_addr_set(&tx_cb->map[seg], mapaddr,
+            dma_unmap_addr_set(&tx_cb->map[seg], mapaddr,
                                map);
-            pci_unmap_len_set(&tx_cb->map[seg], maplen,
+            dma_unmap_len_set(&tx_cb->map[seg], maplen,
                               sizeof(struct oal));
             oal_entry = (struct oal_entry *)oal;
             oal++;
@@ -2512,8 +2512,8 @@ static int ql_send_map(struct ql3_adapter *qdev,
         oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
         oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
         oal_entry->len = cpu_to_le32(frag->size);
-        pci_unmap_addr_set(&tx_cb->map[seg], mapaddr, map);
-        pci_unmap_len_set(&tx_cb->map[seg], maplen,
+        dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map);
+        dma_unmap_len_set(&tx_cb->map[seg], maplen,
                           frag->size);
     }
     /* Terminate the last segment. */
@@ -2539,22 +2539,22 @@ static int ql_send_map(struct ql3_adapter *qdev,
             (seg == 12 && seg_cnt > 13) ||	/* but necessary. */
             (seg == 17 && seg_cnt > 18)) {
             pci_unmap_single(qdev->pdev,
-                             pci_unmap_addr(&tx_cb->map[seg], mapaddr),
-                             pci_unmap_len(&tx_cb->map[seg], maplen),
+                             dma_unmap_addr(&tx_cb->map[seg], mapaddr),
+                             dma_unmap_len(&tx_cb->map[seg], maplen),
                              PCI_DMA_TODEVICE);
             oal++;
             seg++;
         }
         pci_unmap_page(qdev->pdev,
-                       pci_unmap_addr(&tx_cb->map[seg], mapaddr),
-                       pci_unmap_len(&tx_cb->map[seg], maplen),
+                       dma_unmap_addr(&tx_cb->map[seg], mapaddr),
+                       dma_unmap_len(&tx_cb->map[seg], maplen),
                        PCI_DMA_TODEVICE);
     }
     pci_unmap_single(qdev->pdev,
-                     pci_unmap_addr(&tx_cb->map[0], mapaddr),
-                     pci_unmap_addr(&tx_cb->map[0], maplen),
+                     dma_unmap_addr(&tx_cb->map[0], mapaddr),
+                     dma_unmap_addr(&tx_cb->map[0], maplen),
                      PCI_DMA_TODEVICE);
     return NETDEV_TX_BUSY;
@@ -2841,8 +2841,8 @@ static void ql_free_large_buffers(struct ql3_adapter *qdev)
         if (lrg_buf_cb->skb) {
             dev_kfree_skb(lrg_buf_cb->skb);
             pci_unmap_single(qdev->pdev,
-                             pci_unmap_addr(lrg_buf_cb, mapaddr),
-                             pci_unmap_len(lrg_buf_cb, maplen),
+                             dma_unmap_addr(lrg_buf_cb, mapaddr),
+                             dma_unmap_len(lrg_buf_cb, maplen),
                              PCI_DMA_FROMDEVICE);
             memset(lrg_buf_cb, 0, sizeof(struct ql_rcv_buf_cb));
         } else {
@@ -2912,8 +2912,8 @@ static int ql_alloc_large_buffers(struct ql3_adapter *qdev)
             return -ENOMEM;
         }
-        pci_unmap_addr_set(lrg_buf_cb, mapaddr, map);
-        pci_unmap_len_set(lrg_buf_cb, maplen,
+        dma_unmap_addr_set(lrg_buf_cb, mapaddr, map);
+        dma_unmap_len_set(lrg_buf_cb, maplen,
                           qdev->lrg_buffer_len -
                           QL_HEADER_SPACE);
         lrg_buf_cb->buf_phy_addr_low =
@@ -3793,13 +3793,13 @@ static void ql_reset_work(struct work_struct *work)
                        "%s: Freeing lost SKB.\n",
                        qdev->ndev->name);
                 pci_unmap_single(qdev->pdev,
-                                 pci_unmap_addr(&tx_cb->map[0], mapaddr),
-                                 pci_unmap_len(&tx_cb->map[0], maplen),
+                                 dma_unmap_addr(&tx_cb->map[0], mapaddr),
+                                 dma_unmap_len(&tx_cb->map[0], maplen),
                                  PCI_DMA_TODEVICE);
                 for(j=1;j<tx_cb->seg_count;j++) {
                     pci_unmap_page(qdev->pdev,
-                                   pci_unmap_addr(&tx_cb->map[j],mapaddr),
-                                   pci_unmap_len(&tx_cb->map[j],maplen),
+                                   dma_unmap_addr(&tx_cb->map[j],mapaddr),
+                                   dma_unmap_len(&tx_cb->map[j],maplen),
                                    PCI_DMA_TODEVICE);
                 }
                 dev_kfree_skb(tx_cb->skb);
...
@@ -998,8 +998,8 @@ enum link_state_t {
 struct ql_rcv_buf_cb {
     struct ql_rcv_buf_cb *next;
     struct sk_buff *skb;
-    DECLARE_PCI_UNMAP_ADDR(mapaddr);
-    DECLARE_PCI_UNMAP_LEN(maplen);
+    DEFINE_DMA_UNMAP_ADDR(mapaddr);
+    DEFINE_DMA_UNMAP_LEN(maplen);
     __le32 buf_phy_addr_low;
     __le32 buf_phy_addr_high;
     int index;
@@ -1029,8 +1029,8 @@ struct oal {
 };
 struct map_list {
-    DECLARE_PCI_UNMAP_ADDR(mapaddr);
-    DECLARE_PCI_UNMAP_LEN(maplen);
+    DEFINE_DMA_UNMAP_ADDR(mapaddr);
+    DEFINE_DMA_UNMAP_LEN(maplen);
 };
 struct ql_tx_buf_cb {
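
Background, not part of the patch: both the old and the new helpers are macros because DMA unmap state only has to be stored on configurations that actually need the address and length at unmap time. The DMA state API in <linux/dma-mapping.h> of this era is defined roughly as follows (an approximate sketch, not the authoritative header), so the fields and accessors compile away entirely when CONFIG_NEED_DMA_MAP_STATE is not set:

/* Approximate sketch of the definitions in <linux/dma-mapping.h>. */
#ifdef CONFIG_NEED_DMA_MAP_STATE
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)        dma_addr_t ADDR_NAME
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)          __u32 LEN_NAME
#define dma_unmap_addr(PTR, ADDR_NAME)          ((PTR)->ADDR_NAME)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL) (((PTR)->ADDR_NAME) = (VAL))
#define dma_unmap_len(PTR, LEN_NAME)            ((PTR)->LEN_NAME)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL)   (((PTR)->LEN_NAME) = (VAL))
#else
/* No unmap state needed: fields vanish, accessors become no-ops. */
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)
#define dma_unmap_addr(PTR, ADDR_NAME)          (0)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL) do { } while (0)
#define dma_unmap_len(PTR, LEN_NAME)            (0)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL)   do { } while (0)
#endif

This is why drivers store and read the mapping through dma_unmap_addr()/dma_unmap_len() rather than touching the underlying fields directly.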