Commit ce644ed4 authored by Dhananjay Phadke; committed by David S. Miller

netxen: refactor tx dma mapping code

Move all tx skb mapping code into netxen_map_tx_skb().
Signed-off-by: Dhananjay Phadke <dhananjay@netxen.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 1dbc84a7
...@@ -1518,22 +1518,52 @@ netxen_tso_check(struct net_device *netdev, ...@@ -1518,22 +1518,52 @@ netxen_tso_check(struct net_device *netdev,
barrier(); barrier();
} }
static void static int
netxen_clean_tx_dma_mapping(struct pci_dev *pdev, netxen_map_tx_skb(struct pci_dev *pdev,
struct netxen_cmd_buffer *pbuf, int last) struct sk_buff *skb, struct netxen_cmd_buffer *pbuf)
{ {
int k; struct netxen_skb_frag *nf;
struct netxen_skb_frag *buffrag; struct skb_frag_struct *frag;
int i, nr_frags;
dma_addr_t map;
nr_frags = skb_shinfo(skb)->nr_frags;
nf = &pbuf->frag_array[0];
map = pci_map_single(pdev, skb->data,
skb_headlen(skb), PCI_DMA_TODEVICE);
if (pci_dma_mapping_error(pdev, map))
goto out_err;
nf->dma = map;
nf->length = skb_headlen(skb);
buffrag = &pbuf->frag_array[0]; for (i = 0; i < nr_frags; i++) {
pci_unmap_single(pdev, buffrag->dma, frag = &skb_shinfo(skb)->frags[i];
buffrag->length, PCI_DMA_TODEVICE); nf = &pbuf->frag_array[i+1];
for (k = 1; k < last; k++) { map = pci_map_page(pdev, frag->page, frag->page_offset,
buffrag = &pbuf->frag_array[k]; frag->size, PCI_DMA_TODEVICE);
pci_unmap_page(pdev, buffrag->dma, if (pci_dma_mapping_error(pdev, map))
buffrag->length, PCI_DMA_TODEVICE); goto unwind;
nf->dma = map;
nf->length = frag->size;
} }
return 0;
unwind:
while (i > 0) {
nf = &pbuf->frag_array[i];
pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE);
}
nf = &pbuf->frag_array[0];
pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);
out_err:
return -ENOMEM;
} }
static inline void static inline void
...@@ -1548,17 +1578,14 @@ netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev) ...@@ -1548,17 +1578,14 @@ netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{ {
struct netxen_adapter *adapter = netdev_priv(netdev); struct netxen_adapter *adapter = netdev_priv(netdev);
struct nx_host_tx_ring *tx_ring = adapter->tx_ring; struct nx_host_tx_ring *tx_ring = adapter->tx_ring;
struct skb_frag_struct *frag;
struct netxen_cmd_buffer *pbuf; struct netxen_cmd_buffer *pbuf;
struct netxen_skb_frag *buffrag; struct netxen_skb_frag *buffrag;
struct cmd_desc_type0 *hwdesc, *first_desc; struct cmd_desc_type0 *hwdesc, *first_desc;
struct pci_dev *pdev; struct pci_dev *pdev;
dma_addr_t temp_dma;
int i, k; int i, k;
unsigned long offset;
u32 producer; u32 producer;
int len, frag_count, no_of_desc; int frag_count, no_of_desc;
u32 num_txd = tx_ring->num_desc; u32 num_txd = tx_ring->num_desc;
frag_count = skb_shinfo(skb)->nr_frags + 1; frag_count = skb_shinfo(skb)->nr_frags + 1;
...@@ -1572,72 +1599,53 @@ netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev) ...@@ -1572,72 +1599,53 @@ netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
} }
producer = tx_ring->producer; producer = tx_ring->producer;
pbuf = &tx_ring->cmd_buf_arr[producer];
pdev = adapter->pdev; pdev = adapter->pdev;
len = skb->len - skb->data_len;
temp_dma = pci_map_single(pdev, skb->data, len, PCI_DMA_TODEVICE); if (netxen_map_tx_skb(pdev, skb, pbuf))
if (pci_dma_mapping_error(pdev, temp_dma))
goto drop_packet; goto drop_packet;
pbuf = &tx_ring->cmd_buf_arr[producer];
pbuf->skb = skb; pbuf->skb = skb;
pbuf->frag_count = frag_count; pbuf->frag_count = frag_count;
buffrag = &pbuf->frag_array[0];
buffrag->dma = temp_dma;
buffrag->length = len;
first_desc = hwdesc = &tx_ring->desc_head[producer]; first_desc = hwdesc = &tx_ring->desc_head[producer];
netxen_clear_cmddesc((u64 *)hwdesc); netxen_clear_cmddesc((u64 *)hwdesc);
netxen_set_tx_frags_len(hwdesc, frag_count, skb->len);
netxen_set_tx_port(hwdesc, adapter->portnum);
hwdesc->buffer_length[0] = cpu_to_le16(len); netxen_set_tx_frags_len(first_desc, frag_count, skb->len);
hwdesc->addr_buffer1 = cpu_to_le64(temp_dma); netxen_set_tx_port(first_desc, adapter->portnum);
for (i = 0; i < frag_count; i++) {
for (i = 1, k = 1; i < frag_count; i++, k++) { k = i % 4;
/* move to next desc. if there is a need */ if ((k == 0) && (i > 0)) {
if ((i & 0x3) == 0) { /* move to next desc.*/
k = 0;
producer = get_next_index(producer, num_txd); producer = get_next_index(producer, num_txd);
hwdesc = &tx_ring->desc_head[producer]; hwdesc = &tx_ring->desc_head[producer];
netxen_clear_cmddesc((u64 *)hwdesc); netxen_clear_cmddesc((u64 *)hwdesc);
pbuf = &tx_ring->cmd_buf_arr[producer]; tx_ring->cmd_buf_arr[producer].skb = NULL;
pbuf->skb = NULL;
}
buffrag = &pbuf->frag_array[i];
frag = &skb_shinfo(skb)->frags[i - 1];
len = frag->size;
offset = frag->page_offset;
temp_dma = pci_map_page(pdev, frag->page, offset,
len, PCI_DMA_TODEVICE);
if (pci_dma_mapping_error(pdev, temp_dma)) {
netxen_clean_tx_dma_mapping(pdev, pbuf, i);
goto drop_packet;
} }
buffrag->dma = temp_dma; buffrag = &pbuf->frag_array[i];
buffrag->length = len;
hwdesc->buffer_length[k] = cpu_to_le16(len); hwdesc->buffer_length[k] = cpu_to_le16(buffrag->length);
switch (k) { switch (k) {
case 0: case 0:
hwdesc->addr_buffer1 = cpu_to_le64(temp_dma); hwdesc->addr_buffer1 = cpu_to_le64(buffrag->dma);
break; break;
case 1: case 1:
hwdesc->addr_buffer2 = cpu_to_le64(temp_dma); hwdesc->addr_buffer2 = cpu_to_le64(buffrag->dma);
break; break;
case 2: case 2:
hwdesc->addr_buffer3 = cpu_to_le64(temp_dma); hwdesc->addr_buffer3 = cpu_to_le64(buffrag->dma);
break; break;
case 3: case 3:
hwdesc->addr_buffer4 = cpu_to_le64(temp_dma); hwdesc->addr_buffer4 = cpu_to_le64(buffrag->dma);
break; break;
} }
} }
tx_ring->producer = get_next_index(producer, num_txd); tx_ring->producer = get_next_index(producer, num_txd);
netxen_tso_check(netdev, tx_ring, first_desc, skb); netxen_tso_check(netdev, tx_ring, first_desc, skb);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment