Commit 79a0d7d8 authored by Sriharsha Basavapatna's avatar Sriharsha Basavapatna Committed by David S. Miller

be2net: Refactor be_xmit_enqueue() routine

- Reduce code duplication by moving WRB-frags setup into a function.
- Do not set up the WRB header before the frags are set up; this is unnecessary
  if there are errors while setting up the frags. We should only grab an entry
  for the header, set up the frags, and if everything is fine set up the header.
- The error cleanup can be moved into a small function.
Signed-off-by: Sriharsha Basavapatna <sriharsha.basavapatna@emulex.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 804abcdb
...@@ -811,7 +811,80 @@ static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb, ...@@ -811,7 +811,80 @@ static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
} }
} }
/* Returns the number of WRBs used up by the skb */ /* Grab a WRB header for xmit */
static u16 be_tx_get_wrb_hdr(struct be_tx_obj *txo)
{
u16 head = txo->q.head;
queue_head_inc(&txo->q);
return head;
}
/* Set up the WRB header for xmit */
/* Fill in the header WRB previously reserved at @head for this skb and
 * account for all WRBs (header + fragments) consumed by the packet.
 */
static void be_tx_setup_wrb_hdr(struct be_adapter *adapter,
				struct be_tx_obj *txo,
				struct be_wrb_params *wrb_params,
				struct sk_buff *skb, u16 head)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_hdr_wrb *hdr = queue_index_node(txq, head);
	u32 wrb_cnt = skb_wrb_cnt(skb);

	wrb_fill_hdr(adapter, hdr, wrb_params, skb);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	/* The slot must be free; remember the skb so it can be found
	 * (and released) when its transmit completes.
	 */
	BUG_ON(txo->sent_skb_list[head]);
	txo->sent_skb_list[head] = skb;

	txo->last_req_hdr = head;
	txo->last_req_wrb_cnt = wrb_cnt;
	txo->pend_wrb_cnt += wrb_cnt;
	atomic_add(wrb_cnt, &txq->used);
}
/* Setup a WRB fragment (buffer descriptor) for xmit */
/* Program the next free WRB with the DMA address and length of one
 * packet fragment, then advance the queue's producer index.
 */
static void be_tx_setup_wrb_frag(struct be_tx_obj *txo, dma_addr_t busaddr,
				 int len)
{
	struct be_queue_info *txq = &txo->q;

	wrb_fill(queue_head_node(txq), busaddr, len);
	queue_head_inc(txq);
}
/* Bring the queue back to the state it was in before be_xmit_enqueue() routine
* was invoked. The producer index is restored to the previous packet and the
* WRBs of the current packet are unmapped. Invoked to handle tx setup errors.
*/
/* Undo a partially set up transmit: rewind the producer index to @head
 * (the header slot grabbed by be_xmit_enqueue()) and DMA-unmap every
 * fragment WRB that was already filled in, walking them in order until
 * @copied bytes have been accounted for. Invoked on tx setup errors.
 */
static void be_xmit_restore(struct be_adapter *adapter,
			    struct be_tx_obj *txo, u16 head, bool map_single,
			    u32 copied)
{
	struct be_queue_info *txq = &txo->q;
	struct device *dev = &adapter->pdev->dev;

	txq->head = head;
	/* the header WRB at @head was never mapped; start at the frags */
	queue_head_inc(txq);
	for (; copied; queue_head_inc(txq)) {
		struct be_eth_wrb *wrb = queue_head_node(txq);

		unmap_tx_frag(dev, wrb, map_single);
		/* only the first (linear) buffer can be single-mapped */
		map_single = false;
		copied -= le32_to_cpu(wrb->frag_len);
	}
	txq->head = head;
}
/* Enqueue the given packet for transmit. This routine allocates WRBs for the
* packet, dma maps the packet buffers and sets up the WRBs. Returns the number
* of WRBs used up by the packet.
*/
static u32 be_xmit_enqueue(struct be_adapter *adapter, struct be_tx_obj *txo, static u32 be_xmit_enqueue(struct be_adapter *adapter, struct be_tx_obj *txo,
struct sk_buff *skb, struct sk_buff *skb,
struct be_wrb_params *wrb_params) struct be_wrb_params *wrb_params)
...@@ -819,70 +892,43 @@ static u32 be_xmit_enqueue(struct be_adapter *adapter, struct be_tx_obj *txo, ...@@ -819,70 +892,43 @@ static u32 be_xmit_enqueue(struct be_adapter *adapter, struct be_tx_obj *txo,
u32 i, copied = 0, wrb_cnt = skb_wrb_cnt(skb); u32 i, copied = 0, wrb_cnt = skb_wrb_cnt(skb);
struct device *dev = &adapter->pdev->dev; struct device *dev = &adapter->pdev->dev;
struct be_queue_info *txq = &txo->q; struct be_queue_info *txq = &txo->q;
struct be_eth_hdr_wrb *hdr;
bool map_single = false; bool map_single = false;
struct be_eth_wrb *wrb;
dma_addr_t busaddr;
u16 head = txq->head; u16 head = txq->head;
dma_addr_t busaddr;
int len;
hdr = queue_head_node(txq); head = be_tx_get_wrb_hdr(txo);
wrb_fill_hdr(adapter, hdr, wrb_params, skb);
be_dws_cpu_to_le(hdr, sizeof(*hdr));
queue_head_inc(txq);
if (skb->len > skb->data_len) { if (skb->len > skb->data_len) {
int len = skb_headlen(skb); len = skb_headlen(skb);
busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE); busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
if (dma_mapping_error(dev, busaddr)) if (dma_mapping_error(dev, busaddr))
goto dma_err; goto dma_err;
map_single = true; map_single = true;
wrb = queue_head_node(txq); be_tx_setup_wrb_frag(txo, busaddr, len);
wrb_fill(wrb, busaddr, len);
queue_head_inc(txq);
copied += len; copied += len;
} }
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i]; const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
len = skb_frag_size(frag);
busaddr = skb_frag_dma_map(dev, frag, 0, busaddr = skb_frag_dma_map(dev, frag, 0, len, DMA_TO_DEVICE);
skb_frag_size(frag), DMA_TO_DEVICE);
if (dma_mapping_error(dev, busaddr)) if (dma_mapping_error(dev, busaddr))
goto dma_err; goto dma_err;
wrb = queue_head_node(txq); be_tx_setup_wrb_frag(txo, busaddr, len);
wrb_fill(wrb, busaddr, skb_frag_size(frag)); copied += len;
queue_head_inc(txq);
copied += skb_frag_size(frag);
} }
BUG_ON(txo->sent_skb_list[head]); be_tx_setup_wrb_hdr(adapter, txo, wrb_params, skb, head);
txo->sent_skb_list[head] = skb;
txo->last_req_hdr = head;
atomic_add(wrb_cnt, &txq->used);
txo->last_req_wrb_cnt = wrb_cnt;
txo->pend_wrb_cnt += wrb_cnt;
be_tx_stats_update(txo, skb); be_tx_stats_update(txo, skb);
return wrb_cnt; return wrb_cnt;
dma_err: dma_err:
/* Bring the queue back to the state it was in before this adapter->drv_stats.dma_map_errors++;
* routine was invoked. be_xmit_restore(adapter, txo, head, map_single, copied);
*/
txq->head = head;
/* skip the first wrb (hdr); it's not mapped */
queue_head_inc(txq);
while (copied) {
wrb = queue_head_node(txq);
unmap_tx_frag(dev, wrb, map_single);
map_single = false;
copied -= le32_to_cpu(wrb->frag_len);
adapter->drv_stats.dma_map_errors++;
queue_head_inc(txq);
}
txq->head = head;
return 0; return 0;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment