Commit c4b16068 authored by Neil Horman's avatar Neil Horman Committed by David S. Miller

jme: Fix unmap loop counting error:

In my recent fix (76a691d0: fix dma unmap warning), Ben Hutchings noted that my
loop count was incorrect.  Where j started at startidx, it should have started
at zero, and gone on for count entries, not to endidx.  Additionally, a DMA
resource exhaustion should drop the frame and (for now), return
NETDEV_TX_OK, not NETDEV_TX_BUSY.  This patch fixes both of those issues:
Signed-off-by: Neil Horman <nhorman@tuxdriver.com>
CC: Ben Hutchings <ben@decadent.org.uk>
CC: "David S. Miller" <davem@davemloft.net>
CC: Guo-Fu Tseng <cooldavid@cooldavid.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 3a1cebe7
...@@ -2027,14 +2027,14 @@ jme_fill_tx_map(struct pci_dev *pdev, ...@@ -2027,14 +2027,14 @@ jme_fill_tx_map(struct pci_dev *pdev,
return 0; return 0;
} }
static void jme_drop_tx_map(struct jme_adapter *jme, int startidx, int endidx) static void jme_drop_tx_map(struct jme_adapter *jme, int startidx, int count)
{ {
struct jme_ring *txring = &(jme->txring[0]); struct jme_ring *txring = &(jme->txring[0]);
struct jme_buffer_info *txbi = txring->bufinf, *ctxbi; struct jme_buffer_info *txbi = txring->bufinf, *ctxbi;
int mask = jme->tx_ring_mask; int mask = jme->tx_ring_mask;
int j; int j;
for (j = startidx ; j < endidx ; ++j) { for (j = 0 ; j < count ; j++) {
ctxbi = txbi + ((startidx + j + 2) & (mask)); ctxbi = txbi + ((startidx + j + 2) & (mask));
pci_unmap_page(jme->pdev, pci_unmap_page(jme->pdev,
ctxbi->mapping, ctxbi->mapping,
...@@ -2069,7 +2069,7 @@ jme_map_tx_skb(struct jme_adapter *jme, struct sk_buff *skb, int idx) ...@@ -2069,7 +2069,7 @@ jme_map_tx_skb(struct jme_adapter *jme, struct sk_buff *skb, int idx)
skb_frag_page(frag), skb_frag_page(frag),
frag->page_offset, skb_frag_size(frag), hidma); frag->page_offset, skb_frag_size(frag), hidma);
if (ret) { if (ret) {
jme_drop_tx_map(jme, idx, idx+i); jme_drop_tx_map(jme, idx, i);
goto out; goto out;
} }
...@@ -2081,7 +2081,7 @@ jme_map_tx_skb(struct jme_adapter *jme, struct sk_buff *skb, int idx) ...@@ -2081,7 +2081,7 @@ jme_map_tx_skb(struct jme_adapter *jme, struct sk_buff *skb, int idx)
ret = jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi, virt_to_page(skb->data), ret = jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi, virt_to_page(skb->data),
offset_in_page(skb->data), len, hidma); offset_in_page(skb->data), len, hidma);
if (ret) if (ret)
jme_drop_tx_map(jme, idx, idx+i); jme_drop_tx_map(jme, idx, i);
out: out:
return ret; return ret;
...@@ -2269,7 +2269,7 @@ jme_start_xmit(struct sk_buff *skb, struct net_device *netdev) ...@@ -2269,7 +2269,7 @@ jme_start_xmit(struct sk_buff *skb, struct net_device *netdev)
} }
if (jme_fill_tx_desc(jme, skb, idx)) if (jme_fill_tx_desc(jme, skb, idx))
return NETDEV_TX_BUSY; return NETDEV_TX_OK;
jwrite32(jme, JME_TXCS, jme->reg_txcs | jwrite32(jme, JME_TXCS, jme->reg_txcs |
TXCS_SELECT_QUEUE0 | TXCS_SELECT_QUEUE0 |
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment