Commit cf3c4c03 authored by Neil Horman, committed by David S. Miller

8139cp: Add dma_mapping_error checking

Self-explanatory dma_mapping_error addition to the 8139cp driver, based on this:
https://bugzilla.redhat.com/show_bug.cgi?id=947250

It showed several backtraces arising from dma_map_* usage without checking the
return code on the mapping.  Add the check and abort the rx/tx operation if the
mapping failed.  Untested as I have no hardware and the reporter has wandered off,
but it seems pretty straightforward.
Signed-off-by: Neil Horman <nhorman@tuxdriver.com>
CC: "David S. Miller" <davem@davemloft.net>
CC: Francois Romieu <romieu@fr.zoreil.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent d9d10a30
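For readers unfamiliar with the API, here is a minimal sketch of the pattern the patch applies throughout the driver (illustrative only, not part of the commit; the helper name and -ENOMEM return convention are assumptions): every dma_map_single() result is checked with dma_mapping_error() before the address is written into a descriptor, and the packet is dropped if the mapping failed.

#include <linux/dma-mapping.h>

/*
 * Illustrative sketch, not part of this commit: map a buffer for device
 * access and verify the mapping before handing it to the hardware.  The
 * helper name and -ENOMEM convention are hypothetical; the real patch
 * open-codes this check at each dma_map_single() call site below.
 */
static int example_map_buffer(struct device *dev, void *buf, size_t len,
			      enum dma_data_direction dir, dma_addr_t *mapping)
{
	*mapping = dma_map_single(dev, buf, len, dir);
	if (dma_mapping_error(dev, *mapping))
		return -ENOMEM;		/* caller drops the packet, no DMA */
	return 0;
}

On failure, the hunks below bump rx_dropped/tx_dropped (and, on the tx fragment path, unwind any fragment mappings already made) instead of letting the hardware DMA through an invalid address.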
drivers/net/ethernet/realtek/8139cp.c

@@ -478,7 +478,7 @@ static int cp_rx_poll(struct napi_struct *napi, int budget)
 	while (1) {
 		u32 status, len;
-		dma_addr_t mapping;
+		dma_addr_t mapping, new_mapping;
 		struct sk_buff *skb, *new_skb;
 		struct cp_desc *desc;
 		const unsigned buflen = cp->rx_buf_sz;
@@ -520,6 +520,13 @@ static int cp_rx_poll(struct napi_struct *napi, int budget)
 			goto rx_next;
 		}
 
+		new_mapping = dma_map_single(&cp->pdev->dev, new_skb->data, buflen,
+					     PCI_DMA_FROMDEVICE);
+		if (dma_mapping_error(&cp->pdev->dev, new_mapping)) {
+			dev->stats.rx_dropped++;
+			goto rx_next;
+		}
+
 		dma_unmap_single(&cp->pdev->dev, mapping,
 				 buflen, PCI_DMA_FROMDEVICE);
@@ -531,12 +538,11 @@ static int cp_rx_poll(struct napi_struct *napi, int budget)
 		skb_put(skb, len);
 
-		mapping = dma_map_single(&cp->pdev->dev, new_skb->data, buflen,
-					 PCI_DMA_FROMDEVICE);
 		cp->rx_skb[rx_tail] = new_skb;
 
 		cp_rx_skb(cp, skb, desc);
 		rx++;
+		mapping = new_mapping;
 
 rx_next:
 		cp->rx_ring[rx_tail].opts2 = 0;
@@ -716,6 +722,22 @@ static inline u32 cp_tx_vlan_tag(struct sk_buff *skb)
 		TxVlanTag | swab16(vlan_tx_tag_get(skb)) : 0x00;
 }
 
+static void unwind_tx_frag_mapping(struct cp_private *cp, struct sk_buff *skb,
+				   int first, int entry_last)
+{
+	int frag, index;
+	struct cp_desc *txd;
+	skb_frag_t *this_frag;
+
+	for (frag = 0; frag+first < entry_last; frag++) {
+		index = first+frag;
+		cp->tx_skb[index] = NULL;
+		txd = &cp->tx_ring[index];
+		this_frag = &skb_shinfo(skb)->frags[frag];
+		dma_unmap_single(&cp->pdev->dev, le64_to_cpu(txd->addr),
+				 skb_frag_size(this_frag), PCI_DMA_TODEVICE);
+	}
+}
+
 static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
 				  struct net_device *dev)
 {
@@ -749,6 +771,9 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
 		len = skb->len;
 		mapping = dma_map_single(&cp->pdev->dev, skb->data, len, PCI_DMA_TODEVICE);
+		if (dma_mapping_error(&cp->pdev->dev, mapping))
+			goto out_dma_error;
+
 		txd->opts2 = opts2;
 		txd->addr = cpu_to_le64(mapping);
 		wmb();
@@ -786,6 +811,9 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
 		first_len = skb_headlen(skb);
 		first_mapping = dma_map_single(&cp->pdev->dev, skb->data,
 					       first_len, PCI_DMA_TODEVICE);
+		if (dma_mapping_error(&cp->pdev->dev, first_mapping))
+			goto out_dma_error;
+
 		cp->tx_skb[entry] = skb;
 		entry = NEXT_TX(entry);
@@ -799,6 +827,11 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
 			mapping = dma_map_single(&cp->pdev->dev,
 						 skb_frag_address(this_frag),
 						 len, PCI_DMA_TODEVICE);
+			if (dma_mapping_error(&cp->pdev->dev, mapping)) {
+				unwind_tx_frag_mapping(cp, skb, first_entry, entry);
+				goto out_dma_error;
+			}
+
 			eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;
 
 			ctrl = eor | len | DescOwn;
@@ -859,11 +892,16 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
 	if (TX_BUFFS_AVAIL(cp) <= (MAX_SKB_FRAGS + 1))
 		netif_stop_queue(dev);
 
+out_unlock:
 	spin_unlock_irqrestore(&cp->lock, intr_flags);
 
 	cpw8(TxPoll, NormalTxPoll);
 
 	return NETDEV_TX_OK;
+
+out_dma_error:
+	kfree_skb(skb);
+	cp->dev->stats.tx_dropped++;
+	goto out_unlock;
 }
 
 /* Set or clear the multicast filter for this adaptor.
@@ -1054,6 +1092,10 @@ static int cp_refill_rx(struct cp_private *cp)
 		mapping = dma_map_single(&cp->pdev->dev, skb->data,
 					 cp->rx_buf_sz, PCI_DMA_FROMDEVICE);
+		if (dma_mapping_error(&cp->pdev->dev, mapping)) {
+			kfree_skb(skb);
+			goto err_out;
+		}
+
 		cp->rx_skb[i] = skb;
 
 		cp->rx_ring[i].opts2 = 0;