Commit 533da29b authored by David S. Miller

Merge branch 'bcmgenet-Fragmented-SKB-corrections'

Doug Berger says:

====================
bcmgenet: Fragmented SKB corrections

Two issues, both addressed by this patch set, were observed in a review
of the bcmgenet driver's support for fragmented SKBs.

The first addresses a problem that could occur if the driver is unable
to DMA map a fragment of the SKB.  This would be a highly unusual event,
but it would leave the hardware descriptors in an invalid state, which
should be prevented (a simplified sketch of the unwind pattern used by
the fix follows the commit metadata below).

The second is a hazard that could occur if the driver reclaims the first
control block of a fragmented SKB before all of its fragments have been
processed by the hardware.  In that case the SKB could be freed, leading
to reuse of memory that is still in use by the hardware (a second sketch
below illustrates the resulting free-on-last-fragment rule).
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents ea6c3077 f48bed16
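
[Editor's sketch 1] The first fix relies on the fact that claiming a transmit
descriptor is pure ring-pointer arithmetic, so a failed mapping can be unwound
by stepping the ring's local write pointer back over every slot claimed for
the SKB (the role bcmgenet_put_txcb() plays in the diff below). Here is a
minimal, self-contained sketch of that claim/unclaim pattern; desc_ring,
ring_get(), and ring_put() are illustrative stand-ins, not the driver's
actual types or helpers.

    /* Stand-in ring bookkeeping (cf. the driver's write_ptr/cb_ptr/end_ptr). */
    struct desc_ring {
    	unsigned int write_ptr;	/* index of the next free descriptor */
    	unsigned int first;	/* first valid index (inclusive) */
    	unsigned int last;	/* last valid index (inclusive) */
    };

    /* Claim the next descriptor: return its index and advance, wrapping. */
    static unsigned int ring_get(struct desc_ring *r)
    {
    	unsigned int idx = r->write_ptr;

    	r->write_ptr = (idx == r->last) ? r->first : idx + 1;
    	return idx;
    }

    /* Undo the most recent claim: rewind the write pointer, wrapping, and
     * return the released index so the caller can unmap what it mapped there.
     */
    static unsigned int ring_put(struct desc_ring *r)
    {
    	r->write_ptr = (r->write_ptr == r->first) ? r->last
    						  : r->write_ptr - 1;
    	return r->write_ptr;
    }

On a mapping failure partway through an SKB, the error path backs up over the
just-claimed slot and then releases and unmaps each previously programmed slot
before freeing the SKB, so the hardware is never handed a partially mapped
descriptor chain.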
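[Editor's sketch 2] The second fix records the first and last control block
that an SKB occupies (the first_cb/last_cb fields added to struct
bcmgenet_skb_cb below) so that reclaim only reports the SKB as freeable once
its final fragment has completed. A minimal sketch of that rule follows,
again with stand-in types: packet, control_blk, and reclaim_cb() are
hypothetical names, and the real helper, bcmgenet_free_tx_cb(), additionally
performs the DMA unmapping.

    #include <stddef.h>

    struct control_blk;

    /* Stand-in for the per-SKB bookkeeping (cf. struct bcmgenet_skb_cb). */
    struct packet {
    	struct control_blk *first_cb;	/* descriptor holding the head */
    	struct control_blk *last_cb;	/* descriptor of the final fragment */
    };

    /* Stand-in for the per-descriptor control block (cf. struct enet_cb). */
    struct control_blk {
    	struct packet *pkt;	/* owner; set on every block of the packet */
    };

    /* Reclaim one completed control block. The packet is handed back for
     * freeing only from its last block; until then the hardware may still
     * be reading other fragments of the same packet.
     */
    static struct packet *reclaim_cb(struct control_blk *cb)
    {
    	struct packet *pkt = cb->pkt;

    	if (!pkt)
    		return NULL;	/* this block carried no packet data */

    	cb->pkt = NULL;
    	/* the real helper also unmaps the block's DMA buffer here:
    	 * dma_unmap_single() for first_cb, dma_unmap_page() otherwise
    	 */

    	return (cb == pkt->last_cb) ? pkt : NULL;
    }

Because every control block of the packet carries an owner pointer but only
last_cb triggers the free, completion of the head or a middle fragment can no
longer free memory that later fragments still reference.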
drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -1202,12 +1202,21 @@ static struct enet_cb *bcmgenet_get_txcb(struct bcmgenet_priv *priv,
 	return tx_cb_ptr;
 }
 
-/* Simple helper to free a control block's resources */
-static void bcmgenet_free_cb(struct enet_cb *cb)
+static struct enet_cb *bcmgenet_put_txcb(struct bcmgenet_priv *priv,
+					 struct bcmgenet_tx_ring *ring)
 {
-	dev_kfree_skb_any(cb->skb);
-	cb->skb = NULL;
-	dma_unmap_addr_set(cb, dma_addr, 0);
+	struct enet_cb *tx_cb_ptr;
+
+	tx_cb_ptr = ring->cbs;
+	tx_cb_ptr += ring->write_ptr - ring->cb_ptr;
+
+	/* Rewinding local write pointer */
+	if (ring->write_ptr == ring->cb_ptr)
+		ring->write_ptr = ring->end_ptr;
+	else
+		ring->write_ptr--;
+
+	return tx_cb_ptr;
 }
 
 static inline void bcmgenet_rx_ring16_int_disable(struct bcmgenet_rx_ring *ring)
@@ -1260,18 +1269,72 @@ static inline void bcmgenet_tx_ring_int_disable(struct bcmgenet_tx_ring *ring)
 				 INTRL2_CPU_MASK_SET);
 }
 
+/* Simple helper to free a transmit control block's resources
+ * Returns an skb when the last transmit control block associated with the
+ * skb is freed.  The skb should be freed by the caller if necessary.
+ */
+static struct sk_buff *bcmgenet_free_tx_cb(struct device *dev,
+					   struct enet_cb *cb)
+{
+	struct sk_buff *skb;
+
+	skb = cb->skb;
+
+	if (skb) {
+		cb->skb = NULL;
+		if (cb == GENET_CB(skb)->first_cb)
+			dma_unmap_single(dev, dma_unmap_addr(cb, dma_addr),
+					 dma_unmap_len(cb, dma_len),
+					 DMA_TO_DEVICE);
+		else
+			dma_unmap_page(dev, dma_unmap_addr(cb, dma_addr),
+				       dma_unmap_len(cb, dma_len),
+				       DMA_TO_DEVICE);
+		dma_unmap_addr_set(cb, dma_addr, 0);
+
+		if (cb == GENET_CB(skb)->last_cb)
+			return skb;
+
+	} else if (dma_unmap_addr(cb, dma_addr)) {
+		dma_unmap_page(dev,
+			       dma_unmap_addr(cb, dma_addr),
+			       dma_unmap_len(cb, dma_len),
+			       DMA_TO_DEVICE);
+		dma_unmap_addr_set(cb, dma_addr, 0);
+	}
+
+	return 0;
+}
+
+/* Simple helper to free a receive control block's resources */
+static struct sk_buff *bcmgenet_free_rx_cb(struct device *dev,
+					   struct enet_cb *cb)
+{
+	struct sk_buff *skb;
+
+	skb = cb->skb;
+	cb->skb = NULL;
+
+	if (dma_unmap_addr(cb, dma_addr)) {
+		dma_unmap_single(dev, dma_unmap_addr(cb, dma_addr),
+				 dma_unmap_len(cb, dma_len), DMA_FROM_DEVICE);
+		dma_unmap_addr_set(cb, dma_addr, 0);
+	}
+
+	return skb;
+}
+
 /* Unlocked version of the reclaim routine */
 static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
 					  struct bcmgenet_tx_ring *ring)
 {
 	struct bcmgenet_priv *priv = netdev_priv(dev);
-	struct device *kdev = &priv->pdev->dev;
-	struct enet_cb *tx_cb_ptr;
-	unsigned int pkts_compl = 0;
+	unsigned int txbds_processed = 0;
 	unsigned int bytes_compl = 0;
-	unsigned int c_index;
+	unsigned int pkts_compl = 0;
 	unsigned int txbds_ready;
-	unsigned int txbds_processed = 0;
+	unsigned int c_index;
+	struct sk_buff *skb;
 
 	/* Clear status before servicing to reduce spurious interrupts */
 	if (ring->index == DESC_INDEX)
@@ -1292,21 +1355,12 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
 
 	/* Reclaim transmitted buffers */
 	while (txbds_processed < txbds_ready) {
-		tx_cb_ptr = &priv->tx_cbs[ring->clean_ptr];
-		if (tx_cb_ptr->skb) {
+		skb = bcmgenet_free_tx_cb(&priv->pdev->dev,
+					  &priv->tx_cbs[ring->clean_ptr]);
+		if (skb) {
 			pkts_compl++;
-			bytes_compl += GENET_CB(tx_cb_ptr->skb)->bytes_sent;
-			dma_unmap_single(kdev,
-					 dma_unmap_addr(tx_cb_ptr, dma_addr),
-					 dma_unmap_len(tx_cb_ptr, dma_len),
-					 DMA_TO_DEVICE);
-			bcmgenet_free_cb(tx_cb_ptr);
-		} else if (dma_unmap_addr(tx_cb_ptr, dma_addr)) {
-			dma_unmap_page(kdev,
-				       dma_unmap_addr(tx_cb_ptr, dma_addr),
-				       dma_unmap_len(tx_cb_ptr, dma_len),
-				       DMA_TO_DEVICE);
-			dma_unmap_addr_set(tx_cb_ptr, dma_addr, 0);
+			bytes_compl += GENET_CB(skb)->bytes_sent;
+			dev_kfree_skb_any(skb);
 		}
 
 		txbds_processed++;
@@ -1380,95 +1434,6 @@ static void bcmgenet_tx_reclaim_all(struct net_device *dev)
 	bcmgenet_tx_reclaim(dev, &priv->tx_rings[DESC_INDEX]);
 }
 
-/* Transmits a single SKB (either head of a fragment or a single SKB)
- * caller must hold priv->lock
- */
-static int bcmgenet_xmit_single(struct net_device *dev,
-				struct sk_buff *skb,
-				u16 dma_desc_flags,
-				struct bcmgenet_tx_ring *ring)
-{
-	struct bcmgenet_priv *priv = netdev_priv(dev);
-	struct device *kdev = &priv->pdev->dev;
-	struct enet_cb *tx_cb_ptr;
-	unsigned int skb_len;
-	dma_addr_t mapping;
-	u32 length_status;
-	int ret;
-
-	tx_cb_ptr = bcmgenet_get_txcb(priv, ring);
-
-	if (unlikely(!tx_cb_ptr))
-		BUG();
-
-	tx_cb_ptr->skb = skb;
-
-	skb_len = skb_headlen(skb);
-
-	mapping = dma_map_single(kdev, skb->data, skb_len, DMA_TO_DEVICE);
-	ret = dma_mapping_error(kdev, mapping);
-	if (ret) {
-		priv->mib.tx_dma_failed++;
-		netif_err(priv, tx_err, dev, "Tx DMA map failed\n");
-		dev_kfree_skb(skb);
-		return ret;
-	}
-
-	dma_unmap_addr_set(tx_cb_ptr, dma_addr, mapping);
-	dma_unmap_len_set(tx_cb_ptr, dma_len, skb_len);
-	length_status = (skb_len << DMA_BUFLENGTH_SHIFT) | dma_desc_flags |
-			(priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT) |
-			DMA_TX_APPEND_CRC;
-
-	if (skb->ip_summed == CHECKSUM_PARTIAL)
-		length_status |= DMA_TX_DO_CSUM;
-
-	dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping, length_status);
-
-	return 0;
-}
-
-/* Transmit a SKB fragment */
-static int bcmgenet_xmit_frag(struct net_device *dev,
-			      skb_frag_t *frag,
-			      u16 dma_desc_flags,
-			      struct bcmgenet_tx_ring *ring)
-{
-	struct bcmgenet_priv *priv = netdev_priv(dev);
-	struct device *kdev = &priv->pdev->dev;
-	struct enet_cb *tx_cb_ptr;
-	unsigned int frag_size;
-	dma_addr_t mapping;
-	int ret;
-
-	tx_cb_ptr = bcmgenet_get_txcb(priv, ring);
-
-	if (unlikely(!tx_cb_ptr))
-		BUG();
-
-	tx_cb_ptr->skb = NULL;
-
-	frag_size = skb_frag_size(frag);
-
-	mapping = skb_frag_dma_map(kdev, frag, 0, frag_size, DMA_TO_DEVICE);
-	ret = dma_mapping_error(kdev, mapping);
-	if (ret) {
-		priv->mib.tx_dma_failed++;
-		netif_err(priv, tx_err, dev, "%s: Tx DMA map failed\n",
-			  __func__);
-		return ret;
-	}
-
-	dma_unmap_addr_set(tx_cb_ptr, dma_addr, mapping);
-	dma_unmap_len_set(tx_cb_ptr, dma_len, frag_size);
-
-	dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping,
-		    (frag_size << DMA_BUFLENGTH_SHIFT) | dma_desc_flags |
-		    (priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT));
-
-	return 0;
-}
-
 /* Reallocate the SKB to put enough headroom in front of it and insert
  * the transmit checksum offsets in the descriptors
  */
@@ -1535,11 +1500,16 @@ static struct sk_buff *bcmgenet_put_tx_csum(struct net_device *dev,
 static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct bcmgenet_priv *priv = netdev_priv(dev);
+	struct device *kdev = &priv->pdev->dev;
 	struct bcmgenet_tx_ring *ring = NULL;
+	struct enet_cb *tx_cb_ptr;
 	struct netdev_queue *txq;
 	unsigned long flags = 0;
 	int nr_frags, index;
-	u16 dma_desc_flags;
+	dma_addr_t mapping;
+	unsigned int size;
+	skb_frag_t *frag;
+	u32 len_stat;
 	int ret;
 	int i;
@@ -1592,29 +1562,53 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
 		}
 	}
 
-	dma_desc_flags = DMA_SOP;
-	if (nr_frags == 0)
-		dma_desc_flags |= DMA_EOP;
-
-	/* Transmit single SKB or head of fragment list */
-	ret = bcmgenet_xmit_single(dev, skb, dma_desc_flags, ring);
-	if (ret) {
-		ret = NETDEV_TX_OK;
-		goto out;
-	}
-
-	/* xmit fragment */
-	for (i = 0; i < nr_frags; i++) {
-		ret = bcmgenet_xmit_frag(dev,
-					 &skb_shinfo(skb)->frags[i],
-					 (i == nr_frags - 1) ? DMA_EOP : 0,
-					 ring);
+	for (i = 0; i <= nr_frags; i++) {
+		tx_cb_ptr = bcmgenet_get_txcb(priv, ring);
+
+		if (unlikely(!tx_cb_ptr))
+			BUG();
+
+		if (!i) {
+			/* Transmit single SKB or head of fragment list */
+			GENET_CB(skb)->first_cb = tx_cb_ptr;
+			size = skb_headlen(skb);
+			mapping = dma_map_single(kdev, skb->data, size,
+						 DMA_TO_DEVICE);
+		} else {
+			/* xmit fragment */
+			frag = &skb_shinfo(skb)->frags[i - 1];
+			size = skb_frag_size(frag);
+			mapping = skb_frag_dma_map(kdev, frag, 0, size,
+						   DMA_TO_DEVICE);
+		}
+
+		ret = dma_mapping_error(kdev, mapping);
 		if (ret) {
+			priv->mib.tx_dma_failed++;
+			netif_err(priv, tx_err, dev, "Tx DMA map failed\n");
 			ret = NETDEV_TX_OK;
-			goto out;
+			goto out_unmap_frags;
+		}
+
+		dma_unmap_addr_set(tx_cb_ptr, dma_addr, mapping);
+		dma_unmap_len_set(tx_cb_ptr, dma_len, size);
+
+		tx_cb_ptr->skb = skb;
+
+		len_stat = (size << DMA_BUFLENGTH_SHIFT) |
+			   (priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT);
+
+		if (!i) {
+			len_stat |= DMA_TX_APPEND_CRC | DMA_SOP;
+			if (skb->ip_summed == CHECKSUM_PARTIAL)
+				len_stat |= DMA_TX_DO_CSUM;
 		}
+		if (i == nr_frags)
+			len_stat |= DMA_EOP;
+
+		dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping, len_stat);
 	}
 
+	GENET_CB(skb)->last_cb = tx_cb_ptr;
 	skb_tx_timestamp(skb);
 
 	/* Decrement total BD count and advance our write pointer */
@@ -1635,6 +1629,19 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
 	spin_unlock_irqrestore(&ring->lock, flags);
 
 	return ret;
+
+out_unmap_frags:
+	/* Back up for failed control block mapping */
+	bcmgenet_put_txcb(priv, ring);
+
+	/* Unmap successfully mapped control blocks */
+	while (i-- > 0) {
+		tx_cb_ptr = bcmgenet_put_txcb(priv, ring);
+		bcmgenet_free_tx_cb(kdev, tx_cb_ptr);
+	}
+
+	dev_kfree_skb(skb);
+	goto out;
 }
 
 static struct sk_buff *bcmgenet_rx_refill(struct bcmgenet_priv *priv,
@@ -1666,14 +1673,12 @@ static struct sk_buff *bcmgenet_rx_refill(struct bcmgenet_priv *priv,
 	}
 
 	/* Grab the current Rx skb from the ring and DMA-unmap it */
-	rx_skb = cb->skb;
-	if (likely(rx_skb))
-		dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
-				 priv->rx_buf_len, DMA_FROM_DEVICE);
+	rx_skb = bcmgenet_free_rx_cb(kdev, cb);
 
 	/* Put the new Rx skb on the ring */
 	cb->skb = skb;
 	dma_unmap_addr_set(cb, dma_addr, mapping);
+	dma_unmap_len_set(cb, dma_len, priv->rx_buf_len);
 	dmadesc_set_addr(priv, cb->bd_addr, mapping);
 
 	/* Return the current Rx skb to caller */
@@ -1880,22 +1885,16 @@ static int bcmgenet_alloc_rx_buffers(struct bcmgenet_priv *priv,
 
 static void bcmgenet_free_rx_buffers(struct bcmgenet_priv *priv)
 {
-	struct device *kdev = &priv->pdev->dev;
+	struct sk_buff *skb;
 	struct enet_cb *cb;
 	int i;
 
 	for (i = 0; i < priv->num_rx_bds; i++) {
 		cb = &priv->rx_cbs[i];
 
-		if (dma_unmap_addr(cb, dma_addr)) {
-			dma_unmap_single(kdev,
-					 dma_unmap_addr(cb, dma_addr),
-					 priv->rx_buf_len, DMA_FROM_DEVICE);
-			dma_unmap_addr_set(cb, dma_addr, 0);
-		}
-
-		if (cb->skb)
-			bcmgenet_free_cb(cb);
+		skb = bcmgenet_free_rx_cb(&priv->pdev->dev, cb);
+		if (skb)
+			dev_kfree_skb_any(skb);
 	}
 }
@@ -2479,8 +2478,10 @@ static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv)
 
 static void bcmgenet_fini_dma(struct bcmgenet_priv *priv)
 {
-	int i;
 	struct netdev_queue *txq;
+	struct sk_buff *skb;
+	struct enet_cb *cb;
+	int i;
 
 	bcmgenet_fini_rx_napi(priv);
 	bcmgenet_fini_tx_napi(priv);
@@ -2489,10 +2490,10 @@ static void bcmgenet_fini_dma(struct bcmgenet_priv *priv)
 	bcmgenet_dma_teardown(priv);
 
 	for (i = 0; i < priv->num_tx_bds; i++) {
-		if (priv->tx_cbs[i].skb != NULL) {
-			dev_kfree_skb(priv->tx_cbs[i].skb);
-			priv->tx_cbs[i].skb = NULL;
-		}
+		cb = priv->tx_cbs + i;
+		skb = bcmgenet_free_tx_cb(&priv->pdev->dev, cb);
+		if (skb)
+			dev_kfree_skb(skb);
 	}
 
 	for (i = 0; i < priv->hw_params->tx_queues; i++) {
drivers/net/ethernet/broadcom/genet/bcmgenet.h
@@ -544,6 +544,8 @@ struct bcmgenet_hw_params {
 };
 
 struct bcmgenet_skb_cb {
+	struct enet_cb *first_cb;	/* First control block of SKB */
+	struct enet_cb *last_cb;	/* Last control block of SKB */
 	unsigned int bytes_sent;	/* bytes on the wire (no TSB) */
 };