Commit 590ddaa4 authored by David S. Miller

Merge branch 'bcmgenet-cleanups'

Petri Gynther says:

====================
bcmgenet cleanups

Three cleanup patches for bcmgenet.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 66f87790 7ee40625
@@ -104,8 +104,8 @@ static inline void dmadesc_set_addr(struct bcmgenet_priv *priv,
 static inline void dmadesc_set(struct bcmgenet_priv *priv,
 			       void __iomem *d, dma_addr_t addr, u32 val)
 {
-	dmadesc_set_length_status(priv, d, val);
 	dmadesc_set_addr(priv, d, addr);
+	dmadesc_set_length_status(priv, d, val);
 }
 
 static inline dma_addr_t dmadesc_get_addr(struct bcmgenet_priv *priv,
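
For reference, this is how the helper reads once the hunk is applied, with comments spelling out the new write order; the comments are an annotation on the diff above rather than wording from the patch, and dmadesc_set_addr()/dmadesc_set_length_status() are the driver's existing helpers:

static inline void dmadesc_set(struct bcmgenet_priv *priv,
			       void __iomem *d, dma_addr_t addr, u32 val)
{
	/* Program the descriptor's buffer address word(s) first ... */
	dmadesc_set_addr(priv, d, addr);
	/* ... and write the length/status word (buffer length plus the
	 * checksum/QTAG flags packed into 'val') last, once the address
	 * is already in place.
	 */
	dmadesc_set_length_status(priv, d, val);
}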
@@ -1331,6 +1331,7 @@ static int bcmgenet_xmit_frag(struct net_device *dev,
 	struct bcmgenet_priv *priv = netdev_priv(dev);
 	struct device *kdev = &priv->pdev->dev;
 	struct enet_cb *tx_cb_ptr;
+	unsigned int frag_size;
 	dma_addr_t mapping;
 	int ret;
 
@@ -1338,10 +1339,12 @@ static int bcmgenet_xmit_frag(struct net_device *dev,
 	if (unlikely(!tx_cb_ptr))
 		BUG();
 
 	tx_cb_ptr->skb = NULL;
 
-	mapping = skb_frag_dma_map(kdev, frag, 0,
-				   skb_frag_size(frag), DMA_TO_DEVICE);
+	frag_size = skb_frag_size(frag);
+
+	mapping = skb_frag_dma_map(kdev, frag, 0, frag_size, DMA_TO_DEVICE);
+
 	ret = dma_mapping_error(kdev, mapping);
 	if (ret) {
 		priv->mib.tx_dma_failed++;
@@ -1351,10 +1354,10 @@ static int bcmgenet_xmit_frag(struct net_device *dev,
 	}
 
 	dma_unmap_addr_set(tx_cb_ptr, dma_addr, mapping);
-	dma_unmap_len_set(tx_cb_ptr, dma_len, frag->size);
+	dma_unmap_len_set(tx_cb_ptr, dma_len, frag_size);
 
 	dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping,
-		    (frag->size << DMA_BUFLENGTH_SHIFT) | dma_desc_flags |
+		    (frag_size << DMA_BUFLENGTH_SHIFT) | dma_desc_flags |
 		    (priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT));
 
 	return 0;
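
Taken together, the three bcmgenet_xmit_frag() hunks read roughly as below once applied: the fragment length is fetched once through the skb_frag_size() accessor and that single frag_size value feeds the DMA mapping, the unmap bookkeeping and the descriptor length field, where the old code mixed skb_frag_size(frag) with direct frag->size accesses. A condensed sketch with the error path shortened (the real function records the failure in priv->mib.tx_dma_failed and returns the dma_mapping_error() result; -ENOMEM here is only a placeholder):

	unsigned int frag_size;
	dma_addr_t mapping;

	/* Read the fragment length once and reuse it everywhere below. */
	frag_size = skb_frag_size(frag);

	mapping = skb_frag_dma_map(kdev, frag, 0, frag_size, DMA_TO_DEVICE);
	if (dma_mapping_error(kdev, mapping))
		return -ENOMEM;	/* full function bumps priv->mib.tx_dma_failed */

	dma_unmap_addr_set(tx_cb_ptr, dma_addr, mapping);
	dma_unmap_len_set(tx_cb_ptr, dma_len, frag_size);	/* same length as mapped */

	dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping,
		    (frag_size << DMA_BUFLENGTH_SHIFT) | dma_desc_flags |
		    (priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT));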
@@ -1447,15 +1450,19 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
 	else
 		index -= 1;
 
-	nr_frags = skb_shinfo(skb)->nr_frags;
 	ring = &priv->tx_rings[index];
 	txq = netdev_get_tx_queue(dev, ring->queue);
 
+	nr_frags = skb_shinfo(skb)->nr_frags;
+
 	spin_lock_irqsave(&ring->lock, flags);
-	if (ring->free_bds <= nr_frags + 1) {
-		netif_tx_stop_queue(txq);
-		netdev_err(dev, "%s: tx ring %d full when queue %d awake\n",
-			   __func__, index, ring->queue);
+	if (ring->free_bds <= (nr_frags + 1)) {
+		if (!netif_tx_queue_stopped(txq)) {
+			netif_tx_stop_queue(txq);
+			netdev_err(dev,
+				   "%s: tx ring %d full when queue %d awake\n",
+				   __func__, index, ring->queue);
+		}
 		ret = NETDEV_TX_BUSY;
 		goto out;
 	}
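
The bcmgenet_xmit() hunk changes the ring-full path so the queue is stopped and the error is logged only on the transition from awake to stopped; once the queue is already stopped, further transmit attempts simply return NETDEV_TX_BUSY without re-stopping it or spamming the log. An annotated restatement of the new block (same calls as in the hunk above, comments added as interpretation):

	spin_lock_irqsave(&ring->lock, flags);
	if (ring->free_bds <= (nr_frags + 1)) {
		/* Ring does not have enough free descriptors for this skb. */
		if (!netif_tx_queue_stopped(txq)) {
			/* First time the full ring is hit: stop the queue and
			 * complain once, instead of logging on every retry.
			 */
			netif_tx_stop_queue(txq);
			netdev_err(dev,
				   "%s: tx ring %d full when queue %d awake\n",
				   __func__, index, ring->queue);
		}
		ret = NETDEV_TX_BUSY;
		goto out;
	}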