Commit 943c246e authored by Roland Dreier

IPoIB: Use netif_tx_lock() and get rid of private tx_lock, LLTX

Currently, IPoIB is an LLTX driver that uses its own IRQ-disabling
tx_lock.  Not only do we want to get rid of LLTX, this actually causes
problems because of the skb_orphan() done with this tx_lock held: some
skb destructors expect to be run with interrupts enabled.

The simplest fix for this is to get rid of the driver-private tx_lock
and stop using LLTX.  We kill off priv->tx_lock and use
netif_tx_lock[_bh]() instead; the patch to do this is a tiny bit
tricky because we need to update places that take priv->lock inside
the tx_lock to disable IRQs, rather than relying on tx_lock having
already disabled IRQs.

Also, there are a couple of places where we need to disable BHs to
make sure we have a consistent context to call netif_tx_lock() (since
we no longer can use _irqsave() variants), and we also have to change
ipoib_send_comp_handler() to call drain_tx_cq() through a timer rather
than directly, because ipoib_send_comp_handler() runs in interrupt
context and drain_tx_cq() must run in BH context so it can call
netif_tx_lock().
Signed-off-by: Roland Dreier <rolandd@cisco.com>
parent c9da4bad
...@@ -268,10 +268,9 @@ struct ipoib_lro { ...@@ -268,10 +268,9 @@ struct ipoib_lro {
}; };
/* /*
* Device private locking: tx_lock protects members used in TX fast * Device private locking: network stack tx_lock protects members used
* path (and we use LLTX so upper layers don't do extra locking). * in TX fast path, lock protects everything else. lock nests inside
* lock protects everything else. lock nests inside of tx_lock (ie * of tx_lock (ie tx_lock must be acquired first if needed).
* tx_lock must be acquired first if needed).
*/ */
struct ipoib_dev_priv { struct ipoib_dev_priv {
spinlock_t lock; spinlock_t lock;
...@@ -320,7 +319,6 @@ struct ipoib_dev_priv { ...@@ -320,7 +319,6 @@ struct ipoib_dev_priv {
struct ipoib_rx_buf *rx_ring; struct ipoib_rx_buf *rx_ring;
spinlock_t tx_lock;
struct ipoib_tx_buf *tx_ring; struct ipoib_tx_buf *tx_ring;
unsigned tx_head; unsigned tx_head;
unsigned tx_tail; unsigned tx_tail;
......
...@@ -786,7 +786,8 @@ void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc) ...@@ -786,7 +786,8 @@ void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
dev_kfree_skb_any(tx_req->skb); dev_kfree_skb_any(tx_req->skb);
spin_lock_irqsave(&priv->tx_lock, flags); netif_tx_lock(dev);
++tx->tx_tail; ++tx->tx_tail;
if (unlikely(--priv->tx_outstanding == ipoib_sendq_size >> 1) && if (unlikely(--priv->tx_outstanding == ipoib_sendq_size >> 1) &&
netif_queue_stopped(dev) && netif_queue_stopped(dev) &&
...@@ -801,7 +802,7 @@ void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc) ...@@ -801,7 +802,7 @@ void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
"(status=%d, wrid=%d vend_err %x)\n", "(status=%d, wrid=%d vend_err %x)\n",
wc->status, wr_id, wc->vendor_err); wc->status, wr_id, wc->vendor_err);
spin_lock(&priv->lock); spin_lock_irqsave(&priv->lock, flags);
neigh = tx->neigh; neigh = tx->neigh;
if (neigh) { if (neigh) {
...@@ -821,10 +822,10 @@ void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc) ...@@ -821,10 +822,10 @@ void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
clear_bit(IPOIB_FLAG_OPER_UP, &tx->flags); clear_bit(IPOIB_FLAG_OPER_UP, &tx->flags);
spin_unlock(&priv->lock); spin_unlock_irqrestore(&priv->lock, flags);
} }
spin_unlock_irqrestore(&priv->tx_lock, flags); netif_tx_unlock(dev);
} }
int ipoib_cm_dev_open(struct net_device *dev) int ipoib_cm_dev_open(struct net_device *dev)
...@@ -1149,7 +1150,6 @@ static void ipoib_cm_tx_destroy(struct ipoib_cm_tx *p) ...@@ -1149,7 +1150,6 @@ static void ipoib_cm_tx_destroy(struct ipoib_cm_tx *p)
{ {
struct ipoib_dev_priv *priv = netdev_priv(p->dev); struct ipoib_dev_priv *priv = netdev_priv(p->dev);
struct ipoib_cm_tx_buf *tx_req; struct ipoib_cm_tx_buf *tx_req;
unsigned long flags;
unsigned long begin; unsigned long begin;
ipoib_dbg(priv, "Destroy active connection 0x%x head 0x%x tail 0x%x\n", ipoib_dbg(priv, "Destroy active connection 0x%x head 0x%x tail 0x%x\n",
...@@ -1180,12 +1180,12 @@ static void ipoib_cm_tx_destroy(struct ipoib_cm_tx *p) ...@@ -1180,12 +1180,12 @@ static void ipoib_cm_tx_destroy(struct ipoib_cm_tx *p)
DMA_TO_DEVICE); DMA_TO_DEVICE);
dev_kfree_skb_any(tx_req->skb); dev_kfree_skb_any(tx_req->skb);
++p->tx_tail; ++p->tx_tail;
spin_lock_irqsave(&priv->tx_lock, flags); netif_tx_lock_bh(p->dev);
if (unlikely(--priv->tx_outstanding == ipoib_sendq_size >> 1) && if (unlikely(--priv->tx_outstanding == ipoib_sendq_size >> 1) &&
netif_queue_stopped(p->dev) && netif_queue_stopped(p->dev) &&
test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
netif_wake_queue(p->dev); netif_wake_queue(p->dev);
spin_unlock_irqrestore(&priv->tx_lock, flags); netif_tx_unlock_bh(p->dev);
} }
if (p->qp) if (p->qp)
...@@ -1202,6 +1202,7 @@ static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id, ...@@ -1202,6 +1202,7 @@ static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id,
struct ipoib_dev_priv *priv = netdev_priv(tx->dev); struct ipoib_dev_priv *priv = netdev_priv(tx->dev);
struct net_device *dev = priv->dev; struct net_device *dev = priv->dev;
struct ipoib_neigh *neigh; struct ipoib_neigh *neigh;
unsigned long flags;
int ret; int ret;
switch (event->event) { switch (event->event) {
...@@ -1220,8 +1221,8 @@ static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id, ...@@ -1220,8 +1221,8 @@ static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id,
case IB_CM_REJ_RECEIVED: case IB_CM_REJ_RECEIVED:
case IB_CM_TIMEWAIT_EXIT: case IB_CM_TIMEWAIT_EXIT:
ipoib_dbg(priv, "CM error %d.\n", event->event); ipoib_dbg(priv, "CM error %d.\n", event->event);
spin_lock_irq(&priv->tx_lock); netif_tx_lock_bh(dev);
spin_lock(&priv->lock); spin_lock_irqsave(&priv->lock, flags);
neigh = tx->neigh; neigh = tx->neigh;
if (neigh) { if (neigh) {
...@@ -1239,8 +1240,8 @@ static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id, ...@@ -1239,8 +1240,8 @@ static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id,
queue_work(ipoib_workqueue, &priv->cm.reap_task); queue_work(ipoib_workqueue, &priv->cm.reap_task);
} }
spin_unlock(&priv->lock); spin_unlock_irqrestore(&priv->lock, flags);
spin_unlock_irq(&priv->tx_lock); netif_tx_unlock_bh(dev);
break; break;
default: default:
break; break;
...@@ -1294,19 +1295,24 @@ static void ipoib_cm_tx_start(struct work_struct *work) ...@@ -1294,19 +1295,24 @@ static void ipoib_cm_tx_start(struct work_struct *work)
struct ib_sa_path_rec pathrec; struct ib_sa_path_rec pathrec;
u32 qpn; u32 qpn;
spin_lock_irqsave(&priv->tx_lock, flags); netif_tx_lock_bh(dev);
spin_lock(&priv->lock); spin_lock_irqsave(&priv->lock, flags);
while (!list_empty(&priv->cm.start_list)) { while (!list_empty(&priv->cm.start_list)) {
p = list_entry(priv->cm.start_list.next, typeof(*p), list); p = list_entry(priv->cm.start_list.next, typeof(*p), list);
list_del_init(&p->list); list_del_init(&p->list);
neigh = p->neigh; neigh = p->neigh;
qpn = IPOIB_QPN(neigh->neighbour->ha); qpn = IPOIB_QPN(neigh->neighbour->ha);
memcpy(&pathrec, &p->path->pathrec, sizeof pathrec); memcpy(&pathrec, &p->path->pathrec, sizeof pathrec);
spin_unlock(&priv->lock);
spin_unlock_irqrestore(&priv->tx_lock, flags); spin_unlock_irqrestore(&priv->lock, flags);
netif_tx_unlock_bh(dev);
ret = ipoib_cm_tx_init(p, qpn, &pathrec); ret = ipoib_cm_tx_init(p, qpn, &pathrec);
spin_lock_irqsave(&priv->tx_lock, flags);
spin_lock(&priv->lock); netif_tx_lock_bh(dev);
spin_lock_irqsave(&priv->lock, flags);
if (ret) { if (ret) {
neigh = p->neigh; neigh = p->neigh;
if (neigh) { if (neigh) {
...@@ -1320,44 +1326,52 @@ static void ipoib_cm_tx_start(struct work_struct *work) ...@@ -1320,44 +1326,52 @@ static void ipoib_cm_tx_start(struct work_struct *work)
kfree(p); kfree(p);
} }
} }
spin_unlock(&priv->lock);
spin_unlock_irqrestore(&priv->tx_lock, flags); spin_unlock_irqrestore(&priv->lock, flags);
netif_tx_unlock_bh(dev);
} }
static void ipoib_cm_tx_reap(struct work_struct *work) static void ipoib_cm_tx_reap(struct work_struct *work)
{ {
struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv, struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
cm.reap_task); cm.reap_task);
struct net_device *dev = priv->dev;
struct ipoib_cm_tx *p; struct ipoib_cm_tx *p;
unsigned long flags;
netif_tx_lock_bh(dev);
spin_lock_irqsave(&priv->lock, flags);
spin_lock_irq(&priv->tx_lock);
spin_lock(&priv->lock);
while (!list_empty(&priv->cm.reap_list)) { while (!list_empty(&priv->cm.reap_list)) {
p = list_entry(priv->cm.reap_list.next, typeof(*p), list); p = list_entry(priv->cm.reap_list.next, typeof(*p), list);
list_del(&p->list); list_del(&p->list);
spin_unlock(&priv->lock); spin_unlock_irqrestore(&priv->lock, flags);
spin_unlock_irq(&priv->tx_lock); netif_tx_unlock_bh(dev);
ipoib_cm_tx_destroy(p); ipoib_cm_tx_destroy(p);
spin_lock_irq(&priv->tx_lock); netif_tx_lock_bh(dev);
spin_lock(&priv->lock); spin_lock_irqsave(&priv->lock, flags);
} }
spin_unlock(&priv->lock);
spin_unlock_irq(&priv->tx_lock); spin_unlock_irqrestore(&priv->lock, flags);
netif_tx_unlock_bh(dev);
} }
static void ipoib_cm_skb_reap(struct work_struct *work) static void ipoib_cm_skb_reap(struct work_struct *work)
{ {
struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv, struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
cm.skb_task); cm.skb_task);
struct net_device *dev = priv->dev;
struct sk_buff *skb; struct sk_buff *skb;
unsigned long flags;
unsigned mtu = priv->mcast_mtu; unsigned mtu = priv->mcast_mtu;
spin_lock_irq(&priv->tx_lock); netif_tx_lock_bh(dev);
spin_lock(&priv->lock); spin_lock_irqsave(&priv->lock, flags);
while ((skb = skb_dequeue(&priv->cm.skb_queue))) { while ((skb = skb_dequeue(&priv->cm.skb_queue))) {
spin_unlock(&priv->lock); spin_unlock_irqrestore(&priv->lock, flags);
spin_unlock_irq(&priv->tx_lock); netif_tx_unlock_bh(dev);
if (skb->protocol == htons(ETH_P_IP)) if (skb->protocol == htons(ETH_P_IP))
icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu)); icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
...@@ -1365,11 +1379,13 @@ static void ipoib_cm_skb_reap(struct work_struct *work) ...@@ -1365,11 +1379,13 @@ static void ipoib_cm_skb_reap(struct work_struct *work)
icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, priv->dev); icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, priv->dev);
#endif #endif
dev_kfree_skb_any(skb); dev_kfree_skb_any(skb);
spin_lock_irq(&priv->tx_lock);
spin_lock(&priv->lock); netif_tx_lock_bh(dev);
spin_lock_irqsave(&priv->lock, flags);
} }
spin_unlock(&priv->lock);
spin_unlock_irq(&priv->tx_lock); spin_unlock_irqrestore(&priv->lock, flags);
netif_tx_unlock_bh(dev);
} }
void ipoib_cm_skb_too_long(struct net_device *dev, struct sk_buff *skb, void ipoib_cm_skb_too_long(struct net_device *dev, struct sk_buff *skb,
......
...@@ -468,21 +468,22 @@ void ipoib_ib_completion(struct ib_cq *cq, void *dev_ptr) ...@@ -468,21 +468,22 @@ void ipoib_ib_completion(struct ib_cq *cq, void *dev_ptr)
static void drain_tx_cq(struct net_device *dev) static void drain_tx_cq(struct net_device *dev)
{ {
struct ipoib_dev_priv *priv = netdev_priv(dev); struct ipoib_dev_priv *priv = netdev_priv(dev);
unsigned long flags;
spin_lock_irqsave(&priv->tx_lock, flags); netif_tx_lock(dev);
while (poll_tx(priv)) while (poll_tx(priv))
; /* nothing */ ; /* nothing */
if (netif_queue_stopped(dev)) if (netif_queue_stopped(dev))
mod_timer(&priv->poll_timer, jiffies + 1); mod_timer(&priv->poll_timer, jiffies + 1);
spin_unlock_irqrestore(&priv->tx_lock, flags); netif_tx_unlock(dev);
} }
void ipoib_send_comp_handler(struct ib_cq *cq, void *dev_ptr) void ipoib_send_comp_handler(struct ib_cq *cq, void *dev_ptr)
{ {
drain_tx_cq((struct net_device *)dev_ptr); struct ipoib_dev_priv *priv = netdev_priv(dev_ptr);
mod_timer(&priv->poll_timer, jiffies);
} }
static inline int post_send(struct ipoib_dev_priv *priv, static inline int post_send(struct ipoib_dev_priv *priv,
...@@ -614,17 +615,20 @@ static void __ipoib_reap_ah(struct net_device *dev) ...@@ -614,17 +615,20 @@ static void __ipoib_reap_ah(struct net_device *dev)
struct ipoib_dev_priv *priv = netdev_priv(dev); struct ipoib_dev_priv *priv = netdev_priv(dev);
struct ipoib_ah *ah, *tah; struct ipoib_ah *ah, *tah;
LIST_HEAD(remove_list); LIST_HEAD(remove_list);
unsigned long flags;
netif_tx_lock_bh(dev);
spin_lock_irqsave(&priv->lock, flags);
spin_lock_irq(&priv->tx_lock);
spin_lock(&priv->lock);
list_for_each_entry_safe(ah, tah, &priv->dead_ahs, list) list_for_each_entry_safe(ah, tah, &priv->dead_ahs, list)
if ((int) priv->tx_tail - (int) ah->last_send >= 0) { if ((int) priv->tx_tail - (int) ah->last_send >= 0) {
list_del(&ah->list); list_del(&ah->list);
ib_destroy_ah(ah->ah); ib_destroy_ah(ah->ah);
kfree(ah); kfree(ah);
} }
spin_unlock(&priv->lock);
spin_unlock_irq(&priv->tx_lock); spin_unlock_irqrestore(&priv->lock, flags);
netif_tx_unlock_bh(dev);
} }
void ipoib_reap_ah(struct work_struct *work) void ipoib_reap_ah(struct work_struct *work)
...@@ -761,6 +765,14 @@ void ipoib_drain_cq(struct net_device *dev) ...@@ -761,6 +765,14 @@ void ipoib_drain_cq(struct net_device *dev)
{ {
struct ipoib_dev_priv *priv = netdev_priv(dev); struct ipoib_dev_priv *priv = netdev_priv(dev);
int i, n; int i, n;
/*
* We call completion handling routines that expect to be
* called from the BH-disabled NAPI poll context, so disable
* BHs here too.
*/
local_bh_disable();
do { do {
n = ib_poll_cq(priv->recv_cq, IPOIB_NUM_WC, priv->ibwc); n = ib_poll_cq(priv->recv_cq, IPOIB_NUM_WC, priv->ibwc);
for (i = 0; i < n; ++i) { for (i = 0; i < n; ++i) {
...@@ -784,6 +796,8 @@ void ipoib_drain_cq(struct net_device *dev) ...@@ -784,6 +796,8 @@ void ipoib_drain_cq(struct net_device *dev)
while (poll_tx(priv)) while (poll_tx(priv))
; /* nothing */ ; /* nothing */
local_bh_enable();
} }
int ipoib_ib_dev_stop(struct net_device *dev, int flush) int ipoib_ib_dev_stop(struct net_device *dev, int flush)
......
...@@ -373,9 +373,10 @@ void ipoib_flush_paths(struct net_device *dev) ...@@ -373,9 +373,10 @@ void ipoib_flush_paths(struct net_device *dev)
struct ipoib_dev_priv *priv = netdev_priv(dev); struct ipoib_dev_priv *priv = netdev_priv(dev);
struct ipoib_path *path, *tp; struct ipoib_path *path, *tp;
LIST_HEAD(remove_list); LIST_HEAD(remove_list);
unsigned long flags;
spin_lock_irq(&priv->tx_lock); netif_tx_lock_bh(dev);
spin_lock(&priv->lock); spin_lock_irqsave(&priv->lock, flags);
list_splice_init(&priv->path_list, &remove_list); list_splice_init(&priv->path_list, &remove_list);
...@@ -385,15 +386,16 @@ void ipoib_flush_paths(struct net_device *dev) ...@@ -385,15 +386,16 @@ void ipoib_flush_paths(struct net_device *dev)
list_for_each_entry_safe(path, tp, &remove_list, list) { list_for_each_entry_safe(path, tp, &remove_list, list) {
if (path->query) if (path->query)
ib_sa_cancel_query(path->query_id, path->query); ib_sa_cancel_query(path->query_id, path->query);
spin_unlock(&priv->lock); spin_unlock_irqrestore(&priv->lock, flags);
spin_unlock_irq(&priv->tx_lock); netif_tx_unlock_bh(dev);
wait_for_completion(&path->done); wait_for_completion(&path->done);
path_free(dev, path); path_free(dev, path);
spin_lock_irq(&priv->tx_lock); netif_tx_lock_bh(dev);
spin_lock(&priv->lock); spin_lock_irqsave(&priv->lock, flags);
} }
spin_unlock(&priv->lock);
spin_unlock_irq(&priv->tx_lock); spin_unlock_irqrestore(&priv->lock, flags);
netif_tx_unlock_bh(dev);
} }
static void path_rec_completion(int status, static void path_rec_completion(int status,
...@@ -555,6 +557,7 @@ static void neigh_add_path(struct sk_buff *skb, struct net_device *dev) ...@@ -555,6 +557,7 @@ static void neigh_add_path(struct sk_buff *skb, struct net_device *dev)
struct ipoib_dev_priv *priv = netdev_priv(dev); struct ipoib_dev_priv *priv = netdev_priv(dev);
struct ipoib_path *path; struct ipoib_path *path;
struct ipoib_neigh *neigh; struct ipoib_neigh *neigh;
unsigned long flags;
neigh = ipoib_neigh_alloc(skb->dst->neighbour, skb->dev); neigh = ipoib_neigh_alloc(skb->dst->neighbour, skb->dev);
if (!neigh) { if (!neigh) {
...@@ -563,11 +566,7 @@ static void neigh_add_path(struct sk_buff *skb, struct net_device *dev) ...@@ -563,11 +566,7 @@ static void neigh_add_path(struct sk_buff *skb, struct net_device *dev)
return; return;
} }
/* spin_lock_irqsave(&priv->lock, flags);
* We can only be called from ipoib_start_xmit, so we're
* inside tx_lock -- no need to save/restore flags.
*/
spin_lock(&priv->lock);
path = __path_find(dev, skb->dst->neighbour->ha + 4); path = __path_find(dev, skb->dst->neighbour->ha + 4);
if (!path) { if (!path) {
...@@ -614,7 +613,7 @@ static void neigh_add_path(struct sk_buff *skb, struct net_device *dev) ...@@ -614,7 +613,7 @@ static void neigh_add_path(struct sk_buff *skb, struct net_device *dev)
__skb_queue_tail(&neigh->queue, skb); __skb_queue_tail(&neigh->queue, skb);
} }
spin_unlock(&priv->lock); spin_unlock_irqrestore(&priv->lock, flags);
return; return;
err_list: err_list:
...@@ -626,7 +625,7 @@ static void neigh_add_path(struct sk_buff *skb, struct net_device *dev) ...@@ -626,7 +625,7 @@ static void neigh_add_path(struct sk_buff *skb, struct net_device *dev)
++dev->stats.tx_dropped; ++dev->stats.tx_dropped;
dev_kfree_skb_any(skb); dev_kfree_skb_any(skb);
spin_unlock(&priv->lock); spin_unlock_irqrestore(&priv->lock, flags);
} }
static void ipoib_path_lookup(struct sk_buff *skb, struct net_device *dev) static void ipoib_path_lookup(struct sk_buff *skb, struct net_device *dev)
...@@ -650,12 +649,9 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev, ...@@ -650,12 +649,9 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
{ {
struct ipoib_dev_priv *priv = netdev_priv(dev); struct ipoib_dev_priv *priv = netdev_priv(dev);
struct ipoib_path *path; struct ipoib_path *path;
unsigned long flags;
/* spin_lock_irqsave(&priv->lock, flags);
* We can only be called from ipoib_start_xmit, so we're
* inside tx_lock -- no need to save/restore flags.
*/
spin_lock(&priv->lock);
path = __path_find(dev, phdr->hwaddr + 4); path = __path_find(dev, phdr->hwaddr + 4);
if (!path || !path->valid) { if (!path || !path->valid) {
...@@ -667,7 +663,7 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev, ...@@ -667,7 +663,7 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
__skb_queue_tail(&path->queue, skb); __skb_queue_tail(&path->queue, skb);
if (path_rec_start(dev, path)) { if (path_rec_start(dev, path)) {
spin_unlock(&priv->lock); spin_unlock_irqrestore(&priv->lock, flags);
path_free(dev, path); path_free(dev, path);
return; return;
} else } else
...@@ -677,7 +673,7 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev, ...@@ -677,7 +673,7 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
dev_kfree_skb_any(skb); dev_kfree_skb_any(skb);
} }
spin_unlock(&priv->lock); spin_unlock_irqrestore(&priv->lock, flags);
return; return;
} }
...@@ -696,7 +692,7 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev, ...@@ -696,7 +692,7 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
dev_kfree_skb_any(skb); dev_kfree_skb_any(skb);
} }
spin_unlock(&priv->lock); spin_unlock_irqrestore(&priv->lock, flags);
} }
static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev) static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
...@@ -705,13 +701,10 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev) ...@@ -705,13 +701,10 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
struct ipoib_neigh *neigh; struct ipoib_neigh *neigh;
unsigned long flags; unsigned long flags;
if (unlikely(!spin_trylock_irqsave(&priv->tx_lock, flags)))
return NETDEV_TX_LOCKED;
if (likely(skb->dst && skb->dst->neighbour)) { if (likely(skb->dst && skb->dst->neighbour)) {
if (unlikely(!*to_ipoib_neigh(skb->dst->neighbour))) { if (unlikely(!*to_ipoib_neigh(skb->dst->neighbour))) {
ipoib_path_lookup(skb, dev); ipoib_path_lookup(skb, dev);
goto out; return NETDEV_TX_OK;
} }
neigh = *to_ipoib_neigh(skb->dst->neighbour); neigh = *to_ipoib_neigh(skb->dst->neighbour);
...@@ -721,7 +714,7 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev) ...@@ -721,7 +714,7 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
skb->dst->neighbour->ha + 4, skb->dst->neighbour->ha + 4,
sizeof(union ib_gid))) || sizeof(union ib_gid))) ||
(neigh->dev != dev))) { (neigh->dev != dev))) {
spin_lock(&priv->lock); spin_lock_irqsave(&priv->lock, flags);
/* /*
* It's safe to call ipoib_put_ah() inside * It's safe to call ipoib_put_ah() inside
* priv->lock here, because we know that * priv->lock here, because we know that
...@@ -732,25 +725,25 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev) ...@@ -732,25 +725,25 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
ipoib_put_ah(neigh->ah); ipoib_put_ah(neigh->ah);
list_del(&neigh->list); list_del(&neigh->list);
ipoib_neigh_free(dev, neigh); ipoib_neigh_free(dev, neigh);
spin_unlock(&priv->lock); spin_unlock_irqrestore(&priv->lock, flags);
ipoib_path_lookup(skb, dev); ipoib_path_lookup(skb, dev);
goto out; return NETDEV_TX_OK;
} }
if (ipoib_cm_get(neigh)) { if (ipoib_cm_get(neigh)) {
if (ipoib_cm_up(neigh)) { if (ipoib_cm_up(neigh)) {
ipoib_cm_send(dev, skb, ipoib_cm_get(neigh)); ipoib_cm_send(dev, skb, ipoib_cm_get(neigh));
goto out; return NETDEV_TX_OK;
} }
} else if (neigh->ah) { } else if (neigh->ah) {
ipoib_send(dev, skb, neigh->ah, IPOIB_QPN(skb->dst->neighbour->ha)); ipoib_send(dev, skb, neigh->ah, IPOIB_QPN(skb->dst->neighbour->ha));
goto out; return NETDEV_TX_OK;
} }
if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) { if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
spin_lock(&priv->lock); spin_lock_irqsave(&priv->lock, flags);
__skb_queue_tail(&neigh->queue, skb); __skb_queue_tail(&neigh->queue, skb);
spin_unlock(&priv->lock); spin_unlock_irqrestore(&priv->lock, flags);
} else { } else {
++dev->stats.tx_dropped; ++dev->stats.tx_dropped;
dev_kfree_skb_any(skb); dev_kfree_skb_any(skb);
...@@ -779,16 +772,13 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev) ...@@ -779,16 +772,13 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
IPOIB_GID_RAW_ARG(phdr->hwaddr + 4)); IPOIB_GID_RAW_ARG(phdr->hwaddr + 4));
dev_kfree_skb_any(skb); dev_kfree_skb_any(skb);
++dev->stats.tx_dropped; ++dev->stats.tx_dropped;
goto out; return NETDEV_TX_OK;
} }
unicast_arp_send(skb, dev, phdr); unicast_arp_send(skb, dev, phdr);
} }
} }
out:
spin_unlock_irqrestore(&priv->tx_lock, flags);
return NETDEV_TX_OK; return NETDEV_TX_OK;
} }
...@@ -1052,7 +1042,6 @@ static void ipoib_setup(struct net_device *dev) ...@@ -1052,7 +1042,6 @@ static void ipoib_setup(struct net_device *dev)
dev->type = ARPHRD_INFINIBAND; dev->type = ARPHRD_INFINIBAND;
dev->tx_queue_len = ipoib_sendq_size * 2; dev->tx_queue_len = ipoib_sendq_size * 2;
dev->features = (NETIF_F_VLAN_CHALLENGED | dev->features = (NETIF_F_VLAN_CHALLENGED |
NETIF_F_LLTX |
NETIF_F_HIGHDMA); NETIF_F_HIGHDMA);
memcpy(dev->broadcast, ipv4_bcast_addr, INFINIBAND_ALEN); memcpy(dev->broadcast, ipv4_bcast_addr, INFINIBAND_ALEN);
...@@ -1064,7 +1053,6 @@ static void ipoib_setup(struct net_device *dev) ...@@ -1064,7 +1053,6 @@ static void ipoib_setup(struct net_device *dev)
ipoib_lro_setup(priv); ipoib_lro_setup(priv);
spin_lock_init(&priv->lock); spin_lock_init(&priv->lock);
spin_lock_init(&priv->tx_lock);
mutex_init(&priv->vlan_mutex); mutex_init(&priv->vlan_mutex);
......
...@@ -69,14 +69,13 @@ static void ipoib_mcast_free(struct ipoib_mcast *mcast) ...@@ -69,14 +69,13 @@ static void ipoib_mcast_free(struct ipoib_mcast *mcast)
struct net_device *dev = mcast->dev; struct net_device *dev = mcast->dev;
struct ipoib_dev_priv *priv = netdev_priv(dev); struct ipoib_dev_priv *priv = netdev_priv(dev);
struct ipoib_neigh *neigh, *tmp; struct ipoib_neigh *neigh, *tmp;
unsigned long flags;
int tx_dropped = 0; int tx_dropped = 0;
ipoib_dbg_mcast(netdev_priv(dev), ipoib_dbg_mcast(netdev_priv(dev),
"deleting multicast group " IPOIB_GID_FMT "\n", "deleting multicast group " IPOIB_GID_FMT "\n",
IPOIB_GID_ARG(mcast->mcmember.mgid)); IPOIB_GID_ARG(mcast->mcmember.mgid));
spin_lock_irqsave(&priv->lock, flags); spin_lock_irq(&priv->lock);
list_for_each_entry_safe(neigh, tmp, &mcast->neigh_list, list) { list_for_each_entry_safe(neigh, tmp, &mcast->neigh_list, list) {
/* /*
...@@ -90,7 +89,7 @@ static void ipoib_mcast_free(struct ipoib_mcast *mcast) ...@@ -90,7 +89,7 @@ static void ipoib_mcast_free(struct ipoib_mcast *mcast)
ipoib_neigh_free(dev, neigh); ipoib_neigh_free(dev, neigh);
} }
spin_unlock_irqrestore(&priv->lock, flags); spin_unlock_irq(&priv->lock);
if (mcast->ah) if (mcast->ah)
ipoib_put_ah(mcast->ah); ipoib_put_ah(mcast->ah);
...@@ -100,9 +99,9 @@ static void ipoib_mcast_free(struct ipoib_mcast *mcast) ...@@ -100,9 +99,9 @@ static void ipoib_mcast_free(struct ipoib_mcast *mcast)
dev_kfree_skb_any(skb_dequeue(&mcast->pkt_queue)); dev_kfree_skb_any(skb_dequeue(&mcast->pkt_queue));
} }
spin_lock_irqsave(&priv->tx_lock, flags); netif_tx_lock_bh(dev);
dev->stats.tx_dropped += tx_dropped; dev->stats.tx_dropped += tx_dropped;
spin_unlock_irqrestore(&priv->tx_lock, flags); netif_tx_unlock_bh(dev);
kfree(mcast); kfree(mcast);
} }
...@@ -259,10 +258,10 @@ static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast, ...@@ -259,10 +258,10 @@ static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast,
} }
/* actually send any queued packets */ /* actually send any queued packets */
spin_lock_irq(&priv->tx_lock); netif_tx_lock_bh(dev);
while (!skb_queue_empty(&mcast->pkt_queue)) { while (!skb_queue_empty(&mcast->pkt_queue)) {
struct sk_buff *skb = skb_dequeue(&mcast->pkt_queue); struct sk_buff *skb = skb_dequeue(&mcast->pkt_queue);
spin_unlock_irq(&priv->tx_lock); netif_tx_unlock_bh(dev);
skb->dev = dev; skb->dev = dev;
...@@ -273,9 +272,9 @@ static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast, ...@@ -273,9 +272,9 @@ static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast,
if (dev_queue_xmit(skb)) if (dev_queue_xmit(skb))
ipoib_warn(priv, "dev_queue_xmit failed to requeue packet\n"); ipoib_warn(priv, "dev_queue_xmit failed to requeue packet\n");
spin_lock_irq(&priv->tx_lock); netif_tx_lock_bh(dev);
} }
spin_unlock_irq(&priv->tx_lock); netif_tx_unlock_bh(dev);
return 0; return 0;
} }
...@@ -286,7 +285,6 @@ ipoib_mcast_sendonly_join_complete(int status, ...@@ -286,7 +285,6 @@ ipoib_mcast_sendonly_join_complete(int status,
{ {
struct ipoib_mcast *mcast = multicast->context; struct ipoib_mcast *mcast = multicast->context;
struct net_device *dev = mcast->dev; struct net_device *dev = mcast->dev;
struct ipoib_dev_priv *priv = netdev_priv(dev);
/* We trap for port events ourselves. */ /* We trap for port events ourselves. */
if (status == -ENETRESET) if (status == -ENETRESET)
...@@ -302,12 +300,12 @@ ipoib_mcast_sendonly_join_complete(int status, ...@@ -302,12 +300,12 @@ ipoib_mcast_sendonly_join_complete(int status,
IPOIB_GID_ARG(mcast->mcmember.mgid), status); IPOIB_GID_ARG(mcast->mcmember.mgid), status);
/* Flush out any queued packets */ /* Flush out any queued packets */
spin_lock_irq(&priv->tx_lock); netif_tx_lock_bh(dev);
while (!skb_queue_empty(&mcast->pkt_queue)) { while (!skb_queue_empty(&mcast->pkt_queue)) {
++dev->stats.tx_dropped; ++dev->stats.tx_dropped;
dev_kfree_skb_any(skb_dequeue(&mcast->pkt_queue)); dev_kfree_skb_any(skb_dequeue(&mcast->pkt_queue));
} }
spin_unlock_irq(&priv->tx_lock); netif_tx_unlock_bh(dev);
/* Clear the busy flag so we try again */ /* Clear the busy flag so we try again */
status = test_and_clear_bit(IPOIB_MCAST_FLAG_BUSY, status = test_and_clear_bit(IPOIB_MCAST_FLAG_BUSY,
...@@ -662,12 +660,9 @@ void ipoib_mcast_send(struct net_device *dev, void *mgid, struct sk_buff *skb) ...@@ -662,12 +660,9 @@ void ipoib_mcast_send(struct net_device *dev, void *mgid, struct sk_buff *skb)
{ {
struct ipoib_dev_priv *priv = netdev_priv(dev); struct ipoib_dev_priv *priv = netdev_priv(dev);
struct ipoib_mcast *mcast; struct ipoib_mcast *mcast;
unsigned long flags;
/* spin_lock_irqsave(&priv->lock, flags);
* We can only be called from ipoib_start_xmit, so we're
* inside tx_lock -- no need to save/restore flags.
*/
spin_lock(&priv->lock);
if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags) || if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags) ||
!priv->broadcast || !priv->broadcast ||
...@@ -738,7 +733,7 @@ void ipoib_mcast_send(struct net_device *dev, void *mgid, struct sk_buff *skb) ...@@ -738,7 +733,7 @@ void ipoib_mcast_send(struct net_device *dev, void *mgid, struct sk_buff *skb)
} }
unlock: unlock:
spin_unlock(&priv->lock); spin_unlock_irqrestore(&priv->lock, flags);
} }
void ipoib_mcast_dev_flush(struct net_device *dev) void ipoib_mcast_dev_flush(struct net_device *dev)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment