Commit 9a2620c8 authored by Yuval Mintz, committed by David S. Miller

bnx2x: prevent WARN during driver unload

Starting with commit 80c33ddd "net: add might_sleep() call to napi_disable",
bnx2x fails the might_sleep() check, causing a stack trace to appear whenever
the driver is unloaded, since local_bh_disable() is called before
napi_disable().

This patch changes the locking scheme used for CONFIG_NET_RX_BUSY_POLL,
removing the need to call local_bh_disable() and thus eliminating the
issue.
Signed-off-by: Yuval Mintz <yuvalmin@broadcom.com>
Signed-off-by: Dmitry Kravkov <dmitry@broadcom.com>
Signed-off-by: Ariel Elior <ariele@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent a04c0e2c
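
For readers outside the kernel tree, the scheme the patch moves to can be modeled in a minimal userspace sketch. This is an illustrative approximation, not the driver code: a pthread mutex stands in for fp->lock, and the fp_* helpers are simplified stand-ins for the bnx2x_fp_* functions in the diff below. The key idea is ownership bits for NAPI and busy-poll plus a DISABLED bit that unlock preserves, so the unload path can simply sleep while waiting for the current owner instead of spinning with bottom halves disabled.

/*
 * Illustrative userspace model of the new fastpath locking scheme.
 * NOT the bnx2x code: pthread mutex models fp->lock, helper names are
 * simplified versions of the bnx2x_fp_* functions shown in the diff.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define FP_STATE_IDLE      0
#define FP_STATE_NAPI      (1 << 0)	/* NAPI owns this FP */
#define FP_STATE_POLL      (1 << 1)	/* busy-poll owns this FP */
#define FP_STATE_DISABLED  (1 << 2)	/* FP is being torn down */
#define FP_OWNED           (FP_STATE_NAPI | FP_STATE_POLL)

struct fastpath {
	pthread_mutex_t lock;		/* models fp->lock */
	unsigned int state;
};

/* NAPI tries to take ownership; refused if already owned or disabled. */
static bool fp_lock_napi(struct fastpath *fp)
{
	bool rc = true;

	pthread_mutex_lock(&fp->lock);
	if (fp->state & (FP_OWNED | FP_STATE_DISABLED))
		rc = false;
	else
		fp->state = FP_STATE_NAPI;
	pthread_mutex_unlock(&fp->lock);
	return rc;
}

/* Drop ownership but keep the DISABLED bit if teardown already set it. */
static void fp_unlock_napi(struct fastpath *fp)
{
	pthread_mutex_lock(&fp->lock);
	fp->state &= FP_STATE_DISABLED;
	pthread_mutex_unlock(&fp->lock);
}

/* Teardown: mark DISABLED; false while NAPI or busy-poll still owns the FP. */
static bool fp_disable(struct fastpath *fp)
{
	bool rc = true;

	pthread_mutex_lock(&fp->lock);
	if (fp->state & FP_OWNED)
		rc = false;
	fp->state |= FP_STATE_DISABLED;
	pthread_mutex_unlock(&fp->lock);
	return rc;
}

int main(void)
{
	static struct fastpath fp = { .lock = PTHREAD_MUTEX_INITIALIZER };

	fp_lock_napi(&fp);			/* NAPI is polling the ring */
	printf("disable while NAPI owns FP: %d\n", fp_disable(&fp));	/* 0 */
	fp_unlock_napi(&fp);			/* owner releases, DISABLED kept */
	printf("disable after NAPI unlock: %d\n", fp_disable(&fp));	/* 1 */
	printf("NAPI lock after disable:   %d\n", fp_lock_napi(&fp));	/* 0 */
	return 0;
}

Because the unload path no longer sits inside a local_bh_disable()/local_bh_enable() pair, it is allowed to sleep while waiting, which is why the diff also replaces mdelay(1) with usleep_range(1000, 2000).
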
@@ -520,10 +520,12 @@ struct bnx2x_fastpath {
 #define BNX2X_FP_STATE_IDLE		      0
 #define BNX2X_FP_STATE_NAPI		(1 << 0)    /* NAPI owns this FP */
 #define BNX2X_FP_STATE_POLL		(1 << 1)    /* poll owns this FP */
-#define BNX2X_FP_STATE_NAPI_YIELD	(1 << 2)    /* NAPI yielded this FP */
-#define BNX2X_FP_STATE_POLL_YIELD	(1 << 3)    /* poll yielded this FP */
+#define BNX2X_FP_STATE_DISABLED		(1 << 2)
+#define BNX2X_FP_STATE_NAPI_YIELD	(1 << 3)    /* NAPI yielded this FP */
+#define BNX2X_FP_STATE_POLL_YIELD	(1 << 4)    /* poll yielded this FP */
+#define BNX2X_FP_OWNED	(BNX2X_FP_STATE_NAPI | BNX2X_FP_STATE_POLL)
 #define BNX2X_FP_YIELD	(BNX2X_FP_STATE_NAPI_YIELD | BNX2X_FP_STATE_POLL_YIELD)
-#define BNX2X_FP_LOCKED	(BNX2X_FP_STATE_NAPI | BNX2X_FP_STATE_POLL)
+#define BNX2X_FP_LOCKED	(BNX2X_FP_OWNED | BNX2X_FP_STATE_DISABLED)
 #define BNX2X_FP_USER_PEND (BNX2X_FP_STATE_POLL | BNX2X_FP_STATE_POLL_YIELD)
 	/* protect state */
 	spinlock_t lock;
@@ -613,7 +615,7 @@ static inline bool bnx2x_fp_lock_napi(struct bnx2x_fastpath *fp)
 {
 	bool rc = true;
 
-	spin_lock(&fp->lock);
+	spin_lock_bh(&fp->lock);
 	if (fp->state & BNX2X_FP_LOCKED) {
 		WARN_ON(fp->state & BNX2X_FP_STATE_NAPI);
 		fp->state |= BNX2X_FP_STATE_NAPI_YIELD;
@@ -622,7 +624,7 @@ static inline bool bnx2x_fp_lock_napi(struct bnx2x_fastpath *fp)
 		/* we don't care if someone yielded */
 		fp->state = BNX2X_FP_STATE_NAPI;
 	}
-	spin_unlock(&fp->lock);
+	spin_unlock_bh(&fp->lock);
 	return rc;
 }
 
@@ -631,14 +633,16 @@ static inline bool bnx2x_fp_unlock_napi(struct bnx2x_fastpath *fp)
 {
 	bool rc = false;
 
-	spin_lock(&fp->lock);
+	spin_lock_bh(&fp->lock);
 	WARN_ON(fp->state &
 		(BNX2X_FP_STATE_POLL | BNX2X_FP_STATE_NAPI_YIELD));
 
 	if (fp->state & BNX2X_FP_STATE_POLL_YIELD)
 		rc = true;
-	fp->state = BNX2X_FP_STATE_IDLE;
-	spin_unlock(&fp->lock);
+
+	/* state ==> idle, unless currently disabled */
+	fp->state &= BNX2X_FP_STATE_DISABLED;
+	spin_unlock_bh(&fp->lock);
 	return rc;
 }
 
@@ -669,7 +673,9 @@ static inline bool bnx2x_fp_unlock_poll(struct bnx2x_fastpath *fp)
 	if (fp->state & BNX2X_FP_STATE_POLL_YIELD)
 		rc = true;
-	fp->state = BNX2X_FP_STATE_IDLE;
+
+	/* state ==> idle, unless currently disabled */
+	fp->state &= BNX2X_FP_STATE_DISABLED;
 	spin_unlock_bh(&fp->lock);
 	return rc;
 }
 
@@ -677,9 +683,23 @@ static inline bool bnx2x_fp_unlock_poll(struct bnx2x_fastpath *fp)
 /* true if a socket is polling, even if it did not get the lock */
 static inline bool bnx2x_fp_ll_polling(struct bnx2x_fastpath *fp)
 {
-	WARN_ON(!(fp->state & BNX2X_FP_LOCKED));
+	WARN_ON(!(fp->state & BNX2X_FP_OWNED));
 	return fp->state & BNX2X_FP_USER_PEND;
 }
+
+/* false if fp is currently owned */
+static inline bool bnx2x_fp_ll_disable(struct bnx2x_fastpath *fp)
+{
+	int rc = true;
+
+	spin_lock_bh(&fp->lock);
+	if (fp->state & BNX2X_FP_OWNED)
+		rc = false;
+	fp->state |= BNX2X_FP_STATE_DISABLED;
+	spin_unlock_bh(&fp->lock);
+
+	return rc;
+}
 #else
 static inline void bnx2x_fp_init_lock(struct bnx2x_fastpath *fp)
 {
@@ -709,6 +729,10 @@ static inline bool bnx2x_fp_ll_polling(struct bnx2x_fastpath *fp)
 {
 	return false;
 }
+static inline bool bnx2x_fp_ll_disable(struct bnx2x_fastpath *fp)
+{
+	return true;
+}
 #endif /* CONFIG_NET_RX_BUSY_POLL */
 
 /* Use 2500 as a mini-jumbo MTU for FCoE */
...
@@ -1790,26 +1790,22 @@ static void bnx2x_napi_disable_cnic(struct bnx2x *bp)
 {
 	int i;
 
-	local_bh_disable();
 	for_each_rx_queue_cnic(bp, i) {
 		napi_disable(&bnx2x_fp(bp, i, napi));
-		while (!bnx2x_fp_lock_napi(&bp->fp[i]))
-			mdelay(1);
+		while (!bnx2x_fp_ll_disable(&bp->fp[i]))
+			usleep_range(1000, 2000);
 	}
-	local_bh_enable();
 }
 
 static void bnx2x_napi_disable(struct bnx2x *bp)
 {
 	int i;
 
-	local_bh_disable();
 	for_each_eth_queue(bp, i) {
 		napi_disable(&bnx2x_fp(bp, i, napi));
-		while (!bnx2x_fp_lock_napi(&bp->fp[i]))
-			mdelay(1);
+		while (!bnx2x_fp_ll_disable(&bp->fp[i]))
+			usleep_range(1000, 2000);
 	}
-	local_bh_enable();
 }
 
 void bnx2x_netif_start(struct bnx2x *bp)
...