Commit 7c96aef7 authored by NeilBrown, committed by J. Bruce Fields

sunrpc: remove xpt_pool

The xpt_pool field is only used for reporting BUGs.
And it isn't used correctly.

In particular, when it is cleared in svc_xprt_received before
XPT_BUSY is cleared, there is no guarantee that either the
compiler or the CPU won't re-order the two assignments, so that
xpt_pool is only set to NULL after XPT_BUSY is cleared.

If a different cpu were running svc_xprt_enqueue at that moment,
it might see XPT_BUSY clear and then xpt_pool non-NULL, and so
hit the BUG_ON.
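
Roughly, the race looks like this (a simplified sketch of the two
paths, not the exact source):

  /* cpu A: svc_xprt_received */
  xprt->xpt_pool = NULL;                          /* store 1 */
  clear_bit(XPT_BUSY, &xprt->xpt_flags);          /* store 2 - may become
                                                   * visible before store 1 */

  /* cpu B: svc_xprt_enqueue */
  if (test_and_set_bit(XPT_BUSY, &xprt->xpt_flags))
          goto out_unlock;                        /* still busy, give up */
  BUG_ON(xprt->xpt_pool != NULL);                 /* fires if cpu B observes
                                                   * the stores out of order */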

This could be fixed by calling
  smp_mb__before_clear_bit()
before the clear_bit.  However, as xpt_pool isn't really used,
it seems safest to simply remove xpt_pool.
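
That barrier-based fix would look roughly like this in
svc_xprt_received (sketch only; it is not what this patch does):

  xprt->xpt_pool = NULL;
  smp_mb__before_clear_bit();     /* order the store before clearing XPT_BUSY */
  clear_bit(XPT_BUSY, &xprt->xpt_flags);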

Another alternative would be to change the clear_bit to
clear_bit_unlock, and the test_and_set_bit to test_and_set_bit_lock.
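
That variant would look roughly like this (again just a sketch):

  /* svc_xprt_received */
  clear_bit_unlock(XPT_BUSY, &xprt->xpt_flags);           /* release */

  /* svc_xprt_enqueue */
  if (test_and_set_bit_lock(XPT_BUSY, &xprt->xpt_flags))  /* acquire */
          goto out_unlock;
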
Signed-off-by: NeilBrown <neilb@suse.de>
Signed-off-by: J. Bruce Fields <bfields@redhat.com>
parent 18b631f8
@@ -63,7 +63,6 @@ struct svc_xprt {
 #define XPT_LISTENER	11		/* listening endpoint */
 #define XPT_CACHE_AUTH	12		/* cache auth info */
 
-	struct svc_pool		*xpt_pool;	/* current pool iff queued */
 	struct svc_serv		*xpt_server;	/* service for transport */
 	atomic_t		xpt_reserved;	/* space on outq that is rsvd */
 	struct mutex		xpt_mutex;	/* to serialize sending data */
...
@@ -351,8 +351,6 @@ void svc_xprt_enqueue(struct svc_xprt *xprt)
 		dprintk("svc: transport %p busy, not enqueued\n", xprt);
 		goto out_unlock;
 	}
-	BUG_ON(xprt->xpt_pool != NULL);
-	xprt->xpt_pool = pool;
 
 	if (!list_empty(&pool->sp_threads)) {
 		rqstp = list_entry(pool->sp_threads.next,
@@ -370,13 +368,11 @@ void svc_xprt_enqueue(struct svc_xprt *xprt)
 		rqstp->rq_reserved = serv->sv_max_mesg;
 		atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved);
 		pool->sp_stats.threads_woken++;
-		BUG_ON(xprt->xpt_pool != pool);
 		wake_up(&rqstp->rq_wait);
 	} else {
 		dprintk("svc: transport %p put into queue\n", xprt);
 		list_add_tail(&xprt->xpt_ready, &pool->sp_sockets);
 		pool->sp_stats.sockets_queued++;
-		BUG_ON(xprt->xpt_pool != pool);
 	}
 
 out_unlock:
@@ -415,7 +411,6 @@ static struct svc_xprt *svc_xprt_dequeue(struct svc_pool *pool)
 void svc_xprt_received(struct svc_xprt *xprt)
 {
 	BUG_ON(!test_bit(XPT_BUSY, &xprt->xpt_flags));
-	xprt->xpt_pool = NULL;
 	/* As soon as we clear busy, the xprt could be closed and
 	 * 'put', so we need a reference to call svc_xprt_enqueue with:
 	 */
...