Commit 78c210ef authored by J. Bruce Fields

Revert "knfsd: avoid overloading the CPU scheduler with enormous load averages"

This reverts commit 59a252ff.

This helps in an entirely cached workload but not necessarily in
workloads that require waiting on disk.

Conflicts:

	include/linux/sunrpc/svc.h
	net/sunrpc/svc_xprt.c
Reported-by: Simon Kirby <sim@hostway.ca>
Tested-by: Jesper Krogh <jesper@krogh.cc>
Signed-off-by: J. Bruce Fields <bfields@citi.umich.edu>
parent 0a3adade
...@@ -29,7 +29,6 @@ struct svc_pool_stats { ...@@ -29,7 +29,6 @@ struct svc_pool_stats {
unsigned long packets; unsigned long packets;
unsigned long sockets_queued; unsigned long sockets_queued;
unsigned long threads_woken; unsigned long threads_woken;
unsigned long overloads_avoided;
unsigned long threads_timedout; unsigned long threads_timedout;
}; };
...@@ -50,7 +49,6 @@ struct svc_pool { ...@@ -50,7 +49,6 @@ struct svc_pool {
struct list_head sp_sockets; /* pending sockets */ struct list_head sp_sockets; /* pending sockets */
unsigned int sp_nrthreads; /* # of threads in pool */ unsigned int sp_nrthreads; /* # of threads in pool */
struct list_head sp_all_threads; /* all server threads */ struct list_head sp_all_threads; /* all server threads */
int sp_nwaking; /* number of threads woken but not yet active */
struct svc_pool_stats sp_stats; /* statistics on pool operation */ struct svc_pool_stats sp_stats; /* statistics on pool operation */
} ____cacheline_aligned_in_smp; } ____cacheline_aligned_in_smp;
...@@ -284,7 +282,6 @@ struct svc_rqst { ...@@ -284,7 +282,6 @@ struct svc_rqst {
* cache pages */ * cache pages */
wait_queue_head_t rq_wait; /* synchronization */ wait_queue_head_t rq_wait; /* synchronization */
struct task_struct *rq_task; /* service thread */ struct task_struct *rq_task; /* service thread */
int rq_waking; /* 1 if thread is being woken */
}; };
/* /*
......
...@@ -16,8 +16,6 @@ ...@@ -16,8 +16,6 @@
#define RPCDBG_FACILITY RPCDBG_SVCXPRT #define RPCDBG_FACILITY RPCDBG_SVCXPRT
#define SVC_MAX_WAKING 5
static struct svc_deferred_req *svc_deferred_dequeue(struct svc_xprt *xprt); static struct svc_deferred_req *svc_deferred_dequeue(struct svc_xprt *xprt);
static int svc_deferred_recv(struct svc_rqst *rqstp); static int svc_deferred_recv(struct svc_rqst *rqstp);
static struct cache_deferred_req *svc_defer(struct cache_req *req); static struct cache_deferred_req *svc_defer(struct cache_req *req);
...@@ -306,7 +304,6 @@ void svc_xprt_enqueue(struct svc_xprt *xprt) ...@@ -306,7 +304,6 @@ void svc_xprt_enqueue(struct svc_xprt *xprt)
struct svc_pool *pool; struct svc_pool *pool;
struct svc_rqst *rqstp; struct svc_rqst *rqstp;
int cpu; int cpu;
int thread_avail;
if (!(xprt->xpt_flags & if (!(xprt->xpt_flags &
((1<<XPT_CONN)|(1<<XPT_DATA)|(1<<XPT_CLOSE)|(1<<XPT_DEFERRED)))) ((1<<XPT_CONN)|(1<<XPT_DATA)|(1<<XPT_CLOSE)|(1<<XPT_DEFERRED))))
...@@ -318,6 +315,12 @@ void svc_xprt_enqueue(struct svc_xprt *xprt) ...@@ -318,6 +315,12 @@ void svc_xprt_enqueue(struct svc_xprt *xprt)
spin_lock_bh(&pool->sp_lock); spin_lock_bh(&pool->sp_lock);
if (!list_empty(&pool->sp_threads) &&
!list_empty(&pool->sp_sockets))
printk(KERN_ERR
"svc_xprt_enqueue: "
"threads and transports both waiting??\n");
if (test_bit(XPT_DEAD, &xprt->xpt_flags)) { if (test_bit(XPT_DEAD, &xprt->xpt_flags)) {
/* Don't enqueue dead transports */ /* Don't enqueue dead transports */
dprintk("svc: transport %p is dead, not enqueued\n", xprt); dprintk("svc: transport %p is dead, not enqueued\n", xprt);
...@@ -358,15 +361,7 @@ void svc_xprt_enqueue(struct svc_xprt *xprt) ...@@ -358,15 +361,7 @@ void svc_xprt_enqueue(struct svc_xprt *xprt)
} }
process: process:
/* Work out whether threads are available */ if (!list_empty(&pool->sp_threads)) {
thread_avail = !list_empty(&pool->sp_threads); /* threads are asleep */
if (pool->sp_nwaking >= SVC_MAX_WAKING) {
/* too many threads are runnable and trying to wake up */
thread_avail = 0;
pool->sp_stats.overloads_avoided++;
}
if (thread_avail) {
rqstp = list_entry(pool->sp_threads.next, rqstp = list_entry(pool->sp_threads.next,
struct svc_rqst, struct svc_rqst,
rq_list); rq_list);
...@@ -381,8 +376,6 @@ void svc_xprt_enqueue(struct svc_xprt *xprt) ...@@ -381,8 +376,6 @@ void svc_xprt_enqueue(struct svc_xprt *xprt)
svc_xprt_get(xprt); svc_xprt_get(xprt);
rqstp->rq_reserved = serv->sv_max_mesg; rqstp->rq_reserved = serv->sv_max_mesg;
atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved); atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved);
rqstp->rq_waking = 1;
pool->sp_nwaking++;
pool->sp_stats.threads_woken++; pool->sp_stats.threads_woken++;
BUG_ON(xprt->xpt_pool != pool); BUG_ON(xprt->xpt_pool != pool);
wake_up(&rqstp->rq_wait); wake_up(&rqstp->rq_wait);
...@@ -651,11 +644,6 @@ int svc_recv(struct svc_rqst *rqstp, long timeout) ...@@ -651,11 +644,6 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
return -EINTR; return -EINTR;
spin_lock_bh(&pool->sp_lock); spin_lock_bh(&pool->sp_lock);
if (rqstp->rq_waking) {
rqstp->rq_waking = 0;
pool->sp_nwaking--;
BUG_ON(pool->sp_nwaking < 0);
}
xprt = svc_xprt_dequeue(pool); xprt = svc_xprt_dequeue(pool);
if (xprt) { if (xprt) {
rqstp->rq_xprt = xprt; rqstp->rq_xprt = xprt;
...@@ -1204,16 +1192,15 @@ static int svc_pool_stats_show(struct seq_file *m, void *p) ...@@ -1204,16 +1192,15 @@ static int svc_pool_stats_show(struct seq_file *m, void *p)
struct svc_pool *pool = p; struct svc_pool *pool = p;
if (p == SEQ_START_TOKEN) { if (p == SEQ_START_TOKEN) {
seq_puts(m, "# pool packets-arrived sockets-enqueued threads-woken overloads-avoided threads-timedout\n"); seq_puts(m, "# pool packets-arrived sockets-enqueued threads-woken threads-timedout\n");
return 0; return 0;
} }
seq_printf(m, "%u %lu %lu %lu %lu %lu\n", seq_printf(m, "%u %lu %lu %lu %lu\n",
pool->sp_id, pool->sp_id,
pool->sp_stats.packets, pool->sp_stats.packets,
pool->sp_stats.sockets_queued, pool->sp_stats.sockets_queued,
pool->sp_stats.threads_woken, pool->sp_stats.threads_woken,
pool->sp_stats.overloads_avoided,
pool->sp_stats.threads_timedout); pool->sp_stats.threads_timedout);
return 0; return 0;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment