Commit 4ada539e authored by Trond Myklebust

SUNRPC: Make create_client() take a reference to the rpciod workqueue

Ensures that an rpc_client always has the possibility to send asynchronous
RPC calls.
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
parent ab418d70
......@@ -111,6 +111,9 @@ static struct rpc_clnt * rpc_new_client(struct rpc_xprt *xprt, char *servname, s
dprintk("RPC: creating %s client for %s (xprt %p)\n",
program->name, servname, xprt);
err = rpciod_up();
if (err)
goto out_no_rpciod;
err = -EINVAL;
if (!xprt)
goto out_no_xprt;
......@@ -191,6 +194,8 @@ static struct rpc_clnt * rpc_new_client(struct rpc_xprt *xprt, char *servname, s
out_err:
xprt_put(xprt);
out_no_xprt:
rpciod_down();
out_no_rpciod:
return ERR_PTR(err);
}
......@@ -287,6 +292,7 @@ rpc_clone_client(struct rpc_clnt *clnt)
xprt_get(clnt->cl_xprt);
kref_get(&clnt->cl_kref);
rpc_register_client(new);
rpciod_up();
return new;
out_no_path:
rpc_free_iostats(new->cl_metrics);
......@@ -344,6 +350,7 @@ rpc_free_client(struct kref *kref)
rpc_free_iostats(clnt->cl_metrics);
clnt->cl_metrics = NULL;
xprt_put(clnt->cl_xprt);
rpciod_down();
kfree(clnt);
}
......
......@@ -39,7 +39,6 @@ static mempool_t *rpc_task_mempool __read_mostly;
static mempool_t *rpc_buffer_mempool __read_mostly;
static void __rpc_default_timer(struct rpc_task *task);
static void rpciod_killall(void);
static void rpc_async_schedule(struct work_struct *);
static void rpc_release_task(struct rpc_task *task);
......@@ -52,7 +51,6 @@ static RPC_WAITQ(delay_queue, "delayq");
* All RPC clients are linked into this list
*/
static LIST_HEAD(all_clients);
static DECLARE_WAIT_QUEUE_HEAD(client_kill_wait);
/*
* rpciod-related stuff
......@@ -996,32 +994,6 @@ void rpc_killall_tasks(struct rpc_clnt *clnt)
spin_unlock(&clnt->cl_lock);
}
/*
 * Kill all outstanding RPC tasks on every registered client, then wait
 * for the client list to drain before rpciod is torn down.
 *
 * Each pass kills the tasks of every client on @all_clients, flushes
 * the rpciod workqueue so their release work runs, and exits once the
 * list is empty; otherwise it sleeps (up to 1s) on @client_kill_wait,
 * which rpc_unregister_client() signals, and retries.
 *
 * TIF_SIGPENDING is cleared on each pass so pending signals cannot
 * interrupt the teardown; the real signal state is recalculated under
 * ->sighand->siglock on exit.
 */
static void rpciod_killall(void)
{
	struct rpc_clnt *clnt;
	unsigned long flags;

	for (;;) {
		clear_thread_flag(TIF_SIGPENDING);

		spin_lock(&rpc_sched_lock);
		list_for_each_entry(clnt, &all_clients, cl_clients)
			rpc_killall_tasks(clnt);
		spin_unlock(&rpc_sched_lock);

		flush_workqueue(rpciod_workqueue);

		/*
		 * FIX: the exit test was inverted ("!list_empty"), which
		 * broke out of the loop while clients still existed and
		 * looped forever once the list drained.  We are done only
		 * when every client has unregistered.
		 */
		if (list_empty(&all_clients))
			break;

		dprintk("RPC: rpciod_killall: waiting for tasks "
			"to exit\n");
		wait_event_timeout(client_kill_wait,
				   list_empty(&all_clients), 1*HZ);
	}

	spin_lock_irqsave(&current->sighand->siglock, flags);
	recalc_sigpending();
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}
void rpc_register_client(struct rpc_clnt *clnt)
{
spin_lock(&rpc_sched_lock);
......@@ -1033,8 +1005,6 @@ void rpc_unregister_client(struct rpc_clnt *clnt)
{
spin_lock(&rpc_sched_lock);
list_del(&clnt->cl_clients);
if (list_empty(&all_clients))
wake_up(&client_kill_wait);
spin_unlock(&rpc_sched_lock);
}
......@@ -1083,7 +1053,6 @@ rpciod_down(void)
dprintk("RPC: destroying workqueue rpciod\n");
if (atomic_read(&rpciod_users) == 0 && rpciod_workqueue != NULL) {
rpciod_killall();
destroy_workqueue(rpciod_workqueue);
rpciod_workqueue = NULL;
}
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment