sched.c 31.5 KB
Newer Older
Linus Torvalds's avatar
Linus Torvalds committed
1 2 3 4 5 6
/*
 * linux/net/sunrpc/sched.c
 *
 * Scheduling for synchronous and asynchronous RPC requests.
 *
 * Copyright (C) 1996 Olaf Kirch, <okir@monad.swb.de>
7
 *
Linus Torvalds's avatar
Linus Torvalds committed
8 9 10 11 12 13 14 15 16 17 18 19
 * TCP NFS related read + write fixes
 * (C) 1999 Dave Airlie, University of Limerick, Ireland <airlied@linux.ie>
 */

#include <linux/module.h>

#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/mempool.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
Arjan van de Ven's avatar
Arjan van de Ven committed
20
#include <linux/mutex.h>
21
#include <linux/freezer.h>
Linus Torvalds's avatar
Linus Torvalds committed
22 23 24

#include <linux/sunrpc/clnt.h>

25 26
#include "sunrpc.h"

Jeff Layton's avatar
Jeff Layton committed
27
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
Linus Torvalds's avatar
Linus Torvalds committed
28 29 30
#define RPCDBG_FACILITY		RPCDBG_SCHED
#endif

31 32 33
#define CREATE_TRACE_POINTS
#include <trace/events/sunrpc.h>

Linus Torvalds's avatar
Linus Torvalds committed
34 35 36 37 38 39
/*
 * RPC slabs and memory pools
 */
#define RPC_BUFFER_MAXSIZE	(2048)
#define RPC_BUFFER_POOLSIZE	(8)
#define RPC_TASK_POOLSIZE	(8)
40 41
static struct kmem_cache	*rpc_task_slabp __read_mostly;
static struct kmem_cache	*rpc_buffer_slabp __read_mostly;
42 43
static mempool_t	*rpc_task_mempool __read_mostly;
static mempool_t	*rpc_buffer_mempool __read_mostly;
Linus Torvalds's avatar
Linus Torvalds committed
44

45
static void			rpc_async_schedule(struct work_struct *);
46
static void			 rpc_release_task(struct rpc_task *task);
47
static void __rpc_queue_timer_fn(struct timer_list *t);
Linus Torvalds's avatar
Linus Torvalds committed
48 49 50 51

/*
 * RPC tasks sit here while waiting for conditions to improve.
 */
52
static struct rpc_wait_queue delay_queue;
Linus Torvalds's avatar
Linus Torvalds committed
53 54 55 56

/*
 * rpciod-related stuff
 */
57 58
struct workqueue_struct *rpciod_workqueue __read_mostly;
struct workqueue_struct *xprtiod_workqueue __read_mostly;
Linus Torvalds's avatar
Linus Torvalds committed
59 60 61 62 63 64

/*
 * Disable the timer for a given RPC task. Should be called with
 * queue->lock and bh_disabled in order to avoid races within
 * rpc_run_timer().
 */
65
/*
 * Cancel a task's pending timeout.  Caller holds queue->lock (see the
 * comment above this function in the file header region).  A zero
 * tk_timeout means no timer was armed for this task.
 */
static void
__rpc_disable_timer(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	if (task->tk_timeout == 0)
		return;
	dprintk("RPC: %5u disabling timer\n", task->tk_pid);
	task->tk_timeout = 0;
	/* Unlink from the queue's list of timed tasks ... */
	list_del(&task->u.tk_wait.timer_list);
	/* ... and stop the per-queue timer if no timed task remains. */
	if (list_empty(&queue->timer_list.list))
		del_timer(&queue->timer_list.timer);
}

/*
 * (Re)arm the queue's timer to fire at @expires (jiffies), caching the
 * armed expiry in queue->timer_list.expires so __rpc_add_timer() can
 * compare against it without touching the timer itself.
 */
static void
rpc_set_queue_timer(struct rpc_wait_queue *queue, unsigned long expires)
{
	queue->timer_list.expires = expires;
	mod_timer(&queue->timer_list.timer, expires);
}

/*
 * Set up a timer for the current task.
 */
87
/*
 * Arm a timeout for @task on @queue.  tk_timeout is a relative delay in
 * jiffies; zero means "no timeout".  The queue keeps a single timer set
 * to the earliest expiry of all tasks on timer_list.list, so the timer
 * is only re-armed when this task expires sooner than the current one.
 * Caller holds queue->lock.
 */
static void
__rpc_add_timer(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	if (!task->tk_timeout)
		return;

	dprintk("RPC: %5u setting alarm for %u ms\n",
		task->tk_pid, jiffies_to_msecs(task->tk_timeout));

	task->u.tk_wait.expires = jiffies + task->tk_timeout;
	/* First timed task, or earlier than the armed expiry: re-arm. */
	if (list_empty(&queue->timer_list.list) || time_before(task->u.tk_wait.expires, queue->timer_list.expires))
		rpc_set_queue_timer(queue, task->u.tk_wait.expires);
	list_add(&task->u.tk_wait.timer_list, &queue->timer_list.list);
}

102 103
static void rpc_set_waitqueue_priority(struct rpc_wait_queue *queue, int priority)
{
104 105
	if (queue->priority != priority) {
		queue->priority = priority;
106
		queue->nr = 1U << priority;
107
	}
108 109 110 111 112 113 114
}

/* Restart servicing from the queue's highest priority level. */
static void rpc_reset_waitqueue_priority(struct rpc_wait_queue *queue)
{
	rpc_set_waitqueue_priority(queue, queue->maxpriority);
}

Linus Torvalds's avatar
Linus Torvalds committed
115
/*
116
 * Add a request to a queue list
Linus Torvalds's avatar
Linus Torvalds committed
117
 */
118 119
static void
__rpc_list_enqueue_task(struct list_head *q, struct rpc_task *task)
{
	struct rpc_task *t;

	/*
	 * Tasks with the same owner are grouped: only the first such task
	 * sits on the queue list @q proper; later ones hang off its
	 * u.tk_wait.links list.
	 */
	list_for_each_entry(t, q, u.tk_wait.list) {
		if (t->tk_owner == task->tk_owner) {
			list_add_tail(&task->u.tk_wait.links,
					&t->u.tk_wait.links);
			/* Cache the queue head in task->u.tk_wait.list */
			/*
			 * prev == NULL marks this entry as "linked via
			 * tk_wait.links" for __rpc_list_dequeue_task();
			 * next caches @q so the group leader's successor
			 * can be re-inserted on the right queue list.
			 */
			task->u.tk_wait.list.next = q;
			task->u.tk_wait.list.prev = NULL;
			return;
		}
	}
	/* No task with this owner yet: become a group leader on @q. */
	INIT_LIST_HEAD(&task->u.tk_wait.links);
	list_add_tail(&task->u.tk_wait.list, q);
}

137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173
/*
 * Remove request from a queue list
 */
static void
__rpc_list_dequeue_task(struct rpc_task *task)
{
	struct list_head *q;
	struct rpc_task *t;

	/*
	 * prev == NULL means this task is a secondary member of an owner
	 * group (see __rpc_list_enqueue_task()): just unlink it from the
	 * group's links list.
	 */
	if (task->u.tk_wait.list.prev == NULL) {
		list_del(&task->u.tk_wait.links);
		return;
	}
	/*
	 * Group leader with followers: promote the first follower onto
	 * the queue list in our place before leaving the group.
	 */
	if (!list_empty(&task->u.tk_wait.links)) {
		t = list_first_entry(&task->u.tk_wait.links,
				struct rpc_task,
				u.tk_wait.links);
		/* Assume __rpc_list_enqueue_task() cached the queue head */
		q = t->u.tk_wait.list.next;
		list_add_tail(&t->u.tk_wait.list, q);
		list_del(&task->u.tk_wait.links);
	}
	list_del(&task->u.tk_wait.list);
}

/*
 * Add new request to a priority queue.
 */
/*
 * Insert @task into the priority queue @queue at level @queue_priority,
 * clamping an out-of-range level down to the queue's maximum.
 */
static void __rpc_add_wait_queue_priority(struct rpc_wait_queue *queue,
		struct rpc_task *task,
		unsigned char queue_priority)
{
	unsigned char level = queue_priority;

	if (unlikely(level > queue->maxpriority))
		level = queue->maxpriority;
	__rpc_list_enqueue_task(&queue->tasks[level], task);
}

Linus Torvalds's avatar
Linus Torvalds committed
174 175 176 177 178 179 180 181
/*
 * Add new request to wait queue.
 *
 * Swapper tasks always get inserted at the head of the queue.
 * This should avoid many nasty memory deadlocks and hopefully
 * improve overall performance.
 * Everyone else gets appended to the queue to ensure proper FIFO behavior.
 */
182 183 184
/*
 * Put @task to sleep on @queue.  Caller holds queue->lock.  Priority
 * queues dispatch by level; on plain queues swapper tasks jump the
 * line (see the comment above) while everyone else is FIFO.
 */
static void __rpc_add_wait_queue(struct rpc_wait_queue *queue,
		struct rpc_task *task,
		unsigned char queue_priority)
{
	/* Queueing an already-queued task would corrupt the lists. */
	WARN_ON_ONCE(RPC_IS_QUEUED(task));
	if (RPC_IS_QUEUED(task))
		return;

	if (RPC_IS_PRIORITY(queue))
		__rpc_add_wait_queue_priority(queue, task, queue_priority);
	else if (RPC_IS_SWAPPER(task))
		list_add(&task->u.tk_wait.list, &queue->tasks[0]);
	else
		list_add_tail(&task->u.tk_wait.list, &queue->tasks[0]);
	task->tk_waitqueue = queue;
	queue->qlen++;
	/* barrier matches the read in rpc_wake_up_task_queue_locked() */
	/* All queue state must be visible before RPC_TASK_QUEUED is set. */
	smp_wmb();
	rpc_set_queued(task);

	dprintk("RPC: %5u added to queue %p \"%s\"\n",
			task->tk_pid, queue, rpc_qname(queue));
}

/*
 * Remove request from a priority queue.
 */
/* Remove @task from a priority queue (handles owner-group bookkeeping). */
static void __rpc_remove_wait_queue_priority(struct rpc_task *task)
{
	__rpc_list_dequeue_task(task);
}

/*
 * Remove request from queue.
 * Note: must be called with spin lock held.
 */
218
/*
 * Take @task off @queue, cancelling any pending timeout first.
 * Caller holds queue->lock.
 */
static void __rpc_remove_wait_queue(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	__rpc_disable_timer(queue, task);
	if (RPC_IS_PRIORITY(queue))
		__rpc_remove_wait_queue_priority(task);
	else
		list_del(&task->u.tk_wait.list);
	queue->qlen--;
	dprintk("RPC: %5u removed from queue %p \"%s\"\n",
			task->tk_pid, queue, rpc_qname(queue));
}

230
/*
 * Initialize @queue with @nr_queues priority levels (1 makes it a plain
 * FIFO queue).  Sets up the lock, the per-level task lists, the shared
 * timeout timer and the queue name used in debug output.
 */
static void __rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname, unsigned char nr_queues)
{
	int i;

	spin_lock_init(&queue->lock);
	/* All ARRAY_SIZE() lists are initialized even if fewer are used. */
	for (i = 0; i < ARRAY_SIZE(queue->tasks); i++)
		INIT_LIST_HEAD(&queue->tasks[i]);
	queue->maxpriority = nr_queues - 1;
	rpc_reset_waitqueue_priority(queue);
	queue->qlen = 0;
	timer_setup(&queue->timer_list.timer, __rpc_queue_timer_fn, 0);
	INIT_LIST_HEAD(&queue->timer_list.list);
	rpc_assign_waitqueue_name(queue, qname);
}

/* Initialize a wait queue with the full set of RPC priority levels. */
void rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname)
{
	__rpc_init_priority_wait_queue(queue, qname, RPC_NR_PRIORITY);
}
EXPORT_SYMBOL_GPL(rpc_init_priority_wait_queue);
Linus Torvalds's avatar
Linus Torvalds committed
250 251 252

/* Initialize a plain (single-priority, FIFO) wait queue. */
void rpc_init_wait_queue(struct rpc_wait_queue *queue, const char *qname)
{
	__rpc_init_priority_wait_queue(queue, qname, 1);
}
EXPORT_SYMBOL_GPL(rpc_init_wait_queue);
Linus Torvalds's avatar
Linus Torvalds committed
256

257 258
/*
 * Tear down a wait queue: synchronously stop its timeout timer.
 * The queue is assumed to be empty at this point.
 */
void rpc_destroy_wait_queue(struct rpc_wait_queue *queue)
{
	del_timer_sync(&queue->timer_list.timer);
}
EXPORT_SYMBOL_GPL(rpc_destroy_wait_queue);

263
/*
 * wait_bit action used while sleeping on a task's state bits: sleep in
 * a freezer-friendly way, and report -ERESTARTSYS if a (fatal, given
 * TASK_KILLABLE callers) signal is pending in @mode.
 */
static int rpc_wait_bit_killable(struct wait_bit_key *key, int mode)
{
	freezable_schedule_unsafe();
	if (signal_pending_state(mode, current))
		return -ERESTARTSYS;
	return 0;
}

271
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG) || IS_ENABLED(CONFIG_TRACEPOINTS)
/*
 * Assign the task a unique id (tk_pid) for dprintk/tracepoint output.
 * Compiled out entirely when neither debugging nor tracing is enabled.
 */
static void rpc_task_set_debuginfo(struct rpc_task *task)
{
	static atomic_t rpc_pid;

	task->tk_pid = atomic_inc_return(&rpc_pid);
}
#else
static inline void rpc_task_set_debuginfo(struct rpc_task *task)
{
}
#endif

284 285
/*
 * Mark @task as active (in use by the RPC state machine) and emit the
 * task-begin tracepoint.  The active bit is cleared again by
 * rpc_complete_task().
 */
static void rpc_set_active(struct rpc_task *task)
{
	rpc_task_set_debuginfo(task);
	set_bit(RPC_TASK_ACTIVE, &task->tk_runstate);
	trace_rpc_task_begin(task, NULL);
}

291 292
/*
 * Mark an RPC call as having completed by clearing the 'active' bit
293
 * and then waking up all tasks that were sleeping.
294
 */
295
static int rpc_complete_task(struct rpc_task *task)
{
	void *m = &task->tk_runstate;
	wait_queue_head_t *wq = bit_waitqueue(m, RPC_TASK_ACTIVE);
	struct wait_bit_key k = __WAIT_BIT_KEY_INITIALIZER(m, RPC_TASK_ACTIVE);
	unsigned long flags;
	int ret;

	trace_rpc_task_complete(task, NULL);

	/*
	 * Clearing RPC_TASK_ACTIVE and waking waiters is done under the
	 * bit-waitqueue lock so it cannot race with
	 * __rpc_wait_for_completion_task(), which takes the same lock via
	 * out_of_line_wait_on_bit().
	 */
	spin_lock_irqsave(&wq->lock, flags);
	clear_bit(RPC_TASK_ACTIVE, &task->tk_runstate);
	ret = atomic_dec_and_test(&task->tk_count);
	if (waitqueue_active(wq))
		__wake_up_locked_key(wq, TASK_NORMAL, &k);
	spin_unlock_irqrestore(&wq->lock, flags);
	/* true when this drop of tk_count was the last reference */
	return ret;
}

/*
 * Allow callers to wait for completion of an RPC call
316 317 318 319
 *
 * Note the use of out_of_line_wait_on_bit() rather than wait_on_bit()
 * to enforce taking of the wq->lock and hence avoid races with
 * rpc_complete_task().
320
 */
321
/*
 * Block until @task has completed (RPC_TASK_ACTIVE cleared).  @action
 * selects the sleeping strategy; NULL means the default killable,
 * freezer-aware sleep.  Returns 0 on completion or a negative errno
 * (e.g. -ERESTARTSYS) if the sleep was interrupted.
 */
int __rpc_wait_for_completion_task(struct rpc_task *task, wait_bit_action_f *action)
{
	wait_bit_action_f *wait_fn;

	wait_fn = action ? action : rpc_wait_bit_killable;
	return out_of_line_wait_on_bit(&task->tk_runstate, RPC_TASK_ACTIVE,
			wait_fn, TASK_KILLABLE);
}
EXPORT_SYMBOL_GPL(__rpc_wait_for_completion_task);
329

Linus Torvalds's avatar
Linus Torvalds committed
330 331 332
/*
 * Make an RPC task runnable.
 *
333 334 335
 * Note: If the task is ASYNC, and is being made runnable after sitting on an
 * rpc_wait_queue, this must be called with the queue spinlock held to protect
 * the wait queue operation.
336 337 338 339
 * Note the ordering of rpc_test_and_set_running() and rpc_clear_queued(),
 * which is needed to ensure that __rpc_execute() doesn't loop (due to the
 * lockless RPC_IS_QUEUED() test) before we've had a chance to test
 * the RPC_TASK_RUNNING flag.
Linus Torvalds's avatar
Linus Torvalds committed
340
 */
341 342
static void rpc_make_runnable(struct workqueue_struct *wq,
		struct rpc_task *task)
{
	/*
	 * Test-and-set RUNNING *before* clearing QUEUED (see the ordering
	 * note in the comment block above): __rpc_execute() tests
	 * RPC_IS_QUEUED() locklessly and must not observe the task as
	 * dequeued before RUNNING is visible.
	 */
	bool need_wakeup = !rpc_test_and_set_running(task);

	rpc_clear_queued(task);
	/* Someone else already owns the running task: nothing to do. */
	if (!need_wakeup)
		return;
	if (RPC_IS_ASYNC(task)) {
		/* Async tasks are executed by a workqueue worker on @wq. */
		INIT_WORK(&task->u.tk_work, rpc_async_schedule);
		queue_work(wq, &task->u.tk_work);
	} else
		/* Sync tasks sleep in __rpc_execute(); wake them there. */
		wake_up_bit(&task->tk_runstate, RPC_TASK_QUEUED);
}

/*
 * Prepare for sleeping on a wait queue.
 * By always appending tasks to the list we ensure FIFO behavior.
 * NB: An RPC task will only receive interrupt-driven events as long
 * as it's on a wait queue.
 */
362 363 364 365
/*
 * Queue @task on @q at @queue_priority and install @action as the
 * callback to run when it is woken.  Arms a timeout if tk_timeout is
 * set.  Caller holds q->lock.
 */
static void __rpc_sleep_on_priority(struct rpc_wait_queue *q,
		struct rpc_task *task,
		rpc_action action,
		unsigned char queue_priority)
{
	dprintk("RPC: %5u sleep_on(queue \"%s\" time %lu)\n",
			task->tk_pid, rpc_qname(q), jiffies);

	trace_rpc_task_sleep(task, q);

	__rpc_add_wait_queue(q, task, queue_priority);

	/* A leftover callback would be silently overwritten: flag it. */
	WARN_ON_ONCE(task->tk_callback != NULL);
	task->tk_callback = action;
	__rpc_add_timer(q, task);
}

/*
 * Put @task to sleep on @q at the task's own priority, running @action
 * on wakeup.  Must only be called for an activated task; an inactive
 * one is failed with -EIO and released.
 */
void rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
				rpc_action action)
{
	/* We shouldn't ever put an inactive task to sleep */
	WARN_ON_ONCE(!RPC_IS_ACTIVATED(task));
	if (!RPC_IS_ACTIVATED(task)) {
		task->tk_status = -EIO;
		rpc_put_task_async(task);
		return;
	}

	/*
	 * Protect the queue operations.
	 */
	spin_lock_bh(&q->lock);
	__rpc_sleep_on_priority(q, task, action, task->tk_priority);
	spin_unlock_bh(&q->lock);
}
EXPORT_SYMBOL_GPL(rpc_sleep_on);
Linus Torvalds's avatar
Linus Torvalds committed
398

399 400 401 402
/*
 * As rpc_sleep_on(), but at an explicit @priority.  The caller passes
 * an absolute RPC priority; it is rebased against RPC_PRIORITY_LOW to
 * index the queue's levels.
 */
void rpc_sleep_on_priority(struct rpc_wait_queue *q, struct rpc_task *task,
		rpc_action action, int priority)
{
	/* We shouldn't ever put an inactive task to sleep */
	WARN_ON_ONCE(!RPC_IS_ACTIVATED(task));
	if (!RPC_IS_ACTIVATED(task)) {
		task->tk_status = -EIO;
		rpc_put_task_async(task);
		return;
	}

	/*
	 * Protect the queue operations.
	 */
	spin_lock_bh(&q->lock);
	__rpc_sleep_on_priority(q, task, action, priority - RPC_PRIORITY_LOW);
	spin_unlock_bh(&q->lock);
}
EXPORT_SYMBOL_GPL(rpc_sleep_on_priority);
418

Linus Torvalds's avatar
Linus Torvalds committed
419
/**
 * __rpc_do_wake_up_task_on_wq - wake up a single rpc_task
 * @wq: workqueue on which to run task
 * @queue: wait queue
 * @task: task to be woken up
 *
 * Caller must hold queue->lock, and have cleared the task queued flag.
 */
static void __rpc_do_wake_up_task_on_wq(struct workqueue_struct *wq,
		struct rpc_wait_queue *queue,
		struct rpc_task *task)
{
	dprintk("RPC: %5u __rpc_wake_up_task (now %lu)\n",
			task->tk_pid, jiffies);

	/* Has the task been executed yet? If not, we cannot wake it up! */
	if (!RPC_IS_ACTIVATED(task)) {
		printk(KERN_ERR "RPC: Inactive task (%p) being woken up!\n", task);
		return;
	}

	trace_rpc_task_wakeup(task, queue);

	/* Dequeue (also cancels any pending timeout) ... */
	__rpc_remove_wait_queue(queue, task);

	/* ... then hand the task to the workqueue / sync waiter. */
	rpc_make_runnable(wq, task);

	dprintk("RPC:       __rpc_wake_up_task done\n");
}

/*
450
 * Wake up a queued task while the queue lock is being held
Linus Torvalds's avatar
Linus Torvalds committed
451
 */
452 453 454 455
/*
 * Wake @task if it is currently queued on @queue and the optional
 * @action predicate (called with @data) agrees.  Returns @task when a
 * wakeup happened, NULL otherwise.  Caller holds queue->lock.
 */
static struct rpc_task *
rpc_wake_up_task_on_wq_queue_action_locked(struct workqueue_struct *wq,
		struct rpc_wait_queue *queue, struct rpc_task *task,
		bool (*action)(struct rpc_task *, void *), void *data)
{
	if (RPC_IS_QUEUED(task)) {
		/* pairs with the smp_wmb() in __rpc_add_wait_queue() */
		smp_rmb();
		if (task->tk_waitqueue == queue) {
			if (action == NULL || action(task, data)) {
				__rpc_do_wake_up_task_on_wq(wq, queue, task);
				return task;
			}
		}
	}
	return NULL;
}

/* Unconditional variant: wake @task if queued on @queue (lock held). */
static void
rpc_wake_up_task_on_wq_queue_locked(struct workqueue_struct *wq,
		struct rpc_wait_queue *queue, struct rpc_task *task)
{
	rpc_wake_up_task_on_wq_queue_action_locked(wq, queue, task, NULL, NULL);
}

476 477 478 479 480 481 482 483
/*
 * Wake up a queued task while the queue lock is being held
 */
/* As above, defaulting to the rpciod workqueue for async execution. */
static void rpc_wake_up_task_queue_locked(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	rpc_wake_up_task_on_wq_queue_locked(rpciod_workqueue, queue, task);
}

484 485 486 487 488 489 490
/*
 * Wake up a task on a specific queue
 */
/*
 * Wake @task if it is sleeping on @queue, running async work on @wq.
 * Takes queue->lock; the unlocked RPC_IS_QUEUED() check is only an
 * optimization and is re-done under the lock.
 */
void rpc_wake_up_queued_task_on_wq(struct workqueue_struct *wq,
		struct rpc_wait_queue *queue,
		struct rpc_task *task)
{
	if (!RPC_IS_QUEUED(task))
		return;
	spin_lock_bh(&queue->lock);
	rpc_wake_up_task_on_wq_queue_locked(wq, queue, task);
	spin_unlock_bh(&queue->lock);
}

Linus Torvalds's avatar
Linus Torvalds committed
498
/*
499
 * Wake up a task on a specific queue
Linus Torvalds's avatar
Linus Torvalds committed
500
 */
501
/*
 * Wake @task if it is sleeping on @queue, using the default rpciod
 * workqueue for async tasks.  Takes queue->lock.
 */
void rpc_wake_up_queued_task(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	if (!RPC_IS_QUEUED(task))
		return;
	spin_lock_bh(&queue->lock);
	rpc_wake_up_task_queue_locked(queue, task);
	spin_unlock_bh(&queue->lock);
}
EXPORT_SYMBOL_GPL(rpc_wake_up_queued_task);

511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544
/*
 * Wakeup predicate: unconditionally store *(int *)@status into
 * tk_status and allow the wakeup to proceed.
 */
static bool rpc_task_action_set_status(struct rpc_task *task, void *status)
{
	int *err = status;

	task->tk_status = *err;
	return true;
}

/* Locked helper: set @status on @task and wake it (queue->lock held). */
static void
rpc_wake_up_task_queue_set_status_locked(struct rpc_wait_queue *queue,
		struct rpc_task *task, int status)
{
	rpc_wake_up_task_on_wq_queue_action_locked(rpciod_workqueue, queue,
			task, rpc_task_action_set_status, &status);
}

/**
 * rpc_wake_up_queued_task_set_status - wake up a task and set task->tk_status
 * @queue: pointer to rpc_wait_queue
 * @task: pointer to rpc_task
 * @status: integer error value
 *
 * If @task is queued on @queue, then it is woken up, and @task->tk_status is
 * set to the value of @status.
 */
void
rpc_wake_up_queued_task_set_status(struct rpc_wait_queue *queue,
		struct rpc_task *task, int status)
{
	if (!RPC_IS_QUEUED(task))
		return;
	spin_lock_bh(&queue->lock);
	rpc_wake_up_task_queue_set_status_locked(queue, task, status);
	spin_unlock_bh(&queue->lock);
}

Linus Torvalds's avatar
Linus Torvalds committed
545 546 547
/*
 * Wake up the next task on a priority queue.
 */
548
static struct rpc_task *__rpc_find_next_queued_priority(struct rpc_wait_queue *queue)
{
	struct list_head *q;
	struct rpc_task *task;

	/*
	 * Service a batch of tasks from a single owner.
	 */
	q = &queue->tasks[queue->priority];
	/* queue->nr counts down the remaining batch at this level. */
	if (!list_empty(q) && --queue->nr) {
		task = list_first_entry(q, struct rpc_task, u.tk_wait.list);
		goto out;
	}

	/*
	 * Service the next queue.
	 */
	/* Rotate downwards through the levels, wrapping to maxpriority. */
	do {
		if (q == &queue->tasks[0])
			q = &queue->tasks[queue->maxpriority];
		else
			q = q - 1;
		if (!list_empty(q)) {
			task = list_first_entry(q, struct rpc_task, u.tk_wait.list);
			goto new_queue;
		}
	} while (q != &queue->tasks[queue->priority]);

	/* Nothing queued anywhere: reset for the next enqueue. */
	rpc_reset_waitqueue_priority(queue);
	return NULL;

new_queue:
	rpc_set_waitqueue_priority(queue, (unsigned int)(q - &queue->tasks[0]));
out:
	return task;
}

585 586 587 588 589 590 591 592 593
/*
 * Return the next runnable task on @queue without dequeueing it, or
 * NULL when the queue is empty.  Priority queues use the rotating
 * per-level scan; plain queues are simple FIFO.
 */
static struct rpc_task *__rpc_find_next_queued(struct rpc_wait_queue *queue)
{
	struct rpc_task *next = NULL;

	if (RPC_IS_PRIORITY(queue))
		return __rpc_find_next_queued_priority(queue);
	if (!list_empty(&queue->tasks[0]))
		next = list_first_entry(&queue->tasks[0],
				struct rpc_task, u.tk_wait.list);
	return next;
}

Linus Torvalds's avatar
Linus Torvalds committed
594
/*
595
 * Wake up the first task on the wait queue.
Linus Torvalds's avatar
Linus Torvalds committed
596
 */
597 598
/*
 * Wake the first task on @queue for which @func(@task, @data) returns
 * true, scheduling async work on @wq.  Returns the woken task or NULL.
 * Takes queue->lock.
 */
struct rpc_task *rpc_wake_up_first_on_wq(struct workqueue_struct *wq,
		struct rpc_wait_queue *queue,
		bool (*func)(struct rpc_task *, void *), void *data)
{
	struct rpc_task	*task = NULL;

	dprintk("RPC:       wake_up_first(%p \"%s\")\n",
			queue, rpc_qname(queue));
	spin_lock_bh(&queue->lock);
	task = __rpc_find_next_queued(queue);
	/* NULL is returned if @func vetoed the wakeup of the candidate. */
	if (task != NULL)
		task = rpc_wake_up_task_on_wq_queue_action_locked(wq, queue,
				task, func, data);
	spin_unlock_bh(&queue->lock);

	return task;
}
614 615 616 617 618 619 620 621 622

/*
 * Wake up the first task on the wait queue.
 */
/* As rpc_wake_up_first_on_wq(), defaulting to the rpciod workqueue. */
struct rpc_task *rpc_wake_up_first(struct rpc_wait_queue *queue,
		bool (*func)(struct rpc_task *, void *), void *data)
{
	return rpc_wake_up_first_on_wq(rpciod_workqueue, queue, func, data);
}
EXPORT_SYMBOL_GPL(rpc_wake_up_first);

/* Predicate that accepts any task: used by rpc_wake_up_next(). */
static bool rpc_wake_up_next_func(struct rpc_task *task, void *data)
{
	return true;
}

/*
 * Wake up the next task on the wait queue.
*/
struct rpc_task *rpc_wake_up_next(struct rpc_wait_queue *queue)
{
	return rpc_wake_up_first(queue, rpc_wake_up_next_func, NULL);
}
EXPORT_SYMBOL_GPL(rpc_wake_up_next);
Linus Torvalds's avatar
Linus Torvalds committed
638 639 640 641 642 643 644 645 646 647

/**
 * rpc_wake_up - wake up all rpc_tasks
 * @queue: rpc_wait_queue on which the tasks are sleeping
 *
 * Grabs queue->lock
 */
/**
 * rpc_wake_up - wake up all rpc_tasks
 * @queue: rpc_wait_queue on which the tasks are sleeping
 *
 * Grabs queue->lock
 */
void rpc_wake_up(struct rpc_wait_queue *queue)
{
	struct list_head *head;

	spin_lock_bh(&queue->lock);
	/* Walk every priority level, highest first. */
	head = &queue->tasks[queue->maxpriority];
	for (;;) {
		/*
		 * Re-fetch the head each iteration: the wakeup dequeues
		 * the task (and may promote an owner-group follower).
		 */
		while (!list_empty(head)) {
			struct rpc_task *task;
			task = list_first_entry(head,
					struct rpc_task,
					u.tk_wait.list);
			rpc_wake_up_task_queue_locked(queue, task);
		}
		if (head == &queue->tasks[0])
			break;
		head--;
	}
	spin_unlock_bh(&queue->lock);
}
EXPORT_SYMBOL_GPL(rpc_wake_up);
Linus Torvalds's avatar
Linus Torvalds committed
666 667 668 669 670 671 672 673 674 675 676 677

/**
 * rpc_wake_up_status - wake up all rpc_tasks and set their status value.
 * @queue: rpc_wait_queue on which the tasks are sleeping
 * @status: status value to set
 *
 * Grabs queue->lock
 */
/*
 * Wake every task on @queue after setting its tk_status to @status.
 * Same traversal as rpc_wake_up(); grabs queue->lock.
 */
void rpc_wake_up_status(struct rpc_wait_queue *queue, int status)
{
	struct list_head *head;

	spin_lock_bh(&queue->lock);
	head = &queue->tasks[queue->maxpriority];
	for (;;) {
		while (!list_empty(head)) {
			struct rpc_task *task;
			task = list_first_entry(head,
					struct rpc_task,
					u.tk_wait.list);
			/* Status must be set before the task can run. */
			task->tk_status = status;
			rpc_wake_up_task_queue_locked(queue, task);
		}
		if (head == &queue->tasks[0])
			break;
		head--;
	}
	spin_unlock_bh(&queue->lock);
}
EXPORT_SYMBOL_GPL(rpc_wake_up_status);
Linus Torvalds's avatar
Linus Torvalds committed
696

697
/*
 * Per-queue timer callback: time out every expired task on the queue
 * (waking it with -ETIMEDOUT) and re-arm the timer for the earliest
 * remaining expiry, if any.
 */
static void __rpc_queue_timer_fn(struct timer_list *t)
{
	struct rpc_wait_queue *queue = from_timer(queue, t, timer_list.timer);
	struct rpc_task *task, *n;
	unsigned long expires, now, timeo;

	/* Timer runs in softirq context; plain spin_lock suffices. */
	spin_lock(&queue->lock);
	expires = now = jiffies;
	list_for_each_entry_safe(task, n, &queue->timer_list.list, u.tk_wait.timer_list) {
		timeo = task->u.tk_wait.expires;
		if (time_after_eq(now, timeo)) {
			dprintk("RPC: %5u timeout\n", task->tk_pid);
			task->tk_status = -ETIMEDOUT;
			rpc_wake_up_task_queue_locked(queue, task);
			continue;
		}
		/* Track the minimum of the still-pending expiries. */
		if (expires == now || time_after(expires, timeo))
			expires = timeo;
	}
	if (!list_empty(&queue->timer_list.list))
		rpc_set_queue_timer(queue, expires);
	spin_unlock(&queue->lock);
}

721 722
/*
 * Wakeup callback for rpc_delay(): expiry of the delay is expected,
 * so a -ETIMEDOUT status from the queue timer is not an error here.
 */
static void __rpc_atrun(struct rpc_task *task)
{
	if (task->tk_status == -ETIMEDOUT)
		task->tk_status = 0;
}

Linus Torvalds's avatar
Linus Torvalds committed
727 728 729
/*
 * Run a task at a later time
 */
730
/*
 * Suspend @task for @delay jiffies by parking it on the global
 * delay_queue; __rpc_atrun() clears the resulting timeout status when
 * the delay expires.
 */
void rpc_delay(struct rpc_task *task, unsigned long delay)
{
	task->tk_timeout = delay;
	rpc_sleep_on(&delay_queue, task, __rpc_atrun);
}
EXPORT_SYMBOL_GPL(rpc_delay);
Linus Torvalds's avatar
Linus Torvalds committed
736

Trond Myklebust's avatar
Trond Myklebust committed
737 738 739
/*
 * Helper to call task->tk_ops->rpc_call_prepare
 */
740
/*
 * Invoke the task's rpc_call_prepare callback.  The ops pointer is
 * assumed non-NULL here; callers install this action only when a
 * prepare callback exists.
 */
void rpc_prepare_task(struct rpc_task *task)
{
	task->tk_ops->rpc_call_prepare(task, task->tk_calldata);
}

745 746 747 748 749 750 751 752 753 754 755 756 757 758 759 760 761 762 763 764 765
/* Set up per-task retry budgets and record the task's start time. */
static void
rpc_init_task_statistics(struct rpc_task *task)
{
	/* Initialize retry counters */
	task->tk_garb_retry = 2;
	task->tk_cred_retry = 2;
	task->tk_rebind_retry = 2;

	/* starting timestamp */
	task->tk_start = ktime_get();
}

/*
 * Reset a task's statistics and transient flags when it is restarted
 * (see rpc_exit_task() restarting a task via tk_action).
 */
static void
rpc_reset_task_statistics(struct rpc_task *task)
{
	task->tk_timeouts = 0;
	task->tk_flags &= ~(RPC_CALL_MAJORSEEN|RPC_TASK_KILLED|RPC_TASK_SENT);

	rpc_init_task_statistics(task);
}

766
/*
767
 * Helper that calls task->tk_ops->rpc_call_done if it exists
768
 */
769
void rpc_exit_task(struct rpc_task *task)
{
	/* NULL tk_action terminates the __rpc_execute() loop ... */
	task->tk_action = NULL;
	if (task->tk_ops->rpc_call_done != NULL) {
		task->tk_ops->rpc_call_done(task, task->tk_calldata);
		/* ... unless rpc_call_done restarted the task. */
		if (task->tk_action != NULL) {
			WARN_ON(RPC_ASSASSINATED(task));
			/* Always release the RPC slot and buffer memory */
			xprt_release(task);
			rpc_reset_task_statistics(task);
		}
	}
}
782 783 784 785 786 787 788 789 790

/*
 * Force @task to terminate with @status: route its next step to
 * rpc_exit_task() and, if it is asleep, wake it so that runs.  tk_status
 * and tk_action are set before the wakeup so the woken task sees them.
 */
void rpc_exit(struct rpc_task *task, int status)
{
	task->tk_status = status;
	task->tk_action = rpc_exit_task;
	if (RPC_IS_QUEUED(task))
		rpc_wake_up_queued_task(task->tk_waitqueue, task);
}
EXPORT_SYMBOL_GPL(rpc_exit);
791

792 793
void rpc_release_calldata(const struct rpc_call_ops *ops, void *calldata)
{
794
	if (ops->rpc_release != NULL)
795 796 797
		ops->rpc_release(calldata);
}

Linus Torvalds's avatar
Linus Torvalds committed
798 799 800
/*
 * This is the RPC `scheduler' (or rather, the finite state machine).
 */
801
static void __rpc_execute(struct rpc_task *task)
{
	struct rpc_wait_queue *queue;
	/* Cache: after rpc_clear_running() an async task may be freed. */
	int task_is_async = RPC_IS_ASYNC(task);
	int status = 0;

	dprintk("RPC: %5u __rpc_execute flags=0x%x\n",
			task->tk_pid, task->tk_flags);

	/* A queued task must not be executed concurrently. */
	WARN_ON_ONCE(RPC_IS_QUEUED(task));
	if (RPC_IS_QUEUED(task))
		return;

	for (;;) {
		void (*do_action)(struct rpc_task *);

		/*
		 * Perform the next FSM step or a pending callback.
		 *
		 * tk_action may be NULL if the task has been killed.
		 * In particular, note that rpc_killall_tasks may
		 * do this at any time, so beware when dereferencing.
		 */
		/* A wakeup callback (tk_callback) takes precedence and
		 * is consumed exactly once. */
		do_action = task->tk_action;
		if (task->tk_callback) {
			do_action = task->tk_callback;
			task->tk_callback = NULL;
		}
		if (!do_action)
			break;
		trace_rpc_task_run_action(task, do_action);
		do_action(task);

		/*
		 * Lockless check for whether task is sleeping or not.
		 */
		if (!RPC_IS_QUEUED(task))
			continue;
		/*
		 * The queue->lock protects against races with
		 * rpc_make_runnable().
		 *
		 * Note that once we clear RPC_TASK_RUNNING on an asynchronous
		 * rpc_task, rpc_make_runnable() can assign it to a
		 * different workqueue. We therefore cannot assume that the
		 * rpc_task pointer may still be dereferenced.
		 */
		queue = task->tk_waitqueue;
		spin_lock_bh(&queue->lock);
		if (!RPC_IS_QUEUED(task)) {
			spin_unlock_bh(&queue->lock);
			continue;
		}
		rpc_clear_running(task);
		spin_unlock_bh(&queue->lock);
		if (task_is_async)
			return;

		/* sync task: sleep here */
		dprintk("RPC: %5u sync task going to sleep\n", task->tk_pid);
		status = out_of_line_wait_on_bit(&task->tk_runstate,
				RPC_TASK_QUEUED, rpc_wait_bit_killable,
				TASK_KILLABLE);
		if (status == -ERESTARTSYS) {
			/*
			 * When a sync task receives a signal, it exits with
			 * -ERESTARTSYS. In order to catch any callbacks that
			 * clean up after sleeping on some queue, we don't
			 * break the loop here, but go around once more.
			 */
			dprintk("RPC: %5u got signal\n", task->tk_pid);
			task->tk_flags |= RPC_TASK_KILLED;
			rpc_exit(task, -ERESTARTSYS);
		}
		dprintk("RPC: %5u sync task resuming\n", task->tk_pid);
	}

	dprintk("RPC: %5u return %d, status %d\n", task->tk_pid, status,
			task->tk_status);
	/* Release all resources associated with the task */
	rpc_release_task(task);
}

/*
 * User-visible entry point to the scheduler.
 *
 * This may be called recursively if e.g. an async NFS task updates
 * the attributes and finds that dirty pages must be flushed.
 * NOTE: Upon exit of this function the task is guaranteed to be
 *	 released. In particular note that tk_release() will have
 *	 been called, so your task memory may have been freed.
 */
893
void rpc_execute(struct rpc_task *task)
Linus Torvalds's avatar
Linus Torvalds committed
894
{
895 896
	bool is_async = RPC_IS_ASYNC(task);

897
	rpc_set_active(task);
898
	rpc_make_runnable(rpciod_workqueue, task);
899
	if (!is_async)
900
		__rpc_execute(task);
Linus Torvalds's avatar
Linus Torvalds committed
901 902
}

903
static void rpc_async_schedule(struct work_struct *work)
Linus Torvalds's avatar
Linus Torvalds committed
904
{
905
	__rpc_execute(container_of(work, struct rpc_task, u.tk_work));
Linus Torvalds's avatar
Linus Torvalds committed
906 907
}

908
/**
909 910 911 912 913 914
 * rpc_malloc - allocate RPC buffer resources
 * @task: RPC task
 *
 * A single memory region is allocated, which is split between the
 * RPC call and RPC reply that this task is being used for. When
 * this RPC is retired, the memory is released by calling rpc_free.
Linus Torvalds's avatar
Linus Torvalds committed
915
 *
916
 * To prevent rpciod from hanging, this allocator never sleeps,
917 918 919
 * returning -ENOMEM and suppressing warning if the request cannot
 * be serviced immediately. The caller can arrange to sleep in a
 * way that is safe for rpciod.
920 921 922 923 924
 *
 * Most requests are 'small' (under 2KiB) and can be serviced from a
 * mempool, ensuring that NFS reads and writes can always proceed,
 * and that there is good locality of reference for these buffers.
 *
Linus Torvalds's avatar
Linus Torvalds committed
925
 * In order to avoid memory starvation triggering more writebacks of
926
 * NFS requests, we avoid using GFP_KERNEL.
Linus Torvalds's avatar
Linus Torvalds committed
927
 */
928
int rpc_malloc(struct rpc_task *task)
Linus Torvalds's avatar
Linus Torvalds committed
929
{
930 931
	struct rpc_rqst *rqst = task->tk_rqstp;
	size_t size = rqst->rq_callsize + rqst->rq_rcvsize;
932
	struct rpc_buffer *buf;
933
	gfp_t gfp = GFP_NOIO | __GFP_NOWARN;
Mel Gorman's avatar
Mel Gorman committed
934 935

	if (RPC_IS_SWAPPER(task))
936
		gfp = __GFP_MEMALLOC | GFP_NOWAIT | __GFP_NOWARN;
Linus Torvalds's avatar
Linus Torvalds committed
937

938
	size += sizeof(struct rpc_buffer);
939 940
	if (size <= RPC_BUFFER_MAXSIZE)
		buf = mempool_alloc(rpc_buffer_mempool, gfp);
Linus Torvalds's avatar
Linus Torvalds committed
941
	else
942
		buf = kmalloc(size, gfp);
943 944

	if (!buf)
945
		return -ENOMEM;
946

947
	buf->len = size;
948
	dprintk("RPC: %5u allocated buffer of size %zu at %p\n",
949
			task->tk_pid, size, buf);
950
	rqst->rq_buffer = buf->data;
951
	rqst->rq_rbuffer = (char *)rqst->rq_buffer + rqst->rq_callsize;
952
	return 0;
Linus Torvalds's avatar
Linus Torvalds committed
953
}
954
EXPORT_SYMBOL_GPL(rpc_malloc);
Linus Torvalds's avatar
Linus Torvalds committed
955

956
/**
957 958
 * rpc_free - free RPC buffer resources allocated via rpc_malloc
 * @task: RPC task
959 960
 *
 */
961
void rpc_free(struct rpc_task *task)
Linus Torvalds's avatar
Linus Torvalds committed
962
{
963
	void *buffer = task->tk_rqstp->rq_buffer;
964 965
	size_t size;
	struct rpc_buffer *buf;
966

967 968
	buf = container_of(buffer, struct rpc_buffer, data);
	size = buf->len;
969

970
	dprintk("RPC:       freeing buffer of size %zu at %p\n",
971
			size, buf);
972

973 974 975 976
	if (size <= RPC_BUFFER_MAXSIZE)
		mempool_free(buf, rpc_buffer_mempool);
	else
		kfree(buf);
Linus Torvalds's avatar
Linus Torvalds committed
977
}
978
EXPORT_SYMBOL_GPL(rpc_free);
Linus Torvalds's avatar
Linus Torvalds committed
979 980 981 982

/*
 * Creation and deletion of RPC task structures
 */
983
/*
 * Initialize an rpc_task from the caller-supplied setup data.
 * The task may be freshly allocated or caller-provided (possibly
 * recycled), hence the full memset below.
 */
static void rpc_init_task(struct rpc_task *task, const struct rpc_task_setup *task_setup_data)
{
	memset(task, 0, sizeof(*task));
	/* Initial reference; dropped via rpc_put_task()/rpc_release_task() */
	atomic_set(&task->tk_count, 1);
	task->tk_flags  = task_setup_data->flags;
	task->tk_ops = task_setup_data->callback_ops;
	task->tk_calldata = task_setup_data->callback_data;
	INIT_LIST_HEAD(&task->tk_task);

	task->tk_priority = task_setup_data->priority - RPC_PRIORITY_LOW;
	task->tk_owner = current->tgid;

	/* Initialize workqueue for async tasks */
	task->tk_workqueue = task_setup_data->workqueue;

	/* Take references on the transport and operation credential */
	task->tk_xprt = xprt_get(task_setup_data->rpc_xprt);

	task->tk_op_cred = get_rpccred(task_setup_data->rpc_op_cred);

	/* Callers with a prepare callback enter the state machine there */
	if (task->tk_ops->rpc_call_prepare != NULL)
		task->tk_action = rpc_prepare_task;

	rpc_init_task_statistics(task);

	dprintk("RPC:       new task initialized, procpid %u\n",
				task_pid_nr(current));
}

static struct rpc_task *
rpc_alloc_task(void)
{
Mel Gorman's avatar
Mel Gorman committed
1014
	return (struct rpc_task *)mempool_alloc(rpc_task_mempool, GFP_NOIO);
Linus Torvalds's avatar
Linus Torvalds committed
1015 1016 1017
}

/*
1018
 * Create a new task for the specified client.
Linus Torvalds's avatar
Linus Torvalds committed
1019
 */
1020
struct rpc_task *rpc_new_task(const struct rpc_task_setup *setup_data)
Linus Torvalds's avatar
Linus Torvalds committed
1021
{
1022 1023 1024 1025 1026 1027 1028
	struct rpc_task	*task = setup_data->task;
	unsigned short flags = 0;

	if (task == NULL) {
		task = rpc_alloc_task();
		flags = RPC_TASK_DYNAMIC;
	}
Linus Torvalds's avatar
Linus Torvalds committed
1029

1030
	rpc_init_task(task, setup_data);
1031
	task->tk_flags |= flags;
1032
	dprintk("RPC:       allocated task %p\n", task);
Linus Torvalds's avatar
Linus Torvalds committed
1033 1034 1035
	return task;
}

1036 1037 1038 1039 1040 1041 1042 1043 1044 1045 1046 1047 1048 1049 1050 1051 1052 1053 1054
/*
 * rpc_free_task - release rpc task and perform cleanups
 *
 * Note that we free up the rpc_task _after_ rpc_release_calldata()
 * in order to work around a workqueue dependency issue.
 *
 * Tejun Heo states:
 * "Workqueue currently considers two work items to be the same if they're
 * on the same address and won't execute them concurrently - ie. it
 * makes a work item which is queued again while being executed wait
 * for the previous execution to complete.
 *
 * If a work function frees the work item, and then waits for an event
 * which should be performed by another work item and *that* work item
 * recycles the freed work item, it can create a false dependency loop.
 * There really is no reliable way to detect this short of verifying
 * every memory free."
 *
 */
1055
static void rpc_free_task(struct rpc_task *task)
Linus Torvalds's avatar
Linus Torvalds committed
1056
{
1057 1058
	unsigned short tk_flags = task->tk_flags;

1059
	put_rpccred(task->tk_op_cred);
1060
	rpc_release_calldata(task->tk_ops, task->tk_calldata);
Linus Torvalds's avatar
Linus Torvalds committed
1061

1062
	if (tk_flags & RPC_TASK_DYNAMIC) {
1063 1064 1065
		dprintk("RPC: %5u freeing task\n", task->tk_pid);
		mempool_free(task, rpc_task_mempool);
	}
1066 1067 1068 1069 1070 1071 1072
}

/* Workqueue callback: free a task whose last reference was dropped. */
static void rpc_async_release(struct work_struct *work)
{
	struct rpc_task *task;

	task = container_of(work, struct rpc_task, u.tk_work);
	rpc_free_task(task);
}

1073
static void rpc_release_resources_task(struct rpc_task *task)
1074
{
1075
	xprt_release(task);
1076
	if (task->tk_msg.rpc_cred) {
1077
		put_cred(task->tk_msg.rpc_cred);
1078 1079
		task->tk_msg.rpc_cred = NULL;
	}
1080
	rpc_task_release_client(task);
1081 1082 1083 1084 1085 1086
}

static void rpc_final_put_task(struct rpc_task *task,
		struct workqueue_struct *q)
{
	if (q != NULL) {
1087
		INIT_WORK(&task->u.tk_work, rpc_async_release);
1088
		queue_work(q, &task->u.tk_work);
1089 1090
	} else
		rpc_free_task(task);
1091
}
1092 1093 1094 1095 1096 1097 1098 1099 1100 1101 1102 1103 1104

/* Drop one reference; release the task when it was the last one. */
static void rpc_do_put_task(struct rpc_task *task, struct workqueue_struct *q)
{
	if (!atomic_dec_and_test(&task->tk_count))
		return;
	rpc_release_resources_task(task);
	rpc_final_put_task(task, q);
}

/* Drop a task reference; any final free happens in the caller's context. */
void rpc_put_task(struct rpc_task *task)
{
	rpc_do_put_task(task, NULL);
}
EXPORT_SYMBOL_GPL(rpc_put_task);
1106

1107 1108 1109 1110 1111 1112
/*
 * Drop a task reference; a final free is deferred to the task's
 * workqueue (if it has one) rather than done in the caller's context.
 */
void rpc_put_task_async(struct rpc_task *task)
{
	rpc_do_put_task(task, task->tk_workqueue);
}
EXPORT_SYMBOL_GPL(rpc_put_task_async);

1113
/*
 * Drop the scheduler's own reference to a task that has finished
 * executing.  Must not be called while the task is still queued.
 */
static void rpc_release_task(struct rpc_task *task)
{
	dprintk("RPC: %5u release task\n", task->tk_pid);

	WARN_ON_ONCE(RPC_IS_QUEUED(task));

	rpc_release_resources_task(task);

	/*
	 * Note: at this point we have been removed from rpc_clnt->cl_tasks,
	 * so it should be safe to use task->tk_count as a test for whether
	 * or not any other processes still hold references to our rpc_task.
	 */
	if (atomic_read(&task->tk_count) != 1 + !RPC_IS_ASYNC(task)) {
		/* Wake up anyone who may be waiting for task completion */
		if (!rpc_complete_task(task))
			return;
	} else {
		/* We hold the only outstanding reference(s); drop ours. */
		if (!atomic_dec_and_test(&task->tk_count))
			return;
	}
	rpc_final_put_task(task, task->tk_workqueue);
}

1137 1138 1139 1140 1141 1142 1143 1144 1145 1146
/* Pin the module while an rpciod user exists. */
int rpciod_up(void)
{
	if (!try_module_get(THIS_MODULE))
		return -EINVAL;
	return 0;
}

/* Release the module reference taken by rpciod_up(). */
void rpciod_down(void)
{
	module_put(THIS_MODULE);
}

Linus Torvalds's avatar
Linus Torvalds committed
1147
/*
1148
 * Start up the rpciod workqueue.
Linus Torvalds's avatar
Linus Torvalds committed
1149
 */
1150
static int rpciod_start(void)
Linus Torvalds's avatar
Linus Torvalds committed
1151 1152
{
	struct workqueue_struct *wq;
1153

Linus Torvalds's avatar
Linus Torvalds committed
1154 1155 1156
	/*
	 * Create the rpciod thread and wait for it to start.
	 */
1157
	dprintk("RPC:       creating workqueue rpciod\n");
1158
	wq = alloc_workqueue("rpciod", WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
1159 1160
	if (!wq)
		goto out_failed;
Linus Torvalds's avatar
Linus Torvalds committed
1161
	rpciod_workqueue = wq;
1162
	/* Note: highpri because network receive is latency sensitive */
1163
	wq = alloc_workqueue("xprtiod", WQ_UNBOUND|WQ_MEM_RECLAIM|WQ_HIGHPRI, 0);
1164 1165 1166 1167 1168 1169 1170 1171 1172 1173
	if (!wq)
		goto free_rpciod;
	xprtiod_workqueue = wq;
	return 1;
free_rpciod:
	wq = rpciod_workqueue;
	rpciod_workqueue = NULL;
	destroy_workqueue(wq);
out_failed:
	return 0;
Linus Torvalds's avatar
Linus Torvalds committed
1174 1175
}

1176
static void rpciod_stop(void)
Linus Torvalds's avatar
Linus Torvalds committed
1177
{
1178
	struct workqueue_struct *wq = NULL;
1179

1180 1181
	if (rpciod_workqueue == NULL)
		return;
1182
	dprintk("RPC:       destroying workqueue rpciod\n");
Linus Torvalds's avatar
Linus Torvalds committed
1183

1184 1185 1186
	wq = rpciod_workqueue;
	rpciod_workqueue = NULL;
	destroy_workqueue(wq);
1187 1188 1189
	wq = xprtiod_workqueue;
	xprtiod_workqueue = NULL;
	destroy_workqueue(wq);
Linus Torvalds's avatar
Linus Torvalds committed
1190 1191 1192 1193 1194
}

/*
 * Tear down everything rpc_init_mempool() set up.  Safe to call on a
 * partially-initialised state: the destroy helpers tolerate NULL.
 */
void
rpc_destroy_mempool(void)
{
	rpciod_stop();
	/* Pools must be destroyed before the slab caches backing them. */
	mempool_destroy(rpc_buffer_mempool);
	mempool_destroy(rpc_task_mempool);
	kmem_cache_destroy(rpc_task_slabp);
	kmem_cache_destroy(rpc_buffer_slabp);
	rpc_destroy_wait_queue(&delay_queue);
}

int
rpc_init_mempool(void)
{
1206 1207 1208 1209 1210 1211 1212 1213
	/*
	 * The following is not strictly a mempool initialisation,
	 * but there is no harm in doing it here
	 */
	rpc_init_wait_queue(&delay_queue, "delayq");
	if (!rpciod_start())
		goto err_nomem;

Linus Torvalds's avatar
Linus Torvalds committed
1214 1215 1216
	rpc_task_slabp = kmem_cache_create("rpc_tasks",
					     sizeof(struct rpc_task),
					     0, SLAB_HWCACHE_ALIGN,
1217
					     NULL);
Linus Torvalds's avatar
Linus Torvalds committed
1218 1219 1220 1221 1222
	if (!rpc_task_slabp)
		goto err_nomem;
	rpc_buffer_slabp = kmem_cache_create("rpc_buffers",
					     RPC_BUFFER_MAXSIZE,
					     0, SLAB_HWCACHE_ALIGN,
1223
					     NULL);
Linus Torvalds's avatar
Linus Torvalds committed
1224 1225
	if (!rpc_buffer_slabp)
		goto err_nomem;
1226 1227
	rpc_task_mempool = mempool_create_slab_pool(RPC_TASK_POOLSIZE,
						    rpc_task_slabp);
Linus Torvalds's avatar
Linus Torvalds committed
1228 1229
	if (!rpc_task_mempool)
		goto err_nomem;
1230 1231
	rpc_buffer_mempool = mempool_create_slab_pool(RPC_BUFFER_POOLSIZE,
						      rpc_buffer_slabp);
Linus Torvalds's avatar
Linus Torvalds committed
1232 1233 1234 1235 1236 1237 1238
	if (!rpc_buffer_mempool)
		goto err_nomem;
	return 0;
err_nomem:
	rpc_destroy_mempool();
	return -ENOMEM;
}