/*
 *  pNFS functions to call and manage layout drivers.
 *
 *  Copyright (c) 2002 [year of first publication]
 *  The Regents of the University of Michigan
 *  All Rights Reserved
 *
 *  Dean Hildebrand <dhildebz@umich.edu>
 *
 *  Permission is granted to use, copy, create derivative works, and
 *  redistribute this software and such derivative works for any purpose,
 *  so long as the name of the University of Michigan is not used in
 *  any advertising or publicity pertaining to the use or distribution
 *  of this software without specific, written prior authorization. If
 *  the above copyright notice or any other identification of the
 *  University of Michigan is included in any copy of any portion of
 *  this software, then the disclaimer below must also be included.
 *
 *  This software is provided as is, without representation or warranty
 *  of any kind either express or implied, including without limitation
 *  the implied warranties of merchantability, fitness for a particular
 *  purpose, or noninfringement.  The Regents of the University of
 *  Michigan shall not be liable for any damages, including special,
 *  indirect, incidental, or consequential damages, with respect to any
 *  claim arising out of or in connection with the use of the software,
 *  even if it has been or is hereafter advised of the possibility of
 *  such damages.
 */

#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/module.h>
#include <linux/sort.h>
#include "internal.h"
#include "pnfs.h"
#include "iostat.h"
#include "nfs4trace.h"
#include "delegation.h"
#include "nfs42.h"
#include "nfs4_fs.h"

#define NFSDBG_FACILITY		NFSDBG_PNFS
#define PNFS_LAYOUTGET_RETRY_TIMEOUT (120*HZ)

/* Locking:
 *
 * pnfs_spinlock:
 *      protects pnfs_modules_tbl.
 */
static DEFINE_SPINLOCK(pnfs_spinlock);

/*
 * pnfs_modules_tbl holds all pnfs modules
 */
static LIST_HEAD(pnfs_modules_tbl);
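
/*
 * Layout drivers add themselves to pnfs_modules_tbl via
 * pnfs_register_layoutdriver() (typically from their module init) and are
 * looked up by layout type id in find_pnfs_driver_locked() below.
 */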

static void pnfs_layoutreturn_before_put_layout_hdr(struct pnfs_layout_hdr *lo);
static void pnfs_free_returned_lsegs(struct pnfs_layout_hdr *lo,
		struct list_head *free_me,
		const struct pnfs_layout_range *range,
		u32 seq);
static bool pnfs_lseg_dec_and_remove_zero(struct pnfs_layout_segment *lseg,
		struct list_head *tmp_list);

/* Return the registered pnfs layout driver module matching given id */
static struct pnfs_layoutdriver_type *
find_pnfs_driver_locked(u32 id)
{
	struct pnfs_layoutdriver_type *local;

	list_for_each_entry(local, &pnfs_modules_tbl, pnfs_tblid)
		if (local->id == id)
			goto out;
	local = NULL;
out:
	dprintk("%s: Searching for id %u, found %p\n", __func__, id, local);
	return local;
}

static struct pnfs_layoutdriver_type *
find_pnfs_driver(u32 id)
{
	struct pnfs_layoutdriver_type *local;

	spin_lock(&pnfs_spinlock);
	local = find_pnfs_driver_locked(id);
	if (local != NULL && !try_module_get(local->owner)) {
		dprintk("%s: Could not grab reference on module\n", __func__);
		local = NULL;
	}
	spin_unlock(&pnfs_spinlock);
	return local;
}

void
unset_pnfs_layoutdriver(struct nfs_server *nfss)
{
	if (nfss->pnfs_curr_ld) {
		if (nfss->pnfs_curr_ld->clear_layoutdriver)
			nfss->pnfs_curr_ld->clear_layoutdriver(nfss);
		/* Decrement the MDS count. Purge the deviceid cache if zero */
		if (atomic_dec_and_test(&nfss->nfs_client->cl_mds_count))
			nfs4_deviceid_purge_client(nfss->nfs_client);
		module_put(nfss->pnfs_curr_ld->owner);
	}
	nfss->pnfs_curr_ld = NULL;
}

/*
 * When the server sends a list of layout types, we choose one in the order
 * given in the list below.
 *
 * FIXME: should this list be configurable in some fashion? module param?
 * 	  mount option? something else?
 */
static const u32 ld_prefs[] = {
	LAYOUT_SCSI,
	LAYOUT_BLOCK_VOLUME,
	LAYOUT_OSD2_OBJECTS,
	LAYOUT_FLEX_FILES,
	LAYOUT_NFSV4_1_FILES,
	0
};

static int
ld_cmp(const void *e1, const void *e2)
{
	u32 ld1 = *((u32 *)e1);
	u32 ld2 = *((u32 *)e2);
	int i;

	for (i = 0; ld_prefs[i] != 0; i++) {
		if (ld1 == ld_prefs[i])
			return -1;

		if (ld2 == ld_prefs[i])
			return 1;
	}
	return 0;
}
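
/*
 * Example: if the server advertises both LAYOUT_NFSV4_1_FILES and
 * LAYOUT_FLEX_FILES, sorting fsinfo->layouttype[] with ld_cmp() places
 * LAYOUT_FLEX_FILES first, so set_pnfs_layoutdriver() below will try the
 * flexfiles driver before the files driver.
 */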

/*
 * Try to set the server's pnfs module to the pnfs layout type specified by id.
 * Currently only one pNFS layout driver per filesystem is supported.
 *
 * @ids array of layout types supported by MDS.
 */
void
set_pnfs_layoutdriver(struct nfs_server *server, const struct nfs_fh *mntfh,
		      struct nfs_fsinfo *fsinfo)
{
	struct pnfs_layoutdriver_type *ld_type = NULL;
	u32 id;
	int i;

	if (fsinfo->nlayouttypes == 0)
		goto out_no_driver;
	if (!(server->nfs_client->cl_exchange_flags &
		 (EXCHGID4_FLAG_USE_NON_PNFS | EXCHGID4_FLAG_USE_PNFS_MDS))) {
		printk(KERN_ERR "NFS: %s: cl_exchange_flags 0x%x\n",
			__func__, server->nfs_client->cl_exchange_flags);
		goto out_no_driver;
	}

	sort(fsinfo->layouttype, fsinfo->nlayouttypes,
		sizeof(*fsinfo->layouttype), ld_cmp, NULL);

	for (i = 0; i < fsinfo->nlayouttypes; i++) {
		id = fsinfo->layouttype[i];
		ld_type = find_pnfs_driver(id);
		if (!ld_type) {
			request_module("%s-%u", LAYOUT_NFSV4_1_MODULE_PREFIX,
					id);
			ld_type = find_pnfs_driver(id);
		}
		if (ld_type)
			break;
	}

	if (!ld_type) {
		dprintk("%s: No pNFS module found!\n", __func__);
		goto out_no_driver;
	}

	server->pnfs_curr_ld = ld_type;
	if (ld_type->set_layoutdriver
	    && ld_type->set_layoutdriver(server, mntfh)) {
		printk(KERN_ERR "NFS: %s: Error initializing pNFS layout "
			"driver %u.\n", __func__, id);
		module_put(ld_type->owner);
		goto out_no_driver;
	}
	/* Bump the MDS count */
	atomic_inc(&server->nfs_client->cl_mds_count);

	dprintk("%s: pNFS module for %u set\n", __func__, id);
	return;

out_no_driver:
	dprintk("%s: Using NFSv4 I/O\n", __func__);
	server->pnfs_curr_ld = NULL;
}


int
pnfs_register_layoutdriver(struct pnfs_layoutdriver_type *ld_type)
{
	int status = -EINVAL;
	struct pnfs_layoutdriver_type *tmp;

	if (ld_type->id == 0) {
		printk(KERN_ERR "NFS: %s id 0 is reserved\n", __func__);
		return status;
	}
	if (!ld_type->alloc_lseg || !ld_type->free_lseg) {
		printk(KERN_ERR "NFS: %s Layout driver must provide "
		       "alloc_lseg and free_lseg.\n", __func__);
		return status;
	}

	spin_lock(&pnfs_spinlock);
	tmp = find_pnfs_driver_locked(ld_type->id);
	if (!tmp) {
		list_add(&ld_type->pnfs_tblid, &pnfs_modules_tbl);
		status = 0;
		dprintk("%s Registering id:%u name:%s\n", __func__, ld_type->id,
			ld_type->name);
	} else {
		printk(KERN_ERR "NFS: %s Module with id %d already loaded!\n",
			__func__, ld_type->id);
	}
	spin_unlock(&pnfs_spinlock);

	return status;
}
EXPORT_SYMBOL_GPL(pnfs_register_layoutdriver);

void
pnfs_unregister_layoutdriver(struct pnfs_layoutdriver_type *ld_type)
{
	dprintk("%s Deregistering id:%u\n", __func__, ld_type->id);
	spin_lock(&pnfs_spinlock);
	list_del(&ld_type->pnfs_tblid);
	spin_unlock(&pnfs_spinlock);
}
EXPORT_SYMBOL_GPL(pnfs_unregister_layoutdriver);

/*
 * pNFS client layout cache
 */

/* Need to hold i_lock if caller does not already hold reference */
void
pnfs_get_layout_hdr(struct pnfs_layout_hdr *lo)
{
	refcount_inc(&lo->plh_refcount);
}

static struct pnfs_layout_hdr *
pnfs_alloc_layout_hdr(struct inode *ino, gfp_t gfp_flags)
{
	struct pnfs_layoutdriver_type *ld = NFS_SERVER(ino)->pnfs_curr_ld;
	return ld->alloc_layout_hdr(ino, gfp_flags);
}

static void
pnfs_free_layout_hdr(struct pnfs_layout_hdr *lo)
{
	struct nfs_server *server = NFS_SERVER(lo->plh_inode);
	struct pnfs_layoutdriver_type *ld = server->pnfs_curr_ld;

	if (!list_empty(&lo->plh_layouts)) {
		struct nfs_client *clp = server->nfs_client;

		spin_lock(&clp->cl_lock);
		list_del_init(&lo->plh_layouts);
		spin_unlock(&clp->cl_lock);
	}
	put_cred(lo->plh_lc_cred);
	return ld->free_layout_hdr(lo);
}

static void
pnfs_detach_layout_hdr(struct pnfs_layout_hdr *lo)
{
	struct nfs_inode *nfsi = NFS_I(lo->plh_inode);
	dprintk("%s: freeing layout cache %p\n", __func__, lo);
	nfsi->layout = NULL;
	/* Reset MDS Threshold I/O counters */
	nfsi->write_io = 0;
	nfsi->read_io = 0;
}

void
pnfs_put_layout_hdr(struct pnfs_layout_hdr *lo)
{
	struct inode *inode;

	if (!lo)
		return;
	inode = lo->plh_inode;
	pnfs_layoutreturn_before_put_layout_hdr(lo);

	if (refcount_dec_and_lock(&lo->plh_refcount, &inode->i_lock)) {
		if (!list_empty(&lo->plh_segs))
			WARN_ONCE(1, "NFS: BUG unfreed layout segments.\n");
		pnfs_detach_layout_hdr(lo);
		spin_unlock(&inode->i_lock);
		pnfs_free_layout_hdr(lo);
	}
}

static void
pnfs_set_plh_return_info(struct pnfs_layout_hdr *lo, enum pnfs_iomode iomode,
			 u32 seq)
{
	if (lo->plh_return_iomode != 0 && lo->plh_return_iomode != iomode)
		iomode = IOMODE_ANY;
	lo->plh_return_iomode = iomode;
	set_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags);
	if (seq != 0) {
		WARN_ON_ONCE(lo->plh_return_seq != 0 && lo->plh_return_seq != seq);
		lo->plh_return_seq = seq;
	}
}

static void
pnfs_clear_layoutreturn_info(struct pnfs_layout_hdr *lo)
{
	struct pnfs_layout_segment *lseg;
	lo->plh_return_iomode = 0;
	lo->plh_return_seq = 0;
	clear_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags);
	list_for_each_entry(lseg, &lo->plh_segs, pls_list) {
		if (!test_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags))
			continue;
		pnfs_set_plh_return_info(lo, lseg->pls_range.iomode, 0);
	}
}

static void pnfs_clear_layoutreturn_waitbit(struct pnfs_layout_hdr *lo)
{
	clear_bit_unlock(NFS_LAYOUT_RETURN, &lo->plh_flags);
	clear_bit(NFS_LAYOUT_RETURN_LOCK, &lo->plh_flags);
	smp_mb__after_atomic();
	wake_up_bit(&lo->plh_flags, NFS_LAYOUT_RETURN);
	rpc_wake_up(&NFS_SERVER(lo->plh_inode)->roc_rpcwaitq);
}

static void
pnfs_clear_lseg_state(struct pnfs_layout_segment *lseg,
		struct list_head *free_me)
{
	clear_bit(NFS_LSEG_ROC, &lseg->pls_flags);
	clear_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags);
	if (test_and_clear_bit(NFS_LSEG_VALID, &lseg->pls_flags))
		pnfs_lseg_dec_and_remove_zero(lseg, free_me);
	if (test_and_clear_bit(NFS_LSEG_LAYOUTCOMMIT, &lseg->pls_flags))
		pnfs_lseg_dec_and_remove_zero(lseg, free_me);
}

/*
 * Update the seqid of a layout stateid after receiving
 * NFS4ERR_OLD_STATEID
 */
bool nfs4_layout_refresh_old_stateid(nfs4_stateid *dst,
		struct pnfs_layout_range *dst_range,
		struct inode *inode)
{
	struct pnfs_layout_hdr *lo;
	struct pnfs_layout_range range = {
		.iomode = IOMODE_ANY,
		.offset = 0,
		.length = NFS4_MAX_UINT64,
	};
	bool ret = false;
	LIST_HEAD(head);
	int err;

	spin_lock(&inode->i_lock);
	lo = NFS_I(inode)->layout;
	if (lo &&  pnfs_layout_is_valid(lo) &&
	    nfs4_stateid_match_other(dst, &lo->plh_stateid)) {
		/* Is our call using the most recent seqid? If so, bump it */
		if (!nfs4_stateid_is_newer(&lo->plh_stateid, dst)) {
			nfs4_stateid_seqid_inc(dst);
			ret = true;
			goto out;
		}
		/* Try to update the seqid to the most recent */
		err = pnfs_mark_matching_lsegs_return(lo, &head, &range, 0);
		if (err != -EBUSY) {
			dst->seqid = lo->plh_stateid.seqid;
			*dst_range = range;
			ret = true;
		}
	}
out:
	spin_unlock(&inode->i_lock);
	pnfs_free_lseg_list(&head);
	return ret;
}

/*
 * Mark a pnfs_layout_hdr and all associated layout segments as invalid
 *
 * In order to continue using the pnfs_layout_hdr, a full recovery
 * is required.
 * Note that caller must hold inode->i_lock.
 */
int
pnfs_mark_layout_stateid_invalid(struct pnfs_layout_hdr *lo,
		struct list_head *lseg_list)
{
	struct pnfs_layout_range range = {
		.iomode = IOMODE_ANY,
		.offset = 0,
		.length = NFS4_MAX_UINT64,
	};
	struct pnfs_layout_segment *lseg, *next;

	set_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags);
	list_for_each_entry_safe(lseg, next, &lo->plh_segs, pls_list)
		pnfs_clear_lseg_state(lseg, lseg_list);
	pnfs_clear_layoutreturn_info(lo);
	pnfs_free_returned_lsegs(lo, lseg_list, &range, 0);
	if (test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags) &&
	    !test_and_set_bit(NFS_LAYOUT_RETURN_LOCK, &lo->plh_flags))
		pnfs_clear_layoutreturn_waitbit(lo);
	return !list_empty(&lo->plh_segs);
}

static int
pnfs_iomode_to_fail_bit(u32 iomode)
{
	return iomode == IOMODE_RW ?
		NFS_LAYOUT_RW_FAILED : NFS_LAYOUT_RO_FAILED;
}

static void
pnfs_layout_set_fail_bit(struct pnfs_layout_hdr *lo, int fail_bit)
{
	lo->plh_retry_timestamp = jiffies;
	if (!test_and_set_bit(fail_bit, &lo->plh_flags))
		refcount_inc(&lo->plh_refcount);
}

static void
pnfs_layout_clear_fail_bit(struct pnfs_layout_hdr *lo, int fail_bit)
{
	if (test_and_clear_bit(fail_bit, &lo->plh_flags))
		refcount_dec(&lo->plh_refcount);
}

static void
pnfs_layout_io_set_failed(struct pnfs_layout_hdr *lo, u32 iomode)
{
	struct inode *inode = lo->plh_inode;
	struct pnfs_layout_range range = {
		.iomode = iomode,
		.offset = 0,
		.length = NFS4_MAX_UINT64,
	};
	LIST_HEAD(head);

	spin_lock(&inode->i_lock);
	pnfs_layout_set_fail_bit(lo, pnfs_iomode_to_fail_bit(iomode));
	pnfs_mark_matching_lsegs_invalid(lo, &head, &range, 0);
	spin_unlock(&inode->i_lock);
	pnfs_free_lseg_list(&head);
	dprintk("%s Setting layout IOMODE_%s fail bit\n", __func__,
			iomode == IOMODE_RW ?  "RW" : "READ");
}

static bool
pnfs_layout_io_test_failed(struct pnfs_layout_hdr *lo, u32 iomode)
{
	unsigned long start, end;
	int fail_bit = pnfs_iomode_to_fail_bit(iomode);

	if (test_bit(fail_bit, &lo->plh_flags) == 0)
		return false;
	end = jiffies;
	start = end - PNFS_LAYOUTGET_RETRY_TIMEOUT;
	if (!time_in_range(lo->plh_retry_timestamp, start, end)) {
		/* It is time to retry the failed layoutgets */
		pnfs_layout_clear_fail_bit(lo, fail_bit);
		return false;
	}
	return true;
}

static void
pnfs_init_lseg(struct pnfs_layout_hdr *lo, struct pnfs_layout_segment *lseg,
		const struct pnfs_layout_range *range,
		const nfs4_stateid *stateid)
{
	INIT_LIST_HEAD(&lseg->pls_list);
	INIT_LIST_HEAD(&lseg->pls_lc_list);
	refcount_set(&lseg->pls_refcount, 1);
	set_bit(NFS_LSEG_VALID, &lseg->pls_flags);
	lseg->pls_layout = lo;
	lseg->pls_range = *range;
	lseg->pls_seq = be32_to_cpu(stateid->seqid);
}

static void pnfs_free_lseg(struct pnfs_layout_segment *lseg)
{
	if (lseg != NULL) {
		struct inode *inode = lseg->pls_layout->plh_inode;
		NFS_SERVER(inode)->pnfs_curr_ld->free_lseg(lseg);
	}
}

static void
pnfs_layout_remove_lseg(struct pnfs_layout_hdr *lo,
		struct pnfs_layout_segment *lseg)
{
	WARN_ON(test_bit(NFS_LSEG_VALID, &lseg->pls_flags));
	list_del_init(&lseg->pls_list);
	/* Matched by pnfs_get_layout_hdr in pnfs_layout_insert_lseg */
	refcount_dec(&lo->plh_refcount);
	if (test_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags))
		return;
	if (list_empty(&lo->plh_segs) &&
	    !test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags) &&
	    !test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags)) {
		if (atomic_read(&lo->plh_outstanding) == 0)
			set_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags);
		clear_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags);
	}
}

static bool
pnfs_cache_lseg_for_layoutreturn(struct pnfs_layout_hdr *lo,
		struct pnfs_layout_segment *lseg)
{
	if (test_and_clear_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags) &&
	    pnfs_layout_is_valid(lo)) {
		pnfs_set_plh_return_info(lo, lseg->pls_range.iomode, 0);
		list_move_tail(&lseg->pls_list, &lo->plh_return_segs);
		return true;
	}
	return false;
}

void
pnfs_put_lseg(struct pnfs_layout_segment *lseg)
{
	struct pnfs_layout_hdr *lo;
	struct inode *inode;

	if (!lseg)
		return;

	dprintk("%s: lseg %p ref %d valid %d\n", __func__, lseg,
		refcount_read(&lseg->pls_refcount),
		test_bit(NFS_LSEG_VALID, &lseg->pls_flags));

	lo = lseg->pls_layout;
	inode = lo->plh_inode;

	if (refcount_dec_and_lock(&lseg->pls_refcount, &inode->i_lock)) {
		if (test_bit(NFS_LSEG_VALID, &lseg->pls_flags)) {
			spin_unlock(&inode->i_lock);
			return;
		}
		pnfs_get_layout_hdr(lo);
		pnfs_layout_remove_lseg(lo, lseg);
		if (pnfs_cache_lseg_for_layoutreturn(lo, lseg))
			lseg = NULL;
		spin_unlock(&inode->i_lock);
		pnfs_free_lseg(lseg);
		pnfs_put_layout_hdr(lo);
	}
}
EXPORT_SYMBOL_GPL(pnfs_put_lseg);

/*
 * is l2 fully contained in l1?
 *   start1                             end1
 *   [----------------------------------)
 *           start2           end2
 *           [----------------)
 */
static bool
pnfs_lseg_range_contained(const struct pnfs_layout_range *l1,
		 const struct pnfs_layout_range *l2)
{
	u64 start1 = l1->offset;
	u64 end1 = pnfs_end_offset(start1, l1->length);
	u64 start2 = l2->offset;
	u64 end2 = pnfs_end_offset(start2, l2->length);

	return (start1 <= start2) && (end1 >= end2);
}

static bool pnfs_lseg_dec_and_remove_zero(struct pnfs_layout_segment *lseg,
		struct list_head *tmp_list)
{
	if (!refcount_dec_and_test(&lseg->pls_refcount))
		return false;
	pnfs_layout_remove_lseg(lseg->pls_layout, lseg);
	list_add(&lseg->pls_list, tmp_list);
	return true;
}

/* Returns 1 if lseg is removed from list, 0 otherwise */
static int mark_lseg_invalid(struct pnfs_layout_segment *lseg,
			     struct list_head *tmp_list)
{
	int rv = 0;

	if (test_and_clear_bit(NFS_LSEG_VALID, &lseg->pls_flags)) {
		/* Remove the reference keeping the lseg in the
		 * list.  It will now be removed when all
		 * outstanding io is finished.
		 */
		dprintk("%s: lseg %p ref %d\n", __func__, lseg,
			refcount_read(&lseg->pls_refcount));
		if (pnfs_lseg_dec_and_remove_zero(lseg, tmp_list))
			rv = 1;
	}
	return rv;
}

/*
 * Compare 2 layout stateid sequence ids, to see which is newer,
 * taking into account wraparound issues.
 */
static bool pnfs_seqid_is_newer(u32 s1, u32 s2)
{
	return (s32)(s1 - s2) > 0;
}
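
/*
 * Example: s1 = 2, s2 = 0xfffffffe. The unsigned subtraction wraps to 4,
 * which is positive as an s32, so seqid 2 is treated as newer than
 * 0xfffffffe despite being numerically smaller.
 */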

static bool
pnfs_should_free_range(const struct pnfs_layout_range *lseg_range,
		 const struct pnfs_layout_range *recall_range)
{
	return (recall_range->iomode == IOMODE_ANY ||
		lseg_range->iomode == recall_range->iomode) &&
	       pnfs_lseg_range_intersecting(lseg_range, recall_range);
}

static bool
pnfs_match_lseg_recall(const struct pnfs_layout_segment *lseg,
		const struct pnfs_layout_range *recall_range,
		u32 seq)
{
	if (seq != 0 && pnfs_seqid_is_newer(lseg->pls_seq, seq))
		return false;
	if (recall_range == NULL)
		return true;
	return pnfs_should_free_range(&lseg->pls_range, recall_range);
}
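
/*
 * Example: a recall with a NULL (or IOMODE_ANY, whole-file) range and
 * seq == 5 matches every lseg whose stateid seqid is at or before 5;
 * lsegs handed out after that sequence are left alone.
 */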

/**
 * pnfs_mark_matching_lsegs_invalid - tear down lsegs or mark them for later
 * @lo: layout header containing the lsegs
 * @tmp_list: list head where doomed lsegs should go
 * @recall_range: optional recall range argument to match (may be NULL)
 * @seq: only invalidate lsegs obtained prior to this sequence (may be 0)
 *
 * Walk the list of lsegs in the layout header, and tear down any that should
 * be destroyed. If "recall_range" is specified then the segment must match
 * that range. If "seq" is non-zero, then only match segments that were handed
 * out at or before that sequence.
 *
 * Returns number of matching invalid lsegs remaining in list after scanning
 * it and purging them.
 */
int
pnfs_mark_matching_lsegs_invalid(struct pnfs_layout_hdr *lo,
			    struct list_head *tmp_list,
			    const struct pnfs_layout_range *recall_range,
			    u32 seq)
{
	struct pnfs_layout_segment *lseg, *next;
	int remaining = 0;

	dprintk("%s:Begin lo %p\n", __func__, lo);

	if (list_empty(&lo->plh_segs))
		return 0;
	list_for_each_entry_safe(lseg, next, &lo->plh_segs, pls_list)
		if (pnfs_match_lseg_recall(lseg, recall_range, seq)) {
			dprintk("%s: freeing lseg %p iomode %d seq %u "
				"offset %llu length %llu\n", __func__,
				lseg, lseg->pls_range.iomode, lseg->pls_seq,
				lseg->pls_range.offset, lseg->pls_range.length);
			if (!mark_lseg_invalid(lseg, tmp_list))
				remaining++;
		}
	dprintk("%s:Return %i\n", __func__, remaining);
	return remaining;
}

static void
pnfs_free_returned_lsegs(struct pnfs_layout_hdr *lo,
		struct list_head *free_me,
		const struct pnfs_layout_range *range,
		u32 seq)
{
	struct pnfs_layout_segment *lseg, *next;

	list_for_each_entry_safe(lseg, next, &lo->plh_return_segs, pls_list) {
		if (pnfs_match_lseg_recall(lseg, range, seq))
			list_move_tail(&lseg->pls_list, free_me);
	}
}

/* note free_me must contain lsegs from a single layout_hdr */
void
pnfs_free_lseg_list(struct list_head *free_me)
{
	struct pnfs_layout_segment *lseg, *tmp;

	if (list_empty(free_me))
		return;

	list_for_each_entry_safe(lseg, tmp, free_me, pls_list) {
		list_del(&lseg->pls_list);
		pnfs_free_lseg(lseg);
	}
}

void
pnfs_destroy_layout(struct nfs_inode *nfsi)
{
	struct pnfs_layout_hdr *lo;
	LIST_HEAD(tmp_list);

	spin_lock(&nfsi->vfs_inode.i_lock);
	lo = nfsi->layout;
	if (lo) {
		pnfs_get_layout_hdr(lo);
		pnfs_mark_layout_stateid_invalid(lo, &tmp_list);
		pnfs_layout_clear_fail_bit(lo, NFS_LAYOUT_RO_FAILED);
		pnfs_layout_clear_fail_bit(lo, NFS_LAYOUT_RW_FAILED);
		spin_unlock(&nfsi->vfs_inode.i_lock);
		pnfs_free_lseg_list(&tmp_list);
		nfs_commit_inode(&nfsi->vfs_inode, 0);
		pnfs_put_layout_hdr(lo);
	} else
		spin_unlock(&nfsi->vfs_inode.i_lock);
}
EXPORT_SYMBOL_GPL(pnfs_destroy_layout);

static bool
pnfs_layout_add_bulk_destroy_list(struct inode *inode,
		struct list_head *layout_list)
{
	struct pnfs_layout_hdr *lo;
	bool ret = false;

	spin_lock(&inode->i_lock);
	lo = NFS_I(inode)->layout;
	if (lo != NULL && list_empty(&lo->plh_bulk_destroy)) {
		pnfs_get_layout_hdr(lo);
		list_add(&lo->plh_bulk_destroy, layout_list);
		ret = true;
	}
	spin_unlock(&inode->i_lock);
	return ret;
}

/* Caller must hold rcu_read_lock and clp->cl_lock */
static int
pnfs_layout_bulk_destroy_byserver_locked(struct nfs_client *clp,
		struct nfs_server *server,
		struct list_head *layout_list)
	__must_hold(&clp->cl_lock)
	__must_hold(RCU)
{
	struct pnfs_layout_hdr *lo, *next;
	struct inode *inode;

	list_for_each_entry_safe(lo, next, &server->layouts, plh_layouts) {
		if (test_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags) ||
		    test_bit(NFS_LAYOUT_INODE_FREEING, &lo->plh_flags) ||
		    !list_empty(&lo->plh_bulk_destroy))
			continue;
		/* If the sb is being destroyed, just bail */
		if (!nfs_sb_active(server->super))
			break;
		inode = igrab(lo->plh_inode);
		if (inode != NULL) {
			list_del_init(&lo->plh_layouts);
			if (pnfs_layout_add_bulk_destroy_list(inode,
						layout_list))
				continue;
			rcu_read_unlock();
			spin_unlock(&clp->cl_lock);
			iput(inode);
		} else {
			rcu_read_unlock();
			spin_unlock(&clp->cl_lock);
			set_bit(NFS_LAYOUT_INODE_FREEING, &lo->plh_flags);
		}
		nfs_sb_deactive(server->super);
		spin_lock(&clp->cl_lock);
		rcu_read_lock();
		return -EAGAIN;
	}
	return 0;
}

static int
pnfs_layout_free_bulk_destroy_list(struct list_head *layout_list,
		bool is_bulk_recall)
{
	struct pnfs_layout_hdr *lo;
	struct inode *inode;
	LIST_HEAD(lseg_list);
	int ret = 0;

	while (!list_empty(layout_list)) {
		lo = list_entry(layout_list->next, struct pnfs_layout_hdr,
				plh_bulk_destroy);
		dprintk("%s freeing layout for inode %lu\n", __func__,
			lo->plh_inode->i_ino);
		inode = lo->plh_inode;

		pnfs_layoutcommit_inode(inode, false);

		spin_lock(&inode->i_lock);
		list_del_init(&lo->plh_bulk_destroy);
		if (pnfs_mark_layout_stateid_invalid(lo, &lseg_list)) {
			if (is_bulk_recall)
				set_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags);
			ret = -EAGAIN;
		}
		spin_unlock(&inode->i_lock);
		pnfs_free_lseg_list(&lseg_list);
		/* Free all lsegs that are attached to commit buckets */
		nfs_commit_inode(inode, 0);
		pnfs_put_layout_hdr(lo);
		nfs_iput_and_deactive(inode);
	}
	return ret;
}

int
pnfs_destroy_layouts_byfsid(struct nfs_client *clp,
		struct nfs_fsid *fsid,
		bool is_recall)
{
	struct nfs_server *server;
	LIST_HEAD(layout_list);

	spin_lock(&clp->cl_lock);
	rcu_read_lock();
restart:
	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
		if (memcmp(&server->fsid, fsid, sizeof(*fsid)) != 0)
			continue;
		if (pnfs_layout_bulk_destroy_byserver_locked(clp,
				server,
				&layout_list) != 0)
			goto restart;
	}
	rcu_read_unlock();
	spin_unlock(&clp->cl_lock);

	if (list_empty(&layout_list))
		return 0;
	return pnfs_layout_free_bulk_destroy_list(&layout_list, is_recall);
}

int
pnfs_destroy_layouts_byclid(struct nfs_client *clp,
		bool is_recall)
{
	struct nfs_server *server;
	LIST_HEAD(layout_list);

	spin_lock(&clp->cl_lock);
	rcu_read_lock();
restart:
	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
		if (pnfs_layout_bulk_destroy_byserver_locked(clp,
					server,
					&layout_list) != 0)
			goto restart;
	}
	rcu_read_unlock();
	spin_unlock(&clp->cl_lock);

	if (list_empty(&layout_list))
		return 0;
	return pnfs_layout_free_bulk_destroy_list(&layout_list, is_recall);
}

/*
 * Called by the state manager to remove all layouts established under an
 * expired lease.
 */
void
pnfs_destroy_all_layouts(struct nfs_client *clp)
{
	nfs4_deviceid_mark_client_invalid(clp);
	nfs4_deviceid_purge_client(clp);

	pnfs_destroy_layouts_byclid(clp, false);
}

static void
pnfs_set_layout_cred(struct pnfs_layout_hdr *lo, const struct cred *cred)
{
	const struct cred *old;

	if (cred && cred_fscmp(lo->plh_lc_cred, cred) != 0) {
		old = xchg(&lo->plh_lc_cred, get_cred(cred));
		put_cred(old);
	}
}

/* update lo->plh_stateid with new if is more recent */
void
pnfs_set_layout_stateid(struct pnfs_layout_hdr *lo, const nfs4_stateid *new,
			const struct cred *cred, bool update_barrier)
{
	u32 oldseq, newseq, new_barrier = 0;

	oldseq = be32_to_cpu(lo->plh_stateid.seqid);
	newseq = be32_to_cpu(new->seqid);

	if (!pnfs_layout_is_valid(lo)) {
		pnfs_set_layout_cred(lo, cred);
		nfs4_stateid_copy(&lo->plh_stateid, new);
		lo->plh_barrier = newseq;
		pnfs_clear_layoutreturn_info(lo);
		clear_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags);
		return;
	}
	if (pnfs_seqid_is_newer(newseq, oldseq)) {
		nfs4_stateid_copy(&lo->plh_stateid, new);
		/*
		 * Because of wraparound, we want to keep the barrier
		 * "close" to the current seqids.
		 */
		new_barrier = newseq - atomic_read(&lo->plh_outstanding);
	}
	if (update_barrier)
		new_barrier = be32_to_cpu(new->seqid);
	else if (new_barrier == 0)
		return;
	if (pnfs_seqid_is_newer(new_barrier, lo->plh_barrier))
		lo->plh_barrier = new_barrier;
}

static bool
pnfs_layout_stateid_blocked(const struct pnfs_layout_hdr *lo,
		const nfs4_stateid *stateid)
{
	u32 seqid = be32_to_cpu(stateid->seqid);

	return !pnfs_seqid_is_newer(seqid, lo->plh_barrier);
}

/* lget is set to 1 if called from inside send_layoutget call chain */
static bool
pnfs_layoutgets_blocked(const struct pnfs_layout_hdr *lo)
{
	return lo->plh_block_lgets ||
		test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags);
}

static struct nfs_server *
pnfs_find_server(struct inode *inode, struct nfs_open_context *ctx)
{
	struct nfs_server *server;

	if (inode) {
		server = NFS_SERVER(inode);
	} else {
		struct dentry *parent_dir = dget_parent(ctx->dentry);
		server = NFS_SERVER(parent_dir->d_inode);
		dput(parent_dir);
	}
	return server;
}

static void nfs4_free_pages(struct page **pages, size_t size)
{
	int i;

	if (!pages)
		return;

	for (i = 0; i < size; i++) {
		if (!pages[i])
			break;
		__free_page(pages[i]);
	}
	kfree(pages);
}

static struct page **nfs4_alloc_pages(size_t size, gfp_t gfp_flags)
{
	struct page **pages;
	int i;

	pages = kmalloc_array(size, sizeof(struct page *), gfp_flags);
	if (!pages) {
		dprintk("%s: can't alloc array of %zu pages\n", __func__, size);
		return NULL;
	}

	for (i = 0; i < size; i++) {
		pages[i] = alloc_page(gfp_flags);
		if (!pages[i]) {
			dprintk("%s: failed to allocate page\n", __func__);
			nfs4_free_pages(pages, i);
			return NULL;
		}
	}

	return pages;
}

static struct nfs4_layoutget *
pnfs_alloc_init_layoutget_args(struct inode *ino,
	   struct nfs_open_context *ctx,
	   const nfs4_stateid *stateid,
	   const struct pnfs_layout_range *range,
	   gfp_t gfp_flags)
{
	struct nfs_server *server = pnfs_find_server(ino, ctx);
	size_t max_reply_sz = server->pnfs_curr_ld->max_layoutget_response;
	size_t max_pages = max_response_pages(server);
	struct nfs4_layoutget *lgp;

	dprintk("--> %s\n", __func__);

	lgp = kzalloc(sizeof(*lgp), gfp_flags);
	if (lgp == NULL)
		return NULL;

	if (max_reply_sz) {
		size_t npages = (max_reply_sz + PAGE_SIZE - 1) >> PAGE_SHIFT;
		if (npages < max_pages)
			max_pages = npages;
	}

	lgp->args.layout.pages = nfs4_alloc_pages(max_pages, gfp_flags);
	if (!lgp->args.layout.pages) {
		kfree(lgp);
		return NULL;
	}
	lgp->args.layout.pglen = max_pages * PAGE_SIZE;
	lgp->res.layoutp = &lgp->args.layout;

	/* Don't confuse uninitialised result and success */
	lgp->res.status = -NFS4ERR_DELAY;

	lgp->args.minlength = PAGE_SIZE;
	if (lgp->args.minlength > range->length)
		lgp->args.minlength = range->length;
	if (ino) {
		loff_t i_size = i_size_read(ino);

		if (range->iomode == IOMODE_READ) {
			if (range->offset >= i_size)
				lgp->args.minlength = 0;
			else if (i_size - range->offset < lgp->args.minlength)
				lgp->args.minlength = i_size - range->offset;
		}
1068
	}
1069 1070 1071 1072 1073
	lgp->args.maxcount = PNFS_LAYOUT_MAXSIZE;
	pnfs_copy_range(&lgp->args.range, range);
	lgp->args.type = server->pnfs_curr_ld->id;
	lgp->args.inode = ino;
	lgp->args.ctx = get_nfs_open_context(ctx);
1074
	nfs4_stateid_copy(&lgp->args.stateid, stateid);
1075
	lgp->gfp_flags = gfp_flags;
1076
	lgp->cred = get_cred(ctx->cred);
1077
	return lgp;
1078 1079
}

1080 1081 1082 1083 1084 1085 1086
void pnfs_layoutget_free(struct nfs4_layoutget *lgp)
{
	size_t max_pages = lgp->args.layout.pglen / PAGE_SIZE;

	nfs4_free_pages(lgp->args.layout.pages, max_pages);
	if (lgp->args.inode)
		pnfs_put_layout_hdr(NFS_I(lgp->args.inode)->layout);
	put_cred(lgp->cred);
	put_nfs_open_context(lgp->args.ctx);
	kfree(lgp);
}

static void pnfs_clear_layoutcommit(struct inode *inode,
		struct list_head *head)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	struct pnfs_layout_segment *lseg, *tmp;

	if (!test_and_clear_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags))
		return;
	list_for_each_entry_safe(lseg, tmp, &nfsi->layout->plh_segs, pls_list) {
		if (!test_and_clear_bit(NFS_LSEG_LAYOUTCOMMIT, &lseg->pls_flags))
			continue;
		pnfs_lseg_dec_and_remove_zero(lseg, head);
	}
}

void pnfs_layoutreturn_free_lsegs(struct pnfs_layout_hdr *lo,
		const nfs4_stateid *arg_stateid,
		const struct pnfs_layout_range *range,
		const nfs4_stateid *stateid)
{
	struct inode *inode = lo->plh_inode;
	LIST_HEAD(freeme);

	spin_lock(&inode->i_lock);
	if (!pnfs_layout_is_valid(lo) || !arg_stateid ||
	    !nfs4_stateid_match_other(&lo->plh_stateid, arg_stateid))
		goto out_unlock;
	if (stateid) {
		u32 seq = be32_to_cpu(arg_stateid->seqid);

		pnfs_mark_matching_lsegs_invalid(lo, &freeme, range, seq);
		pnfs_free_returned_lsegs(lo, &freeme, range, seq);
		pnfs_set_layout_stateid(lo, stateid, NULL, true);
	} else
		pnfs_mark_layout_stateid_invalid(lo, &freeme);
out_unlock:
	pnfs_clear_layoutreturn_waitbit(lo);
	spin_unlock(&inode->i_lock);
	pnfs_free_lseg_list(&freeme);

}
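
/*
 * Note: callers in this file invoke pnfs_prepare_layoutreturn() with
 * inode->i_lock held.  On success it sets NFS_LAYOUT_RETURN and takes a
 * reference on the layout header for the layoutreturn in flight.
 */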

static bool
pnfs_prepare_layoutreturn(struct pnfs_layout_hdr *lo,
		nfs4_stateid *stateid,
		enum pnfs_iomode *iomode)
{
	/* Serialise LAYOUTGET/LAYOUTRETURN */
	if (atomic_read(&lo->plh_outstanding) != 0)
		return false;
	if (test_and_set_bit(NFS_LAYOUT_RETURN_LOCK, &lo->plh_flags))
		return false;
	set_bit(NFS_LAYOUT_RETURN, &lo->plh_flags);
	pnfs_get_layout_hdr(lo);
	if (test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags)) {
		if (stateid != NULL) {
			nfs4_stateid_copy(stateid, &lo->plh_stateid);
			if (lo->plh_return_seq != 0)
				stateid->seqid = cpu_to_be32(lo->plh_return_seq);
		}
		if (iomode != NULL)
			*iomode = lo->plh_return_iomode;
		pnfs_clear_layoutreturn_info(lo);
		return true;
	}
	if (stateid != NULL)
		nfs4_stateid_copy(stateid, &lo->plh_stateid);
	if (iomode != NULL)
		*iomode = IOMODE_ANY;
	return true;
}

static void
pnfs_init_layoutreturn_args(struct nfs4_layoutreturn_args *args,
		struct pnfs_layout_hdr *lo,
		const nfs4_stateid *stateid,
		enum pnfs_iomode iomode)
{
	struct inode *inode = lo->plh_inode;

	args->layout_type = NFS_SERVER(inode)->pnfs_curr_ld->id;
	args->inode = inode;
	args->range.iomode = iomode;
	args->range.offset = 0;
	args->range.length = NFS4_MAX_UINT64;
	args->layout = lo;
	nfs4_stateid_copy(&args->stateid, stateid);
}

static int
pnfs_send_layoutreturn(struct pnfs_layout_hdr *lo, const nfs4_stateid *stateid,
		       enum pnfs_iomode iomode, bool sync)
{
	struct inode *ino = lo->plh_inode;
	struct pnfs_layoutdriver_type *ld = NFS_SERVER(ino)->pnfs_curr_ld;
	struct nfs4_layoutreturn *lrp;
	int status = 0;

	lrp = kzalloc(sizeof(*lrp), GFP_NOFS);
	if (unlikely(lrp == NULL)) {
		status = -ENOMEM;
		spin_lock(&ino->i_lock);
		pnfs_clear_layoutreturn_waitbit(lo);
		spin_unlock(&ino->i_lock);
		pnfs_put_layout_hdr(lo);
		goto out;
	}

	pnfs_init_layoutreturn_args(&lrp->args, lo, stateid, iomode);
	lrp->args.ld_private = &lrp->ld_private;
	lrp->clp = NFS_SERVER(ino)->nfs_client;
	lrp->cred = lo->plh_lc_cred;
	if (ld->prepare_layoutreturn)
		ld->prepare_layoutreturn(&lrp->args);

	status = nfs4_proc_layoutreturn(lrp, sync);
out:
	dprintk("<-- %s status: %d\n", __func__, status);
	return status;
}

/* Return true if layoutreturn is needed */
static bool
pnfs_layout_need_return(struct pnfs_layout_hdr *lo)
{
	struct pnfs_layout_segment *s;
	enum pnfs_iomode iomode;
	u32 seq;

	if (!test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags))
		return false;

	seq = lo->plh_return_seq;
	iomode = lo->plh_return_iomode;

	/* Defer layoutreturn until all recalled lsegs are done */
	list_for_each_entry(s, &lo->plh_segs, pls_list) {
		if (seq && pnfs_seqid_is_newer(s->pls_seq, seq))
			continue;
		if (iomode != IOMODE_ANY && s->pls_range.iomode != iomode)
			continue;
		if (test_bit(NFS_LSEG_LAYOUTRETURN, &s->pls_flags))
			return false;
	}

	return true;
}

static void pnfs_layoutreturn_before_put_layout_hdr(struct pnfs_layout_hdr *lo)
{
	struct inode *inode = lo->plh_inode;

	if (!test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags))
		return;
	spin_lock(&inode->i_lock);
	if (pnfs_layout_need_return(lo)) {
		nfs4_stateid stateid;
		enum pnfs_iomode iomode;
		bool send;

		send = pnfs_prepare_layoutreturn(lo, &stateid, &iomode);
		spin_unlock(&inode->i_lock);
		if (send) {
			/* Send an async layoutreturn so we don't deadlock */
			pnfs_send_layoutreturn(lo, &stateid, iomode, false);
		}
	} else
		spin_unlock(&inode->i_lock);
}

/*
 * Initiates a LAYOUTRETURN(FILE), and removes the pnfs_layout_hdr
 * when the layout segment list is empty.
 *
 * Note that a pnfs_layout_hdr can exist with an empty layout segment
 * list when LAYOUTGET has failed, or when LAYOUTGET succeeded, but the
 * deviceid is marked invalid.
 */
int
_pnfs_return_layout(struct inode *ino)
{
	struct pnfs_layout_hdr *lo = NULL;
	struct nfs_inode *nfsi = NFS_I(ino);
	LIST_HEAD(tmp_list);
	nfs4_stateid stateid;
	int status = 0;
	bool send, valid_layout;

	dprintk("NFS: %s for inode %lu\n", __func__, ino->i_ino);

	spin_lock(&ino->i_lock);
	lo = nfsi->layout;
	if (!lo) {
		spin_unlock(&ino->i_lock);
		dprintk("NFS: %s no layout to return\n", __func__);
		goto out;
	}
	/* Reference matched in nfs4_layoutreturn_release */
	pnfs_get_layout_hdr(lo);
	/* Is there an outstanding layoutreturn ? */
	if (test_bit(NFS_LAYOUT_RETURN_LOCK, &lo->plh_flags)) {
		spin_unlock(&ino->i_lock);
		if (wait_on_bit(&lo->plh_flags, NFS_LAYOUT_RETURN,
					TASK_UNINTERRUPTIBLE))
			goto out_put_layout_hdr;
		spin_lock(&ino->i_lock);
	}
	valid_layout = pnfs_layout_is_valid(lo);
	pnfs_clear_layoutcommit(ino, &tmp_list);
	pnfs_mark_matching_lsegs_invalid(lo, &tmp_list, NULL, 0);

	if (NFS_SERVER(ino)->pnfs_curr_ld->return_range) {
		struct pnfs_layout_range range = {
			.iomode		= IOMODE_ANY,
			.offset		= 0,
			.length		= NFS4_MAX_UINT64,
		};
		NFS_SERVER(ino)->pnfs_curr_ld->return_range(lo, &range);
	}

	/* Don't send a LAYOUTRETURN if list was initially empty */
	if (!test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags) ||
			!valid_layout) {
		spin_unlock(&ino->i_lock);
		dprintk("NFS: %s no layout segments to return\n", __func__);
		goto out_put_layout_hdr;
	}

	send = pnfs_prepare_layoutreturn(lo, &stateid, NULL);
	spin_unlock(&ino->i_lock);
	if (send)
		status = pnfs_send_layoutreturn(lo, &stateid, IOMODE_ANY, true);
out_put_layout_hdr:
	pnfs_free_lseg_list(&tmp_list);
	pnfs_put_layout_hdr(lo);
out:
	dprintk("<-- %s status: %d\n", __func__, status);
	return status;
}

int
pnfs_commit_and_return_layout(struct inode *inode)
{
	struct pnfs_layout_hdr *lo;
	int ret;

	spin_lock(&inode->i_lock);
	lo = NFS_I(inode)->layout;
	if (lo == NULL) {
		spin_unlock(&inode->i_lock);
		return 0;
	}
	pnfs_get_layout_hdr(lo);
	/* Block new layoutgets and read/write to ds */
	lo->plh_block_lgets++;
	spin_unlock(&inode->i_lock);
	filemap_fdatawait(inode->i_mapping);
	ret = pnfs_layoutcommit_inode(inode, true);
	if (ret == 0)
		ret = _pnfs_return_layout(inode);
	spin_lock(&inode->i_lock);
	lo->plh_block_lgets--;
	spin_unlock(&inode->i_lock);
	pnfs_put_layout_hdr(lo);
	return ret;
}

bool pnfs_roc(struct inode *ino,
		struct nfs4_layoutreturn_args *args,
		struct nfs4_layoutreturn_res *res,
		const struct cred *cred)
{
	struct nfs_inode *nfsi = NFS_I(ino);
	struct nfs_open_context *ctx;
	struct nfs4_state *state;
	struct pnfs_layout_hdr *lo;
	struct pnfs_layout_segment *lseg, *next;
	nfs4_stateid stateid;
	enum pnfs_iomode iomode = 0;
	bool layoutreturn = false, roc = false;
	bool skip_read = false;

	if (!nfs_have_layout(ino))
		return false;
retry:
	rcu_read_lock();
	spin_lock(&ino->i_lock);
	lo = nfsi->layout;
	if (!lo || !pnfs_layout_is_valid(lo) ||
	    test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) {
		lo = NULL;
		goto out_noroc;
	}
	pnfs_get_layout_hdr(lo);
	if (test_bit(NFS_LAYOUT_RETURN_LOCK, &lo->plh_flags)) {
		spin_unlock(&ino->i_lock);
		rcu_read_unlock();
		wait_on_bit(&lo->plh_flags, NFS_LAYOUT_RETURN,
				TASK_UNINTERRUPTIBLE);
		pnfs_put_layout_hdr(lo);
		goto retry;
	}

	/* no roc if we hold a delegation */
	if (nfs4_check_delegation(ino, FMODE_READ)) {
		if (nfs4_check_delegation(ino, FMODE_WRITE))
			goto out_noroc;
		skip_read = true;
	}

	list_for_each_entry_rcu(ctx, &nfsi->open_files, list) {
		state = ctx->state;
		if (state == NULL)
			continue;
		/* Don't return layout if there is open file state */
		if (state->state & FMODE_WRITE)
			goto out_noroc;
		if (state->state & FMODE_READ)
			skip_read = true;
	}

	list_for_each_entry_safe(lseg, next, &lo->plh_segs, pls_list) {
		if (skip_read && lseg->pls_range.iomode == IOMODE_READ)
			continue;
		/* If we are sending layoutreturn, invalidate all valid lsegs */
		if (!test_and_clear_bit(NFS_LSEG_ROC, &lseg->pls_flags))
			continue;
		/*
		 * Note: mark lseg for return so pnfs_layout_remove_lseg
		 * doesn't invalidate the layout for us.
		 */
		set_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags);
		if (!mark_lseg_invalid(lseg, &lo->plh_return_segs))
			continue;
		pnfs_set_plh_return_info(lo, lseg->pls_range.iomode, 0);
	}

	if (!test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags))
		goto out_noroc;

	/* ROC in two conditions:
	 * 1. there are ROC lsegs
	 * 2. we don't send layoutreturn
	 */
	/* lo ref dropped in pnfs_roc_release() */
	layoutreturn = pnfs_prepare_layoutreturn(lo, &stateid, &iomode);
	/* If the creds don't match, we can't compound the layoutreturn */
	if (!layoutreturn || cred_fscmp(cred, lo->plh_lc_cred) != 0)
		goto out_noroc;

	roc = layoutreturn;
	pnfs_init_layoutreturn_args(args, lo, &stateid, iomode);
	res->lrs_present = 0;
	layoutreturn = false;

out_noroc:
	spin_unlock(&ino->i_lock);
	rcu_read_unlock();
	pnfs_layoutcommit_inode(ino, true);
	if (roc) {
		struct pnfs_layoutdriver_type *ld = NFS_SERVER(ino)->pnfs_curr_ld;
		if (ld->prepare_layoutreturn)
			ld->prepare_layoutreturn(args);
		pnfs_put_layout_hdr(lo);
		return true;
	}
	if (layoutreturn)
		pnfs_send_layoutreturn(lo, &stateid, iomode, true);
	pnfs_put_layout_hdr(lo);
	return false;
}

int pnfs_roc_done(struct rpc_task *task, struct inode *inode,
		struct nfs4_layoutreturn_args **argpp,
		struct nfs4_layoutreturn_res **respp,
		int *ret)
{
	struct nfs4_layoutreturn_args *arg = *argpp;
	int retval = -EAGAIN;

	if (!arg)
		return 0;
	/* Handle Layoutreturn errors */
	switch (*ret) {
	case 0:
		retval = 0;
		break;
	case -NFS4ERR_NOMATCHING_LAYOUT:
		/* Was there an RPC level error? If not, retry */
		if (task->tk_rpc_status == 0)
			break;
		/* If the call was not sent, let caller handle it */
		if (!RPC_WAS_SENT(task))
			return 0;
		/*
		 * Otherwise, assume the call succeeded and
		 * that we need to release the layout
		 */
		*ret = 0;
		(*respp)->lrs_present = 0;
		retval = 0;
		break;
	case -NFS4ERR_DELAY:
		/* Let the caller handle the retry */
		*ret = -NFS4ERR_NOMATCHING_LAYOUT;
		return 0;
	case -NFS4ERR_OLD_STATEID:
		if (!nfs4_layout_refresh_old_stateid(&arg->stateid,
					&arg->range, inode))
			break;
		*ret = -NFS4ERR_NOMATCHING_LAYOUT;
		return -EAGAIN;
	}
	*argpp = NULL;
	*respp = NULL;
	return retval;
}

void pnfs_roc_release(struct nfs4_layoutreturn_args *args,
		struct nfs4_layoutreturn_res *res,
		int ret)
{
	struct pnfs_layout_hdr *lo = args->layout;
	const nfs4_stateid *arg_stateid = NULL;
	const nfs4_stateid *res_stateid = NULL;
	struct nfs4_xdr_opaque_data *ld_private = args->ld_private;

	switch (ret) {
	case -NFS4ERR_NOMATCHING_LAYOUT:
		break;
	case 0:
		if (res->lrs_present)
			res_stateid = &res->stateid;
		/* Fallthrough */
	default:
		arg_stateid = &args->stateid;
	}
	pnfs_layoutreturn_free_lsegs(lo, arg_stateid, &args->range,
			res_stateid);
	if (ld_private && ld_private->ops && ld_private->ops->free)
		ld_private->ops->free(ld_private);
	pnfs_put_layout_hdr(lo);
	trace_nfs4_layoutreturn_on_close(args->inode, 0);
}

bool pnfs_wait_on_layoutreturn(struct inode *ino, struct rpc_task *task)
{
	struct nfs_inode *nfsi = NFS_I(ino);
	struct pnfs_layout_hdr *lo;
	bool sleep = false;

	/* we might not have grabbed lo reference. so need to check under
	 * i_lock */
	spin_lock(&ino->i_lock);
	lo = nfsi->layout;
	if (lo && test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags)) {
		rpc_sleep_on(&NFS_SERVER(ino)->roc_rpcwaitq, task, NULL);
		sleep = true;
	}
	spin_unlock(&ino->i_lock);
	return sleep;
}

/*
 * Compare two layout segments for sorting into layout cache.
 * We want to preferentially return RW over RO layouts, so ensure those
 * are seen first.
 */
static s64
pnfs_lseg_range_cmp(const struct pnfs_layout_range *l1,
	   const struct pnfs_layout_range *l2)
{
	s64 d;

	/* high offset > low offset */
	d = l1->offset - l2->offset;
	if (d)
		return d;

	/* short length > long length */
	d = l2->length - l1->length;
	if (d)
		return d;

	/* read > read/write */
	return (int)(l1->iomode == IOMODE_READ) - (int)(l2->iomode == IOMODE_READ);
}
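
/*
 * Example: with the ordering above, an IOMODE_RW lseg covering [0, 8192)
 * sorts ahead of an IOMODE_READ lseg covering [0, 4096): lower offsets
 * come first, then longer lengths, with RW preferred over READ.
 */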

static bool
pnfs_lseg_range_is_after(const struct pnfs_layout_range *l1,
		const struct pnfs_layout_range *l2)
{
	return pnfs_lseg_range_cmp(l1, l2) > 0;
}

static bool
pnfs_lseg_no_merge(struct pnfs_layout_segment *lseg,
		struct pnfs_layout_segment *old)
{
	return false;
}

void
pnfs_generic_layout_insert_lseg(struct pnfs_layout_hdr *lo,
		   struct pnfs_layout_segment *lseg,
		   bool (*is_after)(const struct pnfs_layout_range *,
			   const struct pnfs_layout_range *),
		   bool (*do_merge)(struct pnfs_layout_segment *,
			   struct pnfs_layout_segment *),
		   struct list_head *free_me)
{
	struct pnfs_layout_segment *lp, *tmp;

	dprintk("%s:Begin\n", __func__);

	list_for_each_entry_safe(lp, tmp, &lo->plh_segs, pls_list) {
		if (test_bit(NFS_LSEG_VALID, &lp->pls_flags) == 0)
			continue;
		if (do_merge(lseg, lp)) {
			mark_lseg_invalid(lp, free_me);
			continue;
		}
		if (is_after(&lseg->pls_range, &lp->pls_range))
			continue;
		list_add_tail(&lseg->pls_list, &lp->pls_list);
		dprintk("%s: inserted lseg %p "
			"iomode %d offset %llu length %llu before "
			"lp %p iomode %d offset %llu length %llu\n",
			__func__, lseg, lseg->pls_range.iomode,
			lseg->pls_range.offset, lseg->pls_range.length,
			lp, lp->pls_range.iomode, lp->pls_range.offset,
			lp->pls_range.length);
		goto out;
	}
	list_add_tail(&lseg->pls_list, &lo->plh_segs);
	dprintk("%s: inserted lseg %p "
		"iomode %d offset %llu length %llu at tail\n",
		__func__, lseg, lseg->pls_range.iomode,
		lseg->pls_range.offset, lseg->pls_range.length);
out:
	pnfs_get_layout_hdr(lo);

	dprintk("%s:Return\n", __func__);
}
EXPORT_SYMBOL_GPL(pnfs_generic_layout_insert_lseg);

static void
pnfs_layout_insert_lseg(struct pnfs_layout_hdr *lo,
		   struct pnfs_layout_segment *lseg,
		   struct list_head *free_me)
{
	struct inode *inode = lo->plh_inode;
	struct pnfs_layoutdriver_type *ld = NFS_SERVER(inode)->pnfs_curr_ld;

	if (ld->add_lseg != NULL)
		ld->add_lseg(lo, lseg, free_me);
	else
		pnfs_generic_layout_insert_lseg(lo, lseg,
				pnfs_lseg_range_is_after,
				pnfs_lseg_no_merge,
				free_me);
}

static struct pnfs_layout_hdr *
alloc_init_layout_hdr(struct inode *ino,
		      struct nfs_open_context *ctx,
		      gfp_t gfp_flags)
{
	struct pnfs_layout_hdr *lo;

	lo = pnfs_alloc_layout_hdr(ino, gfp_flags);
	if (!lo)
		return NULL;
	refcount_set(&lo->plh_refcount, 1);
	INIT_LIST_HEAD(&lo->plh_layouts);
	INIT_LIST_HEAD(&lo->plh_segs);
	INIT_LIST_HEAD(&lo->plh_return_segs);
	INIT_LIST_HEAD(&lo->plh_bulk_destroy);
	lo->plh_inode = ino;
	lo->plh_lc_cred = get_cred(ctx->cred);
	lo->plh_flags |= 1 << NFS_LAYOUT_INVALID_STID;
	return lo;
}

static struct pnfs_layout_hdr *
pnfs_find_alloc_layout(struct inode *ino,
		       struct nfs_open_context *ctx,
		       gfp_t gfp_flags)
	__releases(&ino->i_lock)
	__acquires(&ino->i_lock)
{
	struct nfs_inode *nfsi = NFS_I(ino);
	struct pnfs_layout_hdr *new = NULL;

	dprintk("%s Begin ino=%p layout=%p\n", __func__, ino, nfsi->layout);

	if (nfsi->layout != NULL)
		goto out_existing;
	spin_unlock(&ino->i_lock);
	new = alloc_init_layout_hdr(ino, ctx, gfp_flags);
	spin_lock(&ino->i_lock);

	if (likely(nfsi->layout == NULL)) {	/* Won the race? */
		nfsi->layout = new;
		return new;
	} else if (new != NULL)
		pnfs_free_layout_hdr(new);
out_existing:
	pnfs_get_layout_hdr(nfsi->layout);
	return nfsi->layout;
}

/*
 * iomode matching rules:
 * iomode	lseg	strict match
 *                      iomode
 * -----	-----	------ -----
 * ANY		READ	N/A    true
 * ANY		RW	N/A    true
 * RW		READ	N/A    false
 * RW		RW	N/A    true
 * READ		READ	N/A    true
 * READ		RW	true   false
 * READ		RW	false  true
 */
static bool
pnfs_lseg_range_match(const struct pnfs_layout_range *ls_range,
		 const struct pnfs_layout_range *range,
		 bool strict_iomode)
{
	struct pnfs_layout_range range1;

	if ((range->iomode == IOMODE_RW &&
	     ls_range->iomode != IOMODE_RW) ||
	    (range->iomode != ls_range->iomode &&
	     strict_iomode) ||
	    !pnfs_lseg_range_intersecting(ls_range, range))
		return false;

	/* range1 covers only the first byte in the range */
	range1 = *range;
	range1.length = 1;
	return pnfs_lseg_range_contained(ls_range, &range1);
}

/*
 * lookup range in layout
 */
static struct pnfs_layout_segment *
pnfs_find_lseg(struct pnfs_layout_hdr *lo,
		struct pnfs_layout_range *range,
		bool strict_iomode)
{
	struct pnfs_layout_segment *lseg, *ret = NULL;

	dprintk("%s:Begin\n", __func__);

	list_for_each_entry(lseg, &lo->plh_segs, pls_list) {
		if (test_bit(NFS_LSEG_VALID, &lseg->pls_flags) &&
		    pnfs_lseg_range_match(&lseg->pls_range, range,
					  strict_iomode)) {
			ret = pnfs_get_lseg(lseg);
			break;
		}
	}

	dprintk("%s:Return lseg %p ref %d\n",
		__func__, ret, ret ? refcount_read(&ret->pls_refcount) : 0);
	return ret;
}

/*
 * Use mdsthreshold hints set at each OPEN to determine if I/O should go
 * to the MDS or over pNFS
 *
 * The nfs_inode read_io and write_io fields are cumulative counters reset
 * when there are no layout segments. Note that in pnfs_update_layout iomode
 * is set to IOMODE_READ for a READ request, and set to IOMODE_RW for a
 * WRITE request.
 *
 * A return of true means use MDS I/O.
 *
 * From rfc 5661:
 * If a file's size is smaller than the file size threshold, data accesses
 * SHOULD be sent to the metadata server.  If an I/O request has a length that
 * is below the I/O size threshold, the I/O SHOULD be sent to the metadata
 * server.  If both file size and I/O size are provided, the client SHOULD
 * reach or exceed both thresholds before sending its read or write
 * requests to the data server.
 */
static bool pnfs_within_mdsthreshold(struct nfs_open_context *ctx,
				     struct inode *ino, int iomode)
{
	struct nfs4_threshold *t = ctx->mdsthreshold;
	struct nfs_inode *nfsi = NFS_I(ino);
	loff_t fsize = i_size_read(ino);
	bool size = false, size_set = false, io = false, io_set = false, ret = false;

	if (t == NULL)
		return ret;

	dprintk("%s bm=0x%x rd_sz=%llu wr_sz=%llu rd_io=%llu wr_io=%llu\n",
		__func__, t->bm, t->rd_sz, t->wr_sz, t->rd_io_sz, t->wr_io_sz);

	switch (iomode) {
	case IOMODE_READ:
		if (t->bm & THRESHOLD_RD) {
			dprintk("%s fsize %llu\n", __func__, fsize);
			size_set = true;
			if (fsize < t->rd_sz)
				size = true;
		}
		if (t->bm & THRESHOLD_RD_IO) {
			dprintk("%s nfsi->read_io %llu\n", __func__,
				nfsi->read_io);
			io_set = true;
			if (nfsi->read_io < t->rd_io_sz)
				io = true;
		}
		break;
	case IOMODE_RW:
		if (t->bm & THRESHOLD_WR) {
			dprintk("%s fsize %llu\n", __func__, fsize);
			size_set = true;
			if (fsize < t->wr_sz)
				size = true;
		}
		if (t->bm & THRESHOLD_WR_IO) {
			dprintk("%s nfsi->write_io %llu\n", __func__,
				nfsi->write_io);
			io_set = true;
			if (nfsi->write_io < t->wr_io_sz)
				io = true;
		}
		break;
	}
	if (size_set && io_set) {
		if (size && io)
			ret = true;
	} else if (size || io)
		ret = true;

	dprintk("<-- %s size %d io %d ret %d\n", __func__, size, io, ret);
	return ret;
}
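
/*
 * Worked example (illustrative numbers only): with THRESHOLD_RD and
 * THRESHOLD_RD_IO both set, rd_sz = 1 MB and rd_io_sz = 32 KB, a READ of a
 * 4 KB file while nfsi->read_io is 8 KB falls below both thresholds, so
 * pnfs_within_mdsthreshold() returns true and the I/O goes to the MDS.
 * If only one of the two bits were set, falling below that single
 * threshold would be enough.
 */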

static int pnfs_prepare_to_retry_layoutget(struct pnfs_layout_hdr *lo)
{
	/*
	 * send layoutcommit as it can hold up layoutreturn due to lseg
	 * reference
	 */
	pnfs_layoutcommit_inode(lo->plh_inode, false);
	return wait_on_bit_action(&lo->plh_flags, NFS_LAYOUT_RETURN,
				   nfs_wait_bit_killable,
				   TASK_KILLABLE);
}

static void nfs_layoutget_begin(struct pnfs_layout_hdr *lo)
{
	atomic_inc(&lo->plh_outstanding);
}

static void nfs_layoutget_end(struct pnfs_layout_hdr *lo)
{
	if (atomic_dec_and_test(&lo->plh_outstanding))
		wake_up_var(&lo->plh_outstanding);
}

static void pnfs_clear_first_layoutget(struct pnfs_layout_hdr *lo)
{
	unsigned long *bitlock = &lo->plh_flags;

	clear_bit_unlock(NFS_LAYOUT_FIRST_LAYOUTGET, bitlock);
	smp_mb__after_atomic();
	wake_up_bit(bitlock, NFS_LAYOUT_FIRST_LAYOUTGET);
}

static void _add_to_server_list(struct pnfs_layout_hdr *lo,
				struct nfs_server *server)
{
	if (list_empty(&lo->plh_layouts)) {
		struct nfs_client *clp = server->nfs_client;

		/* The lo must be on the clp list if there is any
		 * chance of a CB_LAYOUTRECALL(FILE) coming in.
		 */
		spin_lock(&clp->cl_lock);
		if (list_empty(&lo->plh_layouts))
			list_add_tail(&lo->plh_layouts, &server->layouts);
		spin_unlock(&clp->cl_lock);
	}
}

/*
 * Layout segment is retrieved from the server if not cached.
 * The appropriate layout segment is referenced and returned to the caller.
 */
struct pnfs_layout_segment *
pnfs_update_layout(struct inode *ino,
		   struct nfs_open_context *ctx,
		   loff_t pos,
		   u64 count,
		   enum pnfs_iomode iomode,
		   bool strict_iomode,
		   gfp_t gfp_flags)
{
	struct pnfs_layout_range arg = {
		.iomode = iomode,
		.offset = pos,
		.length = count,
	};
	unsigned pg_offset;
	struct nfs_server *server = NFS_SERVER(ino);
	struct nfs_client *clp = server->nfs_client;
	struct pnfs_layout_hdr *lo = NULL;
	struct pnfs_layout_segment *lseg = NULL;
	struct nfs4_layoutget *lgp;
	nfs4_stateid stateid;
	long timeout = 0;
	unsigned long giveup = jiffies + (clp->cl_lease_time << 1);
	bool first;

	if (!pnfs_enabled_sb(NFS_SERVER(ino))) {
		trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
				 PNFS_UPDATE_LAYOUT_NO_PNFS);
		goto out;
	}

	if (pnfs_within_mdsthreshold(ctx, ino, iomode)) {
		trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
				 PNFS_UPDATE_LAYOUT_MDSTHRESH);
		goto out;
	}

lookup_again:
	lseg = ERR_PTR(nfs4_client_recover_expired_lease(clp));
	if (IS_ERR(lseg))
		goto out;
	first = false;
	spin_lock(&ino->i_lock);
	lo = pnfs_find_alloc_layout(ino, ctx, gfp_flags);
	if (lo == NULL) {
		spin_unlock(&ino->i_lock);
		trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
				 PNFS_UPDATE_LAYOUT_NOMEM);
		goto out;
	}

	/* Do we even need to bother with this? */
	if (test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) {
		trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
				 PNFS_UPDATE_LAYOUT_BULK_RECALL);
		dprintk("%s matches recall, use MDS\n", __func__);
		goto out_unlock;
	}

	/* if LAYOUTGET already failed once we don't try again */
	if (pnfs_layout_io_test_failed(lo, iomode)) {
		trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
				 PNFS_UPDATE_LAYOUT_IO_TEST_FAIL);
		goto out_unlock;
	}

	/*
	 * If the layout segment list is empty, but there are outstanding
	 * layoutget calls, then they might be subject to a layoutrecall.
	 */
	if (list_empty(&lo->plh_segs) &&
	    atomic_read(&lo->plh_outstanding) != 0) {
		spin_unlock(&ino->i_lock);
		lseg = ERR_PTR(wait_var_event_killable(&lo->plh_outstanding,
					!atomic_read(&lo->plh_outstanding)));
		if (IS_ERR(lseg))
			goto out_put_layout_hdr;
		pnfs_put_layout_hdr(lo);
		goto lookup_again;
	}

	lseg = pnfs_find_lseg(lo, &arg, strict_iomode);
	if (lseg) {
		trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
				PNFS_UPDATE_LAYOUT_FOUND_CACHED);
		goto out_unlock;
	}

	/*
	 * Choose a stateid for the LAYOUTGET. If we don't have a layout
	 * stateid, or it has been invalidated, then we must use the open
	 * stateid.
	 */
	if (test_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags)) {
		int status;

		/*
		 * The first layoutget for the file. Need to serialize per
		 * RFC 5661 Errata 3208.
		 */
		if (test_and_set_bit(NFS_LAYOUT_FIRST_LAYOUTGET,
				     &lo->plh_flags)) {
			spin_unlock(&ino->i_lock);
			lseg = ERR_PTR(wait_on_bit(&lo->plh_flags,
						NFS_LAYOUT_FIRST_LAYOUTGET,
						TASK_KILLABLE));
			if (IS_ERR(lseg))
				goto out_put_layout_hdr;
			pnfs_put_layout_hdr(lo);
			dprintk("%s retrying\n", __func__);
			goto lookup_again;
		}

		first = true;
		status = nfs4_select_rw_stateid(ctx->state,
					iomode == IOMODE_RW ? FMODE_WRITE : FMODE_READ,
					NULL, &stateid, NULL);
		if (status != 0) {
			lseg = ERR_PTR(status);
			trace_pnfs_update_layout(ino, pos, count,
					iomode, lo, lseg,
					PNFS_UPDATE_LAYOUT_INVALID_OPEN);
			spin_unlock(&ino->i_lock);
			nfs4_schedule_stateid_recovery(server, ctx->state);
			pnfs_clear_first_layoutget(lo);
			pnfs_put_layout_hdr(lo);
			goto lookup_again;
		}
	} else {
		nfs4_stateid_copy(&stateid, &lo->plh_stateid);
	}

	/*
	 * Because we free lsegs before sending LAYOUTRETURN, we need to wait
	 * for LAYOUTRETURN even if first is true.
	 */
	if (test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags)) {
		spin_unlock(&ino->i_lock);
		dprintk("%s wait for layoutreturn\n", __func__);
		lseg = ERR_PTR(pnfs_prepare_to_retry_layoutget(lo));
		if (!IS_ERR(lseg)) {
			if (first)
				pnfs_clear_first_layoutget(lo);
			pnfs_put_layout_hdr(lo);
			dprintk("%s retrying\n", __func__);
			trace_pnfs_update_layout(ino, pos, count, iomode, lo,
					lseg, PNFS_UPDATE_LAYOUT_RETRY);
			goto lookup_again;
		}
		trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
				PNFS_UPDATE_LAYOUT_RETURN);
		goto out_put_layout_hdr;
	}

	if (pnfs_layoutgets_blocked(lo)) {
		trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
				PNFS_UPDATE_LAYOUT_BLOCKED);
		goto out_unlock;
	}
	nfs_layoutget_begin(lo);
	spin_unlock(&ino->i_lock);

	_add_to_server_list(lo, server);

	pg_offset = arg.offset & ~PAGE_MASK;
	if (pg_offset) {
		arg.offset -= pg_offset;
		arg.length += pg_offset;
	}
	if (arg.length != NFS4_MAX_UINT64)
		arg.length = PAGE_ALIGN(arg.length);

	lgp = pnfs_alloc_init_layoutget_args(ino, ctx, &stateid, &arg, gfp_flags);
	if (!lgp) {
		trace_pnfs_update_layout(ino, pos, count, iomode, lo, NULL,
					 PNFS_UPDATE_LAYOUT_NOMEM);
		nfs_layoutget_end(lo);
		goto out_put_layout_hdr;
	}

	lseg = nfs4_proc_layoutget(lgp, &timeout);
	trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
				 PNFS_UPDATE_LAYOUT_SEND_LAYOUTGET);
	nfs_layoutget_end(lo);
	if (IS_ERR(lseg)) {
		switch (PTR_ERR(lseg)) {
		case -EBUSY:
			if (time_after(jiffies, giveup))
				lseg = NULL;
			break;
		case -ERECALLCONFLICT:
		case -EAGAIN:
			break;
		default:
			if (!nfs_error_is_fatal(PTR_ERR(lseg))) {
				pnfs_layout_clear_fail_bit(lo, pnfs_iomode_to_fail_bit(iomode));
				lseg = NULL;
			}
			goto out_put_layout_hdr;
		}
		if (lseg) {
			if (first)
				pnfs_clear_first_layoutget(lo);
			trace_pnfs_update_layout(ino, pos, count,
				iomode, lo, lseg, PNFS_UPDATE_LAYOUT_RETRY);
			pnfs_put_layout_hdr(lo);
			goto lookup_again;
		}
	} else {
		pnfs_layout_clear_fail_bit(lo, pnfs_iomode_to_fail_bit(iomode));
	}

out_put_layout_hdr:
	if (first)
		pnfs_clear_first_layoutget(lo);
	trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
				 PNFS_UPDATE_LAYOUT_EXIT);
	pnfs_put_layout_hdr(lo);
out:
	dprintk("%s: inode %s/%llu pNFS layout segment %s for "
			"(%s, offset: %llu, length: %llu)\n",
			__func__, ino->i_sb->s_id,
			(unsigned long long)NFS_FILEID(ino),
			IS_ERR_OR_NULL(lseg) ? "not found" : "found",
			iomode == IOMODE_RW ? "read/write" : "read-only",
			(unsigned long long)pos,
			(unsigned long long)count);
	return lseg;
out_unlock:
	spin_unlock(&ino->i_lock);
	goto out_put_layout_hdr;
}
EXPORT_SYMBOL_GPL(pnfs_update_layout);

static bool
pnfs_sanity_check_layout_range(struct pnfs_layout_range *range)
{
	switch (range->iomode) {
	case IOMODE_READ:
	case IOMODE_RW:
		break;
	default:
		return false;
	}
	if (range->offset == NFS4_MAX_UINT64)
		return false;
	if (range->length == 0)
		return false;
	if (range->length != NFS4_MAX_UINT64 &&
	    range->length > NFS4_MAX_UINT64 - range->offset)
		return false;
	return true;
}

static struct pnfs_layout_hdr *
_pnfs_grab_empty_layout(struct inode *ino, struct nfs_open_context *ctx)
{
	struct pnfs_layout_hdr *lo;

	spin_lock(&ino->i_lock);
	lo = pnfs_find_alloc_layout(ino, ctx, GFP_KERNEL);
	if (!lo)
		goto out_unlock;
	if (!test_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags))
		goto out_unlock;
	if (test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags))
		goto out_unlock;
	if (pnfs_layoutgets_blocked(lo))
		goto out_unlock;
	if (test_and_set_bit(NFS_LAYOUT_FIRST_LAYOUTGET, &lo->plh_flags))
		goto out_unlock;
	nfs_layoutget_begin(lo);
	spin_unlock(&ino->i_lock);
	_add_to_server_list(lo, NFS_SERVER(ino));
	return lo;

out_unlock:
	spin_unlock(&ino->i_lock);
	pnfs_put_layout_hdr(lo);
	return NULL;
}

static void _lgopen_prepare_attached(struct nfs4_opendata *data,
				     struct nfs_open_context *ctx)
{
	struct inode *ino = data->dentry->d_inode;
	struct pnfs_layout_range rng = {
		.iomode = (data->o_arg.fmode & FMODE_WRITE) ?
			  IOMODE_RW: IOMODE_READ,
		.offset = 0,
		.length = NFS4_MAX_UINT64,
	};
	struct nfs4_layoutget *lgp;
	struct pnfs_layout_hdr *lo;

	/* Heuristic: don't send layoutget if we have cached data */
	if (rng.iomode == IOMODE_READ &&
	   (i_size_read(ino) == 0 || ino->i_mapping->nrpages != 0))
		return;

	lo = _pnfs_grab_empty_layout(ino, ctx);
	if (!lo)
		return;
	lgp = pnfs_alloc_init_layoutget_args(ino, ctx, &current_stateid,
					     &rng, GFP_KERNEL);
	if (!lgp) {
		pnfs_clear_first_layoutget(lo);
		pnfs_put_layout_hdr(lo);
		return;
	}
	data->lgp = lgp;
	data->o_arg.lg_args = &lgp->args;
	data->o_res.lg_res = &lgp->res;
}

static void _lgopen_prepare_floating(struct nfs4_opendata *data,
				     struct nfs_open_context *ctx)
{
	struct pnfs_layout_range rng = {
		.iomode = (data->o_arg.fmode & FMODE_WRITE) ?
			  IOMODE_RW: IOMODE_READ,
		.offset = 0,
		.length = NFS4_MAX_UINT64,
	};
	struct nfs4_layoutget *lgp;

	lgp = pnfs_alloc_init_layoutget_args(NULL, ctx, &current_stateid,
					     &rng, GFP_KERNEL);
	if (!lgp)
		return;
	data->lgp = lgp;
	data->o_arg.lg_args = &lgp->args;
	data->o_res.lg_res = &lgp->res;
}

void pnfs_lgopen_prepare(struct nfs4_opendata *data,
			 struct nfs_open_context *ctx)
{
	struct nfs_server *server = NFS_SERVER(data->dir->d_inode);

	if (!(pnfs_enabled_sb(server) &&
	      server->pnfs_curr_ld->flags & PNFS_LAYOUTGET_ON_OPEN))
		return;
	/* Could check on max_ops, but currently hardcoded high enough */
	if (!nfs_server_capable(data->dir->d_inode, NFS_CAP_LGOPEN))
		return;
	if (data->state)
		_lgopen_prepare_attached(data, ctx);
	else
		_lgopen_prepare_floating(data, ctx);
}

void pnfs_parse_lgopen(struct inode *ino, struct nfs4_layoutget *lgp,
		       struct nfs_open_context *ctx)
{
	struct pnfs_layout_hdr *lo;
	struct pnfs_layout_segment *lseg;
	struct nfs_server *srv = NFS_SERVER(ino);
	u32 iomode;

	if (!lgp)
		return;
	dprintk("%s: entered with status %i\n", __func__, lgp->res.status);
	if (lgp->res.status) {
		switch (lgp->res.status) {
		default:
			break;
		/*
		 * Halt lgopen attempts if the server doesn't recognise
		 * the "current stateid" value, the layout type, or the
		 * layoutget operation as being valid.
		 * Also if it complains about too many ops in the compound
		 * or of the request/reply being too big.
		 */
		case -NFS4ERR_BAD_STATEID:
		case -NFS4ERR_NOTSUPP:
		case -NFS4ERR_REP_TOO_BIG:
		case -NFS4ERR_REP_TOO_BIG_TO_CACHE:
		case -NFS4ERR_REQ_TOO_BIG:
		case -NFS4ERR_TOO_MANY_OPS:
		case -NFS4ERR_UNKNOWN_LAYOUTTYPE:
			srv->caps &= ~NFS_CAP_LGOPEN;
		}
		return;
	}
	if (!lgp->args.inode) {
		lo = _pnfs_grab_empty_layout(ino, ctx);
		if (!lo)
			return;
		lgp->args.inode = ino;
	} else
		lo = NFS_I(lgp->args.inode)->layout;

	lseg = pnfs_layout_process(lgp);
	if (!IS_ERR(lseg)) {
		iomode = lgp->args.range.iomode;
		pnfs_layout_clear_fail_bit(lo, pnfs_iomode_to_fail_bit(iomode));
		pnfs_put_lseg(lseg);
	}
}

void nfs4_lgopen_release(struct nfs4_layoutget *lgp)
{
	if (lgp != NULL) {
		struct inode *inode = lgp->args.inode;
		if (inode) {
			struct pnfs_layout_hdr *lo = NFS_I(inode)->layout;
			pnfs_clear_first_layoutget(lo);
			nfs_layoutget_end(lo);
		}
		pnfs_layoutget_free(lgp);
	}
}

struct pnfs_layout_segment *
pnfs_layout_process(struct nfs4_layoutget *lgp)
{
	struct pnfs_layout_hdr *lo = NFS_I(lgp->args.inode)->layout;
	struct nfs4_layoutget_res *res = &lgp->res;
	struct pnfs_layout_segment *lseg;
	struct inode *ino = lo->plh_inode;
	LIST_HEAD(free_me);

	if (!pnfs_sanity_check_layout_range(&res->range))
		return ERR_PTR(-EINVAL);

	/* Inject layout blob into I/O device driver */
	lseg = NFS_SERVER(ino)->pnfs_curr_ld->alloc_lseg(lo, res, lgp->gfp_flags);
	if (IS_ERR_OR_NULL(lseg)) {
		if (!lseg)
			lseg = ERR_PTR(-ENOMEM);

		dprintk("%s: Could not allocate layout: error %ld\n",
		       __func__, PTR_ERR(lseg));
		return lseg;
	}

	pnfs_init_lseg(lo, lseg, &res->range, &res->stateid);

	spin_lock(&ino->i_lock);
	if (pnfs_layoutgets_blocked(lo)) {
		dprintk("%s forget reply due to state\n", __func__);
		goto out_forget;
	}

	if (!pnfs_layout_is_valid(lo)) {
		/* We have a completely new layout */
		pnfs_set_layout_stateid(lo, &res->stateid, lgp->cred, true);
	} else if (nfs4_stateid_match_other(&lo->plh_stateid, &res->stateid)) {
		/* existing state ID, make sure the sequence number matches. */
		if (pnfs_layout_stateid_blocked(lo, &res->stateid)) {
			dprintk("%s forget reply due to sequence\n", __func__);
			goto out_forget;
		}
		pnfs_set_layout_stateid(lo, &res->stateid, lgp->cred, false);
	} else {
		/*
		 * We got an entirely new state ID.  Mark all segments for the
		 * inode invalid, and retry the layoutget
		 */
		pnfs_mark_layout_stateid_invalid(lo, &free_me);
		goto out_forget;
	}

	pnfs_get_lseg(lseg);
	pnfs_layout_insert_lseg(lo, lseg, &free_me);


	if (res->return_on_close)
		set_bit(NFS_LSEG_ROC, &lseg->pls_flags);

	spin_unlock(&ino->i_lock);
	pnfs_free_lseg_list(&free_me);
	return lseg;

out_forget:
	spin_unlock(&ino->i_lock);
	lseg->pls_layout = lo;
	NFS_SERVER(ino)->pnfs_curr_ld->free_lseg(lseg);
	return ERR_PTR(-EAGAIN);
}

static int
mark_lseg_invalid_or_return(struct pnfs_layout_segment *lseg,
		struct list_head *tmp_list)
{
	if (!mark_lseg_invalid(lseg, tmp_list))
		return 0;
	pnfs_cache_lseg_for_layoutreturn(lseg->pls_layout, lseg);
	return 1;
}

/**
 * pnfs_mark_matching_lsegs_return - Free or return matching layout segments
 * @lo: pointer to layout header
 * @tmp_list: list header to be used with pnfs_free_lseg_list()
 * @return_range: describe layout segment ranges to be returned
 * @seq: stateid seqid to match
 *
 * This function is mainly intended for use by layoutrecall. It attempts
 * to free the layout segment immediately, or else to mark it for return
 * as soon as its reference count drops to zero.
 *
 * Returns
 * - 0: a layoutreturn needs to be scheduled.
 * - EBUSY: there are layout segment that are still in use.
 * - ENOENT: there are no layout segments that need to be returned.
 */
int
pnfs_mark_matching_lsegs_return(struct pnfs_layout_hdr *lo,
				struct list_head *tmp_list,
				const struct pnfs_layout_range *return_range,
				u32 seq)
{
	struct pnfs_layout_segment *lseg, *next;
	int remaining = 0;

	dprintk("%s:Begin lo %p\n", __func__, lo);

	assert_spin_locked(&lo->plh_inode->i_lock);

	list_for_each_entry_safe(lseg, next, &lo->plh_segs, pls_list)
		if (pnfs_match_lseg_recall(lseg, return_range, seq)) {
			dprintk("%s: marking lseg %p iomode %d "
				"offset %llu length %llu\n", __func__,
				lseg, lseg->pls_range.iomode,
				lseg->pls_range.offset,
				lseg->pls_range.length);
			if (mark_lseg_invalid_or_return(lseg, tmp_list))
				continue;
			remaining++;
			set_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags);
		}

	if (remaining) {
		pnfs_set_plh_return_info(lo, return_range->iomode, seq);
		return -EBUSY;
	}

	if (!list_empty(&lo->plh_return_segs)) {
		pnfs_set_plh_return_info(lo, return_range->iomode, seq);
		return 0;
	}

	return -ENOENT;
}

void pnfs_error_mark_layout_for_return(struct inode *inode,
				       struct pnfs_layout_segment *lseg)
{
	struct pnfs_layout_hdr *lo = NFS_I(inode)->layout;
	struct pnfs_layout_range range = {
		.iomode = lseg->pls_range.iomode,
		.offset = 0,
		.length = NFS4_MAX_UINT64,
	};
	bool return_now = false;

	spin_lock(&inode->i_lock);
	if (!pnfs_layout_is_valid(lo)) {
		spin_unlock(&inode->i_lock);
		return;
	}
	pnfs_set_plh_return_info(lo, range.iomode, 0);
	/*
	 * mark all matching lsegs so that we are sure to have no live
	 * segments at hand when sending layoutreturn. See pnfs_put_lseg()
	 * for how it works.
	 */
	if (pnfs_mark_matching_lsegs_return(lo, &lo->plh_return_segs, &range, 0) != -EBUSY) {
		nfs4_stateid stateid;
		enum pnfs_iomode iomode;

		return_now = pnfs_prepare_layoutreturn(lo, &stateid, &iomode);
		spin_unlock(&inode->i_lock);
		if (return_now)
			pnfs_send_layoutreturn(lo, &stateid, iomode, false);
	} else {
		spin_unlock(&inode->i_lock);
		nfs_commit_inode(inode, 0);
	}
}
EXPORT_SYMBOL_GPL(pnfs_error_mark_layout_for_return);

void
pnfs_generic_pg_check_layout(struct nfs_pageio_descriptor *pgio)
{
	if (pgio->pg_lseg == NULL ||
	    test_bit(NFS_LSEG_VALID, &pgio->pg_lseg->pls_flags))
		return;
	pnfs_put_lseg(pgio->pg_lseg);
	pgio->pg_lseg = NULL;
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_check_layout);

/*
 * Check for any intersection between the request and the pgio->pg_lseg,
 * and if none, put this pgio->pg_lseg away.
 */
static void
pnfs_generic_pg_check_range(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
{
	if (pgio->pg_lseg && !pnfs_lseg_request_intersecting(pgio->pg_lseg, req)) {
		pnfs_put_lseg(pgio->pg_lseg);
		pgio->pg_lseg = NULL;
	}
}

void
pnfs_generic_pg_init_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
{
	u64 rd_size = req->wb_bytes;

	pnfs_generic_pg_check_layout(pgio);
	pnfs_generic_pg_check_range(pgio, req);
	if (pgio->pg_lseg == NULL) {
		if (pgio->pg_dreq == NULL)
			rd_size = i_size_read(pgio->pg_inode) - req_offset(req);
		else
			rd_size = nfs_dreq_bytes_left(pgio->pg_dreq);

		pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
						   nfs_req_openctx(req),
						   req_offset(req),
						   rd_size,
						   IOMODE_READ,
						   false,
						   GFP_KERNEL);
		if (IS_ERR(pgio->pg_lseg)) {
			pgio->pg_error = PTR_ERR(pgio->pg_lseg);
			pgio->pg_lseg = NULL;
			return;
		}
	}
	/* If no lseg, fall back to read through mds */
	if (pgio->pg_lseg == NULL)
		nfs_pageio_reset_read_mds(pgio);

}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_init_read);

void
pnfs_generic_pg_init_write(struct nfs_pageio_descriptor *pgio,
			   struct nfs_page *req, u64 wb_size)
{
	pnfs_generic_pg_check_layout(pgio);
	pnfs_generic_pg_check_range(pgio, req);
	if (pgio->pg_lseg == NULL) {
		pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
						   nfs_req_openctx(req),
						   req_offset(req),
						   wb_size,
						   IOMODE_RW,
						   false,
						   GFP_KERNEL);
		if (IS_ERR(pgio->pg_lseg)) {
			pgio->pg_error = PTR_ERR(pgio->pg_lseg);
			pgio->pg_lseg = NULL;
			return;
		}
	}
	/* If no lseg, fall back to write through mds */
	if (pgio->pg_lseg == NULL)
		nfs_pageio_reset_write_mds(pgio);
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_init_write);

void
pnfs_generic_pg_cleanup(struct nfs_pageio_descriptor *desc)
{
	if (desc->pg_lseg) {
		pnfs_put_lseg(desc->pg_lseg);
		desc->pg_lseg = NULL;
	}
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_cleanup);

/*
 * Return 0 if @req cannot be coalesced into @pgio, otherwise return the number
 * of bytes (maximum @req->wb_bytes) that can be coalesced.
 */
size_t
pnfs_generic_pg_test(struct nfs_pageio_descriptor *pgio,
		     struct nfs_page *prev, struct nfs_page *req)
{
	unsigned int size;
	u64 seg_end, req_start, seg_left;

	size = nfs_generic_pg_test(pgio, prev, req);
	if (!size)
		return 0;

	/*
	 * 'size' contains the number of bytes left in the current page (up
	 * to the original size asked for in @req->wb_bytes).
	 *
	 * Calculate how many bytes are left in the layout segment
	 * and if there are less bytes than 'size', return that instead.
	 *
	 * Please also note that 'seg_end' is actually the offset of the
	 * first byte that lies outside the pnfs_layout_range. FIXME?
	 *
	 */
	if (pgio->pg_lseg) {
		seg_end = pnfs_end_offset(pgio->pg_lseg->pls_range.offset,
				     pgio->pg_lseg->pls_range.length);
		req_start = req_offset(req);

		/* start of request is past the last byte of this segment */
		if (req_start >= seg_end)
			return 0;

		/* adjust 'size' iff there are fewer bytes left in the
		 * segment than what nfs_generic_pg_test returned */
		seg_left = seg_end - req_start;
		if (seg_left < size)
			size = (unsigned int)seg_left;
	}

	return size;
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_test);
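
/*
 * Example (illustrative numbers): if nfs_generic_pg_test() would allow 4096
 * bytes but the current layout segment ends 1024 bytes past req_offset(req),
 * only 1024 bytes may be coalesced; if the request starts at or beyond the
 * end of the segment, 0 is returned and the request is not coalesced.
 */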

int pnfs_write_done_resend_to_mds(struct nfs_pgio_header *hdr)
{
	struct nfs_pageio_descriptor pgio;

	/* Resend all requests through the MDS */
	nfs_pageio_init_write(&pgio, hdr->inode, FLUSH_STABLE, true,
			      hdr->completion_ops);
	set_bit(NFS_CONTEXT_RESEND_WRITES, &hdr->args.context->flags);
	return nfs_pageio_resend(&pgio, hdr);
}
EXPORT_SYMBOL_GPL(pnfs_write_done_resend_to_mds);

static void pnfs_ld_handle_write_error(struct nfs_pgio_header *hdr)
{

	dprintk("pnfs write error = %d\n", hdr->pnfs_error);
	if (NFS_SERVER(hdr->inode)->pnfs_curr_ld->flags &
	    PNFS_LAYOUTRET_ON_ERROR) {
		pnfs_return_layout(hdr->inode);
	}
	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags))
		hdr->task.tk_status = pnfs_write_done_resend_to_mds(hdr);
}

/*
 * Called by non-RPC-based layout drivers
 */
void pnfs_ld_write_done(struct nfs_pgio_header *hdr)
{
	if (likely(!hdr->pnfs_error)) {
		pnfs_set_layoutcommit(hdr->inode, hdr->lseg,
				hdr->mds_offset + hdr->res.count);
		hdr->mds_ops->rpc_call_done(&hdr->task, hdr);
	}
	trace_nfs4_pnfs_write(hdr, hdr->pnfs_error);
	if (unlikely(hdr->pnfs_error))
		pnfs_ld_handle_write_error(hdr);
	hdr->mds_ops->rpc_release(hdr);
}
EXPORT_SYMBOL_GPL(pnfs_ld_write_done);

static void
pnfs_write_through_mds(struct nfs_pageio_descriptor *desc,
		struct nfs_pgio_header *hdr)
{
	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);

	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
		list_splice_tail_init(&hdr->pages, &mirror->pg_list);
		nfs_pageio_reset_write_mds(desc);
		mirror->pg_recoalesce = 1;
	}
	hdr->completion_ops->completion(hdr);
}

static enum pnfs_try_status
pnfs_try_to_write_data(struct nfs_pgio_header *hdr,
			const struct rpc_call_ops *call_ops,
			struct pnfs_layout_segment *lseg,
			int how)
{
	struct inode *inode = hdr->inode;
	enum pnfs_try_status trypnfs;
	struct nfs_server *nfss = NFS_SERVER(inode);

	hdr->mds_ops = call_ops;

	dprintk("%s: Writing ino:%lu %u@%llu (how %d)\n", __func__,
		inode->i_ino, hdr->args.count, hdr->args.offset, how);
	trypnfs = nfss->pnfs_curr_ld->write_pagelist(hdr, how);
	if (trypnfs != PNFS_NOT_ATTEMPTED)
		nfs_inc_stats(inode, NFSIOS_PNFS_WRITE);
	dprintk("%s End (trypnfs:%d)\n", __func__, trypnfs);
	return trypnfs;
}

static void
pnfs_do_write(struct nfs_pageio_descriptor *desc,
	      struct nfs_pgio_header *hdr, int how)
{
	const struct rpc_call_ops *call_ops = desc->pg_rpc_callops;
	struct pnfs_layout_segment *lseg = desc->pg_lseg;
	enum pnfs_try_status trypnfs;

	trypnfs = pnfs_try_to_write_data(hdr, call_ops, lseg, how);
	switch (trypnfs) {
	case PNFS_NOT_ATTEMPTED:
		pnfs_write_through_mds(desc, hdr);
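		/* Fall through */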
	case PNFS_ATTEMPTED:
		break;
	case PNFS_TRY_AGAIN:
		/* cleanup hdr and prepare to redo pnfs */
		if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
			struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);
			list_splice_init(&hdr->pages, &mirror->pg_list);
			mirror->pg_recoalesce = 1;
		}
		hdr->mds_ops->rpc_release(hdr);
	}
}

static void pnfs_writehdr_free(struct nfs_pgio_header *hdr)
{
	pnfs_put_lseg(hdr->lseg);
	nfs_pgio_header_free(hdr);
}

int
pnfs_generic_pg_writepages(struct nfs_pageio_descriptor *desc)
{
	struct nfs_pgio_header *hdr;
	int ret;

	hdr = nfs_pgio_header_alloc(desc->pg_rw_ops);
	if (!hdr) {
		desc->pg_error = -ENOMEM;
		return desc->pg_error;
	}
	nfs_pgheader_init(desc, hdr, pnfs_writehdr_free);

	hdr->lseg = pnfs_get_lseg(desc->pg_lseg);
	ret = nfs_generic_pgio(desc, hdr);
	if (!ret)
		pnfs_do_write(desc, hdr, desc->pg_ioflags);

	return ret;
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_writepages);

int pnfs_read_done_resend_to_mds(struct nfs_pgio_header *hdr)
{
	struct nfs_pageio_descriptor pgio;

	/* Resend all requests through the MDS */
	nfs_pageio_init_read(&pgio, hdr->inode, true, hdr->completion_ops);
	return nfs_pageio_resend(&pgio, hdr);
}
EXPORT_SYMBOL_GPL(pnfs_read_done_resend_to_mds);

static void pnfs_ld_handle_read_error(struct nfs_pgio_header *hdr)
{
	dprintk("pnfs read error = %d\n", hdr->pnfs_error);
	if (NFS_SERVER(hdr->inode)->pnfs_curr_ld->flags &
	    PNFS_LAYOUTRET_ON_ERROR) {
		pnfs_return_layout(hdr->inode);
	}
	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags))
		hdr->task.tk_status = pnfs_read_done_resend_to_mds(hdr);
}

/*
 * Called by non-RPC-based layout drivers
 */
void pnfs_ld_read_done(struct nfs_pgio_header *hdr)
{
	if (likely(!hdr->pnfs_error))
		hdr->mds_ops->rpc_call_done(&hdr->task, hdr);
	trace_nfs4_pnfs_read(hdr, hdr->pnfs_error);
	if (unlikely(hdr->pnfs_error))
		pnfs_ld_handle_read_error(hdr);
	hdr->mds_ops->rpc_release(hdr);
}
EXPORT_SYMBOL_GPL(pnfs_ld_read_done);

static void
pnfs_read_through_mds(struct nfs_pageio_descriptor *desc,
		struct nfs_pgio_header *hdr)
{
	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);

	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
		list_splice_tail_init(&hdr->pages, &mirror->pg_list);
		nfs_pageio_reset_read_mds(desc);
		mirror->pg_recoalesce = 1;
	}
	hdr->completion_ops->completion(hdr);
}

/*
 * Call the appropriate parallel I/O subsystem read function.
 */
static enum pnfs_try_status
pnfs_try_to_read_data(struct nfs_pgio_header *hdr,
		       const struct rpc_call_ops *call_ops,
		       struct pnfs_layout_segment *lseg)
{
	struct inode *inode = hdr->inode;
	struct nfs_server *nfss = NFS_SERVER(inode);
	enum pnfs_try_status trypnfs;

	hdr->mds_ops = call_ops;

	dprintk("%s: Reading ino:%lu %u@%llu\n",
		__func__, inode->i_ino, hdr->args.count, hdr->args.offset);

	trypnfs = nfss->pnfs_curr_ld->read_pagelist(hdr);
	if (trypnfs != PNFS_NOT_ATTEMPTED)
		nfs_inc_stats(inode, NFSIOS_PNFS_READ);
	dprintk("%s End (trypnfs:%d)\n", __func__, trypnfs);
	return trypnfs;
}

/* Resend all requests through pnfs. */
void pnfs_read_resend_pnfs(struct nfs_pgio_header *hdr)
{
	struct nfs_pageio_descriptor pgio;

	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
		/* Prevent deadlocks with layoutreturn! */
		pnfs_put_lseg(hdr->lseg);
		hdr->lseg = NULL;

		nfs_pageio_init_read(&pgio, hdr->inode, false,
					hdr->completion_ops);
		hdr->task.tk_status = nfs_pageio_resend(&pgio, hdr);
	}
}
EXPORT_SYMBOL_GPL(pnfs_read_resend_pnfs);

static void
pnfs_do_read(struct nfs_pageio_descriptor *desc, struct nfs_pgio_header *hdr)
{
	const struct rpc_call_ops *call_ops = desc->pg_rpc_callops;
	struct pnfs_layout_segment *lseg = desc->pg_lseg;
	enum pnfs_try_status trypnfs;

	trypnfs = pnfs_try_to_read_data(hdr, call_ops, lseg);
	switch (trypnfs) {
	case PNFS_NOT_ATTEMPTED:
		pnfs_read_through_mds(desc, hdr);
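		/* Fall through */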
	case PNFS_ATTEMPTED:
		break;
	case PNFS_TRY_AGAIN:
		/* cleanup hdr and prepare to redo pnfs */
		if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
			struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);
			list_splice_init(&hdr->pages, &mirror->pg_list);
			mirror->pg_recoalesce = 1;
		}
		hdr->mds_ops->rpc_release(hdr);
	}
}

static void pnfs_readhdr_free(struct nfs_pgio_header *hdr)
{
	pnfs_put_lseg(hdr->lseg);
	nfs_pgio_header_free(hdr);
}

int
pnfs_generic_pg_readpages(struct nfs_pageio_descriptor *desc)
{
	struct nfs_pgio_header *hdr;
	int ret;

	hdr = nfs_pgio_header_alloc(desc->pg_rw_ops);
	if (!hdr) {
		desc->pg_error = -ENOMEM;
		return desc->pg_error;
	}
	nfs_pgheader_init(desc, hdr, pnfs_readhdr_free);
	hdr->lseg = pnfs_get_lseg(desc->pg_lseg);
	ret = nfs_generic_pgio(desc, hdr);
	if (!ret)
		pnfs_do_read(desc, hdr);
	return ret;
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_readpages);

static void pnfs_clear_layoutcommitting(struct inode *inode)
{
	unsigned long *bitlock = &NFS_I(inode)->flags;

	clear_bit_unlock(NFS_INO_LAYOUTCOMMITTING, bitlock);
	smp_mb__after_atomic();
	wake_up_bit(bitlock, NFS_INO_LAYOUTCOMMITTING);
}

/*
 * There can be multiple RW segments.
 */
static void pnfs_list_write_lseg(struct inode *inode, struct list_head *listp)
{
	struct pnfs_layout_segment *lseg;

	list_for_each_entry(lseg, &NFS_I(inode)->layout->plh_segs, pls_list) {
		if (lseg->pls_range.iomode == IOMODE_RW &&
		    test_and_clear_bit(NFS_LSEG_LAYOUTCOMMIT, &lseg->pls_flags))
			list_add(&lseg->pls_lc_list, listp);
	}
}

static void pnfs_list_write_lseg_done(struct inode *inode, struct list_head *listp)
{
	struct pnfs_layout_segment *lseg, *tmp;

	/* Matched by references in pnfs_set_layoutcommit */
	list_for_each_entry_safe(lseg, tmp, listp, pls_lc_list) {
		list_del_init(&lseg->pls_lc_list);
		pnfs_put_lseg(lseg);
	}

	pnfs_clear_layoutcommitting(inode);
}

void pnfs_set_lo_fail(struct pnfs_layout_segment *lseg)
{
	pnfs_layout_io_set_failed(lseg->pls_layout, lseg->pls_range.iomode);
}
EXPORT_SYMBOL_GPL(pnfs_set_lo_fail);

void
pnfs_set_layoutcommit(struct inode *inode, struct pnfs_layout_segment *lseg,
		loff_t end_pos)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	bool mark_as_dirty = false;

	spin_lock(&inode->i_lock);
	if (!test_and_set_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags)) {
		nfsi->layout->plh_lwb = end_pos;
		mark_as_dirty = true;
		dprintk("%s: Set layoutcommit for inode %lu ",
			__func__, inode->i_ino);
	} else if (end_pos > nfsi->layout->plh_lwb)
		nfsi->layout->plh_lwb = end_pos;
	if (!test_and_set_bit(NFS_LSEG_LAYOUTCOMMIT, &lseg->pls_flags)) {
		/* references matched in nfs4_layoutcommit_release */
		pnfs_get_lseg(lseg);
	}
	spin_unlock(&inode->i_lock);
	dprintk("%s: lseg %p end_pos %llu\n",
		__func__, lseg, nfsi->layout->plh_lwb);

	/* if pnfs_layoutcommit_inode() runs between inode locks, the next one
	 * will be a noop because NFS_INO_LAYOUTCOMMIT will not be set */
	if (mark_as_dirty)
		mark_inode_dirty_sync(inode);
}
EXPORT_SYMBOL_GPL(pnfs_set_layoutcommit);

void pnfs_cleanup_layoutcommit(struct nfs4_layoutcommit_data *data)
{
	struct nfs_server *nfss = NFS_SERVER(data->args.inode);

	if (nfss->pnfs_curr_ld->cleanup_layoutcommit)
		nfss->pnfs_curr_ld->cleanup_layoutcommit(data);
	pnfs_list_write_lseg_done(data->args.inode, &data->lseg_list);
}

/*
 * For the LAYOUT4_NFSV4_1_FILES layout type, NFS_DATA_SYNC WRITEs and
 * NFS_UNSTABLE WRITEs with a COMMIT to data servers must store enough
 * data to disk to allow the server to recover the data if it crashes.
 * LAYOUTCOMMIT is only needed when the NFL4_UFLG_COMMIT_THRU_MDS flag
 * is off, and a COMMIT is sent to a data server, or
 * if WRITEs to a data server return NFS_DATA_SYNC.
 */
int
pnfs_layoutcommit_inode(struct inode *inode, bool sync)
{
	struct pnfs_layoutdriver_type *ld = NFS_SERVER(inode)->pnfs_curr_ld;
	struct nfs4_layoutcommit_data *data;
	struct nfs_inode *nfsi = NFS_I(inode);
	loff_t end_pos;
	int status;

	if (!pnfs_layoutcommit_outstanding(inode))
		return 0;

	dprintk("--> %s inode %lu\n", __func__, inode->i_ino);

	status = -EAGAIN;
	if (test_and_set_bit(NFS_INO_LAYOUTCOMMITTING, &nfsi->flags)) {
		if (!sync)
			goto out;
		status = wait_on_bit_lock_action(&nfsi->flags,
				NFS_INO_LAYOUTCOMMITTING,
				nfs_wait_bit_killable,
				TASK_KILLABLE);
		if (status)
			goto out;
	}

	status = -ENOMEM;
	/* Note kzalloc ensures data->res.seq_res.sr_slot == NULL */
	data = kzalloc(sizeof(*data), GFP_NOFS);
	if (!data)
		goto clear_layoutcommitting;

	status = 0;
	spin_lock(&inode->i_lock);
	if (!test_and_clear_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags))
		goto out_unlock;

	INIT_LIST_HEAD(&data->lseg_list);
	pnfs_list_write_lseg(inode, &data->lseg_list);

	end_pos = nfsi->layout->plh_lwb;

	nfs4_stateid_copy(&data->args.stateid, &nfsi->layout->plh_stateid);
	spin_unlock(&inode->i_lock);

	data->args.inode = inode;
	data->cred = get_cred(nfsi->layout->plh_lc_cred);
	nfs_fattr_init(&data->fattr);
	data->args.bitmask = NFS_SERVER(inode)->cache_consistency_bitmask;
	data->res.fattr = &data->fattr;
	if (end_pos != 0)
		data->args.lastbytewritten = end_pos - 1;
	else
		data->args.lastbytewritten = U64_MAX;
	data->res.server = NFS_SERVER(inode);

	if (ld->prepare_layoutcommit) {
		status = ld->prepare_layoutcommit(&data->args);
		if (status) {
			put_cred(data->cred);
			spin_lock(&inode->i_lock);
			set_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags);
			if (end_pos > nfsi->layout->plh_lwb)
				nfsi->layout->plh_lwb = end_pos;
			goto out_unlock;
		}
	}


	status = nfs4_proc_layoutcommit(data, sync);
out:
	if (status)
		mark_inode_dirty_sync(inode);
	dprintk("<-- %s status %d\n", __func__, status);
	return status;
out_unlock:
	spin_unlock(&inode->i_lock);
	kfree(data);
clear_layoutcommitting:
	pnfs_clear_layoutcommitting(inode);
	goto out;
}
EXPORT_SYMBOL_GPL(pnfs_layoutcommit_inode);

int
pnfs_generic_sync(struct inode *inode, bool datasync)
{
	return pnfs_layoutcommit_inode(inode, true);
}
EXPORT_SYMBOL_GPL(pnfs_generic_sync);

struct nfs4_threshold *pnfs_mdsthreshold_alloc(void)
{
	struct nfs4_threshold *thp;

	thp = kzalloc(sizeof(*thp), GFP_NOFS);
	if (!thp) {
		dprintk("%s mdsthreshold allocation failed\n", __func__);
		return NULL;
	}
	return thp;
}

#if IS_ENABLED(CONFIG_NFS_V4_2)
int
pnfs_report_layoutstat(struct inode *inode, gfp_t gfp_flags)
{
	struct pnfs_layoutdriver_type *ld = NFS_SERVER(inode)->pnfs_curr_ld;
	struct nfs_server *server = NFS_SERVER(inode);
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs42_layoutstat_data *data;
	struct pnfs_layout_hdr *hdr;
	int status = 0;

	if (!pnfs_enabled_sb(server) || !ld->prepare_layoutstats)
		goto out;

	if (!nfs_server_capable(inode, NFS_CAP_LAYOUTSTATS))
		goto out;

	if (test_and_set_bit(NFS_INO_LAYOUTSTATS, &nfsi->flags))
		goto out;

	spin_lock(&inode->i_lock);
	if (!NFS_I(inode)->layout) {
		spin_unlock(&inode->i_lock);
		goto out_clear_layoutstats;
	}
	hdr = NFS_I(inode)->layout;
	pnfs_get_layout_hdr(hdr);
	spin_unlock(&inode->i_lock);

	data = kzalloc(sizeof(*data), gfp_flags);
	if (!data) {
		status = -ENOMEM;
		goto out_put;
	}

	data->args.fh = NFS_FH(inode);
	data->args.inode = inode;
	status = ld->prepare_layoutstats(&data->args);
	if (status)
		goto out_free;

	status = nfs42_proc_layoutstats_generic(NFS_SERVER(inode), data);

out:
	dprintk("%s returns %d\n", __func__, status);
	return status;

out_free:
	kfree(data);
out_put:
	pnfs_put_layout_hdr(hdr);
out_clear_layoutstats:
	smp_mb__before_atomic();
	clear_bit(NFS_INO_LAYOUTSTATS, &nfsi->flags);
	smp_mb__after_atomic();
	goto out;
}
EXPORT_SYMBOL_GPL(pnfs_report_layoutstat);
#endif

unsigned int layoutstats_timer;
module_param(layoutstats_timer, uint, 0644);
EXPORT_SYMBOL_GPL(layoutstats_timer);