Commit 425c1d4e authored by Trond Myklebust

NFSv4: Fix lock on-wire reordering issues

This patch ensures that the server cannot reorder our LOCK/LOCKU
requests if they are sent in parallel on the wire.
Signed-off-by: Trond Myklebust <trond.myklebust@primarydata.com>
parent 6b447539
...@@ -5393,7 +5393,6 @@ static struct nfs4_unlockdata *nfs4_alloc_unlockdata(struct file_lock *fl, ...@@ -5393,7 +5393,6 @@ static struct nfs4_unlockdata *nfs4_alloc_unlockdata(struct file_lock *fl,
p->arg.fl = &p->fl; p->arg.fl = &p->fl;
p->arg.seqid = seqid; p->arg.seqid = seqid;
p->res.seqid = seqid; p->res.seqid = seqid;
p->arg.stateid = &lsp->ls_stateid;
p->lsp = lsp; p->lsp = lsp;
atomic_inc(&lsp->ls_count); atomic_inc(&lsp->ls_count);
/* Ensure we don't close file until we're done freeing locks! */ /* Ensure we don't close file until we're done freeing locks! */
...@@ -5428,6 +5427,9 @@ static void nfs4_locku_done(struct rpc_task *task, void *data) ...@@ -5428,6 +5427,9 @@ static void nfs4_locku_done(struct rpc_task *task, void *data)
case -NFS4ERR_OLD_STATEID: case -NFS4ERR_OLD_STATEID:
case -NFS4ERR_STALE_STATEID: case -NFS4ERR_STALE_STATEID:
case -NFS4ERR_EXPIRED: case -NFS4ERR_EXPIRED:
if (!nfs4_stateid_match(&calldata->arg.stateid,
&calldata->lsp->ls_stateid))
rpc_restart_call_prepare(task);
break; break;
default: default:
if (nfs4_async_handle_error(task, calldata->server, if (nfs4_async_handle_error(task, calldata->server,
...@@ -5443,6 +5445,7 @@ static void nfs4_locku_prepare(struct rpc_task *task, void *data) ...@@ -5443,6 +5445,7 @@ static void nfs4_locku_prepare(struct rpc_task *task, void *data)
if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0) if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0)
goto out_wait; goto out_wait;
nfs4_stateid_copy(&calldata->arg.stateid, &calldata->lsp->ls_stateid);
if (test_bit(NFS_LOCK_INITIALIZED, &calldata->lsp->ls_flags) == 0) { if (test_bit(NFS_LOCK_INITIALIZED, &calldata->lsp->ls_flags) == 0) {
/* Note: exit _without_ running nfs4_locku_done */ /* Note: exit _without_ running nfs4_locku_done */
goto out_no_action; goto out_no_action;
...@@ -5584,7 +5587,6 @@ static struct nfs4_lockdata *nfs4_alloc_lockdata(struct file_lock *fl, ...@@ -5584,7 +5587,6 @@ static struct nfs4_lockdata *nfs4_alloc_lockdata(struct file_lock *fl,
p->arg.lock_seqid = nfs_alloc_seqid(&lsp->ls_seqid, gfp_mask); p->arg.lock_seqid = nfs_alloc_seqid(&lsp->ls_seqid, gfp_mask);
if (IS_ERR(p->arg.lock_seqid)) if (IS_ERR(p->arg.lock_seqid))
goto out_free_seqid; goto out_free_seqid;
p->arg.lock_stateid = &lsp->ls_stateid;
p->arg.lock_owner.clientid = server->nfs_client->cl_clientid; p->arg.lock_owner.clientid = server->nfs_client->cl_clientid;
p->arg.lock_owner.id = lsp->ls_seqid.owner_id; p->arg.lock_owner.id = lsp->ls_seqid.owner_id;
p->arg.lock_owner.s_dev = server->s_dev; p->arg.lock_owner.s_dev = server->s_dev;
...@@ -5615,11 +5617,15 @@ static void nfs4_lock_prepare(struct rpc_task *task, void *calldata) ...@@ -5615,11 +5617,15 @@ static void nfs4_lock_prepare(struct rpc_task *task, void *calldata)
if (nfs_wait_on_sequence(data->arg.open_seqid, task) != 0) { if (nfs_wait_on_sequence(data->arg.open_seqid, task) != 0) {
goto out_release_lock_seqid; goto out_release_lock_seqid;
} }
data->arg.open_stateid = &state->open_stateid; nfs4_stateid_copy(&data->arg.open_stateid,
&state->open_stateid);
data->arg.new_lock_owner = 1; data->arg.new_lock_owner = 1;
data->res.open_seqid = data->arg.open_seqid; data->res.open_seqid = data->arg.open_seqid;
} else } else {
data->arg.new_lock_owner = 0; data->arg.new_lock_owner = 0;
nfs4_stateid_copy(&data->arg.lock_stateid,
&data->lsp->ls_stateid);
}
if (!nfs4_valid_open_stateid(state)) { if (!nfs4_valid_open_stateid(state)) {
data->rpc_status = -EBADF; data->rpc_status = -EBADF;
task->tk_action = NULL; task->tk_action = NULL;
...@@ -5651,7 +5657,8 @@ static void nfs4_lock_done(struct rpc_task *task, void *calldata) ...@@ -5651,7 +5657,8 @@ static void nfs4_lock_done(struct rpc_task *task, void *calldata)
return; return;
data->rpc_status = task->tk_status; data->rpc_status = task->tk_status;
if (task->tk_status == 0) { switch (task->tk_status) {
case 0:
renew_lease(NFS_SERVER(data->ctx->dentry->d_inode), renew_lease(NFS_SERVER(data->ctx->dentry->d_inode),
data->timestamp); data->timestamp);
if (data->arg.new_lock_owner != 0) { if (data->arg.new_lock_owner != 0) {
...@@ -5660,6 +5667,18 @@ static void nfs4_lock_done(struct rpc_task *task, void *calldata) ...@@ -5660,6 +5667,18 @@ static void nfs4_lock_done(struct rpc_task *task, void *calldata)
set_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags); set_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags);
} else if (!nfs4_update_lock_stateid(lsp, &data->res.stateid)) } else if (!nfs4_update_lock_stateid(lsp, &data->res.stateid))
rpc_restart_call_prepare(task); rpc_restart_call_prepare(task);
break;
case -NFS4ERR_BAD_STATEID:
case -NFS4ERR_OLD_STATEID:
case -NFS4ERR_STALE_STATEID:
case -NFS4ERR_EXPIRED:
if (data->arg.new_lock_owner != 0) {
if (!nfs4_stateid_match(&data->arg.open_stateid,
&lsp->ls_state->open_stateid))
rpc_restart_call_prepare(task);
} else if (!nfs4_stateid_match(&data->arg.lock_stateid,
&lsp->ls_stateid))
rpc_restart_call_prepare(task);
} }
dprintk("%s: done, ret = %d!\n", __func__, data->rpc_status); dprintk("%s: done, ret = %d!\n", __func__, data->rpc_status);
} }
......
...@@ -1304,12 +1304,12 @@ static void encode_lock(struct xdr_stream *xdr, const struct nfs_lock_args *args ...@@ -1304,12 +1304,12 @@ static void encode_lock(struct xdr_stream *xdr, const struct nfs_lock_args *args
*p = cpu_to_be32(args->new_lock_owner); *p = cpu_to_be32(args->new_lock_owner);
if (args->new_lock_owner){ if (args->new_lock_owner){
encode_nfs4_seqid(xdr, args->open_seqid); encode_nfs4_seqid(xdr, args->open_seqid);
encode_nfs4_stateid(xdr, args->open_stateid); encode_nfs4_stateid(xdr, &args->open_stateid);
encode_nfs4_seqid(xdr, args->lock_seqid); encode_nfs4_seqid(xdr, args->lock_seqid);
encode_lockowner(xdr, &args->lock_owner); encode_lockowner(xdr, &args->lock_owner);
} }
else { else {
encode_nfs4_stateid(xdr, args->lock_stateid); encode_nfs4_stateid(xdr, &args->lock_stateid);
encode_nfs4_seqid(xdr, args->lock_seqid); encode_nfs4_seqid(xdr, args->lock_seqid);
} }
} }
...@@ -1333,7 +1333,7 @@ static void encode_locku(struct xdr_stream *xdr, const struct nfs_locku_args *ar ...@@ -1333,7 +1333,7 @@ static void encode_locku(struct xdr_stream *xdr, const struct nfs_locku_args *ar
encode_op_hdr(xdr, OP_LOCKU, decode_locku_maxsz, hdr); encode_op_hdr(xdr, OP_LOCKU, decode_locku_maxsz, hdr);
encode_uint32(xdr, nfs4_lock_type(args->fl, 0)); encode_uint32(xdr, nfs4_lock_type(args->fl, 0));
encode_nfs4_seqid(xdr, args->seqid); encode_nfs4_seqid(xdr, args->seqid);
encode_nfs4_stateid(xdr, args->stateid); encode_nfs4_stateid(xdr, &args->stateid);
p = reserve_space(xdr, 16); p = reserve_space(xdr, 16);
p = xdr_encode_hyper(p, args->fl->fl_start); p = xdr_encode_hyper(p, args->fl->fl_start);
xdr_encode_hyper(p, nfs4_lock_length(args->fl)); xdr_encode_hyper(p, nfs4_lock_length(args->fl));
......
...@@ -416,9 +416,9 @@ struct nfs_lock_args { ...@@ -416,9 +416,9 @@ struct nfs_lock_args {
struct nfs_fh * fh; struct nfs_fh * fh;
struct file_lock * fl; struct file_lock * fl;
struct nfs_seqid * lock_seqid; struct nfs_seqid * lock_seqid;
nfs4_stateid * lock_stateid; nfs4_stateid lock_stateid;
struct nfs_seqid * open_seqid; struct nfs_seqid * open_seqid;
nfs4_stateid * open_stateid; nfs4_stateid open_stateid;
struct nfs_lowner lock_owner; struct nfs_lowner lock_owner;
unsigned char block : 1; unsigned char block : 1;
unsigned char reclaim : 1; unsigned char reclaim : 1;
...@@ -437,7 +437,7 @@ struct nfs_locku_args { ...@@ -437,7 +437,7 @@ struct nfs_locku_args {
struct nfs_fh * fh; struct nfs_fh * fh;
struct file_lock * fl; struct file_lock * fl;
struct nfs_seqid * seqid; struct nfs_seqid * seqid;
nfs4_stateid * stateid; nfs4_stateid stateid;
}; };
struct nfs_locku_res { struct nfs_locku_res {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment