Commit 4cfceaf0 authored by Linus Torvalds

Merge branch 'for-4.1' of git://linux-nfs.org/~bfields/linux

Pull nfsd bugfixes from Bruce Fields:
 "Mainly pnfs fixes (and for problems with generic callback code made
  more obvious by pnfs)"

* 'for-4.1' of git://linux-nfs.org/~bfields/linux:
  nfsd: skip CB_NULL probes for 4.1 or later
  nfsd: fix callback restarts
  nfsd: split transport vs operation errors for callbacks
  svcrpc: fix potential GSSX_ACCEPT_SEC_CONTEXT decoding failures
  nfsd: fix pNFS return on close semantics
  nfsd: fix the check for confirmed openowner in nfs4_preprocess_stateid_op
  nfsd/blocklayout: pretend we can send deviceid notifications
parents ef208162 4bd9e9b7
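One of the changes pulled in below is "nfsd: split transport vs operation errors for callbacks". As the nfs4callback.c hunks show, the XDR decoders now return a nonzero value only for transport/decode failures, while the NFS status of the callback operation itself is mapped to an errno and stored in cb->cb_status for nfsd4_cb_done() to act on. The following is a minimal userspace sketch of that two-channel pattern, not the kernel code; the struct names, the -EREMOTEIO placeholder, and the NFS4ERR_DELAY example value are illustrative only.

#include <errno.h>
#include <stdio.h>

/* Hypothetical stand-in for struct nfsd4_callback. */
struct callback {
	int cb_status;	/* operation status, already mapped to a negative errno */
};

/* A toy "XDR stream": an array of already-decoded 32-bit words. */
struct stream {
	const unsigned int *p;
	int words_left;
};

static int nfs_stat_to_errno(unsigned int stat)
{
	/* Grossly simplified mapping; the real table is nfs_cb_stat_to_errno(). */
	return stat ? -EREMOTEIO : 0;
}

/*
 * Two error channels: the return value reports transport/decode problems
 * only; the operation's own result is stored in *op_status.
 */
static int decode_op_status(struct stream *xdr, unsigned int expected,
			    int *op_status)
{
	if (xdr->words_left < 2)
		return -EIO;		/* reply truncated: transport-level failure */
	if (xdr->p[0] != expected)
		return -EIO;		/* unexpected opcode: decode failure */
	*op_status = nfs_stat_to_errno(xdr->p[1]);
	xdr->p += 2;
	xdr->words_left -= 2;
	return 0;			/* decoding succeeded, whatever the op said */
}

int main(void)
{
	unsigned int wire[] = { 4, 10008 };	/* opcode 4, status NFS4ERR_DELAY */
	struct stream xdr = { wire, 2 };
	struct callback cb = { 0 };
	int err = decode_op_status(&xdr, 4, &cb.cb_status);

	/* err == 0 here: the reply was well formed; cb.cb_status carries the error. */
	printf("transport err = %d, cb_status = %d\n", err, cb.cb_status);
	return 0;
}

In the diff itself, decode_cb_op_status() fills cb->cb_status via nfs_cb_stat_to_errno(), and nfsd4_cb_done() copies a nonzero cb_status into task->tk_status before calling the per-callback done handler.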
@@ -181,6 +181,17 @@ nfsd4_block_proc_layoutcommit(struct inode *inode,
}
const struct nfsd4_layout_ops bl_layout_ops = {
/*
* Pretend that we send notification to the client. This is a blatant
* lie to force recent Linux clients to cache our device IDs.
* We rarely ever change the device ID, so the harm of leaking deviceids
* for a while isn't too bad. Unfortunately RFC5661 is a complete mess
* in this regard, but I filed errata 4119 for this a while ago, and
* hopefully the Linux client will eventually start caching deviceids
* without this again.
*/
.notify_types =
NOTIFY_DEVICEID4_DELETE | NOTIFY_DEVICEID4_CHANGE,
.proc_getdeviceinfo = nfsd4_block_proc_getdeviceinfo,
.encode_getdeviceinfo = nfsd4_block_encode_getdeviceinfo,
.proc_layoutget = nfsd4_block_proc_layoutget,
@@ -224,7 +224,7 @@ static int nfs_cb_stat_to_errno(int status)
}
static int decode_cb_op_status(struct xdr_stream *xdr, enum nfs_opnum4 expected,
enum nfsstat4 *status)
int *status)
{
__be32 *p;
u32 op;
@@ -235,7 +235,7 @@ static int decode_cb_op_status(struct xdr_stream *xdr, enum nfs_opnum4 expected,
op = be32_to_cpup(p++);
if (unlikely(op != expected))
goto out_unexpected;
*status = be32_to_cpup(p);
*status = nfs_cb_stat_to_errno(be32_to_cpup(p));
return 0;
out_overflow:
print_overflow_msg(__func__, xdr);
@@ -446,22 +446,16 @@ static int decode_cb_sequence4resok(struct xdr_stream *xdr,
static int decode_cb_sequence4res(struct xdr_stream *xdr,
struct nfsd4_callback *cb)
{
enum nfsstat4 nfserr;
int status;
if (cb->cb_minorversion == 0)
return 0;
status = decode_cb_op_status(xdr, OP_CB_SEQUENCE, &nfserr);
if (unlikely(status))
goto out;
if (unlikely(nfserr != NFS4_OK))
goto out_default;
status = decode_cb_sequence4resok(xdr, cb);
out:
return status;
out_default:
return nfs_cb_stat_to_errno(nfserr);
status = decode_cb_op_status(xdr, OP_CB_SEQUENCE, &cb->cb_status);
if (unlikely(status || cb->cb_status))
return status;
return decode_cb_sequence4resok(xdr, cb);
}
/*
@@ -524,26 +518,19 @@ static int nfs4_xdr_dec_cb_recall(struct rpc_rqst *rqstp,
struct nfsd4_callback *cb)
{
struct nfs4_cb_compound_hdr hdr;
enum nfsstat4 nfserr;
int status;
status = decode_cb_compound4res(xdr, &hdr);
if (unlikely(status))
goto out;
return status;
if (cb != NULL) {
status = decode_cb_sequence4res(xdr, cb);
if (unlikely(status))
goto out;
if (unlikely(status || cb->cb_status))
return status;
}
status = decode_cb_op_status(xdr, OP_CB_RECALL, &nfserr);
if (unlikely(status))
goto out;
if (unlikely(nfserr != NFS4_OK))
status = nfs_cb_stat_to_errno(nfserr);
out:
return status;
return decode_cb_op_status(xdr, OP_CB_RECALL, &cb->cb_status);
}
#ifdef CONFIG_NFSD_PNFS
@@ -621,24 +608,18 @@ static int nfs4_xdr_dec_cb_layout(struct rpc_rqst *rqstp,
struct nfsd4_callback *cb)
{
struct nfs4_cb_compound_hdr hdr;
enum nfsstat4 nfserr;
int status;
status = decode_cb_compound4res(xdr, &hdr);
if (unlikely(status))
goto out;
return status;
if (cb) {
status = decode_cb_sequence4res(xdr, cb);
if (unlikely(status))
goto out;
if (unlikely(status || cb->cb_status))
return status;
}
status = decode_cb_op_status(xdr, OP_CB_LAYOUTRECALL, &nfserr);
if (unlikely(status))
goto out;
if (unlikely(nfserr != NFS4_OK))
status = nfs_cb_stat_to_errno(nfserr);
out:
return status;
return decode_cb_op_status(xdr, OP_CB_LAYOUTRECALL, &cb->cb_status);
}
#endif /* CONFIG_NFSD_PNFS */
@@ -898,13 +879,6 @@ static void nfsd4_cb_prepare(struct rpc_task *task, void *calldata)
if (!nfsd41_cb_get_slot(clp, task))
return;
}
spin_lock(&clp->cl_lock);
if (list_empty(&cb->cb_per_client)) {
/* This is the first call, not a restart */
cb->cb_done = false;
list_add(&cb->cb_per_client, &clp->cl_callbacks);
}
spin_unlock(&clp->cl_lock);
rpc_call_start(task);
}
@@ -918,22 +892,33 @@ static void nfsd4_cb_done(struct rpc_task *task, void *calldata)
if (clp->cl_minorversion) {
/* No need for lock, access serialized in nfsd4_cb_prepare */
++clp->cl_cb_session->se_cb_seq_nr;
if (!task->tk_status)
++clp->cl_cb_session->se_cb_seq_nr;
clear_bit(0, &clp->cl_cb_slot_busy);
rpc_wake_up_next(&clp->cl_cb_waitq);
dprintk("%s: freed slot, new seqid=%d\n", __func__,
clp->cl_cb_session->se_cb_seq_nr);
}
if (clp->cl_cb_client != task->tk_client) {
/* We're shutting down or changing cl_cb_client; leave
* it to nfsd4_process_cb_update to restart the call if
* necessary. */
/*
* If the backchannel connection was shut down while this
* task was queued, we need to resubmit it after setting up
* a new backchannel connection.
*
* Note that if we lost our callback connection permanently
* the submission code will error out, so we don't need to
* handle that case here.
*/
if (task->tk_flags & RPC_TASK_KILLED) {
task->tk_status = 0;
cb->cb_need_restart = true;
return;
}
if (cb->cb_done)
return;
if (cb->cb_status) {
WARN_ON_ONCE(task->tk_status);
task->tk_status = cb->cb_status;
}
switch (cb->cb_ops->done(cb, task)) {
case 0:
@@ -949,21 +934,17 @@ static void nfsd4_cb_done(struct rpc_task *task, void *calldata)
default:
BUG();
}
cb->cb_done = true;
}
static void nfsd4_cb_release(void *calldata)
{
struct nfsd4_callback *cb = calldata;
struct nfs4_client *clp = cb->cb_clp;
if (cb->cb_done) {
spin_lock(&clp->cl_lock);
list_del(&cb->cb_per_client);
spin_unlock(&clp->cl_lock);
if (cb->cb_need_restart)
nfsd4_run_cb(cb);
else
cb->cb_ops->release(cb);
}
}
static const struct rpc_call_ops nfsd4_cb_ops = {
@@ -1058,9 +1039,6 @@ static void nfsd4_process_cb_update(struct nfsd4_callback *cb)
nfsd4_mark_cb_down(clp, err);
return;
}
/* Yay, the callback channel's back! Restart any callbacks: */
list_for_each_entry(cb, &clp->cl_callbacks, cb_per_client)
queue_work(callback_wq, &cb->cb_work);
}
static void
@@ -1071,8 +1049,12 @@ nfsd4_run_cb_work(struct work_struct *work)
struct nfs4_client *clp = cb->cb_clp;
struct rpc_clnt *clnt;
if (cb->cb_ops && cb->cb_ops->prepare)
cb->cb_ops->prepare(cb);
if (cb->cb_need_restart) {
cb->cb_need_restart = false;
} else {
if (cb->cb_ops && cb->cb_ops->prepare)
cb->cb_ops->prepare(cb);
}
if (clp->cl_flags & NFSD4_CLIENT_CB_FLAG_MASK)
nfsd4_process_cb_update(cb);
@@ -1084,6 +1066,15 @@ nfsd4_run_cb_work(struct work_struct *work)
cb->cb_ops->release(cb);
return;
}
/*
* Don't send probe messages for 4.1 or later.
*/
if (!cb->cb_ops && clp->cl_minorversion) {
clp->cl_cb_state = NFSD4_CB_UP;
return;
}
cb->cb_msg.rpc_cred = clp->cl_cb_cred;
rpc_call_async(clnt, &cb->cb_msg, RPC_TASK_SOFT | RPC_TASK_SOFTCONN,
cb->cb_ops ? &nfsd4_cb_ops : &nfsd4_cb_probe_ops, cb);
@@ -1098,8 +1089,8 @@ void nfsd4_init_cb(struct nfsd4_callback *cb, struct nfs4_client *clp,
cb->cb_msg.rpc_resp = cb;
cb->cb_ops = ops;
INIT_WORK(&cb->cb_work, nfsd4_run_cb_work);
INIT_LIST_HEAD(&cb->cb_per_client);
cb->cb_done = true;
cb->cb_status = 0;
cb->cb_need_restart = false;
}
void nfsd4_run_cb(struct nfsd4_callback *cb)
@@ -94,6 +94,7 @@ static struct kmem_cache *lockowner_slab;
static struct kmem_cache *file_slab;
static struct kmem_cache *stateid_slab;
static struct kmem_cache *deleg_slab;
static struct kmem_cache *odstate_slab;
static void free_session(struct nfsd4_session *);
@@ -281,6 +282,7 @@ put_nfs4_file(struct nfs4_file *fi)
if (atomic_dec_and_lock(&fi->fi_ref, &state_lock)) {
hlist_del_rcu(&fi->fi_hash);
spin_unlock(&state_lock);
WARN_ON_ONCE(!list_empty(&fi->fi_clnt_odstate));
WARN_ON_ONCE(!list_empty(&fi->fi_delegations));
call_rcu(&fi->fi_rcu, nfsd4_free_file_rcu);
}
@@ -471,6 +473,86 @@ static void nfs4_file_put_access(struct nfs4_file *fp, u32 access)
__nfs4_file_put_access(fp, O_RDONLY);
}
/*
* Allocate a new open/delegation state counter. This is needed for
* pNFS for proper return on close semantics.
*
* Note that we only allocate it for pNFS-enabled exports, otherwise
* all pointers to struct nfs4_clnt_odstate are always NULL.
*/
static struct nfs4_clnt_odstate *
alloc_clnt_odstate(struct nfs4_client *clp)
{
struct nfs4_clnt_odstate *co;
co = kmem_cache_zalloc(odstate_slab, GFP_KERNEL);
if (co) {
co->co_client = clp;
atomic_set(&co->co_odcount, 1);
}
return co;
}
static void
hash_clnt_odstate_locked(struct nfs4_clnt_odstate *co)
{
struct nfs4_file *fp = co->co_file;
lockdep_assert_held(&fp->fi_lock);
list_add(&co->co_perfile, &fp->fi_clnt_odstate);
}
static inline void
get_clnt_odstate(struct nfs4_clnt_odstate *co)
{
if (co)
atomic_inc(&co->co_odcount);
}
static void
put_clnt_odstate(struct nfs4_clnt_odstate *co)
{
struct nfs4_file *fp;
if (!co)
return;
fp = co->co_file;
if (atomic_dec_and_lock(&co->co_odcount, &fp->fi_lock)) {
list_del(&co->co_perfile);
spin_unlock(&fp->fi_lock);
nfsd4_return_all_file_layouts(co->co_client, fp);
kmem_cache_free(odstate_slab, co);
}
}
static struct nfs4_clnt_odstate *
find_or_hash_clnt_odstate(struct nfs4_file *fp, struct nfs4_clnt_odstate *new)
{
struct nfs4_clnt_odstate *co;
struct nfs4_client *cl;
if (!new)
return NULL;
cl = new->co_client;
spin_lock(&fp->fi_lock);
list_for_each_entry(co, &fp->fi_clnt_odstate, co_perfile) {
if (co->co_client == cl) {
get_clnt_odstate(co);
goto out;
}
}
co = new;
co->co_file = fp;
hash_clnt_odstate_locked(new);
out:
spin_unlock(&fp->fi_lock);
return co;
}
struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl,
struct kmem_cache *slab)
{
@@ -606,7 +688,8 @@ static void block_delegations(struct knfsd_fh *fh)
}
static struct nfs4_delegation *
alloc_init_deleg(struct nfs4_client *clp, struct svc_fh *current_fh)
alloc_init_deleg(struct nfs4_client *clp, struct svc_fh *current_fh,
struct nfs4_clnt_odstate *odstate)
{
struct nfs4_delegation *dp;
long n;
@@ -631,6 +714,8 @@ alloc_init_deleg(struct nfs4_client *clp, struct svc_fh *current_fh)
INIT_LIST_HEAD(&dp->dl_perfile);
INIT_LIST_HEAD(&dp->dl_perclnt);
INIT_LIST_HEAD(&dp->dl_recall_lru);
dp->dl_clnt_odstate = odstate;
get_clnt_odstate(odstate);
dp->dl_type = NFS4_OPEN_DELEGATE_READ;
dp->dl_retries = 1;
nfsd4_init_cb(&dp->dl_recall, dp->dl_stid.sc_client,
@@ -714,6 +799,7 @@ static void destroy_delegation(struct nfs4_delegation *dp)
spin_lock(&state_lock);
unhash_delegation_locked(dp);
spin_unlock(&state_lock);
put_clnt_odstate(dp->dl_clnt_odstate);
nfs4_put_deleg_lease(dp->dl_stid.sc_file);
nfs4_put_stid(&dp->dl_stid);
}
@@ -724,6 +810,7 @@ static void revoke_delegation(struct nfs4_delegation *dp)
WARN_ON(!list_empty(&dp->dl_recall_lru));
put_clnt_odstate(dp->dl_clnt_odstate);
nfs4_put_deleg_lease(dp->dl_stid.sc_file);
if (clp->cl_minorversion == 0)
@@ -933,6 +1020,7 @@ static void nfs4_free_ol_stateid(struct nfs4_stid *stid)
{
struct nfs4_ol_stateid *stp = openlockstateid(stid);
put_clnt_odstate(stp->st_clnt_odstate);
release_all_access(stp);
if (stp->st_stateowner)
nfs4_put_stateowner(stp->st_stateowner);
@@ -1538,7 +1626,6 @@ static struct nfs4_client *alloc_client(struct xdr_netobj name)
INIT_LIST_HEAD(&clp->cl_openowners);
INIT_LIST_HEAD(&clp->cl_delegations);
INIT_LIST_HEAD(&clp->cl_lru);
INIT_LIST_HEAD(&clp->cl_callbacks);
INIT_LIST_HEAD(&clp->cl_revoked);
#ifdef CONFIG_NFSD_PNFS
INIT_LIST_HEAD(&clp->cl_lo_states);
@@ -1634,6 +1721,7 @@ __destroy_client(struct nfs4_client *clp)
while (!list_empty(&reaplist)) {
dp = list_entry(reaplist.next, struct nfs4_delegation, dl_recall_lru);
list_del_init(&dp->dl_recall_lru);
put_clnt_odstate(dp->dl_clnt_odstate);
nfs4_put_deleg_lease(dp->dl_stid.sc_file);
nfs4_put_stid(&dp->dl_stid);
}
@@ -3057,6 +3145,7 @@ static void nfsd4_init_file(struct knfsd_fh *fh, unsigned int hashval,
spin_lock_init(&fp->fi_lock);
INIT_LIST_HEAD(&fp->fi_stateids);
INIT_LIST_HEAD(&fp->fi_delegations);
INIT_LIST_HEAD(&fp->fi_clnt_odstate);
fh_copy_shallow(&fp->fi_fhandle, fh);
fp->fi_deleg_file = NULL;
fp->fi_had_conflict = false;
@@ -3073,6 +3162,7 @@ static void nfsd4_init_file(struct knfsd_fh *fh, unsigned int hashval,
void
nfsd4_free_slabs(void)
{
kmem_cache_destroy(odstate_slab);
kmem_cache_destroy(openowner_slab);
kmem_cache_destroy(lockowner_slab);
kmem_cache_destroy(file_slab);
@@ -3103,8 +3193,14 @@ nfsd4_init_slabs(void)
sizeof(struct nfs4_delegation), 0, 0, NULL);
if (deleg_slab == NULL)
goto out_free_stateid_slab;
odstate_slab = kmem_cache_create("nfsd4_odstate",
sizeof(struct nfs4_clnt_odstate), 0, 0, NULL);
if (odstate_slab == NULL)
goto out_free_deleg_slab;
return 0;
out_free_deleg_slab:
kmem_cache_destroy(deleg_slab);
out_free_stateid_slab:
kmem_cache_destroy(stateid_slab);
out_free_file_slab:
@@ -3581,6 +3677,14 @@ nfsd4_process_open1(struct nfsd4_compound_state *cstate,
open->op_stp = nfs4_alloc_open_stateid(clp);
if (!open->op_stp)
return nfserr_jukebox;
if (nfsd4_has_session(cstate) &&
(cstate->current_fh.fh_export->ex_flags & NFSEXP_PNFS)) {
open->op_odstate = alloc_clnt_odstate(clp);
if (!open->op_odstate)
return nfserr_jukebox;
}
return nfs_ok;
}
@@ -3869,7 +3973,7 @@ static int nfs4_setlease(struct nfs4_delegation *dp)
static struct nfs4_delegation *
nfs4_set_delegation(struct nfs4_client *clp, struct svc_fh *fh,
struct nfs4_file *fp)
struct nfs4_file *fp, struct nfs4_clnt_odstate *odstate)
{
int status;
struct nfs4_delegation *dp;
@@ -3877,7 +3981,7 @@ nfs4_set_delegation(struct nfs4_client *clp, struct svc_fh *fh,
if (fp->fi_had_conflict)
return ERR_PTR(-EAGAIN);
dp = alloc_init_deleg(clp, fh);
dp = alloc_init_deleg(clp, fh, odstate);
if (!dp)
return ERR_PTR(-ENOMEM);
@@ -3903,6 +4007,7 @@ nfs4_set_delegation(struct nfs4_client *clp, struct svc_fh *fh,
spin_unlock(&state_lock);
out:
if (status) {
put_clnt_odstate(dp->dl_clnt_odstate);
nfs4_put_stid(&dp->dl_stid);
return ERR_PTR(status);
}
@@ -3980,7 +4085,7 @@ nfs4_open_delegation(struct svc_fh *fh, struct nfsd4_open *open,
default:
goto out_no_deleg;
}
dp = nfs4_set_delegation(clp, fh, stp->st_stid.sc_file);
dp = nfs4_set_delegation(clp, fh, stp->st_stid.sc_file, stp->st_clnt_odstate);
if (IS_ERR(dp))
goto out_no_deleg;
@@ -4069,6 +4174,11 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
release_open_stateid(stp);
goto out;
}
stp->st_clnt_odstate = find_or_hash_clnt_odstate(fp,
open->op_odstate);
if (stp->st_clnt_odstate == open->op_odstate)
open->op_odstate = NULL;
}
update_stateid(&stp->st_stid.sc_stateid);
memcpy(&open->op_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
@@ -4129,6 +4239,8 @@ void nfsd4_cleanup_open_state(struct nfsd4_compound_state *cstate,
kmem_cache_free(file_slab, open->op_file);
if (open->op_stp)
nfs4_put_stid(&open->op_stp->st_stid);
if (open->op_odstate)
kmem_cache_free(odstate_slab, open->op_odstate);
}
__be32
@@ -4385,10 +4497,17 @@ static __be32 check_stateid_generation(stateid_t *in, stateid_t *ref, bool has_s
return nfserr_old_stateid;
}
static __be32 nfsd4_check_openowner_confirmed(struct nfs4_ol_stateid *ols)
{
if (ols->st_stateowner->so_is_open_owner &&
!(openowner(ols->st_stateowner)->oo_flags & NFS4_OO_CONFIRMED))
return nfserr_bad_stateid;
return nfs_ok;
}
static __be32 nfsd4_validate_stateid(struct nfs4_client *cl, stateid_t *stateid)
{
struct nfs4_stid *s;
struct nfs4_ol_stateid *ols;
__be32 status = nfserr_bad_stateid;
if (ZERO_STATEID(stateid) || ONE_STATEID(stateid))
@@ -4418,13 +4537,7 @@ static __be32 nfsd4_validate_stateid(struct nfs4_client *cl, stateid_t *stateid)
break;
case NFS4_OPEN_STID:
case NFS4_LOCK_STID:
ols = openlockstateid(s);
if (ols->st_stateowner->so_is_open_owner
&& !(openowner(ols->st_stateowner)->oo_flags
& NFS4_OO_CONFIRMED))
status = nfserr_bad_stateid;
else
status = nfs_ok;
status = nfsd4_check_openowner_confirmed(openlockstateid(s));
break;
default:
printk("unknown stateid type %x\n", s->sc_type);
@@ -4516,8 +4629,8 @@ nfs4_preprocess_stateid_op(struct net *net, struct nfsd4_compound_state *cstate,
status = nfs4_check_fh(current_fh, stp);
if (status)
goto out;
if (stp->st_stateowner->so_is_open_owner
&& !(openowner(stp->st_stateowner)->oo_flags & NFS4_OO_CONFIRMED))
status = nfsd4_check_openowner_confirmed(stp);
if (status)
goto out;
status = nfs4_check_openmode(stp, flags);
if (status)
@@ -4852,9 +4965,6 @@ nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
update_stateid(&stp->st_stid.sc_stateid);
memcpy(&close->cl_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
nfsd4_return_all_file_layouts(stp->st_stateowner->so_client,
stp->st_stid.sc_file);
nfsd4_close_open_stateid(stp);
/* put reference from nfs4_preprocess_seqid_op */
@@ -6488,6 +6598,7 @@ nfs4_state_shutdown_net(struct net *net)
list_for_each_safe(pos, next, &reaplist) {
dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru);
list_del_init(&dp->dl_recall_lru);
put_clnt_odstate(dp->dl_clnt_odstate);
nfs4_put_deleg_lease(dp->dl_stid.sc_file);
nfs4_put_stid(&dp->dl_stid);
}
@@ -63,12 +63,12 @@ typedef struct {
struct nfsd4_callback {
struct nfs4_client *cb_clp;
struct list_head cb_per_client;
u32 cb_minorversion;
struct rpc_message cb_msg;
struct nfsd4_callback_ops *cb_ops;
struct work_struct cb_work;
bool cb_done;
int cb_status;
bool cb_need_restart;
};
struct nfsd4_callback_ops {
@@ -126,6 +126,7 @@ struct nfs4_delegation {
struct list_head dl_perfile;
struct list_head dl_perclnt;
struct list_head dl_recall_lru; /* delegation recalled */
struct nfs4_clnt_odstate *dl_clnt_odstate;
u32 dl_type;
time_t dl_time;
/* For recall: */
@@ -332,7 +333,6 @@ struct nfs4_client {
int cl_cb_state;
struct nfsd4_callback cl_cb_null;
struct nfsd4_session *cl_cb_session;
struct list_head cl_callbacks; /* list of in-progress callbacks */
/* for all client information that callback code might need: */
spinlock_t cl_lock;
@@ -464,6 +464,17 @@ static inline struct nfs4_lockowner * lockowner(struct nfs4_stateowner *so)
return container_of(so, struct nfs4_lockowner, lo_owner);
}
/*
* Per-client state indicating no. of opens and outstanding delegations
* on a file from a particular client. 'od' stands for 'open & delegation'
*/
struct nfs4_clnt_odstate {
struct nfs4_client *co_client;
struct nfs4_file *co_file;
struct list_head co_perfile;
atomic_t co_odcount;
};
/*
* nfs4_file: a file opened by some number of (open) nfs4_stateowners.
*
@@ -485,6 +496,7 @@ struct nfs4_file {
struct list_head fi_delegations;
struct rcu_head fi_rcu;
};
struct list_head fi_clnt_odstate;
/* One each for O_RDONLY, O_WRONLY, O_RDWR: */
struct file * fi_fds[3];
/*
@@ -526,6 +538,7 @@ struct nfs4_ol_stateid {
struct list_head st_perstateowner;
struct list_head st_locks;
struct nfs4_stateowner * st_stateowner;
struct nfs4_clnt_odstate * st_clnt_odstate;
unsigned char st_access_bmap;
unsigned char st_deny_bmap;
struct nfs4_ol_stateid * st_openstp;
@@ -247,6 +247,7 @@ struct nfsd4_open {
struct nfs4_openowner *op_openowner; /* used during processing */
struct nfs4_file *op_file; /* used during processing */
struct nfs4_ol_stateid *op_stp; /* used during processing */
struct nfs4_clnt_odstate *op_odstate; /* used during processing */
struct nfs4_acl *op_acl;
struct xdr_netobj op_label;
};
@@ -793,20 +793,26 @@ int gssx_dec_accept_sec_context(struct rpc_rqst *rqstp,
{
u32 value_follows;
int err;
struct page *scratch;
scratch = alloc_page(GFP_KERNEL);
if (!scratch)
return -ENOMEM;
xdr_set_scratch_buffer(xdr, page_address(scratch), PAGE_SIZE);
/* res->status */
err = gssx_dec_status(xdr, &res->status);
if (err)
return err;
goto out_free;
/* res->context_handle */
err = gssx_dec_bool(xdr, &value_follows);
if (err)
return err;
goto out_free;
if (value_follows) {
err = gssx_dec_ctx(xdr, res->context_handle);
if (err)
return err;
goto out_free;
} else {
res->context_handle = NULL;
}
@@ -814,11 +820,11 @@ int gssx_dec_accept_sec_context(struct rpc_rqst *rqstp,
/* res->output_token */
err = gssx_dec_bool(xdr, &value_follows);
if (err)
return err;
goto out_free;
if (value_follows) {
err = gssx_dec_buffer(xdr, res->output_token);
if (err)
return err;
goto out_free;
} else {
res->output_token = NULL;
}
@@ -826,14 +832,17 @@ int gssx_dec_accept_sec_context(struct rpc_rqst *rqstp,
/* res->delegated_cred_handle */
err = gssx_dec_bool(xdr, &value_follows);
if (err)
return err;
goto out_free;
if (value_follows) {
/* we do not support upcall servers sending this data. */
return -EINVAL;
err = -EINVAL;
goto out_free;
}
/* res->options */
err = gssx_dec_option_array(xdr, &res->options);
out_free:
__free_page(scratch);
return err;
}