Commit 31c1febd authored by Linus Torvalds

Merge tag 'nfsd-4.4' of git://linux-nfs.org/~bfields/linux

Pull nfsd updates from Bruce Fields:
 "Apologies for coming a little late in the merge window.  Fortunately
  this is another fairly quiet one:

  Mainly smaller bugfixes and cleanup.  We're still finding some bugs
  from the breakup of the big NFSv4 state lock in 3.17 -- thanks
  especially to Andrew Elble and Jeff Layton for tracking down some of
  the remaining races"

* tag 'nfsd-4.4' of git://linux-nfs.org/~bfields/linux:
  svcrpc: document lack of some memory barriers
  nfsd: fix race with open / open upgrade stateids
  nfsd: eliminate sending duplicate and repeated delegations
  nfsd: remove recurring workqueue job to clean DRC
  SUNRPC: drop stale comment in svc_setup_socket()
  nfsd: ensure that seqid morphing operations are atomic wrt to copies
  nfsd: serialize layout stateid morphing operations
  nfsd: improve client_has_state to check for unused openowners
  nfsd: fix clid_inuse on mount with security change
  sunrpc/cache: make cache flushing more reliable.
  nfsd: move include of state.h from trace.c to trace.h
  sunrpc: avoid warning in gss_key_timeout
  lockd: get rid of reference-counted NSM RPC clients
  SUNRPC: Use MSG_SENDPAGE_NOTLAST when calling sendpage()
  lockd: create NSM handles per net namespace
  nfsd: switch unsigned char flags in svc_fh to bools
  nfsd: move svc_fh->fh_maxsize to just after fh_handle
  nfsd: drop null test before destroy functions
  nfsd: serialize state seqid morphing operations
parents b4a23759 0442f14b
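A recurring change in the nfsd patches below is replacing the open-coded update_stateid()/memcpy() pairs with a single helper that bumps the stateid generation and copies it out under a per-stateid lock (nfs4_inc_and_copy_stateid() in the diff). A minimal userspace sketch of that pattern, with simplified stand-in types and a pthread mutex in place of the kernel spinlock, purely for illustration:

    #include <pthread.h>
    #include <string.h>

    /* Simplified stand-ins for the kernel's stateid_t and nfs4_stid. */
    typedef struct { unsigned int si_generation; char si_opaque[12]; } stateid_t;

    struct nfs4_stid {
        stateid_t       sc_stateid;
        pthread_mutex_t sc_lock;    /* spinlock_t sc_lock in the kernel */
    };

    /* Bump the generation and copy the result out under the lock, so a
     * concurrent reply never observes a half-updated stateid -- the kind
     * of race this series closes. */
    static void inc_and_copy_stateid(stateid_t *dst, struct nfs4_stid *stid)
    {
        stateid_t *src = &stid->sc_stateid;

        pthread_mutex_lock(&stid->sc_lock);
        if (++src->si_generation == 0)  /* wraparound: generation 0 is skipped */
            src->si_generation = 1;
        memcpy(dst, src, sizeof(*dst));
        pthread_mutex_unlock(&stid->sc_lock);
    }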
@@ -116,7 +116,7 @@ static struct nlm_host *nlm_alloc_host(struct nlm_lookup_host_info *ni,
         atomic_inc(&nsm->sm_count);
     else {
         host = NULL;
-        nsm = nsm_get_handle(ni->sap, ni->salen,
+        nsm = nsm_get_handle(ni->net, ni->sap, ni->salen,
                     ni->hostname, ni->hostname_len);
         if (unlikely(nsm == NULL)) {
             dprintk("lockd: %s failed; no nsm handle\n",
@@ -161,6 +161,7 @@ static struct nlm_host *nlm_alloc_host(struct nlm_lookup_host_info *ni,
     host->h_nsmhandle = nsm;
     host->h_addrbuf = nsm->sm_addrbuf;
     host->net = ni->net;
+    strlcpy(host->nodename, utsname()->nodename, sizeof(host->nodename));
 out:
     return host;
@@ -534,17 +535,18 @@ static struct nlm_host *next_host_state(struct hlist_head *cache,
 /**
  * nlm_host_rebooted - Release all resources held by rebooted host
+ * @net: network namespace
  * @info: pointer to decoded results of NLM_SM_NOTIFY call
  *
  * We were notified that the specified host has rebooted. Release
  * all resources held by that peer.
  */
-void nlm_host_rebooted(const struct nlm_reboot *info)
+void nlm_host_rebooted(const struct net *net, const struct nlm_reboot *info)
 {
     struct nsm_handle *nsm;
     struct nlm_host *host;
-    nsm = nsm_reboot_lookup(info);
+    nsm = nsm_reboot_lookup(net, info);
     if (unlikely(nsm == NULL))
         return;
...
@@ -42,7 +42,7 @@ struct nsm_args {
     u32     proc;
     char        *mon_name;
-    char        *nodename;
+    const char  *nodename;
 };
 struct nsm_res {
@@ -51,7 +51,6 @@ struct nsm_res {
 };
 static const struct rpc_program nsm_program;
-static LIST_HEAD(nsm_handles);
 static DEFINE_SPINLOCK(nsm_lock);
 /*
@@ -87,69 +86,18 @@ static struct rpc_clnt *nsm_create(struct net *net, const char *nodename)
     return rpc_create(&args);
 }
-static struct rpc_clnt *nsm_client_set(struct lockd_net *ln,
-        struct rpc_clnt *clnt)
-{
-    spin_lock(&ln->nsm_clnt_lock);
-    if (ln->nsm_users == 0) {
-        if (clnt == NULL)
-            goto out;
-        ln->nsm_clnt = clnt;
-    }
-    clnt = ln->nsm_clnt;
-    ln->nsm_users++;
-out:
-    spin_unlock(&ln->nsm_clnt_lock);
-    return clnt;
-}
-
-static struct rpc_clnt *nsm_client_get(struct net *net, const char *nodename)
-{
-    struct rpc_clnt *clnt, *new;
-    struct lockd_net *ln = net_generic(net, lockd_net_id);
-
-    clnt = nsm_client_set(ln, NULL);
-    if (clnt != NULL)
-        goto out;
-    clnt = new = nsm_create(net, nodename);
-    if (IS_ERR(clnt))
-        goto out;
-    clnt = nsm_client_set(ln, new);
-    if (clnt != new)
-        rpc_shutdown_client(new);
-out:
-    return clnt;
-}
-
-static void nsm_client_put(struct net *net)
-{
-    struct lockd_net *ln = net_generic(net, lockd_net_id);
-    struct rpc_clnt *clnt = NULL;
-
-    spin_lock(&ln->nsm_clnt_lock);
-    ln->nsm_users--;
-    if (ln->nsm_users == 0) {
-        clnt = ln->nsm_clnt;
-        ln->nsm_clnt = NULL;
-    }
-    spin_unlock(&ln->nsm_clnt_lock);
-    if (clnt != NULL)
-        rpc_shutdown_client(clnt);
-}
-
 static int nsm_mon_unmon(struct nsm_handle *nsm, u32 proc, struct nsm_res *res,
-             struct rpc_clnt *clnt)
+             const struct nlm_host *host)
 {
     int status;
+    struct rpc_clnt *clnt;
     struct nsm_args args = {
         .priv       = &nsm->sm_priv,
         .prog       = NLM_PROGRAM,
         .vers       = 3,
         .proc       = NLMPROC_NSM_NOTIFY,
         .mon_name   = nsm->sm_mon_name,
-        .nodename   = clnt->cl_nodename,
+        .nodename   = host->nodename,
     };
     struct rpc_message msg = {
         .rpc_argp   = &args,
@@ -158,6 +106,13 @@ static int nsm_mon_unmon(struct nsm_handle *nsm, u32 proc, struct nsm_res *res,
     memset(res, 0, sizeof(*res));
+    clnt = nsm_create(host->net, host->nodename);
+    if (IS_ERR(clnt)) {
+        dprintk("lockd: failed to create NSM upcall transport, "
+            "status=%ld, net=%p\n", PTR_ERR(clnt), host->net);
+        return PTR_ERR(clnt);
+    }
+
     msg.rpc_proc = &clnt->cl_procinfo[proc];
     status = rpc_call_sync(clnt, &msg, RPC_TASK_SOFTCONN);
     if (status == -ECONNREFUSED) {
@@ -171,6 +126,8 @@ static int nsm_mon_unmon(struct nsm_handle *nsm, u32 proc, struct nsm_res *res,
             status);
     else
         status = 0;
+    rpc_shutdown_client(clnt);
+
     return status;
 }
@@ -190,32 +147,19 @@ int nsm_monitor(const struct nlm_host *host)
     struct nsm_handle *nsm = host->h_nsmhandle;
     struct nsm_res res;
     int status;
-    struct rpc_clnt *clnt;
-    const char *nodename = NULL;
     dprintk("lockd: nsm_monitor(%s)\n", nsm->sm_name);
     if (nsm->sm_monitored)
         return 0;
-    if (host->h_rpcclnt)
-        nodename = host->h_rpcclnt->cl_nodename;
     /*
      * Choose whether to record the caller_name or IP address of
      * this peer in the local rpc.statd's database.
      */
     nsm->sm_mon_name = nsm_use_hostnames ? nsm->sm_name : nsm->sm_addrbuf;
-    clnt = nsm_client_get(host->net, nodename);
-    if (IS_ERR(clnt)) {
-        status = PTR_ERR(clnt);
-        dprintk("lockd: failed to create NSM upcall transport, "
-            "status=%d, net=%p\n", status, host->net);
-        return status;
-    }
-    status = nsm_mon_unmon(nsm, NSMPROC_MON, &res, clnt);
+    status = nsm_mon_unmon(nsm, NSMPROC_MON, &res, host);
     if (unlikely(res.status != 0))
         status = -EIO;
     if (unlikely(status < 0)) {
@@ -247,11 +191,9 @@ void nsm_unmonitor(const struct nlm_host *host)
     if (atomic_read(&nsm->sm_count) == 1
      && nsm->sm_monitored && !nsm->sm_sticky) {
-        struct lockd_net *ln = net_generic(host->net, lockd_net_id);
         dprintk("lockd: nsm_unmonitor(%s)\n", nsm->sm_name);
-        status = nsm_mon_unmon(nsm, NSMPROC_UNMON, &res, ln->nsm_clnt);
+        status = nsm_mon_unmon(nsm, NSMPROC_UNMON, &res, host);
         if (res.status != 0)
             status = -EIO;
         if (status < 0)
@@ -259,38 +201,38 @@ void nsm_unmonitor(const struct nlm_host *host)
                 nsm->sm_name);
         else
             nsm->sm_monitored = 0;
-        nsm_client_put(host->net);
     }
 }
-static struct nsm_handle *nsm_lookup_hostname(const char *hostname,
-                    const size_t len)
+static struct nsm_handle *nsm_lookup_hostname(const struct list_head *nsm_handles,
+                    const char *hostname, const size_t len)
 {
     struct nsm_handle *nsm;
-    list_for_each_entry(nsm, &nsm_handles, sm_link)
+    list_for_each_entry(nsm, nsm_handles, sm_link)
         if (strlen(nsm->sm_name) == len &&
             memcmp(nsm->sm_name, hostname, len) == 0)
             return nsm;
     return NULL;
 }
-static struct nsm_handle *nsm_lookup_addr(const struct sockaddr *sap)
+static struct nsm_handle *nsm_lookup_addr(const struct list_head *nsm_handles,
+                    const struct sockaddr *sap)
 {
     struct nsm_handle *nsm;
-    list_for_each_entry(nsm, &nsm_handles, sm_link)
+    list_for_each_entry(nsm, nsm_handles, sm_link)
         if (rpc_cmp_addr(nsm_addr(nsm), sap))
             return nsm;
     return NULL;
 }
-static struct nsm_handle *nsm_lookup_priv(const struct nsm_private *priv)
+static struct nsm_handle *nsm_lookup_priv(const struct list_head *nsm_handles,
                    const struct nsm_private *priv)
 {
     struct nsm_handle *nsm;
-    list_for_each_entry(nsm, &nsm_handles, sm_link)
+    list_for_each_entry(nsm, nsm_handles, sm_link)
         if (memcmp(nsm->sm_priv.data, priv->data,
                 sizeof(priv->data)) == 0)
             return nsm;
@@ -353,6 +295,7 @@ static struct nsm_handle *nsm_create_handle(const struct sockaddr *sap,
 /**
  * nsm_get_handle - Find or create a cached nsm_handle
+ * @net: network namespace
  * @sap: pointer to socket address of handle to find
  * @salen: length of socket address
  * @hostname: pointer to C string containing hostname to find
@@ -365,11 +308,13 @@ static struct nsm_handle *nsm_create_handle(const struct sockaddr *sap,
  * @hostname cannot be found in the handle cache. Returns NULL if
  * an error occurs.
  */
-struct nsm_handle *nsm_get_handle(const struct sockaddr *sap,
+struct nsm_handle *nsm_get_handle(const struct net *net,
+                const struct sockaddr *sap,
                 const size_t salen, const char *hostname,
                 const size_t hostname_len)
 {
     struct nsm_handle *cached, *new = NULL;
+    struct lockd_net *ln = net_generic(net, lockd_net_id);
     if (hostname && memchr(hostname, '/', hostname_len) != NULL) {
         if (printk_ratelimit()) {
@@ -384,9 +329,10 @@ struct nsm_handle *nsm_get_handle(const struct sockaddr *sap,
     spin_lock(&nsm_lock);
     if (nsm_use_hostnames && hostname != NULL)
-        cached = nsm_lookup_hostname(hostname, hostname_len);
+        cached = nsm_lookup_hostname(&ln->nsm_handles,
                    hostname, hostname_len);
     else
-        cached = nsm_lookup_addr(sap);
+        cached = nsm_lookup_addr(&ln->nsm_handles, sap);
     if (cached != NULL) {
         atomic_inc(&cached->sm_count);
@@ -400,7 +346,7 @@ struct nsm_handle *nsm_get_handle(const struct sockaddr *sap,
     }
     if (new != NULL) {
-        list_add(&new->sm_link, &nsm_handles);
+        list_add(&new->sm_link, &ln->nsm_handles);
         spin_unlock(&nsm_lock);
         dprintk("lockd: created nsm_handle for %s (%s)\n",
                 new->sm_name, new->sm_addrbuf);
@@ -417,19 +363,22 @@ struct nsm_handle *nsm_get_handle(const struct sockaddr *sap,
 /**
  * nsm_reboot_lookup - match NLMPROC_SM_NOTIFY arguments to an nsm_handle
+ * @net: network namespace
  * @info: pointer to NLMPROC_SM_NOTIFY arguments
  *
  * Returns a matching nsm_handle if found in the nsm cache. The returned
  * nsm_handle's reference count is bumped. Otherwise returns NULL if some
  * error occurred.
  */
-struct nsm_handle *nsm_reboot_lookup(const struct nlm_reboot *info)
+struct nsm_handle *nsm_reboot_lookup(const struct net *net,
                const struct nlm_reboot *info)
 {
     struct nsm_handle *cached;
+    struct lockd_net *ln = net_generic(net, lockd_net_id);
     spin_lock(&nsm_lock);
-    cached = nsm_lookup_priv(&info->priv);
+    cached = nsm_lookup_priv(&ln->nsm_handles, &info->priv);
     if (unlikely(cached == NULL)) {
         spin_unlock(&nsm_lock);
         dprintk("lockd: never saw rebooted peer '%.*s' before\n",
...
@@ -12,9 +12,7 @@ struct lockd_net {
     struct delayed_work grace_period_end;
     struct lock_manager lockd_manager;
-    spinlock_t nsm_clnt_lock;
-    unsigned int nsm_users;
-    struct rpc_clnt *nsm_clnt;
+    struct list_head nsm_handles;
 };
 extern int lockd_net_id;
...
@@ -592,7 +592,7 @@ static int lockd_init_net(struct net *net)
     INIT_DELAYED_WORK(&ln->grace_period_end, grace_ender);
     INIT_LIST_HEAD(&ln->lockd_manager.list);
     ln->lockd_manager.block_opens = false;
-    spin_lock_init(&ln->nsm_clnt_lock);
+    INIT_LIST_HEAD(&ln->nsm_handles);
     return 0;
 }
...
@@ -421,7 +421,7 @@ nlm4svc_proc_sm_notify(struct svc_rqst *rqstp, struct nlm_reboot *argp,
         return rpc_system_err;
     }
-    nlm_host_rebooted(argp);
+    nlm_host_rebooted(SVC_NET(rqstp), argp);
     return rpc_success;
 }
...
@@ -464,7 +464,7 @@ nlmsvc_proc_sm_notify(struct svc_rqst *rqstp, struct nlm_reboot *argp,
         return rpc_system_err;
     }
-    nlm_host_rebooted(argp);
+    nlm_host_rebooted(SVC_NET(rqstp), argp);
     return rpc_success;
 }
...
@@ -262,11 +262,11 @@ void fill_post_wcc(struct svc_fh *fhp)
     err = fh_getattr(fhp, &fhp->fh_post_attr);
     fhp->fh_post_change = d_inode(fhp->fh_dentry)->i_version;
     if (err) {
-        fhp->fh_post_saved = 0;
+        fhp->fh_post_saved = false;
         /* Grab the ctime anyway - set_change_info might use it */
         fhp->fh_post_attr.ctime = d_inode(fhp->fh_dentry)->i_ctime;
     } else
-        fhp->fh_post_saved = 1;
+        fhp->fh_post_saved = true;
 }
 /*
...
@@ -201,6 +201,7 @@ nfsd4_alloc_layout_stateid(struct nfsd4_compound_state *cstate,
     INIT_LIST_HEAD(&ls->ls_perfile);
     spin_lock_init(&ls->ls_lock);
     INIT_LIST_HEAD(&ls->ls_layouts);
+    mutex_init(&ls->ls_mutex);
     ls->ls_layout_type = layout_type;
     nfsd4_init_cb(&ls->ls_recall, clp, &nfsd4_cb_layout_ops,
             NFSPROC4_CLNT_CB_LAYOUT);
@@ -262,19 +263,23 @@ nfsd4_preprocess_layout_stateid(struct svc_rqst *rqstp,
         status = nfserr_jukebox;
         if (!ls)
             goto out;
+        mutex_lock(&ls->ls_mutex);
     } else {
         ls = container_of(stid, struct nfs4_layout_stateid, ls_stid);
         status = nfserr_bad_stateid;
+        mutex_lock(&ls->ls_mutex);
         if (stateid->si_generation > stid->sc_stateid.si_generation)
-            goto out_put_stid;
+            goto out_unlock_stid;
         if (layout_type != ls->ls_layout_type)
-            goto out_put_stid;
+            goto out_unlock_stid;
     }
     *lsp = ls;
     return 0;
+out_unlock_stid:
+    mutex_unlock(&ls->ls_mutex);
 out_put_stid:
     nfs4_put_stid(stid);
 out:
@@ -296,8 +301,6 @@ nfsd4_recall_file_layout(struct nfs4_layout_stateid *ls)
     trace_layout_recall(&ls->ls_stid.sc_stateid);
     atomic_inc(&ls->ls_stid.sc_count);
-    update_stateid(&ls->ls_stid.sc_stateid);
-    memcpy(&ls->ls_recall_sid, &ls->ls_stid.sc_stateid, sizeof(stateid_t));
     nfsd4_run_cb(&ls->ls_recall);
 out_unlock:
@@ -406,8 +409,7 @@ nfsd4_insert_layout(struct nfsd4_layoutget *lgp, struct nfs4_layout_stateid *ls)
     list_add_tail(&new->lo_perstate, &ls->ls_layouts);
     new = NULL;
 done:
-    update_stateid(&ls->ls_stid.sc_stateid);
-    memcpy(&lgp->lg_sid, &ls->ls_stid.sc_stateid, sizeof(stateid_t));
+    nfs4_inc_and_copy_stateid(&lgp->lg_sid, &ls->ls_stid);
     spin_unlock(&ls->ls_lock);
 out:
     spin_unlock(&fp->fi_lock);
@@ -481,11 +483,8 @@ nfsd4_return_file_layouts(struct svc_rqst *rqstp,
         }
     }
     if (!list_empty(&ls->ls_layouts)) {
-        if (found) {
-            update_stateid(&ls->ls_stid.sc_stateid);
-            memcpy(&lrp->lr_sid, &ls->ls_stid.sc_stateid,
-                sizeof(stateid_t));
-        }
+        if (found)
+            nfs4_inc_and_copy_stateid(&lrp->lr_sid, &ls->ls_stid);
         lrp->lrs_present = 1;
     } else {
         trace_layoutstate_unhash(&ls->ls_stid.sc_stateid);
@@ -494,6 +493,7 @@ nfsd4_return_file_layouts(struct svc_rqst *rqstp,
     }
     spin_unlock(&ls->ls_lock);
+    mutex_unlock(&ls->ls_mutex);
     nfs4_put_stid(&ls->ls_stid);
     nfsd4_free_layouts(&reaplist);
     return nfs_ok;
@@ -608,6 +608,16 @@ nfsd4_cb_layout_fail(struct nfs4_layout_stateid *ls)
     }
 }
+static void
+nfsd4_cb_layout_prepare(struct nfsd4_callback *cb)
+{
+    struct nfs4_layout_stateid *ls =
+        container_of(cb, struct nfs4_layout_stateid, ls_recall);
+
+    mutex_lock(&ls->ls_mutex);
+    nfs4_inc_and_copy_stateid(&ls->ls_recall_sid, &ls->ls_stid);
+}
+
 static int
 nfsd4_cb_layout_done(struct nfsd4_callback *cb, struct rpc_task *task)
 {
@@ -649,12 +659,14 @@ nfsd4_cb_layout_release(struct nfsd4_callback *cb)
     trace_layout_recall_release(&ls->ls_stid.sc_stateid);
+    mutex_unlock(&ls->ls_mutex);
     nfsd4_return_all_layouts(ls, &reaplist);
     nfsd4_free_layouts(&reaplist);
     nfs4_put_stid(&ls->ls_stid);
 }
 static struct nfsd4_callback_ops nfsd4_cb_layout_ops = {
+    .prepare    = nfsd4_cb_layout_prepare,
     .done       = nfsd4_cb_layout_done,
     .release    = nfsd4_cb_layout_release,
 };
...
@@ -1309,6 +1309,7 @@ nfsd4_layoutget(struct svc_rqst *rqstp,
     nfserr = nfsd4_insert_layout(lgp, ls);
 out_put_stid:
+    mutex_unlock(&ls->ls_mutex);
     nfs4_put_stid(&ls->ls_stid);
 out:
     return nfserr;
@@ -1362,6 +1363,9 @@ nfsd4_layoutcommit(struct svc_rqst *rqstp,
         goto out;
     }
+    /* LAYOUTCOMMIT does not require any serialization */
+    mutex_unlock(&ls->ls_mutex);
+
     if (new_size > i_size_read(inode)) {
         lcp->lc_size_chg = 1;
         lcp->lc_newsize = new_size;
...
@@ -575,6 +575,7 @@ struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl,
     stid->sc_stateid.si_opaque.so_clid = cl->cl_clientid;
     /* Will be incremented before return to client: */
     atomic_set(&stid->sc_count, 1);
+    spin_lock_init(&stid->sc_lock);
     /*
      * It shouldn't be a problem to reuse an opaque stateid value.
@@ -745,6 +746,18 @@ nfs4_put_stid(struct nfs4_stid *s)
     put_nfs4_file(fp);
 }
+void
+nfs4_inc_and_copy_stateid(stateid_t *dst, struct nfs4_stid *stid)
+{
+    stateid_t *src = &stid->sc_stateid;
+
+    spin_lock(&stid->sc_lock);
+    if (unlikely(++src->si_generation == 0))
+        src->si_generation = 1;
+    memcpy(dst, src, sizeof(*dst));
+    spin_unlock(&stid->sc_lock);
+}
+
 static void nfs4_put_deleg_lease(struct nfs4_file *fp)
 {
     struct file *filp = NULL;
@@ -765,16 +778,68 @@ void nfs4_unhash_stid(struct nfs4_stid *s)
     s->sc_type = 0;
 }
-static void
+/**
+ * nfs4_get_existing_delegation - Discover if this delegation already exists
+ * @clp: a pointer to the nfs4_client we're granting a delegation to
+ * @fp: a pointer to the nfs4_file we're granting a delegation on
+ *
+ * Return:
+ *      On success: NULL if an existing delegation was not found.
+ *
+ *      On error: -EAGAIN if one was previously granted to this nfs4_client
+ *                for this nfs4_file.
+ *
+ */
+static int
+nfs4_get_existing_delegation(struct nfs4_client *clp, struct nfs4_file *fp)
+{
+    struct nfs4_delegation *searchdp = NULL;
+    struct nfs4_client *searchclp = NULL;
+
+    lockdep_assert_held(&state_lock);
+    lockdep_assert_held(&fp->fi_lock);
+
+    list_for_each_entry(searchdp, &fp->fi_delegations, dl_perfile) {
+        searchclp = searchdp->dl_stid.sc_client;
+        if (clp == searchclp) {
+            return -EAGAIN;
+        }
+    }
+    return 0;
+}
+
+/**
+ * hash_delegation_locked - Add a delegation to the appropriate lists
+ * @dp: a pointer to the nfs4_delegation we are adding.
+ * @fp: a pointer to the nfs4_file we're granting a delegation on
+ *
+ * Return:
+ *      On success: NULL if the delegation was successfully hashed.
+ *
+ *      On error: -EAGAIN if one was previously granted to this
+ *                nfs4_client for this nfs4_file. Delegation is not hashed.
+ *
+ */
+static int
 hash_delegation_locked(struct nfs4_delegation *dp, struct nfs4_file *fp)
 {
+    int status;
+    struct nfs4_client *clp = dp->dl_stid.sc_client;
+
     lockdep_assert_held(&state_lock);
     lockdep_assert_held(&fp->fi_lock);
+
+    status = nfs4_get_existing_delegation(clp, fp);
+    if (status)
+        return status;
+    ++fp->fi_delegees;
     atomic_inc(&dp->dl_stid.sc_count);
     dp->dl_stid.sc_type = NFS4_DELEG_STID;
     list_add(&dp->dl_perfile, &fp->fi_delegations);
-    list_add(&dp->dl_perclnt, &dp->dl_stid.sc_client->cl_delegations);
+    list_add(&dp->dl_perclnt, &clp->cl_delegations);
+    return 0;
 }
 static bool
@@ -2256,15 +2321,20 @@ nfsd4_set_ex_flags(struct nfs4_client *new, struct nfsd4_exchange_id *clid)
     clid->flags = new->cl_exchange_flags;
 }
+static bool client_has_openowners(struct nfs4_client *clp)
+{
+    struct nfs4_openowner *oo;
+
+    list_for_each_entry(oo, &clp->cl_openowners, oo_perclient) {
+        if (!list_empty(&oo->oo_owner.so_stateids))
+            return true;
+    }
+    return false;
+}
+
 static bool client_has_state(struct nfs4_client *clp)
 {
-    /*
-     * Note clp->cl_openowners check isn't quite right: there's no
-     * need to count owners without stateid's.
-     *
-     * Also note we should probably be using this in 4.0 case too.
-     */
-    return !list_empty(&clp->cl_openowners)
+    return client_has_openowners(clp)
 #ifdef CONFIG_NFSD_PNFS
         || !list_empty(&clp->cl_lo_states)
 #endif
@@ -3049,7 +3119,7 @@ nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
     /* Cases below refer to rfc 3530 section 14.2.33: */
     spin_lock(&nn->client_lock);
     conf = find_confirmed_client_by_name(&clname, nn);
-    if (conf) {
+    if (conf && client_has_state(conf)) {
         /* case 0: */
         status = nfserr_clid_inuse;
         if (clp_used_exchangeid(conf))
@@ -3136,6 +3206,11 @@ nfsd4_setclientid_confirm(struct svc_rqst *rqstp,
     } else { /* case 3: normal case; new or rebooted client */
         old = find_confirmed_client_by_name(&unconf->cl_name, nn);
         if (old) {
+            status = nfserr_clid_inuse;
+            if (client_has_state(old)
+                    && !same_creds(&unconf->cl_cred,
+                            &old->cl_cred))
+                goto out;
             status = mark_client_expired_locked(old);
             if (status) {
                 old = NULL;
@@ -3317,6 +3392,27 @@ static const struct nfs4_stateowner_operations openowner_ops = {
     .so_free = nfs4_free_openowner,
 };
+static struct nfs4_ol_stateid *
+nfsd4_find_existing_open(struct nfs4_file *fp, struct nfsd4_open *open)
+{
+    struct nfs4_ol_stateid *local, *ret = NULL;
+    struct nfs4_openowner *oo = open->op_openowner;
+
+    lockdep_assert_held(&fp->fi_lock);
+
+    list_for_each_entry(local, &fp->fi_stateids, st_perfile) {
+        /* ignore lock owners */
+        if (local->st_stateowner->so_is_open_owner == 0)
+            continue;
+        if (local->st_stateowner == &oo->oo_owner) {
+            ret = local;
+            atomic_inc(&ret->st_stid.sc_count);
+            break;
+        }
+    }
+    return ret;
+}
+
 static struct nfs4_openowner *
 alloc_init_open_stateowner(unsigned int strhashval, struct nfsd4_open *open,
                struct nfsd4_compound_state *cstate)
@@ -3348,9 +3444,20 @@ alloc_init_open_stateowner(unsigned int strhashval, struct nfsd4_open *open,
     return ret;
 }
-static void init_open_stateid(struct nfs4_ol_stateid *stp, struct nfs4_file *fp, struct nfsd4_open *open) {
+static struct nfs4_ol_stateid *
+init_open_stateid(struct nfs4_ol_stateid *stp, struct nfs4_file *fp,
+        struct nfsd4_open *open)
+{
     struct nfs4_openowner *oo = open->op_openowner;
+    struct nfs4_ol_stateid *retstp = NULL;
+
+    spin_lock(&oo->oo_owner.so_client->cl_lock);
+    spin_lock(&fp->fi_lock);
+    retstp = nfsd4_find_existing_open(fp, open);
+    if (retstp)
+        goto out_unlock;
     atomic_inc(&stp->st_stid.sc_count);
     stp->st_stid.sc_type = NFS4_OPEN_STID;
     INIT_LIST_HEAD(&stp->st_locks);
@@ -3360,12 +3467,14 @@ static void init_open_stateid(struct nfs4_ol_stateid *stp, struct nfs4_file *fp,
     stp->st_access_bmap = 0;
     stp->st_deny_bmap = 0;
     stp->st_openstp = NULL;
-    spin_lock(&oo->oo_owner.so_client->cl_lock);
+    init_rwsem(&stp->st_rwsem);
     list_add(&stp->st_perstateowner, &oo->oo_owner.so_stateids);
-    spin_lock(&fp->fi_lock);
     list_add(&stp->st_perfile, &fp->fi_stateids);
+out_unlock:
     spin_unlock(&fp->fi_lock);
     spin_unlock(&oo->oo_owner.so_client->cl_lock);
+    return retstp;
 }
 /*
@@ -3776,27 +3885,6 @@ nfs4_check_deleg(struct nfs4_client *cl, struct nfsd4_open *open,
     return nfs_ok;
 }
-static struct nfs4_ol_stateid *
-nfsd4_find_existing_open(struct nfs4_file *fp, struct nfsd4_open *open)
-{
-    struct nfs4_ol_stateid *local, *ret = NULL;
-    struct nfs4_openowner *oo = open->op_openowner;
-
-    spin_lock(&fp->fi_lock);
-    list_for_each_entry(local, &fp->fi_stateids, st_perfile) {
-        /* ignore lock owners */
-        if (local->st_stateowner->so_is_open_owner == 0)
-            continue;
-        if (local->st_stateowner == &oo->oo_owner) {
-            ret = local;
-            atomic_inc(&ret->st_stid.sc_count);
-            break;
-        }
-    }
-    spin_unlock(&fp->fi_lock);
-    return ret;
-}
-
 static inline int nfs4_access_to_access(u32 nfs4_access)
 {
     int flags = 0;
@@ -3945,6 +4033,18 @@ static struct file_lock *nfs4_alloc_init_lease(struct nfs4_file *fp, int flag)
     return fl;
 }
+/**
+ * nfs4_setlease - Obtain a delegation by requesting lease from vfs layer
+ * @dp: a pointer to the nfs4_delegation we're adding.
+ *
+ * Return:
+ *      On success: Return code will be 0 on success.
+ *
+ *      On error: -EAGAIN if there was an existing delegation.
+ *                nonzero if there is an error in other cases.
+ *
+ */
+
 static int nfs4_setlease(struct nfs4_delegation *dp)
 {
     struct nfs4_file *fp = dp->dl_stid.sc_file;
@@ -3976,16 +4076,19 @@ static int nfs4_setlease(struct nfs4_delegation *dp)
         goto out_unlock;
     /* Race breaker */
     if (fp->fi_deleg_file) {
-        status = 0;
-        ++fp->fi_delegees;
-        hash_delegation_locked(dp, fp);
+        status = hash_delegation_locked(dp, fp);
         goto out_unlock;
     }
     fp->fi_deleg_file = filp;
-    fp->fi_delegees = 1;
-    hash_delegation_locked(dp, fp);
+    fp->fi_delegees = 0;
+    status = hash_delegation_locked(dp, fp);
     spin_unlock(&fp->fi_lock);
     spin_unlock(&state_lock);
+    if (status) {
+        /* Should never happen, this is a new fi_deleg_file  */
+        WARN_ON_ONCE(1);
+        goto out_fput;
+    }
     return 0;
 out_unlock:
     spin_unlock(&fp->fi_lock);
@@ -4005,6 +4108,15 @@ nfs4_set_delegation(struct nfs4_client *clp, struct svc_fh *fh,
     if (fp->fi_had_conflict)
         return ERR_PTR(-EAGAIN);
+    spin_lock(&state_lock);
+    spin_lock(&fp->fi_lock);
+    status = nfs4_get_existing_delegation(clp, fp);
+    spin_unlock(&fp->fi_lock);
+    spin_unlock(&state_lock);
+
+    if (status)
+        return ERR_PTR(status);
+
     dp = alloc_init_deleg(clp, fh, odstate);
     if (!dp)
         return ERR_PTR(-ENOMEM);
@@ -4023,9 +4135,7 @@ nfs4_set_delegation(struct nfs4_client *clp, struct svc_fh *fh,
         status = -EAGAIN;
         goto out_unlock;
     }
-    ++fp->fi_delegees;
-    hash_delegation_locked(dp, fp);
-    status = 0;
+    status = hash_delegation_locked(dp, fp);
 out_unlock:
     spin_unlock(&fp->fi_lock);
     spin_unlock(&state_lock);
@@ -4160,6 +4270,7 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
     struct nfs4_client *cl = open->op_openowner->oo_owner.so_client;
     struct nfs4_file *fp = NULL;
     struct nfs4_ol_stateid *stp = NULL;
+    struct nfs4_ol_stateid *swapstp = NULL;
     struct nfs4_delegation *dp = NULL;
     __be32 status;
@@ -4173,7 +4284,9 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
         status = nfs4_check_deleg(cl, open, &dp);
         if (status)
             goto out;
+        spin_lock(&fp->fi_lock);
         stp = nfsd4_find_existing_open(fp, open);
+        spin_unlock(&fp->fi_lock);
     } else {
         open->op_file = NULL;
         status = nfserr_bad_stateid;
@@ -4187,15 +4300,32 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
     */
     if (stp) {
         /* Stateid was found, this is an OPEN upgrade */
+        down_read(&stp->st_rwsem);
         status = nfs4_upgrade_open(rqstp, fp, current_fh, stp, open);
-        if (status)
+        if (status) {
+            up_read(&stp->st_rwsem);
             goto out;
+        }
     } else {
         stp = open->op_stp;
         open->op_stp = NULL;
-        init_open_stateid(stp, fp, open);
+        swapstp = init_open_stateid(stp, fp, open);
+        if (swapstp) {
+            nfs4_put_stid(&stp->st_stid);
+            stp = swapstp;
+            down_read(&stp->st_rwsem);
+            status = nfs4_upgrade_open(rqstp, fp, current_fh,
+                        stp, open);
+            if (status) {
+                up_read(&stp->st_rwsem);
+                goto out;
+            }
+            goto upgrade_out;
+        }
+        down_read(&stp->st_rwsem);
         status = nfs4_get_vfs_file(rqstp, fp, current_fh, stp, open);
         if (status) {
+            up_read(&stp->st_rwsem);
             release_open_stateid(stp);
             goto out;
         }
@@ -4205,8 +4335,9 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
         if (stp->st_clnt_odstate == open->op_odstate)
             open->op_odstate = NULL;
     }
-    update_stateid(&stp->st_stid.sc_stateid);
-    memcpy(&open->op_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
+upgrade_out:
+    nfs4_inc_and_copy_stateid(&open->op_stateid, &stp->st_stid);
+    up_read(&stp->st_rwsem);
     if (nfsd4_has_session(&resp->cstate)) {
         if (open->op_deleg_want & NFS4_SHARE_WANT_NO_DELEG) {
@@ -4819,10 +4950,13 @@ static __be32 nfs4_seqid_op_checks(struct nfsd4_compound_state *cstate, stateid_
          * revoked delegations are kept only for free_stateid.
          */
         return nfserr_bad_stateid;
+    down_write(&stp->st_rwsem);
     status = check_stateid_generation(stateid, &stp->st_stid.sc_stateid, nfsd4_has_session(cstate));
-    if (status)
-        return status;
-    return nfs4_check_fh(current_fh, &stp->st_stid);
+    if (status == nfs_ok)
+        status = nfs4_check_fh(current_fh, &stp->st_stid);
+    if (status != nfs_ok)
+        up_write(&stp->st_rwsem);
+    return status;
 }
 /*
@@ -4869,6 +5003,7 @@ static __be32 nfs4_preprocess_confirmed_seqid_op(struct nfsd4_compound_state *cs
         return status;
     oo = openowner(stp->st_stateowner);
     if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) {
+        up_write(&stp->st_rwsem);
         nfs4_put_stid(&stp->st_stid);
         return nfserr_bad_stateid;
     }
@@ -4899,11 +5034,13 @@ nfsd4_open_confirm(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
         goto out;
     oo = openowner(stp->st_stateowner);
     status = nfserr_bad_stateid;
-    if (oo->oo_flags & NFS4_OO_CONFIRMED)
+    if (oo->oo_flags & NFS4_OO_CONFIRMED) {
+        up_write(&stp->st_rwsem);
         goto put_stateid;
+    }
     oo->oo_flags |= NFS4_OO_CONFIRMED;
-    update_stateid(&stp->st_stid.sc_stateid);
-    memcpy(&oc->oc_resp_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
+    nfs4_inc_and_copy_stateid(&oc->oc_resp_stateid, &stp->st_stid);
+    up_write(&stp->st_rwsem);
     dprintk("NFSD: %s: success, seqid=%d stateid=" STATEID_FMT "\n",
         __func__, oc->oc_seqid, STATEID_VAL(&stp->st_stid.sc_stateid));
@@ -4975,13 +5112,11 @@ nfsd4_open_downgrade(struct svc_rqst *rqstp,
         goto put_stateid;
     }
     nfs4_stateid_downgrade(stp, od->od_share_access);
     reset_union_bmap_deny(od->od_share_deny, stp);
-    update_stateid(&stp->st_stid.sc_stateid);
-    memcpy(&od->od_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
+    nfs4_inc_and_copy_stateid(&od->od_stateid, &stp->st_stid);
     status = nfs_ok;
 put_stateid:
+    up_write(&stp->st_rwsem);
     nfs4_put_stid(&stp->st_stid);
 out:
     nfsd4_bump_seqid(cstate, status);
@@ -5033,8 +5168,8 @@ nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
     nfsd4_bump_seqid(cstate, status);
     if (status)
         goto out;
-    update_stateid(&stp->st_stid.sc_stateid);
-    memcpy(&close->cl_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
+    nfs4_inc_and_copy_stateid(&close->cl_stateid, &stp->st_stid);
+    up_write(&stp->st_rwsem);
     nfsd4_close_open_stateid(stp);
@@ -5260,6 +5395,7 @@ init_lock_stateid(struct nfs4_ol_stateid *stp, struct nfs4_lockowner *lo,
     stp->st_access_bmap = 0;
     stp->st_deny_bmap = open_stp->st_deny_bmap;
     stp->st_openstp = open_stp;
+    init_rwsem(&stp->st_rwsem);
     list_add(&stp->st_locks, &open_stp->st_locks);
     list_add(&stp->st_perstateowner, &lo->lo_owner.so_stateids);
     spin_lock(&fp->fi_lock);
@@ -5428,6 +5564,7 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
                     &open_stp, nn);
         if (status)
             goto out;
+        up_write(&open_stp->st_rwsem);
         open_sop = openowner(open_stp->st_stateowner);
         status = nfserr_bad_stateid;
         if (!same_clid(&open_sop->oo_owner.so_client->cl_clientid,
@@ -5435,6 +5572,8 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
             goto out;
         status = lookup_or_create_lock_state(cstate, open_stp, lock,
                             &lock_stp, &new);
+        if (status == nfs_ok)
+            down_write(&lock_stp->st_rwsem);
     } else {
         status = nfs4_preprocess_seqid_op(cstate,
                        lock->lk_old_lock_seqid,
@@ -5512,9 +5651,7 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
     err = vfs_lock_file(filp, F_SETLK, file_lock, conflock);
     switch (-err) {
     case 0: /* success! */
-        update_stateid(&lock_stp->st_stid.sc_stateid);
-        memcpy(&lock->lk_resp_stateid, &lock_stp->st_stid.sc_stateid,
-                sizeof(stateid_t));
+        nfs4_inc_and_copy_stateid(&lock->lk_resp_stateid, &lock_stp->st_stid);
         status = 0;
         break;
     case (EAGAIN):      /* conflock holds conflicting lock */
@@ -5540,6 +5677,8 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
         seqid_mutating_err(ntohl(status)))
         lock_sop->lo_owner.so_seqid++;
+    up_write(&lock_stp->st_rwsem);
+
     /*
      * If this is a new, never-before-used stateid, and we are
      * returning an error, then just go ahead and release it.
@@ -5704,11 +5843,11 @@ nfsd4_locku(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
         dprintk("NFSD: nfs4_locku: vfs_lock_file failed!\n");
         goto out_nfserr;
     }
-    update_stateid(&stp->st_stid.sc_stateid);
-    memcpy(&locku->lu_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
+    nfs4_inc_and_copy_stateid(&locku->lu_stateid, &stp->st_stid);
 fput:
     fput(filp);
 put_stateid:
+    up_write(&stp->st_rwsem);
     nfs4_put_stid(&stp->st_stid);
 out:
     nfsd4_bump_seqid(cstate, status);
...
@@ -63,7 +63,6 @@ static unsigned int longest_chain;
 static unsigned int longest_chain_cachesize;
 static int nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *vec);
-static void cache_cleaner_func(struct work_struct *unused);
 static unsigned long nfsd_reply_cache_count(struct shrinker *shrink,
                     struct shrink_control *sc);
 static unsigned long nfsd_reply_cache_scan(struct shrinker *shrink,
@@ -75,13 +74,6 @@ static struct shrinker nfsd_reply_cache_shrinker = {
     .seeks  = 1,
 };
-/*
- * locking for the reply cache:
- * A cache entry is "single use" if c_state == RC_INPROG
- * Otherwise, it when accessing _prev or _next, the lock must be held.
- */
-static DECLARE_DELAYED_WORK(cache_cleaner, cache_cleaner_func);
-
 /*
  * Put a cap on the size of the DRC based on the amount of available
  * low memory in the machine.
@@ -203,7 +195,6 @@ void nfsd_reply_cache_shutdown(void)
     unsigned int i;
     unregister_shrinker(&nfsd_reply_cache_shrinker);
-    cancel_delayed_work_sync(&cache_cleaner);
     for (i = 0; i < drc_hashsize; i++) {
         struct list_head *head = &drc_hashtbl[i].lru_head;
@@ -217,10 +208,8 @@ void nfsd_reply_cache_shutdown(void)
     drc_hashtbl = NULL;
     drc_hashsize = 0;
-    if (drc_slab) {
-        kmem_cache_destroy(drc_slab);
-        drc_slab = NULL;
-    }
+    kmem_cache_destroy(drc_slab);
+    drc_slab = NULL;
 }
 /*
@@ -232,7 +221,6 @@ lru_put_end(struct nfsd_drc_bucket *b, struct svc_cacherep *rp)
 {
     rp->c_timestamp = jiffies;
     list_move_tail(&rp->c_lru, &b->lru_head);
-    schedule_delayed_work(&cache_cleaner, RC_EXPIRE);
 }
 static long
@@ -266,7 +254,6 @@ prune_cache_entries(void)
 {
     unsigned int i;
     long freed = 0;
-    bool cancel = true;
     for (i = 0; i < drc_hashsize; i++) {
         struct nfsd_drc_bucket *b = &drc_hashtbl[i];
@@ -275,26 +262,11 @@ prune_cache_entries(void)
             continue;
         spin_lock(&b->cache_lock);
         freed += prune_bucket(b);
-        if (!list_empty(&b->lru_head))
-            cancel = false;
         spin_unlock(&b->cache_lock);
     }
-    /*
-     * Conditionally rearm the job to run in RC_EXPIRE since we just
-     * ran the pruner.
-     */
-    if (!cancel)
-        mod_delayed_work(system_wq, &cache_cleaner, RC_EXPIRE);
     return freed;
 }
-static void
-cache_cleaner_func(struct work_struct *unused)
-{
-    prune_cache_entries();
-}
-
 static unsigned long
 nfsd_reply_cache_count(struct shrinker *shrink, struct shrink_control *sc)
 {
...
@@ -631,10 +631,7 @@ fh_put(struct svc_fh *fhp)
         fh_unlock(fhp);
         fhp->fh_dentry = NULL;
         dput(dentry);
-#ifdef CONFIG_NFSD_V3
-        fhp->fh_pre_saved = 0;
-        fhp->fh_post_saved = 0;
-#endif
+        fh_clear_wcc(fhp);
     }
     fh_drop_write(fhp);
     if (exp) {
...
@@ -26,16 +26,16 @@ static inline ino_t u32_to_ino_t(__u32 uino)
  */
 typedef struct svc_fh {
     struct knfsd_fh     fh_handle;      /* FH data */
+    int                 fh_maxsize;     /* max size for fh_handle */
     struct dentry *     fh_dentry;      /* validated dentry */
     struct svc_export * fh_export;      /* export pointer */
-    int                 fh_maxsize;     /* max size for fh_handle */
-    unsigned char       fh_locked;      /* inode locked by us */
-    unsigned char       fh_want_write;  /* remount protection taken */
+    bool                fh_locked;      /* inode locked by us */
+    bool                fh_want_write;  /* remount protection taken */
 #ifdef CONFIG_NFSD_V3
-    unsigned char       fh_post_saved;  /* post-op attrs saved */
-    unsigned char       fh_pre_saved;   /* pre-op attrs saved */
+    bool                fh_post_saved;  /* post-op attrs saved */
+    bool                fh_pre_saved;   /* pre-op attrs saved */
     /* Pre-op attributes saved during fh_lock */
     __u64               fh_pre_size;    /* size before operation */
@@ -213,8 +213,8 @@ static inline bool fh_fsid_match(struct knfsd_fh *fh1, struct knfsd_fh *fh2)
 static inline void
 fh_clear_wcc(struct svc_fh *fhp)
 {
-    fhp->fh_post_saved = 0;
-    fhp->fh_pre_saved = 0;
+    fhp->fh_post_saved = false;
+    fhp->fh_pre_saved = false;
 }
 /*
@@ -231,7 +231,7 @@ fill_pre_wcc(struct svc_fh *fhp)
     fhp->fh_pre_ctime = inode->i_ctime;
     fhp->fh_pre_size = inode->i_size;
     fhp->fh_pre_change = inode->i_version;
-    fhp->fh_pre_saved = 1;
+    fhp->fh_pre_saved = true;
     }
 }
@@ -267,7 +267,7 @@ fh_lock_nested(struct svc_fh *fhp, unsigned int subclass)
     inode = d_inode(dentry);
     mutex_lock_nested(&inode->i_mutex, subclass);
     fill_pre_wcc(fhp);
-    fhp->fh_locked = 1;
+    fhp->fh_locked = true;
 }
 static inline void
@@ -285,7 +285,7 @@ fh_unlock(struct svc_fh *fhp)
     if (fhp->fh_locked) {
         fill_post_wcc(fhp);
         mutex_unlock(&d_inode(fhp->fh_dentry)->i_mutex);
-        fhp->fh_locked = 0;
+        fhp->fh_locked = false;
     }
 }
...
...@@ -84,7 +84,7 @@ struct nfsd4_callback_ops { ...@@ -84,7 +84,7 @@ struct nfsd4_callback_ops {
* fields that are of general use to any stateid. * fields that are of general use to any stateid.
*/ */
struct nfs4_stid { struct nfs4_stid {
atomic_t sc_count; atomic_t sc_count;
#define NFS4_OPEN_STID 1 #define NFS4_OPEN_STID 1
#define NFS4_LOCK_STID 2 #define NFS4_LOCK_STID 2
#define NFS4_DELEG_STID 4 #define NFS4_DELEG_STID 4
...@@ -94,11 +94,12 @@ struct nfs4_stid { ...@@ -94,11 +94,12 @@ struct nfs4_stid {
#define NFS4_REVOKED_DELEG_STID 16 #define NFS4_REVOKED_DELEG_STID 16
#define NFS4_CLOSED_DELEG_STID 32 #define NFS4_CLOSED_DELEG_STID 32
#define NFS4_LAYOUT_STID 64 #define NFS4_LAYOUT_STID 64
unsigned char sc_type; unsigned char sc_type;
stateid_t sc_stateid; stateid_t sc_stateid;
struct nfs4_client *sc_client; spinlock_t sc_lock;
struct nfs4_file *sc_file; struct nfs4_client *sc_client;
void (*sc_free)(struct nfs4_stid *); struct nfs4_file *sc_file;
void (*sc_free)(struct nfs4_stid *);
}; };
/* /*
...@@ -364,15 +365,6 @@ struct nfs4_client_reclaim { ...@@ -364,15 +365,6 @@ struct nfs4_client_reclaim {
char cr_recdir[HEXDIR_LEN]; /* recover dir */ char cr_recdir[HEXDIR_LEN]; /* recover dir */
}; };
static inline void
update_stateid(stateid_t *stateid)
{
stateid->si_generation++;
/* Wraparound recommendation from 3530bis-13 9.1.3.2: */
if (stateid->si_generation == 0)
stateid->si_generation = 1;
}
/* A reasonable value for REPLAY_ISIZE was estimated as follows: /* A reasonable value for REPLAY_ISIZE was estimated as follows:
* The OPEN response, typically the largest, requires * The OPEN response, typically the largest, requires
* 4(status) + 8(stateid) + 20(changeinfo) + 4(rflags) + 8(verifier) + * 4(status) + 8(stateid) + 20(changeinfo) + 4(rflags) + 8(verifier) +
...@@ -534,15 +526,16 @@ struct nfs4_file { ...@@ -534,15 +526,16 @@ struct nfs4_file {
* Better suggestions welcome. * Better suggestions welcome.
*/ */
struct nfs4_ol_stateid { struct nfs4_ol_stateid {
struct nfs4_stid st_stid; /* must be first field */ struct nfs4_stid st_stid;
struct list_head st_perfile; struct list_head st_perfile;
struct list_head st_perstateowner; struct list_head st_perstateowner;
struct list_head st_locks; struct list_head st_locks;
struct nfs4_stateowner * st_stateowner; struct nfs4_stateowner *st_stateowner;
struct nfs4_clnt_odstate * st_clnt_odstate; struct nfs4_clnt_odstate *st_clnt_odstate;
unsigned char st_access_bmap; unsigned char st_access_bmap;
unsigned char st_deny_bmap; unsigned char st_deny_bmap;
struct nfs4_ol_stateid * st_openstp; struct nfs4_ol_stateid *st_openstp;
struct rw_semaphore st_rwsem;
}; };
static inline struct nfs4_ol_stateid *openlockstateid(struct nfs4_stid *s) static inline struct nfs4_ol_stateid *openlockstateid(struct nfs4_stid *s)
...@@ -561,6 +554,7 @@ struct nfs4_layout_stateid { ...@@ -561,6 +554,7 @@ struct nfs4_layout_stateid {
struct nfsd4_callback ls_recall; struct nfsd4_callback ls_recall;
stateid_t ls_recall_sid; stateid_t ls_recall_sid;
bool ls_recalled; bool ls_recalled;
struct mutex ls_mutex;
}; };
static inline struct nfs4_layout_stateid *layoutstateid(struct nfs4_stid *s) static inline struct nfs4_layout_stateid *layoutstateid(struct nfs4_stid *s)
...@@ -593,6 +587,7 @@ struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, ...@@ -593,6 +587,7 @@ struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl,
struct kmem_cache *slab); struct kmem_cache *slab);
void nfs4_unhash_stid(struct nfs4_stid *s); void nfs4_unhash_stid(struct nfs4_stid *s);
void nfs4_put_stid(struct nfs4_stid *s); void nfs4_put_stid(struct nfs4_stid *s);
void nfs4_inc_and_copy_stateid(stateid_t *dst, struct nfs4_stid *stid);
void nfs4_remove_reclaim_record(struct nfs4_client_reclaim *, struct nfsd_net *); void nfs4_remove_reclaim_record(struct nfs4_client_reclaim *, struct nfsd_net *);
extern void nfs4_release_reclaim(struct nfsd_net *); extern void nfs4_release_reclaim(struct nfsd_net *);
extern struct nfs4_client_reclaim *nfsd4_find_reclaim_client(const char *recdir, extern struct nfs4_client_reclaim *nfsd4_find_reclaim_client(const char *recdir,
......
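Note on the state.h hunks above: the removed update_stateid() bumped si_generation in place, so a reader copying the stateid into a reply could observe a half-updated value. The new sc_lock spinlock together with the nfs4_inc_and_copy_stateid() declaration replaces that with a single increment-and-copy step (and layout stateids similarly gain ls_mutex to serialize their morphing). The header only shows the declaration; a minimal sketch of a body consistent with it, assuming the wraparound rule from the deleted helper is kept:

void nfs4_inc_and_copy_stateid(stateid_t *dst, struct nfs4_stid *stid)
{
	spin_lock(&stid->sc_lock);
	/* same wraparound rule as the removed update_stateid() */
	if (++stid->sc_stateid.si_generation == 0)
		stid->sc_stateid.si_generation = 1;
	*dst = stid->sc_stateid;	/* copy while still holding sc_lock */
	spin_unlock(&stid->sc_lock);
}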
#include "state.h"
#define CREATE_TRACE_POINTS #define CREATE_TRACE_POINTS
#include "trace.h" #include "trace.h"
...@@ -9,6 +9,8 @@ ...@@ -9,6 +9,8 @@
#include <linux/tracepoint.h> #include <linux/tracepoint.h>
#include "state.h"
DECLARE_EVENT_CLASS(nfsd_stateid_class, DECLARE_EVENT_CLASS(nfsd_stateid_class,
TP_PROTO(stateid_t *stp), TP_PROTO(stateid_t *stp),
TP_ARGS(stp), TP_ARGS(stp),
......
...@@ -1631,7 +1631,7 @@ nfsd_rename(struct svc_rqst *rqstp, struct svc_fh *ffhp, char *fname, int flen, ...@@ -1631,7 +1631,7 @@ nfsd_rename(struct svc_rqst *rqstp, struct svc_fh *ffhp, char *fname, int flen,
/* cannot use fh_lock as we need deadlock protective ordering /* cannot use fh_lock as we need deadlock protective ordering
* so do it by hand */ * so do it by hand */
trap = lock_rename(tdentry, fdentry); trap = lock_rename(tdentry, fdentry);
ffhp->fh_locked = tfhp->fh_locked = 1; ffhp->fh_locked = tfhp->fh_locked = true;
fill_pre_wcc(ffhp); fill_pre_wcc(ffhp);
fill_pre_wcc(tfhp); fill_pre_wcc(tfhp);
...@@ -1681,7 +1681,7 @@ nfsd_rename(struct svc_rqst *rqstp, struct svc_fh *ffhp, char *fname, int flen, ...@@ -1681,7 +1681,7 @@ nfsd_rename(struct svc_rqst *rqstp, struct svc_fh *ffhp, char *fname, int flen,
fill_post_wcc(ffhp); fill_post_wcc(ffhp);
fill_post_wcc(tfhp); fill_post_wcc(tfhp);
unlock_rename(tdentry, fdentry); unlock_rename(tdentry, fdentry);
ffhp->fh_locked = tfhp->fh_locked = 0; ffhp->fh_locked = tfhp->fh_locked = false;
fh_drop_write(ffhp); fh_drop_write(ffhp);
out: out:
......
...@@ -112,14 +112,14 @@ static inline int fh_want_write(struct svc_fh *fh) ...@@ -112,14 +112,14 @@ static inline int fh_want_write(struct svc_fh *fh)
int ret = mnt_want_write(fh->fh_export->ex_path.mnt); int ret = mnt_want_write(fh->fh_export->ex_path.mnt);
if (!ret) if (!ret)
fh->fh_want_write = 1; fh->fh_want_write = true;
return ret; return ret;
} }
static inline void fh_drop_write(struct svc_fh *fh) static inline void fh_drop_write(struct svc_fh *fh)
{ {
if (fh->fh_want_write) { if (fh->fh_want_write) {
fh->fh_want_write = 0; fh->fh_want_write = false;
mnt_drop_write(fh->fh_export->ex_path.mnt); mnt_drop_write(fh->fh_export->ex_path.mnt);
} }
} }
......
...@@ -632,7 +632,7 @@ static inline void ...@@ -632,7 +632,7 @@ static inline void
set_change_info(struct nfsd4_change_info *cinfo, struct svc_fh *fhp) set_change_info(struct nfsd4_change_info *cinfo, struct svc_fh *fhp)
{ {
BUG_ON(!fhp->fh_pre_saved); BUG_ON(!fhp->fh_pre_saved);
cinfo->atomic = fhp->fh_post_saved; cinfo->atomic = (u32)fhp->fh_post_saved;
cinfo->change_supported = IS_I_VERSION(d_inode(fhp->fh_dentry)); cinfo->change_supported = IS_I_VERSION(d_inode(fhp->fh_dentry));
cinfo->before_change = fhp->fh_pre_change; cinfo->before_change = fhp->fh_pre_change;
......
...@@ -68,6 +68,7 @@ struct nlm_host { ...@@ -68,6 +68,7 @@ struct nlm_host {
struct nsm_handle *h_nsmhandle; /* NSM status handle */ struct nsm_handle *h_nsmhandle; /* NSM status handle */
char *h_addrbuf; /* address eyecatcher */ char *h_addrbuf; /* address eyecatcher */
struct net *net; /* host net */ struct net *net; /* host net */
char nodename[UNX_MAXNODENAME + 1];
}; };
/* /*
...@@ -235,7 +236,8 @@ void nlm_rebind_host(struct nlm_host *); ...@@ -235,7 +236,8 @@ void nlm_rebind_host(struct nlm_host *);
struct nlm_host * nlm_get_host(struct nlm_host *); struct nlm_host * nlm_get_host(struct nlm_host *);
void nlm_shutdown_hosts(void); void nlm_shutdown_hosts(void);
void nlm_shutdown_hosts_net(struct net *net); void nlm_shutdown_hosts_net(struct net *net);
void nlm_host_rebooted(const struct nlm_reboot *); void nlm_host_rebooted(const struct net *net,
const struct nlm_reboot *);
/* /*
* Host monitoring * Host monitoring
...@@ -243,11 +245,13 @@ void nlm_host_rebooted(const struct nlm_reboot *); ...@@ -243,11 +245,13 @@ void nlm_host_rebooted(const struct nlm_reboot *);
int nsm_monitor(const struct nlm_host *host); int nsm_monitor(const struct nlm_host *host);
void nsm_unmonitor(const struct nlm_host *host); void nsm_unmonitor(const struct nlm_host *host);
struct nsm_handle *nsm_get_handle(const struct sockaddr *sap, struct nsm_handle *nsm_get_handle(const struct net *net,
const struct sockaddr *sap,
const size_t salen, const size_t salen,
const char *hostname, const char *hostname,
const size_t hostname_len); const size_t hostname_len);
struct nsm_handle *nsm_reboot_lookup(const struct nlm_reboot *info); struct nsm_handle *nsm_reboot_lookup(const struct net *net,
const struct nlm_reboot *info);
void nsm_release(struct nsm_handle *nsm); void nsm_release(struct nsm_handle *nsm);
/* /*
......
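The lockd prototypes above all grow a struct net argument, so NSM handles are created and matched per network namespace rather than globally. A hedged sketch of the kind of namespace-aware lookup this implies, assuming the handle list is kept in the existing per-net lockd_net state (the helper name and the nsm_handles list placement are illustrative, not taken from this diff):

static struct nsm_handle *nsm_lookup_addr(const struct net *net,
					  const struct sockaddr *sap)
{
	/* assumes struct lockd_net carries a per-namespace nsm_handles list */
	struct lockd_net *ln = net_generic(net, lockd_net_id);
	struct nsm_handle *nsm;

	/* only handles created in this namespace are candidates */
	list_for_each_entry(nsm, &ln->nsm_handles, sm_link) {
		if (rpc_cmp_addr((const struct sockaddr *)&nsm->sm_addr, sap))
			return nsm;
	}
	return NULL;
}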
...@@ -48,8 +48,10 @@ ...@@ -48,8 +48,10 @@
struct cache_head { struct cache_head {
struct hlist_node cache_list; struct hlist_node cache_list;
time_t expiry_time; /* After this time, don't use the data */ time_t expiry_time; /* After this time, don't use the data */
time_t last_refresh; /* If CACHE_PENDING, this is when upcall time_t last_refresh; /* If CACHE_PENDING, this is when upcall was
* was sent, else this is when update was received * sent, else this is when update was
* received, though it is always set to
* be *after* ->flush_time.
*/ */
struct kref ref; struct kref ref;
unsigned long flags; unsigned long flags;
...@@ -105,8 +107,12 @@ struct cache_detail { ...@@ -105,8 +107,12 @@ struct cache_detail {
/* fields below this comment are for internal use /* fields below this comment are for internal use
* and should not be touched by cache owners * and should not be touched by cache owners
*/ */
time_t flush_time; /* flush all cache items with last_refresh time_t flush_time; /* flush all cache items with
* earlier than this */ * last_refresh at or earlier
* than this. last_refresh
* is never set at or earlier
* than this.
*/
struct list_head others; struct list_head others;
time_t nextcheck; time_t nextcheck;
int entries; int entries;
...@@ -203,7 +209,7 @@ static inline void cache_put(struct cache_head *h, struct cache_detail *cd) ...@@ -203,7 +209,7 @@ static inline void cache_put(struct cache_head *h, struct cache_detail *cd)
static inline int cache_is_expired(struct cache_detail *detail, struct cache_head *h) static inline int cache_is_expired(struct cache_detail *detail, struct cache_head *h)
{ {
return (h->expiry_time < seconds_since_boot()) || return (h->expiry_time < seconds_since_boot()) ||
(detail->flush_time > h->last_refresh); (detail->flush_time >= h->last_refresh);
} }
extern int cache_check(struct cache_detail *detail, extern int cache_check(struct cache_detail *detail,
......
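The two cache.h changes above work as a pair: flush_time now expires entries whose last_refresh is at or earlier than it, and (in the cache.c hunks further down) last_refresh is always pushed past flush_time when an entry is created or refreshed. A small worked illustration of the predicate, just the arithmetic rather than kernel code:

/* mirrors the new cache_is_expired() test */
static int is_expired(time_t now, time_t expiry_time,
		      time_t flush_time, time_t last_refresh)
{
	return (expiry_time < now) || (flush_time >= last_refresh);
}

/*
 * Example: a flush is requested at t=100 while an entry was refreshed
 * at exactly t=100.  The old test (flush_time > last_refresh) let that
 * entry survive the flush; the new test (>=) expires it.  Entries
 * created after the flush get last_refresh >= 101, so they are not
 * expired by mistake.
 */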
...@@ -1411,17 +1411,16 @@ gss_key_timeout(struct rpc_cred *rc) ...@@ -1411,17 +1411,16 @@ gss_key_timeout(struct rpc_cred *rc)
{ {
struct gss_cred *gss_cred = container_of(rc, struct gss_cred, gc_base); struct gss_cred *gss_cred = container_of(rc, struct gss_cred, gc_base);
struct gss_cl_ctx *ctx; struct gss_cl_ctx *ctx;
unsigned long now = jiffies; unsigned long timeout = jiffies + (gss_key_expire_timeo * HZ);
unsigned long expire; int ret = 0;
rcu_read_lock(); rcu_read_lock();
ctx = rcu_dereference(gss_cred->gc_ctx); ctx = rcu_dereference(gss_cred->gc_ctx);
if (ctx) if (!ctx || time_after(timeout, ctx->gc_expiry))
expire = ctx->gc_expiry - (gss_key_expire_timeo * HZ); ret = -EACCES;
rcu_read_unlock(); rcu_read_unlock();
if (!ctx || time_after(now, expire))
return -EACCES; return ret;
return 0;
} }
static int static int
......
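The gss_key_timeout() rewrite computes the deadline up front, which avoids the old path where 'expire' was tested without ever being set when no context exists, and folds the expiry window into a single wraparound-safe comparison. The two forms check effectively the same condition, since time_after(now, expiry - window) and time_after(now + window, expiry) agree under jiffies arithmetic. A minimal standalone sketch of the new shape (names here are illustrative):

/* -EACCES when the context will expire within the grace window */
static int key_times_out(unsigned long gc_expiry, unsigned long window_secs)
{
	unsigned long timeout = jiffies + window_secs * HZ;

	/* time_after() is safe across jiffies wraparound */
	return time_after(timeout, gc_expiry) ? -EACCES : 0;
}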
...@@ -41,13 +41,16 @@ ...@@ -41,13 +41,16 @@
static bool cache_defer_req(struct cache_req *req, struct cache_head *item); static bool cache_defer_req(struct cache_req *req, struct cache_head *item);
static void cache_revisit_request(struct cache_head *item); static void cache_revisit_request(struct cache_head *item);
static void cache_init(struct cache_head *h) static void cache_init(struct cache_head *h, struct cache_detail *detail)
{ {
time_t now = seconds_since_boot(); time_t now = seconds_since_boot();
INIT_HLIST_NODE(&h->cache_list); INIT_HLIST_NODE(&h->cache_list);
h->flags = 0; h->flags = 0;
kref_init(&h->ref); kref_init(&h->ref);
h->expiry_time = now + CACHE_NEW_EXPIRY; h->expiry_time = now + CACHE_NEW_EXPIRY;
if (now <= detail->flush_time)
/* ensure it isn't already expired */
now = detail->flush_time + 1;
h->last_refresh = now; h->last_refresh = now;
} }
...@@ -81,7 +84,7 @@ struct cache_head *sunrpc_cache_lookup(struct cache_detail *detail, ...@@ -81,7 +84,7 @@ struct cache_head *sunrpc_cache_lookup(struct cache_detail *detail,
* we might get lose if we need to * we might get lose if we need to
* cache_put it soon. * cache_put it soon.
*/ */
cache_init(new); cache_init(new, detail);
detail->init(new, key); detail->init(new, key);
write_lock(&detail->hash_lock); write_lock(&detail->hash_lock);
...@@ -116,10 +119,15 @@ EXPORT_SYMBOL_GPL(sunrpc_cache_lookup); ...@@ -116,10 +119,15 @@ EXPORT_SYMBOL_GPL(sunrpc_cache_lookup);
static void cache_dequeue(struct cache_detail *detail, struct cache_head *ch); static void cache_dequeue(struct cache_detail *detail, struct cache_head *ch);
static void cache_fresh_locked(struct cache_head *head, time_t expiry) static void cache_fresh_locked(struct cache_head *head, time_t expiry,
struct cache_detail *detail)
{ {
time_t now = seconds_since_boot();
if (now <= detail->flush_time)
/* ensure it isn't immediately treated as expired */
now = detail->flush_time + 1;
head->expiry_time = expiry; head->expiry_time = expiry;
head->last_refresh = seconds_since_boot(); head->last_refresh = now;
smp_wmb(); /* paired with smp_rmb() in cache_is_valid() */ smp_wmb(); /* paired with smp_rmb() in cache_is_valid() */
set_bit(CACHE_VALID, &head->flags); set_bit(CACHE_VALID, &head->flags);
} }
...@@ -149,7 +157,7 @@ struct cache_head *sunrpc_cache_update(struct cache_detail *detail, ...@@ -149,7 +157,7 @@ struct cache_head *sunrpc_cache_update(struct cache_detail *detail,
set_bit(CACHE_NEGATIVE, &old->flags); set_bit(CACHE_NEGATIVE, &old->flags);
else else
detail->update(old, new); detail->update(old, new);
cache_fresh_locked(old, new->expiry_time); cache_fresh_locked(old, new->expiry_time, detail);
write_unlock(&detail->hash_lock); write_unlock(&detail->hash_lock);
cache_fresh_unlocked(old, detail); cache_fresh_unlocked(old, detail);
return old; return old;
...@@ -162,7 +170,7 @@ struct cache_head *sunrpc_cache_update(struct cache_detail *detail, ...@@ -162,7 +170,7 @@ struct cache_head *sunrpc_cache_update(struct cache_detail *detail,
cache_put(old, detail); cache_put(old, detail);
return NULL; return NULL;
} }
cache_init(tmp); cache_init(tmp, detail);
detail->init(tmp, old); detail->init(tmp, old);
write_lock(&detail->hash_lock); write_lock(&detail->hash_lock);
...@@ -173,8 +181,8 @@ struct cache_head *sunrpc_cache_update(struct cache_detail *detail, ...@@ -173,8 +181,8 @@ struct cache_head *sunrpc_cache_update(struct cache_detail *detail,
hlist_add_head(&tmp->cache_list, &detail->hash_table[hash]); hlist_add_head(&tmp->cache_list, &detail->hash_table[hash]);
detail->entries++; detail->entries++;
cache_get(tmp); cache_get(tmp);
cache_fresh_locked(tmp, new->expiry_time); cache_fresh_locked(tmp, new->expiry_time, detail);
cache_fresh_locked(old, 0); cache_fresh_locked(old, 0, detail);
write_unlock(&detail->hash_lock); write_unlock(&detail->hash_lock);
cache_fresh_unlocked(tmp, detail); cache_fresh_unlocked(tmp, detail);
cache_fresh_unlocked(old, detail); cache_fresh_unlocked(old, detail);
...@@ -219,7 +227,8 @@ static int try_to_negate_entry(struct cache_detail *detail, struct cache_head *h ...@@ -219,7 +227,8 @@ static int try_to_negate_entry(struct cache_detail *detail, struct cache_head *h
rv = cache_is_valid(h); rv = cache_is_valid(h);
if (rv == -EAGAIN) { if (rv == -EAGAIN) {
set_bit(CACHE_NEGATIVE, &h->flags); set_bit(CACHE_NEGATIVE, &h->flags);
cache_fresh_locked(h, seconds_since_boot()+CACHE_NEW_EXPIRY); cache_fresh_locked(h, seconds_since_boot()+CACHE_NEW_EXPIRY,
detail);
rv = -ENOENT; rv = -ENOENT;
} }
write_unlock(&detail->hash_lock); write_unlock(&detail->hash_lock);
...@@ -487,10 +496,13 @@ EXPORT_SYMBOL_GPL(cache_flush); ...@@ -487,10 +496,13 @@ EXPORT_SYMBOL_GPL(cache_flush);
void cache_purge(struct cache_detail *detail) void cache_purge(struct cache_detail *detail)
{ {
detail->flush_time = LONG_MAX; time_t now = seconds_since_boot();
if (detail->flush_time >= now)
now = detail->flush_time + 1;
/* 'now' is the maximum value any 'last_refresh' can have */
detail->flush_time = now;
detail->nextcheck = seconds_since_boot(); detail->nextcheck = seconds_since_boot();
cache_flush(); cache_flush();
detail->flush_time = 1;
} }
EXPORT_SYMBOL_GPL(cache_purge); EXPORT_SYMBOL_GPL(cache_purge);
...@@ -1436,6 +1448,7 @@ static ssize_t write_flush(struct file *file, const char __user *buf, ...@@ -1436,6 +1448,7 @@ static ssize_t write_flush(struct file *file, const char __user *buf,
{ {
char tbuf[20]; char tbuf[20];
char *bp, *ep; char *bp, *ep;
time_t then, now;
if (*ppos || count > sizeof(tbuf)-1) if (*ppos || count > sizeof(tbuf)-1)
return -EINVAL; return -EINVAL;
...@@ -1447,8 +1460,22 @@ static ssize_t write_flush(struct file *file, const char __user *buf, ...@@ -1447,8 +1460,22 @@ static ssize_t write_flush(struct file *file, const char __user *buf,
return -EINVAL; return -EINVAL;
bp = tbuf; bp = tbuf;
cd->flush_time = get_expiry(&bp); then = get_expiry(&bp);
cd->nextcheck = seconds_since_boot(); now = seconds_since_boot();
cd->nextcheck = now;
/* Can only set flush_time to 1 second beyond "now", or
* possibly 1 second beyond flushtime. This is because
* flush_time never goes backwards so it mustn't get too far
* ahead of time.
*/
if (then >= now) {
/* Want to flush everything, so behave like cache_purge() */
if (cd->flush_time >= now)
now = cd->flush_time + 1;
then = now;
}
cd->flush_time = then;
cache_flush(); cache_flush();
*ppos += count; *ppos += count;
......
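The new write_flush() logic boils down to the two rules spelled out above: flush_time never moves backwards, and a request to flush "now or later" is clamped so it behaves like cache_purge(), one second beyond the latest flush already recorded. A worked example with illustrative numbers (seconds since boot):

/*
 * now = 500, cd->flush_time = 500 (a flush already done this second),
 * and the user writes 600:
 *   then (600) >= now (500), so treat it as "flush everything":
 *     cd->flush_time (500) >= now  ->  now = 501
 *     then = 501;  cd->flush_time = 501
 * Every entry with last_refresh <= 501 is now expired, and cache_init()
 * above gives newly created entries last_refresh >= 502, so they survive.
 */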
...@@ -181,7 +181,7 @@ int svc_send_common(struct socket *sock, struct xdr_buf *xdr, ...@@ -181,7 +181,7 @@ int svc_send_common(struct socket *sock, struct xdr_buf *xdr,
struct page **ppage = xdr->pages; struct page **ppage = xdr->pages;
size_t base = xdr->page_base; size_t base = xdr->page_base;
unsigned int pglen = xdr->page_len; unsigned int pglen = xdr->page_len;
unsigned int flags = MSG_MORE; unsigned int flags = MSG_MORE | MSG_SENDPAGE_NOTLAST;
int slen; int slen;
int len = 0; int len = 0;
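MSG_SENDPAGE_NOTLAST is the sendpage() counterpart of MSG_MORE: it tells the transport that further page fragments follow, so it can hold off pushing the segment. The rest of svc_send_common() is not shown in this hunk; a generic usage sketch of the flag (illustrative only, not this function's code) keeps the hint on every page except the final one:

/* illustrative pattern, not taken from this diff */
static int send_pages(struct socket *sock, struct page **pages, int npages)
{
	int i, ret = 0;

	for (i = 0; i < npages; i++) {
		int flags = (i < npages - 1) ?
				(MSG_MORE | MSG_SENDPAGE_NOTLAST) : 0;

		ret = kernel_sendpage(sock, pages[i], 0, PAGE_SIZE, flags);
		if (ret < 0)
			break;
	}
	return ret;
}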
...@@ -399,6 +399,31 @@ static int svc_sock_secure_port(struct svc_rqst *rqstp) ...@@ -399,6 +399,31 @@ static int svc_sock_secure_port(struct svc_rqst *rqstp)
return svc_port_is_privileged(svc_addr(rqstp)); return svc_port_is_privileged(svc_addr(rqstp));
} }
static bool sunrpc_waitqueue_active(wait_queue_head_t *wq)
{
if (!wq)
return false;
/*
* There should normally be a memory barrier here--see
* wq_has_sleeper().
*
* It appears that it isn't currently necessary, though, basically
* because callers all appear to have sufficient memory barriers
* between the time the relevant change is made and the
* time they call these callbacks.
*
* The nfsd code itself doesn't actually explicitly wait on
* these waitqueues, but it may wait on them for example in
* sendpage() or sendmsg() calls. (And those may be the only
* places, since it uses nonblocking reads.)
*
* Maybe we should add the memory barriers anyway, but these are
* hot paths so we'd need to be convinced there's no significant
* penalty.
*/
return waitqueue_active(wq);
}
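For contrast with the comment above: the conventional waker-side pattern that wq_has_sleeper() encapsulates pairs an explicit barrier with the waitqueue_active() check, so a just-arrived sleeper cannot be missed. A generic sketch of that pattern (not code from this diff):

static void wake_if_needed(wait_queue_head_t *wq, bool *cond)
{
	*cond = true;
	/*
	 * Pairs with the barrier implied by prepare_to_wait() /
	 * set_current_state() on the sleeping side; without it the
	 * condition store and the waitqueue check may be reordered
	 * and a sleeper missed.
	 */
	smp_mb();
	if (waitqueue_active(wq))
		wake_up_interruptible(wq);
}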
/* /*
* INET callback when data has been received on the socket. * INET callback when data has been received on the socket.
*/ */
...@@ -414,7 +439,7 @@ static void svc_udp_data_ready(struct sock *sk) ...@@ -414,7 +439,7 @@ static void svc_udp_data_ready(struct sock *sk)
set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags); set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
svc_xprt_enqueue(&svsk->sk_xprt); svc_xprt_enqueue(&svsk->sk_xprt);
} }
if (wq && waitqueue_active(wq)) if (sunrpc_waitqueue_active(wq))
wake_up_interruptible(wq); wake_up_interruptible(wq);
} }
...@@ -432,7 +457,7 @@ static void svc_write_space(struct sock *sk) ...@@ -432,7 +457,7 @@ static void svc_write_space(struct sock *sk)
svc_xprt_enqueue(&svsk->sk_xprt); svc_xprt_enqueue(&svsk->sk_xprt);
} }
if (wq && waitqueue_active(wq)) { if (sunrpc_waitqueue_active(wq)) {
dprintk("RPC svc_write_space: someone sleeping on %p\n", dprintk("RPC svc_write_space: someone sleeping on %p\n",
svsk); svsk);
wake_up_interruptible(wq); wake_up_interruptible(wq);
...@@ -787,7 +812,7 @@ static void svc_tcp_listen_data_ready(struct sock *sk) ...@@ -787,7 +812,7 @@ static void svc_tcp_listen_data_ready(struct sock *sk)
} }
wq = sk_sleep(sk); wq = sk_sleep(sk);
if (wq && waitqueue_active(wq)) if (sunrpc_waitqueue_active(wq))
wake_up_interruptible_all(wq); wake_up_interruptible_all(wq);
} }
...@@ -808,7 +833,7 @@ static void svc_tcp_state_change(struct sock *sk) ...@@ -808,7 +833,7 @@ static void svc_tcp_state_change(struct sock *sk)
set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags); set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags);
svc_xprt_enqueue(&svsk->sk_xprt); svc_xprt_enqueue(&svsk->sk_xprt);
} }
if (wq && waitqueue_active(wq)) if (sunrpc_waitqueue_active(wq))
wake_up_interruptible_all(wq); wake_up_interruptible_all(wq);
} }
...@@ -823,7 +848,7 @@ static void svc_tcp_data_ready(struct sock *sk) ...@@ -823,7 +848,7 @@ static void svc_tcp_data_ready(struct sock *sk)
set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags); set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
svc_xprt_enqueue(&svsk->sk_xprt); svc_xprt_enqueue(&svsk->sk_xprt);
} }
if (wq && waitqueue_active(wq)) if (sunrpc_waitqueue_active(wq))
wake_up_interruptible(wq); wake_up_interruptible(wq);
} }
...@@ -1367,7 +1392,6 @@ EXPORT_SYMBOL_GPL(svc_sock_update_bufs); ...@@ -1367,7 +1392,6 @@ EXPORT_SYMBOL_GPL(svc_sock_update_bufs);
/* /*
* Initialize socket for RPC use and create svc_sock struct * Initialize socket for RPC use and create svc_sock struct
* XXX: May want to setsockopt SO_SNDBUF and SO_RCVBUF.
*/ */
static struct svc_sock *svc_setup_socket(struct svc_serv *serv, static struct svc_sock *svc_setup_socket(struct svc_serv *serv,
struct socket *sock, struct socket *sock,
...@@ -1594,7 +1618,7 @@ static void svc_sock_detach(struct svc_xprt *xprt) ...@@ -1594,7 +1618,7 @@ static void svc_sock_detach(struct svc_xprt *xprt)
sk->sk_write_space = svsk->sk_owspace; sk->sk_write_space = svsk->sk_owspace;
wq = sk_sleep(sk); wq = sk_sleep(sk);
if (wq && waitqueue_active(wq)) if (sunrpc_waitqueue_active(wq))
wake_up_interruptible(wq); wake_up_interruptible(wq);
} }
......