Commit 89e0edfb authored by Benjamin Coddington, committed by J. Bruce Fields

lockd: Convert NLM service fl_owner to nlm_lockowner

Do as the NLM client: allocate and track a struct nlm_lockowner for use as
the fl_owner for locks created by the NLM server.  This allows us to keep
the svid within this structure for matching locks, and will allow us to
track the pid of lockd in a future patch.  It should also allow easier
reference of the nlm_host in conflicting locks, and simplify lock hashing
and comparison.
Signed-off-by: Benjamin Coddington <bcodding@redhat.com>
[bfields@redhat.com: fix type of some error returns]
Signed-off-by: J. Bruce Fields <bfields@redhat.com>
parent 9de3ec1d
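
For orientation: the structure the server adopts here is the same struct nlm_lockowner the NLM client already uses, declared in include/linux/lockd/lockd.h. Roughly as follows; the field comments are editorial, not from the patch:

struct nlm_lockowner {
	struct list_head list;		/* entry on host->h_lockowners */
	refcount_t count;		/* see nlmsvc_get/put_lockowner() below */
	struct nlm_host *host;		/* pinned while the lockowner lives */
	fl_owner_t owner;		/* client-side owner cookie; unused by the server */
	uint32_t pid;			/* on the server, the client's svid */
};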
--- a/fs/lockd/svc4proc.c
+++ b/fs/lockd/svc4proc.c
@@ -46,8 +46,13 @@ nlm4svc_retrieve_args(struct svc_rqst *rqstp, struct nlm_args *argp,
 		/* Set up the missing parts of the file_lock structure */
 		lock->fl.fl_file = file->f_file;
-		lock->fl.fl_owner = (fl_owner_t) host;
 		lock->fl.fl_lmops = &nlmsvc_lock_operations;
+		nlmsvc_locks_init_private(&lock->fl, host, (pid_t)lock->svid);
+		if (!lock->fl.fl_owner) {
+			/* lockowner allocation has failed */
+			nlmsvc_release_host(host);
+			return nlm_lck_denied_nolocks;
+		}
 	}
 	return 0;
@@ -94,6 +99,7 @@ __nlm4svc_proc_test(struct svc_rqst *rqstp, struct nlm_res *resp)
 	else
 		dprintk("lockd: TEST4 status %d\n", ntohl(resp->status));
 
+	nlmsvc_release_lockowner(&argp->lock);
 	nlmsvc_release_host(host);
 	nlm_release_file(file);
 	return rc;
@@ -142,6 +148,7 @@ __nlm4svc_proc_lock(struct svc_rqst *rqstp, struct nlm_res *resp)
 	else
 		dprintk("lockd: LOCK status %d\n", ntohl(resp->status));
 
+	nlmsvc_release_lockowner(&argp->lock);
 	nlmsvc_release_host(host);
 	nlm_release_file(file);
 	return rc;
@@ -178,6 +185,7 @@ __nlm4svc_proc_cancel(struct svc_rqst *rqstp, struct nlm_res *resp)
 	resp->status = nlmsvc_cancel_blocked(SVC_NET(rqstp), file, &argp->lock);
 	dprintk("lockd: CANCEL status %d\n", ntohl(resp->status));
+	nlmsvc_release_lockowner(&argp->lock);
 	nlmsvc_release_host(host);
 	nlm_release_file(file);
 	return rpc_success;
@@ -217,6 +225,7 @@ __nlm4svc_proc_unlock(struct svc_rqst *rqstp, struct nlm_res *resp)
 	resp->status = nlmsvc_unlock(SVC_NET(rqstp), file, &argp->lock);
 	dprintk("lockd: UNLOCK status %d\n", ntohl(resp->status));
+	nlmsvc_release_lockowner(&argp->lock);
 	nlmsvc_release_host(host);
 	nlm_release_file(file);
 	return rpc_success;
@@ -365,6 +374,7 @@ nlm4svc_proc_share(struct svc_rqst *rqstp)
 	resp->status = nlmsvc_share_file(host, file, argp);
 	dprintk("lockd: SHARE status %d\n", ntohl(resp->status));
+	nlmsvc_release_lockowner(&argp->lock);
 	nlmsvc_release_host(host);
 	nlm_release_file(file);
 	return rpc_success;
@@ -399,6 +409,7 @@ nlm4svc_proc_unshare(struct svc_rqst *rqstp)
 	resp->status = nlmsvc_unshare_file(host, file, argp);
 	dprintk("lockd: UNSHARE status %d\n", ntohl(resp->status));
+	nlmsvc_release_lockowner(&argp->lock);
 	nlmsvc_release_host(host);
 	nlm_release_file(file);
 	return rpc_success;
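
Taken together, the svc4proc.c hunks follow one pattern: nlm4svc_retrieve_args() now attaches a referenced lockowner to the request's file_lock, and every procedure exit pairs that with nlmsvc_release_lockowner(). A comment-form sketch of the per-request lifetime implied by these hunks (not literal kernel code; the v1/v3 procedures further down mirror it):

/*
 * Per-request reference flow assumed by the hunks above:
 *
 *   nlm4svc_retrieve_args()
 *     nlmsvc_locks_init_private()           finds or allocates the lockowner;
 *                                           fl.fl_owner now holds one reference
 *   ... TEST/LOCK/CANCEL/UNLOCK/SHARE/UNSHARE body runs ...
 *   nlmsvc_release_lockowner(&argp->lock)   drops the fl_owner reference
 *   nlmsvc_release_host(host)               drops the host reference
 *   nlm_release_file(file)                  drops the file reference
 */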
--- a/fs/lockd/svclock.c
+++ b/fs/lockd/svclock.c
@@ -332,6 +332,93 @@ void nlmsvc_traverse_blocks(struct nlm_host *host,
 	mutex_unlock(&file->f_mutex);
 }
+static struct nlm_lockowner *
+nlmsvc_get_lockowner(struct nlm_lockowner *lockowner)
+{
+	refcount_inc(&lockowner->count);
+	return lockowner;
+}
+
+static void nlmsvc_put_lockowner(struct nlm_lockowner *lockowner)
+{
+	if (!refcount_dec_and_lock(&lockowner->count, &lockowner->host->h_lock))
+		return;
+	list_del(&lockowner->list);
+	spin_unlock(&lockowner->host->h_lock);
+	nlmsvc_release_host(lockowner->host);
+	kfree(lockowner);
+}
+
+static struct nlm_lockowner *__nlmsvc_find_lockowner(struct nlm_host *host, pid_t pid)
+{
+	struct nlm_lockowner *lockowner;
+
+	list_for_each_entry(lockowner, &host->h_lockowners, list) {
+		if (lockowner->pid != pid)
+			continue;
+		return nlmsvc_get_lockowner(lockowner);
+	}
+	return NULL;
+}
+
+static struct nlm_lockowner *nlmsvc_find_lockowner(struct nlm_host *host, pid_t pid)
+{
+	struct nlm_lockowner *res, *new = NULL;
+
+	spin_lock(&host->h_lock);
+	res = __nlmsvc_find_lockowner(host, pid);
+	if (res == NULL) {
+		spin_unlock(&host->h_lock);
+		new = kmalloc(sizeof(*res), GFP_KERNEL);
+		spin_lock(&host->h_lock);
+		res = __nlmsvc_find_lockowner(host, pid);
+		if (res == NULL && new != NULL) {
+			res = new;
+			/* fs/locks.c will manage the refcount through lock_ops */
+			refcount_set(&new->count, 1);
+			new->pid = pid;
+			new->host = nlm_get_host(host);
+			list_add(&new->list, &host->h_lockowners);
+			new = NULL;
+		}
+	}
+	spin_unlock(&host->h_lock);
+	kfree(new);
+	return res;
+}
+
+void
+nlmsvc_release_lockowner(struct nlm_lock *lock)
+{
+	if (lock->fl.fl_owner)
+		nlmsvc_put_lockowner(lock->fl.fl_owner);
+}
+
+static void nlmsvc_locks_copy_lock(struct file_lock *new, struct file_lock *fl)
+{
+	struct nlm_lockowner *nlm_lo = (struct nlm_lockowner *)fl->fl_owner;
+
+	new->fl_owner = nlmsvc_get_lockowner(nlm_lo);
+}
+
+static void nlmsvc_locks_release_private(struct file_lock *fl)
+{
+	nlmsvc_put_lockowner((struct nlm_lockowner *)fl->fl_owner);
+}
+
+const struct file_lock_operations nlmsvc_lock_ops = {
+	.fl_copy_lock = nlmsvc_locks_copy_lock,
+	.fl_release_private = nlmsvc_locks_release_private,
+};
+
+void nlmsvc_locks_init_private(struct file_lock *fl, struct nlm_host *host,
+						pid_t pid)
+{
+	fl->fl_owner = nlmsvc_find_lockowner(host, pid);
+	if (fl->fl_owner != NULL)
+		fl->fl_ops = &nlmsvc_lock_ops;
+}
+
 /*
  * Initialize arguments for GRANTED call. The nlm_rqst structure
  * has been cleared already.
@@ -509,6 +596,7 @@ nlmsvc_testlock(struct svc_rqst *rqstp, struct nlm_file *file,
 {
 	int error;
 	__be32 ret;
+	struct nlm_lockowner *test_owner;
 
 	dprintk("lockd: nlmsvc_testlock(%s/%ld, ty=%d, %Ld-%Ld)\n",
 		locks_inode(file->f_file)->i_sb->s_id,
@@ -522,6 +610,9 @@ nlmsvc_testlock(struct svc_rqst *rqstp, struct nlm_file *file,
 		goto out;
 	}
 
+	/* If there's a conflicting lock, remember to clean up the test lock */
+	test_owner = (struct nlm_lockowner *)lock->fl.fl_owner;
+
 	error = vfs_test_lock(file->f_file, &lock->fl);
 	if (error) {
 		/* We can't currently deal with deferred test requests */
@@ -548,6 +639,11 @@ nlmsvc_testlock(struct svc_rqst *rqstp, struct nlm_file *file,
 	conflock->fl.fl_start = lock->fl.fl_start;
 	conflock->fl.fl_end = lock->fl.fl_end;
 	locks_release_private(&lock->fl);
+
+	/* Clean up the test lock */
+	lock->fl.fl_owner = NULL;
+	nlmsvc_put_lockowner(test_owner);
+
 	ret = nlm_lck_denied;
 out:
 	return ret;
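
nlmsvc_find_lockowner() above uses the classic drop-the-lock-to-allocate pattern: search under the spinlock; on a miss, release it (kmalloc with GFP_KERNEL may sleep), retake it, and search again before inserting, because another thread may have installed an identical lockowner in the window. A minimal userspace analog of that pattern, with illustrative names and a pthread mutex standing in for the spinlock:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct owner {
	struct owner *next;
	int refcount;			/* toy refcount; lockd uses refcount_t */
	int pid;
};

static struct owner *owners;
static pthread_mutex_t owners_lock = PTHREAD_MUTEX_INITIALIZER;

/* Caller must hold owners_lock; takes a reference on a hit. */
static struct owner *__find_owner(int pid)
{
	struct owner *o;

	for (o = owners; o != NULL; o = o->next) {
		if (o->pid != pid)
			continue;
		o->refcount++;
		return o;
	}
	return NULL;
}

static struct owner *find_owner(int pid)
{
	struct owner *res, *new = NULL;

	pthread_mutex_lock(&owners_lock);
	res = __find_owner(pid);
	if (res == NULL) {
		pthread_mutex_unlock(&owners_lock);
		new = malloc(sizeof(*new));	/* may block; lock dropped */
		pthread_mutex_lock(&owners_lock);
		res = __find_owner(pid);	/* re-check: did we race? */
		if (res == NULL && new != NULL) {
			res = new;
			new->refcount = 1;
			new->pid = pid;
			new->next = owners;
			owners = new;
			new = NULL;
		}
	}
	pthread_mutex_unlock(&owners_lock);
	free(new);	/* frees the allocation only if we lost the race */
	return res;
}

int main(void)
{
	struct owner *a = find_owner(42);
	struct owner *b = find_owner(42);

	/* Both lookups resolve to the same owner object. */
	printf("same object: %s, refcount %d\n",
	       a == b ? "yes" : "no", a->refcount);
	return 0;
}

Compile with cc -pthread. The kernel version additionally pins the host (nlm_get_host) for the lockowner's lifetime and lets fs/locks.c manage further references through the fl_ops callbacks.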
--- a/fs/lockd/svcproc.c
+++ b/fs/lockd/svcproc.c
@@ -76,8 +76,13 @@ nlmsvc_retrieve_args(struct svc_rqst *rqstp, struct nlm_args *argp,
 		/* Set up the missing parts of the file_lock structure */
 		lock->fl.fl_file = file->f_file;
-		lock->fl.fl_owner = (fl_owner_t) host;
 		lock->fl.fl_lmops = &nlmsvc_lock_operations;
+		nlmsvc_locks_init_private(&lock->fl, host, (pid_t)lock->svid);
+		if (!lock->fl.fl_owner) {
+			/* lockowner allocation has failed */
+			nlmsvc_release_host(host);
+			return nlm_lck_denied_nolocks;
+		}
 	}
 	return 0;
@@ -125,6 +130,7 @@ __nlmsvc_proc_test(struct svc_rqst *rqstp, struct nlm_res *resp)
 	dprintk("lockd: TEST status %d vers %d\n",
 		ntohl(resp->status), rqstp->rq_vers);
 
+	nlmsvc_release_lockowner(&argp->lock);
 	nlmsvc_release_host(host);
 	nlm_release_file(file);
 	return rc;
@@ -173,6 +179,7 @@ __nlmsvc_proc_lock(struct svc_rqst *rqstp, struct nlm_res *resp)
 	else
 		dprintk("lockd: LOCK status %d\n", ntohl(resp->status));
 
+	nlmsvc_release_lockowner(&argp->lock);
 	nlmsvc_release_host(host);
 	nlm_release_file(file);
 	return rc;
@@ -210,6 +217,7 @@ __nlmsvc_proc_cancel(struct svc_rqst *rqstp, struct nlm_res *resp)
 	resp->status = cast_status(nlmsvc_cancel_blocked(net, file, &argp->lock));
 	dprintk("lockd: CANCEL status %d\n", ntohl(resp->status));
+	nlmsvc_release_lockowner(&argp->lock);
 	nlmsvc_release_host(host);
 	nlm_release_file(file);
 	return rpc_success;
@@ -250,6 +258,7 @@ __nlmsvc_proc_unlock(struct svc_rqst *rqstp, struct nlm_res *resp)
 	resp->status = cast_status(nlmsvc_unlock(net, file, &argp->lock));
 	dprintk("lockd: UNLOCK status %d\n", ntohl(resp->status));
+	nlmsvc_release_lockowner(&argp->lock);
 	nlmsvc_release_host(host);
 	nlm_release_file(file);
 	return rpc_success;
@@ -408,6 +417,7 @@ nlmsvc_proc_share(struct svc_rqst *rqstp)
 	resp->status = cast_status(nlmsvc_share_file(host, file, argp));
 	dprintk("lockd: SHARE status %d\n", ntohl(resp->status));
+	nlmsvc_release_lockowner(&argp->lock);
 	nlmsvc_release_host(host);
 	nlm_release_file(file);
 	return rpc_success;
@@ -442,6 +452,7 @@ nlmsvc_proc_unshare(struct svc_rqst *rqstp)
 	resp->status = cast_status(nlmsvc_unshare_file(host, file, argp));
 	dprintk("lockd: UNSHARE status %d\n", ntohl(resp->status));
+	nlmsvc_release_lockowner(&argp->lock);
 	nlmsvc_release_host(host);
 	nlm_release_file(file);
 	return rpc_success;
--- a/fs/lockd/svcsubs.c
+++ b/fs/lockd/svcsubs.c
@@ -180,7 +180,7 @@ nlm_traverse_locks(struct nlm_host *host, struct nlm_file *file,
 		/* update current lock count */
 		file->f_locks++;
-		lockhost = (struct nlm_host *) fl->fl_owner;
+		lockhost = ((struct nlm_lockowner *)fl->fl_owner)->host;
 		if (match(lockhost, host)) {
 			struct file_lock lock = *fl;
--- a/fs/lockd/xdr.c
+++ b/fs/lockd/xdr.c
@@ -126,7 +126,6 @@ nlm_decode_lock(__be32 *p, struct nlm_lock *lock)
 	lock->svid = ntohl(*p++);
 
 	locks_init_lock(fl);
 	fl->fl_owner = current->files;
-	fl->fl_pid = (pid_t)lock->svid;
 	fl->fl_flags = FL_POSIX;
 	fl->fl_type = F_RDLCK;		/* as good as anything else */
--- a/fs/lockd/xdr4.c
+++ b/fs/lockd/xdr4.c
@@ -118,7 +118,6 @@ nlm4_decode_lock(__be32 *p, struct nlm_lock *lock)
 	lock->svid = ntohl(*p++);
 
 	locks_init_lock(fl);
 	fl->fl_owner = current->files;
-	fl->fl_pid = (pid_t)lock->svid;
 	fl->fl_flags = FL_POSIX;
 	fl->fl_type = F_RDLCK;		/* as good as anything else */
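
With both decoders no longer stashing the svid in fl_pid, fl_pid is left alone here, which is what frees it to carry lockd's own pid in the future patch the commit message mentions. The svid still arrives in lock->svid and ends up in the lockowner, so it remains recoverable through fl_owner; a hypothetical accessor, not in this patch, shows how:

/* Hypothetical helper: recover the client's svid from a server-side lock. */
static inline pid_t nlm_fl_svid(const struct file_lock *fl)
{
	return (pid_t)((struct nlm_lockowner *)fl->fl_owner)->pid;
}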
--- a/include/linux/lockd/lockd.h
+++ b/include/linux/lockd/lockd.h
@@ -282,6 +282,7 @@ void nlmsvc_traverse_blocks(struct nlm_host *, struct nlm_file *,
 					nlm_host_match_fn_t match);
 void	  nlmsvc_grant_reply(struct nlm_cookie *, __be32);
 void	  nlmsvc_release_call(struct nlm_rqst *);
+void	  nlmsvc_locks_init_private(struct file_lock *, struct nlm_host *, pid_t);
 
 /*
  * File handling for the server personality
@@ -289,6 +290,7 @@ void nlmsvc_release_call(struct nlm_rqst *);
 __be32	  nlm_lookup_file(struct svc_rqst *, struct nlm_file **,
 					struct nfs_fh *);
 void	  nlm_release_file(struct nlm_file *);
+void	  nlmsvc_release_lockowner(struct nlm_lock *);
 void	  nlmsvc_mark_resources(struct net *);
 void	  nlmsvc_free_host_resources(struct nlm_host *);
 void	  nlmsvc_invalidate_all(void);
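
One subtlety in the svclock.c changes is worth spelling out: nlmsvc_testlock() saves fl_owner in test_owner before calling vfs_test_lock(), because on a conflict the passed-in file_lock is rewritten with the conflicting lock's data, and the test lock's owner reference would otherwise be leaked. The cleanup ordering, as a sketch:

/*
 * Cleanup ordering in nlmsvc_testlock() after this patch:
 *
 *   test_owner = (struct nlm_lockowner *)lock->fl.fl_owner;
 *   vfs_test_lock(file->f_file, &lock->fl);
 *       (on conflict, lock->fl now describes the conflicting lock,
 *        including its fl_owner)
 *   ...
 *   lock->fl.fl_owner = NULL;           (don't touch the conflock's owner)
 *   nlmsvc_put_lockowner(test_owner);   (drop the test lock's reference)
 */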