Commit 2dad3206 authored by Linus Torvalds

Merge branch 'for-3.1' of git://linux-nfs.org/~bfields/linux

* 'for-3.1' of git://linux-nfs.org/~bfields/linux:
  nfsd: don't break lease on CLAIM_DELEGATE_CUR
  locks: rename lock-manager ops
  nfsd4: update nfsv4.1 implementation notes
  nfsd: turn on reply cache for NFSv4
  nfsd4: call nfsd4_release_compoundargs from pc_release
  nfsd41: Deny new lock before RECLAIM_COMPLETE done
  fs: locks: remove init_once
  nfsd41: check the size of request
  nfsd41: error out when client sets maxreq_sz or maxresp_sz too small
  nfsd4: fix file leak on open_downgrade
  nfsd4: remember to put RW access on stateid destruction
  NFSD: Added TEST_STATEID operation
  NFSD: added FREE_STATEID operation
  svcrpc: fix list-corrupting race on nfsd shutdown
  rpc: allow autoloading of gss mechanisms
  svcauth_unix.c: quiet sparse noise
  svcsock.c: include sunrpc.h to quiet sparse noise
  nfsd: Remove deprecated nfsctl system call and related code.
  NFSD: allow OP_DESTROY_CLIENTID to be only op in COMPOUND

Fix up trivial conflicts in Documentation/feature-removal-schedule.txt
parents 84635d68 0c12eaff
......@@ -491,16 +491,6 @@ Who: Wey-Yi Guy <wey-yi.w.guy@intel.com>
----------------------------
What: access to nfsd auth cache through sys_nfsservctl or '.' files
in the 'nfsd' filesystem.
When: 3.0
Why: This is a legacy interface which have been replaced by a more
dynamic cache. Continuing to maintain this interface is an
unnecessary burden.
Who: NeilBrown <neilb@suse.de>
----------------------------
What: Legacy, non-standard chassis intrusion detection interface.
When: June 2011
Why: The adm9240, w83792d and w83793 hardware monitoring drivers have
......
......@@ -338,21 +338,21 @@ fl_release_private: maybe no
----------------------- lock_manager_operations ---------------------------
prototypes:
int (*fl_compare_owner)(struct file_lock *, struct file_lock *);
void (*fl_notify)(struct file_lock *); /* unblock callback */
int (*fl_grant)(struct file_lock *, struct file_lock *, int);
void (*fl_release_private)(struct file_lock *);
void (*fl_break)(struct file_lock *); /* break_lease callback */
int (*fl_change)(struct file_lock **, int);
int (*lm_compare_owner)(struct file_lock *, struct file_lock *);
void (*lm_notify)(struct file_lock *); /* unblock callback */
int (*lm_grant)(struct file_lock *, struct file_lock *, int);
void (*lm_release_private)(struct file_lock *);
void (*lm_break)(struct file_lock *); /* break_lease callback */
int (*lm_change)(struct file_lock **, int);
locking rules:
                        file_lock_lock   may block
fl_compare_owner:       yes              no
fl_notify:              yes              no
fl_grant:               no               no
fl_release_private:     maybe            no
fl_break:               yes              no
fl_change               yes              no
lm_compare_owner:       yes              no
lm_notify:              yes              no
lm_grant:               no               no
lm_release_private:     maybe            no
lm_break:               yes              no
lm_change               yes              no
--------------------------- buffer_head -----------------------------------
prototypes:
......
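
The Locking hunk above tracks the lock-manager callback rename from fl_* to lm_*. As a minimal standalone sketch of the renamed interface (not kernel code: struct file_lock is left opaque, and example_lm_notify/example_lock_ops are made-up names), a caller-supplied ops table now looks like this:

#include <stdio.h>

struct file_lock;	/* opaque here; the real definition lives in <linux/fs.h> */

struct lock_manager_operations {
	int  (*lm_compare_owner)(struct file_lock *, struct file_lock *);
	void (*lm_notify)(struct file_lock *);		/* unblock callback */
	int  (*lm_grant)(struct file_lock *, struct file_lock *, int);
	void (*lm_release_private)(struct file_lock *);
	void (*lm_break)(struct file_lock *);		/* break_lease callback */
	int  (*lm_change)(struct file_lock **, int);
};

static void example_lm_notify(struct file_lock *waiter)
{
	/* A real lock manager (lockd, the lease code) would wake or re-drive
	 * the blocked request here; this stub only shows the hookup. */
	(void)waiter;
	printf("lm_notify: waiter unblocked\n");
}

static const struct lock_manager_operations example_lock_ops = {
	.lm_notify = example_lm_notify,	/* unset ops simply aren't called */
};

int main(void)
{
	if (example_lock_ops.lm_notify)
		example_lock_ops.lm_notify(NULL);
	return 0;
}

The nlmsvc_lock_operations and lease_manager_ops hunks further down are the real in-tree users of the renamed fields.
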
......@@ -39,27 +39,17 @@ interoperability problems with future clients. Known issues:
from a linux client are possible, but we aren't really
conformant with the spec (for example, we don't use kerberos
on the backchannel correctly).
- no trunking support: no clients currently take advantage of
trunking, but this is a mandatory feature, and its use is
recommended to clients in a number of places. (E.g. to ensure
timely renewal in case an existing connection's retry timeouts
have gotten too long; see section 8.3 of the RFC.)
Therefore, lack of this feature may cause future clients to
fail.
- Incomplete backchannel support: incomplete backchannel gss
support and no support for BACKCHANNEL_CTL mean that
callbacks (hence delegations and layouts) may not be
available and clients confused by the incomplete
implementation may fail.
- Server reboot recovery is unsupported; if the server reboots,
clients may fail.
- We do not support SSV, which provides security for shared
client-server state (thus preventing unauthorized tampering
with locks and opens, for example). It is mandatory for
servers to support this, though no clients use it yet.
- Mandatory operations which we do not support, such as
DESTROY_CLIENTID, FREE_STATEID, SECINFO_NO_NAME, and
TEST_STATEID, are not currently used by clients, but will be
DESTROY_CLIENTID, are not currently used by clients, but will be
(and the spec recommends their uses in common cases), and
clients should not be expected to know how to recover from the
case where they are not supported. This will eventually cause
......@@ -69,8 +59,9 @@ In addition, some limitations are inherited from the current NFSv4
implementation:
- Incomplete delegation enforcement: if a file is renamed or
unlinked, a client holding a delegation may continue to
indefinitely allow opens of the file under the old name.
unlinked by a local process, a client holding a delegation may
continue to indefinitely allow opens of the file under the old
name.
The table below, taken from the NFSv4.1 document, lists
the operations that are mandatory to implement (REQ), optional
......@@ -99,7 +90,7 @@ Operations
+----------------------+------------+--------------+----------------+
| ACCESS | REQ | | Section 18.1 |
NS | BACKCHANNEL_CTL | REQ | | Section 18.33 |
NS | BIND_CONN_TO_SESSION | REQ | | Section 18.34 |
I | BIND_CONN_TO_SESSION | REQ | | Section 18.34 |
| CLOSE | REQ | | Section 18.2 |
| COMMIT | REQ | | Section 18.3 |
| CREATE | REQ | | Section 18.4 |
......@@ -111,7 +102,7 @@ NS*| DELEGPURGE | OPT | FDELG (REQ) | Section 18.5 |
NS | DESTROY_CLIENTID | REQ | | Section 18.50 |
I | DESTROY_SESSION | REQ | | Section 18.37 |
I | EXCHANGE_ID | REQ | | Section 18.35 |
NS | FREE_STATEID | REQ | | Section 18.38 |
I | FREE_STATEID | REQ | | Section 18.38 |
| GETATTR | REQ | | Section 18.7 |
P | GETDEVICEINFO | OPT | pNFS (REQ) | Section 18.40 |
P | GETDEVICELIST | OPT | pNFS (OPT) | Section 18.41 |
......@@ -145,14 +136,14 @@ NS*| OPENATTR | OPT | | Section 18.17 |
| RESTOREFH | REQ | | Section 18.27 |
| SAVEFH | REQ | | Section 18.28 |
| SECINFO | REQ | | Section 18.29 |
NS | SECINFO_NO_NAME | REC | pNFS files | Section 18.45, |
I | SECINFO_NO_NAME | REC | pNFS files | Section 18.45, |
| | | layout (REQ) | Section 13.12 |
I | SEQUENCE | REQ | | Section 18.46 |
| SETATTR | REQ | | Section 18.30 |
| SETCLIENTID | MNI | | N/A |
| SETCLIENTID_CONFIRM | MNI | | N/A |
NS | SET_SSV | REQ | | Section 18.47 |
NS | TEST_STATEID | REQ | | Section 18.48 |
I | TEST_STATEID | REQ | | Section 18.48 |
| VERIFY | REQ | | Section 18.31 |
NS*| WANT_DELEGATION | OPT | FDELG (OPT) | Section 18.49 |
| WRITE | REQ | | Section 18.32 |
......@@ -206,12 +197,6 @@ CREATE_SESSION:
SEQUENCE:
* no support for dynamic slot table renegotiation (optional)
nfsv4.1 COMPOUND rules:
The following cases aren't supported yet:
* Enforcing of NFS4ERR_NOT_ONLY_OP for: BIND_CONN_TO_SESSION, CREATE_SESSION,
DESTROY_CLIENTID, DESTROY_SESSION, EXCHANGE_ID.
* DESTROY_SESSION MUST be the final operation in the COMPOUND request.
Nonstandard compound limitations:
* No support for a sessions fore channel RPC compound that requires both a
ca_maxrequestsize request and a ca_maxresponsesize reply, so we may
......@@ -219,3 +204,5 @@ Nonstandard compound limitations:
negotiation.
* No more than one IO operation (read, write, readdir) allowed per
compound.
See also http://wiki.linux-nfs.org/wiki/index.php/Server_4.0_and_4.1_issues.
......@@ -1479,7 +1479,6 @@ CONFIG_NFS_FSCACHE=y
CONFIG_NFS_USE_KERNEL_DNS=y
# CONFIG_NFS_USE_NEW_IDMAPPER is not set
CONFIG_NFSD=m
CONFIG_NFSD_DEPRECATED=y
CONFIG_NFSD_V2_ACL=y
CONFIG_NFSD_V3=y
CONFIG_NFSD_V3_ACL=y
......
......@@ -29,7 +29,6 @@ obj-$(CONFIG_EVENTFD) += eventfd.o
obj-$(CONFIG_AIO) += aio.o
obj-$(CONFIG_FILE_LOCKING) += locks.o
obj-$(CONFIG_COMPAT) += compat.o compat_ioctl.o
obj-$(CONFIG_NFSD_DEPRECATED) += nfsctl.o
obj-$(CONFIG_BINFMT_AOUT) += binfmt_aout.o
obj-$(CONFIG_BINFMT_EM86) += binfmt_em86.o
obj-$(CONFIG_BINFMT_MISC) += binfmt_misc.o
......
......@@ -1675,256 +1675,10 @@ asmlinkage long compat_sys_ppoll(struct pollfd __user *ufds,
}
#endif /* HAVE_SET_RESTORE_SIGMASK */
#if (defined(CONFIG_NFSD) || defined(CONFIG_NFSD_MODULE)) && !defined(CONFIG_NFSD_DEPRECATED)
/* Stuff for NFS server syscalls... */
struct compat_nfsctl_svc {
u16 svc32_port;
s32 svc32_nthreads;
};
struct compat_nfsctl_client {
s8 cl32_ident[NFSCLNT_IDMAX+1];
s32 cl32_naddr;
struct in_addr cl32_addrlist[NFSCLNT_ADDRMAX];
s32 cl32_fhkeytype;
s32 cl32_fhkeylen;
u8 cl32_fhkey[NFSCLNT_KEYMAX];
};
struct compat_nfsctl_export {
char ex32_client[NFSCLNT_IDMAX+1];
char ex32_path[NFS_MAXPATHLEN+1];
compat_dev_t ex32_dev;
compat_ino_t ex32_ino;
compat_int_t ex32_flags;
__compat_uid_t ex32_anon_uid;
__compat_gid_t ex32_anon_gid;
};
struct compat_nfsctl_fdparm {
struct sockaddr gd32_addr;
s8 gd32_path[NFS_MAXPATHLEN+1];
compat_int_t gd32_version;
};
struct compat_nfsctl_fsparm {
struct sockaddr gd32_addr;
s8 gd32_path[NFS_MAXPATHLEN+1];
compat_int_t gd32_maxlen;
};
struct compat_nfsctl_arg {
compat_int_t ca32_version; /* safeguard */
union {
struct compat_nfsctl_svc u32_svc;
struct compat_nfsctl_client u32_client;
struct compat_nfsctl_export u32_export;
struct compat_nfsctl_fdparm u32_getfd;
struct compat_nfsctl_fsparm u32_getfs;
} u;
#define ca32_svc u.u32_svc
#define ca32_client u.u32_client
#define ca32_export u.u32_export
#define ca32_getfd u.u32_getfd
#define ca32_getfs u.u32_getfs
};
union compat_nfsctl_res {
__u8 cr32_getfh[NFS_FHSIZE];
struct knfsd_fh cr32_getfs;
};
static int compat_nfs_svc_trans(struct nfsctl_arg *karg,
struct compat_nfsctl_arg __user *arg)
{
if (!access_ok(VERIFY_READ, &arg->ca32_svc, sizeof(arg->ca32_svc)) ||
get_user(karg->ca_version, &arg->ca32_version) ||
__get_user(karg->ca_svc.svc_port, &arg->ca32_svc.svc32_port) ||
__get_user(karg->ca_svc.svc_nthreads,
&arg->ca32_svc.svc32_nthreads))
return -EFAULT;
return 0;
}
static int compat_nfs_clnt_trans(struct nfsctl_arg *karg,
struct compat_nfsctl_arg __user *arg)
{
if (!access_ok(VERIFY_READ, &arg->ca32_client,
sizeof(arg->ca32_client)) ||
get_user(karg->ca_version, &arg->ca32_version) ||
__copy_from_user(&karg->ca_client.cl_ident[0],
&arg->ca32_client.cl32_ident[0],
NFSCLNT_IDMAX) ||
__get_user(karg->ca_client.cl_naddr,
&arg->ca32_client.cl32_naddr) ||
__copy_from_user(&karg->ca_client.cl_addrlist[0],
&arg->ca32_client.cl32_addrlist[0],
(sizeof(struct in_addr) * NFSCLNT_ADDRMAX)) ||
__get_user(karg->ca_client.cl_fhkeytype,
&arg->ca32_client.cl32_fhkeytype) ||
__get_user(karg->ca_client.cl_fhkeylen,
&arg->ca32_client.cl32_fhkeylen) ||
__copy_from_user(&karg->ca_client.cl_fhkey[0],
&arg->ca32_client.cl32_fhkey[0],
NFSCLNT_KEYMAX))
return -EFAULT;
return 0;
}
static int compat_nfs_exp_trans(struct nfsctl_arg *karg,
struct compat_nfsctl_arg __user *arg)
{
if (!access_ok(VERIFY_READ, &arg->ca32_export,
sizeof(arg->ca32_export)) ||
get_user(karg->ca_version, &arg->ca32_version) ||
__copy_from_user(&karg->ca_export.ex_client[0],
&arg->ca32_export.ex32_client[0],
NFSCLNT_IDMAX) ||
__copy_from_user(&karg->ca_export.ex_path[0],
&arg->ca32_export.ex32_path[0],
NFS_MAXPATHLEN) ||
__get_user(karg->ca_export.ex_dev,
&arg->ca32_export.ex32_dev) ||
__get_user(karg->ca_export.ex_ino,
&arg->ca32_export.ex32_ino) ||
__get_user(karg->ca_export.ex_flags,
&arg->ca32_export.ex32_flags) ||
__get_user(karg->ca_export.ex_anon_uid,
&arg->ca32_export.ex32_anon_uid) ||
__get_user(karg->ca_export.ex_anon_gid,
&arg->ca32_export.ex32_anon_gid))
return -EFAULT;
SET_UID(karg->ca_export.ex_anon_uid, karg->ca_export.ex_anon_uid);
SET_GID(karg->ca_export.ex_anon_gid, karg->ca_export.ex_anon_gid);
return 0;
}
static int compat_nfs_getfd_trans(struct nfsctl_arg *karg,
struct compat_nfsctl_arg __user *arg)
{
if (!access_ok(VERIFY_READ, &arg->ca32_getfd,
sizeof(arg->ca32_getfd)) ||
get_user(karg->ca_version, &arg->ca32_version) ||
__copy_from_user(&karg->ca_getfd.gd_addr,
&arg->ca32_getfd.gd32_addr,
(sizeof(struct sockaddr))) ||
__copy_from_user(&karg->ca_getfd.gd_path,
&arg->ca32_getfd.gd32_path,
(NFS_MAXPATHLEN+1)) ||
__get_user(karg->ca_getfd.gd_version,
&arg->ca32_getfd.gd32_version))
return -EFAULT;
return 0;
}
static int compat_nfs_getfs_trans(struct nfsctl_arg *karg,
struct compat_nfsctl_arg __user *arg)
{
if (!access_ok(VERIFY_READ,&arg->ca32_getfs,sizeof(arg->ca32_getfs)) ||
get_user(karg->ca_version, &arg->ca32_version) ||
__copy_from_user(&karg->ca_getfs.gd_addr,
&arg->ca32_getfs.gd32_addr,
(sizeof(struct sockaddr))) ||
__copy_from_user(&karg->ca_getfs.gd_path,
&arg->ca32_getfs.gd32_path,
(NFS_MAXPATHLEN+1)) ||
__get_user(karg->ca_getfs.gd_maxlen,
&arg->ca32_getfs.gd32_maxlen))
return -EFAULT;
return 0;
}
/* This really doesn't need translations, we are only passing
* back a union which contains opaque nfs file handle data.
*/
static int compat_nfs_getfh_res_trans(union nfsctl_res *kres,
union compat_nfsctl_res __user *res)
{
int err;
err = copy_to_user(res, kres, sizeof(*res));
return (err) ? -EFAULT : 0;
}
asmlinkage long compat_sys_nfsservctl(int cmd,
struct compat_nfsctl_arg __user *arg,
union compat_nfsctl_res __user *res)
{
struct nfsctl_arg *karg;
union nfsctl_res *kres;
mm_segment_t oldfs;
int err;
karg = kmalloc(sizeof(*karg), GFP_USER);
kres = kmalloc(sizeof(*kres), GFP_USER);
if(!karg || !kres) {
err = -ENOMEM;
goto done;
}
switch(cmd) {
case NFSCTL_SVC:
err = compat_nfs_svc_trans(karg, arg);
break;
case NFSCTL_ADDCLIENT:
err = compat_nfs_clnt_trans(karg, arg);
break;
case NFSCTL_DELCLIENT:
err = compat_nfs_clnt_trans(karg, arg);
break;
case NFSCTL_EXPORT:
case NFSCTL_UNEXPORT:
err = compat_nfs_exp_trans(karg, arg);
break;
case NFSCTL_GETFD:
err = compat_nfs_getfd_trans(karg, arg);
break;
case NFSCTL_GETFS:
err = compat_nfs_getfs_trans(karg, arg);
break;
default:
err = -EINVAL;
break;
}
if (err)
goto done;
oldfs = get_fs();
set_fs(KERNEL_DS);
/* The __user pointer casts are valid because of the set_fs() */
err = sys_nfsservctl(cmd, (void __user *) karg, (void __user *) kres);
set_fs(oldfs);
if (err)
goto done;
if((cmd == NFSCTL_GETFD) ||
(cmd == NFSCTL_GETFS))
err = compat_nfs_getfh_res_trans(kres, res);
done:
kfree(karg);
kfree(kres);
return err;
}
#else /* !NFSD */
long asmlinkage compat_sys_nfsservctl(int cmd, void *notused, void *notused2)
{
return sys_ni_syscall();
}
#endif
#ifdef CONFIG_EPOLL
......
......@@ -92,7 +92,7 @@ static void do_unlock_close(struct dlm_ls *ls, u64 number,
op->info.number = number;
op->info.start = 0;
op->info.end = OFFSET_MAX;
if (fl->fl_lmops && fl->fl_lmops->fl_grant)
if (fl->fl_lmops && fl->fl_lmops->lm_grant)
op->info.owner = (__u64) fl->fl_pid;
else
op->info.owner = (__u64)(long) fl->fl_owner;
......@@ -128,11 +128,11 @@ int dlm_posix_lock(dlm_lockspace_t *lockspace, u64 number, struct file *file,
op->info.number = number;
op->info.start = fl->fl_start;
op->info.end = fl->fl_end;
if (fl->fl_lmops && fl->fl_lmops->fl_grant) {
if (fl->fl_lmops && fl->fl_lmops->lm_grant) {
/* fl_owner is lockd which doesn't distinguish
processes on the nfs client */
op->info.owner = (__u64) fl->fl_pid;
xop->callback = fl->fl_lmops->fl_grant;
xop->callback = fl->fl_lmops->lm_grant;
locks_init_lock(&xop->flc);
locks_copy_lock(&xop->flc, fl);
xop->fl = fl;
......@@ -268,7 +268,7 @@ int dlm_posix_unlock(dlm_lockspace_t *lockspace, u64 number, struct file *file,
op->info.number = number;
op->info.start = fl->fl_start;
op->info.end = fl->fl_end;
if (fl->fl_lmops && fl->fl_lmops->fl_grant)
if (fl->fl_lmops && fl->fl_lmops->lm_grant)
op->info.owner = (__u64) fl->fl_pid;
else
op->info.owner = (__u64)(long) fl->fl_owner;
......@@ -327,7 +327,7 @@ int dlm_posix_get(dlm_lockspace_t *lockspace, u64 number, struct file *file,
op->info.number = number;
op->info.start = fl->fl_start;
op->info.end = fl->fl_end;
if (fl->fl_lmops && fl->fl_lmops->fl_grant)
if (fl->fl_lmops && fl->fl_lmops->lm_grant)
op->info.owner = (__u64) fl->fl_pid;
else
op->info.owner = (__u64)(long) fl->fl_owner;
......
......@@ -1507,7 +1507,7 @@ static int fuse_setlk(struct file *file, struct file_lock *fl, int flock)
pid_t pid = fl->fl_type != F_UNLCK ? current->tgid : 0;
int err;
if (fl->fl_lmops && fl->fl_lmops->fl_grant) {
if (fl->fl_lmops && fl->fl_lmops->lm_grant) {
/* NLM needs asynchronous locks, which we don't support yet */
return -ENOLCK;
}
......
......@@ -632,7 +632,7 @@ nlmsvc_cancel_blocked(struct nlm_file *file, struct nlm_lock *lock)
/*
* This is a callback from the filesystem for VFS file lock requests.
* It will be used if fl_grant is defined and the filesystem can not
* It will be used if lm_grant is defined and the filesystem can not
* respond to the request immediately.
* For GETLK request it will copy the reply to the nlm_block.
* For SETLK or SETLKW request it will get the local posix lock.
......@@ -719,9 +719,9 @@ static int nlmsvc_same_owner(struct file_lock *fl1, struct file_lock *fl2)
}
const struct lock_manager_operations nlmsvc_lock_operations = {
.fl_compare_owner = nlmsvc_same_owner,
.fl_notify = nlmsvc_notify_blocked,
.fl_grant = nlmsvc_grant_deferred,
.lm_compare_owner = nlmsvc_same_owner,
.lm_notify = nlmsvc_notify_blocked,
.lm_grant = nlmsvc_grant_deferred,
};
/*
......
......@@ -160,26 +160,20 @@ EXPORT_SYMBOL_GPL(unlock_flocks);
static struct kmem_cache *filelock_cache __read_mostly;
static void locks_init_lock_always(struct file_lock *fl)
static void locks_init_lock_heads(struct file_lock *fl)
{
fl->fl_next = NULL;
fl->fl_fasync = NULL;
fl->fl_owner = NULL;
fl->fl_pid = 0;
fl->fl_nspid = NULL;
fl->fl_file = NULL;
fl->fl_flags = 0;
fl->fl_type = 0;
fl->fl_start = fl->fl_end = 0;
INIT_LIST_HEAD(&fl->fl_link);
INIT_LIST_HEAD(&fl->fl_block);
init_waitqueue_head(&fl->fl_wait);
}
/* Allocate an empty lock structure. */
struct file_lock *locks_alloc_lock(void)
{
struct file_lock *fl = kmem_cache_alloc(filelock_cache, GFP_KERNEL);
struct file_lock *fl = kmem_cache_zalloc(filelock_cache, GFP_KERNEL);
if (fl)
locks_init_lock_always(fl);
locks_init_lock_heads(fl);
return fl;
}
......@@ -193,8 +187,8 @@ void locks_release_private(struct file_lock *fl)
fl->fl_ops = NULL;
}
if (fl->fl_lmops) {
if (fl->fl_lmops->fl_release_private)
fl->fl_lmops->fl_release_private(fl);
if (fl->fl_lmops->lm_release_private)
fl->fl_lmops->lm_release_private(fl);
fl->fl_lmops = NULL;
}
......@@ -215,27 +209,12 @@ EXPORT_SYMBOL(locks_free_lock);
void locks_init_lock(struct file_lock *fl)
{
INIT_LIST_HEAD(&fl->fl_link);
INIT_LIST_HEAD(&fl->fl_block);
init_waitqueue_head(&fl->fl_wait);
fl->fl_ops = NULL;
fl->fl_lmops = NULL;
locks_init_lock_always(fl);
memset(fl, 0, sizeof(struct file_lock));
locks_init_lock_heads(fl);
}
EXPORT_SYMBOL(locks_init_lock);
/*
* Initialises the fields of the file lock which are invariant for
* free file_locks.
*/
static void init_once(void *foo)
{
struct file_lock *lock = (struct file_lock *) foo;
locks_init_lock(lock);
}
static void locks_copy_private(struct file_lock *new, struct file_lock *fl)
{
if (fl->fl_ops) {
......@@ -444,9 +423,9 @@ static void lease_release_private_callback(struct file_lock *fl)
}
static const struct lock_manager_operations lease_manager_ops = {
.fl_break = lease_break_callback,
.fl_release_private = lease_release_private_callback,
.fl_change = lease_modify,
.lm_break = lease_break_callback,
.lm_release_private = lease_release_private_callback,
.lm_change = lease_modify,
};
/*
......@@ -499,9 +478,9 @@ static inline int locks_overlap(struct file_lock *fl1, struct file_lock *fl2)
*/
static int posix_same_owner(struct file_lock *fl1, struct file_lock *fl2)
{
if (fl1->fl_lmops && fl1->fl_lmops->fl_compare_owner)
if (fl1->fl_lmops && fl1->fl_lmops->lm_compare_owner)
return fl2->fl_lmops == fl1->fl_lmops &&
fl1->fl_lmops->fl_compare_owner(fl1, fl2);
fl1->fl_lmops->lm_compare_owner(fl1, fl2);
return fl1->fl_owner == fl2->fl_owner;
}
......@@ -551,8 +530,8 @@ static void locks_wake_up_blocks(struct file_lock *blocker)
waiter = list_first_entry(&blocker->fl_block,
struct file_lock, fl_block);
__locks_delete_block(waiter);
if (waiter->fl_lmops && waiter->fl_lmops->fl_notify)
waiter->fl_lmops->fl_notify(waiter);
if (waiter->fl_lmops && waiter->fl_lmops->lm_notify)
waiter->fl_lmops->lm_notify(waiter);
else
wake_up(&waiter->fl_wait);
}
......@@ -1239,7 +1218,7 @@ int __break_lease(struct inode *inode, unsigned int mode)
fl->fl_type = future;
fl->fl_break_time = break_time;
/* lease must have lmops break callback */
fl->fl_lmops->fl_break(fl);
fl->fl_lmops->lm_break(fl);
}
}
......@@ -1349,7 +1328,7 @@ int fcntl_getlease(struct file *filp)
* @arg: type of lease to obtain
* @flp: input - file_lock to use, output - file_lock inserted
*
* The (input) flp->fl_lmops->fl_break function is required
* The (input) flp->fl_lmops->lm_break function is required
* by break_lease().
*
* Called with file_lock_lock held.
......@@ -1375,7 +1354,7 @@ int generic_setlease(struct file *filp, long arg, struct file_lock **flp)
time_out_leases(inode);
BUG_ON(!(*flp)->fl_lmops->fl_break);
BUG_ON(!(*flp)->fl_lmops->lm_break);
if (arg != F_UNLCK) {
error = -EAGAIN;
......@@ -1417,7 +1396,7 @@ int generic_setlease(struct file *filp, long arg, struct file_lock **flp)
goto out;
if (my_before != NULL) {
error = lease->fl_lmops->fl_change(my_before, arg);
error = lease->fl_lmops->lm_change(my_before, arg);
if (!error)
*flp = *my_before;
goto out;
......@@ -1453,7 +1432,7 @@ static int __vfs_setlease(struct file *filp, long arg, struct file_lock **lease)
* @lease: file_lock to use
*
* Call this to establish a lease on the file.
* The (*lease)->fl_lmops->fl_break operation must be set; if not,
* The (*lease)->fl_lmops->lm_break operation must be set; if not,
* break_lease will oops!
*
* This will call the filesystem's setlease file method, if
......@@ -1751,10 +1730,10 @@ int fcntl_getlk(struct file *filp, struct flock __user *l)
* To avoid blocking kernel daemons, such as lockd, that need to acquire POSIX
* locks, the ->lock() interface may return asynchronously, before the lock has
* been granted or denied by the underlying filesystem, if (and only if)
* fl_grant is set. Callers expecting ->lock() to return asynchronously
* lm_grant is set. Callers expecting ->lock() to return asynchronously
* will only use F_SETLK, not F_SETLKW; they will set FL_SLEEP if (and only if)
* the request is for a blocking lock. When ->lock() does return asynchronously,
* it must return FILE_LOCK_DEFERRED, and call ->fl_grant() when the lock
* it must return FILE_LOCK_DEFERRED, and call ->lm_grant() when the lock
* request completes.
* If the request is for non-blocking lock the file system should return
* FILE_LOCK_DEFERRED then try to get the lock and call the callback routine
......@@ -1764,7 +1743,7 @@ int fcntl_getlk(struct file *filp, struct flock __user *l)
* grants a lock so the VFS can find out which locks are locally held and do
* the correct lock cleanup when required.
* The underlying filesystem must not drop the kernel lock or call
* ->fl_grant() before returning to the caller with a FILE_LOCK_DEFERRED
* ->lm_grant() before returning to the caller with a FILE_LOCK_DEFERRED
* return code.
*/
int vfs_lock_file(struct file *filp, unsigned int cmd, struct file_lock *fl, struct file_lock *conf)
......@@ -2333,8 +2312,8 @@ EXPORT_SYMBOL(lock_may_write);
static int __init filelock_init(void)
{
filelock_cache = kmem_cache_create("file_lock_cache",
sizeof(struct file_lock), 0, SLAB_PANIC,
init_once);
sizeof(struct file_lock), 0, SLAB_PANIC, NULL);
return 0;
}
......
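
The fs/locks.c comment above describes the asynchronous ->lock() contract: when the caller supplies lm_grant, the filesystem may return FILE_LOCK_DEFERRED and must report the outcome later through lm_grant(), never before returning. A rough userspace sketch of that contract, with simplified stand-in types and a single pending slot (none of this is the kernel implementation):

#include <stdio.h>

#define FILE_LOCK_DEFERRED 1

struct file_lock;

struct lock_manager_operations {
	int (*lm_grant)(struct file_lock *blocked, struct file_lock *conflict, int result);
};

struct file_lock {
	const struct lock_manager_operations *fl_lmops;
};

/* Caller side (lockd in the kernel): lm_grant is invoked once the
 * filesystem knows whether the deferred request succeeded. */
static int caller_lm_grant(struct file_lock *blocked, struct file_lock *conflict, int result)
{
	(void)blocked; (void)conflict;
	printf("lm_grant: deferred lock completed, result=%d\n", result);
	return 0;
}

static struct file_lock *pending;	/* one deferred request, for brevity */

/* Filesystem side: may defer only when the caller supplied lm_grant. */
static int fs_lock(struct file_lock *fl)
{
	if (fl->fl_lmops && fl->fl_lmops->lm_grant) {
		pending = fl;		/* complete later, never before returning */
		return FILE_LOCK_DEFERRED;
	}
	return 0;			/* synchronous grant */
}

/* Later, from the filesystem's completion path: */
static void fs_complete_pending(int result)
{
	if (pending) {
		pending->fl_lmops->lm_grant(pending, NULL, result);
		pending = NULL;
	}
}

int main(void)
{
	static const struct lock_manager_operations ops = { .lm_grant = caller_lm_grant };
	struct file_lock fl = { .fl_lmops = &ops };

	if (fs_lock(&fl) == FILE_LOCK_DEFERRED)
		fs_complete_pending(0);
	return 0;
}

The fs/dlm/plock.c hunks earlier in this diff key off exactly this: a non-NULL lm_grant is treated as "the caller is lockd and wants the async path", and the callback pointer is stashed for later completion.
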
/*
* fs/nfsctl.c
*
* This should eventually move to userland.
*
*/
#include <linux/types.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/nfsd/syscall.h>
#include <linux/cred.h>
#include <linux/sched.h>
#include <linux/linkage.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/syscalls.h>
#include <asm/uaccess.h>
/*
* open a file on nfsd fs
*/
static struct file *do_open(char *name, int flags)
{
struct vfsmount *mnt;
struct file *file;
mnt = do_kern_mount("nfsd", 0, "nfsd", NULL);
if (IS_ERR(mnt))
return (struct file *)mnt;
file = file_open_root(mnt->mnt_root, mnt, name, flags);
mntput(mnt); /* drop do_kern_mount reference */
return file;
}
static struct {
char *name; int wsize; int rsize;
} map[] = {
[NFSCTL_SVC] = {
.name = ".svc",
.wsize = sizeof(struct nfsctl_svc)
},
[NFSCTL_ADDCLIENT] = {
.name = ".add",
.wsize = sizeof(struct nfsctl_client)
},
[NFSCTL_DELCLIENT] = {
.name = ".del",
.wsize = sizeof(struct nfsctl_client)
},
[NFSCTL_EXPORT] = {
.name = ".export",
.wsize = sizeof(struct nfsctl_export)
},
[NFSCTL_UNEXPORT] = {
.name = ".unexport",
.wsize = sizeof(struct nfsctl_export)
},
[NFSCTL_GETFD] = {
.name = ".getfd",
.wsize = sizeof(struct nfsctl_fdparm),
.rsize = NFS_FHSIZE
},
[NFSCTL_GETFS] = {
.name = ".getfs",
.wsize = sizeof(struct nfsctl_fsparm),
.rsize = sizeof(struct knfsd_fh)
},
};
SYSCALL_DEFINE3(nfsservctl, int, cmd, struct nfsctl_arg __user *, arg,
void __user *, res)
{
struct file *file;
void __user *p = &arg->u;
int version;
int err;
if (copy_from_user(&version, &arg->ca_version, sizeof(int)))
return -EFAULT;
if (version != NFSCTL_VERSION)
return -EINVAL;
if (cmd < 0 || cmd >= ARRAY_SIZE(map) || !map[cmd].name)
return -EINVAL;
file = do_open(map[cmd].name, map[cmd].rsize ? O_RDWR : O_WRONLY);
if (IS_ERR(file))
return PTR_ERR(file);
err = file->f_op->write(file, p, map[cmd].wsize, &file->f_pos);
if (err >= 0 && map[cmd].rsize)
err = file->f_op->read(file, res, map[cmd].rsize, &file->f_pos);
if (err >= 0)
err = 0;
fput(file);
return err;
}
......@@ -28,18 +28,6 @@ config NFSD
If unsure, say N.
config NFSD_DEPRECATED
bool "Include support for deprecated syscall interface to NFSD"
depends on NFSD
default y
help
The syscall interface to nfsd was obsoleted in 2.6.0 by a new
filesystem based interface. The old interface is due for removal
in 2.6.40. If you wish to remove the interface before then
say N.
In unsure, say Y.
config NFSD_V2_ACL
bool
depends on NFSD
......
......@@ -69,7 +69,7 @@ enum {
int nfsd_reply_cache_init(void);
void nfsd_reply_cache_shutdown(void);
int nfsd_cache_lookup(struct svc_rqst *, int);
int nfsd_cache_lookup(struct svc_rqst *);
void nfsd_cache_update(struct svc_rqst *, int, __be32 *);
#ifdef CONFIG_NFSD_V4
......
......@@ -35,10 +35,8 @@ nlm_fopen(struct svc_rqst *rqstp, struct nfs_fh *f, struct file **filp)
memcpy((char*)&fh.fh_handle.fh_base, f->data, f->size);
fh.fh_export = NULL;
exp_readlock();
nfserr = nfsd_open(rqstp, &fh, S_IFREG, NFSD_MAY_LOCK, filp);
fh_put(&fh);
exp_readunlock();
/* We return nlm error codes as nlm doesn't know
* about nfsd, but nfsd does know about nlm..
*/
......
......@@ -291,6 +291,15 @@ nfsd4_open(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
if (open->op_create && open->op_claim_type != NFS4_OPEN_CLAIM_NULL)
return nfserr_inval;
/*
* RFC5661 18.51.3
* Before RECLAIM_COMPLETE done, server should deny new lock
*/
if (nfsd4_has_session(cstate) &&
!cstate->session->se_client->cl_firststate &&
open->op_claim_type != NFS4_OPEN_CLAIM_PREVIOUS)
return nfserr_grace;
if (nfsd4_has_session(cstate))
copy_clientid(&open->op_clientid, cstate->session);
......@@ -998,6 +1007,15 @@ struct nfsd4_operation {
nfsd4op_func op_func;
u32 op_flags;
char *op_name;
/*
* We use the DRC for compounds containing non-idempotent
* operations, *except* those that are 4.1-specific (since
* sessions provide their own EOS), and except for stateful
* operations other than setclientid and setclientid_confirm
* (since sequence numbers provide EOS for open, lock, etc in
* the v4.0 case).
*/
bool op_cacheresult;
};
static struct nfsd4_operation nfsd4_ops[];
......@@ -1042,6 +1060,11 @@ static inline struct nfsd4_operation *OPDESC(struct nfsd4_op *op)
return &nfsd4_ops[op->opnum];
}
bool nfsd4_cache_this_op(struct nfsd4_op *op)
{
return OPDESC(op)->op_cacheresult;
}
static bool need_wrongsec_check(struct svc_rqst *rqstp)
{
struct nfsd4_compoundres *resp = rqstp->rq_resp;
......@@ -1209,7 +1232,6 @@ nfsd4_proc_compound(struct svc_rqst *rqstp,
fh_put(&resp->cstate.save_fh);
BUG_ON(resp->cstate.replay_owner);
out:
nfsd4_release_compoundargs(args);
/* Reset deferral mechanism for RPC deferrals */
rqstp->rq_usedeferral = 1;
dprintk("nfsv4 compound returned %d\n", ntohl(status));
......@@ -1232,6 +1254,7 @@ static struct nfsd4_operation nfsd4_ops[] = {
[OP_CREATE] = {
.op_func = (nfsd4op_func)nfsd4_create,
.op_name = "OP_CREATE",
.op_cacheresult = true,
},
[OP_DELEGRETURN] = {
.op_func = (nfsd4op_func)nfsd4_delegreturn,
......@@ -1249,6 +1272,7 @@ static struct nfsd4_operation nfsd4_ops[] = {
[OP_LINK] = {
.op_func = (nfsd4op_func)nfsd4_link,
.op_name = "OP_LINK",
.op_cacheresult = true,
},
[OP_LOCK] = {
.op_func = (nfsd4op_func)nfsd4_lock,
......@@ -1322,10 +1346,12 @@ static struct nfsd4_operation nfsd4_ops[] = {
[OP_REMOVE] = {
.op_func = (nfsd4op_func)nfsd4_remove,
.op_name = "OP_REMOVE",
.op_cacheresult = true,
},
[OP_RENAME] = {
.op_name = "OP_RENAME",
.op_func = (nfsd4op_func)nfsd4_rename,
.op_cacheresult = true,
},
[OP_RENEW] = {
.op_func = (nfsd4op_func)nfsd4_renew,
......@@ -1351,16 +1377,19 @@ static struct nfsd4_operation nfsd4_ops[] = {
[OP_SETATTR] = {
.op_func = (nfsd4op_func)nfsd4_setattr,
.op_name = "OP_SETATTR",
.op_cacheresult = true,
},
[OP_SETCLIENTID] = {
.op_func = (nfsd4op_func)nfsd4_setclientid,
.op_flags = ALLOWED_WITHOUT_FH | ALLOWED_ON_ABSENT_FS,
.op_name = "OP_SETCLIENTID",
.op_cacheresult = true,
},
[OP_SETCLIENTID_CONFIRM] = {
.op_func = (nfsd4op_func)nfsd4_setclientid_confirm,
.op_flags = ALLOWED_WITHOUT_FH | ALLOWED_ON_ABSENT_FS,
.op_name = "OP_SETCLIENTID_CONFIRM",
.op_cacheresult = true,
},
[OP_VERIFY] = {
.op_func = (nfsd4op_func)nfsd4_verify,
......@@ -1369,6 +1398,7 @@ static struct nfsd4_operation nfsd4_ops[] = {
[OP_WRITE] = {
.op_func = (nfsd4op_func)nfsd4_write,
.op_name = "OP_WRITE",
.op_cacheresult = true,
},
[OP_RELEASE_LOCKOWNER] = {
.op_func = (nfsd4op_func)nfsd4_release_lockowner,
......@@ -1402,6 +1432,11 @@ static struct nfsd4_operation nfsd4_ops[] = {
.op_flags = ALLOWED_WITHOUT_FH | ALLOWED_AS_FIRST_OP,
.op_name = "OP_SEQUENCE",
},
[OP_DESTROY_CLIENTID] = {
.op_func = NULL,
.op_flags = ALLOWED_WITHOUT_FH | ALLOWED_AS_FIRST_OP,
.op_name = "OP_DESTROY_CLIENTID",
},
[OP_RECLAIM_COMPLETE] = {
.op_func = (nfsd4op_func)nfsd4_reclaim_complete,
.op_flags = ALLOWED_WITHOUT_FH,
......@@ -1412,6 +1447,16 @@ static struct nfsd4_operation nfsd4_ops[] = {
.op_flags = OP_HANDLES_WRONGSEC,
.op_name = "OP_SECINFO_NO_NAME",
},
[OP_TEST_STATEID] = {
.op_func = (nfsd4op_func)nfsd4_test_stateid,
.op_flags = ALLOWED_WITHOUT_FH,
.op_name = "OP_TEST_STATEID",
},
[OP_FREE_STATEID] = {
.op_func = (nfsd4op_func)nfsd4_free_stateid,
.op_flags = ALLOWED_WITHOUT_FH,
.op_name = "OP_FREE_STATEID",
},
};
static const char *nfsd4_op_name(unsigned opnum)
......@@ -1424,16 +1469,6 @@ static const char *nfsd4_op_name(unsigned opnum)
#define nfsd4_voidres nfsd4_voidargs
struct nfsd4_voidargs { int dummy; };
/*
* TODO: At the present time, the NFSv4 server does not do XID caching
* of requests. Implementing XID caching would not be a serious problem,
* although it would require a mild change in interfaces since one
* doesn't know whether an NFSv4 request is idempotent until after the
* XDR decode. However, XID caching totally confuses pynfs (Peter
* Astrand's regression testsuite for NFSv4 servers), which reuses
* XID's liberally, so I've left it unimplemented until pynfs generates
* better XID's.
*/
static struct svc_procedure nfsd_procedures4[2] = {
[NFSPROC4_NULL] = {
.pc_func = (svc_procfunc) nfsd4_proc_null,
......@@ -1449,6 +1484,7 @@ static struct svc_procedure nfsd_procedures4[2] = {
.pc_encode = (kxdrproc_t) nfs4svc_encode_compoundres,
.pc_argsize = sizeof(struct nfsd4_compoundargs),
.pc_ressize = sizeof(struct nfsd4_compoundres),
.pc_release = nfsd4_release_compoundargs,
.pc_cachetype = RC_NOCACHE,
.pc_xdrressize = NFSD_BUFSIZE/4,
},
......
......@@ -44,13 +44,15 @@
#include <linux/namei.h>
#include <linux/statfs.h>
#include <linux/utsname.h>
#include <linux/pagemap.h>
#include <linux/sunrpc/svcauth_gss.h>
#include "idmap.h"
#include "acl.h"
#include "xdr4.h"
#include "vfs.h"
#include "state.h"
#include "cache.h"
#define NFSDDBG_FACILITY NFSDDBG_XDR
......@@ -131,6 +133,22 @@ xdr_error: \
} \
} while (0)
static void save_buf(struct nfsd4_compoundargs *argp, struct nfsd4_saved_compoundargs *savep)
{
savep->p = argp->p;
savep->end = argp->end;
savep->pagelen = argp->pagelen;
savep->pagelist = argp->pagelist;
}
static void restore_buf(struct nfsd4_compoundargs *argp, struct nfsd4_saved_compoundargs *savep)
{
argp->p = savep->p;
argp->end = savep->end;
argp->pagelen = savep->pagelen;
argp->pagelist = savep->pagelist;
}
static __be32 *read_buf(struct nfsd4_compoundargs *argp, u32 nbytes)
{
/* We want more bytes than seem to be available.
......@@ -1245,6 +1263,19 @@ nfsd4_decode_destroy_session(struct nfsd4_compoundargs *argp,
DECODE_TAIL;
}
static __be32
nfsd4_decode_free_stateid(struct nfsd4_compoundargs *argp,
struct nfsd4_free_stateid *free_stateid)
{
DECODE_HEAD;
READ_BUF(sizeof(stateid_t));
READ32(free_stateid->fr_stateid.si_generation);
COPYMEM(&free_stateid->fr_stateid.si_opaque, sizeof(stateid_opaque_t));
DECODE_TAIL;
}
static __be32
nfsd4_decode_sequence(struct nfsd4_compoundargs *argp,
struct nfsd4_sequence *seq)
......@@ -1261,6 +1292,40 @@ nfsd4_decode_sequence(struct nfsd4_compoundargs *argp,
DECODE_TAIL;
}
static __be32
nfsd4_decode_test_stateid(struct nfsd4_compoundargs *argp, struct nfsd4_test_stateid *test_stateid)
{
unsigned int nbytes;
stateid_t si;
int i;
__be32 *p;
__be32 status;
READ_BUF(4);
test_stateid->ts_num_ids = ntohl(*p++);
nbytes = test_stateid->ts_num_ids * sizeof(stateid_t);
if (nbytes > (u32)((char *)argp->end - (char *)argp->p))
goto xdr_error;
test_stateid->ts_saved_args = argp;
save_buf(argp, &test_stateid->ts_savedp);
for (i = 0; i < test_stateid->ts_num_ids; i++) {
status = nfsd4_decode_stateid(argp, &si);
if (status)
return status;
}
status = 0;
out:
return status;
xdr_error:
dprintk("NFSD: xdr error (%s:%d)\n", __FILE__, __LINE__);
status = nfserr_bad_xdr;
goto out;
}
static __be32 nfsd4_decode_reclaim_complete(struct nfsd4_compoundargs *argp, struct nfsd4_reclaim_complete *rc)
{
DECODE_HEAD;
......@@ -1370,7 +1435,7 @@ static nfsd4_dec nfsd41_dec_ops[] = {
[OP_EXCHANGE_ID] = (nfsd4_dec)nfsd4_decode_exchange_id,
[OP_CREATE_SESSION] = (nfsd4_dec)nfsd4_decode_create_session,
[OP_DESTROY_SESSION] = (nfsd4_dec)nfsd4_decode_destroy_session,
[OP_FREE_STATEID] = (nfsd4_dec)nfsd4_decode_notsupp,
[OP_FREE_STATEID] = (nfsd4_dec)nfsd4_decode_free_stateid,
[OP_GET_DIR_DELEGATION] = (nfsd4_dec)nfsd4_decode_notsupp,
[OP_GETDEVICEINFO] = (nfsd4_dec)nfsd4_decode_notsupp,
[OP_GETDEVICELIST] = (nfsd4_dec)nfsd4_decode_notsupp,
......@@ -1380,7 +1445,7 @@ static nfsd4_dec nfsd41_dec_ops[] = {
[OP_SECINFO_NO_NAME] = (nfsd4_dec)nfsd4_decode_secinfo_no_name,
[OP_SEQUENCE] = (nfsd4_dec)nfsd4_decode_sequence,
[OP_SET_SSV] = (nfsd4_dec)nfsd4_decode_notsupp,
[OP_TEST_STATEID] = (nfsd4_dec)nfsd4_decode_notsupp,
[OP_TEST_STATEID] = (nfsd4_dec)nfsd4_decode_test_stateid,
[OP_WANT_DELEGATION] = (nfsd4_dec)nfsd4_decode_notsupp,
[OP_DESTROY_CLIENTID] = (nfsd4_dec)nfsd4_decode_notsupp,
[OP_RECLAIM_COMPLETE] = (nfsd4_dec)nfsd4_decode_reclaim_complete,
......@@ -1402,6 +1467,7 @@ nfsd4_decode_compound(struct nfsd4_compoundargs *argp)
DECODE_HEAD;
struct nfsd4_op *op;
struct nfsd4_minorversion_ops *ops;
bool cachethis = false;
int i;
/*
......@@ -1483,7 +1549,16 @@ nfsd4_decode_compound(struct nfsd4_compoundargs *argp)
argp->opcnt = i+1;
break;
}
/*
* We'll try to cache the result in the DRC if any one
* op in the compound wants to be cached:
*/
cachethis |= nfsd4_cache_this_op(op);
}
/* Sessions make the DRC unnecessary: */
if (argp->minorversion)
cachethis = false;
argp->rqstp->rq_cachetype = cachethis ? RC_REPLBUFF : RC_NOCACHE;
DECODE_TAIL;
}
......@@ -3115,6 +3190,21 @@ nfsd4_encode_destroy_session(struct nfsd4_compoundres *resp, int nfserr,
return nfserr;
}
static __be32
nfsd4_encode_free_stateid(struct nfsd4_compoundres *resp, int nfserr,
struct nfsd4_free_stateid *free_stateid)
{
__be32 *p;
if (nfserr)
return nfserr;
RESERVE_SPACE(4);
WRITE32(nfserr);
ADJUST_ARGS();
return nfserr;
}
static __be32
nfsd4_encode_sequence(struct nfsd4_compoundres *resp, int nfserr,
struct nfsd4_sequence *seq)
......@@ -3138,6 +3228,36 @@ nfsd4_encode_sequence(struct nfsd4_compoundres *resp, int nfserr,
return 0;
}
__be32
nfsd4_encode_test_stateid(struct nfsd4_compoundres *resp, int nfserr,
struct nfsd4_test_stateid *test_stateid)
{
struct nfsd4_compoundargs *argp;
stateid_t si;
__be32 *p;
int i;
int valid;
restore_buf(test_stateid->ts_saved_args, &test_stateid->ts_savedp);
argp = test_stateid->ts_saved_args;
RESERVE_SPACE(4);
*p++ = htonl(test_stateid->ts_num_ids);
resp->p = p;
nfs4_lock_state();
for (i = 0; i < test_stateid->ts_num_ids; i++) {
nfsd4_decode_stateid(argp, &si);
valid = nfs4_validate_stateid(&si, test_stateid->ts_has_session);
RESERVE_SPACE(4);
*p++ = htonl(valid);
resp->p = p;
}
nfs4_unlock_state();
return nfserr;
}
static __be32
nfsd4_encode_noop(struct nfsd4_compoundres *resp, __be32 nfserr, void *p)
{
......@@ -3196,7 +3316,7 @@ static nfsd4_enc nfsd4_enc_ops[] = {
[OP_EXCHANGE_ID] = (nfsd4_enc)nfsd4_encode_exchange_id,
[OP_CREATE_SESSION] = (nfsd4_enc)nfsd4_encode_create_session,
[OP_DESTROY_SESSION] = (nfsd4_enc)nfsd4_encode_destroy_session,
[OP_FREE_STATEID] = (nfsd4_enc)nfsd4_encode_noop,
[OP_FREE_STATEID] = (nfsd4_enc)nfsd4_encode_free_stateid,
[OP_GET_DIR_DELEGATION] = (nfsd4_enc)nfsd4_encode_noop,
[OP_GETDEVICEINFO] = (nfsd4_enc)nfsd4_encode_noop,
[OP_GETDEVICELIST] = (nfsd4_enc)nfsd4_encode_noop,
......@@ -3206,7 +3326,7 @@ static nfsd4_enc nfsd4_enc_ops[] = {
[OP_SECINFO_NO_NAME] = (nfsd4_enc)nfsd4_encode_secinfo_no_name,
[OP_SEQUENCE] = (nfsd4_enc)nfsd4_encode_sequence,
[OP_SET_SSV] = (nfsd4_enc)nfsd4_encode_noop,
[OP_TEST_STATEID] = (nfsd4_enc)nfsd4_encode_noop,
[OP_TEST_STATEID] = (nfsd4_enc)nfsd4_encode_test_stateid,
[OP_WANT_DELEGATION] = (nfsd4_enc)nfsd4_encode_noop,
[OP_DESTROY_CLIENTID] = (nfsd4_enc)nfsd4_encode_noop,
[OP_RECLAIM_COMPLETE] = (nfsd4_enc)nfsd4_encode_noop,
......@@ -3319,8 +3439,11 @@ nfs4svc_encode_voidres(struct svc_rqst *rqstp, __be32 *p, void *dummy)
return xdr_ressize_check(rqstp, p);
}
void nfsd4_release_compoundargs(struct nfsd4_compoundargs *args)
int nfsd4_release_compoundargs(void *rq, __be32 *p, void *resp)
{
struct svc_rqst *rqstp = rq;
struct nfsd4_compoundargs *args = rqstp->rq_argp;
if (args->ops != args->iops) {
kfree(args->ops);
args->ops = args->iops;
......@@ -3333,13 +3456,12 @@ void nfsd4_release_compoundargs(struct nfsd4_compoundargs *args)
tb->release(tb->buf);
kfree(tb);
}
return 1;
}
int
nfs4svc_decode_compoundargs(struct svc_rqst *rqstp, __be32 *p, struct nfsd4_compoundargs *args)
{
__be32 status;
args->p = p;
args->end = rqstp->rq_arg.head[0].iov_base + rqstp->rq_arg.head[0].iov_len;
args->pagelist = rqstp->rq_arg.pages;
......@@ -3349,11 +3471,7 @@ nfs4svc_decode_compoundargs(struct svc_rqst *rqstp, __be32 *p, struct nfsd4_comp
args->ops = args->iops;
args->rqstp = rqstp;
status = nfsd4_decode_compound(args);
if (status) {
nfsd4_release_compoundargs(args);
}
return !status;
return !nfsd4_decode_compound(args);
}
int
......
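
The nfsd4_decode_compound hunk above is where the reply-cache decision is made for NFSv4: the decoder ORs together op_cacheresult for every op in the compound, then forces no caching for v4.1 because sessions already provide exactly-once semantics. A small standalone sketch of that decision (compound_cachetype and struct op are invented names for illustration, not nfsd API):

#include <stdbool.h>
#include <stdio.h>

enum cachetype { RC_NOCACHE, RC_REPLBUFF };

struct op { bool cacheresult; };

/* Cache the reply iff any op in the compound wants caching and this is
 * a v4.0 (sessionless) request. */
static enum cachetype compound_cachetype(const struct op *ops, int opcnt, int minorversion)
{
	bool cachethis = false;

	for (int i = 0; i < opcnt; i++)
		cachethis |= ops[i].cacheresult;
	if (minorversion)		/* sessions make the DRC unnecessary */
		cachethis = false;
	return cachethis ? RC_REPLBUFF : RC_NOCACHE;
}

int main(void)
{
	struct op ops[] = { { false }, { true } };	/* e.g. PUTFH + SETATTR */

	printf("v4.0: %d, v4.1: %d\n",
	       compound_cachetype(ops, 2, 0),
	       compound_cachetype(ops, 2, 1));
	return 0;
}

The nfsd_dispatch hunks further down then consult rqstp->rq_cachetype in nfsd_cache_lookup() instead of the fixed per-procedure pc_cachetype, which is why argument decoding now runs before the cache lookup.
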
......@@ -118,7 +118,7 @@ hash_refile(struct svc_cacherep *rp)
* Note that no operation within the loop may sleep.
*/
int
nfsd_cache_lookup(struct svc_rqst *rqstp, int type)
nfsd_cache_lookup(struct svc_rqst *rqstp)
{
struct hlist_node *hn;
struct hlist_head *rh;
......@@ -128,6 +128,7 @@ nfsd_cache_lookup(struct svc_rqst *rqstp, int type)
vers = rqstp->rq_vers,
proc = rqstp->rq_proc;
unsigned long age;
int type = rqstp->rq_cachetype;
int rtn;
rqstp->rq_cacherep = NULL;
......
......@@ -528,16 +528,9 @@ nfsd(void *vrqstp)
continue;
}
/* Lock the export hash tables for reading. */
exp_readlock();
validate_process_creds();
svc_process(rqstp);
validate_process_creds();
/* Unlock export hash tables */
exp_readunlock();
}
/* Clear signals before calling svc_exit_thread() */
......@@ -577,8 +570,22 @@ nfsd_dispatch(struct svc_rqst *rqstp, __be32 *statp)
rqstp->rq_vers, rqstp->rq_proc);
proc = rqstp->rq_procinfo;
/*
* Give the xdr decoder a chance to change this if it wants
* (necessary in the NFSv4.0 compound case)
*/
rqstp->rq_cachetype = proc->pc_cachetype;
/* Decode arguments */
xdr = proc->pc_decode;
if (xdr && !xdr(rqstp, (__be32*)rqstp->rq_arg.head[0].iov_base,
rqstp->rq_argp)) {
dprintk("nfsd: failed to decode arguments!\n");
*statp = rpc_garbage_args;
return 1;
}
/* Check whether we have this call in the cache. */
switch (nfsd_cache_lookup(rqstp, proc->pc_cachetype)) {
switch (nfsd_cache_lookup(rqstp)) {
case RC_INTR:
case RC_DROPIT:
return 0;
......@@ -588,16 +595,6 @@ nfsd_dispatch(struct svc_rqst *rqstp, __be32 *statp)
/* do it */
}
/* Decode arguments */
xdr = proc->pc_decode;
if (xdr && !xdr(rqstp, (__be32*)rqstp->rq_arg.head[0].iov_base,
rqstp->rq_argp)) {
dprintk("nfsd: failed to decode arguments!\n");
nfsd_cache_update(rqstp, RC_NOCACHE, NULL);
*statp = rpc_garbage_args;
return 1;
}
/* need to grab the location to store the status, as
* nfsv4 does some encoding while processing
*/
......
......@@ -482,6 +482,7 @@ extern void nfsd4_recdir_purge_old(void);
extern int nfsd4_create_clid_dir(struct nfs4_client *clp);
extern void nfsd4_remove_clid_dir(struct nfs4_client *clp);
extern void release_session_client(struct nfsd4_session *);
extern __be32 nfs4_validate_stateid(stateid_t *, int);
static inline void
nfs4_put_stateowner(struct nfs4_stateowner *so)
......
......@@ -342,6 +342,25 @@ struct nfsd4_setclientid_confirm {
nfs4_verifier sc_confirm;
};
struct nfsd4_saved_compoundargs {
__be32 *p;
__be32 *end;
int pagelen;
struct page **pagelist;
};
struct nfsd4_test_stateid {
__be32 ts_num_ids;
__be32 ts_has_session;
struct nfsd4_compoundargs *ts_saved_args;
struct nfsd4_saved_compoundargs ts_savedp;
};
struct nfsd4_free_stateid {
stateid_t fr_stateid; /* request */
__be32 fr_status; /* response */
};
/* also used for NVERIFY */
struct nfsd4_verify {
u32 ve_bmval[3]; /* request */
......@@ -432,10 +451,14 @@ struct nfsd4_op {
struct nfsd4_destroy_session destroy_session;
struct nfsd4_sequence sequence;
struct nfsd4_reclaim_complete reclaim_complete;
struct nfsd4_test_stateid test_stateid;
struct nfsd4_free_stateid free_stateid;
} u;
struct nfs4_replay * replay;
};
bool nfsd4_cache_this_op(struct nfsd4_op *);
struct nfsd4_compoundargs {
/* scratch variables for XDR decode */
__be32 * p;
......@@ -458,6 +481,7 @@ struct nfsd4_compoundargs {
u32 opcnt;
struct nfsd4_op *ops;
struct nfsd4_op iops[8];
int cachetype;
};
struct nfsd4_compoundres {
......@@ -559,11 +583,15 @@ extern __be32
nfsd4_release_lockowner(struct svc_rqst *rqstp,
struct nfsd4_compound_state *,
struct nfsd4_release_lockowner *rlockowner);
extern void nfsd4_release_compoundargs(struct nfsd4_compoundargs *);
extern int nfsd4_release_compoundargs(void *rq, __be32 *p, void *resp);
extern __be32 nfsd4_delegreturn(struct svc_rqst *rqstp,
struct nfsd4_compound_state *, struct nfsd4_delegreturn *dr);
extern __be32 nfsd4_renew(struct svc_rqst *rqstp,
struct nfsd4_compound_state *, clientid_t *clid);
extern __be32 nfsd4_test_stateid(struct svc_rqst *rqstp,
struct nfsd4_compound_state *, struct nfsd4_test_stateid *test_stateid);
extern __be32 nfsd4_free_stateid(struct svc_rqst *rqstp,
struct nfsd4_compound_state *, struct nfsd4_free_stateid *free_stateid);
#endif
/*
......
......@@ -438,16 +438,7 @@ asmlinkage long compat_sys_ppoll(struct pollfd __user *ufds,
struct compat_timespec __user *tsp,
const compat_sigset_t __user *sigmask,
compat_size_t sigsetsize);
#if (defined(CONFIG_NFSD) || defined(CONFIG_NFSD_MODULE)) && \
!defined(CONFIG_NFSD_DEPRECATED)
union compat_nfsctl_res;
struct compat_nfsctl_arg;
asmlinkage long compat_sys_nfsservctl(int cmd,
struct compat_nfsctl_arg __user *arg,
union compat_nfsctl_res __user *res);
#else
asmlinkage long compat_sys_nfsservctl(int cmd, void *notused, void *notused2);
#endif
asmlinkage long compat_sys_signalfd4(int ufd,
const compat_sigset_t __user *sigmask,
compat_size_t sigsetsize, int flags);
......
......@@ -1072,12 +1072,12 @@ struct file_lock_operations {
};
struct lock_manager_operations {
int (*fl_compare_owner)(struct file_lock *, struct file_lock *);
void (*fl_notify)(struct file_lock *); /* unblock callback */
int (*fl_grant)(struct file_lock *, struct file_lock *, int);
void (*fl_release_private)(struct file_lock *);
void (*fl_break)(struct file_lock *);
int (*fl_change)(struct file_lock **, int);
int (*lm_compare_owner)(struct file_lock *, struct file_lock *);
void (*lm_notify)(struct file_lock *); /* unblock callback */
int (*lm_grant)(struct file_lock *, struct file_lock *, int);
void (*lm_release_private)(struct file_lock *);
void (*lm_break)(struct file_lock *);
int (*lm_change)(struct file_lock **, int);
};
struct lock_manager {
......
......@@ -133,8 +133,6 @@ __be32 check_nfsd_access(struct svc_export *exp, struct svc_rqst *rqstp);
int nfsd_export_init(void);
void nfsd_export_shutdown(void);
void nfsd_export_flush(void);
void exp_readlock(void);
void exp_readunlock(void);
struct svc_export * rqst_exp_get_by_name(struct svc_rqst *,
struct path *);
struct svc_export * rqst_exp_parent(struct svc_rqst *,
......
......@@ -256,13 +256,4 @@ static inline time_t get_expiry(char **bpp)
return rv - boot.tv_sec;
}
#ifdef CONFIG_NFSD_DEPRECATED
static inline void sunrpc_invalidate(struct cache_head *h,
struct cache_detail *detail)
{
h->expiry_time = seconds_since_boot() - 1;
detail->nextcheck = seconds_since_boot();
}
#endif /* CONFIG_NFSD_DEPRECATED */
#endif /* _LINUX_SUNRPC_CACHE_H_ */
......@@ -273,6 +273,7 @@ struct svc_rqst {
/* Catering to nfsd */
struct auth_domain * rq_client; /* RPC peer info */
struct auth_domain * rq_gssclient; /* "gss/"-style peer info */
int rq_cachetype;
struct svc_cacherep * rq_cacherep; /* cache info */
int rq_splice_ok; /* turned off in gss privacy
* to prevent encrypting page
......
......@@ -744,6 +744,13 @@ static struct pf_desc gss_kerberos_pfs[] = {
},
};
MODULE_ALIAS("rpc-auth-gss-krb5");
MODULE_ALIAS("rpc-auth-gss-krb5i");
MODULE_ALIAS("rpc-auth-gss-krb5p");
MODULE_ALIAS("rpc-auth-gss-390003");
MODULE_ALIAS("rpc-auth-gss-390004");
MODULE_ALIAS("rpc-auth-gss-390005");
static struct gss_api_mech gss_kerberos_mech = {
.gm_name = "krb5",
.gm_owner = THIS_MODULE,
......
......@@ -141,7 +141,7 @@ gss_mech_get(struct gss_api_mech *gm)
EXPORT_SYMBOL_GPL(gss_mech_get);
struct gss_api_mech *
gss_mech_get_by_name(const char *name)
_gss_mech_get_by_name(const char *name)
{
struct gss_api_mech *pos, *gm = NULL;
......@@ -158,6 +158,17 @@ gss_mech_get_by_name(const char *name)
}
struct gss_api_mech * gss_mech_get_by_name(const char *name)
{
struct gss_api_mech *gm = NULL;
gm = _gss_mech_get_by_name(name);
if (!gm) {
request_module("rpc-auth-gss-%s", name);
gm = _gss_mech_get_by_name(name);
}
return gm;
}
EXPORT_SYMBOL_GPL(gss_mech_get_by_name);
struct gss_api_mech *
......@@ -194,10 +205,9 @@ mech_supports_pseudoflavor(struct gss_api_mech *gm, u32 pseudoflavor)
return 0;
}
struct gss_api_mech *
gss_mech_get_by_pseudoflavor(u32 pseudoflavor)
struct gss_api_mech *_gss_mech_get_by_pseudoflavor(u32 pseudoflavor)
{
struct gss_api_mech *pos, *gm = NULL;
struct gss_api_mech *gm = NULL, *pos;
spin_lock(&registered_mechs_lock);
list_for_each_entry(pos, &registered_mechs, gm_list) {
......@@ -213,6 +223,20 @@ gss_mech_get_by_pseudoflavor(u32 pseudoflavor)
return gm;
}
struct gss_api_mech *
gss_mech_get_by_pseudoflavor(u32 pseudoflavor)
{
struct gss_api_mech *gm;
gm = _gss_mech_get_by_pseudoflavor(pseudoflavor);
if (!gm) {
request_module("rpc-auth-gss-%u", pseudoflavor);
gm = _gss_mech_get_by_pseudoflavor(pseudoflavor);
}
return gm;
}
EXPORT_SYMBOL_GPL(gss_mech_get_by_pseudoflavor);
int gss_mech_list_pseudoflavors(rpc_authflavor_t *array_ptr)
......
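
The two wrappers above add mechanism autoloading: look the mechanism up, and if it is missing ask for the module by its rpc-auth-gss-<name> or rpc-auth-gss-<pseudoflavor> alias (the MODULE_ALIAS lines added to the krb5 code earlier in this diff), then retry the lookup once. A hypothetical userspace sketch of the same lookup / load / retry pattern (request_module_stub and the other names are made up, not the kernel API):

#include <stdio.h>
#include <string.h>

struct gss_mech { const char *name; };

static struct gss_mech registered[] = { { "krb5" } };

static struct gss_mech *find_registered(const char *name)
{
	for (unsigned i = 0; i < sizeof(registered) / sizeof(registered[0]); i++)
		if (strcmp(registered[i].name, name) == 0)
			return &registered[i];
	return NULL;
}

/* Stand-in for the kernel's request_module(); in-kernel this asks
 * modprobe to load whatever module provides the given alias. */
static void request_module_stub(const char *name)
{
	printf("would load module for alias rpc-auth-gss-%s\n", name);
}

static struct gss_mech *get_mech_by_name(const char *name)
{
	struct gss_mech *gm = find_registered(name);

	if (!gm) {
		request_module_stub(name);
		gm = find_registered(name);	/* retry after the autoload attempt */
	}
	return gm;
}

int main(void)
{
	printf("krb5: %s\n", get_mech_by_name("krb5") ? "found" : "missing");
	printf("spkm3: %s\n", get_mech_by_name("spkm3") ? "found" : "missing");
	return 0;
}
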
......@@ -902,12 +902,13 @@ void svc_delete_xprt(struct svc_xprt *xprt)
if (!test_and_set_bit(XPT_DETACHED, &xprt->xpt_flags))
list_del_init(&xprt->xpt_list);
/*
* We used to delete the transport from whichever list
* it's sk_xprt.xpt_ready node was on, but we don't actually
* need to. This is because the only time we're called
* while still attached to a queue, the queue itself
* is about to be destroyed (in svc_destroy).
* The only time we're called while xpt_ready is still on a list
* is while the list itself is about to be destroyed (in
* svc_destroy). BUT svc_xprt_enqueue could still be attempting
* to add new entries to the sp_sockets list, so we can't leave
* a freed xprt on it.
*/
list_del_init(&xprt->xpt_ready);
if (test_bit(XPT_TEMP, &xprt->xpt_flags))
serv->sv_tmpcnt--;
spin_unlock_bh(&serv->sv_lock);
......
......@@ -30,12 +30,10 @@
struct unix_domain {
struct auth_domain h;
#ifdef CONFIG_NFSD_DEPRECATED
int addr_changes;
#endif /* CONFIG_NFSD_DEPRECATED */
/* other stuff later */
};
extern struct auth_ops svcauth_null;
extern struct auth_ops svcauth_unix;
static void svcauth_unix_domain_release(struct auth_domain *dom)
......@@ -74,9 +72,6 @@ struct auth_domain *unix_domain_find(char *name)
return NULL;
}
new->h.flavour = &svcauth_unix;
#ifdef CONFIG_NFSD_DEPRECATED
new->addr_changes = 0;
#endif /* CONFIG_NFSD_DEPRECATED */
rv = auth_domain_lookup(name, &new->h);
}
}
......@@ -95,9 +90,6 @@ struct ip_map {
char m_class[8]; /* e.g. "nfsd" */
struct in6_addr m_addr;
struct unix_domain *m_client;
#ifdef CONFIG_NFSD_DEPRECATED
int m_add_change;
#endif /* CONFIG_NFSD_DEPRECATED */
};
static void ip_map_put(struct kref *kref)
......@@ -151,9 +143,6 @@ static void update(struct cache_head *cnew, struct cache_head *citem)
kref_get(&item->m_client->h.ref);
new->m_client = item->m_client;
#ifdef CONFIG_NFSD_DEPRECATED
new->m_add_change = item->m_add_change;
#endif /* CONFIG_NFSD_DEPRECATED */
}
static struct cache_head *ip_map_alloc(void)
{
......@@ -338,16 +327,6 @@ static int __ip_map_update(struct cache_detail *cd, struct ip_map *ipm,
ip.h.flags = 0;
if (!udom)
set_bit(CACHE_NEGATIVE, &ip.h.flags);
#ifdef CONFIG_NFSD_DEPRECATED
else {
ip.m_add_change = udom->addr_changes;
/* if this is from the legacy set_client system call,
* we need m_add_change to be one higher
*/
if (expiry == NEVER)
ip.m_add_change++;
}
#endif /* CONFIG_NFSD_DEPRECATED */
ip.h.expiry_time = expiry;
ch = sunrpc_cache_update(cd, &ip.h, &ipm->h,
hash_str(ipm->m_class, IP_HASHBITS) ^
......@@ -367,62 +346,6 @@ static inline int ip_map_update(struct net *net, struct ip_map *ipm,
return __ip_map_update(sn->ip_map_cache, ipm, udom, expiry);
}
#ifdef CONFIG_NFSD_DEPRECATED
int auth_unix_add_addr(struct net *net, struct in6_addr *addr, struct auth_domain *dom)
{
struct unix_domain *udom;
struct ip_map *ipmp;
if (dom->flavour != &svcauth_unix)
return -EINVAL;
udom = container_of(dom, struct unix_domain, h);
ipmp = ip_map_lookup(net, "nfsd", addr);
if (ipmp)
return ip_map_update(net, ipmp, udom, NEVER);
else
return -ENOMEM;
}
EXPORT_SYMBOL_GPL(auth_unix_add_addr);
int auth_unix_forget_old(struct auth_domain *dom)
{
struct unix_domain *udom;
if (dom->flavour != &svcauth_unix)
return -EINVAL;
udom = container_of(dom, struct unix_domain, h);
udom->addr_changes++;
return 0;
}
EXPORT_SYMBOL_GPL(auth_unix_forget_old);
struct auth_domain *auth_unix_lookup(struct net *net, struct in6_addr *addr)
{
struct ip_map *ipm;
struct auth_domain *rv;
struct sunrpc_net *sn;
sn = net_generic(net, sunrpc_net_id);
ipm = ip_map_lookup(net, "nfsd", addr);
if (!ipm)
return NULL;
if (cache_check(sn->ip_map_cache, &ipm->h, NULL))
return NULL;
if ((ipm->m_client->addr_changes - ipm->m_add_change) >0) {
sunrpc_invalidate(&ipm->h, sn->ip_map_cache);
rv = NULL;
} else {
rv = &ipm->m_client->h;
kref_get(&rv->ref);
}
cache_put(&ipm->h, sn->ip_map_cache);
return rv;
}
EXPORT_SYMBOL_GPL(auth_unix_lookup);
#endif /* CONFIG_NFSD_DEPRECATED */
void svcauth_unix_purge(void)
{
......
......@@ -51,6 +51,8 @@
#include <linux/sunrpc/stats.h>
#include <linux/sunrpc/xprt.h>
#include "sunrpc.h"
#define RPCDBG_FACILITY RPCDBG_SVCXPRT
......