Commit 71db34fc authored by Linus Torvalds

Merge branch 'for-3.4' of git://linux-nfs.org/~bfields/linux

Pull nfsd changes from Bruce Fields:

Highlights:
 - Benny Halevy and Tigran Mkrtchyan implemented some more 4.1 features,
   moving us closer to a complete 4.1 implementation.
 - Bernd Schubert fixed a long-standing problem with readdir cookies on
   ext2/3/4.
 - Jeff Layton performed a long-overdue overhaul of the server reboot
   recovery code which will allow us to deprecate the current code (a
   rather unusual user of the vfs), and give us some needed flexibility
   for further improvements.
 - Like the client, we now support numeric uids and gids in the
   auth_sys case, allowing easier upgrades from NFSv2/v3 to v4.x.

Plus miscellaneous bugfixes and cleanup.

Thanks to everyone!

There are also some delegation fixes waiting on vfs review that I
suppose will have to wait for 3.5.  With that done I think we'll finally
turn off the "EXPERIMENTAL" dependency for v4 (though that's mostly
symbolic as it's been on by default in distros for a while).

And the list of 4.1 todos should be achievable for 3.5 as well:

   http://wiki.linux-nfs.org/wiki/index.php/Server_4.0_and_4.1_issues

though we may still want a bit more experience with it before turning it
on by default.

* 'for-3.4' of git://linux-nfs.org/~bfields/linux: (55 commits)
  nfsd: only register cld pipe notifier when CONFIG_NFSD_V4 is enabled
  nfsd4: use auth_unix unconditionally on backchannel
  nfsd: fix NULL pointer dereference in cld_pipe_downcall
  nfsd4: memory corruption in numeric_name_to_id()
  sunrpc: skip portmap calls on sessions backchannel
  nfsd4: allow numeric idmapping
  nfsd: don't allow legacy client tracker init for anything but init_net
  nfsd: add notifier to handle mount/unmount of rpc_pipefs sb
  nfsd: add the infrastructure to handle the cld upcall
  nfsd: add a header describing upcall to nfsdcld
  nfsd: add a per-net-namespace struct for nfsd
  sunrpc: create nfsd dir in rpc_pipefs
  nfsd: add nfsd4_client_tracking_ops struct and a way to set it
  nfsd: convert nfs4_client->cl_cb_flags to a generic flags field
  NFSD: Fix nfs4_verifier memory alignment
  NFSD: Fix warnings when NFSD_DEBUG is not defined
  nfsd: vfs_llseek() with 32 or 64 bit offsets (hashes)
  nfsd: rename 'int access' to 'int may_flags' in nfsd_open()
  ext4: return 32/64-bit dir name hash according to usage type
  fs: add new FMODE flags: FMODE_32bithash and FMODE_64bithash
  ...
parents 50483c32 797a9d79
...@@ -1699,6 +1699,12 @@ bytes respectively. Such letter suffixes can also be entirely omitted. ...@@ -1699,6 +1699,12 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
The default is to send the implementation identification The default is to send the implementation identification
information. information.
nfsd.nfs4_disable_idmapping=
[NFSv4] When set to the default of '1', the NFSv4
server will return only numeric uids and gids to
clients using auth_sys, and will accept numeric uids
and gids from such clients. This is intended to ease
migration from NFSv2/v3.
objlayoutdriver.osd_login_prog= objlayoutdriver.osd_login_prog=
[NFS] [OBJLAYOUT] sets the pathname to the program which [NFS] [OBJLAYOUT] sets the pathname to the program which
......
...@@ -32,24 +32,8 @@ static unsigned char ext4_filetype_table[] = { ...@@ -32,24 +32,8 @@ static unsigned char ext4_filetype_table[] = {
DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
}; };
static int ext4_readdir(struct file *, void *, filldir_t);
static int ext4_dx_readdir(struct file *filp, static int ext4_dx_readdir(struct file *filp,
void *dirent, filldir_t filldir); void *dirent, filldir_t filldir);
static int ext4_release_dir(struct inode *inode,
struct file *filp);
const struct file_operations ext4_dir_operations = {
.llseek = ext4_llseek,
.read = generic_read_dir,
.readdir = ext4_readdir, /* we take BKL. needed?*/
.unlocked_ioctl = ext4_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = ext4_compat_ioctl,
#endif
.fsync = ext4_sync_file,
.release = ext4_release_dir,
};
static unsigned char get_dtype(struct super_block *sb, int filetype) static unsigned char get_dtype(struct super_block *sb, int filetype)
{ {
...@@ -60,6 +44,26 @@ static unsigned char get_dtype(struct super_block *sb, int filetype) ...@@ -60,6 +44,26 @@ static unsigned char get_dtype(struct super_block *sb, int filetype)
return (ext4_filetype_table[filetype]); return (ext4_filetype_table[filetype]);
} }
/**
* Check if the given dir-inode refers to an htree-indexed directory
* (or a directory which could potentially get converted to use htree
* indexing).
*
* Return 1 if it is a dx dir, 0 if not
*/
static int is_dx_dir(struct inode *inode)
{
struct super_block *sb = inode->i_sb;
if (EXT4_HAS_COMPAT_FEATURE(inode->i_sb,
EXT4_FEATURE_COMPAT_DIR_INDEX) &&
((ext4_test_inode_flag(inode, EXT4_INODE_INDEX)) ||
((inode->i_size >> sb->s_blocksize_bits) == 1)))
return 1;
return 0;
}
/* /*
* Return 0 if the directory entry is OK, and 1 if there is a problem * Return 0 if the directory entry is OK, and 1 if there is a problem
* *
...@@ -115,18 +119,13 @@ static int ext4_readdir(struct file *filp, ...@@ -115,18 +119,13 @@ static int ext4_readdir(struct file *filp,
unsigned int offset; unsigned int offset;
int i, stored; int i, stored;
struct ext4_dir_entry_2 *de; struct ext4_dir_entry_2 *de;
struct super_block *sb;
int err; int err;
struct inode *inode = filp->f_path.dentry->d_inode; struct inode *inode = filp->f_path.dentry->d_inode;
struct super_block *sb = inode->i_sb;
int ret = 0; int ret = 0;
int dir_has_error = 0; int dir_has_error = 0;
sb = inode->i_sb; if (is_dx_dir(inode)) {
if (EXT4_HAS_COMPAT_FEATURE(inode->i_sb,
EXT4_FEATURE_COMPAT_DIR_INDEX) &&
((ext4_test_inode_flag(inode, EXT4_INODE_INDEX)) ||
((inode->i_size >> sb->s_blocksize_bits) == 1))) {
err = ext4_dx_readdir(filp, dirent, filldir); err = ext4_dx_readdir(filp, dirent, filldir);
if (err != ERR_BAD_DX_DIR) { if (err != ERR_BAD_DX_DIR) {
ret = err; ret = err;
...@@ -254,22 +253,134 @@ static int ext4_readdir(struct file *filp, ...@@ -254,22 +253,134 @@ static int ext4_readdir(struct file *filp,
return ret; return ret;
} }
static inline int is_32bit_api(void)
{
#ifdef CONFIG_COMPAT
return is_compat_task();
#else
return (BITS_PER_LONG == 32);
#endif
}
/* /*
* These functions convert from the major/minor hash to an f_pos * These functions convert from the major/minor hash to an f_pos
* value. * value for dx directories
*
* Upper layer (for example NFS) should specify FMODE_32BITHASH or
* FMODE_64BITHASH explicitly. On the other hand, we allow ext4 to be mounted
* directly on both 32-bit and 64-bit nodes, under such case, neither
* FMODE_32BITHASH nor FMODE_64BITHASH is specified.
*/
static inline loff_t hash2pos(struct file *filp, __u32 major, __u32 minor)
{
if ((filp->f_mode & FMODE_32BITHASH) ||
(!(filp->f_mode & FMODE_64BITHASH) && is_32bit_api()))
return major >> 1;
else
return ((__u64)(major >> 1) << 32) | (__u64)minor;
}
static inline __u32 pos2maj_hash(struct file *filp, loff_t pos)
{
if ((filp->f_mode & FMODE_32BITHASH) ||
(!(filp->f_mode & FMODE_64BITHASH) && is_32bit_api()))
return (pos << 1) & 0xffffffff;
else
return ((pos >> 32) << 1) & 0xffffffff;
}
static inline __u32 pos2min_hash(struct file *filp, loff_t pos)
{
if ((filp->f_mode & FMODE_32BITHASH) ||
(!(filp->f_mode & FMODE_64BITHASH) && is_32bit_api()))
return 0;
else
return pos & 0xffffffff;
}
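For a reader flagged 64-bit (or a native 64-bit task with neither flag), these helpers round-trip the hash pair losslessly: ext4fs_dirhash() always clears the low bit of the major hash, so the ">> 1" in hash2pos() and the "<< 1" in pos2maj_hash() cancel out. A small userspace illustration of the 64-bit packing (not part of the patch; a standalone restatement of the helpers above):

#include <assert.h>
#include <stdint.h>

/* 64-bit branch of hash2pos(), with the struct file plumbing dropped */
static uint64_t hash2pos64(uint32_t major, uint32_t minor)
{
	return ((uint64_t)(major >> 1) << 32) | minor;
}

int main(void)
{
	uint32_t major = 0x89abcdee;	/* low bit already cleared by ext4fs_dirhash() */
	uint32_t minor = 0x12345678;
	uint64_t pos = hash2pos64(major, minor);

	assert((uint32_t)((pos >> 32) << 1) == major);	/* pos2maj_hash(), 64-bit branch */
	assert((uint32_t)(pos & 0xffffffff) == minor);	/* pos2min_hash(), 64-bit branch */
	return 0;
}

In the 32-bit case only "major >> 1" fits in the offset, so the minor hash is dropped and seeking back to a cookie can only land on the right major-hash bucket.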
/*
* Return 32- or 64-bit end-of-file for dx directories
*/
static inline loff_t ext4_get_htree_eof(struct file *filp)
{
if ((filp->f_mode & FMODE_32BITHASH) ||
(!(filp->f_mode & FMODE_64BITHASH) && is_32bit_api()))
return EXT4_HTREE_EOF_32BIT;
else
return EXT4_HTREE_EOF_64BIT;
}
/*
* ext4_dir_llseek() based on generic_file_llseek() to handle both
* non-htree and htree directories, where the "offset" is in terms
* of the filename hash value instead of the byte offset.
* *
* Currently we only use major hash numer. This is unfortunate, but * NOTE: offsets obtained *before* ext4_set_inode_flag(dir, EXT4_INODE_INDEX)
* on 32-bit machines, the same VFS interface is used for lseek and * will be invalid once the directory is converted into a dx directory
* llseek, so if we use the 64 bit offset, then the 32-bit versions of
* lseek/telldir/seekdir will blow out spectacularly, and from within
* the ext2 low-level routine, we don't know if we're being called by
* a 64-bit version of the system call or the 32-bit version of the
* system call. Worse yet, NFSv2 only allows for a 32-bit readdir
* cookie. Sigh.
*/ */
#define hash2pos(major, minor) (major >> 1) loff_t ext4_dir_llseek(struct file *file, loff_t offset, int origin)
#define pos2maj_hash(pos) ((pos << 1) & 0xffffffff) {
#define pos2min_hash(pos) (0) struct inode *inode = file->f_mapping->host;
loff_t ret = -EINVAL;
int dx_dir = is_dx_dir(inode);
mutex_lock(&inode->i_mutex);
/* NOTE: relative offsets with dx directories might not work
* as expected, as it is difficult to figure out the
* correct offset between dx hashes */
switch (origin) {
case SEEK_END:
if (unlikely(offset > 0))
goto out_err; /* not supported for directories */
/* so only negative offsets are left, does that have a
* meaning for directories at all? */
if (dx_dir)
offset += ext4_get_htree_eof(file);
else
offset += inode->i_size;
break;
case SEEK_CUR:
/*
* Here we special-case the lseek(fd, 0, SEEK_CUR)
* position-querying operation. Avoid rewriting the "same"
* f_pos value back to the file because a concurrent read(),
* write() or lseek() might have altered it
*/
if (offset == 0) {
offset = file->f_pos;
goto out_ok;
}
offset += file->f_pos;
break;
}
if (unlikely(offset < 0))
goto out_err;
if (!dx_dir) {
if (offset > inode->i_sb->s_maxbytes)
goto out_err;
} else if (offset > ext4_get_htree_eof(file))
goto out_err;
/* Special lock needed here? */
if (offset != file->f_pos) {
file->f_pos = offset;
file->f_version = 0;
}
out_ok:
ret = offset;
out_err:
mutex_unlock(&inode->i_mutex);
return ret;
}
/* /*
* This structure holds the nodes of the red-black tree used to store * This structure holds the nodes of the red-black tree used to store
...@@ -330,15 +441,16 @@ static void free_rb_tree_fname(struct rb_root *root) ...@@ -330,15 +441,16 @@ static void free_rb_tree_fname(struct rb_root *root)
} }
static struct dir_private_info *ext4_htree_create_dir_info(loff_t pos) static struct dir_private_info *ext4_htree_create_dir_info(struct file *filp,
loff_t pos)
{ {
struct dir_private_info *p; struct dir_private_info *p;
p = kzalloc(sizeof(struct dir_private_info), GFP_KERNEL); p = kzalloc(sizeof(struct dir_private_info), GFP_KERNEL);
if (!p) if (!p)
return NULL; return NULL;
p->curr_hash = pos2maj_hash(pos); p->curr_hash = pos2maj_hash(filp, pos);
p->curr_minor_hash = pos2min_hash(pos); p->curr_minor_hash = pos2min_hash(filp, pos);
return p; return p;
} }
...@@ -430,7 +542,7 @@ static int call_filldir(struct file *filp, void *dirent, ...@@ -430,7 +542,7 @@ static int call_filldir(struct file *filp, void *dirent,
inode->i_ino, current->comm); inode->i_ino, current->comm);
return 0; return 0;
} }
curr_pos = hash2pos(fname->hash, fname->minor_hash); curr_pos = hash2pos(filp, fname->hash, fname->minor_hash);
while (fname) { while (fname) {
error = filldir(dirent, fname->name, error = filldir(dirent, fname->name,
fname->name_len, curr_pos, fname->name_len, curr_pos,
...@@ -455,13 +567,13 @@ static int ext4_dx_readdir(struct file *filp, ...@@ -455,13 +567,13 @@ static int ext4_dx_readdir(struct file *filp,
int ret; int ret;
if (!info) { if (!info) {
info = ext4_htree_create_dir_info(filp->f_pos); info = ext4_htree_create_dir_info(filp, filp->f_pos);
if (!info) if (!info)
return -ENOMEM; return -ENOMEM;
filp->private_data = info; filp->private_data = info;
} }
if (filp->f_pos == EXT4_HTREE_EOF) if (filp->f_pos == ext4_get_htree_eof(filp))
return 0; /* EOF */ return 0; /* EOF */
/* Some one has messed with f_pos; reset the world */ /* Some one has messed with f_pos; reset the world */
...@@ -469,8 +581,8 @@ static int ext4_dx_readdir(struct file *filp, ...@@ -469,8 +581,8 @@ static int ext4_dx_readdir(struct file *filp,
free_rb_tree_fname(&info->root); free_rb_tree_fname(&info->root);
info->curr_node = NULL; info->curr_node = NULL;
info->extra_fname = NULL; info->extra_fname = NULL;
info->curr_hash = pos2maj_hash(filp->f_pos); info->curr_hash = pos2maj_hash(filp, filp->f_pos);
info->curr_minor_hash = pos2min_hash(filp->f_pos); info->curr_minor_hash = pos2min_hash(filp, filp->f_pos);
} }
/* /*
...@@ -502,7 +614,7 @@ static int ext4_dx_readdir(struct file *filp, ...@@ -502,7 +614,7 @@ static int ext4_dx_readdir(struct file *filp,
if (ret < 0) if (ret < 0)
return ret; return ret;
if (ret == 0) { if (ret == 0) {
filp->f_pos = EXT4_HTREE_EOF; filp->f_pos = ext4_get_htree_eof(filp);
break; break;
} }
info->curr_node = rb_first(&info->root); info->curr_node = rb_first(&info->root);
...@@ -522,7 +634,7 @@ static int ext4_dx_readdir(struct file *filp, ...@@ -522,7 +634,7 @@ static int ext4_dx_readdir(struct file *filp,
info->curr_minor_hash = fname->minor_hash; info->curr_minor_hash = fname->minor_hash;
} else { } else {
if (info->next_hash == ~0) { if (info->next_hash == ~0) {
filp->f_pos = EXT4_HTREE_EOF; filp->f_pos = ext4_get_htree_eof(filp);
break; break;
} }
info->curr_hash = info->next_hash; info->curr_hash = info->next_hash;
...@@ -541,3 +653,15 @@ static int ext4_release_dir(struct inode *inode, struct file *filp) ...@@ -541,3 +653,15 @@ static int ext4_release_dir(struct inode *inode, struct file *filp)
return 0; return 0;
} }
const struct file_operations ext4_dir_operations = {
.llseek = ext4_dir_llseek,
.read = generic_read_dir,
.readdir = ext4_readdir,
.unlocked_ioctl = ext4_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = ext4_compat_ioctl,
#endif
.fsync = ext4_sync_file,
.release = ext4_release_dir,
};
...@@ -1623,7 +1623,11 @@ struct dx_hash_info ...@@ -1623,7 +1623,11 @@ struct dx_hash_info
u32 *seed; u32 *seed;
}; };
#define EXT4_HTREE_EOF 0x7fffffff
/* 32 and 64 bit signed EOF for dx directories */
#define EXT4_HTREE_EOF_32BIT ((1UL << (32 - 1)) - 1)
#define EXT4_HTREE_EOF_64BIT ((1ULL << (64 - 1)) - 1)
/* /*
* Control parameters used by ext4_htree_next_block * Control parameters used by ext4_htree_next_block
......
...@@ -200,8 +200,8 @@ int ext4fs_dirhash(const char *name, int len, struct dx_hash_info *hinfo) ...@@ -200,8 +200,8 @@ int ext4fs_dirhash(const char *name, int len, struct dx_hash_info *hinfo)
return -1; return -1;
} }
hash = hash & ~1; hash = hash & ~1;
if (hash == (EXT4_HTREE_EOF << 1)) if (hash == (EXT4_HTREE_EOF_32BIT << 1))
hash = (EXT4_HTREE_EOF-1) << 1; hash = (EXT4_HTREE_EOF_32BIT - 1) << 1;
hinfo->hash = hash; hinfo->hash = hash;
hinfo->minor_hash = minor_hash; hinfo->minor_hash = minor_hash;
return 0; return 0;
......
...@@ -496,7 +496,7 @@ static int param_set_##name(const char *val, struct kernel_param *kp) \ ...@@ -496,7 +496,7 @@ static int param_set_##name(const char *val, struct kernel_param *kp) \
__typeof__(type) num = which_strtol(val, &endp, 0); \ __typeof__(type) num = which_strtol(val, &endp, 0); \
if (endp == val || *endp || num < (min) || num > (max)) \ if (endp == val || *endp || num < (min) || num > (max)) \
return -EINVAL; \ return -EINVAL; \
*((int *) kp->arg) = num; \ *((type *) kp->arg) = num; \
return 0; \ return 0; \
} }
......
#ifndef _NFSD4_CURRENT_STATE_H
#define _NFSD4_CURRENT_STATE_H
#include "state.h"
#include "xdr4.h"
extern void clear_current_stateid(struct nfsd4_compound_state *cstate);
/*
* functions to set current state id
*/
extern void nfsd4_set_opendowngradestateid(struct nfsd4_compound_state *cstate, struct nfsd4_open_downgrade *);
extern void nfsd4_set_openstateid(struct nfsd4_compound_state *, struct nfsd4_open *);
extern void nfsd4_set_lockstateid(struct nfsd4_compound_state *, struct nfsd4_lock *);
extern void nfsd4_set_closestateid(struct nfsd4_compound_state *, struct nfsd4_close *);
/*
* functions to consume current state id
*/
extern void nfsd4_get_opendowngradestateid(struct nfsd4_compound_state *cstate, struct nfsd4_open_downgrade *);
extern void nfsd4_get_delegreturnstateid(struct nfsd4_compound_state *, struct nfsd4_delegreturn *);
extern void nfsd4_get_freestateid(struct nfsd4_compound_state *, struct nfsd4_free_stateid *);
extern void nfsd4_get_setattrstateid(struct nfsd4_compound_state *, struct nfsd4_setattr *);
extern void nfsd4_get_closestateid(struct nfsd4_compound_state *, struct nfsd4_close *);
extern void nfsd4_get_lockustateid(struct nfsd4_compound_state *, struct nfsd4_locku *);
extern void nfsd4_get_readstateid(struct nfsd4_compound_state *, struct nfsd4_read *);
extern void nfsd4_get_writestateid(struct nfsd4_compound_state *, struct nfsd4_write *);
#endif /* _NFSD4_CURRENT_STATE_H */
...@@ -87,7 +87,7 @@ static int expkey_parse(struct cache_detail *cd, char *mesg, int mlen) ...@@ -87,7 +87,7 @@ static int expkey_parse(struct cache_detail *cd, char *mesg, int mlen)
struct svc_expkey key; struct svc_expkey key;
struct svc_expkey *ek = NULL; struct svc_expkey *ek = NULL;
if (mlen < 1 || mesg[mlen-1] != '\n') if (mesg[mlen - 1] != '\n')
return -EINVAL; return -EINVAL;
mesg[mlen-1] = 0; mesg[mlen-1] = 0;
......
/*
* per net namespace data structures for nfsd
*
* Copyright (C) 2012, Jeff Layton <jlayton@redhat.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc., 51
* Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#ifndef __NFSD_NETNS_H__
#define __NFSD_NETNS_H__
#include <net/net_namespace.h>
#include <net/netns/generic.h>
struct cld_net;
struct nfsd_net {
struct cld_net *cld_net;
};
extern int nfsd_net_id;
#endif /* __NFSD_NETNS_H__ */
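For reference, data registered through a pernet_operations entry (see the nfsd_net_ops hunk in nfsctl.c below) is looked up with net_generic(). A minimal sketch of the lookup the nfsd code performs wherever it needs its per-namespace state (the wrapper name here is illustrative; callers can simply open-code the call):

#include <net/netns/generic.h>
#include "netns.h"

/* Sketch: fetch nfsd's per-namespace data for a given struct net. */
static inline struct nfsd_net *get_nfsd_net(struct net *net)
{
	return net_generic(net, nfsd_net_id);
}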
...@@ -645,7 +645,6 @@ static int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *c ...@@ -645,7 +645,6 @@ static int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *c
.timeout = &timeparms, .timeout = &timeparms,
.program = &cb_program, .program = &cb_program,
.version = 0, .version = 0,
.authflavor = clp->cl_flavor,
.flags = (RPC_CLNT_CREATE_NOPING | RPC_CLNT_CREATE_QUIET), .flags = (RPC_CLNT_CREATE_NOPING | RPC_CLNT_CREATE_QUIET),
}; };
struct rpc_clnt *client; struct rpc_clnt *client;
...@@ -656,6 +655,7 @@ static int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *c ...@@ -656,6 +655,7 @@ static int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *c
args.client_name = clp->cl_principal; args.client_name = clp->cl_principal;
args.prognumber = conn->cb_prog, args.prognumber = conn->cb_prog,
args.protocol = XPRT_TRANSPORT_TCP; args.protocol = XPRT_TRANSPORT_TCP;
args.authflavor = clp->cl_flavor;
clp->cl_cb_ident = conn->cb_ident; clp->cl_cb_ident = conn->cb_ident;
} else { } else {
if (!conn->cb_xprt) if (!conn->cb_xprt)
...@@ -665,6 +665,7 @@ static int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *c ...@@ -665,6 +665,7 @@ static int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *c
args.bc_xprt = conn->cb_xprt; args.bc_xprt = conn->cb_xprt;
args.prognumber = clp->cl_cb_session->se_cb_prog; args.prognumber = clp->cl_cb_session->se_cb_prog;
args.protocol = XPRT_TRANSPORT_BC_TCP; args.protocol = XPRT_TRANSPORT_BC_TCP;
args.authflavor = RPC_AUTH_UNIX;
} }
/* Create RPC client */ /* Create RPC client */
client = rpc_create(&args); client = rpc_create(&args);
...@@ -754,9 +755,9 @@ static void do_probe_callback(struct nfs4_client *clp) ...@@ -754,9 +755,9 @@ static void do_probe_callback(struct nfs4_client *clp)
*/ */
void nfsd4_probe_callback(struct nfs4_client *clp) void nfsd4_probe_callback(struct nfs4_client *clp)
{ {
/* XXX: atomicity? Also, should we be using cl_cb_flags? */ /* XXX: atomicity? Also, should we be using cl_flags? */
clp->cl_cb_state = NFSD4_CB_UNKNOWN; clp->cl_cb_state = NFSD4_CB_UNKNOWN;
set_bit(NFSD4_CLIENT_CB_UPDATE, &clp->cl_cb_flags); set_bit(NFSD4_CLIENT_CB_UPDATE, &clp->cl_flags);
do_probe_callback(clp); do_probe_callback(clp);
} }
...@@ -915,7 +916,7 @@ void nfsd4_destroy_callback_queue(void) ...@@ -915,7 +916,7 @@ void nfsd4_destroy_callback_queue(void)
/* must be called under the state lock */ /* must be called under the state lock */
void nfsd4_shutdown_callback(struct nfs4_client *clp) void nfsd4_shutdown_callback(struct nfs4_client *clp)
{ {
set_bit(NFSD4_CLIENT_KILL, &clp->cl_cb_flags); set_bit(NFSD4_CLIENT_CB_KILL, &clp->cl_flags);
/* /*
* Note this won't actually result in a null callback; * Note this won't actually result in a null callback;
* instead, nfsd4_do_callback_rpc() will detect the killed * instead, nfsd4_do_callback_rpc() will detect the killed
...@@ -966,15 +967,15 @@ static void nfsd4_process_cb_update(struct nfsd4_callback *cb) ...@@ -966,15 +967,15 @@ static void nfsd4_process_cb_update(struct nfsd4_callback *cb)
svc_xprt_put(clp->cl_cb_conn.cb_xprt); svc_xprt_put(clp->cl_cb_conn.cb_xprt);
clp->cl_cb_conn.cb_xprt = NULL; clp->cl_cb_conn.cb_xprt = NULL;
} }
if (test_bit(NFSD4_CLIENT_KILL, &clp->cl_cb_flags)) if (test_bit(NFSD4_CLIENT_CB_KILL, &clp->cl_flags))
return; return;
spin_lock(&clp->cl_lock); spin_lock(&clp->cl_lock);
/* /*
* Only serialized callback code is allowed to clear these * Only serialized callback code is allowed to clear these
* flags; main nfsd code can only set them: * flags; main nfsd code can only set them:
*/ */
BUG_ON(!clp->cl_cb_flags); BUG_ON(!(clp->cl_flags & NFSD4_CLIENT_CB_FLAG_MASK));
clear_bit(NFSD4_CLIENT_CB_UPDATE, &clp->cl_cb_flags); clear_bit(NFSD4_CLIENT_CB_UPDATE, &clp->cl_flags);
memcpy(&conn, &cb->cb_clp->cl_cb_conn, sizeof(struct nfs4_cb_conn)); memcpy(&conn, &cb->cb_clp->cl_cb_conn, sizeof(struct nfs4_cb_conn));
c = __nfsd4_find_backchannel(clp); c = __nfsd4_find_backchannel(clp);
if (c) { if (c) {
...@@ -986,7 +987,7 @@ static void nfsd4_process_cb_update(struct nfsd4_callback *cb) ...@@ -986,7 +987,7 @@ static void nfsd4_process_cb_update(struct nfsd4_callback *cb)
err = setup_callback_client(clp, &conn, ses); err = setup_callback_client(clp, &conn, ses);
if (err) { if (err) {
warn_no_callback_path(clp, err); nfsd4_mark_cb_down(clp, err);
return; return;
} }
/* Yay, the callback channel's back! Restart any callbacks: */ /* Yay, the callback channel's back! Restart any callbacks: */
...@@ -1000,7 +1001,7 @@ void nfsd4_do_callback_rpc(struct work_struct *w) ...@@ -1000,7 +1001,7 @@ void nfsd4_do_callback_rpc(struct work_struct *w)
struct nfs4_client *clp = cb->cb_clp; struct nfs4_client *clp = cb->cb_clp;
struct rpc_clnt *clnt; struct rpc_clnt *clnt;
if (clp->cl_cb_flags) if (clp->cl_flags & NFSD4_CLIENT_CB_FLAG_MASK)
nfsd4_process_cb_update(cb); nfsd4_process_cb_update(cb);
clnt = clp->cl_cb_client; clnt = clp->cl_cb_client;
......
...@@ -40,6 +40,14 @@ ...@@ -40,6 +40,14 @@
#include "idmap.h" #include "idmap.h"
#include "nfsd.h" #include "nfsd.h"
/*
* Turn off idmapping when using AUTH_SYS.
*/
static bool nfs4_disable_idmapping = true;
module_param(nfs4_disable_idmapping, bool, 0644);
MODULE_PARM_DESC(nfs4_disable_idmapping,
"Turn off server's NFSv4 idmapping when using 'sec=sys'");
/* /*
* Cache entry * Cache entry
*/ */
...@@ -561,28 +569,65 @@ idmap_id_to_name(struct svc_rqst *rqstp, int type, uid_t id, char *name) ...@@ -561,28 +569,65 @@ idmap_id_to_name(struct svc_rqst *rqstp, int type, uid_t id, char *name)
return ret; return ret;
} }
static bool
numeric_name_to_id(struct svc_rqst *rqstp, int type, const char *name, u32 namelen, uid_t *id)
{
int ret;
char buf[11];
if (namelen + 1 > sizeof(buf))
/* too long to represent a 32-bit id: */
return false;
/* Just to make sure it's null-terminated: */
memcpy(buf, name, namelen);
buf[namelen] = '\0';
ret = kstrtouint(buf, 10, id);
return ret == 0;
}
static __be32
do_name_to_id(struct svc_rqst *rqstp, int type, const char *name, u32 namelen, uid_t *id)
{
if (nfs4_disable_idmapping && rqstp->rq_flavor < RPC_AUTH_GSS)
if (numeric_name_to_id(rqstp, type, name, namelen, id))
return 0;
/*
* otherwise, fall through and try idmapping, for
* backwards compatibility with clients sending names:
*/
return idmap_name_to_id(rqstp, type, name, namelen, id);
}
static int
do_id_to_name(struct svc_rqst *rqstp, int type, uid_t id, char *name)
{
if (nfs4_disable_idmapping && rqstp->rq_flavor < RPC_AUTH_GSS)
return sprintf(name, "%u", id);
return idmap_id_to_name(rqstp, type, id, name);
}
__be32 __be32
nfsd_map_name_to_uid(struct svc_rqst *rqstp, const char *name, size_t namelen, nfsd_map_name_to_uid(struct svc_rqst *rqstp, const char *name, size_t namelen,
__u32 *id) __u32 *id)
{ {
return idmap_name_to_id(rqstp, IDMAP_TYPE_USER, name, namelen, id); return do_name_to_id(rqstp, IDMAP_TYPE_USER, name, namelen, id);
} }
__be32 __be32
nfsd_map_name_to_gid(struct svc_rqst *rqstp, const char *name, size_t namelen, nfsd_map_name_to_gid(struct svc_rqst *rqstp, const char *name, size_t namelen,
__u32 *id) __u32 *id)
{ {
return idmap_name_to_id(rqstp, IDMAP_TYPE_GROUP, name, namelen, id); return do_name_to_id(rqstp, IDMAP_TYPE_GROUP, name, namelen, id);
} }
int int
nfsd_map_uid_to_name(struct svc_rqst *rqstp, __u32 id, char *name) nfsd_map_uid_to_name(struct svc_rqst *rqstp, __u32 id, char *name)
{ {
return idmap_id_to_name(rqstp, IDMAP_TYPE_USER, id, name); return do_id_to_name(rqstp, IDMAP_TYPE_USER, id, name);
} }
int int
nfsd_map_gid_to_name(struct svc_rqst *rqstp, __u32 id, char *name) nfsd_map_gid_to_name(struct svc_rqst *rqstp, __u32 id, char *name)
{ {
return idmap_id_to_name(rqstp, IDMAP_TYPE_GROUP, id, name); return do_id_to_name(rqstp, IDMAP_TYPE_GROUP, id, name);
} }
...@@ -133,22 +133,6 @@ xdr_error: \ ...@@ -133,22 +133,6 @@ xdr_error: \
} \ } \
} while (0) } while (0)
static void save_buf(struct nfsd4_compoundargs *argp, struct nfsd4_saved_compoundargs *savep)
{
savep->p = argp->p;
savep->end = argp->end;
savep->pagelen = argp->pagelen;
savep->pagelist = argp->pagelist;
}
static void restore_buf(struct nfsd4_compoundargs *argp, struct nfsd4_saved_compoundargs *savep)
{
argp->p = savep->p;
argp->end = savep->end;
argp->pagelen = savep->pagelen;
argp->pagelist = savep->pagelist;
}
static __be32 *read_buf(struct nfsd4_compoundargs *argp, u32 nbytes) static __be32 *read_buf(struct nfsd4_compoundargs *argp, u32 nbytes)
{ {
/* We want more bytes than seem to be available. /* We want more bytes than seem to be available.
...@@ -638,14 +622,18 @@ nfsd4_decode_lookup(struct nfsd4_compoundargs *argp, struct nfsd4_lookup *lookup ...@@ -638,14 +622,18 @@ nfsd4_decode_lookup(struct nfsd4_compoundargs *argp, struct nfsd4_lookup *lookup
DECODE_TAIL; DECODE_TAIL;
} }
static __be32 nfsd4_decode_share_access(struct nfsd4_compoundargs *argp, u32 *x) static __be32 nfsd4_decode_share_access(struct nfsd4_compoundargs *argp, u32 *share_access, u32 *deleg_want, u32 *deleg_when)
{ {
__be32 *p; __be32 *p;
u32 w; u32 w;
READ_BUF(4); READ_BUF(4);
READ32(w); READ32(w);
*x = w; *share_access = w & NFS4_SHARE_ACCESS_MASK;
*deleg_want = w & NFS4_SHARE_WANT_MASK;
if (deleg_when)
*deleg_when = w & NFS4_SHARE_WHEN_MASK;
switch (w & NFS4_SHARE_ACCESS_MASK) { switch (w & NFS4_SHARE_ACCESS_MASK) {
case NFS4_SHARE_ACCESS_READ: case NFS4_SHARE_ACCESS_READ:
case NFS4_SHARE_ACCESS_WRITE: case NFS4_SHARE_ACCESS_WRITE:
...@@ -673,6 +661,9 @@ static __be32 nfsd4_decode_share_access(struct nfsd4_compoundargs *argp, u32 *x) ...@@ -673,6 +661,9 @@ static __be32 nfsd4_decode_share_access(struct nfsd4_compoundargs *argp, u32 *x)
w &= ~NFS4_SHARE_WANT_MASK; w &= ~NFS4_SHARE_WANT_MASK;
if (!w) if (!w)
return nfs_ok; return nfs_ok;
if (!deleg_when) /* open_downgrade */
return nfserr_inval;
switch (w) { switch (w) {
case NFS4_SHARE_SIGNAL_DELEG_WHEN_RESRC_AVAIL: case NFS4_SHARE_SIGNAL_DELEG_WHEN_RESRC_AVAIL:
case NFS4_SHARE_PUSH_DELEG_WHEN_UNCONTENDED: case NFS4_SHARE_PUSH_DELEG_WHEN_UNCONTENDED:
...@@ -719,6 +710,7 @@ static __be32 ...@@ -719,6 +710,7 @@ static __be32
nfsd4_decode_open(struct nfsd4_compoundargs *argp, struct nfsd4_open *open) nfsd4_decode_open(struct nfsd4_compoundargs *argp, struct nfsd4_open *open)
{ {
DECODE_HEAD; DECODE_HEAD;
u32 dummy;
memset(open->op_bmval, 0, sizeof(open->op_bmval)); memset(open->op_bmval, 0, sizeof(open->op_bmval));
open->op_iattr.ia_valid = 0; open->op_iattr.ia_valid = 0;
...@@ -727,7 +719,9 @@ nfsd4_decode_open(struct nfsd4_compoundargs *argp, struct nfsd4_open *open) ...@@ -727,7 +719,9 @@ nfsd4_decode_open(struct nfsd4_compoundargs *argp, struct nfsd4_open *open)
/* seqid, share_access, share_deny, clientid, ownerlen */ /* seqid, share_access, share_deny, clientid, ownerlen */
READ_BUF(4); READ_BUF(4);
READ32(open->op_seqid); READ32(open->op_seqid);
status = nfsd4_decode_share_access(argp, &open->op_share_access); /* decode, yet ignore deleg_when until supported */
status = nfsd4_decode_share_access(argp, &open->op_share_access,
&open->op_deleg_want, &dummy);
if (status) if (status)
goto xdr_error; goto xdr_error;
status = nfsd4_decode_share_deny(argp, &open->op_share_deny); status = nfsd4_decode_share_deny(argp, &open->op_share_deny);
...@@ -755,14 +749,14 @@ nfsd4_decode_open(struct nfsd4_compoundargs *argp, struct nfsd4_open *open) ...@@ -755,14 +749,14 @@ nfsd4_decode_open(struct nfsd4_compoundargs *argp, struct nfsd4_open *open)
goto out; goto out;
break; break;
case NFS4_CREATE_EXCLUSIVE: case NFS4_CREATE_EXCLUSIVE:
READ_BUF(8); READ_BUF(NFS4_VERIFIER_SIZE);
COPYMEM(open->op_verf.data, 8); COPYMEM(open->op_verf.data, NFS4_VERIFIER_SIZE);
break; break;
case NFS4_CREATE_EXCLUSIVE4_1: case NFS4_CREATE_EXCLUSIVE4_1:
if (argp->minorversion < 1) if (argp->minorversion < 1)
goto xdr_error; goto xdr_error;
READ_BUF(8); READ_BUF(NFS4_VERIFIER_SIZE);
COPYMEM(open->op_verf.data, 8); COPYMEM(open->op_verf.data, NFS4_VERIFIER_SIZE);
status = nfsd4_decode_fattr(argp, open->op_bmval, status = nfsd4_decode_fattr(argp, open->op_bmval,
&open->op_iattr, &open->op_acl); &open->op_iattr, &open->op_acl);
if (status) if (status)
...@@ -848,7 +842,8 @@ nfsd4_decode_open_downgrade(struct nfsd4_compoundargs *argp, struct nfsd4_open_d ...@@ -848,7 +842,8 @@ nfsd4_decode_open_downgrade(struct nfsd4_compoundargs *argp, struct nfsd4_open_d
return status; return status;
READ_BUF(4); READ_BUF(4);
READ32(open_down->od_seqid); READ32(open_down->od_seqid);
status = nfsd4_decode_share_access(argp, &open_down->od_share_access); status = nfsd4_decode_share_access(argp, &open_down->od_share_access,
&open_down->od_deleg_want, NULL);
if (status) if (status)
return status; return status;
status = nfsd4_decode_share_deny(argp, &open_down->od_share_deny); status = nfsd4_decode_share_deny(argp, &open_down->od_share_deny);
...@@ -994,8 +989,8 @@ nfsd4_decode_setclientid(struct nfsd4_compoundargs *argp, struct nfsd4_setclient ...@@ -994,8 +989,8 @@ nfsd4_decode_setclientid(struct nfsd4_compoundargs *argp, struct nfsd4_setclient
{ {
DECODE_HEAD; DECODE_HEAD;
READ_BUF(8); READ_BUF(NFS4_VERIFIER_SIZE);
COPYMEM(setclientid->se_verf.data, 8); COPYMEM(setclientid->se_verf.data, NFS4_VERIFIER_SIZE);
status = nfsd4_decode_opaque(argp, &setclientid->se_name); status = nfsd4_decode_opaque(argp, &setclientid->se_name);
if (status) if (status)
...@@ -1020,9 +1015,9 @@ nfsd4_decode_setclientid_confirm(struct nfsd4_compoundargs *argp, struct nfsd4_s ...@@ -1020,9 +1015,9 @@ nfsd4_decode_setclientid_confirm(struct nfsd4_compoundargs *argp, struct nfsd4_s
{ {
DECODE_HEAD; DECODE_HEAD;
READ_BUF(8 + sizeof(nfs4_verifier)); READ_BUF(8 + NFS4_VERIFIER_SIZE);
COPYMEM(&scd_c->sc_clientid, 8); COPYMEM(&scd_c->sc_clientid, 8);
COPYMEM(&scd_c->sc_confirm, sizeof(nfs4_verifier)); COPYMEM(&scd_c->sc_confirm, NFS4_VERIFIER_SIZE);
DECODE_TAIL; DECODE_TAIL;
} }
...@@ -1385,26 +1380,29 @@ nfsd4_decode_sequence(struct nfsd4_compoundargs *argp, ...@@ -1385,26 +1380,29 @@ nfsd4_decode_sequence(struct nfsd4_compoundargs *argp,
static __be32 static __be32
nfsd4_decode_test_stateid(struct nfsd4_compoundargs *argp, struct nfsd4_test_stateid *test_stateid) nfsd4_decode_test_stateid(struct nfsd4_compoundargs *argp, struct nfsd4_test_stateid *test_stateid)
{ {
unsigned int nbytes;
stateid_t si;
int i; int i;
__be32 *p; __be32 *p, status;
__be32 status; struct nfsd4_test_stateid_id *stateid;
READ_BUF(4); READ_BUF(4);
test_stateid->ts_num_ids = ntohl(*p++); test_stateid->ts_num_ids = ntohl(*p++);
nbytes = test_stateid->ts_num_ids * sizeof(stateid_t); INIT_LIST_HEAD(&test_stateid->ts_stateid_list);
if (nbytes > (u32)((char *)argp->end - (char *)argp->p))
goto xdr_error;
test_stateid->ts_saved_args = argp;
save_buf(argp, &test_stateid->ts_savedp);
for (i = 0; i < test_stateid->ts_num_ids; i++) { for (i = 0; i < test_stateid->ts_num_ids; i++) {
status = nfsd4_decode_stateid(argp, &si); stateid = kmalloc(sizeof(struct nfsd4_test_stateid_id), GFP_KERNEL);
if (!stateid) {
status = nfserrno(-ENOMEM);
goto out;
}
defer_free(argp, kfree, stateid);
INIT_LIST_HEAD(&stateid->ts_id_list);
list_add_tail(&stateid->ts_id_list, &test_stateid->ts_stateid_list);
status = nfsd4_decode_stateid(argp, &stateid->ts_id_stateid);
if (status) if (status)
return status; goto out;
} }
status = 0; status = 0;
...@@ -2661,8 +2659,8 @@ nfsd4_encode_commit(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_ ...@@ -2661,8 +2659,8 @@ nfsd4_encode_commit(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_
__be32 *p; __be32 *p;
if (!nfserr) { if (!nfserr) {
RESERVE_SPACE(8); RESERVE_SPACE(NFS4_VERIFIER_SIZE);
WRITEMEM(commit->co_verf.data, 8); WRITEMEM(commit->co_verf.data, NFS4_VERIFIER_SIZE);
ADJUST_ARGS(); ADJUST_ARGS();
} }
return nfserr; return nfserr;
...@@ -2851,6 +2849,20 @@ nfsd4_encode_open(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_op ...@@ -2851,6 +2849,20 @@ nfsd4_encode_open(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_op
WRITE32(0); /* XXX: is NULL principal ok? */ WRITE32(0); /* XXX: is NULL principal ok? */
ADJUST_ARGS(); ADJUST_ARGS();
break; break;
case NFS4_OPEN_DELEGATE_NONE_EXT: /* 4.1 */
switch (open->op_why_no_deleg) {
case WND4_CONTENTION:
case WND4_RESOURCE:
RESERVE_SPACE(8);
WRITE32(open->op_why_no_deleg);
WRITE32(0); /* deleg signaling not supported yet */
break;
default:
RESERVE_SPACE(4);
WRITE32(open->op_why_no_deleg);
}
ADJUST_ARGS();
break;
default: default:
BUG(); BUG();
} }
...@@ -3008,7 +3020,7 @@ nfsd4_encode_readdir(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4 ...@@ -3008,7 +3020,7 @@ nfsd4_encode_readdir(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4
if (resp->xbuf->page_len) if (resp->xbuf->page_len)
return nfserr_resource; return nfserr_resource;
RESERVE_SPACE(8); /* verifier */ RESERVE_SPACE(NFS4_VERIFIER_SIZE);
savep = p; savep = p;
/* XXX: Following NFSv3, we ignore the READDIR verifier for now. */ /* XXX: Following NFSv3, we ignore the READDIR verifier for now. */
...@@ -3209,9 +3221,9 @@ nfsd4_encode_setclientid(struct nfsd4_compoundres *resp, __be32 nfserr, struct n ...@@ -3209,9 +3221,9 @@ nfsd4_encode_setclientid(struct nfsd4_compoundres *resp, __be32 nfserr, struct n
__be32 *p; __be32 *p;
if (!nfserr) { if (!nfserr) {
RESERVE_SPACE(8 + sizeof(nfs4_verifier)); RESERVE_SPACE(8 + NFS4_VERIFIER_SIZE);
WRITEMEM(&scd->se_clientid, 8); WRITEMEM(&scd->se_clientid, 8);
WRITEMEM(&scd->se_confirm, sizeof(nfs4_verifier)); WRITEMEM(&scd->se_confirm, NFS4_VERIFIER_SIZE);
ADJUST_ARGS(); ADJUST_ARGS();
} }
else if (nfserr == nfserr_clid_inuse) { else if (nfserr == nfserr_clid_inuse) {
...@@ -3232,7 +3244,7 @@ nfsd4_encode_write(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_w ...@@ -3232,7 +3244,7 @@ nfsd4_encode_write(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_w
RESERVE_SPACE(16); RESERVE_SPACE(16);
WRITE32(write->wr_bytes_written); WRITE32(write->wr_bytes_written);
WRITE32(write->wr_how_written); WRITE32(write->wr_how_written);
WRITEMEM(write->wr_verifier.data, 8); WRITEMEM(write->wr_verifier.data, NFS4_VERIFIER_SIZE);
ADJUST_ARGS(); ADJUST_ARGS();
} }
return nfserr; return nfserr;
...@@ -3391,30 +3403,17 @@ __be32 ...@@ -3391,30 +3403,17 @@ __be32
nfsd4_encode_test_stateid(struct nfsd4_compoundres *resp, int nfserr, nfsd4_encode_test_stateid(struct nfsd4_compoundres *resp, int nfserr,
struct nfsd4_test_stateid *test_stateid) struct nfsd4_test_stateid *test_stateid)
{ {
struct nfsd4_compoundargs *argp; struct nfsd4_test_stateid_id *stateid, *next;
struct nfs4_client *cl = resp->cstate.session->se_client;
stateid_t si;
__be32 *p; __be32 *p;
int i;
int valid;
restore_buf(test_stateid->ts_saved_args, &test_stateid->ts_savedp);
argp = test_stateid->ts_saved_args;
RESERVE_SPACE(4); RESERVE_SPACE(4 + (4 * test_stateid->ts_num_ids));
*p++ = htonl(test_stateid->ts_num_ids); *p++ = htonl(test_stateid->ts_num_ids);
resp->p = p;
nfs4_lock_state(); list_for_each_entry_safe(stateid, next, &test_stateid->ts_stateid_list, ts_id_list) {
for (i = 0; i < test_stateid->ts_num_ids; i++) { *p++ = htonl(stateid->ts_id_status);
nfsd4_decode_stateid(argp, &si);
valid = nfs4_validate_stateid(cl, &si);
RESERVE_SPACE(4);
*p++ = htonl(valid);
resp->p = p;
} }
nfs4_unlock_state();
ADJUST_ARGS();
return nfserr; return nfserr;
} }
...@@ -3532,7 +3531,7 @@ int nfsd4_check_resp_size(struct nfsd4_compoundres *resp, u32 pad) ...@@ -3532,7 +3531,7 @@ int nfsd4_check_resp_size(struct nfsd4_compoundres *resp, u32 pad)
if (length > session->se_fchannel.maxresp_sz) if (length > session->se_fchannel.maxresp_sz)
return nfserr_rep_too_big; return nfserr_rep_too_big;
if (slot->sl_cachethis == 1 && if ((slot->sl_flags & NFSD4_SLOT_CACHETHIS) &&
length > session->se_fchannel.maxresp_cached) length > session->se_fchannel.maxresp_cached)
return nfserr_rep_too_big_to_cache; return nfserr_rep_too_big_to_cache;
...@@ -3656,8 +3655,7 @@ nfs4svc_encode_compoundres(struct svc_rqst *rqstp, __be32 *p, struct nfsd4_compo ...@@ -3656,8 +3655,7 @@ nfs4svc_encode_compoundres(struct svc_rqst *rqstp, __be32 *p, struct nfsd4_compo
if (nfsd4_has_session(cs)) { if (nfsd4_has_session(cs)) {
if (cs->status != nfserr_replay_cache) { if (cs->status != nfserr_replay_cache) {
nfsd4_store_cache_entry(resp); nfsd4_store_cache_entry(resp);
dprintk("%s: SET SLOT STATE TO AVAILABLE\n", __func__); cs->slot->sl_flags &= ~NFSD4_SLOT_INUSE;
cs->slot->sl_inuse = false;
} }
/* Renew the clientid on success and on replay */ /* Renew the clientid on success and on replay */
release_session_client(cs->session); release_session_client(cs->session);
......
...@@ -13,12 +13,14 @@ ...@@ -13,12 +13,14 @@
#include <linux/sunrpc/clnt.h> #include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/gss_api.h> #include <linux/sunrpc/gss_api.h>
#include <linux/sunrpc/gss_krb5_enctypes.h> #include <linux/sunrpc/gss_krb5_enctypes.h>
#include <linux/sunrpc/rpc_pipe_fs.h>
#include <linux/module.h> #include <linux/module.h>
#include "idmap.h" #include "idmap.h"
#include "nfsd.h" #include "nfsd.h"
#include "cache.h" #include "cache.h"
#include "fault_inject.h" #include "fault_inject.h"
#include "netns.h"
/* /*
* We have a single directory with several nodes in it. * We have a single directory with several nodes in it.
...@@ -1124,14 +1126,26 @@ static int create_proc_exports_entry(void) ...@@ -1124,14 +1126,26 @@ static int create_proc_exports_entry(void)
} }
#endif #endif
int nfsd_net_id;
static struct pernet_operations nfsd_net_ops = {
.id = &nfsd_net_id,
.size = sizeof(struct nfsd_net),
};
static int __init init_nfsd(void) static int __init init_nfsd(void)
{ {
int retval; int retval;
printk(KERN_INFO "Installing knfsd (copyright (C) 1996 okir@monad.swb.de).\n"); printk(KERN_INFO "Installing knfsd (copyright (C) 1996 okir@monad.swb.de).\n");
retval = nfsd4_init_slabs(); retval = register_cld_notifier();
if (retval) if (retval)
return retval; return retval;
retval = register_pernet_subsys(&nfsd_net_ops);
if (retval < 0)
goto out_unregister_notifier;
retval = nfsd4_init_slabs();
if (retval)
goto out_unregister_pernet;
nfs4_state_init(); nfs4_state_init();
retval = nfsd_fault_inject_init(); /* nfsd fault injection controls */ retval = nfsd_fault_inject_init(); /* nfsd fault injection controls */
if (retval) if (retval)
...@@ -1169,6 +1183,10 @@ static int __init init_nfsd(void) ...@@ -1169,6 +1183,10 @@ static int __init init_nfsd(void)
nfsd_fault_inject_cleanup(); nfsd_fault_inject_cleanup();
out_free_slabs: out_free_slabs:
nfsd4_free_slabs(); nfsd4_free_slabs();
out_unregister_pernet:
unregister_pernet_subsys(&nfsd_net_ops);
out_unregister_notifier:
unregister_cld_notifier();
return retval; return retval;
} }
...@@ -1184,6 +1202,8 @@ static void __exit exit_nfsd(void) ...@@ -1184,6 +1202,8 @@ static void __exit exit_nfsd(void)
nfsd4_free_slabs(); nfsd4_free_slabs();
nfsd_fault_inject_cleanup(); nfsd_fault_inject_cleanup();
unregister_filesystem(&nfsd_fs_type); unregister_filesystem(&nfsd_fs_type);
unregister_pernet_subsys(&nfsd_net_ops);
unregister_cld_notifier();
} }
MODULE_AUTHOR("Olaf Kirch <okir@monad.swb.de>"); MODULE_AUTHOR("Olaf Kirch <okir@monad.swb.de>");
......
...@@ -364,12 +364,17 @@ static inline u32 nfsd_suppattrs2(u32 minorversion) ...@@ -364,12 +364,17 @@ static inline u32 nfsd_suppattrs2(u32 minorversion)
NFSD_WRITEABLE_ATTRS_WORD2 NFSD_WRITEABLE_ATTRS_WORD2
extern int nfsd4_is_junction(struct dentry *dentry); extern int nfsd4_is_junction(struct dentry *dentry);
#else extern int register_cld_notifier(void);
extern void unregister_cld_notifier(void);
#else /* CONFIG_NFSD_V4 */
static inline int nfsd4_is_junction(struct dentry *dentry) static inline int nfsd4_is_junction(struct dentry *dentry)
{ {
return 0; return 0;
} }
#define register_cld_notifier() 0
#define unregister_cld_notifier() do { } while(0)
#endif /* CONFIG_NFSD_V4 */ #endif /* CONFIG_NFSD_V4 */
#endif /* LINUX_NFSD_NFSD_H */ #endif /* LINUX_NFSD_NFSD_H */
...@@ -307,33 +307,37 @@ static void set_max_drc(void) ...@@ -307,33 +307,37 @@ static void set_max_drc(void)
dprintk("%s nfsd_drc_max_mem %u \n", __func__, nfsd_drc_max_mem); dprintk("%s nfsd_drc_max_mem %u \n", __func__, nfsd_drc_max_mem);
} }
int nfsd_create_serv(void) static int nfsd_get_default_max_blksize(void)
{ {
int err = 0; struct sysinfo i;
unsigned long long target;
unsigned long ret;
si_meminfo(&i);
target = (i.totalram - i.totalhigh) << PAGE_SHIFT;
/*
* Aim for 1/4096 of memory per thread. This gives 1MB on 4Gig
* machines, but only uses 32K on 128M machines. Bottom out at
* 8K on 32M and smaller. Of course, this is only a default.
*/
target >>= 12;
ret = NFSSVC_MAXBLKSIZE;
while (ret > target && ret >= 8*1024*2)
ret /= 2;
return ret;
}
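As a sanity check on the numbers quoted in the comment, here is a standalone userspace replay of the heuristic (illustration only; NFSSVC_MAXBLKSIZE is 1 MiB in kernels of this era, and the lowmem argument stands in for totalram - totalhigh):

#include <assert.h>

static unsigned long default_max_blksize(unsigned long long lowmem_bytes)
{
	unsigned long long target = lowmem_bytes >> 12;	/* 1/4096 of memory */
	unsigned long ret = 1024 * 1024;		/* NFSSVC_MAXBLKSIZE */

	while (ret > target && ret >= 8 * 1024 * 2)
		ret /= 2;
	return ret;
}

int main(void)
{
	assert(default_max_blksize(4ULL << 30) == 1024 * 1024);	/* 4 GiB   -> 1 MiB  */
	assert(default_max_blksize(128ULL << 20) == 32 * 1024);	/* 128 MiB -> 32 KiB */
	assert(default_max_blksize(32ULL << 20) == 8 * 1024);		/* 32 MiB  -> 8 KiB  */
	return 0;
}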
int nfsd_create_serv(void)
{
WARN_ON(!mutex_is_locked(&nfsd_mutex)); WARN_ON(!mutex_is_locked(&nfsd_mutex));
if (nfsd_serv) { if (nfsd_serv) {
svc_get(nfsd_serv); svc_get(nfsd_serv);
return 0; return 0;
} }
if (nfsd_max_blksize == 0) { if (nfsd_max_blksize == 0)
/* choose a suitable default */ nfsd_max_blksize = nfsd_get_default_max_blksize();
struct sysinfo i;
si_meminfo(&i);
/* Aim for 1/4096 of memory per thread
* This gives 1MB on 4Gig machines
* But only uses 32K on 128M machines.
* Bottom out at 8K on 32M and smaller.
* Of course, this is only a default.
*/
nfsd_max_blksize = NFSSVC_MAXBLKSIZE;
i.totalram <<= PAGE_SHIFT - 12;
while (nfsd_max_blksize > i.totalram &&
nfsd_max_blksize >= 8*1024*2)
nfsd_max_blksize /= 2;
}
nfsd_reset_versions(); nfsd_reset_versions();
nfsd_serv = svc_create_pooled(&nfsd_program, nfsd_max_blksize, nfsd_serv = svc_create_pooled(&nfsd_program, nfsd_max_blksize,
nfsd_last_thread, nfsd, THIS_MODULE); nfsd_last_thread, nfsd, THIS_MODULE);
if (nfsd_serv == NULL) if (nfsd_serv == NULL)
...@@ -341,7 +345,7 @@ int nfsd_create_serv(void) ...@@ -341,7 +345,7 @@ int nfsd_create_serv(void)
set_max_drc(); set_max_drc();
do_gettimeofday(&nfssvc_boot); /* record boot time */ do_gettimeofday(&nfssvc_boot); /* record boot time */
return err; return 0;
} }
int nfsd_nrpools(void) int nfsd_nrpools(void)
......
...@@ -128,12 +128,14 @@ static inline struct nfs4_delegation *delegstateid(struct nfs4_stid *s) ...@@ -128,12 +128,14 @@ static inline struct nfs4_delegation *delegstateid(struct nfs4_stid *s)
(NFSD_CACHE_SIZE_SLOTS_PER_SESSION * NFSD_SLOT_CACHE_SIZE) (NFSD_CACHE_SIZE_SLOTS_PER_SESSION * NFSD_SLOT_CACHE_SIZE)
struct nfsd4_slot { struct nfsd4_slot {
bool sl_inuse;
bool sl_cachethis;
u16 sl_opcnt;
u32 sl_seqid; u32 sl_seqid;
__be32 sl_status; __be32 sl_status;
u32 sl_datalen; u32 sl_datalen;
u16 sl_opcnt;
#define NFSD4_SLOT_INUSE (1 << 0)
#define NFSD4_SLOT_CACHETHIS (1 << 1)
#define NFSD4_SLOT_INITIALIZED (1 << 2)
u8 sl_flags;
char sl_data[]; char sl_data[];
}; };
...@@ -196,18 +198,7 @@ struct nfsd4_session { ...@@ -196,18 +198,7 @@ struct nfsd4_session {
struct nfsd4_slot *se_slots[]; /* forward channel slots */ struct nfsd4_slot *se_slots[]; /* forward channel slots */
}; };
static inline void extern void nfsd4_put_session(struct nfsd4_session *ses);
nfsd4_put_session(struct nfsd4_session *ses)
{
extern void free_session(struct kref *kref);
kref_put(&ses->se_ref, free_session);
}
static inline void
nfsd4_get_session(struct nfsd4_session *ses)
{
kref_get(&ses->se_ref);
}
/* formatted contents of nfs4_sessionid */ /* formatted contents of nfs4_sessionid */
struct nfsd4_sessionid { struct nfsd4_sessionid {
...@@ -245,14 +236,17 @@ struct nfs4_client { ...@@ -245,14 +236,17 @@ struct nfs4_client {
struct svc_cred cl_cred; /* setclientid principal */ struct svc_cred cl_cred; /* setclientid principal */
clientid_t cl_clientid; /* generated by server */ clientid_t cl_clientid; /* generated by server */
nfs4_verifier cl_confirm; /* generated by server */ nfs4_verifier cl_confirm; /* generated by server */
u32 cl_firststate; /* recovery dir creation */
u32 cl_minorversion; u32 cl_minorversion;
/* for v4.0 and v4.1 callbacks: */ /* for v4.0 and v4.1 callbacks: */
struct nfs4_cb_conn cl_cb_conn; struct nfs4_cb_conn cl_cb_conn;
#define NFSD4_CLIENT_CB_UPDATE 1 #define NFSD4_CLIENT_CB_UPDATE (0)
#define NFSD4_CLIENT_KILL 2 #define NFSD4_CLIENT_CB_KILL (1)
unsigned long cl_cb_flags; #define NFSD4_CLIENT_STABLE (2) /* client on stable storage */
#define NFSD4_CLIENT_RECLAIM_COMPLETE (3) /* reclaim_complete done */
#define NFSD4_CLIENT_CB_FLAG_MASK (1 << NFSD4_CLIENT_CB_UPDATE | \
1 << NFSD4_CLIENT_CB_KILL)
unsigned long cl_flags;
struct rpc_clnt *cl_cb_client; struct rpc_clnt *cl_cb_client;
u32 cl_cb_ident; u32 cl_cb_ident;
#define NFSD4_CB_UP 0 #define NFSD4_CB_UP 0
...@@ -463,6 +457,8 @@ extern __be32 nfs4_preprocess_stateid_op(struct nfsd4_compound_state *cstate, ...@@ -463,6 +457,8 @@ extern __be32 nfs4_preprocess_stateid_op(struct nfsd4_compound_state *cstate,
extern void nfs4_lock_state(void); extern void nfs4_lock_state(void);
extern void nfs4_unlock_state(void); extern void nfs4_unlock_state(void);
extern int nfs4_in_grace(void); extern int nfs4_in_grace(void);
extern void nfs4_release_reclaim(void);
extern struct nfs4_client_reclaim *nfsd4_find_reclaim_client(struct nfs4_client *crp);
extern __be32 nfs4_check_open_reclaim(clientid_t *clid); extern __be32 nfs4_check_open_reclaim(clientid_t *clid);
extern void nfs4_free_openowner(struct nfs4_openowner *); extern void nfs4_free_openowner(struct nfs4_openowner *);
extern void nfs4_free_lockowner(struct nfs4_lockowner *); extern void nfs4_free_lockowner(struct nfs4_lockowner *);
...@@ -477,16 +473,17 @@ extern void nfsd4_destroy_callback_queue(void); ...@@ -477,16 +473,17 @@ extern void nfsd4_destroy_callback_queue(void);
extern void nfsd4_shutdown_callback(struct nfs4_client *); extern void nfsd4_shutdown_callback(struct nfs4_client *);
extern void nfs4_put_delegation(struct nfs4_delegation *dp); extern void nfs4_put_delegation(struct nfs4_delegation *dp);
extern __be32 nfs4_make_rec_clidname(char *clidname, struct xdr_netobj *clname); extern __be32 nfs4_make_rec_clidname(char *clidname, struct xdr_netobj *clname);
extern void nfsd4_init_recdir(void);
extern int nfsd4_recdir_load(void);
extern void nfsd4_shutdown_recdir(void);
extern int nfs4_client_to_reclaim(const char *name); extern int nfs4_client_to_reclaim(const char *name);
extern int nfs4_has_reclaimed_state(const char *name, bool use_exchange_id); extern int nfs4_has_reclaimed_state(const char *name, bool use_exchange_id);
extern void nfsd4_recdir_purge_old(void);
extern void nfsd4_create_clid_dir(struct nfs4_client *clp);
extern void nfsd4_remove_clid_dir(struct nfs4_client *clp);
extern void release_session_client(struct nfsd4_session *); extern void release_session_client(struct nfsd4_session *);
extern __be32 nfs4_validate_stateid(struct nfs4_client *, stateid_t *); extern __be32 nfs4_validate_stateid(struct nfs4_client *, stateid_t *);
extern void nfsd4_purge_closed_stateid(struct nfs4_stateowner *); extern void nfsd4_purge_closed_stateid(struct nfs4_stateowner *);
/* nfs4recover operations */
extern int nfsd4_client_tracking_init(struct net *net);
extern void nfsd4_client_tracking_exit(struct net *net);
extern void nfsd4_client_record_create(struct nfs4_client *clp);
extern void nfsd4_client_record_remove(struct nfs4_client *clp);
extern int nfsd4_client_record_check(struct nfs4_client *clp);
extern void nfsd4_record_grace_done(struct net *net, time_t boot_time);
#endif /* NFSD4_STATE_H */ #endif /* NFSD4_STATE_H */
...@@ -737,12 +737,13 @@ static int nfsd_open_break_lease(struct inode *inode, int access) ...@@ -737,12 +737,13 @@ static int nfsd_open_break_lease(struct inode *inode, int access)
/* /*
* Open an existing file or directory. * Open an existing file or directory.
* The access argument indicates the type of open (read/write/lock) * The may_flags argument indicates the type of open (read/write/lock)
* and additional flags.
* N.B. After this call fhp needs an fh_put * N.B. After this call fhp needs an fh_put
*/ */
__be32 __be32
nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type, nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type,
int access, struct file **filp) int may_flags, struct file **filp)
{ {
struct dentry *dentry; struct dentry *dentry;
struct inode *inode; struct inode *inode;
...@@ -757,7 +758,7 @@ nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type, ...@@ -757,7 +758,7 @@ nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type,
* and (hopefully) checked permission - so allow OWNER_OVERRIDE * and (hopefully) checked permission - so allow OWNER_OVERRIDE
* in case a chmod has now revoked permission. * in case a chmod has now revoked permission.
*/ */
err = fh_verify(rqstp, fhp, type, access | NFSD_MAY_OWNER_OVERRIDE); err = fh_verify(rqstp, fhp, type, may_flags | NFSD_MAY_OWNER_OVERRIDE);
if (err) if (err)
goto out; goto out;
...@@ -768,7 +769,7 @@ nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type, ...@@ -768,7 +769,7 @@ nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type,
* or any access when mandatory locking enabled * or any access when mandatory locking enabled
*/ */
err = nfserr_perm; err = nfserr_perm;
if (IS_APPEND(inode) && (access & NFSD_MAY_WRITE)) if (IS_APPEND(inode) && (may_flags & NFSD_MAY_WRITE))
goto out; goto out;
/* /*
* We must ignore files (but only files) which might have mandatory * We must ignore files (but only files) which might have mandatory
...@@ -781,12 +782,12 @@ nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type, ...@@ -781,12 +782,12 @@ nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type,
if (!inode->i_fop) if (!inode->i_fop)
goto out; goto out;
host_err = nfsd_open_break_lease(inode, access); host_err = nfsd_open_break_lease(inode, may_flags);
if (host_err) /* NOMEM or WOULDBLOCK */ if (host_err) /* NOMEM or WOULDBLOCK */
goto out_nfserr; goto out_nfserr;
if (access & NFSD_MAY_WRITE) { if (may_flags & NFSD_MAY_WRITE) {
if (access & NFSD_MAY_READ) if (may_flags & NFSD_MAY_READ)
flags = O_RDWR|O_LARGEFILE; flags = O_RDWR|O_LARGEFILE;
else else
flags = O_WRONLY|O_LARGEFILE; flags = O_WRONLY|O_LARGEFILE;
...@@ -795,8 +796,15 @@ nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type, ...@@ -795,8 +796,15 @@ nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type,
flags, current_cred()); flags, current_cred());
if (IS_ERR(*filp)) if (IS_ERR(*filp))
host_err = PTR_ERR(*filp); host_err = PTR_ERR(*filp);
else else {
host_err = ima_file_check(*filp, access); host_err = ima_file_check(*filp, may_flags);
if (may_flags & NFSD_MAY_64BIT_COOKIE)
(*filp)->f_mode |= FMODE_64BITHASH;
else
(*filp)->f_mode |= FMODE_32BITHASH;
}
out_nfserr: out_nfserr:
err = nfserrno(host_err); err = nfserrno(host_err);
out: out:
...@@ -2021,8 +2029,13 @@ nfsd_readdir(struct svc_rqst *rqstp, struct svc_fh *fhp, loff_t *offsetp, ...@@ -2021,8 +2029,13 @@ nfsd_readdir(struct svc_rqst *rqstp, struct svc_fh *fhp, loff_t *offsetp,
__be32 err; __be32 err;
struct file *file; struct file *file;
loff_t offset = *offsetp; loff_t offset = *offsetp;
int may_flags = NFSD_MAY_READ;
/* NFSv2 only supports 32 bit cookies */
if (rqstp->rq_vers > 2)
may_flags |= NFSD_MAY_64BIT_COOKIE;
err = nfsd_open(rqstp, fhp, S_IFDIR, NFSD_MAY_READ, &file); err = nfsd_open(rqstp, fhp, S_IFDIR, may_flags, &file);
if (err) if (err)
goto out; goto out;
......
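To connect the pieces: the remainder of nfsd_readdir() is unchanged and therefore not shown in this hunk, but it is where the client's cookie becomes an llseek on the directory, which for ext4 now lands in ext4_dir_llseek() above. A condensed sketch of that flow, not a literal copy of the function body:

	err = nfsd_open(rqstp, fhp, S_IFDIR, may_flags, &file);
	if (err)
		goto out;
	/* nfsd_open() set FMODE_64BITHASH (or FMODE_32BITHASH for NFSv2),
	 * so the filesystem knows which cookie width this reader expects */
	offset = vfs_llseek(file, offset, SEEK_SET);
	if (offset < 0) {
		err = nfserrno((int)offset);
		goto out_close;
	}
	err = nfsd_buffered_readdir(file, func, cdp, offsetp);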
...@@ -27,6 +27,8 @@ ...@@ -27,6 +27,8 @@
#define NFSD_MAY_BYPASS_GSS 0x400 #define NFSD_MAY_BYPASS_GSS 0x400
#define NFSD_MAY_READ_IF_EXEC 0x800 #define NFSD_MAY_READ_IF_EXEC 0x800
#define NFSD_MAY_64BIT_COOKIE 0x1000 /* 64 bit readdir cookies for >= NFSv3 */
#define NFSD_MAY_CREATE (NFSD_MAY_EXEC|NFSD_MAY_WRITE) #define NFSD_MAY_CREATE (NFSD_MAY_EXEC|NFSD_MAY_WRITE)
#define NFSD_MAY_REMOVE (NFSD_MAY_EXEC|NFSD_MAY_WRITE|NFSD_MAY_TRUNC) #define NFSD_MAY_REMOVE (NFSD_MAY_EXEC|NFSD_MAY_WRITE|NFSD_MAY_TRUNC)
......
...@@ -43,6 +43,13 @@ ...@@ -43,6 +43,13 @@
#define NFSD4_MAX_TAGLEN 128 #define NFSD4_MAX_TAGLEN 128
#define XDR_LEN(n) (((n) + 3) & ~3) #define XDR_LEN(n) (((n) + 3) & ~3)
#define CURRENT_STATE_ID_FLAG (1<<0)
#define SAVED_STATE_ID_FLAG (1<<1)
#define SET_STATE_ID(c, f) ((c)->sid_flags |= (f))
#define HAS_STATE_ID(c, f) ((c)->sid_flags & (f))
#define CLEAR_STATE_ID(c, f) ((c)->sid_flags &= ~(f))
struct nfsd4_compound_state { struct nfsd4_compound_state {
struct svc_fh current_fh; struct svc_fh current_fh;
struct svc_fh save_fh; struct svc_fh save_fh;
...@@ -54,6 +61,10 @@ struct nfsd4_compound_state { ...@@ -54,6 +61,10 @@ struct nfsd4_compound_state {
size_t iovlen; size_t iovlen;
u32 minorversion; u32 minorversion;
u32 status; u32 status;
stateid_t current_stateid;
stateid_t save_stateid;
/* to indicate that the current and saved stateids are present */
u32 sid_flags;
}; };
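The helpers declared in the new current_stateid.h header shown earlier operate on these three fields. A minimal sketch of the intended pattern (the internal put/get helpers are illustrative names, and nfsd4_read's rd_stateid field is assumed from the existing xdr structures; the real implementation lives in nfs4state.c, which is not shown in this diff):

static void put_current_stateid(struct nfsd4_compound_state *cstate,
				stateid_t *stateid)
{
	cstate->current_stateid = *stateid;
	SET_STATE_ID(cstate, CURRENT_STATE_ID_FLAG);
}

static void get_current_stateid(struct nfsd4_compound_state *cstate,
				stateid_t *stateid)
{
	if (HAS_STATE_ID(cstate, CURRENT_STATE_ID_FLAG))
		*stateid = cstate->current_stateid;
}

/* OPEN records the stateid it just handed out ... */
void nfsd4_set_openstateid(struct nfsd4_compound_state *cstate,
			   struct nfsd4_open *open)
{
	put_current_stateid(cstate, &open->op_stateid);
}

/* ... and a later READ in the same compound can pick it up when the
 * client sent the special "current stateid" value. */
void nfsd4_get_readstateid(struct nfsd4_compound_state *cstate,
			   struct nfsd4_read *read)
{
	get_current_stateid(cstate, &read->rd_stateid);
}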
static inline bool nfsd4_has_session(struct nfsd4_compound_state *cs) static inline bool nfsd4_has_session(struct nfsd4_compound_state *cs)
...@@ -212,16 +223,19 @@ struct nfsd4_open { ...@@ -212,16 +223,19 @@ struct nfsd4_open {
struct xdr_netobj op_fname; /* request - everything but CLAIM_PREV */ struct xdr_netobj op_fname; /* request - everything but CLAIM_PREV */
u32 op_delegate_type; /* request - CLAIM_PREV only */ u32 op_delegate_type; /* request - CLAIM_PREV only */
stateid_t op_delegate_stateid; /* request - response */ stateid_t op_delegate_stateid; /* request - response */
u32 op_why_no_deleg; /* response - DELEG_NONE_EXT only */
u32 op_create; /* request */ u32 op_create; /* request */
u32 op_createmode; /* request */ u32 op_createmode; /* request */
u32 op_bmval[3]; /* request */ u32 op_bmval[3]; /* request */
struct iattr iattr; /* UNCHECKED4, GUARDED4, EXCLUSIVE4_1 */ struct iattr iattr; /* UNCHECKED4, GUARDED4, EXCLUSIVE4_1 */
nfs4_verifier verf; /* EXCLUSIVE4 */ nfs4_verifier op_verf __attribute__((aligned(32)));
/* EXCLUSIVE4 */
clientid_t op_clientid; /* request */ clientid_t op_clientid; /* request */
struct xdr_netobj op_owner; /* request */ struct xdr_netobj op_owner; /* request */
u32 op_seqid; /* request */ u32 op_seqid; /* request */
u32 op_share_access; /* request */ u32 op_share_access; /* request */
u32 op_share_deny; /* request */ u32 op_share_deny; /* request */
u32 op_deleg_want; /* request */
stateid_t op_stateid; /* response */ stateid_t op_stateid; /* response */
u32 op_recall; /* recall */ u32 op_recall; /* recall */
struct nfsd4_change_info op_cinfo; /* response */ struct nfsd4_change_info op_cinfo; /* response */
@@ -234,7 +248,6 @@ struct nfsd4_open {
 	struct nfs4_acl *op_acl;
 };
 #define op_iattr	iattr
-#define op_verf		verf
 
 struct nfsd4_open_confirm {
 	stateid_t	oc_req_stateid		/* request */;
@@ -245,8 +258,9 @@ struct nfsd4_open_confirm {
 struct nfsd4_open_downgrade {
 	stateid_t       od_stateid;
 	u32             od_seqid;
-	u32             od_share_access;
-	u32             od_share_deny;
+	u32             od_share_access;	/* request */
+	u32		od_deleg_want;		/* request */
+	u32             od_share_deny;		/* request */
 };
@@ -343,10 +357,15 @@ struct nfsd4_saved_compoundargs {
 	struct page **pagelist;
 };
 
+struct nfsd4_test_stateid_id {
+	__be32			ts_id_status;
+	stateid_t		ts_id_stateid;
+	struct list_head	ts_id_list;
+};
+
 struct nfsd4_test_stateid {
 	__be32		ts_num_ids;
-	struct nfsd4_compoundargs *ts_saved_args;
-	struct nfsd4_saved_compoundargs ts_savedp;
+	struct list_head ts_stateid_list;
 };
 
 struct nfsd4_free_stateid {
@@ -503,7 +522,8 @@ static inline bool nfsd4_is_solo_sequence(struct nfsd4_compoundres *resp)
 
 static inline bool nfsd4_not_cached(struct nfsd4_compoundres *resp)
 {
-	return !resp->cstate.slot->sl_cachethis || nfsd4_is_solo_sequence(resp);
+	return !(resp->cstate.slot->sl_flags & NFSD4_SLOT_CACHETHIS)
+		|| nfsd4_is_solo_sequence(resp);
 }
 
 #define NFS4_SVC_XDRSIZE	sizeof(struct nfsd4_compoundargs)
...
@@ -92,6 +92,10 @@ struct inodes_stat_t {
 /* File is opened using open(.., 3, ..) and is writeable only for ioctls
    (specialy hack for floppy.c) */
 #define FMODE_WRITE_IOCTL	((__force fmode_t)0x100)
+/* 32bit hashes as llseek() offset (for directories) */
+#define FMODE_32BITHASH         ((__force fmode_t)0x200)
+/* 64bit hashes as llseek() offset (for directories) */
+#define FMODE_64BITHASH         ((__force fmode_t)0x400)
 
 /*
  * Don't update ctime and mtime.
...
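Note: these two bits let a filesystem such as ext4 decide, per open file, whether the directory "offsets" it hands back through llseek()/readdir (really name hashes) must fit in 32 bits or may use a wider 64-bit value. The user-space sketch below only illustrates the selection logic; the helper name and hash layout are assumptions, not ext4's actual code.

#include <stdio.h>
#include <stdint.h>

#define FMODE_32BITHASH 0x200u
#define FMODE_64BITHASH 0x400u

/*
 * Illustrative only: combine a 32-bit major and minor hash into the cookie
 * returned for a directory entry, honouring the caller's FMODE choice.
 */
static uint64_t dir_cookie(unsigned int f_mode, uint32_t major, uint32_t minor)
{
	if (f_mode & FMODE_64BITHASH)
		return ((uint64_t)major << 32) | minor;	/* full-width cookie */
	return major;					/* narrow cookie for legacy callers */
}

int main(void)
{
	printf("%llx\n", (unsigned long long)dir_cookie(FMODE_64BITHASH, 0x1234abcd, 0x42));
	printf("%llx\n", (unsigned long long)dir_cookie(FMODE_32BITHASH, 0x1234abcd, 0x42));
	return 0;
}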
@@ -438,7 +438,20 @@ enum limit_by4 {
 enum open_delegation_type4 {
 	NFS4_OPEN_DELEGATE_NONE = 0,
 	NFS4_OPEN_DELEGATE_READ = 1,
-	NFS4_OPEN_DELEGATE_WRITE = 2
+	NFS4_OPEN_DELEGATE_WRITE = 2,
+	NFS4_OPEN_DELEGATE_NONE_EXT = 3, /* 4.1 */
+};
+
+enum why_no_delegation4 { /* new to v4.1 */
+	WND4_NOT_WANTED = 0,
+	WND4_CONTENTION = 1,
+	WND4_RESOURCE = 2,
+	WND4_NOT_SUPP_FTYPE = 3,
+	WND4_WRITE_DELEG_NOT_SUPP_FTYPE = 4,
+	WND4_NOT_SUPP_UPGRADE = 5,
+	WND4_NOT_SUPP_DOWNGRADE = 6,
+	WND4_CANCELLED = 7,
+	WND4_IS_DIR = 8,
 };
 
 enum lock_type4 {
...
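Note: when the server answers OPEN with NFS4_OPEN_DELEGATE_NONE_EXT it also returns one of these why-codes (carried in op_why_no_deleg above). A small, purely hypothetical helper for turning the code into a log-friendly string; nothing past the enum itself is part of the patch.

#include <stdio.h>

enum why_no_delegation4 {	/* new to v4.1 */
	WND4_NOT_WANTED = 0,
	WND4_CONTENTION = 1,
	WND4_RESOURCE = 2,
	WND4_NOT_SUPP_FTYPE = 3,
	WND4_WRITE_DELEG_NOT_SUPP_FTYPE = 4,
	WND4_NOT_SUPP_UPGRADE = 5,
	WND4_NOT_SUPP_DOWNGRADE = 6,
	WND4_CANCELLED = 7,
	WND4_IS_DIR = 8,
};

/* Hypothetical helper: map a why-code to a short description for debugging. */
static const char *wnd4_name(int why)
{
	static const char * const names[] = {
		"not wanted", "contention", "resource shortage",
		"file type not supported",
		"write delegation not supported for file type",
		"upgrade not supported", "downgrade not supported",
		"cancelled", "is a directory",
	};

	if (why < 0 || why > WND4_IS_DIR)
		return "unknown";
	return names[why];
}

int main(void)
{
	printf("DELEG_NONE_EXT: %s\n", wnd4_name(WND4_RESOURCE));
	return 0;
}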
/*
* Upcall description for nfsdcld communication
*
* Copyright (c) 2012 Red Hat, Inc.
* Author(s): Jeff Layton <jlayton@redhat.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef _NFSD_CLD_H
#define _NFSD_CLD_H
/* latest upcall version available */
#define CLD_UPCALL_VERSION 1
/* defined by RFC3530 */
#define NFS4_OPAQUE_LIMIT 1024
enum cld_command {
Cld_Create, /* create a record for this cm_id */
Cld_Remove, /* remove record of this cm_id */
Cld_Check, /* is this cm_id allowed? */
Cld_GraceDone, /* grace period is complete */
};
/* representation of long-form NFSv4 client ID */
struct cld_name {
uint16_t cn_len; /* length of cm_id */
unsigned char cn_id[NFS4_OPAQUE_LIMIT]; /* client-provided */
} __attribute__((packed));
/* message struct for communication with userspace */
struct cld_msg {
uint8_t cm_vers; /* upcall version */
uint8_t cm_cmd; /* upcall command */
int16_t cm_status; /* return code */
uint32_t cm_xid; /* transaction id */
union {
int64_t cm_gracetime; /* grace period start time */
struct cld_name cm_name;
} __attribute__((packed)) cm_u;
} __attribute__((packed));
#endif /* !_NFSD_CLD_H */
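Note: this header defines the wire format between knfsd and the new nfsdcld user-space daemon. The kernel writes a struct cld_msg upcall into an rpc_pipefs pipe; the daemon acts on cm_cmd and writes the same message back with cm_status filled in (cm_xid is preserved so the kernel can match the reply to the pending upcall). Below is a minimal sketch of the daemon side of one round trip; the pipe path and the "always succeed" policy are assumptions for illustration only, not the real nfsdcld.

/* Minimal sketch of an nfsdcld-style reply loop (not the real daemon). */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define NFS4_OPAQUE_LIMIT 1024

struct cld_name {
	uint16_t	cn_len;
	unsigned char	cn_id[NFS4_OPAQUE_LIMIT];
} __attribute__((packed));

struct cld_msg {
	uint8_t		cm_vers;
	uint8_t		cm_cmd;
	int16_t		cm_status;
	uint32_t	cm_xid;
	union {
		int64_t		cm_gracetime;
		struct cld_name	cm_name;
	} __attribute__((packed)) cm_u;
} __attribute__((packed));

int main(void)
{
	/* Path is an assumption; the pipe lives under the new rpc_pipefs "nfsd" directory. */
	int fd = open("/var/lib/nfs/rpc_pipefs/nfsd/cld", O_RDWR);
	struct cld_msg msg;

	if (fd < 0) {
		perror("open");
		return 1;
	}
	while (read(fd, &msg, sizeof(msg)) == (ssize_t)sizeof(msg)) {
		/* A real daemon would create/check/remove the client record here. */
		printf("upcall: vers=%u cmd=%u xid=%u\n",
		       msg.cm_vers, msg.cm_cmd, msg.cm_xid);
		msg.cm_status = 0;			/* pretend every operation succeeded */
		if (write(fd, &msg, sizeof(msg)) < 0)	/* echo the message back as the downcall */
			perror("write");
	}
	close(fd);
	return 0;
}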
@@ -190,7 +190,7 @@ extern int svc_rdma_xdr_encode_error(struct svcxprt_rdma *,
 extern void svc_rdma_xdr_encode_write_list(struct rpcrdma_msg *, int);
 extern void svc_rdma_xdr_encode_reply_array(struct rpcrdma_write_array *, int);
 extern void svc_rdma_xdr_encode_array_chunk(struct rpcrdma_write_array *, int,
-					     u32, u64, u32);
+					     __be32, __be64, u32);
 extern void svc_rdma_xdr_encode_reply_header(struct svcxprt_rdma *,
 					     struct rpcrdma_msg *,
 					     struct rpcrdma_msg *,
@@ -292,7 +292,7 @@ svc_rdma_get_reply_array(struct rpcrdma_msg *rmsgp)
 	if (wr_ary) {
 		rp_ary = (struct rpcrdma_write_array *)
 			&wr_ary->
-			wc_array[wr_ary->wc_nchunks].wc_target.rs_length;
+			wc_array[ntohl(wr_ary->wc_nchunks)].wc_target.rs_length;
 
 		goto found_it;
 	}
...
@@ -830,6 +830,8 @@ static ssize_t cache_do_downcall(char *kaddr, const char __user *buf,
 {
 	ssize_t ret;
 
+	if (count == 0)
+		return -EINVAL;
 	if (copy_from_user(kaddr, buf, count))
 		return -EFAULT;
 	kaddr[count] = '\0';
...
@@ -1014,6 +1014,7 @@ enum {
 	RPCAUTH_statd,
 	RPCAUTH_nfsd4_cb,
 	RPCAUTH_cache,
+	RPCAUTH_nfsd,
 	RPCAUTH_RootEOF
 };
 
@@ -1046,6 +1047,10 @@ static const struct rpc_filelist files[] = {
 		.name = "cache",
 		.mode = S_IFDIR | S_IRUGO | S_IXUGO,
 	},
+	[RPCAUTH_nfsd] = {
+		.name = "nfsd",
+		.mode = S_IFDIR | S_IRUGO | S_IXUGO,
+	},
 };
 
 /*
...
@@ -507,7 +507,7 @@ static int unix_gid_parse(struct cache_detail *cd,
 	time_t expiry;
 	struct unix_gid ug, *ugp;
 
-	if (mlen <= 0 || mesg[mlen-1] != '\n')
+	if (mesg[mlen - 1] != '\n')
 		return -EINVAL;
 	mesg[mlen-1] = 0;
...
@@ -1381,8 +1381,6 @@ void svc_sock_update_bufs(struct svc_serv *serv)
 	spin_lock_bh(&serv->sv_lock);
 	list_for_each_entry(svsk, &serv->sv_permsocks, sk_xprt.xpt_list)
 		set_bit(XPT_CHNGBUF, &svsk->sk_xprt.xpt_flags);
-	list_for_each_entry(svsk, &serv->sv_tempsocks, sk_xprt.xpt_list)
-		set_bit(XPT_CHNGBUF, &svsk->sk_xprt.xpt_flags);
 	spin_unlock_bh(&serv->sv_lock);
 }
 EXPORT_SYMBOL_GPL(svc_sock_update_bufs);
...
@@ -47,6 +47,7 @@
 #include <linux/sunrpc/clnt.h>
 #include <linux/sunrpc/sched.h>
 #include <linux/sunrpc/svc_rdma.h>
+#include "xprt_rdma.h"
 
 #define RPCDBG_FACILITY	RPCDBG_SVCXPRT
...
@@ -60,21 +60,11 @@ static u32 *decode_read_list(u32 *va, u32 *vaend)
 	struct rpcrdma_read_chunk *ch = (struct rpcrdma_read_chunk *)va;
 
 	while (ch->rc_discrim != xdr_zero) {
-		u64 ch_offset;
-
 		if (((unsigned long)ch + sizeof(struct rpcrdma_read_chunk)) >
 		    (unsigned long)vaend) {
 			dprintk("svcrdma: vaend=%p, ch=%p\n", vaend, ch);
 			return NULL;
 		}
-		ch->rc_discrim = ntohl(ch->rc_discrim);
-		ch->rc_position = ntohl(ch->rc_position);
-		ch->rc_target.rs_handle = ntohl(ch->rc_target.rs_handle);
-		ch->rc_target.rs_length = ntohl(ch->rc_target.rs_length);
-		va = (u32 *)&ch->rc_target.rs_offset;
-		xdr_decode_hyper(va, &ch_offset);
-		put_unaligned(ch_offset, (u64 *)va);
 		ch++;
 	}
 	return (u32 *)&ch->rc_position;
@@ -91,7 +81,7 @@ void svc_rdma_rcl_chunk_counts(struct rpcrdma_read_chunk *ch,
 	*byte_count = 0;
 	*ch_count = 0;
 	for (; ch->rc_discrim != 0; ch++) {
-		*byte_count = *byte_count + ch->rc_target.rs_length;
+		*byte_count = *byte_count + ntohl(ch->rc_target.rs_length);
 		*ch_count = *ch_count + 1;
 	}
 }
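Note: the common thread in these svcrdma changes is that chunk lists are no longer byte-swapped in place. The on-the-wire big-endian values stay untouched, and conversions happen only where a value is consumed (ntohl for 32-bit fields, an xdr_decode_hyper-style conversion for the 64-bit rs_offset). A self-contained illustration of that read-only decode pattern on a fake segment; the helper mirrors what xdr_decode_hyper does but is written out here for clarity.

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

/* Wire image of an rpcrdma segment: all fields big-endian, offset split into two 32-bit words. */
struct wire_segment {
	uint32_t rs_handle;
	uint32_t rs_length;
	uint32_t rs_offset_hi;
	uint32_t rs_offset_lo;
};

/* Equivalent of xdr_decode_hyper(): rebuild a 64-bit value from two big-endian words. */
static uint64_t decode_hyper(uint32_t hi_be, uint32_t lo_be)
{
	return ((uint64_t)ntohl(hi_be) << 32) | ntohl(lo_be);
}

int main(void)
{
	struct wire_segment seg = {
		.rs_handle	= htonl(0x1234),
		.rs_length	= htonl(4096),
		.rs_offset_hi	= htonl(0x1),
		.rs_offset_lo	= htonl(0x8000),
	};

	/* Read-only decode: the wire buffer is left exactly as received. */
	uint32_t handle = ntohl(seg.rs_handle);
	uint32_t length = ntohl(seg.rs_length);
	uint64_t offset = decode_hyper(seg.rs_offset_hi, seg.rs_offset_lo);

	printf("handle=%#x length=%u offset=%#llx\n",
	       handle, length, (unsigned long long)offset);
	return 0;
}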
@@ -108,7 +98,8 @@ void svc_rdma_rcl_chunk_counts(struct rpcrdma_read_chunk *ch,
  */
 static u32 *decode_write_list(u32 *va, u32 *vaend)
 {
-	int ch_no;
+	int nchunks;
+
 	struct rpcrdma_write_array *ary =
 		(struct rpcrdma_write_array *)va;
@@ -121,37 +112,24 @@ static u32 *decode_write_list(u32 *va, u32 *vaend)
 		dprintk("svcrdma: ary=%p, vaend=%p\n", ary, vaend);
 		return NULL;
 	}
-	ary->wc_discrim = ntohl(ary->wc_discrim);
-	ary->wc_nchunks = ntohl(ary->wc_nchunks);
+	nchunks = ntohl(ary->wc_nchunks);
 	if (((unsigned long)&ary->wc_array[0] +
-	     (sizeof(struct rpcrdma_write_chunk) * ary->wc_nchunks)) >
+	     (sizeof(struct rpcrdma_write_chunk) * nchunks)) >
 	    (unsigned long)vaend) {
 		dprintk("svcrdma: ary=%p, wc_nchunks=%d, vaend=%p\n",
-			ary, ary->wc_nchunks, vaend);
+			ary, nchunks, vaend);
 		return NULL;
 	}
-	for (ch_no = 0; ch_no < ary->wc_nchunks; ch_no++) {
-		u64 ch_offset;
-
-		ary->wc_array[ch_no].wc_target.rs_handle =
-			ntohl(ary->wc_array[ch_no].wc_target.rs_handle);
-		ary->wc_array[ch_no].wc_target.rs_length =
-			ntohl(ary->wc_array[ch_no].wc_target.rs_length);
-		va = (u32 *)&ary->wc_array[ch_no].wc_target.rs_offset;
-		xdr_decode_hyper(va, &ch_offset);
-		put_unaligned(ch_offset, (u64 *)va);
-	}
 	/*
 	 * rs_length is the 2nd 4B field in wc_target and taking its
 	 * address skips the list terminator
 	 */
-	return (u32 *)&ary->wc_array[ch_no].wc_target.rs_length;
+	return (u32 *)&ary->wc_array[nchunks].wc_target.rs_length;
 }
 
 static u32 *decode_reply_array(u32 *va, u32 *vaend)
 {
-	int ch_no;
+	int nchunks;
 	struct rpcrdma_write_array *ary =
 		(struct rpcrdma_write_array *)va;
@@ -164,28 +142,15 @@ static u32 *decode_reply_array(u32 *va, u32 *vaend)
 		dprintk("svcrdma: ary=%p, vaend=%p\n", ary, vaend);
 		return NULL;
 	}
-	ary->wc_discrim = ntohl(ary->wc_discrim);
-	ary->wc_nchunks = ntohl(ary->wc_nchunks);
+	nchunks = ntohl(ary->wc_nchunks);
 	if (((unsigned long)&ary->wc_array[0] +
-	     (sizeof(struct rpcrdma_write_chunk) * ary->wc_nchunks)) >
+	     (sizeof(struct rpcrdma_write_chunk) * nchunks)) >
 	    (unsigned long)vaend) {
 		dprintk("svcrdma: ary=%p, wc_nchunks=%d, vaend=%p\n",
-			ary, ary->wc_nchunks, vaend);
+			ary, nchunks, vaend);
 		return NULL;
 	}
-	for (ch_no = 0; ch_no < ary->wc_nchunks; ch_no++) {
-		u64 ch_offset;
-
-		ary->wc_array[ch_no].wc_target.rs_handle =
-			ntohl(ary->wc_array[ch_no].wc_target.rs_handle);
-		ary->wc_array[ch_no].wc_target.rs_length =
-			ntohl(ary->wc_array[ch_no].wc_target.rs_length);
-		va = (u32 *)&ary->wc_array[ch_no].wc_target.rs_offset;
-		xdr_decode_hyper(va, &ch_offset);
-		put_unaligned(ch_offset, (u64 *)va);
-	}
-	return (u32 *)&ary->wc_array[ch_no];
+	return (u32 *)&ary->wc_array[nchunks];
 }
 
 int svc_rdma_xdr_decode_req(struct rpcrdma_msg **rdma_req,
@@ -386,13 +351,14 @@ void svc_rdma_xdr_encode_reply_array(struct rpcrdma_write_array *ary,
 void svc_rdma_xdr_encode_array_chunk(struct rpcrdma_write_array *ary,
 				     int chunk_no,
-				     u32 rs_handle, u64 rs_offset,
+				     __be32 rs_handle,
+				     __be64 rs_offset,
 				     u32 write_len)
 {
 	struct rpcrdma_segment *seg = &ary->wc_array[chunk_no].wc_target;
-	seg->rs_handle = htonl(rs_handle);
+	seg->rs_handle = rs_handle;
+	seg->rs_offset = rs_offset;
 	seg->rs_length = htonl(write_len);
-	xdr_encode_hyper((u32 *) &seg->rs_offset, rs_offset);
 }
 
 void svc_rdma_xdr_encode_reply_header(struct svcxprt_rdma *xprt,
...
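Note: the encode path is the mirror image of the decode changes above: callers now hand in rs_handle and rs_offset exactly as they appeared on the wire (hence the __be32/__be64 prototypes), so the helper stores them unchanged and only the host-order length needs htonl(). A small stand-alone sketch of that convention; the types are simplified to plain uint32_t/uint64_t since __be32/__be64 are kernel-only annotations.

#include <arpa/inet.h>
#include <endian.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified segment; in the kernel these fields are __be32/__be64. */
struct seg {
	uint32_t rs_handle;
	uint64_t rs_offset;
	uint32_t rs_length;
};

/*
 * handle_be and offset_be arrive already in network byte order (copied
 * straight from the client's request); only write_len is a host-order
 * value computed by the server.
 */
static void encode_chunk(struct seg *out, uint32_t handle_be,
			 uint64_t offset_be, uint32_t write_len)
{
	out->rs_handle = handle_be;		/* stored as-is, no swap */
	out->rs_offset = offset_be;		/* stored as-is, no swap */
	out->rs_length = htonl(write_len);	/* the one host-order input */
}

int main(void)
{
	struct seg s;

	encode_chunk(&s, htonl(0x1234), htobe64(0x80000000ULL), 512);
	printf("rs_length on the wire: %#x\n", s.rs_length);
	return 0;
}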
@@ -147,7 +147,7 @@ static int map_read_chunks(struct svcxprt_rdma *xprt,
 	page_off = 0;
 	ch = (struct rpcrdma_read_chunk *)&rmsgp->rm_body.rm_chunks[0];
 	ch_no = 0;
-	ch_bytes = ch->rc_target.rs_length;
+	ch_bytes = ntohl(ch->rc_target.rs_length);
 	head->arg.head[0] = rqstp->rq_arg.head[0];
 	head->arg.tail[0] = rqstp->rq_arg.tail[0];
 	head->arg.pages = &head->pages[head->count];
@@ -183,7 +183,7 @@ static int map_read_chunks(struct svcxprt_rdma *xprt,
 			ch_no++;
 			ch++;
 			chl_map->ch[ch_no].start = sge_no;
-			ch_bytes = ch->rc_target.rs_length;
+			ch_bytes = ntohl(ch->rc_target.rs_length);
 			/* If bytes remaining account for next chunk */
 			if (byte_count) {
 				head->arg.page_len += ch_bytes;
@@ -281,11 +281,12 @@ static int fast_reg_read_chunks(struct svcxprt_rdma *xprt,
 	offset = 0;
 	ch = (struct rpcrdma_read_chunk *)&rmsgp->rm_body.rm_chunks[0];
 	for (ch_no = 0; ch_no < ch_count; ch_no++) {
+		int len = ntohl(ch->rc_target.rs_length);
 		rpl_map->sge[ch_no].iov_base = frmr->kva + offset;
-		rpl_map->sge[ch_no].iov_len = ch->rc_target.rs_length;
+		rpl_map->sge[ch_no].iov_len = len;
 		chl_map->ch[ch_no].count = 1;
 		chl_map->ch[ch_no].start = ch_no;
-		offset += ch->rc_target.rs_length;
+		offset += len;
 		ch++;
 	}
@@ -316,7 +317,7 @@ static int rdma_set_ctxt_sge(struct svcxprt_rdma *xprt,
 	for (i = 0; i < count; i++) {
 		ctxt->sge[i].length = 0; /* in case map fails */
 		if (!frmr) {
-			BUG_ON(0 == virt_to_page(vec[i].iov_base));
+			BUG_ON(!virt_to_page(vec[i].iov_base));
 			off = (unsigned long)vec[i].iov_base & ~PAGE_MASK;
 			ctxt->sge[i].addr =
 				ib_dma_map_page(xprt->sc_cm_id->device,
@@ -426,6 +427,7 @@ static int rdma_read_xdr(struct svcxprt_rdma *xprt,
 	for (ch = (struct rpcrdma_read_chunk *)&rmsgp->rm_body.rm_chunks[0];
 	     ch->rc_discrim != 0; ch++, ch_no++) {
+		u64 rs_offset;
 next_sge:
 		ctxt = svc_rdma_get_context(xprt);
 		ctxt->direction = DMA_FROM_DEVICE;
@@ -440,10 +442,10 @@ static int rdma_read_xdr(struct svcxprt_rdma *xprt,
 		read_wr.opcode = IB_WR_RDMA_READ;
 		ctxt->wr_op = read_wr.opcode;
 		read_wr.send_flags = IB_SEND_SIGNALED;
-		read_wr.wr.rdma.rkey = ch->rc_target.rs_handle;
-		read_wr.wr.rdma.remote_addr =
-			get_unaligned(&(ch->rc_target.rs_offset)) +
-			sgl_offset;
+		read_wr.wr.rdma.rkey = ntohl(ch->rc_target.rs_handle);
+		xdr_decode_hyper((__be32 *)&ch->rc_target.rs_offset,
+				 &rs_offset);
+		read_wr.wr.rdma.remote_addr = rs_offset + sgl_offset;
 		read_wr.sg_list = ctxt->sge;
 		read_wr.num_sge =
 			rdma_read_max_sge(xprt, chl_map->ch[ch_no].count);
...
@@ -409,21 +409,21 @@ static int send_write_chunks(struct svcxprt_rdma *xprt,
 		u64 rs_offset;
 
 		arg_ch = &arg_ary->wc_array[chunk_no].wc_target;
-		write_len = min(xfer_len, arg_ch->rs_length);
+		write_len = min(xfer_len, ntohl(arg_ch->rs_length));
 
 		/* Prepare the response chunk given the length actually
 		 * written */
-		rs_offset = get_unaligned(&(arg_ch->rs_offset));
+		xdr_decode_hyper((__be32 *)&arg_ch->rs_offset, &rs_offset);
 		svc_rdma_xdr_encode_array_chunk(res_ary, chunk_no,
 						arg_ch->rs_handle,
-						rs_offset,
+						arg_ch->rs_offset,
 						write_len);
 		chunk_off = 0;
 		while (write_len) {
 			int this_write;
 
 			this_write = min(write_len, max_write);
 			ret = send_write(xprt, rqstp,
-					 arg_ch->rs_handle,
+					 ntohl(arg_ch->rs_handle),
 					 rs_offset + chunk_off,
 					 xdr_off,
 					 this_write,
@@ -457,6 +457,7 @@ static int send_reply_chunks(struct svcxprt_rdma *xprt,
 	u32 xdr_off;
 	int chunk_no;
 	int chunk_off;
+	int nchunks;
 	struct rpcrdma_segment *ch;
 	struct rpcrdma_write_array *arg_ary;
 	struct rpcrdma_write_array *res_ary;
@@ -476,26 +477,27 @@ static int send_reply_chunks(struct svcxprt_rdma *xprt,
 	max_write = xprt->sc_max_sge * PAGE_SIZE;
 
 	/* xdr offset starts at RPC message */
+	nchunks = ntohl(arg_ary->wc_nchunks);
 	for (xdr_off = 0, chunk_no = 0;
-	     xfer_len && chunk_no < arg_ary->wc_nchunks;
+	     xfer_len && chunk_no < nchunks;
 	     chunk_no++) {
 		u64 rs_offset;
 		ch = &arg_ary->wc_array[chunk_no].wc_target;
-		write_len = min(xfer_len, ch->rs_length);
+		write_len = min(xfer_len, htonl(ch->rs_length));
 
 		/* Prepare the reply chunk given the length actually
 		 * written */
-		rs_offset = get_unaligned(&(ch->rs_offset));
+		xdr_decode_hyper((__be32 *)&ch->rs_offset, &rs_offset);
 		svc_rdma_xdr_encode_array_chunk(res_ary, chunk_no,
-						ch->rs_handle, rs_offset,
+						ch->rs_handle, ch->rs_offset,
 						write_len);
 		chunk_off = 0;
 		while (write_len) {
 			int this_write;
 
 			this_write = min(write_len, max_write);
 			ret = send_write(xprt, rqstp,
-					 ch->rs_handle,
+					 ntohl(ch->rs_handle),
 					 rs_offset + chunk_off,
 					 xdr_off,
 					 this_write,
...
@@ -51,6 +51,7 @@
 #include <rdma/rdma_cm.h>
 #include <linux/sunrpc/svc_rdma.h>
 #include <linux/export.h>
+#include "xprt_rdma.h"
 
 #define RPCDBG_FACILITY	RPCDBG_SVCXPRT
@@ -90,12 +91,6 @@ struct svc_xprt_class svc_rdma_class = {
 	.xcl_max_payload = RPCSVC_MAXPAYLOAD_TCP,
 };
 
-/* WR context cache. Created in svc_rdma.c  */
-extern struct kmem_cache *svc_rdma_ctxt_cachep;
-
-/* Workqueue created in svc_rdma.c */
-extern struct workqueue_struct *svc_rdma_wq;
-
 struct svc_rdma_op_ctxt *svc_rdma_get_context(struct svcxprt_rdma *xprt)
 {
 	struct svc_rdma_op_ctxt *ctxt;
@@ -150,9 +145,6 @@ void svc_rdma_put_context(struct svc_rdma_op_ctxt *ctxt, int free_pages)
 	atomic_dec(&xprt->sc_ctxt_used);
 }
 
-/* Temporary NFS request map cache. Created in svc_rdma.c  */
-extern struct kmem_cache *svc_rdma_map_cachep;
-
 /*
  * Temporary NFS req mappings are shared across all transport
  * instances. These are short lived and should be bounded by the number
...
@@ -343,4 +343,11 @@ void rpcrdma_reply_handler(struct rpcrdma_rep *);
  */
 int rpcrdma_marshal_req(struct rpc_rqst *);
 
+/* Temporary NFS request map cache. Created in svc_rdma.c  */
+extern struct kmem_cache *svc_rdma_map_cachep;
+
+/* WR context cache. Created in svc_rdma.c  */
+extern struct kmem_cache *svc_rdma_ctxt_cachep;
+
+/* Workqueue created in svc_rdma.c */
+extern struct workqueue_struct *svc_rdma_wq;
+
 #endif				/* _LINUX_SUNRPC_XPRT_RDMA_H */
@@ -2475,6 +2475,7 @@ static struct rpc_xprt_ops xs_tcp_ops = {
 static struct rpc_xprt_ops bc_tcp_ops = {
 	.reserve_xprt		= xprt_reserve_xprt,
 	.release_xprt		= xprt_release_xprt,
+	.rpcbind		= xs_local_rpcbind,
 	.buf_alloc		= bc_malloc,
 	.buf_free		= bc_free,
 	.send_request		= bc_send_request,
...