Commit 27785564 authored by Linus Torvalds

Merge tag 'nfsd-4.9' of git://linux-nfs.org/~bfields/linux

Pull nfsd updates from Bruce Fields:
 "Some RDMA work and some good bugfixes, and two new features that could
  benefit from user testing:

   - Anna Schumaker contributed a simple NFSv4.2 COPY implementation.
     COPY is already supported on the client side, so a call to
     copy_file_range() on a recent client should now result in a
     server-side copy that doesn't require all the data to make a round
     trip to the client and back (see the example just after this
     message).

   - Jeff Layton implemented callbacks to notify clients when contended
     locks become available, which should reduce latency on workloads
     with contended locks"

* tag 'nfsd-4.9' of git://linux-nfs.org/~bfields/linux:
  NFSD: Implement the COPY call
  nfsd: handle EUCLEAN
  nfsd: only WARN once on unmapped errors
  exportfs: be careful to only return expected errors.
  nfsd4: setclientid_confirm with unmatched verifier should fail
  nfsd: randomize SETCLIENTID reply to help distinguish servers
  nfsd: set the MAY_NOTIFY_LOCK flag in OPEN replies
  nfs: add a new NFS4_OPEN_RESULT_MAY_NOTIFY_LOCK constant
  nfsd: add a LRU list for blocked locks
  nfsd: have nfsd4_lock use blocking locks for v4.1+ locks
  nfsd: plumb in a CB_NOTIFY_LOCK operation
  NFSD: fix corruption in notifier registration
  svcrdma: support Remote Invalidation
  svcrdma: Server-side support for rpcrdma_connect_private
  rpcrdma: RDMA/CM private message data structure
  svcrdma: Skip put_page() when send_reply() fails
  svcrdma: Tail iovec leaves an orphaned DMA mapping
  nfsd: fix dprintk in nfsd4_encode_getdeviceinfo
  nfsd: eliminate cb_minorversion field
  nfsd: don't set a FL_LAYOUT lease for flexfiles layouts
parents 35a891be 29ae7f9d
@@ -428,10 +428,10 @@ struct dentry *exportfs_decode_fh(struct vfsmount *mnt, struct fid *fid,
 	if (!nop || !nop->fh_to_dentry)
 		return ERR_PTR(-ESTALE);
 	result = nop->fh_to_dentry(mnt->mnt_sb, fid, fh_len, fileid_type);
-	if (!result)
-		result = ERR_PTR(-ESTALE);
-	if (IS_ERR(result))
-		return result;
+	if (PTR_ERR(result) == -ENOMEM)
+		return ERR_CAST(result);
+	if (IS_ERR_OR_NULL(result))
+		return ERR_PTR(-ESTALE);
 	if (d_is_dir(result)) {
 		/*
@@ -541,6 +541,8 @@ struct dentry *exportfs_decode_fh(struct vfsmount *mnt, struct fid *fid,
 err_result:
 	dput(result);
+	if (err != -ENOMEM)
+		err = -ESTALE;
 	return ERR_PTR(err);
 }
 EXPORT_SYMBOL_GPL(exportfs_decode_fh);
...
@@ -126,6 +126,7 @@ nfsd4_ff_proc_getdeviceinfo(struct super_block *sb, struct svc_rqst *rqstp,
 const struct nfsd4_layout_ops ff_layout_ops = {
 	.notify_types		=
 			NOTIFY_DEVICEID4_DELETE | NOTIFY_DEVICEID4_CHANGE,
+	.disable_recalls	= true,
 	.proc_getdeviceinfo	= nfsd4_ff_proc_getdeviceinfo,
 	.encode_getdeviceinfo	= nfsd4_ff_encode_getdeviceinfo,
 	.proc_layoutget		= nfsd4_ff_proc_layoutget,
...
@@ -84,6 +84,7 @@ struct nfsd_net {
 	struct list_head client_lru;
 	struct list_head close_lru;
 	struct list_head del_recall_lru;
+	struct list_head blocked_locks_lru;
 
 	struct delayed_work laundromat_work;
...
@@ -448,7 +448,7 @@ static int decode_cb_sequence4res(struct xdr_stream *xdr,
 {
 	int status;
 
-	if (cb->cb_minorversion == 0)
+	if (cb->cb_clp->cl_minorversion == 0)
 		return 0;
 
 	status = decode_cb_op_status(xdr, OP_CB_SEQUENCE, &cb->cb_seq_status);
@@ -485,7 +485,7 @@ static void nfs4_xdr_enc_cb_recall(struct rpc_rqst *req, struct xdr_stream *xdr,
 	const struct nfs4_delegation *dp = cb_to_delegation(cb);
 	struct nfs4_cb_compound_hdr hdr = {
 		.ident = cb->cb_clp->cl_cb_ident,
-		.minorversion = cb->cb_minorversion,
+		.minorversion = cb->cb_clp->cl_minorversion,
 	};
 
 	encode_cb_compound4args(xdr, &hdr);
@@ -594,7 +594,7 @@ static void nfs4_xdr_enc_cb_layout(struct rpc_rqst *req,
 		container_of(cb, struct nfs4_layout_stateid, ls_recall);
 	struct nfs4_cb_compound_hdr hdr = {
 		.ident = 0,
-		.minorversion = cb->cb_minorversion,
+		.minorversion = cb->cb_clp->cl_minorversion,
 	};
 
 	encode_cb_compound4args(xdr, &hdr);
@@ -623,6 +623,62 @@ static int nfs4_xdr_dec_cb_layout(struct rpc_rqst *rqstp,
 }
 #endif /* CONFIG_NFSD_PNFS */
 
+static void encode_stateowner(struct xdr_stream *xdr, struct nfs4_stateowner *so)
+{
+	__be32	*p;
+
+	p = xdr_reserve_space(xdr, 8 + 4 + so->so_owner.len);
+	p = xdr_encode_opaque_fixed(p, &so->so_client->cl_clientid, 8);
+	xdr_encode_opaque(p, so->so_owner.data, so->so_owner.len);
+}
+
+static void nfs4_xdr_enc_cb_notify_lock(struct rpc_rqst *req,
+					struct xdr_stream *xdr,
+					const struct nfsd4_callback *cb)
+{
+	const struct nfsd4_blocked_lock *nbl =
+		container_of(cb, struct nfsd4_blocked_lock, nbl_cb);
+	struct nfs4_lockowner *lo = (struct nfs4_lockowner *)nbl->nbl_lock.fl_owner;
+	struct nfs4_cb_compound_hdr hdr = {
+		.ident = 0,
+		.minorversion = cb->cb_clp->cl_minorversion,
+	};
+	__be32 *p;
+
+	BUG_ON(hdr.minorversion == 0);
+
+	encode_cb_compound4args(xdr, &hdr);
+	encode_cb_sequence4args(xdr, cb, &hdr);
+
+	p = xdr_reserve_space(xdr, 4);
+	*p = cpu_to_be32(OP_CB_NOTIFY_LOCK);
+	encode_nfs_fh4(xdr, &nbl->nbl_fh);
+	encode_stateowner(xdr, &lo->lo_owner);
+	hdr.nops++;
+
+	encode_cb_nops(&hdr);
+}
+
+static int nfs4_xdr_dec_cb_notify_lock(struct rpc_rqst *rqstp,
+					struct xdr_stream *xdr,
+					struct nfsd4_callback *cb)
+{
+	struct nfs4_cb_compound_hdr hdr;
+	int status;
+
+	status = decode_cb_compound4res(xdr, &hdr);
+	if (unlikely(status))
+		return status;
+
+	if (cb) {
+		status = decode_cb_sequence4res(xdr, cb);
+		if (unlikely(status || cb->cb_seq_status))
+			return status;
+	}
+	return decode_cb_op_status(xdr, OP_CB_NOTIFY_LOCK, &cb->cb_status);
+}
+
 /*
  * RPC procedure tables
  */
@@ -643,6 +699,7 @@ static struct rpc_procinfo nfs4_cb_procedures[] = {
 #ifdef CONFIG_NFSD_PNFS
 	PROC(CB_LAYOUT,	COMPOUND,	cb_layout,	cb_layout),
 #endif
+	PROC(CB_NOTIFY_LOCK,	COMPOUND,	cb_notify_lock,	cb_notify_lock),
 };
 
 static struct rpc_version nfs_cb_version4 = {
@@ -862,7 +919,6 @@ static void nfsd4_cb_prepare(struct rpc_task *task, void *calldata)
 	struct nfs4_client *clp = cb->cb_clp;
 	u32 minorversion = clp->cl_minorversion;
 
-	cb->cb_minorversion = minorversion;
 	/*
 	 * cb_seq_status is only set in decode_cb_sequence4res,
 	 * and so will remain 1 if an rpc level failure occurs.
...
@@ -174,6 +174,7 @@ nfsd4_free_layout_stateid(struct nfs4_stid *stid)
 	list_del_init(&ls->ls_perfile);
 	spin_unlock(&fp->fi_lock);
 
+	if (!nfsd4_layout_ops[ls->ls_layout_type]->disable_recalls)
 	vfs_setlease(ls->ls_file, F_UNLCK, NULL, (void **)&ls);
 	fput(ls->ls_file);
@@ -189,6 +190,9 @@ nfsd4_layout_setlease(struct nfs4_layout_stateid *ls)
 	struct file_lock *fl;
 	int status;
 
+	if (nfsd4_layout_ops[ls->ls_layout_type]->disable_recalls)
+		return 0;
+
 	fl = locks_alloc_lock();
 	if (!fl)
 		return -ENOMEM;
...
@@ -1010,46 +1010,96 @@ nfsd4_write(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
 }
 
 static __be32
-nfsd4_clone(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
-		struct nfsd4_clone *clone)
+nfsd4_verify_copy(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+		  stateid_t *src_stateid, struct file **src,
+		  stateid_t *dst_stateid, struct file **dst)
 {
-	struct file *src, *dst;
 	__be32 status;
 
 	status = nfs4_preprocess_stateid_op(rqstp, cstate, &cstate->save_fh,
-					    &clone->cl_src_stateid, RD_STATE,
-					    &src, NULL);
+					    src_stateid, RD_STATE, src, NULL);
 	if (status) {
 		dprintk("NFSD: %s: couldn't process src stateid!\n", __func__);
 		goto out;
 	}
 
 	status = nfs4_preprocess_stateid_op(rqstp, cstate, &cstate->current_fh,
-					    &clone->cl_dst_stateid, WR_STATE,
-					    &dst, NULL);
+					    dst_stateid, WR_STATE, dst, NULL);
 	if (status) {
 		dprintk("NFSD: %s: couldn't process dst stateid!\n", __func__);
 		goto out_put_src;
 	}
 
 	/* fix up for NFS-specific error code */
-	if (!S_ISREG(file_inode(src)->i_mode) ||
-	    !S_ISREG(file_inode(dst)->i_mode)) {
+	if (!S_ISREG(file_inode(*src)->i_mode) ||
+	    !S_ISREG(file_inode(*dst)->i_mode)) {
 		status = nfserr_wrong_type;
 		goto out_put_dst;
 	}
 
+out:
+	return status;
+out_put_dst:
+	fput(*dst);
+out_put_src:
+	fput(*src);
+	goto out;
+}
+
+static __be32
+nfsd4_clone(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+		struct nfsd4_clone *clone)
+{
+	struct file *src, *dst;
+	__be32 status;
+
+	status = nfsd4_verify_copy(rqstp, cstate, &clone->cl_src_stateid, &src,
+				   &clone->cl_dst_stateid, &dst);
+	if (status)
+		goto out;
+
 	status = nfsd4_clone_file_range(src, clone->cl_src_pos,
 			dst, clone->cl_dst_pos, clone->cl_count);
 
-out_put_dst:
 	fput(dst);
-out_put_src:
 	fput(src);
 out:
 	return status;
 }
 
+static __be32
+nfsd4_copy(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+		struct nfsd4_copy *copy)
+{
+	struct file *src, *dst;
+	__be32 status;
+	ssize_t bytes;
+
+	status = nfsd4_verify_copy(rqstp, cstate, &copy->cp_src_stateid, &src,
+				   &copy->cp_dst_stateid, &dst);
+	if (status)
+		goto out;
+
+	bytes = nfsd_copy_file_range(src, copy->cp_src_pos,
+			dst, copy->cp_dst_pos, copy->cp_count);
+
+	if (bytes < 0)
+		status = nfserrno(bytes);
+	else {
+		copy->cp_res.wr_bytes_written = bytes;
+		copy->cp_res.wr_stable_how = NFS_UNSTABLE;
+		copy->cp_consecutive = 1;
+		copy->cp_synchronous = 1;
+		gen_boot_verifier(&copy->cp_res.wr_verifier, SVC_NET(rqstp));
+		status = nfs_ok;
+	}
+
+	fput(src);
+	fput(dst);
+out:
+	return status;
+}
+
 static __be32
 nfsd4_fallocate(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
 		struct nfsd4_fallocate *fallocate, int flags)
@@ -1966,6 +2016,18 @@ static inline u32 nfsd4_create_session_rsize(struct svc_rqst *rqstp, struct nfsd
 		op_encode_channel_attrs_maxsz) * sizeof(__be32);
 }
 
+static inline u32 nfsd4_copy_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
+{
+	return (op_encode_hdr_size +
+		1 /* wr_callback */ +
+		op_encode_stateid_maxsz /* wr_callback */ +
+		2 /* wr_count */ +
+		1 /* wr_committed */ +
+		op_encode_verifier_maxsz +
+		1 /* cr_consecutive */ +
+		1 /* cr_synchronous */) * sizeof(__be32);
+}
+
 #ifdef CONFIG_NFSD_PNFS
 /*
  * At this stage we don't really know what layout driver will handle the request,
@@ -2328,6 +2390,12 @@ static struct nfsd4_operation nfsd4_ops[] = {
 		.op_name = "OP_CLONE",
 		.op_rsize_bop = (nfsd4op_rsize)nfsd4_only_status_rsize,
 	},
+	[OP_COPY] = {
+		.op_func = (nfsd4op_func)nfsd4_copy,
+		.op_flags = OP_MODIFIES_SOMETHING | OP_CACHEME,
+		.op_name = "OP_COPY",
+		.op_rsize_bop = (nfsd4op_rsize)nfsd4_copy_rsize,
+	},
 	[OP_SEEK] = {
 		.op_func = (nfsd4op_func)nfsd4_seek,
 		.op_name = "OP_SEEK",
...
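For scale: with the encode-size constants nfsd uses elsewhere in this file (op_encode_hdr_size = 2 XDR words, op_encode_stateid_maxsz = 4, op_encode_verifier_maxsz = 2 — quoted from memory, so treat as approximate), the COPY reply estimate works out to (2 + 1 + 4 + 2 + 1 + 2 + 1 + 1) × 4 = 14 words = 56 bytes.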
... (one large file diff is collapsed in this view) ...
@@ -1693,6 +1693,30 @@ nfsd4_decode_clone(struct nfsd4_compoundargs *argp, struct nfsd4_clone *clone)
 	DECODE_TAIL;
 }
 
+static __be32
+nfsd4_decode_copy(struct nfsd4_compoundargs *argp, struct nfsd4_copy *copy)
+{
+	DECODE_HEAD;
+	unsigned int tmp;
+
+	status = nfsd4_decode_stateid(argp, &copy->cp_src_stateid);
+	if (status)
+		return status;
+	status = nfsd4_decode_stateid(argp, &copy->cp_dst_stateid);
+	if (status)
+		return status;
+
+	READ_BUF(8 + 8 + 8 + 4 + 4 + 4);
+	p = xdr_decode_hyper(p, &copy->cp_src_pos);
+	p = xdr_decode_hyper(p, &copy->cp_dst_pos);
+	p = xdr_decode_hyper(p, &copy->cp_count);
+	copy->cp_consecutive = be32_to_cpup(p++);
+	copy->cp_synchronous = be32_to_cpup(p++);
+	tmp = be32_to_cpup(p); /* Source server list not supported */
+
+	DECODE_TAIL;
+}
+
 static __be32
 nfsd4_decode_seek(struct nfsd4_compoundargs *argp, struct nfsd4_seek *seek)
 {
@@ -1793,7 +1817,7 @@ static nfsd4_dec nfsd4_dec_ops[] = {
 
 	/* new operations for NFSv4.2 */
 	[OP_ALLOCATE]		= (nfsd4_dec)nfsd4_decode_fallocate,
-	[OP_COPY]		= (nfsd4_dec)nfsd4_decode_notsupp,
+	[OP_COPY]		= (nfsd4_dec)nfsd4_decode_copy,
 	[OP_COPY_NOTIFY]	= (nfsd4_dec)nfsd4_decode_notsupp,
 	[OP_DEALLOCATE]		= (nfsd4_dec)nfsd4_decode_fallocate,
 	[OP_IO_ADVISE]		= (nfsd4_dec)nfsd4_decode_notsupp,
@@ -4062,7 +4086,7 @@ nfsd4_encode_getdeviceinfo(struct nfsd4_compoundres *resp, __be32 nfserr,
 	u32 starting_len = xdr->buf->len, needed_len;
 	__be32 *p;
 
-	dprintk("%s: err %d\n", __func__, nfserr);
+	dprintk("%s: err %d\n", __func__, be32_to_cpu(nfserr));
 	if (nfserr)
 		goto out;
 
@@ -4201,6 +4225,41 @@ nfsd4_encode_layoutreturn(struct nfsd4_compoundres *resp, __be32 nfserr,
 }
 #endif /* CONFIG_NFSD_PNFS */
 
+static __be32
+nfsd42_encode_write_res(struct nfsd4_compoundres *resp, struct nfsd42_write_res *write)
+{
+	__be32 *p;
+
+	p = xdr_reserve_space(&resp->xdr, 4 + 8 + 4 + NFS4_VERIFIER_SIZE);
+	if (!p)
+		return nfserr_resource;
+
+	*p++ = cpu_to_be32(0);
+	p = xdr_encode_hyper(p, write->wr_bytes_written);
+	*p++ = cpu_to_be32(write->wr_stable_how);
+	p = xdr_encode_opaque_fixed(p, write->wr_verifier.data,
+				    NFS4_VERIFIER_SIZE);
+	return nfs_ok;
+}
+
+static __be32
+nfsd4_encode_copy(struct nfsd4_compoundres *resp, __be32 nfserr,
+		  struct nfsd4_copy *copy)
+{
+	__be32 *p;
+
+	if (!nfserr) {
+		nfserr = nfsd42_encode_write_res(resp, &copy->cp_res);
+		if (nfserr)
+			return nfserr;
+
+		p = xdr_reserve_space(&resp->xdr, 4 + 4);
+		*p++ = cpu_to_be32(copy->cp_consecutive);
+		*p++ = cpu_to_be32(copy->cp_synchronous);
+	}
+	return nfserr;
+}
+
 static __be32
 nfsd4_encode_seek(struct nfsd4_compoundres *resp, __be32 nfserr,
 		  struct nfsd4_seek *seek)
@@ -4300,7 +4359,7 @@ static nfsd4_enc nfsd4_enc_ops[] = {
 
 	/* NFSv4.2 operations */
 	[OP_ALLOCATE]		= (nfsd4_enc)nfsd4_encode_noop,
-	[OP_COPY]		= (nfsd4_enc)nfsd4_encode_noop,
+	[OP_COPY]		= (nfsd4_enc)nfsd4_encode_copy,
 	[OP_COPY_NOTIFY]	= (nfsd4_enc)nfsd4_encode_noop,
 	[OP_DEALLOCATE]		= (nfsd4_enc)nfsd4_encode_noop,
 	[OP_IO_ADVISE]		= (nfsd4_enc)nfsd4_encode_noop,
...
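Putting the decode and encode halves together, the COPY wire format this implements looks like the following (a reconstruction from the code above; field names follow the NFSv4.2 spec drafts):

```
COPY args:  ca_src_stateid (16 bytes), ca_dst_stateid (16),
            ca_src_offset (8), ca_dst_offset (8), ca_count (8),
            ca_consecutive (4), ca_synchronous (4),
            ca_source_server<> count (4, ignored: only intra-server
            copy is supported, hence the unused "tmp")

COPY res:   cr_callback_id<> count (4, always 0: no async copy),
            wr_bytes_written (8), wr_committed (4), wr_writeverf (8),
            cr_consecutive (4), cr_synchronous (4)
```

This matches the READ_BUF(8 + 8 + 8 + 4 + 4 + 4) after the two fixed-size stateids on the decode side, and the 4 + 8 + 4 + NFS4_VERIFIER_SIZE reservation plus the 4 + 4 trailer on the encode side.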
@@ -1216,6 +1216,8 @@ static __net_init int nfsd_init_net(struct net *net)
 		goto out_idmap_error;
 	nn->nfsd4_lease = 90;	/* default lease time */
 	nn->nfsd4_grace = 90;
+	nn->clverifier_counter = prandom_u32();
+	nn->clientid_counter = prandom_u32();
 	return 0;
 
 out_idmap_error:
...
@@ -789,6 +789,7 @@ nfserrno (int errno)
 		{ nfserr_toosmall, -ETOOSMALL },
 		{ nfserr_serverfault, -ESERVERFAULT },
 		{ nfserr_serverfault, -ENFILE },
+		{ nfserr_io, -EUCLEAN },
 	};
 	int	i;
 
@@ -796,7 +797,7 @@ nfserrno (int errno)
 		if (nfs_errtbl[i].syserr == errno)
 			return nfs_errtbl[i].nfserr;
 	}
-	WARN(1, "nfsd: non-standard errno: %d\n", errno);
+	WARN_ONCE(1, "nfsd: non-standard errno: %d\n", errno);
 	return nfserr_io;
 }
@@ -366,14 +366,21 @@ static struct notifier_block nfsd_inet6addr_notifier = {
 };
 #endif
 
+/* Only used under nfsd_mutex, so this atomic may be overkill: */
+static atomic_t nfsd_notifier_refcount = ATOMIC_INIT(0);
+
 static void nfsd_last_thread(struct svc_serv *serv, struct net *net)
 {
 	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
 
+	/* check if the notifier still has clients */
+	if (atomic_dec_return(&nfsd_notifier_refcount) == 0) {
 		unregister_inetaddr_notifier(&nfsd_inetaddr_notifier);
 #if IS_ENABLED(CONFIG_IPV6)
 		unregister_inet6addr_notifier(&nfsd_inet6addr_notifier);
 #endif
+	}
 
 	/*
 	 * write_ports can create the server without actually starting
 	 * any threads--if we get shut down before any threads are
@@ -488,10 +495,13 @@ int nfsd_create_serv(struct net *net)
 	}
 
 	set_max_drc();
+	/* check if the notifier is already set */
+	if (atomic_inc_return(&nfsd_notifier_refcount) == 1) {
 		register_inetaddr_notifier(&nfsd_inetaddr_notifier);
 #if IS_ENABLED(CONFIG_IPV6)
 		register_inet6addr_notifier(&nfsd_inet6addr_notifier);
 #endif
+	}
 	do_gettimeofday(&nn->nfssvc_boot);		/* record boot time */
 	return 0;
 }
...
@@ -19,6 +19,7 @@ struct nfsd4_deviceid_map {
 
 struct nfsd4_layout_ops {
 	u32		notify_types;
+	bool		disable_recalls;
 
 	__be32 (*proc_getdeviceinfo)(struct super_block *sb,
 			struct svc_rqst *rqstp,
...
@@ -63,7 +63,6 @@ typedef struct {
 
 struct nfsd4_callback {
 	struct nfs4_client *cb_clp;
-	u32 cb_minorversion;
 	struct rpc_message cb_msg;
 	const struct nfsd4_callback_ops *cb_ops;
 	struct work_struct cb_work;
@@ -441,11 +440,11 @@ struct nfs4_openowner {
 /*
  * Represents a generic "lockowner". Similar to an openowner. References to it
  * are held by the lock stateids that are created on its behalf. This object is
- * a superset of the nfs4_stateowner struct (or would be if it needed any extra
- * fields).
+ * a superset of the nfs4_stateowner struct.
  */
 struct nfs4_lockowner {
 	struct nfs4_stateowner	lo_owner;	/* must be first element */
+	struct list_head	lo_blocked;	/* blocked file_locks */
 };
 
 static inline struct nfs4_openowner * openowner(struct nfs4_stateowner *so)
@@ -572,6 +571,7 @@ enum nfsd4_cb_op {
 	NFSPROC4_CLNT_CB_RECALL,
 	NFSPROC4_CLNT_CB_LAYOUT,
 	NFSPROC4_CLNT_CB_SEQUENCE,
+	NFSPROC4_CLNT_CB_NOTIFY_LOCK,
 };
 
 /* Returns true iff a is later than b: */
@@ -580,6 +580,20 @@ static inline bool nfsd4_stateid_generation_after(stateid_t *a, stateid_t *b)
 	return (s32)(a->si_generation - b->si_generation) > 0;
 }
 
+/*
+ * When a client tries to get a lock on a file, we set one of these objects
+ * on the blocking lock. When the lock becomes free, we can then issue a
+ * CB_NOTIFY_LOCK to the client.
+ */
+struct nfsd4_blocked_lock {
+	struct list_head	nbl_list;
+	struct list_head	nbl_lru;
+	unsigned long		nbl_time;
+	struct file_lock	nbl_lock;
+	struct knfsd_fh		nbl_fh;
+	struct nfsd4_callback	nbl_cb;
+};
+
 struct nfsd4_compound_state;
 struct nfsd_net;
...
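Because the file_lock and the callback are embedded in nfsd4_blocked_lock rather than referenced by pointer, the lock-manager and callback paths can get back to the containing object without any allocation. A minimal sketch of the pattern (the nfs4state.c changes that use this struct appear to be in the collapsed diff above, so this is illustrative rather than the actual code):

```c
#include <linux/fs.h>

/* Recover the blocked-lock bookkeeping from the embedded file_lock,
 * e.g. inside an lm_notify() lock-manager callback. */
static struct nfsd4_blocked_lock *to_blocked_lock(struct file_lock *fl)
{
	return container_of(fl, struct nfsd4_blocked_lock, nbl_lock);
}
```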
@@ -513,6 +513,22 @@ __be32 nfsd4_clone_file_range(struct file *src, u64 src_pos, struct file *dst,
 					 count));
 }
 
+ssize_t nfsd_copy_file_range(struct file *src, u64 src_pos, struct file *dst,
+			     u64 dst_pos, u64 count)
+{
+	/*
+	 * Limit copy to 4MB to prevent indefinitely blocking an nfsd
+	 * thread and client rpc slot.  The choice of 4MB is somewhat
+	 * arbitrary.  We might instead base this on r/wsize, or make it
+	 * tunable, or use a time instead of a byte limit, or implement
+	 * asynchronous copy.  In theory a client could also recognize a
+	 * limit like this and pipeline multiple COPY requests.
+	 */
+	count = min_t(u64, count, 1 << 22);
+	return vfs_copy_file_range(src, src_pos, dst, dst_pos, count, 0);
+}
+
 __be32 nfsd4_vfs_fallocate(struct svc_rqst *rqstp, struct svc_fh *fhp,
 			   struct file *file, loff_t offset, loff_t len,
 			   int flags)
...
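Since 1 << 22 is 4 MiB, a large copy will complete in at most 4 MiB chunks, with wr_bytes_written reporting the short count. A client that wants to hide the clamp could loop, as in this hypothetical sketch (nfs42_do_copy() is a stand-in name, not a real API):

```c
/* Hypothetical client-side pipelining over a server-side clamp. */
static ssize_t copy_all(struct file *src, loff_t src_pos,
			struct file *dst, loff_t dst_pos, size_t count)
{
	size_t done = 0;

	while (done < count) {
		/* each call may copy at most the server's limit (4MB here) */
		ssize_t ret = nfs42_do_copy(src, src_pos + done,
					    dst, dst_pos + done,
					    count - done);
		if (ret < 0)
			return ret;
		if (ret == 0)
			break;		/* avoid spinning on no progress */
		done += ret;
	}
	return done;
}
```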
@@ -96,6 +96,8 @@ __be32		nfsd_symlink(struct svc_rqst *, struct svc_fh *,
 				struct svc_fh *res);
 __be32		nfsd_link(struct svc_rqst *, struct svc_fh *,
 				char *, int, struct svc_fh *);
+ssize_t		nfsd_copy_file_range(struct file *, u64,
+				     struct file *, u64, u64);
 __be32		nfsd_rename(struct svc_rqst *,
 				struct svc_fh *, char *, int,
 				struct svc_fh *, char *, int);
...
@@ -503,6 +503,28 @@ struct nfsd4_clone {
 	u64		cl_count;
 };
 
+struct nfsd42_write_res {
+	u64			wr_bytes_written;
+	u32			wr_stable_how;
+	nfs4_verifier		wr_verifier;
+};
+
+struct nfsd4_copy {
+	/* request */
+	stateid_t	cp_src_stateid;
+	stateid_t	cp_dst_stateid;
+	u64		cp_src_pos;
+	u64		cp_dst_pos;
+	u64		cp_count;
+
+	/* both */
+	bool		cp_consecutive;
+	bool		cp_synchronous;
+
+	/* response */
+	struct nfsd42_write_res	cp_res;
+};
+
 struct nfsd4_seek {
 	/* request */
 	stateid_t	seek_stateid;
@@ -568,6 +590,7 @@ struct nfsd4_op {
 		struct nfsd4_fallocate		allocate;
 		struct nfsd4_fallocate		deallocate;
 		struct nfsd4_clone		clone;
+		struct nfsd4_copy		copy;
 		struct nfsd4_seek		seek;
 	} u;
 	struct nfs4_replay *			replay;
...
@@ -28,3 +28,12 @@
 #define NFS4_dec_cb_layout_sz		(cb_compound_dec_hdr_sz  +	\
 					cb_sequence_dec_sz +		\
 					op_dec_sz)
+
+#define NFS4_enc_cb_notify_lock_sz	(cb_compound_enc_hdr_sz +	\
+					cb_sequence_enc_sz +		\
+					2 + 1 +				\
+					XDR_QUADLEN(NFS4_OPAQUE_LIMIT) +	\
+					enc_nfs4_fh_sz)
+#define NFS4_dec_cb_notify_lock_sz	(cb_compound_dec_hdr_sz  +	\
+					cb_sequence_dec_sz +		\
+					op_dec_sz)
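Cross-checking against encode_stateowner() in the nfs4callback.c hunk above: the 2 + 1 words are the 8-byte clientid plus the 4-byte owner length, and XDR_QUADLEN(NFS4_OPAQUE_LIMIT) reserves room for the owner body itself. With NFS4_OPAQUE_LIMIT being 1024 bytes, that is 256 words, so a maximally sized lock-owner encodes to 2 + 1 + 256 = 259 words before the file handle is added.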
@@ -157,12 +157,13 @@ struct fid {
  * @fh_to_dentry is given a &struct super_block (@sb) and a file handle
  * fragment (@fh, @fh_len). It should return a &struct dentry which refers
  * to the same file that the file handle fragment refers to.  If it cannot,
- * it should return a %NULL pointer if the file was found but no acceptable
- * &dentries were available, or an %ERR_PTR error code indicating why it
- * couldn't be found (e.g. %ENOENT or %ENOMEM).  Any suitable dentry can be
- * returned including, if necessary, a new dentry created with d_alloc_root.
- * The caller can then find any other extant dentries by following the
- * d_alias links.
+ * it should return a %NULL pointer if the file cannot be found, or an
+ * %ERR_PTR error code of %ENOMEM if a memory allocation failure occurred.
+ * Any other error code is treated like %NULL, and will cause an %ESTALE error
+ * for callers of exportfs_decode_fh().
+ * Any suitable dentry can be returned including, if necessary, a new dentry
+ * created with d_alloc_root.  The caller can then find any other extant
+ * dentries by following the d_alias links.
  *
  * fh_to_parent:
  *	Same as @fh_to_dentry, except that it returns a pointer to the parent
...
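A minimal implementation honoring the updated contract might look like this, for a hypothetical filesystem (foofs_iget() is an assumed helper; the rest is real exportfs/VFS API):

```c
#include <linux/exportfs.h>

static struct dentry *foofs_fh_to_dentry(struct super_block *sb,
					 struct fid *fid,
					 int fh_len, int fh_type)
{
	struct inode *inode;

	if (fh_len < 2 || fh_type != FILEID_INO32_GEN)
		return NULL;		/* caller turns this into ESTALE */

	inode = foofs_iget(sb, fid->i32.ino, fid->i32.gen);
	if (IS_ERR(inode))
		/* only -ENOMEM propagates; anything else means ESTALE */
		return PTR_ERR(inode) == -ENOMEM ? ERR_CAST(inode) : NULL;

	return d_obtain_alias(inode);
}
```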
@@ -41,6 +41,7 @@
 #define _LINUX_SUNRPC_RPC_RDMA_H
 
 #include <linux/types.h>
+#include <linux/bitops.h>
 
 #define RPCRDMA_VERSION		1
 #define rpcrdma_version		cpu_to_be32(RPCRDMA_VERSION)
@@ -129,4 +130,38 @@ enum rpcrdma_proc {
 #define rdma_done	cpu_to_be32(RDMA_DONE)
 #define rdma_error	cpu_to_be32(RDMA_ERROR)
 
+/*
+ * Private extension to RPC-over-RDMA Version One.
+ * Message passed during RDMA-CM connection set-up.
+ *
+ * Add new fields at the end, and don't permute existing
+ * fields.
+ */
+struct rpcrdma_connect_private {
+	__be32			cp_magic;
+	u8			cp_version;
+	u8			cp_flags;
+	u8			cp_send_size;
+	u8			cp_recv_size;
+} __packed;
+
+#define rpcrdma_cmp_magic	__cpu_to_be32(0xf6ab0e18)
+
+enum {
+	RPCRDMA_CMP_VERSION		= 1,
+	RPCRDMA_CMP_F_SND_W_INV_OK	= BIT(0),
+};
+
+static inline u8
+rpcrdma_encode_buffer_size(unsigned int size)
+{
+	return (size >> 10) - 1;
+}
+
+static inline unsigned int
+rpcrdma_decode_buffer_size(u8 val)
+{
+	return ((unsigned int)val + 1) << 10;
+}
+
 #endif /* _LINUX_SUNRPC_RPC_RDMA_H */
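The two helpers implement a 1KB-granularity encoding that fits the buffer sizes into the u8 fields of the private message; a quick round trip:

```c
rpcrdma_encode_buffer_size(4096);	/* (4096 >> 10) - 1 == 3    */
rpcrdma_decode_buffer_size(3);		/* (3 + 1) << 10   == 4096  */
/* representable range: val 0 -> 1KB, up to val 255 -> 256KB */
```

Note the implicit assumption that buffer sizes are at least 1KB and multiples of 1KB; encoding anything smaller would wrap the u8.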
@@ -86,6 +86,7 @@ struct svc_rdma_op_ctxt {
 	unsigned long flags;
 	enum dma_data_direction direction;
 	int count;
+	unsigned int mapped_sges;
 	struct ib_sge sge[RPCSVC_MAXPAGES];
 	struct page *pages[RPCSVC_MAXPAGES];
 };
@@ -136,6 +137,7 @@ struct svcxprt_rdma {
 	int		     sc_ord;		/* RDMA read limit */
 	int		     sc_max_sge;
 	int		     sc_max_sge_rd;	/* max sge for read target */
+	bool		     sc_snd_w_inv;	/* OK to use Send With Invalidate */
 
 	atomic_t             sc_sq_count;	/* Number of SQ WR on queue */
 	unsigned int	     sc_sq_depth;	/* Depth of SQ */
@@ -193,6 +195,14 @@ struct svcxprt_rdma {
 
 #define RPCSVC_MAXPAYLOAD_RDMA	RPCSVC_MAXPAYLOAD
 
+/* Track DMA maps for this transport and context */
+static inline void svc_rdma_count_mappings(struct svcxprt_rdma *rdma,
+					   struct svc_rdma_op_ctxt *ctxt)
+{
+	ctxt->mapped_sges++;
+	atomic_inc(&rdma->sc_dma_used);
+}
+
 /* svc_rdma_backchannel.c */
 extern int svc_rdma_handle_bc_reply(struct rpc_xprt *xprt,
 				    struct rpcrdma_msg *rmsgp,
...
@@ -41,6 +41,7 @@
 
 #define NFS4_OPEN_RESULT_CONFIRM		0x0002
 #define NFS4_OPEN_RESULT_LOCKTYPE_POSIX		0x0004
+#define NFS4_OPEN_RESULT_MAY_NOTIFY_LOCK	0x0020
 
 #define NFS4_SHARE_ACCESS_MASK	0x000F
 #define NFS4_SHARE_ACCESS_READ	0x0001
...
@@ -129,7 +129,7 @@ static int svc_rdma_bc_sendto(struct svcxprt_rdma *rdma,
 		ret = -EIO;
 		goto out_unmap;
 	}
-	atomic_inc(&rdma->sc_dma_used);
+	svc_rdma_count_mappings(rdma, ctxt);
 
 	memset(&send_wr, 0, sizeof(send_wr));
 	ctxt->cqe.done = svc_rdma_wc_send;
...
@@ -159,7 +159,7 @@ int rdma_read_chunk_lcl(struct svcxprt_rdma *xprt,
 			   ctxt->sge[pno].addr);
 		if (ret)
 			goto err;
-		atomic_inc(&xprt->sc_dma_used);
+		svc_rdma_count_mappings(xprt, ctxt);
 
 		ctxt->sge[pno].lkey = xprt->sc_pd->local_dma_lkey;
 		ctxt->sge[pno].length = len;
...
@@ -225,6 +225,48 @@ svc_rdma_get_reply_array(struct rpcrdma_msg *rmsgp,
 	return rp_ary;
 }
 
+/* RPC-over-RDMA Version One private extension: Remote Invalidation.
+ * Responder's choice: requester signals it can handle Send With
+ * Invalidate, and responder chooses one rkey to invalidate.
+ *
+ * Find a candidate rkey to invalidate when sending a reply. Picks the
+ * first rkey it finds in the chunk lists.
+ *
+ * Returns zero if RPC's chunk lists are empty.
+ */
+static u32 svc_rdma_get_inv_rkey(struct rpcrdma_msg *rdma_argp,
+				 struct rpcrdma_write_array *wr_ary,
+				 struct rpcrdma_write_array *rp_ary)
+{
+	struct rpcrdma_read_chunk *rd_ary;
+	struct rpcrdma_segment *arg_ch;
+	u32 inv_rkey;
+
+	inv_rkey = 0;
+
+	rd_ary = svc_rdma_get_read_chunk(rdma_argp);
+	if (rd_ary) {
+		inv_rkey = be32_to_cpu(rd_ary->rc_target.rs_handle);
+		goto out;
+	}
+
+	if (wr_ary && be32_to_cpu(wr_ary->wc_nchunks)) {
+		arg_ch = &wr_ary->wc_array[0].wc_target;
+		inv_rkey = be32_to_cpu(arg_ch->rs_handle);
+		goto out;
+	}
+
+	if (rp_ary && be32_to_cpu(rp_ary->wc_nchunks)) {
+		arg_ch = &rp_ary->wc_array[0].wc_target;
+		inv_rkey = be32_to_cpu(arg_ch->rs_handle);
+		goto out;
+	}
+
+out:
+	dprintk("svcrdma: Send With Invalidate rkey=%08x\n", inv_rkey);
+	return inv_rkey;
+}
+
 /* Assumptions:
  * - The specified write_len can be represented in sc_max_sge * PAGE_SIZE
  */
@@ -280,7 +322,7 @@ static int send_write(struct svcxprt_rdma *xprt, struct svc_rqst *rqstp,
 		if (ib_dma_mapping_error(xprt->sc_cm_id->device,
 					 sge[sge_no].addr))
 			goto err;
-		atomic_inc(&xprt->sc_dma_used);
+		svc_rdma_count_mappings(xprt, ctxt);
 		sge[sge_no].lkey = xprt->sc_pd->local_dma_lkey;
 		ctxt->count++;
 		sge_off = 0;
@@ -464,7 +506,8 @@ static int send_reply(struct svcxprt_rdma *rdma,
 		      struct page *page,
 		      struct rpcrdma_msg *rdma_resp,
 		      struct svc_rdma_req_map *vec,
-		      int byte_count)
+		      int byte_count,
+		      u32 inv_rkey)
 {
 	struct svc_rdma_op_ctxt *ctxt;
 	struct ib_send_wr send_wr;
@@ -489,7 +532,7 @@ static int send_reply(struct svcxprt_rdma *rdma,
 			    ctxt->sge[0].length, DMA_TO_DEVICE);
 	if (ib_dma_mapping_error(rdma->sc_cm_id->device, ctxt->sge[0].addr))
 		goto err;
-	atomic_inc(&rdma->sc_dma_used);
+	svc_rdma_count_mappings(rdma, ctxt);
 
 	ctxt->direction = DMA_TO_DEVICE;
 
@@ -505,7 +548,7 @@ static int send_reply(struct svcxprt_rdma *rdma,
 		if (ib_dma_mapping_error(rdma->sc_cm_id->device,
 					 ctxt->sge[sge_no].addr))
 			goto err;
-		atomic_inc(&rdma->sc_dma_used);
+		svc_rdma_count_mappings(rdma, ctxt);
 		ctxt->sge[sge_no].lkey = rdma->sc_pd->local_dma_lkey;
 		ctxt->sge[sge_no].length = sge_bytes;
 	}
@@ -523,23 +566,9 @@ static int send_reply(struct svcxprt_rdma *rdma,
 		ctxt->pages[page_no+1] = rqstp->rq_respages[page_no];
 		ctxt->count++;
 		rqstp->rq_respages[page_no] = NULL;
-		/*
-		 * If there are more pages than SGE, terminate SGE
-		 * list so that svc_rdma_unmap_dma doesn't attempt to
-		 * unmap garbage.
-		 */
-		if (page_no+1 >= sge_no)
-			ctxt->sge[page_no+1].length = 0;
 	}
 	rqstp->rq_next_page = rqstp->rq_respages + 1;
 
-	/* The loop above bumps sc_dma_used for each sge. The
-	 * xdr_buf.tail gets a separate sge, but resides in the
-	 * same page as xdr_buf.head. Don't count it twice.
-	 */
-	if (sge_no > ctxt->count)
-		atomic_dec(&rdma->sc_dma_used);
-
 	if (sge_no > rdma->sc_max_sge) {
 		pr_err("svcrdma: Too many sges (%d)\n", sge_no);
 		goto err;
@@ -549,6 +578,10 @@ static int send_reply(struct svcxprt_rdma *rdma,
 	send_wr.wr_cqe = &ctxt->cqe;
 	send_wr.sg_list = ctxt->sge;
 	send_wr.num_sge = sge_no;
+	if (inv_rkey) {
+		send_wr.opcode = IB_WR_SEND_WITH_INV;
+		send_wr.ex.invalidate_rkey = inv_rkey;
+	} else
 		send_wr.opcode = IB_WR_SEND;
 	send_wr.send_flags =  IB_SEND_SIGNALED;
 
@@ -581,6 +614,7 @@ int svc_rdma_sendto(struct svc_rqst *rqstp)
 	int inline_bytes;
 	struct page *res_page;
 	struct svc_rdma_req_map *vec;
+	u32 inv_rkey;
 
 	dprintk("svcrdma: sending response for rqstp=%p\n", rqstp);
 
@@ -591,6 +625,10 @@ int svc_rdma_sendto(struct svc_rqst *rqstp)
 	wr_ary = svc_rdma_get_write_array(rdma_argp);
 	rp_ary = svc_rdma_get_reply_array(rdma_argp, wr_ary);
 
+	inv_rkey = 0;
+	if (rdma->sc_snd_w_inv)
+		inv_rkey = svc_rdma_get_inv_rkey(rdma_argp, wr_ary, rp_ary);
+
 	/* Build an req vec for the XDR */
 	vec = svc_rdma_get_req_map(rdma);
 	ret = svc_rdma_map_xdr(rdma, &rqstp->rq_res, vec, wr_ary != NULL);
@@ -633,9 +671,9 @@ int svc_rdma_sendto(struct svc_rqst *rqstp)
 		goto err1;
 
 	ret = send_reply(rdma, rqstp, res_page, rdma_resp, vec,
-			 inline_bytes);
+			 inline_bytes, inv_rkey);
 	if (ret < 0)
-		goto err1;
+		goto err0;
 
 	svc_rdma_put_req_map(rdma, vec);
 	dprintk("svcrdma: send_reply returns %d\n", ret);
@@ -692,7 +730,7 @@ void svc_rdma_send_error(struct svcxprt_rdma *xprt, struct rpcrdma_msg *rmsgp,
 		svc_rdma_put_context(ctxt, 1);
 		return;
 	}
-	atomic_inc(&xprt->sc_dma_used);
+	svc_rdma_count_mappings(xprt, ctxt);
 
 	/* Prepare SEND WR */
 	memset(&err_wr, 0, sizeof(err_wr));
...
@@ -198,6 +198,7 @@ struct svc_rdma_op_ctxt *svc_rdma_get_context(struct svcxprt_rdma *xprt)
 
 out:
 	ctxt->count = 0;
+	ctxt->mapped_sges = 0;
 	ctxt->frmr = NULL;
 	return ctxt;
 
@@ -221,22 +222,27 @@ struct svc_rdma_op_ctxt *svc_rdma_get_context(struct svcxprt_rdma *xprt)
 void svc_rdma_unmap_dma(struct svc_rdma_op_ctxt *ctxt)
 {
 	struct svcxprt_rdma *xprt = ctxt->xprt;
-	int i;
+	struct ib_device *device = xprt->sc_cm_id->device;
+	u32 lkey = xprt->sc_pd->local_dma_lkey;
+	unsigned int i, count;
 
-	for (i = 0; i < ctxt->count && ctxt->sge[i].length; i++) {
+	for (count = 0, i = 0; i < ctxt->mapped_sges; i++) {
 		/*
 		 * Unmap the DMA addr in the SGE if the lkey matches
 		 * the local_dma_lkey, otherwise, ignore it since it is
 		 * an FRMR lkey and will be unmapped later when the
 		 * last WR that uses it completes.
 		 */
-		if (ctxt->sge[i].lkey == xprt->sc_pd->local_dma_lkey) {
-			atomic_dec(&xprt->sc_dma_used);
-			ib_dma_unmap_page(xprt->sc_cm_id->device,
+		if (ctxt->sge[i].lkey == lkey) {
+			count++;
+			ib_dma_unmap_page(device,
					  ctxt->sge[i].addr,
					  ctxt->sge[i].length,
					  ctxt->direction);
		}
	}
+	ctxt->mapped_sges = 0;
+	atomic_sub(count, &xprt->sc_dma_used);
 }
 
 void svc_rdma_put_context(struct svc_rdma_op_ctxt *ctxt, int free_pages)
@@ -600,7 +606,7 @@ int svc_rdma_post_recv(struct svcxprt_rdma *xprt, gfp_t flags)
 				     DMA_FROM_DEVICE);
 		if (ib_dma_mapping_error(xprt->sc_cm_id->device, pa))
 			goto err_put_ctxt;
-		atomic_inc(&xprt->sc_dma_used);
+		svc_rdma_count_mappings(xprt, ctxt);
 		ctxt->sge[sge_no].addr = pa;
 		ctxt->sge[sge_no].length = PAGE_SIZE;
 		ctxt->sge[sge_no].lkey = xprt->sc_pd->local_dma_lkey;
@@ -642,6 +648,26 @@ int svc_rdma_repost_recv(struct svcxprt_rdma *xprt, gfp_t flags)
 	return ret;
 }
 
+static void
+svc_rdma_parse_connect_private(struct svcxprt_rdma *newxprt,
+			       struct rdma_conn_param *param)
+{
+	const struct rpcrdma_connect_private *pmsg = param->private_data;
+
+	if (pmsg &&
+	    pmsg->cp_magic == rpcrdma_cmp_magic &&
+	    pmsg->cp_version == RPCRDMA_CMP_VERSION) {
+		newxprt->sc_snd_w_inv = pmsg->cp_flags &
+					RPCRDMA_CMP_F_SND_W_INV_OK;
+
+		dprintk("svcrdma: client send_size %u, recv_size %u "
+			"remote inv %ssupported\n",
+			rpcrdma_decode_buffer_size(pmsg->cp_send_size),
+			rpcrdma_decode_buffer_size(pmsg->cp_recv_size),
+			newxprt->sc_snd_w_inv ? "" : "un");
+	}
+}
+
 /*
  * This function handles the CONNECT_REQUEST event on a listening
  * endpoint. It is passed the cma_id for the _new_ connection. The context in
@@ -653,7 +679,8 @@ int svc_rdma_repost_recv(struct svcxprt_rdma *xprt, gfp_t flags)
 * will call the recvfrom method on the listen xprt which will accept the new
 * connection.
 */
-static void handle_connect_req(struct rdma_cm_id *new_cma_id, size_t client_ird)
+static void handle_connect_req(struct rdma_cm_id *new_cma_id,
+			       struct rdma_conn_param *param)
 {
 	struct svcxprt_rdma *listen_xprt = new_cma_id->context;
 	struct svcxprt_rdma *newxprt;
@@ -669,9 +696,10 @@ static void handle_connect_req(struct rdma_cm_id *new_cma_id, size_t client_ird)
 	new_cma_id->context = newxprt;
 	dprintk("svcrdma: Creating newxprt=%p, cm_id=%p, listenxprt=%p\n",
 		newxprt, newxprt->sc_cm_id, listen_xprt);
+	svc_rdma_parse_connect_private(newxprt, param);
 
 	/* Save client advertised inbound read limit for use later in accept. */
-	newxprt->sc_ord = client_ird;
+	newxprt->sc_ord = param->initiator_depth;
 
 	/* Set the local and remote addresses in the transport */
 	sa = (struct sockaddr *)&newxprt->sc_cm_id->route.addr.dst_addr;
@@ -706,8 +734,7 @@ static int rdma_listen_handler(struct rdma_cm_id *cma_id,
 		dprintk("svcrdma: Connect request on cma_id=%p, xprt = %p, "
 			"event = %s (%d)\n", cma_id, cma_id->context,
 			rdma_event_msg(event->event), event->event);
-		handle_connect_req(cma_id,
-				   event->param.conn.initiator_depth);
+		handle_connect_req(cma_id, &event->param.conn);
 		break;
 
 	case RDMA_CM_EVENT_ESTABLISHED:
@@ -941,6 +968,7 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
 	struct svcxprt_rdma *listen_rdma;
 	struct svcxprt_rdma *newxprt = NULL;
 	struct rdma_conn_param conn_param;
+	struct rpcrdma_connect_private pmsg;
 	struct ib_qp_init_attr qp_attr;
 	struct ib_device *dev;
 	unsigned int i;
@@ -1070,7 +1098,8 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
 			dev->attrs.max_fast_reg_page_list_len;
 		newxprt->sc_dev_caps |= SVCRDMA_DEVCAP_FAST_REG;
 		newxprt->sc_reader = rdma_read_chunk_frmr;
-	}
+	} else
+		newxprt->sc_snd_w_inv = false;
 
 	/*
 	 * Determine if a DMA MR is required and if so, what privs are required
@@ -1094,11 +1123,20 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
 	/* Swap out the handler */
 	newxprt->sc_cm_id->event_handler = rdma_cma_handler;
 
+	/* Construct RDMA-CM private message */
+	pmsg.cp_magic = rpcrdma_cmp_magic;
+	pmsg.cp_version = RPCRDMA_CMP_VERSION;
+	pmsg.cp_flags = 0;
+	pmsg.cp_send_size = pmsg.cp_recv_size =
+		rpcrdma_encode_buffer_size(newxprt->sc_max_req_size);
+
 	/* Accept Connection */
 	set_bit(RDMAXPRT_CONN_PENDING, &newxprt->sc_flags);
 	memset(&conn_param, 0, sizeof conn_param);
 	conn_param.responder_resources = 0;
 	conn_param.initiator_depth = newxprt->sc_ord;
+	conn_param.private_data = &pmsg;
+	conn_param.private_data_len = sizeof(pmsg);
 	ret = rdma_accept(newxprt->sc_cm_id, &conn_param);
 	if (ret) {
 		dprintk("svcrdma: failed to accept new connection, ret=%d\n",
...
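For completeness, the requester's half of this handshake mirrors the construction in svc_rdma_accept() above. A hedged sketch — the function is illustrative, not the actual client code, though the constants and helpers are the real ones from rpc_rdma.h:

```c
#include <linux/sunrpc/rpc_rdma.h>
#include <rdma/rdma_cm.h>

static void fill_connect_private(struct rpcrdma_connect_private *pmsg,
				 struct rdma_conn_param *param,
				 bool remote_inv_ok)
{
	pmsg->cp_magic   = rpcrdma_cmp_magic;
	pmsg->cp_version = RPCRDMA_CMP_VERSION;
	/* advertise Send With Invalidate support only if every MR the
	 * requester exposes may safely be remotely invalidated */
	pmsg->cp_flags   = remote_inv_ok ? RPCRDMA_CMP_F_SND_W_INV_OK : 0;
	pmsg->cp_send_size = pmsg->cp_recv_size =
		rpcrdma_encode_buffer_size(4096);	/* e.g. 4KB inline */

	param->private_data     = pmsg;
	param->private_data_len = sizeof(*pmsg);
}
```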