Commit f1517df8 authored by Linus Torvalds

Merge tag 'nfsd-4.16' of git://linux-nfs.org/~bfields/linux

Pull nfsd update from Bruce Fields:
 "A fairly small update this time around. Some cleanup, RDMA fixes,
  overlayfs fixes, and a fix for an NFSv4 state bug.

  The bigger deal for nfsd this time around was Jeff Layton's
  already-merged i_version patches"

* tag 'nfsd-4.16' of git://linux-nfs.org/~bfields/linux:
  svcrdma: Fix Read chunk round-up
  NFSD: hide unused svcxdr_dupstr()
  nfsd: store stat times in fill_pre_wcc() instead of inode times
  nfsd: encode stat->mtime for getattr instead of inode->i_mtime
  nfsd: return RESOURCE not GARBAGE_ARGS on too many ops
  nfsd4: don't set lock stateid's sc_type to CLOSED
  nfsd: Detect unhashed stids in nfsd4_verify_open_stid()
  sunrpc: remove dead code in svc_sock_setbufsize
  svcrdma: Post Receives in the Receive completion handler
  nfsd4: permit layoutget of executable-only files
  lockd: convert nlm_rqst.a_count from atomic_t to refcount_t
  lockd: convert nlm_lockowner.count from atomic_t to refcount_t
  lockd: convert nsm_handle.sm_count from atomic_t to refcount_t
parents 9d21874d 175e0310
...@@ -1554,9 +1554,9 @@ int __break_lease(struct inode *inode, unsigned int mode, unsigned int type) ...@@ -1554,9 +1554,9 @@ int __break_lease(struct inode *inode, unsigned int mode, unsigned int type)
EXPORT_SYMBOL(__break_lease); EXPORT_SYMBOL(__break_lease);
/** /**
* lease_get_mtime - get the last modified time of an inode * lease_get_mtime - update modified time of an inode with exclusive lease
* @inode: the inode * @inode: the inode
* @time: pointer to a timespec which will contain the last modified time * @time: pointer to a timespec which contains the last modified time
* *
* This is to force NFS clients to flush their caches for files with * This is to force NFS clients to flush their caches for files with
* exclusive leases. The justification is that if someone has an * exclusive leases. The justification is that if someone has an
...@@ -1580,8 +1580,6 @@ void lease_get_mtime(struct inode *inode, struct timespec *time) ...@@ -1580,8 +1580,6 @@ void lease_get_mtime(struct inode *inode, struct timespec *time)
if (has_lease) if (has_lease)
*time = current_time(inode); *time = current_time(inode);
else
*time = inode->i_mtime;
} }
EXPORT_SYMBOL(lease_get_mtime); EXPORT_SYMBOL(lease_get_mtime);
......
...@@ -250,6 +250,34 @@ encode_wcc_data(struct svc_rqst *rqstp, __be32 *p, struct svc_fh *fhp) ...@@ -250,6 +250,34 @@ encode_wcc_data(struct svc_rqst *rqstp, __be32 *p, struct svc_fh *fhp)
return encode_post_op_attr(rqstp, p, fhp); return encode_post_op_attr(rqstp, p, fhp);
} }
/*
 * Record the pre-operation attributes for the NFSv3 weak cache
 * consistency (wcc) reply data. Prefer a fresh getattr; if that
 * fails, fall back to the raw inode fields so the client still
 * gets usable times.
 */
void fill_pre_wcc(struct svc_fh *fhp)
{
	struct kstat stat;
	struct inode *inode;
	__be32 status;

	/* Only the first operation on the fh records pre-op state. */
	if (fhp->fh_pre_saved)
		return;

	inode = d_inode(fhp->fh_dentry);
	status = fh_getattr(fhp, &stat);
	if (status) {
		/* Grab the times from inode anyway */
		stat.mtime = inode->i_mtime;
		stat.ctime = inode->i_ctime;
		stat.size = inode->i_size;
	}

	fhp->fh_pre_mtime = stat.mtime;
	fhp->fh_pre_ctime = stat.ctime;
	fhp->fh_pre_size = stat.size;
	/* Change attribute is derived from the (possibly fallback) stat. */
	fhp->fh_pre_change = nfsd4_change_attribute(&stat, inode);
	fhp->fh_pre_saved = true;
}
/* /*
* Fill in the post_op attr for the wcc data * Fill in the post_op attr for the wcc data
*/ */
...@@ -261,7 +289,8 @@ void fill_post_wcc(struct svc_fh *fhp) ...@@ -261,7 +289,8 @@ void fill_post_wcc(struct svc_fh *fhp)
printk("nfsd: inode locked twice during operation.\n"); printk("nfsd: inode locked twice during operation.\n");
err = fh_getattr(fhp, &fhp->fh_post_attr); err = fh_getattr(fhp, &fhp->fh_post_attr);
fhp->fh_post_change = nfsd4_change_attribute(d_inode(fhp->fh_dentry)); fhp->fh_post_change = nfsd4_change_attribute(&fhp->fh_post_attr,
d_inode(fhp->fh_dentry));
if (err) { if (err) {
fhp->fh_post_saved = false; fhp->fh_post_saved = false;
/* Grab the ctime anyway - set_change_info might use it */ /* Grab the ctime anyway - set_change_info might use it */
......
...@@ -1363,14 +1363,14 @@ nfsd4_layoutget(struct svc_rqst *rqstp, ...@@ -1363,14 +1363,14 @@ nfsd4_layoutget(struct svc_rqst *rqstp,
const struct nfsd4_layout_ops *ops; const struct nfsd4_layout_ops *ops;
struct nfs4_layout_stateid *ls; struct nfs4_layout_stateid *ls;
__be32 nfserr; __be32 nfserr;
int accmode; int accmode = NFSD_MAY_READ_IF_EXEC;
switch (lgp->lg_seg.iomode) { switch (lgp->lg_seg.iomode) {
case IOMODE_READ: case IOMODE_READ:
accmode = NFSD_MAY_READ; accmode |= NFSD_MAY_READ;
break; break;
case IOMODE_RW: case IOMODE_RW:
accmode = NFSD_MAY_READ | NFSD_MAY_WRITE; accmode |= NFSD_MAY_READ | NFSD_MAY_WRITE;
break; break;
default: default:
dprintk("%s: invalid iomode %d\n", dprintk("%s: invalid iomode %d\n",
...@@ -1703,6 +1703,9 @@ nfsd4_proc_compound(struct svc_rqst *rqstp) ...@@ -1703,6 +1703,9 @@ nfsd4_proc_compound(struct svc_rqst *rqstp)
status = nfserr_minor_vers_mismatch; status = nfserr_minor_vers_mismatch;
if (nfsd_minorversion(args->minorversion, NFSD_TEST) <= 0) if (nfsd_minorversion(args->minorversion, NFSD_TEST) <= 0)
goto out; goto out;
status = nfserr_resource;
if (args->opcnt > NFSD_MAX_OPS_PER_COMPOUND)
goto out;
status = nfs41_check_op_ordering(args); status = nfs41_check_op_ordering(args);
if (status) { if (status) {
......
...@@ -3590,6 +3590,7 @@ nfsd4_verify_open_stid(struct nfs4_stid *s) ...@@ -3590,6 +3590,7 @@ nfsd4_verify_open_stid(struct nfs4_stid *s)
switch (s->sc_type) { switch (s->sc_type) {
default: default:
break; break;
case 0:
case NFS4_CLOSED_STID: case NFS4_CLOSED_STID:
case NFS4_CLOSED_DELEG_STID: case NFS4_CLOSED_DELEG_STID:
ret = nfserr_bad_stateid; ret = nfserr_bad_stateid;
...@@ -5182,7 +5183,6 @@ nfsd4_free_lock_stateid(stateid_t *stateid, struct nfs4_stid *s) ...@@ -5182,7 +5183,6 @@ nfsd4_free_lock_stateid(stateid_t *stateid, struct nfs4_stid *s)
lockowner(stp->st_stateowner))) lockowner(stp->st_stateowner)))
goto out; goto out;
stp->st_stid.sc_type = NFS4_CLOSED_STID;
release_lock_stateid(stp); release_lock_stateid(stp);
ret = nfs_ok; ret = nfs_ok;
...@@ -6078,10 +6078,8 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, ...@@ -6078,10 +6078,8 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
* If this is a new, never-before-used stateid, and we are * If this is a new, never-before-used stateid, and we are
* returning an error, then just go ahead and release it. * returning an error, then just go ahead and release it.
*/ */
if (status && new) { if (status && new)
lock_stp->st_stid.sc_type = NFS4_CLOSED_STID;
release_lock_stateid(lock_stp); release_lock_stateid(lock_stp);
}
mutex_unlock(&lock_stp->st_mutex); mutex_unlock(&lock_stp->st_mutex);
......
...@@ -455,8 +455,8 @@ nfsd4_decode_fattr(struct nfsd4_compoundargs *argp, u32 *bmval, ...@@ -455,8 +455,8 @@ nfsd4_decode_fattr(struct nfsd4_compoundargs *argp, u32 *bmval,
} }
label->len = 0; label->len = 0;
#ifdef CONFIG_NFSD_V4_SECURITY_LABEL if (IS_ENABLED(CONFIG_NFSD_V4_SECURITY_LABEL) &&
if (bmval[2] & FATTR4_WORD2_SECURITY_LABEL) { bmval[2] & FATTR4_WORD2_SECURITY_LABEL) {
READ_BUF(4); READ_BUF(4);
len += 4; len += 4;
dummy32 = be32_to_cpup(p++); /* lfs: we don't use it */ dummy32 = be32_to_cpup(p++); /* lfs: we don't use it */
...@@ -476,7 +476,6 @@ nfsd4_decode_fattr(struct nfsd4_compoundargs *argp, u32 *bmval, ...@@ -476,7 +476,6 @@ nfsd4_decode_fattr(struct nfsd4_compoundargs *argp, u32 *bmval,
if (!label->data) if (!label->data)
return nfserr_jukebox; return nfserr_jukebox;
} }
#endif
if (bmval[2] & FATTR4_WORD2_MODE_UMASK) { if (bmval[2] & FATTR4_WORD2_MODE_UMASK) {
if (!umask) if (!umask)
goto xdr_error; goto xdr_error;
...@@ -1918,8 +1917,13 @@ nfsd4_decode_compound(struct nfsd4_compoundargs *argp) ...@@ -1918,8 +1917,13 @@ nfsd4_decode_compound(struct nfsd4_compoundargs *argp)
if (argp->taglen > NFSD4_MAX_TAGLEN) if (argp->taglen > NFSD4_MAX_TAGLEN)
goto xdr_error; goto xdr_error;
if (argp->opcnt > 100) /*
goto xdr_error; * NFS4ERR_RESOURCE is a more helpful error than GARBAGE_ARGS
* here, so we return success at the xdr level so that
 * nfsd4_proc can handle this as an NFS-level error.
*/
if (argp->opcnt > NFSD_MAX_OPS_PER_COMPOUND)
return 0;
if (argp->opcnt > ARRAY_SIZE(argp->iops)) { if (argp->opcnt > ARRAY_SIZE(argp->iops)) {
argp->ops = kzalloc(argp->opcnt * sizeof(*argp->ops), GFP_KERNEL); argp->ops = kzalloc(argp->opcnt * sizeof(*argp->ops), GFP_KERNEL);
...@@ -1991,7 +1995,7 @@ static __be32 *encode_change(__be32 *p, struct kstat *stat, struct inode *inode, ...@@ -1991,7 +1995,7 @@ static __be32 *encode_change(__be32 *p, struct kstat *stat, struct inode *inode,
*p++ = cpu_to_be32(convert_to_wallclock(exp->cd->flush_time)); *p++ = cpu_to_be32(convert_to_wallclock(exp->cd->flush_time));
*p++ = 0; *p++ = 0;
} else if (IS_I_VERSION(inode)) { } else if (IS_I_VERSION(inode)) {
p = xdr_encode_hyper(p, nfsd4_change_attribute(inode)); p = xdr_encode_hyper(p, nfsd4_change_attribute(stat, inode));
} else { } else {
*p++ = cpu_to_be32(stat->ctime.tv_sec); *p++ = cpu_to_be32(stat->ctime.tv_sec);
*p++ = cpu_to_be32(stat->ctime.tv_nsec); *p++ = cpu_to_be32(stat->ctime.tv_nsec);
......
...@@ -253,36 +253,20 @@ fh_clear_wcc(struct svc_fh *fhp) ...@@ -253,36 +253,20 @@ fh_clear_wcc(struct svc_fh *fhp)
* By using both ctime and the i_version counter we guarantee that as * By using both ctime and the i_version counter we guarantee that as
* long as time doesn't go backwards we never reuse an old value. * long as time doesn't go backwards we never reuse an old value.
*/ */
static inline u64 nfsd4_change_attribute(struct inode *inode) static inline u64 nfsd4_change_attribute(struct kstat *stat,
struct inode *inode)
{ {
u64 chattr; u64 chattr;
chattr = inode->i_ctime.tv_sec; chattr = stat->ctime.tv_sec;
chattr <<= 30; chattr <<= 30;
chattr += inode->i_ctime.tv_nsec; chattr += stat->ctime.tv_nsec;
chattr += inode_query_iversion(inode); chattr += inode_query_iversion(inode);
return chattr; return chattr;
} }
/* extern void fill_pre_wcc(struct svc_fh *fhp);
* Fill in the pre_op attr for the wcc data extern void fill_post_wcc(struct svc_fh *fhp);
*/
static inline void
fill_pre_wcc(struct svc_fh *fhp)
{
struct inode *inode;
inode = d_inode(fhp->fh_dentry);
if (!fhp->fh_pre_saved) {
fhp->fh_pre_mtime = inode->i_mtime;
fhp->fh_pre_ctime = inode->i_ctime;
fhp->fh_pre_size = inode->i_size;
fhp->fh_pre_change = nfsd4_change_attribute(inode);
fhp->fh_pre_saved = true;
}
}
extern void fill_post_wcc(struct svc_fh *);
#else #else
#define fh_clear_wcc(ignored) #define fh_clear_wcc(ignored)
#define fill_pre_wcc(ignored) #define fill_pre_wcc(ignored)
......
...@@ -188,6 +188,7 @@ encode_fattr(struct svc_rqst *rqstp, __be32 *p, struct svc_fh *fhp, ...@@ -188,6 +188,7 @@ encode_fattr(struct svc_rqst *rqstp, __be32 *p, struct svc_fh *fhp,
*p++ = htonl((u32) stat->ino); *p++ = htonl((u32) stat->ino);
*p++ = htonl((u32) stat->atime.tv_sec); *p++ = htonl((u32) stat->atime.tv_sec);
*p++ = htonl(stat->atime.tv_nsec ? stat->atime.tv_nsec / 1000 : 0); *p++ = htonl(stat->atime.tv_nsec ? stat->atime.tv_nsec / 1000 : 0);
time = stat->mtime;
lease_get_mtime(d_inode(dentry), &time); lease_get_mtime(d_inode(dentry), &time);
*p++ = htonl((u32) time.tv_sec); *p++ = htonl((u32) time.tv_sec);
*p++ = htonl(time.tv_nsec ? time.tv_nsec / 1000 : 0); *p++ = htonl(time.tv_nsec ? time.tv_nsec / 1000 : 0);
......
...@@ -185,8 +185,6 @@ extern void svc_rdma_wc_reg(struct ib_cq *, struct ib_wc *); ...@@ -185,8 +185,6 @@ extern void svc_rdma_wc_reg(struct ib_cq *, struct ib_wc *);
extern void svc_rdma_wc_read(struct ib_cq *, struct ib_wc *); extern void svc_rdma_wc_read(struct ib_cq *, struct ib_wc *);
extern void svc_rdma_wc_inv(struct ib_cq *, struct ib_wc *); extern void svc_rdma_wc_inv(struct ib_cq *, struct ib_wc *);
extern int svc_rdma_send(struct svcxprt_rdma *, struct ib_send_wr *); extern int svc_rdma_send(struct svcxprt_rdma *, struct ib_send_wr *);
extern int svc_rdma_post_recv(struct svcxprt_rdma *, gfp_t);
extern int svc_rdma_repost_recv(struct svcxprt_rdma *, gfp_t);
extern int svc_rdma_create_listen(struct svc_serv *, int, struct sockaddr *); extern int svc_rdma_create_listen(struct svc_serv *, int, struct sockaddr *);
extern struct svc_rdma_op_ctxt *svc_rdma_get_context(struct svcxprt_rdma *); extern struct svc_rdma_op_ctxt *svc_rdma_get_context(struct svcxprt_rdma *);
extern void svc_rdma_put_context(struct svc_rdma_op_ctxt *, int); extern void svc_rdma_put_context(struct svc_rdma_op_ctxt *, int);
......
...@@ -384,25 +384,11 @@ static int svc_partial_recvfrom(struct svc_rqst *rqstp, ...@@ -384,25 +384,11 @@ static int svc_partial_recvfrom(struct svc_rqst *rqstp,
static void svc_sock_setbufsize(struct socket *sock, unsigned int snd, static void svc_sock_setbufsize(struct socket *sock, unsigned int snd,
unsigned int rcv) unsigned int rcv)
{ {
#if 0
mm_segment_t oldfs;
oldfs = get_fs(); set_fs(KERNEL_DS);
sock_setsockopt(sock, SOL_SOCKET, SO_SNDBUF,
(char*)&snd, sizeof(snd));
sock_setsockopt(sock, SOL_SOCKET, SO_RCVBUF,
(char*)&rcv, sizeof(rcv));
#else
/* sock_setsockopt limits use to sysctl_?mem_max,
* which isn't acceptable. Until that is made conditional
* on not having CAP_SYS_RESOURCE or similar, we go direct...
* DaveM said I could!
*/
lock_sock(sock->sk); lock_sock(sock->sk);
sock->sk->sk_sndbuf = snd * 2; sock->sk->sk_sndbuf = snd * 2;
sock->sk->sk_rcvbuf = rcv * 2; sock->sk->sk_rcvbuf = rcv * 2;
sock->sk->sk_write_space(sock->sk); sock->sk->sk_write_space(sock->sk);
release_sock(sock->sk); release_sock(sock->sk);
#endif
} }
static int svc_sock_secure_port(struct svc_rqst *rqstp) static int svc_sock_secure_port(struct svc_rqst *rqstp)
......
...@@ -95,7 +95,6 @@ int svc_rdma_handle_bc_reply(struct rpc_xprt *xprt, __be32 *rdma_resp, ...@@ -95,7 +95,6 @@ int svc_rdma_handle_bc_reply(struct rpc_xprt *xprt, __be32 *rdma_resp,
out_notfound: out_notfound:
dprintk("svcrdma: unrecognized bc reply: xprt=%p, xid=%08x\n", dprintk("svcrdma: unrecognized bc reply: xprt=%p, xid=%08x\n",
xprt, be32_to_cpu(xid)); xprt, be32_to_cpu(xid));
goto out_unlock; goto out_unlock;
} }
...@@ -129,10 +128,6 @@ static int svc_rdma_bc_sendto(struct svcxprt_rdma *rdma, ...@@ -129,10 +128,6 @@ static int svc_rdma_bc_sendto(struct svcxprt_rdma *rdma,
if (ret < 0) if (ret < 0)
goto out_err; goto out_err;
ret = svc_rdma_repost_recv(rdma, GFP_NOIO);
if (ret)
goto out_err;
/* Bump page refcnt so Send completion doesn't release /* Bump page refcnt so Send completion doesn't release
* the rq_buffer before all retransmits are complete. * the rq_buffer before all retransmits are complete.
*/ */
......
...@@ -400,10 +400,6 @@ static void svc_rdma_send_error(struct svcxprt_rdma *xprt, ...@@ -400,10 +400,6 @@ static void svc_rdma_send_error(struct svcxprt_rdma *xprt,
struct page *page; struct page *page;
int ret; int ret;
ret = svc_rdma_repost_recv(xprt, GFP_KERNEL);
if (ret)
return;
page = alloc_page(GFP_KERNEL); page = alloc_page(GFP_KERNEL);
if (!page) if (!page)
return; return;
...@@ -554,8 +550,6 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp) ...@@ -554,8 +550,6 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
ret = svc_rdma_handle_bc_reply(xprt->xpt_bc_xprt, p, ret = svc_rdma_handle_bc_reply(xprt->xpt_bc_xprt, p,
&rqstp->rq_arg); &rqstp->rq_arg);
svc_rdma_put_context(ctxt, 0); svc_rdma_put_context(ctxt, 0);
if (ret)
goto repost;
return ret; return ret;
} }
...@@ -590,6 +584,5 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp) ...@@ -590,6 +584,5 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
out_drop: out_drop:
svc_rdma_put_context(ctxt, 1); svc_rdma_put_context(ctxt, 1);
repost: return 0;
return svc_rdma_repost_recv(rdma_xprt, GFP_KERNEL);
} }
...@@ -727,12 +727,16 @@ static int svc_rdma_build_normal_read_chunk(struct svc_rqst *rqstp, ...@@ -727,12 +727,16 @@ static int svc_rdma_build_normal_read_chunk(struct svc_rqst *rqstp,
head->arg.head[0].iov_len - info->ri_position; head->arg.head[0].iov_len - info->ri_position;
head->arg.head[0].iov_len = info->ri_position; head->arg.head[0].iov_len = info->ri_position;
/* Read chunk may need XDR roundup (see RFC 5666, s. 3.7). /* Read chunk may need XDR roundup (see RFC 8166, s. 3.4.5.2).
* *
* NFSv2/3 write decoders need the length of the tail to * If the client already rounded up the chunk length, the
* contain the size of the roundup padding. * length does not change. Otherwise, the length of the page
* list is increased to include XDR round-up.
*
* Currently these chunks always start at page offset 0,
* thus the rounded-up length never crosses a page boundary.
*/ */
head->arg.tail[0].iov_len += 4 - (info->ri_chunklen & 3); info->ri_chunklen = XDR_QUADLEN(info->ri_chunklen) << 2;
head->arg.page_len = info->ri_chunklen; head->arg.page_len = info->ri_chunklen;
head->arg.len += info->ri_chunklen; head->arg.len += info->ri_chunklen;
......
...@@ -674,9 +674,6 @@ int svc_rdma_sendto(struct svc_rqst *rqstp) ...@@ -674,9 +674,6 @@ int svc_rdma_sendto(struct svc_rqst *rqstp)
svc_rdma_xdr_encode_reply_chunk(rdma_resp, rp_ch, ret); svc_rdma_xdr_encode_reply_chunk(rdma_resp, rp_ch, ret);
} }
ret = svc_rdma_post_recv(rdma, GFP_KERNEL);
if (ret)
goto err1;
ret = svc_rdma_send_reply_msg(rdma, rdma_argp, rdma_resp, rqstp, ret = svc_rdma_send_reply_msg(rdma, rdma_argp, rdma_resp, rqstp,
wr_lst, rp_ch); wr_lst, rp_ch);
if (ret < 0) if (ret < 0)
...@@ -687,9 +684,6 @@ int svc_rdma_sendto(struct svc_rqst *rqstp) ...@@ -687,9 +684,6 @@ int svc_rdma_sendto(struct svc_rqst *rqstp)
if (ret != -E2BIG && ret != -EINVAL) if (ret != -E2BIG && ret != -EINVAL)
goto err1; goto err1;
ret = svc_rdma_post_recv(rdma, GFP_KERNEL);
if (ret)
goto err1;
ret = svc_rdma_send_error_msg(rdma, rdma_resp, rqstp); ret = svc_rdma_send_error_msg(rdma, rdma_resp, rqstp);
if (ret < 0) if (ret < 0)
goto err0; goto err0;
......
...@@ -58,6 +58,7 @@ ...@@ -58,6 +58,7 @@
#define RPCDBG_FACILITY RPCDBG_SVCXPRT #define RPCDBG_FACILITY RPCDBG_SVCXPRT
static int svc_rdma_post_recv(struct svcxprt_rdma *xprt);
static struct svcxprt_rdma *rdma_create_xprt(struct svc_serv *, int); static struct svcxprt_rdma *rdma_create_xprt(struct svc_serv *, int);
static struct svc_xprt *svc_rdma_create(struct svc_serv *serv, static struct svc_xprt *svc_rdma_create(struct svc_serv *serv,
struct net *net, struct net *net,
...@@ -320,6 +321,8 @@ static void svc_rdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc) ...@@ -320,6 +321,8 @@ static void svc_rdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc)
list_add_tail(&ctxt->list, &xprt->sc_rq_dto_q); list_add_tail(&ctxt->list, &xprt->sc_rq_dto_q);
spin_unlock(&xprt->sc_rq_dto_lock); spin_unlock(&xprt->sc_rq_dto_lock);
svc_rdma_post_recv(xprt);
set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags); set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
if (test_bit(RDMAXPRT_CONN_PENDING, &xprt->sc_flags)) if (test_bit(RDMAXPRT_CONN_PENDING, &xprt->sc_flags))
goto out; goto out;
...@@ -404,7 +407,8 @@ static struct svcxprt_rdma *rdma_create_xprt(struct svc_serv *serv, ...@@ -404,7 +407,8 @@ static struct svcxprt_rdma *rdma_create_xprt(struct svc_serv *serv,
return cma_xprt; return cma_xprt;
} }
int svc_rdma_post_recv(struct svcxprt_rdma *xprt, gfp_t flags) static int
svc_rdma_post_recv(struct svcxprt_rdma *xprt)
{ {
struct ib_recv_wr recv_wr, *bad_recv_wr; struct ib_recv_wr recv_wr, *bad_recv_wr;
struct svc_rdma_op_ctxt *ctxt; struct svc_rdma_op_ctxt *ctxt;
...@@ -423,7 +427,7 @@ int svc_rdma_post_recv(struct svcxprt_rdma *xprt, gfp_t flags) ...@@ -423,7 +427,7 @@ int svc_rdma_post_recv(struct svcxprt_rdma *xprt, gfp_t flags)
pr_err("svcrdma: Too many sges (%d)\n", sge_no); pr_err("svcrdma: Too many sges (%d)\n", sge_no);
goto err_put_ctxt; goto err_put_ctxt;
} }
page = alloc_page(flags); page = alloc_page(GFP_KERNEL);
if (!page) if (!page)
goto err_put_ctxt; goto err_put_ctxt;
ctxt->pages[sge_no] = page; ctxt->pages[sge_no] = page;
...@@ -459,21 +463,6 @@ int svc_rdma_post_recv(struct svcxprt_rdma *xprt, gfp_t flags) ...@@ -459,21 +463,6 @@ int svc_rdma_post_recv(struct svcxprt_rdma *xprt, gfp_t flags)
return -ENOMEM; return -ENOMEM;
} }
/*
 * Repost a single receive buffer on @xprt.
 *
 * On failure the transport is unusable: log the error, flag the
 * transport for closure, and return -ENOTCONN. Returns 0 on success.
 */
int svc_rdma_repost_recv(struct svcxprt_rdma *xprt, gfp_t flags)
{
	int ret = svc_rdma_post_recv(xprt, flags);

	if (!ret)
		return 0;

	pr_err("svcrdma: could not post a receive buffer, err=%d.\n",
	       ret);
	pr_err("svcrdma: closing transport %p.\n", xprt);
	set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
	return -ENOTCONN;
}
static void static void
svc_rdma_parse_connect_private(struct svcxprt_rdma *newxprt, svc_rdma_parse_connect_private(struct svcxprt_rdma *newxprt,
struct rdma_conn_param *param) struct rdma_conn_param *param)
...@@ -833,7 +822,7 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt) ...@@ -833,7 +822,7 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
/* Post receive buffers */ /* Post receive buffers */
for (i = 0; i < newxprt->sc_max_requests; i++) { for (i = 0; i < newxprt->sc_max_requests; i++) {
ret = svc_rdma_post_recv(newxprt, GFP_KERNEL); ret = svc_rdma_post_recv(newxprt);
if (ret) { if (ret) {
dprintk("svcrdma: failure posting receive buffers\n"); dprintk("svcrdma: failure posting receive buffers\n");
goto errout; goto errout;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment