Commit 5d22c5ab authored by Linus Torvalds

Merge tag 'nfsd-4.7' of git://linux-nfs.org/~bfields/linux

Pull nfsd updates from Bruce Fields:
 "A very quiet cycle for nfsd, mainly just an RDMA update from Chuck
  Lever"

* tag 'nfsd-4.7' of git://linux-nfs.org/~bfields/linux:
  sunrpc: fix stripping of padded MIC tokens
  svcrpc: autoload rdma module
  svcrdma: Generalize svc_rdma_xdr_decode_req()
  svcrdma: Eliminate code duplication in svc_rdma_recvfrom()
  svcrdma: Drain QP before freeing svcrdma_xprt
  svcrdma: Post Receives only for forward channel requests
  svcrdma: Remove superfluous line from rdma_read_chunks()
  svcrdma: svc_rdma_put_context() is invoked twice in Send error path
  svcrdma: Do not add XDR padding to xdr_buf page vector
  svcrdma: Support IPv6 with NFS/RDMA
  nfsd: handle seqid wraparound in nfsd4_preprocess_layout_stateid
  Remove unnecessary allocation
parents 0e01df10 c0cb8bf3
@@ -379,7 +379,7 @@ nfs3svc_decode_writeargs(struct svc_rqst *rqstp, __be32 *p,
          */
         hdr = (void*)p - rqstp->rq_arg.head[0].iov_base;
         dlen = rqstp->rq_arg.head[0].iov_len + rqstp->rq_arg.page_len
-                - hdr;
+                + rqstp->rq_arg.tail[0].iov_len - hdr;
         /*
          * Round the length of the data which was specified up to
          * the next multiple of XDR units and then compare that
...
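
Note: the fix above matters because an xdr_buf spreads a request across three regions (the head iovec, the page array, and the tail iovec), and WRITE payload can spill into the tail. A standalone sketch of the accounting follows; the struct is a reduced model of struct xdr_buf and the sizes are invented:

    #include <stdio.h>
    #include <stddef.h>

    /* Reduced model of the three data regions of a struct xdr_buf. */
    struct xdr_buf_model {
        size_t head_len;    /* head[0].iov_len */
        size_t page_len;    /* page_len */
        size_t tail_len;    /* tail[0].iov_len */
    };

    int main(void)
    {
        /* Invented sizes: 120-byte head, 4096 bytes of page data,
         * 16 bytes spilling into the tail, 100-byte header. */
        struct xdr_buf_model arg = { 120, 4096, 16 };
        size_t hdr = 100;   /* bytes preceding the write data */

        /* The old computation missed the tail and under-counted by 16: */
        printf("old dlen=%zu\n", arg.head_len + arg.page_len - hdr);
        /* The fixed computation counts all three regions: */
        printf("new dlen=%zu\n",
               arg.head_len + arg.page_len + arg.tail_len - hdr);
        return 0;
    }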
@@ -289,7 +289,7 @@ nfsd4_preprocess_layout_stateid(struct svc_rqst *rqstp,
         status = nfserr_bad_stateid;
         mutex_lock(&ls->ls_mutex);
-        if (stateid->si_generation > stid->sc_stateid.si_generation)
+        if (nfsd4_stateid_generation_after(stateid, &stid->sc_stateid))
                 goto out_unlock_stid;
         if (layout_type != ls->ls_layout_type)
                 goto out_unlock_stid;
...
@@ -4651,12 +4651,6 @@ grace_disallows_io(struct net *net, struct inode *inode)
         return opens_in_grace(net) && mandatory_lock(inode);
 }

-/* Returns true iff a is later than b: */
-static bool stateid_generation_after(stateid_t *a, stateid_t *b)
-{
-        return (s32)(a->si_generation - b->si_generation) > 0;
-}
-
 static __be32 check_stateid_generation(stateid_t *in, stateid_t *ref, bool has_session)
 {
         /*
@@ -4670,7 +4664,7 @@ static __be32 check_stateid_generation(stateid_t *in, stateid_t *ref, bool has_s
                 return nfs_ok;
         /* If the client sends us a stateid from the future, it's buggy: */
-        if (stateid_generation_after(in, ref))
+        if (nfsd4_stateid_generation_after(in, ref))
                 return nfserr_bad_stateid;
         /*
          * However, we could see a stateid from the past, even from a
...
@@ -573,6 +573,11 @@ enum nfsd4_cb_op {
         NFSPROC4_CLNT_CB_SEQUENCE,
 };

+/* Returns true iff a is later than b: */
+static inline bool nfsd4_stateid_generation_after(stateid_t *a, stateid_t *b)
+{
+        return (s32)(a->si_generation - b->si_generation) > 0;
+}
+
 struct nfsd4_compound_state;
 struct nfsd_net;
...
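
Note: the helper added above is classic serial-number arithmetic. si_generation increments without bound; the unsigned subtraction wraps mod 2^32, and reinterpreting the difference as s32 makes "a is later than b" mean "a is less than 2^31 steps ahead of b", which stays correct across seqid wraparound. A standalone sketch (the one-field stateid_t here is a simplification for illustration):

    #include <stdint.h>
    #include <stdio.h>

    /* Simplified stateid_t: only the generation counter matters here. */
    typedef struct { uint32_t si_generation; } stateid_t;

    /* Same comparison as nfsd4_stateid_generation_after(). */
    static int generation_after(const stateid_t *a, const stateid_t *b)
    {
        return (int32_t)(a->si_generation - b->si_generation) > 0;
    }

    int main(void)
    {
        stateid_t older = { 0xffffffffu };  /* about to wrap */
        stateid_t newer = { 1 };            /* wrapped past zero */

        /* prints 1: 1 - 0xffffffff wraps to 2, positive as int32_t,
         * so newer is correctly "after" older despite being smaller */
        printf("%d\n", generation_after(&newer, &older));
        /* prints 1 too: the naive ">" test gets this case wrong */
        printf("%d\n", older.si_generation > newer.si_generation);
        return 0;
    }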
@@ -199,7 +199,7 @@ extern int svc_rdma_handle_bc_reply(struct rpc_xprt *xprt,
                                     struct xdr_buf *rcvbuf);

 /* svc_rdma_marshal.c */
-extern int svc_rdma_xdr_decode_req(struct rpcrdma_msg *, struct svc_rqst *);
+extern int svc_rdma_xdr_decode_req(struct xdr_buf *);
 extern int svc_rdma_xdr_encode_error(struct svcxprt_rdma *,
                                      struct rpcrdma_msg *,
                                      enum rpcrdma_errcode, __be32 *);
...
@@ -569,10 +569,9 @@ gss_svc_searchbyctx(struct cache_detail *cd, struct xdr_netobj *handle)
         struct rsc *found;

         memset(&rsci, 0, sizeof(rsci));
-        if (dup_to_netobj(&rsci.handle, handle->data, handle->len))
-                return NULL;
+        rsci.handle.data = handle->data;
+        rsci.handle.len = handle->len;
         found = rsc_lookup(cd, &rsci);
-        rsc_free(&rsci);
         if (!found)
                 return NULL;
         if (cache_check(cd, &found->h, NULL))
@@ -857,8 +856,8 @@ unwrap_integ_data(struct svc_rqst *rqstp, struct xdr_buf *buf, u32 seq, struct g
                 goto out;
         if (svc_getnl(&buf->head[0]) != seq)
                 goto out;
-        /* trim off the mic at the end before returning */
-        xdr_buf_trim(buf, mic.len + 4);
+        /* trim off the mic and padding at the end before returning */
+        xdr_buf_trim(buf, round_up_to_quad(mic.len) + 4);
         stat = 0;
 out:
         kfree(mic.data);
...
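
Note: XDR encodes opaque data, the GSS MIC token included, padded out to 4-byte (quad) alignment, so trimming only mic.len + 4 bytes could leave up to three pad bytes behind in the buffer. round_up_to_quad() is the usual (len + 3) & ~3 rounding; a worked standalone example:

    #include <stdio.h>

    /* The usual XDR quad rounding, mirroring round_up_to_quad(). */
    static unsigned int round_up_to_quad(unsigned int len)
    {
        return (len + 3) & ~3u;
    }

    int main(void)
    {
        /* A 37-byte MIC is carried as a 4-byte length word plus
         * 40 padded bytes on the wire. */
        unsigned int mic_len = 37;

        printf("old trim: %u\n", mic_len + 4);                   /* 41 */
        printf("new trim: %u\n", round_up_to_quad(mic_len) + 4); /* 44 */
        return 0;
    }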
@@ -244,13 +244,12 @@ void svc_add_new_perm_xprt(struct svc_serv *serv, struct svc_xprt *new)
         svc_xprt_received(new);
 }

-int svc_create_xprt(struct svc_serv *serv, const char *xprt_name,
-                    struct net *net, const int family,
-                    const unsigned short port, int flags)
+int _svc_create_xprt(struct svc_serv *serv, const char *xprt_name,
+                     struct net *net, const int family,
+                     const unsigned short port, int flags)
 {
         struct svc_xprt_class *xcl;

-        dprintk("svc: creating transport %s[%d]\n", xprt_name, port);
         spin_lock(&svc_xprt_class_lock);
         list_for_each_entry(xcl, &svc_xprt_class_list, xcl_list) {
                 struct svc_xprt *newxprt;
@@ -274,12 +273,28 @@ int svc_create_xprt(struct svc_serv *serv, const char *xprt_name,
         }
  err:
         spin_unlock(&svc_xprt_class_lock);
-        dprintk("svc: transport %s not found\n", xprt_name);
         /* This errno is exposed to user space. Provide a reasonable
          * perror msg for a bad transport. */
         return -EPROTONOSUPPORT;
 }
+
+int svc_create_xprt(struct svc_serv *serv, const char *xprt_name,
+                    struct net *net, const int family,
+                    const unsigned short port, int flags)
+{
+        int err;
+
+        dprintk("svc: creating transport %s[%d]\n", xprt_name, port);
+        err = _svc_create_xprt(serv, xprt_name, net, family, port, flags);
+        if (err == -EPROTONOSUPPORT) {
+                request_module("svc%s", xprt_name);
+                err = _svc_create_xprt(serv, xprt_name, net, family, port, flags);
+        }
+        if (err)
+                dprintk("svc: transport %s not found, err %d\n",
+                        xprt_name, err);
+        return err;
+}
 EXPORT_SYMBOL_GPL(svc_create_xprt);

 /*
...
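
Note: the refactoring above is the common probe/load/retry idiom. The first _svc_create_xprt() call fails with -EPROTONOSUPPORT when no registered transport class matches, request_module("svc%s", xprt_name) asks the module loader for the matching alias (for "rdma", the "svcrdma" alias), and the lookup is retried exactly once. A standalone sketch with hypothetical stand-ins for the two kernel calls:

    #include <errno.h>
    #include <stdio.h>

    static int provider_loaded;     /* stands in for a registered class */

    /* Hypothetical stand-in for _svc_create_xprt(). */
    static int try_create(const char *name)
    {
        printf("probing %s\n", name);
        return provider_loaded ? 0 : -EPROTONOSUPPORT;
    }

    /* Hypothetical stand-in for request_module("svc%s", name). */
    static void load_provider(const char *name)
    {
        printf("loading svc%s\n", name);
        provider_loaded = 1;
    }

    static int create_with_autoload(const char *name)
    {
        int err = try_create(name);

        if (err == -EPROTONOSUPPORT) {  /* not registered yet */
            load_provider(name);
            err = try_create(name);     /* retry exactly once */
        }
        return err;
    }

    int main(void)
    {
        printf("result: %d\n", create_with_autoload("rdma"));   /* 0 */
        return 0;
    }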
@@ -145,19 +145,32 @@ static __be32 *decode_reply_array(__be32 *va, __be32 *vaend)
         return (__be32 *)&ary->wc_array[nchunks];
 }

-int svc_rdma_xdr_decode_req(struct rpcrdma_msg *rmsgp, struct svc_rqst *rqstp)
+/**
+ * svc_rdma_xdr_decode_req - Parse incoming RPC-over-RDMA header
+ * @rq_arg: Receive buffer
+ *
+ * On entry, xdr->head[0].iov_base points to first byte in the
+ * RPC-over-RDMA header.
+ *
+ * On successful exit, head[0] points to first byte past the
+ * RPC-over-RDMA header. For RDMA_MSG, this is the RPC message.
+ * The length of the RPC-over-RDMA header is returned.
+ */
+int svc_rdma_xdr_decode_req(struct xdr_buf *rq_arg)
 {
+        struct rpcrdma_msg *rmsgp;
         __be32 *va, *vaend;
         unsigned int len;
         u32 hdr_len;

         /* Verify that there's enough bytes for header + something */
-        if (rqstp->rq_arg.len <= RPCRDMA_HDRLEN_ERR) {
+        if (rq_arg->len <= RPCRDMA_HDRLEN_ERR) {
                 dprintk("svcrdma: header too short = %d\n",
-                        rqstp->rq_arg.len);
+                        rq_arg->len);
                 return -EINVAL;
         }

+        rmsgp = (struct rpcrdma_msg *)rq_arg->head[0].iov_base;
         if (rmsgp->rm_vers != rpcrdma_version) {
                 dprintk("%s: bad version %u\n", __func__,
                         be32_to_cpu(rmsgp->rm_vers));
@@ -189,10 +202,10 @@ int svc_rdma_xdr_decode_req(struct rpcrdma_msg *rmsgp, struct svc_rqst *rqstp)
                         be32_to_cpu(rmsgp->rm_body.rm_padded.rm_thresh);
                 va = &rmsgp->rm_body.rm_padded.rm_pempty[4];
-                rqstp->rq_arg.head[0].iov_base = va;
+                rq_arg->head[0].iov_base = va;
                 len = (u32)((unsigned long)va - (unsigned long)rmsgp);
-                rqstp->rq_arg.head[0].iov_len -= len;
-                if (len > rqstp->rq_arg.len)
+                rq_arg->head[0].iov_len -= len;
+                if (len > rq_arg->len)
                         return -EINVAL;
                 return len;
         default:
@@ -205,7 +218,7 @@ int svc_rdma_xdr_decode_req(struct rpcrdma_msg *rmsgp, struct svc_rqst *rqstp)
          * chunk list and a reply chunk list.
          */
         va = &rmsgp->rm_body.rm_chunks[0];
-        vaend = (__be32 *)((unsigned long)rmsgp + rqstp->rq_arg.len);
+        vaend = (__be32 *)((unsigned long)rmsgp + rq_arg->len);
         va = decode_read_list(va, vaend);
         if (!va) {
                 dprintk("svcrdma: failed to decode read list\n");
@@ -222,10 +235,9 @@ int svc_rdma_xdr_decode_req(struct rpcrdma_msg *rmsgp, struct svc_rqst *rqstp)
                 return -EINVAL;
         }

-        rqstp->rq_arg.head[0].iov_base = va;
+        rq_arg->head[0].iov_base = va;
         hdr_len = (unsigned long)va - (unsigned long)rmsgp;
-        rqstp->rq_arg.head[0].iov_len -= hdr_len;
-
+        rq_arg->head[0].iov_len -= hdr_len;
         return hdr_len;
 }
...
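
Note: both return paths above adjust head[0] the same way: advance iov_base past the RPC-over-RDMA header, shrink iov_len by the same amount, and hand the header length back to the caller. The consume-a-prefix iovec pattern in isolation (the struct is a reduced kvec and the 28-byte header length is invented):

    #include <stdio.h>

    struct kvec_model {         /* reduced struct kvec */
        void *iov_base;
        size_t iov_len;
    };

    /* Step an iovec past hdr_len consumed bytes, as the decoder
     * does with rq_arg->head[0], and report how much was eaten. */
    static size_t consume_prefix(struct kvec_model *iov, size_t hdr_len)
    {
        iov->iov_base = (char *)iov->iov_base + hdr_len;
        iov->iov_len -= hdr_len;
        return hdr_len;
    }

    int main(void)
    {
        char buf[256];
        struct kvec_model head = { buf, sizeof(buf) };

        /* Pretend parsing found a 28-byte RPC-over-RDMA header. */
        size_t hdr = consume_prefix(&head, 28);

        printf("hdr=%zu remaining=%zu\n", hdr, head.iov_len); /* 28 228 */
        return 0;
    }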
@@ -447,10 +447,8 @@ static int rdma_read_chunks(struct svcxprt_rdma *xprt,
         head->arg.len = rqstp->rq_arg.len;
         head->arg.buflen = rqstp->rq_arg.buflen;

-        ch = (struct rpcrdma_read_chunk *)&rmsgp->rm_body.rm_chunks[0];
-        position = be32_to_cpu(ch->rc_position);
-
         /* RDMA_NOMSG: RDMA READ data should land just after RDMA RECV data */
+        position = be32_to_cpu(ch->rc_position);
         if (position == 0) {
                 head->arg.pages = &head->pages[0];
                 page_offset = head->byte_len;
@@ -488,7 +486,7 @@ static int rdma_read_chunks(struct svcxprt_rdma *xprt,
                 if (page_offset & 3) {
                         u32 pad = 4 - (page_offset & 3);

-                        head->arg.page_len += pad;
+                        head->arg.tail[0].iov_len += pad;
                         head->arg.len += pad;
                         head->arg.buflen += pad;
                         page_offset += pad;
@@ -510,11 +508,10 @@ static int rdma_read_chunks(struct svcxprt_rdma *xprt,
         return ret;
 }

-static int rdma_read_complete(struct svc_rqst *rqstp,
-                              struct svc_rdma_op_ctxt *head)
+static void rdma_read_complete(struct svc_rqst *rqstp,
+                               struct svc_rdma_op_ctxt *head)
 {
         int page_no;
-        int ret;

         /* Copy RPC pages */
         for (page_no = 0; page_no < head->count; page_no++) {
@@ -550,23 +547,6 @@ static int rdma_read_complete(struct svc_rqst *rqstp,
         rqstp->rq_arg.tail[0] = head->arg.tail[0];
         rqstp->rq_arg.len = head->arg.len;
         rqstp->rq_arg.buflen = head->arg.buflen;
-
-        /* Free the context */
-        svc_rdma_put_context(head, 0);
-
-        /* XXX: What should this be? */
-        rqstp->rq_prot = IPPROTO_MAX;
-        svc_xprt_copy_addrs(rqstp, rqstp->rq_xprt);
-
-        ret = rqstp->rq_arg.head[0].iov_len
-                + rqstp->rq_arg.page_len
-                + rqstp->rq_arg.tail[0].iov_len;
-        dprintk("svcrdma: deferred read ret=%d, rq_arg.len=%u, "
-                "rq_arg.head[0].iov_base=%p, rq_arg.head[0].iov_len=%zu\n",
-                ret, rqstp->rq_arg.len, rqstp->rq_arg.head[0].iov_base,
-                rqstp->rq_arg.head[0].iov_len);
-
-        return ret;
 }

 /* By convention, backchannel calls arrive via rdma_msg type
@@ -624,7 +604,8 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
                                   dto_q);
                 list_del_init(&ctxt->dto_q);
                 spin_unlock_bh(&rdma_xprt->sc_rq_dto_lock);
-                return rdma_read_complete(rqstp, ctxt);
+                rdma_read_complete(rqstp, ctxt);
+                goto complete;
         } else if (!list_empty(&rdma_xprt->sc_rq_dto_q)) {
                 ctxt = list_entry(rdma_xprt->sc_rq_dto_q.next,
                                   struct svc_rdma_op_ctxt,
@@ -655,7 +636,7 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)

         /* Decode the RDMA header. */
         rmsgp = (struct rpcrdma_msg *)rqstp->rq_arg.head[0].iov_base;
-        ret = svc_rdma_xdr_decode_req(rmsgp, rqstp);
+        ret = svc_rdma_xdr_decode_req(&rqstp->rq_arg);
         if (ret < 0)
                 goto out_err;
         if (ret == 0)
@@ -682,6 +663,7 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
                 return 0;
         }

+complete:
         ret = rqstp->rq_arg.head[0].iov_len
                 + rqstp->rq_arg.page_len
                 + rqstp->rq_arg.tail[0].iov_len;
...
@@ -463,25 +463,21 @@ static int send_reply(struct svcxprt_rdma *rdma,
                       struct svc_rqst *rqstp,
                       struct page *page,
                       struct rpcrdma_msg *rdma_resp,
-                      struct svc_rdma_op_ctxt *ctxt,
                       struct svc_rdma_req_map *vec,
                       int byte_count)
 {
+        struct svc_rdma_op_ctxt *ctxt;
         struct ib_send_wr send_wr;
         u32 xdr_off;
         int sge_no;
         int sge_bytes;
         int page_no;
         int pages;
-        int ret;
-
-        ret = svc_rdma_repost_recv(rdma, GFP_KERNEL);
-        if (ret) {
-                svc_rdma_put_context(ctxt, 0);
-                return -ENOTCONN;
-        }
+        int ret = -EIO;

         /* Prepare the context */
+        ctxt = svc_rdma_get_context(rdma);
+        ctxt->direction = DMA_TO_DEVICE;
         ctxt->pages[0] = page;
         ctxt->count = 1;
@@ -565,8 +561,7 @@ static int send_reply(struct svcxprt_rdma *rdma,
  err:
         svc_rdma_unmap_dma(ctxt);
         svc_rdma_put_context(ctxt, 1);
-        pr_err("svcrdma: failed to send reply, rc=%d\n", ret);
-        return -EIO;
+        return ret;
 }

 void svc_rdma_prep_reply_hdr(struct svc_rqst *rqstp)
@@ -585,7 +580,6 @@ int svc_rdma_sendto(struct svc_rqst *rqstp)
         int ret;
         int inline_bytes;
         struct page *res_page;
-        struct svc_rdma_op_ctxt *ctxt;
         struct svc_rdma_req_map *vec;

         dprintk("svcrdma: sending response for rqstp=%p\n", rqstp);
@@ -598,8 +592,6 @@ int svc_rdma_sendto(struct svc_rqst *rqstp)
         rp_ary = svc_rdma_get_reply_array(rdma_argp, wr_ary);

         /* Build an req vec for the XDR */
-        ctxt = svc_rdma_get_context(rdma);
-        ctxt->direction = DMA_TO_DEVICE;
         vec = svc_rdma_get_req_map(rdma);
         ret = svc_rdma_map_xdr(rdma, &rqstp->rq_res, vec, wr_ary != NULL);
         if (ret)
@@ -635,7 +627,12 @@ int svc_rdma_sendto(struct svc_rqst *rqstp)
                 inline_bytes -= ret;
         }

-        ret = send_reply(rdma, rqstp, res_page, rdma_resp, ctxt, vec,
+        /* Post a fresh Receive buffer _before_ sending the reply */
+        ret = svc_rdma_post_recv(rdma, GFP_KERNEL);
+        if (ret)
+                goto err1;
+
+        ret = send_reply(rdma, rqstp, res_page, rdma_resp, vec,
                          inline_bytes);
         if (ret < 0)
                 goto err1;
@@ -648,7 +645,8 @@ int svc_rdma_sendto(struct svc_rqst *rqstp)
         put_page(res_page);
  err0:
         svc_rdma_put_req_map(rdma, vec);
-        svc_rdma_put_context(ctxt, 0);
+        pr_err("svcrdma: Could not send reply, err=%d. Closing transport.\n",
+               ret);
         set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
         return -ENOTCONN;
 }
...
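
Note: posting the Receive before posting the Send closes a race. Once the reply leaves, the client may immediately transmit its next request; if the server's receive queue is empty at that moment, an RC queue pair answers with an RNR NAK. A toy accounting model of the invariant (names and counts are invented):

    #include <stdio.h>

    static int posted_receives;     /* buffers in the receive queue */

    static void client_sends_request(void)
    {
        if (posted_receives == 0)
            printf("RNR: no receive buffer posted!\n");
        else
            posted_receives--;
    }

    int main(void)
    {
        posted_receives = 1;
        client_sends_request();     /* consumes the only buffer */

        /* Fixed ordering, as in svc_rdma_sendto() above: repost
         * before the reply Send, so the count is never zero when
         * the client can legally transmit again. */
        posted_receives++;          /* svc_rdma_post_recv() */
        printf("reply sent, receives posted: %d\n", posted_receives);

        client_sends_request();     /* next request lands safely */
        return 0;
    }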
@@ -789,7 +789,7 @@ static struct svc_xprt *svc_rdma_create(struct svc_serv *serv,
         int ret;

         dprintk("svcrdma: Creating RDMA socket\n");
-        if (sa->sa_family != AF_INET) {
+        if ((sa->sa_family != AF_INET) && (sa->sa_family != AF_INET6)) {
                 dprintk("svcrdma: Address family %d is not supported.\n", sa->sa_family);
                 return ERR_PTR(-EAFNOSUPPORT);
         }
@@ -805,6 +805,16 @@ static struct svc_xprt *svc_rdma_create(struct svc_serv *serv,
                 goto err0;
         }

+        /* Allow both IPv4 and IPv6 sockets to bind a single port
+         * at the same time.
+         */
+#if IS_ENABLED(CONFIG_IPV6)
+        ret = rdma_set_afonly(listen_id, 1);
+        if (ret) {
+                dprintk("svcrdma: rdma_set_afonly failed = %d\n", ret);
+                goto err1;
+        }
+#endif
         ret = rdma_bind_addr(listen_id, sa);
         if (ret) {
                 dprintk("svcrdma: rdma_bind_addr failed = %d\n", ret);
@@ -1073,7 +1083,7 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
                 newxprt->sc_dev_caps |= SVCRDMA_DEVCAP_READ_W_INV;

         /* Post receive buffers */
-        for (i = 0; i < newxprt->sc_rq_depth; i++) {
+        for (i = 0; i < newxprt->sc_max_requests; i++) {
                 ret = svc_rdma_post_recv(newxprt, GFP_KERNEL);
                 if (ret) {
                         dprintk("svcrdma: failure posting receive buffers\n");
@@ -1170,6 +1180,9 @@ static void __svc_rdma_free(struct work_struct *work)

         dprintk("svcrdma: %s(%p)\n", __func__, rdma);

+        if (rdma->sc_qp && !IS_ERR(rdma->sc_qp))
+                ib_drain_qp(rdma->sc_qp);
+
         /* We should only be called from kref_put */
         if (atomic_read(&xprt->xpt_ref.refcount) != 0)
                 pr_err("svcrdma: sc_xprt still in use? (%d)\n",
...