Commit 4866073e authored by Chuck Lever, committed by J. Bruce Fields

svcrdma: Use llist for managing cache of recv_ctxts

Use a wait-free mechanism for managing the svc_rdma_recv_ctxts free
list. Subsequently, sc_recv_lock can be eliminated.
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: J. Bruce Fields <bfields@redhat.com>
parent d6dfe43e
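
Editorial note: the sketch below is illustrative only and is not part of this commit. It shows the same llist-based free-list caching pattern the commit message describes, using hypothetical names (demo_ctxt, demo_cache, demo_get, demo_put); only the llist primitives (LLIST_HEAD, llist_add, llist_del_first, llist_entry) are the real kernel API.

/*
 * Illustrative sketch: a free-object cache built on a lock-free llist.
 * Hypothetical names; not the svcrdma code.
 */
#include <linux/llist.h>
#include <linux/slab.h>

struct demo_ctxt {
	struct llist_node	node;	/* links a free object into the cache */
	/* ... payload fields ... */
};

static LLIST_HEAD(demo_cache);		/* wait-free free list, no lock */

static struct demo_ctxt *demo_get(void)
{
	struct llist_node *n;

	/* Pop one cached object; safe against concurrent llist_add(). */
	n = llist_del_first(&demo_cache);
	if (n)
		return llist_entry(n, struct demo_ctxt, node);

	/* Cache empty: fall back to a fresh allocation. */
	return kzalloc(sizeof(struct demo_ctxt), GFP_KERNEL);
}

static void demo_put(struct demo_ctxt *ctxt)
{
	/* Return the object to the cache; no spinlock needed. */
	llist_add(&ctxt->node, &demo_cache);
}
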
--- a/include/linux/sunrpc/svc_rdma.h
+++ b/include/linux/sunrpc/svc_rdma.h
@@ -42,6 +42,7 @@
 #ifndef SVC_RDMA_H
 #define SVC_RDMA_H
+#include <linux/llist.h>
 #include <linux/sunrpc/xdr.h>
 #include <linux/sunrpc/svcsock.h>
 #include <linux/sunrpc/rpc_rdma.h>
@@ -107,8 +108,7 @@ struct svcxprt_rdma {
 	struct list_head     sc_read_complete_q;
 	struct work_struct   sc_work;
 
-	spinlock_t	     sc_recv_lock;
-	struct list_head     sc_recv_ctxts;
+	struct llist_head    sc_recv_ctxts;
 };
 /* sc_flags */
 #define RDMAXPRT_CONN_PENDING	3
@@ -125,6 +125,7 @@ enum {
 #define RPCSVC_MAXPAYLOAD_RDMA	RPCSVC_MAXPAYLOAD
 
 struct svc_rdma_recv_ctxt {
+	struct llist_node	rc_node;
 	struct list_head	rc_list;
 	struct ib_recv_wr	rc_recv_wr;
 	struct ib_cqe		rc_cqe;
--- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
@@ -172,9 +172,10 @@ static void svc_rdma_recv_ctxt_destroy(struct svcxprt_rdma *rdma,
 void svc_rdma_recv_ctxts_destroy(struct svcxprt_rdma *rdma)
 {
 	struct svc_rdma_recv_ctxt *ctxt;
+	struct llist_node *node;
 
-	while ((ctxt = svc_rdma_next_recv_ctxt(&rdma->sc_recv_ctxts))) {
-		list_del(&ctxt->rc_list);
+	while ((node = llist_del_first(&rdma->sc_recv_ctxts))) {
+		ctxt = llist_entry(node, struct svc_rdma_recv_ctxt, rc_node);
 		svc_rdma_recv_ctxt_destroy(rdma, ctxt);
 	}
 }
@@ -183,21 +184,18 @@ static struct svc_rdma_recv_ctxt *
 svc_rdma_recv_ctxt_get(struct svcxprt_rdma *rdma)
 {
 	struct svc_rdma_recv_ctxt *ctxt;
+	struct llist_node *node;
 
-	spin_lock(&rdma->sc_recv_lock);
-	ctxt = svc_rdma_next_recv_ctxt(&rdma->sc_recv_ctxts);
-	if (!ctxt)
+	node = llist_del_first(&rdma->sc_recv_ctxts);
+	if (!node)
 		goto out_empty;
-	list_del(&ctxt->rc_list);
-	spin_unlock(&rdma->sc_recv_lock);
+	ctxt = llist_entry(node, struct svc_rdma_recv_ctxt, rc_node);
 
 out:
 	ctxt->rc_page_count = 0;
 	return ctxt;
 
 out_empty:
-	spin_unlock(&rdma->sc_recv_lock);
 	ctxt = svc_rdma_recv_ctxt_alloc(rdma);
 	if (!ctxt)
 		return NULL;
@@ -218,11 +216,9 @@ void svc_rdma_recv_ctxt_put(struct svcxprt_rdma *rdma,
 	for (i = 0; i < ctxt->rc_page_count; i++)
 		put_page(ctxt->rc_pages[i]);
 
-	if (!ctxt->rc_temp) {
-		spin_lock(&rdma->sc_recv_lock);
-		list_add(&ctxt->rc_list, &rdma->sc_recv_ctxts);
-		spin_unlock(&rdma->sc_recv_lock);
-	} else
+	if (!ctxt->rc_temp)
+		llist_add(&ctxt->rc_node, &rdma->sc_recv_ctxts);
+	else
 		svc_rdma_recv_ctxt_destroy(rdma, ctxt);
 }
--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
@@ -140,14 +140,13 @@ static struct svcxprt_rdma *svc_rdma_create_xprt(struct svc_serv *serv,
 	INIT_LIST_HEAD(&cma_xprt->sc_rq_dto_q);
 	INIT_LIST_HEAD(&cma_xprt->sc_read_complete_q);
 	INIT_LIST_HEAD(&cma_xprt->sc_send_ctxts);
-	INIT_LIST_HEAD(&cma_xprt->sc_recv_ctxts);
+	init_llist_head(&cma_xprt->sc_recv_ctxts);
 	INIT_LIST_HEAD(&cma_xprt->sc_rw_ctxts);
 	init_waitqueue_head(&cma_xprt->sc_send_wait);
 
 	spin_lock_init(&cma_xprt->sc_lock);
 	spin_lock_init(&cma_xprt->sc_rq_dto_lock);
 	spin_lock_init(&cma_xprt->sc_send_lock);
-	spin_lock_init(&cma_xprt->sc_recv_lock);
 	spin_lock_init(&cma_xprt->sc_rw_ctxt_lock);
 
 	/*
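
Editorial note on the locking rules this conversion relies on (general llist behavior, not stated in the commit itself): llist_add() may be called concurrently from any number of contexts and is safe against a concurrent llist_del_first(); only multiple llist_del_first()/llist_del_all() callers need to be serialized against each other. That is why sc_recv_lock and its spin_lock_init() can be dropped, while init_llist_head() simply sets the head pointer to NULL. As a hypothetical alternative to the llist_del_first() teardown loop above, the whole cache could also be drained in one atomic exchange; the sketch below reuses the demo_* names from the earlier illustration.

/*
 * Hypothetical alternative teardown: detach the entire list at once
 * with llist_del_all() and walk it safely while freeing entries.
 */
static void demo_cache_destroy(void)
{
	struct demo_ctxt *ctxt, *next;
	struct llist_node *list;

	list = llist_del_all(&demo_cache);	/* atomically empty the cache */
	llist_for_each_entry_safe(ctxt, next, list, node)
		kfree(ctxt);
}
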