Commit a3ab867f authored by Chuck Lever, committed by J. Bruce Fields

svcrdma: Combine list fields in struct svc_rdma_op_ctxt

Clean up: The free list and the dto_q list fields are never used at
the same time. Reduce the size of struct svc_rdma_op_ctxt by
combining these fields.
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: J. Bruce Fields <bfields@redhat.com>
parent aba7d14b
...@@ -70,7 +70,7 @@ extern atomic_t rdma_stat_sq_prod; ...@@ -70,7 +70,7 @@ extern atomic_t rdma_stat_sq_prod;
* completes. * completes.
*/ */
struct svc_rdma_op_ctxt { struct svc_rdma_op_ctxt {
struct list_head free; struct list_head list;
struct svc_rdma_op_ctxt *read_hdr; struct svc_rdma_op_ctxt *read_hdr;
struct svc_rdma_fastreg_mr *frmr; struct svc_rdma_fastreg_mr *frmr;
int hdr_count; int hdr_count;
...@@ -78,7 +78,6 @@ struct svc_rdma_op_ctxt { ...@@ -78,7 +78,6 @@ struct svc_rdma_op_ctxt {
struct ib_cqe cqe; struct ib_cqe cqe;
struct ib_cqe reg_cqe; struct ib_cqe reg_cqe;
struct ib_cqe inv_cqe; struct ib_cqe inv_cqe;
struct list_head dto_q;
u32 byte_len; u32 byte_len;
u32 position; u32 position;
struct svcxprt_rdma *xprt; struct svcxprt_rdma *xprt;
......
...@@ -608,18 +608,16 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp) ...@@ -608,18 +608,16 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
spin_lock_bh(&rdma_xprt->sc_rq_dto_lock); spin_lock_bh(&rdma_xprt->sc_rq_dto_lock);
if (!list_empty(&rdma_xprt->sc_read_complete_q)) { if (!list_empty(&rdma_xprt->sc_read_complete_q)) {
ctxt = list_entry(rdma_xprt->sc_read_complete_q.next, ctxt = list_first_entry(&rdma_xprt->sc_read_complete_q,
struct svc_rdma_op_ctxt, struct svc_rdma_op_ctxt, list);
dto_q); list_del(&ctxt->list);
list_del_init(&ctxt->dto_q);
spin_unlock_bh(&rdma_xprt->sc_rq_dto_lock); spin_unlock_bh(&rdma_xprt->sc_rq_dto_lock);
rdma_read_complete(rqstp, ctxt); rdma_read_complete(rqstp, ctxt);
goto complete; goto complete;
} else if (!list_empty(&rdma_xprt->sc_rq_dto_q)) { } else if (!list_empty(&rdma_xprt->sc_rq_dto_q)) {
ctxt = list_entry(rdma_xprt->sc_rq_dto_q.next, ctxt = list_first_entry(&rdma_xprt->sc_rq_dto_q,
struct svc_rdma_op_ctxt, struct svc_rdma_op_ctxt, list);
dto_q); list_del(&ctxt->list);
list_del_init(&ctxt->dto_q);
} else { } else {
atomic_inc(&rdma_stat_rq_starve); atomic_inc(&rdma_stat_rq_starve);
clear_bit(XPT_DATA, &xprt->xpt_flags); clear_bit(XPT_DATA, &xprt->xpt_flags);
......
...@@ -157,8 +157,7 @@ static struct svc_rdma_op_ctxt *alloc_ctxt(struct svcxprt_rdma *xprt, ...@@ -157,8 +157,7 @@ static struct svc_rdma_op_ctxt *alloc_ctxt(struct svcxprt_rdma *xprt,
ctxt = kmalloc(sizeof(*ctxt), flags); ctxt = kmalloc(sizeof(*ctxt), flags);
if (ctxt) { if (ctxt) {
ctxt->xprt = xprt; ctxt->xprt = xprt;
INIT_LIST_HEAD(&ctxt->free); INIT_LIST_HEAD(&ctxt->list);
INIT_LIST_HEAD(&ctxt->dto_q);
} }
return ctxt; return ctxt;
} }
...@@ -180,7 +179,7 @@ static bool svc_rdma_prealloc_ctxts(struct svcxprt_rdma *xprt) ...@@ -180,7 +179,7 @@ static bool svc_rdma_prealloc_ctxts(struct svcxprt_rdma *xprt)
dprintk("svcrdma: No memory for RDMA ctxt\n"); dprintk("svcrdma: No memory for RDMA ctxt\n");
return false; return false;
} }
list_add(&ctxt->free, &xprt->sc_ctxts); list_add(&ctxt->list, &xprt->sc_ctxts);
} }
return true; return true;
} }
...@@ -195,8 +194,8 @@ struct svc_rdma_op_ctxt *svc_rdma_get_context(struct svcxprt_rdma *xprt) ...@@ -195,8 +194,8 @@ struct svc_rdma_op_ctxt *svc_rdma_get_context(struct svcxprt_rdma *xprt)
goto out_empty; goto out_empty;
ctxt = list_first_entry(&xprt->sc_ctxts, ctxt = list_first_entry(&xprt->sc_ctxts,
struct svc_rdma_op_ctxt, free); struct svc_rdma_op_ctxt, list);
list_del_init(&ctxt->free); list_del(&ctxt->list);
spin_unlock_bh(&xprt->sc_ctxt_lock); spin_unlock_bh(&xprt->sc_ctxt_lock);
out: out:
...@@ -256,7 +255,7 @@ void svc_rdma_put_context(struct svc_rdma_op_ctxt *ctxt, int free_pages) ...@@ -256,7 +255,7 @@ void svc_rdma_put_context(struct svc_rdma_op_ctxt *ctxt, int free_pages)
spin_lock_bh(&xprt->sc_ctxt_lock); spin_lock_bh(&xprt->sc_ctxt_lock);
xprt->sc_ctxt_used--; xprt->sc_ctxt_used--;
list_add(&ctxt->free, &xprt->sc_ctxts); list_add(&ctxt->list, &xprt->sc_ctxts);
spin_unlock_bh(&xprt->sc_ctxt_lock); spin_unlock_bh(&xprt->sc_ctxt_lock);
} }
...@@ -266,8 +265,8 @@ static void svc_rdma_destroy_ctxts(struct svcxprt_rdma *xprt) ...@@ -266,8 +265,8 @@ static void svc_rdma_destroy_ctxts(struct svcxprt_rdma *xprt)
struct svc_rdma_op_ctxt *ctxt; struct svc_rdma_op_ctxt *ctxt;
ctxt = list_first_entry(&xprt->sc_ctxts, ctxt = list_first_entry(&xprt->sc_ctxts,
struct svc_rdma_op_ctxt, free); struct svc_rdma_op_ctxt, list);
list_del(&ctxt->free); list_del(&ctxt->list);
kfree(ctxt); kfree(ctxt);
} }
} }
...@@ -404,7 +403,7 @@ static void svc_rdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc) ...@@ -404,7 +403,7 @@ static void svc_rdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc)
/* All wc fields are now known to be valid */ /* All wc fields are now known to be valid */
ctxt->byte_len = wc->byte_len; ctxt->byte_len = wc->byte_len;
spin_lock(&xprt->sc_rq_dto_lock); spin_lock(&xprt->sc_rq_dto_lock);
list_add_tail(&ctxt->dto_q, &xprt->sc_rq_dto_q); list_add_tail(&ctxt->list, &xprt->sc_rq_dto_q);
spin_unlock(&xprt->sc_rq_dto_lock); spin_unlock(&xprt->sc_rq_dto_lock);
set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags); set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
...@@ -525,7 +524,7 @@ void svc_rdma_wc_read(struct ib_cq *cq, struct ib_wc *wc) ...@@ -525,7 +524,7 @@ void svc_rdma_wc_read(struct ib_cq *cq, struct ib_wc *wc)
read_hdr = ctxt->read_hdr; read_hdr = ctxt->read_hdr;
spin_lock(&xprt->sc_rq_dto_lock); spin_lock(&xprt->sc_rq_dto_lock);
list_add_tail(&read_hdr->dto_q, list_add_tail(&read_hdr->list,
&xprt->sc_read_complete_q); &xprt->sc_read_complete_q);
spin_unlock(&xprt->sc_rq_dto_lock); spin_unlock(&xprt->sc_rq_dto_lock);
...@@ -1213,20 +1212,18 @@ static void __svc_rdma_free(struct work_struct *work) ...@@ -1213,20 +1212,18 @@ static void __svc_rdma_free(struct work_struct *work)
*/ */
while (!list_empty(&rdma->sc_read_complete_q)) { while (!list_empty(&rdma->sc_read_complete_q)) {
struct svc_rdma_op_ctxt *ctxt; struct svc_rdma_op_ctxt *ctxt;
ctxt = list_entry(rdma->sc_read_complete_q.next, ctxt = list_first_entry(&rdma->sc_read_complete_q,
struct svc_rdma_op_ctxt, struct svc_rdma_op_ctxt, list);
dto_q); list_del(&ctxt->list);
list_del_init(&ctxt->dto_q);
svc_rdma_put_context(ctxt, 1); svc_rdma_put_context(ctxt, 1);
} }
/* Destroy queued, but not processed recv completions */ /* Destroy queued, but not processed recv completions */
while (!list_empty(&rdma->sc_rq_dto_q)) { while (!list_empty(&rdma->sc_rq_dto_q)) {
struct svc_rdma_op_ctxt *ctxt; struct svc_rdma_op_ctxt *ctxt;
ctxt = list_entry(rdma->sc_rq_dto_q.next, ctxt = list_first_entry(&rdma->sc_rq_dto_q,
struct svc_rdma_op_ctxt, struct svc_rdma_op_ctxt, list);
dto_q); list_del(&ctxt->list);
list_del_init(&ctxt->dto_q);
svc_rdma_put_context(ctxt, 1); svc_rdma_put_context(ctxt, 1);
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment