Commit 6246f1cc authored by Shiraz Saleem, committed by Jason Gunthorpe

RDMA/irdma: Use list_last_entry/list_first_entry

Use list_last_entry() and list_first_entry() instead of open-coding the
lookups with raw prev and next pointers.

Link: https://lore.kernel.org/r/20210608211415.680-1-shiraz.saleem@intel.com
Reported-by: Dan Carpenter <dan.carpenter@oracle.com>
Signed-off-by: Shiraz Saleem <shiraz.saleem@intel.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
parent ac477efc
@@ -1419,7 +1419,7 @@ irdma_ieq_handle_partial(struct irdma_puda_rsrc *ieq, struct irdma_pfpdu *pfpdu,
 error:
 	while (!list_empty(&pbufl)) {
-		buf = (struct irdma_puda_buf *)(pbufl.prev);
+		buf = list_last_entry(&pbufl, struct irdma_puda_buf, list);
 		list_move(&buf->list, rxlist);
 	}
 	if (txbuf)
@@ -425,8 +425,8 @@ struct irdma_cqp_request *irdma_alloc_and_get_cqp_request(struct irdma_cqp *cqp,
 	spin_lock_irqsave(&cqp->req_lock, flags);
 	if (!list_empty(&cqp->cqp_avail_reqs)) {
-		cqp_request = list_entry(cqp->cqp_avail_reqs.next,
-					 struct irdma_cqp_request, list);
+		cqp_request = list_first_entry(&cqp->cqp_avail_reqs,
+					       struct irdma_cqp_request, list);
 		list_del_init(&cqp_request->list);
 	}
 	spin_unlock_irqrestore(&cqp->req_lock, flags);
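
For readers unfamiliar with the list helpers, a minimal sketch of the pattern the
patch moves to follows. The structure and function names are hypothetical; only
<linux/list.h> and its helpers are real kernel API. list_first_entry() and
list_last_entry() expand to container_of() on head->next / head->prev, so the
element type and the name of the embedded list_head member are stated at the
call site instead of being hidden behind a raw pointer cast that only works when
the list_head happens to be the first member of the structure.

/*
 * Minimal sketch, kernel-style C. "struct demo_buf" and demo_pop_oldest()
 * are hypothetical illustrations; list_empty(), list_first_entry() and
 * list_del_init() are the real <linux/list.h> helpers used by the patch.
 */
#include <linux/list.h>

struct demo_buf {
	int data;
	struct list_head list;	/* may sit anywhere in the structure */
};

static struct demo_buf *demo_pop_oldest(struct list_head *head)
{
	struct demo_buf *buf;

	if (list_empty(head))
		return NULL;

	/*
	 * Equivalent to container_of(head->next, struct demo_buf, list).
	 * Unlike "(struct demo_buf *)head->next", this stays correct even
	 * when "list" is not the first member of struct demo_buf.
	 */
	buf = list_first_entry(head, struct demo_buf, list);
	list_del_init(&buf->list);
	return buf;
}

list_last_entry() is the mirror image, resolving head->prev, which is what the
ieq error path in the first hunk needs.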