Commit 954afc5a authored by Daisuke Matsuda, committed by Leon Romanovsky

RDMA/rxe: Use members of generic struct in rxe_mr

rxe_mr and ib_mr have interchangeable members. Remove the device-specific
members and use the ones in the generic struct instead. Both 'iova' and
'length' are filled in by the ib_uverbs or ib_core layer after MR registration.
Signed-off-by: Daisuke Matsuda <matsuda-daisuke@fujitsu.com>
Link: https://lore.kernel.org/r/20220921080844.1616883-2-matsuda-daisuke@fujitsu.com
Signed-off-by: Leon Romanovsky <leon@kernel.org>
parent 241f9a27
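
For context, the cleanup works because rxe_mr already embeds the generic
struct, so the generic fields are reachable through it. A minimal sketch of
that relationship, abridged from the real definitions (struct ib_mr lives in
include/rdma/ib_verbs.h, struct rxe_mr in the rxe driver's rxe_verbs.h); the
omitted members and field order are simplified here:

	struct ib_mr {				/* generic MR (abridged) */
		u64		iova;		/* set by ib_uverbs/ib_core */
		u64		length;		/* after MR registration */
		/* ... */
	};

	struct rxe_mr {				/* rxe's MR (abridged) */
		struct ib_mr	ibmr;		/* embedded generic struct */
		u32		offset;
		int		access;
		/* ... */
	};

With the duplicated fields removed, every access simply goes through the
embedded struct: mr->iova becomes mr->ibmr.iova and mr->length becomes
mr->ibmr.length, as the hunks below show.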
diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c
@@ -32,8 +32,8 @@ int mr_check_range(struct rxe_mr *mr, u64 iova, size_t length)
 	case IB_MR_TYPE_USER:
 	case IB_MR_TYPE_MEM_REG:
-		if (iova < mr->iova || length > mr->length ||
-		    iova > mr->iova + mr->length - length)
+		if (iova < mr->ibmr.iova || length > mr->ibmr.length ||
+		    iova > mr->ibmr.iova + mr->ibmr.length - length)
 			return -EFAULT;
 		return 0;
@@ -178,8 +178,6 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
 	mr->ibmr.pd = &pd->ibpd;
 	mr->umem = umem;
 	mr->access = access;
-	mr->length = length;
-	mr->iova = iova;
 	mr->offset = ib_umem_offset(umem);
 	mr->state = RXE_MR_STATE_VALID;
 	mr->type = IB_MR_TYPE_USER;
@@ -221,7 +219,7 @@ int rxe_mr_init_fast(struct rxe_pd *pd, int max_pages, struct rxe_mr *mr)
 static void lookup_iova(struct rxe_mr *mr, u64 iova, int *m_out, int *n_out,
 			size_t *offset_out)
 {
-	size_t offset = iova - mr->iova + mr->offset;
+	size_t offset = iova - mr->ibmr.iova + mr->offset;
 	int map_index;
 	int buf_index;
 	u64 length;
@@ -604,7 +602,7 @@ int rxe_reg_fast_mr(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
 	mr->access = access;
 	mr->lkey = key;
 	mr->rkey = (access & IB_ACCESS_REMOTE) ? key : 0;
-	mr->iova = wqe->wr.wr.reg.mr->iova;
+	mr->ibmr.iova = wqe->wr.wr.reg.mr->iova;
 	mr->state = RXE_MR_STATE_VALID;

 	return 0;
diff --git a/drivers/infiniband/sw/rxe/rxe_mw.c b/drivers/infiniband/sw/rxe/rxe_mw.c
@@ -114,15 +114,15 @@ static int rxe_check_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
 	/* C10-75 */
 	if (mw->access & IB_ZERO_BASED) {
-		if (unlikely(wqe->wr.wr.mw.length > mr->length)) {
+		if (unlikely(wqe->wr.wr.mw.length > mr->ibmr.length)) {
 			pr_err_once(
 				"attempt to bind a ZB MW outside of the MR\n");
 			return -EINVAL;
 		}
 	} else {
-		if (unlikely((wqe->wr.wr.mw.addr < mr->iova) ||
+		if (unlikely((wqe->wr.wr.mw.addr < mr->ibmr.iova) ||
 			     ((wqe->wr.wr.mw.addr + wqe->wr.wr.mw.length) >
-			      (mr->iova + mr->length)))) {
+			      (mr->ibmr.iova + mr->ibmr.length)))) {
 			pr_err_once(
 				"attempt to bind a VA MW outside of the MR\n");
 			return -EINVAL;
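
In the bind checks above, only where the values are read changes; the logic
is untouched. Restated as a predicate for clarity (a sketch with a made-up
helper name, not code from the patch): a VA memory window [addr, addr + len)
must lie entirely inside the MR's [iova, iova + length) range:

	/* hypothetical helper, for illustration only */
	static bool mw_range_in_mr(u64 addr, u64 len, struct rxe_mr *mr)
	{
		return addr >= mr->ibmr.iova &&
		       addr + len <= mr->ibmr.iova + mr->ibmr.length;
	}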
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -1007,11 +1007,9 @@ static int rxe_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
 	n = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, rxe_set_page);

-	mr->iova = ibmr->iova;
-	mr->length = ibmr->length;
 	mr->page_shift = ilog2(ibmr->page_size);
 	mr->page_mask = ibmr->page_size - 1;
-	mr->offset = mr->iova & mr->page_mask;
+	mr->offset = ibmr->iova & mr->page_mask;

 	return n;
 }
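
The offset computation above is likewise unchanged except for reading iova
from the embedded struct. A quick worked example, assuming a 4096-byte page
size and an illustrative iova value:

	/* page_size = 4096 */
	page_shift = ilog2(4096);	/* 12 */
	page_mask  = 4096 - 1;		/* 0xfff */
	/* iova = 0x10008a0 (example value) */
	offset     = 0x10008a0 & 0xfff;	/* 0x8a0: byte offset within the first page */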
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.h b/drivers/infiniband/sw/rxe/rxe_verbs.h
@@ -305,8 +305,6 @@ struct rxe_mr {
 	u32			rkey;
 	enum rxe_mr_state	state;
 	enum ib_mr_type		type;
-	u64			iova;
-	size_t			length;
 	u32			offset;
 	int			access;