Commit 38071a46 authored by Sagi Grimberg, committed by Doug Ledford

IB/qib: Support the new memory registration API

Support the new memory registration API by allocating a
private page list array in qib_mr and populating it when
qib_map_mr_sg is invoked. Also, support IB_WR_REG_MR by
duplicating qib_fast_reg_mr, but taking the needed
information from different places:
- page_size, iova, length (ib_mr)
- page array (qib_mr)
- key, access flags (ib_reg_wr)

The IB_WR_FAST_REG_MR handlers will be removed later, once
all the ULPs have been converted.
Signed-off-by: Sagi Grimberg <sagig@mellanox.com>
Acked-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Doug Ledford <dledford@redhat.com>
parent 8376b86d
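
For context, the ULP-side flow that this patch enables looks roughly like the
sketch below. This is a minimal, hypothetical example against the 4.4-era verbs
API and is not part of the patch itself; example_reg_mr, qp, pd, sg, and nents
are placeholder names, and the access flags are illustrative only.

/*
 * Sketch: register a scatterlist with the new API. On this driver,
 * ib_map_mr_sg() lands in qib_map_mr_sg() below, and the posted
 * IB_WR_REG_MR is consumed by the new qib_reg_mr() handler.
 */
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/string.h>
#include <rdma/ib_verbs.h>

static int example_reg_mr(struct ib_qp *qp, struct ib_pd *pd,
			  struct scatterlist *sg, int nents)
{
	struct ib_send_wr *bad_wr;
	struct ib_reg_wr reg_wr;
	struct ib_mr *mr;
	int n;

	mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, nents);
	if (IS_ERR(mr))
		return PTR_ERR(mr);

	/* Collapse the scatterlist into the MR's private page list. */
	n = ib_map_mr_sg(mr, sg, nents, PAGE_SIZE);
	if (n < nents) {
		ib_dereg_mr(mr);
		return n < 0 ? n : -EINVAL;
	}

	/* Post the registration work request on the send queue. */
	memset(&reg_wr, 0, sizeof(reg_wr));
	reg_wr.wr.opcode = IB_WR_REG_MR;
	reg_wr.wr.send_flags = IB_SEND_SIGNALED;
	reg_wr.mr = mr;
	reg_wr.key = mr->rkey;
	reg_wr.access = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ;
	return ib_post_send(qp, &reg_wr.wr, &bad_wr);
}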
@@ -390,3 +390,59 @@ int qib_fast_reg_mr(struct qib_qp *qp, struct ib_send_wr *send_wr)
 	spin_unlock_irqrestore(&rkt->lock, flags);
 	return ret;
 }
+
+/*
+ * Initialize the memory region specified by the work request.
+ */
+int qib_reg_mr(struct qib_qp *qp, struct ib_reg_wr *wr)
+{
+	struct qib_lkey_table *rkt = &to_idev(qp->ibqp.device)->lk_table;
+	struct qib_pd *pd = to_ipd(qp->ibqp.pd);
+	struct qib_mr *mr = to_imr(wr->mr);
+	struct qib_mregion *mrg;
+	u32 key = wr->key;
+	unsigned i, n, m;
+	int ret = -EINVAL;
+	unsigned long flags;
+	u64 *page_list;
+	size_t ps;
+
+	spin_lock_irqsave(&rkt->lock, flags);
+	if (pd->user || key == 0)
+		goto bail;
+
+	mrg = rcu_dereference_protected(
+		rkt->table[(key >> (32 - ib_qib_lkey_table_size))],
+		lockdep_is_held(&rkt->lock));
+	if (unlikely(mrg == NULL || qp->ibqp.pd != mrg->pd))
+		goto bail;
+
+	if (mr->npages > mrg->max_segs)
+		goto bail;
+
+	ps = mr->ibmr.page_size;
+	if (mr->ibmr.length > ps * mr->npages)
+		goto bail;
+
+	mrg->user_base = mr->ibmr.iova;
+	mrg->iova = mr->ibmr.iova;
+	mrg->lkey = key;
+	mrg->length = mr->ibmr.length;
+	mrg->access_flags = wr->access;
+	page_list = mr->pages;
+	m = 0;
+	n = 0;
+	for (i = 0; i < mr->npages; i++) {
+		mrg->map[m]->segs[n].vaddr = (void *) page_list[i];
+		mrg->map[m]->segs[n].length = ps;
+		if (++n == QIB_SEGSZ) {
+			m++;
+			n = 0;
+		}
+	}
+
+	ret = 0;
+bail:
+	spin_unlock_irqrestore(&rkt->lock, flags);
+	return ret;
+}
@@ -303,6 +303,7 @@ int qib_dereg_mr(struct ib_mr *ibmr)
 	int ret = 0;
 	unsigned long timeout;
 
+	kfree(mr->pages);
 	qib_free_lkey(&mr->mr);
 	qib_put_mr(&mr->mr); /* will set completion if last */
@@ -340,7 +341,38 @@ struct ib_mr *qib_alloc_mr(struct ib_pd *pd,
 	if (IS_ERR(mr))
 		return (struct ib_mr *)mr;
 
+	mr->pages = kcalloc(max_num_sg, sizeof(u64), GFP_KERNEL);
+	if (!mr->pages)
+		goto err;
+
 	return &mr->ibmr;
+
+err:
+	qib_dereg_mr(&mr->ibmr);
+	return ERR_PTR(-ENOMEM);
+}
+
+static int qib_set_page(struct ib_mr *ibmr, u64 addr)
+{
+	struct qib_mr *mr = to_imr(ibmr);
+
+	if (unlikely(mr->npages == mr->mr.max_segs))
+		return -ENOMEM;
+
+	mr->pages[mr->npages++] = addr;
+
+	return 0;
+}
+
+int qib_map_mr_sg(struct ib_mr *ibmr,
+		struct scatterlist *sg,
+		int sg_nents)
+{
+	struct qib_mr *mr = to_imr(ibmr);
+
+	mr->npages = 0;
+
+	return ib_sg_to_pages(ibmr, sg, sg_nents, qib_set_page);
 }
 
 struct ib_fast_reg_page_list *
...
@@ -362,7 +362,10 @@ static int qib_post_one_send(struct qib_qp *qp, struct ib_send_wr *wr,
 	 * undefined operations.
 	 * Make sure buffer is large enough to hold the result for atomics.
 	 */
-	if (wr->opcode == IB_WR_FAST_REG_MR) {
+	if (wr->opcode == IB_WR_REG_MR) {
+		if (qib_reg_mr(qp, reg_wr(wr)))
+			goto bail_inval;
+	} else if (wr->opcode == IB_WR_FAST_REG_MR) {
 		if (qib_fast_reg_mr(qp, wr))
 			goto bail_inval;
 	} else if (qp->ibqp.qp_type == IB_QPT_UC) {
@@ -401,6 +404,9 @@ static int qib_post_one_send(struct qib_qp *qp, struct ib_send_wr *wr,
 	if (qp->ibqp.qp_type != IB_QPT_UC &&
 	    qp->ibqp.qp_type != IB_QPT_RC)
 		memcpy(&wqe->ud_wr, ud_wr(wr), sizeof(wqe->ud_wr));
+	else if (wr->opcode == IB_WR_REG_MR)
+		memcpy(&wqe->reg_wr, reg_wr(wr),
+			sizeof(wqe->reg_wr));
 	else if (wr->opcode == IB_WR_FAST_REG_MR)
 		memcpy(&wqe->fast_reg_wr, fast_reg_wr(wr),
 			sizeof(wqe->fast_reg_wr));
@@ -2260,6 +2266,7 @@ int qib_register_ib_device(struct qib_devdata *dd)
 	ibdev->reg_user_mr = qib_reg_user_mr;
 	ibdev->dereg_mr = qib_dereg_mr;
 	ibdev->alloc_mr = qib_alloc_mr;
+	ibdev->map_mr_sg = qib_map_mr_sg;
 	ibdev->alloc_fast_reg_page_list = qib_alloc_fast_reg_page_list;
 	ibdev->free_fast_reg_page_list = qib_free_fast_reg_page_list;
 	ibdev->alloc_fmr = qib_alloc_fmr;
...
@@ -330,6 +330,8 @@ struct qib_mr {
 	struct ib_mr ibmr;
 	struct ib_umem *umem;
 	struct qib_mregion mr;  /* must be last */
+	u64 *pages;
+	u32 npages;
 };
 
 /*
@@ -341,6 +343,7 @@ struct qib_swqe {
 	union {
 		struct ib_send_wr wr;   /* don't use wr.sg_list */
 		struct ib_ud_wr ud_wr;
+		struct ib_reg_wr reg_wr;
 		struct ib_fast_reg_wr fast_reg_wr;
 		struct ib_rdma_wr rdma_wr;
 		struct ib_atomic_wr atomic_wr;
@@ -1044,12 +1047,17 @@ struct ib_mr *qib_alloc_mr(struct ib_pd *pd,
 			enum ib_mr_type mr_type,
 			u32 max_entries);
 
+int qib_map_mr_sg(struct ib_mr *ibmr,
+		struct scatterlist *sg,
+		int sg_nents);
+
 struct ib_fast_reg_page_list *qib_alloc_fast_reg_page_list(
 				struct ib_device *ibdev, int page_list_len);
 
 void qib_free_fast_reg_page_list(struct ib_fast_reg_page_list *pl);
 
 int qib_fast_reg_mr(struct qib_qp *qp, struct ib_send_wr *wr);
+int qib_reg_mr(struct qib_qp *qp, struct ib_reg_wr *wr);
 
 struct ib_fmr *qib_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
 			struct ib_fmr_attr *fmr_attr);
...