Commit a74d2416 authored by Shachar Raindel, committed by Roland Dreier

IB/mlx5: Refactor UMR to have its own context struct

Instead of having the UMR context be part of each memory region, allocate
a struct on the stack.  This allows queuing multiple UMRs that access
the same memory region.
Signed-off-by: Shachar Raindel <raindel@mellanox.com>
Signed-off-by: Haggai Eran <haggaie@mellanox.com>
Signed-off-by: Roland Dreier <roland@purestorage.com>
parent 48fea837
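
The change is easiest to read as a pattern: each caller now builds a private completion context on its own stack, tags the UMR work request's wr_id with a pointer to that context, and the shared CQ handler completes whichever context the finished WR points at, never touching the MR itself. The sketch below is a simplified illustration assembled from the hunks in this commit, not a literal copy of the driver code; the helpers post_umr_and_wait() and umr_wc_handler() are made-up names for the example, and DMA mapping, WQE preparation and most error handling are omitted.

	#include <linux/completion.h>
	#include <linux/semaphore.h>
	#include <rdma/ib_verbs.h>

	/* Per-request context, as introduced by this commit. It lives on the
	 * caller's stack rather than inside struct mlx5_ib_mr, so several UMR
	 * operations against the same MR can be outstanding at once.
	 */
	struct mlx5_ib_umr_context {
		enum ib_wc_status	status;
		struct completion	done;
	};

	static inline void mlx5_ib_init_umr_context(struct mlx5_ib_umr_context *context)
	{
		context->status = -1;		/* marks "not completed yet" */
		init_completion(&context->done);
	}

	/* Hypothetical caller: post one UMR WR and wait for its completion.
	 * 'umrc' is the driver's umr_common (QP, CQ, semaphore); 'wr' has
	 * already been filled in by a prep_umr_*_wqe()-style helper.
	 */
	static int post_umr_and_wait(struct umr_common *umrc, struct ib_send_wr *wr)
	{
		struct mlx5_ib_umr_context umr_context;
		struct ib_send_wr *bad;
		int err;

		mlx5_ib_init_umr_context(&umr_context);
		wr->wr_id = (u64)(unsigned long)&umr_context;

		down(&umrc->sem);
		err = ib_post_send(umrc->qp, wr, &bad);
		if (!err) {
			wait_for_completion(&umr_context.done);
			if (umr_context.status != IB_WC_SUCCESS)
				err = -EFAULT;
		}
		up(&umrc->sem);

		return err;
	}

	/* Hypothetical per-WC helper, mirroring mlx5_umr_cq_handler(): recover
	 * the on-stack context from wr_id and wake up the waiting caller.
	 */
	static void umr_wc_handler(struct ib_wc *wc)
	{
		struct mlx5_ib_umr_context *context;

		context = (struct mlx5_ib_umr_context *)(unsigned long)wc->wr_id;
		context->status = wc->status;
		complete(&context->done);
	}

Because the context is per call rather than per MR, two callers (or one caller issuing back-to-back UMRs against the same MR) each wait on their own completion without overwriting each other's status, which is what enables queuing multiple UMRs for one memory region.
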
@@ -264,8 +264,6 @@ struct mlx5_ib_mr {
 	__be64			*pas;
 	dma_addr_t		dma;
 	int			npages;
-	struct completion	done;
-	enum ib_wc_status	status;
 	struct mlx5_ib_dev	*dev;
 	struct mlx5_create_mkey_mbox_out out;
 	struct mlx5_core_sig_ctx	*sig;
@@ -277,6 +275,17 @@ struct mlx5_ib_fast_reg_page_list {
 	dma_addr_t		map;
 };
 
+struct mlx5_ib_umr_context {
+	enum ib_wc_status	status;
+	struct completion	done;
+};
+
+static inline void mlx5_ib_init_umr_context(struct mlx5_ib_umr_context *context)
+{
+	context->status = -1;
+	init_completion(&context->done);
+}
+
 struct umr_common {
 	struct ib_pd	*pd;
 	struct ib_cq	*cq;

@@ -708,7 +708,7 @@ static void prep_umr_unreg_wqe(struct mlx5_ib_dev *dev,
 
 void mlx5_umr_cq_handler(struct ib_cq *cq, void *cq_context)
 {
-	struct mlx5_ib_mr *mr;
+	struct mlx5_ib_umr_context *context;
 	struct ib_wc wc;
 	int err;
 
@@ -721,9 +721,9 @@ void mlx5_umr_cq_handler(struct ib_cq *cq, void *cq_context)
 		if (err == 0)
 			break;
 
-		mr = (struct mlx5_ib_mr *)(unsigned long)wc.wr_id;
-		mr->status = wc.status;
-		complete(&mr->done);
+		context = (struct mlx5_ib_umr_context *)(unsigned long)wc.wr_id;
+		context->status = wc.status;
+		complete(&context->done);
 	}
 	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
 }
@@ -735,6 +735,7 @@ static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,
 	struct mlx5_ib_dev *dev = to_mdev(pd->device);
 	struct device *ddev = dev->ib_dev.dma_device;
 	struct umr_common *umrc = &dev->umrc;
+	struct mlx5_ib_umr_context umr_context;
 	struct ib_send_wr wr, *bad;
 	struct mlx5_ib_mr *mr;
 	struct ib_sge sg;
@@ -774,25 +775,22 @@ static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,
 	}
 
 	memset(&wr, 0, sizeof(wr));
-	wr.wr_id = (u64)(unsigned long)mr;
+	wr.wr_id = (u64)(unsigned long)&umr_context;
 	prep_umr_reg_wqe(pd, &wr, &sg, mr->dma, npages, mr->mmr.key, page_shift, virt_addr, len, access_flags);
 
-	/* We serialize polls so one process does not kidnap another's
-	 * completion. This is not a problem since wr is completed in
-	 * around 1 usec
-	 */
+	mlx5_ib_init_umr_context(&umr_context);
 	down(&umrc->sem);
-	init_completion(&mr->done);
 	err = ib_post_send(umrc->qp, &wr, &bad);
 	if (err) {
 		mlx5_ib_warn(dev, "post send failed, err %d\n", err);
 		goto unmap_dma;
-	}
-	wait_for_completion(&mr->done);
-	if (mr->status != IB_WC_SUCCESS) {
-		mlx5_ib_warn(dev, "reg umr failed\n");
-		err = -EFAULT;
+	} else {
+		wait_for_completion(&umr_context.done);
+		if (umr_context.status != IB_WC_SUCCESS) {
+			mlx5_ib_warn(dev, "reg umr failed\n");
+			err = -EFAULT;
+		}
 	}
 
 	mr->mmr.iova = virt_addr;
 	mr->mmr.size = len;
@@ -940,24 +938,26 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
 {
 	struct umr_common *umrc = &dev->umrc;
+	struct mlx5_ib_umr_context umr_context;
 	struct ib_send_wr wr, *bad;
 	int err;
 
 	memset(&wr, 0, sizeof(wr));
-	wr.wr_id = (u64)(unsigned long)mr;
+	wr.wr_id = (u64)(unsigned long)&umr_context;
 	prep_umr_unreg_wqe(dev, &wr, mr->mmr.key);
 
+	mlx5_ib_init_umr_context(&umr_context);
 	down(&umrc->sem);
-	init_completion(&mr->done);
 	err = ib_post_send(umrc->qp, &wr, &bad);
 	if (err) {
 		up(&umrc->sem);
 		mlx5_ib_dbg(dev, "err %d\n", err);
 		goto error;
-	}
-	wait_for_completion(&mr->done);
-	up(&umrc->sem);
-	if (mr->status != IB_WC_SUCCESS) {
+	} else {
+		wait_for_completion(&umr_context.done);
+		up(&umrc->sem);
+	}
+	if (umr_context.status != IB_WC_SUCCESS) {
 		mlx5_ib_warn(dev, "unreg umr failed\n");
 		err = -EFAULT;
 		goto error;