Commit f7f7aab1 authored by Sagi Grimberg, committed by Doug Ledford

IB/srp: Convert to new registration API

Instead of constructing a page list, call ib_map_mr_sg
and post a new ib_reg_wr. srp_map_finish_fr now returns
the number of sg elements registered.

Remove srp_finish_mapping since no one is calling it.
Signed-off-by: Sagi Grimberg <sagig@mellanox.com>
Tested-by: Bart Van Assche <bart.vanassche@sandisk.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
parent 26630e8a
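For orientation, here is a minimal sketch of the registration flow this commit adopts, built only from calls that appear in the diff below (ib_map_mr_sg, IB_WR_REG_MR, ib_post_send). The function name and its parameters are illustrative, not part of the patch, and error handling is reduced to the essentials.

#include <rdma/ib_verbs.h>

/*
 * Illustrative only: register the memory described by a DMA-mapped
 * scatterlist through an already-allocated ib_mr, following the
 * pattern srp_map_finish_fr() uses after this commit. Returns the
 * number of sg entries covered, or a negative errno.
 */
static int example_reg_mr(struct ib_qp *qp, struct ib_mr *mr,
                          struct scatterlist *sg, int sg_nents,
                          unsigned int page_size)
{
        struct ib_reg_wr wr = {};
        struct ib_send_wr *bad_wr;
        int n, err;

        /* Lay the S/G list out in the MR's page table. */
        n = ib_map_mr_sg(mr, sg, sg_nents, page_size);
        if (n < 0)
                return n;

        /* The registration itself is posted as a work request. */
        wr.wr.opcode = IB_WR_REG_MR;
        wr.mr = mr;
        wr.key = mr->rkey;
        wr.access = IB_ACCESS_LOCAL_WRITE |
                    IB_ACCESS_REMOTE_READ |
                    IB_ACCESS_REMOTE_WRITE;

        err = ib_post_send(qp, &wr.wr, &bad_wr);
        if (err)
                return err;

        return n;       /* caller steps past the n mapped sg entries */
}

The return value mirrors the new srp_map_finish_fr() contract described in the commit message: the caller learns how many scatterlist entries the MR now covers and advances past them, as the srp_map_sg_fr() loop in the diff does.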
@@ -340,8 +340,6 @@ static void srp_destroy_fr_pool(struct srp_fr_pool *pool)
                 return;

         for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
-                if (d->frpl)
-                        ib_free_fast_reg_page_list(d->frpl);
                 if (d->mr)
                         ib_dereg_mr(d->mr);
         }
@@ -362,7 +360,6 @@ static struct srp_fr_pool *srp_create_fr_pool(struct ib_device *device,
         struct srp_fr_pool *pool;
         struct srp_fr_desc *d;
         struct ib_mr *mr;
-        struct ib_fast_reg_page_list *frpl;
         int i, ret = -EINVAL;

         if (pool_size <= 0)
@@ -385,12 +382,6 @@ static struct srp_fr_pool *srp_create_fr_pool(struct ib_device *device,
                         goto destroy_pool;
                 }
                 d->mr = mr;
-                frpl = ib_alloc_fast_reg_page_list(device, max_page_list_len);
-                if (IS_ERR(frpl)) {
-                        ret = PTR_ERR(frpl);
-                        goto destroy_pool;
-                }
-                d->frpl = frpl;
                 list_add_tail(&d->entry, &pool->free_list);
         }
@@ -1321,23 +1312,24 @@ static int srp_map_finish_fr(struct srp_map_state *state,
         struct srp_target_port *target = ch->target;
         struct srp_device *dev = target->srp_host->srp_dev;
         struct ib_send_wr *bad_wr;
-        struct ib_fast_reg_wr wr;
+        struct ib_reg_wr wr;
         struct srp_fr_desc *desc;
         u32 rkey;
-        int err;
+        int n, err;

         if (state->fr.next >= state->fr.end)
                 return -ENOMEM;

         WARN_ON_ONCE(!dev->use_fast_reg);

-        if (state->npages == 0)
+        if (state->sg_nents == 0)
                 return 0;

-        if (state->npages == 1 && target->global_mr) {
-                srp_map_desc(state, state->base_dma_addr, state->dma_len,
+        if (state->sg_nents == 1 && target->global_mr) {
+                srp_map_desc(state, sg_dma_address(state->sg),
+                             sg_dma_len(state->sg),
                              target->global_mr->rkey);
-                goto reset_state;
+                return 1;
         }

         desc = srp_fr_pool_get(ch->fr_pool);
@@ -1347,37 +1339,33 @@ static int srp_map_finish_fr(struct srp_map_state *state,
         rkey = ib_inc_rkey(desc->mr->rkey);
         ib_update_fast_reg_key(desc->mr, rkey);

-        memcpy(desc->frpl->page_list, state->pages,
-               sizeof(state->pages[0]) * state->npages);
-
-        memset(&wr, 0, sizeof(wr));
-        wr.wr.opcode = IB_WR_FAST_REG_MR;
+        n = ib_map_mr_sg(desc->mr, state->sg, state->sg_nents,
+                         dev->mr_page_size);
+        if (unlikely(n < 0))
+                return n;
+
+        wr.wr.next = NULL;
+        wr.wr.opcode = IB_WR_REG_MR;
         wr.wr.wr_id = FAST_REG_WR_ID_MASK;
-        wr.iova_start = state->base_dma_addr;
-        wr.page_list = desc->frpl;
-        wr.page_list_len = state->npages;
-        wr.page_shift = ilog2(dev->mr_page_size);
-        wr.length = state->dma_len;
-        wr.access_flags = (IB_ACCESS_LOCAL_WRITE |
-                           IB_ACCESS_REMOTE_READ |
-                           IB_ACCESS_REMOTE_WRITE);
-        wr.rkey = desc->mr->lkey;
+        wr.wr.num_sge = 0;
+        wr.wr.send_flags = 0;
+        wr.mr = desc->mr;
+        wr.key = desc->mr->rkey;
+        wr.access = (IB_ACCESS_LOCAL_WRITE |
+                     IB_ACCESS_REMOTE_READ |
+                     IB_ACCESS_REMOTE_WRITE);

         *state->fr.next++ = desc;
         state->nmdesc++;

-        srp_map_desc(state, state->base_dma_addr, state->dma_len,
-                     desc->mr->rkey);
+        srp_map_desc(state, desc->mr->iova,
+                     desc->mr->length, desc->mr->rkey);

         err = ib_post_send(ch->qp, &wr.wr, &bad_wr);
-        if (err)
+        if (unlikely(err))
                 return err;

-reset_state:
-        state->npages = 0;
-        state->dma_len = 0;
-
-        return 0;
+        return n;
 }

 static int srp_finish_mapping(struct srp_map_state *state,
@@ -1407,7 +1395,7 @@ static int srp_map_sg_entry(struct srp_map_state *state,
         while (dma_len) {
                 unsigned offset = dma_addr & ~dev->mr_page_mask;
                 if (state->npages == dev->max_pages_per_mr || offset != 0) {
-                        ret = srp_finish_mapping(state, ch);
+                        ret = srp_map_finish_fmr(state, ch);
                         if (ret)
                                 return ret;
                 }
@@ -1429,7 +1417,7 @@ static int srp_map_sg_entry(struct srp_map_state *state,
          */
         ret = 0;
         if (len != dev->mr_page_size)
-                ret = srp_finish_mapping(state, ch);
+                ret = srp_map_finish_fmr(state, ch);

         return ret;
 }
@@ -1451,7 +1439,7 @@ static int srp_map_sg_fmr(struct srp_map_state *state, struct srp_rdma_ch *ch,
                         return ret;
         }

-        ret = srp_finish_mapping(state, ch);
+        ret = srp_map_finish_fmr(state, ch);
         if (ret)
                 return ret;

@@ -1464,23 +1452,23 @@ static int srp_map_sg_fr(struct srp_map_state *state, struct srp_rdma_ch *ch,
                          struct srp_request *req, struct scatterlist *scat,
                          int count)
 {
-        struct scatterlist *sg;
-        int i, ret;
-
         state->desc = req->indirect_desc;
-        state->pages = req->map_page;
-        state->fmr.next = req->fmr_list;
-        state->fmr.end = req->fmr_list + ch->target->cmd_sg_cnt;
+        state->fr.next = req->fr_list;
+        state->fr.end = req->fr_list + ch->target->cmd_sg_cnt;
+        state->sg = scat;
+        state->sg_nents = scsi_sg_count(req->scmnd);

-        for_each_sg(scat, sg, count, i) {
-                ret = srp_map_sg_entry(state, ch, sg, i);
-                if (ret)
-                        return ret;
-        }
+        while (state->sg_nents) {
+                int i, n;

-        ret = srp_finish_mapping(state, ch);
-        if (ret)
-                return ret;
+                n = srp_map_finish_fr(state, ch);
+                if (unlikely(n < 0))
+                        return n;
+
+                state->sg_nents -= n;
+                for (i = 0; i < n; i++)
+                        state->sg = sg_next(state->sg);
+        }

         req->nmdesc = state->nmdesc;
@@ -1524,6 +1512,7 @@ static int srp_map_idb(struct srp_rdma_ch *ch, struct srp_request *req,
         struct srp_map_state state;
         struct srp_direct_buf idb_desc;
         u64 idb_pages[1];
+        struct scatterlist idb_sg[1];
         int ret;

         memset(&state, 0, sizeof(state));
memset(&state, 0, sizeof(state)); memset(&state, 0, sizeof(state));
...@@ -1531,20 +1520,32 @@ static int srp_map_idb(struct srp_rdma_ch *ch, struct srp_request *req, ...@@ -1531,20 +1520,32 @@ static int srp_map_idb(struct srp_rdma_ch *ch, struct srp_request *req,
state.gen.next = next_mr; state.gen.next = next_mr;
state.gen.end = end_mr; state.gen.end = end_mr;
state.desc = &idb_desc; state.desc = &idb_desc;
state.pages = idb_pages;
state.pages[0] = (req->indirect_dma_addr &
dev->mr_page_mask);
state.npages = 1;
state.base_dma_addr = req->indirect_dma_addr; state.base_dma_addr = req->indirect_dma_addr;
state.dma_len = idb_len; state.dma_len = idb_len;
ret = srp_finish_mapping(&state, ch);
if (ret < 0) if (dev->use_fast_reg) {
goto out; state.sg = idb_sg;
state.sg_nents = 1;
sg_set_buf(idb_sg, req->indirect_desc, idb_len);
idb_sg->dma_address = req->indirect_dma_addr; /* hack! */
ret = srp_map_finish_fr(&state, ch);
if (ret < 0)
return ret;
} else if (dev->use_fmr) {
state.pages = idb_pages;
state.pages[0] = (req->indirect_dma_addr &
dev->mr_page_mask);
state.npages = 1;
ret = srp_map_finish_fmr(&state, ch);
if (ret < 0)
return ret;
} else {
return -EINVAL;
}
*idb_rkey = idb_desc.key; *idb_rkey = idb_desc.key;
out: return 0;
return ret;
} }
 static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
...
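The fast-registration branch added to srp_map_idb() above hands ib_map_mr_sg() a buffer that is already DMA-mapped, by building a one-entry scatterlist and filling in dma_address by hand (the in-tree comment flags this as a hack). Below is a hedged, self-contained sketch of that pattern; the helper name is made up, and the CONFIG_NEED_SG_DMA_LENGTH branch is an extra precaution that is not part of the patch.

#include <linux/scatterlist.h>

/*
 * Illustrative only: wrap a buffer whose DMA address is already known
 * in a single-entry scatterlist, so that sg_dma_address()/sg_dma_len()
 * return usable values without another dma_map_sg() call.
 */
static void example_single_entry_sg(struct scatterlist *sg, void *buf,
                                    unsigned int len, dma_addr_t dma_addr)
{
        sg_init_one(sg, buf, len);      /* one-entry table: page, offset, length */
        sg->dma_address = dma_addr;     /* normally filled in by dma_map_sg() */
#ifdef CONFIG_NEED_SG_DMA_LENGTH
        sg->dma_length = len;           /* keep sg_dma_len() consistent */
#endif
}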
@@ -242,7 +242,6 @@ struct srp_iu {
 struct srp_fr_desc {
         struct list_head        entry;
         struct ib_mr            *mr;
-        struct ib_fast_reg_page_list *frpl;
 };

 /**
@@ -294,11 +293,17 @@ struct srp_map_state {
                 } gen;
         };
         struct srp_direct_buf  *desc;
-        u64                    *pages;
+        union {
+                u64                     *pages;
+                struct scatterlist      *sg;
+        };
         dma_addr_t              base_dma_addr;
         u32                     dma_len;
         u32                     total_len;
-        unsigned int            npages;
+        union {
+                unsigned int    npages;
+                int             sg_nents;
+        };
         unsigned int            nmdesc;
         unsigned int            ndesc;
 };
...