Commit 0e0d3a48 authored by Bart Van Assche, committed by Doug Ledford

IB/srp: Remove the memory registration backtracking code

Mapping a discontiguous sg-list requires multiple memory regions
and hence can exhaust the memory region pool. The SRP initiator
already handles this by temporarily reducing the queue depth. This
means that it is safe to remove the memory registration backtracking
code. This patch has been tested with direct I/O sizes up to 256 MB.
Signed-off-by: Bart Van Assche <bart.vanassche@sandisk.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
parent f731ed62
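
The core of the change in srp_map_sg() below is that a memory registration failure is now simply propagated to the caller instead of triggering a backtrack to the first unmapped entry. The following sketch is hypothetical illustration code, not part of the driver: map_one_sg_entry(), map_sg() and struct map_state are simplified stand-ins for srp_map_sg_entry(), srp_map_sg() and struct srp_map_state, and it only demonstrates the error-propagation pattern under the assumption, stated in the commit message, that higher layers recover from -ENOMEM by temporarily reducing the queue depth.

/*
 * Hypothetical, self-contained userspace sketch -- not the kernel driver.
 * map_one_sg_entry() and map_sg() stand in for srp_map_sg_entry() and
 * srp_map_sg(); the types are simplified placeholders.
 */
#include <errno.h>
#include <stdio.h>

struct map_state {
        unsigned int nmdesc;    /* number of memory descriptors used */
};

/* Pretend the memory-region pool is exhausted at the third sg entry. */
static int map_one_sg_entry(struct map_state *state, int i)
{
        if (i == 2)
                return -ENOMEM;
        state->nmdesc++;
        return 0;
}

/*
 * New-style mapping loop: a failure is returned to the caller, which in
 * the real driver reacts by temporarily reducing the queue depth.  No
 * backtracking to the first unmapped entry is attempted.
 */
static int map_sg(struct map_state *state, int count)
{
        int i, ret;

        for (i = 0; i < count; i++) {
                ret = map_one_sg_entry(state, i);
                if (ret)
                        goto out;
        }
        ret = 0;
out:
        return ret;
}

int main(void)
{
        struct map_state state = { .nmdesc = 0 };
        int ret = map_sg(&state, 4);

        printf("mapped %u descriptors, ret = %d\n", state.nmdesc, ret);
        return 0;
}

Compiled and run on Linux (where ENOMEM is 12), the sketch reports two mapped descriptors and ret = -12, mirroring how the reworked srp_map_sg() now bails out through its new "out" label on the first failure.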
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -1364,15 +1364,6 @@ static int srp_finish_mapping(struct srp_map_state *state,
         return ret;
 }
 
-static void srp_map_update_start(struct srp_map_state *state,
-                                 struct scatterlist *sg, int sg_index,
-                                 dma_addr_t dma_addr)
-{
-        state->unmapped_sg = sg;
-        state->unmapped_index = sg_index;
-        state->unmapped_addr = dma_addr;
-}
-
 static int srp_map_sg_entry(struct srp_map_state *state,
                             struct srp_rdma_ch *ch,
                             struct scatterlist *sg, int sg_index,
@@ -1399,23 +1390,12 @@ static int srp_map_sg_entry(struct srp_map_state *state,
                 return 0;
         }
 
-        /*
-         * If this is the first sg that will be mapped via FMR or via FR, save
-         * our position. We need to know the first unmapped entry, its index,
-         * and the first unmapped address within that entry to be able to
-         * restart mapping after an error.
-         */
-        if (!state->unmapped_sg)
-                srp_map_update_start(state, sg, sg_index, dma_addr);
-
         while (dma_len) {
                 unsigned offset = dma_addr & ~dev->mr_page_mask;
                 if (state->npages == dev->max_pages_per_mr || offset != 0) {
                         ret = srp_finish_mapping(state, ch);
                         if (ret)
                                 return ret;
-
-                        srp_map_update_start(state, sg, sg_index, dma_addr);
                 }
 
                 len = min_t(unsigned int, dma_len, dev->mr_page_size - offset);
@@ -1434,11 +1414,8 @@ static int srp_map_sg_entry(struct srp_map_state *state,
          * boundries.
          */
         ret = 0;
-        if (len != dev->mr_page_size) {
+        if (len != dev->mr_page_size)
                 ret = srp_finish_mapping(state, ch);
-                if (!ret)
-                        srp_map_update_start(state, NULL, 0, 0);
-        }
         return ret;
 }
 
@@ -1448,9 +1425,8 @@ static int srp_map_sg(struct srp_map_state *state, struct srp_rdma_ch *ch,
 {
         struct srp_target_port *target = ch->target;
         struct srp_device *dev = target->srp_host->srp_dev;
-        struct ib_device *ibdev = dev->dev;
         struct scatterlist *sg;
-        int i;
+        int i, ret;
         bool use_mr;
 
         state->desc = req->indirect_desc;
@@ -1466,34 +1442,22 @@ static int srp_map_sg(struct srp_map_state *state, struct srp_rdma_ch *ch,
         }
 
         for_each_sg(scat, sg, count, i) {
-                if (srp_map_sg_entry(state, ch, sg, i, use_mr)) {
-                        /*
-                         * Memory registration failed, so backtrack to the
-                         * first unmapped entry and continue on without using
-                         * memory registration.
-                         */
-                        dma_addr_t dma_addr;
-                        unsigned int dma_len;
-
-backtrack:
-                        sg = state->unmapped_sg;
-                        i = state->unmapped_index;
-
-                        dma_addr = ib_sg_dma_address(ibdev, sg);
-                        dma_len = ib_sg_dma_len(ibdev, sg);
-                        dma_len -= (state->unmapped_addr - dma_addr);
-                        dma_addr = state->unmapped_addr;
-                        use_mr = false;
-                        srp_map_desc(state, dma_addr, dma_len, target->rkey);
-                }
+                ret = srp_map_sg_entry(state, ch, sg, i, use_mr);
+                if (ret)
+                        goto out;
         }
 
-        if (use_mr && srp_finish_mapping(state, ch))
-                goto backtrack;
+        if (use_mr) {
+                ret = srp_finish_mapping(state, ch);
+                if (ret)
+                        goto out;
+        }
 
         req->nmdesc = state->nmdesc;
+        ret = 0;
 
-        return 0;
+out:
+        return ret;
 }
 
 static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
--- a/drivers/infiniband/ulp/srp/ib_srp.h
+++ b/drivers/infiniband/ulp/srp/ib_srp.h
@@ -276,9 +276,6 @@ struct srp_fr_pool {
  * @npages:         Number of page addresses in the pages[] array.
  * @nmdesc:         Number of FMR or FR memory descriptors used for mapping.
  * @ndesc:          Number of SRP buffer descriptors that have been filled in.
- * @unmapped_sg:    First element of the sg-list that is mapped via FMR or FR.
- * @unmapped_index: Index of the first element mapped via FMR or FR.
- * @unmapped_addr:  DMA address of the first element mapped via FMR or FR.
  */
 struct srp_map_state {
         union {
@@ -299,9 +296,6 @@ struct srp_map_state {
         unsigned int            npages;
         unsigned int            nmdesc;
         unsigned int            ndesc;
-        struct scatterlist     *unmapped_sg;
-        int                     unmapped_index;
-        dma_addr_t              unmapped_addr;
 };
 
 #endif /* IB_SRP_H */