Commit 77f2c1a4 authored by Bart Van Assche, committed by Christoph Hellwig

IB/srp: Use block layer tags

Since the block layer already contains functionality to assign
a tag to each request, use that functionality instead of
reimplementing it in the SRP initiator driver. This change
makes the free_reqs list superfluous. Hence remove that list.

[hch: updated to use .use_blk_tags instead of scsi_activate_tcq]
Signed-off-by: Bart Van Assche <bvanassche@acm.org>
Reviewed-by: Sagi Grimberg <sagig@mellanox.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
parent 509c07bc
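
For readers unfamiliar with the helpers this patch switches to, here is a minimal, hypothetical sketch (not part of this commit) of the pattern: the queuecommand path derives the ring index from the tag the block layer has already assigned, instead of pulling an entry off a driver-private free list, and the response path maps the wire tag back to the SCSI command with scsi_host_find_tag(). All my_* names are placeholders.

/*
 * Hypothetical LLD fragment; only blk_mq_unique_tag(),
 * blk_mq_unique_tag_to_tag() and scsi_host_find_tag() are the real
 * interfaces this patch starts using.
 */
#include <linux/blk-mq.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>

struct my_request {
	struct scsi_cmnd *scmnd;		/* command owning this ring slot */
};

static struct my_request my_req_ring[64];	/* sized like shost->can_queue */

static int my_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
{
	/* The block layer has already tagged the request; no free list needed. */
	u32 tag = blk_mq_unique_tag(scmnd->request);
	u16 idx = blk_mq_unique_tag_to_tag(tag);
	struct my_request *req = &my_req_ring[idx];

	req->scmnd = scmnd;
	scmnd->host_scribble = (void *)req;

	/* ... build and post the command, using "tag" as the task tag ... */
	return 0;
}

static void my_handle_rsp(struct Scsi_Host *shost, u32 rsp_tag)
{
	/* Resolve the tag echoed back by the target to the SCSI command. */
	struct scsi_cmnd *scmnd = scsi_host_find_tag(shost, rsp_tag);

	if (scmnd)
		scmnd->scsi_done(scmnd);	/* complete the command */
}
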
@@ -821,8 +821,6 @@ static int srp_alloc_req_data(struct srp_rdma_ch *ch)
 	dma_addr_t dma_addr;
 	int i, ret = -ENOMEM;
 
-	INIT_LIST_HEAD(&ch->free_reqs);
-
 	ch->req_ring = kcalloc(target->req_ring_size, sizeof(*ch->req_ring),
 			       GFP_KERNEL);
 	if (!ch->req_ring)
@@ -853,8 +851,6 @@ static int srp_alloc_req_data(struct srp_rdma_ch *ch)
 			goto out;
 
 		req->indirect_dma_addr = dma_addr;
-		req->index = i;
-		list_add_tail(&req->list, &ch->free_reqs);
 	}
 	ret = 0;
 
@@ -1076,7 +1072,6 @@ static void srp_free_req(struct srp_rdma_ch *ch, struct srp_request *req,
 
 	spin_lock_irqsave(&ch->lock, flags);
 	ch->req_lim += req_lim_delta;
-	list_add_tail(&req->list, &ch->free_reqs);
 	spin_unlock_irqrestore(&ch->lock, flags);
 }
 
@@ -1648,8 +1643,11 @@ static void srp_process_rsp(struct srp_rdma_ch *ch, struct srp_rsp *rsp)
 		ch->tsk_mgmt_status = rsp->data[3];
 		complete(&ch->tsk_mgmt_done);
 	} else {
-		req = &ch->req_ring[rsp->tag];
-		scmnd = srp_claim_req(ch, req, NULL, NULL);
+		scmnd = scsi_host_find_tag(target->scsi_host, rsp->tag);
+		if (scmnd) {
+			req = (void *)scmnd->host_scribble;
+			scmnd = srp_claim_req(ch, req, NULL, scmnd);
+		}
 		if (!scmnd) {
 			shost_printk(KERN_ERR, target->scsi_host,
 				     "Null scmnd for RSP w/tag %016llx\n",
@@ -1889,6 +1887,8 @@ static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
 	struct srp_cmd *cmd;
 	struct ib_device *dev;
 	unsigned long flags;
+	u32 tag;
+	u16 idx;
 	int len, ret;
 	const bool in_scsi_eh = !in_interrupt() && current == shost->ehandler;
 
@@ -1905,17 +1905,22 @@ static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
 	if (unlikely(scmnd->result))
 		goto err;
 
+	WARN_ON_ONCE(scmnd->request->tag < 0);
+	tag = blk_mq_unique_tag(scmnd->request);
 	ch = &target->ch;
+	idx = blk_mq_unique_tag_to_tag(tag);
+	WARN_ONCE(idx >= target->req_ring_size, "%s: tag %#x: idx %d >= %d\n",
+		  dev_name(&shost->shost_gendev), tag, idx,
+		  target->req_ring_size);
 
 	spin_lock_irqsave(&ch->lock, flags);
 	iu = __srp_get_tx_iu(ch, SRP_IU_CMD);
-	if (!iu)
-		goto err_unlock;
-
-	req = list_first_entry(&ch->free_reqs, struct srp_request, list);
-	list_del(&req->list);
 	spin_unlock_irqrestore(&ch->lock, flags);
 
+	if (!iu)
+		goto err;
+
+	req = &ch->req_ring[idx];
 	dev = target->srp_host->srp_dev->dev;
 	ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_iu_len,
 				   DMA_TO_DEVICE);
@@ -1927,7 +1932,7 @@ static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
 
 	cmd->opcode = SRP_CMD;
 	cmd->lun    = cpu_to_be64((u64) scmnd->device->lun << 48);
-	cmd->tag    = req->index;
+	cmd->tag    = tag;
 	memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len);
 
 	req->scmnd    = scmnd;
@@ -1976,12 +1981,6 @@ static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
 	 */
 	req->scmnd = NULL;
 
-	spin_lock_irqsave(&ch->lock, flags);
-	list_add(&req->list, &ch->free_reqs);
-
-err_unlock:
-	spin_unlock_irqrestore(&ch->lock, flags);
-
 err:
 	if (scmnd->result) {
 		scmnd->scsi_done(scmnd);
@@ -2387,6 +2386,7 @@ static int srp_abort(struct scsi_cmnd *scmnd)
 {
 	struct srp_target_port *target = host_to_target(scmnd->device->host);
 	struct srp_request *req = (struct srp_request *) scmnd->host_scribble;
+	u32 tag;
 	struct srp_rdma_ch *ch;
 	int ret;
 
@@ -2395,7 +2395,8 @@ static int srp_abort(struct scsi_cmnd *scmnd)
 	ch = &target->ch;
 	if (!req || !srp_claim_req(ch, req, NULL, scmnd))
 		return SUCCESS;
-	if (srp_send_tsk_mgmt(ch, req->index, scmnd->device->lun,
+	tag = blk_mq_unique_tag(scmnd->request);
+	if (srp_send_tsk_mgmt(ch, tag, scmnd->device->lun,
 			      SRP_TSK_ABORT_TASK) == 0)
 		ret = SUCCESS;
 	else if (target->rport->state == SRP_RPORT_LOST)
@@ -2633,7 +2634,8 @@ static struct scsi_host_template srp_template = {
 	.this_id			= -1,
 	.cmd_per_lun			= SRP_DEFAULT_CMD_SQ_SIZE,
 	.use_clustering			= ENABLE_CLUSTERING,
-	.shost_attrs			= srp_host_attrs
+	.shost_attrs			= srp_host_attrs,
+	.use_blk_tags			= 1,
 };
 
 static int srp_sdev_count(struct Scsi_Host *host)
@@ -3054,6 +3056,10 @@ static ssize_t srp_create_target(struct device *dev,
 	if (ret)
 		goto err;
 
+	ret = scsi_init_shared_tag_map(target_host, target_host->can_queue);
+	if (ret)
+		goto err;
+
 	target->req_ring_size = target->queue_size - SRP_TSK_MGMT_SQ_SIZE;
 
 	if (!srp_conn_unique(target->srp_host, target)) {
...
@@ -116,7 +116,6 @@ struct srp_host {
 };
 
 struct srp_request {
-	struct list_head	list;
 	struct scsi_cmnd	*scmnd;
 	struct srp_iu		*cmd;
 	union {
@@ -127,7 +126,6 @@ struct srp_request {
 	struct srp_direct_buf	*indirect_desc;
 	dma_addr_t		indirect_dma_addr;
 	short			nmdesc;
-	short			index;
 };
 
 /**
@@ -137,7 +135,6 @@ struct srp_request {
 struct srp_rdma_ch {
 	/* These are RW in the hot path, and commonly used together */
 	struct list_head	free_tx;
-	struct list_head	free_reqs;
 	spinlock_t		lock;
 	s32			req_lim;
 
...
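
The .use_blk_tags and scsi_init_shared_tag_map() hunks above are the opt-in side of the change: the host template declares that it wants block layer tags, and a single tag space shared by all LUNs is set up before the host is registered. A rough, hypothetical sketch of that registration pattern follows (my_* names are placeholders; both interfaces were later removed once blk-mq made tagging unconditional).

/*
 * Hypothetical host registration sketch; only .use_blk_tags and
 * scsi_init_shared_tag_map() are the interfaces adopted by this commit.
 */
#include <linux/module.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>

static int my_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
{
	return 0;	/* see the tag-handling sketch after the commit message */
}

static struct scsi_host_template my_tmpl = {
	.module		= THIS_MODULE,
	.name		= "my-blk-tags-example",
	.queuecommand	= my_queuecommand,
	.can_queue	= 64,
	.this_id	= -1,
	.use_blk_tags	= 1,	/* request tags come from the block layer */
};

static int my_add_host(struct device *dev)
{
	struct Scsi_Host *shost = scsi_host_alloc(&my_tmpl, 0);
	int ret;

	if (!shost)
		return -ENOMEM;

	/* Shared tag map sized to the host queue depth; set up before scsi_add_host(). */
	ret = scsi_init_shared_tag_map(shost, shost->can_queue);
	if (!ret)
		ret = scsi_add_host(shost, dev);
	if (ret)
		scsi_host_put(shost);
	return ret;
}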