Commit 68ca949c authored by Anirban Chakraborty, committed by James Bottomley

[SCSI] qla2xxx: Add CPU affinity support.

Set the module parameter ql2xmultique_tag to 1 to enable this
feature. In this mode, the number of response queues created
equals the number of online CPUs. With the block layer's
rq_affinity mode turned on, requests are routed to the proper
CPU and the I/O completes on a response queue affined to the
CPU that issued the request.
Signed-off-by: Anirban Chakraborty <anirban.chakraborty@qlogic.com>
Signed-off-by: Andrew Vasquez <andrew.vasquez@qlogic.com>
Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
parent 2afa19a9
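
For context on the mechanism the diff below wires up: at submission time the driver selects a response queue based on the CPU that issued the request, and the MSI-X handler for that queue hands processing to a work item queued with queue_work_on() so it executes on that same CPU. The stand-alone module below is a minimal, hypothetical sketch of that one primitive; the names (demo_wq, demo_work, demo_do_work) are illustrative and are not part of the qla2xxx driver.

/*
 * Sketch: queue one work item per online CPU with queue_work_on(), so each
 * item runs on the CPU it was queued to; this is the same primitive the
 * patch's qla25xx_msix_rsp_q() uses to complete I/O on the submitting CPU.
 */
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/cpumask.h>
#include <linux/smp.h>
#include <linux/slab.h>

static struct workqueue_struct *demo_wq;
static struct work_struct *demo_work;    /* one slot per possible CPU id */

static void demo_do_work(struct work_struct *work)
{
    /* Executes on the CPU chosen at queue_work_on() time. */
    pr_info("affinity demo: work ran on CPU %d\n", raw_smp_processor_id());
}

static int __init demo_init(void)
{
    int cpu;

    demo_wq = create_workqueue("affinity_demo_wq");
    if (!demo_wq)
        return -ENOMEM;

    demo_work = kcalloc(nr_cpu_ids, sizeof(*demo_work), GFP_KERNEL);
    if (!demo_work) {
        destroy_workqueue(demo_wq);
        return -ENOMEM;
    }

    for_each_online_cpu(cpu) {
        INIT_WORK(&demo_work[cpu], demo_do_work);
        /* Pin this work item's execution to the given CPU. */
        queue_work_on(cpu, demo_wq, &demo_work[cpu]);
    }
    return 0;
}

static void __exit demo_exit(void)
{
    flush_workqueue(demo_wq);
    destroy_workqueue(demo_wq);
    kfree(demo_work);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

In practice the feature is enabled by loading qla2xxx with ql2xmultique_tag=1 and turning on the block layer's rq_affinity setting (the queue/rq_affinity attribute under /sys/block/), as the commit message describes.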
@@ -1531,7 +1531,7 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
     qla24xx_vport_disable(fc_vport, disable);
     ret = 0;
-    if (ha->cur_vport_count <= ha->flex_port_count
+    if (ha->cur_vport_count <= ha->flex_port_count || ql2xmultique_tag
         || ha->max_req_queues == 1 || !ha->npiv_info)
         goto vport_queue;
     /* Create a request queue in QoS mode for the vport */

@@ -1599,7 +1599,7 @@ qla24xx_vport_delete(struct fc_vport *fc_vport)
         vha->host_no, vha->vp_idx, vha));
     }
-    if (vha->req->id) {
+    if (vha->req->id && !ql2xmultique_tag) {
         if (qla25xx_delete_req_que(vha, vha->req) != QLA_SUCCESS)
             qla_printk(KERN_WARNING, ha,
                 "Queue delete failed.\n");
...
@@ -2171,6 +2171,7 @@ struct rsp_que {
     struct qla_msix_entry *msix;
     struct req_que *req;
     srb_t *status_srb; /* status continuation entry */
+    struct work_struct q_work;
 };

 /* Request queue data structure */

@@ -2539,6 +2540,7 @@ struct qla_hw_data {
     struct qla_chip_state_84xx *cs84xx;
     struct qla_statistics qla_stats;
     struct isp_operations *isp_ops;
+    struct workqueue_struct *wq;
 };

 /*
...
@@ -15,6 +15,7 @@ static request_t *qla2x00_req_pkt(struct scsi_qla_host *, struct req_que *,
     struct rsp_que *rsp);
 static void qla2x00_isp_cmd(struct scsi_qla_host *, struct req_que *);
+static void qla25xx_set_que(srb_t *, struct req_que **, struct rsp_que **);

 /**
  * qla2x00_get_cmd_direction() - Determine control_flag data direction.
  * @cmd: SCSI command

@@ -726,8 +727,7 @@ qla24xx_start_scsi(srb_t *sp)
     /* Setup device pointers. */
     ret = 0;
-    req = vha->req;
-    rsp = ha->rsp_q_map[0];
+    qla25xx_set_que(sp, &req, &rsp);
     sp->que = req;
     /* So we know we haven't pci_map'ed anything yet */

@@ -850,3 +850,21 @@ qla24xx_start_scsi(srb_t *sp)
     return QLA_FUNCTION_FAILED;
 }
+
+static void qla25xx_set_que(srb_t *sp, struct req_que **req,
+    struct rsp_que **rsp)
+{
+    struct scsi_cmnd *cmd = sp->cmd;
+    struct scsi_qla_host *vha = sp->fcport->vha;
+    struct qla_hw_data *ha = sp->fcport->vha->hw;
+    int affinity = cmd->request->cpu;
+
+    if (ql2xmultique_tag && affinity >= 0 &&
+        affinity < ha->max_rsp_queues - 1) {
+        *rsp = ha->rsp_q_map[affinity + 1];
+        *req = ha->req_q_map[1];
+    } else {
+        *req = vha->req;
+        *rsp = ha->rsp_q_map[0];
+    }
+}
@@ -1717,6 +1717,25 @@ qla24xx_msix_rsp_q(int irq, void *dev_id)
     return IRQ_HANDLED;
 }

+static irqreturn_t
+qla25xx_msix_rsp_q(int irq, void *dev_id)
+{
+    struct qla_hw_data *ha;
+    struct rsp_que *rsp;
+
+    rsp = (struct rsp_que *) dev_id;
+    if (!rsp) {
+        printk(KERN_INFO
+            "%s(): NULL response queue pointer\n", __func__);
+        return IRQ_NONE;
+    }
+    ha = rsp->hw;
+
+    queue_work_on((int) (rsp->id - 1), ha->wq, &rsp->q_work);
+
+    return IRQ_HANDLED;
+}
+
 static irqreturn_t
 qla24xx_msix_default(int irq, void *dev_id)
 {

@@ -1806,9 +1825,10 @@ struct qla_init_msix_entry {
     irq_handler_t handler;
 };

-static struct qla_init_msix_entry msix_entries[2] = {
+static struct qla_init_msix_entry msix_entries[3] = {
     { "qla2xxx (default)", qla24xx_msix_default },
     { "qla2xxx (rsp_q)", qla24xx_msix_rsp_q },
+    { "qla2xxx (multiq)", qla25xx_msix_rsp_q },
 };

 static void
...
@@ -1497,6 +1497,9 @@ qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
     DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));

+    if (ql2xmultique_tag)
+        req = ha->req_q_map[0];
+    else
         req = vha->req;
     rsp = req->rsp;

@@ -2311,6 +2314,9 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
     vha = fcport->vha;
     ha = vha->hw;
     req = vha->req;
+    if (ql2xmultique_tag)
+        rsp = ha->rsp_q_map[tag + 1];
+    else
         rsp = req->rsp;
     tsk = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &tsk_dma);
     if (tsk == NULL) {
...
@@ -633,6 +633,15 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
     return 0;
 }

+static void qla_do_work(struct work_struct *work)
+{
+    struct rsp_que *rsp = container_of(work, struct rsp_que, q_work);
+    struct scsi_qla_host *vha;
+
+    vha = qla25xx_get_host(rsp);
+    qla24xx_process_response_queue(vha, rsp);
+}
+
 /* create response queue */
 int
 qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,

@@ -711,6 +720,8 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
     rsp->req = NULL;
     qla2x00_init_response_q_entries(rsp);
+    if (rsp->hw->wq)
+        INIT_WORK(&rsp->q_work, qla_do_work);

     return rsp->id;

 que_failed:
...
@@ -96,6 +96,13 @@ MODULE_PARM_DESC(ql2xmaxqueues,
         "Enables MQ settings "
         "Default is 1 for single queue. Set it to number \
         of queues in MQ mode.");
+
+int ql2xmultique_tag;
+module_param(ql2xmultique_tag, int, S_IRUGO|S_IRUSR);
+MODULE_PARM_DESC(ql2xmultique_tag,
+        "Enables CPU affinity settings for the driver "
+        "Default is 0 for no affinity of request and response IO. "
+        "Set it to 1 to turn on the cpu affinity.");
 /*
  * SCSI host template entry points
  */
@@ -256,6 +263,47 @@ static void qla2x00_free_queues(struct qla_hw_data *ha)
     ha->rsp_q_map = NULL;
 }

+static int qla25xx_setup_mode(struct scsi_qla_host *vha)
+{
+    uint16_t options = 0;
+    int ques, req, ret;
+    struct qla_hw_data *ha = vha->hw;
+
+    if (ql2xmultique_tag) {
+        /* CPU affinity mode */
+        ha->wq = create_workqueue("qla2xxx_wq");
+        /* create a request queue for IO */
+        options |= BIT_7;
+        req = qla25xx_create_req_que(ha, options, 0, 0, -1,
+            QLA_DEFAULT_QUE_QOS);
+        if (!req) {
+            qla_printk(KERN_WARNING, ha,
+                "Can't create request queue\n");
+            goto fail;
+        }
+        vha->req = ha->req_q_map[req];
+        options |= BIT_1;
+        for (ques = 1; ques < ha->max_rsp_queues; ques++) {
+            ret = qla25xx_create_rsp_que(ha, options, 0, 0, req);
+            if (!ret) {
+                qla_printk(KERN_WARNING, ha,
+                    "Response Queue create failed\n");
+                goto fail2;
+            }
+        }
+        DEBUG2(qla_printk(KERN_INFO, ha,
+            "CPU affinity mode enabled, no. of response"
+            " queues:%d, no. of request queues:%d\n",
+            ha->max_rsp_queues, ha->max_req_queues));
+    }
+    return 0;
+fail2:
+    qla25xx_delete_queues(vha);
+fail:
+    ha->mqenable = 0;
+    return 1;
+}
+
 static char *
 qla2x00_pci_info_str(struct scsi_qla_host *vha, char *str)
 {
@@ -998,6 +1046,9 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
         if (qla2x00_vp_abort_isp(vha))
             goto eh_host_reset_lock;
     } else {
+        if (ha->wq)
+            flush_workqueue(ha->wq);
+
         set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
         if (qla2x00_abort_isp(base_vha)) {
             clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);

@@ -1521,6 +1572,7 @@ qla2x00_iospace_config(struct qla_hw_data *ha)
 {
     resource_size_t pio;
     uint16_t msix;
+    int cpus;

     if (pci_request_selected_regions(ha->pdev, ha->bars,
         QLA2XXX_DRIVER_NAME)) {

@@ -1575,7 +1627,7 @@ qla2x00_iospace_config(struct qla_hw_data *ha)
     /* Determine queue resources */
     ha->max_req_queues = ha->max_rsp_queues = 1;
-    if (ql2xmaxqueues <= 1 &&
+    if ((ql2xmaxqueues <= 1 || ql2xmultique_tag < 1) &&
         (!IS_QLA25XX(ha) && !IS_QLA81XX(ha)))
         goto mqiobase_exit;
     ha->mqiobase = ioremap(pci_resource_start(ha->pdev, 3),
@@ -1584,12 +1636,21 @@ qla2x00_iospace_config(struct qla_hw_data *ha)
         /* Read MSIX vector size of the board */
         pci_read_config_word(ha->pdev, QLA_PCI_MSIX_CONTROL, &msix);
         ha->msix_count = msix;
-        if (ql2xmaxqueues > 1) {
+        /* Max queues are bounded by available msix vectors */
+        /* queue 0 uses two msix vectors */
+        if (ql2xmultique_tag) {
+            cpus = num_online_cpus();
+            ha->max_rsp_queues = (ha->msix_count - 1 - cpus) ?
+                (cpus + 1) : (ha->msix_count - 1);
+            ha->max_req_queues = 2;
+        } else if (ql2xmaxqueues > 1) {
             ha->max_req_queues = ql2xmaxqueues > QLA_MQ_SIZE ?
                 QLA_MQ_SIZE : ql2xmaxqueues;
             DEBUG2(qla_printk(KERN_INFO, ha, "QoS mode set, max no"
                 " of request queues:%d\n", ha->max_req_queues));
         }
+        qla_printk(KERN_INFO, ha,
+            "MSI-X vector count: %d\n", msix);
     } else
         qla_printk(KERN_INFO, ha, "BAR 3 not enabled\n");
@@ -1871,6 +1932,12 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
         goto probe_failed;
     }

+    if (ha->mqenable)
+        if (qla25xx_setup_mode(base_vha))
+            qla_printk(KERN_WARNING, ha,
+                "Can't create queues, falling back to single"
+                " queue mode\n");
+
     /*
      * Startup the kernel thread for this host adapter
      */

@@ -1982,6 +2049,13 @@ qla2x00_remove_one(struct pci_dev *pdev)
     base_vha->flags.online = 0;

+    /* Flush the work queue and remove it */
+    if (ha->wq) {
+        flush_workqueue(ha->wq);
+        destroy_workqueue(ha->wq);
+        ha->wq = NULL;
+    }
+
     /* Kill the kernel thread for this host */
     if (ha->dpc_thread) {
         struct task_struct *t = ha->dpc_thread;
...